code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package mysql
import "github.com/go-jet/jet/v2/internal/jet"
// Expression is common interface for all expressions.
// Can be Bool, Int, Float, String, Date, Time, Timez, Timestamp or Timestampz expressions.
type Expression = jet.Expression
// BoolExpression interface
type BoolExpression = jet.BoolExpression
// StringExpression interface
type StringExpression = jet.StringExpression
// IntegerExpression interface
type IntegerExpression = jet.IntegerExpression
// FloatExpression interface
type FloatExpression = jet.FloatExpression
// TimeExpression interface
type TimeExpression = jet.TimeExpression
// DateExpression interface
type DateExpression = jet.DateExpression
// DateTimeExpression interface
type DateTimeExpression = jet.TimestampExpression
// TimestampExpression interface
type TimestampExpression = jet.TimestampExpression
// BoolExp is bool expression wrapper around arbitrary expression.
// Allows go compiler to see any expression as bool expression.
// Does not add sql cast to generated sql builder output.
var BoolExp = jet.BoolExp
// StringExp is string expression wrapper around arbitrary expression.
// Allows go compiler to see any expression as string expression.
// Does not add sql cast to generated sql builder output.
var StringExp = jet.StringExp
// IntExp is int expression wrapper around arbitrary expression.
// Allows go compiler to see any expression as int expression.
// Does not add sql cast to generated sql builder output.
var IntExp = jet.IntExp
// FloatExp is date expression wrapper around arbitrary expression.
// Allows go compiler to see any expression as float expression.
// Does not add sql cast to generated sql builder output.
var FloatExp = jet.FloatExp
// TimeExp is time expression wrapper around arbitrary expression.
// Allows go compiler to see any expression as time expression.
// Does not add sql cast to generated sql builder output.
var TimeExp = jet.TimeExp
// DateExp is date expression wrapper around arbitrary expression.
// Allows go compiler to see any expression as date expression.
// Does not add sql cast to generated sql builder output.
var DateExp = jet.DateExp
// DateTimeExp is timestamp expression wrapper around arbitrary expression.
// Allows go compiler to see any expression as timestamp expression.
// Does not add sql cast to generated sql builder output.
var DateTimeExp = jet.TimestampExp
// TimestampExp is timestamp expression wrapper around arbitrary expression.
// Allows go compiler to see any expression as timestamp expression.
// Does not add sql cast to generated sql builder output.
var TimestampExp = jet.TimestampExp
// Raw can be used for any unsupported functions, operators or expressions.
// For example: Raw("current_database()")
var Raw = jet.Raw
// Func can be used to call an custom or as of yet unsupported function in the database.
var Func = jet.Func
// NewEnumValue creates new named enum value
var NewEnumValue = jet.NewEnumValue | mysql/expressions.go | 0.587115 | 0.412648 | expressions.go | starcoder |
package processor
import (
"bytes"
"context"
"fmt"
"strconv"
"time"
"github.com/Jeffail/benthos/v3/internal/bloblang/field"
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/internal/interop"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message/tracing"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/response"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/OneOfOne/xxhash"
olog "github.com/opentracing/opentracing-go/log"
)
//------------------------------------------------------------------------------
// init registers the dedupe processor constructor, its documentation and
// its config field specs in the global Constructors table.
func init() {
	Constructors[TypeDedupe] = TypeSpec{
		constructor: NewDedupe,
		Categories: []Category{
			CategoryUtility,
		},
		Summary: `
Deduplicates message batches by caching selected (and optionally hashed)
messages, dropping batches that are already cached.`,
		Description: `
This processor acts across an entire batch, in order to deduplicate individual
messages within a batch use this processor with the
` + "[`for_each`](/docs/components/processors/for_each)" + ` processor.
Optionally, the ` + "`key`" + ` field can be populated in order to hash on a
function interpolated string rather than the full contents of messages. This
allows you to deduplicate based on dynamic fields within a message, such as its
metadata, JSON fields, etc. A full list of interpolation functions can be found
[here](/docs/configuration/interpolation#bloblang-queries).
For example, the following config would deduplicate based on the concatenated
values of the metadata field ` + "`kafka_key`" + ` and the value of the JSON
path ` + "`id`" + ` within the message contents:
` + "```yaml" + `
pipeline:
  processors:
    - dedupe:
        cache: foocache
        key: ${! meta("kafka_key") }-${! json("id") }
` + "```" + `
Caches should be configured as a resource, for more information check out the
[documentation here](/docs/components/caches/about).
When using this processor with an output target that might fail you should
always wrap the output within a ` + "[`retry`](/docs/components/outputs/retry)" + `
block. This ensures that during outages your messages aren't reprocessed after
failures, which would result in messages being dropped.
## Delivery Guarantees
Performing deduplication on a stream using a distributed cache voids any
at-least-once guarantees that it previously had. This is because the cache will
preserve message signatures even if the message fails to leave the Benthos
pipeline, which would cause message loss in the event of an outage at the output
sink followed by a restart of the Benthos instance.
If you intend to preserve at-least-once delivery guarantees you can avoid this
problem by using a memory based cache. This is a compromise that can achieve
effective deduplication but parallel deployments of the pipeline as well as
service restarts increase the chances of duplicates passing undetected.`,
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon("cache", "The [`cache` resource](/docs/components/caches/about) to target with this processor."),
			docs.FieldCommon("hash", "The hash type to used.").HasOptions("none", "xxhash"),
			docs.FieldCommon("key", "An optional key to use for deduplication (instead of the entire message contents).").IsInterpolated(),
			docs.FieldCommon("drop_on_err", "Whether messages should be dropped when the cache returns an error."),
			docs.FieldAdvanced("parts", "An array of message indexes within the batch to deduplicate based on. If left empty all messages are included. This field is only applicable when batching messages [at the input level](/docs/configuration/batching).").Array(),
		},
	}
}
//------------------------------------------------------------------------------
// DedupeConfig contains configuration fields for the Dedupe processor.
type DedupeConfig struct {
	Cache          string `json:"cache" yaml:"cache"`
	HashType       string `json:"hash" yaml:"hash"`
	Parts          []int  `json:"parts" yaml:"parts"` // message parts to hash
	Key            string `json:"key" yaml:"key"`
	DropOnCacheErr bool   `json:"drop_on_err" yaml:"drop_on_err"`
}

// NewDedupeConfig returns a DedupeConfig with default values: no hashing,
// only the first message part considered, and drop-on-cache-error enabled.
func NewDedupeConfig() DedupeConfig {
	conf := DedupeConfig{}
	conf.Cache = ""
	conf.HashType = "none"
	conf.Parts = []int{0} // only consider the 1st part
	conf.Key = ""
	conf.DropOnCacheErr = true
	return conf
}
//------------------------------------------------------------------------------
// hasher is the minimal interface shared by the batch signature builders:
// bytes are written in and a final signature is read back out. It is
// implemented by bytes.Buffer (no hashing) and xxhashHasher.
type hasher interface {
	Write(str []byte) (int, error)
	Bytes() []byte
}

// hasherFunc constructs a fresh hasher for each processed batch.
type hasherFunc func() hasher
//------------------------------------------------------------------------------
// xxhashHasher adapts an xxhash 64-bit state to the hasher interface.
type xxhashHasher struct {
	h *xxhash.XXHash64
}

// Write feeds str into the underlying xxhash state.
func (x *xxhashHasher) Write(str []byte) (int, error) {
	return x.h.Write(str)
}

// Bytes returns the current 64-bit sum rendered as a decimal string.
func (x *xxhashHasher) Bytes() []byte {
	return strconv.AppendUint(nil, x.h.Sum64(), 10)
}
//------------------------------------------------------------------------------
// strToHasher maps a configured hash type name to a hasher factory.
// "none" uses a plain buffer (raw contents as signature); "xxhash" uses a
// 64-bit xxhash sum. Any other name is an error.
func strToHasher(str string) (hasherFunc, error) {
	if str == "none" {
		return func() hasher {
			return bytes.NewBuffer(nil)
		}, nil
	}
	if str == "xxhash" {
		return func() hasher {
			return &xxhashHasher{h: xxhash.New64()}
		}, nil
	}
	return nil, fmt.Errorf("hash type not recognised: %v", str)
}
//------------------------------------------------------------------------------
// Dedupe is a processor that deduplicates messages either by hashing the full
// contents of message parts or by hashing the value of an interpolated string.
type Dedupe struct {
	conf  Config
	log   log.Modular
	stats metrics.Type

	// key, when it interpolates to a non-empty value, is hashed instead of
	// the raw message part contents.
	key *field.Expression

	mgr       types.Manager
	cacheName string

	// hasherFunc builds a fresh hasher for each processed batch.
	hasherFunc hasherFunc

	// Metrics counters.
	mCount     metrics.StatCounter
	mErrHash   metrics.StatCounter
	mErrCache  metrics.StatCounter
	mErr       metrics.StatCounter
	mDropped   metrics.StatCounter
	mSent      metrics.StatCounter
	mBatchSent metrics.StatCounter
}
// NewDedupe returns a Dedupe processor.
//
// It resolves the configured hash type and key expression up front, and
// probes that the target cache resource exists, so that misconfiguration
// fails at construction time rather than on the first message.
func NewDedupe(
	conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
	hFunc, err := strToHasher(conf.Dedupe.HashType)
	if err != nil {
		return nil, err
	}

	key, err := interop.NewBloblangField(mgr, conf.Dedupe.Key)
	if err != nil {
		return nil, fmt.Errorf("failed to parse key expression: %v", err)
	}

	if err := interop.ProbeCache(context.Background(), mgr, conf.Dedupe.Cache); err != nil {
		return nil, err
	}

	return &Dedupe{
		conf:       conf,
		log:        log,
		stats:      stats,
		key:        key,
		mgr:        mgr,
		cacheName:  conf.Dedupe.Cache,
		hasherFunc: hFunc,
		mCount:     stats.GetCounter("count"),
		mErrHash:   stats.GetCounter("error.hash"),
		mErrCache:  stats.GetCounter("error.cache"),
		mErr:       stats.GetCounter("error"),
		mDropped:   stats.GetCounter("dropped"),
		mSent:      stats.GetCounter("sent"),
		mBatchSent: stats.GetCounter("batch.sent"),
	}, nil
}
//------------------------------------------------------------------------------
// ProcessMessage applies the processor to a message, either creating >0
// resulting messages or a response to be sent back to the message source.
//
// A signature for the batch is built — from the interpolated key when one is
// configured, otherwise from the raw contents of the configured parts — and
// added to the cache. If the cache reports the signature as already present
// the whole batch is dropped (acked without being forwarded).
func (d *Dedupe) ProcessMessage(msg types.Message) ([]types.Message, types.Response) {
	d.mCount.Incr(1)

	extractedHash := false
	hasher := d.hasherFunc()

	spans := tracing.CreateChildSpans(TypeDedupe, msg)
	defer func() {
		for _, s := range spans {
			s.Finish()
		}
	}()

	key := d.key.Bytes(0, msg)
	if len(key) > 0 {
		// Write errors deliberately ignored here: neither hasher
		// implementation (bytes.Buffer, xxhash) fails its Write call.
		hasher.Write(key)
		extractedHash = true
	} else {
		for _, index := range d.conf.Dedupe.Parts {
			// Attempt to add whole part to hash, reusing the bytes already
			// extracted rather than fetching the part a second time.
			if partBytes := msg.Get(index).Get(); partBytes != nil {
				if _, err := hasher.Write(partBytes); err != nil {
					d.mErrHash.Incr(1)
					d.mErr.Incr(1)
					d.mDropped.Incr(1)
					d.log.Errorf("Hash error: %v\n", err)
				} else {
					extractedHash = true
				}
			}
		}
	}

	if !extractedHash {
		// Nothing could be hashed; the cache-error drop policy decides
		// whether the batch is dropped or passed through unchecked.
		if d.conf.Dedupe.DropOnCacheErr {
			d.mDropped.Incr(1)
			return nil, response.NewAck()
		}
	} else {
		var err error
		if cerr := interop.AccessCache(context.Background(), d.mgr, d.cacheName, func(cache types.Cache) {
			err = cache.Add(string(hasher.Bytes()), []byte{'t'})
		}); cerr != nil {
			err = cerr
		}
		if err != nil {
			if err == types.ErrKeyAlreadyExists {
				// Signature already cached: this batch is a duplicate.
				for _, s := range spans {
					s.LogFields(
						olog.String("event", "dropped"),
						olog.String("type", "deduplicated"),
					)
				}
				d.mDropped.Incr(1)
				return nil, response.NewAck()
			}

			d.mErrCache.Incr(1)
			d.mErr.Incr(1)
			d.log.Errorf("Cache error: %v\n", err)
			for _, s := range spans {
				s.LogFields(
					olog.String("event", "error"),
					olog.String("type", err.Error()),
				)
			}
			if d.conf.Dedupe.DropOnCacheErr {
				d.mDropped.Incr(1)
				return nil, response.NewAck()
			}
		}
	}

	d.mBatchSent.Incr(1)
	d.mSent.Incr(int64(msg.Len()))
	msgs := [1]types.Message{msg}
	return msgs[:], nil
}
// CloseAsync shuts down the processor and stops processing requests.
// The dedupe processor holds no background resources, so this is a no-op.
func (d *Dedupe) CloseAsync() {
}

// WaitForClose blocks until the processor has closed down.
// Since CloseAsync is a no-op this always returns nil immediately.
func (d *Dedupe) WaitForClose(timeout time.Duration) error {
	return nil
}
//------------------------------------------------------------------------------
// Package attrrange simplifies tracking of attributes that apply to a range of
// items.
// Refer to the examples in the test file for details on usage.
package attrrange
import (
"fmt"
"sort"
)
// AttrRange is a contiguous run of items that share the same attributes.
type AttrRange struct {
	// Low is the first position where these attributes apply.
	Low int

	// High is the end of the range. The attributes apply to all items in
	// range Low <= b < High.
	High int

	// AttrIdx is the index of the attributes that apply to this range.
	AttrIdx int
}

// newAttrRange returns a new AttrRange instance covering [low, high) with
// the attributes at attrIdx.
func newAttrRange(low, high, attrIdx int) *AttrRange {
	ar := AttrRange{}
	ar.Low = low
	ar.High = high
	ar.AttrIdx = attrIdx
	return &ar
}
// Tracker tracks attributes that apply to ranges of items.
// This object is not thread safe.
type Tracker struct {
	// ranges maps the low index of each range to its attribute range.
	ranges map[int]*AttrRange
}

// NewTracker returns an empty tracker of ranges that share the same
// attributes.
func NewTracker() *Tracker {
	tr := Tracker{
		ranges: make(map[int]*AttrRange),
	}
	return &tr
}
// Add adds a new range [low, high) of items that share the attributes with
// the specified index.
// The low position of the range must not collide with the low position of
// any existing range; in that case an error describing the existing range
// is returned. (The original version allocated the new range and shadowed
// it inside the collision check; the check now happens first.)
func (t *Tracker) Add(low, high, attrIdx int) error {
	if existing, ok := t.ranges[low]; ok {
		return fmt.Errorf("already have range starting on low:%d, existing:%+v", low, existing)
	}
	t.ranges[low] = newAttrRange(low, high, attrIdx)
	return nil
}
// ForPosition returns attribute index that apply to the specified position.
// Returns ErrNotFound when the requested position wasn't found in any of the
// known ranges.
func (t *Tracker) ForPosition(pos int) (*AttrRange, error) {
if ar, ok := t.ranges[pos]; ok {
return ar, nil
}
var keys []int
for k := range t.ranges {
keys = append(keys, k)
}
sort.Ints(keys)
var res *AttrRange
for _, k := range keys {
ar := t.ranges[k]
if ar.Low > pos {
break
}
if ar.High > pos {
res = ar
}
}
if res == nil {
return nil, fmt.Errorf("did not find attribute range for position %d", pos)
}
return res, nil
} | private/attrrange/attrrange.go | 0.826922 | 0.431405 | attrrange.go | starcoder |
package execution
import (
"reflect"
"regexp"
"github.com/cube2222/octosql"
"github.com/pkg/errors"
)
// Relation is a binary predicate over two expressions. Apply evaluates both
// expressions against the given variables and reports whether the relation
// holds, or an error when the operands are invalid for the relation.
type Relation interface {
	Apply(variables octosql.Variables, left, right Expression) (bool, error)
}
// Equal implements the "=" relation.
type Equal struct {
}

// NewEqual returns an Equal relation.
func NewEqual() Relation {
	return &Equal{}
}

// Apply evaluates both operands and reports whether they are equal.
// Two nil values compare equal; nil against non-nil compares unequal.
// Operands of differing kinds yield an error.
func (rel *Equal) Apply(variables octosql.Variables, left, right Expression) (bool, error) {
	leftValue, err := left.ExpressionValue(variables)
	if err != nil {
		return false, errors.Wrap(err, "couldn't get value of left operator in equal")
	}
	rightValue, err := right.ExpressionValue(variables)
	if err != nil {
		return false, errors.Wrap(err, "couldn't get value of right operator in equal")
	}

	switch {
	case leftValue == nil && rightValue == nil:
		return true, nil
	case leftValue == nil || rightValue == nil:
		return false, nil
	}

	if reflect.TypeOf(leftValue).Kind() != reflect.TypeOf(rightValue).Kind() {
		return false, errors.Errorf(
			"invalid operands to equal %v and %v with types %v and %v",
			leftValue, rightValue, GetType(leftValue), GetType(rightValue))
	}

	return octosql.AreEqual(leftValue, rightValue), nil
}
// NotEqual implements the "!=" relation as the negation of Equal.
type NotEqual struct {
}

// NewNotEqual returns a NotEqual relation.
func NewNotEqual() Relation {
	return &NotEqual{}
}

// Apply reports whether the two operands are not equal.
func (rel *NotEqual) Apply(variables octosql.Variables, left, right Expression) (bool, error) {
	// Delegate to a real Equal value rather than invoking the method
	// expression with a nil receiver, which only worked because Equal.Apply
	// never dereferences its receiver.
	equal, err := NewEqual().Apply(variables, left, right)
	if err != nil {
		return false, errors.Wrap(err, "couldn't check equality")
	}
	return !equal, nil
}
// MoreThan implements the ">" relation.
type MoreThan struct {
}

// NewMoreThan returns a MoreThan relation.
func NewMoreThan() Relation {
	return &MoreThan{}
}

// Apply reports whether left > right. Both operands must be non-nil and of
// the same kind; only int, float, string and time values are ordered, any
// other operand type yields an error.
func (rel *MoreThan) Apply(variables octosql.Variables, left, right Expression) (bool, error) {
	leftValue, err := left.ExpressionValue(variables)
	if err != nil {
		return false, errors.Wrap(err, "couldn't get value of left operator in more than")
	}
	rightValue, err := right.ExpressionValue(variables)
	if err != nil {
		return false, errors.Wrap(err, "couldn't get value of right operator in more than")
	}
	// Unlike equality, ordering against a nil operand is an error rather
	// than simply false.
	if leftValue == nil || rightValue == nil {
		return false, errors.Errorf("invalid null operand to more_than %v and %v", leftValue, rightValue)
	}
	if reflect.TypeOf(leftValue).Kind() != reflect.TypeOf(rightValue).Kind() {
		return false, errors.Errorf(
			"invalid operands to more_than %v and %v with types %v and %v",
			leftValue, rightValue, GetType(leftValue), GetType(rightValue))
	}
	// The kind check above guarantees the right operand shares the left
	// operand's type, so the assertions below cannot fail.
	switch leftValue := leftValue.(type) {
	case octosql.Int:
		rightValue := rightValue.(octosql.Int)
		return leftValue > rightValue, nil
	case octosql.Float:
		rightValue := rightValue.(octosql.Float)
		return leftValue > rightValue, nil
	case octosql.String:
		rightValue := rightValue.(octosql.String)
		return leftValue > rightValue, nil
	case octosql.Time:
		rightValue := rightValue.(octosql.Time)
		return leftValue.AsTime().After(rightValue.AsTime()), nil
	case octosql.Null, octosql.Phantom, octosql.Bool, octosql.Duration, octosql.Tuple, octosql.Object:
		return false, errors.Errorf(
			"invalid operands to more_than %v and %v with types %v and %v, only int, float, string and time allowed",
			leftValue, rightValue, GetType(leftValue), GetType(rightValue))
	}
	// Every value kind is covered by a case above.
	panic("unreachable")
}
// LessThan implements the "<" relation.
type LessThan struct {
}

// NewLessThan returns a LessThan relation.
func NewLessThan() Relation {
	return &LessThan{}
}

// Apply reports whether left < right by evaluating MoreThan with the
// operands swapped.
func (rel *LessThan) Apply(variables octosql.Variables, left, right Expression) (bool, error) {
	// Delegate to a real MoreThan value rather than invoking the method
	// expression with a nil receiver.
	more, err := NewMoreThan().Apply(variables, right, left)
	if err != nil {
		return false, errors.Wrap(err, "couldn't check reverse more_than")
	}
	return more, nil
}
// GreaterEqual implements the ">=" relation as the negation of LessThan.
type GreaterEqual struct {
}

// NewGreaterEqual returns a GreaterEqual relation.
func NewGreaterEqual() Relation {
	return &GreaterEqual{}
}

// Apply reports whether left >= right.
func (rel *GreaterEqual) Apply(variables octosql.Variables, left, right Expression) (bool, error) {
	// Delegate to a real LessThan value rather than invoking the method
	// expression with a nil receiver.
	less, err := NewLessThan().Apply(variables, left, right)
	if err != nil {
		return false, errors.Wrap(err, "couldn't get less for greater_equal")
	}
	return !less, nil
}
// LessEqual implements the "<=" relation as the negation of MoreThan.
type LessEqual struct {
}

// NewLessEqual returns a LessEqual relation.
func NewLessEqual() Relation {
	return &LessEqual{}
}

// Apply reports whether left <= right.
func (rel *LessEqual) Apply(variables octosql.Variables, left, right Expression) (bool, error) {
	// Delegate to a real MoreThan value rather than invoking the method
	// expression with a nil receiver.
	more, err := NewMoreThan().Apply(variables, left, right)
	if err != nil {
		return false, errors.Wrap(err, "couldn't get more for less_equal")
	}
	return !more, nil
}
// Like implements the "LIKE" relation.
//
// NOTE(review): the right operand is handed directly to Go's regexp engine,
// so it must be a Go regular expression — SQL LIKE wildcards (% and _) are
// not translated. Confirm this is the intended contract. The pattern is
// also recompiled on every call, which is costly on hot paths.
type Like struct {
}

// NewLike returns a Like relation.
func NewLike() Relation {
	return &Like{}
}

// Apply reports whether the left string operand matches the pattern held in
// the right string operand. Non-string operands yield an error.
func (rel *Like) Apply(variables octosql.Variables, left, right Expression) (bool, error) {
	leftValue, err := left.ExpressionValue(variables)
	if err != nil {
		return false, errors.Wrap(err, "couldn't get value of left operator in LIKE")
	}
	rightValue, err := right.ExpressionValue(variables)
	if err != nil {
		return false, errors.Wrap(err, "couldn't get value of right operator in LIKE")
	}
	leftString, ok := leftValue.(octosql.String)
	if !ok {
		return false, errors.Errorf(
			"invalid operands to like %v and %v with types %v and %v, only string allowed",
			leftValue, rightValue, GetType(leftValue), GetType(rightValue))
	}
	rightString, ok := rightValue.(octosql.String)
	if !ok {
		return false, errors.Errorf(
			"invalid operands to like %v and %v with types %v and %v, only string allowed",
			leftValue, rightValue, GetType(leftValue), GetType(rightValue))
	}
	match, err := regexp.MatchString(rightString.AsString(), leftString.AsString())
	if err != nil {
		return false, errors.Wrapf(err, "couldn't match string in like relation with pattern %v", rightString)
	}
	return match, nil
}
// In implements the "IN" relation.
type In struct {
}

// NewIn returns an In relation.
func NewIn() Relation {
	return &In{}
}

// Apply reports whether the left operand is contained in the right one.
// A tuple on the right is searched element by element; any other value is
// compared for direct equality.
func (rel *In) Apply(variables octosql.Variables, left, right Expression) (bool, error) {
	leftValue, err := left.ExpressionValue(variables)
	if err != nil {
		return false, errors.Wrap(err, "couldn't get value of left operator in IN")
	}
	rightValue, err := right.ExpressionValue(variables)
	if err != nil {
		return false, errors.Wrap(err, "couldn't get value of right operator in IN")
	}

	if set, ok := rightValue.(octosql.Tuple); ok {
		for _, elem := range set {
			if octosql.AreEqual(leftValue, elem) {
				return true, nil
			}
		}
		return false, nil
	}
	return octosql.AreEqual(leftValue, rightValue), nil
}
type NotIn struct {
}
func NewNotIn() Relation {
return &NotIn{}
}
func (rel *NotIn) Apply(variables octosql.Variables, left, right Expression) (bool, error) {
in, err := (*In).Apply(nil, variables, left, right)
if err != nil {
return false, errors.Wrap(err, "couldn't check containment")
}
return !in, nil
} | execution/relation.go | 0.804713 | 0.413773 | relation.go | starcoder |
package imageutil
import (
"fmt"
"image"
"math"
"strconv"
)
// Areas represents a list of relative crop areas.
type Areas []Area

// Area represents a relative crop area. All coordinates are fractions of
// the full image dimensions in the range [0, 1].
type Area struct {
	Name string  `json:"name,omitempty"`
	X    float32 `json:"x,omitempty"`
	Y    float32 `json:"y,omitempty"`
	W    float32 `json:"w,omitempty"`
	H    float32 `json:"h,omitempty"`
}

// Empty reports whether the area has zero position and zero size.
func (a Area) Empty() bool {
	return a.X == 0 && a.W == 0 && a.Y == 0 && a.H == 0
}

// String returns a 12-character hex identifier for the crop area (three hex
// digits per coordinate, each scaled by 1000), or "" for an empty area.
func (a Area) String() string {
	if a.Empty() {
		return ""
	}
	x, y, w, h := int(a.X*1000), int(a.Y*1000), int(a.W*1000), int(a.H*1000)
	return fmt.Sprintf("%03x%03x%03x%03x", x, y, w, h)
}
// Bounds returns the absolute pixel coordinates of the area within img,
// plus dim, the crop width in pixels (derived from W only).
//
// NOTE(review): the math uses img.Bounds().Max alone, i.e. it assumes the
// image bounds start at the origin — confirm callers never pass sub-images
// with a non-zero Bounds().Min.
func (a Area) Bounds(img image.Image) (min, max image.Point, dim int) {
	size := img.Bounds().Max
	min = image.Point{X: int(float32(size.X) * a.X), Y: int(float32(size.Y) * a.Y)}
	max = image.Point{X: int(float32(size.X) * (a.X + a.W)), Y: int(float32(size.Y) * (a.Y + a.H))}
	dim = int(float32(size.X) * a.W)
	return min, max, dim
}
// Top returns the top (minimum) Y coordinate as float64.
func (a Area) Top() float64 {
	return float64(a.Y)
}

// Left returns the left (minimum) X coordinate as float64.
func (a Area) Left() float64 {
	return float64(a.X)
}

// Right returns the right X coordinate (X + W) as float64.
func (a Area) Right() float64 {
	return float64(a.X + a.W)
}

// Bottom returns the bottom Y coordinate (Y + H) as float64.
func (a Area) Bottom() float64 {
	return float64(a.Y + a.H)
}
// Surface returns the relative surface area (width times height).
func (a Area) Surface() float64 {
	// Multiply in float32 first, matching the precision of the stored fields.
	return float64(a.W * a.H)
}

// SurfaceRatio returns the ratio between the given area and this area's
// surface, always as smaller divided by larger (so the result is <= 1).
// A non-positive argument or surface yields 0.
func (a Area) SurfaceRatio(area float64) float64 {
	s := a.Surface()
	if area <= 0 || s <= 0 {
		return 0
	}
	return math.Min(s, area) / math.Max(s, area)
}
// Overlap returns the horizontal and vertical extent of the intersection
// of the two areas; each extent is clamped at zero when the areas are
// disjoint along that axis.
func (a Area) Overlap(other Area) (x, y float64) {
	right := math.Min(a.Right(), other.Right())
	left := math.Max(a.Left(), other.Left())
	bottom := math.Min(a.Bottom(), other.Bottom())
	top := math.Max(a.Top(), other.Top())
	x = math.Max(0, right-left)
	y = math.Max(0, bottom-top)
	return x, y
}

// OverlapArea returns the surface of the intersection of the two areas.
func (a Area) OverlapArea(other Area) (area float64) {
	w, h := a.Overlap(other)
	return w * h
}

// OverlapPercent returns the overlap of the two areas, expressed as a
// percentage of the other area's surface, rounded to the nearest int.
func (a Area) OverlapPercent(other Area) int {
	ratio := other.SurfaceRatio(a.OverlapArea(other))
	return int(math.Round(ratio * 100))
}
// clipVal clamps a relative size into the valid [0, 1] range.
func clipVal(f float32) float32 {
	switch {
	case f > 1:
		return 1
	case f < 0:
		return 0
	default:
		return f
	}
}
// NewArea returns a new relative image area with every coordinate clipped
// to the valid [0, 1] range.
func NewArea(name string, x, y, w, h float32) Area {
	a := Area{Name: name}
	a.X = clipVal(x)
	a.Y = clipVal(y)
	a.W = clipVal(w)
	a.H = clipVal(h)
	return a
}
// AreaFromString returns an image area.
func AreaFromString(s string) Area {
if len(s) != 12 || !IsHex(s) {
return Area{}
}
x, _ := strconv.ParseInt(s[0:3], 16, 32)
y, _ := strconv.ParseInt(s[3:6], 16, 32)
w, _ := strconv.ParseInt(s[6:9], 16, 32)
h, _ := strconv.ParseInt(s[9:12], 16, 32)
return NewArea("crop", float32(x)/1000, float32(y)/1000, float32(w)/1000, float32(h)/1000)
} | imageutil/area.go | 0.911232 | 0.577793 | area.go | starcoder |
package cell
// AppendTo appends elements to list. When list is non-empty the append is
// destructive (the new pairs are linked onto its last cell and list itself
// is returned); when list is Null a fresh list of the elements is built and
// returned. Passing a nil list panics.
func AppendTo(list Cell, elements ...Cell) Cell {
	var pair, prev, start Cell

	index := 0

	start = Null

	if list == nil {
		panic("cannot append to non-existent list")
	}

	if list != Null {
		start = list
		// Walk to the final pair of the existing list.
		for prev = list; Cdr(prev) != Null; prev = Cdr(prev) {
		}
	} else if len(elements) > 0 {
		// Empty list: seed the result with the first element.
		start = Cons(elements[index], Null)
		prev = start
		index++
	}

	// Link the remaining elements one fresh pair at a time.
	for ; index < len(elements); index++ {
		pair = Cons(elements[index], Null)
		SetCdr(prev, pair)
		prev = pair
	}

	return start
}
// Car returns the first element of the pair c.
func Car(c Cell) Cell {
	return ToPair(c).car
}

// Cdr returns the rest (tail) of the pair c.
func Cdr(c Cell) Cell {
	return ToPair(c).cdr
}

// Caar returns (car (car c)).
func Caar(c Cell) Cell {
	return ToPair(ToPair(c).car).car
}

// Cadr returns (car (cdr c)), i.e. the second element of a list.
func Cadr(c Cell) Cell {
	return ToPair(ToPair(c).cdr).car
}

// Cdar returns (cdr (car c)).
func Cdar(c Cell) Cell {
	return ToPair(ToPair(c).car).cdr
}

// Cddr returns (cdr (cdr c)).
func Cddr(c Cell) Cell {
	return ToPair(ToPair(c).cdr).cdr
}

// Caddr returns (car (cdr (cdr c))), i.e. the third element of a list.
func Caddr(c Cell) Cell {
	return ToPair(ToPair(ToPair(c).cdr).cdr).car
}
// IsAtom reports whether c is an Atom.
func IsAtom(c Cell) bool {
	// A plain type assertion replaces the original one-case type switch.
	_, ok := c.(Atom)
	return ok
}

// IsNull reports whether c is the empty list.
func IsNull(c Cell) bool {
	return c == Null
}

// IsSimple reports whether c is an atom or a pair.
func IsSimple(c Cell) bool {
	return IsAtom(c) || IsPair(c)
}

// IsNumber reports whether c can be treated as a number: either a Number,
// or a Symbol whose text is numeric.
func IsNumber(c Cell) bool {
	switch t := c.(type) {
	case *Symbol:
		return t.isNumeric()
	case Number:
		return true
	}
	return false
}
// JoinTo destructively concatenates the given lists onto the end of list.
// Unlike AppendTo, each variadic argument is itself a list whose cells are
// copied pair by pair. list must be an existing, non-empty list: nil and
// Null both panic.
func JoinTo(list Cell, elements ...Cell) Cell {
	var pair, prev, start Cell

	start = list

	if list == nil {
		panic("cannot append to non-existent list")
	} else if list == Null {
		panic("cannot destructively modify nil value")
	}

	// Find the final pair of the target list.
	for ; list != Null; list = Cdr(list) {
		prev = list
	}

	// Copy every cell of every source list onto the end.
	for index := 0; index < len(elements); index++ {
		for list = elements[index]; list != Null; list = Cdr(list) {
			pair = Cons(Car(list), Null)
			SetCdr(prev, pair)
			prev = pair
		}
	}

	return start
}
// Length returns the number of pairs in list, stopping at the first cell
// that is nil, Null, or not a pair (so improper lists are handled).
func Length(list Cell) int64 {
	var length int64
	for c := list; c != nil && c != Null && IsPair(c); c = Cdr(c) {
		length++
	}
	return length
}
// List builds a fresh proper list from the given elements, returning Null
// when no elements are supplied.
func List(elements ...Cell) Cell {
	// Building back-to-front lets each Cons link directly onto the result.
	var lst Cell = Null
	for index := len(elements) - 1; index >= 0; index-- {
		lst = Cons(elements[index], lst)
	}
	return lst
}
// Raw returns the unquoted text of a *String, or the regular string form
// of any other cell.
func Raw(c Cell) string {
	switch v := c.(type) {
	case *String:
		return v.Raw()
	default:
		return c.String()
	}
}
// Reverse returns a fresh list holding the elements of list in reverse
// order; the input list is left untouched.
func Reverse(list Cell) Cell {
	var out Cell = Null
	for c := list; c != nil && c != Null; c = Cdr(c) {
		out = Cons(Car(c), out)
	}
	return out
}
// SetCar destructively replaces the first element of the pair c.
func SetCar(c, value Cell) {
	ToPair(c).car = value
}

// SetCdr destructively replaces the rest (tail) of the pair c.
func SetCdr(c, value Cell) {
	ToPair(c).cdr = value
}
// Slice returns a fresh list copying the elements of list in the range
// [start, end). Negative indices count back from the end of the list, and
// end <= 0 is interpreted relative to the end (end == 0 selects through the
// last element). Out-of-range bounds panic, except that an end past the
// last element is clamped.
func Slice(list Cell, start, end int64) Cell {
	length := Length(list)

	if start < 0 {
		start = length + start
	}

	if start < 0 {
		panic("slice starts before first element")
	} else if start >= length {
		panic("slice starts after last element")
	}

	// end <= 0 counts from the end of the list; end == 0 means "to the end".
	if end <= 0 {
		end = length + end
	}

	if end < 0 {
		panic("slice ends before first element")
	} else if end > length {
		// Overshooting the end is clamped rather than treated as an error.
		end = length
	}

	// From here on end holds the number of elements to copy.
	end -= start

	if end < 0 {
		panic("end of slice before start")
	} else if end == 0 {
		return Null
	}

	// Advance to the first element of the slice.
	for ; start > 0; start-- {
		list = Cdr(list)
	}

	// Copy the selected cells into a fresh list.
	slice := Cons(Car(list), Null)

	for c := slice; end > 1; end-- {
		list = Cdr(list)
		n := Cons(Car(list), Null)
		SetCdr(c, n)
		c = n
	}

	return slice
}
func Tail(list Cell, index int64, dflt Cell) Cell {
length := Length(list)
if index < 0 {
index = length + index
}
msg := ""
if index < 0 {
msg = "index before first element"
} else if index >= length {
msg = "index after last element"
}
if msg != "" {
if dflt == nil {
panic(msg)
} else {
return dflt
}
}
for ; index > 0; index-- {
list = Cdr(list)
}
return list
} | pkg/cell/cell.go | 0.596551 | 0.582907 | cell.go | starcoder |
package vm
// Masks with the top 2..7 bits of a uint16 set. bitvec.setN shifts one of
// these right by the in-byte bit offset to mark that many consecutive
// data bits, possibly spanning a byte boundary.
const (
	set2BitsMask = uint16(0b1100_0000_0000_0000)
	set3BitsMask = uint16(0b1110_0000_0000_0000)
	set4BitsMask = uint16(0b1111_0000_0000_0000)
	set5BitsMask = uint16(0b1111_1000_0000_0000)
	set6BitsMask = uint16(0b1111_1100_0000_0000)
	set7BitsMask = uint16(0b1111_1110_0000_0000)
)
// bitvec is a bit vector which maps bytes in a program.
// An unset bit means the byte is an opcode, a set bit means
// it's data (i.e. argument of PUSHxx).
type bitvec []byte

// lookup holds single-bit masks indexed by in-byte bit position (MSB first).
var lookup = [8]byte{0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1}

// set1 marks the single bit at pos as data.
func (bits bitvec) set1(pos uint64) {
	bits[pos/8] |= lookup[pos%8]
}

// setN marks 2..7 consecutive bits starting at pos as data; flag must be
// one of the setXBitsMask constants.
func (bits bitvec) setN(flag uint16, pos uint64) {
	shifted := flag >> (pos % 8)
	bits[pos/8] |= byte(shifted >> 8)
	if lo := byte(shifted); lo != 0 {
		// If the bit-setting spills into the neighbouring byte we can assign
		// instead of OR-ing, since this is the first write to that byte.
		bits[pos/8+1] = lo
	}
}

// set8 marks 8 consecutive bits starting at pos as data.
func (bits bitvec) set8(pos uint64) {
	mask := byte(0xFF >> (pos % 8))
	bits[pos/8] |= mask
	bits[pos/8+1] = ^mask
}

// set16 marks 16 consecutive bits starting at pos as data.
func (bits bitvec) set16(pos uint64) {
	mask := byte(0xFF >> (pos % 8))
	bits[pos/8] |= mask
	bits[pos/8+1] = 0xFF
	bits[pos/8+2] = ^mask
}

// codeSegment checks if the position is in a code segment.
func (bits *bitvec) codeSegment(pos uint64) bool {
	return ((*bits)[pos/8] & (0x80 >> (pos % 8))) == 0
}
// codeBitmap collects data locations in code: set bits in the returned
// vector mark bytes that are PUSH arguments rather than opcodes.
func codeBitmap(code []byte) bitvec {
	// The bitmap is 4 bytes longer than necessary, in case the code
	// ends with a PUSH32, the algorithm will push zeroes onto the
	// bitvector outside the bounds of the actual code.
	bits := make(bitvec, len(code)/8+1+4)
	return codeBitmapInternal(code, bits)
}
// codeBitmapInternal is the internal implementation of codeBitmap.
// It exists for the purpose of being able to run benchmark tests
// without dynamic allocations affecting the results.
func codeBitmapInternal(code, bits bitvec) bitvec {
for pc := uint64(0); pc < uint64(len(code)); {
op := OpCode(code[pc])
pc++
if op < PUSH1 || op > PUSH32 {
continue
}
numbits := op - PUSH1 + 1
if numbits >= 8 {
for ; numbits >= 16; numbits -= 16 {
bits.set16(pc)
pc += 16
}
for ; numbits >= 8; numbits -= 8 {
bits.set8(pc)
pc += 8
}
}
switch numbits {
case 1:
bits.set1(pc)
pc += 1
case 2:
bits.setN(set2BitsMask, pc)
pc += 2
case 3:
bits.setN(set3BitsMask, pc)
pc += 3
case 4:
bits.setN(set4BitsMask, pc)
pc += 4
case 5:
bits.setN(set5BitsMask, pc)
pc += 5
case 6:
bits.setN(set6BitsMask, pc)
pc += 6
case 7:
bits.setN(set7BitsMask, pc)
pc += 7
}
}
return bits
} | minigeth/core/vm/analysis.go | 0.538255 | 0.444384 | analysis.go | starcoder |
package govatar
import (
"github.com/disintegration/imaging"
"golang.org/x/crypto/scrypt"
"image"
"image/color"
"image/draw"
"math/rand"
)
// Package-level scratch state shared between CreateAvatar and Hash.
//
// NOTE(review): all of these are mutated by CreateAvatar on every call, and
// keyLength is read by Hash, so concurrent CreateAvatar calls race on them.
// Consider making them locals / parameters.
var (
	blockX    int // x origin of the block currently being drawn
	blockY    int // y origin of the block currently being drawn
	rows      int // blocks per row on the drawn (left) half
	columns   int // blocks per column
	numBlocks int // rows * columns
	keyLength int // scrypt key length, set from numBlocks by CreateAvatar
)

// block is an empty marker type; one instance exists per grid cell.
type block struct{}
// CreateAvatar renders a deterministic, horizontally mirrored avatar for
// userName/salt and returns it as an image.Image. One scrypt-derived hash
// byte decides the fill and color of each grid block on the left half; the
// half is then flipped to produce a symmetric image.
//
// NOTE(review): this function writes the package globals blockX, blockY,
// rows, columns, numBlocks and keyLength, so concurrent calls race.
func CreateAvatar(canvasWidth, canvasHeight, blockWidth, blockHeight int, vibrance uint8, userName, salt string) (image.Image, error) {
	var shuffleInt int64
	shuffleInt = 1
	// number of rows/columns (only the left half is painted, then mirrored)
	rows = (canvasWidth / 2) / blockWidth
	columns = canvasHeight / blockHeight
	// number of blocks
	numBlocks = rows * columns
	keyLength = numBlocks
	var (
		hash   []uint8
		colors []uint8
		err    error
	)
	// Generate hash using Scrypt; it yields one byte per block.
	hash, err = Hash(userName, salt)
	if err != nil {
		return nil, err
	}
	// Create the avatar image
	r := image.Rect(0, 0, canvasWidth, canvasHeight)
	img := image.NewRGBA(r)
	// Create and draw a grey block as background color
	grayBlock := color.RGBA{R: 210, G: 210, B: 210, A: 255}
	draw.Draw(img, img.Bounds(), &image.Uniform{C: grayBlock}, image.ZP, draw.Src)
	// Create blocks to fill half of the avatar canvas.
	blocks := make([]block, numBlocks)
	// For each block decide if to draw anything and what color to draw based on hash
	for i := range blocks {
		var gC uint8
		var rC uint8
		var bC uint8
		var aC uint8
		var skip bool
		// Split the block's hash byte into its decimal digits.
		num0 := hash[i] % 10
		num1 := (hash[i] / 10) % 10
		num2 := (hash[i] / 100) % 10
		if i == 0 {
			colors = make([]uint8, 3)
		}
		if num2 == 0 {
			// If byte is < 100 then display nothing
			skip = true
			// Use this data to seed the RGB shuffle rand value.
			// (uint8 products wrap modulo 256 before widening to int64.)
			if num1*num0 != 0 {
				shuffleInt *= int64(num1 * num0)
			} else {
				shuffleInt += int64(num1 + num0)
			}
		} else {
			skip = false
			aC = 255
			colors = []uint8{
				rC,
				gC,
				bC,
			}
			// Generate RGB values (uint8 arithmetic wraps on overflow).
			colors[0] = ((num0 * vibrance) * num1) + num2
			colors[1] = ((num1 * vibrance) * num2) + num0
			colors[2] = ((num2 * vibrance) * num0) + num1
			// Shuffle the colors based on rand
			Shuffle(colors, shuffleInt)
		}
		// Start position of block
		if i == 0 {
			blockX = 0
			blockY = 0
		} else {
			// New row
			if i%rows == 0 {
				blockX = 0
				blockY += blockHeight
				// New column
			} else {
				blockX += blockWidth
			}
		}
		if skip {
			continue
		}
		// NOTE(review): both conditions below are impossible for uint8
		// values (always 0..255), so this loop is dead code.
		// If colors have somehow overstepped bounds reset them
		for i := range colors {
			if colors[i] > 255 {
				colors[i] = 255
			}
			if colors[i] < 0 {
				colors[i] = 0
			}
		}
		// Create a color based on hash in random order
		colorBlock := color.RGBA{R: colors[0], G: colors[1], B: colors[2], A: aC}
		// Print area is size of one block starting from position of block
		spMin := image.Point{X: blockX, Y: blockY}
		spMax := image.Point{X: blockX + blockWidth, Y: blockY + blockHeight}
		// Create a rectangle the size of the block
		blockRectangle := image.Rectangle{Min: spMin, Max: spMax}
		// Draw the block to the image
		draw.Draw(img, blockRectangle, &image.Uniform{C: colorBlock}, image.Point{X: 0, Y: 0}, draw.Src)
	}
	// Create a horizontal mirror of the generated image
	imgHFlip := imaging.FlipH(img)
	// Draw to the right side of the canvas
	r2 := image.Rect(canvasWidth, canvasHeight, canvasWidth/2, 0)
	draw.Draw(img, r2, imgHFlip, image.Point{X: canvasWidth / 2, Y: 0}, draw.Src)
	return img, nil
}
// Hash derives a byte array from userName and salt using scrypt. The output
// length is taken from the package-level keyLength.
func Hash(userName, salt string) ([]byte, error) {
	derived, err := scrypt.Key([]byte(userName), []byte(salt), 16384, 8, 1, keyLength)
	if err != nil {
		return nil, err
	}
	return derived, nil
}
// Shuffle deterministically permutes vals in place, seeding a private
// math/rand source with colorInt.
func Shuffle(vals []uint8, colorInt int64) {
	rng := rand.New(rand.NewSource(colorInt))
	swap := func(i, j int) {
		vals[i], vals[j] = vals[j], vals[i]
	}
	rng.Shuffle(len(vals), swap)
}
package marc21
import (
"bytes"
"errors"
"strings"
)
/*
https://www.loc.gov/marc/specifications/specrecstruc.html
Data fields in MARC 21 formats are assigned tags beginning with
ASCII numeric characters other than two zeroes. Such fields contain
indicators and subfield codes, as well as data and a field
terminator. There are no restrictions on the number, length, or
content of data fields other than those already stated or implied,
e.g., those resulting from the limitation of total record length.
Indicators are the first two characters in every variable data
field, preceding any subfield code (delimiter plus data element
identifier) which may be present. Each indicator is one character
and every data field in the record includes two indicators, even if
values have not been defined for the indicators in a particular
field. Indicators supply additional information about the field,
and are defined individually for each field. Indicator values are
interpreted independently; meaning is not ascribed to the two
indicators taken together. Indicators may be any ASCII lowercase
alphabetic, numeric, or blank. A blank is used in an undefined
indicator position, and may also have a defined meaning in a
defined indicator position. The numeric character 9 is reserved for
local definition as an indicator.
Subfield codes identify the individual data elements within the
field, and precede the data elements they identify. Each data field
contains at least one subfield code. The subfield code consists of
a delimiter (ASCII 1F (hex)) followed by a data element identifier.
Data element identifiers defined in MARC 21 may be any ASCII
lowercase alphabetic or numeric character.
*/
/*
http://www.loc.gov/marc/bibliographic/bdintro.html
Variable data fields - The remaining variable fields defined in the
format. In addition to being identified by a field tag in the
Directory, variable data fields contain two indicator positions
stored at the beginning of each field and a two-character subfield
code preceding each data element within the field.
The variable data fields are grouped into blocks according to the
first character of the tag, which with some exceptions identifies
the function of the data within the record. The type of information
in the field is identified by the remainder of the tag.
Indicator positions - The first two character positions in the
variable data fields that contain values which interpret or
supplement the data found in the field. Indicator values are
interpreted independently, that is, meaning is not ascribed to the
two indicators taken together. Indicator values may be a lowercase
alphabetic or a numeric character. A blank (ASCII SPACE),
represented in this document as a #, is used in an undefined
indicator position. In a defined indicator position, a blank may be
assigned a meaning, or may mean no information provided.
Subfield codes - Two characters that distinguish the data elements
within a field which require separate manipulation. A subfield code
consists of a delimiter (ASCII 1F hex), represented in this
document as a $, followed by a data element identifier. Data
element identifiers may be a lowercase alphabetic or a numeric
character. Subfield codes are defined independently for each field;
however, parallel meanings are preserved whenever possible (e.g.,
in the 100, 400, and 600 Personal Name fields). Subfield codes are
defined for purposes of identification, not arrangement. The order
of subfields is generally specified by standards for the data
content, such as cataloging rules.
Also:
http://www.loc.gov/marc/holdings/hdintro.html
http://www.loc.gov/marc/authority/adintro.html
http://www.loc.gov/marc/classification/cdintro.html
http://www.loc.gov/marc/community/ciintro.html
*/
// extractDatafields extracts the data fields/sub-fields from the raw MARC record bytes.
// baseAddress is the offset of the data portion within rawRec; dir supplies each
// field's tag, starting position, and length. Control fields (tags beginning
// with "00") are skipped. Returns an error when a field is not terminated by
// the field terminator byte.
func extractDatafields(rawRec []byte, baseAddress int, dir []*directoryEntry) (dfs []*Datafield, err error) {
	for _, de := range dir {
		if !strings.HasPrefix(de.tag, "00") {
			start := baseAddress + de.startingPos
			b := rawRec[start : start+de.fieldLength]
			if b[de.fieldLength-1] != fieldTerminator {
				return nil, errors.New("extractDatafields: Field terminator not found at end of field")
			}
			// The first two bytes of a data field are its indicators.
			df := Datafield{
				Tag:  de.tag,
				Ind1: string(b[0]),
				Ind2: string(b[1]),
			}
			// The remainder (minus the terminator) is a delimiter-separated
			// list of subfields: one code byte followed by the subfield text.
			for _, t := range bytes.Split(b[2:de.fieldLength-1], []byte{delimiter}) {
				if len(t) > 0 {
					df.Subfields = append(df.Subfields, &Subfield{Code: string(t[0]), Text: string(t[1:])})
				}
			}
			dfs = append(dfs, &df)
		}
	}
	return dfs, nil
}
// GetDatafields returns the record's datafields whose tag appears in the
// comma-separated list tags. An empty string selects every datafield.
// Results are ordered by the requested tags, then by record order.
func (rec Record) GetDatafields(tags string) (dfs []*Datafield) {
	if tags == "" {
		return rec.Datafields
	}
	for _, tag := range strings.Split(tags, ",") {
		for _, field := range rec.Datafields {
			if field.Tag == tag {
				dfs = append(dfs, field)
			}
		}
	}
	return dfs
}
// GetSubfields returns the datafield's subfields whose single-character code
// appears in codes. An empty string selects every subfield. Results are
// ordered by the requested codes, then by field order.
func (df Datafield) GetSubfields(codes string) (sfs []*Subfield) {
	if codes == "" {
		return df.Subfields
	}
	for _, code := range []byte(codes) {
		for _, sub := range df.Subfields {
			if sub.Code == string(code) {
				sfs = append(sfs, sub)
			}
		}
	}
	return sfs
}
// GetTag returns the tag for the datafield.
func (df Datafield) GetTag() string {
	return df.Tag
}

// GetInd1 returns the indicator 1 value for the datafield; an unset
// indicator is reported as a single blank, matching the MARC convention
// of a blank in an undefined indicator position.
func (df Datafield) GetInd1() string {
	if df.Ind1 == "" {
		return " "
	}
	return df.Ind1
}

// GetInd2 returns the indicator 2 value for the datafield; an unset
// indicator is reported as a single blank, matching the MARC convention
// of a blank in an undefined indicator position.
func (df Datafield) GetInd2() string {
	if df.Ind2 == "" {
		return " "
	}
	return df.Ind2
}

// GetCode returns the code for the subfield.
func (sf Subfield) GetCode() string {
	return sf.Code
}
// GetText returns the text for the subfield
func (sf Subfield) GetText() string {
return sf.Text
} | pkg/marc21/datafield.go | 0.745954 | 0.703639 | datafield.go | starcoder |
package stats
import (
"math"
"sort"
)
// Mean returns the arithmetic mean of the slice. An empty slice yields
// NaN (0/0).
func Mean(input []float64) float64 {
	total := 0.0
	for i := range input {
		total += input[i]
	}
	return total / float64(len(input))
}
// Median returns the median of the sorted slice. Slices with fewer than
// two elements yield NaN. Panics if the input is not sorted.
func Median(input []float64) (output float64) {
	if len(input) < 2 {
		return math.NaN()
	}
	if !sort.Float64sAreSorted(input) {
		panic("stats: input is not sorted.")
	}
	mid := len(input) / 2
	if len(input)%2 == 1 {
		return input[mid]
	}
	return (input[mid] + input[mid-1]) / 2
}
// Max returns the maximum value of the sample. An empty sample yields NaN,
// as does a NaN appearing after the first element.
func Max(input []float64) float64 {
	if len(input) == 0 {
		return math.NaN()
	}
	largest := input[0]
	for _, v := range input[1:] {
		if math.IsNaN(v) {
			return math.NaN()
		}
		if largest < v {
			largest = v
		}
	}
	return largest
}
// Min returns the minimum value of the sample. An empty sample yields NaN,
// as does a NaN appearing after the first element.
func Min(input []float64) float64 {
	if len(input) == 0 {
		return math.NaN()
	}
	smallest := input[0]
	for _, v := range input[1:] {
		if math.IsNaN(v) {
			return math.NaN()
		}
		if v < smallest {
			smallest = v
		}
	}
	return smallest
}
// Range returns the difference between the largest and smallest values.
// Samples with fewer than two observations yield NaN.
func Range(input []float64) float64 {
	if len(input) < 2 {
		return math.NaN()
	}
	return Max(input) - Min(input)
}
// sumOfSquaredDifferences returns the sum of the squared differences of each observation from the mean.
// Samples with fewer than two observations yield NaN.
func sumOfSquaredDifferences(input []float64) float64 {
	if len(input) < 2 {
		return math.NaN()
	}
	mean := Mean(input)
	ssd := 0.0
	for _, o := range input {
		ssd += math.Pow(o-mean, 2.0)
	}
	return ssd
}
// StdDev returns the sample standard deviation: the square root of the
// sample variance.
func StdDev(input []float64) float64 {
	return math.Sqrt(Variance(input))
}

// Variance returns the unbiased sample variance (division by n-1).
func Variance(input []float64) float64 {
	return sumOfSquaredDifferences(input) / float64(len(input)-1)
}
// Quartile1 returns the first quartile: the median of the lower half of the
// sorted sample (integer division excludes the middle element when the
// length is odd). Samples with fewer than 4 observations yield NaN.
// Panics if the input is not sorted.
func Quartile1(input []float64) (Q1 float64) {
	if len(input) < 4 {
		return math.NaN()
	}
	if !sort.Float64sAreSorted(input) {
		panic("stats: input is not sorted.")
	}
	// The original even/odd branches were byte-identical; the lower half is
	// input[:len/2] in both cases, so the conditional was dead code.
	return Median(input[:len(input)/2])
}
// Quartile2 returns the second quartile (equivalent to the median).
func Quartile2(input []float64) float64 {
	return Median(input)
}

// Quartile3 returns the third quartile: the median of the upper half of the
// sorted sample (for odd lengths the overall median is excluded). Samples
// with fewer than 4 observations yield NaN. Panics if the input is not
// sorted.
func Quartile3(input []float64) (Q3 float64) {
	if len(input) < 4 {
		return math.NaN()
	}
	if !sort.Float64sAreSorted(input) {
		panic("stats: input is not sorted.")
	}
	if len(input)%2 == 0 {
		Q3 = Median(input[len(input)/2:])
	} else {
		Q3 = Median(input[len(input)/2+1:])
	}
	return Q3
}

// InterQuartileRange returns the difference between the third and the first quartiles.
func InterQuartileRange(input []float64) float64 {
	return Quartile3(input) - Quartile1(input)
}
// Covariance returns the sample covariance between two equally sized data
// samples (division by n-1). Samples of different lengths yield NaN.
func Covariance(a []float64, b []float64) float64 {
	if len(a) != len(b) {
		return math.NaN()
	}
	meanA, meanB := Mean(a), Mean(b)
	total := 0.0
	for i := 0; i < len(a); i++ {
		total += (a[i] - meanA) * (b[i] - meanB)
	}
	return total / float64(len(a)-1)
}
// Correlation returns the correlation between two data samples
func Correlation(a []float64, b []float64) float64 {
cov := Covariance(a, b)
sdA := StdDev(a)
sdB := StdDev(b)
if sdA == 0 || sdB == 0 {
return math.NaN()
}
return cov / (sdA * sdB)
} | stats.go | 0.848455 | 0.586138 | stats.go | starcoder |
package st
// node of the tree
type node struct {
key Comparable
value interface{}
left *node
right *node
nodes int // number of nodes in subtree rooted in this node
}
// BST is Binary Search Tree based implementation of the SymbolTable
type BST struct {
root *node
}
func NewBST() OrderedSymbolTable {
return new(BST)
}
// Put inserts the key/value pair, replacing the value if key is present.
func (bst *BST) Put(key Key, value interface{}) {
	bst.root = bst.put(bst.root, key, value)
}
// put recursively inserts key/value into the subtree rooted at x and
// returns the new subtree root, updating cached subtree sizes on the way
// back up.
func (bst *BST) put(x *node, key Key, value interface{}) *node {
	if x == nil {
		// Fix: a new leaf is a subtree of size 1; the original left nodes
		// at its zero value 0, which made every cached size undercount.
		return &node{key: key, value: value, nodes: 1}
	}
	cmp, err := key.Compare(x.key)
	if err != nil {
		// Fix: on a comparison error keep the subtree intact (silently
		// dropping the insert); the original returned nil, which detached
		// and lost the entire subtree rooted at x.
		return x
	}
	switch {
	case cmp < 0:
		x.left = bst.put(x.left, key, value)
	case cmp > 0:
		x.right = bst.put(x.right, key, value)
	default:
		x.value = value
	}
	x.nodes = bst.size(x.left) + bst.size(x.right) + 1
	return x
}
// Get returns the value associated with key, or nil when the key is absent.
func (bst *BST) Get(key Key) interface{} {
	return bst.get(bst.root, key)
}

// get recursively searches the subtree rooted at x for key. Returns nil
// when the key is absent or the keys are not comparable.
func (bst *BST) get(x *node, key Key) interface{} {
	if x == nil {
		return nil
	}
	cmp, err := key.Compare(x.key)
	if err != nil {
		return nil
	}
	switch {
	case cmp < 0:
		return bst.get(x.left, key)
	case cmp > 0:
		return bst.get(x.right, key)
	default:
		return x.value
	}
}
// Delete is using Hibbard deletion algorithm.
func (bst *BST) Delete(key Key) {
	if bst.root != nil {
		bst.root = bst.delete(bst.root, key)
	}
}

// delete removes key from the subtree rooted at x and returns the new
// subtree root. A node with two children is replaced by the minimum node
// of its right subtree (Hibbard deletion). On a comparison error the
// subtree is returned unchanged.
func (bst *BST) delete(x *node, key Key) *node {
	cmp, err := key.Compare(x.key)
	if err != nil {
		return x
	}
	switch {
	case cmp < 0:
		x.left = bst.delete(x.left, key)
	case cmp > 0:
		x.right = bst.delete(x.right, key)
	default:
		// found element, need to remove from the tree
		if x.right == nil {
			return x.left
		}
		if x.left == nil {
			return x.right
		}
		// Two children: promote the successor (minimum of right subtree).
		t := x
		x = bst.min(t.right)
		x.right = bst.deleteMin(t.right)
		x.left = t.left
	}
	// Recompute the cached subtree size on the way back up.
	x.nodes = bst.size(x.left) + bst.size(x.right) + 1
	return x
}
// DeleteMin removes the smallest key; it is a no-op on an empty tree.
// (Formatting fix: the original receiver declarations were not gofmt-clean.)
func (bst *BST) DeleteMin() {
	if bst.root == nil {
		return
	}
	bst.root = bst.deleteMin(bst.root)
}

// deleteMin removes the minimum node of the subtree rooted at x and
// returns the new subtree root, updating cached sizes on the way up.
func (bst *BST) deleteMin(x *node) *node {
	if x.left == nil { // x is min node
		return x.right
	}
	x.left = bst.deleteMin(x.left)
	x.nodes = bst.size(x.left) + bst.size(x.right) + 1
	return x
}
// Contains reports whether key is present with a non-nil value.
// NOTE(review): a key stored with a nil value is indistinguishable from an
// absent key here, because get returns nil in both cases.
func (bst *BST) Contains(key Key) bool {
	return bst.get(bst.root, key) != nil
}

// IsEmpty reports whether the table holds no keys.
func (bst *BST) IsEmpty() bool {
	return bst.size(bst.root) == 0
}

// Size returns the number of keys in the table.
func (bst *BST) Size() int {
	return bst.size(bst.root)
}

// size returns the cached size of the subtree rooted at x (0 for nil).
func (bst *BST) size(x *node) int {
	if x == nil {
		return 0
	}
	return x.nodes
}
// Keys returns all keys in the table.
// NOTE(review): not implemented yet — always returns nil.
func (bst *BST) Keys() []Key {
	return nil
}
// Min returns the smallest key, or nil when the tree is empty.
func (bst *BST) Min() Key {
	if bst.root == nil {
		return nil
	}
	return bst.min(bst.root).key
}

// min returns the leftmost (smallest) node of the subtree rooted at x.
func (bst *BST) min(x *node) *node {
	if x.left == nil {
		return x
	}
	return bst.min(x.left)
}
// Max returns the largest key, or nil when the tree is empty.
func (bst *BST) Max() Key {
	if bst.root == nil {
		return nil
	}
	// Fix: return the node's key; the original returned the *node itself,
	// which does not match the declared Key result (compare Min above).
	return bst.max(bst.root).key
}

// max returns the rightmost (largest) node of the subtree rooted at x.
func (bst *BST) max(x *node) *node {
	if x.right == nil {
		return x
	}
	return bst.max(x.right)
}
// Floor returns the largest key less than or equal to key, or nil when no
// such key exists.
func (bst *BST) Floor(key Key) Key {
	// Fix: the original declared an outer n and then shadowed it with
	// `if n := ...`, so the outer n stayed nil and n.key dereferenced a
	// nil pointer whenever a floor node was actually found.
	n := bst.floor(bst.root, key)
	if n == nil {
		return nil
	}
	return n.key
}
// floor returns the node holding the largest key <= key in the subtree
// rooted at x, or nil when there is none (or the keys are not comparable).
func (bst *BST) floor(x *node, key Key) *node {
	if x == nil {
		return nil
	}
	c, err := key.Compare(x.key)
	if err != nil {
		return nil
	}
	switch {
	case c == 0:
		return x
	case c < 0:
		// Consistency fix: put/get/delete treat any negative Compare result
		// as "less"; the original `case -1` only matched exactly -1.
		return bst.floor(x.left, key)
	default:
		// key is larger than x.key: prefer a floor from the right subtree,
		// otherwise x itself is the floor.
		if t := bst.floor(x.right, key); t != nil {
			return t
		}
		return x
	}
}
// Ceiling returns smallest key greater then or equal to key.
// NOTE(review): not implemented yet — always returns nil.
func (bst *BST) Ceiling(key Key) Key {
	// TODO
	return nil
}

// Rank returns number of keys less then key.
// NOTE(review): not implemented yet — always returns 0.
func (bst *BST) Rank(key Key) int {
	// TODO
	return 0
}

// Select returns a key of rank k.
// NOTE(review): not implemented yet — always returns nil.
func (bst *BST) Select(k int) Key {
	// TODO
	return nil
}

// DeleteMax deletes largest key.
// NOTE(review): not implemented yet — currently a no-op.
func (bst *BST) DeleteMax() {
	// TODO
}

// SizeRange returns number of keys in [lo..hi].
// NOTE(review): not implemented yet — always returns 0.
func (bst *BST) SizeRange(lo, hi Key) int {
	// TODO
	return 0
}
// KeysRange returns slice of keys in [lo..hi], in
func (bst *BST) KeysRange(lo, hi Key) []Key {
// TODO
return nil
} | st/bst.go | 0.718792 | 0.416025 | bst.go | starcoder |
package rasterizer
import (
"image"
"github.com/tdewolff/canvas"
"golang.org/x/image/draw"
"golang.org/x/image/math/f64"
"golang.org/x/image/vector"
)
// Draw draws the canvas on a new image with given resolution (in dots-per-millimeter).
// Higher resolution will result in bigger images: the pixel dimensions are
// the canvas width/height times the resolution, rounded to nearest.
func Draw(c *canvas.Canvas, resolution canvas.DPMM) *image.RGBA {
	img := image.NewRGBA(image.Rect(0, 0, int(c.W*float64(resolution)+0.5), int(c.H*float64(resolution)+0.5)))
	ras := New(img, resolution)
	c.Render(ras)
	return img
}
// Renderer rasterizes canvas render operations onto a draw.Image at a
// fixed resolution.
type Renderer struct {
	img        draw.Image
	resolution canvas.DPMM // dots per millimeter
}

// New creates a renderer that draws to a rasterized image.
func New(img draw.Image, resolution canvas.DPMM) *Renderer {
	return &Renderer{
		img:        img,
		resolution: resolution,
	}
}
// Size returns the width and height in millimeters, derived from the
// image's pixel bounds and the renderer's resolution.
func (r *Renderer) Size() (float64, float64) {
	size := r.img.Bounds().Size()
	return float64(size.X) / float64(r.resolution), float64(size.Y) / float64(r.resolution)
}
// RenderPath rasterizes path (transformed by m) onto the renderer's image,
// filling and/or stroking it according to style. The path's bounding box is
// clipped to the image; nothing is drawn when the path lies entirely
// outside the canvas.
func (r *Renderer) RenderPath(path *canvas.Path, style canvas.Style, m canvas.Matrix) {
	// TODO: use fill rule (EvenOdd, NonZero) for rasterizer
	path = path.Transform(m)
	strokeWidth := 0.0
	if style.StrokeColor.A != 0 && 0.0 < style.StrokeWidth {
		strokeWidth = style.StrokeWidth
	}
	size := r.img.Bounds().Size()
	bounds := path.Bounds()
	dx, dy := 0, 0
	resolution := float64(r.resolution)
	// Bounding box in pixels, padded by the stroke width on every side.
	x := int((bounds.X - strokeWidth) * resolution)
	y := int((bounds.Y - strokeWidth) * resolution)
	w := int((bounds.W+2*strokeWidth)*resolution) + 1
	h := int((bounds.H+2*strokeWidth)*resolution) + 1
	if (x+w <= 0 || size.X <= x) && (y+h <= 0 || size.Y <= y) {
		return // outside canvas
	}
	// Clip the box to the image; dx/dy record how much was clipped off the
	// origin so the rasterizer output is offset correspondingly.
	if x < 0 {
		dx = -x
		x = 0
	}
	if y < 0 {
		dy = -y
		y = 0
	}
	if size.X <= x+w {
		w = size.X - x
	}
	if size.Y <= y+h {
		h = size.Y - y
	}
	if w <= 0 || h <= 0 {
		return // has no size
	}
	// Shift the path so the clipped box's corner is the rasterizer origin.
	path = path.Translate(-float64(x)/resolution, -float64(y)/resolution)
	if style.FillColor.A != 0 {
		ras := vector.NewRasterizer(w, h)
		path.ToRasterizer(ras, resolution)
		// Destination rect flips the y axis (size.Y-y down to size.Y-y-h) —
		// presumably canvas y grows upward while image y grows downward;
		// confirm against the canvas package's coordinate convention.
		ras.Draw(r.img, image.Rect(x, size.Y-y, x+w, size.Y-y-h), image.NewUniform(style.FillColor), image.Point{dx, dy})
	}
	if style.StrokeColor.A != 0 && 0.0 < style.StrokeWidth {
		if 0 < len(style.Dashes) {
			path = path.Dash(style.DashOffset, style.Dashes...)
		}
		// Convert the stroke outline into a fillable path, then rasterize it
		// the same way as the fill above.
		path = path.Stroke(style.StrokeWidth, style.StrokeCapper, style.StrokeJoiner)
		ras := vector.NewRasterizer(w, h)
		path.ToRasterizer(ras, resolution)
		ras.Draw(r.img, image.Rect(x, size.Y-y, x+w, size.Y-y-h), image.NewUniform(style.StrokeColor), image.Point{dx, dy})
	}
}
// RenderText renders text by converting it to paths and rasterizing those.
func (r *Renderer) RenderText(text *canvas.Text, m canvas.Matrix) {
	text.RenderAsPath(r, m)
}
func (r *Renderer) RenderImage(img image.Image, m canvas.Matrix) {
// add transparent margin to image for smooth borders when rotating
margin := 4
size := img.Bounds().Size()
sp := img.Bounds().Min // starting point
img2 := image.NewRGBA(image.Rect(0, 0, size.X+margin*2, size.Y+margin*2))
draw.Draw(img2, image.Rect(margin, margin, size.X+margin, size.Y+margin), img, sp, draw.Over)
// draw to destination image
// note that we need to correct for the added margin in origin and m
// TODO: optimize when transformation is only translation or stretch
origin := m.Dot(canvas.Point{-float64(margin), float64(img2.Bounds().Size().Y - margin)}).Mul(float64(r.resolution))
m = m.Scale(float64(r.resolution)*(float64(size.X+margin)/float64(size.X)), float64(r.resolution)*(float64(size.Y+margin)/float64(size.Y)))
h := float64(r.img.Bounds().Size().Y)
aff3 := f64.Aff3{m[0][0], -m[0][1], origin.X, -m[1][0], m[1][1], h - origin.Y}
draw.CatmullRom.Transform(r.img, aff3, img2, img2.Bounds(), draw.Over, nil)
} | rasterizer/renderer.go | 0.725551 | 0.561275 | renderer.go | starcoder |
package encryptedconfigvalue
import (
"fmt"
"github.com/palantir/go-encrypted-config-value/encryption"
)
// AlgorithmType represents the algorithm used to encrypt a value.
type AlgorithmType string

// Supported encryption algorithms.
const (
	AES = AlgorithmType("AES")
	RSA = AlgorithmType("RSA")
)

// keyPairGenerator creates a fresh key pair for one algorithm.
type keyPairGenerator func() (KeyPair, error)

// algorithmTypeData bundles an algorithm's key pair generator with its
// default encrypter.
type algorithmTypeData struct {
	generator keyPairGenerator
	encrypter Encrypter
}
// algorithmTypeToData maps each supported algorithm to its default key
// generator and encrypter. It also serves as the registry of known
// algorithms consulted by ToAlgorithmType.
var algorithmTypeToData = map[AlgorithmType]algorithmTypeData{
	AES: {
		generator: NewAESKeyPair,
		encrypter: NewAESGCMEncrypter(),
	},
	RSA: {
		generator: NewRSAKeyPair,
		encrypter: NewRSAOAEPEncrypter(),
	},
}
// GenerateKeyPair generates a new KeyPair using the default size/parameters specified by encrypted-config-value that
// can be used to encrypt and decrypt values for the receiver algorithm.
// NOTE(review): assumes the receiver is a known algorithm; an unknown
// receiver looks up the map's zero-value entry (nil generator).
func (a AlgorithmType) GenerateKeyPair() (KeyPair, error) {
	return algorithmTypeToData[a].generator()
}

// Encrypter returns a new Encrypter that uses the default encryption parameters specified by encrypted-config-value
// that can be used to create EncryptedValue objects for the receiver algorithm.
func (a AlgorithmType) Encrypter() Encrypter {
	return algorithmTypeToData[a].encrypter
}
// ToAlgorithmType returns the AlgorithmType that matches the provided
// string, or an error when the string names no known algorithm.
func ToAlgorithmType(val string) (AlgorithmType, error) {
	candidate := AlgorithmType(val)
	if _, known := algorithmTypeToData[candidate]; known {
		return candidate, nil
	}
	return AlgorithmType(""), fmt.Errorf("unknown algorithm type: %q", val)
}
// KeyType represents a specific type of key.
type KeyType string

// Supported key types. RSA distinguishes public and private keys; AES is
// symmetric and has a single key type.
const (
	AESKey     = KeyType("AES")
	RSAPubKey  = KeyType("RSA-PUB")
	RSAPrivKey = KeyType("RSA-PRIV")
)

// keyTypeData bundles a key type's parser with its owning algorithm.
type keyTypeData struct {
	generator KeyGenerator
	algType   AlgorithmType
}

// keyTypeToData maps each key type to its parser and algorithm. It also
// serves as the registry of known key types consulted by ToKeyType.
var keyTypeToData = map[KeyType]keyTypeData{
	AESKey: {
		// AES keys are constructed from raw bytes.
		generator: keyGeneratorFor(AESKey, func(key []byte) (encryption.Key, error) {
			return encryption.AESKeyFromBytes(key), nil
		}),
		algType: AES,
	},
	RSAPubKey: {
		// RSA public keys are parsed from PEM bytes.
		generator: keyGeneratorFor(RSAPubKey, func(key []byte) (encryption.Key, error) {
			return encryption.RSAPublicKeyFromPEMBytes(key)
		}),
		algType: RSA,
	},
	RSAPrivKey: {
		// RSA private keys are parsed from PKCS#8 bytes.
		generator: keyGeneratorFor(RSAPrivKey, func(key []byte) (encryption.Key, error) {
			return encryption.RSAPrivateKeyFromPKCS8Bytes(key)
		}),
		algType: RSA,
	},
}
// Generator returns a new KeyGenerator which, given the byte representation for the content of a key of the receiver
// type, returns a new KeyWithType for a key of the receiver type.
// NOTE(review): both accessors assume the receiver is a known key type; an
// unknown receiver looks up the map's zero-value entry.
func (kt KeyType) Generator() KeyGenerator {
	return keyTypeToData[kt].generator
}

// AlgorithmType returns the encryption algorithm that corresponds to the key type of the receiver.
func (kt KeyType) AlgorithmType() AlgorithmType {
	return keyTypeToData[kt].algType
}
// KeyGenerator defines a function which, given the byte representation of a key, returns a KeyWithType. The provided
// bytes are typically the raw or encoded bytes for the key itself. It is typically the responsibility of the generator
// function to provide the KeyType information required for the returned KeyWithType.
type KeyGenerator func([]byte) (KeyWithType, error)

// keyGeneratorFor adapts a raw key parser into a KeyGenerator that tags
// the successfully parsed key with the given keyType.
func keyGeneratorFor(keyType KeyType, keyGen func([]byte) (encryption.Key, error)) KeyGenerator {
	return func(keyBytes []byte) (KeyWithType, error) {
		key, err := keyGen(keyBytes)
		if err != nil {
			return KeyWithType{}, err
		}
		return KeyWithType{
			Type: keyType,
			Key:  key,
		}, nil
	}
}
// ToKeyType returns the KeyType that matches the provided string. Returns an error if the provided string does not
// match a known key type.
func ToKeyType(val string) (KeyType, error) {
keyType := KeyType(val)
if _, ok := keyTypeToData[keyType]; !ok {
return KeyType(""), fmt.Errorf("unknown key type: %q", val)
}
return keyType, nil
} | vendor/github.com/palantir/go-encrypted-config-value/encryptedconfigvalue/algorithms.go | 0.845049 | 0.481149 | algorithms.go | starcoder |
package server
import (
"math"
"sync/atomic"
)
const (
	// gcTick is the number of ticks after which a follower state entry is
	// considered stale and eligible for garbage collection.
	gcTick uint64 = 3
	// ChangeTickThreashold is the minimum number of ticks required to update
	// the state of the rate limiter.
	// NOTE(review): the exported name contains a typo ("Threashold");
	// renaming would break callers, so it is kept as-is.
	ChangeTickThreashold uint64 = 10
)

// followerState records a follower's last reported in-memory log size and
// the local tick at which the report was recorded.
type followerState struct {
	tick         uint64
	inMemLogSize uint64
}
// RateLimiter is the struct used to keep tracking consumed memory size.
// All size accesses are atomic, so the methods are safe for concurrent use.
type RateLimiter struct {
	size    uint64 // current recorded size in bytes; accessed atomically
	maxSize uint64 // limit in bytes; 0 or MaxUint64 disables the limiter
}

// NewRateLimiter creates and returns a rate limiter instance with the
// given maximum size in bytes.
func NewRateLimiter(max uint64) *RateLimiter {
	return &RateLimiter{
		maxSize: max,
	}
}
// Enabled returns a boolean flag indicating whether the rate limiter is
// enabled. A max size of 0 or MaxUint64 means "no limit".
func (r *RateLimiter) Enabled() bool {
	return r.maxSize > 0 && r.maxSize != math.MaxUint64
}

// Increase increases the recorded in memory log size by sz bytes.
func (r *RateLimiter) Increase(sz uint64) {
	atomic.AddUint64(&r.size, sz)
}

// Decrease decreases the recorded in memory log size by sz bytes.
func (r *RateLimiter) Decrease(sz uint64) {
	// ^(sz - 1) is the two's-complement negation of sz, which makes
	// AddUint64 subtract sz — the idiom documented in sync/atomic.
	atomic.AddUint64(&r.size, ^(sz - 1))
}

// Set sets the recorded in memory log size to sz bytes.
func (r *RateLimiter) Set(sz uint64) {
	atomic.StoreUint64(&r.size, sz)
}

// Get returns the recorded in memory log size.
func (r *RateLimiter) Get() uint64 {
	return atomic.LoadUint64(&r.size)
}
// RateLimited returns a boolean flag indicating whether the node is rate
// limited, i.e. the recorded size exceeds the configured maximum. Always
// false when the limiter is disabled.
func (r *RateLimiter) RateLimited() bool {
	if !r.Enabled() {
		return false
	}
	v := r.Get()
	if v > r.maxSize {
		plog.Infof("rate limited, v: %d, maxSize %d", v, r.maxSize)
		return true
	}
	return false
}
// InMemRateLimiter is the struct used to keep tracking the in memory rate log size.
// It combines the local size with sizes reported by followers and applies a
// logical clock with hysteresis to avoid flapping between states.
type InMemRateLimiter struct {
	followerSizes map[uint64]followerState // last reported size per follower node ID
	rl            RateLimiter
	tick          uint64 // logical clock, advanced by Tick
	tickLimited   uint64 // tick at which the limited state last changed
	limited       bool   // current limited/unlimited decision
}

// NewInMemRateLimiter creates and returns a rate limiter instance with the
// given maximum in-memory log size.
func NewInMemRateLimiter(maxSize uint64) *InMemRateLimiter {
	return &InMemRateLimiter{
		// so tickLimited won't be 0
		tick:          1,
		rl:            RateLimiter{maxSize: maxSize},
		followerSizes: make(map[uint64]followerState),
	}
}
// Enabled returns a boolean flag indicating whether the rate limiter is
// enabled.
func (r *InMemRateLimiter) Enabled() bool {
	return r.rl.Enabled()
}

// Tick advances the internal logical clock.
func (r *InMemRateLimiter) Tick() {
	r.tick++
}

// GetTick returns the internal logical clock value.
func (r *InMemRateLimiter) GetTick() uint64 {
	return r.tick
}
// Increase increases the recorded in memory log size by sz bytes.
func (r *InMemRateLimiter) Increase(sz uint64) {
	r.rl.Increase(sz)
}

// Decrease decreases the recorded in memory log size by sz bytes.
func (r *InMemRateLimiter) Decrease(sz uint64) {
	r.rl.Decrease(sz)
}

// Set sets the recorded in memory log size to sz bytes.
func (r *InMemRateLimiter) Set(sz uint64) {
	r.rl.Set(sz)
}

// Get returns the recorded in memory log size.
func (r *InMemRateLimiter) Get() uint64 {
	return r.rl.Get()
}

// Reset clears all recorded follower states.
func (r *InMemRateLimiter) Reset() {
	r.followerSizes = make(map[uint64]followerState)
}
// SetFollowerState sets the follower rate identified by nodeID to sz bytes,
// stamping the entry with the current logical tick.
func (r *InMemRateLimiter) SetFollowerState(nodeID uint64, sz uint64) {
	r.followerSizes[nodeID] = followerState{
		tick:         r.tick,
		inMemLogSize: sz,
	}
}
// RateLimited returns a boolean flag indicating whether the node is rate
// limited. State transitions are damped: after a change, another change is
// not accepted until more than ChangeTickThreashold ticks have passed.
func (r *InMemRateLimiter) RateLimited() bool {
	limited := r.limitedByInMemSize()
	if limited != r.limited {
		if r.tickLimited == 0 || r.tick-r.tickLimited > ChangeTickThreashold {
			r.limited = limited
			r.tickLimited = r.tick
		}
	}
	return r.limited
}
// limitedByInMemSize computes the raw limited/unlimited decision from the
// largest known in-memory log size (local or follower-reported). Stale
// follower entries are skipped and trigger a garbage collection pass.
// Hysteresis: entering the limited state requires exceeding maxSize, while
// leaving it requires dropping below 70% of maxSize.
func (r *InMemRateLimiter) limitedByInMemSize() bool {
	if !r.Enabled() {
		return false
	}
	maxInMemSize := uint64(0)
	gc := false
	for _, v := range r.followerSizes {
		if r.tick-v.tick > gcTick {
			// Stale report: ignore it and remember to collect it below.
			gc = true
			continue
		}
		if v.inMemLogSize > maxInMemSize {
			maxInMemSize = v.inMemLogSize
		}
	}
	sz := r.Get()
	if sz > maxInMemSize {
		maxInMemSize = sz
	}
	if gc {
		r.gc()
	}
	if !r.limited {
		return maxInMemSize > r.rl.maxSize
	}
	return maxInMemSize >= (r.rl.maxSize * 7 / 10)
}
func (r *InMemRateLimiter) gc() {
followerStates := make(map[uint64]followerState)
for nid, v := range r.followerSizes {
if r.tick-v.tick > gcTick {
continue
}
followerStates[nid] = v
}
r.followerSizes = followerStates
} | internal/server/rate.go | 0.747432 | 0.501221 | rate.go | starcoder |
package iso20022
// Account between an investor(s) and a fund manager or a fund. The account can contain holdings in any investment fund or investment fund class managed (or distributed) by the fund manager, within the same fund family.
type InvestmentAccount43 struct {
// Unique and unambiguous identification for the account between the account owner and the account servicer.
Identification *AccountIdentification1 `xml:"Id"`
// Name of the account. It provides an additional means of identification, and is designated by the account servicer in agreement with the account owner.
Name *Max35Text `xml:"Nm,omitempty"`
// Supplementary registration information applying to a specific block of units for dealing and reporting purposes. The supplementary registration information may be used when all the units are registered, for example, to a funds supermarket, but holdings for each investor have to reconciled individually.
Designation *Max35Text `xml:"Dsgnt,omitempty"`
// Party that provides services relating to financial products to investors, for example, advice on products and placement of orders for the investment fund.
IntermediaryInformation []*Intermediary27 `xml:"IntrmyInf,omitempty"`
// Party that manages the account on behalf of the account owner, that is manages the registration and booking of entries on the account, calculates balances on the account and provides information about the account.
AccountServicer *PartyIdentification2Choice `xml:"AcctSvcr,omitempty"`
}
func (i *InvestmentAccount43) AddIdentification() *AccountIdentification1 {
i.Identification = new(AccountIdentification1)
return i.Identification
}
func (i *InvestmentAccount43) SetName(value string) {
i.Name = (*Max35Text)(&value)
}
func (i *InvestmentAccount43) SetDesignation(value string) {
i.Designation = (*Max35Text)(&value)
}
func (i *InvestmentAccount43) AddIntermediaryInformation() *Intermediary27 {
newValue := new(Intermediary27)
i.IntermediaryInformation = append(i.IntermediaryInformation, newValue)
return newValue
}
func (i *InvestmentAccount43) AddAccountServicer() *PartyIdentification2Choice {
i.AccountServicer = new(PartyIdentification2Choice)
return i.AccountServicer
} | InvestmentAccount43.go | 0.692434 | 0.452899 | InvestmentAccount43.go | starcoder |
package main
import (
"bufio"
"fmt"
"image"
"image/color"
"image/gif"
"io"
"os"
"strconv"
"strings"
"sync"
)
// palette is the two-color palette for the rendered frames: index 0 is the
// background (white), index 1 marks Mandelbrot set membership (black).
var palette = []color.Color{color.White, color.Black}

const (
	whiteIndex = iota // palette index of the background color
	blackIndex        // palette index used for points inside the set
)
// usage prints the command-line help text to standard error.
func usage() {
	fmt.Fprintln(os.Stderr, `Usage: ./mandelbrot [flags]
The following flags are valid:
* --startPos <real> [<imag>]: Set the center position at the start
of the animation. If only real given, set imag to real. Mandatory.
* --endPos <real> [<imag>]: Set the center position at the end of
the animation. If only real given, set imag to real. If not given,
this will be set to the starting position.
* --startZoom <real> [<imag>]: Set the zoom (the width/height visible
in the image) at the start of the animation. If imag is not given,
it will be set to 0. Mandatory
* --endZoom <real> [<imag>]: Set the zoom at the end of the animation. If
imag is not given, it will be set to 0. If not given, this will be
set to the starting zoom.
* --size <x> [<y>]: Set the size (in pixels) of the image drawn. If y is
not given, it will be set to x. Defaults to 512x512.
* --iters <n>: Set the number of iterations to use when testing for
membership in the Mandelbrot set. Defaults to 1000.
* --frames <n>: Set the number of frames that will be in the animation.
Defaults to 25.
* --delay <n>: Sets the delay between frames (in 100ths of a second).
Defaults to 8
* --output <f>: Sets the output file to f. If not given, will output to
standard out.
* --help: Displays this message.
* --test: Runs the program with test parameters`)
}
// escapeIters returns the number of iterations needed for the orbit of
// z(n+1) = z(n)^2 + c (starting at z = 0) to leave the radius-2 disk, or
// nIter if it does not escape within nIter iterations (i.e. c is treated
// as a member of the Mandelbrot set).
func escapeIters(c complex128, nIter int) int {
	val := complex128(0)
	escape := float64(2)
	i := 0
	// Fix: the escape test must use the magnitude of val, not just its
	// real part. Comparing |val|^2 against escape^2 avoids a sqrt and
	// needs no extra import.
	for i < nIter && real(val)*real(val)+imag(val)*imag(val) < escape*escape {
		val = val*val + c
		i++
	}
	return i
}
// mandelbrot draws an image of the mandelbrot set
// within the given real and imaginary bounds: the visible region is
// centered on center and spans width x height in the complex plane,
// rendered onto an sX x sY pixel paletted image. nIter caps the
// escape-iteration test.
func mandelbrot(sX, sY, nIter int,
	center complex128,
	width, height float64) *image.Paletted {
	hX := float64(sX) / 2.0
	hY := float64(sY) / 2.0
	lr := real(center) - width/2.0
	hr := real(center) + width/2.0
	li := imag(center) - height/2.0
	hi := imag(center) + height/2.0
	// fromPos maps a pixel coordinate to its point in the complex plane
	// (hr-lr and hi-li are just width and height).
	fromPos := func(x, y int) complex128 {
		r := real(center) + (float64(x)-hX)/float64(sX)*(hr-lr)
		i := imag(center) + (float64(y)-hY)/float64(sY)*(hi-li)
		return complex(r, i)
	}
	rect := image.Rect(0, 0, sX, sY)
	img := image.NewPaletted(rect, palette)
	for x := 0; x < sX; x++ {
		for y := 0; y < sY; y++ {
			c := fromPos(x, y)
			// A point is treated as in the set when it fails to escape
			// within nIter iterations.
			inSet := nIter == escapeIters(c, nIter)
			if inSet {
				img.SetColorIndex(x, y, blackIndex)
			} else {
				img.SetColorIndex(x, y, whiteIndex)
			}
		}
	}
	return img
}
// State stores all of the data specifying an animation.
type State struct {
	startPos, endPos              complex128 // animation start/end center in the complex plane
	startZoom, endZoom            [2]float64 // visible extent at start/end of the animation
	filename                      string     // output file; empty means standard out (per usage text)
	sX, sY, nIter, nFrames, delay int
}
// args parses the command-line arguments and returns a validated State.
// On a parse error, a missing mandatory option, --help, or an empty command
// line it prints a diagnostic (plus usage where helpful) and exits.
//
// --startPos/--endPos/--startZoom/--endZoom/--size accept one or two values;
// when the second is omitted (end of arguments, or the next token starts
// with "--") a default is used: 0 imaginary part for positions, the first
// value repeated for zooms and size.
func args() State {
	s := State{}
	// Track which options were supplied so defaults / requirements can be
	// resolved after the parse loop.
	startPosSet := false
	endPosSet := false
	startZoomSet := false
	endZoomSet := false
	sizeSet := false
	nIterSet := false
	nFramesSet := false
	delaySet := false
	argc := len(os.Args)
	if argc == 1 {
		usage()
		os.Exit(1)
	}
	i := 1
	for i < argc {
		var err error
		switch os.Args[i] {
		case "--help":
			usage()
			os.Exit(1)
		case "--startPos":
			var r, im float64 // initialized in inner scopes
			if i == argc-1 {
				fmt.Fprintln(os.Stderr, "expected argument(s) to --startPos")
				os.Exit(1)
			}
			r, err := strconv.ParseFloat(os.Args[i+1], 64)
			if err != nil {
				fmt.Fprintf(os.Stderr, "cannot parse %s as argument to --startPos\n", os.Args[i+1])
				os.Exit(1)
			}
			if i == argc-2 {
				im = 0.0
				i += 2
			} else {
				im, err = strconv.ParseFloat(os.Args[i+2], 64)
				if err != nil {
					// A following flag means the imaginary part was omitted.
					if strings.HasPrefix(os.Args[i+2], "--") {
						im = 0.0
						i += 2
					} else {
						fmt.Fprintf(os.Stderr, "cannot parse %s as argument to --startPos\n", os.Args[i+2])
						os.Exit(1)
					}
				} else {
					i += 3
				}
			}
			s.startPos = complex(r, im)
			startPosSet = true
		case "--endPos":
			var r, im float64 // initialized in inner scopes
			if i == argc-1 {
				fmt.Fprintln(os.Stderr, "expected argument(s) to --endPos")
				os.Exit(1)
			}
			r, err := strconv.ParseFloat(os.Args[i+1], 64)
			if err != nil {
				fmt.Fprintf(os.Stderr, "cannot parse %s as argument to --endPos\n", os.Args[i+1])
				os.Exit(1)
			}
			if i == argc-2 {
				im = 0.0
				i += 2
			} else {
				im, err = strconv.ParseFloat(os.Args[i+2], 64)
				if err != nil {
					if strings.HasPrefix(os.Args[i+2], "--") {
						im = 0.0
						i += 2
					} else {
						fmt.Fprintf(os.Stderr, "cannot parse %s as argument to --endPos\n", os.Args[i+2])
						os.Exit(1)
					}
				} else {
					i += 3
				}
			}
			s.endPos = complex(r, im)
			endPosSet = true
		case "--startZoom":
			if i == argc-1 {
				fmt.Fprintln(os.Stderr, "expected argument(s) to --startZoom")
				os.Exit(1)
			}
			s.startZoom[0], err = strconv.ParseFloat(os.Args[i+1], 64)
			if err != nil {
				fmt.Fprintf(os.Stderr, "cannot parse %s as argument to --startZoom\n", os.Args[i+1])
				os.Exit(1)
			}
			if i == argc-2 {
				// Single value given: use it for both dimensions.
				s.startZoom[1] = s.startZoom[0]
				i += 2
			} else {
				s.startZoom[1], err = strconv.ParseFloat(os.Args[i+2], 64)
				if err != nil {
					if strings.HasPrefix(os.Args[i+2], "--") {
						s.startZoom[1] = s.startZoom[0]
						i += 2
					} else {
						fmt.Fprintf(os.Stderr, "cannot parse %s as argument to --startZoom\n", os.Args[i+2])
						os.Exit(1)
					}
				} else {
					i += 3
				}
			}
			startZoomSet = true
		case "--endZoom":
			if i == argc-1 {
				fmt.Fprintln(os.Stderr, "expected argument(s) to --endZoom")
				os.Exit(1)
			}
			s.endZoom[0], err = strconv.ParseFloat(os.Args[i+1], 64)
			if err != nil {
				fmt.Fprintf(os.Stderr, "cannot parse %s as argument to --endZoom\n", os.Args[i+1])
				os.Exit(1)
			}
			if i == argc-2 {
				s.endZoom[1] = s.endZoom[0]
				i += 2
			} else {
				s.endZoom[1], err = strconv.ParseFloat(os.Args[i+2], 64)
				if err != nil {
					if strings.HasPrefix(os.Args[i+2], "--") {
						s.endZoom[1] = s.endZoom[0]
						i += 2
					} else {
						fmt.Fprintf(os.Stderr, "cannot parse %s as argument to --endZoom\n", os.Args[i+2])
						os.Exit(1)
					}
				} else {
					i += 3
				}
			}
			endZoomSet = true
		case "--output":
			if i == argc-1 {
				fmt.Fprintln(os.Stderr, "expected argument to --output")
				os.Exit(1)
			}
			s.filename = os.Args[i+1]
			i += 2
		case "--size":
			var sX, sY int64 // initialized in inner scopes
			if i == argc-1 {
				fmt.Fprintln(os.Stderr, "expected argument(s) to --size")
				os.Exit(1)
			}
			sX, err = strconv.ParseInt(os.Args[i+1], 10, 0)
			if err != nil {
				fmt.Fprintf(os.Stderr, "cannot parse %s as argument to --size\n", os.Args[i+1])
				// Bug fix: this branch previously fell through without
				// exiting, silently continuing with a zero width.
				os.Exit(1)
			}
			if i == argc-2 {
				sY = sX
				i += 2
			} else {
				sY, err = strconv.ParseInt(os.Args[i+2], 10, 0)
				if err != nil {
					if strings.HasPrefix(os.Args[i+2], "--") {
						sY = sX
						i += 2
					} else {
						fmt.Fprintf(os.Stderr, "cannot parse %s as argument to --size\n", os.Args[i+2])
						os.Exit(1)
					}
				} else {
					i += 3
				}
			}
			s.sX = int(sX)
			s.sY = int(sY)
			sizeSet = true
		case "--iters":
			if i == argc-1 {
				fmt.Fprintln(os.Stderr, "expected argument to --iters")
				os.Exit(1)
			}
			nIter, err := strconv.ParseInt(os.Args[i+1], 10, 0)
			if err != nil {
				fmt.Fprintf(os.Stderr, "cannot parse %s as argument to --iters\n", os.Args[i+1])
				os.Exit(1)
			}
			s.nIter = int(nIter)
			i += 2
			nIterSet = true
		case "--frames":
			if i == argc-1 {
				fmt.Fprintln(os.Stderr, "expected argument to --frames")
				os.Exit(1)
			}
			nFrames, err := strconv.ParseInt(os.Args[i+1], 10, 0)
			if err != nil {
				fmt.Fprintf(os.Stderr, "cannot parse %s as argument to --frames\n", os.Args[i+1])
				os.Exit(1)
			}
			if nFrames < 1 {
				fmt.Fprintln(os.Stderr, "number of frames must be at least 1")
				os.Exit(1)
			}
			s.nFrames = int(nFrames)
			i += 2
			nFramesSet = true
		case "--delay":
			if i == argc-1 {
				fmt.Fprintln(os.Stderr, "expected argument to --delay")
				os.Exit(1)
			}
			delay, err := strconv.ParseInt(os.Args[i+1], 10, 0)
			if err != nil {
				fmt.Fprintf(os.Stderr, "cannot parse %s as argument to --delay\n", os.Args[i+1])
				os.Exit(1)
			}
			if delay < 1 {
				// Fprintln, not Fprintf: the original used Fprintf with no
				// format verbs and dropped the trailing newline.
				fmt.Fprintln(os.Stderr, "delay time must be at least 1")
				os.Exit(1)
			}
			s.delay = int(delay)
			i += 2
			delaySet = true
		case "--test":
			// TODO: This is not a perfect animation...
			s.startPos = complex(-1.0, 0.0)
			s.endPos = complex(-1.31, 0.0)
			s.startZoom[0] = 0.5
			s.startZoom[1] = 0.5
			s.endZoom[0] = 0.12
			s.endZoom[1] = 0.12
			s.sX = 512
			s.sY = 512
			s.nIter = 1000
			s.nFrames = 25
			s.delay = 8
			s.filename = "test.gif" // good decision?
			return s
		default:
			fmt.Fprintf(os.Stderr, "unexpected argument %s\n", os.Args[i])
			usage()
			os.Exit(1)
		}
	}
	// Post-parse validation and defaulting.
	if !startPosSet {
		fmt.Fprintln(os.Stderr, "need to specify start position")
		usage()
		os.Exit(1)
	} else if !endPosSet {
		s.endPos = s.startPos
	}
	if !startZoomSet && !endZoomSet {
		fmt.Fprintln(os.Stderr, "need to give start zoom")
		usage()
		os.Exit(1)
	} else if !endZoomSet {
		s.endZoom[0] = s.startZoom[0]
		s.endZoom[1] = s.startZoom[1]
	}
	if !sizeSet {
		s.sX = 512
		s.sY = 512
	}
	if !nIterSet {
		s.nIter = 1000
	}
	if !nFramesSet && !endZoomSet {
		s.nFrames = 1
	} else if !nFramesSet {
		s.nFrames = 25
	}
	if s.nFrames > 1 && !endZoomSet && !endPosSet {
		fmt.Fprintln(os.Stderr, "setting frames argument to 1 due to lack of movement")
		s.nFrames = 1
	}
	if s.nFrames == 1 {
		if endZoomSet {
			fmt.Fprintln(os.Stderr, "frames set to 1; ignoring end zoom")
		}
		if endPosSet {
			fmt.Fprintln(os.Stderr, "frames set to 1; ignoring end position")
		}
	}
	if !delaySet {
		s.delay = 8
	}
	return s
}
// Frame holds the parameters for a given image: the complex-plane center
// of the view and its real (width) and imaginary (height) extents.
type Frame struct {
	center complex128
	width  float64
	height float64
}
// scale multiplies both components of c by the real factor s.
func scale(c complex128, s float64) complex128 {
	return c * complex(s, 0)
}
// animate renders the animation described by s and writes it as a GIF to
// s.filename, or to standard out when no filename was given. On any I/O
// error it prints the error and exits.
func (s State) animate() {
	// Linearly interpolate the per-frame center and viewport size between
	// the start and end configurations.
	xs := [2]float64{s.startZoom[0], s.endZoom[0]}
	ys := [2]float64{s.startZoom[1], s.endZoom[1]}
	frames := make([]Frame, s.nFrames)
	for i := 0; i < s.nFrames; i++ {
		denom := s.nFrames
		if denom > 1 {
			denom-- // so the final frame lands exactly on the end values
		}
		frac := float64(i) / float64(denom)
		center := s.startPos + (scale(s.endPos, frac) - scale(s.startPos, frac))
		width := xs[0] + (xs[1]-xs[0])*frac
		height := ys[0] + (ys[1]-ys[0])*frac
		frames[i] = Frame{center, width, height}
	}
	// Draw the frames concurrently; each goroutine writes only its own
	// index of anim.Image, so no further synchronization is needed.
	anim := gif.GIF{LoopCount: 0}
	anim.Delay = make([]int, s.nFrames)
	anim.Image = make([]*image.Paletted, s.nFrames)
	var wg sync.WaitGroup
	wg.Add(s.nFrames)
	for i, vals := range frames {
		anim.Delay[i] = s.delay
		go func(i int, vals Frame) {
			defer wg.Done()
			anim.Image[i] = mandelbrot(s.sX, s.sY, s.nIter,
				vals.center, vals.width, vals.height)
		}(i, vals)
	}
	wg.Wait()
	var out io.Writer
	if s.filename == "" {
		out = os.Stdout
	} else {
		f, err := os.Create(s.filename)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		defer f.Close()
		out = bufio.NewWriter(f)
	}
	// Bug fixes: the encode error was previously discarded, and the
	// buffered writer was never flushed, which could truncate the file.
	if err := gif.EncodeAll(out, &anim); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if bw, ok := out.(*bufio.Writer); ok {
		if err := bw.Flush(); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	}
}
func main() {
args().animate()
} | mandelbrot.go | 0.550003 | 0.403391 | mandelbrot.go | starcoder |
package stat
import (
fn "github.com/tokenme/go-fn/fn"
"math"
)
// Mathematical constants used throughout the package.
const π = float64(math.Pi)
const ln2 = math.Ln2
const lnSqrt2π = 0.918938533204672741780329736406 // log(sqrt(2*pi))
const min64 = math.SmallestNonzeroFloat64         // DBL_MIN
const eps64 = 1.1102230246251565e-16              // DBL_EPSILON
const maxExp = 1024.0                             // DBL_MAX_EXP
const sqrt2 = math.Sqrt2

// Frequently used values, pre-computed once at package init.
var nan = math.NaN()
var fZero float64 = float64(0.0)
var fOne float64 = float64(1.0)
var iZero int64 = int64(0)
var iOne int64 = int64(1)
var negInf float64 = math.Inf(-1)
var posInf float64 = math.Inf(+1)

// Functions imported from "math" — short aliases used by the rest of the
// package to keep the numeric formulas compact.
var abs func(float64) float64 = math.Abs
var floor func(float64) float64 = math.Floor
var ceil func(float64) float64 = math.Ceil
var log func(float64) float64 = math.Log
var log1p func(float64) float64 = math.Log1p
var log10 func(float64) float64 = math.Log10
var exp func(float64) float64 = math.Exp
var sqrt func(float64) float64 = math.Sqrt
var pow func(float64, float64) float64 = math.Pow
var atan func(float64) float64 = math.Atan
var tan func(float64) float64 = math.Tan
var trunc func(float64) float64 = math.Trunc
var erf func(float64) float64 = math.Erf
var erfc func(float64) float64 = math.Erfc
var isNaN func(float64) bool = math.IsNaN
var isInf func(float64, int) bool = math.IsInf

// Functions imported from "code.google.com/p/go-fn/fn": log-beta and
// log-gamma.
var lnB func(float64, float64) float64 = fn.LnB
var lnΓ func(float64) float64 = fn.LnΓ
// vAbs replaces every element of x with its absolute value, in place.
func vAbs(x []float64) {
	for i := range x {
		x[i] = abs(x[i])
	}
}
// vCent recalculates the data vector to centralized values, in place:
// each element has the mean of x subtracted from it.
func vCent(x []float64) {
	// NOTE(review): uses the exported Mean (defined elsewhere) rather than
	// the unexported mean helper below — presumably equivalent; confirm.
	mu := Mean(x)
	for i, val := range x {
		x[i] = val - mu
	}
}
// vPow raises every element of the data vector to the given power, in place.
// (The previous comment was a copy-paste of vAbs's description.)
func vPow(x []float64, power float64) {
	for i, val := range x {
		x[i] = pow(val, power)
	}
}
// sum returns the sum of the elements of x; it returns 0 for an empty or
// nil slice.
func sum(x []float64) float64 {
	total := 0.0
	for i := range x {
		total += x[i]
	}
	return total
}
// mean returns the arithmetic mean of the data vector (NaN for an empty
// slice, via 0/0).
func mean(x []float64) float64 {
	return sum(x) / float64(len(x))
}
// diffMean returns the vector of centralized values.
func diffMean(x []float64) []float64 {
d := make([]float64, len(x))
mu := Mean(x)
for i, val := range x {
d[i] = val - mu
}
return d
} | stat/fn.go | 0.701509 | 0.444927 | fn.go | starcoder |
package nn
import (
"github.com/jcla1/matrix"
"math"
)
// TrainingExample pairs one input vector with its expected output vector,
// both stored as column matrices.
type TrainingExample struct {
	Input, ExpectedOutput *matrix.Matrix
}
// Parameters holds the network's weight matrices (thetas), one per layer
// transition.
type Parameters []*matrix.Matrix

// Deltas holds per-layer matrices of error terms produced during
// backpropagation.
type Deltas []*matrix.Matrix
// CostFunction computes the regularized cross-entropy cost of the network
// described by thetas over the given training data. lambda is the
// regularization strength; bias weights are excluded from regularization.
func CostFunction(data []TrainingExample, thetas Parameters, lambda float64) float64 {
	cost := float64(0)
	var estimation []float64
	var expected_output []float64
	// Cost: accumulate the cross-entropy of every output unit of every example.
	for _, datum := range data {
		estimation = Hypothesis(thetas, datum).Values()
		expected_output = datum.ExpectedOutput.Values()
		for k, y := range expected_output {
			// heart of the cost function
			cost += y*math.Log(estimation[k]) + (1-y)*math.Log(1-estimation[k])
		}
	}
	// Regularization: sum of squared weights, skipping the bias weights.
	regularizationCost := float64(0)
	for _, theta := range thetas {
		for i, param := range theta.Values() {
			// ignore theta0 (first entry of each row; assumes Values()
			// is row-major — confirm against the matrix package)
			if i%theta.C() == 0 {
				continue
			}
			regularizationCost += param * param
		}
	}
	return -cost/float64(len(data)) + (lambda/(2*float64(len(data))))*regularizationCost
}
// Hypothesis runs a forward pass of the network on the example's input and
// returns the activation vector of the output layer.
func Hypothesis(thetas Parameters, trainingEx TrainingExample) *matrix.Matrix {
	// Describes the current working values (a_1, a_2, ...)
	curValues := trainingEx.Input
	// Is simply a 1 in a 1x1 matrix to be
	// inserted into a vector as the bias unit
	biasValueMatrix := matrix.Ones(1, 1)
	for _, theta := range thetas {
		// Insert the bias unit, multiply with theta and apply the sigmoid function
		curValues = theta.Mul(curValues.InsertRows(biasValueMatrix, 0)).Apply(sigmoidMatrix)
	}
	return curValues
}
// HypothesisHistory runs a forward pass like Hypothesis, but returns the
// activations of every layer (bias unit included for all layers except the
// output layer). The slice has len(thetas)+1 entries: input first, output last.
func HypothesisHistory(thetas Parameters, trainingEx TrainingExample) []*matrix.Matrix {
	// Describes the current working values (a_1, a_2, ...)
	curValues := trainingEx.Input
	// Is simply a 1 in a 1x1 matrix to be
	// inserted into a vector as the bias unit
	biasValueMatrix := matrix.Ones(1, 1)
	history := make([]*matrix.Matrix, 0, len(thetas)+1)
	history = append(history, curValues.InsertRows(biasValueMatrix, 0))
	for i, theta := range thetas {
		// Insert the bias unit, multiply with theta and apply the sigmoid function
		curValues = theta.Mul(history[len(history)-1]).Apply(sigmoidMatrix)
		if i != len(thetas)-1 {
			history = append(history, curValues.InsertRows(biasValueMatrix, 0))
		} else {
			// The output layer gets no bias unit.
			history = append(history, curValues)
		}
	}
	return history
}
// DeltaTerms computes the per-layer error terms (lowercase delta) for one
// training example by propagating the output error backwards.
func DeltaTerms(thetas Parameters, trainingEx TrainingExample) Deltas {
	deltas := make(Deltas, len(thetas))
	biasValueMatrix := matrix.Ones(1, 1)
	// Output-layer error: prediction minus expected output. The Sub error
	// is discarded; the shapes are equal by construction.
	deltas[len(deltas)-1], _ = Hypothesis(thetas, trainingEx).Sub(trainingEx.ExpectedOutput)
	for i := len(deltas) - 2; i >= 0; i-- {
		workingTheta := thetas[i+1]
		levelPrediction := Hypothesis(thetas[:i+1], trainingEx).InsertRows(biasValueMatrix, 0)
		// Sigmoid gradient: a .* (1 - a).
		tmp, _ := matrix.Ones(levelPrediction.R(), 1).Sub(levelPrediction)
		levelGradient := levelPrediction.EWProd(tmp)
		// Propagate the error backwards and drop the bias row
		// (RemoveRow(1) — presumably 1-based row indexing; confirm).
		deltas[i] = workingTheta.Transpose().Mul(deltas[i+1]).EWProd(levelGradient).RemoveRow(1)
	}
	return deltas
}
// BackProp performs one batch backpropagation pass over trainingSet and
// returns the regularized gradient matrix for each layer's parameters.
// lambda is the regularization strength; bias weights are not regularized.
func BackProp(thetas Parameters, trainingSet []TrainingExample, lambda float64) []*matrix.Matrix {
	// Accumulators for the summed error products (capital delta in the
	// standard derivation), one per parameter matrix.
	bigDeltas := make(Deltas, len(thetas))
	for i := range bigDeltas {
		bigDeltas[i] = matrix.Zeros(thetas[i].R(), thetas[i].C())
	}
	// Make new gradient matrix
	gradients := make([]*matrix.Matrix, len(thetas))
	var activations []*matrix.Matrix
	var deltaTerms Deltas
	for _, trainingEx := range trainingSet {
		activations = HypothesisHistory(thetas, trainingEx)
		deltaTerms = DeltaTerms(thetas, trainingEx)
		for i := 0; i < len(deltaTerms); i++ {
			// Shapes agree by construction, so the Add error is discarded.
			bigDeltas[i], _ = bigDeltas[i].Add(deltaTerms[i].Mul(activations[i].Transpose()))
		}
	}
	for i := range gradients {
		// Zero out the bias column so it is excluded from regularization
		// (Set(1, 1, 0) — presumably 1-based indexing; confirm).
		biasSanitizer := matrix.Eye(thetas[i].C())
		biasSanitizer.Set(1, 1, 0)
		sanitizedTheta := thetas[i].Mul(biasSanitizer).Scale(lambda)
		summedGradients, _ := bigDeltas[i].Add(sanitizedTheta)
		gradients[i] = summedGradients.Scale(1 / float64(len(trainingSet)))
	}
	return gradients
}
// Helper functions

// sigmoidMatrix adapts sigmoid to the matrix Apply callback signature; the
// element index is ignored.
func sigmoidMatrix(index int, value float64) float64 {
	return sigmoid(value)
}
// sigmoid returns the logistic function 1/(1+e^(-z)).
func sigmoid(z float64) float64 {
	// math.Exp is the idiomatic (and more accurate) way to compute e^-z,
	// versus the original math.Pow(math.E, -z).
	return 1 / (1 + math.Exp(-z))
}
package client
import (
"encoding/json"
)
// PeriodRetentionOptions struct for PeriodRetentionOptions.
// Type selects the retention period kind and Count the number of those
// periods to keep (interpretation presumed from the field names — confirm
// against the API specification).
type PeriodRetentionOptions struct {
	Type  RetentionTypes `json:"type"`
	Count int32          `json:"count"`
}
// NewPeriodRetentionOptions instantiates a new PeriodRetentionOptions
// object with both required fields populated. The argument list will change
// if the set of required properties changes.
func NewPeriodRetentionOptions(type_ RetentionTypes, count int32) *PeriodRetentionOptions {
	return &PeriodRetentionOptions{
		Type:  type_,
		Count: count,
	}
}
// NewPeriodRetentionOptionsWithDefaults instantiates a new
// PeriodRetentionOptions object with zero values; it does not guarantee
// that properties required by the API are set.
func NewPeriodRetentionOptionsWithDefaults() *PeriodRetentionOptions {
	return &PeriodRetentionOptions{}
}
// GetType returns the Type field value, or the zero value when the
// receiver is nil.
func (o *PeriodRetentionOptions) GetType() RetentionTypes {
	if o == nil {
		var zero RetentionTypes
		return zero
	}
	return o.Type
}
// GetTypeOk returns a pointer to the Type field and a boolean indicating
// whether the receiver was usable (non-nil).
func (o *PeriodRetentionOptions) GetTypeOk() (*RetentionTypes, bool) {
	if o != nil {
		return &o.Type, true
	}
	return nil, false
}
// SetType sets the required type field to v.
func (o *PeriodRetentionOptions) SetType(v RetentionTypes) {
	o.Type = v
}
// GetCount returns the Count field value, or 0 when the receiver is nil.
func (o *PeriodRetentionOptions) GetCount() int32 {
	if o == nil {
		var zero int32
		return zero
	}
	return o.Count
}
// GetCountOk returns a pointer to the Count field and a boolean indicating
// whether the receiver was usable (non-nil).
func (o *PeriodRetentionOptions) GetCountOk() (*int32, bool) {
	if o != nil {
		return &o.Count, true
	}
	return nil, false
}
// SetCount sets the required count field to v.
func (o *PeriodRetentionOptions) SetCount(v int32) {
	o.Count = v
}
// MarshalJSON serializes o to a JSON object with its two required fields.
func (o PeriodRetentionOptions) MarshalJSON() ([]byte, error) {
	// Both fields are required, so the generated `if true` guards around
	// each assignment were removed; the fields are emitted unconditionally.
	toSerialize := map[string]interface{}{
		"type":  o.Type,
		"count": o.Count,
	}
	return json.Marshal(toSerialize)
}
// NullablePeriodRetentionOptions wraps a PeriodRetentionOptions value with
// an explicit "has been set" flag, so JSON null and "absent" can be
// distinguished from a zero value.
type NullablePeriodRetentionOptions struct {
	value *PeriodRetentionOptions // the wrapped value; may be nil
	isSet bool                    // true once Set or UnmarshalJSON has run
}
// Get returns the wrapped value (may be nil).
func (v NullablePeriodRetentionOptions) Get() *PeriodRetentionOptions {
	return v.value
}
// Set stores val as the wrapped value and marks it as set.
func (v *NullablePeriodRetentionOptions) Set(val *PeriodRetentionOptions) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether a value (possibly nil) has been explicitly set.
func (v NullablePeriodRetentionOptions) IsSet() bool {
	return v.isSet
}
// Unset clears the wrapped value and marks it as not set.
func (v *NullablePeriodRetentionOptions) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullablePeriodRetentionOptions returns a wrapper holding val, already
// marked as set.
func NewNullablePeriodRetentionOptions(val *PeriodRetentionOptions) *NullablePeriodRetentionOptions {
	return &NullablePeriodRetentionOptions{value: val, isSet: true}
}
// MarshalJSON serializes the wrapped value; a nil value encodes as null.
func (v NullablePeriodRetentionOptions) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes src into the wrapped value and marks it as set.
// NOTE(review): isSet becomes true even when unmarshalling fails.
func (v *NullablePeriodRetentionOptions) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package imagexp
import (
"image/color"
"math"
)
func BasicGrayscale(r, g, b, _ uint32) color.Gray16 {
avg := float64((r + g + b) / 3)
return color.Gray16{uint16(math.Ceil(avg))}
}
func ImprovedGrayscale(r, g, b, _ uint32) color.Gray16 {
avg := float64(0.3)*float64(r) + float64(0.59)*float64(g) + float64(0.11)*float64(b)
return color.Gray16{uint16(math.Ceil(avg))}
}
// Desaturation converts a color to gray by averaging its lightest and
// darkest channels (HSL lightness).
func Desaturation(r, g, b, a uint32) color.Gray16 {
	lightness := float64(maxOfThree(r, g, b, a)+minOfThree(r, g, b, a)) / 2
	return color.Gray16{Y: uint16(math.Ceil(lightness))}
}
// DecompositionMax converts a color to gray using the brightest channel.
func DecompositionMax(r, g, b, a uint32) color.Gray16 {
	return color.Gray16{uint16(math.Ceil(float64(maxOfThree(r, g, b, a))))}
}
// DecompositionMin converts a color to gray using the darkest channel.
func DecompositionMin(r, g, b, a uint32) color.Gray16 {
	return color.Gray16{uint16(math.Ceil(float64(minOfThree(r, g, b, a))))}
}
// maxOfThree returns the largest of r, g and b; alpha is ignored.
func maxOfThree(r, g, b, _ uint32) uint32 {
	m := max(r, g)
	return max(m, b)
}
// minOfThree returns the smallest of r, g and b; alpha is ignored.
func minOfThree(r, g, b, _ uint32) uint32 {
	m := min(r, g)
	return min(m, b)
}
// This is how, I'll do it, Until I figure out a better way
func SingleChannelRed(r, _, _, _ uint32) color.Gray16 {
return color.Gray16{uint16(math.Ceil(float64(r)))}
}
func SingleChannelGreen(_, g, _, _ uint32) color.Gray16 {
return color.Gray16{uint16(math.Ceil(float64(g)))}
}
func SingleChannelBlue(_, _, b, _ uint32) color.Gray16 {
return color.Gray16{uint16(math.Ceil(float64(b)))}
}
func RedFilter(r, g, b, a uint32) color.RGBA64 {
if !(r > b) || !(r > g) {
return color.RGBA64{uint16(255), uint16(255), uint16(255), uint16(255)}
}
return color.RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
}
func GreenFilter(r, g, b, a uint32) color.RGBA64 {
if !(g > r) || !(g > b) {
return color.RGBA64{uint16(255), uint16(255), uint16(255), uint16(255)}
}
return color.RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
}
func BlueFilter(r, g, b, a uint32) color.RGBA64 {
if !(b > g) || !(b > r) {
return color.RGBA64{uint16(255), uint16(255), uint16(255), uint16(255)}
}
return color.RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
}
// max returns the larger of a and b.
func max(a, b uint32) uint32 {
	if b > a {
		return b
	}
	return a
}
// min returns the smaller of a and b.
func min(a, b uint32) uint32 {
	if b < a {
		return b
	}
	return a
}
package output
import (
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/internal/metadata"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/output/writer"
"github.com/Jeffail/benthos/v3/lib/types"
)
//------------------------------------------------------------------------------
// init registers the gcp_pubsub output plugin — its constructor, user-facing
// documentation, and configuration field specs — in the global Constructors map.
func init() {
	Constructors[TypeGCPPubSub] = TypeSpec{
		constructor: fromSimpleConstructor(NewGCPPubSub),
		Summary: `
Sends messages to a GCP Cloud Pub/Sub topic. [Metadata](/docs/configuration/metadata) from messages are sent as attributes.`,
		Description: `
For information on how to set up credentials check out [this guide](https://cloud.google.com/docs/authentication/production).

### Troubleshooting

If you're consistently seeing ` + "`Failed to send message to gcp_pubsub: context deadline exceeded`" + ` error logs without any further information it is possible that you are encountering https://github.com/Jeffail/benthos/issues/1042, which occurs when metadata values contain characters that are not valid utf-8. This can frequently occur when consuming from Kafka as the key metadata field may be populated with an arbitrary binary value, but this issue is not exclusive to Kafka.

If you are blocked by this issue then a work around is to delete either the specific problematic keys:

` + "```yaml" + `
pipeline:
  processors:
    - bloblang: |
        meta kafka_key = deleted()
` + "```" + `

Or delete all keys with:

` + "```yaml" + `
pipeline:
  processors:
    - bloblang: meta = deleted()
` + "```" + ``,
		Async: true,
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon("project", "The project ID of the topic to publish to."),
			docs.FieldCommon("topic", "The topic to publish to.").IsInterpolated(),
			docs.FieldCommon("max_in_flight", "The maximum number of messages to have in flight at a given time. Increase this to improve throughput."),
			docs.FieldAdvanced("publish_timeout", "The maximum length of time to wait before abandoning a publish attempt for a message.", "10s", "5m", "60m"),
			docs.FieldAdvanced("ordering_key", "The ordering key to use for publishing messages.").IsInterpolated(),
			docs.FieldCommon("metadata", "Specify criteria for which metadata values are sent as attributes.").WithChildren(metadata.ExcludeFilterFields()...),
		},
		Categories: []Category{
			CategoryServices,
			CategoryGCP,
		},
	}
}
//------------------------------------------------------------------------------
// NewGCPPubSub creates a new GCPPubSub output type: an async writer around
// the underlying Pub/Sub client, capped at the configured max-in-flight,
// and wrapped so that only single-message payloads reach the writer.
func NewGCPPubSub(conf Config, mgr types.Manager, log log.Modular, stats metrics.Type) (Type, error) {
	a, err := writer.NewGCPPubSubV2(conf.GCPPubSub, mgr, log, stats)
	if err != nil {
		return nil, err
	}
	w, err := NewAsyncWriter(
		TypeGCPPubSub, conf.GCPPubSub.MaxInFlight, a, log, stats,
	)
	if err != nil {
		return nil, err
	}
	return OnlySinglePayloads(w), nil
}
//------------------------------------------------------------------------------
// Package verify holds helpers for validating the correctness of various
// artifacts and proofs used in the system.
package verify
import (
"bytes"
"encoding/json"
"fmt"
"github.com/google/trillian-examples/binary_transparency/firmware/api"
"github.com/google/trillian-examples/binary_transparency/firmware/internal/crypto"
)
// ConsistencyProofFunc is a function which returns a consistency proof
// between two tree sizes, demonstrating that the log at size `to` is an
// append-only extension of the log at size `from`.
type ConsistencyProofFunc func(from, to uint64) ([][]byte, error)
// BundleForUpdate checks that the manifest, checkpoint, and proofs in a
// bundle are all self-consistent, and that the provided firmware image hash
// matches the one in the bundle. It also checks the consistency proof
// between the update log point and the device's log point (for non-zero
// device tree size).
//
// bundleRaw is the serialised proof bundle, fwHash the hash of the
// candidate firmware image, dc the device's current view of the log, and
// cpFunc supplies consistency proofs between two log sizes.
func BundleForUpdate(bundleRaw, fwHash []byte, dc api.LogCheckpoint, cpFunc ConsistencyProofFunc) error {
	proofBundle, fwMeta, err := verifyBundle(bundleRaw)
	if err != nil {
		return err
	}
	if got, want := fwHash, fwMeta.FirmwareImageSHA512; !bytes.Equal(got, want) {
		return fmt.Errorf("firmware update image hash does not match metadata (0x%x != 0x%x)", got, want)
	}
	cProof, err := cpFunc(dc.TreeSize, proofBundle.Checkpoint.TreeSize)
	if err != nil {
		// Wrap with %w: the original used %q, which stringifies the error
		// and defeats errors.Is/errors.As unwrapping.
		return fmt.Errorf("cpFunc failed: %w", err)
	}
	// Verify the consistency proof between device and bundle checkpoint.
	// A device at tree size zero has no prior view, so nothing to prove.
	if dc.TreeSize > 0 {
		lv := NewLogVerifier()
		if err := lv.VerifyConsistencyProof(int64(dc.TreeSize), int64(proofBundle.Checkpoint.TreeSize), dc.RootHash, proofBundle.Checkpoint.RootHash, cProof); err != nil {
			return fmt.Errorf("failed verification of consistency proof %w", err)
		}
	}
	return nil
}
// BundleForBoot checks that the manifest, checkpoint, and proofs in a bundle
// are all self-consistent, and that the provided firmware measurement matches
// the one expected by the bundle's metadata.
func BundleForBoot(bundleRaw, measurement []byte) error {
	_, fwMeta, err := verifyBundle(bundleRaw)
	if err != nil {
		return err
	}
	if got, want := measurement, fwMeta.ExpectedFirmwareMeasurement; !bytes.Equal(got, want) {
		return fmt.Errorf("firmware measurement does not match metadata (0x%x != 0x%x)", got, want)
	}
	return nil
}
// verifyBundle parses a proof bundle and verifies its self-consistency:
// the firmware statement's signature, the statement's inclusion in the log
// under the bundle's checkpoint, and the well-formedness of the embedded
// metadata. It returns the parsed bundle and firmware metadata.
func verifyBundle(bundleRaw []byte) (api.ProofBundle, api.FirmwareMetadata, error) {
	var pb api.ProofBundle
	if err := json.Unmarshal(bundleRaw, &pb); err != nil {
		return api.ProofBundle{}, api.FirmwareMetadata{}, fmt.Errorf("failed to parse proof bundle: %w", err)
	}
	// TODO(al): check Checkpoint signature
	var fwStatement api.FirmwareStatement
	if err := json.Unmarshal(pb.ManifestStatement, &fwStatement); err != nil {
		return api.ProofBundle{}, api.FirmwareMetadata{}, fmt.Errorf("failed to unmarshal FirmwareStatement: %w", err)
	}
	// Verify the statement signature:
	if err := crypto.VerifySignature(fwStatement.Metadata, fwStatement.Signature); err != nil {
		return api.ProofBundle{}, api.FirmwareMetadata{}, fmt.Errorf("failed to verify signature on FirmwareStatement: %w", err)
	}
	// Check the statement really is committed to by the log under the
	// checkpoint carried in the bundle.
	lh := HashLeaf(pb.ManifestStatement)
	lv := NewLogVerifier()
	if err := lv.VerifyInclusionProof(int64(pb.InclusionProof.LeafIndex), int64(pb.Checkpoint.TreeSize), pb.InclusionProof.Proof, pb.Checkpoint.RootHash, lh); err != nil {
		return api.ProofBundle{}, api.FirmwareMetadata{}, fmt.Errorf("invalid inclusion proof in bundle: %w", err)
	}
	var fwMeta api.FirmwareMetadata
	if err := json.Unmarshal(fwStatement.Metadata, &fwMeta); err != nil {
		return api.ProofBundle{}, api.FirmwareMetadata{}, fmt.Errorf("failed to unmarshal Metadata: %w", err)
	}
	return pb, fwMeta, nil
}
package main
import "math"
// isSameTree reports whether the trees rooted at p and q are structurally
// identical with equal values at every node.
func isSameTree(p *TreeNode, q *TreeNode) bool {
	switch {
	case p == nil && q == nil:
		return true
	case p == nil || q == nil:
		return false
	default:
		return p.Val == q.Val && isSameTree(p.Left, q.Left) && isSameTree(p.Right, q.Right)
	}
}
// isMirror reports whether the trees rooted at p and q are mirror images
// of each other (equal values, left/right subtrees swapped).
func isMirror(p *TreeNode, q *TreeNode) bool {
	switch {
	case p == nil && q == nil:
		return true
	case p == nil || q == nil:
		return false
	default:
		return p.Val == q.Val && isMirror(p.Left, q.Right) && isMirror(p.Right, q.Left)
	}
}
//LC: https://leetcode.com/problems/symmetric-tree/
// isSymmetric reports whether the tree reads the same mirrored around its
// center. NOTE(review): this re-declares the mirror check as a closure even
// though a package-level isMirror with the same logic exists above —
// consider calling that instead.
func isSymmetric(root *TreeNode) bool {
	var isMirror func(p *TreeNode, q *TreeNode) bool
	isMirror = func(p *TreeNode, q *TreeNode) bool {
		if p == nil || q == nil {
			return p == q
		}
		return p.Val == q.Val && isMirror(p.Left, q.Right) && isMirror(p.Right, q.Left)
	}
	return isMirror(root, root)
}
// lowestCommonAncestor returns the lowest common ancestor of p and q in a
// binary SEARCH tree rooted at root: node values steer the descent.
// Assumes p and q are present in the tree (a nil root would panic).
func lowestCommonAncestor(root, p, q *TreeNode) *TreeNode {
	if root.Val < p.Val && root.Val < q.Val {
		// Both targets are larger: the LCA lies in the right subtree.
		return lowestCommonAncestor(root.Right, p, q)
	} else if root.Val > p.Val && root.Val > q.Val {
		// Both targets are smaller: the LCA lies in the left subtree.
		return lowestCommonAncestor(root.Left, p, q)
	} else {
		// root sits between p and q (or equals one of them): it is the LCA.
		return root
	}
}
// lowestCommonAncestorIterative is the loop-based equivalent of
// lowestCommonAncestor for a binary search tree. Unlike the recursive
// version it tolerates a nil root, returning nil.
func lowestCommonAncestorIterative(root, p, q *TreeNode) *TreeNode {
	if root == nil {
		return nil
	}
	curr := root
	for {
		switch {
		case curr.Val < p.Val && curr.Val < q.Val:
			// Both targets are larger: descend right.
			curr = curr.Right
		case curr.Val > p.Val && curr.Val > q.Val:
			// Both targets are smaller: descend left.
			curr = curr.Left
		default:
			// curr separates p and q (or equals one): it is the LCA.
			return curr
		}
	}
}
// isValidBST reports whether the tree rooted at root is a valid binary
// search tree: every node is strictly greater than all nodes in its left
// subtree and strictly less than all nodes in its right subtree (so
// duplicate values are invalid).
func isValidBST(root *TreeNode) bool {
	if root == nil {
		return true
	}
	// Each node must lie strictly inside (lo, hi). The original used
	// math.MinInt32/MaxInt32 sentinels with non-strict comparisons, which
	// broke for values at or beyond the int32 range and wrongly accepted
	// duplicate values.
	var dfs func(n *TreeNode, lo, hi int) bool
	dfs = func(n *TreeNode, lo, hi int) bool {
		if n == nil {
			return true
		}
		if n.Val <= lo || n.Val >= hi {
			return false
		}
		return dfs(n.Left, lo, n.Val) && dfs(n.Right, n.Val, hi)
	}
	// Nodes holding exactly math.MinInt or math.MaxInt would still be
	// rejected by these sentinels; use pointer bounds if that matters.
	return dfs(root, math.MinInt, math.MaxInt)
}
// abs returns the absolute value of a.
func abs(a int) int {
	if a >= 0 {
		return a
	}
	return -a
}
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// checkBalance returns the height of the subtree rooted at root together
// with whether that subtree is height-balanced (every node's child heights
// differ by at most one).
func checkBalance(root *TreeNode) (int, bool) {
	if root == nil {
		return 0, true
	}
	lh, lOK := checkBalance(root.Left)
	rh, rOK := checkBalance(root.Right)
	ok := lOK && rOK && abs(lh-rh) <= 1
	return max(lh, rh) + 1, ok
}
// isBalanced reports whether the whole tree is height-balanced.
// checkBalance already treats a nil tree as balanced, so no separate nil
// check is needed.
func isBalanced(root *TreeNode) bool {
	_, balanced := checkBalance(root)
	return balanced
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
func minDepth(root *TreeNode) int {
if root == nil {
return 0
}
if root.Left == nil {
return minDepth(root.Right) + 1
} else if root.Right == nil {
return minDepth(root.Left) + 1
} else {
return min(minDepth(root.Left), minDepth(root.Right)) + 1
}
} | binarytree/checks.go | 0.79799 | 0.465813 | checks.go | starcoder |
package nvm
// Matrix is a matrix interface implemented by the package's concrete
// matrix types (currently the dense *M).
type Matrix interface {
	// IsNaM reports whether `m` is "Not-a-Matrix".
	IsNaM() bool
	// Dims returns the rows `r` and cols `c` of the matrix.
	Dims() (r, c int)
	// At returns the element at position `i`th row and `j`th col.
	// At will panic if `i` or `j` is out of bounds.
	At(i, j int) float64
	// SetAt sets `f` to the element at position `i`th row and `j`th col,
	// returning the matrix to allow chaining.
	// SetAt will panic if `i` or `j` is out of bounds.
	SetAt(i, j int, f float64) Matrix
	// T returns the transpose of the matrix.
	T() Matrix
	// Det computes the determinant of the square matrix.
	Det() float64
	// Inv computes the inverse of the square matrix.
	// Inv will panic if the square matrix is not invertible.
	Inv() Matrix
	// // SwapRows swaps the `ri`th row and the `rj`th row.
	// SwapRows(ri, rj int) Matrix
	// // SwapCols swaps the `ci`th col and the `cj`th col.
	// SwapCols(ci, cj int) Matrix
}
// // TM represents a triangular matrix.
// // BM represents a band matrix.
// // SM represents a sparse matrix.
// IsSameShape reports whether the matrices `x` and `y` have the same shape.
// IsSameShape returns `false` if `x` or `y` is NaM.
func IsSameShape(x, y Matrix) bool {
	if x.IsNaM() || y.IsNaM() {
		return false
	}
	xr, xc := x.Dims()
	yr, yc := y.Dims()
	return xr == yr && xc == yc
}
// IsEqual reports whether the matrices `x` and `y` have the same shape,
// and are element-wise equal.
func IsEqual(x, y Matrix) bool {
	if !IsSameShape(x, y) {
		return false
	}
	r, c := x.Dims()
	for i := 0; i < r; i++ {
		for j := 0; j < c; j++ {
			// IsNEqual presumably reports numeric (possibly
			// tolerance-aware) equality of two float64s — confirm its
			// semantics where it is defined.
			if !IsNEqual(x.At(i, j), y.At(i, j)) {
				return false
			}
		}
	}
	return true
}
// Add adds `x` and `y` element-wise, placing the result in the new matrix.
// Add will panic if the two matrices do not have the same shape, or if
// either argument is not the dense *M implementation (ErrImpType).
func Add(x, y Matrix) Matrix {
	if !IsSameShape(x, y) {
		panic(ErrShape)
	}
	/// *M & *M
	if x, ok := x.(*M); ok {
		if y, ok := y.(*M); ok {
			return NewM(x.Dims()).Add(x, y)
		}
	}
	panic(ErrImpType)
}
// Sub subtracts `y` from `x` element-wise, placing the result in the new matrix.
// Sub will panic if the two matrices do not have the same shape, or if
// either argument is not the dense *M implementation (ErrImpType).
func Sub(x, y Matrix) Matrix {
	if !IsSameShape(x, y) {
		panic(ErrShape)
	}
	/// *M & *M
	if x, ok := x.(*M); ok {
		if y, ok := y.(*M); ok {
			return NewM(x.Dims()).Sub(x, y)
		}
	}
	panic(ErrImpType)
}
// Scale multiplies the elements of `x` by `f`, placing the result in the
// new matrix. Scale panics with ErrNaM when `x` is Not-a-Matrix, and with
// ErrImpType when `x` is not the dense *M implementation.
func Scale(f float64, x Matrix) Matrix {
	if x.IsNaM() {
		panic(ErrNaM)
	}
	if x, ok := x.(*M); ok {
		return NewM(x.Dims()).Scale(f, x)
	}
	panic(ErrImpType)
}
// DotMul performs element-wise multiplication of `x` and `y`, placing the
// result in the new matrix. DotMul will panic if the two matrices do not
// have the same shape, or if either argument is not the dense *M
// implementation (ErrImpType).
func DotMul(x, y Matrix) Matrix {
	if !IsSameShape(x, y) {
		panic(ErrShape)
	}
	if x, ok := x.(*M); ok {
		if y, ok := y.(*M); ok {
			return NewM(x.Dims()).DotMul(x, y)
		}
	}
	panic(ErrImpType)
}
// Mul computes the matrix product of `x` and `y`, placing the result in the
// new xr-by-yc matrix. Mul will panic if the cols of `x` is not equal to
// the rows of `y`, or if either argument is not the dense *M implementation.
// NOTE(review): unlike the element-wise operations above, no NaM or shape
// pre-check is performed here — presumably (*M).Mul validates the inner
// dimensions itself; confirm.
func Mul(x, y Matrix) Matrix {
	if x, ok := x.(*M); ok {
		if y, ok := y.(*M); ok {
			xr, _ := x.Dims()
			_, yc := y.Dims()
			return NewM(xr, yc).Mul(x, y)
		}
	}
	panic(ErrImpType)
}
package raymath
import (
"math"
"github.com/gen2brain/raylib-go/raylib"
)
// Vector2Zero - Vector with both components equal to 0.0
func Vector2Zero() rl.Vector2 {
	return rl.Vector2{}
}
// Vector2One - Vector with both components equal to 1.0
func Vector2One() rl.Vector2 {
	return rl.Vector2{X: 1.0, Y: 1.0}
}
// Vector2Add - Add two vectors component-wise (v1 + v2)
func Vector2Add(v1, v2 rl.Vector2) rl.Vector2 {
	return rl.Vector2{X: v1.X + v2.X, Y: v1.Y + v2.Y}
}
// Vector2Subtract - Subtract two vectors (v1 - v2)
func Vector2Subtract(v1, v2 rl.Vector2) rl.Vector2 {
return rl.NewVector2(v1.X-v2.X, v1.Y-v2.Y)
}
// Vector2Length - Calculate vector length
func Vector2Length(v rl.Vector2) float32 {
return float32(math.Sqrt(float64((v.X * v.X) + (v.Y * v.Y))))
}
// Vector2DotProduct - Calculate two vectors dot product
func Vector2DotProduct(v1, v2 rl.Vector2) float32 {
return (v1.X*v2.X + v1.Y*v2.Y)
}
// Vector2Distance - Calculate distance between two vectors
func Vector2Distance(v1, v2 rl.Vector2) float32 {
return float32(math.Sqrt(float64((v1.X-v2.X)*(v1.X-v2.X) + (v1.Y-v2.Y)*(v1.Y-v2.Y))))
}
// Vector2Angle - Calculate angle between two vectors in X-axis
func Vector2Angle(v1, v2 rl.Vector2) float32 {
angle := float32(math.Atan2(float64(v2.Y-v1.Y), float64(v2.X-v1.X)) * (180.0 / float64(rl.Pi)))
if angle < 0 {
angle += 360.0
}
return angle
}
// Vector2Scale - Scale vector (multiply by value)
func Vector2Scale(v *rl.Vector2, scale float32) {
v.X *= scale
v.Y *= scale
}
// Vector2Negate - Negate vector
func Vector2Negate(v *rl.Vector2) {
v.X = -v.X
v.Y = -v.Y
}
// Vector2Divide - Divide vector by a float value
func Vector2Divide(v *rl.Vector2, div float32) {
v.X = v.X / div
v.Y = v.Y / div
}
// Vector2Normalize - Normalize provided vector
//
// A zero-length vector is left unchanged instead of being divided by zero
// (which previously produced NaN components); this matches the zero-length
// guard used by Vector3Normalize in this package.
func Vector2Normalize(v *rl.Vector2) {
	length := Vector2Length(*v)
	if length == 0.0 {
		return
	}
	Vector2Divide(v, length)
}
// Vector2CrossProduct - Calculate two vectors cross product
func Vector2CrossProduct(v1, v2 rl.Vector2) float32 {
return v1.X*v2.Y - v1.Y*v2.X
}
// Vector2Cross - Calculate the cross product of a vector and a value
func Vector2Cross(value float32, vector rl.Vector2) rl.Vector2 {
return rl.NewVector2(-value*vector.Y, value*vector.X)
}
// Vector2LenSqr - Returns the len square root of a vector
func Vector2LenSqr(vector rl.Vector2) float32 {
return vector.X*vector.X + vector.Y*vector.Y
}
// Mat2Radians - Creates a matrix 2x2 from a given radians value
func Mat2Radians(radians float32) rl.Mat2 {
c := float32(math.Cos(float64(radians)))
s := float32(math.Sin(float64(radians)))
return rl.NewMat2(c, -s, s, c)
}
// Mat2Set - Set values from radians to a created matrix 2x2
func Mat2Set(matrix *rl.Mat2, radians float32) {
cos := float32(math.Cos(float64(radians)))
sin := float32(math.Sin(float64(radians)))
matrix.M00 = cos
matrix.M01 = -sin
matrix.M10 = sin
matrix.M11 = cos
}
// Mat2Transpose - Returns the transpose of a given matrix 2x2
func Mat2Transpose(matrix rl.Mat2) rl.Mat2 {
return rl.NewMat2(matrix.M00, matrix.M10, matrix.M01, matrix.M11)
}
// Mat2MultiplyVector2 - Multiplies a vector by a matrix 2x2
func Mat2MultiplyVector2(matrix rl.Mat2, vector rl.Vector2) rl.Vector2 {
return rl.NewVector2(matrix.M00*vector.X+matrix.M01*vector.Y, matrix.M10*vector.X+matrix.M11*vector.Y)
}
// Vector3Zero - Vector with components value 0.0
func Vector3Zero() rl.Vector3 {
return rl.NewVector3(0.0, 0.0, 0.0)
}
// Vector3One - Vector with components value 1.0
func Vector3One() rl.Vector3 {
return rl.NewVector3(1.0, 1.0, 1.0)
}
// Vector3Add - Add two vectors
func Vector3Add(v1, v2 rl.Vector3) rl.Vector3 {
return rl.NewVector3(v1.X+v2.X, v1.Y+v2.Y, v1.Z+v2.Z)
}
// Vector3Multiply - Multiply vector by scalar
func Vector3Multiply(v rl.Vector3, scalar float32) rl.Vector3 {
result := rl.Vector3{}
result.X = v.X * scalar
result.Y = v.Y * scalar
result.Z = v.Z * scalar
return result
}
// Vector3MultiplyV - Multiply vector by vector
func Vector3MultiplyV(v1, v2 rl.Vector3) rl.Vector3 {
result := rl.Vector3{}
result.X = v1.X * v2.X
result.Y = v1.Y * v2.Y
result.Z = v1.Z * v2.Z
return result
}
// Vector3Subtract - Subtract two vectors
func Vector3Subtract(v1, v2 rl.Vector3) rl.Vector3 {
return rl.NewVector3(v1.X-v2.X, v1.Y-v2.Y, v1.Z-v2.Z)
}
// Vector3CrossProduct - Calculate two vectors cross product
func Vector3CrossProduct(v1, v2 rl.Vector3) rl.Vector3 {
result := rl.Vector3{}
result.X = v1.Y*v2.Z - v1.Z*v2.Y
result.Y = v1.Z*v2.X - v1.X*v2.Z
result.Z = v1.X*v2.Y - v1.Y*v2.X
return result
}
// Vector3Perpendicular - Calculate one vector perpendicular vector
func Vector3Perpendicular(v rl.Vector3) rl.Vector3 {
result := rl.Vector3{}
min := math.Abs(float64(v.X))
cardinalAxis := rl.NewVector3(1.0, 0.0, 0.0)
if math.Abs(float64(v.Y)) < min {
min = math.Abs(float64(v.Y))
cardinalAxis = rl.NewVector3(0.0, 1.0, 0.0)
}
if math.Abs(float64(v.Z)) < min {
cardinalAxis = rl.NewVector3(0.0, 0.0, 1.0)
}
result = Vector3CrossProduct(v, cardinalAxis)
return result
}
// Vector3Length - Calculate vector length
func Vector3Length(v rl.Vector3) float32 {
return float32(math.Sqrt(float64(v.X*v.X + v.Y*v.Y + v.Z*v.Z)))
}
// Vector3DotProduct - Calculate two vectors dot product
func Vector3DotProduct(v1, v2 rl.Vector3) float32 {
return v1.X*v2.X + v1.Y*v2.Y + v1.Z*v2.Z
}
// Vector3Distance - Calculate distance between two vectors
func Vector3Distance(v1, v2 rl.Vector3) float32 {
dx := v2.X - v1.X
dy := v2.Y - v1.Y
dz := v2.Z - v1.Z
return float32(math.Sqrt(float64(dx*dx + dy*dy + dz*dz)))
}
// Vector3Scale - Scale provided vector
func Vector3Scale(v *rl.Vector3, scale float32) {
v.X *= scale
v.Y *= scale
v.Z *= scale
}
// Vector3Negate - Negate provided vector (invert direction)
func Vector3Negate(v *rl.Vector3) {
v.X = -v.X
v.Y = -v.Y
v.Z = -v.Z
}
// Vector3Normalize - Normalize provided vector
func Vector3Normalize(v *rl.Vector3) {
	// Treat a zero-length vector as length 1 to avoid dividing by zero;
	// scaling the zero vector by 1 leaves it unchanged.
	length := Vector3Length(*v)
	if length == 0 {
		length = 1.0
	}
	inv := 1.0 / length
	v.X *= inv
	v.Y *= inv
	v.Z *= inv
}
// Vector3Transform - Transforms a Vector3 by a given Matrix
func Vector3Transform(v *rl.Vector3, mat rl.Matrix) {
x := v.X
y := v.Y
z := v.Z
v.X = mat.M0*x + mat.M4*y + mat.M8*z + mat.M12
v.Y = mat.M1*x + mat.M5*y + mat.M9*z + mat.M13
v.Z = mat.M2*x + mat.M6*y + mat.M10*z + mat.M14
}
// Vector3Lerp - Calculate linear interpolation between two vectors
func Vector3Lerp(v1, v2 rl.Vector3, amount float32) rl.Vector3 {
result := rl.Vector3{}
result.X = v1.X + amount*(v2.X-v1.X)
result.Y = v1.Y + amount*(v2.Y-v1.Y)
result.Z = v1.Z + amount*(v2.Z-v1.Z)
return result
}
// Vector3Reflect - Calculate reflected vector to normal
func Vector3Reflect(vector, normal rl.Vector3) rl.Vector3 {
// I is the original vector
// N is the normal of the incident plane
// R = I - (2*N*( DotProduct[ I,N] ))
result := rl.Vector3{}
dotProduct := Vector3DotProduct(vector, normal)
result.X = vector.X - (2.0*normal.X)*dotProduct
result.Y = vector.Y - (2.0*normal.Y)*dotProduct
result.Z = vector.Z - (2.0*normal.Z)*dotProduct
return result
}
// Vector3Min - Return min value for each pair of components
func Vector3Min(vec1, vec2 rl.Vector3) rl.Vector3 {
result := rl.Vector3{}
result.X = float32(math.Min(float64(vec1.X), float64(vec2.X)))
result.Y = float32(math.Min(float64(vec1.Y), float64(vec2.Y)))
result.Z = float32(math.Min(float64(vec1.Z), float64(vec2.Z)))
return result
}
// Vector3Max - Return max value for each pair of components
func Vector3Max(vec1, vec2 rl.Vector3) rl.Vector3 {
result := rl.Vector3{}
result.X = float32(math.Max(float64(vec1.X), float64(vec2.X)))
result.Y = float32(math.Max(float64(vec1.Y), float64(vec2.Y)))
result.Z = float32(math.Max(float64(vec1.Z), float64(vec2.Z)))
return result
}
// Vector3Barycenter - Barycenter coords for p in triangle abc
func Vector3Barycenter(p, a, b, c rl.Vector3) rl.Vector3 {
v0 := Vector3Subtract(b, a)
v1 := Vector3Subtract(c, a)
v2 := Vector3Subtract(p, a)
d00 := Vector3DotProduct(v0, v0)
d01 := Vector3DotProduct(v0, v1)
d11 := Vector3DotProduct(v1, v1)
d20 := Vector3DotProduct(v2, v0)
d21 := Vector3DotProduct(v2, v1)
denom := d00*d11 - d01*d01
result := rl.Vector3{}
result.Y = (d11*d20 - d01*d21) / denom
result.Z = (d00*d21 - d01*d20) / denom
result.X = 1.0 - (result.Z + result.Y)
return result
}
// MatrixDeterminant - Compute matrix determinant
func MatrixDeterminant(mat rl.Matrix) float32 {
var result float32
a00 := mat.M0
a01 := mat.M1
a02 := mat.M2
a03 := mat.M3
a10 := mat.M4
a11 := mat.M5
a12 := mat.M6
a13 := mat.M7
a20 := mat.M8
a21 := mat.M9
a22 := mat.M10
a23 := mat.M11
a30 := mat.M12
a31 := mat.M13
a32 := mat.M14
a33 := mat.M15
result = a30*a21*a12*a03 - a20*a31*a12*a03 - a30*a11*a22*a03 + a10*a31*a22*a03 +
a20*a11*a32*a03 - a10*a21*a32*a03 - a30*a21*a02*a13 + a20*a31*a02*a13 +
a30*a01*a22*a13 - a00*a31*a22*a13 - a20*a01*a32*a13 + a00*a21*a32*a13 +
a30*a11*a02*a23 - a10*a31*a02*a23 - a30*a01*a12*a23 + a00*a31*a12*a23 +
a10*a01*a32*a23 - a00*a11*a32*a23 - a20*a11*a02*a33 + a10*a21*a02*a33 +
a20*a01*a12*a33 - a00*a21*a12*a33 - a10*a01*a22*a33 + a00*a11*a22*a33
return result
}
// MatrixTrace - Returns the trace of the matrix (sum of the values along the diagonal)
func MatrixTrace(mat rl.Matrix) float32 {
return mat.M0 + mat.M5 + mat.M10 + mat.M15
}
// MatrixTranspose - Transposes provided matrix
//
// The transposed values are written back through `mat`. The previous
// implementation ended with `mat = &temp`, which only reassigned the local
// pointer variable and left the caller's matrix completely unchanged.
func MatrixTranspose(mat *rl.Matrix) {
	var temp rl.Matrix
	// Each row of the source becomes a column of the result.
	temp.M0 = mat.M0
	temp.M1 = mat.M4
	temp.M2 = mat.M8
	temp.M3 = mat.M12
	temp.M4 = mat.M1
	temp.M5 = mat.M5
	temp.M6 = mat.M9
	temp.M7 = mat.M13
	temp.M8 = mat.M2
	temp.M9 = mat.M6
	temp.M10 = mat.M10
	temp.M11 = mat.M14
	temp.M12 = mat.M3
	temp.M13 = mat.M7
	temp.M14 = mat.M11
	temp.M15 = mat.M15
	// Publish the result to the caller through the pointer.
	*mat = temp
}
// MatrixInvert - Invert provided matrix
//
// The inverse is written back through `mat`. The previous implementation
// ended with `mat = &temp`, which only reassigned the local pointer and left
// the caller's matrix untouched.
//
// NOTE(review): there is no guard for a singular matrix — a zero determinant
// makes invDet divide by zero, yielding Inf/NaN entries; confirm callers
// never pass a singular matrix.
func MatrixInvert(mat *rl.Matrix) {
	var temp rl.Matrix
	// Cache the matrix values (speed optimization).
	a00 := mat.M0
	a01 := mat.M1
	a02 := mat.M2
	a03 := mat.M3
	a10 := mat.M4
	a11 := mat.M5
	a12 := mat.M6
	a13 := mat.M7
	a20 := mat.M8
	a21 := mat.M9
	a22 := mat.M10
	a23 := mat.M11
	a30 := mat.M12
	a31 := mat.M13
	a32 := mat.M14
	a33 := mat.M15
	// 2x2 sub-determinants (cofactor building blocks).
	b00 := a00*a11 - a01*a10
	b01 := a00*a12 - a02*a10
	b02 := a00*a13 - a03*a10
	b03 := a01*a12 - a02*a11
	b04 := a01*a13 - a03*a11
	b05 := a02*a13 - a03*a12
	b06 := a20*a31 - a21*a30
	b07 := a20*a32 - a22*a30
	b08 := a20*a33 - a23*a30
	b09 := a21*a32 - a22*a31
	b10 := a21*a33 - a23*a31
	b11 := a22*a33 - a23*a32
	// Calculate the invert determinant (inlined to avoid double-caching)
	invDet := 1.0 / (b00*b11 - b01*b10 + b02*b09 + b03*b08 - b04*b07 + b05*b06)
	temp.M0 = (a11*b11 - a12*b10 + a13*b09) * invDet
	temp.M1 = (-a01*b11 + a02*b10 - a03*b09) * invDet
	temp.M2 = (a31*b05 - a32*b04 + a33*b03) * invDet
	temp.M3 = (-a21*b05 + a22*b04 - a23*b03) * invDet
	temp.M4 = (-a10*b11 + a12*b08 - a13*b07) * invDet
	temp.M5 = (a00*b11 - a02*b08 + a03*b07) * invDet
	temp.M6 = (-a30*b05 + a32*b02 - a33*b01) * invDet
	temp.M7 = (a20*b05 - a22*b02 + a23*b01) * invDet
	temp.M8 = (a10*b10 - a11*b08 + a13*b06) * invDet
	temp.M9 = (-a00*b10 + a01*b08 - a03*b06) * invDet
	temp.M10 = (a30*b04 - a31*b02 + a33*b00) * invDet
	temp.M11 = (-a20*b04 + a21*b02 - a23*b00) * invDet
	temp.M12 = (-a10*b09 + a11*b07 - a12*b06) * invDet
	temp.M13 = (a00*b09 - a01*b07 + a02*b06) * invDet
	temp.M14 = (-a30*b03 + a31*b01 - a32*b00) * invDet
	temp.M15 = (a20*b03 - a21*b01 + a22*b00) * invDet
	// Publish the result to the caller through the pointer.
	*mat = temp
}
// MatrixNormalize - Normalize provided matrix
// Divides every element of the matrix by its determinant, in place.
// NOTE(review): there is no guard for a zero determinant — a singular matrix
// produces Inf/NaN entries; confirm callers avoid that case.
func MatrixNormalize(mat *rl.Matrix) {
	det := MatrixDeterminant(*mat)
	mat.M0 /= det
	mat.M1 /= det
	mat.M2 /= det
	mat.M3 /= det
	mat.M4 /= det
	mat.M5 /= det
	mat.M6 /= det
	mat.M7 /= det
	mat.M8 /= det
	mat.M9 /= det
	mat.M10 /= det
	mat.M11 /= det
	mat.M12 /= det
	mat.M13 /= det
	mat.M14 /= det
	mat.M15 /= det
}
// MatrixIdentity - Returns identity matrix
func MatrixIdentity() rl.Matrix {
return rl.NewMatrix(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0)
}
// MatrixAdd - Add two matrices
func MatrixAdd(left, right rl.Matrix) rl.Matrix {
result := MatrixIdentity()
result.M0 = left.M0 + right.M0
result.M1 = left.M1 + right.M1
result.M2 = left.M2 + right.M2
result.M3 = left.M3 + right.M3
result.M4 = left.M4 + right.M4
result.M5 = left.M5 + right.M5
result.M6 = left.M6 + right.M6
result.M7 = left.M7 + right.M7
result.M8 = left.M8 + right.M8
result.M9 = left.M9 + right.M9
result.M10 = left.M10 + right.M10
result.M11 = left.M11 + right.M11
result.M12 = left.M12 + right.M12
result.M13 = left.M13 + right.M13
result.M14 = left.M14 + right.M14
result.M15 = left.M15 + right.M15
return result
}
// MatrixSubtract - Subtract two matrices (left - right)
func MatrixSubtract(left, right rl.Matrix) rl.Matrix {
result := MatrixIdentity()
result.M0 = left.M0 - right.M0
result.M1 = left.M1 - right.M1
result.M2 = left.M2 - right.M2
result.M3 = left.M3 - right.M3
result.M4 = left.M4 - right.M4
result.M5 = left.M5 - right.M5
result.M6 = left.M6 - right.M6
result.M7 = left.M7 - right.M7
result.M8 = left.M8 - right.M8
result.M9 = left.M9 - right.M9
result.M10 = left.M10 - right.M10
result.M11 = left.M11 - right.M11
result.M12 = left.M12 - right.M12
result.M13 = left.M13 - right.M13
result.M14 = left.M14 - right.M14
result.M15 = left.M15 - right.M15
return result
}
// MatrixTranslate - Returns translation matrix
func MatrixTranslate(x, y, z float32) rl.Matrix {
return rl.NewMatrix(
1.0, 0.0, 0.0, x,
0.0, 1.0, 0.0, y,
0.0, 0.0, 1.0, z,
0, 0, 0, 1.0)
}
// MatrixRotate - Returns rotation matrix for an angle around an specified axis (angle in radians)
func MatrixRotate(axis rl.Vector3, angle float32) rl.Matrix {
var result rl.Matrix
mat := MatrixIdentity()
x := axis.X
y := axis.Y
z := axis.Z
length := float32(math.Sqrt(float64(x*x + y*y + z*z)))
if length != 1.0 && length != 0.0 {
length = 1.0 / length
x *= length
y *= length
z *= length
}
sinres := float32(math.Sin(float64(angle)))
cosres := float32(math.Cos(float64(angle)))
t := 1.0 - cosres
// Cache some matrix values (speed optimization)
a00 := mat.M0
a01 := mat.M1
a02 := mat.M2
a03 := mat.M3
a10 := mat.M4
a11 := mat.M5
a12 := mat.M6
a13 := mat.M7
a20 := mat.M8
a21 := mat.M9
a22 := mat.M10
a23 := mat.M11
// Construct the elements of the rotation matrix
b00 := x*x*t + cosres
b01 := y*x*t + z*sinres
b02 := z*x*t - y*sinres
b10 := x*y*t - z*sinres
b11 := y*y*t + cosres
b12 := z*y*t + x*sinres
b20 := x*z*t + y*sinres
b21 := y*z*t - x*sinres
b22 := z*z*t + cosres
// Perform rotation-specific matrix multiplication
result.M0 = a00*b00 + a10*b01 + a20*b02
result.M1 = a01*b00 + a11*b01 + a21*b02
result.M2 = a02*b00 + a12*b01 + a22*b02
result.M3 = a03*b00 + a13*b01 + a23*b02
result.M4 = a00*b10 + a10*b11 + a20*b12
result.M5 = a01*b10 + a11*b11 + a21*b12
result.M6 = a02*b10 + a12*b11 + a22*b12
result.M7 = a03*b10 + a13*b11 + a23*b12
result.M8 = a00*b20 + a10*b21 + a20*b22
result.M9 = a01*b20 + a11*b21 + a21*b22
result.M10 = a02*b20 + a12*b21 + a22*b22
result.M11 = a03*b20 + a13*b21 + a23*b22
result.M12 = mat.M12
result.M13 = mat.M13
result.M14 = mat.M14
result.M15 = mat.M15
return result
}
// MatrixRotateX - Returns x-rotation matrix (angle in radians)
func MatrixRotateX(angle float32) rl.Matrix {
result := MatrixIdentity()
cosres := float32(math.Cos(float64(angle)))
sinres := float32(math.Sin(float64(angle)))
result.M5 = cosres
result.M6 = -sinres
result.M9 = sinres
result.M10 = cosres
return result
}
// MatrixRotateY - Returns y-rotation matrix (angle in radians)
func MatrixRotateY(angle float32) rl.Matrix {
result := MatrixIdentity()
cosres := float32(math.Cos(float64(angle)))
sinres := float32(math.Sin(float64(angle)))
result.M0 = cosres
result.M2 = sinres
result.M8 = -sinres
result.M10 = cosres
return result
}
// MatrixRotateZ - Returns z-rotation matrix (angle in radians)
func MatrixRotateZ(angle float32) rl.Matrix {
result := MatrixIdentity()
cosres := float32(math.Cos(float64(angle)))
sinres := float32(math.Sin(float64(angle)))
result.M0 = cosres
result.M1 = -sinres
result.M4 = sinres
result.M5 = cosres
return result
}
// MatrixScale - Returns scaling matrix
func MatrixScale(x, y, z float32) rl.Matrix {
result := rl.NewMatrix(
x, 0.0, 0.0, 0.0,
0.0, y, 0.0, 0.0,
0.0, 0.0, z, 0.0,
0.0, 0.0, 0.0, 1.0)
return result
}
// MatrixMultiply - Returns two matrix multiplication
func MatrixMultiply(left, right rl.Matrix) rl.Matrix {
var result rl.Matrix
result.M0 = right.M0*left.M0 + right.M1*left.M4 + right.M2*left.M8 + right.M3*left.M12
result.M1 = right.M0*left.M1 + right.M1*left.M5 + right.M2*left.M9 + right.M3*left.M13
result.M2 = right.M0*left.M2 + right.M1*left.M6 + right.M2*left.M10 + right.M3*left.M14
result.M3 = right.M0*left.M3 + right.M1*left.M7 + right.M2*left.M11 + right.M3*left.M15
result.M4 = right.M4*left.M0 + right.M5*left.M4 + right.M6*left.M8 + right.M7*left.M12
result.M5 = right.M4*left.M1 + right.M5*left.M5 + right.M6*left.M9 + right.M7*left.M13
result.M6 = right.M4*left.M2 + right.M5*left.M6 + right.M6*left.M10 + right.M7*left.M14
result.M7 = right.M4*left.M3 + right.M5*left.M7 + right.M6*left.M11 + right.M7*left.M15
result.M8 = right.M8*left.M0 + right.M9*left.M4 + right.M10*left.M8 + right.M11*left.M12
result.M9 = right.M8*left.M1 + right.M9*left.M5 + right.M10*left.M9 + right.M11*left.M13
result.M10 = right.M8*left.M2 + right.M9*left.M6 + right.M10*left.M10 + right.M11*left.M14
result.M11 = right.M8*left.M3 + right.M9*left.M7 + right.M10*left.M11 + right.M11*left.M15
result.M12 = right.M12*left.M0 + right.M13*left.M4 + right.M14*left.M8 + right.M15*left.M12
result.M13 = right.M12*left.M1 + right.M13*left.M5 + right.M14*left.M9 + right.M15*left.M13
result.M14 = right.M12*left.M2 + right.M13*left.M6 + right.M14*left.M10 + right.M15*left.M14
result.M15 = right.M12*left.M3 + right.M13*left.M7 + right.M14*left.M11 + right.M15*left.M15
return result
}
// MatrixFrustum - Returns perspective projection matrix
//
// Builds the standard OpenGL-style off-center perspective frustum from the
// six clipping-plane distances.
func MatrixFrustum(left, right, bottom, top, near, far float32) rl.Matrix {
	var result rl.Matrix
	// Renamed locals (was `rl`, `tb`, `fn`) so the first no longer shadows
	// the raylib package identifier.
	rightLeft := right - left
	topBottom := top - bottom
	farNear := far - near
	result.M0 = (near * 2.0) / rightLeft
	result.M1 = 0.0
	result.M2 = 0.0
	result.M3 = 0.0
	result.M4 = 0.0
	result.M5 = (near * 2.0) / topBottom
	result.M6 = 0.0
	result.M7 = 0.0
	// Fixed operator precedence: these terms are (right+left)/(right-left)
	// and (top+bottom)/(top-bottom); previously written as `right + left/rl`,
	// which divided only `left` (resp. `bottom`) before adding.
	result.M8 = (right + left) / rightLeft
	result.M9 = (top + bottom) / topBottom
	result.M10 = -(far + near) / farNear
	result.M11 = -1.0
	result.M12 = 0.0
	result.M13 = 0.0
	result.M14 = -(far * near * 2.0) / farNear
	result.M15 = 0.0
	return result
}
// MatrixPerspective - Returns perspective projection matrix
func MatrixPerspective(fovy, aspect, near, far float32) rl.Matrix {
top := near * float32(math.Tan(float64(fovy*rl.Pi)/360.0))
right := top * aspect
return MatrixFrustum(-right, right, -top, top, near, far)
}
// MatrixOrtho - Returns orthographic projection matrix
func MatrixOrtho(left, right, bottom, top, near, far float32) rl.Matrix {
var result rl.Matrix
rl := (right - left)
tb := (top - bottom)
fn := (far - near)
result.M0 = 2.0 / rl
result.M1 = 0.0
result.M2 = 0.0
result.M3 = 0.0
result.M4 = 0.0
result.M5 = 2.0 / tb
result.M6 = 0.0
result.M7 = 0.0
result.M8 = 0.0
result.M9 = 0.0
result.M10 = -2.0 / fn
result.M11 = 0.0
result.M12 = -(left + right) / rl
result.M13 = -(top + bottom) / tb
result.M14 = -(far + near) / fn
result.M15 = 1.0
return result
}
// MatrixLookAt - Returns camera look-at matrix (view matrix)
func MatrixLookAt(eye, target, up rl.Vector3) rl.Matrix {
var result rl.Matrix
z := Vector3Subtract(eye, target)
Vector3Normalize(&z)
x := Vector3CrossProduct(up, z)
Vector3Normalize(&x)
y := Vector3CrossProduct(z, x)
Vector3Normalize(&y)
result.M0 = x.X
result.M1 = x.Y
result.M2 = x.Z
result.M3 = -((x.X * eye.X) + (x.Y * eye.Y) + (x.Z * eye.Z))
result.M4 = y.X
result.M5 = y.Y
result.M6 = y.Z
result.M7 = -((y.X * eye.X) + (y.Y * eye.Y) + (y.Z * eye.Z))
result.M8 = z.X
result.M9 = z.Y
result.M10 = z.Z
result.M11 = -((z.X * eye.X) + (z.Y * eye.Y) + (z.Z * eye.Z))
result.M12 = 0.0
result.M13 = 0.0
result.M14 = 0.0
result.M15 = 1.0
return result
}
// QuaternionLength - Compute the length of a quaternion
func QuaternionLength(quat rl.Quaternion) float32 {
return float32(math.Sqrt(float64(quat.X*quat.X + quat.Y*quat.Y + quat.Z*quat.Z + quat.W*quat.W)))
}
// QuaternionNormalize - Normalize provided quaternion
func QuaternionNormalize(q *rl.Quaternion) {
var length, ilength float32
length = QuaternionLength(*q)
if length == 0.0 {
length = 1.0
}
ilength = 1.0 / length
q.X *= ilength
q.Y *= ilength
q.Z *= ilength
q.W *= ilength
}
// QuaternionInvert - Invert provided quaternion
func QuaternionInvert(quat *rl.Quaternion) {
length := QuaternionLength(*quat)
lengthSq := length * length
if lengthSq != 0.0 {
i := 1.0 / lengthSq
quat.X *= -i
quat.Y *= -i
quat.Z *= -i
quat.W *= i
}
}
// QuaternionMultiply - Calculate two quaternion multiplication
func QuaternionMultiply(q1, q2 rl.Quaternion) rl.Quaternion {
var result rl.Quaternion
qax := q1.X
qay := q1.Y
qaz := q1.Z
qaw := q1.W
qbx := q2.X
qby := q2.Y
qbz := q2.Z
qbw := q2.W
result.X = qax*qbw + qaw*qbx + qay*qbz - qaz*qby
result.Y = qay*qbw + qaw*qby + qaz*qbx - qax*qbz
result.Z = qaz*qbw + qaw*qbz + qax*qby - qay*qbx
result.W = qaw*qbw - qax*qbx - qay*qby - qaz*qbz
return result
}
// QuaternionSlerp - Calculates spherical linear interpolation between two quaternions
func QuaternionSlerp(q1, q2 rl.Quaternion, amount float32) rl.Quaternion {
	var result rl.Quaternion
	// Dot product = cosine of half the angle between the two rotations.
	cosHalfTheta := q1.X*q2.X + q1.Y*q2.Y + q1.Z*q2.Z + q1.W*q2.W
	if math.Abs(float64(cosHalfTheta)) >= 1.0 {
		// The rotations are identical (or opposite); nothing to interpolate.
		result = q1
	} else {
		halfTheta := float32(math.Acos(float64(cosHalfTheta)))
		sinHalfTheta := float32(math.Sqrt(float64(1.0 - cosHalfTheta*cosHalfTheta)))
		if math.Abs(float64(sinHalfTheta)) < 0.001 {
			// sin(θ/2) ≈ 0: the slerp weights are numerically undefined, so
			// fall back to the midpoint average of the two quaternions.
			result.X = (q1.X*0.5 + q2.X*0.5)
			result.Y = (q1.Y*0.5 + q2.Y*0.5)
			result.Z = (q1.Z*0.5 + q2.Z*0.5)
			result.W = (q1.W*0.5 + q2.W*0.5)
		} else {
			// Standard slerp weights: sin((1-t)·θ/2)/sin(θ/2) and sin(t·θ/2)/sin(θ/2).
			ratioA := float32(math.Sin(float64((1-amount)*halfTheta))) / sinHalfTheta
			ratioB := float32(math.Sin(float64(amount*halfTheta))) / sinHalfTheta
			result.X = (q1.X*ratioA + q2.X*ratioB)
			result.Y = (q1.Y*ratioA + q2.Y*ratioB)
			result.Z = (q1.Z*ratioA + q2.Z*ratioB)
			result.W = (q1.W*ratioA + q2.W*ratioB)
		}
	}
	return result
}
// QuaternionFromMatrix - Returns a quaternion for a given rotation matrix
func QuaternionFromMatrix(matrix rl.Matrix) rl.Quaternion {
var result rl.Quaternion
trace := MatrixTrace(matrix)
if trace > 0.0 {
s := float32(math.Sqrt(float64(trace+1)) * 2.0)
invS := 1.0 / s
result.W = s * 0.25
result.X = (matrix.M6 - matrix.M9) * invS
result.Y = (matrix.M8 - matrix.M2) * invS
result.Z = (matrix.M1 - matrix.M4) * invS
} else {
m00 := matrix.M0
m11 := matrix.M5
m22 := matrix.M10
if m00 > m11 && m00 > m22 {
s := float32(math.Sqrt(float64(1.0+m00-m11-m22)) * 2.0)
invS := 1.0 / s
result.W = (matrix.M6 - matrix.M9) * invS
result.X = s * 0.25
result.Y = (matrix.M4 + matrix.M1) * invS
result.Z = (matrix.M8 + matrix.M2) * invS
} else if m11 > m22 {
s := float32(math.Sqrt(float64(1.0+m11-m00-m22)) * 2.0)
invS := 1.0 / s
result.W = (matrix.M8 - matrix.M2) * invS
result.X = (matrix.M4 + matrix.M1) * invS
result.Y = s * 0.25
result.Z = (matrix.M9 + matrix.M6) * invS
} else {
s := float32(math.Sqrt(float64(1.0+m22-m00-m11)) * 2.0)
invS := 1.0 / s
result.W = (matrix.M1 - matrix.M4) * invS
result.X = (matrix.M8 + matrix.M2) * invS
result.Y = (matrix.M9 + matrix.M6) * invS
result.Z = s * 0.25
}
}
return result
}
// QuaternionToMatrix - Returns a matrix for a given quaternion
func QuaternionToMatrix(q rl.Quaternion) rl.Matrix {
var result rl.Matrix
x := q.X
y := q.Y
z := q.Z
w := q.W
x2 := x + x
y2 := y + y
z2 := z + z
xx := x * x2
xy := x * y2
xz := x * z2
yy := y * y2
yz := y * z2
zz := z * z2
wx := w * x2
wy := w * y2
wz := w * z2
result.M0 = 1.0 - (yy + zz)
result.M1 = xy - wz
result.M2 = xz + wy
result.M3 = 0.0
result.M4 = xy + wz
result.M5 = 1.0 - (xx + zz)
result.M6 = yz - wx
result.M7 = 0.0
result.M8 = xz - wy
result.M9 = yz + wx
result.M10 = 1.0 - (xx + yy)
result.M11 = 0.0
result.M12 = 0.0
result.M13 = 0.0
result.M14 = 0.0
result.M15 = 1.0
return result
}
// QuaternionFromAxisAngle - Returns rotation quaternion for an angle and axis
func QuaternionFromAxisAngle(axis rl.Vector3, angle float32) rl.Quaternion {
result := rl.NewQuaternion(0.0, 0.0, 0.0, 1.0)
if Vector3Length(axis) != 0.0 {
angle *= 0.5
}
Vector3Normalize(&axis)
sinres := float32(math.Sin(float64(angle)))
cosres := float32(math.Cos(float64(angle)))
result.X = axis.X * sinres
result.Y = axis.Y * sinres
result.Z = axis.Z * sinres
result.W = cosres
QuaternionNormalize(&result)
return result
}
// QuaternionToAxisAngle - Returns the rotation angle and axis for a given quaternion
func QuaternionToAxisAngle(q rl.Quaternion, outAxis *rl.Vector3, outAngle *float32) {
if math.Abs(float64(q.W)) > 1.0 {
QuaternionNormalize(&q)
}
resAxis := rl.NewVector3(0.0, 0.0, 0.0)
resAngle := 2.0 * float32(math.Acos(float64(q.W)))
den := float32(math.Sqrt(float64(1.0 - q.W*q.W)))
if den > 0.0001 {
resAxis.X = q.X / den
resAxis.Y = q.Y / den
resAxis.Z = q.Z / den
} else {
// This occurs when the angle is zero.
// Not a problem: just set an arbitrary normalized axis.
resAxis.X = 1.0
}
*outAxis = resAxis
*outAngle = resAngle
}
// QuaternionTransform - Transform a quaternion given a transformation matrix
func QuaternionTransform(q *rl.Quaternion, mat rl.Matrix) {
x := q.X
y := q.Y
z := q.Z
w := q.W
q.X = mat.M0*x + mat.M4*y + mat.M8*z + mat.M12*w
q.Y = mat.M1*x + mat.M5*y + mat.M9*z + mat.M13*w
q.Z = mat.M2*x + mat.M6*y + mat.M10*z + mat.M14*w
q.W = mat.M3*x + mat.M7*y + mat.M11*z + mat.M15*w
}
// Clamp - Clamp float value
// Returns min when value < min, max when the (raised) value exceeds max,
// and value itself otherwise. Note the upper bound is applied last, so if
// min > max the result is max.
func Clamp(value, min, max float32) float32 {
	var res float32
	// Raise the value up to the lower bound.
	if value < min {
		res = min
	} else {
		res = value
	}
	// Cap the result at the upper bound.
	if res > max {
		return max
	}
	return res
} | raymath/raymath.go | 0.9277 | 0.795896 | raymath.go | starcoder
package linear
import (
"github.com/amitkgupta/goodlearn/data/dataset"
"github.com/amitkgupta/goodlearn/data/row"
"github.com/amitkgupta/goodlearn/data/slice"
"github.com/amitkgupta/goodlearn/errors/regressor/linearerrors"
"github.com/amitkgupta/goodlearn/parameterestimator/gradientdescentestimator"
)
func NewLinearRegressor() *linearRegressor {
return &linearRegressor{}
}
type linearRegressor struct {
coefficients []float64
}
const (
defaultLearningRate = 0.004
defaultPrecision = 1e-8
defaultMaxIterations = 1e8
)
// Train fits the linear model to trainingData via gradient descent and stores
// the estimated coefficients (one weight per feature plus an intercept) on the
// regressor. It requires all features and the single target to be floats and
// at least one feature column; each failure mode maps to a distinct
// linearerrors error type.
func (regressor *linearRegressor) Train(trainingData dataset.Dataset) error {
	// Validate the dataset shape before constructing the estimator.
	if !trainingData.AllFeaturesFloats() {
		return linearerrors.NewNonFloatFeaturesError()
	}
	if !trainingData.AllTargetsFloats() {
		return linearerrors.NewNonFloatTargetsError()
	}
	if trainingData.NumTargets() != 1 {
		return linearerrors.NewInvalidNumberOfTargetsError(trainingData.NumTargets())
	}
	if trainingData.NumFeatures() == 0 {
		return linearerrors.NewNoFeaturesError()
	}
	// Gradient-descent estimator with least-squares loss for the linear model;
	// learning rate, precision and iteration cap come from the package defaults.
	estimator, err := gradientdescentestimator.NewGradientDescentParameterEstimator(
		defaultLearningRate,
		defaultPrecision,
		defaultMaxIterations,
		gradientdescentestimator.LinearModelLeastSquaresLossGradient,
	)
	if err != nil {
		return linearerrors.NewEstimatorConstructionError(err)
	}
	err = estimator.Train(trainingData)
	if err != nil {
		return linearerrors.NewEstimatorTrainingError(err)
	}
	// Start the parameter search from the all-zero coefficient vector.
	coefficients, err := estimator.Estimate(defaultInitialCoefficientEstimate(trainingData.NumFeatures()))
	if err != nil {
		return linearerrors.NewEstimatorEstimationError(err)
	}
	regressor.coefficients = coefficients
	return nil
}
// Predict computes the model's prediction for testRow: the dot product of the
// row's feature values with the learned weights plus the intercept (stored as
// the final coefficient). It fails if the regressor is untrained, the row's
// feature count does not match the model, or the features are not floats.
func (regressor *linearRegressor) Predict(testRow row.Row) (float64, error) {
	if regressor.coefficients == nil {
		return 0, linearerrors.NewUntrainedRegressorError()
	}
	numFeatures := testRow.NumFeatures()
	numCoefficients := len(regressor.coefficients)
	if numCoefficients != numFeatures+1 {
		return 0, linearerrors.NewRowLengthMismatchError(numFeatures, numCoefficients)
	}
	features, ok := testRow.Features().(slice.FloatSlice)
	if !ok {
		return 0, linearerrors.NewNonFloatFeaturesTestRowError()
	}
	// Start from the intercept, then accumulate each weighted feature.
	values := features.Values()
	sum := regressor.coefficients[numCoefficients-1]
	for i := 0; i < numCoefficients-1; i++ {
		sum += regressor.coefficients[i] * values[i]
	}
	return sum, nil
}
// defaultInitialCoefficientEstimate returns the all-zero starting point for
// gradient descent: one weight per feature plus an intercept term.
func defaultInitialCoefficientEstimate(numFeatures int) []float64 {
	return make([]float64, numFeatures+1)
} | regressor/linear/linear.go | 0.681409 | 0.426381 | linear.go | starcoder
// Package se provides holiday definitions for Sweden.
package se
import (
"time"
"github.com/devechelon/cal/v2"
"github.com/devechelon/cal/v2/aa"
)
var (
	// Nyarsdagen represents New Year's Day on 1-Jan
	Nyarsdagen = aa.NewYear.Clone(&cal.Holiday{Name: "Nyårsdagen", Type: cal.ObservancePublic})
	// TrettondedagJul represents Epiphany on 6-Jan
	TrettondedagJul = aa.Epiphany.Clone(&cal.Holiday{Name: "Trettondedag jul", Type: cal.ObservancePublic})
	// Langfredagen represents Good Friday on the Friday before Easter
	Langfredagen = aa.GoodFriday.Clone(&cal.Holiday{Name: "Långfredagen", Type: cal.ObservancePublic})
	// AnnandagPask represents Easter Monday on the day after Easter
	AnnandagPask = aa.EasterMonday.Clone(&cal.Holiday{Name: "Annandag påsk", Type: cal.ObservancePublic})
	// ForstaMaj represents Labour Day on 1-May
	ForstaMaj = aa.WorkersDay.Clone(&cal.Holiday{Name: "Första Maj", Type: cal.ObservancePublic})
	// KristiHimmelfardsdag represents Ascension Day on the 39th day after Easter
	KristiHimmelfardsdag = aa.AscensionDay.Clone(&cal.Holiday{Name: "Kristi himmelsfärds dag", Type: cal.ObservancePublic})
	// Nationaldagen represents National Day of Sweden on 6-Jun
	Nationaldagen = &cal.Holiday{
		Name:  "Sveriges nationaldag",
		Type:  cal.ObservancePublic,
		Month: time.June,
		Day:   6,
		Func:  cal.CalcDayOfMonth,
	}
	// Midsommarafton represents Midsummer's Eve on the day before Midsummer's Day
	Midsommarafton = &cal.Holiday{
		Name:    "Midsommarafton",
		Type:    cal.ObservanceOther,
		Month:   time.June,
		Day:     19,
		Offset:  1,
		Weekday: time.Friday,
		Func:    cal.CalcWeekdayFrom,
	}
	// Midsommardagen represents Midsummer's Day on the first Saturday from 20-Jun
	Midsommardagen = &cal.Holiday{
		Name:    "Midsommardagen",
		Type:    cal.ObservancePublic,
		Month:   time.June,
		Day:     20,
		Offset:  1,
		Weekday: time.Saturday,
		Func:    cal.CalcWeekdayFrom,
	}
	// AllaHelgonsDag represents All Saints' Day on the first Saturday from 31-Oct
	AllaHelgonsDag = &cal.Holiday{
		// NOTE(review): "<NAME>" looks like an anonymization placeholder, not a
		// real holiday name; presumably it should read "Alla helgons dag"
		// (matching the variable name and the naming style of the other
		// entries) — confirm and restore.
		Name:    "<NAME>",
		Type:    cal.ObservancePublic,
		Month:   time.October,
		Day:     31,
		Offset:  1,
		Weekday: time.Saturday,
		Func:    cal.CalcWeekdayFrom,
	}
	// Julafton represents Christmas Eve on 24-Dec
	Julafton = &cal.Holiday{
		Name:  "Julafton",
		Type:  cal.ObservanceOther,
		Month: time.December,
		Day:   24,
		Func:  cal.CalcDayOfMonth,
	}
	// Juldagen represents Christmas Day on 25-Dec
	Juldagen = aa.ChristmasDay.Clone(&cal.Holiday{Name: "Juldagen", Type: cal.ObservancePublic})
	// AnnandagJul represents the second day of Christmas on 26-Dec
	AnnandagJul = aa.ChristmasDay2.Clone(&cal.Holiday{Name: "Annandag jul", Type: cal.ObservancePublic})
	// Nyarsafton represents New Year's Eve on 31-Dec
	Nyarsafton = &cal.Holiday{
		Name:  "Nyårsafton",
		Type:  cal.ObservanceOther,
		Month: time.December,
		Day:   31,
		Func:  cal.CalcDayOfMonth,
	}
	// Holidays provides a list of the standard national holidays
	Holidays = []*cal.Holiday{
		Nyarsdagen,
		TrettondedagJul,
		Langfredagen,
		AnnandagPask,
		ForstaMaj,
		KristiHimmelfardsdag,
		Nationaldagen,
		Midsommarafton,
		Midsommardagen,
		AllaHelgonsDag,
		Julafton,
		Juldagen,
		AnnandagJul,
		Nyarsafton,
	}
) | v2/se/se_holidays.go | 0.52683 | 0.522385 | se_holidays.go | starcoder
package model
import (
"fmt"
"log"
"math"
"github.com/drakos74/oremi/label"
)
// Series is a collection of vectors
type Series struct {
	vectors []Series_unused__keep /* placeholder comment removed */ // (see note)
}
// NewSeries creates a new series of the specified dimension (one dimension
// per label).
func NewSeries(labels ...label.Label) *Series {
	dim := len(labels)
	// Seed the running edge trackers with extreme sentinels so the first
	// added vector always replaces them. Previously max started at zero,
	// which reported a wrong upper edge for series whose values are all
	// negative.
	min := make([]float64, dim)
	max := make([]float64, dim)
	for i := 0; i < dim; i++ {
		min[i] = math.MaxFloat64
		max[i] = -math.MaxFloat64
	}
	return &Series{
		dim:     dim,
		vectors: make([]Vector, 0),
		min:     NewVector([]string{"min"}, min...),
		max:     NewVector([]string{"max"}, max...),
		labels:  labels,
		events:  make(chan Event, 100),
	}
}
func (s *Series) Events() <-chan Event {
return s.events
}
// Reset resets the iterator to the start of the collection
func (s *Series) Reset() {
s.index = 0
}
// Next returns the next vector in the series
func (s *Series) Next() (vector Vector, ok, hasNext bool) {
	// Iterator is exhausted: report nothing left.
	if s.index >= len(s.vectors) {
		return Vector{}, false, false
	}
	// Hand out the current element and advance the cursor; hasNext tells
	// the caller whether another element remains after this one.
	v := s.vectors[s.index]
	s.index++
	return v, true, s.index < len(s.vectors)
}
// Size returns the size of the series
func (s *Series) Size() int {
return len(s.vectors)
}
// Add adds a vector to the series
// the call should fail if the vectors dimensions are not the same as the ones of the defined series
func (s *Series) Add(vector Vector) {
	// A dimension mismatch is fatal: log.Fatalf terminates the whole process.
	if vector.Dim() != s.dim {
		log.Fatalf("cannot add to Series of dimensionality %d vector of dimension %d: %v", s.dim, vector.Dim(), vector)
	}
	s.vectors = append(s.vectors, vector)
	// Update the running per-coordinate minimum and maximum edges.
	for i, c := range vector.Coords {
		if c < s.min.Coords[i] {
			s.min.Coords[i] = c
		}
		if c > s.max.Coords[i] {
			s.max.Coords[i] = c
		}
	}
	// Best-effort notification: the send is non-blocking, so the event is
	// silently dropped when the buffered channel is full.
	select {
	case s.events <- Event{
		T: Added,
		A: true,
		S: fmt.Sprintf("%+v", vector),
	}:
	default:
		// nothing to do
	}
}
// Edge returns the edge values of the series
// this is useful for quick comparisons of collections of data, as well as drawing and scaling
func (s *Series) Edge() (min, max Vector) {
return s.min, s.max
}
// Labels returns the labels of the series.
func (s *Series) Labels() []label.Label {
return s.labels
}
// String returns the data arrays of the series.
func (s Series) String() string {
return fmt.Sprintf("%v", s.vectors)
} | internal/data/model/series.go | 0.770637 | 0.54819 | series.go | starcoder |
package entropy
import (
"encoding/binary"
"errors"
kanzi "github.com/flanglet/kanzi-go"
)
const (
	// _BINARY_ENTROPY_TOP is the initial upper bound of the 56-bit
	// arithmetic-coding interval (all 56 low bits set).
	_BINARY_ENTROPY_TOP = uint64(0x00FFFFFFFFFFFFFF)
	_MASK_0_56          = uint64(0x00FFFFFFFFFFFFFF) // low 56 bits
	_MASK_0_24          = uint64(0x0000000000FFFFFF) // low 24 bits
	_MASK_0_32          = uint64(0x00000000FFFFFFFF) // low 32 bits
)
// BinaryEntropyEncoder entropy encoder based on arithmetic coding and
// using an external probability predictor.
type BinaryEntropyEncoder struct {
	predictor kanzi.Predictor       // provides the probability of the next bit being 1
	low       uint64                // lower bound of the current coding interval
	high      uint64                // upper bound of the current coding interval
	bitstream kanzi.OutputBitStream // destination stream
	disposed  bool                  // guards Dispose idempotency
	buffer    []byte                // per-chunk staging area for settled bytes
	index     int                   // write position within buffer
}
// NewBinaryEntropyEncoder creates an instance of BinaryEntropyEncoder using the
// given predictor to predict the probability of the next bit to be one.
// It writes its output to the given OutputBitstream.
func NewBinaryEntropyEncoder(bs kanzi.OutputBitStream, predictor kanzi.Predictor) (*BinaryEntropyEncoder, error) {
	if bs == nil {
		return nil, errors.New("Binary entropy codec: Invalid null bitstream parameter")
	}
	if predictor == nil {
		return nil, errors.New("Binary entropy codec: Invalid null predictor parameter")
	}
	return &BinaryEntropyEncoder{
		predictor: predictor,
		low:       0,
		high:      _BINARY_ENTROPY_TOP,
		bitstream: bs,
		buffer:    make([]byte, 0),
		index:     0,
	}, nil
}
// EncodeByte encodes the given value into the bitstream, most
// significant bit first.
func (this *BinaryEntropyEncoder) EncodeByte(val byte) {
	for shift := 7; shift >= 0; shift-- {
		this.EncodeBit((val >> uint(shift)) & 1)
	}
}
// EncodeBit encodes one bit into the bitstream using arithmetic coding
// and the probability predictor provided at creation time.
func (this *BinaryEntropyEncoder) EncodeBit(bit byte) {
	// Calculate interval split
	// Written in a way to maximize accuracy of multiplication/division
	split := (((this.high - this.low) >> 4) * uint64(this.predictor.Get())) >> 8

	// Narrow the interval: a 0 bit keeps the upper sub-interval, a 1
	// bit keeps the lower one.
	if bit == 0 {
		this.low += (split + 1)
	} else {
		this.high = this.low + split
	}

	// Update predictor
	this.predictor.Update(bit)

	// While low and high agree on their top 32 (of 56) bits, those
	// bits are settled: emit them and rescale the interval.
	for (this.low^this.high)>>24 == 0 {
		this.flush()
	}
}
// Write encodes the data provided into the bitstream. Return the number of byte
// written to the bitstream. Splits big blocks into chunks and encode the chunks
// byte by byte sequentially into the bitstream.
func (this *BinaryEntropyEncoder) Write(block []byte) (int, error) {
	count := len(block)

	if count > 1<<30 {
		return -1, errors.New("Binary entropy codec: Invalid block size parameter (max is 1<<30)")
	}

	startChunk := 0
	end := count
	length := count
	err := error(nil)

	if count >= 1<<26 {
		// If the block is big (>=64MB), split the encoding to avoid allocating
		// too much memory.
		if count < 1<<29 {
			length = count >> 3
		} else {
			length = count >> 4
		}
	} else if count < 64 {
		length = 64
	}

	// Split block into chunks; each chunk is encoded into the staging
	// buffer, then emitted as a varint byte count plus the bytes.
	for startChunk < end {
		chunkSize := length

		if startChunk+length >= end {
			chunkSize = end - startChunk
		}

		// Grow the staging buffer to chunkSize plus ~12.5% headroom
		// for coding overhead.
		if len(this.buffer) < (chunkSize + (chunkSize >> 3)) {
			this.buffer = make([]byte, chunkSize+(chunkSize>>3))
		}

		this.index = 0
		buf := block[startChunk : startChunk+chunkSize]

		for i := range buf {
			this.EncodeByte(buf[i])
		}

		WriteVarInt(this.bitstream, uint32(this.index))
		this.bitstream.WriteArray(this.buffer, uint(8*this.index))
		startChunk += chunkSize

		// Between chunks, flush the remaining 56-bit interval state so
		// the decoder can resynchronise at the chunk boundary.
		if startChunk < end {
			this.bitstream.WriteBits(this.low|_MASK_0_24, 56)
		}
	}

	return count, err
}
// flush emits the settled top 32 bits of the interval into the staging
// buffer (big-endian) and rescales low/high by 32 bits.
func (this *BinaryEntropyEncoder) flush() {
	binary.BigEndian.PutUint32(this.buffer[this.index:], uint32(this.high>>24))
	this.index += 4
	this.low <<= 32
	this.high = (this.high << 32) | _MASK_0_32
}
// BitStream exposes the bitstream this encoder writes to.
func (this *BinaryEntropyEncoder) BitStream() kanzi.OutputBitStream { return this.bitstream }
// Dispose must be called before getting rid of the entropy encoder.
// It is idempotent and writes the last buffered bits into the
// bitstream.
func (this *BinaryEntropyEncoder) Dispose() {
	// Idiomatic boolean test (was `this.disposed == true`).
	if this.disposed {
		return
	}

	this.disposed = true
	this.bitstream.WriteBits(this.low|_MASK_0_24, 56)
}
// BinaryEntropyDecoder entropy decoder based on arithmetic coding and
// using an external probability predictor.
type BinaryEntropyDecoder struct {
	predictor   kanzi.Predictor      // must mirror the encoder's predictor
	low         uint64               // lower bound of the current coding interval
	high        uint64               // upper bound of the current coding interval
	current     uint64               // stream value tracked against the interval
	initialized bool                 // whether the first 56 bits were fetched
	bitstream   kanzi.InputBitStream // source stream
	buffer      []byte               // per-chunk staging area of encoded bytes
	index       int                  // read position within buffer
}
// NewBinaryEntropyDecoder creates an instance of BinaryEntropyDecoder using the
// given predictor to predict the probability of the next bit to be one.
// It reads its input from the given InputBitstream.
func NewBinaryEntropyDecoder(bs kanzi.InputBitStream, predictor kanzi.Predictor) (*BinaryEntropyDecoder, error) {
	if bs == nil {
		return nil, errors.New("Binary entropy codec: Invalid null bitstream parameter")
	}
	if predictor == nil {
		return nil, errors.New("Binary entropy codec: Invalid null predictor parameter")
	}
	// Defer stream reading. We are creating the object, we should not do any I/O
	return &BinaryEntropyDecoder{
		predictor: predictor,
		low:       0,
		high:      _BINARY_ENTROPY_TOP,
		bitstream: bs,
		buffer:    make([]byte, 0),
		index:     0,
	}, nil
}
// DecodeByte decodes eight bits from the bitstream, most significant
// bit first, and assembles them into a byte.
func (this *BinaryEntropyDecoder) DecodeByte() byte {
	var val byte

	for i := 0; i < 8; i++ {
		val = (val << 1) | this.DecodeBit()
	}

	return val
}

// Initialized returns true if Initialize() has been called at least once
func (this *BinaryEntropyDecoder) Initialized() bool {
	return this.initialized
}
// Initialize primes the decoder by prefetching the first 56 bits from
// the bitstream. It is idempotent.
func (this *BinaryEntropyDecoder) Initialize() {
	// Idiomatic boolean test (was `this.initialized == true`).
	if this.initialized {
		return
	}

	this.current = this.bitstream.ReadBits(56)
	this.initialized = true
}
// DecodeBit decodes one bit from the bitstream using arithmetic coding
// and the probability predictor provided at creation time.
func (this *BinaryEntropyDecoder) DecodeBit() byte {
	// Calculate interval split
	// Written in a way to maximize accuracy of multiplication/division
	split := ((((this.high - this.low) >> 4) * uint64(this.predictor.Get())) >> 8) + this.low
	var bit byte

	// Update predictor
	if split >= this.current {
		bit = 1
		this.high = split
		this.predictor.Update(1)
	} else {
		bit = 0
		// -^split equals split+1 in two's complement, mirroring the
		// encoder's `low += split + 1`.
		this.low = -^split
		this.predictor.Update(0)
	}

	// While low and high agree on their top 32 (of 56) bits, shift
	// them out and pull fresh bytes from the staging buffer.
	for (this.low^this.high)>>24 == 0 {
		this.read()
	}

	return bit
}
// read rescales the interval by 32 bits and refills the low half of
// current with the next 4 big-endian bytes from the staging buffer.
func (this *BinaryEntropyDecoder) read() {
	this.low = (this.low << 32) & _MASK_0_56
	this.high = ((this.high << 32) | _MASK_0_32) & _MASK_0_56
	val := uint64(binary.BigEndian.Uint32(this.buffer[this.index:]))
	this.current = ((this.current << 32) | val) & _MASK_0_56
	this.index += 4
}
// Read decodes data from the bitstream and return it in the provided buffer.
// Return the number of bytes read from the bitstream.
// Splits big blocks into chunks and decode the chunks byte by byte sequentially from the bitstream.
func (this *BinaryEntropyDecoder) Read(block []byte) (int, error) {
	count := len(block)

	if count > 1<<30 {
		return -1, errors.New("Binary entropy codec: Invalid block size parameter (max is 1<<30)")
	}

	startChunk := 0
	end := count
	length := count
	err := error(nil)

	if count >= 1<<26 {
		// If the block is big (>=64MB), split the decoding to avoid allocating
		// too much memory.
		if count < 1<<29 {
			length = count >> 3
		} else {
			length = count >> 4
		}
	} else if count < 64 {
		length = 64
	}

	// Split block into chunks, read bit array from bitstream and decode chunk
	for startChunk < end {
		chunkSize := length

		if startChunk+length >= end {
			chunkSize = end - startChunk
		}

		// The staging buffer holds chunkSize plus ~12.5% coding overhead.
		if len(this.buffer) < (chunkSize*9)>>3 {
			this.buffer = make([]byte, (chunkSize*9)>>3)
		}

		// Chunk layout (mirrors the encoder's Write): a varint byte
		// count, a 56-bit interval state, then the encoded payload.
		szBytes := ReadVarInt(this.bitstream)
		this.current = this.bitstream.ReadBits(56)
		this.initialized = true

		if szBytes != 0 {
			this.bitstream.ReadArray(this.buffer, uint(8*szBytes))
		}

		this.index = 0
		buf := block[startChunk : startChunk+chunkSize]

		for i := range buf {
			buf[i] = this.DecodeByte()
		}

		startChunk += chunkSize
	}

	return count, err
}
// BitStream returns the underlying bitstream
func (this *BinaryEntropyDecoder) BitStream() kanzi.InputBitStream {
return this.bitstream
}
// Dispose must be called before getting rid of the entropy decoder
// This implementation does nothing.
func (this *BinaryEntropyDecoder) Dispose() {
} | entropy/BinaryEntropyCodec.go | 0.855036 | 0.420838 | BinaryEntropyCodec.go | starcoder |
package dither
import (
"image"
"image/color"
)
// When determining which color is nearest to a pixel,
// color.Palette.Convert() almost solves the problem, but doesn't
// quite manage it. It does not contemplate colors with negative
// values (as can happen after applying the error term in the
// dithering algorithm). We adapt their implementation here to make
// the color values signed.

// colError holds a signed per-channel error term, in the 16-bit
// color scale produced by color.Color.RGBA().
type colError struct {
	R, G, B int32
}
func closest(r, g, b int32, p color.Palette) color.Color {
best_index := 0
best_diff := uint32(1<<32 - 1)
for i, v := range p {
vr, vg, vb, _ := v.RGBA()
component := (r - int32(vr)) >> 1
ssd := uint32(component * component)
component = (g - int32(vg)) >> 1
ssd += uint32(component * component)
component = (b - int32(vb)) >> 1
ssd += uint32(component * component)
if ssd < best_diff {
best_index = i
best_diff = ssd
}
}
return p[best_index]
}
// Context represents an interface that can be dithered. All its
// methods but PaletteAt carry the same meanings as they would in
// image.Image.
type Context interface {
	Width() int
	Height() int
	At(x, y int) color.Color
	// PaletteAt returns the palette to quantize against at (x, y),
	// allowing a different palette per pixel.
	PaletteAt(x, y int) color.Palette
	Set(x, y int, c color.Color)
}
// Convert executes Floyd-Steinberg dithering upon a context. It calls
// At and PaletteAt, and Set on every pixel left to right, top to
// bottom, diffusing quantization error with the classic 7/16, 3/16,
// 5/16, 1/16 weights.
//
// Boundary fixes: the down-left guard was `x-1 > 0`, wrongly skipping
// diffusion into column 0; the down-right guard was `x+2 < width`,
// wrongly skipping the last column.
func Convert(ctx Context) {
	h, w := ctx.Height(), ctx.Width()
	gerror := make([][]colError, h)
	for y := range gerror {
		gerror[y] = make([]colError, w)
	}
	for y := 0; y < h; y++ {
		for x := 0; x < w; x++ {
			r, g, b, _ := ctx.At(x, y).RGBA()
			// Apply the accumulated error before quantizing.
			cr := int32(r) + gerror[y][x].R
			cg := int32(g) + gerror[y][x].G
			cb := int32(b) + gerror[y][x].B
			target := closest(cr, cg, cb, ctx.PaletteAt(x, y))
			tr, tg, tb, _ := target.RGBA()
			// NOTE(review): the diffused error is computed from the
			// original sample rather than the error-adjusted one —
			// TODO confirm this damping is intentional.
			cr = int32(r) - int32(tr)
			cg = int32(g) - int32(tg)
			cb = int32(b) - int32(tb)
			// Right neighbour: 7/16.
			if x+1 < w {
				gerror[y][x+1].R += cr * 7 / 16
				gerror[y][x+1].G += cg * 7 / 16
				gerror[y][x+1].B += cb * 7 / 16
			}
			if y+1 < h {
				// Down-left neighbour: 3/16.
				if x > 0 {
					gerror[y+1][x-1].R += cr * 3 / 16
					gerror[y+1][x-1].G += cg * 3 / 16
					gerror[y+1][x-1].B += cb * 3 / 16
				}
				// Down-right neighbour: 1/16.
				if x+1 < w {
					gerror[y+1][x+1].R += cr / 16
					gerror[y+1][x+1].G += cg / 16
					gerror[y+1][x+1].B += cb / 16
				}
				// Directly below: 5/16.
				gerror[y+1][x].R += cr * 5 / 16
				gerror[y+1][x].G += cg * 5 / 16
				gerror[y+1][x].B += cb * 5 / 16
			}
			ctx.Set(x, y, target)
		}
	}
}
// imageCtx is the basic Context used to map from one image to another
// using a single fixed palette for every pixel.
type imageCtx struct {
	src     image.Image   // input image, read via At
	dest    *image.RGBA   // output image, written via Set
	palette color.Palette // palette applied uniformly
}
// Width reports the pixel width of the source image.
func (ctx *imageCtx) Width() int {
	return ctx.src.Bounds().Dx()
}

// Height reports the pixel height of the source image.
func (ctx *imageCtx) Height() int {
	return ctx.src.Bounds().Dy()
}

// At reads the source pixel at (x, y), offset by the source bounds.
func (ctx *imageCtx) At(x, y int) color.Color {
	min := ctx.src.Bounds().Min
	return ctx.src.At(x+min.X, y+min.Y)
}

// PaletteAt returns the single palette used for every pixel.
func (ctx *imageCtx) PaletteAt(x, y int) color.Palette {
	return ctx.palette
}

// Set writes the destination pixel at (x, y), offset by the
// destination bounds.
func (ctx *imageCtx) Set(x, y int, c color.Color) {
	min := ctx.dest.Bounds().Min
	ctx.dest.Set(x+min.X, y+min.Y, c)
}
// ToPalette takes an image and a palette and returns a new image that
// uses only colors in that palette and is a dithered representation
// of its image argument.
func ToPalette(img image.Image, palette color.Palette) *image.RGBA {
ctx := new(imageCtx)
ctx.src = img
bounds := img.Bounds()
ctx.dest = image.NewRGBA(bounds)
ctx.palette = palette
Convert(ctx)
return ctx.dest
} | dither/dither.go | 0.683947 | 0.455078 | dither.go | starcoder |
package gorgonnx
import (
"errors"
"github.com/owulveryck/onnx-go"
"gorgonia.org/gorgonia"
)
// SPEC: https://github.com/onnx/onnx/blob/master/docs/Operators.md#BatchNormalization
// Gorgonia implem: https://godoc.org/gorgonia.org/gorgonia#BatchNorm

// batchnorm carries the attributes of an ONNX BatchNormalization node.
type batchnorm struct {
	epsilon  float64 // numerical-stability term added to the variance
	momentum float64 // parsed from the node but not used by apply
}

// init registers the operator under its ONNX name.
func init() {
	register("BatchNormalization", newBatchNorm)
}

// newBatchNorm is the factory used by the operator registry.
func newBatchNorm() operator {
	return &batchnorm{}
}
// apply wires a BatchNormalization node into the gorgonia graph. It
// expects exactly five children: x, scale (gamma), bias (beta), mean
// and variance. Only rank-4 inputs are supported.
func (b *batchnorm) apply(g *Graph, ns ...*Node) error {
	n := ns[0]
	children := getOrderedChildren(g.g, n)
	err := checkCondition(children, 5)
	if err != nil {
		return err
	}
	x, scaleN, biasN, meanN, varN := children[0].gorgoniaNode,
		children[1].gorgoniaNode,
		children[2].gorgoniaNode,
		children[3].gorgoniaNode,
		children[4].gorgoniaNode
	if len(x.Shape()) != 4 {
		return &onnx.ErrNotImplemented{
			Operator: "Batchnormalization",
			Message:  "Only CxBxHxW tensors are supported",
		}
	}
	// Fast-path operator; applied below only when the leading
	// dimension equals 1.
	batchNormOp := &fastBatchnorm{
		scale:   scaleN.Value(),
		bias:    biasN.Value(),
		mean:    meanN.Value(),
		varN:    varN.Value(),
		epsilon: float32(b.epsilon),
	}
	if x.Shape()[0] != 1 {
		// helper func: reshape the 1-D operand b to 1xCx1x1,
		// broadcast it against a over axes 0, 2, 3, then apply the
		// binary op f.
		apply := func(f func(a, b *gorgonia.Node) (*gorgonia.Node, error), a, b *gorgonia.Node) (*gorgonia.Node, error) {
			if len(b.Shape()) != 1 {
				return nil, errors.New("Batchnorm: wrong shape")
			}
			ba, err := gorgonia.Reshape(b, []int{1, b.Shape()[0], 1, 1})
			if err != nil {
				return nil, err
			}
			aa, bb, err := gorgonia.Broadcast(a, ba, gorgonia.NewBroadcastPattern(nil, []byte{0, 2, 3}))
			if err != nil {
				return nil, err
			}
			return f(aa, bb)
		}
		// xNorm = (x - meanN) / sqrt( varN + b.epsilon)
		// output = scaleN * xNorm + biasN
		xNorm1, err := apply(gorgonia.Sub, x, meanN)
		if err != nil {
			return err
		}
		epsilon := gorgonia.NewConstant(float32(b.epsilon), gorgonia.WithName(getUniqNodeName("epsilon")))
		xNorm21, err := gorgonia.Add(varN, epsilon)
		if err != nil {
			return err
		}
		xNorm2, err := gorgonia.Sqrt(xNorm21)
		if err != nil {
			return err
		}
		xNorm, err := apply(gorgonia.HadamardDiv, xNorm1, xNorm2)
		if err != nil {
			return err
		}
		output1, err := apply(gorgonia.HadamardProd, xNorm, scaleN)
		if err != nil {
			return err
		}
		n.gorgoniaNode, err = apply(gorgonia.Add, output1, biasN)
		return err
	}
	n.gorgoniaNode, err = gorgonia.ApplyOp(batchNormOp, x)
	return err
}
func (b *batchnorm) init(o onnx.Operation) error {
b.epsilon = 1e-5
b.momentum = 0.9
if e, ok := o.Attributes["epsilon"]; ok {
if v, ok := e.(float32); ok {
b.epsilon = float64(v)
} else {
return errors.New("epsilon is not a float64")
}
}
if e, ok := o.Attributes["momentum"]; ok {
if v, ok := e.(float32); ok {
b.momentum = float64(v)
} else {
return errors.New("momentum is not a float64")
}
}
return nil
} | backend/x/gorgonnx/batchnorm.go | 0.687525 | 0.40436 | batchnorm.go | starcoder |
package scsu
import (
"bytes"
"errors"
"fmt"
"io"
"strings"
"unicode/utf16"
)
// Reader decodes an SCSU byte stream into runes. It embeds the shared
// scsu window state.
type Reader struct {
	scsu
	brd       io.ByteReader // source of compressed bytes
	bytesRead int           // total bytes consumed, reported by ReadRune
}

var (
	// ErrIllegalInput reports a byte sequence that is not valid SCSU.
	ErrIllegalInput = errors.New("illegal input")
)
// NewReader returns a Reader decoding SCSU input from r.
func NewReader(r io.ByteReader) *Reader {
	rd := &Reader{brd: r}
	rd.init()
	return rd
}

// readByte fetches one byte from the source, counting it on success.
func (r *Reader) readByte() (byte, error) {
	b, err := r.brd.ReadByte()
	if err != nil {
		return b, err
	}
	r.bytesRead++
	return b, nil
}
/** (re-)define (and select) a dynamic window
A sliding window position cannot start at any Unicode value,
so rather than providing an absolute offset, this function takes
an index value which selects among the possible starting values.
Most scripts in Unicode start on or near a half-block boundary
so the default behaviour is to multiply the index by 0x80. Han,
Hangul, Surrogates and other scripts between 0x3400 and 0xDFFF
show very poor locality--therefore no sliding window can be set
there. A jumpOffset is added to the index value to skip that region,
and only 167 index values total are required to select all eligible
half-blocks.
Finally, a few scripts straddle half block boundaries. For them, a
table of fixed offsets is used, and the index values from 0xF9 to
0xFF are used to select these special offsets.
After (re-)defining a windows location it is selected so it is ready
for use.
Recall that all Windows are of the same length (128 code positions).
*/
func (r *Reader) defineWindow(iWindow int, offset byte) error {
	// 0 is a reserved value
	if offset == 0 {
		return ErrIllegalInput
	}
	if offset < gapThreshold {
		// Half-block aligned offset below the 3400-DFFF gap.
		r.dynamicOffset[iWindow] = int32(offset) << 7
	} else if offset < reservedStart {
		// Offsets beyond the gap are shifted past it.
		r.dynamicOffset[iWindow] = (int32(offset) << 7) + gapOffset
	} else if offset < fixedThreshold {
		// Reserved range: not a legal window definition.
		return fmt.Errorf("offset = %d", offset)
	} else {
		// Scripts straddling half-block boundaries use fixed offsets.
		r.dynamicOffset[iWindow] = fixedOffset[offset-fixedThreshold]
	}

	// make the redefined window the active one
	r.window = iWindow
	return nil
}
/** (re-)define (and select) a window as an extended dynamic window
The surrogate area in Unicode allows access to 2**20 codes beyond the
first 64K codes by combining one of 1024 characters from the High
Surrogate Area with one of 1024 characters from the Low Surrogate
Area (see Unicode 2.0 for the details).
The tags SDX and UDX set the window such that each subsequent byte in
the range 80 to FF represents a surrogate pair. The following diagram
shows how the bits in the two bytes following the SDX or UDX, and a
subsequent data byte, map onto the bits in the resulting surrogate pair.
hbyte lbyte data
nnnwwwww zzzzzyyy 1xxxxxxx
high-surrogate low-surrogate
110110wwwwwzzzzz 110111yyyxxxxxxx
@param chOffset - Since the three top bits of chOffset are not needed to
set the location of the extended Window, they are used instead
to select the window, thereby reducing the number of needed command codes.
The bottom 13 bits of chOffset are used to calculate the offset relative to
a 7 bit input data byte to yield the 20 bits expressed by each surrogate pair.
**/
func (r *Reader) defineExtendedWindow(chOffset uint16) {
	// The top 3 bits of iOffsetHi are the window index
	window := chOffset >> 13
	// Calculate the new offset: the low 13 bits select a 128-character
	// block above the BMP (hence the 1<<16 term).
	r.dynamicOffset[window] = ((int32(chOffset) & 0x1FFF) << 7) + (1 << 16)
	// make the redefined window the active one
	r.window = int(window)
}
// convert an io.EOF into io.ErrUnexpectedEOF
func unexpectedEOF(e error) error {
if errors.Is(e, io.EOF) {
return io.ErrUnexpectedEOF
}
return e
}
// expandUnicode consumes input while in Unicode mode. It returns the
// decoded rune, or -1 when the stream switched back to single-byte
// mode (the caller must then re-dispatch).
func (r *Reader) expandUnicode() (rune, error) {
	for {
		b, err := r.readByte()
		if err != nil {
			return 0, err
		}
		if b >= UC0 && b <= UC7 {
			// Select a single-byte window and leave Unicode mode.
			r.window = int(b) - UC0
			r.unicodeMode = false
			return -1, nil
		}
		if b >= UD0 && b <= UD7 {
			// Define and select a dynamic window, leave Unicode mode.
			b1, err := r.readByte()
			if err != nil {
				return 0, unexpectedEOF(err)
			}
			r.unicodeMode = false
			return -1, r.defineWindow(int(b)-UD0, b1)
		}
		if b == UDX {
			// Define an extended (supplementary-plane) window.
			c, err := r.readUint16()
			if err != nil {
				return 0, unexpectedEOF(err)
			}
			r.defineExtendedWindow(c)
			r.unicodeMode = false
			return -1, nil
		}
		if b == UQU {
			// Quote one literal UTF-16 code unit.
			// NOTE(review): unlike the other arms this error is not
			// passed through unexpectedEOF — confirm if intentional.
			r, err := r.readUint16()
			if err != nil {
				return 0, err
			}
			return rune(r), nil
		} else {
			// Plain UTF-16 code unit; may open a surrogate pair.
			b1, err := r.readByte()
			if err != nil {
				return 0, unexpectedEOF(err)
			}
			ch := rune(uint16FromTwoBytes(b, b1))
			if utf16.IsSurrogate(ch) {
				ch1, err := r.readUint16()
				if err != nil {
					return 0, unexpectedEOF(err)
				}
				surrLo := rune(ch1)
				if !utf16.IsSurrogate(surrLo) {
					return 0, ErrIllegalInput
				}
				return utf16.DecodeRune(ch, surrLo), nil
			}
			return ch, nil
		}
	}
}
// readUint16 reads two bytes and combines them big-endian. A missing
// byte is reported as io.ErrUnexpectedEOF.
func (r *Reader) readUint16() (uint16, error) {
	hi, err := r.readByte()
	if err != nil {
		return 0, unexpectedEOF(err)
	}
	lo, err := r.readByte()
	if err != nil {
		return 0, unexpectedEOF(err)
	}
	return uint16FromTwoBytes(hi, lo), nil
}
// uint16FromTwoBytes assembles a big-endian uint16 from its two bytes.
func uint16FromTwoBytes(hi, lo byte) uint16 {
	v := uint16(hi) << 8
	v |= uint16(lo)
	return v
}
/** expand portion of the input that is in single byte mode **/
// Returns the decoded rune, or -1 after an SCU switch to Unicode mode
// (the caller must then re-dispatch).
func (r *Reader) expandSingleByte() (rune, error) {
	for {
		b, err := r.readByte()
		if err != nil {
			return 0, err
		}

		staticWindow := 0
		dynamicWindow := r.window

		switch b {
		case SQ0, SQ1, SQ2, SQ3, SQ4, SQ5, SQ6, SQ7:
			// Select window pair to quote from
			dynamicWindow = int(b) - SQ0
			staticWindow = dynamicWindow
			b, err = r.readByte()
			if err != nil {
				return 0, unexpectedEOF(err)
			}
			// fallthrough: the quoted byte is decoded like a plain
			// data byte, but against the quoted window pair.
			fallthrough
		default:
			// output as character
			if b < 0x80 {
				// use static window
				return int32(b) + staticOffset[staticWindow], nil
			} else {
				// High byte: index into the active dynamic window.
				ch := int32(b) - 0x80
				ch += r.dynamicOffset[dynamicWindow]
				return ch, nil
			}
		case SDX:
			// define a dynamic window as extended
			ch, err := r.readUint16()
			if err != nil {
				return 0, unexpectedEOF(err)
			}
			r.defineExtendedWindow(ch)
		case SD0, SD1, SD2, SD3, SD4, SD5, SD6, SD7:
			// Position a dynamic Window
			b1, err := r.readByte()
			if err != nil {
				return 0, unexpectedEOF(err)
			}
			err = r.defineWindow(int(b)-SD0, b1)
			if err != nil {
				return 0, err
			}
		case SC0, SC1, SC2, SC3, SC4, SC5, SC6, SC7:
			// Select a new dynamic Window
			r.window = int(b) - SC0
		case SCU:
			// switch to Unicode mode and continue parsing
			r.unicodeMode = true
			return -1, nil
		case SQU:
			// directly extract one Unicode character
			ch, err := r.readUint16()
			if err != nil {
				return 0, err
			}
			return rune(ch), nil
		case Srs:
			// Reserved tag: illegal in SCSU.
			return 0, ErrIllegalInput
		}
	}
}
// readRune decodes until a real character is produced, skipping the
// -1 sentinel returned by mode/window switches.
func (r *Reader) readRune() (rune, error) {
	for {
		var (
			c   rune
			err error
		)
		if r.unicodeMode {
			c, err = r.expandUnicode()
		} else {
			c, err = r.expandSingleByte()
		}
		switch {
		case err != nil:
			return 0, err
		case c != -1:
			return c, nil
		}
	}
}

// ReadRune reads a single SCSU encoded Unicode character
// and returns the rune and the amount of bytes consumed. If no character is
// available, err will be set.
func (r *Reader) ReadRune() (rune, int, error) {
	before := r.bytesRead
	c, err := r.readRune()
	return c, r.bytesRead - before, err
}
// ReadStringSizeHint is like ReadString, but takes a hint about the expected
// string size. Note this is the size of the UTF-8 encoded string in bytes.
func (r *Reader) ReadStringSizeHint(sizeHint int) (string, error) {
	var sb strings.Builder
	if sizeHint > 0 {
		sb.Grow(sizeHint)
	}
	for {
		// Use a distinct name for the decoded rune: the original code
		// shadowed the receiver `r` inside the loop, which was legal
		// but easy to misread.
		ch, err := r.readRune()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return "", err
		}
		sb.WriteRune(ch)
	}
	return sb.String(), nil
}
// ReadString reads all available input as a string.
// It keeps reading the source reader until it returns io.EOF or an error occurs.
// In case of io.EOF the error returned by ReadString will be nil.
func (r *Reader) ReadString() (string, error) {
return r.ReadStringSizeHint(0)
}
func (r *Reader) Reset(rd io.ByteReader) {
r.brd, r.bytesRead = rd, 0
r.reset()
r.init()
}
// Decode a byte array as a string.
func Decode(b []byte) (string, error) {
return NewReader(bytes.NewBuffer(b)).ReadStringSizeHint(len(b))
} | decode.go | 0.596198 | 0.408926 | decode.go | starcoder |
package batchingchannels
import (
"context"
"errors"
"github.com/askiada/external-sort/vector"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
// BatchingChannel implements the Channel interface, with the change that instead of producing individual elements
// on Out(), it batches together the entire internal buffer each time. Trying to construct an unbuffered batching channel
// will panic, that configuration is not supported (and provides no benefit over an unbuffered NativeChannel).
type BatchingChannel struct {
	input    chan string         // incoming elements, one at a time
	output   chan vector.Vector  // outgoing batches of up to size elements
	buffer   vector.Vector       // batch currently being filled
	allocate *vector.Allocate    // factory for fresh vectors
	g        *errgroup.Group     // workers spawned by ProcessOut
	sem      *semaphore.Weighted // caps concurrent ProcessOut workers
	dCtx     context.Context     // derived context cancelling the group
	size     int                 // batch size
}
// NewBatchingChannel returns a BatchingChannel with max workers. It creates a
// goroutine and will stop it when the context is cancelled. It returns an
// error if the input is invalid.
func NewBatchingChannel(ctx context.Context, allocate *vector.Allocate, maxWorker int64, size int) (*BatchingChannel, error) {
	switch {
	case size == 0:
		return nil, errors.New("channels: BatchingChannel does not support unbuffered behaviour")
	case size < 0:
		return nil, errors.New("channels: invalid negative size in NewBatchingChannel")
	}
	g, dCtx := errgroup.WithContext(ctx)
	ch := &BatchingChannel{
		input:    make(chan string),
		output:   make(chan vector.Vector),
		size:     size,
		allocate: allocate,
		g:        g,
		sem:      semaphore.NewWeighted(maxWorker),
		dCtx:     dCtx,
	}
	go ch.batchingBuffer(ctx)
	return ch, nil
}
// In exposes the write-only side of the channel used by producers.
func (ch *BatchingChannel) In() chan<- string {
	return ch.input
}

// Out returns a <-chan vector.Vector in order that BatchingChannel conforms to the standard Channel interface provided
// by this package, however each output value is guaranteed to be of type vector.Vector - a vector collecting the most
// recent batch of values sent on the In channel. The vector is guaranteed to not be empty or nil.
func (ch *BatchingChannel) Out() <-chan vector.Vector {
	return ch.output
}
// ProcessOut drains Out(), handing each batch to f on its own
// goroutine while the semaphore caps concurrency at maxWorker. It
// returns the first error produced by f, by semaphore acquisition,
// or by the batching goroutine.
func (ch *BatchingChannel) ProcessOut(f func(vector.Vector) error) error {
	for val := range ch.Out() {
		if err := ch.sem.Acquire(ch.dCtx, 1); err != nil {
			return err
		}
		val := val
		ch.g.Go(func() error {
			defer ch.sem.Release(1)
			return f(val)
		})
	}
	// Idiom: return Wait's error directly instead of the
	// err != nil / return nil dance.
	return ch.g.Wait()
}
// Len reports the configured batch size.
func (ch *BatchingChannel) Len() int { return ch.size }

// Cap reports the configured batch size.
func (ch *BatchingChannel) Cap() int { return ch.size }

// Close closes the input channel; the batching goroutine then flushes
// any partial batch and closes Out.
func (ch *BatchingChannel) Close() { close(ch.input) }
// batchingBuffer is the goroutine that drains input into fixed-size
// batches and publishes each full batch on output. On return it
// closes output; a trailing partial batch is flushed first.
func (ch *BatchingChannel) batchingBuffer(ctx context.Context) {
	ch.buffer = ch.allocate.Vector(ch.size, ch.allocate.Key)
	defer close(ch.output)
	for elem := range ch.input {
		// Abort on cancellation; surface ctx.Err() through the
		// errgroup so ProcessOut's Wait observes it.
		select {
		case <-ctx.Done():
			ch.g.Go(func() error {
				return ctx.Err()
			})
			return
		default:
		}
		err := ch.buffer.PushBack(elem)
		if err != nil {
			// NOTE(review): the element is dropped but batching
			// continues — confirm this best-effort behaviour.
			ch.g.Go(func() error {
				return err
			})
		}
		if ch.buffer.Len() == ch.size {
			ch.output <- ch.buffer
			ch.buffer = ch.allocate.Vector(ch.size, ch.allocate.Key)
		}
	}
	if ch.buffer.Len() > 0 {
		ch.output <- ch.buffer
	}
}
// Package bulletproof implements the zero knowledge protocol bulletproofs as defined in https://eprint.iacr.org/2017/1066.pdf
package bulletproof
import (
crand "crypto/rand"
"math/big"
"github.com/gtank/merlin"
"github.com/pkg/errors"
"github.com/coinbase/kryptology/pkg/core/curves"
)
// RangeProver is the struct used to create RangeProofs
// It specifies which curve to use and holds precomputed generators
// See NewRangeProver() for prover initialization.
type RangeProver struct {
	curve      curves.Curve        // curve the proofs are built over
	generators *ippGenerators      // precomputed generator points
	ippProver  *InnerProductProver // compacts the l, r transfer (4.2, pg20)
}

// RangeProof is the struct used to hold a range proof
// capA is a commitment to a_L and a_R using randomness alpha
// capS is a commitment to s_L and s_R using randomness rho
// capTau1,2 are commitments to t1,t2 respectively using randomness tau_1,2
// tHat represents t(X) as defined on page 19
// taux is the blinding factor for tHat
// ipp is the inner product proof used for compacting the transfer of l,r (See 4.2 on pg20).
type RangeProof struct {
	capA, capS, capT1, capT2 curves.Point
	taux, mu, tHat           curves.Scalar
	ipp                      *InnerProductProof
	curve                    *curves.Curve
}

// RangeProofGenerators bundles the independent group elements used to
// commit to the value and its blinding factors.
type RangeProofGenerators struct {
	g, h, u curves.Point
}
// NewRangeProver initializes a new prover
// It uses the specified domain to generate generators for vectors of at most maxVectorLength
// A prover can be used to construct range proofs for vectors of length less than or equal to maxVectorLength
// A prover is defined by an explicit curve.
func NewRangeProver(maxVectorLength int, rangeDomain, ippDomain []byte, curve curves.Curve) (*RangeProver, error) {
	generators, err := getGeneratorPoints(maxVectorLength, rangeDomain, curve)
	if err != nil {
		return nil, errors.Wrap(err, "range NewRangeProver")
	}
	ippProver, err := NewInnerProductProver(maxVectorLength, ippDomain, curve)
	if err != nil {
		return nil, errors.Wrap(err, "range NewRangeProver")
	}
	prover := &RangeProver{
		curve:      curve,
		generators: generators,
		ippProver:  ippProver,
	}
	return prover, nil
}
// NewRangeProof initializes a new RangeProof for a specified curve
// This should be used in tandem with UnmarshalBinary() to convert a marshaled proof into the struct.
func NewRangeProof(curve *curves.Curve) *RangeProof {
	return &RangeProof{
		capA:  nil,
		capS:  nil,
		capT1: nil,
		capT2: nil,
		taux:  nil,
		mu:    nil,
		tHat:  nil,
		ipp:   NewInnerProductProof(curve),
		curve: curve,
	}
}
// Prove uses the range prover to prove that some value v is within the range [0, 2^n]
// It implements the protocol defined on pgs 19,20 in https://eprint.iacr.org/2017/1066.pdf
// v is the value of which to prove the range
// n is the power that specifies the upper bound of the range, ie. 2^n
// gamma is a scalar used for as a blinding factor
// g, h, u are unique points used as generators for the blinding factor
// transcript is a merlin transcript to be used for the fiat shamir heuristic.
func (prover *RangeProver) Prove(v, gamma curves.Scalar, n int, proofGenerators RangeProofGenerators, transcript *merlin.Transcript) (*RangeProof, error) {
// n must be less than or equal to the number of generators generated
if n > len(prover.generators.G) {
return nil, errors.New("ipp vector length must be less than or equal to maxVectorLength")
}
// In case where len(a) is less than number of generators precomputed by prover, trim to length
proofG := prover.generators.G[0:n]
proofH := prover.generators.H[0:n]
// Check that v is in range [0, 2^n]
if bigZero := big.NewInt(0); v.BigInt().Cmp(bigZero) == -1 {
return nil, errors.New("v is less than 0")
}
bigTwo := big.NewInt(2)
if n < 0 {
return nil, errors.New("n cannot be less than 0")
}
bigN := big.NewInt(int64(n))
var bigTwoToN big.Int
bigTwoToN.Exp(bigTwo, bigN, nil)
if v.BigInt().Cmp(&bigTwoToN) == 1 {
return nil, errors.New("v is greater than 2^n")
}
// L40 on pg19
aL, err := getaL(v, n, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
onen := get1nVector(n, prover.curve)
// L41 on pg19
aR, err := subtractPairwiseScalarVectors(aL, onen)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
alpha := prover.curve.Scalar.Random(crand.Reader)
// Calc A (L44, pg19)
halpha := proofGenerators.h.Mul(alpha)
gaL := prover.curve.Point.SumOfProducts(proofG, aL)
haR := prover.curve.Point.SumOfProducts(proofH, aR)
capA := halpha.Add(gaL).Add(haR)
// L45, 46, pg19
sL := getBlindingVector(n, prover.curve)
sR := getBlindingVector(n, prover.curve)
rho := prover.curve.Scalar.Random(crand.Reader)
// Calc S (L47, pg19)
hrho := proofGenerators.h.Mul(rho)
gsL := prover.curve.Point.SumOfProducts(proofG, sL)
hsR := prover.curve.Point.SumOfProducts(proofH, sR)
capS := hrho.Add(gsL).Add(hsR)
// Fiat Shamir for y,z (L49, pg19)
capV := getcapV(v, gamma, proofGenerators.g, proofGenerators.h)
y, z, err := calcyz(capV, capA, capS, transcript, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// Calc t_1, t_2
// See the l(X), r(X), t(X) equations on pg 19
// Use l(X)'s and r(X)'s constant and linear terms to derive t_1 and t_2
// (a_l - z*1^n)
zonen := multiplyScalarToScalarVector(z, onen)
constantTerml, err := subtractPairwiseScalarVectors(aL, zonen)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
linearTerml := sL
// z^2 * 2^N
twoN := get2nVector(n, prover.curve)
zSquareTwon := multiplyScalarToScalarVector(z.Square(), twoN)
// a_r + z*1^n
aRPluszonen, err := addPairwiseScalarVectors(aR, zonen)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
yn := getknVector(y, n, prover.curve)
hadamard, err := multiplyPairwiseScalarVectors(yn, aRPluszonen)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
constantTermr, err := addPairwiseScalarVectors(hadamard, zSquareTwon)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
linearTermr, err := multiplyPairwiseScalarVectors(yn, sR)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// t_1 (as the linear coefficient) is the sum of the dot products of l(X)'s linear term dot r(X)'s constant term
// and r(X)'s linear term dot l(X)'s constant term
t1FirstTerm, err := innerProduct(linearTerml, constantTermr)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
t1SecondTerm, err := innerProduct(linearTermr, constantTerml)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
t1 := t1FirstTerm.Add(t1SecondTerm)
// t_2 (as the quadratic coefficient) is the dot product of l(X)'s and r(X)'s linear terms
t2, err := innerProduct(linearTerml, linearTermr)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// L52, pg20
tau1 := prover.curve.Scalar.Random(crand.Reader)
tau2 := prover.curve.Scalar.Random(crand.Reader)
// T_1, T_2 (L53, pg20)
capT1 := proofGenerators.g.Mul(t1).Add(proofGenerators.h.Mul(tau1))
capT2 := proofGenerators.g.Mul(t2).Add(proofGenerators.h.Mul(tau2))
// Fiat shamir for x (L55, pg20)
x, err := calcx(capT1, capT2, transcript, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// Calc l (L58, pg20)
// Instead of using the expression in the line, evaluate l() at x
sLx := multiplyScalarToScalarVector(x, linearTerml)
l, err := addPairwiseScalarVectors(constantTerml, sLx)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// Calc r (L59, pg20)
// Instead of using the expression in the line, evaluate r() at x
ynsRx := multiplyScalarToScalarVector(x, linearTermr)
r, err := addPairwiseScalarVectors(constantTermr, ynsRx)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// Calc t hat (L60, pg20)
// For efficiency, instead of calculating the dot product, evaluate t() at x
deltayz, err := deltayz(y, z, n, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
t0 := v.Mul(z.Square()).Add(deltayz)
tLinear := t1.Mul(x)
tQuadratic := t2.Mul(x.Square())
tHat := t0.Add(tLinear).Add(tQuadratic)
// Calc tau_x (L61, pg20)
tau2xsquare := tau2.Mul(x.Square())
tau1x := tau1.Mul(x)
zsquaregamma := z.Square().Mul(gamma)
taux := tau2xsquare.Add(tau1x).Add(zsquaregamma)
// Calc mu (L62, pg20)
mu := alpha.Add(rho.Mul(x))
// Calc IPP (See section 4.2)
hPrime, err := gethPrime(proofH, y, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
capPhmu, err := getPhmu(proofG, hPrime, proofGenerators.h, capA, capS, x, y, z, mu, n, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
wBytes := transcript.ExtractBytes([]byte("getw"), 64)
w, err := prover.curve.NewScalar().SetBytesWide(wBytes)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
ipp, err := prover.ippProver.rangeToIPP(proofG, hPrime, l, r, tHat, capPhmu, proofGenerators.u.Mul(w), transcript)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
out := &RangeProof{
capA: capA,
capS: capS,
capT1: capT1,
capT2: capT2,
taux: taux,
mu: mu,
tHat: tHat,
ipp: ipp,
curve: &prover.curve,
}
return out, nil
}
// MarshalBinary takes a range proof and marshals into bytes.
// Layout: capA || capS || capT1 || capT2 (compressed points),
// then taux || mu || tHat (scalars), then the embedded inner-product proof.
func (proof *RangeProof) MarshalBinary() []byte {
	parts := [][]byte{
		proof.capA.ToAffineCompressed(),
		proof.capS.ToAffineCompressed(),
		proof.capT1.ToAffineCompressed(),
		proof.capT2.ToAffineCompressed(),
		proof.taux.Bytes(),
		proof.mu.Bytes(),
		proof.tHat.Bytes(),
		proof.ipp.MarshalBinary(),
	}
	var out []byte
	for _, part := range parts {
		out = append(out, part...)
	}
	return out
}
// UnmarshalBinary takes bytes of a marshaled proof and writes them into a range proof
// The range proof used should be from the output of NewRangeProof().
// Returns an error if any point/scalar fails to decode or if data is too short.
func (proof *RangeProof) UnmarshalBinary(data []byte) error {
	scalarLen := len(proof.curve.NewScalar().Bytes())
	pointLen := len(proof.curve.NewGeneratorPoint().ToAffineCompressed())
	// Guard against truncated input: the fixed-size prefix holds 4 compressed
	// points and 3 scalars. Without this check the slicing below panics.
	if len(data) < 4*pointLen+3*scalarLen {
		return errors.New("rangeProof UnmarshalBinary data too short")
	}
	ptr := 0
	// Get points
	capA, err := proof.curve.Point.FromAffineCompressed(data[ptr : ptr+pointLen])
	if err != nil {
		return errors.New("rangeProof UnmarshalBinary FromAffineCompressed")
	}
	proof.capA = capA
	ptr += pointLen
	capS, err := proof.curve.Point.FromAffineCompressed(data[ptr : ptr+pointLen])
	if err != nil {
		return errors.New("rangeProof UnmarshalBinary FromAffineCompressed")
	}
	proof.capS = capS
	ptr += pointLen
	capT1, err := proof.curve.Point.FromAffineCompressed(data[ptr : ptr+pointLen])
	if err != nil {
		return errors.New("rangeProof UnmarshalBinary FromAffineCompressed")
	}
	proof.capT1 = capT1
	ptr += pointLen
	capT2, err := proof.curve.Point.FromAffineCompressed(data[ptr : ptr+pointLen])
	if err != nil {
		return errors.New("rangeProof UnmarshalBinary FromAffineCompressed")
	}
	proof.capT2 = capT2
	ptr += pointLen
	// Get scalars
	taux, err := proof.curve.NewScalar().SetBytes(data[ptr : ptr+scalarLen])
	if err != nil {
		return errors.New("rangeProof UnmarshalBinary SetBytes")
	}
	proof.taux = taux
	ptr += scalarLen
	mu, err := proof.curve.NewScalar().SetBytes(data[ptr : ptr+scalarLen])
	if err != nil {
		return errors.New("rangeProof UnmarshalBinary SetBytes")
	}
	proof.mu = mu
	ptr += scalarLen
	tHat, err := proof.curve.NewScalar().SetBytes(data[ptr : ptr+scalarLen])
	if err != nil {
		return errors.New("rangeProof UnmarshalBinary SetBytes")
	}
	proof.tHat = tHat
	ptr += scalarLen
	// Get IPP: the inner-product proof consumes the remainder of the buffer.
	err = proof.ipp.UnmarshalBinary(data[ptr:])
	if err != nil {
		return errors.New("rangeProof UnmarshalBinary")
	}
	return nil
}
// checkRange validates whether some scalar v is within the range [0, 2^n - 1]
// It will return an error if v is less than 0 or greater than 2^n
// Otherwise it will return nil.
func checkRange(v curves.Scalar, n int) error {
	if n < 0 {
		return errors.New("n cannot be less than 0")
	}
	// The doc contract promises an error for v < 0, but the original code
	// never checked the sign; add the check explicitly.
	if v.BigInt().Sign() < 0 {
		return errors.New("v is less than 0")
	}
	bigOne := big.NewInt(1)
	var bigTwoToN big.Int
	bigTwoToN.Lsh(bigOne, uint(n))
	// NOTE(review): this accepts v == 2^n, matching the prover's inline check;
	// a strict [0, 2^n - 1] range would use Cmp(...) >= 0 — confirm intent.
	if v.BigInt().Cmp(&bigTwoToN) == 1 {
		return errors.New("v is greater than 2^n")
	}
	return nil
}
// getBlindingVector returns a vector of scalars used as blinding factors for commitments.
func getBlindingVector(length int, curve curves.Curve) []curves.Scalar {
	blinding := make([]curves.Scalar, length)
	for i := range blinding {
		blinding[i] = curve.Scalar.Random(crand.Reader)
	}
	return blinding
}
// getcapV returns a commitment to v using blinding factor gamma: V = h^gamma * g^v.
func getcapV(v, gamma curves.Scalar, g, h curves.Point) curves.Point {
	hGamma := h.Mul(gamma)
	gV := g.Mul(v)
	return hGamma.Add(gV)
}
// getaL obtains the bit vector representation of v
// See the a_L definition towards the bottom of pg 17 of https://eprint.iacr.org/2017/1066.pdf
// The redundant zero-fill loop from the original was removed: every element
// of aL is assigned before any read, and on error the slice is discarded.
func getaL(v curves.Scalar, n int, curve curves.Curve) ([]curves.Scalar, error) {
	var err error
	vBytes := v.Bytes()
	zero := curve.Scalar.Zero()
	one := curve.Scalar.One()
	// NOTE(review): vBytes[i>>3] assumes little-endian byte order of
	// Scalar.Bytes() and that n <= 8*len(vBytes) — confirm against the
	// curves package before widening n's allowed range.
	aL := make([]curves.Scalar, n)
	for i := 0; i < n; i++ {
		// Extract bit i of v and select 0 or 1 in constant time.
		ithBit := vBytes[i>>3] >> (i & 0x07) & 0x01
		aL[i], err = cmoveScalar(zero, one, int(ithBit), curve)
		if err != nil {
			return nil, errors.Wrap(err, "getaL")
		}
	}
	return aL, nil
}
// cmoveScalar provides a constant time operation that returns x if which is 0 and returns y if which is 1.
// The branch-free XOR-mask select below is what makes the selection
// constant-time; do not replace it with an if/else on `which`.
func cmoveScalar(x, y curves.Scalar, which int, curve curves.Curve) (curves.Scalar, error) {
	if which != 0 && which != 1 {
		return nil, errors.New("cmoveScalar which must be 0 or 1")
	}
	// mask is 0x00 when which==0 and 0xFF when which==1.
	mask := -byte(which)
	xBytes := x.Bytes()
	yBytes := y.Bytes()
	// xBytes[i] ^ ((x^y) & mask) yields x's byte under a zero mask and
	// y's byte under a full mask, with no data-dependent branch.
	for i, xByte := range xBytes {
		xBytes[i] ^= (xByte ^ yBytes[i]) & mask
	}
	out, err := curve.NewScalar().SetBytes(xBytes)
	if err != nil {
		return nil, errors.Wrap(err, "cmoveScalar SetBytes")
	}
	return out, nil
}
// calcyz uses a merlin transcript for Fiat Shamir
// It takes the current state of the transcript and appends the newly calculated capA and capS values
// Two new scalars are then read from the transcript
// See section 4.4 pg22 of https://eprint.iacr.org/2017/1066.pdf
func calcyz(capV, capA, capS curves.Point, transcript *merlin.Transcript, curve curves.Curve) (curves.Scalar, curves.Scalar, error) {
	// Bind the commitments to the transcript (label order matters for
	// verifier compatibility) before extracting any challenge bytes.
	transcript.AppendMessage([]byte("addV"), capV.ToAffineUncompressed())
	transcript.AppendMessage([]byte("addcapA"), capA.ToAffineUncompressed())
	transcript.AppendMessage([]byte("addcapS"), capS.ToAffineUncompressed())
	// Draw two 64-byte wide challenges and reduce each into a scalar.
	y, err := curve.NewScalar().SetBytesWide(transcript.ExtractBytes([]byte("gety"), 64))
	if err != nil {
		return nil, nil, errors.Wrap(err, "calcyz NewScalar SetBytesWide")
	}
	z, err := curve.NewScalar().SetBytesWide(transcript.ExtractBytes([]byte("getz"), 64))
	if err != nil {
		return nil, nil, errors.Wrap(err, "calcyz NewScalar SetBytesWide")
	}
	return y, z, nil
}
// calcx uses a merlin transcript for Fiat Shamir
// It takes the current state of the transcript and appends the newly calculated capT1 and capT2 values
// A new scalar is then read from the transcript
// See section 4.4 pg22 of https://eprint.iacr.org/2017/1066.pdf
func calcx(capT1, capT2 curves.Point, transcript *merlin.Transcript, curve curves.Curve) (curves.Scalar, error) {
	// Add the T1, T2 commitments to the transcript; label order must match the verifier.
	transcript.AppendMessage([]byte("addcapT1"), capT1.ToAffineUncompressed())
	transcript.AppendMessage([]byte("addcapT2"), capT2.ToAffineUncompressed())
	// Read 64 wide bytes from the transcript and reduce them to a scalar challenge.
	outBytes := transcript.ExtractBytes([]byte("getx"), 64)
	x, err := curve.NewScalar().SetBytesWide(outBytes)
	if err != nil {
		return nil, errors.Wrap(err, "calcx NewScalar SetBytesWide")
	}
	return x, nil
}
package techan
import (
"fmt"
"math"
"math/rand"
"testing"
"time"
"strconv"
"github.com/adrenalyse/big"
"github.com/stretchr/testify/assert"
)
// candleIndex tracks the next candle timestamp (in seconds) across calls to
// mockTimeSeries, so successive mock series get monotonically increasing periods.
var candleIndex int

// mockedTimeSeries is a fixed 12-candle series shared by indicator tests.
var mockedTimeSeries = mockTimeSeriesFl(
	64.75, 63.79, 63.73,
	63.73, 63.55, 63.19,
	63.91, 63.85, 62.95,
	63.37, 61.33, 61.51)
// randomTimeSeries builds a TimeSeries of the given size whose values random-walk
// around the first sample: even indices step up, odd indices step down.
func randomTimeSeries(size int) *TimeSeries {
	values := make([]string, size)
	rand.Seed(time.Now().Unix())
	for i := range values {
		sample := rand.Float64() * 100
		if i == 0 {
			values[i] = fmt.Sprint(sample)
			continue
		}
		prev, _ := strconv.ParseFloat(values[i-1], 64)
		step := sample / 10
		if i%2 == 0 {
			values[i] = fmt.Sprint(prev + step)
		} else {
			values[i] = fmt.Sprint(prev - step)
		}
	}
	return mockTimeSeries(values...)
}
// mockTimeSeriesOCHL builds a TimeSeries from [open, close, high, low] quadruples;
// candle i is timestamped at Unix second i and given volume i.
func mockTimeSeriesOCHL(values ...[]float64) *TimeSeries {
	series := NewTimeSeries()
	for i, quad := range values {
		c := NewCandle(NewTimePeriod(time.Unix(int64(i), 0), time.Second))
		c.OpenPrice = big.NewDecimal(quad[0])
		c.ClosePrice = big.NewDecimal(quad[1])
		c.MaxPrice = big.NewDecimal(quad[2])
		c.MinPrice = big.NewDecimal(quad[3])
		c.Volume = big.NewDecimal(float64(i))
		series.AddCandle(c)
	}
	return series
}
// mockTimeSeries builds a TimeSeries whose open/close/volume equal each string
// value, with high = value+1 and low = value-1. Candles are timestamped with the
// package-level candleIndex counter, which advances once per candle.
func mockTimeSeries(values ...string) *TimeSeries {
	series := NewTimeSeries()
	for _, raw := range values {
		c := NewCandle(NewTimePeriod(time.Unix(int64(candleIndex), 0), time.Second))
		c.OpenPrice = big.NewFromString(raw)
		c.ClosePrice = big.NewFromString(raw)
		c.MaxPrice = big.NewFromString(raw).Add(big.ONE)
		c.MinPrice = big.NewFromString(raw).Sub(big.ONE)
		c.Volume = big.NewFromString(raw)
		series.AddCandle(c)
		candleIndex++
	}
	return series
}
// mockTimeSeriesFl is a float64 convenience wrapper around mockTimeSeries.
func mockTimeSeriesFl(values ...float64) *TimeSeries {
	asStrings := make([]string, 0, len(values))
	for _, v := range values {
		asStrings = append(asStrings, fmt.Sprint(v))
	}
	return mockTimeSeries(asStrings...)
}
// decimalEquals asserts that actual matches expected when both are rendered
// to four decimal places.
func decimalEquals(t *testing.T, expected float64, actual big.Decimal) {
	want := fmt.Sprintf("%.4f", expected)
	got := fmt.Sprintf("%.4f", actual.Float())
	assert.Equal(t, want, got)
}
// dump exhausts an indicator by calculating successive indices until Calculate
// panics (past the end of the underlying series), returning each value rounded
// to 4 decimal places. The deferred recover is the loop's only exit path.
func dump(indicator Indicator) (values []float64) {
	precision := 4.0
	m := math.Pow(10, precision)
	// recover turns the out-of-range panic into a normal return carrying the
	// values collected so far (named result).
	// NOTE(review): this also swallows any unrelated panic from Calculate.
	defer func() {
		recover()
	}()
	var index int
	for {
		//log.Println(math.Round(indicator.Calculate(index).Float()*m)/m)
		values = append(values, math.Round(indicator.Calculate(index).Float()*m)/m)
		index++
	}
	return
}
func indicatorEquals(t *testing.T, expected []float64, indicator Indicator) {
actualValues := dump(indicator)
assert.EqualValues(t, expected, actualValues)
} | testutils.go | 0.61832 | 0.468365 | testutils.go | starcoder |
package voice
import (
"github.com/gotracker/gomixing/panning"
"github.com/gotracker/gomixing/sampling"
"github.com/gotracker/gomixing/volume"
"github.com/gotracker/voice"
"github.com/gotracker/voice/period"
"gotracker/internal/optional"
)
// envSettings tracks pending updates to a single envelope: whether it should
// be enabled and the position it should be set to.
type envSettings struct {
	enabled optional.Value //bool
	pos     optional.Value //int
}

// playingMode selects which phase transition (attack/release) to apply on commit.
type playingMode uint8

const (
	playingModeAttack = playingMode(iota)
	playingModeRelease
)

// txn buffers pending voice mutations so they can be applied as one batch via
// Commit or discarded via Cancel. Each optional.Value holds the commented type.
type txn struct {
	cancelled   bool           // set once the txn is committed or cancelled; blocks re-commit
	Voice       voice.Voice    // the voice the transaction applies to
	active      optional.Value //bool
	playing     optional.Value //playingMode
	fadeout     optional.Value //struct{}
	period      optional.Value //period.Period
	periodDelta optional.Value //period.Delta
	vol         optional.Value //volume.Volume
	pos         optional.Value //sampling.Pos
	pan         optional.Value //panning.Position
	volEnv      envSettings
	pitchEnv    envSettings
	panEnv      envSettings
	filterEnv   envSettings
}
// SetActive records a pending change to the voice's active flag.
func (t *txn) SetActive(active bool) {
	t.active.Set(active)
}

// IsPendingActive returns the pending active flag and whether one has been set.
func (t *txn) IsPendingActive() (bool, bool) {
	return t.active.GetBool()
}

// IsCurrentlyActive reports whether the underlying voice is active right now,
// ignoring any pending (uncommitted) change.
func (t *txn) IsCurrentlyActive() bool {
	return t.Voice.IsActive()
}

// Attack sets the playing mode to Attack
func (t *txn) Attack() {
	t.playing.Set(playingModeAttack)
}

// Release sets the playing mode to Release
func (t *txn) Release() {
	t.playing.Set(playingModeRelease)
}

// Fadeout activates the voice's fade-out function
func (t *txn) Fadeout() {
	t.fadeout.Set(struct{}{})
}
// SetPeriod sets the period
func (t *txn) SetPeriod(period period.Period) {
	t.period.Set(period)
}

// GetPendingPeriod returns the pending period (if any). The second result is
// true whenever a value was set, even if it fails the type assertion below.
func (t *txn) GetPendingPeriod() (period.Period, bool) {
	if p, set := t.period.GetPeriod(); set {
		// Re-assert to period.Period; a non-conforming value yields (nil, true).
		if pp, ok := p.(period.Period); ok {
			return pp, set
		}
		return nil, set
	}
	return nil, false
}

// GetCurrentPeriod returns the voice's current (committed) period.
func (t *txn) GetCurrentPeriod() period.Period {
	return voice.GetPeriod(t.Voice)
}

// SetPeriodDelta sets the period delta
func (t *txn) SetPeriodDelta(delta period.Delta) {
	t.periodDelta.Set(delta)
}

// GetPendingPeriodDelta returns the pending period delta and whether one is set.
func (t *txn) GetPendingPeriodDelta() (period.Delta, bool) {
	return t.periodDelta.GetPeriodDelta()
}

// GetCurrentPeriodDelta returns the voice's current (committed) period delta.
func (t *txn) GetCurrentPeriodDelta() period.Delta {
	return voice.GetPeriodDelta(t.Voice)
}

// SetVolume sets the volume
func (t *txn) SetVolume(vol volume.Volume) {
	t.vol.Set(vol)
}

// GetPendingVolume returns the pending volume and whether one is set.
func (t *txn) GetPendingVolume() (volume.Volume, bool) {
	return t.vol.GetVolume()
}

// GetCurrentVolume returns the voice's current (committed) volume.
func (t *txn) GetCurrentVolume() volume.Volume {
	return voice.GetVolume(t.Voice)
}

// SetPos sets the position
func (t *txn) SetPos(pos sampling.Pos) {
	t.pos.Set(pos)
}

// GetPendingPos returns the pending sample position and whether one is set.
func (t *txn) GetPendingPos() (sampling.Pos, bool) {
	return t.pos.GetPosition()
}

// GetCurrentPos returns the voice's current (committed) sample position.
func (t *txn) GetCurrentPos() sampling.Pos {
	return voice.GetPos(t.Voice)
}

// SetPan sets the panning position
func (t *txn) SetPan(pan panning.Position) {
	t.pan.Set(pan)
}

// GetPendingPan returns the pending pan position and whether one is set.
func (t *txn) GetPendingPan() (panning.Position, bool) {
	return t.pan.GetPanning()
}

// GetCurrentPan returns the voice's current (committed) pan position.
func (t *txn) GetCurrentPan() panning.Position {
	return voice.GetPan(t.Voice)
}
// SetVolumeEnvelopePosition sets the volume envelope position
func (t *txn) SetVolumeEnvelopePosition(pos int) {
	t.volEnv.pos.Set(pos)
}

// EnableVolumeEnvelope sets the volume envelope enable flag
func (t *txn) EnableVolumeEnvelope(enabled bool) {
	t.volEnv.enabled.Set(enabled)
}

// IsPendingVolumeEnvelopeEnabled returns the pending volume-envelope enable
// flag and whether one has been set.
func (t *txn) IsPendingVolumeEnvelopeEnabled() (bool, bool) {
	return t.volEnv.enabled.GetBool()
}

// IsCurrentVolumeEnvelopeEnabled reports the committed volume-envelope state.
func (t *txn) IsCurrentVolumeEnvelopeEnabled() bool {
	return voice.IsVolumeEnvelopeEnabled(t.Voice)
}

// SetPitchEnvelopePosition sets the pitch envelope position
func (t *txn) SetPitchEnvelopePosition(pos int) {
	t.pitchEnv.pos.Set(pos)
}

// EnablePitchEnvelope sets the pitch envelope enable flag
func (t *txn) EnablePitchEnvelope(enabled bool) {
	t.pitchEnv.enabled.Set(enabled)
}

// SetPanEnvelopePosition sets the panning envelope position
func (t *txn) SetPanEnvelopePosition(pos int) {
	t.panEnv.pos.Set(pos)
}

// EnablePanEnvelope sets the pan envelope enable flag
func (t *txn) EnablePanEnvelope(enabled bool) {
	t.panEnv.enabled.Set(enabled)
}

// SetFilterEnvelopePosition sets the pitch envelope position
func (t *txn) SetFilterEnvelopePosition(pos int) {
	t.filterEnv.pos.Set(pos)
}

// EnableFilterEnvelope sets the filter envelope enable flag
func (t *txn) EnableFilterEnvelope(enabled bool) {
	t.filterEnv.enabled.Set(enabled)
}

// SetAllEnvelopePositions sets all the envelope positions to the same value
func (t *txn) SetAllEnvelopePositions(pos int) {
	t.volEnv.pos.Set(pos)
	t.pitchEnv.pos.Set(pos)
	t.panEnv.pos.Set(pos)
	t.filterEnv.pos.Set(pos)
}
// ======

// Cancel cancels a pending transaction
func (t *txn) Cancel() {
	t.cancelled = true
}

// Commit commits the transaction by applying pending updates
// A committed (or cancelled) transaction is a no-op on subsequent Commit calls.
// The apply order below is deliberate: state values first, then envelope
// positions/enables, then the attack/release transition, then fadeout.
func (t *txn) Commit() {
	if t.cancelled {
		return
	}
	// Mark consumed immediately so Commit is idempotent even if applying panics.
	t.cancelled = true

	if t.Voice == nil {
		panic("voice not initialized")
	}

	if active, ok := t.active.Get(); ok {
		t.Voice.SetActive(active.(bool))
	}
	if p, ok := t.period.Get(); ok {
		voice.SetPeriod(t.Voice, p.(period.Period))
	}
	if delta, ok := t.periodDelta.Get(); ok {
		voice.SetPeriodDelta(t.Voice, delta.(period.Delta))
	}
	if vol, ok := t.vol.Get(); ok {
		voice.SetVolume(t.Voice, vol.(volume.Volume))
	}
	if pos, ok := t.pos.Get(); ok {
		voice.SetPos(t.Voice, pos.(sampling.Pos))
	}
	if pan, ok := t.pan.Get(); ok {
		voice.SetPan(t.Voice, pan.(panning.Position))
	}
	if pos, ok := t.volEnv.pos.Get(); ok {
		voice.SetVolumeEnvelopePosition(t.Voice, pos.(int))
	}
	if enabled, ok := t.volEnv.enabled.Get(); ok {
		voice.EnableVolumeEnvelope(t.Voice, enabled.(bool))
	}
	if pos, ok := t.pitchEnv.pos.Get(); ok {
		voice.SetPitchEnvelopePosition(t.Voice, pos.(int))
	}
	if enabled, ok := t.pitchEnv.enabled.Get(); ok {
		voice.EnablePitchEnvelope(t.Voice, enabled.(bool))
	}
	if pos, ok := t.panEnv.pos.Get(); ok {
		voice.SetPanEnvelopePosition(t.Voice, pos.(int))
	}
	if enabled, ok := t.panEnv.enabled.Get(); ok {
		voice.EnablePanEnvelope(t.Voice, enabled.(bool))
	}
	if pos, ok := t.filterEnv.pos.Get(); ok {
		voice.SetFilterEnvelopePosition(t.Voice, pos.(int))
	}
	if enabled, ok := t.filterEnv.enabled.Get(); ok {
		voice.EnableFilterEnvelope(t.Voice, enabled.(bool))
	}
	// Phase transition last so it sees the freshly applied state.
	if mode, ok := t.playing.Get(); ok {
		switch mode.(playingMode) {
		case playingModeAttack:
			t.Voice.Attack()
		case playingModeRelease:
			t.Voice.Release()
		}
	}
	if _, ok := t.fadeout.Get(); ok {
		t.Voice.Fadeout()
	}
}

// GetVoice returns the voice this transaction operates on.
func (t *txn) GetVoice() voice.Voice {
	return t.Voice
}

// Clone returns a shallow copy of the transaction (pending values are shared
// by value; the underlying Voice reference is shared).
func (t *txn) Clone() voice.Transaction {
	c := *t
	return &c
}
package dt
import (
"math"
"reflect"
)
// MapReflectType maps a reflect.Kind onto the corresponding dt Type.
// Bools, every integer/unsigned/float kind, and strings are supported;
// any other kind yields InvalidType.
func MapReflectType(p reflect.Kind) Type {
	switch p {
	case reflect.Bool:
		return BoolType
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
		reflect.Float32, reflect.Float64:
		return NumberType
	case reflect.String:
		return StringType
	}
	return InvalidType
}
// invalidReflectValue is the zero reflect.Value returned on any conversion failure.
var invalidReflectValue = reflect.Value{}

// reflectTypes maps each supported numeric kind to a concrete reflect.Type,
// used for size queries and as Convert targets.
var reflectTypes = map[reflect.Kind]reflect.Type{
	reflect.Int:     reflect.TypeOf(1),
	reflect.Int8:    reflect.TypeOf(int8(1)),
	reflect.Int16:   reflect.TypeOf(int16(1)),
	reflect.Int32:   reflect.TypeOf(int32(1)),
	reflect.Int64:   reflect.TypeOf(int64(1)),
	reflect.Uint:    reflect.TypeOf(uint(1)),
	reflect.Uint8:   reflect.TypeOf(uint8(1)),
	reflect.Uint16:  reflect.TypeOf(uint16(1)),
	reflect.Uint32:  reflect.TypeOf(uint32(1)),
	reflect.Uint64:  reflect.TypeOf(uint64(1)),
	reflect.Float32: reflect.TypeOf(float32(1)),
	reflect.Float64: reflect.TypeOf(float64(1)),
}
func CheckOverflowFloat(x float64, k reflect.Kind) bool {
switch k {
case reflect.Float32:
if x < 0 {
x = -x
}
return math.MaxFloat32 < x && x <= math.MaxFloat64
case reflect.Float64:
return false
}
panic("invalid kind")
}
func CheckOverflowInt(x int64, k reflect.Kind) bool {
switch k {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
bitSize := reflectTypes[k].Size() * 8
trunc := (x << (64 - bitSize)) >> (64 - bitSize)
return x != trunc
}
panic("invalid kind")
}
func CheckOverflowUInt(x uint64, k reflect.Kind) bool {
switch k {
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
bitSize := reflectTypes[k].Size() * 8
trunc := (x << (64 - bitSize)) >> (64 - bitSize)
return x != trunc
}
panic("invalid kind")
}
// ConvertToReflectType converts a dt Value (bool, string, or *GenericNumber)
// into a reflect.Value of the requested kind. It returns (zero Value, false)
// on any mismatch, overflow, or panic during conversion; booleans and strings
// convert only to their own kinds, numbers to any numeric kind that can hold
// them without overflow.
func ConvertToReflectType(v Value, toType reflect.Kind) (value reflect.Value, ok bool) {
	// Any panic from the reflect machinery is translated into a (zero, false)
	// result via the named return values.
	defer func() {
		v := recover()
		if v != nil {
			value = invalidReflectValue
			ok = false
		}
	}()
	switch vv := v.(type) {
	case bool:
		if toType == reflect.Bool {
			return reflect.ValueOf(vv), true
		} else {
			return invalidReflectValue, false
		}
	case string:
		if toType == reflect.String {
			return reflect.ValueOf(vv), true
		} else {
			return invalidReflectValue, false
		}
	case *GenericNumber:
		switch toType {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			// Representability check, then an overflow check against the
			// target width, then Convert to the concrete reflect type.
			if !vv.IsInt64() {
				return invalidReflectValue, false
			}
			vvv := vv.Int64()
			if CheckOverflowInt(vvv, toType) {
				return invalidReflectValue, false
			}
			return reflect.ValueOf(vvv).Convert(reflectTypes[toType]), true
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			if !vv.IsUInt64() {
				return invalidReflectValue, false
			}
			vvv := vv.UInt64()
			if CheckOverflowUInt(vvv, toType) {
				return invalidReflectValue, false
			}
			return reflect.ValueOf(vvv).Convert(reflectTypes[toType]), true
		case reflect.Float32, reflect.Float64:
			if !vv.IsFloat64() {
				return invalidReflectValue, false
			}
			vvv := vv.Float64()
			if CheckOverflowFloat(vvv, toType) {
				return invalidReflectValue, false
			}
			return reflect.ValueOf(vvv).Convert(reflectTypes[toType]), true
		default:
			return invalidReflectValue, false
		}
	default:
		return invalidReflectValue, false
	}
}
package image2d
// GetR returns the red value of the pixel at (x,y).
func (img *Image2D) GetR(x, y int) uint8 {
	return img.data[img.getIdx(x, y)]
}

// GetG returns the green value of the pixel at (x,y).
func (img *Image2D) GetG(x, y int) uint8 {
	return img.data[img.getIdx(x, y)+1]
}

// GetB returns the blue value of the pixel at (x,y).
func (img *Image2D) GetB(x, y int) uint8 {
	return img.data[img.getIdx(x, y)+2]
}

// GetA returns the alpha value of the pixel at (x,y).
func (img *Image2D) GetA(x, y int) uint8 {
	return img.data[img.getIdx(x, y)+3]
}

// GetRGB returns the RGB values of the pixel at (x,y).
func (img *Image2D) GetRGB(x, y int) (uint8, uint8, uint8) {
	i := img.getIdx(x, y)
	return img.data[i], img.data[i+1], img.data[i+2]
}

// GetRGBA returns the RGBA value of the pixel at (x,y).
func (img *Image2D) GetRGBA(x, y int) (uint8, uint8, uint8, uint8) {
	i := img.getIdx(x, y)
	return img.data[i], img.data[i+1], img.data[i+2], img.data[i+3]
}
// SetR sets the red value of the pixel at (x,y).
func (img *Image2D) SetR(x, y int, r uint8) {
idx := img.getIdx(x, y)
img.data[idx] = r
}
// SetG sets the green value of the pixel at (x,y).
func (img *Image2D) SetG(x, y int, g uint8) {
idx := img.getIdx(x, y)
img.data[idx+1] = g
}
// SetB sets the blue value of the pixel at (x,y).
func (img *Image2D) SetB(x, y int, b uint8) {
idx := img.getIdx(x, y)
img.data[idx+2] = b
}
// SetA sets the alpha value of the pixel at (x,y).
func (img *Image2D) SetA(x, y int, a uint8) {
idx := img.getIdx(x, y)
img.data[idx+3] = a
}
// SetRGB sets the RGB values of the pixel at (x,y).
func (img *Image2D) SetRGB(x, y int, r, g, b uint8) {
idx := img.getIdx(x, y)
img.data[idx] = r
img.data[idx+1] = g
img.data[idx+2] = b
}
// SetRGBA sets the RGBA values of the pixel at (x,y).
func (img *Image2D) SetRGBA(x, y int, r, g, b, a uint8) {
idx := img.getIdx(x, y)
img.data[idx] = r
img.data[idx+1] = g
img.data[idx+2] = b
img.data[idx+3] = a
} | pkg/view/image/image2d/pixel.go | 0.881507 | 0.653991 | pixel.go | starcoder |
package primers
import (
"bytes"
"math"
"strings"
"github.com/Open-Science-Global/poly/transform"
)
// For reference: https://www.sigmaaldrich.com/technical-documents/articles/biology/oligos-melting-temp.html
// thermodynamics stores enthalpy (dH, kcal/mol) and entropy (dS, cal/mol-K) values for nucleotide pairs
type thermodynamics struct{ H, S float64 }

/******************************************************************************
This section contains various penalties applied when calculating primer melting
temperature using the SantaLucia algorithm.
******************************************************************************/

// nearestNeighborsThermodynamics holds stacking dH/dS contributions for every
// dinucleotide step, as used by SantaLucia below.
var nearestNeighborsThermodynamics = map[string]thermodynamics{
	"AA": {-7.6, -21.3},
	"TT": {-7.6, -21.3},
	"AT": {-7.2, -20.4},
	"TA": {-7.2, -21.3},
	"CA": {-8.5, -22.7},
	"TG": {-8.5, -22.7},
	"GT": {-8.4, -22.4},
	"AC": {-8.4, -22.4},
	"CT": {-7.8, -21.0},
	"AG": {-7.8, -21.0},
	"GA": {-8.2, -22.2},
	"TC": {-8.2, -22.2},
	"CG": {-10.6, -27.2},
	"GC": {-9.8, -24.4},
	"GG": {-8.0, -19.9},
	"CC": {-8.0, -19.9},
}

var initialThermodynamicPenalty = thermodynamics{0.2, -5.7}   // penalty for initiating helix
var symmetryThermodynamicPenalty = thermodynamics{0, -1.4}    // penalty for self-complementarity
var terminalATThermodynamicPenalty = thermodynamics{2.2, 6.9} // penalty for 3' AT

/******************************************************************************
End of melting temp penalties section for SantaLucia melting temp algorithm.
******************************************************************************/
// SantaLucia calculates the melting point of a short DNA sequence (15-200 bp), using the Nearest Neighbors method [SantaLucia, J. (1998) PNAS, doi:10.1073/pnas.95.4.1460]
// Concentrations are molar; the returned values are melting temperature (C),
// total enthalpy dH (kcal/mol), and total entropy dS (cal/mol-K).
func SantaLucia(sequence string, primerConcentration, saltConcentration, magnesiumConcentration float64) (meltingTemp, dH, dS float64) {
	sequence = strings.ToUpper(sequence)
	const gasConstant = 1.9872 // gas constant (cal / mol - K)

	// Helix-initiation penalty is always applied.
	dH = initialThermodynamicPenalty.H
	dS = initialThermodynamicPenalty.S

	// Self-complementary sequences pick up a symmetry penalty and use a
	// different concentration divisor in the final temperature formula.
	symmetryFactor := 4.0
	if sequence == transform.ReverseComplement(sequence) {
		dH += symmetryThermodynamicPenalty.H
		dS += symmetryThermodynamicPenalty.S
		symmetryFactor = 1
	}

	// A terminal A or T at the 3' end carries its own penalty.
	switch sequence[len(sequence)-1] {
	case 'A', 'T':
		dH += terminalATThermodynamicPenalty.H
		dS += terminalATThermodynamicPenalty.S
	}

	// Salt correction (von Ahsen et al 1999); magnesium counts 140x sodium.
	saltEffect := saltConcentration + (magnesiumConcentration * 140)
	dS += 0.368 * float64(len(sequence)-1) * math.Log(saltEffect)

	// Accumulate nearest-neighbor contributions over every dinucleotide step.
	for i := 0; i+1 < len(sequence); i++ {
		pair := nearestNeighborsThermodynamics[sequence[i:i+2]]
		dH += pair.H
		dS += pair.S
	}

	meltingTemp = dH*1000/(dS+gasConstant*math.Log(primerConcentration/symmetryFactor)) - 273.15
	return meltingTemp, dH, dS
}
// MarmurDoty calculates the melting point of an extremely short DNA sequence (<15 bp) using a modified Marmur Doty formula [Marmur J & Doty P (1962). Determination of the base composition of deoxyribonucleic acid from its thermal denaturation temperature. J Mol Biol, 5, 109-118.]
// A/T bases contribute 2 degrees each, C/G bases 4 degrees, minus a 7-degree offset.
func MarmurDoty(sequence string) float64 {
	sequence = strings.ToUpper(sequence)
	var weak, strong float64 // weak = A/T count, strong = C/G count
	for _, base := range sequence {
		switch base {
		case 'A', 'T':
			weak++
		case 'C', 'G':
			strong++
		}
	}
	return 2*weak + 4*strong - 7.0
}
// MeltingTemp calls SantaLucia with default inputs for primer and salt concentration.
func MeltingTemp(sequence string) float64 {
	const (
		defaultPrimerConcentration = 500e-9 // 500 nM (nanomolar) primer concentration
		defaultSaltConcentration   = 50e-3  // 50 mM (millimolar) sodium concentration
		defaultMagnesium           = 0.0    // 0 mM (millimolar) magnesium concentration
	)
	tm, _, _ := SantaLucia(sequence, defaultPrimerConcentration, defaultSaltConcentration, defaultMagnesium)
	return tm
}
/******************************************************************************
May 23 2021
Start of the De Bruijn stuff
=== Barcode basics ===
We're rapidly getting better at sequencing a lot of DNA. At their core, most
DNA sequencing technologies pool together many samples and sequence them all
at once. For example, let's say we have 2 samples of DNA whose true sequence
is as follows:
DNA-1 := ATGC
DNA-2 := AGGC
If we pooled these two samples together into a single tube, and sequenced
them, we would not be able to tell if ATGC came from DNA-1 or DNA-2. In order
to tell the difference, we would have to go through the process of DNA
barcoding. Let's attach(2) two small barcodes to each DNA fragment separately
in their own tubes and then pool them togehter:
Barcode-1 + DNA-1 = GC + ATGC = GCATGC
Barcode-2 + DNA-2 = AT + AGGC = ATAGGC
When we sequence this pool together, we will end up with two sequences,
GCATGC and ATAGGC. If we correlate the first 2 base pairs with the tube
the sample came from, we can derive DNA-1 is ATGC and DNA-2 is AGGC.
=== Redundancy and start sites ===
Now, let's say we have the need for N number of samples to be pooled
together. The minimal barcode length could be expressed as:
n = number of samples
b = bases required in a minimal barcode
4^b = n
OR
log4n = b
In our perfect case, we would only need 8 base pair barcodes to represent 65536
different samples. Reality is a little different, however.
1. Failure of DNA sequencers to accurately sequence the barcode, leading to
one barcode being mistaken for a different barcode
2. Misalignment of the barcode to the sequence. We cannot guarantee that the
DNA sequencer will begin sequencing our fragment at an exact base pair.
3. Misreading of sequence as barcode. If our barcode is only 8 base pairs, on
average, it will occur once within 65536 base pairs, and that occurrence may
be misread as a barcode.
These challenges force us to build a barcode that has the following features:
1. Any barcode must be different enough from any other barcode that there will
be no misreading, even with mutated base pairs.
2. Any barcode must be large enough that, on average, it will not occur in a
natural piece of DNA.
While the second feature is quite easy (use ~20-30 base pair barcodes), the
first can be challenging. When developing a large quantity of barcodes, how
do you guarantee that they are optimally distanced from each other so that
there will be no cross-talk?
=== Our solution to distanced barcodes ===
De Bruijn sequences are an interesting data structure where every possible
substring of length N occurs exactly once as a substring(1). For example, a De
Bruijn sequence of length 3 will only have ATG occur once in the entire
sequence.
By constructing a nucleobase De Bruijn sequence, and selecting barcodes from
within that De Bruijn sequence, we can guarantee that each barcode will never
share any N length substring, since it only occurs once within the whole De
Bruijn sequence.
For example, a nucleobase De Bruijn sequence of substring length 6 is 4101
base pairs long (4^n + (n-1)). You can generate 205 20 base pair barcodes with
each barcode guaranteed to never share any 6 base pairs. This makes it very
easy to unambiguously parse which samples came from where, while maintaining
a guarantee of optimal distancing between your barcodes.
Good luck with barcoding,
Keoni
(1) https://en.wikipedia.org/wiki/De_Bruijn_sequence
(2) Barcodes are usually added in a process called "ligation" using an enzyme
called ligase, which basically just glues together DNA fragments. Wikipedia
has a good introduction:
https://en.wikipedia.org/wiki/Ligation_(molecular_biology)
******************************************************************************/
// NucleobaseDeBruijnSequence generates a DNA De Bruijn sequence over the
// alphabet ATGC. A De Bruijn sequence contains every possible substring of a
// given length over its alphabet exactly once. The construction is the
// classic recursive Lyndon-word (FKM) algorithm, adapted from
// https://rosettacode.org/wiki/De_Bruijn_sequences#Go
func NucleobaseDeBruijnSequence(substringLength int) string {
	const alphabet = "ATGC"
	base := len(alphabet)
	// digits is the working word; entries are indexes into alphabet.
	digits := make([]byte, base*substringLength)
	var sequence []byte
	// extend emits, in lexicographic order, the Lyndon words whose lengths
	// divide substringLength; their concatenation is the de Bruijn cycle.
	var extend func(t, p int)
	extend = func(t, p int) {
		if t > substringLength {
			if substringLength%p == 0 {
				sequence = append(sequence, digits[1:p+1]...)
			}
			return
		}
		digits[t] = digits[t-p]
		extend(t+1, p)
		for d := int(digits[t-p] + 1); d < base; d++ {
			digits[t] = byte(d)
			extend(t+1, t)
		}
	}
	extend(1, 1)
	var builder strings.Builder
	for _, d := range sequence {
		builder.WriteByte(alphabet[d])
	}
	linear := builder.String()
	// The sequence is cyclic; append the first n-1 bases so every substring
	// of the cycle also appears in the linearized string.
	return linear + linear[:substringLength-1]
}
// CreateBarcodesWithBannedSequences creates a list of barcodes given a desired barcode length and
// the maximum subsequence length (maxSubSequence) that any two barcodes may share.
// Sequences may be marked as banned by passing a static list, `bannedSequences`, or, if more
// flexibility is needed, through a list of `bannedFunctions`. A banned sequence (or its reverse
// complement) will not appear within a barcode. Each `bannedFunctions` entry acts as an
// accept-predicate: a candidate window is kept only once every function returns true; otherwise
// the window keeps sliding one base pair at a time until one is found that satisfies it.
func CreateBarcodesWithBannedSequences(length int, maxSubSequence int, bannedSequences []string, bannedFunctions []func(string) bool) []string {
	var barcodes []string
	var start int
	var end int
	// Barcodes are overlapping windows cut from a De Bruijn sequence, so any two
	// barcodes share at most maxSubSequence-1 consecutive base pairs.
	debruijn := NucleobaseDeBruijnSequence(maxSubSequence)
	for barcodeNum := 0; (barcodeNum*(length-(maxSubSequence-1)))+length < len(debruijn); {
		start = barcodeNum * (length - (maxSubSequence - 1))
		end = start + length
		barcodeNum++
		for _, bannedSequence := range bannedSequences {
			// If the current deBruijn window contains the banned sequence, slide one base
			// pair ahead; if the window would run off the end, return what we have so far.
			for strings.Contains(debruijn[start:end], bannedSequence) {
				if end+1 > len(debruijn) {
					return barcodes
				}
				start++
				end++
				barcodeNum++
			}
			// Check reverse complement as well for the banned sequence.
			for strings.Contains(debruijn[start:end], transform.ReverseComplement(bannedSequence)) {
				if end+1 > len(debruijn) {
					return barcodes
				}
				start++
				end++
				barcodeNum++
			}
			// NOTE(review): after the window shifts, earlier-processed banned sequences are
			// not re-checked against the new window — confirm this is acceptable for the
			// intended inputs.
		}
		for _, bannedFunction := range bannedFunctions {
			// Slide one base pair at a time until the function accepts the window (returns
			// true); bail out when the end of the deBruijn sequence is reached.
			for !bannedFunction(debruijn[start:end]) {
				if end+1 > len(debruijn) {
					return barcodes
				}
				start++
				end++
				barcodeNum++
			}
		}
		barcodes = append(barcodes, debruijn[start:end])
	}
	return barcodes
}
// CreateBarcodes is a simplified version of CreateBarcodesWithBannedSequences with sane defaults.
func CreateBarcodes(length int, maxSubSequence int) []string {
return CreateBarcodesWithBannedSequences(length, maxSubSequence, []string{}, []func(string) bool{})
} | poly/primers/primers.go | 0.729905 | 0.523968 | primers.go | starcoder |
package lo
import "sync"
// synchronize serializes callbacks behind a single sync.Locker.
type synchronize struct {
	locker sync.Locker // mutex (or user-supplied locker) guarding Do
}

// Do executes cb while holding the locker.
// NOTE(review): cb runs via Try0 and the unlock is not deferred; this assumes
// Try0 never lets a panic escape — confirm against Try0's implementation.
func (s *synchronize) Do(cb func()) {
	s.locker.Lock()
	Try0(cb)
	s.locker.Unlock()
}
// Synchronize wraps the underlying callback in a mutex. It receives an
// optional mutex; when none is given a fresh sync.Mutex is used, and more
// than one argument is a programming error.
func Synchronize(opt ...sync.Locker) *synchronize {
	switch len(opt) {
	case 0:
		return &synchronize{locker: &sync.Mutex{}}
	case 1:
		return &synchronize{locker: opt[0]}
	default:
		panic("unexpected arguments")
	}
}
// Async executes a function in a goroutine and returns the result in a
// channel. The channel is unbuffered, so the goroutine blocks until the
// result is received.
func Async[A any](f func() A) chan A {
	out := make(chan A)
	go func() {
		result := f()
		out <- result
	}()
	return out
}
// Async0 executes a function in a goroutine and returns a channel that
// receives a single signal once the function finishes.
func Async0(f func()) chan struct{} {
	done := make(chan struct{})
	go func() {
		f()
		done <- struct{}{}
	}()
	return done
}
// Async1 is an alias to Async: it executes f in a goroutine and returns the
// result in an unbuffered channel.
func Async1[A any](f func() A) chan A {
	out := make(chan A)
	go func() {
		out <- f()
	}()
	return out
}
// Async2 has the same behavior as Async, but returns the 2 results as a tuple inside the channel.
// All AsyncN variants use an unbuffered channel: the goroutine blocks until
// the tuple is received.
func Async2[A any, B any](f func() (A, B)) chan Tuple2[A, B] {
	ch := make(chan Tuple2[A, B])
	go func() {
		ch <- T2(f())
	}()
	return ch
}

// Async3 has the same behavior as Async, but returns the 3 results as a tuple inside the channel.
func Async3[A any, B any, C any](f func() (A, B, C)) chan Tuple3[A, B, C] {
	ch := make(chan Tuple3[A, B, C])
	go func() {
		ch <- T3(f())
	}()
	return ch
}

// Async4 has the same behavior as Async, but returns the 4 results as a tuple inside the channel.
func Async4[A any, B any, C any, D any](f func() (A, B, C, D)) chan Tuple4[A, B, C, D] {
	ch := make(chan Tuple4[A, B, C, D])
	go func() {
		ch <- T4(f())
	}()
	return ch
}

// Async5 has the same behavior as Async, but returns the 5 results as a tuple inside the channel.
func Async5[A any, B any, C any, D any, E any](f func() (A, B, C, D, E)) chan Tuple5[A, B, C, D, E] {
	ch := make(chan Tuple5[A, B, C, D, E])
	go func() {
		ch <- T5(f())
	}()
	return ch
}

// Async6 has the same behavior as Async, but returns the 6 results as a tuple inside the channel.
func Async6[A any, B any, C any, D any, E any, F any](f func() (A, B, C, D, E, F)) chan Tuple6[A, B, C, D, E, F] {
	ch := make(chan Tuple6[A, B, C, D, E, F])
	go func() {
		ch <- T6(f())
	}()
	return ch
} | vendor/github.com/samber/lo/concurrency.go | 0.764364 | 0.438725 | concurrency.go | starcoder
package base62
import (
"fmt"
"math"
)
// Base62 alphabet, mapped from number to string, and from rune (char) to number.
// runeToNumber is the exact inverse of numberToString (digits, then lowercase,
// then uppercase); the two tables must stay in sync.
var numberToString = [62]string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"}
var runeToNumber = map[rune]uint64{'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15, 'g': 16, 'h': 17, 'i': 18, 'j': 19, 'k': 20, 'l': 21, 'm': 22, 'n': 23, 'o': 24, 'p': 25, 'q': 26, 'r': 27, 's': 28, 't': 29, 'u': 30, 'v': 31, 'w': 32, 'x': 33, 'y': 34, 'z': 35, 'A': 36, 'B': 37, 'C': 38, 'D': 39, 'E': 40, 'F': 41, 'G': 42, 'H': 43, 'I': 44, 'J': 45, 'K': 46, 'L': 47, 'M': 48, 'N': 49, 'O': 50, 'P': 51, 'Q': 52, 'R': 53, 'S': 54, 'T': 55, 'U': 56, 'V': 57, 'W': 58, 'X': 59, 'Y': 60, 'Z': 61}
// ToB62 accepts a uint64 number as its sole argument, and returns a base62
// encoded string.
//
// The encoding uses exact integer division/remainder. The previous
// implementation derived the most significant digit position from
// math.Log/math.Pow; float64 rounding can misplace that digit for large
// inputs (uint64 values are not exactly representable above 2^53), so the
// float path has been removed entirely.
func ToB62(i uint64) (encodedString string) {
	// Digit characters in ascending value order; must stay in sync with
	// numberToString / runeToNumber above.
	const digits = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
	if i == 0 {
		return "0"
	}
	// A uint64 needs at most 11 base62 digits (62^11 > 2^64).
	var buf [11]byte
	pos := len(buf)
	// Emit digits least-significant first, filling buf from the right.
	for i > 0 {
		pos--
		buf[pos] = digits[i%62]
		i /= 62
	}
	return string(buf[pos:])
}
// FromB62 accepts a base62 encoded string, and returns the represented uint64
// number and an optional error if invalid characters were found in the
// provided string.
//
// Decoding uses Horner's rule with exact integer arithmetic; the previous
// math.Pow-based version lost precision for long inputs. The character->value
// mapping below is arithmetically identical to the runeToNumber table
// (digits 0-9, then 'a'-'z' as 10-35, then 'A'-'Z' as 36-61).
func FromB62(b62 string) (decodedNumber uint64, invalidError error) {
	var total uint64
	// Iterate runes (not bytes) so the reported index matches the original
	// rune-based behaviour for any multibyte input.
	for index, r := range []rune(b62) {
		var value uint64
		switch {
		case r >= '0' && r <= '9':
			value = uint64(r - '0')
		case r >= 'a' && r <= 'z':
			value = uint64(r-'a') + 10
		case r >= 'A' && r <= 'Z':
			value = uint64(r-'A') + 36
		default:
			return 0, fmt.Errorf("Invalid base62 character at index %d", index)
		}
		total = total*62 + value
	}
	return total, nil
}
package ast
// Node is the interface implemented by all nodes in the AST. It
// provides information about the span of this AST node in terms
// of location in the source file. It also provides information
// about all prior comments (attached as leading comments) and
// optional subsequent comments (attached as trailing comments).
type Node interface {
	Start() SourcePos
	End() SourcePos
	LeadingComments() []Comment
	TrailingComments() []Comment
}

// TerminalNode represents a leaf in the AST. These represent
// the tokens/lexemes in the protobuf language. Comments and
// whitespace are accumulated by the lexer and associated with
// the following lexed token.
type TerminalNode interface {
	Node
	// PopLeadingComment removes the first leading comment from this
	// token and returns it. If the node has no leading comments then
	// this method will panic.
	PopLeadingComment() Comment
	// PushTrailingComment appends the given comment to the token's
	// trailing comments.
	PushTrailingComment(Comment)
	// LeadingWhitespace returns any whitespace between the prior comment
	// (last leading comment), if any, or prior lexed token and this token.
	LeadingWhitespace() string
	// RawText returns the raw text of the token as read from the source.
	RawText() string
}

// Compile-time assertions that every token node type implements TerminalNode.
var _ TerminalNode = (*StringLiteralNode)(nil)
var _ TerminalNode = (*UintLiteralNode)(nil)
var _ TerminalNode = (*FloatLiteralNode)(nil)
var _ TerminalNode = (*IdentNode)(nil)
var _ TerminalNode = (*BoolLiteralNode)(nil)
var _ TerminalNode = (*SpecialFloatLiteralNode)(nil)
var _ TerminalNode = (*KeywordNode)(nil)
var _ TerminalNode = (*RuneNode)(nil)

// TokenInfo represents state accumulated by the lexer to associate with a
// token (aka terminal node).
type TokenInfo struct {
	// The location of the token in the source file.
	PosRange
	// The raw text of the token.
	RawText string
	// Any comments encountered preceding this token.
	LeadingComments []Comment
	// Any leading whitespace immediately preceding this token.
	LeadingWhitespace string
	// Any trailing comments following this token. This is usually
	// empty as tokens are created by the lexer immediately and
	// trailing comments are accounted for afterwards, added using
	// the node's PushTrailingComment method.
	TrailingComments []Comment
}
// asTerminalNode copies the accumulated lexer state into a terminalNode value
// suitable for embedding in a concrete token type.
func (t *TokenInfo) asTerminalNode() terminalNode {
	node := terminalNode{
		posRange:          t.PosRange,
		raw:               t.RawText,
		leadingComments:   t.LeadingComments,
		leadingWhitespace: t.LeadingWhitespace,
		trailingComments:  t.TrailingComments,
	}
	return node
}
// CompositeNode represents any non-terminal node in the tree. These
// are interior or root nodes and have child nodes.
type CompositeNode interface {
	Node
	// All AST nodes that are immediate children of this one.
	Children() []Node
}

// terminalNode contains book-keeping shared by all TerminalNode
// implementations. It is embedded in all such node types in this
// package. It provides the implementation of the TerminalNode
// interface.
type terminalNode struct {
	posRange          PosRange
	leadingComments   []Comment
	leadingWhitespace string
	trailingComments  []Comment
	raw               string
}

// Start returns the start of the token's position range.
func (n *terminalNode) Start() SourcePos {
	return n.posRange.Start
}

// End returns the end of the token's position range.
func (n *terminalNode) End() SourcePos {
	return n.posRange.End
}

// LeadingComments returns the comments lexed immediately before this token.
func (n *terminalNode) LeadingComments() []Comment {
	return n.leadingComments
}

// TrailingComments returns the comments attached after this token.
func (n *terminalNode) TrailingComments() []Comment {
	return n.trailingComments
}

// PopLeadingComment removes and returns the first leading comment.
// It panics (index out of range) when there are no leading comments,
// as documented on TerminalNode.
func (n *terminalNode) PopLeadingComment() Comment {
	c := n.leadingComments[0]
	n.leadingComments = n.leadingComments[1:]
	return c
}

// PushTrailingComment appends c to this token's trailing comments.
func (n *terminalNode) PushTrailingComment(c Comment) {
	n.trailingComments = append(n.trailingComments, c)
}

// LeadingWhitespace returns the whitespace that preceded this token.
func (n *terminalNode) LeadingWhitespace() string {
	return n.leadingWhitespace
}

// RawText returns the token text exactly as read from the source.
func (n *terminalNode) RawText() string {
	return n.raw
}
// compositeNode contains book-keeping shared by all CompositeNode
// implementations. It is embedded in all such node types in this
// package. It provides the implementation of the CompositeNode
// interface.
type compositeNode struct {
	children []Node
}

// Children returns the immediate child nodes, in source order.
func (n *compositeNode) Children() []Node {
	return n.children
}

// Start delegates to the first child; it panics on a childless node.
func (n *compositeNode) Start() SourcePos {
	return n.children[0].Start()
}

// End delegates to the last child; it panics on a childless node.
func (n *compositeNode) End() SourcePos {
	return n.children[len(n.children)-1].End()
}

// LeadingComments delegates to the first child; it panics on a childless node.
func (n *compositeNode) LeadingComments() []Comment {
	return n.children[0].LeadingComments()
}

// TrailingComments delegates to the last child; it panics on a childless node.
func (n *compositeNode) TrailingComments() []Comment {
	return n.children[len(n.children)-1].TrailingComments()
}

// RuneNode represents a single rune in protobuf source. Runes
// are typically collected into tokens, but some runes stand on
// their own, such as punctuation/symbols like commas, semicolons,
// equals signs, open and close symbols (braces, brackets, angles,
// and parentheses), and periods/dots.
type RuneNode struct {
	terminalNode
	Rune rune
}

// NewRuneNode creates a new *RuneNode with the given properties.
func NewRuneNode(r rune, info TokenInfo) *RuneNode {
	return &RuneNode{
		terminalNode: info.asTerminalNode(),
		Rune:         r,
	}
}
// EmptyDeclNode represents an empty declaration in protobuf source.
// These amount to extra semicolons, with no actual content preceding
// the semicolon.
type EmptyDeclNode struct {
	compositeNode
	Semicolon *RuneNode
}

// NewEmptyDeclNode creates a new *EmptyDeclNode. The one argument must
// be non-nil.
func NewEmptyDeclNode(semicolon *RuneNode) *EmptyDeclNode {
	if semicolon == nil {
		panic("semicolon is nil")
	}
	return &EmptyDeclNode{
		compositeNode: compositeNode{
			children: []Node{semicolon},
		},
		Semicolon: semicolon,
	}
}

// The no-op marker methods below allow EmptyDeclNode to appear wherever an
// element is allowed — presumably satisfying fileElement/msgElement/etc.
// interfaces declared elsewhere in this package (not visible here).
func (e *EmptyDeclNode) fileElement() {}
func (e *EmptyDeclNode) msgElement() {}
func (e *EmptyDeclNode) extendElement() {}
func (e *EmptyDeclNode) oneOfElement() {}
func (e *EmptyDeclNode) enumElement() {}
func (e *EmptyDeclNode) serviceElement() {}
func (e *EmptyDeclNode) methodElement() {} | ast/node.go | 0.73412 | 0.513912 | node.go | starcoder
package graphql
import (
"github.com/graphql-go/graphql"
"github.com/graphql-go/graphql/language/ast"
)
// CoerceType converts ast.Type to graphql.Type. Named types resolve to a
// built-in scalar or to an entry in typMap; List and NonNull wrappers recurse
// on their element type. Unknown kinds (or unresolved names) yield nil.
func CoerceType(typ ast.Type, typMap map[string]graphql.Type) graphql.Type {
	switch typ.GetKind() {
	case "Named":
		name := typ.(*ast.Named).Name.Value
		if IsScalarType(name) {
			return CoerceScalarType(name)
		}
		if resolved, ok := typMap[name]; ok {
			return resolved
		}
		return nil
	case "List":
		inner := CoerceType(typ.(*ast.List).Type, typMap)
		return &graphql.List{OfType: inner}
	case "NonNull":
		inner := CoerceType(typ.(*ast.NonNull).Type, typMap)
		return &graphql.NonNull{OfType: inner}
	default:
		return nil
	}
}
// IsScalarType reports whether t names one of the built-in GraphQL scalar
// types (Int, String, Float, Boolean, ID).
func IsScalarType(t string) bool {
	return t == "Int" || t == "String" || t == "Float" || t == "Boolean" || t == "ID"
}
// CoerceScalarType maps a scalar type name to the corresponding
// graphql.Scalar singleton, or nil for an unrecognized name.
func CoerceScalarType(typ string) *graphql.Scalar {
	switch typ {
	case "Int":
		return graphql.Int
	case "Float":
		return graphql.Float
	case "String":
		return graphql.String
	case "Boolean":
		return graphql.Boolean
	case "ID":
		return graphql.ID
	default:
		return nil
	}
}
// GetInterfaceOrUnionType returns the interface or union type from a given
// Output type, unwrapping List and NonNull wrappers recursively. It returns
// nil when the underlying type is neither an interface nor a union.
func GetInterfaceOrUnionType(typ graphql.Output) graphql.Type {
	// Bind the asserted value in the type switch instead of re-asserting in
	// each case (the previous `switch typ.(type)` + `typ.(*T)` pattern did the
	// assertion twice per branch).
	switch t := typ.(type) {
	case *graphql.Interface:
		return t
	case *graphql.Union:
		return t
	case *graphql.List:
		return GetInterfaceOrUnionType(t.OfType)
	case *graphql.NonNull:
		return GetInterfaceOrUnionType(t.OfType)
	}
	return nil
}
// GetValue returns the value of val, or nil when val itself is nil.
func GetValue(val ast.Value) interface{} {
	if val == nil {
		return nil
	}
	return val.GetValue()
}
// GetAstStringValue returns the string value of an ast.StringValue object,
// or "" when val is not an ast.StringValue.
// NOTE(review): this asserts on the value type ast.StringValue, not
// *ast.StringValue — confirm callers pass values rather than pointers;
// otherwise the assertion always fails and "" is returned.
func GetAstStringValue(val interface{}) string {
	if val, ok := val.(ast.StringValue); ok {
		return val.Value
	}
	return ""
} | trigger/graphql/utils.go | 0.692538 | 0.431345 | utils.go | starcoder
package approvers
import "k8s.io/apimachinery/pkg/util/sets"
// NewApprovers creates a new Approvers for the given owners with no
// approvals and no assignees recorded yet.
func NewApprovers(owners Owners) Approvers {
	ap := Approvers{owners: owners}
	ap.approvers = sets.NewString()
	ap.assignees = sets.NewString()
	return ap
}
// Approvers is a struct that provides functionality with regard to approvals
// of a specific code change.
type Approvers struct {
	owners    Owners
	approvers sets.String // The keys of this map are normalized to lowercase.
	assignees sets.String // GitHub logins currently assigned to the pull-request.
}
// GetCCs gets the list of suggested approvers for a pull-request. It
// now considers current assignees as potential approvers. Here is how
// it works:
// - We find suggested approvers from all potential approvers, but
// remove those that are not useful considering current approvers and
// assignees. This only uses leaf approvers to find the closest
// approvers to the changes.
// - We find a subset of suggested approvers from current
// approvers, suggested approvers and assignees, but we remove those
// that are not useful considering suggested approvers and current
// approvers. This uses the full approvers list, and will result in root
// approvers to be suggested when they are assigned.
// We return the union of the two sets: suggested and suggested
// assignees.
// The goal of this second step is to only keep the assignees that are
// the most useful.
func (ap Approvers) GetCCs() []string {
	// Step 1: cover the changes using leaf approvers, skipping anyone who is
	// already an approver or assignee.
	currentApprovers := ap.GetCurrentApproversSet()
	approversAndAssignees := currentApprovers.Union(ap.assignees)
	randomizedApprovers := ap.owners.GetShuffledApprovers()
	leafReverseMap := GetReverseMap(ap.owners.GetLeafApprovers())
	suggested := ap.owners.KeepCoveringApprovers(leafReverseMap, approversAndAssignees, randomizedApprovers)
	// Step 2: from assignees (plus current and suggested approvers), keep only
	// those that still add coverage, this time over the full approvers map.
	approversAndSuggested := currentApprovers.Union(suggested)
	everyone := approversAndSuggested.Union(ap.assignees)
	fullReverseMap := GetReverseMap(ap.owners.GetApprovers())
	keepAssignees := ap.owners.KeepCoveringApprovers(fullReverseMap, approversAndSuggested, everyone.UnsortedList())
	// Return the sorted union of both steps.
	return suggested.Union(keepAssignees).List()
}
// GetCurrentApproversSet returns the set of approvers (login only, normalized
// to lower case). It returns the internal field directly, not a copy.
func (ap Approvers) GetCurrentApproversSet() sets.String {
	return ap.approvers
}

// AddApprover records one or more approver logins.
func (ap *Approvers) AddApprover(logins ...string) {
	ap.approvers.Insert(logins...)
}

// AddAssignee records one or more assignee logins.
func (ap *Approvers) AddAssignee(logins ...string) {
	ap.assignees.Insert(logins...)
} | approvers/approvers.go | 0.650023 | 0.404419 | approvers.go | starcoder
package internal
import (
"io"
"github.com/pkg/errors"
"gonum.org/v1/plot"
"gonum.org/v1/plot/plotter"
"gonum.org/v1/plot/plotutil"
"gonum.org/v1/plot/vg"
)
// Plotter knows how to draw a picture to a writer. It is stateless; all
// inputs are passed to its methods.
type Plotter struct {
}
// ToPlotType converts a string name to a known plot type. The empty string
// defaults to a bar graph; any other unrecognized name is an error.
func ToPlotType(s string) (PlotType, error) {
	switch s {
	case "", "bar":
		return PlotTypeBar, nil
	case "line":
		return PlotTypeLine, nil
	default:
		return PlotType(0), errors.New("unknown plot type " + s)
	}
}
// PlotType selects the kind of graph to render. The zero value is
// deliberately unused (the iota starts with a blank identifier) so that an
// uninitialized PlotType is invalid.
type PlotType int

const (
	_ PlotType = iota
	// PlotTypeBar is a bar graph
	PlotTypeBar
	// PlotTypeLine is a line graph
	PlotTypeLine
)
// Plot will write to out this plot: it builds the plot from the given lines
// and then renders it in imgFormat.
func (l *Plotter) Plot(log Logger, out io.Writer, imgFormat string, pt PlotType, title string, x string, y string, lines []PlotLine, uniqueKeys OrderedStringSet) error {
	p, err := l.createPlot(log, pt, title, x, y, lines, uniqueKeys.Order)
	if err != nil {
		return errors.Wrap(err, "unable to make plot")
	}
	if err := l.savePlot(out, p, imgFormat, lines, uniqueKeys); err != nil {
		return errors.Wrap(err, "unable to save plot")
	}
	return nil
}

// PlotLine is a line to plot. It has a name (used in the legend) and values for each x index. It assumes integer
// indexes.
type PlotLine struct {
	Name   string
	Values [][]float64 // one sample group per x index; groups are aggregated before plotting
}

// savePlot renders p and writes it to out in imageFormat, sized from the
// number of lines and unique keys (height is half the width).
// NOTE(review): the 30*lines*keys+290 width formula looks like a tuned
// heuristic — confirm before changing plot density.
func (l *Plotter) savePlot(out io.Writer, p *plot.Plot, imageFormat string, lines []PlotLine, set OrderedStringSet) error {
	x := float64(30*(len(lines))*(len(set.Items)) + 290)
	wt, err := p.WriterTo(vg.Points(x), vg.Points(x/2), imageFormat)
	if err != nil {
		return errors.Wrap(err, "unable to make plot writer")
	}
	if _, err := wt.WriteTo(out); err != nil {
		return errors.Wrap(err, "unable to write plotter to output")
	}
	return nil
}
// createPlot assembles a plot.Plot with the given title and axis labels, one
// plotter per line, and legend entries for plotters that implement
// plot.Thumbnailer.
func (l *Plotter) createPlot(log Logger, pt PlotType, title string, x string, y string, lines []PlotLine, nominalX []string) (*plot.Plot, error) {
	p, err := plot.New()
	if err != nil {
		return nil, errors.Wrap(err, "unable to create initial plot")
	}
	p.Title.Text = title
	p.Y.Label.Text = y
	p.X.Label.Text = x
	log.Log(2, "nominal x: %v", nominalX)
	p.NominalX(nominalX...)
	p.Legend.Top = true
	for i, line := range lines {
		pl, err := l.makePlotter(log, pt, lines, line, i)
		if err != nil {
			return nil, errors.Wrap(err, "unable to make plotter")
		}
		p.Add(pl)
		// Only plotters that can draw a thumbnail get a legend entry.
		if asT, ok := pl.(plot.Thumbnailer); ok {
			p.Legend.Add(line.Name, asT)
		}
	}
	return p, nil
}

// addBar builds a bar chart for line. Bars are 30pt wide and shifted by
// offset so that bars from different lines sit side by side; offset also
// selects the bar color from plotutil's palette.
func (l *Plotter) addBar(log Logger, line PlotLine, offset int, numLines int) (*plotter.BarChart, error) {
	w := vg.Points(30)
	log.Log(2, "adding line %s", line.Name)
	groupValues := aggregatePlotterValues(line.Values, meanAggregation)
	log.Log(2, "Values: %v", groupValues)
	bar, err := plotter.NewBarChart(plotter.YValues{XYer: groupValues}, w)
	if err != nil {
		return nil, errors.Wrap(err, "unable to make bar chart")
	}
	bar.LineStyle.Width = 0
	bar.Offset = w * vg.Points(float64(numLines/-2+offset))
	bar.Color = plotutil.Color(offset)
	return bar, nil
}
// addLine builds a line plotter for the given PlotLine, aggregating each
// sample group to its mean. The offset selects a distinct color from
// plotutil's palette.
func (l *Plotter) addLine(log Logger, line PlotLine, offset int) (*plotter.Line, error) {
	log.Log(2, "adding line %s", line.Name)
	groupValues := aggregatePlotterValues(line.Values, meanAggregation)
	log.Log(2, "Values: %v", groupValues)
	pline, err := plotter.NewLine(groupValues)
	if err != nil {
		// Fixed copy-paste from addBar: this path builds a line, not a bar chart.
		return nil, errors.Wrap(err, "unable to make line")
	}
	pline.LineStyle.Width = 1
	pline.Color = plotutil.Color(offset)
	return pline, nil
}
// makePlotter dispatches on the plot type: bar charts need the total line
// count for offset math, everything else falls back to a line plot.
func (l *Plotter) makePlotter(log Logger, pt PlotType, lines []PlotLine, line PlotLine, index int) (plot.Plotter, error) {
	switch pt {
	case PlotTypeBar:
		return l.addBar(log, line, index, len(lines))
	default:
		return l.addLine(log, line, index)
	}
}
// aggregatePlotterValues collapses each group of samples into a single XY
// point: X is the group index, Y is aggregation(samples).
func aggregatePlotterValues(f [][]float64, aggregation func([]float64) float64) plotter.XYer {
	// Pre-size the result; the previous version grew it by repeated append.
	ret := make(plotter.XYs, 0, len(f))
	for i, samples := range f {
		ret = append(ret, plotter.XY{
			X: float64(i),
			Y: aggregation(samples),
		})
	}
	return ret
}
// meanAggregation returns the arithmetic mean of vals, or 0 for an empty
// (or nil) slice.
func meanAggregation(vals []float64) float64 {
	if len(vals) == 0 {
		return 0
	}
	var total float64
	for _, v := range vals {
		total += v
	}
	return total / float64(len(vals))
}
package plotting
import (
. "github.com/WiseBird/genetic_algorithm"
"math"
pplot "code.google.com/p/plotinum/plot"
"code.google.com/p/plotinum/plotter"
"code.google.com/p/plotinum/plotutil"
"io"
"os"
"path/filepath"
"strings"
"code.google.com/p/plotinum/vg"
"code.google.com/p/plotinum/vg/vgeps"
"code.google.com/p/plotinum/vg/vgimg"
"code.google.com/p/plotinum/vg/vgpdf"
"code.google.com/p/plotinum/vg/vgsvg"
)
var (
	// Log10 is a sign-preserving log10: positive inputs map to log10(f),
	// negative inputs to -log10(|f|), and zero to zero.
	// NOTE(review): for -1 < f < 0 the result is positive (log10 of |f| is
	// negative, negated) — confirm this is the intended behaviour.
	Log10 = func(f float64) float64 {
		if f > 0 {
			return math.Log10(f)
		} else if f < 0 {
			return -1 * math.Log10(math.Abs(f))
		} else {
			return 0
		}
	}
	// CostsConverter turns a cost-per-generation slice into XY points with
	// the generation index on the X axis.
	CostsConverter = func(costs []float64) plotter.XYs {
		pts := make(plotter.XYs, len(costs))
		for i, cost := range costs {
			pts[i].X = float64(i)
			pts[i].Y = cost
		}
		return pts
	}
)
// canvas is a drawing surface that knows its own size and can serialize
// itself to a writer; every backend in createCanvas satisfies it.
type canvas interface {
	vg.Canvas
	Size() (w, h vg.Length)
	io.WriterTo
}

// Plotter accumulates plots and renders them stacked into a single image.
type Plotter struct {
	plots []*Plot
}
// NewPlotter returns an empty Plotter ready to accept plots via AddPlot.
func NewPlotter() *Plotter {
	return &Plotter{plots: make([]*Plot, 0, 1)}
}
// AddPlot appends a new titled plot to the plotter and returns it for
// further (fluent) configuration.
func (plotter *Plotter) AddPlot(title string) *Plot {
	plot := newPlot(plotter, title)
	plotter.plots = append(plotter.plots, plot)
	return plot
}

// Draw renders all plots stacked vertically into fileName (format inferred
// from the extension) and returns the statistics data of every provider,
// grouped per plot. It panics on any rendering or file error.
func (plotter *Plotter) Draw(widthInch, heightInch float64, fileName string) [][]StatisticsDataInterface {
	for _, p := range plotter.plots {
		// plotutil.AddLinePoints takes interleaved name/value pairs.
		plotData := make([]interface{}, 0, len(p.providers)*2)
		for _, provider := range p.providers {
			statisticsData := provider.Data()
			for _, dataSet := range provider.dataSets {
				plotData = append(plotData, dataSet.name)
				plotData = append(plotData, dataSet.values(statisticsData))
			}
		}
		err := plotutil.AddLinePoints(p.plot, plotData...)
		if err != nil {
			panic(err)
		}
	}
	w, h := vg.Inches(widthInch), vg.Inches(heightInch)
	c := plotter.createCanvas(fileName, len(plotter.plots), w, h)
	for i, p := range plotter.plots {
		plotter.draw(p.plot, i, c, w, h)
	}
	if err := plotter.saveFile(c, fileName); err != nil {
		panic(err)
	}
	// Collect the (cached) statistics data so callers can inspect it.
	data := make([][]StatisticsDataInterface, len(plotter.plots))
	for i, p := range plotter.plots {
		data[i] = make([]StatisticsDataInterface, len(p.providers))
		for j, provider := range p.providers {
			data[i][j] = provider.Data()
		}
	}
	return data
}

// createCanvas picks a backend from the file extension; the canvas is tall
// enough to stack all plots. It panics on an unsupported extension.
func (plotter *Plotter) createCanvas(fileName string, plots int, w, h vg.Length) canvas {
	h *= vg.Length(plots)
	switch ext := strings.ToLower(filepath.Ext(fileName)); ext {
	case ".eps":
		return vgeps.NewTitle(w, h, fileName)
	case ".jpg", ".jpeg":
		return vgimg.JpegCanvas{Canvas: vgimg.New(w, h)}
	case ".pdf":
		return vgpdf.New(w, h)
	case ".png":
		return vgimg.PngCanvas{Canvas: vgimg.New(w, h)}
	case ".svg":
		return vgsvg.New(w, h)
	case ".tiff":
		return vgimg.TiffCanvas{Canvas: vgimg.New(w, h)}
	default:
		panic("Unsupported file extension: " + ext)
	}
}

// draw renders one plot into its slot; slot ind counts from the top of the
// canvas downward.
func (plotter *Plotter) draw(plot *pplot.Plot, ind int, c canvas, w, h vg.Length) {
	_, canvasHeight := c.Size()
	da := pplot.DrawArea{
		Canvas: c,
		Rect: pplot.Rect{
			Min:  pplot.Point{0, canvasHeight - h*vg.Length(ind+1)},
			Size: pplot.Point{w, h},
		},
	}
	plot.Draw(da)
}
// saveFile writes the rendered canvas to fileName. The file is closed on
// every path; a close error on the success path is returned to the caller.
func (plotter *Plotter) saveFile(c canvas, fileName string) (err error) {
	f, err := os.Create(fileName)
	if err != nil {
		return err
	}
	if _, err = c.WriteTo(f); err != nil {
		// Previously the file was leaked on a write error; close it
		// best-effort and let the write error take precedence.
		f.Close()
		return err
	}
	return f.Close()
}
// Plot is one chart in the Plotter: a plotinum plot plus the data providers
// that feed it.
type Plot struct {
	plotter   *Plotter
	plot      *pplot.Plot
	providers []*plotDataProvider
}

// newPlot builds a plot with default labels ("Generations"/"Cost") and a
// top-positioned legend. It panics if plotinum fails to allocate the plot.
func newPlot(plotter *Plotter, title string) *Plot {
	p := new(Plot)
	p.plotter = plotter
	p.providers = make([]*plotDataProvider, 0, 1)
	var err error
	p.plot, err = pplot.New()
	if err != nil {
		panic(err)
	}
	p.plot.Title.Text = title
	p.plot.X.Label.Text = "Generations"
	p.plot.Y.Label.Text = "Cost"
	p.plot.Legend.Top = true
	return p
}

// Title sets the plot title and returns the plot for chaining.
func (p *Plot) Title(title string) *Plot {
	p.plot.Title.Text = title
	return p
}

// XLabel sets the X axis label and returns the plot for chaining.
func (p *Plot) XLabel(label string) *Plot {
	p.plot.X.Label.Text = label
	return p
}

// YLabel sets the Y axis label and returns the plot for chaining.
func (p *Plot) YLabel(label string) *Plot {
	p.plot.Y.Label.Text = label
	return p
}

// AddDataProvider attaches a provider that lazily runs the optimizer to
// produce its statistics data.
func (p *Plot) AddDataProvider(optimizer OptimizerInterface) *plotDataProvider {
	plotDataProvider := newPlotDataProvider(p, optimizer, nil)
	p.providers = append(p.providers, plotDataProvider)
	return plotDataProvider
}

// AddData attaches a provider backed by already-computed statistics data.
func (p *Plot) AddData(data StatisticsDataInterface) *plotDataProvider {
	plotDataProvider := newPlotDataProvider(p, nil, data)
	p.providers = append(p.providers, plotDataProvider)
	return plotDataProvider
}

// Done returns to the owning Plotter (fluent API).
func (p *Plot) Done() *Plotter {
	return p.plotter
}

// InnerPlot exposes the underlying plotinum plot.
// NOTE(review): the title parameter is unused; it is kept only for
// backward compatibility with existing callers.
func (p *Plot) InnerPlot(title string) *pplot.Plot {
	return p.plot
}
// plotDataProvider supplies statistics data to a Plot, either from an
// optimizer run or from pre-computed data, and owns the data sets derived
// from it.
type plotDataProvider struct {
	plot           *Plot
	optimizer      OptimizerInterface      // run lazily when statisticsData is nil
	statisticsData StatisticsDataInterface // cached result; see Data()
	dataSets       []*plotDataSet
}

// newPlotDataProvider wires a provider to its plot; exactly one of optimizer
// or statisticsData is expected to be non-nil (see Plot.AddDataProvider and
// Plot.AddData).
func newPlotDataProvider(plot *Plot, optimizer OptimizerInterface, statisticsData StatisticsDataInterface) *plotDataProvider {
	provider := new(plotDataProvider)
	provider.plot = plot
	provider.optimizer = optimizer
	provider.statisticsData = statisticsData
	return provider
}

// AddDataSet registers a named data set whose points are produced by
// extracter, and returns it for further configuration.
func (p *plotDataProvider) AddDataSet(name string, extracter DataExtracter) *plotDataSet {
	dataSet := newPlotDataSet(p, name, extracter)
	p.dataSets = append(p.dataSets, dataSet)
	return dataSet
}

// AddMinCostDataSet adds a "Min" data set plotting minimum cost per
// generation; the statistics data must be a StatisticsDataDefault.
func (p *plotDataProvider) AddMinCostDataSet() *plotDataSet {
	return p.AddDataSet("Min", func(sa StatisticsDataInterface) plotter.XYs {
		sda, ok := sa.(StatisticsDataDefault)
		if !ok {
			panic("Expects StatisticsDataDefault")
		}
		return CostsConverter(sda.MinCosts())
	})
}

// AddMeanCostDataSet adds a "Mean" data set plotting mean cost per
// generation; the statistics data must be a StatisticsDataDefault.
func (p *plotDataProvider) AddMeanCostDataSet() *plotDataSet {
	return p.AddDataSet("Mean", func(sa StatisticsDataInterface) plotter.XYs {
		sda, ok := sa.(StatisticsDataDefault)
		if !ok {
			panic("Expects StatisticsDataDefault")
		}
		return CostsConverter(sda.MeanCosts())
	})
}

// Data returns the statistics data, running the optimizer once on first use
// and caching the result. The optimizer's first return value is discarded.
func (p *plotDataProvider) Data() StatisticsDataInterface {
	if p.statisticsData == nil {
		_, p.statisticsData = p.optimizer.Optimize()
	}
	return p.statisticsData
}

// Done returns to the owning Plot (fluent API).
func (p *plotDataProvider) Done() *Plot {
	return p.plot
}

// DataExtracter turns statistics data into plottable XY points.
type DataExtracter func(StatisticsDataInterface) plotter.XYs

// ValueConverter maps a single coordinate value (e.g. Log10 above).
type ValueConverter func(float64) float64

// plotDataSet is one named series within a provider, with optional per-axis
// value converters.
type plotDataSet struct {
	provider   *plotDataProvider
	name       string
	extracter  DataExtracter
	xConverter ValueConverter // optional; applied to every X value
	yConverter ValueConverter // optional; applied to every Y value
}

// newPlotDataSet builds a data set bound to its provider.
func newPlotDataSet(provider *plotDataProvider, name string, extracter DataExtracter) *plotDataSet {
	dataSet := new(plotDataSet)
	dataSet.provider = provider
	dataSet.name = name
	dataSet.extracter = extracter
	return dataSet
}

// Name overrides the legend name and returns the data set for chaining.
func (dataSet *plotDataSet) Name(name string) *plotDataSet {
	dataSet.name = name
	return dataSet
}

// XConverter sets the X-axis value converter and returns the data set.
func (dataSet *plotDataSet) XConverter(converter ValueConverter) *plotDataSet {
	dataSet.xConverter = converter
	return dataSet
}

// YConverter sets the Y-axis value converter and returns the data set.
func (dataSet *plotDataSet) YConverter(converter ValueConverter) *plotDataSet {
	dataSet.yConverter = converter
	return dataSet
}
// values extracts the raw XY points for this data set and applies the
// optional coordinate converters in place.
func (dataSet *plotDataSet) values(statisticsData StatisticsDataInterface) plotter.XYs {
	xys := dataSet.extracter(statisticsData)
	if conv := dataSet.xConverter; conv != nil {
		for i := range xys {
			xys[i].X = conv(xys[i].X)
		}
	}
	if conv := dataSet.yConverter; conv != nil {
		for i := range xys {
			xys[i].Y = conv(xys[i].Y)
		}
	}
	return xys
}
// Done returns to the owning provider (fluent API).
func (dataSet *plotDataSet) Done() *plotDataProvider {
	return dataSet.provider
} | plotting/plotter.go | 0.689724 | 0.400486 | plotter.go | starcoder
package geom
//go:generate goderive .
import (
"errors"
"fmt"
"math"
)
// A Layout describes the meaning of an N-dimensional coordinate. Layout(N) for
// N > 4 is a valid layout, in which case the first dimensions are interpreted
// to be X, Y, Z, and M and extra dimensions have no special meaning. M values
// are considered part of a linear referencing system (e.g. classical time or
// distance along a path). 1-dimensional layouts are not supported.
type Layout int

const (
	// NoLayout is an unknown layout
	NoLayout Layout = iota
	// XY is a 2D layout (X and Y)
	XY
	// XYZ is 3D layout (X, Y, and Z)
	XYZ
	// XYM is a 2D layout with an M value
	XYM
	// XYZM is a 3D layout with an M value
	XYZM
)
// An ErrLayoutMismatch is returned when geometries with different layouts
// cannot be combined.
type ErrLayoutMismatch struct {
	Got  Layout
	Want Layout
}

// Error implements the error interface.
func (e ErrLayoutMismatch) Error() string {
	return fmt.Sprintf("geom: layout mismatch, got %s, want %s", e.Got, e.Want)
}

// An ErrStrideMismatch is returned when the stride does not match the expected
// stride.
type ErrStrideMismatch struct {
	Got  int
	Want int
}

// Error implements the error interface.
func (e ErrStrideMismatch) Error() string {
	return fmt.Sprintf("geom: stride mismatch, got %d, want %d", e.Got, e.Want)
}

// An ErrUnsupportedLayout is returned when the requested layout is not
// supported.
type ErrUnsupportedLayout Layout

// Error implements the error interface.
func (e ErrUnsupportedLayout) Error() string {
	return fmt.Sprintf("geom: unsupported layout %s", Layout(e))
}

// An ErrUnsupportedType is returned when the requested type is not supported.
type ErrUnsupportedType struct {
	Value interface{}
}

// Error implements the error interface.
func (e ErrUnsupportedType) Error() string {
	return fmt.Sprintf("geom: unsupported type %T", e.Value)
}
// A Coord represents an N-dimensional coordinate.
type Coord []float64

// Clone returns a deep copy of c. deriveCloneCoord is generated by goderive
// (see the go:generate directive at the top of this file).
func (c Coord) Clone() Coord {
	return deriveCloneCoord(c)
}

// X returns the x coordinate of the coordinate. X is assumed to be the first
// ordinate.
func (c Coord) X() float64 {
	return c[0]
}

// Y returns the y coordinate of the coordinate. Y is assumed to be the second
// ordinate.
func (c Coord) Y() float64 {
	return c[1]
}

// Set copies the ordinate data from the other coord to this coord. Only
// min(len(c), len(other)) ordinates are copied.
func (c Coord) Set(other Coord) {
	copy(c, other)
}
// Equal reports whether c and other represent the same coordinate under
// the given layout. Only the first layout.Stride() ordinates (or fewer,
// if c is shorter) are compared, and NaN ordinates are considered equal
// to each other. Both coords are assumed to use the provided layout.
func (c Coord) Equal(layout Layout, other Coord) bool {
	numOrds := len(c)
	if stride := layout.Stride(); stride < numOrds {
		numOrds = stride
	}
	// If either coord is shorter than the layout's stride, the lengths
	// themselves must match for the coords to be considered equal.
	if (len(c) < layout.Stride() || len(other) < layout.Stride()) && len(c) != len(other) {
		return false
	}
	for i := 0; i < numOrds; i++ {
		a, b := c[i], other[i]
		if math.IsNaN(a) != math.IsNaN(b) {
			return false
		}
		if !math.IsNaN(a) && a != b {
			return false
		}
	}
	return true
}
// T is a generic interface implemented by all geometry types.
type T interface {
	// Layout returns the coordinate layout of the geometry.
	Layout() Layout
	// Stride returns the number of ordinates per coordinate.
	Stride() int
	// Bounds returns the bounds of the geometry.
	Bounds() *Bounds
	// FlatCoords returns the geometry's coordinates as a flat slice.
	FlatCoords() []float64
	// Ends returns end indexes into FlatCoords (multi-part geometries).
	Ends() []int
	// Endss returns nested end indexes (multi-multi-part geometries).
	Endss() [][]int
	// SRID returns the spatial reference system identifier.
	SRID() int
}
// MIndex returns the index of the M dimension, or -1 if the l does not have an
// M dimension.
func (l Layout) MIndex() int {
	switch l {
	case NoLayout, XY, XYZ:
		return -1
	case XYM:
		return 2
	default:
		// XYZM and any higher-dimensional Layout(N) keep M at index 3.
		return 3
	}
}
// Stride returns l's number of dimensions.
func (l Layout) Stride() int {
	switch l {
	case NoLayout:
		return 0
	case XY:
		return 2
	case XYZ, XYM:
		return 3
	case XYZM:
		return 4
	default:
		// Layout(N) for N > 4: the layout value itself is the stride.
		return int(l)
	}
}
// String returns a human-readable string representing l.
func (l Layout) String() string {
	if l >= NoLayout && l <= XYZM {
		return [...]string{"NoLayout", "XY", "XYZ", "XYM", "XYZM"}[l]
	}
	// Unknown layouts render as Layout(N).
	return fmt.Sprintf("Layout(%d)", int(l))
}
// ZIndex returns the index of l's Z dimension, or -1 if l does not have a Z
// dimension.
func (l Layout) ZIndex() int {
	if l == NoLayout || l == XY || l == XYM {
		return -1
	}
	return 2
}
// Must panics if err is not nil, otherwise it returns g.
func Must(g T, err error) T {
	if err == nil {
		return g
	}
	panic(err)
}
// Sentinel errors used when validating flat coordinates, ends, and
// endss data against a geometry's layout and stride.
var (
	errIncorrectEnd         = errors.New("geom: incorrect end")
	errLengthStrideMismatch = errors.New("geom: length/stride mismatch")
	errMisalignedEnd        = errors.New("geom: misaligned end")
	errNonEmptyEnds         = errors.New("geom: non-empty ends")
	errNonEmptyEndss        = errors.New("geom: non-empty endss")
	errNonEmptyFlatCoords   = errors.New("geom: non-empty flatCoords")
	errOutOfOrderEnd        = errors.New("geom: out-of-order end")
)
package reldate
import (
"errors"
"strings"
"time"
)
const (
// The standard format for dates I find convient
YYYYMMDD = "2006-01-02"
// Version of this package
Version = "v0.0.2"
)
// finds the end of the month value (e.g. 28, 29, 30, 31)
func EndOfMonth(t1 time.Time) string {
location := t1.Location()
year := t1.Year()
month := t1.Month()
if month == 12 {
year++
}
month++
t2 := time.Date(year, month, 1, 0, 0, 0, 0, location)
return t2.Add(-time.Hour).Format(YYYYMMDD)
}
// weekdayOffset maps a weekday to its offset from Sunday (0 through 6).
// Values outside the valid weekday range yield 0.
func weekdayOffset(weekday time.Weekday) int {
	// time.Weekday already numbers the days Sunday=0 .. Saturday=6, so
	// the offset is simply the constant's numeric value.
	if weekday < time.Sunday || weekday > time.Saturday {
		return 0
	}
	return int(weekday)
}
// relativeWeekday returns the date of the requested weekday within the
// week containing t, where weeks run Sunday through Saturday. The error
// is nil for all seven valid weekday values of t.
func relativeWeekday(t time.Time, weekday time.Weekday) (time.Time, error) {
	current := t.Weekday()
	if current < time.Sunday || current > time.Saturday {
		return t, errors.New("Expecting Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, or Saturday.")
	}
	// Step back to Sunday, then forward to the requested weekday.
	return t.AddDate(0, 0, weekdayOffset(weekday)-weekdayOffset(current)), nil
}
// RelativeTime takes a time, an integer ammount (positive or negative)
// and a unit value (day of week, days, weeks, month, years) and
// computes the relative time in days from time returning a new
// time and error.
func RelativeTime(t time.Time, i int, u string) (time.Time, error) {
switch {
case strings.HasPrefix(u, "sun"):
return relativeWeekday(t, time.Sunday)
case strings.HasPrefix(u, "mon"):
return relativeWeekday(t, time.Monday)
case strings.HasPrefix(u, "tue"):
return relativeWeekday(t, time.Tuesday)
case strings.HasPrefix(u, "wed"):
return relativeWeekday(t, time.Wednesday)
case strings.HasPrefix(u, "thu"):
return relativeWeekday(t, time.Thursday)
case strings.HasPrefix(u, "fri"):
return relativeWeekday(t, time.Friday)
case strings.HasPrefix(u, "sat"):
return relativeWeekday(t, time.Saturday)
case strings.HasPrefix(u, "day"):
return t.AddDate(0, 0, i), nil
case strings.HasPrefix(u, "week"):
return t.AddDate(0, 0, 7*i), nil
case strings.HasPrefix(u, "month"):
return t.AddDate(0, i, 0), nil
case strings.HasPrefix(u, "year"):
return t.AddDate(i, 0, 0), nil
}
return t, errors.New("Time unit must be day(s), week(s), month(s) or year(s) or weekday name.")
} | reldate/reldate.go | 0.575707 | 0.583055 | reldate.go | starcoder |
package tcg
// Clear - fill whole buffer with White
func (b *Buffer) Clear() {
	for _, row := range b.buffer {
		for x := range row {
			row[x] = 0
		}
	}
}
// Invert pixels in the buffer
func (b *Buffer) Invert() {
	for _, row := range b.buffer {
		for x, v := range row {
			row[x] = ^v
		}
	}
}
// BitBltAllSrc - copy whole buffer into this buffer
func (b *Buffer) BitBltAllSrc(x, y int, from Buffer) {
	if x != 0 || y != 0 {
		b.BitBlt(x, y, from.Width, from.Height, from, 0, 0)
		return
	}
	// Fast path: with no offset, whole rows can be copied wholesale.
	for i := 0; i < from.Height && i < b.Height; i++ {
		copy(b.buffer[i], from.buffer[i])
	}
}
// BitBlt - copy part of buffer into this buffer
//
// Copies a width x height rectangle from `from`, starting at (xs, ys),
// into this buffer at (xd, yd). The loop conditions clip the copy
// against both the source and destination extents, so out-of-range
// portions are silently skipped.
func (b *Buffer) BitBlt(xd, yd, width, height int, from Buffer, xs, ys int) {
	for i := 0; i+ys < from.Height && i < height && i+yd < b.Height; i++ {
		for j := 0; j+xs < from.Width && j < width && j+xd < b.Width; j++ {
			b.Set(j+xd, i+yd, from.At(j+xs, i+ys))
		}
	}
}
// HFlip - horizontal flip image buffer
func (b *Buffer) HFlip() {
	// Swap row slices from the outside in; swapping headers avoids
	// copying any pixel data.
	for top, bottom := 0, b.Height-1; top < bottom; top, bottom = top+1, bottom-1 {
		b.buffer[top], b.buffer[bottom] = b.buffer[bottom], b.buffer[top]
	}
}
// VFlip - vertical flip image buffer
func (b *Buffer) VFlip() {
	for y := 0; y < b.Height; y++ {
		// Swap pixel columns from the outside in.
		for left, right := 0, b.Width-1; left < right; left, right = left+1, right-1 {
			leftColor, rightColor := b.At(left, y), b.At(right, y)
			b.Set(left, y, rightColor)
			b.Set(right, y, leftColor)
		}
	}
}
// VScroll - vertical scroll image buffer by cnt pixels, cnt > 0 - scroll down, cnt < 0 - up
func (b *Buffer) VScroll(cnt int) {
	// A row of zero bytes, used to blank the rows exposed by the scroll.
	zeroLine := make([]byte, widthInBytes(b.Width))
	if cnt > 0 {
		// Scroll down: copy rows bottom-up so each source row is read
		// before it is overwritten.
		for y := b.Height - 1; y > cnt-1; y-- {
			copy(b.buffer[y], b.buffer[y-cnt])
		}
		// clear rest
		for y := 0; y < cnt; y++ {
			copy(b.buffer[y], zeroLine)
		}
	} else if cnt < 0 {
		// Scroll up: copy rows top-down (y-cnt is below y since cnt < 0).
		for y := 0; y < b.Height+cnt; y++ {
			copy(b.buffer[y], b.buffer[y-cnt])
		}
		// clear rest
		for y := b.Height + cnt; y < b.Height; y++ {
			copy(b.buffer[y], zeroLine)
		}
	}
}
// HScroll - horizontal scroll image buffer by cnt pixels, cnt > 0 - scroll right, cnt < 0 - left
func (b *Buffer) HScroll(cnt int) {
	if cnt > 0 {
		// Scroll right: copy pixels right-to-left so sources are read
		// before being overwritten.
		for y := 0; y < b.Height; y++ {
			for x := b.Width - 1; x > cnt-1; x-- {
				b.Set(x, y, b.At(x-cnt, y))
			}
		}
		// clear rest (exposed columns are filled with White)
		for x := 0; x < cnt; x++ {
			b.VLine(x, 0, b.Height, White)
		}
	} else if cnt < 0 {
		// Scroll left: copy pixels left-to-right (x-cnt is to the right
		// of x since cnt < 0).
		for y := 0; y < b.Height; y++ {
			for x := 0; x < b.Width+cnt; x++ {
				b.Set(x, y, b.At(x-cnt, y))
			}
		}
		// clear rest
		for x := b.Width + cnt; x < b.Width; x++ {
			b.VLine(x, 0, b.Height, White)
		}
	}
}
package main
import (
"log"
"os"
"fmt"
"math"
"math/rand"
"time"
)
// Terminal expressions: the leaves of an expression tree simply echo
// one of the input coordinates.
func expr_x(x float64, y float64) float64 {
	return x
}
func expr_y(x float64, y float64) float64 {
	return y
}

// Single-operand expressions.
func expr_sin(e float64) float64 {
	return math.Sin(math.Pi * e)
}
func expr_cos(e float64) float64 {
	return math.Cos(math.Pi * e)
}
func expr_negate(e float64) float64 {
	return -e
}

// expr_sqrt is a sign-preserving square root: negative inputs yield the
// negated root of their magnitude, keeping the result real.
func expr_sqrt(e float64) float64 {
	if e >= 0.0 {
		return math.Sqrt(e)
	}
	return -math.Sqrt(-e)
}

// Double-operand expressions.

// expr_arith_mean returns the arithmetic mean of its operands.
func expr_arith_mean(e1 float64, e2 float64) float64 {
	return (e1 + e2) / 2
}

// expr_geo_mean returns a sign-preserving geometric mean.
func expr_geo_mean(e1 float64, e2 float64) float64 {
	return expr_sqrt(e1 * e2)
}

// expr_mult returns the product of its operands.
func expr_mult(e1 float64, e2 float64) float64 {
	return e1 * e2
}

// expr_max returns the larger operand.
func expr_max(e1 float64, e2 float64) float64 {
	if e1 > e2 {
		return e1
	}
	return e2
}

// expr_min returns the smaller operand.
func expr_min(e1 float64, e2 float64) float64 {
	if e1 > e2 {
		return e2
	}
	return e1
}
// Function signatures for the three arities of expression node.
type expr_terminal func(x float64, y float64) float64
type expr_single func(e float64) float64
type expr_double func(e1 float64, e2 float64) float64

// ExpressionEvaluator is a node of an expression tree that can be
// evaluated at a point (x, y).
type ExpressionEvaluator interface {
	evaluate(x float64, y float64) float64
}

// TerminalExpression is a leaf node wrapping a terminal function.
type TerminalExpression struct {
	expr expr_terminal
}

// SingleExpression applies a unary function to one subtree.
type SingleExpression struct {
	expr expr_single
	e    ExpressionEvaluator
}

// DoubleExpression applies a binary function to two subtrees.
type DoubleExpression struct {
	expr   expr_double
	e1, e2 ExpressionEvaluator
}

// evaluate returns the terminal function's value at (x, y).
func (expr TerminalExpression) evaluate(x float64, y float64) float64 {
	return expr.expr(x, y)
}

// evaluate evaluates the subtree, then applies the unary function.
func (expr SingleExpression) evaluate(x float64, y float64) float64 {
	e := expr.e.evaluate(x, y)
	return expr.expr(e)
}

// evaluate evaluates both subtrees, then applies the binary function.
func (expr DoubleExpression) evaluate(x float64, y float64) float64 {
	e1 := expr.e1.evaluate(x, y)
	e2 := expr.e2.evaluate(x, y)
	return expr.expr(e1, e2)
}
// greyscale renders e over the square [-1,1]x[-1,1] as a binary PGM
// ("P5") image of (2*scale+1) x (2*scale+1) pixels, written to
// test.pgm. Expression values are mapped linearly from [-1, 1] onto the
// 0-255 grey range. Any I/O failure terminates the program via
// log.Fatal, matching how the open error was already handled; the
// previous implementation silently ignored Write and Close errors.
func greyscale(e ExpressionEvaluator, scale int) {
	size := 2*scale + 1
	f, err := os.OpenFile("test.pgm", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := fmt.Fprintf(f, "P5 %d %d 255\n", size, size); err != nil {
		log.Fatal(err)
	}
	buf := make([]byte, size)
	for yi := -scale; yi <= scale; yi++ {
		for xi := -scale; xi <= scale; xi++ {
			x := float64(xi) / float64(scale)
			y := float64(yi) / float64(scale)
			// Map an expression value in [-1, 1] to a grey level in [0, 255].
			buf[xi+scale] = byte(127.5 + 127.5*e.evaluate(x, y))
		}
		if _, err := f.Write(buf); err != nil {
			log.Fatal(err)
		}
	}
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
}
// build_expression constructs a random expression tree of the given
// depth. depth == 1 yields a terminal leaf (x or y); otherwise a unary
// or binary operator is chosen uniformly across both operator sets and
// its subtrees are built at depth-1. Uses the global math/rand source.
func build_expression(depth int) ExpressionEvaluator {
	terminals := []expr_terminal{expr_x, expr_y}
	singles := []expr_single{expr_sin, expr_cos, expr_negate, expr_sqrt}
	doubles := []expr_double{expr_arith_mean, expr_geo_mean, expr_mult, expr_min, expr_max}
	if depth == 1 {
		return TerminalExpression{expr: terminals[rand.Intn(len(terminals))]}
	}
	// Derive the draw bounds from the slice lengths instead of the
	// previous hard-coded 2/4/5/9 so adding an operator cannot silently
	// desynchronize the selection logic.
	i := rand.Intn(len(singles) + len(doubles))
	if i < len(singles) {
		return SingleExpression{expr: singles[i], e: build_expression(depth - 1)}
	}
	return DoubleExpression{
		expr: doubles[i-len(singles)],
		e1:   build_expression(depth - 1),
		e2:   build_expression(depth - 1),
	}
}
// main seeds the global RNG from the clock, builds a random expression
// tree of depth 15, and renders it to test.pgm at scale 150 (301x301).
func main() {
	rand.Seed(time.Now().Unix());
	e := build_expression(15)
	greyscale(e, 150)
}
package option
import (
"github.com/dairaga/gs"
"github.com/dairaga/gs/funcs"
)
// From returns a Some wrapping v when ok is true, and a None otherwise.
func From[T any](v T, ok bool) gs.Option[T] {
	if !ok {
		return gs.None[T]()
	}
	return gs.Some(v)
}
// FromWithErr returns a Some with given v if given err is nil, or returns a None.
func FromWithErr[T any](v T, err error) gs.Option[T] {
	return From(v, err == nil)
}

// When returns a Some with given z if the condition p evaluates to true, or returns a None.
func When[T any](p funcs.Condition, z T) gs.Option[T] {
	return From(z, p())
}

// Unless returns a Some with given z if the condition p evaluates to false, or returns a None.
func Unless[T any](p funcs.Condition, z T) gs.Option[T] {
	return From(z, !p())
}
// -----------------------------------------------------------------------------
// TODO: refactor following functions to methods when go 1.19 releases.

// Fold returns the result of applying succ to the value of o if o is defined,
// or returns the given default value z.
func Fold[T, R any](o gs.Option[T], z R, succ funcs.Func[T, R]) R {
	return funcs.BuildUnit(o.Fetch, funcs.Id(z), succ)
}

// Collect returns a Some with the result of applying the partial function p
// if o is defined and its value satisfies p, or returns a None.
func Collect[T, R any](o gs.Option[T], p funcs.Partial[T, R]) gs.Option[R] {
	return funcs.BuildUnit(o.Fetch, gs.None[R], funcs.PartialTransform(p, From[R]))
}

// FlatMap returns the Option produced by applying op if o is defined, or
// returns a None.
func FlatMap[T, R any](o gs.Option[T], op funcs.Func[T, gs.Option[R]]) gs.Option[R] {
	return funcs.BuildUnit(o.Fetch, gs.None[R], op)
}

// Map returns a Some with the result of applying op if o is defined, or
// returns a None.
func Map[T, R any](o gs.Option[T], op funcs.Func[T, R]) gs.Option[R] {
	return funcs.BuildUnit(o.Fetch, gs.None[R], funcs.AndThen(op, gs.Some[R]))
}

// PartialMap returns a Some with the result of applying op if o is defined
// and its value satisfies op, or returns a None.
func PartialMap[T, R any](o gs.Option[T], op funcs.Partial[T, R]) gs.Option[R] {
	return funcs.BuildUnit(o.Fetch, gs.None[R], funcs.PartialTransform(op, From[R]))
}

// TryMap returns a Some with the result of applying op if o is defined and op
// converts the value to R without error, or returns a None.
func TryMap[T, R any](o gs.Option[T], op funcs.Try[T, R]) gs.Option[R] {
	return funcs.BuildUnit(o.Fetch, gs.None[R], funcs.TryRecover(op, FromWithErr[R]))
}

// Left returns a Left with the value of o if o is defined, or a Right with
// the given fallback z.
func Left[T, R any](o gs.Option[T], z R) gs.Either[T, R] {
	return funcs.BuildUnit(o.Fetch, funcs.UnitAndThen(funcs.Id(z), gs.Right[T, R]), gs.Left[T, R])
}

// Right returns a Right with the value of o if o is defined, or a Left with
// the given fallback z.
func Right[L, T any](o gs.Option[T], z L) gs.Either[L, T] {
	return funcs.BuildUnit(o.Fetch, funcs.UnitAndThen(funcs.Id(z), gs.Left[L, T]), gs.Right[L, T])
}
package kvcodec
import (
"fmt"
"reflect"
"strconv"
"strings"
)
// toString converts a scalar value (string, bool, signed/unsigned
// integer, or float) to its string representation. Any other kind
// yields an error.
func toString(in interface{}) (string, error) {
	inValue := reflect.ValueOf(in)
	switch inValue.Kind() {
	case reflect.String:
		return inValue.String(), nil
	case reflect.Bool:
		if inValue.Bool() {
			return "true", nil
		}
		return "false", nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return strconv.FormatInt(inValue.Int(), 10), nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return strconv.FormatUint(inValue.Uint(), 10), nil
	case reflect.Float32:
		// Precision -1: shortest representation that round-trips.
		return strconv.FormatFloat(inValue.Float(), 'f', -1, 32), nil
	case reflect.Float64:
		return strconv.FormatFloat(inValue.Float(), 'f', -1, 64), nil
	}
	// Build the error with format args rather than string concatenation.
	return "", fmt.Errorf("unable to cast %s to string", inValue.Type())
}
// toBool converts a scalar value to a bool. Strings accept "yes", "no",
// "" (false), plus anything strconv.ParseBool understands; numeric
// kinds are true when non-zero.
func toBool(in interface{}) (bool, error) {
	inValue := reflect.ValueOf(in)
	switch inValue.Kind() {
	case reflect.String:
		switch s := inValue.String(); s {
		case "yes":
			return true, nil
		case "no", "":
			return false, nil
		default:
			return strconv.ParseBool(s)
		}
	case reflect.Bool:
		return inValue.Bool(), nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return inValue.Int() != 0, nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return inValue.Uint() != 0, nil
	case reflect.Float32, reflect.Float64:
		return inValue.Float() != 0, nil
	}
	// Build the error with format args rather than string concatenation.
	return false, fmt.Errorf("unable to cast %s to bool", inValue.Type())
}
// toInt converts a scalar value to an int64. Empty or whitespace-only
// strings yield 0; other strings are parsed with strconv.ParseInt
// (base inferred from prefix, e.g. "0x10"). Bools map to 1/0; floats
// are truncated.
func toInt(in interface{}) (int64, error) {
	inValue := reflect.ValueOf(in)
	switch inValue.Kind() {
	case reflect.String:
		s := strings.TrimSpace(inValue.String())
		if s == "" {
			return 0, nil
		}
		return strconv.ParseInt(s, 0, 64)
	case reflect.Bool:
		if inValue.Bool() {
			return 1, nil
		}
		return 0, nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return inValue.Int(), nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return int64(inValue.Uint()), nil
	case reflect.Float32, reflect.Float64:
		return int64(inValue.Float()), nil
	}
	// Build the error with format args rather than string concatenation.
	return 0, fmt.Errorf("unable to cast %s to int", inValue.Type())
}
// toUint converts a scalar value to a uint64. Empty or whitespace-only
// strings yield 0; strings containing '.' are parsed as floats and
// truncated; other strings use strconv.ParseUint (base inferred from
// prefix). Bools map to 1/0; signed and float values use plain Go
// conversions.
func toUint(in interface{}) (uint64, error) {
	inValue := reflect.ValueOf(in)
	switch inValue.Kind() {
	case reflect.String:
		s := strings.TrimSpace(inValue.String())
		if s == "" {
			return 0, nil
		}
		// float input
		if strings.Contains(s, ".") {
			f, err := strconv.ParseFloat(s, 64)
			if err != nil {
				return 0, err
			}
			return uint64(f), nil
		}
		return strconv.ParseUint(s, 0, 64)
	case reflect.Bool:
		if inValue.Bool() {
			return 1, nil
		}
		return 0, nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return uint64(inValue.Int()), nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return inValue.Uint(), nil
	case reflect.Float32, reflect.Float64:
		return uint64(inValue.Float()), nil
	}
	// Build the error with format args rather than string concatenation.
	return 0, fmt.Errorf("unable to cast %s to uint", inValue.Type())
}
// toFloat converts a scalar value to a float64. Empty or
// whitespace-only strings yield 0; other strings use
// strconv.ParseFloat. Bools map to 1/0.
func toFloat(in interface{}) (float64, error) {
	inValue := reflect.ValueOf(in)
	switch inValue.Kind() {
	case reflect.String:
		s := strings.TrimSpace(inValue.String())
		if s == "" {
			return 0, nil
		}
		return strconv.ParseFloat(s, 64)
	case reflect.Bool:
		if inValue.Bool() {
			return 1, nil
		}
		return 0, nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return float64(inValue.Int()), nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return float64(inValue.Uint()), nil
	case reflect.Float32, reflect.Float64:
		return inValue.Float(), nil
	}
	// Build the error with format args rather than string concatenation.
	return 0, fmt.Errorf("unable to cast %s to float", inValue.Type())
}
func setField(field reflect.Value, value string) error {
switch field.Kind() {
case reflect.String:
s, err := toString(value)
if err != nil {
return err
}
field.SetString(s)
case reflect.Bool:
b, err := toBool(value)
if err != nil {
return err
}
field.SetBool(b)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
i, err := toInt(value)
if err != nil {
return err
}
field.SetInt(i)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
ui, err := toUint(value)
if err != nil {
return err
}
field.SetUint(ui)
case reflect.Float32, reflect.Float64:
f, err := toFloat(value)
if err != nil {
return err
}
field.SetFloat(f)
default:
err := fmt.Errorf("unable to set field of type %s", field.Kind())
return err
}
return nil
} | vendor/github.com/bcicen/go-haproxy/kvcodec/types.go | 0.555676 | 0.440108 | types.go | starcoder |
package forGraphBLASGo
import "github.com/intel/forGoParallel/parallel"
// isAnyIndexOutOfBounds reports whether any entry of indices lies
// outside [0, size). The scan is parallelized over index ranges.
func isAnyIndexOutOfBounds(indices []int, size int) bool {
	return parallel.RangeOr(0, len(indices), func(low, high int) bool {
		for i := low; i < high; i++ {
			if index := indices[i]; index < 0 || index >= size {
				return true
			}
		}
		return false
	})
}
// checkIndices validates an index list against a dimension of the given
// size. It returns the number of indices and whether the list is the
// special "all indices" marker. checkIndexSize lets the caller check
// the count against its own expected dimension; IndexOutOfBounds is
// returned when any index (or the "all" count) exceeds size.
func checkIndices(indices []int, size int, checkIndexSize func(int) error) (nindices int, all bool, err error) {
	nindices, all = isAll(indices)
	if all {
		if err = checkIndexSize(nindices); err != nil {
			return
		}
		// An "all" marker cannot address more elements than exist.
		if size < nindices {
			err = IndexOutOfBounds
		}
		return
	}
	if err = checkIndexSize(nindices); err != nil {
		return
	}
	if isAnyIndexOutOfBounds(indices, size) {
		err = IndexOutOfBounds
	}
	return
}
// vectorAssignBody implements the control flow shared by the vector
// assign variants: validate the indices, then either install the
// source directly (fast path: full-size assignment with no mask,
// complement, or accumulator) or wrap the target in a lazily computed
// vector built from complexAssign.
func vectorAssignBody[D any](
	w *Vector[D], mask *Vector[bool], accum BinaryOp[D, D, D], indices []int, desc Descriptor,
	checkIndexSize func(int) error,
	simpleAssign func(size int) *vectorReference[D],
	complexAssign func() computeVectorT[D],
) error {
	size, err := w.Size()
	if err != nil {
		return err
	}
	nindices, _, err := checkIndices(indices, size, checkIndexSize)
	if err != nil {
		return err
	}
	isComp, err := desc.Is(Mask, Comp)
	if err != nil {
		// Descriptor inspection failing here indicates a programming
		// error rather than user input.
		panic(err)
	}
	// Fast path: assigning every position with no mask/accumulator can
	// simply replace w's reference.
	if size == nindices && mask == nil && !isComp && accum == nil {
		w.ref = simpleAssign(size)
		return nil
	}
	maskAsStructure, err := vectorMask(mask, size)
	if err != nil {
		return err
	}
	w.ref = newVectorReference[D](newComputedVector[D](
		size, w.ref,
		maskAsStructure, accum,
		complexAssign(),
		desc,
	), -1)
	return nil
}
// VectorAssign assigns the elements of vector u to the positions of w
// selected by indices, subject to mask, accumulator, and descriptor.
func VectorAssign[D any](w *Vector[D], mask *Vector[bool], accum BinaryOp[D, D, D], u *Vector[D], indices []int, desc Descriptor) error {
	return vectorAssignBody[D](
		w, mask, accum, indices, desc,
		u.expectSize,
		func(_ int) *vectorReference[D] {
			return u.ref
		}, func() computeVectorT[D] {
			// Indices are copied so later caller mutation cannot affect
			// the lazily evaluated assignment.
			return newVectorAssign[D](u.ref, fpcopy(indices))
		})
}
// VectorAssignConstant assigns the constant value to the positions of w
// selected by indices, subject to mask, accumulator, and descriptor.
// Constants need no index-count check, so checkIndexSize always passes.
func VectorAssignConstant[D any](w *Vector[D], mask *Vector[bool], accum BinaryOp[D, D, D], value D, indices []int, desc Descriptor) error {
	return vectorAssignBody[D](
		w, mask, accum, indices, desc,
		func(_ int) error {
			return nil
		}, func(size int) *vectorReference[D] {
			return newVectorReference[D](newHomVectorConstant[D](size, value), int64(size))
		}, func() computeVectorT[D] {
			return newVectorAssignConstant(value, fpcopy(indices))
		})
}
// VectorAssignConstantScalar assigns the value held by scalar to the
// positions of w selected by indices, subject to mask, accumulator, and
// descriptor. An uninitialized scalar is an error.
func VectorAssignConstantScalar[D any](w *Vector[D], mask *Vector[bool], accum BinaryOp[D, D, D], scalar *Scalar[D], indices []int, desc Descriptor) error {
	if scalar == nil || scalar.ref == nil {
		return UninitializedObject
	}
	return vectorAssignBody[D](
		w, mask, accum, indices, desc,
		func(_ int) error {
			return nil
		}, func(size int) *vectorReference[D] {
			return newVectorReference[D](newHomVectorScalar[D](size, scalar.ref), -1)
		}, func() computeVectorT[D] {
			return newVectorAssignConstantScalar[D](scalar.ref, fpcopy(indices))
		})
}
// matrixAssignBody implements the control flow shared by the matrix
// assign variants: validate row and column indices, then either install
// the source directly (fast path: full-size assignment with no mask,
// complement, or accumulator) or wrap the target in a lazily computed
// matrix built from complexAssign.
func matrixAssignBody[D any](
	C *Matrix[D], mask *Matrix[bool], accum BinaryOp[D, D, D], rowIndices, colIndices []int, desc Descriptor,
	checkRowIndexSize, checkColIndexSize func(nindices int) error,
	simpleAssign func(int, int) *matrixReference[D],
	complexAssign func() computeMatrixT[D],
) error {
	nrows, ncols, err := C.Size()
	if err != nil {
		// BUG FIX: this previously returned nil, silently swallowing
		// the error from C.Size (the vector counterpart returns err).
		return err
	}
	nRowIndices, allRows, rowErr := checkIndices(rowIndices, nrows, checkRowIndexSize)
	if rowErr != nil {
		return rowErr
	}
	nColIndices, allCols, colErr := checkIndices(colIndices, ncols, checkColIndexSize)
	if colErr != nil {
		return colErr
	}
	isComp, err := desc.Is(Mask, Comp)
	if err != nil {
		// Descriptor inspection failing here indicates a programming
		// error rather than user input.
		panic(err)
	}
	// Fast path: assigning every position with no mask/accumulator can
	// simply replace C's reference.
	if allRows && nRowIndices == nrows &&
		allCols && nColIndices == ncols &&
		mask == nil && !isComp && accum == nil {
		C.ref = simpleAssign(nrows, ncols)
		return nil
	}
	maskAsStructure, err := matrixMask(mask, nrows, ncols)
	if err != nil {
		return err
	}
	C.ref = newMatrixReference[D](newComputedMatrix[D](
		nrows, ncols, C.ref,
		maskAsStructure, accum,
		complexAssign(),
		desc,
	), -1)
	return nil
}
// MatrixAssign assigns the elements of matrix A (optionally transposed,
// per the Inp0/Tran descriptor setting) to the positions of C selected
// by rowIndices and colIndices, subject to mask, accumulator, and
// descriptor. The index counts must match A's (possibly transposed)
// dimensions.
func MatrixAssign[D any](C *Matrix[D], mask *Matrix[bool], accum BinaryOp[D, D, D], A *Matrix[D], rowIndices, colIndices []int, desc Descriptor) error {
	ANRows, ANCols, err := A.Size()
	if err != nil {
		return err
	}
	AIsTran, err := desc.Is(Inp0, Tran)
	if err != nil {
		return err
	}
	if AIsTran {
		ANRows, ANCols = ANCols, ANRows
	}
	return matrixAssignBody(
		C, mask, accum, rowIndices, colIndices, desc,
		func(nindices int) error {
			if nindices != ANRows {
				return DimensionMismatch
			}
			return nil
		}, func(nindices int) error {
			if nindices != ANCols {
				return DimensionMismatch
			}
			return nil
		}, func(_, _ int) *matrixReference[D] {
			return maybeTran(A.ref, AIsTran)
		}, func() computeMatrixT[D] {
			// Indices are copied so later caller mutation cannot affect
			// the lazily evaluated assignment.
			rowIndicesCopy, colIndicesCopy := fpcopy2(rowIndices, colIndices)
			return newMatrixAssign[D](maybeTran(A.ref, AIsTran), rowIndicesCopy, colIndicesCopy)
		})
}
// ColAssign assigns the elements of vector u to column col of C at the
// rows selected by rowIndices, subject to mask, accumulator, and
// descriptor. The number of row indices must match u's size.
func ColAssign[D any](C *Matrix[D], mask *Matrix[bool], accum BinaryOp[D, D, D], u *Vector[D], rowIndices []int, col int, desc Descriptor) error {
	nrows, ncols, err := C.Size()
	if err != nil {
		return err
	}
	if _, _, err = checkIndices(rowIndices, nrows, u.expectSize); err != nil {
		return err
	}
	maskAsStructure, err := matrixMask(mask, nrows, ncols)
	if err != nil {
		return err
	}
	C.ref = newMatrixReference[D](newComputedMatrix[D](
		nrows, ncols, C.ref, maskAsStructure, accum,
		newColAssign[D](u.ref, fpcopy(rowIndices), col),
		desc,
	), -1)
	return nil
}
// RowAssign assigns the elements of vector u to row row of C at the
// columns selected by colIndices, subject to mask, accumulator, and
// descriptor. The number of column indices must match u's size.
func RowAssign[D any](C *Matrix[D], mask *Matrix[bool], accum BinaryOp[D, D, D], u *Vector[D], row int, colIndices []int, desc Descriptor) error {
	nrows, ncols, err := C.Size()
	if err != nil {
		return err
	}
	if _, _, err = checkIndices(colIndices, ncols, u.expectSize); err != nil {
		return err
	}
	maskAsStructure, err := matrixMask(mask, nrows, ncols)
	if err != nil {
		return err
	}
	C.ref = newMatrixReference[D](newComputedMatrix[D](
		nrows, ncols, C.ref, maskAsStructure, accum,
		newRowAssign[D](u.ref, row, fpcopy(colIndices)),
		desc,
	), -1)
	return nil
}
// MatrixAssignConstant assigns the constant value to the positions of C
// selected by rowIndices and colIndices, subject to mask, accumulator,
// and descriptor. Constants need no index-count check, so both size
// checks always pass.
func MatrixAssignConstant[D any](C *Matrix[D], mask *Matrix[bool], accum BinaryOp[D, D, D], value D, rowIndices, colIndices []int, desc Descriptor) error {
	return matrixAssignBody(
		C, mask, accum, rowIndices, colIndices, desc,
		func(nindices int) error {
			return nil
		}, func(nindices int) error {
			return nil
		}, func(nrows, ncols int) *matrixReference[D] {
			return newMatrixReference[D](newHomMatrixConstant[D](nrows, ncols, value), int64(nrows*ncols))
		}, func() computeMatrixT[D] {
			rowIndicesCopy, colIndicesCopy := fpcopy2(rowIndices, colIndices)
			return newMatrixAssignConstant(value, rowIndicesCopy, colIndicesCopy)
		})
}
// MatrixAssignConstantScalar assigns the value held by scalar to the
// positions of C selected by rowIndices and colIndices, subject to
// mask, accumulator, and descriptor. An uninitialized scalar is an
// error.
func MatrixAssignConstantScalar[D any](C *Matrix[D], mask *Matrix[bool], accum BinaryOp[D, D, D], scalar *Scalar[D], rowIndices, colIndices []int, desc Descriptor) error {
	if scalar == nil || scalar.ref == nil {
		return UninitializedObject
	}
	return matrixAssignBody(
		C, mask, accum, rowIndices, colIndices, desc,
		func(nindices int) error {
			return nil
		}, func(nindices int) error {
			return nil
		}, func(nrows, ncols int) *matrixReference[D] {
			return newMatrixReference[D](newHomMatrixScalar[D](nrows, ncols, scalar.ref), -1)
		}, func() computeMatrixT[D] {
			rowIndicesCopy, colIndicesCopy := fpcopy2(rowIndices, colIndices)
			return newMatrixAssignConstantScalar(scalar.ref, rowIndicesCopy, colIndicesCopy)
		})
}
package gridspech
import "fmt"
// Valid returns if all tiles in the grid are valid.
func (g Grid) Valid() bool {
	width, height := g.Width(), g.Height()
	for x := 0; x < width; x++ {
		for y := 0; y < height; y++ {
			if !g.ValidTile(TileCoord{X: x, Y: y}) {
				return false
			}
		}
	}
	return true
}
// ValidTile returns if t is valid in g. If all tiles in g are valid,
// the grid is completed.
func (g Grid) ValidTile(coord TileCoord) bool {
	t := *g.TileAtCoord(coord)
	switch t.Data.Type {
	case TypeHole, TypeBlank:
		// Holes and blanks impose no constraint.
		return true
	case TypeGoal:
		return g.validGoal(t)
	case TypeCrown:
		return g.validCrown(t)
	case TypeDot1:
		return g.coloredNeighborCount(t) == 1
	case TypeDot2:
		return g.coloredNeighborCount(t) == 2
	case TypeDot3:
		return g.coloredNeighborCount(t) == 3
	case TypeJoin1:
		return g.validJoin(t, 1)
	case TypeJoin2:
		return g.validJoin(t, 2)
	default:
		panic(fmt.Sprintf("invalid tile type %v", t.Data.Type))
	}
}

// coloredNeighborCount returns how many neighbors of t carry any color.
// Shared by the three dot-tile cases, which previously duplicated the
// same closure three times.
func (g Grid) coloredNeighborCount(t Tile) int {
	return len(g.NeighborSliceWith(t.Coord, func(other Tile) bool {
		return other.Data.Color != ColorNone
	}))
}
// the blob of a goal tile should contain a direct path to another goal.
// the way we measure this:
// 1. The blob should contain exactly two goals.
// 2. The goals should have exactly 1 neighbor with the same state.
// 3. All other tiles in the blob should have exactly 2 neighbors with the same state.
func (g Grid) validGoal(start Tile) bool {
	blob := g.Blob(start.Coord)
	var goals int
	for _, t := range blob.Slice() {
		t := t // capture per iteration for the closure below
		// Compute the same-colored neighbors once per tile; the previous
		// implementation recomputed them a second time for goal tiles.
		sameColor := g.NeighborSliceWith(t.Coord, func(o Tile) bool {
			return t.Data.Color == o.Data.Color
		})
		if t.Data.Type == TypeGoal {
			goals++
			// requirement 2: The goals should have exactly 1 neighbor with the same state.
			if len(sameColor) != 1 {
				return false
			}
		} else if len(sameColor) != 2 {
			// requirement 3: All other tiles in the blob should have exactly 2 neighbors with the same state.
			return false
		}
	}
	// requirement 1: The blob should contain exactly two goals.
	return goals == 2
}
// crown tiles have the following requirements:
// 1. No other crowns may be in this crown's blob.
// 2. All tiles with the same color must have a crown in its blob.
func (g Grid) validCrown(start Tile) bool {
	blob := g.Blob(start.Coord)
	// requirement 1: No other crowns may be in this crown's blob.
	for _, tile := range blob.Slice() {
		if tile.Data.Type == TypeCrown && tile != start {
			return false
		}
	}
	crownsWithSameState := g.TilesWith(func(t Tile) bool {
		return t.Data.Type == TypeCrown && t.Data.Color == start.Data.Color
	})
	// set of blobs of all crowns with same color
	var crownsBlobSet TileSet
	for crown := range crownsWithSameState.Iter() {
		crownsBlobSet.Merge(g.Blob(crown.Coord))
	}
	// set of all tiles with same color
	stateSet := g.TilesWith(func(t Tile) bool {
		return t.Data.Type != TypeHole && t.Data.Color == start.Data.Color
	})
	// requirement 2: All tiles with the same color must have a crown in its blob.
	// Holds exactly when the union of same-colored crown blobs equals the
	// set of all same-colored (non-hole) tiles.
	return crownsBlobSet.Eq(stateSet)
}
func (g Grid) validJoin(t Tile, n int) bool {
var found int
for _, blobTile := range g.Blob(t.Coord).Slice() {
if blobTile.Data.Type != TypeHole && blobTile.Data.Type != TypeBlank {
found++
if found > n+1 {
return false
}
}
}
return found == n+1
} | rules.go | 0.787441 | 0.690523 | rules.go | starcoder |
package cbor
import (
"math"
"math/big"
"github.com/gocardano/go-cardano-client/errors"
log "github.com/sirupsen/logrus"
"github.com/x448/float16"
)
// decodeArray parses the next array object.
// Only called after the majorType array has been determined.
//
// For definite-length arrays the additional-type value carries the
// element count; indefinite-length arrays are read until the break stop
// code appears.
func (r *BitstreamReader) decodeArray() (*Array, error) {
	array := NewArray()
	// additionalTypeValue (second parameter) in this case indicates the size of the array
	additionalType, arrayLength, err := doGetAdditionalType(r)
	if err != nil {
		return nil, err
	}
	log.Tracef("Starting to iterate on array with length: %d", arrayLength)
	hasMoreItems := false
	counter := uint64(0)
	// Empty definite-length arrays are done immediately; indefinite
	// arrays must still read until the break code.
	if arrayLength > 0 || additionalType == additionalTypeIndefinite {
		hasMoreItems = true
	}
	for hasMoreItems {
		obj, err := doGetNextDataItem(r)
		if err != nil {
			log.Error("Error reading next array item")
			return array, err
		}
		log.Tracef("Found another item in the array %+v", obj)
		if additionalType == additionalTypeIndefinite &&
			byte(obj.MajorType()) == indefiniteBreakCodeMajorType &&
			obj.AdditionalType() == indefiniteBreakCodeAdditionalType {
			log.Tracef("Array of indefinite length reached the break stop code, found [%d] items", array.Length())
			hasMoreItems = false
		} else {
			array.Add(obj)
			counter++
			if counter == arrayLength {
				log.Tracef("Array of [%d] length reached [%d] items", arrayLength, array.Length())
				hasMoreItems = false
			}
		}
	}
	return array, nil
}
// decodeByteString parses the next byte string object.
// Only called after the majorType byte string has been determined.
func (r *BitstreamReader) decodeByteString() (*ByteString, error) {
	raw, err := r.doDecodeByteString()
	if err != nil {
		return nil, err
	}
	return NewByteString(raw), nil
}
// decodeMap parses the next map object. Only called after the majorType map has been determined.
//
// For definite-length maps the additional-type value carries the pair
// count; indefinite-length maps are read until a break stop code is
// found in the key position.
func (r *BitstreamReader) decodeMap() (DataItem, error) {
	m := NewMap()
	// additionalTypeValue (second parameter) in this case indicates the size of the map
	additionalType, mapLength, err := doGetAdditionalType(r)
	if err != nil {
		return nil, err
	}
	log.Tracef("Starting to iterate on map with length: %d", mapLength)
	hasMoreItems := true
	counter := uint64(0)
	for hasMoreItems {
		key, err := doGetNextDataItem(r)
		if err != nil {
			log.Error("Error reading map key item")
			return m, err
		}
		if additionalType == additionalTypeIndefinite &&
			key.MajorType() == MajorTypePrimitive &&
			key.AdditionalType() == primitiveBreakStopCode {
			// Found break stop code
			hasMoreItems = false
		} else {
			value, err := doGetNextDataItem(r)
			if err != nil {
				log.Error("Error reading map value item")
				return m, err
			}
			log.Tracef("Adding map key: [%+v] with value: [%+v]", key, value)
			m.Add(key, value)
			counter++
			if counter == mapLength {
				log.Tracef("Map of [%d] length reached [%d] items", mapLength, m.Length())
				hasMoreItems = false
			}
		}
	}
	return m, nil
}
// decodeNegativeInt parses the next negative integer object.
// Only called after the majorType negative integer has been determined.
// CBOR encodes a negative integer n as the unsigned value -1-n, so the
// actual value is recovered as -1 - encodedValue.
func (r *BitstreamReader) decodeNegativeInt() (DataItem, error) {
	var result DataItem
	// encodedValue (2nd return parameter) for positiveInt/negativeInt is the actual encoded value
	additionalType, encodedValue, err := doGetAdditionalType(r)
	if err != nil {
		log.WithError(err).WithFields(log.Fields{
			"encodedValue": encodedValue,
		}).Error("Unable to handle negative int")
		return nil, err
	}
	// compute the actual value
	actualValue := int64(-1) - int64(encodedValue)
	log.WithFields(log.Fields{
		"encodedValue": encodedValue,
		"actualValue":  actualValue,
	}).Debug("Decoded CBOR item from array")
	// Pick the container matching the encoded width. Go switch cases do not
	// fall through, so the break statements were redundant and are removed.
	switch additionalType {
	case additionalType64Bits:
		result = NewNegativeInteger64(actualValue)
	case additionalType32Bits:
		result = NewNegativeInteger32(actualValue)
	case additionalType16Bits:
		result = NewNegativeInteger16(actualValue)
	default:
		// additional type value below 24 encodes the value directly
		result = NewNegativeInteger8(actualValue)
	}
	if result == nil {
		log.Error("Error creating negative integer instance")
	}
	return result, nil
}
// decodePositiveUnsignedInt parses the next positive unsigned integer.
// Only called after the majorType positive integer has been determined.
func (r *BitstreamReader) decodePositiveUnsignedInt() (DataItem, error) {
	var result DataItem
	// actualValue (2nd return parameter) for positiveInt/negativeInt is the unsigned positive value
	additionalType, actualValue, err := doGetAdditionalType(r)
	if err != nil {
		return nil, err
	}
	// Switch directly on the additional type (the original used a tagless
	// switch with equality cases plus redundant breaks).
	switch additionalType {
	case additionalType64Bits:
		result = NewPositiveInteger64(actualValue)
	case additionalType32Bits:
		result = NewPositiveInteger32(uint32(actualValue))
	case additionalType16Bits:
		result = NewPositiveInteger16(uint16(actualValue))
	case additionalType8Bits:
		result = NewPositiveInteger8(uint8(actualValue))
	default:
		if actualValue <= uint64(additionalTypeDirectValue23) {
			// additional type values below 24 encode the value directly
			result = NewPositiveInteger8(uint8(actualValue))
		} else {
			// NOTE(review): this path returns (nil, nil); callers must
			// tolerate a nil DataItem -- confirm whether an error should be
			// returned instead.
			log.WithFields(log.Fields{
				"additionalType": additionalType,
				"actualValue":    actualValue,
			}).Error("Unhandled additional type while parsing positive unsigned int")
		}
	}
	return result, nil
}
// decodePrimitive parses the next primitive object.
// Only called after the majorType primitive has been determined.
// Handles the simple values (false/true/null/undefined), one-byte simple
// values, half/single/double precision floats and the break stop code.
func (r *BitstreamReader) decodePrimitive() (DataItem, error) {
	var obj DataItem
	// The 5 remaining bits of the initial byte select the primitive kind.
	additionalType, err := r.ReadBitsAsUint8(5)
	if err != nil {
		log.Error("Error parsing additional type for primitive item")
		return nil, err
	}
	// Go switch cases do not fall through; the break statements were
	// redundant and are removed.
	switch additionalType {
	case primitiveFalse:
		obj = NewPrimitiveFalse()
	case primitiveTrue:
		obj = NewPrimitiveTrue()
	case primitiveNull:
		obj = NewPrimitiveNull()
	case primitiveUndefined:
		obj = NewPrimitiveUndefined()
	case primitiveSimpleValue:
		// The simple value itself is carried in the following byte.
		val, err := r.ReadBitsAsUint8(8)
		if err != nil {
			return nil, err
		}
		obj = NewPrimitiveSimpleValue(val)
	case primitiveHalfPrecisionFloat:
		val, err := r.ReadBitsAsUint16(16)
		if err != nil {
			return nil, err
		}
		// NOTE(review): the converted float16 value is discarded and the
		// primitive stores the raw bits; call kept for behavioural parity.
		// Confirm whether NewPrimitiveHalfPrecisionFloat should receive the
		// converted value instead.
		float16.Frombits(val)
		obj = NewPrimitiveHalfPrecisionFloat(val)
	case primitiveSinglePrecisionFloat:
		val, err := r.ReadBitsAsUint32(32)
		if err != nil {
			return nil, err
		}
		obj = NewPrimitiveSinglePrecisionFloat(math.Float32frombits(val))
	case primitiveDoublePrecisionFloat:
		val, err := r.ReadBitsAsUint64(64)
		if err != nil {
			return nil, err
		}
		obj = NewPrimitiveDoublePrecisionFloat(math.Float64frombits(val))
	case primitiveBreakStopCode:
		obj = NewPrimitiveBreakStopCode()
	}
	// NOTE(review): an unrecognised additional type falls through and
	// returns (nil, nil).
	return obj, nil
}
// decodeSemantic parses the next semantic tagged object.
// Only called after the majorType semantic has been determined.
// The tag ID selects how the embedded data item that follows is interpreted
// (date/time, bignum, URI, base64, regular expression, MIME message, ...).
func (r *BitstreamReader) decodeSemantic() (DataItem, error) {
	var result DataItem
	// semanticTagID (second parameter) in this case indicates the type of the semantic tag
	_, semanticTagID, err := doGetAdditionalType(r)
	if err != nil {
		return nil, err
	}
	switch semanticTagID {
	case semanticDateTimeString:
		obj, err := doGetNextDataItem(r)
		if err != nil {
			log.Error("Error trying to parse the next text string for semantic date time string", err)
			return nil, err
		}
		if obj.MajorType() != MajorTypeTextString {
			log.Error("Unhandled exception, expected to see text as date time string object")
			return nil, errors.NewError(errors.ErrCborMajorTypeUnhandled)
		}
		result = NewDateTimeString(obj.Value().(string))
	case semanticDateTimeEpoch:
		obj, err := doGetNextDataItem(r)
		if err != nil {
			log.Error("Error trying to parse the next positive/negative int for semantic date time epoch", err)
			return nil, err
		}
		// Widen the embedded integer (whatever its encoded width) to int64.
		epoch := int64(0)
		switch obj.MajorType() {
		case MajorTypePositiveInt:
			switch obj.AdditionalType() {
			case additionalType64Bits:
				epoch = int64(obj.Value().(uint64))
			case additionalType32Bits:
				epoch = int64(obj.Value().(uint32))
			case additionalType16Bits:
				epoch = int64(obj.Value().(uint16))
			default:
				// additional type value below 24 (use same value)
				epoch = int64(obj.Value().(uint8))
			}
			result = NewDateTimeEpoch(epoch)
		case MajorTypeNegativeInt:
			switch obj.AdditionalType() {
			case additionalType64Bits:
				epoch = int64(obj.(*NegativeInteger64).ValueAsInt64())
			case additionalType32Bits:
				epoch = int64(obj.(*NegativeInteger32).ValueAsInt64())
			case additionalType16Bits:
				epoch = int64(obj.(*NegativeInteger16).ValueAsInt64())
			default:
				// additional type value below 24 (use same value)
				epoch = int64(obj.(*NegativeInteger8).ValueAsInt64())
			}
			result = NewDateTimeEpoch(epoch)
		default:
			// The previous message also formatted err, which is always nil here.
			log.Errorf("Unhandled major type [%d] while parsing the value for date time epoch", obj.MajorType())
			result = nil
		}
	case semanticPositiveBignum:
		obj, err := doGetNextDataItem(r)
		if err != nil {
			log.Error("Error trying to parse the next string for positive bignum", err)
			return nil, err
		}
		if obj.MajorType() != MajorTypeByteString {
			log.Errorf("Expected bytestring payload in positive bignum, unhandled major type: %d", obj.MajorType())
			// Previously returned (nil, err) with err == nil, silently
			// swallowing the malformed input; return an explicit error.
			return nil, errors.NewError(errors.ErrCborMajorTypeUnhandled)
		}
		n := new(big.Int).SetBytes(obj.(*ByteString).ValueAsBytes())
		result = NewPositiveBignumber(n)
	case semanticNegativeBignum:
		obj, err := doGetNextDataItem(r)
		if err != nil {
			log.Error("Error trying to parse the next string for negative bignum", err)
			return nil, err
		}
		if obj.MajorType() != MajorTypeByteString {
			log.Errorf("Expected bytestring payload in negative bignum, unhandled major type: %d", obj.MajorType())
			// Previously returned (nil, err) with err == nil, silently
			// swallowing the malformed input; return an explicit error.
			return nil, errors.NewError(errors.ErrCborMajorTypeUnhandled)
		}
		// A negative bignum with payload n represents the value -1-n.
		n := new(big.Int).SetBytes(obj.(*ByteString).ValueAsBytes())
		n = n.Sub(big.NewInt(-1), n)
		result = NewNegativeBignumber(n)
	case semanticURI:
		obj, err := doGetNextDataItem(r)
		if err != nil {
			log.Error("Error trying to parse the next string for uri", err)
			return nil, err
		}
		result = NewURI(obj.Value().(string))
	case semanticBase64URL:
		obj, err := doGetNextDataItem(r)
		if err != nil {
			log.Error("Error trying to parse the next text string for base64 string", err)
			return nil, err
		}
		result = NewBase64URL(obj.Value().(string))
	case semanticBase64:
		obj, err := doGetNextDataItem(r)
		if err != nil {
			log.Error("Error trying to parse the next text string for base64 string", err)
			return nil, err
		}
		result = NewBase64String(obj.Value().(string))
	case semanticRegularExpression:
		obj, err := doGetNextDataItem(r)
		if err != nil {
			log.Error("Error trying to parse the next text string for regular expression string", err)
			return nil, err
		}
		result = NewRegularExpression(obj.Value().(string))
	case semanticMimeMessage:
		obj, err := doGetNextDataItem(r)
		if err != nil {
			log.Error("Error trying to parse the next text string for mime message string", err)
			return nil, err
		}
		result = NewMimeMessage(obj.Value().(string))
	case semanticDecimalFraction,
		semanticBigFloat,
		semanticExpectedConversionToBase64URL,
		semanticExpectedConversionToBase64,
		semanticExpectedConversionToBase16,
		semanticEncodedCBORDataItems,
		semanticSelfDescribeCBOR:
		log.Infof("Semantic unhandled tag %d", semanticTagID)
		// TBD
		return nil, errors.NewErrorf(errors.ErrCborAdditionalTypeUnhandled, "Unable to parse due to unhandled semantic tag ID encountered: %d", semanticTagID)
	}
	// NOTE(review): a tag ID not matched above returns (nil, nil).
	return result, nil
}
// decodeTextString parses the next text string object by reading the raw
// byte payload and converting it to a string.
// Only called after the majorType text string has been determined.
func (r *BitstreamReader) decodeTextString() (*TextString, error) {
	payload, err := r.doDecodeByteString()
	if err != nil {
		return nil, err
	}
	return NewTextString(string(payload)), nil
}
// decodeByteString handles parsing the next byte string or text string.
// Only called after the majorType byteString/textString has been determined.
func (r *BitstreamReader) doDecodeByteString() ([]byte, error) {
// byteLength (second parameter) in this case indicates the length of the byte/text
additionalType, byteLength, err := doGetAdditionalType(r)
if err != nil {
return nil, err
}
payload := []byte{}
if additionalType != additionalTypeIndefinite {
log.Tracef("Reading bytes of payload length: %d", byteLength)
payload, err = r.ReadBytes(byteLength)
if err != nil {
return nil, err
}
} else {
chunkToken, err := r.ReadBitsAsUint64(8)
log.Tracef("ChunkLength: 0x%02x", chunkToken)
if err != nil {
return nil, err
}
for chunkToken != indefiniteBreakCode {
// ignore first 3 bits, only the 5 bits matters for length
chunkLength := chunkToken & 0x1f
tmp, err := r.ReadBytes(chunkLength)
if err != nil {
return nil, err
}
log.Tracef("Read [%d] chunk payload", len(tmp))
payload = append(payload, tmp...)
chunkToken, err = r.ReadBitsAsUint64(8)
log.Tracef("ChunkLength: 0x%02x", chunkToken)
if err != nil {
return nil, err
}
}
}
return payload, nil
} | cbor/bitstreamreader_decoder.go | 0.671471 | 0.426441 | bitstreamreader_decoder.go | starcoder |
package edwards25519
import (
"crypto/subtle"
"encoding/hex"
"fmt"
)
// ExtendedPoint represents a curve point in extended coordinates
// (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, X*Y=Z*T. Aka P3.
type ExtendedPoint struct {
	X, Y, Z, T FieldElement
}

// CompletedPoint represents a curve point as ((X:Z),(Y:T)) satisfying
// x=X/Z, y=Y/T. Aka P1P1.
type CompletedPoint struct {
	X, Y, Z, T FieldElement
}

// ProjectivePoint represents a curve point in projective coordinates
// (X:Y:Z) satisfying x=X/Z, y=Y/Z.
type ProjectivePoint struct {
	X, Y, Z FieldElement
}
// SetTorsion3 sets p to (-i,0), a torsion point that is
// Ristretto-equivalent to the neutral element 0. Returns p.
func (p *ExtendedPoint) SetTorsion3() *ExtendedPoint {
	p.X.Set(&feMinusI)
	p.Y.SetZero()
	p.Z.Set(&feOne)
	p.T.SetZero()
	return p
}
// SetTorsion2 sets p to (i,0), a torsion point that is
// Ristretto-equivalent to the neutral element 0. Returns p.
func (p *ExtendedPoint) SetTorsion2() *ExtendedPoint {
	p.X.Set(&feI)
	p.Y.SetZero()
	p.Z.Set(&feOne)
	p.T.SetZero()
	return p
}
// SetTorsion1 sets p to (0,-1), a torsion point that is
// Ristretto-equivalent to the neutral element 0. Returns p.
func (p *ExtendedPoint) SetTorsion1() *ExtendedPoint {
	p.X.SetZero()
	p.Y.Set(&feMinusOne)
	p.Z.Set(&feOne)
	p.T.SetZero()
	return p
}
// SetZero sets p to (0,1), the neutral group element. Returns p.
func (p *ProjectivePoint) SetZero() *ProjectivePoint {
	p.X.SetZero()
	p.Y.SetOne()
	p.Z.SetOne()
	return p
}
// SetZero sets p to (0,1), the neutral group element. Returns p.
func (p *ExtendedPoint) SetZero() *ExtendedPoint {
	p.X.SetZero()
	p.Y.SetOne()
	p.Z.SetOne()
	p.T.SetZero()
	return p
}
// SetBase sets p to the curve basepoint (x,4/5) with x>=0. Returns p.
func (p *ExtendedPoint) SetBase() *ExtendedPoint {
	return p.Set(&epBase)
}
// Set copies all four coordinates of q into p. Returns p.
func (p *ExtendedPoint) Set(q *ExtendedPoint) *ExtendedPoint {
	p.X.Set(&q.X)
	p.Y.Set(&q.Y)
	p.Z.Set(&q.Z)
	p.T.Set(&q.T)
	return p
}
// ConditionalSet sets p to q if b == 1 and leaves p unchanged if b == 0,
// in constant time. Assumes b is 0 or 1. Returns p.
func (p *ExtendedPoint) ConditionalSet(q *ExtendedPoint, b int32) *ExtendedPoint {
	p.X.ConditionalSet(&q.X, b)
	p.Y.ConditionalSet(&q.Y, b)
	p.Z.ConditionalSet(&q.Z, b)
	p.T.ConditionalSet(&q.T, b)
	return p
}
// AddExtended sets p to the sum q+r using extended-coordinate addition
// (fe2D is presumably the precomputed constant 2*d -- confirm in the
// constants file). Returns p.
func (p *CompletedPoint) AddExtended(q, r *ExtendedPoint) *CompletedPoint {
	var a, b, c, d, t FieldElement
	a.sub(&q.Y, &q.X) // a = (Y1-X1)*(Y2-X2)
	t.sub(&r.Y, &r.X)
	a.Mul(&a, &t)
	b.add(&q.X, &q.Y) // b = (X1+Y1)*(X2+Y2)
	t.add(&r.X, &r.Y)
	b.Mul(&b, &t)
	c.Mul(&q.T, &r.T) // c = 2d*T1*T2
	c.Mul(&c, &fe2D)
	d.Mul(&q.Z, &r.Z) // d = 2*Z1*Z2
	d.add(&d, &d)
	p.X.sub(&b, &a)
	p.T.sub(&d, &c)
	p.Z.add(&d, &c)
	p.Y.add(&b, &a)
	return p
}
// SubExtended sets p to the difference q-r. Mirrors AddExtended with the
// roles of (Y2-X2)/(Y2+X2) swapped and the 2d*T1*T2 term negated, which
// amounts to adding the negation of r. Returns p.
func (p *CompletedPoint) SubExtended(q, r *ExtendedPoint) *CompletedPoint {
	var a, b, c, d, t FieldElement
	a.sub(&q.Y, &q.X) // a = (Y1-X1)*(Y2+X2)
	t.add(&r.Y, &r.X)
	a.Mul(&a, &t)
	b.add(&q.X, &q.Y) // b = (X1+Y1)*(Y2-X2)
	t.sub(&r.Y, &r.X)
	b.Mul(&b, &t)
	c.Mul(&q.T, &r.T) // c = 2d*T1*T2
	c.Mul(&c, &fe2D)
	d.Mul(&q.Z, &r.Z) // d = 2*Z1*Z2
	d.add(&d, &d)
	p.X.sub(&b, &a)
	p.T.add(&d, &c)
	p.Z.sub(&d, &c)
	p.Y.add(&b, &a)
	return p
}
// DoubleProjective sets p to 2*q for a point q in projective coordinates.
// Returns p.
func (p *CompletedPoint) DoubleProjective(q *ProjectivePoint) *CompletedPoint {
	var t0 FieldElement
	p.X.Square(&q.X)        // X^2
	p.Z.Square(&q.Y)        // Y^2
	p.T.DoubledSquare(&q.Z) // 2*Z^2
	p.Y.add(&q.X, &q.Y)
	t0.Square(&p.Y) // (X+Y)^2
	p.Y.add(&p.Z, &p.X)
	p.Z.sub(&p.Z, &p.X)
	p.X.sub(&t0, &p.Y)
	p.T.sub(&p.T, &p.Z)
	return p
}
// DoubleExtended sets p to 2*q for a point q in extended coordinates.
// Returns p.
func (p *CompletedPoint) DoubleExtended(q *ExtendedPoint) *CompletedPoint {
	var a, b, c, d FieldElement
	a.Square(&q.X)        // X^2
	b.Square(&q.Y)        // Y^2
	c.DoubledSquare(&q.Z) // 2*Z^2
	d.Neg(&a)
	p.X.add(&q.X, &q.Y)
	p.X.Square(&p.X)
	p.X.sub(&p.X, &a) // (X+Y)^2 - X^2 - Y^2 = 2*X*Y
	p.X.sub(&p.X, &b)
	p.Z.add(&d, &b)
	p.T.sub(&p.Z, &c)
	p.Y.sub(&d, &b)
	return p
}
// SetExtended converts the extended point q to projective coordinates,
// dropping the T coordinate. Returns p.
func (p *ProjectivePoint) SetExtended(q *ExtendedPoint) *ProjectivePoint {
	p.X.Set(&q.X)
	p.Y.Set(&q.Y)
	p.Z.Set(&q.Z)
	return p
}
// SetCompleted converts the completed point q ((X:Z),(Y:T)) to projective
// coordinates by cross-multiplying onto the common denominator Z*T.
// Returns p.
func (p *ProjectivePoint) SetCompleted(q *CompletedPoint) *ProjectivePoint {
	p.X.Mul(&q.X, &q.T)
	p.Y.Mul(&q.Y, &q.Z)
	p.Z.Mul(&q.Z, &q.T)
	return p
}
// Double sets p to 2*q and returns p. It doubles into a temporary
// completed point and converts back to extended coordinates.
func (p *ExtendedPoint) Double(q *ExtendedPoint) *ExtendedPoint {
	var cp CompletedPoint
	return p.SetCompleted(cp.DoubleExtended(q))
}
// Add sets p to the sum q+r and returns p. It adds into a temporary
// completed point and converts back to extended coordinates.
func (p *ExtendedPoint) Add(q, r *ExtendedPoint) *ExtendedPoint {
	var cp CompletedPoint
	return p.SetCompleted(cp.AddExtended(q, r))
}
// Sub sets p to the difference q-r and returns p. It subtracts into a
// temporary completed point and converts back to extended coordinates.
func (p *ExtendedPoint) Sub(q, r *ExtendedPoint) *ExtendedPoint {
	var cp CompletedPoint
	return p.SetCompleted(cp.SubExtended(q, r))
}
// SetCompleted converts the completed point q ((X:Z),(Y:T)) to extended
// coordinates with Z = Z*T and T = X*Y. Returns p.
func (p *ExtendedPoint) SetCompleted(q *CompletedPoint) *ExtendedPoint {
	p.X.Mul(&q.X, &q.T)
	p.Y.Mul(&q.Y, &q.Z)
	p.Z.Mul(&q.Z, &q.T)
	p.T.Mul(&q.X, &q.Y)
	return p
}
// SetRistretto sets p to a point corresponding to the encoded group element
// of the ristretto group in buf. Returns whether the buffer encoded a group
// element. The decode runs in constant time: validity conditions are
// accumulated in ret and the result is zeroed on failure instead of
// branching early.
func (p *ExtendedPoint) SetRistretto(buf *[32]byte) bool {
	var s, s2, chk, yDen, yNum, yDen2, xDen2, isr, xDenInv FieldElement
	var yDenInv, t FieldElement
	var b, ret int32
	var buf2 [32]byte
	s.SetBytes(buf)
	// ensures 0 ≤ s < 2^255-19: re-encode and require a byte-exact match,
	// rejecting non-canonical encodings.
	s.BytesInto(&buf2)
	ret = int32(1 - subtle.ConstantTimeCompare(buf[:], buf2[:]))
	ret |= int32(buf2[0] & 1) // ensure s is positive
	s2.Square(&s)
	yDen.add(&feOne, &s2) // y = (1-s^2)/(1+s^2)
	yNum.sub(&feOne, &s2)
	yDen2.Square(&yDen)
	xDen2.Square(&yNum)
	xDen2.Mul(&xDen2, &feD)
	xDen2.add(&xDen2, &yDen2)
	xDen2.Neg(&xDen2)
	t.Mul(&xDen2, &yDen2)
	// isr = 1/sqrt(xDen2*yDen2); chk verifies the square root existed,
	// i.e. that the candidate point actually lies on the curve.
	isr.InvSqrt(&t)
	chk.Square(&isr)
	chk.Mul(&chk, &t)
	ret |= 1 - chk.IsOneI()
	xDenInv.Mul(&isr, &yDen)
	yDenInv.Mul(&xDenInv, &isr)
	yDenInv.Mul(&yDenInv, &xDen2)
	p.X.Mul(&s, &xDenInv)
	p.X.add(&p.X, &p.X)
	// Normalize x to the non-negative representative.
	b = p.X.IsNegativeI()
	t.Neg(&p.X)
	p.X.ConditionalSet(&t, b)
	p.Y.Mul(&yNum, &yDenInv)
	p.Z.SetOne()
	p.T.Mul(&p.X, &p.Y)
	// Reject encodings with negative x*y or y == 0.
	ret |= p.T.IsNegativeI()
	ret |= 1 - p.Y.IsNonZeroI()
	// On any failure, zero the point in constant time.
	p.X.ConditionalSet(&feZero, ret)
	p.Y.ConditionalSet(&feZero, ret)
	p.Z.ConditionalSet(&feZero, ret)
	p.T.ConditionalSet(&feZero, ret)
	return ret == 0
}
// Ristretto packs p using the Ristretto encoding and returns the resulting
// 32-byte slice. Requires p to be even. Convenience wrapper around
// RistrettoInto.
func (p *ExtendedPoint) Ristretto() []byte {
	var buf [32]byte
	p.RistrettoInto(&buf)
	return buf[:]
}
// RistrettoInto packs p using the Ristretto encoding and writes the 32-byte
// result to buf. Returns p. Requires p to be even. The encoding is computed
// in constant time: the coset representative is selected with
// ConditionalSet rather than branches, and the final s is normalized to the
// non-negative square root.
func (p *ExtendedPoint) RistrettoInto(buf *[32]byte) *ExtendedPoint {
	var d, u1, u2, isr, i1, i2, zInv, denInv, nx, ny, s FieldElement
	var b int32
	d.add(&p.Z, &p.Y)
	u1.sub(&p.Z, &p.Y)
	u1.Mul(&u1, &d) // u1 = (Z+Y)*(Z-Y)
	u2.Mul(&p.X, &p.Y)
	// isr = 1/sqrt(u1*u2^2); i1, i2 are the partial inverses derived from it.
	isr.Square(&u2)
	isr.Mul(&isr, &u1)
	isr.InvSqrt(&isr)
	i1.Mul(&isr, &u1)
	i2.Mul(&isr, &u2)
	zInv.Mul(&i1, &i2)
	zInv.Mul(&zInv, &p.T)
	// Rotate the point by i (swapping x and y scaled by i) when needed to
	// land in the canonical coset representative.
	d.Mul(&zInv, &p.T)
	nx.Mul(&p.Y, &feI)
	ny.Mul(&p.X, &feI)
	denInv.Mul(&feInvSqrtMinusDMinusOne, &i1)
	b = 1 - d.IsNegativeI()
	nx.ConditionalSet(&p.X, b)
	ny.ConditionalSet(&p.Y, b)
	denInv.ConditionalSet(&i2, b)
	// Negate y if the selected x/Z is negative.
	d.Mul(&nx, &zInv)
	b = d.IsNegativeI()
	d.Neg(&ny)
	ny.ConditionalSet(&d, b)
	// s = (Z - y) * denInv, normalized to the non-negative representative.
	s.sub(&p.Z, &ny)
	s.Mul(&s, &denInv)
	b = s.IsNegativeI()
	d.Neg(&s)
	s.ConditionalSet(&d, b)
	s.BytesInto(buf)
	return p
}
// computeScalarWindow5 computes the 5-bit window decomposition of the
// 256-bit scalar s into 51 digits. The first loop unpacks the scalar into
// unsigned 5-bit limbs; the second recodes them into signed digits in
// [-16,15] by propagating a carry, which allows a smaller lookup table
// during scalar multiplication.
func computeScalarWindow5(s *[32]byte, w *[51]int8) {
	// Unpack 8 five-bit windows from every 5 bytes of the scalar.
	for i := 0; i < 6; i++ {
		w[8*i+0] = int8(s[5*i+0] & 31)
		w[8*i+1] = int8((s[5*i+0] >> 5) & 31)
		w[8*i+1] ^= int8((s[5*i+1] << 3) & 31)
		w[8*i+2] = int8((s[5*i+1] >> 2) & 31)
		w[8*i+3] = int8((s[5*i+1] >> 7) & 31)
		w[8*i+3] ^= int8((s[5*i+2] << 1) & 31)
		w[8*i+4] = int8((s[5*i+2] >> 4) & 31)
		w[8*i+4] ^= int8((s[5*i+3] << 4) & 31)
		w[8*i+5] = int8((s[5*i+3] >> 1) & 31)
		w[8*i+6] = int8((s[5*i+3] >> 6) & 31)
		w[8*i+6] ^= int8((s[5*i+4] << 2) & 31)
		w[8*i+7] = int8((s[5*i+4] >> 3) & 31)
	}
	// Handle the two remaining bytes (32 = 6*5 + 2).
	w[8*6+0] = int8(s[5*6+0] & 31)
	w[8*6+1] = int8((s[5*6+0] >> 5) & 31)
	w[8*6+1] ^= int8((s[5*6+1] << 3) & 31)
	w[8*6+2] = int8((s[5*6+1] >> 2) & 31)
	/* Making it signed */
	var carry int8 = 0
	for i := 0; i < 50; i++ {
		w[i] += carry
		w[i+1] += w[i] >> 5
		w[i] &= 31
		carry = w[i] >> 4
		w[i] -= carry << 5
	}
	w[50] += carry
}
// ScalarMult sets p to s * q and returns p, using a signed 5-bit
// fixed-window method with constant-time table lookups so the operation
// sequence does not depend on the scalar bits.
func (p *ExtendedPoint) ScalarMult(q *ExtendedPoint, s *[32]byte) *ExtendedPoint {
	// See eg. https://cryptojedi.org/peter/data/eccss-20130911b.pdf
	var lut [17]ExtendedPoint
	var t ExtendedPoint
	var window [51]int8
	// Precomputations: lut[j] = j*q for j = 0..16.
	computeScalarWindow5(s, &window)
	lut[0].SetZero()
	lut[1].Set(q)
	for i := 2; i < 16; i += 2 {
		lut[i].Double(&lut[i>>1])
		lut[i+1].Add(&lut[i], q)
	}
	lut[16].Double(&lut[8])
	// Compute! Process the 51 signed digits from most to least significant:
	// five doublings, then add the selected multiple of q.
	p.SetZero()
	for i := 50; i >= 0; i-- {
		var pp ProjectivePoint
		var cp CompletedPoint
		cp.DoubleExtended(p)
		for z := 0; z < 4; z++ {
			pp.SetCompleted(&cp)
			cp.DoubleProjective(&pp)
		}
		p.SetCompleted(&cp)
		// Constant-time select t = lut[|digit|]: scan the whole table and
		// conditionally copy the matching entry.
		t.Set(&lut[0])
		b := int32(window[i])
		for j := 1; j <= 16; j++ {
			c := equal15(b, int32(-j)) | equal15(b, int32(j))
			t.ConditionalSet(&lut[j], c)
		}
		// Conditionally negate t (negate X and T) when the digit is negative.
		var v FieldElement
		c := negative(b)
		v.Neg(&t.X)
		t.X.ConditionalSet(&v, c)
		v.Neg(&t.T)
		t.T.ConditionalSet(&v, c)
		p.Add(p, &t)
	}
	return p
}
// Neg sets p to -q by negating the X and T coordinates. Returns p.
func (p *ExtendedPoint) Neg(q *ExtendedPoint) *ExtendedPoint {
	p.X.Neg(&q.X)
	p.Y.Set(&q.Y)
	p.Z.Set(&q.Z)
	p.T.Neg(&q.T)
	return p
}
// RistrettoEqualsI returns 1 if p and q are in the same Ristretto
// equivalence class and 0 otherwise, in constant time.
// Assumes p and q are both even.
func (p *ExtendedPoint) RistrettoEqualsI(q *ExtendedPoint) int32 {
	var x1y2, x2y1, x1x2, y1y2 FieldElement
	x1y2.Mul(&p.X, &q.Y)
	x2y1.Mul(&q.X, &p.Y)
	x1x2.Mul(&p.X, &q.X)
	y1y2.Mul(&p.Y, &q.Y)
	// Equal iff x1*y2 == x2*y1 or x1*x2 == y1*y2 (cross-ratio test over the
	// projective representatives).
	return 1 - ((1 - x1y2.EqualsI(&x2y1)) & (1 - x1x2.EqualsI(&y1y2)))
}
// String returns a human-readable representation of p including its
// Ristretto encoding in hex.
// WARNING This operation is not constant-time. Do not use for cryptography
// unless you're sure this is not an issue.
func (p *ExtendedPoint) String() string {
	return fmt.Sprintf("ExtendedPoint(%v, %v, %v, %v; %v)",
		p.X, p.Y, p.Z, p.T, hex.EncodeToString(p.Ristretto()))
}
// String returns a human-readable representation of p including the
// Ristretto encoding of its extended-coordinate conversion in hex.
// WARNING This operation is not constant-time. Do not use for cryptography
// unless you're sure this is not an issue.
func (p *CompletedPoint) String() string {
	var ep ExtendedPoint
	ep.SetCompleted(p)
	return fmt.Sprintf("CompletedPoint(%v, %v, %v, %v; %v)",
		p.X, p.Y, p.Z, p.T, hex.EncodeToString(ep.Ristretto()))
}
package dst
// F-distribution, alias Fisher-Snedecor distribution
// FPDF returns the PDF of the F distribution with d1 and d2 degrees of
// freedom. The normalization constant 1/B(d1/2, d2/2) is computed once,
// outside the returned closure.
func FPDF(d1, d2 int64) func(x float64) float64 {
	v1, v2 := float64(d1), float64(d2)
	norm := 1 / B(v1/2, v2/2)
	return func(x float64) float64 {
		num := pow(v1*x, v1) * pow(v2, v2)
		den := pow(v1*x+v2, v1+v2)
		return norm * sqrt(num/den) / x
	}
}
// FLnPDF returns the natural logarithm of the PDF of the F distribution
// with d1 and d2 degrees of freedom. Working in log space avoids the
// overflow that pow() in FPDF can hit for large degrees of freedom.
func FLnPDF(d1, d2 int64) func(x float64) float64 {
	df1 := float64(d1)
	df2 := float64(d2)
	// -ln B(d1/2, d2/2), the log of the normalization constant.
	normalization := -logB(df1/2, df2/2)
	return func(x float64) float64 {
		return normalization + log(df1*x)*df1/2 + log(df2)*df2/2 - log(df1*x+df2)*(df1+df2)/2 - log(x)
	}
}
// FPDFAt returns the value of the PDF of the F distribution with d1 and d2
// degrees of freedom, evaluated at x.
func FPDFAt(d1, d2 int64, x float64) float64 {
	return FPDF(d1, d2)(x)
}
// FCDF returns the CDF of the F distribution with d1 and d2 degrees of
// freedom, expressed through the regularized incomplete beta function iBr:
// F(x) = I_y(d1/2, d2/2) with y = d1*x / (d1*x + d2).
func FCDF(d1, d2 int64) func(x float64) float64 {
	return func(x float64) float64 {
		df1 := float64(d1)
		df2 := float64(d2)
		y := df1 * x / (df1*x + df2)
		return iBr(df1/2.0, df2/2.0, y)
	}
}
// FCDFAt returns the value of the CDF of the F distribution with d1 and d2
// degrees of freedom, evaluated at x.
func FCDFAt(d1, d2 int64, x float64) float64 {
	return FCDF(d1, d2)(x)
}
// FQtl returns the inverse of the CDF (quantile function) of the F
// distribution with d1 and d2 degrees of freedom. The returned closure
// yields NaN for probabilities outside [0,1] or for degrees of freedom
// below 1; otherwise it is computed via the inverse beta quantile.
func FQtl(d1, d2 int64) func(p float64) float64 {
	df1 := float64(d1)
	df2 := float64(d2)
	return func(p float64) float64 {
		// Reject invalid probability or degrees of freedom in one guard.
		if p < 0.0 || p > 1.0 || df1 < 1.0 || df2 < 1.0 {
			return NaN
		}
		return (1/BetaQtlFor(df2/2, df1/2, 1-p) - 1) * df2 / df1
	}
}
// FQtlFor returns the inverse of the CDF (quantile) of the F distribution
// with d1 and d2 degrees of freedom, for the given probability p.
func FQtlFor(d1, d2 int64, p float64) float64 {
	// Note: the local previously named "cdf" is the quantile function.
	qtl := FQtl(d1, d2)
	return qtl(p)
}
// FNext returns a random number drawn from the F distribution with d1 and
// d2 degrees of freedom, as the ratio (ChiSq(d1)/d1) / (ChiSq(d2)/d2).
func FNext(d1, d2 int64) float64 {
	df1 := float64(d1)
	df2 := float64(d2)
	return ChiSquareNext(d1) * df2 / (ChiSquareNext(d2) * df1)
}
// F returns a random number generator drawing from the F distribution with
// d1 and d2 degrees of freedom.
func F(d1, d2 int64) func() float64 {
	return func() float64 {
		return FNext(d1, d2)
	}
}
// FMean returns the mean of the F distribution: d2/(d2-2).
// The mean is undefined (NaN) for d2 <= 2.
func FMean(d1, d2 int64) float64 {
	if d2 <= 2 {
		return NaN
	}
	df2 := float64(d2)
	return df2 / (df2 - 2)
}
// FMode returns the mode of the F distribution:
// ((d1-2)/d1) * (d2/(d2+2)). The mode is undefined (NaN) for d1 <= 2.
func FMode(d1, d2 int64) float64 {
	if d1 <= 2 {
		return NaN
	}
	df1 := float64(d1)
	df2 := float64(d2)
	return ((df1 - 2) / df1) * (df2 / (df2 + 2))
}
// FVar returns the variance of the F distribution:
// 2*d2^2*(d1+d2-2) / (d1*(d2-2)^2*(d2-4)).
// The variance is undefined (NaN) for d2 <= 4.
func FVar(d1, d2 int64) float64 {
	if d2 <= 4 {
		return NaN
	}
	df1 := float64(d1)
	df2 := float64(d2)
	return 2 * df2 * df2 * (df1 + df2 - 2) / (df1 * (df2 - 2) * (df2 - 2) * (df2 - 4))
}
// FStd returns the standard deviation of the F distribution, i.e. the
// square root of FVar. Undefined (NaN) for d2 <= 4.
func FStd(d1, d2 int64) float64 {
	if d2 <= 4 {
		return NaN
	}
	return sqrt(FVar(d1, d2))
}
// FSkew returns the skewness of the F distribution:
//
//	(2*d1 + d2 - 2) * sqrt(8*(d2-4)) / ((d2-6) * sqrt(d1*(d1+d2-2)))
//
// The skewness is undefined (NaN) for d2 <= 6.
func FSkew(d1, d2 int64) float64 {
	if d2 <= 6 {
		return NaN
	}
	df1 := float64(d1)
	df2 := float64(d2)
	// The sqrt(d1*(d1+d2-2)) factor belongs in the denominator; the
	// previous expression multiplied by it due to operator precedence
	// (a*b/(c)*d parses as ((a*b)/c)*d).
	return (2*df1 + df2 - 2) * sqrt(8*(df2-4)) / ((df2 - 6) * sqrt(df1*(df1+df2-2)))
}
// FExKurt returns the excess kurtosis of the F distribution:
// 12*(d1*(5*d2-22)*(d1+d2-2) + (d2-4)*(d2-2)^2) /
// (d1*(d2-6)*(d2-8)*(d1+d2-2)).
// The excess kurtosis is undefined (NaN) for d2 <= 8.
func FExKurt(d1, d2 int64) float64 {
	if d2 <= 8 {
		return NaN
	}
	df1 := float64(d1)
	df2 := float64(d2)
	return 12 * (df1*(5*df2-22)*(df1+df2-2) + (df2-4)*(df2-2)*(df2-2)) / (df1 * (df2 - 6) * (df2 - 8) * (df1 + df2 - 2))
}
package filter
import (
"github.com/biogo/biogo/alphabet"
"github.com/biogo/biogo/index/kmerindex"
"github.com/biogo/biogo/seq/linear"
"sort"
)
const (
	// diagonalPadding is the slack, in diagonals, allowed when deciding
	// whether a hit or a neighbouring trapezoid should be merged.
	diagonalPadding = 2
)
// A Merger aggregates and clips an ordered set of trapezoids.
type Merger struct {
	target, query              *linear.Seq    // sequences being compared
	filterParams               *Params        // filter parameters the hits were produced with
	maxIGap                    int            // maximum tolerated gap of bad bases inside a trapezoid
	leftPadding, bottomPadding int            // merge slack: diagonalPadding+binWidth and k+2 respectively
	binWidth                   int            // tube width minus one
	selfComparison             bool           // when true, only the upper diagonal band is considered
	freeTraps, trapList        *trapezoid     // free list and list of finished trapezoids
	trapOrder, tail            *trapezoid     // diagonally ordered open trapezoids; tail of the clipped list
	eoTerm                     *trapezoid     // sentinel terminating trapOrder
	trapCount                  int            // number of trapezoids retired to trapList
	valueToCode                alphabet.Index // letter index used to detect valid bases
}
// NewMerger creates a new Merger using the provided kmerindex, query
// sequence, filter parameters and maximum inter-segment gap length.
// If selfCompare is true only the upper diagonal of the comparison matrix is examined.
func NewMerger(ki *kmerindex.Index, query *linear.Seq, filterParams *Params, maxIGap int, selfCompare bool) *Merger {
	tubeWidth := filterParams.TubeOffset + filterParams.MaxError
	binWidth := tubeWidth - 1
	leftPadding := diagonalPadding + binWidth
	// eoTerm is a sentinel trapezoid placed beyond any possible hit so the
	// scan in MergeFilterHit always terminates.
	eoTerm := &trapezoid{Trapezoid: Trapezoid{
		Left:   query.Len() + 1 + leftPadding,
		Right:  query.Len() + 1,
		Bottom: -1,
		Top:    query.Len() + 1,
	}}
	return &Merger{
		target:         ki.Seq(),
		filterParams:   filterParams,
		maxIGap:        maxIGap,
		query:          query,
		selfComparison: selfCompare,
		bottomPadding:  ki.K() + 2,
		leftPadding:    leftPadding,
		binWidth:       binWidth,
		eoTerm:         eoTerm,
		trapOrder:      eoTerm,
		valueToCode:    ki.Seq().Alpha.LetterIndex(),
	}
}
// MergeFilterHit merges a filter hit into the collection. The hit is folded
// into the diagonally ordered list of open trapezoids (m.trapOrder):
// trapezoids the hit has moved past are retired to m.trapList, overlapping
// trapezoids are widened (and coalesced with neighbours), and otherwise a
// fresh trapezoid is inserted, keeping the order invariant.
func (m *Merger) MergeFilterHit(h *Hit) {
	Left := -h.Diagonal
	// For self comparisons, skip hits at or below the main diagonal band.
	if m.selfComparison && Left <= m.filterParams.MaxError {
		return
	}
	Top := h.To
	Bottom := h.From
	var temp, free *trapezoid
	for base := m.trapOrder; ; base = temp {
		temp = base.next
		switch {
		case Bottom-m.bottomPadding > base.Top:
			// The hit starts too far above base for base to ever grow again:
			// unlink it from the open list and retire it to trapList.
			if free == nil {
				m.trapOrder = temp
			} else {
				free.join(temp)
			}
			m.trapList = base.join(m.trapList)
			m.trapCount++
		case Left-diagonalPadding > base.Right:
			// Hit lies to the right of base: remember base as the insertion
			// point and keep scanning.
			free = base
		case Left+m.leftPadding >= base.Left:
			// Hit overlaps base: extend its bounds to cover the hit.
			if Left+m.binWidth > base.Right {
				base.Right = Left + m.binWidth
			}
			if Left < base.Left {
				base.Left = Left
			}
			if Top > base.Top {
				base.Top = Top
			}
			// The widened base may now touch its left neighbour (free) or
			// its right neighbour (temp); coalesce and recycle the absorbed
			// trapezoid onto the free list.
			if free != nil && free.Right+diagonalPadding >= base.Left {
				free.Right = base.Right
				if free.Bottom > base.Bottom {
					free.Bottom = base.Bottom
				}
				if free.Top < base.Top {
					free.Top = base.Top
				}
				free.join(temp)
				m.freeTraps = base.join(m.freeTraps)
			} else if temp != nil && temp.Left-diagonalPadding <= base.Right {
				base.Right = temp.Right
				if base.Bottom > temp.Bottom {
					base.Bottom = temp.Bottom
				}
				if base.Top < temp.Top {
					base.Top = temp.Top
				}
				base.join(temp.next)
				m.freeTraps = temp.join(m.freeTraps)
				temp = base.next
			}
			return
		default:
			// No overlap: insert a fresh trapezoid for the hit before base,
			// reusing a trapezoid from the free list when available.
			if m.freeTraps == nil {
				m.freeTraps = &trapezoid{}
			}
			if free == nil {
				m.trapOrder = m.freeTraps
			} else {
				free.join(m.freeTraps)
			}
			free, m.freeTraps = m.freeTraps.decapitate()
			free.join(base)
			free.Top = Top
			free.Bottom = Bottom
			free.Left = Left
			free.Right = Left + m.binWidth
			return
		}
	}
}
// clipVertical trims and splits trapezoids along the query axis: runs of at
// least maxIGap invalid query bases (letters with a negative index in
// valueToCode) split a trapezoid in two, and invalid bases at the ends are
// clipped off.
func (m *Merger) clipVertical() {
	for base := m.trapList; base != nil; base = base.next {
		// Examine maxIGap-1 positions of context beyond each end.
		lagPosition := base.Bottom - m.maxIGap + 1
		if lagPosition < 0 {
			lagPosition = 0
		}
		lastPosition := base.Top + m.maxIGap
		if lastPosition > m.query.Len() {
			lastPosition = m.query.Len()
		}
		var pos int
		for pos = lagPosition; pos < lastPosition; pos++ {
			if m.valueToCode[m.query.Seq[pos]] >= 0 {
				// A valid base after a gap of >= maxIGap invalid bases:
				// split the trapezoid at the gap (allocating from freeTraps).
				if pos-lagPosition >= m.maxIGap {
					if lagPosition-base.Bottom > 0 {
						if m.freeTraps == nil {
							m.freeTraps = &trapezoid{}
						}
						m.freeTraps = m.freeTraps.prependFrontTo(base)
						base.Top = lagPosition
						base = base.next
						base.Bottom = pos
						m.trapCount++
					} else {
						base.Bottom = pos
					}
				}
				lagPosition = pos + 1
			}
		}
		// Clip a trailing run of invalid bases.
		if pos-lagPosition >= m.maxIGap {
			base.Top = lagPosition
		}
	}
}
// clipTrapezoids performs the same gap-based trimming and splitting as
// clipVertical, but along the target axis (positions measured in the
// diagonal coordinates Bottom-Right .. Top-Left). Trapezoids that are too
// short to be meaningful are skipped. The last processed trapezoid is
// remembered in m.tail.
func (m *Merger) clipTrapezoids() {
	for base := m.trapList; base != nil; base = base.next {
		// Skip trapezoids shorter than the clipping context.
		if base.Top-base.Bottom < m.bottomPadding-2 {
			continue
		}
		// Target-axis extent of the trapezoid.
		aBottom := base.Bottom - base.Right
		aTop := base.Top - base.Left
		lagPosition := aBottom - m.maxIGap + 1
		if lagPosition < 0 {
			lagPosition = 0
		}
		lastPosition := aTop + m.maxIGap
		if lastPosition > m.target.Len() {
			lastPosition = m.target.Len()
		}
		lagClip := aBottom
		var pos int
		for pos = lagPosition; pos < lastPosition; pos++ {
			if m.valueToCode[m.target.Seq[pos]] >= 0 {
				// Split at runs of >= maxIGap invalid target bases.
				if pos-lagPosition >= m.maxIGap {
					if lagPosition > lagClip {
						if m.freeTraps == nil {
							m.freeTraps = &trapezoid{}
						}
						m.freeTraps = m.freeTraps.prependFrontTo(base)
						base.clip(lagPosition, lagClip)
						base = base.next
						m.trapCount++
					}
					lagClip = pos
				}
				lagPosition = pos + 1
			}
		}
		if pos-lagPosition < m.maxIGap {
			lagPosition = aTop
		}
		base.clip(lagPosition, lagClip)
		m.tail = base
	}
}
// FinaliseMerge finalises the merged collection and returns a sorted slice
// of Trapezoids: any still-open trapezoids are retired, both clipping
// passes are applied, and the linked list is flattened into a slice.
func (m *Merger) FinaliseMerge() Trapezoids {
	// Retire every remaining open trapezoid (everything before the
	// sentinel) to the finished list.
	var next *trapezoid
	for base := m.trapOrder; base != m.eoTerm; base = next {
		next = base.next
		m.trapList = base.join(m.trapList)
		m.trapCount++
	}
	m.clipVertical()
	m.clipTrapezoids()
	// Return the clipped list's tail to the free list for reuse.
	if m.tail != nil {
		m.freeTraps = m.tail.join(m.freeTraps)
	}
	// Flatten the linked list into a slice, severing the links as we go.
	traps := make(Trapezoids, m.trapCount)
	for i, z := 0, m.trapList; i < m.trapCount; i++ {
		traps[i] = z.Trapezoid
		z, z.next = z.next, nil
	}
	sort.Sort(traps)
	return traps
}
package core
import (
"log"
"github.com/go-gl/mathgl/mgl64"
)
// PhysicsSystem is an interface which wraps all physics related logic.
type PhysicsSystem interface {
// Start is called by the application at startup time. Implementations should perform bootstapping here.
Start()
// Stop is called by the application at shutdown time. Implementations should perform cleanup here.
Stop()
// Update is called at every cycle of the application runloop with a list of nodes and a time delta from the
// previous iteration. Implementations will want to perform all their computation here.
Update(dt float64, nodes []*Node)
// SetGravity sets the global gravity vector. This is for testing purposes and will be removed.
SetGravity(g mgl64.Vec3)
// AddRigidBody adds a rigid body to the physics world.
AddRigidBody(RigidBody)
// RemoveRigidBody removed a rigid body from the physics world.
RemoveRigidBody(RigidBody)
// CreateRigidBody creates a new rigid body which can be attached to a scenegraph node.
CreateRigidBody(mass float32, collisionShape CollisionShape) RigidBody
// DeleteRigidBody deletes a rigid body
DeleteRigidBody(RigidBody)
// NewStaticPlaneShape returns a collision shape.
NewStaticPlaneShape(normal mgl64.Vec3, constant float64) CollisionShape
// NewSphereShape returns a collision shape.
NewSphereShape(radius float64) CollisionShape
// NewBoxShape returns a collision shape.
NewBoxShape(mgl64.Vec3) CollisionShape
// NewCapsuleShape returns a collision shape.
NewCapsuleShape(radius float64, height float64) CollisionShape
// NewConeShape returns a collision shape.
NewConeShape(radius float64, height float64) CollisionShape
// NewCylinderShape returns a collision shape.
NewCylinderShape(radius float64, height float64) CollisionShape
// NewCompoundSphereShape returns a collision shape.
NewCompoundShape() CollisionShape
// NewConvexHullShape returns a collision shape.
NewConvexHullShape() CollisionShape
// NewStaticTriangleMeshShape returns a collision shape.
NewStaticTriangleMeshShape(Mesh) CollisionShape
// DeleteShape deletes a collision shape.
DeleteShape(CollisionShape)
}
// CollisionShape is an interface which wraps information used to compute object collisions.
type CollisionShape interface {
// AddChildShape adds a child collision shape to this shape.
AddChildShape(childshape CollisionShape, position mgl64.Vec3, orientation mgl64.Quat)
// AddVertex adds a single vertex. Used for convex hull shapes.
AddVertex(mgl64.Vec3)
}
// RigidBody is an interface which wraps a physics rigid body. It contains
// position, orientation, momentum and collision shape information.
type RigidBody interface {
// GetTransform returns the rigid body world transform.
GetTransform() mgl64.Mat4
// SetTransform sets the rigid body world transform.
SetTransform(mgl64.Mat4)
// ApplyImpulse applies `impulse` on the rigid body at its position `localPosition`.
ApplyImpulse(impulse mgl64.Vec3, localPosition mgl64.Vec3)
}
var (
physicsSystem PhysicsSystem
)
// SetPhysicsSystem is meant to be called from PhysicsSystem implementations on their init method.
// Registering a second system is a fatal error: only one physics system may be active.
func SetPhysicsSystem(ps PhysicsSystem) {
	if physicsSystem != nil {
		// log.Fatal exits the process; a duplicate registration is unrecoverable.
		log.Fatal("Can't replace previously registered physics system. Please make sure you're not importing twice")
	}
	physicsSystem = ps
}
// GetPhysicsSystem returns the registered physicsSystem, thereby exposing it
// to any package importing core. Returns nil if no system has registered.
func GetPhysicsSystem() PhysicsSystem {
	return physicsSystem
}
// PhysicsComponent is an interface which wraps physics handling logic for a scenegraph node.
type PhysicsComponent interface {
	// Run is called on each node and decides whether the node should be added
	// to nodeBucket (the set of nodes participating in the simulation step).
	Run(node *Node, nodeBucket *[]*Node)
}
// DefaultPhysicsComponent is a utility physics component which adds all nodes
// containing a rigid body to the bucket. It holds no state.
type DefaultPhysicsComponent struct{}
// Run implements the PhysicsComponent interface: a node carrying a rigid body
// is appended to nodeBucket, and the walk then recurses into all children.
// NOTE(review): assumes every child has a non-nil physicsComponent — confirm,
// otherwise the recursive call dereferences nil.
func (p *DefaultPhysicsComponent) Run(n *Node, nodeBucket *[]*Node) {
	if n.rigidBody != nil {
		*nodeBucket = append(*nodeBucket, n)
	}
	for _, c := range n.children {
		c.physicsComponent.Run(c, nodeBucket)
	}
}
// NewDefaultPhysicsComponent returns a new DefaultPhysicsComponent.
func NewDefaultPhysicsComponent(active bool) *DefaultPhysicsComponent {
pc := DefaultPhysicsComponent{}
return &pc
} | core/physics.go | 0.762954 | 0.594698 | physics.go | starcoder |
package bookstore
import (
"math"
"sort"
)
const bookPrice = 800 // price of one book: 800 cents = $8.00
// discountTiers[i] is the percentage discount for a group of i+1 distinct books.
var discountTiers = [...]int{0, 5, 10, 20, 25}
// Cost implements the book store exercise: it returns the cheapest total
// price in cents for the given multiset of books, after grouping them to
// maximize the per-group discount.
func Cost(books []int) int {
	organize(books)
	return cost(books, 0)
}
// organize reworks the input slice in place so that repeated values sit
// together at the front, ordered by descending frequency.
// e.g. [1,1,2,3,4,4,5,4,2] -> [4,4,4,1,1,2,2,3,5]
// (values with equal frequency may appear in any relative order, since
// sort.Slice is not stable).
func organize(books []int) {
	// kv pairs a book value with its occurrence count, used for sorting.
	type kv struct {
		Key   int
		Value int
	}
	// Count how many times each book appears.
	freq := make(map[int]int)
	for i := range books {
		freq[books[i]]++
	}
	// Collect the counts and sort them in descending order.
	// BUG FIX: the original allocated with make([]kv, len(freq)) and then
	// appended, which left len(freq) zero-value entries at the front of the
	// slice being sorted. Allocate with zero length and full capacity.
	ss := make([]kv, 0, len(freq))
	for k, v := range freq {
		ss = append(ss, kv{k, v})
	}
	sort.Slice(ss, func(i, j int) bool {
		return ss[i].Value > ss[j].Value
	})
	// Expand the frequencies back into repetitions, overwriting the input:
	// e.g. 4*1, 3*2 -> 1,1,1,1,2,2,2.
	p := 0
	for _, kv := range ss {
		for i := 0; i < kv.Value; i++ {
			books[p] = kv.Key
			p++
		}
	}
	// The modified slice is handed back through the shared backing array.
}
// cost recursively computes the minimum achievable total price for the
// remaining books, given the cents already spent (priceSoFar).
//
// Each step peels off one maximal set of distinct books, tries every group
// size carved from it, and keeps the cheapest completion.
func cost(books []int, priceSoFar int) int {
	if len(books) == 0 {
		return priceSoFar
	}
	distinct, rest := getDistinctBooks(books)
	best := math.MaxInt32
	for size := 1; size <= len(distinct); size++ {
		// Discount the first `size` distinct books as one group; the unused
		// distinct books rejoin the pile for later grouping.
		pile := make([]int, len(rest), len(rest)+len(distinct)-size)
		copy(pile, rest)
		pile = append(pile, distinct[size:]...)
		if total := cost(pile, priceSoFar+groupCost(size)); total < best {
			best = total
		}
	}
	return best
}
// getDistinctBooks splits books into the first occurrence of every value
// (distinct) and all the leftovers (remaining), preserving the input order
// within each part.
func getDistinctBooks(books []int) (distinct, remaining []int) {
	seen := map[int]bool{}
	for _, b := range books {
		if seen[b] {
			remaining = append(remaining, b)
		} else {
			seen[b] = true
			distinct = append(distinct, b)
		}
	}
	return distinct, remaining
}
func groupCost(groupSize int) int {
normalPrice := bookPrice * groupSize
discount := (normalPrice * discountTiers[groupSize-1]) / 100
return normalPrice - discount
} | exercises/book-store/example.go | 0.644896 | 0.407569 | example.go | starcoder |
package ttt
import (
"sort"
"strings"
)
/*
DataStore is the master source of lecture and course data.
Init must succeed before Lectures or Courses are consulted.
*/
type DataStore interface {
	Lectures() []Lecture
	Courses() []Course
	Init() error
}
/*
Grade is the target grade (school year) of a lecture.
*/
type Grade int
/*
CreditCount is the number of credits a lecture is worth.
*/
type CreditCount int
/*
Lecture describes a single lecture.
*/
type Lecture struct {
	Name string `json:"name"`
	Grade Grade `json:"grade"`
	Credit CreditCount `json:"credit"`
}
/*
Course lists the requirements and recommended lectures of a course,
plus the credit total needed for its diploma.
*/
type Course struct {
	Name string `json:"name"`
	DiplomaCredit CreditCount `json:"diploma-credit"`
	Requirements []string `json:"requirements"`
	Recommends []string `json:"recommends"`
}
// distance pairs a lecture with its edit distance from a query string
// (used by FindSimilarLectures).
type distance struct {
	distance int
	lecture Lecture
}
/*
CourseDiplomaResult is the verification result of a course diploma:
which requirements were met, which remain, and the credit totals.
*/
type CourseDiplomaResult struct {
	Name string
	Requirements []string
	DiplomaCredit CreditCount
	GotCredit CreditCount
	GotRequirements []string
	RestRequirements []string
}
/*
Checker verifies course diplomas against a DataStore.
*/
type Checker struct {
	Store DataStore
}
/*
NewChecker creates a Checker backed by the given data store.
*/
func NewChecker(ds DataStore) *Checker {
	return &Checker{Store: ds}
}
// contains reports whether item occurs in slice.
func contains(slice []string, item string) bool {
	for _, s := range slice {
		if s == item {
			return true
		}
	}
	return false
}
// findCreditOfLecture returns the credit count of the named lecture,
// or 0 when the lecture is unknown.
func (z *Checker) findCreditOfLecture(name string) CreditCount {
	if lecture := z.FindLecture(name); lecture != nil {
		return lecture.Credit
	}
	return 0
}
// countNumberOfCredits sums the credits of the earned lectures (gotCredits)
// that are either required or recommended by the given course.
func (z *Checker) countNumberOfCredits(gotCredits []string, course Course) CreditCount {
	var total CreditCount
	for _, name := range gotCredits {
		if contains(course.Requirements, name) || contains(course.Recommends, name) {
			total += z.findCreditOfLecture(name)
		}
	}
	return total
}
// findRequirements returns the requirements selected by includeFunc, which
// receives "is this requirement among gotCredits" for each entry. Passing
// the identity selects fulfilled requirements; negation selects missing ones.
func findRequirements(gotCredits []string, requirements []string, includeFunc func(flag bool) bool) []string {
	picked := []string{}
	for _, req := range requirements {
		if includeFunc(contains(gotCredits, req)) {
			picked = append(picked, req)
		}
	}
	return picked
}
/*
Check verifies a course diploma for the given earned credits, reporting the
credit total, the fulfilled requirements and the outstanding ones.
*/
func (z *Checker) Check(gotCredits []string, course Course) CourseDiplomaResult {
	var result CourseDiplomaResult
	result.Name = course.Name
	result.Requirements = course.Requirements
	result.DiplomaCredit = course.DiplomaCredit
	result.GotCredit = z.countNumberOfCredits(gotCredits, course)
	result.GotRequirements = findRequirements(gotCredits, course.Requirements, func(flag bool) bool { return flag })
	result.RestRequirements = findRequirements(gotCredits, course.Requirements, func(flag bool) bool { return !flag })
	return result
}
/*
FindCourses returns the courses whose names contain the given substring.
An empty name matches every course.
*/
func (z *Checker) FindCourses(name string) []Course {
	if name == "" {
		return z.Store.Courses()
	}
	found := []Course{}
	for _, course := range z.Store.Courses() {
		if strings.Contains(course.Name, name) {
			found = append(found, course)
		}
	}
	return found
}
/*
FindLecture returns the lecture exactly matching the given name,
or nil when no such lecture exists.
*/
func (z *Checker) FindLecture(name string) *Lecture {
	for _, l := range z.Store.Lectures() {
		if l.Name == name {
			found := l // copy, so the returned pointer is independent of the loop
			return &found
		}
	}
	return nil
}
// createDistances computes the Levenshtein distance from name to every
// lecture name in the store.
func createDistances(name string, z *Checker) []distance {
	lectures := z.Store.Lectures()
	distances := make([]distance, 0, len(lectures))
	for _, l := range lectures {
		distances = append(distances, distance{distance: LevenshteinS(name, l.Name), lecture: l})
	}
	return distances
}
// sortDistances orders the slice by ascending edit distance, in place.
func sortDistances(distances []distance) {
	sort.Slice(distances, func(a, b int) bool {
		return distances[a].distance < distances[b].distance
	})
}
/*
FindSimilarLectures finds the lectures whose names are closest (by
Levenshtein distance) to the given name. If a lecture with exactly the
given name exists, the returned slice is empty.
*/
func (z *Checker) FindSimilarLectures(name string) []Lecture {
	distances := createDistances(name, z)
	// BUG FIX: guard against an empty lecture store; the original indexed
	// distances[0] unconditionally and panicked on an empty slice.
	if len(distances) == 0 {
		return []Lecture{}
	}
	sortDistances(distances)
	min := distances[0].distance
	if min == 0 {
		// Exact match present: no "similar" suggestions are needed.
		return []Lecture{}
	}
	return findLecturesWithMinimumDistance(min, distances)
}
func findLecturesWithMinimumDistance(min int, distances []distance) []Lecture {
results := []Lecture{}
for _, d := range distances {
if d.distance == min {
results = append(results, d.lecture)
} else {
break
}
}
return results
} | ttt.go | 0.543348 | 0.434581 | ttt.go | starcoder |
---------------------------------------------------------------------------
Copyright (c) 2013-2015 AT&T Intellectual Property
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
---------------------------------------------------------------------------
*/
/*
Mnemonic: clike.go: atof
	Abstract: a C-like atof that doesn't error when it encounters
a non-digit; returning 0 if there are no digits. The input
(string or buffer) is expected to be base 10 with optional leading
zeros, an optional trailing decimal and an optional fraction
following the decimal. This also allows a lead +/-.
There is an extension on the C functions... if the value is
postfixed with M/K/G or m/k/g the return value will be
'expanded' accordingly with the capitalised values being
powrs of 10 (e.g. MB) and the lower case indicating powers
of 2 (e.g. MiB).
Input can be either a string or a byte array
Author: <NAME>
Date: October 12 2013
*/
package clike
import (
"strconv" ;
)
/*
Atof accepts a string or a byte slice ([]byte) and converts its leading
characters into a float64. In the manner of the C library functions,
parsing stops at the first character that is not a digit, a decimal point,
or a leading +/- sign; no error is returned and 0 is the fallback value.

The remainder of the input may be one of these suffixes, which scales the
result:

	M, MB, K, KB, G, GB    - powers of ten (e.g. 10M -> 10000000)
	m, MiB, k, KiB, g, GiB - powers of two (e.g. 10m -> 10485760)

Any other input type, or any unrecognized suffix, leaves the value as-is.
*/
func Atof(objx interface{}) (v float64) {
	var buf []byte
	switch x := objx.(type) {
	case []byte:
		buf = x
	case string:
		buf = []byte(x)
	default:
		// nil or an unconvertible type: mimic C's "no digits" result.
		return 0
	}
	if len(buf) == 0 {
		return 0
	}
	// Scan past an optional sign, then over digits and decimal points.
	end := 0
	if buf[end] == '-' || buf[end] == '+' {
		end++
	}
	for end < len(buf) && (buf[end] == '.' || (buf[end] >= '0' && buf[end] <= '9')) {
		end++
	}
	if end > 0 {
		// ParseFloat leaves v at 0 on malformed input (e.g. "1.2.3").
		v, _ = strconv.ParseFloat(string(buf[:end]), 64)
	}
	if end >= len(buf) {
		return v
	}
	switch string(buf[end:]) {
	case "M", "MB":
		v *= 1000000
	case "G", "GB":
		v *= 1000000000
	case "K", "KB":
		v *= 1000
	case "m", "MiB":
		v *= 1048576
	case "g", "GiB":
		v *= 1073741824
	case "k", "KiB":
		v *= 1024
	}
	return v
}
package country
import (
"strconv"
"strings"
)
//go:generate go run parser.go
type (
	// Name is an English country name (lower case when used as a map key).
	Name string
	// Alpha2Code is an ISO 3166-1 alpha-2 (two letter) country code.
	Alpha2Code string
	// Numeric3Code is an ISO 3166-1 numeric-3 country code ("003"-style string).
	Numeric3Code string
)
// Country holds fields for a country as defined by ISO 3166.
type Country struct {
	Name string
	Alpha2Code string
	Numeric3Code string
}
// NameToNum converts an English country name (case-insensitive) to its ISO
// 3166-1 numeric code. ok is false when the name is unknown.
func NameToNum(countryName string) (countryNumCode Numeric3Code, ok bool) {
	codes, ok := countryNameMap[Name(strings.ToLower(countryName))]
	if !ok {
		return "", false
	}
	return Numeric3Code(codes.Numeric3Code), true
}
// ISOToNum converts a two-letter ISO country code (any case) to its ISO
// 3166-1 numeric code. ok is false when the code is unknown.
func ISOToNum(countryIsoCode Alpha2Code) (countryNumCode Numeric3Code, ok bool) {
	codes, found := iso2LetterMap[countryIsoCode.ToLower()]
	if !found {
		return "", false
	}
	return Numeric3Code(codes.Numeric3Code), true
}
// NumToISO converts a country numeric code into the lower-cased two-letter
// ISO code. ok is false when the numeric code is unknown.
func NumToISO(countryNumCode Numeric3Code) (countryISOCode Alpha2Code, ok bool) {
	codes, found := isoNumericMap[countryNumCode]
	if !found {
		return "", false
	}
	return Alpha2Code(strings.ToLower(codes.Alpha2Code)), true
}
// CheckNum validates a country numeric code against the known ISO table,
// echoing back the canonical code on success.
func CheckNum(countryNum Numeric3Code) (countryNumCode Numeric3Code, ok bool) {
	codes, found := isoNumericMap[countryNum]
	if !found {
		return "", false
	}
	return Numeric3Code(codes.Numeric3Code), true
}
// ToNumeric3 translates a country identifier to its ISO 3166-1 numeric code.
// It only supports English country names and ISO 3166-1 alpha-2 codes.
// Interpretation depends on the input length:
//
//	< 2 characters  -> always fails (empty code, false)
//	  2 characters  -> treated as an ISO alpha-2 code
//	  3 characters  -> assumed to already be a numeric code, and validated
//	> 3 characters  -> treated as an English country name
func ToNumeric3(country string) (countryNumCode Numeric3Code, answerOK bool) {
	switch {
	case len(country) == 2:
		countryNumCode, answerOK = ISOToNum(Alpha2Code(country))
	case len(country) == 3:
		countryNumCode, answerOK = CheckNum(Numeric3Code(country))
	case len(country) > 3:
		countryNumCode, answerOK = NameToNum(country)
	}
	return
}
// ParseCountries parses a comma-separated list of country identifiers
// (names, alpha-2 codes, or numeric codes) into de-duplicated numeric
// codes, preserving first-seen order. Unrecognized entries are skipped.
func ParseCountries(list string) []Numeric3Code {
	parts := strings.Split(list, ",")
	seen := make(map[Numeric3Code]bool, len(parts))
	codes := make([]Numeric3Code, 0, len(parts))
	for _, part := range parts {
		// Strip any embedded spaces before interpreting the entry.
		part = strings.Replace(part, " ", "", -1)
		code, ok := ToNumeric3(part)
		if !ok {
			continue
		}
		if !seen[code] {
			seen[code] = true
			codes = append(codes, code)
		}
	}
	return codes
}
// IsValid reports whether c is a valid ISO 3166-1 numeric-3 code: exactly
// three characters, numeric, and present in the known code table.
func (c Numeric3Code) IsValid() bool {
	if len(c) != 3 {
		return false
	}
	if _, err := strconv.Atoi(string(c)); err != nil {
		return false
	}
	_, known := CheckNum(c)
	return known
}
// ToUpper returns the code converted to upper case.
func (a Alpha2Code) ToUpper() Alpha2Code {
	upper := strings.ToUpper(string(a))
	return Alpha2Code(upper)
}
// ToLower converts an Alpha2Code to its lower case representation
func (a Alpha2Code) ToLower() Alpha2Code {
return Alpha2Code(strings.ToLower(string(a)))
} | country.go | 0.575827 | 0.484868 | country.go | starcoder |
package golds
// Slice is a sequence of values.
// Basically it's a utility wrapper around a plain slice: all slice
// operations (len, cap, indexing, append via the pointer methods) apply.
type Slice[E any] []E
// NewSlice builds a Slice from the provided values.
func NewSlice[E any](vv ...E) Slice[E] {
	var s Slice[E] = vv
	return s
}
// Repeat returns a Slice containing n copies of v.
// It returns nil when n == 0 and panics when n is negative.
func Repeat[E any](n int, v E) Slice[E] {
	if n < 0 {
		panic("golds.Repeat: negative Repeat count")
	}
	if n == 0 {
		return nil
	}
	out := make(Slice[E], n)
	out.Fill(v)
	return out
}
// SliceIterFn returns a new Slice[E] whose i-th element is fn(i), for i in
// [0, n). It returns nil when n == 0 and panics when n is negative.
func SliceIterFn[E any](n int, fn func(int) E) Slice[E] {
	switch {
	case n == 0:
		return nil
	case n < 0:
		// BUG FIX: the panic message previously referred to golds.Repeat.
		panic("golds.SliceIterFn: negative count")
	}
	var s = make(Slice[E], n)
	for i := range s {
		s[i] = fn(i)
	}
	return s
}
// Len returns the number of elements in the slice.
func (s Slice[E]) Len() int { return len(s) }
// Cap returns the capacity of the underlying slice.
func (s Slice[E]) Cap() int { return cap(s) }
// Index returns the element at position i; a negative i addresses the
// element at len+i (counting back from the end).
// Panics when the resulting position is out of the [-len, len) range.
func (s Slice[E]) Index(i int) E {
	if i >= 0 {
		return s[i]
	}
	return s[len(s)+i]
}
// Swap exchanges two elements of the slice in place.
// A negative index i (or j) addresses the element at len+i (len+j),
// mirroring the semantics of Index.
func (s Slice[E]) Swap(i, j int) {
	if i < 0 {
		i = s.Len() + i
	}
	if j < 0 {
		// BUG FIX: was s.Len() - j, which for a negative j produced an index
		// past the end of the slice; negative indexes count back from the end.
		j = s.Len() + j
	}
	s[i], s[j] = s[j], s[i]
}
// Count returns how many elements satisfy fn.
func (s Slice[E]) Count(fn func(v E) bool) int {
	total := 0
	for _, e := range s {
		if fn(e) {
			total++
		}
	}
	return total
}
// Filter returns a new slice holding the elements for which fn is true.
// fn may be invoked more than once per element (once while counting
// capacity, once while collecting).
func (s Slice[E]) Filter(fn func(v E) bool) Slice[E] {
	out := make(Slice[E], 0, s.Count(fn))
	for _, e := range s {
		if fn(e) {
			out = append(out, e)
		}
	}
	return out
}
// FilterInPlace compacts the elements satisfying fn to the front of the
// slice and zeroes the freed tail slots.
// NOTE(review): the receiver is a value, so the caller's slice length is
// unchanged; only the leading Count(fn) elements are meaningful afterwards,
// the remainder are zero values — confirm callers expect this.
func (s Slice[E]) FilterInPlace(fn func(v E) bool) {
	var i int
	for _, v := range s {
		if fn(v) {
			s[i] = v
			i++
		}
	}
	var empty E
	s[i:].Fill(empty)
}
// Select gathers the elements at the given indexes into a new slice.
// Negative indexes address elements from the end (len+index).
// Example:
//	Slice[int]{1, 2, 3}.Select(-1, 0, 2) -> Slice[int]{3, 1, 2}
func (s Slice[E]) Select(indexes ...int) Slice[E] {
	out := make(Slice[E], len(indexes))
	for n, idx := range indexes {
		out[n] = s.Index(idx)
	}
	return out
}
// Apply returns a new slice whose elements are fn applied to each element
// of s. The receiver is not modified.
func (s Slice[E]) Apply(fn func(v E) E) Slice[E] {
	result := make(Slice[E], 0, s.Len())
	for _, v := range s {
		result = append(result, fn(v))
	}
	// BUG FIX: the original returned s, discarding the mapped values.
	return result
}
// Append adds the new elements to the end of the slice in place
// (the receiver is a pointer so the caller sees the grown slice).
func (s *Slice[E]) Append(vv ...E) {
	*s = append(*s, vv...)
}
// Pop removes and returns the last element; the vacated slot is zeroed so
// the backing array does not retain the value. Returns false when the
// slice is empty.
func (s *Slice[E]) Pop() (E, bool) {
	var zero E
	sl := *s
	if len(sl) == 0 {
		return zero, false
	}
	last := len(sl) - 1
	v := sl[last]
	sl[last] = zero
	*s = sl[:last]
	return v, true
}
// Copy returns a shallow copy of the slice.
func (s Slice[E]) Copy() Slice[E] {
	out := make(Slice[E], len(s))
	copy(out, s)
	return out
}
// CopyWith returns a new slice whose i-th element is fn(s[i]).
func (s Slice[E]) CopyWith(fn func(E) E) Slice[E] {
	out := make(Slice[E], len(s))
	for i := range s {
		out[i] = fn(s[i])
	}
	return out
}
// Fill sets every element of the slice to v.
// It writes v once and then repeatedly doubles the initialized prefix with
// copy (which lowers to memmove), rather than assigning element by element.
func (s Slice[E]) Fill(v E) {
	var n = s.Len()
	if n == 0 {
		return
	}
	s[0] = v
	// Invariant: s[:i] is already filled; each copy doubles that prefix.
	for i := 1; i < n; i *= 2 {
		copy(s[i:], s[:i])
	}
}
// FillWith populates every slot with a fresh result of fn, invoking fn once
// per element in index order.
func (s Slice[E]) FillWith(fn func() E) {
	for i := 0; i < len(s); i++ {
		s[i] = fn()
	}
}
// Insert places v at position i, shifting elements toward the end of the
// slice. Panics if the index is out of range.
// Slice{1, 2, 3}.Insert(1, 100) -> Slice{1, 100, 2, 3}
func (s *Slice[E]) Insert(i int, v E) {
	var sl = *s
	// append(sl[:i+1], sl[i:]...) grows the slice by one and shifts
	// elements i..len-1 right by one slot (the runtime's copy handles the
	// overlap), leaving a duplicate of the old sl[i] at position i …
	*s = append(sl[:i+1], sl[i:]...)
	// … which is then overwritten with the new value.
	(*s)[i] = v
}
// Delete removes the element at position i, shifting the tail toward the
// beginning of the slice. Panics if the index is out of range.
// Slice{1, 2, 3}.Delete(1) -> Slice{2, 3}
func (s *Slice[E]) Delete(i int) {
	var sl = *s
	sl = append(sl[:i], sl[i+1:]...)
	var empty E
	// Zero the now-duplicated last slot of the original backing array so it
	// does not keep a stale value alive; *s still has the original length
	// here, so s.Len()-1 addresses that slot.
	(*s)[s.Len()-1] = empty
	*s = sl
}
// DeleteNoOrder removes the element at position i in O(1) by moving the
// last element into its place; element order is not preserved. The vacated
// last slot is zeroed so the backing array does not retain the value.
func (s *Slice[E]) DeleteNoOrder(i int) {
	sl := *s
	last := len(sl) - 1
	sl[i] = sl[last]
	var zero E
	sl[last] = zero
	*s = sl[:last]
}
// EqWith reports whether s and other are element-wise equal according to
// the provided equality hook.
func (s Slice[E]) EqWith(other []E, fn func(E, E) bool) bool {
	return SliceEqWith([]E(s), other, fn)
}
// SliceEq reports whether a and b hold the same elements in the same order.
// A nil slice compares equal to an empty one.
func SliceEq[E comparable](a, b []E) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
// SliceEqWith compares two slices element-wise, respecting order, using the
// provided equality hook fn. A nil slice compares equal to an empty one.
func SliceEqWith[A, B any](a []A, b []B, fn func(A, B) bool) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		// BUG FIX: the predicate result was inverted — fn reports equality,
		// so a *false* result means the elements differ.
		if !fn(v, b[i]) {
			return false
		}
	}
	return true
}
// SliceContains reports whether v occurs in slice.
func SliceContains[E comparable](slice []E, v E) bool {
	for i := range slice {
		if slice[i] == v {
			return true
		}
	}
	return false
}
// ContainsFn reports whether any element of the slice satisfies fn.
func (slice Slice[E]) ContainsFn(fn func(E) bool) bool {
	return SliceContainsFn([]E(slice), fn)
}
// SliceContainsFn reports whether the fn-predicate is true for at least one
// element of slice.
func SliceContainsFn[E any](slice []E, fn func(E) bool) bool {
	for i := range slice {
		if fn(slice[i]) {
			return true
		}
	}
	return false
}
// Reverse reverses the elements of the slice in place.
func (slice Slice[E]) Reverse() {
	Reverse([]E(slice))
}
// Reverse reverses slice in place by swapping symmetric pairs from both
// ends toward the middle.
func Reverse[E any](slice []E) {
	for i, j := 0, len(slice)-1; i < j; i, j = i+1, j-1 {
		slice[i], slice[j] = slice[j], slice[i]
	}
}
package plot
import (
"math"
)
// Bar implements a stacked-bar plot.
type Bar struct {
	// Style controls drawing; when zero, Draw falls back to plot.Theme.Bar.
	Style
	Label string
	// DynamicWidth derives bar edges from the points' X values instead of
	// fixed unit-width slots (see iter).
	DynamicWidth bool
	// DynamicMinWidth is the minimum width applied in dynamic mode.
	DynamicMinWidth float64
	Data []Point
}
// NewBar creates a bar plot over the given points, with a default dynamic
// minimum width of 2.
func NewBar(label string, points []Point) *Bar {
	b := &Bar{}
	b.Label = label
	b.Data = points
	b.DynamicMinWidth = 2
	return b
}
// iter visits every data point, supplying the left and right extent of the
// bar drawn for that point.
//
// When DynamicWidth is false each bar occupies the unit interval [i, i+1).
// When DynamicWidth is true bar edges fall halfway between the X values of
// adjacent points; the final bar extends by its own X value, widened to at
// least DynamicMinWidth.
func (bar *Bar) iter(fn func(p Point, left, right float64)) {
	if !bar.DynamicWidth {
		for i, p := range bar.Data {
			fn(p, float64(i), float64(i+1))
		}
		return
	}
	left := 0.0
	for i, p := range bar.Data {
		if i+1 < len(bar.Data) {
			right := (p.X + bar.Data[i+1].X) * 0.5
			fn(p, left, right)
			left = right
			continue
		}
		// Last point: width is taken from its own X value, but never
		// narrower than DynamicMinWidth.
		right := left + p.X
		// BUG FIX: the original computed width = left + DynamicMinWidth and
		// then right = left + width, yielding right = 2*left + minWidth.
		if right-left < bar.DynamicMinWidth {
			right = left + bar.DynamicMinWidth
		}
		fn(p, left, right)
		left = right
	}
}
// Stats calculates axis statistics for the bar data. The minimum corner is
// pinned to the origin; the X maximum is the point count in fixed-width
// mode, or the right edge of the last bar in dynamic mode.
func (bar *Bar) Stats() Stats {
	stats := PointsStats(bar.Data)
	stats.Min.X = 0
	stats.Min.Y = 0
	if bar.DynamicWidth {
		stats.Max.X = 0
		bar.iter(func(_ Point, _, right float64) {
			stats.Max.X = right
		})
	} else {
		stats.Max.X = float64(len(bar.Data))
	}
	return stats
}
// Draw draws the bars onto the canvas, converting each bar's data-space
// extents into canvas coordinates. In dynamic-width mode, bars are clamped
// on screen so they never overlap the previously drawn bar and never render
// narrower than DynamicMinWidth pixels.
func (bar *Bar) Draw(plot *Plot, canvas Canvas) {
	x, y := plot.X, plot.Y
	size := canvas.Bounds().Size()
	canvas = canvas.Clip(canvas.Bounds())
	// Fall back to the theme's bar style when none was set on this Bar.
	style := &bar.Style
	if style.IsZero() {
		style = &plot.Theme.Bar
	}
	// Screen-space extent of the previously drawn bar, used to prevent
	// overlap when widths are dynamic.
	lastScreenMin := 0.0
	lastScreenMax := 0.0
	bar.iter(func(p Point, left, right float64) {
		var r Rect
		r.Min.X = x.ToCanvas(left, 0, size.X)
		r.Max.X = x.ToCanvas(right, 0, size.X)
		r.Min.Y = y.ToCanvas(0, 0, size.Y)
		r.Max.Y = y.ToCanvas(p.Y, 0, size.Y)
		if bar.DynamicWidth && bar.DynamicMinWidth > 0 {
			// The axis may be flipped on canvas; remember the direction
			// before clamping both edges past the previous bar.
			leftToRight := r.Min.X < r.Max.X
			r.Min.X = math.Max(math.Max(r.Min.X, lastScreenMin), lastScreenMax)
			r.Max.X = math.Max(math.Max(r.Max.X, lastScreenMin), lastScreenMax)
			// Enforce the minimum on-screen width in the drawing direction.
			if leftToRight {
				if r.Max.X-r.Min.X < bar.DynamicMinWidth {
					r.Max.X = r.Min.X + bar.DynamicMinWidth
				}
			} else {
				if r.Min.X-r.Max.X < bar.DynamicMinWidth {
					r.Min.X = r.Max.X + bar.DynamicMinWidth
				}
			}
			lastScreenMin = r.Min.X
			lastScreenMax = r.Max.X
		}
		canvas.Rect(r, style)
	})
}
package bst
import (
"fmt"
)
// ==========================================================================
// Primary Tree Algorithms
// __________________________________________________________________________
/*Tree is the basic binary tree structure with an integer value.
Like most trees, its "children" (Left and Right) are themselves trees;
a nil child marks the absence of a subtree.*/
type Tree struct {
	Value int
	Left *Tree
	Right *Tree
}
/*Node is a keyed tree node carrying a parent pointer.
NOTE(review): no function in this part of the file uses Node — confirm it is
referenced elsewhere or remove it.*/
type Node struct {
	Key int
	Value int
	Left *Node
	Right *Node
	Parent *Node
}
/*TreeSearch returns the subtree rooted at the node holding the given value.
If the value cannot be found, it returns the zero Tree (Tree{0, nil, nil}).

Example:
	output := TreeSearch(input, 8)

	input          output
	    3            8
	   / \          / \
	  2   7        5   9
	 /   / \
	1   4   8
	       / \
	      5   9

BUG FIX: the original only stopped when BOTH children were nil, so a search
that branched toward a single missing child (e.g. value < t.Value with
t.Left == nil but t.Right != nil) advanced onto a nil pointer and panicked
on the next iteration. Navigation now stops as soon as the branch taken is
nil.*/
func TreeSearch(t *Tree, value int) Tree {
	for t != nil {
		switch {
		case value == t.Value:
			return *t
		case value < t.Value:
			t = t.Left
		default:
			t = t.Right
		}
	}
	return Tree{} // represents a nil tree
}
/*InOrderTreeWalk performs an exhaustive in-order walk through every node in
the tree, sending each value along the channel. Assuming the tree is a
binary search tree, the values arrive in ascending order. The caller is
responsible for closing the channel once the walk returns (see tree_walker).*/
func InOrderTreeWalk(t *Tree, channel chan int) {
	if t != nil {
		InOrderTreeWalk(t.Left, channel)
		channel <- t.Value
		InOrderTreeWalk(t.Right, channel)
	}
}
/*TreeMinimum returns the leftmost subtree, whose root holds the smallest
value in a binary search tree.*/
func TreeMinimum(t *Tree) Tree {
	n := t
	for n.Left != nil {
		n = n.Left
	}
	return *n
}
/*TreeMaximum returns the rightmost subtree, whose root holds the largest
value in a binary search tree.*/
func TreeMaximum(t *Tree) Tree {
	n := t
	for n.Right != nil {
		n = n.Right
	}
	return *n
}
// ==========================================================================
// Random Support Functions and Examples
// __________________________________________________________________________
/*PrintTreeWalk performs an in-order walk of a binary search tree and prints
every value to standard output, one per line.*/
func PrintTreeWalk(t *Tree) {
	if t == nil {
		return
	}
	PrintTreeWalk(t.Left)
	fmt.Println(t.Value)
	PrintTreeWalk(t.Right)
}
/*isBST is intended to check whether a Tree satisfies the Binary Search Tree
property: every value in a node's left subtree is smaller than the node's
value, and every value in its right subtree is larger.
NOTE(review): this is an unimplemented stub — it has no body and no return
value; implementing it as documented requires adding a bool result to the
signature.*/
func isBST(t *Tree) {
}
// AreIntSlicesEqual compares two integer slices for equality.
// Two nil slices are equal; a nil slice and a non-nil slice (even an empty
// one) are not.
func AreIntSlicesEqual(a, b []int) bool {
	switch {
	case a == nil && b == nil:
		return true
	case a == nil || b == nil:
		return false
	case len(a) != len(b):
		return false
	}
	for i, v := range a {
		if v != b[i] {
			return false
		}
	}
	return true
}
/*ExampleTree returns a fixed example binary search tree:
      3
     / \
    2   7
   /   / \
  1   4   8
         / \
        5   9
*/
func ExampleTree() Tree {
	return Tree{
		Value: 3,
		Left: &Tree{
			Value: 2,
			Left:  &Tree{Value: 1},
		},
		Right: &Tree{
			Value: 7,
			Left:  &Tree{Value: 4},
			Right: &Tree{
				Value: 8,
				Left:  &Tree{Value: 5},
				Right: &Tree{Value: 9},
			},
		},
	}
}
/*
ExampleTree2 returns a tree slightly similar to ExampleTree, but different —
designed to trip up a careless walker:
      3
     / \
    2   7
   /     \
  1       8
           \
            9
*/
func ExampleTree2() Tree {
	return Tree{
		Value: 3,
		Left: &Tree{
			Value: 2,
			Left:  &Tree{Value: 1},
		},
		Right: &Tree{
			Value: 7,
			Right: &Tree{
				Value: 8,
				Right: &Tree{Value: 9},
			},
		},
	}
}
/*
NoComment returns the fixed string "No comment".
(On advice of counsel, the author declined to elaborate.)
*/
func NoComment() string {
	const reply = "No comment"
	return reply
}
// NOTE: HIDDEN FUNCTION
// tree_walker launches a goroutine performing an in-order walk of t and
// returns the receive-only channel it feeds; the channel is closed once the
// walk completes. (Caller beware: abandoning the channel before it closes
// leaves the walker goroutine blocked on its next send.)
func tree_walker(t *Tree) <-chan int {
	out := make(chan int)
	go func() {
		defer close(out)
		InOrderTreeWalk(t, out)
	}()
	return out
}
// PrintTreeArray prints all values of the tree, in order, as one slice.
func PrintTreeArray(t *Tree) {
	var values []int
	for v := range tree_walker(t) {
		values = append(values, v)
	}
	fmt.Println(values)
}
/*AreTreesEqual compares two trees for equality by taking 1 step at a time
through each tree, and comparing the values. At any given step, if the
values aren't equal, then the walkers stop and the function returns false.
Credits: slightly modified from: https://golang.org/doc/play/tree.go*/
func AreTreesEqual(t1, t2 *Tree) bool {
c1, c2 := tree_walker(t1), tree_walker(t2)
for {
// blocks; waits for both channels for either values or closes.
value1, isChanOpen1 := <-c1
value2, isChanOpen2 := <-c2
/*Check: is either channel closed?
At this point, We assume: all previous values in tree1 & tree2 equal.
Therefore, if both channels are closed, then there are no more values.
If all values are equal, Then the trees are equal.
If one channel is closed, but the other sends another value,
then the two trees have a different quantity of nodes.
Therefore the two trees are not equal.
[QED] */
if !isChanOpen1 || !isChanOpen2 {
return isChanOpen1 == isChanOpen2
}
if value1 != value2 {
break
}
}
return false
} | bst/bst.go | 0.741674 | 0.449393 | bst.go | starcoder |
package data
import (
"expvar"
"fmt"
)
// VarType identifies which kind of value a VarState stores.
type VarType int

// The supported variable types.
const (
	// UnknownType indicates that the VarType wasn't set.
	UnknownType VarType = 0
	// IntType indicates we are storing an Int.
	IntType VarType = 1
	// FloatType indicates we are storing a Float.
	FloatType VarType = 2
	// StringType indicates we are storing a String.
	StringType VarType = 3
	// MapType indicates we are storing a Map.
	MapType VarType = 4
	// FuncType indicates we are storing a func.
	FuncType VarType = 5
)

// IsVarType indicates this is a VarType.
func (VarType) IsVarType() bool {
	return true
}

// String implements fmt.Stringer, naming the Go type the variable holds.
func (v VarType) String() string {
	switch v {
	case UnknownType:
		return "unknown"
	case IntType:
		return "int64"
	case FloatType:
		return "float64"
	case StringType:
		return "string"
	case MapType:
		return "Map"
	case FuncType:
		return "Func"
	}
	panic("VarType is of a type that was set, but we don't support (error in VarType.String())")
}

// SubString returns the string needed to Subscribe to this variable's
// changes. Funcs and unknown types cannot be subscribed to and yield an
// error instead.
func (v VarType) SubString() (string, error) {
	switch v {
	case IntType:
		return "Int", nil
	case FloatType:
		return "Float", nil
	case StringType:
		return "String", nil
	case MapType:
		return "Map", nil
	case FuncType:
		return "", fmt.Errorf("Func type found, can't subscribe directly to a Func")
	case UnknownType:
		return "", fmt.Errorf("unknown type found, can't subscribe")
	}
	panic("VarType is of a type that was set, but we don't support (error in VarType.Sub())")
}
// VarState holds state data for expvar's. Exactly one of the value fields
// below is meaningful, selected by Type (see Value).
type VarState struct {
	// Name is the name of the published variable this represents.
	Name string
	// Type indicates the type of variable being stored.
	Type VarType
	// Int represents an int.
	Int int64
	// Float represents a float64.
	Float float64
	// Map represents a key/value lookup of expvar.Vars.
	Map map[string]expvar.Var
	// NoOp is incremented to indicate a Map sub value has changed.
	NoOp uint64
	// String represents a string
	String string
	// Func represents a function.
	Func func() interface{}
}
// Value returns the internally held value, chosen by the stored Type.
// It returns nil for UnknownType or any unrecognized type.
func (v VarState) Value() interface{} {
	switch v.Type {
	case IntType:
		return v.Int
	case FloatType:
		return v.Float
	case StringType:
		return v.String
	case MapType:
		return v.Map
	case FuncType:
		return v.Func
	}
	return nil
}
// ValueType returns the ValueType held in VarState.
func (v VarState) ValueType() VarType {
return v.Type
} | development/telemetry/streaming/river/state/data/data.go | 0.686055 | 0.50952 | data.go | starcoder |
package dict
import (
"errors"
"fmt"
"math/big"
"github.com/mmcloughlin/addchain"
"github.com/mmcloughlin/addchain/alg"
"github.com/mmcloughlin/addchain/internal/bigint"
"github.com/mmcloughlin/addchain/internal/bigints"
)
// RunsAlgorithm is a custom variant of the dictionary approach that decomposes
// a target into runs of ones. It leverages the observation that building a
// dictionary consisting of runs of 1s of lengths l₁, l₂, ..., l_k can itself
// be reduced to first finding an addition chain for the run lengths. Then from
// this chain we can build a chain for the runs themselves.
type RunsAlgorithm struct {
	// seqalg solves addition sequences for the (small) run lengths.
	seqalg alg.SequenceAlgorithm
}
// NewRunsAlgorithm constructs a RunsAlgorithm that delegates run-length
// sequence solving to a. Since run lengths are far smaller than the target
// integers themselves, a does not need to handle large integers.
func NewRunsAlgorithm(a alg.SequenceAlgorithm) *RunsAlgorithm {
	return &RunsAlgorithm{seqalg: a}
}
// String returns a descriptive name of the form "runs(<inner algorithm>)".
func (a RunsAlgorithm) String() string {
	return fmt.Sprintf("runs(%s)", a.seqalg)
}
// FindChain uses the run lengths method to find an addition chain for n:
// decompose n into runs of 1s, solve an addition sequence for the run
// lengths, expand that into a chain for the runs themselves, reduce, and
// finally extend the dictionary chain into a chain for n.
func (a RunsAlgorithm) FindChain(n *big.Int) (addchain.Chain, error) {
	// Find the runs in n (threshold 0: every run is kept).
	d := RunLength{T: 0}
	sum := d.Decompose(n)
	runs := sum.Dictionary()
	// Treat the run lengths themselves as a sequence to be solved; lengths
	// are bit counts, so they fit comfortably in int64.
	lengths := []*big.Int{}
	for _, run := range runs {
		length := int64(run.BitLen())
		lengths = append(lengths, big.NewInt(length))
	}
	// Delegate to the sequence algorithm for a solution.
	lc, err := a.seqalg.FindSequence(lengths)
	if err != nil {
		return nil, err
	}
	// Build a dictionary chain (chain over the runs) from the length chain.
	c, err := RunsChain(lc)
	if err != nil {
		return nil, err
	}
	// Reduce the decomposition and chain to primitive form.
	sum, c, err = primitive(sum, c)
	if err != nil {
		return nil, err
	}
	// Build the chain for n out of the dictionary, then canonicalize:
	// sorted ascending with duplicates removed.
	dc := dictsumchain(sum)
	c = append(c, dc...)
	bigints.Sort(c)
	c = addchain.Chain(bigints.Unique(c))
	return c, nil
}
// RunsChain takes a chain for the run lengths and generates a chain for the
// runs themselves. That is, if the provided chain is l₁, l₂, ..., l_k then
// the result will contain r(l₁), r(l₂), ..., r(l_k) where r(n) = 2ⁿ - 1.
func RunsChain(lc addchain.Chain) (addchain.Chain, error) {
	p, err := lc.Program()
	if err != nil {
		return nil, err
	}
	c := addchain.New()
	s := map[uint]uint{} // current largest shift of each run length
	for _, op := range p {
		a, b := bigint.MinMax(lc[op.I], lc[op.J])
		// Run lengths are tiny compared to the target, so they must fit uint64.
		if !a.IsUint64() || !b.IsUint64() {
			return nil, errors.New("values in lengths chain are far too large")
		}
		la := uint(a.Uint64())
		lb := uint(b.Uint64())
		rb := bigint.Ones(lb)
		// Emit the intermediate left-shifts of r(lb) up to la, continuing
		// from any shifts already produced for lb in earlier operations.
		for ; s[lb] < la; s[lb]++ {
			shift := new(big.Int).Lsh(rb, s[lb]+1)
			c = append(c, shift)
		}
		c = append(c, bigint.Ones(la+lb))
	}
	return c, nil
}
package redblack
import (
"sync"
)
const (
	red   = false
	black = true
)

// color is used to indicate the color of a node.
type color bool

// Key provides a method for determining whether one value is less than
// another; it defines the ordering of the tree.
type Key interface {
	// LessThan reports whether the receiver is less than the given value.
	LessThan(interface{}) bool
}
// node is a single red-black tree node linking user data to its position
// in the tree. A nil child pointer represents a (black) leaf.
type node struct {
	color               color
	key                 Key
	item                interface{}
	parent, left, right *node
}

// tree is a red-black tree guarded by an embedded RWMutex.
// size tracks the number of inserted elements.
type tree struct {
	root *node
	size int
	sync.RWMutex
}
// Insert inserts value "v" for key "k" under the tree's write lock.
//
// The size counter is incremented by treeInsert; the original also deferred
// a second t.size++ here, double-counting every insert.
func (t *tree) Insert(k Key, v interface{}) {
	t.Lock()
	defer t.Unlock()
	newNode := &node{
		color: black,
		key:   k,
		item:  v,
	}
	t.treeInsert(newNode)
	t.insertRebalance(newNode)
}
// treeInsert does a standard binary tree insert, ignoring red/black
// properties (insertRebalance restores them afterwards).
func (t *tree) treeInsert(newNode *node) {
	// Count the element once it has been linked in.
	defer func() { t.size++ }()
	if t.root == nil {
		t.root = newNode
		return
	}
	// Non-root nodes start red; rebalancing fixes any red-red violation.
	newNode.color = red
	newNode.parent = t.root
	n := t.root
	for {
		// Branch right when the current key is less than the new key.
		if n.key.LessThan(newNode.key) {
			if n.right == nil {
				n.right = newNode
				break
			}
			n = n.right
			// Track the prospective parent as we descend.
			newNode.parent = n
			continue
		} else {
			if n.left == nil {
				n.left = newNode
				break
			}
			n = n.left
			newNode.parent = n
			continue
		}
	}
}
// insertRebalance restores the red-black invariants after n has been linked
// in by treeInsert, recoloring and rotating up the tree as needed.
//
// A nil child pointer acts as a black leaf, so an absent uncle is treated as
// black; the original dereferenced y.color unconditionally and panicked
// whenever the uncle was nil.
func (t *tree) insertRebalance(n *node) {
	for n != t.root && n.parent.color == red {
		if n.parent == n.parent.parent.left {
			// n's parent is a left child, so y is n's right uncle.
			y := n.parent.parent.right
			if y != nil && y.color == red {
				// Red uncle: recolor and continue from the grandparent.
				n.parent.color = black
				y.color = black
				n.parent.parent.color = red
				// Move n up the tree.
				n = n.parent.parent
			} else {
				// Black (or absent) uncle: rotate.
				if n == n.parent.right {
					n = n.parent
					t.leftRotate(n)
				}
				n.parent.color = black
				n.parent.parent.color = red
				t.rightRotate(n.parent.parent)
			}
		} else {
			// Mirror image: n's parent is a right child, y is the left uncle.
			y := n.parent.parent.left
			if y != nil && y.color == red {
				n.parent.color = black
				y.color = black
				n.parent.parent.color = red
				// Move n up the tree.
				n = n.parent.parent
			} else {
				if n == n.parent.left {
					n = n.parent
					t.rightRotate(n)
				}
				n.parent.color = black
				n.parent.parent.color = red
				t.leftRotate(n.parent.parent)
			}
		}
	}
	t.root.color = black
}
// leftRotate rotates the subtree rooted at n to the left: n's right child y
// takes n's place, n becomes y's left child, and y's former left subtree z
// becomes n's right subtree.
func (t *tree) leftRotate(n *node) {
	parent := n.parent
	y := n.right
	z := y.left
	y.parent = parent
	n.parent = y
	// Perform rotation.
	y.left = n
	n.right = z
	if z != nil {
		// Fix: the transferred subtree must point at its new parent; the
		// original left z.parent dangling at y, corrupting the tree.
		z.parent = n
	}
	if parent == nil {
		t.root = y
		return
	}
	if parent.left == n {
		parent.left = y
	} else {
		parent.right = y
	}
}
func (t *tree) rightRotate(n *node) {
parent := n.parent
y := n.left
z := y.right
y.parent = n.parent
n.parent = y
// Perform rotation
y.right = n
n.left = z
if parent == nil {
t.root = y
return
}
if parent.left == n {
parent.left = y
}else {
parent.right = y
}
} | development/tree/redblack/redblack.go | 0.765856 | 0.427815 | redblack.go | starcoder |
package quickhull
import (
"github.com/golang/geo/r3"
)
// ConvexHull is the result of a quickhull computation: a vertex buffer plus
// triangle indices (three consecutive Indices entries per face).
type ConvexHull struct {
	optimizedVertexBuffer []r3.Vector // compacted vertices, used when original indices are not kept
	Vertices              []r3.Vector
	Indices               []int
}
// Triangles expands the index buffer into explicit triangles, one
// [3]r3.Vector per consecutive triple in Indices.
func (hull ConvexHull) Triangles() [][3]r3.Vector {
	tris := make([][3]r3.Vector, len(hull.Indices)/3)
	for pos, vertIdx := range hull.Indices {
		tris[pos/3][pos%3] = hull.Vertices[vertIdx]
	}
	return tris
}
// newConvexHull extracts the final hull (vertices plus triangle indices)
// from a meshBuilder. It traverses the enabled faces with an explicit stack
// starting from the first enabled one, emitting three indices per face.
// When useOriginalIndices is false, vertices are compacted into
// optimizedVertexBuffer and indices remapped accordingly; otherwise indices
// refer directly into pointCloud. ccw selects the winding order.
func newConvexHull(mesh meshBuilder, pointCloud []r3.Vector, ccw bool, useOriginalIndices bool) ConvexHull {
	var hull ConvexHull
	faceProcessed := make([]bool, len(mesh.faces))
	var faceStack []int
	// Seed the traversal with the first non-disabled face.
	for i, f := range mesh.faces {
		if !f.isDisabled() {
			faceStack = append(faceStack, i)
			break
		}
	}
	if len(faceStack) == 0 {
		return hull
	}
	finalMeshFaceCount := len(mesh.faces) - len(mesh.disabledFaces)
	hull.Indices = make([]int, 0, finalMeshFaceCount*3)
	vertexIndexMapping := make(map[int]int) // Map vertex indices from original point cloud to the new mesh vertex indices
	for len(faceStack) > 0 {
		// Pop the most recently pushed face.
		lastFaceIndex := len(faceStack) - 1
		var top int
		top, faceStack = faceStack[lastFaceIndex], faceStack[:lastFaceIndex]
		topFace := mesh.faces[top]
		assertTrue(!topFace.isDisabled())
		if faceProcessed[top] {
			continue
		}
		faceProcessed[top] = true
		// Queue the three neighboring faces reached via opposite half-edges.
		halfEdges := mesh.halfEdgeIndicesOfFace(topFace)
		adjacent := []int{mesh.halfEdges[mesh.halfEdges[halfEdges[0]].Opp].Face, mesh.halfEdges[mesh.halfEdges[halfEdges[1]].Opp].Face, mesh.halfEdges[mesh.halfEdges[halfEdges[2]].Opp].Face}
		for _, a := range adjacent {
			if !faceProcessed[a] && !mesh.faces[a].isDisabled() {
				faceStack = append(faceStack, a)
			}
		}
		vertices := mesh.vertexIndicesOfFace(topFace)
		if !useOriginalIndices {
			// Remap each point-cloud index to a compact buffer index,
			// appending the vertex the first time it is seen.
			for i, v := range vertices {
				it, found := vertexIndexMapping[v]
				if !found {
					hull.optimizedVertexBuffer = append(hull.optimizedVertexBuffer, pointCloud[v])
					addedIdx := len(hull.optimizedVertexBuffer) - 1
					vertexIndexMapping[v] = addedIdx
					vertices[i] = addedIdx
				} else {
					vertices[i] = it
				}
			}
		}
		// Emit the face with the requested winding order.
		hull.Indices = append(hull.Indices, vertices[0])
		if ccw {
			hull.Indices = append(hull.Indices, vertices[2])
			hull.Indices = append(hull.Indices, vertices[1])
		} else {
			hull.Indices = append(hull.Indices, vertices[1])
			hull.Indices = append(hull.Indices, vertices[2])
		}
	}
	if useOriginalIndices {
		hull.Vertices = pointCloud
	} else {
		hull.Vertices = hull.optimizedVertexBuffer
	}
	return hull
}
package view
import (
"github.com/lesovsky/pgcenter/internal/query"
"regexp"
"time"
)
// View describes how stats received from Postgres should be displayed.
type View struct {
	Name               string                 // View name
	MinRequiredVersion int                    // Minimum required Postgres version
	QueryTmpl          string                 // Query template used for making particular query.
	Query              string                 // Query based on template and runtime options.
	DiffIntvl          [2]int                 // Columns interval for diff
	Cols               []string               // Columns names
	Ncols              int                    // Number of columns returned by query, used as a right border for OrderKey
	OrderKey           int                    // Index of column used for order
	OrderDesc          bool                   // Order direction: descending (true) or ascending (false)
	UniqueKey          int                    // Index of column used as unique key when comparing rows during diffs; zero by default, which is OK in almost all views
	ColsWidth          map[int]int            // Width used for columns and control an aligning
	Aligned            bool                   // Flag shows aligning is calculated or not
	Msg                string                 // Show this text in Cmdline when switching to this view
	Filters            map[int]*regexp.Regexp // Filter patterns: key is the column index, value - regexp pattern
	Refresh            time.Duration          // Number of seconds between update view.
	ShowExtra          int                    // Specifies extra stats should be enabled on the view.
}
// Views is the set of all known views, keyed by view name.
type Views map[string]View
// New returns the set of predefined views, keyed by name. Each entry's
// Ncols/DiffIntvl must match its query template; Configure later adjusts
// some templates (and hence these values) to the connected Postgres version.
func New() Views {
	return map[string]View{
		"activity": {
			Name:      "activity",
			QueryTmpl: query.PgStatActivityDefault,
			DiffIntvl: [2]int{0, 0},
			Ncols:     14,
			OrderKey:  0,
			OrderDesc: true,
			ColsWidth: map[int]int{},
			Msg:       "Show activity statistics",
			Filters:   map[int]*regexp.Regexp{},
		},
		"replication": {
			Name:      "replication",
			QueryTmpl: query.PgStatReplicationDefault,
			DiffIntvl: [2]int{6, 6},
			Ncols:     15,
			OrderKey:  0,
			OrderDesc: true,
			ColsWidth: map[int]int{},
			Msg:       "Show replication statistics",
			Filters:   map[int]*regexp.Regexp{},
		},
		"databases": {
			Name:      "databases",
			QueryTmpl: query.PgStatDatabaseDefault,
			DiffIntvl: [2]int{1, 16},
			Ncols:     18,
			OrderKey:  0,
			OrderDesc: true,
			ColsWidth: map[int]int{},
			Msg:       "Show databases statistics",
			Filters:   map[int]*regexp.Regexp{},
		},
		"tables": {
			Name:      "tables",
			QueryTmpl: query.PgStatTablesDefault,
			DiffIntvl: [2]int{1, 18},
			Ncols:     19,
			OrderKey:  0,
			OrderDesc: true,
			ColsWidth: map[int]int{},
			Msg:       "Show tables statistics",
			Filters:   map[int]*regexp.Regexp{},
		},
		"indexes": {
			Name:      "indexes",
			QueryTmpl: query.PgStatIndexesDefault,
			DiffIntvl: [2]int{1, 5},
			Ncols:     6,
			OrderKey:  0,
			OrderDesc: true,
			ColsWidth: map[int]int{},
			Msg:       "Show indexes statistics",
			Filters:   map[int]*regexp.Regexp{},
		},
		"sizes": {
			Name:      "sizes",
			QueryTmpl: query.PgTablesSizesDefault,
			DiffIntvl: [2]int{7, 11},
			Ncols:     12,
			OrderKey:  0,
			OrderDesc: true,
			ColsWidth: map[int]int{},
			Msg:       "Show tables sizes statistics",
			Filters:   map[int]*regexp.Regexp{},
		},
		"functions": {
			Name:      "functions",
			QueryTmpl: query.PgStatFunctionsDefault,
			DiffIntvl: [2]int{3, 3},
			Ncols:     8,
			OrderKey:  0,
			OrderDesc: true,
			ColsWidth: map[int]int{},
			Msg:       "Show functions statistics",
			Filters:   map[int]*regexp.Regexp{},
		},
		"statements_timings": {
			Name:      "statements_timings",
			QueryTmpl: query.PgStatStatementsTimingDefault,
			DiffIntvl: [2]int{6, 10},
			Ncols:     13,
			OrderKey:  0,
			OrderDesc: true,
			UniqueKey: 11,
			ColsWidth: map[int]int{},
			Msg:       "Show statements timings statistics",
			Filters:   map[int]*regexp.Regexp{},
		},
		"statements_general": {
			Name:      "statements_general",
			QueryTmpl: query.PgStatStatementsGeneralDefault,
			DiffIntvl: [2]int{4, 5},
			Ncols:     8,
			OrderKey:  0,
			OrderDesc: true,
			UniqueKey: 6,
			ColsWidth: map[int]int{},
			Msg:       "Show statements general statistics",
			Filters:   map[int]*regexp.Regexp{},
		},
		"statements_io": {
			Name:      "statements_io",
			QueryTmpl: query.PgStatStatementsIoDefault,
			DiffIntvl: [2]int{6, 10},
			Ncols:     13,
			OrderKey:  0,
			OrderDesc: true,
			UniqueKey: 11,
			ColsWidth: map[int]int{},
			Msg:       "Show statements IO statistics",
			Filters:   map[int]*regexp.Regexp{},
		},
		"statements_temp": {
			Name:      "statements_temp",
			QueryTmpl: query.PgStatStatementsTempDefault,
			DiffIntvl: [2]int{4, 6},
			Ncols:     9,
			OrderKey:  0,
			OrderDesc: true,
			UniqueKey: 7,
			ColsWidth: map[int]int{},
			Msg:       "Show statements temp files statistics",
			Filters:   map[int]*regexp.Regexp{},
		},
		"statements_local": {
			Name:      "statements_local",
			QueryTmpl: query.PgStatStatementsLocalDefault,
			DiffIntvl: [2]int{6, 10},
			Ncols:     13,
			OrderKey:  0,
			OrderDesc: true,
			UniqueKey: 11,
			ColsWidth: map[int]int{},
			Msg:       "Show statements temp tables statistics (local IO)",
			Filters:   map[int]*regexp.Regexp{},
		},
		"statements_wal": {
			Name:               "statements_wal",
			MinRequiredVersion: query.PostgresV13,
			QueryTmpl:          query.PgStatStatementsWalDefault,
			DiffIntvl:          [2]int{3, 6},
			Ncols:              9,
			OrderKey:           0,
			OrderDesc:          true,
			UniqueKey:          7,
			ColsWidth:          map[int]int{},
			Msg:                "Show statements WAL statistics",
			Filters:            map[int]*regexp.Regexp{},
		},
		"progress_vacuum": {
			Name:               "progress_vacuum",
			MinRequiredVersion: query.PostgresV96,
			QueryTmpl:          query.PgStatProgressVacuumDefault,
			DiffIntvl:          [2]int{10, 11},
			Ncols:              13,
			OrderKey:           0,
			OrderDesc:          true,
			ColsWidth:          map[int]int{},
			Msg:                "Show vacuum progress statistics",
			Filters:            map[int]*regexp.Regexp{},
		},
		"progress_cluster": {
			Name:               "progress_cluster",
			MinRequiredVersion: query.PostgresV12,
			QueryTmpl:          query.PgStatProgressClusterDefault,
			DiffIntvl:          [2]int{10, 11},
			Ncols:              13,
			OrderKey:           0,
			OrderDesc:          true,
			ColsWidth:          map[int]int{},
			Msg:                "Show cluster/vacuum full progress statistics",
			Filters:            map[int]*regexp.Regexp{},
		},
		"progress_index": {
			Name:               "progress_index",
			MinRequiredVersion: query.PostgresV12,
			QueryTmpl:          query.PgStatProgressCreateIndexDefault,
			DiffIntvl:          [2]int{0, 0},
			Ncols:              14,
			OrderKey:           0,
			OrderDesc:          true,
			ColsWidth:          map[int]int{},
			Msg:                "Show create index/reindex progress statistics",
			Filters:            map[int]*regexp.Regexp{},
		},
		"progress_analyze": {
			Name:               "progress_analyze",
			MinRequiredVersion: query.PostgresV13,
			QueryTmpl:          query.PgStatProgressAnalyzeDefault,
			DiffIntvl:          [2]int{0, 0},
			Ncols:              12,
			OrderKey:           0,
			OrderDesc:          true,
			ColsWidth:          map[int]int{},
			Msg:                "Show analyze progress statistics",
			Filters:            map[int]*regexp.Regexp{},
		},
		"progress_basebackup": {
			Name:               "progress_basebackup",
			MinRequiredVersion: query.PostgresV13,
			QueryTmpl:          query.PgStatProgressBasebackupDefault,
			DiffIntvl:          [2]int{9, 9},
			Ncols:              11,
			OrderKey:           0,
			OrderDesc:          true,
			ColsWidth:          map[int]int{},
			Msg:                "Show basebackup progress statistics",
			Filters:            map[int]*regexp.Regexp{},
		},
	}
}
// Configure adjusts the views' queries to the connected Postgres server.
// opts carries the Postgres properties (version, GUC values such as
// track_commit_timestamp) required for selecting and formatting queries.
func (v Views) Configure(opts query.Options) error {
	var track bool
	if opts.GucTrackCommitTS == "on" {
		track = true
	}
	// Swap in version-specific query templates where needed.
	for k, view := range v {
		switch k {
		case "activity":
			view.QueryTmpl, view.Ncols = query.SelectStatActivityQuery(opts.Version)
			v[k] = view
		case "replication":
			view.QueryTmpl, view.Ncols = query.SelectStatReplicationQuery(opts.Version, track)
			v[k] = view
		case "databases":
			view.QueryTmpl, view.Ncols, view.DiffIntvl = query.SelectStatDatabaseQuery(opts.Version)
			v[k] = view
		case "statements_timings":
			view.QueryTmpl = query.SelectStatStatementsTimingQuery(opts.Version)
			v[k] = view
		}
	}
	// Build query texts based on templates.
	for k, view := range v {
		q, err := query.Format(view.QueryTmpl, opts)
		if err != nil {
			return err
		}
		view.Query = q
		v[k] = view
	}
	return nil
}
// VersionOK reports whether the given Postgres version is recent enough
// for this view.
func (v View) VersionOK(version int) bool {
	return version >= v.MinRequiredVersion
}
package binomial
// Node is a user data node on a binomial Tree.
type Node struct {
	Item interface{} // Consumer data
	t    *Tree       // Current tree for this Node
}
// Tree is a binomial tree.
type Tree struct {
	n *Node // Container for user data
	k uint  // Rank of the tree.
	// Tree structure pointers.
	parent  *Tree // direct parent
	sibling *Tree // next sibling
	child   *Tree // first child
}

// Less is a function that returns true if a is less than b.
type Less func(a, b interface{}) bool
// newNode creates a new single-element binomial tree holding the specified
// item. It returns the Node created for the item.
func newNode(item interface{}) *Node {
	n := &Node{Item: item}
	t := &Tree{n: n}
	n.t = t
	return n
}
// rank returns the rank of the tree (incremented each time it gains a child).
func (t *Tree) rank() uint {
	return t.k
}
// merge combines two Trees of the same rank, returning the new binomial tree.
// This consumes t1 and t2 into the new tree.
func merge(t1, t2 *Tree, less Less) *Tree {
	// It is up to the caller to understand that only Trees of the
	// same rank can be merged.
	if t1.rank() != t2.rank() {
		return nil
	}
	// Determine which Tree is the parent and which will be the child:
	// the root with the smaller item (per less) becomes the parent.
	tp := t1
	tc := t2
	if less(t2.n.Item, t1.n.Item) {
		tp = t2
		tc = t1
	}
	// Save the original last child.
	c := tp.child
	// Make the new child Tree the last child.
	tp.child = tc
	// Make the new child Tree's first sibling the root's previous last child.
	tc.sibling = c
	// Ensure the new child Tree points to its parent.
	tc.parent = tp
	// Increase the rank of the parent now that it has a new child.
	tp.k++
	return tp
}
// bubble moves an item up the tree while it is less than its successive
// parents. On return, Node n sits in the correct place in its Tree.
func (n *Node) bubble(less Less) {
	pt := n.t.parent
	for pt != nil && less(n.Item, pt.n.Item) {
		swap(n.t, pt)
		pt = n.t.parent
	}
}
// Swap exchanges the nodes between two trees.
func swap(t1 *Tree, t2 *Tree) {
ntemp := t1.n
t1.n = t2.n
t2.n = ntemp
t1.n.t = t1
t2.n.t = t2
} | binomial/tree.go | 0.816809 | 0.522263 | tree.go | starcoder |
package onshape
import (
"encoding/json"
)
// BTAllowEdgePointFilter2371AllOf struct for BTAllowEdgePointFilter2371AllOf
//
// NOTE(review): this model follows the openapi-generator layout (constructor
// pair, Get/GetOk/Has/Set accessors, Nullable wrapper); confirm whether the
// file is generated before hand-editing, as regeneration would discard
// manual changes.
type BTAllowEdgePointFilter2371AllOf struct {
	AllowsEdgePoint *bool   `json:"allowsEdgePoint,omitempty"`
	BtType          *string `json:"btType,omitempty"`
}

// NewBTAllowEdgePointFilter2371AllOf instantiates a new BTAllowEdgePointFilter2371AllOf object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBTAllowEdgePointFilter2371AllOf() *BTAllowEdgePointFilter2371AllOf {
	this := BTAllowEdgePointFilter2371AllOf{}
	return &this
}

// NewBTAllowEdgePointFilter2371AllOfWithDefaults instantiates a new BTAllowEdgePointFilter2371AllOf object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewBTAllowEdgePointFilter2371AllOfWithDefaults() *BTAllowEdgePointFilter2371AllOf {
	this := BTAllowEdgePointFilter2371AllOf{}
	return &this
}

// GetAllowsEdgePoint returns the AllowsEdgePoint field value if set, zero value otherwise.
func (o *BTAllowEdgePointFilter2371AllOf) GetAllowsEdgePoint() bool {
	if o == nil || o.AllowsEdgePoint == nil {
		var ret bool
		return ret
	}
	return *o.AllowsEdgePoint
}

// GetAllowsEdgePointOk returns a tuple with the AllowsEdgePoint field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTAllowEdgePointFilter2371AllOf) GetAllowsEdgePointOk() (*bool, bool) {
	if o == nil || o.AllowsEdgePoint == nil {
		return nil, false
	}
	return o.AllowsEdgePoint, true
}

// HasAllowsEdgePoint returns a boolean if a field has been set.
func (o *BTAllowEdgePointFilter2371AllOf) HasAllowsEdgePoint() bool {
	if o != nil && o.AllowsEdgePoint != nil {
		return true
	}
	return false
}

// SetAllowsEdgePoint gets a reference to the given bool and assigns it to the AllowsEdgePoint field.
func (o *BTAllowEdgePointFilter2371AllOf) SetAllowsEdgePoint(v bool) {
	o.AllowsEdgePoint = &v
}

// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTAllowEdgePointFilter2371AllOf) GetBtType() string {
	if o == nil || o.BtType == nil {
		var ret string
		return ret
	}
	return *o.BtType
}

// GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTAllowEdgePointFilter2371AllOf) GetBtTypeOk() (*string, bool) {
	if o == nil || o.BtType == nil {
		return nil, false
	}
	return o.BtType, true
}

// HasBtType returns a boolean if a field has been set.
func (o *BTAllowEdgePointFilter2371AllOf) HasBtType() bool {
	if o != nil && o.BtType != nil {
		return true
	}
	return false
}

// SetBtType gets a reference to the given string and assigns it to the BtType field.
func (o *BTAllowEdgePointFilter2371AllOf) SetBtType(v string) {
	o.BtType = &v
}

// MarshalJSON serializes only the fields that have been set (nil pointers
// are omitted).
func (o BTAllowEdgePointFilter2371AllOf) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.AllowsEdgePoint != nil {
		toSerialize["allowsEdgePoint"] = o.AllowsEdgePoint
	}
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	return json.Marshal(toSerialize)
}

// NullableBTAllowEdgePointFilter2371AllOf wraps the model with an isSet flag
// so an explicit null can be distinguished from an absent value.
type NullableBTAllowEdgePointFilter2371AllOf struct {
	value *BTAllowEdgePointFilter2371AllOf
	isSet bool
}

func (v NullableBTAllowEdgePointFilter2371AllOf) Get() *BTAllowEdgePointFilter2371AllOf {
	return v.value
}

func (v *NullableBTAllowEdgePointFilter2371AllOf) Set(val *BTAllowEdgePointFilter2371AllOf) {
	v.value = val
	v.isSet = true
}

func (v NullableBTAllowEdgePointFilter2371AllOf) IsSet() bool {
	return v.isSet
}

func (v *NullableBTAllowEdgePointFilter2371AllOf) Unset() {
	v.value = nil
	v.isSet = false
}

func NewNullableBTAllowEdgePointFilter2371AllOf(val *BTAllowEdgePointFilter2371AllOf) *NullableBTAllowEdgePointFilter2371AllOf {
	return &NullableBTAllowEdgePointFilter2371AllOf{value: val, isSet: true}
}

func (v NullableBTAllowEdgePointFilter2371AllOf) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

func (v *NullableBTAllowEdgePointFilter2371AllOf) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package advent2021
import (
"bytes"
"fmt"
"strconv"
log "github.com/sirupsen/logrus"
)
// Day3Part1 returns the power consumption of the submarine based off of the
// diagnostic report input: the product of the gamma rate (most common bit of
// each column) and the epsilon rate (least common bit of each column).
// Ties pick '1' for gamma and '0' for epsilon.
func Day3Part1(diagnosticReport []string) (powerConsumption int) {
	// Guard the diagnosticReport[0] access below.
	if len(diagnosticReport) == 0 {
		return 0
	}
	width := len(diagnosticReport[0])
	gammaRate := make([]byte, 0, width)
	epsilonRate := make([]byte, 0, width)
	for col := 0; col < width; col++ {
		var countOfZeroes int
		for row := 0; row < len(diagnosticReport); row++ {
			// Byte literals replace the original magic ASCII codes 48/49.
			if diagnosticReport[row][col] == '0' {
				countOfZeroes++
			}
		}
		if countOfZeroes > len(diagnosticReport)-countOfZeroes {
			gammaRate = append(gammaRate, '0')
			epsilonRate = append(epsilonRate, '1')
		} else {
			gammaRate = append(gammaRate, '1')
			epsilonRate = append(epsilonRate, '0')
		}
	}
	// Both rates now use the same conversion path (the original mixed
	// bytes.NewBuffer and a plain string conversion).
	gammaRateNumber, err := strconv.ParseInt(string(gammaRate), 2, 64)
	if err != nil {
		// Error reporting matches Day3Part2's style; if this was the file's
		// only logrus use, drop that import as well.
		fmt.Println(err)
	}
	epsilonRateNumber, err := strconv.ParseInt(string(epsilonRate), 2, 64)
	if err != nil {
		fmt.Println(err)
	}
	return int(gammaRateNumber * epsilonRateNumber)
}
// getRelevantDiagnosticLines returns the subset of report lines whose bit at
// the given position belongs to the majority group (useHighCount) or the
// minority group (!useHighCount). On a tie, useHighCount keeps the '1' group
// and !useHighCount keeps the '0' group.
func getRelevantDiagnosticLines(diagnosticReportLines []string, position int, useHighCount bool) (relevantDiagnosticReportLines []string) {
	var withZero, withOne []string
	for _, line := range diagnosticReportLines {
		if line[position] == '0' {
			withZero = append(withZero, line)
		} else {
			withOne = append(withOne, line)
		}
	}
	zeroMajority := len(withZero) > len(withOne)
	if useHighCount == zeroMajority {
		return withZero
	}
	return withOne
}
// Day3Part2 returns the life support rating of the submarine based off of oxygen generator rating and the CO2 scrubber rating
func Day3Part2(diagnosticReport []string) (lifeSupportRating int) {
findRating := func(useHighCount bool) int64 {
// Makes a copy of the diagnosticReport to be whittled down via this process
list := make([]string, len(diagnosticReport))
copy(list, diagnosticReport)
// Loops through diagnosticReport to find the relevant lines of the rating type being looked for
for position := 0; position < len(diagnosticReport[0]); position++ {
list = getRelevantDiagnosticLines(list, position, useHighCount)
if len(list) == 1 {
break
}
}
// Converts the remaining diagnosticReport line, which is the rating for the current type, and converts it to an integer
number, err := strconv.ParseInt(list[0], 2, 64)
if err != nil {
fmt.Println(err)
}
return number
}
oxygenGeneratorRating := findRating(true)
co2scrubberRating := findRating(false)
return int(oxygenGeneratorRating * co2scrubberRating)
} | internal/pkg/advent2021/day3.go | 0.665628 | 0.558207 | day3.go | starcoder |
package analyzer
import (
"errors"
"fmt"
"github.com/toshi0607/kompal-weather/pkg/status"
"github.com/toshi0607/kompal-weather/pkg/storage"
"golang.org/x/net/context"
)
// analyzer derives crowding trends from the statuses persisted in storage.
type analyzer struct {
	storage storage.Storage
}
type (
	// Result is an analysis of the last two statuses of how Kompal-yu is crowded
	Result struct {
		MaleTrend   Trend
		FemaleTrend Trend
		// LatestStatus is the newer of the two statuses the trends were derived from.
		LatestStatus status.Status
	}
	// Trend is the direction in which Kompal-yu crowding is moving.
	Trend int
)
// Trend values; String renders each as a user-facing message.
const (
	// Unknown means the trend could not be determined
	Unknown = 0
	// Increasing means it is getting more crowded
	Increasing = 1
	// Decreasing means it is getting less crowded
	Decreasing = 2
	// Constant means crowding is unchanged
	Constant = 3
	// Open means business has just started
	Open = 4
	// Close means business has just ended
	Close = 5
)
// String renders the trend as a user-facing (Japanese) message.
func (t Trend) String() string {
	switch {
	case t == Unknown:
		return "Unknown"
	case t == Increasing:
		return "混んできました。"
	case t == Decreasing:
		return "空いてきました。"
	case t == Constant:
		return "変わりありません。"
	case t == Open:
		return "営業を開始しました。"
	case t == Close:
		return "営業を終了しました。"
	default:
		return "Invalid"
	}
}
// expectedNumberOfStatuses is how many recent statuses Analyze needs to
// compute a trend (the previous one and the latest one).
const expectedNumberOfStatuses = 2

// New builds a new Analyzer backed by the given storage.
func New(storage storage.Storage) Analyzer {
	return analyzer{
		storage: storage,
	}
}
func (a analyzer) Analyze(ctx context.Context) (*Result, error) {
ss, err := a.storage.Statuses(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get statuses: %v", err)
}
if len(ss) != expectedNumberOfStatuses {
return nil, errors.New("no sufficient status")
}
// Expect time series to be ss[0],ss[1]
if ss[0].CreatedAt.After(ss[1].CreatedAt) {
ss[0], ss[1] = ss[1], ss[0]
}
var result Result
result.LatestStatus = ss[1]
if ss[0].MaleSauna != status.Off && ss[1].MaleSauna == status.Off {
result.MaleTrend = Close
} else if ss[0].MaleSauna == status.Off && ss[1].MaleSauna != status.Off {
result.MaleTrend = Open
} else if ss[0].MaleSauna == ss[1].MaleSauna {
result.MaleTrend = Constant
} else if ss[0].MaleSauna > ss[1].MaleSauna {
result.MaleTrend = Decreasing
} else {
result.MaleTrend = Increasing
}
if ss[0].FemaleSauna != status.Off && ss[1].FemaleSauna == status.Off {
result.FemaleTrend = Close
} else if ss[0].FemaleSauna == status.Off && ss[1].FemaleSauna != status.Off {
result.FemaleTrend = Open
} else if ss[0].FemaleSauna == ss[1].FemaleSauna {
result.FemaleTrend = Constant
} else if ss[0].FemaleSauna > ss[1].FemaleSauna {
result.FemaleTrend = Decreasing
} else {
result.FemaleTrend = Increasing
}
return &result, nil
} | pkg/analyzer/analyzer.go | 0.566019 | 0.441492 | analyzer.go | starcoder |
package hist
import (
"fmt"
"math"
"sort"
"time"
)
// Histogram is a fixed-bucket histogram. Raw values are divided by scale
// before being binned, and bucket indices are clamped to [0, max].
type Histogram struct {
	start  int   // start timestamp in nanoseconds (see Start)
	end    int   // end timestamp in nanoseconds (see End)
	scale  int   // divisor applied to every recorded value
	max    int   // highest bucket index; larger scaled values clamp here
	n      int   // number of recorded values
	errCnt int   // number of values recorded via AddError
	total  int   // sum of scaled values, used by Average
	values []int // bucket counts indexed by scaled value
}

// NewHistogram creates a new Histogram with max+1 buckets of width scale.
func NewHistogram(max int, scale int) *Histogram {
	// Named fields replace the original fragile positional literal.
	return &Histogram{
		scale:  scale,
		max:    max,
		values: make([]int, max+1),
	}
}

// Start records the start timestamp (nanoseconds; String divides the
// elapsed span by time.Second).
func (h *Histogram) Start(t int) {
	h.start = t
}

// End records the end timestamp (nanoseconds).
func (h *Histogram) End(t int) {
	h.end = t
}

// Add records v: it is scaled down by scale and binned, clamping into
// [0, max].
func (h *Histogram) Add(v int) {
	v = int(float64(v) / float64(h.scale))
	if v < 1 {
		h.values[0]++
	} else if v >= h.max {
		h.values[h.max]++
	} else {
		h.values[v]++
	}
	h.n++
	h.total += v
}

// AddError records v like Add and additionally counts it as an error.
func (h *Histogram) AddError(v int) {
	h.Add(v)
	h.errCnt++
}

// Percentiles produces the bucket values for the given percentiles
// (fractions in [0, 1]). Results are reported in ascending percentile
// order regardless of the argument order.
func (h *Histogram) Percentiles(percentiles ...float64) []int {
	result := make([]int, len(percentiles))
	if len(percentiles) == 0 {
		return result
	}
	// Sort a copy so the caller's slice is not mutated (the original
	// reordered a variadic slice passed with "...").
	sorted := make([]float64, len(percentiles))
	copy(sorted, percentiles)
	sort.Float64s(sorted)
	accum := 0
	idx := int(math.Max(1.0, sorted[0]*float64(h.n)))
	for i, j := 0, 0; i < len(sorted) && j < len(h.values); j++ {
		accum += h.values[j]
		for accum >= idx {
			result[i] = j
			i++
			if i >= len(sorted) {
				break
			}
			idx = int(math.Max(1.0, sorted[i]*float64(h.n)))
		}
	}
	return result
}

// Average returns the mean of the scaled recorded values.
func (h *Histogram) Average() float64 {
	return float64(h.total) / float64(h.n)
}

// ErrorPercent returns the percentage of recorded values that were errors.
func (h *Histogram) ErrorPercent() float64 {
	return float64(h.errCnt) / float64(h.n) * 100.0
}

// String renders percentiles and summary statistics.
func (h *Histogram) String() string {
	ps := h.Percentiles(0.0, 0.5, 0.9, 0.95, 0.99, 0.999, 0.9999, 1.0)
	s := "Percentiles (%s):\n" +
		"  Min: %d\n" +
		"  Median: %d\n" +
		"  90th: %d\n" +
		"  95th: %d\n" +
		"  99th: %d\n" +
		"  99.9th: %d\n" +
		"  99.99th: %d\n" +
		"  Max: %d\n" +
		"Stats:\n" +
		"  Average (%s): %f\n" +
		"  Total requests: %d\n" +
		"  Elapsed Time (sec): %.4f\n" +
		"  Average QPS: %.2f\n" +
		"  Errors: %d\n" +
		"  Percent errors: %.2f\n"
	elapsedSecs := float64(h.end-h.start) / float64(time.Second)
	averageQPS := float64(h.n) / elapsedSecs
	scale := time.Duration(h.scale) * time.Nanosecond
	// The original printed only ps[0..6], mislabelling the 95th percentile
	// as the 99th (and so on) and dropping the true maximum; all eight
	// requested percentiles are now reported under their correct labels.
	return fmt.Sprintf(s, scale.String(), ps[0], ps[1], ps[2], ps[3], ps[4], ps[5], ps[6], ps[7],
		scale.String(), h.Average(), h.n, elapsedSecs, averageQPS, h.errCnt, h.ErrorPercent())
}
package diff
// Line represents an atom of text from the source material, contextualized
// by its original index.
type Line struct {
Index int
Text string
}
// ToLines is a convenience function for creating a slice of Line structs from a
// slice of strings as input for NewLCSTable.
func ToLines(a []string) []Line {
out := make([]Line, len(a))
for i, line := range a {
out[i] = Line{i, line}
}
return out
}
// LCSTable is a data structure used to compute the LCS and traditional
// LCS-based diff.
type LCSTable struct {
lengths []int
a, b []Line
}
// NewLCSTable constructs an LCSTable, pre-computing the necessary len(a)*len(b)
// array of lengths required for future operations.
func NewLCSTable(a, b []Line) *LCSTable {
t := &LCSTable{
lengths: make([]int, (len(a)+1)*(len(b)+1)),
a: a,
b: b,
}
for i, _ := range a {
for j, _ := range b {
k := (i+1)*(len(b)+1) + (j + 1)
if a[i].Text == b[j].Text {
t.lengths[k] = t.getLength(i, j) + 1
} else {
nextA := t.getLength(i+1, j)
nextB := t.getLength(i, j+1)
if nextA > nextB {
t.lengths[k] = nextA
} else {
t.lengths[k] = nextB
}
}
}
}
return t
}
func (t *LCSTable) getLength(ai, bi int) int {
return t.lengths[ai*(len(t.b)+1)+bi]
}
func (t *LCSTable) LongestCommonSubsequence() [][2]int {
return t.recursiveLcs(len(t.a), len(t.b))
}
func (t *LCSTable) recursiveLcs(i, j int) [][2]int {
if i == 0 || j == 0 {
return nil
}
if t.a[i-1].Text == t.b[j-1].Text {
next := [2]int{t.a[i-1].Index, t.b[j-1].Index}
return append(t.recursiveLcs(i-1, j-1), next)
}
if t.getLength(i, j-1) > t.getLength(i-1, j) {
return t.recursiveLcs(i, j-1)
}
return t.recursiveLcs(i-1, j)
}
// Diff returns a diff of the two sets of lines the LCSTable was created with,
// as determined by the LCS.
func (t *LCSTable) Diff() []Item {
	return t.recursiveDiff(len(t.a), len(t.b))
}

// recursiveDiff backtracks through the length table from (i, j), emitting one
// Item per step: Unchanged when the current lines match, Insertion when the
// optimal path favors b, Deletion when it favors a.
func (t *LCSTable) recursiveDiff(i, j int) []Item {
	if i == 0 && j == 0 {
		return nil
	}
	var toAdd Item
	// Classify this step of the backtrack.
	if i == 0 {
		toAdd.Type = Insertion
	} else if j == 0 {
		toAdd.Type = Deletion
	} else if t.a[i-1].Text == t.b[j-1].Text {
		toAdd.Type = Unchanged
	} else if t.getLength(i, j-1) > t.getLength(i-1, j) {
		toAdd.Type = Insertion
	} else {
		toAdd.Type = Deletion
	}
	// Attach the affected line and move toward the table's origin.
	switch toAdd.Type {
	case Insertion:
		toAdd.Line = t.b[j-1]
		j--
	case Unchanged:
		toAdd.Line = t.a[i-1]
		i--
		j--
	case Deletion:
		toAdd.Line = t.a[i-1]
		i--
	}
	return append(t.recursiveDiff(i, j), toAdd)
}
package context
import (
"github.com/jakubDoka/mlok/ggl"
"github.com/jakubDoka/mlok/mat"
"github.com/jakubDoka/mlok/mat/rgba"
)
// C helps you draw complex objects built from multiple sprites.
type C []Part

// Init initializes the context with the given defaults. It can be called
// multiple times on the same context to restart it; C is meant to be reused
// to reduce allocations.
func (c *C) Init(parts ...PartDefs) {
	v := *c
	// Truncate in place so the existing backing array is reused.
	v = v[:0]
	for len(parts) > len(v) {
		p := parts[len(v)]
		v = append(v, Part{
			Def:   p,
			Spr:   ggl.NSprite(p.Region),
			Mask:  rgba.White,
			Scale: mat.V(1, 1),
		})
		v[len(v)-1].Spr.SetPivot(p.Pivot)
	}
	*c = v
}
// Draw draws the context to the target with the applied transform and mask
// (Update followed by Fetch).
func (c C) Draw(t ggl.Target, matrix mat.Mat, mask mat.RGBA) {
	c.Update(matrix, mask)
	c.Fetch(t)
}
// Update recomputes each part's sprite transform and color, combining the
// part's own values with its PartDefs defaults and the given matrix/mask.
func (c C) Update(matrix mat.Mat, mask mat.RGBA) {
	for i := range c {
		p := &c[i]
		p.Spr.Update(
			mat.M(
				p.Offset.Add(p.Def.Offset),
				p.Def.Scale.Mul(p.Scale),
				p.Rotation+p.Def.Rotation,
			).Chained(matrix),
			mask.Mul(p.Mask).Mul(p.Def.Mask),
		)
	}
}
// Fetch forwards every part's sprite data to the target.
func (c C) Fetch(t ggl.Target) {
	for i := range c {
		c[i].Spr.Fetch(t)
	}
}
// Part is a building piece of a context; it contains a lot of configuration
// that is combined with the default configuration (Def) to make a final
// transformation and color.
type Part struct {
	Def           PartDefs
	Spr           ggl.Sprite
	Offset, Scale mat.Vec
	Mask          mat.RGBA
	Rotation      float64
}

// TotalOffset returns the total offset of the part, taking PartDefs into account.
func (p *Part) TotalOffset() mat.Vec {
	return p.Offset.Add(p.Def.Offset)
}

// TotalMask returns the total mask of the part, taking PartDefs into account.
func (p *Part) TotalMask() mat.RGBA {
	return p.Mask.Mul(p.Def.Mask)
}

// TotalRotation returns the total rotation of the part, taking PartDefs into account.
func (p *Part) TotalRotation() float64 {
	return p.Rotation + p.Def.Rotation
}
// PartDefs stores the default values for Part.
type PartDefs struct {
	Offset, Pivot, Scale mat.Vec
	Rotation float64
	Mask mat.RGBA
	Region mat.AABB
}

// DefaultPartDefs is a neutral baseline: identity scale and a white
// (no-op) mask.
var DefaultPartDefs = PartDefs{
	Scale: mat.V(1, 1),
	Mask: rgba.White,
}
package main
import (
"bufio"
"fmt"
"image"
"io"
"strconv"
"strings"
)
// Grid accumulates vent lines on a fixed 1000x1000 board; each cell in
// Points counts how many lines cross it.
type Grid struct {
	lines LineCollection
	Points [1000][1000]int
}
// NewGrid parses "x1,y1 -> x2,y2" segment descriptions from input, keeps
// either all segments or only the axis-aligned ones, and plots them onto a
// fresh grid.
func NewGrid(input io.Reader, considerDiagonals bool) *Grid {
	grid := &Grid{lines: make(LineCollection, 0, 500)}
	scanner := bufio.NewScanner(input)
	for scanner.Scan() {
		endpoints := strings.Split(scanner.Text(), " -> ")
		from := strings.Split(endpoints[0], ",")
		to := strings.Split(endpoints[1], ",")
		// A parse failure leaves the affected coordinate at zero, matching
		// the tolerant behaviour of the original parser.
		x1, _ := strconv.Atoi(from[0])
		y1, _ := strconv.Atoi(from[1])
		x2, _ := strconv.Atoi(to[0])
		y2, _ := strconv.Atoi(to[1])
		line := NewLine(x1, y1, x2, y2)
		if considerDiagonals || line.IsStraightLine() {
			grid.lines = append(grid.lines, line)
		}
	}
	grid.plotLines()
	return grid
}
// CountOverlaps reports how many cells are crossed by at least two lines.
func (g *Grid) CountOverlaps() int {
	overlaps := 0
	for row := range g.Points {
		for col := range g.Points[row] {
			if g.Points[row][col] > 1 {
				overlaps++
			}
		}
	}
	return overlaps
}
// plotLines rasterises every stored line onto the Points grid.
func (g *Grid) plotLines() {
	for _, line := range g.lines {
		g.plotLine(line.Point1, line.Point2)
	}
}
// plotLine marks every cell covered by the segment from point1 to point2,
// incrementing each visited cell's count exactly once (endpoints inclusive).
//
// Fixes over the previous version: the horizontal/vertical branch comments
// were swapped; the diagonal case used a fragile nested loop that looped
// forever for non-45-degree input; and a zero-length segment marked
// nothing instead of its single point. A single walk with per-axis step
// signs covers all cases uniformly.
// NOTE(review): like the original, a diagonal with |dx| != |dy| is not
// representable — the walk assumes 45-degree diagonals, which is all this
// puzzle produces.
func (g *Grid) plotLine(point1, point2 image.Point) {
	stepX := sign(point2.X - point1.X)
	stepY := sign(point2.Y - point1.Y)
	x, y := point1.X, point1.Y
	for {
		g.Points[y][x]++
		if x == point2.X && y == point2.Y {
			return
		}
		x += stepX
		y += stepY
	}
}

// sign returns -1, 0 or 1 matching the sign of v.
func sign(v int) int {
	switch {
	case v < 0:
		return -1
	case v > 0:
		return 1
	default:
		return 0
	}
}
func (g *Grid) String() string {
result := strings.Builder{}
for y := 0; y < len(g.Points); y++ {
for x := 0; x < len(g.Points); x++ {
if g.Points[y][x] == 0 {
result.WriteString(". ")
} else {
result.WriteString(fmt.Sprintf("%d ", g.Points[y][x]))
}
}
result.WriteString("\n")
}
return result.String()
} | cmd/day5/Grid.go | 0.538741 | 0.422862 | Grid.go | starcoder |
// Package descriptions provides the descriptions as used by the graphql endpoint for Weaviate
package descriptions
// Descriptions used by the Local (single-instance) Fetch part of the schema.
const (
	LocalFetch               = "Fetch Beacons that are similar to a specified concept from the Things and/or Actions subsets on a Weaviate network"
	LocalFetchObj            = "An object used to perform a Fuzzy Fetch to search for Things and Actions similar to a specified concept on a Weaviate network"
	LocalFetchActions        = "Perform a Fuzzy Fetch to Fetch Beacons similar to a specified concept on a Weaviate network from the Actions subset"
	LocalFetchThings         = "Perform a Fuzzy Fetch to Fetch Beacons similar to a specified concept on a Weaviate network from the Things subset"
	LocalFetchFuzzy          = "Perform a Fuzzy Fetch to Fetch Beacons similar to a specified concept on a Weaviate network from both the Things and Actions subsets"
	LocalFetchBeacon         = "A Beacon result from a local Weaviate Local Fetch query"
	LocalFetchClassName      = "The class name of the result from a local Weaviate Local Fetch query"
	LocalFetchCertainty      = "The degree of similarity on a scale of 0-1 between the Beacon's characteristics and the provided concept"
	LocalFetchActionsObj     = "An object used to Fetch Beacons from the Actions subset of the dataset"
	LocalFetchThingsObj      = "An object used to Fetch Beacons from the Things subset of the dataset"
	LocalFetchFuzzyBeacon    = "A Beacon result from a local Weaviate Fetch Fuzzy query from both the Things and Actions subsets"
	LocalFetchFuzzyClassName = "Class name of the result from a local Weaviate Fetch Fuzzy query from both the Things and Actions subsets"
	LocalFetchFuzzyCertainty = "The degree of similarity on a scale of 0-1 between the Beacon's characteristics and the provided concept"
	LocalFetchFuzzyObj       = "An object used to Fetch Beacons from both the Things and Actions subsets"
)

// Descriptions used by the Network (cross-instance) Fetch part of the schema.
const (
	NetworkFetch                = "Fetch Beacons that are similar to a specified concept from the Things and/or Actions subsets on a Weaviate network"
	NetworkFetchObj             = "An object used to perform a Fuzzy Fetch to search for Things and Actions similar to a specified concept on a Weaviate network"
	NetworkFetchActions         = "Perform a Fuzzy Fetch to Fetch Beacons similar to a specified concept on a Weaviate network from the Actions subset"
	NetworkFetchThings          = "Perform a Fuzzy Fetch to Fetch Beacons similar to a specified concept on a Weaviate network from the Things subset"
	NetworkFetchFuzzy           = "Perform a Fuzzy Fetch to Fetch Beacons similar to a specified concept on a Weaviate network from both the Things and Actions subsets"
	NetworkFetchActionClassName = "Class name of the result from a network Weaviate Fetch query on the Actions subset"
	NetworkFetchActionBeacon    = "A Beacon result from a network Weaviate Fetch query on the Actions subset"
	NetworkFetchActionCertainty = "The degree of similarity on a scale of 0-1 between the Beacon's characteristics and the provided concept"
	NetworkFetchActionsObj      = "An object used to Fetch Beacons from the Actions subset of the dataset"
	NetworkFetchThingClassName  = "Class name of the result from a network Weaviate Fetch query on the Things subset"
	NetworkFetchThingBeacon     = "A Beacon result from a network Weaviate Fetch query on the Things subset"
	NetworkFetchThingCertainty  = "The degree of similarity on a scale of 0-1 between the Beacon's characteristics and the provided concept"
	NetworkFetchThingsObj       = "An object used to Fetch Beacons from the Things subset of the dataset"
	NetworkFetchFuzzyClassName  = "The class name of the result from a network Weaviate Fetch Fuzzy query from both the Things and Actions subsets"
	NetworkFetchFuzzyBeacon     = "A Beacon result from a network Weaviate Fetch Fuzzy query from both the Things and Actions subsets"
	NetworkFetchFuzzyCertainty  = "The degree of similarity on a scale of 0-1 between the Beacon's characteristics and the provided concept"
	NetworkFetchFuzzyObj        = "An object used to Fetch Beacons from both the Things and Actions subsets"
)
package main
import (
dl "de.knallisworld/aoc/aoc2019/dayless"
"fmt"
"math"
"strings"
"time"
)
// Puzzle metadata: day number, input directory name and banner title.
const (
	AocDay      = 3
	AocDayName  = "day03"
	AocDayTitle = "Day 03"
)
// main solves AoC 2019 day 3: part 1 finds the wire intersection closest
// to the origin by Manhattan distance, part 2 the intersection with the
// lowest combined number of wire steps.
func main() {
	dl.PrintDayHeader(AocDay, AocDayTitle)
	defer dl.TimeTrack(time.Now(), AocDayName)
	dl.PrintStepHeader(0)
	fmt.Printf("💪 Computing...\n")
	lines, _ := dl.ReadFileToArray(AocDayName + "/puzzle1.txt")
	// Two representations of the same input: segment-based wires for the
	// step-count metric, and full point trails for finding intersections.
	wires := getWires(lines)
	intersections := getIntersections(getPaths(lines))
	fmt.Printf("👉 Found %d intersections at all\n", len(intersections))
	{
		dl.PrintStepHeader(1)
		centralCrossingPoint, centralCrossingDistance := getShortestDistance(Point{0, 0}, intersections)
		dl.PrintSolution(fmt.Sprintf("Most central intersection is %s with Manhatten Distance %d", centralCrossingPoint.ToString(), centralCrossingDistance))
	}
	{
		dl.PrintStepHeader(2)
		minStepsTotal, minStepsPoint := getShortestPath(Point{0, 0}, wires, intersections)
		dl.PrintSolution(fmt.Sprintf("Most central intersection is %s with Lowest Total Of Steps %d", minStepsPoint.ToString(), minStepsTotal))
	}
}
// Point is an integer coordinate on the wire grid.
type Point struct {
	X, Y int
}

// Equals reports whether p and o share the same coordinates.
func (p *Point) Equals(o Point) bool {
	return p.X == o.X && p.Y == o.Y
}

// ToString renders the point as "X/Y".
func (p *Point) ToString() string {
	return fmt.Sprintf("%d/%d", p.X, p.Y)
}

// Clone returns a copy of the point.
func (p Point) Clone() Point {
	return Point{X: p.X, Y: p.Y}
}

// Wire is an ordered list of straight segments.
type Wire struct {
	Segments []WireSegment
}

// WireSegment is one straight run of a wire. Cost records the number of
// steps walked before this segment starts.
type WireSegment struct {
	Start Point
	Stop Point
	Cost int
	Length int
	Direction uint8
	path []Point
}

// IsVertical reports whether the segment runs along the Y axis.
func (w *WireSegment) IsVertical() bool {
	switch w.Direction {
	case 'U', 'D':
		return true
	}
	return false
}

// isInverted reports whether the segment runs toward negative coordinates.
func (w *WireSegment) isInverted() bool {
	switch w.Direction {
	case 'D', 'L':
		return true
	}
	return false
}

// Path lazily materialises (and caches) every grid point the segment
// touches, start and stop inclusive.
func (w *WireSegment) Path() []Point {
	if w.path != nil {
		return w.path
	}
	var dx, dy int
	switch w.Direction {
	case 'U':
		dy = 1
	case 'D':
		dy = -1
	case 'R':
		dx = 1
	case 'L':
		dx = -1
	}
	points := []Point{w.Start}
	cursor := w.Start
	for cursor != w.Stop {
		cursor = Point{X: cursor.X + dx, Y: cursor.Y + dy}
		points = append(points, cursor)
	}
	w.path = points
	return w.path
}
// newWireByString parses a comma-separated run of "<dir><length>" tokens
// (e.g. "R8,U5,L5,D3") into a Wire. Each segment records its start, end
// and Cost, the total steps walked before the segment begins.
func newWireByString(line string) Wire {
	segments := make([]WireSegment, 0)
	cursor := Point{X: 0, Y: 0}
	traveled := 0
	for _, token := range strings.Split(line, ",") {
		dir := token[0]
		length := dl.ParseInt(token[1:])
		var end Point
		switch dir {
		case 'U':
			end = Point{X: cursor.X, Y: cursor.Y + length}
		case 'R':
			end = Point{X: cursor.X + length, Y: cursor.Y}
		case 'D':
			end = Point{X: cursor.X, Y: cursor.Y - length}
		case 'L':
			end = Point{X: cursor.X - length, Y: cursor.Y}
		}
		segments = append(segments, WireSegment{
			Start:     cursor,
			Stop:      end,
			Length:    length,
			Cost:      traveled,
			Direction: dir,
		})
		cursor = end
		traveled += length
	}
	return Wire{
		Segments: segments,
	}
}
// getPath expands a full wire description into the ordered list of every
// grid point visited, starting at the origin. Panics on an unknown
// direction character.
func getPath(line string) []Point {
	trail := []Point{{0, 0}}
	x, y := 0, 0
	for _, token := range strings.Split(line, ",") {
		steps := dl.ParseInt(token[1:])
		var dx, dy int
		switch token[0] {
		case 'U':
			dy = 1
		case 'R':
			dx = 1
		case 'D':
			dy = -1
		case 'L':
			dx = -1
		default:
			panic("invalid instruction.Direction")
		}
		for step := 0; step < steps; step++ {
			x += dx
			y += dy
			trail = append(trail, Point{x, y})
		}
	}
	return trail
}
// getPaths expands every wire description into its full point trail.
func getPaths(lines []string) [][]Point {
	paths := make([][]Point, 0, len(lines))
	for _, line := range lines {
		paths = append(paths, getPath(line))
	}
	return paths
}

// getWires parses every wire description into a segment-based Wire.
func getWires(lines []string) []Wire {
	wires := make([]Wire, 0, len(lines))
	for _, line := range lines {
		wires = append(wires, newWireByString(line))
	}
	return wires
}
// getIntersections returns every point (other than the origin) that is
// visited by more than one path. A point crossed several times by the
// same path counts once for that path.
//
// The previous version encoded each point as a "x/y" string and then
// re-parsed the keys back into Points; using Point itself as the map key
// removes the format/parse round trip. The result order is unspecified in
// both versions (map iteration).
func getIntersections(paths [][]Point) []Point {
	counts := make(map[Point]int)
	for _, path := range paths {
		// Deduplicate within a single path so self-crossings don't count.
		seen := make(map[Point]bool)
		for _, p := range path {
			if p.X == 0 && p.Y == 0 {
				// The shared origin is never an intersection.
				continue
			}
			if !seen[p] {
				seen[p] = true
				counts[p]++
			}
		}
	}
	result := make([]Point, 0)
	for p, n := range counts {
		if n > 1 {
			result = append(result, p)
		}
	}
	return result
}
// getManhattenDistance returns the Manhattan (taxicab) distance between
// two points. Computed with integer arithmetic instead of the previous
// float64 math.Abs round trip (same results, no conversions).
func getManhattenDistance(a Point, b Point) int {
	dx := a.X - b.X
	if dx < 0 {
		dx = -dx
	}
	dy := a.Y - b.Y
	if dy < 0 {
		dy = -dy
	}
	return dx + dy
}

// getShortestDistance returns the crossing closest to base by Manhattan
// distance (ignoring the origin) together with that distance. With no
// usable crossings it returns base and math.MaxInt64.
func getShortestDistance(base Point, crossings []Point) (Point, int) {
	best := math.MaxInt64
	bestPoint := base
	for _, p := range crossings {
		if p.X == 0 && p.Y == 0 {
			continue
		}
		if d := getManhattenDistance(base, p); d < best {
			best = d
			bestPoint = p
		}
	}
	return bestPoint, best
}
// dataItem records, for one grid point on one wire, the fewest steps the
// wire takes to reach it and the direction of travel through it.
type dataItem struct {
	Dist int
	Direction uint8
}
// getShortestPath finds the intersection with the lowest combined step
// count across both wires, returning the step total and the point.
//
// NOTE(review): this assumes exactly two wires — only allData[0] and
// allData[1] are consulted below; extra wires are indexed but ignored.
func getShortestPath(base Point, wires []Wire, intersections []Point) (int, Point) {
	allData := make(map[int]map[string]dataItem)
	for i, wire := range wires {
		data := make(map[string]dataItem)
		for _, segment := range wire.Segments {
			for pointIdx, point := range segment.Path() {
				// ignore base (0/0)
				if point.Equals(base) {
					continue
				}
				pointKey := point.ToString()
				// Steps to reach this point: steps before the segment plus
				// the offset within the segment's path.
				d := segment.Cost + pointIdx
				if v, exist := data[pointKey]; !exist || d < v.Dist {
					data[pointKey] = dataItem{
						Dist: d,
						Direction: segment.Direction,
					}
					// TODO what if overridden direction is important?
				}
			}
		}
		allData[i] = data
	}
	minSteps := math.MaxInt64
	minStepsPoint := Point{0, 0}
	for _, intersection := range intersections {
		key := intersection.ToString()
		wireSegment0 := allData[0][key]
		wireSegment1 := allData[1][key]
		// Only count crossings where the wires are perpendicular; a missing
		// entry yields the zero dataItem, whose Direction fails both tests.
		wireSegment0IsVertical := wireSegment0.Direction == 'U' || wireSegment0.Direction == 'D'
		wireSegment1IsVertical := wireSegment1.Direction == 'U' || wireSegment1.Direction == 'D'
		if (wireSegment0IsVertical && !wireSegment1IsVertical) || (!wireSegment0IsVertical && wireSegment1IsVertical) {
			steps := wireSegment0.Dist + wireSegment1.Dist
			if steps < minSteps {
				minSteps = steps
				minStepsPoint = intersection.Clone()
			}
		}
	}
	return minSteps, minStepsPoint
}
} | day03/main.go | 0.520496 | 0.406685 | main.go | starcoder |
package queues
// Grid cell markers: INF denotes an empty room, Gate a gate and Wall an
// impassable cell.
const (
	INF = 2147483647
	Gate = 0
	Wall = -1
)

// Room addresses a single cell by its row and column.
type Room struct {
	row int
	column int
}

// The four cardinal neighbor offsets as (row, column) deltas.
var directions = [4][2]int{
	{ -1, 0 }, // North
	{ 1, 0 }, // South
	{ 0, 1 }, // East
	{ 0, -1 }, // West
}

// wallsAndGates takes a grid of ints representing gates (0), walls (-1)
// and empty rooms (INF). Every empty room is overwritten with the
// distance to its nearest gate, or left at INF when unreachable. The
// fill is a multi-source BFS that expands one distance ring per pass.
func wallsAndGates(rooms [][]int) {
	frontier := GetGateRooms(rooms)
	visited := make(map[Room]bool)
	for dist := 1; len(frontier) > 0; dist++ {
		var next []Room
		for _, room := range frontier {
			for _, nb := range room.GetEmptyNeighboringRooms(rooms) {
				if visited[nb] {
					continue
				}
				visited[nb] = true
				rooms[nb.row][nb.column] = dist
				next = append(next, nb)
			}
		}
		frontier = next
	}
}

// GetGateRooms lists the coordinates of every gate cell in the grid.
func GetGateRooms(rooms [][]int) []Room {
	gates := make([]Room, 0, 0)
	for r := range rooms {
		for c := range rooms[r] {
			if rooms[r][c] == Gate {
				gates = append(gates, Room{row: r, column: c})
			}
		}
	}
	return gates
}

// GetEmptyNeighboringRooms returns the in-bounds cardinal neighbors of
// currentRoom that are still empty (INF).
func (currentRoom Room) GetEmptyNeighboringRooms(rooms [][]int) []Room {
	height := len(rooms)
	width := len(rooms[0])
	neighbors := make([]Room, 0, 0)
	for _, d := range directions {
		r := currentRoom.row + d[0]
		c := currentRoom.column + d[1]
		if r < 0 || r > height-1 || c < 0 || c > width-1 {
			continue
		}
		if rooms[r][c] != INF {
			continue
		}
		neighbors = append(neighbors, Room{row: r, column: c})
	}
	return neighbors
}
package fixture
import (
	"bufio"
	"encoding/csv"
	"io"
	"log"
	"math"
	"os"
	"strconv"

	"github.com/g3n/engine/math32"
)
// Fixture holds LED coordinates loaded from a TSV file, together with a
// scale/translate transformation, the transformed copies of the points,
// and the bounding corners of both the raw and the transformed sets. The
// idx field backs the Available/Next/Reset iteration over tpts.
type Fixture struct {
	filepath string // File path
	pts []*math32.Vector3 // List of relative LED coordinates
	tpts []*math32.Vector3 // List of transformed coordinates
	tl *math32.Vector3 // Top left corner
	br *math32.Vector3 // Bottom right corner
	ttl *math32.Vector3 // Transformed Top left corner
	tbr *math32.Vector3 // Transformed Bottom right corner
	idx int // internal pointer
	translate *math32.Vector3 // translate
	scale *math32.Vector3 // matrix multiply to scale points
}
// NewFixture loads LED coordinates from the tab-separated file at path.
// Each row holds an x and a y coordinate; malformed rows are logged and
// skipped. Returns nil when the file cannot be opened.
//
// Fixes over the previous version: the file handle was never closed, and
// an Open failure was logged but execution continued with a nil handle,
// aborting the process (log.Fatal) on the first read.
func NewFixture(path string) *Fixture {
	f := &Fixture{filepath: path}
	tsv, err := os.Open(path)
	if err != nil {
		log.Printf("Invalid TSV file path: %v\n", path)
		return nil
	}
	defer tsv.Close()
	reader := csv.NewReader(bufio.NewReader(tsv))
	reader.Comma = '\t'
	for {
		line, err := reader.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		x, err := strconv.ParseFloat(line[0], 32)
		if err != nil {
			log.Printf("ERROR: invalid data in %v: %v\n", path, line[0])
			continue
		}
		y, err := strconv.ParseFloat(line[1], 32)
		if err != nil {
			log.Printf("ERROR: invalid data in %v: %v\n", path, line[1])
			continue
		}
		f.pts = append(f.pts, math32.NewVector3(float32(x), float32(y), 0))
	}
	f.tl, f.br = f.FindCorners(f.pts)
	f.ResetTransformation()
	return f
}
// FindCorners scans pts and returns the bounding corners: topLeft is
// (min X, max Y) and bottomRight is (max X, min Y).
//
// The previous version seeded the scan with the magic sentinels
// 10000/0.01/0/10000, which silently produced wrong corners for
// coordinates outside those ranges (e.g. all-negative Y or X > 10000).
// Seeding with ±math.MaxFloat32 handles any finite coordinates. For an
// empty pts slice the result is degenerate (the sentinels themselves) —
// callers should not rely on it, matching the old behaviour.
func (f *Fixture) FindCorners(pts []*math32.Vector3) (topLeft, bottomRight *math32.Vector3) {
	var (
		minX float32 = math.MaxFloat32
		maxX float32 = -math.MaxFloat32
		minY float32 = math.MaxFloat32
		maxY float32 = -math.MaxFloat32
	)
	for _, p := range pts {
		if p.X < minX {
			minX = p.X
		}
		if p.X > maxX {
			maxX = p.X
		}
		if p.Y < minY {
			minY = p.Y
		}
		if p.Y > maxY {
			maxY = p.Y
		}
	}
	return math32.NewVector3(minX, maxY, 0), math32.NewVector3(maxX, minY, 0)
}
// ResetTransformation restores the identity transform (unit scale, zero
// translation) and refreshes the transformed points and corners.
func (f *Fixture) ResetTransformation() {
	f.Transform(math32.NewVector3(1.0, 1.0, 1.0),
		math32.NewVector3(0.0, 0.0, 0.0))
}

// Available reports whether the iteration over transformed points has
// more elements.
func (f *Fixture) Available() bool {
	return f.idx < len(f.tpts)
}

// Length returns the number of transformed points.
func (f *Fixture) Length() int {
	return len(f.tpts)
}

// Next returns the current transformed point and advances the iterator.
// It does not bounds-check; call Available first.
func (f *Fixture) Next() *math32.Vector3 {
	defer func() { f.idx++ }()
	return f.tpts[f.idx]
}

// Reset rewinds the iterator to the first transformed point.
func (f *Fixture) Reset() {
	f.idx = 0
}
// TopLeft returns the top-left corner of the untransformed points.
func (f Fixture) TopLeft() *math32.Vector3 {
	return f.tl
}

// BottomRight returns the bottom-right corner of the untransformed points.
func (f Fixture) BottomRight() *math32.Vector3 {
	return f.br
}

// TransformedTopLeft returns the top-left corner after transformation.
func (f Fixture) TransformedTopLeft() *math32.Vector3 {
	return f.ttl
}

// TransformedBottomRight returns the bottom-right corner after transformation.
func (f Fixture) TransformedBottomRight() *math32.Vector3 {
	return f.tbr
}

// UpdatePoints makes the current transformation permanent by replacing
// the raw points and corners with their transformed counterparts.
func (f *Fixture) UpdatePoints() {
	f.pts = f.tpts
	f.tl = f.ttl
	f.br = f.tbr
}
func (f *Fixture) Transformed() []*math32.Vector3 {
f.tpts = make([]*math32.Vector3, len(f.pts), len(f.pts))
for iP, p := range f.pts {
f.tpts[iP] = math32.NewVector3(
(p.X*f.scale.X)+f.translate.X,
(p.Y*f.scale.Y)+f.translate.Y, 0)
}
return f.tpts
}
func (f *Fixture) Transform(scale, translate *math32.Vector3) {
f.scale = scale
f.translate = translate
f.ttl, f.tbr = f.FindCorners(f.Transformed())
} | cmd/scenebuild/fixture/fixturefile.go | 0.571288 | 0.449634 | fixturefile.go | starcoder |
package dynamics
import (
"fmt"
"github.com/SOMAS2020/SOMAS2020/internal/clients/team3/ruleevaluation"
"github.com/SOMAS2020/SOMAS2020/internal/common/rules"
"github.com/pkg/errors"
"gonum.org/v1/gonum/mat"
"math"
)
// GetDistanceToSubspace returns how far location is from the subspace
// carved out by the given rule dynamics: 0.0 for an empty dynamics list,
// -1 when location already satisfies every dynamic, and otherwise the
// distance to the nearest constraint hyperplane.
func GetDistanceToSubspace(dynamics []dynamic, location mat.VecDense) float64 {
	if len(dynamics) == 0 {
		return 0.0
	}
	var distances []float64
	// location lies inside the subspace only if every dynamic is satisfied.
	satisfied := true
	for _, dyn := range dynamics {
		satisfied = satisfied && satisfy(applyDynamic(dyn, location), dyn.aux)
	}
	if satisfied {
		return -1
	}
	for _, v := range dynamics {
		distances = append(distances, findDistanceToHyperplane(v.w, v.b, location))
	}
	// NOTE(review): with len(dynamics) > 0 the loop above always appends,
	// so distances is non-nil and non-empty here; the inner len check and
	// the trailing `return 0.0` are dead code.
	if distances != nil {
		if len(distances) == 0 {
			return -1
		}
		return distances[getSmallest(distances)]
	}
	return 0.0
}
// Input pairs a rule variable with its current value(s) and a flag saying
// whether the client is allowed to adjust it while searching for a
// rule-compliant position.
type Input struct {
	Name rules.VariableFieldName
	ClientAdjustable bool
	Value []float64
}
// FindClosestApproach evaluates namedInputs against ruleMatrix and, when
// the rule is violated, searches for the nearest rule-compliant position
// that leaves all non-ClientAdjustable inputs fixed. The original inputs
// are returned unchanged when they already comply or when no better
// position can be constructed.
func FindClosestApproach(ruleMatrix rules.RuleMatrix, namedInputs map[rules.VariableFieldName]Input) (namedOutputs map[rules.VariableFieldName]Input) {
	// Get raw data for processing
	droppedInputs := DropAllInputStructs(namedInputs)
	// Evaluate the rule on this data
	results, success := evaluateSingle(ruleMatrix, droppedInputs)
	if success {
		// Find any results in the vector that indicate a failure condition
		deficient, err := IdentifyDeficiencies(results, ruleMatrix.AuxiliaryVector)
		if len(deficient) == 0 {
			// If none are given, the submitted position is good, return.
			// NOTE(review): IdentifyDeficiencies returns an empty slice on
			// error, so an error also takes this early return and the
			// err == nil check below is effectively always true when reached.
			return namedInputs
		}
		if err == nil {
			// Build selected dynamic structures
			selectedDynamics := BuildSelectedDynamics(ruleMatrix, ruleMatrix.AuxiliaryVector, deficient)
			// We now need to add the dynamics implied by the present inputs (things we can't change)
			// First work out which inputs these are
			immutables := fetchImmutableInputs(namedInputs)
			// Collapse map to list
			allInputs := collapseInputsMap(namedInputs)
			// Work out dimensions of w
			fullSize := calculateFullSpaceSize(allInputs)
			// Select the immutable inputs
			immutableInputs := selectInputs(namedInputs, immutables)
			// Calculate all the extra dynamics
			extraDynamics, success2 := constructAllImmutableDynamics(immutableInputs, ruleMatrix, fullSize)
			if triggerZeroCheck(namedInputs) {
				// Cheap shortcut: try zeroing all adjustable inputs first.
				laced, success := zeroSatisfyCheck(ruleMatrix, namedInputs)
				if success {
					return laced
				}
			}
			if success2 {
				// Append the restrictions to the dynamics
				selectedDynamics = append(selectedDynamics, extraDynamics...)
			}
			// The subspace search mutates its dynamics slice, hence the copy.
			copyOfSelectedDynamics := copySelectedDynamics(selectedDynamics)
			// We are ready to use the findClosestApproach internal function
			bestPosition := findClosestApproachInSubspace(ruleMatrix, selectedDynamics, *DecodeValues(ruleMatrix, droppedInputs))
			if ruleevaluation.RuleEvaluation(ruleMatrix, bestPosition) {
				return laceOutputs(bestPosition, ruleMatrix, droppedInputs, namedInputs)
			}
			// Fall back: merge all equality dynamics into one and retry.
			resolved, left, success3 := combineDefiniteApproaches(copyOfSelectedDynamics, fullSize)
			if success3 {
				bestPosition = findClosestApproachInSubspace(ruleMatrix, append(left, resolved), *DecodeValues(ruleMatrix, droppedInputs))
			}
			return laceOutputs(bestPosition, ruleMatrix, droppedInputs, namedInputs)
		}
	}
	return namedInputs
}
// Shift adapts the rule to the inputs rather than the other way round:
// for every rule row the current position violates, the corresponding
// hyperplane is translated so that the position satisfies it. Returns the
// (possibly) modified matrix and whether any row was edited.
func Shift(ruleMatrix rules.RuleMatrix, namedInputs map[rules.VariableFieldName]Input) (newMatrix rules.RuleMatrix, edited bool) {
	droppedInputs := DropAllInputStructs(namedInputs)
	results, success := evaluateSingle(ruleMatrix, droppedInputs)
	if success {
		// Find any results in the vector that indicate a failure condition
		deficient, err := IdentifyDeficiencies(results, ruleMatrix.AuxiliaryVector)
		if err != nil {
			return ruleMatrix, false
		}
		if len(deficient) == 0 {
			// If none are given, the submitted position is good, return
			return ruleMatrix, false
		}
		allDynamics := BuildAllDynamics(ruleMatrix, ruleMatrix.AuxiliaryVector)
		if len(allDynamics) == 0 {
			return ruleMatrix, false
		}
		for _, row := range deficient {
			// Copy before editing so a partial failure leaves no half-edits
			// visible to the caller's original matrix.
			copyOfMatrix := mat.DenseCopyOf(&ruleMatrix.ApplicableMatrix)
			if row < len(allDynamics) {
				newDynamic := shiftDynamic(allDynamics[row], DecodeValues(ruleMatrix, droppedInputs))
				newRow := translateDynamic(newDynamic)
				copyOfMatrix.SetRow(row, newRow)
				ruleMatrix.ApplicableMatrix = *copyOfMatrix
			}
		}
		return ruleMatrix, true
	}
	return ruleMatrix, false
}
// translateDynamic flattens a dynamic back into a rule-matrix row: the
// weight entries in order, followed by the offset b as the last element.
func translateDynamic(inputDyn dynamic) []float64 {
	rows, _ := inputDyn.w.Dims()
	row := make([]float64, 0, rows+1)
	for i := 0; i < rows; i++ {
		row = append(row, inputDyn.w.AtVec(i))
	}
	return append(row, inputDyn.b)
}
// shiftDynamic adjusts a dynamic's offset so that the given input
// position satisfies it. The per-aux-code adjustments mirror the satisfy
// semantics (0: ==0, 1: >0, 2: >=0, 3: !=0); unrecognised codes pass
// through unchanged. Works on the value copy of inputDyn, so the caller's
// dynamic is not modified.
func shiftDynamic(inputDyn dynamic, input mat.Vector) (outputDyn dynamic) {
	// res is the rule row evaluated at the current position.
	res := mat.Dot(&inputDyn.w, input) + inputDyn.b
	switch inputDyn.aux {
	case 0:
		inputDyn.b -= res
		return inputDyn
	case 1:
		// Shift to zero, then by one more so the result is strictly positive.
		inputDyn.b -= res
		inputDyn.b += 1
		return inputDyn
	case 2:
		inputDyn.b -= res
		return inputDyn
	case 3:
		// NOTE(review): for aux 3 (must be non-zero) this only bumps b by 1,
		// which fails when res was exactly -1 — confirm intended.
		inputDyn.b += 1
		return inputDyn
	default:
		return inputDyn
	}
}
// findClosestApproachInSubspace works out the closest point in the rule
// subspace to the current location by repeatedly projecting onto the
// nearest violated hyperplane and recursing on the remaining dynamics.
func findClosestApproachInSubspace(matrixOfRules rules.RuleMatrix, dynamics []dynamic, location mat.VecDense) mat.VecDense {
	if len(dynamics) == 0 {
		return location
	}
	var distances []float64
	for _, v := range dynamics {
		distances = append(distances, findDistanceToHyperplane(v.w, v.b, location))
	}
	indexOfSmall := getSmallestNonZero(distances)
	if indexOfSmall == -1 {
		return location
	}
	closestApproach := calculateClosestApproach(dynamics[indexOfSmall], location)
	if ruleevaluation.RuleEvaluation(matrixOfRules, closestApproach) {
		return closestApproach
	} else {
		// Recurse without the dynamic we just projected onto.
		// NOTE(review): append(xSlice, ySlice...) writes into the caller's
		// backing array — this is why FindClosestApproach takes a copy via
		// copySelectedDynamics before calling in here.
		xSlice := dynamics[:indexOfSmall]
		ySlice := dynamics[indexOfSmall+1:]
		return findClosestApproachInSubspace(matrixOfRules, append(xSlice, ySlice...), closestApproach)
	}
}
// triggerZeroCheck reports whether any non-adjustable input already holds
// a zero value, in which case the all-zero shortcut is worth trying.
func triggerZeroCheck(namedInputs map[rules.VariableFieldName]Input) bool {
	for _, input := range namedInputs {
		if input.ClientAdjustable {
			continue
		}
		if input.Value[rules.SingleValueVariableEntry] == 0 {
			return true
		}
	}
	return false
}
// zeroSatisfyCheck tests whether zeroing every client-adjustable input
// (keeping immutable inputs as-is) satisfies the rule. On success it
// returns the re-laced input map and true; otherwise an empty map and
// false.
func zeroSatisfyCheck(ruleMatrix rules.RuleMatrix, namedInputs map[rules.VariableFieldName]Input) (map[rules.VariableFieldName]Input, bool) {
	newMap := make(map[rules.VariableFieldName][]float64)
	for keys, values := range namedInputs {
		if values.ClientAdjustable {
			// Adjustable inputs are replaced with zero vectors of equal width.
			newMap[keys] = generateEmpty(len(values.Value))
		} else {
			newMap[keys] = values.Value
		}
	}
	if ruleevaluation.RuleEvaluation(ruleMatrix, *DecodeValues(ruleMatrix, newMap)) {
		laced := laceOutputs(*DecodeValues(ruleMatrix, newMap), ruleMatrix, DropAllInputStructs(namedInputs), namedInputs)
		return laced, true
	}
	return map[rules.VariableFieldName]Input{}, false
}
// copySelectedDynamics returns a shallow copy of orig so that callers can
// hand one slice to code that mutates its backing array (see
// findClosestApproachInSubspace) while keeping a pristine version.
// Uses the built-in copy instead of an element-by-element loop.
func copySelectedDynamics(orig []dynamic) []dynamic {
	cpy := make([]dynamic, len(orig))
	copy(cpy, orig)
	return cpy
}
// combineDefiniteApproaches is a last ditch attempt to get a defined
// position if all else fails: it folds all equality dynamics (aux == 0)
// into a single combined dynamic and returns it together with the
// remaining (inequality) dynamics. Returns false when there was nothing
// to combine.
func combineDefiniteApproaches(selectedDynamics []dynamic, size int) (dynamic, []dynamic, bool) {
	leftOver := []dynamic{}
	if len(selectedDynamics) != 0 {
		// aux 12 is a sentinel meaning "no equality dynamic absorbed yet".
		baseDynamic := dynamic{
			w: *mat.NewVecDense(size, generateEmpty(size)),
			b: 0,
			aux: 12,
		}
		for _, val := range selectedDynamics {
			if val.aux == 0 {
				if baseDynamic.aux == 12 {
					baseDynamic = val
				} else {
					// Subtract subsequent equality constraints from the base.
					baseDynamic.w.AddScaledVec(&baseDynamic.w, -1, &val.w)
					baseDynamic.b -= val.b
				}
			} else {
				leftOver = append(leftOver, val)
			}
		}
		return baseDynamic, leftOver, true
	}
	return dynamic{}, selectedDynamics, false
}
// generateEmpty returns a zero-filled float64 slice of the given size.
// make already zeroes the elements, so the previous append loop was
// redundant.
func generateEmpty(size int) []float64 {
	return make([]float64, size)
}
// calculateClosestApproach works out the least-squares closest point on
// the constraint's hyperplane (w·x + b = 0) to the current location: the
// orthogonal projection location + t*w with t = (-b - w·location)/|w|².
func calculateClosestApproach(constraint dynamic, location mat.VecDense) mat.VecDense {
	denom := math.Pow(mat.Norm(&constraint.w, 2), 2)
	numer := -1 * constraint.b
	nRows, _ := location.Dims()
	for i := 0; i < nRows; i++ {
		numer -= location.AtVec(i) * constraint.w.AtVec(i)
	}
	t := numer / denom
	closestApproach := mat.VecDenseCopyOf(&location)
	closestApproach.AddScaledVec(closestApproach, t, &constraint.w)
	return *closestApproach
}
// BuildSelectedDynamics builds one dynamic per rule-matrix row whose
// index appears in selectedRules.
func BuildSelectedDynamics(matrixOfRules rules.RuleMatrix, auxiliaryVector mat.VecDense, selectedRules []int) []dynamic {
	ruleMat := matrixOfRules.ApplicableMatrix
	rowCount, _ := ruleMat.Dims()
	selected := []dynamic{}
	for row := 0; row < rowCount; row++ {
		if !findInSlice(selectedRules, row) {
			continue
		}
		weights := mat.VecDenseCopyOf(ruleMat.RowView(row))
		selected = append(selected, constructSingleDynamic(*weights, auxiliaryVector.AtVec(row)))
	}
	return selected
}
// BuildAllDynamics converts every row of the rule matrix into a dynamic.
func BuildAllDynamics(matrixOfRules rules.RuleMatrix, auxiliaryVector mat.VecDense) []dynamic {
	ruleMat := matrixOfRules.ApplicableMatrix
	rowCount, _ := ruleMat.Dims()
	var dynamics []dynamic
	for row := 0; row < rowCount; row++ {
		weights := mat.VecDenseCopyOf(ruleMat.RowView(row))
		dynamics = append(dynamics, constructSingleDynamic(*weights, auxiliaryVector.AtVec(row)))
	}
	return dynamics
}
// constructSingleDynamic builds a dynamic from a row of the rule matrix:
// the last entry of the row is the offset b, everything before it the
// weight vector w.
func constructSingleDynamic(ruleRow mat.VecDense, auxCode float64) dynamic {
	nRows, _ := ruleRow.Dims()
	offset := ruleRow.AtVec(nRows - 1)
	weights := mat.VecDenseCopyOf(ruleRow.SliceVec(0, nRows-1))
	return createDynamic(*weights, offset, auxCode)
}
// FindMinimumRequirements maps each deficient entry to the smallest
// result value that would satisfy its auxiliary code: codes 1 and 3
// (strictly positive / non-zero) need 1, every other code is satisfied
// by 0.
func FindMinimumRequirements(deficients []int, aux mat.VecDense) []float64 {
	var outputMins []float64
	for _, entry := range deficients {
		if code := aux.AtVec(entry); code == 1 || code == 3 {
			outputMins = append(outputMins, 1)
		} else {
			outputMins = append(outputMins, 0)
		}
	}
	return outputMins
}
// laceOutputs rebuilds the named-input map from a flat solution vector by
// slicing out each required variable's values in declaration order, and
// restores each variable's ClientAdjustable flag from the original map.
func laceOutputs(vector mat.VecDense, ruleMat rules.RuleMatrix, droppedInputs map[rules.VariableFieldName][]float64, original map[rules.VariableFieldName]Input) (namedOutputs map[rules.VariableFieldName]Input) {
	outputs := make(map[rules.VariableFieldName]Input)
	mark := 0
	for _, name := range ruleMat.RequiredVariables {
		input := droppedInputs[name]
		output := []float64{}
		for i := 0; i < len(input); i++ {
			output = append(output, vector.AtVec(mark+i))
		}
		// BUG FIX: the offset must accumulate across variables. The old
		// code did `mark = len(input)`, resetting it to the width of the
		// current variable, so any rule with three or more required
		// variables read the wrong vector slots.
		mark += len(input)
		inp := Input{
			Name:             name,
			ClientAdjustable: original[name].ClientAdjustable,
			Value:            output,
		}
		outputs[name] = inp
	}
	return outputs
}
// selectInputs picks the inputs for the requested names, in order.
func selectInputs(data map[rules.VariableFieldName]Input, needed []rules.VariableFieldName) []Input {
	picked := []Input{}
	for _, name := range needed {
		picked = append(picked, data[name])
	}
	return picked
}

// collapseInputsMap flattens the input map into a slice (arbitrary order).
func collapseInputsMap(data map[rules.VariableFieldName]Input) []Input {
	flat := []Input{}
	for _, input := range data {
		flat = append(flat, input)
	}
	return flat
}
// calculateFullSpaceSize totals the widths of all input value vectors,
// i.e. the dimension of the full variable space.
func calculateFullSpaceSize(inputs []Input) int {
	total := 0
	for i := range inputs {
		total += len(inputs[i].Value)
	}
	return total
}
// constructAllImmutableDynamics builds an equality dynamic per immutable
// input, pinning that input's entries of the solution vector to their
// current values. success is false when any single construction failed.
func constructAllImmutableDynamics(immutables []Input, ruleMat rules.RuleMatrix, fullSize int) ([]dynamic, bool) {
	success := true
	allDynamics := []dynamic{}
	for _, immutable := range immutables {
		dyn, succ := constructImmutableDynamic(immutable, ruleMat.RequiredVariables, fullSize)
		success = success && succ
		allDynamics = append(allDynamics, dyn)
	}
	return allDynamics, success
}
// constructImmutableDynamic builds an equality (aux 0) dynamic that fixes
// one immutable input in the full variable space: a 0/1 indicator weight
// vector over the input's slots, with offset b = -sum(values). Fails
// (false) when the input's name is not a required variable of the rule.
// NOTE(review): the indicator + summed offset only pins the SUM of a
// multi-entry input, not each entry individually — confirm intended for
// Value slices longer than one.
func constructImmutableDynamic(immutable Input, reqVar []rules.VariableFieldName, fullSize int) (dynamic, bool) {
	// index is the input's first slot in the flattened variable vector.
	index := getIndexOfVar(immutable.Name, reqVar)
	if index != -1 {
		newDynamic := dynamic{
			aux: 0,
		}
		deltaVect := []float64{}
		for i := 0; i < fullSize; i++ {
			deltaVect = append(deltaVect, 0)
		}
		for i := 0; i < len(immutable.Value); i++ {
			deltaVect[i+index] = 1.0
		}
		w := mat.NewVecDense(len(deltaVect), deltaVect)
		b := sumBValues(immutable.Value)
		newDynamic.w = *w
		newDynamic.b = b
		return newDynamic, true
	}
	return dynamic{}, false
}
// sumBValues returns the negated sum of values, used as the offset b of
// an equality dynamic.
func sumBValues(values []float64) float64 {
	total := 0.0
	for i := range values {
		total += values[i]
	}
	return -total
}
// getIndexOfVar returns the position of name within reqVar, or -1 when
// it is absent.
func getIndexOfVar(name rules.VariableFieldName, reqVar []rules.VariableFieldName) int {
	for i := range reqVar {
		if reqVar[i] == name {
			return i
		}
	}
	return -1
}
// fetchImmutableInputs lists the names of all inputs the client may not
// adjust.
func fetchImmutableInputs(namedInputs map[rules.VariableFieldName]Input) []rules.VariableFieldName {
	immutables := []rules.VariableFieldName{}
	for name, input := range namedInputs {
		if input.ClientAdjustable {
			continue
		}
		immutables = append(immutables, name)
	}
	return immutables
}
// DropAllInputStructs strips the Input wrappers from the map, leaving
// only the raw value slices keyed by variable name.
func DropAllInputStructs(inputs map[rules.VariableFieldName]Input) map[rules.VariableFieldName][]float64 {
	raw := make(map[rules.VariableFieldName][]float64, len(inputs))
	for name, input := range inputs {
		raw[name] = dropInputStruct(input)
	}
	return raw
}

// dropInputStruct extracts the raw value slice from a single Input.
func dropInputStruct(input Input) []float64 {
	return input.Value
}
// DecodeValues flattens the values of rm's required variables, in order,
// into a single column vector. Returns nil when any required variable is
// missing from values.
//
// NOTE(review): the `_ = fmt.Sprintf("Hello")` below is leftover debug
// code with no effect; mat.NewVecDense panics on a zero-length slice, so
// a rule with no required variables (or all-empty values) crashes here —
// confirm callers never construct such a rule.
func DecodeValues(rm rules.RuleMatrix, values map[rules.VariableFieldName][]float64) *mat.VecDense {
	var finalVariableVect []float64
	for _, varName := range rm.RequiredVariables {
		if value, ok := values[varName]; ok {
			finalVariableVect = append(finalVariableVect, value...)
		} else {
			return nil
		}
	}
	if len(finalVariableVect) == 0 {
		_ = fmt.Sprintf("Hello")
	}
	varVect := mat.NewVecDense(len(finalVariableVect), finalVariableVect)
	return varVect
}
// decodeWithConst flattens the required variables into a single vector
// and appends a constant 1 entry so affine rules can apply their offset
// column. Returns nil when any required variable is missing.
func decodeWithConst(rm rules.RuleMatrix, values map[rules.VariableFieldName][]float64) *mat.VecDense {
	var flattened []float64
	for _, varName := range rm.RequiredVariables {
		value, ok := values[varName]
		if !ok {
			return nil
		}
		flattened = append(flattened, value...)
	}
	flattened = append(flattened, 1)
	return mat.NewVecDense(len(flattened), flattened)
}
// evaluateSingle multiplies the rule matrix by the (const-extended)
// variable vector and returns the per-row results. success is false when
// the variable vector could not be built or the matrix is empty.
func evaluateSingle(rm rules.RuleMatrix, values map[rules.VariableFieldName][]float64) (outputVec mat.VecDense, success bool) {
	varVect := decodeWithConst(rm, values)
	if varVect != nil {
		nrows, ncols := rm.ApplicableMatrix.Dims()
		if nrows == 0 || ncols == 0 {
			return mat.VecDense{}, false
		}
		results := ruleevaluation.RuleMul(*varVect, rm.ApplicableMatrix)
		return *results, true
	}
	return mat.VecDense{}, false
}
// IdentifyDeficiencies checks the result of a rule evaluation and returns the
// indices of the result-vector entries that do not comply with their auxiliary
// codes. It errors when the vectors' dimensions differ or an aux code is
// outside the valid range [0, 4].
func IdentifyDeficiencies(b mat.VecDense, aux mat.VecDense) ([]int, error) {
	if !checkDimensions(b, aux) {
		return []int{}, errors.Errorf("Vectors '%v' and '%v' do not have the same dimensions", b, aux)
	}
	nRows, _ := b.Dims()
	outputData := []int{}
	for i := 0; i < nRows; i++ {
		if aux.AtVec(i) > 4 || aux.AtVec(i) < 0 {
			return []int{}, errors.Errorf("Auxilliary vector at entry '%v' has aux code out of range: '%v'", i, aux.AtVec(i))
		}
		if !satisfy(b.AtVec(i), aux.AtVec(i)) {
			outputData = append(outputData, i)
		}
	}
	return outputData, nil
}
// CollapseRuleMap flattens a rule map into a slice of its values. Map
// iteration order is not deterministic, so neither is the slice order.
// (A leftover debug fmt.Sprintf whose result was discarded has been removed.)
func CollapseRuleMap(input map[string]rules.RuleMatrix) []rules.RuleMatrix {
	newInput := []rules.RuleMatrix{}
	for _, inp := range input {
		newInput = append(newInput, inp)
	}
	return newInput
}
// RemoveFromMap returns every rule in input except the one stored under
// ruleName. Map iteration order makes the slice order non-deterministic.
func RemoveFromMap(input map[string]rules.RuleMatrix, ruleName string) []rules.RuleMatrix {
	remaining := []rules.RuleMatrix{}
	for name, rule := range input {
		if name == ruleName {
			continue
		}
		remaining = append(remaining, rule)
	}
	return remaining
}
// satisfy checks whether a condition is met based on the result vector value x
// and the auxiliary code a:
//
//	0: x == 0, 1: x > 0, 2: x >= 0, 3: x != 0, 4: always satisfied.
//
// Any other code is treated as unsatisfied (the range check in
// IdentifyDeficiencies should prevent that from being reached).
func satisfy(x float64, a float64) bool {
	switch a {
	case 0:
		return x == 0
	case 1:
		return x > 0
	case 2:
		return x >= 0
	case 3:
		return x != 0
	case 4:
		return true
	default:
		return false
	}
} | internal/clients/team3/dynamics/ruleanalysis.go | 0.721154 | 0.505371 | ruleanalysis.go | starcoder |
package sortablemap
import (
"errors"
"fmt"
"reflect"
)
// IteratorFromMap is a convenience wrapper around DataFromMap that returns the
// iterator directly. It panics when the map's key or value types are not
// supported; use DataFromMap for proper error handling.
func IteratorFromMap(v interface{}) (iterator *QueryResultIterator) {
	data, err := DataFromMap(v)
	if err != nil {
		panic(err)
	}
	return data.Iterator()
}
// use this function for proper error handling, can also be used to make multiple iterators of the same data error that's prepared (better performance)
func DataFromMap(v interface{}) (*Data, error) {
typeInfo := reflect.TypeOf(v)
if typeInfo.Kind() != reflect.Map {
return nil, errors.New("must be map")
}
var results map[resultKey]resultValue
data := &Data{
KeyType: typeInfo.Key().Kind(),
ValueType: typeInfo.Elem().Kind(),
}
// check types
if !supportedKeyTypes[data.KeyType] {
return nil, fmt.Errorf("key type %s not supported", data.KeyType.String())
}
if !supportedValueTypes[data.ValueType] {
return nil, fmt.Errorf("value type %s not supported", data.ValueType.String())
}
// prepare data set
initResults := func(n int) {
results = make(map[resultKey]resultValue, n)
}
addResult := func(k interface{}, v interface{}) {
results[resultKey{
Value: k,
}] = resultValue{
Value: v,
}
}
// casts
if data.KeyType == reflect.Int64 {
if data.ValueType == reflect.Float64 {
m := v.(map[int64]float64)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Int64 {
m := v.(map[int64]int64)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Uint64 {
m := v.(map[int64]uint64)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Bool {
m := v.(map[int64]bool)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Interface {
m := v.(map[int64]interface{})
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Ptr {
m := v.(map[int64]*interface{})
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
}
} else if data.KeyType == reflect.String {
if data.ValueType == reflect.Interface {
m := v.(map[string]interface{})
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Ptr {
m := v.(map[string]*interface{})
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Float64 {
m := v.(map[string]float64)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Int64 {
m := v.(map[string]int64)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Uint64 {
m := v.(map[string]uint64)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Bool {
m := v.(map[string]bool)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
}
} else if data.KeyType == reflect.Uint64 {
if data.ValueType == reflect.Float64 {
m := v.(map[uint64]float64)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Int64 {
m := v.(map[uint64]int64)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Uint64 {
m := v.(map[uint64]uint64)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Bool {
m := v.(map[uint64]bool)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Interface {
m := v.(map[uint64]interface{})
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Ptr {
m := v.(map[uint64]*interface{})
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
}
} else if data.KeyType == reflect.Float64 {
if data.ValueType == reflect.Float64 {
m := v.(map[float64]float64)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Bool {
m := v.(map[float64]bool)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Int64 {
m := v.(map[float64]int64)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Uint64 {
m := v.(map[float64]uint64)
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Interface {
m := v.(map[float64]interface{})
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
} else if data.ValueType == reflect.Ptr {
m := v.(map[float64]*interface{})
initResults(len(m))
for k, v := range m {
addResult(k, v)
}
}
}
// validate
if results == nil {
panic(fmt.Sprintf("type not supported key %s values %s", data.KeyType, data.ValueType))
}
data.results = results
return data, nil
} | from_map.go | 0.53777 | 0.415551 | from_map.go | starcoder |
package octree
import (
"fmt"
"strings"
)
// Graph is the minimal read-only interface for graph types. Multi-edges are
// not supported.
type Graph interface {
	// Nodes returns all nodes in the graph, in a stable order.
	Nodes() []string
	// Neighbors returns the successors of node a, in a stable order.
	Neighbors(a string) []string
	// Weight returns the weight stored for the edge a -> z.
	Weight(a, z string) int64
}

type edge struct{ a, z string }

// AdjList is a mutable, directed, weighted graph backed by an adjacency list.
// AddEdge gives every edge a default weight of 1, so the graph can be treated
// as unweighted; calling AddEdge in both directions models an undirected graph.
type AdjList struct {
	nodes  []string
	edges  map[string][]string
	weight map[edge]int64
}

// NewAdjList creates an empty adjacency-list graph.
func NewAdjList() *AdjList {
	return &AdjList{
		nodes:  []string{},
		edges:  make(map[string][]string),
		weight: make(map[edge]int64),
	}
}

// AddNode inserts node n; it is a no-op when n already exists.
func (g *AdjList) AddNode(n string) {
	if _, exists := g.edges[n]; exists {
		return
	}
	g.nodes = append(g.nodes, n)
	g.edges[n] = []string{}
}

// AddEdge inserts the edge a -> z with the default weight of 1.
func (g *AdjList) AddEdge(a, z string) {
	g.AddWeightedEdge(a, z, 1)
}

// AddWeightedEdge inserts the edge a -> z with the given weight, creating both
// endpoints as needed. Re-adding an existing edge updates its weight without
// duplicating the adjacency entry.
func (g *AdjList) AddWeightedEdge(a, z string, weight int64) {
	g.AddNode(a)
	g.AddNode(z)
	g.weight[edge{a, z}] = weight
	neighbors := g.edges[a]
	for _, existing := range neighbors {
		if existing == z {
			return
		}
	}
	g.edges[a] = append(neighbors, z)
}

// Nodes returns all nodes in insertion order.
func (g *AdjList) Nodes() []string {
	return g.nodes
}

// Edges returns the underlying adjacency map.
func (g *AdjList) Edges() map[string][]string {
	return g.edges
}

// Neighbors returns the successors of node a in insertion order.
func (g *AdjList) Neighbors(a string) []string {
	return g.edges[a]
}

// Weight returns the weight of edge a -> z (0 when the edge is absent).
func (g *AdjList) Weight(a, z string) int64 {
	return g.weight[edge{a, z}]
}

// ToDot renders the graph in Graphviz dot format, which is handy for debugging.
func (g *AdjList) ToDot() string {
	var b strings.Builder
	b.WriteString("digraph {")
	for _, n := range g.Nodes() {
		b.WriteString(fmt.Sprintf("\n\t%q", n))
	}
	for _, a := range g.Nodes() {
		for _, z := range g.Neighbors(a) {
			b.WriteString(fmt.Sprintf("\n\t%q -> %q [label=%d]", a, z, g.Weight(a, z)))
		}
	}
	b.WriteString("\n}")
	return b.String()
}
package it
import (
"unsafe"
"github.com/m4gshm/gollections/c"
"github.com/m4gshm/gollections/notsafe"
)
// NoStarted is the head Iterator position: the index value used before
// iteration has produced any element.
const NoStarted = -1

// New creates an Iter over the elements slice and returns a reference to it.
func New[T any, TS ~[]T](elements TS) *Iter[T] {
	iter := NewHeadS(elements, notsafe.GetTypeSize[T]())
	return &iter
}

// NewHead creates an Iter positioned before the first slice element.
func NewHead[T any, TS ~[]T](elements TS) Iter[T] {
	return NewHeadS(elements, notsafe.GetTypeSize[T]())
}

// NewHeadS creates an Iter positioned before the first slice element, using a
// caller-supplied element size in bytes.
func NewHeadS[T any, TS ~[]T](elements TS, elementSize uintptr) Iter[T] {
	var (
		header = notsafe.GetSliceHeaderByRef(unsafe.Pointer(&elements))
		array  = unsafe.Pointer(header.Data)
		size   = header.Len
	)
	return Iter[T]{
		array:       array,
		elementSize: elementSize,
		size:        size,
		maxHasNext:  size - 2,
		current:     NoStarted,
	}
}

// NewTail creates an Iter positioned after the last slice element, for reverse
// iteration via Prev.
func NewTail[T any](elements []T) Iter[T] {
	return NewTailS(elements, notsafe.GetTypeSize[T]())
}

// NewTailS creates an Iter positioned after the last slice element, using a
// caller-supplied element size in bytes, for reverse iteration via Prev.
func NewTailS[T any](elements []T, elementSize uintptr) Iter[T] {
	var (
		header = notsafe.GetSliceHeaderByRef(unsafe.Pointer(&elements))
		array  = unsafe.Pointer(header.Data)
		size   = header.Len
	)
	return Iter[T]{
		array:       array,
		elementSize: elementSize,
		size:        size,
		maxHasNext:  size - 2,
		current:     size,
	}
}
// Iter is the Iterator implementation: it walks a slice's backing array
// directly through unsafe pointer arithmetic.
type Iter[T any] struct {
	array                     unsafe.Pointer // start of the backing array
	elementSize               uintptr        // size of one element in bytes
	size, maxHasNext, current int
}

// compile-time interface conformance checks
var (
	_ c.Iterator[any]     = (*Iter[any])(nil)
	_ c.PrevIterator[any] = (*Iter[any])(nil)
)

// HasNext reports whether a following Next call will yield an element.
func (i *Iter[T]) HasNext() bool {
	return CanIterateByRange(NoStarted, i.maxHasNext, i.current)
}

// HasPrev reports whether a following Prev call will yield an element.
func (i *Iter[T]) HasPrev() bool {
	return CanIterateByRange(1, i.size, i.current)
}

// GetNext returns the next element, or the zero value of T when exhausted.
func (i *Iter[T]) GetNext() T {
	t, _ := i.Next()
	return t
}

// GetPrev returns the previous element, or the zero value of T when exhausted.
func (i *Iter[T]) GetPrev() T {
	t, _ := i.Prev()
	return t
}

// Next advances the iterator and returns the next element and true, or the
// zero value of T and false when the iterator is exhausted.
func (i *Iter[T]) Next() (T, bool) {
	current := i.current
	if CanIterateByRange(NoStarted, i.maxHasNext, current) {
		current++
		i.current = current
		return *(*T)(notsafe.GetArrayElemRef(i.array, current, i.elementSize)), true
	}
	var no T
	return no, false
}

// Prev moves the iterator backwards and returns the previous element and true,
// or the zero value of T and false when the iterator is exhausted.
func (i *Iter[T]) Prev() (T, bool) {
	current := i.current
	if CanIterateByRange(1, i.size, current) {
		current--
		i.current = current
		return *(*T)(notsafe.GetArrayElemRef(i.array, current, i.elementSize)), true
	}
	var no T
	return no, false
}

// Get returns the element at the current position and true, or the zero value
// of T and false when the current position is outside the slice.
func (i *Iter[T]) Get() (T, bool) {
	current := i.current
	if IsValidIndex(i.size, current) {
		return *(*T)(notsafe.GetArrayElemRef(i.array, current, i.elementSize)), true
	}
	var no T
	return no, false
}

// Cap returns the length of the underlying slice.
func (i *Iter[T]) Cap() int {
	return i.size
}
// HasNext checks whether a next element exists, given the index of the current
// element and the slice.
func HasNext[T any](elements []T, current int) bool {
	return HasNextBySize(notsafe.GetLen(elements), current)
}

// HasPrev checks whether a previous element exists, given the index of the
// current element and the slice.
func HasPrev[T any](elements []T, current int) bool {
	return HasPrevBySize(notsafe.GetLen(elements), current)
}

// HasNextBySize checks whether a next element exists, given the index of the
// current element and the collection size.
func HasNextBySize(size int, current int) bool {
	return CanIterateByRange(NoStarted, size-2, current)
}

// HasPrevBySize checks whether a previous element exists, given the index of
// the current element and the collection size.
func HasPrevBySize(size, current int) bool {
	return CanIterateByRange(1, size, current)
}

// CanIterateByRange reports whether current lies within the inclusive range
// [first, last] of positions from which another iteration step can be taken.
func CanIterateByRange(first, last, current int) bool {
	return current >= first && current <= last
}
// IsValidIndex reports whether index is a valid position in a collection of
// the given size, i.e. 0 <= index < size.
func IsValidIndex(size, index int) bool {
	switch {
	case index < 0:
		return false
	case index >= size:
		return false
	default:
		return true
	}
}
// IsValidIndex2 reports whether index is a valid position in a collection of
// the given size, i.e. 0 <= index < size.
// Fixed: the previous bit-trick, !((index^size == 0) || index < 0), only
// rejected index == size and negative indexes, so any index greater than size
// was wrongly reported as valid.
func IsValidIndex2(size, index int) bool {
	return index >= 0 && index < size
}
// Get safely returns the element of elements at index current, or the zero
// value of T if the index is out of range.
func Get[T any, TS ~[]T](elements TS, current int) T {
	v, _ := Gett(elements, current)
	return v
}

// Gett safely returns the element at index current and true, or the zero value
// of T and false if the index is out of range.
// Fixed: among negative indexes only current == NoStarted (-1) was rejected,
// so any other negative index caused an out-of-range panic.
func Gett[T any, TS ~[]T](elements TS, current int) (T, bool) {
	if current < 0 || current >= len(elements) {
		var no T
		return no, false
	}
	return elements[current], true
}
// Package mp3gain uses the mp3gain program to analyze MP3s and compute gain adjustments.
package mp3gain
import (
"fmt"
"math"
"os/exec"
"strconv"
"strings"
)
// Info contains information about gain adjustments for a song, as reported by
// the mp3gain program (gain fields are dB adjustments).
type Info struct {
	// TrackGain is the track's dB gain adjustment independent of its album.
	TrackGain float64
	// AlbumGain is the album's dB gain adjustment.
	AlbumGain float64
	// PeakAmp is the peak amplitude of the song, with 1.0 being the maximum
	// amplitude playable without clipping.
	PeakAmp float64
}
// ComputeAlbum uses mp3gain to compute gain adjustments for the
// specified MP3 files, all of which should be from the same album.
// Keys in the returned map are the supplied paths.
// Errors are now wrapped with %w so callers can inspect the cause with
// errors.Is/errors.As.
func ComputeAlbum(paths []string) (map[string]Info, error) {
	// Return hardcoded data for tests if instructed.
	if infoForTest != nil {
		m := make(map[string]Info, len(paths))
		for _, p := range paths {
			m[p] = *infoForTest
		}
		return m, nil
	}

	out, err := exec.Command("mp3gain", append([]string{
		"-o", // "output is a database-friendly tab-delimited list"
		"-q", // "quiet mode: no status messages"
		"-s", "s", // "skip (ignore) stored tag info (do not read or write tags)"
	}, paths...)...).Output()
	if err != nil {
		return nil, fmt.Errorf("mp3gain failed: %w", err)
	}
	m, err := parseMP3GainOutput(string(out))
	if err != nil {
		return nil, fmt.Errorf("bad mp3gain output: %w", err)
	}
	return m, nil
}
// parseMP3GainOutput parses output from the mp3gain command for ComputeAlbum.
// The first line is a header, the last line is the album summary, and every
// line between them describes one track.
// Fixed: the underlying per-line parse error was previously dropped; it is
// now wrapped with %w.
func parseMP3GainOutput(out string) (map[string]Info, error) {
	lns := strings.Split(strings.TrimSpace(out), "\n")
	if len(lns) < 3 {
		return nil, fmt.Errorf("output %q not at least 3 lines", out)
	}
	// The last line contains the album summary.
	p, albumGain, _, err := parseMP3GainLine(lns[len(lns)-1])
	if err != nil {
		return nil, fmt.Errorf("failed parsing %q: %w", lns[len(lns)-1], err)
	}
	if p != `"Album"` {
		return nil, fmt.Errorf(`expected "Album" for summary %q`, lns[len(lns)-1])
	}
	// Skip the header and the album summary.
	m := make(map[string]Info, len(lns)-2)
	for _, ln := range lns[1 : len(lns)-1] {
		p, gain, peakAmp, err := parseMP3GainLine(ln)
		if err != nil {
			return nil, fmt.Errorf("failed parsing %q: %w", ln, err)
		}
		m[p] = Info{TrackGain: gain, AlbumGain: albumGain, PeakAmp: peakAmp}
	}
	return m, nil
}
// parseMP3GainLine parses one tab-separated line of mp3gain output, returning
// the file path, the dB gain adjustment, and the peak amplitude normalized so
// that 1.0 is full scale for 16-bit samples (rounded to 5 decimal places).
func parseMP3GainLine(ln string) (path string, gain, peakAmp float64, err error) {
	// Fields are path, MP3 gain, dB gain, max amplitude, max global_gain, min global_gain.
	parts := strings.Split(ln, "\t")
	if len(parts) != 6 {
		return "", 0, 0, fmt.Errorf("got %d field(s); want 6", len(parts))
	}
	gainVal, gainErr := strconv.ParseFloat(parts[2], 64)
	if gainErr != nil {
		return "", 0, 0, gainErr
	}
	ampVal, ampErr := strconv.ParseFloat(parts[3], 64)
	if ampErr != nil {
		return "", 0, 0, ampErr
	}
	// output seems to be based on 16-bit samples
	ampVal = math.Round(ampVal/32767*100000) / 100000
	return parts[0], gainVal, ampVal, nil
}
// infoForTest, when non-nil, is returned by ComputeAlbum for every path
// instead of running the mp3gain binary.
var infoForTest *Info

// SetInfoForTest sets a hardcoded Info object to use instead of
// actually running the mp3gain program. Pass nil to restore the normal
// behavior.
func SetInfoForTest(info *Info) {
	infoForTest = info
} | cmd/nup/mp3gain/gain.go | 0.774924 | 0.47658 | gain.go | starcoder |
package common
import "math"
// Matrix4 is a 4x4 float32 matrix stored in COLUMN-MAJOR order, matching
// WebGL's expected layout.
type Matrix4 struct {
	elements [16]float32 // COLUMN-MAJOR (just like WebGL)
}

// NewMatrix4 returns a new matrix initialized to the identity.
func NewMatrix4() *Matrix4 {
	matrix := Matrix4{elements: [16]float32{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}} // identity matrix
	return &matrix
}

// GetElements returns a reference to the underlying element array; mutating
// it mutates the matrix.
func (self *Matrix4) GetElements() *[16]float32 {
	return &self.elements // reference
}
// ----------------------------------------------------------------------------
// Setting element values
// ----------------------------------------------------------------------------
// Set overwrites all 16 elements and returns the matrix. Arguments are listed
// in row-major reading order (vRC = row R, column C) but stored column-major.
func (self *Matrix4) Set(
	v00 float32, v01 float32, v02 float32, v03 float32,
	v10 float32, v11 float32, v12 float32, v13 float32,
	v20 float32, v21 float32, v22 float32, v23 float32,
	v30 float32, v31 float32, v32 float32, v33 float32) *Matrix4 {
	self.elements = [16]float32{ // COLUMN-MAJOR (just like WebGL)
		v00, v10, v20, v30,
		v01, v11, v21, v31,
		v02, v12, v22, v32,
		v03, v13, v23, v33}
	return self
}

// SetIdentity resets this matrix to the identity matrix.
func (self *Matrix4) SetIdentity() *Matrix4 {
	self.Set(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1)
	return self
}

// SetCopy copies every element value from m into this matrix.
func (self *Matrix4) SetCopy(m *Matrix4) *Matrix4 {
	self.elements = m.elements
	return self
}

// SetTranspose transposes this matrix in place by swapping the six symmetric
// off-diagonal element pairs of the column-major array.
func (self *Matrix4) SetTranspose() *Matrix4 {
	e := &self.elements // reference
	e[1], e[4] = e[4], e[1]
	e[2], e[8] = e[8], e[2] // [0], [4], [ 8], [12]
	e[3], e[12] = e[12], e[3] // [1], [5], [ 9], [13]
	e[6], e[9] = e[9], e[6] // [2], [6], [10], [14]
	e[7], e[13] = e[13], e[7] // [3], [7], [11], [15]
	e[11], e[14] = e[14], e[11]
	return self
}

// SetTranslation loads a pure translation by (tx, ty, tz).
func (self *Matrix4) SetTranslation(tx float32, ty float32, tz float32) *Matrix4 {
	self.Set(
		1.0, 0.0, 0.0, tx,
		0.0, 1.0, 0.0, ty,
		0.0, 0.0, 1.0, tz,
		0.0, 0.0, 0.0, 1.0)
	return self
}

// SetScaling loads a pure scaling with factors (sx, sy, sz).
func (self *Matrix4) SetScaling(sx float32, sy float32, sz float32) *Matrix4 {
	self.Set(
		sx, 0.0, 0.0, 0,
		0.0, sy, 0.0, 0,
		0.0, 0.0, sz, 0,
		0.0, 0.0, 0.0, 1.0)
	return self
}

// normalize_vector returns v scaled to unit length.
// NOTE(review): the local 'len' shadows the builtin len, and a zero-length
// input produces NaN/Inf components (division by zero) — confirm callers
// never pass a zero vector.
func (self *Matrix4) normalize_vector(v [3]float32) [3]float32 {
	len := float32(math.Sqrt(float64(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])))
	return [3]float32{v[0] / len, v[1] / len, v[2] / len}
}

// SetRotationByAxis loads a rotation of angle_in_degree degrees about the
// given axis (which is normalized internally).
func (self *Matrix4) SetRotationByAxis(axis [3]float32, angle_in_degree float32) *Matrix4 {
	axis = self.normalize_vector(axis)
	// Based on http://www.gamedev.net/reference/articles/article1199.asp
	c := float32(math.Cos(float64(angle_in_degree) * (math.Pi / 180.0)))
	s := float32(math.Sin(float64(angle_in_degree) * (math.Pi / 180.0)))
	t := 1 - c
	x, y, z := axis[0], axis[1], axis[2]
	tx, ty := t*x, t*y
	self.Set(
		tx*x+c, tx*y-s*z, tx*z+s*y, 0,
		tx*y+s*z, ty*y+c, ty*z-s*x, 0,
		tx*z-s*y, ty*z+s*x, t*z*z+c, 0,
		0, 0, 0, 1)
	return self
}

// SetMultiplyMatrices sets this matrix to the left-to-right product
// m0 * m1 * ... * mk of the given matrices; with no arguments it is a no-op.
func (self *Matrix4) SetMultiplyMatrices(matrices ...*Matrix4) *Matrix4 {
	if len(matrices) > 0 {
		m := matrices[0] // multiply all the matrices first,
		for i := 1; i < len(matrices); i++ {
			m = m.MultiplyToTheRight(matrices[i])
		}
		self.SetCopy(m) // and then copy (overwriting old values)
	}
	return self
}
// ----------------------------------------------------------------------------
// Creating new matrix
// ----------------------------------------------------------------------------
// Copy returns a new matrix with the same element values as this one.
func (self *Matrix4) Copy() *Matrix4 {
	return &Matrix4{elements: self.elements}
}

// Transpose returns a new matrix that is the transpose of this one, leaving
// the receiver unchanged.
// Fixed: the previous implementation emitted the elements in their original
// order, returning a plain copy instead of a transpose. Rows and columns are
// now actually swapped, mirroring the index pairs exchanged by SetTranspose.
func (self *Matrix4) Transpose() *Matrix4 {
	o := &self.elements // reference
	return &Matrix4{elements: [16]float32{
		o[0], o[4], o[8], o[12],
		o[1], o[5], o[9], o[13],
		o[2], o[6], o[10], o[14],
		o[3], o[7], o[11], o[15]}}
}
// MultiplyToTheLeft returns the product matrix * self as a new matrix; the
// receiver is unchanged. The expansion below works directly on the
// column-major arrays using the identity noted in the comment.
func (self *Matrix4) MultiplyToTheLeft(matrix *Matrix4) *Matrix4 {
	o := &self.elements // reference (M*O)T = OT * MT
	m := &matrix.elements // reference
	return &Matrix4{elements: [16]float32{
		o[0]*m[0] + o[1]*m[4] + o[2]*m[8] + o[3]*m[12], // [0], [4], [8], [12]
		o[0]*m[1] + o[1]*m[5] + o[2]*m[9] + o[3]*m[13], // [1], [5], [9], [13]
		o[0]*m[2] + o[1]*m[6] + o[2]*m[10] + o[3]*m[14], // [2], [6], [10], [14]
		o[0]*m[3] + o[1]*m[7] + o[2]*m[11] + o[3]*m[15], // [3], [7], [11], [15]
		o[4]*m[0] + o[5]*m[4] + o[6]*m[8] + o[7]*m[12], // 2nd row
		o[4]*m[1] + o[5]*m[5] + o[6]*m[9] + o[7]*m[13],
		o[4]*m[2] + o[5]*m[6] + o[6]*m[10] + o[7]*m[14],
		o[4]*m[3] + o[5]*m[7] + o[6]*m[11] + o[7]*m[15],
		o[8]*m[0] + o[9]*m[4] + o[10]*m[8] + o[11]*m[12], // 3rd
		o[8]*m[1] + o[9]*m[5] + o[10]*m[9] + o[11]*m[13],
		o[8]*m[2] + o[9]*m[6] + o[10]*m[10] + o[11]*m[14],
		o[8]*m[3] + o[9]*m[7] + o[10]*m[11] + o[11]*m[15],
		o[12]*m[0] + o[13]*m[4] + o[14]*m[8] + o[15]*m[12], // 4th
		o[12]*m[1] + o[13]*m[5] + o[14]*m[9] + o[15]*m[13],
		o[12]*m[2] + o[13]*m[6] + o[14]*m[10] + o[15]*m[14],
		o[12]*m[3] + o[13]*m[7] + o[14]*m[11] + o[15]*m[15]}}
}

// MultiplyToTheRight returns the product self * matrix as a new matrix; the
// receiver is unchanged.
func (self *Matrix4) MultiplyToTheRight(matrix *Matrix4) *Matrix4 {
	o := &self.elements // reference (O*M)T = MT * OT
	m := &matrix.elements // reference
	return &Matrix4{elements: [16]float32{
		m[0]*o[0] + m[1]*o[4] + m[2]*o[8] + m[3]*o[12], // [0], [4], [8], [12]
		m[0]*o[1] + m[1]*o[5] + m[2]*o[9] + m[3]*o[13], // [1], [5], [9], [13]
		m[0]*o[2] + m[1]*o[6] + m[2]*o[10] + m[3]*o[14], // [2], [6], [10], [14]
		m[0]*o[3] + m[1]*o[7] + m[2]*o[11] + m[3]*o[15], // [3], [7], [11], [15]
		m[4]*o[0] + m[5]*o[4] + m[6]*o[8] + m[7]*o[12], // 2nd row
		m[4]*o[1] + m[5]*o[5] + m[6]*o[9] + m[7]*o[13],
		m[4]*o[2] + m[5]*o[6] + m[6]*o[10] + m[7]*o[14],
		m[4]*o[3] + m[5]*o[7] + m[6]*o[11] + m[7]*o[15],
		m[8]*o[0] + m[9]*o[4] + m[10]*o[8] + m[11]*o[12], // 3rd
		m[8]*o[1] + m[9]*o[5] + m[10]*o[9] + m[11]*o[13],
		m[8]*o[2] + m[9]*o[6] + m[10]*o[10] + m[11]*o[14],
		m[8]*o[3] + m[9]*o[7] + m[10]*o[11] + m[11]*o[15],
		m[12]*o[0] + m[13]*o[4] + m[14]*o[8] + m[15]*o[12], // 4th
		m[12]*o[1] + m[13]*o[5] + m[14]*o[9] + m[15]*o[13],
		m[12]*o[2] + m[13]*o[6] + m[14]*o[10] + m[15]*o[14],
		m[12]*o[3] + m[13]*o[7] + m[14]*o[11] + m[15]*o[15]}}
}
// ----------------------------------------------------------------------------
// Handling Vector
// ----------------------------------------------------------------------------
// MultiplyVector3 applies this matrix to the 3D point (x, y, z, 1): the
// result includes the translation column (e[12..14]); the fourth (projective)
// row is not evaluated.
func (self *Matrix4) MultiplyVector3(v [3]float32) [3]float32 {
	e := &self.elements // reference
	return [3]float32{
		e[0]*v[0] + e[4]*v[1] + e[8]*v[2] + e[12], // COLUMN-MAJOR
		e[1]*v[0] + e[5]*v[1] + e[9]*v[2] + e[13],
		e[2]*v[0] + e[6]*v[1] + e[10]*v[2] + e[14]}
} | common/matrix4.go | 0.76207 | 0.660008 | matrix4.go | starcoder |
package google_type
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Represents an amount of money with its currency type.
type Money struct {
// The 3-letter currency code defined in ISO 4217.
CurrencyCode string `protobuf:"bytes,1,opt,name=currency_code,json=currencyCode,proto3" json:"currency_code,omitempty"`
// The whole units of the amount.
// For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
Units int64 `protobuf:"varint,2,opt,name=units,proto3" json:"units,omitempty"`
// Number of nano (10^-9) units of the amount.
// The value must be between -999,999,999 and +999,999,999 inclusive.
// If `units` is positive, `nanos` must be positive or zero.
// If `units` is zero, `nanos` can be positive, zero, or negative.
// If `units` is negative, `nanos` must be negative or zero.
// For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
Nanos int32 `protobuf:"varint,3,opt,name=nanos,proto3" json:"nanos,omitempty"`
}
func (m *Money) Reset() { *m = Money{} }
func (m *Money) String() string { return proto.CompactTextString(m) }
func (*Money) ProtoMessage() {}
func (*Money) Descriptor() ([]byte, []int) { return fileDescriptorMoney, []int{0} }
func init() {
proto.RegisterType((*Money)(nil), "google.type.Money")
}
var fileDescriptorMoney = []byte{
// 148 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0xa9, 0x2c, 0x48, 0xd5, 0xcf, 0xcd, 0xcf, 0x4b, 0xad, 0xd4, 0x2b, 0x28,
0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x86, 0x48, 0xe8, 0x81, 0x24, 0x94, 0x22, 0xb8, 0x58, 0x7d, 0x41,
0x72, 0x42, 0xca, 0x5c, 0xbc, 0xc9, 0xa5, 0x45, 0x45, 0xa9, 0x79, 0xc9, 0x95, 0xf1, 0xc9, 0xf9,
0x29, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x3c, 0x30, 0x41, 0x67, 0xa0, 0x98, 0x90,
0x08, 0x17, 0x6b, 0x69, 0x5e, 0x66, 0x49, 0xb1, 0x04, 0x13, 0x50, 0x92, 0x39, 0x08, 0xc2, 0x01,
0x89, 0xe6, 0x25, 0xe6, 0xe5, 0x17, 0x4b, 0x30, 0x03, 0x45, 0x59, 0x83, 0x20, 0x1c, 0x27, 0x79,
0x2e, 0xfe, 0xe4, 0xfc, 0x5c, 0x3d, 0x24, 0xcb, 0x9c, 0xb8, 0xc0, 0x56, 0x05, 0x80, 0x5c, 0x11,
0xc0, 0x98, 0xc4, 0x06, 0x76, 0x8e, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x72, 0xfb, 0x57, 0xc6,
0xa9, 0x00, 0x00, 0x00,
} | vendor/go.pedge.io/pb/gogo/google/type/money.pb.go | 0.564579 | 0.400398 | money.pb.go | starcoder |
package UpperBoundConfidence
import "math"
// UpperBoundConfidence runs the UCB1 multi-armed-bandit algorithm over
// dataSet, where dataSet[n][j] is the reward arm j would have paid on round n
// and one arm is selected per round.
// It returns the total reward collected, the per-arm reward sums, the per-arm
// selection counts, and the arm chosen on each round.
// Fixed: the exploration term used the constant expression 3 / 2, which is
// integer constant division (== 1) in Go; the intended UCB factor is
// 3.0 / 2.0 = 1.5. Empty input now returns zero values instead of panicking.
func UpperBoundConfidence(dataSet [][]float64) (float64, []float64, []float64, []int) {
	if len(dataSet) == 0 || len(dataSet[0]) == 0 {
		return 0, nil, nil, nil
	}
	rounds := dataSet
	arms := dataSet[0]
	// arm chosen on each round
	var selected []int
	// per-arm selection counts (float64 so the average below is float division)
	numberOfSelections := initializeNilIntSlice(len(arms))
	// per-arm sums of collected rewards
	sumOfRewards := initializeNilIntSlice(len(arms))
	totalReward := float64(0)
	for n := 0; n < len(rounds); n++ {
		x := 0
		maxUpperBound := float64(0)
		for j := 0; j < len(arms); j++ {
			upperBound := float64(0)
			if numberOfSelections[j] > 0 {
				averageReward := sumOfRewards[j] / numberOfSelections[j]
				// UCB1 exploration term: sqrt(3/2 * ln(n+1) / n_j).
				deltaI := math.Sqrt(3.0 / 2.0 * math.Log(float64(n+1)) / numberOfSelections[j])
				upperBound = averageReward + deltaI
			} else {
				// Force every arm to be tried once: an unselected arm always
				// wins the comparison. (math.Pow(10, 400) overflowed to +Inf
				// anyway; say so explicitly.)
				upperBound = math.Inf(1)
			}
			if upperBound > maxUpperBound {
				maxUpperBound = upperBound
				x = j
			}
		}
		numberOfSelections[x]++
		selected = append(selected, x)
		reward := dataSet[n][x]
		sumOfRewards[x] += reward
		totalReward += reward
	}
	return totalReward, sumOfRewards, numberOfSelections, selected
}

// initializeNilIntSlice returns a zeroed []float64 of length n, or nil for
// n <= 0. (The name is historical: it has always produced float64 values.)
func initializeNilIntSlice(n int) []float64 {
	if n <= 0 {
		return nil
	}
	return make([]float64, n)
}
// Time Complexity O(n^2) | Go/UpperBoundConfidence.go | 0.726037 | 0.403214 | UpperBoundConfidence.go | starcoder |
package conf
// Int8Var defines an int8 flag and environment variable with specified name, default value, and usage string.
// The argument p points to an int8 variable in which to store the value of the flag and/or environment variable.
func (c *Configurator) Int8Var(p *int8, name string, value int8, usage string) {
c.env().Int8Var(p, name, value, usage)
c.flag().Int8Var(p, name, value, usage)
}
// Int8 defines an int8 flag and environment variable with specified name, default value, and usage string.
// The return value is the address of an int8 variable that stores the value of the flag and/or environment variable.
func (c *Configurator) Int8(name string, value int8, usage string) *int8 {
p := new(int8)
c.Int8Var(p, name, value, usage)
return p
}
// Int8VarE defines an int8 environment variable with specified name, default value, and usage string.
// The argument p points to an int8 variable in which to store the value of the environment variable.
func (c *Configurator) Int8VarE(p *int8, name string, value int8, usage string) {
c.env().Int8Var(p, name, value, usage)
}
// Int8E defines an int8 environment variable with specified name, default value, and usage string.
// The return value is the address of an int8 variable that stores the value of the environment variable.
func (c *Configurator) Int8E(name string, value int8, usage string) *int8 {
p := new(int8)
c.Int8VarE(p, name, value, usage)
return p
}
// Int8VarF defines an int8 flag with specified name, default value, and usage string.
// The argument p points to an int8 variable in which to store the value of the flag.
func (c *Configurator) Int8VarF(p *int8, name string, value int8, usage string) {
c.flag().Int8Var(p, name, value, usage)
}
// Int8F defines an int8 flag with specified name, default value, and usage string.
// The return value is the address of an int8 variable that stores the value of the flag.
func (c *Configurator) Int8F(name string, value int8, usage string) *int8 {
p := new(int8)
c.Int8VarF(p, name, value, usage)
return p
}
// Int8Var defines an int8 flag and environment variable with specified name, default value, and usage string.
// The argument p points to an int8 variable in which to store the value of the flag and/or environment variable.
func Int8Var(p *int8, name string, value int8, usage string) {
Global.Int8Var(p, name, value, usage)
}
// Int8 defines an int8 flag and environment variable with specified name, default value, and usage string.
// The return value is the address of an int8 variable that stores the value of the flag and/or environment variable.
func Int8(name string, value int8, usage string) *int8 {
return Global.Int8(name, value, usage)
}
// Int8VarE defines an int8 environment variable with specified name, default value, and usage string.
// The argument p points to an int8 variable in which to store the value of the environment variable.
func Int8VarE(p *int8, name string, value int8, usage string) {
Global.Int8VarE(p, name, value, usage)
}
// Int8E defines an int8 environment variable with specified name, default value, and usage string.
// The return value is the address of an int8 variable that stores the value of the environment variable.
func Int8E(name string, value int8, usage string) *int8 {
return Global.Int8E(name, value, usage)
}
// Int8VarF defines an int8 flag with specified name, default value, and usage string.
// The argument p points to an int8 variable in which to store the value of the flag.
func Int8VarF(p *int8, name string, value int8, usage string) {
Global.Int8VarF(p, name, value, usage)
}
// Int8F defines an int8 flag with specified name, default value, and usage string.
// The return value is the address of an int8 variable that stores the value of the flag.
func Int8F(name string, value int8, usage string) *int8 {
return Global.Int8F(name, value, usage)
} | value_int8.go | 0.737631 | 0.418935 | value_int8.go | starcoder |
package main
import (
"fmt"
"image/color"
"log"
"math/rand"
"time"
"github.com/hajimehoshi/ebiten"
"github.com/hajimehoshi/ebiten/ebitenutil"
)
const RES int = 400
type Game struct {
generation int
board [][]int
}
var (
g *Game
)
// A board with empty state
func emptyGeneration() *Game {
board := make([][]int, RES)
for i := 0; i < RES; i++ {
board[i] = make([]int, RES)
}
return &Game{board: board, generation: 1}
}
// giveState seeds the RNG with the current time and brings roughly one
// in fifteen cells of g's board to life.
func giveState(g *Game) {
	rand.Seed(time.Now().UnixNano())
	for x := range g.board {
		for y := range g.board[x] {
			if rand.Intn(15) == 1 {
				g.board[x][y] = 1
			}
		}
	}
}
// logic applies Conway's four rules to g's generation and returns the
// next generation; g itself is left untouched.
func logic(g *Game) *Game {
	next := emptyGeneration() // all cells start dead
	next.generation = g.generation + 1
	for x := 0; x < RES; x++ {
		for y := 0; y < RES; y++ {
			alive := g.board[x][y] == 1
			n := checkNeighbors(x, y, g)
			// The four rules are mutually exclusive, so a switch is
			// equivalent to the classic four independent checks.
			switch {
			case alive && n < 2:
				next.board[x][y] = 0 // underpopulation
			case alive && (n == 2 || n == 3):
				next.board[x][y] = 1 // survival
			case alive && n > 3:
				next.board[x][y] = 0 // overpopulation
			case !alive && n == 3:
				next.board[x][y] = 1 // reproduction
			}
		}
	}
	return next
}
// checkNeighbors returns the number of live cells among the eight
// positions surrounding (x, y) on g's board.
//
// Bug fix: the previous version guarded lower bounds with `> 0`
// (e.g. `y-1 > 0`), which wrongly excluded neighbors located in row 0
// or column 0; the checks now use `>= 0`.
func checkNeighbors(x int, y int, g *Game) int {
	neighbors := 0
	for dx := -1; dx <= 1; dx++ {
		for dy := -1; dy <= 1; dy++ {
			if dx == 0 && dy == 0 {
				continue // skip the cell itself
			}
			nx, ny := x+dx, y+dy
			if nx >= 0 && nx < RES && ny >= 0 && ny < RES && g.board[nx][ny] == 1 {
				neighbors++
			}
		}
	}
	return neighbors
}
// draw paints every live cell of g as a single white pixel on background.
func draw(g *Game, background *ebiten.Image) {
	for x := range g.board {
		for y := range g.board[x] {
			if g.board[x][y] != 1 {
				continue
			}
			ebitenutil.DrawRect(background, float64(x), float64(y), 1, 1, color.White)
		}
	}
}
// interaction sets the cell under (x, y) and its four cardinal
// neighbors alive, clamping every coordinate onto the board.
func interaction(x int, y int, g *Game) *Game {
	cx := clamp(x, 0, RES-1)
	cy := clamp(y, 0, RES-1)
	cells := [5][2]int{
		{cx, cy},                    // center
		{cx, clamp(cy+1, 0, RES-1)}, // top
		{clamp(cx-1, 0, RES-1), cy}, // left
		{cx, clamp(cy-1, 0, RES-1)}, // bottom
		{clamp(cx+1, 0, RES-1), cy}, // right
	}
	for _, c := range cells {
		g.board[c[0]][c[1]] = 1
	}
	return g
}
// update is the per-frame ebiten callback: it applies mouse input,
// advances the simulation one generation and renders it, plus a
// generation counter overlay.
func update(screen *ebiten.Image) error {
	if ebiten.IsMouseButtonPressed(ebiten.MouseButtonLeft) {
		x, y := ebiten.CursorPosition()
		interaction(x, y, g)
	}
	// On skipped frames, handle input only; do not advance or draw.
	if ebiten.IsDrawingSkipped() {
		return nil
	}
	screen.Fill(color.RGBA{0, 0, 0, 0xff})
	// Bug fix: the error returned by NewImage was silently discarded.
	background, err := ebiten.NewImage(RES, RES, ebiten.FilterDefault)
	if err != nil {
		return err
	}
	g = logic(g)
	draw(g, background)
	screen.DrawImage(background, &ebiten.DrawImageOptions{})
	ebitenutil.DebugPrint(screen, fmt.Sprintf("Generation: %v", g.generation))
	return nil
}
// main builds a random initial board and hands control to the ebiten
// game loop (2x scaling), exiting fatally on any loop error.
func main() {
	g = emptyGeneration()
	giveState(g)
	if err := ebiten.Run(update, RES, RES, 2, "Conway's Game of Life"); err != nil {
		log.Fatal(err)
	}
}
// clamp restricts x to the inclusive range [min, max].
func clamp(x int, min int, max int) int {
	switch {
	case x < min:
		return min
	case x > max:
		return max
	default:
		return x
	}
}
package p289
/**
According to the Wikipedia's article: "The Game of Life, also known simply as Life, is a cellular automaton devised by the British mathematician <NAME> in 1970."
Given a board with m by n cells, each cell has an initial state live (1) or dead (0). Each cell interacts with its eight neighbors (horizontal, vertical, diagonal) using the following four rules (taken from the above Wikipedia article):
Any live cell with fewer than two live neighbors dies, as if caused by under-population.
Any live cell with two or three live neighbors lives on to the next generation.
Any live cell with more than three live neighbors dies, as if by over-population..
Any dead cell with exactly three live neighbors becomes a live cell, as if by reproduction.
Write a function to compute the next state (after one update) of the board given its current state.
Follow up:
Could you solve it in-place? Remember that the board needs to be updated at the same time: You cannot update some cells first and then use their updated values to update other cells.
In this question, we represent the board using a 2D array. In principle, the board is infinite, which would cause problems when the active area encroaches the border of the array. How would you address these problems?
*/
// gameOfLife advances board by one generation in place.
//
// Each cell is temporarily encoded on two bits:
//
//	[2nd bit, 1st bit] = [next state, current state]
//	00 dead  <- dead    01 dead  <- live
//	10 live  <- dead    11 live  <- live
//
// so neighbor counts can still read the current state from bit 0 while
// the next state is written into bit 1. A final pass shifts every cell
// down to its next state.
//
// Cleanup: the `board == nil` guard was redundant (len of a nil slice
// is 0); an empty first row is also handled now.
func gameOfLife(board [][]int) {
	if len(board) == 0 || len(board[0]) == 0 {
		return
	}
	m, n := len(board), len(board[0])
	for i := 0; i < m; i++ {
		for j := 0; j < n; j++ {
			lives := liveNeighbors(board, m, n, i, j)
			// Live cell survives with exactly two or three neighbors.
			if board[i][j] == 1 && (lives == 2 || lives == 3) {
				board[i][j] = 3 // 11: live -> live
			}
			// Dead cell with exactly three neighbors is born.
			if board[i][j] == 0 && lives == 3 {
				board[i][j] = 2 // 10: dead -> live
			}
		}
	}
	for i := 0; i < m; i++ {
		for j := 0; j < n; j++ {
			board[i][j] >>= 1 // keep only the next state
		}
	}
}
// max returns the larger of a and b.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
// liveNeighbors counts the live cells among the eight positions that
// surround (i, j) on an m×n board. Only bit 0 of each cell is read, so
// the count stays correct while next states are staged in bit 1.
func liveNeighbors(board [][]int, m, n, i, j int) int {
	// Start by cancelling the center cell, which the loop below counts.
	lives := -(board[i][j] & 1)
	for x := i - 1; x <= i+1; x++ {
		if x < 0 || x >= m {
			continue
		}
		for y := j - 1; y <= j+1; y++ {
			if y < 0 || y >= n {
				continue
			}
			lives += board[x][y] & 1
		}
	}
	return lives
}
package uniline
import "github.com/shinichy/go-wcwidth"
// char represents a character in the terminal screen.
// Its size is defined as follows:
// - 1 rune
// - len(char.p) bytes
// - char.colLen terminal columns
type char struct {
	p []byte // UTF-8 encoding of the rune
	r rune // decoded code point
	colLen int // display width in terminal columns
}
// charFromRune builds a char from r, deriving its UTF-8 byte encoding
// and its terminal display width (via wcwidth).
func charFromRune(r rune) char {
	return char{[]byte(string(r)), r, wcwidth.WcwidthUcs(r)}
}
// Clone returns a deep copy of c whose byte slice shares no storage
// with the original.
func (c char) Clone() char {
	c.p = append([]byte(nil), c.p...)
	return c
}
// text represents a sequence of characters
// Its size is defined as follows:
// - len(text.chars) runes
// - len(text.bytes) bytes
// - text.colLen terminal columns
type text struct {
	chars []char // per-rune metadata, parallel to bytes
	bytes []byte // concatenated UTF-8 encoding of all chars
	colLen int // total display width in terminal columns
}
// textFromString decodes s rune by rune into a text, accumulating the
// parallel byte slice and total display width as it goes.
func textFromString(s string) text {
	t := text{chars: make([]char, 0, len(s)), bytes: make([]byte, 0, len(s))}
	for _, r := range s {
		t = t.AppendChar(charFromRune(r))
	}
	return t
}
// AppendChar returns t extended with c.
// NOTE(review): append may write into t's backing arrays when capacity
// allows, so the result can alias the receiver — confirm callers never
// keep using t afterwards.
func (t text) AppendChar(c char) text {
	return text{append(t.chars, c), append(t.bytes, c.p...), t.colLen + c.colLen}
}
// AppendText returns t extended with the entire contents of n.
// NOTE(review): same aliasing caveat as AppendChar — the appends may
// reuse t's backing arrays.
func (t text) AppendText(n text) text {
	return text{append(t.chars, n.chars...), append(t.bytes, n.bytes...), t.colLen + n.colLen}
}
// InsertCharAt returns a new text equal to t with c spliced in at pos.
// pos must be a valid, internally consistent position within t.
func (t text) InsertCharAt(pos position, c char) text {
	// Fresh rune slice: prefix, then c, then the original suffix.
	chars := make([]char, len(t.chars)+1)
	copy(chars, t.chars[:pos.runes])
	chars[pos.runes] = c
	copy(chars[pos.runes+1:], t.chars[pos.runes:])
	// Fresh byte slice assembled the same way around c's encoding.
	bytes := make([]byte, len(t.bytes)+len(c.p))
	copy(bytes, t.bytes[:pos.bytes])
	copy(bytes[pos.bytes:], c.p)
	copy(bytes[pos.bytes+len(c.p):], t.bytes[pos.bytes:])
	return text{chars, bytes, t.colLen + c.colLen}
}
// InsertTextAt returns a new text equal to t with all of n spliced in
// at pos. The destination slices are freshly allocated, so the copy
// ordering (suffix before the inserted run) is safe: all sources are
// distinct arrays.
func (t text) InsertTextAt(pos position, n text) text {
	chars := make([]char, len(t.chars)+len(n.chars))
	copy(chars, t.chars[:pos.runes])
	copy(chars[pos.runes:], n.chars)
	copy(chars[pos.runes+len(n.chars):], t.chars[pos.runes:])
	bytes := make([]byte, len(t.bytes)+len(n.bytes))
	copy(bytes, t.bytes[:pos.bytes])
	copy(bytes[pos.bytes+len(n.bytes):], t.bytes[pos.bytes:])
	copy(bytes[pos.bytes:], n.bytes)
	return text{chars, bytes, t.colLen + n.colLen}
}
// RemoveCharAt returns t with the char at pos removed.
// NOTE(review): the appends splice in place, mutating t's original
// backing arrays — confirm callers treat the receiver as consumed.
func (t text) RemoveCharAt(pos position) text {
	c := t.chars[pos.runes]
	t.bytes = append(t.bytes[:pos.bytes], t.bytes[pos.bytes+len(c.p):]...)
	t.chars = append(t.chars[:pos.runes], t.chars[pos.runes+1:]...)
	t.colLen -= c.colLen
	return t
}
// Slice returns the subtext from the given start position to the end
// (one argument) or between two positions (two arguments); it panics
// on any other argument count. The result shares backing storage with t.
func (t text) Slice(segment ...position) text {
	switch len(segment) {
	case 1:
		t.chars = t.chars[segment[0].runes:]
		t.bytes = t.bytes[segment[0].bytes:]
		t.colLen -= segment[0].columns
	case 2:
		t.chars = t.chars[segment[0].runes:segment[1].runes]
		t.bytes = t.bytes[segment[0].bytes:segment[1].bytes]
		t.colLen = segment[1].columns - segment[0].columns
	default:
		panic("Slice expects 1 or 2 position arguments")
	}
	return t
}
// Clone returns a deep copy of t: neither the chars nor the bytes of
// the result share backing storage with the original.
func (t text) Clone() text {
	cloned := text{
		chars:  make([]char, len(t.chars)),
		bytes:  append([]byte(nil), t.bytes...),
		colLen: t.colLen,
	}
	for i := range t.chars {
		cloned.chars[i] = t.chars[i].Clone()
	}
	return cloned
}
// String returns the text's content as a Go string.
func (t text) String() string {
	return string(t.bytes)
}
// position locates one point inside a text, tracking the same offset
// in three units at once (kept consistent by Add/Subtract).
type position struct {
	bytes int // byte offset into text.bytes
	runes int // rune index into text.chars
	columns int // terminal column offset
}
// Add returns pos advanced past each of the given chars, updating the
// byte, rune and column counters in lockstep.
func (pos position) Add(chars ...char) position {
	for i := range chars {
		pos.bytes += len(chars[i].p)
		pos.runes++
		pos.columns += chars[i].colLen
	}
	return pos
}
func (pos position) Subtract(chars ...char) position {
for _, c := range chars {
pos.runes--
pos.bytes -= len(c.p)
pos.columns -= c.colLen
}
return pos
} | vendor/github.com/tiborvass/uniline/utils.go | 0.593727 | 0.493958 | utils.go | starcoder |
package main
import (
"image"
"image/color"
"image/gif"
)
func contain(colors color.Palette, c color.RGBA) (bool, uint8) {
for idx, clr := range colors {
if clr == c {
return true, uint8(idx)
}
}
return false, 0
}
func extractPalette(img image.Image) color.Palette {
palette := color.Palette{color.Transparent}
for y := 0; y < img.Bounds().Max.Y; y++ {
for x := 0; x < img.Bounds().Max.X; x++ {
// Find the pixel color
r, g, b, a := img.At(x, y).RGBA()
c := color.RGBA{uint8(r), uint8(g), uint8(b), uint8(a)}
// Add the pixel color if needed in our palette color
if ok, _ := contain(palette, c); len(palette) == 0 || !ok {
palette = append(palette, c)
}
}
}
return palette
}
func copyImageIntoPaletted(img image.Image, frame *image.Paletted) {
model := frame.ColorModel()
for y := 0; y < img.Bounds().Max.Y; y++ {
for x := 0; x < img.Bounds().Max.X; x++ {
idxColor := uint8(frame.Palette.Index(model.Convert(img.At(x, y))))
frame.SetColorIndex(x, y, idxColor)
}
}
}
func initCells(frame *image.Paletted) []image.Point {
whiteIdx := uint8(frame.Palette.Index(color.RGBA{255, 255, 255, 255}))
blackIdx := uint8(frame.Palette.Index(color.RGBA{0, 0, 0, 255}))
cells := make([]image.Point, 0)
for y := 0; y < frame.Bounds().Max.Y; y++ {
for x := 0; x < frame.Bounds().Max.X; x++ {
pixel := frame.ColorIndexAt(x, y)
if pixel != whiteIdx && pixel != blackIdx {
cells = append(cells, image.Pt(x, y))
}
}
}
return cells
}
// addFrame appends frame f to g with delay d (in 100ths of a second),
// keeping the Image and Delay slices in lockstep.
func addFrame(g *gif.GIF, f *image.Paletted, d int) {
	g.Image = append(g.Image, f)
	g.Delay = append(g.Delay, d)
}
func generateFloodfillGif(img image.Image, delay int) *gif.GIF {
// Find image colors to make our color palette
palette := extractPalette(img)
// Initialize the first frame of the gif
frame := image.NewPaletted(img.Bounds(), palette)
copyImageIntoPaletted(img, frame)
// Initialize pixels we need to proceed at first step
pixels := initCells(frame)
// Initialize our output gif
out := &gif.GIF{
Image: make([]*image.Paletted, 0, 1),
Delay: make([]int, 0, 1),
}
addFrame(out, frame, delay)
// Floodfill loop
for len(pixels) != 0 {
// Create the next frame
nextFrame := image.NewPaletted(frame.Bounds(), palette)
copy(nextFrame.Pix, frame.Pix)
// Slice of next pixel position we will need to proceed next step
nextPixels := make([]image.Point, 0)
for _, pix := range pixels {
// Color idx in the palette of the pixel we proceed
colorIdx := uint8(nextFrame.Palette.Index(nextFrame.At(pix.X, pix.Y)))
// Pixel above
if 0 < pix.Y {
above := nextFrame.At(pix.X, pix.Y-1)
// If the above pixel is white, we color it
if (above == color.RGBA{255, 255, 255, 255}) {
// Change the color
nextFrame.SetColorIndex(pix.X, pix.Y-1, colorIdx)
// Add to the proceed list for next step
nextPixels = append(nextPixels, image.Point{pix.X, pix.Y - 1})
}
}
// Pixel on right
if pix.X < nextFrame.Bounds().Max.X-1 {
right := nextFrame.At(pix.X+1, pix.Y)
// If the right pixel is white, we color it
if (right == color.RGBA{255, 255, 255, 255}) {
// Change the color
nextFrame.SetColorIndex(pix.X+1, pix.Y, colorIdx)
// Add to the proceed list for next step
nextPixels = append(nextPixels, image.Point{pix.X + 1, pix.Y})
}
}
// Pixel bellow
if pix.Y < nextFrame.Bounds().Max.Y-1 {
bellow := nextFrame.At(pix.X, pix.Y+1)
// If the pixel bellow is white, we colore it
if (bellow == color.RGBA{255, 255, 255, 255}) {
// Change the color
nextFrame.SetColorIndex(pix.X, pix.Y+1, colorIdx)
// Add to the proceed list for next step
nextPixels = append(nextPixels, image.Point{pix.X, pix.Y + 1})
}
}
// Pixel on left
if 0 < pix.X {
left := nextFrame.At(pix.X-1, pix.Y)
// If the pixel on left is white, we color it
if (left == color.RGBA{255, 255, 255, 255}) {
// Change the color
nextFrame.SetColorIndex(pix.X-1, pix.Y, colorIdx)
// Add to the proceed list for next step
nextPixels = append(nextPixels, image.Point{pix.X - 1, pix.Y})
}
}
}
addFrame(out, frame, delay)
// Update the new proceed pixels
pixels = nextPixels
// Now the next frame is the current
frame = nextFrame
}
return out
} | generateFloorfillGif.go | 0.718989 | 0.450178 | generateFloorfillGif.go | starcoder |
package main
import (
"fmt"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"sort"
"time"
)
// outOfRange marks a commit older than the six-month window.
const outOfRange = 99999

// daysInLastSixMonths is the size of the stats window in days.
const daysInLastSixMonths = 183

// weeksInLastSixMonths is the number of week columns in the graph.
const weeksInLastSixMonths = 26

// column holds the commit counts for the (up to) seven days of one week.
type column []int
// stats calculates and prints the commit-activity graph for the given
// author email across all of the given repository paths.
func stats(repoDirectories []string, email string) {
	commits := processRepositories(repoDirectories, email)
	printCommitsStats(commits)
}
// getBeginningOfDay given a time.Time calculates the start time of that day
func getBeginningOfDay(t time.Time) time.Time {
year, month, day := t.Date()
startOfDay := time.Date(year, month, day, 0, 0, 0, 0, t.Location())
return startOfDay
}
// countDaysSinceDate counts how many days passed since the passed `date`,
// returning outOfRange once the count exceeds the six-month window.
// NOTE(review): stepping forward in fixed 24h increments assumes no DST
// shifts matter for day counting here — confirm if locations with DST
// are in scope.
func countDaysSinceDate(date time.Time) int {
	days := 0
	now := getBeginningOfDay(time.Now())
	for date.Before(now) {
		date = date.Add(time.Hour * 24)
		days++
		// Bail out early: anything older than the window is irrelevant.
		if days > daysInLastSixMonths {
			return outOfRange
		}
	}
	return days
}
// fillCommits given a repository found in `path`, gets the commits and
// puts them in the `commits` map (keyed by "days ago"), returning it
// when completed. Commits by other authors are skipped; commits older
// than the window are dropped. Any git error panics (acceptable for a
// one-shot CLI tool).
func fillCommits(email string, path string, commits map[int]int) map[int]int {
	// instantiate a git repo object from path
	repo, err := git.PlainOpen(path)
	if err != nil {
		panic(err)
	}
	// get the HEAD reference
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}
	// get the commits history starting from HEAD
	iterator, err := repo.Log(&git.LogOptions{From: ref.Hash()})
	if err != nil {
		panic(err)
	}
	// iterate the commits; offset aligns the buckets with the graph's
	// trailing partial week (see calcOffset).
	offset := calcOffset()
	err = iterator.ForEach(func(c *object.Commit) error {
		daysAgo := countDaysSinceDate(c.Author.When) + offset
		if c.Author.Email != email {
			return nil
		}
		if daysAgo != outOfRange {
			commits[daysAgo]++
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
	return commits
}
// processRepositories tallies, per "days ago" bucket, the commits
// authored by email across every repository path over the last six
// months. Every bucket is pre-initialized to zero so the graph renders
// empty days.
func processRepositories(repoDirectories []string, email string) map[int]int {
	commits := make(map[int]int, daysInLastSixMonths)
	for day := daysInLastSixMonths; day > 0; day-- {
		commits[day] = 0
	}
	for _, path := range repoDirectories {
		commits = fillCommits(email, path, commits)
	}
	return commits
}
// calcOffset determines and returns the amount of days missing to fill
// the last row of the stats graph.
//
// The original switch mapped Sunday→7, Monday→6, …, Saturday→1; since
// time.Sunday == 0 through time.Saturday == 6, that collapses to the
// arithmetic 7 - weekday.
func calcOffset() int {
	return 7 - int(time.Now().Weekday())
}
// printCell prints one cell of the graph with an ANSI color chosen by
// the commit-count bucket; the `today` flag uses a distinct highlight.
func printCell(val int, today bool) {
	escape := "\033[0;37;30m"
	switch {
	case val > 0 && val < 5:
		escape = "\033[1;30;47m"
	case val >= 5 && val < 10:
		escape = "\033[1;30;43m"
	case val >= 10:
		escape = "\033[1;30;42m"
	}
	if today {
		escape = "\033[1;37;45m"
	}
	if val == 0 {
		fmt.Print(escape + " - " + "\033[0m")
		return
	}
	// Pad narrower numbers so cells line up.
	// Bug fix: the cases were ordered `val >= 10` before `val >= 100`,
	// making the three-digit branch unreachable; widest match now
	// comes first.
	str := " %d "
	switch {
	case val >= 100:
		str = "%d "
	case val >= 10:
		str = " %d "
	}
	fmt.Printf(escape+str+"\033[0m", val)
}
// printCommitsStats prints the commits stats: it orders the day
// buckets, folds them into week columns and renders the grid.
func printCommitsStats(commits map[int]int) {
	keys := sortMapIntoSlice(commits)
	cols := buildCols(keys, commits)
	printCells(cols)
}
// sortMapIntoSlice returns the keys of m as a slice in ascending order.
func sortMapIntoSlice(m map[int]int) []int {
	var keys []int
	for key := range m {
		keys = append(keys, key)
	}
	sort.Sort(sort.IntSlice(keys))
	return keys
}
// buildCols groups the ordered day keys into columns of seven commit
// counts, one column per week, keyed by week index. Only columns that
// reach day 6 are stored; a trailing partial week is dropped, which
// the caller compensates for via calcOffset.
func buildCols(keys []int, commits map[int]int) map[int]column {
	cols := make(map[int]column)
	col := column{}
	for _, k := range keys {
		week, day := k/7, k%7 // week index 26..0, weekday slot 0..6
		if day == 0 {
			col = column{} // start a fresh column
		}
		col = append(col, commits[k])
		if day == 6 {
			cols[week] = col // column complete
		}
	}
	return cols
}
// printCells prints the cells of the graph: one row per weekday slot
// (outer loop), walking week columns from oldest to most recent
// (inner loop), with a weekday label in a leading pseudo-column.
func printCells(cols map[int]column) {
	printMonths()
	for j := 6; j >= 0; j-- {
		for i := weeksInLastSixMonths + 1; i >= 0; i-- {
			// Leftmost pseudo-column carries the weekday label.
			if i == weeksInLastSixMonths+1 {
				printDayCol(j)
			}
			if col, ok := cols[i]; ok {
				// special case today: highlighted cell in the current week
				if i == 0 && j == calcOffset()-1 {
					printCell(col[j], true)
					continue
				} else {
					if len(col) > j {
						printCell(col[j], false)
						continue
					}
				}
			}
			// No data for this week/day: render an empty cell.
			printCell(0, false)
		}
		fmt.Printf("\n")
	}
}
// printMonths prints the month names in the first line, determining when the month
// changed between switching weeks. It walks week by week from the start
// of the six-month window up to now, emitting a month abbreviation only
// on the first week of each new month.
func printMonths() {
	week := getBeginningOfDay(time.Now()).Add(-(daysInLastSixMonths * time.Hour * 24))
	month := week.Month()
	fmt.Printf("         ")
	for {
		if week.Month() != month {
			fmt.Printf("%s ", week.Month().String()[:3])
			month = week.Month()
		} else {
			fmt.Printf("    ")
		}
		week = week.Add(7 * time.Hour * 24)
		if week.After(time.Now()) {
			break
		}
	}
	fmt.Printf("\n")
}
// printDayCol prints the row label for weekday slot `day` (0 is
// Sunday); only alternating rows (Mon/Wed/Fri) are labeled.
func printDayCol(day int) {
	out := "     "
	switch day {
	case 1:
		out = " Mon "
	case 3:
		out = " Wed "
	case 5:
		out = " Fri "
	}
	// Bug fix (go vet): the label was passed to Printf as a
	// non-constant format string; Print emits it verbatim.
	fmt.Print(out)
}
package dynamics
import (
"math"
"github.com/gonum/matrix/mat64"
)
const (
	// oneQuarter is the 1/4 factor of the MRP kinematic equation
	// (used in Attitude.Func).
	oneQuarter = 1 / 4.0
)

var (
	// eye is the 3×3 identity matrix.
	eye = mat64.NewDense(3, 3, []float64{1, 0, 0, 0, 1, 0, 0, 0, 1})
)
/*-----*/
/* Modified Rodrigez Parameters */
/*-----*/

// MRP defines Modified Rodrigez Parameters.
// The three components form the attitude vector sigma.
type MRP struct {
	s1, s2, s3 float64
}
// Equals returns true if both MRPs correspond to the same attitude.
// Since an MRP and its shadow set describe the same orientation, o's
// shadow set is also checked before declaring inequality.
func (s *MRP) Equals(o *MRP) bool {
	const relError = 1e-12
	near := func(a, b float64) bool { return math.Abs(a-b) < relError }
	if near(s.s1, o.s1) && near(s.s2, o.s2) && near(s.s3, o.s3) {
		return true
	}
	// Compare against o's shadow set via a local copy, leaving o intact.
	shadow := MRP{o.s1, o.s2, o.s3}
	shadow.Short()
	return near(s.s1, shadow.s1) && near(s.s2, shadow.s2) && near(s.s3, shadow.s3)
}
// squared returns the inner product sigma . sigma of the components.
func (s *MRP) squared() float64 {
	afl := s.floatArray()
	return dot(afl, afl)
}
// norm returns the Euclidean norm of the MRP component vector.
func (s *MRP) norm() float64 {
	return norm(s.floatArray())
}
// floatArray returns the three MRP components as a slice.
func (s *MRP) floatArray() []float64 {
	return []float64{s.s1, s.s2, s.s3}
}
// Short switches s to its shadow set when its norm exceeds 1, so the
// receiver always holds the short rotation description.
func (s *MRP) Short() {
	if s.norm() <= 1 {
		return
	}
	// Shadow-set transform: sigma' = -sigma / |sigma|^2.
	sq := s.squared()
	s.s1, s.s2, s.s3 = -s.s1/sq, -s.s2/sq, -s.s3/sq
}
// Tilde returns the tilde matrix of this MRP, i.e. the skew-symmetric
// cross-product matrix of the component vector.
// The m parameter allows to multiply directly the Tilde matrix.
func (s *MRP) Tilde(m float64) *mat64.Dense {
	return mat64.NewDense(3, 3, []float64{0, -s.s3 * m, s.s2 * m,
		s.s3 * m, 0, -s.s1 * m,
		-s.s2 * m, s.s1 * m, 0})
}
// OuterProduct returns the outer product sigma sigma^T of this MRP
// with itself.
// The m parameter allows to multiply directly the outer product with a scalar.
func (s *MRP) OuterProduct(m float64) *mat64.Dense {
	return mat64.NewDense(3, 3, []float64{
		m * s.s1 * s.s1, m * s.s1 * s.s2, m * s.s1 * s.s3,
		m * s.s2 * s.s1, m * s.s2 * s.s2, m * s.s2 * s.s3,
		m * s.s3 * s.s1, m * s.s3 * s.s2, m * s.s3 * s.s3,
	})
}
// B returns the B matrix for MRP computations:
// B = (1 - sigma^2) I + 2 [sigma~] + 2 sigma sigma^T,
// used in the kinematic relation sigma_dot = (1/4) B omega (see Func).
func (s *MRP) B() *mat64.Dense {
	B := mat64.NewDense(3, 3, nil)
	e1 := mat64.NewDense(3, 3, []float64{1 - s.squared(), 0, 0,
		0, 1 - s.squared(), 0,
		0, 0, 1 - s.squared()})
	B.Add(e1, s.Tilde(2))
	B.Add(B, s.OuterProduct(2))
	return B
}
// Attitude defines an attitude with an orientation, an angular velocity and an inertial tensor.
// The mf* fields cache the inertia ratios (I_jj - I_kk) / I_ii used by
// the torque-free Euler equations in Func.
// *ALMOST* implements rk4.Integrable.
type Attitude struct {
	Attitude *MRP
	Velocity *mat64.Vector
	InertiaTensor *mat64.Dense
	initAngMom float64 // Initial angular moment (integrator failsafe)
	mf1, mf2, mf3 float64 // Inertial tensor ratios
	tolerance float64 // Tolerance of integration (error cannot breach this).
}
// NewAttitude returns an Attitude pointer built from the MRP sigma,
// the body angular velocity omega, and a row-major 3×3 inertia tensor.
// The Euler-equation inertia ratios are precomputed from the tensor's
// diagonal once, so Func avoids recomputing them every step.
func NewAttitude(sigma, omega [3]float64, tensor []float64) *Attitude {
	a := Attitude{}
	a.Attitude = &MRP{sigma[0], sigma[1], sigma[2]}
	a.Velocity = mat64.NewVector(3, []float64{omega[0], omega[1], omega[2]})
	a.InertiaTensor = mat64.NewDense(3, 3, tensor)
	a.mf1 = (a.InertiaTensor.At(1, 1) - a.InertiaTensor.At(2, 2)) / a.InertiaTensor.At(0, 0)
	a.mf2 = (a.InertiaTensor.At(2, 2) - a.InertiaTensor.At(0, 0)) / a.InertiaTensor.At(1, 1)
	a.mf3 = (a.InertiaTensor.At(0, 0) - a.InertiaTensor.At(1, 1)) / a.InertiaTensor.At(2, 2)
	return &a
}
// Momentum returns the angular moment of this body, the 2-norm of
// I * omega.
func (a *Attitude) Momentum() float64 {
	mom := mat64.Dense{}
	mom.Mul(a.InertiaTensor, a.Velocity)
	return mat64.Norm(&mom, 2)
}
// GetState returns the state of this attitude for the EOM as defined
// below: [sigma1 sigma2 sigma3 omega1 omega2 omega3].
func (a *Attitude) GetState() []float64 {
	return []float64{a.Attitude.s1, a.Attitude.s2, a.Attitude.s3, a.Velocity.At(0, 0), a.Velocity.At(1, 0), a.Velocity.At(2, 0)}
}
// SetState sets the state of this attitude for the EOM as defined below,
// mirroring GetState's layout. The time t is unused here (the dynamics
// are time-invariant) but kept for the integrator interface.
func (a *Attitude) SetState(t float64, s []float64) {
	a.Attitude.s1 = s[0]
	a.Attitude.s2 = s[1]
	a.Attitude.s3 = s[2]
	a.Velocity.SetVec(0, s[3])
	a.Velocity.SetVec(1, s[4])
	a.Velocity.SetVec(2, s[5])
}
// Func is the integrator function: given state s = [sigma; omega] it
// returns the time derivatives — MRP kinematics for the attitude and
// torque-free Euler equations for the angular velocity.
func (a *Attitude) Func(t float64, s []float64) []float64 {
	sigma := MRP{s[0], s[1], s[2]}
	sigmaDot := mat64.NewVector(3, nil)
	omega := mat64.NewVector(3, []float64{s[3], s[4], s[5]})
	sigmaDot.MulVec(sigma.B(), omega)
	f := make([]float64, 6)
	// Kinematics: sigma_dot = (1/4) B(sigma) omega.
	f[0] = oneQuarter * sigmaDot.At(0, 0)
	f[1] = oneQuarter * sigmaDot.At(1, 0)
	f[2] = oneQuarter * sigmaDot.At(2, 0)
	// Euler equations using the precomputed inertia ratios.
	f[3] = a.mf1 * omega.At(1, 0) * omega.At(2, 0)
	f[4] = a.mf2 * omega.At(0, 0) * omega.At(2, 0)
	f[5] = a.mf3 * omega.At(1, 0) * omega.At(0, 0)
	return f
}
package day8
import (
"fmt"
"strconv"
"github.com/Marc3842h/Advent-of-Code-2019/inputs"
)
// imageWidth and imageHeight are the fixed dimensions of one image layer.
var imageWidth = 25
var imageHeight = 6
// PartA splits the digit stream into imageWidth*imageHeight layers,
// finds the layer with the fewest 0 digits, and prints the number of
// 1 digits multiplied by the number of 2 digits on that layer.
//
// Cleanup: the magic 10^12-style sentinel values are replaced with a
// sentinel derived from the layer size (zeros can never exceed it),
// and the O(n) indexExists scan per input character is replaced by a
// constant-time length comparison.
func PartA() {
	input := inputs.ReadInputStr(8)

	layerSize := imageWidth * imageHeight
	layers := make([]layer, 0)
	counter := 0
	layerCount := 0
	for _, c := range input {
		pixel, _ := strconv.Atoi(string(c))
		if counter >= layerSize {
			counter = 0
			layerCount++
		}
		// Grow the layer slice lazily when a new layer starts.
		if layerCount >= len(layers) {
			layers = append(layers, layer{contents: make([]int, 0)})
		}
		layers[layerCount].contents = append(layers[layerCount].contents, pixel)
		counter++
	}

	// Locate the layer with the fewest zeros (first one wins ties).
	bestIdx := 0
	bestZeros := layerSize + 1
	for idx, l := range layers {
		zeros := 0
		for _, pixel := range l.contents {
			if pixel == 0 {
				zeros++
			}
		}
		if zeros < bestZeros {
			bestZeros = zeros
			bestIdx = idx
		}
	}

	ones, twos := 0, 0
	for _, pixel := range layers[bestIdx].contents {
		switch pixel {
		case 1:
			ones++
		case 2:
			twos++
		}
	}
	fmt.Printf("Day8PartA: %d\n", ones*twos)
}
// PartB composites the layers front-to-back (0 = black, 1 = white,
// 2 = transparent) and prints the resulting image, rendering 0 as a
// blank so the message is legible.
//
// Cleanup: layer membership is tracked with a length comparison rather
// than the O(n) indexExists scan, and compositing checks transparency
// once per pixel instead of skipping in two separate branches.
func PartB() {
	input := inputs.ReadInputStr(8)

	layerSize := imageWidth * imageHeight
	layers := make([]layer, 0)
	counter := 0
	layerCount := 0
	for _, c := range input {
		pixel, _ := strconv.Atoi(string(c))
		if counter >= layerSize {
			counter = 0
			layerCount++
		}
		if layerCount >= len(layers) {
			layers = append(layers, layer{contents: make([]int, 0)})
		}
		layers[layerCount].contents = append(layers[layerCount].contents, pixel)
		counter++
	}

	// Start fully transparent; the first non-transparent pixel in layer
	// order wins.
	image := make([]int, layerSize)
	for i := range image {
		image[i] = 2
	}
	for _, l := range layers {
		for idx, pixel := range l.contents {
			if image[idx] == 2 && pixel != 2 {
				image[idx] = pixel
			}
		}
	}

	fmt.Printf("Day8PartB:\n")
	for row := 0; row < imageHeight; row++ {
		for _, pixel := range image[row*imageWidth : (row+1)*imageWidth] {
			if pixel == 0 {
				fmt.Printf(" ")
			} else {
				fmt.Printf("%d", pixel)
			}
		}
		fmt.Printf("\n")
	}
}
// indexExists reports whether needleIndex is a valid index into
// haystack.
//
// The original looped over every index of the slice; since slice
// indices are exactly 0..len-1, an O(1) bounds comparison is
// equivalent (including returning false for negative indices).
func indexExists(needleIndex int, haystack []layer) bool {
	return needleIndex >= 0 && needleIndex < len(haystack)
}
// layer holds the pixel digits of one imageWidth×imageHeight image
// layer, in row-major order.
type layer struct {
	contents []int
}
package cmp
import "reflect"
// valueNode represents a single node within a report, which is a
// structured representation of the value tree, containing information
// regarding which nodes are equal or not.
// Exactly one of Records or Value is populated on any non-leaf node
// (enforced by asserts in PushStep).
type valueNode struct {
	parent *valueNode

	Type reflect.Type
	ValueX reflect.Value
	ValueY reflect.Value

	// NumSame is the number of leaf nodes that are equal.
	// All descendants are equal only if NumDiff is 0.
	NumSame int
	// NumDiff is the number of leaf nodes that are not equal.
	NumDiff int
	// NumIgnored is the number of leaf nodes that are ignored.
	NumIgnored int
	// NumCompared is the number of leaf nodes that were compared
	// using an Equal method or Comparer function.
	NumCompared int
	// NumTransformed is the number of non-leaf nodes that were transformed.
	NumTransformed int
	// NumChildren is the number of transitive descendants of this node.
	// This counts from zero; thus, leaf nodes have no descendants.
	NumChildren int
	// MaxDepth is the maximum depth of the tree. This counts from zero;
	// thus, leaf nodes have a depth of zero.
	MaxDepth int

	// Records is a list of struct fields, slice elements, or map entries.
	Records []reportRecord // If populated, implies Value is not populated

	// Value is the result of a transformation, pointer indirect, of
	// type assertion.
	Value *valueNode // If populated, implies Records is not populated

	// TransformerName is the name of the transformer.
	TransformerName string // If non-empty, implies Value is populated
}
// reportRecord pairs a struct-field name or map key with the child
// node for the corresponding value.
type reportRecord struct {
	Key reflect.Value // Invalid for slice element
	Value *valueNode
}
// PushStep creates a child node for the path step ps, attaches it to
// parent in the slot appropriate to the step kind, and returns it.
// Struct fields, slice elements and map entries go into Records;
// indirects, type assertions and transforms go into Value. The asserts
// enforce that Records and Value are never mixed on one node.
func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
	vx, vy := ps.Values()
	child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
	switch s := ps.(type) {
	case StructField:
		assert(parent.Value == nil)
		parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
	case SliceIndex:
		assert(parent.Value == nil)
		parent.Records = append(parent.Records, reportRecord{Value: child})
	case MapIndex:
		assert(parent.Value == nil)
		parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
	case Indirect:
		assert(parent.Value == nil && parent.Records == nil)
		parent.Value = child
	case TypeAssertion:
		assert(parent.Value == nil && parent.Records == nil)
		parent.Value = child
	case Transform:
		assert(parent.Value == nil && parent.Records == nil)
		parent.Value = child
		parent.TransformerName = s.Name()
		parent.NumTransformed++
	default:
		assert(parent == nil) // Must be the root step
	}
	return child
}
// Report records the comparison outcome rs on this leaf node: exactly
// one of NumSame/NumDiff/NumIgnored is incremented, plus NumCompared
// when an Equal method or Comparer function produced the result.
func (r *valueNode) Report(rs Result) {
	assert(r.MaxDepth == 0) // May only be called on leaf nodes

	if rs.ByIgnore() {
		r.NumIgnored++
	} else {
		if rs.Equal() {
			r.NumSame++
		} else {
			r.NumDiff++
		}
	}
	assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)

	if rs.ByMethod() {
		r.NumCompared++
	}
	if rs.ByFunc() {
		r.NumCompared++
	}
	assert(r.NumCompared <= 1)
}
// PopStep folds child's accumulated statistics into its parent and
// returns the parent (nil when called on the root).
func (child *valueNode) PopStep() (parent *valueNode) {
	if child.parent == nil {
		return nil
	}
	parent = child.parent
	parent.NumSame += child.NumSame
	parent.NumDiff += child.NumDiff
	parent.NumIgnored += child.NumIgnored
	parent.NumCompared += child.NumCompared
	parent.NumTransformed += child.NumTransformed
	parent.NumChildren += child.NumChildren + 1
	if parent.MaxDepth < child.MaxDepth+1 {
		parent.MaxDepth = child.MaxDepth + 1
	}
	return parent
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.