// NOTE: dataset metadata header removed; this sample concatenates several unrelated Go source files.
package types
import (
"io"
"github.com/lyraproj/puppet-evaluator/eval"
"github.com/lyraproj/puppet-evaluator/utils"
"reflect"
)
type PatternType struct {
regexps []*RegexpType
}
var Pattern_Type eval.ObjectType
func init() {
Pattern_Type = newObjectType(`Pcore::PatternType`,
`Pcore::ScalarDataType {
attributes => {
patterns => Array[Regexp]
}
}`, func(ctx eval.Context, args []eval.Value) eval.Value {
return NewPatternType2(args...)
})
}
func DefaultPatternType() *PatternType {
return patternType_DEFAULT
}
func NewPatternType(regexps []*RegexpType) *PatternType {
return &PatternType{regexps}
}
func NewPatternType2(regexps ...eval.Value) *PatternType {
return NewPatternType3(WrapValues(regexps))
}
// NewPatternType3 builds a PatternType from a list of values. An empty list
// yields the default (parameterless) Pattern type; a single-element list whose
// element is itself an array is unwrapped and treated as the pattern list.
// Each element must be a Regexp type, a Regexp value, or a String; anything
// else panics with an illegal-argument error.
func NewPatternType3(regexps eval.List) *PatternType {
	cnt := regexps.Len()
	switch cnt {
	case 0:
		return DefaultPatternType()
	case 1:
		// Pattern[[/a/, /b/]] is equivalent to Pattern[/a/, /b/]
		if av, ok := regexps.At(0).(*ArrayValue); ok {
			return NewPatternType3(av)
		}
	}
	rs := make([]*RegexpType, cnt)
	regexps.EachWithIndex(func(arg eval.Value, idx int) {
		// Idiomatic type switch binds the concrete value, avoiding the
		// redundant re-assertions the previous switch performed.
		switch arg := arg.(type) {
		case *RegexpType:
			rs[idx] = arg
		case *RegexpValue:
			rs[idx] = arg.PType().(*RegexpType)
		case *StringValue:
			rs[idx] = NewRegexpType2(arg)
		default:
			panic(NewIllegalArgumentType2(`Pattern[]`, idx, `Type[Regexp], Regexp, or String`, arg))
		}
	})
	return NewPatternType(rs)
}
func (t *PatternType) Accept(v eval.Visitor, g eval.Guard) {
v(t)
for _, rx := range t.regexps {
rx.Accept(v, g)
}
}
func (t *PatternType) Default() eval.Type {
return patternType_DEFAULT
}
func (t *PatternType) Equals(o interface{}, g eval.Guard) bool {
if ot, ok := o.(*PatternType); ok {
return len(t.regexps) == len(ot.regexps) && eval.GuardedIncludesAll(eval.EqSlice(t.regexps), eval.EqSlice(ot.regexps), g)
}
return false
}
func (t *PatternType) Get(key string) (value eval.Value, ok bool) {
switch key {
case `patterns`:
return WrapValues(t.Parameters()), true
}
return nil, false
}
func (t *PatternType) IsAssignable(o eval.Type, g eval.Guard) bool {
if _, ok := o.(*PatternType); ok {
return len(t.regexps) == 0
}
if st, ok := o.(*StringType); ok {
if len(t.regexps) == 0 {
return true
}
str := st.value
return str != `` && utils.MatchesString(MapToRegexps(t.regexps), str)
}
if et, ok := o.(*EnumType); ok {
if len(t.regexps) == 0 {
return true
}
enums := et.values
return len(enums) > 0 && utils.MatchesAllStrings(MapToRegexps(t.regexps), enums)
}
return false
}
func (t *PatternType) IsInstance(o eval.Value, g eval.Guard) bool {
str, ok := o.(*StringValue)
return ok && (len(t.regexps) == 0 || utils.MatchesString(MapToRegexps(t.regexps), str.String()))
}
func (t *PatternType) MetaType() eval.ObjectType {
return Pattern_Type
}
func (t *PatternType) Name() string {
return `Pattern`
}
func (t *PatternType) Parameters() []eval.Value {
top := len(t.regexps)
if top == 0 {
return eval.EMPTY_VALUES
}
rxs := make([]eval.Value, top)
for idx, rx := range t.regexps {
rxs[idx] = WrapRegexp(rx.patternString)
}
return rxs
}
func (t *PatternType) Patterns() *ArrayValue {
rxs := make([]eval.Value, len(t.regexps))
for idx, rx := range t.regexps {
rxs[idx] = rx
}
return WrapValues(rxs)
}
func (t *PatternType) ReflectType(c eval.Context) (reflect.Type, bool) {
return reflect.TypeOf(`x`), true
}
func (t *PatternType) CanSerializeAsString() bool {
return true
}
func (t *PatternType) SerializationString() string {
return t.String()
}
func (t *PatternType) ToString(b io.Writer, s eval.FormatContext, g eval.RDetect) {
TypeToString(t, b, s, g)
}
func (t *PatternType) String() string {
return eval.ToString2(t, NONE)
}
func (t *PatternType) PType() eval.Type {
return &TypeType{t}
}
var patternType_DEFAULT = &PatternType{[]*RegexpType{}} | types/patterntype.go | 0.646237 | 0.466906 | patterntype.go | starcoder |
package models
import (
i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e "time"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// UserTrainingStatusInfo
type UserTrainingStatusInfo struct {
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// Date and time of assignment of the training to the user.
assignedDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
// Date and time of completion of the training by the user.
completionDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
// Display name of the assigned training.
displayName *string
// Status of the training assigned to the user. Possible values are: unknown, assigned, inProgress, completed, overdue, unknownFutureValue.
trainingStatus *TrainingStatus
}
// NewUserTrainingStatusInfo instantiates a new userTrainingStatusInfo and sets the default values.
func NewUserTrainingStatusInfo()(*UserTrainingStatusInfo) {
m := &UserTrainingStatusInfo{
}
m.SetAdditionalData(make(map[string]interface{}));
return m
}
// CreateUserTrainingStatusInfoFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateUserTrainingStatusInfoFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewUserTrainingStatusInfo(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *UserTrainingStatusInfo) GetAdditionalData()(map[string]interface{}) {
if m == nil {
return nil
} else {
return m.additionalData
}
}
// GetAssignedDateTime gets the assignedDateTime property value. Date and time of assignment of the training to the user.
func (m *UserTrainingStatusInfo) GetAssignedDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
if m == nil {
return nil
} else {
return m.assignedDateTime
}
}
// GetCompletionDateTime gets the completionDateTime property value. Date and time of completion of the training by the user.
func (m *UserTrainingStatusInfo) GetCompletionDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
if m == nil {
return nil
} else {
return m.completionDateTime
}
}
// GetDisplayName gets the displayName property value. Display name of the assigned training.
func (m *UserTrainingStatusInfo) GetDisplayName()(*string) {
if m == nil {
return nil
} else {
return m.displayName
}
}
// GetFieldDeserializers the deserialization information for the current model
func (m *UserTrainingStatusInfo) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
res["assignedDateTime"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetTimeValue()
if err != nil {
return err
}
if val != nil {
m.SetAssignedDateTime(val)
}
return nil
}
res["completionDateTime"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetTimeValue()
if err != nil {
return err
}
if val != nil {
m.SetCompletionDateTime(val)
}
return nil
}
res["displayName"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetDisplayName(val)
}
return nil
}
res["trainingStatus"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetEnumValue(ParseTrainingStatus)
if err != nil {
return err
}
if val != nil {
m.SetTrainingStatus(val.(*TrainingStatus))
}
return nil
}
return res
}
// GetTrainingStatus gets the trainingStatus property value. Status of the training assigned to the user. Possible values are: unknown, assigned, inProgress, completed, overdue, unknownFutureValue.
func (m *UserTrainingStatusInfo) GetTrainingStatus()(*TrainingStatus) {
if m == nil {
return nil
} else {
return m.trainingStatus
}
}
// Serialize serializes information the current object
func (m *UserTrainingStatusInfo) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
{
err := writer.WriteTimeValue("assignedDateTime", m.GetAssignedDateTime())
if err != nil {
return err
}
}
{
err := writer.WriteTimeValue("completionDateTime", m.GetCompletionDateTime())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("displayName", m.GetDisplayName())
if err != nil {
return err
}
}
if m.GetTrainingStatus() != nil {
cast := (*m.GetTrainingStatus()).String()
err := writer.WriteStringValue("trainingStatus", &cast)
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *UserTrainingStatusInfo) SetAdditionalData(value map[string]interface{})() {
if m != nil {
m.additionalData = value
}
}
// SetAssignedDateTime sets the assignedDateTime property value. Date and time of assignment of the training to the user.
func (m *UserTrainingStatusInfo) SetAssignedDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
if m != nil {
m.assignedDateTime = value
}
}
// SetCompletionDateTime sets the completionDateTime property value. Date and time of completion of the training by the user.
func (m *UserTrainingStatusInfo) SetCompletionDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
if m != nil {
m.completionDateTime = value
}
}
// SetDisplayName sets the displayName property value. Display name of the assigned training.
func (m *UserTrainingStatusInfo) SetDisplayName(value *string)() {
if m != nil {
m.displayName = value
}
}
// SetTrainingStatus sets the trainingStatus property value. Status of the training assigned to the user. Possible values are: unknown, assigned, inProgress, completed, overdue, unknownFutureValue.
func (m *UserTrainingStatusInfo) SetTrainingStatus(value *TrainingStatus)() {
if m != nil {
m.trainingStatus = value
}
} | models/user_training_status_info.go | 0.564098 | 0.444565 | user_training_status_info.go | starcoder |
package transpilers
import (
"fmt"
"cloud.google.com/go/bigquery"
"github.com/beneath-hq/beneath/pkg/schemalang"
)
// ToBigQuery transpiles an Avro schema to a BigQuery schema
func ToBigQuery(s schemalang.Schema, doc bool) bigquery.Schema {
t := &toBigQuery{
Doc: doc,
Refs: make(map[string]*bigquery.FieldSchema),
}
root := t.Transpile(s)
return root.Schema
}
type toBigQuery struct {
Doc bool
Refs map[string]*bigquery.FieldSchema
}
func (t *toBigQuery) Transpile(s schemalang.Schema) *bigquery.FieldSchema {
switch sT := s.(type) {
case *schemalang.Primitive:
return t.fromPrimitive(sT)
case *schemalang.Array:
return t.fromArray(sT)
case *schemalang.Nullable:
return t.fromUnion(sT)
case *schemalang.Fixed:
return t.fromFixed(sT)
case *schemalang.Enum:
return t.fromEnum(sT)
case *schemalang.Record:
return t.fromRecord(sT)
case *schemalang.Ref:
return t.fromRef(sT)
default:
panic(fmt.Errorf("unexpected Avro schema type %T", s))
}
}
// fromPrimitive maps an Avro primitive type (optionally annotated with a
// logical type) to the equivalent required BigQuery field. Unknown types are
// treated as programmer errors and panic.
func (t *toBigQuery) fromPrimitive(s *schemalang.Primitive) *bigquery.FieldSchema {
	field := &bigquery.FieldSchema{
		Required: true,
	}
	if s.LogicalType == "" {
		switch s.Type {
		case schemalang.BooleanType:
			field.Type = bigquery.BooleanFieldType
		case schemalang.IntType, schemalang.LongType:
			field.Type = bigquery.IntegerFieldType
		case schemalang.FloatType, schemalang.DoubleType:
			field.Type = bigquery.FloatFieldType
		case schemalang.BytesType:
			field.Type = bigquery.BytesFieldType
		case schemalang.StringType:
			field.Type = bigquery.StringFieldType
		default:
			// BUG FIX: the panic previously printed s.LogicalType, which is
			// always the empty string in this branch; report the offending
			// primitive type instead.
			panic(fmt.Errorf("unexpected primitive type '%s'", s.Type))
		}
	} else {
		switch s.LogicalType {
		case schemalang.NumericLogicalType:
			field.Type = bigquery.NumericFieldType
		case schemalang.UUIDLogicalType:
			field.Type = bigquery.StringFieldType
		case schemalang.TimestampMillisLogicalType:
			field.Type = bigquery.TimestampFieldType
		default:
			panic(fmt.Errorf("unexpected logical type '%s'", s.LogicalType))
		}
	}
	return field
}
func (t *toBigQuery) fromArray(s *schemalang.Array) *bigquery.FieldSchema {
field := t.Transpile(s.ItemType)
field.Repeated = true
return field
}
func (t *toBigQuery) fromUnion(s *schemalang.Nullable) *bigquery.FieldSchema {
field := t.Transpile(s.NonNullType)
field.Required = false
return field
}
func (t *toBigQuery) fromFixed(s *schemalang.Fixed) *bigquery.FieldSchema {
return &bigquery.FieldSchema{
Type: bigquery.BytesFieldType,
Required: true,
}
}
func (t *toBigQuery) fromEnum(s *schemalang.Enum) *bigquery.FieldSchema {
enum := &bigquery.FieldSchema{
Type: bigquery.StringFieldType,
Required: true,
}
t.Refs[s.Name] = enum
return enum
}
func (t *toBigQuery) fromRecord(s *schemalang.Record) *bigquery.FieldSchema {
fields := make([]*bigquery.FieldSchema, len(s.Fields))
for idx, avroField := range s.Fields {
field := t.Transpile(avroField.Type)
field.Name = avroField.Name
if t.Doc {
field.Description = avroField.Doc
}
fields[idx] = field
}
record := &bigquery.FieldSchema{
Name: s.Name,
Required: true,
Type: bigquery.RecordFieldType,
Schema: fields,
}
if t.Doc {
record.Description = s.Doc
}
t.Refs[s.Name] = record
return record
}
func (t *toBigQuery) fromRef(s *schemalang.Ref) *bigquery.FieldSchema {
ref := t.Refs[s.Name]
return &bigquery.FieldSchema{
Required: ref.Required,
Schema: ref.Schema,
Type: ref.Type,
}
} | pkg/schemalang/transpilers/bigquery_to.go | 0.630116 | 0.460895 | bigquery_to.go | starcoder |
package core
//PolyLine - Aka Polygonal chain, linestring,
type PolyLine []Line
// GeomType - Describes geometry type
func (PolyLine) GeomType() string {
return "polyline"
}
// Creates a Polyline from a slice of Points
func createPolylineFromPoints(points []Point) PolyLine {
var p PolyLine
for i, pt := range points {
if i > 0 {
line := createLine(points[i-1], pt)
p = append(p, line)
}
}
return p
}
// createPolyLineFromLines builds a PolyLine from a slice of lines. The input
// is copied so the returned PolyLine does not alias the caller's slice
// (matching the element-by-element append of the previous implementation).
// A nil result is returned for empty input, as before.
func createPolyLineFromLines(lines []Line) PolyLine {
	if len(lines) == 0 {
		return nil
	}
	p := make(PolyLine, len(lines))
	copy(p, lines)
	return p
}
//GetLength - Returns length of a polyline.
func (p *PolyLine) length() float64 {
var d float64
for _, l := range *p {
d = d + l.length()
}
return d
}
// GetVertices - returns all vertices in Polyline.
func (p *PolyLine) vertices() []Point {
var v []Point
for i, l := range *p {
if i == 0 {
v = append(v, Point{X: l[0].X, Y: l[0].Y}) // Add first point as well on first line.
}
v = append(v, Point{X: l[1].X, Y: l[1].Y}) // Only add 2nd Point normally so don't duplicate.
}
return v
}
func (p *PolyLine) getNumVertices() int {
return len(*p) + 1
}
// bbox returns the axis-aligned bounding box enclosing every vertex of the
// polyline. The bounds are seeded from the first vertex; previously they were
// seeded with zero, which incorrectly pinned the box to the origin for
// polylines lying entirely in positive (or entirely negative) coordinates.
// An empty polyline yields the zero-value BoundingBox.
func (p *PolyLine) bbox() BoundingBox {
	points := p.vertices()
	if len(points) == 0 {
		return BoundingBox{}
	}
	minX, minY := points[0].X, points[0].Y
	maxX, maxY := points[0].X, points[0].Y
	for _, pt := range points[1:] {
		if pt.X < minX {
			minX = pt.X
		}
		if pt.Y < minY {
			minY = pt.Y
		}
		if pt.X > maxX {
			maxX = pt.X
		}
		if pt.Y > maxY {
			maxY = pt.Y
		}
	}
	return BoundingBox{Point{X: minX, Y: minY}, Point{X: maxX, Y: maxY}}
}
// NumEdges returns the number of edges in this shape. // Copied from S2 //Move out, and create interface.
// NOTE(review): a PolyLine backed by n Line segments has n edges, yet this
// returns len(*p)-1 (while getNumVertices returns len(*p)+1). The off-by-one
// looks inherited from S2's point-based representation — confirm intent
// before relying on it.
func (p *PolyLine) getNumEdges() int {
	if len(*p) == 0 {
		return 0
	}
	return len(*p) - 1
}
// checkClosedChain reports whether the polyline forms a closed ring, i.e.
// the start point of the first segment coincides with the end point of the
// last segment (in which case the chain describes a polygon). Panics on an
// empty polyline, as before.
func (p *PolyLine) checkClosedChain() bool {
	segments := *p
	first := segments[0][0]
	last := segments[len(segments)-1][1]
	return first.X == last.X && first.Y == last.Y
}
//centroid - Return centroid of a polyline
func (p *PolyLine) centroid() Point {
var xTop = 0.0
var yTop = 0.0
var xBottom = 0.0
var yBottom = 0.0
for _, l := range *p {
centroid := l.centroid()
length := l.length()
xTop = xTop + centroid.X*length
yTop = yTop + centroid.Y*length
xBottom = xBottom + length
yBottom = yBottom + length
}
xCentroid := xTop / xBottom
yCentroid := yTop / yBottom
return Point{X: xCentroid, Y: yCentroid}
} | core/Polyline.go | 0.845241 | 0.461138 | Polyline.go | starcoder |
package graphs
import (
"math"
// u "github.com/mayukh42/goals/utils"
)
type Point struct {
X int
Y int
Z int
}
func NewPoint(x, y int) *Point {
return &Point{
X: x,
Y: y,
}
}
// CompareX orders the argument's X coordinate relative to the receiver's:
// returns -1 when p_.X < p.X, 1 when p_.X > p.X, and 0 when equal.
// NOTE(review): the sign is from the perspective of the ARGUMENT p_, not the
// receiver — confirm callers expect this orientation.
func (p *Point) CompareX(p_ *Point) int {
	// -1, 0, 1
	if p_.X < p.X {
		return -1
	} else if p_.X > p.X {
		return 1
	}
	return 0
}

// CompareY orders the argument's Y coordinate relative to the receiver's:
// returns -1 when p_.Y < p.Y, 1 when p_.Y > p.Y, and 0 when equal.
// Same argument-relative sign convention as CompareX.
func (p *Point) CompareY(p_ *Point) int {
	// -1, 0, 1
	if p_.Y < p.Y {
		return -1
	} else if p_.Y > p.Y {
		return 1
	}
	return 0
}
// Cartesian returns the Euclidean distance between p and p_.
// math.Hypot computes sqrt(dx*dx + dy*dy) without intermediate
// overflow/underflow, replacing the manual Pow/Sqrt combination.
func (p *Point) Cartesian(p_ *Point) float64 {
	return math.Hypot(float64(p.X-p_.X), float64(p.Y-p_.Y))
}
func (p *Point) Move(dp *Point) *Point {
return &Point{
X: p.X + dp.X,
Y: p.Y + dp.Y,
}
}
var SHIFT_MAP = map[int]*Point{
0: NewPoint(0, 1),
1: NewPoint(1, 1),
2: NewPoint(1, 0),
3: NewPoint(1, -1),
4: NewPoint(0, -1),
5: NewPoint(-1, -1),
6: NewPoint(-1, 0),
7: NewPoint(-1, 1),
}
/** return all 1-step neighbors of p within the grid
* n ne e se s sw w nw
* 0 1 2 3 4 5 6 7
*/
func (p *Point) Neighbors(r *RectangleGrid) []*Point {
// at most there can be 8 neighbors/ walkable cells
ns := make([]*Point, 8)
for i := range ns {
dp := SHIFT_MAP[i]
pn := p.Move(dp)
if r.IsInside(pn) {
ns[i] = pn
} else {
ns[i] = nil
}
}
return ns
}
/** ClosestTarget()
 * Returns the in-grid neighbor of p that is closest to the target t, or t
 * itself when p already is the target. Returns nil when p has no in-grid
 * neighbors.
 */
func (p *Point) ClosestTarget(r *RectangleGrid, t *Point) *Point {
	if p.CompareX(t) == 0 && p.CompareY(t) == 0 {
		// p == t
		return t
	}

	var best *Point
	bestDist := math.MaxFloat64
	for _, n := range p.Neighbors(r) {
		if n == nil {
			continue
		}
		// BUG FIX: measure the distance from the candidate neighbor to the
		// TARGET (previously it measured p->n, which is always 1 or sqrt 2).
		dist := n.Cartesian(t)
		// BUG FIX: the running minimum was never updated, so the previous
		// code returned the last in-grid neighbor rather than the closest.
		if dist < bestDist {
			bestDist = dist
			best = n
		}
	}
	return best
}
type RectangleGrid struct {
SW *Point
NE *Point
}
func NewRectangleGrid(lowerLeft, upperRight *Point) *RectangleGrid {
return &RectangleGrid{
SW: lowerLeft,
NE: upperRight,
}
}
func (r *RectangleGrid) IsInside(p *Point) bool {
return p.X <= r.NE.X && p.X >= r.SW.X && p.Y <= r.NE.Y && p.Y >= r.SW.Y
}
func (r *RectangleGrid) ShortestWalk(a, b *Point) []*Point {
path := make([]*Point, 0)
c := a
for c.CompareX(b) != 0 && c.CompareY(b) != 0 {
p := c.ClosestTarget(r, b)
path = append(path, p)
c = p
}
return path
} | graphs/grid.go | 0.672332 | 0.509032 | grid.go | starcoder |
package model
// Common basic data structures: PdfRectangle, PdfDate, etc.
// These kinds of data structures can be copied, do not need a unique copy of each object.
import (
"errors"
"fmt"
"regexp"
"strconv"
. "github.com/unidoc/unidoc/pdf/core"
)
// Definition of a rectangle.
type PdfRectangle struct {
Llx float64 // Lower left corner (ll).
Lly float64
Urx float64 // Upper right corner (ur).
Ury float64
}
// Create a PDF rectangle object based on an input array of 4 integers.
// Defining the lower left (LL) and upper right (UR) corners with
// floating point numbers.
func NewPdfRectangle(arr PdfObjectArray) (*PdfRectangle, error) {
rect := PdfRectangle{}
if len(arr) != 4 {
return nil, errors.New("Invalid rectangle array, len != 4")
}
var err error
rect.Llx, err = getNumberAsFloat(arr[0])
if err != nil {
return nil, err
}
rect.Lly, err = getNumberAsFloat(arr[1])
if err != nil {
return nil, err
}
rect.Urx, err = getNumberAsFloat(arr[2])
if err != nil {
return nil, err
}
rect.Ury, err = getNumberAsFloat(arr[3])
if err != nil {
return nil, err
}
return &rect, nil
}
// Convert to a PDF object.
func (rect *PdfRectangle) ToPdfObject() PdfObject {
arr := PdfObjectArray{}
arr = append(arr, MakeFloat(rect.Llx))
arr = append(arr, MakeFloat(rect.Lly))
arr = append(arr, MakeFloat(rect.Urx))
arr = append(arr, MakeFloat(rect.Ury))
return &arr
}
// A date is a PDF string of the form:
// (D:YYYYMMDDHHmmSSOHH'mm)
type PdfDate struct {
year int64 // YYYY
month int64 // MM (01-12)
day int64 // DD (01-31)
hour int64 // HH (00-23)
minute int64 // mm (00-59)
second int64 // SS (00-59)
utOffsetSign byte // O ('+' / '-' / 'Z')
utOffsetHours int64 // HH' (00-23 followed by ')
utOffsetMins int64 // mm (00-59)
}
// reDate parses "D:YYYYMMDDHHmmSSOHH'mm" date strings. BUG FIX: the offset
// sign class was written [+-Z], which RE2 reads as the character RANGE
// '+'..'Z' (accepting digits and most uppercase letters as a "sign");
// escaping the hyphen restricts it to exactly '+', '-' or 'Z'.
var reDate = regexp.MustCompile(`\s*D\s*:\s*(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})([+\-Z])?(\d{2})?'?(\d{2})?`)
// Make a new PdfDate object from a PDF date string (see 7.9.4 Dates).
// format: "D: YYYYMMDDHHmmSSOHH'mm"
// Returns an error when the string does not match the expected layout or the
// regexp yields an unexpected group count. A missing UTC offset defaults to
// "+00'00" since some producers omit it.
func NewPdfDate(dateStr string) (PdfDate, error) {
	d := PdfDate{}
	matches := reDate.FindAllStringSubmatch(dateStr, 1)
	if len(matches) < 1 {
		return d, fmt.Errorf("Invalid date string (%s)", dateStr)
	}
	if len(matches[0]) != 10 {
		return d, errors.New("Invalid regexp group match length != 10")
	}
	// No need to handle err from ParseInt, as pre-validated via regexp.
	d.year, _ = strconv.ParseInt(matches[0][1], 10, 32)
	d.month, _ = strconv.ParseInt(matches[0][2], 10, 32)
	d.day, _ = strconv.ParseInt(matches[0][3], 10, 32)
	d.hour, _ = strconv.ParseInt(matches[0][4], 10, 32)
	d.minute, _ = strconv.ParseInt(matches[0][5], 10, 32)
	d.second, _ = strconv.ParseInt(matches[0][6], 10, 32)
	// Some poor implementations do not include the offset.
	if len(matches[0][7]) > 0 {
		d.utOffsetSign = matches[0][7][0]
	} else {
		d.utOffsetSign = '+'
	}
	if len(matches[0][8]) > 0 {
		d.utOffsetHours, _ = strconv.ParseInt(matches[0][8], 10, 32)
	} else {
		d.utOffsetHours = 0
	}
	if len(matches[0][9]) > 0 {
		d.utOffsetMins, _ = strconv.ParseInt(matches[0][9], 10, 32)
	} else {
		d.utOffsetMins = 0
	}
	return d, nil
}
// Convert to a PDF string object.
func (date *PdfDate) ToPdfObject() PdfObject {
str := fmt.Sprintf("D:%.4d%.2d%.2d%.2d%.2d%.2d%c%.2d'%.2d'",
date.year, date.month, date.day, date.hour, date.minute, date.second,
date.utOffsetSign, date.utOffsetHours, date.utOffsetMins)
pdfStr := PdfObjectString(str)
return &pdfStr
} | vendor/github.com/unidoc/unidoc/pdf/model/structures.go | 0.695131 | 0.562477 | structures.go | starcoder |
package main
import (
"flag"
"fmt"
"os"
"github.com/kr/pretty"
"invasion"
)
func main() {
flag.Usage = func() {
fmt.Println(`Map Visualizer
Reads in pre-defined map data and displays it on screen
`)
flag.PrintDefaults()
}
mapFile := flag.String("map", "", "map file path (required)")
flag.Parse()
if *mapFile == "" {
fmt.Println("-map is required")
os.Exit(2)
}
Simulation := invasion.New()
Simulation.BuildMap(*mapFile)
visualData := visualize(Simulation)
pretty.Println(visualData)
// fmt.Println(visualData)
}
// cache holds already processed city names to help walk routine back-tracking
var cache map[string]bool
// visualize returns the 2D string array representation of the map
func visualize(data *invasion.Invasion) (result [][]string) {
cache = make(map[string]bool, 0)
result = [][]string{{""}}
walkCities(data, firstCityName(data), 0, 0, 0, 0, &result)
return result
}
// walkCities is a recursive function to walk all roads defined between cities;
// it expands the 2D array holding the map automatically in all directions.
// (x, y) is the current cell and (xD, yD) the step taken to reach the next
// cell. Already-visited cities are tracked in the package-level cache so the
// recursion terminates on cyclic road networks. Leftover debug Println calls
// have been removed.
// NOTE(review): the expansion conditions compare x+xD / y+yD against len(...)
// with ">" rather than ">=", and the city letter is written at (x, y) before
// the offset is applied — verify these bounds against a map that grows in all
// four directions.
func walkCities(data *invasion.Invasion, cityName string, x int, y int, xD int, yD int, result *[][]string) {
	if _, ok := cache[cityName]; ok { // city already processed
		return
	}
	if x < 0 {
		x = 0
	}
	if y < 0 {
		y = 0
	}
	city := data.Map[cityName]
	cache[cityName] = true
	if x+xD < 0 {
		expand(result, -1, 0)
	}
	if x+xD > len(*result) {
		expand(result, 1, 0)
	}
	if y+yD < 0 {
		expand(result, 0, -1)
	}
	if y+yD > len((*result)[0]) {
		expand(result, 0, 1)
	}
	// store City at current coordinates, then walk around in all directions
	(*result)[x][y] = cityName[0:1]
	for direction := 0; direction < 4; direction++ {
		if nextCityName, toOk := city.Roads[direction]; toOk {
			var newxD int
			var newyD int
			switch direction {
			case 0: // north
				newxD, newyD = 0, -1
			case 1: // east
				newxD, newyD = 1, 0
			case 2: // south
				newxD, newyD = 0, 1
			case 3: // west
				newxD, newyD = -1, 0
			}
			walkCities(data, nextCityName, x+xD, y+yD, newxD, newyD, result)
		}
	}
}
// expand expands the 2D array in the desired direction
// only one direction supported at a time.
// y == ±1 adds a ROW (to the bottom/top of the outer slice), padded with
// single-space cells to the current width; x == ±1 adds a COLUMN (a " " cell
// appended/prepended to every inner slice).
// NOTE(review): when both x and y are non-zero the y branch wins and x is
// silently ignored — confirm callers never pass both.
func expand(result *[][]string, x int, y int) {
	if y == 1 || y == -1 {
		width := len((*result)[0])
		emptyLine := []string{" "}
		appendSpaces(&emptyLine, width-1)
		if y == 1 {
			*result = append(*result, emptyLine)
		} else {
			emptyArray := [][]string{emptyLine}
			*result = append(emptyArray, *result...)
		}
	} else if x == 1 {
		for i, line := range *result {
			line = append(line, " ")
			(*result)[i] = line
		}
	} else if x == -1 {
		for i, line := range *result {
			line = append([]string{" "}, line...)
			(*result)[i] = line
		}
	}
}
// appendSpaces pads the string slice on the right with num single-space
// entries (no-op when num <= 0).
func appendSpaces(slice *[]string, num int) {
	for n := 0; n < num; n++ {
		*slice = append(*slice, " ")
	}
}

// prependSpaces pads the string slice on the left with num single-space
// entries (no-op when num <= 0).
func prependSpaces(slice *[]string, num int) {
	for n := 0; n < num; n++ {
		*slice = append([]string{" "}, *slice...)
	}
}
// firstCityName returns a random city name from data.Map hash, or an empty string
func firstCityName(data *invasion.Invasion) string {
for cityName, _ := range data.Map {
return cityName
}
return ""
} | util/map_visualizer.go | 0.544801 | 0.426381 | map_visualizer.go | starcoder |
// go build
// ./example2
// Sample program to visualize the impact of dimensionality reduction.
package main
import (
"encoding/csv"
"image/color"
"log"
"os"
"strconv"
"github.com/gonum/floats"
"github.com/gonum/matrix/mat64"
"github.com/gonum/plot"
"github.com/gonum/plot/plotter"
"github.com/gonum/plot/plotutil"
"github.com/gonum/plot/vg"
"github.com/gonum/stat"
)
func main() {
// Open the iris dataset file.
f, err := os.Open("../data/iris.csv")
if err != nil {
log.Fatal(err)
}
defer f.Close()
// Create a new CSV reader reading from the opened file.
reader := csv.NewReader(f)
reader.FieldsPerRecord = 5
// Read in all of the CSV records
rawCSVData, err := reader.ReadAll()
if err != nil {
log.Fatal(err)
}
// floatData will hold all the float values that will eventually be
// used to form out matrix.
floatData := make([]float64, 4*len(rawCSVData))
// dataIndex will track the current index of the matrix values.
var dataIndex int
// Sequentially move the rows into a slice of floats.
for _, record := range rawCSVData {
// Loop over the float columns.
for i := 0; i < 4; i++ {
// Convert the value to a float.
val, err := strconv.ParseFloat(record[i], 64)
if err != nil {
log.Fatal("Could not parse float value")
}
// Add the float value to the slice of floats.
floatData[dataIndex] = val
dataIndex++
}
}
// Form the matrix.
mat := mat64.NewDense(len(rawCSVData), 4, floatData)
// Calculate the principal component direction vectors
// and variances.
_, vars, ok := stat.PrincipalComponents(mat, nil)
if !ok {
log.Fatal("Could not calculate principal components")
}
// Sum the eigenvalues (variances).
total := floats.Sum(vars)
// Calculate cumulative variance percentages for each sorted value.
cumVar := make(plotter.Values, 4)
var cumSum float64
for idx, variance := range vars {
cumSum += (variance / total) * 100.0
cumVar[idx] = cumSum
}
// Create a bar plot to visualize the variance percentages.
p, err := plot.New()
if err != nil {
log.Fatal(err)
}
p.X.Label.Text = "Principal components"
p.Y.Label.Text = "Percent of variance captured"
p.Y.Max = 110.0
p.X.Max = 3.1
p.X.Min = -0.1
w := vg.Points(20)
// Create the bars for the percent values.
bars, err := plotter.NewBarChart(cumVar, w)
if err != nil {
log.Fatal(err)
}
bars.LineStyle.Width = vg.Length(0)
bars.Color = plotutil.Color(0)
// Format the bars.
p.Add(bars)
p.NominalX("One", "Two", "Three", "Four")
// Plot a line at 100% for easy inspection.
hundred := plotter.NewFunction(func(x float64) float64 { return 100.0 })
hundred.Color = color.RGBA{B: 255, A: 255}
hundred.Dashes = []vg.Length{vg.Points(2), vg.Points(2)}
hundred.Width = vg.Points(2)
p.Add(hundred)
// Save the graph.
if err := p.Save(4*vg.Inch, 5*vg.Inch, "barchart.png"); err != nil {
log.Fatal(err)
}
} | topics/data_science/dimensionality_reduction/example2/example2.go | 0.634204 | 0.418697 | example2.go | starcoder |
package indicators
// DX = ( (+DI)-(-DI) ) / ( (+DI) + (-DI) )
import (
"errors"
"github.com/thetruetrade/gotrade"
"math"
)
// An Directional Movement Index Indicator (Dx), no storage, for use in other indicators
type DxWithoutStorage struct {
*baseIndicatorWithFloatBounds
// private variables
minusDI *MinusDi
plusDI *PlusDi
currentPlusDi float64
currentMinusDi float64
timePeriod int
}
// NewDxWithoutStorage creates a Directional Movement Index Indicator (Dx)
// without storage. valueAvailableAction is mandatory (there is nowhere else
// for results to go) and is invoked once per computed DX value.
func NewDxWithoutStorage(timePeriod int, valueAvailableAction ValueAvailableActionFloat) (indicator *DxWithoutStorage, err error) {

	// an indicator without storage MUST have a value available action
	if valueAvailableAction == nil {
		return nil, ErrValueAvailableActionIsNil
	}

	// the minimum timeperiod for this indicator is 2
	if timePeriod < 2 {
		return nil, errors.New("timePeriod is less than the minimum (2)")
	}

	// check the maximum timeperiod
	if timePeriod > MaximumLookbackPeriod {
		return nil, errors.New("timePeriod is greater than the maximum (100000)")
	}

	lookback := 2
	if timePeriod > 1 {
		lookback = timePeriod
	}
	ind := DxWithoutStorage{
		baseIndicatorWithFloatBounds: newBaseIndicatorWithFloatBounds(lookback, valueAvailableAction),
		currentPlusDi:                0.0,
		currentMinusDi:               0.0,
		timePeriod:                   timePeriod,
	}

	ind.minusDI, err = NewMinusDi(timePeriod)
	// BUG FIX: this error was previously discarded (silently overwritten by
	// the NewPlusDi assignment below); fail fast instead.
	if err != nil {
		return nil, err
	}
	ind.minusDI.valueAvailableAction = func(dataItem float64, streamBarIndex int) {
		ind.currentMinusDi = dataItem
	}

	ind.plusDI, err = NewPlusDi(timePeriod)
	if err != nil {
		return nil, err
	}
	// DX is derivable once both DIs are known for the bar; -DI always arrives
	// first because ReceiveDOHLCVTick feeds minusDI before plusDI.
	ind.plusDI.valueAvailableAction = func(dataItem float64, streamBarIndex int) {
		ind.currentPlusDi = dataItem

		// DX = 100 * |(+DI) - (-DI)| / ((+DI) + (-DI)), 0 when the sum is 0.
		var result float64
		tmp := ind.currentMinusDi + ind.currentPlusDi
		if tmp != 0.0 {
			result = 100.0 * (math.Abs(ind.currentMinusDi-ind.currentPlusDi) / tmp)
		} else {
			result = 0.0
		}

		ind.UpdateIndicatorWithNewValue(result, streamBarIndex)
	}

	return &ind, nil
}
// A Directional Movement Index Indicator (Dx)
type Dx struct {
*DxWithoutStorage
// public variables
Data []float64
}
// NewDx creates a Directional Movement Index Indicator (Dx) for online usage
func NewDx(timePeriod int) (indicator *Dx, err error) {
ind := Dx{}
ind.DxWithoutStorage, err = NewDxWithoutStorage(timePeriod,
func(dataItem float64, streamBarIndex int) {
ind.Data = append(ind.Data, dataItem)
})
return &ind, err
}
// NewDefaultDx creates a Directional Movement Index (Dx) for online usage with default parameters
// - timePeriod: 14
func NewDefaultDx() (indicator *Dx, err error) {
timePeriod := 14
return NewDx(timePeriod)
}
// NewDxWithSrcLen creates a Directional Movement Index (Dx) for offline usage,
// pre-allocating result storage sized to the expected number of outputs.
func NewDxWithSrcLen(sourceLength uint, timePeriod int) (indicator *Dx, err error) {
	ind, err := NewDx(timePeriod)
	// BUG FIX: the constructor error was previously ignored before using ind.
	if err != nil {
		return nil, err
	}

	// only initialise the storage if there is enough source data to require it.
	// Compare instead of subtracting to avoid unsigned underflow when
	// sourceLength <= lookback.
	if lookback := uint(ind.GetLookbackPeriod()); sourceLength > lookback+1 {
		ind.Data = make([]float64, 0, sourceLength-lookback)
	}

	return ind, nil
}
// NewDefaultDxWithSrcLen creates a Directional Movement Index (Dx) for offline
// usage with default parameters (timePeriod: 14), pre-allocating result
// storage sized to the expected number of outputs.
func NewDefaultDxWithSrcLen(sourceLength uint) (indicator *Dx, err error) {
	ind, err := NewDefaultDx()
	// BUG FIX: the constructor error was previously ignored before using ind.
	if err != nil {
		return nil, err
	}

	// only initialise the storage if there is enough source data to require it.
	// Compare instead of subtracting to avoid unsigned underflow when
	// sourceLength <= lookback.
	if lookback := uint(ind.GetLookbackPeriod()); sourceLength > lookback+1 {
		ind.Data = make([]float64, 0, sourceLength-lookback)
	}

	return ind, nil
}
// NewDxForStream creates a Directional Movement Index (Dx) for online usage with a source data stream
func NewDxForStream(priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int) (indicator *Dx, err error) {
ind, err := NewDx(timePeriod)
priceStream.AddTickSubscription(ind)
return ind, err
}
// NewDefaultDxForStream creates a Directional Movement Index (Dx) for online usage with a source data stream
func NewDefaultDxForStream(priceStream gotrade.DOHLCVStreamSubscriber) (indicator *Dx, err error) {
ind, err := NewDefaultDx()
priceStream.AddTickSubscription(ind)
return ind, err
}
// NewDxForStreamWithSrcLen creates a Directional Movement Index (Dx) for offline usage with a source data stream
func NewDxForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int) (indicator *Dx, err error) {
ind, err := NewDxWithSrcLen(sourceLength, timePeriod)
priceStream.AddTickSubscription(ind)
return ind, err
}
// NewDefaultDxForStreamWithSrcLen creates a Directional Movement Index (Dx) for offline usage with a source data stream
func NewDefaultDxForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber) (indicator *Dx, err error) {
ind, err := NewDefaultDxWithSrcLen(sourceLength)
priceStream.AddTickSubscription(ind)
return ind, err
}
// ReceiveDOHLCVTick consumes a source data DOHLCV price tick
func (ind *DxWithoutStorage) ReceiveDOHLCVTick(tickData gotrade.DOHLCV, streamBarIndex int) {
ind.minusDI.ReceiveDOHLCVTick(tickData, streamBarIndex)
ind.plusDI.ReceiveDOHLCVTick(tickData, streamBarIndex)
} | indicators/dx.go | 0.649023 | 0.473414 | dx.go | starcoder |
package maputils
// Keys - takes a map with keys K and values V, returns a slice of type K of the map's keys.
// Note: Go maps do not preserve insertion order.
func Keys[K comparable, V any](mapInstance map[K]V) []K {
keys := make([]K, len(mapInstance))
i := 0
for k := range mapInstance {
keys[i] = k
i++
}
return keys
}
// Values - takes a map with keys K and values V, returns a slice of type V of the map's values.
// Note: Go maps do not preserve insertion order.
func Values[K comparable, V any](mapInstance map[K]V) []V {
values := make([]V, len(mapInstance))
i := 0
for _, v := range mapInstance {
values[i] = v
i++
}
return values
}
// Merge - takes an arbitrary number of map instances with keys K and values V and merges them into a single map.
// Note: merge works from left to right. If a key already exists in a previous map, its value is over-written.
func Merge[K comparable, V any](mapInstances ...map[K]V) map[K]V {
mergedMap := make(map[K]V, 0)
for _, mapInstance := range mapInstances {
for k, v := range mapInstance {
mergedMap[k] = v
}
}
return mergedMap
}
// ForEach - given a map with keys K and values V, executes the passed in function for each key-value pair.
func ForEach[K comparable, V any](mapInstance map[K]V, function func(key K, value V)) {
for key, value := range mapInstance {
function(key, value)
}
}
// Drop - takes a map with keys K and values V, and a slice of keys K, dropping all the key-value pairs that match the keys in the slice.
// Note: this function will modify the passed in map. To get a different object, use the Copy function to pass a copy to this function.
func Drop[K comparable, V any](mapInstance map[K]V, keys []K) map[K]V {
for _, key := range keys {
if _, keyExists := mapInstance[key]; keyExists {
delete(mapInstance, key)
}
}
return mapInstance
}
// Copy - takes a map with keys K and values V and returns a copy of the map.
func Copy[K comparable, V any](mapInstance map[K]V) map[K]V {
mapCopy := make(map[K]V, len(mapInstance))
for key, value := range mapInstance {
mapCopy[key] = value
}
return mapCopy
}
// Filter - takes a map with keys K and values V, and executes the passed in function for each key-value pair.
// If the filter function returns true, the key-value pair will be included in the output, otherwise it is filtered out.
func Filter[K comparable, V any](mapInstance map[K]V, function func(key K, value V) bool) map[K]V {
mapCopy := make(map[K]V, len(mapInstance))
for key, value := range mapInstance {
if function(key, value) {
mapCopy[key] = value
}
}
return mapCopy
} | maputils/maputils.go | 0.873053 | 0.534127 | maputils.go | starcoder |
package vespyr
import (
"fmt"
"math/rand"
"github.com/MaxHalford/gago"
"github.com/sirupsen/logrus"
)
// EMACrossoverStrategy is a strategy for that buys and sells based on
// EMA crossovers.
type EMACrossoverStrategy struct {
ShortPeriod uint `yaml:"short_period"`
LongPeriod uint `yaml:"long_period"`
UpThreshold float64 `yaml:"up_threshold"`
DownThreshold float64 `yaml:"down_threshold"`
strategy *TradingStrategyModel
}
// String returns the string representation of the strategy.
func (e *EMACrossoverStrategy) String() string {
return fmt.Sprintf("EMA Crossover: short period: %d, long period: %d, up threshold: %f, down threshold %f",
e.ShortPeriod, e.LongPeriod, e.UpThreshold, e.DownThreshold,
)
}
// SetTradingStrategy sets the underlying trading strategy.
func (e *EMACrossoverStrategy) SetTradingStrategy(t *TradingStrategyModel) {
e.strategy = t
}
// Indicators returns the indicators returned by the strategy.
func (e *EMACrossoverStrategy) Indicators() []Indicator {
var indicators []Indicator
indicators = append(indicators, NewDEMAIndicator(e.ShortPeriod, e.LongPeriod))
indicators = append(indicators, NewEMAIndicator(e.ShortPeriod))
indicators = append(indicators, NewEMAIndicator(e.LongPeriod))
return indicators
}
// Buy determines whether the currency should be bought using the
// indicator history.
func (e *EMACrossoverStrategy) Buy(history []*IndicatorSet, current int) (bool, error) {
if len(history)-1 < current {
return false, ErrNotEnoughData
}
currentValues := history[current].Values
dema := currentValues[0]
if dema == nil {
return false, ErrNotEnoughData
}
message := fmt.Sprintf("ema crossover (%d, %s) buy dema value: %f, up threshold: %f",
e.strategy.ID, e.strategy.Product, dema.Value, e.UpThreshold)
logrus.Debug(message)
PostStrategyDataToSlack(e, e.strategy, map[string]interface{}{
currentValues[0].IndicatorName: currentValues[0].Value,
currentValues[1].IndicatorName: currentValues[1].Value,
currentValues[2].IndicatorName: currentValues[2].Value,
})
return (dema.Value > e.UpThreshold), nil
}
// Sell determines whether the currency should be sold using the
// indicator history.
func (e *EMACrossoverStrategy) Sell(history []*IndicatorSet, current int) (bool, error) {
if len(history)-1 < current {
return false, ErrNotEnoughData
}
currentValues := history[current].Values
dema := currentValues[0]
if dema == nil {
return false, ErrNotEnoughData
}
message := fmt.Sprintf("ema crossover (%d, %s) sell dema value: %f, down threshold: %f",
e.strategy.ID, e.strategy.Product, dema.Value, e.DownThreshold)
logrus.Debug(message)
PostStrategyDataToSlack(e, e.strategy, map[string]interface{}{
currentValues[0].IndicatorName: currentValues[0].Value,
currentValues[1].IndicatorName: currentValues[1].Value,
currentValues[2].IndicatorName: currentValues[2].Value,
})
return (dema.Value < e.DownThreshold), nil
}
// Rand creates a random version of the strategy.
func (e *EMACrossoverStrategy) Rand(rng *rand.Rand) {
e.ShortPeriod = uint(rng.Float64() * 50)
e.LongPeriod = 2 * e.ShortPeriod
e.UpThreshold = rng.Float64() * .002
e.DownThreshold = -e.UpThreshold
}
// Clone returns a clone of the current strategy.
func (e *EMACrossoverStrategy) Clone() StrategyGenome {
return &EMACrossoverStrategy{
ShortPeriod: e.ShortPeriod,
LongPeriod: e.LongPeriod,
UpThreshold: e.UpThreshold,
DownThreshold: e.DownThreshold,
}
}
// Mutate mutates the underlying strategy.
func (e *EMACrossoverStrategy) Mutate(rng *rand.Rand) {
mutateProb := 0.8
thresholds := []float64{e.DownThreshold, e.UpThreshold}
gago.MutNormalFloat64(thresholds, mutateProb, rng)
e.DownThreshold, e.UpThreshold = thresholds[0], thresholds[1]
if rng.Float64() < mutateProb {
x := int(e.ShortPeriod)
x += int(float64(e.ShortPeriod) * rng.NormFloat64())
if x > 0 {
e.ShortPeriod = uint(x) % 100
}
}
if rng.Float64() < mutateProb {
x := int(e.LongPeriod)
x += int(float64(e.LongPeriod) * rng.NormFloat64())
if x > 0 {
e.LongPeriod = uint(x) % 100
}
}
if e.LongPeriod < e.ShortPeriod {
e.LongPeriod = e.ShortPeriod
}
}
// Crossover crosses over an EMACrossoverStrategy with a different
// one.
func (e *EMACrossoverStrategy) Crossover(m StrategyGenome,
r *rand.Rand) (StrategyGenome, StrategyGenome) {
mate := m.(*EMACrossoverStrategy)
p1 := []float64{float64(e.ShortPeriod), float64(e.LongPeriod),
e.UpThreshold, e.DownThreshold}
p2 := []float64{float64(mate.ShortPeriod), float64(mate.LongPeriod),
mate.UpThreshold, mate.DownThreshold}
c1, c2 := gago.CrossUniformFloat64(p1, p2, r)
s1 := &EMACrossoverStrategy{
ShortPeriod: uint(c1[0]),
LongPeriod: uint(c1[1]),
UpThreshold: c1[2],
DownThreshold: c1[3],
}
if s1.LongPeriod < s1.ShortPeriod {
s1.ShortPeriod = s1.LongPeriod
}
s2 := &EMACrossoverStrategy{
ShortPeriod: uint(c2[0]),
LongPeriod: uint(c2[1]),
UpThreshold: c2[2],
DownThreshold: c2[3],
}
if s2.LongPeriod < s2.ShortPeriod {
s2.ShortPeriod = s2.LongPeriod
}
return s1, s2
} | pkg/vespyr/ema_crossover_strategy.go | 0.792665 | 0.404331 | ema_crossover_strategy.go | starcoder |
package suffix_automata
type Dawg struct {
qW int32
lastTransition int32
states []State
slinks []int32
transitions []Transition
}
func NewDawg(len int) *Dawg {
states := make([]State, 1, 2*len-1)
states[0].len = 0
states[0].lastTransition = -1
slinks := make([]int32, 1, 2*len-1)
slinks[0] = -1
transitions := make([]Transition, 0, 3*len-4)
return &Dawg{
qW: 0,
states: states,
slinks: slinks,
transitions: transitions,
}
}
func (d *Dawg) get(state int32, letter byte) (int32, int32) {
i := d.states[state].lastTransition
for i != -1 {
if d.transitions[i].letter == letter {
return int32(i), d.transitions[i].destinationIndex
}
i = d.transitions[i].prev
}
return -1, -1
}
func (d *Dawg) AddTransition(state int32, letter byte, destinationIndex int32) {
if d.states[state].lastTransition != -1 {
d.transitions = append(d.transitions, Transition{letter: letter, destinationIndex: destinationIndex, prev: d.states[state].lastTransition})
} else {
d.transitions = append(d.transitions, Transition{letter: letter, destinationIndex: destinationIndex, prev: -1})
}
d.states[state].lastTransition = int32(len(d.transitions)) - 1
}
func (d *Dawg) Count() (int, int, int) {
s := d.qW
finalSum := 0
for s != -1 {
finalSum += 1
s = d.slinks[s]
}
return len(d.states), len(d.transitions), finalSum
}
func (d *Dawg) AddState(stateLen int32) int32 {
d.states = append(d.states, State{len: stateLen, lastTransition: -1})
d.slinks = append(d.slinks, -1)
return int32(len(d.states)) - 1
}
// <s, qwa>
func (d *Dawg) FindSLink(letter byte) (int32, int32, int32, int32) {
qWa := d.AddState(d.states[d.qW].len + 1)
state := d.qW
ind, dest := d.get(state, letter)
for dest == -1 {
d.AddTransition(state, letter, qWa)
if d.slinks[state] == -1 {
return -1, qWa, -1, -1
} else {
state = d.slinks[state]
}
ind, dest = d.get(state, letter)
}
return state, qWa, ind, dest
}
func (d *Dawg) ProcessCharacter(letter byte) {
s, qWa, ind, destination := d.FindSLink(letter)
d.qW = qWa
if s == -1 {
d.slinks[d.qW] = 0
return
}
if d.states[destination].len == d.states[s].len+1 {
d.slinks[d.qW] = destination
return
}
sNew := d.AddSlink(s, letter, destination)
d.CopyTransitions(sNew, destination)
d.RedirectTransitions(s, letter, destination, sNew, ind, destination)
}
func (d *Dawg) CopyTransitions(sNew int32, destination int32) {
i := d.states[destination].lastTransition
for i != -1 {
d.AddTransition(sNew, d.transitions[i].letter, d.transitions[i].destinationIndex)
i = d.transitions[i].prev
}
}
func (d *Dawg) RedirectTransitions(s int32, letter byte, t int32, sNew int32, ind int32, destination int32) {
for destination == t {
d.transitions[ind] = Transition{letter: letter, destinationIndex: sNew, prev: d.transitions[ind].prev}
s = d.slinks[s]
if s == -1 {
break
}
ind, destination = d.get(s, letter)
}
}
func (d *Dawg) AddSlink(stateIndex int32, letter byte, destinationIndex int32) int32 {
newStateIndex := d.AddState(d.states[stateIndex].len + 1)
d.slinks[newStateIndex] = d.slinks[destinationIndex]
d.slinks[destinationIndex] = newStateIndex
d.slinks[d.qW] = newStateIndex
return newStateIndex
} | dodo/suffix_automata/dawg.go | 0.622574 | 0.439266 | dawg.go | starcoder |
package main
import (
"fmt"
"math"
"sort"
"strings"
"time"
"github.com/sanderploegsma/advent-of-code/2019/utils"
)
func main() {
input, _ := utils.ReadFile("input.txt")
asteroids := ParseInput(input)
start := time.Now()
p, num := PartOne(asteroids)
fmt.Printf("[PART ONE] position (%d, %d) can detect %d asteroids (took %s)\n", p.x, p.y, num, time.Since(start))
start = time.Now()
destroyed := PartTwo(asteroids, p)
fmt.Printf("[PART TWO] 200th destroyed asteroid: (%d, %d) (took %s)\n", destroyed[199].x, destroyed[199].y, time.Since(start))
}
// Point describes a point in a 2-dimensional plane
type Point struct{ x, y int }
// DistanceTo returns the distance between this point and the given other point
func (p *Point) DistanceTo(o Point) float64 {
return math.Sqrt(math.Pow(float64(p.x-o.x), 2) + math.Pow(float64(p.y-o.y), 2))
}
// AngleTo returns the angle in radians of the other point w.r.t. this point
func (p *Point) AngleTo(o Point) float64 {
return math.Atan2(float64(o.x-p.x), float64(o.y-p.y))
}
// ParseInput parses the given input into a list of points
func ParseInput(input string) (asteroids []Point) {
rows := strings.Split(input, "\n")
for y, row := range rows {
for x := 0; x < len(rows[0]); x++ {
if string(row[x]) == "#" {
asteroids = append(asteroids, Point{x, y})
}
}
}
return asteroids
}
// PartOne - Find the asteroid that can detect the most other asteroids (ones that are directly in its line of sight)
func PartOne(asteroids []Point) (p Point, num int) {
for _, a := range asteroids {
slopes := make([]float64, 0)
for _, b := range asteroids {
if a.x == b.x && a.y == b.y {
continue
}
d := a.AngleTo(b)
exists := false
for _, s := range slopes {
if d == s {
exists = true
}
}
if !exists {
slopes = append(slopes, d)
}
}
if len(slopes) > num {
num = len(slopes)
p = a
}
}
return p, num
}
// PartTwo - Destroy all other asteroids with a laser mounted on the given origin.
func PartTwo(asteroids []Point, origin Point) (destroyed []Point) {
targets := make(map[float64][]Point)
for _, a := range asteroids {
if a.x == origin.x && a.y == origin.y {
continue
}
// Calculate the angle wrt origin, offsetting by 45 degrees so that directly upwards counts as 0. Also, since radians go counter-clockwise, multiply by -1
angle := (origin.AngleTo(a) - 0.5*math.Pi) * -1
if _, ok := targets[angle]; !ok {
targets[angle] = make([]Point, 0)
}
targets[angle] = append(targets[angle], a)
// Store the targets in order ascending from closest to origin
sort.Slice(targets[angle], func(i, j int) bool {
return origin.DistanceTo(targets[angle][i]) < origin.DistanceTo(targets[angle][j])
})
}
// Order the angles ascending
order := make([]float64, 0)
for d := range targets {
order = append(order, d)
}
sort.Float64s(order)
// Kill 'em all
i := 0
for len(destroyed) < len(asteroids)-1 {
d := order[i%len(order)]
if len(targets[d]) > 0 {
destroyed = append(destroyed, targets[d][0])
targets[d] = targets[d][1:]
}
i++
}
return destroyed
} | 2019/go/10/main.go | 0.802013 | 0.489686 | main.go | starcoder |
package f32
import "fmt"
// An Affine is a 3x3 matrix of float32 values for which the bottom row is
// implicitly always equal to [0 0 1].
// Elements are indexed first by row then column, i.e. m[row][column].
type Affine [2]Vec3
func (m Affine) String() string {
return fmt.Sprintf(`Affine[% 0.3f, % 0.3f, % 0.3f,
% 0.3f, % 0.3f, % 0.3f]`,
m[0][0], m[0][1], m[0][2],
m[1][0], m[1][1], m[1][2])
}
// Identity sets m to be the identity transform.
func (m *Affine) Identity() {
*m = Affine{
{1, 0, 0},
{0, 1, 0},
}
}
// Eq reports whether each component of m is within epsilon of the same
// component in n.
func (m *Affine) Eq(n *Affine, epsilon float32) bool {
for i := range m {
for j := range m[i] {
diff := m[i][j] - n[i][j]
if diff < -epsilon || +epsilon < diff {
return false
}
}
}
return true
}
// Mul sets m to be p × q.
func (m *Affine) Mul(p, q *Affine) {
// Store the result in local variables, in case m == a || m == b.
m00 := p[0][0]*q[0][0] + p[0][1]*q[1][0]
m01 := p[0][0]*q[0][1] + p[0][1]*q[1][1]
m02 := p[0][0]*q[0][2] + p[0][1]*q[1][2] + p[0][2]
m10 := p[1][0]*q[0][0] + p[1][1]*q[1][0]
m11 := p[1][0]*q[0][1] + p[1][1]*q[1][1]
m12 := p[1][0]*q[0][2] + p[1][1]*q[1][2] + p[1][2]
m[0][0] = m00
m[0][1] = m01
m[0][2] = m02
m[1][0] = m10
m[1][1] = m11
m[1][2] = m12
}
// Inverse sets m to be the inverse of p.
func (m *Affine) Inverse(p *Affine) {
m00 := p[1][1]
m01 := -p[0][1]
m02 := p[1][2]*p[0][1] - p[1][1]*p[0][2]
m10 := -p[1][0]
m11 := p[0][0]
m12 := p[1][0]*p[0][2] - p[1][2]*p[0][0]
det := m00*m11 - m10*m01
m[0][0] = m00 / det
m[0][1] = m01 / det
m[0][2] = m02 / det
m[1][0] = m10 / det
m[1][1] = m11 / det
m[1][2] = m12 / det
}
// Scale sets m to be a scale followed by p.
// It is equivalent to m.Mul(p, &Affine{{x,0,0}, {0,y,0}}).
func (m *Affine) Scale(p *Affine, x, y float32) {
m[0][0] = p[0][0] * x
m[0][1] = p[0][1] * y
m[0][2] = p[0][2]
m[1][0] = p[1][0] * x
m[1][1] = p[1][1] * y
m[1][2] = p[1][2]
}
// Translate sets m to be a translation followed by p.
// It is equivalent to m.Mul(p, &Affine{{1,0,x}, {0,1,y}}).
func (m *Affine) Translate(p *Affine, x, y float32) {
m[0][0] = p[0][0]
m[0][1] = p[0][1]
m[0][2] = p[0][0]*x + p[0][1]*y + p[0][2]
m[1][0] = p[1][0]
m[1][1] = p[1][1]
m[1][2] = p[1][0]*x + p[1][1]*y + p[1][2]
}
// Rotate sets m to a rotation in radians followed by p.
// It is equivalent to m.Mul(p, affineRotation).
func (m *Affine) Rotate(p *Affine, radians float32) {
s, c := Sin(radians), Cos(radians)
m.Mul(p, &Affine{
{+c, +s, 0},
{-s, +c, 0},
})
} | vendor/github.com/fyne-io/mobile/exp/f32/affine.go | 0.73077 | 0.54353 | affine.go | starcoder |
package bls12381
import (
"errors"
"math"
"math/big"
)
// PointG1 is type for point in G1.
// PointG1 is both used for Affine and Jacobian point representation.
// If z is equal to one the point is considered as in affine form.
type PointG1 [3]fe
func (p *PointG1) Set(p2 *PointG1) *PointG1 {
p[0].set(&p2[0])
p[1].set(&p2[1])
p[2].set(&p2[2])
return p
}
// Zero returns G1 point in point at infinity representation
func (p *PointG1) Zero() *PointG1 {
p[0].zero()
p[1].one()
p[2].zero()
return p
}
type tempG1 struct {
t [9]*fe
}
// G1 is struct for G1 group.
type G1 struct {
tempG1
}
// NewG1 constructs a new G1 instance.
func NewG1() *G1 {
t := newTempG1()
return &G1{t}
}
func newTempG1() tempG1 {
t := [9]*fe{}
for i := 0; i < 9; i++ {
t[i] = &fe{}
}
return tempG1{t}
}
// Q returns group order in big.Int.
func (g *G1) Q() *big.Int {
return new(big.Int).Set(q)
}
func (g *G1) fromBytesUnchecked(in []byte) (*PointG1, error) {
p0, err := fromBytes(in[:48])
if err != nil {
return nil, err
}
p1, err := fromBytes(in[48:])
if err != nil {
return nil, err
}
p2 := new(fe).one()
return &PointG1{*p0, *p1, *p2}, nil
}
// FromBytes constructs a new point given uncompressed byte input.
// FromBytes does not take zcash flags into account.
// Byte input expected to be larger than 96 bytes.
// First 96 bytes should be concatenation of x and y values.
// Point (0, 0) is considered as infinity.
func (g *G1) FromBytes(in []byte) (*PointG1, error) {
if len(in) != 96 {
return nil, errors.New("input string should be equal or larger than 96")
}
p0, err := fromBytes(in[:48])
if err != nil {
return nil, err
}
p1, err := fromBytes(in[48:])
if err != nil {
return nil, err
}
// check if given input points to infinity
if p0.isZero() && p1.isZero() {
return g.Zero(), nil
}
p2 := new(fe).one()
p := &PointG1{*p0, *p1, *p2}
if !g.IsOnCurve(p) {
return nil, errors.New("point is not on curve")
}
return p, nil
}
// DecodePoint given encoded (x, y) coordinates in 128 bytes returns a valid G1 Point.
func (g *G1) DecodePoint(in []byte) (*PointG1, error) {
if len(in) != 128 {
return nil, errors.New("invalid g1 point length")
}
pointBytes := make([]byte, 96)
// decode x
xBytes, err := decodeFieldElement(in[:64])
if err != nil {
return nil, err
}
// decode y
yBytes, err := decodeFieldElement(in[64:])
if err != nil {
return nil, err
}
copy(pointBytes[:48], xBytes)
copy(pointBytes[48:], yBytes)
return g.FromBytes(pointBytes)
}
// ToBytes serializes a point into bytes in uncompressed form.
// ToBytes does not take zcash flags into account.
// ToBytes returns (0, 0) if point is infinity.
func (g *G1) ToBytes(p *PointG1) []byte {
out := make([]byte, 96)
if g.IsZero(p) {
return out
}
g.Affine(p)
copy(out[:48], toBytes(&p[0]))
copy(out[48:], toBytes(&p[1]))
return out
}
// EncodePoint encodes a point into 128 bytes.
func (g *G1) EncodePoint(p *PointG1) []byte {
outRaw := g.ToBytes(p)
out := make([]byte, 128)
// encode x
copy(out[16:], outRaw[:48])
// encode y
copy(out[64+16:], outRaw[48:])
return out
}
// New creates a new G1 Point which is equal to zero in other words point at infinity.
func (g *G1) New() *PointG1 {
return g.Zero()
}
// Zero returns a new G1 Point which is equal to point at infinity.
func (g *G1) Zero() *PointG1 {
return new(PointG1).Zero()
}
// One returns a new G1 Point which is equal to generator point.
func (g *G1) One() *PointG1 {
p := &PointG1{}
return p.Set(&g1One)
}
// IsZero returns true if given point is equal to zero.
func (g *G1) IsZero(p *PointG1) bool {
return p[2].isZero()
}
// Equal checks if given two G1 point is equal in their affine form.
func (g *G1) Equal(p1, p2 *PointG1) bool {
if g.IsZero(p1) {
return g.IsZero(p2)
}
if g.IsZero(p2) {
return g.IsZero(p1)
}
t := g.t
square(t[0], &p1[2])
square(t[1], &p2[2])
mul(t[2], t[0], &p2[0])
mul(t[3], t[1], &p1[0])
mul(t[0], t[0], &p1[2])
mul(t[1], t[1], &p2[2])
mul(t[1], t[1], &p1[1])
mul(t[0], t[0], &p2[1])
return t[0].equal(t[1]) && t[2].equal(t[3])
}
// InCorrectSubgroup checks whether given point is in correct subgroup.
func (g *G1) InCorrectSubgroup(p *PointG1) bool {
tmp := &PointG1{}
g.MulScalar(tmp, p, q)
return g.IsZero(tmp)
}
// IsOnCurve checks a G1 point is on curve.
func (g *G1) IsOnCurve(p *PointG1) bool {
if g.IsZero(p) {
return true
}
t := g.t
square(t[0], &p[1])
square(t[1], &p[0])
mul(t[1], t[1], &p[0])
square(t[2], &p[2])
square(t[3], t[2])
mul(t[2], t[2], t[3])
mul(t[2], b, t[2])
add(t[1], t[1], t[2])
return t[0].equal(t[1])
}
// IsAffine checks a G1 point whether it is in affine form.
func (g *G1) IsAffine(p *PointG1) bool {
return p[2].isOne()
}
// Affine Add adds two G1 points p1, p2 and assigns the result to point at first argument.
func (g *G1) Affine(p *PointG1) *PointG1 {
if g.IsZero(p) {
return p
}
if !g.IsAffine(p) {
t := g.t
inverse(t[0], &p[2])
square(t[1], t[0])
mul(&p[0], &p[0], t[1])
mul(t[0], t[0], t[1])
mul(&p[1], &p[1], t[0])
p[2].one()
}
return p
}
// Add adds two G1 points p1, p2 and assigns the result to point at first argument.
func (g *G1) Add(r, p1, p2 *PointG1) *PointG1 {
// http://www.hyperelliptic.org/EFD/gp/auto-shortw-jacobian-0.html#addition-add-2007-bl
if g.IsZero(p1) {
return r.Set(p2)
}
if g.IsZero(p2) {
return r.Set(p1)
}
t := g.t
square(t[7], &p1[2])
mul(t[1], &p2[0], t[7])
mul(t[2], &p1[2], t[7])
mul(t[0], &p2[1], t[2])
square(t[8], &p2[2])
mul(t[3], &p1[0], t[8])
mul(t[4], &p2[2], t[8])
mul(t[2], &p1[1], t[4])
if t[1].equal(t[3]) {
if t[0].equal(t[2]) {
return g.Double(r, p1)
}
return r.Zero()
}
sub(t[1], t[1], t[3])
double(t[4], t[1])
square(t[4], t[4])
mul(t[5], t[1], t[4])
sub(t[0], t[0], t[2])
double(t[0], t[0])
square(t[6], t[0])
sub(t[6], t[6], t[5])
mul(t[3], t[3], t[4])
double(t[4], t[3])
sub(&r[0], t[6], t[4])
sub(t[4], t[3], &r[0])
mul(t[6], t[2], t[5])
double(t[6], t[6])
mul(t[0], t[0], t[4])
sub(&r[1], t[0], t[6])
add(t[0], &p1[2], &p2[2])
square(t[0], t[0])
sub(t[0], t[0], t[7])
sub(t[0], t[0], t[8])
mul(&r[2], t[0], t[1])
return r
}
// Double doubles a G1 point p and assigns the result to the point at first argument.
func (g *G1) Double(r, p *PointG1) *PointG1 {
// http://www.hyperelliptic.org/EFD/gp/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
if g.IsZero(p) {
return r.Set(p)
}
t := g.t
square(t[0], &p[0])
square(t[1], &p[1])
square(t[2], t[1])
add(t[1], &p[0], t[1])
square(t[1], t[1])
sub(t[1], t[1], t[0])
sub(t[1], t[1], t[2])
double(t[1], t[1])
double(t[3], t[0])
add(t[0], t[3], t[0])
square(t[4], t[0])
double(t[3], t[1])
sub(&r[0], t[4], t[3])
sub(t[1], t[1], &r[0])
double(t[2], t[2])
double(t[2], t[2])
double(t[2], t[2])
mul(t[0], t[0], t[1])
sub(t[1], t[0], t[2])
mul(t[0], &p[1], &p[2])
r[1].set(t[1])
double(&r[2], t[0])
return r
}
// Neg negates a G1 point p and assigns the result to the point at first argument.
func (g *G1) Neg(r, p *PointG1) *PointG1 {
r[0].set(&p[0])
r[2].set(&p[2])
neg(&r[1], &p[1])
return r
}
// Sub subtracts two G1 points p1, p2 and assigns the result to point at first argument.
func (g *G1) Sub(c, a, b *PointG1) *PointG1 {
d := &PointG1{}
g.Neg(d, b)
g.Add(c, a, d)
return c
}
// MulScalar multiplies a point by given scalar value in big.Int and assigns the result to point at first argument.
func (g *G1) MulScalar(c, p *PointG1, e *big.Int) *PointG1 {
q, n := &PointG1{}, &PointG1{}
n.Set(p)
l := e.BitLen()
for i := 0; i < l; i++ {
if e.Bit(i) == 1 {
g.Add(q, q, n)
}
g.Double(n, n)
}
return c.Set(q)
}
// ClearCofactor maps given a G1 point to correct subgroup
func (g *G1) ClearCofactor(p *PointG1) {
g.MulScalar(p, p, cofactorEFFG1)
}
// MultiExp calculates multi exponentiation. Given pairs of G1 point and scalar values
// (P_0, e_0), (P_1, e_1), ... (P_n, e_n) calculates r = e_0 * P_0 + e_1 * P_1 + ... + e_n * P_n
// Length of points and scalars are expected to be equal, otherwise an error is returned.
// Result is assigned to point at first argument.
func (g *G1) MultiExp(r *PointG1, points []*PointG1, powers []*big.Int) (*PointG1, error) {
if len(points) != len(powers) {
return nil, errors.New("point and scalar vectors should be in same length")
}
var c uint32 = 3
if len(powers) >= 32 {
c = uint32(math.Ceil(math.Log10(float64(len(powers)))))
}
bucketSize, numBits := (1<<c)-1, uint32(g.Q().BitLen())
windows := make([]*PointG1, numBits/c+1)
bucket := make([]*PointG1, bucketSize)
acc, sum := g.New(), g.New()
for i := 0; i < bucketSize; i++ {
bucket[i] = g.New()
}
mask := (uint64(1) << c) - 1
j := 0
var cur uint32
for cur <= numBits {
acc.Zero()
bucket = make([]*PointG1, (1<<c)-1)
for i := 0; i < len(bucket); i++ {
bucket[i] = g.New()
}
for i := 0; i < len(powers); i++ {
s0 := powers[i].Uint64()
index := uint(s0 & mask)
if index != 0 {
g.Add(bucket[index-1], bucket[index-1], points[i])
}
powers[i] = new(big.Int).Rsh(powers[i], uint(c))
}
sum.Zero()
for i := len(bucket) - 1; i >= 0; i-- {
g.Add(sum, sum, bucket[i])
g.Add(acc, acc, sum)
}
windows[j] = g.New()
windows[j].Set(acc)
j++
cur += c
}
acc.Zero()
for i := len(windows) - 1; i >= 0; i-- {
for j := uint32(0); j < c; j++ {
g.Double(acc, acc)
}
g.Add(acc, acc, windows[i])
}
return r.Set(acc), nil
}
// MapToCurve given a byte slice returns a valid G1 point.
// This mapping function implements the Simplified Shallue-van de Woestijne-Ulas method.
// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06
// Input byte slice should be a valid field element, otherwise an error is returned.
func (g *G1) MapToCurve(in []byte) (*PointG1, error) {
u, err := fromBytes(in)
if err != nil {
return nil, err
}
x, y := swuMapG1(u)
isogenyMapG1(x, y)
one := new(fe).one()
p := &PointG1{*x, *y, *one}
g.ClearCofactor(p)
return g.Affine(p), nil
} | plugin/dapp/evm/executor/vm/common/crypto/bls12381/g1.go | 0.797399 | 0.535159 | g1.go | starcoder |
package randxdr
import (
"math"
"regexp"
"strings"
goxdr "github.com/xdrpp/goxdr/xdr"
)
// Selector is function used to match fields of a goxdr.XdrType
type Selector func(string, goxdr.XdrType) bool
// Setter is a function used to set field values for a goxdr.XdrType
type Setter func(*randMarshaller, string, goxdr.XdrType)
// Preset can be used to restrict values for specific fields of a goxdr.XdrType.
type Preset struct {
Selector Selector
Setter Setter
}
// FieldEquals returns a Selector which matches on a field name by equality
func FieldEquals(toMatch string) Selector {
return func(name string, xdrType goxdr.XdrType) bool {
return name == toMatch
}
}
// FieldMatches returns a Selector which matches on a field name by regexp
func FieldMatches(r *regexp.Regexp) Selector {
return func(name string, xdrType goxdr.XdrType) bool {
return r.MatchString(name)
}
}
// IsPtr is a Selector which matches on all XDR pointer fields
var IsPtr Selector = func(name string, xdrType goxdr.XdrType) bool {
_, ok := goxdr.XdrBaseType(xdrType).(goxdr.XdrPtr)
return ok
}
// IsNestedInnerSet is a Selector which identifies nesting for the following xdr type:
// struct SCPQuorumSet
// {
// uint32 threshold;
// PublicKey validators<>;
// SCPQuorumSet innerSets<>;
// };
// supports things like: A,B,C,(D,E,F),(G,H,(I,J,K,L))
// only allows 2 levels of nesting
var IsNestedInnerSet Selector = func(name string, xdrType goxdr.XdrType) bool {
if strings.HasSuffix(name, ".innerSets") && strings.Count(name, ".innerSets[") > 0 {
_, ok := goxdr.XdrBaseType(xdrType).(goxdr.XdrVec)
return ok
}
return false
}
// SetPtrToPresent is a Setter which ensures that a given XDR pointer field is not nil
var SetPtrToPresent Setter = func(m *randMarshaller, name string, xdrType goxdr.XdrType) {
p := goxdr.XdrBaseType(xdrType).(goxdr.XdrPtr)
p.SetPresent(true)
p.XdrMarshalValue(m, name)
}
// SetVecLen returns a Setter which sets the length of a variable length
// array ( https://tools.ietf.org/html/rfc4506#section-4.13 ) to a fixed value
func SetVecLen(vecLen uint32) Setter {
return func(x *randMarshaller, field string, xdrType goxdr.XdrType) {
v := goxdr.XdrBaseType(xdrType).(goxdr.XdrVec)
v.SetVecLen(vecLen)
v.XdrMarshalN(x, field, vecLen)
}
}
// SetU32 returns a Setter which sets a uint32 XDR field to a fixed value
func SetU32(val uint32) Setter {
return func(x *randMarshaller, field string, xdrType goxdr.XdrType) {
f := goxdr.XdrBaseType(xdrType).(goxdr.XdrNum32)
f.SetU32(val)
}
}
// SetPositiveNum64 returns a Setter which sets a uint64 XDR field to a random positive value
func SetPositiveNum64() Setter {
return func(x *randMarshaller, field string, xdrType goxdr.XdrType) {
f := goxdr.XdrBaseType(xdrType).(goxdr.XdrNum64)
f.SetU64(uint64(x.rand.Int63n(math.MaxInt64)))
}
}
// SetPositiveNum32 returns a Setter which sets a uint32 XDR field to a random positive value
func SetPositiveNum32() Setter {
return func(x *randMarshaller, field string, xdrType goxdr.XdrType) {
f := goxdr.XdrBaseType(xdrType).(goxdr.XdrNum32)
f.SetU32(uint32(x.rand.Int31n(math.MaxInt32)))
}
} | randxdr/presets.go | 0.604983 | 0.727951 | presets.go | starcoder |
package decoder
import (
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"reflect"
)
func NewLayerDecoder(decodingLayers ...gopacket.DecodingLayer) *LayerDecoder {
ld := &LayerDecoder{
DecodingLayerMap: make(map[gopacket.LayerType]gopacket.DecodingLayer),
}
for _, dl := range decodingLayers {
ld.PutDecodingLayer(dl)
}
ld.df = ld
return ld
}
type LayerDecoder struct {
DecodingLayerMap map[gopacket.LayerType]gopacket.DecodingLayer
df gopacket.DecodeFeedback
Truncated bool
}
func (ld *LayerDecoder) SetTruncated() {
ld.Truncated = true
}
func (ld *LayerDecoder) PutDecodingLayer(d gopacket.DecodingLayer) {
for _, layerType := range d.CanDecode().LayerTypes() {
ld.DecodingLayerMap[layerType] = d
}
}
func (ld *LayerDecoder) GetDecodingLayerByType(layerType gopacket.LayerType) (gopacket.DecodingLayer, bool) {
d, ok := ld.DecodingLayerMap[layerType]
return d, ok
}
func (ld *LayerDecoder) GetFirstLayerType(linkType layers.LinkType) gopacket.LayerType {
for k, _ := range ld.DecodingLayerMap {
f1 := layers.LinkTypeMetadata[linkType].DecodeWith
f2 := gopacket.DecodersByLayerName[k.String()]
if reflect.ValueOf(f1) == reflect.ValueOf(f2) {
return k
}
}
return gopacket.LayerTypeZero
}
// DecodeLayers decodes data starting at firstLayer, appending each decoded
// layer type to decoded. It resets the Truncated flag, and returns an
// UnsupportedLayerType error when decoding stopped at a layer for which no
// decoding layer is registered.
func (ld *LayerDecoder) DecodeLayers(data []byte, firstLayer gopacket.LayerType, decoded *[]gopacket.LayerType) error {
	ld.Truncated = false
	stoppedAt, err := ld.Decoder(data, firstLayer, decoded)
	if stoppedAt == gopacket.LayerTypeZero {
		return err
	}
	return gopacket.UnsupportedLayerType(stoppedAt)
}
// Decoder decodes successive layers of data starting at firstLayer, appending
// each successfully decoded layer type to decoded. It returns
// gopacket.LayerTypeZero with a nil error on full success, the unsupported
// layer type (nil error) when no decoding layer is registered for it, or a
// non-nil error from a decoding layer.
func (ld *LayerDecoder) Decoder(data []byte, firstLayer gopacket.LayerType, decoded *[]gopacket.LayerType) (gopacket.LayerType, error) {
	// Reuse the caller's slice storage.
	*decoded = (*decoded)[:0]
	layerType := firstLayer
	decoder, ok := ld.GetDecodingLayerByType(firstLayer)
	if !ok {
		return firstLayer, nil
	}
	for {
		err := decoder.DecodeFromBytes(data, ld.df)
		if err != nil {
			return gopacket.LayerTypeZero, err
		}
		*decoded = append(*decoded, layerType)
		nextLayerType := decoder.NextLayerType()
		// By default the IPv4 layer would hand a fragmented packet's payload
		// to a fragment/segment layer. To account for fragmented packets, the
		// first fragment's payload (MF set, offset 0) is decoded as the
		// encapsulated protocol instead.
		if layerType == layers.LayerTypeIPv4 {
			ipv4DecodingLayer, _ := ld.GetDecodingLayerByType(layers.LayerTypeIPv4)
			ipv4Layer := ipv4DecodingLayer.(*layers.IPv4)
			// NOTE(review): `== 1` relies on IPv4MoreFragments having the
			// numeric value 1; `!= 0` would express the bit test more
			// robustly — confirm before changing.
			if ipv4Layer.Flags&layers.IPv4MoreFragments == 1 && ipv4Layer.FragOffset == 0 {
				nextLayerType = ipv4Layer.Protocol.LayerType()
			}
		}
		layerType = nextLayerType
		data = decoder.LayerPayload()
		if len(data) == 0 {
			break
		}
		decoder, ok = ld.GetDecodingLayerByType(layerType)
		if !ok {
			return layerType, nil
		}
	}
	return gopacket.LayerTypeZero, nil
} | engine/decoder/decoder.go | 0.735547 | 0.421611 | decoder.go | starcoder |
package shamir
// Package shamir implements Shamir secret sharing over finite fields and secret sharing over the integers.
// In addition, facilities are offered to perform computations on shares of secrets.
import (
"crypto/rand"
"errors"
"math/big"
)
var (
	// ErrorNoShares indicates an empty share slice was supplied.
	ErrorNoShares = errors.New("Empty share slice given")
	// ErrorTooFewShares indicates fewer than Degree+1 shares were supplied.
	ErrorTooFewShares = errors.New("Too few shares given")
	// ErrorIncompatibleShares indicates shares with differing field sizes,
	// degrees, or X coordinates were combined.
	ErrorIncompatibleShares = errors.New("Attempted to combine shares with different parameters")
	// ErrorFractionalSecret indicates integer reconstruction produced a
	// non-integer result (typically caused by incompatible shares).
	ErrorFractionalSecret = errors.New("Reconstruction of the secret failed")
)
// A Share is a share of a secret. If FieldSize == nil, it is a share over the integers, otherwise
// it is a Shamir secret share over a finite field.
type Share struct {
	// FieldSize is the prime modulus for finite-field shares; nil for integer shares.
	FieldSize *big.Int
	// Factor is the n! multiplier applied to integer-shared secrets
	// (set by ShareIntegers); nil for finite-field shares.
	Factor *big.Int
	// Degree is the degree of the sharing polynomial; Degree+1 shares reconstruct.
	Degree int
	// X is the evaluation point of this share (1-based).
	X int
	// Y is the sharing polynomial evaluated at X.
	Y *big.Int
}
// ShareFiniteField shares a secret over a finite field of integers modulo fieldSize.
// The caller must ensure that fieldSize is prime.
// It produces a configurable number of shares using a polynomial of given degree. Note that
// degree+1 shares are required for reconstruction of the secret.
func ShareFiniteField(secret *big.Int, fieldSize *big.Int, degree int, nShares int) []Share {
	// Draw the non-constant polynomial coefficients uniformly from the field.
	coefficients := make([]*big.Int, degree)
	for i := range coefficients {
		var err error
		coefficients[i], err = rand.Int(rand.Reader, fieldSize)
		if err != nil {
			// A crypto/rand failure is unrecoverable; the previous code
			// ignored it and would have panicked later on a nil coefficient.
			panic("shamir: reading randomness failed: " + err.Error())
		}
	}
	shares := make([]Share, nShares)
	for i := range shares {
		shares[i].FieldSize = fieldSize
		shares[i].Degree = degree
		shares[i].X = i + 1
		shares[i].Y = big.NewInt(0).Set(secret)
		// compute f(i+1) == secret + sum(j) coeff[j] * (i+1)^(j+1)
		for j := range coefficients {
			term := big.NewInt(int64(i + 1))
			term.Exp(term, big.NewInt(int64(j+1)), nil)
			term.Mul(term, coefficients[j])
			shares[i].Y.Add(shares[i].Y, term)
		}
		shares[i].Y.Mod(shares[i].Y, fieldSize)
	}
	return shares
}
// ShareIntegers shares a secret over the integers. It requires a known upper bound on the secret
// and will provide statSecParam bits of statistical security.
// It produces a configurable number of shares using a polynomial of given degree. Note that
// degree+1 shares are required for reconstruction of the secret.
func ShareIntegers(secret *big.Int, secretUpperBound *big.Int, statSecParam int, degree int, nShares int) []Share {
	// Coefficients are drawn from [0, 2^statSecParam * nShares^2 * secretUpperBound).
	coefficientUpperBound := big.NewInt(2)
	coefficientUpperBound.
		Exp(coefficientUpperBound, big.NewInt(int64(statSecParam)), nil).
		Mul(coefficientUpperBound, big.NewInt(int64(nShares*nShares))).
		Mul(coefficientUpperBound, secretUpperBound)
	coefficients := make([]*big.Int, degree)
	for i := range coefficients {
		var err error
		coefficients[i], err = rand.Int(rand.Reader, coefficientUpperBound)
		if err != nil {
			// A crypto/rand failure is unrecoverable; the previous code
			// ignored it and would have panicked later on a nil coefficient.
			panic("shamir: reading randomness failed: " + err.Error())
		}
	}
	shares := make([]Share, nShares)
	nFactorial := factorial(int64(nShares))
	// Scale the secret by n! so ShareCombine can divide it back out exactly.
	secret = big.NewInt(0).Mul(secret, nFactorial)
	for i := range shares {
		shares[i].Degree = degree
		shares[i].Factor = nFactorial
		shares[i].X = i + 1
		shares[i].Y = big.NewInt(0).Set(secret)
		// compute f(i+1) == secret + sum(j) coeff[j] * (i+1)^(j+1)
		for j := range coefficients {
			term := big.NewInt(int64(i + 1))
			term.Exp(term, big.NewInt(int64(j+1)), nil)
			term.Mul(term, coefficients[j])
			shares[i].Y.Add(shares[i].Y, term)
		}
	}
	return shares
}
// ShareCombine combines a set of shares of the same secret and recovers the secret.
// If too few shares are given, or the shares are incompatible, an error is returned instead.
// Only the first Degree+1 shares are used for reconstruction.
func ShareCombine(shares []Share) (*big.Int, error) {
	// Check that we have enough shares and that they're compatible
	if len(shares) == 0 {
		return nil, ErrorNoShares
	}
	if len(shares) <= shares[0].Degree {
		return nil, ErrorTooFewShares
	}
	for i := 1; i != len(shares); i++ {
		if !equalOrBothNil(shares[0].FieldSize, shares[i].FieldSize) || shares[0].Degree != shares[i].Degree {
			return nil, ErrorIncompatibleShares
		}
	}
	// Reconstruct the secret using en.wikipedia.org/wiki/Shamir's_Secret_Sharing#Computationally_efficient_approach
	// (Lagrange interpolation at x = 0, computed over the rationals).
	secret := big.NewRat(0, 1)
	term := big.NewRat(0, 1)
	for i := 0; i <= shares[0].Degree; i++ {
		term.SetInt(shares[i].Y)
		for j := 0; j <= shares[0].Degree; j++ {
			if i == j {
				continue
			}
			// Lagrange basis factor x_j / (x_j - x_i), evaluated at x = 0.
			term.Mul(term, big.NewRat(int64(shares[j].X), int64(shares[j].X-shares[i].X)))
		}
		secret.Add(secret, term)
	}
	if shares[0].FieldSize != nil {
		// Rationals auto-normalize, but can't take into account the inversion rules in
		// a finite field. We have to do this manually.
		// NOTE(review): ModInverse returns nil when the denominator is not
		// invertible mod FieldSize (non-prime modulus) — that would panic
		// here; confirm the primality contract covers this.
		return big.NewInt(0).Mod(secret.Num().Mul(
			secret.Num(),
			secret.Denom().ModInverse(secret.Denom(), shares[0].FieldSize),
		), shares[0].FieldSize), nil
	} else {
		// If incompatible shares were used, this will result in a non-integer
		if !secret.IsInt() {
			return nil, ErrorFractionalSecret
		}
		// Rationals auto-normalize, so if it's integer, we can just use the numerator
		// (divided by the n! factor ShareIntegers multiplied in).
		return big.NewInt(0).Div(secret.Num(), shares[0].Factor), nil
	}
}
// ShareAdd adds shares of two secrets to produce a share of the sum of the secrets.
// It requires a set of shares with equal X values, degrees, and field sizes.
func ShareAdd(shares []Share) (Share, error) {
	if len(shares) == 0 {
		return Share{}, ErrorNoShares
	}
	sum := Share{
		FieldSize: shares[0].FieldSize,
		Degree:    shares[0].Degree,
		X:         shares[0].X,
		Y:         big.NewInt(0).Set(shares[0].Y),
	}
	// Copy the factor instead of aliasing shares[0].Factor (consistent with
	// ShareMul), so later mutation of the result cannot corrupt the input.
	if shares[0].Factor != nil {
		sum.Factor = big.NewInt(0).Set(shares[0].Factor)
	}
	for i := 1; i != len(shares); i++ {
		if !equalOrBothNil(shares[0].FieldSize, shares[i].FieldSize) || shares[0].Degree != shares[i].Degree || shares[0].X != shares[i].X {
			return Share{}, ErrorIncompatibleShares
		}
		sum.Y.Add(sum.Y, shares[i].Y)
		if sum.FieldSize != nil {
			sum.Y.Mod(sum.Y, sum.FieldSize)
		}
	}
	return sum, nil
}
// ShareMul multiplies shares of two secrets to produce a share of the product of the secrets.
// It requires a set of shares with equal X values, degrees, and field sizes.
// Note that the degree of the product is the sum of the degrees of the factors.
func ShareMul(shares []Share) (Share, error) {
	if len(shares) == 0 {
		return Share{}, ErrorNoShares
	}
	product := Share{
		FieldSize: shares[0].FieldSize,
		Degree:    shares[0].Degree,
		X:         shares[0].X,
		Y:         big.NewInt(0).Set(shares[0].Y),
	}
	if shares[0].Factor != nil {
		product.Factor = big.NewInt(0).Set(shares[0].Factor)
	}
	for i := 1; i != len(shares); i++ {
		incompatible := !equalOrBothNil(shares[0].FieldSize, shares[i].FieldSize) ||
			shares[0].Degree != shares[i].Degree ||
			shares[0].X != shares[i].X
		if incompatible {
			return Share{}, ErrorIncompatibleShares
		}
		product.Y.Mul(product.Y, shares[i].Y)
		if product.FieldSize != nil {
			product.Y.Mod(product.Y, product.FieldSize)
		}
		// Multiplying polynomial evaluations adds the underlying degrees.
		product.Degree += shares[i].Degree
		if product.Factor != nil {
			product.Factor.Mul(product.Factor, shares[i].Factor)
		}
	}
	return product, nil
}
func equalOrBothNil(a, b *big.Int) bool {
if a == nil && b == nil {
return true
}
if a == nil || b == nil {
return false
}
return a.Cmp(b) == 0
}
// factorial returns n! computed as the product 1*2*...*n via big.Int.MulRange.
func factorial(n int64) *big.Int {
	return big.NewInt(0).MulRange(1, n)
} | secretsharing.go | 0.832169 | 0.491212 | secretsharing.go | starcoder |
package day19
import (
"math/rand"
"aoc/internal/geo3d"
)
// Scanner is a beacon scanner at a known position together with the beacon
// positions it has observed.
type Scanner struct {
	// Position is the scanner's location.
	Position geo3d.Pos
	// Beacons holds the observed beacon positions.
	Beacons []geo3d.Pos
	// rotations memoizes the result of Rotations.
	rotations []*Scanner
}
// NewScanner returns a scanner at the given position seeded with the given beacons.
func NewScanner(position geo3d.Pos, beacons ...geo3d.Pos) *Scanner {
	s := &Scanner{
		Position: position,
		Beacons:  make([]geo3d.Pos, 0, 32),
	}
	s.Add(beacons...)
	return s
}
// Add appends the given beacon positions to the scanner's beacon list.
func (c *Scanner) Add(beacons ...geo3d.Pos) {
	c.Beacons = append(c.Beacons, beacons...)
}
// IsInRange reports whether p lies within the scanner's 1000-unit cube on
// every axis (inclusive bounds).
func (c *Scanner) IsInRange(p geo3d.Pos) bool {
	dx := p.X - c.Position.X
	dy := p.Y - c.Position.Y
	dz := p.Z - c.Position.Z
	return dx >= -1000 && dx <= 1000 &&
		dy >= -1000 && dy <= 1000 &&
		dz >= -1000 && dz <= 1000
}
// HasBeacon reports whether the scanner observed a beacon exactly at p.
func (c *Scanner) HasBeacon(p geo3d.Pos) bool {
	for i := range c.Beacons {
		if c.Beacons[i] == p {
			return true
		}
	}
	return false
}
// Transform returns a copy of the scanner with its position and every beacon
// mapped through t.
func (c *Scanner) Transform(t geo3d.Transform) *Scanner {
	transformed := NewScanner(c.Position.Transform(t))
	for _, beacon := range c.Beacons {
		transformed.Add(beacon.Transform(t))
	}
	return transformed
}
// Rotations returns the scanner transformed by every rotation in
// geo3d.Rotations, memoizing the result on the first call.
// NOTE(review): the cache is populated lazily without synchronization — not
// safe for concurrent callers; confirm single-goroutine use.
func (c *Scanner) Rotations() []*Scanner {
	if c.rotations != nil {
		return c.rotations
	}
	c.rotations = make([]*Scanner, len(geo3d.Rotations))
	for i, t := range geo3d.Rotations {
		c.rotations[i] = c.Transform(t)
	}
	return c.rotations
}
// Move returns a copy of the scanner translated by offset (position and all beacons).
func (c *Scanner) Move(offset geo3d.Pos) *Scanner {
	moved := NewScanner(c.Position.Add(offset))
	for _, beacon := range c.Beacons {
		moved.Add(beacon.Add(offset))
	}
	return moved
}
// Field is the merged picture: all placed scanners plus the set of every
// known beacon position.
type Field struct {
	// Scanners holds every scanner placed into the field.
	Scanners []*Scanner
	// Beacons is the set of beacon positions known to the field.
	Beacons map[geo3d.Pos]struct{}
}
// NewField returns a field pre-populated with the given scanners and their beacons.
func NewField(scanners ...*Scanner) *Field {
	f := &Field{
		Scanners: make([]*Scanner, 0, 64),
		Beacons:  make(map[geo3d.Pos]struct{}, 4096),
	}
	f.Add(scanners...)
	return f
}
// Add registers the scanners with the field and indexes all their beacons.
func (c *Field) Add(scanners ...*Scanner) {
	c.Scanners = append(c.Scanners, scanners...)
	for _, scanner := range scanners {
		for _, beacon := range scanner.Beacons {
			c.Beacons[beacon] = struct{}{}
		}
	}
}
// HasBeacon reports whether the field already knows a beacon at the given position.
func (c *Field) HasBeacon(beacon geo3d.Pos) bool {
	_, ok := c.Beacons[beacon]
	return ok
}
// CanFit returns true if s has at least 12 beacons shared with the field and no
// conflicting beacons.
func (c *Field) CanFit(s *Scanner) bool {
	// Count beacons of s already known to the field; stop once the required
	// 12 overlaps are found.
	n := 0
	for _, beacon := range s.Beacons {
		if c.HasBeacon(beacon) {
			n++
			if n == 12 {
				break
			}
		}
	}
	if n < 12 {
		return false
	}
	// Consistency check: every field beacon inside s's range must also have
	// been observed by s; otherwise this placement conflicts with the field.
	for beacon := range c.Beacons {
		if !s.IsInRange(beacon) {
			continue
		}
		if !s.HasBeacon(beacon) {
			return false
		}
	}
	return true
}
// MaxManhattan returns the largest Manhattan distance between any two
// scanners in the field, or 0 for fewer than two scanners.
func (c *Field) MaxManhattan() int {
	max := 0
	for i := 0; i < len(c.Scanners)-1; i++ {
		// Start at i+1: the original started at 1, re-checking pairs already
		// visited and comparing i with itself; each unordered pair needs only
		// one visit (Manhattan distance is symmetric).
		for j := i + 1; j < len(c.Scanners); j++ {
			n := c.Scanners[i].Position.Sub(c.Scanners[j].Position).Manhattan()
			if n > max {
				max = n
			}
		}
	}
	return max
}
// Merge aligns all scanners into a single field. The first scanner anchors the
// coordinate system; the remaining ones are rotated and translated until they
// fit. ok reports whether every scanner could be merged.
func Merge(scanners ...*Scanner) (field *Field, ok bool) {
	field = NewField()
	if len(scanners) == 0 {
		ok = true
		return
	}
	field.Add(scanners[0])
	scanners[0] = nil
	merges, needMerges := 1, len(scanners)
	// tryRotations picks its anchor beacon at random, so a pass can miss a
	// valid alignment; allow retries, but give up after many consecutive
	// fruitless passes instead of looping forever (the original spun forever
	// on unmergeable input, making its ok=false return unreachable).
	const maxStalledPasses = 1000
	stalled := 0
	for merges < needMerges && stalled < maxStalledPasses {
		progressed := false
		for i, scanner := range scanners {
			if scanner == nil {
				continue
			}
			r := tryRotations(field, scanner)
			if r == nil {
				continue
			}
			field.Add(r)
			scanners[i] = nil
			merges++
			progressed = true
		}
		if progressed {
			stalled = 0
		} else {
			stalled++
		}
	}
	ok = merges == needMerges
	return
}
// tryRotations attempts to fit scanner s into the field: for every beacon b
// already in the field and every rotation of s, it anchors a randomly chosen
// beacon of the rotated scanner onto b and checks whether the translated
// scanner is consistent with the field. It returns the fitted (rotated and
// translated) scanner, or nil if no tried alignment fits.
// NOTE(review): the anchor beacon is chosen at random, so a single call may
// miss a valid alignment; callers are expected to retry.
func tryRotations(field *Field, s *Scanner) *Scanner {
	rotations := s.Rotations()
	for b := range field.Beacons {
		for _, r := range rotations {
			// Pick one beacon of the rotated scanner and translate it onto b.
			beacon := r.Beacons[rand.Int()%len(r.Beacons)]
			offset := b.Sub(beacon)
			m := r.Move(offset)
			if field.CanFit(m) {
				return m
			}
		}
	}
	return nil
} | go/2021/day19/day19.go | 0.604282 | 0.499146 | day19.go | starcoder |
package com
import (
"errors"
"math"
"reflect"
)
// PowInt is int type of math.Pow function.
func PowInt(x int, y int) int {
if y <= 0 {
return 1
} else {
if y%2 == 0 {
sqrt := PowInt(x, y/2)
return sqrt * sqrt
} else {
return PowInt(x, y-1) * x
}
}
}
// Round return float64 to the nearest whole number
func Round(val float64) (newVal float64) {
_, div := math.Modf(val)
if div >= 0.0 {
if div >= 0.5 {
return math.Ceil(val)
}
return math.Floor(val)
}
if div >= -0.5 {
return math.Ceil(val)
}
return math.Floor(val)
}
// RoundFloat Return rounded version of x with prec precision.
func RoundFloat(val float64, places int) float64 {
var round float64
pow := math.Pow(10, float64(places))
digit := pow * val
_, div := math.Modf(digit)
if div >= 0.5 {
round = math.Ceil(digit)
} else {
round = math.Floor(digit)
}
return round / pow
}
// DoArithmetic performs arithmetic operations (+,-,*,/) using reflection to
// determine the type of the two terms.
func DoArithmetic(a, b interface{}, op rune) (interface{}, error) {
av := reflect.ValueOf(a)
bv := reflect.ValueOf(b)
var ai, bi int64
var af, bf float64
var au, bu uint64
switch av.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
ai = av.Int()
switch bv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
bi = bv.Int()
case reflect.Float32, reflect.Float64:
af = float64(ai) // may overflow
ai = 0
bf = bv.Float()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
bu = bv.Uint()
if ai >= 0 {
au = uint64(ai)
ai = 0
} else {
bi = int64(bu) // may overflow
bu = 0
}
default:
return nil, errors.New("Can't apply the operator to the values")
}
case reflect.Float32, reflect.Float64:
af = av.Float()
switch bv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
bf = float64(bv.Int()) // may overflow
case reflect.Float32, reflect.Float64:
bf = bv.Float()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
bf = float64(bv.Uint()) // may overflow
default:
return nil, errors.New("Can't apply the operator to the values")
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
au = av.Uint()
switch bv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
bi = bv.Int()
if bi >= 0 {
bu = uint64(bi)
bi = 0
} else {
ai = int64(au) // may overflow
au = 0
}
case reflect.Float32, reflect.Float64:
af = float64(au) // may overflow
au = 0
bf = bv.Float()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
bu = bv.Uint()
default:
return nil, errors.New("Can't apply the operator to the values")
}
case reflect.String:
as := av.String()
if bv.Kind() == reflect.String && op == '+' {
bs := bv.String()
return as + bs, nil
}
return nil, errors.New("Can't apply the operator to the values")
default:
return nil, errors.New("Can't apply the operator to the values")
}
switch op {
case '+':
if ai != 0 || bi != 0 {
return ai + bi, nil
} else if af != 0 || bf != 0 {
return af + bf, nil
} else if au != 0 || bu != 0 {
return au + bu, nil
}
return 0, nil
case '-':
if ai != 0 || bi != 0 {
return ai - bi, nil
} else if af != 0 || bf != 0 {
return af - bf, nil
} else if au != 0 || bu != 0 {
return au - bu, nil
}
return 0, nil
case '*':
if ai != 0 || bi != 0 {
return ai * bi, nil
} else if af != 0 || bf != 0 {
return af * bf, nil
} else if au != 0 || bu != 0 {
return au * bu, nil
}
return 0, nil
case '/':
if bi != 0 {
return ai / bi, nil
} else if bf != 0 {
return af / bf, nil
} else if bu != 0 {
return au / bu, nil
}
return nil, errors.New("Can't divide the value by 0")
default:
return nil, errors.New("There is no such an operation")
}
} | math.go | 0.673299 | 0.468547 | math.go | starcoder |
package binary_search
import (
"fmt"
)
type IBST interface {
Insert(data int)
Delete(data int)
Find(data int) (currNode *node, parentNode *node)
}
type ITraverser interface {
Size()
InorderDFS()
PreorderDFS()
PostorderDFS()
BFS()
}
type node struct {
data int
left *node
right *node
}
type bst struct {
root *node
}
// Insert adds an item to the Binary Search Tree
func (b *bst) Insert(data int) {
if b.root == nil {
node := &node{data: data}
b.root = node
return
}
insert(b.root, nil, data)
}
func (b *bst) Find(data int) (currNode *node, parentNode *node) {
return find(b.root, nil, data)
}
func find(currNode *node, parentNode *node, data int) (node *node, parent *node) {
if currNode == nil {
return
}
if currNode.data > data {
node, parent = find(currNode.left, currNode, data)
} else if currNode.data < data {
node, parent = find(currNode.right, currNode, data)
} else {
node = currNode
parent = parentNode
return node, parent
}
return node, parent
}
// Delete performs a delete operation on any given data.
func (b *bst) Delete(data int) {
node, parent := b.Find(data)
// leaf node
if node.left == nil && node.right == nil {
if parent.left.data == data {
parent.left = nil
}
if parent.right.data == data {
parent.right = nil
}
return
}
// one child
if node.left != nil && node.right == nil {
parent.left = node.left
}
if node.right != nil && node.left == nil {
parent.right = node.right
}
// two children
if node.left != nil && node.right != nil {
if parent.data >= data {
child := node.left
child.right = node.right
parent.left = child
} else {
child := node.left
child.right = node.right
parent.right = child
}
}
}
func insert(currNode *node, parent *node, data int) {
if currNode != nil {
if currNode.data >= data {
insert(currNode.left, currNode, data)
} else {
insert(currNode.right, currNode, data)
}
} else {
newNode := &node{data: data}
if parent.data >= data {
parent.left = newNode
} else {
parent.right = newNode
}
}
}
func (b *bst) InorderDFS() []*node {
if b.root == nil {
return nil
}
traversalList := make([]*node, 0)
inorderDFS(b.root, &traversalList)
nodePrint(traversalList)
return traversalList
}
func inorderDFS(node *node, traversalList *[]*node) {
if node == nil {
return
}
inorderDFS(node.left, traversalList)
*traversalList = append(*traversalList, node)
inorderDFS(node.right, traversalList)
}
func nodePrint(arr []*node) {
for i := range arr {
fmt.Printf("%#v, ", arr[i].data)
}
fmt.Println()
fmt.Println()
} | datastructures/trees/binary-search/binary-search-tree.go | 0.672762 | 0.519156 | binary-search-tree.go | starcoder |
package runtime
import "reflect"
func Add(left interface{}, right interface{}) interface{} {
switch typedLeft := left.(type) {
case int:
switch typedRight := right.(type) {
case int:
return int(typedLeft) + typedRight
case float64:
return float64(typedLeft) + typedRight
default:
panic("can not add int with " + reflect.TypeOf(right).String())
}
case float64:
switch typedRight := right.(type) {
case int:
return typedLeft + float64(typedRight)
case float64:
return typedLeft + typedRight
default:
panic("can not add float with " + reflect.TypeOf(right).String())
}
default:
panic("add does not support " + reflect.TypeOf(left).String())
}
}
func Subtract(left interface{}, right interface{}) interface{} {
switch typedLeft := left.(type) {
case int:
switch typedRight := right.(type) {
case int:
return int(typedLeft) - typedRight
case float64:
return float64(typedLeft) - typedRight
default:
panic("can not subtract int with " + reflect.TypeOf(right).String())
}
case float64:
switch typedRight := right.(type) {
case int:
return typedLeft - float64(typedRight)
case float64:
return typedLeft - typedRight
default:
panic("can not subtract float with " + reflect.TypeOf(right).String())
}
default:
panic("subtract does not support " + reflect.TypeOf(left).String())
}
}
func Multiply(left interface{}, right interface{}) interface{} {
switch typedLeft := left.(type) {
case int:
switch typedRight := right.(type) {
case int:
return int(typedLeft) * typedRight
case float64:
return float64(typedLeft) * typedRight
default:
panic("can not multiply int with " + reflect.TypeOf(right).String())
}
case float64:
switch typedRight := right.(type) {
case int:
return typedLeft * float64(typedRight)
case float64:
return typedLeft * typedRight
default:
panic("can not multiply float with " + reflect.TypeOf(right).String())
}
default:
panic("multiply does not support " + reflect.TypeOf(left).String())
}
}
func Divide(left interface{}, right interface{}) interface{} {
switch typedLeft := left.(type) {
case int:
switch typedRight := right.(type) {
case int:
return float64(typedLeft) / float64(typedRight)
case float64:
return float64(typedLeft) / typedRight
default:
panic("can not divide int with " + reflect.TypeOf(right).String())
}
case float64:
switch typedRight := right.(type) {
case int:
return typedLeft / float64(typedRight)
case float64:
return typedLeft / typedRight
default:
panic("can not divide float with " + reflect.TypeOf(right).String())
}
default:
panic("divide does not support " + reflect.TypeOf(left).String())
}
}
func NegativeOf(val interface{}) interface{} {
switch typedVal := val.(type) {
case int:
return -typedVal
case float64:
return -typedVal
default:
panic("negativeOf does not support " + reflect.TypeOf(val).String())
}
} | docstore/runtime/arithmetic.go | 0.713931 | 0.518059 | arithmetic.go | starcoder |
package vm
import (
"fmt"
"strings"
"github.com/dbaumgarten/yodk/pkg/number"
)
// VariableFromString tries to create a variable of the correct type from the given string.
// If the string is enclosed in quotes, the string between the quotes is used as string-value for the variable.
// Else, it tries to parse the given string into a number. If that also fails, the plain given string is used as value.
func VariableFromString(str string) *Variable {
var value interface{}
if strings.HasPrefix(str, "\"") && strings.HasSuffix(str, "\"") && len(str) >= 2 {
value = str[1 : len(str)-1]
} else {
deci, err := number.FromString(str)
if err == nil {
value = deci
} else {
value = str
}
}
return &Variable{
Value: value,
}
}
// VariableFromType creates a new variable from the given input. The type of the variable is decided by the input-type.
func VariableFromType(inp interface{}) (*Variable, error) {
var value interface{}
switch v := inp.(type) {
case string:
value = v
case *string:
value = *v
case int:
value = number.FromInt(v)
case int32:
value = number.FromInt(int(v))
case int64:
value = number.FromInt(int(v))
case float32:
value = number.FromFloat64(float64(v))
case float64:
value = number.FromFloat64(v)
default:
return nil, fmt.Errorf("Can not convert type %T to variable", inp)
}
return &Variable{
Value: value,
}, nil
}
// Variable represents a yolol-variable during the execution
type Variable struct {
Value interface{}
}
// IsNumber returns true if the variable represents a number
func (v *Variable) IsNumber() bool {
_, isNum := v.Value.(number.Number)
_, isNump := v.Value.(*number.Number)
return isNum || isNump
}
// IsString returns true if the variable represents a string
func (v *Variable) IsString() bool {
_, isStr := v.Value.(string)
_, isStrp := v.Value.(string)
return isStr || isStrp
}
// SameType returns true if the variable has the same type as the given variable
func (v *Variable) SameType(other *Variable) bool {
return v.IsNumber() == other.IsNumber()
}
// TypeName returns the name of the type this variable has
func (v *Variable) TypeName() string {
if v.IsString() {
return "string"
}
return "number"
}
// Equals checks if this variable equals another variable
func (v *Variable) Equals(other *Variable) bool {
if !v.SameType(other) {
return false
}
if v.IsString() {
return v.String() == other.String()
}
if v.IsNumber() {
return v.Number() == other.Number()
}
return false
}
func (v *Variable) String() string {
if val, isString := v.Value.(string); isString {
return val
}
return ""
}
// Repr returns the string-representation of the variable.
// If the variable is of type string, its value is enclosed in quotes.
func (v *Variable) Repr() string {
if v.IsNumber() {
return v.Itoa()
}
return "\"" + v.String() + "\""
}
// Itoa returns the string-representation of the number stored in the variable
func (v *Variable) Itoa() string {
if val, isNum := v.Value.(number.Number); isNum {
return val.String()
}
return ""
}
// Number returns the value of the variable as number
func (v *Variable) Number() number.Number {
if val, isNum := v.Value.(number.Number); isNum {
return val
}
return number.Zero
}
// Bool returns the truth-value of a variable
func (v *Variable) Bool() *Variable {
if v.IsString() {
return &Variable{
Value: number.Zero,
}
}
return v
} | pkg/vm/variable.go | 0.701611 | 0.432003 | variable.go | starcoder |
package ondatra
import (
opb "github.com/openconfig/ondatra/proto"
)
// ISIS is a representation of a IS-IS config on the ATE.
// All With* setters mutate the underlying config in place and return the
// receiver to allow call chaining.
type ISIS struct {
	pb *opb.ISISConfig // wrapped protobuf config
}
// IPReachabilityConfig is the IS-IS config for a simulated network pool.
type IPReachabilityConfig struct {
	pb *opb.IPReachability // wrapped protobuf config
}
// ISReachabilityConfig is a representation of the simulated topology of IS-IS nodes.
type ISReachabilityConfig struct {
	pb *opb.ISReachability // wrapped protobuf config
}
// WithName assigns a name to the IS-IS reachability config.
// It should be unique among all reachability configs on the interface.
func (i *ISReachabilityConfig) WithName(name string) *ISReachabilityConfig {
	i.pb.Name = name
	return i
}
// ISISNode is a representation of a simulated IS-IS node.
type ISISNode struct {
	pb *opb.ISReachability_Node // wrapped protobuf config
}
// ISISNodeLink is a representation of a simulated IS-IS node link.
type ISISNodeLink struct {
	pb *opb.ISReachability_Node_Link // wrapped protobuf config
}
// ISISRoutes represents the routes exported by an IS-IS node.
type ISISRoutes struct {
	pb *opb.ISReachability_Node_Routes // wrapped protobuf config
}
// WithLevelL1 sets the IS-IS level to L1.
func (i *ISIS) WithLevelL1() *ISIS {
	i.pb.Level = opb.ISISConfig_L1
	return i
}
// WithLevelL2 sets the IS-IS level to L2.
func (i *ISIS) WithLevelL2() *ISIS {
	i.pb.Level = opb.ISISConfig_L2
	return i
}
// WithNetworkTypeBroadcast sets the IS-IS network type to broadcast.
func (i *ISIS) WithNetworkTypeBroadcast() *ISIS {
	i.pb.NetworkType = opb.ISISConfig_BROADCAST
	return i
}
// WithNetworkTypePointToPoint sets the IS-IS network type to point-to-point.
func (i *ISIS) WithNetworkTypePointToPoint() *ISIS {
	i.pb.NetworkType = opb.ISISConfig_POINT_TO_POINT
	return i
}
// WithMetric sets the IS-IS link metric.
func (i *ISIS) WithMetric(metric uint32) *ISIS {
	i.pb.Metric = metric
	return i
}
// WithAreaID sets the area id for the device.
func (i *ISIS) WithAreaID(areaID string) *ISIS {
	i.pb.AreaId = areaID
	return i
}
// WithWideMetricEnabled sets whether the wide metric is enabled.
func (i *ISIS) WithWideMetricEnabled(enabled bool) *ISIS {
	i.pb.EnableWideMetric = enabled
	return i
}
// WithHelloPaddingEnabled sets whether hello padding is enabled.
func (i *ISIS) WithHelloPaddingEnabled(enabled bool) *ISIS {
	i.pb.EnableHelloPadding = enabled
	return i
}
// WithAuthMD5 enables MD5 authentication using the given key.
func (i *ISIS) WithAuthMD5(key string) *ISIS {
	i.pb.AuthType = opb.ISISConfig_MD5
	i.pb.AuthKey = key
	return i
}
// WithAuthPassword enables password authentication using the given key.
func (i *ISIS) WithAuthPassword(key string) *ISIS {
	i.pb.AuthType = opb.ISISConfig_PASSWORD
	i.pb.AuthKey = key
	return i
}
// WithAuthDisabled disables authentication and clears any configured key.
func (i *ISIS) WithAuthDisabled() *ISIS {
	i.pb.AuthType = opb.ISISConfig_AUTH_TYPE_UNSPECIFIED
	i.pb.AuthKey = ""
	return i
}
// WithTEEnabled sets whether traffic engineering is enabled.
func (i *ISIS) WithTEEnabled(enabled bool) *ISIS {
	i.pb.EnableTe = enabled
	return i
}
// WithTERouterID sets the TE router id.
func (i *ISIS) WithTERouterID(routerID string) *ISIS {
	i.pb.TeRouterId = routerID
	return i
}
// WithCapabilityRouterID sets the ISIS capability router id.
func (i *ISIS) WithCapabilityRouterID(routerID string) *ISIS {
	i.pb.CapabilityRouterId = routerID
	return i
}
// WithLSPsDiscarded sets whether to discard learned LSP info.
func (i *ISIS) WithLSPsDiscarded(discard bool) *ISIS {
	i.pb.DiscardLsps = discard
	return i
}
// WithPriority sets the priority of the interface.
func (i *ISIS) WithPriority(priority uint32) *ISIS {
	i.pb.InterfacePriority = priority
	return i
}
// WithHelloInterval sets the interval in seconds between hello packets.
func (i *ISIS) WithHelloInterval(intervalSec uint32) *ISIS {
	i.pb.HelloIntervalSec = intervalSec
	return i
}
// WithDeadInterval sets the interval in seconds before considering that the adjacency is down.
func (i *ISIS) WithDeadInterval(intervalSec uint32) *ISIS {
	i.pb.DeadIntervalSec = intervalSec
	return i
}
// SegmentRouting creates or returns the ISIS Segment Routing configuration.
// The underlying config is lazily initialized on first access.
func (i *ISIS) SegmentRouting() *ISISSegmentRouting {
	if i.pb.SegmentRouting == nil {
		i.pb.SegmentRouting = &opb.ISISSegmentRouting{}
	}
	return &ISISSegmentRouting{pb: i.pb.SegmentRouting}
}
// AddISReachability adds an ISReachability config to the ISIS config.
// Each call appends a new, empty config.
func (i *ISIS) AddISReachability() *ISReachabilityConfig {
	isR := &ISReachabilityConfig{pb: &opb.ISReachability{}}
	i.pb.IsReachability = append(i.pb.IsReachability, isR.pb)
	return isR
}
// ClearISReachabilities clears ISReachability configs from the ISIS config.
func (i *ISIS) ClearISReachabilities() *ISIS {
	i.pb.IsReachability = nil
	return i
}
// WithIPReachabilityInternal sets route origin as internal.
func (ip *IPReachabilityConfig) WithIPReachabilityInternal() *IPReachabilityConfig {
	ip.pb.RouteOrigin = opb.IPReachability_INTERNAL
	return ip
}
// WithIPReachabilityExternal sets route origin as external.
func (ip *IPReachabilityConfig) WithIPReachabilityExternal() *IPReachabilityConfig {
	ip.pb.RouteOrigin = opb.IPReachability_EXTERNAL
	return ip
}
// WithIPReachabilityMetric sets metric for the reachable IPs.
func (ip *IPReachabilityConfig) WithIPReachabilityMetric(metric uint32) *IPReachabilityConfig {
	ip.pb.Metric = metric
	return ip
}
// WithIPReachabilityAlgorithm sets SR algorithm for the reachable IPs.
func (ip *IPReachabilityConfig) WithIPReachabilityAlgorithm(algo uint32) *IPReachabilityConfig {
	ip.pb.Algorithm = algo
	return ip
}
// WithSIDIndexLabelEnabled enables or disables SID/Index/Label for the reachable IPs.
func (ip *IPReachabilityConfig) WithSIDIndexLabelEnabled(enabled bool) *IPReachabilityConfig {
	ip.pb.EnableSidIndexLabel = enabled
	return ip
}
// WithIPReachabilitySIDIndexLabel sets SID/Index/Label for the reachable IPs.
func (ip *IPReachabilityConfig) WithIPReachabilitySIDIndexLabel(label uint32) *IPReachabilityConfig {
	ip.pb.SidIndexLabel = label
	return ip
}
// WithFlagReadvertise sets the Readvertise(R) flag.
func (ip *IPReachabilityConfig) WithFlagReadvertise(enabled bool) *IPReachabilityConfig {
	ip.pb.FlagReadvertise = enabled
	return ip
}
// WithFlagNodeSID sets the NodeSID(N) flag.
func (ip *IPReachabilityConfig) WithFlagNodeSID(enabled bool) *IPReachabilityConfig {
	ip.pb.FlagNodeSid = enabled
	return ip
}
// WithFlagNoPHP sets the NoPHP(P) flag.
func (ip *IPReachabilityConfig) WithFlagNoPHP(enabled bool) *IPReachabilityConfig {
	ip.pb.FlagNoPhp = enabled
	return ip
}
// WithFlagExplicitNull sets the ExplicitNull(E) flag.
func (ip *IPReachabilityConfig) WithFlagExplicitNull(enabled bool) *IPReachabilityConfig {
	ip.pb.FlagExplicitNull = enabled
	return ip
}
// WithFlagValue sets the Value(V) flag.
func (ip *IPReachabilityConfig) WithFlagValue(enabled bool) *IPReachabilityConfig {
	ip.pb.FlagValue = enabled
	return ip
}
// WithFlagLocal sets the Local(L) flag.
func (ip *IPReachabilityConfig) WithFlagLocal(enabled bool) *IPReachabilityConfig {
	ip.pb.FlagLocal = enabled
	return ip
}
// AddISISNode adds a simulated IS-IS node with ingress/egress metrics defaulted to 10.
// Each call appends a new node to the reachability config.
func (isR *ISReachabilityConfig) AddISISNode() *ISISNode {
	node := &ISISNode{pb: &opb.ISReachability_Node{
		EgressMetric:  10,
		IngressMetric: 10,
	}}
	isR.pb.Nodes = append(isR.pb.Nodes, node.pb)
	return node
}
// ClearISISNodes clears simulated IS-IS nodes.
func (isR *ISReachabilityConfig) ClearISISNodes() *ISReachabilityConfig {
	isR.pb.Nodes = nil
	return isR
}
// WithIngressMetric sets the metric on the ingress link.
func (node *ISISNode) WithIngressMetric(metric uint32) *ISISNode {
	node.pb.IngressMetric = metric
	return node
}
// WithEgressMetric sets the metric on the egress link.
func (node *ISISNode) WithEgressMetric(metric uint32) *ISISNode {
	node.pb.EgressMetric = metric
	return node
}
// WithSystemID sets the system id for the simulated node.
func (node *ISISNode) WithSystemID(id string) *ISISNode {
	node.pb.SystemId = id
	return node
}
// WithTEEnabled enables TE on the simulated IS-IS node.
func (node *ISISNode) WithTEEnabled(enabled bool) *ISISNode {
	node.pb.EnableTe = enabled
	return node
}
// WithWideMetricEnabled enables wide metric on the simulated IS-IS node.
func (node *ISISNode) WithWideMetricEnabled(enabled bool) *ISISNode {
	node.pb.EnableWideMetric = enabled
	return node
}
// WithTERouterID sets the TE router ID for the node.
func (node *ISISNode) WithTERouterID(id string) *ISISNode {
	node.pb.TeRouterId = id
	return node
}
// WithCapabilityRouterID sets the capability router ID for the node.
func (node *ISISNode) WithCapabilityRouterID(id string) *ISISNode {
	node.pb.CapabilityRouterId = id
	return node
}
// SegmentRouting creates or returns the ISIS Segment Routing configuration for the node.
func (node *ISISNode) SegmentRouting() *ISISSegmentRouting {
if node.pb.SegmentRouting == nil {
node.pb.SegmentRouting = &opb.ISISSegmentRouting{}
}
return &ISISSegmentRouting{pb: node.pb.SegmentRouting}
}
// ISISSegmentRouting holds the segment routing configuration.
type ISISSegmentRouting struct {
	pb *opb.ISISSegmentRouting // underlying protobuf message
}
// AdjacencySID holds the Adjacency SID configuration.
type AdjacencySID struct {
	pb *opb.ISISSegmentRouting_AdjacencySID // underlying protobuf message
}
// SIDRange holds the SR range configuration.
type SIDRange struct {
	pb *opb.ISISSegmentRouting_SIDRange // underlying protobuf message
}
// WithEnabled sets whether segment routing is enabled.
func (sr *ISISSegmentRouting) WithEnabled(enabled bool) *ISISSegmentRouting {
	sr.pb.Enable = enabled
	return sr
}
// AdjacencySID gets or creates an AdjacencySID configuration with the
// Local(L) and Value(V) flags set by default on first creation.
func (sr *ISISSegmentRouting) AdjacencySID() *AdjacencySID {
	if sr.pb.AdjacencySid == nil {
		sr.pb.AdjacencySid = &opb.ISISSegmentRouting_AdjacencySID{FlagValue: true, FlagLocal: true}
	}
	return &AdjacencySID{pb: sr.pb.AdjacencySid}
}
// WithAdjacencySID sets the SID for the adjacency.
func (as *AdjacencySID) WithAdjacencySID(sid string) *AdjacencySID {
	as.pb.Sid = sid
	return as
}
// WithFlagAddressFamily sets the AddressFamily(F) flag. It also records that
// the flag was set explicitly (presumably so a false value can be
// distinguished from the proto zero value — TODO confirm against the proto).
func (as *AdjacencySID) WithFlagAddressFamily(enabled bool) *AdjacencySID {
	as.pb.OverrideFlagAddressFamily = true
	as.pb.FlagAddressFamily = enabled
	return as
}
// WithFlagBackup sets the Backup(B) flag.
func (as *AdjacencySID) WithFlagBackup(enabled bool) *AdjacencySID {
	as.pb.FlagBackup = enabled
	return as
}
// WithFlagValue sets the Value(V) flag. [Default = true]
func (as *AdjacencySID) WithFlagValue(enabled bool) *AdjacencySID {
	as.pb.FlagValue = enabled
	return as
}
// WithFlagLocal sets the Local(L) flag. [Default = true]
func (as *AdjacencySID) WithFlagLocal(enabled bool) *AdjacencySID {
	as.pb.FlagLocal = enabled
	return as
}
// WithFlagSet sets the Set(S) flag.
func (as *AdjacencySID) WithFlagSet(enabled bool) *AdjacencySID {
	as.pb.FlagSet = enabled
	return as
}
// WithFlagPersistent sets the Persistent(P) flag.
func (as *AdjacencySID) WithFlagPersistent(enabled bool) *AdjacencySID {
	as.pb.FlagPersistent = enabled
	return as
}
// WithSIDIndexLabel sets the SID index label.
func (sr *ISISSegmentRouting) WithSIDIndexLabel(label uint32) *ISISSegmentRouting {
	sr.pb.SidIndexLabel = label
	return sr
}
// WithFlagReadvertise sets the Readvertise(R) flag.
func (sr *ISISSegmentRouting) WithFlagReadvertise(enabled bool) *ISISSegmentRouting {
	sr.pb.FlagReadvertise = enabled
	return sr
}
// WithFlagNodeSID sets the NodeSID(N) flag.
func (sr *ISISSegmentRouting) WithFlagNodeSID(enabled bool) *ISISSegmentRouting {
	sr.pb.FlagNodeSid = enabled
	return sr
}
// WithFlagNoPHP sets the NoPHP(P) flag.
func (sr *ISISSegmentRouting) WithFlagNoPHP(enabled bool) *ISISSegmentRouting {
	sr.pb.FlagNoPhp = enabled
	return sr
}
// WithFlagExplicitNull sets the ExplicitNull(E) flag.
func (sr *ISISSegmentRouting) WithFlagExplicitNull(enabled bool) *ISISSegmentRouting {
	sr.pb.FlagExplicitNull = enabled
	return sr
}
// WithFlagValue sets the Value(V) flag.
func (sr *ISISSegmentRouting) WithFlagValue(enabled bool) *ISISSegmentRouting {
	sr.pb.FlagValue = enabled
	return sr
}
// WithFlagLocal sets the Local(L) flag.
func (sr *ISISSegmentRouting) WithFlagLocal(enabled bool) *ISISSegmentRouting {
	sr.pb.FlagLocal = enabled
	return sr
}
// WithAlgorithms sets the SR algorithms, replacing any previous list.
func (sr *ISISSegmentRouting) WithAlgorithms(algos ...uint32) *ISISSegmentRouting {
	sr.pb.Algorithms = algos
	return sr
}
// WithPrefixSID sets the prefix SID, specified in CIDR notation.
func (sr *ISISSegmentRouting) WithPrefixSID(sid string) *ISISSegmentRouting {
	sr.pb.PrefixSid = sid
	return sr
}
// AddSRGBRange appends a new, empty SRGB range and returns it for configuration.
func (sr *ISISSegmentRouting) AddSRGBRange() *SIDRange {
	srr := &SIDRange{pb: &opb.ISISSegmentRouting_SIDRange{}}
	sr.pb.SrgbRange = append(sr.pb.SrgbRange, srr.pb)
	return srr
}
// ClearSRGBRanges clears all previously added SRGB ranges.
func (sr *ISISSegmentRouting) ClearSRGBRanges() *ISISSegmentRouting {
	sr.pb.SrgbRange = nil
	return sr
}
// AddSRLBRange appends a new, empty SRLB range and returns it for configuration.
func (sr *ISISSegmentRouting) AddSRLBRange() *SIDRange {
	srr := &SIDRange{pb: &opb.ISISSegmentRouting_SIDRange{}}
	sr.pb.SrlbRange = append(sr.pb.SrlbRange, srr.pb)
	return srr
}
// ClearSRLBRanges clears all previously added SRLB ranges.
func (sr *ISISSegmentRouting) ClearSRLBRanges() *ISISSegmentRouting {
	sr.pb.SrlbRange = nil
	return sr
}
// WithSIDStartLabel sets the first label of the SID range.
func (srr *SIDRange) WithSIDStartLabel(label uint32) *SIDRange {
	srr.pb.SidStartLabel = label
	return srr
}
// WithSIDCount sets the count of the SID labels in the range.
func (srr *SIDRange) WithSIDCount(c uint32) *SIDRange {
	srr.pb.SidCount = c
	return srr
}
// AddLink appends a new, empty simulated IS-IS node link and returns it.
func (node *ISISNode) AddLink() *ISISNodeLink {
	link := &ISISNodeLink{pb: &opb.ISReachability_Node_Link{}}
	node.pb.Links = append(node.pb.Links, link.pb)
	return link
}
// ClearLinks clears all simulated links for an IS-IS node.
func (node *ISISNode) ClearLinks() *ISISNode {
	node.pb.Links = nil
	return node
}
// WithFromIPv4 sets the IPv4 'from' address for the link.
func (link *ISISNodeLink) WithFromIPv4(ip string) *ISISNodeLink {
	link.pb.FromIpv4 = ip
	return link
}
// WithToIPv4 sets the IPv4 'to' address for the link.
func (link *ISISNodeLink) WithToIPv4(ip string) *ISISNodeLink {
	link.pb.ToIpv4 = ip
	return link
}
// WithFromIPv6 sets the IPv6 'from' address for the link.
func (link *ISISNodeLink) WithFromIPv6(ip string) *ISISNodeLink {
	link.pb.FromIpv6 = ip
	return link
}
// WithToIPv6 sets the IPv6 'to' address for the link.
func (link *ISISNodeLink) WithToIPv6(ip string) *ISISNodeLink {
	link.pb.ToIpv6 = ip
	return link
}
// RoutesIPv4 creates or returns the existing ISIS IPv4 route configuration
// (lazily initialized on first call).
func (node *ISISNode) RoutesIPv4() *ISISRoutes {
	if node.pb.RoutesIpv4 == nil {
		node.pb.RoutesIpv4 = &opb.ISReachability_Node_Routes{}
	}
	return &ISISRoutes{pb: node.pb.RoutesIpv4}
}
// WithPrefix sets the (CIDR-string) prefix for the exported routes.
func (routes *ISISRoutes) WithPrefix(prefix string) *ISISRoutes {
	routes.pb.Prefix = prefix
	return routes
}
// WithNumRoutes sets the number of exported routes.
func (routes *ISISRoutes) WithNumRoutes(numRoutes uint64) *ISISRoutes {
	routes.pb.NumRoutes = numRoutes
	return routes
}
// IPReachability creates an IP reachability configuration for the network or
// returns the existing config. The default config params are:
//   Route Origin: Internal
//   Metric: 10
func (routes *ISISRoutes) IPReachability() *IPReachabilityConfig {
	if routes.pb.Reachability == nil {
		routes.pb.Reachability = &opb.IPReachability{Metric: 10, RouteOrigin: opb.IPReachability_INTERNAL}
	}
	return &IPReachabilityConfig{pb: routes.pb.Reachability}
}
package venom
import (
"reflect"
"strconv"
"strings"
"time"
"github.com/mitchellh/mapstructure"
)
// stringToTimeDurationHookFunc returns a decode hook that converts a string
// value into a time.Duration via time.ParseDuration. Any conversion whose
// source is not a string or whose target is not time.Duration passes
// through unchanged.
func stringToTimeDurationHookFunc() mapstructure.DecodeHookFunc {
	durationType := reflect.TypeOf(time.Duration(5))
	return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
		if f.Kind() == reflect.String && t == durationType {
			return time.ParseDuration(data.(string))
		}
		return data, nil
	}
}
// stringToTimeHookFunc returns a decode hook that parses a string into a
// time.Time using the supplied layout. Non-string sources and non-time.Time
// targets pass through unchanged.
func stringToTimeHookFunc(layout string) mapstructure.DecodeHookFunc {
	return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
		if f.Kind() != reflect.String {
			return data, nil
		}
		if t != reflect.TypeOf(time.Time{}) {
			return data, nil
		}
		// Convert it by parsing
		return time.Parse(layout, data.(string))
	}
}
// stringToStringSliceHookFunc returns a decode hook that splits a string on
// sep to produce a []string. Non-string sources and non-[]string targets
// pass through unchanged; an empty string yields an empty (non-nil) slice.
func stringToStringSliceHookFunc(sep string) mapstructure.DecodeHookFunc {
	sliceType := reflect.TypeOf([]string{})
	return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
		if f.Kind() != reflect.String || t != sliceType {
			return data, nil
		}
		s := data.(string)
		if len(s) == 0 {
			return []string{}, nil
		}
		return strings.Split(s, sep), nil
	}
}
// stringToBoolSliceHookFunc returns a decode hook that parses a string such
// as "[true,false]" into a []bool. Surrounding brackets, if present, are
// stripped before splitting on sep. Non-string sources and non-[]bool
// targets pass through unchanged.
func stringToBoolSliceHookFunc(sep string) mapstructure.DecodeHookFunc {
	return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
		if f.Kind() != reflect.String || (t != reflect.TypeOf([]bool{})) {
			return data, nil
		}
		raw := data.(string)
		raw = strings.TrimPrefix(raw, "[")
		raw = strings.TrimSuffix(raw, "]")
		vals := make([]bool, 0)
		if raw == "" {
			return vals, nil
		}
		for _, s := range strings.Split(raw, sep) {
			v, err := strconv.ParseBool(s)
			if err != nil {
				return nil, err
			}
			vals = append(vals, v)
		}
		return vals, nil
	}
}
// stringToIntSliceHookFunc returns a decode hook that parses a string such
// as "[1,2,3]" into a []int. Surrounding brackets, if present, are stripped
// before splitting on sep. Non-string sources and non-[]int targets pass
// through unchanged.
func stringToIntSliceHookFunc(sep string) mapstructure.DecodeHookFunc {
	return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
		if f.Kind() != reflect.String || (t != reflect.TypeOf([]int{})) {
			return data, nil
		}
		raw := data.(string)
		raw = strings.TrimPrefix(raw, "[")
		raw = strings.TrimSuffix(raw, "]")
		vals := make([]int, 0)
		if raw == "" {
			return vals, nil
		}
		for _, s := range strings.Split(raw, sep) {
			v, err := strconv.ParseInt(s, 10, 0)
			if err != nil {
				return nil, err
			}
			vals = append(vals, int(v))
		}
		return vals, nil
	}
}
// stringToUintSliceHookFunc returns a decode hook that parses a string such
// as "[1,2,3]" into a []uint. Surrounding brackets, if present, are stripped
// before splitting on sep. Non-string sources and non-[]uint targets pass
// through unchanged.
func stringToUintSliceHookFunc(sep string) mapstructure.DecodeHookFunc {
	return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
		if f.Kind() != reflect.String || (t != reflect.TypeOf([]uint{})) {
			return data, nil
		}
		raw := data.(string)
		raw = strings.TrimPrefix(raw, "[")
		raw = strings.TrimSuffix(raw, "]")
		vals := make([]uint, 0)
		if raw == "" {
			return vals, nil
		}
		for _, s := range strings.Split(raw, sep) {
			v, err := strconv.ParseUint(s, 10, 0)
			if err != nil {
				return nil, err
			}
			vals = append(vals, uint(v))
		}
		return vals, nil
	}
}
// stringToMapStringStringHookFunc returns a decode hook that parses a string
// such as "{k1=v1,k2=v2}" into a map[string]string, splitting entries on sep
// and key/value pairs on kvsep. Parsing is delegated to parseMapStringString
// (defined elsewhere in this package). Non-string sources and
// non-map[string]string targets pass through unchanged.
func stringToMapStringStringHookFunc(sep, kvsep string) mapstructure.DecodeHookFunc {
	return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
		if f.Kind() != reflect.String || (t != reflect.TypeOf(map[string]string{})) {
			return data, nil
		}
		raw := data.(string)
		raw = strings.TrimPrefix(raw, "{")
		raw = strings.TrimSuffix(raw, "}")
		return parseMapStringString(raw, sep, kvsep)
	}
}
package set
import (
"github.com/pingcap/tidb/util/hack"
)
// StringSetWithMemoryUsage is a string set with memory usage.
type StringSetWithMemoryUsage struct {
	StringSet
	bInMap int64 // exponent of the estimated bucket count (memory grows as 2^bInMap)
}
// NewStringSetWithMemoryUsage builds a string set containing the given
// elements and returns it together with the estimated memory delta in bytes.
func NewStringSetWithMemoryUsage(ss ...string) (setWithMemoryUsage StringSetWithMemoryUsage, memDelta int64) {
	set := make(StringSet, len(ss))
	setWithMemoryUsage = StringSetWithMemoryUsage{
		StringSet: set,
		bInMap:    0,
	}
	memDelta = hack.DefBucketMemoryUsageForSetString * (1 << setWithMemoryUsage.bInMap)
	for _, s := range ss {
		memDelta += setWithMemoryUsage.Insert(s)
	}
	return setWithMemoryUsage, memDelta
}
// Insert inserts `val` into `s` and returns the estimated memory delta in
// bytes. The delta is non-zero only when the set's element count crosses the
// map load factor, i.e. when the underlying map is expected to grow.
func (s *StringSetWithMemoryUsage) Insert(val string) (memDelta int64) {
	s.StringSet.Insert(val)
	if s.Count() > (1<<s.bInMap)*hack.LoadFactorNum/hack.LoadFactorDen {
		memDelta = hack.DefBucketMemoryUsageForSetString * (1 << s.bInMap)
		s.bInMap++
	}
	return memDelta
}
// Float64SetWithMemoryUsage is a float64 set with memory usage.
type Float64SetWithMemoryUsage struct {
	Float64Set
	bInMap int64 // exponent of the estimated bucket count (memory grows as 2^bInMap)
}
// NewFloat64SetWithMemoryUsage builds a float64 set containing the given
// elements and returns it together with the estimated memory delta in bytes.
func NewFloat64SetWithMemoryUsage(ss ...float64) (setWithMemoryUsage Float64SetWithMemoryUsage, memDelta int64) {
	set := make(Float64Set, len(ss))
	setWithMemoryUsage = Float64SetWithMemoryUsage{
		Float64Set: set,
		bInMap:     0,
	}
	memDelta = hack.DefBucketMemoryUsageForSetFloat64 * (1 << setWithMemoryUsage.bInMap)
	for _, s := range ss {
		memDelta += setWithMemoryUsage.Insert(s)
	}
	return setWithMemoryUsage, memDelta
}
// Insert inserts `val` into `s` and returns the estimated memory delta in
// bytes. The delta is non-zero only when the set's element count crosses the
// map load factor, i.e. when the underlying map is expected to grow.
func (s *Float64SetWithMemoryUsage) Insert(val float64) (memDelta int64) {
	s.Float64Set.Insert(val)
	if s.Count() > (1<<s.bInMap)*hack.LoadFactorNum/hack.LoadFactorDen {
		memDelta = hack.DefBucketMemoryUsageForSetFloat64 * (1 << s.bInMap)
		s.bInMap++
	}
	return memDelta
}
// Int64SetWithMemoryUsage is an int64 set with memory usage.
type Int64SetWithMemoryUsage struct {
	Int64Set
	bInMap int64 // exponent of the estimated bucket count (memory grows as 2^bInMap)
}
// NewInt64SetWithMemoryUsage builds an int64 set containing the given
// elements and returns it together with the estimated memory delta in bytes.
func NewInt64SetWithMemoryUsage(ss ...int64) (setWithMemoryUsage Int64SetWithMemoryUsage, memDelta int64) {
	set := make(Int64Set, len(ss))
	setWithMemoryUsage = Int64SetWithMemoryUsage{
		Int64Set: set,
		bInMap:   0,
	}
	memDelta = hack.DefBucketMemoryUsageForSetInt64 * (1 << setWithMemoryUsage.bInMap)
	for _, s := range ss {
		memDelta += setWithMemoryUsage.Insert(s)
	}
	return setWithMemoryUsage, memDelta
}
// Insert inserts `val` into `s` and returns the estimated memory delta in
// bytes. The delta is non-zero only when the set's element count crosses the
// map load factor, i.e. when the underlying map is expected to grow.
func (s *Int64SetWithMemoryUsage) Insert(val int64) (memDelta int64) {
	s.Int64Set.Insert(val)
	if s.Count() > (1<<s.bInMap)*hack.LoadFactorNum/hack.LoadFactorDen {
		memDelta = hack.DefBucketMemoryUsageForSetInt64 * (1 << s.bInMap)
		s.bInMap++
	}
	return memDelta
}
package radixtree
import (
"fmt"
"strings"
)
// node is a single R-way trie node.
type node struct {
	value interface{} // value stored at this node, or nil if none
	next  []*node     // child pointers, one per possible byte value
}
const R = 256 // extended ASCII: size of each node's child array
// createNode allocates a trie node with an empty child table.
func createNode() *node {
	n := node{next: make([]*node, R)}
	return &n
}
// RadixTree is an R-way trie mapping string keys to arbitrary non-nil values.
type RadixTree struct {
	root *node // root of trie
	n    int   // number of keys in trie
}
// Get returns the value associated with the given key if the radix tree
// contains the key, or nil otherwise.
//
// Fixed: the previous implementation returned the *node itself (not its
// value), and because a typed-nil *node boxed into interface{} is non-nil,
// Contains reported true for absent keys.
func (r *RadixTree) Get(key string) interface{} {
	x := get(r.root, key, 0)
	if x == nil {
		return nil
	}
	return x.value
}
// Contains returns a boolean indicating if the radix tree contains the given key.
func (r *RadixTree) Contains(key string) bool {
	return r.Get(key) != nil
}
// get walks the trie from x following key[d:] and returns the node reached,
// or nil if the path does not exist. The nil check is required: the previous
// version dereferenced nil child pointers for absent keys.
func get(x *node, key string, d int) *node {
	if x == nil {
		return nil
	}
	if d == len(key) {
		return x
	}
	c := key[d]
	return get(x.next[c], key, d+1)
}
// Put adds the given key and value to the radix tree, overwriting the old
// value with the new value if the radix tree already contains the key.
// A nil value deletes the key.
//
// Fixed: the delete branch was missing a return, so a nil value was deleted
// and then immediately re-inserted, corrupting the key count in put.
func (r *RadixTree) Put(key string, value interface{}) {
	if value == nil {
		r.Delete(key)
		return
	}
	r.root = r.put(r.root, key, value, 0)
}
// put inserts key[d:] below x, creating nodes as needed, and returns the
// (possibly newly created) subtrie root. r.n is incremented only when a
// brand-new key is stored (the node had no value before).
func (r *RadixTree) put(x *node, key string, value interface{}, d int) *node {
	if x == nil {
		x = createNode()
	}
	if d == len(key) {
		if x.value == nil {
			r.n++
		}
		x.value = value
		return x
	}
	c := key[d]
	x.next[c] = r.put(x.next[c], key, value, d+1)
	return x
}
// Size returns the number of key-value pairs stored in the radix tree.
func (r *RadixTree) Size() int {
	return r.n
}
// IsEmpty reports whether the radix tree holds no keys.
func (r *RadixTree) IsEmpty() bool {
	return r.n == 0
}
// Delete removes the key from the radix tree if the key is present,
// pruning any subtries that become empty.
func (r *RadixTree) Delete(key string) {
	r.root = r.delete(r.root, key, 0)
}
// delete removes key[d:] from the subtrie rooted at x and returns the
// (possibly nil) replacement for x, pruning nodes that become empty.
//
// Fixed: the recursive call descended with d-1 instead of d+1, which for any
// key longer than one byte produced a negative index into key and panicked
// (or traversed the wrong path).
func (r *RadixTree) delete(x *node, key string, d int) *node {
	if x == nil {
		return nil
	}
	if d == len(key) {
		if x.value != nil {
			r.n--
		}
		x.value = nil
	} else {
		c := key[d]
		x.next[c] = r.delete(x.next[c], key, d+1)
	}
	// remove subtrie rooted at x if it is completely empty
	if x.value != nil {
		return x
	}
	for c := 0; c < R; c++ {
		if x.next[c] != nil {
			return x
		}
	}
	return nil
}
// Keys returns all keys of the radix tree.
func (r *RadixTree) Keys() []string {
	return r.KeysWithPrefix("")
}
// KeysWithPrefix returns all keys of the radix tree that start with the
// given prefix.
func (r *RadixTree) KeysWithPrefix(prefix string) []string {
	results := make([]string, 0)
	x := get(r.root, prefix, 0)
	b := []rune(prefix)
	results = collect(x, b, results)
	return results
}
// collect appends to results every key found in the subtrie rooted at x,
// using prefix as the accumulated path. It relies on the package helpers
// enqueue, makeString and deleteCharAt (defined elsewhere in this file).
func collect(x *node, prefix []rune, results []string) []string {
	if x == nil {
		return results
	}
	if x.value != nil {
		results = enqueue(results, makeString(prefix))
	}
	for c := 0; c < R; c++ {
		prefix = append(prefix, rune(c))
		results = collect(x.next[c], prefix, results)
		prefix = deleteCharAt(prefix, len(prefix)-1)
	}
	return results
}
// KeysThatMatch returns all keys of the radix tree that match the given
// pattern, where the '.' symbol is a wildcard matching any single character.
func (r *RadixTree) KeysThatMatch(pattern string) []string {
	return collectPattern(r.root, make([]rune, 0), []rune(pattern), make([]string, 0))
}
// collectPattern appends to results every key in the subtrie rooted at x
// that matches pattern, where '.' matches any single character and prefix is
// the accumulated path so far.
func collectPattern(x *node, prefix []rune, pattern []rune, results []string) []string {
	if x == nil {
		return results
	}
	d := len(prefix)
	if d == len(pattern) && x.value != nil {
		results = enqueue(results, makeString(prefix))
	}
	if d == len(pattern) {
		return results
	}
	c := pattern[d]
	if c == '.' {
		// wildcard: try every possible child
		for ch := 0; ch < R; ch++ {
			prefix = append(prefix, rune(ch))
			results = collectPattern(x.next[ch], prefix, pattern, results)
			prefix = deleteCharAt(prefix, len(prefix)-1)
		}
	} else {
		prefix = append(prefix, rune(c))
		results = collectPattern(x.next[c], prefix, pattern, results)
		prefix = deleteCharAt(prefix, len(prefix)-1)
	}
	return results
}
// LongestPrefixOf returns the longest key in the radix tree that is a prefix
// of the given query, or the empty string when no key is a prefix of it.
func (r *RadixTree) LongestPrefixOf(query string) string {
	runes := []rune(query)
	n := longestPrefixOf(r.root, runes, 0, -1)
	if n < 0 {
		return ""
	}
	return string(runes[:n])
}
// longestPrefixOf walks the subtrie rooted at x along query[d:] and returns
// the length of the longest key found on that path, or the incoming length
// (-1 on the initial call) if none is found.
func longestPrefixOf(x *node, query []rune, d int, length int) int {
	if x == nil {
		return length
	}
	if x.value != nil {
		length = d
	}
	if d == len(query) {
		return length
	}
	c := query[d]
	return longestPrefixOf(x.next[c], query, d+1, length)
}
// PrintStructure prints a compact textual rendering of the radix tree's
// structure to standard output.
func (r *RadixTree) PrintStructure() {
	var b strings.Builder
	printStructure(r.root, 0, &b)
	fmt.Println(b.String())
}
// printStructure renders the subtrie rooted at x into b. Chains of
// single-child nodes are written on one line; branch points start a new
// indented line (depth d) per child.
func printStructure(x *node, d int, b *strings.Builder) {
	runes := make([]rune, 0)
	children := make([]*node, 0)
	for c := 0; c < R; c++ {
		if x.next[c] != nil {
			runes = append(runes, rune(c))
			children = append(children, x.next[c])
		}
	}
	l := len(runes)
	if l == 1 {
		b.WriteRune(runes[0])
		printStructure(children[0], d+1, b)
	} else if l > 1 {
		for i, r := range runes {
			b.WriteString("\n")
			b.WriteString(ws(d))
			b.WriteRune(r)
			child := children[i]
			printStructure(child, d+1, b)
		}
	}
}
// ws returns a string of n consecutive spaces, used for indentation.
func ws(n int) string {
	return strings.Repeat(" ", n)
}
package csv
import (
"context"
"encoding/csv"
"fmt"
"io"
"os"
"strconv"
"github.com/pbanos/botanic/feature"
"github.com/pbanos/botanic/set"
)
/*
Writer is an interface for a set to which samples
can be written to.
*/
type Writer interface {
// Write will attempt to write the given number
// of samples and will return the actually written
// number of samples and an error (if not all samples
// could be written)
Write(context.Context, []set.Sample) (int, error)
// Count returns the total number of samples written
// to the writer
Count() int
// Flush ensures any pending written operations finish
// before returning. It returns an error if that cannot
// be ensured.
Flush() error
}
/*
SetGenerator is a function that takes a slice of samples
and generates a set with them.
*/
type SetGenerator func([]set.Sample) set.Set
type csvWriter struct {
count int
features []feature.Feature
w *csv.Writer
}
/*
ReadSet takes an io.Reader for a CSV stream, a slice of features and a
SetGenerator and returns set.Set built with the SetGenerator and the
samples parsed from the reader or an error.
The header or first row of the CSV content is expected to consist of the names
of the features in the given slice. The rest of the rows should consist of valid
values for the all features and/or the '?' string to indicate an undefined value.
*/
func ReadSet(reader io.Reader, features []feature.Feature, sg SetGenerator) (set.Set, error) {
	samples := []set.Sample{}
	// Accumulate every sample, then hand the full slice to the generator.
	err := ReadSetBySample(reader, features, func(_ int, s set.Sample) (bool, error) {
		samples = append(samples, s)
		return true, nil
	})
	if err != nil {
		return nil, err
	}
	return sg(samples), nil
}
/*
ReadSetBySample takes an io.Reader for a CSV stream, a slice of features and a
lambda function on an integer and a set.Sample that returns a boolean value.
It parses the samples from the reader and for each it calls the lambda function
with the sample and its index as parameters. If the lambda function returns true,
it will continue processing the next sample, otherwise it will stop. An error is
returned if something goes wrong when reading the file or parsing a sample.
The header or first row of the CSV content is expected to consist of the names
of the features in the given slice. The rest of the rows should consist of valid
values for the all features and/or the '?' string to indicate an undefined value.
*/
func ReadSetBySample(reader io.Reader, features []feature.Feature, lambda func(int, set.Sample) (bool, error)) error {
	featuresByName := featureSliceToMap(features)
	r := csv.NewReader(reader)
	header, err := r.Read()
	if err != nil {
		return fmt.Errorf("reading header: %v", err)
	}
	// The header fixes the column order used for every following row.
	features, err = parseFeaturesFromCSVHeader(header, featuresByName)
	if err != nil {
		return err
	}
	// l is the 1-based CSV line number (line 1 is the header), so the
	// lambda receives l-2 as the 0-based sample index.
	for l := 2; ; l++ {
		row, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("reading body: %v", err)
		}
		sample, err := parseSampleFromCSVRow(row, features)
		if err != nil {
			return fmt.Errorf("parsing line %d from %v: %v", l, reader, err)
		}
		ok, err := lambda(l-2, sample)
		if err != nil {
			return err
		}
		if !ok {
			break
		}
	}
	return nil
}
/*
ReadSetFromFilePath takes a filepath string, a slice of features and a SetGenerator,
opens the file to which the filepath points to and uses ReadSet to return a
set.Set or an error read from it. It will return an error if the given filepath
cannot be opened for reading. An empty filepath reads from standard input.
*/
func ReadSetFromFilePath(filepath string, features []feature.Feature, sg SetGenerator) (set.Set, error) {
	var f *os.File
	var err error
	if filepath == "" {
		f = os.Stdin
	} else {
		f, err = os.Open(filepath)
		if err != nil {
			return nil, fmt.Errorf("reading training set: %v", err)
		}
	}
	defer f.Close()
	set, err := ReadSet(f, features, sg)
	if err != nil {
		err = fmt.Errorf("parsing CSV file %s: %v", filepath, err)
	}
	return set, err
}
/*
ReadSetBySampleFromFilePath behaves like ReadSetBySample but reads from the
file named by filepath (or from os.Stdin when filepath is empty). For every
sample parsed from the CSV stream the lambda is invoked with the sample's
0-based index and the sample itself; iteration continues while the lambda
returns true and stops on false or on error. An error is returned when the
file cannot be opened or when reading/parsing fails.
The header or first row of the CSV content is expected to consist of the names
of the features in the given slice. The rest of the rows should consist of valid
values for the all features and/or the '?' string to indicate an undefined value.
*/
func ReadSetBySampleFromFilePath(filepath string, features []feature.Feature, lambda func(int, set.Sample) (bool, error)) error {
	f := os.Stdin
	if filepath != "" {
		var err error
		f, err = os.Open(filepath)
		if err != nil {
			return fmt.Errorf("reading training set: %v", err)
		}
	}
	defer f.Close()
	return ReadSetBySample(f, features, lambda)
}
/*
NewWriter takes an io.Writer and a slice of feature.Features and
returns a Writer that will write any samples on the io.Writer.
The CSV header row (the feature names) is written immediately.
*/
func NewWriter(writer io.Writer, features []feature.Feature) (Writer, error) {
	w := csv.NewWriter(writer)
	record := make([]string, len(features))
	for i, f := range features {
		record[i] = f.Name()
	}
	err := w.Write(record)
	if err != nil {
		return nil, fmt.Errorf("writing CSV header: %v", err)
	}
	return &csvWriter{features: features, w: w}, nil
}
/*
WriteCSVSet takes a writer, a set.Set and a slice of features and
dumps to the writer the set in CSV format, specifying only the features
in the given slice for the samples. It returns an error if something
went wrong when wrting to the writer, or codifying the samples.
*/
func WriteCSVSet(ctx context.Context, writer io.Writer, s set.Set, features []feature.Feature) error {
	cw, err := NewWriter(writer, features)
	if err != nil {
		return err
	}
	samples, err := s.Samples(ctx)
	if err != nil {
		return err
	}
	_, err = cw.Write(ctx, samples)
	if err != nil {
		return err
	}
	return cw.Flush()
}
// parseFeaturesFromCSVHeader maps each header column name to its feature and
// returns the features in column order. An unknown name is tolerated only in
// the last column (it is silently skipped); anywhere else it is an error.
func parseFeaturesFromCSVHeader(header []string, features map[string]feature.Feature) ([]feature.Feature, error) {
	featureOrder := []feature.Feature{}
	for i, name := range header {
		f, ok := features[name]
		if ok {
			featureOrder = append(featureOrder, f)
		} else {
			if i != len(header)-1 {
				return nil, fmt.Errorf("parsing header: reference to unknown feature %s", name)
			}
		}
	}
	return featureOrder, nil
}
// parseSampleFromCSVRow converts one CSV row into a set.Sample, interpreting
// "?" as an undefined (nil) value. Continuous features are parsed as
// float64; all other features keep the raw string. Each value is validated
// against its feature before being stored.
func parseSampleFromCSVRow(row []string, featureOrder []feature.Feature) (set.Sample, error) {
	featureValues := make(map[string]interface{})
	for i, f := range featureOrder {
		v := row[i]
		var value interface{}
		var err error
		var ok bool
		if v != "?" {
			if _, ok = f.(*feature.ContinuousFeature); ok {
				value, err = strconv.ParseFloat(v, 64)
				if err != nil {
					return nil, fmt.Errorf("converting %s to float64: %v", v, err)
				}
			} else {
				value = v
			}
		}
		if ok, err = f.Valid(value); !ok {
			return nil, fmt.Errorf("invalid value %v of type %T for feature %s: %v", value, value, f.Name(), err)
		}
		featureValues[f.Name()] = value
	}
	return set.NewSample(featureValues), nil
}
// Count returns the total number of samples written so far.
func (cw *csvWriter) Count() int {
	return cw.count
}
// Write attempts to write the given samples in order. It returns the number
// of samples actually written together with the first error encountered, if
// any.
func (cw *csvWriter) Write(ctx context.Context, samples []set.Sample) (int, error) {
	for i, s := range samples {
		if err := cw.WriteSample(s); err != nil {
			return i, err
		}
	}
	return len(samples), nil
}
// WriteSample encodes one sample as a CSV row in the writer's feature order,
// rendering undefined (nil) values as "?", and increments the written count
// on success.
func (cw *csvWriter) WriteSample(sample set.Sample) error {
	record := make([]string, len(cw.features))
	for j, f := range cw.features {
		v, err := sample.ValueFor(f)
		if err != nil {
			return err
		}
		if v == nil {
			record[j] = "?"
		} else {
			record[j] = fmt.Sprintf("%v", v)
		}
	}
	err := cw.w.Write(record)
	if err != nil {
		return fmt.Errorf("writing CSV row for sample %d: %v", cw.count+1, err)
	}
	cw.count++
	return nil
}
// Flush forces any buffered CSV rows out to the underlying writer and
// returns any error that occurred during writing or flushing.
func (cw *csvWriter) Flush() error {
	cw.w.Flush()
	return cw.w.Error()
}
func featureSliceToMap(features []feature.Feature) map[string]feature.Feature {
result := make(map[string]feature.Feature)
for _, f := range features {
result[f.Name()] = f
}
return result
} | set/csv/csv.go | 0.573917 | 0.515254 | csv.go | starcoder |
package buffer
import (
"bytes"
"unicode"
"unicode/utf8"
"github.com/satran/e/utils"
)
// RangeFunc is a callback invoked with the start and end cursors of a range.
type RangeFunc func(from Cursor, to Cursor)
// A Cursor represents a position within a buffer.
type Cursor struct {
	Line    *Line // line the cursor is on
	LineNum int   // index of Line within the buffer
	Boffset int   // byte offset of the cursor within Line.Data
}
// Range is a span of buffer positions from Start to End.
type Range struct {
	Start Cursor
	End   Cursor
}
// Before reports whether the cursor is before other.
func (c Cursor) Before(other Cursor) bool {
	return c.LineNum < other.LineNum ||
		(c.LineNum == other.LineNum && c.Boffset < other.Boffset)
}
// After reports whether the cursor is after other.
func (c Cursor) After(other Cursor) bool {
	return c.LineNum > other.LineNum ||
		(c.LineNum == other.LineNum && c.Boffset > other.Boffset)
}
// LeftOf reports whether the cursor is to the left of other, regardless of line.
func (c Cursor) LeftOf(other Cursor) bool {
	return c.Boffset < other.Boffset
}
// RightOf reports whether the cursor is to the right of other, regardless of line.
func (c Cursor) RightOf(other Cursor) bool {
	return c.Boffset > other.Boffset
}
// Above reports whether the cursor is above other, regardless of column.
func (c Cursor) Above(other Cursor) bool {
	return c.LineNum < other.LineNum
}
// Below reports whether the cursor is below other, regardless of column.
func (c Cursor) Below(other Cursor) bool {
	return c.LineNum > other.LineNum
}
// Equals reports whether the cursor position equals that of other.
// This function avoids a check for pointer equality of the Line pointer.
func (c Cursor) Equals(other Cursor) bool {
	return c.LineNum == other.LineNum &&
		c.Boffset == other.Boffset
}
// RuneUnder returns the rune under the current cursor and its width in bytes.
func (c *Cursor) RuneUnder() (rune, int) {
	return utf8.DecodeRune(c.Line.Data[c.Boffset:])
}
// RuneBefore returns the rune before the current cursor and its width in bytes.
func (c *Cursor) RuneBefore() (rune, int) {
	return utf8.DecodeLastRune(c.Line.Data[:c.Boffset])
}
// RuneAfter returns the rune after the one under the cursor and its width in
// bytes, or (utf8.RuneError, 0) when the cursor is at the end of the line.
//
// Fixed: the previous implementation decoded at Boffset+1, which lands
// mid-rune whenever the rune under the cursor is multi-byte UTF-8 and
// yielded utf8.RuneError. Behavior for single-byte (ASCII) runes is
// unchanged.
func (c *Cursor) RuneAfter() (rune, int) {
	if c.Boffset == len(c.Line.Data) {
		return utf8.RuneError, 0
	}
	_, rlen := utf8.DecodeRune(c.Line.Data[c.Boffset:])
	return utf8.DecodeRune(c.Line.Data[c.Boffset+rlen:])
}
// FirstLine reports whether the cursor is at the first line of the buffer.
func (c *Cursor) FirstLine() bool {
	return c.Line.Prev == nil
}
// LastLine reports whether the cursor is at the last line of the buffer.
func (c *Cursor) LastLine() bool {
	return c.Line.Next == nil
}
// EOL reports whether the cursor is at the end of the current line.
func (c *Cursor) EOL() bool {
	return c.Boffset == len(c.Line.Data)
}
// BOL reports whether the cursor is at the beginning of the current line.
func (c *Cursor) BOL() bool {
	return c.Boffset == 0
}
// EOF reports whether the cursor is at the end of the file.
func (c *Cursor) EOF() bool {
	return c.LastLine() && c.EOL()
}
// BOF reports whether the cursor is at the beginning of the file.
func (c *Cursor) BOF() bool {
	return c.FirstLine() && c.BOL()
}
// Distance returns the signed distance between the cursor and another in
// bytes: positive when b is after a, negative when b is before a. Each line
// break counts as one byte.
func (a Cursor) Distance(b Cursor) int {
	s := 1
	// Normalize so a precedes b, remembering the sign of the result.
	if b.LineNum < a.LineNum {
		a, b = b, a
		s = -1
	} else if a.LineNum == b.LineNum && b.Boffset < a.Boffset {
		a, b = b, a
		s = -1
	}
	n := 0
	for a.Line != b.Line {
		// Remaining bytes on a's line plus one for the newline.
		n += len(a.Line.Data) - a.Boffset + 1
		a.Line = a.Line.Next
		a.Boffset = 0
	}
	n += b.Boffset - a.Boffset
	return n * s
}
// VoffsetCoffset returns a visual offset (vo, accounting for per-rune
// display widths via utils.RuneAdvanceLen) and a character offset (co, in
// runes) for the cursor's position within its line.
func (c *Cursor) VoffsetCoffset() (vo, co int) {
	data := c.Line.Data[:c.Boffset]
	for len(data) > 0 {
		r, rlen := utf8.DecodeRune(data)
		data = data[rlen:]
		co += 1
		vo += utils.RuneAdvanceLen(r, vo)
	}
	return
}
// ExtractBytes returns a slice of up to n bytes starting at the current
// cursor position, crossing line boundaries as needed. Each line break
// contributes a single '\n' byte and counts toward n.
func (c *Cursor) ExtractBytes(n int) []byte {
	var buf bytes.Buffer
	offset := c.Boffset
	line := c.Line
	for n > 0 && line != nil {
		switch {
		case offset < line.Len():
			// Take as much of the current line as n allows.
			nb := line.Len() - offset
			if n < nb {
				nb = n
			}
			buf.Write(line.Data[offset : offset+nb])
			n -= nb
			offset += nb
		case offset == line.Len():
			// End of line: emit a newline unless this is the last line.
			if line.Next != nil {
				buf.WriteByte('\n')
			}
			offset = 0
			line = line.Next
			n -= 1
		default:
			panic("unreachable")
		}
	}
	return buf.Bytes()
}
// NextRune moves the cursor one rune forward. When wrap is true, the cursor
// wraps to the beginning of the next line once the end of the current one is
// reached. It reports whether the motion succeeded.
func (c *Cursor) NextRune(wrap bool) bool {
	if c.EOF() {
		return false
	}
	if !c.EOL() {
		_, rlen := c.RuneUnder()
		c.Boffset += rlen
		return true
	}
	if !wrap {
		return false
	}
	c.Line = c.Line.Next
	c.LineNum++
	c.Boffset = 0
	return true
}
// PrevRune moves the cursor one rune backward. When wrap is true, the cursor
// wraps to the end of the previous line once the beginning of the current
// one is reached. It reports whether the motion succeeded.
func (c *Cursor) PrevRune(wrap bool) bool {
	if c.BOF() {
		return false
	}
	if !c.BOL() {
		_, rlen := c.RuneBefore()
		c.Boffset -= rlen
		return true
	}
	if !wrap {
		return false
	}
	c.Line = c.Line.Prev
	c.LineNum--
	c.Boffset = len(c.Line.Data)
	return true
}
// NextLine moves the cursor to the next line.
// It reports whether the motion succeeded.
func (c *Cursor) NextLine() bool {
if c.LastLine() {
return false
}
c.Line = c.Line.Next
c.LineNum = c.LineNum + 1
c.Boffset = -1
return true
}
// PrevLine moves the cursor to the previous line.
// It reports whether the motion succeeded.
// NOTE(review): Boffset is set to -1, apparently a "resolve the column
// later" sentinel handled elsewhere — confirm before relying on it.
func (c *Cursor) PrevLine() bool {
	if c.FirstLine() {
		return false
	}
	c.LineNum--
	c.Line = c.Line.Prev
	c.Boffset = -1
	return true
}
// MoveBOL moves the cursor to the beginning of the current line.
func (c *Cursor) MoveBOL() {
	c.Boffset = 0
}

// MoveEOL moves the cursor to the end of the current line
// (one past the last byte of the line's data).
func (c *Cursor) MoveEOL() {
	c.Boffset = len(c.Line.Data)
}
// WordUnderCursor returns the bytes of the word the cursor is currently on,
// or nil when the cursor sits on whitespace (or next to a byte sequence
// that does not decode as UTF-8). For a non-word rune it returns just the
// single byte under the cursor.
// NOTE(review): the single-byte return truncates multi-byte non-word runes —
// confirm callers only hit that path for ASCII punctuation.
func (c *Cursor) WordUnderCursor() []byte {
	end, beg := *c, *c
	var (
		r    rune
		rlen int
	)
	r, _ = beg.RuneUnder()
	if unicode.IsSpace(r) {
		// not on a word at all
		return nil
	}
	if !beg.BOL() {
		r, rlen = beg.RuneBefore()
		if r == utf8.RuneError {
			return nil
		}
		// move the `beg` cursor back to the start of the word
		for utils.IsWord(r) && !beg.BOL() {
			beg.Boffset -= rlen
			r, rlen = beg.RuneBefore()
		}
	}
	// set the end cursor to the same position as the start cursor
	end.Boffset = beg.Boffset
	// check if the word is just a single character
	r, rlen = end.RuneAfter()
	if !utils.IsWord(r) {
		return c.Line.Data[end.Boffset:end.Boffset+1]
	}
	// move to the rune after the end of the word
	for utils.IsWord(r) && !end.EOL() {
		end.Boffset += rlen
		r, rlen = end.RuneAfter()
	}
	end.NextRune(false)
	if beg.Boffset == end.Boffset {
		return nil
	}
	return c.Line.Data[beg.Boffset:end.Boffset]
}
// NextRuneFunc moves the cursor forward until the current rune satisfies
// condition f, wrapping across line boundaries as needed.
// Returns true if the move was successful, false if EOF was reached.
func (c *Cursor) NextRuneFunc(f func(rune) bool) bool {
	for {
		if c.EOL() {
			if c.LastLine() {
				// nothing left to scan
				return false
			} else {
				// wrap to the beginning of the next line
				c.Line = c.Line.Next
				c.LineNum++
				c.Boffset = 0
				continue
			}
		}
		r, rlen := c.RuneUnder()
		// advance within the current line while f is not satisfied
		for !f(r) && !c.EOL() {
			c.Boffset += rlen
			r, rlen = c.RuneUnder()
		}
		if c.EOL() {
			// no match on this line; try the next one
			continue
		}
		break
	}
	return true
}
// NextWord moves the cursor forward to the beginning of the next word,
// skipping the rest of the current word, if any. Returns true if the
// move was successful, false if EOF was reached.
func (c *Cursor) NextWord() bool {
	isNotSpace := func(r rune) bool {
		return !unicode.IsSpace(r)
	}
	r, _ := c.RuneUnder()
	if isNotSpace(r) {
		// Lowercase word motion differentiates words consisting of
		// (A-Z0-9_) and any other non-whitespace character. Skip until
		// we find either the other word type or whitespace.
		if utils.IsWord(r) {
			c.NextRuneFunc(func(r rune) bool {
				return !utils.IsWord(r) || unicode.IsSpace(r)
			})
		} else {
			c.NextRuneFunc(func(r rune) bool {
				return utils.IsWord(r) || unicode.IsSpace(r)
			})
		}
	}
	// Skip remaining whitespace until next word of any type.
	return c.NextRuneFunc(isNotSpace)
}
// EndWord moves cursor to the end of the current word or seeks to the
// end of the next word, if the character under the cursor is whitespace.
// Returns false only when the cursor could not move at all (EOF).
func (c *Cursor) EndWord() bool {
	if !c.NextRune(true) {
		return false
	}
	// Skip spaces until beginning of next word
	r, _ := c.RuneUnder()
	if c.EOL() || unicode.IsSpace(r) {
		c.NextWord()
	}
	// Skip to after the word, choosing the predicate by the word class
	// (A-Z0-9_ words vs. runs of other non-whitespace characters).
	r, _ = c.RuneUnder()
	var f func(r rune) bool
	if utils.IsWord(r) {
		f = func(r rune) bool {
			return !utils.IsWord(r) || unicode.IsSpace(r)
		}
	} else {
		f = func(r rune) bool {
			return utils.IsWord(r) || unicode.IsSpace(r)
		}
	}
	// This can go back to end of buffer but can be ignored,
	// since we're going to backtrack one character.
	c.NextRuneFunc(f)
	c.PrevRune(true)
	// Keep going back until BOF if we end up at EOL. This
	// can happen on empty lines.
	for c.EOL() && !(c.BOL() && c.FirstLine()) {
		c.PrevRune(true)
	}
	return true
}
// PrevRuneFunc moves the cursor backward until the rune before it satisfies
// condition f, wrapping across line boundaries as needed.
// Returns true if the move was successful, false if BOF was reached
// (the original comment said EOF; the guard below checks BOF/FirstLine).
func (c *Cursor) PrevRuneFunc(f func(rune) bool) bool {
	for {
		if c.BOL() {
			if c.FirstLine() {
				// nothing before the cursor to scan
				return false
			} else {
				// wrap to the end of the previous line
				c.Line = c.Line.Prev
				c.LineNum--
				c.Boffset = len(c.Line.Data)
				continue
			}
		}
		r, rlen := c.RuneBefore()
		// retreat within the current line while f is not satisfied
		for !f(r) && !c.BOL() {
			c.Boffset -= rlen
			r, rlen = c.RuneBefore()
		}
		break
	}
	return true
}
// PrevWord moves the cursor backward to the beginning of the previous word.
// Skips the rest of the current word, if any, unless located at its
// first character. Returns true if the move was successful, false if BOF
// was reached.
func (c *Cursor) PrevWord() bool {
	isNotSpace := func(r rune) bool {
		return !unicode.IsSpace(r)
	}
	for {
		// Skip space until we find a word character.
		// Re-try if we reached beginning-of-line.
		if !c.PrevRuneFunc(isNotSpace) {
			return false
		}
		if !c.BOL() {
			break
		}
	}
	r, _ := c.RuneBefore()
	if isNotSpace(r) {
		// Lowercase word motion differentiates words consisting of
		// (A-Z0-9_) and any other non-whitespace character. Skip until
		// we find either the other word type or whitespace.
		if utils.IsWord(r) {
			c.PrevRuneFunc(func(r rune) bool {
				return !utils.IsWord(r) || unicode.IsSpace(r)
			})
		} else {
			c.PrevRuneFunc(func(r rune) bool {
				return utils.IsWord(r) || unicode.IsSpace(r)
			})
		}
	}
	return !c.BOL()
}
// OnInsertAdjust fixes up the cursor after the insertion described by `a`
// has been applied to the buffer, so the cursor keeps pointing at the same
// logical location.
func (c *Cursor) OnInsertAdjust(a *Action) {
	if a.Cursor.LineNum > c.LineNum {
		// insertion strictly below the cursor: nothing to adjust
		return
	}
	if a.Cursor.LineNum < c.LineNum {
		// inserted something above the cursor, adjust it
		c.LineNum += len(a.Lines)
		return
	}
	// insertion on the cursor line
	if a.Cursor.Boffset <= c.Boffset {
		// insertion before or at the cursor, move cursor along with insertion
		if len(a.Lines) == 0 {
			// no lines were inserted, simply adjust the offset
			c.Boffset += len(a.Data)
		} else {
			// one or more lines were inserted, adjust cursor
			// respectively
			c.Line = a.LastLine()
			c.LineNum += len(a.Lines)
			c.Boffset = a.lastLineAffectionLen() +
				c.Boffset - a.Cursor.Boffset
		}
	}
}
// OnDeleteAdjust fixes up the cursor after the deletion described by `a`
// has been applied to the buffer, so the cursor keeps pointing at the same
// logical location.
func (c *Cursor) OnDeleteAdjust(a *Action) {
	if a.Cursor.LineNum > c.LineNum {
		// deletion strictly below the cursor: nothing to adjust
		return
	}
	if a.Cursor.LineNum < c.LineNum {
		// deletion above the cursor line, may touch the cursor location
		if len(a.Lines) == 0 {
			// no lines were deleted, no things to adjust
			return
		}
		first, last := a.DeletedLines()
		if first <= c.LineNum && c.LineNum <= last {
			// deleted the cursor line, see how much it affects it
			n := 0
			if last == c.LineNum {
				n = c.Boffset - a.lastLineAffectionLen()
				if n < 0 {
					n = 0
				}
			}
			*c = a.Cursor
			c.Boffset += n
			// BUG FIX: without this return, control fell through to the
			// same-line-deletion adjustment below and subtracted
			// firstLineAffectionLen from the already-relocated offset
			// (the sibling branch below already had its return).
			return
		}
		// phew.. no worries
		c.LineNum -= len(a.Lines)
		return
	}
	// the last case is deletion on the cursor line, see what was deleted
	if a.Cursor.Boffset >= c.Boffset {
		// deleted something after cursor, don't care
		return
	}
	n := c.Boffset - (a.Cursor.Boffset + a.firstLineAffectionLen())
	if n < 0 {
		n = 0
	}
	c.Boffset = a.Cursor.Boffset + n
}
// SortCursors orders a pair of cursors, from closest to
// furthest from the beginning of the buffer.
func SortCursors(c1, c2 Cursor) (r1, r2 Cursor) {
if c2.Before(c1) {
return c2, c1
}
return c1, c2
} | buffer/cursor.go | 0.751192 | 0.42054 | cursor.go | starcoder |
package streams
import "io"
// ByteMapper remaps all intercepted bytes based on the passed ByteMapperFunc.
// It should be safe to use either a stateful or idempotent function in this.
// However you should avoid reuse of a stateful ByteMapperFunc as correct
// behavior is difficult and error prone to implement.
type ByteMapper struct {
	bmfn ByteMapperFunc // mapping applied to every byte that passes through
}
// NewByteMapper wraps bmfn in an Interceptor that rewrites every byte
// flowing through it.
func NewByteMapper(bmfn ByteMapperFunc) Interceptor {
	return &ByteMapper{bmfn: bmfn}
}
// InterceptWrite maps every byte of p and writes the result to w.
// BUG FIX: the original rewrote p in place before calling w.Write(p);
// the io.Writer contract forbids modifying the caller's buffer, so the
// mapping now goes into a scratch copy.
func (bm *ByteMapper) InterceptWrite(w io.Writer, p []byte) (n int, err error) {
	mapped := make([]byte, len(p))
	for i, b := range p {
		mapped[i] = bm.bmfn(b)
	}
	return w.Write(mapped)
}
// InterceptRead reads from r into p and then maps the n bytes that were
// actually read in place (p is the read destination, so in-place mapping
// is safe here). An io.EOF from the read is passed through after mapping.
func (bm *ByteMapper) InterceptRead(r io.Reader, p []byte) (n int, err error) {
	n, err = r.Read(p)
	if err != nil && err != io.EOF {
		return n, err
	}
	for i := 0; i < n; i++ {
		p[i] = bm.bmfn(p[i])
	}
	return n, err
}
// ByteMapperFunc is used to map a byte to another byte. This can be a
// stateful or stateless function. However you should avoid mutating or
// reuse of a stateful ByteMapperFunc as correct behavior is difficult and
// error prone to perfect.
type ByteMapperFunc func(byte) byte

// CompileByteMapperFunc prebuilds a 256-entry lookup table from the passed
// ByteMapperFunc and returns a mapper that answers from the table. As this
// assumes the passed ByteMapperFunc is idempotent or stateless in nature,
// behavior is undefined if that is not the case.
func CompileByteMapperFunc(byteMapperFunc ByteMapperFunc) ByteMapperFunc {
	// evaluate the mapping once per possible byte value
	table := make([]byte, 256)
	for b := 0; b < 256; b++ {
		table[b] = byteMapperFunc(byte(b))
	}
	return func(b byte) byte {
		return table[b]
	}
}
// CompiledByteMapper prebuilds an array based on the output of the passed
// ByteMapperFunc, using this array as a fast lookup to remap all intercepted
// bytes. However as this assumes that the passed ByteMapperFunc is idempotent
// or stateless in nature, it has undefined behavior if this is not the
// case.
func CompiledByteMapper(byteMapperFunc ByteMapperFunc) Interceptor {
	return NewByteMapper(CompileByteMapperFunc(byteMapperFunc))
}
package geometry
import (
"encoding/binary"
"math"
)
// IndexKind is the kind of index to use in the options.
type IndexKind byte

// IndexKind types
const (
	None     IndexKind = 0
	QuadTree IndexKind = 1
)

// String returns a human-readable name for the index kind.
func (kind IndexKind) String() string {
	if kind == None {
		return "None"
	}
	if kind == QuadTree {
		return "QuadTree"
	}
	return "Unknown"
}
// IndexOptions are segment indexing options
type IndexOptions struct {
	Kind      IndexKind // which index structure to build
	MinPoints int       // index only series with at least this many points (0 disables)
}

var (
	// DefaultIndexOptions builds a quadtree for series of 64+ points.
	DefaultIndexOptions = &IndexOptions{Kind: QuadTree, MinPoints: 64}
	// NoIndexing disables segment indexing entirely.
	NoIndexing = &IndexOptions{Kind: None, MinPoints: 0}
)
// Series is just a series of points with utilities for efficiently accessing
// segments from rectangle queries, making stuff like point-in-polygon lookups
// very quick.
type Series interface {
	Rect() Rect      // minimum bounding rectangle
	Empty() bool     // reports whether the series takes up no space
	Convex() bool    // reports whether the points form a convex shape
	Clockwise() bool // reports whether the points wind clockwise
	NumPoints() int
	NumSegments() int
	PointAt(index int) Point
	SegmentAt(index int) Segment
	Index() []byte      // serialized spatial index, empty when not indexed
	RawPoints() []Point // underlying point storage (not a copy)
	Closed() bool
	// Search calls iter for segments intersecting rect until iter returns false.
	Search(rect Rect, iter func(seg Segment, index int) bool)
}
// seriesCopyPoints copies every point of the series into a fresh slice.
func seriesCopyPoints(series Series) []Point {
	n := series.NumPoints()
	out := make([]Point, n)
	for i := range out {
		out[i] = series.PointAt(i)
	}
	return out
}
// baseSeries is a concrete type containing all that is needed to make a Series.
type baseSeries struct {
	closed    bool          // points create a closed shape
	clockwise bool          // points move clockwise
	convex    bool          // points create a convex shape
	indexKind IndexKind     // index kind
	index     []byte        // actual index (nil until buildIndex runs)
	rect      Rect          // minimum bounding rectangle
	points    []Point       // original points
}

// compile-time check that baseSeries satisfies Series
var _ Series = &baseSeries{}
// makeSeries returns a processed baseSeries.
// When copyPoints is true the input slice is cloned so the caller may keep
// mutating it. An index is built only when opts (DefaultIndexOptions when
// opts is nil) requests one and the series has at least MinPoints points.
func makeSeries(
	points []Point, copyPoints, closed bool, opts *IndexOptions,
) baseSeries {
	if opts == nil {
		opts = DefaultIndexOptions
	}
	var series baseSeries
	series.closed = closed
	if copyPoints {
		series.points = make([]Point, len(points))
		copy(series.points, points)
	} else {
		series.points = points
	}
	// derive convexity, bounding rect and winding in a single pass
	series.convex, series.rect, series.clockwise = processPoints(points, closed)
	if opts.MinPoints != 0 && len(points) >= opts.MinPoints {
		series.indexKind = opts.Kind
		series.buildIndex()
	}
	return series
}
// RawPoints returns the underlying point slice without copying.
func (series *baseSeries) RawPoints() []Point {
	return series.points
}

// Index returns the serialized spatial index (nil when none was built).
func (series *baseSeries) Index() []byte {
	return series.index
}

// Clockwise reports whether the points wind clockwise.
func (series *baseSeries) Clockwise() bool {
	return series.clockwise
}
// Move returns a copy of the series translated by (deltaX, deltaY),
// rebuilding the index when the receiver had one.
// NOTE(review): makeSeries with nil opts may already build a default index,
// in which case buildIndex below is a no-op even though indexKind was just
// overwritten — confirm this is intended.
func (series *baseSeries) Move(deltaX, deltaY float64) Series {
	points := make([]Point, len(series.points))
	for i := 0; i < len(series.points); i++ {
		points[i].X = series.points[i].X + deltaX
		points[i].Y = series.points[i].Y + deltaY
	}
	nseries := makeSeries(points, false, series.closed, nil)
	nseries.indexKind = series.indexKind
	if len(series.Index()) > 0 {
		nseries.buildIndex()
	}
	return &nseries
}
// Empty returns true if the series does not take up space.
// A nil receiver is treated as empty.
func (series *baseSeries) Empty() bool {
	if series == nil {
		return true
	}
	// a closed shape needs 3+ points, an open one needs 2+
	return (series.closed && len(series.points) < 3) || len(series.points) < 2
}

// Rect returns the series rectangle
func (series *baseSeries) Rect() Rect {
	return series.rect
}

// Convex returns true if the points create a convex loop or linestring
func (series *baseSeries) Convex() bool {
	return series.convex
}

// Closed return true if the shape is closed
func (series *baseSeries) Closed() bool {
	return series.closed
}

// NumPoints returns the number of points in the series
func (series *baseSeries) NumPoints() int {
	return len(series.points)
}

// PointAt returns the point at index (panics when out of range)
func (series *baseSeries) PointAt(index int) Point {
	return series.points[index]
}
// Search calls iter for every segment whose bounding rectangle intersects
// the provided rectangle, stopping early when iter returns false. Uses the
// serialized index when one was built, otherwise scans all segments.
func (series *baseSeries) Search(
	rect Rect,
	iter func(seg Segment, idx int) bool,
) {
	if len(series.index) == 0 {
		// no index: brute-force every segment
		n := series.NumSegments()
		for i := 0; i < n; i++ {
			seg := series.SegmentAt(i)
			if seg.Rect().IntersectsRect(rect) {
				if !iter(seg, i) {
					return
				}
			}
		}
	} else {
		// index layout (see buildIndex/setCompressed): byte 0 is the
		// IndexKind, bytes [1:5] hold the total length little-endian,
		// the payload starts at offset 5
		data := series.index
		n := binary.LittleEndian.Uint32(data[1:])
		data = data[:n:n]
		qCompressSearch(data, 5, series, series.rect, rect, iter)
	}
}
// DistanceToSeries returns an arbitrary distance to a Series.
// All the calculations are performed within two functions, that must be
// provided by the caller:
//   - distToRect to calculate a distance to a Rectangle.
//   - distToSegment to calculate a distance to a Segment.
// The returned seg/idx identify the nearest segment found; dist is NaN if
// the series is empty.
func DistanceToSeries(
	series Series,
	distToRect func(rect Rect) float64,
	distToSegment func(seg Segment) float64,
) (seg Segment, idx int, dist float64) {
	dist = math.NaN()
	index := series.Index()
	base, ok := series.(*baseSeries)
	if !ok || len(index) == 0 {
		// no usable index: scan every segment for the minimum distance
		n := series.NumSegments()
		for i := 0; i < n; i++ {
			sseg := series.SegmentAt(i)
			sdist := distToSegment(sseg)
			if i == 0 || sdist < dist {
				seg = sseg
				dist = sdist
			}
		}
	} else {
		data := index
		n := binary.LittleEndian.Uint32(data[1:])
		data = data[:n:n]
		// skip over the first 5 bytes.
		// NOTE: only qtrees. There is no R-tree support.
		seg, idx, dist = qCompressNearbySegment(data, 5, base, base.rect,
			distToRect, distToSegment)
	}
	return seg, idx, dist
}
// NumSegments returns the number of segments the series produces: n-1 for
// an open linestring, and n (or n-1 when the first point is repeated at
// the end) for a closed ring.
func (series *baseSeries) NumSegments() int {
	n := len(series.points)
	if !series.closed {
		if n < 2 {
			return 0
		}
		return n - 1
	}
	if n < 3 {
		return 0
	}
	if series.points[n-1] == series.points[0] {
		// explicit closing point: don't count the duplicate segment
		return n - 1
	}
	return n
}
// SegmentAt returns the segment starting at point `index`; the last
// segment wraps back around to the first point.
func (series *baseSeries) SegmentAt(index int) Segment {
	next := index + 1
	if next == len(series.points) {
		next = 0
	}
	return Segment{A: series.points[index], B: series.points[next]}
}
// processPoints tests if the ring is convex, calculates the outer
// rectangle and detects the winding direction — all in a single pass.
// Returns zero values when there are too few points for a valid
// ring/linestring.
func processPoints(points []Point, closed bool) (
	convex bool, rect Rect, clockwise bool,
) {
	if (closed && len(points) < 3) || len(points) < 2 {
		return
	}
	var concave bool
	var dir int
	var a, b, c Point
	var cwc float64 // shoelace-style accumulator; positive sum => clockwise
	for i := 0; i < len(points); i++ {
		// process the rectangle inflation
		if i == 0 {
			rect = Rect{points[i], points[i]}
		} else {
			if points[i].X < rect.Min.X {
				rect.Min.X = points[i].X
			} else if points[i].X > rect.Max.X {
				rect.Max.X = points[i].X
			}
			if points[i].Y < rect.Min.Y {
				rect.Min.Y = points[i].Y
			} else if points[i].Y > rect.Max.Y {
				rect.Max.Y = points[i].Y
			}
		}
		// gather some point positions for concave and clockwise detection
		// (b and c wrap around the end of the ring)
		a = points[i]
		if i == len(points)-1 {
			b = points[0]
			c = points[1]
		} else if i == len(points)-2 {
			b = points[i+1]
			c = points[0]
		} else {
			b = points[i+1]
			c = points[i+2]
		}
		// process the clockwise detection
		cwc += (b.X - a.X) * (b.Y + a.Y)
		// process the convex calculation
		if concave {
			continue
		}
		// a sign flip of the z cross product between consecutive edge
		// pairs means a concave corner
		zCrossProduct := (b.X-a.X)*(c.Y-b.Y) - (b.Y-a.Y)*(c.X-b.X)
		if dir == 0 {
			if zCrossProduct < 0 {
				dir = -1
			} else if zCrossProduct > 0 {
				dir = 1
			}
		} else if zCrossProduct < 0 {
			if dir == 1 {
				concave = true
			}
		} else if zCrossProduct > 0 {
			if dir == -1 {
				concave = true
			}
		}
	}
	return !concave, rect, cwc > 0
}
// clearIndex drops the serialized index; buildIndex can recreate it.
func (series *baseSeries) clearIndex() {
	series.index = nil
}

// setCompressed stores data as the series index, first writing the total
// length into bytes [1:5] (little endian) and shrinking the backing array
// to fit.
func (series *baseSeries) setCompressed(data []byte) {
	binary.LittleEndian.PutUint32(data[1:], uint32(len(data)))
	smaller := make([]byte, len(data))
	copy(smaller, data)
	series.index = smaller
}
// buildIndex constructs a quadtree over all segments and stores its
// compressed form. It is a no-op when an index is already present. The
// 5-byte header is [kind, len32le]; the length is filled in by
// setCompressed.
func (series *baseSeries) buildIndex() {
	if series.index != nil {
		// already built
		return
	}
	root := new(qNode)
	n := series.NumSegments()
	for i := 0; i < n; i++ {
		seg := series.SegmentAt(i)
		root.insert(series, series.rect, seg.Rect(), i, 0)
	}
	series.setCompressed(
		root.compress([]byte{byte(series.indexKind), 0, 0, 0, 0}),
	)
}
package tuple
// T0 holds a tuple of 0 values (an alias for the empty struct).
type T0 = struct{}

// There is no 1-tuple - a 1-tuple is represented by the type itself.
// T2 holds a tuple of 2 values.
type T2[A0, A1 any] struct {
	A0 A0
	A1 A1
}

// T returns all the tuple's values.
func (p T2[A0, A1]) T() (A0, A1) {
	return p.A0, p.A1
}

// MkT2 returns a 2-tuple formed from its arguments.
func MkT2[A0, A1 any](a0 A0, a1 A1) T2[A0, A1] {
	return T2[A0, A1]{A0: a0, A1: a1}
}
// T3 holds a tuple of 3 values.
type T3[A0, A1, A2 any] struct {
	A0 A0
	A1 A1
	A2 A2
}

// T returns all the tuple's values.
func (p T3[A0, A1, A2]) T() (A0, A1, A2) {
	return p.A0, p.A1, p.A2
}

// MkT3 returns a 3-tuple formed from its arguments.
func MkT3[A0, A1, A2 any](a0 A0, a1 A1, a2 A2) T3[A0, A1, A2] {
	return T3[A0, A1, A2]{A0: a0, A1: a1, A2: a2}
}
// T4 holds a tuple of 4 values.
type T4[A0, A1, A2, A3 any] struct {
	A0 A0
	A1 A1
	A2 A2
	A3 A3
}

// T returns all the tuple's values.
func (p T4[A0, A1, A2, A3]) T() (A0, A1, A2, A3) {
	return p.A0, p.A1, p.A2, p.A3
}

// MkT4 returns a 4-tuple formed from its arguments.
func MkT4[A0, A1, A2, A3 any](a0 A0, a1 A1, a2 A2, a3 A3) T4[A0, A1, A2, A3] {
	return T4[A0, A1, A2, A3]{A0: a0, A1: a1, A2: a2, A3: a3}
}
// T5 holds a tuple of 5 values.
type T5[A0, A1, A2, A3, A4 any] struct {
	A0 A0
	A1 A1
	A2 A2
	A3 A3
	A4 A4
}

// T returns all the tuple's values.
func (p T5[A0, A1, A2, A3, A4]) T() (A0, A1, A2, A3, A4) {
	return p.A0, p.A1, p.A2, p.A3, p.A4
}

// MkT5 returns a 5-tuple formed from its arguments.
func MkT5[A0, A1, A2, A3, A4 any](a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) T5[A0, A1, A2, A3, A4] {
	return T5[A0, A1, A2, A3, A4]{A0: a0, A1: a1, A2: a2, A3: a3, A4: a4}
}
// T6 holds a tuple of 6 values.
type T6[A0, A1, A2, A3, A4, A5 any] struct {
	A0 A0
	A1 A1
	A2 A2
	A3 A3
	A4 A4
	A5 A5
}

// T returns all the tuple's values.
func (p T6[A0, A1, A2, A3, A4, A5]) T() (A0, A1, A2, A3, A4, A5) {
	return p.A0, p.A1, p.A2, p.A3, p.A4, p.A5
}

// MkT6 returns a 6-tuple formed from its arguments.
func MkT6[A0, A1, A2, A3, A4, A5 any](a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) T6[A0, A1, A2, A3, A4, A5] {
	return T6[A0, A1, A2, A3, A4, A5]{A0: a0, A1: a1, A2: a2, A3: a3, A4: a4, A5: a5}
}
package sdp
/*Author - <NAME>
RFC 4566 - https://tools.ietf.org/html/rfc4566#section-5.9
Timing ("t=")
t=<start-time> <stop-time>
The "t=" lines specify the start and stop times for a session.
Multiple "t=" lines MAY be used if a session is active at multiple
irregularly spaced times; each additional "t=" line specifies an
additional period of time for which the session will be active. If
the session is active at regular times, an "r=" line (see below)
should be used in addition to, and following, a "t=" line -- in which
case the "t=" line specifies the start and stop times of the repeat
sequence.
The first and second sub-fields give the start and stop times,
respectively, for the session. These values are the decimal
representation of Network Time Protocol (NTP) time values in seconds
since 1900 [13]. To convert these values to UNIX time, subtract
decimal 2208988800.
NTP timestamps are elsewhere represented by 64-bit values, which wrap
sometime in the year 2036. Since SDP uses an arbitrary length
decimal representation, this should not cause an issue (SDP
timestamps MUST continue counting seconds since 1900, NTP will use
the value modulo the 64-bit limit).
If the <stop-time> is set to zero, then the session is not bounded,
though it will not become active until after the <start-time>. If
the <start-time> is also zero, the session is regarded as permanent.
User interfaces SHOULD strongly discourage the creation of unbounded
and permanent sessions as they give no information about when the
session is actually going to terminate, and so make scheduling
difficult.
The general assumption may be made, when displaying unbounded
sessions that have not timed out to the user, that an unbounded
session will only be active until half an hour from the current time
or the session start time, whichever is the later. If behaviour
other than this is required, an end-time SHOULD be given and modified
as appropriate when new information becomes available about when the
session should really end.
Permanent sessions may be shown to the user as never being active
unless there are associated repeat times that state precisely when
the session will be active.
*/
// sdpTime holds one parsed SDP "t=" header.
type sdpTime struct {
	TimeStart []byte // session start time (decimal NTP seconds, per RFC 4566)
	TimeStop  []byte // session stop time (decimal NTP seconds, per RFC 4566)
	Src       []byte // original source line, kept only when keepSrc is set
}

// String renders the header back into wire form: "t=<start> <stop>".
func (st *sdpTime) String() string {
	return "t=" + string(st.TimeStart) + " " + string(st.TimeStop)
}
//ParserSdpTime parses SDP time header.
// v is the value part of a "t=" line ("<start> <stop>"); the two fields are
// split on the first space and stored verbatim as bytes. Previous contents
// of out are discarded.
func ParserSdpTime(v []byte, out *sdpTime) {
	pos := 0
	state := fieldTimeStart
	// Init the output area
	out.TimeStart = nil
	out.TimeStop = nil
	out.Src = nil
	// Keep the source line if needed
	if keepSrc {
		out.Src = v
	}
	// Loop through the bytes making up the line
	for pos < len(v) {
		switch state {
		case fieldTimeStart:
			if v[pos] == ' ' {
				// first space: switch to collecting the stop time
				state = fieldTimeStop
				pos++
				continue
			}
			out.TimeStart = append(out.TimeStart, v[pos])
		case fieldTimeStop:
			// everything after the first space (further spaces included)
			// goes into the stop field
			out.TimeStop = append(out.TimeStop, v[pos])
		}
		pos++
	}
}
package blockchain
import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"log"
	"math/rand"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/mohanarpit/yolochain/blockchainGrpc"
	"github.com/mohanarpit/yolochain/models"
)
// CalculateStringHash returns the hex-encoded SHA-256 digest of s.
func CalculateStringHash(s string) string {
	sum := sha256.Sum256([]byte(s))
	return hex.EncodeToString(sum[:])
}
// CalculateHash simply calculates the hash for a block and stores it in that block. Used to check if the data is valid
// and the block hasn't been tampered with.
func CalculateHash(block models.Block) string {
	// BUG FIX: the original used string(block.Index), which converts the
	// integer to the rune with that code point (go vet flags this), not to
	// its decimal representation. Use an explicit decimal format instead.
	// NOTE(review): this changes hash values relative to the old code —
	// confirm no persisted chain must remain verifiable.
	record := fmt.Sprintf("%d%s%s%s", block.Index, block.Timestamp, block.Data, block.PrevHash)
	return CalculateStringHash(record)
}
// GenerateBlock creates a new block from arbitrary data and the address of
// the client node that proposed it, chained to oldBlock.
func GenerateBlock(oldBlock models.Block, data []byte, address string) (models.Block, error) {
	newBlock := models.Block{
		Index:     oldBlock.Index + 1,
		Timestamp: time.Now().String(),
		Data:      data,
		PrevHash:  oldBlock.Hash,
		Validator: address,
	}
	// the hash covers every field set above
	newBlock.Hash = CalculateHash(newBlock)
	return newBlock, nil
}
// IsBlockValid checks that newBlock correctly extends oldBlock: the index
// advances by one, the previous-hash link matches, and the stored hash
// matches a fresh recomputation.
func IsBlockValid(newBlock models.Block, oldBlock models.Block) bool {
	if oldBlock.Index+1 != newBlock.Index {
		return false
	}
	if oldBlock.Hash != newBlock.PrevHash {
		return false
	}
	if CalculateHash(newBlock) != newBlock.Hash {
		return false
	}
	return true
}
// ReplaceChains overwrites the local copy of the blockchain when a strictly
// longer chain is found in the network.
func ReplaceChains(newBlocks []models.Block) {
	if len(newBlocks) <= len(models.Blockchain) {
		return
	}
	models.Blockchain = newBlocks
}
// BootstrapBlockchain bootstraps a blockchain with the genesis block
// (index 0, empty hashes). Returns the genesis block.
func BootstrapBlockchain() models.Block {
	t := time.Now()
	// BUG FIX: []byte(string(0)) produced the single NUL byte "\x00"
	// (string(int) is a rune conversion, flagged by go vet); the intent
	// was evidently the literal "0".
	genesisBlock := models.Block{0, t.String(), []byte("0"), "", "", ""}
	spew.Dump(genesisBlock)
	models.Blockchain = append(models.Blockchain, genesisBlock)
	return genesisBlock
}
// HandleCandidateBlocks drains the CandidateBlocks channel forever,
// appending each received candidate to the shared TempCandidateBlocks
// slice under the global mutex. Intended to run as a goroutine.
func HandleCandidateBlocks() {
	for {
		candidateBlock := <-models.CandidateBlocks
		models.Mutex.Lock()
		log.Println("Going to append the candidateBlock to the list of tempBlocks")
		models.TempCandidateBlocks = append(models.TempCandidateBlocks, candidateBlock)
		models.Mutex.Unlock()
	}
}
// PickPOSWinner picks the winner node for the new block based on the number of tokens that have been staked by
// individual clients. It does this every 30 seconds.
func PickPOSWinner() {
time.Sleep(30 * time.Second)
log.Println("Going to the pick the winner")
models.Mutex.Lock()
temp := models.TempCandidateBlocks
models.Mutex.Unlock()
lotteryPool := []string{}
if len(temp) > 0 {
OUTER:
for _, block := range temp {
// If the node is already in the lottery pool, skip it
for _, node := range lotteryPool {
if block.Validator == node {
log.Printf("Got the node in lotteryPool as %v. Skipping", node)
continue OUTER
}
}
models.Mutex.Lock()
setValidators := models.Validators
models.Mutex.Unlock()
// Based on the number of tokens staked, add those many items of the Validator address node to the list
// This will ensure that when we randomly pick nodes, the probability of picking the node changes based
// on the number of tokens that have been staked
k, ok := setValidators[block.Validator]
if ok {
for i := 0; i < k; i++ {
lotteryPool = append(lotteryPool, block.Validator)
}
}
}
s := rand.NewSource(time.Now().Unix())
r := rand.New(s)
lotteryWinner := lotteryPool[r.Intn(len(lotteryPool))]
for _, block := range temp {
if block.Validator == lotteryWinner {
// Appending to the local blockchain is done by the AnnounceCandidates GRPC handler
for _ = range models.Validators {
// Transform the local block to the grpcChain Block so that we can push it over the wire
// TODO: Change this to use the GRPC blockchain only so that we don't have to keep transforming the values
grpcBlock := blockchainGrpc.Block{
Data: block.Data,
Validator: block.Validator,
Hash: block.Hash,
PrevHash: block.PrevHash,
Timestamp: block.Timestamp,
Index: block.Index,
}
req := blockchainGrpc.AnnounceCandidateRequest{
Message: "Winning Validator " + lotteryWinner,
Block: &grpcBlock,
}
models.Announcements <- req
}
break
}
}
}
log.Println("Going to clean the tempBlocks array after picking the winner")
models.Mutex.Lock()
models.TempCandidateBlocks = []models.Block{}
models.Mutex.Unlock()
} | blockchain/utils.go | 0.608594 | 0.419291 | utils.go | starcoder |
package graph
import (
"github.com/basp1/pocket/intlist"
)
// NIL marks "no entry" in the From/Next linked lists.
const NIL = -1

// Graph is a directed multigraph stored as adjacency lists over flat
// arrays: From[v] heads a singly linked list of edge slots for vertex v,
// chained through Next, with To holding each edge slot's target vertex.
// Freed edge slots are recycled through the Free list.
type Graph struct {
	VertexCount int
	EdgeCount   int
	Free        int // head of the free-slot list, NIL when empty

	From []int // per-vertex head of its edge list (NIL when no edges)
	Next []int // next edge slot in the list, NIL terminates
	To   []int // target vertex of each edge slot

	Vertices []interface{} // user payload per vertex
	Edges    []interface{} // user payload per edge slot

	intlist *intlist.Intlist // scratch buffer; NOTE(review): unused in this chunk — confirm
}
// New creates an empty graph with no vertices or edges.
func New() *Graph {
	return &Graph{
		VertexCount: 0,
		EdgeCount:   0,
		Free:        NIL,
		intlist:     intlist.New(0),
	}
}
// Clear removes all vertices and edges. The adjacency arrays keep their
// capacity (truncated to length 0) while the payload slices are replaced
// with fresh empty ones.
func (self *Graph) Clear() {
	self.VertexCount = 0
	self.EdgeCount = 0
	self.Free = NIL
	self.From = self.From[:0]
	self.Next = self.Next[:0]
	self.To = self.To[:0]
	self.Vertices = []interface{}{}
	self.Edges = []interface{}{}
}
// AddVertex appends a new vertex carrying vertexValue and returns its index.
func (g *Graph) AddVertex(vertexValue interface{}) int {
	g.From = append(g.From, NIL)
	g.Vertices = append(g.Vertices, vertexValue)
	g.VertexCount++
	return g.VertexCount - 1
}
// GetVertex returns the value stored at vertex.
// Panics when vertex is out of range.
func (self *Graph) GetVertex(vertex int) interface{} {
	if vertex < 0 || vertex >= self.VertexCount {
		panic("vertex not in [0; VertexCount)")
	}
	return self.Vertices[vertex]
}

// SetVertex replaces the value stored at vertex.
// Panics when vertex is out of range.
func (self *Graph) SetVertex(vertex int, vertexValue interface{}) {
	if vertex < 0 || vertex >= self.VertexCount {
		panic("vertex not in [0; VertexCount)")
	}
	self.Vertices[vertex] = vertexValue
}
// HasEdges reports whether vertex has at least one outgoing edge.
// Panics when vertex is out of range.
func (g *Graph) HasEdges(vertex int) bool {
	if vertex < 0 || vertex >= g.VertexCount {
		panic("vertex not in [0; VertexCount)")
	}
	return g.From[vertex] != NIL
}
// HasEdge reports whether at least one edge From -> To exists.
// Panics on an out-of-range From or a negative To.
func (g *Graph) HasEdge(From int, To int) bool {
	if From < 0 || From >= g.VertexCount {
		panic("vertex not in [0; VertexCount)")
	}
	if To < 0 {
		panic("vertex not in [0; VertexCount)")
	}
	// walk From's edge list looking for the target
	for j := g.From[From]; j != NIL; j = g.Next[j] {
		if g.To[j] == To {
			return true
		}
	}
	return false
}
// AddEdge inserts a directed edge From -> To carrying edgeValue and returns
// the edge's slot index. A slot is taken from the free list when available,
// otherwise the arrays grow. Parallel edges are allowed.
func (self *Graph) AddEdge(From int, To int, edgeValue interface{}) int {
	if From < 0 || From >= self.VertexCount {
		panic("vertex not in [0; VertexCount)")
	}
	if To < 0 {
		// NOTE(review): only the lower bound of To is checked; To may
		// exceed VertexCount — confirm whether that is intended.
		panic("vertex not in [0; VertexCount)")
	}
	p := 0
	if self.Free >= 0 {
		// reuse a previously freed slot
		p = self.Free
		self.To[self.Free] = To
		self.Edges[self.Free] = edgeValue
		self.Free = self.Next[self.Free]
	} else {
		// no free slot: append a new one
		p = self.EdgeCount
		self.Next = append(self.Next, NIL)
		self.To = append(self.To, To)
		self.Edges = append(self.Edges, edgeValue)
	}
	// push the slot onto the head of From's edge list
	self.Next[p] = self.From[From]
	self.From[From] = p
	self.EdgeCount += 1
	return p
}
// RemoveEdge deletes the first edge From -> To found and returns its slot
// to the free list. No-op when no such edge exists.
// Panics on an out-of-range From or a negative To.
func (self *Graph) RemoveEdge(From int, To int) {
	if From < 0 || From >= self.VertexCount {
		panic("vertex not in [0; VertexCount)")
	}
	if To < 0 {
		panic("vertex not in [0; VertexCount)")
	}
	if !self.HasEdges(From) {
		return
	}
	// k: slot of the matching edge, p: its predecessor in the list
	k := NIL
	p := NIL
	j := self.From[From]
	for NIL != j {
		if To == self.To[j] {
			k = j
			break
		}
		p = j
		j = self.Next[j]
	}
	if NIL == k {
		return
	}
	// unlink k (head or interior case) and push it onto the free list
	if k == self.From[From] {
		self.From[From] = self.Next[k]
		self.Next[k] = self.Free
		self.Free = k
	} else {
		self.Next[p] = self.Next[k]
		self.Next[k] = self.Free
		self.Free = k
	}
	self.EdgeCount -= 1
}
// RemoveEdges deletes every outgoing edge of vertex by splicing its whole
// edge list onto the free list in one operation.
// Panics when vertex is out of range.
func (self *Graph) RemoveEdges(vertex int) {
	if vertex < 0 || vertex >= self.VertexCount {
		panic("vertex not in [0; VertexCount)")
	}
	if !self.HasEdges(vertex) {
		return
	}
	// count the list's edges while walking to its tail
	// NOTE(review): the local is misleadingly named VertexCount — it
	// counts removed edges, shadowing the struct field's name.
	VertexCount := 1
	p := self.From[vertex]
	for NIL != self.Next[p] {
		p = self.Next[p]
		VertexCount += 1
	}
	// splice the whole list onto the head of the free list
	self.Next[p] = self.Free
	self.Free = self.From[vertex]
	self.From[vertex] = NIL
	self.EdgeCount -= VertexCount
}
// IsLeaf reports whether vertex has at most one distinct neighbor besides
// itself: no edges at all, only self-loops, or edges all targeting the
// same single other vertex.
// Panics when vertex is out of range.
func (self *Graph) IsLeaf(vertex int) bool {
	if vertex < 0 || vertex >= self.VertexCount {
		panic("vertex not in [0; VertexCount)")
	}
	if !self.HasEdges(vertex) {
		return true
	}
	// find the first neighbor that is not the vertex itself
	first := vertex
	i := self.From[vertex]
	for NIL != i {
		if vertex != self.To[i] {
			first = self.To[i]
			break
		}
		i = self.Next[i]
	}
	if vertex == first {
		// only self-loops
		return true
	}
	// any edge to a second distinct vertex makes it a non-leaf
	i = self.From[vertex]
	for NIL != i {
		if first != self.To[i] && vertex != self.To[i] {
			return false
		}
		i = self.Next[i]
	}
	return true
}
// Copy returns a copy of the graph's adjacency structure. Vertex and edge
// payloads are copied shallowly (the interface{} values themselves are
// shared), and the internal intlist scratch buffer is freshly allocated.
func (g *Graph) Copy() *Graph {
	cloneInts := func(src []int) []int {
		dst := make([]int, len(src))
		copy(dst, src)
		return dst
	}
	cloneVals := func(src []interface{}) []interface{} {
		dst := make([]interface{}, len(src))
		copy(dst, src)
		return dst
	}
	c := New()
	c.From = cloneInts(g.From)
	c.Next = cloneInts(g.Next)
	c.To = cloneInts(g.To)
	c.Vertices = cloneVals(g.Vertices)
	c.Edges = cloneVals(g.Edges)
	c.VertexCount = g.VertexCount
	c.EdgeCount = g.EdgeCount
	c.Free = g.Free
	return c
}
// Equal reports whether self and h have identical vertex values and
// identical per-vertex edge lists (same targets and edge payloads, in the
// same order). Panics when h is nil.
func (self *Graph) Equal(h *Graph) bool {
	if nil == h {
		panic("null pointer")
	}
	if self.VertexCount != h.VertexCount {
		return false
	}
	if self.EdgeCount != h.EdgeCount {
		return false
	}
	for i := 0; i < self.VertexCount; i++ {
		// walk both edge lists in lockstep
		j := self.From[i]
		k := h.From[i]
		if self.Vertices[i] != h.Vertices[i] {
			return false
		}
		for NIL != j && NIL != k {
			if self.To[j] != h.To[k] {
				return false
			}
			if self.Edges[j] != h.Edges[k] {
				return false
			}
			j = self.Next[j]
			k = h.Next[k]
		}
		// one list longer than the other
		if NIL != j || NIL != k {
			return false
		}
	}
	return true
}
// GetEdge returns the value stored on the edge slot returned by AddEdge.
// NOTE(review): there is no bounds or liveness check — a freed slot index
// still returns stale data; confirm callers only pass live slots.
func (self *Graph) GetEdge(edge int) interface{} {
	return self.Edges[edge]
}
// GetAdjacent returns the target vertices of all outgoing edges of vertex,
// in edge-list order. Panics when vertex is out of range.
func (g *Graph) GetAdjacent(vertex int) []int {
	if vertex < 0 || vertex >= g.VertexCount {
		panic("vertex not in [0; VertexCount)")
	}
	adjacent := []int{}
	for j := g.From[vertex]; j != NIL; j = g.Next[j] {
		adjacent = append(adjacent, g.To[j])
	}
	return adjacent
}
// GetDegree returns the number of outgoing edges of vertex.
// Panics when vertex is out of range.
func (g *Graph) GetDegree(vertex int) int {
	if vertex < 0 || vertex >= g.VertexCount {
		panic("vertex not in [0; VertexCount)")
	}
	degree := 0
	for j := g.From[vertex]; j != NIL; j = g.Next[j] {
		degree++
	}
	return degree
}
func (self *Graph) GetEdges(vertex int) []int {
if vertex < 0 || vertex >= self.VertexCount {
panic("vertex not in [0; VertexCount)")
}
edges := []int{}
j := self.From[vertex]
for NIL != j {
edges = append(edges, j)
j = self.Next[j]
}
return edges
} | graph/graph.go | 0.575349 | 0.410727 | graph.go | starcoder |
package modules
import (
"regexp"
"strings"
"github.com/bbuck/dragon-mud/scripting/lua"
)
// regexpCache memoizes compiled patterns so repeated calls with the same
// pattern string don't recompile.
// NOTE(review): access is unsynchronized — confirm the scripting engine
// only touches this from a single goroutine.
var regexpCache = make(map[string]*regexp.Regexp)
// Sutil contains several features that Lua string handling lacks, things like
// joining and regex matching and splitting and trimming and various other
// things. Note: script arguments are popped from the Lua stack in reverse
// order (last argument first).
// split(input, separator): table
// @param input: string = the string to perform the split operation on
// @param separator: string = the separator with which to split the string
// by
// splits the input string into parts based on matching the separator string
// join(words, joiner): string
// @param words: table = list of values that should be joined together
// @param joiner: string = a string value that should act as the glue
// between all values in (words) from earlier.
// combines the input list of strings with the joiner
// test_rx(needle, haystack): boolean
// @param needle: pattern = a Go regular expression pattern used to test
// against the given string value
// @param haystack: string = the body to perform the search within
// tests the haystack against the needle (regular expression search);
// returns false when the pattern fails to compile
// starts_with(str, prefix): boolean
// @param str: string = the value to test against the prefix
// @param prefix: string = the prefix that is in question
// determines if the string starts with the given substring
// ends_with(str, suffix): boolean
// @param str: string = the value to test against the suffix
// @param suffix: string = the suffix that is in question
// determines if the string ends with the given substring
// contains(haystack, needle): boolean
// @param haystack: string = the body of data to be searched
// @param needle: string = the plain substring (NOT a regular expression;
// the implementation uses strings.Contains) to search for within the text
// determines if the substring is present in the given string
// matches(needle, haystack): table
// @param needle: string = the pattern (regular expression) to compare
// against the haystack
// @param haystack: string = the body of data to be compared against the
// pattern
// returns a list of strings that match the needle (regexp); returns an
// empty table when the pattern fails to compile
// inspect_value(value): string
// @param value = any Lua value
// returns the result of Value.Inspect("") for the value
var Sutil = lua.TableMap{
	"split": func(eng *lua.Engine) int {
		sep := eng.PopString()
		str := eng.PopString()
		strs := strings.Split(str, sep)
		list := eng.NewTable()
		for _, str := range strs {
			list.Append(str)
		}
		eng.PushValue(list)
		return 1
	},
	"join": func(eng *lua.Engine) int {
		joiner := eng.PopString()
		words := eng.PopTable()
		var strs []string
		words.ForEach(func(_ *lua.Value, value *lua.Value) {
			strs = append(strs, value.AsString())
		})
		eng.PushValue(strings.Join(strs, joiner))
		return 1
	},
	"test_rx": func(eng *lua.Engine) int {
		haystack := eng.PopString()
		needle := eng.PopString()
		rx, err := fetchRx(needle)
		if err != nil {
			// Invalid pattern: report "no match" rather than raising.
			eng.PushValue(eng.False())
			return 1
		}
		res := rx.MatchString(haystack)
		eng.PushValue(res)
		return 1
	},
	"starts_with": func(eng *lua.Engine) int {
		prefix := eng.PopString()
		str := eng.PopString()
		eng.PushValue(strings.HasPrefix(str, prefix))
		return 1
	},
	"ends_with": func(eng *lua.Engine) int {
		suffix := eng.PopString()
		str := eng.PopString()
		eng.PushValue(strings.HasSuffix(str, suffix))
		return 1
	},
	"contains": func(eng *lua.Engine) int {
		needle := eng.PopString()
		haystack := eng.PopString()
		eng.PushValue(strings.Contains(haystack, needle))
		return 1
	},
	"matches": func(eng *lua.Engine) int {
		haystack := eng.PopString()
		needle := eng.PopString()
		rx, err := fetchRx(needle)
		if err != nil {
			// Invalid pattern: return an empty result table.
			eng.PushValue(eng.NewTable())
			return 1
		}
		res := rx.FindAllString(haystack, -1)
		eng.PushValue(eng.TableFromSlice(res))
		return 1
	},
	"inspect_value": func(eng *lua.Engine) int {
		val := eng.PopValue()
		eng.PushValue(val.Inspect(""))
		return 1
	},
}
func fetchRx(rx string) (*regexp.Regexp, error) {
if r, ok := regexpCache[rx]; ok {
return r, nil
}
r, err := regexp.Compile(rx)
if err == nil {
regexpCache[rx] = r
}
return r, err
} | scripting/modules/sutil.go | 0.549157 | 0.497376 | sutil.go | starcoder |
package merkletree
import (
"bytes"
"crypto"
)
// isPlausible checks that the given path starts with a leaf and ends with root.
// It validates structural invariants only (leaf flags, depths strictly
// descending toward depth-0 root, optional sibling leaf) — not hashes.
func (p Path) isPlausible() (ok bool) {
	test := true
	if len(p) < 2 {
		return false
	}
	// Leaf element (first in path).
	test = test && p[0].IsLeaf // Must be a leaf.
	test = test && !p[0].IsEmpty // Leafs have always data.
	test = test && (p[0].Depths > 0) // May not be depths zero (only root is).
	// Root element (last in path).
	r := len(p) - 1
	test = test && !p[r].IsLeaf // Must be interior node.
	test = test && p[r].Depths == 0 // Must be depths zero.
	test = test && !p[r].IsEmpty // Root nodes are never empty.
	// If we have a second leaf in the path (same depth as the first, it
	// must be the first leaf's sibling on the opposite branch).
	if test && p[1].Depths == p[0].Depths {
		test = test && len(p) > 2 // We have at least three elements.
		test = test && p[1].IsLeaf // Must be a leaf.
		test = test && p[0].IsLeft != p[1].IsLeft // Leafs on one branch.
	}
	// Verify that path is descending and ends with root node.
	if test {
		var i int // i must outlive the loop: the exhaustion check below reads its final value.
		depths := p[0].Depths
	DepthsCheckLoop:
		for i = 1; i < len(p); i++ { // assign, don't redeclare, so the outer i is updated
			if p[i].IsEmpty { // We skip empty nodes, they are also never used in verification.
				continue DepthsCheckLoop
			}
			nodeDepths := p[i].Depths
			if nodeDepths >= depths { // Depths is ALWAYS decreasing.
				if i == 1 && nodeDepths == depths { // Second leaf is the only exception.
					continue DepthsCheckLoop
				}
				test = false
				break DepthsCheckLoop
			}
			if nodeDepths < 0 {
				test = false
				break DepthsCheckLoop
			}
			depths = nodeDepths
		}
		test = test && i == len(p) // Must exhaust path.
	}
	return test
}
// verifyMyLeafConstruction reports whether hashing leafContent with this
// element's position flags (IsLeft, Depths) reproduces the element's
// stored hash.
func (pe PathElement) verifyMyLeafConstruction(leafContent []byte, hash crypto.Hash) (ok bool) {
	rebuilt := createLeafFromContent(leafContent, pe.IsLeft, pe.Depths, hash)
	return bytes.Equal(rebuilt.Hash, pe.Hash)
}
// VerifyLeaf verifies that the leaf in a path matches it's content.
func (p Path) VerifyLeaf(leafContent []byte, hash crypto.Hash) (ok bool) {
if len(p) <= 1 { // must be always 2 or more. Leaf + Root.
return false
}
return p[0].verifyMyLeafConstruction(leafContent, hash)
} | merkletree/verify.go | 0.595257 | 0.552298 | verify.go | starcoder |
package un
import (
"reflect"
"sync"
)
// init wires the reflection-based Each implementations into the exported
// typed function variables when the package is loaded.
func init() {
	MakeEach(&Each)
	MakeEach(&EachInt)
	// MakeEach(&EachString) // disabled: no EachString variable is declared in this file
	MakeEach(&EachStringInt)
	MakeEachP(&EachP)
}
// Each func(func(A, B), []A)
// Applies the given iterator function to each element of a collection (slice or map).
// If the collection is a Slice, the iterator function arguments are *value, index*.
// If the collection is a Map, the iterator function arguments are *value, key*.
// Iterator functions accept a value, and the index or key is an optional argument.
// Note: Each does not return a value; you may want un.Map.
// Populated by init via MakeEach.
var Each func(fn interface{}, slice_or_map interface{})
// EachP Parallel Each.
// *Concurrently* applies the given iterator function to each element of a
// collection (slice or map), waiting for all invocations to finish.
var EachP func(fn interface{}, slice_or_map interface{})
// EachInt
// Applies the given iterator function to each element of []int.
// Iterator function arguments are *value, index*.
var EachInt func(func(value, i int), []int)
// EachStringInt
// Applies the given iterator function to each element of map[string]int.
// Iterator function arguments are *value, key*.
var EachStringInt func(func(value int, key string), map[string]int)
// MakeEach implements a typed Each function in the form Each func(func(A, B), []A).
// As used in init above, fn is a pointer to a function variable whose
// pointee is filled in by Maker with a wrapper around the generic each.
func MakeEach(fn interface{}) {
	Maker(fn, each)
}
// MakeEachP implements a typed Parallel-Each function in the form
// EachP func(func(A, B), []A). fn is a pointer to a function variable
// whose pointee is filled in by Maker with a wrapper around eachP.
func MakeEachP(fn interface{}) {
	Maker(fn, eachP)
}
// each is the untyped implementation backing Each: it unpacks the
// (fn, collection) arguments and dispatches on the collection kind.
// It always returns nil because Each has no results.
func each(values []reflect.Value) []reflect.Value {
	fn, col := extractArgs(values)
	switch col.Kind() {
	case reflect.Map:
		eachMap(fn, col)
	case reflect.Slice:
		eachSlice(fn, col)
	}
	return nil
}
// eachSlice applies fn to every element of slice s in order,
// passing (value, index).
func eachSlice(fn, s reflect.Value) {
	for idx, n := 0, s.Len(); idx < n; idx++ {
		eachCall(fn, s.Index(idx), reflect.ValueOf(idx))
	}
}
// eachMap applies fn to every entry of map m, passing (value, key).
// Iteration order follows Go's map iteration and is unspecified.
func eachMap(fn, m reflect.Value) {
	for _, key := range m.MapKeys() {
		eachCall(fn, m.MapIndex(key), key)
	}
}
func eachCall(fn, v, i reflect.Value) {
args := []reflect.Value{v}
if in := fn.Type().NumIn(); in == 2 {
args = append(args, i)
}
fn.Call(args)
}
// eachP is the untyped implementation backing EachP: it unpacks the
// (fn, collection) arguments and applies fn to every element
// concurrently, blocking until all invocations complete.
func eachP(values []reflect.Value) []reflect.Value {
	fn, col := extractArgs(values)
	switch col.Kind() {
	case reflect.Map:
		eachMapP(fn, col)
	case reflect.Slice:
		eachSliceP(fn, col)
	}
	return nil
}
// eachSliceP concurrently applies fn to every element of slice s,
// passing (value, index), and waits for all invocations to finish.
// The element and index are passed to the goroutine as arguments: the
// original closure captured the loop variable i by reference, so under
// pre-Go-1.22 loop semantics every goroutine could observe the final
// value of i instead of its own iteration's index.
func eachSliceP(fn, s reflect.Value) {
	var done sync.WaitGroup
	n := s.Len()
	done.Add(n)
	for i := 0; i < n; i++ {
		go func(v, idx reflect.Value) {
			defer done.Done()
			eachCall(fn, v, idx)
		}(s.Index(i), reflect.ValueOf(i))
	}
	done.Wait()
}
// eachMapP concurrently applies fn to every entry of map m, passing
// (value, key), and waits for all invocations to finish. Each goroutine
// receives its value and key as arguments, so no loop variables are
// shared.
func eachMapP(fn, m reflect.Value) {
	var wg sync.WaitGroup
	keys := m.MapKeys()
	wg.Add(len(keys))
	for _, k := range keys {
		go func(f, val, key reflect.Value) {
			defer wg.Done()
			eachCall(f, val, key)
		}(fn, m.MapIndex(k), k)
	}
	wg.Wait()
}
// refEach is the reference (non-reflective) Each implementation:
// it applies fn to each element of slice in order.
func refEach(slice []string, fn func(string)) {
	for _, s := range slice {
		fn(s)
	}
}
// refPEach is the reference parallel Each implementation: it applies fn
// to each element of slice in its own goroutine and waits for all of
// them to finish. Elements are handed to goroutines as arguments so no
// loop variable is shared.
func refPEach(slice []string, fn func(string)) {
	var wg sync.WaitGroup
	wg.Add(len(slice))
	for _, s := range slice {
		go func(v string) {
			defer wg.Done()
			fn(v)
		}(s)
	}
	wg.Wait()
}
package netpbm
import (
"bufio"
"errors"
"fmt"
"image"
"image/color"
"io"
"strings"
"github.com/spakin/netpbm/npcolor"
)
// GrayM is an in-memory image whose At method returns npcolor.GrayM values.
type GrayM struct {
	// Pix holds the image's pixels as gray values. The pixel at (x, y)
	// starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*1].
	Pix []uint8
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect image.Rectangle
	// Model is the image's color model; its M field is the maximum
	// (100% white) sample value and is attached to colors returned by
	// GrayMAt.
	Model npcolor.GrayMModel
}
// ColorModel returns the GrayM image's color model.
func (p *GrayM) ColorModel() color.Model { return p.Model }
// Bounds returns the domain for which At can return non-zero color. The
// bounds do not necessarily contain the point (0, 0).
func (p *GrayM) Bounds() image.Rectangle { return p.Rect }
// At returns the color of the pixel at (x, y) as a color.Color.
// At(Bounds().Min.X, Bounds().Min.Y) returns the upper-left pixel of the grid.
// At(Bounds().Max.X-1, Bounds().Max.Y-1) returns the lower-right one.
func (p *GrayM) At(x, y int) color.Color {
	return p.GrayMAt(x, y)
}
// GrayMAt returns the color of the pixel at (x, y) as an npcolor.GrayM.
// Points outside the bounds yield the zero npcolor.GrayM.
func (p *GrayM) GrayMAt(x, y int) npcolor.GrayM {
	if !(image.Point{x, y}.In(p.Rect)) {
		return npcolor.GrayM{}
	}
	i := p.PixOffset(x, y)
	return npcolor.GrayM{Y: p.Pix[i], M: p.Model.M}
}
// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y); each pixel occupies one byte.
func (p *GrayM) PixOffset(x, y int) int {
	return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*1
}
// Set sets the pixel at (x, y) to a given color, expressed as a color.Color.
// Points outside the bounds are silently ignored.
func (p *GrayM) Set(x, y int, c color.Color) {
	if !(image.Point{x, y}.In(p.Rect)) {
		return
	}
	i := p.PixOffset(x, y)
	p.Pix[i] = p.Model.Convert(c).(npcolor.GrayM).Y
}
// SetGrayM sets the pixel at (x, y) to a given color, expressed as an
// npcolor.GrayM. When the color's maximum value matches the image's, the
// sample is stored directly; otherwise it is rescaled via Set.
func (p *GrayM) SetGrayM(x, y int, c npcolor.GrayM) {
	if !(image.Point{x, y}.In(p.Rect)) {
		return
	}
	i := p.PixOffset(x, y)
	if c.M == p.Model.M {
		p.Pix[i] = c.Y
	} else {
		p.Set(x, y, c)
	}
}
// SubImage returns an image representing the portion of the image p visible
// through r. The returned value shares pixels with the original image.
func (p *GrayM) SubImage(r image.Rectangle) image.Image {
	r = r.Intersect(p.Rect)
	// If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to
	// be inside either r1 or r2 if the intersection is empty. Without
	// explicitly checking for this, the Pix[i:] expression below can
	// panic.
	if r.Empty() {
		return &GrayM{Model: p.Model}
	}
	i := p.PixOffset(r.Min.X, r.Min.Y)
	return &GrayM{
		Pix:    p.Pix[i:],
		Stride: p.Stride,
		Rect:   r,
		// Propagate the color model: the original omitted it, leaving
		// the sub-image with M == 0 so GrayMAt/Set converted colors
		// against a zero maximum value.
		Model: p.Model,
	}
}
// Opaque reports whether the image is fully opaque. Grayscale images
// carry no alpha channel, so this is unconditionally true.
func (p *GrayM) Opaque() bool {
	return true
}
// MaxValue returns the maximum grayscale value allowed (Model.M).
func (p *GrayM) MaxValue() uint16 {
	return uint16(p.Model.M)
}
// Format identifies the image as a PGM image.
func (p *GrayM) Format() Format {
	return PGM
}
// HasAlpha indicates that there is no alpha channel.
func (p *GrayM) HasAlpha() bool {
	return false
}
// PromoteToRGBM generates an 8-bit color image that looks identical to
// the given grayscale image: each gray sample is replicated into the
// R, G, and B channels.
func (p *GrayM) PromoteToRGBM() *RGBM {
	rgb := NewRGBM(p.Bounds(), p.Model.M)
	for i := 0; i < len(p.Pix); i++ {
		gray := p.Pix[i]
		base := i * 3
		rgb.Pix[base] = gray
		rgb.Pix[base+1] = gray
		rgb.Pix[base+2] = gray
	}
	return rgb
}
// NewGrayM returns a new GrayM with the given bounds and maximum channel
// value. Pixels occupy one byte each, so the stride equals the width.
func NewGrayM(r image.Rectangle, m uint8) *GrayM {
	w := r.Dx()
	return &GrayM{
		Pix:    make([]uint8, w*r.Dy()),
		Stride: w,
		Rect:   r,
		Model:  npcolor.GrayMModel{M: m},
	}
}
// GrayM32 is an in-memory image whose At method returns npcolor.GrayM32 values.
type GrayM32 struct {
	// Pix holds the image's pixels, as gray values. The pixel at
	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*1].
	// Each pixel occupies two bytes, stored big-endian (high byte first).
	Pix []uint8
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect image.Rectangle
	// Model is the image's color model; its M field is the maximum
	// (100% white) sample value and is attached to colors returned by
	// GrayM32At.
	Model npcolor.GrayM32Model
}
// ColorModel returns the GrayM32 image's color model.
func (p *GrayM32) ColorModel() color.Model { return p.Model }
// Bounds returns the domain for which At can return non-zero color. The
// bounds do not necessarily contain the point (0, 0).
func (p *GrayM32) Bounds() image.Rectangle { return p.Rect }
// At returns the color of the pixel at (x, y) as a color.Color.
// At(Bounds().Min.X, Bounds().Min.Y) returns the upper-left pixel of the grid.
// At(Bounds().Max.X-1, Bounds().Max.Y-1) returns the lower-right one.
func (p *GrayM32) At(x, y int) color.Color {
	return p.GrayM32At(x, y)
}
// GrayM32At returns the color of the pixel at (x, y) as an npcolor.GrayM32.
// Points outside the bounds yield the zero npcolor.GrayM32. The 16-bit
// sample is reassembled from two big-endian bytes.
func (p *GrayM32) GrayM32At(x, y int) npcolor.GrayM32 {
	if !(image.Point{x, y}.In(p.Rect)) {
		return npcolor.GrayM32{}
	}
	i := p.PixOffset(x, y)
	return npcolor.GrayM32{
		Y: uint16(p.Pix[i+0])<<8 | uint16(p.Pix[i+1]),
		M: p.Model.M,
	}
}
// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y); each pixel occupies two bytes.
func (p *GrayM32) PixOffset(x, y int) int {
	return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*2
}
// Set sets the pixel at (x, y) to a given color, expressed as a color.Color.
// Points outside the bounds are silently ignored; the converted 16-bit
// sample is stored big-endian.
func (p *GrayM32) Set(x, y int, c color.Color) {
	if !(image.Point{x, y}.In(p.Rect)) {
		return
	}
	i := p.PixOffset(x, y)
	c1 := p.Model.Convert(c).(npcolor.GrayM32)
	p.Pix[i+0] = uint8(c1.Y >> 8)
	p.Pix[i+1] = uint8(c1.Y)
}
// SetGrayM32 sets the pixel at (x, y) to a given color, expressed as an
// npcolor.GrayM32. When the color's maximum value matches the image's,
// the sample is stored directly (big-endian); otherwise it is rescaled
// via Set.
func (p *GrayM32) SetGrayM32(x, y int, c npcolor.GrayM32) {
	if !(image.Point{x, y}.In(p.Rect)) {
		return
	}
	i := p.PixOffset(x, y)
	if c.M == p.Model.M {
		p.Pix[i+0] = uint8(c.Y >> 8)
		p.Pix[i+1] = uint8(c.Y)
	} else {
		p.Set(x, y, c)
	}
}
// SubImage returns an image representing the portion of the image p visible
// through r. The returned value shares pixels with the original image.
func (p *GrayM32) SubImage(r image.Rectangle) image.Image {
	r = r.Intersect(p.Rect)
	// If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to
	// be inside either r1 or r2 if the intersection is empty. Without
	// explicitly checking for this, the Pix[i:] expression below can
	// panic.
	if r.Empty() {
		return &GrayM32{Model: p.Model}
	}
	i := p.PixOffset(r.Min.X, r.Min.Y)
	return &GrayM32{
		Pix:    p.Pix[i:],
		Stride: p.Stride,
		Rect:   r,
		// Propagate the color model: the original omitted it, leaving
		// the sub-image with M == 0 so GrayM32At/Set converted colors
		// against a zero maximum value.
		Model: p.Model,
	}
}
// Opaque reports whether the image is fully opaque. Grayscale images
// carry no alpha channel, so this is unconditionally true.
func (p *GrayM32) Opaque() bool {
	return true
}
// MaxValue returns the maximum grayscale value allowed (Model.M).
func (p *GrayM32) MaxValue() uint16 {
	return uint16(p.Model.M)
}
// Format identifies the image as a PGM image.
func (p *GrayM32) Format() Format {
	return PGM
}
// HasAlpha indicates that there is no alpha channel.
func (p *GrayM32) HasAlpha() bool {
	return false
}
// PromoteToRGBM64 generates a 16-bit color image that looks identical to
// the given grayscale image. Pix stores big-endian 16-bit samples, so the
// loop runs byte-wise: base is the pixel index, ofs selects the high (0)
// or low (1) byte, and each byte is copied into the matching byte of the
// R, G, and B channels (an RGBM64 pixel occupies 6 bytes).
func (p *GrayM32) PromoteToRGBM64() *RGBM64 {
	rgb := NewRGBM64(p.Bounds(), p.Model.M)
	for i, g := range p.Pix {
		base := i / 2
		ofs := i % 2
		rgb.Pix[base*6+ofs+0] = g
		rgb.Pix[base*6+ofs+2] = g
		rgb.Pix[base*6+ofs+4] = g
	}
	return rgb
}
// NewGrayM32 returns a new GrayM32 with the given bounds and maximum
// channel value. Pixels occupy two bytes each, so the stride is twice
// the width.
func NewGrayM32(r image.Rectangle, m uint16) *GrayM32 {
	w := r.Dx()
	return &GrayM32{
		Pix:    make([]uint8, 2*w*r.Dy()),
		Stride: 2 * w,
		Rect:   r,
		Model:  npcolor.GrayM32Model{M: m},
	}
}
// decodeConfigPGMWithComments reads and parses a PGM header, either "raw"
// (binary) or "plain" (ASCII). Unlike decodeConfigPGM, it also returns any
// comments appearing in the file.
func decodeConfigPGMWithComments(r io.Reader) (image.Config, []string, error) {
	// We really want a bufio.Reader. If we were given one, use it. If
	// not, create a new one.
	br, ok := r.(*bufio.Reader)
	if !ok {
		br = bufio.NewReader(r)
	}
	nr := newNetpbmReader(br)
	// Parse the PGM header.
	header, ok := nr.GetNetpbmHeader()
	if !ok {
		err := nr.Err()
		if err == nil {
			err = errors.New("Invalid PGM header")
		}
		return image.Config{}, nil, err
	}
	// Store and return the image configuration, choosing an 8-bit or
	// 16-bit grayscale model based on the header's maximum sample value.
	var cfg image.Config
	cfg.Width = header.Width
	cfg.Height = header.Height
	if header.Maxval < 256 {
		cfg.ColorModel = npcolor.GrayMModel{M: uint8(header.Maxval)}
	} else {
		cfg.ColorModel = npcolor.GrayM32Model{M: uint16(header.Maxval)}
	}
	return cfg, header.Comments, nil
}
// decodeConfigPGM reads and parses a PGM header, either "raw"
// (binary) or "plain" (ASCII), discarding any header comments.
func decodeConfigPGM(r io.Reader) (image.Config, error) {
	img, _, err := decodeConfigPGMWithComments(r)
	return img, err
}
// decodePGMWithComments reads a complete "raw" (binary) PGM image. Unlike
// decodePGM, it also returns any comments appearing in the file.
func decodePGMWithComments(r io.Reader) (image.Image, []string, error) {
	// Read the image header, and use it to prepare a grayscale image.
	br := bufio.NewReader(r)
	config, comments, err := decodeConfigPGMWithComments(br)
	if err != nil {
		return nil, nil, err
	}
	// Create either a Gray or a Gray16 image.
	var img image.Image // Image to return
	var data []uint8 // Image data
	var maxVal uint // 100% white value
	switch model := config.ColorModel.(type) {
	case npcolor.GrayMModel:
		maxVal = uint(model.M)
		gray := NewGrayM(image.Rect(0, 0, config.Width, config.Height), uint8(maxVal))
		data = gray.Pix
		img = gray
	case npcolor.GrayM32Model:
		maxVal = uint(model.M)
		gray := NewGrayM32(image.Rect(0, 0, config.Width, config.Height), uint16(maxVal))
		data = gray.Pix
		img = gray
	default:
		panic("Unexpected color model")
	}
	// Raw PGM images are nice because we can read directly into the image
	// data. Loop until the pixel buffer is full; a short read with no
	// progress means the file was truncated.
	for len(data) > 0 {
		nRead, err := br.Read(data)
		if err != nil && err != io.EOF {
			return img, nil, err
		}
		if nRead == 0 {
			return img, nil, errors.New("Failed to read binary PGM data")
		}
		data = data[nRead:]
	}
	return img, comments, nil
}
// decodePGM reads a complete "raw" (binary) PGM image, discarding any
// comments found in the header.
func decodePGM(r io.Reader) (image.Image, error) {
	img, _, err := decodePGMWithComments(r)
	return img, err
}
// decodePGMPlainWithComments reads a complete "plain" (ASCII) PGM image.
// Unlike decodePGMPlain, it also returns any comments appearing in the file.
func decodePGMPlainWithComments(r io.Reader) (image.Image, []string, error) {
	// Read the image header, and use it to prepare a grayscale image.
	br := bufio.NewReader(r)
	config, comments, err := decodeConfigPGMWithComments(br)
	if err != nil {
		return nil, nil, err
	}
	var img image.Image // Image to return
	// Define a simple error handler.
	nr := newNetpbmReader(br)
	badness := func() (image.Image, []string, error) {
		// Something went wrong. Either we have an error code to
		// explain what or we make up a generic error message.
		err := nr.Err()
		if err == nil {
			err = errors.New("Failed to parse ASCII PGM data")
		}
		return img, nil, err
	}
	// Create either a Gray or a Gray16 image.
	var data []uint8 // Image data
	var maxVal int // 100% white value
	switch model := config.ColorModel.(type) {
	case npcolor.GrayMModel:
		maxVal = int(model.M)
		gray := NewGrayM(image.Rect(0, 0, config.Width, config.Height), uint8(maxVal))
		data = gray.Pix
		img = gray
	case npcolor.GrayM32Model:
		maxVal = int(model.M)
		gray := NewGrayM32(image.Rect(0, 0, config.Width, config.Height), uint16(maxVal))
		data = gray.Pix
		img = gray
	default:
		panic("Unexpected color model")
	}
	// Read ASCII base-10 integers into the image data.
	if !nr.GetASCIIData(maxVal, data) {
		return badness()
	}
	return img, comments, nil
}
// decodePGMPlain reads a complete "plain" (ASCII) PGM image, discarding
// any comments found in the header.
func decodePGMPlain(r io.Reader) (image.Image, error) {
	img, _, err := decodePGMPlainWithComments(r)
	return img, err
}
// Indicate that we can decode both raw (magic "P5") and plain (magic
// "P2") PGM files by registering them with the standard image package.
func init() {
	image.RegisterFormat("pgm", "P5", decodePGM, decodeConfigPGM)
	image.RegisterFormat("pgm", "P2", decodePGMPlain, decodeConfigPGM)
}
// encodePGM writes an arbitrary image in PGM format: magic number,
// comment lines, dimensions, maximum value, then the sample data at the
// precision implied by opts.MaxValue.
func encodePGM(w io.Writer, img image.Image, opts *EncodeOptions) error {
	// Magic number: P2 for plain (ASCII), P5 for raw (binary).
	magic := "P5"
	if opts.Plain {
		magic = "P2"
	}
	fmt.Fprintln(w, magic)
	// Comments must each stay on one header line; fold any embedded
	// newlines into spaces.
	sanitize := strings.NewReplacer("\n", " ", "\r", " ")
	for _, cmt := range opts.Comments {
		fmt.Fprintf(w, "# %s\n", sanitize.Replace(cmt))
	}
	bounds := img.Bounds()
	fmt.Fprintf(w, "%d %d\n", bounds.Dx(), bounds.Dy())
	fmt.Fprintf(w, "%d\n", opts.MaxValue)
	if opts.MaxValue < 256 {
		return encodeGrayData(w, img, opts)
	}
	return encodeGray32Data(w, img, opts)
}
// encodeGrayData writes image data as 8-bit samples.
func encodeGrayData(w io.Writer, img image.Image, opts *EncodeOptions) error {
	// In the background, write each 8-bit color sample into a channel.
	rect := img.Bounds()
	width := rect.Max.X - rect.Min.X
	samples := make(chan uint16, width)
	go func() {
		cm := npcolor.GrayMModel{M: uint8(opts.MaxValue)}
		for y := rect.Min.Y; y < rect.Max.Y; y++ {
			for x := rect.Min.X; x < rect.Max.X; x++ {
				c := cm.Convert(img.At(x, y)).(npcolor.GrayM)
				samples <- uint16(c.Y)
			}
		}
		close(samples)
	}()
	// In the foreground, consume grayscale samples and write them to the
	// image file.
	// NOTE(review): if the consumer returns early on a write error, the
	// producer goroutine above could block forever on its channel send —
	// confirm that writePlainData/writeRawData drain the channel.
	if opts.Plain {
		return writePlainData(w, samples)
	}
	return writeRawData(w, samples, 1)
}
// encodeGray32Data writes image data as 16-bit samples.
func encodeGray32Data(w io.Writer, img image.Image, opts *EncodeOptions) error {
	// In the background, write each 16-bit color sample into a channel.
	rect := img.Bounds()
	width := rect.Max.X - rect.Min.X
	samples := make(chan uint16, width)
	go func() {
		cm := npcolor.GrayM32Model{M: opts.MaxValue}
		for y := rect.Min.Y; y < rect.Max.Y; y++ {
			for x := rect.Min.X; x < rect.Max.X; x++ {
				c := cm.Convert(img.At(x, y)).(npcolor.GrayM32)
				samples <- c.Y
			}
		}
		close(samples)
	}()
	// In the foreground, consume grayscale samples and write them to the
	// image file (two bytes per sample).
	// NOTE(review): same producer-goroutine caveat as encodeGrayData.
	if opts.Plain {
		return writePlainData(w, samples)
	}
	return writeRawData(w, samples, 2)
} | pgm.go | 0.899298 | 0.604107 | pgm.go | starcoder |
package main
import (
"fmt"
"strconv"
"os"
"math"
"math/cmplx"
"image"
"image/png"
"image/color"
)
// inMandelSet returns the number of iterations needed to show that the
// point (x, y) on the complex plane escapes the Mandelbrot set
// (|z| > 2 under z -> z^2 + c). If max_iteration iterations pass without
// escaping, max_iteration is returned and the point is assumed to be in
// the set.
func inMandelSet(x, y float64, max_iteration int) int {
	var (
		k int         = 0
		z complex128  = 0
		c complex128  = complex(x, y)
	)
	for k = 0; k < max_iteration; k++ {
		// z*z is both faster and numerically cleaner than the original
		// cmplx.Pow(z, 2), which routes through Exp(2*Log(z)).
		z = z*z + c
		if cmplx.Abs(z) > 2 {
			return k
		}
	}
	return k
}
// drawDot adds a colored pixel to the input image at (q, r).
// The color derives from the escape-time i of the sample point
// (valueX, valueY): blue always scales with i; green is added when
// i > m/2 (the 3*m/2 upper bound can never trigger since inMandelSet
// returns at most m — NOTE(review): confirm whether a different band was
// intended); red is added for m/3 < i < m/2. Alpha is fully opaque.
func drawDot(img *image.RGBA, q, r int, valueX, valueY float64, m int) {
	var myRed, myGreen, myBlue uint8
	i := inMandelSet(valueX, valueY, m)
	if (i > (m/2)) && (i < (3*m/2)) {myGreen = uint8( i * 255 / m)}
	if (i > (m/3)) && (i < (m/2)) {myRed = uint8( i * 255 / m)}
	myBlue = uint8( i * 255 / m )
	img.Set(q, r, color.RGBA{myRed, myGreen, myBlue , 255})
	return
}
// main renders a deep zoom of the Mandelbrot set centred on (xo, yo)
// with half-width Re, and saves the result as a PNG whose filename
// encodes the rendering parameters.
func main() {
	// Rendering parameters: image size, iteration budget, and the
	// centre (xo, yo) and half-width Re of the viewed window.
	const (
		width         int     = 4000
		height        int     = 4000
		max_iteration int     = 1000
		xo            float64 = 0.001643721971153
		yo            float64 = -0.822467633298876
		Re            float64 = 0.00000000001
	)
	// Size of one pixel step in the complex plane.
	deltaX := math.Abs(2*Re) / float64(width)
	deltaY := math.Abs(2*Re) / float64(height)
	img := image.NewRGBA(image.Rect(0, 0, width, height))
	// Color every pixel by its escape time, printing a countdown of the
	// remaining columns as progress output. (The original shadowed the
	// outer q with the loop variable; distinct names are used instead.)
	for col := 0; col < width; col++ {
		for row := 0; row < height; row++ {
			valueX := float64(col)*deltaX + xo - Re
			valueY := float64(row)*deltaY + yo - Re
			drawDot(img, col, row, valueX, valueY, max_iteration)
		}
		fmt.Println(width - col - 1)
	}
	// Build a filename embedding the input values so output files can be
	// told apart later.
	xoStr := strconv.FormatFloat(xo, 'E', -1, 64)
	yoStr := strconv.FormatFloat(yo, 'E', -1, 64)
	ReStr := strconv.FormatFloat(Re, 'E', -1, 64)
	itStr := strconv.Itoa(max_iteration)
	filename := "Mandelbrot Set "+"x=("+xoStr+") y=("+yoStr+") r=("+ReStr+") iterations=("+itStr+").png"
	fmt.Println("Image has been created! \n Filename: "+filename)
	filepath := "coolPictures/"+filename
	// Write the PNG, reporting failures instead of silently ignoring
	// them (the original discarded both the open and encode errors).
	f, err := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		fmt.Println("could not create output file:", err)
		return
	}
	defer f.Close()
	if err := png.Encode(f, img); err != nil {
		fmt.Println("could not encode PNG:", err)
	}
}
// TODO: Add parallelism for multicore processors.
package go_solve_kit
import (
"sort"
"strconv"
)
// Int wraps the builtin int with helper methods.
type Int int
// IntArray wraps []Int with collection helpers.
type IntArray []Int
// ValueOf returns the underlying int value.
func (i Int) ValueOf() int {
	return int(i)
}
// ToString returns the decimal representation as a String.
func (i Int) ToString() String {
	return String(strconv.Itoa(i.ValueOf()))
}
// Length returns the number of elements as an Int.
func (array IntArray) Length() Int {
	return Int(len(array))
}
// Map applies lambda to every (value, index) pair and collects the
// results into a TypeArray, preserving order.
func (array IntArray) Map(lambda func(v Int, i int) interface{}) TypeArray {
	var output TypeArray
	for i, v := range array {
		output = append(output, Type{lambda(v, i)})
	}
	return output
}
// ForEach invokes lambda for every (value, index) pair in order.
func (array IntArray) ForEach(lambda func(s Int, i int)) {
	for i, v := range array {
		lambda(v, i)
	}
}
// Filter returns the elements for which lambda reports true, preserving
// their original order. The result is nil when nothing matches.
func (array IntArray) Filter(lambda func(v Int, i int) bool) IntArray {
	var kept IntArray
	for idx, item := range array {
		if lambda(item, idx) {
			kept = append(kept, item)
		}
	}
	return kept
}
// ToTypeArray converts the array to a TypeArray of unwrapped int values.
func (array IntArray) ToTypeArray() TypeArray {
	var output TypeArray
	for _, v := range array {
		output = append(output, Type{v.ValueOf()})
	}
	return output
}
// ToStringArray converts the array to a StringArray of decimal strings.
func (array IntArray) ToStringArray() StringArray {
	var output StringArray
	for _, v := range array {
		output = append(output, v.ToString())
	}
	return output
}
// Sum returns the sum of all elements (zero for an empty array).
func (array IntArray) Sum() Int {
	var sum Int
	for _, v := range array {
		sum += v
	}
	return sum
}
// Fill returns a NEW IntArray of the same length with every element set
// to v; the receiver itself is not modified.
func (array IntArray) Fill(v Int) IntArray {
	return NewArray(len(array)).Map(func(_ Type, _ int) interface{} {
		return v
	}).ToIntArray()
}
// Every reports whether lambda is true for all (value, index) pairs.
// It is vacuously true for an empty array.
func (array IntArray) Every(lambda func(v Int, i int) bool) bool {
	for i, val := range array {
		if !lambda(val, i) {
			return false
		}
	}
	return true
}
// Some reports whether lambda is true for at least one (value, index)
// pair. It is false for an empty array.
func (array IntArray) Some(lambda func(v Int, i int) bool) bool {
	for i, val := range array {
		if lambda(val, i) {
			return true
		}
	}
	return false
}
// Contains reports whether v is present in the array.
func (array IntArray) Contains(v int) bool {
	return array.Some(func(i Int, _ int) bool {
		return i.ValueOf() == v
	})
}
// FindIndex returns the index of the first element for which lambda
// reports true, or -1 when none matches.
func (array IntArray) FindIndex(lambda func(v Int, i int) bool) Int {
	for i, val := range array {
		if lambda(val, i) {
			return Int(i)
		}
	}
	return -1
}
// IndexOf returns the index of the first occurrence of v, or -1 when v
// is not present.
func (array IntArray) IndexOf(v int) Int {
	return array.FindIndex(func(i Int, _ int) bool {
		return i.ValueOf() == v
	})
}
// Append adds v to the end of the array in place.
func (array *IntArray) Append(v int) {
	*array = append(*array, Int(v))
}
// Pop removes and returns the last element. Panics on an empty array.
func (array *IntArray) Pop() Int {
	output := (*array)[array.Length()-1]
	*array = (*array)[:array.Length()-1]
	return output
}
// Enqueue prepends v to the front of the array in place (O(n) copy).
func (array *IntArray) Enqueue(v int) {
	*array = append(IntArray{Int(v)}, (*array)...)
}
// Dequeue removes and returns the first element. Panics on an empty array.
func (array *IntArray) Dequeue() Int {
	output := (*array)[0]
	*array = (*array)[1:]
	return output
}
// First returns the first element. Panics on an empty array.
func (array IntArray) First() Int {
	return array[0]
}
// Last returns the last element. Panics on an empty array.
func (array IntArray) Last() Int {
	return array[array.Length()-1]
}
// Remove deletes the first occurrence of v from the array in place.
// It is a no-op when v is not present; previously a missing value caused
// a slice-bounds panic because IndexOf's -1 was used as a slice index.
func (array *IntArray) Remove(v int) {
	i := array.IndexOf(v)
	if i < 0 {
		return
	}
	*array = append((*array)[:i], (*array)[i+1:]...)
}
// Sort sorts the array in place in ascending order (stable).
func (array IntArray) Sort() {
	sort.SliceStable(array, func(i, j int) bool {
		return array[i] < array[j]
	})
}
// SortBy sorts the array in place (stable) using lambda as the
// less-than comparison between two elements.
func (array IntArray) SortBy(lambda func(x, y Int) bool) {
	sort.SliceStable(array, func(i, j int) bool {
		return lambda(array[i], array[j])
	})
}
// Copy returns a new IntArray with the same elements (shallow copy).
func (array IntArray) Copy() IntArray {
	return append(make(IntArray, 0), array...)
}
// Slice returns a copy of array[start:end]. A non-positive end counts
// back from the length (0 means "through the last element").
// NOTE(review): start and end are not range-checked; out-of-range values
// panic with a slice bounds error.
func (array IntArray) Slice(start, end int) IntArray {
	if end <= 0 {
		end = array.Length().ValueOf() + end
	}
	return append(make(IntArray, 0), array[start:end]...)
} | Int.go | 0.567697 | 0.425247 | Int.go | starcoder |
package iso20022
// CreditTransfer8 is an ISO 20022 message component: a payment instrument
// between a debtor and a creditor, which flows through one or more
// financial institutions or systems. All fields except CreditorAgent and
// CreditorAccount are optional in the XML schema.
type CreditTransfer8 struct {
	// Information supplied to enable the matching of an entry with the items that the transfer is intended to settle, such as commercial invoices in an accounts' receivable system.
	Reference *Max35Text `xml:"Ref,omitempty"`
	// Party that owes an amount of money to the (ultimate) creditor. In the context of the payment model, the debtor is also the debit account owner.
	Debtor *PartyIdentification113 `xml:"Dbtr,omitempty"`
	// Unambiguous identification of the account of the debtor to which a debit entry will be made as a result of the transaction.
	DebtorAccount *AccountIdentificationAndName5 `xml:"DbtrAcct,omitempty"`
	// Financial institution servicing an account for the debtor.
	DebtorAgent *FinancialInstitutionIdentification10 `xml:"DbtrAgt,omitempty"`
	// Identifies the account of the debtor's agent.
	DebtorAgentAccount *AccountIdentificationAndName5 `xml:"DbtrAgtAcct,omitempty"`
	// Agent between the debtor's agent and the creditor's agent.
	IntermediaryAgent1 *FinancialInstitutionIdentification10 `xml:"IntrmyAgt1,omitempty"`
	// Unambiguous identification of the account of the intermediary agent 1 at its servicing agent in the payment chain.
	IntermediaryAgent1Account *AccountIdentificationAndName5 `xml:"IntrmyAgt1Acct,omitempty"`
	// Agent between the debtor's agent and the creditor's agent.
	IntermediaryAgent2 *FinancialInstitutionIdentification10 `xml:"IntrmyAgt2,omitempty"`
	// Unambiguous identification of the account of the intermediary agent 2 at its servicing agent in the payment chain.
	IntermediaryAgent2Account *AccountIdentificationAndName5 `xml:"IntrmyAgt2Acct,omitempty"`
	// Financial institution servicing an account for the creditor.
	CreditorAgent *FinancialInstitutionIdentification10 `xml:"CdtrAgt"`
	// Unambiguous identification of the account of the creditor agent at its servicing agent to which a credit entry will be made as a result of the payment transaction.
	CreditorAgentAccount *AccountIdentificationAndName5 `xml:"CdtrAgtAcct,omitempty"`
	// Party that receives an amount of money from the debtor. In the context of the payment model, the creditor is also the credit account owner.
	Creditor *PartyIdentification113 `xml:"Cdtr,omitempty"`
	// Unambiguous identification of the account of the creditor to which a credit entry will be posted as a result of the payment transaction.
	CreditorAccount *AccountIdentificationAndName5 `xml:"CdtrAcct"`
}
// The methods below are builder-style mutators: each Add* allocates the
// corresponding nested component, stores it on the receiver, and returns
// it so the caller can populate it.
func (c *CreditTransfer8) SetReference(value string) {
	c.Reference = (*Max35Text)(&value)
}
func (c *CreditTransfer8) AddDebtor() *PartyIdentification113 {
	c.Debtor = new(PartyIdentification113)
	return c.Debtor
}
func (c *CreditTransfer8) AddDebtorAccount() *AccountIdentificationAndName5 {
	c.DebtorAccount = new(AccountIdentificationAndName5)
	return c.DebtorAccount
}
func (c *CreditTransfer8) AddDebtorAgent() *FinancialInstitutionIdentification10 {
	c.DebtorAgent = new(FinancialInstitutionIdentification10)
	return c.DebtorAgent
}
func (c *CreditTransfer8) AddDebtorAgentAccount() *AccountIdentificationAndName5 {
	c.DebtorAgentAccount = new(AccountIdentificationAndName5)
	return c.DebtorAgentAccount
}
func (c *CreditTransfer8) AddIntermediaryAgent1() *FinancialInstitutionIdentification10 {
	c.IntermediaryAgent1 = new(FinancialInstitutionIdentification10)
	return c.IntermediaryAgent1
}
func (c *CreditTransfer8) AddIntermediaryAgent1Account() *AccountIdentificationAndName5 {
	c.IntermediaryAgent1Account = new(AccountIdentificationAndName5)
	return c.IntermediaryAgent1Account
}
func (c *CreditTransfer8) AddIntermediaryAgent2() *FinancialInstitutionIdentification10 {
	c.IntermediaryAgent2 = new(FinancialInstitutionIdentification10)
	return c.IntermediaryAgent2
}
func (c *CreditTransfer8) AddIntermediaryAgent2Account() *AccountIdentificationAndName5 {
	c.IntermediaryAgent2Account = new(AccountIdentificationAndName5)
	return c.IntermediaryAgent2Account
}
func (c *CreditTransfer8) AddCreditorAgent() *FinancialInstitutionIdentification10 {
	c.CreditorAgent = new(FinancialInstitutionIdentification10)
	return c.CreditorAgent
}
func (c *CreditTransfer8) AddCreditorAgentAccount() *AccountIdentificationAndName5 {
	c.CreditorAgentAccount = new(AccountIdentificationAndName5)
	return c.CreditorAgentAccount
}
func (c *CreditTransfer8) AddCreditor() *PartyIdentification113 {
	c.Creditor = new(PartyIdentification113)
	return c.Creditor
}
func (c *CreditTransfer8) AddCreditorAccount() *AccountIdentificationAndName5 {
	c.CreditorAccount = new(AccountIdentificationAndName5)
	return c.CreditorAccount
} | CreditTransfer8.go | 0.634204 | 0.572364 | CreditTransfer8.go | starcoder |
package convjson
import (
"encoding/json"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"github.com/pkg/errors"
)
// Value wraps an arbitrary Go value together with its classified
// ValueType, providing typed accessors (Int, String, Bool, ...) and
// path-based traversal via Get/Set.
type Value struct {
	typ ValueType     // classified type tag
	val reflect.Value // underlying value; the zero reflect.Value for TypeNil/TypeUnsupported
}
// ValueType classifies the kind of data a Value holds.
type ValueType int8
// All available value types.
const (
	TypeNil    = ValueType(0)
	TypeInt    = ValueType(1)
	TypeUint   = ValueType(2)
	TypeFloat  = ValueType(3)
	TypeBool   = ValueType(4)
	TypeString = ValueType(5)
	TypeArray  = ValueType(6)
	TypeMap    = ValueType(7)
	// NOTE(review): NewValue never produces TypeStruct (structs are
	// converted to maps); it is only referenced in type switches.
	TypeStruct      = ValueType(8)
	TypeUnsupported = ValueType(-1)
)
type (
	// Map is the canonical map representation used for traversal.
	Map = map[string]interface{}
	// Array is the canonical array representation used for traversal.
	Array = []interface{}
)
var (
	// Nil is the shared Value representing nil; it is also returned by Get
	// on any lookup error.
	Nil = NewValue(nil)
)
// NewValue wraps an arbitrary Go value in a *Value, classifying it by
// reflect.Kind. Pointers are dereferenced first; structs other than Value
// itself are converted into a Map via a JSON round-trip (and may panic if
// the value is not JSON-serializable, see convert2Map).
func NewValue(val interface{}) *Value {
	if val == nil {
		return newValue(TypeNil, reflect.Value{})
	}
	// get the value that val points to
	v := reflect.Indirect(reflect.ValueOf(val))
	switch v.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return newValue(TypeInt, v)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return newValue(TypeUint, v)
	case reflect.Float32, reflect.Float64:
		return newValue(TypeFloat, v)
	case reflect.Bool:
		return newValue(TypeBool, v)
	case reflect.String:
		return newValue(TypeString, v)
	case reflect.Array, reflect.Slice:
		return newValue(TypeArray, v)
	case reflect.Map:
		return newValue(TypeMap, v)
	case reflect.Struct:
		if v.Type() == reflect.TypeOf(Value{}) {
			// val is already a Value or *Value; return it as-is instead of
			// re-wrapping. The previous unconditional val.(*Value) assertion
			// panicked when a plain (non-pointer) Value was passed in.
			if pv, ok := val.(*Value); ok {
				return pv
			}
			vv := val.(Value)
			return &vv
		}
		return newValue(TypeMap, reflect.ValueOf(convert2Map(val)))
	default:
		return newValue(TypeUnsupported, reflect.Value{})
	}
}
// newValue assembles a *Value from an already-classified type tag and its
// backing reflect.Value.
func newValue(t ValueType, v reflect.Value) *Value {
	return &Value{typ: t, val: v}
}
// convert2Map converts an arbitrary value into a map[string]interface{} by
// round-tripping it through JSON. It panics when the value cannot be
// marshaled or does not decode into a JSON object.
func convert2Map(val interface{}) map[string]interface{} {
	encoded, err := json.Marshal(val)
	if err != nil {
		panic(err)
	}
	out := map[string]interface{}{}
	if err := json.Unmarshal(encoded, &out); err != nil {
		panic(err)
	}
	return out
}
// String converts the value to its string representation. Nil yields "";
// numeric and boolean values are formatted with strconv; container values
// are serialized to JSON. Any other type produces an error.
func (v Value) String() (string, error) {
	switch v.typ {
	case TypeNil:
		return "", nil
	case TypeInt:
		return strconv.FormatInt(v.val.Int(), 10), nil
	case TypeUint:
		return strconv.FormatUint(v.val.Uint(), 10), nil
	case TypeFloat:
		return strconv.FormatFloat(v.val.Float(), 'e', -1, 64), nil
	case TypeBool:
		return strconv.FormatBool(v.val.Bool()), nil
	case TypeString:
		return v.val.String(), nil
	case TypeMap, TypeStruct, TypeArray:
		return v.JSON()
	default:
		// v.val is the zero reflect.Value for TypeUnsupported values and
		// calling Type() on it panics; describe it defensively instead.
		desc := "invalid"
		if v.val.IsValid() {
			desc = v.val.Type().String()
		}
		return "", errors.Errorf("Failed to convert value with type %s into TypeString", desc)
	}
}
// MustString converts the value to a string, swallowing any conversion
// error and returning "" in that case.
func (v Value) MustString() string {
	s, _ := v.String()
	return s
}
// Int converts the value to int64. Nil yields 0; uints and floats are
// converted (floats truncated); booleans map to 1/0; strings are parsed
// base-10. Any other type produces an error.
func (v Value) Int() (int64, error) {
	switch v.typ {
	case TypeNil:
		return 0, nil
	case TypeInt:
		return v.val.Int(), nil
	case TypeUint:
		return int64(v.val.Uint()), nil
	case TypeFloat:
		return int64(v.val.Float()), nil
	case TypeBool:
		if v.val.Bool() {
			return 1, nil
		}
		return 0, nil
	case TypeString:
		return strconv.ParseInt(v.val.String(), 10, 64)
	default:
		// v.val is the zero reflect.Value for TypeUnsupported values and
		// calling Type() on it panics; describe it defensively instead.
		desc := "invalid"
		if v.val.IsValid() {
			desc = v.val.Type().String()
		}
		return 0, errors.Errorf("Failed to convert value with type %s into TypeInt", desc)
	}
}
// MustInt converts the value to int64, ignoring any conversion error
// (0 is returned on failure).
func (v Value) MustInt() int64 {
	n, _ := v.Int()
	return n
}
// Uint converts the value to uint64 by converting through Int first; a
// negative source therefore wraps around modulo 2^64.
func (v Value) Uint() (uint64, error) {
	n, err := v.Int()
	return uint64(n), err
}
// MustUint converts the value to uint64, ignoring any conversion error
// (0 is returned on failure). It delegates to Uint rather than duplicating
// its conversion logic, so both methods always agree.
func (v Value) MustUint() uint64 {
	val, _ := v.Uint()
	return val
}
// Float converts the value to float64. Nil yields 0; ints and uints are
// converted; booleans map to 1/0; strings are parsed as decimal floats.
// Any other type produces an error.
func (v Value) Float() (float64, error) {
	switch v.typ {
	case TypeNil:
		return 0, nil
	case TypeInt:
		return float64(v.val.Int()), nil
	case TypeUint:
		return float64(v.val.Uint()), nil
	case TypeFloat:
		return v.val.Float(), nil
	case TypeBool:
		if v.val.Bool() {
			return 1, nil
		}
		return 0, nil
	case TypeString:
		return strconv.ParseFloat(v.val.String(), 64)
	default:
		// v.val is the zero reflect.Value for TypeUnsupported values and
		// calling Type() on it panics; describe it defensively instead.
		desc := "invalid"
		if v.val.IsValid() {
			desc = v.val.Type().String()
		}
		return 0, errors.Errorf("Failed to convert value with type %s into TypeFloat", desc)
	}
}
// MustFloat converts the value to float64, ignoring any conversion error
// (0 is returned on failure). The result is already a float64, so no
// further conversion is needed.
func (v Value) MustFloat() float64 {
	val, _ := v.Float()
	return val
}
// Bool converts the value to a boolean. Nil yields false; numeric values
// are true only when exactly 1; strings are true only when exactly "true".
// Any other type produces an error.
func (v Value) Bool() (bool, error) {
	switch v.typ {
	case TypeNil:
		return false, nil
	case TypeInt:
		return v.val.Int() == 1, nil
	case TypeUint:
		return v.val.Uint() == 1, nil
	case TypeFloat:
		return v.val.Float() == 1, nil
	case TypeBool:
		return v.val.Bool(), nil
	case TypeString:
		return v.val.String() == "true", nil
	default:
		// v.val is the zero reflect.Value for TypeUnsupported values and
		// calling Type() on it panics; describe it defensively instead.
		desc := "invalid"
		if v.val.IsValid() {
			desc = v.val.Type().String()
		}
		return false, errors.Errorf("failed to convert value with type %s into BoolType", desc)
	}
}
// MustBool converts the value to a boolean, ignoring any conversion error
// (false is returned on failure).
func (v Value) MustBool() bool {
	b, _ := v.Bool()
	return b
}
// RawValue returns the underlying reflect.Value.
// Note: for values classified as TypeNil or TypeUnsupported this is the
// zero reflect.Value (IsValid() == false).
func (v Value) RawValue() reflect.Value {
	return v.val
}
// JSON serializes container values (struct, map, array) into a JSON
// string; any other type yields an error.
func (v Value) JSON() (string, error) {
	switch v.typ {
	case TypeStruct, TypeMap, TypeArray:
		data, err := json.Marshal(v.val.Interface())
		return string(data), err
	}
	return "", errors.New("Failed to serialize into json")
}
// IsNil reports whether the value was constructed from nil.
func (v Value) IsNil() bool {
	return v.typ == TypeNil
}
// Set assigns value at the location addressed by path. An empty path
// replaces the receiver's own content. The path is split on the
// configured delimiter and applied recursively through nested Map levels.
func (v *Value) Set(path string, value interface{}, options ...OptionFunc) error {
	if path == "" {
		temp := NewValue(value)
		v.typ = temp.typ
		v.val = temp.val
		return nil
	}
	// TODO: check whether it's a valid type
	opt := newOption()
	opt.load(options...)
	keys := strings.Split(path, opt.delimiter)
	key := keys[0]
	if v.typ != TypeMap {
		return errors.Errorf("path %s is not available", path)
	}
	// use the checked assertion: the underlying map could be some other
	// map type (e.g. map[string]string) we cannot traverse
	mdata, ok := v.val.Interface().(Map)
	if !ok {
		return errors.Errorf("path %s is not available", path)
	}
	// TODO: support set array element
	keyname, _, err := parseKey(key)
	if err != nil {
		return err
	}
	child := NewValue(mdata[keyname])
	// recurse with the caller's delimiter and options, and propagate the
	// error (previously the join was hard-coded to "." and both the
	// options and the recursive error were dropped)
	if err := child.Set(strings.Join(keys[1:], opt.delimiter), value, options...); err != nil {
		return err
	}
	mdata[keyname] = child
	v.val = reflect.ValueOf(mdata)
	return nil
}
// Get resolves a delimiter-separated path (with optional array indexes,
// e.g. "a.b[2].c") and returns the addressed value. On any failure it
// returns the shared Nil value and an error.
func (v Value) Get(path string, options ...OptionFunc) (*Value, error) {
	opt := newOption()
	opt.load(options...)
	keys := strings.Split(path, opt.delimiter)
	cur := &v
	for _, key := range keys {
		if cur.typ != TypeMap {
			return Nil, errors.Errorf("path %s is not available", path)
		}
		// checked assertion: other map types cannot be traversed
		mdata, ok := cur.val.Interface().(Map)
		if !ok {
			return Nil, errors.Errorf("path %s is not available", path)
		}
		keyname, indexes, err := parseKey(key)
		if err != nil {
			return Nil, err
		}
		cur = NewValue(mdata[keyname])
		curPath := keyname
		for _, index := range indexes {
			// guard the type first: a missing key produces a TypeNil value
			// whose reflect.Value is invalid, and calling Interface() on it
			// would panic
			if cur.typ != TypeArray {
				return Nil, fmt.Errorf("invalid path: %s is not an array", curPath)
			}
			array, ok := cur.val.Interface().(Array)
			if !ok {
				return Nil, fmt.Errorf("invalid path: %s is not an array", curPath)
			}
			if index < 0 || index >= len(array) {
				return Nil, fmt.Errorf("invalid path: index %d out of range in %s", index, curPath)
			}
			cur = NewValue(array[index])
			curPath = fmt.Sprintf("%s[%v]", curPath, index)
		}
	}
	return cur, nil
}
// keyPattern is intended to validate keys of the form "name[1][2]...".
// NOTE(review): the original pattern was corrupted into a placeholder that
// matches everything, so validation is effectively disabled; the intended
// stricter pattern must be confirmed before tightening it.
var keyPattern = regexp.MustCompile(`.*`)

// parseKey parses a key such as "a[1][2]" into its name ("a") and index
// list ([]int{1, 2}). A key without brackets yields the key itself and no
// indexes. A non-numeric or empty index (e.g. "a[x]" or "a[]") is an
// error rather than being silently treated as index 0.
func parseKey(key string) (keyname string, indexes []int, err error) {
	if !keyPattern.Match([]byte(key)) {
		err = fmt.Errorf("invalid key: doesn't match '%s'", keyPattern.String())
		return
	}
	indx := strings.Index(key, "[")
	if indx < 0 {
		keyname = key
		return
	}
	keyname = key[:indx]
	var numStr string
	for i := indx; i < len(key); i++ {
		switch key[i] {
		case '[':
			// start of an index group
		case ']':
			index, perr := strconv.ParseInt(numStr, 10, 64)
			if perr != nil {
				err = fmt.Errorf("invalid index %q in key %q", numStr, key)
				return
			}
			indexes = append(indexes, int(index))
			numStr = ""
		default:
			numStr += string(key[i])
		}
	}
	return
}
// MarshalJSON implements Marshaler interface.
func (v Value) MarshalJSON() ([]byte, error) {
return json.Marshal(v.val.Interface())
} | convjson/convjson.go | 0.660063 | 0.427755 | convjson.go | starcoder |
package pi
import (
"fmt" // Used for error formatting
"math/rand" // Used for random number generation in Monte Carlo method
"runtime" // Used to get information on available CPUs
"time" // Used for seeding the random number generation
)
// MonteCarloPi approximates the value of pi using the Monte Carlo method:
// it draws randomPoints uniform points in the unit square and counts how
// many fall inside the unit quarter-circle; that ratio approaches pi/4 as
// randomPoints grows.
// More details available at https://en.wikipedia.org/wiki/Monte_Carlo_method.
func MonteCarloPi(randomPoints int) float64 {
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	inside := 0
	for i := 0; i < randomPoints; i++ {
		x := rnd.Float64()
		y := rnd.Float64()
		if x*x+y*y <= 1 {
			inside++
		}
	}
	return float64(inside) / float64(randomPoints) * 4
}
// MonteCarloPiConcurrent approximates the value of pi with the Monte Carlo
// method, spreading the sampling across one goroutine per available CPU.
// More details on the Monte Carlo method available at
// https://en.wikipedia.org/wiki/Monte_Carlo_method, and on goroutine
// parallelization at https://go.dev/doc/effective_go#parallel.
func MonteCarloPiConcurrent(n int) (float64, error) {
	workers := runtime.GOMAXPROCS(0)
	results := make(chan int, workers)
	// carve the n samples into chunks of approximately equal size
	chunks, err := splitInt(n, workers)
	if err != nil {
		return 0, err
	}
	// fan out: one sampling goroutine per chunk
	for _, chunk := range chunks {
		go drawPoints(chunk, results)
	}
	// fan in: sum the per-worker hit counts
	hits := 0
	for i := 0; i < workers; i++ {
		hits += <-results
	}
	return float64(hits) / float64(n) * 4, nil
}
// drawPoints samples n uniformly random points in [0, 1) x [0, 1) and
// sends through c the number of points that fell within the unit circle
// centered at the origin.
func drawPoints(n int, c chan<- int) {
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	hits := 0
	for i := 0; i < n; i++ {
		x := rnd.Float64()
		y := rnd.Float64()
		if x*x+y*y <= 1 {
			hits++
		}
	}
	c <- hits
}
// splitInt splits the integer x into a slice of n integers whose sizes
// are as uniform as possible.
// For example, splitInt(10, 3) returns []int{4, 3, 3}, nil.
// It returns an error when n is not positive (the original panicked with
// a division by zero for n == 0) or when x < n (some chunk would be
// empty); the previous message for the latter case wrongly read
// "x must be < n".
func splitInt(x int, n int) ([]int, error) {
	if n <= 0 {
		return nil, fmt.Errorf("n must be > 0 - given value is n=%d", n)
	}
	if x < n {
		return nil, fmt.Errorf("x must be >= n - given values are x=%d, n=%d", x, n)
	}
	split := make([]int, n)
	quotient, remainder := x/n, x%n
	for i := range split {
		split[i] = quotient
		if i < remainder {
			// distribute the remainder over the first chunks
			split[i]++
		}
	}
	return split, nil
}
package export
import "github.com/prometheus/client_golang/prometheus"
// CoordinationExporter contains all the Prometheus metrics that can be
// gathered from the Druid coordinator service. (The previous comment said
// "Jetty service" — an apparent copy-paste from another exporter.)
// The `description` struct tags mirror each metric's help text.
type CoordinationExporter struct {
	TierTotalCapacity           *prometheus.GaugeVec `description:"Total capacity in bytes available in each tier."`
	TierRequiredCapacity        *prometheus.GaugeVec `description:"Total capacity in bytes required in each tier."`
	TierReplicationFactor       *prometheus.GaugeVec `description:"Configured maximum replication factor in each tier."`
	TierHistoricalCount         *prometheus.GaugeVec `description:"Number of available historical nodes in each tier."`
	SegmentUnderReplicatedCount *prometheus.GaugeVec `description:"Number of segments (including replicas) left to load until segments that should be loaded in the cluster are available for queries."`
	SegmentUnavailableCount     *prometheus.GaugeVec `description:"Number of segments (not including replicas) left to load until segments that should be loaded in the cluster are available for queries."`
	SegmentOvershadowedCount    *prometheus.GaugeVec `description:"Number of overshadowed segments."`
	SegmentCount                *prometheus.GaugeVec `description:"Number of used segments belonging to a data source. Emitted only for data sources to which at least one used segment belongs."`
	SegmentSize                 *prometheus.GaugeVec `description:"Total size of used segments in a data source. Emitted only for data sources to which at least one used segment belongs."`
	SegmentDropQueueCount       *prometheus.GaugeVec `description:"Number of segments to drop."`
	SegmentLoadQueueCount       *prometheus.GaugeVec `description:"Number of segments to load."`
	SegmentLoadQueueFailed      *prometheus.GaugeVec `description:"Number of segments that failed to load."`
	SegmentLoadQueueSize        *prometheus.GaugeVec `description:"Size in bytes of segments to load. "`
	SegmentCostNormalized       *prometheus.GaugeVec `description:"Used in cost balancing. The normalized cost of hosting segments."`
	SegmentCostNormalization    *prometheus.GaugeVec `description:"Used in cost balancing. The normalization of hosting segments."`
	SegmentCostRaw              *prometheus.GaugeVec `description:"Used in cost balancing. The raw cost of hosting segments."`
	SegmentUnneededCount        *prometheus.GaugeVec `description:"Number of segments dropped due to being marked as unused."`
	SegmentDeletedCount         *prometheus.GaugeVec `description:"Number of segments dropped due to rules."`
	SegmentDroppedCount         *prometheus.GaugeVec `description:"Number of segments dropped due to being overshadowed."`
	SegmentMovedCount           *prometheus.GaugeVec `description:"Number of segments moved in the cluster."`
	SegmentAssignedCount        *prometheus.GaugeVec `description:"number of segments assigned to be loaded in the cluster"`
}
// NewCoordinationExporter returns a new Jetty exporter object
func NewCoordinationExporter() *CoordinationExporter {
ce := &CoordinationExporter{
TierTotalCapacity: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "tier_total_capacity",
Help: "Total capacity in bytes available in each tier.",
}, []string{"tier"}),
TierRequiredCapacity: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "tier_required_capacity",
Help: "Total capacity in bytes required in each tier.",
}, []string{"tier"}),
TierHistoricalCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "tier_historical_count",
Help: "Number of available historical nodes in each tier.",
}, []string{"tier"}),
TierReplicationFactor: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "tier_replication_factor",
Help: "Configured maximum replication factor in each tier.",
}, []string{"tier"}),
SegmentCostNormalization: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_cost_normalization",
Help: "Used in cost balancing. The normalization of hosting segments.",
}, []string{"tier"}),
SegmentCostNormalized: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_cost_normalized",
Help: "Used in cost balancing. The normalized cost of hosting segments.",
}, []string{"tier"}),
SegmentCostRaw: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_cost_raw",
Help: "Used in cost balancing. The raw cost of hosting segments.",
}, []string{"tier"}),
SegmentCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_count",
Help: "Number of used segments belonging to a data source. Emitted only for data sources to which at least one used segment belongs.",
}, []string{"dataSource"}),
SegmentDeletedCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_deleted_count",
Help: "Number of segments dropped due to rules.",
}, []string{"tier"}),
SegmentDropQueueCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_drop_queue_count",
Help: "Number of segments to drop.",
}, []string{"server"}),
SegmentDroppedCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_dropped_count",
Help: "Number of segments dropped due to being overshadowed.",
}, []string{"tier"}),
SegmentLoadQueueCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_load_queue_count",
Help: "Number of segments to load.",
}, []string{"server"}),
SegmentLoadQueueFailed: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_load_queue_failed",
Help: "Number of segments that failed to load.",
}, []string{"server"}),
SegmentLoadQueueSize: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_load_queue_size",
Help: "Size in bytes of segments to load",
}, []string{"server"}),
SegmentMovedCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_moved_count",
Help: "Number of segments moved in the cluster.",
}, []string{"tier"}),
SegmentOvershadowedCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_overshadowed_count",
Help: "Number of overshadowed segments.",
}, []string{}),
SegmentSize: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_size",
Help: "Total size of used segments in a data source. Emitted only for data sources to which at least one used segment belongs.",
}, []string{"dataSource"}),
SegmentUnavailableCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_unavailable_count",
Help: "Number of segments (not including replicas) left to load until segments that should be loaded in the cluster are available for queries.",
}, []string{"dataSource"}),
SegmentUnderReplicatedCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_underreplicated_count",
Help: "Number of segments (including replicas) left to load until segments that should be loaded in the cluster are available for queries.",
}, []string{"tier", "dataSource"}),
SegmentUnneededCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_unneeded_count",
Help: "Number of segments dropped due to being marked as unused.",
}, []string{"tier"}),
SegmentAssignedCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "druid",
Subsystem: "coordinator",
Name: "segment_assigned_count",
Help: "number of segments assigned to be loaded in the cluster",
}, []string{"tier"}),
}
// register all the prometheus metrics
prometheus.MustRegister(ce.TierHistoricalCount)
prometheus.MustRegister(ce.TierReplicationFactor)
prometheus.MustRegister(ce.TierRequiredCapacity)
prometheus.MustRegister(ce.TierTotalCapacity)
prometheus.MustRegister(ce.SegmentCostNormalization)
prometheus.MustRegister(ce.SegmentCostNormalized)
prometheus.MustRegister(ce.SegmentCostRaw)
prometheus.MustRegister(ce.SegmentCount)
prometheus.MustRegister(ce.SegmentDeletedCount)
prometheus.MustRegister(ce.SegmentDropQueueCount)
prometheus.MustRegister(ce.SegmentDroppedCount)
prometheus.MustRegister(ce.SegmentLoadQueueCount)
prometheus.MustRegister(ce.SegmentLoadQueueFailed)
prometheus.MustRegister(ce.SegmentLoadQueueSize)
prometheus.MustRegister(ce.SegmentMovedCount)
prometheus.MustRegister(ce.SegmentAssignedCount)
prometheus.MustRegister(ce.SegmentOvershadowedCount)
prometheus.MustRegister(ce.SegmentSize)
prometheus.MustRegister(ce.SegmentUnavailableCount)
prometheus.MustRegister(ce.SegmentUnderReplicatedCount)
prometheus.MustRegister(ce.SegmentUnneededCount)
return ce
}
// SetTierTotalCapacity .
func (ce *CoordinationExporter) SetTierTotalCapacity(tier string, val float64) {
ce.TierTotalCapacity.With(prometheus.Labels{"tier": tier}).Set(val)
}
// SetTierRequiredCapacity .
func (ce *CoordinationExporter) SetTierRequiredCapacity(tier string, val float64) {
ce.TierRequiredCapacity.With(prometheus.Labels{"tier": tier}).Set(val)
}
// SetTierReplicationFactor .
func (ce *CoordinationExporter) SetTierReplicationFactor(tier string, val float64) {
ce.TierReplicationFactor.With(prometheus.Labels{"tier": tier}).Set(val)
}
// SetTierHistoricalCount .
func (ce *CoordinationExporter) SetTierHistoricalCount(tier string, val float64) {
ce.TierHistoricalCount.With(prometheus.Labels{"tier": tier}).Set(val)
}
// SetSegmentUnderReplicatedCount .
func (ce *CoordinationExporter) SetSegmentUnderReplicatedCount(labels map[string]string, val float64) {
ce.SegmentUnderReplicatedCount.With(labels).Add(val)
}
// SetSegmentUnavailableCount .
func (ce *CoordinationExporter) SetSegmentUnavailableCount(source string, val float64) {
ce.SegmentUnavailableCount.With(prometheus.Labels{"dataSource": source}).Set(val)
}
// SetSegmentOvershadowedCount .
func (ce *CoordinationExporter) SetSegmentOvershadowedCount(val float64) {
ce.SegmentOvershadowedCount.WithLabelValues().Add(val)
}
// SetSegmentCount .
func (ce *CoordinationExporter) SetSegmentCount(source string, val float64) {
ce.SegmentCount.With(prometheus.Labels{"dataSource": source}).Set(val)
}
// SetSegmentSize .
func (ce *CoordinationExporter) SetSegmentSize(source string, val float64) {
ce.SegmentSize.With(prometheus.Labels{"dataSource": source}).Set(val)
}
// SetSegmentDropQueueCount .
func (ce *CoordinationExporter) SetSegmentDropQueueCount(server string, val float64) {
ce.SegmentDropQueueCount.With(prometheus.Labels{"server": server}).Set(val)
}
// SetSegmentLoadQueueCount .
func (ce *CoordinationExporter) SetSegmentLoadQueueCount(server string, val float64) {
ce.SegmentLoadQueueCount.With(prometheus.Labels{"server": server}).Set(val)
}
// SetSegmentLoadQueueFailed .
func (ce *CoordinationExporter) SetSegmentLoadQueueFailed(server string, val float64) {
ce.SegmentLoadQueueFailed.With(prometheus.Labels{"server": server}).Set(val)
}
// SetSegmentLoadQueueSize .
func (ce *CoordinationExporter) SetSegmentLoadQueueSize(server string, val float64) {
ce.SegmentLoadQueueSize.With(prometheus.Labels{"server": server}).Set(val)
}
// SetSegmentCostNormalized .
func (ce *CoordinationExporter) SetSegmentCostNormalized(tier string, val float64) {
ce.SegmentCostNormalized.With(prometheus.Labels{"tier": tier}).Set(val)
}
// SetSegmentCostNormalization .
func (ce *CoordinationExporter) SetSegmentCostNormalization(tier string, val float64) {
ce.SegmentCostNormalization.With(prometheus.Labels{"tier": tier}).Set(val)
}
// SetSegmentCostRaw .
func (ce *CoordinationExporter) SetSegmentCostRaw(tier string, val float64) {
ce.SegmentCostRaw.With(prometheus.Labels{"tier": tier}).Set(val)
}
// SetSegmentUnneededCount .
func (ce *CoordinationExporter) SetSegmentUnneededCount(tier string, val float64) {
ce.SegmentUnneededCount.With(prometheus.Labels{"tier": tier}).Set(val)
}
// SetSegmentDeletedCount .
func (ce *CoordinationExporter) SetSegmentDeletedCount(tier string, val float64) {
ce.SegmentDeletedCount.With(prometheus.Labels{"tier": tier}).Set(val)
}
// SetSegmentDroppedCount .
func (ce *CoordinationExporter) SetSegmentDroppedCount(tier string, val float64) {
ce.SegmentDroppedCount.With(prometheus.Labels{"tier": tier}).Set(val)
}
// SetSegmentMovedCount .
func (ce *CoordinationExporter) SetSegmentMovedCount(tier string, val float64) {
ce.SegmentMovedCount.With(prometheus.Labels{"tier": tier}).Set(val)
}
// SetSegmentAssignedCount .
func (ce *CoordinationExporter) SetSegmentAssignedCount(tier string, val float64) {
ce.SegmentAssignedCount.With(prometheus.Labels{"tier": tier}).Set(val)
} | pkg/export/coordination.go | 0.811601 | 0.531878 | coordination.go | starcoder |
package fastimage
// Type represents the type of the image detected, or `Unknown`.
type Type uint64

// All detectable image types; Unknown means no known signature matched.
const (
	// Unknown represents an unknown image type
	Unknown Type = iota
	// BMP represents a BMP image
	BMP
	// BPM represents a BPM image
	BPM
	// GIF represents a GIF image
	GIF
	// JPEG represents a JPEG image
	JPEG
	// MNG represents a MNG image
	MNG
	// PBM represents a PBM image
	PBM
	// PCX represents a PCX image
	PCX
	// PGM represents a PGM image
	PGM
	// PNG represents a PNG image
	PNG
	// PPM represents a PPM image
	PPM
	// PSD represents a PSD image
	PSD
	// RAS represents a RAS image
	RAS
	// RGB represents a RGB image
	RGB
	// TIFF represents a TIFF image
	TIFF
	// WEBP represents a WEBP image
	WEBP
	// XBM represents a XBM image
	XBM
	// XPM represents a XPM image
	XPM
	// XV represents a XV image
	XV
)

// typeNames maps each Type to its lowercase short name; Unknown and
// out-of-range values map to "".
var typeNames = [...]string{
	BMP:  "bmp",
	BPM:  "bpm",
	GIF:  "gif",
	JPEG: "jpeg",
	MNG:  "mng",
	PBM:  "pbm",
	PCX:  "pcx",
	PGM:  "pgm",
	PNG:  "png",
	PPM:  "ppm",
	PSD:  "psd",
	RAS:  "ras",
	RGB:  "rgb",
	TIFF: "tiff",
	WEBP: "webp",
	XBM:  "xbm",
	XPM:  "xpm",
	XV:   "xv",
}

// String returns the lowercase short name of the image type.
func (t Type) String() string {
	if t < Type(len(typeNames)) {
		return typeNames[t]
	}
	return ""
}

// mimeTypes maps each Type to its MIME type; Unknown and out-of-range
// values map to "".
var mimeTypes = [...]string{
	BMP:  "image/bmp",
	BPM:  "image/x-portable-pixmap",
	GIF:  "image/gif",
	JPEG: "image/jpeg",
	MNG:  "video/x-mng",
	PBM:  "image/x-portable-bitmap",
	PCX:  "image/x-pcx",
	PGM:  "image/x-portable-graymap",
	PNG:  "image/png",
	PPM:  "image/x-portable-pixmap",
	PSD:  "image/vnd.adobe.photoshop",
	RAS:  "image/x-cmu-raster",
	RGB:  "image/x-rgb",
	TIFF: "image/tiff",
	WEBP: "image/webp",
	XBM:  "image/x-xbitmap",
	XPM:  "image/x-xpixmap",
	XV:   "image/x-portable-pixmap",
}

// Mime returns the MIME type of the image type.
func (t Type) Mime() string {
	if t < Type(len(mimeTypes)) {
		return mimeTypes[t]
	}
	return ""
}
// GetType sniffs the image format of p from its leading magic bytes and
// returns the matching Type, or Unknown when no signature matches or when
// p is shorter than 80 bytes (the size of a 1-pixel GIF).
// NOTE(review): for the portable-anymap family ("P1".."P7") this returns
// the coarse PPM type, while GetInfo distinguishes PBM/PGM/PPM/BPM/XV —
// confirm whether that asymmetry is intended.
func GetType(p []byte) Type {
	const minOffset = 80 // 1 pixel gif
	if len(p) < minOffset {
		return Unknown
	}
	_ = p[minOffset-1] // bounds-check hint: every index below is < minOffset
	switch p[0] {
	case '\xff':
		// JPEG: SOI marker 0xFFD8
		if p[1] == '\xd8' {
			return JPEG
		}
	case '\x89':
		// PNG: 8-byte signature \x89 P N G \r \n \x1a \n
		if p[1] == 'P' &&
			p[2] == 'N' &&
			p[3] == 'G' &&
			p[4] == '\x0d' &&
			p[5] == '\x0a' &&
			p[6] == '\x1a' &&
			p[7] == '\x0a' {
			return PNG
		}
	case 'R':
		// WebP: RIFF container with "WEBP" fourcc at offset 8
		if p[1] == 'I' &&
			p[2] == 'F' &&
			p[3] == 'F' &&
			p[8] == 'W' &&
			p[9] == 'E' &&
			p[10] == 'B' &&
			p[11] == 'P' {
			return WEBP
		}
	case 'G':
		// GIF: "GIF87a"/"GIF89a".
		// NOTE(review): ',' is also accepted as a version byte — presumably
		// a deliberate tolerance for malformed encoders; confirm.
		if p[1] == 'I' &&
			p[2] == 'F' &&
			p[3] == '8' &&
			(p[4] == '7' || p[4] == ',' || p[4] == '9') &&
			p[5] == 'a' {
			return GIF
		}
	case 'B':
		// BMP: "BM"
		if p[1] == 'M' {
			return BMP
		}
	case 'P':
		// Portable anymap family: "P1".."P7"
		switch p[1] {
		case '1', '2', '3', '4', '5', '6', '7':
			return PPM
		}
	case '#':
		// XBM: C source beginning with "#define " or "#define\t"
		if p[1] == 'd' &&
			p[2] == 'e' &&
			p[3] == 'f' &&
			p[4] == 'i' &&
			p[5] == 'n' &&
			p[6] == 'e' &&
			(p[7] == ' ' || p[7] == '\t') {
			return XBM
		}
	case '/':
		// XPM: C source beginning with "/* XPM */"
		if p[1] == '*' &&
			p[2] == ' ' &&
			p[3] == 'X' &&
			p[4] == 'P' &&
			p[5] == 'M' &&
			p[6] == ' ' &&
			p[7] == '*' &&
			p[8] == '/' {
			return XPM
		}
	case 'M':
		// TIFF, big-endian: "MM" + 0x002A
		if p[1] == 'M' && p[2] == '\x00' && p[3] == '\x2a' {
			return TIFF
		}
	case 'I':
		// TIFF, little-endian: "II" + 0x2A00
		if p[1] == 'I' && p[2] == '\x2a' && p[3] == '\x00' {
			return TIFF
		}
	case '8':
		// Photoshop: "8BPS"
		if p[1] == 'B' && p[2] == 'P' && p[3] == 'S' {
			return PSD
		}
	case '\x8a':
		// MNG: \x8a M N G \r \n \x1a \n
		if p[1] == 'M' &&
			p[2] == 'N' &&
			p[3] == 'G' &&
			p[4] == '\x0d' &&
			p[5] == '\x0a' &&
			p[6] == '\x1a' &&
			p[7] == '\x0a' {
			return MNG
		}
	case '\x01':
		// SGI RGB
		if p[1] == '\xda' &&
			p[2] == '[' &&
			p[3] == '\x01' &&
			p[4] == '\x00' &&
			p[5] == ']' {
			return RGB
		}
	case '\x59':
		// Sun raster
		if p[1] == '\xa6' && p[2] == '\x6a' && p[3] == '\x95' {
			return RAS
		}
	case '\x0a':
		// PCX: 0x0A manufacturer byte with encoding byte 0x01
		if p[2] == '\x01' {
			return PCX
		}
	}
	return Unknown
}
// Info holds the detected type and pixel dimensions of an image.
type Info struct {
	Type   Type   // detected format; Unknown when not recognized
	Width  uint32 // width in pixels (0 when not determined)
	Height uint32 // height in pixels (0 when not determined)
}
// GetInfo sniffs the image format of p from its leading magic bytes and,
// when a signature matches, dispatches to the matching per-format decoder
// to fill in the type and pixel dimensions. A zero Info is returned when
// no signature matches or p is shorter than 80 bytes (the size of a
// 1-pixel GIF). The signature switch mirrors GetType's.
func GetInfo(p []byte) (info Info) {
	const minOffset = 80 // 1 pixel gif
	if len(p) < minOffset {
		return
	}
	_ = p[minOffset-1] // bounds-check hint: every index below is < minOffset
	switch p[0] {
	case '\xff':
		// JPEG: SOI marker 0xFFD8
		if p[1] == '\xd8' {
			jpeg(p, &info)
		}
	case '\x89':
		// PNG signature
		if p[1] == 'P' &&
			p[2] == 'N' &&
			p[3] == 'G' &&
			p[4] == '\x0d' &&
			p[5] == '\x0a' &&
			p[6] == '\x1a' &&
			p[7] == '\x0a' {
			png(p, &info)
		}
	case 'R':
		// WebP: RIFF container with "WEBP" fourcc
		if p[1] == 'I' &&
			p[2] == 'F' &&
			p[3] == 'F' &&
			p[8] == 'W' &&
			p[9] == 'E' &&
			p[10] == 'B' &&
			p[11] == 'P' {
			webp(p, &info)
		}
	case 'G':
		// GIF (',' version byte tolerated, as in GetType)
		if p[1] == 'I' &&
			p[2] == 'F' &&
			p[3] == '8' &&
			(p[4] == '7' || p[4] == ',' || p[4] == '9') &&
			p[5] == 'a' {
			gif(p, &info)
		}
	case 'B':
		// BMP
		if p[1] == 'M' {
			bmp(p, &info)
		}
	case 'P':
		// Portable anymap family; ppm() refines the subtype
		switch p[1] {
		case '1', '2', '3', '4', '5', '6', '7':
			ppm(p, &info)
		}
	case '#':
		// XBM: C source beginning with "#define "
		if p[1] == 'd' &&
			p[2] == 'e' &&
			p[3] == 'f' &&
			p[4] == 'i' &&
			p[5] == 'n' &&
			p[6] == 'e' &&
			(p[7] == ' ' || p[7] == '\t') {
			xbm(p, &info)
		}
	case '/':
		// XPM: C source beginning with "/* XPM */"
		if p[1] == '*' &&
			p[2] == ' ' &&
			p[3] == 'X' &&
			p[4] == 'P' &&
			p[5] == 'M' &&
			p[6] == ' ' &&
			p[7] == '*' &&
			p[8] == '/' {
			xpm(p, &info)
		}
	case 'M':
		// TIFF, big-endian
		if p[1] == 'M' && p[2] == '\x00' && p[3] == '\x2a' {
			tiff(p, &info, bigEndian)
		}
	case 'I':
		// TIFF, little-endian
		if p[1] == 'I' && p[2] == '\x2a' && p[3] == '\x00' {
			tiff(p, &info, littleEndian)
		}
	case '8':
		// Photoshop "8BPS"
		if p[1] == 'B' && p[2] == 'P' && p[3] == 'S' {
			psd(p, &info)
		}
	case '\x8a':
		// MNG signature
		if p[1] == 'M' &&
			p[2] == 'N' &&
			p[3] == 'G' &&
			p[4] == '\x0d' &&
			p[5] == '\x0a' &&
			p[6] == '\x1a' &&
			p[7] == '\x0a' {
			mng(p, &info)
		}
	case '\x01':
		// SGI RGB
		if p[1] == '\xda' &&
			p[2] == '[' &&
			p[3] == '\x01' &&
			p[4] == '\x00' &&
			p[5] == ']' {
			rgb(p, &info)
		}
	case '\x59':
		// Sun raster
		if p[1] == '\xa6' && p[2] == '\x6a' && p[3] == '\x95' {
			ras(p, &info)
		}
	case '\x0a':
		// PCX
		if p[2] == '\x01' {
			pcx(p, &info)
		}
	}
	return
}
// jpeg extracts the dimensions of a JPEG stream by walking its segment
// markers until it finds a start-of-frame (SOF0-SOF3) segment, whose
// payload is [precision, height(2), width(2)] big-endian.
// Unlike the original, every read is bounds-checked and a malformed
// segment length (< 2) terminates the scan: the previous version indexed
// b unchecked (potential panic on truncated input) and could walk
// backwards forever on a corrupt length field. Valid streams produce
// identical results.
func jpeg(b []byte, info *Info) {
	i := 2
	for i+4 <= len(b) {
		marker := b[i]
		code := b[i+1]
		length := int(b[i+2])<<8 | int(b[i+3])
		i += 4
		if marker != 0xff {
			return
		}
		if code >= 0xc0 && code <= 0xc3 {
			// SOF payload: b[i] precision, b[i+1..i+2] height, b[i+3..i+4] width
			if i+5 > len(b) {
				return
			}
			info.Type = JPEG
			info.Height = uint32(b[i+1])<<8 | uint32(b[i+2])
			info.Width = uint32(b[i+3])<<8 | uint32(b[i+4])
			return
		}
		if length < 2 {
			// a segment length below 2 would make the scan lose progress
			return
		}
		i += length - 2
	}
}
// webp extracts the dimensions of a WebP stream, handling all three chunk
// flavors found at offset 12: lossy "VP8 ", lossless "VP8L" and extended
// "VP8X". Each flavor bit-packs its dimensions differently; the type is
// only recorded when both dimensions come out non-zero.
func webp(b []byte, info *Info) {
	if len(b) < 30 {
		return
	}
	_ = b[29] // bounds-check hint: every index below is < 30
	if !(b[12] == 'V' && b[13] == 'P' && b[14] == '8') {
		return
	}
	switch b[15] {
	case ' ': // VP8: 14-bit little-endian width/height (masked with 0x3f on the high byte)
		info.Width = (uint32(b[27])&0x3f)<<8 | uint32(b[26])
		info.Height = (uint32(b[29])&0x3f)<<8 | uint32(b[28])
	case 'L': // VP8L: two 14-bit fields stored minus one, packed across bytes 21-23
		info.Width = (uint32(b[22])<<8|uint32(b[21]))&16383 + 1
		info.Height = (uint32(b[23])<<2|uint32(b[22]>>6))&16383 + 1
	case 'X': // VP8X: 24-bit little-endian canvas size stored minus one
		info.Width = (uint32(b[24]) | uint32(b[25])<<8 | uint32(b[26])<<16) + 1
		info.Height = (uint32(b[27]) | uint32(b[28])<<8 | uint32(b[29])<<16) + 1
	}
	if info.Width != 0 && info.Height != 0 {
		info.Type = WEBP
	}
}
// png extracts the dimensions of a PNG stream from its IHDR chunk, which
// the PNG specification requires to immediately follow the 8-byte
// signature; width and height are 32-bit big-endian. The type is only
// recorded when both dimensions are non-zero.
func png(b []byte, info *Info) {
	if len(b) < 24 {
		return
	}
	_ = b[23]
	isIHDR := b[12] == 'I' && b[13] == 'H' && b[14] == 'D' && b[15] == 'R'
	if isIHDR {
		info.Width = uint32(b[16])<<24 | uint32(b[17])<<16 | uint32(b[18])<<8 | uint32(b[19])
		info.Height = uint32(b[20])<<24 | uint32(b[21])<<16 | uint32(b[22])<<8 | uint32(b[23])
	}
	if info.Width != 0 && info.Height != 0 {
		info.Type = PNG
	}
}
// gif reads the logical screen dimensions stored little-endian directly
// after the 6-byte GIF signature. The type is only recorded when both
// dimensions are non-zero.
func gif(b []byte, info *Info) {
	if len(b) < 12 {
		return
	}
	_ = b[11]
	info.Width = uint32(b[6]) | uint32(b[7])<<8
	info.Height = uint32(b[8]) | uint32(b[9])<<8
	if info.Width != 0 && info.Height != 0 {
		info.Type = GIF
	}
}
// bmp reads the 32-bit little-endian width and height from the BMP info
// header (offsets 18 and 22). The type is only recorded when both
// dimensions are non-zero.
// NOTE(review): BMP height may be stored negative for top-down bitmaps;
// this reads it as unsigned, matching the original behavior.
func bmp(b []byte, info *Info) {
	if len(b) < 26 {
		return
	}
	_ = b[25]
	info.Width = uint32(b[18]) | uint32(b[19])<<8 | uint32(b[20])<<16 | uint32(b[21])<<24
	info.Height = uint32(b[22]) | uint32(b[23])<<8 | uint32(b[24])<<16 | uint32(b[25])<<24
	if info.Width != 0 && info.Height != 0 {
		info.Type = BMP
	}
}
func ppm(b []byte, info *Info) {
switch b[1] {
case '1':
info.Type = PBM
case '2', '5':
info.Type = PGM
case '3', '6':
info.Type = PPM
case '4':
info.Type = BPM
case '7':
info.Type = XV
}
i := skipSpace(b, 2)
info.Width, i = parseUint32(b, i)
i = skipSpace(b, i)
info.Height, _ = parseUint32(b, i)
if info.Width == 0 || info.Height == 0 {
info.Type = Unknown
}
}
func xbm(b []byte, info *Info) {
var p []byte
var i int
_, i = readNonSpace(b, i)
i = skipSpace(b, i)
_, i = readNonSpace(b, i)
i = skipSpace(b, i)
info.Width, i = parseUint32(b, i)
i = skipSpace(b, i)
p, i = readNonSpace(b, i)
if !(len(p) == 7 &&
p[6] == 'e' &&
p[0] == '#' &&
p[1] == 'd' &&
p[2] == 'e' &&
p[3] == 'f' &&
p[4] == 'i' &&
p[5] == 'n') {
return
}
i = skipSpace(b, i)
_, i = readNonSpace(b, i)
i = skipSpace(b, i)
info.Height, i = parseUint32(b, i)
if info.Width != 0 && info.Height != 0 {
info.Type = XBM
}
}
func xpm(b []byte, info *Info) {
var line []byte
var i, j int
for {
line, i = readLine(b, i)
if len(line) == 0 {
break
}
j = skipSpace(line, 0)
if line[j] != '"' {
continue
}
info.Width, j = parseUint32(line, j+1)
j = skipSpace(line, j)
info.Height, j = parseUint32(line, j)
break
}
if info.Width != 0 && info.Height != 0 {
info.Type = XPM
}
}
func tiff(b []byte, info *Info, order byteOrder) {
i := int(order.Uint32(b[4:8]))
n := int(order.Uint16(b[i+2 : i+4]))
i += 2
for ; i < n*12; i += 12 {
tag := order.Uint16(b[i : i+2])
datatype := order.Uint16(b[i+2 : i+4])
var value uint32
switch datatype {
case 1, 6:
value = uint32(b[i+9])
case 3, 8:
value = uint32(order.Uint16(b[i+8 : i+10]))
case 4, 9:
value = order.Uint32(b[i+8 : i+12])
default:
return
}
switch tag {
case 256:
info.Width = value
case 257:
info.Height = value
}
if info.Width > 0 && info.Height > 0 {
info.Type = TIFF
return
}
}
}
func psd(b []byte, info *Info) {
if len(b) < 22 {
return
}
_ = b[21]
info.Width = uint32(b[18])<<24 |
uint32(b[19])<<16 |
uint32(b[20])<<8 |
uint32(b[21])
info.Height = uint32(b[14])<<24 |
uint32(b[15])<<16 |
uint32(b[16])<<8 |
uint32(b[17])
if info.Width != 0 && info.Height != 0 {
info.Type = PSD
}
}
func mng(b []byte, info *Info) {
if len(b) < 24 {
return
}
_ = b[23]
if !(b[12] == 'M' && b[13] == 'H' && b[14] == 'D' && b[15] == 'R') {
return
}
info.Width = uint32(b[16])<<24 |
uint32(b[17])<<16 |
uint32(b[18])<<8 |
uint32(b[19])
info.Height = uint32(b[20])<<24 |
uint32(b[21])<<16 |
uint32(b[22])<<8 |
uint32(b[23])
if info.Width != 0 && info.Height != 0 {
info.Type = MNG
}
}
func rgb(b []byte, info *Info) {
if len(b) < 10 {
return
}
_ = b[9]
info.Width = uint32(b[6])<<8 |
uint32(b[7])
info.Height = uint32(b[8])<<8 |
uint32(b[9])
if info.Width != 0 && info.Height != 0 {
info.Type = RGB
}
}
func ras(b []byte, info *Info) {
if len(b) < 12 {
return
}
_ = b[11]
info.Width = uint32(b[4])<<24 |
uint32(b[5])<<16 |
uint32(b[6])<<8 |
uint32(b[7])
info.Height = uint32(b[8])<<24 |
uint32(b[9])<<16 |
uint32(b[10])<<8 |
uint32(b[11])
if info.Width != 0 && info.Height != 0 {
info.Type = RAS
}
}
func pcx(b []byte, info *Info) {
if len(b) < 12 {
return
}
_ = b[11]
info.Width = 1 +
(uint32(b[9])<<8 | uint32(b[8])) -
(uint32(b[5])<<8 | uint32(b[4]))
info.Height = 1 +
(uint32(b[11])<<8 | uint32(b[10])) -
(uint32(b[7])<<8 | uint32(b[6]))
if info.Width != 0 && info.Height != 0 {
info.Type = PCX
}
}
func skipSpace(b []byte, i int) (j int) {
_ = b[len(b)-1]
for j = i; j < len(b); j++ {
if b[j] != ' ' && b[j] != '\t' && b[j] != '\r' && b[j] != '\n' {
break
}
}
return
}
func readNonSpace(b []byte, i int) (p []byte, j int) {
_ = b[len(b)-1]
for j = i; j < len(b); j++ {
if b[j] == ' ' || b[j] == '\t' || b[j] == '\r' || b[j] == '\n' {
break
}
}
p = b[i:j]
return
}
func readLine(b []byte, i int) (p []byte, j int) {
_ = b[len(b)-1]
for j = i; j < len(b); j++ {
if b[j] == '\n' {
break
}
}
j++
p = b[i:j]
return
}
func parseUint32(b []byte, i int) (n uint32, j int) {
_ = b[len(b)-1]
for j = i; j < len(b); j++ {
x := uint32(b[j] - '0')
if x < 0 || x > 9 {
break
}
n = n*10 + x
}
return
}
type byteOrder interface {
Uint16([]byte) uint16
Uint32([]byte) uint32
}
var littleEndian littleOrder
type littleOrder struct{}
func (littleOrder) Uint16(b []byte) uint16 {
_ = b[1]
return uint16(b[0]) | uint16(b[1])<<8
}
func (littleOrder) Uint32(b []byte) uint32 {
_ = b[3]
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
var bigEndian bigOrder
type bigOrder struct{}
func (bigOrder) Uint16(b []byte) uint16 {
_ = b[1]
return uint16(b[1]) | uint16(b[0])<<8
}
func (bigOrder) Uint32(b []byte) uint32 {
_ = b[3]
return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
} | fastimage.go | 0.720663 | 0.549218 | fastimage.go | starcoder |
package gollection
// Returns true if the target is included in the iterator.
func Contains[T comparable](target T, it Iterator[T]) bool {
for v, ok := it.Next().Get(); ok; v, ok = it.Next().Get() {
if v == target {
return true
}
}
return false
}
// Returns the sum of all the elements in the iterator.
func Sum[T Number](it Iterator[T]) T {
var result T
ForEach(func(item Pair[int, T]) {
if item.First == 0 {
result = item.Second
} else {
result += item.Second
}
}, Indexer(it))
return result
}
// Returns the product of all the elements in the iterator.
func Product[T Number](it Iterator[T]) T {
var result T
ForEach(func(item Pair[int, T]) {
if item.First == 0 {
result = item.Second
} else {
result *= item.Second
}
}, Indexer(it))
return result
}
// Returns the average of all the elements in the iterator.
func Average[T Number](it Iterator[T]) float64 {
var result float64
ForEach(func(item Pair[int, T]) {
result += (float64(item.Second) - result) / float64(item.First+1)
}, Indexer(it))
return result
}
// Return the total number of iterators.
func Count[T any](it Iterator[T]) int {
var result int
ForEach(func(item T) { result++ }, it)
return result
}
// Return the maximum value of all elements of the iterator.
func Max[T Number](it Iterator[T]) T {
var result T
ForEach(func(item Pair[int, T]) {
if item.First == 0 {
result = item.Second
} else if result < item.Second {
result = item.Second
}
}, Indexer(it))
return result
}
// Return the minimum value of all elements of the iterator.
func Min[T Number](it Iterator[T]) T {
var result T
ForEach(func(item Pair[int, T]) {
if item.First == 0 {
result = item.Second
} else if result > item.Second {
result = item.Second
}
}, Indexer(it))
return result
}
// The action is executed for each element of the iterator, and the argument to the action is the element.
func ForEach[T any](action func(T), it Iterator[T]) {
for v, ok := it.Next().Get(); ok; v, ok = it.Next().Get() {
action(v)
}
}
// Returns true if all elements in the iterator match the condition.
func AllMatch[T any](predicate func(T) bool, it Iterator[T]) bool {
for v, ok := it.Next().Get(); ok; v, ok = it.Next().Get() {
if !predicate(v) {
return false
}
}
return true
}
// Returns true if none elements in the iterator match the condition.
func NoneMatch[T any](predicate func(T) bool, it Iterator[T]) bool {
for v, ok := it.Next().Get(); ok; v, ok = it.Next().Get() {
if predicate(v) {
return false
}
}
return true
}
// Returns true if any elements in the iterator match the condition.
func AnyMatch[T any](predicate func(T) bool, it Iterator[T]) bool {
for v, ok := it.Next().Get(); ok; v, ok = it.Next().Get() {
if predicate(v) {
return true
}
}
return false
}
// Return the first element.
func First[T any](it Iterator[T]) Option[T] {
return it.Next()
}
// Return the last element.
func Last[T any](it Iterator[T]) Option[T] {
var curr = it.Next()
var last = curr
for curr.IsSome() {
last = curr
curr = it.Next()
}
return last
}
// Return the element at index.
func At[T any](index int, it Iterator[T]) Option[T] {
var result = it.Next()
var i = 0
for i < index && result.IsSome() {
result = it.Next()
i++
}
return result
}
// Return the value of the final composite, operates on the iterator from front to back.
func Reduce[T any, R any](initial R, operation func(R, T) R, it Iterator[T]) R {
var result = initial
for v, ok := it.Next().Get(); ok; v, ok = it.Next().Get() {
result = operation(result, v)
}
return result
}
// Return the value of the final composite, operates on the iterator from back to front.
func Fold[T any, R any](initial R, operation func(T, R) R, it Iterator[T]) R {
var reverse = make([]T, 0)
for v, ok := it.Next().Get(); ok; v, ok = it.Next().Get() {
reverse = append(reverse, v)
}
var result = initial
for i := len(reverse) - 1; i >= 0; i-- {
result = operation(reverse[i], result)
}
return result
} | terminal.go | 0.796174 | 0.561095 | terminal.go | starcoder |
package geometry
import (
"math"
"github.com/gonum/matrix/mat64"
)
type CatmullRome3 struct {
pc PointCloud
}
func NewCatmullRome3(pc PointCloud) *CatmullRome3 {
cr := &CatmullRome3{pc}
return cr
}
func (cr *CatmullRome3) GetPoint(t float64) *mat64.Vector {
point := mat64.NewVector(3, []float64{0, 0, 0})
// Vector for calculations
tmp := mat64.NewVector(3, []float64{0, 0, 0})
points := cr.pc.Vectors
l := len(points)
p := (float64(l) - 1) * t
intPoint := int(math.Floor(p))
weight := p - float64(intPoint)
if weight == 0 && intPoint == l-1 {
intPoint = l - 2
weight = 1
}
p0 := mat64.NewVector(3, []float64{0, 0, 0})
p1 := mat64.NewVector(3, []float64{0, 0, 0})
p2 := mat64.NewVector(3, []float64{0, 0, 0})
p3 := mat64.NewVector(3, []float64{0, 0, 0})
if intPoint > 0 {
p0 = points[(intPoint-1)%l]
} else {
// extrapolate first point
tmp.SubVec(points[0], points[1])
tmp.AddVec(tmp, points[0])
p0.CloneVec(tmp)
}
p1 = points[intPoint%l]
p2 = points[(intPoint+1)%l]
if intPoint+2 < l {
p3 = points[(intPoint+2)%l]
} else {
tmp.SubVec(points[l-1], points[l-2])
tmp.AddVec(tmp, points[l-1])
p3.CloneVec(tmp)
}
px := NewCubicPoly()
py := NewCubicPoly()
pz := NewCubicPoly()
// init Centripetal / Chordal Catmull-Rom
pow := 0.25
var dt0 = math.Pow(DistanceSquared(p0, p1), pow)
var dt1 = math.Pow(DistanceSquared(p1, p2), pow)
var dt2 = math.Pow(DistanceSquared(p2, p3), pow)
// safety check for repeated points
if dt1 < 1e-4 {
dt1 = 1.0
}
if dt0 < 1e-4 {
dt0 = dt1
}
if dt2 < 1e-4 {
dt2 = dt1
}
px.initNonuniformCatmullRom(p0.At(0, 0), p1.At(0, 0), p2.At(0, 0), p3.At(0, 0), dt0, dt1, dt2)
py.initNonuniformCatmullRom(p0.At(1, 0), p1.At(1, 0), p2.At(1, 0), p3.At(1, 0), dt0, dt1, dt2)
pz.initNonuniformCatmullRom(p0.At(2, 0), p1.At(2, 0), p2.At(2, 0), p3.At(2, 0), dt0, dt1, dt2)
point.SetVec(0, px.calc(weight))
point.SetVec(1, py.calc(weight))
point.SetVec(2, pz.calc(weight))
// return point;
return point
}
func (cr *CatmullRome3) IntersectPlane(plane *Plane3) (*mat64.Vector, bool) {
intersection := mat64.NewVector(3, []float64{0, 0, 0})
threshold := 10e-10
maxIterations := 10000
errorDistance := math.Inf(1)
t := 0.0
i := 0
for errorDistance > threshold && i < maxIterations {
intersection = cr.GetPoint(t)
errorDistance = math.Abs(plane.DistanceToPoint(intersection))
t = t + 0.001*errorDistance
i++
}
if errorDistance > threshold {
return nil, false
}
return intersection, true
}
type CubicPoly struct {
c0 float64
c1 float64
c2 float64
c3 float64
}
func NewCubicPoly() *CubicPoly {
cp := &CubicPoly{}
return cp
}
func (cp *CubicPoly) init(x0, x1, t0, t1 float64) {
cp.c0 = x0
cp.c1 = t0
cp.c2 = -3*x0 + 3*x1 - 2*t0 - t1
cp.c3 = 2*x0 - 2*x1 + t0 + t1
}
func (cp *CubicPoly) initCatmullRom(x0, x1, x2, x3, tension float64) {
cp.init(x1, x2, tension*(x2-x0), tension*(x3-x1))
}
func (cp *CubicPoly) initNonuniformCatmullRom(x0, x1, x2, x3, dt0, dt1, dt2 float64) {
// compute tangents when parameterized in [t1,t2]
t1 := (x1-x0)/dt0 - (x2-x0)/(dt0+dt1) + (x2-x1)/dt1
t2 := (x2-x1)/dt1 - (x3-x1)/(dt1+dt2) + (x3-x2)/dt2
// rescale tangents for parametrization in [0,1]
t1 *= dt1
t2 *= dt1
cp.init(x1, x2, t1, t2)
}
func (cp *CubicPoly) calc(t float64) float64 {
t2 := t * t
t3 := t2 * t
return cp.c0 + cp.c1*t + cp.c2*t2 + cp.c3*t3
} | curve3.go | 0.531209 | 0.657387 | curve3.go | starcoder |
// Just for playing around a bit and testing stuff.
package main
import (
"bufio"
"flag"
"fmt"
"os"
"time"
br "github.com/FabianWe/boolrecognition"
"github.com/FabianWe/boolrecognition/lpb"
)
// iterativeAverage computes iteratively the average of a series of values.
// Implemented as described here: http://people.revoledu.com/kardi/tutorial/RecursiveStatistic/Time-Average.htm
// In contrast to the method described above t starts with 0, not 1.
// So to compute the average of a series do:
// 1. Initialize your current average to whatever you want
// 2. Initialize t = 0
// 3. For each sample update current = iterativeAverage(t, nextSample, current)
// and increase t by 1.
func iterativeAverage(t int, value, current float64) float64 {
return (float64(t)/float64(t+1))*current + (1.0/float64(t+1))*value
}
type foo int
const (
a foo = -2
b
c = iota
d
)
func main() {
tighten := lpb.TightenNone
lpbFileFlag := flag.String("lpb", "", "Path to the lpb file")
verify := flag.Bool("verify", false, "If true also verify that the produced LPB is correct")
solverType := flag.String("solver", "minComb", "The solver to use, currently \"mincomb\" and \"lp\" are available")
numberLoops := flag.Int("N", 5, "The number of times you want to repeat each conversion")
repeat := flag.Int("R", 3, "How many times to repeat the conversions N times? Best value will be used")
tightenFlag := flag.String("tighten", "none", "If the solver is lp solver this describes how to tighten the lp:"+
" \"none\" for now additional constraints, \"neighbours\" for constraints v(i) and v(i + 1) and \"all\""+
" for constraints between all v(i) and v(j). Default is \"none\"")
flag.Parse()
var converter lpb.DNFToLPB
if *lpbFileFlag == "" {
fmt.Fprintln(os.Stderr, "lpb must be provided and must point to the file containg all the LPBs")
os.Exit(1)
}
switch *solverType {
case "minComb":
converter = lpb.NewCombinatorialSolver(lpb.NewMinSolver())
fmt.Println("Using combinatorial solver with minimum chooser")
fmt.Println()
case "lp":
switch *tightenFlag {
case "none":
case "neighbours":
tighten = lpb.TightenNeighbours
case "all":
tighten = lpb.TightenAll
default:
fmt.Fprintln(os.Stderr, "Tighten type must be either \"none\", \"neighbours\" or \"all\", got", *tightenFlag)
os.Exit(1)
}
converter = lpb.NewLPSolver(tighten)
fmt.Println("Using linear program solver with tighten option", *tightenFlag)
fmt.Println()
default:
fmt.Fprintln(os.Stderr, "Only \"minComb\" and \"lp\" are valid solvers, got", *solverType)
os.Exit(1)
}
if *numberLoops <= 0 {
fmt.Fprintln(os.Stderr, "N must be > 0")
os.Exit(1)
}
if *repeat <= 0 {
fmt.Fprintln(os.Stderr, "R must be > 0")
os.Exit(1)
}
lpbs, dnfs, parseErr := parseLPBs(*lpbFileFlag)
var numFailedConv, numNotEqual int
bestSoFarSucc := -1.0
bestSoFarAll := -1.0
if parseErr != nil {
fmt.Fprintln(os.Stderr, "Error parsing LPBs:", parseErr)
os.Exit(1)
}
for i := 0; i < *repeat; i++ {
// repeat the test, get average
var avgSucc, avgAll float64
// run verify only in the last run, no need to always do it
avgSucc, avgAll, numFailedConv, numNotEqual = runTest(lpbs, dnfs, *numberLoops, *verify && (i == *repeat-1), converter)
if bestSoFarSucc < 0 || avgSucc < bestSoFarSucc {
bestSoFarSucc = avgSucc
}
if bestSoFarAll < 0 || avgAll < bestSoFarAll {
bestSoFarAll = avgAll
}
}
// print evaluation
fmt.Printf("Ran tests %d times, showing best average of %d repeats\n\n", *numberLoops, *repeat)
failRate := (float64(numFailedConv) / float64(len(lpbs))) * 100.0
fmt.Printf("Conversion failed on %d of %d tests (%.2f%%)\n", numFailedConv, len(lpbs), failRate)
if *verify {
errorRate := (float64(numNotEqual) / float64(len(lpbs)-numFailedConv)) * 100.0
fmt.Printf("From the times the conversion was successful the output was wrong in %d cases (%.2f%%)\n", numNotEqual, errorRate)
}
fmt.Println("\nRuntime results:")
fmt.Printf("One single conversion took %s on average on all successful runs\n", time.Duration(bestSoFarSucc))
fmt.Printf("One single conversion took %s on average on all runs (including failed ones)\n", time.Duration(bestSoFarAll))
}
func parseLPBs(path string) ([]*lpb.LPB, []br.ClauseSet, error) {
lpbs := make([]*lpb.LPB, 0)
dnfs := make([]br.ClauseSet, 0)
f, openErr := os.Open(path)
if openErr != nil {
return nil, nil, openErr
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
nextLPB, parseErr := lpb.ParseLPB(scanner.Text())
if parseErr != nil {
return nil, nil, parseErr
}
lpbs = append(lpbs, nextLPB)
dnfs = append(dnfs, nextLPB.ToDNF())
}
return lpbs, dnfs, nil
}
func runTest(lpbs []*lpb.LPB, dnfs []br.ClauseSet, n int, verify bool, converter lpb.DNFToLPB) (avgSucc, avgAll float64, numFailedConv, numNotEqual int) {
avgSucc = 0.0
avgAll = 0.0
tSucc := 0
tAll := 0
for num := 0; num < n; num++ {
numFailedConv = 0
numNotEqual = 0
for i, phi := range dnfs {
start := time.Now()
// start the solver
computedLPB, convErr := converter.Convert(phi, len(lpbs[i].Coefficients))
dur := time.Since(start)
ok := true
if convErr != nil {
ok = false
numFailedConv++
}
if verify && convErr == nil {
if !computedLPB.ToDNF().DeepSortedEquals(phi) {
ok = false
numNotEqual++
}
}
if ok {
avgSucc = iterativeAverage(tSucc, float64(dur), avgSucc)
tSucc++
}
// always update avgAll
avgAll = iterativeAverage(tAll, float64(dur), avgAll)
tAll++
}
}
return
} | cmd/benchmarklpb/benchmarklpb.go | 0.509276 | 0.445952 | benchmarklpb.go | starcoder |
package pass
import (
"fmt"
"github.com/mmcloughlin/addchain/acc/ir"
"github.com/mmcloughlin/addchain/internal/errutil"
)
// Interface for a processing pass.
type Interface interface {
Execute(*ir.Program) error
}
// Func adapts a function to the pass Interface.
type Func func(*ir.Program) error
// Execute calls p.
func (f Func) Execute(p *ir.Program) error {
return f(p)
}
// Concat returns a pass that executes the given passes in order, stopping on
// the first error.
func Concat(passes ...Interface) Interface {
return Func(func(p *ir.Program) error {
for _, pass := range passes {
if err := pass.Execute(p); err != nil {
return err
}
}
return nil
})
}
// Exec is a convenience for executing a list of passes on p.
func Exec(p *ir.Program, passes ...Interface) error {
return Concat(passes...).Execute(p)
}
// CanonicalizeOperands ensures there is only one Operand object for each
// operand index in the program. In particular, this ensures there are not
// conflicting names for the same index. Populates the Operands field of the
// program.
func CanonicalizeOperands(p *ir.Program) error {
if p.Operands != nil {
return nil
}
p.Operands = map[int]*ir.Operand{}
// First pass through determines canonical operand for each index.
for _, i := range p.Instructions {
for _, operand := range i.Operands() {
// Look for an existing operand object for this index.
existing, found := p.Operands[operand.Index]
if !found {
p.Operands[operand.Index] = operand
continue
}
if existing == operand {
continue
}
// They're different objects. Check for a name conflict.
if existing.Identifier != "" && operand.Identifier != "" && existing.Identifier != operand.Identifier {
return fmt.Errorf("identifier conflict: index %d named %q and %q", operand.Index, operand.Identifier, existing.Identifier)
}
if operand.Identifier != "" {
existing.Identifier = operand.Identifier
}
}
}
// Second pass through replaces all operands with the canonical version.
for _, i := range p.Instructions {
switch op := i.Op.(type) {
case ir.Add:
i.Op = ir.Add{
X: p.Operands[op.X.Index],
Y: p.Operands[op.Y.Index],
}
case ir.Double:
i.Op = ir.Double{
X: p.Operands[op.X.Index],
}
case ir.Shift:
i.Op = ir.Shift{
X: p.Operands[op.X.Index],
S: op.S,
}
default:
return errutil.UnexpectedType(op)
}
}
return nil
}
// ReadCounts computes how many times each index is read in the program. This
// populates the ReadCount field of the program.
func ReadCounts(p *ir.Program) error {
if p.ReadCount != nil {
return nil
}
p.ReadCount = map[int]int{}
for _, i := range p.Instructions {
for _, input := range i.Op.Inputs() {
p.ReadCount[input.Index]++
}
}
return nil
} | vendor/github.com/mmcloughlin/addchain/acc/pass/pass.go | 0.739611 | 0.413004 | pass.go | starcoder |
package units
import (
"errors"
"fmt"
"strconv"
"strings"
"github.com/JojiiOfficial/gaw"
)
// Datasize represents a unit of data size (in bits, bit)
type Datasize float32
// ...
const (
// base 10 (SI prefixes)
Bit Datasize = 1e0
Byte = Bit * 8
Kilobyte = Byte * 1e3
Megabyte = Byte * 1e6
Gigabyte = Byte * 1e9
Terabyte = Byte * 1e12
Petabyte = Byte * 1e15
Exabyte = Byte * 1e18
)
// Bits returns the datasize in bit
func (b Datasize) Bits() float64 {
return float64(b)
}
// Bytes returns the datasize in B
func (b Datasize) Bytes() float64 {
return float64(b / Byte)
}
// Kilobytes returns the datasize in kB
func (b Datasize) Kilobytes() float64 {
return float64(b / Kilobyte)
}
// Megabytes returns the datasize in MB
func (b Datasize) Megabytes() float64 {
return float64(b / Megabyte)
}
// Gigabytes returns the datasize in GB
func (b Datasize) Gigabytes() float64 {
return float64(b / Gigabyte)
}
// Terabytes returns the datasize in TB
func (b Datasize) Terabytes() float64 {
return float64(b / Terabyte)
}
// Petabytes returns the datasize in PB
func (b Datasize) Petabytes() float64 {
return float64(b / Petabyte)
}
// Exabytes returns the datasize in EB
func (b Datasize) Exabytes() float64 {
return float64(b / Exabyte)
}
func (b Datasize) String() string {
if b > Exabyte {
return fmt.Sprintf("%dEB", int(b.Exabytes()))
}
if b > Petabyte {
return fmt.Sprintf("%dPB", int(b.Petabytes()))
}
if b > Terabyte {
return fmt.Sprintf("%dTB", int(b.Terabytes()))
}
if b > Gigabyte {
return fmt.Sprintf("%dGB", int(b.Gigabytes()))
}
if b > Megabyte {
return fmt.Sprintf("%dMB", int(b.Megabytes()))
}
if b > Kilobyte {
return fmt.Sprintf("%dKB", int(b.Kilobytes()))
}
return fmt.Sprintf("%dB", int(b.Bytes()))
}
// ParseDatasize parses datasize
func ParseDatasize(str string) (float32, error) {
numBuff := ""
runes := []rune(str)
i := 0
for _, r := range runes {
s := string(r)
if isInt(s) {
numBuff += s
} else {
break
}
i++
}
if len(numBuff) == 0 {
return 0, errors.New("No number provided")
}
unit := strings.ToLower(string(runes[i:]))
num, _ := strconv.Atoi(numBuff)
if !gaw.IsInStringArray(unit, []string{"b", "kb", "mb", "gb", "tb", "pb", "eb"}) {
return 0, errors.New("Invaild unit")
}
switch unit {
case "b":
return float32(Datasize(num) * Byte), nil
case "kb":
return float32(Datasize(num) * Kilobyte), nil
case "mb":
return float32(Datasize(num) * Megabyte), nil
case "gb":
return float32(Datasize(num) * Gigabyte), nil
case "tb":
return float32(Datasize(num) * Terabyte), nil
case "pb":
return float32(Datasize(num) * Petabyte), nil
case "eb":
return float32(Datasize(num) * Exabyte), nil
}
return 0, nil
}
func isInt(text string) bool {
_, err := strconv.Atoi(text)
return err == nil
}
//UnmarshalText unmashal data
func (b *Datasize) UnmarshalText(text []byte) error {
duration, err := ParseDatasize(string(text))
if err == nil {
*b = Datasize(duration)
}
return err
}
// MarshalText implements encoding.TextMarshaler
func (b Datasize) MarshalText() ([]byte, error) {
return []byte(b.String()), nil
} | models/units/Datasize.go | 0.724286 | 0.523481 | Datasize.go | starcoder |
package document
import (
"baliance.com/gooxml"
"baliance.com/gooxml/color"
"baliance.com/gooxml/measurement"
"baliance.com/gooxml/schema/soo/wml"
)
// TableBorders allows manipulation of borders on a table.
type TableBorders struct {
x *wml.CT_TblBorders
}
// X returns the inner wml.CT_TblBorders
func (b TableBorders) X() *wml.CT_TblBorders {
return b.x
}
func setBorder(brd *wml.CT_Border, t wml.ST_Border, c color.Color, thickness measurement.Distance) {
brd.ValAttr = t
brd.ColorAttr = &wml.ST_HexColor{}
if c.IsAuto() {
brd.ColorAttr.ST_HexColorAuto = wml.ST_HexColorAutoAuto
} else {
brd.ColorAttr.ST_HexColorRGB = c.AsRGBString()
}
if thickness != measurement.Zero {
// sz here is in 1/8'th points, the range is 0.25 to 12 pts
brd.SzAttr = gooxml.Uint64(uint64(thickness / measurement.Point * 8))
}
}
// SetAll sets all of the borders to a given value.
func (b TableBorders) SetAll(t wml.ST_Border, c color.Color, thickness measurement.Distance) {
b.SetBottom(t, c, thickness)
b.SetLeft(t, c, thickness)
b.SetRight(t, c, thickness)
b.SetTop(t, c, thickness)
b.SetInsideHorizontal(t, c, thickness)
b.SetInsideVertical(t, c, thickness)
}
// SetBottom sets the bottom border to a specified type, color and thickness.
func (b TableBorders) SetBottom(t wml.ST_Border, c color.Color, thickness measurement.Distance) {
b.x.Bottom = wml.NewCT_Border()
setBorder(b.x.Bottom, t, c, thickness)
}
// SetTop sets the top border to a specified type, color and thickness.
func (b TableBorders) SetTop(t wml.ST_Border, c color.Color, thickness measurement.Distance) {
b.x.Top = wml.NewCT_Border()
setBorder(b.x.Top, t, c, thickness)
}
// SetLeft sets the left border to a specified type, color and thickness.
func (b TableBorders) SetLeft(t wml.ST_Border, c color.Color, thickness measurement.Distance) {
b.x.Left = wml.NewCT_Border()
setBorder(b.x.Left, t, c, thickness)
}
// SetRight sets the right border to a specified type, color and thickness.
func (b TableBorders) SetRight(t wml.ST_Border, c color.Color, thickness measurement.Distance) {
b.x.Right = wml.NewCT_Border()
setBorder(b.x.Right, t, c, thickness)
}
// SetInsideHorizontal sets the interior horizontal borders to a specified type, color and thickness.
func (b TableBorders) SetInsideHorizontal(t wml.ST_Border, c color.Color, thickness measurement.Distance) {
b.x.InsideH = wml.NewCT_Border()
setBorder(b.x.InsideH, t, c, thickness)
}
// SetInsideVertical sets the interior vertical borders to a specified type, color and thickness.
func (b TableBorders) SetInsideVertical(t wml.ST_Border, c color.Color, thickness measurement.Distance) {
b.x.InsideV = wml.NewCT_Border()
setBorder(b.x.InsideV, t, c, thickness)
} | document/tableborders.go | 0.811937 | 0.424591 | tableborders.go | starcoder |
package multiless
type (
// A helper for long chains of "less-than" comparisons, where later comparisons are only
// required if earlier ones haven't resolved the comparison.
Computation struct {
ok bool
less bool
}
)
func New() Computation {
return Computation{}
}
func (me Computation) EagerSameLess(same, less bool) Computation {
if me.ok || same {
return me
}
return Computation{
ok: true,
less: less,
}
}
func (me Computation) LazySameLess(lazy func() (same, less bool)) Computation {
if me.ok {
return me
}
same, less := lazy()
if !same {
me.less = less
}
return me
}
// Sorts so that false comes before true.
func (me Computation) Bool(l, r bool) Computation {
return me.EagerSameLess(l == r, r)
}
func (me Computation) Uint32(l, r uint32) Computation {
return me.EagerSameLess(l == r, l < r)
}
func (me Computation) Int64(l, r int64) Computation {
return me.EagerSameLess(l == r, l < r)
}
func (me Computation) Uint64(l, r uint64) Computation {
return me.EagerSameLess(l == r, l < r)
}
func (me Computation) Int(l, r int) Computation {
return me.EagerSameLess(l == r, l < r)
}
func (me Computation) CmpInt64(i int64) Computation {
return me.EagerSameLess(i == 0, i < 0)
}
func (me Computation) Cmp(i int) Computation {
return me.EagerSameLess(i == 0, i < 0)
}
func (me Computation) Uintptr(l, r uintptr) Computation {
return me.EagerSameLess(l == r, l < r)
}
func (me Computation) Less() bool {
return me.less
}
func (me Computation) Ok() bool {
return me.ok
}
func (me Computation) LessOk() (less, ok bool) {
return me.less, me.ok
}
func (me Computation) MustLess() bool {
less, ok := me.LessOk()
if !ok {
panic("computation has not differentiated yet")
}
return less
}
func (me Computation) Float64(l, r float64) Computation {
return me.EagerSameLess(l == r, l < r)
}
func (me Computation) Lazy(f func() Computation) Computation {
if me.ok {
return me
}
return f()
}
func (me Computation) AndThen(then Computation) Computation {
if me.ok {
return me
} else {
return then
}
} | vendor/github.com/anacrolix/multiless/multiless.go | 0.669637 | 0.432663 | multiless.go | starcoder |
package binary_search_tree
import "fmt"
/*
root,
left,
right,
Insert, recursive add
Delete,
Find,
Rotation
Depth
Traversal, Depth-first search, Pre-order(NLR)
Traversal, Depth-first search, In-order(LNR)
Traversal, Depth-first Post-order (LRN)
Traversal, Breadth-first search
inorder successor, is the largest thing smaller then the node
inorder predessor, is the smallest thing large then the node
*/
type Node struct {
Left *Node
Right *Node
Data int
}
func NewNode(data int) *Node {
return &Node{
Left: nil,
Right: nil,
Data: data,
}
}
func (t *Node) Insert(data int) *Node {
if data > t.Data {
if t.Right != nil {
t.Right.Insert(data)
} else {
t.Right = NewNode(data)
}
} else {
if t.Left != nil {
t.Left.Insert(data)
} else {
t.Left = NewNode(data)
}
}
return t
}
func (t *Node) InOrderSuccessor() *Node {
cur := t
if cur.Left != nil {
cur = cur.Left
}
return cur
}
func (t *Node) Delete(data int) *Node {
// remove a node with two child, swap with inorder successor or predessor then delete the leaf
if t == nil {
return nil
}
if data < t.Data {
t.Left = t.Left.Delete(data)
return t
}
if data > t.Data {
t.Right = t.Right.Delete(data)
return t
}
// remove the leaf, just set parent to null
if t.Left == nil && t.Right == nil {
t = nil
return t
}
// remove a node with one child, set parent to the child
if t.Left == nil {
t = t.Right
return t
}
if t.Right == nil {
t = t.Left
return t
}
successor := t.InOrderSuccessor()
t.Data = successor.Data
t.Left = t.Left.Delete(data)
return t
}
func (t *Node) Find(data int) bool {
if t.Data == data {
return true
}
if data < t.Data {
if t.Left != nil {
return t.Left.Find(data)
}
} else {
if t.Right != nil {
return t.Right.Find(data)
}
}
return false
}
func (t *Node) FindMin() int {
if t.Left == nil {
fmt.Println(t.Data)
return t.Data
}
return t.Left.FindMin()
}
func (t *Node) FindMax() int {
if t.Right == nil {
fmt.Println(t.Data)
return t.Data
}
return t.Right.FindMax()
}
func CountNode(node *Node) int {
if node == nil {
return 0
}
return CountNode(node.Left) + CountNode(node.Right) + 1
}
func KthSmallest(root *Node, k int) int {
leftCount := CountNode(root.Left)
if k == leftCount+1 {
return root.Data
}
if k <= leftCount {
return KthSmallest(root.Left, k)
} else {
return KthSmallest(root.Right, k-leftCount-1)
}
}
// Breadth-first search
func BreadthFirst(root *Node) {
var q []*Node
q = append(q, root)
for len(q) > 0 {
n := q[0]
fmt.Print(n.Data, " ")
q = q[1:]
if n.Left != nil {
q = append(q, n.Left)
}
if n.Right != nil {
q = append(q, n.Right)
}
}
} | algorithms/data-structures/tree/binary_search_tree/binary_search_tree.go | 0.63341 | 0.442637 | binary_search_tree.go | starcoder |
package ec2
import (
"math/big"
s256 "github.com/fsn-dev/dcrm-walletService/crypto/secp256k1"
"github.com/fsn-dev/dcrm-walletService/crypto/sha3"
"github.com/fsn-dev/dcrm-walletService/internal/common/math/random"
)
// ZkUProof is a non-interactive Schnorr proof of knowledge of the
// discrete logarithm u behind the public point u*G on secp256k1.
type ZkUProof struct {
	E *big.Int // Fiat-Shamir challenge
	S *big.Int // response s = r + e*u mod N
}
// ZkABProof is the proof produced by ZkABProve for the relation
// V = s*R + b*G with B = b*A. Alpha and Beta are commitment points
// encoded as (x, y) coordinate pairs; T and U are the two responses.
type ZkABProof struct {
	Alpha []*big.Int
	Beta []*big.Int
	T *big.Int
	U *big.Int
}
// ZkUProve builds a Fiat-Shamir Schnorr proof of knowledge of u: a
// random nonce r is committed as r*G, the challenge e is the SHA3-256
// hash of (r*G, u*G, tag), and the response is s = r + e*u mod N.
// The byte-write order into the hash is part of the protocol and must
// match ZkUVerify exactly.
func ZkUProve(u *big.Int) *ZkUProof {
	r := random.GetRandomIntFromZn(s256.S256().N)
	rGx, rGy := s256.S256().ScalarBaseMult(r.Bytes())
	uGx, uGy := s256.S256().ScalarBaseMult(u.Bytes())
	// Fixed domain-separation tag mixed into the challenge hash.
	hellofusion := "hello fusion"
	sha3256 := sha3.New256()
	sha3256.Write(rGx.Bytes())
	sha3256.Write(rGy.Bytes())
	sha3256.Write(uGx.Bytes())
	sha3256.Write(uGy.Bytes())
	sha3256.Write([]byte(hellofusion))
	eBytes := sha3256.Sum(nil)
	e := new(big.Int).SetBytes(eBytes)
	// s = r + e*u (mod N)
	s := new(big.Int).Mul(e, u)
	s = new(big.Int).Add(r, s)
	s = new(big.Int).Mod(s, s256.S256().N)
	zkUProof := &ZkUProof{E: e, S: s}
	return zkUProof
}
// ZkUVerify checks a ZkUProof against the public point uG = (x, y):
// it reconstructs the commitment as s*G + (-e)*uG and accepts iff
// re-hashing it together with uG and the tag reproduces challenge E.
func ZkUVerify(uG []*big.Int, zkUProof *ZkUProof) bool {
	sGx, sGy := s256.S256().ScalarBaseMult(zkUProof.S.Bytes())
	// -e mod N so that adding (-e)*uG subtracts e*uG on the curve.
	minusE := new(big.Int).Mul(big.NewInt(-1), zkUProof.E)
	minusE = new(big.Int).Mod(minusE, s256.S256().N)
	eUx, eUy := s256.S256().ScalarMult(uG[0], uG[1], minusE.Bytes())
	rGx, rGy := s256.S256().Add(sGx, sGy, eUx, eUy)
	hellofusion := "hello fusion"
	sha3256 := sha3.New256()
	sha3256.Write(rGx.Bytes())
	sha3256.Write(rGy.Bytes())
	sha3256.Write(uG[0].Bytes())
	sha3256.Write(uG[1].Bytes())
	sha3256.Write([]byte(hellofusion))
	eBytes := sha3256.Sum(nil)
	e := new(big.Int).SetBytes(eBytes)
	if e.Cmp(zkUProof.E) == 0 {
		return true
	} else {
		return false
	}
}
/*func ZkUProve(u *big.Int) *ZkUProof {
r := random.GetRandomIntFromZn(s256.S256().N)
rGx, rGy := s256.S256().ScalarBaseMult(r.Bytes())
hellofusion := "hello fusion"
sha3256 := sha3.New256()
sha3256.Write(rGx.Bytes())
sha3256.Write(rGy.Bytes())
sha3256.Write([]byte(hellofusion))
eBytes := sha3256.Sum(nil)
e := new(big.Int).SetBytes(eBytes)
s := new(big.Int).Mul(e, u)
s = new(big.Int).Add(r, s)
s = new(big.Int).Mod(s, s256.S256().N)
zkUProof := &ZkUProof{E: e, S: s}
return zkUProof
}
func ZkUVerify(uG []*big.Int, zkUProof *ZkUProof) bool {
sGx, sGy := s256.S256().ScalarBaseMult(zkUProof.S.Bytes())
minusE := new(big.Int).Mul(big.NewInt(-1), zkUProof.E)
minusE = new(big.Int).Mod(minusE, s256.S256().N)
eUx, eUy := s256.S256().ScalarMult(uG[0], uG[1], minusE.Bytes())
rGx, rGy := s256.S256().Add(sGx, sGy, eUx, eUy)
hellofusion := "hello fusion"
sha3256 := sha3.New256()
sha3256.Write(rGx.Bytes())
sha3256.Write(rGy.Bytes())
sha3256.Write([]byte(hellofusion))
eBytes := sha3256.Sum(nil)
e := new(big.Int).SetBytes(eBytes)
if e.Cmp(zkUProof.E) == 0 {
return true
} else {
return false
}
}
*/
// ZkABProve proves knowledge of scalars s (rho) and b (l) such that
// V = s*R + b*G and B = b*A, where A = a*G. Commitments alpha and
// beta use fresh nonces r_a, r_b; the challenge is the SHA3-256 hash
// of (alpha, beta, A, b*A, tag). The write order into the hash is
// protocol-critical and must match ZkABVerify exactly.
func ZkABProve(a *big.Int, b *big.Int, s *big.Int, R []*big.Int) *ZkABProof {
	r_a := random.GetRandomIntFromZn(s256.S256().N)
	r_b := random.GetRandomIntFromZn(s256.S256().N)
	// alpha = r_a*R + r_b*G
	alphax, alphay := s256.S256().ScalarMult(R[0], R[1], r_a.Bytes())
	r_bGx, r_bGy := s256.S256().ScalarBaseMult(r_b.Bytes())
	alphax, alphay = s256.S256().Add(alphax, alphay, r_bGx, r_bGy)
	// beta = r_b*A where A = a*G
	aGx, aGy := s256.S256().ScalarBaseMult(a.Bytes())
	betax, betay := s256.S256().ScalarMult(aGx, aGy, r_b.Bytes())
	bAx, bAy := s256.S256().ScalarMult(aGx, aGy, b.Bytes())
	hellofusion := "hello fusion"
	sha3256 := sha3.New256()
	sha3256.Write(alphax.Bytes())
	sha3256.Write(alphay.Bytes())
	sha3256.Write(betax.Bytes())
	sha3256.Write(betay.Bytes())
	sha3256.Write(aGx.Bytes())
	sha3256.Write(aGy.Bytes())
	sha3256.Write(bAx.Bytes())
	sha3256.Write(bAy.Bytes())
	sha3256.Write([]byte(hellofusion))
	eBytes := sha3256.Sum(nil)
	e := new(big.Int).SetBytes(eBytes)
	// t = r_a + e*s (mod N), u = r_b + e*b (mod N)
	t := new(big.Int).Mul(e, s)
	t = new(big.Int).Add(t, r_a)
	t = new(big.Int).Mod(t, s256.S256().N)
	u := new(big.Int).Mul(e, b)
	u = new(big.Int).Add(u, r_b)
	u = new(big.Int).Mod(u, s256.S256().N)
	zkABProof := &ZkABProof{Alpha: []*big.Int{alphax, alphay}, Beta: []*big.Int{betax, betay}, T: t, U: u}
	return zkABProof
}
// ZkABVerify checks a ZkABProof against public points A, B, V, R.
// It recomputes the challenge from the proof's commitments and tests
// the two verification equations t*R + u*G == alpha + e*V and
// u*A == beta + e*B, comparing both coordinates of each point.
func ZkABVerify(A []*big.Int, B []*big.Int, V []*big.Int, R []*big.Int, zkABProof *ZkABProof) bool {
	hellofusion := "hello fusion"
	sha3256 := sha3.New256()
	sha3256.Write(zkABProof.Alpha[0].Bytes())
	sha3256.Write(zkABProof.Alpha[1].Bytes())
	sha3256.Write(zkABProof.Beta[0].Bytes())
	sha3256.Write(zkABProof.Beta[1].Bytes())
	sha3256.Write(A[0].Bytes())
	sha3256.Write(A[1].Bytes())
	sha3256.Write(B[0].Bytes())
	sha3256.Write(B[1].Bytes())
	sha3256.Write([]byte(hellofusion))
	eBytes := sha3256.Sum(nil)
	e := new(big.Int).SetBytes(eBytes)
	// Left side of equation 1: t*R + u*G.
	R_tG_ux, R_tG_uy := s256.S256().ScalarMult(R[0], R[1], zkABProof.T.Bytes())
	G_ux, G_uy := s256.S256().ScalarBaseMult(zkABProof.U.Bytes())
	R_tG_ux, R_tG_uy = s256.S256().Add(R_tG_ux, R_tG_uy, G_ux, G_uy)
	// Right side of equation 1: alpha + e*V.
	alphaV_ex, alphaV_ey := s256.S256().ScalarMult(V[0], V[1], e.Bytes())
	alphaV_ex, alphaV_ey = s256.S256().Add(alphaV_ex, alphaV_ey, zkABProof.Alpha[0], zkABProof.Alpha[1])
	if R_tG_ux.Cmp(alphaV_ex) != 0 {
		return false
	}
	if R_tG_uy.Cmp(alphaV_ey) != 0 {
		return false
	}
	// Equation 2: u*A == beta + e*B.
	A_ux, A_uy := s256.S256().ScalarMult(A[0], A[1], zkABProof.U.Bytes())
	betaB_ex, betaB_ey := s256.S256().ScalarMult(B[0], B[1], e.Bytes())
	betaB_ex, betaB_ey = s256.S256().Add(betaB_ex, betaB_ey, zkABProof.Beta[0], zkABProof.Beta[1])
	if A_ux.Cmp(betaB_ex) != 0 {
		return false
	}
	if A_uy.Cmp(betaB_ey) != 0 {
		return false
	}
	return true
}
package model
import "github.com/markcheno/go-talib"
// SMA is a simple moving average of a price series over a fixed period.
type SMA struct {
	period int
	values []float64
}
// NewSMA computes the SMA of inReal over period via TA-Lib. It returns
// nil for a non-positive period, when there are not enough data points,
// or when the underlying computation yields no values.
func NewSMA(inReal []float64, period int) *SMA {
	if period <= 0 || len(inReal) <= period {
		return nil
	}
	values := talib.Sma(inReal, period)
	if values == nil {
		return nil
	}
	return &SMA{
		period: period,
		values: values,
	}
}
// Period returns the averaging window length.
func (sma *SMA) Period() int {
	return sma.period
}
// Values returns the computed SMA series.
func (sma *SMA) Values() []float64 {
	return sma.values
}
// EMA is an exponential moving average of a price series.
type EMA struct {
	period int
	values []float64
}
// NewEMA computes the EMA of inReal over period via TA-Lib, with the
// same validation rules as NewSMA.
func NewEMA(inReal []float64, period int) *EMA {
	if period <= 0 || len(inReal) <= period {
		return nil
	}
	values := talib.Ema(inReal, period)
	if values == nil {
		return nil
	}
	return &EMA{
		period: period,
		values: values,
	}
}
// Period returns the averaging window length.
func (ema *EMA) Period() int {
	return ema.period
}
// Values returns the computed EMA series.
func (ema *EMA) Values() []float64 {
	return ema.values
}
// BBands holds Bollinger Bands: upper, middle, and lower series for an
// n-period window with a k standard-deviation width.
type BBands struct {
	n int
	k float64
	up []float64
	mid []float64
	down []float64
}
// NewBBands computes Bollinger Bands via TA-Lib, using k for both the
// upper and lower deviation multiplier and a simple moving average
// (matype 0) for the middle band. Returns nil for invalid n or k.
func NewBBands(inReal []float64, n int, k float64) *BBands {
	if n <= 0 || len(inReal) < n {
		return nil
	}
	if k <= 0 {
		return nil
	}
	up, mid, down := talib.BBands(inReal, n, k, k, 0)
	return &BBands{
		n: n,
		k: k,
		up: up,
		mid: mid,
		down: down,
	}
}
// N returns the window length.
func (bbands *BBands) N() int {
	return bbands.n
}
// K returns the standard-deviation multiplier.
func (bbands *BBands) K() float64 {
	return bbands.k
}
// Up returns the upper band series.
func (bbands *BBands) Up() []float64 {
	return bbands.up
}
// Mid returns the middle band series.
func (bbands *BBands) Mid() []float64 {
	return bbands.mid
}
// Down returns the lower band series.
func (bbands *BBands) Down() []float64 {
	return bbands.down
}
// IchimokuCloud holds the five Ichimoku Kinko Hyo component lines.
type IchimokuCloud struct {
	tenkan []float64 // conversion line (9-period midpoint)
	kijun []float64 // base line (26-period midpoint)
	senkouA []float64 // leading span A
	senkouB []float64 // leading span B (52-period midpoint)
	chikou []float64 // lagging span (close shifted back 26)
}
// minMax returns the smallest and largest values in inReal.
// inReal must be non-empty; an empty slice panics on the first index.
func minMax(inReal []float64) (float64, float64) {
	lo, hi := inReal[0], inReal[0]
	for _, v := range inReal[1:] {
		if v < lo {
			lo = v
		}
		if v > hi {
			hi = v
		}
	}
	return lo, hi
}
// min returns the smaller of two ints.
func min(x, y int) int {
	if y < x {
		return y
	}
	return x
}
// NewIchimokuCloud computes the Ichimoku lines from a price series.
// At least 52 data points are required; fewer returns nil.
// NOTE(review): each component slice is created with make(len, ...)
// and then grown with append, so every series starts with a run of
// zero values before the first computed entry — make with a capacity
// (make([]float64, 0, n)) was almost certainly intended. Verify the
// intended alignment before relying on these values.
// NOTE(review): senkouA reads tenkan[i] and kijun[i], whose contents
// are shifted by that zero padding; confirm the indexing is correct.
func NewIchimokuCloud(inReal []float64) *IchimokuCloud {
	length := len(inReal)
	if length < 52 {
		return nil
	}
	tenkan := make([]float64, min(9, length))
	kijun := make([]float64, min(26, length))
	senkouA := make([]float64, min(26, length))
	senkouB := make([]float64, min(52, length))
	chikou := make([]float64, min(26, length))
	for i := range inReal {
		if i >= 9 {
			// Tenkan-sen: midpoint of the trailing 9-value window.
			min, max := minMax(inReal[i-9 : i])
			tenkan = append(tenkan, (min+max)/2)
		}
		if i >= 26 {
			// Kijun-sen: midpoint of the trailing 26-value window.
			min, max := minMax(inReal[i-26 : i])
			kijun = append(kijun, (min+max)/2)
			senkouA = append(senkouA, (tenkan[i]+kijun[i])/2)
			chikou = append(chikou, inReal[i-26])
		}
		if i >= 52 {
			// Senkou span B: midpoint of the trailing 52-value window.
			min, max := minMax(inReal[i-52 : i])
			senkouB = append(senkouB, (min+max)/2)
		}
	}
	return &IchimokuCloud{
		tenkan: tenkan,
		kijun: kijun,
		senkouA: senkouA,
		senkouB: senkouB,
		chikou: chikou,
	}
}
// Tenkan returns the conversion-line series.
func (ichimokuCloud *IchimokuCloud) Tenkan() []float64 {
	return ichimokuCloud.tenkan
}
// Kijun returns the base-line series.
func (ichimokuCloud *IchimokuCloud) Kijun() []float64 {
	return ichimokuCloud.kijun
}
// SenkouA returns the leading span A series.
func (ichimokuCloud *IchimokuCloud) SenkouA() []float64 {
	return ichimokuCloud.senkouA
}
// SenkouB returns the leading span B series.
func (ichimokuCloud *IchimokuCloud) SenkouB() []float64 {
	return ichimokuCloud.senkouB
}
// Chikou returns the lagging span series.
func (ichimokuCloud *IchimokuCloud) Chikou() []float64 {
	return ichimokuCloud.chikou
}
// RSI is the Relative Strength Index of a price series over a period.
type RSI struct {
	period int
	values []float64
}
// NewRSI computes the RSI of inReal over period via TA-Lib. It returns
// nil for a non-positive period, when there are not enough data
// points, or when the underlying computation yields no values — the
// same validation NewSMA and NewEMA perform (the nil-values check was
// missing here, inconsistently with the other constructors).
func NewRSI(inReal []float64, period int) *RSI {
	if period <= 0 || len(inReal) <= period {
		return nil
	}
	values := talib.Rsi(inReal, period)
	if values == nil {
		return nil
	}
	return &RSI{
		period: period,
		values: values,
	}
}
// Period returns the window length the RSI was computed with.
func (rsi *RSI) Period() int {
	return rsi.period
}
// Values returns the computed RSI series.
func (rsi *RSI) Values() []float64 {
	return rsi.values
}
// MACD is the Moving Average Convergence/Divergence indicator: the
// MACD line, its signal line, and the histogram (their difference).
type MACD struct {
	fastPeriod int
	slowPeriod int
	signalPeriod int
	macd []float64
	macdSignal []float64
	macdHist []float64
}
// NewMACD computes the MACD of inReal via TA-Lib. Each period must be
// positive and smaller than the series length; otherwise nil is
// returned.
func NewMACD(inReal []float64, inFastPeriod, inSlowPeriod, inSignalPeriod int) *MACD {
	if len(inReal) <= 0 {
		return nil
	}
	if inFastPeriod <= 0 || len(inReal) <= inFastPeriod {
		return nil
	}
	if inSlowPeriod <= 0 || len(inReal) <= inSlowPeriod {
		return nil
	}
	if inSignalPeriod <= 0 || len(inReal) <= inSignalPeriod {
		return nil
	}
	outMACD, outMACDSignal, outMACDHist := talib.Macd(inReal, inFastPeriod, inSlowPeriod, inSignalPeriod)
	return &MACD{
		fastPeriod: inFastPeriod,
		slowPeriod: inSlowPeriod,
		signalPeriod: inSignalPeriod,
		macd: outMACD,
		macdSignal: outMACDSignal,
		macdHist: outMACDHist,
	}
}
// FastPeriod returns the fast EMA period.
func (macd *MACD) FastPeriod() int {
	return macd.fastPeriod
}
// SlowPeriod returns the slow EMA period.
func (macd *MACD) SlowPeriod() int {
	return macd.slowPeriod
}
// SignalPeriod returns the signal-line smoothing period.
func (macd *MACD) SignalPeriod() int {
	return macd.signalPeriod
}
// Macd returns the MACD line.
func (macd *MACD) Macd() []float64 {
	return macd.macd
}
// MacdSignal returns the signal line.
func (macd *MACD) MacdSignal() []float64 {
	return macd.macdSignal
}
// MacdHist returns the histogram (MACD minus signal).
func (macd *MACD) MacdHist() []float64 {
	return macd.macdHist
}
package gtreap
// Treap is an immutable (persistent) randomized balanced search tree:
// a binary search tree on items under compare, heap-ordered by node
// priority. Mutating operations return a new Treap that shares
// structure with the original.
type Treap struct {
	compare Compare
	root *node
}
// Compare returns an integer comparing the two items
// lexicographically. The result will be 0 if a==b, -1 if a < b, and
// +1 if a > b.
type Compare func(a, b interface{}) int
// Item can be anything.
type Item interface{}
// node is an internal immutable tree node; nodes are never mutated
// after creation, which is what makes the treap persistent.
type node struct {
	item Item
	priority int
	left *node
	right *node
}
// NewTreap returns an empty treap ordered by c.
func NewTreap(c Compare) *Treap {
	return &Treap{compare: c, root: nil}
}
// Min returns the smallest item in the treap, or nil when empty.
func (t *Treap) Min() Item {
	n := t.root
	if n == nil {
		return nil
	}
	for n.left != nil {
		n = n.left
	}
	return n.item
}
// Max returns the largest item in the treap, or nil when empty.
func (t *Treap) Max() Item {
	n := t.root
	if n == nil {
		return nil
	}
	for n.right != nil {
		n = n.right
	}
	return n.item
}
// Get returns the stored item equal to target (per compare), or nil
// if no such item exists.
func (t *Treap) Get(target Item) Item {
	n := t.root
	for n != nil {
		c := t.compare(target, n.item)
		if c < 0 {
			n = n.left
		} else if c > 0 {
			n = n.right
		} else {
			return n.item
		}
	}
	return nil
}
// Note: only the priority of the first insert of an item is used.
// Priorities from future updates on already existing items are
// ignored. To change the priority for an item, you need to do a
// Delete then an Upsert.
func (t *Treap) Upsert(item Item, itemPriority int) *Treap {
	r := t.union(t.root, &node{item: item, priority: itemPriority})
	return &Treap{compare: t.compare, root: r}
}
// union merges two treaps into one, preserving BST order on items and
// heap order on priorities. The higher-priority root becomes the new
// root; the other treap is split around that root's item and the
// halves are merged recursively. Items present in both sides keep
// "that"'s value (hence upsert semantics).
func (t *Treap) union(this *node, that *node) *node {
	if this == nil {
		return that
	}
	if that == nil {
		return this
	}
	if this.priority > that.priority {
		left, middle, right := t.split(that, this.item)
		if middle == nil {
			return &node{
				item: this.item,
				priority: this.priority,
				left: t.union(this.left, left),
				right: t.union(this.right, right),
			}
		}
		// middle carries the updated value for this.item; keep this
		// node's priority (first-insert priority wins).
		return &node{
			item: middle.item,
			priority: this.priority,
			left: t.union(this.left, left),
			right: t.union(this.right, right),
		}
	}
	// We don't use middle because the "that" has precedence.
	left, _, right := t.split(this, that.item)
	return &node{
		item: that.item,
		priority: that.priority,
		left: t.union(left, that.left),
		right: t.union(right, that.right),
	}
}
// Splits a treap into two treaps based on a split item "s".
// The result tuple-3 means (left, X, right), where X is either...
// nil - meaning the item s was not in the original treap.
// non-nil - returning the node that had item s.
// The tuple-3's left result treap has items < s,
// and the tuple-3's right result treap has items > s.
func (t *Treap) split(n *node, s Item) (*node, *node, *node) {
	if n == nil {
		return nil, nil, nil
	}
	c := t.compare(s, n.item)
	if c == 0 {
		return n.left, n, n.right
	}
	if c < 0 {
		// s falls in the left subtree; n and its right subtree belong
		// entirely to the "right" result.
		left, middle, right := t.split(n.left, s)
		return left, middle, &node{
			item: n.item,
			priority: n.priority,
			left: right,
			right: n.right,
		}
	}
	// s falls in the right subtree; mirror image of the case above.
	left, middle, right := t.split(n.right, s)
	return &node{
		item: n.item,
		priority: n.priority,
		left: n.left,
		right: left,
	}, middle, right
}
// Delete returns a new treap without target. Deleting an absent item
// returns an equivalent treap (split yields a nil middle).
func (t *Treap) Delete(target Item) *Treap {
	left, _, right := t.split(t.root, target)
	return &Treap{compare: t.compare, root: t.join(left, right)}
}
// All the items from this are < items from that.
// join concatenates two treaps under that precondition, choosing the
// higher-priority root at each step to preserve heap order.
func (t *Treap) join(this *node, that *node) *node {
	if this == nil {
		return that
	}
	if that == nil {
		return this
	}
	if this.priority > that.priority {
		return &node{
			item: this.item,
			priority: this.priority,
			left: this.left,
			right: t.join(this.right, that),
		}
	}
	return &node{
		item: that.item,
		priority: that.priority,
		left: t.join(this, that.left),
		right: that.right,
	}
}
// ItemVistor callback should return true to keep going on the visitation.
type ItemVisitor func(i Item) bool
// Visit items greater-than-or-equal to the pivot, in ascending order.
func (t *Treap) VisitAscend(pivot Item, visitor ItemVisitor) {
	t.visitAscend(t.root, pivot, visitor)
}
// visitAscend performs the in-order walk for VisitAscend; it returns
// false as soon as the visitor asks to stop, short-circuiting the
// remaining traversal.
func (t *Treap) visitAscend(n *node, pivot Item, visitor ItemVisitor) bool {
	if n == nil {
		return true
	}
	c := t.compare(pivot, n.item)
	if c < 0 && !t.visitAscend(n.left, pivot, visitor) {
		return false
	}
	if c <= 0 && !visitor(n.item) {
		return false
	}
	return t.visitAscend(n.right, pivot, visitor)
}
// Visit items less-than-or-equal to the pivot, in descending order.
func (t *Treap) VisitDescend(pivot Item, visitor ItemVisitor) {
	t.visitDescend(t.root, pivot, visitor)
}
// visitDescend is the mirror image of visitAscend: reverse in-order,
// stopping early when the visitor returns false.
func (t *Treap) visitDescend(n *node, pivot Item, visitor ItemVisitor) bool {
	if n == nil {
		return true
	}
	c := t.compare(pivot, n.item)
	if c > 0 && !t.visitDescend(n.right, pivot, visitor) {
		return false
	}
	if c >= 0 && !visitor(n.item) {
		return false
	}
	return t.visitDescend(n.left, pivot, visitor)
}
package drawing
import (
"image/color"
"math"
"time"
)
// Clear fills the whole surface with c and records it as the
// surface's background color.
func (s *Surface) Clear(c color.RGBA) {
	defer s.trackDuration("Clear", time.Now())
	s.Background = c
	s.FillRect(s.Bounds, s.Background)
}
// GetPoint returns the color stored at (x, y).
// NOTE(review): the type assertion panics if Image.At yields a
// non-RGBA color — assumes the backing image uses the RGBA model.
func (s *Surface) GetPoint(x, y int) color.RGBA {
	return s.Image.At(x, y).(color.RGBA)
}
// GetWeightedColor linearly interpolates from c1 toward c2 by weight,
// clamped to [0, 1] (the previous comment said 0..100, but the code
// clamps to the unit interval). The result takes c2's alpha channel.
func (s *Surface) GetWeightedColor(c1, c2 color.RGBA, weight float64) color.RGBA {
	weight = math.Min(1, math.Max(0, weight))
	c1R, c1G, c1B := float64(c1.R), float64(c1.G), float64(c1.B)
	mvR, mvG, mvB := float64(c2.R)-c1R, float64(c2.G)-c1G, float64(c2.B)-c1B
	return color.RGBA{
		R: uint8(c1R + (mvR * weight)),
		G: uint8(c1G + (mvG * weight)),
		B: uint8(c1B + (mvB * weight)),
		A: c2.A,
	}
}
// Plot sets the pixel at (x, y) to c.
func (s *Surface) Plot(x, y int, c color.RGBA) {
	s.Image.Set(x, y, c)
}
// PlotA plots pixel 1 at full intensity and pixel 2 blended toward c
// by the antialiasing weight (0..1). The weight is boosted by fixed
// factors (1.3 here, 1.2 in the blend) to brighten the coverage.
func (s *Surface) PlotA(x1, y1, x2, y2 float64, c color.RGBA, weight float64) {
	X1, Y1, X2, Y2 := int(x1), int(y1), int(x2), int(y2)
	weight = math.Min(1.0, weight*1.3)
	c2 := s.GetWeightedColor(s.GetPoint(X2, Y2), c, weight*1.2)
	s.Image.Set(X1, Y1, c)
	s.Image.Set(X2, Y2, c2)
}
// PlotPoint sets the pixel at point to c.
func (s *Surface) PlotPoint(point Point, c color.RGBA) {
	s.Image.Set(point.X, point.Y, c)
}
// Hline draws a horizontal line at start.Y from start.X to end.X,
// ignoring any Y deviation in end.
// NOTE(review): assumes start.X <= end.X; otherwise nothing is drawn.
func (s *Surface) Hline(start, end Point, c color.RGBA) {
	x1 := start.X
	x2 := end.X
	y := start.Y
	for x := x1; x <= x2; x++ {
		s.Plot(x, y, c)
	}
}
// Vline draws a vertical line at start.X from start.Y to end.Y,
// ignoring any X deviation in end.
// NOTE(review): assumes start.Y <= end.Y; otherwise nothing is drawn.
func (s *Surface) Vline(start, end Point, c color.RGBA) {
	x := start.X
	y1 := start.Y
	y2 := end.Y
	for y := y1; y <= y2; y++ {
		s.Plot(x, y, c)
	}
}
// Line draws a line from start to end using Bresenham's integer
// algorithm (no anti-aliasing). Works in any direction.
func (s *Surface) Line(start, end Point, c color.RGBA) {
	p := NewPoint(start.X, start.Y)
	// Calculate movement and inherent deviation.
	dX := intAbs(end.X - start.X)
	dY := intAbs(end.Y - start.Y)
	err := dX - dY
	// Calculate step in each axis.
	stepX, stepY := -1, -1
	if start.X < end.X {
		stepX = 1
	}
	if start.Y < end.Y {
		stepY = 1
	}
	// Iterate over the line's intermediate pixels.
	deviation := 0
	for {
		s.PlotPoint(p, c)
		if p.X == end.X && p.Y == end.Y {
			return
		}
		deviation = 2 * err
		if deviation > -dY {
			err, p.X = err-dY, p.X+stepX
		}
		if deviation < dX {
			err, p.Y = err+dX, p.Y+stepY
		}
	}
}
// LineA draws an arbitrary antialiased line using Xiaolin Wu's
// algorithm: endpoints are plotted with coverage-weighted blending and
// the body plots two vertically (or horizontally) adjacent pixels per
// column, weighted by the fractional intersection.
func (s *Surface) LineA(start, end Point, c color.RGBA) {
	defer s.trackDuration("LineA", time.Now())
	x0 := float64(start.X)
	y0 := float64(start.Y)
	x1 := float64(end.X)
	y1 := float64(end.Y)
	// Quadrant switch based on if the line more horizontal than vertical.
	tall := math.Abs(y1-y0) > math.Abs(x1-x0)
	if tall {
		x0, y0 = swapFloats(x0, y0)
		x1, y1 = swapFloats(x1, y1)
	}
	// Quadrant switch based on the line's horizontal direction.
	if x0 > x1 {
		x0, x1 = swapFloats(x0, x1)
		y0, y1 = swapFloats(y0, y1)
	}
	// Calculate the slope.
	dx := x1 - x0
	dy := y1 - y0
	gradient := 1.0
	if dx != 0.0 {
		gradient = dy / dx
	}
	// First endpoint.
	xend := round(x0)
	yend := y0 + gradient*(xend-x0)
	xgap := inverseMantissa(x0 + 0.5)
	xpxl1 := xend
	ypxl1 := math.Floor(yend)
	if tall {
		s.PlotA(ypxl1, xpxl1, ypxl1+1, xpxl1, c, mantissa(yend)*xgap)
	} else {
		s.PlotA(xpxl1, ypxl1, xpxl1, ypxl1+1, c, mantissa(yend)*xgap)
	}
	intersect := yend + gradient
	// Second endpoint.
	xend = round(x1)
	yend = y1 + gradient*(xend-x1)
	xgap = mantissa(x1 + 0.5)
	xpxl2 := xend //this will be used in the main loop
	ypxl2 := math.Floor(yend)
	if tall {
		s.PlotA(ypxl2, xpxl2, ypxl2+1, xpxl2, c, mantissa(yend)*xgap)
	} else {
		s.PlotA(xpxl2, ypxl2, xpxl2, ypxl2+1, c, mantissa(yend)*xgap)
	}
	// Line body: step one pixel along the major axis, blending the two
	// pixels straddling the true line by the fractional intersection.
	if tall {
		for x := xpxl1 + 1; x <= xpxl2-1; x++ {
			s.PlotA(math.Floor(intersect), x, math.Floor(intersect)+1, x, c, mantissa(intersect))
			intersect = intersect + gradient
		}
	} else {
		for x := xpxl1 + 1; x <= xpxl2-1; x++ {
			s.PlotA(x, math.Floor(intersect), x, math.Floor(intersect)+1, c, mantissa(intersect))
			intersect = intersect + gradient
		}
	}
}
// DrawRect draws a rectangular outline along the four edges of rect.
func (s *Surface) DrawRect(rect Rect, c color.RGBA) {
	defer s.trackDuration("DrawRect", time.Now())
	s.Hline(rect.TopLeft, rect.TopRight, c) // top
	s.Hline(rect.BottomLeft, rect.BottomRight, c) // bottom
	s.Vline(rect.TopLeft, rect.BottomLeft, c) // left
	s.Vline(rect.TopRight, rect.BottomRight, c) // right
}
// FillRect draws a solid rectangle as a stack of horizontal lines.
// Note the loop is inclusive of rect.Height, so Height+1 rows are
// filled.
func (s *Surface) FillRect(rect Rect, c color.RGBA) {
	defer s.trackDuration("FillRect", time.Now())
	for offs := 0; offs <= rect.Height; offs++ {
		start := NewPoint(rect.TopLeft.X, rect.TopLeft.Y+offs)
		end := NewPoint(rect.TopRight.X, rect.TopLeft.Y+offs)
		s.Hline(start, end, c)
	}
}
// Circle draws a circle outline using the midpoint circle algorithm,
// plotting all eight octants per iteration.
func (s *Surface) Circle(centreX, centreY, radius int, c color.RGBA) {
	defer s.trackDuration("Circle", time.Now())
	x := radius
	y := 0
	err := 0
	for {
		// Plot all octants at the same time.
		s.Plot(centreX+x, centreY+y, c)
		s.Plot(centreX+x, centreY-y, c)
		s.Plot(centreX-x, centreY-y, c)
		s.Plot(centreX-x, centreY+y, c)
		s.Plot(centreX+y, centreY+x, c)
		s.Plot(centreX+y, centreY-x, c)
		s.Plot(centreX-y, centreY-x, c)
		s.Plot(centreX-y, centreY+x, c)
		if x <= y {
			return
		}
		// Move on.
		err += 2*y + 1
		y++
		if err > x {
			err += 1 - 2*x
			x--
		}
	}
}
// FillCircle draws a filled circle as horizontal spans: for each row,
// the half-width is derived from the circle equation and a line is
// drawn across.
func (s *Surface) FillCircle(centreX, centreY, radius int, c color.RGBA) {
	defer s.trackDuration("FillCircle", time.Now())
	r2 := radius * radius
	for cy := -radius; cy <= radius; cy++ {
		cx := (int)(math.Sqrt(float64(r2-cy*cy)) + 0.5)
		cyy := cy + centreY
		s.Line(NewPoint(centreX-cx, cyy), NewPoint(centreX+cx, cyy), c)
	}
}
// DrawPolygon draws a closed polygon outline, joining the last vertex
// back to the first. Fewer than three vertices draws nothing.
func (s *Surface) DrawPolygon(polygon Polygon, c color.RGBA) {
	if len(polygon) < 3 {
		return
	}
	for i := 1; i < len(polygon); i++ {
		s.Line(polygon[i-1], polygon[i], c)
	}
	s.Line(polygon[len(polygon)-1], polygon[0], c)
}
// DrawPolygonA is DrawPolygon with antialiased edges (LineA).
func (s *Surface) DrawPolygonA(polygon Polygon, c color.RGBA) {
	if len(polygon) < 3 {
		return
	}
	for i := 1; i < len(polygon); i++ {
		s.LineA(polygon[i-1], polygon[i], c)
	}
	s.LineA(polygon[len(polygon)-1], polygon[0], c)
}
package paletted
import (
"image"
)
func DrawOver(dst *image.Paletted, r image.Rectangle, src *image.Paletted, sp image.Point) {
for y := 0; y < r.Dy(); y++ {
for x := 0; x < r.Dx(); x++ {
sx := src.Rect.Min.X + sp.X + x
sy := src.Rect.Min.Y + sp.Y + y
dx := dst.Rect.Min.X + r.Min.X + x
dy := dst.Rect.Min.Y + r.Min.Y + y
srcPixel := src.Pix[sy*src.Rect.Max.X+sx]
if srcPixel == 0 {
continue
}
dst.Pix[dy*dst.Rect.Max.X+dx] = srcPixel
}
}
}
func FlipHorizontal(img *image.Paletted) {
w := img.Rect.Dx()
h := img.Rect.Dy()
for j := 0; j < h; j++ {
y := img.Rect.Min.Y + j
for i := 0; i < w/2; i++ {
x0 := img.Rect.Min.X + i
x1 := img.Rect.Min.X + w - i - 1
img.Pix[y*img.Rect.Max.X+x0], img.Pix[y*img.Rect.Max.X+x1] = img.Pix[y*img.Rect.Max.X+x1], img.Pix[y*img.Rect.Max.X+x0]
}
}
}
func FlipVertical(img *image.Paletted) {
w := img.Rect.Dx()
h := img.Rect.Dy()
for j := 0; j < h/2; j++ {
y0 := img.Rect.Min.Y + j
y1 := img.Rect.Min.Y + h - j - 1
upper := make([]uint8, w)
copy(upper, img.Pix[y0*img.Rect.Max.X:y0*img.Rect.Max.X+w])
copy(img.Pix[y0*img.Rect.Max.X:y0*img.Rect.Max.X+w], img.Pix[y1*img.Rect.Max.X:y1*img.Rect.Max.X+w])
copy(img.Pix[y1*img.Rect.Max.X:y1*img.Rect.Max.X+w], upper)
}
}
// FindTrim returns the tightest rectangle enclosing all non-zero
// (non-transparent) pixels of img, scanning inward from each edge.
// Returns the empty rectangle when every pixel is zero.
// NOTE(review): Pix is indexed as y*Rect.Max.X+x, which assumes
// Rect.Min is (0,0) and Stride == Rect.Max.X; sub-images would need
// PixOffset instead — confirm callers only pass whole images.
func FindTrim(img *image.Paletted) image.Rectangle {
	left := img.Rect.Min.X
	top := img.Rect.Min.Y
	right := img.Rect.Max.X
	bottom := img.Rect.Max.Y
	// Scan columns left-to-right until one contains a non-zero pixel.
	for left = img.Rect.Min.X; left < img.Rect.Max.X; left++ {
		for y := img.Rect.Min.Y; y < img.Rect.Max.Y; y++ {
			if img.Pix[y*img.Rect.Max.X+left] != 0 {
				goto leftDone
			}
		}
		continue
	leftDone:
		break
	}
	// Scan rows top-to-bottom.
	for top = img.Rect.Min.Y; top < img.Rect.Max.Y; top++ {
		for x := img.Rect.Min.X; x < img.Rect.Max.X; x++ {
			if img.Pix[top*img.Rect.Max.X+x] != 0 {
				goto topDone
			}
		}
		continue
	topDone:
		break
	}
	// Scan columns right-to-left.
	for right = img.Rect.Max.X - 1; right >= img.Rect.Min.X; right-- {
		for y := img.Rect.Min.Y; y < img.Rect.Max.Y; y++ {
			if img.Pix[y*img.Rect.Max.X+right] != 0 {
				goto rightDone
			}
		}
		continue
	rightDone:
		break
	}
	right++ // convert from last occupied column to exclusive bound
	// Scan rows bottom-to-top.
	for bottom = img.Rect.Max.Y - 1; bottom >= img.Rect.Min.Y; bottom-- {
		for x := img.Rect.Min.X; x < img.Rect.Max.X; x++ {
			if img.Pix[bottom*img.Rect.Max.X+x] != 0 {
				goto bottomDone
			}
		}
		continue
	bottomDone:
		break
	}
	bottom++ // convert from last occupied row to exclusive bound
	// Fully transparent image: the scans crossed over each other.
	if right < left || bottom < top {
		return image.Rect(0, 0, 0, 0)
	}
	return image.Rectangle{image.Point{left, top}, image.Point{right, bottom}}
}
package tuner
// tuner.go is a texel tuning implementation for Blunder.
import (
"blunder/engine"
"bufio"
"fmt"
"math"
"os"
"strings"
)
const (
	DataFile = "/home/algerbrex/quiet-labeled.epd" // training corpus (EPD, one position per line)
	NumCores = 4 // parallel workers for the MSE computation
	NumWeights = 774 // 12 PSQTs (12*64) plus 6 mobility terms
	Draw float64 = 0.5 // game-outcome encodings used by the sigmoid fit
	WhiteWin float64 = 1.0
	BlackWin float64 = 0.0
	NumPositions float64 = 362500.0 // number of corpus lines loaded
	K float64 = 1.62 // sigmoid scaling constant (see findK)
)
// A struct object to hold data concering a position loaded from the training file.
// Each position consists of a position board object and the outcome of the game
// the position was from.
type Position struct {
	Pos engine.Position
	Outcome float64
}
// A global variable to hold the positions loaded from the training file.
// NOTE(review): this triggers heavy file I/O at package init time, and
// the argument 362500 skips the first 362500 corpus lines.
var Positions = loadPositions(362500)
// A global variable to hold the parallel computations of the MSE function.
var Answers = make(chan float64)
// Marks which weights should be skipped when tuning (see tune).
// Go's package-level init ordering guarantees Weights is initialized
// before this len(Weights) is evaluated.
var IgnoreWeights = make([]bool, len(Weights))
// setIgnoredWeights flags the half-open index range [from, to) as
// excluded from tuning.
func setIgnoredWeights(from, to int) {
	for i := from; i < to; i++ {
		IgnoreWeights[i] = true
	}
}
// The weights to be adjusted during the tuning process.
var Weights []int16 = loadWeights()
// loadWeights snapshots the engine's current evaluation terms into a
// flat weight vector: 12 piece-square tables (middlegame then
// endgame), followed by the six mobility terms at indices 768-773.
func loadWeights() (weights []int16) {
	weights = make([]int16, NumWeights)
	copy(weights[0:64], engine.PSQT_MG[engine.Pawn][:])
	copy(weights[64:128], engine.PSQT_MG[engine.Knight][:])
	copy(weights[128:192], engine.PSQT_MG[engine.Bishop][:])
	copy(weights[192:256], engine.PSQT_MG[engine.Rook][:])
	copy(weights[256:320], engine.PSQT_MG[engine.Queen][:])
	copy(weights[320:384], engine.PSQT_MG[engine.King][:])
	copy(weights[384:448], engine.PSQT_EG[engine.Pawn][:])
	copy(weights[448:512], engine.PSQT_EG[engine.Knight][:])
	copy(weights[512:576], engine.PSQT_EG[engine.Bishop][:])
	copy(weights[576:640], engine.PSQT_EG[engine.Rook][:])
	copy(weights[640:704], engine.PSQT_EG[engine.Queen][:])
	copy(weights[704:768], engine.PSQT_EG[engine.King][:])
	weights[768] = engine.KnightMobility
	weights[769] = engine.BishopMobility
	weights[770] = engine.RookMobilityMG
	weights[771] = engine.RookMobilityEG
	weights[772] = engine.QueenMobilityMG
	weights[773] = engine.QueenMobilityEG
	return weights
}
// loadPositions reads training positions from DataFile. The first
// `start` lines are skipped and the next NumPositions lines are
// parsed. Each line is an EPD record: the first two fields form the
// board/side-to-move part of a FEN (castling, en passant, and move
// counters are filled with defaults), and the sixth field holds the
// game result ("1-0", "0-1", or a draw otherwise).
func loadPositions(start int) (positions []Position) {
	file, err := os.Open(DataFile)
	if err != nil {
		panic(err)
	}
	// Release the OS handle once parsing is done; the original
	// implementation leaked it.
	defer file.Close()
	reader := bufio.NewReader(file)
	scanner := bufio.NewScanner(reader)
	for positionCount := 0; scanner.Scan() && positionCount < start+int(NumPositions); positionCount++ {
		if positionCount < start {
			continue
		}
		line := scanner.Text()
		fields := strings.Fields(line)
		// Rebuild a full FEN with default castling/ep/clock fields.
		fen := fields[0] + " " + fields[1] + " - - 0 1"
		result := fields[5]
		outcome := Draw
		if result == "\"1-0\";" {
			outcome = WhiteWin
		} else if result == "\"0-1\";" {
			outcome = BlackWin
		}
		var pos engine.Position
		pos.LoadFEN(fen)
		positions = append(positions, Position{Pos: pos, Outcome: outcome})
	}
	fmt.Printf("Done loading %d positions...\n", int(NumPositions))
	return positions
}
// mapWeightsToParameters writes the flat Weights vector back into the
// engine's evaluation terms; the layout mirrors loadWeights exactly.
func mapWeightsToParameters() {
	copy(engine.PSQT_MG[engine.Pawn][:], Weights[0:64])
	copy(engine.PSQT_MG[engine.Knight][:], Weights[64:128])
	copy(engine.PSQT_MG[engine.Bishop][:], Weights[128:192])
	copy(engine.PSQT_MG[engine.Rook][:], Weights[192:256])
	copy(engine.PSQT_MG[engine.Queen][:], Weights[256:320])
	copy(engine.PSQT_MG[engine.King][:], Weights[320:384])
	copy(engine.PSQT_EG[engine.Pawn][:], Weights[384:448])
	copy(engine.PSQT_EG[engine.Knight][:], Weights[448:512])
	copy(engine.PSQT_EG[engine.Bishop][:], Weights[512:576])
	copy(engine.PSQT_EG[engine.Rook][:], Weights[576:640])
	copy(engine.PSQT_EG[engine.Queen][:], Weights[640:704])
	copy(engine.PSQT_EG[engine.King][:], Weights[704:768])
	engine.KnightMobility = Weights[768]
	engine.BishopMobility = Weights[769]
	engine.RookMobilityMG = Weights[770]
	engine.RookMobilityEG = Weights[771]
	engine.QueenMobilityMG = Weights[772]
	engine.QueenMobilityEG = Weights[773]
}
// evaluate scores a training position with the engine's evaluation.
func evaluate(pos engine.Position) int16 {
	score := engine.EvaluatePos(&pos)
	// For texel tuning, we always score a position from white's perspective
	if pos.SideToMove == engine.Black {
		return -score
	}
	return score
}
// processor computes the squared-error sum for Positions[start:end]
// and sends the partial result on the Answers channel. Each position's
// score is squashed through the logistic sigmoid (scaled by K) and
// compared with the recorded game outcome.
func processor(start, end int, K float64) {
	var errorSum float64
	for i := start; i < end; i++ {
		score := float64(evaluate(Positions[i].Pos))
		sigmoid := 1 / (1 + math.Pow(10, -K*score/400))
		errorSum += math.Pow(Positions[i].Outcome-sigmoid, 2)
	}
	Answers <- errorSum
}
// Calculate the mean square error given the current weights. Credit to
// the author of Zahak for this parallelized implementation. The
// current Weights are first written into the engine, then the corpus
// is split into NumCores batches whose partial sums are combined.
func meanSquaredError(K float64) float64 {
	mapWeightsToParameters()
	var errorSum float64
	batchSize := len(Positions) / NumCores
	for i := 0; i < NumCores; i++ {
		start := i * batchSize
		end := (i + 1) * batchSize
		// The last worker absorbs any remainder from integer division.
		if i == NumCores-1 {
			end = len(Positions)
		}
		go processor(start, end, K)
	}
	for i := 0; i < NumCores; i++ {
		ans := <-Answers
		errorSum += ans
	}
	return errorSum / float64(len(Positions))
}
// findK searches for the sigmoid scaling constant K that minimizes the
// corpus MSE with the current weights, by hill-climbing in steps of
// 0.01 until neither direction improves.
func findK() float64 {
	improved := true
	bestK := 0.5
	bestError := meanSquaredError(bestK)
	for iteration := 1; improved; iteration++ {
		improved = false
		fmt.Println("Iteration:", iteration)
		fmt.Println("Best error:", bestError)
		fmt.Println("Best K:", bestK)
		fmt.Println()
		bestK += 0.01
		newError := meanSquaredError(bestK)
		if newError < bestError {
			bestError = newError
			improved = true
		} else {
			// Step back past the starting value to try the other side.
			bestK -= 0.02
			newError = meanSquaredError(bestK)
			if newError < bestError {
				bestError = newError
				improved = true
			}
		}
	}
	return bestK
}
// tune runs classic Texel local search: each non-ignored weight is
// nudged by +1, then -1, keeping whichever change lowers the corpus
// MSE. Passes repeat until a full pass yields no improvement.
// Parameters are printed every 10 iterations as a checkpoint.
func tune() {
	numParams := len(Weights)
	bestError := meanSquaredError(K)
	improved := true
	for iteration := 1; improved; iteration++ {
		improved = false
		for weightIdx := 0; weightIdx < numParams; weightIdx++ {
			if IgnoreWeights[weightIdx] {
				continue
			}
			// fmt.Println("Best error:", bestError)
			// fmt.Printf("Tuning parameter number %d...\n", weightIdx)
			Weights[weightIdx] += 1
			newError := meanSquaredError(K)
			if newError < bestError {
				bestError = newError
				improved = true
			} else {
				Weights[weightIdx] -= 2
				// Mobility terms (indices > 768) must stay non-negative;
				// undo the decrement instead of evaluating it.
				if weightIdx > 768 && Weights[weightIdx] < 0 {
					Weights[weightIdx] += 1
					continue
				}
				newError = meanSquaredError(K)
				if newError < bestError {
					bestError = newError
					improved = true
				} else {
					// Neither direction helped; restore the weight.
					Weights[weightIdx] += 1
				}
			}
		}
		fmt.Printf("Iteration %d complete...\n", iteration)
		fmt.Printf("Best error: %f\n", bestError)
		if iteration%10 == 0 {
			printParameters()
		}
	}
	fmt.Println("Done tuning!")
}
// prettyPrintPSQT prints a 64-entry piece-square table as an 8x8 grid
// of comma-separated values, one rank per line.
func prettyPrintPSQT(psqt [64]int16) {
	fmt.Print("\n")
	for sq := 0; sq < 64; sq++ {
		if sq%8 == 0 {
			fmt.Println()
		}
		fmt.Print(psqt[sq], ", ")
	}
	fmt.Print("\n")
}
// printParameters dumps every tuned term (all twelve PSQTs, then the
// six mobility values) in the order loadWeights stores them.
func printParameters() {
	prettyPrintPSQT(engine.PSQT_MG[engine.Pawn])
	prettyPrintPSQT(engine.PSQT_MG[engine.Knight])
	prettyPrintPSQT(engine.PSQT_MG[engine.Bishop])
	prettyPrintPSQT(engine.PSQT_MG[engine.Rook])
	prettyPrintPSQT(engine.PSQT_MG[engine.Queen])
	prettyPrintPSQT(engine.PSQT_MG[engine.King])
	prettyPrintPSQT(engine.PSQT_EG[engine.Pawn])
	prettyPrintPSQT(engine.PSQT_EG[engine.Knight])
	prettyPrintPSQT(engine.PSQT_EG[engine.Bishop])
	prettyPrintPSQT(engine.PSQT_EG[engine.Rook])
	prettyPrintPSQT(engine.PSQT_EG[engine.Queen])
	prettyPrintPSQT(engine.PSQT_EG[engine.King])
	fmt.Println(engine.KnightMobility)
	fmt.Println(engine.BishopMobility)
	fmt.Println(engine.RookMobilityMG)
	fmt.Println(engine.RookMobilityEG)
	fmt.Println(engine.QueenMobilityMG)
	fmt.Println(engine.QueenMobilityEG)
}
// RunTuner executes the tuning loop, writes the tuned weights into the
// engine's parameters, and optionally prints them.
func RunTuner(verbose bool) {
	// K := findK()
	// fmt.Println("Best K is:", K)
	// setIgnoredWeights(0, 768)
	tune()
	mapWeightsToParameters()
	if verbose {
		printParameters()
	}
}
package expect
import (
"fmt"
"reflect"
"regexp"
"strings"
)
// To is the entry point of an assertion chain. `assert` records the
// polarity: true for positive assertions, false for negated ones
// (Not-style), so every check compares its result against it.
type To struct {
	Be *Be
	Have *Have
	Else *Else
	And *To
	t T
	actual interface{}
	assert bool
}
// newTo wires up an assertion chain around `actual`, sharing the same
// Else so chained checks can observe earlier failures; And points back
// to the chain itself for fluent continuation.
func newTo(t T, actual interface{}, assert bool) *To {
	to := &To{
		t: t,
		actual: actual,
		assert: assert,
	}
	to.Else = newElse(t)
	to.Be = newBe(t, to.Else, actual, assert)
	to.Have = newHave(t, to.Else, actual, assert)
	to.And = to
	return to
}
// Assert that a string starts with `s`.
// The actual value must be a string; otherwise the test is fatal.
func (t *To) StartWith(s string) *To {
	msg := t.msg(fmt.Sprintf("start with %v", s))
	if strings.HasPrefix(t.Str(), s) != t.assert {
		t.fail(2, msg)
	}
	return t
}
// Assert that a string ends with `s`.
func (t *To) EndWith(s string) *To {
	msg := t.msg(fmt.Sprintf("end with %v", s))
	if strings.HasSuffix(t.Str(), s) != t.assert {
		t.fail(2, msg)
	}
	return t
}
// Assert that a string contains `s`.
func (t *To) Contains(s string) *To {
	msg := t.msg(fmt.Sprintf("contains %v", s))
	if strings.Contains(t.Str(), s) != t.assert {
		t.fail(2, msg)
	}
	return t
}
// Assert whether a textual regular expression matches a string.
// An invalid pattern aborts the test via Fatal.
func (t *To) Match(s string) *To {
	msg := t.msg(fmt.Sprintf("matches %v", s))
	matched, err := regexp.MatchString(s, t.Str())
	if err != nil {
		t.t.Fatal(err)
	}
	if matched != t.assert {
		t.fail(2, msg)
	}
	return t
}
// Assert two values are equals(deeply)
func (t *To) Equal(exp interface{}) *To {
msg := t.msg(fmt.Sprintf("equal to %v", exp))
if reflect.DeepEqual(t.actual, exp) != t.assert {
t.fail(2, msg)
}
return t
}
// Assert func to panic
func (t *To) Panic(args ...interface{}) *To {
testMsg := len(args) > 0
switch t.actual.(type) {
case func():
fn := reflect.ValueOf(t.actual)
if p, m := ifPanic(fn); p != t.assert || testMsg && args[0] == m != t.assert {
if testMsg {
m = args[0]
}
t.fail(2, t.msg(fmt.Sprintf("panic: %v", m)))
}
default:
t.t.Fatal(invMsg("func"))
}
return t
}
// Pass asserts the actual value against a custom Matcher: a positive chain
// fails when the matcher returns an error, a negated chain fails when it
// does not.
func (t *To) Pass(matcher Matcher) *To {
	err := matcher.Match(t.actual)
	if t.assert {
		if err != nil {
			t.fail(2, t.msg(err.Error()))
		}
	} else if err == nil {
		t.fail(2, t.msg(fmt.Sprintf("match %#v", matcher)))
	}
	return t
}
// fail reports an assertion failure attributed `callers` frames above this
// call (the +1 skips this wrapper) and flags Else so a chained alternative
// can detect that the assertion failed.
func (t *To) fail(callers int, msg string) {
	fail(t.t, callers+1, msg)
	t.Else.failed = true
}
func ifPanic(f reflect.Value) (isPnc bool, msg interface{}) {
func() {
defer func() {
if msg = recover(); msg != nil {
isPnc = true
}
}()
f.Call([]reflect.Value{})
}()
return
}
// Str returns the actual value as a string, aborting the test via Fatal
// when the actual value is not a string.
func (t *To) Str() (s string) {
	// The `s, ok :=` below shadows the named result; the bare return at the
	// end yields "" in case t.t.Fatal does not stop execution (e.g. a fake T).
	if s, ok := t.actual.(string); ok {
		return s
	}
	t.t.Fatal(invMsg("string"))
	return
}
func (t *To) msg(s string) string {
return errMsg("to")(t.actual, s, t.assert)
} | to.go | 0.603348 | 0.579876 | to.go | starcoder |
package tilecover
import (
"log"
"math"
"github.com/paulmach/orb"
"github.com/paulmach/orb/maptile"
)
// LineString creates a tile cover for the line string.
func LineString(ls orb.LineString, z maptile.Zoom) maptile.Set {
set := make(maptile.Set)
line(set, ls, z, nil)
return set
}
// LineStringCount creates a tile cover for the line string and returns
// only the number of tiles.
// NOTE(review): lineCount tallies every emission and only suppresses
// consecutive repeats, so a line that revisits a tile later is counted
// again — unlike LineString, which dedupes via a set. Confirm callers
// expect this before relying on exact counts.
func LineStringCount(ls orb.LineString, z maptile.Zoom) int64 {
	_, cnt := lineCount(ls, z, nil)
	return cnt
}
// LineStringChannel creates a tile cover for the line string, streaming
// tiles to ch as they are found (repeat visits to a tile may be sent
// more than once; see lineChannel).
func LineStringChannel(ls orb.LineString, z maptile.Zoom, ch chan<- maptile.Tile) {
	lineChannel(ls, z, nil, ch)
}
// MultiLineString creates a tile cover for the line strings; the covers
// of all members are merged into a single deduplicated set.
func MultiLineString(mls orb.MultiLineString, z maptile.Zoom) maptile.Set {
	set := make(maptile.Set)
	for _, ls := range mls {
		line(set, ls, z, nil)
	}
	return set
}
// MultiLineStringCount returns the number of distinct tiles at zoom z
// covered by the given line strings.
func MultiLineStringCount(mls orb.MultiLineString, z maptile.Zoom) int64 {
	tiles := make(maptile.Set)
	for _, ls := range mls {
		line(tiles, ls, z, nil)
	}
	count := int64(0)
	for _, included := range tiles {
		if included {
			count++
		}
	}
	return count
}
// MultiLineStringChannel creates a tile cover for the line strings and
// streams each covered tile to ch. The full set is materialized first,
// so tiles are sent without duplicates, in map-iteration (random) order.
func MultiLineStringChannel(mls orb.MultiLineString, z maptile.Zoom, ch chan<- maptile.Tile) {
	set := make(maptile.Set)
	for _, ls := range mls {
		line(set, ls, z, nil)
	}
	for t, v := range set {
		if v {
			ch <- t
		}
	}
}
// line rasterizes one line string onto the tile grid at the given zoom,
// marking every tile the line passes through in set. When ring is non-nil,
// tiles entered on a new y row are also appended to ring (presumably used
// by polygon covering elsewhere in the package); the possibly-grown ring
// is returned. Each segment is walked tile-by-tile in fractional tile
// coordinates, a DDA-style grid traversal.
func line(
	set maptile.Set,
	line orb.LineString,
	zoom maptile.Zoom,
	ring [][2]uint32,
) [][2]uint32 {
	inf := math.Inf(1)
	// Previously emitted tile; -1 is an impossible tile coordinate.
	prevX := -1.0
	prevY := -1.0
	var x, y float64
	for i := 0; i < len(line)-1; i++ {
		// Segment endpoints in fractional tile coordinates.
		start := maptile.Fraction(line[i], zoom)
		stop := maptile.Fraction(line[i+1], zoom)
		dx := stop[0] - start[0]
		dy := stop[1] - start[1]
		if dy == 0 && dx == 0 {
			continue
		}
		// Step direction along each axis.
		sx := -1.0
		if dx > 0 {
			sx = 1.0
		}
		sy := -1.0
		if dy > 0 {
			sy = 1.0
		}
		// Tile containing the segment start.
		x = math.Floor(start[0])
		y = math.Floor(start[1])
		// tMaxX/tMaxY: fraction of the segment consumed before the first
		// x/y tile-boundary crossing; infinite for axis-parallel segments.
		tMaxX := inf
		if dx != 0 {
			d := 0.0
			if dx > 0 {
				d = 1.0
			}
			tMaxX = math.Abs((d + x - start[0]) / dx)
		}
		tMaxY := inf
		if dy != 0 {
			d := 0.0
			if dy > 0 {
				d = 1.0
			}
			tMaxY = math.Abs((d + y - start[1]) / dy)
		}
		// Segment fraction per whole-tile step on each axis
		// (+Inf when the corresponding delta is zero, per IEEE division).
		tdx := math.Abs(sx / dx)
		tdy := math.Abs(sy / dy)
		if x != prevX || y != prevY {
			set[maptile.New(uint32(x), uint32(y), zoom)] = true
			if ring != nil && y != prevY {
				ring = append(ring, [2]uint32{uint32(x), uint32(y)})
			}
			prevX = x
			prevY = y
		}
		// Cross tile boundaries until the segment end is reached.
		for tMaxX < 1 || tMaxY < 1 {
			if tMaxX < tMaxY {
				tMaxX += tdx
				x += sx
			} else {
				tMaxY += tdy
				y += sy
			}
			set[maptile.New(uint32(x), uint32(y), zoom)] = true
			if ring != nil && y != prevY {
				ring = append(ring, [2]uint32{uint32(x), uint32(y)})
			}
			prevX = x
			prevY = y
		}
	}
	// Drop a trailing ring point that re-enters the starting row.
	// NOTE(review): indexes ring[0] without a length check — a non-nil but
	// empty ring would panic here; confirm callers never pass one.
	if ring != nil && uint32(y) == ring[0][1] {
		ring = ring[:len(ring)-1]
	}
	return ring
}
// lineCount is the counting twin of line: it walks the same tiles but only
// tallies how many it would emit instead of storing them, avoiding the set
// allocation.
// NOTE(review): duplicates are only suppressed when consecutive (prevX/prevY),
// so a line revisiting a tile later is counted again and the result can
// exceed len(LineString(...)) — confirm this trade-off is intended.
func lineCount(
	line orb.LineString,
	zoom maptile.Zoom,
	ring [][2]uint32,
) ([][2]uint32, int64) {
	var cnt int64
	inf := math.Inf(1)
	prevX := -1.0
	prevY := -1.0
	var x, y float64
	for i := 0; i < len(line)-1; i++ {
		// Segment endpoints in fractional tile coordinates.
		start := maptile.Fraction(line[i], zoom)
		stop := maptile.Fraction(line[i+1], zoom)
		dx := stop[0] - start[0]
		dy := stop[1] - start[1]
		if dy == 0 && dx == 0 {
			continue
		}
		sx := -1.0
		if dx > 0 {
			sx = 1.0
		}
		sy := -1.0
		if dy > 0 {
			sy = 1.0
		}
		x = math.Floor(start[0])
		y = math.Floor(start[1])
		tMaxX := inf
		if dx != 0 {
			d := 0.0
			if dx > 0 {
				d = 1.0
			}
			tMaxX = math.Abs((d + x - start[0]) / dx)
		}
		tMaxY := inf
		if dy != 0 {
			d := 0.0
			if dy > 0 {
				d = 1.0
			}
			tMaxY = math.Abs((d + y - start[1]) / dy)
		}
		tdx := math.Abs(sx / dx)
		tdy := math.Abs(sy / dy)
		if x != prevX || y != prevY {
			// set[maptile.New(uint32(x), uint32(y), zoom)] = true
			cnt++
			if ring != nil && y != prevY {
				ring = append(ring, [2]uint32{uint32(x), uint32(y)})
			}
			prevX = x
			prevY = y
		}
		for tMaxX < 1 || tMaxY < 1 {
			if tMaxX < tMaxY {
				tMaxX += tdx
				x += sx
			} else {
				tMaxY += tdy
				y += sy
			}
			// set[maptile.New(uint32(x), uint32(y), zoom)] = true
			cnt++
			if ring != nil && y != prevY {
				ring = append(ring, [2]uint32{uint32(x), uint32(y)})
			}
			prevX = x
			prevY = y
		}
	}
	// See line(): assumes a non-nil ring is non-empty.
	if ring != nil && uint32(y) == ring[0][1] {
		ring = ring[:len(ring)-1]
	}
	return ring, cnt
}
func lineChannel(
line orb.LineString,
zoom maptile.Zoom,
ring [][2]uint32,
ch chan<- maptile.Tile,
) [][2]uint32 {
defer func() {
if recover() != nil {
log.Println("buffer got closed...")
}
}()
inf := math.Inf(1)
prevX := -1.0
prevY := -1.0
var x, y float64
for i := 0; i < len(line)-1; i++ {
start := maptile.Fraction(line[i], zoom)
stop := maptile.Fraction(line[i+1], zoom)
dx := stop[0] - start[0]
dy := stop[1] - start[1]
if dy == 0 && dx == 0 {
continue
}
sx := -1.0
if dx > 0 {
sx = 1.0
}
sy := -1.0
if dy > 0 {
sy = 1.0
}
x = math.Floor(start[0])
y = math.Floor(start[1])
tMaxX := inf
if dx != 0 {
d := 0.0
if dx > 0 {
d = 1.0
}
tMaxX = math.Abs((d + x - start[0]) / dx)
}
tMaxY := inf
if dy != 0 {
d := 0.0
if dy > 0 {
d = 1.0
}
tMaxY = math.Abs((d + y - start[1]) / dy)
}
tdx := math.Abs(sx / dx)
tdy := math.Abs(sy / dy)
if x != prevX || y != prevY {
// set[maptile.New(uint32(x), uint32(y), zoom)] = true
ch <- maptile.New(uint32(x), uint32(y), zoom)
if ring != nil && y != prevY {
ring = append(ring, [2]uint32{uint32(x), uint32(y)})
}
prevX = x
prevY = y
}
for tMaxX < 1 || tMaxY < 1 {
if tMaxX < tMaxY {
tMaxX += tdx
x += sx
} else {
tMaxY += tdy
y += sy
}
// set[maptile.New(uint32(x), uint32(y), zoom)] = true
ch <- maptile.New(uint32(x), uint32(y), zoom)
if ring != nil && y != prevY {
ring = append(ring, [2]uint32{uint32(x), uint32(y)})
}
prevX = x
prevY = y
}
}
if ring != nil && uint32(y) == ring[0][1] {
ring = ring[:len(ring)-1]
}
return ring
} | maptile/tilecover/line_string.go | 0.597138 | 0.473962 | line_string.go | starcoder |
package structex
import (
"fmt"
"reflect"
"strconv"
"strings"
)
// endian selects the byte order used when packing or unpacking a field;
// undefined means no explicit 'little'/'big' tag was seen.
type endian int
const (
	little endian = 0 // 'little' tag
	big endian = 1 // 'big' tag
	undefined endian = 2 // no byte-order tag present
)
// bitfield describes how a field is packed at the bit level.
type bitfield struct {
	nbits uint64 // number of bits the field occupies (0 for composite kinds)
	reserved bool // true when the tag value contained the 'reserved' flag
}
// Layout formats parsed from the 'sizeof'/'countof' tags (see add below).
const (
	none = iota // field carries no layout relationship
	sizeOf // field stores the size of the field named in layout.name
	countOf // field stores the element count of the field named in layout.name
)
// layout records a relationship between this field and a named sibling
// field, as declared by a 'sizeof' or 'countof' tag.
type layout struct {
	format int // none, sizeOf or countOf
	name string // name of the related field
	relative bool // 'relative' flag on sizeof tags; exact semantics live in the pack/unpack code
	value uint64 // NOTE(review): never written in this file — presumably filled during pack/unpack
}
// alignment is a byte-alignment value parsed from the 'align' tag (0 = none).
type alignment uint64
// tags aggregates every structure-extension annotation recognized on a
// single struct field.
type tags struct {
	endian endian // byte order ('little'/'big'), undefined when untagged
	bitfield bitfield // bit width and reserved flag
	layout layout // sizeof/countof relationship to a sibling field
	alignment alignment // 'align' tag value
	truncate bool // 'truncate' flag
}
// A TaggingError occurs when the pack/unpack routines detect that a
// structure field annotation does not match what is expected in the
// data stream.
type TaggingError struct {
	tag  string
	kind reflect.Kind
}

// Error formats the offending tag together with the field's kind.
func (e *TaggingError) Error() string {
	return fmt.Sprintf("Invalid tag '%s' for %s", e.tag, e.kind)
}
/*
parseFieldTags parses the field's tags and returns the informative
structures defined by the structure extension values.

Two tag syntaxes are accepted: the namespaced form
`structex:"bitfield='4,reserved',sizeof='Array'"` and, as a fallback,
the bare form `bitfield:"3,reserved" sizeof:"Array"` read from the raw tag.
*/
func parseFieldTags(sf reflect.StructField) tags {
	t := tags{
		endian: undefined,
		bitfield: bitfield{0, false},
		layout: layout{none, "", false, 0},
		alignment: 0,
		truncate: false,
	}
	// Always encode the size of the field, regardless of tags
	switch sf.Type.Kind() {
	case reflect.Array, reflect.Slice, reflect.Struct, reflect.Ptr:
		// Composite kinds have no fixed bit width here.
		break
	case reflect.Bool:
		t.bitfield.nbits = 1
	default:
		// NOTE(review): Bits() panics for kinds without a fixed size
		// (e.g. string); confirm such fields never reach this path.
		t.bitfield.nbits = uint64(sf.Type.Bits())
	}
	if s, ok := sf.Tag.Lookup("structex"); ok {
		t.parseString(sf, s, parseOptions{sep: ',', quote: '\'', assign: '='})
	} else {
		t.parseString(sf, string(sf.Tag), parseOptions{sep: ' ', quote: '"', assign: ':'})
	}
	return t
}
// parseOptions carries the delimiter runes that distinguish the two
// supported tag syntaxes (structex:"k='v',..." vs bare `k:"v" ...`).
type parseOptions struct {
	sep rune // separator between key/value pairs
	quote rune // rune delimiting a quoted value
	assign rune // rune separating a key from its value
}
// parseString tokenizes a tag string into key/value pairs and records each
// pair via t.add.
//
// Full tag format i.e. `structex:"bitfield='4,reserved',sizeof='Array'"`
// Bare tag format i.e. `bitfield:"3,reserved" sizeOf:"Array"`
//
// Fixes over the previous version:
//   - a bare flag followed by more tags (e.g. "truncate,big") is now
//     flushed at the separator instead of being concatenated into the
//     next key ("truncatebig");
//   - an assign rune inside a quoted value is kept as part of the value
//     instead of being silently dropped.
func (t *tags) parseString(sf reflect.StructField, tagString string, opts parseOptions) {
	if len(tagString) == 0 {
		return
	}
	key := []rune{}
	val := []rune{}
	inKey := true
	inVal := false
	// flush records the pending pair (if any) and resets the accumulators.
	flush := func() {
		if len(key) > 0 {
			t.add(sf, string(key), string(val))
		}
		key = key[:0]
		val = val[:0]
	}
	runes := []rune(tagString)
	for idx, r := range runes {
		switch r {
		case opts.assign:
			if inVal {
				val = append(val, r)
			} else if inKey {
				inKey = false
			}
		case opts.quote:
			if inVal {
				// Closing quote terminates the pair.
				inVal = false
				flush()
			} else {
				inVal = true
			}
		case opts.sep, ',':
			if inVal {
				// Separators inside a quoted value belong to the value.
				val = append(val, r)
			} else {
				// A pending bare flag (key without value) ends here.
				flush()
				inKey = true
			}
		default:
			if inKey {
				key = append(key, r)
				// A bare flag at the very end of the string.
				if idx == len(runes)-1 {
					flush()
				}
			} else if inVal {
				val = append(val, r)
			}
			// Runes between an assign and an opening quote are ignored,
			// matching the previous behavior (unquoted values are not
			// supported).
		}
	}
}
// add interprets one key/value tag pair and records it in t.
// Keys are matched case-insensitively; unknown keys are silently ignored;
// malformed numeric values panic with a TaggingError.
func (t *tags) add(sf reflect.StructField, key string, val string) {
	switch strings.ToLower(key) {
	case "little":
		t.endian = little
	case "big":
		t.endian = big
	case "bitfield":
		// Value shape: "<nbits>[,reserved]". For bool fields the width is
		// forced to a single bit regardless of the declared count.
		if nbs := strings.Split(val, ",")[0]; len(nbs) != 0 {
			var nbits int64
			switch sf.Type.Kind() {
			case reflect.Bool:
				nbits = 1
			default:
				var err error
				// NOTE(review): sf.Type.Bits() is passed as ParseInt's
				// bitSize, which only bounds the parsed value's range; it
				// panics for kinds without a fixed size — confirm intended.
				nbits, err = strconv.ParseInt(nbs, 0, int(sf.Type.Bits()))
				if err != nil {
					panic(&TaggingError{string(sf.Tag), sf.Type.Kind()})
				}
			}
			t.bitfield.nbits = uint64(nbits)
		}
		// The reserved flag applies even when no width was given.
		t.bitfield.reserved = strings.Contains(val, "reserved")
	case "sizeof":
		// This field stores the size of the named sibling field.
		t.layout.format = sizeOf
		t.layout.name = strings.Split(val, ",")[0]
		t.layout.relative = strings.Contains(val, "relative")
	case "countof":
		// This field stores the element count of the named sibling field.
		t.layout.format = countOf
		t.layout.name = val
	case "truncate":
		t.truncate = true
	case "align":
		align, err := strconv.ParseInt(val, 0, 64)
		if err != nil {
			panic(&TaggingError{string(sf.Tag), sf.Type.Kind()})
		}
		t.alignment = alignment(align)
	}
}
// parseBitfield is an unimplemented stub.
// NOTE(review): nothing in this file calls it — confirm it is dead code
// before removing.
func (t *tags) parseBitfield(sf reflect.StructField, s string, opts parseOptions) {
}
func (t *tags) print() {
fmt.Printf("Bitfield: Bits: %d Reserved: %t\n", t.bitfield.nbits, t.bitfield.reserved)
fmt.Printf("Layout: Type: %d Field: %s Relative %t\n", t.layout.format, t.layout.name, t.layout.relative)
fmt.Printf("Alignment: %d\n", t.alignment)
fmt.Println("")
} | tags.go | 0.541166 | 0.403684 | tags.go | starcoder |
package trie
import (
"errors"
"github.com/FilipNikolovski/go-datastructs-and-algorithms/ds/stacks/arraystack"
)
// Trie is a trie of runes. Each trie node has an 'end' bool flag, which
// indicates whether the node represents the end character of a word.
// The zero value is an empty, usable trie: child maps are allocated
// lazily by Put.
type Trie struct {
	children map[rune]*Trie
	end bool
}
// NewTrie returns an empty trie ready for use.
func NewTrie() *Trie {
	return new(Trie)
}
// Find walks the trie along word and reports whether the final node is
// marked as the end of a stored word.
func (t *Trie) Find(word string) bool {
	cur := t
	for _, c := range word {
		if cur = cur.children[c]; cur == nil {
			return false
		}
	}
	return cur.end
}
// Put stores word in the trie, creating nodes and child maps on demand.
func (t *Trie) Put(word string) {
	cur := t
	for _, c := range word {
		next := cur.children[c]
		if next == nil {
			if cur.children == nil {
				cur.children = make(map[rune]*Trie)
			}
			next = &Trie{}
			cur.children[c] = next
		}
		cur = next
	}
	cur.end = true
}
// nodeRune pairs a trie node with the rune of the edge taken out of it,
// so Delete can unwind the traversal path bottom-up.
type nodeRune struct {
	r rune
	node *Trie
}
var (
	// ErrWordNotFound is returned by Delete when the word is not present
	// in the trie.
	ErrWordNotFound = errors.New("trie: word not found")
	// ErrAssertType is returned if a stack element is not a nodeRune;
	// it indicates an internal invariant violation.
	ErrAssertType = errors.New("trie: type assertion: not a nodeRune")
)
// Delete deletes a word from the trie: it unmarks the word's terminal
// node and then prunes now-empty nodes bottom-up, stopping at the first
// node that still has other children or terminates another word.
func (t *Trie) Delete(word string) error {
	// Record the traversal path so it can be unwound afterwards.
	s := arraystack.New()
	node := t
	for _, r := range word {
		s.Push(&nodeRune{r: r, node: node})
		node = node.children[r]
		if node == nil {
			return ErrWordNotFound
		}
	}
	// no longer indicating an end of a word
	node.end = false
	if !node.isLeafNode() { // parent has other nodes, we stop here.
		return nil
	}
	// Unwind: remove the edge to the just-pruned child at each level.
	for !s.Empty() {
		el, err := s.Pop()
		if err != nil {
			return err
		}
		nr, ok := el.(*nodeRune)
		if !ok {
			return ErrAssertType
		}
		delete(nr.node.children, nr.r)
		// Other words branch off here; keep this node.
		if !nr.node.isLeafNode() {
			break
		}
		nr.node.children = nil
		// This node ends another word; keep it.
		if nr.node.end {
			break
		}
	}
	return nil
}
func (t *Trie) isLeafNode() bool {
return len(t.children) == 0
} | ds/trees/trie/trie.go | 0.739234 | 0.497803 | trie.go | starcoder |
package spdx
import (
"encoding/json"
"fmt"
"strings"
)
// Supplier identifies the distributor of a package: either the well-known
// value "NOASSERTION" or a typed "<type>: <name>" pair.
type Supplier struct {
	// can be "NOASSERTION"
	Supplier string
	// SupplierType can be one of "Person", "Organization", or empty if Supplier is "NOASSERTION"
	SupplierType string
}

// UnmarshalJSON takes a supplier in the typical one-line format and parses it into a Supplier struct.
// This function is also used when unmarshalling YAML
func (s *Supplier) UnmarshalJSON(data []byte) error {
	// Decode the JSON string itself (instead of hand-trimming quote
	// characters) so escape sequences in the value are handled correctly
	// and malformed input is reported.
	var supplierStr string
	if err := json.Unmarshal(data, &supplierStr); err != nil {
		return err
	}
	if supplierStr == "NOASSERTION" {
		s.Supplier = supplierStr
		return nil
	}
	supplierFields := strings.SplitN(supplierStr, ": ", 2)
	if len(supplierFields) != 2 {
		return fmt.Errorf("failed to parse Supplier '%s'", supplierStr)
	}
	s.SupplierType = supplierFields[0]
	s.Supplier = supplierFields[1]
	return nil
}

// MarshalJSON converts the receiver into a slice of bytes representing a Supplier in string form.
// This function is also used when marshalling to YAML
func (s Supplier) MarshalJSON() ([]byte, error) {
	if s.Supplier == "NOASSERTION" {
		return json.Marshal(s.Supplier)
	} else if s.SupplierType != "" && s.Supplier != "" {
		return json.Marshal(fmt.Sprintf("%s: %s", s.SupplierType, s.Supplier))
	}
	return []byte{}, fmt.Errorf("failed to marshal invalid Supplier: %+v", s)
}
// Originator identifies where a package originally came from: either the
// well-known value "NOASSERTION" or a typed "<type>: <name>" pair.
type Originator struct {
	// can be "NOASSERTION"
	Originator string
	// OriginatorType can be one of "Person", "Organization", or empty if Originator is "NOASSERTION"
	OriginatorType string
}

// UnmarshalJSON takes an originator in the typical one-line format and parses it into an Originator struct.
// This function is also used when unmarshalling YAML
func (o *Originator) UnmarshalJSON(data []byte) error {
	// Decode the JSON string itself (instead of hand-trimming quote
	// characters) so escape sequences are handled and malformed input is
	// reported.
	var originatorStr string
	if err := json.Unmarshal(data, &originatorStr); err != nil {
		return err
	}
	if originatorStr == "NOASSERTION" {
		o.Originator = originatorStr
		return nil
	}
	originatorFields := strings.SplitN(originatorStr, ": ", 2)
	if len(originatorFields) != 2 {
		return fmt.Errorf("failed to parse Originator '%s'", originatorStr)
	}
	o.OriginatorType = originatorFields[0]
	o.Originator = originatorFields[1]
	return nil
}

// MarshalJSON converts the receiver into a slice of bytes representing an Originator in string form.
// This function is also used when marshalling to YAML
func (o Originator) MarshalJSON() ([]byte, error) {
	if o.Originator == "NOASSERTION" {
		return json.Marshal(o.Originator)
	} else if o.Originator != "" {
		return json.Marshal(fmt.Sprintf("%s: %s", o.OriginatorType, o.Originator))
	}
	// NOTE(review): an empty Originator yields empty bytes with a nil
	// error, which encoding/json rejects downstream; Supplier returns an
	// explicit error in the same situation. Confirm before unifying.
	return []byte{}, nil
}
// PackageVerificationCode holds the SPDX package verification code
// (spec section 3.9), computed over the package's files.
type PackageVerificationCode struct {
	// Cardinality: mandatory, one if filesAnalyzed is true / omitted;
	// zero (must be omitted) if filesAnalyzed is false
	Value string `json:"packageVerificationCodeValue"`
	// Spec also allows specifying files to exclude from the
	// verification code algorithm; intended to enable exclusion of
	// the SPDX document file itself.
	ExcludedFiles []string `json:"packageVerificationCodeExcludedFiles"`
}
// Package2_1 is a Package section of an SPDX Document for version 2.1 of the spec.
type Package2_1 struct {
	// 3.1: Package Name
	// Cardinality: mandatory, one
	PackageName string `json:"name"`
	// 3.2: Package SPDX Identifier: "SPDXRef-[idstring]"
	// Cardinality: mandatory, one
	PackageSPDXIdentifier ElementID `json:"SPDXID"`
	// 3.3: Package Version
	// Cardinality: optional, one
	PackageVersion string `json:"versionInfo,omitempty"`
	// 3.4: Package File Name
	// Cardinality: optional, one
	PackageFileName string `json:"packageFileName,omitempty"`
	// 3.5: Package Supplier: may have single result for either Person or Organization,
	// or NOASSERTION
	// Cardinality: optional, one
	PackageSupplier *Supplier `json:"supplier,omitempty"`
	// 3.6: Package Originator: may have single result for either Person or Organization,
	// or NOASSERTION
	// Cardinality: optional, one
	PackageOriginator *Originator `json:"originator,omitempty"`
	// 3.7: Package Download Location
	// Cardinality: mandatory, one
	PackageDownloadLocation string `json:"downloadLocation"`
	// 3.8: FilesAnalyzed
	// Cardinality: optional, one; default value is "true" if omitted
	// NOTE(review): with omitempty an explicit false is dropped on marshal,
	// and an absent field reads back as the default true — confirm that
	// round-tripping FilesAnalyzed=false is handled elsewhere.
	FilesAnalyzed bool `json:"filesAnalyzed,omitempty"`
	// NOT PART OF SPEC: did FilesAnalyzed tag appear?
	IsFilesAnalyzedTagPresent bool `json:"-"`
	// 3.9: Package Verification Code
	PackageVerificationCode PackageVerificationCode `json:"packageVerificationCode"`
	// 3.10: Package Checksum: may have keys for SHA1, SHA256 and/or MD5
	// Cardinality: optional, one or many
	PackageChecksums []Checksum `json:"checksums,omitempty"`
	// 3.11: Package Home Page
	// Cardinality: optional, one
	PackageHomePage string `json:"homepage,omitempty"`
	// 3.12: Source Information
	// Cardinality: optional, one
	PackageSourceInfo string `json:"sourceInfo,omitempty"`
	// 3.13: Concluded License: SPDX License Expression, "NONE" or "NOASSERTION"
	// Cardinality: mandatory, one
	PackageLicenseConcluded string `json:"licenseConcluded"`
	// 3.14: All Licenses Info from Files: SPDX License Expression, "NONE" or "NOASSERTION"
	// Cardinality: mandatory, one or many if filesAnalyzed is true / omitted;
	// zero (must be omitted) if filesAnalyzed is false
	PackageLicenseInfoFromFiles []string `json:"licenseInfoFromFiles"`
	// 3.15: Declared License: SPDX License Expression, "NONE" or "NOASSERTION"
	// Cardinality: mandatory, one
	PackageLicenseDeclared string `json:"licenseDeclared"`
	// 3.16: Comments on License
	// Cardinality: optional, one
	PackageLicenseComments string `json:"licenseComments,omitempty"`
	// 3.17: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION"
	// Cardinality: mandatory, one
	PackageCopyrightText string `json:"copyrightText"`
	// 3.18: Package Summary Description
	// Cardinality: optional, one
	PackageSummary string `json:"summary,omitempty"`
	// 3.19: Package Detailed Description
	// Cardinality: optional, one
	PackageDescription string `json:"description,omitempty"`
	// 3.20: Package Comment
	// Cardinality: optional, one
	PackageComment string `json:"comment,omitempty"`
	// 3.21: Package External Reference
	// Cardinality: optional, one or many
	PackageExternalReferences []*PackageExternalReference2_1 `json:"externalRefs,omitempty"`
	// Files contained in this Package
	// NOTE(review): no json tag — encodes under the key "Files"; confirm
	// against the SPDX JSON schema (sibling fields all carry explicit tags).
	Files []*File2_1
	Annotations []Annotation2_1 `json:"annotations,omitempty"`
}
// PackageExternalReference2_1 is an External Reference to additional info
// about a Package, as defined in section 3.21 in version 2.1 of the spec.
type PackageExternalReference2_1 struct {
	// category is "SECURITY", "PACKAGE-MANAGER" or "OTHER"
	Category string `json:"referenceCategory"`
	// type is an [idstring] as defined in Appendix VI;
	// called RefType here due to "type" being a Golang keyword
	RefType string `json:"referenceType"`
	// locator is a unique string to access the package-specific
	// info, metadata or content within the target location
	Locator string `json:"referenceLocator"`
	// 3.22: Package External Reference Comment
	// Cardinality: conditional (optional, one) for each External Reference
	// NOTE(review): no omitempty — an empty comment is still serialized.
	ExternalRefComment string `json:"comment"`
}
// Package2_2 is a Package section of an SPDX Document for version 2.2 of the spec.
type Package2_2 struct {
	// NOT PART OF SPEC
	// flag: does this "package" contain files that were in fact "unpackaged",
	// e.g. included directly in the Document without being in a Package?
	// NOTE(review): no `json:"-"` tag, so this non-spec flag is serialized
	// as "IsUnpackaged" — confirm against the JSON schema.
	IsUnpackaged bool
	// 3.1: Package Name
	// Cardinality: mandatory, one
	PackageName string `json:"name"`
	// 3.2: Package SPDX Identifier: "SPDXRef-[idstring]"
	// Cardinality: mandatory, one
	PackageSPDXIdentifier ElementID `json:"SPDXID"`
	// 3.3: Package Version
	// Cardinality: optional, one
	PackageVersion string `json:"versionInfo,omitempty"`
	// 3.4: Package File Name
	// Cardinality: optional, one
	PackageFileName string `json:"packageFileName,omitempty"`
	// 3.5: Package Supplier: may have single result for either Person or Organization,
	// or NOASSERTION
	// Cardinality: optional, one
	PackageSupplier *Supplier `json:"supplier,omitempty"`
	// 3.6: Package Originator: may have single result for either Person or Organization,
	// or NOASSERTION
	// Cardinality: optional, one
	PackageOriginator *Originator `json:"originator,omitempty"`
	// 3.7: Package Download Location
	// Cardinality: mandatory, one
	PackageDownloadLocation string `json:"downloadLocation"`
	// 3.8: FilesAnalyzed
	// Cardinality: optional, one; default value is "true" if omitted
	FilesAnalyzed bool `json:"filesAnalyzed,omitempty"`
	// NOT PART OF SPEC: did FilesAnalyzed tag appear?
	// NOTE(review): unlike Package2_1, this lacks `json:"-"` and will be
	// serialized — confirm the inconsistency is intentional.
	IsFilesAnalyzedTagPresent bool
	// 3.9: Package Verification Code
	PackageVerificationCode PackageVerificationCode `json:"packageVerificationCode"`
	// 3.10: Package Checksum: may have keys for SHA1, SHA256 and/or MD5
	// Cardinality: optional, one or many
	PackageChecksums []Checksum `json:"checksums"`
	// 3.11: Package Home Page
	// Cardinality: optional, one
	PackageHomePage string `json:"homepage,omitempty"`
	// 3.12: Source Information
	// Cardinality: optional, one
	PackageSourceInfo string `json:"sourceInfo,omitempty"`
	// 3.13: Concluded License: SPDX License Expression, "NONE" or "NOASSERTION"
	// Cardinality: mandatory, one
	PackageLicenseConcluded string `json:"licenseConcluded"`
	// 3.14: All Licenses Info from Files: SPDX License Expression, "NONE" or "NOASSERTION"
	// Cardinality: mandatory, one or many if filesAnalyzed is true / omitted;
	// zero (must be omitted) if filesAnalyzed is false
	PackageLicenseInfoFromFiles []string `json:"licenseInfoFromFiles"`
	// 3.15: Declared License: SPDX License Expression, "NONE" or "NOASSERTION"
	// Cardinality: mandatory, one
	PackageLicenseDeclared string `json:"licenseDeclared"`
	// 3.16: Comments on License
	// Cardinality: optional, one
	PackageLicenseComments string `json:"licenseComments,omitempty"`
	// 3.17: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION"
	// Cardinality: mandatory, one
	PackageCopyrightText string `json:"copyrightText"`
	// 3.18: Package Summary Description
	// Cardinality: optional, one
	PackageSummary string `json:"summary,omitempty"`
	// 3.19: Package Detailed Description
	// Cardinality: optional, one
	PackageDescription string `json:"description,omitempty"`
	// 3.20: Package Comment
	// Cardinality: optional, one
	PackageComment string `json:"comment,omitempty"`
	// 3.21: Package External Reference
	// Cardinality: optional, one or many
	PackageExternalReferences []*PackageExternalReference2_2 `json:"externalRefs,omitempty"`
	// 3.22: Package External Reference Comment
	// Cardinality: conditional (optional, one) for each External Reference
	// contained within PackageExternalReference2_1 struct, if present
	// 3.23: Package Attribution Text
	// Cardinality: optional, one or many
	PackageAttributionTexts []string `json:"attributionTexts,omitempty"`
	// Files contained in this Package
	Files []*File2_2
	Annotations []Annotation2_2 `json:"annotations"`
}
// PackageExternalReference2_2 is an External Reference to additional info
// about a Package, as defined in section 3.21 in version 2.2 of the spec.
type PackageExternalReference2_2 struct {
// category is "SECURITY", "PACKAGE-MANAGER" or "OTHER"
Category string `json:"referenceCategory"`
// type is an [idstring] as defined in Appendix VI;
// called RefType here due to "type" being a Golang keyword
RefType string `json:"referenceType"`
// locator is a unique string to access the package-specific
// info, metadata or content within the target location
Locator string `json:"referenceLocator"`
// 3.22: Package External Reference Comment
// Cardinality: conditional (optional, one) for each External Reference
ExternalRefComment string `json:"comment"`
} | spdx/package.go | 0.680454 | 0.474996 | package.go | starcoder |
package data
import (
"math"
"github.com/farshidtz/senml/v2"
"github.com/linksmart/historical-datastore/registry"
)
// Same_name_same_types generates a SenML pack of count records for the
// given series. The base name/unit are set on the first record only, and
// all records carry a fixed dummy value matching the series' type.
// Timestamps start at a fixed epoch and advance by one second per record;
// when decremental is true they start at epoch+count-1 and run backwards.
// A non-positive count yields an empty pack (previously this panicked on
// the unconditional s[0] assignment).
func Same_name_same_types(count int, series registry.TimeSeries, decremental bool) senml.Pack {
	if count <= 0 {
		return senml.Pack{}
	}
	value := 22.1
	stringValue := "Machine Room"
	boolValue := false
	dataValue := "aGkgCg"
	timeinit := 1543059346.0
	mult := 1.0
	if decremental {
		// Start at the newest timestamp and walk backwards.
		timeinit = timeinit + float64(count-1)
		mult = -1.0
	}
	var s = make([]senml.Record, count)
	switch series.Type {
	case registry.Float:
		s[0] = senml.Record{BaseName: series.Name,
			BaseUnit: series.Unit,
			Value: &value, Time: timeinit}
		for i := 1; i < count; i++ {
			s[i] = senml.Record{Value: &value, Time: timeinit + float64(i)*mult}
		}
	case registry.String:
		s[0] = senml.Record{BaseName: series.Name,
			BaseUnit: series.Unit,
			StringValue: stringValue, Time: timeinit}
		for i := 1; i < count; i++ {
			s[i] = senml.Record{StringValue: stringValue, Time: timeinit + float64(i)*mult}
		}
	case registry.Bool:
		s[0] = senml.Record{BaseName: series.Name,
			BaseUnit: series.Unit,
			BoolValue: &boolValue, Time: timeinit}
		for i := 1; i < count; i++ {
			s[i] = senml.Record{BoolValue: &boolValue, Time: timeinit + float64(i)*mult}
		}
	case registry.Data:
		s[0] = senml.Record{BaseName: series.Name,
			BaseUnit: series.Unit,
			DataValue: dataValue, Time: timeinit}
		for i := 1; i < count; i++ {
			s[i] = senml.Record{DataValue: dataValue, Time: timeinit + float64(i)*mult}
		}
	}
	return s
}
// CompareRecords reports whether two SenML records are equal, comparing
// times with a 1µs tolerance and pointer fields by pointed-to value.
// The previous version dereferenced a nil pointer whenever exactly one
// side's Sum, BoolValue or Value was nil: `(a == nil && b == nil) || *a == *b`
// evaluates *a with a == nil when only b is nil. The helpers below treat
// a nil/non-nil pair as unequal instead of panicking.
func CompareRecords(r1 senml.Record, r2 senml.Record) (same bool) {
	return math.Abs(r1.Time-r2.Time) < 1e-6 &&
		r1.Name == r2.Name &&
		r1.DataValue == r2.DataValue &&
		r1.StringValue == r2.StringValue &&
		equalFloat64Ptr(r1.Sum, r2.Sum) &&
		equalBoolPtr(r1.BoolValue, r2.BoolValue) &&
		equalFloat64Ptr(r1.Value, r2.Value)
}

// equalFloat64Ptr reports pointer-field equality: both nil, or both
// non-nil with equal values.
func equalFloat64Ptr(a, b *float64) bool {
	if a == nil || b == nil {
		return a == b
	}
	return *a == *b
}

// equalBoolPtr reports pointer-field equality: both nil, or both non-nil
// with equal values.
func equalBoolPtr(a, b *bool) bool {
	if a == nil || b == nil {
		return a == b
	}
	return *a == *b
}
// CompareSenml reports whether the two packs have the same length and
// every record of s1 matches at least one record of s2 (order-insensitive).
// The previous version had the match condition inverted: it set matched
// when CompareRecords returned false, so packs with differing records
// still compared equal.
// NOTE(review): multiplicity is not checked — duplicate records in s1 may
// all match a single record in s2; confirm this looseness is acceptable.
func CompareSenml(s1 senml.Pack, s2 senml.Pack) (same bool) {
	if len(s1) != len(s2) {
		return false
	}
	for _, r1 := range s1 {
		matched := false
		for _, r2 := range s2 {
			if CompareRecords(r1, r2) {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	return true
}
func Diff_name_diff_types() senml.Pack {
value := 22.1
sum := 0.0
vb := true
var s = []senml.Record{
{BaseName: "dev123",
BaseTime: -45.67,
BaseUnit: "degC",
Value: &value, Unit: "degC", Name: "temp", Time: -1.0, UpdateTime: 10.0, Sum: &sum},
{StringValue: "kitchen", Name: "room", Time: -1.0},
{DataValue: "abc", Name: "data"},
{BoolValue: &vb, Name: "ok"},
}
return s
} | vendor/github.com/linksmart/historical-datastore/data/senmlfaker.go | 0.511961 | 0.427815 | senmlfaker.go | starcoder |
package cryptoapis
import (
"encoding/json"
)
// AddressCoinsTransactionConfirmedEachConfirmationDataItem Defines an `item` as one result.
// NOTE(review): this file appears to be openapi-generator output — prefer
// regenerating from the API spec over hand-editing.
type AddressCoinsTransactionConfirmedEachConfirmationDataItem struct {
	// Represents the specific blockchain protocol name, e.g. Ethereum, Bitcoin, etc.
	Blockchain string `json:"blockchain"`
	// Represents the name of the blockchain network used; blockchain networks are usually identical as technology and software, but they differ in data, e.g. - \"mainnet\" is the live network with actual data while networks like \"testnet\", \"ropsten\", \"rinkeby\" are test networks.
	Network string `json:"network"`
	// Defines the specific address to which the transaction has been sent.
	Address string `json:"address"`
	MinedInBlock AddressCoinsTransactionConfirmedEachConfirmationDataItemMinedInBlock `json:"minedInBlock"`
	// Defines the unique ID of the specific transaction, i.e. its identification number.
	TransactionId string `json:"transactionId"`
	// Defines the number of currently received confirmations for the transaction.
	CurrentConfirmations int32 `json:"currentConfirmations"`
	// Defines the number of confirmation transactions requested as callbacks, i.e. the system can notify till the n-th confirmation.
	TargetConfirmations int32 `json:"targetConfirmations"`
	// Defines the amount of coins sent with the confirmed transaction.
	Amount string `json:"amount"`
	// Defines the unit of the transaction, e.g. BTC.
	Unit string `json:"unit"`
	// Defines whether the transaction is \"incoming\" or \"outgoing\".
	Direction string `json:"direction"`
}
// NewAddressCoinsTransactionConfirmedEachConfirmationDataItem instantiates a new AddressCoinsTransactionConfirmedEachConfirmationDataItem object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
// (the parameter list mirrors the API's required properties; generated code).
func NewAddressCoinsTransactionConfirmedEachConfirmationDataItem(blockchain string, network string, address string, minedInBlock AddressCoinsTransactionConfirmedEachConfirmationDataItemMinedInBlock, transactionId string, currentConfirmations int32, targetConfirmations int32, amount string, unit string, direction string) *AddressCoinsTransactionConfirmedEachConfirmationDataItem {
	this := AddressCoinsTransactionConfirmedEachConfirmationDataItem{}
	this.Blockchain = blockchain
	this.Network = network
	this.Address = address
	this.MinedInBlock = minedInBlock
	this.TransactionId = transactionId
	this.CurrentConfirmations = currentConfirmations
	this.TargetConfirmations = targetConfirmations
	this.Amount = amount
	this.Unit = unit
	this.Direction = direction
	return &this
}
// NewAddressCoinsTransactionConfirmedEachConfirmationDataItemWithDefaults instantiates a new AddressCoinsTransactionConfirmedEachConfirmationDataItem object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
// (all fields are left at their Go zero values here).
func NewAddressCoinsTransactionConfirmedEachConfirmationDataItemWithDefaults() *AddressCoinsTransactionConfirmedEachConfirmationDataItem {
	this := AddressCoinsTransactionConfirmedEachConfirmationDataItem{}
	return &this
}
// GetBlockchain returns the Blockchain field value.
// A nil receiver yields the zero value instead of panicking.
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetBlockchain() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Blockchain
}

// GetBlockchainOk returns a tuple with the Blockchain field value
// and a boolean to check if the value has been set.
// The returned pointer aliases the struct field; writes through it
// modify the item.
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetBlockchainOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Blockchain, true
}

// SetBlockchain sets field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) SetBlockchain(v string) {
	o.Blockchain = v
}
// GetNetwork returns the Network field value.
// A nil receiver yields the zero value instead of panicking.
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetNetwork() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Network
}

// GetNetworkOk returns a tuple with the Network field value
// and a boolean to check if the value has been set.
// The returned pointer aliases the struct field.
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetNetworkOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Network, true
}

// SetNetwork sets field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) SetNetwork(v string) {
	o.Network = v
}
// GetAddress returns the Address field value.
// A nil receiver yields the zero value instead of panicking.
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetAddress() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Address
}

// GetAddressOk returns a tuple with the Address field value
// and a boolean to check if the value has been set.
// The returned pointer aliases the struct field.
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetAddressOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Address, true
}

// SetAddress sets field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) SetAddress(v string) {
	o.Address = v
}
// GetMinedInBlock returns the MinedInBlock field value.
// A nil receiver yields the zero value instead of panicking.
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetMinedInBlock() AddressCoinsTransactionConfirmedEachConfirmationDataItemMinedInBlock {
	if o == nil {
		var ret AddressCoinsTransactionConfirmedEachConfirmationDataItemMinedInBlock
		return ret
	}
	return o.MinedInBlock
}

// GetMinedInBlockOk returns a tuple with the MinedInBlock field value
// and a boolean to check if the value has been set.
// The returned pointer aliases the struct field.
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetMinedInBlockOk() (*AddressCoinsTransactionConfirmedEachConfirmationDataItemMinedInBlock, bool) {
	if o == nil {
		return nil, false
	}
	return &o.MinedInBlock, true
}

// SetMinedInBlock sets field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) SetMinedInBlock(v AddressCoinsTransactionConfirmedEachConfirmationDataItemMinedInBlock) {
	o.MinedInBlock = v
}
// GetTransactionId returns the TransactionId field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetTransactionId() string {
if o == nil {
var ret string
return ret
}
return o.TransactionId
}
// GetTransactionIdOk returns a tuple with the TransactionId field value
// and a boolean to check if the value has been set.
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetTransactionIdOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.TransactionId, true
}
// SetTransactionId sets field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) SetTransactionId(v string) {
o.TransactionId = v
}
// GetCurrentConfirmations returns the CurrentConfirmations field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetCurrentConfirmations() int32 {
if o == nil {
var ret int32
return ret
}
return o.CurrentConfirmations
}
// GetCurrentConfirmationsOk returns a tuple with the CurrentConfirmations field value
// and a boolean to check if the value has been set.
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetCurrentConfirmationsOk() (*int32, bool) {
if o == nil {
return nil, false
}
return &o.CurrentConfirmations, true
}
// SetCurrentConfirmations sets field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) SetCurrentConfirmations(v int32) {
o.CurrentConfirmations = v
}
// GetTargetConfirmations returns the TargetConfirmations field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetTargetConfirmations() int32 {
if o == nil {
var ret int32
return ret
}
return o.TargetConfirmations
}
// GetTargetConfirmationsOk returns a tuple with the TargetConfirmations field value
// and a boolean to check if the value has been set.
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetTargetConfirmationsOk() (*int32, bool) {
if o == nil {
return nil, false
}
return &o.TargetConfirmations, true
}
// SetTargetConfirmations sets field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) SetTargetConfirmations(v int32) {
o.TargetConfirmations = v
}
// GetAmount returns the Amount field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetAmount() string {
if o == nil {
var ret string
return ret
}
return o.Amount
}
// GetAmountOk returns a tuple with the Amount field value
// and a boolean to check if the value has been set.
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetAmountOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.Amount, true
}
// SetAmount sets field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) SetAmount(v string) {
o.Amount = v
}
// GetUnit returns the Unit field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetUnit() string {
if o == nil {
var ret string
return ret
}
return o.Unit
}
// GetUnitOk returns a tuple with the Unit field value
// and a boolean to check if the value has been set.
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetUnitOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.Unit, true
}
// SetUnit sets field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) SetUnit(v string) {
o.Unit = v
}
// GetDirection returns the Direction field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetDirection() string {
if o == nil {
var ret string
return ret
}
return o.Direction
}
// GetDirectionOk returns a tuple with the Direction field value
// and a boolean to check if the value has been set.
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) GetDirectionOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.Direction, true
}
// SetDirection sets field value
func (o *AddressCoinsTransactionConfirmedEachConfirmationDataItem) SetDirection(v string) {
o.Direction = v
}
// MarshalJSON serializes the model to JSON. Every field of this model is
// required, so each one is always emitted; the generator's `if true { ... }`
// guards were dead code and have been folded into a single map literal.
func (o AddressCoinsTransactionConfirmedEachConfirmationDataItem) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"blockchain":           o.Blockchain,
		"network":              o.Network,
		"address":              o.Address,
		"minedInBlock":         o.MinedInBlock,
		"transactionId":        o.TransactionId,
		"currentConfirmations": o.CurrentConfirmations,
		"targetConfirmations":  o.TargetConfirmations,
		"amount":               o.Amount,
		"unit":                 o.Unit,
		"direction":            o.Direction,
	}
	return json.Marshal(toSerialize)
}
type NullableAddressCoinsTransactionConfirmedEachConfirmationDataItem struct {
value *AddressCoinsTransactionConfirmedEachConfirmationDataItem
isSet bool
}
func (v NullableAddressCoinsTransactionConfirmedEachConfirmationDataItem) Get() *AddressCoinsTransactionConfirmedEachConfirmationDataItem {
return v.value
}
func (v *NullableAddressCoinsTransactionConfirmedEachConfirmationDataItem) Set(val *AddressCoinsTransactionConfirmedEachConfirmationDataItem) {
v.value = val
v.isSet = true
}
func (v NullableAddressCoinsTransactionConfirmedEachConfirmationDataItem) IsSet() bool {
return v.isSet
}
func (v *NullableAddressCoinsTransactionConfirmedEachConfirmationDataItem) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableAddressCoinsTransactionConfirmedEachConfirmationDataItem(val *AddressCoinsTransactionConfirmedEachConfirmationDataItem) *NullableAddressCoinsTransactionConfirmedEachConfirmationDataItem {
return &NullableAddressCoinsTransactionConfirmedEachConfirmationDataItem{value: val, isSet: true}
}
func (v NullableAddressCoinsTransactionConfirmedEachConfirmationDataItem) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableAddressCoinsTransactionConfirmedEachConfirmationDataItem) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | model_address_coins_transaction_confirmed_each_confirmation_data_item.go | 0.823612 | 0.449513 | model_address_coins_transaction_confirmed_each_confirmation_data_item.go | starcoder |
package pvoc
import(
"fmt"
"math"
)
// SlidingBuffer is a fixed-length FIFO window over float64 samples: new
// samples are shifted in at the end while the oldest fall off the front.
// It tracks how many of the buffered samples are "valid" (came from real
// input rather than zero padding).
// (Fixes: the original final line was corrupted with fused metadata text;
// manual element-by-element shifts replaced with the copy builtin, whose
// memmove semantics are safe for overlapping slices.)
type SlidingBuffer struct {
	Data []float64
	// lastValidSample is the index of the newest valid sample in Data,
	// or -1 while the buffer holds no valid data.
	lastValidSample int
	// hasReceivedData records whether ShiftIn/ShiftOver has ever run.
	hasReceivedData bool
}

// NewSlidingBuffer returns a zeroed SlidingBuffer holding length samples.
func NewSlidingBuffer(length int) (buffer *SlidingBuffer) {
	buffer = &SlidingBuffer{
		Data:            make([]float64, length),
		lastValidSample: -1,
		hasReceivedData: false,
	}
	return buffer
}

// HasValidSamples reports whether any valid (non-padding) samples remain.
func (sb *SlidingBuffer) HasValidSamples() bool {
	return sb.lastValidSample >= 0
}

// ShiftIn shifts len(data) samples off the front of the buffer and appends
// data at the end. Only the first validSamples entries of data count as
// valid; the rest of the tail is zero-padded.
func (sb *SlidingBuffer) ShiftIn(data []float64, validSamples int) error {
	dataLen := len(data)
	if validSamples > dataLen {
		return fmt.Errorf("validSamples %d cannot be more than buffer data length %d", validSamples, dataLen)
	}
	if dataLen > len(sb.Data) {
		return fmt.Errorf("Attempted to ShiftIn %d samples, but buffer can only hold %d samples", dataLen, len(sb.Data))
	}
	// shift the existing contents left by dataLen (overlap-safe)
	copy(sb.Data, sb.Data[dataLen:])
	// copy the new data into the freed tail
	copy(sb.Data[len(sb.Data)-dataLen:], data)
	// track the newest valid index, then zero the non-valid tail samples
	if !sb.hasReceivedData {
		sb.lastValidSample = len(sb.Data) - dataLen + validSamples - 1
	} else {
		sb.lastValidSample = sb.lastValidSample - dataLen + validSamples
	}
	for i := len(sb.Data) - dataLen + validSamples; i < len(sb.Data); i++ {
		sb.Data[i] = 0
	}
	sb.hasReceivedData = true
	return nil
}

// ShiftOver shifts the buffer left by dataLen samples and zero-pads the tail.
func (sb *SlidingBuffer) ShiftOver(dataLen int) error {
	if dataLen > len(sb.Data) {
		return fmt.Errorf("Attempted to ShiftOver %d samples, but buffer can only hold %d samples", dataLen, len(sb.Data))
	}
	// shift the data over by dataLen (overlap-safe)
	copy(sb.Data, sb.Data[dataLen:])
	// pad with 0s
	for i := len(sb.Data) - dataLen; i < len(sb.Data); i++ {
		sb.Data[i] = 0
	}
	if !sb.hasReceivedData {
		sb.lastValidSample = len(sb.Data) - dataLen - 1
	} else {
		sb.lastValidSample = sb.lastValidSample - dataLen
	}
	sb.hasReceivedData = true
	return nil
}

// DataInts returns a copy of the buffer contents rounded to the nearest int.
func (sb *SlidingBuffer) DataInts() (buffer []int) {
	buffer = make([]int, len(sb.Data))
	for i, v := range sb.Data {
		buffer[i] = int(math.Round(v))
	}
	return buffer
}
package compilerutil
import (
"hash/fnv"
hamt "github.com/raviqqe/hamt.go"
)
// ImmutableMap defines an immutable map struct, where Set-ing a new key returns a new ImmutableMap.
type ImmutableMap interface {
// Get returns the value found at the given key, if any.
Get(key string) (interface{}, bool)
// Set returns a new ImmutableMap which is a copy of this map, but with the given key set
// to the given value.
Set(key string, value interface{}) ImmutableMap
}
// NewImmutableMap creates a new, empty immutable map.
func NewImmutableMap() ImmutableMap {
return copyImmutableMap{
internalMap: map[string]interface{}{},
}
}
// hamtMapThreshold defines the threshold at which we switch from a copy map
// to an hamt map.
const hamtMapThreshold = 10
// copyImmutableMap is an immutable map that uses the internal Go map
// and fully copies it everytime a Set operation is called. This is quite
// fast for maps of size <= 10, after which it starts to lose some performance
// and really gets bad at sizes >= 50.
type copyImmutableMap struct {
internalMap map[string]interface{}
}
// hamtImmutableMap is an immutable map thatuses the hamt package's
// Map implementation. This has better performance for maps of size >= 50.
type hamtImmutableMap struct {
internalMap hamt.Map
}
func (i copyImmutableMap) Get(key string) (interface{}, bool) {
value, ok := i.internalMap[key]
return value, ok
}
func (i copyImmutableMap) Set(key string, value interface{}) ImmutableMap {
// If we've reached the threshold, switch from a copy map to the hamt map.
length := len(i.internalMap)
if length >= hamtMapThreshold {
var hamtMap ImmutableMap = hamtImmutableMap{hamt.NewMap()}
for existingKey, value := range i.internalMap {
hamtMap = hamtMap.Set(existingKey, value)
}
hamtMap = hamtMap.Set(key, value)
return hamtMap
}
// Otherwise, create a new map, copy over all the existing elements, and
// set the new key.
newMap := make(map[string]interface{}, length)
for existingKey, value := range i.internalMap {
newMap[existingKey] = value
}
newMap[key] = value
return copyImmutableMap{newMap}
}
func (i hamtImmutableMap) Get(key string) (interface{}, bool) {
value := i.internalMap.Find(hamtString(key))
return value, value != nil
}
func (i hamtImmutableMap) Set(key string, value interface{}) ImmutableMap {
return hamtImmutableMap{i.internalMap.Insert(hamtString(key), value)}
}
type hamtString string
func (h hamtString) Equal(other hamt.Entry) bool {
return string(h) == string(other.(hamtString))
}
func (h hamtString) Hash() uint32 {
hsh := fnv.New32a()
hsh.Write([]byte(string(h)))
return hsh.Sum32()
} | compilerutil/immutablemap.go | 0.828072 | 0.608943 | immutablemap.go | starcoder |
package gfx
import (
"github.com/go-gl/gl/v2.1/gl"
)
// FilterMode represents the interpolation mode for texture rendering.
type FilterMode int

const (
	// NearestFilter scales images with nearest neighbor interpolation.
	NearestFilter FilterMode = iota
	// LinearFilter scales image with linear interpolation.
	LinearFilter
)

// texture wraps an OpenGL texture handle together with its pixel dimensions.
type texture struct {
	id uint32
	width, height float64
}

// currentlyBoundTextureID caches the most recently bound texture id so that
// activate can skip redundant glBindTexture calls.
var currentlyBoundTextureID uint32
// activate enables 2D texturing, binds this texture (skipping the bind call
// when it is already the currently bound texture), and applies the min/mag
// filter parameters for the given FilterMode.
func (t *texture) activate(mode FilterMode) {
	gl.Enable(gl.TEXTURE_2D)
	if currentlyBoundTextureID != t.id {
		gl.BindTexture(gl.TEXTURE_2D, t.id)
		currentlyBoundTextureID = t.id
	}
	switch mode {
	case NearestFilter:
		gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST)
		gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST)
	case LinearFilter:
		gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR)
		gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR)
	}
}
// render draws the texture as a single fixed-function quad anchored at the
// origin and extending downward in y (corners (0,0) to (width,-height)),
// with every vertex tinted by the RGBA color in p.
func (t *texture) render(p *Params) {
	gl.Begin(gl.QUADS)
	gl.Color4d(p.R, p.G, p.B, p.A)
	gl.TexCoord2f(0, 0)
	gl.Vertex3d(0, 0, 0)
	gl.Color4d(p.R, p.G, p.B, p.A)
	gl.TexCoord2f(0, 1)
	gl.Vertex3d(0, -t.height, 0)
	gl.Color4d(p.R, p.G, p.B, p.A)
	gl.TexCoord2f(1, 1)
	gl.Vertex3d(t.width, -t.height, 0)
	gl.Color4d(p.R, p.G, p.B, p.A)
	gl.TexCoord2f(1, 0)
	gl.Vertex3d(t.width, 0, 0)
	gl.End()
}

// delete releases the underlying OpenGL texture object.
func (t *texture) delete() {
	gl.DeleteTextures(1, &t.id)
}
func newTexture(width, height int, pixelData []byte) *texture {
numBytes := width * height / len(pixelData)
format := gl.RGBA
switch numBytes {
case 1:
format = gl.ALPHA
for i, p := range pixelData {
if p > 0 {
pixelData[i] *= 255
}
}
case 3:
format = gl.RGB
case 4:
format = gl.RGBA
}
var id uint32
gl.Enable(gl.TEXTURE_2D)
gl.GenTextures(1, &id)
gl.BindTexture(gl.TEXTURE_2D, id)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
gl.TexImage2D(
gl.TEXTURE_2D,
0,
int32(format),
int32(width),
int32(height),
0,
uint32(format),
gl.UNSIGNED_BYTE,
gl.Ptr(pixelData))
return &texture{id: id, width: float64(width), height: float64(height)}
} | gfx/texture.go | 0.72027 | 0.452173 | texture.go | starcoder |
package axon
import (
"fmt"
"reflect"
"unsafe"
)
// SynapseVarStart is the byte offset of fields in the Synapse structure
// where the float32 named variables start.
// Note: all non-float32 infrastructure variables must be at the start!
// (CaUpT, the single int32 field, occupies bytes 0-3.)
const SynapseVarStart = 4

// axon.Synapse holds state for the synaptic connection between neurons.
// The float32 fields from Wt onward are addressed by index via unsafe
// pointer arithmetic (see VarByIndex), so their order must match SynapseVars.
type Synapse struct {
	CaUpT int32 `desc:"time in CycleTot of last updating of Ca values at the synapse level, for optimized synaptic-level Ca integration"`
	Wt float32 `desc:"effective synaptic weight value, determining how much conductance one spike drives on the receiving neuron. Wt = SWt * WtSig(LWt), where WtSig produces values between 0-2 based on LWt, centered on 1"`
	SWt float32 `desc:"slowly adapting structural weight value, which acts as a multiplicative scaling factor on synaptic efficacy: biologically represents the physical size and efficacy of the dendritic spine, while the LWt reflects the AMPA receptor efficacy and number. SWt values adapt in an outer loop along with synaptic scaling, with constraints to prevent runaway positive feedback loops and maintain variance and further capacity to learn. Initial variance is all in SWt, with LWt set to .5, and scaling absorbs some of LWt into SWt."`
	LWt float32 `desc:"rapidly learning, linear weight value -- learns according to the lrate specified in the connection spec. Initially all LWt are .5, which gives 1 from WtSig function, "`
	DWt float32 `desc:"change in synaptic weight, from learning"`
	DSWt float32 `desc:"change in SWt slow synaptic weight -- accumulates DWt"`
	TDWt float32 `desc:"transitional, temporary DWt value, which is updated in a window after synaptic activity when Ca levels are still elevated, and added to the DWt value after a longer break of spiking where there is enough time for CaMKII driven AMPA receptor trafficking to take place"`
	Ca float32 `desc:"Raw calcium singal for Kinase based learning: send.SnmdaO * recv.RCa"`
	CaM float32 `desc:"first stage running average (mean) Ca calcium level (like CaM = calmodulin), feeds into CaP"`
	CaP float32 `desc:"shorter timescale integrated CaM value, representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule"`
	CaD float32 `desc:"longer timescale integrated CaP value, representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule"`
	CaDMax float32 `desc:"maximum CaD value since last DWt change -- DWt occurs when current CaD has decreased by a given proportion from this recent peak"`
	DWtRaw float32 `desc:"raw change in synaptic weight, from learning -- temporary for Kinase analysis"`
}
// VarNames returns the names of the float32 state variables on the Synapse.
func (sy *Synapse) VarNames() []string {
	return SynapseVars
}

// SynapseVars lists the float32 variable names, in the same order as the
// Synapse struct fields starting at byte offset SynapseVarStart.
var SynapseVars = []string{"Wt", "SWt", "LWt", "DWt", "DSWt", "TDWt", "Ca", "CaM", "CaP", "CaD", "CaDMax", "DWtRaw"}

// SynapseVarProps holds display properties (e.g. plot auto-scaling) per variable.
var SynapseVarProps = map[string]string{
	"DWt":    `auto-scale:"+"`,
	"DSWt":   `auto-scale:"+"`,
	"TDWt":   `auto-scale:"+"`,
	"CaM":    `auto-scale:"+"`,
	"CaP":    `auto-scale:"+"`,
	"CaD":    `auto-scale:"+"`,
	"CaDMax": `auto-scale:"+"`,
	"DWtRaw": `auto-scale:"+"`,
}

// SynapseVarsMap maps variable name -> index within SynapseVars.
var SynapseVarsMap map[string]int

// init populates SynapseVarsMap and appends each struct field's desc tag
// (looked up via reflection) to its entry in SynapseVarProps.
func init() {
	SynapseVarsMap = make(map[string]int, len(SynapseVars))
	typ := reflect.TypeOf((*Synapse)(nil)).Elem()
	for i, v := range SynapseVars {
		SynapseVarsMap[v] = i
		pstr := SynapseVarProps[v]
		if fld, has := typ.FieldByName(v); has {
			if desc, ok := fld.Tag.Lookup("desc"); ok {
				pstr += ` desc:"` + desc + `"`
				SynapseVarProps[v] = pstr
			}
		}
	}
}
// SynapseVarByName returns the index of the variable in the Synapse, or error
func SynapseVarByName(varNm string) (int, error) {
	i, ok := SynapseVarsMap[varNm]
	if !ok {
		return -1, fmt.Errorf("Synapse VarByName: variable name: %v not valid", varNm)
	}
	return i, nil
}

// VarByIndex returns variable using index (0 = first variable in SynapseVars list).
// NOTE: this relies on unsafe pointer arithmetic: the float32 fields must
// remain contiguous starting at byte offset SynapseVarStart, in the exact
// order given by SynapseVars. idx is not bounds-checked.
func (sy *Synapse) VarByIndex(idx int) float32 {
	fv := (*float32)(unsafe.Pointer(uintptr(unsafe.Pointer(sy)) + uintptr(SynapseVarStart+4*idx)))
	return *fv
}

// VarByName returns variable by name, or error
func (sy *Synapse) VarByName(varNm string) (float32, error) {
	i, err := SynapseVarByName(varNm)
	if err != nil {
		return 0, err
	}
	return sy.VarByIndex(i), nil
}
func (sy *Synapse) SetVarByIndex(idx int, val float32) {
fv := (*float32)(unsafe.Pointer(uintptr(unsafe.Pointer(sy)) + uintptr(SynapseVarStart+4*idx)))
*fv = val
}
// SetVarByName sets synapse variable to given value
func (sy *Synapse) SetVarByName(varNm string, val float32) error {
i, err := SynapseVarByName(varNm)
if err != nil {
return err
}
sy.SetVarByIndex(i, val)
return nil
} | axon/synapse.go | 0.706494 | 0.622861 | synapse.go | starcoder |
package linear
import (
"time"
)
// Units for Distance values. Always multiply with a unit when setting the initial value like you would for
// time.Time. This prevents you from having to worry about the internal storage format.
const (
Nanometer Distance = 1e-6
Micrometer Distance = 1e-3
Millimeter Distance = 1
Centimeter Distance = 1e1
Decimeter Distance = 1e2
Meter Distance = 1e3
Kilometer Distance = 1e6
Thou Distance = 0.0254 * Millimeter
Inch Distance = 25.4 * Millimeter
Foot Distance = 304.8 * Millimeter
Yard Distance = 3 * Foot
Chain Distance = 66 * Foot
Furlong Distance = 660 * Foot
Mile Distance = 5280 * Foot
Fathom Distance = 6.08 * Foot
Cable Distance = 608 * Foot
NauticalMile Distance = 6080 * Foot
)
// Nanometers returns d as a floating point number of nanometers.
func (d Distance) Nanometers() float64 {
	return float64(d / Nanometer)
}

// Micrometers returns d as a floating point number of micrometers.
func (d Distance) Micrometers() float64 {
	return float64(d / Micrometer)
}

// Millimeters returns d as a floating point number of millimeters.
func (d Distance) Millimeters() float64 {
	return float64(d / Millimeter)
}

// Centimeters returns d as a floating point number of centimeters.
func (d Distance) Centimeters() float64 {
	return float64(d / Centimeter)
}

// Decimeters returns d as a floating point number of decimeters.
func (d Distance) Decimeters() float64 {
	return float64(d / Decimeter)
}

// Meters returns d as a floating point number of meters.
func (d Distance) Meters() float64 {
	return float64(d / Meter)
}

// Kilometers returns d as a floating point number of kilometers.
func (d Distance) Kilometers() float64 {
	return float64(d / Kilometer)
}

// Thou returns d as a floating point number of thou.
func (d Distance) Thou() float64 {
	return float64(d / Thou)
}

// Inches returns d as a floating point number of inches.
func (d Distance) Inches() float64 {
	return float64(d / Inch)
}

// Feet returns d as a floating point number of feet.
func (d Distance) Feet() float64 {
	return float64(d / Foot)
}

// Yards returns d as a floating point number of yards.
func (d Distance) Yards() float64 {
	return float64(d / Yard)
}

// Chains returns d as a floating point number of chains.
func (d Distance) Chains() float64 {
	return float64(d / Chain)
}

// Furlongs returns d as a floating point number of furlongs.
func (d Distance) Furlongs() float64 {
	return float64(d / Furlong)
}

// Miles returns d as a floating point number of miles.
func (d Distance) Miles() float64 {
	return float64(d / Mile)
}

// Fathoms returns d as a floating point number of fathoms.
func (d Distance) Fathoms() float64 {
	return float64(d / Fathom)
}

// Cables returns d as a floating point number of cables.
func (d Distance) Cables() float64 {
	return float64(d / Cable)
}

// NauticalMiles returns d as a floating point number of nautical miles.
func (d Distance) NauticalMiles() float64 {
	return float64(d / NauticalMile)
}
// Abs returns the absolute value of d as a copy.
func (d Distance) Abs() Distance {
if d < 0 {
return -d
}
return d
}
// Mul returns the product of d * x as a new Distance.
func (d Distance) Mul(x float64) Distance {
return d * Distance(x)
}
// Div returns the quotient of d / x as a new Distance.
func (d Distance) Div(x float64) Distance {
return d / Distance(x)
}
// DivDistance returns the quotient of d / x as a floating point number.
func (d Distance) DivDistance(x Distance) float64 {
return float64(d / x)
}
// DivDuration returns the quotient of d / t as a Velocity.
func (d Distance) DivDuration(t time.Duration) Velocity {
return Velocity(float64(d) / float64(t))
}
// DivVelocity returns the quotient of d / x as a time.Duration.
func (d Distance) DivVelocity(x Velocity) time.Duration {
return time.Duration(float64(d) / float64(x))
} | linear/distance_generated.go | 0.927855 | 0.766556 | distance_generated.go | starcoder |
package solver
import (
"fmt"
"sync"
digest "github.com/opencontainers/go-digest"
)
// EdgeIndex is a synchronous map for detecting edge collisions.
type EdgeIndex struct {
	mu sync.Mutex

	// items maps (vertex digest, output index, deps count) -> (dep
	// cache-key id, dep slot index) -> set of edges registered under that
	// signature. Edges without deps are stored under the zero indexedDigest
	// second-level key.
	items map[indexedDigest]map[indexedDigest]map[*edge]struct{}
	// backRefs is the reverse mapping, used by Release to remove an edge
	// from items without scanning.
	backRefs map[*edge]map[indexedDigest]map[indexedDigest]struct{}
}

// NewEdgeIndex returns an empty EdgeIndex.
func NewEdgeIndex() *EdgeIndex {
	return &EdgeIndex{
		items:    map[indexedDigest]map[indexedDigest]map[*edge]struct{}{},
		backRefs: map[*edge]map[indexedDigest]map[indexedDigest]struct{}{},
	}
}

// LoadOrStore returns an already-registered edge matching the given
// signature if a suitable one exists, otherwise registers e and returns nil.
// An existing match is rejected when it respects the cache but e ignores it
// (an ignore-cache request must not be served by a cache-respecting edge).
func (ei *EdgeIndex) LoadOrStore(e *edge, dgst digest.Digest, index Index, deps [][]CacheKey) *edge {
	ei.mu.Lock()
	defer ei.mu.Unlock()
	if old := ei.load(e, dgst, index, deps); old != nil && !(!old.edge.Vertex.Options().IgnoreCache && e.edge.Vertex.Options().IgnoreCache) {
		return old
	}
	ei.store(e, dgst, index, deps)
	return nil
}

// Release removes e from the index, deleting any intermediate maps that
// become empty, using the backRefs reverse mapping.
func (ei *EdgeIndex) Release(e *edge) {
	ei.mu.Lock()
	defer ei.mu.Unlock()
	for id, backRefs := range ei.backRefs[e] {
		for id2 := range backRefs {
			delete(ei.items[id][id2], e)
			if len(ei.items[id][id2]) == 0 {
				delete(ei.items[id], id2)
			}
		}
		if len(ei.items[id]) == 0 {
			delete(ei.items, id)
		}
	}
	delete(ei.backRefs, e)
}
// load returns a registered edge (other than ignore) whose signature matches
// the given digest/index and whose stored dep keys intersect every dep slot
// in deps, or nil when there is none. Edges whose vertex has IgnoreCache set
// are preferred among the matches. Caller must hold ei.mu.
func (ei *EdgeIndex) load(ignore *edge, dgst digest.Digest, index Index, deps [][]CacheKey) *edge {
	id := indexedDigest{dgst: dgst, index: index, depsCount: len(deps)}
	m, ok := ei.items[id]
	if !ok {
		return nil
	}
	if len(deps) == 0 {
		// No deps: all candidates live under the zero second-level key.
		m2, ok := m[indexedDigest{}]
		if !ok {
			return nil
		}
		// prioritize edges with ignoreCache
		for e := range m2 {
			if e.edge.Vertex.Options().IgnoreCache && e != ignore {
				return e
			}
		}
		for e := range m2 {
			if e != ignore {
				return e
			}
		}
		return nil
	}

	// Seed the candidate set from the first dep slot, then intersect it with
	// the candidates of each subsequent slot.
	matches := map[*edge]struct{}{}
	for i, keys := range deps {
		if i == 0 {
			for _, key := range keys {
				id := indexedDigest{dgst: getUniqueID(key), index: Index(i)}
				for e := range m[id] {
					if e != ignore {
						matches[e] = struct{}{}
					}
				}
			}
		} else {
			// Drop any candidate not registered under at least one key of
			// this dep slot.
		loop0:
			for match := range matches {
				for _, key := range keys {
					id := indexedDigest{dgst: getUniqueID(key), index: Index(i)}
					if m[id] != nil {
						if _, ok := m[id][match]; ok {
							continue loop0
						}
					}
				}
				delete(matches, match)
			}
		}
		if len(matches) == 0 {
			break
		}
	}
	// prioritize edges with ignoreCache
	for m := range matches {
		if m.edge.Vertex.Options().IgnoreCache {
			return m
		}
	}
	for m := range matches {
		return m
	}
	return nil
}

// store registers e under the given signature: once per (digest, index,
// deps count), keyed by each dep cache key's unique id per slot (or the
// zero key when there are no deps). It also records the reverse refs so
// Release can clean up. Caller must hold ei.mu.
func (ei *EdgeIndex) store(e *edge, dgst digest.Digest, index Index, deps [][]CacheKey) {
	id := indexedDigest{dgst: dgst, index: index, depsCount: len(deps)}
	m, ok := ei.items[id]
	if !ok {
		m = map[indexedDigest]map[*edge]struct{}{}
		ei.items[id] = m
	}

	backRefsMain, ok := ei.backRefs[e]
	if !ok {
		backRefsMain = map[indexedDigest]map[indexedDigest]struct{}{}
		ei.backRefs[e] = backRefsMain
	}

	backRefs, ok := backRefsMain[id]
	if !ok {
		backRefs = map[indexedDigest]struct{}{}
		backRefsMain[id] = backRefs
	}

	if len(deps) == 0 {
		m2, ok := m[indexedDigest{}]
		if !ok {
			m2 = map[*edge]struct{}{}
			m[indexedDigest{}] = m2
		}
		m2[e] = struct{}{}
		backRefs[indexedDigest{}] = struct{}{}
		return
	}

	for i, keys := range deps {
		for _, key := range keys {
			id := indexedDigest{dgst: getUniqueID(key), index: Index(i)}
			m2, ok := m[id]
			if !ok {
				m2 = map[*edge]struct{}{}
				m[id] = m2
			}
			m2[e] = struct{}{}
			backRefs[id] = struct{}{}
		}
	}
}
type indexedDigest struct {
dgst digest.Digest
index Index
depsCount int
}
type internalKeyT string
var internalKey = internalKeyT("buildkit/unique-cache-id")
func getUniqueID(k CacheKey) digest.Digest {
internalV := k.GetValue(internalKey)
if internalV != nil {
return internalV.(digest.Digest)
}
dgstr := digest.SHA256.Digester()
for _, inp := range k.Deps() {
dgstr.Hash().Write([]byte(getUniqueID(inp.CacheKey)))
dgstr.Hash().Write([]byte(inp.Selector))
}
dgstr.Hash().Write([]byte(k.Digest()))
dgstr.Hash().Write([]byte(fmt.Sprintf("%d", k.Output())))
dgst := dgstr.Digest()
k.SetValue(internalKey, dgst)
return dgst
} | vendor/github.com/moby/buildkit/solver-next/index.go | 0.614741 | 0.414247 | index.go | starcoder |
package keras2go
import "math"
/**
 * Basic 1-D matrix multiplication: computes C = A*B.
 * A, B and C are 1-D arrays holding matrices in row-major order.
 * C is outrows x outcols, A is outrows x innerdim, B is innerdim x outcols.
 * All of C is cleared before accumulation.
 *
 * :param C: output Array.
 * :param A: input Array 1.
 * :param B: input Array 2.
 * :param outrows: number of rows of C and A.
 * :param outcols: number of cols of C and B.
 * :param innerdim: number of cols of A and rows of B.
 */
func k2c_matmul(C []float64, A []float64, B []float64, outrows int, outcols int, innerdim int) {
	// Clear the whole destination slice first.
	for i := range C {
		C[i] = 0
	}
	for row := 0; row < outrows; row++ {
		cBase := row * outcols
		aBase := row * innerdim
		for k := 0; k < innerdim; k++ {
			aVal := A[aBase+k]
			bBase := k * outcols
			for col := 0; col < outcols; col++ {
				C[cBase+col] += aVal * B[bBase+col]
			}
		}
	}
}
// float64SliceToZero resets every element of a to zero in place.
func float64SliceToZero(a []float64) {
	for i := range a {
		a[i] = 0
	}
}
/**
 * Affine matrix multiplication: computes C = A*B + d, where the vector d
 * is added to each row of A*B.
 * A, B and C are 1-D arrays holding matrices in row-major order.
 * All of C is cleared before the per-element results are written.
 *
 * :param C: output Array.
 * :param A: input Array 1.
 * :param B: input Array 2.
 * :param d: input Array 3 (bias row).
 * :param outrows: number of rows of C and A.
 * :param outcols: number of cols of C, B and d.
 * :param innerdim: number of cols of A and rows of B.
 */
func k2c_affine_matmul(C []float64, A []float64, B []float64, d []float64, outrows int, outcols int, innerdim int) {
	// Clear the whole destination slice first (any tail beyond
	// outrows*outcols stays zero, as before).
	for i := range C {
		C[i] = 0
	}
	for row := 0; row < outrows; row++ {
		cBase := row * outcols
		aBase := row * innerdim
		for col := 0; col < outcols; col++ {
			acc := 0.0
			for k := 0; k < innerdim; k++ {
				acc += A[aBase+k] * B[k*outcols+col]
			}
			C[cBase+col] = acc + d[col]
		}
	}
}
/**
 * Converts subscripts to linear indices in row major order.
 *
 * :param sub: Array[ndim] subscript to convert.
 * :param shape: Array[ndim] shape of the array being indexed.
 * :param ndim: number of dimensions of the array being indexed.
 * :return: linear index in row major order.
 */
func k2c_sub2idx(sub []int, shape []int, ndim int) int {
	// Horner-style accumulation over the leading dimensions:
	// idx = ((sub[0]*shape[1] + sub[1])*shape[2] + sub[2])*...
	// which equals sum_i sub[i] * prod_{j>i} shape[j].
	idx := 0
	for i := 0; i < ndim; i++ {
		idx = idx*shape[i] + sub[i]
	}
	return idx
}
/**
 * Converts a linear index to subscripts in row major order.
 *
 * :param idx: linear index in row major order.
 * :param sub: Array[ndim] output subscript.
 * :param shape: Array[ndim] shape of the array being indexed.
 * :param ndim: number of dimensions of the array being indexed.
 */
func k2c_idx2sub(idx int, sub []int, shape []int, ndim int) {
	// Peel off one axis at a time from the fastest-varying (last) axis.
	remaining := idx
	for axis := ndim - 1; axis >= 0; axis-- {
		sub[axis] = remaining % shape[axis]
		remaining /= shape[axis]
	}
}
/**
 * Dot product (tensor contraction) between 2 tensors. C=A*B
 *
 * :param C: output tensor.
 * :param A: input tensor 1.
 * :param B: input tensor 2.
 * :param axesA: Array[naxes] of axes of A being contracted.
 * :param axesB: Array[naxes] of axes of B being contracted.
 * :param naxes: number of axes being contracted from each input.
 * :param normalize: (0,1) whether to L2-normalize samples along the dot product axis before taking the dot product. If set to 1, then the output of the dot product is the cosine proximity between the two samples.
 * :param fwork: Array of working space, size(fwork) = size(A) + size(B)
 */
func k2c_dot(C *K2c_tensor, A *K2c_tensor, B *K2c_tensor, axesA []int, axesB []int,
	naxes int, normalize int, fwork []float64) {

	var permA [K2C_MAX_NDIM]int
	var permB [K2C_MAX_NDIM]int
	var prod_axesA = 1
	var prod_axesB = 1
	var free_axesA, free_axesB int
	var freeA [K2C_MAX_NDIM]int
	var freeB [K2C_MAX_NDIM]int
	var count int
	var isin bool
	var newshpA [K2C_MAX_NDIM]int
	var newshpB [K2C_MAX_NDIM]int
	var ndimA = A.Ndim
	var ndimB = B.Ndim
	var reshapeA = fwork // temp working storage
	var reshapeB = fwork[A.Numel:]
	var Asub [K2C_MAX_NDIM]int
	var Bsub [K2C_MAX_NDIM]int

	// find which axes are free (ie, not being summed over)
	count = 0
	for i := 0; i < ndimA; i++ {
		isin = false
		for j := 0; j < naxes; j++ {
			if i == axesA[j] {
				isin = true
				break
			}
		}
		if !isin {
			freeA[count] = i
			count++
		}
	}
	count = 0
	for i := 0; i < ndimB; i++ {
		isin = false
		for j := 0; j < naxes; j++ {
			if i == axesB[j] {
				isin = true
				break
			}
		}
		if !isin {
			freeB[count] = i
			count++
		}
	}

	// number of elements in inner dimension
	for i := 0; i < naxes; i++ {
		prod_axesA *= A.Shape[axesA[i]]
	}
	for i := 0; i < naxes; i++ {
		prod_axesB *= B.Shape[axesB[i]]
	}
	// number of elements in free dimension
	free_axesA = A.Numel / prod_axesA
	free_axesB = B.Numel / prod_axesB

	// find permutation of axes to get into matmul Shape:
	// A is permuted to (free axes..., contracted axes),
	// B is permuted to (contracted axes..., free axes).
	for i := 0; i < ndimA-naxes; i++ {
		permA[i] = freeA[i]
	}
	{
		i := ndimA - naxes
		j := 0
		for i < ndimA {
			permA[i] = axesA[j]
			i++
			j++
		}
	}
	for i := 0; i < naxes; i++ {
		permB[i] = axesB[i]
	}
	{
		i := naxes
		j := 0
		for i < ndimB {
			permB[i] = freeB[j]
			i++
			j++
		}
	}

	for i := 0; i < ndimA; i++ {
		newshpA[i] = A.Shape[permA[i]]
	}
	for i := 0; i < ndimB; i++ {
		newshpB[i] = B.Shape[permB[i]]
	}

	// reshape arrays: scatter each element into its transposed position in
	// the scratch space, so the contraction becomes a plain matmul.
	for i := 0; i < A.Numel; i++ {
		k2c_idx2sub(i, Asub[:], A.Shape[:], ndimA)
		for j := 0; j < ndimA; j++ {
			Bsub[j] = Asub[permA[j]]
		}
		bidx := k2c_sub2idx(Bsub[:], newshpA[:], ndimA)
		reshapeA[bidx] = A.Array[i]
	}

	for i := 0; i < B.Numel; i++ {
		k2c_idx2sub(i, Bsub[:], B.Shape[:], ndimB)
		for j := 0; j < ndimB; j++ {
			Asub[j] = Bsub[permB[j]]
		}
		bidx := k2c_sub2idx(Asub[:], newshpB[:], ndimB)
		reshapeB[bidx] = B.Array[i]
	}

	if normalize != 0 {
		// L2-normalize each sample (row of reshapeA, column of reshapeB)
		// along the contraction axis so the matmul yields cosine proximity.
		var sum float64
		var inorm float64
		for i := 0; i < free_axesA; i++ {
			sum = 0
			for j := 0; j < prod_axesA; j++ {
				sum += reshapeA[i*prod_axesA+j] * reshapeA[i*prod_axesA+j]
			}
			inorm = 1.0 / math.Sqrt(sum)
			for j := 0; j < prod_axesA; j++ {
				reshapeA[i*prod_axesA+j] *= inorm
			}
		}
		for i := 0; i < free_axesB; i++ {
			sum = 0
			for j := 0; j < prod_axesB; j++ {
				sum += reshapeB[i+free_axesB*j] * reshapeB[i+free_axesB*j]
			}
			inorm = 1.0 / math.Sqrt(sum)
			for j := 0; j < prod_axesB; j++ {
				reshapeB[i+free_axesB*j] *= inorm
			}
		}
	}

	k2c_matmul(C.Array, reshapeA, reshapeB, free_axesA, free_axesB, prod_axesA)
}
/**
 * Adds the rank-1 bias tensor b to tensor A in place, broadcasting b
 * across the last dimension of A (A.Numel is assumed to be a multiple of
 * b.Numel).
 *
 * :param A: input tensor. Overwritten with outputs.
 * :param b: bias tensor.
 */
func k2c_bias_add(A *K2c_tensor, b *K2c_tensor) {
	for offset := 0; offset < A.Numel; offset += b.Numel {
		for j := 0; j < b.Numel; j++ {
			A.Array[offset+j] += b.Array[j]
		}
	}
}
/**
 * Flips a tensor along specified axis.
 * overwrites input with flipped output.
 *
 * :param A: input tensor. Overwritten with outputs.
 * :param axis: axis along which to flip
 */
func k2c_flip(A *K2c_tensor, axis int) {
	var ndim = A.Ndim
	var shape = A.Shape
	var numel = A.Numel
	var sub [K2C_MAX_NDIM]int
	var step = 1
	var k = 0
	var idx = 0
	var temp float64
	// reduced_size is the product of the dimensions from axis onward:
	// flipping swaps element k with its mirror inside each
	// reduced_size-sized span of the flattened array.
	var reduced_size = 1
	for i := axis; i < ndim; i++ {
		reduced_size *= shape[i]
	}
	// Only the first half of each span needs visiting (threshold); once
	// k+step crosses it, k jumps past the second half into the next span
	// so no element is swapped twice.
	var threshold = reduced_size / 2
	var jump = reduced_size
	for k < numel {
		// Mirror k's multi-index along the flip axis, then swap.
		k2c_idx2sub(k, sub[:], shape[:], ndim)
		sub[axis] = shape[axis] - sub[axis] - 1
		idx = k2c_sub2idx(sub[:], shape[:], ndim)
		temp = A.Array[k]
		A.Array[k] = A.Array[idx]
		A.Array[idx] = temp
		if (k+step)%jump >= threshold {
			k = k + step - threshold + jump
		} else {
			k += step
		}
	}
}
package transform
import (
"math"
)
const (
	// xPi is the rescaled pi used by the BD-09 (Baidu) offset formulas.
	xPi = math.Pi * 3000.0 / 180.0
	// a appears to be the semi-major axis (meters) of the reference
	// ellipsoid used by the GCJ-02 distortion — TODO confirm (Krasovsky 1940).
	a = 6378245.0
	// ee is the corresponding squared eccentricity.
	ee = 0.00669342162296594323
	// mc is half the Web-Mercator world extent in meters.
	mc = 20037508.34
	// threshold is the per-axis convergence tolerance (degrees) used by
	// GCJ02toWGS84Exact.
	threshold = 0.000001
)
// inChina decides whether the GCJ-02 obfuscation applies.
// NOTE(review): despite its name, this returns true when the point lies
// OUTSIDE the rough China bounding box (the condition is negated), i.e.
// it behaves like the conventional outOfChina helper. Callers rely on
// that: WGS84toGCJ02 returns coordinates unchanged when this is true.
// Renaming is not done here because it would touch all call sites.
func inChina(lon, lat float64) bool {
	return !(lon > 72.004 && lon < 137.8347 && lat > 0.8293 && lat < 55.8271)
}
// BD09toGCJ02 百度坐标系->火星坐标系
// Removes Baidu's additive offset and polar perturbation in
// polar-coordinate form.
func BD09toGCJ02(lon, lat float64) (float64, float64) {
	dx := lon - 0.0065
	dy := lat - 0.006
	r := math.Sqrt(dx*dx+dy*dy) - 0.00002*math.Sin(dy*xPi)
	t := math.Atan2(dy, dx) - 0.000003*math.Cos(dx*xPi)
	return r * math.Cos(t), r * math.Sin(t)
}
// GCJ02toBD09 火星坐标系->百度坐标系
// Applies Baidu's polar perturbation and additive offset.
func GCJ02toBD09(lon, lat float64) (float64, float64) {
	r := math.Sqrt(lon*lon+lat*lat) + 0.00002*math.Sin(lat*xPi)
	t := math.Atan2(lat, lon) + 0.000003*math.Cos(lon*xPi)
	return r*math.Cos(t) + 0.0065, r*math.Sin(t) + 0.006
}
// WGS84toGCJ02 WGS84坐标系->火星坐标系
func WGS84toGCJ02(lon, lat float64) (float64, float64) {
	// No offset is applied outside the China bounding box (note that
	// inChina, despite its name, returns true for points outside it).
	if inChina(lon, lat) {
		return lon, lat
	}
	offLon, offLat := transform(lon-105.0, lat-35.0)
	radLat := lat / 180.0 * math.Pi
	m := math.Sin(radLat)
	m = 1 - ee*m*m
	sqrtM := math.Sqrt(m)
	// Convert the raw distortion offsets to degrees on the ellipsoid.
	offLat = (offLat * 180.0) / ((a * (1 - ee)) / (m * sqrtM) * math.Pi)
	offLon = (offLon * 180.0) / (a / sqrtM * math.Cos(radLat) * math.Pi)
	return lon + offLon, lat + offLat
}
// GCJ02toWGS84 火星坐标系->WGS84坐标系 精度为1m至2m
// One-step inverse: re-apply the forward distortion at the GCJ point
// and subtract it (the distortion is assumed locally constant).
func GCJ02toWGS84(lon, lat float64) (float64, float64) {
	gLon, gLat := WGS84toGCJ02(lon, lat)
	return lon*2 - gLon, lat*2 - gLat
}
// GCJ02toWGS84Exact 火星坐标系->WGS84坐标系 精度小于0.5m 较GCJ02toWGS84慢15倍
// Bisection on each axis independently: keep a bracket around the
// WGS84 answer and halve it until the forward transform of the
// midpoint lands within threshold of the input on both axes.
func GCJ02toWGS84Exact(lon, lat float64) (wgsLon, wgsLat float64) {
	const span = 0.01
	loLon, loLat := lon-span, lat-span
	hiLon, hiLat := lon+span, lat+span
	for {
		wgsLon, wgsLat = (loLon+hiLon)/2, (loLat+hiLat)/2
		gcjLon, gcjLat := WGS84toGCJ02(wgsLon, wgsLat)
		errLon, errLat := gcjLon-lon, gcjLat-lat
		if math.Abs(errLon) < threshold && math.Abs(errLat) < threshold {
			return
		}
		// Shrink the bracket towards the side the error points away from.
		if errLon > 0 {
			hiLon = wgsLon
		} else {
			loLon = wgsLon
		}
		if errLat > 0 {
			hiLat = wgsLat
		} else {
			loLat = wgsLat
		}
	}
}
// BD09toWGS84 百度坐标系->WGS84坐标系
func BD09toWGS84(lon, lat float64) (float64, float64) {
return GCJ02toWGS84(BD09toGCJ02(lon, lat))
}
// WGS84toBD09 WGS84坐标系->百度坐标系
func WGS84toBD09(lon, lat float64) (float64, float64) {
return GCJ02toBD09(WGS84toGCJ02(lon, lat))
}
// WebMCtoWGS84 球面墨卡托->WGS84坐标系
func WebMCtoWGS84(x, y float64) (lon, lat float64) {
	// Out-of-range easting is passed through untouched.
	if !(x >= -mc && x <= mc) {
		return x, y
	}
	lon = x / mc * 180
	// Undo the Mercator latitude stretch: lat = gd(y') where
	// gd(t) = 2*atan(e^t) - pi/2.
	lat = y / mc * 180
	lat = 180 / math.Pi * (2*math.Atan(math.Exp(lat*math.Pi/180)) - math.Pi/2)
	return lon, lat
}
// WGS84toWebMC WGS84坐标系->球面墨卡托
func WGS84toWebMC(lon, lat float64) (x, y float64) {
	// Easting is linear in longitude; northing applies the Mercator
	// stretch before scaling to meters.
	x = lon * mc / 180
	y = math.Log(math.Tan((90+lat)*math.Pi/360)) / (math.Pi / 180) * mc / 180
	return x, y
}
func transform(x, y float64) (lon, lat float64) {
absX := math.Sqrt(math.Abs(x))
xPi, yPi := x*math.Pi, y*math.Pi
d := 20.0*math.Sin(6.0*xPi) + 20.0*math.Sin(2.0*xPi)
lat, lon = d, d
lat += 20.0*math.Sin(yPi) + 40.0*math.Sin(yPi/3.0)
lon += 20.0*math.Sin(xPi) + 40.0*math.Sin(xPi/3.0)
lat += 160.0*math.Sin(yPi/12.0) + 320*math.Sin(yPi/30.0)
lon += 150.0*math.Sin(xPi/12.0) + 300.0*math.Sin(xPi/30.0)
lat *= 2.0 / 3.0
lon *= 2.0 / 3.0
lat += -100.0 + 2.0*x + 3.0*y + 0.2*y*y + 0.1*x*y + 0.2*absX
lon += 300.0 + x + 2.0*y + 0.1*x*x + 0.1*x*y + 0.1*absX
return
}
// 百度墨卡托
// Piecewise polynomial fit used for Baidu Mercator conversions.
// mcBand holds the northing breakpoints (meters, descending); mc2ll the
// matching coefficient rows consumed by convert: f[0] + f[1]*|x| gives
// the first output, f[2..8] are polynomial coefficients in |y|/f[9].
var mcBand = []float64{12890594.86, 8362377.87, 5591021, 3481989.83, 1678043.12, 0}
var mc2ll = [][]float64{
	[]float64{1.410526172116255e-8, 0.00000898305509648872, -1.9939833816331, 200.9824383106796, -187.2403703815547, 91.6087516669843, -23.38765649603339, 2.57121317296198, -0.03801003308653, 17337981.2},
	[]float64{-7.435856389565537e-9, 0.000008983055097726239, -0.78625201886289, 96.32687599759846, -1.85204757529826, -59.36935905485877, 47.40033549296737, -16.50741931063887, 2.28786674699375, 10260144.86},
	[]float64{-3.030883460898826e-8, 0.00000898305509983578, 0.30071316287616, 59.74293618442277, 7.357984074871, -25.38371002664745, 13.45380521110908, -3.29883767235584, 0.32710905363475, 6856817.37},
	[]float64{-1.981981304930552e-8, 0.000008983055099779535, 0.03278182852591, 40.31678527705744, 0.65659298677277, -4.44255534477492, 0.85341911805263, 0.12923347998204, -0.04625736007561, 4482777.06},
	[]float64{3.09191371068437e-9, 0.000008983055096812155, 0.00006995724062, 23.10934304144901, -0.00023663490511, -0.6321817810242, -0.00663494467273, 0.03430082397953, -0.00466043876332, 2555164.4},
	[]float64{2.890871144776878e-9, 0.000008983055095805407, -3.068298e-8, 7.47137025468032, -0.00000353937994, -0.02145144861037, -0.00001234426596, 0.00010322952773, -0.00000323890364, 826088.5},
}
// llBand holds latitude breakpoints (degrees, descending) with ll2mc
// the matching coefficient rows for the inverse direction
// (lat/lon -> Baidu Mercator), same row layout as mc2ll.
var llBand = []float64{75, 60, 45, 30, 15, 0}
var ll2mc = [][]float64{
	[]float64{-0.0015702102444, 111320.7020616939, 1704480524535203, -10338987376042340, 26112667856603880, -35149669176653700, 26595700718403920, -10725012454188240, 1800819912950474, 82.5},
	[]float64{0.0008277824516172526, 111320.7020463578, 647795574.6671607, -4082003173.641316, 10774905663.51142, -15171875531.51559, 12053065338.62167, -5124939663.577472, 913311935.9512032, 67.5},
	[]float64{0.00337398766765, 111320.7020202162, 4481351.045890365, -23393751.19931662, 79682215.47186455, -115964993.2797253, 97236711.15602145, -43661946.33752821, 8477230.501135234, 52.5},
	[]float64{0.00220636496208, 111320.7020209128, 51751.86112841131, 3796837.749470245, 992013.7397791013, -1221952.21711287, 1340652.697009075, -620943.6990984312, 144416.9293806241, 37.5},
	[]float64{-0.0003441963504368392, 111320.7020576856, 278.2353980772752, 2485758.690035394, 6070.750963243378, 54821.18345352118, 9540.606633304236, -2710.55326746645, 1405.483844121726, 22.5},
	[]float64{-0.0003218135878613132, 111320.7020701615, 0.00369383431289, 823725.6402795718, 0.46104986909093, 2351.343141331292, 1.58060784298199, 8.77738589078284, 0.37238884252424, 7.45},
}
// BD09MCtoBD09 百度墨卡托->百度坐标系
func BD09MCtoBD09(x, y float64) (float64, float64) {
	x, y = math.Abs(x), math.Abs(y)
	// Pick the coefficient row for the band containing |y|; the last
	// breakpoint is 0, so a row is always found.
	var coeffs []float64
	for i, bound := range mcBand {
		if y >= bound {
			coeffs = mc2ll[i]
			break
		}
	}
	return convert(x, y, coeffs)
}
// BD09MCtoWGS84 百度墨卡托->WGS84坐标系
func BD09MCtoWGS84(x, y float64) (float64, float64) {
return BD09toWGS84(BD09MCtoBD09(x, y))
}
// BD09toBD09MC 百度坐标系->百度墨卡托
func BD09toBD09MC(lon, lat float64) (float64, float64) {
	lon = getLoop(lon, -180, 180)
	lat = getRange(lat, -74, 74)
	var f []float64
	// Northern bands: first breakpoint at or below lat.
	for i := 0; i < len(llBand); i++ {
		if lat >= llBand[i] {
			f = ll2mc[i]
			break
		}
	}
	// Southern hemisphere fallback: search the mirrored breakpoints.
	// BUG FIX: this previously ran when a band HAD been found
	// (len(f) > 0), so it was skipped exactly when it was needed —
	// negative latitudes left f nil and convert panicked on f[0].
	// It must run only when the first loop found nothing.
	if len(f) == 0 {
		for i := len(llBand) - 1; i >= 0; i-- {
			if lat <= -llBand[i] {
				f = ll2mc[i]
				break
			}
		}
	}
	return convert(lon, lat, f)
}
// convert applies one row of the piecewise polynomial fit: a linear
// term in |x| for the first output and a degree-6 polynomial in
// |y|/f[9] for the second, with the input signs restored afterwards.
func convert(x, y float64, f []float64) (lon, lat float64) {
	lon = f[0] + f[1]*math.Abs(x)
	t := math.Abs(y) / f[9]
	for i := 0; i <= 6; i++ {
		lat += f[i+2] * math.Pow(t, float64(i))
	}
	if x < 0 {
		lon = -lon
	}
	if y < 0 {
		lat = -lat
	}
	return lon, lat
}
// getLoop wraps lng into [min, max] by repeatedly adding or
// subtracting the interval width (used for longitude wrap-around).
func getLoop(lng, min, max float64) float64 {
	span := max - min
	for lng > max {
		lng -= span
	}
	for lng < min {
		lng += span
	}
	return lng
}
// getRange clamps lat into [min, max]; a bound equal to zero is
// treated as "no bound on that side".
func getRange(lat, min, max float64) float64 {
	switch {
	case min != 0 && max != 0:
		return math.Min(math.Max(lat, min), max)
	case min != 0:
		return math.Max(lat, min)
	case max != 0:
		return math.Min(lat, max)
	default:
		return lat
	}
}
package render3d
import "github.com/unixpickle/model3d/model3d"
// Translate moves the object by an additive offset.
// The wrapper shifts reported bounds outward and maps incoming rays
// back into the wrapped object's frame.
func Translate(obj Object, offset model3d.Coord3D) Object {
	return &translatedObject{obj, offset}
}
// translatedObject shifts a wrapped Object by a constant Offset.
// It implements Object by translating the bounds and by translating
// ray origins in the opposite direction before casting.
type translatedObject struct {
	Object Object
	Offset model3d.Coord3D
}
// Min returns the translated lower bound of the wrapped object.
func (t *translatedObject) Min() model3d.Coord3D {
	return t.Object.Min().Add(t.Offset)
}
// Max returns the translated upper bound of the wrapped object.
func (t *translatedObject) Max() model3d.Coord3D {
	return t.Object.Max().Add(t.Offset)
}
// Cast traces r against the translated object by shifting the ray
// origin into the wrapped object's coordinate frame; the direction is
// unaffected by a pure translation.
func (t *translatedObject) Cast(r *model3d.Ray) (model3d.RayCollision, Material, bool) {
	local := &model3d.Ray{
		Origin:    r.Origin.Sub(t.Offset),
		Direction: r.Direction,
	}
	return t.Object.Cast(local)
}
// Rotate creates a new Object by rotating an Object by
// a given angle (in radians) around a given (unit) axis.
func Rotate(obj Object, axis model3d.Coord3D, angle float64) Object {
	rotation := model3d.NewMatrix3Rotation(axis, angle)
	return MatrixMultiply(obj, rotation)
}
// Scale creates a new Object by scaling an Object by the
// given factor along all axes.
func Scale(obj Object, scale float64) Object {
	scaling := &model3d.Matrix3{
		scale, 0, 0,
		0, scale, 0,
		0, 0, scale,
	}
	return MatrixMultiply(obj, scaling)
}
// MatrixMultiply left-multiplies coordinates in an object
// by a matrix m.
// It can be used for rotations, scaling, etc.
func MatrixMultiply(obj Object, m *model3d.Matrix3) Object {
	// Precompute transformed bounds and the inverse so each Cast only
	// pays for matrix-vector products.
	xform := &model3d.Matrix3Transform{Matrix: m}
	minPoint, maxPoint := xform.ApplyBounds(obj.Min(), obj.Max())
	return &matrixObject{
		Object:  obj,
		MinVal:  minPoint,
		MaxVal:  maxPoint,
		Matrix:  m,
		Inverse: m.Inverse(),
	}
}
// matrixObject applies a linear transformation (Matrix) to a wrapped
// Object. Bounds are precomputed at construction; rays are cast
// through the cached Inverse and hit normals are mapped back through
// Matrix.
type matrixObject struct {
	Object Object
	MinVal model3d.Coord3D
	MaxVal model3d.Coord3D
	Matrix *model3d.Matrix3
	Inverse *model3d.Matrix3
}
// Min returns the precomputed lower bound of the transformed object.
func (m *matrixObject) Min() model3d.Coord3D {
	return m.MinVal
}
// Max returns the precomputed upper bound of the transformed object.
func (m *matrixObject) Max() model3d.Coord3D {
	return m.MaxVal
}
func (m *matrixObject) Cast(r *model3d.Ray) (model3d.RayCollision, Material, bool) {
rc, mat, ok := m.Object.Cast(&model3d.Ray{
Origin: m.Inverse.MulColumn(r.Origin),
Direction: m.Inverse.MulColumn(r.Direction),
})
if ok {
rc.Normal = m.Matrix.MulColumn(rc.Normal).Normalize()
}
return rc, mat, ok
} | render3d/transform.go | 0.867204 | 0.492737 | transform.go | starcoder |
package xz
/* from linux/lib/xz/xz_lzma2.h ***************************************/
/* Range coder constants */
const (
	// Eight input bits are shifted in per normalization step.
	rcShiftBits = 8
	rcTopBits = 24
	// rnge is renormalized whenever it falls below rcTopValue.
	rcTopValue = 1 << rcTopBits
	rcBitModelTotalBits = 11
	// Probability scale: bit models are 11-bit fixed point (0..2048).
	rcBitModelTotal = 1 << rcBitModelTotalBits
	// Adaptation shift: each decoded bit moves its model 1/32 of the
	// way towards the corresponding rail.
	rcMoveBits = 5
)
/*
 * Maximum number of position states. A position state is the lowest pb
 * number of bits of the current uncompressed offset. In some places there
 * are different sets of probabilities for different position states.
 */
const posStatesMax = 1 << 4
/*
 * lzmaState is used to track which LZMA symbols have occurred most recently
 * and in which order. This information is used to predict the next symbol.
 *
 * Symbols:
 * - Literal: One 8-bit byte
 * - Match: Repeat a chunk of data at some distance
 * - Long repeat: Multi-byte match at a recently seen distance
 * - Short repeat: One-byte repeat at a recently seen distance
 *
 * The symbol names are in from STATE-oldest-older-previous. REP means
 * either short or long repeated match, and NONLIT means any non-literal.
 */
type lzmaState int
const (
	stateLitLit lzmaState = iota
	stateMatchLitLit
	stateRepLitLit
	stateShortrepLitLit
	stateMatchLit
	// NOTE(review): the name below looks like a typo for stateRepLit
	// (REP-LIT per the scheme above); only flagged, not renamed, since
	// later parts of the file might reference it.
	stateRepList
	stateShortrepLit
	stateLitMatch
	stateLitLongrep
	stateLitShortrep
	stateNonlitMatch
	stateNonlitRep
)
/* Total number of states */
const states = 12
/* The lowest 7 states indicate that the previous state was a literal. */
const litStates = 7
/* Indicate that the latest symbol was a literal. */
/*
 * Each state maps to its "previous symbol was a literal" counterpart:
 * the first four states collapse to LIT-LIT-LIT, the middle band
 * shifts down by 3, the remaining states by 6.
 */
func lzmaStateLiteral(state *lzmaState) {
	s := *state
	if s <= stateShortrepLitLit {
		s = stateLitLit
	} else if s <= stateLitShortrep {
		s -= 3
	} else {
		s -= 6
	}
	*state = s
}
/* Indicate that the latest symbol was a match. */
func lzmaStateMatch(state *lzmaState) {
	next := stateNonlitMatch
	if *state < litStates {
		next = stateLitMatch
	}
	*state = next
}
/* Indicate that the latest state was a long repeated match. */
func lzmaStateLongRep(state *lzmaState) {
	next := stateNonlitRep
	if *state < litStates {
		next = stateLitLongrep
	}
	*state = next
}
/* Indicate that the latest symbol was a short match. */
func lzmaStateShortRep(state *lzmaState) {
	next := stateNonlitRep
	if *state < litStates {
		next = stateLitShortrep
	}
	*state = next
}
/* Test if the previous symbol was a literal. */
/* The first litStates (7) state values encode a literal history. */
func lzmaStateIsLiteral(state lzmaState) bool {
	return state < litStates
}
/* Each literal coder is divided in three sections:
 * - 0x001-0x0FF: Without match byte
 * - 0x101-0x1FF: With match byte; match bit is 0
 * - 0x201-0x2FF: With match byte; match bit is 1
 *
 * Match byte is used when the previous LZMA symbol was something else than
 * a literal (that is, it was some kind of match).
 */
const literalCoderSize = 0x300
/* Maximum number of literal coders */
const literalCodersMax = 1 << 4
/* Minimum length of a match is two bytes. */
const matchLenMin = 2
/* Match length is encoded with 4, 5, or 10 bits.
 *
 * Length Bits
 * 2-9 4 = Choice=0 + 3 bits
 * 10-17 5 = Choice=1 + Choice2=0 + 3 bits
 * 18-273 10 = Choice=1 + Choice2=1 + 8 bits
 */
/* Probability-array sizes for the three length bands above. */
const (
	lenLowBits = 3
	lenLowSymbols = 1 << lenLowBits
	lenMidBits = 3
	lenMidSymbols = 1 << lenMidBits
	lenHighBits = 8
	lenHighSymbols = 1 << lenHighBits
)
/*
 * Different sets of probabilities are used for match distances that have
 * very short match length: Lengths of 2, 3, and 4 bytes have a separate
 * set of probabilities for each length. The matches with longer length
 * use a shared set of probabilities.
 */
const distStates = 4
/*
 * Get the index of the appropriate probability array for decoding
 * the distance slot.
 *
 * Lengths matchLenMin..matchLenMin+distStates-2 (2..4) each get their
 * own probability set; all longer matches share the last one.
 */
func lzmaGetDistState(l uint32) uint32 {
	// The parameter was renamed from `len` so it no longer shadows the
	// builtin; the redundant else-after-return is dropped.
	if l < distStates+matchLenMin {
		return l - matchLenMin
	}
	return distStates - 1
}
/*
 * The highest two bits of a 32-bit match distance are encoded using six bits.
 * This six-bit value is called a distance slot. This way encoding a 32-bit
 * value takes 6-36 bits, larger values taking more bits.
 */
const (
	distSlotBits = 6
	distSlots = 1 << distSlotBits
)
/* Match distances up to 127 are fully encoded using probabilities. Since
 * the highest two bits (distance slot) are always encoded using six bits,
 * the distances 0-3 don't need any additional bits to encode, since the
 * distance slot itself is the same as the actual distance. distModelStart
 * indicates the first distance slot where at least one additional bit is
 * needed.
 */
const distModelStart = 4
/*
 * Match distances greater than 127 are encoded in three pieces:
 * - distance slot: the highest two bits
 * - direct bits: 2-26 bits below the highest two bits
 * - alignment bits: four lowest bits
 *
 * Direct bits don't use any probabilities.
 *
 * The distance slot value of 14 is for distances 128-191.
 */
const distModelEnd = 14
/* Distance slots that indicate a distance <= 127. */
const (
	fullDistancesBits = distModelEnd / 2
	fullDistances = 1 << fullDistancesBits
)
/*
 * For match distances greater than 127, only the highest two bits and the
 * lowest four bits (alignment) is encoded using probabilities.
 */
const (
	alignBits = 4
	alignSize = 1 << alignBits
)
/* from linux/lib/xz/xz_dec_lzma2.c ***********************************/
/*
 * Range decoder initialization eats the first five bytes of each LZMA chunk.
 */
const rcInitBytes = 5
/*
 * Minimum number of usable input buffer to safely decode one LZMA symbol.
 * The worst case is that we decode 22 bits using probabilities and 26
 * direct bits. This may decode at maximum of 20 bytes of input. However,
 * lzmaMain does an extra normalization before returning, thus we
 * need to put 21 here.
 */
const lzmaInRequired = 21
/*
 * Dictionary (history buffer)
 *
 * These are always true:
 * start <= pos <= full <= end
 * pos <= limit <= end
 * end == size
 * size <= sizeMax
 * len(buf) <= size
 *
 * The buffer is used circularly: writers wrap pos back to zero when it
 * reaches end (see dictUncompressed / dictFlush).
 */
type dictionary struct {
	/* The history buffer */
	buf []byte
	/* Old position in buf (before decoding more data) */
	start uint32
	/* Position in buf */
	pos uint32
	/*
	 * How full dictionary is. This is used to detect corrupt input that
	 * would read beyond the beginning of the uncompressed stream.
	 */
	full uint32
	/* Write limit; we don't write to buf[limit] or later bytes. */
	limit uint32
	/*
	 * End of the dictionary buffer. This is the same as the
	 * dictionary size.
	 */
	end uint32
	/*
	 * Size of the dictionary as specified in Block Header. This is used
	 * together with "full" to detect corrupt input that would make us
	 * read beyond the beginning of the uncompressed stream.
	 */
	size uint32
	/* Maximum allowed dictionary size. */
	sizeMax uint32
}
/* Range decoder */
/* Holds the binary range coder state plus its current input window. */
type rcDec struct {
	rnge uint32
	code uint32
	/*
	 * Number of initializing bytes remaining to be read
	 * by rcReadInit.
	 */
	initBytesLeft uint32
	/*
	 * Buffer from which we read our input. It can be either
	 * temp.buf or the caller-provided input buffer.
	 */
	in []byte
	inPos int
	inLimit int
}
/* Probabilities for a length decoder. */
/* See the length-encoding table above lenLowBits for the band layout. */
type lzmaLenDec struct {
	/* Probability of match length being at least 10 */
	choice uint16
	/* Probability of match length being at least 18 */
	choice2 uint16
	/* Probabilities for match lengths 2-9 */
	low [posStatesMax][lenLowSymbols]uint16
	/* Probabilities for match lengths 10-17 */
	mid [posStatesMax][lenMidSymbols]uint16
	/* Probabilities for match lengths 18-273 */
	high [lenHighSymbols]uint16
}
/* Full adaptive state of the LZMA decoder (reset by lzmaReset). */
type lzmaDec struct {
	/* Distances of latest four matches */
	rep0 uint32
	rep1 uint32
	rep2 uint32
	rep3 uint32
	/* Types of the most recently seen LZMA symbols */
	state lzmaState
	/*
	 * Length of a match. This is updated so that dictRepeat can
	 * be called again to finish repeating the whole match.
	 */
	len uint32
	/*
	 * LZMA properties or related bit masks (number of literal
	 * context bits, a mask derived from the number of literal
	 * position bits, and a mask derived from the number
	 * position bits)
	 */
	lc uint32
	literalPosMask uint32
	posMask uint32
	/* If 1, it's a match. Otherwise it's a single 8-bit literal. */
	isMatch [states][posStatesMax]uint16
	/* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */
	isRep [states]uint16
	/*
	 * If 0, distance of a repeated match is rep0.
	 * Otherwise check isRep1.
	 */
	isRep0 [states]uint16
	/*
	 * If 0, distance of a repeated match is rep1.
	 * Otherwise check isRep2.
	 */
	isRep1 [states]uint16
	/* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */
	isRep2 [states]uint16
	/*
	 * If 1, the repeated match has length of one byte. Otherwise
	 * the length is decoded from repLenDec.
	 */
	isRep0Long [states][posStatesMax]uint16
	/*
	 * Probability tree for the highest two bits of the match
	 * distance. There is a separate probability tree for match
	 * lengths of 2 (i.e. matchLenMin), 3, 4, and [5, 273].
	 */
	distSlot [distStates][distSlots]uint16
	/*
	 * Probability trees for additional bits for match distance
	 * when the distance is in the range [4, 127].
	 */
	distSpecial [fullDistances - distModelEnd]uint16
	/*
	 * Probability tree for the lowest four bits of a match
	 * distance that is equal to or greater than 128.
	 */
	distAlign [alignSize]uint16
	/* Length of a normal match */
	matchLenDec lzmaLenDec
	/* Length of a repeated match */
	repLenDec lzmaLenDec
	/* Probabilities of literals */
	literal [literalCodersMax][literalCoderSize]uint16
}
// type of lzma2Dec.sequence
// (resume points for the LZMA2 chunk-header state machine; see the
// "Position in xzDecLZMA2Run" comment on lzma2Dec.sequence)
type lzma2Seq int
const (
	seqControl lzma2Seq = iota
	seqUncompressed1
	seqUncompressed2
	seqCompressed0
	seqCompressed1
	seqProperties
	seqLZMAPrepare
	seqLZMARun
	seqCopy
)
/* State of the LZMA2 layer that frames individual LZMA chunks. */
type lzma2Dec struct {
	/* Position in xzDecLZMA2Run. */
	sequence lzma2Seq
	/* Next position after decoding the compressed size of the chunk. */
	nextSequence lzma2Seq
	/* Uncompressed size of LZMA chunk (2 MiB at maximum) */
	uncompressed int
	/*
	 * Compressed size of LZMA chunk or compressed/uncompressed
	 * size of uncompressed chunk (64 KiB at maximum)
	 */
	compressed int
	/*
	 * True if dictionary reset is needed. This is false before
	 * the first chunk (LZMA or uncompressed).
	 */
	needDictReset bool
	/*
	 * True if new LZMA properties are needed. This is false
	 * before the first LZMA chunk.
	 */
	needProps bool
}
/* Top-level LZMA2 decoder object combining all sub-states. */
type xzDecLZMA2 struct {
	/*
	 * The order below is important on x86 to reduce code size and
	 * it shouldn't hurt on other platforms. Everything up to and
	 * including lzma.posMask are in the first 128 bytes on x86-32,
	 * which allows using smaller instructions to access those
	 * variables. On x86-64, fewer variables fit into the first 128
	 * bytes, but this is still the best order without sacrificing
	 * the readability by splitting the structures.
	 */
	rc rcDec
	dict dictionary
	lzma2 lzma2Dec
	lzma lzmaDec
	/*
	 * Temporary buffer which holds small number of input bytes between
	 * decoder calls. See lzma2LZMA for details.
	 */
	temp struct {
		buf []byte // slice buf will be backed by bufArray
		bufArray [3 * lzmaInRequired]byte
	}
}
/**************
 * Dictionary *
 **************/
/*
 * Reset the dictionary state. When in single-call mode, set up the beginning
 * of the dictionary to point to the actual output buffer.
 */
func dictReset(dict *dictionary, b *xzBuf) {
	// b is unused in this port but kept so call sites stay symmetric
	// with the reference implementation.
	dict.start, dict.pos, dict.limit, dict.full = 0, 0, 0, 0
}
/* Set dictionary write limit */
/* Allow at most outMax more bytes, clipped to the end of the buffer. */
func dictLimit(dict *dictionary, outMax int) {
	avail := dict.end - dict.pos
	if avail <= uint32(outMax) {
		dict.limit = dict.end
		return
	}
	dict.limit = dict.pos + uint32(outMax)
}
/* Return true if at least one byte can be written into the dictionary. */
/* (i.e. pos has not yet reached the limit set by dictLimit) */
func dictHasSpace(dict *dictionary) bool {
	return dict.pos < dict.limit
}
/*
 * Get a byte from the dictionary at the given distance. The distance is
 * assumed to valid, or as a special case, zero when the dictionary is
 * still empty. This special case is needed for single-call decoding to
 * avoid writing a '\x00' to the end of the destination buffer.
 */
func dictGet(dict *dictionary, dist uint32) uint32 {
	if dict.full == 0 {
		// Empty dictionary: see the special case above.
		return 0
	}
	offset := dict.pos - dist - 1
	if dist >= dict.pos {
		// Wrap around the circular buffer.
		offset += dict.end
	}
	return uint32(dict.buf[offset])
}
/*
 * Put one byte into the dictionary. It is assumed that there is space for it.
 *
 * Fix: the parameter was named `byte`, shadowing the predeclared type
 * identifier; renamed to b (behavior unchanged, Go call sites are
 * positional so this is interface-compatible).
 */
func dictPut(dict *dictionary, b byte) {
	dict.buf[dict.pos] = b
	dict.pos++
	if dict.full < dict.pos {
		dict.full = dict.pos
	}
}
/*
 * Repeat given number of bytes from the given distance. If the distance is
 * invalid, false is returned. On success, true is returned and *len is
 * updated to indicate how many bytes were left to be repeated.
 *
 * The copy is intentionally byte-at-a-time: source and destination may
 * overlap when dist is smaller than the remaining length, and the
 * source index wraps at dict.end because the buffer is circular.
 */
func dictRepeat(dict *dictionary, len *uint32, dist uint32) bool {
	var back uint32
	var left uint32
	// A distance reaching past everything written so far (or past the
	// declared dictionary size) means corrupt input.
	if dist >= dict.full || dist >= dict.size {
		return false
	}
	// Copy only what fits below the current write limit; the remainder
	// stays in *len for a later call.
	left = dict.limit - dict.pos
	if left > *len {
		left = *len
	}
	*len -= left
	back = dict.pos - dist - 1
	if dist >= dict.pos {
		back += dict.end
	}
	for {
		dict.buf[dict.pos] = dict.buf[back]
		dict.pos++
		back++
		if back == dict.end {
			back = 0
		}
		left--
		if !(left > 0) {
			break
		}
	}
	if dict.full < dict.pos {
		dict.full = dict.pos
	}
	return true
}
/* Copy uncompressed data as is from input to dictionary and output buffers. */
/*
 * Each iteration copies copySize = min(input available, output space,
 * room before the dictionary wraps, remaining chunk size *left).
 */
func dictUncompressed(dict *dictionary, b *xzBuf, left *int) {
	var copySize int
	for *left > 0 && b.inPos < len(b.in) && b.outPos < len(b.out) {
		copySize = len(b.in) - b.inPos
		if copySize > len(b.out)-b.outPos {
			copySize = len(b.out) - b.outPos
		}
		if copySize > int(dict.end-dict.pos) {
			copySize = int(dict.end - dict.pos)
		}
		if copySize > *left {
			copySize = *left
		}
		*left -= copySize
		copy(dict.buf[dict.pos:], b.in[b.inPos:b.inPos+copySize])
		dict.pos += uint32(copySize)
		if dict.full < dict.pos {
			dict.full = dict.pos
		}
		// Wrap the circular dictionary, then mirror the same bytes
		// straight to the output buffer.
		if dict.pos == dict.end {
			dict.pos = 0
		}
		copy(b.out[b.outPos:], b.in[b.inPos:b.inPos+copySize])
		dict.start = dict.pos
		b.outPos += copySize
		b.inPos += copySize
	}
}
/*
 * Flush pending data from dictionary to b.out. It is assumed that there is
 * enough space in b.out. This is guaranteed because caller uses dictLimit
 * before decoding data into the dictionary.
 */
func dictFlush(dict *dictionary, b *xzBuf) int {
	n := int(dict.pos - dict.start)
	// Wrap the circular write position before recording the new start.
	if dict.pos == dict.end {
		dict.pos = 0
	}
	copy(b.out[b.outPos:], dict.buf[dict.start:dict.start+uint32(n)])
	dict.start = dict.pos
	b.outPos += n
	return n
}
/*****************
 * Range decoder *
 *****************/
/* Reset the range decoder. */
func rcReset(rc *rcDec) {
	// Start with a full range; the first rcInitBytes input bytes are
	// folded into code later by rcReadInit.
	rc.code = 0
	rc.rnge = ^uint32(0)
	rc.initBytesLeft = rcInitBytes
}
/*
 * Read the first five initial bytes into rc->code if they haven't been
 * read already. (Yes, the first byte gets completely ignored.)
 *
 * Returns false when the input ran dry before initialization finished;
 * the call can then be resumed with more input.
 */
func rcReadInit(rc *rcDec, b *xzBuf) bool {
	for ; rc.initBytesLeft > 0; rc.initBytesLeft-- {
		if b.inPos == len(b.in) {
			return false
		}
		rc.code = rc.code<<8 | uint32(b.in[b.inPos])
		b.inPos++
	}
	return true
}
/* Return true if there may not be enough input for the next decoding loop. */
/*
 * inLimit is set by the LZMA2 layer lzmaInRequired bytes short of the
 * real end so that one full symbol can always be decoded safely.
 */
func rcLimitExceeded(rc *rcDec) bool {
	return rc.inPos > rc.inLimit
}
/*
 * Return true if it is possible (from point of view of range decoder) that
 * we have reached the end of the LZMA chunk.
 */
func rcIsFinished(rc *rcDec) bool {
	return rc.code == 0
}
/* Read the next input byte if needed. */
func rcNormalize(rc *rcDec) {
	if rc.rnge < rcTopValue {
		// Shift one more input byte into code to keep at least
		// rcTopBits of precision in the range.
		rc.rnge <<= rcShiftBits
		rc.code = rc.code<<rcShiftBits + uint32(rc.in[rc.inPos])
		rc.inPos++
	}
}
/* Decode one bit. */
/*
 * Standard binary range-coder step: split rnge at bound according to
 * *prob (rcBitModelTotalBits fixed point), take the branch containing
 * code, and adapt the probability towards the observed bit.
 */
func rcBit(rc *rcDec, prob *uint16) bool {
	var bound uint32
	var bit bool
	rcNormalize(rc)
	bound = (rc.rnge >> rcBitModelTotalBits) * uint32(*prob)
	if rc.code < bound {
		rc.rnge = bound
		// 0-bit: move the model towards rcBitModelTotal.
		*prob += (rcBitModelTotal - *prob) >> rcMoveBits
		bit = false
	} else {
		rc.rnge -= bound
		rc.code -= bound
		// 1-bit: move the model towards zero.
		*prob -= *prob >> rcMoveBits
		bit = true
	}
	return bit
}
/* Decode a bittree starting from the most significant bit. */
/*
 * symbol accumulates decoded bits below a leading 1 sentinel, so the
 * return value lies in [limit, 2*limit); callers subtract or mask the
 * sentinel. Note the body executes at least once (do-while shape), and
 * probs is indexed at symbol-1 because callers pass a slice offset by
 * one (e.g. probs[1:]).
 */
func rcBittree(rc *rcDec, probs []uint16, limit uint32) uint32 {
	var symbol uint32 = 1
	for {
		if rcBit(rc, &probs[symbol-1]) {
			symbol = symbol<<1 + 1
		} else {
			symbol <<= 1
		}
		if !(symbol < limit) {
			break
		}
	}
	return symbol
}
/* Decode a bittree starting from the least significant bit. */
/*
 * Decodes limit bits, ORing each into *dest at increasing bit
 * positions. As in rcBittree, probs is indexed at symbol-1 because
 * callers pass an offset slice.
 */
func rcBittreeReverse(rc *rcDec, probs []uint16, dest *uint32, limit uint32) {
	var symbol uint32 = 1
	var i uint32 = 0
	for {
		if rcBit(rc, &probs[symbol-1]) {
			symbol = symbol<<1 + 1
			*dest += 1 << i
		} else {
			symbol <<= 1
		}
		i++
		if !(i < limit) {
			break
		}
	}
}
/* Decode direct bits (fixed fifty-fifty probability) */
/*
 * Shifts `limit` equiprobable bits into *dest. mask is all-ones when
 * the trial subtraction underflowed (decoded bit 0, range restored),
 * zero otherwise; the `+ mask + 1` turns that into the bit value.
 */
func rcDirect(rc *rcDec, dest *uint32, limit uint32) {
	var mask uint32
	for {
		rcNormalize(rc)
		rc.rnge >>= 1
		rc.code -= rc.rnge
		mask = 0 - rc.code>>31
		rc.code += rc.rnge & mask
		*dest = *dest<<1 + mask + 1
		limit--
		if !(limit > 0) {
			break
		}
	}
}
/********
 * LZMA *
 ********/
/* Get pointer to literal coder probability array. */
/*
 * The coder is selected by the top lc bits of the previous byte plus
 * the low bits of the uncompressed position (literalPosMask).
 */
func lzmaLiteralProbs(s *xzDecLZMA2) []uint16 {
	prev := dictGet(&s.dict, 0)
	idx := prev>>(8-s.lzma.lc) + (s.dict.pos&s.lzma.literalPosMask)<<s.lzma.lc
	return s.lzma.literal[idx][:]
}
/* Decode a literal (one 8-bit byte) */
func lzmaLiteral(s *xzDecLZMA2) {
	var probs []uint16
	var symbol uint32
	var matchByte uint32
	var matchBit uint32
	var offset uint32
	var i uint32
	probs = lzmaLiteralProbs(s)
	if lzmaStateIsLiteral(s.lzma.state) {
		// Previous symbol was a literal: plain 8-bit bittree decode.
		symbol = rcBittree(&s.rc, probs[1:], 0x100)
	} else {
		/*
		 * Previous symbol was a match: the byte at distance rep0
		 * (the "match byte") steers probability selection, bit by
		 * bit, until the decoded bits diverge from it (offset
		 * collapses to zero and the masks become no-ops).
		 */
		symbol = 1
		matchByte = dictGet(&s.dict, s.lzma.rep0) << 1
		offset = 0x100
		for {
			matchBit = matchByte & offset
			matchByte <<= 1
			i = offset + matchBit + symbol
			if rcBit(&s.rc, &probs[i]) {
				symbol = symbol<<1 + 1
				offset &= matchBit
			} else {
				symbol <<= 1
				offset &= ^matchBit
			}
			if !(symbol < 0x100) {
				break
			}
		}
	}
	// symbol is in [0x100, 0x200); byte() drops the sentinel bit.
	dictPut(&s.dict, byte(symbol))
	lzmaStateLiteral(&s.lzma.state)
}
/* Decode the length of the match into s.lzma.len. */
/*
 * choice selects the 2-9 band, choice2 the 10-17 band, otherwise the
 * shared 18-273 band; each band is a bittree whose sentinel is removed
 * by the `- limit` at the end.
 */
func lzmaLen(s *xzDecLZMA2, l *lzmaLenDec, posState uint32) {
	var probs []uint16
	var limit uint32
	switch {
	case !rcBit(&s.rc, &l.choice):
		probs = l.low[posState][:]
		limit = lenLowSymbols
		s.lzma.len = matchLenMin
	case !rcBit(&s.rc, &l.choice2):
		probs = l.mid[posState][:]
		limit = lenMidSymbols
		s.lzma.len = matchLenMin + lenLowSymbols
	default:
		probs = l.high[:]
		limit = lenHighSymbols
		s.lzma.len = matchLenMin + lenLowSymbols + lenMidSymbols
	}
	s.lzma.len += rcBittree(&s.rc, probs[1:], limit) - limit
}
/* Decode a match. The distance will be stored in s.lzma.rep0. */
func lzmaMatch(s *xzDecLZMA2, posState uint32) {
	var probs []uint16
	var distSlot uint32
	var limit uint32
	lzmaStateMatch(&s.lzma.state)
	// Push the distance history down one slot.
	s.lzma.rep3 = s.lzma.rep2
	s.lzma.rep2 = s.lzma.rep1
	s.lzma.rep1 = s.lzma.rep0
	lzmaLen(s, &s.lzma.matchLenDec, posState)
	probs = s.lzma.distSlot[lzmaGetDistState(s.lzma.len)][:]
	distSlot = rcBittree(&s.rc, probs[1:], distSlots) - distSlots
	if distSlot < distModelStart {
		// Distances 0-3: the slot value is the distance itself.
		s.lzma.rep0 = distSlot
	} else {
		limit = distSlot>>1 - 1
		s.lzma.rep0 = 2 + distSlot&1
		if distSlot < distModelEnd {
			// Distances 4-127: remaining bits from distSpecial.
			s.lzma.rep0 <<= limit
			probs = s.lzma.distSpecial[s.lzma.rep0-distSlot:]
			rcBittreeReverse(&s.rc, probs, &s.lzma.rep0, limit)
		} else {
			// Distances >= 128: direct bits plus four align bits.
			rcDirect(&s.rc, &s.lzma.rep0, limit-alignBits)
			s.lzma.rep0 <<= alignBits
			rcBittreeReverse(
				&s.rc, s.lzma.distAlign[1:], &s.lzma.rep0, alignBits)
		}
	}
}
/*
 * Decode a repeated match. The distance is one of the four most recently
 * seen matches. The distance will be stored in s.lzma.rep0.
 */
func lzmaRepMatch(s *xzDecLZMA2, posState uint32) {
	var tmp uint32
	if !rcBit(&s.rc, &s.lzma.isRep0[s.lzma.state]) {
		if !rcBit(&s.rc, &s.lzma.isRep0Long[s.lzma.state][posState]) {
			// Short rep: a single byte at distance rep0.
			lzmaStateShortRep(&s.lzma.state)
			s.lzma.len = 1
			return
		}
	} else {
		// Select rep1/rep2/rep3 and rotate it to the front of the
		// distance history.
		if !rcBit(&s.rc, &s.lzma.isRep1[s.lzma.state]) {
			tmp = s.lzma.rep1
		} else {
			if !rcBit(&s.rc, &s.lzma.isRep2[s.lzma.state]) {
				tmp = s.lzma.rep2
			} else {
				tmp = s.lzma.rep3
				s.lzma.rep3 = s.lzma.rep2
			}
			s.lzma.rep2 = s.lzma.rep1
		}
		s.lzma.rep1 = s.lzma.rep0
		s.lzma.rep0 = tmp
	}
	lzmaStateLongRep(&s.lzma.state)
	lzmaLen(s, &s.lzma.repLenDec, posState)
}
/* LZMA decoder core */
/*
 * Decodes symbols until the dictionary write limit or the safe input
 * limit is reached. Returns false on corrupt input (invalid match
 * distance); true otherwise.
 */
func lzmaMain(s *xzDecLZMA2) bool {
	var posState uint32
	/*
	 * If the dictionary was reached during the previous call, try to
	 * finish the possibly pending repeat in the dictionary.
	 */
	if dictHasSpace(&s.dict) && s.lzma.len > 0 {
		dictRepeat(&s.dict, &s.lzma.len, s.lzma.rep0)
	}
	/*
	 * Decode more LZMA symbols. One iteration may consume up to
	 * lzmaInRequired - 1 bytes.
	 */
	for dictHasSpace(&s.dict) && !rcLimitExceeded(&s.rc) {
		posState = s.dict.pos & s.lzma.posMask
		if !rcBit(&s.rc, &s.lzma.isMatch[s.lzma.state][posState]) {
			lzmaLiteral(s)
		} else {
			if rcBit(&s.rc, &s.lzma.isRep[s.lzma.state]) {
				lzmaRepMatch(s, posState)
			} else {
				lzmaMatch(s, posState)
			}
			if !dictRepeat(&s.dict, &s.lzma.len, s.lzma.rep0) {
				return false
			}
		}
	}
	/*
	 * Having the range decoder always normalized when we are outside
	 * this function makes it easier to correctly handle end of the chunk.
	 */
	rcNormalize(&s.rc)
	return true
}
/*
 * Reset the LZMA decoder and range decoder state. Dictionary is not reset
 * here, because LZMA state may be reset without resetting the dictionary.
 */
func lzmaReset(s *xzDecLZMA2) {
	s.lzma.state = stateLitLit
	s.lzma.rep0 = 0
	s.lzma.rep1 = 0
	s.lzma.rep2 = 0
	s.lzma.rep3 = 0
	/* All probabilities are initialized to the same value, v */
	v := uint16(rcBitModelTotal / 2)
	fill := func(p []uint16) {
		for i := range p {
			p[i] = v
		}
	}
	s.lzma.matchLenDec.choice = v
	s.lzma.matchLenDec.choice2 = v
	s.lzma.repLenDec.choice = v
	s.lzma.repLenDec.choice2 = v
	// One-dimensional probability arrays.
	fill(s.lzma.isRep[:])
	fill(s.lzma.isRep0[:])
	fill(s.lzma.isRep1[:])
	fill(s.lzma.isRep2[:])
	fill(s.lzma.distSpecial[:])
	fill(s.lzma.distAlign[:])
	fill(s.lzma.matchLenDec.high[:])
	fill(s.lzma.repLenDec.high[:])
	// Two-dimensional probability arrays, row by row.
	for i := range s.lzma.isMatch {
		fill(s.lzma.isMatch[i][:])
	}
	for i := range s.lzma.isRep0Long {
		fill(s.lzma.isRep0Long[i][:])
	}
	for i := range s.lzma.distSlot {
		fill(s.lzma.distSlot[i][:])
	}
	for i := range s.lzma.literal {
		fill(s.lzma.literal[i][:])
	}
	for i := range s.lzma.matchLenDec.low {
		fill(s.lzma.matchLenDec.low[i][:])
	}
	for i := range s.lzma.matchLenDec.mid {
		fill(s.lzma.matchLenDec.mid[i][:])
	}
	for i := range s.lzma.repLenDec.low {
		fill(s.lzma.repLenDec.low[i][:])
	}
	for i := range s.lzma.repLenDec.mid {
		fill(s.lzma.repLenDec.mid[i][:])
	}
	rcReset(&s.rc)
}
/*
* Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks
* from the decoded lp and pb values. On success, the LZMA decoder state is
* reset and true is returned.
*/
func lzmaProps(s *xzDecLZMA2, props byte) bool {
	// The property byte encodes (pb*5 + lp)*9 + lc; reject anything above
	// the largest legal encoding.
	if props > (4*5+4)*9+8 {
		return false
	}
	// Peel off pb (position bits) first, then lp (literal position bits),
	// leaving lc (literal context bits) as the remainder.
	s.lzma.posMask = uint32(props) / (9 * 5)
	rest := uint32(props) % (9 * 5)
	s.lzma.posMask = 1<<s.lzma.posMask - 1
	s.lzma.literalPosMask = rest / 9
	s.lzma.lc = rest % 9
	// lc + lp above 4 is not supported by this decoder.
	if s.lzma.lc+s.lzma.literalPosMask > 4 {
		return false
	}
	s.lzma.literalPosMask = 1<<s.lzma.literalPosMask - 1
	lzmaReset(s)
	return true
}
/*********
* LZMA2 *
*********/
/*
* The LZMA decoder assumes that if the input limit (s.rc.inLimit) hasn't
* been exceeded, it is safe to read up to lzmaInRequired bytes. This
* wrapper function takes care of making the LZMA decoder's assumption safe.
*
* As long as there is plenty of input left to be decoded in the current LZMA
* chunk, we decode directly from the caller-supplied input buffer until
* there's lzmaInRequired bytes left. Those remaining bytes are copied into
* s.temp.buf, which (hopefully) gets filled on the next call to this
* function. We decode a few bytes from the temporary buffer so that we can
* continue decoding from the caller-supplied input buffer again.
*/
func lzma2LZMA(s *xzDecLZMA2, b *xzBuf) bool {
	var inAvail int
	var tmp int
	inAvail = len(b.in) - b.inPos
	if len(s.temp.buf) > 0 || s.lzma2.compressed == 0 {
		// Top up the temporary buffer with up to 2*lzmaInRequired bytes,
		// bounded by what remains of this chunk and of the caller's input.
		tmp = 2*lzmaInRequired - len(s.temp.buf)
		if tmp > s.lzma2.compressed-len(s.temp.buf) {
			tmp = s.lzma2.compressed - len(s.temp.buf)
		}
		if tmp > inAvail {
			tmp = inAvail
		}
		copy(s.temp.bufArray[len(s.temp.buf):], b.in[b.inPos:b.inPos+tmp])
		switch {
		case len(s.temp.buf)+tmp == s.lzma2.compressed:
			// The remainder of the chunk now fits in temp; zero-pad the
			// rest of the array so the decoder may safely over-read.
			for i := len(s.temp.buf) + tmp; i < len(s.temp.bufArray); i++ {
				s.temp.bufArray[i] = 0
			}
			s.rc.inLimit = len(s.temp.buf) + tmp
		case len(s.temp.buf)+tmp < lzmaInRequired:
			// Still not enough input to decode safely; stash what we have
			// and wait for the next call.
			s.temp.buf = s.temp.bufArray[:len(s.temp.buf)+tmp]
			b.inPos += tmp
			return true
		default:
			s.rc.inLimit = len(s.temp.buf) + tmp - lzmaInRequired
		}
		s.rc.in = s.temp.bufArray[:]
		s.rc.inPos = 0
		if !lzmaMain(s) || s.rc.inPos > len(s.temp.buf)+tmp {
			return false
		}
		s.lzma2.compressed -= s.rc.inPos
		if s.rc.inPos < len(s.temp.buf) {
			// Decoding stayed within previously stashed bytes: shift the
			// unconsumed tail to the front and keep accumulating.
			copy(s.temp.buf, s.temp.buf[s.rc.inPos:])
			s.temp.buf = s.temp.buf[:len(s.temp.buf)-s.rc.inPos]
			return true
		}
		// temp fully consumed; account for the bytes taken from b.in.
		b.inPos += s.rc.inPos - len(s.temp.buf)
		s.temp.buf = nil
	}
	inAvail = len(b.in) - b.inPos
	if inAvail >= lzmaInRequired {
		// Decode directly from the caller's buffer while enough input
		// remains for the decoder's read-ahead assumption.
		s.rc.in = b.in
		s.rc.inPos = b.inPos
		if inAvail >= s.lzma2.compressed+lzmaInRequired {
			s.rc.inLimit = b.inPos + s.lzma2.compressed
		} else {
			s.rc.inLimit = len(b.in) - lzmaInRequired
		}
		if !lzmaMain(s) {
			return false
		}
		inAvail = s.rc.inPos - b.inPos
		if inAvail > s.lzma2.compressed {
			return false
		}
		s.lzma2.compressed -= inAvail
		b.inPos = s.rc.inPos
	}
	inAvail = len(b.in) - b.inPos
	if inAvail < lzmaInRequired {
		// Fewer than lzmaInRequired bytes left: stash them for next time.
		if inAvail > s.lzma2.compressed {
			inAvail = s.lzma2.compressed
		}
		s.temp.buf = s.temp.bufArray[:inAvail]
		copy(s.temp.buf, b.in[b.inPos:])
		b.inPos += inAvail
	}
	return true
}
/*
* Take care of the LZMA2 control layer, and forward the job of actual LZMA
* decoding or copying of uncompressed chunks to other functions.
*/
func xzDecLZMA2Run(s *xzDecLZMA2, b *xzBuf) xzRet {
	var tmp int
	for b.inPos < len(b.in) || s.lzma2.sequence == seqLZMARun {
		switch s.lzma2.sequence {
		case seqControl:
			/*
			 * LZMA2 control byte
			 *
			 * Exact values:
			 *   0x00   End marker
			 *   0x01   Dictionary reset followed by
			 *          an uncompressed chunk
			 *   0x02   Uncompressed chunk (no dictionary reset)
			 *
			 * Highest three bits (s.control & 0xE0):
			 *   0xE0   Dictionary reset, new properties and state
			 *          reset, followed by LZMA compressed chunk
			 *   0xC0   New properties and state reset, followed
			 *          by LZMA compressed chunk (no dictionary
			 *          reset)
			 *   0xA0   State reset using old properties,
			 *          followed by LZMA compressed chunk (no
			 *          dictionary reset)
			 *   0x80   LZMA chunk (no dictionary or state reset)
			 *
			 * For LZMA compressed chunks, the lowest five bits
			 * (s.control & 1F) are the highest bits of the
			 * uncompressed size (bits 16-20).
			 *
			 * A new LZMA2 stream must begin with a dictionary
			 * reset. The first LZMA chunk must set new
			 * properties and reset the LZMA state.
			 *
			 * Values that don't match anything described above
			 * are invalid and we return xzDataError.
			 */
			tmp = int(b.in[b.inPos])
			b.inPos++
			if tmp == 0x00 {
				return xzStreamEnd
			}
			switch {
			case tmp >= 0xe0 || tmp == 0x01:
				s.lzma2.needProps = true
				s.lzma2.needDictReset = false
				dictReset(&s.dict, b)
			case s.lzma2.needDictReset:
				return xzDataError
			}
			if tmp >= 0x80 {
				// LZMA compressed chunk: low five bits are size bits 16-20.
				s.lzma2.uncompressed = (tmp & 0x1f) << 16
				s.lzma2.sequence = seqUncompressed1
				switch {
				case tmp >= 0xc0:
					/*
					 * When there are new properties,
					 * state reset is done at
					 * seqProperties.
					 */
					s.lzma2.needProps = false
					s.lzma2.nextSequence = seqProperties
				case s.lzma2.needProps:
					return xzDataError
				default:
					s.lzma2.nextSequence = seqLZMAPrepare
					if tmp >= 0xa0 {
						lzmaReset(s)
					}
				}
			} else {
				// Uncompressed chunk: only 0x01 and 0x02 are valid here.
				if tmp > 0x02 {
					return xzDataError
				}
				s.lzma2.sequence = seqCompressed0
				s.lzma2.nextSequence = seqCopy
			}
		case seqUncompressed1:
			// Middle byte (bits 8-15) of the uncompressed size.
			s.lzma2.uncompressed += int(b.in[b.inPos]) << 8
			b.inPos++
			s.lzma2.sequence = seqUncompressed2
		case seqUncompressed2:
			// Low byte of the uncompressed size; stored value is size-1.
			s.lzma2.uncompressed += int(b.in[b.inPos]) + 1
			b.inPos++
			s.lzma2.sequence = seqCompressed0
		case seqCompressed0:
			// High byte of the compressed size.
			s.lzma2.compressed += int(b.in[b.inPos]) << 8
			b.inPos++
			s.lzma2.sequence = seqCompressed1
		case seqCompressed1:
			// Low byte of the compressed size; stored value is size-1.
			s.lzma2.compressed += int(b.in[b.inPos]) + 1
			b.inPos++
			s.lzma2.sequence = s.lzma2.nextSequence
		case seqProperties:
			if !lzmaProps(s, b.in[b.inPos]) {
				return xzDataError
			}
			b.inPos++
			s.lzma2.sequence = seqLZMAPrepare
			fallthrough
		case seqLZMAPrepare:
			if s.lzma2.compressed < rcInitBytes {
				return xzDataError
			}
			if !rcReadInit(&s.rc, b) {
				return xzOK
			}
			s.lzma2.compressed -= rcInitBytes
			s.lzma2.sequence = seqLZMARun
			fallthrough
		case seqLZMARun:
			/*
			 * Set dictionary limit to indicate how much we want
			 * to be encoded at maximum. Decode new data into the
			 * dictionary. Flush the new data from dictionary to
			 * b.out. Check if we finished decoding this chunk.
			 * In case the dictionary got full but we didn't fill
			 * the output buffer yet, we may run this loop
			 * multiple times without changing s.lzma2.sequence.
			 */
			outMax := len(b.out) - b.outPos
			if outMax > s.lzma2.uncompressed {
				outMax = s.lzma2.uncompressed
			}
			dictLimit(&s.dict, outMax)
			if !lzma2LZMA(s, b) {
				return xzDataError
			}
			s.lzma2.uncompressed -= dictFlush(&s.dict, b)
			switch {
			case s.lzma2.uncompressed == 0:
				// Chunk finished: all compressed input must be consumed
				// and the range coder must be in its finished state.
				if s.lzma2.compressed > 0 || s.lzma.len > 0 ||
					!rcIsFinished(&s.rc) {
					return xzDataError
				}
				rcReset(&s.rc)
				s.lzma2.sequence = seqControl
			case b.outPos == len(b.out) ||
				b.inPos == len(b.in) &&
					len(s.temp.buf) < s.lzma2.compressed:
				// Output full, or input exhausted mid-chunk: yield.
				return xzOK
			}
		case seqCopy:
			// Uncompressed chunk: copy straight through the dictionary.
			dictUncompressed(&s.dict, b, &s.lzma2.compressed)
			if s.lzma2.compressed > 0 {
				return xzOK
			}
			s.lzma2.sequence = seqControl
		}
	}
	return xzOK
}
/*
* Allocate memory for LZMA2 decoder. xzDecLZMA2Reset must be used
* before calling xzDecLZMA2Run.
*/
func xzDecLZMA2Create(dictMax uint32) *xzDecLZMA2 {
	// Only the dictionary size cap is recorded here; the dictionary buffer
	// itself is allocated by xzDecLZMA2Reset.
	s := &xzDecLZMA2{}
	s.dict.sizeMax = dictMax
	return s
}
/*
* Decode the LZMA2 properties (one byte) and reset the decoder. Return
* xzOK on success, xzMemlimitError if the preallocated dictionary is not
* big enough, and xzOptionsError if props indicates something that this
* decoder doesn't support.
*/
func xzDecLZMA2Reset(s *xzDecLZMA2, props byte) xzRet {
if props > 40 {
return xzOptionsError // Bigger than 4 GiB
}
if props == 40 {
s.dict.size = ^uint32(0)
} else {
s.dict.size = uint32(2 + props&1)
s.dict.size <<= props>>1 + 11
}
if s.dict.size > s.dict.sizeMax {
return xzMemlimitError
}
s.dict.end = s.dict.size
if len(s.dict.buf) < int(s.dict.size) {
s.dict.buf = make([]byte, s.dict.size)
}
s.lzma.len = 0
s.lzma2.sequence = seqControl
s.lzma2.compressed = 0
s.lzma2.uncompressed = 0
s.lzma2.needDictReset = true
s.temp.buf = nil
return xzOK
} | Godeps/_workspace/src/xi2.org/x/xz/dec_lzma2.go | 0.5794 | 0.404331 | dec_lzma2.go | starcoder |
package beerjson
import "encoding/json"
import "fmt"
// ID: https://raw.githubusercontent.com/beerjson/beerjson/master/json/recipe.json
// The efficiencyType stores each efficiency component.
type EfficiencyType struct {
	// The percentage of sugar from the grain yield that is extracted and converted during the mash.
	Conversion *PercentType `json:"conversion,omitempty"`
	// The percentage of sugar that makes it from the mash tun to the kettle.
	Lauter *PercentType `json:"lauter,omitempty"`
	// The percentage of sugar that makes it from the grain to the kettle.
	Mash *PercentType `json:"mash,omitempty"`
	// The percentage of sugar that makes it from the grain to the fermenter.
	// Struct tags must be a single space-separated list of key:"value" pairs;
	// the previous `json:"…", validate:"…"` form hid the validate key from
	// reflect.StructTag.Get.
	Brewhouse PercentType `json:"brewhouse" validate:"required"`
}
// IngredientsType collects all ingredient additions used in a recipe.
type IngredientsType struct {
	// fermentable_additions collects all the fermentable ingredients for use in a recipe.
	// Tag fixed to the conventional space-separated form so validators can see it.
	FermentableAdditions []FermentableAdditionType `json:"fermentable_additions" validate:"required"`
	// hop_additions collects all the hops for use in a recipe
	HopAdditions []HopAdditionType `json:"hop_additions,omitempty"`
	// miscellaneous_additions collects all the miscellaneous items for use in a recipe
	MiscellaneousAdditions []MiscellaneousAdditionType `json:"miscellaneous_additions,omitempty"`
	// culture_additions collects all the culture items for use in a recipe
	CultureAdditions []CultureAdditionType `json:"culture_additions,omitempty"`
	// water_additions collects all the water items for use in a recipe
	WaterAdditions []WaterAdditionType `json:"water_additions,omitempty"`
}
// RecipeType composes the information stored in a beerjson recipe.
// All struct tags use the conventional single-string, space-separated
// key:"value" form; the previous comma-separated form made the validate
// key invisible to reflect.StructTag.Get.
type RecipeType struct {
	// The final carbonation of the beer when packaged or served.
	Carbonation *float64 `json:"carbonation,omitempty"`
	// FermentationProcedureType defines the procedure for performing fermentation.
	Fermentation *FermentationProcedureType `json:"fermentation,omitempty"`
	// Defines the procedure for performing a boil. A boil procedure with no steps is the same as a standard single step boil.
	Boil *BoilProcedureType `json:"boil,omitempty"`
	// Used to store subjective tasting notes, and rating.
	Taste *TasteType `json:"taste,omitempty"`
	// This defines the procedure for performing unique mashing processes.
	Mash *MashProcedureType `json:"mash,omitempty"`
	// The gravity of beer at the end of fermentation.
	FinalGravity *GravityType `json:"final_gravity,omitempty"`
	// The final beer pH at the end of fermentation.
	BeerPH *AcidityType `json:"beer_pH,omitempty"`
	Created *DateType `json:"created,omitempty"`
	// Used to store each efficiency component, including conversion, and brewhouse.
	Efficiency EfficiencyType `json:"efficiency" validate:"required"`
	Style *RecipeStyleType `json:"style,omitempty"`
	// Describes the procedure for packaging your beverage.
	Packaging *PackagingProcedureType `json:"packaging,omitempty"`
	Name string `json:"name" validate:"required"`
	Author string `json:"author" validate:"required"`
	Coauthor *string `json:"coauthor,omitempty"`
	// The gravity of wort when transffered to the fermenter.
	OriginalGravity *GravityType `json:"original_gravity,omitempty"`
	// The color of the finished beer, using SRM or EBC.
	ColorEstimate *ColorType `json:"color_estimate,omitempty"`
	// The total apparent attenuation of the finished beer after fermentation.
	ApparentAttenuation *PercentType `json:"apparent_attenuation,omitempty"`
	CaloriesPerPint *float64 `json:"calories_per_pint,omitempty"`
	// The volume into the fermenter.
	BatchSize VolumeType `json:"batch_size" validate:"required"`
	// A collection of all ingredients used for the recipe.
	Ingredients IngredientsType `json:"ingredients" validate:"required"`
	Notes *string `json:"notes,omitempty"`
	RecipeTypeType RecipeTypeType `json:"type" validate:"required"`
	AlcoholByVolume *PercentType `json:"alcohol_by_volume,omitempty"`
	// Used to differentiate which IBU formula is being used in a recipe. If formula is modified in any way, eg to support whirlpool/flameout additions etc etc, please use `Other` for transparency.
	IbuEstimate *IBUEstimateType `json:"ibu_estimate,omitempty"`
}
// RecipeTypeType identifies the kind of recipe (all grain, extract, cider, ...).
type RecipeTypeType string

// Enumerated values accepted by RecipeTypeType.
const (
	RecipeTypeType_Cider       RecipeTypeType = "cider"
	RecipeTypeType_Kombucha    RecipeTypeType = "kombucha"
	RecipeTypeType_Soda        RecipeTypeType = "soda"
	RecipeTypeType_Other       RecipeTypeType = "other"
	RecipeTypeType_Mead        RecipeTypeType = "mead"
	RecipeTypeType_Wine        RecipeTypeType = "wine"
	RecipeTypeType_Extract     RecipeTypeType = "extract"
	RecipeTypeType_PartialMash RecipeTypeType = "partial mash"
	RecipeTypeType_AllGrain    RecipeTypeType = "all grain"
)

// UnmarshalJSON decodes a JSON string and verifies that it is one of the
// enumerated recipe types, returning an error otherwise.
func (s *RecipeTypeType) UnmarshalJSON(b []byte) error {
	var v string
	if err := json.Unmarshal(b, &v); err != nil {
		return err
	}
	// Mirror the historical behavior: assign first, then validate.
	*s = RecipeTypeType(v)
	switch *s {
	case RecipeTypeType_Cider, RecipeTypeType_Kombucha, RecipeTypeType_Soda,
		RecipeTypeType_Other, RecipeTypeType_Mead, RecipeTypeType_Wine,
		RecipeTypeType_Extract, RecipeTypeType_PartialMash, RecipeTypeType_AllGrain:
		return nil
	}
	return fmt.Errorf("RecipeTypeType: value '%v' does not match any value", v)
}
// TasteType holds subjective tasting notes and a numeric rating.
// Tags fixed to the conventional space-separated form so the validate key
// is visible to reflect.StructTag.Get.
type TasteType struct {
	Notes  string  `json:"notes" validate:"required"`
	Rating float64 `json:"rating" validate:"required"`
}
package coordinate
import (
"math"
)
const (
xPi = 3.14159265358979324 * 3000.0 / 180.0
// pi
pi = 3.1415926535897932384626
// 长半轴
a = 6378245.0
// 扁率
ee = 0.00669342162296594323
)
// transformLatitude computes the latitude perturbation of the WGS84->GCJ02
// obfuscation. Callers pass offsets (longitude-105, latitude-35).
func transformLatitude(longitude, latitude float64) float64 {
	x, y := longitude, latitude
	base := -100.0 + 2.0*x + 3.0*y + 0.2*y*y + 0.1*x*y + 0.2*math.Sqrt(math.Abs(x))
	wave1 := (20.0*math.Sin(6.0*x*pi) + 20.0*math.Sin(2.0*x*pi)) * 2.0 / 3.0
	wave2 := (20.0*math.Sin(y*pi) + 40.0*math.Sin(y/3.0*pi)) * 2.0 / 3.0
	wave3 := (160.0*math.Sin(y/12.0*pi) + 320*math.Sin(y*pi/30.0)) * 2.0 / 3.0
	return base + wave1 + wave2 + wave3
}
// transformLongitude computes the longitude perturbation of the WGS84->GCJ02
// obfuscation. Callers pass offsets (longitude-105, latitude-35).
func transformLongitude(longitude, latitude float64) float64 {
	x, y := longitude, latitude
	base := 300.0 + x + 2.0*y + 0.1*x*x + 0.1*x*y + 0.1*math.Sqrt(math.Abs(x))
	wave1 := (20.0*math.Sin(6.0*x*pi) + 20.0*math.Sin(2.0*x*pi)) * 2.0 / 3.0
	wave2 := (20.0*math.Sin(x*pi) + 40.0*math.Sin(x/3.0*pi)) * 2.0 / 3.0
	wave3 := (150.0*math.Sin(x/12.0*pi) + 300.0*math.Sin(x/30.0*pi)) * 2.0 / 3.0
	return base + wave1 + wave2 + wave3
}
// isOversea reports whether the coordinate lies outside the rough bounding
// box used for mainland China; such points are passed through unchanged by
// the conversion functions.
func isOversea(longitude, latitude float64) bool {
	switch {
	case longitude < 72.004, longitude > 137.8347:
		return true
	case latitude < 0.8293, latitude > 55.8271:
		return true
	default:
		return false
	}
}
// Wgs84ToGcj02 converts WGS84 coordinates to GCJ02 ("Mars") coordinates.
// Coordinates outside China are returned unchanged.
func Wgs84ToGcj02(longitude, latitude float64) (float64, float64) {
	if isOversea(longitude, latitude) {
		return longitude, latitude
	}
	deltaLat := transformLatitude(longitude-105.0, latitude-35.0)
	deltaLng := transformLongitude(longitude-105.0, latitude-35.0)
	radLat := latitude / 180.0 * pi
	sinLat := math.Sin(radLat)
	m := 1 - ee*sinLat*sinLat
	sqrtM := math.Sqrt(m)
	// Scale the perturbations by the local meridian/parallel curvature.
	deltaLat = (deltaLat * 180.0) / ((a * (1 - ee)) / (m * sqrtM) * pi)
	deltaLng = (deltaLng * 180.0) / (a / sqrtM * math.Cos(radLat) * pi)
	return longitude + deltaLng, latitude + deltaLat
}
// Wgs84ToBd09 converts WGS84 coordinates to BD09 (Baidu) coordinates by
// chaining the WGS84->GCJ02 and GCJ02->BD09 transforms.
func Wgs84ToBd09(longitude, latitude float64) (float64, float64) {
	lng, lat := Wgs84ToGcj02(longitude, latitude)
	return Gcj02ToBd09(lng, lat)
}
// Gcj02ToBd09 converts GCJ02 coordinates to BD09 (Baidu) coordinates.
func Gcj02ToBd09(longitude, latitude float64) (float64, float64) {
	z := math.Sqrt(longitude*longitude+latitude*latitude) + 0.00002*math.Sin(latitude*xPi)
	theta := math.Atan2(latitude, longitude) + 0.000003*math.Cos(longitude*xPi)
	bdLng := z*math.Cos(theta) + 0.0065
	bdLat := z*math.Sin(theta) + 0.006
	return bdLng, bdLat
}
// Gcj02ToWgs84 approximately inverts the WGS84->GCJ02 transform by applying
// the forward offset at the GCJ02 point and reflecting it (wgs ≈ 2*gcj - mg).
// Coordinates outside China are returned unchanged.
// (Fix: removed stray trailing semicolons that were not gofmt-clean.)
func Gcj02ToWgs84(longitude, latitude float64) (float64, float64) {
	if isOversea(longitude, latitude) {
		return longitude, latitude
	}
	dLat := transformLatitude(longitude-105.0, latitude-35.0)
	dLng := transformLongitude(longitude-105.0, latitude-35.0)
	radLat := latitude / 180.0 * pi
	magic := math.Sin(radLat)
	magic = 1 - ee*magic*magic
	sqrtMagic := math.Sqrt(magic)
	dLat = (dLat * 180.0) / ((a * (1 - ee)) / (magic * sqrtMagic) * pi)
	dLng = (dLng * 180.0) / (a / sqrtMagic * math.Cos(radLat) * pi)
	mgLat := latitude + dLat
	mgLng := longitude + dLng
	return longitude*2 - mgLng, latitude*2 - mgLat
}
// Bd09ToGcj02 converts BD09 (Baidu) coordinates back to GCJ02 coordinates.
func Bd09ToGcj02(longitude, latitude float64) (float64, float64) {
	x, y := longitude-0.0065, latitude-0.006
	z := math.Sqrt(x*x+y*y) - 0.00002*math.Sin(y*xPi)
	theta := math.Atan2(y, x) - 0.000003*math.Cos(x*xPi)
	return z * math.Cos(theta), z * math.Sin(theta)
}
func Bd09ToWgs84(longitude, latitude float64) (float64, float64) {
gcjLng, gcjLat := Bd09ToGcj02(longitude, latitude)
return Gcj02ToWgs84(gcjLng, gcjLat)
} | coordinate.go | 0.771585 | 0.565119 | coordinate.go | starcoder |
package seq
import (
"fmt"
"github.com/callpraths/gorobdd/internal/node"
)
// leafOp combines the boolean payloads of two leaf nodes into a new leaf.
type leafOp func(a node.Leaf, b node.Leaf) node.Leaf
// GraphEqual determines if the BDDs rooted at the given nodes have identical
// graph structures. For this purpose, Leaf nodes with the same value are considered
// equal.
// GraphEqual reports whether the BDDs rooted at a and b have identical graph
// structure. Leaf nodes with the same value are considered equal; an error is
// returned for an unknown node type.
func GraphEqual(a *node.Node, b *node.Node) (bool, error) {
	if a.Type != b.Type {
		return false, nil
	}
	switch a.Type {
	case node.LeafType:
		return a.Value == b.Value, nil
	case node.InternalType:
		if a.Ply != b.Ply {
			return false, nil
		}
		eq, err := GraphEqual(a.True, b.True)
		if err != nil || !eq {
			return eq, err
		}
		return GraphEqual(a.False, b.False)
	default:
		return false, fmt.Errorf("Unexpected node type: %v in %v", a.Type, a)
	}
}
// Equal determines if the BDDs rooted at the two nodes are logically equal.
// We can not use Reduce here since Equal will be used in testing Reduce.
func Equal(a *node.Node, b *node.Node) (bool, error) {
	switch a.Type {
	case node.LeafType:
		switch b.Type {
		case node.LeafType:
			// Two leaves: logical equality is value equality.
			return a.Value == b.Value, nil
		case node.InternalType:
			// Leaf vs internal: b is equal only if it is effectively
			// constant, i.e. both its branches equal the leaf a.
			return equalSkippingRoot(b, a)
		default:
			return false, fmt.Errorf("Unknown node type: %v", b)
		}
	case node.InternalType:
		switch b.Type {
		case node.LeafType:
			return equalSkippingRoot(a, b)
		case node.InternalType:
			if a.Ply == b.Ply {
				// Same variable at the root: compare both cofactors.
				if r, e := Equal(a.True, b.True); e != nil || !r {
					return r, e
				}
				return Equal(a.False, b.False)
			} else if a.Ply > b.Ply {
				// Mismatched plies: skip the root of the node with
				// the larger ply (it must not depend on that variable).
				return equalSkippingRoot(a, b)
			} else { // a.Ply < b.Ply
				return equalSkippingRoot(b, a)
			}
		default:
			return false, fmt.Errorf("Unknown node type: %v", b)
		}
	default:
		return false, fmt.Errorf("Unknown node type: %v", a)
	}
}
// equalSkippingRoot skips one level on tall and compares the rest with short:
// tall equals short iff tall's two branches are equal to each other (tall does
// not actually depend on its root variable) and equal to short.
func equalSkippingRoot(tall *node.Node, short *node.Node) (bool, error) {
	// tall is an internal node; short may be a leaf or an internal node
	// (see the call sites in Equal).
	if r, e := Equal(tall.True, tall.False); e != nil || !r {
		return r, e
	}
	return Equal(tall.True, short)
}
// And returns a BDD that represents the conjunction of the given BDDs.
// The operands must have matching structure; walk returns an error otherwise.
func And(a *node.Node, b *node.Node) (*node.Node, error) {
	return walk(a, b, andLeafOp)
}
// Or returns a BDD that represents the disjunction of the given BDDs.
// The operands must have matching structure; walk returns an error otherwise.
func Or(a *node.Node, b *node.Node) (*node.Node, error) {
	return walk(a, b, orLeafOp)
}
// Not returns a BDD that represents the negation of the given BDD.
// NOTE(review): this reads a.Value directly, which is the leaf payload;
// internal nodes are not walked, so the result looks meaningful only when a
// is a leaf node — confirm the intended usage for internal-rooted BDDs.
func Not(a *node.Node) *node.Node {
	return &node.Node{
		Type: node.LeafType,
		Leaf: node.Leaf{
			Value: !a.Value,
		},
	}
}
// walk combines two structurally identical BDDs node-by-node, applying op at
// each pair of leaves. It errs when the structures (or node types) mismatch.
func walk(a *node.Node, b *node.Node, op leafOp) (*node.Node, error) {
	if a.Type != b.Type {
		return nil, fmt.Errorf("Mismatched bdd path heights: %v, %v", a, b)
	}
	switch a.Type {
	case node.LeafType:
		return &node.Node{
			Type: node.LeafType,
			Leaf: op(a.Leaf, b.Leaf),
		}, nil
	case node.InternalType:
		trueBranch, err := walk(a.True, b.True, op)
		if err != nil {
			return trueBranch, err
		}
		falseBranch, err := walk(a.False, b.False, op)
		if err != nil {
			return falseBranch, err
		}
		return &node.Node{
			Type: node.InternalType,
			Internal: node.Internal{
				Ply:   a.Ply,
				True:  trueBranch,
				False: falseBranch,
			},
		}, nil
	default:
		return nil, fmt.Errorf("Unexpected node type: %v in %v", a.Type, a)
	}
}
// orLeafOp is the leaf combiner for Or: logical disjunction of the payloads.
func orLeafOp(a node.Leaf, b node.Leaf) node.Leaf {
	return node.Leaf{Value: a.Value || b.Value}
}
func andLeafOp(a node.Leaf, b node.Leaf) node.Leaf {
return node.Leaf{Value: a.Value && b.Value}
} | internal/seq/binary.go | 0.784897 | 0.555194 | binary.go | starcoder |
package draw2dAnimation
import (
"code.google.com/p/draw2d/draw2d"
)
// Indices into a TextWithFrame margins slice.
const (
	TopMargin int = iota
	BottomMargin
	LeftMargin
	RightMargin
)
// TextWithFrame is a composed figure pairing a text figure with a surrounding
// rectangular (optionally round-cornered) frame sized to the text bounds.
type TextWithFrame struct {
	*ComposedFigure
	// Margins holds the four frame margins, indexed by TopMargin,
	// BottomMargin, LeftMargin and RightMargin.
	Margins []float64
}
// Constructor accepting initialized base class and creating text with rectangular frame and equal margins for all sides.
// margin is applied to all four sides; see NewTextWithFrameCustomMargins.
func NewTextWithFrame(fontData draw2d.FontData, fontSize float64, text string, margin float64,
	base *ComposedFigure) *TextWithFrame {
	return NewTextWithFrameCustomMargins(fontData, fontSize, text, []float64{margin, margin, margin, margin}, base)
}
// Constructor accepting initialized base class and creating text with rectangular frame and custom margin for each sides.
// margins is indexed by the TopMargin..RightMargin constants.
func NewTextWithFrameCustomMargins(fontData draw2d.FontData, fontSize float64, text string, margins []float64,
	base *ComposedFigure) *TextWithFrame {
	return newTextWithFrame(fontData, fontSize, text, margins, false, 0.0, base)
}
// Constructor accepting initialized base class and radius and creating text with rectangular frame with round edges and equal margins for all sides.
// radius is the corner radius of the rounded frame.
func NewTextWithRoundFrame(fontData draw2d.FontData, fontSize float64, text string, margin float64, radius float64,
	base *ComposedFigure) *TextWithFrame {
	return NewTextWithRoundFrameCustomMargins(fontData, fontSize, text, []float64{margin, margin, margin, margin}, radius, base)
}
// Constructor accepting initialized base class and radius and creating text with rectangular frame with round edges and custom margin for each sides.
// margins is indexed by the TopMargin..RightMargin constants; radius is the corner radius.
func NewTextWithRoundFrameCustomMargins(fontData draw2d.FontData, fontSize float64, text string, margins []float64, radius float64,
	base *ComposedFigure) *TextWithFrame {
	return newTextWithFrame(fontData, fontSize, text, margins, true, radius, base)
}
// Called by constructors to set initial state of the figure.
func newTextWithFrame(
fontData draw2d.FontData, fontSize float64, text string, margins []float64, roundedFrame bool, radius float64,
base *ComposedFigure) *TextWithFrame {
textWithFrame := &TextWithFrame{base, margins}
textFigure := NewText6(fontData, fontSize, text, 1, Point{margins[LeftMargin], margins[TopMargin]}, 0.0)
textWithFrame.AddFigure("text", textFigure)
graphicContext := GetTheImageGraphicContext()
graphicContext.SetFontSize(fontSize)
graphicContext.SetFontData(fontData)
left, top, right, bottom := graphicContext.GetStringBounds(text)
width := right - left + margins[LeftMargin] + margins[RightMargin]
height:= bottom - top + margins[TopMargin] + margins[BottomMargin]
var frame Rectangler
if roundedFrame {
frame = NewRoundRectangle(radius, width, height, textWithFrame.GetLineWidth())
} else {
frame = NewRectangle(width, height, base.GetLineWidth())
}
textWithFrame.AddFigure("frame", frame)
return textWithFrame
} | draw2dAnimation/textWithFrame.go | 0.812347 | 0.434461 | textWithFrame.go | starcoder |
package errors
import (
"errors"
"fmt"
"github.com/pandulaDW/go-frames/base"
)
// CustomError will return a custom error based on the message provided
func CustomError(msg string) error {
	return errors.New(msg)
}

// CustomWithStandardError will return a custom error message combined with a
// standard error message. The returned error wraps err (via %w) so callers
// can use errors.Is / errors.As; the rendered message is unchanged.
func CustomWithStandardError(msg string, err error) error {
	return fmt.Errorf("%s: \n%w", msg, err)
}
// ColumnNotFound returns an error to indicate the specified column is not found in the dataframe.
// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028).
func ColumnNotFound(column string) error {
	return fmt.Errorf("%s column not found in the dataframe", column)
}
// DuplicatedColumn returns an error to indicate the specified column is already in the dataframe.
// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028).
func DuplicatedColumn(column string) error {
	return fmt.Errorf("%s column is already in the dataframe", column)
}
// MismatchedNumOfColumns provides an error mentioning the mismatched number of columns.
// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028).
func MismatchedNumOfColumns(expected, actual int) error {
	return fmt.Errorf("mismatched number of columns provided. requires %d columns, but %d was provided",
		expected, actual)
}
// MismatchedNumOfRows provides an error mentioning the mismatched number of rows.
// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028).
func MismatchedNumOfRows(expected, actual int) error {
	return fmt.Errorf("mismatched number of rows provided. requires %d rows, but %d was provided",
		expected, actual)
}
// IncorrectDataType returns an error mentioning the expected series type.
// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028).
func IncorrectDataType(dtype base.DType) error {
	return fmt.Errorf("expected a %s type Series", dtype)
}
// IncorrectTypedParameter returns an error mentioning the expected type of a parameter.
// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028).
func IncorrectTypedParameter(param, expected string) error {
	return fmt.Errorf("expected %s to be of type %s", param, expected)
}
// InvalidRowValue returns an error if an incorrect value is found in a series at the given row number.
// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028).
func InvalidRowValue(rowNum int) error {
	return fmt.Errorf("invalid value at row %d", rowNum)
}
// InvalidSeriesValError returns an error specifying the index and column name
// of an invalid series value; an empty-string value is reported as "blank".
// fmt.Errorf replaces the errors.New(fmt.Sprintf(...)) form (staticcheck S1028).
func InvalidSeriesValError(val interface{}, i int, col string) error {
	if val == "" {
		return fmt.Errorf("blank value at row no %d on column %s", i, col)
	}
	return fmt.Errorf("invalid value at row no %d on column %s", i, col)
}
// SeriesDataTypeNotPermitted return an error specifying whether the series data type permits the given operation
func SeriesDataTypeNotPermitted(operation string, dtype base.DType) error {
return fmt.Errorf("%s operation is not permitted for %s type series", operation, dtype)
} | errors/errors.go | 0.77193 | 0.469216 | errors.go | starcoder |
package wasm
import (
"context"
"errors"
"fmt"
"reflect"
"github.com/tetratelabs/wazero/api"
)
// FunctionKind identifies the type of function that can be called.
type FunctionKind byte

const (
	// FunctionKindWasm is not a Go function: it is implemented in Wasm.
	FunctionKindWasm FunctionKind = iota
	// FunctionKindGoNoContext is a function implemented in Go, with a signature matching FunctionType.
	FunctionKindGoNoContext
	// FunctionKindGoContext is a function implemented in Go, with a signature matching FunctionType, except arg zero is
	// a context.Context.
	FunctionKindGoContext
	// FunctionKindGoModule is a function implemented in Go, with a signature matching FunctionType, except arg
	// zero is a Module (api.Module).
	FunctionKindGoModule
)
// Below are reflection code to get the interface type used to parse functions and set values.
var moduleType = reflect.TypeOf((*api.Module)(nil)).Elem()         // interface type api.Module
var goContextType = reflect.TypeOf((*context.Context)(nil)).Elem() // interface type context.Context
var errorType = reflect.TypeOf((*error)(nil)).Elem()               // interface type error
// GetHostFunctionCallContextValue returns a reflect.Value for a context param[0], or nil if there isn't one.
func GetHostFunctionCallContextValue(fk FunctionKind, ctx *ModuleContext) *reflect.Value {
	var target reflect.Type
	var source interface{}
	switch fk {
	case FunctionKindGoContext:
		target, source = goContextType, ctx.Context()
	case FunctionKindGoModule:
		target, source = moduleType, ctx
	default:
		// FunctionKindGoNoContext (and wasm functions) take no special param zero.
		return nil
	}
	val := reflect.New(target).Elem()
	val.Set(reflect.ValueOf(source))
	return &val
}
// getFunctionType returns the function type corresponding to the function signature or errs if invalid.
// fk reports whether param zero is a special context/module parameter, and
// hasErrorResult reports whether the last Go result is an error (considered
// only when allowErrorResult is true).
func getFunctionType(fn *reflect.Value, allowErrorResult bool) (fk FunctionKind, ft *FunctionType, hasErrorResult bool, err error) {
	p := fn.Type()
	if fn.Kind() != reflect.Func {
		err = fmt.Errorf("kind != func: %s", fn.Kind().String())
		return
	}
	// pOffset skips the context/module parameter when present, so wasm
	// params index from the first numeric Go parameter.
	pOffset := 0
	if fk = kind(p); fk != FunctionKindGoNoContext {
		pOffset = 1
	}
	rCount := p.NumOut()
	if (allowErrorResult && rCount > 2) || (!allowErrorResult && rCount > 1) {
		err = errors.New("multiple results are unsupported")
		return
	}
	if allowErrorResult && rCount > 0 {
		// An error result, if any, must be the last result.
		maybeErrIdx := rCount - 1
		if p.Out(maybeErrIdx).Implements(errorType) {
			hasErrorResult = true
			rCount--
		}
	}
	ft = &FunctionType{Params: make([]ValueType, p.NumIn()-pOffset), Results: make([]ValueType, rCount)}
	for i := 0; i < len(ft.Params); i++ {
		pI := p.In(i + pOffset)
		if t, ok := getTypeOf(pI.Kind()); ok {
			ft.Params[i] = t
			continue
		}
		// Now, we will definitely err, decide which message is best
		var arg0Type reflect.Type
		if hc := pI.Implements(moduleType); hc {
			arg0Type = moduleType
		} else if gc := pI.Implements(goContextType); gc {
			arg0Type = goContextType
		}
		if arg0Type != nil {
			err = fmt.Errorf("param[%d] is a %s, which may be defined only once as param[0]", i+pOffset, arg0Type)
		} else {
			err = fmt.Errorf("param[%d] is unsupported: %s", i+pOffset, pI.Kind())
		}
		return
	}
	if rCount == 0 {
		return
	}
	result := p.Out(0)
	if t, ok := getTypeOf(result.Kind()); ok {
		ft.Results[0] = t
		return
	}
	if result.Implements(errorType) {
		err = errors.New("result[0] is an error, which is unsupported")
	} else {
		err = fmt.Errorf("result[0] is unsupported: %s", result.Kind())
	}
	return
}
// kind inspects param zero of a Go function type and classifies it as a
// module-, context-, or plain-parameter function.
func kind(p reflect.Type) FunctionKind {
	if p.NumIn() == 0 {
		return FunctionKindGoNoContext
	}
	p0 := p.In(0)
	if p0.Kind() != reflect.Interface {
		return FunctionKindGoNoContext
	}
	switch {
	case p0.Implements(moduleType):
		return FunctionKindGoModule
	case p0.Implements(goContextType):
		return FunctionKindGoContext
	}
	return FunctionKindGoNoContext
}
func getTypeOf(kind reflect.Kind) (ValueType, bool) {
switch kind {
case reflect.Float64:
return ValueTypeF64, true
case reflect.Float32:
return ValueTypeF32, true
case reflect.Int32, reflect.Uint32:
return ValueTypeI32, true
case reflect.Int64, reflect.Uint64:
return ValueTypeI64, true
default:
return 0x00, false
}
} | vendor/github.com/tetratelabs/wazero/internal/wasm/gofunc.go | 0.550124 | 0.437042 | gofunc.go | starcoder |
package main
import (
"fmt"
"math"
"strconv"
"strings"
)
/**
--- Day 12: Rain Risk ---
Your ferry made decent progress toward the island, but the storm came in faster than anyone expected. The ferry needs to take evasive actions!
Unfortunately, the ship's navigation computer seems to be malfunctioning; rather than giving a route directly to safety, it produced extremely circuitous instructions. When the captain uses the PA system to ask if anyone can help, you quickly volunteer.
The navigation instructions (your puzzle input) consists of a sequence of single-character actions paired with integer input values. After staring at them for a few minutes, you work out what they probably mean:
Action N means to move north by the given value.
Action S means to move south by the given value.
Action E means to move east by the given value.
Action W means to move west by the given value.
Action L means to turn left the given number of degrees.
Action R means to turn right the given number of degrees.
Action F means to move forward by the given value in the direction the ship is currently facing.
The ship starts by facing east. Only the L and R actions change the direction the ship is facing. (That is, if the ship is facing east and the next instruction is N10, the ship would move north 10 units, but would still move east if the following action were F.)
For example:
F10
N3
F7
R90
F11
These instructions would be handled as follows:
F10 would move the ship 10 units east (because the ship starts by facing east) to east 10, north 0.
N3 would move the ship 3 units north to east 10, north 3.
F7 would move the ship another 7 units east (because the ship is still facing east) to east 17, north 3.
R90 would cause the ship to turn right by 90 degrees and face south; it remains at east 17, north 3.
F11 would move the ship 11 units south to east 17, south 8.
At the end of these instructions, the ship's Manhattan distance (sum of the absolute values of its east/west position and its north/south position) from its starting position is 17 + 8 = 25.
Figure out where the navigation instructions lead. What is the Manhattan distance between that location and the ship's starting position?
*/
// day12_part1 reads the navigation commands, applies them to the ship state
// ([heading degrees, east/west, north/south]) and prints the Manhattan
// distance from the origin.
func day12_part1() {
	lines := strings.Split(getFilesContents("day12.input"), "\n")
	// heading: 0=N, 90=E, 180=S, 270=W; x is east-positive, y south-positive.
	state := []int{90, 0, 0}
	for _, cmd := range lines {
		fmt.Println(state, cmd)
		state = parse(cmd, state)
	}
	fmt.Println(state)
	dx := math.Abs(float64(state[1]))
	dy := math.Abs(float64(state[2]))
	fmt.Println("manhattan distance", dx, dy, dx+dy)
}
// parse applies a single part-one navigation command (e.g. "F10", "R90") to
// state and returns the updated state (the slice is mutated in place).
//
// state layout: {heading in degrees (0=N, 90=E, 180=S, 270=W),
// x position (west negative / east positive),
// y position (north negative / south positive)}.
//
// Improvement over the original: turn handling uses modular arithmetic so
// any multiple-of-90 turn — including turns of 360 degrees or more — keeps
// the heading normalized to [0, 360). The original single +-360 adjustment
// only handled turns up to 270 degrees.
func parse(cmd string, state []int) []int {
	action := cmd[0] // commands are single ASCII letters, so a byte suffices
	// malformed values parse as 0; input is trusted puzzle data
	valint, _ := strconv.Atoi(cmd[1:])
	if action == 'F' {
		// "forward" is shorthand for moving in the current heading
		switch state[0] {
		case 0:
			action = 'N'
		case 90:
			action = 'E'
		case 180:
			action = 'S'
		case 270:
			action = 'W'
		}
	}
	switch action {
	case 'N':
		state[2] -= valint
	case 'S':
		state[2] += valint
	case 'E':
		state[1] += valint
	case 'W':
		state[1] -= valint
	case 'L':
		// normalize into [0, 360); the inner %360 bounds the negative value
		// so the +360 correction is sufficient for any turn size
		state[0] = ((state[0]-valint)%360 + 360) % 360
	case 'R':
		state[0] = (state[0] + valint) % 360
	}
	return state
}
/**
--- Part Two ---
Before you can give the destination to the captain, you realize that the actual action meanings were printed on the back of the instructions the whole time.
Almost all of the actions indicate how to move a waypoint which is relative to the ship's position:
Action N means to move the waypoint north by the given value.
Action S means to move the waypoint south by the given value.
Action E means to move the waypoint east by the given value.
Action W means to move the waypoint west by the given value.
Action L means to rotate the waypoint around the ship left (counter-clockwise) the given number of degrees.
Action R means to rotate the waypoint around the ship right (clockwise) the given number of degrees.
Action F means to move forward to the waypoint a number of times equal to the given value.
The waypoint starts 10 units east and 1 unit north relative to the ship. The waypoint is relative to the ship; that is, if the ship moves, the waypoint moves with it.
For example, using the same instructions as above:
F10 moves the ship to the waypoint 10 times (a total of 100 units east and 10 units north), leaving the ship at east 100, north 10. The waypoint stays 10 units east and 1 unit north of the ship.
N3 moves the waypoint 3 units north to 10 units east and 4 units north of the ship. The ship remains at east 100, north 10.
F7 moves the ship to the waypoint 7 times (a total of 70 units east and 28 units north), leaving the ship at east 170, north 38. The waypoint stays 10 units east and 4 units north of the ship.
R90 rotates the waypoint around the ship clockwise 90 degrees, moving it to 4 units east and 10 units south of the ship. The ship remains at east 170, north 38.
F11 moves the ship to the waypoint 11 times (a total of 44 units east and 110 units south), leaving the ship at east 214, south 72. The waypoint stays 4 units east and 10 units south of the ship.
After these operations, the ship's Manhattan distance from its starting position is 214 + 72 = 286.
Figure out where the navigation instructions actually lead. What is the Manhattan distance between that location and the ship's starting position?
*/
// day12_part2 solves part two of day 12: commands move/rotate a waypoint
// relative to the ship and "F" moves the ship toward the waypoint. Prints
// the Manhattan distance of the final ship position from the origin.
func day12_part2() {
	data := getFilesContents("day12.input")
	// positions: {east/west (east positive), north/south (south positive)}
	ship := []int{0, 0}
	waypt := []int{10, -1} // waypoint starts 10 units east, 1 unit north
	for _, instruction := range strings.Split(data, "\n") {
		fmt.Println(ship, waypt, instruction)
		ship, waypt = parse2(instruction, ship, waypt)
	}
	fmt.Println(ship)
	dx := math.Abs(float64(ship[0]))
	dy := math.Abs(float64(ship[1]))
	fmt.Println("manhattan distance", dx, dy, dx+dy)
}
// parse2 applies one part-two command to the ship/waypoint pair and returns
// both updated slices (mutated in place). N/S/E/W translate the waypoint,
// L/R rotate it about the ship in 90-degree steps, and F advances the ship
// toward the waypoint the given number of times.
func parse2(cmd string, ship []int, waypt []int) ([]int, []int) {
	action := cmd[0] // commands are single ASCII letters
	value, _ := strconv.Atoi(cmd[1:])
	switch action {
	case 'F':
		ship[0] += waypt[0] * value
		ship[1] += waypt[1] * value
	case 'N':
		waypt[1] -= value
	case 'S':
		waypt[1] += value
	case 'E':
		waypt[0] += value
	case 'W':
		waypt[0] -= value
	case 'L', 'R':
		// four quarter turns are the identity, so k*90 degrees reduces to
		// k mod 4 quarter turns
		for turns := (value / 90) % 4; turns > 0; turns-- {
			if action == 'L' {
				waypt[0], waypt[1] = waypt[1], -waypt[0] // counter-clockwise quarter turn
			} else {
				waypt[0], waypt[1] = -waypt[1], waypt[0] // clockwise quarter turn
			}
		}
	}
	return ship, waypt
}
package limage
import (
"image"
"image/color"
"vimagination.zapto.org/limage/lcolor"
)
// GrayAlpha is an in-memory image of GrayAlpha (luminance + alpha) pixels.
type GrayAlpha struct {
	// Pix holds the pixels in row-major order; the pixel at (x, y) is at
	// Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)].
	Pix []lcolor.GrayAlpha
	// Stride is the Pix index distance between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect image.Rectangle
}

// NewGrayAlpha creates a new GrayAlpha image with the given bounds.
func NewGrayAlpha(r image.Rectangle) *GrayAlpha {
	w, h := r.Dx(), r.Dy()
	return &GrayAlpha{
		Pix:    make([]lcolor.GrayAlpha, w*h),
		Stride: w,
		Rect:   r,
	}
}

// At returns the color for the pixel at the specified coords.
// It implements image.Image by delegating to GrayAlphaAt.
func (g *GrayAlpha) At(x, y int) color.Color {
	return g.GrayAlphaAt(x, y)
}

// Bounds returns the limits of the image.
func (g *GrayAlpha) Bounds() image.Rectangle {
	return g.Rect
}

// ColorModel returns a color model to transform arbitrary colours into a
// GrayAlpha color.
func (g *GrayAlpha) ColorModel() color.Model {
	return lcolor.GrayAlphaModel
}

// GrayAlphaAt returns a GrayAlpha color for the specified coords.
// Points outside the image bounds yield the zero (fully transparent) color.
func (g *GrayAlpha) GrayAlphaAt(x, y int) lcolor.GrayAlpha {
	if !(image.Point{x, y}.In(g.Rect)) {
		return lcolor.GrayAlpha{}
	}
	return g.Pix[g.PixOffset(x, y)]
}
// Opaque returns true if all pixels within the image bounds have full alpha.
//
// The scan is restricted to the rows and columns inside Rect (mirroring the
// standard library image types) so that an image sharing a larger backing
// array — such as one returned by SubImage — does not consider pixels that
// lie outside its own bounds. The previous implementation ranged over the
// whole Pix slice, which for sub-images inspected foreign pixels.
func (g *GrayAlpha) Opaque() bool {
	for y := g.Rect.Min.Y; y < g.Rect.Max.Y; y++ {
		// offset of the first pixel in this row
		i := g.PixOffset(g.Rect.Min.X, y)
		for _, c := range g.Pix[i : i+g.Rect.Dx()] {
			if c.A != 255 {
				return false
			}
		}
	}
	return true
}
// PixOffset returns the index of the element of Pix corresponding to the given
// coords.
func (g *GrayAlpha) PixOffset(x, y int) int {
	return (y-g.Rect.Min.Y)*g.Stride + x - g.Rect.Min.X
}

// Set converts the given colour to a GrayAlpha colour and sets it at the given
// coords. Points outside the image bounds are silently ignored.
func (g *GrayAlpha) Set(x, y int, c color.Color) {
	if !(image.Point{x, y}.In(g.Rect)) {
		return
	}
	g.Pix[g.PixOffset(x, y)] = lcolor.GrayAlphaModel.Convert(c).(lcolor.GrayAlpha)
}

// SetGrayAlpha sets the colour at the given coords. Points outside the image
// bounds are silently ignored.
func (g *GrayAlpha) SetGrayAlpha(x, y int, ga lcolor.GrayAlpha) {
	if !(image.Point{x, y}.In(g.Rect)) {
		return
	}
	g.Pix[g.PixOffset(x, y)] = ga
}

// SubImage returns the Image viewable through the given bounds. The returned
// image shares pixel storage with the original: writes through either are
// visible in both.
func (g *GrayAlpha) SubImage(r image.Rectangle) image.Image {
	r = r.Intersect(g.Rect)
	if r.Empty() {
		return &GrayAlpha{}
	}
	return &GrayAlpha{
		Pix:    g.Pix[g.PixOffset(r.Min.X, r.Min.Y):],
		Stride: g.Stride,
		Rect:   r,
	}
}
package reads
import (
"github.com/influxdata/influxdb/storage/reads/datatypes"
"github.com/influxdata/influxdb/tsdb/cursors"
)
// getFloatPointsFrame pops a recycled float points frame from the writer's
// buffer pool, or allocates a fresh one with batchSize capacity when the
// pool is empty.
func (w *ResponseWriter) getFloatPointsFrame() *datatypes.ReadResponse_Frame_FloatPoints {
	var res *datatypes.ReadResponse_Frame_FloatPoints
	if len(w.buffer.Float) > 0 {
		i := len(w.buffer.Float) - 1
		res = w.buffer.Float[i]
		w.buffer.Float[i] = nil // drop the pool's reference so the frame isn't retained twice
		w.buffer.Float = w.buffer.Float[:i]
	} else {
		res = &datatypes.ReadResponse_Frame_FloatPoints{
			FloatPoints: &datatypes.ReadResponse_FloatPointsFrame{
				Timestamps: make([]int64, 0, batchSize),
				Values:     make([]float64, 0, batchSize),
			},
		}
	}
	return res
}

// putFloatPointsFrame truncates f's slices (keeping their capacity) and
// returns the frame to the buffer pool for reuse by getFloatPointsFrame.
func (w *ResponseWriter) putFloatPointsFrame(f *datatypes.ReadResponse_Frame_FloatPoints) {
	f.FloatPoints.Timestamps = f.FloatPoints.Timestamps[:0]
	f.FloatPoints.Values = f.FloatPoints.Values[:0]
	w.buffer.Float = append(w.buffer.Float, f)
}

// streamFloatArraySeries emits only the series frame for cur. One batch is
// read from the cursor purely to detect whether the series has any data; if
// it does not, the series frame previously appended by the caller (at the
// tail of w.res.Frames) is recycled and removed, and its estimated size is
// subtracted from w.sz.
func (w *ResponseWriter) streamFloatArraySeries(cur cursors.FloatArrayCursor) {
	w.sf.DataType = datatypes.DataTypeFloat
	ss := len(w.res.Frames) - 1 // index of the series frame appended by the caller
	a := cur.Next()
	if len(a.Timestamps) == 0 {
		// empty series: undo the series frame and its size contribution
		w.sz -= w.sf.Size()
		w.putSeriesFrame(w.res.Frames[ss].Data.(*datatypes.ReadResponse_Frame_Series))
		w.res.Frames = w.res.Frames[:ss]
	} else if w.sz > writeSize {
		w.Flush()
	}
}

// streamFloatArrayPoints drains cur, appending its timestamps and values
// into pooled float points frames and flushing whenever the estimated
// buffered size exceeds writeSize. If the cursor yields no values at all,
// the series frame previously appended by the caller is recycled and
// removed.
func (w *ResponseWriter) streamFloatArrayPoints(cur cursors.FloatArrayCursor) {
	w.sf.DataType = datatypes.DataTypeFloat
	ss := len(w.res.Frames) - 1 // index of the series frame appended by the caller

	p := w.getFloatPointsFrame()
	frame := p.FloatPoints
	w.res.Frames = append(w.res.Frames, datatypes.ReadResponse_Frame{Data: p})

	var seriesValueCount = 0
	for {
		// If the number of values produced by cur > 1000,
		// cur.Next() will produce batches of values that are of
		// length ≤ 1000.
		// We attempt to limit the frame Timestamps / Values lengths
		// the same to avoid allocations. These frames are recycled
		// after flushing so that on repeated use there should be enough space
		// to append values from a into frame without additional allocations.
		a := cur.Next()
		if len(a.Timestamps) == 0 {
			break
		}

		seriesValueCount += a.Len()
		// As specified in the struct definition, w.sz is an estimated
		// size (in bytes) of the buffered data. It is therefore a
		// deliberate choice to accumulate using the array Size, which is
		// cheap to calculate. Calling frame.Size() can be expensive
		// when using varint encoding for numbers.
		w.sz += a.Size()

		frame.Timestamps = append(frame.Timestamps, a.Timestamps...)
		frame.Values = append(frame.Values, a.Values...)

		// given the expectation of cur.Next, we attempt to limit
		// the number of values appended to the frame to batchSize (1000)
		needsFrame := len(frame.Timestamps) >= batchSize

		if w.sz >= writeSize {
			needsFrame = true
			w.Flush()
			if w.err != nil {
				break
			}
		}

		if needsFrame {
			// new frames are returned with Timestamps and Values preallocated
			// to a minimum of batchSize length to reduce further allocations.
			p = w.getFloatPointsFrame()
			frame = p.FloatPoints
			w.res.Frames = append(w.res.Frames, datatypes.ReadResponse_Frame{Data: p})
		}
	}

	w.vc += seriesValueCount
	if seriesValueCount == 0 {
		// no points were produced: drop the series frame as well
		w.sz -= w.sf.Size()
		w.putSeriesFrame(w.res.Frames[ss].Data.(*datatypes.ReadResponse_Frame_Series))
		w.res.Frames = w.res.Frames[:ss]
	} else if w.sz > writeSize {
		w.Flush()
	}
}
// getIntegerPointsFrame pops a recycled integer points frame from the
// writer's buffer pool, or allocates a fresh one with batchSize capacity
// when the pool is empty.
func (w *ResponseWriter) getIntegerPointsFrame() *datatypes.ReadResponse_Frame_IntegerPoints {
	var res *datatypes.ReadResponse_Frame_IntegerPoints
	if len(w.buffer.Integer) > 0 {
		i := len(w.buffer.Integer) - 1
		res = w.buffer.Integer[i]
		w.buffer.Integer[i] = nil // drop the pool's reference so the frame isn't retained twice
		w.buffer.Integer = w.buffer.Integer[:i]
	} else {
		res = &datatypes.ReadResponse_Frame_IntegerPoints{
			IntegerPoints: &datatypes.ReadResponse_IntegerPointsFrame{
				Timestamps: make([]int64, 0, batchSize),
				Values:     make([]int64, 0, batchSize),
			},
		}
	}
	return res
}

// putIntegerPointsFrame truncates f's slices (keeping their capacity) and
// returns the frame to the buffer pool for reuse by getIntegerPointsFrame.
func (w *ResponseWriter) putIntegerPointsFrame(f *datatypes.ReadResponse_Frame_IntegerPoints) {
	f.IntegerPoints.Timestamps = f.IntegerPoints.Timestamps[:0]
	f.IntegerPoints.Values = f.IntegerPoints.Values[:0]
	w.buffer.Integer = append(w.buffer.Integer, f)
}

// streamIntegerArraySeries emits only the series frame for cur. One batch is
// read from the cursor purely to detect whether the series has any data; if
// it does not, the series frame previously appended by the caller (at the
// tail of w.res.Frames) is recycled and removed, and its estimated size is
// subtracted from w.sz.
func (w *ResponseWriter) streamIntegerArraySeries(cur cursors.IntegerArrayCursor) {
	w.sf.DataType = datatypes.DataTypeInteger
	ss := len(w.res.Frames) - 1 // index of the series frame appended by the caller
	a := cur.Next()
	if len(a.Timestamps) == 0 {
		// empty series: undo the series frame and its size contribution
		w.sz -= w.sf.Size()
		w.putSeriesFrame(w.res.Frames[ss].Data.(*datatypes.ReadResponse_Frame_Series))
		w.res.Frames = w.res.Frames[:ss]
	} else if w.sz > writeSize {
		w.Flush()
	}
}

// streamIntegerArrayPoints drains cur, appending its timestamps and values
// into pooled integer points frames and flushing whenever the estimated
// buffered size exceeds writeSize. If the cursor yields no values at all,
// the series frame previously appended by the caller is recycled and
// removed.
func (w *ResponseWriter) streamIntegerArrayPoints(cur cursors.IntegerArrayCursor) {
	w.sf.DataType = datatypes.DataTypeInteger
	ss := len(w.res.Frames) - 1 // index of the series frame appended by the caller

	p := w.getIntegerPointsFrame()
	frame := p.IntegerPoints
	w.res.Frames = append(w.res.Frames, datatypes.ReadResponse_Frame{Data: p})

	var seriesValueCount = 0
	for {
		// If the number of values produced by cur > 1000,
		// cur.Next() will produce batches of values that are of
		// length ≤ 1000.
		// We attempt to limit the frame Timestamps / Values lengths
		// the same to avoid allocations. These frames are recycled
		// after flushing so that on repeated use there should be enough space
		// to append values from a into frame without additional allocations.
		a := cur.Next()
		if len(a.Timestamps) == 0 {
			break
		}

		seriesValueCount += a.Len()
		// As specified in the struct definition, w.sz is an estimated
		// size (in bytes) of the buffered data. It is therefore a
		// deliberate choice to accumulate using the array Size, which is
		// cheap to calculate. Calling frame.Size() can be expensive
		// when using varint encoding for numbers.
		w.sz += a.Size()

		frame.Timestamps = append(frame.Timestamps, a.Timestamps...)
		frame.Values = append(frame.Values, a.Values...)

		// given the expectation of cur.Next, we attempt to limit
		// the number of values appended to the frame to batchSize (1000)
		needsFrame := len(frame.Timestamps) >= batchSize

		if w.sz >= writeSize {
			needsFrame = true
			w.Flush()
			if w.err != nil {
				break
			}
		}

		if needsFrame {
			// new frames are returned with Timestamps and Values preallocated
			// to a minimum of batchSize length to reduce further allocations.
			p = w.getIntegerPointsFrame()
			frame = p.IntegerPoints
			w.res.Frames = append(w.res.Frames, datatypes.ReadResponse_Frame{Data: p})
		}
	}

	w.vc += seriesValueCount
	if seriesValueCount == 0 {
		// no points were produced: drop the series frame as well
		w.sz -= w.sf.Size()
		w.putSeriesFrame(w.res.Frames[ss].Data.(*datatypes.ReadResponse_Frame_Series))
		w.res.Frames = w.res.Frames[:ss]
	} else if w.sz > writeSize {
		w.Flush()
	}
}
// getUnsignedPointsFrame pops a recycled unsigned points frame from the
// writer's buffer pool, or allocates a fresh one with batchSize capacity
// when the pool is empty.
func (w *ResponseWriter) getUnsignedPointsFrame() *datatypes.ReadResponse_Frame_UnsignedPoints {
	var res *datatypes.ReadResponse_Frame_UnsignedPoints
	if len(w.buffer.Unsigned) > 0 {
		i := len(w.buffer.Unsigned) - 1
		res = w.buffer.Unsigned[i]
		w.buffer.Unsigned[i] = nil // drop the pool's reference so the frame isn't retained twice
		w.buffer.Unsigned = w.buffer.Unsigned[:i]
	} else {
		res = &datatypes.ReadResponse_Frame_UnsignedPoints{
			UnsignedPoints: &datatypes.ReadResponse_UnsignedPointsFrame{
				Timestamps: make([]int64, 0, batchSize),
				Values:     make([]uint64, 0, batchSize),
			},
		}
	}
	return res
}

// putUnsignedPointsFrame truncates f's slices (keeping their capacity) and
// returns the frame to the buffer pool for reuse by getUnsignedPointsFrame.
func (w *ResponseWriter) putUnsignedPointsFrame(f *datatypes.ReadResponse_Frame_UnsignedPoints) {
	f.UnsignedPoints.Timestamps = f.UnsignedPoints.Timestamps[:0]
	f.UnsignedPoints.Values = f.UnsignedPoints.Values[:0]
	w.buffer.Unsigned = append(w.buffer.Unsigned, f)
}

// streamUnsignedArraySeries emits only the series frame for cur. One batch
// is read from the cursor purely to detect whether the series has any data;
// if it does not, the series frame previously appended by the caller (at the
// tail of w.res.Frames) is recycled and removed, and its estimated size is
// subtracted from w.sz.
func (w *ResponseWriter) streamUnsignedArraySeries(cur cursors.UnsignedArrayCursor) {
	w.sf.DataType = datatypes.DataTypeUnsigned
	ss := len(w.res.Frames) - 1 // index of the series frame appended by the caller
	a := cur.Next()
	if len(a.Timestamps) == 0 {
		// empty series: undo the series frame and its size contribution
		w.sz -= w.sf.Size()
		w.putSeriesFrame(w.res.Frames[ss].Data.(*datatypes.ReadResponse_Frame_Series))
		w.res.Frames = w.res.Frames[:ss]
	} else if w.sz > writeSize {
		w.Flush()
	}
}

// streamUnsignedArrayPoints drains cur, appending its timestamps and values
// into pooled unsigned points frames and flushing whenever the estimated
// buffered size exceeds writeSize. If the cursor yields no values at all,
// the series frame previously appended by the caller is recycled and
// removed.
func (w *ResponseWriter) streamUnsignedArrayPoints(cur cursors.UnsignedArrayCursor) {
	w.sf.DataType = datatypes.DataTypeUnsigned
	ss := len(w.res.Frames) - 1 // index of the series frame appended by the caller

	p := w.getUnsignedPointsFrame()
	frame := p.UnsignedPoints
	w.res.Frames = append(w.res.Frames, datatypes.ReadResponse_Frame{Data: p})

	var seriesValueCount = 0
	for {
		// If the number of values produced by cur > 1000,
		// cur.Next() will produce batches of values that are of
		// length ≤ 1000.
		// We attempt to limit the frame Timestamps / Values lengths
		// the same to avoid allocations. These frames are recycled
		// after flushing so that on repeated use there should be enough space
		// to append values from a into frame without additional allocations.
		a := cur.Next()
		if len(a.Timestamps) == 0 {
			break
		}

		seriesValueCount += a.Len()
		// As specified in the struct definition, w.sz is an estimated
		// size (in bytes) of the buffered data. It is therefore a
		// deliberate choice to accumulate using the array Size, which is
		// cheap to calculate. Calling frame.Size() can be expensive
		// when using varint encoding for numbers.
		w.sz += a.Size()

		frame.Timestamps = append(frame.Timestamps, a.Timestamps...)
		frame.Values = append(frame.Values, a.Values...)

		// given the expectation of cur.Next, we attempt to limit
		// the number of values appended to the frame to batchSize (1000)
		needsFrame := len(frame.Timestamps) >= batchSize

		if w.sz >= writeSize {
			needsFrame = true
			w.Flush()
			if w.err != nil {
				break
			}
		}

		if needsFrame {
			// new frames are returned with Timestamps and Values preallocated
			// to a minimum of batchSize length to reduce further allocations.
			p = w.getUnsignedPointsFrame()
			frame = p.UnsignedPoints
			w.res.Frames = append(w.res.Frames, datatypes.ReadResponse_Frame{Data: p})
		}
	}

	w.vc += seriesValueCount
	if seriesValueCount == 0 {
		// no points were produced: drop the series frame as well
		w.sz -= w.sf.Size()
		w.putSeriesFrame(w.res.Frames[ss].Data.(*datatypes.ReadResponse_Frame_Series))
		w.res.Frames = w.res.Frames[:ss]
	} else if w.sz > writeSize {
		w.Flush()
	}
}
// getStringPointsFrame pops a recycled string points frame from the writer's
// buffer pool, or allocates a fresh one with batchSize capacity when the
// pool is empty.
func (w *ResponseWriter) getStringPointsFrame() *datatypes.ReadResponse_Frame_StringPoints {
	var res *datatypes.ReadResponse_Frame_StringPoints
	if len(w.buffer.String) > 0 {
		i := len(w.buffer.String) - 1
		res = w.buffer.String[i]
		w.buffer.String[i] = nil // drop the pool's reference so the frame isn't retained twice
		w.buffer.String = w.buffer.String[:i]
	} else {
		res = &datatypes.ReadResponse_Frame_StringPoints{
			StringPoints: &datatypes.ReadResponse_StringPointsFrame{
				Timestamps: make([]int64, 0, batchSize),
				Values:     make([]string, 0, batchSize),
			},
		}
	}
	return res
}

// putStringPointsFrame truncates f's slices (keeping their capacity) and
// returns the frame to the buffer pool for reuse by getStringPointsFrame.
func (w *ResponseWriter) putStringPointsFrame(f *datatypes.ReadResponse_Frame_StringPoints) {
	f.StringPoints.Timestamps = f.StringPoints.Timestamps[:0]
	f.StringPoints.Values = f.StringPoints.Values[:0]
	w.buffer.String = append(w.buffer.String, f)
}

// streamStringArraySeries emits only the series frame for cur. One batch is
// read from the cursor purely to detect whether the series has any data; if
// it does not, the series frame previously appended by the caller (at the
// tail of w.res.Frames) is recycled and removed, and its estimated size is
// subtracted from w.sz.
func (w *ResponseWriter) streamStringArraySeries(cur cursors.StringArrayCursor) {
	w.sf.DataType = datatypes.DataTypeString
	ss := len(w.res.Frames) - 1 // index of the series frame appended by the caller
	a := cur.Next()
	if len(a.Timestamps) == 0 {
		// empty series: undo the series frame and its size contribution
		w.sz -= w.sf.Size()
		w.putSeriesFrame(w.res.Frames[ss].Data.(*datatypes.ReadResponse_Frame_Series))
		w.res.Frames = w.res.Frames[:ss]
	} else if w.sz > writeSize {
		w.Flush()
	}
}

// streamStringArrayPoints drains cur, appending its timestamps and values
// into pooled string points frames and flushing whenever the estimated
// buffered size exceeds writeSize. If the cursor yields no values at all,
// the series frame previously appended by the caller is recycled and
// removed.
func (w *ResponseWriter) streamStringArrayPoints(cur cursors.StringArrayCursor) {
	w.sf.DataType = datatypes.DataTypeString
	ss := len(w.res.Frames) - 1 // index of the series frame appended by the caller

	p := w.getStringPointsFrame()
	frame := p.StringPoints
	w.res.Frames = append(w.res.Frames, datatypes.ReadResponse_Frame{Data: p})

	var seriesValueCount = 0
	for {
		// If the number of values produced by cur > 1000,
		// cur.Next() will produce batches of values that are of
		// length ≤ 1000.
		// We attempt to limit the frame Timestamps / Values lengths
		// the same to avoid allocations. These frames are recycled
		// after flushing so that on repeated use there should be enough space
		// to append values from a into frame without additional allocations.
		a := cur.Next()
		if len(a.Timestamps) == 0 {
			break
		}

		seriesValueCount += a.Len()
		// As specified in the struct definition, w.sz is an estimated
		// size (in bytes) of the buffered data. It is therefore a
		// deliberate choice to accumulate using the array Size, which is
		// cheap to calculate. Calling frame.Size() can be expensive
		// when using varint encoding for numbers.
		w.sz += a.Size()

		frame.Timestamps = append(frame.Timestamps, a.Timestamps...)
		frame.Values = append(frame.Values, a.Values...)

		// given the expectation of cur.Next, we attempt to limit
		// the number of values appended to the frame to batchSize (1000)
		needsFrame := len(frame.Timestamps) >= batchSize

		if w.sz >= writeSize {
			needsFrame = true
			w.Flush()
			if w.err != nil {
				break
			}
		}

		if needsFrame {
			// new frames are returned with Timestamps and Values preallocated
			// to a minimum of batchSize length to reduce further allocations.
			p = w.getStringPointsFrame()
			frame = p.StringPoints
			w.res.Frames = append(w.res.Frames, datatypes.ReadResponse_Frame{Data: p})
		}
	}

	w.vc += seriesValueCount
	if seriesValueCount == 0 {
		// no points were produced: drop the series frame as well
		w.sz -= w.sf.Size()
		w.putSeriesFrame(w.res.Frames[ss].Data.(*datatypes.ReadResponse_Frame_Series))
		w.res.Frames = w.res.Frames[:ss]
	} else if w.sz > writeSize {
		w.Flush()
	}
}
// getBooleanPointsFrame pops a recycled boolean points frame from the
// writer's buffer pool, or allocates a fresh one with batchSize capacity
// when the pool is empty.
func (w *ResponseWriter) getBooleanPointsFrame() *datatypes.ReadResponse_Frame_BooleanPoints {
	var res *datatypes.ReadResponse_Frame_BooleanPoints
	if len(w.buffer.Boolean) > 0 {
		i := len(w.buffer.Boolean) - 1
		res = w.buffer.Boolean[i]
		w.buffer.Boolean[i] = nil // drop the pool's reference so the frame isn't retained twice
		w.buffer.Boolean = w.buffer.Boolean[:i]
	} else {
		res = &datatypes.ReadResponse_Frame_BooleanPoints{
			BooleanPoints: &datatypes.ReadResponse_BooleanPointsFrame{
				Timestamps: make([]int64, 0, batchSize),
				Values:     make([]bool, 0, batchSize),
			},
		}
	}
	return res
}

// putBooleanPointsFrame truncates f's slices (keeping their capacity) and
// returns the frame to the buffer pool for reuse by getBooleanPointsFrame.
func (w *ResponseWriter) putBooleanPointsFrame(f *datatypes.ReadResponse_Frame_BooleanPoints) {
	f.BooleanPoints.Timestamps = f.BooleanPoints.Timestamps[:0]
	f.BooleanPoints.Values = f.BooleanPoints.Values[:0]
	w.buffer.Boolean = append(w.buffer.Boolean, f)
}

// streamBooleanArraySeries emits only the series frame for cur. One batch is
// read from the cursor purely to detect whether the series has any data; if
// it does not, the series frame previously appended by the caller (at the
// tail of w.res.Frames) is recycled and removed, and its estimated size is
// subtracted from w.sz.
func (w *ResponseWriter) streamBooleanArraySeries(cur cursors.BooleanArrayCursor) {
	w.sf.DataType = datatypes.DataTypeBoolean
	ss := len(w.res.Frames) - 1 // index of the series frame appended by the caller
	a := cur.Next()
	if len(a.Timestamps) == 0 {
		// empty series: undo the series frame and its size contribution
		w.sz -= w.sf.Size()
		w.putSeriesFrame(w.res.Frames[ss].Data.(*datatypes.ReadResponse_Frame_Series))
		w.res.Frames = w.res.Frames[:ss]
	} else if w.sz > writeSize {
		w.Flush()
	}
}

// streamBooleanArrayPoints drains cur, appending its timestamps and values
// into pooled boolean points frames and flushing whenever the estimated
// buffered size exceeds writeSize. If the cursor yields no values at all,
// the series frame previously appended by the caller is recycled and
// removed.
func (w *ResponseWriter) streamBooleanArrayPoints(cur cursors.BooleanArrayCursor) {
	w.sf.DataType = datatypes.DataTypeBoolean
	ss := len(w.res.Frames) - 1 // index of the series frame appended by the caller

	p := w.getBooleanPointsFrame()
	frame := p.BooleanPoints
	w.res.Frames = append(w.res.Frames, datatypes.ReadResponse_Frame{Data: p})

	var seriesValueCount = 0
	for {
		// If the number of values produced by cur > 1000,
		// cur.Next() will produce batches of values that are of
		// length ≤ 1000.
		// We attempt to limit the frame Timestamps / Values lengths
		// the same to avoid allocations. These frames are recycled
		// after flushing so that on repeated use there should be enough space
		// to append values from a into frame without additional allocations.
		a := cur.Next()
		if len(a.Timestamps) == 0 {
			break
		}

		seriesValueCount += a.Len()
		// As specified in the struct definition, w.sz is an estimated
		// size (in bytes) of the buffered data. It is therefore a
		// deliberate choice to accumulate using the array Size, which is
		// cheap to calculate. Calling frame.Size() can be expensive
		// when using varint encoding for numbers.
		w.sz += a.Size()

		frame.Timestamps = append(frame.Timestamps, a.Timestamps...)
		frame.Values = append(frame.Values, a.Values...)

		// given the expectation of cur.Next, we attempt to limit
		// the number of values appended to the frame to batchSize (1000)
		needsFrame := len(frame.Timestamps) >= batchSize

		if w.sz >= writeSize {
			needsFrame = true
			w.Flush()
			if w.err != nil {
				break
			}
		}

		if needsFrame {
			// new frames are returned with Timestamps and Values preallocated
			// to a minimum of batchSize length to reduce further allocations.
			p = w.getBooleanPointsFrame()
			frame = p.BooleanPoints
			w.res.Frames = append(w.res.Frames, datatypes.ReadResponse_Frame{Data: p})
		}
	}

	w.vc += seriesValueCount
	if seriesValueCount == 0 {
		// no points were produced: drop the series frame as well
		w.sz -= w.sf.Size()
		w.putSeriesFrame(w.res.Frames[ss].Data.(*datatypes.ReadResponse_Frame_Series))
		w.res.Frames = w.res.Frames[:ss]
	} else if w.sz > writeSize {
		w.Flush()
	}
}
package memstorage
import (
"sync"
"time"
)
// nanoNow returns the current wall-clock time as nanoseconds since the Unix
// epoch. Supposed to be slow; avoid calling it on hot paths.
func nanoNow() uint64 {
	return uint64(time.Now().UnixNano())
}
// tick returns the total number of times the interval has occurred between start and current
func tick(start, current uint64, interval time.Duration) uint64 {
return (current - start) / uint64(interval)
}
// availableTokens computes how many tokens have accumulated between lastTick
// and currentTick at the given fill rate, capped at max.
func availableTokens(lastTick, currentTick, max uint64, fillRate float64) uint64 {
	elapsed := currentTick - lastTick
	refilled := uint64(fillRate * float64(elapsed))
	if refilled > max {
		return max
	}
	return refilled
}
// bucket is an internal-use-only implementation of a leaky (token) bucket.
type bucket struct {
	// startTime is the number of nanoseconds from epoch when the bucket was created.
	startTime uint64
	// maxTokens is the maximum number of tokens available for this bucket at any time.
	// The actual number of available tokens should never exceed this value.
	maxTokens uint64
	// interval is the time at which occurs tick
	interval time.Duration
	// fillRate scales elapsed ticks into refilled tokens in availableTokens.
	// NOTE(review): documented here as "tokens to add per nanosecond", but
	// newBucket computes float64(interval)/float64(tokens), which is
	// nanoseconds per token — confirm the intended unit.
	fillRate float64
	// availableTokens is the number of current remaining tokens.
	availableTokens uint64
	// lastTick is the last clock tick which is used to recalculate the number
	// of bucket tokens.
	lastTick uint64
	// lock guards the mutable fields (availableTokens, lastTick).
	lock sync.Mutex
}
// newBucket creates a bucket that starts full with the given number of
// tokens and refills on each interval tick.
func newBucket(tokens uint64, interval time.Duration) *bucket {
	b := &bucket{
		startTime: nanoNow(),
		maxTokens: tokens,
		interval:  interval,
		// NOTE(review): this is interval/tokens (nanoseconds per token), not
		// tokens per nanosecond as the field comment suggests; the refill in
		// availableTokens relies on the cap at maxTokens — verify intent.
		fillRate:        float64(interval) / float64(tokens),
		availableTokens: tokens,
	}
	return b
}
// get returns the bucket's capacity and the number of tokens currently
// remaining. The error result is always nil here.
func (b *bucket) get() (tokens uint64, remaining uint64, err error) {
	b.lock.Lock()
	tokens = b.maxTokens
	remaining = b.availableTokens
	b.lock.Unlock()
	return tokens, remaining, nil
}
// take attempts to consume one token from the bucket. It returns the bucket
// capacity, the tokens remaining after the attempt, the nanosecond timestamp
// at which the next tick (refill) occurs, whether a token was actually
// taken, and an error (always nil here).
func (b *bucket) take() (tokens uint64, remaining uint64, reset uint64, ok bool, err error) {
	now := nanoNow()
	currentTick := tick(b.startTime, now, b.interval)
	tokens = b.maxTokens
	// reset marks the start of the next tick window
	reset = b.startTime + ((currentTick + 1) * uint64(b.interval))
	b.lock.Lock()
	if b.lastTick < currentTick {
		// at least one tick has elapsed since the last refill: recompute
		// the available tokens and advance the tick watermark
		b.availableTokens = availableTokens(b.lastTick, currentTick, b.maxTokens, b.fillRate)
		b.lastTick = currentTick
	}
	if b.availableTokens > 0 {
		b.availableTokens--
		ok = true
		remaining = b.availableTokens
	}
	b.lock.Unlock()
	return
}
package keeper
import (
"fmt"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/gravity-devs/liquidity/x/liquidity/types"
)
// RegisterInvariants registers all liquidity module invariants with the
// given invariant registry.
func RegisterInvariants(ir sdk.InvariantRegistry, k Keeper) {
	ir.RegisterRoute(types.ModuleName, "escrow-amount",
		LiquidityPoolsEscrowAmountInvariant(k))
}
// AllInvariants runs all invariants of the liquidity module.
func AllInvariants(k Keeper) sdk.Invariant {
	return func(ctx sdk.Context) (string, bool) {
		// the escrow-amount invariant is currently the only one
		return LiquidityPoolsEscrowAmountInvariant(k)(ctx)
	}
}
// LiquidityPoolsEscrowAmountInvariant checks that the liquidity module's
// escrow account balance covers every coin still owed to unprocessed batch
// messages: remaining swap offer coins, deposit coin pairs, and withdraw
// pool coins.
func LiquidityPoolsEscrowAmountInvariant(k Keeper) sdk.Invariant {
	return func(ctx sdk.Context) (string, bool) {
		remainingCoins := sdk.NewCoins()
		batches := k.GetAllPoolBatches(ctx)
		for _, batch := range batches {
			// swap messages escrow their not-yet-swapped offer coins
			swapMsgs := k.GetAllPoolBatchSwapMsgStatesNotToBeDeleted(ctx, batch)
			for _, msg := range swapMsgs {
				remainingCoins = remainingCoins.Add(msg.RemainingOfferCoin)
			}
			// deposit messages escrow the full deposit coin pair
			depositMsgs := k.GetAllPoolBatchDepositMsgStatesNotToBeDeleted(ctx, batch)
			for _, msg := range depositMsgs {
				remainingCoins = remainingCoins.Add(msg.Msg.DepositCoins...)
			}
			// withdraw messages escrow the pool coin being redeemed
			withdrawMsgs := k.GetAllPoolBatchWithdrawMsgStatesNotToBeDeleted(ctx, batch)
			for _, msg := range withdrawMsgs {
				remainingCoins = remainingCoins.Add(msg.Msg.PoolCoin)
			}
		}
		batchEscrowAcc := k.accountKeeper.GetModuleAddress(types.ModuleName)
		escrowAmt := k.bankKeeper.GetAllBalances(ctx, batchEscrowAcc)
		// broken when the escrow account holds less than what is still owed
		broken := !escrowAmt.IsAllGTE(remainingCoins)
		return sdk.FormatInvariant(types.ModuleName, "batch escrow amount invariant broken",
			"batch escrow amount LT batch remaining amount"), broken
	}
}
// These invariants cannot be registered via RegisterInvariants since the module uses per-block batch execution.
// We should approach adding these invariant checks inside actual logics of deposit / withdraw / swap.
var (
	// BatchLogicInvariantCheckFlag gates the per-operation invariant checks
	// below. It is only used at the development stage, and is disabled at
	// the product level.
	BatchLogicInvariantCheckFlag = false
	// For coin amounts less than coinAmountThreshold, a high errorRate does not mean
	// that the calculation logic has errors.
	// For example, if there were two X coins and three Y coins in the pool, and someone deposits
	// one X coin and one Y coin, it's an acceptable input.
	// But pool price would change from 2/3 to 3/4 so errorRate will report 1/8(=0.125),
	// meaning that the price has changed by 12.5%.
	// This happens with small coin amounts, so there should be a threshold for coin amounts
	// before we calculate the errorRate.
	errorRateThreshold  = sdk.NewDecWithPrec(5, 2) // 5%
	coinAmountThreshold = sdk.NewInt(20)           // If a decimal error occurs at a value less than 20, the error rate is over 5%.
)
// errorRate returns the relative error |actual - expected| / expected as a
// non-negative decimal. When expected is zero it returns 1 (i.e. 100%) to
// avoid a divide-by-zero panic.
func errorRate(expected, actual sdk.Dec) sdk.Dec {
	if expected.IsZero() {
		return sdk.OneDec()
	}
	diff := actual.Sub(expected)
	return diff.Quo(expected).Abs()
}
// MintingPoolCoinsInvariant checks the correct ratio of minting amount of
// pool coins. With deposits adjusted for refunds, it requires:
//
//	NewPoolCoinAmount / LastPoolCoinSupply == DepositCoinA / LastReserveCoinA
//	NewPoolCoinAmount / LastPoolCoinSupply == DepositCoinB / LastReserveCoinB
//
// within errorRateThreshold. Checks are skipped when any involved amount is
// below coinAmountThreshold, where integer truncation alone can exceed the
// threshold. Panics when an invariant is violated.
func MintingPoolCoinsInvariant(poolCoinTotalSupply, mintPoolCoin, depositCoinA, depositCoinB, lastReserveCoinA, lastReserveCoinB, refundedCoinA, refundedCoinB sdk.Int) {
	if !refundedCoinA.IsZero() {
		depositCoinA = depositCoinA.Sub(refundedCoinA)
	}

	if !refundedCoinB.IsZero() {
		depositCoinB = depositCoinB.Sub(refundedCoinB)
	}

	poolCoinRatio := mintPoolCoin.ToDec().QuoInt(poolCoinTotalSupply)
	depositCoinARatio := depositCoinA.ToDec().QuoInt(lastReserveCoinA)
	depositCoinBRatio := depositCoinB.ToDec().QuoInt(lastReserveCoinB)
	expectedMintPoolCoinAmtBasedA := depositCoinARatio.MulInt(poolCoinTotalSupply).TruncateInt()
	expectedMintPoolCoinAmtBasedB := depositCoinBRatio.MulInt(poolCoinTotalSupply).TruncateInt()

	// NewPoolCoinAmount / LastPoolCoinSupply == AfterRefundedDepositCoinA / LastReserveCoinA
	// NewPoolCoinAmount / LastPoolCoinSupply == AfterRefundedDepositCoinB / LastReserveCoinB
	if depositCoinA.GTE(coinAmountThreshold) && depositCoinB.GTE(coinAmountThreshold) &&
		lastReserveCoinA.GTE(coinAmountThreshold) && lastReserveCoinB.GTE(coinAmountThreshold) &&
		mintPoolCoin.GTE(coinAmountThreshold) && poolCoinTotalSupply.GTE(coinAmountThreshold) {
		if errorRate(depositCoinARatio, poolCoinRatio).GT(errorRateThreshold) ||
			errorRate(depositCoinBRatio, poolCoinRatio).GT(errorRateThreshold) {
			panic("invariant check fails due to incorrect ratio of pool coins")
		}
	}

	// |mint - expected| / mint must stay within errorRateThreshold for both
	// the A-based and B-based expected mint amounts.
	// Bug fix: the B-based comparison previously subtracted
	// MinInt(mintPoolCoin, expectedMintPoolCoinAmtBasedA), mixing the A and
	// B expectations; it now uses expectedMintPoolCoinAmtBasedB throughout.
	if mintPoolCoin.GTE(coinAmountThreshold) &&
		(sdk.MaxInt(mintPoolCoin, expectedMintPoolCoinAmtBasedA).Sub(sdk.MinInt(mintPoolCoin, expectedMintPoolCoinAmtBasedA)).ToDec().QuoInt(mintPoolCoin).GT(errorRateThreshold) ||
			sdk.MaxInt(mintPoolCoin, expectedMintPoolCoinAmtBasedB).Sub(sdk.MinInt(mintPoolCoin, expectedMintPoolCoinAmtBasedB)).ToDec().QuoInt(mintPoolCoin).GT(errorRateThreshold)) {
		panic("invariant check fails due to incorrect amount of pool coins")
	}
}
// DepositInvariant checks the reserve amounts after a deposit: the reserves
// must grow by exactly the refund-adjusted deposit amounts, and the deposit
// ratio and pool price must match the pre-deposit reserve ratio within
// errorRateThreshold. Panics on violation.
// NOTE(review): the Quo calls assume depositCoinB and lastReserveCoinB /
// afterReserveCoinB are non-zero — confirm callers guarantee this.
func DepositInvariant(lastReserveCoinA, lastReserveCoinB, depositCoinA, depositCoinB, afterReserveCoinA, afterReserveCoinB, refundedCoinA, refundedCoinB sdk.Int) {
	depositCoinA = depositCoinA.Sub(refundedCoinA)
	depositCoinB = depositCoinB.Sub(refundedCoinB)

	depositCoinRatio := depositCoinA.ToDec().Quo(depositCoinB.ToDec())
	lastReserveRatio := lastReserveCoinA.ToDec().Quo(lastReserveCoinB.ToDec())
	afterReserveRatio := afterReserveCoinA.ToDec().Quo(afterReserveCoinB.ToDec())

	// AfterDepositReserveCoinA = LastReserveCoinA + AfterRefundedDepositCoinA
	// AfterDepositReserveCoinB = LastReserveCoinB + AfterRefundedDepositCoinB
	if !afterReserveCoinA.Equal(lastReserveCoinA.Add(depositCoinA)) ||
		!afterReserveCoinB.Equal(lastReserveCoinB.Add(depositCoinB)) {
		panic("invariant check fails due to incorrect deposit amounts")
	}

	if depositCoinA.GTE(coinAmountThreshold) && depositCoinB.GTE(coinAmountThreshold) &&
		lastReserveCoinA.GTE(coinAmountThreshold) && lastReserveCoinB.GTE(coinAmountThreshold) {
		// AfterRefundedDepositCoinA / AfterRefundedDepositCoinB = LastReserveCoinA / LastReserveCoinB
		if errorRate(lastReserveRatio, depositCoinRatio).GT(errorRateThreshold) {
			panic("invariant check fails due to incorrect deposit ratio")
		}
		// LastReserveCoinA / LastReserveCoinB = AfterDepositReserveCoinA / AfterDepositReserveCoinB
		if errorRate(lastReserveRatio, afterReserveRatio).GT(errorRateThreshold) {
			panic("invariant check fails due to incorrect pool price ratio")
		}
	}
}
// BurningPoolCoinsInvariant checks the correct burning amount of pool coins:
// the burned fraction of the pool coin supply must be at least the fraction
// of each reserve paid out (withdraw amount plus withdraw fee), and the
// burned amount must match both reserve-implied expectations within
// errorRateThreshold. Panics on violation. The check is skipped when the
// entire supply is burned (full withdrawal).
func BurningPoolCoinsInvariant(burnedPoolCoin, withdrawCoinA, withdrawCoinB, reserveCoinA, reserveCoinB, lastPoolCoinSupply sdk.Int, withdrawFeeCoins sdk.Coins) {
	burningPoolCoinRatio := burnedPoolCoin.ToDec().Quo(lastPoolCoinSupply.ToDec())
	if burningPoolCoinRatio.Equal(sdk.OneDec()) {
		// whole supply burned: ratios are trivially satisfied
		return
	}

	// withdraw fee coins are ordered {A, B} to match the reserves
	withdrawCoinARatio := withdrawCoinA.Add(withdrawFeeCoins[0].Amount).ToDec().Quo(reserveCoinA.ToDec())
	withdrawCoinBRatio := withdrawCoinB.Add(withdrawFeeCoins[1].Amount).ToDec().Quo(reserveCoinB.ToDec())

	// BurnedPoolCoinAmount / LastPoolCoinSupply >= (WithdrawCoinA+WithdrawFeeCoinA) / LastReserveCoinA
	// BurnedPoolCoinAmount / LastPoolCoinSupply >= (WithdrawCoinB+WithdrawFeeCoinB) / LastReserveCoinB
	if withdrawCoinARatio.GT(burningPoolCoinRatio) || withdrawCoinBRatio.GT(burningPoolCoinRatio) {
		panic("invariant check fails due to incorrect ratio of burning pool coins")
	}

	expectedBurningPoolCoinBasedA := lastPoolCoinSupply.ToDec().MulTruncate(withdrawCoinARatio).TruncateInt()
	expectedBurningPoolCoinBasedB := lastPoolCoinSupply.ToDec().MulTruncate(withdrawCoinBRatio).TruncateInt()

	if burnedPoolCoin.GTE(coinAmountThreshold) &&
		(sdk.MaxInt(burnedPoolCoin, expectedBurningPoolCoinBasedA).Sub(sdk.MinInt(burnedPoolCoin, expectedBurningPoolCoinBasedA)).ToDec().QuoInt(burnedPoolCoin).GT(errorRateThreshold) ||
			sdk.MaxInt(burnedPoolCoin, expectedBurningPoolCoinBasedB).Sub(sdk.MinInt(burnedPoolCoin, expectedBurningPoolCoinBasedB)).ToDec().QuoInt(burnedPoolCoin).GT(errorRateThreshold)) {
		panic("invariant check fails due to incorrect amount of burning pool coins")
	}
}
// WithdrawReserveCoinsInvariant checks that the reserve balances and the pool
// coin total supply after a withdrawal equal the expected post-withdraw
// amounts, panicking on any mismatch.
func WithdrawReserveCoinsInvariant(withdrawCoinA, withdrawCoinB, reserveCoinA, reserveCoinB,
	afterReserveCoinA, afterReserveCoinB, afterPoolCoinTotalSupply, lastPoolCoinSupply, burnedPoolCoin sdk.Int) {
	expectedReserveA := reserveCoinA.Sub(withdrawCoinA)
	expectedReserveB := reserveCoinB.Sub(withdrawCoinB)
	expectedSupply := lastPoolCoinSupply.Sub(burnedPoolCoin)
	// AfterWithdrawReserveCoinA = LastReserveCoinA - WithdrawCoinA
	if !afterReserveCoinA.Equal(expectedReserveA) {
		panic("invariant check fails due to incorrect withdraw coin A amount")
	}
	// AfterWithdrawReserveCoinB = LastReserveCoinB - WithdrawCoinB
	if !afterReserveCoinB.Equal(expectedReserveB) {
		panic("invariant check fails due to incorrect withdraw coin B amount")
	}
	// AfterWithdrawPoolCoinSupply = LastPoolCoinSupply - BurnedPoolCoinAmount
	if !afterPoolCoinTotalSupply.Equal(expectedSupply) {
		panic("invariant check fails due to incorrect total supply")
	}
}
// WithdrawAmountInvariant checks that the withdrawn coin amounts are within
// one unit of the ideal amounts implied by the burned pool coin ratio, net of
// the withdraw fee. The check is skipped when the entire supply is burned.
func WithdrawAmountInvariant(withdrawCoinA, withdrawCoinB, reserveCoinA, reserveCoinB, burnedPoolCoin, poolCoinSupply sdk.Int, withdrawFeeRate sdk.Dec) {
	// Fraction of the pool being withdrawn, after deducting the fee.
	netRatio := burnedPoolCoin.ToDec().Quo(poolCoinSupply.ToDec()).Mul(sdk.OneDec().Sub(withdrawFeeRate))
	idealA := reserveCoinA.ToDec().Mul(netRatio)
	idealB := reserveCoinB.ToDec().Mul(netRatio)
	deltaA := idealA.Sub(withdrawCoinA.ToDec()).Abs()
	deltaB := idealB.Sub(withdrawCoinB.ToDec()).Abs()
	if burnedPoolCoin.Equal(poolCoinSupply) {
		return
	}
	if deltaA.GTE(sdk.OneDec()) {
		panic(fmt.Sprintf("withdraw coin amount %v differs too much from %v", withdrawCoinA, idealA))
	}
	if deltaB.GTE(sdk.OneDec()) {
		panic(fmt.Sprintf("withdraw coin amount %v differs too much from %v", withdrawCoinB, idealB))
	}
}
// ImmutablePoolPriceAfterWithdrawInvariant checks the immutable pool price after withdrawing coins.
func ImmutablePoolPriceAfterWithdrawInvariant(reserveCoinA, reserveCoinB, withdrawCoinA, withdrawCoinB, afterReserveCoinA, afterReserveCoinB sdk.Int) {
	// TestReinitializePool tests a scenario where after reserve coins are zero
	if !afterReserveCoinA.IsZero() && !afterReserveCoinB.IsZero() {
		// The parameters are reused to hold the expected post-withdraw
		// reserves; from here on reserveCoinA/B are "last reserve - withdraw".
		reserveCoinA = reserveCoinA.Sub(withdrawCoinA)
		reserveCoinB = reserveCoinB.Sub(withdrawCoinB)
		reserveCoinRatio := reserveCoinA.ToDec().Quo(reserveCoinB.ToDec())
		afterReserveCoinRatio := afterReserveCoinA.ToDec().Quo(afterReserveCoinB.ToDec())
		// LastReserveCoinA / LastReserveCoinB = AfterWithdrawReserveCoinA / AfterWithdrawReserveCoinB
		// Only enforced above coinAmountThreshold, where the error-rate
		// comparison is meaningful.
		if reserveCoinA.GTE(coinAmountThreshold) && reserveCoinB.GTE(coinAmountThreshold) &&
			withdrawCoinA.GTE(coinAmountThreshold) && withdrawCoinB.GTE(coinAmountThreshold) &&
			errorRate(reserveCoinRatio, afterReserveCoinRatio).GT(errorRateThreshold) {
			panic("invariant check fails due to incorrect pool price ratio")
		}
	}
}
// SwapMatchingInvariants checks swap matching results of both X-to-Y and
// Y-to-X cases: the number of unmatched messages implied by the match result
// lengths must agree with types.CountNotMatchedMsgs.
func SwapMatchingInvariants(xToY, yToX []*types.SwapMsgState, matchResultXtoY, matchResultYtoX []types.MatchResult) {
	remainingXtoY := len(xToY) - len(matchResultXtoY)
	remainingYtoX := len(yToX) - len(matchResultYtoX)
	if remainingXtoY != types.CountNotMatchedMsgs(xToY) {
		panic("invariant check fails due to invalid xToY match length")
	}
	if remainingYtoX != types.CountNotMatchedMsgs(yToX) {
		panic("invariant check fails due to invalid yToX match length")
	}
}
// SwapPriceInvariants checks swap price invariants: the coin deltas
// accumulated over all matched orders, offset by the pool's own deltas, must
// both net out to zero, and the resulting swap price must be valid.
func SwapPriceInvariants(matchResultXtoY, matchResultYtoX []types.MatchResult, poolXDelta, poolYDelta, poolXDelta2, poolYDelta2 sdk.Dec, result types.BatchResult) {
	invariantCheckX := sdk.ZeroDec()
	invariantCheckY := sdk.ZeroDec()
	// X-to-Y orders spend X and receive Y.
	for _, m := range matchResultXtoY {
		invariantCheckX = invariantCheckX.Sub(m.TransactedCoinAmt)
		invariantCheckY = invariantCheckY.Add(m.ExchangedDemandCoinAmt)
	}
	// Y-to-X orders spend Y and receive X.
	for _, m := range matchResultYtoX {
		invariantCheckY = invariantCheckY.Sub(m.TransactedCoinAmt)
		invariantCheckX = invariantCheckX.Add(m.ExchangedDemandCoinAmt)
	}
	invariantCheckX = invariantCheckX.Add(poolXDelta2)
	invariantCheckY = invariantCheckY.Add(poolYDelta2)
	// BUG FIX: the original condition used &&, which only panicked when BOTH
	// accumulators were non-zero; a discrepancy on a single side slipped
	// through. The invariant is violated if EITHER side is non-zero.
	if !invariantCheckX.IsZero() || !invariantCheckY.IsZero() {
		panic(fmt.Errorf("invariant check fails due to invalid swap price: X=%s, Y=%s",
			invariantCheckX.String(), invariantCheckY.String()))
	}
	validitySwapPrice := types.CheckSwapPrice(matchResultXtoY, matchResultYtoX, result.SwapPrice)
	if !validitySwapPrice {
		panic("invariant check fails due to invalid swap price")
	}
}
// SwapPriceDirectionInvariants checks whether the calculated swap price moved
// from the last pool price in the direction declared by the batch result
// (increased, decreased, or stayed), panicking on a mismatch.
func SwapPriceDirectionInvariants(currentPoolPrice sdk.Dec, batchResult types.BatchResult) {
	consistent := true
	switch batchResult.PriceDirection {
	case types.Increasing:
		consistent = batchResult.SwapPrice.GT(currentPoolPrice)
	case types.Decreasing:
		consistent = batchResult.SwapPrice.LT(currentPoolPrice)
	case types.Staying:
		consistent = batchResult.SwapPrice.Equal(currentPoolPrice)
	}
	if !consistent {
		panic("invariant check fails due to incorrect price direction")
	}
}
// SwapMsgStatesInvariants checks swap match result states invariants.
func SwapMsgStatesInvariants(matchResultXtoY, matchResultYtoX []types.MatchResult, matchResultMap map[uint64]types.MatchResult,
	swapMsgStates []*types.SwapMsgState, xToY, yToX []*types.SwapMsgState) {
	// Every match result must appear in the map exactly once.
	if len(matchResultXtoY)+len(matchResultYtoX) != len(matchResultMap) {
		panic("invalid length of match result")
	}
	// Map keys must agree with the MsgIndex of the state they point at.
	for k, v := range matchResultMap {
		if k != v.SwapMsgState.MsgIndex {
			panic("broken map consistency")
		}
	}
	// For each state, any entry with the same MsgIndex in xToY, yToX, or the
	// match map must be the very same object: equal by value AND by pointer.
	for _, sms := range swapMsgStates {
		for _, smsXtoY := range xToY {
			if sms.MsgIndex == smsXtoY.MsgIndex {
				if *(sms) != *(smsXtoY) || sms != smsXtoY {
					panic("swap message state not matched")
				} else {
					break
				}
			}
		}
		for _, smsYtoX := range yToX {
			if sms.MsgIndex == smsYtoX.MsgIndex {
				if *(sms) != *(smsYtoX) || sms != smsYtoX {
					panic("swap message state not matched")
				} else {
					break
				}
			}
		}
		if msgAfter, ok := matchResultMap[sms.MsgIndex]; ok {
			if sms.MsgIndex == msgAfter.SwapMsgState.MsgIndex {
				if *(sms) != *(msgAfter.SwapMsgState) || sms != msgAfter.SwapMsgState {
					panic("batch message not matched")
				}
			} else {
				panic("fail msg pointer consistency")
			}
		}
	}
}
// SwapOrdersExecutionStateInvariants checks all executed orders have order price which is not "executable" or not "unexecutable".
func SwapOrdersExecutionStateInvariants(matchResultMap map[uint64]types.MatchResult, swapMsgStates []*types.SwapMsgState,
	batchResult types.BatchResult, denomX string) {
	for _, sms := range swapMsgStates {
		if _, ok := matchResultMap[sms.MsgIndex]; ok {
			// Matched orders must be flagged as executed and succeeded.
			if !sms.Executed || !sms.Succeeded {
				panic("swap msg state consistency error, matched but not succeeded")
			}
			// Orders offering denomX are buy orders; all others are sells.
			if sms.Msg.OfferCoin.Denom == denomX {
				// buy orders having equal or higher order price than found swapPrice
				if !sms.Msg.OrderPrice.GTE(batchResult.SwapPrice) {
					panic("execution validity failed, executed but unexecutable")
				}
			} else {
				// sell orders having equal or lower order price than found swapPrice
				if !sms.Msg.OrderPrice.LTE(batchResult.SwapPrice) {
					panic("execution validity failed, executed but unexecutable")
				}
			}
		} else {
			// check whether every unexecuted orders have order price which is not "executable"
			if sms.Executed && sms.Succeeded {
				panic("sms consistency error, not matched but succeeded")
			}
			if sms.Msg.OfferCoin.Denom == denomX {
				// buy orders having equal or lower order price than found swapPrice
				if !sms.Msg.OrderPrice.LTE(batchResult.SwapPrice) {
					panic("execution validity failed, unexecuted but executable")
				}
			} else {
				// sell orders having equal or higher order price than found swapPrice
				if !sms.Msg.OrderPrice.GTE(batchResult.SwapPrice) {
					panic("execution validity failed, unexecuted but executable")
				}
			}
		}
	}
}
package tokens
import "fmt"
// Claims represents the claim set of a token, keyed by claim name.
type Claims map[string]interface{}

// getFloat64Claim looks up claim and coerces any numeric value to float64.
// The second result reports whether the claim exists and holds a numeric
// type; string or other values yield (0, false).
func (c Claims) getFloat64Claim(claim string) (float64, bool) {
	value, present := c[claim]
	if !present {
		return 0, false
	}
	switch v := value.(type) {
	case float64:
		return v, true
	case float32:
		return float64(v), true
	case int:
		return float64(v), true
	case int32:
		return float64(v), true
	case int64:
		return float64(v), true
	case uint:
		return float64(v), true
	case uint32:
		return float64(v), true
	case uint64:
		return float64(v), true
	}
	return 0, false
}
// getInt64Claim looks up claim and coerces any numeric value to int64
// (floats are truncated by the conversion). The second result reports
// whether the claim exists and holds a numeric type.
func (c Claims) getInt64Claim(claim string) (int64, bool) {
	value, present := c[claim]
	if !present {
		return 0, false
	}
	switch v := value.(type) {
	case int64:
		return v, true
	case int:
		return int64(v), true
	case int32:
		return int64(v), true
	case uint:
		return int64(v), true
	case uint32:
		return int64(v), true
	case uint64:
		return int64(v), true
	case float32:
		return int64(v), true
	case float64:
		return int64(v), true
	}
	return 0, false
}
// getBoolClaim looks up claim and returns its value when it is a bool.
// The second result is false when the claim is absent or not a bool.
func (c Claims) getBoolClaim(claim string) (bool, bool) {
	value, present := c[claim]
	if !present {
		return false, false
	}
	b, isBool := value.(bool)
	return b, isBool
}
// HasClaim reports whether the claim set contains the given claim.
func (c Claims) HasClaim(claim string) bool {
	_, present := c[claim]
	return present
}
// GetClaim returns the raw claim value and whether the claim exists.
func (c Claims) GetClaim(claim string) (interface{}, bool) {
	value, present := c[claim]
	return value, present
}
// GetClaimMustString returns the claim value rendered as a string. String
// values are returned as-is; any other value is formatted with %v. The second
// result reports whether the claim exists.
func (c Claims) GetClaimMustString(claim string) (string, bool) {
	value, present := c[claim]
	if !present {
		return "", false
	}
	if s, isString := value.(string); isString {
		return s, true
	}
	return fmt.Sprintf("%v", value), true
}
// TokenType identifies the kind of a token (bearer or refresh).
type TokenType string

const (
	// BearerTokenType is the "Bearer" token type.
	BearerTokenType TokenType = "Bearer"
	// RefreshTokenType is the "Refresh" token type.
	RefreshTokenType TokenType = "Refresh"
)
// getTokenParts splits a three-part token slice into its individual
// components. It assumes parts has at least three elements — TODO confirm
// callers validate the length before calling.
func getTokenParts(parts []string) (string, string, string) {
	first, second, third := parts[0], parts[1], parts[2]
	return first, second, third
}
type defaultToken struct {
claims Claims
}
func (at *defaultToken) RawClaims() Claims {
return at.claims
} | tokens/shared.go | 0.658637 | 0.523055 | shared.go | starcoder |
package jp
// X creates an Expr with no fragments.
func X() Expr {
	return Expr{}
}

// A creates an Expr holding a single At (@) fragment.
func A() Expr {
	return append(X(), At('@'))
}

// B creates an Expr holding a single Bracket fragment.
func B() Expr {
	return append(X(), Bracket(' '))
}

// C creates an Expr holding a single Child fragment for key.
func C(key string) Expr {
	return append(X(), Child(key))
}

// D creates an Expr holding a single recursive Descent fragment.
func D() Expr {
	return append(X(), Descent('.'))
}

// F creates an Expr holding a single Filter fragment built from e.
func F(e *Equation) Expr {
	return append(X(), e.Filter())
}

// N creates an Expr holding a single Nth fragment.
func N(n int) Expr {
	return append(X(), Nth(n))
}

// R creates an Expr holding a single Root ($) fragment.
func R() Expr {
	return append(X(), Root('$'))
}

// S creates an Expr holding a single Slice fragment over start and rest.
func S(start int, rest ...int) Expr {
	nums := append([]int{start}, rest...)
	return append(X(), Slice(nums))
}

// U creates an Expr holding a single Union fragment over keys.
func U(keys ...interface{}) Expr {
	return append(X(), NewUnion(keys...))
}

// W creates an Expr holding a single Wildcard (*) fragment.
func W() Expr {
	return append(X(), Wildcard('*'))
}
// A appends an At fragment to the Expr.
func (x Expr) A() Expr {
return append(x, At('@'))
}
// At appends an At fragment to the Expr.
func (x Expr) At() Expr {
return append(x, At('@'))
}
// B appends a Bracket fragment to the Expr.
func (x Expr) B() Expr {
return append(x, Bracket(' '))
}
// C appends a Child fragment to the Expr.
func (x Expr) C(key string) Expr {
return append(x, Child(key))
}
// Child appends a Child fragment to the Expr.
func (x Expr) Child(key string) Expr {
return append(x, Child(key))
}
// D appends a recursive Descent fragment to the Expr.
func (x Expr) D() Expr {
return append(x, Descent('.'))
}
// Descent appends a recursive Descent fragment to the Expr.
func (x Expr) Descent() Expr {
return append(x, Descent('.'))
}
// F appends a Filter fragment to the Expr.
func (x Expr) F(e *Equation) Expr {
return append(x, e.Filter())
}
// Filter appends a Filter fragment to the Expr.
func (x Expr) Filter(e *Equation) Expr {
return append(x, e.Filter())
}
// N appends an Nth fragment to the Expr.
func (x Expr) N(n int) Expr {
return append(x, Nth(n))
}
// Nth appends an Nth fragment to the Expr.
func (x Expr) Nth(n int) Expr {
return append(x, Nth(n))
}
// R appends a Root fragment to the Expr.
func (x Expr) R() Expr {
return append(x, Root('$'))
}
// Root appends a Root fragment to the Expr.
func (x Expr) Root() Expr {
return append(x, Root('$'))
}
// S appends a Slice fragment to the Expr.
func (x Expr) S(start int, rest ...int) Expr {
return append(x, Slice(append([]int{start}, rest...)))
}
// Slice appends a Slice fragment to the Expr.
func (x Expr) Slice(start int, rest ...int) Expr {
return append(x, Slice(append([]int{start}, rest...)))
}
// U appends a Union fragment to the Expr.
func (x Expr) U(keys ...interface{}) Expr {
return append(x, NewUnion(keys...))
}
// Union appends a Union fragment to the Expr.
func (x Expr) Union(keys ...interface{}) Expr {
return append(x, NewUnion(keys...))
}
// W appends a Wildcard fragment to the Expr.
func (x Expr) W() Expr {
return append(x, Wildcard('*'))
}
// Wildcard appends a Wildcard fragment to the Expr.
func (x Expr) Wildcard() Expr {
return append(x, Wildcard('*'))
} | jp/build.go | 0.829699 | 0.467453 | build.go | starcoder |
package options
// UpdateOptions represents all possible options to the UpdateOne() and UpdateMany() functions.
// All fields are pointers; a nil field means "not set" and is skipped when merging.
type UpdateOptions struct {
	ArrayFilters *ArrayFilters // A set of filters specifying to which array elements an update should apply
	BypassDocumentValidation *bool // If true, allows the write to opt-out of document level validation
	Collation *Collation // Specifies a collation
	Upsert *bool // When true, creates a new document if no document matches the query
}
// Update returns a pointer to a new, zero-valued UpdateOptions.
func Update() *UpdateOptions {
	return new(UpdateOptions)
}
// SetArrayFilters specifies a set of filters specifying to which array elements an update should apply
// Valid for server versions >= 3.6.
// Note: af is passed by value, so the stored pointer refers to a copy of the
// caller's value.
func (uo *UpdateOptions) SetArrayFilters(af ArrayFilters) *UpdateOptions {
	uo.ArrayFilters = &af
	return uo
}

// SetBypassDocumentValidation allows the write to opt-out of document level validation.
// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
func (uo *UpdateOptions) SetBypassDocumentValidation(b bool) *UpdateOptions {
	uo.BypassDocumentValidation = &b
	return uo
}

// SetCollation specifies a collation.
// Valid for server versions >= 3.4.
func (uo *UpdateOptions) SetCollation(c *Collation) *UpdateOptions {
	uo.Collation = c
	return uo
}

// SetUpsert allows the creation of a new document if not document matches the query
func (uo *UpdateOptions) SetUpsert(b bool) *UpdateOptions {
	uo.Upsert = &b
	return uo
}
// MergeUpdateOptions combines the argued UpdateOptions into a single UpdateOptions in a last-one-wins fashion
func MergeUpdateOptions(opts ...*UpdateOptions) *UpdateOptions {
uOpts := Update()
for _, uo := range opts {
if uo == nil {
continue
}
if uo.ArrayFilters != nil {
uOpts.ArrayFilters = uo.ArrayFilters
}
if uo.BypassDocumentValidation != nil {
uOpts.BypassDocumentValidation = uo.BypassDocumentValidation
}
if uo.Collation != nil {
uOpts.Collation = uo.Collation
}
if uo.Upsert != nil {
uOpts.Upsert = uo.Upsert
}
}
return uOpts
} | vendor/go.mongodb.org/mongo-driver/mongo/options/updateoptions.go | 0.816991 | 0.403214 | updateoptions.go | starcoder |
package msgraph
// RatingUnitedStatesMoviesType is an enumeration of United States movie
// rating levels.
type RatingUnitedStatesMoviesType string

const (
	// RatingUnitedStatesMoviesTypeVAllAllowed is the "AllAllowed" rating.
	RatingUnitedStatesMoviesTypeVAllAllowed RatingUnitedStatesMoviesType = "AllAllowed"
	// RatingUnitedStatesMoviesTypeVAllBlocked is the "AllBlocked" rating.
	RatingUnitedStatesMoviesTypeVAllBlocked RatingUnitedStatesMoviesType = "AllBlocked"
	// RatingUnitedStatesMoviesTypeVGeneral is the "General" rating.
	RatingUnitedStatesMoviesTypeVGeneral RatingUnitedStatesMoviesType = "General"
	// RatingUnitedStatesMoviesTypeVParentalGuidance is the "ParentalGuidance" rating.
	RatingUnitedStatesMoviesTypeVParentalGuidance RatingUnitedStatesMoviesType = "ParentalGuidance"
	// RatingUnitedStatesMoviesTypeVParentalGuidance13 is the "ParentalGuidance13" rating.
	RatingUnitedStatesMoviesTypeVParentalGuidance13 RatingUnitedStatesMoviesType = "ParentalGuidance13"
	// RatingUnitedStatesMoviesTypeVRestricted is the "Restricted" rating.
	RatingUnitedStatesMoviesTypeVRestricted RatingUnitedStatesMoviesType = "Restricted"
	// RatingUnitedStatesMoviesTypeVAdults is the "Adults" rating.
	RatingUnitedStatesMoviesTypeVAdults RatingUnitedStatesMoviesType = "Adults"
)

// The P-helpers below return a pointer to the corresponding constant, for
// populating optional (*RatingUnitedStatesMoviesType) struct fields.

// RatingUnitedStatesMoviesTypePAllAllowed returns a pointer to RatingUnitedStatesMoviesTypeVAllAllowed
func RatingUnitedStatesMoviesTypePAllAllowed() *RatingUnitedStatesMoviesType {
	value := RatingUnitedStatesMoviesTypeVAllAllowed
	return &value
}

// RatingUnitedStatesMoviesTypePAllBlocked returns a pointer to RatingUnitedStatesMoviesTypeVAllBlocked
func RatingUnitedStatesMoviesTypePAllBlocked() *RatingUnitedStatesMoviesType {
	value := RatingUnitedStatesMoviesTypeVAllBlocked
	return &value
}

// RatingUnitedStatesMoviesTypePGeneral returns a pointer to RatingUnitedStatesMoviesTypeVGeneral
func RatingUnitedStatesMoviesTypePGeneral() *RatingUnitedStatesMoviesType {
	value := RatingUnitedStatesMoviesTypeVGeneral
	return &value
}

// RatingUnitedStatesMoviesTypePParentalGuidance returns a pointer to RatingUnitedStatesMoviesTypeVParentalGuidance
func RatingUnitedStatesMoviesTypePParentalGuidance() *RatingUnitedStatesMoviesType {
	value := RatingUnitedStatesMoviesTypeVParentalGuidance
	return &value
}

// RatingUnitedStatesMoviesTypePParentalGuidance13 returns a pointer to RatingUnitedStatesMoviesTypeVParentalGuidance13
func RatingUnitedStatesMoviesTypePParentalGuidance13() *RatingUnitedStatesMoviesType {
	value := RatingUnitedStatesMoviesTypeVParentalGuidance13
	return &value
}

// RatingUnitedStatesMoviesTypePRestricted returns a pointer to RatingUnitedStatesMoviesTypeVRestricted
func RatingUnitedStatesMoviesTypePRestricted() *RatingUnitedStatesMoviesType {
	value := RatingUnitedStatesMoviesTypeVRestricted
	return &value
}

// RatingUnitedStatesMoviesTypePAdults returns a pointer to RatingUnitedStatesMoviesTypeVAdults
func RatingUnitedStatesMoviesTypePAdults() *RatingUnitedStatesMoviesType {
	value := RatingUnitedStatesMoviesTypeVAdults
	return &value
}
package otlptest
import (
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"go.opentelemetry.io/otel/exporters/otlp"
commonpb "go.opentelemetry.io/otel/exporters/otlp/internal/opentelemetry-proto-gen/common/v1"
"go.opentelemetry.io/otel/label"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
exportmetric "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/controller/push"
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
)
// RunEndToEndTest can be used by protocol driver tests to validate
// themselves.
//
// It drives the exporter end to end: two tracer providers (with distinct
// resources) each emit a batch of spans, a push controller records one
// instrument of every (kind, number type) combination, everything is flushed
// and shut down, and the data received by the trace and metric collectors is
// then verified.
func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlp.Exporter, mcTraces, mcMetrics Collector) {
	pOpts := []sdktrace.TracerProviderOption{
		sdktrace.WithConfig(sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}),
		sdktrace.WithBatcher(
			exp,
			// add following two options to ensure flush
			// NOTE(review): these arguments are bare integers; if
			// WithBatchTimeout takes a time.Duration this is 5ns, i.e. an
			// effectively immediate flush — confirm that is the intent.
			sdktrace.WithBatchTimeout(5),
			sdktrace.WithMaxExportBatchSize(10),
		),
	}
	// Two providers with different resources, so the collector is expected to
	// report exactly two ResourceSpans entries below.
	// NOTE(review): the trailing ')' in "rv11)"/"rv12)" looks like a typo in
	// the label values, though nothing below asserts on them.
	tp1 := sdktrace.NewTracerProvider(append(pOpts,
		sdktrace.WithResource(resource.NewWithAttributes(
			label.String("rk1", "rv11)"),
			label.Int64("rk2", 5),
		)))...)
	tp2 := sdktrace.NewTracerProvider(append(pOpts,
		sdktrace.WithResource(resource.NewWithAttributes(
			label.String("rk1", "rv12)"),
			label.Float32("rk3", 6.5),
		)))...)
	tr1 := tp1.Tracer("test-tracer1")
	tr2 := tp2.Tracer("test-tracer2")
	// Now create few spans
	m := 4
	for i := 0; i < m; i++ {
		_, span := tr1.Start(ctx, "AlwaysSample")
		span.SetAttributes(label.Int64("i", int64(i)))
		span.End()
		_, span = tr2.Start(ctx, "AlwaysSample")
		span.SetAttributes(label.Int64("i", int64(i)))
		span.End()
	}
	// Metrics side: a push controller feeding the same exporter. Note that
	// the local variable deliberately shadows the imported package alias
	// "processor" from this point on.
	selector := simple.NewWithInexpensiveDistribution()
	processor := processor.New(selector, exportmetric.StatelessExportKindSelector())
	pusher := push.New(processor, exp)
	pusher.Start()
	meter := pusher.MeterProvider().Meter("test-meter")
	labels := []label.KeyValue{label.Bool("test", true)}
	// One instrument per (instrument kind, number kind) combination, with the
	// value each is expected to report.
	type data struct {
		iKind metric.InstrumentKind
		nKind number.Kind
		val int64
	}
	instruments := map[string]data{
		"test-int64-counter": {metric.CounterInstrumentKind, number.Int64Kind, 1},
		"test-float64-counter": {metric.CounterInstrumentKind, number.Float64Kind, 1},
		"test-int64-valuerecorder": {metric.ValueRecorderInstrumentKind, number.Int64Kind, 2},
		"test-float64-valuerecorder": {metric.ValueRecorderInstrumentKind, number.Float64Kind, 2},
		"test-int64-valueobserver": {metric.ValueObserverInstrumentKind, number.Int64Kind, 3},
		"test-float64-valueobserver": {metric.ValueObserverInstrumentKind, number.Float64Kind, 3},
	}
	for name, data := range instruments {
		// Re-declare to capture a per-iteration copy for the observer
		// callbacks registered below (pre-Go1.22 loop variable semantics).
		data := data
		switch data.iKind {
		case metric.CounterInstrumentKind:
			switch data.nKind {
			case number.Int64Kind:
				metric.Must(meter).NewInt64Counter(name).Add(ctx, data.val, labels...)
			case number.Float64Kind:
				metric.Must(meter).NewFloat64Counter(name).Add(ctx, float64(data.val), labels...)
			default:
				assert.Failf(t, "unsupported number testing kind", data.nKind.String())
			}
		case metric.ValueRecorderInstrumentKind:
			switch data.nKind {
			case number.Int64Kind:
				metric.Must(meter).NewInt64ValueRecorder(name).Record(ctx, data.val, labels...)
			case number.Float64Kind:
				metric.Must(meter).NewFloat64ValueRecorder(name).Record(ctx, float64(data.val), labels...)
			default:
				assert.Failf(t, "unsupported number testing kind", data.nKind.String())
			}
		case metric.ValueObserverInstrumentKind:
			switch data.nKind {
			case number.Int64Kind:
				metric.Must(meter).NewInt64ValueObserver(name,
					func(_ context.Context, result metric.Int64ObserverResult) {
						result.Observe(data.val, labels...)
					},
				)
			case number.Float64Kind:
				// Immediately-invoked wrapper binds the observed value.
				callback := func(v float64) metric.Float64ObserverFunc {
					return metric.Float64ObserverFunc(func(_ context.Context, result metric.Float64ObserverResult) { result.Observe(v, labels...) })
				}(float64(data.val))
				metric.Must(meter).NewFloat64ValueObserver(name, callback)
			default:
				assert.Failf(t, "unsupported number testing kind", data.nKind.String())
			}
		default:
			assert.Failf(t, "unsupported metrics testing kind", data.iKind.String())
		}
	}
	// Flush and close.
	pusher.Stop()
	func() {
		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
		defer cancel()
		if err := tp1.Shutdown(ctx); err != nil {
			t.Fatalf("failed to shut down a tracer provider 1: %v", err)
		}
		if err := tp2.Shutdown(ctx); err != nil {
			t.Fatalf("failed to shut down a tracer provider 2: %v", err)
		}
	}()
	// Wait >2 cycles.
	<-time.After(40 * time.Millisecond)
	// Now shutdown the exporter
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	if err := exp.Shutdown(ctx); err != nil {
		t.Fatalf("failed to stop the exporter: %v", err)
	}
	// Shutdown the collector too so that we can begin
	// verification checks of expected data back.
	_ = mcTraces.Stop()
	_ = mcMetrics.Stop()
	// Now verify that we only got two resources
	rss := mcTraces.GetResourceSpans()
	if got, want := len(rss), 2; got != want {
		t.Fatalf("resource span count: got %d, want %d\n", got, want)
	}
	// Now verify spans and attributes for each resource span.
	for _, rs := range rss {
		if len(rs.InstrumentationLibrarySpans) == 0 {
			t.Fatalf("zero Instrumentation Library Spans")
		}
		if got, want := len(rs.InstrumentationLibrarySpans[0].Spans), m; got != want {
			t.Fatalf("span counts: got %d, want %d", got, want)
		}
		// Each of the m spans carried a distinct "i" attribute; collect them
		// and check all values 0..m-1 arrived.
		attrMap := map[int64]bool{}
		for _, s := range rs.InstrumentationLibrarySpans[0].Spans {
			if gotName, want := s.Name, "AlwaysSample"; gotName != want {
				t.Fatalf("span name: got %s, want %s", gotName, want)
			}
			attrMap[s.Attributes[0].Value.Value.(*commonpb.AnyValue_IntValue).IntValue] = true
		}
		if got, want := len(attrMap), m; got != want {
			t.Fatalf("span attribute unique values: got %d want %d", got, want)
		}
		for i := 0; i < m; i++ {
			_, ok := attrMap[int64(i)]
			if !ok {
				t.Fatalf("span with attribute %d missing", i)
			}
		}
	}
	// Verify every instrument produced exactly the value recorded above.
	metrics := mcMetrics.GetMetrics()
	assert.Len(t, metrics, len(instruments), "not enough metrics exported")
	seen := make(map[string]struct{}, len(instruments))
	for _, m := range metrics {
		data, ok := instruments[m.Name]
		if !ok {
			assert.Failf(t, "unknown metrics", m.Name)
			continue
		}
		seen[m.Name] = struct{}{}
		switch data.iKind {
		case metric.CounterInstrumentKind:
			switch data.nKind {
			case number.Int64Kind:
				if dp := m.GetIntSum().DataPoints; assert.Len(t, dp, 1) {
					assert.Equal(t, data.val, dp[0].Value, "invalid value for %q", m.Name)
				}
			case number.Float64Kind:
				if dp := m.GetDoubleSum().DataPoints; assert.Len(t, dp, 1) {
					assert.Equal(t, float64(data.val), dp[0].Value, "invalid value for %q", m.Name)
				}
			default:
				assert.Failf(t, "invalid number kind", data.nKind.String())
			}
		case metric.ValueObserverInstrumentKind:
			switch data.nKind {
			case number.Int64Kind:
				if dp := m.GetIntGauge().DataPoints; assert.Len(t, dp, 1) {
					assert.Equal(t, data.val, dp[0].Value, "invalid value for %q", m.Name)
				}
			case number.Float64Kind:
				if dp := m.GetDoubleGauge().DataPoints; assert.Len(t, dp, 1) {
					assert.Equal(t, float64(data.val), dp[0].Value, "invalid value for %q", m.Name)
				}
			default:
				assert.Failf(t, "invalid number kind", data.nKind.String())
			}
		case metric.ValueRecorderInstrumentKind:
			switch data.nKind {
			case number.Int64Kind:
				assert.NotNil(t, m.GetIntHistogram())
				if dp := m.GetIntHistogram().DataPoints; assert.Len(t, dp, 1) {
					count := dp[0].Count
					assert.Equal(t, uint64(1), count, "invalid count for %q", m.Name)
					assert.Equal(t, int64(data.val*int64(count)), dp[0].Sum, "invalid sum for %q (value %d)", m.Name, data.val)
				}
			case number.Float64Kind:
				assert.NotNil(t, m.GetDoubleHistogram())
				if dp := m.GetDoubleHistogram().DataPoints; assert.Len(t, dp, 1) {
					count := dp[0].Count
					assert.Equal(t, uint64(1), count, "invalid count for %q", m.Name)
					assert.Equal(t, float64(data.val*int64(count)), dp[0].Sum, "invalid sum for %q (value %d)", m.Name, data.val)
				}
			default:
				assert.Failf(t, "invalid number kind", data.nKind.String())
			}
		default:
			assert.Failf(t, "invalid metrics kind", data.iKind.String())
		}
	}
	// Finally, make sure no instrument was silently dropped.
	for i := range instruments {
		if _, ok := seen[i]; !ok {
			assert.Fail(t, fmt.Sprintf("no metric(s) exported for %q", i))
		}
	}
}
// Useful test functions for validating (mostly) string outputs match
// what is expected.
package assert
import (
"bytes"
"github.com/danos/mgmterror"
"github.com/danos/utils/exec"
"io"
"os"
"strings"
"testing"
)
// init replaces the exec package's NewExecError hook so that command
// execution failures are constructed as mgmterror ExecError values.
func init() {
	exec.NewExecError = func(path []string, err string) error {
		return mgmterror.NewExecError(path, err)
	}
}
// ExpectedError holds an error message that a test expects to see.
type ExpectedError struct {
	expected string // full expected error text, compared exactly
}

// NewExpectedError returns an ExpectedError wrapping the expected text.
func NewExpectedError(expect string) *ExpectedError {
	return &ExpectedError{expected: expect}
}

// Matches fails the test if actual is nil, and otherwise compares actual's
// message against the expected text, reporting the point of divergence.
func (e *ExpectedError) Matches(t *testing.T, actual error) {
	if actual == nil {
		t.Fatalf("Unexpected success")
	}
	CheckStringDivergence(t, e.expected, actual.Error())
}
// ExpectedMessages holds a set of message fragments that test output is
// checked against.
type ExpectedMessages struct {
	expected []string // fragments to search for
}

// NewExpectedMessages returns an ExpectedMessages over the given fragments.
func NewExpectedMessages(expect ...string) *ExpectedMessages {
	return &ExpectedMessages{expected: expect}
}

// ContainedIn fails the test unless every expected fragment appears
// somewhere in actual.
func (e *ExpectedMessages) ContainedIn(t *testing.T, actual string) {
	if len(actual) == 0 {
		t.Fatalf("No output in which to search for expected message(s).")
		return
	}
	for _, want := range e.expected {
		if strings.Contains(actual, want) {
			continue
		}
		t.Fatalf("Actual output doesn't contain expected output:\n"+
			"Exp:\n%s\nAct:\n%v\n", want, actual)
	}
}

// ContainedInOrderIn fails the test unless the expected fragments appear in
// actual's lines in the given order. Consecutive fragments may match the
// same line: the scan does not advance past a matched line.
func (e *ExpectedMessages) ContainedInOrderIn(t *testing.T, actual string) {
	if len(actual) == 0 {
		t.Fatalf("No output in which to search for expected message(s).")
		return
	}
	actualLines := strings.Split(actual, "\n")
	line := 0
	for _, want := range e.expected {
		matched := false
		for ; line < len(actualLines); line++ {
			if strings.Contains(actualLines[line], want) {
				matched = true
				break
			}
		}
		if !matched {
			t.Fatalf(
				"Actual output doesn't contain expected output in order:\n"+
					"Exp:\n%s\nAct:\n%v\n", want, actual)
		}
	}
}

// NotContainedIn fails the test if any of the fragments appears in actual.
func (e *ExpectedMessages) NotContainedIn(t *testing.T, actual string) {
	if len(actual) == 0 {
		t.Fatalf("No output in which to search for expected message(s).")
		return
	}
	for _, banned := range e.expected {
		if strings.Contains(actual, banned) {
			t.Fatalf("Actual output contain unexpected output:\n"+
				"NotExp:\n%s\nAct:\n%v\n", banned, actual)
		}
	}
}

// ContainedInAny fails the test unless each expected fragment appears in at
// least one of the actual strings.
func (e *ExpectedMessages) ContainedInAny(t *testing.T, actual []string) {
	if len(actual) == 0 {
		t.Fatalf("No output in which to search for expected message(s).")
		return
	}
	for _, want := range e.expected {
		found := false
		for _, act := range actual {
			if strings.Contains(act, want) {
				found = true
				break
			}
		}
		if !found {
			t.Fatalf("Actual output doesn't contain expected output:\n"+
				"Exp:\n%s\nAct:\n%v\n", want, actual)
		}
	}
}
// CheckStringDivergence compares expOut and actOut and, when they differ,
// fails the test with the actual output truncated at the first differing
// position (marked "###") plus a short window of what was expected there.
// Very useful when debugging outputs that don't match up.
//
// NOTE(review): comparison is byte-by-byte (expOutCopy[0] / expOutCopy[1:]),
// so multi-byte UTF-8 characters would desynchronize the walk — confirm the
// compared strings are ASCII.
func CheckStringDivergence(t *testing.T, expOut, actOut string) {
	if expOut == actOut {
		return
	}
	var expOutCopy = expOut // suffix of expOut not yet matched
	var act bytes.Buffer    // matched prefix, then "###" + context
	var charsToDump = 10    // actual-output context after the divergence
	var expCharsToDump = 10 // expected-output context after the divergence
	var actCharsLeft, expCharsLeft int
	for index, char := range actOut {
		if len(expOutCopy) > 0 {
			if char == rune(expOutCopy[0]) {
				act.WriteByte(byte(char))
			} else {
				act.WriteString("###") // Mark point of divergence.
				// Clamp both context windows to what actually remains.
				expCharsLeft = len(expOutCopy)
				actCharsLeft = len(actOut) - index
				if expCharsLeft < charsToDump {
					expCharsToDump = expCharsLeft
				}
				if actCharsLeft < charsToDump {
					charsToDump = actCharsLeft
				}
				act.WriteString(actOut[index : index+charsToDump])
				break
			}
		} else {
			// Actual output is longer than expected: everything matched but
			// there is extra trailing output.
			t.Logf("Expected output terminates early.\n")
			t.Fatalf("Exp:\n%s\nGot extra:\n%s\n",
				expOut[:index], act.String()[index:])
		}
		expOutCopy = expOutCopy[1:]
	}
	// When expOut is longer than actOut, need to update the expCharsToDump
	if len(expOutCopy) < charsToDump {
		expCharsToDump = len(expOutCopy)
	}
	// Useful to print whole output first for reference (useful when debugging
	// when you don't want to have to construct the expected output up front).
	t.Logf("Actual output:\n%s\n--- ENDS ---\n", actOut)
	// After that we then print up to the point of divergence so it's easy to
	// work out what went wrong ...
	t.Fatalf("Unexpected output.\nGot:\n%s\nExp at ###:\n'%s ...'\n",
		act.String(), expOutCopy[:expCharsToDump])
}
type actionFn func() ([]*exec.Output, []error, bool)
// For some tests we need to capture stdout for validation with expected
// output. Code here is based on various similar code snippets found by
// googling StackOverflow and the like.
func RunTestAndCaptureStdout(
fn actionFn,
) (out []*exec.Output, retErr []error, result bool, debug string) {
// Save 'stdout' so we can restore later.
stdout := os.Stdout
r, w, err := os.Pipe()
if err != nil {
return nil, nil, false, ""
}
os.Stdout = w
outC := make(chan string)
// Set up go routine to collect stdout.
go func() {
var buf bytes.Buffer
_, err := io.Copy(&buf, r)
r.Close()
if err != nil {
return
}
outC <- buf.String()
}()
result = true
// Clean up in a deferred call so we can recover.
defer func() {
// Close pipe, restore stdout, get output.
w.Close()
os.Stdout = stdout
debug = <-outC
err := recover()
if err != nil {
panic(err)
}
}()
// Run our test
out, retErr, result = fn()
return out, retErr, result, debug
} | testutils/assert/assert.go | 0.655336 | 0.548976 | assert.go | starcoder |
package output
import (
"errors"
"fmt"
"github.com/Jeffail/benthos/v3/lib/broker"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message/batch"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/Jeffail/benthos/v3/lib/x/docs"
)
//------------------------------------------------------------------------------
var (
// ErrBrokerNoOutputs is returned when creating a Broker type with zero
// outputs.
ErrBrokerNoOutputs = errors.New("attempting to create broker output type with no outputs")
)
//------------------------------------------------------------------------------
func init() {
Constructors[TypeBroker] = TypeSpec{
brokerConstructor: NewBroker,
Summary: `
Allows you to route messages to multiple child outputs using a range of
brokering [patterns](#patterns).`,
Description: `
[Processors](/docs/components/processors/about) can be listed to apply across
individual outputs or all outputs:
` + "``` yaml" + `
output:
broker:
pattern: fan_out
outputs:
- foo:
foo_field_1: value1
- bar:
bar_field_1: value2
bar_field_2: value3
# Processors only applied to messages sent to bar.
processors:
- type: bar_processor
# Processors applied to messages sent to all brokered outputs.
processors:
- type: some_processor
` + "```" + ``,
Footnotes: `
## Patterns
The broker pattern determines the way in which messages are allocated and can be
chosen from the following:
### ` + "`fan_out`" + `
With the fan out pattern all outputs will be sent every message that passes
through Benthos in parallel.
If an output applies back pressure it will block all subsequent messages, and if
an output fails to send a message it will be retried continuously until
completion or service shut down.
### ` + "`fan_out_sequential`" + `
Similar to the fan out pattern except outputs are written to sequentially,
meaning an output is only written to once the preceding output has confirmed
receipt of the same message.
### ` + "`round_robin`" + `
With the round robin pattern each message will be assigned a single output
following their order. If an output applies back pressure it will block all
subsequent messages. If an output fails to send a message then the message will
be re-attempted with the next input, and so on.
### ` + "`greedy`" + `
The greedy pattern results in higher output throughput at the cost of
potentially disproportionate message allocations to those outputs. Each message
is sent to a single output, which is determined by allowing outputs to claim
messages as soon as they are able to process them. This results in certain
faster outputs potentially processing more messages at the cost of slower
outputs.`,
FieldSpecs: docs.FieldSpecs{
docs.FieldAdvanced("copies", "The number of copies of each configured output to spawn."),
docs.FieldCommon("pattern", "The brokering pattern to use.").HasOptions(
"fan_out", "fan_out_sequential", "round_robin", "greedy", "try",
),
docs.FieldCommon("outputs", "A list of child outputs to broker."),
batch.FieldSpec(),
},
sanitiseConfigFunc: func(conf Config) (interface{}, error) {
nestedOutputs := conf.Broker.Outputs
outSlice := []interface{}{}
for _, output := range nestedOutputs {
sanOutput, err := SanitiseConfig(output)
if err != nil {
return nil, err
}
outSlice = append(outSlice, sanOutput)
}
batchSanit, err := batch.SanitisePolicyConfig(conf.Broker.Batching)
if err != nil {
return nil, err
}
return map[string]interface{}{
"copies": conf.Broker.Copies,
"pattern": conf.Broker.Pattern,
"outputs": outSlice,
"batching": batchSanit,
}, nil
},
}
}
//------------------------------------------------------------------------------
// BrokerConfig contains configuration fields for the Broker output type.
type BrokerConfig struct {
Copies int `json:"copies" yaml:"copies"`
Pattern string `json:"pattern" yaml:"pattern"`
Outputs brokerOutputList `json:"outputs" yaml:"outputs"`
Batching batch.PolicyConfig `json:"batching" yaml:"batching"`
}
// NewBrokerConfig creates a new BrokerConfig with default values.
func NewBrokerConfig() BrokerConfig {
batching := batch.NewPolicyConfig()
batching.Count = 1
return BrokerConfig{
Copies: 1,
Pattern: "fan_out",
Outputs: brokerOutputList{},
Batching: batching,
}
}
//------------------------------------------------------------------------------
// NewBroker creates a new Broker output type. Messages will be sent out to the
// list of outputs according to the chosen broker pattern.
func NewBroker(
conf Config,
mgr types.Manager,
log log.Modular,
stats metrics.Type,
pipelines ...types.PipelineConstructorFunc,
) (Type, error) {
outputConfs := conf.Broker.Outputs
lOutputs := len(outputConfs) * conf.Broker.Copies
if lOutputs <= 0 {
return nil, ErrBrokerNoOutputs
}
if lOutputs == 1 {
b, err := New(outputConfs[0], mgr, log, stats, pipelines...)
if err != nil {
return nil, err
}
if !conf.Broker.Batching.IsNoop() {
policy, err := batch.NewPolicy(conf.Broker.Batching, mgr, log.NewModule(".batching"), metrics.Namespaced(stats, "batching"))
if err != nil {
return nil, fmt.Errorf("failed to construct batch policy: %v", err)
}
b = NewBatcher(policy, b, log, stats)
}
return b, nil
}
outputs := make([]types.Output, lOutputs)
_, isThreaded := map[string]struct{}{
"round_robin": {},
"greedy": {},
}[conf.Broker.Pattern]
var err error
for j := 0; j < conf.Broker.Copies; j++ {
for i, oConf := range outputConfs {
ns := fmt.Sprintf("broker.outputs.%v", i)
var pipes []types.PipelineConstructorFunc
if isThreaded {
pipes = pipelines
}
outputs[j*len(outputConfs)+i], err = New(
oConf, mgr,
log.NewModule("."+ns),
metrics.Combine(stats, metrics.Namespaced(stats, ns)),
pipes...)
if err != nil {
return nil, fmt.Errorf("failed to create output '%v' type '%v': %v", i, oConf.Type, err)
}
}
}
var b Type
switch conf.Broker.Pattern {
case "fan_out":
b, err = broker.NewFanOut(outputs, log, stats)
case "fan_out_sequential":
b, err = broker.NewFanOutSequential(outputs, log, stats)
case "round_robin":
b, err = broker.NewRoundRobin(outputs, stats)
case "greedy":
b, err = broker.NewGreedy(outputs)
case "try":
b, err = broker.NewTry(outputs, stats)
default:
return nil, fmt.Errorf("broker pattern was not recognised: %v", conf.Broker.Pattern)
}
if err == nil && !isThreaded {
b, err = WrapWithPipelines(b, pipelines...)
}
if !conf.Broker.Batching.IsNoop() {
policy, err := batch.NewPolicy(conf.Broker.Batching, mgr, log.NewModule(".batching"), metrics.Namespaced(stats, "batching"))
if err != nil {
return nil, fmt.Errorf("failed to construct batch policy: %v", err)
}
b = NewBatcher(policy, b, log, stats)
}
return b, err
}
//------------------------------------------------------------------------------ | lib/output/broker.go | 0.751283 | 0.647325 | broker.go | starcoder |
// Package epochs implements time-based feeds using epochs as index
// and provide sequential as well as concurrent lookup algorithms
package epochs
import (
"encoding/binary"
"fmt"
"github.com/penguintop/penguin/pkg/crypto"
"github.com/penguintop/penguin/pkg/feeds"
)
const (
maxLevel = 32
)
var _ feeds.Index = (*epoch)(nil)
// epoch is referencing a slot in the epoch grid and represents an update
// it implements the feeds.Index interface
type epoch struct {
start uint64
level uint8
}
func (e *epoch) String() string {
return fmt.Sprintf("%d/%d", e.start, e.level)
}
// MarshalBinary implements the BinaryMarshaler interface
func (e *epoch) MarshalBinary() ([]byte, error) {
epochBytes := make([]byte, 8)
binary.BigEndian.PutUint64(epochBytes, e.start)
return crypto.LegacyKeccak256(append(epochBytes, e.level))
}
func next(e feeds.Index, last int64, at uint64) feeds.Index {
if e == nil {
return &epoch{0, maxLevel}
}
return e.Next(last, at)
}
// Next implements feeds.Index advancement
func (e *epoch) Next(last int64, at uint64) feeds.Index {
if e.start+e.length() > at {
return e.childAt(at)
}
return lca(int64(at), last).childAt(at)
}
// lca calculates the lowest common ancestor epoch given two unix times
func lca(at, after int64) *epoch {
if after == 0 {
return &epoch{0, maxLevel}
}
diff := uint64(at - after)
length := uint64(1)
var level uint8
for level < maxLevel && (length < diff || uint64(at)/length != uint64(after)/length) {
length <<= 1
level++
}
start := (uint64(after) / length) * length
return &epoch{start, level}
}
// parent returns the ancestor of an epoch
// the call is unsafe in that it must not be called on a toplevel epoch
func (e *epoch) parent() *epoch {
length := e.length() << 1
start := (e.start / length) * length
return &epoch{start, e.level + 1}
}
// left returns the left sister of an epoch
// it is unsafe in that it must not be called on a left sister epoch
func (e *epoch) left() *epoch {
return &epoch{e.start - e.length(), e.level}
}
// at returns the left of right child epoch of an epoch depending on where `at` falls
// it is unsafe in that it must not be called with an at that does not fall within the epoch
func (e *epoch) childAt(at uint64) *epoch {
e = &epoch{e.start, e.level - 1}
if at&e.length() > 0 {
e.start |= e.length()
}
return e
}
// isLeft returns true if epoch is a left sister of its parent
func (e *epoch) isLeft() bool {
return e.start&e.length() == 0
}
// length returns the span of the epoch
func (e *epoch) length() uint64 {
return 1 << e.level
} | pkg/feeds/epochs/epoch.go | 0.891321 | 0.402774 | epoch.go | starcoder |
package conf
// Float32Var defines a float32 flag and environment variable with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the flag and/or environment variable.
func (c *Configurator) Float32Var(p *float32, name string, value float32, usage string) {
c.env().Float32Var(p, name, value, usage)
c.flag().Float32Var(p, name, value, usage)
}
// Float32 defines a float32 flag and environment variable with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the flag and/or environment variable.
func (c *Configurator) Float32(name string, value float32, usage string) *float32 {
p := new(float32)
c.Float32Var(p, name, value, usage)
return p
}
// Float32VarE defines a float32 environment variable with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the environment variable.
func (c *Configurator) Float32VarE(p *float32, name string, value float32, usage string) {
c.env().Float32Var(p, name, value, usage)
}
// Float32E defines a float32 environment variable with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the environment variable.
func (c *Configurator) Float32E(name string, value float32, usage string) *float32 {
p := new(float32)
c.Float32VarE(p, name, value, usage)
return p
}
// Float32VarF defines a float32 flag with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the flag.
func (c *Configurator) Float32VarF(p *float32, name string, value float32, usage string) {
c.flag().Float32Var(p, name, value, usage)
}
// Float32F defines a float32 flag with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the flag.
func (c *Configurator) Float32F(name string, value float32, usage string) *float32 {
p := new(float32)
c.Float32VarF(p, name, value, usage)
return p
}
// Float32Var defines a float32 flag and environment variable with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the flag and/or environment variable.
func Float32Var(p *float32, name string, value float32, usage string) {
Global.Float32Var(p, name, value, usage)
}
// Float32 defines a float32 flag and environment variable with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the flag and/or environment variable.
func Float32(name string, value float32, usage string) *float32 {
return Global.Float32(name, value, usage)
}
// Float32VarE defines a float32 environment variable with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the environment variable.
func Float32VarE(p *float32, name string, value float32, usage string) {
Global.Float32VarE(p, name, value, usage)
}
// Float32E defines a float32 environment variable with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the environment variable.
func Float32E(name string, value float32, usage string) *float32 {
return Global.Float32E(name, value, usage)
}
// Float32VarF defines a float32 flag with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the flag.
func Float32VarF(p *float32, name string, value float32, usage string) {
Global.Float32VarF(p, name, value, usage)
}
// Float32F defines a float32 flag with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the flag.
func Float32F(name string, value float32, usage string) *float32 {
return Global.Float32F(name, value, usage)
} | value_float32.go | 0.891457 | 0.78691 | value_float32.go | starcoder |
// Package tkr contains functions for working with Tanzu Kubernetes Release information.
package tkr
import (
"fmt"
"os"
"gopkg.in/yaml.v3"
)
// ImagePackage represents information for an image.
type ImagePackage struct {
ImagePath string `yaml:"imagePath"`
Tag string `yaml:"tag"`
Repository string `yaml:"repository"`
}
// PackageData contains metadata about a package.
type PackageData struct {
Name string `yaml:"name"`
Version string `yaml:"version"`
Arch string `yaml:"arch"`
}
// PackageInfo contains information about a package.
type PackageInfo struct {
Category string `yaml:"category"`
ClusterTypes []string `yaml:"clusterTypes"`
PackageName string `yaml:"packageName"`
Repository string `yaml:"repository"`
}
type Bom struct {
APIVersion string `yaml:"apiVersion"`
Release struct {
Version string `yaml:"version"`
} `yaml:"release"`
Components struct {
AkoOperator []struct {
Version string `yaml:"version"`
Images struct {
AkoOperatorImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"akoOperatorImage"`
} `yaml:"images"`
} `yaml:"ako-operator"`
Antrea []struct {
Version string `yaml:"version"`
Images struct {
AntreaImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"antreaImage"`
} `yaml:"images"`
} `yaml:"antrea"`
CalicoAll []struct {
Version string `yaml:"version"`
Images struct {
CalicoCniImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"calicoCniImage"`
CalicoKubecontrollerImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"calicoKubecontrollerImage"`
CalicoNodeImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"calicoNodeImage"`
CalicoPodDaemonImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"calicoPodDaemonImage"`
} `yaml:"images"`
} `yaml:"calico_all"`
CloudProviderVsphere []struct {
Version string `yaml:"version"`
Images struct {
CcmControllerImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"ccmControllerImage"`
} `yaml:"images"`
} `yaml:"cloud_provider_vsphere"`
CniPlugins []struct {
Version string `yaml:"version"`
} `yaml:"cni_plugins"`
Containerd []struct {
Version string `yaml:"version"`
} `yaml:"containerd"`
Coredns []struct {
Version string `yaml:"version"`
Images struct {
Coredns struct {
ImagePackage `yaml:",inline"`
} `yaml:"coredns"`
} `yaml:"images"`
} `yaml:"coredns"`
CriTools []struct {
Version string `yaml:"version"`
} `yaml:"cri_tools"`
CsiAttacher []struct {
Version string `yaml:"version"`
Images struct {
CsiAttacherImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"csiAttacherImage"`
} `yaml:"images"`
} `yaml:"csi_attacher"`
CsiLivenessprobe []struct {
Version string `yaml:"version"`
Images struct {
CsiLivenessProbeImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"csiLivenessProbeImage"`
} `yaml:"images"`
} `yaml:"csi_livenessprobe"`
CsiNodeDriverRegistrar []struct {
Version string `yaml:"version"`
Images struct {
CsiNodeDriverRegistrarImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"csiNodeDriverRegistrarImage"`
} `yaml:"images"`
} `yaml:"csi_node_driver_registrar"`
CsiProvisioner []struct {
Version string `yaml:"version"`
Images struct {
CsiProvisonerImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"csiProvisonerImage"`
} `yaml:"images"`
} `yaml:"csi_provisioner"`
Dex []struct {
Version string `yaml:"version"`
Images struct {
DexImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"dexImage"`
} `yaml:"images"`
} `yaml:"dex"`
Etcd []struct {
Version string `yaml:"version"`
Images struct {
Etcd struct {
ImagePackage `yaml:",inline"`
} `yaml:"etcd"`
} `yaml:"images"`
} `yaml:"etcd"`
KappController []struct {
Version string `yaml:"version"`
Images struct {
KappControllerImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"kappControllerImage"`
} `yaml:"images"`
} `yaml:"kapp-controller"`
Kubernetes []struct {
Version string `yaml:"version"`
Images struct {
KubeAPIServer struct {
ImagePackage `yaml:",inline"`
} `yaml:"kubeAPIServer"`
KubeControllerManager struct {
ImagePackage `yaml:",inline"`
} `yaml:"kubeControllerManager"`
KubeE2E struct {
ImagePackage `yaml:",inline"`
} `yaml:"kubeE2e"`
KubeProxy struct {
ImagePackage `yaml:",inline"`
} `yaml:"kubeProxy"`
KubeScheduler struct {
ImagePackage `yaml:",inline"`
} `yaml:"kubeScheduler"`
Pause struct {
ImagePackage `yaml:",inline"`
} `yaml:"pause"`
PauseWindows1809 struct {
ImagePackage `yaml:",inline"`
} `yaml:"pause_windows_1809"`
PauseWindows1903 struct {
ImagePackage `yaml:",inline"`
} `yaml:"pause_windows_1903"`
PauseWindows1909 struct {
ImagePackage `yaml:",inline"`
} `yaml:"pause_windows_1909"`
PauseWindows2004 struct {
ImagePackage `yaml:",inline"`
} `yaml:"pause_windows_2004"`
} `yaml:"images"`
} `yaml:"kubernetes"`
KubernetesCsiExternalResizer []struct {
Version string `yaml:"version"`
Images struct {
CsiExternalResizer struct {
ImagePackage `yaml:",inline"`
} `yaml:"csiExternalResizer"`
} `yaml:"images"`
} `yaml:"kubernetes-csi_external-resizer"`
KubernetesSigsKind []struct {
Version string `yaml:"version"`
Images struct {
KindNodeImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"kindNodeImage"`
} `yaml:"images"`
} `yaml:"kubernetes-sigs_kind"`
KubernetesSigsMinikube []struct {
Version string `yaml:"version"`
Images struct {
MinikubeNodeImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"minikubeNodeImage"`
} `yaml:"images"`
} `yaml:"kubernetes-sigs_minikube"`
LoadBalancerAndIngressService []struct {
Version string `yaml:"version"`
Images struct {
LoadBalancerAndIngressServiceImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"loadBalancerAndIngressServiceImage"`
} `yaml:"images"`
} `yaml:"load-balancer-and-ingress-service"`
MetricsServer []struct {
Version string `yaml:"version"`
Images struct {
MetricsServerImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"metricsServerImage"`
} `yaml:"images"`
} `yaml:"metrics-server"`
Pinniped []struct {
Version string `yaml:"version"`
Images struct {
PinnipedImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"pinnipedImage"`
} `yaml:"images"`
} `yaml:"pinniped"`
TanzuFrameworkAddons []struct {
Version string `yaml:"version"`
Images struct {
TanzuAddonsManagerImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"tanzuAddonsManagerImage"`
TkgPinnipedPostDeployImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"tkgPinnipedPostDeployImage"`
} `yaml:"images"`
} `yaml:"tanzu-framework-addons"`
TkgCorePackages []struct {
Version string `yaml:"version"`
Images struct {
AddonsManagerTanzuVmwareCom struct {
ImagePackage `yaml:",inline"`
} `yaml:"addons-manager.tanzu.vmware.com"`
AkoOperatorTanzuVmwareCom struct {
ImagePackage `yaml:",inline"`
} `yaml:"ako-operator.tanzu.vmware.com"`
AntreaTanzuVmwareCom struct {
ImagePackage `yaml:",inline"`
} `yaml:"antrea.tanzu.vmware.com"`
CalicoTanzuVmwareCom struct {
ImagePackage `yaml:",inline"`
} `yaml:"calico.tanzu.vmware.com"`
KappControllerTanzuVmwareCom struct {
ImagePackage `yaml:",inline"`
} `yaml:"kapp-controller.tanzu.vmware.com"`
LoadBalancerAndIngressServiceTanzuVmwareCom struct {
ImagePackage `yaml:",inline"`
} `yaml:"load-balancer-and-ingress-service.tanzu.vmware.com"`
MetricsServerTanzuVmwareCom struct {
ImagePackage `yaml:",inline"`
} `yaml:"metrics-server.tanzu.vmware.com"`
PinnipedTanzuVmwareCom struct {
ImagePackage `yaml:",inline"`
} `yaml:"pinniped.tanzu.vmware.com"`
TanzuCorePackageRepositoryImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"tanzuCorePackageRepositoryImage"`
TanzuUserPackageRepositoryImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"tanzuUserPackageRepositoryImage"`
VsphereCpiTanzuVmwareCom struct {
ImagePackage `yaml:",inline"`
} `yaml:"vsphere-cpi.tanzu.vmware.com"`
VsphereCsiTanzuVmwareCom struct {
ImagePackage `yaml:",inline"`
} `yaml:"vsphere-csi.tanzu.vmware.com"`
} `yaml:"images"`
} `yaml:"tkg-core-packages"`
VsphereCsiDriver []struct {
Version string `yaml:"version"`
Images struct {
CsiControllerImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"csiControllerImage"`
CsiMetaDataSyncerImage struct {
ImagePackage `yaml:",inline"`
} `yaml:"csiMetaDataSyncerImage"`
} `yaml:"images"`
} `yaml:"vsphere_csi_driver"`
} `yaml:"components"`
KubeadmConfigSpec struct {
APIVersion string `yaml:"apiVersion"`
Kind string `yaml:"kind"`
ImageRepository string `yaml:"imageRepository"`
KubernetesVersion string `yaml:"kubernetesVersion"`
Etcd struct {
Local struct {
DataDir string `yaml:"dataDir"`
ImageRepository string `yaml:"imageRepository"`
ImageTag string `yaml:"imageTag"`
} `yaml:"local"`
} `yaml:"etcd"`
DNS struct {
Type string `yaml:"type"`
ImageRepository string `yaml:"imageRepository"`
ImageTag string `yaml:"imageTag"`
} `yaml:"dns"`
} `yaml:"kubeadmConfigSpec"`
Ova []struct {
Name string `yaml:"name"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
Version string `yaml:"version"`
} `yaml:"ova"`
Ami struct {
ApNortheast1 []struct {
ID string `yaml:"id"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"ap-northeast-1"`
ApNortheast2 []struct {
ID string `yaml:"id"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"ap-northeast-2"`
ApSouth1 []struct {
ID string `yaml:"id"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"ap-south-1"`
ApSoutheast1 []struct {
ID string `yaml:"id"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"ap-southeast-1"`
ApSoutheast2 []struct {
ID string `yaml:"id"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"ap-southeast-2"`
EuCentral1 []struct {
ID string `yaml:"id"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"eu-central-1"`
EuWest1 []struct {
ID string `yaml:"id"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"eu-west-1"`
EuWest2 []struct {
ID string `yaml:"id"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"eu-west-2"`
EuWest3 []struct {
ID string `yaml:"id"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"eu-west-3"`
SaEast1 []struct {
ID string `yaml:"id"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"sa-east-1"`
UsEast1 []struct {
ID string `yaml:"id"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"us-east-1"`
UsEast2 []struct {
ID string `yaml:"id"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"us-east-2"`
UsGovEast1 []struct {
ID string `yaml:"id"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"us-gov-east-1"`
UsGovWest1 []struct {
ID string `yaml:"id"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"us-gov-west-1"`
UsWest2 []struct {
ID string `yaml:"id"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"us-west-2"`
} `yaml:"ami"`
Azure []struct {
Sku string `yaml:"sku"`
Publisher string `yaml:"publisher"`
Offer string `yaml:"offer"`
Version string `yaml:"version"`
ThirdPartyImage bool `yaml:"thirdPartyImage"`
Osinfo struct {
PackageData `yaml:",inline"`
} `yaml:"osinfo"`
} `yaml:"azure"`
ImageConfig struct {
ImageRepository string `yaml:"imageRepository"`
} `yaml:"imageConfig"`
Addons struct {
AkoOperator struct {
PackageInfo `yaml:",inline"`
} `yaml:"ako-operator"`
Antrea struct {
PackageInfo `yaml:",inline"`
} `yaml:"antrea"`
Calico struct {
PackageInfo `yaml:",inline"`
} `yaml:"calico"`
KappController struct {
PackageInfo `yaml:",inline"`
} `yaml:"kapp-controller"`
LoadBalancerAndIngressService struct {
PackageInfo `yaml:",inline"`
} `yaml:"load-balancer-and-ingress-service"`
MetricsServer struct {
PackageInfo `yaml:",inline"`
} `yaml:"metrics-server"`
Pinniped struct {
PackageInfo `yaml:",inline"`
} `yaml:"pinniped"`
TanzuAddonsManager struct {
PackageInfo `yaml:",inline"`
} `yaml:"tanzu-addons-manager"`
VsphereCpi struct {
PackageInfo `yaml:",inline"`
} `yaml:"vsphere-cpi"`
VsphereCsi struct {
PackageInfo `yaml:",inline"`
} `yaml:"vsphere-csi"`
} `yaml:"addons"`
}
func ReadTKRBom(filePath string) (*Bom, error) {
bom := &Bom{}
rawBom, err := os.ReadFile(filePath)
if err != nil {
return bom, err
}
err = yaml.Unmarshal(rawBom, bom)
if err != nil {
return bom, err
}
return bom, nil
}
func (tkr *Bom) getTKRRegistry() string {
return tkr.ImageConfig.ImageRepository
}
func (tkr *Bom) GetTKRNodeImage(provider string) string {
switch provider {
case "kind":
repo := tkr.getTKRNodeRepository()
path := tkr.Components.KubernetesSigsKind[0].Images.KindNodeImage.ImagePath
tag := tkr.Components.KubernetesSigsKind[0].Images.KindNodeImage.Tag
return fmt.Sprintf("%s/%s:%s", repo, path, tag)
case "minikube":
repo := tkr.Components.KubernetesSigsMinikube[0].Images.MinikubeNodeImage.Repository
path := tkr.Components.KubernetesSigsMinikube[0].Images.MinikubeNodeImage.ImagePath
tag := tkr.Components.KubernetesSigsMinikube[0].Images.MinikubeNodeImage.Tag
return fmt.Sprintf("%s/%s:%s", repo, path, tag)
}
// Unsupported provider
return ""
}
func (tkr *Bom) GetTKRCoreRepoBundlePath() string {
registry := tkr.Components.TkgCorePackages[0].Images.TanzuCorePackageRepositoryImage.Repository
if registry == "" {
registry = tkr.getTKRRegistry()
}
path := tkr.Components.TkgCorePackages[0].Images.TanzuCorePackageRepositoryImage.ImagePath
tag := tkr.Components.TkgCorePackages[0].Images.TanzuCorePackageRepositoryImage.Tag
return fmt.Sprintf("%s/%s:%s", registry, path, tag)
}
func (tkr *Bom) GetTKRUserRepoBundlePath() string {
registry := tkr.Components.TkgCorePackages[0].Images.TanzuUserPackageRepositoryImage.Repository
if registry == "" {
registry = tkr.getTKRRegistry()
}
path := tkr.Components.TkgCorePackages[0].Images.TanzuUserPackageRepositoryImage.ImagePath
tag := tkr.Components.TkgCorePackages[0].Images.TanzuUserPackageRepositoryImage.Tag
if path == "" || tag == "" {
return ""
}
return fmt.Sprintf("%s/%s:%s", registry, path, tag)
}
func (tkr *Bom) GetTKRKappImage() (ImageReader, error) {
registry := tkr.getTKRKappRepository()
path := tkr.Components.TkgCorePackages[0].Images.KappControllerTanzuVmwareCom.ImagePath
tag := tkr.Components.TkgCorePackages[0].Images.KappControllerTanzuVmwareCom.Tag
t, err := NewTkrImageReader(fmt.Sprintf("%s/%s:%s", registry, path, tag))
if err != nil {
return nil, err
}
return t, nil
}
func (tkr *Bom) getTKRNodeRepository() string {
if tkr.Components.KubernetesSigsKind[0].Images.KindNodeImage.Repository == "" {
return tkr.getTKRRegistry()
}
return tkr.Components.KubernetesSigsKind[0].Images.KindNodeImage.Repository
}
func (tkr *Bom) getTKRKappRepository() string {
if tkr.Components.TkgCorePackages[0].Images.KappControllerTanzuVmwareCom.Repository == "" {
return tkr.getTKRRegistry()
}
return tkr.Components.TkgCorePackages[0].Images.KappControllerTanzuVmwareCom.Repository
} | cli/cmd/plugin/unmanaged-cluster/tkr/tkr.go | 0.52683 | 0.501831 | tkr.go | starcoder |
package linkedlist
// Node is the node type used within the linked list.
type node[T comparable] struct {
prev *node[T]
next *node[T]
val T
}
// LinkedList is the main linked list type.
type LinkedList[T comparable] struct {
first *node[T]
last *node[T]
count int
}
// New is used to create a new linked list.
func New[T comparable]() LinkedList[T] {
return LinkedList[T]{}
}
// Count returns the amount of entries in the linked list.
func (l LinkedList[T]) Count() int {
return l.count
}
// Empty returns true if the linked list is empty, false if not.
func (l LinkedList[T]) Empty() bool {
return l.Count() == 0
}
// InsertFirst inserts a value at the beginning of the linked list.
func (l *LinkedList[T]) InsertFirst(val T) {
n := &node[T]{val: val}
if l.first == nil {
l.first = n
l.last = n
} else {
l.first.prev = n
n.next = l.first
l.first = n
}
l.count++
}
// First returns the value at the beginning of the linked list.
func (l LinkedList[T]) First() T {
if l.first == nil {
panic("linked list empty, getting first failed")
}
return l.first.val
}
// RemoveFirst removes the first value in the linked list.
func (l *LinkedList[T]) RemoveFirst() {
if l.first != nil {
if l.first.next == nil {
l.first = nil
l.last = nil
} else {
l.first = l.first.next
l.first.prev = nil
}
}
l.count--
}
// InsertLast inserts a value at the end of the linked list.
func (l *LinkedList[T]) InsertLast(val T) {
n := &node[T]{val: val}
if l.last == nil {
l.first = n
l.last = n
} else {
l.last.next = n
n.prev = l.last
l.last = n
}
l.count++
}
// Last returns the value at the end of the linked list.
func (l LinkedList[T]) Last() T {
if l.last == nil {
panic("linked list empty, getting last failed")
}
return l.last.val
}
// RemoveLast removes the last value in the linked list.
func (l *LinkedList[T]) RemoveLast() {
if l.last != nil {
if l.last.prev == nil {
l.first = nil
l.last = nil
} else {
l.last = l.last.prev
l.last.next = nil
}
}
l.count--
}
// Insert inserts a value at the specified index.
func (l *LinkedList[T]) Insert(val T, index int) {
if index > l.Count() {
panic("Insertion index invalid")
}
if index == 0 {
l.InsertFirst(val)
} else if index == l.Count() {
l.InsertLast(val)
} else {
n := &node[T]{val: val}
var listIndex int = 0
for ln := l.first; ln != nil; ln = ln.next {
if listIndex == index {
ln.prev.next = n
n.prev = ln.prev
n.next = ln
ln.prev = n
break
}
listIndex++
}
l.count++
}
}
// Get gets a value at the specified index.
func (l LinkedList[T]) Get(index int) T {
if index >= l.Count() {
panic("index outside of linked list bounds")
}
if index == 0 {
return l.first.val
} else if index == l.Count()-1 {
return l.last.val
} else {
var listIndex int = 0
for ln := l.first; ln != nil; ln = ln.next {
if listIndex == index {
return ln.val
}
listIndex++
}
}
panic("unreachable")
}
// Update updates a value at the specified index.
func (l *LinkedList[T]) Update(index int, val T) {
if index >= l.Count() {
panic("index outside of linked list bounds")
}
var listIndex int = 0
for ln := l.first; ln != nil; ln = ln.next {
if listIndex == index {
ln.val = val
break
}
listIndex++
}
}
// Remove removes a value at the specified index. It panics if the list is
// empty or if index is negative or not less than Count().
func (l *LinkedList[T]) Remove(index int) {
	if l.Count() == 0 {
		panic("linked list is empty")
	}
	// Check both bounds: a negative index previously fell through to the
	// search loop, never matched, and decremented count without removing
	// anything.
	if index < 0 || index >= l.Count() {
		panic("index outside of linked list bounds")
	}
	if index == 0 {
		l.RemoveFirst()
	} else if index == l.Count()-1 {
		l.RemoveLast()
	} else {
		listIndex := 0
		for ln := l.first; ln != nil; ln = ln.next {
			if listIndex == index {
				// Unlink ln by joining its two neighbours.
				ln.prev.next = ln.next
				ln.next.prev = ln.prev
				break
			}
			listIndex++
		}
		l.count--
	}
}
// Contains returns true if the value exists in the linked list, false if not.
func (l LinkedList[T]) Contains(val T) bool {
	// Linear scan from the head; == requires T to be comparable.
	for cur := l.first; cur != nil; cur = cur.next {
		if cur.val == val {
			return true
		}
	}
	return false
}
// Clear empties the entire linked list.
func (l *LinkedList[T]) Clear() {
	// Drop both ends so every node becomes unreachable, then reset the size.
	l.first, l.last = nil, nil
	l.count = 0
}
// ForEach iterates over the dataset within the linked list, calling the passed
// function for each value.
func (l LinkedList[T]) ForEach(f func(i int, val T)) {
var index int
for ln := l.first; ln != nil; ln = ln.next {
f(index, ln.val)
index++
}
} | linkedlist/linkedlist.go | 0.763968 | 0.480601 | linkedlist.go | starcoder |
package plaid
import (
"encoding/json"
)
// AssetReportTransaction struct for AssetReportTransaction
type AssetReportTransaction struct {
// Please use the `payment_channel` field, `transaction_type` will be deprecated in the future. `digital:` transactions that took place online. `place:` transactions that were made at a physical location. `special:` transactions that relate to banks, e.g. fees or deposits. `unresolved:` transactions that do not fit into the other three types.
TransactionType *string `json:"transaction_type,omitempty"`
// The ID of a posted transaction's associated pending transaction, where applicable.
PendingTransactionId NullableString `json:"pending_transaction_id,omitempty"`
// The ID of the category to which this transaction belongs. See [Categories](https://plaid.com/docs/#category-overview). If the `transactions` object was returned by an Assets endpoint such as `/asset_report/get/` or `/asset_report/pdf/get`, this field will only appear in an Asset Report with Insights.
CategoryId NullableString `json:"category_id,omitempty"`
// A hierarchical array of the categories to which this transaction belongs. See [Categories](https://plaid.com/docs/#category-overview). If the `transactions` object was returned by an Assets endpoint such as `/asset_report/get/` or `/asset_report/pdf/get`, this field will only appear in an Asset Report with Insights.
Category []string `json:"category,omitempty"`
Location *Location `json:"location,omitempty"`
PaymentMeta *PaymentMeta `json:"payment_meta,omitempty"`
// The name of the account owner. This field is not typically populated and only relevant when dealing with sub-accounts.
AccountOwner NullableString `json:"account_owner,omitempty"`
// The merchant name or transaction description. If the `transactions` object was returned by a Transactions endpoint such as `/transactions/get`, this field will always appear. If the `transactions` object was returned by an Assets endpoint such as `/asset_report/get/` or `/asset_report/pdf/get`, this field will only appear in an Asset Report with Insights.
Name *string `json:"name,omitempty"`
// The string returned by the financial institution to describe the transaction. For transactions returned by `/transactions/get`, this field is in beta and will be omitted unless the client is both enrolled in the closed beta program and has set `options.include_original_description` to `true`.
OriginalDescription NullableString `json:"original_description"`
// The ID of the account in which this transaction occurred.
AccountId string `json:"account_id"`
// The settled value of the transaction, denominated in the account's currency, as stated in `iso_currency_code` or `unofficial_currency_code`. Positive values when money moves out of the account; negative values when money moves in. For example, debit card purchases are positive; credit card payments, direct deposits, and refunds are negative.
Amount float32 `json:"amount"`
// The ISO-4217 currency code of the transaction. Always `null` if `unofficial_currency_code` is non-null.
IsoCurrencyCode NullableString `json:"iso_currency_code"`
// The unofficial currency code associated with the transaction. Always `null` if `iso_currency_code` is non-`null`. Unofficial currency codes are used for currencies that do not have official ISO currency codes, such as cryptocurrencies and the currencies of certain countries. See the [currency code schema](https://plaid.com/docs/api/accounts#currency-code-schema) for a full listing of supported `iso_currency_code`s.
UnofficialCurrencyCode NullableString `json:"unofficial_currency_code"`
// For pending transactions, the date that the transaction occurred; for posted transactions, the date that the transaction posted. Both dates are returned in an [ISO 8601](https://wikipedia.org/wiki/ISO_8601) format ( `YYYY-MM-DD` ).
Date string `json:"date"`
// When `true`, identifies the transaction as pending or unsettled. Pending transaction details (name, type, amount, category ID) may change before they are settled.
Pending bool `json:"pending"`
// The unique ID of the transaction. Like all Plaid identifiers, the `transaction_id` is case sensitive.
TransactionId string `json:"transaction_id"`
// The date on which the transaction took place, in IS0 8601 format.
DateTransacted NullableString `json:"date_transacted,omitempty"`
}
// NewAssetReportTransaction instantiates a new AssetReportTransaction object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewAssetReportTransaction(originalDescription NullableString, accountId string, amount float32, isoCurrencyCode NullableString, unofficialCurrencyCode NullableString, date string, pending bool, transactionId string) *AssetReportTransaction {
	// Populate only the API-required fields; optional fields keep their
	// zero values until explicitly set.
	return &AssetReportTransaction{
		OriginalDescription:    originalDescription,
		AccountId:              accountId,
		Amount:                 amount,
		IsoCurrencyCode:        isoCurrencyCode,
		UnofficialCurrencyCode: unofficialCurrencyCode,
		Date:                   date,
		Pending:                pending,
		TransactionId:          transactionId,
	}
}
// NewAssetReportTransactionWithDefaults instantiates a new AssetReportTransaction object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewAssetReportTransactionWithDefaults() *AssetReportTransaction {
	return &AssetReportTransaction{}
}
// GetTransactionType returns the TransactionType field value if set, zero value otherwise.
func (o *AssetReportTransaction) GetTransactionType() string {
if o == nil || o.TransactionType == nil {
var ret string
return ret
}
return *o.TransactionType
}
// GetTransactionTypeOk returns a tuple with the TransactionType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *AssetReportTransaction) GetTransactionTypeOk() (*string, bool) {
if o == nil || o.TransactionType == nil {
return nil, false
}
return o.TransactionType, true
}
// HasTransactionType returns a boolean if a field has been set.
func (o *AssetReportTransaction) HasTransactionType() bool {
if o != nil && o.TransactionType != nil {
return true
}
return false
}
// SetTransactionType gets a reference to the given string and assigns it to the TransactionType field.
func (o *AssetReportTransaction) SetTransactionType(v string) {
o.TransactionType = &v
}
// GetPendingTransactionId returns the PendingTransactionId field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *AssetReportTransaction) GetPendingTransactionId() string {
if o == nil || o.PendingTransactionId.Get() == nil {
var ret string
return ret
}
return *o.PendingTransactionId.Get()
}
// GetPendingTransactionIdOk returns a tuple with the PendingTransactionId field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *AssetReportTransaction) GetPendingTransactionIdOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.PendingTransactionId.Get(), o.PendingTransactionId.IsSet()
}
// HasPendingTransactionId returns a boolean if a field has been set.
func (o *AssetReportTransaction) HasPendingTransactionId() bool {
if o != nil && o.PendingTransactionId.IsSet() {
return true
}
return false
}
// SetPendingTransactionId gets a reference to the given NullableString and assigns it to the PendingTransactionId field.
func (o *AssetReportTransaction) SetPendingTransactionId(v string) {
o.PendingTransactionId.Set(&v)
}
// SetPendingTransactionIdNil sets the value for PendingTransactionId to be an explicit nil
func (o *AssetReportTransaction) SetPendingTransactionIdNil() {
o.PendingTransactionId.Set(nil)
}
// UnsetPendingTransactionId ensures that no value is present for PendingTransactionId, not even an explicit nil
func (o *AssetReportTransaction) UnsetPendingTransactionId() {
o.PendingTransactionId.Unset()
}
// GetCategoryId returns the CategoryId field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *AssetReportTransaction) GetCategoryId() string {
if o == nil || o.CategoryId.Get() == nil {
var ret string
return ret
}
return *o.CategoryId.Get()
}
// GetCategoryIdOk returns a tuple with the CategoryId field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *AssetReportTransaction) GetCategoryIdOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.CategoryId.Get(), o.CategoryId.IsSet()
}
// HasCategoryId returns a boolean if a field has been set.
func (o *AssetReportTransaction) HasCategoryId() bool {
if o != nil && o.CategoryId.IsSet() {
return true
}
return false
}
// SetCategoryId gets a reference to the given NullableString and assigns it to the CategoryId field.
func (o *AssetReportTransaction) SetCategoryId(v string) {
o.CategoryId.Set(&v)
}
// SetCategoryIdNil sets the value for CategoryId to be an explicit nil
func (o *AssetReportTransaction) SetCategoryIdNil() {
o.CategoryId.Set(nil)
}
// UnsetCategoryId ensures that no value is present for CategoryId, not even an explicit nil
func (o *AssetReportTransaction) UnsetCategoryId() {
o.CategoryId.Unset()
}
// GetCategory returns the Category field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *AssetReportTransaction) GetCategory() []string {
if o == nil {
var ret []string
return ret
}
return o.Category
}
// GetCategoryOk returns a tuple with the Category field value if set, nil otherwise
// and a boolean to check if the value has been set.
// Note: unlike the nullable-field accessors, a nil Category slice is treated
// as unset and returns (nil, false).
func (o *AssetReportTransaction) GetCategoryOk() (*[]string, bool) {
	if o == nil || o.Category == nil {
		return nil, false
	}
	return &o.Category, true
}
// HasCategory returns a boolean if a field has been set.
func (o *AssetReportTransaction) HasCategory() bool {
if o != nil && o.Category != nil {
return true
}
return false
}
// SetCategory gets a reference to the given []string and assigns it to the Category field.
func (o *AssetReportTransaction) SetCategory(v []string) {
o.Category = v
}
// GetLocation returns the Location field value if set, zero value otherwise.
func (o *AssetReportTransaction) GetLocation() Location {
if o == nil || o.Location == nil {
var ret Location
return ret
}
return *o.Location
}
// GetLocationOk returns a tuple with the Location field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *AssetReportTransaction) GetLocationOk() (*Location, bool) {
if o == nil || o.Location == nil {
return nil, false
}
return o.Location, true
}
// HasLocation returns a boolean if a field has been set.
func (o *AssetReportTransaction) HasLocation() bool {
if o != nil && o.Location != nil {
return true
}
return false
}
// SetLocation gets a reference to the given Location and assigns it to the Location field.
func (o *AssetReportTransaction) SetLocation(v Location) {
o.Location = &v
}
// GetPaymentMeta returns the PaymentMeta field value if set, zero value otherwise.
func (o *AssetReportTransaction) GetPaymentMeta() PaymentMeta {
if o == nil || o.PaymentMeta == nil {
var ret PaymentMeta
return ret
}
return *o.PaymentMeta
}
// GetPaymentMetaOk returns a tuple with the PaymentMeta field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *AssetReportTransaction) GetPaymentMetaOk() (*PaymentMeta, bool) {
if o == nil || o.PaymentMeta == nil {
return nil, false
}
return o.PaymentMeta, true
}
// HasPaymentMeta returns a boolean if a field has been set.
func (o *AssetReportTransaction) HasPaymentMeta() bool {
if o != nil && o.PaymentMeta != nil {
return true
}
return false
}
// SetPaymentMeta gets a reference to the given PaymentMeta and assigns it to the PaymentMeta field.
func (o *AssetReportTransaction) SetPaymentMeta(v PaymentMeta) {
o.PaymentMeta = &v
}
// GetAccountOwner returns the AccountOwner field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *AssetReportTransaction) GetAccountOwner() string {
if o == nil || o.AccountOwner.Get() == nil {
var ret string
return ret
}
return *o.AccountOwner.Get()
}
// GetAccountOwnerOk returns a tuple with the AccountOwner field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *AssetReportTransaction) GetAccountOwnerOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.AccountOwner.Get(), o.AccountOwner.IsSet()
}
// HasAccountOwner returns a boolean if a field has been set.
func (o *AssetReportTransaction) HasAccountOwner() bool {
if o != nil && o.AccountOwner.IsSet() {
return true
}
return false
}
// SetAccountOwner gets a reference to the given NullableString and assigns it to the AccountOwner field.
func (o *AssetReportTransaction) SetAccountOwner(v string) {
o.AccountOwner.Set(&v)
}
// SetAccountOwnerNil sets the value for AccountOwner to be an explicit nil
func (o *AssetReportTransaction) SetAccountOwnerNil() {
o.AccountOwner.Set(nil)
}
// UnsetAccountOwner ensures that no value is present for AccountOwner, not even an explicit nil
func (o *AssetReportTransaction) UnsetAccountOwner() {
o.AccountOwner.Unset()
}
// GetName returns the Name field value if set, zero value otherwise.
func (o *AssetReportTransaction) GetName() string {
if o == nil || o.Name == nil {
var ret string
return ret
}
return *o.Name
}
// GetNameOk returns a tuple with the Name field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *AssetReportTransaction) GetNameOk() (*string, bool) {
if o == nil || o.Name == nil {
return nil, false
}
return o.Name, true
}
// HasName returns a boolean if a field has been set.
func (o *AssetReportTransaction) HasName() bool {
if o != nil && o.Name != nil {
return true
}
return false
}
// SetName gets a reference to the given string and assigns it to the Name field.
func (o *AssetReportTransaction) SetName(v string) {
o.Name = &v
}
// GetOriginalDescription returns the OriginalDescription field value
// If the value is explicit nil, the zero value for string will be returned
func (o *AssetReportTransaction) GetOriginalDescription() string {
if o == nil || o.OriginalDescription.Get() == nil {
var ret string
return ret
}
return *o.OriginalDescription.Get()
}
// GetOriginalDescriptionOk returns a tuple with the OriginalDescription field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *AssetReportTransaction) GetOriginalDescriptionOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.OriginalDescription.Get(), o.OriginalDescription.IsSet()
}
// SetOriginalDescription sets field value
func (o *AssetReportTransaction) SetOriginalDescription(v string) {
o.OriginalDescription.Set(&v)
}
// GetAccountId returns the AccountId field value
func (o *AssetReportTransaction) GetAccountId() string {
if o == nil {
var ret string
return ret
}
return o.AccountId
}
// GetAccountIdOk returns a tuple with the AccountId field value
// and a boolean to check if the value has been set.
func (o *AssetReportTransaction) GetAccountIdOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.AccountId, true
}
// SetAccountId sets field value
func (o *AssetReportTransaction) SetAccountId(v string) {
o.AccountId = v
}
// GetAmount returns the Amount field value
func (o *AssetReportTransaction) GetAmount() float32 {
if o == nil {
var ret float32
return ret
}
return o.Amount
}
// GetAmountOk returns a tuple with the Amount field value
// and a boolean to check if the value has been set.
func (o *AssetReportTransaction) GetAmountOk() (*float32, bool) {
if o == nil {
return nil, false
}
return &o.Amount, true
}
// SetAmount sets field value
func (o *AssetReportTransaction) SetAmount(v float32) {
o.Amount = v
}
// GetIsoCurrencyCode returns the IsoCurrencyCode field value
// If the value is explicit nil, the zero value for string will be returned
func (o *AssetReportTransaction) GetIsoCurrencyCode() string {
if o == nil || o.IsoCurrencyCode.Get() == nil {
var ret string
return ret
}
return *o.IsoCurrencyCode.Get()
}
// GetIsoCurrencyCodeOk returns a tuple with the IsoCurrencyCode field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *AssetReportTransaction) GetIsoCurrencyCodeOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.IsoCurrencyCode.Get(), o.IsoCurrencyCode.IsSet()
}
// SetIsoCurrencyCode sets field value
func (o *AssetReportTransaction) SetIsoCurrencyCode(v string) {
o.IsoCurrencyCode.Set(&v)
}
// GetUnofficialCurrencyCode returns the UnofficialCurrencyCode field value
// If the value is explicit nil, the zero value for string will be returned
func (o *AssetReportTransaction) GetUnofficialCurrencyCode() string {
if o == nil || o.UnofficialCurrencyCode.Get() == nil {
var ret string
return ret
}
return *o.UnofficialCurrencyCode.Get()
}
// GetUnofficialCurrencyCodeOk returns a tuple with the UnofficialCurrencyCode field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *AssetReportTransaction) GetUnofficialCurrencyCodeOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.UnofficialCurrencyCode.Get(), o.UnofficialCurrencyCode.IsSet()
}
// SetUnofficialCurrencyCode sets field value
func (o *AssetReportTransaction) SetUnofficialCurrencyCode(v string) {
o.UnofficialCurrencyCode.Set(&v)
}
// GetDate returns the Date field value
func (o *AssetReportTransaction) GetDate() string {
if o == nil {
var ret string
return ret
}
return o.Date
}
// GetDateOk returns a tuple with the Date field value
// and a boolean to check if the value has been set.
func (o *AssetReportTransaction) GetDateOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.Date, true
}
// SetDate sets field value
func (o *AssetReportTransaction) SetDate(v string) {
o.Date = v
}
// GetPending returns the Pending field value
func (o *AssetReportTransaction) GetPending() bool {
if o == nil {
var ret bool
return ret
}
return o.Pending
}
// GetPendingOk returns a tuple with the Pending field value
// and a boolean to check if the value has been set.
func (o *AssetReportTransaction) GetPendingOk() (*bool, bool) {
if o == nil {
return nil, false
}
return &o.Pending, true
}
// SetPending sets field value
func (o *AssetReportTransaction) SetPending(v bool) {
o.Pending = v
}
// GetTransactionId returns the TransactionId field value
func (o *AssetReportTransaction) GetTransactionId() string {
if o == nil {
var ret string
return ret
}
return o.TransactionId
}
// GetTransactionIdOk returns a tuple with the TransactionId field value
// and a boolean to check if the value has been set.
func (o *AssetReportTransaction) GetTransactionIdOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.TransactionId, true
}
// SetTransactionId sets field value
func (o *AssetReportTransaction) SetTransactionId(v string) {
o.TransactionId = v
}
// GetDateTransacted returns the DateTransacted field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *AssetReportTransaction) GetDateTransacted() string {
if o == nil || o.DateTransacted.Get() == nil {
var ret string
return ret
}
return *o.DateTransacted.Get()
}
// GetDateTransactedOk returns a tuple with the DateTransacted field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *AssetReportTransaction) GetDateTransactedOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.DateTransacted.Get(), o.DateTransacted.IsSet()
}
// HasDateTransacted returns a boolean if a field has been set.
func (o *AssetReportTransaction) HasDateTransacted() bool {
if o != nil && o.DateTransacted.IsSet() {
return true
}
return false
}
// SetDateTransacted gets a reference to the given NullableString and assigns it to the DateTransacted field.
func (o *AssetReportTransaction) SetDateTransacted(v string) {
o.DateTransacted.Set(&v)
}
// SetDateTransactedNil sets the value for DateTransacted to be an explicit nil
func (o *AssetReportTransaction) SetDateTransactedNil() {
o.DateTransacted.Set(nil)
}
// UnsetDateTransacted ensures that no value is present for DateTransacted, not even an explicit nil
func (o *AssetReportTransaction) UnsetDateTransacted() {
o.DateTransacted.Unset()
}
// MarshalJSON serializes the transaction as a JSON object: optional fields
// are emitted only when set, while API-required fields (including nullable
// ones, which marshal as JSON null when unset) are always emitted.
func (o AssetReportTransaction) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.TransactionType != nil {
		toSerialize["transaction_type"] = o.TransactionType
	}
	if o.PendingTransactionId.IsSet() {
		toSerialize["pending_transaction_id"] = o.PendingTransactionId.Get()
	}
	if o.CategoryId.IsSet() {
		toSerialize["category_id"] = o.CategoryId.Get()
	}
	if o.Category != nil {
		toSerialize["category"] = o.Category
	}
	if o.Location != nil {
		toSerialize["location"] = o.Location
	}
	if o.PaymentMeta != nil {
		toSerialize["payment_meta"] = o.PaymentMeta
	}
	if o.AccountOwner.IsSet() {
		toSerialize["account_owner"] = o.AccountOwner.Get()
	}
	if o.Name != nil {
		toSerialize["name"] = o.Name
	}
	// Required fields are serialized unconditionally. The generator emitted
	// these as `if true { ... }` blocks; the guards are removed here since
	// they are dead conditions (flagged by staticcheck) with no effect.
	toSerialize["original_description"] = o.OriginalDescription.Get()
	toSerialize["account_id"] = o.AccountId
	toSerialize["amount"] = o.Amount
	toSerialize["iso_currency_code"] = o.IsoCurrencyCode.Get()
	toSerialize["unofficial_currency_code"] = o.UnofficialCurrencyCode.Get()
	toSerialize["date"] = o.Date
	toSerialize["pending"] = o.Pending
	toSerialize["transaction_id"] = o.TransactionId
	if o.DateTransacted.IsSet() {
		toSerialize["date_transacted"] = o.DateTransacted.Get()
	}
	return json.Marshal(toSerialize)
}
// NullableAssetReportTransaction wraps an AssetReportTransaction pointer so
// callers can distinguish between an unset value, an explicit JSON null, and
// a concrete value.
type NullableAssetReportTransaction struct {
	value *AssetReportTransaction
	isSet bool
}

// Get returns the wrapped value, which may be nil.
func (v NullableAssetReportTransaction) Get() *AssetReportTransaction {
	return v.value
}

// Set stores val (nil represents an explicit null) and marks the wrapper as set.
func (v *NullableAssetReportTransaction) Set(val *AssetReportTransaction) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value (including an explicit null) has been stored.
func (v NullableAssetReportTransaction) IsSet() bool {
	return v.isSet
}

// Unset clears the wrapped value and marks the wrapper as not set.
func (v *NullableAssetReportTransaction) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableAssetReportTransaction returns a wrapper around val that is
// already marked as set.
func NewNullableAssetReportTransaction(val *AssetReportTransaction) *NullableAssetReportTransaction {
	return &NullableAssetReportTransaction{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableAssetReportTransaction) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes src into the wrapped value and marks the wrapper as set.
func (v *NullableAssetReportTransaction) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.