code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package fp6
// Mul is the text/template source for the fp6 tower-extension arithmetic:
// Mul, MulBy<Fp2>, MulByNotv2, Square, Square2, Square3, Inverse, and the
// fp2-level MulByNonResidue/MulByNonResidueInv methods. Placeholders such
// as {{.Fp6Name}} and {{.Fp2Name}} are filled by the code generator. The
// raw-string content is emitted verbatim into generated files, so it must
// be changed only when the generated code itself should change.
const Mul = `
// Mul multiplies two numbers in {{.Fp6Name}}
func (z *{{.Fp6Name}}) Mul(x, y *{{.Fp6Name}}) *{{.Fp6Name}} {
// Algorithm 13 from https://eprint.iacr.org/2010/354.pdf
var rb0, b0, b1, b2, b3, b4 {{.Fp2Name}}
b0.Mul(&x.B0, &y.B0) // step 1
b1.Mul(&x.B1, &y.B1) // step 2
b2.Mul(&x.B2, &y.B2) // step 3
// step 4
b3.Add(&x.B1, &x.B2)
b4.Add(&y.B1, &y.B2)
rb0.Mul(&b3, &b4).
SubAssign(&b1).
SubAssign(&b2)
{{- template "fp2InlineMulByNonResidue" dict "all" . "out" "rb0" "in" "&rb0" }}
rb0.AddAssign(&b0)
// step 5
b3.Add(&x.B0, &x.B1)
b4.Add(&y.B0, &y.B1)
z.B1.Mul(&b3, &b4).
SubAssign(&b0).
SubAssign(&b1)
{{- template "fp2InlineMulByNonResidue" dict "all" . "out" "b3" "in" "&b2" }}
z.B1.AddAssign(&b3)
// step 6
b3.Add(&x.B0, &x.B2)
b4.Add(&y.B0, &y.B2)
z.B2.Mul(&b3, &b4).
SubAssign(&b0).
SubAssign(&b2).
AddAssign(&b1)
z.B0 = rb0
return z
}
// MulBy{{capitalize .Fp2Name}} multiplies x by an elements of {{.Fp2Name}}
func (z *{{.Fp6Name}}) MulBy{{capitalize .Fp2Name}}(x *{{.Fp6Name}}, y *{{.Fp2Name}}) *{{.Fp6Name}} {
var yCopy {{.Fp2Name}}
yCopy.Set(y)
z.B0.Mul(&x.B0, &yCopy)
z.B1.Mul(&x.B1, &yCopy)
z.B2.Mul(&x.B2, &yCopy)
return z
}
// MulByNotv2 multiplies x by y with &y.b2=0
func (z *{{.Fp6Name}}) MulByNotv2(x, y *{{.Fp6Name}}) *{{.Fp6Name}} {
// Algorithm 15 from https://eprint.iacr.org/2010/354.pdf
var rb0, b0, b1, b2, b3 {{.Fp2Name}}
b0.Mul(&x.B0, &y.B0) // step 1
b1.Mul(&x.B1, &y.B1) // step 2
// step 3
b2.Add(&x.B1, &x.B2)
rb0.Mul(&b2, &y.B1).
SubAssign(&b1)
{{- template "fp2InlineMulByNonResidue" dict "all" . "out" "rb0" "in" "&rb0" }}
rb0.AddAssign(&b0)
// step 4
b2.Add(&x.B0, &x.B1)
b3.Add(&y.B0, &y.B1)
z.B1.Mul(&b2, &b3).
SubAssign(&b0).
SubAssign(&b1)
// step 5
z.B2.Mul(&x.B2, &y.B0).
AddAssign(&b1)
z.B0 = rb0
return z
}
// Square squares a {{.Fp6Name}}
func (z *{{.Fp6Name}}) Square(x *{{.Fp6Name}}) *{{.Fp6Name}} {
// Algorithm 16 from https://eprint.iacr.org/2010/354.pdf
var b0, b1, b2, b3, b4 {{.Fp2Name}}
b3.Mul(&x.B0, &x.B1).Double(&b3) // step 1
b4.Square(&x.B2) // step 2
// step 3
{{- template "fp2InlineMulByNonResidue" dict "all" . "out" "b0" "in" "&b4" }}
b0.AddAssign(&b3)
b1.Sub(&b3, &b4) // step 4
b2.Square(&x.B0) // step 5
b3.Sub(&x.B0, &x.B1).AddAssign(&x.B2).Square(&b3) // steps 6 and 8
b4.Mul(&x.B1, &x.B2).Double(&b4) // step 7
// step 9
{{- template "fp2InlineMulByNonResidue" dict "all" . "out" "z.B0" "in" "&b4" }}
z.B0.AddAssign(&b2)
// step 10
z.B2.Add(&b1, &b3).
AddAssign(&b4).
SubAssign(&b2)
z.B1 = b0
return z
}
// Square2 squares a {{.Fp6Name}}
func (z *{{.Fp6Name}}) Square2(x *{{.Fp6Name}}) *{{.Fp6Name}} {
// Karatsuba from Section 4 of https://eprint.iacr.org/2006/471.pdf
var v0, v1, v2, v01, v02, v12 {{.Fp2Name}}
v0.Square(&x.B0)
v1.Square(&x.B1)
v2.Square(&x.B2)
v01.Add(&x.B0, &x.B1)
v01.Square(&v01)
v02.Add(&x.B0, &x.B2)
v02.Square(&v02)
v12.Add(&x.B1, &x.B2)
v12.Square(&v12)
z.B0.Sub(&v12, &v1).SubAssign(&v2)
{{- template "fp2InlineMulByNonResidue" dict "all" . "out" "z.B0" "in" "&z.B0" }}
z.B0.AddAssign(&v0)
{{- template "fp2InlineMulByNonResidue" dict "all" . "out" "z.B1" "in" "&v2" }}
z.B1.AddAssign(&v01).SubAssign(&v0).SubAssign(&v1)
z.B2.Add(&v02, &v1).SubAssign(&v0).SubAssign(&v2)
return z
}
// Square3 squares a {{.Fp6Name}}
func (z *{{.Fp6Name}}) Square3(x *{{.Fp6Name}}) *{{.Fp6Name}} {
// CH-SQR2 from from Section 4 of https://eprint.iacr.org/2006/471.pdf
var s0, s1, s2, s3, s4 {{.Fp2Name}}
s0.Square(&x.B0)
s1.Mul(&x.B0, &x.B1).Double(&s1)
s2.Sub(&x.B0, &x.B1).AddAssign(&x.B2).Square(&s2)
s3.Mul(&x.B1, &x.B2).Double(&s3)
s4.Square(&x.B2)
{{- template "fp2InlineMulByNonResidue" dict "all" . "out" "z.B0" "in" "&s3" }}
z.B0.AddAssign(&s0)
{{- template "fp2InlineMulByNonResidue" dict "all" . "out" "z.B1" "in" "&s4" }}
z.B1.AddAssign(&s1)
z.B2.Add(&s1, &s2).AddAssign(&s3).SubAssign(&s0).SubAssign(&s4)
return z
}
// Inverse an element in {{.Fp6Name}}
func (z *{{.Fp6Name}}) Inverse(x *{{.Fp6Name}}) *{{.Fp6Name}} {
// Algorithm 17 from https://eprint.iacr.org/2010/354.pdf
// step 9 is wrong in the paper!
// memalloc
var t [7]{{.Fp2Name}}
var c [3]{{.Fp2Name}}
var buf {{.Fp2Name}}
t[0].Square(&x.B0) // step 1
t[1].Square(&x.B1) // step 2
t[2].Square(&x.B2) // step 3
t[3].Mul(&x.B0, &x.B1) // step 4
t[4].Mul(&x.B0, &x.B2) // step 5
t[5].Mul(&x.B1, &x.B2) // step 6
// step 7
{{- template "fp2InlineMulByNonResidue" dict "all" . "out" "c[0]" "in" "&t[5]" }}
c[0].Neg(&c[0]).AddAssign(&t[0])
// step 8
{{- template "fp2InlineMulByNonResidue" dict "all" . "out" "c[1]" "in" "&t[2]" }}
c[1].SubAssign(&t[3])
c[2].Sub(&t[1], &t[4]) // step 9 is wrong in 2010/354!
// steps 10, 11, 12
t[6].Mul(&x.B2, &c[1])
buf.Mul(&x.B1, &c[2])
t[6].AddAssign(&buf)
{{- template "fp2InlineMulByNonResidue" dict "all" . "out" "t[6]" "in" "&t[6]" }}
buf.Mul(&x.B0, &c[0])
t[6].AddAssign(&buf)
t[6].Inverse(&t[6]) // step 13
z.B0.Mul(&c[0], &t[6]) // step 14
z.B1.Mul(&c[1], &t[6]) // step 15
z.B2.Mul(&c[2], &t[6]) // step 16
return z
}
// MulByNonResidue multiplies a {{.Fp2Name}} by ({{.Fp6NonResidue}})
func (z *{{.Fp2Name}}) MulByNonResidue(x *{{.Fp2Name}}) *{{.Fp2Name}} {
{{- template "fp2MulByNonResidueBody" dict "all" . "out" "z" "in" "x" }}
return z
}
// MulByNonResidueInv multiplies a {{.Fp2Name}} by ({{.Fp6NonResidue}})^{-1}
func (z *{{.Fp2Name}}) MulByNonResidueInv(x *{{.Fp2Name}}) *{{.Fp2Name}} {
{{- template "fp2MulByNonResidueInvBody" dict "all" . "out" "z" "in" "x" }}
return z
}
`
package akeebabackup
// DownloadDirectRequestData is the payload for the "downloadDirect" API
// call. Note that the data is always returned unencrypted, no matter what
// the requested encapsulation was in your request.
type DownloadDirectRequestData struct {
	BackupID int `json:"backup_id"` // The numeric ID of the backup record whose files you want to download
	PartID   int `json:"part_id"`   // The backup part you wish to download. For example, if we have a multi-part archive with the base name test.jpa and six parts (.j01 through .j05 and .jpa) then part_id=2 means that we want to download test.j02 and part_id=6 means that we want to download the last part, i.e. test.jpa. If the backup record is not a multi-part archive just use 1.
}
// NewDownloadDirectRequestData builds the downloadDirect payload for the
// given part of the given backup record.
func NewDownloadDirectRequestData(backupID, partID int) *DownloadDirectRequestData {
	data := DownloadDirectRequestData{}
	data.BackupID = backupID
	data.PartID = partID
	return &data
}
// DownloadDirectResponseData is intentionally empty: the downloadDirect
// reply is an unencrypted binary stream containing the raw file data, not
// JSON. You will also receive standard HTTP headers setting the content
// disposition to Attachment, specifying an application/octet-stream MIME
// type and notifying you of the size of the download. In fact, you can
// generate a URL and pass it to any third party download tool (e.g. cURL,
// Wget) to download the archive part.
type DownloadDirectResponseData struct {
}

// DownloadDirectRequest wraps the generic Request together with the base
// URL of the target site.
type DownloadDirectRequest struct {
	Request
	url string
}

// DownloadDirectResponse wraps the generic Response for downloadDirect.
type DownloadDirectResponse struct {
	Response
}
// NewDownloadDirectRequest builds a downloadDirect request for the given
// backup record against the given site URL, authenticated with frontendKey.
// The part number is hard-coded to 1, which is sufficient for single-part
// archives.
// TODO validate url (trailing slash) and maybe key
func NewDownloadDirectRequest(url, frontendKey string, backupID int) *DownloadDirectRequest {
	return &DownloadDirectRequest{
		Request: *newRequest(frontendKey, "downloadDirect", NewDownloadDirectRequestData(backupID, 1)),
		url:     url,
	}
}
// NewDownloadDirectResponse allocates an empty response ready to receive
// a downloadDirect reply.
func NewDownloadDirectResponse() *DownloadDirectResponse {
	resp := DownloadDirectResponse{}
	resp.Response = *newResponse(&DownloadDirectResponseData{})
	return &resp
}
// Execute performs the downloadDirect call and writes the downloaded
// archive part to filepath. The bool result reports success as defined by
// Request.execute.
func (qr *DownloadDirectRequest) Execute(filepath string) (*DownloadDirectResponse, bool) {
	response := NewDownloadDirectResponse()
	return response, qr.Request.execute(qr.url, &response.Response, filepath)
}
package types
// ColoringRule describes how data matching a query scope is colored,
// based on aggregated series values and a set of thresholds.
type ColoringRule struct {
	// Regex string to match queries to apply coloring to.
	Scope string `json:"scope"`
	// Function to aggregate one series into one single value.
	SingleSeriesAggregateFunction string `json:"singleSeriesAggregateFunction"`
	// Function to aggregate the aggregate values of multiple time series into one single value.
	MultipleSeriesAggregateFunction string `json:"multipleSeriesAggregateFunction"`
	// Color thresholds.
	ColorThresholds []ColoringThreshold `json:"colorThresholds,omitempty"`
}

// ColoringThreshold maps a value range to a color.
type ColoringThreshold struct {
	// Color for the threshold.
	Color string `json:"color"`
	// Absolute inclusive threshold to color by.
	Min float64 `json:"min,omitempty"`
	// Absolute exclusive threshold to color by.
	Max float64 `json:"max,omitempty"`
}
// Dashboard is the JSON-serializable model of a dashboard as stored by
// the service, including its panels, layout, variables, and coloring rules.
type Dashboard struct {
	// Title of the dashboard.
	Title string `json:"title"`
	// Description of the dashboard.
	Description string `json:"description,omitempty"`
	// The identifier of the folder to save the dashboard in. By default it is saved in your personal folder.
	FolderId string `json:"folderId,omitempty"`
	TopologyLabelMap TopologyLabelMap `json:"topologyLabelMap,omitempty"`
	// If set denotes that the dashboard concerns a given domain (e.g. `aws`, `k8s`, `app`).
	Domain string `json:"domain,omitempty"`
	// Interval of time (in seconds) to automatically refresh the dashboard. A value of 0 means we never automatically refresh the dashboard. This functionality is currently not supported.
	RefreshInterval int32 `json:"refreshInterval,omitempty"`
	TimeRange ResolvableTimeRange `json:"timeRange"`
	// Panels in the dashboard.
	Panels []Panel `json:"panels,omitempty"`
	Layout Layout `json:"layout,omitempty"`
	// Variables to apply to the panels.
	Variables []Variable `json:"variables,omitempty"`
	// Theme for the dashboard. Either `Light` or `Dark`.
	Theme string `json:"theme,omitempty"`
	// Rules to set the color of data. This is an internal field and is not current supported by UI.
	ColoringRules []ColoringRule `json:"coloringRules,omitempty"`
	// Unique identifier for the dashboard.
	Id string `json:"id,omitempty"`
}
// DashboardRequest is the JSON body used when creating or updating a
// dashboard. Unlike Dashboard it carries Name/Type and has no Id, and
// Layout is a pointer so it can be omitted entirely.
type DashboardRequest struct {
	// Name of the dashboard
	Name string `json:"name"`
	// Title of the dashboard.
	Title string `json:"title"`
	// Type of dashboard
	Type string `json:"type"`
	// Description of the dashboard.
	Description string `json:"description"`
	// The identifier of the folder to save the dashboard in. By default it is saved in your personal folder.
	FolderId string `json:"folderId,omitempty"`
	TopologyLabelMap TopologyLabelMap `json:"topologyLabelMap,omitempty"`
	// If set denotes that the dashboard concerns a given domain (e.g. `aws`, `k8s`, `app`).
	Domain string `json:"domain,omitempty"`
	// Interval of time (in seconds) to automatically refresh the dashboard. A value of 0 means we never automatically refresh the dashboard. This functionality is currently not supported.
	RefreshInterval int32 `json:"refreshInterval"`
	TimeRange ResolvableTimeRange `json:"timeRange"`
	// Panels in the dashboard.
	Panels []Panel `json:"panels,omitempty"`
	Layout *Layout `json:"layout,omitempty"`
	// Variables to apply to the panels.
	Variables []Variable `json:"variables,omitempty"`
	// Theme for the dashboard. Either `Light` or `Dark`.
	Theme string `json:"theme"`
	// Rules to set the color of data. This is an internal field and is not current supported by UI.
	ColoringRules []ColoringRule `json:"coloringRules,omitempty"`
}
// TimeRangeBoundary is one endpoint of a time range. Exactly which field
// is meaningful presumably depends on Type (relative, epoch, ISO-8601, or
// a named range) — confirm against the API documentation.
type TimeRangeBoundary struct {
	Type         string `json:"type,omitempty"`
	RelativeTime string `json:"relativeTime,omitempty"`
	EpochMillis  int64  `json:"epochMillis,omitempty"`
	Iso8601Time  string `json:"iso8601Time,omitempty"`
	RangeName    string `json:"rangeName,omitempty"`
}

// Layout describes how panels are arranged on a dashboard.
type Layout struct {
	// The type of panel layout on the Dashboard. For example, Grid, Tabs, or Hierarchical. Currently supports `Grid` only.
	LayoutType string `json:"layoutType"`
	// Layout structures for the panel childen.
	LayoutStructures []LayoutStructure `json:"layoutStructures"`
}

// LayoutStructure positions a single panel within a Layout.
type LayoutStructure struct {
	// The identifier of the panel that this structure applies to.
	Key string `json:"key"`
	// The structure of a panel.
	Structure string `json:"structure"`
}
// Panel is a single panel on a dashboard.
type Panel struct {
	// Unique identifier for the panel.
	Id string `json:"id,omitempty"`
	// Key for the panel. Used to create searches for the queries in the panel and configure the layout of the panel in the dashboard.
	Key string `json:"key"`
	// Title of the panel.
	Title string `json:"title,omitempty"`
	// Visual settings of the panel.
	VisualSettings string `json:"visualSettings,omitempty"`
	// Keeps the visual settings, like series colors, consistent with the settings of the parent panel.
	KeepVisualSettingsConsistentWithParent bool `json:"keepVisualSettingsConsistentWithParent,omitempty"`
	// Type of panel.
	PanelType string `json:"panelType"`
}

// ResolvableTimeRange is a time range with typed from/to boundaries.
type ResolvableTimeRange struct {
	// Type of the time range. Value must be either `CompleteLiteralTimeRange` or `BeginBoundedTimeRange`.
	// (Field is named Type_ rather than Type; renaming would break callers.)
	Type_ string `json:"type"`
	From  TimeRangeBoundary `json:"from"`
	To    TimeRangeBoundary `json:"to,omitempty"`
}
// Variable is a dashboard variable whose values can be substituted into
// panel queries.
type Variable struct {
	// Unique identifier for the variable.
	Id string `json:"id,omitempty"`
	// Name of the variable. The variable name is case-insensitive. Only alphanumeric, and underscores are allowed in the variable name.
	Name string `json:"name"`
	// Display name of the variable shown in the UI. If this field is empty, the name field will be used. The display name is case-insensitive. Only numbers, and underscores are allowed in the variable name. This field is not yet supported by the UI.
	DisplayName string `json:"displayName,omitempty"`
	// Default value of the variable.
	DefaultValue string `json:"defaultValue,omitempty"`
	SourceDefinition VariableSourceDefinition `json:"sourceDefinition"`
	// Allow multiple selections in the values dropdown.
	AllowMultiSelect bool `json:"allowMultiSelect,omitempty"`
	// Include an \"All\" option at the top of the variable's values dropdown.
	IncludeAllOption bool `json:"includeAllOption,omitempty"`
	// Hide the variable in the dashboard UI.
	HideFromUI bool `json:"hideFromUI,omitempty"`
}

// VariableSourceDefinition says where a variable's values come from.
type VariableSourceDefinition struct {
	// Source type of the variable values.
	VariableSourceType string `json:"variableSourceType"`
}

// TopologyLabelMap carries topology label data for a dashboard.
type TopologyLabelMap struct {
	// Map from topology labels to `TopologyLabelValuesList`.
	Data map[string][]string `json:"data"`
}
package model
import (
"github.com/juju/errors"
"github.com/newm4n/grool/context"
"github.com/newm4n/grool/pkg"
"reflect"
"time"
)
const (
	// TimeTypeString is the reflect type name of time.Time, used to detect
	// time operands when evaluating comparison operators.
	TimeTypeString = "time.Time"
)
// Predicate holds the left and right ExpressionAtom graphs and applies a
// comparison operator to the results of evaluating both sides.
type Predicate struct {
	ExpressionAtomLeft  *ExpressionAtom
	ExpressionAtomRight *ExpressionAtom
	ComparisonOperator  ComparisonOperator

	// Contexts injected by Initialize and shared with child nodes.
	knowledgeContext *context.KnowledgeContext
	ruleCtx          *context.RuleContext
	dataCtx          *context.DataContext
}
// Initialize wires the knowledge, rule, and data contexts into this
// predicate and recursively into its left/right expression atoms (when set).
func (prdct *Predicate) Initialize(knowledgeContext *context.KnowledgeContext, ruleCtx *context.RuleContext, dataCtx *context.DataContext) {
	prdct.knowledgeContext = knowledgeContext
	prdct.ruleCtx = ruleCtx
	prdct.dataCtx = dataCtx
	if prdct.ExpressionAtomLeft != nil {
		prdct.ExpressionAtomLeft.Initialize(knowledgeContext, ruleCtx, dataCtx)
	}
	if prdct.ExpressionAtomRight != nil {
		prdct.ExpressionAtomRight.Initialize(knowledgeContext, ruleCtx, dataCtx)
	}
}
// AcceptExpressionAtom configures this graph with the left and right side
// expression atoms. The first call sets the left hand side, the second call
// sets the right hand side, and any further call returns an error.
func (prdct *Predicate) AcceptExpressionAtom(exprAtom *ExpressionAtom) error {
	if prdct.ExpressionAtomLeft == nil {
		prdct.ExpressionAtomLeft = exprAtom
		return nil
	}
	if prdct.ExpressionAtomRight == nil {
		prdct.ExpressionAtomRight = exprAtom
		return nil
	}
	// Fix: error message previously misspelled "alredy".
	return errors.Errorf("expression already set twice")
}
// Evaluate the object graph against underlined context or execute evaluation in the sub graph.
func (prdct *Predicate) Evaluate() (reflect.Value, error) {
if prdct.ExpressionAtomRight == nil {
return prdct.ExpressionAtomLeft.Evaluate()
}
lv, err := prdct.ExpressionAtomLeft.Evaluate()
if err != nil {
return reflect.ValueOf(nil), errors.Trace(err)
}
rv, err := prdct.ExpressionAtomRight.Evaluate()
if err != nil {
return reflect.ValueOf(nil), errors.Trace(err)
}
if lv.Kind() == rv.Kind() && (prdct.ComparisonOperator == ComparisonOperatorEQ || prdct.ComparisonOperator == ComparisonOperatorNEQ) {
if prdct.ComparisonOperator == ComparisonOperatorEQ {
switch lv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return reflect.ValueOf(lv.Int() == rv.Int()), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return reflect.ValueOf(lv.Uint() == rv.Uint()), nil
case reflect.Float64, reflect.Float32:
return reflect.ValueOf(lv.Float() == rv.Float()), nil
case reflect.String:
return reflect.ValueOf(lv.String() == rv.String()), nil
case reflect.Bool:
return reflect.ValueOf(lv.Bool() == rv.Bool()), nil
}
if lv.String() == TimeTypeString {
tl := pkg.ValueToInterface(lv).(time.Time)
tr := pkg.ValueToInterface(rv).(time.Time)
return reflect.ValueOf(tl.Equal(tr)), nil
}
} else {
switch lv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return reflect.ValueOf(lv.Int() != rv.Int()), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return reflect.ValueOf(lv.Uint() != rv.Uint()), nil
case reflect.Float64, reflect.Float32:
return reflect.ValueOf(lv.Float() != rv.Float()), nil
case reflect.String:
return reflect.ValueOf(lv.String() != rv.String()), nil
case reflect.Bool:
return reflect.ValueOf(lv.Bool() != rv.Bool()), nil
}
if lv.String() == TimeTypeString {
tl := pkg.ValueToInterface(lv).(time.Time)
tr := pkg.ValueToInterface(rv).(time.Time)
return reflect.ValueOf(!tl.Equal(tr)), nil
}
}
} else if lv.Type().String() == TimeTypeString && rv.Type().String() == TimeTypeString {
tl := pkg.ValueToInterface(lv).(time.Time)
tr := pkg.ValueToInterface(rv).(time.Time)
switch prdct.ComparisonOperator {
case ComparisonOperatorEQ:
return reflect.ValueOf(tl.Equal(tr)), nil
case ComparisonOperatorNEQ:
return reflect.ValueOf(!tl.Equal(tr)), nil
case ComparisonOperatorGT:
return reflect.ValueOf(tl.After(tr)), nil
case ComparisonOperatorGTE:
return reflect.ValueOf(tl.After(tr) || tl.Equal(tr)), nil
case ComparisonOperatorLT:
return reflect.ValueOf(tl.Before(tr)), nil
case ComparisonOperatorLTE:
return reflect.ValueOf(tl.Before(tr) || tl.Equal(tr)), nil
}
} else {
var lf, rf float64
switch pkg.GetBaseKind(lv) {
case reflect.Int64:
lf = float64(lv.Int())
case reflect.Uint64:
lf = float64(lv.Uint())
case reflect.Float64:
lf = lv.Float()
default:
return reflect.ValueOf(nil), errors.Errorf("comparison operator can only between strings, time or numbers")
}
switch pkg.GetBaseKind(rv) {
case reflect.Int64:
rf = float64(rv.Int())
case reflect.Uint64:
rf = float64(rv.Uint())
case reflect.Float64:
rf = rv.Float()
default:
return reflect.ValueOf(nil), errors.Errorf("comparison operator can only between strings, time or numbers")
}
switch prdct.ComparisonOperator {
case ComparisonOperatorEQ:
return reflect.ValueOf(lf == rf), nil
case ComparisonOperatorNEQ:
return reflect.ValueOf(lf != rf), nil
case ComparisonOperatorGT:
return reflect.ValueOf(lf > rf), nil
case ComparisonOperatorGTE:
return reflect.ValueOf(lf >= rf), nil
case ComparisonOperatorLT:
return reflect.ValueOf(lf < rf), nil
case ComparisonOperatorLTE:
return reflect.ValueOf(lf <= rf), nil
}
}
return reflect.ValueOf(nil), nil
} | model/Predicate.go | 0.607663 | 0.459197 | Predicate.go | starcoder |
package schema
import (
"go/ast"
"go/token"
"go/types"
"github.com/bflad/tfproviderlint/helper/astutils"
"github.com/bflad/tfproviderlint/helper/terraformtype/diag"
)
// IsFuncTypeCRUDFunc returns true if the node's FuncType matches the
// expected parameter and result types of a CRUD function, in either the
// context-less (V1/V2) or context-taking (V2) form.
func IsFuncTypeCRUDFunc(node ast.Node, info *types.Info) bool {
	funcType := astutils.FuncTypeFromNode(node)
	if funcType == nil {
		return false
	}
	return isFuncTypeCRUDFunc(funcType, info) || isFuncTypeCRUDContextFunc(funcType, info)
}
// isFuncTypeCRUDFunc returns true if the FuncType matches the expected
// parameter and result types of a V1 or V2 CRUD function without a
// context: func(*schema.ResourceData, interface{}) error.
func isFuncTypeCRUDFunc(funcType *ast.FuncType, info *types.Info) bool {
	if !astutils.HasFieldListLength(funcType.Params, 2) {
		return false
	}
	if !astutils.IsFieldListTypeModulePackageType(funcType.Params, 0, info, PackageModule, PackageModulePath, TypeNameResourceData) {
		return false
	}
	if !astutils.IsFieldListType(funcType.Params, 1, astutils.IsExprTypeInterface) {
		return false
	}
	if !astutils.HasFieldListLength(funcType.Results, 1) {
		return false
	}
	return astutils.IsFieldListType(funcType.Results, 0, astutils.IsExprTypeError)
}
// isFuncTypeCRUDContextFunc returns true if the FuncType matches the
// expected parameter and result types of a V2 CRUD function with a context:
// func(context.Context, *schema.ResourceData, interface{}) diag.Diagnostics.
func isFuncTypeCRUDContextFunc(funcType *ast.FuncType, info *types.Info) bool {
	if !astutils.HasFieldListLength(funcType.Params, 3) {
		return false
	}
	if !astutils.IsFieldListTypePackageType(funcType.Params, 0, info, "context", "Context") {
		return false
	}
	if !astutils.IsFieldListTypeModulePackageType(funcType.Params, 1, info, PackageModule, PackageModulePath, TypeNameResourceData) {
		return false
	}
	if !astutils.IsFieldListType(funcType.Params, 2, astutils.IsExprTypeInterface) {
		return false
	}
	if !astutils.HasFieldListLength(funcType.Results, 1) {
		return false
	}
	// Return the final check directly, matching isFuncTypeCRUDFunc's style
	// (the original ended with `if !X { return false }; return true`).
	return astutils.IsFieldListTypeModulePackageType(funcType.Results, 0, info, diag.PackageModule, diag.PackageModulePath, diag.TypeNameDiagnostics)
}
// CRUDFuncInfo represents all gathered CreateContext, ReadContext,
// UpdateContext, and DeleteContext data for easier access.
// Since Create, Delete, Read, and Update functions all have the same
// function signature, we cannot differentiate them in AST (except
// potentially by function declaration naming heuristics later on).
type CRUDFuncInfo struct {
	AstFuncDecl *ast.FuncDecl // set when the node was a declaration
	AstFuncLit  *ast.FuncLit  // set when the node was a literal
	Body        *ast.BlockStmt
	Node        ast.Node
	Pos         token.Pos
	Type        *ast.FuncType
	TypesInfo   *types.Info
}
// NewCRUDFuncInfo instantiates a CRUDFuncInfo from a function declaration
// or a function literal. For any other node type only TypesInfo is set.
func NewCRUDFuncInfo(node ast.Node, info *types.Info) *CRUDFuncInfo {
	result := &CRUDFuncInfo{
		TypesInfo: info,
	}
	switch node := node.(type) {
	case *ast.FuncDecl:
		result.AstFuncDecl = node
		result.Body = node.Body
		result.Node = node
		result.Pos = node.Pos()
		result.Type = node.Type
	case *ast.FuncLit:
		result.AstFuncLit = node
		result.Body = node.Body
		result.Node = node
		result.Pos = node.Pos()
		result.Type = node.Type
	}
	return result
}
package interpreter
import (
"fmt"
"io"
"os"
"strings"
"github.com/benhoyt/littlelang/parser"
. "github.com/benhoyt/littlelang/tokenizer"
)
// Value is a littlelang runtime value (nil, bool, int, str, list, map, func).
type Value interface{}

// Config allows you to configure the interpreter's interaction with the
// outside world.
type Config struct {
	// Vars is a map of pre-defined variables to pass into the interpreter.
	Vars map[string]Value

	// Args is the list of command-line arguments for the interpreter's args()
	// builtin.
	Args []string

	// Stdin is the interpreter's standard input, for the read() builtin.
	// Defaults to os.Stdin if nil.
	Stdin io.Reader

	// Stdout is the interpreter's standard output, for the print() builtin.
	// Defaults to os.Stdout if nil.
	Stdout io.Writer

	// Exit is the function to call when the builtin exit() is called.
	// Defaults to os.Exit if nil.
	Exit func(int)
}

// Stats holds statistics about the interpreter from an Evaluate or Execute
// call.
type Stats struct {
	Ops          int // incremented once per expression evaluation
	UserCalls    int
	BuiltinCalls int
}

// interpreter holds all run-time state: the scope stack, configured args
// and streams, the exit hook, and accumulated stats.
type interpreter struct {
	vars   []map[string]Value // stack of variable scopes, innermost last
	args   []string
	stdin  io.Reader
	stdout io.Writer
	exit   func(int)
	stats  Stats
}

// returnResult is used as a panic payload to implement function return;
// it is recovered in callFunction and turned into the call's return value.
type returnResult struct {
	value Value
	pos   Position
}
// binaryEvalFunc evaluates a binary operator on two already-evaluated values.
type binaryEvalFunc func(pos Position, l, r Value) Value

// binaryEvalFuncs maps binary operator tokens to their evaluation
// functions. AND and OR are absent: they are short-circuiting and handled
// directly in evaluate. GT/GTE/LTE are derived from evalLess by swapping
// or negating.
var binaryEvalFuncs = map[Token]binaryEvalFunc{
	DIVIDE:   evalDivide,
	EQUAL:    evalEqual,
	GT:       func(pos Position, l, r Value) Value { return evalLess(pos, r, l) },
	GTE:      func(pos Position, l, r Value) Value { return !evalLess(pos, l, r).(bool) },
	IN:       evalIn,
	LT:       evalLess,
	LTE:      func(pos Position, l, r Value) Value { return !evalLess(pos, r, l).(bool) },
	MINUS:    evalMinus,
	MODULO:   evalModulo,
	NOTEQUAL: func(pos Position, l, r Value) Value { return !evalEqual(pos, l, r).(bool) },
	PLUS:     evalPlus,
	TIMES:    evalTimes,
}
// evalEqual implements "==" on two values: deep equality for lists and
// maps, value equality for nil/bool/int/str, identity for functions, and
// time.Time.Equal for times. Values of differing types compare unequal.
//
// Bug fix: the map branch previously looked up r[k] without checking key
// presence, so a missing key produced the zero Value (nil) and two maps
// with different keys but nil values (e.g. {"a": nil} vs {"b": nil})
// compared equal. Key presence is now checked explicitly.
func evalEqual(pos Position, l, r Value) Value {
	switch l := l.(type) {
	case nil:
		return Value(r == nil)
	case bool:
		if r, rok := r.(bool); rok {
			return Value(l == r)
		}
	case int:
		if r, rok := r.(int); rok {
			return Value(l == r)
		}
	case string:
		if r, rok := r.(string); rok {
			return Value(l == r)
		}
	case *[]Value:
		if r, rok := r.(*[]Value); rok {
			if len(*l) != len(*r) {
				return Value(false)
			}
			for i, elem := range *l {
				if !evalEqual(pos, elem, (*r)[i]).(bool) {
					return Value(false)
				}
			}
			return Value(true)
		}
	case map[string]Value:
		if r, rok := r.(map[string]Value); rok {
			if len(l) != len(r) {
				return Value(false)
			}
			for k, v := range l {
				rval, present := r[k]
				if !present || !evalEqual(pos, v, rval).(bool) {
					return Value(false)
				}
			}
			return Value(true)
		}
	case functionType:
		if r, rok := r.(functionType); rok {
			return Value(l == r)
		}
	}
	return Value(false)
}
// evalIn implements "in": substring test on strs, element membership on
// lists (using evalEqual), and key membership on maps. Panics with a type
// error for unsupported operand combinations.
func evalIn(pos Position, l, r Value) Value {
	switch r := r.(type) {
	case string:
		if l, ok := l.(string); ok {
			// Idiom: strings.Contains instead of strings.Index(...) >= 0.
			return Value(strings.Contains(r, l))
		}
		panic(typeError(pos, "in str requires str on left side"))
	case *[]Value:
		for _, v := range *r {
			if evalEqual(pos, l, v).(bool) {
				return Value(true)
			}
		}
		return Value(false)
	case map[string]Value:
		if l, ok := l.(string); ok {
			_, present := r[l]
			return Value(present)
		}
		panic(typeError(pos, "in map requires str on left side"))
	}
	panic(typeError(pos, "in requires str, list, or map on right side"))
}
// evalLess implements "<" on two ints, two strs, or two lists compared
// lexicographically (element-wise, shorter list first on a tie prefix).
// Panics with a type error for any other operand combination.
func evalLess(pos Position, l, r Value) Value {
	switch l := l.(type) {
	case int:
		if r, rok := r.(int); rok {
			return Value(l < r)
		}
	case string:
		if r, rok := r.(string); rok {
			return Value(l < r)
		}
	case *[]Value:
		if r, rok := r.(*[]Value); rok {
			// Find the first differing element and compare it.
			for i := 0; i < len(*l) && i < len(*r); i++ {
				if !evalEqual(pos, (*l)[i], (*r)[i]).(bool) {
					return evalLess(pos, (*l)[i], (*r)[i])
				}
			}
			return Value(len(*l) < len(*r))
		}
	}
	panic(typeError(pos, "comparison requires two ints or two strs (or lists of ints or strs)"))
}
// evalPlus implements "+": int addition, str concatenation, list
// concatenation (a new list is allocated), or map union (a new map;
// right-hand entries overwrite left-hand ones on key clash).
func evalPlus(pos Position, l, r Value) Value {
	switch l := l.(type) {
	case int:
		if r, rok := r.(int); rok {
			return Value(l + r)
		}
	case string:
		if r, rok := r.(string); rok {
			return Value(l + r)
		}
	case *[]Value:
		if r, rok := r.(*[]Value); rok {
			result := make([]Value, 0, len(*l)+len(*r))
			result = append(result, *l...)
			result = append(result, *r...)
			return Value(&result)
		}
	case map[string]Value:
		if r, rok := r.(map[string]Value); rok {
			result := make(map[string]Value)
			for k, v := range l {
				result[k] = v
			}
			for k, v := range r {
				result[k] = v
			}
			return Value(result)
		}
	}
	panic(typeError(pos, "+ requires two ints, strs, lists, or maps"))
}
// ensureInts asserts that both operands are ints, panicking with a type
// error naming the operation otherwise.
func ensureInts(pos Position, l, r Value, operation string) (int, int) {
	li, lok := l.(int)
	ri, rok := r.(int)
	if !lok || !rok {
		panic(typeError(pos, "%s requires two ints", operation))
	}
	return li, ri
}

// evalMinus implements "-" on two ints.
func evalMinus(pos Position, l, r Value) Value {
	li, ri := ensureInts(pos, l, r, "-")
	return Value(li - ri)
}
// evalTimes implements "*": int multiplication, or repetition of a str or
// list by a non-negative int (the int may appear on either side). Panics
// with a value error on negative repeat counts and a type error otherwise.
func evalTimes(pos Position, l, r Value) Value {
	switch l := l.(type) {
	case int:
		switch r := r.(type) {
		case int:
			return Value(l * r)
		case string:
			if l < 0 {
				panic(valueError(pos, "can't multiply string by a negative number"))
			}
			return Value(strings.Repeat(r, l))
		case *[]Value:
			lst := make([]Value, 0, len(*r)*l)
			for i := 0; i < l; i++ {
				lst = append(lst, (*r)...)
			}
			return Value(&lst)
		}
	case string:
		if r, rok := r.(int); rok {
			if r < 0 {
				panic(valueError(pos, "can't multiply string by a negative number"))
			}
			return Value(strings.Repeat(l, r))
		}
	case *[]Value:
		if r, rok := r.(int); rok {
			if r < 0 {
				panic(valueError(pos, "can't multiply list by a negative number"))
			}
			lst := make([]Value, 0, len(*l)*r)
			for i := 0; i < r; i++ {
				lst = append(lst, (*l)...)
			}
			return Value(&lst)
		}
	}
	panic(typeError(pos, "* requires two ints or a str or list and an int"))
}
// evalDivide implements "/" on two ints (truncating integer division),
// panicking with a value error on division by zero.
func evalDivide(pos Position, l, r Value) Value {
	li, ri := ensureInts(pos, l, r, "/")
	if ri == 0 {
		panic(valueError(pos, "can't divide by zero"))
	}
	return Value(li / ri)
}

// evalModulo implements "%" on two ints, panicking with a value error on
// division by zero.
func evalModulo(pos Position, l, r Value) Value {
	li, ri := ensureInts(pos, l, r, "%")
	if ri == 0 {
		panic(valueError(pos, "can't divide by zero"))
	}
	return Value(li % ri)
}
// unaryEvalFunc evaluates a unary operator on a single value.
type unaryEvalFunc func(pos Position, v Value) Value

// unaryEvalFuncs maps unary operator tokens to their evaluation functions.
var unaryEvalFuncs = map[Token]unaryEvalFunc{
	NOT:   evalNot,
	MINUS: evalNegative,
}

// evalNot implements "not" on a bool; panics with a type error otherwise.
func evalNot(pos Position, v Value) Value {
	if v, ok := v.(bool); ok {
		return Value(!v)
	}
	panic(typeError(pos, "not requires a bool"))
}

// evalNegative implements unary "-" on an int; panics with a type error
// otherwise.
func evalNegative(pos Position, v Value) Value {
	if v, ok := v.(int); ok {
		return Value(-v)
	}
	panic(typeError(pos, "unary - requires an int"))
}
// evalSubscript implements container[subscript]: byte indexing into a str
// (returning a one-character str), int indexing into a list, or str key
// lookup in a map. Panics on a wrong subscript type, an out-of-range
// index, or a missing map key.
func evalSubscript(pos Position, container, subscript Value) Value {
	switch c := container.(type) {
	case string:
		if s, ok := subscript.(int); ok {
			if s < 0 || s >= len(c) {
				panic(valueError(pos, "subscript %d out of range", s))
			}
			// Note: indexes bytes, not runes.
			return Value(string([]byte{c[s]}))
		}
		panic(typeError(pos, "str subscript must be an int"))
	case *[]Value:
		if s, ok := subscript.(int); ok {
			if s < 0 || s >= len(*c) {
				panic(valueError(pos, "subscript %d out of range", s))
			}
			return (*c)[s]
		}
		panic(typeError(pos, "list subscript must be an int"))
	case map[string]Value:
		if s, ok := subscript.(string); ok {
			if value, ok := c[s]; ok {
				return value
			}
			panic(valueError(pos, "key not found: %q", s))
		}
		panic(typeError(pos, "map subscript must be a str"))
	default:
		panic(typeError(pos, "can only subscript str, list, or map"))
	}
}
// evalAnd implements short-circuiting "and": the right expression is only
// evaluated when the left is true. Panics with a type error when either
// evaluated side is not a bool. Rewritten with guard clauses to remove the
// nested if/else pyramid of the original (behavior unchanged).
func (interp *interpreter) evalAnd(pos Position, le, re parser.Expression) Value {
	l, ok := interp.evaluate(le).(bool)
	if !ok {
		panic(typeError(pos, "and requires two bools"))
	}
	if !l {
		// Short circuit: don't evaluate right if left false.
		return Value(false)
	}
	r, ok := interp.evaluate(re).(bool)
	if !ok {
		panic(typeError(pos, "and requires two bools"))
	}
	return Value(r)
}
// evalOr implements short-circuiting "or": the right expression is only
// evaluated when the left is false. Panics with a type error when either
// evaluated side is not a bool. Rewritten with guard clauses to remove the
// nested if/else pyramid of the original (behavior unchanged).
func (interp *interpreter) evalOr(pos Position, le, re parser.Expression) Value {
	l, ok := interp.evaluate(le).(bool)
	if !ok {
		panic(typeError(pos, "or requires two bools"))
	}
	if l {
		// Short circuit: don't evaluate right if left true.
		return Value(true)
	}
	r, ok := interp.evaluate(re).(bool)
	if !ok {
		panic(typeError(pos, "or requires two bools"))
	}
	return Value(r)
}
// callFunction invokes f with args. A "return" statement inside the
// function unwinds via a returnResult panic, which is recovered here and
// converted into the call's return value; any other panic is re-raised.
func (interp *interpreter) callFunction(pos Position, f functionType, args []Value) (ret Value) {
	defer func() {
		if r := recover(); r != nil {
			if result, ok := r.(returnResult); ok {
				ret = result.value
			} else {
				panic(r)
			}
		}
	}()
	return f.call(interp, pos, args)
}
// evaluate recursively evaluates an expression node to a runtime Value,
// incrementing the Ops stat once per node. Panics (type/name/value errors)
// are used for runtime errors; the caller is expected to recover them.
func (interp *interpreter) evaluate(expr parser.Expression) Value {
	interp.stats.Ops++
	switch e := expr.(type) {
	case *parser.Binary:
		if f, ok := binaryEvalFuncs[e.Operator]; ok {
			return f(e.Position(), interp.evaluate(e.Left), interp.evaluate(e.Right))
		} else if e.Operator == AND {
			// AND/OR short-circuit, so they can't go through the eager
			// binaryEvalFuncs table.
			return interp.evalAnd(e.Position(), e.Left, e.Right)
		} else if e.Operator == OR {
			return interp.evalOr(e.Position(), e.Left, e.Right)
		}
		// Parser should never give us this
		panic(fmt.Sprintf("unknown binary operator %v", e.Operator))
	case *parser.Unary:
		if f, ok := unaryEvalFuncs[e.Operator]; ok {
			return f(e.Position(), interp.evaluate(e.Operand))
		}
		// Parser should never give us this
		panic(fmt.Sprintf("unknown unary operator %v", e.Operator))
	case *parser.Call:
		function := interp.evaluate(e.Function)
		if f, ok := function.(functionType); ok {
			args := []Value{}
			for _, a := range e.Arguments {
				args = append(args, interp.evaluate(a))
			}
			if e.Ellipsis {
				// f(a, b, rest...): expand the last argument in place.
				iterator := getIterator(e.Arguments[len(args)-1].Position(), args[len(args)-1])
				args = args[:len(args)-1]
				for iterator.HasNext() {
					args = append(args, iterator.Value())
				}
			}
			return interp.callFunction(e.Function.Position(), f, args)
		}
		panic(typeError(e.Function.Position(), "can't call non-function type %s", typeName(function)))
	case *parser.Literal:
		return Value(e.Value)
	case *parser.Variable:
		if v, ok := interp.lookup(e.Name); ok {
			return v
		}
		panic(nameError(e.Position(), "name %q not found", e.Name))
	case *parser.List:
		values := make([]Value, len(e.Values))
		for i, v := range e.Values {
			values[i] = interp.evaluate(v)
		}
		return Value(&values)
	case *parser.Map:
		value := make(map[string]Value)
		for _, item := range e.Items {
			key := interp.evaluate(item.Key)
			if k, ok := key.(string); ok {
				value[k] = interp.evaluate(item.Value)
			} else {
				panic(typeError(item.Key.Position(), "map key must be str, not %s", typeName(key)))
			}
		}
		return Value(value)
	case *parser.Subscript:
		container := interp.evaluate(e.Container)
		subscript := interp.evaluate(e.Subscript)
		return evalSubscript(e.Subscript.Position(), container, subscript)
	case *parser.FunctionExpression:
		// The innermost scope at definition time becomes the closure.
		closure := interp.vars[len(interp.vars)-1]
		return &userFunction{"", e.Parameters, e.Ellipsis, e.Body, closure}
	default:
		// Parser should never give us this
		panic(fmt.Sprintf("unexpected expression type %T", expr))
	}
}
// pushScope pushes a new innermost variable scope onto the scope stack.
func (interp *interpreter) pushScope(scope map[string]Value) {
	interp.vars = append(interp.vars, scope)
}

// popScope removes the innermost variable scope from the scope stack.
func (interp *interpreter) popScope() {
	interp.vars = interp.vars[:len(interp.vars)-1]
}

// assign sets name to value in the innermost scope only; it never mutates
// outer scopes.
func (interp *interpreter) assign(name string, value Value) {
	interp.vars[len(interp.vars)-1][name] = value
}

// lookup searches the scope stack innermost-first, returning the first value
// found for name and whether any scope contained it.
func (interp *interpreter) lookup(name string) (Value, bool) {
	for i := len(interp.vars) - 1; i >= 0; i-- {
		thisVars := interp.vars[i]
		if v, ok := thisVars[name]; ok {
			return v, true
		}
	}
	return nil, false
}

// executeBlock runs each statement of a block in order.
func (interp *interpreter) executeBlock(block parser.Block) {
	for _, s := range block {
		interp.executeStatement(s)
	}
}
// iteratorType is the minimal iteration protocol used by "for" loops and by
// ellipsis argument spreading in calls.
type iteratorType interface {
	HasNext() bool
	Value() Value
}

// listIterator iterates over a pre-built slice of values.
type listIterator struct {
	values []Value // elements to yield, in order
	index  int     // index of the next element to yield
}

// HasNext reports whether another element remains.
func (li *listIterator) HasNext() bool {
	return li.index < len(li.values)
}

// Value returns the current element and advances the iterator. The caller
// must check HasNext first; reading past the end panics on the index.
func (li *listIterator) Value() Value {
	v := li.values[li.index]
	li.index++
	return v
}
// getIterator returns an iterator over value: the runes of a string (yielded
// as one-rune strings), the elements of a list, or the keys of a map. It
// panics with a type error for any non-iterable value.
func getIterator(pos Position, value Value) iteratorType {
	switch iterable := value.(type) {
	case string:
		// Iterate runes, not bytes, so multi-byte UTF-8 characters stay
		// intact. len(iterable) (bytes) is an upper bound on the rune count,
		// so pre-sizing the capacity avoids repeated growth.
		strs := make([]Value, 0, len(iterable))
		for _, r := range iterable {
			strs = append(strs, string(r))
		}
		return &listIterator{strs, 0}
	case *[]Value:
		return &listIterator{*iterable, 0}
	case map[string]Value:
		// NOTE: Go map iteration order is random, so key order is unspecified.
		keys := make([]Value, 0, len(iterable))
		for key := range iterable {
			keys = append(keys, key)
		}
		return &listIterator{keys, 0}
	default:
		panic(typeError(pos, "expected iterable (str, list, or map), got %s", typeName(value)))
	}
}
// assignSubscript stores value into container[subscript]. Lists require an
// in-range int subscript; maps require a string subscript. Anything else
// panics with a typed error.
func (interp *interpreter) assignSubscript(pos Position, container, subscript, value Value) {
	switch c := container.(type) {
	case *[]Value:
		index, ok := subscript.(int)
		if !ok {
			panic(typeError(pos, "list subscript must be an int"))
		}
		if index < 0 || index >= len(*c) {
			panic(valueError(pos, "subscript %d out of range", index))
		}
		(*c)[index] = value
	case map[string]Value:
		key, ok := subscript.(string)
		if !ok {
			panic(typeError(pos, "map subscript must be a str"))
		}
		c[key] = value
	default:
		panic(typeError(pos, "can only assign to subscript of list or map"))
	}
}
// executeStatement runs a single statement, dispatching on the concrete
// parser node type. Runtime failures panic with typed error values recovered
// at the top level; a "return" statement panics with returnResult, which
// callFunction catches.
func (interp *interpreter) executeStatement(s parser.Statement) {
	interp.stats.Ops++
	switch s := s.(type) {
	case *parser.Assign:
		switch target := s.Target.(type) {
		case *parser.Variable:
			interp.assign(target.Name, interp.evaluate(s.Value))
		case *parser.Subscript:
			container := interp.evaluate(target.Container)
			subscript := interp.evaluate(target.Subscript)
			value := interp.evaluate(s.Value)
			interp.assignSubscript(target.Subscript.Position(), container, subscript, value)
		default:
			// Parser should never get us here
			panic("can only assign to variable or subscript")
		}
	case *parser.If:
		cond := interp.evaluate(s.Condition)
		if c, ok := cond.(bool); ok {
			if c {
				interp.executeBlock(s.Body)
			} else if len(s.Else) > 0 {
				interp.executeBlock(s.Else)
			}
		} else {
			panic(typeError(s.Condition.Position(), "if condition must be bool, got %s", typeName(cond)))
		}
	case *parser.While:
		for {
			cond := interp.evaluate(s.Condition)
			if c, ok := cond.(bool); ok {
				if !c {
					break
				}
				interp.executeBlock(s.Body)
			} else {
				// typeName keeps the message consistent with the "if" error
				// above (previously %T leaked Go type names to users).
				panic(typeError(s.Condition.Position(), "while condition must be bool, got %s", typeName(cond)))
			}
		}
	case *parser.For:
		iterable := interp.evaluate(s.Iterable)
		iterator := getIterator(s.Iterable.Position(), iterable)
		for iterator.HasNext() {
			interp.assign(s.Name, iterator.Value())
			interp.executeBlock(s.Body)
		}
	case *parser.ExpressionStatement:
		interp.evaluate(s.Expression)
	case *parser.FunctionDefinition:
		// Named functions close over the innermost scope at definition time.
		closure := interp.vars[len(interp.vars)-1]
		interp.assign(s.Name, &userFunction{s.Name, s.Parameters, s.Ellipsis, s.Body, closure})
	case *parser.Return:
		result := interp.evaluate(s.Result)
		panic(returnResult{result, s.Position()})
	default:
		// Parser should never get us here
		panic(fmt.Sprintf("unexpected statement type %T", s))
	}
}
// execute runs every top-level statement of a parsed program in order.
func (interp *interpreter) execute(prog *parser.Program) {
	for _, statement := range prog.Statements {
		interp.executeStatement(statement)
	}
}
// newInterpreter builds an interpreter with a single global scope containing
// the builtins plus any user-provided variables, wiring up the configured I/O
// streams and exit function (defaulting to os.Stdin/os.Stdout/os.Exit).
func newInterpreter(config *Config) *interpreter {
	interp := &interpreter{}
	interp.pushScope(map[string]Value{})
	for name, builtin := range builtins {
		interp.assign(name, builtin)
	}
	// Config vars are assigned after the builtins into the same scope, so a
	// config var with the same name overwrites the builtin.
	for name, value := range config.Vars {
		interp.assign(name, value)
	}
	interp.args = config.Args
	interp.stdin = config.Stdin
	interp.stdout = config.Stdout
	interp.exit = config.Exit
	if interp.stdin == nil {
		interp.stdin = os.Stdin
	}
	if interp.stdout == nil {
		interp.stdout = os.Stdout
	}
	if interp.exit == nil {
		interp.exit = os.Exit
	}
	return interp
}
// Evaluate takes a parsed Expression and interpreter config and evaluates the
// expression, returning the Value of the expression, interpreter statistics,
// and an error which is nil on success or an interpreter.Error if there's an
// error. On error, stats is nil.
func Evaluate(expr parser.Expression, config *Config) (v Value, stats *Stats, err error) {
	defer func() {
		if r := recover(); r != nil {
			// Convert to interpreter.Error; re-panic anything else so internal
			// bugs surface as-is. (A bare r.(Error) assertion would replace a
			// non-Error panic with a confusing interface-conversion panic.)
			if e, ok := r.(Error); ok {
				err = e
			} else {
				panic(r)
			}
		}
	}()
	interp := newInterpreter(config)
	v = interp.evaluate(expr)
	stats = &interp.stats
	return
}
// Execute takes a parsed Program and interpreter config and interprets the
// program. Return interpreter statistics, and an error which is nil on
// success or an interpreter.Error if there's an error. On error, stats is
// nil.
func Execute(prog *parser.Program, config *Config) (stats *Stats, err error) {
	defer func() {
		if r := recover(); r != nil {
			switch e := r.(type) {
			case Error:
				// A runtime error raised by the interpreter itself.
				err = e
			case returnResult:
				// A "return" that escaped callFunction means the statement
				// appeared outside any function body.
				err = runtimeError(e.pos, "can't return at top level")
			default:
				// Anything else is an internal bug; re-panic unchanged.
				panic(r)
			}
		}
	}()
	interp := newInterpreter(config)
	interp.execute(prog)
	stats = &interp.stats
	return
}
package ast
// These are the available root node types. In JSON it will either be an
// object or an array at the base.
const (
	ObjectRoot RootNodeType = iota
	ArrayRoot
)

// RootNodeType is a type alias for an int
type RootNodeType int

// RootNode is what starts every parsed AST. There is a `Type` field so that
// you can ask which root node type starts the tree. RootValue holds the
// single top-level value of the document.
type RootNode struct {
	RootValue *Value
	Type      RootNodeType
}

// Object represents a JSON object. It holds a slice of Property as its children,
// a Type ("Object"), and start & end code points for displaying.
type Object struct {
	Type     string // "Object"
	Children []Property
	Start    int
	End      int
}

// Array represents a JSON array It holds a slice of Value as its children,
// a Type ("Array"), and start & end code points for displaying.
type Array struct {
	Type     string // "Array"
	Children []Value
	Start    int
	End      int
}

// Literal represents a JSON literal value. It holds a Type ("Literal") and the actual value.
type Literal struct {
	Type  string // "Literal"
	Value Value
}

// Property holds a Type ("Property") as well as a `Key` and `Value`. The Key is an Identifier
// and the value is any Value.
type Property struct {
	Type  string // "Property"
	Key   Identifier
	Value Value
}

// Identifier represents a JSON object property key
type Identifier struct {
	Type  string // "Identifier"
	Value string // "key1"
}

// Value will eventually have some methods that all Values must implement. For now
// it represents any JSON value (object | array | boolean | string | number | null)
// as an empty interface.
type Value interface{}
// Available object states for use in parsing
const (
	ObjStart objectState = iota
	ObjOpen
	ObjProperty
	ObjComma
)

// objectState tracks the parser's progress through a JSON object.
type objectState int

// Available property states for use in parsing
const (
	PropertyStart propertyState = iota
	PropertyKey
	PropertyColon
)

// propertyState tracks the parser's progress through a key/value property.
type propertyState int

// Available array states for use in parsing
const (
	ArrayStart arrayState = iota
	ArrayOpen
	ArrayValue
	ArrayComma
)

// arrayState tracks the parser's progress through a JSON array.
type arrayState int

// Available string states for use in parsing
const (
	StringStart stringState = iota
	StringQuoteOrChar
	Escape
)

// stringState tracks the parser's progress through a JSON string.
type stringState int

// Available number states for use in parsing
const (
	NumberStart numberState = iota
	NumberMinus
	NumberZero
	NumberDigit
	NumberPoint
	NumberDigitFraction
	NumberExp
	NumberExpDigitOrSign
)

// numberState tracks the parser's progress through a JSON number.
type numberState int
package strconvhelper
import (
"github.com/apaxa-io/mathhelper"
"strconv"
)
// defaultIntegerBase is the numeric base used by every Parse* helper in this
// package.
const defaultIntegerBase = 10

// Signed integers

// ParseInt interprets a string s in 10-base and returns the corresponding value i (int) and error.
func ParseInt(s string) (int, error) {
	v, err := strconv.ParseInt(s, defaultIntegerBase, mathhelper.IntBits)
	if err != nil {
		// Return 0, not the clamped value strconv yields on range errors.
		return 0, err
	}
	return int(v), nil
}

// ParseInt8 interprets a string s in 10-base and returns the corresponding value i (int8) and error.
func ParseInt8(s string) (int8, error) {
	v, err := strconv.ParseInt(s, defaultIntegerBase, 8)
	if err != nil {
		return 0, err
	}
	return int8(v), nil
}

// ParseInt16 interprets a string s in 10-base and returns the corresponding value i (int16) and error.
func ParseInt16(s string) (int16, error) {
	v, err := strconv.ParseInt(s, defaultIntegerBase, 16)
	if err != nil {
		return 0, err
	}
	return int16(v), nil
}

// ParseInt32 interprets a string s in 10-base and returns the corresponding value i (int32) and error.
func ParseInt32(s string) (int32, error) {
	v, err := strconv.ParseInt(s, defaultIntegerBase, 32)
	if err != nil {
		return 0, err
	}
	return int32(v), nil
}

// ParseInt64 interprets a string s in 10-base and returns the corresponding value i (int64) and error.
func ParseInt64(s string) (int64, error) {
	return strconv.ParseInt(s, defaultIntegerBase, 64)
}
// Unsigned integers
// ParseUint interprets a string s in 10-base and returns the corresponding value i (uint) and error.
func ParseUint(stringValue string) (uint, error) {
if valueUint64, err := strconv.ParseUint(stringValue, defaultIntegerBase, mathhelper.UintBits); err == nil {
return uint(valueUint64), nil
} else {
return 0, err
}
}
// ParseUint8 interprets a string s in 10-base and returns the corresponding value i (uint8) and error.
func ParseUint8(stringValue string) (uint8, error) {
if valueUint64, err := strconv.ParseUint(stringValue, defaultIntegerBase, 8); err == nil {
return uint8(valueUint64), nil
} else {
return 0, err
}
}
// ParseUint16 interprets a string s in 10-base and returns the corresponding value i (uint16) and error.
func ParseUint16(stringValue string) (uint16, error) {
if valueUint64, err := strconv.ParseUint(stringValue, defaultIntegerBase, 16); err == nil {
return uint16(valueUint64), nil
} else {
return 0, err
}
}
// ParseUint32 interprets a string s in 10-base and returns the corresponding value i (uint32) and error.
func ParseUint32(stringValue string) (uint32, error) {
if valueUint64, err := strconv.ParseUint(stringValue, defaultIntegerBase, 32); err == nil {
return uint32(valueUint64), nil
} else {
return 0, err
}
}
// ParseUint64 interprets a string s in 10-base and returns the corresponding value i (uint64) and error.
func ParseUint64(stringValue string) (uint64, error) {
return strconv.ParseUint(stringValue, defaultIntegerBase, 64)
} | parseint.go | 0.78535 | 0.473109 | parseint.go | starcoder |
package evaluation
import (
"fmt"
)
// Number type for clause attribute evaluation
type Number float64

// NewNumber creates a Number instance with the object value; only float64
// input is accepted, anything else yields an error.
func NewNumber(value interface{}) (Number, error) {
	f, ok := value.(float64)
	if !ok {
		return 0, fmt.Errorf("%v: cant cast to a number", ErrWrongTypeAssertion)
	}
	return Number(f), nil
}
// numberOperator applies fn to each float32/float64 element of values and
// returns true as soon as fn matches one. Non-float elements are skipped;
// if any were encountered, an error is logged once after the scan. (The
// previous version logged "invalid value" whenever no element matched fn,
// even when every element was a perfectly valid float.)
func numberOperator(values []interface{}, fn func(float64) bool) bool {
	invalid := false
	for _, val := range values {
		switch v := val.(type) {
		case float32:
			if fn(float64(v)) {
				return true
			}
		case float64:
			if fn(v) {
				return true
			}
		default:
			invalid = true
		}
	}
	if invalid {
		log.Errorf("input contains invalid value for number comparisons: %s\n", values)
	}
	return false
}
// StartsWith always return false; string-only operators never match a number.
// NOTE(review): these always-false methods presumably exist to satisfy a
// shared clause-operator interface — confirm against the interface definition.
func (n Number) StartsWith(values []interface{}) bool {
	return false
}

// EndsWith always return false
func (n Number) EndsWith(values []interface{}) bool {
	return false
}

// Match always return false
func (n Number) Match(values []interface{}) bool {
	return false
}

// Contains always return false
func (n Number) Contains([]interface{}) bool {
	return false
}

// EqualSensitive always return false
func (n Number) EqualSensitive(values []interface{}) bool {
	return false
}

// Equal check if the number and value are equal
func (n Number) Equal(values []interface{}) bool {
	return numberOperator(values, func(f float64) bool {
		return float64(n) == f
	})
}

// GreaterThan checks if the number is greater than the value
func (n Number) GreaterThan(values []interface{}) bool {
	return numberOperator(values, func(f float64) bool {
		return float64(n) > f
	})
}

// GreaterThanEqual checks if the number is greater or equal than the value
func (n Number) GreaterThanEqual(values []interface{}) bool {
	return numberOperator(values, func(f float64) bool {
		return float64(n) >= f
	})
}

// LessThan checks if the number is less than the value
func (n Number) LessThan(values []interface{}) bool {
	return numberOperator(values, func(f float64) bool {
		return float64(n) < f
	})
}

// LessThanEqual checks if the number is less or equal than the value
func (n Number) LessThanEqual(values []interface{}) bool {
	return numberOperator(values, func(f float64) bool {
		return float64(n) <= f
	})
}

// In checks if the number exist in slice of numbers (value); it simply
// delegates to Equal, which already scans the whole slice.
func (n Number) In(values []interface{}) bool {
	return n.Equal(values)
}
package datadog
import (
"encoding/json"
"time"
)
// UsageNetworkFlowsHour Number of netflow events indexed for each hour for a given organization.
type UsageNetworkFlowsHour struct {
// The hour for the usage.
Hour *time.Time `json:"hour,omitempty"`
// Contains the number of netflow events indexed.
IndexedEventCount *int64 `json:"indexed_event_count,omitempty"`
}
// NewUsageNetworkFlowsHour instantiates a new UsageNetworkFlowsHour object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewUsageNetworkFlowsHour() *UsageNetworkFlowsHour {
this := UsageNetworkFlowsHour{}
return &this
}
// NewUsageNetworkFlowsHourWithDefaults instantiates a new UsageNetworkFlowsHour object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewUsageNetworkFlowsHourWithDefaults() *UsageNetworkFlowsHour {
this := UsageNetworkFlowsHour{}
return &this
}
// GetHour returns the Hour field value if set, zero value otherwise.
func (o *UsageNetworkFlowsHour) GetHour() time.Time {
if o == nil || o.Hour == nil {
var ret time.Time
return ret
}
return *o.Hour
}
// GetHourOk returns a tuple with the Hour field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UsageNetworkFlowsHour) GetHourOk() (*time.Time, bool) {
if o == nil || o.Hour == nil {
return nil, false
}
return o.Hour, true
}
// HasHour returns a boolean if a field has been set.
func (o *UsageNetworkFlowsHour) HasHour() bool {
if o != nil && o.Hour != nil {
return true
}
return false
}
// SetHour gets a reference to the given time.Time and assigns it to the Hour field.
func (o *UsageNetworkFlowsHour) SetHour(v time.Time) {
o.Hour = &v
}
// GetIndexedEventCount returns the IndexedEventCount field value if set, zero value otherwise.
func (o *UsageNetworkFlowsHour) GetIndexedEventCount() int64 {
if o == nil || o.IndexedEventCount == nil {
var ret int64
return ret
}
return *o.IndexedEventCount
}
// GetIndexedEventCountOk returns a tuple with the IndexedEventCount field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UsageNetworkFlowsHour) GetIndexedEventCountOk() (*int64, bool) {
if o == nil || o.IndexedEventCount == nil {
return nil, false
}
return o.IndexedEventCount, true
}
// HasIndexedEventCount returns a boolean if a field has been set.
func (o *UsageNetworkFlowsHour) HasIndexedEventCount() bool {
if o != nil && o.IndexedEventCount != nil {
return true
}
return false
}
// SetIndexedEventCount gets a reference to the given int64 and assigns it to the IndexedEventCount field.
func (o *UsageNetworkFlowsHour) SetIndexedEventCount(v int64) {
o.IndexedEventCount = &v
}
func (o UsageNetworkFlowsHour) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.Hour != nil {
toSerialize["hour"] = o.Hour
}
if o.IndexedEventCount != nil {
toSerialize["indexed_event_count"] = o.IndexedEventCount
}
return json.Marshal(toSerialize)
}
// NullableUsageNetworkFlowsHour wraps a UsageNetworkFlowsHour pointer and
// tracks whether it was explicitly set, distinguishing "absent" from "null".
type NullableUsageNetworkFlowsHour struct {
	value *UsageNetworkFlowsHour
	isSet bool
}

// Get returns the wrapped value (nil when unset or explicitly null).
func (v NullableUsageNetworkFlowsHour) Get() *UsageNetworkFlowsHour {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableUsageNetworkFlowsHour) Set(val *UsageNetworkFlowsHour) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set or UnmarshalJSON has been called.
func (v NullableUsageNetworkFlowsHour) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullableUsageNetworkFlowsHour) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableUsageNetworkFlowsHour returns a wrapper already marked as set.
func NewNullableUsageNetworkFlowsHour(val *UsageNetworkFlowsHour) *NullableUsageNetworkFlowsHour {
	return &NullableUsageNetworkFlowsHour{value: val, isSet: true}
}

// MarshalJSON serializes the wrapped value (nil marshals as JSON null).
func (v NullableUsageNetworkFlowsHour) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes src into the wrapped value. Note that isSet becomes
// true even when src is JSON null — presence, not non-nullness, is tracked.
func (v *NullableUsageNetworkFlowsHour) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package mft
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/t9t/gomft/binutil"
"github.com/t9t/gomft/fragment"
"github.com/t9t/gomft/utf16"
)
var (
	// fileSignature is the ASCII magic "FILE" that begins every MFT record.
	fileSignature = []byte{0x46, 0x49, 0x4c, 0x45}
)

// maxInt is the largest value representable by the platform's int type.
const maxInt = int64(^uint(0) >> 1)

// A Record represents an MFT entry, excluding all technical data (such as "offset to first attribute"). The Attributes
// list only contains the attribute headers and raw data; the attribute data has to be parsed separately. When this is a
// base record, the BaseRecordReference will be zero. When it is an extension record, the BaseRecordReference points to
// the record's base record.
type Record struct {
	Signature             []byte
	FileReference         FileReference
	BaseRecordReference   FileReference
	LogFileSequenceNumber uint64
	HardLinkCount         int
	Flags                 RecordFlag
	ActualSize            uint32
	AllocatedSize         uint32
	NextAttributeId       int
	Attributes            []Attribute
}
// ParseRecord parses bytes into a Record after applying fixup. The data is assumed to be in Little Endian order. Only
// the attribute headers are parsed, not the actual attribute data.
func ParseRecord(b []byte) (Record, error) {
	if len(b) < 42 {
		return Record{}, fmt.Errorf("record data length should be at least 42 but is %d", len(b))
	}
	sig := b[:4]
	if !bytes.Equal(sig, fileSignature) {
		return Record{}, fmt.Errorf("unknown record signature: %# x", sig)
	}
	// Work on a copy: applyFixUp mutates the buffer in place.
	b = binutil.Duplicate(b)
	r := binutil.NewLittleEndianReader(b)
	baseRecordRef, err := ParseFileReference(r.Read(0x20, 8))
	if err != nil {
		return Record{}, fmt.Errorf("unable to parse base record reference: %v", err)
	}
	firstAttributeOffset := int(r.Uint16(0x14))
	if firstAttributeOffset < 0 || firstAttributeOffset >= len(b) {
		return Record{}, fmt.Errorf("invalid first attribute offset %d (data length: %d)", firstAttributeOffset, len(b))
	}
	// Fixup must be undone before any multi-sector data (the attributes) is read.
	updateSequenceOffset := int(r.Uint16(0x04))
	updateSequenceSize := int(r.Uint16(0x06))
	b, err = applyFixUp(b, updateSequenceOffset, updateSequenceSize)
	if err != nil {
		return Record{}, fmt.Errorf("unable to apply fixup: %v", err)
	}
	attributes, err := ParseAttributes(b[firstAttributeOffset:])
	if err != nil {
		return Record{}, err
	}
	return Record{
		Signature:             binutil.Duplicate(sig),
		FileReference:         FileReference{RecordNumber: uint64(r.Uint32(0x2C)), SequenceNumber: r.Uint16(0x10)},
		BaseRecordReference:   baseRecordRef,
		LogFileSequenceNumber: r.Uint64(0x08),
		HardLinkCount:         int(r.Uint16(0x12)),
		Flags:                 RecordFlag(r.Uint16(0x16)),
		ActualSize:            r.Uint32(0x18),
		AllocatedSize:         r.Uint32(0x1C),
		NextAttributeId:       int(r.Uint16(0x28)),
		Attributes:            attributes,
	}, nil
}
// A FileReference represents a reference to an MFT record. Since the FileReference in a Record is only 4 bytes, the
// RecordNumber will probably not exceed 32 bits.
type FileReference struct {
	RecordNumber   uint64
	SequenceNumber uint16
}

// ParseFileReference parses a Little Endian ordered 8-byte slice into a
// FileReference: the first 6 bytes hold the record number and the final 2
// bytes the sequence number.
func ParseFileReference(b []byte) (FileReference, error) {
	if len(b) != 8 {
		return FileReference{}, fmt.Errorf("expected 8 bytes but got %d", len(b))
	}
	recordNumber := binary.LittleEndian.Uint64(padTo(b[:6], 8))
	sequenceNumber := binary.LittleEndian.Uint16(b[6:])
	return FileReference{RecordNumber: recordNumber, SequenceNumber: sequenceNumber}, nil
}
// RecordFlag represents a bit mask flag indicating the status of the MFT record.
type RecordFlag uint16

// Bit values for the RecordFlag. For example, an in-use directory has value 0x0003.
const (
	RecordFlagInUse       RecordFlag = 0x0001
	RecordFlagIsDirectory RecordFlag = 0x0002
	RecordFlagInExtend    RecordFlag = 0x0004
	RecordFlagIsIndex     RecordFlag = 0x0008
)

// Is checks if this RecordFlag's bit mask contains the specified flag (all
// bits of c must be set in f).
func (f *RecordFlag) Is(c RecordFlag) bool {
	return *f&c == c
}
// applyFixUp verifies and undoes the NTFS update sequence ("fixup") applied
// to a multi-sector record: on disk, the last two bytes of every sector are
// replaced by the update sequence number and the original bytes are stored in
// the update sequence array. offset/length locate the update sequence within
// b; length counts 2-byte pairs, not bytes. b is modified in place.
func applyFixUp(b []byte, offset int, length int) ([]byte, error) {
	r := binutil.NewLittleEndianReader(b)
	updateSequence := r.Read(offset, length*2) // length is in pairs, not bytes
	updateSequenceNumber := updateSequence[:2]
	updateSequenceArray := updateSequence[2:]
	sectorCount := len(updateSequenceArray) / 2
	sectorSize := len(b) / sectorCount
	// First pass: verify every sector still ends with the update sequence number.
	for i := 1; i <= sectorCount; i++ {
		pos := sectorSize*i - 2
		if !bytes.Equal(updateSequenceNumber, b[pos:pos+2]) {
			return nil, fmt.Errorf("update sequence mismatch at pos %d", pos)
		}
	}
	// Second pass: restore the original bytes from the update sequence array.
	for i := 0; i < sectorCount; i++ {
		pos := sectorSize*(i+1) - 2
		num := i * 2
		copy(b[pos:pos+2], updateSequenceArray[num:num+2])
	}
	return b, nil
}
// FindAttributes returns all attributes of the specified type contained in
// this record, preserving their order. When no matches are found a non-nil
// empty slice is returned.
func (r *Record) FindAttributes(attrType AttributeType) []Attribute {
	matches := make([]Attribute, 0)
	for _, attr := range r.Attributes {
		if attr.Type != attrType {
			continue
		}
		matches = append(matches, attr)
	}
	return matches
}
// Attribute represents an MFT record attribute header and its corresponding raw attribute Data (excluding header data).
// When the attribute is Resident, the Data contains the actual attribute's data. When the attribute is non-resident,
// the Data contains DataRuns pointing to the actual data. DataRun data can be parsed using ParseDataRuns().
type Attribute struct {
	Type          AttributeType
	Resident      bool
	Name          string
	Flags         AttributeFlags
	AttributeId   int
	AllocatedSize uint64
	ActualSize    uint64
	Data          []byte
}

// AttributeType represents the type of an Attribute. Use Name() to get the attribute type's name.
type AttributeType uint32

// Known values for AttributeType. Note that other values might occur too.
const (
	AttributeTypeStandardInformation AttributeType = 0x10       // $STANDARD_INFORMATION; always resident
	AttributeTypeAttributeList       AttributeType = 0x20       // $ATTRIBUTE_LIST; mixed residency
	AttributeTypeFileName            AttributeType = 0x30       // $FILE_NAME; always resident
	AttributeTypeObjectId            AttributeType = 0x40       // $OBJECT_ID; always resident
	AttributeTypeSecurityDescriptor  AttributeType = 0x50       // $SECURITY_DESCRIPTOR; always resident?
	AttributeTypeVolumeName          AttributeType = 0x60       // $VOLUME_NAME; always resident?
	AttributeTypeVolumeInformation   AttributeType = 0x70       // $VOLUME_INFORMATION; never resident?
	AttributeTypeData                AttributeType = 0x80       // $DATA; mixed residency
	AttributeTypeIndexRoot           AttributeType = 0x90       // $INDEX_ROOT; always resident
	AttributeTypeIndexAllocation     AttributeType = 0xa0       // $INDEX_ALLOCATION; never resident?
	AttributeTypeBitmap              AttributeType = 0xb0       // $BITMAP; nearly always resident?
	AttributeTypeReparsePoint        AttributeType = 0xc0       // $REPARSE_POINT; always resident?
	AttributeTypeEAInformation       AttributeType = 0xd0       // $EA_INFORMATION; always resident
	AttributeTypeEA                  AttributeType = 0xe0       // $EA; nearly always resident?
	AttributeTypePropertySet         AttributeType = 0xf0       // $PROPERTY_SET
	AttributeTypeLoggedUtilityStream AttributeType = 0x100      // $LOGGED_UTILITY_STREAM; always resident
	AttributeTypeTerminator          AttributeType = 0xFFFFFFFF // Indicates the last attribute in a list; will not actually be returned by ParseAttributes
)

// AttributeFlags represents a bit mask flag indicating various properties of an attribute's data.
type AttributeFlags uint16

// Bit values for the AttributeFlags. For example, an encrypted, compressed attribute has value 0x4001.
const (
	AttributeFlagsCompressed AttributeFlags = 0x0001
	AttributeFlagsEncrypted  AttributeFlags = 0x4000
	AttributeFlagsSparse     AttributeFlags = 0x8000
)

// Is checks if this AttributeFlags's bit mask contains the specified flag
// (all bits of c must be set in f).
func (f *AttributeFlags) Is(c AttributeFlags) bool {
	return *f&c == c
}
// ParseAttributes parses bytes into Attributes. The data is assumed to be in Little Endian order. Only the attribute
// headers are parsed, not the actual attribute data. Parsing stops at the
// terminator attribute (0xFFFFFFFF) or at the end of the data.
func ParseAttributes(b []byte) ([]Attribute, error) {
	if len(b) == 0 {
		return []Attribute{}, nil
	}
	attributes := make([]Attribute, 0)
	for len(b) > 0 {
		// Need at least 4 bytes to read the attribute type.
		if len(b) < 4 {
			return nil, fmt.Errorf("attribute header data should be at least 4 bytes but is %d", len(b))
		}
		r := binutil.NewLittleEndianReader(b)
		attrType := r.Uint32(0)
		if attrType == uint32(AttributeTypeTerminator) {
			break
		}
		if len(b) < 8 {
			return nil, fmt.Errorf("cannot read attribute header record length, data should be at least 8 bytes but is %d", len(b))
		}
		uRecordLength := r.Uint32(0x04)
		// Guard the uint32 -> int conversion on 32-bit platforms.
		if int64(uRecordLength) > maxInt {
			return nil, fmt.Errorf("record length %d overflows maximum int value %d", uRecordLength, maxInt)
		}
		recordLength := int(uRecordLength)
		// A zero record length would make the loop spin forever.
		if recordLength <= 0 {
			return nil, fmt.Errorf("cannot handle attribute with zero or negative record length %d", recordLength)
		}
		if recordLength > len(b) {
			return nil, fmt.Errorf("attribute record length %d exceeds data length %d", recordLength, len(b))
		}
		recordData := r.Read(0, recordLength)
		attribute, err := ParseAttribute(recordData)
		if err != nil {
			return nil, err
		}
		attributes = append(attributes, attribute)
		// Advance to the next attribute header.
		b = r.ReadFrom(recordLength)
	}
	return attributes, nil
}
// ParseAttribute parses bytes into an Attribute. The data is assumed to be in Little Endian order. Only the attribute
// headers are parsed, not the actual attribute data. For resident attributes
// Data holds the attribute value itself; for non-resident attributes Data
// holds the raw data runs (see ParseDataRuns).
func ParseAttribute(b []byte) (Attribute, error) {
	if len(b) < 22 {
		return Attribute{}, fmt.Errorf("attribute data should be at least 22 bytes but is %d", len(b))
	}
	r := binutil.NewLittleEndianReader(b)
	nameLength := r.Byte(0x09)
	nameOffset := r.Uint16(0x0A)
	name := ""
	if nameLength != 0 {
		// The name is stored as UTF-16, so its byte length is twice nameLength.
		nameBytes := r.Read(int(nameOffset), int(nameLength)*2)
		name = utf16.DecodeString(nameBytes, binary.LittleEndian)
	}
	// Byte 0x08 is the non-resident flag: 0x00 means resident.
	resident := r.Byte(0x08) == 0x00
	var attributeData []byte
	actualSize := uint64(0)
	allocatedSize := uint64(0)
	if resident {
		// Resident layout: value offset at 0x14, value length at 0x10.
		dataOffset := int(r.Uint16(0x14))
		uDataLength := r.Uint32(0x10)
		if int64(uDataLength) > maxInt {
			return Attribute{}, fmt.Errorf("attribute data length %d overflows maximum int value %d", uDataLength, maxInt)
		}
		dataLength := int(uDataLength)
		expectedDataLength := dataOffset + dataLength
		if len(b) < expectedDataLength {
			return Attribute{}, fmt.Errorf("expected attribute data length to be at least %d but is %d", expectedDataLength, len(b))
		}
		attributeData = r.Read(dataOffset, dataLength)
	} else {
		// Non-resident layout: data run offset at 0x20, allocated/actual
		// sizes at 0x28/0x30.
		dataOffset := int(r.Uint16(0x20))
		if len(b) < dataOffset {
			return Attribute{}, fmt.Errorf("expected attribute data length to be at least %d but is %d", dataOffset, len(b))
		}
		allocatedSize = r.Uint64(0x28)
		actualSize = r.Uint64(0x30)
		attributeData = r.ReadFrom(int(dataOffset))
	}
	return Attribute{
		Type:          AttributeType(r.Uint32(0)),
		Resident:      resident,
		Name:          name,
		Flags:         AttributeFlags(r.Uint16(0x0C)),
		AttributeId:   int(r.Uint16(0x0E)),
		AllocatedSize: allocatedSize,
		ActualSize:    actualSize,
		Data:          binutil.Duplicate(attributeData),
	}, nil
}
// A DataRun represents a fragment of data somewhere on a volume. The OffsetCluster, which can be negative, is relative
// to a previous DataRun's offset. The OffsetCluster of the first DataRun in a list is relative to the beginning of the
// volume.
type DataRun struct {
	OffsetCluster    int64
	LengthInClusters uint64
}

// ParseDataRuns parses bytes into a list of DataRuns. Each DataRun's OffsetCluster is relative to the DataRun before
// it. The first element's OffsetCluster is relative to the beginning of the volume.
func ParseDataRuns(b []byte) ([]DataRun, error) {
	if len(b) == 0 {
		return []DataRun{}, nil
	}
	runs := make([]DataRun, 0)
	for len(b) > 0 {
		r := binutil.NewLittleEndianReader(b)
		// A zero header byte terminates the run list.
		header := r.Byte(0)
		if header == 0 {
			break
		}
		// Low nibble: byte count of the length field; high nibble: byte
		// count of the offset field.
		lengthLength := int(header &^ 0xF0)
		offsetLength := int(header >> 4)
		dataRunDataLength := offsetLength + lengthLength
		headerAndDataLength := dataRunDataLength + 1
		if len(b) < headerAndDataLength {
			return nil, fmt.Errorf("expected at least %d bytes of datarun data but is %d", headerAndDataLength, len(b))
		}
		dataRunData := r.Reader(1, dataRunDataLength)
		lengthBytes := dataRunData.Read(0, lengthLength)
		dataLength := binary.LittleEndian.Uint64(padTo(lengthBytes, 8))
		offsetBytes := dataRunData.Read(lengthLength, offsetLength)
		// padTo sign-extends, so negative (backward) offsets survive the
		// widening to 8 bytes.
		dataOffset := int64(binary.LittleEndian.Uint64(padTo(offsetBytes, 8)))
		runs = append(runs, DataRun{OffsetCluster: dataOffset, LengthInClusters: dataLength})
		b = r.ReadFrom(headerAndDataLength)
	}
	return runs, nil
}
// DataRunsToFragments transforms a list of DataRuns (relative offsets and
// lengths in clusters) into a list of fragment.Fragment elements (absolute
// offsets and lengths in bytes), for example for use in a fragment.Reader.
// Note that data will probably not align to a cluster exactly so there could
// be some padding at the end; it is up to the user of the Fragments to limit
// reads to the actual data size (eg. by using an io.LimitedReader or by
// trimming the final element's length).
func DataRunsToFragments(runs []DataRun, bytesPerCluster int) []fragment.Fragment {
	fragments := make([]fragment.Fragment, len(runs))
	currentCluster := int64(0)
	for i := range runs {
		// Each run's offset is relative to the previous run's absolute offset.
		currentCluster += runs[i].OffsetCluster
		fragments[i] = fragment.Fragment{
			Offset: currentCluster * int64(bytesPerCluster),
			Length: int64(runs[i].LengthInClusters) * int64(bytesPerCluster),
		}
	}
	return fragments
}
func padTo(data []byte, length int) []byte {
if len(data) > length {
return data
}
if len(data) == length {
return data
}
result := make([]byte, length)
if len(data) == 0 {
return result
}
copy(result, data)
if data[len(data)-1]&0b10000000 == 0b10000000 {
for i := len(data); i < length; i++ {
result[i] = 0xFF
}
}
return result
}
// Name returns a string representation of the attribute type. For example "$STANDARD_INFORMATION" or "$FILE_NAME". For
// anyte attribute type which is unknown, Name will return "unknown".
func (at AttributeType) Name() string {
switch at {
case AttributeTypeStandardInformation:
return "$STANDARD_INFORMATION"
case AttributeTypeAttributeList:
return "$ATTRIBUTE_LIST"
case AttributeTypeFileName:
return "$FILE_NAME"
case AttributeTypeObjectId:
return "$OBJECT_ID"
case AttributeTypeSecurityDescriptor:
return "$SECURITY_DESCRIPTOR"
case AttributeTypeVolumeName:
return "$VOLUME_NAME"
case AttributeTypeVolumeInformation:
return "$VOLUME_INFORMATION"
case AttributeTypeData:
return "$DATA"
case AttributeTypeIndexRoot:
return "$INDEX_ROOT"
case AttributeTypeIndexAllocation:
return "$INDEX_ALLOCATION"
case AttributeTypeBitmap:
return "$BITMAP"
case AttributeTypeReparsePoint:
return "$REPARSE_POINT"
case AttributeTypeEAInformation:
return "$EA_INFORMATION"
case AttributeTypeEA:
return "$EA"
case AttributeTypePropertySet:
return "$PROPERTY_SET"
case AttributeTypeLoggedUtilityStream:
return "$LOGGED_UTILITY_STREAM"
}
return "unknown"
} | mft/mft.go | 0.669529 | 0.434461 | mft.go | starcoder |
package dataframe
import "golang.org/x/text/encoding"
// DataBuilder is a helper structure to build dataframes.
// Use dataframe.DataBuilder{RawData: dataframe.EmptyRawData()} to initialize it
type DataBuilder struct {
RawData *RawData
}
// AddFloats adds a list of floats to the given float column.
// It returns a shallow copy of itself.
func (builder DataBuilder) AddFloats(col string, values ...float64) DataBuilder {
builder.RawData.floats[col] = append(builder.RawData.floats[col], values...)
return builder
}
// AddFloats adds a list of bools to the given boolean column.
// It returns a shallow copy of itself.
func (builder DataBuilder) AddBools(col string, values ...bool) DataBuilder {
builder.RawData.bools[col] = append(builder.RawData.bools[col], values...)
return builder
}
// AddInts adds a list of ints to the given int column.
// It returns a shallow copy of itself.
func (builder DataBuilder) AddInts(col string, values ...int) DataBuilder {
builder.RawData.ints[col] = append(builder.RawData.ints[col], values...)
return builder
}
// AddObjects adds a list of objects to the given object column.
// It returns a shallow copy of itself.
// You can use this function to add strings too.
func (builder DataBuilder) AddObjects(col string, values ...interface{}) DataBuilder {
builder.RawData.objects[col] = append(builder.RawData.objects[col], values...)
return builder
}
// MarkAsString tags a given object column as a string-only column.
// This gives access to functionalities that generic object columns don't have.
func (builder DataBuilder) MarkAsString(col string) DataBuilder {
builder.RawData.stringHeader.add(col)
return builder
}
// AddStrings adds a list of strings to the given object column.
// It returns a shallow copy of itself.
// If you need to add nils (= missing value), use AddObjects(col, ...)
// followded by MarkAsString(col).
func (builder DataBuilder) AddStrings(col string, values ...string) DataBuilder {
interfaces := make([]interface{}, len(values))
for i, v := range values {
interfaces[i] = v
}
builder.AddObjects(col, interfaces...)
builder.MarkAsString(col)
return builder
}
// SetFloats adds or replaces the values of the given float column.
// Values are not copied, so if you change them it will change them everywhere.
// It returns a shallow copy of itself.
func (builder DataBuilder) SetFloats(col string, values []float64) DataBuilder {
builder.RawData.floats[col] = values
return builder
}
// SetBools adds or replaces the values of the given boolean column.
// Values are not copied, so if you change them it will change them everywhere.
// It returns a shallow copy of itself.
func (builder DataBuilder) SetBools(col string, values []bool) DataBuilder {
builder.RawData.bools[col] = values
return builder
}
// SetInts adds or replaces the values of the given integer column.
// Values are not copied, so if you change them it will change them everywhere.
// It returns a shallow copy of itself.
func (builder DataBuilder) SetInts(col string, values []int) DataBuilder {
builder.RawData.ints[col] = values
return builder
}
// SetObjects adds or replaces the values of the given object column.
// Values are not copied, so if you change them it will change them everywhere.
// It returns a shallow copy of itself.
// If you want to set a slice of strings, you'll need to convert the slice to a
// slice of interfaces and call MarkAsString(col).
func (builder DataBuilder) SetObjects(col string, values []interface{}) DataBuilder {
builder.RawData.objects[col] = values
return builder
}
// TextEncoding informs ml-essential that the strings that you have provided
// are encoded in the given encoding.
// If this function is never called or if nil is passed as argument, it will be
// assumed that all the strings are utf8-encoded.
// Even if the strings are not utf8-encoded, it is not mandatory to call this
// function since encoding is rarely ever used by ml-essentials.
// TextEncoding returns a shallow copy of itself.
func (builder DataBuilder) TextEncoding(encoding encoding.Encoding) DataBuilder {
builder.RawData.textEncoding = encoding
return builder
}
// ToDataFrame() creates a dataframe out of the RawData object.
// It will panic if the columns are of different size.
// The returned dataframe shares its data and structure with the encapsulated
// rawdata.
func (builder DataBuilder) ToDataFrame() *DataFrame {
return builder.RawData.ToDataFrame()
}
/// THIS IS ONLY FOR TESTING
func fillBlanks(builder DataBuilder) *DataFrame {
data := builder.RawData
rows := data.NumAllocatedRows()
if len(data.objects) == data.stringHeader.Num() {
arr := make([]interface{}, rows)
for i := range arr {
if i % 2 == 0 {
arr[i] = make([]bool, 1)
}
}
builder.AddObjects("SomeObjects", arr...)
}
if data.stringHeader.Num() == 0 {
arr := make([]string, rows)
for i := range arr {
if i % 2 == 0 {
arr[i] = "one"
} else {
arr[i] = "two"
}
}
builder.AddStrings("SomeStrings", arr...)
}
if len(data.ints) == 0 {
arr := make([]int, rows)
for i := range arr {
arr[i] = i
}
builder.AddInts("SomeInts", arr...)
}
if len(data.bools) == 0 {
arr := make([]bool, rows)
for i := range arr {
arr[i] = i % 2 == 0
}
builder.AddBools("SomeBools", arr...)
}
if len(data.floats) == 0 {
arr := make([]float64, rows)
for i := range arr {
arr[i] = float64(i) / float64(rows)
}
builder.AddFloats("SomeFloats", arr...)
}
return data.ToDataFrame()
} | dataframe/builder.go | 0.835013 | 0.710314 | builder.go | starcoder |
package liblsdj
import (
"fmt"
)
const (
instrumentCount = 0x40 //! The amount of instruments in a song
instrumentByteCount = 16 //! The amount of bytes an instrument takes
instrumentNameLength = 5 //! The amount of bytes an instrument name takes
instrumentPulseLengthInfinite = 0x40 //! The value of an infinite pulse length
instrumentKitLengthAuto = 0x0 //! The value of a InstrumentKit length set to AUTO
instrumentNoiseLengthInfinite = 0x40 //! The value of an infinite noise length
)
type InstrumentParams [instrumentCount * instrumentByteCount]byte
type InstrumentNames [instrumentCount][instrumentNameLength]byte
type Instrument struct {
Name [instrumentNameLength]byte
Params [instrumentByteCount]byte
}
func setInstruments(names, params []byte) ([]Instrument, error) {
if len(names) != instrumentCount*instrumentNameLength {
return nil, fmt.Errorf("unexpected instruments name length: %v, %v", len(names), instrumentCount*instrumentNameLength)
} else if len(params) != instrumentCount*instrumentByteCount {
return nil, fmt.Errorf("unexpected instruments name length: %v, %v", len(params), instrumentCount*instrumentByteCount)
}
in := make([]Instrument, instrumentCount)
for i := 0; i < len(names)/instrumentNameLength; i++ {
copy(in[i].Name[:], names[instrumentNameLength*i:instrumentNameLength*(i+1)])
}
for i := 0; i < len(params)/instrumentByteCount; i++ {
copy(in[i].Params[:], params[instrumentByteCount*i:instrumentByteCount*(i+1)])
}
return in, nil
}
//! The kind of instrument types that exist
const (
instrumentTypePulse = iota
instrumentTypeWave
instrumentTypeKit
instrumentTypeNoise
)
const (
instrumentTablePlay = iota
instrumentTableStep
)
const (
instrumentWaveVolume0 = 0x00
instrumentWaveVolume1 = 0x60
instrumentWaveVolume2 = 0x40
instrumentWaveVolume3 = 0xA8
)
const (
instrumentPulseWidth125 = iota
instrumentPulseWidth25
instrumentPulseWidth50
instrumentPulseWidth75
)
const (
instrumentVibratoTriangle = iota
instrumentVibratoSawtooth
instrumentVibratoSquare
)
const (
instrumentVibratoDown = iota
instrumentVibratoUp
)
const (
instrumentPlvFast = iota
instrumentPlvTick
instrumentPlvStep
instrumentPlvDrum
)
const (
instrumentWavePlayOnce = iota
instrumentWavePlayLoop
instrumentWavePlayPingPong
instrumentWavePlayManual
)
const (
instrumentKitLoopOff = iota
instrumentKitLoopOn
instrumentKitLoopAttack
)
const (
instrumentKitDistortionClip = iota
instrumentKitDistortionShape
instrumentKitDistortionShape2
instrumentKitDistortionWrap
)
const (
instrumentNoiseFree = iota
instrumentNoiseStable
) | instrument.go | 0.600774 | 0.572603 | instrument.go | starcoder |
package ssot
import (
"crypto/ed25519"
"encoding/binary"
"github.com/frankbraun/codechain/util/base64"
"github.com/frankbraun/codechain/util/hex"
)
// SignedHeadV2 is a signed Codechain head ready for publication as a SSOT with
// DNS TXT records (version 2).
type SignedHeadV2 struct {
version uint8 // the version of the signed head
pubKey [32]byte // Ed25519 public key of SSOT head signer
pubKeyRotate [32]byte // Ed25519 pubkey to rotate to, all 0 if unused
validFrom int64 // this signed head is valid from the given Unix time
validTo int64 // this signed head is valid to the given Unix time
counter uint64 // signature counter
head [32]byte // the Codechain head to sign
line uint32 // the last signed line number
signature [64]byte // signature with pubkey over all previous fields
}
// marshal signed head without signature.
func (sh *SignedHeadV2) marshal() [125]byte {
var m [125]byte
var b [8]byte
var l [4]byte
m[0] = sh.version
copy(m[1:33], sh.pubKey[:])
copy(m[33:65], sh.pubKeyRotate[:])
binary.BigEndian.PutUint64(b[:], uint64(sh.validFrom))
copy(m[65:73], b[:])
binary.BigEndian.PutUint64(b[:], uint64(sh.validTo))
copy(m[73:81], b[:])
binary.BigEndian.PutUint64(b[:], sh.counter)
copy(m[81:89], b[:])
copy(m[89:121], sh.head[:])
binary.BigEndian.PutUint32(l[:], sh.line)
copy(m[121:125], l[:])
return m
}
// Marshal signed head with signature and encode it as base64.
func (sh *SignedHeadV2) Marshal() string {
var m [189]byte
b := sh.marshal()
copy(m[:125], b[:])
copy(m[125:189], sh.signature[:])
return base64.Encode(m[:])
}
func unmarshalV2(signedHead string) (*SignedHeadV2, error) {
m, err := base64.Decode(signedHead, 189)
if err != nil {
return nil, err
}
var sh SignedHeadV2
sh.version = m[0]
copy(sh.pubKey[:], m[1:33])
copy(sh.pubKeyRotate[:], m[33:65])
sh.validFrom = int64(binary.BigEndian.Uint64(m[65:73]))
sh.validTo = int64(binary.BigEndian.Uint64(m[73:81]))
sh.counter = binary.BigEndian.Uint64(m[81:89])
copy(sh.head[:], m[89:121])
sh.line = binary.BigEndian.Uint32(m[121:125])
copy(sh.signature[:], m[125:189])
msg := sh.marshal()
if !ed25519.Verify(sh.pubKey[:], msg[:], sh.signature[:]) {
return nil, ErrSignedHeadSignature
}
return &sh, nil
}
// Version returns the version.
func (sh *SignedHeadV2) Version() int {
return int(sh.version)
}
// Head returns the signed head.
func (sh *SignedHeadV2) Head() string {
return hex.Encode(sh.head[:])
}
// PubKey returns the public key in base64 notation.
func (sh *SignedHeadV2) PubKey() string {
return base64.Encode(sh.pubKey[:])
}
// PubKeyRotate returns the public key rotate in base64 notation.
func (sh *SignedHeadV2) PubKeyRotate() string {
return base64.Encode(sh.pubKeyRotate[:])
}
// ValidFrom returns the valid from field of signed head.
func (sh *SignedHeadV2) ValidFrom() int64 {
return sh.validFrom
}
// ValidTo returns the valid to field of signed head.
func (sh *SignedHeadV2) ValidTo() int64 {
return sh.validTo
}
// Counter returns the counter of signed head.
func (sh *SignedHeadV2) Counter() uint64 {
return sh.counter
}
// Line returns the last signed line number of signed head.
func (sh *SignedHeadV2) Line() int {
return int(sh.line)
}
// Signature returns the base64-encoded signature of the signed head.
func (sh *SignedHeadV2) Signature() string {
return base64.Encode(sh.signature[:])
}
// HeadBuf returns the signed head.
func (sh *SignedHeadV2) HeadBuf() [32]byte {
var b [32]byte
copy(b[:], sh.head[:])
return b
} | ssot/ssot_v2.go | 0.713032 | 0.40486 | ssot_v2.go | starcoder |
package timex
import "time"
const (
Day = 24 * time.Hour
Week = 7 * Day
)
var (
DaysMonth = []int{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
DaysMonthLeap = []int{0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
NSec = int(time.Second - time.Nanosecond)
)
func AtBeginningOfDay(target time.Time) time.Time {
return time.Date(target.Year(), target.Month(), target.Day(), 0, 0, 0, 0, target.Location())
}
func AtBeginningOfHour(target time.Time) time.Time {
return time.Date(target.Year(), target.Month(), target.Day(), target.Hour(), 0, 0, 0, target.Location())
}
func AtBeginningOfMinute(target time.Time) time.Time {
return time.Date(target.Year(), target.Month(), target.Day(), target.Hour(), target.Minute(), 0, 0, target.Location())
}
func AtBeginningOfMonth(target time.Time) time.Time {
return time.Date(target.Year(), target.Month(), 1, 0, 0, 0, 0, target.Location())
}
func AtBeginningOfYear(target time.Time) time.Time {
return time.Date(target.Year(), 1, 1, 0, 0, 0, 0, target.Location())
}
func AtBeginningOfQuarter(target time.Time) time.Time {
month := (target.Month()-1/3)*3 + 1
return time.Date(target.Year(), month, 1, 0, 0, 0, 0, target.Location())
}
func AtBeginningOfSemester(target time.Time) time.Time {
month := (target.Month()-1/6)*6 + 1
return time.Date(target.Year(), month, 1, 0, 0, 0, 0, target.Location())
}
func AtBeginningOfWeek(target time.Time) time.Time {
dow := target.Weekday()
if dow == time.Sunday {
return AtBeginningOfDay(target).Add(-6 * Day)
}
if dow == time.Monday {
return AtBeginningOfDay(target)
}
return AtBeginningOfDay(target).AddDate(0, 0, -int(dow-time.Sunday)-1)
}
func AtEndOfHour(target time.Time) time.Time {
y, m := target.Year(), target.Month()
return time.Date(y, m, target.Day(), target.Hour(), 59, 59, NSec, target.Location())
}
func AtEndOfMinute(target time.Time) time.Time {
y, m := target.Year(), target.Month()
return time.Date(y, m, target.Day(), target.Hour(), target.Minute(), 59, NSec, target.Location())
}
func AtEndOfQuarter(target time.Time) time.Time {
y, m := target.Year(), target.Month()
var month time.Month
var day int
if m <= time.June {
month, day = time.June, 30
} else {
month, day = time.December, 31
}
return time.Date(y, month, day, 23, 59, 59, NSec, target.Location())
}
func AtEndOfSemester(target time.Time) time.Time {
y, m := target.Year(), target.Month()
var month time.Month
var day int
if m <= time.March {
month, day = time.March, 31
} else if m <= time.June {
month, day = time.June, 30
} else if m <= time.September {
month, day = time.September, 30
} else {
month, day = time.December, 31
}
return time.Date(y, month, day, 23, 59, 59, NSec, target.Location())
}
func AtEndOfWeek(target time.Time) time.Time {
dow := target.Weekday()
if dow == time.Sunday {
return AtEndOfDay(target)
}
return AtEndOfDay(target).AddDate(0, 0, 7-int(dow-time.Sunday))
}
func AtEndOfDay(target time.Time) time.Time {
y, m := target.Year(), target.Month()
return time.Date(y, m, target.Day(), 23, 59, 59, NSec, target.Location())
}
func AtEndOfMonth(target time.Time) time.Time {
y, m := target.Year(), target.Month()
return time.Date(y, m, daysInMonth(y, int(m)), 23, 59, 59, NSec, target.Location())
}
func AtEndOfYear(target time.Time) time.Time {
return time.Date(target.Year(), 12, 31, 23, 59, 59, NSec, target.Location())
}
func AtMidday(target time.Time) time.Time {
y, m, d := target.Year(), target.Month(), target.Day()
return time.Date(y, m, d, 12, 0, 0, 0, target.Location())
}
func daysInMonth(year, month int) int {
if month < 1 || month > 12 {
panic("invalid month")
}
if year < 1 || year > 99999 {
panic("invalid year")
}
if IsLeapYear(year) {
return DaysMonthLeap[month]
}
return DaysMonth[month]
}
func IsLeapYear(year int) bool {
return (year%4 == 0 && year%100 != 0) || (year%400 == 0)
} | timex.go | 0.706697 | 0.460653 | timex.go | starcoder |
package search
import "strings"
type Node struct {
key string
value int
left *Node
right *Node
}
type BST struct {
root *Node
count int
}
func (n *Node) GetNodeKey() string {
return n.key
}
func NewBST() *BST {
return &BST{nil, 0}
}
func (bst *BST) Insert(key string, value int) {
bst.count++
bst.root = insert(bst.root, key, value)
}
func (bst *BST) Size() int {
return bst.count
}
func insert(root *Node, key string, value int) *Node {
if root == nil {
return &Node{key, value, nil, nil}
}
if strings.Compare(key, root.key) == 0 {
root.value = value
} else if strings.Compare(key, root.key) < 0 {
root.left = insert(root.left, key, value)
} else {
root.right = insert(root.right, key, value)
}
return root
}
func (bst *BST) Search(key string) *int {
return search(bst.root, key)
}
func search(root *Node, key string) *int {
if root == nil {
return nil
}
if strings.Compare(key, root.key) == 0 {
return &root.value
} else if strings.Compare(key, root.key) < 0 {
return search(root.left, key)
} else {
return search(root.right, key)
}
}
func (bst *BST) PreOrder(fn func(n *Node)) {
preOrder(bst.root, fn)
}
func preOrder(root *Node, fn func(n *Node)) {
if root != nil {
fn(root)
preOrder(root.left, fn)
preOrder(root.right, fn)
}
}
func (bst *BST) InOrder(fn func(n *Node)) {
inOrder(bst.root, fn)
}
func inOrder(root *Node, fn func(n *Node)) {
if root != nil {
inOrder(root.left, fn)
fn(root)
inOrder(root.right, fn)
}
}
func (bst *BST) PostOrder(fn func(n *Node)) {
postOrder(bst.root, fn)
}
func postOrder(root *Node, fn func(n *Node)) {
if root != nil {
postOrder(root.left, fn)
postOrder(root.right, fn)
fn(root)
}
}
func (bst *BST) Empty() bool {
return bst.count == 0
}
func (bst *BST) LevelOrder(fn func(n *Node)) {
q := make([](*Node), 0, bst.count)
q = append(q, bst.root)
for len(q) != 0 {
n := q[0]
q = q[1:]
fn(n)
if n.left != nil {
q = append(q, n.left)
}
if n.right != nil {
q = append(q, n.right)
}
}
}
func (bst *BST) Mini() string {
/**
count>0
**/
m := mini(bst.root)
return m.key
}
func mini(root *Node) *Node {
if root.left == nil {
return root
}
return mini(root.left)
}
func (bst *BST) Max() string {
m := max(bst.root)
return m.key
}
func max(root *Node) *Node {
if root.right == nil {
return root
}
return max(root.right)
}
func (bst *BST) RemoveMin() {
bst.count--
bst.root = removeMin(bst.root)
}
func removeMin(root *Node) *Node {
if root.left == nil {
return root.right
}
root.left = removeMin(root.left)
return root
}
func (bst *BST) RemoveMax() {
bst.count--
bst.root = removeMax(bst.root)
}
func removeMax(root *Node) *Node {
if root.right == nil {
return root.left
}
root.right = removeMax(root.right)
return root
}
func (bst *BST) Remove(key string) {
bst.count--
bst.root = remove(bst.root, key)
}
func remove(root *Node, key string) *Node {
if root == nil {
return nil
}
if strings.Compare(key, root.key) < 0 {
root.left = remove(root.left, key)
return root
} else if strings.Compare(root.key, key) < 0 {
root.right = remove(root.right, key)
return root
} else {
if root.left == nil {
return root.right
} else if root.right == nil {
return root.left
}
successor := mini(root.right)
successor.right = removeMin(root.right)
successor.left = root.left
return successor
}
} | binarySearchTree.go | 0.57678 | 0.409634 | binarySearchTree.go | starcoder |
package step
/* In order to make the parser work properly
---in a concurrent fashion we'll need to add
---a skip stack that you'll get back to when
---some of the points are missing
---something like:
-Skip: list of definitions that are impossible to parse
for now
-Missing: list of definitions that are missing
--complexity of map lookups: log(n) `they are based on red black trees`
--complexity of bitarray is o(1) but we require way more way storage space
for big files
--the big problem is the fact that we get threads blocked so if they are
they'll just have to push the blocking definition to the skip/missing stacks.
--we also need to create a thread pool to be able to manage cpu usage.
*/
/* step files seen as state machines:
----parsing could be seen as transitions through a deterministic
----state machine.
----our parser uses this terminology to parse the symbols table in
----a very elegant way.
*/
/* parseFn : implements the transition states for parsing
----components from the table of symbols to create a render
----stack to be excecuted by the converter.
----TODO: make it return a set of parseFn
*/
type parseFn func(string)
/* parseState : describes the current state and the
---transition function
*/
type parseState struct {
fn parseFn // state
key string // transition
}
// Parser : the state machine
type Parser struct {
lexemes SymbolTable
nextStates []parseState
visited map[string]struct{}
}
/* TODO: make it work recursively
---1-march-2021: it works recursively by adding it to the parser's nextStates
-----------------VERTEX_POINT:(save this state)
------------------------------CARTESIAN_POINT
------------------------------recover the state
*/
func (parser *Parser) runInferenceModel(state parseState) {
parser.nextStates = append(parser.nextStates, state)
for len(parser.nextStates) > 0 {
popState := parser.nextStates[0]
if _, found := parser.visited[popState.key]; !found {
popState.fn(popState.key)
parser.visited[popState.key] = struct{}{}
}
parser.nextStates = parser.nextStates[1:]
}
}
//Parse : state machine inference engine to retrieve the
//render stack from the lexer
func Parse(lexemes SymbolTable) {
parser := Parser{lexemes: lexemes}
parser.visited = make(map[string]struct{}, 0)
for key, lexeme := range lexemes {
fn := parser.bridge(lexeme)
ps := parseState{fn: fn, key: key}
parser.runInferenceModel(ps)
}
} | step/parse.go | 0.508544 | 0.575677 | parse.go | starcoder |
package main
import (
"errors"
"log"
"os"
"strings"
)
type Rate struct {
value []bool
}
func main() {
// Read the input file. It contains a list of binary numbers
structuredInput, err := getStructFromInput("day3/input.txt")
if err != nil {
log.Fatal(err)
}
// Part 1: multiply the gamma rate by the epsilon rate
// sumsOfOnes will contain the count of '1' at each index over the whole input
sumsOfOnes := getCountsOfOnes(structuredInput)
// Now, to find Gamma and Epsilon rates, we need to verify whether each value
// in sumsOfOnes is more or less than half the number of inputs
gammaRate := 0
epsilonRate := 0
for index, count := range sumsOfOnes {
if count > (len(structuredInput)/2) {
gammaRate += 1<<(len(sumsOfOnes)-index-1)
} else {
epsilonRate += 1<<(len(sumsOfOnes)-index-1)
}
}
log.Printf("Part 1 - power consumption: %d\n", gammaRate*epsilonRate)
// Part 2: multiply th oxygen generator rating by the CO2 scrubber rating = life support rating
// Calculate oxygen rate
oxygenRate := 0
oRate, err := getRating(structuredInput, true, 0)
if err != nil {
log.Fatal(err)
}
for index, value := range oRate.value {
if value {
oxygenRate += 1 << (len(oRate.value) - index - 1)
}
}
// Calculate CO2 rate
co2Rate := 0
co2RateStruct, err := getRating(structuredInput, false, 0)
if err != nil {
log.Fatal(err)
}
for index, value := range co2RateStruct.value {
if value {
co2Rate += 1 << (len(co2RateStruct.value) - index - 1)
}
}
log.Printf("Part 2 - life support rating: %d\n", oxygenRate * co2Rate)
}
func getStructFromInput(path string) ([]Rate, error) {
// Read the input file. It contains a list of instruction composed of
// a string and an integer
// up 3, down 5, forward 7, etc
file, err := os.ReadFile(path)
if err != nil {
return nil, err
}
lines:=strings.Split(string(file), "\n")
// Well, I know the size of the input, so let's just use that information
// When knowing the size, it's better to allocate the right size immediately
// as append() has a cost
// https://medium.com/vendasta/golang-the-time-complexity-of-append-2177dcfb6bad
// /!\ Do not use make([]Elements, 1000) as it will give it can AND size 1000, and
// appending to it will just append after element 1000, so the first 1000 elements will be 0
structuredInput := make([]Rate, 0, 1000)
for _ , line := range lines {
boolArr := make([]bool, 0, len(line)) // From the input, all elements are 12 bits long
// Get the integer value from the line
for _, c := range line {
if c == '0' {
boolArr = append(boolArr, false)
} else {
boolArr = append(boolArr, true)
}
}
structuredInput = append(structuredInput, Rate{boolArr})
}
return structuredInput, nil
}
func getCountsOfOnes(structuredInput []Rate) []int {
if len(structuredInput) == 0 {
return nil
}
sumsOfOnes := make([]int, len(structuredInput[0].value))
for _, rate := range structuredInput {
for index, zeroOrOne := range rate.value {
if zeroOrOne {
sumsOfOnes[index] += 1
}
}
}
return sumsOfOnes
}
// input: a list of rates (the input from the exercise, successively filtered)
// defaultKeep: is used to define which rates should be kept in case the
// counts of 1 and 0 at the given index for the given input are equal
// To find the oxygen rate, use defaultKeep = 1, to find the CO2 rate, use defaultKeep = 0
// index: the bit index to check in the given rates
func getRating(input []Rate, defaultKeep bool, index int) (Rate, error) {
// No input
if len(input) == 0 {
return Rate{nil}, errors.New("no input provided to getRating")
}
if index > len(input[0].value) {
return Rate{nil}, errors.New("index out of bounds in getRating")
}
//Get the counts of '1' at each position for the given input
sumsOfOnes := getCountsOfOnes(input)
// Should we keep numbers in 0 or 1?
keep := defaultKeep
if float32(sumsOfOnes[index]) > (float32(len(input))/2) {
// Most common value is 1
keep = defaultKeep
} else if float32(sumsOfOnes[index]) < (float32(len(input))/2) {
// Most common value is 0
keep = !defaultKeep
}
newInput := make([]Rate, 0, len(input))
for _, rate := range input {
if rate.value[index] == keep {
newInput = append(newInput, rate)
}
}
if len(newInput) == 1 {
return newInput[0], nil
} else {
return getRating(newInput, defaultKeep, index+1)
}
} | day3/main.go | 0.52829 | 0.503662 | main.go | starcoder |
SPEC Benchmark Specification
Go
1.0: General
1.1: Classification
Go is a cpu-bound integer benchmark. It is an example of the use of
artificial intelligence in game playing.
1.2: Description
Go plays the game of go against itself. The benchmark is stripped
down version of a successful go-playing computer program.
The benchmark is implemented in ANSI C (with function prototypes).
There is a great deal of pattern matching and look-ahead logic.
As is common in this type of program, up to a third of the run-time
can be spent in the data-management routines.
1.3: Source/Author
<NAME>
San Jose, CA.
A full functioned verison of this program, Many Faces of Go, with a user
interface, is available for the IBM-PC from Ishi Press, 76 Bonaventura Ave,
San Jose CA 95134, (408) 944-9900, and for PenPoint from PenGames, 4863
Capistrano Ave San Jose, CA 95129 (408)985-1236.
1.4: Version Information
This is a special version of the Go program, The Many Faces of Go, developed
for use as a part of the SPEC benchmark suite. This go playing engine is
from the 1989 version of Many Faces of Go. The latest version is a much
stronger go player.
2.0: Performance
2.1: Metric
No special performance measures are produced by Go. The elapsed
time to play a game against itself is the measure of performance.
2.2: Reference Time
TBD.
Approximate times (without any special compiler option tuning):
486-25 PC about 40 minutes
HP9000/755 2 minutes 51 seconds
HP9000/750 4 minutes 50 seconds
HP9000/400 43 minutes 35 seconds
2.3: Reports
Go writes to stdout a move-by-move listing of the game as it is played.
Error messages will be genereated as appropriate.
3.0: Software
3.1: Language
ANSI C with fucntion prototypes.
3.2: Operating System
Both MS-DOS and UNIX implementations are supported in the source code.
3.3: Portability
It is strictly ANSI C compliant, and has no dependencies on endianness.
It should be quite portable since it doesn't depend on any unusual runtime
library routines, or on endianness. The only time it has run differently
on different processors was due to floating point rounding differences in
the initialization of rtval1[] in initrtval() in g23.c. Modify the
initialization slightly, and this should not be a problem any more, but if
a port to a non-IEEE FP machine gives a different result, look here first.
g2jlib2.c contains a very large initializer, which might give some compilers
problems.
3.4: Others
No other software considerations for Go. Go does not malloc any memory.
4.0: Hardware
4.1: Memory
TBD.
4.2: Other
No other hardware requirements exist for Go.
5.0: Operational
5.1: Disk Space
No disk requirements beyond the space required to hold the program
source, executable image, input files and output files. This space
is less than a megabyte.
5.2: Installation
The directory contains the source for the Go program and a makefile that
builds it. The makefile by default uses the host's default C compiler and
specifies optimization with the -O flag. Compiler options may be passed in
by EXTRA_CFLAGS.
5.3: Execution
To run Go, type:
time go > go.out
Verify whether the output is correct by:
diff go.out specout.go
there should be no differences reported by the diff program. | benchmarks/deputytests/spec95/go/DESCR.go | 0.751466 | 0.571408 | DESCR.go | starcoder |
package breeze
import (
"encoding/binary"
"io"
)
// Buffer is a variable-sized buffer of bytes with Read and Write methods.
// Buffer is not thread safe for multi goroutine operation.
type Buffer struct {
	buf []byte // contents are the bytes buf[0 : wpos] in write, are the bytes buf[rpos: len(buf)] in read
	rpos int // read position
	wpos int // write position
	order binary.ByteOrder // byte order used by the fixed-width integer write methods
	temp []byte // 8-byte scratch slice reused when encoding fixed-width integers
	context *Context // codec context; not referenced by the methods visible here — presumably set elsewhere in the package
}
// NewBuffer creates an empty Buffer with the given initial size and
// big-endian byte order.
func NewBuffer(initSize int) *Buffer {
	return NewBufferWithOrder(initSize, binary.BigEndian)
}
// NewBufferWithOrder creates an empty Buffer with the given initial size
// and byte order.
func NewBufferWithOrder(initSize int, order binary.ByteOrder) *Buffer {
	b := &Buffer{
		buf:   make([]byte, initSize),
		order: order,
		temp:  make([]byte, 8),
	}
	return b
}
// CreateBuffer creates a Buffer wrapping data (without copying) with
// big-endian byte order. The write position is set to len(data), so the
// bytes are immediately readable.
func CreateBuffer(data []byte) *Buffer {
	return CreateBufferWithOrder(data, binary.BigEndian)
}
// CreateBufferWithOrder creates a Buffer that wraps data (without copying)
// using the given byte order. The write position is set to len(data), so
// the wrapped bytes are immediately readable.
func CreateBufferWithOrder(data []byte, order binary.ByteOrder) *Buffer {
	b := &Buffer{
		buf:  data,
		wpos: len(data),
		temp: make([]byte, 8),
	}
	b.order = order
	return b
}
// SetWPos sets the write position of the Buffer, growing the underlying
// storage first if pos lies beyond the current length.
func (b *Buffer) SetWPos(pos int) {
	if short := pos - len(b.buf); short > 0 {
		b.grow(short)
	}
	b.wpos = pos
}
// GetWPos returns the current write position of the Buffer.
func (b *Buffer) GetWPos() int {
	return b.wpos
}
// SetRPos get the read position of Buffer
func (b *Buffer) SetRPos(pos int) {
b.rpos = pos
}
// GetRPos get the read position of Buffer
func (b *Buffer) GetRPos() int {
return b.rpos
}
// WriteByte appends the single byte c to the Buffer, advancing the write
// position by one and growing storage if required.
func (b *Buffer) WriteByte(c byte) {
	if b.wpos+1 > len(b.buf) {
		b.grow(1)
	}
	b.buf[b.wpos] = c
	b.wpos++
}
// Write appends bytes to the Buffer, growing storage as needed; the write
// position advances by len(bytes). A nil or empty slice is a no-op.
func (b *Buffer) Write(bytes []byte) {
	n := len(bytes)
	if n == 0 {
		return
	}
	if b.wpos+n > len(b.buf) {
		b.grow(n)
	}
	copy(b.buf[b.wpos:], bytes)
	b.wpos += n
}
// WriteUint16 appends u to the Buffer in the buffer's byte order,
// advancing the write position by 2.
func (b *Buffer) WriteUint16(u uint16) {
	if len(b.buf) < b.wpos+2 {
		b.grow(2)
	}
	// Encode directly into the destination instead of round-tripping
	// through the temp scratch slice — saves a copy per call.
	b.order.PutUint16(b.buf[b.wpos:b.wpos+2], u)
	b.wpos += 2
}
// WriteUint32 appends u to the Buffer in the buffer's byte order,
// advancing the write position by 4.
func (b *Buffer) WriteUint32(u uint32) {
	if len(b.buf) < b.wpos+4 {
		b.grow(4)
	}
	// Encode directly into the destination instead of round-tripping
	// through the temp scratch slice — saves a copy per call.
	b.order.PutUint32(b.buf[b.wpos:b.wpos+4], u)
	b.wpos += 4
}
// WriteUint64 appends u to the Buffer in the buffer's byte order,
// advancing the write position by 8.
func (b *Buffer) WriteUint64(u uint64) {
	if len(b.buf) < b.wpos+8 {
		b.grow(8)
	}
	// Encode directly into the destination instead of round-tripping
	// through the temp scratch slice — saves a copy per call.
	b.order.PutUint64(b.buf[b.wpos:b.wpos+8], u)
	b.wpos += 8
}
// WriteZigzag32 zigzag-encodes u (interpreted as a signed 32-bit value)
// and appends it as a varint, returning the number of bytes written.
// Zigzag maps small-magnitude negatives to small unsigned values:
// 0->0, -1->1, 1->2, -2->3, ...
func (b *Buffer) WriteZigzag32(u uint32) int {
	return b.WriteVarInt(uint64((u << 1) ^ uint32(int32(u)>>31)))
}
// WriteZigzag64 zigzag-encodes u (interpreted as a signed 64-bit value)
// and appends it as a varint, returning the number of bytes written.
func (b *Buffer) WriteZigzag64(u uint64) int {
	return b.WriteVarInt(uint64((u << 1) ^ uint64(int64(u)>>63)))
}
// WriteVarInt appends u as a variable-length integer (7 payload bits per
// byte, high bit set on all but the final byte) and returns the number of
// bytes written.
func (b *Buffer) WriteVarInt(u uint64) int {
	n := 1
	for ; u >= 0x80; n++ {
		b.WriteByte(byte(u) | 0x80)
		u >>= 7
	}
	b.WriteByte(byte(u))
	return n
}
// grow replaces the backing slice with one of length 2*len(buf)+n,
// preserving the written prefix. Growing length (not just capacity)
// matters because the write methods compare against len(b.buf).
func (b *Buffer) grow(n int) {
	buf := make([]byte, 2*len(b.buf)+n)
	copy(buf, b.buf[:b.wpos])
	b.buf = buf
}
// Bytes returns the written bytes as a slice that aliases the underlying
// buffer; it is invalidated by any subsequent write that triggers grow.
func (b *Buffer) Bytes() []byte { return b.buf[:b.wpos] }
// Read copies unread bytes (those between rpos and wpos) into p and
// returns the number of bytes copied. It returns io.EOF once every
// written byte has been consumed.
func (b *Buffer) Read(p []byte) (n int, err error) {
	// Bound the read at wpos rather than len(b.buf): grow() can leave
	// unwritten capacity at the tail, and exposing it would hand the
	// caller stale/zero bytes. Remain() and ReadFull already treat wpos
	// as the logical end, so this keeps Read consistent with them.
	if b.rpos >= b.wpos {
		return 0, io.EOF
	}
	n = copy(p, b.buf[b.rpos:b.wpos])
	b.rpos += n
	return n, nil
}
// ReadFull copies exactly len(p) unread bytes into p, or returns
// ErrNotEnough (consuming nothing) when fewer are available.
func (b *Buffer) ReadFull(p []byte) error {
	if b.Remain() < len(p) {
		return ErrNotEnough
	}
	// Remain() >= len(p) guarantees the copy fills p entirely from the
	// written region, so the original's second length check was dead
	// code and has been dropped. Bounding the source at wpos also keeps
	// unwritten tail capacity from ever being exposed.
	b.rpos += copy(p, b.buf[b.rpos:b.wpos])
	return nil
}
// ReadUint16 decodes the next two bytes using the buffer's byte order.
// Returns ErrNotEnough when fewer than two unread bytes remain.
func (b *Buffer) ReadUint16() (uint16, error) {
	if b.Remain() < 2 {
		return 0, ErrNotEnough
	}
	v := b.order.Uint16(b.buf[b.rpos:])
	b.rpos += 2
	return v, nil
}
// ReadInt reads the next 4 bytes as a uint32 and converts the result to int.
// NOTE(review): negative int32 values do not round-trip on 64-bit
// platforms — int(uint32(0xFFFFFFFF)) is 4294967295, not -1. Confirm
// callers only exchange non-negative values through this method.
func (b *Buffer) ReadInt() (int, error) {
	n, err := b.ReadUint32()
	return int(n), err
}
// ReadUint32 decodes the next four bytes using the buffer's byte order.
// Returns ErrNotEnough when fewer than four unread bytes remain.
func (b *Buffer) ReadUint32() (uint32, error) {
	if b.Remain() < 4 {
		return 0, ErrNotEnough
	}
	v := b.order.Uint32(b.buf[b.rpos:])
	b.rpos += 4
	return v, nil
}
// ReadUint64 decodes the next eight bytes using the buffer's byte order.
// Returns ErrNotEnough when fewer than eight unread bytes remain.
func (b *Buffer) ReadUint64() (uint64, error) {
	if b.Remain() < 8 {
		return 0, ErrNotEnough
	}
	v := b.order.Uint64(b.buf[b.rpos:])
	b.rpos += 8
	return v, nil
}
// ReadZigzag64 reads a varint and zigzag-decodes it back to the original
// 64-bit pattern (inverse of WriteZigzag64).
func (b *Buffer) ReadZigzag64() (x uint64, err error) {
	x, err = b.ReadVarInt()
	if err != nil {
		return
	}
	// (x>>1) ^ -(x&1): undo the zigzag interleaving of sign and magnitude.
	x = (x >> 1) ^ uint64(-int64(x&1))
	return
}
// ReadZigzag32 reads a varint and zigzag-decodes it as a 32-bit value
// (inverse of WriteZigzag32). Note the result is still returned as a
// uint64 despite the 32-bit decode.
func (b *Buffer) ReadZigzag32() (x uint64, err error) {
	x, err = b.ReadVarInt()
	if err != nil {
		return
	}
	x = uint64((uint32(x) >> 1) ^ uint32(-int32(x&1)))
	return
}
// ReadVarInt reads a variable-length uint64 from the buffer: 7 payload
// bits per byte, high bit set on every byte except the last. Returns
// ErrOverflow if no terminating byte appears within 64 bits.
func (b *Buffer) ReadVarInt() (x uint64, err error) {
	var temp byte
	for offset := uint(0); offset < 64; offset += 7 {
		temp, err = b.ReadByte()
		if err != nil {
			return 0, err
		}
		if (temp & 0x80) != 0x80 {
			// Final byte (high bit clear). At offset 63 only the low bit
			// fits; higher payload bits are silently discarded by the
			// shift — NOTE(review): confirm writers never produce them.
			x |= uint64(temp) << offset
			return x, nil
		}
		x |= uint64(temp&0x7f) << offset
	}
	return 0, ErrOverflow
}
/*
Next returns the next n bytes from the buffer without copying, or
ErrNotEnough if fewer than n unread bytes remain.
The returned slice aliases the underlying byte array, so holding it keeps
the whole array alive and later writes may overwrite its contents;
intended for short-lived use only.
*/
func (b *Buffer) Next(n int) ([]byte, error) {
	m := b.Remain()
	if n > m {
		return nil, ErrNotEnough
	}
	data := b.buf[b.rpos : b.rpos+n]
	b.rpos += n
	return data, nil
}
// ReadByte returns the next unread byte, or io.EOF once every written
// byte has been consumed.
func (b *Buffer) ReadByte() (byte, error) {
	// Bound the read at wpos rather than len(b.buf): grow() can leave
	// unwritten capacity at the tail, and reading past wpos would return
	// stale/zero bytes. This matches Remain()/ReadFull, which already
	// treat wpos as the logical end of readable data.
	if b.rpos >= b.wpos {
		return 0, io.EOF
	}
	c := b.buf[b.rpos]
	b.rpos++
	return c, nil
}
// Reset rewinds both the read and write positions to zero so the
// underlying storage can be reused.
func (b *Buffer) Reset() {
	b.rpos = 0
	b.wpos = 0
}
// Remain reports how many written bytes have not yet been read.
func (b *Buffer) Remain() int { return b.wpos - b.rpos }
// Len returns the number of bytes written to the buffer.
// (The original returned "b.wpos - 0"; the pointless subtraction is removed.)
func (b *Buffer) Len() int { return b.wpos }
// Cap returns the capacity of the underlying byte slice.
func (b *Buffer) Cap() int { return cap(b.buf) }
// GetContext returns the Buffer's breeze Context, lazily creating it on
// first use. Like the rest of Buffer, this is not safe for concurrent
// callers.
func (b *Buffer) GetContext() *Context {
	if b.context == nil {
		b.context = &Context{}
	}
	return b.context
}
package main
import (
"math"
)
// connectEdgesToBoundary walks the remaining beachline tree and, for each
// internal node (breakpoint), clips its half-infinite edge against the
// bounding box, assigning the clipped point as the half-edge's origin.
func connectEdgesToBoundary(currentNode *node, boundingBox boundingBox, dcel *doublyConnectedEdgeList) {
	// Each internal node left in the beachline represents a half infinite edge
	// Need to connect each of these to the bounding box
	if currentNode.breakpoint != nil {
		// Steps:
		// 1. Find the midpoint between the left and right site in breakpoint
		// 2. Use this point and the nodes half edge twin vertex point to determine the equation of the line
		// 3. Calculate where the line intercepts the bounding box (there will be two points)
		// 4. Determine which of the points is the closest and set the nodes half edge vertex as this point
		vertex := currentNode.halfEdge.twinEdge.originVertex
		// Initialise boundary vertices; x = -1.0 marks vertexBoundingA as
		// "not yet assigned" (box coordinates are never negative).
		vertexBoundingA := getVertex()
		vertexBoundingA.x = -1.0
		vertexBoundingB := getVertex()
		// Consider ignoring if vertex lies outside bounding box - maybe add dummy vertex
		if vertex.x < 0 || vertex.x > boundingBox.width || vertex.y < 0 || vertex.y > boundingBox.height {
			fakeVertex := dcel.addIsolatedVertex(vertex.x, vertex.y)
			currentNode.halfEdge.originVertex = fakeVertex
		} else {
			// Edge direction runs through the midpoint of the two sites.
			xMidpoint := (currentNode.breakpoint.leftSite.x + currentNode.breakpoint.rightSite.x) / 2
			yMidpoint := (currentNode.breakpoint.leftSite.y + currentNode.breakpoint.rightSite.y) / 2
			// Line through (midpoint, vertex): y = gradient*x + b.
			gradient := (yMidpoint - vertex.y) / (xMidpoint - vertex.x)
			b := yMidpoint - (gradient * xMidpoint)
			// Intercepts with bottom (y=0), right (x=width) and top
			// (y=height) sides; the left side intercept is simply y=b.
			bottomBoundInterceptX := (-1.0 * b) / gradient
			rightBoundInterceptY := (gradient * boundingBox.width) + b
			topBoundInterceptX := (boundingBox.height - b) / gradient
			// TODO - handle case of parallel line
			if b >= 0 && b <= boundingBox.height {
				vertexBoundingA.x = 0
				vertexBoundingA.y = b
			}
			if bottomBoundInterceptX >= 0 && bottomBoundInterceptX <= boundingBox.width {
				vertexBoundingB.x = bottomBoundInterceptX
				vertexBoundingB.y = 0
			}
			// For the remaining sides, fill A first if still unset,
			// otherwise B — a line crosses the box in exactly two points.
			if rightBoundInterceptY >= 0 && rightBoundInterceptY <= boundingBox.height {
				if vertexBoundingA.x < 0 {
					vertexBoundingA.x = boundingBox.width
					vertexBoundingA.y = rightBoundInterceptY
				} else {
					vertexBoundingB.x = boundingBox.width
					vertexBoundingB.y = rightBoundInterceptY
				}
			}
			if topBoundInterceptX >= 0 && topBoundInterceptX <= boundingBox.width {
				if vertexBoundingA.x < 0 {
					vertexBoundingA.x = topBoundInterceptX
					vertexBoundingA.y = boundingBox.height
				} else {
					vertexBoundingB.x = topBoundInterceptX
					vertexBoundingB.y = boundingBox.height
				}
			}
			// Distance to A from vertex and distance to A from midpoint
			distanceVertexToA := math.Sqrt(math.Pow((vertex.x-vertexBoundingA.x), 2) + math.Pow((vertex.y-vertexBoundingA.y), 2))
			distanceMidpointToA := math.Sqrt(math.Pow((xMidpoint-vertexBoundingA.x), 2) +
				math.Pow((yMidpoint-vertexBoundingA.y), 2))
			// Add the boundary vertex which is closer to midpoint than node vertex to the dcel
			// and connect the halfedge on the node to it
			newVertex := getVertexPointer()
			if distanceMidpointToA < distanceVertexToA {
				newVertex = dcel.addIsolatedVertex(vertexBoundingA.x, vertexBoundingA.y)
			} else {
				newVertex = dcel.addIsolatedVertex(vertexBoundingB.x, vertexBoundingB.y)
			}
			currentNode.halfEdge.originVertex = newVertex
		}
		// Run on child nodes
		// NOTE(review): this recursion sits inside the breakpoint != nil
		// branch, so traversal stops at nodes without a breakpoint —
		// presumably leaves, which have no children anyway; confirm.
		if currentNode.left != nil {
			connectEdgesToBoundary(currentNode.left, boundingBox, dcel)
		}
		if currentNode.right != nil {
			connectEdgesToBoundary(currentNode.right, boundingBox, dcel)
		}
	}
}
package csg
import (
"runtime"
"sync"
)
// IPolygonSplitter is an interface for a specific implementation of a polygon splitter.
type IPolygonSplitter interface {
	// SplitPolygons partitions polygons by their orientation relative to
	// plane, appending each polygon (or its split halves) to the matching
	// result slice.
	SplitPolygons(plane *Plane, polygons []*Polygon, coplanarFront, coplanarBack, front, back *[]*Polygon)
}
// BasicPolygonSplitter is a straightforward single-goroutine
// implementation of IPolygonSplitter.
type BasicPolygonSplitter struct {
}

// SplitPolygons classifies each polygon against plane and appends it to
// the matching output slice: coplanar polygons go to coplanarFront or
// coplanarBack depending on normal orientation, polygons strictly on one
// side go to front or back, and spanning polygons are split into a front
// piece and a back piece.
func (ps *BasicPolygonSplitter) SplitPolygons(plane *Plane, polygons []*Polygon, coplanarFront, coplanarBack, front, back *[]*Polygon) {
	// Per-polygon scratch holding each vertex's relationship to the plane,
	// reused across iterations to limit allocation.
	types := make([]PlaneRelationship, 0, 20)
	for _, polygon := range polygons {
		var polygonType PlaneRelationship
		types = types[:0]
		for _, v := range polygon.Vertices {
			// Signed distance of the vertex from the plane.
			t := plane.Normal.Dot(v.Position) - plane.W
			var pType PlaneRelationship
			if t < (-EPSILON) {
				pType = BACK
			} else if t > EPSILON {
				pType = FRONT
			} else {
				pType = COPLANAR
			}
			// OR-ing accumulates the polygon-wide class: FRONT|BACK == SPANNING.
			polygonType |= pType
			types = append(types, pType)
		}
		// Go switch cases never fall through, so the explicit break
		// statements present in the original were redundant and removed.
		switch polygonType {
		case COPLANAR:
			if plane.Normal.Dot(polygon.Plane.Normal) > 0 {
				*coplanarFront = append(*coplanarFront, polygon)
			} else {
				*coplanarBack = append(*coplanarBack, polygon)
			}
		case FRONT:
			*front = append(*front, polygon)
		case BACK:
			*back = append(*back, polygon)
		case SPANNING:
			f := make([]*Vertex, 0)
			b := make([]*Vertex, 0)
			for i := range polygon.Vertices {
				j := (i + 1) % len(polygon.Vertices)
				ti := types[i]
				tj := types[j]
				vi := polygon.Vertices[i]
				vj := polygon.Vertices[j]
				if ti != BACK {
					f = append(f, vi)
				}
				if ti != FRONT {
					// Coplanar vertices appear on both sides; clone so the
					// two output polygons do not share mutable vertices.
					if ti != BACK {
						b = append(b, vi.Clone())
					} else {
						b = append(b, vi)
					}
				}
				if (ti | tj) == SPANNING {
					// Edge vi->vj crosses the plane: interpolate the
					// intersection point and emit it on both sides.
					t := (plane.W - plane.Normal.Dot(vi.Position)) / plane.Normal.Dot(vj.Position.Minus(vi.Position))
					v := vi.Interpolate(vj, t)
					f = append(f, v)
					b = append(b, v.Clone())
				}
			}
			// Fewer than three vertices cannot form a polygon.
			if len(f) >= 3 {
				*front = append(*front, NewPolygonFromVertices(f))
			}
			if len(b) >= 3 {
				*back = append(*back, NewPolygonFromVertices(b))
			}
		}
	}
}
// MultiCorePolygonSplitter fans the splitting work out over multiple
// goroutines (one per CPU) to speed up the splitting of large polygon sets.
type MultiCorePolygonSplitter struct {
	// This is the target splitter to use - which should normally use the BasicPolygonSplitter
	Target IPolygonSplitter
}

// SplitPolygons splits the polygons into various slices based upon their
// orientation to the specified plane. Inputs larger than 1000 polygons are
// processed by NumCPU workers that claim 500-polygon batches under a mutex,
// split them locally via Target, and merge results back under the same
// mutex; smaller inputs are delegated to Target on the calling goroutine.
func (ps *MultiCorePolygonSplitter) SplitPolygons(plane *Plane, polygons []*Polygon, coplanarFront, coplanarBack, front, back *[]*Polygon) {
	if len(polygons) > 1000 {
		var wg sync.WaitGroup
		var lock sync.Mutex
		// start/end/done are shared between workers and are only read or
		// written while holding lock.
		cpus := runtime.NumCPU()
		batchSize := 500
		start := 0
		end := 0
		done := false
		for i := 0; i < cpus; i++ {
			wg.Add(1)
			go func() {
				// Per-worker scratch slices, reused across batches to
				// limit allocation.
				cF := make([]*Polygon, 0)
				cB := make([]*Polygon, 0)
				f := make([]*Polygon, 0)
				b := make([]*Polygon, 0)
				for {
					lock.Lock()
					if done {
						wg.Done()
						lock.Unlock()
						return
					}
					// Claim the next batch [start, end).
					end = start + batchSize
					if end > len(polygons) {
						end = len(polygons)
						done = true
					}
					p := polygons[start:end]
					start += batchSize
					lock.Unlock()
					cF = cF[:0]
					cB = cB[:0]
					f = f[:0]
					b = b[:0]
					// Split outside the lock, then merge under it.
					ps.Target.SplitPolygons(plane, p, &cF, &cB, &f, &b)
					lock.Lock()
					*coplanarFront = append(*coplanarFront, cF...)
					*coplanarBack = append(*coplanarBack, cB...)
					*front = append(*front, f...)
					*back = append(*back, b...)
					lock.Unlock()
				}
			}()
		}
		wg.Wait()
	} else {
		ps.Target.SplitPolygons(plane, polygons, coplanarFront, coplanarBack, front, back)
	}
}
// Package kgobject contains helper methods to construct common api KGObject instances
package kgobject
import (
"time"
"github.com/ebay/beam/api"
)
// AString returns a new KGObject instance containing the supplied string and language ID.
func AString(s string, langID uint64) api.KGObject {
	return api.KGObject{LangID: langID, Value: &api.KGObject_AString{AString: s}}
}

// AFloat64 returns a new KGObject instance containing the supplied float and units ID.
// (Parameter renamed from the misspelled uintID to unitID, matching the
// UnitID field it populates.)
func AFloat64(f float64, unitID uint64) api.KGObject {
	return api.KGObject{UnitID: unitID, Value: &api.KGObject_AFloat64{AFloat64: f}}
}

// AInt64 returns a new KGObject instance containing the supplied int and units ID.
func AInt64(i int64, unitID uint64) api.KGObject {
	return api.KGObject{UnitID: unitID, Value: &api.KGObject_AInt64{AInt64: i}}
}
// ATimestampY returns a new KGObject instance containing a year-precision Timestamp for the specified year and units ID.
// (Parameter renamed from the misspelled uintID to unitID throughout this group.)
func ATimestampY(year int, unitID uint64) api.KGObject {
	return ATimestamp(time.Date(year, time.January, 1, 0, 0, 0, 0, time.UTC), api.Year, unitID)
}

// ATimestampYM returns a new KGObject instance containing a month-precision Timestamp for the specified year, month and units ID.
func ATimestampYM(year int, month int, unitID uint64) api.KGObject {
	return ATimestamp(time.Date(year, time.Month(month), 1, 0, 0, 0, 0, time.UTC), api.Month, unitID)
}

// ATimestampYMD returns a new KGObject instance containing a day-precision Timestamp for the specified year, month, day and units ID.
func ATimestampYMD(year int, month int, day int, unitID uint64) api.KGObject {
	return ATimestamp(time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC), api.Day, unitID)
}

// ATimestampYMDH returns a new KGObject instance containing an hour-precision Timestamp for the specified year, month, day, hour and units ID.
func ATimestampYMDH(year, month, day, hour int, unitID uint64) api.KGObject {
	return ATimestamp(time.Date(year, time.Month(month), day, hour, 0, 0, 0, time.UTC), api.Hour, unitID)
}

// ATimestampYMDHM returns a new KGObject instance containing a minute-precision Timestamp for the specified year, month, day, hour, minute and units ID.
func ATimestampYMDHM(year, month, day, hour, minute int, unitID uint64) api.KGObject {
	return ATimestamp(time.Date(year, time.Month(month), day, hour, minute, 0, 0, time.UTC), api.Minute, unitID)
}

// ATimestampYMDHMS returns a new KGObject instance containing a second-precision Timestamp for the specified year, month, day, hour, minute, second and units ID.
func ATimestampYMDHMS(year, month, day, hour, minute, second int, unitID uint64) api.KGObject {
	return ATimestamp(time.Date(year, time.Month(month), day, hour, minute, second, 0, time.UTC), api.Second, unitID)
}

// ATimestampYMDHMSN returns a new KGObject instance containing a nanosecond-precision Timestamp for the specified year, month, day, hour, minute, second, nanosecond and units ID.
func ATimestampYMDHMSN(year, month, day, hour, minute, second, nsec int, unitID uint64) api.KGObject {
	return ATimestamp(time.Date(year, time.Month(month), day, hour, minute, second, nsec, time.UTC), api.Nanosecond, unitID)
}
// ATimestamp returns a new KGObject instance containing a ATimestamp for the supplied dateTime, precision and Units ID.
func ATimestamp(t time.Time, p api.Precision, uintID uint64) api.KGObject {
return api.KGObject{UnitID: uintID, Value: &api.KGObject_ATimestamp{ATimestamp: &api.KGTimestamp{Precision: p, Value: t}}}
}
// ABool returns an new KGObject instance containing a Boolean value and Units ID.
func ABool(b bool, uintID uint64) api.KGObject {
return api.KGObject{UnitID: uintID, Value: &api.KGObject_ABool{ABool: b}}
}
// AKID returns an new KGObject instance containing a AKID value.
func AKID(kid uint64) api.KGObject {
return api.KGObject{Value: &api.KGObject_AKID{AKID: kid}}
} | src/github.com/ebay/beam/msg/kgobject/make.go | 0.857306 | 0.598107 | make.go | starcoder |
package main
// BinaryTree is a binary tree node holding an int Value; Left/Right are
// nil when absent. Node values are used as map keys in the parent lookup,
// so they are assumed unique — TODO confirm with callers.
type BinaryTree struct {
	Value int
	Left *BinaryTree
	Right *BinaryTree
}
// FindNodesDistanceK returns the values of every node exactly k edges away
// from the node holding target.
// O(n) time | O(n) space - where n is the number of nodes in the tree
func FindNodesDistanceK(tree *BinaryTree, target int, k int) []int {
	parents := make(map[int]*BinaryTree)
	populateNodesToParents(tree, parents, nil)
	start := getNodeFromValue(target, tree, parents)
	return breadthFirstSearchForDistanceKNodes(start, parents, k)
}
// populateNodesToParents records, for every node value in the subtree
// rooted at node, a mapping to that node's parent (nil for the root).
func populateNodesToParents(node *BinaryTree, nodesToParents map[int]*BinaryTree, parent *BinaryTree) {
	if node == nil {
		return
	}
	nodesToParents[node.Value] = parent
	populateNodesToParents(node.Left, nodesToParents, node)
	populateNodesToParents(node.Right, nodesToParents, node)
}
// getNodeFromValue resolves a node value back to its *BinaryTree pointer
// using the value->parent map built by populateNodesToParents.
// NOTE(review): assumes value is present in the tree; for a missing value
// nodesToParents[value] is nil and the child check below would panic —
// confirm callers always pass an existing target.
func getNodeFromValue(value int, tree *BinaryTree, nodesToParents map[int]*BinaryTree) *BinaryTree {
	// The root has no parent, so it must be matched directly.
	if tree.Value == value {
		return tree
	}
	nodeParent := nodesToParents[value]
	if nodeParent.Left != nil && nodeParent.Left.Value == value {
		return nodeParent.Left
	}
	return nodeParent.Right
}
// breadthFirstSearchForDistanceKNodes runs a BFS outward from target,
// treating parent links (via nodesToParents) as edges alongside child
// links, and returns the values of all nodes at exactly distance k.
func breadthFirstSearchForDistanceKNodes(target *BinaryTree, nodesToParents map[int]*BinaryTree, k int) []int {
	// item pairs a node with its BFS distance from target.
	type item struct {
		node *BinaryTree
		dist int
	}
	queue := []item{{node: target, dist: 0}}
	seen := map[int]bool{target.Value: true} // values already enqueued
	var current item
	for len(queue) > 0 {
		current, queue = queue[0], queue[1:]
		currentNode, targetDistance := current.node, current.dist
		if targetDistance == k {
			// BFS dequeues nodes in nondecreasing distance order, so once
			// the first distance-k node is reached, everything remaining
			// in the queue is also at distance k: collect all and finish.
			nodesDistanceK := make([]int, 0)
			for _, i := range queue {
				nodesDistanceK = append(nodesDistanceK, i.node.Value)
			}
			nodesDistanceK = append(nodesDistanceK, currentNode.Value)
			return nodesDistanceK
		}
		// Neighbors: both children plus the parent (entries may be nil).
		connectedNodes := []*BinaryTree{currentNode.Left, currentNode.Right, nodesToParents[currentNode.Value]}
		for _, node := range connectedNodes {
			if node == nil {
				continue
			}
			if seen[node.Value] {
				continue
			}
			seen[node.Value] = true
			queue = append(queue, item{node: node, dist: targetDistance + 1})
		}
	}
	return []int{}
}
package types
import (
"errors"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
)
// DetectionKind defines an enum type that
// gives us information on the type of slashable offense
// found when analyzing validator min-max spans.
type DetectionKind uint8
const (
// DoubleVote denotes a slashable offense in which
// a validator cast two conflicting attestations within
// the same target epoch.
DoubleVote DetectionKind = iota
// SurroundVote denotes a slashable offense in which
// a validator surrounded or was surrounded by a previous
// attestation created by the same validator.
SurroundVote
)
// DetectionResult tells us the kind of slashable
// offense found from detecting on min-max spans +
// the slashable epoch for the offense.
// Also includes the signature bytes for assistance in
// finding the attestation for the slashing proof.
type DetectionResult struct {
ValidatorIndex uint64
SlashableEpoch uint64
Kind DetectionKind
SigBytes [2]byte
}
// Marshal the result into bytes, used for removing duplicates.
func (result *DetectionResult) Marshal() []byte {
numBytes := bytesutil.ToBytes(result.SlashableEpoch, 8)
var resultBytes []byte
resultBytes = append(resultBytes, uint8(result.Kind))
resultBytes = append(resultBytes, result.SigBytes[:]...)
resultBytes = append(resultBytes, numBytes...)
return resultBytes
}
// Span defines the structure used for detecting surround and double votes.
type Span struct {
MinSpan uint16
MaxSpan uint16
SigBytes [2]byte
HasAttested bool
}
// SpannerEncodedLength the byte length of validator span data structure.
var SpannerEncodedLength = uint64(7)
// UnmarshalSpan returns a span from an encoded, flattened byte array.
// Note: This is a very often used function, so it is as optimized as possible.
func UnmarshalSpan(enc []byte) (Span, error) {
r := Span{}
if len(enc) != int(SpannerEncodedLength) {
return r, errors.New("wrong data length for min max span")
}
r.MinSpan = uint16(enc[0]) | uint16(enc[1])<<8
r.MaxSpan = uint16(enc[2]) | uint16(enc[3])<<8
sigB := [2]byte{}
copy(sigB[:], enc[4:6])
r.SigBytes = sigB
r.HasAttested = enc[6]&1 == 1
return r, nil
}
// Marshal converts the span struct into a flattened byte array.
// Note: This is a very often used function, so it is as optimized as possible.
func (span Span) Marshal() []byte {
var attested byte = 0
if span.HasAttested {
attested = 1
}
return []byte{
byte(span.MinSpan),
byte(span.MinSpan >> 8),
byte(span.MaxSpan),
byte(span.MaxSpan >> 8),
span.SigBytes[0],
span.SigBytes[1],
attested,
}
} | .docker/Prysm/prysm-spike/slasher/detection/attestations/types/types.go | 0.723798 | 0.42662 | types.go | starcoder |
package interval
import (
"fmt"
"time"
)
type (
// Float64Range represents a range of numbers of type float64.
Float64Range struct {
Min *float64
Max *float64
}
// Int32Range represents a range of numbers of type int32.
Int32Range struct {
Min *int32
Max *int32
}
// TimeRange represents a range of time.
TimeRange struct {
Min *time.Time
Max *time.Time
}
)
// NewFloat64Range creates a new Float64Range object.
func NewFloat64Range(min, max float64) Float64Range {
return Float64Range{&min, &max}
}
// NewInt32Range creates a new Int32Range object.
func NewInt32Range(min, max int32) Int32Range {
return Int32Range{&min, &max}
}
// NewTimeRange creates a new TimeRange object.
func NewTimeRange(min, max time.Time) TimeRange {
return TimeRange{&min, &max}
}
// Float64RangeEqual returns true if two ranges are equal.
func Float64RangeEqual(a, b Float64Range) bool {
if (a.Min == nil && b.Min != nil) ||
(a.Min != nil && b.Min == nil) ||
(a.Max == nil && b.Max != nil) ||
(a.Max != nil && b.Max == nil) {
return false
}
if (a.Min != nil) && (b.Min != nil) &&
(*a.Min != *b.Min) {
return false
}
if (a.Max != nil) && (b.Max != nil) &&
(*a.Max != *b.Max) {
return false
}
return true
}
// String returns a string representation of a Float64Range.
func (r Float64Range) String() string {
var min, max string
if r.Min != nil {
min = fmt.Sprintf("%f", *r.Min)
} else {
min = "nil"
}
if r.Max != nil {
max = fmt.Sprintf("%f", *r.Max)
} else {
max = "nil"
}
return fmt.Sprintf("{Min: %s, Max: %s}", min, max)
}
// Int32RangeEqual returns true if two ranges are equal.
func Int32RangeEqual(a, b Int32Range) bool {
if (a.Min == nil && b.Min != nil) ||
(a.Min != nil && b.Min == nil) ||
(a.Max == nil && b.Max != nil) ||
(a.Max != nil && b.Max == nil) {
return false
}
if (a.Min != nil) && (b.Min != nil) &&
(*a.Min != *b.Min) {
return false
}
if (a.Max != nil) && (b.Max != nil) &&
(*a.Max != *b.Max) {
return false
}
return true
}
// String returns a string representation of an Int32Range.
func (r Int32Range) String() string {
var min, max string
if r.Min != nil {
min = fmt.Sprintf("%d", *r.Min)
} else {
min = "nil"
}
if r.Max != nil {
max = fmt.Sprintf("%d", *r.Max)
} else {
max = "nil"
}
return fmt.Sprintf("{Min: %s, Max: %s}", min, max)
} | interval/range.go | 0.829837 | 0.434101 | range.go | starcoder |
package iso20022
// Parameters applied to the settlement of a security transfer.
type Transfer21 struct {
// Unique and unambiguous identifier for a transfer instruction, as assigned by the instructing party.
TransferReference *Max35Text `xml:"TrfRef"`
// Unique and unambiguous investor's identification of a transfer. This reference can typically be used in a hub scenario to give the reference of the transfer as assigned by the underlying client.
ClientReference *Max35Text `xml:"ClntRef,omitempty"`
// Unambiguous identification of the transfer allocated by the counterparty.
CounterpartyReference *AdditionalReference2 `xml:"CtrPtyRef,omitempty"`
// Identifies the business process in which the actors are involved. This is important to trigger the right business process, according to the market business model, which may require matching instructions in a CSD environment (double leg process) or not (single leg process).
BusinessFlowType *BusinessFlowType1Code `xml:"BizFlowTp,omitempty"`
// Identifies the transfer reason.
TransferReason *TransferReason1 `xml:"TrfRsn,omitempty"`
// Date at which the instructing party places the transfer instruction.
TransferDate *DateFormat1Choice `xml:"TrfDt,omitempty"`
// Date and time at which the securities are to be exchanged at the International Central Securities Depository (ICSD) or Central Securities Depository (CSD).
RequestedSettlementDate *ISODate `xml:"ReqdSttlmDt,omitempty"`
// Identifies whether or not saving plan or withdrawal or switch plan are included in the holdings.
HoldingsPlanType []*HoldingsPlanType1Code `xml:"HldgsPlanTp,omitempty"`
// Information related to the financial instrument to be received.
FinancialInstrumentDetails *FinancialInstrument13 `xml:"FinInstrmDtls"`
// Total quantity of securities to be settled.
TotalUnitsNumber *FinancialInstrumentQuantity1 `xml:"TtlUnitsNb"`
// Indicates whether the transfer results in a change of beneficial owner.
OwnAccountTransferIndicator *YesNoIndicator `xml:"OwnAcctTrfInd,omitempty"`
// Additional specific settlement information for non-regulated traded funds.
NonStandardSettlementInformation *Max350Text `xml:"NonStdSttlmInf,omitempty"`
// Party that receives securities from the delivering agent via the place of settlement, for example, securities central depository.
ReceivingAgentDetails *PartyIdentificationAndAccount93 `xml:"RcvgAgtDtls,omitempty"`
// Party that delivers securities to the receiving agent at the place of settlement, for example, a central securities depository.
DeliveringAgentDetails *PartyIdentificationAndAccount93 `xml:"DlvrgAgtDtls,omitempty"`
}
func (t *Transfer21) SetTransferReference(value string) {
t.TransferReference = (*Max35Text)(&value)
}
func (t *Transfer21) SetClientReference(value string) {
t.ClientReference = (*Max35Text)(&value)
}
func (t *Transfer21) AddCounterpartyReference() *AdditionalReference2 {
t.CounterpartyReference = new(AdditionalReference2)
return t.CounterpartyReference
}
func (t *Transfer21) SetBusinessFlowType(value string) {
t.BusinessFlowType = (*BusinessFlowType1Code)(&value)
}
func (t *Transfer21) AddTransferReason() *TransferReason1 {
t.TransferReason = new(TransferReason1)
return t.TransferReason
}
func (t *Transfer21) AddTransferDate() *DateFormat1Choice {
t.TransferDate = new(DateFormat1Choice)
return t.TransferDate
}
func (t *Transfer21) SetRequestedSettlementDate(value string) {
t.RequestedSettlementDate = (*ISODate)(&value)
}
func (t *Transfer21) AddHoldingsPlanType(value string) {
t.HoldingsPlanType = append(t.HoldingsPlanType, (*HoldingsPlanType1Code)(&value))
}
func (t *Transfer21) AddFinancialInstrumentDetails() *FinancialInstrument13 {
t.FinancialInstrumentDetails = new(FinancialInstrument13)
return t.FinancialInstrumentDetails
}
func (t *Transfer21) AddTotalUnitsNumber() *FinancialInstrumentQuantity1 {
t.TotalUnitsNumber = new(FinancialInstrumentQuantity1)
return t.TotalUnitsNumber
}
func (t *Transfer21) SetOwnAccountTransferIndicator(value string) {
t.OwnAccountTransferIndicator = (*YesNoIndicator)(&value)
}
func (t *Transfer21) SetNonStandardSettlementInformation(value string) {
t.NonStandardSettlementInformation = (*Max350Text)(&value)
}
func (t *Transfer21) AddReceivingAgentDetails() *PartyIdentificationAndAccount93 {
t.ReceivingAgentDetails = new(PartyIdentificationAndAccount93)
return t.ReceivingAgentDetails
}
func (t *Transfer21) AddDeliveringAgentDetails() *PartyIdentificationAndAccount93 {
t.DeliveringAgentDetails = new(PartyIdentificationAndAccount93)
return t.DeliveringAgentDetails
} | Transfer21.go | 0.791378 | 0.46557 | Transfer21.go | starcoder |
// hlclock provides functions for Hybrid Logical Clocks.
package hlclock
import (
"encoding/json"
"fmt"
)
// Now implementation provides the current wall clock time.
type SysClock interface {
Now() int64
}
// HTimestamp contains the timestamp information.
type HTimestamp struct {
timestamp int64
counter uint16
}
// NewHTimestamp takes the current wall clock time and
// initial counter and returns a new HTimestamp.
func NewHTimestamp(timestamp int64, counter uint16) HTimestamp {
return HTimestamp{timestamp: timestamp, counter: counter}
}
// Increment takes the current wall clock time and
// updates the timestamp on local event.
func (ht *HTimestamp) Increment(pt int64) {
if pt > ht.timestamp {
ht.timestamp = pt
ht.counter = 0
} else {
ht.counter += 1
}
}
// Merge takes the current wall clock time and a remote timestamp and
// updates the current timestamp.
func (ht *HTimestamp) Merge(pt int64, msg *HTimestamp) {
switch {
case pt > ht.timestamp && pt > msg.timestamp:
ht.timestamp = pt
ht.counter = 0
case ht.timestamp == msg.timestamp:
ht.counter = max(ht.counter, msg.counter) + 1
case msg.timestamp > ht.timestamp:
ht.timestamp = msg.timestamp
ht.counter = msg.counter + 1
default:
ht.counter += 1
}
}
func (ht *HTimestamp) Copy() HTimestamp {
return HTimestamp{
ht.timestamp,
ht.counter,
}
}
func (ht *HTimestamp) String() string {
return fmt.Sprintf("Timestamp:{clock: %d, counter: %d}", ht.timestamp, ht.counter)
}
func (ht *HTimestamp) Equal(other *HTimestamp) bool {
return ht.timestamp == other.timestamp && ht.counter == other.counter
}
func (ht *HTimestamp) Compare(other *HTimestamp) int {
switch {
case ht.Equal(other):
return 0
case ht.timestamp == other.timestamp:
if ht.counter < other.counter {
return -1
}
return 1
case ht.timestamp < other.timestamp:
return -1
default:
return 1
}
}
func (ht *HTimestamp) MarshalJSON() ([]byte, error) {
j, err := json.Marshal(struct {
Timestamp int64 `json:"timestamp"`
Counter uint16 `json:"counter"`
}{
Timestamp: ht.timestamp,
Counter: ht.counter,
})
if err != nil {
return nil, err
}
return j, nil
}
func (ht *HTimestamp) UnmarshalJSON(data []byte) error {
v := struct {
Timestamp int64 `json:"timestamp"`
Counter uint16 `json:"counter"`
}{}
if err := json.Unmarshal(data, &v); err != nil {
return err
}
ht.timestamp = v.Timestamp
ht.counter = v.Counter
return nil
}
func (ht *HTimestamp) Timestamp() int64 {
return ht.timestamp
}
func (ht *HTimestamp) Counter() uint16 {
return ht.counter
}
type HCLock struct {
sysClock SysClock
latest *HTimestamp
}
func New(nodeID string, sysClock SysClock) HCLock {
tm := NewHTimestamp(sysClock.Now(), 0)
return HCLock{sysClock, &tm}
}
func (hcl *HCLock) Increment() {
hcl.latest.Increment(hcl.sysClock.Now())
}
func (hcl *HCLock) Merge(e *HTimestamp) {
hcl.latest.Merge(hcl.sysClock.Now(), e)
}
func (hcl *HCLock) String() string {
return hcl.latest.String()
}
func (hcl *HCLock) CopyTimestamp() HTimestamp {
return hcl.latest.Copy()
}
func max(x, y uint16) uint16 {
if x > y {
return x
}
return y
} | hlclock/hlclock.go | 0.620162 | 0.554893 | hlclock.go | starcoder |
package finance
import (
"sort"
"time"
)
// Transactions is a collection of type Transaction
type Transactions []Transaction
// StartingBalance calculates the correct starting balance by iterating over
// each transaction and identify if it belongs to a different account.
// If it does, than the starting balance is added to the balance of all accounts.
func (t Transactions) StartingBalance() Currency {
accounts := make(map[string]int)
balance := 0
for _, i := range t {
_, ok := accounts[i.AccountNumber]
if ok == false {
accounts[i.AccountNumber] = 0
balance += i.Balance.Amount - i.Amount.Amount
}
}
return NewCurrency(balance)
}
// Sum acts on the Ledger type to aggregate the sum of all
// transaction amounts.
func (t Transactions) Sum() Currency {
total := t.StartingBalance().Amount
for _, item := range t {
total += item.Amount.Amount
}
return NewCurrency(total)
}
// Sort exposes the StdLib Sort command without needing to import
// the package.
func (t *Transactions) Sort() {
sort.Sort(t)
}
func (t Transactions) Len() int {
return len(t)
}
func (t Transactions) Swap(i, j int) {
t[i], t[j] = t[j], t[i]
}
func (t Transactions) Less(i, j int) bool {
if t[i].Date.Before(t[j].Date) {
return true
}
if t[i].Date.After(t[j].Date) {
return false
}
return t[i].UniqueID < t[j].UniqueID
}
// FilterByCategory reduces the list of transactions by category. Any
// number of categories can be provided; a transaction is appended once
// per category it matches.
func (t Transactions) FilterByCategory(categories ...string) Transactions {
	var matched Transactions
	for _, txn := range t {
		for _, want := range categories {
			if txn.GetCategory() == want {
				matched = append(matched, txn)
			}
		}
	}
	return matched
}
// TotalExpenses sums every negative transaction amount (the expenses)
// and returns the (non-positive) total as a Currency.
func (t Transactions) TotalExpenses() Currency {
	sum := 0
	for _, txn := range t {
		if amt := txn.Amount.Amount; amt < 0 {
			sum += amt
		}
	}
	return NewCurrency(sum)
}
// DateRange returns the transactions dated inside [start, end],
// inclusive of both endpoints.
func (t Transactions) DateRange(start time.Time, end time.Time) Transactions {
	var found Transactions
	for _, transaction := range t {
		// time.Time values must be compared with Equal, not ==: the ==
		// operator also compares the monotonic-clock reading and the
		// Location pointer, so equal instants can compare unequal.
		d := transaction.Date
		if (d.After(start) && d.Before(end)) || d.Equal(start) || d.Equal(end) {
			found = append(found, transaction)
		}
	}
	return found
}
// GetAllByDescription returns every transaction whose description is an
// exact match for the given string.
func (t Transactions) GetAllByDescription(description string) Transactions {
	matches := Transactions{}
	for _, txn := range t {
		if txn.GetDescription() == description {
			matches = append(matches, txn)
		}
	}
	return matches
}
// GetByDescription returns the first transaction with a matching description
func (t Transactions) GetByDescription(description string) (Transaction, bool) {
foundTransactions := t.GetAllByDescription(description)
if len(foundTransactions) > 0 {
return foundTransactions[0], true
}
return Transaction{}, false
} | finance/transactions.go | 0.804444 | 0.414129 | transactions.go | starcoder |
package util
import (
	"fmt"
	"strconv"
	"strings"
	"time"
)
// GetCurrentTime returns the current time in UTC.
func GetCurrentTime() time.Time {
	return time.Now().UTC()
}

// GetTimeStamp returns the current Unix timestamp in seconds.
func GetTimeStamp() int64 {
	return time.Now().UTC().Unix()
}

// GetMSTimeStamp returns the current Unix timestamp in milliseconds.
func GetMSTimeStamp() int64 {
	return time.Now().UTC().UnixNano() / int64(time.Millisecond)
}

// GetSecTimeStamp returns the current Unix timestamp in seconds.
// NOTE(review): equivalent to GetTimeStamp — consider consolidating.
func GetSecTimeStamp() int64 {
	return time.Now().UTC().UnixNano() / int64(time.Second)
}
func Time2MSTimeStamp(t *time.Time) int64 {
ts := t.UTC().UnixNano() / int64(time.Millisecond)
if ts > 0 {
return ts
} else {
return 0
}
}
// CustomTime wraps time.Time so JSON (un)marshaling uses the ctLayout
// wire format.
type CustomTime struct {
	time.Time
}

const (
	ctLayout       = "2006/01/02|15:04:05" // CustomTime JSON wire format
	ctLayoutDayKey = "20060102"            // compact day-key format
	ctLayoutStr    = "2006-01-02 15:04:05" // human-readable timestamp format
)

// nilTime is the UnixNano value of the zero time.Time; IsSet compares
// against it.
var nilTime = (time.Time{}).UnixNano()
// UnmarshalJSON parses a JSON value in ctLayout format into ct.
// A JSON null (or empty input) leaves ct unchanged; surrounding quotes
// are stripped before parsing.
func (ct *CustomTime) UnmarshalJSON(b []byte) (err error) {
	s := string(b)
	// Guard against empty input and JSON null: the previous code indexed
	// b[0] / b[len(b)-1] directly and would panic on an empty slice, and
	// would try to time.Parse the literal "null".
	if s == "" || s == "null" {
		return nil
	}
	s = strings.Trim(s, `"`)
	ct.Time, err = time.Parse(ctLayout, s)
	return
}
// MarshalJSON encodes ct as a quoted JSON string in ctLayout format.
// The quotes are required: the previous code emitted the bare layout
// text (e.g. 2006/01/02|15:04:05), which is not valid JSON and could
// not round-trip through UnmarshalJSON.
func (ct *CustomTime) MarshalJSON() ([]byte, error) {
	return []byte(`"` + ct.Time.Format(ctLayout) + `"`), nil
}
// IsSet reports whether ct holds anything other than the zero time.Time.
func (ct *CustomTime) IsSet() bool {
	return ct.UnixNano() != nilTime
}
// GetFirstDateOfMonth returns midnight on the first day of d's month,
// in d's location.
func GetFirstDateOfMonth(d time.Time) time.Time {
	firstDay := d.AddDate(0, 0, 1-d.Day())
	return GetZeroTimeOfDay(firstDay)
}

// GetLastDateOfMonth returns midnight on the last day of d's month.
func GetLastDateOfMonth(d time.Time) time.Time {
	return GetFirstDateOfMonth(d).AddDate(0, 1, -1)
}
// GetFirstDateOfWeek returns midnight on the Monday of d's week.
// NOTE(review): a Sunday input (Weekday 0) maps forward to the NEXT
// Monday rather than back to the previous one — confirm this
// Monday-based convention is intended.
func GetFirstDateOfWeek(d time.Time) time.Time {
	d = d.AddDate(0, 0, int(-d.Weekday())+1)
	return GetZeroTimeOfDay(d)
}

// GetLastDateOfWeek returns the exclusive end of d's week: midnight on
// the following Monday (week start + 7 days), not the last instant
// inside the week.
func GetLastDateOfWeek(d time.Time) time.Time {
	return GetFirstDateOfWeek(d).AddDate(0, 0, 7)
}
// GetCurrentDayStr formats d as a compact day key ("20060102").
func GetCurrentDayStr(d time.Time) string {
	return d.Format(ctLayoutDayKey)
}
//获取当天的0点时间
func GetZeroTimeOfDay(d time.Time) time.Time {
return time.Date(d.Year(), d.Month(), d.Day(), 0, 0, 0, 0, d.Location())
}
// ParseTimeOfStr formats the Unix timestamp unixT (seconds, local time
// zone) using ctLayoutStr ("2006-01-02 15:04:05").
func ParseTimeOfStr(unixT int64) string {
	return time.Unix(unixT, 0).Format(ctLayoutStr)
}
// ParseTimeOfCustom formats the Unix timestamp unixT (seconds, local
// time zone) using the supplied layout string.
func ParseTimeOfCustom(unixT int64, layStr string) string {
	t := time.Unix(unixT, 0)
	return t.Format(layStr)
}
func GetCurrentDay(d time.Time, hour, min int) time.Time {
return time.Date(d.Year(), d.Month(), d.Day(), hour, min, 0, 0, d.Location())
}
// GetActivityBeginTime parses an "HH:MM" wall-clock string and returns
// the Unix timestamp of that time on today's date (local time zone).
// It returns an error when the input is malformed.
func GetActivityBeginTime(beginTimeEv string) (int64, error) {
	parts := strings.Split(beginTimeEv, ":")
	// Previously a missing ':' caused an index-out-of-range panic on
	// parts[1]; validate the shape before indexing.
	if len(parts) < 2 {
		return 0, fmt.Errorf("invalid begin time %q: want HH:MM", beginTimeEv)
	}
	hour, err := strconv.Atoi(parts[0])
	if err != nil {
		return 0, err
	}
	min, err := strconv.Atoi(parts[1])
	if err != nil {
		return 0, err
	}
	return GetCurrentDay(time.Now(), hour, min).Unix(), nil
}
// GetExpireTimeDay returns the number of whole days between nowUnix and
// expireUnixT (both Unix seconds), truncated toward zero.
func GetExpireTimeDay(expireUnixT, nowUnix int64) int32 {
	delta := time.Unix(expireUnixT, 0).Sub(time.Unix(nowUnix, 0))
	return int32(delta.Hours() / 24)
}
// GetDayPoint returns one "2006/01/02" date string per day in the span
// [bTime, eTime] (Unix seconds, local time zone), inclusive; at least
// one entry is produced.
func GetDayPoint(bTime, eTime int64) (point []string) {
	bT := time.Unix(bTime, 0)
	eT := time.Unix(eTime, 0)
	for {
		// Bug fix: the original appended GetFirstDateOfWeek(bT) here
		// (apparently copied from GetWeekPoint), so every day of a week
		// repeated that week's start date instead of its own date.
		point = append(point, bT.Format("2006/01/02"))
		if bT.Unix() >= eT.Unix() {
			return
		}
		bT = time.Unix(bT.Unix()+24*3600, 0)
	}
}
// GetWeekPoint returns the "2006/01/02" week-start date for each week in
// the span [bTime, eTime] (Unix seconds). At least one entry is always
// produced, and weeks are stepped in fixed 7*24h increments.
// NOTE(review): fixed 24h-per-day stepping drifts across DST changes in
// non-UTC locales — confirm whether that matters for callers.
func GetWeekPoint(bTime, eTime int64) (point []string) {
	bT := time.Unix(bTime, 0)
	eT := time.Unix(eTime, 0)
	for {
		point = append(point, GetFirstDateOfWeek(bT).Format("2006/01/02"))
		bT = time.Unix(bT.Unix()+24*3600*7, 0)
		if bT.Unix() >= eT.Unix() {
			return
		}
	}
}
// GetMonthPoint returns the "2006/01" label of each month in the span
// [bTime, eTime] (Unix seconds); at least one entry is produced.
func GetMonthPoint(bTime, eTime int64) (point []string) {
	bT := time.Unix(bTime, 0)
	eT := time.Unix(eTime, 0)
	for {
		point = append(point, GetFirstDateOfMonth(bT).Format("2006/01"))
		// Bug fix: the original advanced bT by "days in bT's month"
		// starting from bT's current (possibly mid-month) position,
		// which could overshoot a short month entirely (e.g. Jan 31 +
		// 31 days lands in March, skipping February). Step to the next
		// month's first day instead.
		bT = GetFirstDateOfMonth(bT).AddDate(0, 1, 0)
		if bT.Unix() >= eT.Unix() {
			return
		}
	}
}
// StartAndStopTime is an inclusive [StartTime, StopTime] span in Unix seconds.
type StartAndStopTime struct {
	StartTime int64
	StopTime  int64
}
// GetNDayPoint builds one StartAndStopTime span per day for the n days
// ending on the day containing eTime (Unix seconds). The map is keyed by
// each day's "2006/01/02" label; startT/endT are the earliest start and
// latest stop across all spans.
func GetNDayPoint(n int, eTime int64) (startT, endT int64, m map[string]StartAndStopTime) {
	m = make(map[string]StartAndStopTime)
	eT := time.Unix(eTime, 0)
	dayStart := GetZeroTimeOfDay(eT)
	s := StartAndStopTime{
		StartTime: dayStart.Unix(),
		StopTime:  dayStart.Unix() + 24*3600 - 1,
	}
	endT = s.StopTime
	// Bug fix: startT was only assigned inside the loop below, so for
	// n <= 1 it was returned as zero. Seed it from the first span.
	startT = s.StartTime
	m[dayStart.Format("2006/01/02")] = s
	for i := 1; i < n; i++ {
		s = StartAndStopTime{
			StartTime: dayStart.AddDate(0, 0, -1*i).Unix(),
		}
		s.StopTime = s.StartTime + 24*3600 - 1
		m[time.Unix(s.StartTime, 0).Format("2006/01/02")] = s
		startT = s.StartTime
	}
	return
}
// GetNWeekPoint builds one StartAndStopTime span per week for the n
// weeks ending in the week containing eTime (Unix seconds). The map is
// keyed by each week's start "2006/01/02" label; startT/endT bound the
// earliest start and latest stop across all spans.
func GetNWeekPoint(n int, eTime int64) (startT, endT int64, m map[string]StartAndStopTime) {
	const weekSecs = 24 * 3600 * 7
	m = make(map[string]StartAndStopTime)
	weekStart := GetFirstDateOfWeek(time.Unix(eTime, 0))
	s := StartAndStopTime{
		StartTime: weekStart.Unix(),
		StopTime:  weekStart.Unix() + weekSecs - 1,
	}
	endT = s.StopTime
	m[weekStart.Format("2006/01/02")] = s
	for i := 1; i < n; i++ {
		s = StartAndStopTime{StartTime: weekStart.AddDate(0, 0, -7*i).Unix()}
		s.StopTime = s.StartTime + weekSecs - 1
		m[time.Unix(s.StartTime, 0).Format("2006/01/02")] = s
	}
	// s holds the oldest span after the loop (or the only span when n <= 1).
	startT = s.StartTime
	return
}
// GetNMonthPoint builds one StartAndStopTime span per calendar month for
// the n months ending in the month containing eTime (Unix seconds). The
// map is keyed by each month's "2006/01" label; startT/endT bound the
// earliest start and latest stop across all spans.
func GetNMonthPoint(n int, eTime int64) (startT, endT int64, m map[string]StartAndStopTime) {
	m = make(map[string]StartAndStopTime)
	monthStart := GetFirstDateOfMonth(time.Unix(eTime, 0))
	s := StartAndStopTime{StartTime: monthStart.Unix()}
	s.StopTime = time.Unix(s.StartTime, 0).AddDate(0, 1, 0).Unix() - 1
	endT = s.StopTime
	// Bug fix: startT was only assigned inside the loop below, so for
	// n <= 1 it was returned as zero. Seed it from the first span.
	startT = s.StartTime
	m[monthStart.Format("2006/01")] = s
	for i := 1; i < n; i++ {
		s = StartAndStopTime{StartTime: monthStart.AddDate(0, -1*i, 0).Unix()}
		s.StopTime = time.Unix(s.StartTime, 0).AddDate(0, 1, 0).Unix() - 1
		m[time.Unix(s.StartTime, 0).Format("2006/01")] = s
		startT = s.StartTime
	}
	return
}
// GetDayPoint2 returns "2006/01/02" labels for each day from et's day
// back to st's day, newest first. At least one entry is produced.
func GetDayPoint2(st, et int64) (points []string) {
	cursor := time.Unix(et, 0)
	floor := GetZeroTimeOfDay(time.Unix(st, 0)).Unix()
	for {
		points = append(points, cursor.Format("2006/01/02"))
		cursor = cursor.AddDate(0, 0, -1)
		if cursor.Unix() < floor {
			return
		}
	}
}
// GetWeekPoint2 returns the "2006/01/02" week-start label of each week
// from et's week back to the last week overlapping st, newest first.
// The loop stops once an entire week (start through start+7d-1) lies
// before st; a week merely straddling st is still included.
func GetWeekPoint2(st, et int64) (points []string) {
	wst := GetFirstDateOfWeek(time.Unix(et, 0))
	for {
		day := wst.Format("2006/01/02")
		// Break only when the whole week precedes st.
		if (wst.Unix() < st) && ((wst.AddDate(0, 0, 7).Unix() - 1) < st) {
			break
		}
		wst = wst.AddDate(0, 0, -7)
		points = append(points, day)
	}
	return
}
func GetMonthPoint2(st, et int64) (points []string) {
wst := GetFirstDateOfMonth(time.Unix(et, 0))
for {
day := wst.Format("2006/01")
if (wst.Unix() < st) && ((wst.AddDate(0, 1, 0).Unix() - 1) < st) {
break
}
wst = wst.AddDate(0, -1, 0)
points = append(points, day)
}
return
} | pkg/util/time.go | 0.540681 | 0.402216 | time.go | starcoder |
package util
import (
"math"
)
// Angular conversion factors and spherical-coordinate bounds (in
// radians) used by the geolocation helpers below.
var (
	deg2rad       = math.Pi / 180 // degrees to radian conversion
	rad2deg       = 180 / math.Pi // radians to degrees conversion
	earthRadius   = 6371.01       // Earth's radius in km
	maxLat        = math.Pi / 2   // 90 degrees
	minLat        = -maxLat       // -90 degrees
	maxLon        = math.Pi       // 180 degrees
	minLon        = -maxLon       // -180 degrees
	fullCircleRad = math.Pi * 2   // full circle (360 degrees) in radians
)
func degtorad(value float64) float64 {
return value * deg2rad
}
func radtodeg(value float64) float64 {
return value * rad2deg
}
// fromDegrees converts a latitude/longitude pair from degrees to radians.
func fromDegrees(lat float64, lon float64) (radLat float64, radLon float64) {
	return degtorad(lat), degtorad(lon)
}

// fromRadians converts a latitude/longitude pair from radians to degrees.
// (The previous named results were misleadingly called radLat/radLon
// even though the returned values are in degrees.)
func fromRadians(lat float64, lon float64) (degLat float64, degLon float64) {
	return radtodeg(lat), radtodeg(lon)
}
// distanceTo returns the great-circle distance in km between two points
// given in radians, using the spherical law of cosines scaled by
// earthRadius.
func distanceTo(lat float64, lon float64, pointLat float64, pointLon float64) float64 {
	return math.Acos(math.Sin(pointLat)*math.Sin(lat)+
		math.Cos(pointLat)*math.Cos(lat)*
		math.Cos(pointLon-lon)) * earthRadius
}
// BoundingCoordinates : calculate the bounding coordinates for a specific point and distance
func BoundingCoordinates(lat float64, lon float64, distance float64) [4]float64 {
var radDist = distance / earthRadius // angular distance in radians on a great circle
var radLat = degtorad(lat)
var radLon = degtorad(lon)
var relMinLat = radLat - radDist
var relMaxLat = radLat + radDist
var relMinLon, relMaxLon float64
if relMinLat > minLat && relMaxLat < maxLat {
var deltaLon = math.Asin(math.Sin(radDist) / math.Cos(radLat))
relMinLon = radLon - deltaLon
if relMinLon < minLon {
relMinLon += 2 * math.Pi
}
relMaxLon = radLon + deltaLon
if relMaxLon > maxLon {
relMaxLon -= 2 * math.Pi
}
} else {
// a pole is within the distance
relMinLat = math.Max(relMinLat, minLat)
relMaxLat = math.Min(relMaxLat, maxLat)
relMinLon = minLon
relMaxLon = maxLon
}
radMinLat, radMinLon := fromRadians(relMinLat, relMinLon)
radMaxLat, radMaxLon := fromRadians(relMaxLat, relMaxLon)
return [4]float64{radMinLat, radMinLon, radMaxLat, radMaxLon}
} | util/geolocation.go | 0.836087 | 0.567877 | geolocation.go | starcoder |
package mysql
import (
"math"
"strings"
)
// ToDataType normalizes a type name to lower case and returns it as a
// MySQL DataType.
func ToDataType(s string) DataType {
	lower := strings.ToLower(s)
	return DataType(lower)
}
// DataType represents a MySQL data type, stored as its lower-case name
// (e.g. "varchar").
type DataType string
// List of supported MySQL data types.
const (
	Bit        DataType = "bit"
	TinyInt    DataType = "tinyint"
	SmallInt   DataType = "smallint"
	MediumInt  DataType = "mediumint"
	Int        DataType = "int"
	Integer    DataType = "integer"
	BigInt     DataType = "bigint"
	Float      DataType = "float"
	Double     DataType = "double"
	Decimal    DataType = "decimal"
	Numeric    DataType = "numeric"
	Real       DataType = "real"
	Year       DataType = "year"
	Date       DataType = "date"
	Time       DataType = "time"
	Timestamp  DataType = "timestamp"
	DateTime   DataType = "datetime"
	Char       DataType = "char"
	Binary     DataType = "binary"
	VarChar    DataType = "varchar"
	VarBinary  DataType = "varbinary"
	TinyBlob   DataType = "tinyblob"
	TinyText   DataType = "tinytext"
	Blob       DataType = "blob"
	// Bug fix: was the typo "test", so ToDataType("text") never mapped
	// to Text.
	Text       DataType = "text"
	MediumBlob DataType = "mediumblob"
	MediumText DataType = "mediumtext"
	LongBlob   DataType = "longblob"
	LongText   DataType = "longtext"
	JSON       DataType = "json"
	Enum       DataType = "enum"
	Set        DataType = "set"
)
// Kind implements the ds.Data interface.
// NOTE(review): it always returns the empty string — confirm the
// interface intends an empty kind for MySQL data types.
func (DataType) Kind() string {
	return ""
}
// IsInt returns true if the data type is an integer.
func (d DataType) IsInt() bool {
	switch d {
	case
		// Bug fix: Integer ("integer", MySQL's alias for INT) was
		// missing here even though Size treats Int and Integer
		// identically, so IsInt(Integer) wrongly reported false.
		TinyInt, SmallInt, MediumInt, Int, Integer, BigInt:
		return true
	default:
		return false
	}
}
// IsString returns true if the data type is a string.
func (d DataType) IsString() bool {
	switch d {
	case Char, Binary:
		return true
	case VarChar, VarBinary:
		return true
	case TinyBlob, MediumBlob, Blob, LongBlob:
		return true
	case TinyText, MediumText, Text, LongText:
		return true
	case Enum, Set, JSON:
		return true
	default:
		return false
	}
}
// IsVar returns true if the data type is a variable one
// (variable-length storage; fixed-width Char/Binary and Enum/Set are
// excluded).
func (d DataType) IsVar() bool {
	switch d {
	case VarChar, VarBinary:
		return true
	case TinyBlob, MediumBlob, Blob, LongBlob:
		return true
	case TinyText, MediumText, Text, LongText:
		return true
	case JSON:
		return true
	default:
		return false
	}
}
// maxMediumSize is 2^24 bytes — the capacity bound Size uses for
// MEDIUMBLOB/MEDIUMTEXT (their payload limit is maxMediumSize-1).
const maxMediumSize = 16777216
// Size returns the required storage of the data type for this requested size in bytes and charset.
// It implements the ds.Data interface.
//
// The result is a (min, max) byte range; fixed-width types have
// min == max. `size` is the declared length/precision and is interpreted
// per type (bits, characters, digits, fractional-seconds precision, or
// member count); `charset` scales character counts into bytes for text
// types.
// todo To improve (decimal, numeric, etc.)
// See https://dev.mysql.com/doc/refman/8.0/en/storage-requirements.html
func (d DataType) Size(size uint64, charset string) (min, max uint64) {
	switch d {
	case Bit:
		return both((size + 7) / 8) // ceil(bits / 8)
	case Binary, Char:
		return both(size) // fixed width: exactly `size` bytes
	case TinyInt, Year:
		return both(1)
	case SmallInt:
		return both(2)
	case MediumInt, Date:
		return both(3)
	case Int, Integer:
		return both(4)
	case BigInt:
		return both(8)
	case Float:
		return float(size) // 4 or 8 bytes depending on precision
	case Double, Real:
		return both(8)
	case Decimal, Numeric:
		return 4, 8 // rough approximation; see todo above
	case Time:
		return both(3 + fsp(size)) // base width + fractional-seconds bytes
	case Timestamp:
		return both(4 + fsp(size))
	case DateTime:
		return both(5 + fsp(size))
	case VarChar:
		return variable(bytes(size, charset)) // chars scaled to bytes + length prefix
	case VarBinary:
		return variable(size)
	case TinyBlob, TinyText:
		return blob(bytes(size, charset), 1, math.MaxUint8)
	case Blob, Text:
		return blob(bytes(size, charset), 2, math.MaxUint16)
	case MediumBlob, MediumText:
		return blob(bytes(size, charset), 3, maxMediumSize)
	case LongBlob, LongText, JSON:
		return blob(bytes(size, charset), 4, math.MaxUint32)
	case Enum:
		return enum(size) // 1 or 2 bytes depending on member count
	case Set:
		return set(size) // up to an 8-byte bitmap
	default:
		return 0, math.MaxUint64 // unknown type: unbounded
	}
}
// String implements the ds.Data interface.
// It returns the type's lower-case MySQL name.
func (d DataType) String() string {
	return string(d)
}
// blob maps a declared payload size to a (min, max) storage range, where
// `reserved` is the length-prefix width in bytes and `max` is the type's
// capacity (payload limit is max-1). A zero size means "unspecified" and
// yields the full range.
func blob(size, reserved, max uint64) (uint64, uint64) {
	if size == 0 {
		return reserved, max - 1 + reserved
	}
	return reserved, size + reserved
}
// both returns i as both the minimum and maximum storage size, for
// fixed-width types.
func both(i uint64) (uint64, uint64) {
	return i, i
}
// bytes converts a character count into a byte count using the
// bytes-per-character factor of the named charset; unknown charsets
// yield 0. (The original bound the comma-ok result to `set`, shadowing
// the package-level Set concept in the reader's mind.)
func bytes(size uint64, charset string) uint64 {
	perChar, known := charsets[charset]
	if !known {
		return 0
	}
	return size * uint64(perChar)
}
// enum returns ENUM storage: 1 byte for up to 255 members, 2 bytes
// beyond that.
func enum(size uint64) (uint64, uint64) {
	if size <= math.MaxUint8 {
		return 1, 1
	}
	return 2, 2
}
// float returns FLOAT storage: 4 bytes for precision up to 24, else 8
// bytes (double precision).
func float(size uint64) (uint64, uint64) {
	if size <= 24 {
		return 4, 4
	}
	return 8, 8
}
// fsp returns the extra storage bytes required by a fractional-seconds
// precision: 0 bytes for fsp 0, 1 byte for fsp 1-2, 2 for fsp 3-4, and
// 3 for fsp 5-6.
func fsp(size uint64) uint64 {
	if size == 0 {
		return 0
	}
	if size <= 2 {
		return 1
	}
	if size <= 4 {
		return 2
	}
	return 3
}
// set returns SET storage: a ceil(members/8)-byte bitmap, at least 1
// byte, capped at 8 bytes for more than 64 members.
func set(size uint64) (uint64, uint64) {
	switch {
	case size > 64:
		return 8, 8
	case size == 0:
		return 1, 1
	default:
		n := (size + 7) / 8
		return n, n
	}
}
// variable returns VARCHAR/VARBINARY-style storage: a 1-byte length
// prefix for payloads up to 255 bytes, a 2-byte prefix beyond. A zero
// size means "unspecified" and yields the full 1-byte-prefix range.
func variable(size uint64) (uint64, uint64) {
	switch {
	case size > math.MaxUint8:
		return 2, size + 2
	case size == 0:
		return 1, math.MaxUint8
	default:
		return 1, size + 1
	}
}
package search
// ListEventsSummaryQueryParams holds the query parameters accepted by
// the ListEventsSummary operation. It supports fluent construction:
//
//	v := ListEventsSummaryQueryParams{}.SetCount(...).SetEarliest(...).SetField(...).SetLatest(...).SetOffset(...)
type ListEventsSummaryQueryParams struct {
	// Count is the maximum number of entries to return; 0 returns all
	// available entries.
	Count *float32 `key:"count"`
	// Earliest is the earliest-time filter in absolute time: UNIX time,
	// or ISO-8601 (%FT%T.%Q) UTC seconds, e.g. 2019-01-25T13:15:30Z.
	// GMT is the default time zone (and must be specified for UTC); any
	// offset is ignored.
	Earliest string `key:"earliest"`
	// Field is a field to return for the result set; comma-separate
	// multiple values if several fields are required.
	Field string `key:"field"`
	// Latest is the latest-time filter in absolute time (same format and
	// time-zone rules as Earliest).
	Latest string `key:"latest"`
	// Offset is the index of the first item to return.
	Offset *float32 `key:"offset"`
}

// SetCount returns a copy of p with Count set to v.
func (p ListEventsSummaryQueryParams) SetCount(v float32) ListEventsSummaryQueryParams {
	p.Count = &v
	return p
}

// SetEarliest returns a copy of p with Earliest set to v.
func (p ListEventsSummaryQueryParams) SetEarliest(v string) ListEventsSummaryQueryParams {
	p.Earliest = v
	return p
}

// SetField returns a copy of p with Field set to v.
func (p ListEventsSummaryQueryParams) SetField(v string) ListEventsSummaryQueryParams {
	p.Field = v
	return p
}

// SetLatest returns a copy of p with Latest set to v.
func (p ListEventsSummaryQueryParams) SetLatest(v string) ListEventsSummaryQueryParams {
	p.Latest = v
	return p
}

// SetOffset returns a copy of p with Offset set to v.
func (p ListEventsSummaryQueryParams) SetOffset(v float32) ListEventsSummaryQueryParams {
	p.Offset = &v
	return p
}
// ListFieldsSummaryQueryParams holds the query parameters accepted by
// the ListFieldsSummary operation. It supports fluent construction:
//
//	v := ListFieldsSummaryQueryParams{}.SetEarliest(...).SetLatest(...)
type ListFieldsSummaryQueryParams struct {
	// Earliest is the earliest-time filter in absolute time: UNIX time,
	// or ISO-8601 (%FT%T.%Q) UTC seconds, e.g. 2019-01-25T13:15:30Z.
	// GMT is the default time zone (and must be specified for UTC); any
	// offset is ignored.
	Earliest string `key:"earliest"`
	// Latest is the latest-time filter in absolute time (same format and
	// time-zone rules as Earliest).
	Latest string `key:"latest"`
}

// SetEarliest returns a copy of p with Earliest set to v.
func (p ListFieldsSummaryQueryParams) SetEarliest(v string) ListFieldsSummaryQueryParams {
	p.Earliest = v
	return p
}

// SetLatest returns a copy of p with Latest set to v.
func (p ListFieldsSummaryQueryParams) SetLatest(v string) ListFieldsSummaryQueryParams {
	p.Latest = v
	return p
}
// ListJobsQueryParams holds the query parameters accepted by the
// ListJobs operation. It supports fluent construction:
//
//	v := ListJobsQueryParams{}.SetCount(...).SetStatus(...)
type ListJobsQueryParams struct {
	// Count is the maximum number of jobs to return status entries for.
	Count *float32 `key:"count"`
	// Status filters the job list; valid values are 'running', 'done',
	// 'canceled', or 'failed'.
	Status *SearchStatus `key:"status"`
}

// SetCount returns a copy of p with Count set to v.
func (p ListJobsQueryParams) SetCount(v float32) ListJobsQueryParams {
	p.Count = &v
	return p
}

// SetStatus returns a copy of p with Status set to v.
func (p ListJobsQueryParams) SetStatus(v SearchStatus) ListJobsQueryParams {
	p.Status = &v
	return p
}
// ListPreviewResultsQueryParams holds the query parameters accepted by
// the ListPreviewResults operation. It supports fluent construction:
//
//	v := ListPreviewResultsQueryParams{}.SetCount(...).SetOffset(...)
type ListPreviewResultsQueryParams struct {
	// Count is the maximum number of entries to return; 0 returns all
	// available entries.
	Count *float32 `key:"count"`
	// Offset is the index of the first item to return.
	Offset *float32 `key:"offset"`
}

// SetCount returns a copy of p with Count set to v.
func (p ListPreviewResultsQueryParams) SetCount(v float32) ListPreviewResultsQueryParams {
	p.Count = &v
	return p
}

// SetOffset returns a copy of p with Offset set to v.
func (p ListPreviewResultsQueryParams) SetOffset(v float32) ListPreviewResultsQueryParams {
	p.Offset = &v
	return p
}
// ListResultsQueryParams holds the query parameters accepted by the
// ListResults operation. It supports fluent construction:
//
//	v := ListResultsQueryParams{}.SetCount(...).SetField(...).SetOffset(...)
type ListResultsQueryParams struct {
	// Count is the maximum number of entries to return; 0 returns all
	// available entries.
	Count *float32 `key:"count"`
	// Field is a field to return for the result set; comma-separate
	// multiple values if several fields are required.
	Field string `key:"field"`
	// Offset is the index of the first item to return.
	Offset *float32 `key:"offset"`
}

// SetCount returns a copy of p with Count set to v.
func (p ListResultsQueryParams) SetCount(v float32) ListResultsQueryParams {
	p.Count = &v
	return p
}

// SetField returns a copy of p with Field set to v.
func (p ListResultsQueryParams) SetField(v string) ListResultsQueryParams {
	p.Field = v
	return p
}

// SetOffset returns a copy of p with Offset set to v.
func (p ListResultsQueryParams) SetOffset(v float32) ListResultsQueryParams {
	p.Offset = &v
	return p
}
package ansi
import (
"fmt"
)
// Color256 represents an `xterm-256color` ANSI color code (0-255).
type Color256 int
// For W3C color list, see:
// - https://jonasjacek.github.io/colors/
// - https://en.wikipedia.org/wiki/X11_color_names#Clashes_between_web_and_X11_colors_in_the_CSS_color_scheme
const (
// Color256Black is an `xterm-256color` representing `Black` (#000000).
Color256Black Color256 = 0
// Color256Maroon is an `xterm-256color` representing `Maroon` (#800000).
Color256Maroon Color256 = 1
// Color256Green is an `xterm-256color` representing `Green` (#008000).
Color256Green Color256 = 2
// Color256Olive is an `xterm-256color` representing `Olive` (#808000).
Color256Olive Color256 = 3
// Color256Navy is an `xterm-256color` representing `Navy` (#000080).
Color256Navy Color256 = 4
// Color256Purple is an `xterm-256color` representing `Purple` (#800080).
Color256Purple Color256 = 5
// Color256Teal is an `xterm-256color` representing `Teal` (#008080).
Color256Teal Color256 = 6
// Color256Silver is an `xterm-256color` representing `Silver` (#c0c0c0).
Color256Silver Color256 = 7
// Color256Grey is an `xterm-256color` representing `Grey` (#808080).
Color256Grey Color256 = 8
// Color256Red is an `xterm-256color` representing `Red` (#ff0000).
Color256Red Color256 = 9
// Color256Lime is an `xterm-256color` representing `Lime` (#00ff00).
Color256Lime Color256 = 10
// Color256Yellow is an `xterm-256color` representing `Yellow` (#ffff00).
Color256Yellow Color256 = 11
// Color256Blue is an `xterm-256color` representing `Blue` (#0000ff).
Color256Blue Color256 = 12
// Color256Fuchsia is an `xterm-256color` representing `Fuchsia` (#ff00ff).
Color256Fuchsia Color256 = 13
// Color256Aqua is an `xterm-256color` representing `Aqua` (#00ffff).
Color256Aqua Color256 = 14
// Color256White is an `xterm-256color` representing `White` (#ffffff).
Color256White Color256 = 15
// Color256Grey0 is an `xterm-256color` representing `Grey0` (#000000).
Color256Grey0 Color256 = 16
// Color256NavyBlue is an `xterm-256color` representing `NavyBlue` (#00005f).
Color256NavyBlue Color256 = 17
// Color256DarkBlue is an `xterm-256color` representing `DarkBlue` (#000087).
Color256DarkBlue Color256 = 18
// Color256Blue3 is an `xterm-256color` representing `Blue3` (#0000af).
Color256Blue3 Color256 = 19
// Color256Blue3Alt2 is an `xterm-256color` representing `Blue3` (#0000d7).
// The `Alt2` suffix was added because the name `Blue3` describes
// multiple colors in the W3C color list.
Color256Blue3Alt2 Color256 = 20
// Color256Blue1 is an `xterm-256color` representing `Blue1` (#0000ff).
Color256Blue1 Color256 = 21
// Color256DarkGreen is an `xterm-256color` representing `DarkGreen` (#005f00).
Color256DarkGreen Color256 = 22
// Color256DeepSkyBlue4 is an `xterm-256color` representing `DeepSkyBlue4` (#005f5f).
Color256DeepSkyBlue4 Color256 = 23
// Color256DeepSkyBlue4Alt2 is an `xterm-256color` representing `DeepSkyBlue4` (#005f87).
// The `Alt2` suffix was added because the name `DeepSkyBlue4` describes
// multiple colors in the W3C color list.
Color256DeepSkyBlue4Alt2 Color256 = 24
// Color256DeepSkyBlue4Alt3 is an `xterm-256color` representing `DeepSkyBlue4` (#005faf).
// The `Alt3` suffix was added because the name `DeepSkyBlue4` describes
// multiple colors in the W3C color list.
Color256DeepSkyBlue4Alt3 Color256 = 25
// Color256DodgerBlue3 is an `xterm-256color` representing `DodgerBlue3` (#005fd7).
Color256DodgerBlue3 Color256 = 26
// Color256DodgerBlue2 is an `xterm-256color` representing `DodgerBlue2` (#005fff).
Color256DodgerBlue2 Color256 = 27
// Color256Green4 is an `xterm-256color` representing `Green4` (#008700).
Color256Green4 Color256 = 28
// Color256SpringGreen4 is an `xterm-256color` representing `SpringGreen4` (#00875f).
Color256SpringGreen4 Color256 = 29
// Color256Turquoise4 is an `xterm-256color` representing `Turquoise4` (#008787).
Color256Turquoise4 Color256 = 30
// Color256DeepSkyBlue3 is an `xterm-256color` representing `DeepSkyBlue3` (#0087af).
Color256DeepSkyBlue3 Color256 = 31
// Color256DeepSkyBlue3Alt2 is an `xterm-256color` representing `DeepSkyBlue3` (#0087d7).
// The `Alt2` suffix was added because the name `DeepSkyBlue3` describes
// multiple colors in the W3C color list.
Color256DeepSkyBlue3Alt2 Color256 = 32
// Color256DodgerBlue1 is an `xterm-256color` representing `DodgerBlue1` (#0087ff).
Color256DodgerBlue1 Color256 = 33
// Color256Green3 is an `xterm-256color` representing `Green3` (#00af00).
Color256Green3 Color256 = 34
// Color256SpringGreen3 is an `xterm-256color` representing `SpringGreen3` (#00af5f).
Color256SpringGreen3 Color256 = 35
// Color256DarkCyan is an `xterm-256color` representing `DarkCyan` (#00af87).
Color256DarkCyan Color256 = 36
// Color256LightSeaGreen is an `xterm-256color` representing `LightSeaGreen` (#00afaf).
Color256LightSeaGreen Color256 = 37
// Color256DeepSkyBlue2 is an `xterm-256color` representing `DeepSkyBlue2` (#00afd7).
Color256DeepSkyBlue2 Color256 = 38
// Color256DeepSkyBlue1 is an `xterm-256color` representing `DeepSkyBlue1` (#00afff).
Color256DeepSkyBlue1 Color256 = 39
// Color256Green3Alt2 is an `xterm-256color` representing `Green3` (#00d700).
// The `Alt2` suffix was added because the name `Green3` describes
// multiple colors in the W3C color list.
Color256Green3Alt2 Color256 = 40
// Color256SpringGreen3Alt2 is an `xterm-256color` representing `SpringGreen3` (#00d75f).
// The `Alt2` suffix was added because the name `SpringGreen3` describes
// multiple colors in the W3C color list.
Color256SpringGreen3Alt2 Color256 = 41
// Color256SpringGreen2 is an `xterm-256color` representing `SpringGreen2` (#00d787).
Color256SpringGreen2 Color256 = 42
// Color256Cyan3 is an `xterm-256color` representing `Cyan3` (#00d7af).
Color256Cyan3 Color256 = 43
// Color256DarkTurquoise is an `xterm-256color` representing `DarkTurquoise` (#00d7d7).
Color256DarkTurquoise Color256 = 44
// Color256Turquoise2 is an `xterm-256color` representing `Turquoise2` (#00d7ff).
Color256Turquoise2 Color256 = 45
// Color256Green1 is an `xterm-256color` representing `Green1` (#00ff00).
Color256Green1 Color256 = 46
// Color256SpringGreen2Alt2 is an `xterm-256color` representing `SpringGreen2` (#00ff5f).
// The `Alt2` suffix was added because the name `SpringGreen2` describes
// multiple colors in the W3C color list.
Color256SpringGreen2Alt2 Color256 = 47
// Color256SpringGreen1 is an `xterm-256color` representing `SpringGreen1` (#00ff87).
Color256SpringGreen1 Color256 = 48
// Color256MediumSpringGreen is an `xterm-256color` representing `MediumSpringGreen` (#00ffaf).
Color256MediumSpringGreen Color256 = 49
// Color256Cyan2 is an `xterm-256color` representing `Cyan2` (#00ffd7).
Color256Cyan2 Color256 = 50
// Color256Cyan1 is an `xterm-256color` representing `Cyan1` (#00ffff).
Color256Cyan1 Color256 = 51
// Color256DarkRed is an `xterm-256color` representing `DarkRed` (#5f0000).
Color256DarkRed Color256 = 52
// Color256DeepPink4 is an `xterm-256color` representing `DeepPink4` (#5f005f).
Color256DeepPink4 Color256 = 53
// Color256Purple4 is an `xterm-256color` representing `Purple4` (#5f0087).
Color256Purple4 Color256 = 54
// Color256Purple4Alt2 is an `xterm-256color` representing `Purple4` (#5f00af).
// The `Alt2` suffix was added because the name `Purple4` describes
// multiple colors in the W3C color list.
Color256Purple4Alt2 Color256 = 55
// Color256Purple3 is an `xterm-256color` representing `Purple3` (#5f00d7).
Color256Purple3 Color256 = 56
// Color256BlueViolet is an `xterm-256color` representing `BlueViolet` (#5f00ff).
Color256BlueViolet Color256 = 57
// Color256Orange4 is an `xterm-256color` representing `Orange4` (#5f5f00).
Color256Orange4 Color256 = 58
// Color256Grey37 is an `xterm-256color` representing `Grey37` (#5f5f5f).
Color256Grey37 Color256 = 59
// Color256MediumPurple4 is an `xterm-256color` representing `MediumPurple4` (#5f5f87).
Color256MediumPurple4 Color256 = 60
// Color256SlateBlue3 is an `xterm-256color` representing `SlateBlue3` (#5f5faf).
Color256SlateBlue3 Color256 = 61
// Color256SlateBlue3Alt2 is an `xterm-256color` representing `SlateBlue3` (#5f5fd7).
// The `Alt2` suffix was added because the name `SlateBlue3` describes
// multiple colors in the W3C color list.
Color256SlateBlue3Alt2 Color256 = 62
// Color256RoyalBlue1 is an `xterm-256color` representing `RoyalBlue1` (#5f5fff).
Color256RoyalBlue1 Color256 = 63
// Color256Chartreuse4 is an `xterm-256color` representing `Chartreuse4` (#5f8700).
Color256Chartreuse4 Color256 = 64
// Color256DarkSeaGreen4 is an `xterm-256color` representing `DarkSeaGreen4` (#5f875f).
Color256DarkSeaGreen4 Color256 = 65
// Color256PaleTurquoise4 is an `xterm-256color` representing `PaleTurquoise4` (#5f8787).
Color256PaleTurquoise4 Color256 = 66
// Color256SteelBlue is an `xterm-256color` representing `SteelBlue` (#5f87af).
Color256SteelBlue Color256 = 67
// Color256SteelBlue3 is an `xterm-256color` representing `SteelBlue3` (#5f87d7).
Color256SteelBlue3 Color256 = 68
// Color256CornflowerBlue is an `xterm-256color` representing `CornflowerBlue` (#5f87ff).
Color256CornflowerBlue Color256 = 69
// Color256Chartreuse3 is an `xterm-256color` representing `Chartreuse3` (#5faf00).
Color256Chartreuse3 Color256 = 70
// Color256DarkSeaGreen4Alt2 is an `xterm-256color` representing `DarkSeaGreen4` (#5faf5f).
// The `Alt2` suffix was added because the name `DarkSeaGreen4` describes
// multiple colors in the W3C color list.
Color256DarkSeaGreen4Alt2 Color256 = 71
// Color256CadetBlue is an `xterm-256color` representing `CadetBlue` (#5faf87).
Color256CadetBlue Color256 = 72
// Color256CadetBlueAlt2 is an `xterm-256color` representing `CadetBlue` (#5fafaf).
// The `Alt2` suffix was added because the name `CadetBlue` describes
// multiple colors in the W3C color list.
Color256CadetBlueAlt2 Color256 = 73
// Color256SkyBlue3 is an `xterm-256color` representing `SkyBlue3` (#5fafd7).
Color256SkyBlue3 Color256 = 74
// Color256SteelBlue1 is an `xterm-256color` representing `SteelBlue1` (#5fafff).
Color256SteelBlue1 Color256 = 75
// Color256Chartreuse3Alt2 is an `xterm-256color` representing `Chartreuse3` (#5fd700).
// The `Alt2` suffix was added because the name `Chartreuse3` describes
// multiple colors in the W3C color list.
Color256Chartreuse3Alt2 Color256 = 76
// Color256PaleGreen3 is an `xterm-256color` representing `PaleGreen3` (#5fd75f).
Color256PaleGreen3 Color256 = 77
// Color256SeaGreen3 is an `xterm-256color` representing `SeaGreen3` (#5fd787).
Color256SeaGreen3 Color256 = 78
// Color256Aquamarine3 is an `xterm-256color` representing `Aquamarine3` (#5fd7af).
Color256Aquamarine3 Color256 = 79
// Color256MediumTurquoise is an `xterm-256color` representing `MediumTurquoise` (#5fd7d7).
Color256MediumTurquoise Color256 = 80
// Color256SteelBlue1Alt2 is an `xterm-256color` representing `SteelBlue1` (#5fd7ff).
// The `Alt2` suffix was added because the name `SteelBlue1` describes
// multiple colors in the W3C color list.
Color256SteelBlue1Alt2 Color256 = 81
// Color256Chartreuse2 is an `xterm-256color` representing `Chartreuse2` (#5fff00).
Color256Chartreuse2 Color256 = 82
// Color256SeaGreen2 is an `xterm-256color` representing `SeaGreen2` (#5fff5f).
Color256SeaGreen2 Color256 = 83
// Color256SeaGreen1 is an `xterm-256color` representing `SeaGreen1` (#5fff87).
Color256SeaGreen1 Color256 = 84
// Color256SeaGreen1Alt2 is an `xterm-256color` representing `SeaGreen1` (#5fffaf).
// The `Alt2` suffix was added because the name `SeaGreen1` describes
// multiple colors in the W3C color list.
Color256SeaGreen1Alt2 Color256 = 85
// Color256Aquamarine1 is an `xterm-256color` representing `Aquamarine1` (#5fffd7).
Color256Aquamarine1 Color256 = 86
// Color256DarkSlateGray2 is an `xterm-256color` representing `DarkSlateGray2` (#5fffff).
Color256DarkSlateGray2 Color256 = 87
// Color256DarkRedAlt2 is an `xterm-256color` representing `DarkRed` (#870000).
// The `Alt2` suffix was added because the name `DarkRed` describes
// multiple colors in the W3C color list.
Color256DarkRedAlt2 Color256 = 88
// Color256DeepPink4Alt2 is an `xterm-256color` representing `DeepPink4` (#87005f).
// The `Alt2` suffix was added because the name `DeepPink4` describes
// multiple colors in the W3C color list.
Color256DeepPink4Alt2 Color256 = 89
// Color256DarkMagenta is an `xterm-256color` representing `DarkMagenta` (#870087).
Color256DarkMagenta Color256 = 90
// Color256DarkMagentaAlt2 is an `xterm-256color` representing `DarkMagenta` (#8700af).
// The `Alt2` suffix was added because the name `DarkMagenta` describes
// multiple colors in the W3C color list.
Color256DarkMagentaAlt2 Color256 = 91
// Color256DarkViolet is an `xterm-256color` representing `DarkViolet` (#8700d7).
Color256DarkViolet Color256 = 92
// Color256PurpleAlt2 is an `xterm-256color` representing `Purple` (#8700ff).
// The `Alt2` suffix was added because the name `Purple` describes
// multiple colors in the W3C color list.
Color256PurpleAlt2 Color256 = 93
// Color256Orange4Alt2 is an `xterm-256color` representing `Orange4` (#875f00).
// The `Alt2` suffix was added because the name `Orange4` describes
// multiple colors in the W3C color list.
Color256Orange4Alt2 Color256 = 94
// Color256LightPink4 is an `xterm-256color` representing `LightPink4` (#875f5f).
Color256LightPink4 Color256 = 95
// Color256Plum4 is an `xterm-256color` representing `Plum4` (#875f87).
Color256Plum4 Color256 = 96
// Color256MediumPurple3 is an `xterm-256color` representing `MediumPurple3` (#875faf).
Color256MediumPurple3 Color256 = 97
// Color256MediumPurple3Alt2 is an `xterm-256color` representing `MediumPurple3` (#875fd7).
// The `Alt2` suffix was added because the name `MediumPurple3` describes
// multiple colors in the W3C color list.
Color256MediumPurple3Alt2 Color256 = 98
// Color256SlateBlue1 is an `xterm-256color` representing `SlateBlue1` (#875fff).
Color256SlateBlue1 Color256 = 99
// Color256Yellow4 is an `xterm-256color` representing `Yellow4` (#878700).
Color256Yellow4 Color256 = 100
// Color256Wheat4 is an `xterm-256color` representing `Wheat4` (#87875f).
Color256Wheat4 Color256 = 101
// Color256Grey53 is an `xterm-256color` representing `Grey53` (#878787).
Color256Grey53 Color256 = 102
// Color256LightSlateGrey is an `xterm-256color` representing `LightSlateGrey` (#8787af).
Color256LightSlateGrey Color256 = 103
// Color256MediumPurple is an `xterm-256color` representing `MediumPurple` (#8787d7).
Color256MediumPurple Color256 = 104
// Color256LightSlateBlue is an `xterm-256color` representing `LightSlateBlue` (#8787ff).
Color256LightSlateBlue Color256 = 105
// Color256Yellow4Alt2 is an `xterm-256color` representing `Yellow4` (#87af00).
// The `Alt2` suffix was added because the name `Yellow4` describes
// multiple colors in the W3C color list.
Color256Yellow4Alt2 Color256 = 106
// Color256DarkOliveGreen3 is an `xterm-256color` representing `DarkOliveGreen3` (#87af5f).
Color256DarkOliveGreen3 Color256 = 107
// Color256DarkSeaGreen is an `xterm-256color` representing `DarkSeaGreen` (#87af87).
Color256DarkSeaGreen Color256 = 108
// Color256LightSkyBlue3 is an `xterm-256color` representing `LightSkyBlue3` (#87afaf).
Color256LightSkyBlue3 Color256 = 109
// Color256LightSkyBlue3Alt2 is an `xterm-256color` representing `LightSkyBlue3` (#87afd7).
// The `Alt2` suffix was added because the name `LightSkyBlue3` describes
// multiple colors in the W3C color list.
Color256LightSkyBlue3Alt2 Color256 = 110
// Color256SkyBlue2 is an `xterm-256color` representing `SkyBlue2` (#87afff).
Color256SkyBlue2 Color256 = 111
// Color256Chartreuse2Alt2 is an `xterm-256color` representing `Chartreuse2` (#87d700).
// The `Alt2` suffix was added because the name `Chartreuse2` describes
// multiple colors in the W3C color list.
Color256Chartreuse2Alt2 Color256 = 112
// Color256DarkOliveGreen3Alt2 is an `xterm-256color` representing `DarkOliveGreen3` (#87d75f).
// The `Alt2` suffix was added because the name `DarkOliveGreen3` describes
// multiple colors in the W3C color list.
Color256DarkOliveGreen3Alt2 Color256 = 113
// Color256PaleGreen3Alt2 is an `xterm-256color` representing `PaleGreen3` (#87d787).
// The `Alt2` suffix was added because the name `PaleGreen3` describes
// multiple colors in the W3C color list.
Color256PaleGreen3Alt2 Color256 = 114
// Color256DarkSeaGreen3 is an `xterm-256color` representing `DarkSeaGreen3` (#87d7af).
Color256DarkSeaGreen3 Color256 = 115
// Color256DarkSlateGray3 is an `xterm-256color` representing `DarkSlateGray3` (#87d7d7).
Color256DarkSlateGray3 Color256 = 116
// Color256SkyBlue1 is an `xterm-256color` representing `SkyBlue1` (#87d7ff).
Color256SkyBlue1 Color256 = 117
// Color256Chartreuse1 is an `xterm-256color` representing `Chartreuse1` (#87ff00).
Color256Chartreuse1 Color256 = 118
// Color256LightGreen is an `xterm-256color` representing `LightGreen` (#87ff5f).
Color256LightGreen Color256 = 119
// Color256LightGreenAlt2 is an `xterm-256color` representing `LightGreen` (#87ff87).
// The `Alt2` suffix was added because the name `LightGreen` describes
// multiple colors in the W3C color list.
Color256LightGreenAlt2 Color256 = 120
// Color256PaleGreen1 is an `xterm-256color` representing `PaleGreen1` (#87ffaf).
Color256PaleGreen1 Color256 = 121
// Color256Aquamarine1Alt2 is an `xterm-256color` representing `Aquamarine1` (#87ffd7).
// The `Alt2` suffix was added because the name `Aquamarine1` describes
// multiple colors in the W3C color list.
Color256Aquamarine1Alt2 Color256 = 122
// Color256DarkSlateGray1 is an `xterm-256color` representing `DarkSlateGray1` (#87ffff).
Color256DarkSlateGray1 Color256 = 123
// Color256Red3 is an `xterm-256color` representing `Red3` (#af0000).
Color256Red3 Color256 = 124
// Color256DeepPink4Alt3 is an `xterm-256color` representing `DeepPink4` (#af005f).
// The `Alt3` suffix was added because the name `DeepPink4` describes
// multiple colors in the W3C color list.
Color256DeepPink4Alt3 Color256 = 125
// Color256MediumVioletRed is an `xterm-256color` representing `MediumVioletRed` (#af0087).
Color256MediumVioletRed Color256 = 126
// Color256Magenta3 is an `xterm-256color` representing `Magenta3` (#af00af).
Color256Magenta3 Color256 = 127
// Color256DarkVioletAlt2 is an `xterm-256color` representing `DarkViolet` (#af00d7).
// The `Alt2` suffix was added because the name `DarkViolet` describes
// multiple colors in the W3C color list.
Color256DarkVioletAlt2 Color256 = 128
// Color256PurpleAlt3 is an `xterm-256color` representing `Purple` (#af00ff).
// The `Alt3` suffix was added because the name `Purple` describes
// multiple colors in the W3C color list.
Color256PurpleAlt3 Color256 = 129
// Color256DarkOrange3 is an `xterm-256color` representing `DarkOrange3` (#af5f00).
Color256DarkOrange3 Color256 = 130
// Color256IndianRed is an `xterm-256color` representing `IndianRed` (#af5f5f).
Color256IndianRed Color256 = 131
// Color256HotPink3 is an `xterm-256color` representing `HotPink3` (#af5f87).
Color256HotPink3 Color256 = 132
// Color256MediumOrchid3 is an `xterm-256color` representing `MediumOrchid3` (#af5faf).
Color256MediumOrchid3 Color256 = 133
// Color256MediumOrchid is an `xterm-256color` representing `MediumOrchid` (#af5fd7).
Color256MediumOrchid Color256 = 134
// Color256MediumPurple2 is an `xterm-256color` representing `MediumPurple2` (#af5fff).
Color256MediumPurple2 Color256 = 135
// Color256DarkGoldenrod is an `xterm-256color` representing `DarkGoldenrod` (#af8700).
Color256DarkGoldenrod Color256 = 136
// Color256LightSalmon3 is an `xterm-256color` representing `LightSalmon3` (#af875f).
Color256LightSalmon3 Color256 = 137
// Color256RosyBrown is an `xterm-256color` representing `RosyBrown` (#af8787).
Color256RosyBrown Color256 = 138
// Color256Grey63 is an `xterm-256color` representing `Grey63` (#af87af).
Color256Grey63 Color256 = 139
// Color256MediumPurple2Alt2 is an `xterm-256color` representing `MediumPurple2` (#af87d7).
// The `Alt2` suffix was added because the name `MediumPurple2` describes
// multiple colors in the W3C color list.
Color256MediumPurple2Alt2 Color256 = 140
// Color256MediumPurple1 is an `xterm-256color` representing `MediumPurple1` (#af87ff).
Color256MediumPurple1 Color256 = 141
// Color256Gold3 is an `xterm-256color` representing `Gold3` (#afaf00).
Color256Gold3 Color256 = 142
// Color256DarkKhaki is an `xterm-256color` representing `DarkKhaki` (#afaf5f).
Color256DarkKhaki Color256 = 143
// Color256NavajoWhite3 is an `xterm-256color` representing `NavajoWhite3` (#afaf87).
Color256NavajoWhite3 Color256 = 144
// Color256Grey69 is an `xterm-256color` representing `Grey69` (#afafaf).
Color256Grey69 Color256 = 145
// Color256LightSteelBlue3 is an `xterm-256color` representing `LightSteelBlue3` (#afafd7).
Color256LightSteelBlue3 Color256 = 146
// Color256LightSteelBlue is an `xterm-256color` representing `LightSteelBlue` (#afafff).
Color256LightSteelBlue Color256 = 147
// Color256Yellow3 is an `xterm-256color` representing `Yellow3` (#afd700).
Color256Yellow3 Color256 = 148
// Color256DarkOliveGreen3Alt3 is an `xterm-256color` representing `DarkOliveGreen3` (#afd75f).
// The `Alt3` suffix was added because the name `DarkOliveGreen3` describes
// multiple colors in the W3C color list.
Color256DarkOliveGreen3Alt3 Color256 = 149
// Color256DarkSeaGreen3Alt2 is an `xterm-256color` representing `DarkSeaGreen3` (#afd787).
// The `Alt2` suffix was added because the name `DarkSeaGreen3` describes
// multiple colors in the W3C color list.
Color256DarkSeaGreen3Alt2 Color256 = 150
// Color256DarkSeaGreen2 is an `xterm-256color` representing `DarkSeaGreen2` (#afd7af).
Color256DarkSeaGreen2 Color256 = 151
// Color256LightCyan3 is an `xterm-256color` representing `LightCyan3` (#afd7d7).
Color256LightCyan3 Color256 = 152
// Color256LightSkyBlue1 is an `xterm-256color` representing `LightSkyBlue1` (#afd7ff).
Color256LightSkyBlue1 Color256 = 153
// Color256GreenYellow is an `xterm-256color` representing `GreenYellow` (#afff00).
Color256GreenYellow Color256 = 154
// Color256DarkOliveGreen2 is an `xterm-256color` representing `DarkOliveGreen2` (#afff5f).
Color256DarkOliveGreen2 Color256 = 155
// Color256PaleGreen1Alt2 is an `xterm-256color` representing `PaleGreen1` (#afff87).
// The `Alt2` suffix was added because the name `PaleGreen1` describes
// multiple colors in the W3C color list.
Color256PaleGreen1Alt2 Color256 = 156
// Color256DarkSeaGreen2Alt2 is an `xterm-256color` representing `DarkSeaGreen2` (#afffaf).
// The `Alt2` suffix was added because the name `DarkSeaGreen2` describes
// multiple colors in the W3C color list.
Color256DarkSeaGreen2Alt2 Color256 = 157
// Color256DarkSeaGreen1 is an `xterm-256color` representing `DarkSeaGreen1` (#afffd7).
Color256DarkSeaGreen1 Color256 = 158
// Color256PaleTurquoise1 is an `xterm-256color` representing `PaleTurquoise1` (#afffff).
Color256PaleTurquoise1 Color256 = 159
// Color256Red3Alt2 is an `xterm-256color` representing `Red3` (#d70000).
// The `Alt2` suffix was added because the name `Red3` describes
// multiple colors in the W3C color list.
Color256Red3Alt2 Color256 = 160
// Color256DeepPink3 is an `xterm-256color` representing `DeepPink3` (#d7005f).
Color256DeepPink3 Color256 = 161
// Color256DeepPink3Alt2 is an `xterm-256color` representing `DeepPink3` (#d70087).
// The `Alt2` suffix was added because the name `DeepPink3` describes
// multiple colors in the W3C color list.
Color256DeepPink3Alt2 Color256 = 162
// Color256Magenta3Alt2 is an `xterm-256color` representing `Magenta3` (#d700af).
// The `Alt2` suffix was added because the name `Magenta3` describes
// multiple colors in the W3C color list.
Color256Magenta3Alt2 Color256 = 163
// Color256Magenta3Alt3 is an `xterm-256color` representing `Magenta3` (#d700d7).
// The `Alt3` suffix was added because the name `Magenta3` describes
// multiple colors in the W3C color list.
Color256Magenta3Alt3 Color256 = 164
// Color256Magenta2 is an `xterm-256color` representing `Magenta2` (#d700ff).
Color256Magenta2 Color256 = 165
// Color256DarkOrange3Alt2 is an `xterm-256color` representing `DarkOrange3` (#d75f00).
// The `Alt2` suffix was added because the name `DarkOrange3` describes
// multiple colors in the W3C color list.
Color256DarkOrange3Alt2 Color256 = 166
// Color256IndianRedAlt2 is an `xterm-256color` representing `IndianRed` (#d75f5f).
// The `Alt2` suffix was added because the name `IndianRed` describes
// multiple colors in the W3C color list.
Color256IndianRedAlt2 Color256 = 167
// Color256HotPink3Alt2 is an `xterm-256color` representing `HotPink3` (#d75f87).
// The `Alt2` suffix was added because the name `HotPink3` describes
// multiple colors in the W3C color list.
Color256HotPink3Alt2 Color256 = 168
// Color256HotPink2 is an `xterm-256color` representing `HotPink2` (#d75faf).
Color256HotPink2 Color256 = 169
// Color256Orchid is an `xterm-256color` representing `Orchid` (#d75fd7).
Color256Orchid Color256 = 170
// Color256MediumOrchid1 is an `xterm-256color` representing `MediumOrchid1` (#d75fff).
Color256MediumOrchid1 Color256 = 171
// Color256Orange3 is an `xterm-256color` representing `Orange3` (#d78700).
Color256Orange3 Color256 = 172
// Color256LightSalmon3Alt2 is an `xterm-256color` representing `LightSalmon3` (#d7875f).
// The `Alt2` suffix was added because the name `LightSalmon3` describes
// multiple colors in the W3C color list.
Color256LightSalmon3Alt2 Color256 = 173
// Color256LightPink3 is an `xterm-256color` representing `LightPink3` (#d78787).
Color256LightPink3 Color256 = 174
// Color256Pink3 is an `xterm-256color` representing `Pink3` (#d787af).
Color256Pink3 Color256 = 175
// Color256Plum3 is an `xterm-256color` representing `Plum3` (#d787d7).
Color256Plum3 Color256 = 176
// Color256Violet is an `xterm-256color` representing `Violet` (#d787ff).
Color256Violet Color256 = 177
// Color256Gold3Alt2 is an `xterm-256color` representing `Gold3` (#d7af00).
// The `Alt2` suffix was added because the name `Gold3` describes
// multiple colors in the W3C color list.
Color256Gold3Alt2 Color256 = 178
// Color256LightGoldenrod3 is an `xterm-256color` representing `LightGoldenrod3` (#d7af5f).
Color256LightGoldenrod3 Color256 = 179
// Color256Tan is an `xterm-256color` representing `Tan` (#d7af87).
Color256Tan Color256 = 180
// Color256MistyRose3 is an `xterm-256color` representing `MistyRose3` (#d7afaf).
Color256MistyRose3 Color256 = 181
// Color256Thistle3 is an `xterm-256color` representing `Thistle3` (#d7afd7).
Color256Thistle3 Color256 = 182
// Color256Plum2 is an `xterm-256color` representing `Plum2` (#d7afff).
Color256Plum2 Color256 = 183
// Color256Yellow3Alt2 is an `xterm-256color` representing `Yellow3` (#d7d700).
// The `Alt2` suffix was added because the name `Yellow3` describes
// multiple colors in the W3C color list.
Color256Yellow3Alt2 Color256 = 184
// Color256Khaki3 is an `xterm-256color` representing `Khaki3` (#d7d75f).
Color256Khaki3 Color256 = 185
// Color256LightGoldenrod2 is an `xterm-256color` representing `LightGoldenrod2` (#d7d787).
Color256LightGoldenrod2 Color256 = 186
// Color256LightYellow3 is an `xterm-256color` representing `LightYellow3` (#d7d7af).
Color256LightYellow3 Color256 = 187
// Color256Grey84 is an `xterm-256color` representing `Grey84` (#d7d7d7).
Color256Grey84 Color256 = 188
// Color256LightSteelBlue1 is an `xterm-256color` representing `LightSteelBlue1` (#d7d7ff).
Color256LightSteelBlue1 Color256 = 189
// Color256Yellow2 is an `xterm-256color` representing `Yellow2` (#d7ff00).
Color256Yellow2 Color256 = 190
// Color256DarkOliveGreen1 is an `xterm-256color` representing `DarkOliveGreen1` (#d7ff5f).
Color256DarkOliveGreen1 Color256 = 191
// Color256DarkOliveGreen1Alt2 is an `xterm-256color` representing `DarkOliveGreen1` (#d7ff87).
// The `Alt2` suffix was added because the name `DarkOliveGreen1` describes
// multiple colors in the W3C color list.
Color256DarkOliveGreen1Alt2 Color256 = 192
// Color256DarkSeaGreen1Alt2 is an `xterm-256color` representing `DarkSeaGreen1` (#d7ffaf).
// The `Alt2` suffix was added because the name `DarkSeaGreen1` describes
// multiple colors in the W3C color list.
Color256DarkSeaGreen1Alt2 Color256 = 193
// Color256Honeydew2 is an `xterm-256color` representing `Honeydew2` (#d7ffd7).
Color256Honeydew2 Color256 = 194
// Color256LightCyan1 is an `xterm-256color` representing `LightCyan1` (#d7ffff).
Color256LightCyan1 Color256 = 195
// Color256Red1 is an `xterm-256color` representing `Red1` (#ff0000).
Color256Red1 Color256 = 196
// Color256DeepPink2 is an `xterm-256color` representing `DeepPink2` (#ff005f).
Color256DeepPink2 Color256 = 197
// Color256DeepPink1 is an `xterm-256color` representing `DeepPink1` (#ff0087).
Color256DeepPink1 Color256 = 198
// Color256DeepPink1Alt2 is an `xterm-256color` representing `DeepPink1` (#ff00af).
// The `Alt2` suffix was added because the name `DeepPink1` describes
// multiple colors in the W3C color list.
Color256DeepPink1Alt2 Color256 = 199
// Color256Magenta2Alt2 is an `xterm-256color` representing `Magenta2` (#ff00d7).
// The `Alt2` suffix was added because the name `Magenta2` describes
// multiple colors in the W3C color list.
Color256Magenta2Alt2 Color256 = 200
// Color256Magenta1 is an `xterm-256color` representing `Magenta1` (#ff00ff).
Color256Magenta1 Color256 = 201
// Color256OrangeRed1 is an `xterm-256color` representing `OrangeRed1` (#ff5f00).
Color256OrangeRed1 Color256 = 202
// Color256IndianRed1 is an `xterm-256color` representing `IndianRed1` (#ff5f5f).
Color256IndianRed1 Color256 = 203
// Color256IndianRed1Alt2 is an `xterm-256color` representing `IndianRed1` (#ff5f87).
// The `Alt2` suffix was added because the name `IndianRed1` describes
// multiple colors in the W3C color list.
Color256IndianRed1Alt2 Color256 = 204
// Color256HotPink is an `xterm-256color` representing `HotPink` (#ff5faf).
Color256HotPink Color256 = 205
// Color256HotPinkAlt2 is an `xterm-256color` representing `HotPink` (#ff5fd7).
// The `Alt2` suffix was added because the name `HotPink` describes
// multiple colors in the W3C color list.
Color256HotPinkAlt2 Color256 = 206
// Color256MediumOrchid1Alt2 is an `xterm-256color` representing `MediumOrchid1` (#ff5fff).
// The `Alt2` suffix was added because the name `MediumOrchid1` describes
// multiple colors in the W3C color list.
Color256MediumOrchid1Alt2 Color256 = 207
// Color256DarkOrange is an `xterm-256color` representing `DarkOrange` (#ff8700).
Color256DarkOrange Color256 = 208
// Color256Salmon1 is an `xterm-256color` representing `Salmon1` (#ff875f).
Color256Salmon1 Color256 = 209
// Color256LightCoral is an `xterm-256color` representing `LightCoral` (#ff8787).
Color256LightCoral Color256 = 210
// Color256PaleVioletRed1 is an `xterm-256color` representing `PaleVioletRed1` (#ff87af).
Color256PaleVioletRed1 Color256 = 211
// Color256Orchid2 is an `xterm-256color` representing `Orchid2` (#ff87d7).
Color256Orchid2 Color256 = 212
// Color256Orchid1 is an `xterm-256color` representing `Orchid1` (#ff87ff).
Color256Orchid1 Color256 = 213
// Color256Orange1 is an `xterm-256color` representing `Orange1` (#ffaf00).
Color256Orange1 Color256 = 214
// Color256SandyBrown is an `xterm-256color` representing `SandyBrown` (#ffaf5f).
Color256SandyBrown Color256 = 215
// Color256LightSalmon1 is an `xterm-256color` representing `LightSalmon1` (#ffaf87).
Color256LightSalmon1 Color256 = 216
// Color256LightPink1 is an `xterm-256color` representing `LightPink1` (#ffafaf).
Color256LightPink1 Color256 = 217
// Color256Pink1 is an `xterm-256color` representing `Pink1` (#ffafd7).
Color256Pink1 Color256 = 218
// Color256Plum1 is an `xterm-256color` representing `Plum1` (#ffafff).
Color256Plum1 Color256 = 219
// Color256Gold1 is an `xterm-256color` representing `Gold1` (#ffd700).
Color256Gold1 Color256 = 220
// Color256LightGoldenrod2Alt2 is an `xterm-256color` representing `LightGoldenrod2` (#ffd75f).
// The `Alt2` suffix was added because the name `LightGoldenrod2` describes
// multiple colors in the W3C color list.
Color256LightGoldenrod2Alt2 Color256 = 221
// Color256LightGoldenrod2Alt3 is an `xterm-256color` representing `LightGoldenrod2` (#ffd787).
// The `Alt3` suffix was added because the name `LightGoldenrod2` describes
// multiple colors in the W3C color list.
Color256LightGoldenrod2Alt3 Color256 = 222
// Color256NavajoWhite1 is an `xterm-256color` representing `NavajoWhite1` (#ffd7af).
Color256NavajoWhite1 Color256 = 223
// Color256MistyRose1 is an `xterm-256color` representing `MistyRose1` (#ffd7d7).
Color256MistyRose1 Color256 = 224
// Color256Thistle1 is an `xterm-256color` representing `Thistle1` (#ffd7ff).
Color256Thistle1 Color256 = 225
// Color256Yellow1 is an `xterm-256color` representing `Yellow1` (#ffff00).
Color256Yellow1 Color256 = 226
// Color256LightGoldenrod1 is an `xterm-256color` representing `LightGoldenrod1` (#ffff5f).
Color256LightGoldenrod1 Color256 = 227
// Color256Khaki1 is an `xterm-256color` representing `Khaki1` (#ffff87).
Color256Khaki1 Color256 = 228
// Color256Wheat1 is an `xterm-256color` representing `Wheat1` (#ffffaf).
Color256Wheat1 Color256 = 229
// Color256Cornsilk1 is an `xterm-256color` representing `Cornsilk1` (#ffffd7).
Color256Cornsilk1 Color256 = 230
// Color256Grey100 is an `xterm-256color` representing `Grey100` (#ffffff).
Color256Grey100 Color256 = 231
// Color256Grey3 is an `xterm-256color` representing `Grey3` (#080808).
Color256Grey3 Color256 = 232
// Color256Grey7 is an `xterm-256color` representing `Grey7` (#121212).
Color256Grey7 Color256 = 233
// Color256Grey11 is an `xterm-256color` representing `Grey11` (#1c1c1c).
Color256Grey11 Color256 = 234
// Color256Grey15 is an `xterm-256color` representing `Grey15` (#262626).
Color256Grey15 Color256 = 235
// Color256Grey19 is an `xterm-256color` representing `Grey19` (#303030).
Color256Grey19 Color256 = 236
// Color256Grey23 is an `xterm-256color` representing `Grey23` (#3a3a3a).
Color256Grey23 Color256 = 237
// Color256Grey27 is an `xterm-256color` representing `Grey27` (#444444).
Color256Grey27 Color256 = 238
// Color256Grey30 is an `xterm-256color` representing `Grey30` (#4e4e4e).
Color256Grey30 Color256 = 239
// Color256Grey35 is an `xterm-256color` representing `Grey35` (#585858).
Color256Grey35 Color256 = 240
// Color256Grey39 is an `xterm-256color` representing `Grey39` (#626262).
Color256Grey39 Color256 = 241
// Color256Grey42 is an `xterm-256color` representing `Grey42` (#6c6c6c).
Color256Grey42 Color256 = 242
// Color256Grey46 is an `xterm-256color` representing `Grey46` (#767676).
Color256Grey46 Color256 = 243
// Color256Grey50 is an `xterm-256color` representing `Grey50` (#808080).
Color256Grey50 Color256 = 244
// Color256Grey54 is an `xterm-256color` representing `Grey54` (#8a8a8a).
Color256Grey54 Color256 = 245
// Color256Grey58 is an `xterm-256color` representing `Grey58` (#949494).
Color256Grey58 Color256 = 246
// Color256Grey62 is an `xterm-256color` representing `Grey62` (#9e9e9e).
Color256Grey62 Color256 = 247
// Color256Grey66 is an `xterm-256color` representing `Grey66` (#a8a8a8).
Color256Grey66 Color256 = 248
// Color256Grey70 is an `xterm-256color` representing `Grey70` (#b2b2b2).
Color256Grey70 Color256 = 249
// Color256Grey74 is an `xterm-256color` representing `Grey74` (#bcbcbc).
Color256Grey74 Color256 = 250
// Color256Grey78 is an `xterm-256color` representing `Grey78` (#c6c6c6).
Color256Grey78 Color256 = 251
// Color256Grey82 is an `xterm-256color` representing `Grey82` (#d0d0d0).
Color256Grey82 Color256 = 252
// Color256Grey85 is an `xterm-256color` representing `Grey85` (#dadada).
Color256Grey85 Color256 = 253
// Color256Grey89 is an `xterm-256color` representing `Grey89` (#e4e4e4).
Color256Grey89 Color256 = 254
// Color256Grey93 is an `xterm-256color` representing `Grey93` (#eeeeee).
Color256Grey93 Color256 = 255
)
// Apply wraps text in the ANSI escape sequence that selects this 256-color
// value as the foreground color (SGR "38;5;<n>m") and appends ColorReset so
// subsequent terminal output is unaffected.
func (c Color256) Apply(text string) string {
	return fmt.Sprintf("\033[38;5;%dm%s%s", c, text, ColorReset)
} | ansi/color256.go | 0.916154 | 0.734822 | color256.go | starcoder
package temporal
import "time"
// MinuteStart will return the starting time of the minute for the given time.Time object
func MinuteStart(t time.Time) time.Time {
y, m, d := t.Date()
return time.Date(y, m, d, t.Hour(), t.Minute(), 0, 0, t.Location())
}
// MinuteFinish will return the final time of the minute for the given time.Time object
func MinuteFinish(t time.Time) time.Time {
y, m, d := t.Date()
return time.Date(y, m, d, t.Hour(), t.Minute(), 59, int(time.Second-time.Nanosecond), t.Location())
}
// HourStart will return the starting time of the hour for the given time.Time object
func HourStart(t time.Time) time.Time {
y, m, d := t.Date()
return time.Date(y, m, d, t.Hour(), 0, 0, 0, t.Location())
}
// HourFinish will return the final time of the hour for the given time.Time object
func HourFinish(t time.Time) time.Time {
y, m, d := t.Date()
return time.Date(y, m, d, t.Hour(), 59, 59, int(time.Second-time.Nanosecond), t.Location())
}
// DayStart will return the starting time of the day for the given time.Time object
func DayStart(t time.Time) time.Time {
y, m, d := t.Date()
return time.Date(y, m, d, 0, 0, 0, 0, t.Location())
}
// DayFinish will return the final time of the day for the given time.Time object
func DayFinish(t time.Time) time.Time {
y, m, d := t.Date()
return time.Date(y, m, d, 23, 59, 59, int(time.Second-time.Nanosecond), t.Location())
}
// WeekStart will return the starting time of the week for the given time.Time object
func WeekStart(t time.Time) time.Time {
weekday := int(DayStart(t).Weekday())
return t.AddDate(0, 0, -weekday)
}
// WeekFinish returns the final instant of the week containing t: one full
// week after WeekStart(t), minus a nanosecond.
func WeekFinish(t time.Time) time.Time {
	nextWeek := WeekStart(t).AddDate(0, 0, 7)
	return nextWeek.Add(-time.Nanosecond)
}
// MonthStart will return the starting time of the month for the given time.Time object
func MonthStart(t time.Time) time.Time {
y, m, _ := t.Date()
return time.Date(y, m, 1, 0, 0, 0, 0, t.Location())
}
// MonthFinish returns the final instant of the month containing t: the
// start of the next month, minus a nanosecond.
func MonthFinish(t time.Time) time.Time {
	nextMonth := MonthStart(t).AddDate(0, 1, 0)
	return nextMonth.Add(-time.Nanosecond)
}
// YearStart will return the starting time of the year for the given time.Time object
func YearStart(t time.Time) time.Time {
return time.Date(t.Year(), time.January, 1, 0, 0, 0, 0, t.Location())
}
// YearFinish will return the final time of the year for the given time.Time
// object: the last nanosecond of December 31 in t's location (start of the
// next year, minus one nanosecond).
func YearFinish(t time.Time) time.Time {
	return YearStart(t).AddDate(1, 0, 0).Add(-time.Nanosecond)
} | bounds.go | 0.589835 | 0.50177 | bounds.go | starcoder
package color
import (
"github.com/lucasb-eyer/go-colorful"
)
// NativeColorspace describes the native color space of a convertible color.
type NativeColorspace uint8
const (
	// HueSat means the color is natively represented as hue and saturation of the sRGB colorspace.
	HueSat NativeColorspace = 0
	// XYY means the color is natively represented as the xyY coordinates of the CIE 1931 colorspace.
	XYY NativeColorspace = 1
	// SRGB means the color is natively represented as red, green and blue in the screen colorspace.
	SRGB NativeColorspace = 2
)
// ConvertibleColor is an interface that allows acceptance of a color
// implementation that supports modes required by a device. Each accessor
// converts from the color's native representation when necessary.
type ConvertibleColor interface {
	// HSV returns the color as hue, saturation and value, converted from the native format if required.
	HSV() (float64, float64, float64)
	// XYY returns the color as CIE 1931 xyY coordinates (x, y chromaticity and Y luminance), converted from the native format if required.
	XYY() (float64, float64, float64)
	// RGB returns the color as 8-bit red, green and blue components, converted from the native format if required.
	RGB() (uint8, uint8, uint8)
	// NativeColorspace returns the native color space of the color.
	NativeColorspace() NativeColorspace
}
// Compile-time check that *XYColor satisfies ConvertibleColor.
var _ ConvertibleColor = (*XYColor)(nil)
// XYColor stores a color natively as CIE 1931 xyY coordinates:
// X and Y are the chromaticity coordinates, Y2 the luminance.
type XYColor struct {
	X float64
	Y float64
	Y2 float64
}
// HSV converts the xyY color to hue, saturation and value.
func (c XYColor) HSV() (float64, float64, float64) {
	return colorful.Xyy(c.X, c.Y, c.Y2).Hsv()
}
// XYY returns the stored xyY coordinates directly.
func (c XYColor) XYY() (float64, float64, float64) {
	return c.X, c.Y, c.Y2
}
// RGB converts the chromaticity to 8-bit RGB components.
// NOTE(review): the luminance passed to colorful.Xyy is pinned to 100.0
// instead of using c.Y2 — presumably so brightness is handled separately
// by the device; confirm this is intentional.
func (c XYColor) RGB() (uint8, uint8, uint8) {
	return colorful.Xyy(c.X, c.Y, 100.0).RGB255()
}
// NativeColorspace reports XYY as the native color space.
func (c XYColor) NativeColorspace() NativeColorspace {
	return XYY
}
// Compile-time check that *HSVColor satisfies ConvertibleColor.
var _ ConvertibleColor = (*HSVColor)(nil)
// HSVColor stores a color natively as hue, saturation and value.
type HSVColor struct {
	Hue float64
	Sat float64
	Value float64
}
// HSV returns the stored hue, saturation and value directly.
func (c HSVColor) HSV() (float64, float64, float64) {
	return c.Hue, c.Sat, c.Value
}
// XYY converts the HSV color to CIE 1931 xyY coordinates.
func (c HSVColor) XYY() (float64, float64, float64) {
	return colorful.Hsv(c.Hue, c.Sat, c.Value).Xyy()
}
// RGB converts hue and saturation to 8-bit RGB components.
// NOTE(review): the value passed to colorful.Hsv is pinned to 1.0 instead
// of using c.Value — presumably so brightness is handled separately by the
// device; confirm this is intentional.
func (c HSVColor) RGB() (uint8, uint8, uint8) {
	return colorful.Hsv(c.Hue, c.Sat, 1.0).RGB255()
}
// NativeColorspace reports HueSat as the native color space.
func (c HSVColor) NativeColorspace() NativeColorspace {
	return HueSat
}
// Compile-time check that *SRGBColor satisfies ConvertibleColor.
var _ ConvertibleColor = (*SRGBColor)(nil)
// SRGBColor stores a color natively as 8-bit sRGB components.
type SRGBColor struct {
	R uint8
	G uint8
	B uint8
}
// HSV converts the 8-bit RGB components (scaled to [0,1]) to hue,
// saturation and value.
func (c SRGBColor) HSV() (float64, float64, float64) {
	return colorful.Color{R: float64(c.R) / 255.0, G: float64(c.G) / 255.0, B: float64(c.B) / 255.0}.Hsv()
}
// XYY converts the 8-bit RGB components (scaled to [0,1]) to CIE 1931 xyY
// coordinates.
func (c SRGBColor) XYY() (float64, float64, float64) {
	return colorful.Color{R: float64(c.R) / 255.0, G: float64(c.G) / 255.0, B: float64(c.B) / 255.0}.Xyy()
}
// RGB returns the stored 8-bit components directly.
func (c SRGBColor) RGB() (uint8, uint8, uint8) {
	return c.R, c.G, c.B
}
// NativeColorspace reports SRGB as the native color space.
func (c SRGBColor) NativeColorspace() NativeColorspace {
	return SRGB
} | capabilities/color/color.go | 0.86813 | 0.555073 | color.go | starcoder
package query
import (
"bytes"
"strconv"
)
// Operand wraps an Expression so that comparison, IN, LIKE, ordering and
// aggregate helper methods can be chained onto it.
type Operand struct {
	e Expression
}
// Expand renders the wrapped expression into its SQL fragment and argument
// list, delegating to the package-level Expand function.
// NOTE(review): the second argument is passed as false — its meaning is
// defined by the package-level Expand; confirm before relying on it.
func (o Operand) Expand(s Starter, i int) (string, []interface{}, error) {
	return Expand(o.e, false, s, i)
}
// E2O converts an arbitrary Expression into an Operand. An Operand (or a
// *Operand, which is dereferenced) passes through unchanged; anything else
// is wrapped.
func E2O(e Expression) Operand {
	if o, ok := e.(Operand); ok {
		return o
	}
	if p, ok := e.(*Operand); ok {
		return *p
	}
	return Operand{e}
}
// O builds an Operand from a format string and arguments, via E.
func O(format string, a ...interface{}) Operand {
	return Operand{E(format, a...)}
}
// IQ builds an Operand naming a column: a single argument becomes an
// Identifier, multiple arguments a Qualifier (e.g. table, column).
// NOTE(review): an empty argument list also takes the Qualifier path —
// confirm that is intended.
func IQ(a ...string) Operand {
	if len(a) == 1 {
		return Operand{Identifier(a[0])}
	}
	return Operand{Qualifier(a)}
}
// IsNull builds the condition "<operand> IS NULL".
func (o Operand) IsNull() Condition {
	return C("? IS NULL", o)
}
// IsNotNull builds the condition "<operand> IS NOT NULL".
func (o Operand) IsNotNull() Condition {
	return C("? IS NOT NULL", o)
}
// Eq builds the condition "<operand> = <value>".
// A nil value degrades to IS NULL, matching SQL null semantics.
func (o Operand) Eq(i interface{}) Condition {
	if i == nil {
		return o.IsNull()
	}
	return C("? = ?", o, i)
}
// Ne builds the condition "<operand> != <value>".
// A nil value degrades to IS NOT NULL.
func (o Operand) Ne(i interface{}) Condition {
	if i == nil {
		return o.IsNotNull()
	}
	return C("? != ?", o, i)
}
// Lt builds the condition "<operand> < <value>".
func (o Operand) Lt(i interface{}) Condition {
	return C("? < ?", o, i)
}
// Le builds the condition "<operand> <= <value>".
func (o Operand) Le(i interface{}) Condition {
	return C("? <= ?", o, i)
}
// Gt builds the condition "<operand> > <value>".
func (o Operand) Gt(i interface{}) Condition {
	return C("? > ?", o, i)
}
// Ge builds the condition "<operand> >= <value>".
func (o Operand) Ge(i interface{}) Condition {
	return C("? >= ?", o, i)
}
// InInts builds the condition "<operand> IN (v1, v2, ...)" with the integer
// values rendered inline as literals. An empty value list yields an error
// condition instead of invalid SQL.
func (o Operand) InInts(a ...int) Condition {
	if len(a) == 0 {
		return E2C(nonef("empty in: %v", o))
	}
	var sb bytes.Buffer
	sb.WriteString("? IN (")
	sep := ""
	for _, n := range a {
		sb.WriteString(sep)
		sb.WriteString(strconv.Itoa(n))
		sep = ", "
	}
	sb.WriteByte(')')
	return C(sb.String(), o)
}
// InStrings builds the condition "<operand> IN ('s1', 's2', ...)" with each
// string quoted inline. An empty value list yields an error condition
// instead of invalid SQL.
func (o Operand) InStrings(a ...string) Condition {
	if len(a) == 0 {
		return E2C(nonef("empty in: %v", o))
	}
	var sb bytes.Buffer
	sb.WriteString("? IN (")
	sep := ""
	for _, s := range a {
		sb.WriteString(sep)
		sb.WriteString(Quote(s, '\''))
		sep = ", "
	}
	sb.WriteByte(')')
	return C(sb.String(), o)
}
// In builds the condition "<operand> IN (?, ?, ...)" with one placeholder
// per value; the values are passed through as bind arguments. An empty list
// or a nil element yields an error condition instead of invalid SQL.
func (o Operand) In(a ...interface{}) Condition {
	if len(a) == 0 {
		return E2C(nonef("empty in: %v", o))
	}
	args := make([]interface{}, 0, len(a)+1)
	args = append(args, o)
	var sb bytes.Buffer
	sb.WriteString("? IN (")
	sep := ""
	for _, v := range a {
		if v == nil {
			return E2C(nonef("null in: %v", o))
		}
		sb.WriteString(sep)
		sb.WriteByte('?')
		sep = ", "
		args = append(args, v)
	}
	sb.WriteByte(')')
	return C(sb.String(), args...)
}
// Between builds the condition "<operand> BETWEEN <i> AND <j>".
func (o Operand) Between(i, j interface{}) Condition {
	return C("? BETWEEN ? AND ?", o, i, j)
}
// Like builds the condition "<operand> LIKE <s>"; the pattern is passed
// through verbatim (no escaping).
func (o Operand) Like(s string) Condition {
	return C("? LIKE ?", o, s)
}
// Contains matches values containing s anywhere; s is LIKE-escaped first.
func (o Operand) Contains(s string) Condition {
	return o.Like("%" + EscapeLike(s) + "%")
}
// HasPrefix matches values starting with s; s is LIKE-escaped first.
func (o Operand) HasPrefix(s string) Condition {
	return o.Like(EscapeLike(s) + "%")
}
// HasSuffix matches values ending with s; s is LIKE-escaped first.
func (o Operand) HasSuffix(s string) Condition {
	return o.Like("%" + EscapeLike(s))
}
// Asc builds the ordering expression "<o> ASC".
func (o Operand) Asc() Expression {
	return E("? ASC", o)
}
// Desc builds the ordering expression "<o> DESC".
func (o Operand) Desc() Expression {
	return E("? DESC", o)
}
// Inc builds the expression "<o> + 1".
func (o Operand) Inc() Expression {
	return E("? + 1", o)
}
// Dec builds the expression "<o> - 1".
func (o Operand) Dec() Expression {
	return E("? - 1", o)
}
// Avg builds the aggregate expression "AVG(<o>)".
func (o Operand) Avg() Expression {
	return E("AVG(?)", o)
}
// Count builds the aggregate expression "COUNT(<o>)".
func (o Operand) Count() Expression {
	return E("COUNT(?)", o)
}
// Max builds the aggregate expression "MAX(<o>)".
func (o Operand) Max() Expression {
	return E("MAX(?)", o)
}
// Min builds the aggregate expression "MIN(<o>)".
func (o Operand) Min() Expression {
	return E("MIN(?)", o)
}
// Sum builds the aggregate expression "SUM(<o>)".
func (o Operand) Sum() Expression {
	return E("SUM(?)", o)
}
// As builds the aliasing expression "<o> AS <s>", quoting s as an identifier.
func (o Operand) As(s string) Expression {
	return E("? AS ?", o, Identifier(s))
}
// IsNull is shorthand for IQ(c).IsNull().
func IsNull(c string) Condition {
	return IQ(c).IsNull()
}
// IsNotNull is shorthand for IQ(c).IsNotNull().
func IsNotNull(c string) Condition {
	return IQ(c).IsNotNull()
}
// Eq is shorthand for IQ(c).Eq(i).
func Eq(c string, i interface{}) Condition {
	return IQ(c).Eq(i)
}
// Ne is shorthand for IQ(c).Ne(i).
func Ne(c string, i interface{}) Condition {
	return IQ(c).Ne(i)
}
// Lt is shorthand for IQ(c).Lt(i).
func Lt(c string, i interface{}) Condition {
	return IQ(c).Lt(i)
}
// Le is shorthand for IQ(c).Le(i).
func Le(c string, i interface{}) Condition {
	return IQ(c).Le(i)
}
// Gt is shorthand for IQ(c).Gt(i).
func Gt(c string, i interface{}) Condition {
	return IQ(c).Gt(i)
}
// Ge is shorthand for IQ(c).Ge(i).
func Ge(c string, i interface{}) Condition {
	return IQ(c).Ge(i)
}
// InInts is shorthand for IQ(c).InInts(a...).
func InInts(c string, a ...int) Condition {
	return IQ(c).InInts(a...)
}
// InStrings is shorthand for IQ(c).InStrings(a...).
func InStrings(c string, a ...string) Condition {
	return IQ(c).InStrings(a...)
}
// In is shorthand for IQ(c).In(a...).
func In(c string, a ...interface{}) Condition {
	return IQ(c).In(a...)
}
// Between is shorthand for IQ(c).Between(i, j).
func Between(c string, i, j interface{}) Condition {
	return IQ(c).Between(i, j)
}
// Like is shorthand for IQ(c).Like(s).
func Like(c, s string) Condition {
	return IQ(c).Like(s)
}
// Contains is shorthand for IQ(c).Contains(s).
func Contains(c, s string) Condition {
	return IQ(c).Contains(s)
}
// HasPrefix is shorthand for IQ(c).HasPrefix(s).
func HasPrefix(c, s string) Condition {
	return IQ(c).HasPrefix(s)
}
// HasSuffix is shorthand for IQ(c).HasSuffix(s).
func HasSuffix(c, s string) Condition {
	return IQ(c).HasSuffix(s)
}
// Asc is shorthand for IQ(c).Asc().
func Asc(c string) Expression {
	return IQ(c).Asc()
}
// Desc is shorthand for IQ(c).Desc().
func Desc(c string) Expression {
	return IQ(c).Desc()
}
// Inc is shorthand for IQ(c).Inc().
func Inc(c string) Expression {
	return IQ(c).Inc()
}
// Dec is shorthand for IQ(c).Dec().
func Dec(c string) Expression {
	return IQ(c).Dec()
}
// Avg is shorthand for IQ(c).Avg().
func Avg(c string) Expression {
	return IQ(c).Avg()
}
// Count is shorthand for IQ(c).Count().
func Count(c string) Expression {
	return IQ(c).Count()
}
// Max is shorthand for IQ(c).Max().
func Max(c string) Expression {
	return IQ(c).Max()
}
// Min is shorthand for IQ(c).Min().
func Min(c string) Expression {
	return IQ(c).Min()
}
// Sum is shorthand for IQ(c).Sum().
func Sum(c string) Expression {
	return IQ(c).Sum()
}
func As(c, s string) Expression {
return IQ(c).As(s)
} | query/o.go | 0.642993 | 0.595316 | o.go | starcoder |
package eval
import (
"fmt"
"github.com/hscells/trecresults"
"math"
)
// MaximumLikelihoodEvaluator is similar to ResidualEvaluator, except that the
// proportion of the residual that should be labelled relevant is computed as
// a maximum likelihood probability. That is, the number of unjudged documents
// that should be labelled with explicit positive relevance labels is computed
// using the ratio of relevant documents to non-relevant documents.
type MaximumLikelihoodEvaluator struct {
	// Evaluator is the wrapped metric that is scored against the
	// MLE-augmented qrels; see NewMaximumLikelihoodEvaluator.
	Evaluator
}
// Probability computes the maximum likelihood that a given unjudged document
// can be considered relevant, expressed as the (floored) ratio of relevant
// to non-relevant judgements in qrels. Both counts start at 1 so the ratio
// is always defined, even when one class is empty.
func (m MaximumLikelihoodEvaluator) Probability(qrels trecresults.Qrels) int64 {
	relevant, nonRelevant := 1.0, 1.0
	for _, judgement := range qrels {
		// Scores above RelevanceGrade count as relevant.
		if judgement.Score > RelevanceGrade {
			relevant++
			continue
		}
		nonRelevant++
	}
	// Floor the ratio: a fraction of a document makes no sense.
	return int64(math.Floor(relevant / nonRelevant))
}
// Residual returns a copy of qrels in which up to Probability(qrels)
// unjudged documents from results are given an explicit positive relevance
// label (RelevanceGrade + 1), in result order. Documents that already have
// a judgement are left untouched, and the caller's qrels are not mutated.
func (m MaximumLikelihoodEvaluator) Residual(results *trecresults.ResultList, qrels trecresults.Qrels) trecresults.Qrels {
	// Work on a copy so the caller's qrels are never mutated.
	augmented := make(trecresults.Qrels, len(qrels))
	for id, judgement := range qrels {
		augmented[id] = judgement
	}
	quota := m.Probability(qrels)
	var added int64
	for _, result := range *results {
		// Stop as soon as the MLE quota has been filled.
		if added >= quota {
			break
		}
		docID := result.DocId
		if _, judged := augmented[docID]; judged {
			continue
		}
		augmented[docID] = &trecresults.Qrel{
			Topic:     result.Topic,
			Iteration: "Q0",
			DocId:     docID,
			Score:     RelevanceGrade + 1,
		}
		added++
	}
	return augmented
}
// Name reports the metric name: the wrapped evaluator's name prefixed
// with "MLE".
func (m MaximumLikelihoodEvaluator) Name() string {
	return fmt.Sprintf("MLE%s", m.Evaluator.Name())
}
// Score evaluates results with the wrapped metric against the MLE-augmented
// residual qrels rather than the raw judgements.
func (m MaximumLikelihoodEvaluator) Score(results *trecresults.ResultList, qrels trecresults.Qrels) float64 {
	return m.Evaluator.Score(results, m.Residual(results, qrels))
}
// NewMaximumLikelihoodEvaluator creates a new mle residual evaluator
// by wrapping an existing evaluation metric.
func NewMaximumLikelihoodEvaluator(evaluator Evaluator) MaximumLikelihoodEvaluator {
return MaximumLikelihoodEvaluator{
Evaluator: evaluator,
}
} | eval/mle.go | 0.827932 | 0.478407 | mle.go | starcoder |
package steering
import (
"fmt"
"math/rand"
)
// Seek steers s towards target: the returned force is the difference
// between the desired velocity (full speed straight at the target) and
// the steerer's current velocity.
func Seek(s Steerer, target Vector) Vector {
	desired := target.Minus(s.Position()).Normalize().Mult(s.MaxSpeed())
	return desired.Minus(s.Velocity())
}
// Flee steers s away from target by negating the seek steering force.
// NOTE(review): the canonical Reynolds flee inverts the desired velocity
// (desired = away-from-target, steering = desired - velocity) rather than
// negating the whole seek force; confirm this variant is intentional.
func Flee(s Steerer, target Vector) Vector {
	towards := target.Minus(s.Position()).Normalize().Mult(s.MaxSpeed())
	return s.Velocity().Minus(towards)
}
// Pursuit seeks the point where the quarry is predicted to be after
// futureUpdates ticks, assuming it keeps its current velocity.
func Pursuit(s Steerer, q Quarry, futureUpdates float32) Vector {
	predicted := q.Position().Plus(q.Velocity().Mult(futureUpdates))
	return Seek(s, predicted)
}
// Wander produces a small random steering force: a random direction scaled
// to the wander-sphere radius and rotated into the steerer's frame by
// orientation.
func Wander(s Steerer, sphere float32, orientation Matrix) Vector {
	// Draw each component from [-1, 1) so the direction can point anywhere.
	// The previous [0, 1) components confined the wander direction to the
	// positive octant, biasing the steerer towards one corner of space.
	v := Vector{
		rand.Float32()*2 - 1,
		rand.Float32()*2 - 1,
		rand.Float32()*2 - 1,
	}
	force := v.Normalize().Mult(sphere)
	return orientation.Mult(force)
}
// Avoid returns a steering force away from the closest entity whose sphere
// intersects the steerer's look-ahead path of the given length, or nil if
// no entity is on a collision course.
func Avoid(s Steerer, spheres []Entity, length float32) Vector {
	closest := Entity(nil)
	closestDistance := float32(100000000)
	pos := s.Position()
	for _, sphere := range spheres {
		rel := sphere.Position().Minus(pos)
		path := s.Forward().Mult(length)
		// assumes Project returns the distance of rel from the path vector,
		// so this tests whether the spheres overlap the path — TODO confirm
		// against Vector.Project.
		if rel.Project(path) < s.Radius()+sphere.Radius() {
			distance := rel.Len()
			if distance >= length || distance == 0 { // Too far away, don't care
				continue
			}
			// Track the nearest of all potential collisions.
			if distance < closestDistance {
				closest = sphere
				closestDistance = distance
			}
		}
	}
	if closest == nil {
		// Nothing on a collision course.
		return nil
	}
	// Steer away from the most imminent threat.
	return Flee(s, closest.Position())
}
func Contain(s Steerer, leftTopBack Vector, rightBottomFront Vector, futureUpdates float32, dimensions int) Vector {
future := s.Position().Plus(s.Forward().Mult(futureUpdates))
for i := 0; i < dimensions; i++ {
gradient := Vector{0, 0, 0}
gradient[i] = 1
if leftTopBack[i] > future[i] {
normal := gradient.Normal(future, (future[i]-leftTopBack[i])*futureUpdates)
fmt.Println("too small", i, gradient, future, normal)
return Seek(s, normal)
// return Seek(s, s.Side().Mult(leftTopBack[i]-future[i]))
}
if rightBottomFront[i] < future[i] {
normal := gradient.Normal(future, (rightBottomFront[i]-future[i])*futureUpdates)
fmt.Println("too big", i, gradient, future, normal)
fmt.Println(s.Side().Hadamard(normal))
return Seek(s, normal)
}
}
return Vector(nil)
} | steering.go | 0.758242 | 0.493714 | steering.go | starcoder |
package web3
// Service is the JSON-RPC surface of an Ethereum node: the web3_*, net_*
// and eth_* methods, one Go method per RPC endpoint.
type Service interface {
	// Returns the version of the current client
	Web3ClientVersion() (*Web3ClientVersionResult, error)
	// Hashes data using the Keccak-256 algorithm
	Web3Sha3(*Web3Sha3Params) (*Web3Sha3Result, error)
	// Determines if this client is listening for new network connections.
	NetListening() (*NetListeningResult, error)
	// Returns the number of peers currently connected to this client.
	NetPeerCount() (*NetPeerCountResult, error)
	// Returns the network ID associated with the current network
	// (distinct from the chain ID returned by EthChainId on some networks).
	NetVersion() (*NetVersionResult, error)
	// Returns the number of most recent block.
	EthBlockNumber() (*EthBlockNumberResult, error)
	// Executes a new message call (locally) immediately without creating a transaction on the block chain.
	EthCall(*EthCallParams) (*EthCallResult, error)
	// Returns the currently configured chain id, a value used in replay-protected transaction signing as introduced by [EIP-155](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md).
	EthChainId() (*EthChainIdResult, error)
	// Returns the client coinbase address.
	EthCoinbase() (*EthCoinbaseResult, error)
	// Generates and returns an estimate of how much gas is necessary to allow the transaction to complete. The transaction will not be added to the blockchain. Note that the estimate may be significantly more than the amount of gas actually used by the transaction, for a variety of reasons including EVM mechanics and node performance.
	EthEstimateGas(*EthEstimateGasParams) (*EthEstimateGasResult, error)
	// Returns the current price per gas in wei
	EthGasPrice() (*EthGasPriceResult, error)
	// Returns Ether balance of a given or account or contract
	EthGetBalance(*EthGetBalanceParams) (*EthGetBalanceResult, error)
	// Gets a block for a given hash
	EthGetBlockByHash(*EthGetBlockByHashParams) (*EthGetBlockByHashResult, error)
	// Gets a block for a given number
	EthGetBlockByNumber(*EthGetBlockByNumberParams) (*EthGetBlockByNumberResult, error)
	// Returns the number of transactions in a block from a block matching the given block hash.
	EthGetBlockTransactionCountByHash(*EthGetBlockTransactionCountByHashParams) (*EthGetBlockTransactionCountByHashResult, error)
	// Returns the number of transactions in a block from a block matching the given block number.
	EthGetBlockTransactionCountByNumber(*EthGetBlockTransactionCountByNumberParams) (*EthGetBlockTransactionCountByNumberResult, error)
	// Returns code at a given contract address
	EthGetCode(*EthGetCodeParams) (*EthGetCodeResult, error)
	// Polling method for a filter, which returns an array of logs which occurred since last poll.
	EthGetFilterChanges(*EthGetFilterChangesParams) (*EthGetFilterChangesResult, error)
	// Returns an array of all logs matching filter with given id.
	EthGetFilterLogs(*EthGetFilterLogsParams) (*EthGetFilterLogsResult, error)
	// Returns raw transaction data of a transaction with the given hash.
	EthGetRawTransactionByHash(*EthGetRawTransactionByHashParams) (*EthGetRawTransactionByHashResult, error)
	// Returns raw transaction data of a transaction with the given block hash and index.
	EthGetRawTransactionByBlockHashAndIndex(*EthGetRawTransactionByBlockHashAndIndexParams) (*EthGetRawTransactionByBlockHashAndIndexResult, error)
	// Returns raw transaction data of a transaction with the given block number and index.
	EthGetRawTransactionByBlockNumberAndIndex(*EthGetRawTransactionByBlockNumberAndIndexParams) (*EthGetRawTransactionByBlockNumberAndIndexResult, error)
	// Returns an array of all logs matching a given filter object.
	EthGetLogs(*EthGetLogsParams) (*EthGetLogsResult, error)
	// Gets a storage value from a contract address, a position, and an optional blockNumber
	EthGetStorageAt(*EthGetStorageAtParams) (*EthGetStorageAtResult, error)
	// Returns the information about a transaction requested by the block hash and index of which it was mined.
	EthGetTransactionByBlockHashAndIndex(*EthGetTransactionByBlockHashAndIndexParams) (*EthGetTransactionByBlockHashAndIndexResult, error)
	// Returns the information about a transaction requested by the block number and index of which it was mined.
	EthGetTransactionByBlockNumberAndIndex(*EthGetTransactionByBlockNumberAndIndexParams) (*EthGetTransactionByBlockNumberAndIndexResult, error)
	// Returns the information about a transaction requested by transaction hash.
	EthGetTransactionByHash(*EthGetTransactionByHashParams) (*EthGetTransactionByHashResult, error)
	// Returns the number of transactions sent from an address
	EthGetTransactionCount(*EthGetTransactionCountParams) (*EthGetTransactionCountResult, error)
	// Returns the receipt information of a transaction by its hash.
	EthGetTransactionReceipt(*EthGetTransactionReceiptParams) (*EthGetTransactionReceiptResult, error)
	// Returns information about a uncle of a block by hash and uncle index position.
	EthGetUncleByBlockHashAndIndex(*EthGetUncleByBlockHashAndIndexParams) (*EthGetUncleByBlockHashAndIndexResult, error)
	// Returns information about a uncle of a block by number and uncle index position.
	EthGetUncleByBlockNumberAndIndex(*EthGetUncleByBlockNumberAndIndexParams) (*EthGetUncleByBlockNumberAndIndexResult, error)
	// Returns the number of uncles in a block from a block matching the given block hash.
	EthGetUncleCountByBlockHash(*EthGetUncleCountByBlockHashParams) (*EthGetUncleCountByBlockHashResult, error)
	// Returns the number of uncles in a block from a block matching the given block number.
	EthGetUncleCountByBlockNumber(*EthGetUncleCountByBlockNumberParams) (*EthGetUncleCountByBlockNumberResult, error)
	// Returns the account- and storage-values of the specified account including the Merkle-proof.
	EthGetProof(*EthGetProofParams) (*EthGetProofResult, error)
	// Returns the hash of the current block, the seedHash, and the boundary condition to be met ('target').
	EthGetWork() (*EthGetWorkResult, error)
	// Returns the number of hashes per second that the node is mining with.
	EthHashrate() (*EthHashrateResult, error)
	// Returns true if client is actively mining new blocks.
	EthMining() (*EthMiningResult, error)
	// Creates a filter in the node, to notify when a new block arrives. To check if the state has changed, call eth_getFilterChanges.
	EthNewBlockFilter() (*EthNewBlockFilterResult, error)
	// Creates a filter object, based on filter options, to notify when the state changes (logs). To check if the state has changed, call eth_getFilterChanges.
	EthNewFilter(*EthNewFilterParams) (*EthNewFilterResult, error)
	// Creates a filter in the node, to notify when new pending transactions arrive. To check if the state has changed, call eth_getFilterChanges.
	EthNewPendingTransactionFilter() (*EthNewPendingTransactionFilterResult, error)
	// Returns the pending transactions list
	EthPendingTransactions() (*EthPendingTransactionsResult, error)
	// Returns the current ethereum protocol version.
	EthProtocolVersion() (*EthProtocolVersionResult, error)
	// The sign method calculates an Ethereum specific signature.
	EthSign(*EthSignParams) (*EthSignResult, error)
	// Returns a list of addresses owned by client.
	EthAccounts() (*EthAccountsResult, error)
	// Creates new message call transaction or a contract creation, if the data field contains code.
	EthSendTransaction(*EthSendTransactionParams) (*EthSendTransactionResult, error)
	// Creates new message call transaction or a contract creation for signed transactions.
	EthSendRawTransaction(*EthSendRawTransactionParams) (*EthSendRawTransactionResult, error)
	// Used for submitting mining hashrate.
	EthSubmitHashrate(*EthSubmitHashrateParams) (*EthSubmitHashrateResult, error)
	// Used for submitting a proof-of-work solution.
	EthSubmitWork(*EthSubmitWorkParams) (*EthSubmitWorkResult, error)
	// Returns an object with data about the sync status or false.
	EthSyncing() (*EthSyncingResult, error)
	// Uninstalls a filter with given id. Should always be called when watch is no longer needed. Additionally Filters timeout when they aren't requested with eth_getFilterChanges for a period of time.
	EthUninstallFilter(*EthUninstallFilterParams) (*EthUninstallFilterResult, error)
}
// Web3ClientVersionResult wraps the response of web3_clientVersion.
type Web3ClientVersionResult struct {
	// client version
	ClientVersion string `json:"clientVersion"`
}
// Web3Sha3Params carries the input of web3_sha3.
type Web3Sha3Params struct {
	// data to hash using the Keccak-256 algorithm
	Data string `json:"data"`
}
// Web3Sha3Result wraps the response of web3_sha3.
type Web3Sha3Result struct {
	// Hex representation of a Keccak 256 hash
	HashedData string `json:"hashedData"`
}
// NetListeningResult wraps the response of net_listening.
type NetListeningResult struct {
	// `true` if listening is active or `false` if listening is not active
	IsNetListening bool `json:"isNetListening"`
}
// NetPeerCountResult wraps the response of net_peerCount.
type NetPeerCountResult struct {
	// Hex representation of number of connected peers
	NumConnectedPeers string `json:"numConnectedPeers"`
}
// NetVersionResult wraps the response of net_version.
type NetVersionResult struct {
	// chain ID associated with the current network
	ChainID string `json:"chainID"`
}
// BlockNumber is a hex-encoded block height.
type BlockNumber struct {
	// The hex representation of the block's height
	BlockNumber string `json:"blockNumber"`
}
// EthBlockNumberResult wraps the response of eth_blockNumber.
type EthBlockNumberResult struct {
	BlockNumber string `json:"blockNumber"`
}
// TransactionIndex is a hex-encoded transaction index within a block.
type TransactionIndex struct {
	// Hex representation of the integer
	Integer string `json:"integer"`
}
// Transaction mirrors the JSON-RPC transaction object returned by the
// eth_getTransaction* family of methods.
type Transaction struct {
	// Integer of the transaction's index position in the block. null when its pending
	TransactionIndex string `json:"transactionIndex"`
	// Hash of the block where this transaction was in. null when its pending
	BlockHash string `json:"blockHash"`
	// Address of the sender
	From string `json:"from"`
	// Hex representation of a Keccak 256 hash
	Hash string `json:"hash"`
	// The data field sent with the transaction
	Data string `json:"data"`
	// A number only to be used once
	Nonce string `json:"nonce"`
	// The gas limit provided by the sender in Wei
	Gas string `json:"gas"`
	// Value transferred in Wei (hex encoded)
	Value string `json:"value"`
	// ECDSA recovery id
	V string `json:"v"`
	// ECDSA signature s
	S string `json:"s"`
	// The gas price willing to be paid by the sender in Wei
	GasPrice string `json:"gasPrice"`
	// address of the receiver. null when its a contract creation transaction
	To string `json:"to"`
	// Block number where this transaction was in. null when its pending
	BlockNumber string `json:"blockNumber"`
	// ECDSA signature r
	R string `json:"r"`
}
// BlockHash is a hex-encoded Keccak-256 block hash.
type BlockHash struct {
	// Hex representation of a Keccak 256 hash
	Keccak string `json:"keccak"`
}
// EthCallParams carries the input of eth_call: the transaction to simulate
// and the block height to simulate it at.
type EthCallParams struct {
	Transaction
	BlockNumber string `json:"blockNumber"`
}
// EthCallResult wraps the response of eth_call.
type EthCallResult struct {
	// Hex representation of a variable length byte array
	ReturnValue string `json:"returnValue"`
}
// EthChainIdResult wraps the response of eth_chainId.
type EthChainIdResult struct {
	// hex format integer of the current chain id. Defaults are mainnet=61, morden=62.
	ChainId string `json:"chainId"`
}
// EthCoinbaseResult wraps the response of eth_coinbase.
type EthCoinbaseResult struct {
	// The address owned by the client that is used as default for things like the mining reward
	Address string `json:"address"`
}
// EthEstimateGasParams carries the transaction whose gas usage is estimated.
type EthEstimateGasParams struct {
	Transaction
}
// EthEstimateGasResult wraps the response of eth_estimateGas.
type EthEstimateGasResult struct {
	// Hex representation of the integer
	GasUsed string `json:"gasUsed"`
}
// EthGasPriceResult wraps the response of eth_gasPrice.
type EthGasPriceResult struct {
	// Hex representation of the integer
	GasPrice string `json:"gasPrice"`
}
// EthGetBalanceParams carries the input of eth_getBalance.
type EthGetBalanceParams struct {
	// The address of the account or contract
	Address string `json:"address"`
	// The hex representation of the block's height
	BlockNumber string `json:"blockNumber"`
}
// GetBalanceResult is a hex-encoded balance in Wei.
type GetBalanceResult struct {
	// Hex representation of the integer
	Integer string `json:"integer"`
}
// EthGetBalanceResult wraps the response of eth_getBalance.
type EthGetBalanceResult struct {
	GetBalanceResult string `json:"getBalanceResult"`
}
// EthGetBlockByHashParams carries the input of eth_getBlockByHash.
type EthGetBlockByHashParams struct {
	// The hex representation of the Keccak 256 of the RLP encoded block
	BlockHash string `json:"blockHash"`
	// If `true` it returns the full transaction objects, if `false` only the hashes of the transactions.
	IsTransactionsIncluded bool `json:"isTransactionsIncluded"`
}
// Block mirrors the JSON-RPC block object returned by eth_getBlockByHash
// and eth_getBlockByNumber.
type Block struct {
	// Hex representation of a Keccak 256 hash
	Sha3Uncles string `json:"sha3Uncles"`
	// Hex representation of a Keccak 256 hash
	TransactionsRoot string `json:"transactionsRoot"`
	// Hex representation of a Keccak 256 hash
	ParentHash string `json:"parentHash"`
	// The address of the beneficiary to whom the mining rewards were given or null when its the pending block
	Miner string `json:"miner"`
	// Integer of the difficulty for this block
	Difficulty string `json:"difficulty"`
	// The total used gas by all transactions in this block
	GasUsed string `json:"gasUsed"`
	// The unix timestamp for when the block was collated
	Timestamp string `json:"timestamp"`
	// Array of transaction objects, or 32 Bytes transaction hashes depending on the last given parameter
	Transactions []Transactions `json:"transactions"`
	// The block number or null when its the pending block
	Number string `json:"number"`
	// The block hash or null when its the pending block
	Hash string `json:"hash"`
	// Array of uncle hashes
	Uncles []string `json:"uncles"`
	// Hex representation of a Keccak 256 hash
	ReceiptsRoot string `json:"receiptsRoot"`
	// The 'extra data' field of this block
	ExtraData string `json:"extraData"`
	// Hex representation of a Keccak 256 hash
	StateRoot string `json:"stateRoot"`
	// Integer of the total difficulty of the chain until this block
	TotalDifficulty string `json:"totalDifficulty"`
	// Integer the size of this block in bytes
	Size string `json:"size"`
	// The maximum gas allowed in this block
	GasLimit string `json:"gasLimit"`
	// Randomly selected number to satisfy the proof-of-work or null when its the pending block
	Nonce string `json:"nonce"`
	// The bloom filter for the logs of the block or null when its the pending block
	LogsBloom string `json:"logsBloom"`
}
// Miner is a block beneficiary address.
type Miner struct {
	Address string `json:"address"`
}
// Transactions is a transaction entry within a block.
type Transactions struct {
	Transaction
}
// Number is a hex-encoded integer.
type Number struct {
	// Hex representation of the integer
	Integer string `json:"integer"`
}
// Hash is a hex-encoded Keccak-256 hash.
type Hash struct {
	// Hex representation of a Keccak 256 hash
	Keccak string `json:"keccak"`
}
// Uncles is a hex-encoded uncle block hash.
type Uncles struct {
	// Hex representation of a Keccak 256 hash
	Keccak string `json:"keccak"`
}
// TotalDifficulty is a hex-encoded cumulative chain difficulty.
type TotalDifficulty struct {
	// Hex representation of the integer
	Integer string `json:"integer"`
}
// Nonce is a hex-encoded proof-of-work nonce.
type Nonce struct {
	// Hex representation of the integer
	Integer string `json:"integer"`
}
// GetBlockByHashResult is the block payload of eth_getBlockByHash.
type GetBlockByHashResult struct {
	Block
}
// EthGetBlockByHashResult wraps the response of eth_getBlockByHash.
type EthGetBlockByHashResult struct {
	GetBlockByHashResult Block `json:"getBlockByHashResult"`
}
// EthGetBlockByNumberParams carries the input of eth_getBlockByNumber.
type EthGetBlockByNumberParams struct {
	BlockNumber string `json:"blockNumber"`
	// If `true` it returns the full transaction objects, if `false` only the hashes of the transactions.
	IsTransactionsIncluded bool `json:"isTransactionsIncluded"`
}
// GetBlockByNumberResult is the block payload of eth_getBlockByNumber.
type GetBlockByNumberResult struct {
	Block
}
// EthGetBlockByNumberResult wraps the response of eth_getBlockByNumber.
type EthGetBlockByNumberResult struct {
	GetBlockByNumberResult Block `json:"getBlockByNumberResult"`
}
// EthGetBlockTransactionCountByHashParams carries the input of
// eth_getBlockTransactionCountByHash.
type EthGetBlockTransactionCountByHashParams struct {
	// The hex representation of the Keccak 256 of the RLP encoded block
	BlockHash string `json:"blockHash"`
}
// BlockTransactionCountByHash is a hex-encoded transaction count.
type BlockTransactionCountByHash struct {
	// Hex representation of the integer
	Integer string `json:"integer"`
}
// EthGetBlockTransactionCountByHashResult wraps the response of
// eth_getBlockTransactionCountByHash.
type EthGetBlockTransactionCountByHashResult struct {
	// The Number of total transactions in the given block
	BlockTransactionCountByHash string `json:"blockTransactionCountByHash"`
}
// EthGetBlockTransactionCountByNumberParams carries the input of
// eth_getBlockTransactionCountByNumber.
type EthGetBlockTransactionCountByNumberParams struct {
	BlockNumber string `json:"blockNumber"`
}
// EthGetBlockTransactionCountByNumberResult wraps the response of
// eth_getBlockTransactionCountByNumber.
// NOTE(review): the field is named ...ByHash although this is the by-number
// result; renaming would break the exported API and JSON shape, so it is kept.
type EthGetBlockTransactionCountByNumberResult struct {
	// The Number of total transactions in the given block
	BlockTransactionCountByHash string `json:"blockTransactionCountByHash"`
}
// EthGetCodeParams carries the input of eth_getCode.
type EthGetCodeParams struct {
	// The address of the contract
	Address string `json:"address"`
	// The hex representation of the block's height
	BlockNumber string `json:"blockNumber"`
}
// EthGetCodeResult wraps the response of eth_getCode.
type EthGetCodeResult struct {
	// Hex representation of a variable length byte array
	Bytes string `json:"bytes"`
}
// EthGetFilterChangesParams carries the input of eth_getFilterChanges.
type EthGetFilterChangesParams struct {
	// An identifier used to reference the filter.
	FilterId string `json:"filterId"`
}
// Log mirrors the JSON-RPC log object: an indexed event emitted during a
// transaction.
type Log struct {
	Topics []Topics `json:"topics"`
	// Hex representation of a Keccak 256 hash
	TransactionHash string `json:"transactionHash"`
	// Sender of the transaction
	Address string `json:"address"`
	// The hex representation of the Keccak 256 of the RLP encoded block
	BlockHash string `json:"blockHash"`
	// The hex representation of the block's height
	BlockNumber string `json:"blockNumber"`
	// Hex representation of a variable length byte array
	Data string `json:"data"`
	// Hex representation of the integer
	LogIndex string `json:"logIndex"`
	// Hex representation of the integer
	TransactionIndex string `json:"transactionIndex"`
}
// LogResult is a log entry of eth_getFilterChanges.
// NOTE(review): the explicit fields duplicate the embedded Log; the outer
// declarations shadow the embedded ones for JSON encoding, so behavior is
// unchanged, but the duplication looks like generator redundancy.
type LogResult struct {
	// An indexed event generated during a transaction
	Log
	Topics []Topics `json:"topics"`
	// Hex representation of a Keccak 256 hash
	TransactionHash string `json:"transactionHash"`
	// Sender of the transaction
	Address string `json:"address"`
	// The hex representation of the Keccak 256 of the RLP encoded block
	BlockHash string `json:"blockHash"`
	// The hex representation of the block's height
	BlockNumber string `json:"blockNumber"`
	// Hex representation of a variable length byte array
	Data string `json:"data"`
	// Hex representation of the integer
	LogIndex string `json:"logIndex"`
	// Hex representation of the integer
	TransactionIndex string `json:"transactionIndex"`
}
// EthGetFilterChangesResult wraps the response of eth_getFilterChanges.
type EthGetFilterChangesResult struct {
	LogResult []LogResult `json:"logResult"`
}
// EthGetFilterLogsParams carries the input of eth_getFilterLogs.
type EthGetFilterLogsParams struct {
	// An identifier used to reference the filter.
	FilterId string `json:"filterId"`
}
// Logs is a log entry as returned by eth_getFilterLogs and eth_getLogs.
// NOTE(review): the explicit fields duplicate the embedded Log; the outer
// declarations shadow the embedded ones for JSON encoding, so behavior is
// unchanged, but the duplication looks like generator redundancy.
type Logs struct {
	// An indexed event generated during a transaction
	Log
	// Hex representation of the integer
	LogIndex string `json:"logIndex"`
	// Hex representation of the integer
	TransactionIndex string `json:"transactionIndex"`
	// Hex representation of a Keccak 256 hash
	TransactionHash string `json:"transactionHash"`
	// Sender of the transaction
	Address string `json:"address"`
	// The hex representation of the Keccak 256 of the RLP encoded block
	BlockHash string `json:"blockHash"`
	// The hex representation of the block's height
	BlockNumber string `json:"blockNumber"`
	// Hex representation of a variable length byte array
	Data string `json:"data"`
	Topics []Topics `json:"topics"`
}
// EthGetFilterLogsResult wraps the response of eth_getFilterLogs.
type EthGetFilterLogsResult struct {
	Logs []Logs `json:"logs"`
}
// EthGetRawTransactionByHashParams carries the input of
// eth_getRawTransactionByHash.
type EthGetRawTransactionByHashParams struct {
	// Hex representation of a Keccak 256 hash
	TransactionHash string `json:"transactionHash"`
}
// EthGetRawTransactionByHashResult wraps the response of
// eth_getRawTransactionByHash.
type EthGetRawTransactionByHashResult struct {
	// Hex representation of a variable length byte array
	RawTransactionByHash string `json:"rawTransactionByHash"`
}
// EthGetRawTransactionByBlockHashAndIndexParams carries the input of
// eth_getRawTransactionByBlockHashAndIndex.
type EthGetRawTransactionByBlockHashAndIndexParams struct {
	// The hex representation of the Keccak 256 of the RLP encoded block
	BlockHash string `json:"blockHash"`
	// Hex representation of the integer
	Index string `json:"index"`
}
// EthGetRawTransactionByBlockHashAndIndexResult wraps the response of
// eth_getRawTransactionByBlockHashAndIndex.
type EthGetRawTransactionByBlockHashAndIndexResult struct {
	// Hex representation of a variable length byte array
	RawTransaction string `json:"rawTransaction"`
}
// EthGetRawTransactionByBlockNumberAndIndexParams carries the input of
// eth_getRawTransactionByBlockNumberAndIndex.
type EthGetRawTransactionByBlockNumberAndIndexParams struct {
	BlockNumber string `json:"blockNumber"`
	// Hex representation of the integer
	Index string `json:"index"`
}
// EthGetRawTransactionByBlockNumberAndIndexResult wraps the response of
// eth_getRawTransactionByBlockNumberAndIndex.
type EthGetRawTransactionByBlockNumberAndIndexResult struct {
	// Hex representation of a variable length byte array
	RawTransaction string `json:"rawTransaction"`
}
// Filter is the log/event filter object used to monitor the blockchain.
type Filter struct {
	// The hex representation of the block's height
	FromBlock string `json:"fromBlock"`
	// The hex representation of the block's height
	ToBlock string `json:"toBlock"`
	Address string `json:"address"`
	// Array of 32 Bytes DATA topics. Topics are order-dependent. Each topic can also be an array of DATA with 'or' options
	Topics []string `json:"topics"`
}
// Address is a contract address to monitor for events.
type Address struct {
	// Address of the contract from which to monitor events
	Address string `json:"address"`
}
// Topics is a single 32-byte log topic.
type Topics struct {
	// Hex representation of a 256 bit unit of data
	DataWord string `json:"dataWord"`
}
// EthGetLogsParams carries the input of eth_getLogs.
type EthGetLogsParams struct {
	// A filter used to monitor the blockchain for log/events
	Filter
}
// EthGetLogsResult wraps the response of eth_getLogs.
type EthGetLogsResult struct {
	Logs []Logs `json:"logs"`
}
// EthGetStorageAtParams carries the input of eth_getStorageAt.
type EthGetStorageAtParams struct {
	Address string `json:"address"`
	// Hex representation of the storage slot where the variable exists
	Position string `json:"position"`
	BlockNumber string `json:"blockNumber"`
}
// EthGetStorageAtResult wraps the response of eth_getStorageAt.
type EthGetStorageAtResult struct {
	// Hex representation of a 256 bit unit of data
	DataWord string `json:"dataWord"`
}
// EthGetTransactionByBlockHashAndIndexParams carries the input of
// eth_getTransactionByBlockHashAndIndex.
type EthGetTransactionByBlockHashAndIndexParams struct {
	// The hex representation of the Keccak 256 of the RLP encoded block
	BlockHash string `json:"blockHash"`
	// Hex representation of the integer
	Index string `json:"index"`
}
// TransactionResult is a transaction payload.
type TransactionResult struct {
	Transaction
}
// EthGetTransactionByBlockHashAndIndexResult wraps the response of
// eth_getTransactionByBlockHashAndIndex.
type EthGetTransactionByBlockHashAndIndexResult struct {
	TransactionResult Transaction `json:"transactionResult"`
}
// EthGetTransactionByBlockNumberAndIndexParams carries the input of
// eth_getTransactionByBlockNumberAndIndex.
type EthGetTransactionByBlockNumberAndIndexParams struct {
	BlockNumber string `json:"blockNumber"`
	// Hex representation of the integer
	Index string `json:"index"`
}
// EthGetTransactionByBlockNumberAndIndexResult wraps the response of
// eth_getTransactionByBlockNumberAndIndex.
type EthGetTransactionByBlockNumberAndIndexResult struct {
	TransactionResult Transaction `json:"transactionResult"`
}
// EthGetTransactionByHashParams carries the input of eth_getTransactionByHash.
type EthGetTransactionByHashParams struct {
	// Hex representation of a Keccak 256 hash
	TransactionHash string `json:"transactionHash"`
}
// EthGetTransactionByHashResult wraps the response of
// eth_getTransactionByHash.
type EthGetTransactionByHashResult struct {
	Transaction
}
// EthGetTransactionCountParams carries the input of eth_getTransactionCount.
type EthGetTransactionCountParams struct {
	Address string `json:"address"`
	BlockNumber string `json:"blockNumber"`
}
// NonceOrNull is a hex-encoded account nonce (or null).
type NonceOrNull struct {
	// A number only to be used once
	Nonce string `json:"nonce"`
}
// EthGetTransactionCountResult wraps the response of eth_getTransactionCount.
type EthGetTransactionCountResult struct {
	NonceOrNull string `json:"nonceOrNull"`
}
// EthGetTransactionReceiptParams carries the input of
// eth_getTransactionReceipt.
type EthGetTransactionReceiptParams struct {
	// Hex representation of a Keccak 256 hash
	TransactionHash string `json:"transactionHash"`
}
// Receipt mirrors the JSON-RPC transaction receipt object returned by
// eth_getTransactionReceipt.
type Receipt struct {
	// The hex representation of the block's height
	BlockNumber string `json:"blockNumber"`
	// Hex representation of the integer
	CumulativeGasUsed string `json:"cumulativeGasUsed"`
	// Hex representation of the integer
	GasUsed string `json:"gasUsed"`
	// An array of all the logs triggered during the transaction
	Logs []Logs `json:"logs"`
	// Integer of the transaction's index position in the block
	// (the previous comment here was a copy-paste of the logsBloom text)
	TransactionIndex string `json:"transactionIndex"`
	// Whether or not the transaction threw an error.
	Status string `json:"status"`
	// The hex representation of the Keccak 256 of the RLP encoded block
	BlockHash string `json:"blockHash"`
	// The contract address created, if the transaction was a contract creation, otherwise null
	ContractAddress string `json:"contractAddress"`
	// The sender of the transaction
	From string `json:"from"`
	// A 2048 bit bloom filter from the logs of the transaction. Each log sets 3 bits though taking the low-order 11 bits of each of the first three pairs of bytes in a Keccak 256 hash of the log's byte series
	LogsBloom string `json:"logsBloom"`
	// Destination address of the transaction
	To string `json:"to"`
	// Hex representation of a Keccak 256 hash
	TransactionHash string `json:"transactionHash"`
}
// EthGetTransactionReceiptResult wraps the response of
// eth_getTransactionReceipt.
type EthGetTransactionReceiptResult struct {
	// returns either a receipt or null
	Receipt
}
// EthGetUncleByBlockHashAndIndexParams carries the input of
// eth_getUncleByBlockHashAndIndex.
type EthGetUncleByBlockHashAndIndexParams struct {
	// The hex representation of the Keccak 256 of the RLP encoded block
	BlockHash string `json:"blockHash"`
	// Hex representation of the integer
	Index string `json:"index"`
}
// Uncle mirrors the JSON-RPC uncle-block object: an orphaned block that can
// be included in the chain at a lower block reward; it carries no
// individual transactions.
type Uncle struct {
	// Randomly selected number to satisfy the proof-of-work or null when its the pending block
	Nonce string `json:"nonce"`
	// Hex representation of a Keccak 256 hash
	TransactionsRoot string `json:"transactionsRoot"`
	// Integer of the total difficulty of the chain until this block
	TotalDifficulty string `json:"totalDifficulty"`
	// Integer the size of this block in bytes
	Size string `json:"size"`
	// The total used gas by all transactions in this block
	GasUsed string `json:"gasUsed"`
	// Array of uncle hashes
	Uncles []string `json:"uncles"`
	// The block number or null when its the pending block
	Number string `json:"number"`
	// The block hash or null when its the pending block
	Hash string `json:"hash"`
	// Hex representation of a Keccak 256 hash
	Sha3Uncles string `json:"sha3Uncles"`
	// Hex representation of a Keccak 256 hash
	StateRoot string `json:"stateRoot"`
	// The 'extra data' field of this block
	ExtraData string `json:"extraData"`
	// The unix timestamp for when the block was collated
	Timestamp string `json:"timestamp"`
	// Hex representation of a Keccak 256 hash
	ReceiptsRoot string `json:"receiptsRoot"`
	// The address of the beneficiary to whom the mining rewards were given or null when its the pending block
	Miner string `json:"miner"`
	// The maximum gas allowed in this block
	GasLimit string `json:"gasLimit"`
	// Hex representation of a Keccak 256 hash
	ParentHash string `json:"parentHash"`
	// The bloom filter for the logs of the block or null when its the pending block
	LogsBloom string `json:"logsBloom"`
	// Integer of the difficulty for this block
	Difficulty string `json:"difficulty"`
}
// UncleOrNull is an uncle payload that may be null on the wire.
type UncleOrNull struct {
	// Orphaned blocks that can be included in the chain but at a lower block reward. NOTE: An uncle doesn’t contain individual transactions.
	Uncle
}
// EthGetUncleByBlockHashAndIndexResult wraps the response of
// eth_getUncleByBlockHashAndIndex.
type EthGetUncleByBlockHashAndIndexResult struct {
	UncleOrNull Uncle `json:"uncleOrNull"`
}
// EthGetUncleByBlockNumberAndIndexParams carries the input of
// eth_getUncleByBlockNumberAndIndex.
type EthGetUncleByBlockNumberAndIndexParams struct {
	// The hex representation of the block's height
	UncleBlockNumber string `json:"uncleBlockNumber"`
	// Hex representation of the integer
	Index string `json:"index"`
}
// UncleResult is an uncle payload.
type UncleResult struct {
	// Orphaned blocks that can be included in the chain but at a lower block reward. NOTE: An uncle doesn’t contain individual transactions.
	Uncle
}
// EthGetUncleByBlockNumberAndIndexResult wraps the response of
// eth_getUncleByBlockNumberAndIndex.
type EthGetUncleByBlockNumberAndIndexResult struct {
	// returns an uncle or null
	UncleResult Uncle `json:"uncleResult"`
}
type EthGetUncleCountByBlockHashParams struct {
// The hex representation of the Keccak 256 of the RLP encoded block
BlockHash string `json:"blockHash"`
}
type UncleCountOrNull struct {
// Hex representation of the integer
Integer string `json:"integer"`
}
type EthGetUncleCountByBlockHashResult struct {
UncleCountOrNull string `json:"uncleCountOrNull"`
}
type EthGetUncleCountByBlockNumberParams struct {
BlockNumber string `json:"blockNumber"`
}
type EthGetUncleCountByBlockNumberResult struct {
UncleCountOrNull string `json:"uncleCountOrNull"`
}
type EthGetProofParams struct {
// The address of the account or contract
Address string `json:"address"`
// The storage keys of all the storage slots being requested
StorageKeys []string `json:"storageKeys"`
BlockNumber string `json:"blockNumber"`
}
type StorageKeys struct {
// Hex representation of the integer
Integer string `json:"integer"`
}
type ProofAccount struct {
// Hex representation of the integer
Balance string `json:"balance"`
// Hex representation of a Keccak 256 hash
CodeHash string `json:"codeHash"`
// A number only to be used once
Nonce string `json:"nonce"`
// Hex representation of a Keccak 256 hash
StorageHash string `json:"storageHash"`
// Current block header PoW hash.
StorageProof []StorageProof `json:"storageProof"`
// The address of the account or contract of the request
Address string `json:"address"`
// The set of node values needed to traverse a patricia merkle tree (from root to leaf) to retrieve a value
AccountProof []string `json:"accountProof"`
}
type Proof struct {
// Hex representation of a variable length byte array
ProofNode string `json:"proofNode"`
}
type StorageProof struct {
// The set of node values needed to traverse a patricia merkle tree (from root to leaf) to retrieve a value
Proof []string `json:"proof"`
// Hex representation of the integer
Key string `json:"key"`
// Hex representation of the integer
Value string `json:"value"`
}
type AccountProof struct {
// Hex representation of a variable length byte array
ProofNode string `json:"proofNode"`
}
type ProofAccountOrNull struct {
// The merkle proofs of the specified account connecting them to the blockhash of the block specified
ProofAccount
}
type EthGetProofResult struct {
ProofAccountOrNull ProofAccount `json:"proofAccountOrNull"`
}
type EthGetWorkResult struct {
Work []string `json:"work"`
}
type EthHashrateResult struct {
// Hex representation of the integer
HashesPerSecond string `json:"hashesPerSecond"`
}
type EthMiningResult struct {
// Whether of not the client is mining
Mining bool `json:"mining"`
}
type EthNewBlockFilterResult struct {
// Hex representation of the integer
FilterId string `json:"filterId"`
}
type EthNewFilterParams struct {
// A filter used to monitor the blockchain for log/events
Filter
}
type EthNewFilterResult struct {
// Hex representation of the integer
FilterId string `json:"filterId"`
}
type EthNewPendingTransactionFilterResult struct {
// Hex representation of the integer
FilterId string `json:"filterId"`
}
type PendingTransactions struct {
Transaction
// Integer of the transaction's index position in the block. null when its pending
TransactionIndex string `json:"transactionIndex"`
// Hash of the block where this transaction was in. null when its pending
BlockHash string `json:"blockHash"`
// Address of the sender
From string `json:"from"`
// Hex representation of a Keccak 256 hash
Hash string `json:"hash"`
// The data field sent with the transaction
Data string `json:"data"`
// A number only to be used once
Nonce string `json:"nonce"`
// The gas limit provided by the sender in Wei
Gas string `json:"gas"`
// Hex representation of a Keccak 256 hash
Value string `json:"value"`
// ECDSA recovery id
V string `json:"v"`
// ECDSA signature s
S string `json:"s"`
// The gas price willing to be paid by the sender in Wei
GasPrice string `json:"gasPrice"`
// address of the receiver. null when its a contract creation transaction
To string `json:"to"`
// Block number where this transaction was in. null when its pending
BlockNumber string `json:"blockNumber"`
// ECDSA signature r
R string `json:"r"`
}
type EthPendingTransactionsResult struct {
PendingTransactions []PendingTransactions `json:"pendingTransactions"`
}
type EthProtocolVersionResult struct {
// Hex representation of the integer
ProtocolVersion string `json:"protocolVersion"`
}
type EthSignParams struct {
Address string `json:"address"`
// Hex representation of a variable length byte array
Bytes string `json:"bytes"`
}
type EthSignResult struct {
// Hex representation of a variable length byte array
Signature string `json:"signature"`
}
type Addresses struct {
Address string `json:"address"`
}
type EthAccountsResult struct {
// addresses owned by the client
Addresses []string `json:"addresses"`
}
type EthSendTransactionParams struct {
Transaction
}
type EthSendTransactionResult struct {
// Hex representation of a Keccak 256 hash
TransactionHash string `json:"transactionHash"`
}
type EthSendRawTransactionParams struct {
// Hex representation of a variable length byte array
SignedTransactionData string `json:"signedTransactionData"`
}
type EthSendRawTransactionResult struct {
// Hex representation of a Keccak 256 hash
TransactionHash string `json:"transactionHash"`
}
type EthSubmitHashrateParams struct {
// Hex representation of a 256 bit unit of data
HashRate string `json:"hashRate"`
// Hex representation of a 256 bit unit of data
Id string `json:"id"`
}
type EthSubmitHashrateResult struct {
// whether of not submitting went through successfully
SubmitHashRateSuccess bool `json:"submitHashRateSuccess"`
}
type EthSubmitWorkParams struct {
// A number only to be used once
Nonce string `json:"nonce"`
// Hex representation of a 256 bit unit of data
PowHash string `json:"powHash"`
// Hex representation of a 256 bit unit of data
MixHash string `json:"mixHash"`
}
type EthSubmitWorkResult struct {
// Whether or not the provided solution is valid
SolutionValid bool `json:"solutionValid"`
}
type SyncStatus struct {
// Hex representation of the integer
StartingBlock string `json:"startingBlock"`
// Hex representation of the integer
CurrentBlock string `json:"currentBlock"`
// Hex representation of the integer
HighestBlock string `json:"highestBlock"`
// Hex representation of the integer
KnownStates string `json:"knownStates"`
// Hex representation of the integer
PulledStates string `json:"pulledStates"`
}
type Syncing struct {
// An object with sync status data
SyncStatus
}
type EthSyncingResult struct {
Syncing SyncStatus `json:"syncing"`
}
type EthUninstallFilterParams struct {
// An identifier used to reference the filter.
FilterId string `json:"filterId"`
}
type EthUninstallFilterResult struct {
// Whether of not the filter was successfully uninstalled
FilterUninstalledSuccess bool `json:"filterUninstalledSuccess"`
} | rpc/web3/types.go | 0.824214 | 0.413773 | types.go | starcoder |
package tensor
import (
"math"
"reflect"
"github.com/pkg/errors"
)
/* MaskedEqual */

// MaskedEqual sets the mask to true where the corresponding data is equal to val1.
// If the mask is soft, the whole mask is recomputed from the predicate (elements
// that were previously masked may become unmasked); if the mask is hard, the new
// predicate is OR-ed into the existing mask, so masked elements stay masked.
// val1 must be the same type as the tensor's elements (the type assertion panics
// otherwise); element kinds not listed in the switch leave the mask unchanged.
func (t *Dense) MaskedEqual(val1 interface{}) (err error) {
	// Lazily allocate the mask on the first masking operation.
	if !t.IsMasked() {
		t.makeMask()
	}
	switch t.t.Kind() {
	case reflect.Int:
		data := t.Ints()
		mask := t.mask
		x := val1.(int)
		if t.maskIsSoft {
			// Soft mask: overwrite each element with the fresh predicate.
			for i := range data {
				a := data[i]
				mask[i] = (a == x)
			}
		} else {
			// Hard mask: accumulate — never unmask an already-masked element.
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a == x)
			}
		}
	// The remaining cases are type-specialized copies of the pattern above.
	case reflect.Int8:
		data := t.Int8s()
		mask := t.mask
		x := val1.(int8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a == x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a == x)
			}
		}
	case reflect.Int16:
		data := t.Int16s()
		mask := t.mask
		x := val1.(int16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a == x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a == x)
			}
		}
	case reflect.Int32:
		data := t.Int32s()
		mask := t.mask
		x := val1.(int32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a == x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a == x)
			}
		}
	case reflect.Int64:
		data := t.Int64s()
		mask := t.mask
		x := val1.(int64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a == x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a == x)
			}
		}
	case reflect.Uint:
		data := t.Uints()
		mask := t.mask
		x := val1.(uint)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a == x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a == x)
			}
		}
	case reflect.Uint8:
		data := t.Uint8s()
		mask := t.mask
		x := val1.(uint8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a == x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a == x)
			}
		}
	case reflect.Uint16:
		data := t.Uint16s()
		mask := t.mask
		x := val1.(uint16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a == x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a == x)
			}
		}
	case reflect.Uint32:
		data := t.Uint32s()
		mask := t.mask
		x := val1.(uint32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a == x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a == x)
			}
		}
	case reflect.Uint64:
		data := t.Uint64s()
		mask := t.mask
		x := val1.(uint64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a == x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a == x)
			}
		}
	case reflect.Float32:
		data := t.Float32s()
		mask := t.mask
		x := val1.(float32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a == x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a == x)
			}
		}
	case reflect.Float64:
		data := t.Float64s()
		mask := t.mask
		x := val1.(float64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a == x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a == x)
			}
		}
	case reflect.String:
		data := t.Strings()
		mask := t.mask
		x := val1.(string)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a == x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a == x)
			}
		}
	}
	return nil
}
/* MaskedNotEqual */

// MaskedNotEqual sets the mask to true where the corresponding data is not equal
// to val1. If the mask is soft, the whole mask is recomputed from the predicate;
// if the mask is hard, the new predicate is OR-ed into the existing mask, so
// already-masked elements stay masked.
// val1 must be the same type as the tensor's elements (the type assertion panics
// otherwise); element kinds not listed in the switch leave the mask unchanged.
func (t *Dense) MaskedNotEqual(val1 interface{}) (err error) {
	// Lazily allocate the mask on the first masking operation.
	if !t.IsMasked() {
		t.makeMask()
	}
	switch t.t.Kind() {
	case reflect.Int:
		data := t.Ints()
		mask := t.mask
		x := val1.(int)
		if t.maskIsSoft {
			// Soft mask: overwrite with the fresh predicate.
			for i := range data {
				a := data[i]
				mask[i] = (a != x)
			}
		} else {
			// Hard mask: accumulate — never unmask an already-masked element.
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a != x)
			}
		}
	// The remaining cases are type-specialized copies of the pattern above.
	case reflect.Int8:
		data := t.Int8s()
		mask := t.mask
		x := val1.(int8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a != x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a != x)
			}
		}
	case reflect.Int16:
		data := t.Int16s()
		mask := t.mask
		x := val1.(int16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a != x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a != x)
			}
		}
	case reflect.Int32:
		data := t.Int32s()
		mask := t.mask
		x := val1.(int32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a != x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a != x)
			}
		}
	case reflect.Int64:
		data := t.Int64s()
		mask := t.mask
		x := val1.(int64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a != x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a != x)
			}
		}
	case reflect.Uint:
		data := t.Uints()
		mask := t.mask
		x := val1.(uint)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a != x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a != x)
			}
		}
	case reflect.Uint8:
		data := t.Uint8s()
		mask := t.mask
		x := val1.(uint8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a != x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a != x)
			}
		}
	case reflect.Uint16:
		data := t.Uint16s()
		mask := t.mask
		x := val1.(uint16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a != x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a != x)
			}
		}
	case reflect.Uint32:
		data := t.Uint32s()
		mask := t.mask
		x := val1.(uint32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a != x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a != x)
			}
		}
	case reflect.Uint64:
		data := t.Uint64s()
		mask := t.mask
		x := val1.(uint64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a != x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a != x)
			}
		}
	case reflect.Float32:
		data := t.Float32s()
		mask := t.mask
		x := val1.(float32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a != x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a != x)
			}
		}
	case reflect.Float64:
		data := t.Float64s()
		mask := t.mask
		x := val1.(float64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a != x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a != x)
			}
		}
	case reflect.String:
		data := t.Strings()
		mask := t.mask
		x := val1.(string)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a != x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a != x)
			}
		}
	}
	return nil
}
/* MaskedValues */

// MaskedValues sets the mask to true where the corresponding data is
// approximately equal to val1, using a numpy-style tolerance test:
//
//	|a - val1| <= atol + rtol*|val1|
//
// val1 is the value to mask, val2 is the relative tolerance (rtol), and the
// optional val3[0] is the absolute tolerance (atol), defaulting to 1.0e-8.
// All tolerance values must be the same floating point type as the tensor.
// If the mask is soft the whole mask is recomputed; if it is hard the new
// predicate is OR-ed into the existing mask. Returns an error for
// non-floating-point tensors.
func (t *Dense) MaskedValues(val1 interface{}, val2 interface{}, val3 ...interface{}) (err error) {
	if !isFloat(t.t) {
		err = errors.Errorf("Can only do MaskedValues with floating point types")
		return
	}
	if !t.IsMasked() {
		t.makeMask()
	}
	switch t.t.Kind() {
	case reflect.Float32:
		data := t.Float32s()
		mask := t.mask
		x := val1.(float32)
		y := val2.(float32)
		// BUG FIX: previously the rtol term y*|x| was only applied when an
		// explicit atol was passed, so val2 was silently ignored in the
		// common no-atol call. The tolerance is now always atol + rtol*|x|.
		atol := float64(1.0e-8)
		if len(val3) > 0 {
			atol = float64(val3[0].(float32))
		}
		delta := atol + float64(y)*math.Abs(float64(x))
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (math.Abs(float64(a-x)) <= delta)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (math.Abs(float64(a-x)) <= delta)
			}
		}
	case reflect.Float64:
		data := t.Float64s()
		mask := t.mask
		x := val1.(float64)
		y := val2.(float64)
		atol := float64(1.0e-8)
		if len(val3) > 0 {
			atol = float64(val3[0].(float64))
		}
		delta := atol + float64(y)*math.Abs(float64(x))
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (math.Abs(float64(a-x)) <= delta)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (math.Abs(float64(a-x)) <= delta)
			}
		}
	}
	return nil
}
/* MaskedGreater */

// MaskedGreater sets the mask to true where the corresponding data is greater
// than val1. If the mask is soft, the whole mask is recomputed from the
// predicate; if the mask is hard, the new predicate is OR-ed into the existing
// mask, so already-masked elements stay masked.
// val1 must be the same type as the tensor's elements (the type assertion
// panics otherwise); for strings the comparison is Go's lexicographic byte
// ordering. Element kinds not listed in the switch leave the mask unchanged.
func (t *Dense) MaskedGreater(val1 interface{}) (err error) {
	// Lazily allocate the mask on the first masking operation.
	if !t.IsMasked() {
		t.makeMask()
	}
	switch t.t.Kind() {
	case reflect.Int:
		data := t.Ints()
		mask := t.mask
		x := val1.(int)
		if t.maskIsSoft {
			// Soft mask: overwrite with the fresh predicate.
			for i := range data {
				a := data[i]
				mask[i] = (a > x)
			}
		} else {
			// Hard mask: accumulate — never unmask an already-masked element.
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a > x)
			}
		}
	// The remaining cases are type-specialized copies of the pattern above.
	case reflect.Int8:
		data := t.Int8s()
		mask := t.mask
		x := val1.(int8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a > x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a > x)
			}
		}
	case reflect.Int16:
		data := t.Int16s()
		mask := t.mask
		x := val1.(int16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a > x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a > x)
			}
		}
	case reflect.Int32:
		data := t.Int32s()
		mask := t.mask
		x := val1.(int32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a > x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a > x)
			}
		}
	case reflect.Int64:
		data := t.Int64s()
		mask := t.mask
		x := val1.(int64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a > x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a > x)
			}
		}
	case reflect.Uint:
		data := t.Uints()
		mask := t.mask
		x := val1.(uint)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a > x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a > x)
			}
		}
	case reflect.Uint8:
		data := t.Uint8s()
		mask := t.mask
		x := val1.(uint8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a > x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a > x)
			}
		}
	case reflect.Uint16:
		data := t.Uint16s()
		mask := t.mask
		x := val1.(uint16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a > x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a > x)
			}
		}
	case reflect.Uint32:
		data := t.Uint32s()
		mask := t.mask
		x := val1.(uint32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a > x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a > x)
			}
		}
	case reflect.Uint64:
		data := t.Uint64s()
		mask := t.mask
		x := val1.(uint64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a > x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a > x)
			}
		}
	case reflect.Float32:
		data := t.Float32s()
		mask := t.mask
		x := val1.(float32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a > x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a > x)
			}
		}
	case reflect.Float64:
		data := t.Float64s()
		mask := t.mask
		x := val1.(float64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a > x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a > x)
			}
		}
	case reflect.String:
		data := t.Strings()
		mask := t.mask
		x := val1.(string)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a > x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a > x)
			}
		}
	}
	return nil
}
/* MaskedGreaterEqual */

// MaskedGreaterEqual sets the mask to true where the corresponding data is
// greater than or equal to val1. If the mask is soft, the whole mask is
// recomputed from the predicate; if the mask is hard, the new predicate is
// OR-ed into the existing mask, so already-masked elements stay masked.
// val1 must be the same type as the tensor's elements (the type assertion
// panics otherwise); for strings the comparison is Go's lexicographic byte
// ordering. Element kinds not listed in the switch leave the mask unchanged.
func (t *Dense) MaskedGreaterEqual(val1 interface{}) (err error) {
	// Lazily allocate the mask on the first masking operation.
	if !t.IsMasked() {
		t.makeMask()
	}
	switch t.t.Kind() {
	case reflect.Int:
		data := t.Ints()
		mask := t.mask
		x := val1.(int)
		if t.maskIsSoft {
			// Soft mask: overwrite with the fresh predicate.
			for i := range data {
				a := data[i]
				mask[i] = (a >= x)
			}
		} else {
			// Hard mask: accumulate — never unmask an already-masked element.
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a >= x)
			}
		}
	// The remaining cases are type-specialized copies of the pattern above.
	case reflect.Int8:
		data := t.Int8s()
		mask := t.mask
		x := val1.(int8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a >= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a >= x)
			}
		}
	case reflect.Int16:
		data := t.Int16s()
		mask := t.mask
		x := val1.(int16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a >= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a >= x)
			}
		}
	case reflect.Int32:
		data := t.Int32s()
		mask := t.mask
		x := val1.(int32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a >= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a >= x)
			}
		}
	case reflect.Int64:
		data := t.Int64s()
		mask := t.mask
		x := val1.(int64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a >= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a >= x)
			}
		}
	case reflect.Uint:
		data := t.Uints()
		mask := t.mask
		x := val1.(uint)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a >= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a >= x)
			}
		}
	case reflect.Uint8:
		data := t.Uint8s()
		mask := t.mask
		x := val1.(uint8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a >= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a >= x)
			}
		}
	case reflect.Uint16:
		data := t.Uint16s()
		mask := t.mask
		x := val1.(uint16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a >= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a >= x)
			}
		}
	case reflect.Uint32:
		data := t.Uint32s()
		mask := t.mask
		x := val1.(uint32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a >= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a >= x)
			}
		}
	case reflect.Uint64:
		data := t.Uint64s()
		mask := t.mask
		x := val1.(uint64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a >= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a >= x)
			}
		}
	case reflect.Float32:
		data := t.Float32s()
		mask := t.mask
		x := val1.(float32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a >= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a >= x)
			}
		}
	case reflect.Float64:
		data := t.Float64s()
		mask := t.mask
		x := val1.(float64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a >= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a >= x)
			}
		}
	case reflect.String:
		data := t.Strings()
		mask := t.mask
		x := val1.(string)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a >= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a >= x)
			}
		}
	}
	return nil
}
/* MaskedLess */

// MaskedLess sets the mask to true where the corresponding data is less than
// val1. If the mask is soft, the whole mask is recomputed from the predicate;
// if the mask is hard, the new predicate is OR-ed into the existing mask, so
// already-masked elements stay masked.
// val1 must be the same type as the tensor's elements (the type assertion
// panics otherwise); for strings the comparison is Go's lexicographic byte
// ordering. Element kinds not listed in the switch leave the mask unchanged.
func (t *Dense) MaskedLess(val1 interface{}) (err error) {
	// Lazily allocate the mask on the first masking operation.
	if !t.IsMasked() {
		t.makeMask()
	}
	switch t.t.Kind() {
	case reflect.Int:
		data := t.Ints()
		mask := t.mask
		x := val1.(int)
		if t.maskIsSoft {
			// Soft mask: overwrite with the fresh predicate.
			for i := range data {
				a := data[i]
				mask[i] = (a < x)
			}
		} else {
			// Hard mask: accumulate — never unmask an already-masked element.
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a < x)
			}
		}
	// The remaining cases are type-specialized copies of the pattern above.
	case reflect.Int8:
		data := t.Int8s()
		mask := t.mask
		x := val1.(int8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a < x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a < x)
			}
		}
	case reflect.Int16:
		data := t.Int16s()
		mask := t.mask
		x := val1.(int16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a < x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a < x)
			}
		}
	case reflect.Int32:
		data := t.Int32s()
		mask := t.mask
		x := val1.(int32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a < x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a < x)
			}
		}
	case reflect.Int64:
		data := t.Int64s()
		mask := t.mask
		x := val1.(int64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a < x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a < x)
			}
		}
	case reflect.Uint:
		data := t.Uints()
		mask := t.mask
		x := val1.(uint)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a < x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a < x)
			}
		}
	case reflect.Uint8:
		data := t.Uint8s()
		mask := t.mask
		x := val1.(uint8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a < x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a < x)
			}
		}
	case reflect.Uint16:
		data := t.Uint16s()
		mask := t.mask
		x := val1.(uint16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a < x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a < x)
			}
		}
	case reflect.Uint32:
		data := t.Uint32s()
		mask := t.mask
		x := val1.(uint32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a < x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a < x)
			}
		}
	case reflect.Uint64:
		data := t.Uint64s()
		mask := t.mask
		x := val1.(uint64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a < x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a < x)
			}
		}
	case reflect.Float32:
		data := t.Float32s()
		mask := t.mask
		x := val1.(float32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a < x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a < x)
			}
		}
	case reflect.Float64:
		data := t.Float64s()
		mask := t.mask
		x := val1.(float64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a < x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a < x)
			}
		}
	case reflect.String:
		data := t.Strings()
		mask := t.mask
		x := val1.(string)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a < x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a < x)
			}
		}
	}
	return nil
}
/* MaskedLessEqual */

// MaskedLessEqual sets the mask to true where the corresponding data is less
// than or equal to val1. If the mask is soft, the whole mask is recomputed
// from the predicate; if the mask is hard, the new predicate is OR-ed into the
// existing mask, so already-masked elements stay masked.
// val1 must be the same type as the tensor's elements (the type assertion
// panics otherwise); for strings the comparison is Go's lexicographic byte
// ordering. Element kinds not listed in the switch leave the mask unchanged.
func (t *Dense) MaskedLessEqual(val1 interface{}) (err error) {
	// Lazily allocate the mask on the first masking operation.
	if !t.IsMasked() {
		t.makeMask()
	}
	switch t.t.Kind() {
	case reflect.Int:
		data := t.Ints()
		mask := t.mask
		x := val1.(int)
		if t.maskIsSoft {
			// Soft mask: overwrite with the fresh predicate.
			for i := range data {
				a := data[i]
				mask[i] = (a <= x)
			}
		} else {
			// Hard mask: accumulate — never unmask an already-masked element.
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a <= x)
			}
		}
	// The remaining cases are type-specialized copies of the pattern above.
	case reflect.Int8:
		data := t.Int8s()
		mask := t.mask
		x := val1.(int8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a <= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a <= x)
			}
		}
	case reflect.Int16:
		data := t.Int16s()
		mask := t.mask
		x := val1.(int16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a <= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a <= x)
			}
		}
	case reflect.Int32:
		data := t.Int32s()
		mask := t.mask
		x := val1.(int32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a <= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a <= x)
			}
		}
	case reflect.Int64:
		data := t.Int64s()
		mask := t.mask
		x := val1.(int64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a <= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a <= x)
			}
		}
	case reflect.Uint:
		data := t.Uints()
		mask := t.mask
		x := val1.(uint)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a <= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a <= x)
			}
		}
	case reflect.Uint8:
		data := t.Uint8s()
		mask := t.mask
		x := val1.(uint8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a <= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a <= x)
			}
		}
	case reflect.Uint16:
		data := t.Uint16s()
		mask := t.mask
		x := val1.(uint16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a <= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a <= x)
			}
		}
	case reflect.Uint32:
		data := t.Uint32s()
		mask := t.mask
		x := val1.(uint32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a <= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a <= x)
			}
		}
	case reflect.Uint64:
		data := t.Uint64s()
		mask := t.mask
		x := val1.(uint64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a <= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a <= x)
			}
		}
	case reflect.Float32:
		data := t.Float32s()
		mask := t.mask
		x := val1.(float32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a <= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a <= x)
			}
		}
	case reflect.Float64:
		data := t.Float64s()
		mask := t.mask
		x := val1.(float64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a <= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a <= x)
			}
		}
	case reflect.String:
		data := t.Strings()
		mask := t.mask
		x := val1.(string)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = (a <= x)
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || (a <= x)
			}
		}
	}
	return nil
}
/* MaskedInside */

// MaskedInside sets the mask to true where the corresponding data lies inside
// the closed interval [val1, val2], i.e. val1 <= a <= val2. If the mask is
// soft, the whole mask is recomputed from the predicate; if the mask is hard,
// the new predicate is OR-ed into the existing mask, so already-masked
// elements stay masked.
// val1 and val2 must be the same type as the tensor's elements (the type
// assertions panic otherwise); for strings the comparisons are Go's
// lexicographic byte ordering. Note that callers are expected to pass
// val1 <= val2 — if val1 > val2 the predicate is false everywhere.
// Element kinds not listed in the switch leave the mask unchanged.
func (t *Dense) MaskedInside(val1 interface{}, val2 interface{}) (err error) {
	// Lazily allocate the mask on the first masking operation.
	if !t.IsMasked() {
		t.makeMask()
	}
	switch t.t.Kind() {
	case reflect.Int:
		data := t.Ints()
		mask := t.mask
		x := val1.(int)
		y := val2.(int)
		if t.maskIsSoft {
			// Soft mask: overwrite with the fresh predicate.
			for i := range data {
				a := data[i]
				mask[i] = ((a >= x) && (a <= y))
			}
		} else {
			// Hard mask: accumulate — never unmask an already-masked element.
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a >= x) && (a <= y))
			}
		}
	// The remaining cases are type-specialized copies of the pattern above.
	case reflect.Int8:
		data := t.Int8s()
		mask := t.mask
		x := val1.(int8)
		y := val2.(int8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a >= x) && (a <= y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a >= x) && (a <= y))
			}
		}
	case reflect.Int16:
		data := t.Int16s()
		mask := t.mask
		x := val1.(int16)
		y := val2.(int16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a >= x) && (a <= y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a >= x) && (a <= y))
			}
		}
	case reflect.Int32:
		data := t.Int32s()
		mask := t.mask
		x := val1.(int32)
		y := val2.(int32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a >= x) && (a <= y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a >= x) && (a <= y))
			}
		}
	case reflect.Int64:
		data := t.Int64s()
		mask := t.mask
		x := val1.(int64)
		y := val2.(int64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a >= x) && (a <= y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a >= x) && (a <= y))
			}
		}
	case reflect.Uint:
		data := t.Uints()
		mask := t.mask
		x := val1.(uint)
		y := val2.(uint)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a >= x) && (a <= y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a >= x) && (a <= y))
			}
		}
	case reflect.Uint8:
		data := t.Uint8s()
		mask := t.mask
		x := val1.(uint8)
		y := val2.(uint8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a >= x) && (a <= y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a >= x) && (a <= y))
			}
		}
	case reflect.Uint16:
		data := t.Uint16s()
		mask := t.mask
		x := val1.(uint16)
		y := val2.(uint16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a >= x) && (a <= y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a >= x) && (a <= y))
			}
		}
	case reflect.Uint32:
		data := t.Uint32s()
		mask := t.mask
		x := val1.(uint32)
		y := val2.(uint32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a >= x) && (a <= y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a >= x) && (a <= y))
			}
		}
	case reflect.Uint64:
		data := t.Uint64s()
		mask := t.mask
		x := val1.(uint64)
		y := val2.(uint64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a >= x) && (a <= y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a >= x) && (a <= y))
			}
		}
	case reflect.Float32:
		data := t.Float32s()
		mask := t.mask
		x := val1.(float32)
		y := val2.(float32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a >= x) && (a <= y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a >= x) && (a <= y))
			}
		}
	case reflect.Float64:
		data := t.Float64s()
		mask := t.mask
		x := val1.(float64)
		y := val2.(float64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a >= x) && (a <= y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a >= x) && (a <= y))
			}
		}
	case reflect.String:
		data := t.Strings()
		mask := t.mask
		x := val1.(string)
		y := val2.(string)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a >= x) && (a <= y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a >= x) && (a <= y))
			}
		}
	}
	return nil
}
/* MaskedOutside */

// MaskedOutside sets the mask to true where the corresponding data lies
// outside the closed range [val1, val2] (i.e. data < val1 || data > val2).
// val1 and val2 must have exactly the tensor's element type: the type
// assertions below panic on a mismatch (NOTE(review): the contract says
// "must be the same type" but nothing enforces it gracefully — confirm
// callers validate).
//
// Mask-mode behavior:
//   - soft mask (t.maskIsSoft): the previous mask is discarded and replaced;
//   - hard mask: the new result is OR-ed into the existing mask, so already
//     masked elements stay masked.
//
// An element kind not covered by the switch is silently a no-op; the method
// always returns nil. (This file follows a generated one-case-per-kind
// pattern; all case bodies are identical modulo the concrete type.)
func (t *Dense) MaskedOutside(val1 interface{}, val2 interface{}) (err error) {
	// Lazily allocate a mask on first use.
	if !t.IsMasked() {
		t.makeMask()
	}
	switch t.t.Kind() {
	case reflect.Int:
		data := t.Ints()
		mask := t.mask
		x := val1.(int)
		y := val2.(int)
		if t.maskIsSoft {
			// Soft mask: overwrite any previous mask state.
			for i := range data {
				a := data[i]
				mask[i] = ((a < x) || (a > y))
			}
		} else {
			// Hard mask: accumulate with OR.
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a < x) || (a > y))
			}
		}
	case reflect.Int8:
		data := t.Int8s()
		mask := t.mask
		x := val1.(int8)
		y := val2.(int8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a < x) || (a > y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a < x) || (a > y))
			}
		}
	case reflect.Int16:
		data := t.Int16s()
		mask := t.mask
		x := val1.(int16)
		y := val2.(int16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a < x) || (a > y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a < x) || (a > y))
			}
		}
	case reflect.Int32:
		data := t.Int32s()
		mask := t.mask
		x := val1.(int32)
		y := val2.(int32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a < x) || (a > y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a < x) || (a > y))
			}
		}
	case reflect.Int64:
		data := t.Int64s()
		mask := t.mask
		x := val1.(int64)
		y := val2.(int64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a < x) || (a > y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a < x) || (a > y))
			}
		}
	case reflect.Uint:
		data := t.Uints()
		mask := t.mask
		x := val1.(uint)
		y := val2.(uint)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a < x) || (a > y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a < x) || (a > y))
			}
		}
	case reflect.Uint8:
		data := t.Uint8s()
		mask := t.mask
		x := val1.(uint8)
		y := val2.(uint8)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a < x) || (a > y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a < x) || (a > y))
			}
		}
	case reflect.Uint16:
		data := t.Uint16s()
		mask := t.mask
		x := val1.(uint16)
		y := val2.(uint16)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a < x) || (a > y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a < x) || (a > y))
			}
		}
	case reflect.Uint32:
		data := t.Uint32s()
		mask := t.mask
		x := val1.(uint32)
		y := val2.(uint32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a < x) || (a > y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a < x) || (a > y))
			}
		}
	case reflect.Uint64:
		data := t.Uint64s()
		mask := t.mask
		x := val1.(uint64)
		y := val2.(uint64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a < x) || (a > y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a < x) || (a > y))
			}
		}
	case reflect.Float32:
		data := t.Float32s()
		mask := t.mask
		x := val1.(float32)
		y := val2.(float32)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a < x) || (a > y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a < x) || (a > y))
			}
		}
	case reflect.Float64:
		data := t.Float64s()
		mask := t.mask
		x := val1.(float64)
		y := val2.(float64)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a < x) || (a > y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a < x) || (a > y))
			}
		}
	case reflect.String:
		// Strings use lexicographic ordering.
		data := t.Strings()
		mask := t.mask
		x := val1.(string)
		y := val2.(string)
		if t.maskIsSoft {
			for i := range data {
				a := data[i]
				mask[i] = ((a < x) || (a > y))
			}
		} else {
			for i := range data {
				a := data[i]
				mask[i] = mask[i] || ((a < x) || (a > y))
			}
		}
	}
	return nil
}
package lsb
import (
"encoding/binary"
"image"
"image/color"
)
// Hide writes data into copy of src using LSB (least significant bit) method.
// It includes 32 bit length value in data before payload.
//
// Layout: a 4-byte little-endian length header is prepended to data, and the
// combined bit stream is written LSB-first (bit 0 of each byte first) into
// the least significant bit of the R, G and B channels of consecutive
// pixels, scanning left-to-right, top-to-bottom. The alpha channel is never
// modified.
//
// NOTE(review): if the image has fewer than (4+len(data))*8 usable channel
// bits, the tail of data is silently dropped — confirm callers check
// capacity before hiding.
func Hide(src image.Image, data []byte) image.Image {
	bounds := src.Bounds()
	out := image.NewRGBA(bounds)
	var currentBit uint32
	dataLen := uint32(len(data))
	// Prepend the payload length as a 4-byte little-endian header.
	allData := make([]byte, 4)
	binary.LittleEndian.PutUint32(allData, dataLen)
	allData = append(allData, data...)
	dataLen = uint32(len(allData)) // total bytes to embed, header included
	for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			r, g, b, a := src.At(x, y).RGBA()
			// RGBA() yields 16-bit channels; keep only the high byte.
			values := toUint8Array(r, g, b, a)
			currentByte := currentBit / 8
			// Leave alpha channel untouched
			for i := 0; i < 3 && currentByte < dataLen; i++ {
				// Overwrite the channel's LSB (position 0) with the next
				// bit of the embedded stream.
				values[i] = setBit(values[i], bitAt(allData[currentByte], currentBit%8), 0)
				currentBit++
				currentByte = currentBit / 8
			}
			newColor := color.RGBA{
				R: values[0],
				G: values[1],
				B: values[2],
				A: values[3],
			}
			out.Set(x, y, newColor)
		}
	}
	return out
}
// Reveal reads length header and payload from src.
// Only payload is included in returned data.
//
// It is the inverse of Hide: bits are collected from the least significant
// bit of the R, G and B channels of consecutive pixels (only channels 0..2
// are read, so alpha is ignored), LSB-first within each reconstructed byte.
// The first four bytes form the little-endian payload length; reading stops
// once that many payload bytes have been assembled.
//
// NOTE(review): for images not produced by Hide, the decoded length may
// exceed the available pixels (short result), and an image too small to
// even hold the 4-byte header makes data[4:] panic — confirm inputs are
// trusted.
func Reveal(src image.Image) []byte {
	data := make([]byte, 0)
	// Until the real length is known, only the 4 header bytes are read.
	var dataLen uint32 = 4
	bounds := src.Bounds()
	var currentBit uint32
	var buffer byte
	for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			r, g, b, a := src.At(x, y).RGBA()
			colorChannels := toUint8Array(r, g, b, a)
			currentByte := currentBit / 8
			for i := 0; i < 3 && currentByte < dataLen; i++ {
				// Place the channel's LSB at the buffer position matching
				// the bit order used by Hide.
				buffer = setBit(buffer, bitAt(colorChannels[i], 0), uint8(currentBit%8))
				currentBit++
				// One whole byte has been written to buffer
				if currentBit%8 == 0 {
					data = append(data, buffer)
					// The first four bytes contain the payload length;
					// restart the bit counter so it now counts payload
					// bits (pixel iteration itself is not reset).
					if len(data) == 4 {
						dataLen = binary.LittleEndian.Uint32(data)
						currentBit = 0
					}
				}
				currentByte = currentBit / 8
			}
		}
	}
	return data[4:]
}
// toUint8Array narrows each 16-bit-per-channel color value (as returned by
// color.Color.RGBA) down to its high byte.
func toUint8Array(values ...uint32) []uint8 {
	out := make([]uint8, 0, len(values))
	for _, v := range values {
		out = append(out, uint8(v>>8))
	}
	return out
}
// bitAt reports the bit of val at position pos (0 = least significant) as 0 or 1.
func bitAt(val byte, pos uint32) uint8 {
	shifted := val >> pos
	return uint8(shifted & 1)
}
// setBit returns target with the bit at position pos replaced by val (0 or 1).
func setBit(target uint8, val uint8, pos uint8) uint8 {
	mask := uint8(1) << pos
	cleared := target &^ mask
	return cleared | (val << pos)
}
package calendar
import (
"fmt"
"time"
"go.uber.org/zap"
)
// Calendar is a named collection of inclusion/exclusion Periods which can be
// combined (unioned) with other calendars referenced by UnionCalendarIDs
// (see ResolveCalendar).
type Calendar struct {
	ID               int64    `json:"id,omitempty"`
	Name             string   `json:"name"`
	Description      string   `json:"description"`
	Periods          []Period `json:"periods"`
	UnionCalendarIDs []int64  `json:"unionCalendarIDs,omitempty"`
	Enabled          bool     `json:"enabled"`
}

// InPeriodContains is a JSON wrapper around the boolean result of a
// "does this calendar contain the instant" query.
type InPeriodContains struct {
	Contains bool `json:"contains"`
}
// contains check if a calendar contains a specific time (based on inclusion and exclusion periods)
// This function only checks the calendar periods (but not the unioned calendars)
// Therefore, the calendar MUST have been resolved / flatten before calling this function
//
// It returns the overall verdict plus the per-dimension (month / day /
// time-of-day) statuses. Later periods overwrite the status of a dimension
// they match, so period order matters. A period that is out of range on any
// dimension is treated as out of range on all three.
func (c Calendar) contains(t time.Time) (bool, PeriodStatus, PeriodStatus, PeriodStatus) {
	statusMonth := NoInfo
	statusDay := NoInfo
	statusTime := NoInfo
	for _, period := range c.Periods {
		// NOTE(review): the local variable `time` shadows the time package
		// inside this loop body — consider renaming.
		month, day, time := period.contains(t)
		// Any out-of-range dimension disqualifies the whole period.
		if month == OutOfPeriod || day == OutOfPeriod || time == OutOfPeriod {
			month = OutOfPeriod
			day = OutOfPeriod
			time = OutOfPeriod
		}
		// Record, per dimension the period matches, whether the period
		// includes or excludes the instant (later periods win).
		if month == InPeriod {
			statusMonth = includedToStatus(period.Included)
		}
		if day == InPeriod {
			statusDay = includedToStatus(period.Included)
		}
		if time == InPeriod {
			statusTime = includedToStatus(period.Included)
		}
	}
	// Contained iff at least one dimension matched and none ended up
	// explicitly excluded.
	status := true
	if statusMonth == NoInfo && statusDay == NoInfo && statusTime == NoInfo {
		status = false
	}
	if statusMonth == OutOfPeriod || statusDay == OutOfPeriod || statusTime == OutOfPeriod {
		status = false
	}
	return status, statusMonth, statusDay, statusTime
}
// includedToStatus maps a period's Included flag to the matching
// PeriodStatus value.
func includedToStatus(included bool) PeriodStatus {
	if !included {
		return OutOfPeriod
	}
	return InPeriod
}
// getCalendar looks up a calendar by id in the in-memory calendar base.
// The error return is currently always nil and exists for interface
// symmetry with other lookups.
func getCalendar(id int64) (Calendar, bool, error) {
	calendar, found := _globalCBase.calendars[id]
	return calendar, found, nil
}

// setCalendar stores (or replaces) a calendar in the in-memory calendar
// base, keyed by its ID.
func setCalendar(calendar Calendar) {
	_globalCBase.calendars[calendar.ID] = calendar
}
// ResolveCalendar resolve a calendar definition dynamically and recursively with its subcalendars
// Resolution order :
// - For each sub-calendar (with respect of order)
//   - Sub-calendar Periods (with respect of order)
// - Calendar Periods (with respect of order)
//
// joinedCalendars carries the chain of calendar IDs already visited on this
// resolution path; it is used to break circular references (a cycle is
// skipped with a warning, not an error). Missing or unloadable union
// calendars are likewise logged and skipped, so resolution is best-effort.
func (c Calendar) ResolveCalendar(joinedCalendars []int64) Calendar {
	// Record this calendar on the current path. Note the append mutates
	// only the local slice header; sibling recursions each extend their
	// own prefix of the shared backing array, which is safe because every
	// chain only reads its own prefix.
	joinedCalendars = append(joinedCalendars, c.ID)

	periods := make([]Period, 0)

	// Append unioned calendars periods
	for _, unionCalendarID := range c.UnionCalendarIDs {
		// Would following this union revisit a calendar already on the path?
		var circularReference bool
		for _, id := range joinedCalendars {
			if id == unionCalendarID {
				circularReference = true
				break
			}
		}
		if circularReference {
			zap.L().Warn("Skipping Calendar union in order to avoid a circular reference", zap.Int64s("joinedCalendars", joinedCalendars), zap.Int64("unionCalendarID", unionCalendarID))
			continue
		}

		unionCalendar, found, err := getCalendar(unionCalendarID)
		if !found {
			zap.L().Warn("The calendar to join was not found", zap.Int64("calendarID", c.ID), zap.Int64("unionedCalendarID", unionCalendarID))
			continue
		}
		if err != nil {
			zap.L().Error("Cannot get calendar", zap.Int64("calendarID", c.ID), zap.Int64("unionedCalendarID", unionCalendarID), zap.Error(err))
			continue
		}

		// Recurse so nested unions are flattened as well.
		unionCalendarResolved := unionCalendar.ResolveCalendar(joinedCalendars)
		periods = append(periods, unionCalendarResolved.Periods...)
	}

	// Append current calendar periods
	periods = append(periods, c.Periods...)

	// The result is flat: UnionCalendarIDs is emptied so a resolved
	// calendar is never re-resolved.
	return Calendar{
		ID:               c.ID,
		Name:             fmt.Sprintf("%s (resolved)", c.Name),
		Description:      c.Description,
		Periods:          periods,
		Enabled:          c.Enabled,
		UnionCalendarIDs: []int64{},
	}
}
package codec
import (
"encoding/binary"
"github.com/juju/errors"
)
const signMask uint64 = 0x8000000000000000

// encodeIntToCmpUint maps an int64 onto a uint64 whose unsigned ordering
// matches the signed ordering of the input. Toggling the sign bit sends
// negative values below non-negative ones.
func encodeIntToCmpUint(v int64) uint64 {
	// XOR with the sign mask flips the top bit unconditionally, which is
	// exactly "clear it when set, set it when clear".
	return uint64(v) ^ signMask
}

// decodeCmpUintToInt is the inverse of encodeIntToCmpUint: it toggles the
// sign bit back and reinterprets the result as a signed value.
func decodeCmpUintToInt(u uint64) int64 {
	return int64(u ^ signMask)
}
// EncodeInt appends the encoded value to slice b and returns the appended slice.
// The 8-byte big-endian, sign-flipped form compares bytewise in the same
// (ascending) order as the original signed values.
func EncodeInt(b []byte, v int64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, encodeIntToCmpUint(v))
	return append(b, buf...)
}
// EncodeIntDesc appends the encoded value to slice b and returns the appended slice.
// The bit-complemented encoding compares bytewise in descending order of the
// original signed values.
func EncodeIntDesc(b []byte, v int64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, ^encodeIntToCmpUint(v))
	return append(b, buf...)
}
// DecodeInt decodes value encoded by EncodeInt before.
// It returns the leftover un-decoded slice and the decoded value if no error.
func DecodeInt(b []byte) ([]byte, int64, error) {
	if len(b) < 8 {
		return nil, 0, errors.New("insufficient bytes to decode value")
	}
	v := decodeCmpUintToInt(binary.BigEndian.Uint64(b[:8]))
	return b[8:], v, nil
}
// DecodeIntDesc decodes value encoded by EncodeIntDesc before.
// It returns the leftover un-decoded slice and the decoded value if no error.
func DecodeIntDesc(b []byte) ([]byte, int64, error) {
	if len(b) < 8 {
		return nil, 0, errors.New("insufficient bytes to decode value")
	}
	v := decodeCmpUintToInt(^binary.BigEndian.Uint64(b[:8]))
	return b[8:], v, nil
}
// EncodeUint appends the encoded value to slice b and returns the appended slice.
// The 8-byte big-endian form compares bytewise in ascending order.
func EncodeUint(b []byte, v uint64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, v)
	return append(b, buf...)
}
// EncodeUintDesc appends the encoded value to slice b and returns the appended slice.
// The bit-complemented big-endian form compares bytewise in descending order.
func EncodeUintDesc(b []byte, v uint64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, ^v)
	return append(b, buf...)
}
// DecodeUint decodes value encoded by EncodeUint before.
// It returns the leftover un-decoded slice and the decoded value if no error.
func DecodeUint(b []byte) ([]byte, uint64, error) {
	if len(b) < 8 {
		return nil, 0, errors.New("insufficient bytes to decode value")
	}
	v := binary.BigEndian.Uint64(b[:8])
	return b[8:], v, nil
}
// DecodeUintDesc decodes value encoded by EncodeUintDesc before.
// It returns the leftover un-decoded slice and the decoded value if no error.
func DecodeUintDesc(b []byte) ([]byte, uint64, error) {
	if len(b) < 8 {
		return nil, 0, errors.New("insufficient bytes to decode value")
	}
	// Undo the bit complement applied by the descending encoder.
	v := ^binary.BigEndian.Uint64(b[:8])
	return b[8:], v, nil
}
// EncodeVarint appends the encoded value to slice b and returns the appended slice.
// Note that the encoded result is not memcomparable (zig-zag varint form).
func EncodeVarint(b []byte, v int64) []byte {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutVarint(buf, v)
	return append(b, buf[:n]...)
}
// DecodeVarint decodes value encoded by EncodeVarint before.
// It returns the leftover un-decoded slice and the decoded value if no error.
func DecodeVarint(b []byte) ([]byte, int64, error) {
	v, n := binary.Varint(b)
	switch {
	case n > 0:
		return b[n:], v, nil
	case n < 0:
		return nil, 0, errors.New("value larger than 64 bits")
	default:
		return nil, 0, errors.New("insufficient bytes to decode value")
	}
}
// EncodeUvarint appends the encoded value to slice b and returns the appended slice.
// Note that the encoded result is not memcomparable (LEB128-style varint).
func EncodeUvarint(b []byte, v uint64) []byte {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, v)
	return append(b, buf[:n]...)
}
// DecodeUvarint decodes value encoded by EncodeUvarint before.
// It returns the leftover un-decoded slice and the decoded value if no error.
func DecodeUvarint(b []byte) ([]byte, uint64, error) {
	v, n := binary.Uvarint(b)
	switch {
	case n > 0:
		return b[n:], v, nil
	case n < 0:
		return nil, 0, errors.New("value larger than 64 bits")
	default:
		return nil, 0, errors.New("insufficient bytes to decode value")
	}
}
package groups
import (
"crypto/dsa"
"crypto/rand"
"fmt"
"math/big"
"github.com/xlab-si/emmy/crypto/common"
)
// SchnorrGroup is a cyclic group in modular arithmetic. It holds P = Q * R + 1 for some R.
// The actual value R is never used (although a random element from this group could be computed
// by a^R for some random a from Z_p* - this element would have order Q and would be thus from this group),
// the important thing is that Q divides P-1.
// Elements of the group are the Q integers x < P with x^Q = 1 mod P
// (see IsElementInGroup).
type SchnorrGroup struct {
	P *big.Int // modulus of the group
	G *big.Int // generator of subgroup
	Q *big.Int // order of G
}
// NewSchnorrGroup generates random SchnorrGroup with generator G and
// parameters P and Q where P = R * Q + 1 for some R. Order of G is Q.
func NewSchnorrGroup(qBitLength int) (*SchnorrGroup, error) {
// Using DSA GenerateParameters:
sizes := dsa.L1024N160
if qBitLength == 160 {
sizes = dsa.L1024N160
} else if qBitLength == 224 {
sizes = dsa.L2048N224
} else if qBitLength == 256 {
sizes = dsa.L2048N256
} else {
err := fmt.Errorf("generating Schnorr primes for bit length %d is not supported", qBitLength)
return nil, err
}
params := dsa.Parameters{}
err := dsa.GenerateParameters(¶ms, rand.Reader, sizes)
if err != nil {
return nil, err
}
return &SchnorrGroup{
P: params.P,
G: params.G,
Q: params.Q,
}, nil
}
// NewSchnorrGroupFromParams wraps existing parameters p, g and q into a
// SchnorrGroup without any validation.
func NewSchnorrGroupFromParams(p, g, q *big.Int) *SchnorrGroup {
	group := new(SchnorrGroup)
	group.P = p
	group.G = g
	group.Q = q
	return group
}
// NewSchnorrSafePrimeGroup generates random SchnorrGroup with generator G and
// parameters P and Q where P = 2 * Q + 1 (P is a safe prime). Order of G is Q.
// Note that this group is a special case of group returned by GetSchnorrGroup (R = 2).
func NewSchnorrSafePrimeGroup(modulusBitLength int) (*SchnorrGroup, error) {
	p, err := common.GetSafePrime(modulusBitLength)
	if err != nil {
		return nil, err
	}

	// Since p is a safe prime, q = (p - 1) / 2.
	pMinusOne := new(big.Int).Sub(p, big.NewInt(1))
	q := new(big.Int).Div(pMinusOne, big.NewInt(2))

	// p = 2 * q + 1
	g, err := common.GetGeneratorOfZnSubgroup(p, pMinusOne, q)
	if err != nil {
		return nil, err
	}

	return &SchnorrGroup{
		P: p,
		G: g,
		Q: q,
	}, nil
}
// GetRandomElement returns a random element from this group. Note that elements from this group
// are integers smaller than group.P, but not all - only Q of them. GetRandomElement returns
// one (random) of these Q elements.
// It does so by raising the generator G to a random exponent r < Q.
func (group *SchnorrGroup) GetRandomElement() *big.Int {
	r := common.GetRandomInt(group.Q)
	el := group.Exp(group.G, r)
	return el
}
// Add computes x + y in SchnorrGroup. This means x + y mod group.P.
func (group *SchnorrGroup) Add(x, y *big.Int) *big.Int {
	sum := new(big.Int).Add(x, y)
	return sum.Mod(sum, group.P)
}

// Mul computes x * y in SchnorrGroup. This means x * y mod group.P.
func (group *SchnorrGroup) Mul(x, y *big.Int) *big.Int {
	product := new(big.Int).Mul(x, y)
	return product.Mod(product, group.P)
}

// Exp computes base^exponent in SchnorrGroup. This means base^exponent mod group.P.
func (group *SchnorrGroup) Exp(base, exponent *big.Int) *big.Int {
	result := new(big.Int)
	return result.Exp(base, exponent, group.P)
}

// Inv computes the inverse of x in SchnorrGroup: the xInv with
// x * xInv = 1 mod group.P.
func (group *SchnorrGroup) Inv(x *big.Int) *big.Int {
	inverse := new(big.Int)
	return inverse.ModInverse(x, group.P)
}
// IsElementInGroup returns true if x is in the group and false otherwise. Note that
// an element x is in Schnorr group when x^group.Q = 1 mod group.P.
func (group *SchnorrGroup) IsElementInGroup(x *big.Int) bool {
check := group.Exp(x, group.Q) // should be 1
return check.Cmp(big.NewInt(1)) == 0
} | crypto/groups/schnorr.go | 0.72594 | 0.412116 | schnorr.go | starcoder |
package common
import (
"fmt"
"math"
"math/rand"
"reflect"
"testing"
)
// Swap two array values given their indices.
// Only []int and []string are supported; any other type is a silent no-op.
func Swap(v interface{}, i, j int) {
	switch slice := v.(type) {
	case []int:
		SwapInt(slice, i, j)
	case []string:
		SwapString(slice, i, j)
	}
}
// SwapInt swaps two elements of a, given their indices.
func SwapInt(a []int, i, j int) {
	// Idiomatic Go parallel assignment replaces the temporary variable.
	a[i], a[j] = a[j], a[i]
}
// SwapString swaps two elements of a, given their indices.
func SwapString(a []string, i, j int) {
	// Idiomatic Go parallel assignment replaces the temporary variable.
	a[i], a[j] = a[j], a[i]
}
// Mimax returns the minimum and maximum of a list of integers.
// It panics when called with no arguments.
func Mimax(nums ...int) (int, int) {
	lo, hi := nums[0], nums[0]
	for _, n := range nums[1:] {
		if n < lo {
			lo = n
		}
		if n > hi {
			hi = n
		}
	}
	return lo, hi
}
// Min returns the smallest of a list of integers.
// It panics when called with no arguments.
func Min(nums ...int) int {
	best := nums[0]
	for _, n := range nums[1:] {
		if n < best {
			best = n
		}
	}
	return best
}
// Max returns the largest of a list of integers.
// It panics when called with no arguments.
func Max(nums ...int) int {
	best := nums[0]
	for _, n := range nums[1:] {
		if n > best {
			best = n
		}
	}
	return best
}
// Random returns a pseudo-random integer in the half-open range [min, max);
// when min == max it returns min. It panics if max < min.
func Random(min, max int) int {
	if min == max {
		return min
	}
	span := max - min
	return min + rand.Intn(span)
}
// ChanToSlice drains a channel into a slice, in arrival order, until the
// channel is closed. The result is non-nil even when empty.
func ChanToSlice(ch chan int) []int {
	result := []int{}
	for {
		v, ok := <-ch
		if !ok {
			return result
		}
		result = append(result, v)
	}
}
// Contain reports whether target appears in s.
func Contain(s []int, target int) bool {
	for i := range s {
		if s[i] == target {
			return true
		}
	}
	return false
}
// ContainString reports whether target appears in s.
func ContainString(s []string, target string) bool {
	for i := range s {
		if s[i] == target {
			return true
		}
	}
	return false
}
// Abs returns the absolute value for a given integer.
// NOTE(review): the round-trip through float64 loses precision for
// magnitudes above 2^53, so very large inputs can come back wrong —
// consider a pure-integer comparison instead. (The file-level "math"
// import exists for this function.)
func Abs(a int) int {
	return int(math.Abs(float64(a)))
}
// AbsDiff returns the absolute value of the difference between two integers.
func AbsDiff(a, b int) int {
	delta := a - b
	return Abs(delta)
}
// IsMoreThan1Apart checks if two integers are more than 1 apart.
func IsMoreThan1Apart(a, b int) bool {
	// Return the comparison directly instead of the
	// if/return-true/return-false chain.
	return AbsDiff(a, b) > 1
}
// IsLessThan1Apart checks if two integers are at most 1 apart
// (difference of 0 or 1).
func IsLessThan1Apart(a, b int) bool {
	// Return the comparison directly instead of the
	// if/return-true/return-false chain.
	return AbsDiff(a, b) <= 1
}
// Log prints out the map of logging context and value.
// Each key/value pair is printed on its own line between debug markers.
// Note: Go map iteration order is randomized, so line order varies per run.
func Log(m map[string]interface{}) {
	fmt.Println("[debug] →")
	for k, v := range m {
		fmt.Printf("\t%v: %+v\n", k, v)
	}
	fmt.Println("[debug] □")
}
// Equal is a test helper: it reports a test error when expected and result
// are not deeply equal.
func Equal(t *testing.T, expected, result interface{}) {
	if reflect.DeepEqual(result, expected) {
		return
	}
	t.Errorf("should be %v instead of %v", expected, result)
}
package layers
import (
"encoding/binary"
"fmt"
"github.com/tsg/gopacket"
"hash/crc32"
)
// SCTP contains information on the top level of an SCTP packet:
// the 12-byte common header (ports, verification tag, checksum).
type SCTP struct {
	BaseLayer
	SrcPort, DstPort SCTPPort
	VerificationTag  uint32
	Checksum         uint32
	// raw big-endian port bytes, retained for building flow endpoints
	sPort, dPort []byte
}

// LayerType returns gopacket.LayerTypeSCTP
func (s *SCTP) LayerType() gopacket.LayerType { return LayerTypeSCTP }
// decodeSCTP parses the 12-byte SCTP common header from data, registers the
// layer as the packet's transport layer, and hands the remaining bytes to
// the chunk decoder.
func decodeSCTP(data []byte, p gopacket.PacketBuilder) error {
	// Guard against truncated packets: the common header is 12 bytes
	// (2x2 port bytes, 4-byte verification tag, 4-byte checksum);
	// the slicing below would panic otherwise.
	if len(data) < 12 {
		return fmt.Errorf("SCTP packet too small: %d bytes", len(data))
	}
	sctp := &SCTP{
		SrcPort:         SCTPPort(binary.BigEndian.Uint16(data[:2])),
		sPort:           data[:2],
		DstPort:         SCTPPort(binary.BigEndian.Uint16(data[2:4])),
		dPort:           data[2:4],
		VerificationTag: binary.BigEndian.Uint32(data[4:8]),
		Checksum:        binary.BigEndian.Uint32(data[8:12]),
		BaseLayer:       BaseLayer{data[:12], data[12:]},
	}
	p.AddLayer(sctp)
	p.SetTransportLayer(sctp)
	return p.NextDecoder(sctpChunkTypePrefixDecoder)
}
// sctpChunkTypePrefixDecoder dispatches on the chunk-type byte that prefixes
// every SCTP chunk.
var sctpChunkTypePrefixDecoder = gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix)

// TransportFlow returns a flow based on the source and destination SCTP port.
func (s *SCTP) TransportFlow() gopacket.Flow {
	return gopacket.NewFlow(EndpointSCTPPort, s.sPort, s.dPort)
}
// decodeWithSCTPChunkTypePrefix looks at the first byte of the remaining
// data (the chunk type) and dispatches to the matching chunk decoder.
func decodeWithSCTPChunkTypePrefix(data []byte, p gopacket.PacketBuilder) error {
	// Every SCTP chunk starts with a 4-byte header (type, flags, length);
	// without this guard an empty or truncated buffer would panic on
	// data[0] or on the header reads in the chunk decoders.
	if len(data) < 4 {
		return fmt.Errorf("SCTP chunk too small: %d bytes", len(data))
	}
	chunkType := SCTPChunkType(data[0])
	return chunkType.Decode(data, p)
}
// SerializeTo is for gopacket.SerializableLayer.
// It prepends the 12-byte SCTP common header. When opts.ComputeChecksums is
// set, the CRC32 (Castagnoli polynomial) over the full serialized buffer is
// written into the checksum field.
// NOTE(review): the checksum is stored little-endian while every other
// field is big-endian — presumably the required CRC32c byte order; confirm
// against RFC 3309 before changing.
func (s SCTP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	bytes, err := b.PrependBytes(12)
	if err != nil {
		return err
	}
	binary.BigEndian.PutUint16(bytes[0:2], uint16(s.SrcPort))
	binary.BigEndian.PutUint16(bytes[2:4], uint16(s.DstPort))
	binary.BigEndian.PutUint32(bytes[4:8], s.VerificationTag)
	if opts.ComputeChecksums {
		// Note: MakeTable(Castagnoli) actually only creates the table once, then
		// passes back a singleton on every other call, so this shouldn't cause
		// excessive memory allocation.
		binary.LittleEndian.PutUint32(bytes[8:12], crc32.Checksum(b.Bytes(), crc32.MakeTable(crc32.Castagnoli)))
	}
	return nil
}
// SCTPChunk contains the common fields in all SCTP chunks.
type SCTPChunk struct {
	BaseLayer
	Type   SCTPChunkType
	Flags  uint8
	Length uint16
	// ActualLength is the total length of an SCTP chunk, including padding.
	// SCTP chunks start and end on 4-byte boundaries. So if a chunk has a length
	// of 18, it means that it has data up to and including byte 18, then padding
	// up to the next 4-byte boundary, 20. In this case, Length would be 18, and
	// ActualLength would be 20.
	ActualLength int
}
// roundUpToNearest4 rounds i up to the next multiple of 4 (SCTP chunks and
// parameters are padded to 4-byte boundaries).
func roundUpToNearest4(i int) int {
	if rem := i % 4; rem != 0 {
		return i + 4 - rem
	}
	return i
}
// decodeSCTPChunk parses the common 4-byte chunk header (type, flags,
// length) and derives the padded ActualLength.
// NOTE(review): no bounds checking is performed here — callers must
// guarantee at least 4 bytes, and a Length field larger than the buffer
// makes the BaseLayer slicing below panic; confirm upstream guards.
func decodeSCTPChunk(data []byte) SCTPChunk {
	length := binary.BigEndian.Uint16(data[2:4])
	// Chunks are padded to 4-byte boundaries; ActualLength includes padding.
	actual := roundUpToNearest4(int(length))
	return SCTPChunk{
		Type:         SCTPChunkType(data[0]),
		Flags:        data[1],
		Length:       length,
		ActualLength: actual,
		BaseLayer:    BaseLayer{data[:actual], data[actual:]},
	}
}
// SCTPParameter is a TLV parameter inside a SCTPChunk.
type SCTPParameter struct {
	Type         uint16
	Length       uint16 // 4-byte header + value, excluding padding
	ActualLength int    // Length rounded up to a 4-byte boundary
	Value        []byte
}

// decodeSCTPParameter parses a single TLV parameter from data.
// NOTE(review): a Length below 4 or beyond the buffer makes the slice
// expression panic — confirm inputs are pre-validated by the chunk decoders.
func decodeSCTPParameter(data []byte) SCTPParameter {
	length := binary.BigEndian.Uint16(data[2:4])
	return SCTPParameter{
		Type:         binary.BigEndian.Uint16(data[0:2]),
		Length:       length,
		Value:        data[4:length],
		ActualLength: roundUpToNearest4(int(length)),
	}
}

// Bytes serializes the parameter back to its padded wire form: a 4-byte
// header followed by Value, zero-padded to a 4-byte boundary.
func (p SCTPParameter) Bytes() []byte {
	length := 4 + len(p.Value)
	data := make([]byte, roundUpToNearest4(length))
	binary.BigEndian.PutUint16(data[0:2], p.Type)
	binary.BigEndian.PutUint16(data[2:4], uint16(length))
	copy(data[4:], p.Value)
	return data
}
// SCTPUnknownChunkType is the layer type returned when we don't recognize the
// chunk type. Since there's a length in a known location, we can skip over
// it even if we don't know what it is, and continue parsing the rest of the
// chunks. This chunk is stored as an ErrorLayer in the packet.
type SCTPUnknownChunkType struct {
	SCTPChunk
	bytes []byte // raw bytes of the unrecognized chunk, header included
}

// decodeSCTPChunkTypeUnknown records the raw chunk bytes, marks the packet
// with an error layer, and continues with the next chunk.
func decodeSCTPChunkTypeUnknown(data []byte, p gopacket.PacketBuilder) error {
	sc := &SCTPUnknownChunkType{SCTPChunk: decodeSCTPChunk(data)}
	sc.bytes = data[:sc.ActualLength]
	p.AddLayer(sc)
	p.SetErrorLayer(sc)
	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
}

// SerializeTo is for gopacket.SerializableLayer.
// The chunk is re-emitted verbatim from the bytes captured at decode time.
func (s SCTPUnknownChunkType) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	bytes, err := b.PrependBytes(s.ActualLength)
	if err != nil {
		return err
	}
	copy(bytes, s.bytes)
	return nil
}

// LayerType returns gopacket.LayerTypeSCTPUnknownChunkType.
func (s *SCTPUnknownChunkType) LayerType() gopacket.LayerType { return LayerTypeSCTPUnknownChunkType }

// Payload returns all bytes in this header, including the decoded Type, Length,
// and Flags.
func (s *SCTPUnknownChunkType) Payload() []byte { return s.bytes }

// Error implements ErrorLayer.
func (s *SCTPUnknownChunkType) Error() error {
	return fmt.Errorf("No decode method available for SCTP chunk type %s", s.Type)
}
// SCTPData is the SCTP Data chunk layer.
type SCTPData struct {
	SCTPChunk
	Unordered, BeginFragment, EndFragment bool // U/B/E flag bits
	TSN                                   uint32
	StreamId                              uint16
	StreamSequence                        uint16
	PayloadProtocol                       uint32
	PayloadData                           []byte
}

// LayerType returns gopacket.LayerTypeSCTPData.
func (s *SCTPData) LayerType() gopacket.LayerType { return LayerTypeSCTPData }

// Payload returns the data payload of the SCTP data chunk.
func (s *SCTPData) Payload() []byte {
	return s.PayloadData
}

// decodeSCTPData parses a DATA chunk: a 16-byte header (common header plus
// TSN, stream id, stream sequence, payload protocol) followed by the user
// payload. The U/B/E flags come from the low three bits of the flags byte.
// NOTE(review): data[16:sc.Length] panics if Length < 16 or exceeds the
// buffer — confirm upstream validation.
func decodeSCTPData(data []byte, p gopacket.PacketBuilder) error {
	sc := &SCTPData{
		SCTPChunk:       decodeSCTPChunk(data),
		Unordered:       data[1]&0x4 != 0,
		BeginFragment:   data[1]&0x2 != 0,
		EndFragment:     data[1]&0x1 != 0,
		TSN:             binary.BigEndian.Uint32(data[4:8]),
		StreamId:        binary.BigEndian.Uint16(data[8:10]),
		StreamSequence:  binary.BigEndian.Uint16(data[10:12]),
		PayloadProtocol: binary.BigEndian.Uint32(data[12:16]),
	}
	// Length is the length in bytes of the data, INCLUDING the 16-byte header.
	sc.PayloadData = data[16:sc.Length]
	p.AddLayer(sc)
	p.SetApplicationLayer(sc)
	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
}

// SerializeTo is for gopacket.SerializableLayer.
// It rebuilds the 16-byte header plus payload, padded to a 4-byte boundary.
func (sc SCTPData) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	length := 16 + len(sc.PayloadData)
	bytes, err := b.PrependBytes(roundUpToNearest4(length))
	if err != nil {
		return err
	}
	bytes[0] = uint8(sc.Type)
	// Re-pack the U/B/E flag bits.
	flags := uint8(0)
	if sc.Unordered {
		flags |= 0x4
	}
	if sc.BeginFragment {
		flags |= 0x2
	}
	if sc.EndFragment {
		flags |= 0x1
	}
	bytes[1] = flags
	binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
	binary.BigEndian.PutUint32(bytes[4:8], sc.TSN)
	binary.BigEndian.PutUint16(bytes[8:10], sc.StreamId)
	binary.BigEndian.PutUint16(bytes[10:12], sc.StreamSequence)
	binary.BigEndian.PutUint32(bytes[12:16], sc.PayloadProtocol)
	copy(bytes[16:], sc.PayloadData)
	return nil
}
// SCTPInitParameter is a parameter for an SCTP Init or InitAck packet.
type SCTPInitParameter SCTPParameter

// SCTPInit is used as the return value for both SCTPInit and SCTPInitAck
// messages (the two chunks share this wire format).
type SCTPInit struct {
	SCTPChunk
	InitiateTag                    uint32
	AdvertisedReceiverWindowCredit uint32
	OutboundStreams, InboundStreams uint16
	InitialTSN                     uint32
	Parameters                     []SCTPInitParameter
}

// LayerType returns either gopacket.LayerTypeSCTPInit or gopacket.LayerTypeSCTPInitAck.
func (sc *SCTPInit) LayerType() gopacket.LayerType {
	if sc.Type == SCTPChunkTypeInitAck {
		return LayerTypeSCTPInitAck
	}
	// sc.Type == SCTPChunkTypeInit
	return LayerTypeSCTPInit
}

// decodeSCTPInit parses the 20-byte INIT/INIT-ACK header followed by a list
// of TLV parameters running to the padded end of the chunk.
func decodeSCTPInit(data []byte, p gopacket.PacketBuilder) error {
	sc := &SCTPInit{
		SCTPChunk:                      decodeSCTPChunk(data),
		InitiateTag:                    binary.BigEndian.Uint32(data[4:8]),
		AdvertisedReceiverWindowCredit: binary.BigEndian.Uint32(data[8:12]),
		OutboundStreams:                binary.BigEndian.Uint16(data[12:14]),
		InboundStreams:                 binary.BigEndian.Uint16(data[14:16]),
		InitialTSN:                     binary.BigEndian.Uint32(data[16:20]),
	}
	// Walk the parameter list; each parameter advances by its padded length.
	paramData := data[20:sc.ActualLength]
	for len(paramData) > 0 {
		p := SCTPInitParameter(decodeSCTPParameter(paramData))
		paramData = paramData[p.ActualLength:]
		sc.Parameters = append(sc.Parameters, p)
	}
	p.AddLayer(sc)
	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
}

// SerializeTo is for gopacket.SerializableLayer.
// It re-serializes the 20-byte header and each parameter's padded wire form.
func (sc SCTPInit) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	var payload []byte
	for _, param := range sc.Parameters {
		payload = append(payload, SCTPParameter(param).Bytes()...)
	}
	length := 20 + len(payload)
	bytes, err := b.PrependBytes(roundUpToNearest4(length))
	if err != nil {
		return err
	}
	bytes[0] = uint8(sc.Type)
	bytes[1] = sc.Flags
	binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
	binary.BigEndian.PutUint32(bytes[4:8], sc.InitiateTag)
	binary.BigEndian.PutUint32(bytes[8:12], sc.AdvertisedReceiverWindowCredit)
	binary.BigEndian.PutUint16(bytes[12:14], sc.OutboundStreams)
	binary.BigEndian.PutUint16(bytes[14:16], sc.InboundStreams)
	binary.BigEndian.PutUint32(bytes[16:20], sc.InitialTSN)
	copy(bytes[20:], payload)
	return nil
}
// SCTPSack is the SCTP Selective ACK chunk layer.
type SCTPSack struct {
	SCTPChunk
	CumulativeTSNAck               uint32
	AdvertisedReceiverWindowCredit uint32
	NumGapACKs, NumDuplicateTSNs   uint16
	GapACKs                        []uint16
	DuplicateTSNs                  []uint32
}

// LayerType return LayerTypeSCTPSack
func (sc *SCTPSack) LayerType() gopacket.LayerType {
	return LayerTypeSCTPSack
}

// decodeSCTPSack parses a SACK chunk: a 12-byte fixed part after the common
// header, then NumGapACKs 16-bit gap blocks and NumDuplicateTSNs 32-bit
// duplicate TSNs.
func decodeSCTPSack(data []byte, p gopacket.PacketBuilder) error {
	sc := &SCTPSack{
		SCTPChunk:                      decodeSCTPChunk(data),
		CumulativeTSNAck:               binary.BigEndian.Uint32(data[4:8]),
		AdvertisedReceiverWindowCredit: binary.BigEndian.Uint32(data[8:12]),
		NumGapACKs:                     binary.BigEndian.Uint16(data[12:14]),
		NumDuplicateTSNs:               binary.BigEndian.Uint16(data[14:16]),
	}
	// We maximize gapAcks and dupTSNs here so we're not allocating tons
	// of memory based on a user-controlable field. Our maximums are not exact,
	// but should give us sane defaults... we'll still hit slice boundaries and
	// fail if the user-supplied values are too high (in the for loops below), but
	// the amount of memory we'll have allocated because of that should be small
	// (< sc.ActualLength)
	gapAcks := sc.SCTPChunk.ActualLength / 2
	dupTSNs := (sc.SCTPChunk.ActualLength - gapAcks*2) / 4
	if gapAcks > int(sc.NumGapACKs) {
		gapAcks = int(sc.NumGapACKs)
	}
	if dupTSNs > int(sc.NumDuplicateTSNs) {
		dupTSNs = int(sc.NumDuplicateTSNs)
	}
	sc.GapACKs = make([]uint16, 0, gapAcks)
	sc.DuplicateTSNs = make([]uint32, 0, dupTSNs)
	bytesRemaining := data[16:]
	for i := 0; i < int(sc.NumGapACKs); i++ {
		sc.GapACKs = append(sc.GapACKs, binary.BigEndian.Uint16(bytesRemaining[:2]))
		bytesRemaining = bytesRemaining[2:]
	}
	for i := 0; i < int(sc.NumDuplicateTSNs); i++ {
		sc.DuplicateTSNs = append(sc.DuplicateTSNs, binary.BigEndian.Uint32(bytesRemaining[:4]))
		bytesRemaining = bytesRemaining[4:]
	}
	p.AddLayer(sc)
	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
}

// SerializeTo is for gopacket.SerializableLayer.
// The counts are taken from the slice lengths, not the decoded Num* fields,
// so a modified SACK re-serializes consistently.
func (sc SCTPSack) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	length := 16 + 2*len(sc.GapACKs) + 4*len(sc.DuplicateTSNs)
	bytes, err := b.PrependBytes(roundUpToNearest4(length))
	if err != nil {
		return err
	}
	bytes[0] = uint8(sc.Type)
	bytes[1] = sc.Flags
	binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
	binary.BigEndian.PutUint32(bytes[4:8], sc.CumulativeTSNAck)
	binary.BigEndian.PutUint32(bytes[8:12], sc.AdvertisedReceiverWindowCredit)
	binary.BigEndian.PutUint16(bytes[12:14], uint16(len(sc.GapACKs)))
	binary.BigEndian.PutUint16(bytes[14:16], uint16(len(sc.DuplicateTSNs)))
	for i, v := range sc.GapACKs {
		binary.BigEndian.PutUint16(bytes[16+i*2:], v)
	}
	offset := 16 + 2*len(sc.GapACKs)
	for i, v := range sc.DuplicateTSNs {
		binary.BigEndian.PutUint32(bytes[offset+i*4:], v)
	}
	return nil
}
// SCTPHeartbeatParameter is the parameter type used by SCTP heartbeat and
// heartbeat ack layers.
type SCTPHeartbeatParameter SCTPParameter
// SCTPHeartbeat is the SCTP heartbeat layer, also used for heartbeat ack.
type SCTPHeartbeat struct {
	SCTPChunk
	// Parameters holds the variable-length parameters carried by the chunk.
	Parameters []SCTPHeartbeatParameter
}
// LayerType returns LayerTypeSCTPHeartbeat or LayerTypeSCTPHeartbeatAck,
// depending on the chunk type decoded into sc.Type.
func (sc *SCTPHeartbeat) LayerType() gopacket.LayerType {
	if sc.Type == SCTPChunkTypeHeartbeatAck {
		return LayerTypeSCTPHeartbeatAck
	}
	// sc.Type == SCTPChunkTypeHeartbeat
	return LayerTypeSCTPHeartbeat
}
// decodeSCTPHeartbeat decodes a HEARTBEAT (or HEARTBEAT-ACK) chunk, parsing
// the parameter list that follows the 4-byte chunk header, then hands the
// remaining bytes back to the chunk-type dispatcher.
func decodeSCTPHeartbeat(data []byte, p gopacket.PacketBuilder) error {
	sc := &SCTPHeartbeat{
		SCTPChunk: decodeSCTPChunk(data),
	}
	// NOTE(review): data[4:sc.Length] assumes sc.Length <= len(data); a
	// truncated packet could panic here — confirm upstream validation.
	// A parameter with ActualLength == 0 would also loop forever below.
	paramData := data[4:sc.Length]
	for len(paramData) > 0 {
		p := SCTPHeartbeatParameter(decodeSCTPParameter(paramData))
		paramData = paramData[p.ActualLength:]
		sc.Parameters = append(sc.Parameters, p)
	}
	p.AddLayer(sc)
	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
}
// SerializeTo is for gopacket.SerializableLayer. It serializes all heartbeat
// parameters after the 4-byte chunk header, padding to a 4-byte boundary.
func (sc SCTPHeartbeat) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	// Flatten every parameter into a single byte slice first so the total
	// chunk length is known before prepending.
	var body []byte
	for _, param := range sc.Parameters {
		body = append(body, SCTPParameter(param).Bytes()...)
	}
	chunkLen := 4 + len(body)
	buf, err := b.PrependBytes(roundUpToNearest4(chunkLen))
	if err != nil {
		return err
	}
	buf[0] = uint8(sc.Type)
	buf[1] = sc.Flags
	binary.BigEndian.PutUint16(buf[2:4], uint16(chunkLen))
	copy(buf[4:], body)
	return nil
}
// SCTPErrorParameter is the parameter type used by SCTP Abort and Error layers.
type SCTPErrorParameter SCTPParameter
// SCTPError is the SCTP error layer, also used for SCTP aborts.
type SCTPError struct {
	SCTPChunk
	// Parameters holds the error causes carried by the chunk.
	Parameters []SCTPErrorParameter
}
// LayerType returns LayerTypeSCTPAbort or LayerTypeSCTPError, depending on
// the chunk type decoded into sc.Type.
func (sc *SCTPError) LayerType() gopacket.LayerType {
	if sc.Type == SCTPChunkTypeAbort {
		return LayerTypeSCTPAbort
	}
	// sc.Type == SCTPChunkTypeError
	return LayerTypeSCTPError
}
// decodeSCTPError decodes an ABORT or ERROR chunk, parsing the error-cause
// parameter list that follows the 4-byte chunk header.
func decodeSCTPError(data []byte, p gopacket.PacketBuilder) error {
	// remarkably similar to decodeSCTPHeartbeat ;)
	sc := &SCTPError{
		SCTPChunk: decodeSCTPChunk(data),
	}
	// NOTE(review): same caveats as decodeSCTPHeartbeat — sc.Length is
	// user-controlled and a zero-length parameter would loop forever.
	paramData := data[4:sc.Length]
	for len(paramData) > 0 {
		p := SCTPErrorParameter(decodeSCTPParameter(paramData))
		paramData = paramData[p.ActualLength:]
		sc.Parameters = append(sc.Parameters, p)
	}
	p.AddLayer(sc)
	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
}
// SerializeTo is for gopacket.SerializableLayer. It serializes all error
// causes after the 4-byte chunk header, padding to a 4-byte boundary.
func (sc SCTPError) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	// Collect all parameters so the chunk length is known up front.
	var body []byte
	for _, param := range sc.Parameters {
		body = append(body, SCTPParameter(param).Bytes()...)
	}
	chunkLen := 4 + len(body)
	buf, err := b.PrependBytes(roundUpToNearest4(chunkLen))
	if err != nil {
		return err
	}
	buf[0] = uint8(sc.Type)
	buf[1] = sc.Flags
	binary.BigEndian.PutUint16(buf[2:4], uint16(chunkLen))
	copy(buf[4:], body)
	return nil
}
// SCTPShutdown is the SCTP shutdown layer.
type SCTPShutdown struct {
	SCTPChunk
	// CumulativeTSNAck is the last consecutive TSN acknowledged.
	CumulativeTSNAck uint32
}
// LayerType returns gopacket.LayerTypeSCTPShutdown.
func (sc *SCTPShutdown) LayerType() gopacket.LayerType { return LayerTypeSCTPShutdown }
// decodeSCTPShutdown decodes the fixed 8-byte SHUTDOWN chunk.
func decodeSCTPShutdown(data []byte, p gopacket.PacketBuilder) error {
	// NOTE(review): data[4:8] assumes at least 8 bytes are available —
	// confirm upstream length validation.
	sc := &SCTPShutdown{
		SCTPChunk:        decodeSCTPChunk(data),
		CumulativeTSNAck: binary.BigEndian.Uint32(data[4:8]),
	}
	p.AddLayer(sc)
	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
}
// SerializeTo is for gopacket.SerializableLayer. SHUTDOWN chunks have a
// fixed 8-byte layout: the chunk header plus the cumulative TSN ack.
func (sc SCTPShutdown) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	const shutdownLen = 8
	buf, err := b.PrependBytes(shutdownLen)
	if err != nil {
		return err
	}
	buf[0] = uint8(sc.Type)
	buf[1] = sc.Flags
	binary.BigEndian.PutUint16(buf[2:4], shutdownLen)
	binary.BigEndian.PutUint32(buf[4:8], sc.CumulativeTSNAck)
	return nil
}
// SCTPShutdownAck is the SCTP shutdown-ack layer; it carries no payload
// beyond the chunk header.
type SCTPShutdownAck struct {
	SCTPChunk
}
// LayerType returns gopacket.LayerTypeSCTPShutdownAck.
func (sc *SCTPShutdownAck) LayerType() gopacket.LayerType { return LayerTypeSCTPShutdownAck }
// decodeSCTPShutdownAck decodes the header-only SHUTDOWN-ACK chunk.
func decodeSCTPShutdownAck(data []byte, p gopacket.PacketBuilder) error {
	sc := &SCTPShutdownAck{
		SCTPChunk: decodeSCTPChunk(data),
	}
	p.AddLayer(sc)
	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
}
// SerializeTo is for gopacket.SerializableLayer. SHUTDOWN-ACK is a
// header-only chunk, always exactly 4 bytes long.
func (sc SCTPShutdownAck) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	const headerLen = 4
	buf, err := b.PrependBytes(headerLen)
	if err != nil {
		return err
	}
	buf[0] = uint8(sc.Type)
	buf[1] = sc.Flags
	binary.BigEndian.PutUint16(buf[2:4], headerLen)
	return nil
}
// SCTPCookieEcho is the SCTP Cookie Echo layer.
type SCTPCookieEcho struct {
	SCTPChunk
	// Cookie is the opaque cookie payload; note it aliases the decoded
	// packet buffer rather than being copied.
	Cookie []byte
}
// LayerType returns gopacket.LayerTypeSCTPCookieEcho.
func (sc *SCTPCookieEcho) LayerType() gopacket.LayerType { return LayerTypeSCTPCookieEcho }
// decodeSCTPCookieEcho decodes a COOKIE-ECHO chunk; everything after the
// 4-byte header is the cookie.
func decodeSCTPCookieEcho(data []byte, p gopacket.PacketBuilder) error {
	sc := &SCTPCookieEcho{
		SCTPChunk: decodeSCTPChunk(data),
	}
	// NOTE(review): assumes sc.Length <= len(data) — confirm upstream.
	sc.Cookie = data[4:sc.Length]
	p.AddLayer(sc)
	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
}
// SerializeTo is for gopacket.SerializableLayer. It writes the chunk header
// followed by the raw cookie bytes, padded to a 4-byte boundary.
func (sc SCTPCookieEcho) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	chunkLen := 4 + len(sc.Cookie)
	buf, err := b.PrependBytes(roundUpToNearest4(chunkLen))
	if err != nil {
		return err
	}
	buf[0] = uint8(sc.Type)
	buf[1] = sc.Flags
	binary.BigEndian.PutUint16(buf[2:4], uint16(chunkLen))
	copy(buf[4:], sc.Cookie)
	return nil
}
// SCTPEmptyLayer is used by all empty SCTP chunks (currently CookieAck and
// ShutdownComplete): chunks that consist of the 4-byte header only.
type SCTPEmptyLayer struct {
	SCTPChunk
}
// LayerType returns either gopacket.LayerTypeSCTPShutdownComplete or
// LayerTypeSCTPCookieAck, depending on the chunk type decoded into sc.Type.
func (sc *SCTPEmptyLayer) LayerType() gopacket.LayerType {
	if sc.Type == SCTPChunkTypeShutdownComplete {
		return LayerTypeSCTPShutdownComplete
	}
	// sc.Type == SCTPChunkTypeCookieAck
	return LayerTypeSCTPCookieAck
}
// decodeSCTPEmptyLayer decodes a header-only chunk and moves on to the next.
func decodeSCTPEmptyLayer(data []byte, p gopacket.PacketBuilder) error {
	sc := &SCTPEmptyLayer{
		SCTPChunk: decodeSCTPChunk(data),
	}
	p.AddLayer(sc)
	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
}
// SerializeTo is for gopacket.SerializableLayer.
func (sc SCTPEmptyLayer) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
bytes, err := b.PrependBytes(4)
if err != nil {
return err
}
bytes[0] = uint8(sc.Type)
bytes[1] = sc.Flags
binary.BigEndian.PutUint16(bytes[2:4], 4)
return nil
} | vendor/github.com/tsg/gopacket/layers/sctp.go | 0.746971 | 0.443841 | sctp.go | starcoder |
package dilithium
import (
"golang.org/x/crypto/sha3"
)
// reduce32 maps a to a representative congruent to a mod q, roughly in the
// interval [-q, q], by estimating the quotient with a rounding shift.
func reduce32(a int32) int32 {
	quotient := (a + (1 << 22)) >> 23
	return a - quotient*q
}
// addQ adds q exactly when a is negative, mapping a into a non-negative
// representation in constant time (the sign bit selects the addend).
func addQ(a int32) int32 {
	return a + ((a >> 31) & q)
}

// freeze maps a to its canonical representative in the [0, q] domain by
// reducing and then normalizing the sign.
func freeze(a int32) int32 {
	return addQ(reduce32(a))
}
// power2Round splits a into high and low parts (a1, a0) such that
// a = a1*2^d + a0, with a0 the centered remainder.
func power2Round(a int32) (int32, int32) {
	high := (a + (1 << (d - 1)) - 1) >> d
	low := a - (high << d)
	return high, low
}
// decompose returns (a1, a0) such that a is congruent to a1*2*GAMMA2 + a0
// mod q, with a0 the centered low part. Only the two GAMMA2 values used by
// Dilithium are supported, each with its own high-bit computation.
func decompose(a int32, GAMMA2 int32) (int32, int32) {
	a1 := (a + 127) >> 7
	if GAMMA2 == (q-1)/32 {
		// 16 possible high values; the multiply/shift approximates
		// division by 2*GAMMA2/2^7 and the mask keeps the low 4 bits.
		a1 = (a1*1025 + (1 << 21)) >> 22
		a1 &= 15
	}
	if GAMMA2 == (q-1)/88 {
		// 44 possible high values (0..43); the xor trick wraps a1 == 44
		// back to 0 branch-free.
		a1 = (a1*11275 + (1 << 23)) >> 24
		a1 ^= ((43 - a1) >> 31) & a1
	}
	a0 := a - a1*2*GAMMA2
	// Recenter a0: subtract q when it exceeds (q-1)/2.
	a0 -= (((q-1)/2 - a0) >> 31) & q
	return a1, a0
}
// makeHint returns 1 iff the low part a0 would change the high part a1
// (i.e. a0 overflows into a1), and 0 otherwise.
func makeHint(a1, a0 int32, GAMMA2 int32) int32 {
	switch {
	case a0 > GAMMA2, a0 < -GAMMA2:
		return 1
	case a0 == -GAMMA2 && a1 != 0:
		return 1
	default:
		return 0
	}
}
// useHint computes the real high bits of a, using a hint bit produced by
// makeHint. A set hint nudges the high part up (a0 > 0) or down (a0 <= 0)
// by one, wrapping within the valid range for the given GAMMA2.
func useHint(a int32, hint int32, GAMMA2 int32) int32 {
	a1, a0 := decompose(a, GAMMA2)
	if hint == 0 {
		return a1
	}
	if a0 > 0 {
		if GAMMA2 == (q-1)/32 {
			// 16 high-bit values: wrap with a mask.
			return (a1 + 1) & 15
		}
		// 44 high-bit values (0..43): wrap explicitly.
		if a1 == 43 {
			return 0
		}
		return a1 + 1
	}
	if GAMMA2 == (q-1)/32 {
		return (a1 - 1) & 15
	}
	if a1 == 0 {
		return 43
	}
	return a1 - 1
}
// Mat is used to hold the public matrix A.
type Mat []Vec

// expandSeed derives the K×L matrix A from rho, filling each cell with a
// uniform polynomial whose nonce encodes its (row, column) position.
func expandSeed(rho [SEEDBYTES]byte, K, L int) Mat {
	A := make(Mat, K)
	for i := range A {
		row := make(Vec, L)
		for j := range row {
			row[j] = polyUniform(rho, uint16((i<<8)+j))
		}
		A[i] = row
	}
	return A
}
// challenge expands the hash hc into a Poly with exactly T nonzero
// coefficients, each +1 or -1, via an inside-out Fisher–Yates shuffle driven
// by SHAKE-256 output. The first 8 squeezed bytes supply the sign bits.
func challenge(hc []byte, T int) Poly {
	var c Poly
	var outbuf [shake256Rate]byte
	state := sha3.NewShake256()
	state.Write(hc[:])
	state.Read(outbuf[:])
	// Pack the first 8 output bytes into a 64-bit pool of sign bits.
	signs := uint64(0)
	for i := uint(0); i < 8; i++ {
		signs |= uint64(outbuf[i]) << (8 * i)
	}
	pos := 8
	b := 0
	for i := n - T; i < n; i++ {
		// Rejection-sample an index b in [0, i], refilling the SHAKE
		// buffer whenever it is exhausted.
		for {
			if pos >= shake256Rate {
				state.Read(outbuf[:])
				pos = 0
			}
			b = int(outbuf[pos])
			pos++
			if b <= i {
				break
			}
		}
		// Shuffle step: move c[b] into slot i, then place a fresh +/-1
		// at b using the next sign bit.
		c[i] = c[b]
		c[b] = 1 - 2*int32((signs&1))
		signs >>= 1
	}
	return c
}
// barretReduce computes the integer in {-(q-1)/2, ..., (q-1)/2} congruent to
// a modulo q, using Barrett reduction. (The misspelled name is kept to
// preserve the package's existing call sites.)
func barretReduce(a int32) int32 {
	// v = round(2^26 / q).
	v := int32(((uint32(1) << 26) + uint32(q/2)) / uint32(q))
	// Widen the product to int64: the previous int32*int32 multiply could
	// wrap for large |a|, yielding an incorrect quotient estimate. The
	// redundant int32(...) casts on already-int32 operands are also gone.
	t := int32((int64(v) * int64(a)) >> 26)
	return a - t*q
}
//montgomeryReduce is used to reduce a montgomery coefficient [0, RQ]
func montgomeryReduce(a int64) int32 {
t := int32(a * qInv)
t = int32((a - int64(t)*q) >> 32)
return t
} | crystals-dilithium/internal.go | 0.713731 | 0.510863 | internal.go | starcoder |
package function
import (
"fmt"
"time"
"gopkg.in/src-d/go-mysql-server.v0/sql"
"gopkg.in/src-d/go-mysql-server.v0/sql/expression"
)
// getDatePart evaluates the child expression of a date UDF and applies f to
// the result. The value is coerced to a timestamp first, then to a date; if
// both conversions fail, f receives nil so the UDF yields NULL rather than
// an error. A nil child value short-circuits to (nil, nil).
func getDatePart(
	ctx *sql.Context,
	u expression.UnaryExpression,
	row sql.Row,
	f func(interface{}) interface{},
) (interface{}, error) {
	val, err := u.Child.Eval(ctx, row)
	if err != nil {
		return nil, err
	}
	if val == nil {
		return nil, nil
	}
	date, err := sql.Timestamp.Convert(val)
	if err != nil {
		// Fall back to a plain date; a second failure degrades to nil
		// instead of surfacing the conversion error.
		date, err = sql.Date.Convert(val)
		if err != nil {
			date = nil
		}
	}
	return f(date), nil
}
// Year is a function that returns the year of a date.
type Year struct {
	expression.UnaryExpression
}
// NewYear creates a new Year UDF.
func NewYear(date sql.Expression) sql.Expression {
	return &Year{expression.UnaryExpression{Child: date}}
}
// String implements fmt.Stringer.
func (y *Year) String() string { return fmt.Sprintf("YEAR(%s)", y.Child) }
// Type implements the Expression interface.
func (y *Year) Type() sql.Type { return sql.Int32 }
// Eval implements the Expression interface. It returns an int32 year, or
// nil when the child value is nil or not convertible to a date.
func (y *Year) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	return getDatePart(ctx, y.UnaryExpression, row, year)
}
// TransformUp implements the Expression interface.
func (y *Year) TransformUp(f sql.TransformExprFunc) (sql.Expression, error) {
	child, err := y.Child.TransformUp(f)
	if err != nil {
		return nil, err
	}
	return f(NewYear(child))
}
// Month is a function that returns the month of a date.
type Month struct {
	expression.UnaryExpression
}
// NewMonth creates a new Month UDF.
func NewMonth(date sql.Expression) sql.Expression {
	return &Month{expression.UnaryExpression{Child: date}}
}
// String implements fmt.Stringer.
func (m *Month) String() string { return fmt.Sprintf("MONTH(%s)", m.Child) }
// Type implements the Expression interface.
func (m *Month) Type() sql.Type { return sql.Int32 }
// Eval implements the Expression interface. It returns an int32 month
// (1-12), or nil when the child is nil or not convertible to a date.
func (m *Month) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	return getDatePart(ctx, m.UnaryExpression, row, month)
}
// TransformUp implements the Expression interface.
func (m *Month) TransformUp(f sql.TransformExprFunc) (sql.Expression, error) {
	child, err := m.Child.TransformUp(f)
	if err != nil {
		return nil, err
	}
	return f(NewMonth(child))
}
// Day is a function that returns the day of a date.
type Day struct {
	expression.UnaryExpression
}
// NewDay creates a new Day UDF.
func NewDay(date sql.Expression) sql.Expression {
	return &Day{expression.UnaryExpression{Child: date}}
}
// String implements fmt.Stringer.
func (d *Day) String() string { return fmt.Sprintf("DAY(%s)", d.Child) }
// Type implements the Expression interface.
func (d *Day) Type() sql.Type { return sql.Int32 }
// Eval implements the Expression interface. It returns the int32 day of the
// month, or nil when the child is nil or not convertible to a date.
func (d *Day) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	return getDatePart(ctx, d.UnaryExpression, row, day)
}
// TransformUp implements the Expression interface.
func (d *Day) TransformUp(f sql.TransformExprFunc) (sql.Expression, error) {
	child, err := d.Child.TransformUp(f)
	if err != nil {
		return nil, err
	}
	return f(NewDay(child))
}
// Hour is a function that returns the hour of a date.
type Hour struct {
	expression.UnaryExpression
}
// NewHour creates a new Hour UDF.
func NewHour(date sql.Expression) sql.Expression {
	return &Hour{expression.UnaryExpression{Child: date}}
}
// String implements fmt.Stringer.
func (h *Hour) String() string { return fmt.Sprintf("HOUR(%s)", h.Child) }
// Type implements the Expression interface.
func (h *Hour) Type() sql.Type { return sql.Int32 }
// Eval implements the Expression interface. It returns the int32 hour, or
// nil when the child is nil or not convertible to a date.
func (h *Hour) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	return getDatePart(ctx, h.UnaryExpression, row, hour)
}
// TransformUp implements the Expression interface.
func (h *Hour) TransformUp(f sql.TransformExprFunc) (sql.Expression, error) {
	child, err := h.Child.TransformUp(f)
	if err != nil {
		return nil, err
	}
	return f(NewHour(child))
}
// Minute is a function that returns the minute of a date.
type Minute struct {
	expression.UnaryExpression
}
// NewMinute creates a new Minute UDF.
func NewMinute(date sql.Expression) sql.Expression {
	return &Minute{expression.UnaryExpression{Child: date}}
}
// String implements fmt.Stringer. Fixed to use the %s verb (the child is an
// expression, not an integer), matching every sibling date UDF — the old %d
// rendered as "MINUTE(%!d(...))".
func (m *Minute) String() string { return fmt.Sprintf("MINUTE(%s)", m.Child) }
// Type implements the Expression interface.
func (m *Minute) Type() sql.Type { return sql.Int32 }
// Eval implements the Expression interface. It returns the int32 minute, or
// nil when the child is nil or not convertible to a date.
func (m *Minute) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	return getDatePart(ctx, m.UnaryExpression, row, minute)
}
// TransformUp implements the Expression interface.
func (m *Minute) TransformUp(f sql.TransformExprFunc) (sql.Expression, error) {
	child, err := m.Child.TransformUp(f)
	if err != nil {
		return nil, err
	}
	return f(NewMinute(child))
}
// Second is a function that returns the second of a date.
type Second struct {
	expression.UnaryExpression
}
// NewSecond creates a new Second UDF.
func NewSecond(date sql.Expression) sql.Expression {
	return &Second{expression.UnaryExpression{Child: date}}
}
// String implements fmt.Stringer.
func (s *Second) String() string { return fmt.Sprintf("SECOND(%s)", s.Child) }
// Type implements the Expression interface.
func (s *Second) Type() sql.Type { return sql.Int32 }
// Eval implements the Expression interface. It returns the int32 second, or
// nil when the child is nil or not convertible to a date.
func (s *Second) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	return getDatePart(ctx, s.UnaryExpression, row, second)
}
// TransformUp implements the Expression interface.
func (s *Second) TransformUp(f sql.TransformExprFunc) (sql.Expression, error) {
	child, err := s.Child.TransformUp(f)
	if err != nil {
		return nil, err
	}
	return f(NewSecond(child))
}
// DayOfYear is a function that returns the day of the year from a date.
type DayOfYear struct {
	expression.UnaryExpression
}
// NewDayOfYear creates a new DayOfYear UDF.
func NewDayOfYear(date sql.Expression) sql.Expression {
	return &DayOfYear{expression.UnaryExpression{Child: date}}
}
// String implements fmt.Stringer.
func (d *DayOfYear) String() string { return fmt.Sprintf("DAYOFYEAR(%s)", d.Child) }
// Type implements the Expression interface.
func (d *DayOfYear) Type() sql.Type { return sql.Int32 }
// Eval implements the Expression interface. It returns the int32 day of the
// year (1-366), or nil when the child is nil or not convertible to a date.
func (d *DayOfYear) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	return getDatePart(ctx, d.UnaryExpression, row, dayOfYear)
}
// TransformUp implements the Expression interface.
func (d *DayOfYear) TransformUp(f sql.TransformExprFunc) (sql.Expression, error) {
	child, err := d.Child.TransformUp(f)
	if err != nil {
		return nil, err
	}
	return f(NewDayOfYear(child))
}
func datePartFunc(fn func(time.Time) int) func(interface{}) interface{} {
return func(v interface{}) interface{} {
if v == nil {
return nil
}
return int32(fn(v.(time.Time)))
}
}
var (
year = datePartFunc((time.Time).Year)
month = datePartFunc(func(t time.Time) int { return int(t.Month()) })
day = datePartFunc((time.Time).Day)
hour = datePartFunc((time.Time).Hour)
minute = datePartFunc((time.Time).Minute)
second = datePartFunc((time.Time).Second)
dayOfYear = datePartFunc((time.Time).YearDay)
) | sql/expression/function/time.go | 0.765681 | 0.456531 | time.go | starcoder |
package sbdb
import "github.com/ovn-org/libovsdb/model"
// LogicalFlowPipeline enumerates the pipeline a logical flow belongs to.
type (
	LogicalFlowPipeline = string
)
var (
	LogicalFlowPipelineIngress LogicalFlowPipeline = "ingress"
	LogicalFlowPipelineEgress  LogicalFlowPipeline = "egress"
)
// LogicalFlow defines an object in Logical_Flow table.
// NOTE(review): this file appears generated by libovsdb's modelgen —
// hand edits may be overwritten on regeneration.
type LogicalFlow struct {
	UUID            string            `ovsdb:"_uuid"`
	Actions         string            `ovsdb:"actions"`
	ControllerMeter *string           `ovsdb:"controller_meter"`
	ExternalIDs     map[string]string `ovsdb:"external_ids"`
	LogicalDatapath *string           `ovsdb:"logical_datapath"`
	LogicalDpGroup  *string           `ovsdb:"logical_dp_group"`
	Match           string            `ovsdb:"match"`
	Pipeline        LogicalFlowPipeline `ovsdb:"pipeline"`
	Priority        int               `ovsdb:"priority"`
	TableID         int               `ovsdb:"table_id"`
	Tags            map[string]string `ovsdb:"tags"`
}
// copyLogicalFlowControllerMeter returns a deep copy of the optional
// controller_meter value; nil stays nil.
func copyLogicalFlowControllerMeter(a *string) *string {
	if a == nil {
		return nil
	}
	v := *a
	return &v
}

// equalLogicalFlowControllerMeter reports whether two optional
// controller_meter values are equal: both nil, or both set to equal strings.
func equalLogicalFlowControllerMeter(a, b *string) bool {
	switch {
	case a == nil || b == nil:
		return a == b
	case a == b:
		return true
	default:
		return *a == *b
	}
}
// copyLogicalFlowExternalIDs returns an independent copy of the external_ids
// map; nil stays nil.
func copyLogicalFlowExternalIDs(a map[string]string) map[string]string {
	if a == nil {
		return nil
	}
	out := make(map[string]string, len(a))
	for key, val := range a {
		out[key] = val
	}
	return out
}

// equalLogicalFlowExternalIDs reports whether two external_ids maps hold the
// same entries; nil and empty maps are considered different.
func equalLogicalFlowExternalIDs(a, b map[string]string) bool {
	if (a == nil) != (b == nil) || len(a) != len(b) {
		return false
	}
	for key, val := range a {
		other, present := b[key]
		if !present || other != val {
			return false
		}
	}
	return true
}
// copyLogicalFlowLogicalDatapath returns a deep copy of the optional
// logical_datapath value; nil stays nil. (Generated helper.)
func copyLogicalFlowLogicalDatapath(a *string) *string {
	if a == nil {
		return nil
	}
	b := *a
	return &b
}
// equalLogicalFlowLogicalDatapath reports whether two optional
// logical_datapath values are equal (both nil, or equal strings).
func equalLogicalFlowLogicalDatapath(a, b *string) bool {
	if (a == nil) != (b == nil) {
		return false
	}
	if a == b {
		return true
	}
	return *a == *b
}
// copyLogicalFlowLogicalDpGroup returns a deep copy of the optional
// logical_dp_group value; nil stays nil. (Generated helper.)
func copyLogicalFlowLogicalDpGroup(a *string) *string {
	if a == nil {
		return nil
	}
	b := *a
	return &b
}
// equalLogicalFlowLogicalDpGroup reports whether two optional
// logical_dp_group values are equal (both nil, or equal strings).
func equalLogicalFlowLogicalDpGroup(a, b *string) bool {
	if (a == nil) != (b == nil) {
		return false
	}
	if a == b {
		return true
	}
	return *a == *b
}
// copyLogicalFlowTags returns an independent copy of the tags map; nil stays
// nil. (Generated helper.)
func copyLogicalFlowTags(a map[string]string) map[string]string {
	if a == nil {
		return nil
	}
	b := make(map[string]string, len(a))
	for k, v := range a {
		b[k] = v
	}
	return b
}
// equalLogicalFlowTags reports whether two tags maps hold the same entries;
// nil and empty maps are considered different.
func equalLogicalFlowTags(a, b map[string]string) bool {
	if (a == nil) != (b == nil) {
		return false
	}
	if len(a) != len(b) {
		return false
	}
	for k, v := range a {
		if w, ok := b[k]; !ok || v != w {
			return false
		}
	}
	return true
}
// DeepCopyInto copies a into b, deep-copying all pointer and map fields so
// the two values share no mutable state.
func (a *LogicalFlow) DeepCopyInto(b *LogicalFlow) {
	*b = *a
	b.ControllerMeter = copyLogicalFlowControllerMeter(a.ControllerMeter)
	b.ExternalIDs = copyLogicalFlowExternalIDs(a.ExternalIDs)
	b.LogicalDatapath = copyLogicalFlowLogicalDatapath(a.LogicalDatapath)
	b.LogicalDpGroup = copyLogicalFlowLogicalDpGroup(a.LogicalDpGroup)
	b.Tags = copyLogicalFlowTags(a.Tags)
}
// DeepCopy returns a newly allocated deep copy of a.
func (a *LogicalFlow) DeepCopy() *LogicalFlow {
	b := new(LogicalFlow)
	a.DeepCopyInto(b)
	return b
}
// CloneModelInto implements model.CloneableModel; b must be a *LogicalFlow.
func (a *LogicalFlow) CloneModelInto(b model.Model) {
	c := b.(*LogicalFlow)
	a.DeepCopyInto(c)
}
// CloneModel implements model.CloneableModel.
func (a *LogicalFlow) CloneModel() model.Model {
	return a.DeepCopy()
}
// Equals reports whether a and b are field-by-field equal, comparing pointer
// and map fields by value via the generated helpers.
func (a *LogicalFlow) Equals(b *LogicalFlow) bool {
	return a.UUID == b.UUID &&
		a.Actions == b.Actions &&
		equalLogicalFlowControllerMeter(a.ControllerMeter, b.ControllerMeter) &&
		equalLogicalFlowExternalIDs(a.ExternalIDs, b.ExternalIDs) &&
		equalLogicalFlowLogicalDatapath(a.LogicalDatapath, b.LogicalDatapath) &&
		equalLogicalFlowLogicalDpGroup(a.LogicalDpGroup, b.LogicalDpGroup) &&
		a.Match == b.Match &&
		a.Pipeline == b.Pipeline &&
		a.Priority == b.Priority &&
		a.TableID == b.TableID &&
		equalLogicalFlowTags(a.Tags, b.Tags)
}
// EqualsModel implements model.ComparableModel; b must be a *LogicalFlow.
func (a *LogicalFlow) EqualsModel(b model.Model) bool {
	c := b.(*LogicalFlow)
	return a.Equals(c)
}
var _ model.CloneableModel = &LogicalFlow{}
var _ model.ComparableModel = &LogicalFlow{} | go-controller/pkg/sbdb/logical_flow.go | 0.544801 | 0.477189 | logical_flow.go | starcoder |
package convert
import "github.com/yyf330/kit/metrics"
// counterHistogram adapts a Counter to the Histogram interface: every
// observed value is added to the underlying counter.
type counterHistogram struct {
	c metrics.Counter
}
// NewCounterAsHistogram returns a Histogram that actually writes the
// value on an underlying Counter.
func NewCounterAsHistogram(c metrics.Counter) metrics.Histogram {
	return counterHistogram{c}
}
// With implements Histogram.
func (ch counterHistogram) With(labelValues ...string) metrics.Histogram {
	return counterHistogram{ch.c.With(labelValues...)}
}
// Observe implements Histogram by adding the value to the counter.
func (ch counterHistogram) Observe(value float64) {
	ch.c.Add(value)
}
// histogramCounter adapts a Histogram to the Counter interface: every added
// delta is observed on the underlying histogram.
type histogramCounter struct {
	h metrics.Histogram
}
// NewHistogramAsCounter returns a Counter that actually writes the
// value on an underlying Histogram.
func NewHistogramAsCounter(h metrics.Histogram) metrics.Counter {
	return histogramCounter{h}
}
// With implements Counter.
func (hc histogramCounter) With(labelValues ...string) metrics.Counter {
	return histogramCounter{hc.h.With(labelValues...)}
}
// Add implements Counter by observing the delta on the histogram.
func (hc histogramCounter) Add(delta float64) {
	hc.h.Observe(delta)
}
// counterGauge adapts a Counter to the Gauge interface.
// NOTE(review): Set delegates to Add, so "setting" actually accumulates on
// the counter — a lossy adaptation inherent to this converter.
type counterGauge struct {
	c metrics.Counter
}
// NewCounterAsGauge returns a Gauge that actually writes the
// value on an underlying Counter.
func NewCounterAsGauge(c metrics.Counter) metrics.Gauge {
	return counterGauge{c}
}
// With implements Gauge.
func (cg counterGauge) With(labelValues ...string) metrics.Gauge {
	return counterGauge{cg.c.With(labelValues...)}
}
// Set implements Gauge; the value is added, not assigned (see type note).
func (cg counterGauge) Set(value float64) {
	cg.c.Add(value)
}
// Add implements metrics.Gauge.
func (cg counterGauge) Add(delta float64) {
	cg.c.Add(delta)
}
// gaugeCounter adapts a Gauge to the Counter interface.
// NOTE(review): Add delegates to Set, so the gauge reflects only the most
// recent delta rather than a running total — lossy by construction.
type gaugeCounter struct {
	g metrics.Gauge
}
// NewGaugeAsCounter returns a Counter that actually writes the
// value on an underlying Gauge.
func NewGaugeAsCounter(g metrics.Gauge) metrics.Counter {
	return gaugeCounter{g}
}
// With implements Counter.
func (gc gaugeCounter) With(labelValues ...string) metrics.Counter {
	return gaugeCounter{gc.g.With(labelValues...)}
}
// Add implements Counter; the delta is assigned, not accumulated (see note).
func (gc gaugeCounter) Add(delta float64) {
	gc.g.Set(delta)
}
// histogramGauge adapts a Histogram to the Gauge interface: both Set and Add
// translate to a histogram observation.
type histogramGauge struct {
	h metrics.Histogram
}
// NewHistogramAsGauge returns a Gauge that actually writes the
// value on an underlying Histogram.
func NewHistogramAsGauge(h metrics.Histogram) metrics.Gauge {
	return histogramGauge{h}
}
// With implements Gauge.
func (hg histogramGauge) With(labelValues ...string) metrics.Gauge {
	return histogramGauge{hg.h.With(labelValues...)}
}
// Set implements Gauge by observing the value.
func (hg histogramGauge) Set(value float64) {
	hg.h.Observe(value)
}
// Add implements metrics.Gauge by observing the delta (not a running total).
func (hg histogramGauge) Add(delta float64) {
	hg.h.Observe(delta)
}
// gaugeHistogram adapts a Gauge to the Histogram interface: each observation
// overwrites the gauge value.
type gaugeHistogram struct {
	g metrics.Gauge
}
// NewGaugeAsHistogram returns a Histogram that actually writes the
// value on an underlying Gauge.
func NewGaugeAsHistogram(g metrics.Gauge) metrics.Histogram {
	return gaugeHistogram{g}
}
// With implements Histogram.
func (gh gaugeHistogram) With(labelValues ...string) metrics.Histogram {
	return gaugeHistogram{gh.g.With(labelValues...)}
}
// Observe implements histogram.
func (gh gaugeHistogram) Observe(value float64) {
gh.g.Set(value)
} | util/metrics/internal/convert/convert.go | 0.927618 | 0.58673 | convert.go | starcoder |
package widgets
import (
"fmt"
"github.com/bcicen/tcolors/state"
"github.com/bcicen/tcolors/styles"
"github.com/gdamore/tcell"
)
const (
	// padPalette draws each sub-color box with half-block padding rows.
	padPalette = true
	palettePadding = 2
)
// PaletteBox renders the selected color, a text readout (RGB/hex/HSL), and
// the row of selectable sub-color boxes.
type PaletteBox struct {
	width     int
	boxWidth  int
	boxHeight int
	xStretch  int
	pst       tcell.Style // pointer style
	state     *state.State
}
// NewPaletteBox creates a PaletteBox bound to the given color state.
func NewPaletteBox(s *state.State) *PaletteBox {
	pb := &PaletteBox{state: s}
	return pb
}
// Draw redraws pb at the given coordinates on screen s, returning the number
// of rows occupied. Layout, top to bottom: a one-row text header, the large
// "active color" band, then one row of sub-color boxes with highlight
// borders above and below.
func (pb *PaletteBox) Draw(x, y int, s tcell.Screen) int {
	activePaletteHeight := int(float64(pb.boxHeight)*2.5) - 1
	pos := pb.state.Pos()
	items := pb.state.SubColors()
	selected := items[pos] // selected termbox color
	// distribute stretch evenly across boxes
	// where appropriate to facilitate centering
	centerIdx := pb.state.Len() / 2
	boxWidths := make([]int, pb.state.Len())
	boxWidths[centerIdx] = pb.xStretch
	// Bleed excess stretch from the center box outward, two cells at a time.
	for boxWidths[centerIdx]/3 >= 1 {
		boxWidths[centerIdx] -= 2
		boxWidths[centerIdx-1] += 1
	}
	nextIdx := centerIdx - 1
	for nextIdx >= 0 {
		for boxWidths[nextIdx] >= 2 {
			boxWidths[nextIdx] -= 1
			boxWidths[nextIdx-1] += 1
		}
		nextIdx--
	}
	// mirror first half of array
	for n := len(boxWidths) - 1; n > centerIdx; n-- {
		boxWidths[n] = boxWidths[len(boxWidths)-1-n]
	}
	// apply default boxwidth
	for n := range boxWidths {
		boxWidths[n] += pb.boxWidth
	}
	// text box header: centered readout with left/right edge glyphs.
	textBox := []rune(pb.text())
	textBoxX := x + (pb.width-len(textBox))/2
	for col := 0; col < pb.width; col++ {
		s.SetCell(x+col, y, styles.TextBox, '▁')
	}
	y++
	for col := 0; col < pb.width; col++ {
		switch {
		case col == 0:
			s.SetCell(x+col, y, styles.TextBox, '▎')
		case col == pb.width-1:
			s.SetCell(x+col, y, styles.TextBox, '▕')
		case x+col == textBoxX:
			// SetCell accepts the whole rune slice; skip past it.
			s.SetCell(x+col, y, styles.TextBox, textBox...)
			col += len(textBox) - 1
		}
	}
	y++
	// palette main: a solid band filled with the selected color.
	hiSt := styles.IndicatorHi.Background(selected)
	loSt := styles.Indicator.Background(selected)
	topSt := styles.TextBox.Background(selected)
	st := hiSt
	for row := 0; row < activePaletteHeight; row++ {
		for col := 0; col < pb.width; col++ {
			if row == 0 {
				s.SetCell(x+col, y, topSt, '▔')
			} else {
				s.SetCell(x+col, y, st, ' ')
			}
		}
		y++
	}
	// Top border of the sub-color boxes; the selected box gets the
	// highlight style.
	lx := x
	for n := range items {
		bw := boxWidths[n]
		if n == pos {
			st = hiSt
		} else {
			st = loSt
		}
		for col := 0; col < bw; col++ {
			s.SetCell(lx+col, y, st, '▁')
		}
		lx += bw
	}
	y++
	lx = x
	cst := styles.Default
	// Box interiors: edge glyphs on the first/last column, half blocks on
	// the first/last row when padding is enabled, full blocks otherwise.
	for n, color := range items {
		bw := boxWidths[n]
		cst = cst.Foreground(color)
		switch {
		case padPalette && n == pos:
			st = styles.IndicatorHi
		case n == pos:
			st = styles.IndicatorHi.Background(color)
		case padPalette:
			st = styles.Indicator
		default:
			st = styles.Indicator.Background(color)
		}
		for col := 0; col < bw; col++ {
			for row := 0; row < pb.boxHeight; row++ {
				switch {
				case col == 0:
					s.SetCell(lx, y+row, st, '▎')
				case col == bw-1:
					s.SetCell(lx, y+row, st, '▕')
				case padPalette && row == 0:
					s.SetCell(lx, y+row, cst, '▄')
				case padPalette && row == pb.boxHeight-1:
					s.SetCell(lx, y+row, cst, '▀')
				default:
					s.SetCell(lx, y+row, cst, '█')
				}
			}
			lx++
		}
	}
	y += pb.boxHeight
	// Bottom border of the sub-color boxes.
	lx = x
	for n := range items {
		bw := boxWidths[n]
		if n == pos {
			st = styles.IndicatorHi
		} else {
			st = styles.Indicator
		}
		for col := 0; col < bw; col++ {
			s.SetCell(lx+col, y, st, '▔')
		}
		lx += bw
	}
	return activePaletteHeight + pb.boxHeight + 4
}
// text builds the header readout: "RRR GGG BBB ▎ #hex ▎ HHH SSS LLL" for
// the currently selected sub-color.
func (pb *PaletteBox) text() string {
	const spacer = " ▎ "
	// NOTE(review): the previous version initialized txt to "▎" and then
	// immediately overwrote it; that dead store is removed here. If a
	// leading "▎" was intended, prepend it to the Sprintf below.
	selected := pb.state.SubColors()[pb.state.Pos()]
	r, g, b := selected.RGB()
	txt := fmt.Sprintf("%03d %03d %03d", r, g, b)
	txt += spacer + "#" + pb.state.Selected().Hex()
	h, s, l := pb.state.Selected().HSL()
	txt += spacer + fmt.Sprintf("%03.0f %03.0f %03.0f", h, s, l)
	return txt
}
// Resize recomputes the box dimensions for a w×h terminal area.
func (pb *PaletteBox) Resize(w, h int) {
	pb.boxHeight = barHeight(h) + 1
	pb.boxWidth = w / pb.state.Len()
	pb.width = w
}
// Handle is a no-op: PaletteBox does not react to state change events.
func (pb *PaletteBox) Handle(state.Change) {}
// Up selects the next sub-color (step is ignored).
func (pb *PaletteBox) Up(step int) { pb.state.Next() }
// Down selects the previous sub-color (step is ignored).
func (pb *PaletteBox) Down(step int) { pb.state.Prev() }
func (pb *PaletteBox) SetPointerStyle(st tcell.Style) { pb.pst = st } | widgets/palette.go | 0.622574 | 0.41182 | palette.go | starcoder |
package main
import (
"fmt"
)
// TreeNode is a single node of a binary search tree.
type TreeNode struct {
	data  int
	left  *TreeNode
	right *TreeNode
}

// BinarySearchTree is an unbalanced binary search tree of ints. Values less
// than a node go left; values greater than or equal go right. Duplicate
// values are not fully supported (findParent and remove assume distinct
// values).
type BinarySearchTree struct {
	root *TreeNode
}

// add inserts data into the tree, creating the root if the tree is empty.
func (tree *BinarySearchTree) add(data int) {
	if tree.root == nil {
		tree.root = &TreeNode{data: data}
	} else {
		add(tree.root, data)
	}
}

// add walks down from root and attaches data at the first empty slot.
func add(root *TreeNode, data int) {
	if data < root.data {
		if root.left == nil {
			root.left = &TreeNode{data: data}
		} else {
			add(root.left, data)
		}
	} else {
		if root.right == nil {
			root.right = &TreeNode{data: data}
		} else {
			add(root.right, data)
		}
	}
}

// find returns the node holding data, or nil if it is absent.
func (tree *BinarySearchTree) find(data int) *TreeNode {
	return find(tree.root, data)
}

func find(root *TreeNode, data int) *TreeNode {
	if root == nil {
		return nil
	}
	if root.data == data {
		return root
	}
	if data < root.data {
		return find(root.left, data)
	}
	return find(root.right, data)
}

// findParent returns the parent of the node holding data, or nil when data
// is absent or held by the root (the root has no parent).
func (tree *BinarySearchTree) findParent(data int) *TreeNode {
	return findParent(tree.root, data)
}

func findParent(root *TreeNode, data int) *TreeNode {
	if root == nil || data == root.data {
		return nil
	}
	if data < root.data {
		if root.left == nil {
			return nil
		}
		if data == root.left.data {
			return root
		}
		return findParent(root.left, data)
	}
	if root.right == nil {
		return nil
	}
	if data == root.right.data {
		return root
	}
	return findParent(root.right, data)
}

// remove deletes the node holding data and reports whether it was found.
//
// Fix over the previous version: removing the root when it had exactly one
// child dereferenced a nil parent and panicked; the root was also cleared
// whenever it happened to be a leaf, before checking which node was actually
// being removed. Both cases are now handled uniformly by splicing the
// (possibly nil) child into the parent slot, treating a nil parent as
// "the node is the root".
func (tree *BinarySearchTree) remove(data int) bool {
	nodeToRemove := tree.find(data)
	if nodeToRemove == nil {
		return false
	}
	parent := tree.findParent(data)
	if nodeToRemove.left != nil && nodeToRemove.right != nil {
		// Two children: copy in the largest value of the left subtree,
		// then remove that node (which has at most one child).
		largest := nodeToRemove.left
		for largest.right != nil {
			largest = largest.right
		}
		tree.remove(largest.data)
		nodeToRemove.data = largest.data
		return true
	}
	// Zero or one child: splice the child (possibly nil) into place.
	child := nodeToRemove.left
	if child == nil {
		child = nodeToRemove.right
	}
	switch {
	case parent == nil:
		// nodeToRemove is the root.
		tree.root = child
	case nodeToRemove.data < parent.data:
		parent.left = child
	default:
		parent.right = child
	}
	return true
}
// preOrder prints the tree's values in pre-order (node, left, right).
func (tree *BinarySearchTree) preOrder() {
	preOrder(tree.root)
}
func preOrder(root *TreeNode) {
	if root != nil {
		fmt.Println(root.data)
		preOrder(root.left)
		preOrder(root.right)
	}
}
// inOrder prints the tree's values in sorted (left, node, right) order.
func (tree *BinarySearchTree) inOrder() {
	inOrder(tree.root)
}
func inOrder(root *TreeNode) {
	if root != nil {
		inOrder(root.left)
		fmt.Println(root.data)
		inOrder(root.right)
	}
}
// postOrder prints the tree's values in post-order (left, right, node).
func (tree *BinarySearchTree) postOrder() {
	postOrder(tree.root)
}
func postOrder(root *TreeNode) {
	if root != nil {
		postOrder(root.left)
		postOrder(root.right)
		fmt.Println(root.data)
	}
}
func main() {
tree := BinarySearchTree{}
tree.add(10)
tree.add(12)
tree.add(2)
tree.add(4)
tree.add(1)
//tree.preOrder()
fmt.Println(tree.remove(4))
tree.preOrder()
} | data-structures/Tree/BinarySearchTree/go/BinarySearchTree.go | 0.603698 | 0.62581 | BinarySearchTree.go | starcoder |
package observations
import (
"context"
"fmt"
"image"
"github.com/zeebo/rothko/draw"
"golang.org/x/image/font"
"golang.org/x/image/math/fixed"
)
const (
	// padding is the vertical gap, in rows, between the label and the bar.
	padding = 2
)
// Measured represents a measured observations axis.
type Measured struct {
	// Width is the width in pixels of the observation axis
	Width int
	// Height is the height in pixels of the observation axis
	Height int
	// internal fields
	opts   Options
	bounds fixed.Rectangle26_6 // cached bounds of the widest label text
}
// Options describe the axis rendering options.
type Options struct {
	// Face is the font face to use for rendering the max observations number.
	Face font.Face
	// Width is how long the axis is.
	Width int
	// Height is the height of the bar
	Height int
}
// Draw renders the axis and returns a canvas allocated for the appropriate
// size. See Measure if you want to control where and how it is drawn.
func Draw(ctx context.Context, cols []draw.Column, opts Options) *draw.RGB {
	return Measure(ctx, opts).Draw(ctx, cols, nil)
}
// Measure computes the axis dimensions, returning state that Draw can use
// to render onto a canvas. The height reserves room for the bar itself plus
// a padded label line sized from a worst-case "obs/sec" string.
func Measure(ctx context.Context, opts Options) Measured {
	bounds, _ := font.BoundString(opts.Face, "obs/sec: 0.00e-00")
	labelHeight := (bounds.Max.Y - bounds.Min.Y).Ceil()
	m := Measured{
		Width:  opts.Width,
		Height: opts.Height + padding + labelHeight,
		opts:   opts,
		bounds: bounds,
	}
	return m
}
// Draw performs the drawing of the data on to the canvas. The canvas is
// expected to be large enough to handle the drawing. If the canvas is nil,
// one is allocated. In either case, the canvas is returned.
func (m Measured) Draw(ctx context.Context, cols []draw.Column,
canvas *draw.RGB) *draw.RGB {
w, h := 0, 0
if canvas != nil {
w, h = canvas.Size()
}
if w < m.Width || h < m.Height {
canvas = draw.NewRGB(m.Width, m.Height)
}
max := float64(0)
x := 0
for _, col := range cols {
if col.ObsSec > max {
max = col.ObsSec
x = col.X
}
}
label_text := fmt.Sprintf("obs/sec: %#.3g", float64(max))
label_height := (m.bounds.Max.Y - m.bounds.Min.Y).Ceil()
label_width := (m.bounds.Max.X - m.bounds.Min.X).Ceil()
for _, col := range cols {
sat := 255 - byte(float64(col.ObsSec)/float64(max)*255)
c := draw.Color{sat, sat, sat}
for y := 0; y < m.opts.Height; y++ {
for x := 0; x < col.W; x++ {
canvas.Set(x+col.X, y+padding+label_height, c)
}
}
}
end := x + label_width
if end > m.opts.Width {
x = m.opts.Width - label_width
}
(&font.Drawer{
Dst: canvas.AsImage(),
Src: image.Black,
Face: m.opts.Face,
Dot: fixed.Point26_6{
Y: -m.bounds.Min.Y,
X: fixed.I(x),
},
}).DrawString(label_text)
return canvas
} | draw/observations/obs.go | 0.885204 | 0.484624 | obs.go | starcoder |
package types
import (
"fmt"
sdk "github.com/cosmos/cosmos-sdk/types"
)
// NewMinter returns a new Minter object with the given inflation and annual
// provisions values.
func NewMinter(inflation, annualProvisions sdk.Dec, phase, startPhaseBlock uint64) Minter {
	return Minter{
		Inflation:        inflation,
		AnnualProvisions: annualProvisions,
		Phase:            phase,
		StartPhaseBlock:  startPhaseBlock,
	}
}

// InitialMinter returns an initial Minter object with a given inflation value
// and zeroed annual provisions, phase, and phase-start block.
func InitialMinter(inflation sdk.Dec) Minter {
	return NewMinter(
		inflation,
		sdk.NewDec(0),
		0,
		0,
	)
}

// DefaultInitialMinter returns a default initial Minter object for a new chain
// which uses an inflation rate of 13%.
func DefaultInitialMinter() Minter {
	return InitialMinter(
		sdk.NewDecWithPrec(13, 2), // 0.13 == 13%
	)
}
// ValidateMinter checks that the minter's inflation rate is not negative.
func ValidateMinter(minter Minter) error {
	if !minter.Inflation.IsNegative() {
		return nil
	}
	return fmt.Errorf("mint parameter Inflation should be positive, is %s",
		minter.Inflation.String())
}
// PhaseInflationRate returns the inflation rate by phase.
func (m Minter) PhaseInflationRate(phase uint64) sdk.Dec {
	// After the final (12th) phase there is no more inflation.
	if phase > 12 {
		return sdk.ZeroDec()
	}
	// Fixed rates for the first three phases.
	switch phase {
	case 1:
		return sdk.NewDecWithPrec(40, 2) // 40%
	case 2:
		return sdk.NewDecWithPrec(20, 2) // 20%
	case 3:
		return sdk.NewDecWithPrec(10, 2) // 10%
	}
	// Phases 4..12 decline linearly: 9%, 8%, ..., 1%.
	return sdk.NewDecWithPrec(13-int64(phase), 2)
}
// NextPhase returns the phase that applies at currentBlock, advancing the
// phase once a full year of blocks has elapsed since the phase started.
func (m Minter) NextPhase(params Params, currentBlock uint64) uint64 {
	if m.Phase == 0 {
		// Not initialized yet: begin with the first phase.
		return 1
	}
	if currentBlock < m.StartPhaseBlock+params.BlocksPerYear {
		// Still inside the current phase's year of blocks.
		return m.Phase
	}
	return m.Phase + 1
}
// NextAnnualProvisions returns the annual provisions based on current total
// supply and inflation rate.
func (m Minter) NextAnnualProvisions(_ Params, totalSupply sdk.Int) sdk.Dec {
	return m.Inflation.MulInt(totalSupply)
}

// BlockProvision returns the provisions for a block based on the annual
// provisions rate.
func (m Minter) BlockProvision(params Params) sdk.Coin {
	// Truncate fractional amounts: coins are whole-number denominations.
	provisionAmt := m.AnnualProvisions.QuoInt(sdk.NewInt(int64(params.BlocksPerYear)))
	return sdk.NewCoin(params.MintDenom, provisionAmt.TruncateInt())
}
package slice
import (
"fmt"
"math/rand"
)
// IndexOfInt64 returns the position of the first occurrence of y in x,
// or -1 when y is absent.
func IndexOfInt64(x []int64, y int64) int {
	for i := range x {
		if x[i] == y {
			return i
		}
	}
	return -1
}

// ContainsInt64 reports whether y occurs anywhere in x.
func ContainsInt64(x []int64, y int64) bool {
	return IndexOfInt64(x, y) >= 0
}
// EqualsInt64s reports whether x and y have the same length and identical
// elements in the same order.
func EqualsInt64s(x []int64, y []int64) bool {
	if len(x) != len(y) {
		return false
	}
	for i, v := range x {
		if y[i] != v {
			return false
		}
	}
	return true
}

// CopyInt64s returns a freshly allocated copy of x.
func CopyInt64s(x []int64) []int64 {
	out := make([]int64, len(x))
	copy(out, x)
	return out
}
// CutInt64s removes the elements x[i:j] and returns the shortened slice.
// The backing array of x is reused (and therefore modified).
// It fails when i < 0, j > len(x), or i >= j.
func CutInt64s(x []int64, i, j int) ([]int64, error) {
	switch {
	case i < 0 || j > len(x):
		return x, fmt.Errorf("out of bound")
	case i >= j:
		return x, fmt.Errorf("%d must be smaller than %d", i, j)
	}
	return append(x[:i], x[j:]...), nil
}
// RemoveInt64 removes the first occurrence of y from x (by value) and
// returns the shortened slice; x is returned unchanged when y is absent.
// The backing array of x is reused (and therefore modified).
func RemoveInt64(x []int64, y int64) []int64 {
	index := IndexOfInt64(x, y)
	if index != -1 {
		return append(x[:index], x[(index+1):]...)
	}
	return x
}
// RemoveInt64At removes the element at index from x and returns the
// shortened slice. The backing array of x is reused (and therefore
// modified). It fails when index is out of range; note that index ==
// len(x) must be rejected as well, since x[index+1:] would panic.
func RemoveInt64At(x []int64, index int) ([]int64, error) {
	if index < 0 || index >= len(x) {
		return x, fmt.Errorf("out of bound")
	}
	return append(x[:index], x[(index+1):]...), nil
}
// InsertInt64At inserts y into x at the given index (0..len(x)) and
// returns the grown slice.
func InsertInt64At(x []int64, y int64, index int) ([]int64, error) {
	if index < 0 || index > len(x) {
		return x, fmt.Errorf("out of bound")
	}
	// Grow by one, shift the tail right, then drop y into the gap.
	grown := append(x, 0)
	copy(grown[index+1:], grown[index:])
	grown[index] = y
	return grown, nil
}

// InsertInt64sAt inserts the slice y into x at the given index (0..len(x))
// and returns the combined slice.
func InsertInt64sAt(x []int64, y []int64, index int) ([]int64, error) {
	if index < 0 || index > len(x) {
		return x, fmt.Errorf("out of bound")
	}
	head, tail := x[:index], x[index:]
	return append(head, append(y, tail...)...), nil
}
// PopFirstInt64 removes and returns the first element of x along with the
// remainder. It fails when x is empty.
func PopFirstInt64(x []int64) (int64, []int64, error) {
	if len(x) == 0 {
		return 0, nil, fmt.Errorf("no value to pop")
	}
	head, rest := x[0], x[1:]
	return head, rest, nil
}

// PopLastInt64 removes and returns the last element of x along with the
// remainder. It fails when x is empty.
func PopLastInt64(x []int64) (int64, []int64, error) {
	if len(x) == 0 {
		return 0, nil, fmt.Errorf("no value to pop")
	}
	last := len(x) - 1
	return x[last], x[:last], nil
}
// FilterInt64s keeps only the elements of x for which filter returns true.
// The result reuses (and therefore modifies) the backing array of x.
func FilterInt64s(x []int64, filter func(int64) bool) []int64 {
	kept := x[:0]
	for _, v := range x {
		if filter(v) {
			kept = append(kept, v)
		}
	}
	return kept
}

// ReverseInt64s reverses x in place and returns it.
func ReverseInt64s(x []int64) []int64 {
	for left, right := 0, len(x)-1; left < right; left, right = left+1, right-1 {
		x[left], x[right] = x[right], x[left]
	}
	return x
}
// ShuffleInt64s permutes x in place with a Fisher-Yates shuffle and
// returns it. Randomness comes from math/rand's global source.
func ShuffleInt64s(x []int64) []int64 {
	for last := len(x) - 1; last > 0; last-- {
		pick := rand.Intn(last + 1)
		x[last], x[pick] = x[pick], x[last]
	}
	return x
}
// MergeInt64s concatenates the unique values of x then y, in first-seen
// order, skipping any values listed in excludes. A fresh slice is returned.
func MergeInt64s(x []int64, y []int64, excludes ...int64) []int64 {
	seen := make(map[int64]bool, len(x)+len(y))
	for _, ex := range excludes {
		seen[ex] = true
	}
	merged := make([]int64, 0)
	appendUnique := func(vals []int64) {
		for _, v := range vals {
			if seen[v] {
				continue
			}
			seen[v] = true
			merged = append(merged, v)
		}
	}
	// Order is preserved: all of x first, then y.
	appendUnique(x)
	appendUnique(y)
	return merged
}
// IntersectInt64s returns the values of y that also appear in x, in y's
// order. A fresh slice is returned.
func IntersectInt64s(x []int64, y []int64) []int64 {
	inX := make(map[int64]bool, len(x))
	for _, v := range x {
		inX[v] = true
	}
	common := make([]int64, 0)
	for _, v := range y {
		if inX[v] {
			common = append(common, v)
		}
	}
	return common
}
package constraint
import (
"github.com/leapar/engine/experimental/physics/equation"
"github.com/leapar/engine/math32"
)
// Lock constraint.
// Removes all degrees of freedom between the bodies.
type Lock struct {
	PointToPoint                      // keeps the shared pivot point in place
	rotEq1       *equation.Rotational // locks rotation about the first axis pair
	rotEq2       *equation.Rotational // locks rotation about the second axis pair
	rotEq3       *equation.Rotational // locks rotation about the third axis pair
	// Initial body axes, stored in each body's local frame.
	xA *math32.Vector3
	xB *math32.Vector3
	yA *math32.Vector3
	yB *math32.Vector3
	zA *math32.Vector3
	zB *math32.Vector3
}
// NewLock creates and returns a pointer to a new Lock constraint object
// between bodyA and bodyB, limited to maxForce.
func NewLock(bodyA, bodyB IBody, maxForce float32) *Lock {

	lc := new(Lock)

	// Set pivot point in between the two bodies
	posA := bodyA.Position()
	posB := bodyB.Position()
	halfWay := math32.NewVec3().AddVectors(&posA, &posB)
	halfWay.MultiplyScalar(0.5)
	pivotB := bodyB.PointToLocal(halfWay)
	pivotA := bodyA.PointToLocal(halfWay)

	// The point-to-point constraint will keep a point shared between the bodies
	lc.initialize(bodyA, bodyB, &pivotA, &pivotB, maxForce)

	// Store the initial rotation of the bodies as unit vectors in the local
	// body spaces. Each axis needs its own local-frame conversion and its
	// own variable: the original code converted only the X axis and made
	// xA/yA/zA (and xB/yB/zB) all point at the same value.
	unitX := math32.NewVector3(1, 0, 0)
	unitY := math32.NewVector3(0, 1, 0)
	unitZ := math32.NewVector3(0, 0, 1)

	xA := bodyA.VectorToLocal(unitX)
	xB := bodyB.VectorToLocal(unitX)
	yA := bodyA.VectorToLocal(unitY)
	yB := bodyB.VectorToLocal(unitY)
	zA := bodyA.VectorToLocal(unitZ)
	zB := bodyB.VectorToLocal(unitZ)

	lc.xA = &xA
	lc.xB = &xB
	lc.yA = &yA
	lc.yB = &yB
	lc.zA = &zA
	lc.zB = &zB

	// ...and the following rotational equations will keep all rotational DOF's in place
	lc.rotEq1 = equation.NewRotational(bodyA, bodyB, maxForce)
	lc.rotEq2 = equation.NewRotational(bodyA, bodyB, maxForce)
	lc.rotEq3 = equation.NewRotational(bodyA, bodyB, maxForce)

	lc.AddEquation(lc.rotEq1)
	lc.AddEquation(lc.rotEq2)
	lc.AddEquation(lc.rotEq3)

	return lc
}
// Update updates the equations with data.
func (lc *Lock) Update() {
lc.PointToPoint.Update()
// These vector pairs must be orthogonal
xAw := lc.bodyA.VectorToWorld(lc.xA)
yBw := lc.bodyA.VectorToWorld(lc.yB)
yAw := lc.bodyA.VectorToWorld(lc.yA)
zBw := lc.bodyB.VectorToWorld(lc.zB)
zAw := lc.bodyA.VectorToWorld(lc.zA)
xBw := lc.bodyB.VectorToWorld(lc.xB)
lc.rotEq1.SetAxisA(&xAw)
lc.rotEq1.SetAxisB(&yBw)
lc.rotEq2.SetAxisA(&yAw)
lc.rotEq2.SetAxisB(&zBw)
lc.rotEq3.SetAxisA(&zAw)
lc.rotEq3.SetAxisB(&xBw)
} | experimental/physics/constraint/lock.go | 0.821689 | 0.578299 | lock.go | starcoder |
package racesimulator
// ITimePoint is the interface that wraps the underlying TimePoint related methods.
// This helps form consistency with derivative structures.
type ITimePoint interface {
	// Name is the name of the timepoint which denotes where the timepoint resides. TimePointName is a simple
	// wrapper over string type.
	Name() TimePointName
	setName(TimePointName)
	// Chip returns the valid chip information specific to the current timepoint. It contains a unique identifier.
	Chip() IChip
	setChip(IChip)
	// Location returns the location of the current timepoint. PointAtDistance being a wrapper over int type.
	Location() PointAtDistance
	setLocation(PointAtDistance)
}

// TimePointName is a helper type that masks a string type to help with better segregation of constant variables.
// This is currently used to declare TimePoint names.
type TimePointName string

const (
	// CorridorTimePoint is a convenience name tag holder for the finish corridor timepoint.
	CorridorTimePoint TimePointName = "Corridor Timepoint"
	// FinishLineTimePoint is a convenience name tag holder for the finish line timepoint.
	FinishLineTimePoint TimePointName = "Finish Line Timepoint"
)

// timePoint is the unexported concrete implementation of ITimePoint.
type timePoint struct {
	name     TimePointName   // where the timepoint resides
	chip     IChip           // unique chip identifying this timepoint
	location PointAtDistance // distance of the timepoint along the course
}
// Name is the name of the timepoint which denotes where the timepoint resides. TimePointName is a simple
// wrapper over string type.
func (tp *timePoint) Name() TimePointName { return tp.name }

// setName replaces the timepoint's name.
func (tp *timePoint) setName(name TimePointName) { tp.name = name }

// Chip returns the valid chip information specific to the current timepoint. It contains a unique identifier.
func (tp *timePoint) Chip() IChip { return tp.chip }

// setChip replaces the timepoint's chip.
func (tp *timePoint) setChip(chip IChip) { tp.chip = chip }

// Location returns the location of the current timepoint. PointAtDistance being a wrapper over int type.
func (tp *timePoint) Location() PointAtDistance { return tp.location }

// setLocation replaces the timepoint's location.
func (tp *timePoint) setLocation(location PointAtDistance) { tp.location = location }

// NewTimePoint returns a new interface of ITimePoint.
// It takes in a name for the timepoint, and a location of the timepoint.
// A new chip is then embedded into the resultant interface of timepoint with a unique identifier.
func NewTimePoint(name TimePointName, location PointAtDistance) ITimePoint {
	return &timePoint{chip: NewChip(), name: name, location: location}
}
package iso20022
// Nature of the amount and currency on a document referred to in the remittance section, typically either the original amount due/payable or the amount actually remitted for the referenced document.
// RemittanceAmount2 carries the amounts related to a referenced remittance
// document: what is due, what discounts/credits/taxes/adjustments apply,
// and what was actually remitted.
type RemittanceAmount2 struct {

	// Amount specified is the exact amount due and payable to the creditor.
	DuePayableAmount *ActiveOrHistoricCurrencyAndAmount `xml:"DuePyblAmt,omitempty"`

	// Amount specified for the referred document is the amount of discount to be applied to the amount due and payable to the creditor.
	DiscountAppliedAmount []*DiscountAmountAndType1 `xml:"DscntApldAmt,omitempty"`

	// Amount specified for the referred document is the amount of a credit note.
	CreditNoteAmount *ActiveOrHistoricCurrencyAndAmount `xml:"CdtNoteAmt,omitempty"`

	// Quantity of cash resulting from the calculation of the tax.
	TaxAmount []*TaxAmountAndType1 `xml:"TaxAmt,omitempty"`

	// Specifies detailed information on the amount and reason of the document adjustment.
	AdjustmentAmountAndReason []*DocumentAdjustment1 `xml:"AdjstmntAmtAndRsn,omitempty"`

	// Amount of money remitted for the referred document.
	RemittedAmount *ActiveOrHistoricCurrencyAndAmount `xml:"RmtdAmt,omitempty"`
}
// SetDuePayableAmount sets the amount due and payable to the creditor.
func (r *RemittanceAmount2) SetDuePayableAmount(value, currency string) {
	r.DuePayableAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}

// AddDiscountAppliedAmount appends a new empty discount entry and returns
// it so the caller can populate it.
func (r *RemittanceAmount2) AddDiscountAppliedAmount() *DiscountAmountAndType1 {
	newValue := new(DiscountAmountAndType1)
	r.DiscountAppliedAmount = append(r.DiscountAppliedAmount, newValue)
	return newValue
}

// SetCreditNoteAmount sets the credit note amount.
func (r *RemittanceAmount2) SetCreditNoteAmount(value, currency string) {
	r.CreditNoteAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}

// AddTaxAmount appends a new empty tax entry and returns it so the caller
// can populate it.
func (r *RemittanceAmount2) AddTaxAmount() *TaxAmountAndType1 {
	newValue := new(TaxAmountAndType1)
	r.TaxAmount = append(r.TaxAmount, newValue)
	return newValue
}

// AddAdjustmentAmountAndReason appends a new empty adjustment entry and
// returns it so the caller can populate it.
func (r *RemittanceAmount2) AddAdjustmentAmountAndReason() *DocumentAdjustment1 {
	newValue := new(DocumentAdjustment1)
	r.AdjustmentAmountAndReason = append(r.AdjustmentAmountAndReason, newValue)
	return newValue
}

// SetRemittedAmount sets the amount of money remitted for the document.
func (r *RemittanceAmount2) SetRemittedAmount(value, currency string) {
	r.RemittedAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
package pefisa
// registerPefisa is the Go text/template for the boleto registration
// request. The leading "## Header:Value" lines carry the HTTP headers; the
// remainder is the JSON body. Template content must not be reformatted —
// it is rendered and sent verbatim.
const registerPefisa = `
## Authorization:Bearer {{.Authentication.AuthorizationToken}}
## Content-Type:application/json
{
"idBeneficiario": {{.Agreement.AgreementNumber}},
"carteira": {{.Agreement.Wallet}},
"nossoNumero": "{{padLeft (toString .Title.OurNumber) "0" 10}}",
"seuNumero": "{{truncate .Title.DocumentNumber 10}}",
"tipoTitulo": {{ .Title.BoletoTypeCode}},
"valorTitulo": "{{toFloatStr .Title.AmountInCents}}",
"dataDocumento": "{{enDate (today) "-"}}",
"dataVencimento": "{{.Title.ExpireDate}}",
"usoEmpresa": "A",
"emitente": {
"nome": "{{.Recipient.Name}}",
{{if (eq .Recipient.Document.Type "CNPJ")}}
"tipo": "J",
{{else}}
"tipo": "F",
{{end}}
"cnpjCpf": "{{extractNumbers .Recipient.Document.Number}}",
"endereco": "{{truncate .Recipient.Address.Street 40}}",
"cidade": "{{truncate .Recipient.Address.City 60}}",
"cep": "{{truncate .Recipient.Address.ZipCode 8}}",
"uf": "{{truncate .Recipient.Address.StateCode 2}}",
"bairro": "{{truncate .Recipient.Address.District 65}}"
},
"pagador": {
"nome": "{{truncate .Buyer.Name 40}}",
{{if (eq .Buyer.Document.Type "CNPJ")}}
"tipo": "J",
{{else}}
"tipo": "F",
{{end}}
"cnpjCpf": "{{extractNumbers .Buyer.Document.Number}}",
"endereco": "{{truncate .Buyer.Address.Street 40}}",
"cidade": "{{truncate .Buyer.Address.City 20}}",
"cep": "{{truncate (extractNumbers .Buyer.Address.ZipCode) 8}}",
"uf": "{{truncate .Buyer.Address.StateCode 2}}",
"bairro": "{{truncate .Buyer.Address.District 65}}"
},
"mensagens": [
"{{truncate .Title.Instructions 80}}"
]
}
`

// pefisaGetTokenRequest is the template for the OAuth client-credentials
// token request (Basic auth header plus form-encoded body).
const pefisaGetTokenRequest = `
## Authorization:Basic {{base64 (concat .Authentication.Username ":" .Authentication.Password)}}
## Content-Type: application/x-www-form-urlencoded
grant_type=client_credentials`

// tokenResponse is the template used to extract a successful token reply.
const tokenResponse = `{
"access_token": "{{access_token}}"
}`

// tokenErrorResponse is the template used to extract a token error reply.
const tokenErrorResponse = `{
"error_description": "{{errorMessage}}"
}`
// getRequestToken returns the OAuth token request template.
func getRequestToken() string {
	return pefisaGetTokenRequest
}

// getTokenResponse returns the template used to parse a successful token response.
func getTokenResponse() string {
	return tokenResponse
}

// getTokenErrorResponse returns the template used to parse a token error response.
func getTokenErrorResponse() string {
	return tokenErrorResponse
}

// getRequestPefisa returns the boleto registration request template.
func getRequestPefisa() string {
	return registerPefisa
}
package expr
import (
"fmt"
"strings"
)
// lexeme is a single token of the expression language.
type lexeme string

const (
	add          lexeme = "+"
	subtract     lexeme = "-"
	multiply     lexeme = "*"
	divide       lexeme = "/"
	openBracket  lexeme = "("
	closeBracket lexeme = ")"
	eof          lexeme = "." // sentinel returned when the input is exhausted
)
/*
node represents a node of an expression parse tree. Each node is labelled with a lexeme.
Terminal nodes have an integer lexeme.
Non-terminal nodes represent an addition, multiplication, or bracketed expression.
The following examples illustrate the approach.

The basic expression `1+2` is represented as the following parse tree:

	  +
	 / \
	1   2

The expression `1 * 2 + 3` is represented as the parse tree:

	    +
	   / \
	  *   3
	 / \
	1   2

The expression `(1+2)*3` is represented as the parse tree:

	    *
	   / \
	  +   3
	 / \
	1   2
*/
type node struct {
	lexeme   lexeme
	children []*node
}

// newNode parses the given lexemes and returns the root of the parse tree.
func newNode(lexemes []lexeme) *node {
	return newParser(lexemes).parse()
}
// String renders the tree one node per line, delimited by "---" markers.
func (n *node) String() string {
	return "---\n" + n.indentedString(0) + "\n---\n"
}

// indentedString renders this node at the given depth, with each child on
// its own line indented one extra space.
func (n *node) indentedString(indent int) string {
	i := strings.Repeat(" ", indent)
	s := n.lexeme
	c := ""
	for _, child := range n.children {
		c = c + "\n" + child.indentedString(indent+1)
	}
	return fmt.Sprintf("%s%s%s", i, s, c)
}
// parser holds the state of the expression parser.
type parser struct {
	input []lexeme // the lexemes being scanned
	pos   int      // current position in the input
	stack []*node  // parser stack
	tree  *node    // parse tree, equivalent of D0
}

// newParser creates a new parser for the input lexemes.
func newParser(input []lexeme) *parser {
	l := &parser{
		input: input,
		stack: make([]*node, 0),
	}
	return l
}
// push pushes a node on the stack
func (p *parser) push(node *node) {
	p.stack = append(p.stack, node)
}

// pop pops a node from the stack. If the stack is empty, panics.
func (p *parser) pop() *node {
	if len(p.stack) == 0 {
		panic("parser stack underflow")
	}
	index := len(p.stack) - 1
	element := p.stack[index]
	p.stack = p.stack[:index]
	return element
}

// emptyStack returns true if and only if the stack of nodes is empty.
func (p *parser) emptyStack() bool {
	return len(p.stack) == 0
}

// nextLexeme returns the next item from the input, consuming it.
// It returns eof once the input is exhausted.
func (p *parser) nextLexeme() lexeme {
	if p.pos >= len(p.input) {
		return eof
	}
	next := p.input[p.pos]
	p.pos++
	return next
}

// peek returns the next item from the input without consuming the item.
func (p *parser) peek() lexeme {
	if p.pos >= len(p.input) {
		return eof
	}
	return p.input[p.pos]
}
// getNum consumes the next lexeme and returns a terminal node for it,
// panicking unless the lexeme is a single decimal digit.
func (p *parser) getNum() *node {
	next := p.peek()
	switch next {
	case "0", "1", "2", "3", "4", "5", "6", "7", "8", "9":
		p.nextLexeme()
		return &node{
			lexeme:   next,
			children: []*node{},
		}
	}
	panic("digit expected")
}

// match consumes the next lexeme, panicking unless it equals m.
func (p *parser) match(m lexeme) {
	if p.peek() != m {
		panic(fmt.Sprintf("%s expected but found %s", m, p.peek()))
	}
	p.nextLexeme()
}
// factor parses a factor: either a parenthesised expression or a single
// digit. The result is left in p.tree.
func (p *parser) factor() {
	if p.peek() == openBracket {
		p.match(openBracket)
		p.expression()
		p.match(closeBracket)
	} else {
		p.tree = p.getNum()
	}
}

// multiply parses "* factor" and combines the saved left operand (popped
// from the stack) with the newly parsed factor into a multiply node.
func (p *parser) multiply() {
	p.match(multiply)
	p.factor()
	p.tree = &node{
		lexeme: multiply,
		children: []*node{
			p.pop(),
			p.tree,
		},
	}
}

// divide parses "/ factor" and combines the saved left operand (popped
// from the stack) with the newly parsed factor into a divide node.
func (p *parser) divide() {
	p.match(divide)
	p.factor()
	p.tree = &node{
		lexeme: divide,
		children: []*node{
			p.pop(),
			p.tree,
		},
	}
}

// term parses "factor ((* | /) factor)*", building a left-associative
// chain of multiply/divide nodes in p.tree.
func (p *parser) term() {
	p.factor()
	for p.peek() == multiply || p.peek() == divide {
		// Save the left operand for the operator handler to pop.
		p.push(p.tree)
		switch p.peek() {
		case multiply:
			p.multiply()
		case divide:
			p.divide()
		default:
			panic(fmt.Sprintf("* or / expected, found %s", p.peek()))
		}
	}
}
// add parses "+ term" and combines the saved left operand (popped from
// the stack) with the newly parsed term into an add node.
func (p *parser) add() {
	p.match(add)
	p.term()
	p.tree = &node{
		lexeme: add,
		children: []*node{
			p.pop(),
			p.tree,
		},
	}
}

// subtract parses "- term" and combines the saved left operand (popped
// from the stack) with the newly parsed term into a subtract node.
func (p *parser) subtract() {
	p.match(subtract)
	p.term()
	p.tree = &node{
		lexeme: subtract,
		children: []*node{
			p.pop(),
			p.tree,
		},
	}
}

// expression parses "term ((+ | -) term)*", building a left-associative
// chain of add/subtract nodes in p.tree.
func (p *parser) expression() {
	p.term()
	for p.peek() == add || p.peek() == subtract {
		// Save the left operand for the operator handler to pop.
		p.push(p.tree)
		switch p.peek() {
		case add:
			p.add()
		case subtract:
			p.subtract()
		default:
			panic(fmt.Sprintf("+ or - expected, found %s", p.peek()))
		}
	}
}

// parse parses the whole input and returns the root of the parse tree,
// or nil for empty input.
func (p *parser) parse() *node {
	if p.peek() == eof {
		return nil
	}
	p.expression()
	return p.tree
}
package main
import (
"fmt"
"strconv"
"strings"
"time"
)
// Period defines a time period
type Period struct {
	Start time.Time
	End   time.Time
}

// String formats the period as "YYYY-MM-DD to YYYY-MM-DD".
func (p *Period) String() string {
	return fmt.Sprintf("%s to %s", p.Start.Format("2006-01-02"), p.End.Format("2006-01-02"))
}

// Match returns true if t falls within the period.
// Note: both endpoints are exclusive (strictly after Start, strictly
// before End).
func (p *Period) Match(t time.Time) bool {
	return t.After(p.Start) && t.Before(p.End)
}
// daysIn returns the number of days in a month for a given year.
// From: https://groups.google.com/forum/#!topic/golang-nuts/W-ezk71hioo
func daysIn(year int, m time.Month) int {
	// This is equivalent to time.daysIn(m, year): day 0 of the following
	// month normalizes to the last day of month m.
	return time.Date(year, m+1, 0, 0, 0, 0, 0, time.UTC).Day()
}
// NewPeriodFromMonth converts a string of the form "year-month" (for
// example "2020-02") into a Period spanning that whole month.
// It returns an error when the input lacks a "-" separator (the original
// indexed o[1] unconditionally and panicked) or when either part is not
// an integer.
func NewPeriodFromMonth(in string) (*Period, error) {
	o := strings.SplitN(in, "-", 2)
	if len(o) != 2 {
		return nil, fmt.Errorf("invalid month %q: want year-month", in)
	}
	year, err := strconv.Atoi(o[0])
	if err != nil {
		return nil, err
	}
	m, err := strconv.Atoi(o[1])
	if err != nil {
		return nil, err
	}
	month := time.Month(m)

	p := &Period{}
	p.Start = time.Date(year, month, 1, 0, 0, 0, 0, time.UTC)
	// Day 0 of the following month normalizes to the last day of this
	// month, so no separate days-in-month lookup is needed.
	p.End = time.Date(year, month+1, 0, 23, 59, 59, 0, time.UTC)
	return p, nil
}
// firstDayOfISOWeek returns the Monday of the given ISO week in the given
// year. From: https://play.golang.org/p/UVFNFcpaoI
func firstDayOfISOWeek(year int, week int) time.Time {
	// Month 0, day 0 normalizes to late November of the previous year,
	// safely before week 1 of the target ISO year.
	date := time.Date(year, 0, 0, 0, 0, 0, 0, time.UTC)
	isoYear, isoWeek := date.ISOWeek()

	for date.Weekday() != time.Monday { // iterate back to Monday
		date = date.AddDate(0, 0, -1)
		isoYear, isoWeek = date.ISOWeek()
	}

	for isoYear < year { // iterate forward to the first day of the first week
		date = date.AddDate(0, 0, 1)
		isoYear, isoWeek = date.ISOWeek()
	}

	for isoWeek < week { // iterate forward to the first day of the given week
		date = date.AddDate(0, 0, 1)
		isoYear, isoWeek = date.ISOWeek()
	}

	return date
}
// NewPeriodFromWeek converts a string of the form "year-week" (for
// example "2020-05") into a Period covering that ISO week.
// NOTE(review): the SplitN result length is not checked — an input
// without a "-" panics on o[1]; confirm callers pre-validate the format.
func NewPeriodFromWeek(in string) (*Period, error) {
	o := strings.SplitN(in, "-", 2)
	year, err := strconv.Atoi(o[0])
	if err != nil {
		return nil, err
	}
	week, err := strconv.Atoi(o[1])
	if err != nil {
		return nil, err
	}

	p := &Period{}
	p.Start = firstDayOfISOWeek(year, week)
	// A week period is exactly seven days from the Monday start.
	p.End = p.Start.AddDate(0, 0, 7)
	return p, nil
}
package gendata
// BuildBitManipulation returns the generated Go source (as a raw string)
// for the Game Boy CPU's bit-manipulation instruction handlers: the
// rotate instructions RLA/RLCA/RRA/RRCA and the flag instructions
// CPL/CCF/SCF. The string content is emitted verbatim into the generated
// CPU package and must not be reformatted here.
func BuildBitManipulation() string {
	return `
func gbRLA(cpu *Core) {
c := (cpu.Registers.A >> 7) > 0
f := uint8(0)
if cpu.Registers.GetCarry() {
f = 1
}
cpu.Registers.A = (cpu.Registers.A << 1) | f
cpu.Registers.SetCarry(c)
cpu.Registers.SetZero(false)
cpu.Registers.SetHalfCarry(false)
cpu.Registers.SetSub(false)
cpu.Registers.LastClockM = 1
cpu.Registers.LastClockT = 4
}
func gbRLCA(cpu *Core) {
c := (cpu.Registers.A >> 7) & 0x1
cpu.Registers.A = (cpu.Registers.A << 1) | c
cpu.Registers.SetCarry(c > 0)
cpu.Registers.SetZero(false)
cpu.Registers.SetHalfCarry(false)
cpu.Registers.SetSub(false)
cpu.Registers.LastClockM = 1
cpu.Registers.LastClockT = 4
}
func gbRRA(cpu *Core) {
c := cpu.Registers.A & 0x1
f := byte(0)
if cpu.Registers.GetCarry() {
f = 1
}
cpu.Registers.A = (cpu.Registers.A >> 1) | (f << 7)
cpu.Registers.SetCarry(c > 0)
cpu.Registers.SetZero(false)
cpu.Registers.SetHalfCarry(false)
cpu.Registers.SetSub(false)
cpu.Registers.LastClockM = 1
cpu.Registers.LastClockT = 4
}
func gbRRCA(cpu *Core) {
c := cpu.Registers.A & 0x1
cpu.Registers.A = (cpu.Registers.A >> 1) | (c << 7)
cpu.Registers.SetCarry(c > 0)
cpu.Registers.SetZero(false)
cpu.Registers.SetHalfCarry(false)
cpu.Registers.SetSub(false)
cpu.Registers.LastClockM = 1
cpu.Registers.LastClockT = 4
}
func gbCPL(cpu *Core) {
cpu.Registers.A = ^cpu.Registers.A
cpu.Registers.SetHalfCarry(true)
cpu.Registers.SetSub(true)
cpu.Registers.LastClockM = 1
cpu.Registers.LastClockT = 4
}
func gbCCF(cpu *Core) {
cpu.Registers.SetHalfCarry(false)
cpu.Registers.SetCarry(!cpu.Registers.GetCarry())
cpu.Registers.SetSub(false)
cpu.Registers.LastClockM = 1
cpu.Registers.LastClockT = 4
}
func gbSCF(cpu *Core) {
cpu.Registers.SetHalfCarry(false)
cpu.Registers.SetCarry(true)
cpu.Registers.SetSub(false)
cpu.Registers.LastClockM = 1
cpu.Registers.LastClockT = 4
}
`
}
package copit
import (
"database/sql"
"errors"
"reflect"
)
// Copy deep-copies fromValue into toValue using reflection.
// toValue must be an addressable destination (typically a pointer).
// Supported shapes: assignable scalars, struct -> struct (matched by field
// name, honoring the "copit" tag on the destination), slice -> slice
// (element-wise), and struct -> slice (appended as one element). Source
// fields may also be delivered through same-named setter methods on the
// destination.
func Copy(toValue interface{}, fromValue interface{}) (err error) {
	var (
		from = indirect(reflect.ValueOf(fromValue))
		to   = indirect(reflect.ValueOf(toValue))
	)

	if !to.CanAddr() {
		return errors.New("copy to value is unaddressable")
	}

	// Return is from value is invalid
	if !from.IsValid() {
		return
	}

	fromType := indirectType(from.Type())
	toType := indirectType(to.Type())

	// Just set it if possible to assign
	// And need to do copy anyway if the type is struct
	if fromType.Kind() != reflect.Struct && from.Type().AssignableTo(to.Type()) {
		to.Set(from)
		return
	}

	// slice -> slice Section
	if to.Kind() == reflect.Slice {
		if from.Kind() == reflect.Slice {
			// Copy each source element into the destination slice
			// recursively (struct elements land via the branch below).
			for i := 0; i < from.Len(); i++ {
				if indirect(from.Index(i)).IsValid() {
					Copy(toValue, indirect(from.Index(i)).Interface())
				}
			}
		} else if fromType.Kind() == reflect.Struct {
			// Single struct -> slice: copy into a fresh element, then
			// append either the pointer or the value, whichever fits.
			dest := indirect(reflect.New(toType).Elem())
			if err := Copy(dest.Addr().Interface(), indirect(from).Interface()); err != nil {
				return err
			}
			if dest.Addr().Type().AssignableTo(to.Type().Elem()) {
				to.Set(reflect.Append(to, dest.Addr()))
			} else if dest.Type().AssignableTo(to.Type().Elem()) {
				to.Set(reflect.Append(to, dest))
			}
		}
		return
	}

	// --------------------- struct -> struct only
	if fromType.Kind() != reflect.Struct || toType.Kind() != reflect.Struct {
		return
	}

	// Field-by-field copy: destination fields pull from the same-named
	// source field (or the name given by the destination's "copit" tag).
	if true {
		toTypeFields := deepFields(toType)
		for _, field := range toTypeFields {
			name := field.Name
			if tagName := field.Tag.Get("copit"); tagName != "" {
				name = tagName
			}
			if fromField := from.FieldByName(name); fromField.IsValid() {
				if toField := to.FieldByName(field.Name); toField.IsValid() {
					if toField.CanSet() {
						if !set(toField, fromField) {
							// Not directly settable: recurse for nested
							// structs/slices.
							if err := Copy(toField.Addr().Interface(), fromField.Interface()); err != nil {
								return err
							}
						}
					}
				}
			}
		}
	}

	// Method delivery: a source field may feed a destination method of the
	// same name taking exactly one assignable argument.
	if true {
		fromTypeFields := deepFields(fromType)
		for _, field := range fromTypeFields {
			name := field.Name
			if fromField := from.FieldByName(name); fromField.IsValid() {
				var toMethod reflect.Value
				if to.CanAddr() {
					toMethod = to.Addr().MethodByName(name)
				} else {
					toMethod = to.MethodByName(name)
				}
				if toMethod.IsValid() && toMethod.Type().NumIn() == 1 && fromField.Type().AssignableTo(toMethod.Type().In(0)) {
					toMethod.Call([]reflect.Value{fromField})
				}
			}
		}
	}
	return
}
func deepFields(reflectType reflect.Type) []reflect.StructField {
var fields []reflect.StructField
if reflectType = indirectType(reflectType); reflectType.Kind() == reflect.Struct {
for i := 0; i < reflectType.NumField(); i++ {
v := reflectType.Field(i)
if v.Anonymous {
fields = append(fields, deepFields(v.Type)...)
} else {
fields = append(fields, v)
}
}
}
return fields
}
func indirect(reflectValue reflect.Value) reflect.Value {
for reflectValue.Kind() == reflect.Ptr {
reflectValue = reflectValue.Elem()
}
return reflectValue
}
func indirectType(reflectType reflect.Type) reflect.Type {
for reflectType.Kind() == reflect.Ptr || reflectType.Kind() == reflect.Slice {
reflectType = reflectType.Elem()
}
return reflectType
}
// set attempts to assign from into to, handling pointer allocation, type
// conversion, and sql.Scanner destinations. It reports whether the
// assignment was performed; a false return tells the caller to fall back
// to a recursive Copy.
func set(to, from reflect.Value) bool {
	if from.IsValid() {
		if to.Kind() == reflect.Ptr {
			//set `to` to nil if from is nil
			if from.Kind() == reflect.Ptr && from.IsNil() {
				to.Set(reflect.Zero(to.Type()))
				return true
			} else if to.IsNil() {
				// Allocate the destination before writing through it.
				to.Set(reflect.New(to.Type().Elem()))
			}
			to = to.Elem()
		}

		if from.Type().ConvertibleTo(to.Type()) {
			to.Set(from.Convert(to.Type()))
		} else if scanner, ok := to.Addr().Interface().(sql.Scanner); ok {
			// Let database/sql destinations consume the raw value.
			err := scanner.Scan(from.Interface())
			if err != nil {
				return false
			}
		} else if from.Kind() == reflect.Ptr {
			// Retry with the pointed-to value.
			return set(to, from.Elem())
		} else {
			return false
		}
	}
	return true
}
package matrix
import (
"fmt"
"math/rand"
"time"
)
// New creates a new empty matrix of a given size.
func New(rows, cols int) Matrix {
	assertValidSize(rows, cols)
	return createWithData(rows, cols, make([]float64, rows*cols))
}

// NewSquare creates an empty square matrix of a given size.
func NewSquare(size int) Matrix {
	assertValidSize(size, size)
	return createWithData(size, size, make([]float64, size*size))
}

// NewRandom creates a matrix of a given size and fills the vectors with random
// floating point numbers ranging from 0..1.
func NewRandom(rows, cols int) Matrix {
	assertValidSize(rows, cols)
	data := make([]float64, rows*cols)
	// A time-seeded local generator keeps each call independent of the
	// package-global rand state.
	gen := rand.New(rand.NewSource(time.Now().UnixNano()))
	for i := range data {
		data[i] = gen.Float64()
	}
	return createWithData(rows, cols, data)
}

// NewFrom creates a matrix instance from a slice of slices.
// It panics when the rows are not all the same length.
func NewFrom(data [][]float64) Matrix {
	rows, cols := len(data), len(data[0])
	assertValidSize(rows, cols)
	assertAllRowsSameSize(data, cols)
	return createWithData(rows, cols, sliceJoin(data))
}
// NewFromVec creates a matrix from a long vector. It just makes sure the
// data can take the requested rectangular shape, and panics otherwise.
func NewFromVec(rows, cols int, data []float64) Matrix {
	assertValidSize(rows, cols)
	if rows*cols != len(data) {
		panic(fmt.Sprintf("tried to create %dx%d matrix with %v", rows, cols, data))
	}
	return createWithData(rows, cols, data)
}

// createWithData is called from all constructors and returns a struct.
// The data slice is adopted, not copied.
func createWithData(rows, cols int, data []float64) Matrix {
	return Matrix{
		NumRows: rows,
		NumCols: cols,
		data:    data,
	}
}

// assertValidSize panics unless both dimensions are at least 1.
func assertValidSize(rows, cols int) {
	if rows < 1 || cols < 1 {
		panic(fmt.Sprintf("tried to create matrix with %d rows and %d columns", rows, cols))
	}
}

// assertAllRowsSameSize panics unless every row has exactly cols elements.
func assertAllRowsSameSize(data [][]float64, cols int) {
	for _, r := range data {
		if len(r) != cols {
			panic(fmt.Sprintf("tried to create matrix from slice of different sized slices: %v", data))
		}
	}
}
// sliceJoin flattens a slice of rows into one contiguous []float64,
// preserving row order.
func sliceJoin(data [][]float64) []float64 {
	flat := make([]float64, 0, len(data)*len(data[0]))
	for _, row := range data {
		flat = append(flat, row...)
	}
	return flat
}
package dynamodb
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/guregu/dynamo"
)
type (
	// Query : Request to get one or more items in a table.
	Query interface {
		// Range : Specifies the range key (a.k.a. sort key) or keys to get.
		Range(name string, op dynamo.Operator, values ...interface{}) Query
		// StartFrom : Makes this query continue from a previous one.
		StartFrom(key dynamo.PagingKey) Query
		// Index : Specifies the name of the index that this query will operate on.
		Index(name string) Query
		// Project : Limits the result attributes to the given paths.
		Project(paths ...string) Query
		// ProjectExpr : Limits the result attributes to the given expression.
		ProjectExpr(expr string, args ...interface{}) Query
		// Filter : Takes an expression that all results will be evaluated against.
		Filter(expr string, args ...interface{}) Query
		// Consistent : Set the read consistency to strong or not.
		Consistent(on bool) Query
		// Limit : Specifies the maximum amount of results to return.
		Limit(limit int64) Query
		// SearchLimit : Specifies the maximum amount of results to examine.
		SearchLimit(limit int64) Query
		// Order : Specifies the desired result order.
		Order(order dynamo.Order) Query
		// ConsumedCapacity : Measures the throughput capacity consumed by this operation and add it to cc.
		ConsumedCapacity(cc *dynamo.ConsumedCapacity) Query
		// One : Executes this query and retrieves a single result.
		One(out interface{}) error
		// OneWithContext : Executes this query and retrieves a single result.
		OneWithContext(ctx aws.Context, out interface{}) error
		// Count : Executes this request, returning the number of results.
		Count() (int64, error)
		// CountWithContext : Executes this request, returning the number of results.
		CountWithContext(ctx aws.Context) (int64, error)
		// All : Executes this request and unmarshals all results to out, which must be a pointer to a slice.
		All(out interface{}) error
		// AllWithContext : Executes this request and unmarshals all results to out, which must be a pointer to a slice.
		AllWithContext(ctx aws.Context, out interface{}) error
		// AllWithLastEvaluatedKey : Executes this request and unmarshals all results to out, which must be a pointer to a slice.
		AllWithLastEvaluatedKey(out interface{}) (dynamo.PagingKey, error)
		// AllWithLastEvaluatedKeyContext : Executes this request and unmarshals all results to out, which must be a pointer to a slice.
		AllWithLastEvaluatedKeyContext(ctx aws.Context, out interface{}) (dynamo.PagingKey, error)
		// Iter : Returns a results iterator for this request.
		Iter() dynamo.PagingIter
	}

	// queryWrap adapts a *dynamo.Query to the Query interface above.
	queryWrap struct {
		query *dynamo.Query
	}
)
// Range : Specifies the range key (a.k.a. sort key) or keys to get.
// Like every builder method below, it wraps the derived dynamo query in a
// fresh queryWrap rather than mutating the receiver's wrapper.
func (qw *queryWrap) Range(name string, op dynamo.Operator, values ...interface{}) Query {
	return &queryWrap{
		query: qw.query.Range(name, op, values...),
	}
}

// StartFrom : Makes this query continue from a previous one.
func (qw *queryWrap) StartFrom(key dynamo.PagingKey) Query {
	return &queryWrap{
		query: qw.query.StartFrom(key),
	}
}

// Index : Specifies the name of the index that this query will operate on.
func (qw *queryWrap) Index(name string) Query {
	return &queryWrap{
		query: qw.query.Index(name),
	}
}

// Project : Limits the result attributes to the given paths.
func (qw *queryWrap) Project(paths ...string) Query {
	return &queryWrap{
		query: qw.query.Project(paths...),
	}
}

// ProjectExpr : Limits the result attributes to the given expression.
func (qw *queryWrap) ProjectExpr(expr string, args ...interface{}) Query {
	return &queryWrap{
		query: qw.query.ProjectExpr(expr, args...),
	}
}

// Filter : Takes an expression that all results will be evaluated against.
func (qw *queryWrap) Filter(expr string, args ...interface{}) Query {
	return &queryWrap{
		query: qw.query.Filter(expr, args...),
	}
}

// Consistent : Set the read consistency to strong or not.
func (qw *queryWrap) Consistent(on bool) Query {
	return &queryWrap{
		query: qw.query.Consistent(on),
	}
}

// Limit : Specifies the maximum amount of results to return.
func (qw *queryWrap) Limit(limit int64) Query {
	return &queryWrap{
		query: qw.query.Limit(limit),
	}
}

// SearchLimit : Specifies the maximum amount of results to examine.
func (qw *queryWrap) SearchLimit(limit int64) Query {
	return &queryWrap{
		query: qw.query.SearchLimit(limit),
	}
}

// Order : Specifies the desired result order.
func (qw *queryWrap) Order(order dynamo.Order) Query {
	return &queryWrap{
		query: qw.query.Order(order),
	}
}

// ConsumedCapacity : Measures the throughput capacity consumed by this operation and add it to cc.
func (qw *queryWrap) ConsumedCapacity(cc *dynamo.ConsumedCapacity) Query {
	return &queryWrap{
		query: qw.query.ConsumedCapacity(cc),
	}
}

// One : Executes this query and retrieves a single result.
func (qw *queryWrap) One(out interface{}) error {
	return qw.query.One(out)
}

// OneWithContext : Executes this query and retrieves a single result, honoring the given context.
func (qw *queryWrap) OneWithContext(ctx aws.Context, out interface{}) error {
	return qw.query.OneWithContext(ctx, out)
}

// Count : Executes this request, returning the number of results.
func (qw *queryWrap) Count() (int64, error) {
	return qw.query.Count()
}

// CountWithContext : Executes this request, returning the number of results, honoring the given context.
func (qw *queryWrap) CountWithContext(ctx aws.Context) (int64, error) {
	return qw.query.CountWithContext(ctx)
}

// All : Executes this request and unmarshals all results to out, which must be a pointer to a slice.
func (qw *queryWrap) All(out interface{}) error {
	return qw.query.All(out)
}

// AllWithContext : Executes this request and unmarshals all results to out, which must be a pointer to a slice.
func (qw *queryWrap) AllWithContext(ctx aws.Context, out interface{}) error {
	return qw.query.AllWithContext(ctx, out)
}

// AllWithLastEvaluatedKey : Executes this request and unmarshals all results to out (a pointer to a
// slice), additionally returning the last evaluated key for pagination.
func (qw *queryWrap) AllWithLastEvaluatedKey(out interface{}) (dynamo.PagingKey, error) {
	return qw.query.AllWithLastEvaluatedKey(out)
}

// AllWithLastEvaluatedKeyContext : Same as AllWithLastEvaluatedKey, honoring the given context.
func (qw *queryWrap) AllWithLastEvaluatedKeyContext(ctx aws.Context, out interface{}) (dynamo.PagingKey, error) {
	return qw.query.AllWithLastEvaluatedKeyContext(ctx, out)
}
// Iter : Returns a results iterator for this request.
func (qw *queryWrap) Iter() dynamo.PagingIter {
	return qw.query.Iter()
}
package main
/*
WARNING: Until issue #67 is done, this is a copy from HW2.
*/
import (
"fmt"
"math"
"math/rand"
"time"
"github.com/ChristopherRabotin/smd"
"github.com/gonum/floats"
"github.com/gonum/matrix/mat64"
"github.com/gonum/stat/distmv"
)
// Degree/radian conversion factors.
const (
	r2d = 180 / math.Pi // radians -> degrees
	d2r = 1 / r2d       // degrees -> radians
)
// Station defines a ground station.
type Station struct {
	name              string
	R, V              []float64 // position and velocity in ECEF
	latΦ, longθ       float64   // these are stored in radians!
	altitude          float64   // altitude as given to NewStation (units unspecified here — see callers)
	ρNoise, ρDotNoise *distmv.Normal // Station noise on range and range rate
}
// PerformMeasurement returns whether the SC is visible, and if so, the measurement.
// θgst is the sidereal angle used to rotate the ECI state into ECEF.
// Visibility requires an elevation of at least 10 degrees.
func (s Station) PerformMeasurement(θgst float64, state smd.State) (bool, Measurement) {
	// The station vectors are in ECEF, so let's convert the state to ECEF.
	rECEF := smd.ECI2ECEF(state.Orbit.R(), θgst)
	vECEF := smd.ECI2ECEF(state.Orbit.V(), θgst)
	// Compute visibility for each station.
	ρECEF, ρ, el, _ := s.RangeElAz(rECEF)
	vDiffECEF := make([]float64, 3)
	for i := 0; i < 3; i++ {
		vDiffECEF[i] = (vECEF[i] - s.V[i]) / ρ
	}
	// Range rate: projection of the relative velocity onto the line of sight.
	ρDot := mat64.Dot(mat64.NewVector(3, ρECEF), mat64.NewVector(3, vDiffECEF))
	ρNoisy := ρ + s.ρNoise.Rand(nil)[0]
	ρDotNoisy := ρDot + s.ρDotNoise.Rand(nil)[0]
	// The Measurement is always built; the bool (and Measurement.Visible)
	// flags whether the elevation cleared the 10° mask.
	// TODO: Change signature
	return el >= 10, Measurement{el >= 10, ρNoisy, ρDotNoisy, ρ, ρDot, θgst, state, s}
}
// RangeElAz returns the range (in the SEZ frame), elevation and azimuth (in degrees) of a given R vector in ECEF.
func (s Station) RangeElAz(rECEF []float64) (ρECEF []float64, ρ, el, az float64) {
	// Line-of-sight vector from station to spacecraft, still in ECEF.
	ρECEF = make([]float64, 3)
	for i := 0; i < 3; i++ {
		ρECEF[i] = rECEF[i] - s.R[i]
	}
	ρ = norm(ρECEF)
	// Rotate ECEF -> SEZ: R3 about the station longitude, then R2 about (π/2 - latitude).
	rSEZ := smd.MxV33(smd.R3(s.longθ), ρECEF)
	rSEZ = smd.MxV33(smd.R2(math.Pi/2-s.latΦ), rSEZ)
	el = math.Asin(rSEZ[2]/ρ) * r2d
	// NOTE(review): the full-turn offset is added BEFORE the rad->deg
	// conversion, so az lands in (180, 540] degrees rather than [0, 360).
	// Confirm callers normalize or never use az (PerformMeasurement discards it).
	az = (2*math.Pi + math.Atan2(rSEZ[1], -rSEZ[0])) * r2d
	return
}
// NewStation returns a new station. Angles in degrees.
// NOTE(review): σρ and σρDot are inserted directly as the entries of the
// 1x1 covariance matrices, i.e. treated as variances despite the σ naming —
// confirm whether callers pass σ or σ².
func NewStation(name string, altitude, latΦ, longθ, σρ, σρDot float64) Station {
	R := smd.GEO2ECEF(altitude, latΦ*d2r, longθ*d2r)
	// Station velocity due to Earth rotation: ω x R.
	V := cross([]float64{0, 0, smd.EarthRotationRate}, R)
	seed := rand.New(rand.NewSource(time.Now().UnixNano()))
	ρNoise, ok := distmv.NewNormal([]float64{0}, mat64.NewSymDense(1, []float64{σρ}), seed)
	if !ok {
		panic("NOK in Gaussian")
	}
	ρDotNoise, ok := distmv.NewNormal([]float64{0}, mat64.NewSymDense(1, []float64{σρDot}), seed)
	if !ok {
		panic("NOK in Gaussian")
	}
	return Station{name, R, V, latΦ * d2r, longθ * d2r, altitude, ρNoise, ρDotNoise}
}
// Measurement stores a measurement of a station.
type Measurement struct {
	Visible         bool    // Stores whether or not the attempted measurement was visible from the station.
	ρ, ρDot         float64 // Store the (noise-corrupted) range and range rate
	trueρ, trueρDot float64 // Store the true range and range rate
	θgst            float64 // sidereal angle at the measurement epoch
	State           smd.State
	Station         Station
}
// IsNil reports whether this measurement holds no data, i.e. both the
// range and range rate are zero. (The original comment was a copy-paste
// from StateVector.)
func (m Measurement) IsNil() bool {
	return m.ρ == 0 && m.ρDot == 0
}
// StateVector returns the measurement [ρ, ρDot] as a 2x1 mat64.Vector.
func (m Measurement) StateVector() *mat64.Vector {
	return mat64.NewVector(2, []float64{m.ρ, m.ρDot})
}
// HTilde returns the H tilde matrix for this given measurement: the 2x6
// matrix of measurement partials. Row 0 holds ∂ρ/∂{x,y,z} (range depends
// on position only); row 1 holds ∂ρDot/∂{x,y,z,xDot,yDot,zDot}.
func (m Measurement) HTilde() *mat64.Dense {
	// Station position/velocity rotated from ECEF into ECI at the epoch.
	stationR := smd.ECEF2ECI(m.Station.R, m.θgst)
	stationV := smd.ECEF2ECI(m.Station.V, m.θgst)
	xS := stationR[0]
	yS := stationR[1]
	zS := stationR[2]
	xSDot := stationV[0]
	ySDot := stationV[1]
	zSDot := stationV[2]
	R := m.State.Orbit.R()
	V := m.State.Orbit.V()
	x := R[0]
	y := R[1]
	z := R[2]
	xDot := V[0]
	yDot := V[1]
	zDot := V[2]
	H := mat64.NewDense(2, 6, nil)
	// \partial \rho / \partial {x,y,z}
	H.Set(0, 0, (x-xS)/m.ρ)
	H.Set(0, 1, (y-yS)/m.ρ)
	H.Set(0, 2, (z-zS)/m.ρ)
	// \partial \dot\rho / \partial {x,y,z}
	// NOTE(review): the second term uses +ρDot/ρ² — confirm the sign against
	// the derivation; several textbook forms carry a minus here.
	H.Set(1, 0, (xDot-xSDot)/m.ρ+(m.ρDot/math.Pow(m.ρ, 2))*(x-xS))
	H.Set(1, 1, (yDot-ySDot)/m.ρ+(m.ρDot/math.Pow(m.ρ, 2))*(y-yS))
	H.Set(1, 2, (zDot-zSDot)/m.ρ+(m.ρDot/math.Pow(m.ρ, 2))*(z-zS))
	// \partial \dot\rho / \partial {xDot,yDot,zDot} equals \partial\rho/\partial{x,y,z}.
	H.Set(1, 3, (x-xS)/m.ρ)
	H.Set(1, 4, (y-yS)/m.ρ)
	H.Set(1, 5, (z-zS)/m.ρ)
	return H
}
// CSV returns the data as CSV (does *not* include the new line).
// Columns: trueρ, trueρDot, ρ, ρDot — note the trailing comma.
func (m Measurement) CSV() string {
	return fmt.Sprintf("%f,%f,%f,%f,", m.trueρ, m.trueρDot, m.ρ, m.ρDot)
}
// String implements fmt.Stringer as "stationName@stateDateTime".
func (m Measurement) String() string {
	return fmt.Sprintf("%s@%s", m.Station.name, m.State.DT)
}
// cross returns the cross product a x b of two 3-vectors.
// (Unshamefully copied from smd/math.go.)
func cross(a, b []float64) []float64 {
	c := make([]float64, 3)
	c[0] = a[1]*b[2] - a[2]*b[1]
	c[1] = a[2]*b[0] - a[0]*b[2]
	c[2] = a[0]*b[1] - a[1]*b[0]
	return c
}
// norm returns the Euclidean norm of a vector which is supposed to be 3x1.
func norm(v []float64) float64 {
	sumSq := v[0]*v[0] + v[1]*v[1] + v[2]*v[2]
	return math.Sqrt(sumSq)
}
// unit returns the unit vector of a given vector.
func unit(a []float64) (b []float64) {
n := norm(a)
if floats.EqualWithinAbs(n, 0, 1e-12) {
return []float64{0, 0, 0}
}
b = make([]float64, len(a))
for i, val := range a {
b[i] = val / n
}
return
} | examples/statOD/hwmain/station.go | 0.578686 | 0.527986 | station.go | starcoder |
package Example
import (
flatbuffers "github.com/google/flatbuffers/go"
)
// Vec3T is the object-API (unpacked, mutable Go struct) form of Vec3.
// This file follows the flatbuffers generated-code layout.
type Vec3T struct {
	X     float32
	Y     float32
	Z     float32
	Test1 float64
	Test2 Color
	Test3 *TestT
}

// Pack serializes t into builder and returns the struct's offset.
// A nil receiver packs as offset 0.
func (t *Vec3T) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
	if t == nil {
		return 0
	}
	return CreateVec3(builder, t.X, t.Y, t.Z, t.Test1, t.Test2, t.Test3.A, t.Test3.B)
}

// UnPackTo copies every field of the packed Vec3 into t.
func (rcv *Vec3) UnPackTo(t *Vec3T) {
	t.X = rcv.X()
	t.Y = rcv.Y()
	t.Z = rcv.Z()
	t.Test1 = rcv.Test1()
	t.Test2 = rcv.Test2()
	t.Test3 = rcv.Test3(nil).UnPack()
}

// UnPack allocates and fills a Vec3T; a nil receiver unpacks to nil.
func (rcv *Vec3) UnPack() *Vec3T {
	if rcv == nil {
		return nil
	}
	t := &Vec3T{}
	rcv.UnPackTo(t)
	return t
}
// Vec3 is a read accessor over a flatbuffers struct stored inline in a buffer.
type Vec3 struct {
	_tab flatbuffers.Struct
}

// GetStructVectorAsVec3 shortcut to access struct in vector of unions.
// It dereferences the indirect offset stored at table.Pos before Init.
func GetStructVectorAsVec3(table *flatbuffers.Table) *Vec3 {
	n := flatbuffers.GetUOffsetT(table.Bytes[table.Pos:])
	x := &Vec3{}
	x.Init(table.Bytes, n+table.Pos)
	return x
}

// GetStructAsVec3 shortcut to access struct in single union field
// (the struct lives directly at table.Pos, no indirection).
func GetStructAsVec3(table *flatbuffers.Table) *Vec3 {
	x := &Vec3{}
	x.Init(table.Bytes, table.Pos)
	return x
}
// Init points the accessor at position i of buf.
func (rcv *Vec3) Init(buf []byte, i flatbuffers.UOffsetT) {
	rcv._tab.Bytes = buf
	rcv._tab.Pos = i
}

// Table exposes the underlying flatbuffers table.
func (rcv *Vec3) Table() flatbuffers.Table {
	return rcv._tab.Table
}

// X reads the X field (byte offset 0 within the struct).
func (rcv *Vec3) X() float32 {
	return rcv._tab.GetFloat32(rcv._tab.Pos + flatbuffers.UOffsetT(0))
}

// MutateX overwrites X in place, returning the underlying mutate result.
func (rcv *Vec3) MutateX(n float32) bool {
	return rcv._tab.MutateFloat32(rcv._tab.Pos+flatbuffers.UOffsetT(0), n)
}

// Y reads the Y field (byte offset 4).
func (rcv *Vec3) Y() float32 {
	return rcv._tab.GetFloat32(rcv._tab.Pos + flatbuffers.UOffsetT(4))
}

// MutateY overwrites Y in place.
func (rcv *Vec3) MutateY(n float32) bool {
	return rcv._tab.MutateFloat32(rcv._tab.Pos+flatbuffers.UOffsetT(4), n)
}

// Z reads the Z field (byte offset 8).
func (rcv *Vec3) Z() float32 {
	return rcv._tab.GetFloat32(rcv._tab.Pos + flatbuffers.UOffsetT(8))
}

// MutateZ overwrites Z in place.
func (rcv *Vec3) MutateZ(n float32) bool {
	return rcv._tab.MutateFloat32(rcv._tab.Pos+flatbuffers.UOffsetT(8), n)
}

// Test1 reads the float64 field (byte offset 16; offsets 12-15 are padding).
func (rcv *Vec3) Test1() float64 {
	return rcv._tab.GetFloat64(rcv._tab.Pos + flatbuffers.UOffsetT(16))
}

// MutateTest1 overwrites Test1 in place.
func (rcv *Vec3) MutateTest1(n float64) bool {
	return rcv._tab.MutateFloat64(rcv._tab.Pos+flatbuffers.UOffsetT(16), n)
}

// Test2 reads the Color enum stored as a single byte at offset 24.
func (rcv *Vec3) Test2() Color {
	return Color(rcv._tab.GetByte(rcv._tab.Pos + flatbuffers.UOffsetT(24)))
}

// MutateTest2 overwrites Test2 in place.
func (rcv *Vec3) MutateTest2(n Color) bool {
	return rcv._tab.MutateByte(rcv._tab.Pos+flatbuffers.UOffsetT(24), byte(n))
}

// Test3 reads the nested Test struct at byte offset 26, reusing obj when
// non-nil to avoid an allocation.
func (rcv *Vec3) Test3(obj *Test) *Test {
	if obj == nil {
		obj = new(Test)
	}
	obj.Init(rcv._tab.Bytes, rcv._tab.Pos+26)
	return obj
}
// CreateVec3 writes a Vec3 struct directly into the builder and returns
// its offset. Builders prepend (write back-to-front), so fields appear in
// reverse order; the Prep/Pad calls maintain 8-byte alignment for test1
// within the 32-byte struct.
func CreateVec3(builder *flatbuffers.Builder, x float32, y float32, z float32, test1 float64, test2 Color, test3_a int16, test3_b int8) flatbuffers.UOffsetT {
	builder.Prep(8, 32)
	builder.Pad(2)
	builder.Prep(2, 4)
	builder.Pad(1)
	builder.PrependInt8(test3_b)
	builder.PrependInt16(test3_a)
	builder.Pad(1)
	builder.PrependByte(byte(test2))
	builder.PrependFloat64(test1)
	builder.Pad(4)
	builder.PrependFloat32(z)
	builder.PrependFloat32(y)
	builder.PrependFloat32(x)
	return builder.Offset()
}
}
package dfl
import (
"strings"
"github.com/pkg/errors"
)
// TernaryOperator is a DFL Node that represents the ternary operator of a condition, true value, and false value.
type TernaryOperator struct {
	Left  Node // the condition expression
	True  Node // result when the condition evaluates to true
	False Node // result when the condition evaluates to false
}
// Dfl renders the expression in DFL syntax as "(cond ? true : false)".
// When pretty is set, the condition and branches are rendered with
// increasing indentation and the closing parenthesis is aligned to tabs.
func (to TernaryOperator) Dfl(quotes []string, pretty bool, tabs int) string {
	if pretty {
		return "(\n" +
			to.Left.Dfl(quotes, pretty, tabs+1) + " ? " +
			to.True.Dfl(quotes, pretty, tabs+1) + " : " +
			to.False.Dfl(quotes, pretty, tabs+2) + "\n" +
			strings.Repeat(DefaultTab, tabs) + ")"
	}
	return "(" + to.Left.Dfl(quotes, pretty, tabs) + " ? " + to.True.Dfl(quotes, pretty, tabs) + " : " + to.False.Dfl(quotes, pretty, tabs) + ")"
}
// Sql renders the ternary as a SQL CASE expression:
// ( CASE cond WHEN true THEN x ELSE y END ).
func (to TernaryOperator) Sql(pretty bool, tabs int) string {
	return "( CASE " + to.Left.Sql(pretty, tabs) + " WHEN true THEN " + to.True.Sql(pretty, tabs) + " ELSE " + to.False.Sql(pretty, tabs) + " END )"
}
// Map returns a tree representation of this node with op "ternary" and
// the recursively mapped condition/true/false children.
func (to TernaryOperator) Map() map[string]interface{} {
	return map[string]interface{}{
		"op":        "ternary",
		"condition": to.Left.Map(),
		"true":      to.True.Map(),
		"false":     to.False.Map(),
	}
}
// Compile constant-folds the ternary: when the condition compiles down to
// a boolean Literal, only the matching branch survives; otherwise all
// three children are compiled and a new TernaryOperator is returned.
func (to TernaryOperator) Compile() Node {
	left := to.Left.Compile()
	if lit, ok := left.(Literal); ok {
		if b, ok := lit.Value.(bool); ok {
			if b {
				return to.True.Compile()
			}
			return to.False.Compile()
		}
	}
	return &TernaryOperator{
		Left:  left,
		True:  to.True.Compile(),
		False: to.False.Compile(),
	}
}
// Evaluate evaluates the condition and then exactly one branch.
// It returns the (possibly updated) vars, the branch's value, and an error
// if the condition fails, is not boolean, or the chosen branch fails.
func (to TernaryOperator) Evaluate(vars map[string]interface{}, ctx interface{}, funcs FunctionMap, quotes []string) (map[string]interface{}, interface{}, error) {
	vars, cv, err := to.Left.Evaluate(vars, ctx, funcs, quotes)
	if err != nil {
		return vars, cv, errors.Wrap(err, "Error evaluating condition of ternary operator: "+to.Left.Dfl(quotes, false, 0))
	}
	condition, ok := cv.(bool)
	if !ok {
		// BUG FIX: the original called errors.Wrap(err, ...) here, but err is
		// always nil at this point and errors.Wrap(nil, ...) returns nil, so a
		// non-boolean condition was silently accepted. Surface a real error.
		return vars, cv, errors.New("ternary operator condition returned a non boolean: " + to.Left.Dfl(quotes, false, 0))
	}
	if condition {
		vars, v, err := to.True.Evaluate(vars, ctx, funcs, quotes)
		if err != nil {
			return vars, v, errors.Wrap(err, "Error evaluating true expression of ternary operator: "+to.True.Dfl(quotes, false, 0))
		}
		return vars, v, nil
	}
	vars, v, err := to.False.Evaluate(vars, ctx, funcs, quotes)
	if err != nil {
		return vars, v, errors.Wrap(err, "Error evaluating false expression of ternary operator: "+to.False.Dfl(quotes, false, 0))
	}
	return vars, v, nil
}
// Attributes returns a slice of all attributes used in the evaluation of
// this node, including all children nodes. Values from the condition,
// true, and false nodes are de-duplicated using a set; order is unspecified.
func (to TernaryOperator) Attributes() []string {
	seen := make(map[string]struct{})
	for _, child := range []Node{to.Left, to.True, to.False} {
		for _, a := range child.Attributes() {
			seen[a] = struct{}{}
		}
	}
	out := make([]string, 0, len(seen))
	for a := range seen {
		out = append(out, a)
	}
	return out
}
// Variables returns a slice of all variables used in the evaluation of this node, including a children nodes.
// Variables de-duplicates values from the condition, true, and false nodes using a set.
func (to TernaryOperator) Variables() []string {
set := make(map[string]struct{})
for _, x := range to.Left.Variables() {
set[x] = struct{}{}
}
for _, x := range to.True.Variables() {
set[x] = struct{}{}
}
for _, x := range to.False.Variables() {
set[x] = struct{}{}
}
attrs := make([]string, 0, len(set))
for x := range set {
attrs = append(attrs, x)
}
return attrs
} | pkg/dfl/TernaryOperator.go | 0.714628 | 0.485051 | TernaryOperator.go | starcoder |
package checksum
import (
"sort"
"strconv"
"strings"
)
// GetSmallest returns the smallest value in a space-separated row of
// integers, or -1 if any token fails to parse.
func GetSmallest(content string) int {
	smallest := 0
	for i, token := range strings.Split(content, " ") {
		value, err := strconv.Atoi(token)
		if err != nil {
			return -1
		}
		// Seed from the first element instead of the original magic sentinel
		// (999999), which returned wrong answers for rows whose values all
		// exceed it.
		if i == 0 || value < smallest {
			smallest = value
		}
	}
	return smallest
}
// GetLargest returns the largest value in a space-separated row of
// integers, or -1 if any token fails to parse.
func GetLargest(content string) int {
	largest := 0
	for i, token := range strings.Split(content, " ") {
		value, err := strconv.Atoi(token)
		if err != nil {
			return -1
		}
		// Seed from the first element: the original started at 0, which
		// returned 0 for rows containing only negative numbers.
		if i == 0 || value > largest {
			largest = value
		}
	}
	return largest
}
// GetDivisible returns the quotient of the only two numbers in the row
// where one evenly divides the other, or -1 on a parse error or when no
// such pair exists.
func GetDivisible(content string) int {
	tokens := strings.Split(content, " ")
	numbers := make([]int, len(tokens))
	for i, token := range tokens {
		n, err := strconv.Atoi(token)
		if err != nil {
			return -1
		}
		numbers[i] = n
	}
	// Descending order guarantees the dividend is always >= the divisor.
	sort.Sort(sort.Reverse(sort.IntSlice(numbers)))
	for i, dividend := range numbers {
		for _, divisor := range numbers[i+1:] {
			if dividend%divisor == 0 {
				return dividend / divisor
			}
		}
	}
	return -1
}
// Generate returns the checksum of a spreadsheet: the sum, over each
// newline-separated row, of (largest value - smallest value).
func Generate(spreadsheet string) int {
	checksum := 0
	for _, row := range strings.Split(spreadsheet, "\n") {
		checksum += GetLargest(row) - GetSmallest(row)
	}
	return checksum
}
// GenerateNew returns the checksum of a spreadsheet
func GenerateNew(spreadsheet string) int {
lines := strings.Split(spreadsheet, "\n")
checksum := 0
for _, line := range lines {
divisible := GetDivisible(line)
checksum += divisible
}
return checksum
} | checksum/checksum.go | 0.700075 | 0.42668 | checksum.go | starcoder |
package discovery
import (
"math"
"math/rand"
"sync"
"time"
)
// BackoffFactory produces a fresh BackoffStrategy per call.
type BackoffFactory func() BackoffStrategy

// BackoffStrategy describes how backoff will be implemented. BackoffStrategies are stateful.
type BackoffStrategy interface {
	// Delay calculates how long the next backoff duration should be, given the prior calls to Delay
	Delay() time.Duration
	// Reset clears the internal state of the BackoffStrategy
	Reset()
}
// Jitter implementations taken roughly from https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
// Jitter must return a duration between min and max. Min must be lower than, or equal to, max.
// The duration argument is the raw (un-jittered) backoff to randomize; rng is the randomness source.
type Jitter func(duration, min, max time.Duration, rng *rand.Rand) time.Duration
// FullJitter returns a random number uniformly chosen from the range [min, boundedDur),
// where boundedDur is the duration bounded between min and max.
func FullJitter(duration, min, max time.Duration, rng *rand.Rand) time.Duration {
	if duration <= min {
		return min
	}
	normalizedDur := boundedDuration(duration, min, max) - min
	if normalizedDur <= 0 {
		// Guard: when min == max the jitter window collapses to zero and
		// rng.Int63n would panic on a non-positive argument.
		return min
	}
	return boundedDuration(time.Duration(rng.Int63n(int64(normalizedDur)))+min, min, max)
}
// NoJitter returns the duration bounded between min and max.
// The rng parameter is unused; it exists only to satisfy the Jitter signature.
func NoJitter(duration, min, max time.Duration, rng *rand.Rand) time.Duration {
	return boundedDuration(duration, min, max)
}
// randomizedBackoff holds the [min, max] clamp window and the randomness
// source shared by the jittered strategies.
type randomizedBackoff struct {
	min time.Duration
	max time.Duration
	rng *rand.Rand
}

// BoundedDelay clamps duration into [b.min, b.max].
func (b *randomizedBackoff) BoundedDelay(duration time.Duration) time.Duration {
	return boundedDuration(duration, b.min, b.max)
}
func boundedDuration(d, min, max time.Duration) time.Duration {
if d < min {
return min
}
if d > max {
return max
}
return d
}
// attemptBackoff layers an attempt counter and a jitter function on top of
// the clamped randomized backoff.
type attemptBackoff struct {
	attempt int
	jitter  Jitter
	randomizedBackoff
}

// Reset restarts the attempt count at zero.
func (b *attemptBackoff) Reset() {
	b.attempt = 0
}
// NewFixedBackoff creates a BackoffFactory with a constant backoff duration.
func NewFixedBackoff(delay time.Duration) BackoffFactory {
	return func() BackoffStrategy {
		return &fixedBackoff{delay: delay}
	}
}

// fixedBackoff always returns the same delay and keeps no state.
type fixedBackoff struct {
	delay time.Duration
}

// Delay returns the fixed delay.
func (b *fixedBackoff) Delay() time.Duration {
	return b.delay
}

// Reset is a no-op: a fixed backoff has no state to clear.
func (b *fixedBackoff) Reset() {}
// NewPolynomialBackoff creates a BackoffFactory with backoff of the form c0*x^0, c1*x^1, ...cn*x^n where x is the attempt number
// jitter is the function for adding randomness around the backoff
// timeUnits are the units of time the polynomial is evaluated in
// polyCoefs is the array of polynomial coefficients from [c0, c1, ... cn]
// All strategies produced by the factory share one mutex-guarded RNG seeded from rngSrc.
func NewPolynomialBackoff(min, max time.Duration, jitter Jitter,
	timeUnits time.Duration, polyCoefs []float64, rngSrc rand.Source) BackoffFactory {
	rng := rand.New(&lockedSource{src: rngSrc})
	return func() BackoffStrategy {
		return &polynomialBackoff{
			attemptBackoff: attemptBackoff{
				randomizedBackoff: randomizedBackoff{
					min: min,
					max: max,
					rng: rng,
				},
				jitter: jitter,
			},
			timeUnits: timeUnits,
			poly:      polyCoefs,
		}
	}
}
// polynomialBackoff evaluates a polynomial in the attempt number to obtain
// the next delay, expressed in multiples of timeUnits.
type polynomialBackoff struct {
	attemptBackoff
	timeUnits time.Duration
	poly      []float64
}

// Delay evaluates the polynomial at the current attempt number (starting
// at 0), advances the attempt counter, and applies the jitter function.
// NOTE(review): the attempt counter only advances when len(poly) > 1 —
// constant and empty polynomials never increment it; confirm intended.
func (b *polynomialBackoff) Delay() time.Duration {
	var polySum float64
	switch len(b.poly) {
	case 0:
		return 0
	case 1:
		polySum = b.poly[0]
	default:
		polySum = b.poly[0]
		exp := 1
		attempt := b.attempt
		b.attempt++
		for _, c := range b.poly[1:] {
			// exp accumulates attempt^i for the i-th coefficient.
			exp *= attempt
			polySum += float64(exp) * c
		}
	}
	return b.jitter(time.Duration(float64(b.timeUnits)*polySum), b.min, b.max, b.rng)
}
// NewExponentialBackoff creates a BackoffFactory with backoff of the form base^x + offset where x is the attempt number
// jitter is the function for adding randomness around the backoff
// timeUnits are the units of time the base^x is evaluated in
// All strategies produced by the factory share one mutex-guarded RNG seeded from rngSrc.
func NewExponentialBackoff(min, max time.Duration, jitter Jitter,
	timeUnits time.Duration, base float64, offset time.Duration, rngSrc rand.Source) BackoffFactory {
	rng := rand.New(&lockedSource{src: rngSrc})
	return func() BackoffStrategy {
		return &exponentialBackoff{
			attemptBackoff: attemptBackoff{
				randomizedBackoff: randomizedBackoff{
					min: min,
					max: max,
					rng: rng,
				},
				jitter: jitter,
			},
			timeUnits: timeUnits,
			base:      base,
			offset:    offset,
		}
	}
}
// exponentialBackoff computes base^attempt (in timeUnits) plus a fixed offset.
type exponentialBackoff struct {
	attemptBackoff
	timeUnits time.Duration
	base      float64
	offset    time.Duration
}

// Delay returns jitter(base^attempt * timeUnits + offset) and advances the
// attempt counter.
func (b *exponentialBackoff) Delay() time.Duration {
	attempt := b.attempt
	b.attempt++
	return b.jitter(
		time.Duration(math.Pow(b.base, float64(attempt))*float64(b.timeUnits))+b.offset, b.min, b.max, b.rng)
}
// NewExponentialDecorrelatedJitter creates a BackoffFactory with backoff of the roughly of the form base^x where x is the attempt number.
// Delays start at the minimum duration and after each attempt delay = rand(min, delay * base), bounded by the max
// See https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ for more information
// All strategies produced by the factory share one mutex-guarded RNG seeded from rngSrc.
func NewExponentialDecorrelatedJitter(min, max time.Duration, base float64, rngSrc rand.Source) BackoffFactory {
	rng := rand.New(&lockedSource{src: rngSrc})
	return func() BackoffStrategy {
		return &exponentialDecorrelatedJitter{
			randomizedBackoff: randomizedBackoff{
				min: min,
				max: max,
				rng: rng,
			},
			base: base,
		}
	}
}
// exponentialDecorrelatedJitter implements decorrelated jitter: each delay
// is drawn from [min, lastDelay*base), clamped to [min, max].
type exponentialDecorrelatedJitter struct {
	randomizedBackoff
	base      float64
	lastDelay time.Duration
}

// Delay returns the next backoff duration, remembering it for the next draw.
// The first call (or the call after Reset) always returns min.
func (b *exponentialDecorrelatedJitter) Delay() time.Duration {
	if b.lastDelay < b.min {
		b.lastDelay = b.min
		return b.lastDelay
	}
	nextMax := int64(float64(b.lastDelay) * b.base)
	if nextMax <= int64(b.min) {
		// Guard: with base <= 1 the draw window collapses and rng.Int63n
		// would panic on a non-positive argument; fall back to the minimum.
		b.lastDelay = b.min
		return b.lastDelay
	}
	b.lastDelay = boundedDuration(time.Duration(b.rng.Int63n(nextMax-int64(b.min)))+b.min, b.min, b.max)
	return b.lastDelay
}

// Reset clears the remembered delay so the next call starts at min again.
func (b *exponentialDecorrelatedJitter) Reset() { b.lastDelay = 0 }
type lockedSource struct {
lk sync.Mutex
src rand.Source
}
func (r *lockedSource) Int63() (n int64) {
r.lk.Lock()
n = r.src.Int63()
r.lk.Unlock()
return
}
func (r *lockedSource) Seed(seed int64) {
r.lk.Lock()
r.src.Seed(seed)
r.lk.Unlock()
} | vendor/github.com/libp2p/go-libp2p-discovery/backoff.go | 0.805441 | 0.450541 | backoff.go | starcoder |
package x2j
import (
"strings"
"github.com/clbanning/mxj"
)
//----------------------------- find all paths to a key --------------------------------
// Want eventually to extract shortest path and call GetValuesAtKeyPath()
// This will get all the possible paths. These can be scanned for len(path) and sequence.

// PathsForTag gets all paths through the doc (in dot-notation) that terminate with the specified tag.
// Results can be used with ValuesAtTagPath() and ValuesFromTagPath().
// Returns an error only if the XML document fails to parse.
func PathsForTag(doc string, key string) ([]string, error) {
	m, err := mxj.NewMapXml([]byte(doc))
	if err != nil {
		return nil, err
	}
	ss := PathsForKey(m, key)
	return ss, nil
}
// PathForTagShortest extracts the shortest path from all possible paths - from PathsForTag().
// Paths are strings using dot-notation. Returns "" when the tag is absent.
func PathForTagShortest(doc string, key string) (string, error) {
	m, err := mxj.NewMapXml([]byte(doc))
	if err != nil {
		return "", err
	}
	s := PathForKeyShortest(m, key)
	return s, nil
}
// BytePathsForTag gets all paths through the doc (in dot-notation) that terminate with the specified tag.
// It is the []byte counterpart of PathsForTag.
// Results can be used with ValuesAtTagPath() and ValuesFromTagPath().
func BytePathsForTag(doc []byte, key string) ([]string, error) {
	m, err := mxj.NewMapXml(doc)
	if err != nil {
		return nil, err
	}
	ss := PathsForKey(m, key)
	return ss, nil
}
// BytePathForTagShortest extracts the shortest path from all possible paths - from PathsForTag().
// Paths are strings using dot-notation.
// NOTE(review): this parses via ByteDocToMap while BytePathsForTag calls
// mxj.NewMapXml directly — confirm the two parse paths behave identically.
func BytePathForTagShortest(doc []byte, key string) (string, error) {
	m, err := ByteDocToMap(doc)
	if err != nil {
		return "", err
	}
	s := PathForKeyShortest(m, key)
	return s, nil
}
// PathsForKey gets all paths through the map (in dot-notation) that
// terminate with the specified key, or nil when the key never occurs.
// Results can be used with ValuesAtKeyPath() and ValuesFromKeyPath().
func PathsForKey(m map[string]interface{}, key string) []string {
	found := map[string]bool{}
	hasKeyPath("", m, key, &found)
	if len(found) == 0 {
		return nil
	}
	paths := make([]string, 0, len(found))
	for p := range found {
		paths = append(paths, p)
	}
	return paths
}
// PathForKeyShortest extracts the shortest path (fewest dot-separated
// segments) from all possible paths - from PathsForKey().
// Returns "" when the key does not occur.
func PathForKeyShortest(m map[string]interface{}, key string) string {
	paths := PathsForKey(m, key)
	if len(paths) == 0 {
		return ""
	}
	shortest := paths[0]
	shortestLen := len(strings.Split(shortest, "."))
	for _, p := range paths[1:] {
		if l := len(strings.Split(p, ".")); l < shortestLen {
			shortest, shortestLen = p, l
		}
	}
	return shortest
}
// hasKeyPath walks iv recursively, recording in basket the dot-notation
// crumb trail of every map that contains 'key'. Slices are traversed
// without extending the crumb; scalar leaves are ignored.
func hasKeyPath(crumb string, iv interface{}, key string, basket *map[string]bool) {
	switch vv := iv.(type) {
	case map[string]interface{}:
		if _, ok := vv[key]; ok {
			trail := key
			if crumb != "" {
				trail = crumb + "." + key
			}
			(*basket)[trail] = true
		}
		// Walk on down the path: the key could occur again at a deeper node.
		for k, v := range vv {
			next := k
			if crumb != "" {
				next = crumb + "." + k
			}
			hasKeyPath(next, v, key, basket)
		}
	case []interface{}:
		// The crumb trail does not change across slice elements.
		for _, v := range vv {
			hasKeyPath(crumb, v, key, basket)
		}
	}
}
package ls
import (
"sort"
"github.com/bserdar/digraph"
)
// Edge represents a graph edge
type Edge interface {
	digraph.Edge
	// GetLabelStr returns the edge label as a string ("" when unset).
	GetLabelStr() string
	// Clone returns a new edge that is a copy of this one but
	// unconnected to any nodes
	Clone() Edge
	// GetProperties returns the edge's mutable property map.
	GetProperties() map[string]*PropertyValue
	// GetCompiledDataMap returns the edge's compiled-data map (lazily created).
	GetCompiledDataMap() map[interface{}]interface{}
}
// edge is a labeled graph edge between two nodes
type edge struct {
	digraph.EdgeHeader
	properties map[string]*PropertyValue   // lazily created by GetProperties
	compiled   map[interface{}]interface{} // lazily created by GetCompiledDataMap
}
// GetCompiledDataMap returns the edge's compiled-data map, creating it on
// first use. Access is not synchronized; callers must serialize.
func (edge *edge) GetCompiledDataMap() map[interface{}]interface{} {
	if edge.compiled == nil {
		edge.compiled = make(map[interface{}]interface{})
	}
	return edge.compiled
}
// GetProperties returns the edge's property map, creating it on first use.
// Access is not synchronized; callers must serialize.
func (edge *edge) GetProperties() map[string]*PropertyValue {
	if edge.properties == nil {
		edge.properties = make(map[string]*PropertyValue)
	}
	return edge.properties
}
// NewEdge returns a new initialized edge carrying the given label,
// unconnected to any nodes.
func NewEdge(label string) Edge {
	return &edge{
		EdgeHeader: digraph.NewEdgeHeader(label),
	}
}
// GetLabelStr returns the edge label as a string. A nil receiver or a nil
// label yields "".
func (edge *edge) GetLabelStr() string {
	if edge == nil {
		return ""
	}
	if label := edge.GetLabel(); label != nil {
		return label.(string)
	}
	return ""
}
// IsAttributeTreeEdge returns true if the edge is an edge between two
// attribute nodes, i.e. its label is one of the schema attribute terms.
// A nil edge is not an attribute-tree edge.
func IsAttributeTreeEdge(edge Edge) bool {
	if edge == nil {
		return false
	}
	switch edge.GetLabelStr() {
	case LayerTerms.Attributes,
		LayerTerms.AttributeList,
		LayerTerms.ArrayItems,
		LayerTerms.AllOf,
		LayerTerms.OneOf:
		return true
	}
	return false
}
// Clone returns a copy of the schema edge, keeping its current label and
// properties but attached to no nodes.
func (e *edge) Clone() Edge {
	return CloneWithLabel(e, e.GetLabelStr())
}
// CloneWithLabel returns a copy of the schema edge with a new label.
// Properties are copied via PropertyValue.Clone; the result is attached
// to no nodes.
func CloneWithLabel(e Edge, label string) Edge {
	ret := NewEdge(label).(*edge)
	p := ret.GetProperties()
	for k, v := range e.GetProperties() {
		p[k] = v.Clone()
	}
	return ret
}
// SortEdges sorts edges in place by the index of their target node.
func SortEdges(edges []Edge) {
	sort.Slice(edges, func(i, j int) bool { return edges[i].GetTo().(Node).GetIndex() < edges[j].GetTo().(Node).GetIndex() })
}
// SortEdgesItr drains the given iterator, sorts the edges by target-node
// index, and returns a fresh iterator over the sorted edges.
func SortEdgesItr(edges digraph.Edges) digraph.Edges {
	e := make([]Edge, 0)
	for edges.HasNext() {
		e = append(e, edges.Next().(Edge))
	}
	SortEdges(e)
	// Repack into []digraph.Edge for the iterator constructor.
	arr := make([]digraph.Edge, 0, len(e))
	for _, x := range e {
		arr = append(arr, x)
	}
	return digraph.NewEdges(arr...)
}
// An EdgeSet is a set of edges
type EdgeSet map[Edge]struct{}

// NewEdgeSet creates a new edge set containing the given edges
// (duplicates collapse to a single entry).
func NewEdgeSet(edge ...Edge) EdgeSet {
	ret := make(EdgeSet, len(edge))
	for _, k := range edge {
		ret[k] = struct{}{}
	}
	return ret
}
// Slice returns edges in the set as a slice
func (set EdgeSet) Slice() []Edge {
ret := make([]Edge, 0, len(set))
for k := range set {
ret = append(ret, k)
}
return ret
} | pkg/ls/edge.go | 0.76145 | 0.465448 | edge.go | starcoder |
package grid
import (
"fmt"
"github.com/johnfercher/taleslab/pkg/taleslab/taleslabdomain/taleslabconsts"
"github.com/johnfercher/taleslab/pkg/taleslab/taleslabdomain/taleslabentities"
"math/rand"
"time"
)
// GenerateElementGrid builds an x-by-y grid with every cell set to
// defaultElement. Each row is an independent slice.
func GenerateElementGrid(x, y int, defaultElement taleslabentities.Element) [][]taleslabentities.Element {
	grid := make([][]taleslabentities.Element, 0, x)
	for i := 0; i < x; i++ {
		row := make([]taleslabentities.Element, 0, y)
		for j := 0; j < y; j++ {
			row = append(row, defaultElement)
		}
		grid = append(grid, row)
	}
	return grid
}
// RandomlyFillEmptyGridSlots scatters props of elementType over propsGrid at
// positions whose corresponding worldGrid cell is approved by mustAdd.
// Border cells and cells within two slots of an existing prop (along either
// axis) are skipped; of the remaining candidates, roughly 1 in density is
// filled. propsGrid is mutated in place and also returned.
// NOTE(review): density must be > 0 or rand.Int()%density panics — confirm callers.
func RandomlyFillEmptyGridSlots(worldGrid [][]taleslabentities.Element, propsGrid [][]taleslabentities.Element,
	density int, elementType taleslabconsts.ElementType, mustAdd func(element taleslabentities.Element) bool) [][]taleslabentities.Element {
	width := len(worldGrid)
	length := len(worldGrid[0])
	for i := 0; i < width; i++ {
		for j := 0; j < length; j++ {
			// Skip cells rejected by the caller-supplied predicate.
			if !mustAdd(worldGrid[i][j]) {
				continue
			}
			// Never place props on the outer border of the grid.
			if i == 0 || i == width-1 || j == 0 || j == length-1 {
				continue
			}
			// Skip if a prop already exists within two cells along the i axis.
			if i > 1 && (propsGrid[i-1][j].ElementType != taleslabconsts.NoneType || propsGrid[i-2][j].ElementType != taleslabconsts.NoneType) {
				continue
			}
			// Skip if a prop already exists within two cells along the j axis.
			if j > 1 && (propsGrid[i][j-1].ElementType != taleslabconsts.NoneType || propsGrid[i][j-2].ElementType != taleslabconsts.NoneType) {
				continue
			}
			if rand.Int()%density == 0 {
				propsGrid[i][j] = taleslabentities.Element{ElementType: elementType}
			}
		}
	}
	return propsGrid
}
// BuildTerrain stamps the asset height-map onto a random position of the
// world grid and returns the merged copy; for every overlapping cell the
// taller element wins. The input world is not modified.
func BuildTerrain(world [][]taleslabentities.Element, asset [][]taleslabentities.Element) [][]taleslabentities.Element {
	xMax := len(world)
	yMax := len(world[0])
	assetXMax := len(asset)
	assetYMax := len(asset[0])
	newWorld := Copy(world)
	rand.Seed(time.Now().UnixNano())
	// rand.Intn panics when its argument is <= 0, which happens whenever the
	// asset is at least as large as the world along an axis; anchor the asset
	// at the origin in that case instead of crashing.
	randomXPosition := 0
	if xMax > assetXMax {
		randomXPosition = rand.Intn(xMax - assetXMax)
	}
	randomYPosition := 0
	if yMax > assetYMax {
		randomYPosition = rand.Intn(yMax - assetYMax)
	}
	for i := 0; i < assetXMax; i++ {
		for j := 0; j < assetYMax; j++ {
			assetValue := asset[i][j]
			worldValue := world[i+randomXPosition][j+randomYPosition]
			if assetValue.Height > worldValue.Height {
				newWorld[i+randomXPosition][j+randomYPosition] = assetValue
			} else {
				newWorld[i+randomXPosition][j+randomYPosition] = worldValue
			}
		}
	}
	return newWorld
}
// Copy returns a deep copy of the two-dimensional element grid.
// Unlike the previous version it tolerates an empty grid and ragged rows
// (the old code indexed gridOriginal[0] unconditionally and assumed every
// row had the same length).
func Copy(gridOriginal [][]taleslabentities.Element) [][]taleslabentities.Element {
	gridNew := make([][]taleslabentities.Element, len(gridOriginal))
	for i, row := range gridOriginal {
		newRow := make([]taleslabentities.Element, len(row))
		copy(newRow, row)
		gridNew[i] = newRow
	}
	return gridNew
}
func Print(grid [][]taleslabentities.Element) {
for i := 0; i < len(grid); i++ {
for j := 0; j < len(grid[i]); j++ {
fmt.Printf("(%s, %d)\t", grid[i][j].ElementType, grid[i][j].Height)
}
fmt.Println()
}
fmt.Println()
}
func PrintTypes(grid [][]taleslabentities.Element) {
for i := 0; i < len(grid); i++ {
for j := 0; j < len(grid[i]); j++ {
fmt.Printf("%s\t", grid[i][j].ElementType)
}
fmt.Println()
}
fmt.Println()
}
func PrintHeights(grid [][]taleslabentities.Element) {
for i := 0; i < len(grid); i++ {
for j := 0; j < len(grid[i]); j++ {
fmt.Printf("%d\t", grid[i][j].Height)
}
fmt.Println()
}
fmt.Println()
} | pkg/grid/helpers.go | 0.520496 | 0.418816 | helpers.go | starcoder |
package scalar
import (
"encoding/base64"
"reflect"
"strconv"
"github.com/graphql-go/graphql"
"github.com/graphql-go/graphql/language/ast"
)
// coerceFloat32 coerces an arbitrary Go value (or pointer to one) into a
// float32, returning nil when the value is nil, a nil pointer, or not
// convertible. Strings are parsed as 64-bit floats first (matching the old
// behaviour) and then narrowed.
//
// Fixed: the previous version returned the raw input for `int` and `*int32`
// instead of a float32, leaking the wrong type to callers of the scalar.
func coerceFloat32(value interface{}) interface{} {
	rv := reflect.ValueOf(value)
	switch rv.Kind() {
	case reflect.Ptr:
		if rv.IsNil() {
			return nil
		}
		// Dereference and retry; handles all *T variants uniformly.
		return coerceFloat32(rv.Elem().Interface())
	case reflect.Bool:
		if rv.Bool() {
			return float32(1)
		}
		return float32(0)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return float32(rv.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return float32(rv.Uint())
	case reflect.Float32, reflect.Float64:
		return float32(rv.Float())
	case reflect.String:
		f, err := strconv.ParseFloat(rv.String(), 64)
		if err != nil {
			return nil
		}
		return float32(f)
	}
	// Untyped nil (Kind == Invalid) and unconvertible kinds end up here:
	// return nil to denote "no float found".
	return nil
}
// coerceFloat64 coerces an arbitrary Go value (or pointer to one) into a
// float64, returning nil when the value is nil, a nil pointer, or not
// convertible.
//
// Fixed: the previous version returned the raw input for `int` and `*int32`
// instead of a float64, leaking the wrong type to callers of the scalar.
func coerceFloat64(value interface{}) interface{} {
	rv := reflect.ValueOf(value)
	switch rv.Kind() {
	case reflect.Ptr:
		if rv.IsNil() {
			return nil
		}
		// Dereference and retry; handles all *T variants uniformly.
		return coerceFloat64(rv.Elem().Interface())
	case reflect.Bool:
		if rv.Bool() {
			return float64(1)
		}
		return float64(0)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return float64(rv.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return float64(rv.Uint())
	case reflect.Float32, reflect.Float64:
		return rv.Float()
	case reflect.String:
		f, err := strconv.ParseFloat(rv.String(), 64)
		if err != nil {
			return nil
		}
		return f
	}
	// Untyped nil (Kind == Invalid) and unconvertible kinds end up here.
	return nil
}
// coerceInt32 coerces an arbitrary Go value (or pointer to one) into an
// int32, returning nil when the value is nil, a nil pointer, or not
// convertible. Strings are parsed as floats first (so "4.0"-style input
// works, matching the old behaviour) and then truncated.
//
// Fixed: the previous version returned the raw input for `int` and `*int32`
// instead of an int32; the reflect fallback it already had now covers every
// numeric case uniformly.
func coerceInt32(value interface{}) interface{} {
	rv := reflect.ValueOf(value)
	switch rv.Kind() {
	case reflect.Ptr:
		if rv.IsNil() {
			return nil
		}
		// Dereference and retry; handles all *T variants uniformly.
		return coerceInt32(rv.Elem().Interface())
	case reflect.Bool:
		if rv.Bool() {
			return int32(1)
		}
		return int32(0)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return int32(rv.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return int32(rv.Uint())
	case reflect.Float32, reflect.Float64:
		return int32(rv.Float())
	case reflect.String:
		f, err := strconv.ParseFloat(rv.String(), 64)
		if err != nil {
			return nil
		}
		return int32(f)
	}
	// Untyped nil (Kind == Invalid) and unconvertible kinds end up here.
	return nil
}
// coerceUint32 coerces an arbitrary Go value (or pointer to one) into a
// uint32, returning nil when the value is nil, a nil pointer, or not
// convertible.
//
// Fixed: the previous version returned the raw input for `int` and `*int32`
// instead of a uint32, leaking the wrong type to callers of the scalar.
func coerceUint32(value interface{}) interface{} {
	rv := reflect.ValueOf(value)
	switch rv.Kind() {
	case reflect.Ptr:
		if rv.IsNil() {
			return nil
		}
		// Dereference and retry; handles all *T variants uniformly.
		return coerceUint32(rv.Elem().Interface())
	case reflect.Bool:
		if rv.Bool() {
			return uint32(1)
		}
		return uint32(0)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return uint32(rv.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return uint32(rv.Uint())
	case reflect.Float32, reflect.Float64:
		return uint32(rv.Float())
	case reflect.String:
		f, err := strconv.ParseFloat(rv.String(), 64)
		if err != nil {
			return nil
		}
		return uint32(f)
	}
	// Untyped nil (Kind == Invalid) and unconvertible kinds end up here.
	return nil
}
// coerceInt64 coerces an arbitrary Go value (or pointer to one) into an
// int64, returning nil when the value is nil, a nil pointer, or not
// convertible.
//
// Fixed: the previous version returned the raw input for `int` instead of
// an int64, leaking the wrong type to callers of the scalar.
func coerceInt64(value interface{}) interface{} {
	rv := reflect.ValueOf(value)
	switch rv.Kind() {
	case reflect.Ptr:
		if rv.IsNil() {
			return nil
		}
		// Dereference and retry; handles all *T variants uniformly.
		return coerceInt64(rv.Elem().Interface())
	case reflect.Bool:
		if rv.Bool() {
			return int64(1)
		}
		return int64(0)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return rv.Int()
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return int64(rv.Uint())
	case reflect.Float32, reflect.Float64:
		return int64(rv.Float())
	case reflect.String:
		f, err := strconv.ParseFloat(rv.String(), 64)
		if err != nil {
			return nil
		}
		return int64(f)
	}
	// Untyped nil (Kind == Invalid) and unconvertible kinds end up here.
	return nil
}
// coerceUint64 coerces an arbitrary Go value (or pointer to one) into a
// uint64, returning nil when the value is nil, a nil pointer, or not
// convertible.
//
// Fixed: the previous version returned the raw input for `int` instead of
// a uint64, leaking the wrong type to callers of the scalar.
func coerceUint64(value interface{}) interface{} {
	rv := reflect.ValueOf(value)
	switch rv.Kind() {
	case reflect.Ptr:
		if rv.IsNil() {
			return nil
		}
		// Dereference and retry; handles all *T variants uniformly.
		return coerceUint64(rv.Elem().Interface())
	case reflect.Bool:
		if rv.Bool() {
			return uint64(1)
		}
		return uint64(0)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return uint64(rv.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return rv.Uint()
	case reflect.Float32, reflect.Float64:
		return uint64(rv.Float())
	case reflect.String:
		f, err := strconv.ParseFloat(rv.String(), 64)
		if err != nil {
			return nil
		}
		return uint64(f)
	}
	// Untyped nil (Kind == Invalid) and unconvertible kinds end up here.
	return nil
}
// String is the stock graphql string scalar, re-exported for convenience.
var String = graphql.String
// Bool is the stock graphql boolean scalar, re-exported for convenience.
var Bool = graphql.Boolean
// Float32 is a GraphQL scalar mapping to Go float32; serialization and
// value coercion accept any numeric Go type (see coerceFloat32).
//
// Fixed: per the GraphQL spec, Float input coercion must accept integer
// literals as well, so ParseLiteral now also handles *ast.IntValue.
var Float32 = graphql.NewScalar(graphql.ScalarConfig{
	Name:        "Float",
	Description: "float32",
	Serialize:   coerceFloat32,
	ParseValue:  coerceFloat32,
	ParseLiteral: func(valueAST ast.Value) interface{} {
		switch val := valueAST.(type) {
		case *ast.FloatValue:
			if v, err := strconv.ParseFloat(val.Value, 32); err == nil {
				return float32(v)
			}
		case *ast.IntValue:
			if v, err := strconv.ParseInt(val.Value, 10, 64); err == nil {
				return float32(v)
			}
		}
		return nil
	},
})
// Float64 is a GraphQL scalar mapping to Go float64; serialization and
// value coercion accept any numeric Go type (see coerceFloat64).
//
// Fixed: per the GraphQL spec, Float input coercion must accept integer
// literals as well, so ParseLiteral now also handles *ast.IntValue.
var Float64 = graphql.NewScalar(graphql.ScalarConfig{
	Name:        "Double",
	Description: "float64",
	Serialize:   coerceFloat64,
	ParseValue:  coerceFloat64,
	ParseLiteral: func(valueAST ast.Value) interface{} {
		switch val := valueAST.(type) {
		case *ast.FloatValue:
			if v, err := strconv.ParseFloat(val.Value, 64); err == nil {
				return v
			}
		case *ast.IntValue:
			if v, err := strconv.ParseInt(val.Value, 10, 64); err == nil {
				return float64(v)
			}
		}
		return nil
	},
})
// Int32 is a GraphQL scalar for Go int32 values; integer literals must fit
// in 32 bits or the literal is rejected (nil).
var Int32 = graphql.NewScalar(graphql.ScalarConfig{
	Name: "Int32",
	Description: "int32",
	Serialize: coerceInt32,
	ParseValue: coerceInt32,
	ParseLiteral: func(valueAST ast.Value) interface{} {
		switch val := valueAST.(type) {
		case *ast.IntValue:
			if v, err := strconv.ParseInt(val.Value, 10, 32); err == nil {
				return int32(v)
			}
		}
		return nil
	},
})
// Uint32 is a GraphQL scalar for Go uint32 values; integer literals must be
// non-negative and fit in 32 bits or the literal is rejected (nil).
var Uint32 = graphql.NewScalar(graphql.ScalarConfig{
	Name: "Uint32",
	Description: "uint32",
	Serialize: coerceUint32,
	ParseValue: coerceUint32,
	ParseLiteral: func(valueAST ast.Value) interface{} {
		switch val := valueAST.(type) {
		case *ast.IntValue:
			if v, err := strconv.ParseUint(val.Value, 10, 32); err == nil {
				return uint32(v)
			}
		}
		return nil
	},
})
// Int64 is a GraphQL scalar for Go int64 values; integer literals must fit
// in 64 bits or the literal is rejected (nil).
var Int64 = graphql.NewScalar(graphql.ScalarConfig{
	Name: "Int64",
	Description: "int64",
	Serialize: coerceInt64,
	ParseValue: coerceInt64,
	ParseLiteral: func(valueAST ast.Value) interface{} {
		switch val := valueAST.(type) {
		case *ast.IntValue:
			if v, err := strconv.ParseInt(val.Value, 10, 64); err == nil {
				return v
			}
		}
		return nil
	},
})
// Uint64 is a GraphQL scalar for Go uint64 values; integer literals must be
// non-negative and fit in 64 bits or the literal is rejected (nil).
var Uint64 = graphql.NewScalar(graphql.ScalarConfig{
	Name: "Uint64",
	Description: "uint64",
	Serialize: coerceUint64,
	ParseValue: coerceUint64,
	ParseLiteral: func(valueAST ast.Value) interface{} {
		switch val := valueAST.(type) {
		case *ast.IntValue:
			if v, err := strconv.ParseUint(val.Value, 10, 64); err == nil {
				return v
			}
		}
		return nil
	},
})
var Bytes = graphql.NewScalar(graphql.ScalarConfig{
Name: "Bytes",
Description: "bytes",
Serialize: func(value interface{}) interface{} {
src, ok := value.([]byte)
if !ok {
return "nil"
}
return base64.StdEncoding.EncodeToString(src)
},
}) | runtime/scalar/types.go | 0.675551 | 0.410431 | types.go | starcoder |
package gglm
import (
"fmt"
"math"
)
// Compile-time assertions that *Vec4 implements Swizzle4 and fmt.Stringer.
var _ Swizzle4 = &Vec4{}
var _ fmt.Stringer = &Vec4{}
// Vec4 is a 4-component float32 vector stored as a fixed-size array.
type Vec4 struct {
	Data [4]float32
}
// X returns the first component of the vector.
func (v *Vec4) X() float32 {
	return v.Data[0]
}
// Y returns the second component of the vector.
func (v *Vec4) Y() float32 {
	return v.Data[1]
}
// Z returns the third component of the vector.
func (v *Vec4) Z() float32 {
	return v.Data[2]
}
// W returns the fourth component of the vector.
func (v *Vec4) W() float32 {
	return v.Data[3]
}
// R returns the first component (red, for colour use).
func (v *Vec4) R() float32 {
	return v.Data[0]
}
// G returns the second component (green, for colour use).
func (v *Vec4) G() float32 {
	return v.Data[1]
}
// B returns the third component (blue, for colour use).
func (v *Vec4) B() float32 {
	return v.Data[2]
}
// A returns the fourth component (alpha, for colour use).
func (v *Vec4) A() float32 {
	return v.Data[3]
}
// SetX sets the first component.
func (v *Vec4) SetX(f float32) {
	v.Data[0] = f
}
// SetR sets the first component (red).
func (v *Vec4) SetR(f float32) {
	v.Data[0] = f
}
// SetY sets the second component.
func (v *Vec4) SetY(f float32) {
	v.Data[1] = f
}
// SetG sets the second component (green).
func (v *Vec4) SetG(f float32) {
	v.Data[1] = f
}
// SetZ sets the third component.
func (v *Vec4) SetZ(f float32) {
	v.Data[2] = f
}
// SetB sets the third component (blue).
func (v *Vec4) SetB(f float32) {
	v.Data[2] = f
}
// SetW sets the fourth component.
func (v *Vec4) SetW(f float32) {
	v.Data[3] = f
}
// SetA sets the fourth component (alpha).
func (v *Vec4) SetA(f float32) {
	v.Data[3] = f
}
// AddX adds x to the first component.
func (v *Vec4) AddX(x float32) {
	v.Data[0] += x
}
// AddY adds y to the second component.
func (v *Vec4) AddY(y float32) {
	v.Data[1] += y
}
// AddZ adds z to the third component.
func (v *Vec4) AddZ(z float32) {
	v.Data[2] += z
}
// AddW adds w to the fourth component.
func (v *Vec4) AddW(w float32) {
	v.Data[3] += w
}
// AddR adds r to the first component (red).
func (v *Vec4) AddR(r float32) {
	v.Data[0] += r
}
// AddG adds g to the second component (green).
func (v *Vec4) AddG(g float32) {
	v.Data[1] += g
}
// AddB adds b to the third component (blue).
func (v *Vec4) AddB(b float32) {
	v.Data[2] += b
}
// AddA adds a to the fourth component (alpha).
func (v *Vec4) AddA(a float32) {
	v.Data[3] += a
}
// SetXY sets the first two components.
func (v *Vec4) SetXY(x, y float32) {
	v.Data[0] = x
	v.Data[1] = y
}
// AddXY adds x and y to the first two components.
func (v *Vec4) AddXY(x, y float32) {
	v.Data[0] += x
	v.Data[1] += y
}
// SetRG sets the first two components (red, green).
func (v *Vec4) SetRG(r, g float32) {
	v.Data[0] = r
	v.Data[1] = g
}
// AddRG adds r and g to the first two components (red, green).
func (v *Vec4) AddRG(r, g float32) {
	v.Data[0] += r
	v.Data[1] += g
}
// SetXYZ sets the first three components.
func (v *Vec4) SetXYZ(x, y, z float32) {
	v.Data[0] = x
	v.Data[1] = y
	v.Data[2] = z
}
// AddXYZ adds x, y and z to the first three components.
func (v *Vec4) AddXYZ(x, y, z float32) {
	v.Data[0] += x
	v.Data[1] += y
	v.Data[2] += z
}
// SetRGB sets the first three components (red, green, blue).
func (v *Vec4) SetRGB(r, g, b float32) {
	v.Data[0] = r
	v.Data[1] = g
	v.Data[2] = b
}
// AddRGB adds r, g and b to the first three components.
func (v *Vec4) AddRGB(r, g, b float32) {
	v.Data[0] += r
	v.Data[1] += g
	v.Data[2] += b
}
// SetXYZW sets all four components.
func (v *Vec4) SetXYZW(x, y, z, w float32) {
	v.Data[0] = x
	v.Data[1] = y
	v.Data[2] = z
	v.Data[3] = w
}
// AddXYZW adds x, y, z and w to the four components.
func (v *Vec4) AddXYZW(x, y, z, w float32) {
	v.Data[0] += x
	v.Data[1] += y
	v.Data[2] += z
	v.Data[3] += w
}
// SetRGBA sets all four components (red, green, blue, alpha).
func (v *Vec4) SetRGBA(r, g, b, a float32) {
	v.Data[0] = r
	v.Data[1] = g
	v.Data[2] = b
	v.Data[3] = a
}
// AddRGBA adds r, g, b and a to the four components.
func (v *Vec4) AddRGBA(r, g, b, a float32) {
	v.Data[0] += r
	v.Data[1] += g
	v.Data[2] += b
	v.Data[3] += a
}
// String formats the vector as "(x, y, z, w)".
func (v *Vec4) String() string {
	return fmt.Sprintf("(%f, %f, %f, %f)", v.Data[0], v.Data[1], v.Data[2], v.Data[3])
}

// Scale multiplies every component of v by x in place and returns v.
func (v *Vec4) Scale(x float32) *Vec4 {
	for i := range v.Data {
		v.Data[i] *= x
	}
	return v
}

// Add adds v2 to v component-wise in place and returns v.
func (v *Vec4) Add(v2 *Vec4) *Vec4 {
	for i := range v.Data {
		v.Data[i] += v2.Data[i]
	}
	return v
}

// Sub subtracts v2 from v component-wise in place and returns v.
func (v *Vec4) Sub(v2 *Vec4) *Vec4 {
	for i := range v.Data {
		v.Data[i] -= v2.Data[i]
	}
	return v
}

// Mag returns the Euclidean length of the vector.
func (v *Vec4) Mag() float32 {
	var sum float32
	for _, c := range v.Data {
		sum += c * c
	}
	return float32(math.Sqrt(float64(sum)))
}
// SqrMag returns the squared magnitude of the vector.
//
// Fixed: the previous implementation summed the z component twice
// (v.Z()*v.Z() appeared in place of the w term) and ignored w entirely.
func (v *Vec4) SqrMag() float32 {
	return v.X()*v.X() + v.Y()*v.Y() + v.Z()*v.Z() + v.W()*v.W()
}
func (v *Vec4) Eq(v2 *Vec4) bool {
return v.Data == v2.Data
}
func (v *Vec4) Set(x, y, z, w float32) {
v.Data[0] = x
v.Data[1] = y
v.Data[2] = z
v.Data[3] = w
}
func (v *Vec4) Normalize() {
mag := float32(math.Sqrt(float64(v.Data[0]*v.Data[0] + v.Data[1]*v.Data[1] + v.Data[2]*v.Data[2] + v.Data[3]*v.Data[3])))
v.Data[0] /= mag
v.Data[1] /= mag
v.Data[2] /= mag
v.Data[3] /= mag
}
func (v *Vec4) Clone() *Vec4 {
return &Vec4{Data: v.Data}
}
//AddVec4 v3 = v1 + v2
func AddVec4(v1, v2 *Vec4) *Vec4 {
return &Vec4{
Data: [4]float32{
v1.X() + v2.X(),
v1.Y() + v2.Y(),
v1.Z() + v2.Z(),
v1.W() + v2.W(),
},
}
}
//SubVec4 v3 = v1 - v2
func SubVec4(v1, v2 *Vec4) *Vec4 {
return &Vec4{
Data: [4]float32{
v1.X() - v2.X(),
v1.Y() - v2.Y(),
v1.Z() - v2.Z(),
v1.W() - v2.W(),
},
}
}
func NewVec4(x, y, z, w float32) *Vec4 {
return &Vec4{
[4]float32{
x,
y,
z,
w,
},
}
} | gglm/vec4.go | 0.798423 | 0.439747 | vec4.go | starcoder |
package conway
// Location is an x, y coordinate on the Life grid.
type Location struct {
	X int
	Y int
}
// Cell holds the state of a location (alive or dead) and a cached count of
// its living neighbours.
type Cell struct {
	State bool // true while the cell is alive
	Rc int8 // living-neighbour count, maintained by SetCell and refreshed by Count
}
// Field is a sparse representation of the Life grid: only live cells and
// dead cells adjacent to live ones are tracked.
type Field struct {
	Cells map[Location]*Cell
}
// NewField builds a Field whose live cells are at the given locations.
func NewField(m []Location) *Field {
	field := &Field{Cells: map[Location]*Cell{}}
	for _, loc := range m {
		field.SetCell(loc, true)
	}
	return field
}
// Neighbours returns the eight locations surrounding l, ordered row by row
// from (-1,-1) to (+1,+1) and skipping l itself. (The original achieved the
// skip with an index-adjustment trick; this version skips it explicitly,
// producing the same ordering.)
func (l Location) Neighbours() [8]Location {
	var loc [8]Location
	k := 0
	for dx := -1; dx <= 1; dx++ {
		for dy := -1; dy <= 1; dy++ {
			if dx == 0 && dy == 0 {
				continue
			}
			loc[k] = Location{l.X + dx, l.Y + dy}
			k++
		}
	}
	return loc
}
// SetCell updates the cell at location l to state, creating the cell if it
// does not already exist, and keeps the Rc (live-neighbour) counts of the
// surrounding cells consistent with the transition. Setting a cell alive
// also ensures every adjacent location is tracked (as a dead cell at first)
// so that Rc bookkeeping has somewhere to land.
func (f *Field) SetCell(l Location, state bool) {
	var (
		cell *Cell
		neighbour *Cell
		exists bool
	)
	neighbours := l.Neighbours()
	// game.Log("%v: %v", l, neighbours)
	if state {
		// When setting a cell alive, start tracking all adjacent cells
		// (recursively added as dead; that branch does not recurse further).
		for _, loc := range neighbours {
			neighbour, exists = f.Cells[loc]
			if !exists {
				f.SetCell(loc, false)
			}
		}
	}
	cell, exists = f.Cells[l]
	if exists {
		old := cell.State
		cell.State = state
		if !old && state {
			// Dead -> Living: each tracked neighbour gains a live neighbour.
			for _, loc := range neighbours {
				neighbour, exists = f.Cells[loc]
				if exists {
					neighbour.Rc++
				}
			}
		} else if old && !state {
			// Living -> Dead: each tracked neighbour loses a live neighbour.
			for _, loc := range neighbours {
				neighbour, exists = f.Cells[loc]
				if exists {
					neighbour.Rc--
				}
			}
		}
	} else {
		// Brand new cell: derive its own Rc from the neighbours' states, and
		// if it is being created alive, bump each tracked neighbour's Rc.
		cell = &Cell{State: state, Rc: 0}
		for _, loc := range neighbours {
			neighbour, exists = f.Cells[loc]
			if exists && neighbour.State {
				cell.Rc++
			}
			if exists && state {
				neighbour.Rc++
			}
		}
		f.Cells[l] = cell
	}
}
// Commit applies Conway's rules to every tracked cell using its Rc count:
// exactly 3 live neighbours -> alive, exactly 2 -> state unchanged,
// anything else -> dead. Rc counts must be up to date (see Count / SetCell).
func (f *Field) Commit() {
	// Update alive/dead status
	for _, cell := range f.Cells {
		switch cell.Rc {
		case 2:
			// Deliberately empty: with two live neighbours the cell keeps
			// its current state (Go switch cases do not fall through).
		case 3:
			cell.State = true
		default:
			cell.State = false
		}
	}
}
// Update grows and prunes the tracked cell set: dead cells adjacent to a
// living cell are added (so they can come alive next generation), and
// tracked dead cells with no living neighbours are dropped.
// NOTE(review): the map is mutated (SetCell inserts, delete removes) while
// being ranged over; Go permits this, but entries added during the range may
// or may not be visited in the same pass — confirm that is acceptable here.
func (f *Field) Update() {
	var exists bool
	// Ensure every dead neighbour of a live cell is tracked.
	for l, cell := range f.Cells {
		for _, loc := range l.Neighbours() {
			_, exists = f.Cells[loc]
			// If we're not tracking a location and it has a living adjacent cell
			if !exists && cell.State {
				// start tracking it (as dead)
				f.SetCell(loc, false)
			}
		}
		// If we're tracking a dead cell with no living neighbours
		if !cell.State && cell.Rc == 0 {
			// stop tracking it
			delete(f.Cells, l)
		}
	}
}
// Count updates the Rc count of each cell to reflect the number of living neighbours it has
func (f *Field) Count() {
var (
neighbours int8
exists bool
neighbour *Cell
)
for l, cell := range f.Cells {
neighbours = 0
for _, loc := range l.Neighbours() {
neighbour, exists = f.Cells[loc]
if exists && neighbour.State {
neighbours++
}
}
cell.Rc = neighbours
}
} | pkg/conway/conway.go | 0.634204 | 0.53437 | conway.go | starcoder |
package main
import (
"math"
)
// twod_Vector is a two-dimensional vector with float64 components.
type twod_Vector struct {
	X float64
	Y float64
}

// Add returns the component-wise sum v + b.
func (v twod_Vector) Add(b twod_Vector) twod_Vector {
	return twod_Vector{v.X + b.X, v.Y + b.Y}
}

// Sub returns the component-wise difference v - b.
func (v twod_Vector) Sub(b twod_Vector) twod_Vector {
	return twod_Vector{v.X - b.X, v.Y - b.Y}
}

// Normalize returns v scaled to unit length.
func (v twod_Vector) Normalize() twod_Vector {
	return v.Div(v.Len())
}

// Div returns v with both components divided by the scalar b.
func (v twod_Vector) Div(b float64) twod_Vector {
	return twod_Vector{v.X / b, v.Y / b}
}

// Mul returns v with both components multiplied by the scalar b.
func (v twod_Vector) Mul(b float64) twod_Vector {
	return twod_Vector{v.X * b, v.Y * b}
}

// IsZero reports whether both components are exactly zero.
func (v twod_Vector) IsZero() bool {
	return v.X == 0 && v.Y == 0
}

// SameDirection reports whether b is parallel to v and points the same way.
func (v twod_Vector) SameDirection(b twod_Vector) bool {
	d := v.Dot(b)
	return d > 0 && d*d == v.Len2()*b.Len2()
}

// Parallel reports whether v and b are parallel (in either orientation).
func (v twod_Vector) Parallel(b twod_Vector) bool {
	d := v.Dot(b)
	return d*d == v.Len2()*b.Len2()
}

// Dot returns the dot product of v and b.
func (v twod_Vector) Dot(b twod_Vector) float64 {
	return v.X*b.X + v.Y*b.Y
}

// Cross returns the z component of the 3-D cross product of v and b.
func (v twod_Vector) Cross(b twod_Vector) float64 {
	return v.X*b.Y - v.Y*b.X
}

// Len2 returns the squared length of v.
func (v twod_Vector) Len2() float64 {
	return v.X*v.X + v.Y*v.Y
}

// Len returns the Euclidean length of v.
func (v twod_Vector) Len() float64 {
	return math.Sqrt(v.Len2())
}
// twod_Graph is an ordered list of 2-D points.
type twod_Graph struct {
	Data []twod_Vector
}

// AddXY appends the point (X, Y) to the graph.
func (g *twod_Graph) AddXY(X, Y float64) {
	g.Add(twod_Vector{X, Y})
}

// Add appends the point v to the graph.
func (g *twod_Graph) Add(v twod_Vector) {
	g.Data = append(g.Data, v)
}

// Len returns the number of points in the graph.
func (g twod_Graph) Len() int {
	return len(g.Data)
}
// Area returns the polygon area (shoelace formula) of the points selected
// by indices, or of all points in insertion order when no indices are given.
func (g twod_Graph) Area(indices ...int) float64 {
	var twice float64
	if len(indices) == 0 {
		n := len(g.Data)
		for i := 0; i < n; i++ {
			twice += g.Data[i].Cross(g.Data[(i+1)%n])
		}
	} else {
		n := len(indices)
		for k := 0; k < n; k++ {
			twice += g.Data[indices[k]].Cross(g.Data[indices[(k+1)%n]])
		}
	}
	return math.Abs(twice) / 2
}
// ConvexHull returns the indices of the points forming the convex hull of
// the graph, computed gift-wrapping style (Jarvis march) starting from the
// point with the smallest X (ties broken by smallest Y). Pass true to also
// include collinear boundary points; fewer than three points yields nil.
// NOTE(review): winding direction (cw vs ccw) follows the sign convention of
// Cross — confirm against callers if orientation matters.
func (g twod_Graph) ConvexHull(includeCollinears ...bool) []int {
	if g.Len() < 3 {
		return nil
	}
	collinears := len(includeCollinears) > 0 && includeCollinears[0]
	hull := make([]int, 0, 10)
	// Find the leftmost (then lowest) point; it is guaranteed on the hull.
	l := 0
	for i := 1; i < g.Len(); i++ {
		if g.Data[i].X == g.Data[l].X {
			if g.Data[i].Y < g.Data[l].Y {
				l = i
			}
		} else if g.Data[i].X < g.Data[l].X {
			l = i
		}
	}
	p := l
	prevToP := twod_Vector{}
	for {
		hull = append(hull, p)
		// Start with the next point as candidate successor q.
		q := (p + 1) % g.Len()
		pToq := g.Data[q].Sub(g.Data[p])
		// Skip candidates that double straight back along the previous edge.
		for !prevToP.IsZero() && prevToP.Parallel(pToq) && !prevToP.SameDirection(pToq) {
			q = (q + 1) % g.Len()
			pToq = g.Data[q].Sub(g.Data[p])
		}
		// Wrap: pick the point such that every other point lies on one side
		// of the edge p->q (cross product test).
		for i := 0; i < g.Len(); i++ {
			if i == p {
				continue
			}
			pToi := g.Data[i].Sub(g.Data[p])
			qToi := g.Data[q].Sub(g.Data[i])
			cross := pToi.Cross(qToi)
			if cross == 0 {
				// Collinear candidate: without collinears take the farthest
				// point, with collinears take the nearest valid one.
				if (!collinears && pToi.Len2() > pToq.Len2()) ||
					(collinears && pToi.Len2() < pToq.Len2() && (prevToP.IsZero() || !prevToP.Parallel(pToi) || prevToP.SameDirection(pToi))) {
					q = i
					pToq = pToi
				}
			} else if cross > 0 {
				q = i
				pToq = pToi
			}
		}
		p = q
		prevToP = pToq
		// Back at the starting point: the hull is closed.
		if p == l {
			break
		}
	}
	return hull
}
// ValidPolygon reports whether the points selected by indices (or all points
// when none are given; Range is defined elsewhere in this package) form a
// simple polygon: it rejects consecutive edges that double straight back on
// themselves and any pair of non-adjacent edges that intersect.
// NOTE(review): LineIntersection signals "no intersection" with the zero
// vector, so a genuine crossing exactly at the origin is missed — confirm
// inputs avoid (0,0).
func (g twod_Graph) ValidPolygon(indices ...int) bool {
	if len(indices) == 0 {
		indices = Range(g.Len())
	}
	for idx := range indices {
		p := g.Data[indices[idx]]
		// pr is the next vertex and nxt the one after, wrapping at the end.
		var pr, nxt twod_Vector
		if idx < len(indices)-1 {
			pr = g.Data[indices[idx+1]]
			if idx < len(indices)-2 {
				nxt = g.Data[indices[idx+2]]
			} else {
				nxt = g.Data[indices[0]]
			}
		} else {
			pr = g.Data[indices[0]]
			nxt = g.Data[indices[1]]
		}
		// Reject a vertex whose outgoing edge reverses the incoming one.
		r := pr.Sub(p)
		rn := nxt.Sub(pr)
		if r.Parallel(rn) && !r.SameDirection(rn) {
			return false
		}
		// Check the edge p->pr against all later, non-adjacent edges.
		for idx1 := idx + 2; idx1 < len(indices); idx1++ {
			q := g.Data[indices[idx1]]
			var qs twod_Vector
			if idx1 < len(indices)-1 {
				qs = g.Data[indices[idx1+1]]
			} else {
				// The closing edge is adjacent to the first edge; skip that pair.
				if idx == 0 {
					continue
				}
				qs = g.Data[indices[0]]
			}
			if !LineIntersection(p, pr, q, qs).IsZero() {
				return false
			}
		}
	}
	return true
}
// LineIntersection returns a point where segment p->pr intersects segment
// q->qs, or the zero vector when they do not intersect. It uses the standard
// parametric test: with r = pr-p and s = qs-q, an intersection exists at
// p + t*r when t and u (the parameters along each segment) both lie in [0,1].
// Collinear overlapping segments return a point of the overlap.
// NOTE(review): an intersection located exactly at the origin is
// indistinguishable from "no intersection" for callers.
func LineIntersection(p, pr, q, qs twod_Vector) twod_Vector {
	r := pr.Sub(p) // direction of the first segment
	s := qs.Sub(q) // direction of the second segment
	rs := r.Cross(s)
	qmp := q.Sub(p)
	qpr := qmp.Cross(r)
	if rs == 0 && qpr == 0 {
		// Collinear segments: project q and qs onto r and check overlap.
		r2 := r.Len2()
		t0 := qmp.Dot(r) / r2
		t1 := qmp.Add(s).Dot(r) / r2
		if t0 <= 1 && t0 >= 0 {
			return p.Add(r.Mul(t0))
		}
		if t1 <= 1 && t1 >= 0 {
			return p.Add(r.Mul(t1))
		}
		// The second segment fully contains the first.
		if (t1 >= 1 && t0 <= 0) || (t1 <= 0 && t0 >= 1) {
			return p
		}
	} else if rs != 0 {
		// Non-parallel segments: solve for the two parameters.
		u := qpr / rs
		t := qmp.Cross(s) / rs
		if t >= 0 && t <= 1 && u >= 0 && u <= 1 {
			return p.Add(r.Mul(t))
		}
	}
	return twod_Vector{}
}
// Package queueimpl3 implements an unbounded, dynamically growing FIFO queue.
// Internally, queue store the values in fixed sized slices that are linked using a singly linked list.
// This implementation tests the queue performance when controlling the length and current positions in
// the slices using the builtin len and append functions.
package queueimpl3
const (
	// internalSliceSize is the capacity of each node's value slice.
	internalSliceSize = 128
	// internalSliceLastPosition is the last valid index within a node's slice.
	internalSliceLastPosition = 127
)

// Queueimpl3 is an unbounded, dynamically growing FIFO queue backed by a
// singly linked list of fixed-size slices.
type Queueimpl3 struct {
	// head is the node values are currently popped from.
	head *Node
	// tail is the node values are currently pushed onto; in an empty queue
	// head and tail reference the same node.
	tail *Node
	// pos indexes the next value to pop inside head's slice.
	pos int
	// len is the number of values currently stored.
	len int
}

// Node is a single link in the queue's internal list, holding a bounded
// slice of user values.
type Node struct {
	// v stores the values pushed while this node was the tail.
	v []interface{}
	// n links to the next node, if any.
	n *Node
}

// New returns an initialized, empty queue.
func New() *Queueimpl3 {
	return new(Queueimpl3).Init()
}

// Init initializes or clears queue q and returns it.
func (q *Queueimpl3) Init() *Queueimpl3 {
	first := newNode()
	q.head = first
	q.tail = first
	q.pos = 0
	q.len = 0
	return q
}

// Len returns the number of elements of queue q. O(1).
func (q *Queueimpl3) Len() int { return q.len }

// Front returns the next element to be popped without removing it. The
// bool result is false when the queue is empty. O(1).
func (q *Queueimpl3) Front() (interface{}, bool) {
	if q.len == 0 {
		return nil, false
	}
	return q.head.v[q.pos], true
}

// Push appends v to the back of the queue, linking in a fresh node when the
// current tail slice is full. Amortized O(1): node slices are allocated at
// full capacity, so append never reallocates.
func (q *Queueimpl3) Push(v interface{}) {
	if len(q.tail.v) >= internalSliceSize {
		next := newNode()
		q.tail.n = next
		q.tail = next
	}
	q.tail.v = append(q.tail.v, v)
	q.len++
}

// Pop removes and returns the element at the front of the queue. The bool
// result is false when the queue is empty. O(1).
//
// Fixed: the previous version unconditionally advanced head to head.n after
// draining a full slice; when the drained node was the only node (head.n ==
// nil) that left head nil, and the next Push/Pop cycle dereferenced it and
// panicked. The drained node is now reset and reused instead.
func (q *Queueimpl3) Pop() (interface{}, bool) {
	if q.len == 0 {
		return nil, false
	}
	front := q.head.v[q.pos]
	q.head.v[q.pos] = nil // release the reference so the value can be collected
	q.len--
	if q.pos < internalSliceLastPosition {
		q.pos++
	} else if q.head.n != nil {
		// The head slice is exhausted; advance to the next node.
		next := q.head.n
		q.head.n = nil // release the link so the old node can be collected
		q.head = next
		q.pos = 0
	} else {
		// The single remaining node (head == tail) is fully drained: reuse
		// its backing storage rather than leaving head dangling.
		q.head.v = q.head.v[:0]
		q.pos = 0
	}
	return front, true
}

// newNode allocates a node with an empty, full-capacity value slice.
func newNode() *Node {
	return &Node{
		v: make([]interface{}, 0, internalSliceSize),
	}
}
package core
import (
"fmt"
"strings"
"github.com/philandstuff/dhall-golang/v6/term"
)
// Call implements the Natural/build builtin: it applies the user-supplied
// fold-like function x to the Natural type, a successor function, and zero.
func (naturalBuild) Call(x Value) Value {
	var succ Value = lambda{
		Label: "x",
		Domain: Natural,
		Fn: func(x Value) Value {
			// A literal natural is incremented directly; anything else
			// becomes the unevaluated term `x + 1`.
			if n, ok := x.(NaturalLit); ok {
				return NaturalLit(n + 1)
			}
			return oper{OpCode: term.PlusOp, L: x, R: NaturalLit(1)}
		},
	}
	return apply(x, Natural, succ, NaturalLit(0))
}
// ArgType reports the type of Natural/build's argument:
// forall(natural : Type) -> (succ : natural -> natural) ->
// (zero : natural) -> natural.
func (naturalBuild) ArgType() Value {
	return NewPi("natural", Type, func(natural Value) Value {
		return NewFnType("succ", NewFnType("_", natural, natural),
			NewFnType("zero", natural,
				natural))
	})
}
// Call implements Natural/even: a Bool for literal naturals, nil (leaving
// the application unevaluated) otherwise.
func (naturalEven) Call(x Value) Value {
	if n, ok := x.(NaturalLit); ok {
		return BoolLit(n%2 == 0)
	}
	return nil
}
// ArgType reports that Natural/even takes a Natural.
func (naturalEven) ArgType() Value { return Natural }
// Call implements the curried Natural/fold builtin, collecting its four
// arguments (the natural n, the result type, the successor function, and the
// zero value) one application at a time. With all four present and n a
// literal, succ is applied to zero n times; otherwise nil is returned and
// the application stays unevaluated.
func (fold naturalFold) Call(x Value) Value {
	if fold.n == nil {
		return naturalFold{n: x}
	}
	if fold.typ == nil {
		return naturalFold{
			n: fold.n,
			typ: x,
		}
	}
	if fold.succ == nil {
		return naturalFold{
			n: fold.n,
			typ: fold.typ,
			succ: x,
		}
	}
	zero := x
	if n, ok := fold.n.(NaturalLit); ok {
		result := zero
		for i := 0; i < int(n); i++ {
			result = apply(fold.succ, result)
		}
		return result
	}
	return nil
}
// ArgType reports the type of the next expected argument, depending on how
// many arguments have been collected so far: Natural, then Type, then the
// successor function type, then the zero value's type.
func (fold naturalFold) ArgType() Value {
	if fold.n == nil {
		return Natural
	}
	if fold.typ == nil {
		return Type
	}
	if fold.succ == nil {
		return NewFnType("_", fold.typ, fold.typ)
	}
	// zero
	return fold.typ
}
// Call implements Natural/isZero: a Bool for literal naturals, nil
// (unevaluated) otherwise.
func (naturalIsZero) Call(x Value) Value {
	if n, ok := x.(NaturalLit); ok {
		return BoolLit(n == 0)
	}
	return nil
}
// ArgType reports that Natural/isZero takes a Natural.
func (naturalIsZero) ArgType() Value { return Natural }
// Call implements Natural/odd: a Bool for literal naturals, nil
// (unevaluated) otherwise.
func (naturalOdd) Call(x Value) Value {
	if n, ok := x.(NaturalLit); ok {
		return BoolLit(n%2 == 1)
	}
	return nil
}
// ArgType reports that Natural/odd takes a Natural.
func (naturalOdd) ArgType() Value { return Natural }
// Call implements Natural/show: the decimal text of a literal natural, nil
// (unevaluated) otherwise.
func (naturalShow) Call(x Value) Value {
	if n, ok := x.(NaturalLit); ok {
		return PlainTextLit(fmt.Sprintf("%d", n))
	}
	return nil
}
// ArgType reports that Natural/show takes a Natural.
func (naturalShow) ArgType() Value { return Natural }
func (sub naturalSubtract) Call(x Value) Value {
if sub.a == nil {
return naturalSubtract{a: x}
}
m, mok := sub.a.(NaturalLit)
n, nok := x.(NaturalLit)
if mok && nok {
if n >= m {
return NaturalLit(n - m)
}
return NaturalLit(0)
}
if sub.a == NaturalLit(0) {
return x
}
if x == NaturalLit(0) {
return NaturalLit(0)
}
if AlphaEquivalent(sub.a, x) {
return NaturalLit(0)
}
return nil
}
func (naturalSubtract) ArgType() Value { return Natural }
func (naturalToInteger) Call(x Value) Value {
if n, ok := x.(NaturalLit); ok {
return IntegerLit(n)
}
return nil
}
func (naturalToInteger) ArgType() Value { return Natural }
func (integerClamp) Call(x Value) Value {
if i, ok := x.(IntegerLit); ok {
if i < 0 {
return NaturalLit(0)
}
return NaturalLit(i)
}
return nil
}
func (integerClamp) ArgType() Value { return Integer }
func (integerNegate) Call(x Value) Value {
if i, ok := x.(IntegerLit); ok {
return IntegerLit(-i)
}
return nil
}
func (integerNegate) ArgType() Value { return Integer }
func (integerShow) Call(x Value) Value {
if i, ok := x.(IntegerLit); ok {
return PlainTextLit(fmt.Sprintf("%+d", i))
}
return nil
}
func (integerShow) ArgType() Value { return Integer }
func (integerToDouble) Call(x Value) Value {
if i, ok := x.(IntegerLit); ok {
return DoubleLit(i)
}
return nil
}
func (integerToDouble) ArgType() Value { return Integer }
func (doubleShow) Call(x Value) Value {
if d, ok := x.(DoubleLit); ok {
return PlainTextLit(d.String())
}
return nil
}
func (doubleShow) ArgType() Value { return Double }
func (optional) Call(x Value) Value { return OptionalOf{x} }
func (optional) ArgType() Value { return Type }
func (none) Call(a Value) Value { return NoneOf{a} }
func (none) ArgType() Value { return Type }
func (textShow) Call(a0 Value) Value {
if t, ok := a0.(PlainTextLit); ok {
var out strings.Builder
out.WriteRune('"')
for _, r := range t {
switch r {
case '"':
out.WriteString(`\"`)
case '$':
out.WriteString(`\u0024`)
case '\\':
out.WriteString(`\\`)
case '\b':
out.WriteString(`\b`)
case '\f':
out.WriteString(`\f`)
case '\n':
out.WriteString(`\n`)
case '\r':
out.WriteString(`\r`)
case '\t':
out.WriteString(`\t`)
default:
if r < 0x1f {
out.WriteString(fmt.Sprintf(`\u%04x`, r))
} else {
out.WriteRune(r)
}
}
}
out.WriteRune('"')
return PlainTextLit(out.String())
}
return nil
}
func (textShow) ArgType() Value { return Text }
func (r textReplace) Call(a Value) Value {
if r.needle == nil {
return textReplace{needle: a}
}
if r.replacement == nil {
return textReplace{needle: r.needle, replacement: a}
}
needle, ok := r.needle.(PlainTextLit)
if !ok {
return nil
}
if needle == "" {
return a
}
haystack, ok := a.(PlainTextLit)
if !ok {
return nil
}
text := &textValBuilder{}
strs := strings.Split(string(haystack), string(needle))
text.appendStr(strs[0])
for _, s := range strs[1:] {
text.appendValue(r.replacement)
text.appendStr(s)
}
return text.value()
}
func (textReplace) ArgType() Value { return Text }
func (list) Call(x Value) Value { return ListOf{x} }
func (list) ArgType() Value { return Type }
func (l listBuild) Call(x Value) Value {
if l.typ == nil {
return listBuild{typ: x}
}
var cons Value = lambda{
Label: "a",
Domain: l.typ,
Fn: func(a Value) Value {
return lambda{
Label: "as",
Domain: ListOf{l.typ},
Fn: func(as Value) Value {
if _, ok := as.(EmptyList); ok {
return NonEmptyList{a}
}
if as, ok := as.(NonEmptyList); ok {
return append(NonEmptyList{a}, as...)
}
return oper{OpCode: term.ListAppendOp, L: NonEmptyList{a}, R: as}
},
}
},
}
return apply(x, ListOf{l.typ}, cons, EmptyList{ListOf{l.typ}})
}
func (l listBuild) ArgType() Value {
if l.typ == nil {
return Type
}
return NewPi("list", Type, func(list Value) Value {
return NewFnType("cons", NewFnType("_", l.typ, NewFnType("_", list, list)),
NewFnType("nil", list,
list))
})
}
func (l listFold) Call(x Value) Value {
if l.typ1 == nil {
return listFold{typ1: x}
}
if l.list == nil {
return listFold{typ1: l.typ1, list: x}
}
if l.typ2 == nil {
return listFold{
typ1: l.typ1,
list: l.list,
typ2: x,
}
}
if l.cons == nil {
return listFold{
typ1: l.typ1,
list: l.list,
typ2: l.typ2,
cons: x,
}
}
empty := x
if _, ok := l.list.(EmptyList); ok {
return empty
}
if list, ok := l.list.(NonEmptyList); ok {
result := empty
for i := len(list) - 1; i >= 0; i-- {
result = apply(l.cons, list[i], result)
}
return result
}
return nil
}
func (l listFold) ArgType() Value {
if l.typ1 == nil {
return Type
}
if l.list == nil {
return ListOf{l.typ1}
}
if l.typ2 == nil {
return Type
}
if l.cons == nil {
return NewFnType("_", l.typ1, NewFnType("_", l.typ2, l.typ2))
}
// nil
return l.typ2
}
func (length listLength) Call(x Value) Value {
if length.typ == nil {
return listLength{typ: x}
}
if _, ok := x.(EmptyList); ok {
return NaturalLit(0)
}
if l, ok := x.(NonEmptyList); ok {
return NaturalLit(len(l))
}
return nil
}
func (length listLength) ArgType() Value {
if length.typ == nil {
return Type
}
return ListOf{length.typ}
}
func (head listHead) Call(x Value) Value {
if head.typ == nil {
return listHead{typ: x}
}
if _, ok := x.(EmptyList); ok {
return NoneOf{head.typ}
}
if l, ok := x.(NonEmptyList); ok {
return Some{l[0]}
}
return nil
}
func (head listHead) ArgType() Value {
if head.typ == nil {
return Type
}
return ListOf{head.typ}
}
func (last listLast) Call(x Value) Value {
if last.typ == nil {
return listLast{typ: x}
}
if _, ok := x.(EmptyList); ok {
return NoneOf{last.typ}
}
if l, ok := x.(NonEmptyList); ok {
return Some{l[len(l)-1]}
}
return nil
}
func (last listLast) ArgType() Value {
if last.typ == nil {
return Type
}
return ListOf{last.typ}
}
func (indexed listIndexed) Call(x Value) Value {
if indexed.typ == nil {
return listIndexed{typ: x}
}
if _, ok := x.(EmptyList); ok {
return EmptyList{ListOf{
RecordType{"index": Natural, "value": indexed.typ}}}
}
if l, ok := x.(NonEmptyList); ok {
var result []Value
for i, v := range l {
result = append(result,
RecordLit{"index": NaturalLit(i), "value": v})
}
return NonEmptyList(result)
}
return nil
}
func (indexed listIndexed) ArgType() Value {
if indexed.typ == nil {
return Type
}
return ListOf{indexed.typ}
}
func (rev listReverse) Call(x Value) Value {
if rev.typ == nil {
return listReverse{typ: x}
}
if _, ok := x.(EmptyList); ok {
return x
}
if l, ok := x.(NonEmptyList); ok {
result := make([]Value, len(l))
for i, v := range l {
result[len(l)-i-1] = v
}
return NonEmptyList(result)
}
return nil
}
func (rev listReverse) ArgType() Value {
if rev.typ == nil {
return Type
}
return ListOf{rev.typ}
}
// These are the builtin Callable Values.
var (
NaturalBuild Callable = naturalBuild{}
NaturalEven Callable = naturalEven{}
NaturalFold Callable = naturalFold{}
NaturalIsZero Callable = naturalIsZero{}
NaturalOdd Callable = naturalOdd{}
NaturalShow Callable = naturalShow{}
NaturalSubtract Callable = naturalSubtract{}
NaturalToInteger Callable = naturalToInteger{}
IntegerClamp Callable = integerClamp{}
IntegerNegate Callable = integerNegate{}
IntegerShow Callable = integerShow{}
IntegerToDouble Callable = integerToDouble{}
DoubleShow Callable = doubleShow{}
Optional Callable = optional{}
None Callable = none{}
TextShow Callable = textShow{}
TextReplace Callable = textReplace{}
List Callable = list{}
ListBuild Callable = listBuild{}
ListFold Callable = listFold{}
ListLength Callable = listLength{}
ListHead Callable = listHead{}
ListLast Callable = listLast{}
ListIndexed Callable = listIndexed{}
ListReverse Callable = listReverse{}
) | core/builtins.go | 0.567577 | 0.566738 | builtins.go | starcoder |
package main
const input = `
forward 4
down 8
down 1
forward 6
forward 7
down 7
forward 3
forward 5
up 9
down 1
forward 5
down 8
forward 4
forward 5
down 5
down 1
forward 1
down 3
forward 5
forward 5
down 1
up 2
down 2
down 5
down 5
forward 3
forward 7
forward 5
forward 9
forward 8
down 4
down 6
up 5
down 1
forward 6
up 3
forward 7
forward 4
down 7
up 5
up 5
up 1
up 5
forward 5
forward 2
forward 7
down 7
forward 9
down 9
up 8
up 8
up 2
forward 5
forward 8
up 5
forward 1
down 1
down 6
forward 1
forward 2
forward 4
forward 6
up 4
up 5
down 4
down 9
down 4
forward 4
up 8
up 2
down 2
up 9
forward 9
forward 4
forward 1
forward 6
up 3
forward 6
forward 2
up 3
down 3
forward 6
down 9
down 7
forward 3
up 7
up 8
forward 3
down 1
down 8
forward 7
forward 3
down 2
down 5
forward 5
forward 1
down 1
down 3
down 5
forward 1
down 1
down 7
forward 1
up 2
down 5
up 3
up 2
down 7
up 4
forward 2
down 3
down 1
up 7
down 6
down 1
forward 7
down 5
down 2
forward 7
up 9
forward 6
forward 6
forward 2
forward 6
down 2
forward 4
down 5
forward 4
down 8
forward 3
down 9
up 5
forward 6
down 5
forward 5
down 4
down 1
forward 3
up 9
up 5
up 9
down 3
forward 7
forward 7
up 5
up 6
up 3
down 9
down 4
up 8
down 9
down 6
forward 5
down 6
forward 7
down 4
down 9
down 9
forward 6
down 4
up 2
down 8
up 3
up 7
up 1
forward 9
down 4
down 8
up 2
forward 7
forward 5
down 9
down 9
up 5
down 4
forward 8
up 3
up 4
up 8
down 7
forward 6
down 8
down 1
up 1
down 7
down 7
forward 3
down 9
up 2
forward 2
up 1
up 1
down 2
down 8
up 5
down 3
down 3
forward 2
down 4
forward 2
down 2
forward 3
down 6
forward 8
down 5
down 6
forward 9
forward 2
down 6
down 4
up 9
forward 2
forward 1
up 9
down 9
forward 8
down 4
up 3
down 1
forward 9
forward 9
forward 3
forward 4
down 2
down 1
forward 5
up 3
forward 6
down 8
down 8
down 7
forward 1
forward 6
down 9
down 6
forward 8
down 5
up 6
down 2
forward 2
up 3
forward 6
forward 4
up 4
down 5
forward 2
down 5
forward 1
forward 5
up 7
up 1
down 3
up 8
forward 4
forward 8
forward 8
up 2
down 8
up 2
up 2
up 7
down 9
down 1
forward 1
down 3
down 1
down 4
forward 3
down 4
down 5
forward 7
forward 6
forward 7
forward 8
up 6
down 1
down 9
up 2
up 2
forward 1
up 9
forward 6
down 2
forward 6
forward 8
up 8
down 6
forward 2
up 4
up 5
down 3
down 2
forward 7
down 8
forward 4
forward 8
up 4
down 7
forward 6
forward 1
up 4
down 4
down 9
down 7
down 6
down 1
forward 7
up 3
down 1
down 9
down 9
down 1
down 7
down 8
up 9
down 7
up 4
forward 4
down 2
up 8
down 6
down 6
forward 4
up 5
down 9
down 8
up 7
down 4
forward 9
up 3
down 6
forward 7
up 4
forward 9
down 6
forward 6
down 3
down 5
down 4
up 5
down 8
down 8
forward 5
forward 1
down 3
forward 7
down 3
up 6
forward 5
up 7
forward 8
down 1
forward 7
forward 8
forward 9
forward 7
up 5
forward 9
up 7
down 7
forward 8
down 8
up 6
down 4
forward 6
forward 3
forward 3
forward 6
down 3
up 4
down 3
down 8
forward 2
down 1
down 5
forward 2
up 3
up 5
forward 2
forward 8
down 7
down 9
forward 8
forward 5
forward 2
down 3
forward 6
forward 3
forward 4
forward 9
down 8
forward 2
down 6
down 8
forward 1
forward 5
up 3
forward 8
up 3
forward 2
down 3
down 5
up 4
down 9
up 5
down 2
forward 7
forward 8
forward 2
forward 4
forward 6
down 1
up 3
forward 3
up 6
forward 1
down 9
forward 4
forward 5
forward 3
down 7
down 9
forward 1
forward 5
up 1
down 6
down 7
up 4
up 7
forward 2
down 7
forward 5
up 9
up 8
forward 8
up 1
up 6
down 7
up 8
forward 2
down 1
forward 7
forward 6
forward 2
up 7
down 5
down 6
forward 8
down 3
down 2
forward 5
down 7
forward 2
down 9
forward 7
forward 9
forward 1
down 7
down 3
down 8
down 4
up 1
down 2
forward 5
forward 9
forward 5
up 6
up 1
forward 3
forward 1
forward 7
down 9
forward 4
down 7
up 6
forward 1
down 7
forward 5
down 4
down 2
up 1
forward 6
up 6
down 3
up 5
down 8
down 5
forward 2
down 1
forward 8
forward 4
down 3
forward 3
forward 6
forward 2
forward 9
forward 2
down 3
forward 8
down 4
down 1
forward 4
down 1
forward 5
down 5
down 6
forward 6
down 6
down 9
forward 7
down 6
forward 6
forward 7
forward 1
forward 4
forward 2
forward 3
up 8
down 3
down 7
forward 6
forward 4
up 7
forward 6
forward 6
down 7
up 8
down 5
forward 6
forward 8
down 3
up 2
down 5
forward 2
forward 5
up 8
forward 1
down 3
forward 3
forward 2
down 3
down 8
forward 3
forward 1
down 5
down 1
up 1
forward 9
down 7
up 2
forward 8
down 6
down 5
up 9
forward 2
forward 5
forward 8
up 2
up 5
forward 2
down 2
down 9
down 3
forward 7
up 5
forward 7
down 6
forward 2
forward 7
forward 8
forward 8
down 7
forward 3
forward 6
down 5
forward 8
forward 6
up 2
forward 1
up 9
forward 1
up 3
forward 6
down 4
down 5
down 8
up 6
forward 1
down 8
forward 3
forward 2
forward 9
down 5
down 9
forward 5
down 7
up 9
forward 5
forward 7
forward 6
forward 5
down 3
forward 6
down 9
up 8
forward 4
forward 7
forward 3
down 7
forward 8
down 5
forward 3
up 6
up 5
forward 9
up 4
up 9
forward 9
forward 3
down 8
forward 8
down 3
forward 2
down 4
down 1
forward 2
up 9
down 7
forward 4
up 3
down 9
down 6
forward 2
forward 5
down 7
down 2
forward 8
down 5
forward 8
down 8
down 4
down 1
down 2
forward 5
down 8
down 1
down 2
forward 8
forward 3
down 8
up 8
up 8
down 3
forward 3
forward 6
down 9
up 1
forward 6
up 1
down 1
down 9
forward 3
up 1
forward 7
forward 6
forward 1
up 3
down 8
forward 7
down 3
down 5
down 7
forward 6
down 9
forward 9
forward 8
down 9
forward 1
down 2
up 7
down 3
down 1
forward 8
forward 4
forward 9
up 9
down 4
forward 1
down 1
up 1
up 1
up 6
down 7
down 5
forward 1
forward 7
up 3
down 7
up 3
down 4
up 9
up 9
forward 1
down 4
down 6
forward 2
forward 6
up 1
forward 1
down 8
forward 7
up 6
forward 6
forward 3
up 1
up 6
forward 1
down 2
forward 8
forward 4
forward 2
down 3
forward 2
forward 3
forward 1
down 6
forward 7
forward 7
down 4
forward 6
up 3
up 4
up 6
down 7
down 8
forward 3
down 2
forward 5
down 4
forward 6
forward 7
forward 8
forward 9
forward 3
down 1
forward 8
forward 1
down 8
up 1
down 3
down 6
down 1
up 1
forward 1
down 6
down 5
forward 6
down 1
down 5
forward 7
up 3
forward 4
forward 4
forward 1
up 6
up 2
up 4
down 4
up 4
forward 8
up 8
forward 1
down 5
forward 5
down 7
up 5
up 7
up 5
forward 9
down 1
down 1
forward 4
down 2
down 2
down 3
down 1
forward 1
up 7
forward 6
forward 9
up 5
forward 1
forward 9
up 2
forward 5
down 4
forward 6
down 9
down 3
forward 1
down 2
down 3
down 1
down 3
forward 8
up 6
forward 2
down 5
down 9
down 4
up 2
up 9
forward 2
down 7
forward 9
down 5
down 5
up 6
forward 1
forward 5
forward 9
down 4
forward 2
forward 7
down 2
forward 4
down 2
forward 3
down 3
down 2
up 5
forward 8
up 8
down 9
forward 9
down 9
down 4
down 1
forward 4
forward 9
down 5
down 9
down 4
down 5
forward 1
down 3
down 3
down 4
forward 6
forward 5
down 3
up 4
forward 9
forward 5
forward 3
forward 6
down 8
up 9
forward 2
up 6
forward 2
down 9
up 9
down 4
forward 1
forward 9
down 5
forward 9
forward 4
down 6
forward 7
forward 4
down 7
down 1
forward 9
down 6
down 5
forward 5
down 5
down 1
forward 3
down 7
down 5
down 9
down 5
up 6
up 5
down 5
up 1
down 9
forward 5
forward 9
forward 3
forward 4
down 7
forward 3
forward 3
down 5
forward 7
down 9
forward 8
forward 4
forward 8
forward 9
forward 1
forward 6
up 9
down 3
forward 1
forward 4
down 2
down 8
up 4
down 4
forward 1
down 5
down 3
down 9
up 1
forward 8
down 6
down 4
forward 3
down 8
down 2
up 6
down 5
forward 8
down 4
up 1
forward 5
down 1
down 9
down 1
down 9
down 3
down 3
forward 2
forward 6
down 8
forward 1
up 4
down 3
forward 9
up 2
down 4
forward 9
down 3
down 1
down 3
down 4
up 6
down 2
forward 3
forward 9
forward 7
down 2
down 5
forward 4
forward 5
down 9
up 3
forward 5
forward 9
up 2
forward 3
down 4
forward 2
down 5
down 8
down 1
forward 4
up 4
forward 7
down 9
forward 8
down 8
forward 3
down 6
up 9
up 6
down 2
forward 6
up 1
down 5
down 5
down 9
up 2
down 2
forward 1
forward 8
down 2
up 8
down 3
forward 2
down 1
down 5
down 5
up 4
forward 5
` | day2/input.go | 0.767298 | 0.552781 | input.go | starcoder |
package tuid
import (
"bytes"
"crypto/rand"
"errors"
"fmt"
"math/big"
"time"
)
// MinID is the first ID at 2000-01-01T00:00:00Z
const MinID = "5Hr02eJHAfTt1tTM"
// MaxID is the first ID at 2100-01-01T00:00:00Z
const MaxID = "MuklDY5bgW1s9Ev2"
// TUID is a Time-based Unique Identifier (e.g. 91Mq07yx9IxHCi5Y) that has an embedded timestamp and sorts
// chronologically. It's a 16-digit base-62 big integer, where the leftmost bits are a timestamp with nanosecond
// resolution (e.g. 2021-03-08T05:54:09.208207000Z) and the rightmost 32 bits are "entropy" (a random number),
// providing some assurance of uniqueness if multiple IDs are created at the same moment. Collisions in a single
// information system are extremely unlikely. The Zero value of a TUID is an empty string.
type TUID string
// TUIDInfo is a convenience type for parsing a TUID's timestamp and entropy.
type TUIDInfo struct {
ID TUID `json:"id"`
Timestamp time.Time `json:"timestamp"`
Entropy uint32 `json:"entropy"`
}
// Int decodes the specified base-62 encoded TUID into a big integer
func (t TUID) Int() (*big.Int, error) {
id, err := decode(string(t))
if err != nil {
return new(big.Int), fmt.Errorf("int: invalid TUID %s: %w", t, err)
}
return id, nil
}
// Time extracts the embedded timestamp from the specified TUID
func (t TUID) Time() (time.Time, error) {
id, err := decode(string(t))
if err != nil {
return time.Time{}, fmt.Errorf("time: invalid TUID %s: %w", t, err)
}
nsec := new(big.Int).Rsh(id, 32)
return time.Unix(0, nsec.Int64()), nil
}
// Entropy extracts the random 32 bits from the specified TUID
func (t TUID) Entropy() (uint32, error) {
id, err := decode(string(t))
if err != nil {
return 0, fmt.Errorf("entropy: invalid TUID %s: %w", t, err)
}
mask := big.NewInt(1<<32 - 1)
entropy := new(big.Int).And(id, mask)
return uint32(entropy.Int64()), nil
}
// Info extracts the timestamp and entropy from the specified TUID
func (t TUID) Info() (TUIDInfo, error) {
id, err := decode(string(t))
if err != nil {
return TUIDInfo{}, fmt.Errorf("info: invalid TUID %s: %w", t, err)
}
nsec := new(big.Int).Rsh(id, 32)
timestamp := time.Unix(0, nsec.Int64())
mask := big.NewInt(1<<32 - 1)
entropy := uint32(new(big.Int).And(id, mask).Int64())
return TUIDInfo{t, timestamp, entropy}, nil
}
// String implements the fmt.Stringer interface
func (t TUID) String() string {
return string(t)
}
// NewID creates a new TUID with the current system time
func NewID() TUID {
return NewIDWithTime(time.Now())
}
// NewIDWithTime creates a TUID with the provided timestamp
func NewIDWithTime(t time.Time) TUID {
ts := new(big.Int).Lsh(big.NewInt(t.UnixNano()), 32)
entropy, _ := rand.Int(rand.Reader, big.NewInt(1<<32))
id := ts.Or(ts, entropy)
tuid, _ := encode(id)
return TUID(tuid)
}
// FirstIDWithTime creates a TUID with the provided timestamp and zero entropy, useful for query offsets
func FirstIDWithTime(t time.Time) TUID {
id := new(big.Int).Lsh(big.NewInt(t.UnixNano()), 32)
tuid, _ := encode(id)
return TUID(tuid)
}
// IsValid checks to see if the provided TUID has valid characters and a reasonable embedded timestamp
func IsValid(t TUID) bool {
id, err := decode(string(t))
if err != nil {
return false
}
minID, _ := decode(MinID)
maxID, _ := decode(MaxID)
return (id.Cmp(minID) >= 0) && (id.Cmp(maxID) <= 0)
}
// Compare supports sorting TUIDs chronologically
func Compare(t1 TUID, t2 TUID) int {
if t1 == t2 {
return 0
}
if t1 < t2 {
return -1
}
return +1
}
// Duration returns the number of nanoseconds between two TUIDs as a time.Duration
func Duration(start TUID, stop TUID) (time.Duration, error) {
startTime, err := start.Time()
if err != nil {
return 0, fmt.Errorf("duration: invalid start TUID %s: %w", start, err)
}
stopTime, err := stop.Time()
if err != nil {
return 0, fmt.Errorf("duration: invalid stop TUID %s: %w", stop, err)
}
return stopTime.Sub(startTime), nil
}
var base = big.NewInt(62)
var digits = []byte("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz")
// encode the provided big integer into a base-62 encoded string
func encode(value *big.Int) (string, error) {
if value.Sign() < 0 {
return "", errors.New("base 62 encoding error: positive value required")
}
var result []byte
for value.Sign() > 0 {
q, r := new(big.Int).DivMod(value, base, new(big.Int))
d := digits[r.Int64()]
result = append([]byte{d}, result...) // prepend the new digit
value = q
}
if len(result) == 0 {
return string(digits[0]), nil
}
return string(result), nil
}
// decode the provided base-62 encoded string into a big integer
func decode(text string) (*big.Int, error) {
textBytes := []byte(text)
size := len(textBytes)
if size == 0 {
return new(big.Int), errors.New("base 62 decoding error: no digits")
}
result := new(big.Int)
for i := 0; i < size; i++ {
b := textBytes[size-1-i] // examine digits from right to left
j := int64(bytes.IndexByte(digits, b))
if j == -1 {
return new(big.Int), fmt.Errorf("base 62 decoding error: invalid digit `%s` in %s",
string(b), string(textBytes))
}
pow := new(big.Int).Exp(base, big.NewInt(int64(i)), nil)
prod := new(big.Int).Mul(big.NewInt(j), pow)
result = new(big.Int).Add(result, prod)
}
return result, nil
} | tuid.go | 0.666605 | 0.410993 | tuid.go | starcoder |
// Package day20 solves AoC 2019 day 20.
package day20
import (
"container/heap"
"unicode"
"github.com/fis/aoc/glue"
"github.com/fis/aoc/util"
)
func init() {
glue.RegisterSolver(2019, 20, glue.LevelSolver{Solver: solve, Empty: ' '})
}
func solve(level *util.Level) ([]string, error) {
dist := distances(level)
p1 := shortest(label{name: "AA", outer: true}, label{name: "ZZ", outer: true}, dist)
p2 := recursive(label{name: "AA", outer: true}, label{name: "ZZ", outer: true}, dist)
return glue.Ints(p1, p2), nil
}
type label struct {
name string
outer bool
}
type distance struct {
d int
depth int
}
type path struct {
at label
depth int
d int
}
type pathq []path
func shortest(from, to label, edges map[label]map[label]distance) int {
dist := map[label]int{from: 0}
fringe := pathq{{at: from, d: 0}}
for len(fringe) > 0 {
p := heap.Pop(&fringe).(path)
if p.at == to {
return p.d
}
if od := dist[p.at]; od < p.d {
continue // obsolete path
}
for to, e := range edges[p.at] {
ed := p.d + e.d
if od, ok := dist[to]; ok && od <= ed {
continue // seen better
}
dist[to] = ed
heap.Push(&fringe, path{at: to, d: ed})
}
}
return -1
}
func recursive(from, to label, edges map[label]map[label]distance) int {
type node struct {
at label
depth int
}
dist := map[node]int{{at: from, depth: 0}: 0}
fringe := pathq{{at: from, depth: 0, d: 0}}
for len(fringe) > 0 {
p := heap.Pop(&fringe).(path)
if p.at == to && p.depth == 0 {
return p.d
}
if od := dist[node{at: p.at, depth: p.depth}]; od < p.d {
continue // obsolete path
}
for to, e := range edges[p.at] {
ed, edepth := p.d+e.d, p.depth+e.depth
if edepth < 0 {
continue // cannot ascend
}
if od, ok := dist[node{at: to, depth: edepth}]; ok && od <= ed {
continue // seen better
}
dist[node{at: to, depth: edepth}] = ed
heap.Push(&fringe, path{at: to, depth: edepth, d: ed})
}
}
return -1
}
func distances(level *util.Level) map[label]map[label]distance {
labels := make(map[util.P]label)
level.Range(func(x, y int, c byte) {
if !unicode.IsUpper(rune(c)) {
return
}
for _, d := range (util.P{}).Neigh() {
c2 := level.At(x+d.X, y+d.Y)
if !unicode.IsUpper(rune(c2)) || level.At(x+2*d.X, y+2*d.Y) != '.' {
continue
}
var name string
if d.X > 0 || d.Y > 0 {
name = string([]byte{c, c2})
} else {
name = string([]byte{c2, c})
}
labels[util.P{x + 2*d.X, y + 2*d.Y}] = label{
name: name,
outer: !level.InBounds(x-d.X, y-d.Y),
}
}
})
allDist := make(map[label]map[label]distance)
for start, from := range labels {
dist := make(map[label]distance)
seen := make(map[util.P]struct{})
fringe := []util.P{start}
d := 0
for len(fringe) > 0 {
d++
var newFringe []util.P
for _, p := range fringe {
seen[p] = struct{}{}
for _, step := range p.Neigh() {
if _, ok := seen[step]; ok {
continue
}
if to, ok := labels[step]; ok {
if _, ok := dist[to]; !ok {
dist[to] = distance{d: d, depth: 0} // best path from -> to
}
continue
}
if level.At(step.X, step.Y) == '.' {
newFringe = append(newFringe, step)
}
}
}
fringe = newFringe
}
allDist[from] = dist
}
for in := range allDist {
if in.outer {
continue
}
out := label{name: in.name, outer: true}
if _, ok := allDist[out]; !ok {
continue
}
allDist[in][out] = distance{d: 1, depth: 1}
allDist[out][in] = distance{d: 1, depth: -1}
}
return allDist
}
func (q pathq) Len() int {
return len(q)
}
func (q pathq) Less(i, j int) bool {
return q[i].d < q[j].d
}
func (q pathq) Swap(i, j int) {
q[i], q[j] = q[j], q[i]
}
func (q *pathq) Push(x interface{}) {
*q = append(*q, x.(path))
}
func (q *pathq) Pop() interface{} {
old, n := *q, len(*q)
path := old[n-1]
*q = old[0 : n-1]
return path
} | 2019/day20/day20.go | 0.640861 | 0.434641 | day20.go | starcoder |
package api
// PointI
// ToPoint2D converts to a Point2D.
func (p PointI) ToPoint2D() Point2D {
return Point2D{float32(p.X), float32(p.Y)}
}
// ToPoint2DCentered converts to a Point2D and adds 0.5 to X/Y to center inside that map cell.
func (p PointI) ToPoint2DCentered() Point2D {
return Point2D{float32(p.X) + 0.5, float32(p.Y) + 0.5}
}
// ToPoint converts to a Point with zero Z coordinate.
func (p PointI) ToPoint() Point {
return Point{float32(p.X), float32(p.Y), 0}
}
// ToPointCentered to a Point with zero Z coordinate and adds 0.5 to X/Y to center inside that map cell.
func (p PointI) ToPointCentered() Point {
return Point{float32(p.X) + 0.5, float32(p.Y) + 0.5, 0}
}
// VecTo computes the vector from p -> p2.
func (p PointI) VecTo(p2 PointI) VecI {
return VecI(p2).Sub(VecI(p))
}
// For DirTo and Offset, just convert to Point2D first since the result may not be integers
// Distance computes the absolute distance between two points.
func (p PointI) Distance(p2 PointI) float32 {
return p.VecTo(p2).Len()
}
// Distance2 computes the squared distance between two points.
func (p PointI) Distance2(p2 PointI) int32 {
return p.VecTo(p2).Len2()
}
// Manhattan computes the manhattan distance between two points.
func (p PointI) Manhattan(p2 PointI) int32 {
return p.VecTo(p2).Manhattan()
}
// Add returns the point at the end of v when starting from p.
func (p PointI) Add(v VecI) PointI {
return PointI(VecI(p).Add(v))
}
// Offset4By returns the Von Neumann neighborhood (or 4-neighborhood) of p.
func (p PointI) Offset4By(by int32) [4]PointI {
return [...]PointI{
PointI{p.X, p.Y - by},
PointI{p.X + by, p.Y},
PointI{p.X, p.Y + by},
PointI{p.X - by, p.Y},
}
}
// Offset8By returns the Moore neighborhood (or 8-neighborhood) of p.
func (p PointI) Offset8By(by int32) [8]PointI {
return [...]PointI{
PointI{p.X, p.Y - by},
PointI{p.X + by, p.Y - by},
PointI{p.X + by, p.Y},
PointI{p.X + by, p.Y + by},
PointI{p.X, p.Y + by},
PointI{p.X - by, p.Y + by},
PointI{p.X - by, p.Y},
PointI{p.X - by, p.Y - by},
}
}
// Point2D
// ToPointI converts to a PointI by truncating X/Y.
func (p Point2D) ToPointI() PointI {
return PointI{int32(p.X), int32(p.Y)}
}
// ToPoint converts to a Point by truncating X/Y and setting Z to zero.
func (p Point2D) ToPoint() Point {
return Point{p.X, p.Y, 0}
}
// VecTo computes the vector from p -> p2.
func (p Point2D) VecTo(p2 Point2D) Vec2D {
return Vec2D(p2).Sub(Vec2D(p))
}
// DirTo computes the unit vector pointing from p -> p2.
func (p Point2D) DirTo(p2 Point2D) Vec2D {
return p.VecTo(p2).Norm()
}
// Offset moves a point toward a target by the specified distance.
func (p Point2D) Offset(toward Point2D, by float32) Point2D {
return p.Add(p.DirTo(toward).Mul(by))
}
// Distance computes the absolute distance between two points.
func (p Point2D) Distance(p2 Point2D) float32 {
return p.VecTo(p2).Len()
}
// Distance2 computes the squared distance between two points.
func (p Point2D) Distance2(p2 Point2D) float32 {
return p.VecTo(p2).Len2()
}
// Manhattan computes the manhattan distance between two points.
func (p Point2D) Manhattan(p2 Point2D) float32 {
return p.VecTo(p2).Manhattan()
}
// Add returns the point at the end of v when starting from p.
func (p Point2D) Add(v Vec2D) Point2D {
return Point2D(Vec2D(p).Add(v))
}
// Offset4By returns the Von Neumann neighborhood (or 4-neighborhood) of p.
func (p Point2D) Offset4By(by float32) [4]Point2D {
return [...]Point2D{
Point2D{p.X, p.Y - by},
Point2D{p.X + by, p.Y},
Point2D{p.X, p.Y + by},
Point2D{p.X - by, p.Y},
}
}
// Offset8By returns the Moore neighborhood (or 8-neighborhood) of p.
func (p Point2D) Offset8By(by float32) [8]Point2D {
return [...]Point2D{
Point2D{p.X, p.Y - by},
Point2D{p.X + by, p.Y - by},
Point2D{p.X + by, p.Y},
Point2D{p.X + by, p.Y + by},
Point2D{p.X, p.Y + by},
Point2D{p.X - by, p.Y + by},
Point2D{p.X - by, p.Y},
Point2D{p.X - by, p.Y - by},
}
}
// Point
// ToPointI converts to a PointI by truncating X/Y and dropping Z.
func (p Point) ToPointI() PointI {
return PointI{int32(p.X), int32(p.Y)}
}
// ToPoint2D converts to a Point2D by dropping Z.
func (p Point) ToPoint2D() Point2D {
return Point2D{p.X, p.Y}
}
// VecTo computes the vector from p -> p2.
func (p Point) VecTo(p2 Point) Vec {
return Vec(p2).Sub(Vec(p))
}
// DirTo computes the unit vector pointing from p -> p2.
func (p Point) DirTo(p2 Point) Vec {
return p.VecTo(p2).Norm()
}
// Offset moves a point toward a target by the specified distance.
func (p Point) Offset(toward Point, by float32) Point {
return p.Add(p.DirTo(toward).Mul(by))
}
// Distance computes the absolute distance between two points.
func (p Point) Distance(p2 Point) float32 {
return p.VecTo(p2).Len()
}
// Distance2 computes the squared distance between two points.
func (p Point) Distance2(p2 Point) float32 {
return p.VecTo(p2).Len2()
}
// Add returns the point at the end of v when starting from p.
func (p Point) Add(v Vec) Point {
return Point(Vec(p).Add(v))
} | api/points.go | 0.942135 | 0.793226 | points.go | starcoder |
package evaluator
import (
"regexp"
"github.com/shric/monkey/object"
"github.com/shric/monkey/token"
)
// evalInfixExpression dispatches an infix expression (left <op> right) to the
// type-specific evaluator for the operand pair. Mixed Integer/Float pairs are
// promoted to Float by the dedicated mixed-type evaluators. Equality operators
// on other types fall back to object identity, which is sound for the shared
// singleton TRUE/FALSE/NULL objects.
func evalInfixExpression(
	tok token.Token,
	left, right object.Object,
) object.Object {
	switch {
	case left.Type() == object.FLOAT_OBJ && right.Type() == object.INTEGER_OBJ:
		return evalFloatIntegerInfixExpression(tok, left, right)
	case left.Type() == object.INTEGER_OBJ && right.Type() == object.FLOAT_OBJ:
		return evalIntegerFloatInfixExpression(tok, left, right)
	case left.Type() == object.FLOAT_OBJ && right.Type() == object.FLOAT_OBJ:
		return evalFloatInfixExpression(tok, left, right)
	case left.Type() == object.INTEGER_OBJ && right.Type() == object.INTEGER_OBJ:
		return evalIntegerInfixExpression(tok, left, right)
	case left.Type() == object.STRING_OBJ && right.Type() == object.STRING_OBJ:
		return evalStringInfixExpression(tok, left, right)
	case tok.Type == token.EQ:
		// Pointer comparison: relies on interned boolean/null singletons.
		return nativeBoolToBooleanObject(left == right)
	case tok.Type == token.NOT_EQ:
		return nativeBoolToBooleanObject(left != right)
	case left.Type() == object.BOOLEAN_OBJ && right.Type() == object.BOOLEAN_OBJ:
		return evalBooleanInfixExpression(tok, left, right)
	case left.Type() != right.Type():
		return newError("type mismatch: %s %s %s",
			left.Type(), tok.Type, right.Type())
	default:
		// Same message shape as the per-type evaluators ("unknown operator",
		// formatted with tok.Type) so diagnostics stay uniform.
		return newError("unknown operator: %s %s %s",
			left.Type(), tok.Type, right.Type())
	}
}
// evalBooleanInfixExpression applies a logical operator (&& or ||) to two
// Boolean operands. Any other operator on a Boolean pair yields an
// "unknown operator" error object.
func evalBooleanInfixExpression(
	tok token.Token,
	left, right object.Object,
) object.Object {
	l := left.(*object.Boolean).Value
	r := right.(*object.Boolean).Value
	switch tok.Type {
	case token.AND:
		return &object.Boolean{Value: l && r}
	case token.OR:
		return &object.Boolean{Value: l || r}
	}
	return newError("unknown operator: %s %s %s",
		left.Type(), tok.Type, right.Type())
}
// evalFloatInfixExpression evaluates arithmetic and comparison operators on
// two Float operands. Division by zero is reported as an error object rather
// than producing an IEEE-754 infinity, matching the integer evaluator's
// behavior.
func evalFloatInfixExpression(
	tok token.Token,
	left, right object.Object,
) object.Object {
	leftVal := left.(*object.Float).Value
	rightVal := right.(*object.Float).Value
	switch tok.Type {
	case token.PLUS:
		return &object.Float{Value: leftVal + rightVal}
	case token.MINUS:
		return &object.Float{Value: leftVal - rightVal}
	case token.ASTERISK:
		return &object.Float{Value: leftVal * rightVal}
	case token.SLASH:
		if rightVal == 0 {
			// Both operands are floats here; the previous message wrongly
			// called this "Integer division".
			return newError("Float division by zero: %f/0.0", leftVal)
		}
		return &object.Float{Value: leftVal / rightVal}
	case token.LT:
		return nativeBoolToBooleanObject(leftVal < rightVal)
	case token.GT:
		return nativeBoolToBooleanObject(leftVal > rightVal)
	case token.EQ:
		return nativeBoolToBooleanObject(leftVal == rightVal)
	case token.NOT_EQ:
		return nativeBoolToBooleanObject(leftVal != rightVal)
	default:
		return newError("unknown operator: %s %s %s",
			left.Type(), tok.Type, right.Type())
	}
}
// evalIntegerInfixExpression applies an arithmetic or comparison operator to
// two integer operands, guarding explicitly against division by zero.
func evalIntegerInfixExpression(
	tok token.Token,
	left, right object.Object,
) object.Object {
	l := left.(*object.Integer).Value
	r := right.(*object.Integer).Value
	switch tok.Type {
	case token.PLUS:
		return &object.Integer{Value: l + r}
	case token.MINUS:
		return &object.Integer{Value: l - r}
	case token.ASTERISK:
		return &object.Integer{Value: l * r}
	case token.SLASH:
		if r == 0 {
			return newError("Integer division by zero: %d/0", l)
		}
		return &object.Integer{Value: l / r}
	case token.LT:
		return nativeBoolToBooleanObject(l < r)
	case token.GT:
		return nativeBoolToBooleanObject(l > r)
	case token.EQ:
		return nativeBoolToBooleanObject(l == r)
	case token.NOT_EQ:
		return nativeBoolToBooleanObject(l != r)
	default:
		return newError("unknown operator: %s %s %s",
			left.Type(), tok.Type, right.Type())
	}
}
// evalIntegerFloatInfixExpression handles mixed int/float operands (integer on
// the left); the integer is promoted to float64 and the result is a Float.
func evalIntegerFloatInfixExpression(
	tok token.Token,
	left, right object.Object,
) object.Object {
	leftVal := left.(*object.Integer).Value
	rightVal := right.(*object.Float).Value
	switch tok.Type {
	case token.PLUS:
		return &object.Float{Value: float64(leftVal) + rightVal}
	case token.MINUS:
		return &object.Float{Value: float64(leftVal) - rightVal}
	case token.ASTERISK:
		return &object.Float{Value: float64(leftVal) * rightVal}
	case token.SLASH:
		if rightVal == 0 {
			// The divisor is a float zero; the old message incorrectly said
			// "Integer division by zero".
			return newError("Float division by zero: %d/0.0", leftVal)
		}
		return &object.Float{Value: float64(leftVal) / rightVal}
	case token.LT:
		return nativeBoolToBooleanObject(float64(leftVal) < rightVal)
	case token.GT:
		return nativeBoolToBooleanObject(float64(leftVal) > rightVal)
	case token.EQ:
		return nativeBoolToBooleanObject(float64(leftVal) == rightVal)
	case token.NOT_EQ:
		return nativeBoolToBooleanObject(float64(leftVal) != rightVal)
	default:
		return newError("unknown operator: %s %s %s",
			left.Type(), tok.Type, right.Type())
	}
}
// evalFloatIntegerInfixExpression handles mixed float/int operands (float on
// the left); the integer is promoted to float64 and the result is a Float.
func evalFloatIntegerInfixExpression(
	tok token.Token,
	left, right object.Object,
) object.Object {
	leftVal := left.(*object.Float).Value
	rightVal := right.(*object.Integer).Value
	switch tok.Type {
	case token.PLUS:
		return &object.Float{Value: leftVal + float64(rightVal)}
	case token.MINUS:
		return &object.Float{Value: leftVal - float64(rightVal)}
	case token.ASTERISK:
		return &object.Float{Value: leftVal * float64(rightVal)}
	case token.SLASH:
		if rightVal == 0 {
			// Float result, so report a float division (the previous message
			// said "Integer division by zero").
			return newError("Float division by zero: %f/0", leftVal)
		}
		return &object.Float{Value: leftVal / float64(rightVal)}
	case token.LT:
		return nativeBoolToBooleanObject(leftVal < float64(rightVal))
	case token.GT:
		return nativeBoolToBooleanObject(leftVal > float64(rightVal))
	case token.EQ:
		return nativeBoolToBooleanObject(leftVal == float64(rightVal))
	case token.NOT_EQ:
		return nativeBoolToBooleanObject(leftVal != float64(rightVal))
	default:
		return newError("unknown operator: %s %s %s",
			left.Type(), tok.Type, right.Type())
	}
}
func evalStringInfixExpression(
tok token.Token,
left, right object.Object,
) object.Object {
leftVal := left.(*object.String).Value
rightVal := right.(*object.String).Value
switch tok.Type {
case token.PLUS:
return &object.String{Value: leftVal + rightVal}
case token.EQ:
return &object.Boolean{Value: leftVal == rightVal}
case token.NOT_EQ:
return &object.Boolean{Value: leftVal != rightVal}
case token.REGEX:
re, err := regexp.Compile(rightVal)
if err != nil {
return newError("%v", err)
}
return nativeBoolToBooleanObject(re.MatchString(leftVal))
default:
return newError("unknown operator: %s %s %s",
left.Type(), tok.Type, right.Type())
}
} | evaluator/infix.go | 0.671255 | 0.544499 | infix.go | starcoder |
package filter
import "github.com/nerdynick/ccloud-go-sdk/telemetry/labels"
const (
	// OpNot is the operand string used for a NOT filter.
	OpNot string = "NOT"
)
// UnaryFilter wraps a single nested filter with a unary operand such as OpNot.
type UnaryFilter struct {
	Op        string `json:"op"`     // operand name, e.g. "NOT"
	SubFilter Filter `json:"filter"` // the filter the operand applies to
}
// And combines this filter with the given filters under a logical AND.
func (fil UnaryFilter) And(filters ...Filter) CompoundFilter {
	return And(fil).Add(filters...)
}

// AndEqualTo ANDs this filter with an equality condition on field.
func (fil UnaryFilter) AndEqualTo(field labels.Label, value string) CompoundFilter {
	return fil.And(EqualTo(field, value))
}

// AndNotEqualTo ANDs this filter with a negated equality condition on field.
func (fil UnaryFilter) AndNotEqualTo(field labels.Label, value string) CompoundFilter {
	return fil.And(NotEqualTo(field, value))
}

// AndGreaterThan ANDs this filter with a greater-than condition on field.
func (fil UnaryFilter) AndGreaterThan(field labels.Label, value string) CompoundFilter {
	return fil.And(GreaterThan(field, value))
}

// AndNotGreaterThan ANDs this filter with a negated greater-than condition on field.
func (fil UnaryFilter) AndNotGreaterThan(field labels.Label, value string) CompoundFilter {
	return fil.And(NotGreaterThan(field, value))
}

// AndGreaterThanOrEqualTo ANDs this filter with a greater-or-equal condition on field.
func (fil UnaryFilter) AndGreaterThanOrEqualTo(field labels.Label, value string) CompoundFilter {
	return fil.And(GreaterThanOrEqualTo(field, value))
}

// AndNotGreaterThanOrEqualTo ANDs this filter with a negated greater-or-equal condition on field.
func (fil UnaryFilter) AndNotGreaterThanOrEqualTo(field labels.Label, value string) CompoundFilter {
	return fil.And(NotGreaterThanOrEqualTo(field, value))
}

// Or combines this filter with the given filters under a logical OR.
func (fil UnaryFilter) Or(filters ...Filter) CompoundFilter {
	return Or(fil).Add(filters...)
}

// OrEqualTo ORs this filter with an equality condition on field.
func (fil UnaryFilter) OrEqualTo(field labels.Label, value string) CompoundFilter {
	return fil.Or(EqualTo(field, value))
}

// OrNotEqualTo ORs this filter with a negated equality condition on field.
func (fil UnaryFilter) OrNotEqualTo(field labels.Label, value string) CompoundFilter {
	return fil.Or(NotEqualTo(field, value))
}

// OrGreaterThan ORs this filter with a greater-than condition on field.
func (fil UnaryFilter) OrGreaterThan(field labels.Label, value string) CompoundFilter {
	return fil.Or(GreaterThan(field, value))
}

// OrNotGreaterThan ORs this filter with a negated greater-than condition on field.
func (fil UnaryFilter) OrNotGreaterThan(field labels.Label, value string) CompoundFilter {
	return fil.Or(NotGreaterThan(field, value))
}

// OrGreaterThanOrEqualTo ORs this filter with a greater-or-equal condition on field.
func (fil UnaryFilter) OrGreaterThanOrEqualTo(field labels.Label, value string) CompoundFilter {
	return fil.Or(GreaterThanOrEqualTo(field, value))
}

// OrNotGreaterThanOrEqualTo ORs this filter with a negated greater-or-equal condition on field.
func (fil UnaryFilter) OrNotGreaterThanOrEqualTo(field labels.Label, value string) CompoundFilter {
	return fil.Or(NotGreaterThanOrEqualTo(field, value))
} | telemetry/query/filter/unary.go | 0.828696 | 0.427337 | unary.go | starcoder |
package iso20022
// TradeData9 provides information on the status of a trade.
// All fields are pointers so absent XML elements stay nil.
type TradeData9 struct {
	// Reference to the unique system identification assigned to the trade by the central matching system.
	MatchingSystemUniqueReference *Max35Text `xml:"MtchgSysUnqRef"`
	// Reference to the unique matching identification assigned to the trade and to the matching trade from the counterparty by the central matching system.
	MatchingSystemMatchingReference *Max35Text `xml:"MtchgSysMtchgRef,omitempty"`
	// Unique reference from the central settlement system that allows the removal of alleged trades once the matched status notification for the matching side has been received.
	MatchingSystemMatchedSideReference *Max35Text `xml:"MtchgSysMtchdSdRef,omitempty"`
	// Party that assigned the status to the trade.
	StatusOriginator *Max35Text `xml:"StsOrgtr,omitempty"`
	// Specifies the new status of the trade.
	CurrentStatus *StatusAndSubStatus1 `xml:"CurSts"`
	// Additional information about the current status of the trade.
	CurrentStatusSubType *StatusSubType1Code `xml:"CurStsSubTp,omitempty"`
	// Specifies the date and time at which the current status was assigned.
	CurrentStatusDateTime *ISODateTime `xml:"CurStsDtTm,omitempty"`
	// Specifies the previous status of the trade.
	PreviousStatus *Status5Choice `xml:"PrvsSts,omitempty"`
	// Specifies whether a trade is alleged or not.
	AllegedTrade *YesNoIndicator `xml:"AllgdTrad,omitempty"`
	// Additional information on the previous status of a trade in a central system.
	PreviousStatusSubType *StatusSubType1Code `xml:"PrvsStsSubTp,omitempty"`
}
// SetMatchingSystemUniqueReference sets the matching system's unique trade reference.
func (t *TradeData9) SetMatchingSystemUniqueReference(value string) {
	t.MatchingSystemUniqueReference = (*Max35Text)(&value)
}

// SetMatchingSystemMatchingReference sets the matching system's matching reference.
func (t *TradeData9) SetMatchingSystemMatchingReference(value string) {
	t.MatchingSystemMatchingReference = (*Max35Text)(&value)
}

// SetMatchingSystemMatchedSideReference sets the matched-side reference.
func (t *TradeData9) SetMatchingSystemMatchedSideReference(value string) {
	t.MatchingSystemMatchedSideReference = (*Max35Text)(&value)
}

// SetStatusOriginator sets the party that assigned the status.
func (t *TradeData9) SetStatusOriginator(value string) {
	t.StatusOriginator = (*Max35Text)(&value)
}

// AddCurrentStatus allocates and returns the CurrentStatus field.
func (t *TradeData9) AddCurrentStatus() *StatusAndSubStatus1 {
	t.CurrentStatus = new(StatusAndSubStatus1)
	return t.CurrentStatus
}

// SetCurrentStatusSubType sets the sub-type of the current status.
func (t *TradeData9) SetCurrentStatusSubType(value string) {
	t.CurrentStatusSubType = (*StatusSubType1Code)(&value)
}

// SetCurrentStatusDateTime sets when the current status was assigned.
func (t *TradeData9) SetCurrentStatusDateTime(value string) {
	t.CurrentStatusDateTime = (*ISODateTime)(&value)
}

// AddPreviousStatus allocates and returns the PreviousStatus field.
func (t *TradeData9) AddPreviousStatus() *Status5Choice {
	t.PreviousStatus = new(Status5Choice)
	return t.PreviousStatus
}

// SetAllegedTrade sets whether the trade is alleged.
func (t *TradeData9) SetAllegedTrade(value string) {
	t.AllegedTrade = (*YesNoIndicator)(&value)
}

// SetPreviousStatusSubType sets the sub-type of the previous status.
func (t *TradeData9) SetPreviousStatusSubType(value string) {
	t.PreviousStatusSubType = (*StatusSubType1Code)(&value)
} | TradeData9.go | 0.791982 | 0.449272 | TradeData9.go | starcoder |
package datablock
import (
"bytes"
"compress/gzip"
"github.com/mattetti/filebuffer"
log "github.com/sirupsen/logrus"
"io"
"io/ioutil"
"net/http"
"strconv"
"time"
)
// A DataBlock represents a block of data that may be compressed.
type DataBlock struct {
	data             []byte // payload bytes (gzipped when compressed is true)
	compressed       bool   // whether data currently holds gzipped bytes
	length           int    // length of data in its current representation
	compressionSpeed bool   // prefer speed over best compression ratio?
}

var (
	// EmptyDataBlock is an empty data block
	EmptyDataBlock = &DataBlock{[]byte{}, false, 0, true}
)
// NewDataBlock creates a new uncompressed data block wrapping data.
// compressionSpeed selects speedy compression over compact compression for
// any later Compress/Gzipped call.
func NewDataBlock(data []byte, compressionSpeed bool) *DataBlock {
	return &DataBlock{data, false, len(data), compressionSpeed}
}
// newDataBlockSpecified creates a data block whose payload may already be
// compressed, as indicated by the compressed flag.
// compressionSpeed selects speedy compression over compact compression.
func newDataBlockSpecified(data []byte, compressed bool, compressionSpeed bool) *DataBlock {
	return &DataBlock{data, compressed, len(data), compressionSpeed}
}
// UncompressedData returns the original, uncompressed data together with its
// length, decompressing first when the block is stored compressed.
func (b *DataBlock) UncompressedData() ([]byte, int, error) {
	if !b.compressed {
		return b.data, b.length, nil
	}
	return decompress(b.data)
}
// MustData returns the uncompressed data, or an empty byte slice when
// decompression fails (in which case log.Fatal terminates the process first).
func (b *DataBlock) MustData() []byte {
	if !b.compressed {
		return b.data
	}
	plain, _, err := decompress(b.data)
	if err != nil {
		log.Fatal(err)
		return []byte{}
	}
	return plain
}
// String returns the uncompressed data as a string or as an empty string.
// Same as MustData, but converted to a string (and therefore also terminates
// the process via log.Fatal if decompression fails).
func (b *DataBlock) String() string {
	return string(b.MustData())
}
// Gzipped returns the gzip-compressed data and its length, compressing on the
// fly when the block is currently stored uncompressed.
func (b *DataBlock) Gzipped() ([]byte, int, error) {
	if b.compressed {
		return b.data, b.length, nil
	}
	return compress(b.data, b.compressionSpeed)
}
// Compress converts the block to its gzipped representation in place.
// A block that is already compressed is left untouched.
func (b *DataBlock) Compress() error {
	if b.compressed {
		return nil
	}
	packed, n, err := compress(b.data, b.compressionSpeed)
	if err != nil {
		return err
	}
	b.data = packed
	b.length = n
	b.compressed = true
	return nil
}
// Decompress converts the block back to its plain representation in place.
// A block that is not compressed is left untouched.
func (b *DataBlock) Decompress() error {
	if !b.compressed {
		return nil
	}
	plain, n, err := decompress(b.data)
	if err != nil {
		return err
	}
	b.data = plain
	b.length = n
	b.compressed = false
	return nil
}
// IsCompressed checks if this data block is compressed.
func (b *DataBlock) IsCompressed() bool {
	return b.compressed
}

// StringLength returns the length of the data, represented as a string.
func (b *DataBlock) StringLength() string {
	return strconv.Itoa(b.length)
}

// Length returns the length of the current data
// (not the length of the original data, but in the current state).
func (b *DataBlock) Length() int {
	return b.length
}
// HasData reports whether the block currently holds any bytes.
func (b *DataBlock) HasData() bool {
	return b.length != 0
}
// ToClient writes the data to the client.
// Also sets the right headers and compresses the data with gzip if needed.
// Set canGzip to true if the http client can handle gzipped data.
// gzipThreshold is the threshold (in bytes) for when it makes sense to compress the data with gzip.
// NOTE(review): a failed decompression calls log.Fatal, terminating the whole
// process from inside a request handler — confirm that is intended.
func (b *DataBlock) ToClient(w http.ResponseWriter, req *http.Request, name string, canGzip bool, gzipThreshold int) {
	overThreshold := b.Length() > gzipThreshold // Is there enough data that it makes sense to compress it?
	// Compress or decompress the data as needed. Add headers if compression is used.
	if !canGzip {
		// No compression
		if err := b.Decompress(); err != nil {
			// Unable to decompress gzipped data!
			log.Fatal(err)
		}
	} else if b.compressed || overThreshold {
		// If the given data is already compressed, or we are planning to compress,
		// set the gzip headers and serve it as compressed data.
		w.Header().Set("Content-Encoding", "gzip")
		w.Header().Add("Vary", "Accept-Encoding")
		// If the data is over a certain size, compress and serve
		if overThreshold {
			// Compress
			if err := b.Compress(); err != nil {
				// Write uncompressed data if gzip should fail
				log.Error(err)
				w.Header().Set("Content-Encoding", "identity")
			}
		}
	}
	// Done by ServeContent instead
	//w.Header().Set("Content-Length", b.StringLength())
	//w.Write(b.data)
	// Serve the data with http.ServeContent, which supports ranges/streaming
	http.ServeContent(w, req, name, time.Time{}, filebuffer.New(b.data))
}
// compress gzips data and returns the compressed bytes, their length and any
// error. An empty input yields an empty result without running gzip at all.
func compress(data []byte, speed bool) ([]byte, int, error) {
	if len(data) == 0 {
		return []byte{}, 0, nil
	}
	var buf bytes.Buffer
	if _, err := gzipWrite(&buf, data, speed); err != nil {
		return nil, 0, err
	}
	out := buf.Bytes()
	return out, len(out), nil
}
// decompress gunzips data and returns the plain bytes, their length and any
// error. An empty input yields an empty result without running gzip at all.
func decompress(data []byte) ([]byte, int, error) {
	if len(data) == 0 {
		return []byte{}, 0, nil
	}
	var buf bytes.Buffer
	if _, err := gunzipWrite(&buf, data); err != nil {
		return nil, 0, err
	}
	out := buf.Bytes()
	return out, len(out), nil
}
// Write gzipped data to a Writer. Returns bytes written and an error.
func gzipWrite(w io.Writer, data []byte, speed bool) (int, error) {
// Write gzipped data to the client
level := gzip.BestCompression
if speed {
level = gzip.BestSpeed
}
gw, err := gzip.NewWriterLevel(w, level)
if err != nil {
return 0, err
}
defer gw.Close()
bytesWritten, err := gw.Write(data)
if err != nil {
return 0, err
}
return bytesWritten, nil
}
// Write gunzipped data to a Writer. Returns bytes written and an error.
func gunzipWrite(w io.Writer, data []byte) (int, error) {
// Write gzipped data to the client
gr, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return 0, err
}
defer gr.Close()
data, err = ioutil.ReadAll(gr)
if err != nil {
return 0, err
}
bytesWritten, err := w.Write(data)
if err != nil {
return 0, err
}
return bytesWritten, nil
} | vendor/github.com/xyproto/datablock/datablock.go | 0.616012 | 0.605566 | datablock.go | starcoder |
package types
import (
"fmt"
"math"
)
/*
EdgeKey is an always positive number that stores an edge's vertices as indices in a way that can be compared
An edge between vertices [4] and [0] will always be stored as [0,4], in the ascending order of the index values
*/
type EdgeKey uint64

// NewEdgeKey packs two vertex indices (each at most MaxUint32) into an
// EdgeKey: the smaller index in the low 32 bits, the larger in the high 32
// bits, so the same pair always yields the same key regardless of order.
// It panics if either index is negative or does not fit in 32 bits.
func NewEdgeKey(verts [2]int) (packed EdgeKey) {
	var (
		limit = math.MaxUint32
	)
	for _, vert := range verts {
		if vert < 0 || vert > limit {
			panic(fmt.Errorf("unable to pack two ints into a uint64, have %d and %d as inputs",
				verts[0], verts[1]))
		}
	}
	lo, hi := verts[0], verts[1]
	if lo > hi {
		lo, hi = hi, lo
	}
	// Pack in uint64 space: hi<<32 can exceed MaxInt64, so the previous int
	// arithmetic relied on signed wraparound before the final conversion.
	packed = EdgeKey(uint64(lo) | uint64(hi)<<32)
	return
}
// GetVertices unpacks the two vertex indices stored in the key: the low 32
// bits become verts[0] and the high 32 bits verts[1]. rev swaps the pair.
func (ek EdgeKey) GetVertices(rev bool) (verts [2]int) {
	hi := int(ek >> 32)
	lo := int(ek & (1<<32 - 1))
	if rev {
		return [2]int{hi, lo}
	}
	return [2]int{lo, hi}
}
/*
An EdgeInt stores an edge's vertices in their original order, so that the
edge's direction can be recovered later.
*/
type EdgeInt int64

// NewEdgeInt packs two vertex indices (each at most 2^31-1) into an EdgeInt.
// The smaller index occupies the low 32 bits; a negative result records that
// the input order was descending. Panics on out-of-range indices.
func NewEdgeInt(verts [2]int) (packed EdgeInt) {
	var (
		limit = math.MaxUint32 >> 1 // leaves room for the sign bit of an int64
	)
	for _, vert := range verts {
		if vert < 0 || vert > limit {
			panic(fmt.Errorf("unable to pack two ints into an int64, have %d and %d as inputs",
				verts[0], verts[1]))
		}
	}
	lo, hi := verts[0], verts[1]
	reversed := lo > hi
	if reversed {
		lo, hi = hi, lo
	}
	packed = EdgeInt(lo + hi<<32)
	if reversed {
		packed = -packed
	}
	return
}
// GetVertices unpacks the two vertex indices in their original order: a
// negative value means the pair was stored descending and is swapped back.
func (e EdgeInt) GetVertices() (verts [2]int) {
	negated := e < 0
	if negated {
		e = -e
	}
	hi := e >> 32
	verts[0] = int(e - hi<<32)
	verts[1] = int(hi)
	if negated {
		verts[0], verts[1] = verts[1], verts[0]
	}
	return
}
// GetKey converts the directed EdgeInt into its order-independent EdgeKey.
func (e EdgeInt) GetKey() (ek EdgeKey) {
	ek = NewEdgeKey(e.GetVertices())
	return
}
// vertEdgeBucket collects the edges that meet at a single vertex.
type vertEdgeBucket struct {
	numberOfEdges int
	vertEdge      [2]EdgeInt // up to 2 edges expected for a connected curve with a shared vertex key
}

// bucketMap indexes vertex buckets by vertex index.
type bucketMap map[int]*vertEdgeBucket // Key is the index of the vertex
// Connectable reports whether the vertex at index ind joins exactly two
// edges, i.e. it is an interior vertex rather than a dangling endpoint.
func (bm bucketMap) Connectable(ind int) bool {
	return bm[ind].numberOfEdges == 2
}
// AddEdge records e in the buckets of both of its endpoint vertices,
// creating a bucket on first use.
// NOTE(review): vertEdge holds at most 2 edges; a vertex shared by three or
// more edges would index out of range — callers must supply a simple curve.
func (bm bucketMap) AddEdge(e EdgeInt) {
	var (
		b  *vertEdgeBucket
		ok bool
	)
	verts := e.GetVertices()
	for i := 0; i < 2; i++ {
		if b, ok = bm[verts[i]]; !ok {
			bm[verts[i]] = &vertEdgeBucket{}
			b = bm[verts[i]]
		}
		b.vertEdge[b.numberOfEdges] = e
		b.numberOfEdges++
	}
}
// Curve is a list of edge segments, ideally forming one connected chain.
type Curve []EdgeInt

// Print writes each segment's vertex pair to stdout, for debugging.
func (c Curve) Print() {
	for i, e := range c {
		fmt.Printf("e[%d] = %v\n", i, e.GetVertices())
	}
}
// ReOrder returns the curve's segments rearranged into a single connected
// chain (optionally reversed) and reports whether the input was unordered.
// It panics when the segments do not form one open, contiguous curve.
func (c Curve) ReOrder(reverse bool) (cc Curve, unordered bool) {
	/*
		Orders a curve's line segments to form a connected curve
		Optionally, reverses the order relative to the default ordering obtained using the first edge as the start
		If the original slice of segments is unordered, order reversale is arbitrary, otherwise it reflects
		reversal of the original ordering of the ordered curve.
	*/
	var (
		l           = len(c)
		first, last = c[0], c[l-1] // Original first/last segments, used for ordering later
		endKeys     [2]int         // vertex index of endKeys, can be used as key into bucketMap
	)
	bm := make(bucketMap, l)
	// load up the bm with edges
	for _, e := range c {
		bm.AddEdge(e)
	}
	/*
		for v, b := range bm {
			fmt.Printf("b[%d] = %v\n", v, b)
		}
	*/
	// Find the endKeys: the two vertices touching only one edge each.
	var endCount int
	for key, b := range bm {
		if b.numberOfEdges == 1 { // this is one of two endKeys
			if endCount == 2 {
				panic("unable to construct contiguous curve from line segments, too many unconnected edges")
			}
			endKeys[endCount] = key
			endCount++
		}
	}
	//fmt.Printf("End vertex index keys = %v\n", endKeys)
	if endCount != 2 {
		panic("unable to find two unconnected endKeys")
	}
	// Default is to use the first edge to begin the curve, assuming the first/last edges are really the endKeys
	start := first
	if reverse {
		start = last
	}
	//fmt.Printf("start edge = %v\n", start.GetVertices())
	// Verify that the chosen start edge actually sits at one of the two ends.
	var startInd int
	startInd = -1
	for i := 0; i < 2; i++ {
		if bm[endKeys[i]].vertEdge[0] == start {
			startInd = i
			break
		}
	}
	if startInd == -1 {
		unordered = true                   // This list started unordered
		start = bm[endKeys[0]].vertEdge[0] // arbitrary start because the curve starts unordered
	}
	cc = AssembleCurve(bm, start)
	return
}
// AssembleCurve walks the bucket map from the given start edge, at each
// shared vertex following the single other incident edge, until it reaches a
// vertex that is not connectable (a dangling endpoint).
func AssembleCurve(bm bucketMap, start EdgeInt) (c Curve) {
	var (
		verts [2]int
		end   int
		b     *vertEdgeBucket
	)
	verts = start.GetVertices()
	// Check if conn is connectable, or is the "dangling" end
	end = verts[0] // The open end, to be connected
	if !bm.Connectable(end) {
		end = verts[1]
	}
	// Begin connecting edges; an open chain has one fewer edge than vertices.
	c = make(Curve, len(bm)-1)
	var ii int
	c[ii] = start
	ii++
	for {
		if !bm.Connectable(end) { // When we reach an unconnectable end, stop
			break
		}
		b = bm[end]
		for i := 0; i < 2; i++ {
			e := b.vertEdge[i]
			if e != start {
				c[ii] = e
				ii++
				verts = e.GetVertices()
				if verts[0] == end {
					end = verts[1]
				} else {
					end = verts[0]
				}
				start = e
				// Jump to the labelled empty statement below, i.e. continue
				// the outer loop with the newly found edge.
				goto Next
			}
		}
	Next:
	}
	return
} | types/elemental.go | 0.566738 | 0.695965 | elemental.go | starcoder |
package main
import (
"bytes"
"fmt"
"math"
"time"
"github.com/tkrajina/gpxgo/gpx"
)
// border is the margin added around the track, as a fraction of its height.
const border = 0.05

// Map is used to translate track coordinates into SVG coordinate system for rendering.
type Map struct {
	w, h   int     // widht, height in svg coordinates
	lw, lh float64 // width, height in lat/lon degrees
	lx, ly float64 // bottom/left offset in lat/lon degrees
	coef   float64 // longitudinal adjustment coeficient (degrees of longitude are shorter in higher latitudes)
}

// NewMap builds a Map covering the given GPX bounds, rendered at the given
// pixel width; the pixel height is derived from the bounds' aspect ratio.
func NewMap(b gpx.GpxBounds, width int) *Map {
	m := &Map{w: width, lx: b.MinLongitude, ly: b.MinLatitude}
	// Cosine of the mid-latitude compensates for meridian convergence.
	m.coef = math.Cos((b.MaxLatitude + b.MinLatitude) * math.Pi / 360)
	m.lw = (b.MaxLongitude - b.MinLongitude) * m.coef
	m.lh = b.MaxLatitude - b.MinLatitude
	// Pad all sides with a border derived from the vertical extent.
	// NOTE(review): the horizontal padding also uses bx (based on lh) —
	// confirm this asymmetry is intended.
	bx := border * m.lh
	m.lh += 2 * bx
	m.lx -= bx
	m.lw += 2 * bx
	m.ly -= bx
	m.h = int(m.lh / m.lw * float64(m.w))
	return m
}
// Point translates a GPS point into SVG coordinates.
// Y grows downward in SVG, hence the flip relative to latitude.
func (m *Map) Point(p *gpx.GPXPoint) (x, y int) {
	y = m.h - int((p.Latitude-m.ly)/m.lh*float64(m.h))
	x = int((p.Longitude - m.lx) * m.coef / m.lw * float64(m.w))
	return
}

// units for Distance and Speed functions,
// expressed as the length of one degree of latitude
const km = 2 * math.Pi * 6371 / 360 // kilometres per degree (mean Earth radius 6371 km)
const meter = 1000 * km
const nm = 60 // nautical miles per degree of latitude
// Distance computes the distance between two GPS points in specified units,
// using a flat-earth approximation with the map's longitude correction factor.
func (m *Map) Distance(p1, p2 *gpx.GPXPoint, unit float64) float64 {
	x := p2.Latitude - p1.Latitude
	y := (p2.Longitude - p1.Longitude) * m.coef
	return unit * math.Sqrt(x*x+y*y)
}

// Speed computes the average speed between two GPS points in specified units of distance.
// The time aspect is derived from the distance unit, i.e. meter => m/s, km => km/h, nm => kts.
func (m *Map) Speed(p1, p2 *gpx.GPXPoint, unit float64) float64 {
	t := float64(p2.Timestamp.Sub(p1.Timestamp))
	if unit == meter {
		t /= float64(time.Second)
	} else {
		t /= float64(time.Hour)
	}
	return m.Distance(p1, p2, unit) / t
}
// palette maps integer speed (in knots) to 12-bit #rgb colors, forming a
// blue -> cyan -> green -> yellow -> red gradient (24 entries).
var palette = func() (palette []int) {
	for i := 0; i < 16; i += 2 {
		palette = append(palette, i*16+15) // #0iF: blue towards cyan
	}
	for i := 0; i < 16; i += 4 {
		palette = append(palette, 15*16+15-i) // #0Fx: cyan towards green
	}
	for i := 0; i < 16; i += 4 {
		palette = append(palette, (i*16+15)*16) // #iF0: green towards yellow
	}
	for i := 0; i < 16; i += 2 {
		palette = append(palette, (17*15-i)*16) // #Fx0: yellow towards red
	}
	return palette
}()

// SpeedColor return the RGB color code matching the speed between the two GPS
// points, clamped to the hottest palette entry.
// NOTE(review): a negative speed (timestamps out of order) would index out of
// range — confirm inputs are monotonic in time.
func (m *Map) SpeedColor(p1, p2 *gpx.GPXPoint) string {
	s := int(m.Speed(p1, p2, nm))
	if s >= len(palette) {
		s = len(palette) - 1
	}
	return fmt.Sprintf("#%03x", palette[s])
}
func (m *Map) polylinePoints(s Segment) string {
b := bytes.NewBuffer(nil)
for i := range s.Points {
x, y := m.Point(s.Point(i))
fmt.Fprintf(b, "%d,%d ", x, y)
}
return b.String()
} | map.go | 0.750736 | 0.505493 | map.go | starcoder |
package gfx
import (
"fmt"
"image/color"
"github.com/hajimehoshi/ebiten/v2"
)
// A FrameBuffer is a set of cells which contain color information.
type FrameBuffer struct {
	pixels       []byte // RGBA bytes, 4 per cell, row-major
	pixelsLength uint   // len(pixels), cached for bounds checks
	Width        uint   // logical columns
	Height       uint   // logical rows
}
// NewFrameBuffer allocates a frame buffer with the given logical width and
// height. These dimensions should match the system being emulated, as opposed
// to what is ultimately shown on screen.
func NewFrameBuffer(width, height uint) *FrameBuffer {
	fb := &FrameBuffer{
		Width:  width,
		Height: height,
	}
	fb.pixelsLength = width * height * 4
	fb.pixels = make([]byte, fb.pixelsLength)
	return fb
}
// Invert returns a framebuffer that is the opposite (or inverted) version of
// the receiver: each R, G and B byte is bitwise-complemented while the alpha
// channel is carried over unchanged. This is useful for cases where you might
// want an inverse video effect.
func (fb *FrameBuffer) Invert() *FrameBuffer {
	inv := NewFrameBuffer(fb.Width, fb.Height)
	for i, px := range fb.pixels {
		if (i+1)%4 == 0 {
			// Alpha byte: copy as-is. (Previously this was skipped, leaving
			// alpha at zero and making the inverted image fully transparent.)
			inv.pixels[i] = px
			continue
		}
		inv.pixels[i] = px ^ 0xff
	}
	return inv
}
// cell returns the byte offset of cell (x, y) within the pixels slice.
// Each cell occupies 4 consecutive RGBA bytes, and rows are stored one after
// another, each Width cells long.
func (fb *FrameBuffer) cell(x, y uint) uint {
	return 4 * ((y * fb.Width) + x)
}
// getCell returns the color stored at cell (x, y), or an error when the cell
// falls outside the pixel buffer. This essentially translates the underlying
// cell structure into something similar to what gets passed in with SetCell.
// NOTE(review): x is not checked against Width, so an oversized x that still
// lands inside the buffer silently reads from the following row — confirm
// whether callers rely on that.
func (fb *FrameBuffer) getCell(x, y uint) (color.RGBA, error) {
	i := fb.cell(x, y)
	// The cell occupies bytes [i, i+4); require the whole quad to be in range.
	// (The previous `i > pixelsLength` test allowed i up to pixelsLength and
	// then panicked on the index below.)
	if i+4 > fb.pixelsLength {
		return color.RGBA{}, fmt.Errorf("out of bounds: (x %d, y %d)", x, y)
	}
	return color.RGBA{
		R: fb.pixels[i+0],
		G: fb.pixels[i+1],
		B: fb.pixels[i+2],
		A: fb.pixels[i+3],
	}, nil
}
// SetCell will assign the color of a single cell, or return an error when the
// cell falls outside the pixel buffer.
func (fb *FrameBuffer) SetCell(x, y uint, clr color.RGBA) error {
	i := fb.cell(x, y)
	// The cell occupies bytes [i, i+4); require the whole quad to be in range.
	// (The previous `i > pixelsLength` test allowed i up to pixelsLength and
	// then panicked on the writes below.)
	if i+4 > fb.pixelsLength {
		return fmt.Errorf("out of bounds: (x %d, y %d)", x, y)
	}
	fb.pixels[i+0] = clr.R
	fb.pixels[i+1] = clr.G
	fb.pixels[i+2] = clr.B
	fb.pixels[i+3] = clr.A
	return nil
}
// ClearCells will set a color on every cell of the frame buffer.
func (fb *FrameBuffer) ClearCells(clr color.RGBA) {
	// Walk the buffer one RGBA quad at a time.
	for i := uint(0); i < fb.pixelsLength; i += 4 {
		fb.pixels[i+0] = clr.R
		fb.pixels[i+1] = clr.G
		fb.pixels[i+2] = clr.B
		fb.pixels[i+3] = clr.A
	}
}
// Render uploads the framebuffer's RGBA bytes into the given ebiten image.
func (fb *FrameBuffer) Render(img *ebiten.Image) error {
	img.ReplacePixels(fb.pixels)
	return nil
}
// Blit will essentially copy the entire source framebuffer into the receiver,
// with its top-left corner at (x, y), copying one source row at a time.
func (fb *FrameBuffer) Blit(x, y uint, src *FrameBuffer) error {
	for sy := uint(0); sy < src.Height; sy++ {
		if err := fb.blitFromY(x, y+sy, sy, src); err != nil {
			return err
		}
	}
	return nil
}
// blitFromY is a helper method for Blit; basically it encapsulates the logic
// of blitting a single row: source row sy of src is copied into row y of the
// receiver, starting at column x.
func (fb *FrameBuffer) blitFromY(x, y, sy uint, src *FrameBuffer) error {
	// Where we're writing to
	di := fb.cell(x, y)
	// Where we're writing from
	si := src.cell(0, sy)
	writeLength := src.Width * 4
	// NOTE(review): the uint subtractions below wrap if di/si ever exceed the
	// buffer length; cell() shouldn't produce that here, but worth confirming.
	if fb.pixelsLength-di < writeLength {
		return fmt.Errorf(
			"destination out of bounds (pl[%d]-di[%d] < wl[%d]",
			fb.pixelsLength, di, writeLength,
		)
	}
	if src.pixelsLength-si < writeLength {
		return fmt.Errorf(
			"source out of bounds (pl[%d]-si[%d] < wl[%d]",
			src.pixelsLength, si, writeLength,
		)
	}
	// Remember that there are 4 bytes for every "cell" we need to copy!
	for slen := src.Width * 4; slen > 0; slen-- {
		fb.pixels[di] = src.pixels[si]
		di++
		si++
	}
	return nil
} | pkg/gfx/framebuffer.go | 0.827236 | 0.631438 | framebuffer.go | starcoder |
package zgeo
import (
"math"
"github.com/torlangballe/zutil/zmath"
)
// Matrix is a 2D affine transform: linear part [A B; C D] plus translation (Tx, Ty).
type Matrix struct {
	A, B, C, D, Tx, Ty float64
}

// MatrixIdentity is the identity transform.
var MatrixIdentity = Matrix{1, 0, 0, 1, 0, 0}
/*
func SM(w, h Num) *Matrix {
return &Matrix{sz.W, 0, 0, sz.H, 0, 0}
}
func TM(dx, dy Num) *Matrix {
return &Matrix{1, 0, 0, 1, dx, dy}
}
*/
// ScaleMatrix returns a transform scaling by sz.W horizontally and sz.H vertically.
func ScaleMatrix(sz Size) *Matrix {
	return &Matrix{sz.W, 0, 0, sz.H, 0, 0}
}

// TranslateMatrix returns a transform translating by delta.
func TranslateMatrix(delta Size) *Matrix {
	return &Matrix{1, 0, 0, 1, delta.W, delta.H}
}

// RotateMatrix returns a transform rotating by angle (radians).
func RotateMatrix(angle float64) *Matrix {
	s, c := math.Sincos(angle)
	return &Matrix{c, s, -s, c, 0, 0}
}

// MulPos applies the full affine transform (including translation) to pt.
func (m Matrix) MulPos(pt Pos) Pos {
	return Pos{m.A*pt.X + m.C*pt.Y + m.Tx, m.B*pt.X + m.D*pt.Y + m.Ty}
}

// MulSize applies only the linear part (no translation) to sz.
func (m Matrix) MulSize(sz Size) Size {
	return Size{m.A*sz.W + m.C*sz.H, m.B*sz.W + m.D*sz.H}
}

// TransformRect transforms rect by mapping its min and max corners.
// NOTE(review): under a rotating transform the mapped corners may no longer
// be min/max, so the result can be inverted — confirm callers' expectations.
func (m Matrix) TransformRect(rect Rect) Rect {
	var r Rect
	r.SetMin(m.MulPos(rect.Min()))
	r.SetMax(m.MulPos(rect.Max()))
	return r
}
// Multiplied returns the composition of m with a; all six coefficients are
// computed in one parallel assignment so each reads the pre-update values.
func (m Matrix) Multiplied(a Matrix) Matrix {
	m.A, m.B, m.C, m.D, m.Tx, m.Ty =
		a.A*m.A+a.B*m.C, a.A*m.B+a.B*m.D,
		a.C*m.A+a.D*m.C, a.C*m.B+a.D*m.D,
		a.Tx*m.A+a.Ty*m.C+m.Tx, a.Tx*m.B+a.Ty*m.D+m.Ty
	return m
}
// Rotated returns m composed with a rotation by angle (radians).
func (m Matrix) Rotated(angle float64) Matrix {
	sv, cv := math.Sincos(angle)
	m.A, m.B, m.C, m.D = cv*m.A+sv*m.C, cv*m.B+sv*m.D, cv*m.C-sv*m.A, cv*m.D-sv*m.B
	return m
}
// Scaled returns m with a scale by sz composed in.
func (m Matrix) Scaled(sz Size) Matrix {
	m.A *= sz.W
	m.B *= sz.W
	m.C *= sz.H
	m.D *= sz.H
	return m
}

// Translated returns m with a translation by delta composed in; the delta is
// mapped through the linear part before being added to (Tx, Ty).
func (m Matrix) Translated(delta Size) Matrix {
	m.Tx += delta.W*m.A + delta.H*m.C
	m.Ty += delta.W*m.B + delta.H*m.D
	return m
}

// TranslatedByPos is Translated with the Pos converted to a Size.
func (m Matrix) TranslatedByPos(delta Pos) Matrix {
	return m.Translated(delta.Size())
}

// det calculates the determinant of the linear part of the matrix.
func (m Matrix) det() float64 {
	return m.A*m.D - m.C*m.B
}
// Inverted returns the inverse transform and true, or the zero Matrix and
// false when m is singular (determinant zero).
func (m Matrix) Inverted() (Matrix, bool) {
	det := m.det()
	if det == 0 {
		return Matrix{}, false
	}
	return Matrix{
		m.D / det, -m.B / det,
		-m.C / det, m.A / det,
		(m.Ty*m.C - m.Tx*m.D) / det, (m.Tx*m.B - m.Ty*m.A) / det,
	}, true
}
// RotatedAroundPos returns m composed with a rotation by angle (radians)
// about pos: translate to pos, rotate, translate back.
func (m Matrix) RotatedAroundPos(pos Pos, angle float64) Matrix {
	m = m.Translated(pos.Size())
	m = m.Rotated(angle)
	m = m.Translated(pos.Size().Negative())
	return m
}

// MatrixForRotatingAroundPoint builds a transform rotating by deg degrees
// around point.
func MatrixForRotatingAroundPoint(point Pos, deg float64) Matrix {
	var transform = MatrixIdentity
	transform = transform.TranslatedByPos(point)
	transform = transform.Rotated(zmath.DegToRad(deg))
	transform = transform.TranslatedByPos(point.Negative())
	return transform
}

// MatrixForRotationDeg builds a transform rotating by deg degrees about the origin.
func MatrixForRotationDeg(deg float64) Matrix {
	var transform = MatrixIdentity
	transform = transform.Rotated(zmath.DegToRad(deg))
	return transform
} | zgeo/matrix.go | 0.845751 | 0.574395 | matrix.go | starcoder |
package fptower
// Expt set z to x^t in E12 and return z.
// The exponent is the absolute value of the fixed curve seed t; x is expected
// to lie in the cyclotomic subgroup, since CyclotomicSquare is used for every
// squaring step.
func (z *E12) Expt(x *E12) *E12 {
	// const tAbsVal uint64 = 9586122913090633729
	// tAbsVal in binary: 1000010100001000110000000000000000000000000000000000000000000001
	// drop the low 46 bits (all 0 except the least significant bit): 100001010000100011 = 136227
	// Shortest addition chains can be found at https://wwwhomes.uni-bielefeld.de/achim/addition_chain.html
	var result, x33 E12
	// a shortest addition chain for 136227
	result.Set(x)                    // 0                1
	result.CyclotomicSquare(&result) // 1( 0)            2
	result.CyclotomicSquare(&result) // 2( 1)            4
	result.CyclotomicSquare(&result) // 3( 2)            8
	result.CyclotomicSquare(&result) // 4( 3)           16
	result.CyclotomicSquare(&result) // 5( 4)           32
	result.Mul(&result, x)           // 6( 5, 0)        33
	x33.Set(&result)                 // save x33 for step 14
	result.CyclotomicSquare(&result) // 7( 6)           66
	result.CyclotomicSquare(&result) // 8( 7)          132
	result.CyclotomicSquare(&result) // 9( 8)          264
	result.CyclotomicSquare(&result) // 10( 9)         528
	result.CyclotomicSquare(&result) // 11(10)        1056
	result.CyclotomicSquare(&result) // 12(11)        2112
	result.CyclotomicSquare(&result) // 13(12)        4224
	result.Mul(&result, &x33)        // 14(13, 6)     4257
	result.CyclotomicSquare(&result) // 15(14)        8514
	result.CyclotomicSquare(&result) // 16(15)       17028
	result.CyclotomicSquare(&result) // 17(16)       34056
	result.CyclotomicSquare(&result) // 18(17)       68112
	result.Mul(&result, x)           // 19(18, 0)    68113
	result.CyclotomicSquare(&result) // 20(19)      136226
	result.Mul(&result, x)           // 21(20, 0)   136227
	// the remaining 46 bits: square them in, then multiply by x for the
	// final set low bit of the exponent.
	for i := 0; i < 46; i++ {
		result.CyclotomicSquare(&result)
	}
	result.Mul(&result, x)
	z.Set(&result)
	return z
}
// MulByVW set z to x*(y*v*w) and return z
// here y*v*w means the E12 element with C1.B1=y and all other components 0.
// yNR is y pre-multiplied by the nonresidue; it is applied to the coefficient
// products that reduce through the tower (see MulByNonResidue).
func (z *E12) MulByVW(x *E12, y *E2) *E12 {
	var result E12
	var yNR E2
	yNR.MulByNonResidue(y)
	result.C0.B0.Mul(&x.C1.B1, &yNR)
	result.C0.B1.Mul(&x.C1.B2, &yNR)
	result.C0.B2.Mul(&x.C1.B0, y)
	result.C1.B0.Mul(&x.C0.B2, &yNR)
	result.C1.B1.Mul(&x.C0.B0, y)
	result.C1.B2.Mul(&x.C0.B1, y)
	// Work in a temporary so z may alias x.
	z.Set(&result)
	return z
}
// MulByV set z to x*(y*v) and return z
// here y*v means the E12 element with C0.B1=y and all other components 0.
// yNR is y pre-multiplied by the nonresidue, used where the product reduces
// through the tower (see MulByNonResidue).
func (z *E12) MulByV(x *E12, y *E2) *E12 {
	var result E12
	var yNR E2
	yNR.MulByNonResidue(y)
	result.C0.B0.Mul(&x.C0.B2, &yNR)
	result.C0.B1.Mul(&x.C0.B0, y)
	result.C0.B2.Mul(&x.C0.B1, y)
	result.C1.B0.Mul(&x.C1.B2, &yNR)
	result.C1.B1.Mul(&x.C1.B0, y)
	result.C1.B2.Mul(&x.C1.B1, y)
	// Work in a temporary so z may alias x.
	z.Set(&result)
	return z
}
// MulByV2W set z to x*(y*v^2*w) and return z
// here y*v^2*w means the E12 element with C1.B2=y and all other components 0.
// yNR is y pre-multiplied by the nonresidue, used where the product reduces
// through the tower (see MulByNonResidue).
func (z *E12) MulByV2W(x *E12, y *E2) *E12 {
	var result E12
	var yNR E2
	yNR.MulByNonResidue(y)
	result.C0.B0.Mul(&x.C1.B0, &yNR)
	result.C0.B1.Mul(&x.C1.B1, &yNR)
	result.C0.B2.Mul(&x.C1.B2, &yNR)
	result.C1.B0.Mul(&x.C0.B1, &yNR)
	result.C1.B1.Mul(&x.C0.B2, &yNR)
	result.C1.B2.Mul(&x.C0.B0, y)
	// Work in a temporary so z may alias x.
	z.Set(&result)
	return z
}
// MulBy034 multiplication by sparse element
func (z *E12) MulBy034(c0, c3, c4 *E2) *E12 {
var z0, z1, z2, z3, z4, z5, tmp1, tmp2 E2
var t [12]E2
z0 = z.C0.B0
z1 = z.C0.B1
z2 = z.C0.B2
z3 = z.C1.B0
z4 = z.C1.B1
z5 = z.C1.B2
tmp1.MulByNonResidue(c3)
tmp2.MulByNonResidue(c4)
t[0].Mul(&tmp1, &z5)
t[1].Mul(&tmp2, &z4)
t[2].Mul(c3, &z3)
t[3].Mul(&tmp2, &z5)
t[4].Mul(c3, &z4)
t[5].Mul(c4, &z3)
t[6].Mul(c3, &z0)
t[7].Mul(&tmp2, &z2)
t[8].Mul(c3, &z1)
t[9].Mul(c4, &z0)
t[10].Mul(c3, &z2)
t[11].Mul(c4, &z1)
z.C0.B0.Mul(c0, &z0).
Add(&z.C0.B0, &t[0]).
Add(&z.C0.B0, &t[1])
z.C0.B1.Mul(c0, &z1).
Add(&z.C0.B1, &t[2]).
Add(&z.C0.B1, &t[3])
z.C0.B2.Mul(c0, &z2).
Add(&z.C0.B2, &t[4]).
Add(&z.C0.B2, &t[5])
z.C1.B0.Mul(c0, &z3).
Add(&z.C1.B0, &t[6]).
Add(&z.C1.B0, &t[7])
z.C1.B1.Mul(c0, &z4).
Add(&z.C1.B1, &t[8]).
Add(&z.C1.B1, &t[9])
z.C1.B2.Mul(c0, &z5).
Add(&z.C1.B2, &t[10]).
Add(&z.C1.B2, &t[11])
return z
} | ecc/bls12-377/internal/fptower/e12_pairing.go | 0.589244 | 0.401805 | e12_pairing.go | starcoder |
package main
import (
"fmt"
"math/rand"
"time"
"github.com/qeedquan/go-media/math/f64"
)
func main() {
	// Seed the global PRNG so each run samples different points.
	rand.Seed(time.Now().UnixNano())
	ex1()
	ex2()
}
// OpenGL and other rendering APIs expect
// the coordinate system to be between [-1, 1],
// and we need to remap it to [0,W]x[0,H] at
// the end. This is the standard equation for remapping:
// [-1, 1] -> [0, W]
// [-1, 1] -> [0, H]
// [0, 1] -> [0, W]
// [0, 1] -> [0, H]
func ex1() {
	fmt.Printf("EX1:\n\n")
	const (
		width  = 1024.0
		height = 768.0
	)
	for i := 0; i < 10; i++ {
		// Sample a point in NDC space [-1, 1].
		nx := rand.Float64()*2 - 1
		ny := rand.Float64()*2 - 1
		// Direct remap: scale by the half-extent, then shift.
		sx := nx*(width/2) + width/2
		sy := ny*(height/2) + height/2
		// Two-step remap: [-1, 1] -> [0, 1], then scale to pixels.
		ux := f64.LinearRemap(nx, -1, 1, 0, 1)
		uy := f64.LinearRemap(ny, -1, 1, 0, 1)
		px := ux * width
		py := uy * height
		// Both approaches agree here because no sign flip is involved.
		fmt.Printf("(%f, %f) (%f, %f)\n", nx, ny, sx, sy)
		fmt.Printf("(%f, %f) (%f, %f)\n", ux, uy, px, py)
		fmt.Printf("\n")
	}
}
// If we add a perspective divide into the mix, the
// linear remapping from [-1, 1] -> [0, 1] and then
// calculating the coordinate based on that are not
// equivalent, as the projection back into screen space shows.
// This is due to a sign flip from [-1, 1]: if the mapping
// were from [0, 1] -> [0, C], where the range does not flip
// sign, then the values produced would be the same, but they become
// different ratio values when dealing with a sign flip.
// If you were to draw this out with a set of points and
// vary the z parameter, the [-1, 1] center is correct
// as it focuses on (0, 0) as the center of the screen; if you use
// [0, 1] then the center is the top left when drawn top-left to bottom-right.
// [-C, C] -> [-1, 1] will work as in example 1 because there is no sign flip.
// More generally speaking, if the left bound and right bound are scaled
// by the same constant when linearly remapping to a new range, then the element
// divisions are the same; if the left side's scaling ratio differs from the
// right side's, then the division will produce different results, i.e. [-1000, 50] -> [-1, 1]
// will produce different results even though both bounds have the same sign.
func ex2() {
fmt.Printf("EX2:\n\n")
w := 1280.0
h := 800.0
for i := 0; i < 10; i++ {
x := rand.Float64()*2 - 1
y := rand.Float64()*2 - 1
z := rand.Float64()*2 - 1
hw := w / 2
hh := h / 2
px := (x/z)*hw + hw
py := (y/z)*hh + hh
xx := f64.LinearRemap(x, -1, 1, 0, 1)
yy := f64.LinearRemap(y, -1, 1, 0, 1)
zz := f64.LinearRemap(z, -1, 1, 0, 1)
ppx := (xx / zz) * w
ppy := (yy / zz) * h
x1 := x / z
y1 := y / z
x2 := xx / zz
y2 := yy / zz
fmt.Printf("(%f, %f) (%f, %f)\n", x, y, px, py)
fmt.Printf("(%f, %f) (%f, %f)\n", xx, yy, ppx, ppy)
fmt.Printf("(%f, %f) (%f, %f)\n", x1, y1, x2, y2)
fmt.Printf("\n")
}
} | math/linear_remap.go | 0.553988 | 0.455683 | linear_remap.go | starcoder |
package schedule
import "time"
// Schedule is the struct used to represent a set of retrievable time.Time structs.
type Schedule struct {
	at             []time.Time     // explicit, strictly increasing dates (set by At/In)
	crn            *CronExpression // optional expression that takes over once `at` is exhausted
	crnI           *CronInstance   // instance that walks the cron expression
	followingIndex int             // index into `at` of the current date; At/In start it at -1
}
// At creates a new schedule that produces the dates provided.
// The dates must be strictly increasing; otherwise it panics.
func At(at ...time.Time) *Schedule {
	if len(at) == 0 {
		panic("schedule: at least one time must be provided")
	}
	sch := &Schedule{
		at:             make([]time.Time, len(at)),
		followingIndex: -1,
	}
	var prev time.Time
	for i, t := range at {
		// Each date must come strictly after the previous one.
		if !t.After(prev) {
			panic("schedule: time order provided is invalid")
		}
		sch.at[i] = t
		prev = t
	}
	return sch
}
// In creates a new schedule that produces dates based on provided durations.
// The durations are added sequentially.
// Example: In(time.Second, time.Minute):
// date = time.Now().Add(time.Second);
// date = date.Add(time.Minute).
func In(in ...time.Duration) *Schedule {
	if len(in) == 0 {
		panic("schedule: at least one duration must be provided")
	}
	sch := &Schedule{
		at:             make([]time.Time, len(in)),
		followingIndex: -1,
	}
	// Each duration offsets the previously produced date.
	cursor := time.Now()
	for i, d := range in {
		cursor = cursor.Add(d)
		sch.at[i] = cursor
	}
	return sch
}
// As creates a new schedule that produces dates based on the provided CronExpression.
// Example: As(Cron().EveryDay()):
// date = 00:00:00 of the following day;
// ...
func As(crn *CronExpression) *Schedule {
	// Prime the instance with its first date; a failure here means the
	// expression can never fire.
	crnI := crn.NewInstance(time.Now())
	if err := crnI.Next(); err != nil {
		panic("schedule: invalid CronExpression provided")
	}
	// NOTE(review): followingIndex is left at its zero value (0) here while
	// At/In start it at -1 — confirm this asymmetry is intentional.
	return &Schedule{
		crn:  crn,
		crnI: crnI,
	}
}
// AddCron is used to setup a CronExpression that starts operating after the scheduled times pass.
// Example: In(time.Hour * 24 * 7).AddCron(Cron().EveryDay()):
// date = time.Now().Add(time.Hour * 24 * 7);
// date = 00:00:00 of the following day;
// ...
func (sch *Schedule) AddCron(crn *CronExpression) {
	// The instance itself is created lazily by Next once `at` is exhausted.
	sch.crn = crn
}
// Next is used to determine the following date to be produced.
// It walks the explicit `at` dates first and then, if a CronExpression was
// provided, hands over to it; otherwise it returns OutdatedError.
func (sch *Schedule) Next() error {
	// Still inside the explicit dates: just advance the index.
	if sch.followingIndex < len(sch.at)-1 {
		sch.followingIndex++
		return nil
	}
	if sch.crn == nil {
		return OutdatedError
	}
	if sch.crnI == nil {
		// First hand-over: anchor the cron instance at the last explicit date
		// and move the index past the `at` slice so Following() reads crnI.
		sch.crnI = sch.crn.NewInstance(sch.at[sch.followingIndex])
		sch.followingIndex++
	}
	return sch.crnI.Next()
}
// Following returns the determined following date.
func (sch *Schedule) Following() time.Time {
if sch.followingIndex < 0 {
return time.Time{}
}
if sch.followingIndex < len(sch.at) {
return sch.at[sch.followingIndex]
}
return sch.crnI.Following()
} | schedule.go | 0.604866 | 0.409457 | schedule.go | starcoder |
package mgl
import (
"bytes"
"fmt"
"math"
"text/tabwriter"
)
// Mat4 is a 4x4 matrix of float32 stored in column-major order.
type Mat4 [16]float32

// Identity returns the 4x4 identity matrix.
func Identity() Mat4 {
	var m Mat4
	m[0], m[5], m[10], m[15] = 1, 1, 1, 1
	return m
}
// String pretty-prints the matrix, one row per line, columns aligned.
func (m Mat4) String() string {
	var out bytes.Buffer
	tw := tabwriter.NewWriter(&out, 4, 4, 1, ' ', tabwriter.AlignRight)
	for r := 0; r < 4; r++ {
		for _, v := range m.Row(r) {
			fmt.Fprintf(tw, "%f\t", v)
		}
		fmt.Fprintln(tw, "")
	}
	tw.Flush()
	return out.String()
}
// Row returns the given row; storage is column-major, so row elements
// are 4 apart in the backing array.
func (m Mat4) Row(row int) Vec4 {
	return Vec4{m[row+0], m[row+4], m[row+8], m[row+12]}
}
// Rows returns all four rows of the matrix.
func (m Mat4) Rows() (row0, row1, row2, row3 Vec4) {
	return m.Row(0), m.Row(1), m.Row(2), m.Row(3)
}
// Col returns the given column; columns are stored contiguously.
func (m Mat4) Col(col int) Vec4 {
	return Vec4{m[col*4+0], m[col*4+1], m[col*4+2], m[col*4+3]}
}
// Cols returns all four columns of the matrix.
func (m Mat4) Cols() (col0, col1, col2, col3 Vec4) {
	return m.Col(0), m.Col(1), m.Col(2), m.Col(3)
}
// MulMat4 returns the matrix product m1 * m2 (column-major storage:
// element (r, c) lives at index c*4+r).
func (m1 Mat4) MulMat4(m2 Mat4) Mat4 {
	var out Mat4
	for c := 0; c < 4; c++ {
		for r := 0; r < 4; r++ {
			// Dot product of row r of m1 with column c of m2, accumulated
			// in the same left-to-right order as the unrolled original.
			sum := m1[r] * m2[c*4]
			for k := 1; k < 4; k++ {
				sum += m1[k*4+r] * m2[c*4+k]
			}
			out[c*4+r] = sum
		}
	}
	return out
}
// MulVec4 returns the matrix-vector product mat * vec.
func (mat Mat4) MulVec4(vec Vec4) Vec4 {
	var out Vec4
	for r := 0; r < 4; r++ {
		// Dot product of row r with vec, accumulated left to right.
		sum := mat[r] * vec[0]
		for k := 1; k < 4; k++ {
			sum += mat[k*4+r] * vec[k]
		}
		out[r] = sum
	}
	return out
}
// Translate returns m multiplied by a translation of (x, y, z).
func (m Mat4) Translate(x, y, z float32) Mat4 {
	// Column-major: the translation lives in the fourth column.
	t := Identity()
	t[12], t[13], t[14] = x, y, z
	return m.MulMat4(t)
}
// TranslateVec3 returns m multiplied by a translation of v.
func (m Mat4) TranslateVec3(v Vec3) Mat4 {
	// Column-major: the translation lives in the fourth column.
	t := Identity()
	t[12], t[13], t[14] = v[0], v[1], v[2]
	return m.MulMat4(t)
}
// Scale returns m multiplied by a non-uniform scale of (x, y, z).
func (m Mat4) Scale(x, y, z float32) Mat4 {
	// The scale factors sit on the diagonal.
	s := Identity()
	s[0], s[5], s[10] = x, y, z
	return m.MulMat4(s)
}
func (m Mat4) Rotate(angle Radian, axis Vec3) Mat4 {
a := axis.Normalize()
c := float32(math.Cos(float64(angle)))
s := float32(math.Sin(float64(angle)))
d := 1 - c
return m.MulMat4(Mat4{
c + d*a[0]*a[0],
0 + d*a[1]*a[0] - s*a[2],
0 + d*a[2]*a[0] + s*a[1],
0,
0 + d*a[0]*a[1] + s*a[2],
c + d*a[1]*a[1],
0 + d*a[2]*a[1] - s*a[0],
0,
0 + d*a[0]*a[1] - s*a[1],
0 + d*a[1]*a[2] + s*a[0],
c + d*a[2]*a[2],
0,
0, 0, 0, 1,
})
} | mgl/mat4.go | 0.560614 | 0.549641 | mat4.go | starcoder |
package avltree
import (
"fmt"
"strconv"
"github.com/RedAFD/treeprint"
)
// AVL tree (self-balancing binary search tree).
// Tree tree structure
type Tree struct {
	root  *Node // entry point; updated after every rebalance
	count uint  // number of nodes currently in the tree
}
// Node tree node structure
type Node struct {
	Key    int
	Value  interface{}
	Height uint // height of the subtree rooted here (a leaf has height 1)
	Left   *Node
	Right  *Node
	Parent *Node // nil for the root
}
// Len total count of the tree nodes.
// O(1): the count is maintained by Append and Remove.
func (t *Tree) Len() uint {
	return t.count
}
// Height tree height.
// Returns 0 for an empty tree (the previous version dereferenced a nil
// root and panicked).
func (t *Tree) Height() uint {
	if t.root == nil {
		return 0
	}
	return t.root.Height
}
// Entry get entry node.
// May be nil when the tree is empty.
func (t *Tree) Entry() *Node {
	return t.root
}
// Search search node from the tree by key.
// Returns nil when the key is not present.
func (t *Tree) Search(key int) *Node {
	for node := t.root; node != nil; {
		switch {
		case key < node.Key:
			node = node.Left
		case key > node.Key:
			node = node.Right
		default:
			return node
		}
	}
	return nil
}
// Append append a new node to the tree.
// If the key already exists, only its value is replaced.
func (t *Tree) Append(key int, val interface{}) {
	// search node
	// Walk down to the insertion point, remembering the parent.
	current := &t.root
	var parent *Node
	for *current != nil {
		if key < (*current).Key {
			parent = (*current)
			current = &(*current).Left
		} else if key > (*current).Key {
			parent = (*current)
			current = &(*current).Right
		} else {
			break
		}
	}
	if *current == nil {
		// New key: link a fresh leaf, then walk back up fixing heights and
		// rebalancing every ancestor.
		node := &Node{
			Key:    key,
			Value:  val,
			Height: 1,
			Parent: parent,
		}
		*current = node
		t.count++
		for node.Parent != nil {
			node = node.Parent
			node.correctHeight()
			node.rebalance()
		}
		// The walk ends at the (possibly rotated, new) root.
		t.root = node
	} else {
		// Existing key: overwrite the value only.
		(*current).Value = val
	}
}
// Remove remove a specific node from the tree.
// No-op when the key is not present.
func (t *Tree) Remove(key int) {
	// find node to be removed
	var remNode **Node
	for remNode = &t.root; *remNode != nil; {
		if key < (*remNode).Key {
			remNode = &(*remNode).Left
		} else if key > (*remNode).Key {
			remNode = &(*remNode).Right
		} else {
			break
		}
	}
	if *remNode == nil {
		return
	}
	// find replacement node and take out
	// (in-order successor when a right subtree exists, otherwise the left child)
	var repNode *Node
	if (*remNode).Right != nil {
		// Leftmost node of the right subtree, then unlink it from its parent.
		repNode = (*remNode).Right
		for repNode.Left != nil {
			repNode = repNode.Left
		}
		if repNode.Parent == *remNode {
			repNode.Parent.Right = repNode.Right
			if repNode.Right != nil {
				repNode.Right.Parent = repNode.Parent
			}
		} else {
			repNode.Parent.Left = repNode.Right
			if repNode.Right != nil {
				repNode.Right.Parent = repNode.Parent
			}
		}
	} else if (*remNode).Left != nil {
		// No right subtree: splice the left child's children into remNode.
		repNode = (*remNode).Left
		(*remNode).Left = repNode.Left
		if repNode.Left != nil {
			repNode.Left.Parent = *remNode
		}
		(*remNode).Right = repNode.Right
		if repNode.Right != nil {
			repNode.Right.Parent = *remNode
		}
	}
	// replace node
	// Rather than relinking, copy the replacement's payload into remNode;
	// a leaf with no replacement is simply detached.
	var dirtyNode *Node
	if repNode != nil {
		dirtyNode = repNode
		(*remNode).Key = repNode.Key
		(*remNode).Value = repNode.Value
	} else {
		dirtyNode = *remNode
		*remNode = nil
	}
	t.count--
	// height recorrect and node rebalance
	// Walk from the structurally changed position back to the root.
	for dirtyNode.Parent != nil {
		dirtyNode.Parent.correctHeight()
		dirtyNode.Parent.rebalance()
		dirtyNode = dirtyNode.Parent
	}
	// When the last node was removed t.root is already nil and stays nil;
	// otherwise dirtyNode has walked up to the (possibly new) root.
	if t.root != nil {
		t.root = dirtyNode
	}
}
// rebalance restores the AVL invariant at n after an insert or delete.
// A left-right or right-left imbalance is first straightened by a
// preparatory rotation, then a single rotation fixes it.
// (The unused variadic parameter was removed; this method is unexported
// and every caller in this file invokes it with no arguments.)
func (n *Node) rebalance() {
	difference := n.leafHeightDifference()
	if difference > 1 {
		// Left subtree too tall.
		if n.Left.leafHeightDifference() < 0 {
			n.prepareRotateRight() // left-right case
		}
		n.rotateRight()
	} else if difference < -1 {
		// Right subtree too tall.
		if n.Right.leafHeightDifference() > 0 {
			n.prepareRotateLeft() // right-left case
		}
		n.rotateLeft()
	}
}
// leafHeightDifference returns the balance factor of n:
// height(left subtree) - height(right subtree), with nil counting as 0.
func (n *Node) leafHeightDifference() float64 {
	var left, right float64
	if n.Left != nil {
		left = float64(n.Left.Height)
	}
	if n.Right != nil {
		right = float64(n.Right.Height)
	}
	return left - right
}
// prepareRotateLeft converts a right-left imbalance at n into a straight
// right-right case by rotating n.Right's left child up into n.Right's place.
func (n *Node) prepareRotateLeft() {
	riseNode := n.Right.Left
	fallNode := n.Right
	n.Right, riseNode.Parent = riseNode, n
	// The rising node's right subtree becomes the falling node's left subtree.
	fallNode.Left = riseNode.Right
	if riseNode.Right != nil {
		riseNode.Right.Parent = fallNode
	}
	riseNode.Right, fallNode.Parent = fallNode, riseNode
	// Heights are fixed bottom-up: the fallen node first.
	fallNode.correctHeight()
	riseNode.correctHeight()
}
// rotateLeft performs a single left rotation at n: n.Right rises to take
// n's place and n becomes its left child.
func (n *Node) rotateLeft() {
	riseNode := n.Right
	// Re-point n's parent at the rising node (or detach if n was the root).
	if n.Parent == nil {
		riseNode.Parent = nil
	} else if n.Parent.Left == n {
		n.Parent.Left, riseNode.Parent = riseNode, n.Parent
	} else {
		n.Parent.Right, riseNode.Parent = riseNode, n.Parent
	}
	// The rising node's left subtree becomes n's right subtree.
	n.Right = riseNode.Left
	if riseNode.Left != nil {
		riseNode.Left.Parent = n
	}
	riseNode.Left, n.Parent = n, riseNode
	// Heights are fixed bottom-up: n first, then the new subtree root.
	n.correctHeight()
	riseNode.correctHeight()
}
// prepareRotateRight converts a left-right imbalance at n into a straight
// left-left case by rotating n.Left's right child up into n.Left's place.
func (n *Node) prepareRotateRight() {
	riseNode := n.Left.Right
	fallNode := n.Left
	n.Left, riseNode.Parent = riseNode, n
	// The rising node's left subtree becomes the falling node's right subtree.
	fallNode.Right = riseNode.Left
	if riseNode.Left != nil {
		riseNode.Left.Parent = fallNode
	}
	riseNode.Left, fallNode.Parent = fallNode, riseNode
	// Heights are fixed bottom-up: the fallen node first.
	fallNode.correctHeight()
	riseNode.correctHeight()
}
// rotateRight performs a single right rotation at n: n.Left rises to take
// n's place and n becomes its right child.
func (n *Node) rotateRight() {
	riseNode := n.Left
	// Re-point n's parent at the rising node (or detach if n was the root).
	if n.Parent == nil {
		riseNode.Parent = nil
	} else if n.Parent.Left == n {
		n.Parent.Left, riseNode.Parent = riseNode, n.Parent
	} else {
		n.Parent.Right, riseNode.Parent = riseNode, n.Parent
	}
	// The rising node's right subtree becomes n's left subtree.
	n.Left = riseNode.Right
	if riseNode.Right != nil {
		riseNode.Right.Parent = n
	}
	riseNode.Right, n.Parent = n, riseNode
	// Heights are fixed bottom-up: n first, then the new subtree root.
	n.correctHeight()
	riseNode.correctHeight()
}
// correctHeight recomputes n's height as 1 plus the taller child's height
// (a node with no children gets height 1).
func (n *Node) correctHeight() {
	var tallest uint
	if n.Left != nil {
		tallest = n.Left.Height
	}
	if n.Right != nil && n.Right.Height > tallest {
		tallest = n.Right.Height
	}
	n.Height = tallest + 1
}
// NewTree create a new tree object.
// (The zero value is also ready to use; this is just a convenience constructor.)
func NewTree() *Tree {
	return &Tree{}
}
// GetKey implement treeprint — exposes the node key to the printer.
func (n *Node) GetKey() interface{} {
	return n.Key
}
// GetValue implement treeprint.
// For debugging it renders "(height;parentKey)" instead of n.Value.
func (n *Node) GetValue() interface{} {
	parentKey := ""
	if n.Parent != nil {
		parentKey = ";" + strconv.Itoa(n.Parent.Key)
	}
	return fmt.Sprintf("(%v%s)", n.Height, parentKey) // n.Value
}
// RangeNode implement treeprint — yields the two children (possibly nil).
// The channel is buffered with capacity 2 so both sends complete without
// a consumer running yet.
func (n *Node) RangeNode() chan treeprint.TreeNode {
	c := make(chan treeprint.TreeNode, 2)
	c <- n.Left
	c <- n.Right
	close(c)
	return c
}
package orderbook
import (
"github.com/emirpasic/gods/maps/treemap"
)
// OrderBook represents both bids (BUY) and asks (SELL).
// For each one it will hold a height balanced binary search tree.
// The order book is a tree in which the price are keys and the values are volumes.
type OrderBook struct {
	bids       *treemap.Map // Tree to store bids (BUY) in which the key is the price and value is Volume
	asks       *treemap.Map // Tree to store asks (SELL) in which the key is the price and value is Volume
	instrument Instrument   // the instrument this book is for
}
// NewOrderBook builds an empty order book for the given instrument.
func NewOrderBook(instrument Instrument) *OrderBook {
	return &OrderBook{
		// Bids use the opposite comparator so that Min() yields the best
		// (highest) bid; asks use the natural order so Min() is the lowest ask
		// (see GetBestBid / GetBestAsk).
		bids:       treemap.NewWith(OppositePriceComparator),
		asks:       treemap.NewWith(NaturalPriceComparator),
		instrument: instrument,
	}
}
// GetBidSize returns the bid (BUY) volume size otherwise 0
func (o *OrderBook) GetBidSize(price Price) Volume {
	if size, ok := o.bids.Get(price); ok {
		return size.(Volume)
	}
	return Volume(0)
}
// GetAskSize returns the ask (SELL) volume size otherwise 0
func (o *OrderBook) GetAskSize(price Price) Volume {
	if size, ok := o.asks.Get(price); ok {
		return size.(Volume)
	}
	return Volume(0)
}
// Add adds a BUY or SELL order into the order book according to its price.
// If a level already exists at that price, its volume is increased.
// (Replaces the non-idiomatic split declaration/assignment of `volume`
// with the comma-ok lookup pattern.)
func (o *OrderBook) Add(side Side, price Price, quantity Volume) {
	book := o.selectSide(side)
	volume := Volume(0)
	if v, ok := book.Get(price); ok {
		volume = v.(Volume)
	}
	book.Put(price, volume+quantity)
}
// Update updates a BUY or SELL node (price) volume.
// If the new volume is higher than 0, then it will be added.
// If the new volume is lower or equal than zero, the node itself will be removed.
func (o *OrderBook) Update(side Side, price Price, quantity Volume) {
	book := o.selectSide(side)
	current := Volume(0)
	if v, ok := book.Get(price); ok {
		current = v.(Volume)
	}
	if updated := current + quantity; updated > Volume(0) {
		book.Put(price, updated)
	} else {
		// The level is empty (or negative): drop it entirely.
		book.Remove(price)
	}
}
// GetBestBid returns the best (highest) bid and its volume.
// Returns zero values when the book side is missing or empty.
func (o *OrderBook) GetBestBid() (Price, Volume) {
	if o.bids == nil {
		return Price(0), Volume(0)
	}
	// The bids tree uses OppositePriceComparator (see NewOrderBook),
	// so Min() is the best — i.e. highest — bid.
	k, v := o.bids.Min()
	if k == nil {
		// empty side
		return Price(0), Volume(0)
	}
	return k.(Price), v.(Volume)
}
// GetBestAsk returns the best ask and its volume.
// Returns zero values when the book side is missing or empty.
// (The asks tree uses the natural order, so Min() is the lowest ask.)
func (o *OrderBook) GetBestAsk() (Price, Volume) {
	if o.asks == nil {
		return Price(0), Volume(0)
	}
	k, v := o.asks.Min()
	if k == nil {
		// empty side
		return Price(0), Volume(0)
	}
	return k.(Price), v.(Volume)
}
func (o *OrderBook) selectSide(side Side) *treemap.Map {
if side == BUY {
return o.bids
}
return o.asks
} | order_book.go | 0.760562 | 0.63141 | order_book.go | starcoder |
package main
import (
"image/color"
"math"
"time"
"github.com/faiface/pixel"
"github.com/faiface/pixel/pixelgl"
opensimplex "github.com/ojrac/opensimplex-go"
"golang.org/x/image/colornames"
)
// layerNoise creates noise from multiple layers of simplex noise.
// See https://cmaher.github.io/posts/working-with-simplex-noise/ for the original function.
//
// Each successive octave doubles the frequency and scales its amplitude by
// `persistence`; the weighted sum is normalized and remapped into
// [low, high]. Reads the package-level `noise` generator.
func layerNoise(layers int, x, y, persistence, freq, low, high float64) (result float64) {
	ampSum := 0.0 // total weight, used to normalize the sum
	amp := 1.0
	for i := 0; i < layers; i++ {
		result += noise.Eval2(x*freq, y*freq) * amp
		ampSum += amp
		amp *= persistence
		freq *= 2
	}
	result /= ampSum
	// Remap the normalized noise from [-1, 1] to [low, high].
	result = result*(high-low)/2 + (high+low)/2
	return
}
// brighten scales val by factor and saturates at 255 (and clamps at 0 for
// negative factors).
//
// The previous version detected overflow by comparing the converted uint8
// with the input, but per the Go spec the result of converting a float64
// outside the uint8 range is implementation-dependent, so that wraparound
// check is not guaranteed to work. Clamp in float space instead.
func brighten(val uint8, factor float64) uint8 {
	r := float64(val) * factor
	if r >= 255 {
		return 255
	}
	if r < 0 {
		return 0
	}
	return uint8(r)
}
// genGradientDisc renders a disc of colour c with a radial fall-off:
// fully solid out to radius*density, fading linearly to transparent
// at the rim.
func genGradientDisc(radius, density float64, c color.Color) (canvas *pixelgl.Canvas) {
	cr, cg, cb, ca := c.RGBA()
	size := int(radius*2 + 1)
	canvas = pixelgl.NewCanvas(pixel.R(0, 0, float64(size), float64(size)))
	pixels := canvas.Pixels()
	// Normalize the 16-bit colour channels to [0, 1].
	ncol := pixel.RGBA{
		R: float64(cr) / 0xffff,
		G: float64(cg) / 0xffff,
		B: float64(cb) / 0xffff,
		A: float64(ca) / 0xffff,
	}
	for y := 0; y < size; y++ {
		for x := 0; x < size; x++ {
			// Distance of this pixel from the disc centre.
			dist := pixel.V(float64(x), float64(y)).Sub(pixel.V(radius, radius)).Len()
			// 0 inside the solid core, rising to 1 at (and beyond) the rim.
			factor := (dist - radius*density) / (radius * (1 - density))
			factor = math.Min(1, math.Max(0, factor)) // clamp
			index := y*size*4 + x*4                   // 4 bytes (RGBA) per pixel
			pixels[index] = uint8(ncol.R * (1 - factor) * 255)
			pixels[index+1] = uint8(ncol.G * (1 - factor) * 255)
			pixels[index+2] = uint8(ncol.B * (1 - factor) * 255)
			pixels[index+3] = uint8(ncol.A * (1 - factor) * 255)
		}
	}
	canvas.SetPixels(pixels)
	return
}
// genPlanet renders a planet sprite of the given radius: a white gradient
// disc shaded by layered simplex noise, blending a per-row band component
// (nn) with per-pixel noise (nnn).
func genPlanet(radius float64) (canvas *pixelgl.Canvas) {
	// Reseed the package-level noise generator so each planet is unique.
	noise = opensimplex.NewWithSeed(time.Now().UnixNano())
	size := int(radius*2 + 1)
	canvas = genGradientDisc(radius, 0.98, colornames.White)
	pixels := canvas.Pixels()
	// Noise frequency, scaled down for larger planets.
	freq := radius / (1000 * (radius / 40) * (radius / 40))
	for y := 0; y < size; y++ {
		// Row-wide noise sample; produces horizontal banding.
		nn := layerNoise(16, 0, float64(y), 0.5, freq, 0.25, 1)
		for x := 0; x < size; x++ {
			index := y*size*4 + x*4
			r, g, b, a := float64(pixels[index]), float64(pixels[index+1]), float64(pixels[index+2]), float64(pixels[index+3])
			if a > 0 { // only shade pixels inside the disc
				nnn := layerNoise(16, float64(x), float64(y), 0.5, freq, 0, 1)
				n := (nnn + nn) / 2
				pixels[index] = brighten(uint8(r*n), 1.5)
				pixels[index+1] = brighten(uint8(g*n), 1.5)
				pixels[index+2] = brighten(uint8(b*n), 1.5)
				pixels[index+3] = 255 // Make the planet opaque
			}
		}
	}
	canvas.SetPixels(pixels)
	return
}
package parse
import "unicode"
// This file represents all of the states that can possibly occur
// within a field of a cron expression. This is a state machine; it does not
// validate the values of the items, only that the syntax of the cron statement
// field matches the grammar of a cron field.
// lexField is the entry state for a cron field: the first rune must begin
// either a wildcard ("*") or a number; anything else — including an empty
// input — is an error.
func lexField(t *Tokeniser) StateFunc {
	curr := t.Next()
	switch {
	case curr == '*':
		return lexAny
	case unicode.IsDigit(curr):
		return lexNumber
	case curr == eof:
		return t.Errorf("input cannot be empty")
	}
	return t.Errorf(`(%c) is unexpected at the start of a statement, expected (* or [0-9]+)`, curr)
}
// lexComma emits the list separator and loops back to the start state,
// since each comma begins a fresh sub-expression.
func lexComma(t *Tokeniser) StateFunc {
	t.Emit(TokenTypeComma)
	return lexField
}
// lexAny handles what may follow a "*": a step ("/"), a list separator
// (","), or the end of the input.
func lexAny(t *Tokeniser) StateFunc {
	t.Emit(TokenTypeAny)
	next := t.Next()
	switch next {
	case ',':
		return lexComma
	case '/':
		return lexStep
	case eof:
		return nil // end of field: accept
	}
	return t.Errorf(`(%c) is unexpected after (*), only (/,) expected`, next)
}
// lexStep handles the "/n" suffix: the slash must be followed by a number,
// after which only a list separator or the end of the input is valid.
func lexStep(t *Tokeniser) StateFunc {
	t.Emit(TokenTypeSlash)
	next := t.Next()
	if !unicode.IsDigit(next) {
		return t.Errorf("(%c) is unexpected after a step, only numbers are expected", next)
	}
	t.AcceptNumber()
	t.Emit(TokenTypeNumber)
	next = t.Next()
	switch next {
	case ',':
		return lexComma
	case eof:
		return nil // end of field: accept
	}
	return t.Errorf("(%c) is unexpected after a step, only (,) is expected", next)
}
// lexNumber consumes the rest of a number (its first digit was read by the
// caller) and dispatches on what follows: a list separator, a range ("-"),
// a step ("/"), or the end of the input.
func lexNumber(t *Tokeniser) StateFunc {
	t.AcceptNumber()
	t.Emit(TokenTypeNumber)
	next := t.Next()
	switch next {
	case ',':
		return lexComma
	case '-':
		return lexRange
	case '/':
		return lexStep
	case eof:
		return nil // end of field: accept
	}
	return t.Errorf("(%c) is unexpected after a number, only (,-/) expected", next)
}
// lexRange handles the "-n" part of a range: the dash must be followed by a
// number, after which a step, a list separator, or the end of input may
// follow (ranges cannot be chained).
func lexRange(t *Tokeniser) StateFunc {
	t.Emit(TokenTypeDash)
	next := t.Next()
	if !unicode.IsDigit(next) {
		return t.Errorf("(%c) is unexpected in a range, only digits are expected", next)
	}
	t.AcceptNumber()
	t.Emit(TokenTypeNumber)
	next = t.Next()
	switch next {
	case '/':
		return lexStep
	case ',':
		return lexComma
	case eof:
		return nil // end of field: accept
	}
	return t.Errorf(`(%c) is unexpected after a range, only (/,) expected`, next)
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.