package models
import (
"fmt"
"strings"
)
// matrix is a confusion matrix keyed as matrix[expected][predicted] = count.
type matrix map[string]map[string]int

// BuildMatrix builds a confusion matrix from parallel slices of expected and
// predicted labels. It returns the matrix, the distinct labels in first-seen
// order (scanning expected first, then predicted), and an error when the two
// slices differ in length.
func (m *Model) BuildMatrix(expected, predicted []string) (matrix, []string, error) {
	if len(expected) != len(predicted) {
		return nil, nil, fmt.Errorf("input slices are not equal length; expected length: %v, predicted length: %v", len(expected), len(predicted))
	}
	// Collect the distinct labels with a set. The original code concatenated
	// the inputs via append(expected, predicted...), which can overwrite the
	// caller's backing array when expected has spare capacity; scanning the
	// two slices separately avoids that, and the set turns the O(n^2)
	// duplicate scan into O(n).
	seen := make(map[string]struct{}, len(expected))
	distinctAssignees := []string{}
	for _, label := range expected {
		if _, ok := seen[label]; !ok {
			seen[label] = struct{}{}
			distinctAssignees = append(distinctAssignees, label)
		}
	}
	for _, label := range predicted {
		if _, ok := seen[label]; !ok {
			seen[label] = struct{}{}
			distinctAssignees = append(distinctAssignees, label)
		}
	}
	// Tally each (expected, predicted) pair.
	outputMatrix := make(matrix, len(distinctAssignees))
	for i := range expected {
		row, ok := outputMatrix[expected[i]]
		if !ok {
			row = make(map[string]int)
			outputMatrix[expected[i]] = row
		}
		row[predicted[i]]++
	}
	return outputMatrix, distinctAssignees, nil
}
// getClassTP returns the true-positive count for class: samples whose
// expected and predicted labels are both class.
func (m matrix) getClassTP(class string) float64 {
	return float64(m[class][class])
}

// getClassTN returns the true-negative count for class: samples whose
// expected and predicted labels are both something other than class.
func (m matrix) getClassTN(class string) float64 {
	total := 0.0
	for expLabel, row := range m {
		if expLabel == class {
			continue
		}
		for predLabel, n := range row {
			if predLabel != class {
				total += float64(n)
			}
		}
	}
	return total
}

// getClassFP returns the false-positive count for class: samples predicted
// as class whose expected label is different.
func (m matrix) getClassFP(class string) float64 {
	total := 0.0
	for expLabel, row := range m {
		if expLabel != class {
			total += float64(row[class])
		}
	}
	return total
}

// getClassFN returns the false-negative count for class: samples expected
// to be class that were predicted as something else.
func (m matrix) getClassFN(class string) float64 {
	total := 0.0
	for predLabel, n := range m[class] {
		if predLabel != class {
			total += float64(n)
		}
	}
	return total
}
// getPrecision returns TP/(TP+FP) for class, rounded via Round.
// Note: yields NaN when the class was never predicted (TP+FP == 0).
func (m matrix) getPrecision(class string) float64 {
	classTP := m.getClassTP(class)
	classFP := m.getClassFP(class)
	return Round(classTP / (classTP + classFP))
}

// getRecall returns TP/(TP+FN) for class, rounded via Round.
// Note: yields NaN when the class never occurs in expected (TP+FN == 0).
func (m matrix) getRecall(class string) float64 {
	classTP := m.getClassTP(class)
	classFN := m.getClassFN(class)
	return Round(classTP / (classTP + classFN))
}
// getAccuracy returns the fraction of samples on the matrix diagonal
// (expected == predicted), rounded via Round.
// Note: yields NaN for an empty matrix (0/0).
func (m matrix) getAccuracy() float64 {
	correct := 0.0
	total := 0.0
	for columnHead, row := range m {
		for rowHead, n := range row {
			if columnHead == rowHead {
				correct += float64(n)
			}
			total += float64(n)
		}
	}
	// correct and total are already float64; the original re-converted them.
	return Round(correct / total)
}

// getTestCount returns the total number of samples recorded in the matrix.
func (m matrix) getTestCount() float64 {
	count := 0.0
	for _, row := range m {
		for _, n := range row {
			count += float64(n)
		}
	}
	return count
}
// fillMatrix densifies the matrix in place: every row gains an explicit
// zero entry for every known label, so lookups never miss. Returns m for
// chaining.
func (m matrix) fillMatrix() matrix {
	for _, row := range m {
		for label := range m {
			if _, present := row[label]; !present {
				row[label] = 0
			}
		}
	}
	return m
}
// getClassF1 returns the F1 score (harmonic mean of precision and recall)
// for class. Note: yields NaN when precision+recall is zero.
func (m matrix) getClassF1(class string) float64 {
	tp := m.getClassTP(class) // computed once; the original called it four times
	p := tp / (tp + m.getClassFP(class))
	r := tp / (tp + m.getClassFN(class))
	return (2 * p * r) / (p + r)
}
// classesEvaluation prints the per-class metric summary for every class to
// stdout.
func (m matrix) classesEvaluation(classes []string) {
	for i := 0; i < len(classes); i++ {
		fmt.Println("Class:", classes[i], "\n", m.ClassSummary(classes[i]))
		//TODO: Fix
		//utils.ModelLog.Debug("Class: " + classes[i] + "\n" + m.ClassSummary(classes[i]))
	}
}
// ClassSummary renders a human-readable block of per-class metrics
// (TP/TN/FP/FN, precision, recall, F1) for the given class.
func (m matrix) ClassSummary(class string) string {
	var b strings.Builder
	b.WriteString("Summary results for class: " + class + "\n")
	b.WriteString("True positives: " + ToString(m.getClassTP(class)) + "\n")
	b.WriteString("True negatives: " + ToString(m.getClassTN(class)) + "\n")
	b.WriteString("False positives: " + ToString(m.getClassFP(class)) + "\n")
	b.WriteString("False negatives: " + ToString(m.getClassFN(class)) + "\n")
	b.WriteString("Precision: " + ToString(m.getPrecision(class)) + "\n")
	b.WriteString("Recall: " + ToString(m.getRecall(class)) + "\n")
	b.WriteString("F1 score: " + ToString(m.getClassF1(class)) + "\n")
	return b.String()
}
func (m matrix) FullSummary() string {
input := []string{"Summary results for full matrix\n",
"Total tests: ", ToString(m.getTestCount()), "\n",
"Total accuracy: ", ToString(m.getAccuracy()), "\n",
}
output := strings.Join(input, " ")
return output
} | models/confuse.go | 0.513425 | 0.491944 | confuse.go | starcoder |
package processor
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/internal/interop"
"github.com/Jeffail/benthos/v3/internal/tracing"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/Jeffail/gabs/v2"
)
//------------------------------------------------------------------------------
func init() {
	// Registers the deprecated process_field processor together with its
	// generated documentation.
	Constructors[TypeProcessField] = TypeSpec{
		constructor: NewProcessField,
		Summary: `
A processor that extracts the value of a field [dot path](/docs/configuration/field_paths)
within payloads according to a specified [codec](#codec), applies a list of
processors to the extracted value and finally sets the field within the original
payloads to the processed result.`,
		Status: docs.StatusDeprecated,
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon("codec", "A [codec](#codec) to use in order to extract (and set) the target field.").HasOptions("json", "metadata"),
			docs.FieldCommon("path", "A [dot path](/docs/configuration/field_paths) pointing to the target field."),
			// NOTE(review): the json codec below also implements an "array"
			// result_type that is not listed here — confirm whether it should
			// be documented.
			docs.FieldCommon(
				"result_type", "The final data type to marshal the processing result into. The `discard` type is a special case that discards the result of the processing steps entirely.",
			).HasOptions("string", "int", "float", "bool", "object", "discard"),
			docs.FieldCommon("processors", "A list of child processors to execute on the extracted value.").Array().HasType(docs.FieldTypeProcessor),
			PartsFieldSpec,
		},
		Description: `
The result can be marshalled into a specific data type with the field
[` + "`result_type`" + `](#result_type).
It's therefore possible to use this codec without any child processors as a way
of casting string values into other types. For example, with an input JSON
document ` + "`{\"foo\":\"10\"}`" + ` it's possible to cast the value of the
field foo to an integer type with:
` + "```yaml" + `
process_field:
path: foo
result_type: int
` + "```" + `
## Codecs
### ` + "`json`" + `
Parses the payload as a JSON document, extracts and sets the field using a dot
notation path.
### ` + "`metadata`" + `
Extracts and sets a metadata value identified by the path field.`,
		Footnotes: `
## Alternatives
The ` + "[`branch` processor](/docs/components/processors/branch)" + ` offers a
more flexible and robust way to perform the actions of this processor.`,
	}
}
//------------------------------------------------------------------------------
// ProcessFieldConfig is a config struct containing fields for the ProcessField
// processor.
type ProcessFieldConfig struct {
	// Parts lists the message part indexes to process; empty means all parts.
	Parts []int `json:"parts" yaml:"parts"`
	// Codec selects how the field is extracted/written: "json" or "metadata".
	Codec string `json:"codec" yaml:"codec"`
	// Path is a dot path identifying the target field (or metadata key).
	Path string `json:"path" yaml:"path"`
	// ResultType is the type the processing result is marshalled back into.
	ResultType string `json:"result_type" yaml:"result_type"`
	// Processors are the child processors applied to the extracted value.
	Processors []Config `json:"processors" yaml:"processors"`
}

// NewProcessFieldConfig returns a default ProcessFieldConfig.
func NewProcessFieldConfig() ProcessFieldConfig {
	return ProcessFieldConfig{
		Parts:      []int{},
		Codec:      "json",
		Path:       "",
		ResultType: "string",
		Processors: []Config{},
	}
}
//------------------------------------------------------------------------------
// processFieldCodec abstracts how the target field is pulled out of a
// message part and how the processed result is written back into it.
type processFieldCodec interface {
	// CreateRequest builds the message part holding the extracted value.
	CreateRequest(types.Part) (types.Part, error)
	// ExtractResult writes the processed value from 'from' back into 'to'.
	ExtractResult(from, to types.Part) error
	// Discard reports whether processing results should be thrown away.
	Discard() bool
}

// ProcessField is a processor that applies a list of child processors to a
// field extracted from the original payload.
type ProcessField struct {
	parts    []int             // target part indexes (empty = all)
	path     []string          // dot path split into segments
	children []types.Processor // child processors applied to the field
	codec    processFieldCodec // extraction/injection strategy

	log log.Modular

	// Metrics counters.
	mCount              metrics.StatCounter
	mErr                metrics.StatCounter
	mErrParse           metrics.StatCounter
	mErrMisaligned      metrics.StatCounter
	mErrMisalignedBatch metrics.StatCounter
	mSent               metrics.StatCounter
	mBatchSent          metrics.StatCounter
}
// NewProcessField returns a ProcessField processor.
func NewProcessField(
	conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
	// Construct the child processors, each namespaced by its list index.
	var children []types.Processor
	for i, pconf := range conf.ProcessField.Processors {
		pMgr, pLog, pStats := interop.LabelChild(fmt.Sprintf("%v", i), mgr, log, stats)
		proc, err := New(pconf, pMgr, pLog, pStats)
		if err != nil {
			return nil, err
		}
		children = append(children, proc)
	}
	// Resolve the configured codec ("json" or "metadata") for the path.
	codec, err := stringToProcessFieldCodec(conf.ProcessField.Path, conf.ProcessField.Codec, conf.ProcessField.ResultType)
	if err != nil {
		return nil, err
	}
	return &ProcessField{
		parts:    conf.ProcessField.Parts,
		path:     strings.Split(conf.ProcessField.Path, "."),
		children: children,
		codec:    codec,
		log:      log,

		mCount:              stats.GetCounter("count"),
		mErr:                stats.GetCounter("error"),
		mErrParse:           stats.GetCounter("error.parse"),
		mErrMisaligned:      stats.GetCounter("error.misaligned"),
		mErrMisalignedBatch: stats.GetCounter("error.misaligned_messages"),
		mSent:               stats.GetCounter("sent"),
		mBatchSent:          stats.GetCounter("batch.sent"),
	}, nil
}
//------------------------------------------------------------------------------
// processFieldJSONCodec extracts/sets a field in a JSON document via a dot
// path, converting the processed result with a configurable marshaller.
type processFieldJSONCodec struct {
	path []string // dot path segments
	// resultMarshaller converts the processed part into the configured
	// result type; nil means the result is discarded entirely.
	resultMarshaller func(p types.Part) (interface{}, error)
}

// newProcessFieldJSONCodec constructs a JSON codec for path, returning an
// error when resultStr is not a recognised result type.
func newProcessFieldJSONCodec(path, resultStr string) (*processFieldJSONCodec, error) {
	var resultMarshaller func(p types.Part) (interface{}, error)
	switch resultStr {
	case "string":
		resultMarshaller = processFieldJSONResultStringMarshaller
	case "int":
		resultMarshaller = processFieldJSONResultIntMarshaller
	case "float":
		resultMarshaller = processFieldJSONResultFloatMarshaller
	case "bool":
		resultMarshaller = processFieldJSONResultBoolMarshaller
	case "object":
		resultMarshaller = processFieldJSONResultObjectMarshaller
	case "array":
		resultMarshaller = processFieldJSONResultArrayMarshaller
	case "discard":
		// nil marshaller doubles as the discard flag (see Discard below).
		resultMarshaller = nil
	default:
		return nil, fmt.Errorf("unrecognised json codec result_type: %v", resultStr)
	}
	return &processFieldJSONCodec{
		path:             strings.Split(path, "."),
		resultMarshaller: resultMarshaller,
	}, nil
}
// CreateRequest copies the source part and replaces its contents with the
// value found at the configured path: string values are set raw (unquoted),
// any other JSON value is set as JSON.
func (p *processFieldJSONCodec) CreateRequest(source types.Part) (types.Part, error) {
	reqPart := source.Copy()
	jObj, err := reqPart.JSON()
	if err != nil {
		return nil, err
	}
	gObj := gabs.Wrap(jObj)
	gTarget := gObj.S(p.path...)
	switch t := gTarget.Data().(type) {
	case string:
		// Strings are handed to child processors raw rather than JSON-quoted.
		reqPart.Set([]byte(t))
	default:
		reqPart.SetJSON(gTarget.Data())
	}
	return reqPart, nil
}

// ExtractResult marshals the processed part 'from' into the configured
// result type and writes it into 'to' at the configured path. Must not be
// called when Discard() is true (resultMarshaller is nil in that mode).
func (p *processFieldJSONCodec) ExtractResult(from, to types.Part) error {
	resVal, err := p.resultMarshaller(from)
	if err != nil {
		return err
	}
	// Deep-copy the destination document before mutating it so shared
	// message data is not modified in place.
	jObj, err := to.JSON()
	if err == nil {
		jObj, err = message.CopyJSON(jObj)
	}
	if err != nil {
		return err
	}
	gObj := gabs.Wrap(jObj)
	gObj.Set(resVal, p.path...)
	return to.SetJSON(gObj.Data())
}

// Discard reports whether results are dropped (result_type: discard).
func (p *processFieldJSONCodec) Discard() bool {
	return p.resultMarshaller == nil
}
// processFieldJSONResultStringMarshaller returns the raw part contents as a
// string.
func processFieldJSONResultStringMarshaller(p types.Part) (interface{}, error) {
	return string(p.Get()), nil
}

// processFieldJSONResultIntMarshaller parses the part contents as a base-10
// integer.
func processFieldJSONResultIntMarshaller(p types.Part) (interface{}, error) {
	return strconv.Atoi(string(p.Get()))
}

// processFieldJSONResultFloatMarshaller parses the part contents as a
// 64-bit float.
func processFieldJSONResultFloatMarshaller(p types.Part) (interface{}, error) {
	return strconv.ParseFloat(string(p.Get()), 64)
}

// processFieldJSONResultBoolMarshaller accepts exactly "true" or "false"
// (deliberately stricter than strconv.ParseBool).
func processFieldJSONResultBoolMarshaller(p types.Part) (interface{}, error) {
	str := string(p.Get())
	if str == "true" {
		return true, nil
	}
	if str == "false" {
		return false, nil
	}
	return nil, fmt.Errorf("value '%v' could not be parsed as bool", str)
}

// processFieldJSONResultObjectMarshaller parses the part as JSON and
// requires the result to be an object (or null).
func processFieldJSONResultObjectMarshaller(p types.Part) (interface{}, error) {
	jVal, err := p.JSON()
	if err != nil {
		return nil, err
	}
	// We consider null as an object
	if jVal == nil {
		return nil, nil
	}
	if jObj, ok := jVal.(map[string]interface{}); ok {
		return jObj, nil
	}
	return nil, fmt.Errorf("failed to parse JSON type '%T' into object", jVal)
}

// processFieldJSONResultArrayMarshaller parses the part as JSON and
// requires the result to be an array.
func processFieldJSONResultArrayMarshaller(p types.Part) (interface{}, error) {
	jVal, err := p.JSON()
	if err != nil {
		return nil, err
	}
	if jArray, ok := jVal.([]interface{}); ok {
		return jArray, nil
	}
	return nil, fmt.Errorf("failed to parse JSON type '%T' into array", jVal)
}
//------------------------------------------------------------------------------
// processFieldMetadataCodec extracts and re-injects a metadata value
// identified by a single key (the configured path).
type processFieldMetadataCodec struct {
	key     string
	discard bool // true when result_type is "discard"
}

// newProcessFieldMetadataCodec constructs the codec; any result type other
// than "discard" is treated as a plain string, so it never returns an error.
func newProcessFieldMetadataCodec(path, resultStr string) (*processFieldMetadataCodec, error) {
	return &processFieldMetadataCodec{
		key:     path,
		discard: resultStr == "discard",
	}, nil
}

// CreateRequest copies the source part and replaces its contents with the
// metadata value stored under the configured key.
func (p *processFieldMetadataCodec) CreateRequest(source types.Part) (types.Part, error) {
	reqPart := source.Copy()
	reqPart.Set([]byte(reqPart.Metadata().Get(p.key)))
	return reqPart, nil
}

// ExtractResult writes the processed contents back as the metadata value.
func (p *processFieldMetadataCodec) ExtractResult(from, to types.Part) error {
	to.Metadata().Set(p.key, string(from.Get()))
	return nil
}

// Discard reports whether processing results should be dropped.
func (p *processFieldMetadataCodec) Discard() bool {
	return p.discard
}
//------------------------------------------------------------------------------
// stringToProcessFieldCodec resolves a codec name ("json" or "metadata")
// into a processFieldCodec configured with the given path and result type.
func stringToProcessFieldCodec(path, codecStr, resultStr string) (processFieldCodec, error) {
	switch codecStr {
	case "json":
		return newProcessFieldJSONCodec(path, resultStr)
	case "metadata":
		return newProcessFieldMetadataCodec(path, resultStr)
	default:
		return nil, fmt.Errorf("unrecognised codec: %v", codecStr)
	}
}
//------------------------------------------------------------------------------
// ProcessMessage applies the processor to a message, either creating >0
// resulting messages or a response to be sent back to the message source.
func (p *ProcessField) ProcessMessage(msg types.Message) (msgs []types.Message, res types.Response) {
	p.mCount.Incr(1)
	payload := msg.Copy()
	resMsgs := [1]types.Message{payload}
	msgs = resMsgs[:]

	// Default to processing every part when no explicit parts are configured.
	targetParts := p.parts
	if len(targetParts) == 0 {
		targetParts = make([]int, payload.Len())
		for i := range targetParts {
			targetParts[i] = i
		}
	}

	// Build the request batch: one extracted field value per target part.
	// Extraction failures still append a (flagged, emptied) part so indexes
	// in the request batch stay aligned with targetParts.
	reqMsg := message.New(nil)
	for _, index := range targetParts {
		reqPart, err := p.codec.CreateRequest(payload.Get(index))
		if err != nil {
			p.mErrParse.Incr(1)
			p.mErr.Incr(1)
			p.log.Errorf("Failed to decode part: %v\n", err)
			reqPart = payload.Get(index).Copy()
			reqPart.Set(nil)
			FlagErr(reqPart, err)
		}
		reqMsg.Append(reqPart)
	}

	// Run the child processors over the extracted values and flatten all
	// resulting batches into a single message.
	propMsg, _ := tracing.WithChildSpans(TypeProcessField, reqMsg)
	resultMsgs, _ := ExecuteAll(p.children, propMsg)
	resMsg := message.New(nil)
	for _, rMsg := range resultMsgs {
		rMsg.Iter(func(i int, p types.Part) error {
			resMsg.Append(p.Copy())
			return nil
		})
	}
	defer tracing.FinishSpans(propMsg)

	if p.codec.Discard() {
		// With no result codec, if our results are inline with our original
		// batch we copy the metadata only.
		if len(targetParts) == resMsg.Len() {
			for i, index := range targetParts {
				tPart := payload.Get(index)
				tPartMeta := tPart.Metadata()
				resMsg.Get(i).Metadata().Iter(func(k, v string) error {
					tPartMeta.Set(k, v)
					return nil
				})
			}
		}
		p.mBatchSent.Incr(1)
		p.mSent.Incr(int64(payload.Len()))
		return
	}

	// A result count mismatch means results cannot be mapped back onto the
	// original parts; flag every part with the error and pass them through.
	if exp, act := len(targetParts), resMsg.Len(); exp != act {
		p.mBatchSent.Incr(1)
		p.mSent.Incr(int64(payload.Len()))
		p.mErr.Incr(1)
		p.mErrMisalignedBatch.Incr(1)
		p.log.Errorf("Misaligned processor result batch. Expected %v messages, received %v\n", exp, act)
		partsErr := fmt.Errorf("mismatched processor result, expected %v, received %v messages", exp, act)
		payload.Iter(func(i int, p types.Part) error {
			FlagErr(p, partsErr)
			return nil
		})
		return
	}

	// Copy result metadata onto the originals and write each processed value
	// back into its source part via the codec.
	for i, index := range targetParts {
		tPart := payload.Get(index)
		tPartMeta := tPart.Metadata()
		resMsg.Get(i).Metadata().Iter(func(k, v string) error {
			tPartMeta.Set(k, v)
			return nil
		})
		rErr := p.codec.ExtractResult(resMsg.Get(i), tPart)
		if rErr != nil {
			p.log.Errorf("Failed to marshal result: %v\n", rErr)
			FlagErr(tPart, rErr)
			continue
		}
	}

	p.mBatchSent.Incr(1)
	p.mSent.Incr(int64(payload.Len()))
	return
}
// CloseAsync shuts down the processor and stops processing requests.
func (p *ProcessField) CloseAsync() {
	for _, c := range p.children {
		c.CloseAsync()
	}
}

// WaitForClose blocks until the processor has closed down.
func (p *ProcessField) WaitForClose(timeout time.Duration) error {
	// One shared deadline is spread across all children sequentially.
	stopBy := time.Now().Add(timeout)
	for _, c := range p.children {
		if err := c.WaitForClose(time.Until(stopBy)); err != nil {
			return err
		}
	}
	return nil
}
//------------------------------------------------------------------------------
package unityai
// PathCorridorState is a bitmask describing the corridor's status.
type PathCorridorState uint8

const (
	kPathCorridorValid       PathCorridorState = 1 << 0 // corridor holds a usable path
	kPathCorridorPartial     PathCorridorState = 1 << 1 // path does not reach the target
	kPathCorridorInterrupted PathCorridorState = 1 << 2 // path building was interrupted
)

// PathCorridor tracks an agent's position and target along an ordered list
// of navmesh polygon refs (m_path[0] holds m_pos, the last poly holds
// m_target).
type PathCorridor struct {
	m_pos        Vector3f
	m_target     Vector3f
	m_path       []NavMeshPolyRef
	m_stateFlags PathCorridorState
}

// NewPathCorridor returns an empty, invalid corridor with a small
// preallocated path buffer.
func NewPathCorridor() *PathCorridor {
	return &PathCorridor{
		m_pos:        Vector3f{},
		m_target:     Vector3f{},
		m_path:       make([]NavMeshPolyRef, 0, 4),
		m_stateFlags: 0,
	}
}
// GetCurrentPos returns the agent's current position on the corridor.
func (this *PathCorridor) GetCurrentPos() Vector3f {
	return this.m_pos
}

// ClearPath empties the polygon list, keeping position/target/state intact.
func (this *PathCorridor) ClearPath() {
	this.m_path = this.m_path[:0]
}

// Reset re-initialises the corridor to the single polygon ref at pos; a
// zero ref invalidates the corridor instead.
func (this *PathCorridor) Reset(ref NavMeshPolyRef, pos Vector3f) {
	if ref == 0 {
		this.Invalidate()
		return
	}
	this.m_path = this.m_path[:0]
	this.m_path = append(this.m_path, ref)
	this.m_pos = pos
	this.m_target = pos
	this.m_stateFlags = kPathCorridorValid
}

// Invalidate clears all state flags and resets the path to a single null
// polygon ref.
func (this *PathCorridor) Invalidate() {
	// Preserve the position and target
	this.m_path = this.m_path[:0]
	this.m_path = append(this.m_path, NavMeshPolyRef(0))
	this.m_stateFlags = 0
}

// SetToEnd snaps the current position to the target and shrinks the path
// to just its final polygon.
func (this *PathCorridor) SetToEnd() {
	Assert(this.GetPathCount() != 0)
	this.m_pos = this.m_target
	this.m_path[0] = this.GetLastPoly()
	this.m_path = this.m_path[:1]
}
// kMinTargetDistSq is the squared 2D distance under which a leading corner
// is considered coincident with the current position and pruned.
const kMinTargetDistSq float32 = 0.0001

// GetPathCount returns the number of polygons in the corridor.
func (this *PathCorridor) GetPathCount() int32 {
	return int32(len(this.m_path))
}

// FindCorners runs the straight-path (string pulling) query over the
// corridor and fills at most maxCorners corner vertices/flags/polys into
// the output slices, storing the count in cornerCount. Leading corners at
// the current position are pruned, and the list is cut just after the first
// off-mesh connection so callers steer toward it first.
func (this *PathCorridor) FindCorners(cornerVerts []Vector3f, cornerFlags []uint8,
	cornerPolys []NavMeshPolyRef, cornerCount *int32, maxCorners int32,
	navquery *NavMeshQuery) NavMeshStatus {
	Assert(this.GetPathCount() != 0)
	var ncorners int32 = 0
	status := navquery.FindStraightPath(this.m_pos, this.m_target, this.m_path, this.GetPathCount(),
		cornerVerts, cornerFlags, cornerPolys, &ncorners, maxCorners)
	if ncorners == 0 {
		*cornerCount = 0
		return status
	}
	// Prune points in the beginning of the path which are too close.
	var prune int32
	for prune = 0; prune < ncorners; prune++ {
		if (NavMeshStraightPathFlags(cornerFlags[prune])&kStraightPathOffMeshConnection) != 0 || SqrDistance2D(cornerVerts[prune], this.m_pos) > kMinTargetDistSq {
			break
		}
	}
	ncorners -= prune
	if prune != 0 && ncorners != 0 {
		// Shift the surviving corners down over the pruned ones.
		for i := int32(0); i < ncorners; i++ {
			cornerFlags[i] = cornerFlags[i+prune]
			cornerPolys[i] = cornerPolys[i+prune]
			cornerVerts[i] = cornerVerts[i+prune]
		}
	}
	// Prune points after an off-mesh connection.
	for prune = 0; prune < ncorners; prune++ {
		if NavMeshStraightPathFlags(cornerFlags[prune])&kStraightPathOffMeshConnection != 0 {
			ncorners = prune + 1
			break
		}
	}
	*cornerCount = ncorners
	if NavMeshStatusDetail(status, kNavMeshPartialResult) {
		return kNavMeshSuccess | kNavMeshPartialResult
	}
	return kNavMeshSuccess
}
// OptimizePathVisibility attempts to shortcut the corridor start by
// raycasting from the current position toward next; when the ray crosses
// multiple polygons with (nearly) full clearance (t > 0.99) the corridor
// start is replaced with the raycast's polygon list.
func (this *PathCorridor) OptimizePathVisibility(next Vector3f, navquery *NavMeshQuery, filter *QueryFilter) {
	var res [kMaxResults]NavMeshPolyRef
	var nres int32 = 0
	var result NavMeshRaycastResult
	navquery.Raycast(this.m_path[0], this.m_pos, next, filter, &result, res[:], &nres, kMaxResults)
	if nres > 1 && result.t > 0.99 {
		ReplacePathStart(&this.m_path, res[:], nres)
	}
}

// kMaxIterations bounds the sliced path search per OptimizePathTopology call.
const kMaxIterations int32 = 8

// OptimizePathTopology re-plans the start of the corridor with a bounded
// sliced search from the current position to the target, replacing the
// corridor start when a (possibly partial) path is found. Returns true if
// the path changed.
func (this *PathCorridor) OptimizePathTopology(navquery *NavMeshQuery, filter *QueryFilter) bool {
	if this.GetPathCount() < 3 {
		return false
	}
	var res [kMaxResults]NavMeshPolyRef
	var nres int32 = 0
	status := navquery.InitSlicedFindPath2(this.m_path[0], this.GetLastPoly(), this.m_pos, this.m_target, filter)
	if !NavMeshStatusFailed(status) {
		status = navquery.UpdateSlicedFindPath(kMaxIterations, nil)
	}
	if !NavMeshStatusSucceed(status) {
		// don't accept kNavMeshInProgress
		return false
	}
	status = navquery.FinalizeSlicedFindPathPartial(&nres, this.m_path, this.GetPathCount())
	if !NavMeshStatusSucceed(status) {
		return false
	}
	status = navquery.GetPath(res[:], &nres, kMaxResults)
	if !NavMeshStatusSucceed(status) {
		return false
	}
	return ReplacePathStart(&this.m_path, res[:], nres)
}
// MoveOverOffmeshConnection advances the corridor up to and over the given
// off-mesh connection, pruning the traversed polygons and moving the
// current position to the connection's far end point. Returns false when
// the connection is not on the path or its end points cannot be resolved.
//
// NOTE(review): startPos/endPos are value parameters, so the end points
// written into them via & below are invisible to the caller — confirm
// whether they were meant to be pointer out-parameters.
func (this *PathCorridor) MoveOverOffmeshConnection(offMeshConRef NavMeshPolyRef, currentPos Vector3f,
	startPos Vector3f, endPos Vector3f, navquery *NavMeshQuery) bool {
	Assert(navquery != nil)
	Assert(this.GetPathCount() != 0)
	// Advance the path up to and over the off-mesh connection.
	var prevRef, nextRef NavMeshPolyRef
	polyRef := this.m_path[0]
	var npos int32 = 0
	npath := this.GetPathCount()
	for npos < npath && polyRef != offMeshConRef {
		prevRef = polyRef
		polyRef = this.m_path[npos]
		if npos+1 < npath {
			nextRef = this.m_path[npos+1]
		}
		npos++
	}
	if npos == npath {
		// Could not find offMeshConRef
		return false
	}
	// Prune path
	this.m_path = this.m_path[npos:]
	nav := navquery.GetAttachedNavMesh()
	Assert(nav != nil)
	conn := nav.GetOffMeshConnection(polyRef)
	if conn == nil {
		return false
	}
	if conn.width > 0.0 {
		// Handle wide link
		status := nav.GetNearestOffMeshConnectionEndPoints(prevRef, polyRef, nextRef, currentPos, &startPos, &endPos)
		if NavMeshStatusSucceed(status) {
			this.m_pos = endPos
			return true
		}
	} else {
		status := nav.GetOffMeshConnectionEndPoints(prevRef, polyRef, &startPos, &endPos)
		if NavMeshStatusSucceed(status) {
			this.m_pos = endPos
			return true
		}
	}
	return false
}
// TODO : notify callers - return success/failure
// kMaxVisited bounds the polygons visited by one MovePosition call.
const kMaxVisited int32 = 16

// MovePosition slides the current position along the navmesh surface toward
// newPos, splices the visited polygons onto the corridor start, and
// projects the position back onto the first polygon. Returns false when the
// position is unchanged or the surface move fails.
func (this *PathCorridor) MovePosition(newPos Vector3f, navquery *NavMeshQuery, filter *QueryFilter) bool {
	Assert(this.GetPathCount() != 0)
	if SqrDistance2D(newPos, this.m_pos) == 0.0 {
		return false
	}
	// Move along navmesh and update new position.
	var result Vector3f
	var visited [kMaxVisited]NavMeshPolyRef
	var nvisited int32 = 0
	status := navquery.MoveAlongSurface(this.m_path[0], this.m_pos, newPos, filter,
		&result, visited[:], &nvisited, kMaxVisited)
	if !NavMeshStatusSucceed(status) {
		return false
	}
	ReplacePathStartReverse(&this.m_path, visited[:], nvisited)
	// Adjust the position to stay on top of the navmesh.
	navquery.ProjectToPoly(&this.m_pos, this.m_path[0], result)
	return true
}
// UpdateTargetPosition replaces the target position, but only when ref is
// still the corridor's final polygon; otherwise it returns false untouched.
func (this *PathCorridor) UpdateTargetPosition(ref NavMeshPolyRef, target Vector3f) bool {
	if ref != this.GetLastPoly() {
		return false
	}
	this.m_target = target
	return true
}
// kExtraCapacity is the number of spare path slots reserved by SetCorridor.
const kExtraCapacity uint32 = 16

// SetCorridor replaces the corridor with the first npath entries of path
// (npath > 0) and the given target, marks the corridor valid, and projects
// the target onto the final polygon so it stays on the navmesh.
func (this *PathCorridor) SetCorridor(target Vector3f, navquery *NavMeshQuery, path []NavMeshPolyRef, npath int32, partialPath bool) {
	Assert(npath > 0)
	// Reserving room for extra polygons allows us to subsequently extend the path a bit,
	// e.g. from a thread/job, without allocating memory and possibly locking.
	// The original allocated exactly npath, contradicting the comment above
	// and leaving kExtraCapacity unused; reserve the spare slots as intended.
	this.m_path = make([]NavMeshPolyRef, npath, npath+int32(kExtraCapacity))
	this.m_target = target
	copy(this.m_path, path[:npath])
	this.m_stateFlags = kPathCorridorValid
	this.SetPathPartial(partialPath)
	// Adjust the position to stay on top of the navmesh.
	navquery.ProjectToPoly(&this.m_target, this.GetLastPoly(), target)
}
// SetStateFlag sets or clears stateFlag in m_stateFlags according to setFlag.
func (this *PathCorridor) SetStateFlag(setFlag bool, stateFlag PathCorridorState) {
	// Clear the bit first, then re-set it when requested.
	this.m_stateFlags &^= stateFlag
	if setFlag {
		this.m_stateFlags |= stateFlag
	}
}

// SetPathValid sets or clears the kPathCorridorValid flag.
func (this *PathCorridor) SetPathValid(inbool bool) {
	this.SetStateFlag(inbool, kPathCorridorValid)
}

// SetPathPartial sets or clears the kPathCorridorPartial flag.
func (this *PathCorridor) SetPathPartial(inbool bool) {
	this.SetStateFlag(inbool, kPathCorridorPartial)
}

// SetPathInterrupted sets or clears the kPathCorridorInterrupted flag.
func (this *PathCorridor) SetPathInterrupted(inbool bool) {
	this.SetStateFlag(inbool, kPathCorridorInterrupted)
}

// GetLastPoly returns the corridor's final polygon ref (the target's poly).
func (this *PathCorridor) GetLastPoly() NavMeshPolyRef {
	return this.m_path[this.GetPathCount()-1]
}

// GetPath returns the corridor's polygon list (not a copy).
func (this *PathCorridor) GetPath() []NavMeshPolyRef {
	return this.m_path
}
// ReplacePathStart splices start[:nstart] onto the front of *path: it finds
// the furthest pair of matching polygon refs between the two lists, keeps
// start up to that match, then the old path from the match onward. Returns
// false (path untouched) when the lists share no polygon.
func ReplacePathStart(path *[]NavMeshPolyRef, start []NavMeshPolyRef, nstart int32) bool {
	npath := int32(len(*path))
	var ipath, istart int32
	if !FindFurthestIntersectionIndices(*path, start, npath, nstart, &ipath, &istart) {
		return false
	}
	// the result may only grow before the elements are moved in-place.
	tmpath := make([]NavMeshPolyRef, npath)
	copy(tmpath, *path)
	nres := istart + (npath - ipath)
	if nres > npath {
		*path = append(*path, make([]NavMeshPolyRef, nres-npath)...)
	}
	// move elements in place
	copy((*path)[istart:], tmpath[ipath:npath])
	copy(*path, start[:istart])
	*path = (*path)[:nres]
	// shrink result to fit
	return true
}
// FindFurthestIntersectionIndices finds the highest-index pair of matching
// polygon refs between a[:na] and b[:nb], scanning both lists from their
// tails. On success it stores the indices in *ia / *ib and returns true.
func FindFurthestIntersectionIndices(a []NavMeshPolyRef, b []NavMeshPolyRef, na, nb int32, ia, ib *int32) bool {
	for i := na - 1; i >= 0; i-- { // the original's int32(0) conversion was redundant
		for j := nb - 1; j >= 0; j-- {
			if a[i] == b[j] {
				*ia = i
				*ib = j
				return true
			}
		}
	}
	return false
}
// ReplacePathStartReverse is like ReplacePathStart but start[:nstart] is in
// reverse order (as produced by MoveAlongSurface's visited list); the kept
// prefix is copied reversed onto the front of *path.
func ReplacePathStartReverse(path *[]NavMeshPolyRef, start []NavMeshPolyRef, nstart int32) bool {
	npath := int32(len(*path))
	var ipath, istart int32
	if !FindFurthestIntersectionIndices(*path, start, npath, nstart, &ipath, &istart) {
		return false
	}
	// the pivot index for the reversed start segment
	istartrev := nstart - 1 - istart
	tmpath := make([]NavMeshPolyRef, npath)
	copy(tmpath, *path)
	nres := istartrev + (npath - ipath)
	// the result may only grow before the elements are moved in-place.
	if nres > npath {
		*path = append(*path, make([]NavMeshPolyRef, nres-npath)...)
	}
	// move elements in place
	copy((*path)[istartrev:], tmpath[ipath:])
	for i := int32(0); i < istartrev; i++ {
		(*path)[i] = start[nstart-1-i]
	}
	// shrink result to fit
	*path = (*path)[:nres]
	return true
}
package petstore
import (
"encoding/json"
)
// ReadOnlyFirst struct for ReadOnlyFirst
type ReadOnlyFirst struct {
	Bar *string `json:"bar,omitempty"`
	Baz *string `json:"baz,omitempty"`
}

// NewReadOnlyFirst instantiates a new ReadOnlyFirst object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewReadOnlyFirst() *ReadOnlyFirst {
	return &ReadOnlyFirst{}
}

// NewReadOnlyFirstWithDefaults instantiates a new ReadOnlyFirst object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewReadOnlyFirstWithDefaults() *ReadOnlyFirst {
	return &ReadOnlyFirst{}
}

// GetBar returns the Bar field value if set, zero value otherwise.
func (o *ReadOnlyFirst) GetBar() string {
	if v, ok := o.GetBarOk(); ok {
		return *v
	}
	return ""
}

// GetBarOk returns a tuple with the Bar field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *ReadOnlyFirst) GetBarOk() (*string, bool) {
	if o != nil && o.Bar != nil {
		return o.Bar, true
	}
	return nil, false
}

// HasBar returns a boolean if a field has been set.
func (o *ReadOnlyFirst) HasBar() bool {
	return o != nil && o.Bar != nil
}

// SetBar gets a reference to the given string and assigns it to the Bar field.
func (o *ReadOnlyFirst) SetBar(v string) {
	o.Bar = &v
}

// GetBaz returns the Baz field value if set, zero value otherwise.
func (o *ReadOnlyFirst) GetBaz() string {
	if v, ok := o.GetBazOk(); ok {
		return *v
	}
	return ""
}

// GetBazOk returns a tuple with the Baz field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *ReadOnlyFirst) GetBazOk() (*string, bool) {
	if o != nil && o.Baz != nil {
		return o.Baz, true
	}
	return nil, false
}

// HasBaz returns a boolean if a field has been set.
func (o *ReadOnlyFirst) HasBaz() bool {
	return o != nil && o.Baz != nil
}

// SetBaz gets a reference to the given string and assigns it to the Baz field.
func (o *ReadOnlyFirst) SetBaz(v string) {
	o.Baz = &v
}

// MarshalJSON serializes only the fields that have been set.
func (o ReadOnlyFirst) MarshalJSON() ([]byte, error) {
	out := map[string]interface{}{}
	if o.Bar != nil {
		out["bar"] = o.Bar
	}
	if o.Baz != nil {
		out["baz"] = o.Baz
	}
	return json.Marshal(out)
}
// NullableReadOnlyFirst wraps a ReadOnlyFirst pointer together with an
// explicit "has been set" flag, distinguishing unset from set-to-nil.
type NullableReadOnlyFirst struct {
	value *ReadOnlyFirst
	isSet bool
}

// Get returns the wrapped value (nil when unset).
func (v NullableReadOnlyFirst) Get() *ReadOnlyFirst {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableReadOnlyFirst) Set(val *ReadOnlyFirst) {
	v.value, v.isSet = val, true
}

// IsSet reports whether a value has been stored via Set (or construction).
func (v NullableReadOnlyFirst) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableReadOnlyFirst) Unset() {
	v.value, v.isSet = nil, false
}

// NewNullableReadOnlyFirst constructs a wrapper already marked as set.
func NewNullableReadOnlyFirst(val *ReadOnlyFirst) *NullableReadOnlyFirst {
	return &NullableReadOnlyFirst{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (null when nil).
func (v NullableReadOnlyFirst) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks it set.
func (v *NullableReadOnlyFirst) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package runeutil
// InRange reports whether min <= c <= max, with the bounds given as ints.
func InRange(c rune, min, max int) bool {
	return c >= rune(min) && c <= rune(max)
}
// IsLower reports whether c is an ASCII lowercase letter ('a'..'z').
func IsLower(c rune) bool {
	return c >= 'a' && c <= 'z'
}

// IsUpper reports whether c is an ASCII uppercase letter ('A'..'Z').
func IsUpper(c rune) bool {
	return c >= 'A' && c <= 'Z'
}

// IsLetter reports whether c is an ASCII letter (either case).
func IsLetter(c rune) bool {
	return IsLower(c) || IsUpper(c)
}
// IsDigit reports whether c is an ASCII decimal digit ('0'..'9').
func IsDigit(c rune) bool {
	return c >= '0' && c <= '9'
}

// IsHexDigit reports whether c is a hexadecimal digit (0-9, a-f, A-F).
func IsHexDigit(c rune) bool {
	switch {
	case c >= '0' && c <= '9':
		return true
	case c >= 'a' && c <= 'f':
		return true
	case c >= 'A' && c <= 'F':
		return true
	}
	return false
}
// IsHankaku reports whether c is a halfwidth (ASCII) digit or letter.
// (Despite the broad name, only alphanumerics are treated as hankaku here.)
func IsHankaku(c rune) bool {
	switch {
	case c >= '0' && c <= '9':
		return true
	case c >= 'a' && c <= 'z':
		return true
	case c >= 'A' && c <= 'Z':
		return true
	}
	return false
}

// IsFlag reports whether c is an ASCII punctuation/symbol character
// (the four printable ASCII ranges outside digits and letters).
func IsFlag(c rune) bool {
	switch {
	case c >= 0x21 && c <= 0x2F:
		return true
	case c >= 0x3A && c <= 0x40:
		return true
	case c >= 0x5B && c <= 0x60:
		return true
	case c >= 0x7B && c <= 0x7E:
		return true
	}
	return false
}
// IsWordRune reports whether c may appear in a WORD token: underscore,
// ASCII letters, hiragana, katakana, kanji, or emoji.
func IsWordRune(c rune) bool {
	switch {
	case c == '_':
		return true
	case IsLetter(c), IsHiragana(c), IsKatakana(c), IsKanji(c), IsEmoji(c):
		return true
	}
	return false
}
// IsGreek reports whether c lies in one of the Greek Unicode blocks.
func IsGreek(c rune) bool {
	return (c >= 0x0370 && c <= 0x03FF) || // Greek and Coptic
		(c >= 0x1F00 && c <= 0x1FFF) || // Greek Extended
		(c >= 0x10140 && c <= 0x1018F) // Ancient Greek Numbers
}

// IsLatin reports whether c is an extended-Latin character (Latin-1
// Supplement or Latin Extended-A/B); plain ASCII letters return false.
func IsLatin(c rune) bool {
	return (c >= 0x80 && c <= 0xFF) || // Latin-1
		(c >= 0x0100 && c <= 0x024F) // Latin Extended-A/B
}
// IsKanji reports whether c is a kanji (CJK ideograph) or one of the
// ideographic marks commonly treated as kanji (々, 〇, 〻).
// The original wrapped the already-rune value c in redundant rune()
// conversions; those are dropped here.
func IsKanji(c rune) bool {
	return (c >= 0x2E80 && c <= 0x2FDF) || // CJK部首補助
		c == '々' || // U+3005
		c == '〇' || // U+3007
		c == '〻' || // U+303B
		(c >= 0x3400 && c <= 0x4DBF) || // CJK漢字拡張A
		(c >= 0x4E00 && c <= 0x9FFC) || // CJK統合漢字
		(c >= 0xF900 && c <= 0xFAFF) || // CJK互換漢字
		(c >= 0x20000 && c <= 0x2FFFF) // CJK統合漢字拡張B-Fなど
}
// IsEmoji reports whether c lies in one of the emoji/symbol Unicode blocks.
func IsEmoji(c rune) bool {
	switch {
	case c >= 0x2700 && c <= 0x27BF: // 装飾記号
		return true
	case c >= 0x1F650 && c <= 0x1F67F: // 装飾用絵記号
		return true
	case c >= 0x1F600 && c <= 0x1F64F: // 顔文字
		return true
	case c >= 0x2600 && c <= 0x26FF: // その他の記号
		return true
	case c >= 0x1F300 && c <= 0x1F5FF: // その他の記号と絵文字
		return true
	case c >= 0x1F900 && c <= 0x1F9FF: // 記号と絵文字補助
		return true
	case c >= 0x1FA70 && c <= 0x1FAFF: // 絵文字機拡張A
		return true
	case c >= 0x1F680 && c <= 0x1F6FF: // 交通と地図の記号
		return true
	}
	return false
}
// IsMultibytes reports whether c is outside the single-byte (Latin-1)
// range, i.e. its code point exceeds 0xFF.
func IsMultibytes(c rune) bool {
	return c > 0xFF
}
// IsHiragana reports whether c lies in the Hiragana block (U+3040..U+309F).
func IsHiragana(c rune) bool {
	return c >= 0x3040 && c <= 0x309F
}

// IsKatakana reports whether c lies in the Katakana block
// (U+30A0..U+30FF) or Katakana Phonetic Extensions (U+31F0..U+31FF).
func IsKatakana(c rune) bool {
	if c >= 0x30A0 && c <= 0x30FF {
		return true
	}
	return c >= 0x31F0 && c <= 0x31FF
}
// HasRune reports whether c occurs anywhere in runes.
func HasRune(runes []rune, c rune) bool {
	for i := 0; i < len(runes); i++ {
		if runes[i] == c {
			return true
		}
	}
	return false
}
// Length returns the number of runes (not bytes) in s.
func Length(s string) int {
	return len([]rune(s))
}
// Equal reports whether the rune slices a and b have identical length and
// contents.
func Equal(a, b []rune) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
// ToKatakana converts hiragana letters in s to their katakana counterparts;
// all other runes pass through untouched.
//
// Fixes the original implementation, whose body was swapped with
// ToHiragana's: it tested IsKatakana and subtracted 0x60, i.e. converted
// katakana to hiragana, contradicting its own "Hira to Kana" doc.
func ToKatakana(s string) string {
	sr := []rune(s)
	for i, v := range sr {
		// Hiragana letters ぁ (U+3041) .. ゖ (U+3096) sit exactly 0x60 below
		// their katakana counterparts ァ (U+30A1) .. ヶ (U+30F6).
		if v >= 0x3041 && v <= 0x3096 {
			sr[i] += 0x60
		}
	}
	return string(sr)
}
// ToHiragana : converts katakana runes in s to hiragana.
// Katakana ァ (U+30A1) .. ヶ (U+30F6) maps onto hiragana by subtracting
// 0x60 (e.g. ア U+30A2 -> あ U+3042); katakana with no hiragana
// counterpart (ヷ ヺ ー etc.) and all other runes are left untouched.
// NOTE: the previous implementation tested IsHiragana and added 0x60,
// i.e. it converted hiragana to katakana — the opposite of its name.
func ToHiragana(s string) string {
	sr := []rune(s)
	for i, v := range sr {
		// カタカナ?
		if v >= 0x30A1 && v <= 0x30F6 {
			sr[i] = sr[i] - 0x60
		}
	}
	return string(sr)
}
// ToZenkaku : converts half-width (hankaku) runes to their full-width
// (zenkaku) forms by shifting them up by 0xFEE0.
func ToZenkaku(s string) string {
	out := []rune(s)
	for i := range out {
		// 半角なら全角へシフト
		if IsHankaku(out[i]) {
			out[i] += 0xFEE0
		}
	}
	return string(out)
}
// ToHankaku : converts full-width (zenkaku) alphanumerics to half-width.
// Full-width A-Z (U+FF21..U+FF3A), a-z (U+FF41..U+FF5A) and 0-9
// (U+FF10..U+FF19) are shifted down by 0xFEE0; other runes are kept.
// NOTE: the previous implementation tested the ASCII ranges ('A'-'Z',
// 'a'-'z', '0'-'9') instead of the full-width ones, so it corrupted
// plain ASCII input and never converted any full-width character.
func ToHankaku(s string) string {
	sr := []rune(s)
	for i, v := range sr {
		// 全角英数字?
		if (v >= 0xFF21 && v <= 0xFF3A) || (v >= 0xFF41 && v <= 0xFF5A) || (v >= 0xFF10 && v <= 0xFF19) {
			sr[i] = sr[i] - 0xFEE0
		}
	}
	return string(sr)
}
// ToZenkakuAndKigou : converts printable ASCII (U+0020..U+007F), including
// symbols, to the corresponding full-width forms by shifting up 0xFEE0.
func ToZenkakuAndKigou(s string) string {
	out := []rune(s)
	for i, c := range out {
		// ASCII範囲なら全角へ
		if 0x20 <= c && c <= 0x7F {
			out[i] = c + 0xFEE0
		}
	}
	return string(out)
}
// ToHankakuAndKigou : converts full-width forms (U+FF00..U+FF5F), including
// symbols, back to ASCII by shifting down 0xFEE0.
func ToHankakuAndKigou(s string) string {
	out := []rune(s)
	for i, c := range out {
		// 全角形なら半角へ
		if 0xFF00 <= c && c <= 0xFF5F {
			out[i] = c - 0xFEE0
		}
	}
	return string(out)
}
// hanKana lists half-width katakana tokens. Entries carrying a dakuten or
// handakuten consist of two runes and come first so that two-rune
// sequences are matched before their single-rune prefixes.
// NOTE: hanKana and zenKana are parallel tables — entry i of hanKana must
// correspond to entry i of zenKana; keep them in sync when editing.
var hanKana = []string{
	"ガ", "ギ", "グ", "ゲ", "ゴ",
	"ザ", "ジ", "ズ", "ゼ", "ゾ",
	"ダ", "ヂ", "ヅ", "デ", "ド",
	"バ", "パ", "ビ", "ピ", "ブ", "プ", "ベ", "ペ", "ボ", "ポ",
	"ヷ", "ヺ", "ヴ",
	"。", "「", "」", "、", "・", "ー", "゙", "゚",
	"ア", "イ", "ウ", "エ", "オ",
	"カ", "キ", "ク", "ケ", "コ",
	"サ", "シ", "ス", "セ", "ソ", "タ", "チ", "ツ", "テ", "ト",
	"ナ", "ニ", "ヌ", "ネ", "ノ",
	"ハ", "ヒ", "フ", "ヘ", "ホ",
	"マ", "ミ", "ム", "メ", "モ",
	"ヤ", "ユ", "ヨ",
	"ラ", "リ", "ル", "レ", "ロ",
	"ワ", "ヲ", "ン",
	"ァ", "ィ", "ゥ", "ェ", "ォ",
	"ャ", "ュ", "ョ", "ッ",
}

// zenKana lists the full-width katakana equivalents, in the same order as
// hanKana (see the parallel-table note above).
var zenKana = []string{
	"ガ", "ギ", "グ", "ゲ", "ゴ",
	"ザ", "ジ", "ズ", "ゼ", "ゾ",
	"ダ", "ヂ", "ヅ", "デ", "ド",
	"バ", "パ", "ビ", "ピ", "ブ", "プ", "ベ", "ペ", "ボ", "ポ",
	"ヷ", "ヺ", "ヴ",
	"。", "「", "」", "、", "・", "ー", "゛", "゜",
	"ア", "イ", "ウ", "エ", "オ",
	"カ", "キ", "ク", "ケ", "コ",
	"サ", "シ", "ス", "セ", "ソ",
	"タ", "チ", "ツ", "テ", "ト",
	"ナ", "ニ", "ヌ", "ネ", "ノ",
	"ハ", "ヒ", "フ", "ヘ", "ホ",
	"マ", "ミ", "ム", "メ", "モ",
	"ヤ", "ユ", "ヨ",
	"ラ", "リ", "ル", "レ", "ロ",
	"ワ", "ヲ", "ン",
	"ァ", "ィ", "ゥ", "ェ", "ォ",
	"ャ", "ュ", "ョ", "ッ",
}
// ToZenkakuKatakana : converts half-width katakana in s to full-width
// katakana, merging dakuten/handakuten pairs into their single full-width
// rune via the hanKana/zenKana lookup tables.
func ToZenkakuKatakana(s string) string {
	// 半角カナ → テーブル位置の辞書を作る
	index := make(map[string]int, len(hanKana))
	for i, k := range hanKana {
		index[k] = i
	}
	res := ""
	src := []rune(s)
	for pos := 0; pos < len(src); {
		// まず2文字(濁点・半濁点付き)で引く
		if pos+1 < len(src) {
			if idx, hit := index[string(src[pos])+string(src[pos+1])]; hit {
				res += zenKana[idx]
				pos += 2
				continue
			}
		}
		// 1文字で引く
		ch := string(src[pos])
		if idx, hit := index[ch]; hit {
			res += zenKana[idx]
		} else {
			res += ch
		}
		pos++
	}
	return res
}
// ToHankakuKatakana : zen katakana to han katakana
func ToHankakuKatakana(s string) string {
// 辞書を初期化
dic := map[string]int{}
for i, v := range zenKana {
dic[v] = i
}
res := ""
sr := []rune(s)
i := 0
for i < len(sr) {
// 普通のカナをチェック
ch1 := string(sr[i])
val, ok := dic[ch1]
if ok {
res += hanKana[val]
i++
continue
}
res += ch1
i++
}
return res
} | runeutil/runeutil.go | 0.535341 | 0.61832 | runeutil.go | starcoder |
package fake
import (
"github.com/stretchr/testify/mock"
dem "github.com/markus-wa/demoinfocs-golang"
"github.com/markus-wa/demoinfocs-golang/common"
st "github.com/markus-wa/demoinfocs-golang/sendtables"
)
var _ dem.IGameState = new(GameState)
// GameState is a mock for of demoinfocs.IGameState.
type GameState struct {
mock.Mock
}
// IngameTick is a mock-implementation of IGameState.IngameTick().
func (gs *GameState) IngameTick() int {
return gs.Called().Int(0)
}
// TeamCounterTerrorists is a mock-implementation of IGameState.TeamCounterTerrorists().
func (gs *GameState) TeamCounterTerrorists() *common.TeamState {
return gs.Called().Get(0).(*common.TeamState)
}
// TeamTerrorists is a mock-implementation of IGameState.TeamTerrorists().
func (gs *GameState) TeamTerrorists() *common.TeamState {
return gs.Called().Get(0).(*common.TeamState)
}
// Team is a mock-implementation of IGameState.Team().
// NOTE(review): the team argument is not forwarded to Called(), so
// expectations cannot match on which team was requested — confirm
// whether gs.Called(team) was intended.
func (gs *GameState) Team(team common.Team) *common.TeamState {
	return gs.Called().Get(0).(*common.TeamState)
}
// Participants is a mock-implementation of IGameState.Participants().
func (gs *GameState) Participants() dem.IParticipants {
return gs.Called().Get(0).(dem.IParticipants)
}
// GrenadeProjectiles is a mock-implementation of IGameState.GrenadeProjectiles().
func (gs *GameState) GrenadeProjectiles() map[int]*common.GrenadeProjectile {
return gs.Called().Get(0).(map[int]*common.GrenadeProjectile)
}
// Infernos is a mock-implementation of IGameState.Infernos().
func (gs *GameState) Infernos() map[int]*common.Inferno {
return gs.Called().Get(0).(map[int]*common.Inferno)
}
// Weapons is a mock-implementation of IGameState.Weapons().
func (gs *GameState) Weapons() map[int]*common.Equipment {
return gs.Called().Get(0).(map[int]*common.Equipment)
}
// Entities is a mock-implementation of IGameState.Entities().
func (gs *GameState) Entities() map[int]*st.Entity {
return gs.Called().Get(0).(map[int]*st.Entity)
}
// Bomb is a mock-implementation of IGameState.Bomb().
func (gs *GameState) Bomb() *common.Bomb {
return gs.Called().Get(0).(*common.Bomb)
}
// TotalRoundsPlayed is a mock-implementation of IGameState.TotalRoundsPlayed().
func (gs *GameState) TotalRoundsPlayed() int {
return gs.Called().Int(0)
}
// GamePhase is a mock-implementation of IGameState.GamePhase().
func (gs *GameState) GamePhase() common.GamePhase {
return gs.Called().Get(0).(common.GamePhase)
}
// IsWarmupPeriod is a mock-implementation of IGameState.IsWarmupPeriod().
func (gs *GameState) IsWarmupPeriod() bool {
return gs.Called().Bool(0)
}
// IsMatchStarted is a mock-implementation of IGameState.IsMatchStarted().
func (gs *GameState) IsMatchStarted() bool {
return gs.Called().Bool(0)
}
// ConVars is a mock-implementation of IGameState.ConVars().
func (gs *GameState) ConVars() map[string]string {
return gs.Called().Get(0).(map[string]string)
} | fake/game_state.go | 0.660282 | 0.408218 | game_state.go | starcoder |
package main
import (
"fmt"
)
var (
	//% is replaced with the "from" or "to" depot name
pathTransform = Transform{
From: "^//%s/",
To: "//%s/",
}
identTransform = Transform{
From: "^%s$",
To: "%s",
}
//Transforms are indexed by the <db name>:<field index> where field index enumerates from 0
//fields are documented here: https://www.perforce.com/perforce/doc.current/schema/
Transforms = map[string]Transform{
"db.archmap:0": pathTransform,
"db.archmap:1": pathTransform,
"db.change:7": pathTransform,
"db.changex:7": pathTransform,
"db.depot:0": identTransform,
"db.depot:3": {From: "^%s/", To: "%s/"},
"db.domain:0": identTransform,
"db.excl:0": pathTransform,
"db.graphperm:0": identTransform,
"db.have:1": pathTransform,
"db.have.pt:1": pathTransform,
"db.have.rp:1": pathTransform,
"db.haveg:2": pathTransform,
"db.haveview:4": pathTransform,
"db.integed:0": pathTransform,
"db.integed:1": pathTransform,
"db.integtx:0": pathTransform,
"db.integtx:1": pathTransform,
"db.label:1": pathTransform,
"db.locks:0": pathTransform,
"db.locksg:0": pathTransform,
"db.protect:6": pathTransform,
"db.resolve:0": pathTransform,
"db.resolve:1": pathTransform,
"db.resolve:8": pathTransform,
"db.resolveg:0": pathTransform,
"db.resolveg:1": pathTransform,
"db.resolvex:0": pathTransform,
"db.resolvex:1": pathTransform,
"db.resolvex:8": pathTransform,
"db.rev:0": pathTransform,
"db.rev:11": pathTransform,
"db.revbx:0": pathTransform,
"db.revbx:11": pathTransform,
"db.revcx:1": pathTransform,
"db.revdx:0": pathTransform,
"db.revdx:11": pathTransform,
"db.revhx:0": pathTransform,
"db.revhx:11": pathTransform,
"db.review:3": pathTransform,
"db.revpx:0": pathTransform,
"db.revpx:11": pathTransform,
"db.revsh:0": pathTransform,
"db.revsh:11": pathTransform,
"db.revsx:0": pathTransform,
"db.revsx:11": pathTransform,
"db.revtx:0": pathTransform,
"db.revtx:11": pathTransform,
"db.revux:0": pathTransform,
"db.revux:11": pathTransform,
"db.sendq:3": pathTransform,
"db.sendq:10": pathTransform,
"db.storage:0": pathTransform,
"db.template:7": pathTransform,
"db.templatesx:8": pathTransform,
"db.tamplatewx:8": pathTransform,
"db.trigger:3": pathTransform,
"db.trigger:4": pathTransform,
"db.trigger:6": {From: "%%//%s/", To: "%%//%s/"},
"db.user:2": pathTransform,
"db.view:3": pathTransform,
"db.view:4": pathTransform,
"db.view.rp:3": pathTransform,
"db.view.rp:4": pathTransform,
"db.working:1": pathTransform,
"db.working:17": pathTransform,
"db.workingg:1": pathTransform,
"db.workingg:17": pathTransform,
"db.workingx:0": {From: "^//([0-9]+)/%s/", To: "//$1/%s/"},
"db.workingx:1": pathTransform,
"db.workingx:17": pathTransform,
}
)
//Transform defines a specific transformation applied to a journal field.
//From is a regular-expression pattern and To its replacement; both may
//contain a %s placeholder that is filled with a depot name before use.
type Transform struct {
	From string
	To string
}
func transformer(t <-chan JournalLine, out chan<- JournalLine, batchArguments []BatchArgument) {
for input := range t {
if input.EndOfFile {
out <- input
break
}
if input.Parsed {
for idx := range input.RowElems {
key := fmt.Sprintf("%s:%d", input.Table, idx)
for _, batchArgument := range batchArguments {
// If an inclusion map was specified, do not proceed if the transform key is not found
if len(batchArgument.IncludedTransformsMap) > 0 {
if !batchArgument.IncludedTransformsMap[key] {
continue
}
// If an exclusion map was specified, do not proceed if the transform key is found
} else if len(batchArgument.ExcludedTransformsMap) > 0 {
if batchArgument.ExcludedTransformsMap[key] {
continue
}
}
					//apply transforms to each element if they have a
					//transform defined for their table and column index
if val, ok := Transforms[key]; ok {
input.RowElems[idx].applyTransform(
fmt.Sprintf(val.From, batchArgument.PathFrom),
fmt.Sprintf(val.To, batchArgument.PathTo),
)
}
}
}
}
out <- input
}
} | transforms.go | 0.512449 | 0.400017 | transforms.go | starcoder |
package node
import (
"fmt"
"github.com/bazo-blockchain/lazo/lexer/token"
"math/big"
)
// Node is the interface that wraps the basic Node functions.
type Node interface {
// Pos returns the position of the node in the source code.
// It is also the position of the first token.
Pos() token.Position
// String returns a readable string representation of the node.
String() string
// Accept lets a visitor to traverse its node structure.
Accept(v Visitor)
}
// AbstractNode contains node position, which all concrete nodes have.
type AbstractNode struct {
Position token.Position
}
// Pos returns the node position
func (n *AbstractNode) Pos() token.Position {
return n.Position
}
// StatementNode is the interface for statements, such as variable, assignment, if-statement etc.
type StatementNode interface {
Node
}
// ExpressionNode is the interface for expressions, such as literal, identifier, binary expression etc.
type ExpressionNode interface {
Node
}
// DesignatorNode is the interface for designators, such as identifier, member access and array access.
type DesignatorNode interface {
Node
}
// TypeNode is the interface for types, such as array types or basic types
type TypeNode interface {
Node
Type() string
}
// Concrete Nodes
// -------------------------
// ProgramNode composes abstract node and holds contract.
type ProgramNode struct {
AbstractNode
Contract *ContractNode
}
func (n *ProgramNode) String() string {
return getNodeString(n.Contract)
}
// Accept lets a visitor to traverse its node structure.
func (n *ProgramNode) Accept(v Visitor) {
v.VisitProgramNode(n)
}
// --------------------------
// ContractNode composes abstract node and holds a name, state variables,
// structs, an optional constructor and functions.
type ContractNode struct {
	AbstractNode
	Name string
	Fields []*FieldNode
	Structs []*StructNode
	Constructor *ConstructorNode
	Functions []*FunctionNode
}

func (n *ContractNode) String() string {
	var strConstructor string
	// Constructor is optional; guard against nil before calling String().
	if n.Constructor != nil {
		strConstructor = n.Constructor.String()
	}
	return fmt.Sprintf("[%s] CONTRACT %s \n FIELDS: %s \n\n STRUCTS: %s \n\n CONSTRUCTOR: %s \n\n FUNCS: %s",
		n.Pos(), n.Name, n.Fields, n.Structs, strConstructor, n.Functions)
}

// Accept lets a visitor to traverse its node structure
func (n *ContractNode) Accept(v Visitor) {
	v.VisitContractNode(n)
}
// --------------------------
// Contract Body Parts
// --------------------------
// FieldNode composes abstract node and holds the type, identifier and expression
type FieldNode struct {
AbstractNode
Type TypeNode
Identifier string
Expression ExpressionNode
}
func (n *FieldNode) String() string {
str := fmt.Sprintf("\n [%s] FIELD %s %s", n.Pos(), getNodeString(n.Type), n.Identifier)
if n.Expression != nil {
str += fmt.Sprintf(" = %s", getNodeString(n.Expression))
}
return str
}
// Accept lets a visitor to traverse its node structure
func (n *FieldNode) Accept(v Visitor) {
v.VisitFieldNode(n)
}
// --------------------------
// StructNode composes abstract node
type StructNode struct {
AbstractNode
Name string
Fields []*StructFieldNode
}
func (n *StructNode) String() string {
return fmt.Sprintf("\n [%s] STRUCT %s \n FIELDS: %s", n.Pos(), n.Name, n.Fields)
}
// Accept lets a visitor to traverse its node structure
func (n *StructNode) Accept(v Visitor) {
v.VisitStructNode(n)
}
// --------------------------
// StructFieldNode composes abstract node and holds the type and identifier
type StructFieldNode struct {
AbstractNode
Type TypeNode
Identifier string
}
func (n *StructFieldNode) String() string {
return fmt.Sprintf("\n [%s] FIELD %s %s", n.Pos(), getNodeString(n.Type), n.Identifier)
}
// Accept lets a visitor to traverse its node structure
func (n *StructFieldNode) Accept(v Visitor) {
v.VisitStructFieldNode(n)
}
// --------------------------
// ConstructorNode composes abstract node and holds parameters and statements.
type ConstructorNode struct {
AbstractNode
Parameters []*ParameterNode
Body []StatementNode
}
func (n *ConstructorNode) String() string {
return fmt.Sprintf("\n [%s] CONSTRUCTOR PARAMs %s, %s",
n.Pos(), n.Parameters, n.Body)
}
// Accept lets a visitor to traverse its node structure
func (n *ConstructorNode) Accept(v Visitor) {
v.VisitConstructorNode(n)
}
// --------------------------
// FunctionNode composes abstract node and holds a name, return types, parameters and statements.
type FunctionNode struct {
AbstractNode
Name string
ReturnTypes []TypeNode
Parameters []*ParameterNode
Body []StatementNode
}
func (n *FunctionNode) String() string {
return fmt.Sprintf("\n [%s] FUNCTION %s, PARAMs %s, RTYPES %s %s",
n.Pos(), n.Name, n.Parameters, n.ReturnTypes, n.Body)
}
// Accept lets a visitor to traverse its node structure
func (n *FunctionNode) Accept(v Visitor) {
v.VisitFunctionNode(n)
}
// --------------------------
// ParameterNode composes abstract node and holds the type and identifier
type ParameterNode struct {
AbstractNode
Type TypeNode
Identifier string
}
func (n *ParameterNode) String() string {
return fmt.Sprintf("\n [%s] PARAM %s %s", n.Pos(), getNodeString(n.Type), n.Identifier)
}
// Accept lets a visitor to traverse its node structure
func (n *ParameterNode) Accept(v Visitor) {
v.VisitParameterNode(n)
}
// --------------------------
// Statement Nodes
// --------------------------
// VariableNode composes abstract node and holds the type, identifier and expression
type VariableNode struct {
AbstractNode
Type TypeNode
Identifier string
Expression ExpressionNode
}
func (n *VariableNode) String() string {
str := fmt.Sprintf("\n [%s] VAR %s %s", n.Pos(), getNodeString(n.Type), n.Identifier)
if n.Expression != nil {
str += fmt.Sprintf(" = %s", getNodeString(n.Expression))
}
return str
}
// Accept lets a visitor to traverse its node structure
func (n *VariableNode) Accept(v Visitor) {
v.VisitVariableNode(n)
}
// --------------------------
// MultiVariableNode composes abstract node and holds multiple variables and a function call
type MultiVariableNode struct {
AbstractNode
Types []TypeNode
Identifiers []string
FuncCall *FuncCallNode
}
func (n *MultiVariableNode) String() string {
str := fmt.Sprintf("\n [%s] VARS", n.Pos())
for i, id := range n.Identifiers {
str += fmt.Sprintf(" %s %s", n.Types[i], id)
}
str += fmt.Sprintf(" = %s", getNodeString(n.FuncCall))
return str
}
// GetType returns the type of the given variable identifier
func (n *MultiVariableNode) GetType(id string) TypeNode {
for i, varID := range n.Identifiers {
if id == varID {
return n.Types[i]
}
}
return nil
}
// Accept lets a visitor to traverse its node structure
func (n *MultiVariableNode) Accept(v Visitor) {
v.VisitMultiVariableNode(n)
}
// --------------------------
// BasicTypeNode composes abstract node and holds the type identifier.
type BasicTypeNode struct {
AbstractNode
Identifier string
}
func (n *BasicTypeNode) String() string {
return n.Identifier
}
// Accept lets a visitor to traverse its node structure
func (n *BasicTypeNode) Accept(v Visitor) {
v.VisitBasicTypeNode(n)
}
// Type returns the unique type representation
func (n *BasicTypeNode) Type() string {
return n.Identifier
}
// --------------------------
// ArrayTypeNode composes abstract node and holds the type identifier.
type ArrayTypeNode struct {
AbstractNode
ElementType TypeNode
}
func (n *ArrayTypeNode) String() string {
return fmt.Sprintf("%s[]", n.ElementType)
}
// Accept lets a visitor to traverse its node structure
func (n *ArrayTypeNode) Accept(v Visitor) {
v.VisitArrayTypeNode(n)
}
// Type returns the unique type representation
func (n *ArrayTypeNode) Type() string {
return n.String()
}
// --------------------------
// MapTypeNode composes abstract node and holds the types of key and value.
type MapTypeNode struct {
AbstractNode
KeyType TypeNode
ValueType TypeNode
}
func (n *MapTypeNode) String() string {
return fmt.Sprintf("Map<%s,%s>", n.KeyType, n.ValueType)
}
// Accept lets a visitor to traverse its node structure
func (n *MapTypeNode) Accept(v Visitor) {
v.VisitMapTypeNode(n)
}
// Type returns the unique type representation
func (n *MapTypeNode) Type() string {
return n.String()
}
// --------------------------
// IfStatementNode composes abstract node and holds the condition, then and else statement block.
type IfStatementNode struct {
AbstractNode
Condition ExpressionNode
Then []StatementNode
Else []StatementNode
}
func (n *IfStatementNode) String() string {
return fmt.Sprintf("\n [%s] IF %s THEN %s ELSE %s", n.Pos(), getNodeString(n.Condition), n.Then, n.Else)
}
// Accept lets a visitor to traverse its node structure
func (n *IfStatementNode) Accept(v Visitor) {
v.VisitIfStatementNode(n)
}
// --------------------------
// ReturnStatementNode composes abstract node and holds the return expressions.
type ReturnStatementNode struct {
AbstractNode
Expressions []ExpressionNode
}
func (n *ReturnStatementNode) String() string {
return fmt.Sprintf("\n [%s] RETURNSTMT %s", n.Pos(), n.Expressions)
}
// Accept lets a visitor to traverse its node structure
func (n *ReturnStatementNode) Accept(v Visitor) {
v.VisitReturnStatementNode(n)
}
// --------------------------
// AssignmentStatementNode composes abstract node and holds the target designator and value expression.
type AssignmentStatementNode struct {
AbstractNode
Left DesignatorNode
Right ExpressionNode
}
func (n *AssignmentStatementNode) String() string {
return fmt.Sprintf("\n [%s] ASSIGN %s %s", n.Pos(), getNodeString(n.Left), getNodeString(n.Right))
}
// Accept lets a visitor to traverse its node structure
func (n *AssignmentStatementNode) Accept(v Visitor) {
v.VisitAssignmentStatementNode(n)
}
// --------------------------
// MultiAssignmentStatementNode composes abstract node and holds the target designators and a function call
type MultiAssignmentStatementNode struct {
AbstractNode
Designators []DesignatorNode
FuncCall *FuncCallNode
}
func (n *MultiAssignmentStatementNode) String() string {
str := fmt.Sprintf("\n [%s] VARS", n.Pos())
for _, id := range n.Designators {
str += fmt.Sprintf(" %s", id)
}
str += fmt.Sprintf(" = %s", getNodeString(n.FuncCall))
return str
}
// Accept lets a visitor to traverse its node structure
func (n *MultiAssignmentStatementNode) Accept(v Visitor) {
v.VisitMultiAssignmentStatementNode(n)
}
// --------------------------
// ShorthandAssignmentStatementNode composes abstract node and holds the designator, operator and expression.
type ShorthandAssignmentStatementNode struct {
AbstractNode
Designator DesignatorNode
Operator token.Symbol
Expression ExpressionNode
}
func (n *ShorthandAssignmentStatementNode) String() string {
return fmt.Sprintf("\n %s %s=%s", n.Designator, token.SymbolLexeme[n.Operator], n.Expression)
}
// Accept lets a visitor to traverse its node structure
func (n *ShorthandAssignmentStatementNode) Accept(v Visitor) {
v.VisitShorthandAssignmentNode(n)
}
// --------------------------
// CallStatementNode composes abstract node and holds the function call expression
type CallStatementNode struct {
AbstractNode
Call *FuncCallNode
}
func (n *CallStatementNode) String() string {
return fmt.Sprintf("\n [%s] CALL %s", n.Pos(), getNodeString(n.Call))
}
// Accept lets a visitor to traverse its node structure
func (n *CallStatementNode) Accept(v Visitor) {
v.VisitCallStatementNode(n)
}
// --------------------------
// DeleteStatementNode composes abstract node and holds the map element to be deleted
type DeleteStatementNode struct {
AbstractNode
Element *ElementAccessNode
}
func (n *DeleteStatementNode) String() string {
return fmt.Sprintf("\n delete %s", n.Element)
}
// Accept lets a visitor to traverse its node structure
func (n *DeleteStatementNode) Accept(v Visitor) {
v.VisitDeleteStatementNode(n)
}
// --------------------------
// Expression Nodes
// --------------------------
// TernaryExpressionNode composes abstract node and holds the binary operator and left & right expressions.
type TernaryExpressionNode struct {
AbstractNode
Condition ExpressionNode
Then ExpressionNode
Else ExpressionNode
}
func (n *TernaryExpressionNode) String() string {
return fmt.Sprintf("%s ? %s : %s", n.Condition, n.Then, n.Else)
}
// Accept lets a visitor to traverse its node structure
func (n *TernaryExpressionNode) Accept(v Visitor) {
v.VisitTernaryExpressionNode(n)
}
// --------------------------
// BinaryExpressionNode composes abstract node and holds the binary operator and left & right expressions.
type BinaryExpressionNode struct {
AbstractNode
Left ExpressionNode
Operator token.Symbol
Right ExpressionNode
}
func (n *BinaryExpressionNode) String() string {
return fmt.Sprintf("(%s %s %s)", n.Left, token.SymbolLexeme[n.Operator], getNodeString(n.Right))
}
// Accept lets a visitor to traverse its node structure
func (n *BinaryExpressionNode) Accept(v Visitor) {
v.VisitBinaryExpressionNode(n)
}
// --------------------------
// UnaryExpressionNode composes abstract node and holds the type, identifier and expression
type UnaryExpressionNode struct {
AbstractNode
Operator token.Symbol
Expression ExpressionNode
}
func (n *UnaryExpressionNode) String() string {
return fmt.Sprintf("(%s%s)", token.SymbolLexeme[n.Operator], getNodeString(n.Expression))
}
// Accept lets a visitor to traverse its node structure
func (n *UnaryExpressionNode) Accept(v Visitor) {
v.VisitUnaryExpressionNode(n)
}
// --------------------------
// TypeCastNode composes abstract node and holds the type and the designator
type TypeCastNode struct {
AbstractNode
Type *BasicTypeNode
Expression ExpressionNode
}
func (n *TypeCastNode) String() string {
return fmt.Sprintf("(%s) %s", n.Type, n.Expression)
}
// Accept lets a visitor to traverse its node structure
func (n *TypeCastNode) Accept(v Visitor) {
v.VisitTypeCastNode(n)
}
// --------------------------
// BasicDesignatorNode composes abstract node and holds the designator name.
type BasicDesignatorNode struct {
AbstractNode
Value string
}
func (n *BasicDesignatorNode) String() string {
return n.Value
}
// Accept lets a visitor to traverse its node structure.
func (n *BasicDesignatorNode) Accept(v Visitor) {
v.VisitBasicDesignatorNode(n)
}
// --------------------------
// ElementAccessNode composes abstract node and holds designator and expression
type ElementAccessNode struct {
AbstractNode
Designator DesignatorNode
Expression ExpressionNode
}
func (n *ElementAccessNode) String() string {
return fmt.Sprintf("%s[%s]", n.Designator, n.Expression)
}
// Accept lets a visitor traverse its node structure
func (n *ElementAccessNode) Accept(v Visitor) {
v.VisitElementAccessNode(n)
}
// --------------------------
//MemberAccessNode composes abstract node and holds designator and identifier
type MemberAccessNode struct {
AbstractNode
Designator DesignatorNode
Identifier string
}
func (n *MemberAccessNode) String() string {
return fmt.Sprintf("%s.%s", n.Designator, n.Identifier)
}
// Accept lets a visitor traverse its node structure
func (n *MemberAccessNode) Accept(v Visitor) {
v.VisitMemberAccessNode(n)
}
// --------------------------
// FuncCallNode composes abstract node and holds designator and arguments
type FuncCallNode struct {
AbstractNode
Designator DesignatorNode
Args []ExpressionNode
}
func (n *FuncCallNode) String() string {
return fmt.Sprintf("%s(%s)", n.Designator, n.Args)
}
// Accept lets a visitor traverse its node structure
func (n *FuncCallNode) Accept(v Visitor) {
v.VisitFuncCallNode(n)
}
// --------------------------
// StructCreationNode composes abstract node and holds the target struct and field arguments.
type StructCreationNode struct {
AbstractNode
Name string
FieldValues []ExpressionNode
}
func (n *StructCreationNode) String() string {
return fmt.Sprintf("%s(%s)", n.Name, n.FieldValues)
}
// Accept lets a visitor traverse its node structure
func (n *StructCreationNode) Accept(v Visitor) {
v.VisitStructCreationNode(n)
}
// --------------------------
// StructNamedCreationNode composes abstract node and holds the target struct and field arguments with field name.
type StructNamedCreationNode struct {
AbstractNode
Name string
FieldValues []*StructFieldAssignmentNode
}
func (n *StructNamedCreationNode) String() string {
return fmt.Sprintf("%s(%s)", n.Name, n.FieldValues)
}
// Accept lets a visitor traverse its node structure
func (n *StructNamedCreationNode) Accept(v Visitor) {
v.VisitStructNamedCreationNode(n)
}
// --------------------------
// StructFieldAssignmentNode composes abstract node and holds the target struct field name and expression.
type StructFieldAssignmentNode struct {
AbstractNode
Name string
Expression ExpressionNode
}
func (n *StructFieldAssignmentNode) String() string {
return fmt.Sprintf("%s=%s", n.Name, n.Expression)
}
// Accept lets a visitor traverse its node structure
func (n *StructFieldAssignmentNode) Accept(v Visitor) {
v.VisitStructFieldAssignmentNode(n)
}
// --------------------------
// ArrayLengthCreationNode composes abstract node and holds the target struct and field arguments.
type ArrayLengthCreationNode struct {
AbstractNode
ElementType TypeNode
Lengths []ExpressionNode
}
func (n *ArrayLengthCreationNode) String() string {
line := fmt.Sprintf("%s[%s]", n.ElementType, n.Lengths[0])
for i := 1; i < len(n.Lengths); i++ {
line = line + fmt.Sprintf("[%s]", n.Lengths[i])
}
return line
}
// Accept lets a visitor traverse its node structure
func (n *ArrayLengthCreationNode) Accept(v Visitor) {
v.VisitArrayLengthCreationNode(n)
}
// --------------------------
// ArrayValueCreationNode composes abstract node and holds the target struct and field arguments.
type ArrayValueCreationNode struct {
AbstractNode
Type TypeNode
Elements *ArrayInitializationNode
}
func (n *ArrayValueCreationNode) String() string {
return fmt.Sprintf("%s{%s}", n.Type, n.Elements)
}
// Accept lets a visitor traverse its node structure
func (n *ArrayValueCreationNode) Accept(v Visitor) {
v.VisitArrayValueCreationNode(n)
}
// --------------------------
// ArrayInitializationNode composes abstract node and holds the target struct and field arguments.
type ArrayInitializationNode struct {
AbstractNode
Values []ExpressionNode
}
func (n *ArrayInitializationNode) String() string {
return fmt.Sprintf("[%s]", n.Values)
}
// Accept lets a visitor traverse its node structure
func (n *ArrayInitializationNode) Accept(v Visitor) {
v.VisitArrayInitializationNode(n)
}
// --------------------------
// Literal Nodes
// --------------------------
// IntegerLiteralNode composes abstract node and holds the int value.
type IntegerLiteralNode struct {
AbstractNode
Value *big.Int
}
func (n *IntegerLiteralNode) String() string {
return fmt.Sprintf("%d", n.Value)
}
// Accept lets a visitor to traverse its node structure.
func (n *IntegerLiteralNode) Accept(v Visitor) {
v.VisitIntegerLiteralNode(n)
}
// --------------------------
// StringLiteralNode composes abstract node and holds string literal value.
type StringLiteralNode struct {
	AbstractNode
	Value string
}

// String returns the literal value itself (fmt.Sprintf("%s", v) was redundant).
func (n *StringLiteralNode) String() string {
	return n.Value
}

// Accept lets a visitor to traverse its node structure.
func (n *StringLiteralNode) Accept(v Visitor) {
	v.VisitStringLiteralNode(n)
}
// --------------------------
// CharacterLiteralNode composes abstract node and holds character literal value.
type CharacterLiteralNode struct {
AbstractNode
Value rune
}
func (n *CharacterLiteralNode) String() string {
return fmt.Sprintf("%c", n.Value)
}
// Accept lets a visitor to traverse its node structure
func (n *CharacterLiteralNode) Accept(v Visitor) {
v.VisitCharacterLiteralNode(n)
}
// --------------------------
// BoolLiteralNode composes abstract node and holds boolean literal value.
type BoolLiteralNode struct {
AbstractNode
Value bool
}
func (n *BoolLiteralNode) String() string {
return fmt.Sprintf("%t", n.Value)
}
// Accept lets a visitor to traverse its node structure
func (n *BoolLiteralNode) Accept(v Visitor) {
v.VisitBoolLiteralNode(n)
}
// --------------------------
// ErrorNode composes abstract node and holds the syntax error message.
type ErrorNode struct {
AbstractNode
Message string
}
func (n *ErrorNode) String() string {
return fmt.Sprintf("[%s] ERROR: %s", n.Pos(), n.Message)
}
// Accept lets a visitor to traverse its node structure.
func (n *ErrorNode) Accept(v Visitor) {
v.VisitErrorNode(n)
}
func getNodeString(node Node) string {
if node == nil {
return ""
}
return node.String()
} | parser/node/nodes.go | 0.738386 | 0.486941 | nodes.go | starcoder |
package problems
import "math"
/**
 * Definition for a binary tree node.
 */
type TreeNode struct {
	Val int // value stored at this node
	Left *TreeNode // left child, nil if absent
	Right *TreeNode // right child, nil if absent
}
// InorderTraversal returns the node values in left-root-right order,
// using an explicit stack instead of recursion. A nil tree yields an
// empty (non-nil) slice.
func InorderTraversal(root *TreeNode) []int {
	res := []int{}
	var stack []*TreeNode
	cur := root
	for cur != nil || len(stack) > 0 {
		// Descend as far left as possible, remembering the path.
		for cur != nil {
			stack = append(stack, cur)
			cur = cur.Left
		}
		top := len(stack) - 1
		cur = stack[top]
		stack = stack[:top]
		res = append(res, cur.Val)
		cur = cur.Right
	}
	return res
}
// 100. same-tree
// IsSameTree reports whether p and q are structurally identical trees
// with equal node values.
func IsSameTree(p *TreeNode, q *TreeNode) bool {
	switch {
	case p == nil && q == nil:
		return true
	case p == nil || q == nil:
		return false
	case p.Val != q.Val:
		return false
	}
	return IsSameTree(p.Left, q.Left) && IsSameTree(p.Right, q.Right)
}
//101. Symmetric Tree: check whether the tree is a mirror of itself.
// isSymmetric reports whether root's left and right subtrees are mirror
// images. A nil tree is symmetric (the original dereferenced root
// unconditionally and panicked on nil input).
func isSymmetric(root *TreeNode) bool {
	if root == nil {
		return true
	}
	return dfscheck(root.Left, root.Right)
}

// dfscheck reports whether left and right are mirror images of each other.
func dfscheck(left, right *TreeNode) bool {
	if left == nil && right == nil {
		return true
	}
	if left == nil || right == nil {
		return false
	}
	return left.Val == right.Val && dfscheck(left.Left, right.Right) && dfscheck(left.Right, right.Left)
}
//104. Maximum Depth of Binary Tree
// maxDepth returns the number of nodes on the longest root-to-leaf path;
// a nil tree has depth 0.
func maxDepth(root *TreeNode) int {
	if root == nil {
		return 0
	}
	if root.Left == nil && root.Right == nil {
		return 1
	}
	l := maxDepth(root.Left)
	r := maxDepth(root.Right)
	return max(l, r) + 1
}
//108. Convert Sorted Array to Binary Search Tree
// sortedArrayToBST builds a height-balanced BST from the sorted slice by
// recursively rooting at the middle element. The original allocated a
// TreeNode before the empty check, wasting an allocation on every nil
// branch; the node is now built only when needed.
func sortedArrayToBST(nums []int) *TreeNode {
	if len(nums) == 0 {
		return nil
	}
	mid := len(nums) / 2
	return &TreeNode{
		Val:   nums[mid],
		Left:  sortedArrayToBST(nums[:mid]),
		Right: sortedArrayToBST(nums[mid+1:]),
	}
}
//110. Balanced Binary Tree: every node's subtree heights differ by at most 1.
// isBalanced reports whether the tree is height-balanced.
func isBalanced(root *TreeNode) bool {
	return height(root) >= 0
}

// height returns the height of root, or -1 as soon as any subtree is
// found to be unbalanced (short-circuiting the rest of the walk).
func height(root *TreeNode) int {
	if root == nil {
		return 0
	}
	lh, rh := height(root.Left), height(root.Right)
	switch {
	case lh == -1 || rh == -1:
		return -1
	case math.Abs(float64(lh-rh)) > 1:
		return -1
	}
	return int(math.Max(float64(lh), float64(rh))) + 1
}
// 111. Minimum Depth of Binary Tree.
// minDepth returns the number of nodes on the shortest root-to-leaf
// path. A missing child is not a leaf, so one-child nodes recurse into
// the existing child only.
func minDepth(root *TreeNode) int {
	switch {
	case root == nil:
		return 0
	case root.Left == nil:
		return minDepth(root.Right) + 1
	case root.Right == nil:
		return minDepth(root.Left) + 1
	default:
		return min(minDepth(root.Left), minDepth(root.Right)) + 1
	}
}
// 112. Path Sum: report whether some root-to-leaf path's node values
// sum exactly to targetSum.
func hasPathSum(root *TreeNode, targetSum int) bool {
	if root == nil {
		return false
	}
	remaining := targetSum - root.Val
	if root.Left == nil && root.Right == nil {
		// Leaf: the path is complete, check the running total.
		return remaining == 0
	}
	return hasPathSum(root.Left, remaining) || hasPathSum(root.Right, remaining)
}
// 144. Binary Tree Preorder Traversal (root, left, right).
// An empty tree yields an empty, non-nil slice.
func preorderTraversal(root *TreeNode) (res []int) {
	if root == nil {
		return []int{}
	}
	res = append(res, root.Val)
	for _, child := range []*TreeNode{root.Left, root.Right} {
		res = append(res, preorderTraversal(child)...)
	}
	return res
}
// 145. Binary Tree Postorder Traversal (left, right, root).
// An empty tree yields an empty, non-nil slice.
func postorderTraversal(root *TreeNode) (res []int) {
	if root == nil {
		return []int{}
	}
	for _, child := range []*TreeNode{root.Left, root.Right} {
		res = append(res, postorderTraversal(child)...)
	}
	return append(res, root.Val)
}
// getAllElements merges the sorted in-order sequences of two binary
// search trees into one ascending slice.
func getAllElements(root1 *TreeNode, root2 *TreeNode) []int {
	a := inorderTraversal(root1)
	b := inorderTraversal(root2)
	out := make([]int, 0, len(a)+len(b))
	i, j := 0, 0
	// Standard two-pointer merge of two sorted slices.
	for i < len(a) && j < len(b) {
		if a[i] < b[j] {
			out = append(out, a[i])
			i++
		} else {
			out = append(out, b[j])
			j++
		}
	}
	out = append(out, a[i:]...)
	return append(out, b[j:]...)
}
// inorderTraversal returns the tree's values in left-root-right order.
// An empty tree yields an empty, non-nil slice.
func inorderTraversal(root *TreeNode) []int {
	if root == nil {
		return []int{}
	}
	res := append(inorderTraversal(root.Left), root.Val)
	return append(res, inorderTraversal(root.Right)...)
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
} | problems/binary_tree.go | 0.630344 | 0.48121 | binary_tree.go | starcoder |
Package ntp implements the NTP packet and basic functions to work with it.
It provides quick and transparent translation between 48 bytes and
simply accessible struct in the most efficient way.
*/
package ntp
import (
"net"
"time"
)
// NanosecondsToUnix is the difference between NTP and Unix epoch in NS
const NanosecondsToUnix = int64(2208988800000000000)
// Time is converting Unix time to sec and frac NTP format
func Time(t time.Time) (seconds uint32, fracions uint32) {
nsec := t.UnixNano() + NanosecondsToUnix
sec := nsec / time.Second.Nanoseconds()
return uint32(sec), uint32((nsec - sec*time.Second.Nanoseconds()) << 32 / time.Second.Nanoseconds())
}
// Unix converts an NTP seconds/fractions pair back into a Unix time.Time.
func Unix(seconds, fractions uint32) time.Time {
	sec := int64(seconds) - NanosecondsToUnix/time.Second.Nanoseconds()
	// Scale the 32-bit binary fraction up to nanoseconds.
	nano := (int64(fractions) * time.Second.Nanoseconds()) >> 32
	return time.Unix(sec, nano)
}
// abs returns the absolute value of x.
func abs(x int64) int64 {
	if x >= 0 {
		return x
	}
	return -x
}
// AvgNetworkDelay uses formula from RFC #958 to calculate average network delay
func AvgNetworkDelay(clientTransmitTime, serverReceiveTime, serverTransmitTime, clientReceiveTime time.Time) int64 {
forwardPath := serverReceiveTime.Sub(clientTransmitTime).Nanoseconds()
returnPath := clientReceiveTime.Sub(serverTransmitTime).Nanoseconds()
return abs(forwardPath+returnPath) / 2
}
// CurrentRealTime returns "true" unix time after adjusting to avg network offset
func CurrentRealTime(serverTransmitTime time.Time, avgNetworkDelay int64) time.Time {
return serverTransmitTime.Add(time.Duration(avgNetworkDelay) * time.Nanosecond)
}
// CalculateOffset returns offset between local time and "real" time
func CalculateOffset(currentRealTime, curentLocaTime time.Time) int64 {
return currentRealTime.UnixNano() - curentLocaTime.UnixNano()
}
// connFd returns the underlying OS file descriptor of a UDP connection,
// or -1 and an error if it cannot be obtained.
// NOTE(review): the descriptor is captured inside the Control callback;
// it is only guaranteed valid while conn remains open.
func connFd(conn *net.UDPConn) (int, error) {
	sc, err := conn.SyscallConn()
	if err != nil {
		return -1, err
	}
	var intfd int
	err = sc.Control(func(fd uintptr) {
		intfd = int(fd)
	})
	if err != nil {
		return -1, err
	}
	return intfd, nil
} | protocol/ntp/ntp.go | 0.748904 | 0.607721 | ntp.go | starcoder |
package pdp
// Expression abstracts any PDP expression.
// The GetResultType method returns type of particular expression.
// The Calculate method returns calculated value for particular expression.
type Expression interface {
GetResultType() Type
Calculate(ctx *Context) (AttributeValue, error)
}
// functionMaker builds a function expression from a set of argument
// expressions.
type functionMaker func(args []Expression) Expression

// functionArgumentValidator inspects candidate arguments and returns a
// maker when they are acceptable for the function, or nil when not.
type functionArgumentValidator func(args []Expression) functionMaker
// FunctionArgumentValidators maps function name to list of validators.
// For given set of arguments validator returns nil if the function
// doesn't accept the arguments or function which creates expression based
// on desired function and set of argument expressions.
var FunctionArgumentValidators = map[string][]functionArgumentValidator{
"equal": {
functionStringEqualValidator,
functionIntegerEqualValidator,
functionFloatEqualValidator,
functionListOfStringsEqualValidator,
functionSetOfStringsEqualValidator,
},
"greater": {
functionIntegerGreaterValidator,
functionFloatGreaterValidator,
},
"add": {
functionIntegerAddValidator,
functionFloatAddValidator,
},
"subtract": {
functionIntegerSubtractValidator,
functionFloatSubtractValidator,
},
"multiply": {
functionIntegerMultiplyValidator,
functionFloatMultiplyValidator,
},
"divide": {
functionIntegerDivideValidator,
functionFloatDivideValidator,
},
"contains": {
functionStringContainsValidator,
functionListOfStringsContainsValidator,
functionNetworkContainsAddressValidator,
functionSetOfStringsContainsValidator,
functionSetOfNetworksContainsAddressValidator,
functionSetOfDomainsContainsValidator,
},
"not": {functionBooleanNotValidator},
"or": {functionBooleanOrValidator},
"and": {functionBooleanAndValidator},
"range": {
functionIntegerRangeValidator,
functionFloatRangeValidator,
},
"list of strings": {
functionListOfStringsValidator,
},
"intersect": {
functionListOfStringsIntersectValidator,
functionSetOfStringsIntersectValidator,
},
"len": {
functionListOfStringsLenValidator,
functionSetOfStringsLenValidator,
},
"concat": {
functionConcatValidator,
},
"try": {
functionTryValidator,
},
} | pdp/expression.go | 0.585931 | 0.532729 | expression.go | starcoder |
package validator
import "fmt"
// DigitsBetweenFloat64 reports whether value lies within the closed
// interval spanned by left and right (accepted in either order).
func DigitsBetweenFloat64(value, left, right float64) bool {
	lo, hi := left, right
	if lo > hi {
		lo, hi = hi, lo
	}
	return lo <= value && value <= hi
}
// MaxFloat64 is the validation function for validating if the current
// field's value is less than or equal to the param's value.
func MaxFloat64(v, param float64) bool {
	// Trivial delegation to LteFloat64 inlined.
	return v <= param
}
// MinFloat64 is the validation function for validating if the current
// field's value is greater than or equal to the param's value.
func MinFloat64(v, param float64) bool {
	// Trivial delegation to GteFloat64 inlined.
	return v >= param
}
// LtFloat64 is the validation function for validating if the current field's value is less than the param's value.
func LtFloat64(v, param float64) bool {
return v < param
}
// LteFloat64 is the validation function for validating if the current field's value is less than or equal to the param's value.
func LteFloat64(v, param float64) bool {
return v <= param
}
// GteFloat64 is the validation function for validating if the current field's value is greater than or equal to the param's value.
func GteFloat64(v, param float64) bool {
return v >= param
}
// GtFloat64 is the validation function for validating if the current field's value is greater than to the param's value.
func GtFloat64(v, param float64) bool {
return v > param
}
// compareFloat64 evaluates `first <operator> second` for the supported
// relational operators. It panics on an unknown operator — that is a
// programmer error, not bad input.
func compareFloat64(first float64, second float64, operator string) bool {
	switch operator {
	case "==":
		return first == second
	case "<":
		return first < second
	case "<=":
		return first <= second
	case ">":
		return first > second
	case ">=":
		return first >= second
	}
	panic(fmt.Sprintf("validator: compareFloat64 unsupport operator %s", operator))
}
// DistinctFloat32 is the validation function for validating that all
// values in the slice are unique.
//
// Fix: the previous implementation called inArrayFloat32(v, v), which
// compares the slice against itself and therefore returned true for
// every non-empty slice; the distinct check could never fail. It now
// reports false as soon as a duplicate value is found.
func DistinctFloat32(v []float32) bool {
	seen := make(map[float32]struct{}, len(v))
	for _, x := range v {
		if _, dup := seen[x]; dup {
			return false
		}
		seen[x] = struct{}{}
	}
	return true
}
// DistinctFloat64 is the validation function for validating that all
// values in the slice are unique.
//
// Fix: the previous implementation called inArrayFloat64(v, v), which
// compares the slice against itself and therefore returned true for
// every non-empty slice; the distinct check could never fail. It now
// reports false as soon as a duplicate value is found.
func DistinctFloat64(v []float64) bool {
	seen := make(map[float64]struct{}, len(v))
	for _, x := range v {
		if _, dup := seen[x]; dup {
			return false
		}
		seen[x] = struct{}{}
	}
	return true
}
// inArrayFloat32 reports whether any element of needle also occurs in
// haystack.
func inArrayFloat32(needle []float32, haystack []float32) bool {
	for i := range needle {
		for j := range haystack {
			if needle[i] == haystack[j] {
				return true
			}
		}
	}
	return false
}
// inArrayFloat64 reports whether any element of needle also occurs in
// haystack.
func inArrayFloat64(needle []float64, haystack []float64) bool {
	for _, n := range needle {
		for _, s := range haystack {
			if n == s {
				return true
			}
		}
	}
	return false
} | validator_float.go | 0.819026 | 0.728024 | validator_float.go | starcoder |
package ffi
import (
"reflect"
"github.com/kode4food/ale/data"
)
type (
floatWrapper reflect.Kind
intWrapper reflect.Kind
stringWrapper reflect.Kind
boolWrapper bool
)
var (
_stringWrapper stringWrapper
_boolWrapper boolWrapper
float32zero = reflect.ValueOf(float32(0))
float64zero = reflect.ValueOf(float64(0))
int64zero = reflect.ValueOf(int64(0))
int32zero = reflect.ValueOf(int32(0))
int16zero = reflect.ValueOf(int16(0))
int8zero = reflect.ValueOf(int8(0))
intZero = reflect.ValueOf(int(0))
uint64zero = reflect.ValueOf(uint64(0))
uint32zero = reflect.ValueOf(uint32(0))
uint16zero = reflect.ValueOf(uint16(0))
uint8zero = reflect.ValueOf(uint8(0))
uintZero = reflect.ValueOf(uint(0))
boolZero = reflect.ValueOf(false)
)
func makeWrappedFloat(t reflect.Type) Wrapper {
return floatWrapper(t.Kind())
}
func makeWrappedInt(t reflect.Type) Wrapper {
return intWrapper(t.Kind())
}
func makeWrappedBool(_ reflect.Type) Wrapper {
return _boolWrapper
}
func makeWrappedString(_ reflect.Type) Wrapper {
return _stringWrapper
}
func (f floatWrapper) Wrap(_ *WrapContext, v reflect.Value) data.Value {
if !v.IsValid() {
return data.Nil
}
return data.Float(v.Float())
}
// Unwrap converts a data.Float into a reflect.Value of the wrapper's
// concrete float kind. A nil value unwraps to that kind's zero value.
func (f floatWrapper) Unwrap(_ *UnwrapContext, v data.Value) reflect.Value {
	switch reflect.Kind(f) {
	case reflect.Float32:
		if v == nil {
			return float32zero
		}
		return reflect.ValueOf(float32(v.(data.Float)))
	case reflect.Float64:
		if v == nil {
			return float64zero
		}
		return reflect.ValueOf(float64(v.(data.Float)))
	}
	// Unreachable when the wrapper was built by makeWrappedFloat.
	panic("float kind is incorrect")
}
func (i intWrapper) Wrap(_ *WrapContext, v reflect.Value) data.Value {
if !v.IsValid() {
return data.Nil
}
return data.Integer(v.Int())
}
// Unwrap converts a data.Integer into a reflect.Value of the wrapper's
// concrete integer kind. A nil value unwraps to that kind's
// preallocated zero value.
//
// Refactor: the nil check was repeated in all ten cases; it is now
// handled once up front, keeping the per-kind conversion switch flat.
func (i intWrapper) Unwrap(_ *UnwrapContext, v data.Value) reflect.Value {
	if v == nil {
		switch reflect.Kind(i) {
		case reflect.Int64:
			return int64zero
		case reflect.Int32:
			return int32zero
		case reflect.Int16:
			return int16zero
		case reflect.Int8:
			return int8zero
		case reflect.Int:
			return intZero
		case reflect.Uint64:
			return uint64zero
		case reflect.Uint32:
			return uint32zero
		case reflect.Uint16:
			return uint16zero
		case reflect.Uint8:
			return uint8zero
		case reflect.Uint:
			return uintZero
		}
		panic("int kind is incorrect")
	}
	n := v.(data.Integer)
	switch reflect.Kind(i) {
	case reflect.Int64:
		return reflect.ValueOf(int64(n))
	case reflect.Int32:
		return reflect.ValueOf(int32(n))
	case reflect.Int16:
		return reflect.ValueOf(int16(n))
	case reflect.Int8:
		return reflect.ValueOf(int8(n))
	case reflect.Int:
		return reflect.ValueOf(int(n))
	case reflect.Uint64:
		return reflect.ValueOf(uint64(n))
	case reflect.Uint32:
		return reflect.ValueOf(uint32(n))
	case reflect.Uint16:
		return reflect.ValueOf(uint16(n))
	case reflect.Uint8:
		return reflect.ValueOf(uint8(n))
	case reflect.Uint:
		return reflect.ValueOf(uint(n))
	}
	panic("int kind is incorrect")
}
func (stringWrapper) Wrap(_ *WrapContext, v reflect.Value) data.Value {
if !v.IsValid() {
return data.Nil
}
return data.String(v.Interface().(string))
}
func (stringWrapper) Unwrap(_ *UnwrapContext, v data.Value) reflect.Value {
if v == nil {
v = data.Nil
}
return reflect.ValueOf(v.String())
}
func (b boolWrapper) Wrap(_ *WrapContext, v reflect.Value) data.Value {
if !v.IsValid() {
return data.False
}
return data.Bool(v.Bool())
}
func (b boolWrapper) Unwrap(_ *UnwrapContext, v data.Value) reflect.Value {
if v == nil {
return boolZero
}
return reflect.ValueOf(bool(v.(data.Bool)))
} | ffi/primitive.go | 0.540924 | 0.421254 | primitive.go | starcoder |
package snowball
import (
"fmt"
"github.com/ava-labs/avalanchego/ids"
)
// Consensus represents a general snow instance that can be used directly to
// process the results of network queries.
type Consensus interface {
fmt.Stringer
// Takes in alpha, beta1, beta2, and the initial choice
Initialize(params Parameters, initialPreference ids.ID)
// Returns the parameters that describe this snowball instance
Parameters() Parameters
// Adds a new choice to vote on
Add(newChoice ids.ID)
// Returns the currently preferred choice to be finalized
Preference() ids.ID
// RecordPoll records the results of a network poll. Assumes all choices
// have been previously added.
RecordPoll(votes ids.Bag)
// RecordUnsuccessfulPoll resets the snowflake counters of this consensus
// instance
RecordUnsuccessfulPoll()
// Return whether a choice has been finalized
Finalized() bool
}
// NnarySnowball augments NnarySnowflake with a counter that tracks the total
// number of positive responses from a network sample.
type NnarySnowball interface{ NnarySnowflake }
// NnarySnowflake is a snowflake instance deciding between an unbounded number
// of values. After performing a network sample of k nodes, if you have alpha
// votes for one of the choices, you should vote for that choice. Otherwise, you
// should reset.
type NnarySnowflake interface {
fmt.Stringer
// Takes in beta1, beta2, and the initial choice
Initialize(betaVirtuous, betaRogue int, initialPreference ids.ID)
// Adds a new possible choice
Add(newChoice ids.ID)
// Returns the currently preferred choice to be finalized
Preference() ids.ID
// RecordSuccessfulPoll records a successful poll towards finalizing the
// specified choice. Assumes the choice was previously added.
RecordSuccessfulPoll(choice ids.ID)
// RecordUnsuccessfulPoll resets the snowflake counter of this instance
RecordUnsuccessfulPoll()
// Return whether a choice has been finalized
Finalized() bool
}
// NnarySlush is a slush instance deciding between an unbounded number of
// values. After performing a network sample of k nodes, if you have alpha
// votes for one of the choices, you should vote for that choice.
type NnarySlush interface {
fmt.Stringer
// Takes in the initial choice
Initialize(initialPreference ids.ID)
// Returns the currently preferred choice to be finalized
Preference() ids.ID
// RecordSuccessfulPoll records a successful poll towards finalizing the
// specified choice. Assumes the choice was previously added.
RecordSuccessfulPoll(choice ids.ID)
}
// BinarySnowball augments BinarySnowflake with a counter that tracks the total
// number of positive responses from a network sample.
type BinarySnowball interface{ BinarySnowflake }
// BinarySnowflake is a snowball instance deciding between two values
// After performing a network sample of k nodes, if you have alpha votes for
// one of the choices, you should vote for that choice. Otherwise, you should
// reset.
type BinarySnowflake interface {
fmt.Stringer
// Takes in the beta value, and the initial choice
Initialize(beta, initialPreference int)
// Returns the currently preferred choice to be finalized
Preference() int
// RecordSuccessfulPoll records a successful poll towards finalizing the
// specified choice
RecordSuccessfulPoll(choice int)
// RecordUnsuccessfulPoll resets the snowflake counter of this instance
RecordUnsuccessfulPoll()
// Return whether a choice has been finalized
Finalized() bool
}
// BinarySlush is a slush instance deciding between two values. After performing
// a network sample of k nodes, if you have alpha votes for one of the choices,
// you should vote for that choice.
type BinarySlush interface {
fmt.Stringer
// Takes in the initial choice
Initialize(initialPreference int)
// Returns the currently preferred choice to be finalized
Preference() int
// RecordSuccessfulPoll records a successful poll towards finalizing the
// specified choice
RecordSuccessfulPoll(choice int)
}
// UnarySnowball is a snowball instance deciding on one value. After performing
// a network sample of k nodes, if you have alpha votes for the choice, you
// should vote. Otherwise, you should reset.
type UnarySnowball interface {
fmt.Stringer
// Takes in the beta value
Initialize(beta int)
// RecordSuccessfulPoll records a successful poll towards finalizing
RecordSuccessfulPoll()
// RecordUnsuccessfulPoll resets the snowflake counter of this instance
RecordUnsuccessfulPoll()
// Return whether a choice has been finalized
Finalized() bool
// Returns a new binary snowball instance with the agreement parameters
// transferred. Takes in the new beta value and the original choice
Extend(beta, originalPreference int) BinarySnowball
// Returns a new unary snowball instance with the same state
Clone() UnarySnowball
}
// UnarySnowflake is a snowflake instance deciding on one value. After
// performing a network sample of k nodes, if you have alpha votes for the
// choice, you should vote. Otherwise, you should reset.
type UnarySnowflake interface {
fmt.Stringer
// Takes in the beta value
Initialize(beta int)
// RecordSuccessfulPoll records a successful poll towards finalizing
RecordSuccessfulPoll()
// RecordUnsuccessfulPoll resets the snowflake counter of this instance
RecordUnsuccessfulPoll()
// Return whether a choice has been finalized
Finalized() bool
// Returns a new binary snowball instance with the agreement parameters
// transferred. Takes in the new beta value and the original choice
Extend(beta, originalPreference int) BinarySnowflake
// Returns a new unary snowflake instance with the same state
Clone() UnarySnowflake
} | snow/consensus/snowball/consensus.go | 0.760384 | 0.440168 | consensus.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// OptionalClaims
type OptionalClaims struct {
// The optional claims returned in the JWT access token.
accessToken []OptionalClaimable
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// The optional claims returned in the JWT ID token.
idToken []OptionalClaimable
// The optional claims returned in the SAML token.
saml2Token []OptionalClaimable
}
// NewOptionalClaims instantiates a new optionalClaims and sets the default values.
func NewOptionalClaims()(*OptionalClaims) {
m := &OptionalClaims{
}
m.SetAdditionalData(make(map[string]interface{}));
return m
}
// CreateOptionalClaimsFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateOptionalClaimsFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewOptionalClaims(), nil
}
// GetAccessToken gets the accessToken property value. The optional claims returned in the JWT access token.
func (m *OptionalClaims) GetAccessToken()([]OptionalClaimable) {
    // Nil receiver yields a nil slice.
    if m == nil {
        return nil
    }
    return m.accessToken
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *OptionalClaims) GetAdditionalData()(map[string]interface{}) {
if m == nil {
return nil
} else {
return m.additionalData
}
}
// GetFieldDeserializers the deserialization information for the current model
func (m *OptionalClaims) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
res["accessToken"] = func (n i<PASSWORD>5<PASSWORD>f91.ParseNode) error {
val, err := n.GetCollectionOfObjectValues(CreateOptionalClaimFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
res := make([]OptionalClaimable, len(val))
for i, v := range val {
res[i] = v.(OptionalClaimable)
}
m.SetAccessToken(res)
}
return nil
}
res["idToken"] = func (n i<PASSWORD>.ParseNode) error {
val, err := n.GetCollectionOfObjectValues(CreateOptionalClaimFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
res := make([]OptionalClaimable, len(val))
for i, v := range val {
res[i] = v.(OptionalClaimable)
}
m.SetIdToken(res)
}
return nil
}
res["saml2Token"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetCollectionOfObjectValues(CreateOptionalClaimFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
res := make([]OptionalClaimable, len(val))
for i, v := range val {
res[i] = v.(OptionalClaimable)
}
m.SetSaml2Token(res)
}
return nil
}
return res
}
// GetIdToken gets the idToken property value. The optional claims returned in the JWT ID token.
func (m *OptionalClaims) GetIdToken()([]OptionalClaimable) {
if m == nil {
return nil
} else {
return m.idToken
}
}
// GetSaml2Token gets the saml2Token property value. The optional claims returned in the SAML token.
func (m *OptionalClaims) GetSaml2Token()([]OptionalClaimable) {
if m == nil {
return nil
} else {
return m.saml2Token
}
}
// Serialize serializes information the current object
func (m *OptionalClaims) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
if m.GetAccessToken() != nil {
cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetAccessToken()))
for i, v := range m.GetAccessToken() {
cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
}
err := writer.WriteCollectionOfObjectValues("accessToken", cast)
if err != nil {
return err
}
}
if m.GetIdToken() != nil {
cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetIdToken()))
for i, v := range m.GetIdToken() {
cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
}
err := writer.WriteCollectionOfObjectValues("idToken", cast)
if err != nil {
return err
}
}
if m.GetSaml2Token() != nil {
cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetSaml2Token()))
for i, v := range m.GetSaml2Token() {
cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
}
err := writer.WriteCollectionOfObjectValues("saml2Token", cast)
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
}
// SetAccessToken sets the accessToken property value. The optional claims returned in the JWT access token.
func (m *OptionalClaims) SetAccessToken(value []OptionalClaimable)() {
if m != nil {
m.accessToken = value
}
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *OptionalClaims) SetAdditionalData(value map[string]interface{})() {
if m != nil {
m.additionalData = value
}
}
// SetIdToken sets the idToken property value. The optional claims returned in the JWT ID token.
func (m *OptionalClaims) SetIdToken(value []OptionalClaimable)() {
if m != nil {
m.idToken = value
}
}
// SetSaml2Token sets the saml2Token property value. The optional claims returned in the SAML token.
func (m *OptionalClaims) SetSaml2Token(value []OptionalClaimable)() {
if m != nil {
m.saml2Token = value
}
} | models/optional_claims.go | 0.742515 | 0.407805 | optional_claims.go | starcoder |
package duration
import (
"errors"
"time"
)
// Duration is a standard unit of time.
type Duration time.Duration
// String returns a string representing the duration in the form "3d1h3m".
// Leading zero units are omitted. As a special case, durations less than one
// second format use a smaller unit (milli-, micro-, or nanoseconds) to ensure
// that the leading digit is non-zero. Duration more than a day or more than a
// week lose granularity and are truncated to resp. days-hours-minutes and
// weeks-days-hours. The zero duration formats as 0s.
func (d Duration) String() string {
// Largest time is 2540400h10m10.000000000s
var buf [32]byte
w := len(buf)
u := uint64(d)
neg := d < 0
if neg {
u = -u
}
if u < uint64(Second) {
// Special case: if duration is smaller than a second,
// use smaller units, like 1.2ms
var prec int
w--
buf[w] = 's'
w--
switch {
case u == 0:
return "0s"
case u < uint64(Microsecond):
// print nanoseconds
prec = 0
buf[w] = 'n'
case u < uint64(Millisecond):
// print microseconds
prec = 3
// U+00B5 'µ' micro sign == 0xC2 0xB5
w-- // Need room for two bytes.
copy(buf[w:], "µ")
default:
// print milliseconds
prec = 6
buf[w] = 'm'
}
w, u = fmtFrac(buf[:w], u, prec)
w = fmtInt(buf[:w], u)
} else if u >= uint64(Week) {
// Special case: if duration is larger than a week,
// use bigger units like 4w3d2h
u /= uint64(Hour)
if u%24 != 0 {
w--
buf[w] = 'h'
// u is now integer hours
w = fmtInt(buf[:w], u%24)
}
u /= 24
// u is now integer days
if u > 0 {
if u%7 != 0 {
w--
buf[w] = 'd'
w = fmtInt(buf[:w], u%7)
}
u /= 7
// u is now integer weeks
// Stop at hours because days can be different lengths.
if u > 0 {
w--
buf[w] = 'w'
w = fmtInt(buf[:w], u)
}
}
} else if u >= uint64(Day) {
// Special case: if duration is larger than a day,
// use bigger units like 3d2h6m
u /= uint64(Minute)
if u%60 != 0 {
w--
buf[w] = 'm'
// u is now integer minutes
w = fmtInt(buf[:w], u%60)
}
u /= 60
// u is now integer hours
if u > 0 {
if u%24 != 0 {
w--
buf[w] = 'h'
w = fmtInt(buf[:w], u%24)
}
u /= 24
// u is now integer weeks
if u > 0 {
w--
buf[w] = 'd'
w = fmtInt(buf[:w], u)
}
}
} else {
if !zeroSeconds(u, 9) {
w--
buf[w] = 's'
w, u = fmtFrac(buf[:w], u, 9)
w = fmtInt(buf[:w], u%60)
} else {
u /= 1000000000 // = 10^9
}
// u is now integer seconds
u /= 60
// u is now integer minutes
if u > 0 {
if u%60 != 0 {
w--
buf[w] = 'm'
w = fmtInt(buf[:w], u%60)
}
u /= 60
// u is now integer hours
// Stop at hours because days can be different lengths.
if u > 0 {
w--
buf[w] = 'h'
w = fmtInt(buf[:w], u)
}
}
}
if neg {
w--
buf[w] = '-'
}
return string(buf[w:])
}
// zeroSeconds reports whether v, interpreted as seconds scaled by
// 10**prec, has no fractional part and a whole-second count divisible
// by 60 — i.e. nothing for a seconds field to display.
func zeroSeconds(v uint64, prec int) bool {
	for ; prec > 0; prec-- {
		if v%10 != 0 {
			return false // non-zero fractional digit
		}
		v /= 10
	}
	return v%60 == 0
}
// fmtFrac formats the fraction of v/10**prec (e.g., ".12345") into the
// tail of buf, omitting trailing zeros, and omits the decimal point
// entirely when the fraction is 0. It returns the index where the
// output begins and the remaining whole part v/10**prec.
func fmtFrac(buf []byte, v uint64, prec int) (nw int, nv uint64) {
	w := len(buf)
	printing := false
	for ; prec > 0; prec-- {
		digit := v % 10
		v /= 10
		if !printing && digit == 0 {
			continue // drop trailing zeros
		}
		printing = true
		w--
		buf[w] = byte(digit) + '0'
	}
	if printing {
		w--
		buf[w] = '.'
	}
	return w, v
}
// fmtInt formats v into the tail of buf and returns the index where
// the output begins.
func fmtInt(buf []byte, v uint64) int {
	w := len(buf)
	if v == 0 {
		w--
		buf[w] = '0'
		return w
	}
	for ; v > 0; v /= 10 {
		w--
		buf[w] = '0' + byte(v%10)
	}
	return w
}
// Nanoseconds returns the duration as an integer nanosecond count.
func (d Duration) Nanoseconds() int64 { return int64(d) }
// Seconds returns the duration as a floating point number of seconds.
func (d Duration) Seconds() float64 {
sec := d / Second
nsec := d % Second
return float64(sec) + float64(nsec)*1e-9
}
// Hours returns the duration as a floating point number of hours.
func (d Duration) Hours() float64 {
hour := d / Hour
nsec := d % Hour
return float64(hour) + float64(nsec)*(1e-9/60/60)
}
// Days returns the duration as a floating point number of days.
//
// Fix: the whole part was previously computed as d / Hour, so the
// integer component counted hours rather than days — Days() of 48h
// reported 48.0 instead of 2.0. It now divides by Day, matching the
// structure of Hours and Minutes.
func (d Duration) Days() float64 {
	day := d / Day
	nsec := d % Day
	return float64(day) + float64(nsec)*(1e-9/60/60/24)
}
// Weeks returns the duration as a floating point number of weeks.
//
// Fix: the comment wrongly said "days" and the whole part was computed
// as d / Hour, so the integer component counted hours rather than
// weeks. It now divides by Week, so 14 days reports 2.0.
func (d Duration) Weeks() float64 {
	week := d / Week
	nsec := d % Week
	return float64(week) + float64(nsec)*(1e-9/60/60/24/7)
}
// Minutes returns the duration as a floating point number of minutes.
func (d Duration) Minutes() float64 {
min := d / Minute
nsec := d % Minute
return float64(min) + float64(nsec)*(1e-9/60)
}
// Standard unit of time.
var (
Nanosecond = Duration(time.Nanosecond)
Microsecond = Duration(time.Microsecond)
Millisecond = Duration(time.Millisecond)
Second = Duration(time.Second)
Minute = Duration(time.Minute)
Hour = Duration(time.Hour)
Day = Hour * 24
Week = Day * 7
Fortnight = Week * 2
Month = Day * 30 // Approximation
Year = Day * 365 // Approximation
Decade = Year * 10 // Approximation
Century = Year * 100 // Approximation
Millennium = Year * 1000 // Approximation
)
var errLeadingInt = errors.New("duration: bad [0-9]*") // never printed

// leadingInt consumes the leading run of decimal digits from s. It
// returns the digits' value, the unconsumed remainder of s, and an
// error if the value would overflow int64.
func leadingInt(s string) (x int64, rem string, err error) {
	i := 0
	for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {
		if x > (1<<63-1)/10 {
			return 0, "", errLeadingInt // next *10 would overflow
		}
		x = x*10 + int64(s[i]-'0')
		if x < 0 {
			return 0, "", errLeadingInt // wrapped past MaxInt64
		}
	}
	return x, s[i:], nil
}
var unitMap = map[string]int64{
"ns": int64(Nanosecond),
"us": int64(Microsecond),
"µs": int64(Microsecond), // U+00B5 = micro symbol
"μs": int64(Microsecond), // U+03BC = Greek letter mu
"ms": int64(Millisecond),
"s": int64(Second),
"m": int64(Minute),
"h": int64(Hour),
"d": int64(Day),
"w": int64(Week),
"y": int64(Year), // Approximation
}
// ParseDuration parses a duration string.
// A duration string is a possibly signed sequence of
// decimal numbers, each with optional fraction and a unit suffix,
// such as "300ms", "-1.5h" or "2h45m".
// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h", "d", "w", "y".
func ParseDuration(s string) (Duration, error) {
	// Grammar: [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
	orig := s
	var d int64 // accumulated duration, in the base unit used by unitMap
	neg := false
	// Consume [-+]?
	if s != "" {
		c := s[0]
		if c == '-' || c == '+' {
			neg = c == '-'
			s = s[1:]
		}
	}
	// Special case: if all that is left is "0", this is zero.
	if s == "0" {
		return 0, nil
	}
	if s == "" {
		return 0, errors.New("time: invalid duration " + orig)
	}
	// Each iteration consumes one number+unit component (e.g. the "2h" and
	// "45m" of "2h45m") and adds it to d.
	for s != "" {
		var (
			v, f  int64       // integers before, after decimal point
			scale float64 = 1 // value = v + f/scale
		)
		var err error
		// The next character must be [0-9.]
		if !(s[0] == '.' || '0' <= s[0] && s[0] <= '9') {
			return 0, errors.New("time: invalid duration " + orig)
		}
		// Consume [0-9]*
		pl := len(s)
		v, s, err = leadingInt(s)
		if err != nil {
			return 0, errors.New("time: invalid duration " + orig)
		}
		pre := pl != len(s) // whether we consumed anything before a period
		// Consume (\.[0-9]*)?
		post := false
		if s != "" && s[0] == '.' {
			s = s[1:]
			pl := len(s)
			f, s, err = leadingInt(s)
			if err != nil {
				return 0, errors.New("time: invalid duration " + orig)
			}
			// scale becomes 10^n, where n is the number of fractional digits.
			for n := pl - len(s); n > 0; n-- {
				scale *= 10
			}
			post = pl != len(s)
		}
		if !pre && !post {
			// no digits (e.g. ".s" or "-.s")
			return 0, errors.New("time: invalid duration " + orig)
		}
		// Consume unit: the run of characters up to the next digit or period.
		i := 0
		for ; i < len(s); i++ {
			c := s[i]
			if c == '.' || '0' <= c && c <= '9' {
				break
			}
		}
		if i == 0 {
			return 0, errors.New("time: missing unit in duration " + orig)
		}
		u := s[:i]
		s = s[i:]
		unit, ok := unitMap[u]
		if !ok {
			return 0, errors.New("time: unknown unit " + u + " in duration " + orig)
		}
		// Guard the integer multiply below against int64 overflow.
		if v > (1<<63-1)/unit {
			// overflow
			return 0, errors.New("time: invalid duration " + orig)
		}
		v *= unit
		if f > 0 {
			// float64 is needed to be nanosecond accurate for fractions of hours.
			// v >= 0 && (f*unit/scale) <= 3.6e+12 (ns/h, h is the largest unit)
			// NOTE(review): the bound above predates the "d"/"w"/"y" units this
			// variant accepts — confirm the fractional path is still accurate
			// for those larger units.
			v += int64(float64(f) * (float64(unit) / scale))
			if v < 0 {
				// overflow
				return 0, errors.New("time: invalid duration " + orig)
			}
		}
		d += v
		if d < 0 {
			// overflow
			return 0, errors.New("time: invalid duration " + orig)
		}
	}
	if neg {
		d = -d
	}
	return Duration(d), nil
}
// Package shaderir offers intermediate representation for shader programs.
package shaderir
import (
"go/constant"
"go/token"
"strings"
)
// Program is a complete shader in intermediate representation: its uniform,
// attribute and varying declarations, the function table, and the two entry
// points (vertex and fragment).
type Program struct {
	UniformNames []string
	Uniforms     []Type
	TextureNum   int
	Attributes   []Type
	Varyings     []Type
	Funcs        []Func
	VertexFunc   VertexFunc
	FragmentFunc FragmentFunc
}

// Func is a user-defined function in the IR, identified by Index.
type Func struct {
	Index     int
	InParams  []Type
	OutParams []Type
	Return    Type
	Block     *Block
}

// VertexFunc takes pseudo params, and the number is len(attributes) + len(varyings) + 1.
// If 0 <= index < len(attributes), the params are in-params and represent attribute variables.
// If index == len(attributes), the param is an out-param and represents the position in vec4 (gl_Position in GLSL)
// If len(attributes) + 1 <= index < len(attributes) + len(varyings) + 1, the params are out-params and represent
// varying variables.
type VertexFunc struct {
	Block *Block
}

// FragmentFunc takes pseudo params, and the number is len(varyings) + 2.
// If index == 0, the param represents the coordinate of the fragment (gl_FragCoord in GLSL).
// If 0 < index <= len(varyings), the param represents (index-1)th varying variable.
// If index == len(varyings)+1, the param is an out-param representing the color of the pixel (gl_FragColor in GLSL).
type FragmentFunc struct {
	Block *Block
}

// Block is a statement list together with the local variables it declares.
type Block struct {
	LocalVars           []Type
	LocalVarIndexOffset int
	Stmts               []Stmt
}

// Stmt is a single IR statement. Which fields are meaningful depends on
// Type: the For* fields apply only to For statements, and InitIndex only
// to Init statements.
type Stmt struct {
	Type       StmtType
	Exprs      []Expr
	Blocks     []*Block
	ForVarType Type
	ForVarIndex int
	ForInit    constant.Value
	ForEnd     constant.Value
	ForOp      Op
	ForDelta   constant.Value
	InitIndex  int
}

// StmtType distinguishes the kinds of statements a Stmt can represent.
type StmtType int

const (
	ExprStmt StmtType = iota
	BlockStmt
	Assign
	Init
	If
	For
	Continue
	Break
	Return
	Discard
)

// ConstType describes how a constant expression's value should be typed.
type ConstType int

const (
	ConstTypeNone ConstType = iota
	ConstTypeBool
	ConstTypeInt
	ConstTypeFloat
)

// Expr is an expression node. As with Stmt, which fields are meaningful
// depends on Type (e.g. Const/ConstType for NumberExpr, Index for
// variable/function references, Op for Unary/Binary).
type Expr struct {
	Type        ExprType
	Exprs       []Expr
	Const       constant.Value
	ConstType   ConstType
	BuiltinFunc BuiltinFunc
	Swizzling   string
	Index       int
	Op          Op
}

// ExprType distinguishes the kinds of expression nodes.
type ExprType int

const (
	Blank ExprType = iota
	NumberExpr
	UniformVariable
	TextureVariable
	LocalVariable
	StructMember
	BuiltinFuncExpr
	SwizzlingExpr
	FunctionExpr
	Unary
	Binary
	Selection
	Call
	FieldSelector
	Index
)

// Op is a unary or binary operator in the IR.
type Op int

const (
	Add Op = iota
	Sub
	NotOp
	ComponentWiseMul
	MatrixMul
	Div
	ModOp
	LeftShift
	RightShift
	LessThanOp
	LessThanEqualOp
	GreaterThanOp
	GreaterThanEqualOp
	EqualOp
	NotEqualOp
	And
	Xor
	Or
	AndAnd
	OrOr
)
// OpFromToken translates a Go AST operator token into the IR operator.
// The operand types are consulted only for token.MUL, which becomes a
// matrix product when either operand is a matrix and a component-wise
// product otherwise. The second result reports whether the token has a
// corresponding operator at all.
func OpFromToken(t token.Token, lhs, rhs Type) (Op, bool) {
	// Multiplication is the only token whose meaning depends on the
	// operand types, so handle it up front.
	if t == token.MUL {
		if lhs.IsMatrix() || rhs.IsMatrix() {
			return MatrixMul, true
		}
		return ComponentWiseMul, true
	}
	ops := map[token.Token]Op{
		token.ADD:  Add,
		token.SUB:  Sub,
		token.NOT:  NotOp,
		token.QUO:  Div,
		token.REM:  ModOp,
		token.SHL:  LeftShift,
		token.SHR:  RightShift,
		token.LSS:  LessThanOp,
		token.LEQ:  LessThanEqualOp,
		token.GTR:  GreaterThanOp,
		token.GEQ:  GreaterThanEqualOp,
		token.EQL:  EqualOp,
		token.NEQ:  NotEqualOp,
		token.AND:  And,
		token.XOR:  Xor,
		token.OR:   Or,
		token.LAND: AndAnd,
		token.LOR:  OrOr,
	}
	if op, ok := ops[t]; ok {
		return op, true
	}
	return 0, false
}
// BuiltinFunc is the name of a builtin function available in shaders.
type BuiltinFunc string

const (
	Len         BuiltinFunc = "len"
	Cap         BuiltinFunc = "cap"
	BoolF       BuiltinFunc = "bool"
	IntF        BuiltinFunc = "int"
	FloatF      BuiltinFunc = "float"
	Vec2F       BuiltinFunc = "vec2"
	Vec3F       BuiltinFunc = "vec3"
	Vec4F       BuiltinFunc = "vec4"
	Mat2F       BuiltinFunc = "mat2"
	Mat3F       BuiltinFunc = "mat3"
	Mat4F       BuiltinFunc = "mat4"
	Radians     BuiltinFunc = "radians"
	Degrees     BuiltinFunc = "degrees"
	Sin         BuiltinFunc = "sin"
	Cos         BuiltinFunc = "cos"
	Tan         BuiltinFunc = "tan"
	Asin        BuiltinFunc = "asin"
	Acos        BuiltinFunc = "acos"
	Atan        BuiltinFunc = "atan"
	Atan2       BuiltinFunc = "atan2"
	Pow         BuiltinFunc = "pow"
	Exp         BuiltinFunc = "exp"
	Log         BuiltinFunc = "log"
	Exp2        BuiltinFunc = "exp2"
	Log2        BuiltinFunc = "log2"
	Sqrt        BuiltinFunc = "sqrt"
	Inversesqrt BuiltinFunc = "inversesqrt"
	Abs         BuiltinFunc = "abs"
	Sign        BuiltinFunc = "sign"
	Floor       BuiltinFunc = "floor"
	Ceil        BuiltinFunc = "ceil"
	Fract       BuiltinFunc = "fract"
	Mod         BuiltinFunc = "mod"
	Min         BuiltinFunc = "min"
	Max         BuiltinFunc = "max"
	Clamp       BuiltinFunc = "clamp"
	Mix         BuiltinFunc = "mix"
	Step        BuiltinFunc = "step"
	Smoothstep  BuiltinFunc = "smoothstep"
	Length      BuiltinFunc = "length"
	Distance    BuiltinFunc = "distance"
	Dot         BuiltinFunc = "dot"
	Cross       BuiltinFunc = "cross"
	Normalize   BuiltinFunc = "normalize"
	Faceforward BuiltinFunc = "faceforward"
	Reflect     BuiltinFunc = "reflect"
	Transpose   BuiltinFunc = "transpose"
	Texture2DF  BuiltinFunc = "texture2D"
	Dfdx        BuiltinFunc = "dfdx"
	Dfdy        BuiltinFunc = "dfdy"
	Fwidth      BuiltinFunc = "fwidth"
)

// parseableBuiltinFuncs is the set of names ParseBuiltinFunc accepts.
// NOTE(review): Radians and Degrees are declared above but were absent from
// the original accept list; that exclusion is preserved here — confirm it
// is intentional.
var parseableBuiltinFuncs = map[BuiltinFunc]struct{}{
	Len: {}, Cap: {}, BoolF: {}, IntF: {}, FloatF: {},
	Vec2F: {}, Vec3F: {}, Vec4F: {}, Mat2F: {}, Mat3F: {}, Mat4F: {},
	Sin: {}, Cos: {}, Tan: {}, Asin: {}, Acos: {}, Atan: {}, Atan2: {},
	Pow: {}, Exp: {}, Log: {}, Exp2: {}, Log2: {}, Sqrt: {}, Inversesqrt: {},
	Abs: {}, Sign: {}, Floor: {}, Ceil: {}, Fract: {}, Mod: {},
	Min: {}, Max: {}, Clamp: {}, Mix: {}, Step: {}, Smoothstep: {},
	Length: {}, Distance: {}, Dot: {}, Cross: {}, Normalize: {},
	Faceforward: {}, Reflect: {}, Transpose: {}, Texture2DF: {},
	Dfdx: {}, Dfdy: {}, Fwidth: {},
}

// ParseBuiltinFunc reports whether str names a recognized builtin function,
// returning the typed name on success and ("", false) otherwise.
func ParseBuiltinFunc(str string) (BuiltinFunc, bool) {
	f := BuiltinFunc(str)
	if _, ok := parseableBuiltinFuncs[f]; ok {
		return f, true
	}
	return "", false
}
// IsValidSwizzling reports whether s is a valid vector swizzle: one to four
// components drawn entirely from a single component-name set (xyzw, rgba or
// strq). The set is chosen by the first character; mixing sets is invalid.
func IsValidSwizzling(s string) bool {
	if len(s) < 1 || len(s) > 4 {
		return false
	}
	for _, set := range []string{"xyzw", "rgba", "strq"} {
		if strings.IndexByte(set, s[0]) < 0 {
			continue
		}
		// First character picked this set; every component must belong to it.
		for _, r := range s {
			if !strings.ContainsRune(set, r) {
				return false
			}
		}
		return true
	}
	return false
}
// ReferredFuncIndicesInVertexShader returns the indices of all functions
// reachable (directly or transitively) from the vertex shader entry point.
func (p *Program) ReferredFuncIndicesInVertexShader() []int {
	return p.referredFuncIndicesInBlockEntryPoint(p.VertexFunc.Block)
}

// ReferredFuncIndicesInFragmentShader returns the indices of all functions
// reachable (directly or transitively) from the fragment shader entry point.
func (p *Program) ReferredFuncIndicesInFragmentShader() []int {
	return p.referredFuncIndicesInBlockEntryPoint(p.FragmentFunc.Block)
}

// referredFuncIndicesInBlockEntryPoint walks the call graph rooted at b and
// returns each referred function's index once, in first-visit order.
func (p *Program) referredFuncIndicesInBlockEntryPoint(b *Block) []int {
	indexToFunc := map[int]*Func{}
	for _, f := range p.Funcs {
		f := f // copy so &f below does not alias the shared loop variable
		indexToFunc[f.Index] = &f
	}
	visited := map[int]struct{}{}
	return referredFuncIndicesInBlock(b, indexToFunc, visited)
}

// referredFuncIndicesInBlock collects function indices referred to anywhere
// in b, recursing into statement sub-blocks. A nil block yields nil.
func referredFuncIndicesInBlock(b *Block, indexToFunc map[int]*Func, visited map[int]struct{}) []int {
	if b == nil {
		return nil
	}
	var fs []int
	for _, s := range b.Stmts {
		for _, e := range s.Exprs {
			fs = append(fs, referredFuncIndicesInExpr(&e, indexToFunc, visited)...)
		}
		for _, bb := range s.Blocks {
			fs = append(fs, referredFuncIndicesInBlock(bb, indexToFunc, visited)...)
		}
	}
	return fs
}

// referredFuncIndicesInExpr collects function indices referred to by e and
// its sub-expressions. On the first visit of a FunctionExpr it records the
// index and descends into that function's body; the visited set both
// deduplicates results and prevents infinite recursion on recursive calls.
func referredFuncIndicesInExpr(e *Expr, indexToFunc map[int]*Func, visited map[int]struct{}) []int {
	var fs []int
	if e.Type == FunctionExpr {
		if _, ok := visited[e.Index]; !ok {
			fs = append(fs, e.Index)
			visited[e.Index] = struct{}{}
			fs = append(fs, referredFuncIndicesInBlock(indexToFunc[e.Index].Block, indexToFunc, visited)...)
		}
	}
	for _, ee := range e.Exprs {
		fs = append(fs, referredFuncIndicesInExpr(&ee, indexToFunc, visited)...)
	}
	return fs
}
package txpool
import (
"fmt"
"github.com/ledgerwatch/erigon-lib/rlp"
)
// NewPooledTransactionHashesPacket is a list of 32-byte transaction hashes.
type NewPooledTransactionHashesPacket [][32]byte

// ParseHashesCount looks at the RLP length Prefix for list of 32-byte hashes
// and returns number of hashes in the list to expect.
// Each hash occupies 33 bytes of payload: a one-byte RLP string prefix plus
// the 32 hash bytes (hence the dataLen%33 check below).
func ParseHashesCount(payload Hashes, pos int) (count int, dataPos int, err error) {
	dataPos, dataLen, err := rlp.List(payload, pos)
	if err != nil {
		return 0, 0, fmt.Errorf("%s: hashes len: %w", rlp.ParseHashErrorPrefix, err)
	}
	if dataLen%33 != 0 {
		return 0, 0, fmt.Errorf("%s: hashes len must be multiple of 33", rlp.ParseHashErrorPrefix)
	}
	return dataLen / 33, dataPos, nil
}

// EncodeHashes produces RLP encoding of the given hashes, as an RLP list.
// The encoding is written into encodeBuf, reusing its backing array when
// there is enough capacity.
// The returned slice holds the encoding (it may differ from encodeBuf if a
// reallocation was needed).
func EncodeHashes(hashes []byte, encodeBuf []byte) []byte {
	// 33 encoded bytes per 32-byte input hash (1-byte RLP prefix each).
	hashesLen := len(hashes) / 32 * 33
	dataLen := hashesLen
	encodeBuf = ensureEnoughSize(encodeBuf, rlp.ListPrefixLen(hashesLen)+dataLen)
	rlp.EncodeHashes(hashes, encodeBuf)
	return encodeBuf
}

// ParseHash extracts the next hash from the RLP encoding (payload) from a given position.
// It appends the hash to the given slice, reusing the space if there is enough capacity
// The first returned value is the slice where hash is appended to.
// The second returned value is the new position in the RLP payload after the extraction
// of the hash.
func ParseHash(payload []byte, pos int, hashbuf []byte) ([]byte, int, error) {
	hashbuf = ensureEnoughSize(hashbuf, 32)
	pos, err := rlp.ParseHash(payload, pos, hashbuf)
	if err != nil {
		return nil, 0, fmt.Errorf("%s: hash len: %w", rlp.ParseHashErrorPrefix, err)
	}
	return hashbuf, pos, nil
}
// ensureEnoughSize returns a slice of length size, backed by in whenever
// in's capacity allows. If it does not, a new slice is allocated and in's
// contents are copied into it. When reusing, bytes beyond in's previous
// length may be stale; callers are expected to overwrite the whole result.
func ensureEnoughSize(in []byte, size int) []byte {
	if cap(in) >= size {
		// Reuse the existing backing array.
		return in[:size]
	}
	out := make([]byte, size)
	copy(out, in)
	return out
}
// EncodeGetPooledTransactions66 produces encoding of GetPooledTransactions66 packet:
// an outer RLP list containing the request ID followed by the list of hashes.
// The encoding is written into encodeBuf (reallocated if too small) and the
// resulting slice is returned.
func EncodeGetPooledTransactions66(hashes []byte, requestId uint64, encodeBuf []byte) ([]byte, error) {
	pos := 0
	// 33 encoded bytes per 32-byte input hash (1-byte RLP prefix each).
	hashesLen := len(hashes) / 32 * 33
	dataLen := rlp.ListPrefixLen(hashesLen) + hashesLen + rlp.U64Len(requestId)
	encodeBuf = ensureEnoughSize(encodeBuf, rlp.ListPrefixLen(dataLen)+dataLen)
	// Length Prefix for the entire structure
	pos += rlp.EncodeListPrefix(dataLen, encodeBuf[pos:])
	pos += rlp.EncodeU64(requestId, encodeBuf[pos:])
	pos += rlp.EncodeHashes(hashes, encodeBuf[pos:])
	_ = pos
	return encodeBuf, nil
}

// ParseGetPooledTransactions66 decodes a GetPooledTransactions66 packet:
// the request ID followed by a list of hashes, written contiguously into
// hashbuf (reused/reallocated as needed). Returns the request ID, the hash
// bytes, and the position after the packet.
func ParseGetPooledTransactions66(payload []byte, pos int, hashbuf []byte) (requestID uint64, hashes []byte, newPos int, err error) {
	pos, _, err = rlp.List(payload, pos)
	if err != nil {
		return 0, hashes, 0, err
	}
	pos, requestID, err = rlp.U64(payload, pos)
	if err != nil {
		return 0, hashes, 0, err
	}
	var hashesCount int
	hashesCount, pos, err = ParseHashesCount(payload, pos)
	if err != nil {
		return 0, hashes, 0, err
	}
	hashes = ensureEnoughSize(hashbuf, 32*hashesCount)
	// NOTE(review): the loop is bounded by pos reaching len(payload), not by
	// hashesCount; a payload carrying more entries than its declared count
	// would index hashes out of range — confirm upstream validation.
	for i := 0; pos != len(payload); i++ {
		pos, err = rlp.ParseHash(payload, pos, hashes[i*32:])
		if err != nil {
			return 0, hashes, 0, err
		}
	}
	return requestID, hashes, pos, nil
}

// ParseGetPooledTransactions65 decodes the eth/65 form of the packet, which
// carries only the hash list (no request ID).
func ParseGetPooledTransactions65(payload []byte, pos int, hashbuf []byte) (hashes []byte, newPos int, err error) {
	pos, _, err = rlp.List(payload, pos)
	if err != nil {
		return hashes, 0, err
	}
	var hashesCount int
	hashesCount, pos, err = ParseHashesCount(payload, pos)
	if err != nil {
		return hashes, 0, err
	}
	hashes = ensureEnoughSize(hashbuf, 32*hashesCount)
	// NOTE(review): same bounding caveat as in ParseGetPooledTransactions66.
	for i := 0; pos != len(payload); i++ {
		pos, err = rlp.ParseHash(payload, pos, hashes[i*32:])
		if err != nil {
			return hashes, 0, err
		}
	}
	return hashes, pos, nil
}

// EncodePooledTransactions66 produces encoding of PooledTransactions66:
// an outer RLP list containing the request ID followed by a list of
// already-RLP-encoded transactions, which are copied through verbatim.
func EncodePooledTransactions66(txsRlp [][]byte, requestId uint64, encodeBuf []byte) []byte {
	pos := 0
	txsRlpLen := 0
	for i := range txsRlp {
		txsRlpLen += len(txsRlp[i])
	}
	dataLen := rlp.U64Len(requestId) + rlp.ListPrefixLen(txsRlpLen) + txsRlpLen
	encodeBuf = ensureEnoughSize(encodeBuf, rlp.ListPrefixLen(dataLen)+dataLen)
	// Length Prefix for the entire structure
	pos += rlp.EncodeListPrefix(dataLen, encodeBuf[pos:])
	pos += rlp.EncodeU64(requestId, encodeBuf[pos:])
	pos += rlp.EncodeListPrefix(txsRlpLen, encodeBuf[pos:])
	for i := range txsRlp {
		copy(encodeBuf[pos:], txsRlp[i])
		pos += len(txsRlp[i])
	}
	_ = pos
	return encodeBuf
}
func EncodePooledTransactions65(txsRlp [][]byte, encodeBuf []byte) []byte {
pos := 0
dataLen := 0
for i := range txsRlp {
dataLen += len(txsRlp[i])
}
encodeBuf = ensureEnoughSize(encodeBuf, rlp.ListPrefixLen(dataLen)+dataLen)
// Length Prefix for the entire structure
pos += rlp.EncodeListPrefix(dataLen, encodeBuf[pos:])
for i := range txsRlp {
copy(encodeBuf[pos:], txsRlp[i])
pos += len(txsRlp[i])
}
_ = pos
return encodeBuf
} | txpool/packets.go | 0.648689 | 0.422326 | packets.go | starcoder |
package copypasta
import (
"math/rand"
"time"
)
/* k-d tree: k-dimensional tree; k 维树
https://en.wikipedia.org/wiki/K-d_tree
推荐 https://www.luogu.com.cn/blog/command-block/kdt-xiao-ji
https://www.luogu.com.cn/blog/lc-2018-Canton/solution-p4148
https://oi-wiki.org/ds/kdt/
todo 题单 https://www.luogu.com.cn/training/4295
模板题 https://www.luogu.com.cn/problem/P4148
todo https://codeforces.com/problemset/problem/44/G
*/
// kdNode is a node of a 2-d tree. Besides its own point p and value val,
// each node caches aggregates over its subtree: node count sz, value sum
// sm, and the subtree bounding box [mi, mx].
type kdNode struct {
	lr          [2]*kdNode
	p, mi, mx   [2]int // index 0 is the x coordinate, index 1 is y
	sz, val, sm int
}

// min returns the smaller of a and b.
func (kdNode) min(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// max returns the larger of a and b.
func (kdNode) max(a, b int) int {
	if b > a {
		return b
	}
	return a
}

// size returns the subtree node count; a nil node counts as zero.
func (o *kdNode) size() int {
	if o == nil {
		return 0
	}
	return o.sz
}

// sum returns the subtree value sum; a nil node counts as zero.
func (o *kdNode) sum() int {
	if o == nil {
		return 0
	}
	return o.sm
}

// maintain recomputes o's cached aggregates (count, sum, bounding box)
// from its own point/value and its children's aggregates.
func (o *kdNode) maintain() {
	o.sz = 1 + o.lr[0].size() + o.lr[1].size()
	o.sm = o.val + o.lr[0].sum() + o.lr[1].sum()
	for d := 0; d < 2; d++ {
		lo, hi := o.p[d], o.p[d]
		for _, ch := range o.lr {
			if ch == nil {
				continue
			}
			lo = o.min(lo, ch.mi[d])
			hi = o.max(hi, ch.mx[d])
		}
		o.mi[d], o.mx[d] = lo, hi
	}
}
// nodes flattens the subtree rooted at o into a slice (preorder traversal)
// and then shuffles it, so a subsequent rebuild is not fed adversarial
// input ordering.
func (o *kdNode) nodes() []*kdNode {
	res := make([]*kdNode, 0, o.size())
	var collect func(*kdNode)
	collect = func(n *kdNode) {
		if n == nil {
			return
		}
		res = append(res, n)
		collect(n.lr[0])
		collect(n.lr[1])
	}
	collect(o)
	rand.Shuffle(len(res), func(i, j int) { res[i], res[j] = res[j], res[i] })
	return res
}
// divideKDT partially sorts a around index k on coordinate dim
// (quickselect / nth_element style): afterwards a[k] holds the k-th
// smallest coordinate, with smaller-or-equal elements before it and
// greater-or-equal elements after it.
func divideKDT(a []*kdNode, k, dim int) {
	for l, r := 0, len(a)-1; l < r; {
		v := a[l].p[dim] // pivot: the current leftmost element's coordinate
		i, j := l, r+1
		// Hoare-style partition around v.
		for {
			for i++; i < r && a[i].p[dim] < v; i++ {
			}
			for j--; j > l && a[j].p[dim] > v; j-- {
			}
			if i >= j {
				break
			}
			a[i], a[j] = a[j], a[i]
		}
		// Move the pivot into its final position j, then narrow to the side
		// containing k (iterative tail recursion).
		a[l], a[j] = a[j], a[l]
		if j == k {
			break
		} else if j < k {
			l = j + 1
		} else {
			r = j - 1
		}
	}
}
// buildKDT constructs a balanced k-d tree over a by selecting the median
// on the current dimension, making it the root, and recursing on both
// halves with the alternate dimension. (An alternative implementation
// chooses, at each level, the dimension whose coordinates have the widest
// spread; see the k-d tree references.)
func buildKDT(a []*kdNode, dim int) *kdNode {
	if len(a) == 0 {
		return nil
	}
	mid := len(a) / 2
	divideKDT(a, mid, dim)
	root := a[mid]
	next := dim ^ 1
	root.lr[0] = buildKDT(a[:mid], next)
	root.lr[1] = buildKDT(a[mid+1:], next)
	root.maintain()
	return root
}
// rebuild flattens the subtree rooted at o and constructs a fresh,
// balanced k-d tree from its nodes.
func (o *kdNode) rebuild(dim int) *kdNode { return buildKDT(o.nodes(), dim) }

// put inserts point p with value val into the subtree rooted at o,
// comparing on dimension dim at this level. When a child exceeds 3/4 of
// the subtree size, the whole subtree is rebuilt to restore balance
// (scapegoat-style rebuilding with alpha = 3/4).
func (o *kdNode) put(p [2]int, val, dim int) *kdNode {
	if o == nil {
		o = &kdNode{p: p, val: val}
		o.maintain()
		return o
	}
	if p[dim] < o.p[dim] {
		o.lr[0] = o.lr[0].put(p, val, dim^1)
	} else {
		o.lr[1] = o.lr[1].put(p, val, dim^1)
	}
	o.maintain()
	if sz := o.size() * 3; o.lr[0].size()*4 > sz || o.lr[1].size()*4 > sz { // alpha=3/4
		return o.rebuild(dim)
	}
	return o
}
// inRect reports whether rectangle X1,Y1-X2,Y2 lies entirely inside
// rectangle x1,y1-x2,y2 (boundaries inclusive).
func inRect(x1, y1, x2, y2, X1, Y1, X2, Y2 int) bool {
	insideX := x1 <= X1 && X2 <= x2
	insideY := y1 <= Y1 && Y2 <= y2
	return insideX && insideY
}
// outRect reports whether rectangle X1,Y1-X2,Y2 is completely disjoint
// from rectangle x1,y1-x2,y2 (touching edges count as overlapping).
func outRect(x1, y1, x2, y2, X1, Y1, X2, Y2 int) bool {
	separatedX := X2 < x1 || x2 < X1
	separatedY := Y2 < y1 || y2 < Y1
	return separatedX || separatedY
}
// query returns the sum of values of all points inside the axis-aligned
// rectangle [x1,x2] x [y1,y2], pruning subtrees via their bounding boxes.
func (o *kdNode) query(x1, y1, x2, y2 int) (res int) {
	// Prune: empty subtree, or bounding box entirely outside the query rect.
	if o == nil || outRect(x1, y1, x2, y2, o.mi[0], o.mi[1], o.mx[0], o.mx[1]) {
		return
	}
	// Bounding box entirely inside the query rect: use the cached sum.
	if inRect(x1, y1, x2, y2, o.mi[0], o.mi[1], o.mx[0], o.mx[1]) {
		return o.sm
	}
	// Partial overlap: count this node's own point if it is inside, then
	// recurse into both children.
	if inRect(x1, y1, x2, y2, o.p[0], o.p[1], o.p[0], o.p[1]) { // the node's point lies inside the query rectangle
		res = o.val
	}
	res += o.lr[0].query(x1, y1, x2, y2) + o.lr[1].query(x1, y1, x2, y2)
	return
}
// kdTree is the public wrapper holding the root of the 2-d tree.
type kdTree struct {
	root *kdNode
}

// newKdTree returns an empty tree. The RNG is seeded here because node
// shuffling during rebuilds relies on math/rand.
// NOTE(review): rand.Seed is deprecated as of Go 1.20 — confirm the target
// Go version before modernizing.
func newKdTree() *kdTree {
	rand.Seed(time.Now().UnixNano())
	return &kdTree{}
}

// put inserts point p with value val.
func (t *kdTree) put(p [2]int, val int) { t.root = t.root.put(p, val, 0) }

// query returns the sum of values inside the rectangle [x1,x2] x [y1,y2].
func (t *kdTree) query(x1, y1, x2, y2 int) int { return t.root.query(x1, y1, x2, y2) }
package kubelet
import metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
// cumulativeInt wraps value in a single-point CUMULATIVE_INT64 metric.
// It returns nil when value is nil, so absent stats produce no metric.
func cumulativeInt(metricName string, units string, value *uint64) *metricspb.Metric {
	if value == nil {
		return nil
	}
	descriptor := &metricspb.MetricDescriptor{
		Name: metricName,
		Unit: units,
		Type: metricspb.MetricDescriptor_CUMULATIVE_INT64,
	}
	point := &metricspb.Point{
		Value: &metricspb.Point_Int64Value{Int64Value: int64(*value)},
	}
	return &metricspb.Metric{
		MetricDescriptor: descriptor,
		Timeseries: []*metricspb.TimeSeries{
			{Points: []*metricspb.Point{point}},
		},
	}
}

// cumulativeDouble wraps value in a single-point CUMULATIVE_DOUBLE metric.
// It returns nil when value is nil, so absent stats produce no metric.
func cumulativeDouble(metricName string, units string, value *float64) *metricspb.Metric {
	if value == nil {
		return nil
	}
	descriptor := &metricspb.MetricDescriptor{
		Name: metricName,
		Unit: units,
		Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE,
	}
	point := &metricspb.Point{
		Value: &metricspb.Point_DoubleValue{DoubleValue: *value},
	}
	return &metricspb.Metric{
		MetricDescriptor: descriptor,
		Timeseries: []*metricspb.TimeSeries{
			{Points: []*metricspb.Point{point}},
		},
	}
}
// intGauge builds a single-point GAUGE_INT64 metric without a description.
// It returns nil when value is nil, so absent stats produce no metric.
func intGauge(metricName string, units string, value *uint64) *metricspb.Metric {
	if value == nil {
		return nil
	}
	return intGaugeWithDescription(metricName, units, "", value)
}

// intGaugeWithDescription builds a single-point GAUGE_INT64 metric.
// It returns nil when value is nil, matching the other helpers in this
// file (previously a nil value would have caused a nil-pointer
// dereference here, since only intGauge performed the check).
func intGaugeWithDescription(metricName string, units string, description string, value *uint64) *metricspb.Metric {
	if value == nil {
		return nil
	}
	return &metricspb.Metric{
		MetricDescriptor: &metricspb.MetricDescriptor{
			Name:        metricName,
			Unit:        units,
			Description: description,
			Type:        metricspb.MetricDescriptor_GAUGE_INT64,
		},
		Timeseries: []*metricspb.TimeSeries{{
			Points: []*metricspb.Point{{
				Value: &metricspb.Point_Int64Value{
					Int64Value: int64(*value),
				},
			}},
		}},
	}
}
func doubleGauge(metricName string, units string, value *float64) *metricspb.Metric {
if value == nil {
return nil
}
return &metricspb.Metric{
MetricDescriptor: &metricspb.MetricDescriptor{
Name: metricName,
Unit: units,
Type: metricspb.MetricDescriptor_GAUGE_DOUBLE,
},
Timeseries: []*metricspb.TimeSeries{{
Points: []*metricspb.Point{{
Value: &metricspb.Point_DoubleValue{
DoubleValue: *value,
},
}},
}},
}
} | receiver/kubeletstatsreceiver/kubelet/pb.go | 0.659405 | 0.40869 | pb.go | starcoder |
package gerber
import (
"fmt"
"io"
"math"
)
const (
	// sf is the scale factor converting millimeters to integer Gerber
	// coordinate units.
	sf = 1e6
)

// Shape represents the type of shape the apertures use.
type Shape string

const (
	// RectShape uses rectangles for the aperture.
	RectShape Shape = "R"
	// CircleShape uses circles for the aperture.
	CircleShape Shape = "C"
)

// Primitive is a Gerber primitive.
type Primitive interface {
	WriteGerber(w io.Writer, apertureIndex int) error
	Aperture() *Aperture
}

// Aperture represents the nature of the primitive
// and satisfies the Primitive interface.
type Aperture struct {
	Shape Shape
	Size  float64
}

// WriteGerber writes the aperture definition (%ADD...%) to the Gerber
// file under the given aperture index. Any non-circle shape is written as
// a square (width == height == Size).
func (a *Aperture) WriteGerber(w io.Writer, apertureIndex int) error {
	switch a.Shape {
	case CircleShape:
		fmt.Fprintf(w, "%%ADD%vC,%0.5f*%%\n", apertureIndex, a.Size)
	default: // rectangle
		fmt.Fprintf(w, "%%ADD%vR,%0.5fX%0.5f*%%\n", apertureIndex, a.Size, a.Size)
	}
	return nil
}

// Aperture is defined to implement the Primitive interface; it returns
// the receiver itself.
func (a *Aperture) Aperture() *Aperture {
	return a
}

// ID returns a unique ID for the Aperture, derived from its shape and its
// size in scaled units. A nil Aperture maps to "default".
func (a *Aperture) ID() string {
	if a == nil {
		return "default"
	}
	return fmt.Sprintf("%v%0.5f", a.Shape, sf*a.Size)
}
// Pt represents a 2D Point.
type Pt struct {
	X, Y float64
}

// Point is a small convenience constructor for Pt that keeps call sites
// easy to read. All dimensions are in millimeters.
func Point(x, y float64) Pt {
	return Pt{x, y}
}
// ArcT represents an arc and satisfies the Primitive interface.
// Angles are stored in radians; x/y are the center, and the radius is
// scaled per-axis by xScale/yScale (allowing elliptical arcs).
type ArcT struct {
	x float64 // center X (mm)
	y float64 // center Y (mm)
	radius float64 // nominal radius (mm) before per-axis scaling
	shape Shape // aperture shape used when stroking the arc
	xScale float64
	yScale float64
	startAngle float64 // radians
	endAngle float64 // radians
	thickness float64 // stroke width (mm)
}

// Arc returns an arc primitive.
// All dimensions are in millimeters. Angles are in degrees.
// Start/end angles are normalized so that start <= end, and scales are
// taken as absolute values.
func Arc(
	x, y, radius float64,
	shape Shape,
	xScale, yScale, startAngle, endAngle float64,
	thickness float64) *ArcT {
	if startAngle > endAngle {
		startAngle, endAngle = endAngle, startAngle
	}
	return &ArcT{
		x: x,
		y: y,
		radius: radius,
		shape: shape,
		xScale: math.Abs(xScale),
		yScale: math.Abs(yScale),
		// Degrees are converted to radians here.
		startAngle: math.Pi * startAngle / 180.0,
		endAngle: math.Pi * endAngle / 180.0,
		thickness: thickness,
	}
}
// WriteGerber writes the primitive to the Gerber file.
// The arc is approximated by straight-line chords, one per ~0.1mm of
// (unscaled) arc length.
func (a *ArcT) WriteGerber(w io.Writer, apertureIndex int) error {
	delta := a.endAngle - a.startAngle
	length := delta * a.radius // arc length before per-axis scaling
	// Resolution of segments is 0.1mm
	segments := int(0.5+length*10.0) + 1
	delta /= float64(segments)
	angle := float64(a.startAngle)
	for i := 0; i < segments; i++ {
		// Chord endpoints on the (possibly elliptical) arc.
		x1 := a.x + a.xScale*math.Cos(angle)*a.radius
		y1 := a.y + a.yScale*math.Sin(angle)*a.radius
		angle += delta
		x2 := a.x + a.xScale*math.Cos(angle)*a.radius
		y2 := a.y + a.yScale*math.Sin(angle)*a.radius
		line := Line(x1, y1, x2, y2, a.shape, a.thickness)
		line.WriteGerber(w, apertureIndex)
	}
	return nil
}

// Aperture returns the primitive's desired aperture.
func (a *ArcT) Aperture() *Aperture {
	return &Aperture{
		Shape: a.shape,
		Size:  a.thickness,
	}
}

// CircleT represents a circle and satisfies the Primitive interface.
// The circle is rendered as a dot of diameter `thickness` at (x, y).
type CircleT struct {
	x, y      float64
	thickness float64
}

// Circle returns a circle primitive.
// All dimensions are in millimeters.
func Circle(x, y float64, thickness float64) *CircleT {
	return &CircleT{
		x: x,
		y: y,
		thickness: thickness,
	}
}

// WriteGerber writes the primitive to the Gerber file.
// It emits a move (D02) followed by a draw (D01) to the same coordinate —
// a zero-length stroke that, with a circular aperture, renders a filled
// dot. NOTE(review): D03 (flash) is the conventional operation for this;
// confirm the zero-length-draw form is accepted by the target toolchain.
func (c *CircleT) WriteGerber(w io.Writer, apertureIndex int) error {
	fmt.Fprintf(w, "G54D%d*\n", apertureIndex)
	fmt.Fprintf(w, "X%06dY%06dD02*\n", int(0.5+sf*(c.x)), int(0.5+sf*(c.y)))
	fmt.Fprintf(w, "X%06dY%06dD01*\n", int(0.5+sf*(c.x)), int(0.5+sf*(c.y)))
	return nil
}

// Aperture returns the primitive's desired aperture.
func (c *CircleT) Aperture() *Aperture {
	return &Aperture{
		Shape: CircleShape,
		Size:  c.thickness,
	}
}
// LineT represents a line and satisfies the Primitive interface.
type LineT struct {
	x1, y1    float64
	x2, y2    float64
	shape     Shape
	thickness float64
}

// Line returns a line primitive.
// All dimensions are in millimeters.
func Line(x1, y1, x2, y2 float64, shape Shape, thickness float64) *LineT {
	return &LineT{
		x1: x1,
		y1: y1,
		x2: x2,
		y2: y2,
		shape: shape,
		thickness: thickness,
	}
}

// WriteGerber writes the primitive to the Gerber file:
// a move (D02) to the start point followed by a draw (D01) to the end.
func (l *LineT) WriteGerber(w io.Writer, apertureIndex int) error {
	fmt.Fprintf(w, "G54D%d*\n", apertureIndex)
	fmt.Fprintf(w, "X%06dY%06dD02*\n", int(0.5+sf*(l.x1)), int(0.5+sf*(l.y1)))
	fmt.Fprintf(w, "X%06dY%06dD01*\n", int(0.5+sf*(l.x2)), int(0.5+sf*(l.y2)))
	return nil
}

// Aperture returns the primitive's desired aperture.
func (l *LineT) Aperture() *Aperture {
	return &Aperture{
		Shape: l.shape,
		Size:  l.thickness,
	}
}

// PolygonT represents a polygon and satisfies the Primitive interface.
// Points are stored relative to the (x, y) offset.
type PolygonT struct {
	x, y   float64
	points []Pt
}

// Polygon returns a polygon primitive.
// All dimensions are in millimeters.
// NOTE(review): the filled and thickness parameters are accepted but never
// used — WriteGerber always emits a filled region with a fixed aperture.
// Confirm whether outline-only polygons were ever intended.
func Polygon(x, y float64, filled bool, points []Pt, thickness float64) *PolygonT {
	return &PolygonT{
		x: x,
		y: y,
		points: points,
	}
}

// WriteGerber writes the primitive to the Gerber file as a filled region
// (G36/G37), selecting the hardcoded aperture D11. The closing D02 moves
// back to the first point to close the contour.
func (p *PolygonT) WriteGerber(w io.Writer, apertureIndex int) error {
	io.WriteString(w, "G54D11*\n")
	io.WriteString(w, "G36*\n")
	for i, pt := range p.points {
		if i == 0 {
			// First vertex: move without drawing.
			fmt.Fprintf(w, "X%06dY%06dD02*\n", int(0.5+sf*(pt.X+p.x)), int(0.5+sf*(pt.Y+p.y)))
			continue
		}
		fmt.Fprintf(w, "X%06dY%06dD01*\n", int(0.5+sf*(pt.X+p.x)), int(0.5+sf*(pt.Y+p.y)))
	}
	fmt.Fprintf(w, "X%06dY%06dD02*\n", int(0.5+sf*(p.points[0].X+p.x)), int(0.5+sf*(p.points[0].Y+p.y)))
	io.WriteString(w, "G37*\n")
	return nil
}

// Aperture returns nil for PolygonT because it uses the default aperture.
func (p *PolygonT) Aperture() *Aperture {
	return nil
}
package main
import (
"fmt"
"github.com/MarinX/keylogger"
"github.com/nsf/termbox-go"
"periph.io/x/periph/conn/gpio"
"periph.io/x/periph/host"
"periph.io/x/periph/host/bcm283x"
)
// https://github.com/MarinX/keylogger/blob/master/keymapper.go
// https://www.usb.org/sites/default/files/documents/hut1_12v2.pdf
// keyCodeMap translates Linux input-event key codes (the map keys; see the
// keylogger keymapper linked above) into the byte values this program
// transmits. Most entries follow the USB HID keyboard/keypad usage table
// (second link); 0x70-0x77 stand for modifier keys and are interpreted via
// modMap below.
// NOTE(review): 0x70-0x77 are not the standard HID modifier usages (those
// are 0xE0-0xE7); they appear to be values specific to the receiving
// device's protocol — confirm against the receiver implementation.
var keyCodeMap = map[uint16]uint8 {
	1: 0x29,
	2: 0x1E,
	3: 0x1F,
	4: 0x20,
	5: 0x21,
	6: 0x22,
	7: 0x23,
	8: 0x24,
	9: 0x25,
	10: 0x26,
	11: 0x27,
	12: 0x2D,
	13: 0x2E,
	14: 0x2A,
	15: 0x2B,
	16: 0x14,
	17: 0x1A,
	18: 0x08,
	19: 0x15,
	20: 0x17,
	21: 0x1C,
	22: 0x18,
	23: 0x0C,
	24: 0x12,
	25: 0x13,
	26: 0x2F,
	27: 0x30,
	28: 0x28,
	29: 0x70, // L_CTRL
	30: 0x04,
	31: 0x16,
	32: 0x07,
	33: 0x09,
	34: 0x0A,
	35: 0x0B,
	36: 0x0D,
	37: 0x0E,
	38: 0x0F,
	39: 0x33,
	40: 0x34,
	41: 0x35,
	42: 0x72, // L_SHIFT
	43: 0x31,
	44: 0x1D,
	45: 0x1B,
	46: 0x06,
	47: 0x19,
	48: 0x05,
	49: 0x11,
	50: 0x10,
	51: 0x36,
	52: 0x37,
	53: 0x38,
	54: 0x73, // R_SHIFT
	55: 0x55,
	56: 0x74, // L_ALT
	57: 0x2C,
	58: 0x39,
	59: 0x3A,
	60: 0x3B,
	61: 0x3C,
	62: 0x3D,
	63: 0x3E,
	64: 0x3F,
	65: 0x40,
	66: 0x41,
	67: 0x42,
	68: 0x43,
	69: 0x53,
	70: 0x47,
	71: 0x5F,
	72: 0x60,
	73: 0x61,
	74: 0x56,
	75: 0x5C,
	76: 0x5D,
	77: 0x5E,
	78: 0x57,
	79: 0x59,
	80: 0x5A,
	81: 0x5B,
	83: 0x63,
	87: 0x44,
	88: 0x45,
	96: 0x58,
	97: 0x71, // R_CTRL
	98: 0x54,
	99: 0x46,
	100: 0x75, // R_ALT
	102: 0x4A,
	103: 0x52,
	104: 0x4B,
	105: 0x50,
	106: 0x4F,
	107: 0x4D,
	108: 0x51,
	109: 0x4E,
	110: 0x49,
	111: 0x4C,
	125: 0x76, // L_META
	126: 0x77, // R_META
}
// modifiers tracks the currently-held modifier keys as a bitmask, one bit
// per modifier as assigned by modMap. It is updated (and logged) by
// WriteKeyEvent.
var modifiers uint8 = 0
// modMap assigns each modifier sentinel code (0x70-0x77, see keyCodeMap)
// its bit in the modifiers mask.
var modMap = map[uint8]uint8 {
	0x70: 1 << 0, // L CTRL
	0x72: 1 << 1, // L SHIFT
	0x74: 1 << 2, // L ALT
	0x76: 1 << 3, // L META
	0x71: 1 << 4, // R CTRL
	0x73: 1 << 5, // R SHIFT
	0x75: 1 << 6, // R ALT
	0x77: 1 << 7, // R META
}
// WriteKeyEvent clocks one key event out to the receiver over a bit-banged
// SPI-like link (MISO data, CS chip-select, CLK clock, all on BCM283x
// GPIOs). The frame is 8 data bits, MSB first: bit 7 is the release flag
// and bits 6..0 carry the 7-bit key code; a trailing low bit returns the
// line to idle. Events whose key code has no mapping are dropped.
// The function busy-waits, first for CS to go low and then on each clock
// edge, so it blocks until the receiver drives a full transaction.
func WriteKeyEvent(e keylogger.InputEvent) {
	code, ok := keyCodeMap[e.Code]
	if !ok {
		return
	}
	MISO := bcm283x.GPIO10 // purple wire
	CS := bcm283x.GPIO9 // blue wire
	CLK := bcm283x.GPIO11 // brown wire
	clk := gpio.Low
	_clk := gpio.Low // previous clock sample, for edge detection
	keypress := e.KeyPress()
	keyrelease := e.KeyRelease()
	// Update modifier keys
	// NOTE(review): the modifiers mask is tracked and logged here but never
	// transmitted — presumably the receiver reconstructs modifier state from
	// the individual press/release frames; confirm.
	bitflag, ok := modMap[code]
	if ok && keypress {
		modifiers |= bitflag
	} else if ok && keyrelease {
		modifiers &^= bitflag
	}
	fmt.Printf("mod = %X\n", modifiers)
	var bits [9]gpio.Level
	// send in MSB order
	if keyrelease {
		bits[0] = gpio.High // Key release
	} else {
		bits[0] = gpio.Low // Key press
	}
	// bits[1..7]: the key code, bit 6 down to bit 0.
	bits[1] = (code >> 6) & 1 == 1
	bits[2] = (code >> 5) & 1 == 1
	bits[3] = (code >> 4) & 1 == 1
	bits[4] = (code >> 3) & 1 == 1
	bits[5] = (code >> 2) & 1 == 1
	bits[6] = (code >> 1) & 1 == 1
	bits[7] = code & 1 == 1
	bits[8] = gpio.Low // Return signal to low
	bitToSend := bits[0]
	bitIndex := 0
	// wait until next chip select event (CS active-low)
	for {
		cs := CS.FastRead()
		if !cs {
			break
		}
	}
	// Immediately put out first bit so it's read on the next rising edge
	MISO.FastOut(bitToSend)
	bitIndex += 1
	bitToSend = bits[bitIndex]
	// watch clock, output bits to send on MISO
	for {
		clk = CLK.FastRead()
		// _clk && !clk detects a falling clock edge: shift out the next bit
		// so the receiver can sample it on the following rising edge.
		if _clk && !clk {
			MISO.FastOut(bitToSend)
			bitIndex += 1
			if bitIndex == 9 {
				break
			}
			bitToSend = bits[bitIndex]
		}
		_clk = clk
	}
	// print pretty message: mirror the release flag into bit 7 of the
	// logged byte, matching the wire format.
	if keyrelease {
		code |= 0x80
	}
	fmt.Printf("code = %X\n", code)
}
// main opens the keyboard input device, initializes the GPIO pins, and
// forwards every key press/release to the receiver via WriteKeyEvent.
// Pressing ESC five times in a row exits the program.
func main() {
	// The "keyboard" name heuristic doesn't work on my system
	// keyboard := keylogger.FindKeyboardDevice()
	k, err := keylogger.New("/dev/input/event0")
	if err != nil {
		fmt.Print("Unable to open /dev/input/event0\n")
		return
	}
	// termbox takes over the terminal so forwarded keystrokes can be
	// consumed (see PollEvent below) instead of echoing into the shell.
	termbox.Init()
	host.Init()
	MISO := bcm283x.GPIO10 // purple wire
	CS := bcm283x.GPIO9 // blue wire
	CLK := bcm283x.GPIO11 // brown wire
	MISO.Out(gpio.Low)
	CS.In(gpio.Float, gpio.NoEdge)
	CLK.In(gpio.Float, gpio.NoEdge)
	// Keeps track of how many consecutive times we've hit ESC key
	escape := 5
	// Main loop
	events := k.Read()
	for e := range events {
		if escape == 0 {
			break
		}
		switch e.Type {
		case keylogger.EvKey:
			if e.KeyPress() {
				fmt.Print("pressed " + e.KeyString() + "\n")
				// Any non-ESC press resets the quit countdown.
				if e.KeyString() == "ESC" {
					escape -= 1
					fmt.Printf("\nPress ESC %d more times to quit\n", escape)
				} else {
					escape = 5
				}
			}
			if e.KeyRelease() {
				fmt.Print("released " + e.KeyString() + "\n")
			}
			// write to SPI
			if e.KeyPress() || e.KeyRelease() {
				WriteKeyEvent(e)
			}
			// weird cleanup step
			if e.KeyRelease() {
				// Consume stdin so it doesn't flow into the terminal
				// (modifier keys and unnamed keys produce no terminal input,
				// so there is nothing to consume for them).
				if e.KeyString() != "" && e.KeyString() != "L_ALT" && e.KeyString() != "R_ALT" &&
					e.KeyString() != "L_SHIFT" && e.KeyString() != "R_SHIFT" &&
					e.KeyString() != "L_CTRL" && e.KeyString() != "R_CTRL" {
					termbox.PollEvent()
				}
			}
			break
		}
	}
	// Teardown
	// NOTE(review): this defer is registered only after the loop has already
	// exited, so it behaves like a direct call — confirm intent.
	defer termbox.Close()
}
package triangulate3
import (
"geGoMetry/r3"
"geGoMetry/shape"
"math"
)
func constructEnglobingTetra(Vectors []r3.Vector) shape.Mesh {
offset := 1.0
minExtremity, maxExtremity := computeBoundingBoxForVectors(Vectors)
minExtremity.Add(r3.Vector{-offset, -offset, -offset})
maxExtremity.Add(r3.Vector{offset, offset, offset})
// get bottom plane
normal := r3.Vector{0, 1, 0}
lowerPlane := r3.Plane3D{Normal: normal, Vector: minExtremity}
top := getTopVectorOfTetrahedron(minExtremity, maxExtremity)
//Line 1
upperVector1 := r3.Vector{minExtremity.X, maxExtremity.Y, maxExtremity.Z}
Direct1 := r3.Sub(top, upperVector1)
line1 := r3.Line{Vector: upperVector1, Direction: Direct1}
// first bottom Intersection Vector
tetraPoint1 := line1.IntersectPlane(lowerPlane)
//Line 2
upperVector2 := r3.Vector{maxExtremity.X, maxExtremity.Y, maxExtremity.Z}
Direct2 := r3.Sub(top, upperVector2)
line2 := r3.Line{Vector: upperVector2, Direction: Direct2}
// second bottom Intersection Vector
tetraPoint2 := line2.IntersectPlane(lowerPlane)
//Line 3
upperVector3 := r3.Vector{maxExtremity.X, maxExtremity.Y, minExtremity.Z}
Direct3 := r3.Sub(top, upperVector3)
line3 := r3.Line{Vector: upperVector3, Direction: Direct3}
// third bottom Intersection Vector
tetraPoint3 := line3.IntersectPlane(lowerPlane)
indices := []uint32{0, 1, 2,
0, 2, 3,
0, 3, 1,
1, 2, 3}
vertices := []r3.Vector{top, tetraPoint1, tetraPoint2, tetraPoint3}
mesh := shape.Mesh{Vertices: vertices, Indices: indices}
return mesh
}
func getTopVectorOfTetrahedron(minExtremity, maxExtremity r3.Vector) r3.Vector {
x := minExtremity.X
y := 2 * maxExtremity.Y
z := minExtremity.Z
return r3.Vector{x, y, z}
}
func computeBoundingBoxForVectors(Vectors []r3.Vector) (r3.Vector, r3.Vector) {
minExtremity := r3.Vector{math.MaxFloat32, math.MaxFloat32, math.MaxFloat32}
maxExtremity := r3.Vector{-math.MaxFloat32, -math.MaxFloat32, -math.MaxFloat32}
for _, Vector := range Vectors {
//Updating the minExtremity
if Vector.X < minExtremity.X {
minExtremity.X = Vector.X
}
if Vector.Y < minExtremity.Y {
minExtremity.Y = Vector.Y
}
if Vector.Z < minExtremity.Z {
minExtremity.Z = Vector.Z
}
//Updating the maxExtremity
if Vector.X > maxExtremity.X {
maxExtremity.X = Vector.X
}
if Vector.Y > maxExtremity.Y {
maxExtremity.Y = Vector.Y
}
if Vector.Z > maxExtremity.Z {
maxExtremity.Z = Vector.Z
}
}
return minExtremity, maxExtremity
} | triangulate3/englober.go | 0.743447 | 0.545467 | englober.go | starcoder |
package search
import (
"github.com/christat/gost/queue"
"time"
"fmt"
)
// Best First Search underpins several algorithms, such as Greedy BFS or A*.
// The main difference comes in the enqueuing logic, which is specific to the algorithm itself.
func BestFirst(origin, target HeuristicState, callback BFSEnqueuingCallback) (path map[State]State, found bool, cost float64, err error) {
path, cumulativeCost, queue, open, closed := initBestFirstVariables()
if callback == nil {
return path, found, 0, fmt.Errorf("enqueuing Callback not provided! Best First Search cannot be executed")
}
queue.Enqueue(origin, 0)
cumulativeCost[origin] = 0
for queue.Size() > 0 {
vertex := queue.Dequeue().(HeuristicState)
closed[vertex.Name()] = true
found = enqueueBestFirstNeighbors(vertex, target, queue, open, closed, cumulativeCost, path, callback)
if found {
break
}
}
return path, found, cumulativeCost[target], nil
}
// BenchmarkBestFirst runs the same search as BestFirst while also measuring
// elapsed wall-clock time and the number of node expansions, returned in an
// AlgorithmBenchmark.
func BenchmarkBestFirst(origin, target HeuristicState, callback BFSEnqueuingCallback) (path map[State]State, found bool, cost float64, bench AlgorithmBenchmark, err error) {
	// Validate before allocating bookkeeping or starting the clock, so a
	// misconfigured call does not skew the benchmark numbers.
	if callback == nil {
		return nil, false, 0, AlgorithmBenchmark{}, fmt.Errorf("enqueuing Callback not provided! Best First Search cannot be executed")
	}
	path, cumulativeCost, queue, open, closed := initBestFirstVariables()
	start := time.Now()
	var expansions uint = 0
	queue.Enqueue(origin, 0)
	cumulativeCost[origin] = 0
	for queue.Size() > 0 {
		vertex := queue.Dequeue().(HeuristicState)
		closed[vertex.Name()] = true
		expansions++
		found = enqueueBestFirstNeighbors(vertex, target, queue, open, closed, cumulativeCost, path, callback)
		if found {
			break
		}
	}
	elapsed := time.Since(start)
	return path, found, cumulativeCost[target], AlgorithmBenchmark{ElapsedTime: elapsed, TotalExpansions: expansions}, nil
}
// initBestFirstVariables allocates the shared bookkeeping used by the Best
// First Search variants: the predecessor map, the per-state cumulative cost
// map, the frontier (a min-priority queue, since lowest cost is expanded
// first), and the open/closed membership sets keyed by state name.
func initBestFirstVariables() (map[State]State, map[State]float64, *gost.MinPriorityQueue, map[string]bool, map[string]bool) {
	return make(map[State]State),
		make(map[State]float64),
		new(gost.MinPriorityQueue),
		make(map[string]bool),
		make(map[string]bool)
}
// enqueueBestFirstNeighbors expands vertex: it reports true immediately when
// vertex equals target; otherwise it relaxes the edge to every non-closed
// neighbor, recording cheaper routes in cumulativeCost/path and delegating
// the actual enqueuing (and priority computation) to the algorithm-specific
// callback.
func enqueueBestFirstNeighbors(vertex, target HeuristicState, queue *gost.MinPriorityQueue, open map[string]bool, closed map[string]bool, cumulativeCost map[State]float64, path map[State]State, callback BFSEnqueuingCallback) (found bool) {
	// Target reached: stop expanding.
	if vertex.Equals(target) {
		found = true
		return
	}
	for _, neighbor := range vertex.Neighbors() {
		// Skip neighbors that were already expanded.
		_, visited := closed[neighbor.Name()]
		if visited {
			continue
		}
		cumulativeVertexCost := cumulativeCost[vertex]
		cost := cumulativeVertexCost + vertex.Cost(neighbor)
		lowestCost, valueSet := cumulativeCost[neighbor]
		// Relax the edge: record the cheaper (or first-seen) route and let
		// the callback decide how to place the neighbor on the frontier.
		if !valueSet || cost < lowestCost {
			cumulativeCost[neighbor] = cost
			path[neighbor] = vertex
			if callback != nil {
				callback(neighbor.(HeuristicState), cost, queue, open)
			}
		}
	}
	return
}
// Each algorithm decides how to enqueue its nodes. The callback should provide any necessary parameters.
type BFSEnqueuingCallback func(vertex HeuristicState, cost float64, queue *gost.MinPriorityQueue, open map[string]bool) | best_first_helper.go | 0.797793 | 0.425307 | best_first_helper.go | starcoder |
package mimic
import (
"encoding/binary"
"fmt"
"github.com/cilium/ebpf/asm"
)
// Compile-time assertion that *PlainMemory implements VMMem.
var _ VMMem = (*PlainMemory)(nil)

// PlainMemory is the simplest implementation of VMMem possible, it is just a []byte with no additional information
// about its contents. The ByteOrder is used when Load'ing or Store'ing scalar values. If ByteOrder is not set
// the native endianness will be used.
type PlainMemory struct {
	Backing   []byte           // raw memory contents
	ByteOrder binary.ByteOrder // endianness for scalar access; lazily defaulted to native
}

// TODO it is fairly common to reuse plain memories of the same size, the process stack for example. This would be a
// good candidate for a freelist/memory pool since they can be large. In the case of plain memory we would have to
// group them by size.
// Load reads a scalar of the given size from the backing slice at offset,
// using pm.ByteOrder (lazily initialised to the native endianness on first
// use). It errors when the access would fall outside the backing memory or
// when size is not a recognised scalar width.
func (pm *PlainMemory) Load(offset uint32, size asm.Size) (uint64, error) {
	width := size.Sizeof()
	if int(offset)+width > len(pm.Backing) {
		return 0, fmt.Errorf(
			"reading %d bytes at offset %d will read out of the memory bounds of %d bytes",
			width,
			offset,
			len(pm.Backing),
		)
	}
	if pm.ByteOrder == nil {
		pm.ByteOrder = GetNativeEndianness()
	}
	view := pm.Backing[offset:]
	switch size {
	case asm.Byte:
		return uint64(view[0]), nil
	case asm.Half:
		return uint64(pm.ByteOrder.Uint16(view[:2])), nil
	case asm.Word:
		return uint64(pm.ByteOrder.Uint32(view[:4])), nil
	case asm.DWord:
		return pm.ByteOrder.Uint64(view[:8]), nil
	}
	return 0, fmt.Errorf("unknown size '%v'", size)
}
// Store writes a scalar value of the given size at offset into the backing
// memory, using pm.ByteOrder (lazily initialised to the native endianness).
// It errors when the write would overflow the backing memory or when size is
// not a recognised scalar width.
//
// The value is encoded directly into the backing slice, avoiding the
// temporary buffer allocation and copy the previous version performed on
// every call.
func (pm *PlainMemory) Store(offset uint32, value uint64, size asm.Size) error {
	bytes := size.Sizeof()
	if int(offset)+bytes > len(pm.Backing) {
		return fmt.Errorf(
			"writing %d bytes at offset %d will overflow the memory of %d bytes",
			bytes,
			offset,
			len(pm.Backing),
		)
	}
	if pm.ByteOrder == nil {
		pm.ByteOrder = GetNativeEndianness()
	}
	dst := pm.Backing[offset : int(offset)+bytes]
	switch size {
	case asm.Byte:
		dst[0] = byte(value)
	case asm.Half:
		pm.ByteOrder.PutUint16(dst, uint16(value))
	case asm.Word:
		pm.ByteOrder.PutUint32(dst, uint32(value))
	case asm.DWord:
		pm.ByteOrder.PutUint64(dst, value)
	default:
		return fmt.Errorf("unknown size '%v'", size)
	}
	return nil
}
// Read copies len(b) bytes starting at offset out of the backing memory into
// b; the length of b determines the requested size.
func (pm *PlainMemory) Read(offset uint32, b []byte) error {
	if int(offset)+len(b) > len(pm.Backing) {
		return fmt.Errorf(
			"reading %d bytes at offset %d will read out of the memory bounds of %d bytes",
			len(b), offset, len(pm.Backing),
		)
	}
	copy(b, pm.Backing[offset:])
	return nil
}
// Write copies all of b into the backing memory starting at offset, erroring
// when the write would overflow the backing slice.
func (pm *PlainMemory) Write(offset uint32, b []byte) error {
	if int(offset)+len(b) > len(pm.Backing) {
		return fmt.Errorf(
			"writing %d bytes at offset %d will overflow the memory of %d bytes",
			len(b),
			offset,
			len(pm.Backing),
		)
	}
	copy(pm.Backing[offset:], b)
	return nil
} | memory_plain.go | 0.623033 | 0.423041 | memory_plain.go | starcoder |
package grid
import (
"math/rand"
)
// dungeon is a level comprised of square rooms connected by corridors.
type dungeon struct {
	grid // superclass grid: provides the cell storage and base Grid behaviour
}
// Generate a dungeon by partitioning the given space into randomly sized
// non-overlapping blocks. Reuse the definition of a room from room.go.
func (d *dungeon) Generate(width, depth int) Grid {
	// Start from a grid that is entirely walls; rooms are carved out of it.
	d.create(width, depth, allWalls)
	rooms := d.rooms()
	// connect the rooms with corridors.
	d.corridors(rooms)
	return d
}
// rooms places random non-overlapping square rooms over the given grid.
// The newly created rooms are returned.
func (d *dungeon) rooms() []*room {
	sx, sy := d.Size()
	rooms := []*room{}
	possibleRooms := d.locateRooms(&room{0, 0, sx, sy})
	for _, rm := range possibleRooms {
		// Only use some of the possible spots for rooms (75% chance each).
		if rand.Intn(100) < 75 {
			rooms = append(rooms, rm)
			// randomize the dimensions of larger rooms: dx/dy is the width
			// of the wall border left around the carved interior.
			dx, dy := 1, 1
			if rm.w > 7 && rm.h > 7 {
				dx = rand.Intn(3) + 1
				dy = rand.Intn(3) + 1
			}
			// Carve the room interior, leaving the dx/dy border as wall.
			for x := dx; x < rm.w-dx; x++ {
				for y := dy; y < rm.h-dy; y++ {
					d.cells[rm.x+x][rm.y+y].isWall = false
				}
			}
		}
	}
	return rooms
}
// locateRooms randomly and recursively quad-partitions a given room,
// gathering and returning all the generated sub-room dimensions.
// Rooms whose halves would fall below min are returned unsplit; rooms
// larger than max on either axis are always split.
func (d *dungeon) locateRooms(rm *room) []*room {
	min, max := 5, 20
	hx, hy := rm.w/2, rm.h/2
	if hx < min || hy < min {
		return []*room{rm} // too small to split.
	}
	// split randomly, or if too large.
	if rm.w > max || rm.h > max || rand.Intn(100) < 50 {
		rooms := []*room{}
		// Recurse into the four quadrants.
		rooms = append(rooms, d.locateRooms(&room{rm.x, rm.y, hx, hy})...)
		rooms = append(rooms, d.locateRooms(&room{rm.x, rm.y + hy, hx, hy})...)
		rooms = append(rooms, d.locateRooms(&room{rm.x + hx, rm.y, hx, hy})...)
		rooms = append(rooms, d.locateRooms(&room{rm.x + hx, rm.y + hy, hx, hy})...)
		return rooms
	}
	return []*room{rm}
}
// corridors links each room to the next room in the slice by carving an
// L-shaped passage between their centres: horizontal first, then vertical.
func (d *dungeon) corridors(rooms []*room) {
	for cnt := 0; cnt < len(rooms); cnt++ {
		r0 := rooms[cnt]
		// Centre of the current room.
		x0, y0 := r0.x+r0.w/2, r0.y+r0.h/2
		if cnt+1 < len(rooms) {
			r1 := rooms[cnt+1]
			x1, y1 := r1.x+r1.w/2, r1.y+r1.h/2
			// Step direction towards the next room's centre on each axis.
			dx, dy := 1, 1
			if x1-x0 < 0 {
				dx = -1
			}
			if y1-y0 < 0 {
				dy = -1
			}
			// Carve horizontally, remembering where the carve stopped...
			newx := x0
			for x := x0; x != x1; x += dx {
				d.cells[x][y0].isWall = false
				newx = x
			}
			// ...then carve vertically from that column.
			for y := y0; y != y1; y += dy {
				d.cells[newx][y].isWall = false
			}
		}
	}
} | grid/dungeon.go | 0.621656 | 0.448547 | dungeon.go | starcoder |
package main
import (
"fmt"
"reflect"
)
func isBool(in reflect.Type) bool {
return in.Kind() == reflect.Bool
}
func isInt(in reflect.Type) bool {
switch in.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return true
}
return false
}
func isString(in reflect.Type) bool {
return in.Kind() == reflect.String
}
func isFloat(in reflect.Type) bool {
switch in.Kind() {
case reflect.Float32, reflect.Float64:
return true
}
return false
}
func isSlice(in reflect.Type) bool {
return in.Kind() == reflect.Slice
}
func isStruct(in reflect.Type) bool {
return in.Kind() == reflect.Struct
}
func isMap(in reflect.Type) bool {
return in.Kind() == reflect.Map
}
func isDuration(in reflect.Type) bool {
return in.Kind() == reflect.Struct && in.String() == "v1.Duration"
}
func isQuantity(in reflect.Type) bool {
return in.Kind() == reflect.Struct && in.String() == "resource.Quantity"
}
func isIntOrString(in reflect.Type) bool {
return in.Kind() == reflect.Struct && in.String() == "intstr.IntOrString"
}
func isPtr(in reflect.Type) bool {
return in.Kind() == reflect.Ptr
}
func isValueType(in reflect.Type) bool {
switch in.Kind() {
case reflect.Ptr:
return isValueType(in.Elem())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
return true
case reflect.Slice, reflect.Map:
return false
case reflect.Struct:
return in.String() == "v1.Duration" || in.String() == "resource.Quantity" || in.String() == "intstr.IntOrString"
default:
panic(fmt.Sprintf("unknown kind %v", in.Kind()))
}
}
// getFields returns the fields of struct type t as _field records tagged
// with their owner type. When flatten is true, anonymous (embedded) fields
// are replaced by their own fields, recursively.
func getFields(t reflect.Type, flatten bool) []_field {
	var fields []_field
	for i := 0; i < t.NumField(); i++ {
		sf := t.Field(i)
		if flatten && sf.Anonymous {
			fields = append(fields, getFields(sf.Type, flatten)...)
			continue
		}
		fields = append(fields, _field{
			StructField: sf,
			Owner:       t,
		})
	}
	return fields
}
func verifyFields(t reflect.Type, fields ...string) error {
for _, field := range fields {
valid := false
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Name == field {
valid = true
}
}
if !valid {
return fmt.Errorf("field %s is not part of struct %s", field, t.Name())
}
}
return nil
}
// resolve renders type in as Go source text, recursing through pointers,
// slices and maps, and rewriting package paths via the path->alias map m.
// Types without a package path (builtins) and types not present in m fall
// back to in.String().
func resolve(in reflect.Type, m map[string]string) string {
	switch in.Kind() {
	case reflect.Ptr:
		return "*" + resolve(in.Elem(), m)
	case reflect.Slice:
		return "[]" + resolve(in.Elem(), m)
	case reflect.Map:
		return "map[" + resolve(in.Key(), m) + "]" + resolve(in.Elem(), m)
	default:
		// Early returns replace the nested if/else chain of the previous
		// version (which also shadowed the package path variable).
		pkgPath := in.PkgPath()
		if pkgPath == "" {
			return in.String()
		}
		if alias, ok := m[pkgPath]; ok {
			return alias + "." + in.Name()
		}
		return in.String()
	}
} | hack/gen-tf-code/reflect.go | 0.529993 | 0.556038 | reflect.go | starcoder |
package list
// Clone returns a new List with the same contents as this one.
// It returns nil when l is nil or empty (preserving existing behaviour).
//
// The previous version seeded the new list with the head value and then
// re-appended every node starting from the head, duplicating the first
// element; this version copies each node exactly once.
func (l *List[T]) Clone() *List[T] {
	if l == nil || l.head == nil {
		return nil
	}
	nl := New[T]()
	var tail *node[T]
	for n := l.head; n != nil; n = n.next {
		nn := &node[T]{
			value: n.value,
			prev:  tail,
		}
		if tail == nil {
			nl.head = nn
		} else {
			tail.next = nn
		}
		tail = nn
	}
	nl.tail = tail
	return nl
}
// Count returns the number of elements in the list by walking the chain.
func (l *List[T]) Count() int {
	total := 0
	for cur := l.head; cur != nil; cur = cur.next {
		total++
	}
	return total
}
// IsEmpty returns true if the list has no elements.
func (l *List[T]) IsEmpty() bool {
	empty := l.head == nil
	return empty
}
// Push appends value at the tail of the list.
func (l *List[T]) Push(value T) {
	n := &node[T]{value: value}
	if l.head == nil {
		// Empty list: the new node is both head and tail.
		l.head, l.tail = n, n
		return
	}
	// Link behind the current tail.
	n.prev = l.tail
	l.tail.next = n
	l.tail = n
}
// Pop removes and returns the last element of the list.
// On an empty list it returns the zero value of T.
func (l *List[T]) Pop() (rs T) {
	if l.tail == nil {
		return rs // empty: zero value
	}
	n := l.tail
	if l.head == l.tail {
		// Single element: the list becomes empty.
		l.head = nil
		l.tail = nil
		return n.value
	}
	l.tail = n.prev
	l.tail.next = nil
	// Detach the removed node completely.
	n.prev = nil
	n.next = nil
	return n.value
}
// Unshift inserts value at the front of the list.
func (l *List[T]) Unshift(value T) {
	n := &node[T]{value: value}
	if l.tail == nil {
		// Empty list: the new node is both head and tail.
		l.head, l.tail = n, n
		return
	}
	// Link in front of the current head.
	n.next = l.head
	l.head.prev = n
	l.head = n
}
// Shift removes and returns the first element of the list.
// On an empty list it returns the zero value of T.
func (l *List[T]) Shift() (rs T) {
	if l.head == nil {
		return rs // empty: zero value
	}
	n := l.head
	if l.head == l.tail {
		// Single element: the list becomes empty.
		l.head = nil
		l.tail = nil
		return n.value
	}
	l.head = n.next
	l.head.prev = nil
	// Detach the removed node completely.
	n.next = nil
	n.prev = nil
	return n.value
}
// Reverse reverses the list in place by swapping each node's prev/next
// pointers and finally swapping head and tail.
func (l *List[T]) Reverse() {
	if l.head == nil {
		return
	}
	for n := l.head; n != nil; {
		// Save the successor before the swap clobbers it.
		next := n.next
		n.prev, n.next = n.next, n.prev
		n = next
	}
	l.head, l.tail = l.tail, l.head
}
// Map replaces each element's value with fun applied to it, mutating the
// list in place (no new list is created).
func (l *List[T]) Map(fun func(T) T) {
	for n := l.head; n != nil; n = n.next {
		n.value = fun(n.value)
	}
}
// Filter removes, in place, every element for which fun returns false;
// elements satisfying the predicate are kept (no new list is created).
func (l *List[T]) Filter(fun func(T) bool) {
	for n := l.head; n != nil; n = n.next {
		if fun(n.value) {
			continue // keep this element
		}
		// Unlink n from the chain, patching head/tail when n sits at an end.
		if n.prev != nil {
			n.prev.next = n.next
		}
		if n.next != nil {
			n.next.prev = n.prev
		}
		if n.prev == nil {
			l.head = n.next
		}
		if n.next == nil {
			l.tail = n.prev
		}
	}
}
// FoldLeft folds the list front-to-back: acc = fun(acc, elem), starting
// from initial. An empty list yields initial.
func (l *List[T]) FoldLeft(fun func(T, T) T, initial T) T {
	acc := initial
	for n := l.head; n != nil; n = n.next {
		acc = fun(acc, n.value)
	}
	return acc
}
// FoldRight folds the list back-to-front: acc = fun(elem, acc), starting
// from initial. An empty list yields initial.
func (l *List[T]) FoldRight(fun func(T, T) T, initial T) T {
	acc := initial
	for n := l.tail; n != nil; n = n.prev {
		acc = fun(n.value, acc)
	}
	return acc
}
// Foreach invokes fun on every element value, front to back.
func (l *List[T]) Foreach(fun func(T)) {
	for cur := l.head; cur != nil; cur = cur.next {
		fun(cur.value)
	}
}
// Contains reports whether some element equals value under comparer.
func (l *List[T]) Contains(comparer func(T, T) bool, value T) bool {
	for cur := l.head; cur != nil; cur = cur.next {
		if comparer(cur.value, value) {
			return true
		}
	}
	return false
}
// ContainsAll reports whether every one of values is present somewhere in
// the list, using comparer for equality. An empty values list yields true.
//
// The previous version returned false unless every node equalled every
// value (e.g. a list [1,2] did not "contain all" of [1]); this version
// implements the documented contains-all semantics.
func (l *List[T]) ContainsAll(comparer func(T, T) bool, values ...T) bool {
	for _, v := range values {
		found := false
		for n := l.head; n != nil && !found; n = n.next {
			found = comparer(n.value, v)
		}
		if !found {
			return false
		}
	}
	return true
}
// Split partitions the list into two new lists: elements for which fun
// returns true go left, the rest go right. The receiver is not modified.
func (l *List[T]) Split(fun func(T) bool) (*List[T], *List[T]) {
	left, right := New[T](), New[T]()
	for n := l.head; n != nil; n = n.next {
		if fun(n.value) {
			left.Push(n.value)
		} else {
			right.Push(n.value)
		}
	}
	return left, right
}
// Get returns a pointer to the value at position index, or nil when index
// is negative or past the end of the list.
func (l *List[T]) Get(index int) *T {
	if index < 0 {
		return nil
	}
	pos := 0
	for n := l.head; n != nil; n = n.next {
		if pos == index {
			return &n.value
		}
		pos++
	}
	return nil
}
// Set overwrites the value at position index; out-of-range indexes are
// silently ignored.
func (l *List[T]) Set(index int, value T) {
	if index < 0 {
		return
	}
	pos := 0
	for n := l.head; n != nil; n = n.next {
		if pos == index {
			n.value = value
			return
		}
		pos++
	}
} | collections/list/methods.go | 0.785267 | 0.403097 | methods.go | starcoder |
* Implementation of the AES-GCM Encryption/Authentication
*
* Some restrictions..
* 1. Only for use with AES
* 2. Returned tag is always 128-bits. Truncate at your own risk.
* 3. The order of function calls must follow some rules
*
* Typical sequence of calls..
* 1. call GCM_init
* 2. call GCM_add_header any number of times, as long as length of header is multiple of 16 bytes (block size)
* 3. call GCM_add_header one last time with any length of header
* 4. call GCM_add_cipher any number of times, as long as length of cipher/plaintext is multiple of 16 bytes
* 5. call GCM_add_cipher one last time with any length of cipher/plaintext
* 6. call GCM_finish to extract the tag.
*
* See http://www.mindspring.com/~dmcgrew/gcm-nist-6.pdf
*/
package core
import (
// "fmt"
"strconv"
)
const gcm_NB int = 4 // number of 32-bit words per 128-bit block

// GCM state-machine phases: header data must be supplied before cipher data,
// and a partial (non 16-byte-multiple) chunk ends its phase.
const GCM_ACCEPTING_HEADER int = 0
const GCM_ACCEPTING_CIPHER int = 1
const GCM_NOT_ACCEPTING_MORE int = 2
const GCM_FINISHED int = 3

// Operation direction selectors.
const GCM_ENCRYPTING int = 0
const GCM_DECRYPTING int = 1

// GCM holds the state of one AES-GCM encrypt/decrypt-and-authenticate run.
type GCM struct {
	table [128][4]uint32 /* 2k bytes */ // shift table for GF(2^128) multiplication by the hash subkey H
	stateX [16]byte  // running GHASH state
	Y_0    [16]byte  // initial counter block, encrypted at the end to mask the tag
	counter int
	lenA [2]uint32 // header length in bytes (hi, lo 32-bit words)
	lenC [2]uint32 // cipher/plaintext length in bytes (hi, lo 32-bit words)
	status int     // one of the GCM_* phase constants
	a *AES         // underlying AES block-cipher context
}
// gcm_pack packs four bytes into a big-endian 32-bit word.
func gcm_pack(b [4]byte) uint32 {
	return uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
}
// gcm_unpack splits a 32-bit word into four bytes, big-endian.
func gcm_unpack(a uint32) [4]byte {
	return [4]byte{
		byte(a >> 24),
		byte(a >> 16),
		byte(a >> 8),
		byte(a),
	}
}
// precompute fills the 128-row shift table used by gf2mul from the hash
// subkey H = E(K, 0). Row i holds H shifted right by i bits in GF(2^128),
// reduced by the field polynomial, so multiplying by H later becomes table
// lookups and XORs.
func (G *GCM) precompute(H []byte) {
	var b [4]byte
	j := 0
	// Load H into row 0 as four big-endian 32-bit words.
	for i := 0; i < gcm_NB; i++ {
		b[0] = H[j]
		b[1] = H[j+1]
		b[2] = H[j+2]
		b[3] = H[j+3]
		G.table[0][i] = gcm_pack(b)
		j += 4
	}
	// Each subsequent row is the previous row shifted right one bit,
	// carrying bits across word boundaries.
	for i := 1; i < 128; i++ {
		c := uint32(0)
		for j := 0; j < gcm_NB; j++ {
			G.table[i][j] = c | (G.table[i-1][j])>>1
			c = G.table[i-1][j] << 31
		}
		if c != 0 {
			G.table[i][0] ^= 0xE1000000
		} /* irreducible polynomial */
	}
}
// gf2mul multiplies the running state X by the hash subkey H in GF(2^128)
// using the table built by precompute; the product replaces G.stateX.
func (G *GCM) gf2mul() { /* gf2m mul - Z=H*X mod 2^128 */
	var P [4]uint32
	for i := 0; i < 4; i++ {
		P[i] = 0
	}
	j := uint(8)
	m := 0
	// For each of the 128 bits of X, conditionally XOR in table row i.
	for i := 0; i < 128; i++ {
		j--
		c := uint32((G.stateX[m] >> j) & 1)
		c = ^c + 1 // branchless mask: 0 -> 0x00000000, 1 -> 0xFFFFFFFF
		for k := 0; k < gcm_NB; k++ {
			P[k] ^= (G.table[i][k] & c)
		}
		if j == 0 {
			j = 8
			m++
			if m == 16 {
				break
			}
		}
	}
	// Write the product back into stateX as big-endian bytes.
	j = 0
	for i := 0; i < gcm_NB; i++ {
		b := gcm_unpack(P[i])
		G.stateX[j] = b[0]
		G.stateX[j+1] = b[1]
		G.stateX[j+2] = b[2]
		G.stateX[j+3] = b[3]
		j += 4
	}
}
// wrap finishes GHASH by folding in the bit lengths of the header (lenA)
// and the cipher data (lenC), as the final length block of the GHASH.
func (G *GCM) wrap() { /* Finish off GHASH */
	var F [4]uint32
	var L [16]byte

	/* convert lengths from bytes to bits */
	F[0] = (G.lenA[0] << 3) | (G.lenA[1]&0xE0000000)>>29
	F[1] = G.lenA[1] << 3
	F[2] = (G.lenC[0] << 3) | (G.lenC[1]&0xE0000000)>>29
	F[3] = G.lenC[1] << 3
	// Serialise the two 64-bit bit-lengths into one 16-byte block.
	j := 0
	for i := 0; i < gcm_NB; i++ {
		b := gcm_unpack(F[i])
		L[j] = b[0]
		L[j+1] = b[1]
		L[j+2] = b[2]
		L[j+3] = b[3]
		j += 4
	}
	// XOR the length block into the state and do one final multiply by H.
	for i := 0; i < 16; i++ {
		G.stateX[i] ^= L[i]
	}
	G.gf2mul()
}
// ghash absorbs len bytes of data into the GHASH state (one gf2mul per
// 16-byte block) and updates the cipher-length counter. A chunk whose
// length is not a multiple of 16 bytes moves the state machine to
// GCM_NOT_ACCEPTING_MORE. Returns false when called in the wrong phase.
func (G *GCM) ghash(plain []byte, len int) bool {
	if G.status == GCM_ACCEPTING_HEADER {
		G.status = GCM_ACCEPTING_CIPHER
	}
	if G.status != GCM_ACCEPTING_CIPHER {
		return false
	}
	j := 0
	for j < len {
		for i := 0; i < 16 && j < len; i++ {
			G.stateX[i] ^= plain[j]
			j++
			// 64-bit byte counter kept as two 32-bit words.
			G.lenC[1]++
			if G.lenC[1] == 0 {
				G.lenC[0]++
			}
		}
		G.gf2mul()
	}
	if len%16 != 0 {
		G.status = GCM_NOT_ACCEPTING_MORE
	}
	return true
}
/* Initialize GCM mode */

// Init expands the AES key, derives the hash subkey H = E(K, 0), builds the
// gf2mul table, and sets up the initial counter block Y_0 from the IV. A
// 96-bit IV is used directly as Y_0 = IV || 0x00000001; any other length is
// itself GHASHed to produce Y_0. Afterwards the instance accepts header data.
func (G *GCM) Init(nk int, key []byte, niv int, iv []byte) { /* iv size niv is usually 12 bytes (96 bits). AES key size nk can be 16,24 or 32 bytes */
	var H [16]byte

	for i := 0; i < 16; i++ {
		H[i] = 0
		G.stateX[i] = 0
	}

	G.a = new(AES)

	G.a.Init(AES_ECB, nk, key, iv)
	G.a.ecb_encrypt(H[:]) /* E(K,0) */
	G.precompute(H[:])

	G.lenA[0] = 0
	G.lenC[0] = 0
	G.lenA[1] = 0
	G.lenC[1] = 0
	if niv == 12 {
		// 96-bit IV: counter block is IV || 0x00000001.
		for i := 0; i < 12; i++ {
			G.a.f[i] = iv[i]
		}
		b := gcm_unpack(uint32(1))
		G.a.f[12] = b[0]
		G.a.f[13] = b[1]
		G.a.f[14] = b[2]
		G.a.f[15] = b[3] /* initialise IV */
		for i := 0; i < 16; i++ {
			G.Y_0[i] = G.a.f[i]
		}
	} else {
		// Arbitrary-length IV: Y_0 = GHASH(H, {}, IV), then counters reset.
		G.status = GCM_ACCEPTING_CIPHER
		G.ghash(iv, niv) /* GHASH(H,0,IV) */
		G.wrap()
		for i := 0; i < 16; i++ {
			G.a.f[i] = G.stateX[i]
			G.Y_0[i] = G.a.f[i]
			G.stateX[i] = 0
		}
		G.lenA[0] = 0
		G.lenC[0] = 0
		G.lenA[1] = 0
		G.lenC[1] = 0
	}
	G.status = GCM_ACCEPTING_HEADER
}
/* Add Header data - included but not encrypted */

// Add_header authenticates (but does not encrypt) len bytes of header data.
// It must be called before any plaintext/ciphertext is added; a call whose
// length is not a multiple of 16 bytes ends the header phase. Returns false
// when called in the wrong phase.
func (G *GCM) Add_header(header []byte, len int) bool { /* Add some header. Won't be encrypted, but will be authenticated. len is length of header */
	if G.status != GCM_ACCEPTING_HEADER {
		return false
	}
	j := 0
	for j < len {
		for i := 0; i < 16 && j < len; i++ {
			G.stateX[i] ^= header[j]
			j++
			// 64-bit header byte counter kept as two 32-bit words.
			G.lenA[1]++
			if G.lenA[1] == 0 {
				G.lenA[0]++
			}
		}
		G.gf2mul()
	}
	if len%16 != 0 {
		G.status = GCM_ACCEPTING_CIPHER
	}
	return true
}
/* Add Plaintext - included and encrypted */

// Add_plain encrypts len bytes of plain in CTR mode, folds the resulting
// ciphertext into GHASH, and returns the ciphertext. Calls may repeat while
// each length is a multiple of 16 bytes; a shorter final call ends the data
// phase. Returns nil when called in the wrong phase.
func (G *GCM) Add_plain(plain []byte, len int) []byte {
	var B [16]byte
	var b [4]byte

	cipher := make([]byte, len)
	var counter uint32 = 0
	if G.status == GCM_ACCEPTING_HEADER {
		G.status = GCM_ACCEPTING_CIPHER
	}
	if G.status != GCM_ACCEPTING_CIPHER {
		return nil
	}
	j := 0
	for j < len {
		// Increment the 32-bit big-endian counter held in Y[12..15].
		b[0] = G.a.f[12]
		b[1] = G.a.f[13]
		b[2] = G.a.f[14]
		b[3] = G.a.f[15]
		counter = gcm_pack(b)
		counter++
		b = gcm_unpack(counter)
		G.a.f[12] = b[0]
		G.a.f[13] = b[1]
		G.a.f[14] = b[2]
		G.a.f[15] = b[3] /* increment counter */
		for i := 0; i < 16; i++ {
			B[i] = G.a.f[i]
		}
		G.a.ecb_encrypt(B[:]) /* encrypt it */

		// XOR the keystream into the plaintext and absorb the resulting
		// ciphertext into GHASH, tracking the total cipher byte count.
		for i := 0; i < 16 && j < len; i++ {
			cipher[j] = (plain[j] ^ B[i])
			G.stateX[i] ^= cipher[j]
			j++
			G.lenC[1]++
			if G.lenC[1] == 0 {
				G.lenC[0]++
			}
		}
		G.gf2mul()
	}
	if len%16 != 0 {
		G.status = GCM_NOT_ACCEPTING_MORE
	}
	return cipher
}
/* Add Ciphertext - decrypts to plaintext */

// Add_cipher decrypts len bytes of cipher in CTR mode and returns the
// plaintext; GHASH absorbs the incoming ciphertext (saved in oc before it
// is overwritten by the XOR). Same phase rules as Add_plain. Returns nil
// when called in the wrong phase.
func (G *GCM) Add_cipher(cipher []byte, len int) []byte {
	var B [16]byte
	var b [4]byte

	plain := make([]byte, len)
	var counter uint32 = 0
	if G.status == GCM_ACCEPTING_HEADER {
		G.status = GCM_ACCEPTING_CIPHER
	}
	if G.status != GCM_ACCEPTING_CIPHER {
		return nil
	}
	j := 0
	for j < len {
		// Increment the 32-bit big-endian counter held in Y[12..15].
		b[0] = G.a.f[12]
		b[1] = G.a.f[13]
		b[2] = G.a.f[14]
		b[3] = G.a.f[15]
		counter = gcm_pack(b)
		counter++
		b = gcm_unpack(counter)
		G.a.f[12] = b[0]
		G.a.f[13] = b[1]
		G.a.f[14] = b[2]
		G.a.f[15] = b[3] /* increment counter */
		for i := 0; i < 16; i++ {
			B[i] = G.a.f[i]
		}
		G.a.ecb_encrypt(B[:]) /* encrypt it */

		for i := 0; i < 16 && j < len; i++ {
			// Keep the original ciphertext byte: GHASH authenticates the
			// ciphertext, not the recovered plaintext.
			oc := cipher[j]
			plain[j] = (cipher[j] ^ B[i])
			G.stateX[i] ^= oc
			j++
			G.lenC[1]++
			if G.lenC[1] == 0 {
				G.lenC[0]++
			}
		}
		G.gf2mul()
	}
	if len%16 != 0 {
		G.status = GCM_NOT_ACCEPTING_MORE
	}
	return plain
}
/* Finish and extract Tag */

// Finish completes GHASH and, when extract is true, computes and returns
// the 16-byte authentication tag T = E(K, Y_0) XOR GHASH state (wiping
// Y_0 and the state as it goes). The AES context is ended; the instance
// cannot be reused without another Init.
func (G *GCM) Finish(extract bool) []byte { /* Finish off GHASH and extract tag (MAC) */
	var tag []byte

	G.wrap()
	/* extract tag */
	if extract {
		G.a.ecb_encrypt(G.Y_0[:]) /* E(K,Y0) */
		for i := 0; i < 16; i++ {
			G.Y_0[i] ^= G.stateX[i]
		}
		// Copy out the tag and zeroise the sensitive buffers.
		for i := 0; i < 16; i++ {
			tag = append(tag, G.Y_0[i])
			G.Y_0[i] = 0
			G.stateX[i] = 0
		}
	}
	G.status = GCM_FINISHED
	G.a.End()
	return tag
}
// hex2bytes decodes a hex string into its byte representation.
// Parse failures are ignored, leaving a zero byte in place.
func hex2bytes(s string) []byte {
	out := make([]byte, len(s)/2)
	for i := 0; i < len(s); i += 2 {
		v, _ := strconv.ParseInt(s[i:i+2], 16, 32)
		out[i/2] = byte(v)
	}
	return out
}
// GCM_ENCRYPT encrypts P under key K with IV, authenticating header H,
// and returns the ciphertext together with the 128-bit tag.
func GCM_ENCRYPT(K []byte, IV []byte, H []byte, P []byte) ([]byte, []byte) {
	gcm := new(GCM)
	gcm.Init(len(K), K, len(IV), IV)
	gcm.Add_header(H, len(H))
	ciphertext := gcm.Add_plain(P, len(P))
	tag := gcm.Finish(true)
	return ciphertext, tag
}
// GCM_DECRYPT decrypts C under key K with IV, authenticating header H,
// and returns the plaintext together with the computed 128-bit tag
// (the caller must compare it against the expected tag).
func GCM_DECRYPT(K []byte, IV []byte, H []byte, C []byte) ([]byte, []byte) {
	gcm := new(GCM)
	gcm.Init(len(K), K, len(IV), IV)
	gcm.Add_header(H, len(H))
	plaintext := gcm.Add_cipher(C, len(C))
	tag := gcm.Finish(true)
	return plaintext, tag
}
/*
func main() {
KT:="feffe9928665731c6d6a8f9467308308"
MT:="d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39"
HT:="feedfacedeadbeeffeedfacedeadbeefabaddad2"
NT:="9313225df88406e555909c5aff5269aa6a7a9538534f7da1e4c303d2a318a728c3c0c95156809539fcf0e2429a6b525416aedbf5a0de6a57a637b39b";
// Tag should be 619cc5aefffe0bfa462af43c1699d050
g:=new(GCM)
M:=hex2bytes(MT)
H:=hex2bytes(HT)
N:=hex2bytes(NT)
K:=hex2bytes(KT)
lenM:=len(M)
lenH:=len(H)
lenK:=len(K)
lenIV:=len(N)
fmt.Printf("Plaintext=\n");
for i:=0;i<lenM;i++ {fmt.Printf("%02x",M[i])}
fmt.Printf("\n")
g.Init(lenK,K,lenIV,N)
g.Add_header(H,lenH)
C:=g.Add_plain(M,lenM)
T:=g.Finish(true)
fmt.Printf("Ciphertext=\n")
for i:=0;i<lenM;i++ {fmt.Printf("%02x",C[i])}
fmt.Printf("\n")
fmt.Printf("Tag=\n")
for i:=0;i<16;i++ {fmt.Printf("%02x",T[i])}
fmt.Printf("\n")
g.Init(lenK,K,lenIV,N)
g.Add_header(H,lenH)
P:=g.Add_cipher(C,lenM)
T=g.Finish(true)
fmt.Printf("Plaintext=\n");
for i:=0;i<lenM;i++ {fmt.Printf("%02x",P[i])}
fmt.Printf("\n")
fmt.Printf("Tag=\n");
for i:=0;i<16;i++ {fmt.Printf("%02x",T[i])}
fmt.Printf("\n")
}
*/ | vendor/github.com/hyperledger/fabric-amcl/core/GCM.go | 0.715026 | 0.447883 | GCM.go | starcoder |
package icinga
import (
"errors"
"fmt"
"math"
"strconv"
"strings"
)
type (
	// Range is a combination of a lower boundary, an upper boundary
	// and a flag for inverted (@) range semantics. See [0] for more
	// details.
	Range interface {
		Check(float64) bool
		CheckInt(int) bool
		CheckInt32(int32) bool
	}

	// rangeImpl is the concrete Range: [Start, End] is the interval, and
	// Invert flips which side of it raises an alert.
	rangeImpl struct {
		Start  float64
		End    float64
		Invert bool
	}
)
// NewRange parse a string and returns a Range object
// 10 < 0 or > 10, (outside the range of {0 .. 10})
// 10: < 10, (outside {10 .. ∞})
// ~:10 > 10, (outside the range of {-∞ .. 10})
// 10:20 < 10 or > 20, (outside the range of {10 .. 20})
// @10:20 ≥ 10 and ≤ 20, (inside the range of {10 .. 20})
func NewRange(value string) (Range, error) {
	// Set defaults
	r := &rangeImpl{
		Start:  0,
		End:    math.Inf(1),
		Invert: false,
	}

	// TrimSpace also strips tabs and other Unicode whitespace, which the
	// previous explicit " \n\r" cutset missed.
	value = strings.TrimSpace(value)
	// We can override a default value with an empty string and use 0 as range
	if len(value) == 0 {
		return r, nil
	}

	// Check for inverted semantics
	if value[0] == '@' {
		r.Invert = true
		value = value[1:]
	}

	// Parse lower limit ("~" means negative infinity)
	endPos := strings.Index(value, ":")
	if endPos > -1 {
		if value[0] == '~' {
			r.Start = math.Inf(-1)
		} else {
			min, err := strconv.ParseFloat(value[0:endPos], 64)
			if err != nil {
				return nil, fmt.Errorf("failed to parse lower limit: %v", err)
			}
			r.Start = min
		}
		value = value[endPos+1:]
	}

	// Parse upper limit
	if len(value) > 0 {
		max, err := strconv.ParseFloat(value, 64)
		if err != nil {
			return nil, fmt.Errorf("failed to parse upper limit: %v", err)
		}
		r.End = max
	}

	if r.End < r.Start {
		// Lowercased per Go error-string convention.
		return nil, errors.New("invalid range definition. min <= max violated")
	}

	// OK
	return r, nil
}
// Check returns true if an alert should be raised: the value lies outside
// [Start, End] under normal semantics, or inside it under inverted ('@')
// semantics. The interval is treated as closed.
func (r *rangeImpl) Check(value float64) bool {
	inside := r.Start <= value && value <= r.End
	// inside && Invert -> alert; outside && !Invert -> alert;
	// the other two combinations are fine. That truth table collapses to:
	return inside == r.Invert
}
// CheckInt is a convenience wrapper converting an int to float64
// before delegating to Check.
func (r *rangeImpl) CheckInt(val int) bool {
	v := float64(val)
	return r.Check(v)
}
// CheckInt32 is a convenience wrapper converting an int32 to float64
// before delegating to Check.
func (r *rangeImpl) CheckInt32(val int32) bool {
	v := float64(val)
	return r.Check(v)
}
// CheckValue checks an arbitrary value against the range.
// NOTE(review): val is asserted to float64 without a check, so this panics
// for any other dynamic type — confirm callers only ever pass float64.
func (r *rangeImpl) CheckValue(val interface{}) bool {
	return r.Check(val.(float64))
} | range.go | 0.773644 | 0.440469 | range.go | starcoder |
package geo
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"log"
"math"
)
// Represents a Physical Point in geographic notation [lat, lng].
type Point struct {
	lat float64 // latitude in degrees
	lng float64 // longitude in degrees
}

const (
	// According to Wikipedia, the Earth's radius is about 6,371km
	EARTH_RADIUS = 6371
)
// NewPoint returns a Point at the given latitude and longitude (degrees).
func NewPoint(lat float64, lng float64) *Point {
	p := Point{lat: lat, lng: lng}
	return &p
}
// Lat returns the point's latitude in degrees.
func (p *Point) Lat() float64 {
	lat := p.lat
	return lat
}
// Lng returns the point's longitude in degrees.
func (p *Point) Lng() float64 {
	lng := p.lng
	return lng
}
// Returns a Point populated with the lat and lng coordinates
// by transposing the origin point the passed in distance (in kilometers)
// by the passed in compass bearing (in degrees).
// Original Implementation from: http://www.movable-type.co.uk/scripts/latlong.html
func (p *Point) PointAtDistanceAndBearing(dist float64, bearing float64) *Point {
	// Angular distance travelled over the Earth's surface, in radians.
	dr := dist / EARTH_RADIUS

	// Convert the bearing and origin coordinates from degrees to radians.
	bearing = (bearing * (math.Pi / 180.0))

	lat1 := (p.lat * (math.Pi / 180.0))
	lng1 := (p.lng * (math.Pi / 180.0))

	lat2_part1 := math.Sin(lat1) * math.Cos(dr)
	lat2_part2 := math.Cos(lat1) * math.Sin(dr) * math.Cos(bearing)

	lat2 := math.Asin(lat2_part1 + lat2_part2)

	lng2_part1 := math.Sin(bearing) * math.Sin(dr) * math.Cos(lat1)
	lng2_part2 := math.Cos(dr) - (math.Sin(lat1) * math.Sin(lat2))

	lng2 := lng1 + math.Atan2(lng2_part1, lng2_part2)
	// Normalise the longitude back into the (-π, π] range.
	lng2 = math.Mod((lng2+3*math.Pi), (2*math.Pi)) - math.Pi

	// Convert the result back to degrees.
	lat2 = lat2 * (180.0 / math.Pi)
	lng2 = lng2 * (180.0 / math.Pi)

	return &Point{lat: lat2, lng: lng2}
}
// Calculates the Haversine distance between two points in kilometers.
// Original Implementation from: http://www.movable-type.co.uk/scripts/latlong.html
func (p *Point) GreatCircleDistance(p2 *Point) float64 {
	// Coordinate deltas and latitudes, converted to radians.
	dLat := (p2.lat - p.lat) * (math.Pi / 180.0)
	dLon := (p2.lng - p.lng) * (math.Pi / 180.0)

	lat1 := p.lat * (math.Pi / 180.0)
	lat2 := p2.lat * (math.Pi / 180.0)

	// Haversine formula: a is the squared half-chord length.
	a1 := math.Sin(dLat/2) * math.Sin(dLat/2)
	a2 := math.Sin(dLon/2) * math.Sin(dLon/2) * math.Cos(lat1) * math.Cos(lat2)

	a := a1 + a2

	c := 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))

	return EARTH_RADIUS * c
}
// Calculates the initial bearing (sometimes referred to as forward azimuth),
// in degrees, from p towards p2.
// Original Implementation from: http://www.movable-type.co.uk/scripts/latlong.html
func (p *Point) BearingTo(p2 *Point) float64 {

	dLon := (p2.lng - p.lng) * math.Pi / 180.0

	lat1 := p.lat * math.Pi / 180.0
	lat2 := p2.lat * math.Pi / 180.0

	y := math.Sin(dLon) * math.Cos(lat2)
	x := math.Cos(lat1)*math.Sin(lat2) -
		math.Sin(lat1)*math.Cos(lat2)*math.Cos(dLon)

	brng := math.Atan2(y, x) * 180.0 / math.Pi

	return brng
}
// Calculates the midpoint between 'this' point and the supplied point,
// along the great circle connecting them.
// Original implementation from http://www.movable-type.co.uk/scripts/latlong.html
func (p *Point) MidpointTo(p2 *Point) *Point {
	lat1 := p.lat * math.Pi / 180.0
	lat2 := p2.lat * math.Pi / 180.0

	lon1 := p.lng * math.Pi / 180.0
	dLon := (p2.lng - p.lng) * math.Pi / 180.0

	bx := math.Cos(lat2) * math.Cos(dLon)
	by := math.Cos(lat2) * math.Sin(dLon)

	lat3Rad := math.Atan2(
		math.Sin(lat1)+math.Sin(lat2),
		math.Sqrt(math.Pow(math.Cos(lat1)+bx, 2)+math.Pow(by, 2)),
	)
	lon3Rad := lon1 + math.Atan2(by, math.Cos(lat1)+bx)

	// Convert the midpoint back to degrees.
	lat3 := lat3Rad * 180.0 / math.Pi
	lon3 := lon3Rad * 180.0 / math.Pi

	return NewPoint(lat3, lon3)
}
// MarshalBinary encodes the point as two little-endian float64 values
// (lat then lng). Implements the encoding.BinaryMarshaler interface.
func (p *Point) MarshalBinary() ([]byte, error) {
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.LittleEndian, p.lat); err != nil {
		return nil, fmt.Errorf("unable to encode lat %v: %v", p.lat, err)
	}
	if err := binary.Write(&buf, binary.LittleEndian, p.lng); err != nil {
		return nil, fmt.Errorf("unable to encode lng %v: %v", p.lng, err)
	}
	return buf.Bytes(), nil
}
// UnmarshalBinary decodes two little-endian float64 values (lat then lng)
// into the point. Implements the encoding.BinaryUnmarshaler interface.
func (p *Point) UnmarshalBinary(data []byte) error {
	r := bytes.NewReader(data)
	var lat, lng float64
	if err := binary.Read(r, binary.LittleEndian, &lat); err != nil {
		return fmt.Errorf("binary.Read failed: %v", err)
	}
	if err := binary.Read(r, binary.LittleEndian, &lng); err != nil {
		return fmt.Errorf("binary.Read failed: %v", err)
	}
	p.lat, p.lng = lat, lng
	return nil
}
// MarshalJSON renders the point as {"lat":..., "lng":...}.
// Implements the json.Marshaler interface.
func (p *Point) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf(`{"lat":%v, "lng":%v}`, p.lat, p.lng)), nil
}
// Decodes the current Point from a JSON body.
// Throws an error if the body of the point cannot be interpreted by the JSON body.
// Keys missing from the body decode to 0 without error.
func (p *Point) UnmarshalJSON(data []byte) error {
	// TODO throw an error if there is an issue parsing the body.
	dec := json.NewDecoder(bytes.NewReader(data))
	var values map[string]float64
	err := dec.Decode(&values)

	if err != nil {
		// NOTE(review): logs and returns the same error — consider handling
		// it at exactly one layer.
		log.Print(err)
		return err
	}

	*p = *NewPoint(values["lat"], values["lng"])
	return nil
} | vendor/github.com/kellydunn/golang-geo/point.go | 0.80406 | 0.510192 | point.go | starcoder |
package schemas
import (
"encoding/json"
)
// ObjectTypeDefinitionLabels Singular and plural labels for the object. Used in CRM display.
// Pointer fields distinguish "unset" (nil) from an explicitly empty string.
type ObjectTypeDefinitionLabels struct {
	// The word for one object. (There’s no way to change this later.)
	Singular *string `json:"singular,omitempty"`
	// The word for multiple objects. (There’s no way to change this later.)
	Plural *string `json:"plural,omitempty"`
}
// NewObjectTypeDefinitionLabels instantiates a new ObjectTypeDefinitionLabels object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewObjectTypeDefinitionLabels() *ObjectTypeDefinitionLabels {
	return &ObjectTypeDefinitionLabels{}
}
// NewObjectTypeDefinitionLabelsWithDefaults instantiates a new ObjectTypeDefinitionLabels object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewObjectTypeDefinitionLabelsWithDefaults() *ObjectTypeDefinitionLabels {
	return &ObjectTypeDefinitionLabels{}
}
// GetSingular returns the Singular field value if set, zero value otherwise.
func (o *ObjectTypeDefinitionLabels) GetSingular() string {
	if o == nil || o.Singular == nil {
		return ""
	}
	return *o.Singular
}
// GetSingularOk returns a tuple with the Singular field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *ObjectTypeDefinitionLabels) GetSingularOk() (*string, bool) {
	if o != nil && o.Singular != nil {
		return o.Singular, true
	}
	return nil, false
}
// HasSingular reports whether the Singular field has been set
// (nil-receiver safe).
func (o *ObjectTypeDefinitionLabels) HasSingular() bool {
	return o != nil && o.Singular != nil
}
// SetSingular stores a pointer to (a copy of) v in the Singular field.
// Unlike the nil-tolerant getters, this panics on a nil receiver.
func (o *ObjectTypeDefinitionLabels) SetSingular(v string) {
	o.Singular = &v
}
// GetPlural returns the Plural field value if set, or the empty string
// when the receiver is nil or the field is unset.
func (o *ObjectTypeDefinitionLabels) GetPlural() string {
	if o != nil && o.Plural != nil {
		return *o.Plural
	}
	return ""
}
// GetPluralOk returns a pointer to the Plural field and true when it has
// been set; otherwise it returns (nil, false).
func (o *ObjectTypeDefinitionLabels) GetPluralOk() (*string, bool) {
	if o != nil && o.Plural != nil {
		return o.Plural, true
	}
	return nil, false
}
// HasPlural reports whether the Plural field has been set
// (nil-receiver safe).
func (o *ObjectTypeDefinitionLabels) HasPlural() bool {
	return o != nil && o.Plural != nil
}
// SetPlural stores a pointer to (a copy of) v in the Plural field.
// Unlike the nil-tolerant getters, this panics on a nil receiver.
func (o *ObjectTypeDefinitionLabels) SetPlural(v string) {
	o.Plural = &v
}
// MarshalJSON serializes only the fields that have been set; unset (nil)
// fields are omitted from the resulting JSON object entirely.
func (o ObjectTypeDefinitionLabels) MarshalJSON() ([]byte, error) {
	out := make(map[string]interface{}, 2)
	if o.Singular != nil {
		out["singular"] = o.Singular
	}
	if o.Plural != nil {
		out["plural"] = o.Plural
	}
	return json.Marshal(out)
}
// NullableObjectTypeDefinitionLabels distinguishes an explicitly assigned
// value (including an explicit null: isSet true, value nil) from a value
// that was never set (isSet false).
type NullableObjectTypeDefinitionLabels struct {
	value *ObjectTypeDefinitionLabels
	isSet bool
}
// Get returns the wrapped value (nil when unset or explicitly null).
func (v NullableObjectTypeDefinitionLabels) Get() *ObjectTypeDefinitionLabels {
	return v.value
}
// Set stores val and marks the wrapper as set — even when val is nil,
// which represents an explicit JSON null.
func (v *NullableObjectTypeDefinitionLabels) Set(val *ObjectTypeDefinitionLabels) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether a value (possibly nil) has been assigned.
func (v NullableObjectTypeDefinitionLabels) IsSet() bool {
	return v.isSet
}
// Unset clears the wrapped value and marks the wrapper as unset.
func (v *NullableObjectTypeDefinitionLabels) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableObjectTypeDefinitionLabels wraps val in a wrapper that is
// already marked as set.
func NewNullableObjectTypeDefinitionLabels(val *ObjectTypeDefinitionLabels) *NullableObjectTypeDefinitionLabels {
	return &NullableObjectTypeDefinitionLabels{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableObjectTypeDefinitionLabels) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes src into the wrapped value and marks the wrapper
// as set; JSON null leaves value nil (explicit null).
func (v *NullableObjectTypeDefinitionLabels) UnmarshalJSON(src []byte) error {
	// Marking set before decoding means a failed decode still reports IsSet() == true.
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | generated/schemas/model_object_type_definition_labels.go | 0.635336 | 0.439807 | model_object_type_definition_labels.go | starcoder
package forGraphBLASGo
import (
"github.com/intel/forGoParallel/pipeline"
)
// vxM bundles the operands of a vector-matrix product w = u x A over a
// semiring: Dw is the result element type, Du the vector's, and DA the
// matrix's. Elements are produced on demand via computeElement /
// computePipeline rather than eagerly.
type vxM[Dw, Du, DA any] struct {
	op Semiring[Dw, Du, DA]
	u *vectorReference[Du]
	A *matrixReference[DA]
}
// newVxM packages the semiring op together with the operands u and A of
// the product u x A, returned as the generic computeVectorT interface.
func newVxM[Dw, Du, DA any](
	op Semiring[Dw, Du, DA],
	u *vectorReference[Du],
	A *matrixReference[DA],
) computeVectorT[Dw] {
	var computation vxM[Dw, Du, DA]
	computation.op = op
	computation.u = u
	computation.A = A
	return computation
}
// resize produces an equivalent computation whose result vector has
// newSize elements, by resizing A's column count while keeping its rows.
func (compute vxM[Dw, Du, DA]) resize(newSize int) computeVectorT[Dw] {
	rows, _ := compute.A.size()
	return newVxM[Dw, Du, DA](compute.op, compute.u, compute.A.resize(rows, newSize))
}
// computeElement computes the single element of w = u x A at the given
// (column) index: the fold, under the semiring's addition, of
// mult(u[i], A[i][index]) over every position i where both u and A's
// column hold a value. ok is false when the element is absent (empty u,
// empty column, or no overlapping entries).
func (compute vxM[Dw, Du, DA]) computeElement(index int) (result Dw, ok bool) {
	add := compute.op.addition().operator()
	mult := compute.op.multiplication()
	up := compute.u.getPipeline()
	if up == nil {
		// u has no pipeline, so every product term is absent.
		return
	}
	ap := compute.A.getColPipeline(index)
	if ap == nil {
		// Column index of A has no pipeline.
		return
	}
	// Join u with A's column element-wise, multiplying where both sides
	// are present, then reduce the products with the semiring addition.
	return vectorPipelineReduce(makeVector2SourcePipeline(up, ap,
		func(index int, uValue Du, uok bool, aValue DA, aok bool) (result Dw, ok bool) {
			if uok && aok {
				return mult(uValue, aValue), true
			}
			return
		}), add)
}
// computePipeline builds a pipeline yielding the stored elements of
// w = u x A in column order: for each column of A it reduces the
// element-wise products with u (as in computeElement) and emits
// index/value batches. Returns nil when u has no stored values.
func (compute vxM[Dw, Du, DA]) computePipeline() *pipeline.Pipeline[any] {
	add := compute.op.addition().operator()
	mult := compute.op.multiplication()
	// u is consumed once per column below; optimize it in the background.
	go compute.u.optimize()
	if compute.u.nvals() == 0 {
		// Empty u: every output element is absent.
		return nil
	}
	colPipelines := compute.A.getColPipelines()
	var p pipeline.Pipeline[any]
	p.Source(pipeline.NewFunc[any](-1, func(size int) (data any, fetched int, err error) {
		var result vectorSlice[Dw]
		// Produce up to size stored elements per batch, one column at a time.
		for fetched < size && len(colPipelines) > 0 {
			value, ok := vectorPipelineReduce(makeVector2SourcePipeline(compute.u.getPipeline(), colPipelines[0].p,
				func(index int, uValue Du, uok bool, aValue DA, aok bool) (result Dw, ok bool) {
					if uok && aok {
						return mult(uValue, aValue), true
					}
					return
				},
			), add)
			if ok {
				result.indices = append(result.indices, colPipelines[0].index)
				result.values = append(result.values, value)
				fetched++
			}
			// Drop the consumed column pipeline before advancing.
			colPipelines[0].p = nil
			colPipelines = colPipelines[1:]
		}
		return result, fetched, nil
	}))
	return &p
} | functional_Vector_ComputedVxM.go | 0.538012 | 0.507873 | functional_Vector_ComputedVxM.go | starcoder
package helpers
import "sort"
// ArrayTypes is the type-parameter constraint shared by the generic
// helpers in this package: common numeric types plus any type whose
// underlying type is string.
type ArrayTypes interface {
	int | int32 | int64 | float32 | float64 | ~string
}
// Permutations generates every ordering of the elements of xs.
// Taken from: https://www.golangprograms.com/golang-program-to-generate-slice-permutations-of-number-entered-by-user.html
// xs is used as scratch space while recursing, but every swap is undone,
// so the slice is back in its original order when the function returns.
func Permutations(xs []int16) (permuts [][]int16) {
	// Swap-based backtracking: fix position k to each candidate element
	// in turn, permute the remainder, then swap back.
	var permute func(k int16)
	permute = func(k int16) {
		if k == int16(len(xs)) {
			snapshot := make([]int16, len(xs))
			copy(snapshot, xs)
			permuts = append(permuts, snapshot)
			return
		}
		for i := k; i < int16(len(xs)); i++ {
			xs[k], xs[i] = xs[i], xs[k]
			permute(k + 1)
			xs[k], xs[i] = xs[i], xs[k]
		}
	}
	permute(0)
	return permuts
}
// IsLocationValid reports whether the coordinates (x, y) address an
// element of the 2d slice arr, where y indexes the outer (row) slice and
// x is checked against the width of the first row (arr is assumed
// rectangular, matching the original behavior). An empty or nil arr is
// reported as invalid.
func IsLocationValid[T ArrayTypes](arr [][]T, x, y int) bool {
	// Check len(arr) > 0 before touching arr[0]: the previous version
	// indexed arr[0] unconditionally and panicked on an empty outer slice.
	return len(arr) > 0 && x >= 0 && x < len(arr[0]) && y >= 0 && y < len(arr)
}
// Abs returns the absolute value of the integer x.
func Abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
// ManhattansDistance returns the Manhattan (taxicab) distance between
// the points (x1, y1) and (x2, y2): |x1-x2| + |y1-y2|.
func ManhattansDistance(x1, y1, x2, y2 int) int {
	dx := Abs(x1 - x2)
	dy := Abs(y1 - y2)
	return dx + dy
}
// DecimalPositionOf returns 10 raised to the power desiredPosition:
// 1 for 0, 10 for 1, 1000 for 3, and so on. Multiplying a digit by the
// result shifts it left by desiredPosition decimal places (7 -> 7000 for
// desiredPosition 3). For desiredPosition <= 0 the result is 1.
// NOTE(review): the original comment examples implied 10^(desiredPosition-1)
// (e.g. "DecimalPositionOf(3) * 7 // 700") but the code has always
// computed 10^desiredPosition — confirm the intended semantics with callers.
func DecimalPositionOf(desiredPosition int) (positionValue int) {
	positionValue = 1
	for remaining := desiredPosition; remaining > 0; remaining-- {
		positionValue *= 10
	}
	return positionValue
}
// Copy2dArray returns a deep copy of array: the outer slice and each row
// are freshly allocated, so mutating the result never affects the input.
// An empty or nil input yields a nil result.
func Copy2dArray[T ArrayTypes](array [][]T) (copied [][]T) {
	for idx := range array {
		row := make([]T, len(array[idx]))
		copy(row, array[idx])
		copied = append(copied, row)
	}
	return
}
// RemoveItemsAtIndexes removes the elements at the given indexes from
// array and returns the remaining elements.
//
// NOTE(review): this sorts indexesForRemoval in place (descending) and
// splices within array's backing storage, so BOTH argument slices are
// mutated — confirm callers do not rely on either afterwards. Duplicate
// or out-of-range indexes will remove wrong elements or panic; validate
// inputs upstream.
func RemoveItemsAtIndexes[T ArrayTypes](array []T, indexesForRemoval []int) []T {
	// Sorting in reverse means the removals don't change the indexes for other removals
	sort.Sort(sort.Reverse(sort.IntSlice(indexesForRemoval)))
	// Go through all the indexes to remove and remove them from the array
	for _, i := range indexesForRemoval {
		array = append(array[0:i], array[i+1:]...)
	}
	return array
}
// RemoveDuplicates returns the distinct strings of data in first-seen
// order. A nil or empty input yields a nil result.
func RemoveDuplicates(data []string) (uniques []string) {
	// Track strings already emitted; map membership gives O(1) lookups.
	present := map[string]bool{}
	for _, d := range data {
		_, ok := present[d]
		if !ok {
			uniques = append(uniques, d)
			present[d] = true
		}
	}
	return uniques
} | pkg/helpers/manipulations.go | 0.754282 | 0.509581 | manipulations.go | starcoder
package packet
import "encoding/json"
const biomeRegistryJSON = `
{
"value": [
{
"name": "minecraft:ocean",
"id": 0,
"element": {
"temperature": 0.5,
"scale": 0.1,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8103167,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": -1,
"category": "ocean"
}
},
{
"name": "minecraft:plains",
"id": 1,
"element": {
"temperature": 0.8,
"scale": 0.05,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7907327,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.4,
"depth": 0.125,
"category": "plains"
}
},
{
"name": "minecraft:desert",
"id": 2,
"element": {
"temperature": 2,
"scale": 0.05,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7254527,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0,
"depth": 0.125,
"category": "desert"
}
},
{
"name": "minecraft:mountains",
"id": 3,
"element": {
"temperature": 0.2,
"scale": 0.5,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8233727,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.3,
"depth": 1,
"category": "extreme_hills"
}
},
{
"name": "minecraft:forest",
"id": 4,
"element": {
"temperature": 0.7,
"scale": 0.2,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7972607,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.8,
"depth": 0.1,
"category": "forest"
}
},
{
"name": "minecraft:taiga",
"id": 5,
"element": {
"temperature": 0.25,
"scale": 0.2,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8233983,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.8,
"depth": 0.2,
"category": "taiga"
}
},
{
"name": "minecraft:swamp",
"id": 6,
"element": {
"temperature": 0.8,
"scale": 0.1,
"precipitation": "rain",
"effects": {
"water_fog_color": 2302743,
"water_color": 6388580,
"sky_color": 7907327,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"grass_color_modifier": "swamp",
"foliage_color": 6975545,
"fog_color": 12638463
},
"downfall": 0.9,
"depth": -0.2,
"category": "swamp"
}
},
{
"name": "minecraft:river",
"id": 7,
"element": {
"temperature": 0.5,
"scale": 0,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8103167,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": -0.5,
"category": "river"
}
},
{
"name": "minecraft:nether_wastes",
"id": 8,
"element": {
"temperature": 2,
"scale": 0.2,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7254527,
"music": {
"sound": "minecraft:music.nether.nether_wastes",
"replace_current_music": 0,
"min_delay": 12000,
"max_delay": 24000
},
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.nether_wastes.mood",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 3344392,
"ambient_sound": "minecraft:ambient.nether_wastes.loop",
"additions_sound": {
"tick_chance": 0.0111,
"sound": "minecraft:ambient.nether_wastes.additions"
}
},
"downfall": 0,
"depth": 0.1,
"category": "nether"
}
},
{
"name": "minecraft:the_end",
"id": 9,
"element": {
"temperature": 0.5,
"scale": 0.2,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 0,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 10518688
},
"downfall": 0.5,
"depth": 0.1,
"category": "the_end"
}
},
{
"name": "minecraft:frozen_ocean",
"id": 10,
"element": {
"temperature_modifier": "frozen",
"temperature": 0,
"scale": 0.1,
"precipitation": "snow",
"effects": {
"water_fog_color": 329011,
"water_color": 3750089,
"sky_color": 8364543,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": -1,
"category": "ocean"
}
},
{
"name": "minecraft:frozen_river",
"id": 11,
"element": {
"temperature": 0,
"scale": 0,
"precipitation": "snow",
"effects": {
"water_fog_color": 329011,
"water_color": 3750089,
"sky_color": 8364543,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": -0.5,
"category": "river"
}
},
{
"name": "minecraft:snowy_tundra",
"id": 12,
"element": {
"temperature": 0,
"scale": 0.05,
"precipitation": "snow",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8364543,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": 0.125,
"category": "icy"
}
},
{
"name": "minecraft:snowy_mountains",
"id": 13,
"element": {
"temperature": 0,
"scale": 0.3,
"precipitation": "snow",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8364543,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": 0.45,
"category": "icy"
}
},
{
"name": "minecraft:mushroom_fields",
"id": 14,
"element": {
"temperature": 0.9,
"scale": 0.3,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7842047,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 1,
"depth": 0.2,
"category": "mushroom"
}
},
{
"name": "minecraft:mushroom_field_shore",
"id": 15,
"element": {
"temperature": 0.9,
"scale": 0.025,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7842047,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 1,
"depth": 0,
"category": "mushroom"
}
},
{
"name": "minecraft:beach",
"id": 16,
"element": {
"temperature": 0.8,
"scale": 0.025,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7907327,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.4,
"depth": 0,
"category": "beach"
}
},
{
"name": "minecraft:desert_hills",
"id": 17,
"element": {
"temperature": 2,
"scale": 0.3,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7254527,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0,
"depth": 0.45,
"category": "desert"
}
},
{
"name": "minecraft:wooded_hills",
"id": 18,
"element": {
"temperature": 0.7,
"scale": 0.3,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7972607,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.8,
"depth": 0.45,
"category": "forest"
}
},
{
"name": "minecraft:taiga_hills",
"id": 19,
"element": {
"temperature": 0.25,
"scale": 0.3,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8233983,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.8,
"depth": 0.45,
"category": "taiga"
}
},
{
"name": "minecraft:mountain_edge",
"id": 20,
"element": {
"temperature": 0.2,
"scale": 0.3,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8233727,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.3,
"depth": 0.8,
"category": "extreme_hills"
}
},
{
"name": "minecraft:jungle",
"id": 21,
"element": {
"temperature": 0.95,
"scale": 0.2,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7842047,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.9,
"depth": 0.1,
"category": "jungle"
}
},
{
"name": "minecraft:jungle_hills",
"id": 22,
"element": {
"temperature": 0.95,
"scale": 0.3,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7842047,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.9,
"depth": 0.45,
"category": "jungle"
}
},
{
"name": "minecraft:jungle_edge",
"id": 23,
"element": {
"temperature": 0.95,
"scale": 0.2,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7842047,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.8,
"depth": 0.1,
"category": "jungle"
}
},
{
"name": "minecraft:deep_ocean",
"id": 24,
"element": {
"temperature": 0.5,
"scale": 0.1,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8103167,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": -1.8,
"category": "ocean"
}
},
{
"name": "minecraft:stone_shore",
"id": 25,
"element": {
"temperature": 0.2,
"scale": 0.8,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8233727,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.3,
"depth": 0.1,
"category": "none"
}
},
{
"name": "minecraft:snowy_beach",
"id": 26,
"element": {
"temperature": 0.05,
"scale": 0.025,
"precipitation": "snow",
"effects": {
"water_fog_color": 329011,
"water_color": 4020182,
"sky_color": 8364543,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.3,
"depth": 0,
"category": "beach"
}
},
{
"name": "minecraft:birch_forest",
"id": 27,
"element": {
"temperature": 0.6,
"scale": 0.2,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8037887,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.6,
"depth": 0.1,
"category": "forest"
}
},
{
"name": "minecraft:birch_forest_hills",
"id": 28,
"element": {
"temperature": 0.6,
"scale": 0.3,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8037887,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.6,
"depth": 0.45,
"category": "forest"
}
},
{
"name": "minecraft:dark_forest",
"id": 29,
"element": {
"temperature": 0.7,
"scale": 0.2,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7972607,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"grass_color_modifier": "dark_forest",
"fog_color": 12638463
},
"downfall": 0.8,
"depth": 0.1,
"category": "forest"
}
},
{
"name": "minecraft:snowy_taiga",
"id": 30,
"element": {
"temperature": -0.5,
"scale": 0.2,
"precipitation": "snow",
"effects": {
"water_fog_color": 329011,
"water_color": 4020182,
"sky_color": 8625919,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.4,
"depth": 0.2,
"category": "taiga"
}
},
{
"name": "minecraft:snowy_taiga_hills",
"id": 31,
"element": {
"temperature": -0.5,
"scale": 0.3,
"precipitation": "snow",
"effects": {
"water_fog_color": 329011,
"water_color": 4020182,
"sky_color": 8625919,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.4,
"depth": 0.45,
"category": "taiga"
}
},
{
"name": "minecraft:giant_tree_taiga",
"id": 32,
"element": {
"temperature": 0.3,
"scale": 0.2,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8168447,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.8,
"depth": 0.2,
"category": "taiga"
}
},
{
"name": "minecraft:giant_tree_taiga_hills",
"id": 33,
"element": {
"temperature": 0.3,
"scale": 0.3,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8168447,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.8,
"depth": 0.45,
"category": "taiga"
}
},
{
"name": "minecraft:wooded_mountains",
"id": 34,
"element": {
"temperature": 0.2,
"scale": 0.5,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8233727,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.3,
"depth": 1,
"category": "extreme_hills"
}
},
{
"name": "minecraft:savanna",
"id": 35,
"element": {
"temperature": 1.2,
"scale": 0.05,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7711487,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0,
"depth": 0.125,
"category": "savanna"
}
},
{
"name": "minecraft:savanna_plateau",
"id": 36,
"element": {
"temperature": 1,
"scale": 0.025,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7776511,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0,
"depth": 1.5,
"category": "savanna"
}
},
{
"name": "minecraft:badlands",
"id": 37,
"element": {
"temperature": 2,
"scale": 0.2,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7254527,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"grass_color": 9470285,
"foliage_color": 10387789,
"fog_color": 12638463
},
"downfall": 0,
"depth": 0.1,
"category": "mesa"
}
},
{
"name": "minecraft:wooded_badlands_plateau",
"id": 38,
"element": {
"temperature": 2,
"scale": 0.025,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7254527,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"grass_color": 9470285,
"foliage_color": 10387789,
"fog_color": 12638463
},
"downfall": 0,
"depth": 1.5,
"category": "mesa"
}
},
{
"name": "minecraft:badlands_plateau",
"id": 39,
"element": {
"temperature": 2,
"scale": 0.025,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7254527,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"grass_color": 9470285,
"foliage_color": 10387789,
"fog_color": 12638463
},
"downfall": 0,
"depth": 1.5,
"category": "mesa"
}
},
{
"name": "minecraft:small_end_islands",
"id": 40,
"element": {
"temperature": 0.5,
"scale": 0.2,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 0,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 10518688
},
"downfall": 0.5,
"depth": 0.1,
"category": "the_end"
}
},
{
"name": "minecraft:end_midlands",
"id": 41,
"element": {
"temperature": 0.5,
"scale": 0.2,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 0,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 10518688
},
"downfall": 0.5,
"depth": 0.1,
"category": "the_end"
}
},
{
"name": "minecraft:end_highlands",
"id": 42,
"element": {
"temperature": 0.5,
"scale": 0.2,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 0,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 10518688
},
"downfall": 0.5,
"depth": 0.1,
"category": "the_end"
}
},
{
"name": "minecraft:end_barrens",
"id": 43,
"element": {
"temperature": 0.5,
"scale": 0.2,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 0,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 10518688
},
"downfall": 0.5,
"depth": 0.1,
"category": "the_end"
}
},
{
"name": "minecraft:warm_ocean",
"id": 44,
"element": {
"temperature": 0.5,
"scale": 0.1,
"precipitation": "rain",
"effects": {
"water_fog_color": 270131,
"water_color": 4445678,
"sky_color": 8103167,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": -1,
"category": "ocean"
}
},
{
"name": "minecraft:lukewarm_ocean",
"id": 45,
"element": {
"temperature": 0.5,
"scale": 0.1,
"precipitation": "rain",
"effects": {
"water_fog_color": 267827,
"water_color": 4566514,
"sky_color": 8103167,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": -1,
"category": "ocean"
}
},
{
"name": "minecraft:cold_ocean",
"id": 46,
"element": {
"temperature": 0.5,
"scale": 0.1,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4020182,
"sky_color": 8103167,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": -1,
"category": "ocean"
}
},
{
"name": "minecraft:deep_warm_ocean",
"id": 47,
"element": {
"temperature": 0.5,
"scale": 0.1,
"precipitation": "rain",
"effects": {
"water_fog_color": 270131,
"water_color": 4445678,
"sky_color": 8103167,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": -1.8,
"category": "ocean"
}
},
{
"name": "minecraft:deep_lukewarm_ocean",
"id": 48,
"element": {
"temperature": 0.5,
"scale": 0.1,
"precipitation": "rain",
"effects": {
"water_fog_color": 267827,
"water_color": 4566514,
"sky_color": 8103167,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": -1.8,
"category": "ocean"
}
},
{
"name": "minecraft:deep_cold_ocean",
"id": 49,
"element": {
"temperature": 0.5,
"scale": 0.1,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4020182,
"sky_color": 8103167,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": -1.8,
"category": "ocean"
}
},
{
"name": "minecraft:deep_frozen_ocean",
"id": 50,
"element": {
"temperature_modifier": "frozen",
"temperature": 0.5,
"scale": 0.1,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 3750089,
"sky_color": 8103167,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": -1.8,
"category": "ocean"
}
},
{
"name": "minecraft:the_void",
"id": 127,
"element": {
"temperature": 0.5,
"scale": 0.2,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8103167,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": 0.1,
"category": "none"
}
},
{
"name": "minecraft:sunflower_plains",
"id": 129,
"element": {
"temperature": 0.8,
"scale": 0.05,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7907327,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.4,
"depth": 0.125,
"category": "plains"
}
},
{
"name": "minecraft:desert_lakes",
"id": 130,
"element": {
"temperature": 2,
"scale": 0.25,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7254527,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0,
"depth": 0.225,
"category": "desert"
}
},
{
"name": "minecraft:gravelly_mountains",
"id": 131,
"element": {
"temperature": 0.2,
"scale": 0.5,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8233727,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.3,
"depth": 1,
"category": "extreme_hills"
}
},
{
"name": "minecraft:flower_forest",
"id": 132,
"element": {
"temperature": 0.7,
"scale": 0.4,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7972607,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.8,
"depth": 0.1,
"category": "forest"
}
},
{
"name": "minecraft:taiga_mountains",
"id": 133,
"element": {
"temperature": 0.25,
"scale": 0.4,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8233983,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.8,
"depth": 0.3,
"category": "taiga"
}
},
{
"name": "minecraft:swamp_hills",
"id": 134,
"element": {
"temperature": 0.8,
"scale": 0.3,
"precipitation": "rain",
"effects": {
"water_fog_color": 2302743,
"water_color": 6388580,
"sky_color": 7907327,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"grass_color_modifier": "swamp",
"foliage_color": 6975545,
"fog_color": 12638463
},
"downfall": 0.9,
"depth": -0.1,
"category": "swamp"
}
},
{
"name": "minecraft:ice_spikes",
"id": 140,
"element": {
"temperature": 0,
"scale": 0.45000002,
"precipitation": "snow",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8364543,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.5,
"depth": 0.425,
"category": "icy"
}
},
{
"name": "minecraft:modified_jungle",
"id": 149,
"element": {
"temperature": 0.95,
"scale": 0.4,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7842047,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.9,
"depth": 0.2,
"category": "jungle"
}
},
{
"name": "minecraft:modified_jungle_edge",
"id": 151,
"element": {
"temperature": 0.95,
"scale": 0.4,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7842047,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.8,
"depth": 0.2,
"category": "jungle"
}
},
{
"name": "minecraft:tall_birch_forest",
"id": 155,
"element": {
"temperature": 0.6,
"scale": 0.4,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8037887,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.6,
"depth": 0.2,
"category": "forest"
}
},
{
"name": "minecraft:tall_birch_hills",
"id": 156,
"element": {
"temperature": 0.6,
"scale": 0.5,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8037887,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.6,
"depth": 0.55,
"category": "forest"
}
},
{
"name": "minecraft:dark_forest_hills",
"id": 157,
"element": {
"temperature": 0.7,
"scale": 0.4,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7972607,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"grass_color_modifier": "dark_forest",
"fog_color": 12638463
},
"downfall": 0.8,
"depth": 0.2,
"category": "forest"
}
},
{
"name": "minecraft:snowy_taiga_mountains",
"id": 158,
"element": {
"temperature": -0.5,
"scale": 0.4,
"precipitation": "snow",
"effects": {
"water_fog_color": 329011,
"water_color": 4020182,
"sky_color": 8625919,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.4,
"depth": 0.3,
"category": "taiga"
}
},
{
"name": "minecraft:giant_spruce_taiga",
"id": 160,
"element": {
"temperature": 0.25,
"scale": 0.2,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8233983,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.8,
"depth": 0.2,
"category": "taiga"
}
},
{
"name": "minecraft:giant_spruce_taiga_hills",
"id": 161,
"element": {
"temperature": 0.25,
"scale": 0.2,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8233983,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.8,
"depth": 0.2,
"category": "taiga"
}
},
{
"name": "minecraft:modified_gravelly_mountains",
"id": 162,
"element": {
"temperature": 0.2,
"scale": 0.5,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8233727,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.3,
"depth": 1,
"category": "extreme_hills"
}
},
{
"name": "minecraft:shattered_savanna",
"id": 163,
"element": {
"temperature": 1.1,
"scale": 1.225,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7776767,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0,
"depth": 0.3625,
"category": "savanna"
}
},
{
"name": "minecraft:shattered_savanna_plateau",
"id": 164,
"element": {
"temperature": 1,
"scale": 1.2125001,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7776511,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0,
"depth": 1.05,
"category": "savanna"
}
},
{
"name": "minecraft:eroded_badlands",
"id": 165,
"element": {
"temperature": 2,
"scale": 0.2,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7254527,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"grass_color": 9470285,
"foliage_color": 10387789,
"fog_color": 12638463
},
"downfall": 0,
"depth": 0.1,
"category": "mesa"
}
},
{
"name": "minecraft:modified_wooded_badlands_plateau",
"id": 166,
"element": {
"temperature": 2,
"scale": 0.3,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7254527,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"grass_color": 9470285,
"foliage_color": 10387789,
"fog_color": 12638463
},
"downfall": 0,
"depth": 0.45,
"category": "mesa"
}
},
{
"name": "minecraft:modified_badlands_plateau",
"id": 167,
"element": {
"temperature": 2,
"scale": 0.3,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7254527,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"grass_color": 9470285,
"foliage_color": 10387789,
"fog_color": 12638463
},
"downfall": 0,
"depth": 0.45,
"category": "mesa"
}
},
{
"name": "minecraft:bamboo_jungle",
"id": 168,
"element": {
"temperature": 0.95,
"scale": 0.2,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7842047,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.9,
"depth": 0.1,
"category": "jungle"
}
},
{
"name": "minecraft:bamboo_jungle_hills",
"id": 169,
"element": {
"temperature": 0.95,
"scale": 0.3,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7842047,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.9,
"depth": 0.45,
"category": "jungle"
}
},
{
"name": "minecraft:soul_sand_valley",
"id": 170,
"element": {
"temperature": 2,
"scale": 0.2,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7254527,
"particle": {
"probability": 0.00625,
"options": {
"type": "minecraft:ash"
}
},
"music": {
"sound": "minecraft:music.nether.soul_sand_valley",
"replace_current_music": 0,
"min_delay": 12000,
"max_delay": 24000
},
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.soul_sand_valley.mood",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 1787717,
"ambient_sound": "minecraft:ambient.soul_sand_valley.loop",
"additions_sound": {
"tick_chance": 0.0111,
"sound": "minecraft:ambient.soul_sand_valley.additions"
}
},
"downfall": 0,
"depth": 0.1,
"category": "nether"
}
},
{
"name": "minecraft:crimson_forest",
"id": 171,
"element": {
"temperature": 2,
"scale": 0.2,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7254527,
"particle": {
"probability": 0.025,
"options": {
"type": "minecraft:crimson_spore"
}
},
"music": {
"sound": "minecraft:music.nether.crimson_forest",
"replace_current_music": 0,
"min_delay": 12000,
"max_delay": 24000
},
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.crimson_forest.mood",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 3343107,
"ambient_sound": "minecraft:ambient.crimson_forest.loop",
"additions_sound": {
"tick_chance": 0.0111,
"sound": "minecraft:ambient.crimson_forest.additions"
}
},
"downfall": 0,
"depth": 0.1,
"category": "nether"
}
},
{
"name": "minecraft:warped_forest",
"id": 172,
"element": {
"temperature": 2,
"scale": 0.2,
"precipitation": "none",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7254527,
"particle": {
"probability": 0.01428,
"options": {
"type": "minecraft:warped_spore"
}
},
"music": {
"sound": "minecraft:music.nether.warped_forest",
"replace_current_music": 0,
"min_delay": 12000,
"max_delay": 24000
},
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.warped_forest.mood",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 1705242,
"ambient_sound": "minecraft:ambient.warped_forest.loop",
"additions_sound": {
"tick_chance": 0.0111,
"sound": "minecraft:ambient.warped_forest.additions"
}
},
"downfall": 0,
"depth": 0.1,
"category": "nether"
}
},
{
"name": "minecraft:basalt_deltas",
"id": 173,
"element": {
"temperature": 2,
"scale": 0.2,
"precipitation": "none",
"effects": {
"water_fog_color": 4341314,
"water_color": 4159204,
"sky_color": 7254527,
"particle": {
"probability": 0.118093334,
"options": {
"type": "minecraft:white_ash"
}
},
"music": {
"sound": "minecraft:music.nether.basalt_deltas",
"replace_current_music": 0,
"min_delay": 12000,
"max_delay": 24000
},
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.basalt_deltas.mood",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 6840176,
"ambient_sound": "minecraft:ambient.basalt_deltas.loop",
"additions_sound": {
"tick_chance": 0.0111,
"sound": "minecraft:ambient.basalt_deltas.additions"
}
},
"downfall": 0,
"depth": 0.1,
"category": "nether"
}
},
{
"name": "minecraft:dripstone_caves",
"id": 174,
"element": {
"temperature": 0.8,
"scale": 0.05,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 7907327,
"mood_sound": {
"tick_delay": 6000,
"sound": "minecraft:ambient.cave",
"offset": 2,
"block_search_extent": 8
},
"fog_color": 12638463
},
"downfall": 0.4,
"depth": 0.125,
"category": "underground"
}
},
{
"name": "minecraft:lush_caves",
"id": 175,
"element": {
"temperature": 0.5,
"scale": 0.2,
"precipitation": "rain",
"effects": {
"water_fog_color": 329011,
"water_color": 4159204,
"sky_color": 8103167,
"fog_color": 12638463
},
"downfall": 0.5,
"depth": 0.1,
"category": "underground"
}
}
],
"type": "minecraft:worldgen/biome"
}
`
// buildBiomeRegistry builds the biome registry from the reference json.
//
// The JSON blob is embedded in this file and must always parse; a decode
// failure can only mean the source file itself is corrupted, so it is
// surfaced as a panic instead of being silently ignored (which previously
// yielded a nil registry map).
func buildBiomeRegistry() map[string]interface{} {
	var b map[string]interface{}
	if err := json.Unmarshal([]byte(biomeRegistryJSON), &b); err != nil {
		panic("biome registry JSON is invalid: " + err.Error())
	}
	return b
} | pkg/edition/java/proto/packet/biome.go | 0.615666 | 0.41253 | biome.go | starcoder |
package mongo
import (
"fmt"
)
// Price is a structure that holds a price and gives information about the
// amount of tax applied to that price. Instances should be built via the
// PriceFrom* constructors, which derive net and tax from the gross.
type Price struct {
	gross Money // The gross, i.e. the tax-inclusive amount.
	net Money // The net which is equal to the gross minus tax.
	tax Money // The amount of tax subtracted from the gross to produce the net.
	taxPercent float64 // The percentage of tax deducted; constructors accept values in [0, 100].
}
// PriceFromSubunits constructs a new price object from an integer and tax
// percentage. The value integer used should represent the subunits of the
// currency.
// currIsoCode is an ISO 4217 currency code.
// grossValue is the monetary value in subunits.
// taxPercent is the amount of tax applied to this price, in [0, 100].
// f is a function to be used for division operations.
func PriceFromSubunits(currIsoCode string, grossValue int64, taxPercent float64, f roundFunc) (Price, error) {
	// Reject an out-of-range tax percentage before doing any monetary work.
	// (The check is 0-100; the old error text wrongly said "1 and 100".)
	if taxPercent < 0.0 || taxPercent > 100.0 {
		return Price{}, fmt.Errorf("tax percent '%f' must be between 0 and 100", taxPercent)
	}
	var price Price
	var err error
	price.gross, err = MoneyFromSubunits(currIsoCode, grossValue, f)
	if err != nil {
		return Price{}, err
	}
	price.taxPercent = taxPercent
	if taxPercent < 100.0 {
		// net = gross / (1 + rate); tax is whatever remains of the gross.
		price.net = price.gross.Div(1 + (taxPercent / 100))
		price.tax = price.gross.Sub(price.net)
	} else {
		// 100% tax: the whole gross is tax and the net is zero.
		price.tax = price.gross
		price.net = price.gross.Sub(price.tax)
	}
	return price, nil
}
// PriceFromString constructs a new price object from a string and tax
// percentage. Everything not contained within a number is stripped out before
// parsing.
// currIsoCode is an ISO 4217 currency code.
// grossValueStr is the monetary value as a string.
// taxPercent is the amount of tax applied to this price, in [0, 100].
// f is a function to be used for division operations.
func PriceFromString(currIsoCode string, grossValueStr string, taxPercent float64, f roundFunc) (Price, error) {
	// Reject an out-of-range tax percentage before doing any monetary work.
	// (The check is 0-100; the old error text wrongly said "1 and 100".)
	if taxPercent < 0.0 || taxPercent > 100.0 {
		return Price{}, fmt.Errorf("tax percent '%f' must be between 0 and 100", taxPercent)
	}
	var price Price
	var err error
	price.gross, err = MoneyFromString(currIsoCode, grossValueStr, f)
	if err != nil {
		return Price{}, err
	}
	price.taxPercent = taxPercent
	if taxPercent < 100.0 {
		// net = gross / (1 + rate); tax is whatever remains of the gross.
		price.net = price.gross.Div(1 + (taxPercent / 100))
		price.tax = price.gross.Sub(price.net)
	} else {
		// 100% tax: the whole gross is tax and the net is zero.
		price.tax = price.gross
		price.net = price.gross.Sub(price.tax)
	}
	return price, nil
}
// PriceGBP is a helper that constructs a GBP price from subunits,
// passing a nil rounding function.
func PriceGBP(value int64, taxPercent float64) (Price, error) {
	return PriceFromSubunits("GBP", value, taxPercent, nil)
}
// PriceEUR is a helper that constructs a EUR price from subunits,
// passing a nil rounding function.
func PriceEUR(value int64, taxPercent float64) (Price, error) {
	return PriceFromSubunits("EUR", value, taxPercent, nil)
}
// IsoCode returns the ISO 4217 currency code.
func (p Price) IsoCode() string {
	return p.gross.format.code
}
// Gross returns the gross monetary value of the price.
func (p Price) Gross() Money {
	return p.gross
}
// Net returns the net monetary value of the price which is equal to the gross
// minus tax.
func (p Price) Net() Money {
	return p.net
}
// Tax returns the amount of tax subtracted from the gross to produce the net.
func (p Price) Tax() Money {
	return p.tax
}
// TaxPercent returns the amount of tax subtracted from the gross to produce the
// net, as a percentage.
func (p Price) TaxPercent() float64 {
	return p.taxPercent
}
// MarshalJSON is an implementation of json.Marshaler. The document is built
// by hand with Sprintf: the monetary fields are rendered via their Stringers
// and taxPercent with %f (six decimal places).
func (p Price) MarshalJSON() ([]byte, error) {
	json := fmt.Sprintf(`{"currency": "%s", "gross": "%s", "net": "%s", "tax": "%s", "taxPercent": %f}`, p.gross.format.code, p.gross, p.net, p.tax, p.taxPercent)
	return []byte(json), nil
}
// String is an implementation of fmt.Stringer and returns the string
// formatted representation of the gross price value.
func (p Price) String() string {
	return p.gross.String()
}
// StringNoSymbol returns the string formatted representation of the price
// value without a currency symbol.
func (p Price) StringNoSymbol() string {
	return p.gross.StringNoSymbol()
} | price.go | 0.795022 | 0.676133 | price.go | starcoder |
<tutorial>
Getting started example of using 51Degrees device detection match metrics
information. The example shows how to:
<ol>
<li>Instantiate the 51Degrees device detection provider.
<p><pre class="prettyprint lang-go">
var provider = FiftyOneDegreesPatternV3.NewProvider(dataFile)
</pre></p>
<li>Produce a match for a single HTTP User-Agent header
<p><pre class="prettyprint lang-go">
var match = provider.GetMatch(userAgent)
</pre></p>
<li>Obtain device Id: consists of four components separated by a hyphen
symbol: Hardware-Platform-Browser-IsCrawler where each Component
represents an ID of the corresponding Profile.
<p><pre class="prettyprint lang-go">match.GetDeviceId()</pre>
<li>Obtain match method: provides information about the
algorithm that was used to perform detection for a particular User-Agent.
For more information on what each method means please see:
<a href="https://51degrees.com/support/documentation/pattern">
How device detection works</a>
<p><pre class="prettyprint lang-go">match.GetMethod()</pre>
<li>Obtain difference: used when detection method is not Exact or None.
This is an integer value and the larger the value the less confident the
detector is in this result.
<p><pre class="prettyprint lang-go">match.GetDifference()</pre>
<li>Obtain signature rank: an integer value that indicates how popular
the device is. The lower the rank the more popular the signature.
<p><pre class="prettyprint lang-go">match.GetRank()</pre>
</ol>
This example assumes you have the 51Degrees Go API installed correctly.
</tutorial>
*/
// Snippet Start
package main
import (
"fmt"
"./src/pattern"
)
// matchMethod maps the integer returned by Match.GetMethod() to a human
// readable name; the slice order must match the detector's method
// enumeration (it is indexed directly in outputMetrics).
var matchMethod = []string {
	"NONE",
	"EXACT",
	"NUMERIC",
	"NEAREST",
	"CLOSEST"}
// Location of data file.
var dataFile = "../data/51Degrees-LiteV3.2.dat"
// Provides access to device detection functions.
var provider =
	FiftyOneDegreesPatternV3.NewProvider(dataFile)
// Which properties to retrieve. NOTE(review): declared but never referenced
// in the code below — confirm whether it is still needed.
var properties = []string{"IsMobile", "PlatformName", "PlatformVersion"}
// User-Agent string of an iPhone mobile device.
var mobileUserAgent = "Mozilla/5.0 (iPhone; CPU iPhone OS 7_1 like Mac OS X) " +
	"AppleWebKit/537.51.2 (KHTML, like Gecko) 'Version/7.0 Mobile/11D167 " +
	"Safari/9537.53"
// User-Agent string of Firefox Web browser version 41 on desktop.
var desktopUserAgent = "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:41.0) " +
	"Gecko/20100101 Firefox/41.0"
// User-Agent string of a MediaHub device.
var mediaHubUserAgent = "Mozilla/5.0 (Linux; Android 4.4.2; X7 Quad Core " +
	"Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 " +
	"Chrome/30.0.0.0 Safari/537.36"
// outputMetrics takes a match object as an argument and prints the match
// metrics relating to the specific match: device id, detection method,
// difference and signature rank.
func outputMetrics(match FiftyOneDegreesPatternV3.Match) {
	fmt.Println(" Id: ", match.GetDeviceId())
	fmt.Println(" Match Method: ", matchMethod[match.GetMethod()])
	fmt.Println(" Difference: ", match.GetDifference())
	fmt.Println(" Rank: ", match.GetRank())
}
// main runs one detection for each example User-Agent (mobile, desktop and
// media hub) and prints the match metrics for each result.
func main() {
	fmt.Println("Starting Getting Started Match Metrics Example.\n")
	// Carries out a match with a mobile User-Agent.
	fmt.Println("Mobile User-Agent: ", mobileUserAgent)
	var match = provider.GetMatch(mobileUserAgent)
	outputMetrics(match)
	// Carries out a match with a desktop User-Agent.
	fmt.Println("Desktop User-Agent: ", desktopUserAgent)
	match = provider.GetMatch(desktopUserAgent)
	outputMetrics(match)
	// Carries out a match with a MediaHub User-Agent.
	fmt.Println("Media Hub User-Agent: ", mediaHubUserAgent)
	match = provider.GetMatch(mediaHubUserAgent)
	outputMetrics(match)
}
// Snippet End | MatchMetrics.go | 0.628179 | 0.654398 | MatchMetrics.go | starcoder |
package r3
import (
"math"
)
// Vector is a three dimensional vector with float64 components.
type Vector struct {
	X, Y, Z float64
}
// Zero is the zero Vector (all components 0).
var Zero = Vector{}
// createVector returns the component-wise difference a - b, i.e. the
// vector pointing from b to a.
func createVector(a, b Vector) Vector {
	return Vector{
		a.X - b.X,
		a.Y - b.Y,
		a.Z - b.Z,
	}
}
// Norm returns the vector's norm (Euclidean length).
func (v Vector) Norm() float64 { return math.Sqrt(float64(Dot(v, v))) }
// Norm2 returns the square of the norm.
func (v Vector) Norm2() float64 { return Dot(v, v) }
// Normalize updates the vector to a unit vector in the same direction as v.
// A zero vector is left as the zero vector rather than producing NaNs.
func (v *Vector) Normalize() *Vector {
	n2 := v.Norm2()
	if n2 == 0 {
		v.X, v.Y, v.Z = 0, 0, 0
		// Return early: without this, execution fell through to
		// Scale(1/Sqrt(0)), multiplying every component by +Inf and
		// turning the zero vector into {NaN, NaN, NaN} (0 * +Inf).
		return v
	}
	v.Scale(1 / math.Sqrt(n2))
	return v
}
// Normalize returns a unit vector in the same direction as v.
// The zero vector is returned unchanged (no NaNs are produced).
func Normalize(v Vector) Vector {
	n2 := v.Norm2()
	if n2 == 0 {
		return Vector{0, 0, 0}
	}
	return v.Scaled(1 / math.Sqrt(n2))
}
// IsUnit returns whether this vector is of approximately unit length,
// comparing the squared norm against 1 within a fixed epsilon.
func (v Vector) IsUnit() bool {
	const epsilon = 5e-14
	return math.Abs(float64(v.Norm2()-1)) <= epsilon
}
// Abs replaces each component of v with its absolute value, in place.
func (v *Vector) Abs() {
	v.X = math.Abs(v.X)
	v.Y = math.Abs(v.Y)
	v.Z = math.Abs(v.Z)
}
// Abs returns a copy of v with nonnegative components.
func Abs(v Vector) Vector {
	return Vector{
		math.Abs(v.X), math.Abs(v.Y), math.Abs(v.Z)}
}
// Scale multiplies all elements of the vector by f in place and returns v.
func (v *Vector) Scale(f float64) *Vector {
	v.X = f * v.X
	v.Y = f * v.Y
	v.Z = f * v.Z
	return v
}
// Scaled returns a copy of v with all elements multiplied by f.
func (v Vector) Scaled(f float64) Vector { return Vector{f * v.X, f * v.Y, f * v.Z} }
// Add updates the vector to the standard vector sum of v and ov.
func (v *Vector) Add(ov Vector) {
	v.X = v.X + ov.X
	v.Y = v.Y + ov.Y
	v.Z = v.Z + ov.Z
}
// Add returns the standard vector sum of v and ov.
func Add(v, ov Vector) Vector {
	return Vector{v.X + ov.X, v.Y + ov.Y, v.Z + ov.Z}
}
// Sub updates the vector to the standard vector difference of v and ov.
func (v *Vector) Sub(ov Vector) {
	v.X = v.X - ov.X
	v.Y = v.Y - ov.Y
	v.Z = v.Z - ov.Z
}
// Sub returns the standard vector difference of v and ov.
func Sub(v, ov Vector) Vector {
	return Vector{v.X - ov.X, v.Y - ov.Y, v.Z - ov.Z}
}
// Dot returns the standard dot product of v and ov.
func Dot(v, ov Vector) float64 { return v.X*ov.X + v.Y*ov.Y + v.Z*ov.Z }
// Cross returns the cross product of v and ov.
func Cross(v Vector, ov Vector) Vector {
	return Vector{v.Y*ov.Z - ov.Y*v.Z, v.Z*ov.X - ov.Z*v.X, v.X*ov.Y - ov.X*v.Y}
}
// CalculateDistanceToPlane returns the distance from vector v to plane p.
// NOTE(review): this adds Norm2 of the plane normal rather than a plane
// offset term (a signed plane distance is usually Dot(n, v) + d); for a
// unit normal the added term is a constant 1 — verify against the Plane
// type's definition.
func (v Vector) CalculateDistanceToPlane(p Plane) float64 { return Dot(p.Normal, v) + p.Normal.Norm2() }
// det2D returns the 2D cross product (determinant) of the X/Y components
// of a and b; the Z components are ignored.
func det2D(a, b Vector) float64 {
	return a.X*b.Y - a.Y*b.X
}
// GetInterpolationVector returns the interpolated vector between v and b:
// divisor weights v, and (1 - divisor) weights b.
func (v Vector) GetInterpolationVector(b Vector, divisor float64) Vector {
	divisionVector := Vector{
		v.X*divisor + b.X*(1-divisor),
		v.Y*divisor + b.Y*(1-divisor),
		v.Z*divisor + b.Z*(1-divisor),
	}
	return divisionVector
}
// Magnitude returns the magnitude (Euclidean length) of the vector.
func (v Vector) Magnitude() float64 {
	return math.Sqrt(v.SquaredMagnitude())
}
// SquaredMagnitude returns the squared magnitude of the vector.
func (v Vector) SquaredMagnitude() float64 {
	return float64(Dot(v, v))
}
// Distance returns the distance between v and ov.
func Distance(v, ov Vector) float64 {
	diff := Sub(v, ov)
	return diff.Norm()
}
// SquaredDistance returns the squared distance between v and ov.
func SquaredDistance(v, ov Vector) float64 {
	diff := Sub(v, ov)
	return diff.Norm2()
}
// Interpolate interpolates between a and b at t in (0,1): t=0 yields a,
// t=1 yields b.
func Interpolate(a, b *Vector, t float64) Vector {
	t1 := 1 - t
	return Vector{
		a.X*t1 + b.X*t,
		a.Y*t1 + b.Y*t,
		a.Z*t1 + b.Z*t,
	}
} | r3/vector.go | 0.921684 | 0.798423 | vector.go | starcoder |
package xmath
import "math"
// Vector is a three dimensional vector with float64 components.
type Vector struct {
	X float64 `json:"x"`
	Y float64 `json:"y"`
	Z float64 `json:"z"`
}

// Vect constructs a Vector from its three components.
func Vect(x, y, z float64) Vector {
	return Vector{x, y, z}
}

// Length returns the Euclidean length (magnitude) of v.
func (v Vector) Length() float64 {
	return math.Sqrt(v.X*v.X + v.Y*v.Y + v.Z*v.Z)
}

// LengthSq returns the squared length of v, avoiding the square root.
func (v Vector) LengthSq() float64 {
	return v.X*v.X + v.Y*v.Y + v.Z*v.Z
}

// Unit returns v scaled to length 1. For the zero vector the components
// divide by zero and come out ±Inf/NaN, matching the previous behaviour.
func (v Vector) Unit() Vector {
	// Reuse Length instead of duplicating the norm formula inline.
	length := v.Length()
	return Vector{
		X: v.X / length,
		Y: v.Y / length,
		Z: v.Z / length,
	}
}
// Scale returns a copy of v with every component multiplied by mul.
func (v Vector) Scale(mul float64) Vector {
	return Vector{
		X: v.X * mul,
		Y: v.Y * mul,
		Z: v.Z * mul,
	}
}
// Add returns the component-wise sum v + u.
func (v Vector) Add(u Vector) Vector {
	return Vector{
		X: v.X + u.X,
		Y: v.Y + u.Y,
		Z: v.Z + u.Z,
	}
}
// Sub returns the component-wise difference v - u.
func (v Vector) Sub(u Vector) Vector {
	return Vector{
		X: v.X - u.X,
		Y: v.Y - u.Y,
		Z: v.Z - u.Z,
	}
}
// Mul returns the component-wise (Hadamard) product of v and u.
func (v Vector) Mul(u Vector) Vector {
	return Vector{
		X: v.X * u.X,
		Y: v.Y * u.Y,
		Z: v.Z * u.Z,
	}
}
// Div returns the component-wise quotient of v by u.
func (v Vector) Div(u Vector) Vector {
	return Vector{
		X: v.X / u.X,
		Y: v.Y / u.Y,
		Z: v.Z / u.Z,
	}
}
// Dot returns the dot product of v and u.
func (v Vector) Dot(u Vector) float64 {
	return v.X*u.X + v.Y*u.Y + v.Z*u.Z
}
// Cross returns the cross product of v and u.
func (v Vector) Cross(u Vector) Vector {
	return Vector{
		X: v.Y*u.Z - v.Z*u.Y,
		Y: v.Z*u.X - v.X*u.Z,
		Z: v.X*u.Y - v.Y*u.X,
	}
}
// Min returns the component-wise minimum of v and u.
func (v Vector) Min(u Vector) Vector {
	return Vector{
		math.Min(v.X, u.X),
		math.Min(v.Y, u.Y),
		math.Min(v.Z, u.Z),
	}
}
// Max returns the component-wise maximum of v and u.
func (v Vector) Max(u Vector) Vector {
	return Vector{
		math.Max(v.X, u.X),
		math.Max(v.Y, u.Y),
		math.Max(v.Z, u.Z),
	}
}
// Sign returns a vector whose components are ±1, matching the sign of the
// corresponding component of v (Copysign maps +0 to +1).
func (v Vector) Sign() Vector {
	return Vector{
		math.Copysign(1, v.X),
		math.Copysign(1, v.Y),
		math.Copysign(1, v.Z),
	}
}
// Floor returns v with each component rounded down to an integral value.
func (v Vector) Floor() Vector {
	return Vector{
		math.Floor(v.X),
		math.Floor(v.Y),
		math.Floor(v.Z),
	}
}
// Abs returns v with each component replaced by its absolute value.
func (v Vector) Abs() Vector {
	return Vector{
		math.Abs(v.X),
		math.Abs(v.Y),
		math.Abs(v.Z),
	}
}
// AddULPs nudges every component by the given number of ULPs via the
// package-level AddULPs helper (defined elsewhere in this package).
func (v Vector) AddULPs(ulps int64) Vector {
	return Vector{
		AddULPs(v.X, ulps),
		AddULPs(v.Y, ulps),
		AddULPs(v.Z, ulps),
	}
}
// Rotate rotates v about the axis u by rads radians, applying the
// axis-angle rotation matrix row by row. Assumes u is a unit vector —
// TODO confirm at call sites.
func (v Vector) Rotate(u Vector, rads float64) Vector {
	cos := math.Cos(rads)
	sin := math.Sin(rads)
	i := Vect(
		cos+u.X*u.X*(1-cos),
		u.X*u.Y*(1-cos)-u.Z*sin,
		u.X*u.Z*(1-cos)+u.Y*sin,
	)
	j := Vect(
		u.Y*u.X*(1-cos)+u.Z*sin,
		cos+u.Y*u.Y*(1-cos),
		u.Y*u.Z*(1-cos)-u.X*sin,
	)
	k := Vect(
		u.Z*u.X*(1-cos)-u.Y*sin,
		u.Z*u.Y*(1-cos)+u.X*sin,
		cos+u.Z*u.Z*(1-cos),
	)
	return Vect(i.Dot(v), j.Dot(v), k.Dot(v))
}
// Proj returns the projection of v onto the given unit vector.
func (v Vector) Proj(unit Vector) Vector {
	return unit.Scale(v.Dot(unit))
}
// Rej returns the rejection of v from the given unit vector, i.e. the
// component of v orthogonal to it.
func (v Vector) Rej(unit Vector) Vector {
	return v.Sub(v.Proj(unit))
}
// X0, Y0 and Z0 return a copy of v with the named component zeroed.
func (v Vector) X0() Vector { return Vect(0, v.Y, v.Z) }
func (v Vector) Y0() Vector { return Vect(v.X, 0, v.Z) }
func (v Vector) Z0() Vector { return Vect(v.X, v.Y, 0) }
// Ray is a half-line with an origin (Start) and a direction (Dir).
type Ray struct {
	Start, Dir Vector
}
// At returns the point on the ray at parameter t: Start + t*Dir.
func (r Ray) At(t float64) Vector {
	return r.Start.Add(r.Dir.Scale(t))
}
// Triple is an integer-valued 3-tuple, the integral counterpart of Vector.
type Triple struct {
	X, Y, Z int
}
// Truncate converts a Vector to a Triple by truncating each component
// toward zero.
func Truncate(v Vector) Triple {
	return Triple{
		int(v.X),
		int(v.Y),
		int(v.Z),
	}
}
// AsVector converts the Triple back to a float64 Vector.
func (v Triple) AsVector() Vector {
	return Vector{
		float64(v.X),
		float64(v.Y),
		float64(v.Z),
	}
}
// Min returns the component-wise minimum of v and u.
func (v Triple) Min(u Triple) Triple {
	return Triple{
		IntMin(v.X, u.X),
		IntMin(v.Y, u.Y),
		IntMin(v.Z, u.Z),
	}
}
// Max returns the component-wise maximum of v and u.
func (v Triple) Max(u Triple) Triple {
	return Triple{
		IntMax(v.X, u.X),
		IntMax(v.Y, u.Y),
		IntMax(v.Z, u.Z),
	}
}
// Sub returns the component-wise difference v - u.
func (v Triple) Sub(u Triple) Triple {
	return Triple{
		v.X - u.X,
		v.Y - u.Y,
		v.Z - u.Z,
	}
}
// IntMin returns the smaller of x and y.
func IntMin(x, y int) int {
	if y < x {
		return y
	}
	return x
}
// IntMax returns the larger of x and y.
func IntMax(x, y int) int {
	if x > y {
		return x
	}
	return y
} | xmath/math.go | 0.866712 | 0.644197 | math.go | starcoder |
package gocw
import (
"math"
"math/rand"
"sort"
)
// Edge represents an edge in a directed graph
type Edge struct {
	// Idx1 and Idx2 identify indices of the samples at the ends of an edge.
	Idx1, Idx2 uint64
	// Distance can be used for any purpose; Run treats it as the edge
	// weight when accumulating neighbour label scores.
	Distance float64
}
// Edges implements sort.Interface, ordering edges lexicographically by
// (Idx1, Idx2).
type Edges []Edge
func (e Edges) Len() int { return len(e) }
func (e Edges) Less(i, j int) bool {
	return e[i].Idx1 < e[j].Idx1 || (e[i].Idx1 == e[j].Idx1 && e[i].Idx2 < e[j].Idx2)
}
func (e Edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
// ChineseWhispers implements the chinese whispers
// graph clustering algorithm
type ChineseWhispers struct {
	numIterations uint64 // update passes per node performed by Run
	edges Edges // graph edges; sorted/symmetrized lazily by ensureOrdered
	labels []uint64 // per-node cluster labels, populated by Run
}
// NewChineseWhispers gives a new ChineseWhispers instance that will run
// numIterations update passes per node.
func NewChineseWhispers(numIterations uint64) *ChineseWhispers {
	return &ChineseWhispers{
		numIterations: numIterations,
	}
}
// AddEdge adds a single graph edge. The reverse direction is added
// automatically when the edge list is normalized before clustering.
func (c *ChineseWhispers) AddEdge(edge Edge) {
	c.edges = append(c.edges, edge)
}
// ensureOrdered normalizes the edge list: every undirected edge is stored
// in both directions and the whole list is sorted by (Idx1, Idx2), which
// findNeighbourRanges and Run rely on.
func (c *ChineseWhispers) ensureOrdered() {
	if sort.IsSorted(c.edges) {
		return
	}
	// Build into a zero-length slice with capacity for both directions of
	// every edge. The previous version used make(Edges, len(c.edges)*2),
	// which prefixed the slice with 2*len zero-value edges and then had to
	// scan them back out after sorting — a scan that also silently dropped
	// any legitimate {0, 0, 0} edge.
	ordered := make(Edges, 0, len(c.edges)*2)
	for i := 0; i < len(c.edges); i++ {
		ordered = append(ordered, Edge{
			Idx1:     c.edges[i].Idx1,
			Idx2:     c.edges[i].Idx2,
			Distance: c.edges[i].Distance,
		})
		// Self-loops are stored once; everything else in both directions.
		if c.edges[i].Idx1 != c.edges[i].Idx2 {
			ordered = append(ordered, Edge{
				Idx1:     c.edges[i].Idx2,
				Idx2:     c.edges[i].Idx1,
				Distance: c.edges[i].Distance,
			})
		}
	}
	sort.Sort(ordered)
	c.edges = ordered
}
// findNeighbourRanges fills neighbours so that
// [neighbours[i][0], neighbours[i][1]) is the half-open range within
// c.edges holding all of node i's outgoing edges. It relies on c.edges
// being sorted by Idx1 (see ensureOrdered); nodes without edges keep the
// empty range {0, 0}.
func (c *ChineseWhispers) findNeighbourRanges(neighbours *[][2]uint64) {
	// setup neighbours so that [neighbours[i][0], neighbours[i][1]) is the range
	// within edges that contains all node i's edges.
	// Node count = highest index mentioned by any edge, plus one.
	numNodes := func() uint64 {
		if len(c.edges) == 0 {
			return 0
		}
		var maxIdx uint64
		for i := 0; i < c.edges.Len(); i++ {
			if c.edges[i].Idx1 > maxIdx {
				maxIdx = c.edges[i].Idx1
			}
			if c.edges[i].Idx2 > maxIdx {
				maxIdx = c.edges[i].Idx2
			}
		}
		return maxIdx + 1
	}()
	// Start every node with the empty range {0, 0}.
	for i := 0; i < int(numNodes); i++ {
		(*neighbours) = append((*neighbours), [2]uint64{0, 0})
	}
	// One pass over the sorted edges: each time Idx1 changes, close the
	// previous node's range and open the next one.
	var curNode, startIdx uint64
	for i := 0; i < c.edges.Len(); i++ {
		if c.edges[i].Idx1 != curNode {
			(*neighbours)[curNode] = [2]uint64{startIdx, uint64(i)}
			startIdx = uint64(i)
			curNode = c.edges[i].Idx1
		}
	}
	// Close the final node's range.
	if len(*neighbours) != 0 {
		(*neighbours)[curNode] = [2]uint64{startIdx, uint64(len(c.edges))}
	}
}
// Run runs the chinese whispers label-propagation loop and returns the
// number of distinct labels (clusters) found. The result is stochastic:
// nodes are picked via the global math/rand source (not seeded here).
func (c *ChineseWhispers) Run() int {
	c.ensureOrdered()
	c.labels = []uint64{}
	if c.edges.Len() == 0 {
		return 0
	}
	var neighbours [][2]uint64
	c.findNeighbourRanges(&neighbours)
	// Initialize the labels, each node gets a different label.
	c.labels = make([]uint64, len(neighbours))
	for i := 0; i < len(c.labels); i++ {
		c.labels[i] = uint64(i)
	}
	// Perform numIterations passes worth of single-node updates.
	for i := 0; i < len(neighbours)*int(c.numIterations); i++ {
		// Pick a random node.
		idx := rand.Int63() % int64(len(neighbours))
		// Count how many times each label happens amongst our neighbors,
		// weighting each occurrence by the edge's Distance.
		labelsToCounts := make(map[uint64]float64)
		for n := neighbours[idx][0]; n != neighbours[idx][1]; n++ {
			labelsToCounts[c.labels[c.edges[n].Idx2]] += c.edges[n].Distance
		}
		// find the most common label
		bestScore := math.Inf(-1)
		bestLabel := c.labels[idx]
		for k, v := range labelsToCounts {
			if v > bestScore {
				bestScore = v
				bestLabel = k
			}
		}
		c.labels[idx] = bestLabel
	}
	// Remap the labels into a contiguous range. First we find the mapping.
	labelRemap := make(map[uint64]uint64)
	for i := 0; i < len(c.labels); i++ {
		if _, exists := labelRemap[c.labels[i]]; !exists {
			labelRemap[c.labels[i]] = uint64(len(labelRemap))
		}
	}
	// now apply the mapping to all the labels.
	for i := 0; i < len(c.labels); i++ {
		c.labels[i] = labelRemap[c.labels[i]]
	}
	return len(labelRemap)
}
// GetLabel returns the label assigned to node idx by the last Run.
func (c *ChineseWhispers) GetLabel(idx uint64) uint64 {
	return c.labels[idx]
}
// GetLabels returns the labels for all nodes, one entry per node index.
func (c *ChineseWhispers) GetLabels() []uint64 {
	return c.labels
} | chinesewhispers.go | 0.703346 | 0.505188 | chinesewhispers.go | starcoder |
package types
import (
"database/sql/driver"
"fmt"
"github.com/shopspring/decimal"
)
const surgePrecision = 2 // number of decimal places a Surge is rounded to
var (
	// DefaultSurge represents the base of surges
	DefaultSurge = NewSurgeFromFloat(1)
	zero = NewSurgeFromFloat(0.) // sentinel used by IsZero
	decimal100 = decimal.NewFromFloat(100.0) // percent conversion factor
	coeff = decimal.NewFromFloat(1.0) // subtracted to obtain the coefficient
	minSurge = decimal.NewFromFloat(1.0) // lowest surge Validate accepts
	maxSurge = decimal.NewFromFloat(9.99) // highest surge Validate accepts
)
// Surge regulate price multiplier which depends on supply/demand in the real world.
type Surge struct {
	d decimal.Decimal
}
// NewSurgeFromDecimal creates a surge from a decimal, rounding it to
// surgePrecision decimal places.
func NewSurgeFromDecimal(d decimal.Decimal) Surge {
	return Surge{d.Round(surgePrecision)}
}
// NewSurgeFromFloat creates a surge from a float using surgePrecision in
// the exponent.
func NewSurgeFromFloat(f float64) Surge {
	return NewSurgeFromDecimal(floatToSurgeDecimal(f))
}
// NewSurgeFromString creates a surge from a string using the decimal reader.
func NewSurgeFromString(surgeString string) (Surge, error) {
	d, err := decimal.NewFromString(surgeString)
	if err != nil {
		return Surge{}, err
	}
	return NewSurgeFromDecimal(d), nil
}
// Decimal returns the surge as a decimal.Decimal value.
func (s Surge) Decimal() decimal.Decimal {
	return s.d
}
// ToCoefficient returns the surge minus 1 (surge - 1).
func (s Surge) ToCoefficient() decimal.Decimal {
	return s.d.Sub(coeff)
}
// ToPercentInt returns the integer part of the surge coefficient as a
// percentage: round((surge - 1) * 100).
func (s Surge) ToPercentInt() int64 {
	return s.ToCoefficient().Mul(decimal100).Round(0).IntPart()
}
// IsSurgeExist returns true if the coefficient percentage is positive.
func (s Surge) IsSurgeExist() bool {
	return s.ToPercentInt() > 0
}
// IsZero checks whether the surge is zero or not.
func (s Surge) IsZero() bool {
	return s.Equals(zero)
}
// Equals returns whether the numbers represented by s and s2 are equal.
func (s Surge) Equals(s2 Surge) bool {
	return s.d.Equals(s2.d)
}
// Less returns true if s < s2.
func (s Surge) Less(s2 Surge) bool {
	return s.d.Cmp(s2.d) < 0
}
// Greater returns true if s > s2.
func (s Surge) Greater(s2 Surge) bool {
	return s.d.Cmp(s2.d) > 0
}
// ApplyTo multiplies f by the surge; note f is first rounded to
// surgePrecision decimal places by floatToSurgeDecimal.
func (s Surge) ApplyTo(f float64) decimal.Decimal {
	return s.d.Mul(floatToSurgeDecimal(f))
}
// IsDefault returns true if the surge equals the base value 1.0.
func (s Surge) IsDefault() bool {
	return s.Equals(DefaultSurge)
}
// Validate checks the surge's precision and its min/max bounds, returning a
// descriptive error for the first violated constraint.
//
// NOTE(review): decimal exponents are negative for fractional values; the
// precision guard compares Exponent() against surgePrecision directly —
// confirm the intended sign convention of surgePrecision.
func (s Surge) Validate() error {
	if s.d.Exponent() > surgePrecision {
		return fmt.Errorf("surge precision should be less or equal to %d, given %d", surgePrecision, s.d.Exponent())
	}
	switch {
	case s.d.Cmp(minSurge) < 0:
		return fmt.Errorf("surge is less than %v: %v", minSurge, s.d)
	case s.d.Cmp(maxSurge) > 0:
		return fmt.Errorf("surge is more than %v: %v", maxSurge, s.d)
	}
	return nil
}
// MarshalJSON implements the json.Marshaler interface.
// The surge is emitted as an unquoted fixed-point JSON number.
func (s Surge) MarshalJSON() ([]byte, error) {
	return []byte(s.String()), nil
}

// UnmarshalJSON implements the json.Unmarshaler interface.
// The decoded value is rounded to surgePrecision and then validated.
func (s *Surge) UnmarshalJSON(data []byte) error {
	var d decimal.Decimal
	if err := d.UnmarshalJSON(data); err != nil {
		return err
	}
	*s = NewSurgeFromDecimal(d)
	return s.Validate()
}

// String renders the surge with exactly surgePrecision fractional digits.
func (s Surge) String() string {
	return s.d.StringFixed(surgePrecision)
}

// Scan implements the sql.Scanner interface for database deserialization.
// NOTE(review): Unquote is declared outside this chunk — presumably it strips
// quoting from the driver value and returns a string; confirm.
func (s *Surge) Scan(value interface{}) error {
	str, err := Unquote(value)
	if err != nil {
		return err
	}
	*s, err = NewSurgeFromString(str)
	return err
}

// Value implements the driver.Valuer interface for database serialization.
func (s Surge) Value() (driver.Value, error) {
	return s.String(), nil
}

// floatToSurgeDecimal converts a float to a decimal truncated at
// surgePrecision fractional digits (negative exponent per decimal convention).
func floatToSurgeDecimal(f float64) decimal.Decimal {
	return decimal.NewFromFloatWithExponent(f, -surgePrecision)
} | vendor/junolab.net/ms_core/types/surge.go | 0.799912 | 0.557604 | surge.go | starcoder
package iso20022
// Description of the financial instrument.
// FinancialInstrumentAttributes3 is an ISO 20022 generated type describing a
// financial instrument. The Add* methods allocate the corresponding optional
// element and return it for further population; Set* methods assign scalar
// values directly.
type FinancialInstrumentAttributes3 struct {
	// Identifies the financial instrument.
	SecurityIdentification *SecurityIdentification11 `xml:"SctyId"`

	// Quantity of entitled intermediate securities based on the balance of underlying securities.
	Quantity *DecimalNumber `xml:"Qty,omitempty"`

	// Specifies whether terms of the event allow resale of the rights.
	RenounceableEntitlementStatusType *RenounceableEntitlementStatusTypeFormat1Choice `xml:"RnncblEntitlmntStsTp,omitempty"`

	// Specifies how fractions resulting from derived securities will be processed or how prorated decisions will be rounding, if provided with a pro ration rate.
	FractionDisposition *FractionDispositionType3Choice `xml:"FrctnDspstn,omitempty"`

	// Quantity of intermediate securities awarded for a given quantity of underlying security.
	IntermediateSecuritiesToUnderlyingRatio *QuantityToQuantityRatio1 `xml:"IntrmdtSctiesToUndrlygRatio,omitempty"`

	// Last reported/known price of a financial instrument in a market.
	MarketPrice *AmountPrice2 `xml:"MktPric,omitempty"`

	// Date on which an order expires or at which a privilege or offer terminates.
	ExpiryDate *DateFormat5Choice `xml:"XpryDt"`

	// Date of the posting (credit or debit) to the account.
	PostingDate *DateFormat5Choice `xml:"PstngDt"`

	// Period during which intermediate or outturn securities are tradable in a secondary market.
	TradingPeriod *Period3 `xml:"TradgPrd,omitempty"`

	// Balance of uninstructed position.
	UninstructedBalance *BalanceFormat1Choice `xml:"UinstdBal,omitempty"`

	// Balance of instructed position.
	InstructedBalance *BalanceFormat1Choice `xml:"InstdBal,omitempty"`
}

// AddSecurityIdentification allocates and returns the SctyId element.
func (f *FinancialInstrumentAttributes3) AddSecurityIdentification() *SecurityIdentification11 {
	f.SecurityIdentification = new(SecurityIdentification11)
	return f.SecurityIdentification
}

// SetQuantity assigns the Qty element from its string representation.
func (f *FinancialInstrumentAttributes3) SetQuantity(value string) {
	f.Quantity = (*DecimalNumber)(&value)
}

// AddRenounceableEntitlementStatusType allocates and returns the RnncblEntitlmntStsTp element.
func (f *FinancialInstrumentAttributes3) AddRenounceableEntitlementStatusType() *RenounceableEntitlementStatusTypeFormat1Choice {
	f.RenounceableEntitlementStatusType = new(RenounceableEntitlementStatusTypeFormat1Choice)
	return f.RenounceableEntitlementStatusType
}

// AddFractionDisposition allocates and returns the FrctnDspstn element.
func (f *FinancialInstrumentAttributes3) AddFractionDisposition() *FractionDispositionType3Choice {
	f.FractionDisposition = new(FractionDispositionType3Choice)
	return f.FractionDisposition
}

// AddIntermediateSecuritiesToUnderlyingRatio allocates and returns the IntrmdtSctiesToUndrlygRatio element.
func (f *FinancialInstrumentAttributes3) AddIntermediateSecuritiesToUnderlyingRatio() *QuantityToQuantityRatio1 {
	f.IntermediateSecuritiesToUnderlyingRatio = new(QuantityToQuantityRatio1)
	return f.IntermediateSecuritiesToUnderlyingRatio
}

// AddMarketPrice allocates and returns the MktPric element.
func (f *FinancialInstrumentAttributes3) AddMarketPrice() *AmountPrice2 {
	f.MarketPrice = new(AmountPrice2)
	return f.MarketPrice
}

// AddExpiryDate allocates and returns the XpryDt element.
func (f *FinancialInstrumentAttributes3) AddExpiryDate() *DateFormat5Choice {
	f.ExpiryDate = new(DateFormat5Choice)
	return f.ExpiryDate
}

// AddPostingDate allocates and returns the PstngDt element.
func (f *FinancialInstrumentAttributes3) AddPostingDate() *DateFormat5Choice {
	f.PostingDate = new(DateFormat5Choice)
	return f.PostingDate
}

// AddTradingPeriod allocates and returns the TradgPrd element.
func (f *FinancialInstrumentAttributes3) AddTradingPeriod() *Period3 {
	f.TradingPeriod = new(Period3)
	return f.TradingPeriod
}

// AddUninstructedBalance allocates and returns the UinstdBal element.
func (f *FinancialInstrumentAttributes3) AddUninstructedBalance() *BalanceFormat1Choice {
	f.UninstructedBalance = new(BalanceFormat1Choice)
	return f.UninstructedBalance
}

// AddInstructedBalance allocates and returns the InstdBal element.
func (f *FinancialInstrumentAttributes3) AddInstructedBalance() *BalanceFormat1Choice {
	f.InstructedBalance = new(BalanceFormat1Choice)
	return f.InstructedBalance
} | data/train/go/93e617a40fcc77c55a5385592f43ac0e16ea5a17FinancialInstrumentAttributes3.go | 0.852537 | 0.416737 | 93e617a40fcc77c55a5385592f43ac0e16ea5a17FinancialInstrumentAttributes3.go | starcoder
package drw
import (
"math"
"github.com/jakubDoka/mlok/ggl"
"github.com/jakubDoka/mlok/mat"
"github.com/jakubDoka/mlok/mat/angle"
)
// AutoResolutionSpacing is the target arc length between neighboring vertices
// when Auto resolution is requested (see AutoResolution).
var AutoResolutionSpacing float64 = 1

// Auto is a sentinel: pass it as the resolution to Circle and the resolution
// will be derived automatically from the radius and arc length.
const Auto = -1

// Circle is a drawing tool that can draw circles with different
// resolution efficiently as everything is precalculated upon creation.
type Circle struct {
	ggl.Data
	Base

	// cached parameters of the last build, used to skip redundant rebuilds
	resolution                    int
	radius, thickness, start, end float64
	outline                       bool
}
// NCircle creates a ready-to-use Circle. Pick the resolution based on how big
// the circles you draw will be: the bigger the circle, the more resolution
// matters. The radius is less important since the circle can be scaled freely.
func NCircle(radius, thickness float64, resolution int) Circle {
	return NArc(radius, thickness, 0, 0, resolution)
}

// NArc creates a ready-to-use arc. A zero thickness yields a filled shape;
// any other thickness yields an outline.
func NArc(radius, thickness, start, end float64, resolution int) Circle {
	var c Circle
	if thickness == 0 {
		c.Filled(radius, start, end, resolution)
		return c
	}
	c.Outline(radius, thickness, start, end, resolution)
	return c
}
// Outline (re)builds the geometry of an outlined circle or arc of the given
// radius and half-thickness. Work is skipped when the cached parameters match.
func (c *Circle) Outline(radius, thickness, start, end float64, resolution int) {
	// Rebuild the vertex ring only if any geometric parameter changed.
	if !c.outline || radius != c.radius || thickness != c.thickness ||
		resolution != c.resolution || start != c.start || end != c.end {
		c.Base.Resize(resolution * 2)
		ang, step := c.setup(start, end, resolution)
		// Two vertices per step: outer edge then inner edge.
		for i := 0; i < len(c.Base); i, ang = i+2, ang+step {
			c.Base[i] = mat.Rad(ang, radius+thickness)
			c.Base[i+1] = mat.Rad(ang, radius-thickness)
		}
		if start != end {
			// Open arc: append the closing vertex pair at the final angle.
			c.Base = append(c.Base, mat.Rad(ang, radius+thickness), mat.Rad(ang, radius-thickness))
		}
		c.radius = radius
		c.thickness = thickness
		c.Vertexes.Resize(len(c.Base))
	}
	if !c.outline || resolution != c.resolution {
		// setting up indices: two triangles per segment quad
		l := resolution * 6
		c.Indices.Resize(l)
		ln := uint32(l)
		for i, j := uint32(0), uint32(0); i < ln; i, j = i+6, j+2 {
			c.Indices[i+0] = j
			c.Indices[i+1] = j + 1
			c.Indices[i+2] = j + 2
			c.Indices[i+3] = j + 1
			c.Indices[i+4] = j + 3
			c.Indices[i+5] = j + 2
		}
		if start == end {
			// Full circle: wrap the last quad back to the first vertex pair.
			c.Indices[l-4] = 0
			c.Indices[l-2] = 1
			c.Indices[l-1] = 0
		}
		c.resolution = resolution
	}
	c.outline = true
}
// Filled (re)builds the geometry of a filled circle (start == end) or a pie
// slice (start != end) of the given radius. Work is skipped when the cached
// parameters match.
//
// Bug fix: the previous version ignored changes to start/end in both rebuild
// conditions (unlike Outline), so reshaping an arc — or switching between an
// arc and a full circle — with the same radius and resolution left stale
// vertices and indices.
func (c *Circle) Filled(radius, start, end float64, resolution int) {
	verticesStale := c.outline || radius != c.radius || resolution != c.resolution ||
		start != c.start || end != c.end
	// Indices depend on the resolution and on whether the arc closes into a
	// full circle, so start/end changes must also invalidate them.
	indicesStale := c.outline || resolution != c.resolution ||
		start != c.start || end != c.end

	if verticesStale {
		// Base[0] stays at the origin and acts as the fan center.
		c.Base.Resize(resolution + 1)
		ang, step := c.setup(start, end, resolution)
		for i := 1; i < len(c.Base); i, ang = i+1, ang+step {
			c.Base[i] = mat.Rad(ang, radius)
		}
		if start != end {
			// Open arc: append the closing rim vertex at the final angle.
			c.Base = append(c.Base, mat.Rad(ang, radius))
		}
		c.radius = radius
		c.Vertexes.Resize(len(c.Base))
	}

	if indicesStale {
		// Triangle fan: every triangle shares center vertex 0.
		l := resolution * 3
		c.Indices.Resize(l)
		res := uint32(resolution)
		for i := uint32(0); i < res; i++ {
			j := i * 3
			c.Indices[j+1] = i + 1
			c.Indices[j+2] = i + 2
		}
		if start == end {
			// Full circle: wrap the last triangle back to rim vertex 1.
			c.Indices[l-1] = 1
		} else {
			c.Indices = c.Indices[:l-3]
		}
		c.resolution = resolution
	}
	c.outline = false
}
// AutoResolution computes a segment count for an arc of the given radius so
// that neighboring vertices are roughly `spacing` apart along the arc;
// start == end is treated as a full circle. At least 3 segments are returned.
func AutoResolution(radius, start, end, spacing float64) int {
	if start == end {
		end = start + angle.Pi2
	}
	cof := math.Abs(start-end) / angle.Pi2
	return mat.Maxi(int(radius*angle.Pi2*cof/spacing), 3)
}

// setup normalizes the arc range (swapping if start > end, expanding
// start == end into a full turn), caches it on the circle, and returns the
// starting angle together with the per-segment angular step.
func (c *Circle) setup(start, end float64, resolution int) (s, step float64) {
	if start == end {
		end = start + angle.Pi2
	} else if start > end {
		start, end = end, start
	}
	c.start = start
	c.end = end
	return start, (end - start) / float64(resolution)
}

// Draw implements ggl.Drawer interface.
func (c *Circle) Draw(t ggl.Target, tran mat.Mat, rgba mat.RGBA) {
	c.Update(tran, rgba)
	c.Fetch(t)
}

// Update projects the precalculated base vertices through tran and assigns
// rgba to every vertex.
func (c *Circle) Update(tran mat.Mat, rgba mat.RGBA) {
	for i, v := range c.Base {
		c.Vertexes[i].Pos = tran.Project(v)
		c.Vertexes[i].Color = rgba
	}
}

// Base holds the untransformed vertex positions of the circle.
type Base []mat.Vec

/*imp(
	github.com/jakubDoka/gogen/templates
)*/

/*gen(
	templates.Resize<Base, Resize>
)*/ | ggl/drw/circle.go | 0.74158 | 0.557604 | circle.go | starcoder
package asig
import "github.com/bloeys/gglm/gglm"
const (
	// MaxColorSets is the maximum number of vertex color sets per mesh.
	MaxColorSets = 8
	// MaxTexCoords is the maximum number of UV channels per mesh.
	MaxTexCoords = 8
)

// Mesh is a single renderable mesh: vertex attributes, faces, skinning bones
// and morph-target animation meshes.
type Mesh struct {
	//Bitwise combination of PrimitiveType enum
	PrimitiveTypes PrimitiveType

	Vertices    []gglm.Vec3
	Normals     []gglm.Vec3
	Tangents    []gglm.Vec3
	BitTangents []gglm.Vec3

	//ColorSets vertex color sets where each set is either empty or has length=len(Vertices), with max number of sets=MaxColorSets
	ColorSets [MaxColorSets][]gglm.Vec4

	//TexCoords (aka UV channels) where each TexCoords[i] has NumUVComponents[i] channels, and is either empty or has length=len(Vertices), with max number of TexCoords per vertex = MaxTexCoords
	TexCoords            [MaxTexCoords][]gglm.Vec3
	TexCoordChannelCount [MaxTexCoords]uint

	Faces      []Face
	Bones      []*Bone
	AnimMeshes []*AnimMesh

	AABB          AABB
	MorphMethod   MorphMethod
	MaterialIndex uint
	Name          string
}

// Face is a single polygon referencing mesh vertices by index.
type Face struct {
	Indices []uint
}

// AnimMesh is a morph-target replacement for (parts of) a Mesh.
type AnimMesh struct {
	Name string

	/** Replacement for Mes.Vertices. If this array is non-NULL,
	* it *must* contain mNumVertices entries. The corresponding
	* array in the host mesh must be non-NULL as well - animation
	* meshes may neither add or nor remove vertex components (if
	* a replacement array is NULL and the corresponding source
	* array is not, the source data is taken instead)*/
	Vertices []gglm.Vec3

	Normals     []gglm.Vec3
	Tangents    []gglm.Vec3
	BitTangents []gglm.Vec3
	Colors      [MaxColorSets][]gglm.Vec4
	TexCoords   [MaxTexCoords][]gglm.Vec3

	Weight float32
}

// AABB is an axis-aligned bounding box.
type AABB struct {
	Min gglm.Vec3
	Max gglm.Vec3
}

// Bone is a skinning bone with its vertex influences.
type Bone struct {
	Name string

	//The influence weights of this bone
	Weights []VertexWeight

	/** Matrix that transforms from bone space to mesh space in bind pose.
	 *
	 * This matrix describes the position of the mesh
	 * in the local space of this bone when the skeleton was bound.
	 * Thus it can be used directly to determine a desired vertex position,
	 * given the world-space transform of the bone when animated,
	 * and the position of the vertex in mesh space.
	 *
	 * It is sometimes called an inverse-bind matrix,
	 * or inverse bind pose matrix.
	 */
	OffsetMatrix gglm.Mat4
}

// VertexWeight binds a single vertex to a bone with a given influence.
type VertexWeight struct {
	VertIndex uint

	//The strength of the influence in the range (0...1). The total influence from all bones at one vertex is 1
	Weight float32
} | asig/mesh.go | 0.649134 | 0.42316 | mesh.go | starcoder
package ot
import (
"crypto/rand"
"crypto/rsa"
"encoding/binary"
"fmt"
"io"
"math/big"
"github.com/markkurossi/mpc/ot/mpint"
"github.com/markkurossi/mpc/pkcs1"
)
// RandomData returns size bytes of cryptographically secure random data.
func RandomData(size int) ([]byte, error) {
	buf := make([]byte, size)
	if _, err := rand.Read(buf); err != nil {
		return nil, err
	}
	return buf, nil
}
// Label implements a 128 bit wire label stored as two 64-bit halves,
// d0 holding the most-significant bits.
type Label struct {
	d0 uint64
	d1 uint64
}

// LabelData contains label data as a big-endian byte array.
type LabelData [16]byte

// labelSBit masks the top bit of d0, used as the label's S bit.
const labelSBit = uint64(1) << 63

// String renders the label as 32 lowercase hex digits.
func (l Label) String() string {
	return fmt.Sprintf("%016x%016x", l.d0, l.d1)
}

// Equal tests if the labels are equal.
func (l Label) Equal(o Label) bool {
	return l == o
}

// NewLabel creates a new random label from rand.
//
// Bug fix: the previous version used rand.Read, and a generic io.Reader may
// return fewer than len(buf) bytes without an error, silently yielding a
// partially-zero label. io.ReadFull guarantees the buffer is filled.
func NewLabel(rand io.Reader) (Label, error) {
	var buf LabelData
	var label Label
	if _, err := io.ReadFull(rand, buf[:]); err != nil {
		return label, err
	}
	label.SetData(&buf)
	return label, nil
}

// NewTweak creates a new label from the tweak value (stored in the low bits).
func NewTweak(tweak uint32) Label {
	return Label{
		d1: uint64(tweak),
	}
}

// S tests the label's S bit (the most significant bit).
func (l Label) S() bool {
	return l.d0&labelSBit != 0
}

// SetS sets or clears the label's S bit.
func (l *Label) SetS(set bool) {
	if set {
		l.d0 |= labelSBit
	} else {
		l.d0 &^= labelSBit
	}
}

// Mul2 multiplies the label by 2 (a 128-bit left shift by one).
func (l *Label) Mul2() {
	l.d0 = l.d0<<1 | l.d1>>63
	l.d1 <<= 1
}

// Mul4 multiplies the label by 4 (a 128-bit left shift by two).
func (l *Label) Mul4() {
	l.d0 = l.d0<<2 | l.d1>>62
	l.d1 <<= 2
}

// Xor xors the label with the argument label in place.
func (l *Label) Xor(o Label) {
	l.d0 ^= o.d0
	l.d1 ^= o.d1
}

// GetData serializes the label into buf in big-endian order.
func (l Label) GetData(buf *LabelData) {
	binary.BigEndian.PutUint64(buf[0:8], l.d0)
	binary.BigEndian.PutUint64(buf[8:16], l.d1)
}

// SetData sets the label from big-endian label data.
func (l *Label) SetData(data *LabelData) {
	l.d0 = binary.BigEndian.Uint64((*data)[0:8])
	l.d1 = binary.BigEndian.Uint64((*data)[8:16])
}

// Bytes serializes the label into buf and returns it as a byte slice.
func (l Label) Bytes(buf *LabelData) []byte {
	l.GetData(buf)
	return buf[:]
}

// SetBytes sets the label from at least 16 bytes of big-endian data.
func (l *Label) SetBytes(data []byte) {
	l.d0 = binary.BigEndian.Uint64(data[0:8])
	l.d1 = binary.BigEndian.Uint64(data[8:16])
}
// Wire implements a garbled-circuit wire with its 0 and 1 labels.
type Wire struct {
	L0 Label // label encoding bit value 0
	L1 Label // label encoding bit value 1
}
// Sender implements the OT sender side, holding the RSA key used for the
// oblivious transfer.
type Sender struct {
	key *rsa.PrivateKey
}

// NewSender creates a new OT sender backed by a fresh RSA key of keyBits bits.
func NewSender(keyBits int) (*Sender, error) {
	key, err := rsa.GenerateKey(rand.Reader, keyBits)
	if err != nil {
		return nil, err
	}
	return &Sender{key: key}, nil
}

// MessageSize returns the maximum OT message size in bytes (the RSA modulus size).
func (s *Sender) MessageSize() int {
	return s.key.PublicKey.Size()
}

// PublicKey returns the sender's public key.
func (s *Sender) PublicKey() *rsa.PublicKey {
	return &s.key.PublicKey
}
// NewTransfer creates a new OT sender data transfer for the message pair
// (m0, m1), drawing one fresh random blinding value per message.
func (s *Sender) NewTransfer(m0, m1 []byte) (*SenderXfer, error) {
	x0, err := RandomData(s.MessageSize())
	if err != nil {
		return nil, err
	}
	x1, err := RandomData(s.MessageSize())
	if err != nil {
		return nil, err
	}
	return &SenderXfer{
		sender: s,
		m0:     m0,
		m1:     m1,
		x0:     x0,
		x1:     x1,
	}, nil
}

// SenderXfer implements the OT sender data transfer.
type SenderXfer struct {
	sender *Sender
	m0     []byte // plaintext message for choice bit 0
	m1     []byte // plaintext message for choice bit 1
	x0     []byte // random value offered for bit 0
	x1     []byte // random value offered for bit 1
	k0     *big.Int
	k1     *big.Int
}

// MessageSize returns the maximum OT message size.
func (s *SenderXfer) MessageSize() int {
	return s.sender.MessageSize()
}

// RandomMessages returns the random values (x0, x1) sent to the receiver.
func (s *SenderXfer) RandomMessages() ([]byte, []byte) {
	return s.x0, s.x1
}

// ReceiveV processes the receiver's blinded value v, computing
// k_i = (v - x_i)^d mod N for both candidate messages.
func (s *SenderXfer) ReceiveV(data []byte) {
	v := mpint.FromBytes(data)
	x0 := mpint.FromBytes(s.x0)
	x1 := mpint.FromBytes(s.x1)
	s.k0 = mpint.Exp(mpint.Sub(v, x0), s.sender.key.D, s.sender.key.PublicKey.N)
	s.k1 = mpint.Exp(mpint.Sub(v, x1), s.sender.key.D, s.sender.key.PublicKey.N)
}

// Messages creates the transfer messages: each plaintext is PKCS#1 BT1 padded
// and offset by the corresponding k value; only the receiver's chosen branch
// can be unblinded.
func (s *SenderXfer) Messages() ([]byte, []byte, error) {
	m0, err := pkcs1.NewEncryptionBlock(pkcs1.BT1, s.MessageSize(), s.m0)
	if err != nil {
		return nil, nil, err
	}
	m0p := mpint.Add(mpint.FromBytes(m0), s.k0)

	m1, err := pkcs1.NewEncryptionBlock(pkcs1.BT1, s.MessageSize(), s.m1)
	if err != nil {
		return nil, nil, err
	}
	m1p := mpint.Add(mpint.FromBytes(m1), s.k1)

	return m0p.Bytes(), m1p.Bytes(), nil
}
// Receiver implements OT receivers, holding the sender's public key.
type Receiver struct {
	pub *rsa.PublicKey
}

// NewReceiver creates a new OT receiver for the given sender public key.
func NewReceiver(pub *rsa.PublicKey) (*Receiver, error) {
	return &Receiver{
		pub: pub,
	}, nil
}

// MessageSize returns the maximum OT message size.
func (r *Receiver) MessageSize() int {
	return r.pub.Size()
}

// NewTransfer creates a new OT receiver data transfer for the choice bit.
func (r *Receiver) NewTransfer(bit uint) (*ReceiverXfer, error) {
	return &ReceiverXfer{
		receiver: r,
		bit:      bit,
	}, nil
}

// ReceiverXfer implements the OT receiver data transfer.
type ReceiverXfer struct {
	receiver *Receiver
	bit      uint     // the receiver's choice bit
	k        *big.Int // receiver's blinding secret
	v        *big.Int // blinded value sent to the sender
	mb       []byte   // the recovered chosen message
}

// ReceiveRandomMessages processes the sender's random values x0 and x1,
// computing v = (x_b + k^e) mod N for the chosen bit b.
func (r *ReceiverXfer) ReceiveRandomMessages(x0, x1 []byte) error {
	k, err := rand.Int(rand.Reader, r.receiver.pub.N)
	if err != nil {
		return err
	}
	r.k = k
	var xb *big.Int
	if r.bit == 0 {
		xb = mpint.FromBytes(x0)
	} else {
		xb = mpint.FromBytes(x1)
	}
	e := big.NewInt(int64(r.receiver.pub.E))
	r.v = mpint.Mod(
		mpint.Add(xb, mpint.Exp(r.k, e, r.receiver.pub.N)), r.receiver.pub.N)
	return nil
}

// V returns the blinded value of the exchange.
func (r *ReceiverXfer) V() []byte {
	return r.v.Bytes()
}

// ReceiveMessages processes the received m0p and m1p messages, unblinding the
// chosen branch (m_b' - k) and stripping its PKCS#1 padding.
// NOTE(review): the trailing err parameter is unusual — it lets callers chain
// a previous error straight in; confirm before changing the signature.
func (r *ReceiverXfer) ReceiveMessages(m0p, m1p []byte, err error) error {
	if err != nil {
		return err
	}
	var mbp *big.Int
	if r.bit == 0 {
		mbp = mpint.FromBytes(m0p)
	} else {
		mbp = mpint.FromBytes(m1p)
	}
	mbBytes := make([]byte, r.receiver.MessageSize())
	mbIntBytes := mpint.Sub(mbp, r.k).Bytes()
	// Left-pad with zeros to the full message size before parsing.
	ofs := len(mbBytes) - len(mbIntBytes)
	copy(mbBytes[ofs:], mbIntBytes)
	mb, err := pkcs1.ParseEncryptionBlock(mbBytes)
	if err != nil {
		return err
	}
	r.mb = mb
	return nil
}

// Message returns the recovered message and choice bit from the exchange.
func (r *ReceiverXfer) Message() (m []byte, bit uint) {
	return r.mb, r.bit
} | ot/rsa.go | 0.857171 | 0.416856 | rsa.go | starcoder
package main
import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strings"
)
// main solves both parts of Advent of Code 2020 day 11 against input.txt.
func main() {
	fmt.Println("Starting day11")

	// Part one
	result1 := GetCountOfOccupiedSeatsPartOne("input.txt")
	fmt.Printf("Part one: the number of occupied seats after the grid stabalizes is %v\n", result1)

	// Part two
	result2 := GetCountOfOccupiedSeatsPartTwo("input.txt")
	fmt.Printf("Part two: the number of occupied seats after the grid stabalizes is %v\n", result2)
}
// GetCountOfOccupiedSeatsPartOne returns the number of occupied seats after the grid stabalizes.
func GetCountOfOccupiedSeatsPartOne(filename string) int {
lines := readFile(filename)
grid := getGrid(lines)
for i := 0; i < 100; i++ {
fmt.Printf("Iteration %v count of occupied seats %v\n", i, getCountOfOccupiedSeats(grid))
grid = tickPartOne(grid)
}
return getCountOfOccupiedSeats(grid)
}
// GetCountOfOccupiedSeatsPartTwo returns the number of occupied seats after the grid stabalizes.
func GetCountOfOccupiedSeatsPartTwo(filename string) int {
lines := readFile(filename)
grid := getGrid(lines)
for i := 0; i < 100; i++ {
fmt.Printf("Iteration %v count of occupied seats %v\n", i, getCountOfOccupiedSeats(grid))
grid = tickPartTwo(grid)
}
return getCountOfOccupiedSeats(grid)
}
// tickPartOne applies one round of the part-one seating rules and returns the
// resulting grid; the input grid is left untouched.
func tickPartOne(grid [][]gridValue) [][]gridValue {
	next := duplicateGrid(grid)
	for x := range grid {
		for y, seat := range grid[x] {
			occupied := getCountOfOccupiedNeighbors(grid, x, y)
			switch {
			case seat == *registry.emptySeat && occupied == 0:
				// An empty seat with no occupied neighbors becomes occupied.
				next[x][y] = *registry.occupiedSeat
			case seat == *registry.occupiedSeat && occupied >= 4:
				// An occupied seat with four or more occupied neighbors empties.
				next[x][y] = *registry.emptySeat
			}
			// Otherwise the seat's state does not change.
		}
	}
	// printGrid(next)
	return next
}
// tickPartTwo applies one round of the part-two (line-of-sight) seating rules
// and returns the resulting grid; the input grid is left untouched.
func tickPartTwo(grid [][]gridValue) [][]gridValue {
	next := duplicateGrid(grid)
	for x := range grid {
		for y, seat := range grid[x] {
			visible := getCountOfOccupiedVisible(grid, x, y)
			switch {
			case seat == *registry.emptySeat && visible == 0:
				next[x][y] = *registry.occupiedSeat
			case seat == *registry.occupiedSeat && visible >= 5:
				next[x][y] = *registry.emptySeat
			}
		}
	}
	// printGrid(next)
	return next
}
// duplicateGrid returns a deep copy of grid (rows share no backing storage
// with the original).
func duplicateGrid(grid [][]gridValue) [][]gridValue {
	out := make([][]gridValue, len(grid))
	for i, row := range grid {
		out[i] = append([]gridValue(nil), row...)
	}
	return out
}
// getCountOfOccupiedNeighbors counts the occupied seats among the up to eight
// cells directly adjacent to (row, col), ignoring out-of-bounds positions.
func getCountOfOccupiedNeighbors(grid [][]gridValue, row int, col int) int {
	occupied := 0
	for dr := -1; dr <= 1; dr++ {
		for dc := -1; dc <= 1; dc++ {
			if dr == 0 && dc == 0 {
				continue // skip the cell itself
			}
			r, c := row+dr, col+dc
			if r < 0 || r >= len(grid) || c < 0 || c >= len(grid[0]) {
				continue
			}
			if grid[r][c] == *registry.occupiedSeat {
				occupied++
			}
		}
	}
	return occupied
}
// getCountOfOccupiedVisible counts, for each of the eight directions from
// (row, col), whether the first visible seat is occupied. Floor cells are
// looked through; the walk stops at the first seat or the grid edge.
func getCountOfOccupiedVisible(grid [][]gridValue, row int, col int) int {
	count := 0
	// fmt.Printf("Getting count of visible for (%d, %d)\n", row, col)
	for _, directionRow := range []int{-1, 0, 1} {
		for _, directionCol := range []int{-1, 0, 1} {
			// fmt.Printf("Visible direction (%d, %d)\n", directionRow, directionCol)
			if directionRow == 0 && directionCol == 0 {
				continue
			}
			// Walk outward along the direction until a seat or the edge.
			diffRow := directionRow
			diffCol := directionCol
			for row+diffRow < len(grid) && row+diffRow >= 0 &&
				col+diffCol < len(grid[0]) && col+diffCol >= 0 {
				visible := grid[row+diffRow][col+diffCol]
				if visible == *registry.occupiedSeat {
					// fmt.Printf("(%d, %d) is occupied seat\n", row+diffX, col+diffY)
					count++
					break
				} else if visible == *registry.emptySeat {
					// fmt.Printf("(%d, %d) is empty seat\n", row+diffX, col+diffY)
					break
				} else if visible == *registry.floor {
					// fmt.Printf("(%d, %d) is floor\n", row+diffX, col+diffY)
					diffRow += directionRow
					diffCol += directionCol
				}
			}
		}
	}
	return count
}
// getCountOfOccupiedSeats returns the total number of occupied seats in grid.
func getCountOfOccupiedSeats(grid [][]gridValue) int {
	total := 0
	for _, row := range grid {
		for _, seat := range row {
			if seat == *registry.occupiedSeat {
				total++
			}
		}
	}
	return total
}
// gridValue is a single grid cell, identified by its input character.
type gridValue struct {
	characterRepresentation rune
}

// gridRegistry holds the canonical set of grid tokens.
type gridRegistry struct {
	floor        *gridValue // '.'
	emptySeat    *gridValue // 'L'
	occupiedSeat *gridValue // '#'
	tokens       []*gridValue
}

// newGridRegistry builds the registry with the three day-11 tokens.
func newGridRegistry() *gridRegistry {
	floor := &gridValue{'.'}
	emptySeat := &gridValue{'L'}
	occupiedSeat := &gridValue{'#'}
	return &gridRegistry{
		floor:        floor,
		emptySeat:    emptySeat,
		occupiedSeat: occupiedSeat,
		tokens:       []*gridValue{floor, emptySeat, occupiedSeat},
	}
}

// List returns all known tokens.
func (g *gridRegistry) List() []*gridValue {
	return g.tokens
}

// Parse maps the rune r to its registered token, or returns an error if r is
// not a known grid character.
func (g *gridRegistry) Parse(r rune) (*gridValue, error) {
	for _, token := range g.List() {
		if token.characterRepresentation == r {
			return token, nil
		}
	}
	return nil, fmt.Errorf("couldn't find rune %v in list %v", r, g.List())
}

// registry is the shared token registry used by the whole program.
var registry = newGridRegistry()
// printGrid writes a human-readable rendering of the grid to stdout.
func printGrid(grid [][]gridValue) {
	fmt.Print(toString(grid))
}

// toString renders the grid as one line per row followed by a trailing blank
// line. Uses strings.Builder instead of repeated string concatenation, which
// was quadratic in the grid size.
func toString(grid [][]gridValue) string {
	var b strings.Builder
	for _, line := range grid {
		for _, r := range line {
			b.WriteRune(r.characterRepresentation)
		}
		b.WriteByte('\n')
	}
	b.WriteByte('\n')
	return b.String()
}
// getGrid converts a slice of input lines into a matrix of gridValues,
// exiting the program if a line contains an unknown character.
func getGrid(lines []string) [][]gridValue {
	grid := make([][]gridValue, 0, len(lines))
	for _, line := range lines {
		row := make([]gridValue, 0, len(line))
		for _, ch := range line {
			token, err := registry.Parse(ch)
			if err != nil {
				log.Fatal(err)
			}
			row = append(row, *token)
		}
		grid = append(grid, row)
	}
	return grid
}
// readFile returns the lines of filename, exiting the program on any error.
//
// Bug fix: the file handle was never closed; defer file.Close() now releases
// it when the function returns.
func readFile(filename string) []string {
	file, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	lines := []string{}
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		lines = append(lines, scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	return lines
}
// Mounts template-attack on ECDH power traces, and proves zero-coordinate
// point operations are discernible from power traces.
// This can be used to mount Goubin's "Refined Power-Analysis Attack".
// https://wiki.newae.com/Template_Attacks
// $ go run cmd/ecdh_zero_point_template_attack.go -logtostderr \
// -zero_capture captures/stm_ecdh_zero_t60_s5000.json.gz \
// -rand_capture captures/stm_ecdh_rand_t120_s5000.json.gz
// [ecdh_zero_point_template_attack.go:176] Loading zero-point capture
// [ecdh_zero_point_template_attack.go:180] Loading rand-point capture
// [ecdh_zero_point_template_attack.go:187] Finding points of interest
// [ecdh_zero_point_template_attack.go:189] Selected POI: [4549 3745 4593 4753 2421]
// [ecdh_zero_point_template_attack.go:191] Building zero-point template
// [ecdh_zero_point_template_attack.go:194] Building rand-point template
// [ecdh_zero_point_template_attack.go:197] Testing zero-point validation set
// [ecdh_zero_point_template_attack.go:159] Classified x=[...] as a zero point trace
// [ecdh_zero_point_template_attack.go:159] Classified x=[...] as a zero point trace
// [ecdh_zero_point_template_attack.go:159] Classified x=[...] as a zero point trace
// [ecdh_zero_point_template_attack.go:159] ...
// [ecdh_zero_point_template_attack.go:200] Testing rand-point validation set
// [ecdh_zero_point_template_attack.go:161] Classified x=[...] as a rand point trace
// [ecdh_zero_point_template_attack.go:161] Classified x=[...] as a rand point trace
// [ecdh_zero_point_template_attack.go:161] Classified x=[...] as a rand point trace
// [ecdh_zero_point_template_attack.go:161] ...
package main
import (
"flag"
"math"
"sort"
"github.com/google/gocw"
"github.com/golang/glog"
"gonum.org/v1/gonum/mat"
"gonum.org/v1/gonum/stat"
"gonum.org/v1/gonum/stat/distmv"
)
var (
	zeroCaptureFlag = flag.String("zero_capture", "captures/stm_ecdh_zero_t60_s5000.json.gz",
		"Capture with ECDH operations that resulted with a zero x-coordinate point")
	randCaptureFlag = flag.String("rand_capture", "captures/stm_ecdh_rand_t120_s5000.json.gz",
		"Capture with ECDH operations with random EC point")
)

const (
	// numPoi is the number of points-of-interest used by the classifier.
	numPoi = 5
)

func init() {
	flag.Parse()
}

// Points-Of-Interest are the points in time where the avg trace for the zero-point
// computation is different from the avg trace for the random point computation.
// These will be used in our template based classifier.
type pointOfInterest struct {
	diff     float64 // absolute difference between the two average traces
	location int     // sample index within the trace
}

type pointsOfInterest []pointOfInterest

func (p pointsOfInterest) Len() int      { return len(p) }
func (p pointsOfInterest) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

// ByDiff orders points-of-interest by their average-trace difference.
type ByDiff struct{ pointsOfInterest }

func (p ByDiff) Less(i, j int) bool {
	return p.pointsOfInterest[i].diff < p.pointsOfInterest[j].diff
}

// findPointsOfInterest returns numPoi sample locations where the two average
// traces differ most, skipping peaks within 10 samples of an already chosen
// one so the selected points are not redundant.
func findPointsOfInterest(zeroAvg, randAvg mat.Vector) []int {
	poi := make([]pointOfInterest, zeroAvg.Len())
	for i := 0; i < zeroAvg.Len(); i++ {
		poi[i] = pointOfInterest{math.Abs(zeroAvg.AtVec(i) - randAvg.AtVec(i)), i}
	}
	// Sort descending by difference so the biggest peaks come first.
	sort.Sort(sort.Reverse(ByDiff{poi}))
	glog.V(1).Infof("Top POI: %v", poi[:10])
	// Pick peaks that aren't too close.
	var res []int
	for _, p := range poi {
		if len(res) == numPoi {
			return res
		}
		var skip bool
		for _, l := range res {
			if l-10 <= p.location && p.location <= l+10 {
				skip = true
			}
		}
		if !skip {
			res = append(res, p.location)
		}
	}
	glog.Fatal("Did not find enough points-of-interest")
	return nil
}
// averageTraces returns the column-wise mean of M (one averaged power trace)
// computed as (1/n, ..., 1/n) * M.
func averageTraces(M mat.Matrix) mat.Vector {
	numTraces, _ := M.Dims()
	data := make([]float64, numTraces)
	for i := range data {
		data[i] = 1.0 / float64(numTraces)
	}
	S := mat.NewVecDense(numTraces, data)
	var avg mat.Dense
	avg.Product(mat.TransposeVec{S}, M)
	return avg.RowView(0)
}

// loadCapture loads a gocw capture file and returns its samples matrix,
// terminating the program on failure.
func loadCapture(filename string) mat.Matrix {
	capture, err := gocw.LoadCapture(filename)
	if err != nil {
		glog.Fatalf("Failed to load capture: %v", err)
		return nil
	}
	return capture.SamplesMatrix()
}

// Builds template based classifier.
// The power profile at the points-of-interest is modeled as a multivariate normal
// distribution. Its mean and covariance matrix are computed from the training set.
func buildTemplate(M mat.Matrix, poi []int) *distmv.Normal {
	// Transpose so each POI's samples across all traces form one row.
	T := mat.DenseCopyOf(M.T())
	n := len(poi)
	mu := make([]float64, n)
	sigma := mat.NewSymDense(n, nil)
	for i := 0; i < n; i++ {
		X := T.RawRowView(poi[i])
		mu[i] = stat.Mean(X, nil)
		for j := 0; j < n; j++ {
			Y := T.RawRowView(poi[j])
			sigma.SetSym(i, j, stat.Covariance(X, Y, nil))
		}
	}
	glog.V(1).Infof("mu: %v", mu)
	glog.V(1).Infof("sigma: %v", sigma)
	ndist, pos := distmv.NewNormal(mu, sigma, nil)
	if !pos {
		glog.Fatal("Covariance matrix is not positive definite => no PDF")
	}
	return ndist
}
// Tests the classifier on the validation set.
// Computes the log probability of the observed traces at the points-of-interest.
// The model with the highest probability is selected.
func testValidationSet(validation mat.Matrix, poi []int,
	zeroDist *distmv.Normal, randDist *distmv.Normal) {
	numTraces, _ := validation.Dims()
	for i := 0; i < numTraces; i++ {
		// Extract the trace's values at the points-of-interest only.
		var x []float64
		for _, p := range poi {
			x = append(x, validation.At(i, p))
		}
		glog.V(1).Infof("x: %v, zero PDF: %f, rand PDF: %f", x, zeroDist.LogProb(x), randDist.LogProb(x))
		if zeroDist.LogProb(x) > randDist.LogProb(x) {
			glog.Infof("Classified x=%v as a zero point trace", x)
		} else {
			glog.Infof("Classified x=%v as a rand point trace", x)
		}
	}
}

// Split traces: 80% for training, 20% for validation.
func splitTraces(M mat.Matrix) (mat.Matrix, mat.Matrix) {
	r, c := M.Dims()
	return M.(*mat.Dense).Slice(0, (r*80)/100, 0, c),
		M.(*mat.Dense).Slice((r*80)/100, r, 0, c)
}

// main mounts the template attack: build templates from the training split of
// each capture, then classify both validation splits.
func main() {
	defer glog.Flush()

	glog.Info("Loading zero-point capture")
	zeroTraces := loadCapture(*zeroCaptureFlag)
	zeroTraining, zeroValidation := splitTraces(zeroTraces)

	glog.Info("Loading rand-point capture")
	randTraces := loadCapture(*randCaptureFlag)
	randTraining, randValidation := splitTraces(randTraces)

	zeroAvg := averageTraces(zeroTraining)
	randAvg := averageTraces(randTraining)

	glog.Info("Finding points of interest")
	poi := findPointsOfInterest(zeroAvg, randAvg)
	glog.Infof("Selected POI: %v", poi)

	glog.Info("Building zero-point template")
	zeroDist := buildTemplate(zeroTraining, poi)

	glog.Info("Building rand-point template")
	randDist := buildTemplate(randTraining, poi)

	glog.Info("Testing zero-point validation set")
	testValidationSet(zeroValidation, poi, zeroDist, randDist)

	glog.Info("Testing rand-point validation set")
	testValidationSet(randValidation, poi, zeroDist, randDist)
} | cmd/ecdh_zero_point_template_attack.go | 0.723212 | 0.584597 | ecdh_zero_point_template_attack.go | starcoder
package shape
import (
"gioui.org/f32"
"gioui.org/layout"
"gioui.org/op"
"gioui.org/op/clip"
"gioui.org/op/paint"
"image/color"
)
// c is the cubic Bézier control-point factor for approximating a quarter
// circle: 4*(sqrt(2)-1)/3.
const c = 0.55228475 // 4*(sqrt(2)-1)/3

// Circle is a stroked and/or filled circle shape.
type Circle struct {
	Center      f32.Point
	Radius      float32
	FillColor   *color.NRGBA
	StrokeColor *color.NRGBA
	StrokeWidth float32
}

// Bounds returns the bounding box of the circle.
// NOTE(review): stub — always returns the zero rectangle.
func (c Circle) Bounds() f32.Rectangle {
	return f32.Rectangle{}
}

// Hit reports whether p hits the circle.
// NOTE(review): stub — always returns false.
func (c Circle) Hit(p f32.Point) bool {
	return false
}

// Offset returns the shape translated by p.
// NOTE(review): stub — always returns nil.
func (c Circle) Offset(p f32.Point) Shape {
	return nil
}

// Draw renders the circle.
// NOTE(review): stub — does nothing.
func (c Circle) Draw(gtx C) {
}

// Move translates the circle's center by delta.
// NOTE(review): the value receiver means this mutates a copy — the caller's
// Circle is unchanged. A pointer receiver is likely intended, but changing it
// would alter which method set satisfies the Shape interface; confirm first.
func (c Circle) Move(delta f32.Point) {
	c.Center = c.Center.Add(delta)
}
// Stroke draws the circle outline with color col and the given stroke width,
// building an outer and an inner contour of four cubic Bézier quadrants each
// and filling the area between them. It returns the bounding box it drew into.
func (cc Circle) Stroke(col color.NRGBA, width float32, gtx layout.Context) f32.Rectangle {
	r := cc.Radius
	scale := gtx.Metric.PxPerDp
	w, h := r*2, r*2
	p := cc.Center.Mul(scale)
	box := f32.Rectangle{Max: f32.Point{X: p.X + w, Y: p.Y + h}}
	defer op.Save(gtx.Ops).Load()
	paint.ColorOp{col}.Add(gtx.Ops)
	var path clip.Path
	path.Begin(gtx.Ops)
	path.Move(f32.Point{X: p.X, Y: p.Y})
	// Outer contour, clockwise, one cubic per quadrant.
	path.Move(f32.Point{X: w / 4 * 3, Y: r / 2})
	path.Cube(f32.Point{X: 0, Y: r * c}, f32.Point{X: -r + r*c, Y: r}, f32.Point{X: -r, Y: r})      // SE
	path.Cube(f32.Point{X: -r * c, Y: 0}, f32.Point{X: -r, Y: -r + r*c}, f32.Point{X: -r, Y: -r})   // SW
	path.Cube(f32.Point{X: 0, Y: -r * c}, f32.Point{X: r - r*c, Y: -r}, f32.Point{X: r, Y: -r})     // NW
	path.Cube(f32.Point{X: r * c, Y: 0}, f32.Point{X: r, Y: r - r*c}, f32.Point{X: r, Y: r})        // NE
	path.Move(f32.Point{X: -w, Y: -r}) // Return to origin
	// Shrink to the inner contour so only the stroke band is filled.
	scaledWidth := (r - width*2) / r
	path.Move(f32.Point{X: w * (1 - scaledWidth) * .5, Y: h * (1 - scaledWidth) * .5})
	w *= scale
	h *= scale
	r *= scale
	path.Move(f32.Point{X: 0, Y: h - r})
	// Inner contour, opposite winding, one cubic per quadrant.
	path.Cube(f32.Point{X: 0, Y: r * c}, f32.Point{X: +r - r*c, Y: r}, f32.Point{X: +r, Y: r})      // SW
	path.Cube(f32.Point{X: +r * c, Y: 0}, f32.Point{X: +r, Y: -r + r*c}, f32.Point{X: +r, Y: -r})   // SE
	path.Cube(f32.Point{X: 0, Y: -r * c}, f32.Point{X: -(r - r*c), Y: -r}, f32.Point{X: -r, Y: -r}) // NE
	path.Cube(f32.Point{X: -r * c, Y: 0}, f32.Point{X: -r, Y: r - r*c}, f32.Point{X: -r, Y: r})     // NW
	clip.Outline{Path: path.End()}.Op().Add(gtx.Ops)
	paint.PaintOp{}.Add(gtx.Ops)
	return box
}

// Fill draws the circle filled with col and returns its bounding rectangle.
// NOTE(review): several lines look suspicious — ColorOp is added twice, the
// corner radius rr is derived from the center coordinates rather than the
// radius, and the clip rectangle's max corner is the center point itself.
// Only the returned rectangle uses Center/Radius as expected; verify intent.
func (cc Circle) Fill(col color.NRGBA, gtx layout.Context) f32.Rectangle {
	p := cc.Center
	r := cc.Radius
	d := r * 2
	defer op.Save(gtx.Ops).Load()
	paint.ColorOp{col}.Add(gtx.Ops)
	paint.ColorOp{col}.Add(gtx.Ops)
	rr := (p.X + p.Y) * .25
	clip.UniformRRect(f32.Rectangle{Max: f32.Point{X: p.X, Y: p.Y}}, rr).Add(gtx.Ops)
	paint.Fill(gtx.Ops, col)
	return f32.Rectangle{Min: f32.Point{X: p.X - r, Y: p.Y - r}, Max: f32.Point{X: p.X + d, Y: p.Y + d}}
} | wonder/shape/circle.go | 0.688259 | 0.419113 | circle.go | starcoder
package parse
import "fmt"
// Expr represents a special type of Node that represents an expression.
type Expr interface {
	Node
}

// NameExpr represents an identifier, such as a variable.
type NameExpr struct {
	Pos
	Name string // Name of the identifier.
}

// NewNameExpr returns a NameExpr.
func NewNameExpr(name string, pos Pos) *NameExpr {
	return &NameExpr{pos, name}
}

// All returns all the child Nodes in a NameExpr (it has none).
func (exp *NameExpr) All() []Node {
	return []Node{}
}

// String returns a string representation of the NameExpr.
func (exp *NameExpr) String() string {
	return fmt.Sprintf("NameExpr(%s)", exp.Name)
}

// NullExpr represents a null literal.
type NullExpr struct {
	Pos
}

// All returns all the child Nodes in a NullExpr (it has none).
func (exp *NullExpr) All() []Node {
	return []Node{}
}

// NewNullExpr returns a NullExpr.
func NewNullExpr(pos Pos) *NullExpr {
	return &NullExpr{pos}
}

// String returns a string representation of the NullExpr.
func (exp *NullExpr) String() string {
	return "NULL"
}

// BoolExpr represents a boolean literal.
type BoolExpr struct {
	Pos
	Value bool // The raw boolean value.
}

// NewBoolExpr returns a BoolExpr.
func NewBoolExpr(value bool, pos Pos) *BoolExpr {
	return &BoolExpr{pos, value}
}

// All returns all the child Nodes in a BoolExpr (it has none).
func (exp *BoolExpr) All() []Node {
	return []Node{}
}

// String returns a string representation of the BoolExpr.
func (exp *BoolExpr) String() string {
	if exp.Value {
		return "TRUE"
	}
	return "FALSE"
}
// NumberExpr represents a number literal.
type NumberExpr struct {
	Pos
	Value string // The string representation of the number.
}

// NewNumberExpr returns a NumberExpr.
func NewNumberExpr(val string, pos Pos) *NumberExpr {
	return &NumberExpr{pos, val}
}

// All returns all the child Nodes in a NumberExpr (it has none).
func (exp *NumberExpr) All() []Node {
	return []Node{}
}

// String returns a string representation of the NumberExpr.
func (exp *NumberExpr) String() string {
	return fmt.Sprintf("NumberExpr(%s)", exp.Value)
}

// StringExpr represents a string literal.
type StringExpr struct {
	Pos
	Text string // The text contained within the literal.
}

// NewStringExpr returns a StringExpr.
func NewStringExpr(text string, pos Pos) *StringExpr {
	return &StringExpr{pos, text}
}

// All returns all the child Nodes in a StringExpr (it has none).
func (exp *StringExpr) All() []Node {
	return []Node{}
}

// String returns a string representation of the StringExpr.
func (exp *StringExpr) String() string {
	return fmt.Sprintf("StringExpr(%s)", exp.Text)
}

// FuncExpr represents a function call.
type FuncExpr struct {
	Pos
	Name string // The name of the function.
	Args []Expr // Arguments to be passed to the function.
}

// All returns all the child Nodes in a FuncExpr: its arguments.
func (exp *FuncExpr) All() []Node {
	res := make([]Node, len(exp.Args))
	for i, n := range exp.Args {
		res[i] = n
	}
	return res
}

// NewFuncExpr returns a FuncExpr.
func NewFuncExpr(name string, args []Expr, pos Pos) *FuncExpr {
	return &FuncExpr{pos, name, args}
}

// String returns a string representation of a FuncExpr.
func (exp *FuncExpr) String() string {
	return fmt.Sprintf("FuncExpr(%s, %s)", exp.Name, exp.Args)
}

// FilterExpr represents a filter application. It shares the structure of
// a function call via the embedded FuncExpr.
type FilterExpr struct {
	*FuncExpr
}

// String returns a string representation of the FilterExpr.
func (exp *FilterExpr) String() string {
	return fmt.Sprintf("FilterExpr(%s, %s)", exp.Name, exp.Args)
}

// NewFilterExpr returns a FilterExpr.
func NewFilterExpr(name string, args []Expr, pos Pos) *FilterExpr {
	return &FilterExpr{NewFuncExpr(name, args, pos)}
}

// TestExpr represents a boolean test expression. It shares the structure
// of a function call via the embedded FuncExpr.
type TestExpr struct {
	*FuncExpr
}

// String returns a string representation of the TestExpr.
func (exp *TestExpr) String() string {
	return fmt.Sprintf("TestExpr(%s, %s)", exp.Name, exp.Args)
}

// NewTestExpr returns a TestExpr.
func NewTestExpr(name string, args []Expr, pos Pos) *TestExpr {
	return &TestExpr{NewFuncExpr(name, args, pos)}
}
// BinaryExpr represents a binary operation, such as "x + y".
type BinaryExpr struct {
	Pos
	Left  Expr   // Left side expression.
	Op    string // Binary operation in string form.
	Right Expr   // Right side expression.
}

// NewBinaryExpr returns a BinaryExpr.
func NewBinaryExpr(left Expr, op string, right Expr, pos Pos) *BinaryExpr {
	return &BinaryExpr{pos, left, op, right}
}

// All returns all the child Nodes in a BinaryExpr: its two operands.
func (exp *BinaryExpr) All() []Node {
	return []Node{exp.Left, exp.Right}
}

// String returns a string representation of the BinaryExpr.
func (exp *BinaryExpr) String() string {
	return fmt.Sprintf("BinaryExpr(%s %s %s)", exp.Left, exp.Op, exp.Right)
}

// UnaryExpr represents a unary operation, such as "not x".
type UnaryExpr struct {
	Pos
	Op string // The operation, in string form.
	X  Expr   // Expression to be evaluated.
}

// NewUnaryExpr returns a new UnaryExpr.
func NewUnaryExpr(op string, expr Expr, pos Pos) *UnaryExpr {
	return &UnaryExpr{pos, op, expr}
}

// All returns all the child Nodes in a UnaryExpr: its operand.
func (exp *UnaryExpr) All() []Node {
	return []Node{exp.X}
}

// String returns a string representation of a UnaryExpr.
func (exp *UnaryExpr) String() string {
	return fmt.Sprintf("UnaryExpr(%s %s)", exp.Op, exp.X)
}

// GroupExpr represents an arbitrary wrapper around an inner expression.
type GroupExpr struct {
	Pos
	X Expr // Expression to be evaluated.
}

// NewGroupExpr returns a GroupExpr.
func NewGroupExpr(inner Expr, pos Pos) *GroupExpr {
	return &GroupExpr{pos, inner}
}

// All returns all the child Nodes in a GroupExpr: its inner expression.
func (exp *GroupExpr) All() []Node {
	return []Node{exp.X}
}

// String returns a string representation of a GroupExpr.
func (exp *GroupExpr) String() string {
	return fmt.Sprintf("GroupExpr(%s)", exp.X)
}

// GetAttrExpr represents an attempt to retrieve an attribute from a value.
type GetAttrExpr struct {
	Pos
	Cont Expr   // Container to get attribute from.
	Attr Expr   // Attribute to get.
	Args []Expr // Args to pass to attribute, if its a method.
}

// NewGetAttrExpr returns a GetAttrExpr.
func NewGetAttrExpr(cont Expr, attr Expr, args []Expr, pos Pos) *GetAttrExpr {
	return &GetAttrExpr{pos, cont, attr, args}
}

// All returns all the child Nodes in a GetAttrExpr: the container, the
// attribute and any method arguments.
func (exp *GetAttrExpr) All() []Node {
	res := []Node{exp.Cont, exp.Attr}
	for _, v := range exp.Args {
		res = append(res, v)
	}
	return res
}

// String returns a string representation of a GetAttrExpr.
func (exp *GetAttrExpr) String() string {
	if len(exp.Args) > 0 {
		return fmt.Sprintf("GetAttrExpr(%s -> %s %v)", exp.Cont, exp.Attr, exp.Args)
	}
	return fmt.Sprintf("GetAttrExpr(%s -> %s)", exp.Cont, exp.Attr)
}
// TernaryIfExpr represents a conditional (ternary if) expression such as
// "cond ? a : b". (The previous comment was a copy-paste of GetAttrExpr's.)
type TernaryIfExpr struct {
	Pos
	Cond   Expr // Condition to test.
	TrueX  Expr // Expression if Cond is true.
	FalseX Expr // Expression if Cond is false.
}

// NewTernaryIfExpr returns a TernaryIfExpr.
func NewTernaryIfExpr(cond, tx, fx Expr, pos Pos) *TernaryIfExpr {
	return &TernaryIfExpr{pos, cond, tx, fx}
}

// All returns all the child Nodes in a TernaryIfExpr: the condition and
// both branches.
func (exp *TernaryIfExpr) All() []Node {
	return []Node{exp.Cond, exp.TrueX, exp.FalseX}
}

// String returns a string representation of a TernaryIfExpr.
func (exp *TernaryIfExpr) String() string {
	return fmt.Sprintf("%s ? %s : %v", exp.Cond, exp.TrueX, exp.FalseX)
}

// KeyValueExpr represents a single key-value pair, as used inside a hash
// literal.
type KeyValueExpr struct {
	Pos
	Key   Expr // The key expression.
	Value Expr // The value expression.
}

// NewKeyValueExpr returns a KeyValueExpr.
func NewKeyValueExpr(k, v Expr, pos Pos) *KeyValueExpr {
	return &KeyValueExpr{pos, k, v}
}

// All returns all the child Nodes in a KeyValueExpr: the key and the value.
func (exp *KeyValueExpr) All() []Node {
	return []Node{exp.Key, exp.Value}
}

// String returns a string representation of a KeyValueExpr.
func (exp *KeyValueExpr) String() string {
	return fmt.Sprintf("%s: %s", exp.Key, exp.Value)
}
type HashExpr struct {
Pos
Elements []*KeyValueExpr
}
// NewHashExpr returns a HashExpr.
func NewHashExpr(pos Pos, elements ...*KeyValueExpr) *HashExpr {
return &HashExpr{pos, elements}
}
// All returns all the child Nodes in a HashExpr.
func (exp *HashExpr) All() []Node {
all := make([]Node, len(exp.Elements))
for i, v := range exp.Elements {
all[i] = v
}
return all
}
// String returns a string representation of a HashExpr.
func (exp *HashExpr) String() string {
return fmt.Sprintf("HashExpr{%v}", exp.Elements)
}
type ArrayExpr struct {
Pos
Elements []Expr
}
// NewArrayExpr returns a ArrayExpr.
func NewArrayExpr(pos Pos, els ...Expr) *ArrayExpr {
return &ArrayExpr{pos, els}
}
// All returns all the child Nodes in a ArrayExpr.
func (exp *ArrayExpr) All() []Node {
all := make([]Node, len(exp.Elements))
for i, v := range exp.Elements {
all[i] = v
}
return all
}
// String returns a string representation of a ArrayExpr.
func (exp *ArrayExpr) String() string {
return fmt.Sprintf("ArrayExpr%v", exp.Elements)
} | parse/expr.go | 0.813535 | 0.615261 | expr.go | starcoder |
package clang
// #include <stdlib.h>
// #include "go-clang.h"
import "C"
/**
* \brief Flags that control the creation of translation units.
*
* The enumerators in this enumeration type are meant to be bitwise
* ORed together to specify which options should be used when
* constructing the translation unit.
*/
type TranslationUnitFlags uint32
const (
/**
* \brief Used to indicate that no special translation-unit options are
* needed.
*/
TU_None = C.CXTranslationUnit_None
/**
* \brief Used to indicate that the parser should construct a "detailed"
* preprocessing record, including all macro definitions and instantiations.
*
* Constructing a detailed preprocessing record requires more memory
* and time to parse, since the information contained in the record
* is usually not retained. However, it can be useful for
* applications that require more detailed information about the
* behavior of the preprocessor.
*/
TU_DetailedPreprocessingRecord = C.CXTranslationUnit_DetailedPreprocessingRecord
/**
* \brief Used to indicate that the translation unit is incomplete.
*
* When a translation unit is considered "incomplete", semantic
* analysis that is typically performed at the end of the
* translation unit will be suppressed. For example, this suppresses
* the completion of tentative declarations in C and of
* instantiation of implicitly-instantiation function templates in
* C++. This option is typically used when parsing a header with the
* intent of producing a precompiled header.
*/
TU_Incomplete = C.CXTranslationUnit_Incomplete
/**
* \brief Used to indicate that the translation unit should be built with an
* implicit precompiled header for the preamble.
*
* An implicit precompiled header is used as an optimization when a
* particular translation unit is likely to be reparsed many times
* when the sources aren't changing that often. In this case, an
* implicit precompiled header will be built containing all of the
* initial includes at the top of the main file (what we refer to as
* the "preamble" of the file). In subsequent parses, if the
* preamble or the files in it have not changed, \c
* clang_reparseTranslationUnit() will re-use the implicit
* precompiled header to improve parsing performance.
*/
TU_PrecompiledPreamble = C.CXTranslationUnit_PrecompiledPreamble
/**
* \brief Used to indicate that the translation unit should cache some
* code-completion results with each reparse of the source file.
*
* Caching of code-completion results is a performance optimization that
* introduces some overhead to reparsing but improves the performance of
* code-completion operations.
*/
TU_CacheCompletionResults = C.CXTranslationUnit_CacheCompletionResults
/**
* \brief Used to indicate that the translation unit will be serialized with
* \c clang_saveTranslationUnit.
*
* This option is typically used when parsing a header with the intent of
* producing a precompiled header.
*/
TU_ForSerialization = C.CXTranslationUnit_ForSerialization
/**
* \brief DEPRECATED: Enabled chained precompiled preambles in C++.
*
* Note: this is a *temporary* option that is available only while
* we are testing C++ precompiled preamble support. It is deprecated.
*/
TU_CXXChainedPCH = C.CXTranslationUnit_CXXChainedPCH
/**
* \brief Used to indicate that function/method bodies should be skipped while
* parsing.
*
* This option can be used to search for declarations/definitions while
* ignoring the usages.
*/
TU_SkipFunctionBodies = C.CXTranslationUnit_SkipFunctionBodies
/**
* \brief Used to indicate that brief documentation comments should be
* included into the set of code completions returned from this translation
* unit.
*/
TU_IncludeBriefCommentsInCodeCompletion = C.CXTranslationUnit_IncludeBriefCommentsInCodeCompletion
) | translationunitflags.go | 0.53048 | 0.428413 | translationunitflags.go | starcoder |
package draw2d
import (
"image"
"image/color"
)
// GraphicContext describes the interface for the various backends (images, pdf, opengl, ...)
type GraphicContext interface {
PathBuilder
// BeginPath creates a new path
BeginPath()
// GetMatrixTransform returns the current transformation matrix
GetMatrixTransform() Matrix
// SetMatrixTransform sets the current transformation matrix
SetMatrixTransform(tr Matrix)
// ComposeMatrixTransform composes the current transformation matrix with tr
ComposeMatrixTransform(tr Matrix)
// Rotate applies a rotation to the current transformation matrix. angle is in radian.
Rotate(angle float64)
// Translate applies a translation to the current transformation matrix.
Translate(tx, ty float64)
// Scale applies a scale to the current transformation matrix.
Scale(sx, sy float64)
// SetStrokeColor sets the current stroke color
SetStrokeColor(c color.Color)
// SetStrokeColor sets the current fill color
SetFillColor(c color.Color)
// SetFillRule sets the current fill rule
SetFillRule(f FillRule)
// SetLineWidth sets the current line width
SetLineWidth(lineWidth float64)
// SetLineCap sets the current line cap
SetLineCap(cap LineCap)
// SetLineJoin sets the current line join
SetLineJoin(join LineJoin)
// SetLineJoin sets the current dash
SetLineDash(dash []float64, dashOffset float64)
// SetFontSize
SetFontSize(fontSize float64)
GetFontSize() float64
SetFontData(fontData FontData)
GetFontData() FontData
DrawImage(image image.Image)
Save()
Restore()
Clear()
ClearRect(x1, y1, x2, y2 int)
SetDPI(dpi int)
GetDPI() int
GetStringBounds(s string) (left, top, right, bottom float64)
CreateStringPath(text string, x, y float64) (cursor float64)
FillString(text string) (cursor float64)
FillStringAt(text string, x, y float64) (cursor float64)
StrokeString(text string) (cursor float64)
StrokeStringAt(text string, x, y float64) (cursor float64)
Stroke(paths ...*Path)
Fill(paths ...*Path)
FillStroke(paths ...*Path)
} | vendor/github.com/llgcode/draw2d/gc.go | 0.619471 | 0.547646 | gc.go | starcoder |
package m2c
import (
"fmt"
"math/cmplx"
)
var (
	conj = cmplx.Conj
)

// Matrix is a 2x2 matrix of complex numbers, laid out as
//
//	| A B |
//	| C D |
type Matrix struct {
	A, B, C, D complex128
}

// NewMatrix returns the matrix with the given entries.
func NewMatrix(a, b, c, d complex128) Matrix {
	return Matrix{A: a, B: b, C: c, D: d}
}

// CannotInvertMatrixError complains cause it is not possible to invert a
// matrix if determinant is zero.
type CannotInvertMatrixError struct {
	matrix Matrix
}

func (e *CannotInvertMatrixError) Error() string {
	return fmt.Sprintf("Cannot invert a matrix with determinant=0\n%v", e.matrix)
}

// Add two matrices.
func Add(l Matrix, r Matrix) Matrix {
	return Matrix{l.A + r.A, l.B + r.B, l.C + r.C, l.D + r.D}
}

// Conj returns the conjugated matrix (element-wise complex conjugation).
func Conj(m Matrix) Matrix {
	return Matrix{conj(m.A), conj(m.B), conj(m.C), conj(m.D)}
}

// Eq checks if two matrices are equal.
func Eq(l Matrix, r Matrix) bool {
	return (l.A == r.A) && (l.B == r.B) && (l.C == r.C) && (l.D == r.D)
}

// Det computes matrix determinant.
func Det(m Matrix) complex128 {
	return m.A*m.D - m.B*m.C
}

// I returns the identity matrix.
func I() Matrix {
	return Matrix{1, 0, 0, 1}
}

// J returns the symplectic matrix.
func J() Matrix {
	return Matrix{0, 1, -1, 0}
}

// Inv inverts given matrix respect to multiplication.
// If it has determinant equal to zero, an error will be returned
// as second argument.
func Inv(m Matrix) (Matrix, error) {
	det := Det(m)
	if det == 0 {
		return Matrix{}, &CannotInvertMatrixError{m}
	}
	// Standard 2x2 adjugate formula: (1/det) * | D -B ; -C A |.
	return Matrix{m.D / det, -m.B / det, -m.C / det, m.A / det}, nil
}

// Mul multiplies two matrices. This operator is not commutative.
//
// BUG FIX: the C entry was previously computed as l.C*r.A + l.D*r.B;
// row 2 times column 1 is l.C*r.A + l.D*r.C.
func Mul(l Matrix, r Matrix) Matrix {
	return Matrix{
		l.A*r.A + l.B*r.C,
		l.A*r.B + l.B*r.D,
		l.C*r.A + l.D*r.C,
		l.C*r.B + l.D*r.D,
	}
}

// Neg computes matrix inverse respect to addition.
func Neg(m Matrix) Matrix {
	return Matrix{-m.A, -m.B, -m.C, -m.D}
}

// Scalar multiplies matrix by a complex number.
func Scalar(m Matrix, c complex128) Matrix {
	return Matrix{m.A * c, m.B * c, m.C * c, m.D * c}
}

// Sub subtracts two matrices.
func Sub(l Matrix, r Matrix) Matrix {
	return Matrix{l.A - r.A, l.B - r.B, l.C - r.C, l.D - r.D}
}

// T returns the transposed matrix.
func T(m Matrix) Matrix {
	return Matrix{m.A, m.C, m.B, m.D}
}

// Zero returns the matrix with all zeros.
func Zero() Matrix {
	return Matrix{0, 0, 0, 0}
}
package trie
// Trie is a prefix-tree data structure. Each node holds one letter, a set
// of child nodes, arbitrary per-node metadata, and a flag marking whether
// the path from the root to this node spells a complete stored word.
//
// (Fixes: unidiomatic boolean comparisons (`== true` / `!= true`) and a
// double map lookup in Get; extraction junk fused onto the final brace is
// stripped. Behavior is unchanged.)
type Trie struct {
	letter   rune
	children []*Trie
	meta     map[string]interface{}
	isLeaf   bool // true when the path from the root to here is a stored word
}

// NewTrie creates a new trie with default values.
func NewTrie() *Trie {
	return &Trie{
		children: []*Trie{},
		meta:     make(map[string]interface{}),
	}
}

// hasChild reports whether t has a direct child holding letter a, and
// returns that child when found.
func (t *Trie) hasChild(a rune) (bool, *Trie) {
	for _, child := range t.children {
		if child.letter == a {
			return true, child
		}
	}
	return false, nil
}

// addChild appends a new child node holding letter a and returns it.
func (t *Trie) addChild(a rune) *Trie {
	child := NewTrie()
	child.letter = a
	t.children = append(t.children, child)
	return child
}

// Add inserts word into the trie, creating intermediate nodes as needed,
// marks the final node as a leaf (complete word), and returns that node.
// Adding the empty string returns the receiver unchanged.
func (t *Trie) Add(word string) *Trie {
	letters, node := []rune(word), t
	n := len(letters)
	for i := 0; i < n; i++ {
		if exists, child := node.hasChild(letters[i]); exists {
			node = child
		} else {
			node = node.addChild(letters[i])
		}
		if i == n-1 {
			node.isLeaf = true
		}
	}
	return node
}

// FindNode walks the trie along word and returns the node reached, or nil
// if some letter has no matching child. The node need not be a leaf, so
// this also matches prefixes of stored words.
func (t *Trie) FindNode(word string) *Trie {
	node := t
	for _, letter := range word {
		exists, child := node.hasChild(letter)
		if !exists {
			return nil
		}
		node = child
	}
	return node
}

// Find returns the node for word only if word was stored as a complete
// word (its node is a leaf); otherwise it returns nil.
func (t *Trie) Find(word string) *Trie {
	node := t.FindNode(word)
	if node == nil || !node.isLeaf {
		return nil
	}
	return node
}

// Remove deletes word from the trie by clearing its leaf flag. The nodes
// themselves are kept, since they may serve other words sharing the
// prefix.
func (t *Trie) Remove(word string) {
	if node := t.Find(word); node != nil {
		node.isLeaf = false
	}
}

// Count returns the number of complete words stored in the subtrie rooted
// at t.
func (t *Trie) Count() int {
	count := 0
	for _, child := range t.children {
		if child.isLeaf {
			count++
		}
		count += child.Count()
	}
	return count
}

// Get returns the metadata stored under key on this node, and whether the
// key was present. A nil receiver yields (nil, false).
func (t *Trie) Get(key string) (interface{}, bool) {
	if t == nil {
		return nil, false
	}
	v, ok := t.meta[key]
	return v, ok
}

// Set stores val under key in this node's metadata. A nil receiver is a
// no-op.
func (t *Trie) Set(key string, val interface{}) {
	if t == nil {
		return
	}
	t.meta[key] = val
}
package nagios
import (
"github.com/hashicorp/terraform/helper/schema"
)
// dataSourceHost defines the Terraform data source schema for a Nagios
// host. The host is looked up by host_name; every other attribute is
// computed from the values returned by the Nagios API.
//
// (Fixes in user-facing description strings: "respoitory" -> "repository",
// "whenver" -> "whenever", the truncated icon_image_alt description is
// completed, and display_name's description now references the actual
// `host_name` attribute.)
func dataSourceHost() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceHostRead,
		Schema: map[string]*schema.Schema{
			"host_name": {
				Type:     schema.TypeString,
				Required: true,
			},
			"address": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The IP address of the host",
			},
			"display_name": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "Another name for the host that will be displayed in the web interface. If left blank, the value from `host_name` will be displayed",
			},
			"max_check_attempts": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "How many times to retry the host check before alerting when the state is anything other than OK",
			},
			"check_period": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The time period during which active checks of the host can be made",
			},
			"notification_interval": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "How long to wait before sending another notification to a contact that the host is down",
			},
			"notification_period": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The time period during which notifications can be sent for a host alert",
			},
			"contacts": {
				Type:        schema.TypeSet,
				Computed:    true,
				Description: "The list of users that Nagios should alert when a host is down",
				Elem: &schema.Schema{
					Type: schema.TypeString,
				},
			},
			"alias": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "A longer name to describe the host",
			},
			"templates": {
				Type:        schema.TypeSet,
				Computed:    true,
				Description: "A list of Nagios templates to apply to the host",
				Elem: &schema.Schema{
					Type: schema.TypeString,
				},
			},
			"check_command": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The name of the command that should be used to check if the host is up or down",
			},
			"contact_groups": {
				Type:        schema.TypeSet,
				Computed:    true,
				Description: "A list of the contact groups that should be notified if the host goes down",
				Elem: &schema.Schema{
					Type: schema.TypeString,
				},
			},
			"notes": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "Notes about the host that may assist with troubleshooting",
			},
			"notes_url": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "URL to a third-party documentation repository containing more information about the host",
			},
			"action_url": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "URL to a third-party documentation repository containing actions to take in the event the host goes down",
			},
			"initial_state": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The state of the host when it is first added to Nagios. Valid options are: 'd' down, 's' up or 'u' unreachable",
			},
			"retry_interval": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "How often should Nagios try to check the host after the initial down alert",
			},
			"passive_checks_enabled": {
				Type:        schema.TypeBool,
				Computed:    true,
				Description: "Sets whether or not passive checks are enabled for the host",
			},
			"active_checks_enabled": {
				Type:        schema.TypeBool,
				Computed:    true,
				Description: "Sets whether or not active checks are enabled for the host",
			},
			"obsess_over_host": {
				Type:        schema.TypeBool,
				Computed:    true,
				Description: "Sets whether or not Nagios 'obsesses' over the host using the ochp_command",
			},
			"event_handler": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The command that should be run whenever a change in the state of the host is detected",
			},
			"event_handler_enabled": {
				Type:        schema.TypeBool,
				Computed:    true,
				Description: "Sets whether or not event handlers should be enabled for the host",
			},
			"flap_detection_enabled": {
				Type:        schema.TypeBool,
				Computed:    true,
				Description: "Sets whether or not flap detection is enabled for the host",
			},
			"flap_detection_options": {
				Type:        schema.TypeSet,
				Computed:    true,
				Description: "Determines what flap detection logic will be used for the host. One or more of the following valid options can be provided: 'd' down, 'o' up, or 'u' unreachable.",
				Elem: &schema.Schema{
					Type: schema.TypeString,
				},
			},
			"low_flap_threshold": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The minimum threshold that should be used when detecting if flapping is occurring",
			},
			"high_flap_threshold": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The maximum threshold that should be used when detecting if flapping is occurring",
			},
			"process_perf_data": {
				Type:        schema.TypeBool,
				Computed:    true,
				Description: "Determines if Nagios should process performance data",
			},
			"retain_status_information": {
				Type:        schema.TypeBool,
				Computed:    true,
				Description: "Sets whether or not status related information should be kept for the host",
			},
			"retain_nonstatus_information": {
				Type:        schema.TypeBool,
				Computed:    true,
				Description: "Sets whether or not non-status related information should be kept for the host",
			},
			"check_freshness": {
				Type:        schema.TypeBool,
				Computed:    true,
				Description: "Sets whether or not freshness checks are enabled for the host",
			},
			"freshness_threshold": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The freshness threshold used for the host",
			},
			"first_notification_delay": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The amount of time to wait to send out the first notification when a host enters a non-UP state",
			},
			"notification_options": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "Determines when Nagios should alert if a host is one or more of the following option: 'o' up, 'd' down, 'u' unreachable, 'r' recovery, 'f' flapping or 's' scheduled downtime",
			},
			"notifications_enabled": {
				Type:        schema.TypeBool,
				Computed:    true,
				Description: "Determines if Nagios should send notifications",
			},
			"stalking_options": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "A list of options to determine which states, if any, should be stalked by Nagios. Refer to the [Nagios documentation](https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/3/en/stalking.html) for more information on stalking",
			},
			"icon_image": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The icon to display in Nagios",
			},
			"icon_image_alt": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The text to display when hovering over the icon image",
			},
			"vrml_image": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The image that will be used as a texture map for the specified host",
			},
			"statusmap_image": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The name of the image that should be used in the statusmap CGI in Nagios",
			},
			"2d_coords": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The coordinates to use when drawing the host in the statusmap CGI",
			},
			"3d_coords": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The coordinates to use when drawing the host in the statuswrl CGI",
			},
			"register": {
				Type:        schema.TypeBool,
				Computed:    true,
				Description: "Determines if the host will be marked as active or inactive",
			},
			"free_variables": {
				Type:        schema.TypeMap,
				Computed:    true,
				Description: "A key/value pair of free variables to add to the host. The key must begin with an underscore.",
				Elem: &schema.Schema{
					Type: schema.TypeString,
				},
			},
		},
	}
}
func dataSourceHostRead(d *schema.ResourceData, m interface{}) error {
client := m.(*Client)
hostName := d.Get("host_name").(string)
host, err := client.getHost(hostName)
if err != nil {
return err
}
setDataFromHost(d, host)
return nil
} | nagios/data_source_host.go | 0.616012 | 0.470007 | data_source_host.go | starcoder |
package continuous
import (
"fmt"
"math"
"os"
"sort"
pb "gopkg.in/cheggaaa/pb.v1"
)
// FrenzelPompe is an implementation of the conditional (partial) mutual
// information estimator from
//
//	S. Frenzel and B. Pompe.
//	Partial mutual information for coupling analysis of multivariate time series.
//	Phys. Rev. Lett., 99:204101, Nov 2007.
//
// xyz holds one sample per row; xIndices, yIndices and zIndices select
// the columns belonging to X, Y and Z respectively. k is the neighbour
// count of the kNN estimator and eta toggles a progress bar. The function
// assumes that the data xyz is normalised column-wise. Missing inputs are
// reported to stdout and terminate the process.
//
// (Fix: the error message said "xyx data" instead of "xyz data".)
func FrenzelPompe(xyz [][]float64, xIndices, yIndices, zIndices []int, k int, eta bool) float64 {
	err := ""
	if len(xyz) == 0 || len(xyz[0]) == 0 {
		err = fmt.Sprintf("%sPlease provide xyz data.\n", err)
	}
	if len(xIndices) == 0 {
		err = fmt.Sprintf("%sPlease provide x indices.\n", err)
	}
	if len(yIndices) == 0 {
		err = fmt.Sprintf("%sPlease provide y indices.\n", err)
	}
	if len(zIndices) == 0 {
		err = fmt.Sprintf("%sPlease provide z indices.\n", err)
	}
	if len(err) > 0 {
		fmt.Println(err)
		os.Exit(-1)
	}

	r := 0.0
	T := len(xyz)
	Tf := float64(T)
	hk := Harmonic(k - 1)

	var bar *pb.ProgressBar
	if eta {
		bar = pb.StartNew(T)
	}
	// Per sample: find the epsilon-ball of the k-th neighbour in the joint
	// space, then accumulate the harmonic numbers of the neighbour counts
	// in the (x,z), (y,z) and (z) marginal spaces.
	for _, v := range xyz {
		epsilon := fpGetEpsilon(k, v, xyz, xIndices, yIndices, zIndices)
		cNxz := fpCount2(epsilon, v, xyz, xIndices, zIndices)
		hNxz := Harmonic(cNxz)
		cNyz := fpCount2(epsilon, v, xyz, yIndices, zIndices)
		hNyz := Harmonic(cNyz)
		cNz := fpCount1(epsilon, v, xyz, zIndices)
		hNz := Harmonic(cNz)
		r += hNxz + hNyz - hNz
		if eta {
			bar.Increment()
		}
	}
	if eta {
		bar.Finish()
	}
	r = r/Tf - hk
	return r
}
// fpMaxNorm3 computes the max-norm of two 3-dimensional vectors
// maxnorm(a,b) = max( |a[0] - b[0]|, |a[1] - b[1]|, |a[2] - b[2]|)
// where the three "dimensions" are the coordinate groups selected by
// xIndices, yIndices and zIndices, each measured with Distance.
func fpMaxNorm3(a, b []float64, xIndices, yIndices, zIndices []int) float64 {
	xDist := Distance(a, b, xIndices)
	yDist := Distance(a, b, yIndices)
	zDist := Distance(a, b, zIndices)
	return math.Max(xDist, math.Max(yDist, zDist))
}
// fpGetEpsilon calculates epsilon_k(t) as defined by Frenzel & Pompe,
// 2007: the distance to the k-th nearest neighbour of xyz within data,
// measured in the max-norm over the x, y and z coordinate groups.
//
// BUG FIX: the guard was `k > len(distances)`, which still allowed
// distances[k] to be evaluated with k == len(distances) — an
// index-out-of-range panic. The guard must be `>=`.
func fpGetEpsilon(k int, xyz []float64, data [][]float64, xIndices, yIndices, zIndices []int) float64 {
	distances := make([]float64, len(data))
	for t := 0; t < len(data); t++ {
		distances[t] = fpMaxNorm3(xyz, data[t], xIndices, yIndices, zIndices)
	}
	sort.Float64s(distances)
	if k >= len(distances) {
		return distances[len(distances)-1]
	}
	return distances[k] // we start to count at zero, but the first one is xyz[t] vs. xyz[t]
}
// fpCount2 counts the number of points in data whose selected x and y
// coordinate groups are strictly closer than epsilon to xyz, where the
// distance is measured by the max-norm. The comparison of xyz with itself
// always matches and is cancelled by starting the count at -1.
func fpCount2(epsilon float64, xyz []float64, data [][]float64, xIndices, yIndices []int) (c int) {
	c = -1 // because we will also count xyz[t] vs. xyz[t]
	for t := 0; t < len(data); t++ {
		if fpMaxNorm2(xyz, data[t], xIndices, yIndices) < epsilon {
			c++
		}
	}
	return
}

// fpMaxNorm2 computes the max-norm over the two coordinate groups
// selected by xIndices and yIndices, each measured with Distance.
func fpMaxNorm2(a, b []float64, xIndices, yIndices []int) float64 {
	xDist := Distance(a, b, xIndices)
	yDist := Distance(a, b, yIndices)
	return math.Max(xDist, yDist)
}
// fpCount1 count the number of points for which the z coordinate is
// closer than epsilon
func fpCount1(epsilon float64, xyz []float64, data [][]float64, zIndices []int) (c int) {
c = -1 // because we will also count xyz[t] vs. xyz[t]
for t := 0; t < len(data); t++ {
if Distance(xyz, data[t], zIndices) < epsilon {
c++
}
}
return
} | continuous/FrenzelPompe.go | 0.592195 | 0.540742 | FrenzelPompe.go | starcoder |
// Package csv provides methods for easily validating the rows and columns of a
// CSV file.
package csv
import (
"context"
"io/ioutil"
"os"
"regexp"
"strconv"
"strings"
"chromiumos/tast/errors"
"chromiumos/tast/local/crosconfig"
)
// validator represents a function validator that takes in as input an element
// that is part of the csv data. If the validation fails, an error is returned.
// If the validation succeeds, nil is returned.
type validator func(string) error

// column pairs a CSV header name with the validation applied to every
// value in that column.
type column struct {
	field      string    // Expected header name for this column.
	columnFunc validator // Runs against each non-header cell in the column.
}
// ValidateCSV is responsible for validating headers and all values of the CSV
// output. An error is returned if the headers are invalid or if any of the CSV
// values are incorrect as determined by the validators provided to verify a
// particular CSV value. rows is the expected total row count (header
// included), and columns supplies one validator bundle per CSV column, in
// order.
func ValidateCSV(csv [][]string, rows int, columns ...column) error {
	// Validate csv dimensions.
	if len(csv) != rows {
		return errors.Errorf("incorrect number of rows in csv: got %v, want %v", len(csv), rows)
	}
	// An empty CSV (expected rows == 0) has no headers or values to check.
	if len(csv) == 0 {
		return nil
	}
	if len(columns) != len(csv[0]) {
		return errors.Errorf("number of Column validators does not match number of csv columns: got %v, want %v", len(columns), len(csv[0]))
	}
	// Validate headers (row 0) against the expected field names, in order.
	for i, col := range columns {
		if col.field != csv[0][i] {
			return errors.Errorf("incorrect header name: got %v, want %v", csv[0][i], col.field)
		}
	}
	// Validate all columns of the CSV (starting from first row) in |csv|.
	for i := 1; i < len(csv); i++ {
		for j, col := range columns {
			if err := col.columnFunc(csv[i][j]); err != nil {
				return errors.Wrapf(err, "failed validation on %v (Column %v) with value %v", col.field, j, csv[i][j])
			}
		}
	}
	return nil
}
// Rows simply returns the number of rows (including header) that the csv must
// contain. It exists purely for readability at ValidateCSV call sites.
func Rows(expectedRows int) int {
	return expectedRows
}
// Column returns the field name of the column and a validator that will use a
// list of validators to validate all values in this CSV column. The return
// value is of the column type.
func Column(field string, validators ...validator) column {
	// The combined validator fails fast on the first validator that
	// rejects the value.
	combined := func(csvValue string) error {
		for _, v := range validators {
			if err := v(csvValue); err != nil {
				return err
			}
		}
		return nil
	}
	return column{field: field, columnFunc: combined}
}
// ColumnWithDefault returns the field name of the column and a validator that
// will use a list of validators to validate all values in this CSV column. If
// the field value is equal to |defaultValue|, the validators are not run.
func ColumnWithDefault(field, defaultValue string, validators ...validator) column {
	// Reuse Column so the validator-chaining logic lives in one place
	// instead of being duplicated here.
	inner := Column(field, validators...)
	columnFunc := func(csvValue string) error {
		// A value equal to the default is accepted without validation.
		if csvValue == defaultValue {
			return nil
		}
		return inner.columnFunc(csvValue)
	}
	return column{field, columnFunc}
}
// UInt64 returns a validator that checks whether |actual| can be parsed into a
// uint64.
func UInt64() validator {
	return func(actual string) error {
		const base, bitSize = 10, 64
		if _, err := strconv.ParseUint(actual, base, bitSize); err != nil {
			return errors.Wrapf(err, "failed to convert %v to uint64", actual)
		}
		return nil
	}
}
// MatchValue returns a validator that checks whether |value| is equal to
// |actual|.
func MatchValue(value string) validator {
	return func(actual string) error {
		if value != actual {
			// |actual| is the observed CSV value ("got"); |value| is
			// the expectation ("want").
			return errors.Errorf("values do not match; got %v, want %v", actual, value)
		}
		return nil
	}
}
// MatchRegex returns a function that checks whether |actual| matches the
// regex pattern specified by |regex|.
func MatchRegex(regex *regexp.Regexp) validator {
	return func(actual string) error {
		if !regex.MatchString(actual) {
			// |actual| is the observed value ("got"); the pattern is
			// what it was expected to match ("want").
			return errors.Errorf("failed to follow correct pattern: got %v, want %v", actual, regex)
		}
		return nil
	}
}
// EqualToFileContent returns a function that checks whether |path| exists. If
// it does, it compares the value at that location with |actual|. If it does
// not, an error is returned.
func EqualToFileContent(path string) validator {
	return func(actual string) error {
		raw, err := ioutil.ReadFile(path)
		switch {
		case os.IsNotExist(err):
			return errors.Errorf("file does not exist: %v", path)
		case err != nil:
			return errors.Wrapf(err, "failed to read from file %v", path)
		}
		// Trailing newlines in the file are ignored for the comparison.
		want := strings.TrimRight(string(raw), "\n")
		if actual != want {
			return errors.Errorf("value does not match content of %v: got %v, want %v", path, actual, want)
		}
		return nil
	}
}
// EqualToFileIfCrosConfigProp returns a function that checks whether
// |filePath| should exist by using crosconfig and its two arguments, |prop| and
// |path|. If the crosconfig property does not exist, an error is returned. If
// the file exists, it attempts to read the value from the file. If it
// cannot, an error is reported. If it can, it compares the read value with
// |actual|.
func EqualToFileIfCrosConfigProp(ctx context.Context, path, prop, filePath string) validator {
	return func(actual string) error {
		val, err := crosconfig.Get(ctx, path, prop)
		if err != nil && !crosconfig.IsNotFound(err) {
			return errors.Wrapf(err, "failed to get crosconfig %v property", prop)
		}
		// A missing property, or any value other than "true", is treated
		// as the property not existing.
		if crosconfig.IsNotFound(err) || val != "true" {
			return errors.Errorf("crosconfig property does not exist: %v", prop)
		}
		raw, err := ioutil.ReadFile(filePath)
		if err != nil {
			return errors.Wrapf(err, "failed to read file %v", filePath)
		}
		// Trailing newlines in the file are ignored for the comparison.
		want := strings.TrimRight(string(raw), "\n")
		if actual != want {
			return errors.Errorf("failed to get correct value: got %v, want %v", actual, want)
		}
		return nil
	}
}
// EqualToCrosConfigProp returns a function that uses crosconfig and its two
// arguments, |prop| and |path| to obtain a value that is compared with
// |actual|.
func EqualToCrosConfigProp(ctx context.Context, path, prop string) validator {
return func(actual string) error {
expected, err := crosconfig.Get(ctx, path, prop)
if err != nil && !crosconfig.IsNotFound(err) {
return errors.Wrapf(err, "failed to get crosconfig %v property", prop)
}
if actual != expected {
return errors.Errorf("failed to get correct value: got %v, want %v", actual, expected)
}
return nil
}
} | src/chromiumos/tast/local/bundles/cros/platform/csv/valid.go | 0.69181 | 0.469642 | valid.go | starcoder |
package polygo
/*
This file contains polynomial type defintions and general operations.
*/
import (
"errors"
"fmt"
)
// A RealPolynomial is represented as a slice of coefficients ordered increasingly by degree.
// For example, one can imagine: 5x^0 + 4x^1 + (-2)x^2 + ...
type RealPolynomial struct {
	coeffs []float64 // coeffs[i] is the coefficient of x^i
}

// A point in R^2.
type Point struct {
	X, Y float64
}

/* --- BEGIN GLOBAL SETTINGS --- */
// The number of iterations used in Newton's Method implementation in root solving functions.
var globalNewtonIterations = 25

/* --- END GLOBAL SETTINGS --- */
/* --- BEGIN STRUCT METHODS --- */
// NumCoeffs returns the number of coefficients of the current instance.
// Panics if the receiver is nil.
func (rp *RealPolynomial) NumCoeffs() int {
	if rp == nil {
		panic("received nil *RealPolynomial")
	}
	return len(rp.coeffs)
}
// Degree returns the degree of the current instance.
// Panics if the receiver is nil.
func (rp *RealPolynomial) Degree() int {
	if rp == nil {
		panic("received nil RealPolynomial")
	}
	// The coefficient slice is maintained (trailing zeroes stripped on
	// construction) so that the degree is always one less than the number
	// of coefficients.
	return len(rp.coeffs) - 1
}
// At returns the value of the current instance evaluated at x.
func (rp *RealPolynomial) At(x float64) float64 {
	if rp == nil {
		panic("received nil *RealPolynomial")
	}
	// Horner's method: fold from the highest-degree coefficient downwards,
	// multiplying by x at each step.
	n := len(rp.coeffs)
	result := rp.coeffs[n-1]
	for i := n - 2; i >= 0; i-- {
		result = result*x + rp.coeffs[i]
	}
	return result
}
// Derivative returns the derivative of the current instance.
// The current instance is not modified.
func (rp *RealPolynomial) Derivative() *RealPolynomial {
	if rp == nil {
		panic("received nil *RealPolynomial")
	}
	// The derivative of any real constant is the zero polynomial.
	if rp.Degree() == 0 {
		zero, _ := NewRealPolynomial([]float64{0}) // safe call
		return zero
	}
	// d/dx of c*x^k is (k*c)*x^(k-1), so coefficient i of the derivative
	// comes from coefficient i+1 of the original.
	dCoeffs := make([]float64, len(rp.coeffs)-1)
	for i := range dCoeffs {
		dCoeffs[i] = rp.coeffs[i+1] * float64(i+1)
	}
	result, _ := NewRealPolynomial(dCoeffs) // safe call
	return result
}
// LeadCoeff returns the coefficient of the highest degree term of the current
// instance. Panics if the receiver is nil.
func (rp *RealPolynomial) LeadCoeff() float64 {
	if rp == nil {
		panic("received nil *RealPolynomial")
	}
	return rp.coeffs[len(rp.coeffs)-1]
}
// ShiftRight shifts the coefficients of each term in the current instance
// rightwards by offset and returns the resulting polynomial.
// The current instance is not modified.
// A right shift by N is equivalent to multiplying the current instance by x^N.
func (rp *RealPolynomial) ShiftRight(offset int) *RealPolynomial {
	if rp == nil {
		panic("received nil *RealPolynomial")
	}
	if offset < 0 {
		panic("invalid offset")
	}
	// Prepend |offset| zero coefficients, which multiplies by x^offset.
	shifted := make([]float64, offset, rp.NumCoeffs()+offset)
	shifted = append(shifted, rp.coeffs...)
	result, _ := NewRealPolynomial(shifted) // safe call
	return result
}
// Equal returns true if the current instance is equal to rp2. Otherwise,
// false is returned.
func (rp1 *RealPolynomial) Equal(rp2 *RealPolynomial) bool {
	if rp1 == nil || rp2 == nil {
		panic("received nil *RealPolynomial")
	}
	// Differing lengths means differing degrees, hence unequal.
	if len(rp1.coeffs) != len(rp2.coeffs) {
		return false
	}
	for i, c := range rp1.coeffs {
		if c != rp2.coeffs[i] {
			return false
		}
	}
	return true
}
// IsZero returns true if current instance is equal to the zero polynomial.
// Otherwise, false is returned. Relies on trailing zeroes being stripped, so
// the zero polynomial is exactly the single coefficient 0.
func (rp *RealPolynomial) IsZero() bool {
	if rp == nil {
		panic("received nil *RealPolynomial")
	}
	return rp.Degree() == 0 && rp.coeffs[0] == 0.0
}
// IsDegree returns true if current instance is of degree n. Otherwise, false
// is returned.
func (rp *RealPolynomial) IsDegree(n int) bool {
	if rp == nil {
		panic("received nil *RealPolynomial")
	}
	return rp.Degree() == n
}
// CoeffAtDegree returns the coefficient at degree n.
//
// n should be positive.
// NOTE(review): there is no bounds check — n < 0 or n > Degree() panics with
// an index-out-of-range error; confirm callers guarantee the range.
func (rp *RealPolynomial) CoeffAtDegree(n int) float64 {
	if rp == nil {
		panic("received nil *RealPolynomial")
	}
	return rp.coeffs[n]
}
// Add adds the current instance and rp2 and returns the sum.
// The current instance is also set to the sum. rp2's coefficient slice may be
// temporarily padded with zeroes and is re-stripped before returning, leaving
// its value unchanged.
func (rp1 *RealPolynomial) Add(rp2 *RealPolynomial) *RealPolynomial {
	if rp1 == nil || rp2 == nil {
		panic("received nil *RealPolynomial")
	}
	// Pad the "shorter" polynomial with 0s so both operands have the same
	// number of coefficients. Computing the max directly removes the dead
	// equal-length branch the previous >= / < chain could never reach.
	maxNumCoeffs := rp1.NumCoeffs()
	if n := rp2.NumCoeffs(); n > maxNumCoeffs {
		maxNumCoeffs = n
	}
	for rp1.NumCoeffs() < maxNumCoeffs {
		rp1.coeffs = append(rp1.coeffs, 0.0)
	}
	for rp2.NumCoeffs() < maxNumCoeffs {
		rp2.coeffs = append(rp2.coeffs, 0.0)
	}
	// Add coefficients with matching degrees.
	sumCoeffs := make([]float64, maxNumCoeffs)
	for i := 0; i < maxNumCoeffs; i++ {
		sumCoeffs[i] = rp1.coeffs[i] + rp2.coeffs[i]
	}
	rp1.coeffs = stripTailingZeroes(sumCoeffs)
	rp2.coeffs = stripTailingZeroes(rp2.coeffs)
	return rp1
}
// Sub subtracts rp2 from the current instance and returns the difference.
// The current instance is also set to the difference. rp2's coefficient slice
// may be temporarily padded with zeroes and is re-stripped before returning.
func (rp1 *RealPolynomial) Sub(rp2 *RealPolynomial) *RealPolynomial {
	if rp1 == nil || rp2 == nil {
		panic("received nil *RealPolynomial")
	}
	// Pad the shorter polynomial with zero coefficients so both operands
	// have the same length.
	n := rp1.NumCoeffs()
	if m := rp2.NumCoeffs(); m > n {
		n = m
	}
	for rp1.NumCoeffs() < n {
		rp1.coeffs = append(rp1.coeffs, 0.0)
	}
	for rp2.NumCoeffs() < n {
		rp2.coeffs = append(rp2.coeffs, 0.0)
	}
	// Subtract coefficients of matching degree.
	diffCoeffs := make([]float64, n)
	for i := range diffCoeffs {
		diffCoeffs[i] = rp1.coeffs[i] - rp2.coeffs[i]
	}
	rp1.coeffs = stripTailingZeroes(diffCoeffs)
	rp2.coeffs = stripTailingZeroes(rp2.coeffs)
	return rp1
}
// MulNaive multiplies the current instance with rp2 and returns the product.
// The current instance is also set to the product.
//
// It is not recommended to use this function. Use Mul instead.
func (rp1 *RealPolynomial) MulNaive(rp2 *RealPolynomial) *RealPolynomial {
	if rp1 == nil || rp2 == nil {
		panic("received nil *RealPolynomial")
	}
	// Schoolbook multiplication: every coefficient pair contributes to the
	// term whose degree is the sum of their degrees.
	prod := make([]float64, rp1.Degree()+rp2.Degree()+1)
	for i, a := range rp1.coeffs {
		for j, b := range rp2.coeffs {
			// += because multiple pairs land on the same degree.
			prod[i+j] += a * b
		}
	}
	rp1.coeffs = stripTailingZeroes(prod)
	return rp1
}
// Mul multiplies the current instance with rp2 and returns the product.
// The current instance is also set to the product.
// The product is computed as an FFT-based convolution.
func (rp1 *RealPolynomial) Mul(rp2 *RealPolynomial) *RealPolynomial {
	if rp1 == nil || rp2 == nil {
		panic("received nil *RealPolynomial")
	}
	lenRp1 := len(rp1.coeffs)
	lenRp2 := len(rp2.coeffs)
	// The transform length must be a power of two at least as large as the
	// product's coefficient count (deg1 + deg2 + 1).
	padLen := nextClosestPowerOfTwo(lenRp1 + lenRp2 - 1)
	coeffs1 := make([]float64, padLen)
	coeffs2 := make([]float64, padLen)
	copy(coeffs1, rp1.coeffs)
	copy(coeffs2, rp2.coeffs)
	// With the FFT, we can run in O(n log n) time.
	fa := fastFourierTransform(complex128Slice(coeffs1))
	fb := fastFourierTransform(complex128Slice(coeffs2))
	// Pointwise product in the frequency domain corresponds to
	// convolution (polynomial multiplication) in the coefficient domain.
	fc := make([]complex128, padLen)
	for i := 0; i < padLen; i++ {
		fc[i] = fa[i] * fb[i]
	}
	tmpCoeffs := float64Slice(inverseFastFourierTransform(fc))
	// Normalize by the transform length (the inverse transform used here
	// appears to be unscaled — confirm against its implementation).
	for i, c := range tmpCoeffs {
		tmpCoeffs[i] = c / float64(padLen)
	}
	// Truncate the padding back to the true product length before
	// stripping trailing zeroes.
	rp1.coeffs = stripTailingZeroes(tmpCoeffs[:rp1.Degree()+rp2.Degree()+1])
	return rp1
}
// MulS multiplies the current instance with the scalar s and returns the product.
// The current instance is also set to the product.
func (rp *RealPolynomial) MulS(s float64) *RealPolynomial {
	// Nil-receiver guard, for consistency with every other method on this
	// type (previously missing here).
	if rp == nil {
		panic("received nil *RealPolynomial")
	}
	for i := range rp.coeffs {
		rp.coeffs[i] *= s
	}
	return rp
}
// EuclideanDiv divides the current instance by rp2 and returns the result as a
// quotient-remainder pair.
// The current instance is also set to the quotient.
// Panics if rp2 is the zero polynomial.
func (rp1 *RealPolynomial) EuclideanDiv(rp2 *RealPolynomial) (*RealPolynomial, *RealPolynomial) {
	if rp1 == nil || rp2 == nil {
		panic("received nil *RealPolynomial")
	}
	if rp2.IsZero() {
		panic("RealPolynomial division by zero")
	}
	// If the divisor's degree exceeds the dividend's, the quotient is zero
	// and the remainder is the dividend. Without this guard the make()
	// below would panic on a negative length.
	if rp1.Degree() < rp2.Degree() {
		rem := *rp1
		rp1.coeffs = []float64{0}
		return rp1, &rem
	}
	// Using special properties of the ordered coefficient system, we can divide polynomials
	// via shifts:
	// https://rosettacode.org/wiki/Polynomial_long_division
	quotCoeffs := make([]float64, rp1.Degree()-rp2.Degree()+1)
	var d *RealPolynomial
	var shift int
	var factor float64
	rem := *rp1
	for rem.Degree() >= rp2.Degree() {
		// Cancel the remainder's leading term against the shifted divisor.
		shift = rem.Degree() - rp2.Degree()
		d = rp2.ShiftRight(shift)
		factor = rem.LeadCoeff() / d.LeadCoeff()
		quotCoeffs[shift] = factor
		d.MulS(factor)
		rem.Sub(d)
	}
	rp1.coeffs = quotCoeffs
	return rp1, &rem
}
// String returns a string representation of the current instance as a sum of
// terms in increasing degree order, e.g. "5.000000x^0 + 4.000000x^1".
func (rp *RealPolynomial) String() string {
	if rp == nil {
		panic("received nil *RealPolynomial")
	}
	var expr string
	for d, c := range rp.coeffs {
		// Separate consecutive terms with " + ".
		if d > 0 {
			expr += " + "
		}
		expr += fmt.Sprintf("%fx^%d", c, d)
	}
	return expr
}
// String returns the point formatted as "(x, y)" using %f precision.
func (p Point) String() string {
	return fmt.Sprintf("(%f, %f)", p.X, p.Y)
}
// Print writes the string expression of the current instance in increasing
// sum form to standard output.
func (rp *RealPolynomial) Print() {
	if rp == nil {
		panic("received nil *RealPolynomial")
	}
	fmt.Println(rp)
}
// --- END STRUCT METHODS ---
// NewRealPolynomial returns a new *RealPolynomial instance with the given
// coeffs. An error is returned if coeffs is empty.
func NewRealPolynomial(coeffs []float64) (*RealPolynomial, error) {
	if len(coeffs) == 0 {
		return nil, errors.New("cannot create polynomial with no coefficients")
	}
	// Trailing zero coefficients are stripped so degree bookkeeping stays
	// consistent.
	return &RealPolynomial{coeffs: stripTailingZeroes(coeffs)}, nil
}
// SetNewtonIterations sets the number of iterations used in Newton's Method
// implementation in root solving functions. An error is returned for
// negative n; n == 0 is accepted.
func SetNewtonIterations(n int) error {
	if n < 0 {
		return errors.New("cannot set negative iterations for Newton's Method")
	}
	globalNewtonIterations = n
	return nil
}
package lit
import (
"fmt"
"github.com/mb0/diff"
"xelf.org/xelf/cor"
"xelf.org/xelf/knd"
)
// Delta is a list of path edits that describe a transformation from one value to another.
type Delta []KeyVal

// Diff returns delta between values a and b or an error. The result can be applied to a to get b.
// The simplest and correct answer is always to return b. We however do make some effort to find a
// simpler set of changes, but do not guarantee to return the shortest edit path.
func Diff(a, b Val) (Delta, error) { return diffVals(a, b, ".", nil) }
// diffVals appends the edits transforming a into b (under path prefix pre)
// to d. Key-like pairs are diffed per key, list-like pairs per index; any
// other difference is a wholesale replacement with b.
func diffVals(a, b Val, pre string, d Delta) (Delta, error) {
	if ak, ok := a.(Keyr); ok {
		if bk, ok := b.(Keyr); ok {
			return diffKeyr(ak, bk, pre, d)
		}
	} else if as, ok := toVals(a); ok {
		if bs, ok := toVals(b); ok {
			return diffIdxr(a, b, as, bs, pre, d)
		}
	} else if Equal(a, b) {
		// Nothing changed; no edit required.
		return d, nil
	}
	return append(d, KeyVal{stripTailDot(pre), b}), nil
}
// Apply applies edits d to mutable a or returns an error.
// A key ending in '+' appends to a list, '*' applies splice-style list ops,
// and '-' deletes a map key; all other keys assign the value at that path.
// NOTE(review): the three special ops return immediately and skip any
// remaining edits in d — confirm this early return is intended.
func Apply(reg *Reg, mut Mut, d Delta) error {
	for _, kv := range d {
		key := kv.Key
		// Only dotted keys (other than the bare root ".") can carry a
		// trailing operation marker.
		if key != "" && key != "." && key[0] == '.' {
			lst := len(key) - 1
			if suf := key[lst]; suf == '+' {
				return applyListAppend(mut, key[:lst], kv.Val)
			} else if suf == '*' {
				return applyListOps(mut, key[:lst], kv.Val)
			} else if suf == '-' {
				return applyKeyrDel(mut, key[:lst])
			}
		}
		p, err := cor.ParsePath(key)
		if err != nil {
			return err
		}
		err = CreatePath(reg, mut, p, kv.Val)
		if err != nil {
			return err
		}
	}
	return nil
}
// selMut parses path and resolves it relative to mut. The last path segment
// is always returned in s. With full set, the whole path is resolved;
// otherwise the final segment is dropped before resolving so the caller can
// operate on it itself. The resolved value must be mutable.
func selMut(mut Mut, path string, full bool) (res Mut, p cor.Path, s cor.Seg, err error) {
	p, err = cor.ParsePath(path)
	if err != nil {
		return
	}
	// An empty path selects mut itself.
	if len(p) == 0 {
		return mut, p, s, nil
	}
	lst := len(p) - 1
	s = p[lst]
	if !full {
		// Drop the final segment; the caller handles it.
		p = p[:lst]
	}
	if len(p) > 0 {
		var found Val
		found, err = SelectPath(mut, p)
		if err != nil {
			return
		}
		m, ok := found.(Mut)
		if !ok {
			err = fmt.Errorf("expect mutable got %T", found)
			return
		}
		mut = m
	}
	return mut, p, s, nil
}
// applyKeyrDel deletes the key named by the final segment of path from the
// keyed value at the preceding path.
func applyKeyrDel(mut Mut, path string) error {
	target, _, seg, err := selMut(mut, path, false)
	if err != nil {
		return err
	}
	if seg.Key == "" {
		return fmt.Errorf("expect key got %v in %s", seg, path)
	}
	keyr, ok := Unwrap(target).(Keyr)
	if !ok {
		return fmt.Errorf("expect keyr got %T", target)
	}
	// Setting a key to nil removes it.
	return keyr.SetKey(seg.Key, nil)
}
// applyListAppend appends the values in v to the list value at key.
func applyListAppend(mut Mut, key string, v Val) error {
	target, _, _, err := selMut(mut, key, true)
	if err != nil {
		return err
	}
	args, ok := toVals(v)
	if !ok {
		return fmt.Errorf("expect list ops got %T", v)
	}
	vals, ok := toVals(Unwrap(target))
	if !ok {
		return fmt.Errorf("expect list ops list target got %T", target)
	}
	// Build a fresh slice: existing values followed by the appended ones.
	merged := make([]Val, 0, len(vals)+len(args))
	merged = append(merged, vals...)
	merged = append(merged, args...)
	return target.Assign(&List{Vals: merged})
}
// applyListOps applies a splice-style op list to the list value at key.
// Each op is either an int n (n > 0 retains the next n source elements,
// n < 0 skips/deletes the next -n) or a list of values to insert at the
// current position. Source elements after the last op are retained.
func applyListOps(mut Mut, key string, v Val) error {
	mut, _, _, err := selMut(mut, key, true)
	if err != nil {
		return err
	}
	ops, ok := toVals(v)
	if !ok {
		return fmt.Errorf("expect list ops got %T", v)
	}
	vals, ok := toVals(Unwrap(mut))
	if !ok {
		return fmt.Errorf("expect list ops list target got %T", mut)
	}
	res := make([]Val, 0, len(vals))
	// ret counts retained source elements and del counts deleted ones;
	// their sum is the current read position in vals.
	var ret, del int
	for _, op := range ops {
		if op.Type().Kind&knd.Int != 0 {
			n, err := ToInt(op)
			if err != nil {
				return err
			}
			if n > 0 {
				// Retain the next n source elements.
				idx := ret + del
				res = append(res, vals[idx:idx+int(n)]...)
				ret += int(n)
			} else if n < 0 {
				// Skip (delete) the next -n source elements.
				del += int(-n)
			} else {
				return fmt.Errorf("unexpected zero ops")
			}
		} else if op.Type().Kind&knd.List != 0 {
			// Insert the given values at the current position.
			vs, ok := toVals(op)
			if !ok {
				return fmt.Errorf("expect list op vals list got %T", v)
			}
			res = append(res, vs...)
		}
	}
	// Retain whatever remains after the last op.
	if idx := ret + del; idx < len(vals) {
		res = append(res, vals[idx:]...)
	}
	return mut.Assign(&List{Vals: res})
}
// diffIdxr appends edits transforming list value a (elements aa) into b
// (elements bb) to d. It prefers, in order: no edit, wholesale replacement,
// the special append op ("<pre>+"), a single-element sub-diff at an index
// path, and finally a generic splice op list ("<pre>*").
func diffIdxr(a, b Val, aa, bb []Val, pre string, d Delta) (Delta, error) {
	chgs := diff.Diff(len(aa), len(bb), &valsDiff{aa, bb})
	if len(chgs) == 0 {
		return d, nil
	}
	// how much and how often we retain and delete from a and insert from b
	ops, t := diffToOps(chgs, aa, bb)
	if !t.changed() {
		return d, nil
	} else if t.replaced() {
		// Nothing is retained, so replacing the whole value is simplest.
		d = append(d, KeyVal{stripTailDot(pre), b})
		return d, nil
	}
	// we have at least two ops and known at least one of them to be ret and one del or ins
	// ops of the same kind are merged and do not follow each other
	// we want to detect append and use special syntax. append does only occur when we have
	// two ops u,v where u is ret and v is ins
	if len(ops) == 2 && ops[0].N > 0 && ops[1].N == 0 {
		// lets return the special append op
		d = append(d, KeyVal{stripTailDot(pre) + "+", &List{Vals: ops[1].V}})
		return d, nil
	}
	// we also want to detect replacing a single element and use idx path notation. that does
	// only occur in two instances:
	// we have three ops u,v,w. retn is 1. v is del or ins and either u or w is the other
	// and we have four ops u,v,w,x. retn is 2. v and w are del and ins
	if len(ops) == 3 && t.retn == 1 || len(ops) == 4 && t.retn == 2 {
		u, v, w := ops[0], ops[1], ops[2]
		if v.N == -1 {
			if len(w.V) == 1 {
				return diffSub(aa[u.N], w.V[0], pre, u.N, d)
			}
			if t.retn == 1 && len(u.V) == 1 {
				return diffSub(aa[0], u.V[0], pre, 0, d)
			}
		} else if len(v.V) == 1 {
			if w.N == -1 {
				return diffSub(aa[u.N], v.V[0], pre, u.N, d)
			}
			if t.retn == 1 && u.N == -1 {
				return diffSub(aa[0], v.V[0], pre, 0, d)
			}
		}
	}
	// lets return the ops as list
	d = append(d, KeyVal{stripTailDot(pre) + "*", opsToList(ops)})
	return d, nil
}
// diffSub diffs a single element pair under the index path "<pre><idx>.".
func diffSub(a, b Val, pre string, idx int, d Delta) (Delta, error) {
	return diffVals(a, b, fmt.Sprintf("%s%d.", pre, idx), d)
}
// diffKeyr appends edits transforming keyed value a into b to d. Keys only
// in b are emitted as assignments, keys in both are diffed recursively, and
// keys only in a become delete edits ("<pre><key>-"). The km map flags each
// of a's keys as unhandled (true) until it is matched against b.
func diffKeyr(a, b Keyr, pre string, d Delta) (Delta, error) {
	// we may want different behaviour for dicts and strc
	// dict keys can be deleted, strc keys only be set to zero
	// dict may be unordered while strc fields are ordered
	// lets first figure out dicts and then think about strcs. start by getting all the keys
	ak, bk := a.Keys(), b.Keys()
	// the order does not matter so create a map of a's keys
	km := make(map[string]bool, len(ak))
	for _, k := range ak {
		km[k] = true
	}
	// now check b's keys against the map
	for _, k := range bk {
		if flag, ok := km[k]; !flag {
			if !ok {
				// does not exist in a
				v, err := b.Key(k)
				if err != nil {
					return nil, err
				}
				// At the root prefix the key is emitted bare.
				path := k
				if pre != "." {
					path = pre + k
				}
				d = append(d, KeyVal{path, v})
				// mark as handled
				km[k] = false
			} // duplicate key in b
			continue
		}
		// exists in a and b
		av, err := a.Key(k)
		if err != nil {
			return nil, err
		}
		bv, err := b.Key(k)
		if err != nil {
			return nil, err
		}
		// call delta on the values
		path := pre + k
		nvals, err := diffVals(av, bv, path+".", nil)
		if err != nil {
			return nil, err
		}
		if pre == "." {
			// check for simple path and turn them into plain keys
			for i, kv := range nvals {
				if kv.Key == path {
					kv.Key = k
					nvals[i] = kv
				}
			}
		}
		// append edits and mark as handled
		d = append(d, nvals...)
		km[k] = false
	}
	// Keys of a never matched in b were removed.
	for k, v := range km {
		if v { // deleted key
			d = append(d, KeyVal{pre + k + "-", Null{}})
		}
	}
	return d, nil
}
// stripTailDot removes a single trailing '.' from s, except when s is the
// bare root path "." (or shorter), which is returned unchanged.
func stripTailDot(s string) string {
	n := len(s)
	if n < 2 || s[n-1] != '.' {
		return s
	}
	return s[:n-1]
}
package azure
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/data/aztables"
"github.com/benthosdev/benthos/v4/internal/batch/policy"
"github.com/benthosdev/benthos/v4/internal/bloblang/field"
"github.com/benthosdev/benthos/v4/internal/bundle"
"github.com/benthosdev/benthos/v4/internal/component/output"
"github.com/benthosdev/benthos/v4/internal/component/output/batcher"
"github.com/benthosdev/benthos/v4/internal/component/output/processors"
"github.com/benthosdev/benthos/v4/internal/docs"
"github.com/benthosdev/benthos/v4/internal/log"
"github.com/benthosdev/benthos/v4/internal/message"
)
// init registers the azure_table_storage output component, including its
// documentation and configuration field spec, with the global output bundle.
func init() {
	err := bundle.AllOutputs.Add(processors.WrapConstructor(newAzureTableStorageOutput), docs.ComponentSpec{
		Name:    "azure_table_storage",
		Status:  docs.StatusBeta,
		Version: "3.36.0",
		Summary: `Stores message parts in an Azure Table Storage table.`,
		Description: output.Description(true, true, `
Only one authentication method is required, `+"`storage_connection_string`"+` or `+"`storage_account` and `storage_access_key`"+`. If both are set then the `+"`storage_connection_string`"+` is given priority.
In order to set the `+"`table_name`"+`, `+"`partition_key`"+` and `+"`row_key`"+` you can use function interpolations described [here](/docs/configuration/interpolation#bloblang-queries), which are calculated per message of a batch.
If the `+"`properties`"+` are not set in the config, all the `+"`json`"+` fields are marshaled and stored in the table, which will be created if it does not exist.
The `+"`object`"+` and `+"`array`"+` fields are marshaled as strings. e.g.:
The JSON message:
`+"```json"+`
{
  "foo": 55,
  "bar": {
    "baz": "a",
    "bez": "b"
  },
  "diz": ["a", "b"]
}
`+"```"+`
Will store in the table the following properties:
`+"```yml"+`
foo: '55'
bar: '{ "baz": "a", "bez": "b" }'
diz: '["a", "b"]'
`+"```"+`
It's also possible to use function interpolations to get or transform the properties values, e.g.:
`+"```yml"+`
properties:
  device: '${! json("device") }'
  timestamp: '${! json("timestamp") }'
`+"```"+``),
		Config: docs.FieldComponent().WithChildren(
			docs.FieldString(
				"storage_account",
				"The storage account to upload messages to. This field is ignored if `storage_connection_string` is set.",
			),
			docs.FieldString(
				"storage_access_key",
				"The storage account access key. This field is ignored if `storage_connection_string` is set.",
			),
			docs.FieldString(
				"storage_connection_string",
				"A storage account connection string. This field is required if `storage_account` and `storage_access_key` are not set.",
			),
			docs.FieldString("table_name", "The table to store messages into.",
				`${!meta("kafka_topic")}`,
			).IsInterpolated(),
			docs.FieldString("partition_key", "The partition key.",
				`${!json("date")}`,
			).IsInterpolated(),
			docs.FieldString("row_key", "The row key.",
				`${!json("device")}-${!uuid_v4()}`,
			).IsInterpolated(),
			docs.FieldString("properties", "A map of properties to store into the table.").IsInterpolated().Map(),
			docs.FieldString("insert_type", "Type of insert operation").HasOptions(
				"INSERT", "INSERT_MERGE", "INSERT_REPLACE",
			).IsInterpolated().Advanced(),
			docs.FieldInt("max_in_flight",
				"The maximum number of messages to have in flight at a given time. Increase this to improve throughput."),
			docs.FieldString("timeout", "The maximum period to wait on an upload before abandoning it and reattempting.").Advanced(),
			policy.FieldSpec(),
		).ChildDefaultAndTypesFromStruct(output.NewAzureTableStorageConfig()),
		Categories: []string{
			"Services",
			"Azure",
		},
	})
	if err != nil {
		// Registration failures are programmer errors; fail loudly at startup.
		panic(err)
	}
}
// newAzureTableStorageOutput builds the streamed output: a table storage
// writer wrapped in an async writer and the configured batcher.
func newAzureTableStorageOutput(conf output.Config, mgr bundle.NewManagement) (output.Streamed, error) {
	writer, err := newAzureTableStorageWriter(conf.AzureTableStorage, mgr)
	if err != nil {
		return nil, err
	}
	asyncWriter, err := output.NewAsyncWriter("azure_table_storage", conf.AzureTableStorage.MaxInFlight, writer, mgr)
	if err != nil {
		return nil, err
	}
	return batcher.NewFromConfig(conf.AzureTableStorage.Batching, asyncWriter, mgr)
}
// azureTableStorageWriter writes message batches to Azure Table Storage.
type azureTableStorageWriter struct {
	conf         output.AzureTableStorageConfig
	tableName    *field.Expression            // interpolated target table name
	partitionKey *field.Expression            // interpolated entity partition key
	rowKey       *field.Expression            // interpolated entity row key
	properties   map[string]*field.Expression // optional interpolated entity properties
	client       *aztables.ServiceClient
	timeout      time.Duration // parsed from conf.Timeout
	log          log.Modular
}
// newAzureTableStorageWriter builds a table storage writer from config,
// resolving credentials and compiling the interpolated field expressions.
// Connection-string authentication takes priority over account/key.
func newAzureTableStorageWriter(conf output.AzureTableStorageConfig, mgr bundle.NewManagement) (*azureTableStorageWriter, error) {
	var timeout time.Duration
	var err error
	if tout := conf.Timeout; len(tout) > 0 {
		if timeout, err = time.ParseDuration(tout); err != nil {
			return nil, fmt.Errorf("failed to parse timeout period string: %v", err)
		}
	}
	if conf.StorageAccount == "" && conf.StorageConnectionString == "" {
		return nil, errors.New("invalid azure storage account credentials")
	}
	var client *aztables.ServiceClient
	if conf.StorageConnectionString != "" {
		if strings.Contains(conf.StorageConnectionString, "UseDevelopmentStorage=true;") {
			// Only here to support legacy configs that pass UseDevelopmentStorage=true;
			// `UseDevelopmentStorage=true` is not available in the current SDK, neither `storage.NewEmulatorClient()` (which was used in the previous SDK).
			// Instead, we use the http connection string to connect to the emulator endpoints with the default table storage port.
			// https://docs.microsoft.com/en-us/azure/storage/common/storage-use-azurite?tabs=visual-studio#http-connection-strings
			client, err = aztables.NewServiceClientFromConnectionString("DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=<KEY>;TableEndpoint=http://127.0.0.1:10002/devstoreaccount1;", nil)
		} else {
			client, err = aztables.NewServiceClientFromConnectionString(conf.StorageConnectionString, nil)
		}
	} else {
		cred, credErr := aztables.NewSharedKeyCredential(conf.StorageAccount, conf.StorageAccessKey)
		if credErr != nil {
			// Wrap credErr, not the stale outer err (which is nil at
			// this point), so the actual failure reason is reported.
			return nil, fmt.Errorf("invalid azure storage account credentials: %v", credErr)
		}
		client, err = aztables.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.table.core.windows.net/", conf.StorageAccount), cred, nil)
	}
	if err != nil {
		return nil, fmt.Errorf("invalid azure storage account credentials: %v", err)
	}
	a := &azureTableStorageWriter{
		conf:    conf,
		log:     mgr.Logger(),
		timeout: timeout,
		client:  client,
	}
	// Compile every interpolated expression up front so config errors are
	// surfaced at construction time.
	if a.tableName, err = mgr.BloblEnvironment().NewField(conf.TableName); err != nil {
		return nil, fmt.Errorf("failed to parse table name expression: %v", err)
	}
	if a.partitionKey, err = mgr.BloblEnvironment().NewField(conf.PartitionKey); err != nil {
		return nil, fmt.Errorf("failed to parse partition key expression: %v", err)
	}
	if a.rowKey, err = mgr.BloblEnvironment().NewField(conf.RowKey); err != nil {
		return nil, fmt.Errorf("failed to parse row key expression: %v", err)
	}
	a.properties = make(map[string]*field.Expression)
	for property, value := range conf.Properties {
		if a.properties[property], err = mgr.BloblEnvironment().NewField(value); err != nil {
			return nil, fmt.Errorf("failed to parse property expression: %v", err)
		}
	}
	return a, nil
}
// ConnectWithContext is a no-op: the service client is constructed eagerly in
// newAzureTableStorageWriter, so there is nothing to establish here.
func (a *azureTableStorageWriter) ConnectWithContext(ctx context.Context) error {
	return nil
}
// WriteWithContext converts each message of the batch into a table entity,
// groups the entities by table name and partition key, and submits the
// groups as transactions via execBatch.
func (a *azureTableStorageWriter) WriteWithContext(wctx context.Context, msg *message.Batch) error {
	writeReqs := make(map[string]map[string][]*aztables.EDMEntity)
	if err := output.IterateBatchedSend(msg, func(i int, p *message.Part) error {
		entity := &aztables.EDMEntity{}
		tableName := a.tableName.String(i, msg)
		// Evaluate the partition key expression exactly once per message.
		// Interpolation functions need not be deterministic (e.g.
		// uuid_v4), so evaluating twice could make the entity's
		// PartitionKey diverge from the grouping key used below.
		partitionKey := a.partitionKey.String(i, msg)
		entity.PartitionKey = partitionKey
		entity.RowKey = a.rowKey.String(i, msg)
		entity.Properties = a.getProperties(i, p, msg)
		if writeReqs[tableName] == nil {
			writeReqs[tableName] = make(map[string][]*aztables.EDMEntity)
		}
		writeReqs[tableName][partitionKey] = append(writeReqs[tableName][partitionKey], entity)
		return nil
	}); err != nil {
		return err
	}
	return a.execBatch(writeReqs)
}
// getProperties builds the entity property map for message part p. With
// configured properties, each interpolated expression is resolved; otherwise
// every JSON field of the message is stored, with nested objects and arrays
// marshaled back into strings.
func (a *azureTableStorageWriter) getProperties(i int, p *message.Part, msg *message.Batch) map[string]interface{} {
	properties := make(map[string]interface{})
	if len(a.properties) > 0 {
		for name, expr := range a.properties {
			properties[name] = expr.String(i, msg)
		}
		return properties
	}
	if err := json.Unmarshal(p.Get(), &properties); err != nil {
		a.log.Errorf("error unmarshalling message: %v.", err)
	}
	for name, v := range properties {
		switch v.(type) {
		case []interface{}, map[string]interface{}:
			// Flatten composite values into their JSON string form.
			m, err := json.Marshal(v)
			if err != nil {
				a.log.Errorf("error marshaling property: %v.", name)
			}
			properties[name] = string(m)
		}
	}
	return properties
}
// execBatch creates each target table if needed (ignoring already-exists
// conflicts) and submits the grouped entities as transactions, flushed in
// chunks of at most 100 actions (Azure's per-transaction limit). All entities
// within one transaction share a partition key.
func (a *azureTableStorageWriter) execBatch(writeReqs map[string]map[string][]*aztables.EDMEntity) error {
	for tn, pks := range writeReqs {
		table := a.client.NewClient(tn)
		_, err := table.Create(context.Background(), nil)
		// Creating the table either succeeds or fails with a conflict
		// when it already exists; any other error aborts. The err != nil
		// guard is essential: without it a successful create returned
		// the nil err early and silently dropped the whole batch.
		if err != nil && !tableExists(err) {
			return err
		}
		for _, entities := range pks {
			var batch []aztables.TransactionAction
			ne := len(entities)
			for i, entity := range entities {
				batch, err = a.addToBatch(batch, a.conf.InsertType, entity)
				if err != nil {
					return err
				}
				// Flush when the batch is full or this is the final
				// entity of the group.
				if reachedBatchLimit(i) || isLastEntity(i, ne) {
					if _, err := table.SubmitTransaction(context.Background(), batch, nil); err != nil {
						return err
					}
					batch = nil
				}
			}
		}
	}
	return nil
}
// tableExists reports whether the error from a table Create call signals that
// the table already exists (HTTP 409 Conflict). A nil error yields false.
func tableExists(err error) bool {
	if err == nil {
		return false
	}
	var respErr *azcore.ResponseError
	if !errors.As(err, &respErr) {
		return false
	}
	return respErr.StatusCode == http.StatusConflict
}
// isLastEntity reports whether index i refers to the final entity in a group
// of ne entities.
func isLastEntity(i, ne int) bool {
	return i == ne-1
}
// reachedBatchLimit reports whether index i fills the last slot of a
// 100-action transaction batch.
func reachedBatchLimit(i int) bool {
	const batchSizeLimit = 100
	next := i + 1
	return next%batchSizeLimit == 0
}
// addToBatch marshals entity and appends it to batch as a transaction action
// whose type is selected by the (case-insensitive) insertType name. An
// unrecognized insertType returns the batch unchanged alongside an error; a
// marshalling failure returns a nil batch and the wrapped error.
func (a *azureTableStorageWriter) addToBatch(batch []aztables.TransactionAction, insertType string, entity *aztables.EDMEntity) ([]aztables.TransactionAction, error) {
	// Resolve the user-facing insert type name to an SDK transaction type.
	var actionType aztables.TransactionType
	switch strings.ToUpper(insertType) {
	case "ADD":
		actionType = aztables.Add
	case "INSERT", "INSERT_MERGE", "INSERTMERGE":
		actionType = aztables.InsertMerge
	case "INSERT_REPLACE", "INSERTREPLACE":
		actionType = aztables.InsertReplace
	case "UPDATE", "UPDATE_MERGE", "UPDATEMERGE":
		actionType = aztables.UpdateMerge
	case "UPDATE_REPLACE", "UPDATEREPLACE":
		actionType = aztables.UpdateReplace
	case "DELETE":
		actionType = aztables.Delete
	default:
		return batch, fmt.Errorf("invalid insert type")
	}
	// Serialize the entity as the transaction payload.
	payload, err := json.Marshal(entity)
	if err != nil {
		return nil, fmt.Errorf("error marshalling entity: %v", err)
	}
	return append(batch, aztables.TransactionAction{
		ActionType: actionType,
		Entity:     payload,
	}), nil
}
// CloseAsync begins shutting the writer down. The writer holds no background
// goroutines or buffered state here, so this is a no-op.
func (a *azureTableStorageWriter) CloseAsync() {
}
func (a *azureTableStorageWriter) WaitForClose(time.Duration) error {
return nil
} | internal/impl/azure/output_table_storage.go | 0.669529 | 0.562898 | output_table_storage.go | starcoder |
package gofun
// Zippable is the interface for zipping.
type Zippable interface {
	// Zip creates an Unzippable of pairs, where each pair combines the two
	// elements that occupy the same position in the two Zippables. The fail
	// argument must be a failure Unzippable; implementations may return it
	// when ys is not the same concrete type as the receiver.
	Zip(ys Zippable, fail Unzippable) Unzippable
}
// ZippableOrElse returns x if x is Zippable, otherwise y.
func ZippableOrElse(x interface{}, y Zippable) Zippable {
	if z, ok := x.(Zippable); ok {
		return z
	}
	return y
}
// Zip pairs the values of two Options. The result is Some(pair) only when
// both hold a value; every other case — including a ys that is not an
// *Option — yields None (the fail argument is not used).
func (xs *Option) Zip(ys Zippable, fail Unzippable) Unzippable {
	other, ok := ys.(*Option)
	if !ok || !xs.IsSome() || !other.IsSome() {
		return None()
	}
	return Some(NewPair(xs.Get(), other.Get()))
}
// Zip pairs the values of two Eithers. The result is Right(pair) when both
// operands are Right; otherwise it is the Left of xs if xs is Left, else the
// Left of ys. A ys that is not an *Either yields fail.
func (xs *Either) Zip(ys Zippable, fail Unzippable) Unzippable {
	other, ok := ys.(*Either)
	if !ok {
		return fail
	}
	if xs.IsRight() && other.IsRight() {
		return Right(NewPair(xs.GetRight(), other.GetRight()))
	}
	if xs.IsLeft() {
		return Left(xs.GetLeft())
	}
	return Left(other.GetLeft())
}
// Zip pairs the elements of two lists position by position, truncating to the
// shorter list. The result is assembled front-to-back by linking freshly
// allocated cons cells via SetTail, avoiding recursion; the input lists are
// not modified.
// NOTE(review): unlike Either.Zip, a ys that is not a *List yields Nil rather
// than the fail argument (fail is unused here) — confirm that is intended.
func (xs *List) Zip(ys Zippable, fail Unzippable) Unzippable {
	var zs *List = Nil()
	var prev *List = nil
	ys2, isOk := ys.(*List)
	if isOk {
		for l1, l2 := xs, ys2; l1.IsCons() && l2.IsCons(); l1, l2 = l1.Tail(), l2.Tail() {
			l3 := Cons(NewPair(l1.Head(), l2.Head()), Nil())
			if prev != nil {
				// Extend the result in place.
				prev.SetTail(l3)
			} else {
				// First pair becomes the head of the result.
				zs = l3
			}
			prev = l3
		}
	}
	return zs
}
// Zip pairs the elements of two InterfaceSlices position by position,
// truncating to the shorter slice. A ys that is not an InterfaceSlice yields
// an empty InterfaceSlice (the fail argument is not used).
func (xs InterfaceSlice) Zip(ys Zippable, fail Unzippable) Unzippable {
	other, ok := ys.(InterfaceSlice)
	if !ok {
		return InterfaceSlice([]interface{}{})
	}
	length := len(other)
	if len(xs) < length {
		length = len(xs)
	}
	zs := make([]interface{}, 0, length)
	for i := 0; i < length; i++ {
		zs = append(zs, NewPair(xs[i], other[i]))
	}
	return InterfaceSlice(zs)
}
func (xs InterfacePairFunction) Zip(ys Zippable, fail Unzippable) Unzippable {
ys2, isOk := ys.(InterfacePairFunction)
if isOk {
return InterfacePairFunction(func(x interface{}) interface{} {
return NewPair(xs(x), ys2(x))
})
} else {
return fail
}
} | zippable.go | 0.640523 | 0.484319 | zippable.go | starcoder |
package main
import (
"github.com/faiface/pixel"
"github.com/faiface/pixel/pixelgl"
)
// chunk handles triangle data and batch drawing for one square region of the
// world. Foreground chunks rebuild their triangles on demand and draw via a
// batch; background chunks are baked into a sprite once and drawn as-is.
type chunk struct {
	dirty     bool                 // true when triangles must be rebuilt before drawing
	batch     *pixel.Batch         // batch over triangles; used on the fgChunk draw path
	triangles *pixel.TrianglesData // vertex data produced by build()
	bounds    *Bounds              // world-space position and size of the chunk
	cType     chunkType            // fgChunk or bgChunk; selects the draw path
	sprite    *pixel.Sprite        // cached rendering; used on the bgChunk draw path
}
// Impl. the Entity interface
// hit is a no-op: a chunk does not react to being hit itself.
func (c *chunk) hit(x, y, vx, vy float64, power int) {
}
// getPosition returns the chunk's bounds origin as a world-space vector.
func (c *chunk) getPosition() pixel.Vec {
	return pixel.Vec{X: c.bounds.X, Y: c.bounds.Y}
}
// Create a new chunk
// create initializes the chunk at world position (x, y) covering a
// pixels×pixels square. The triangle store is pre-sized and the chunk is
// marked dirty so the first draw triggers a build.
func (c *chunk) create(x, y float64, pixels int) {
	c.dirty = true
	c.triangles = pixel.MakeTrianglesData(400) // Init with some suitable value
	c.batch = pixel.NewBatch(c.triangles, nil)
	c.bounds = &Bounds{
		X:      x,
		Y:      y,
		Width:  float64(pixels),
		Height: float64(pixels),
		entity: entity(c),
	}
}
// Draw the chunk.
// Foreground chunks rebuild their triangle batch when dirty and draw it every
// frame; background chunks draw their pre-baked sprite moved to the center of
// the chunk's bounds.
func (c *chunk) draw(dt, elapsed float64) {
	if c.cType == fgChunk {
		if c.dirty {
			c.build()
		}
		c.batch.Draw(global.gWin)
	} else {
		c.sprite.Draw(global.gWin, pixel.IM.Moved(pixel.V(c.bounds.X+c.bounds.Width/2, c.bounds.Y+c.bounds.Height/2)))
	}
}
// Rebuild/Build the chunk.
func (c *chunk) build() {
// start := time.Now()
i := 0
rc := uint32(0)
gc := uint32(0)
bc := uint32(0)
p2 := uint32(0)
r1 := uint32(0)
g1 := uint32(0)
b1 := uint32(0)
draw := 0
sameX := 1.0
sameY := 1.0
pos := 0
px := 0.0
py := 0.0
xpos := 0.0
for x := 0.0; x < c.bounds.Width; x++ {
for y := 0.0; y < c.bounds.Height; y++ {
p := global.gWorld.pixels[int(float64(global.gWorld.width)*(x+c.bounds.X)+(y+c.bounds.Y))]
// Skip visisted or empty
if p == 0 || p&0xFF>>7 == 0 {
continue
}
if p&0xFF != wBackground8 && c.cType == bgChunk {
continue
}
if p&0xFF == wBackground8 && c.cType == fgChunk {
continue
}
rc = p >> 24 & 0xFF
gc = p >> 16 & 0xFF
bc = p >> 8 & 0xFF
sameX = 1.0
sameY = 1.0
// Greedy algorithm to check for range of colors.
// Use first bit in alpha to check for if it has been visited or not.
// It's not being used anyway. Or at least for now :)
// First check how far we can go with the same pixel color
// For each X, walk as long as possible towards Y
for l := x + 1; l < c.bounds.Width; l++ {
// Check color
xpos = float64(global.gWorld.width) * (l + c.bounds.X)
pos = int(xpos + (y + c.bounds.Y))
p2 = global.gWorld.pixels[pos]
if p2 == 0 {
break
}
r1 = p2 >> 24 & 0xFF
g1 = p2 >> 16 & 0xFF
b1 = p2 >> 8 & 0xFF
if r1 == rc && g1 == gc && b1 == bc && ((p2&0xFF)>>7) == 1 {
if p2&0xFF != wBackground8 && c.cType == bgChunk {
break
}
// Same color and not yet visited!
global.gWorld.pixels[pos] &= 0xFFFFFF7F
sameX++
newY := 1.0
for k := y; k < c.bounds.Height; k++ {
pos = int(xpos + (k + c.bounds.Y))
p2 = global.gWorld.pixels[pos]
r1 = p2 >> 24 & 0xFF
g1 = p2 >> 16 & 0xFF
b1 = p2 >> 8 & 0xFF
if r1 == rc && g1 == gc && b1 == bc && ((p2&0xFF)>>7) == 1 {
if p2&0xFF != wBackground8 && c.cType == bgChunk {
break
}
global.gWorld.pixels[pos] &= 0xFFFFFF7F
newY++
} else {
break
}
}
if newY < sameY {
break
} else {
sameY = newY
}
} else {
break
}
}
px = x + c.bounds.X
py = y + c.bounds.Y
draw++
// Convert to decimal
r := float64(p>>24&0xFF) / 255.0
g := float64(p>>16&0xFF) / 255.0
b := float64(p>>8&0xFF) / 255.0
a := float64(p&0xFF) / 255.0
// Increase length of triangles if we need to draw more than we had before.
// Add a buffer so we can skip a few increments.
if draw*6 >= len(*c.triangles) {
c.triangles.SetLen(draw*6 + 60)
}
// Size of triangle is given by how large the greedy algorithm found out.
(*c.triangles)[i].Position = pixel.Vec{X: px, Y: py}
(*c.triangles)[i+1].Position = pixel.Vec{X: px + sameX, Y: py}
(*c.triangles)[i+2].Position = pixel.Vec{X: px + sameX, Y: py + sameY}
(*c.triangles)[i+3].Position = pixel.Vec{X: px, Y: py}
(*c.triangles)[i+4].Position = pixel.Vec{X: px, Y: py + sameY}
(*c.triangles)[i+5].Position = pixel.Vec{X: px + sameX, Y: py + sameY}
for n := 0; n < 6; n++ {
(*c.triangles)[i+n].Color = pixel.RGBA{R: r, G: g, B: b, A: a}
}
i += 6
}
}
// Reset the greedy bit
for x := 0.0; x < c.bounds.Width; x++ {
for y := 0.0; y < c.bounds.Height; y++ {
global.gWorld.pixels[int(float64(global.gWorld.width)*(x+c.bounds.X)+(y+c.bounds.Y))] |= 0x00000080
}
}
// elapsed := time.Since(start)
// Debug("Build took %s", elapsed, "SKIP:", skip, "Draw:", draw, "Total:", len(*c.triangles)/6, "Decr:", 100.0-((float64(draw)*6.0)/(float64(wPixelsPerChunk*wPixelsPerChunk)*6.0)*100.0), "%")
c.triangles.SetLen(draw * 6)
c.batch.Dirty()
c.dirty = false
// If background we build our sprite
if c.cType == bgChunk {
canvas := pixelgl.NewCanvas(pixel.R(0, 0, float64(global.gWorld.width), float64(global.gWorld.height)))
c.batch.Draw(canvas)
c.sprite = pixel.NewSprite(canvas, pixel.R(c.bounds.X, c.bounds.Y, c.bounds.X+c.bounds.Width, c.bounds.Y+c.bounds.Height))
c.triangles.SetLen(0)
c.batch = nil
}
} | chunk.go | 0.68679 | 0.409398 | chunk.go | starcoder |
package cruzbit
import (
"golang.org/x/crypto/ed25519"
)
// BranchType indicates the type of branch a particular block resides on.
// Only blocks currently on the main branch are considered confirmed and only
// transactions in those blocks affect public key balances.
// Values are: MAIN, SIDE, ORPHAN or UNKNOWN.
type BranchType int

// Branch type values. MAIN is the confirmed most-work chain; SIDE and ORPHAN
// blocks do not affect balances; UNKNOWN means the branch is unclassified.
const (
	MAIN = iota
	SIDE
	ORPHAN
	UNKNOWN
)
// Ledger is an interface to a ledger built from the most-work chain of blocks.
// It manages and computes public key balances as well as transaction and public key transaction indices.
// It also maintains an index of the block chain by height as well as branch information.
type Ledger interface {
// GetChainTip returns the ID and the height of the block at the current tip of the main chain.
GetChainTip() (*BlockID, int64, error)
// GetBlockIDForHeight returns the ID of the block at the given block chain height.
GetBlockIDForHeight(height int64) (*BlockID, error)
// SetBranchType sets the branch type for the given block.
SetBranchType(id BlockID, branchType BranchType) error
// GetBranchType returns the branch type for the given block.
GetBranchType(id BlockID) (BranchType, error)
// ConnectBlock connects a block to the tip of the block chain and applies the transactions
// to the ledger.
ConnectBlock(id BlockID, block *Block) ([]TransactionID, error)
// DisconnectBlock disconnects a block from the tip of the block chain and undoes the effects
// of the transactions on the ledger.
DisconnectBlock(id BlockID, block *Block) ([]TransactionID, error)
// GetPublicKeyBalance returns the current balance of a given public key.
GetPublicKeyBalance(pubKey ed25519.PublicKey) (int64, error)
// GetPublicKeyBalances returns the current balance of the given public keys
// along with block ID and height of the corresponding main chain tip.
GetPublicKeyBalances(pubKeys []ed25519.PublicKey) (
map[[ed25519.PublicKeySize]byte]int64, *BlockID, int64, error)
// GetTransactionIndex returns the index of a processed transaction.
GetTransactionIndex(id TransactionID) (*BlockID, int, error)
// GetPublicKeyTransactionIndicesRange returns transaction indices involving a given public key
// over a range of heights. If startHeight > endHeight this iterates in reverse.
GetPublicKeyTransactionIndicesRange(
pubKey ed25519.PublicKey, startHeight, endHeight int64, startIndex, limit int) (
[]BlockID, []int, int64, int, error)
// Balance returns the total current ledger balance by summing the balance of all public keys.
// It's only used offline for verification purposes.
Balance() (int64, error)
// GetPublicKeyBalanceAt returns the public key balance at the given height.
// It's only used offline for historical and verification purposes.
// This is only accurate when the full block chain is indexed (pruning disabled.)
GetPublicKeyBalanceAt(pubKey ed25519.PublicKey, height int64) (int64, error)
} | ledger.go | 0.547222 | 0.55652 | ledger.go | starcoder |
package pgo
// RigidbodyFlags is a bit mask of flags controlling PhysX rigid-body
// behavior; combine the flag values below with bitwise OR.
type RigidbodyFlags uint32
const (
/**
\brief Enables kinematic mode for the actor.
Kinematic actors are special dynamic actors that are not
influenced by forces (such as gravity), and have no momentum. They are considered to have infinite
mass and can be moved around the world using the setKinematicTarget() method. They will push
regular dynamic actors out of the way. Kinematics will not collide with static or other kinematic objects.
Kinematic actors are great for moving platforms or characters, where direct motion control is desired.
You can not connect Reduced joints to kinematic actors. Lagrange joints work ok if the platform
is moving with a relatively low, uniform velocity.
<b>Sleeping:</b>
\li Setting this flag on a dynamic actor will put the actor to sleep and set the velocities to 0.
\li If this flag gets cleared, the current sleep state of the actor will be kept.
\note kinematic actors are incompatible with CCD so raising this flag will automatically clear eENABLE_CCD
@see PxRigidDynamic.setKinematicTarget()
*/
RigidbodyFlags_eKINEMATIC RigidbodyFlags = (1 << 0) //!< Enable kinematic mode for the body.
/**
\brief Use the kinematic target transform for scene queries.
If this flag is raised, then scene queries will treat the kinematic target transform as the current pose
of the body (instead of using the actual pose). Without this flag, the kinematic target will only take
effect with respect to scene queries after a simulation step.
@see PxRigidDynamic.setKinematicTarget()
*/
RigidbodyFlags_eUSE_KINEMATIC_TARGET_FOR_SCENE_QUERIES RigidbodyFlags = (1 << 1)
/**
\brief Enables swept integration for the actor.
If this flag is raised and CCD is enabled on the scene, then this body will be simulated by the CCD system to ensure that collisions are not missed due to
high-speed motion. Note individual shape pairs still need to enable PxPairFlag::eDETECT_CCD_CONTACT in the collision filtering to enable the CCD to respond to
individual interactions.
\note kinematic actors are incompatible with CCD so this flag will be cleared automatically when raised on a kinematic actor
*/
RigidbodyFlags_eENABLE_CCD RigidbodyFlags = (1 << 2) //!< Enable CCD for the body.
/**
\brief Enabled CCD in swept integration for the actor.
If this flag is raised and CCD is enabled, CCD interactions will simulate friction. By default, friction is disabled in CCD interactions because
CCD friction has been observed to introduce some simulation artifacts. CCD friction was enabled in previous versions of the SDK. Raising this flag will result in behavior
that is a closer match for previous versions of the SDK.
\note This flag requires PxRigidBodyFlag::eENABLE_CCD to be raised to have any effect.
*/
RigidbodyFlags_eENABLE_CCD_FRICTION RigidbodyFlags = (1 << 3)
/**
\brief Register a rigid body for reporting pose changes by the simulation at an early stage.
Sometimes it might be advantageous to get access to the new pose of a rigid body as early as possible and
not wait until the call to fetchResults() returns. Setting this flag will schedule the rigid body to get reported
in #PxSimulationEventCallback::onAdvance(). Please refer to the documentation of that callback to understand
the behavior and limitations of this functionality.
@see PxSimulationEventCallback::onAdvance()
*/
RigidbodyFlags_eENABLE_POSE_INTEGRATION_PREVIEW RigidbodyFlags = (1 << 4)
/**
\brief Register a rigid body to dynamicly adjust contact offset based on velocity. This can be used to achieve a CCD effect.
*/
RigidbodyFlags_eENABLE_SPECULATIVE_CCD RigidbodyFlags = (1 << 5)
/**
\brief Permit CCD to limit maxContactImpulse. This is useful for use-cases like a destruction system but can cause visual artefacts so is not enabled by default.
*/
RigidbodyFlags_eENABLE_CCD_MAX_CONTACT_IMPULSE RigidbodyFlags = (1 << 6)
/**
\brief Carries over forces/accelerations between frames, rather than clearning them
*/
RigidbodyFlags_eRETAIN_ACCELERATIONS RigidbodyFlags = (1 << 7)
/**
\brief Forces kinematic-kinematic pairs notifications for this actor.
This flag overrides the global scene-level PxPairFilteringMode setting for kinematic actors.
This is equivalent to having PxPairFilteringMode::eKEEP for pairs involving this actor.
A particular use case is when you have a large amount of kinematic actors, but you are only
interested in interactions between a few of them. In this case it is best to use set
PxSceneDesc.kineKineFilteringMode = PxPairFilteringMode::eKILL, and then raise the
eFORCE_KINE_KINE_NOTIFICATIONS flag on the small set of kinematic actors that need
notifications.
\note This has no effect if PxRigidBodyFlag::eKINEMATIC is not set.
\warning Changing this flag at runtime will not have an effect until you remove and re-add the actor to the scene.
@see PxPairFilteringMode PxSceneDesc.kineKineFilteringMode
*/
RigidbodyFlags_eFORCE_KINE_KINE_NOTIFICATIONS RigidbodyFlags = (1 << 8)
/**
\brief Forces static-kinematic pairs notifications for this actor.
Similar to eFORCE_KINE_KINE_NOTIFICATIONS, but for static-kinematic interactions.
\note This has no effect if PxRigidBodyFlag::eKINEMATIC is not set.
\warning Changing this flag at runtime will not have an effect until you remove and re-add the actor to the scene.
@see PxPairFilteringMode PxSceneDesc.staticKineFilteringMode
*/
RigidbodyFlags_eFORCE_STATIC_KINE_NOTIFICATIONS RigidbodyFlags = (1 << 9)
/**
\brief Reserved for internal usage
*/
RigidbodyFlags_eRESERVED RigidbodyFlags = (1 << 15)
) | pgo/rigidbodyflags.go | 0.719088 | 0.600393 | rigidbodyflags.go | starcoder |
package sliceutil
import (
"reflect"
)
// Compare will check if two slices are equal
// even if they aren't in the same order (i.e. equal as multisets).
// A nil argument always compares unequal.
// Inspired by github.com/stephanbaker white board sudo code
func Compare(s1, s2 interface{}) bool {
	if s1 == nil || s2 == nil {
		return false
	}
	// Normalize both iterables to []interface{}.
	slice1 := convertIterableToInterface(s1)
	slice2 := convertIterableToInterface(s2)
	if slice1 == nil || slice2 == nil {
		return false
	}
	if len(slice1) != len(slice2) {
		return false
	}
	// Tally how often each value occurs in each slice.
	counts1 := make(map[interface{}]int, len(slice1))
	counts2 := make(map[interface{}]int, len(slice2))
	for i := range slice1 {
		counts1[slice1[i]]++
		counts2[slice2[i]]++
	}
	// Equal as multisets iff every value occurs equally often in both.
	for value, n := range counts1 {
		if counts2[value] != n {
			return false
		}
	}
	return true
}
// OrderedCompare will check if two slices are equal, taking order into consideration.
// Two nil arguments are equal; exactly one nil argument is not.
func OrderedCompare(s1, s2 interface{}) bool {
	if s1 == nil && s2 == nil {
		return true
	}
	if s1 == nil || s2 == nil {
		return false
	}
	// Normalize both iterables to []interface{}.
	slice1 := convertIterableToInterface(s1)
	slice2 := convertIterableToInterface(s2)
	if slice1 == nil || slice2 == nil {
		return false
	}
	if len(slice1) != len(slice2) {
		return false
	}
	// Element-wise comparison; the first mismatch decides.
	for i := range slice1 {
		if slice1[i] != slice2[i] {
			return false
		}
	}
	return true
}
// Contains checks if a slice contains an element.
func Contains(s interface{}, e interface{}) bool {
	for _, candidate := range convertIterableToInterface(s) {
		if candidate == e {
			return true
		}
	}
	return false
}
// convertIterableToInterface takes a slice/array/string passed in as an interface{}
// and converts it to a slice of interface{} elements; for strings the result
// holds one rune per element. Unsupported kinds yield nil.
func convertIterableToInterface(s interface{}) (slice []interface{}) {
	v := reflect.ValueOf(s)
	switch v.Kind() {
	case reflect.Slice, reflect.Array:
		length := v.Len()
		slice = make([]interface{}, length)
		for i := 0; i < length; i++ {
			slice[i] = v.Index(i).Interface()
		}
	case reflect.String:
		// BUG FIX: the original pre-sized the slice with make([]interface{}, length)
		// and then appended the runes, producing len(s) nil elements in front of
		// the runes (and sizing by byte length, not rune count). Start empty and
		// append one rune per iteration instead.
		for _, r := range v.String() {
			slice = append(slice, r)
		}
	}
	return
} | sliceutil.go | 0.644225 | 0.406155 | sliceutil.go | starcoder
package sim
import (
"github.com/quells/LennardJonesGo/vector"
"math"
"math/rand"
)
// InitPositionCubic initializes particle positions in a simple cubic
// configuration: the smallest Ncube^3 lattice that can hold N particles, with
// spacing L/Ncube, centered on the origin.
//
// BUG FIX: the original assigned every lattice site (Ncube^3 of them) into R,
// which indexes past the end of R and panics whenever N is not a perfect
// cube; filling now stops after N positions.
func InitPositionCubic(N int, L float64) [][3]float64 {
	R := make([][3]float64, N)
	// Smallest cube dimension with at least N sites.
	Ncube := 1
	for N > Ncube*Ncube*Ncube {
		Ncube++
	}
	rs := L / float64(Ncube) // lattice spacing
	roffset := (L - rs) / 2  // shift that centers the lattice on the origin
	i := 0
	for x := 0; x < Ncube; x++ {
		x := float64(x)
		for y := 0; y < Ncube; y++ {
			y := float64(y)
			for z := 0; z < Ncube; z++ {
				z := float64(z)
				if i == N {
					return R
				}
				pos := vector.Scale([3]float64{x, y, z}, rs)
				offset := [3]float64{roffset, roffset, roffset}
				R[i] = vector.Difference(pos, offset)
				i++
			}
		}
	}
	return R
}
// InitPositionFCC initializes particle positions in a face-centered cubic
// configuration: the smallest lattice of Ncube^3 conventional cells (four
// basis sites each) that can hold N particles, spanning a box of side L
// anchored at -L/2.
//
// BUG FIX: the original filled every lattice site (4*Ncube^3 of them), which
// indexes past the end of R and panics whenever N is not exactly 4*Ncube^3;
// filling now stops after N positions.
func InitPositionFCC(N int, L float64) [][3]float64 {
	R := make([][3]float64, N)
	// Smallest cell count with at least N sites (4 per cell).
	Ncube := 1
	for N > 4*Ncube*Ncube*Ncube {
		Ncube++
	}
	o := -L / 2
	origin := [3]float64{o, o, o}
	rs := L / float64(Ncube) // conventional cell edge
	roffset := rs / 2        // face-center displacement
	i := 0
	for x := 0; x < Ncube; x++ {
		x := float64(x)
		for y := 0; y < Ncube; y++ {
			y := float64(y)
			for z := 0; z < Ncube; z++ {
				z := float64(z)
				pos := vector.Scale([3]float64{x, y, z}, rs)
				pos = vector.Sum(pos, origin)
				// The four FCC basis sites of this cell.
				sites := [4][3]float64{
					pos,
					vector.Sum(pos, [3]float64{roffset, roffset, 0}),
					vector.Sum(pos, [3]float64{roffset, 0, roffset}),
					vector.Sum(pos, [3]float64{0, roffset, roffset}),
				}
				for _, site := range sites {
					if i == N {
						return R
					}
					R[i] = site
					i++
				}
			}
		}
	}
	return R
}
// InitVelocity initializes particle velocities selected from a random distribution.
// Ensures that the net momentum of the system is zero and scales the average kinetic energy to match a given temperature.
// NOTE(review): rand.Seed(1) makes every call produce the same velocities —
// confirm the fixed seed is intentional (reproducible runs) and not leftover.
func InitVelocity(N int, T0 float64, M float64) [][3]float64 {
	V := make([][3]float64, N)
	rand.Seed(1)
	netP := [3]float64{0, 0, 0}
	netE := 0.0
	// Draw each component uniformly from [-0.5, 0.5), accumulating the total
	// momentum per axis and the sum of squared components.
	for n := 0; n < N; n++ {
		for i := 0; i < 3; i++ {
			newP := rand.Float64() - 0.5
			netP[i] += newP
			netE += newP * newP
			V[n][i] = newP
		}
	}
	// Mean momentum per particle; subtracted below to zero the net momentum.
	netP = vector.Scale(netP, 1.0/float64(N))
	// Scale factor matching the kinetic energy to temperature T0 for mass M
	// (computed from the pre-shift netE).
	vscale := math.Sqrt(3.0 * float64(N) * T0 / (M * netE))
	for i, v := range V {
		correctedV := vector.Scale(vector.Difference(v, netP), vscale)
		V[i] = correctedV
	}
	return V
} | sim/initialize.go | 0.695131 | 0.497315 | initialize.go | starcoder
package data
import (
"fmt"
"strconv"
"strings"
)
// A Value represents a dynamically typed piece of data. Type selects which of
// the payload fields carries the data; the constructors below each set
// exactly one payload field alongside Type.
type Value struct {
	Type      Type
	Word      string
	Number    float64
	Proc      Procedure
	Str       string
	Quotation []Value
	Bool      bool
}
// Type describes the internal type of a Value.
type Type int

// A naiveconcat value is a number, string, word, procedure, quotation, or
// boolean. Number is the zero value of Type.
const (
	Number Type = iota
	String
	Word
	Proc
	Quotation
	Boolean
)
// String returns the lower-case name of the type; an unrecognized Type
// yields the empty string.
func (t Type) String() (s string) {
	names := map[Type]string{
		Number:    "number",
		String:    "string",
		Word:      "word",
		Proc:      "procedure",
		Quotation: "quotation",
		Boolean:   "boolean",
	}
	s = names[t]
	return
}
// A Procedure is an executable procedure. The wrapped function receives the
// interpreter's dictionary and stack; a non-nil error reports failure.
type Procedure struct {
	fn func(*Dictionary, *Stack) error
}

// Execute runs a Procedure against the given dictionary and stack.
func (proc Procedure) Execute(d *Dictionary, s *Stack) error {
	return proc.fn(d, s)
}
// String renders the value for display: numbers without trailing zeros,
// strings double-quoted, quotations as brace-wrapped space-separated items,
// booleans as TRUE/FALSE, and procedures as the literal PROCEDURE.
func (v Value) String() string {
	switch v.Type {
	case Number:
		return strconv.FormatFloat(v.Number, 'f', -1, 64)
	case Word:
		return v.Word
	case Proc:
		return "PROCEDURE"
	case Quotation:
		parts := make([]string, len(v.Quotation))
		for i, item := range v.Quotation {
			parts[i] = item.String()
		}
		return fmt.Sprintf("{%s}", strings.Join(parts, " "))
	case String:
		return fmt.Sprintf("\"%s\"", v.Str)
	case Boolean:
		if v.Bool {
			return "TRUE"
		}
		return "FALSE"
	}
	return ""
}
// Constructors: each NewXxx wraps a Go value in a Value tagged with the
// matching Type, setting only the corresponding payload field.

// NewNumber constructs a number Value from a float.
func NewNumber(n float64) Value {
	return Value{
		Type:   Number,
		Number: n,
	}
}

// NewString constructs a string Value from a Go string.
func NewString(s string) Value {
	return Value{
		Type: String,
		Str:  s,
	}
}

// NewWord constructs a word Value from a string.
func NewWord(s string) Value {
	return Value{
		Type: Word,
		Word: s,
	}
}

// NewQuotation constructs a quotation Value from other Values.
func NewQuotation(data ...Value) Value {
	return Value{
		Type:      Quotation,
		Quotation: data,
	}
}

// NewProc constructs a Proc Value from a function.
func NewProc(fn func(*Dictionary, *Stack) error) Value {
	return Value{
		Type: Proc,
		Proc: Procedure{fn},
	}
}

// NewBoolean constructs a Boolean Value from a bool.
func NewBoolean(b bool) Value {
	return Value{
		Type: Boolean,
		Bool: b,
	}
}
// A TypeErr indicates when a naiveconcat Value does not have its expected type.
type TypeErr struct {
	val      Value
	expected []Type
}

// NewTypeErr returns a TypeErr listing the acceptable types for val.
func NewTypeErr(val Value, expected ...Type) error {
	return TypeErr{val, expected}
}

// Error formats the offending value, its actual type, and the list of
// acceptable types joined with " or ".
func (e TypeErr) Error() string {
	var want string
	for i, typ := range e.expected {
		if i == 0 {
			want = fmt.Sprint(typ)
		} else {
			want = want + " or " + fmt.Sprint(typ)
		}
	}
	return fmt.Sprintf(
		"type error: expected '%s' (%s) to be type %s ",
		e.val,
		e.val.Type,
		want,
	)
} | data/value.go | 0.635336 | 0.432363 | value.go | starcoder
// Package conv provides pointer helpers: each NewXxx returns a pointer to its
// argument, and each ValueXxx dereferences a possibly-nil pointer,
// substituting the type's zero value for nil.
package conv

import "time"

// NewBool returns ref to v.
func NewBool(v bool) *bool { return &v }

// ValueBool returns dereference of v or zero value if nil.
func ValueBool(v *bool) bool {
	if v == nil {
		return false
	}
	return *v
}
// NewInt returns ref to v.
func NewInt(v int) *int { return &v }
// ValueInt returns dereference of v or zero value if nil.
func ValueInt(v *int) int {
if v == nil {
return 0
}
return *v
}
// NewInt8 returns ref to v.
func NewInt8(v int8) *int8 { return &v }
// ValueInt8 returns dereference of v or zero value if nil.
func ValueInt8(v *int8) int8 {
if v == nil {
return 0
}
return *v
}
// NewInt16 returns ref to v.
func NewInt16(v int16) *int16 { return &v }
// ValueInt16 returns dereference of v or zero value if nil.
func ValueInt16(v *int16) int16 {
if v == nil {
return 0
}
return *v
}
// NewInt32 returns ref to v.
func NewInt32(v int32) *int32 { return &v }
// ValueInt32 returns dereference of v or zero value if nil.
func ValueInt32(v *int32) int32 {
if v == nil {
return 0
}
return *v
}
// NewInt64 returns ref to v.
func NewInt64(v int64) *int64 { return &v }
// ValueInt64 returns dereference of v or zero value if nil.
func ValueInt64(v *int64) int64 {
if v == nil {
return 0
}
return *v
}
// NewUInt returns ref to v.
func NewUInt(v uint) *uint { return &v }
// ValueUInt returns dereference of v or zero value if nil.
func ValueUInt(v *uint) uint {
if v == nil {
return 0
}
return *v
}
// NewUInt8 returns ref to v.
func NewUInt8(v uint8) *uint8 { return &v }
// ValueUInt8 returns dereference of v or zero value if nil.
func ValueUInt8(v *uint8) uint8 {
if v == nil {
return 0
}
return *v
}
// NewUInt16 returns ref to v.
func NewUInt16(v uint16) *uint16 { return &v }
// ValueUInt16 returns dereference of v or zero value if nil.
func ValueUInt16(v *uint16) uint16 {
if v == nil {
return 0
}
return *v
}
// NewUInt32 returns ref to v.
func NewUInt32(v uint32) *uint32 { return &v }
// ValueUInt32 returns dereference of v or zero value if nil.
func ValueUInt32(v *uint32) uint32 {
if v == nil {
return 0
}
return *v
}
// NewUInt64 returns ref to v.
func NewUInt64(v uint64) *uint64 { return &v }
// ValueUInt64 returns dereference of v or zero value if nil.
func ValueUInt64(v *uint64) uint64 {
if v == nil {
return 0
}
return *v
}
// NewFloat32 returns ref to v.
func NewFloat32(v float32) *float32 { return &v }
// ValueFloat32 returns dereference of v or zero value if nil.
func ValueFloat32(v *float32) float32 {
if v == nil {
return 0
}
return *v
}
// NewFloat64 returns ref to v.
func NewFloat64(v float64) *float64 { return &v }
// ValueFloat64 returns dereference of v or zero value if nil.
func ValueFloat64(v *float64) float64 {
if v == nil {
return 0
}
return *v
}
// NewString returns ref to v.
func NewString(v string) *string { return &v }
// ValueString returns dereference of v or zero value if nil.
func ValueString(v *string) string {
if v == nil {
return ""
}
return *v
}
// NewDuration returns ref to v.
func NewDuration(v time.Duration) *time.Duration { return &v }
// ValueDuration returns dereference of v or zero value if nil.
func ValueDuration(v *time.Duration) time.Duration {
if v == nil {
return 0
}
return *v
} | new_native.go | 0.784773 | 0.405419 | new_native.go | starcoder |
package rbtree
import (
"log"
)
// color is the red/black marker carried by every tree node.
type color bool

const (
	red   color = true
	black color = false
)

// nilNode is the shared sentinel leaf: black, keyless, childless. Interior
// nodes point their empty children at it (rather than at nil) so color
// checks never dereference a nil pointer.
var (
	nilNode = &RBTree{0, 0, black, nil, nil, nil}
)
// RBTree is a node of a red-black binary search tree keyed by uint32. The
// root with a nil left pointer represents an empty tree; Add fills it in
// place on the first insertion.
type RBTree struct {
	key    uint32
	value  interface{}
	color  color // true - red, false - black
	left   *RBTree
	right  *RBTree
	parent *RBTree
}
// NewRBTree returns an empty tree: a black root node with nil children.
// Add recognizes the nil left pointer as the "empty" marker and populates
// this node in place on the first insertion.
func NewRBTree() *RBTree {
	return &RBTree{
		color:  black,
		parent: nil,
		left:   nil,
		right:  nil,
	}
}
// Add inserts key/value into the tree. The first insertion populates the
// root node in place (detected by its nil left pointer); later keys are
// inserted recursively by addNode. The root is forced black afterwards, as
// red-black invariants require.
func (tree *RBTree) Add(key uint32, value interface{}) {
	if tree.left == nil {
		tree.key = key
		tree.value = value
		tree.color = black
		tree.left = nilNode
		tree.right = nilNode
		tree.parent = nil
		return
	}
	tree.addNode(key, value)
	tree.color = black
}
// addNode recursively descends to the correct leaf position and attaches a
// new red node there. Duplicate keys are silently ignored. If the new node's
// parent is red (a red-red violation), rebalance is invoked to restore the
// red-black properties locally.
func (tree *RBTree) addNode(key uint32, value interface{}) {
	var cTree *RBTree
	if tree.key > key {
		// move left
		cTree = tree.left
	} else if tree.key < key {
		// move right
		cTree = tree.right
	} else {
		// there is already this key. Later implement error
		return
	}
	if cTree == nilNode {
		// Reached a leaf position: attach the new node here, colored red.
		newNode := &RBTree{
			key:    key,
			value:  value,
			color:  red,
			parent: tree,
			left:   nilNode,
			right:  nilNode,
		}
		if tree.key > key {
			tree.left = newNode
		} else {
			tree.right = newNode
		}
		if newNode.parent.color == red {
			log.Println("balancing")
			newNode.rebalance()
		}
		return
	}
	cTree.addNode(key, value)
}
// Remove deletes the node with the given key, if present, using standard
// binary-search-tree deletion (debug logging removed).
//
// Fixes over the original implementation:
//   - a missing key no longer dereferences the nil result of get
//   - removing an interior leaf no longer leaves a zeroed placeholder node
//     attached to its parent
//   - the two-children case no longer creates a self-referencing duplicate
//     when the in-order successor is the immediate right child
//   - detached links use the nilNode sentinel (never raw nil), so traversal
//     and later lookups stay safe
//
// NOTE(review): like the original, this does not restore red-black balance
// after deletion; the tree remains a valid BST but color invariants may lapse.
func (tree *RBTree) Remove(key uint32) {
	dNode := tree.get(key)
	if dNode == nil {
		return
	}
	if dNode.left != nilNode && dNode.right != nilNode {
		// Two children: copy the in-order successor's payload into this node,
		// then delete the successor (which has at most one child).
		succ := dNode.right
		for succ.left != nilNode {
			succ = succ.left
		}
		dNode.key = succ.key
		dNode.value = succ.value
		dNode = succ
	}
	// dNode now has at most one real child.
	child := dNode.left
	if child == nilNode {
		child = dNode.right
	}
	parent := dNode.parent
	if parent == nil {
		// Deleting the root node itself.
		if child == nilNode {
			// Tree becomes empty; reset to the zero state Add recognizes.
			*tree = RBTree{}
			return
		}
		child.parent = nil
		*tree = *child
		if tree.left != nil && tree.left != nilNode {
			tree.left.parent = tree
		}
		if tree.right != nil && tree.right != nilNode {
			tree.right.parent = tree
		}
		tree.color = black
		return
	}
	// Splice dNode out of its parent, promoting its only child (if any).
	if child != nilNode {
		child.parent = parent
	}
	if parent.left == dNode {
		parent.left = child
	} else {
		parent.right = child
	}
}
// Get returns the value stored under key, or nil when the key is absent.
func (tree *RBTree) Get(key uint32) interface{} {
	if node := tree.get(key); node != nil {
		return node.value
	}
	return nil
}
// get walks the tree and returns the node holding key, or nil when the key
// is not present (i.e. the search reaches the nilNode sentinel).
func (tree *RBTree) get(key uint32) *RBTree {
	node := tree
	for {
		switch {
		case key < node.key:
			node = node.left
		case key > node.key:
			node = node.right
		default:
			return node
		}
		if node == nilNode {
			return nil
		}
	}
}
// rebalance repairs a red-red violation between tree (a newly inserted red
// node) and its red parent, by the classic uncle-color case analysis: a red
// uncle is fixed by recoloring; a black uncle by rotating (with a preparatory
// rotation when the node is on the "inner" side). Returns immediately when
// there is no grandparent.
// NOTE(review): after the recolor-only cases the grandparent is left red
// without recursing upward, and rotations rely on the value-swapping
// turnLeft/turnRight below — confirm invariants hold for deep trees.
func (tree *RBTree) rebalance() {
	var uncle *RBTree
	var grand *RBTree
	parent := tree.parent
	if tree.parent.parent != nil {
		grand = tree.parent.parent
	} else {
		return
	}
	currentPosition := "left"
	if tree == parent.right {
		currentPosition = "right"
	}
	// case 1: unclePosition == right, uncleColor == red, currentPosition = left | right
	// case 2: unclePosition == right, uncleColor == black, currentPosition = right
	// case 3: unclePosition == right, uncleColor == black, currentPosition = left
	// case 4: unclePosition == left, uncleColor == red, currentPosition = right | left
	// case 5: unclePosition == left, uncleColor == black, currentPosition = left
	// case 6: unclePosition == left, uncleColor == black, currentPosition = right
	if parent == grand.right {
		// uncle position is left
		uncle = grand.left
		if uncle.color == black {
			if currentPosition == "left" {
				// inner child: turn parent tree to right, then retry
				parent.turnRight()
				parent.rebalance()
			} else {
				// change colors
				parent.color = black
				grand.color = red
				// turn grand tree to left
				grand.turnLeft()
			}
		} else {
			// red uncle: change colors only
			parent.color = black
			uncle.color = black
			grand.color = red
		}
	} else {
		// uncle position is right
		uncle = grand.right
		if uncle.color == black {
			if currentPosition == "right" {
				// inner child: turn parent tree to left, then retry
				parent.turnLeft()
				parent.rebalance()
			} else {
				// change colors
				parent.color = black
				grand.color = red
				// turn grand tree to right
				grand.turnRight()
			}
		} else {
			// red uncle: change colors only
			parent.color = black
			uncle.color = black
			grand.color = red
		}
	}
}
// turnLeft rotates the subtree rooted at tree to the left. Instead of
// re-linking the parent's child pointer, it copies node contents so that
// *tree itself ends up holding the new subtree root (the old right child's
// contents), with a copy of the old root as its left child.
// NOTE(review): this is not a textbook rotation — the old root's left child
// is overwritten with the right child's left subtree and its right pointer
// forced to nilNode, and the moved children's parent pointers are not
// updated. Verify this is only invoked in rebalance cases where the
// discarded subtrees are empty.
func (tree *RBTree) turnLeft() {
	var temp RBTree
	temp = *tree
	rTree := temp.right
	clTree := rTree.left
	rTree.left = &temp
	rTree.parent = temp.parent
	temp.left = clTree
	temp.parent = rTree
	temp.right = nilNode
	*tree = *rTree
}
// turnRight rotates the subtree rooted at tree to the right, mirroring
// turnLeft: *tree ends up holding the old left child's contents, with a copy
// of the old root as its right child.
// NOTE(review): same caveats as turnLeft — the old root's right child is
// overwritten, its left pointer forced to nilNode, and moved children's
// parent pointers are not updated; verify the discarded subtrees are always
// empty at the call sites.
func (tree *RBTree) turnRight() {
	var temp RBTree
	temp = *tree
	lTree := temp.left
	crTree := lTree.right
	lTree.right = &temp
	lTree.parent = temp.parent
	temp.right = crTree
	temp.parent = lTree
	temp.left = nilNode
	*tree = *lTree
}
// TraverseTreeFromLargest appends the stored values to res in descending key
// order via a reverse in-order walk (right subtree, node, left subtree).
// Recursion stops at the nilNode sentinel.
func (tree *RBTree) TraverseTreeFromLargest(res *[]interface{}) {
	if tree == nilNode {
		return
	}
	tree.right.TraverseTreeFromLargest(res)
	*res = append(*res, tree.value)
	tree.left.TraverseTreeFromLargest(res)
} | rbtree/rbtree.go | 0.532911 | 0.411525 | rbtree.go | starcoder
package keeper
import (
"fmt"
sdk "github.com/cosmos/cosmos-sdk/types"
hardtypes "github.com/kava-labs/kava/x/hard/types"
"github.com/kava-labs/kava/x/incentive/types"
)
// AccumulateHardBorrowRewards updates the rewards accumulated for the input reward period.
// New rewards are proportional to the time elapsed since the previous accrual and are
// folded into the global borrow reward indexes for the period's collateral type.
// In every early-return branch the previous accrual time is advanced to the current
// block time so the same time window cannot be counted twice.
func (k Keeper) AccumulateHardBorrowRewards(ctx sdk.Context, rewardPeriod types.MultiRewardPeriod) error {
	previousAccrualTime, found := k.GetPreviousHardBorrowRewardAccrualTime(ctx, rewardPeriod.CollateralType)
	if !found {
		// First accrual for this collateral type: just record the starting time.
		k.SetPreviousHardBorrowRewardAccrualTime(ctx, rewardPeriod.CollateralType, ctx.BlockTime())
		return nil
	}
	timeElapsed := CalculateTimeElapsed(rewardPeriod.Start, rewardPeriod.End, ctx.BlockTime(), previousAccrualTime)
	if timeElapsed.IsZero() {
		return nil
	}
	if rewardPeriod.RewardsPerSecond.IsZero() {
		// Nothing to distribute; advance the accrual time and stop.
		k.SetPreviousHardBorrowRewardAccrualTime(ctx, rewardPeriod.CollateralType, ctx.BlockTime())
		return nil
	}
	totalBorrowedCoins, foundTotalBorrowedCoins := k.hardKeeper.GetBorrowedCoins(ctx)
	if !foundTotalBorrowedCoins {
		// No borrows exist at all, so there is nobody to reward yet.
		k.SetPreviousHardBorrowRewardAccrualTime(ctx, rewardPeriod.CollateralType, ctx.BlockTime())
		return nil
	}
	totalBorrowed := totalBorrowedCoins.AmountOf(rewardPeriod.CollateralType).ToDec()
	if totalBorrowed.IsZero() {
		// Avoid dividing by zero below when nothing of this denom is borrowed.
		k.SetPreviousHardBorrowRewardAccrualTime(ctx, rewardPeriod.CollateralType, ctx.BlockTime())
		return nil
	}
	previousRewardIndexes, found := k.GetHardBorrowRewardIndexes(ctx, rewardPeriod.CollateralType)
	if !found {
		// Seed a zero index for every reward denom so the loop below has a baseline.
		for _, rewardCoin := range rewardPeriod.RewardsPerSecond {
			rewardIndex := types.NewRewardIndex(rewardCoin.Denom, sdk.ZeroDec())
			previousRewardIndexes = append(previousRewardIndexes, rewardIndex)
		}
		k.SetHardBorrowRewardIndexes(ctx, rewardPeriod.CollateralType, previousRewardIndexes)
	}
	hardFactor, found := k.hardKeeper.GetBorrowInterestFactor(ctx, rewardPeriod.CollateralType)
	if !found {
		k.SetPreviousHardBorrowRewardAccrualTime(ctx, rewardPeriod.CollateralType, ctx.BlockTime())
		return nil
	}
	// NOTE(review): this aliases previousRewardIndexes — existing entries are
	// overwritten in place by the indexed assignment below.
	newRewardIndexes := previousRewardIndexes
	for _, rewardCoin := range rewardPeriod.RewardsPerSecond {
		// Total reward emitted for this denom over the elapsed window.
		newRewards := rewardCoin.Amount.ToDec().Mul(timeElapsed.ToDec())
		previousRewardIndex, found := previousRewardIndexes.GetRewardIndex(rewardCoin.Denom)
		if !found {
			previousRewardIndex = types.NewRewardIndex(rewardCoin.Denom, sdk.ZeroDec())
		}
		// Calculate new reward factor and update reward index
		rewardFactor := newRewards.Mul(hardFactor).Quo(totalBorrowed)
		newRewardFactorValue := previousRewardIndex.RewardFactor.Add(rewardFactor)
		newRewardIndex := types.NewRewardIndex(rewardCoin.Denom, newRewardFactorValue)
		i, found := newRewardIndexes.GetFactorIndex(rewardCoin.Denom)
		if found {
			newRewardIndexes[i] = newRewardIndex
		} else {
			newRewardIndexes = append(newRewardIndexes, newRewardIndex)
		}
	}
	k.SetHardBorrowRewardIndexes(ctx, rewardPeriod.CollateralType, newRewardIndexes)
	k.SetPreviousHardBorrowRewardAccrualTime(ctx, rewardPeriod.CollateralType, ctx.BlockTime())
	return nil
}
// InitializeHardBorrowReward initializes the borrow-side of a hard liquidity provider claim
// by creating the claim (if absent) and setting one multi-reward index per borrowed denom.
// Each index snapshots the current global factors so rewards accrue only from this point on.
func (k Keeper) InitializeHardBorrowReward(ctx sdk.Context, borrow hardtypes.Borrow) {
	claim, found := k.GetHardLiquidityProviderClaim(ctx, borrow.Borrower)
	if !found {
		claim = types.NewHardLiquidityProviderClaim(borrow.Borrower, sdk.Coins{}, nil, nil, nil)
	}
	var borrowRewardIndexes types.MultiRewardIndexes
	for _, coin := range borrow.Amount {
		globalRewardIndexes, foundGlobalRewardIndexes := k.GetHardBorrowRewardIndexes(ctx, coin.Denom)
		var multiRewardIndex types.MultiRewardIndex
		if foundGlobalRewardIndexes {
			// Start from the current global factors so no past rewards are granted.
			multiRewardIndex = types.NewMultiRewardIndex(coin.Denom, globalRewardIndexes)
		} else {
			// Denom has no rewards yet; record an empty index set as a placeholder.
			multiRewardIndex = types.NewMultiRewardIndex(coin.Denom, types.RewardIndexes{})
		}
		borrowRewardIndexes = append(borrowRewardIndexes, multiRewardIndex)
	}
	// Replaces any previously stored borrow indexes wholesale.
	claim.BorrowRewardIndexes = borrowRewardIndexes
	k.SetHardLiquidityProviderClaim(ctx, claim)
}
// SynchronizeHardBorrowReward updates the claim object by adding any accumulated rewards
// and updating the reward index value.
// For each borrowed denom it compares the user's stored reward factor with the current
// global factor; the difference multiplied by the borrowed amount is the newly earned
// reward. Afterwards the user's factor is advanced to the global one.
func (k Keeper) SynchronizeHardBorrowReward(ctx sdk.Context, borrow hardtypes.Borrow) {
	claim, found := k.GetHardLiquidityProviderClaim(ctx, borrow.Borrower)
	if !found {
		return
	}
	for _, coin := range borrow.Amount {
		globalRewardIndexes, foundGlobalRewardIndexes := k.GetHardBorrowRewardIndexes(ctx, coin.Denom)
		if !foundGlobalRewardIndexes {
			// This denom has no global rewards; nothing to synchronize.
			continue
		}
		userMultiRewardIndex, foundUserMultiRewardIndex := claim.BorrowRewardIndexes.GetRewardIndex(coin.Denom)
		if !foundUserMultiRewardIndex {
			continue
		}
		userRewardIndexIndex, foundUserRewardIndexIndex := claim.BorrowRewardIndexes.GetRewardIndexIndex(coin.Denom)
		if !foundUserRewardIndexIndex {
			continue
		}
		for _, globalRewardIndex := range globalRewardIndexes {
			userRewardIndex, foundUserRewardIndex := userMultiRewardIndex.RewardIndexes.GetRewardIndex(globalRewardIndex.CollateralType)
			if !foundUserRewardIndex {
				// User borrowed this coin type before it had rewards. When new rewards are added, legacy borrowers
				// should immediately begin earning rewards. Enable users to do so by updating their claim with the global
				// reward index denom and start their reward factor at 0.0
				userRewardIndex = types.NewRewardIndex(globalRewardIndex.CollateralType, sdk.ZeroDec())
				userMultiRewardIndex.RewardIndexes = append(userMultiRewardIndex.RewardIndexes, userRewardIndex)
				claim.BorrowRewardIndexes[userRewardIndexIndex] = userMultiRewardIndex
			}
			globalRewardFactor := globalRewardIndex.RewardFactor
			userRewardFactor := userRewardIndex.RewardFactor
			// The factor delta is the per-unit reward earned since the last sync.
			rewardsAccumulatedFactor := globalRewardFactor.Sub(userRewardFactor)
			if rewardsAccumulatedFactor.IsNegative() {
				// A global factor only ever grows; a negative delta means corrupted state.
				panic(fmt.Sprintf("reward accumulation factor cannot be negative: %s", rewardsAccumulatedFactor))
			}
			newRewardsAmount := rewardsAccumulatedFactor.Mul(borrow.Amount.AmountOf(coin.Denom).ToDec()).RoundInt()
			factorIndex, foundFactorIndex := userMultiRewardIndex.RewardIndexes.GetFactorIndex(globalRewardIndex.CollateralType)
			if !foundFactorIndex { // should never trigger
				continue
			}
			// Record the sync point, then credit the newly earned coins.
			claim.BorrowRewardIndexes[userRewardIndexIndex].RewardIndexes[factorIndex].RewardFactor = globalRewardIndex.RewardFactor
			newRewardsCoin := sdk.NewCoin(userRewardIndex.CollateralType, newRewardsAmount)
			claim.Reward = claim.Reward.Add(newRewardsCoin)
		}
	}
	k.SetHardLiquidityProviderClaim(ctx, claim)
}
// UpdateHardBorrowIndexDenoms adds any new borrow denoms to the claim's borrow reward index
func (k Keeper) UpdateHardBorrowIndexDenoms(ctx sdk.Context, borrow hardtypes.Borrow) {
claim, found := k.GetHardLiquidityProviderClaim(ctx, borrow.Borrower)
if !found {
claim = types.NewHardLiquidityProviderClaim(borrow.Borrower, sdk.Coins{}, nil, nil, nil)
}
borrowDenoms := getDenoms(borrow.Amount)
borrowRewardIndexDenoms := claim.BorrowRewardIndexes.GetCollateralTypes()
uniqueBorrowDenoms := setDifference(borrowDenoms, borrowRewardIndexDenoms)
uniqueBorrowRewardDenoms := setDifference(borrowRewardIndexDenoms, borrowDenoms)
borrowRewardIndexes := claim.BorrowRewardIndexes
// Create a new multi-reward index in the claim for every new borrow denom
for _, denom := range uniqueBorrowDenoms {
_, foundUserRewardIndexes := claim.BorrowRewardIndexes.GetRewardIndex(denom)
if !foundUserRewardIndexes {
globalBorrowRewardIndexes, foundGlobalBorrowRewardIndexes := k.GetHardBorrowRewardIndexes(ctx, denom)
var multiRewardIndex types.MultiRewardIndex
if foundGlobalBorrowRewardIndexes {
multiRewardIndex = types.NewMultiRewardIndex(denom, globalBorrowRewardIndexes)
} else {
multiRewardIndex = types.NewMultiRewardIndex(denom, types.RewardIndexes{})
}
borrowRewardIndexes = append(borrowRewardIndexes, multiRewardIndex)
}
}
// Delete multi-reward index from claim if the collateral type is no longer borrowed
for _, denom := range uniqueBorrowRewardDenoms {
borrowRewardIndexes = borrowRewardIndexes.RemoveRewardIndex(denom)
}
claim.BorrowRewardIndexes = borrowRewardIndexes
k.SetHardLiquidityProviderClaim(ctx, claim)
} | x/incentive/keeper/rewards_borrow.go | 0.726426 | 0.458106 | rewards_borrow.go | starcoder |
package model
type (
	// Type is the data structure for storing the parsed values of schema string
	Type map[string]Collection // key is database name
	// Collection is a data structure for storing fields of schema
	Collection map[string]Fields // key is collection name
	// Fields is a data structure for storing the type of field
	Fields map[string]*FieldType // key is field name
	// FieldType stores information about a particular column in table.
	// The Is* booleans record which schema directives were present on the field.
	FieldType struct {
		FieldName string `json:"fieldName"`
		IsFieldTypeRequired bool `json:"isFieldTypeRequired"`
		IsList bool `json:"isList"`
		Kind string `json:"kind"`
		// Directive string
		NestedObject Fields `json:"nestedObject"`
		IsPrimary bool `json:"isPrimary"`
		// For directives
		IsAutoIncrement bool `json:"isAutoIncrement"`
		IsIndex bool `json:"isIndex"`
		IsUnique bool `json:"isUnique"`
		IsCreatedAt bool `json:"isCreatedAt"`
		IsUpdatedAt bool `json:"isUpdatedAt"`
		IsLinked bool `json:"isLinked"`
		IsForeign bool `json:"isForeign"`
		IsDefault bool `json:"isDefault"`
		IndexInfo *TableProperties `json:"indexInfo"`
		LinkedTable *TableProperties `json:"linkedTable"`
		JointTable *TableProperties `json:"jointTable"`
		Default interface{} `json:"default"`
		TypeIDSize int `json:"size"`
	}
	// TableProperties are properties of the table, shared between index, link
	// and foreign-key directive information (see IndexInfo/LinkedTable/JointTable above).
	TableProperties struct {
		From, To string // presumably link endpoint fields — TODO confirm against directive parsing
		Table, Field, OnDelete string // referenced table/field; OnDelete presumably the ON DELETE rule — confirm
		DBType string
		Group, Sort string // index group and sort direction (see DefaultIndexSort below)
		Order int // position within the index (see DefaultIndexOrder below)
		ConstraintName string
	}
)
const (
	// TypeDate is variable used for Variable of type Date
	TypeDate string = "Date"
	// TypeTime is variable used for Variable of type Time
	TypeTime string = "Time"
	// TypeUUID is variable used for Variable of type UUID
	TypeUUID string = "UUID"
	// TypeInteger is variable used for Variable of type Integer
	TypeInteger string = "Integer"
	// TypeString is variable used for Variable of type String
	TypeString string = "String"
	// TypeFloat is variable used for Variable of type Float
	TypeFloat string = "Float"
	// TypeBoolean is variable used for Variable of type Boolean
	TypeBoolean string = "Boolean"
	// TypeDateTime is variable used for Variable of type DateTime
	TypeDateTime string = "DateTime"
	// TypeID is variable used for Variable of type ID
	TypeID string = "ID"
	// TypeJSON is variable used for Variable of type Jsonb
	TypeJSON string = "JSON"
	// SQLTypeIDSize is variable used for specifying size of sql type ID
	SQLTypeIDSize int = 50
	// TypeObject is a string with value object
	TypeObject string = "Object"
	// TypeEnum is a variable type enum
	TypeEnum string = "Enum"
	// DirectiveUnique is used in schema module to add unique index
	DirectiveUnique string = "unique"
	// DirectiveIndex is used in schema module to add index
	DirectiveIndex string = "index"
	// DirectiveForeign is used in schema module to add foreign key
	DirectiveForeign string = "foreign"
	// DirectivePrimary is used in schema module to add primary key
	DirectivePrimary string = "primary"
	// DirectiveCreatedAt is used in schema module to specify the created location
	DirectiveCreatedAt string = "createdAt"
	// DirectiveUpdatedAt is used in schema module to add Updated location
	DirectiveUpdatedAt string = "updatedAt"
	// DirectiveLink is used in schema module to add link
	DirectiveLink string = "link"
	// DirectiveDefault is used to add default key
	DirectiveDefault string = "default"
	// DirectiveVarcharSize denotes the maximum allowable character for field type ID
	DirectiveVarcharSize string = "size"
	// DefaultIndexSort specifies default order of sorting
	DefaultIndexSort string = "asc"
	// DefaultIndexOrder specifies default order of order
	DefaultIndexOrder int = 1
)
// InspectorFieldType is the type for storing sql inspection information.
// The db tags map columns of the inspection query result onto the struct.
type InspectorFieldType struct {
	FieldName string `db:"Field"`
	FieldType string `db:"Type"`
	FieldNull string `db:"Null"`
	FieldKey string `db:"Key"`
	FieldDefault string `db:"Default"`
	AutoIncrement string `db:"AutoIncrement"`
	VarcharSize int `db:"VarcharSize"`
}
// ForeignKeysType is the type for storing foreignkeys information of sql inspection.
type ForeignKeysType struct {
	TableName string `db:"TABLE_NAME"`
	ColumnName string `db:"COLUMN_NAME"`
	ConstraintName string `db:"CONSTRAINT_NAME"`
	DeleteRule string `db:"DELETE_RULE"`
	RefTableName string `db:"REFERENCED_TABLE_NAME"`
	RefColumnName string `db:"REFERENCED_COLUMN_NAME"`
}
// IndexType holds one row of index-key information produced by SQL inspection.
// The db tags map columns of the inspection query result onto the struct.
type IndexType struct {
	TableName  string `db:"TABLE_NAME"`
	ColumnName string `db:"COLUMN_NAME"`
	IndexName  string `db:"INDEX_NAME"`
	Order      int    `db:"SEQ_IN_INDEX"` // position of the column within the index
	Sort       string `db:"SORT"`         // sort direction, e.g. "asc"
	IsUnique   string `db:"IS_UNIQUE"`
}
package bitio
import (
"bufio"
"io"
)
// Reader is the bit reader interface.
type Reader interface {
	// Reader is an io.Reader
	io.Reader
	// Reader is also an io.ByteReader.
	// ReadByte reads the next 8 bits and returns them as a byte.
	io.ByteReader
	// ReadBits reads n bits and returns them as the lowest n bits of u.
	ReadBits(n byte) (u uint64, err error)
	// ReadBool reads the next bit, and returns true if it is 1.
	ReadBool() (b bool, err error)
	// Align aligns the bit stream to a byte boundary,
	// so next read will read/use data from the next byte.
	// Returns the number of unread / skipped bits.
	Align() (skipped byte)
}
// An io.Reader and io.ByteReader at the same time.
type readerAndByteReader interface {
	io.Reader
	io.ByteReader
}
// reader is the bit reader implementation.
// Invariant between calls: bits is in [0, 7] and only the lowest `bits`
// bits of cache may be non-zero.
type reader struct {
	in readerAndByteReader
	cache byte // unread bits are stored here
	bits byte // number of unread bits in cache
}
// NewReader returns a new Reader that consumes bits from the given io.Reader.
// If in does not already implement io.ByteReader, it is wrapped in a
// bufio.Reader to provide byte-at-a-time access.
func NewReader(in io.Reader) Reader {
	if src, ok := in.(readerAndByteReader); ok {
		return &reader{in: src}
	}
	return &reader{in: bufio.NewReader(in)}
}
// Read implements io.Reader.
// When the stream is byte-aligned it delegates directly to the underlying
// reader; otherwise each output byte is assembled from two partial input bytes.
func (r *reader) Read(p []byte) (n int, err error) {
	// r.bits will be the same after reading 8 bits, so we don't need to update that.
	if r.bits == 0 {
		return r.in.Read(p)
	}
	for ; n < len(p); n++ {
		if p[n], err = r.readUnalignedByte(); err != nil {
			return
		}
	}
	return
}
// ReadBits reads n bits from the stream and returns them as the lowest n bits
// of u. Three cases are handled: the cache alone suffices with bits left over,
// the cache is insufficient and more bytes must be read, or the cache matches
// the request exactly.
func (r *reader) ReadBits(n byte) (u uint64, err error) {
	// Some optimization, frequent cases
	if n < r.bits {
		// cache has all needed bits, and there are some extra which will be left in cache
		shift := r.bits - n
		u = uint64(r.cache >> shift)
		r.cache &= 1<<shift - 1 // clear the consumed high bits
		r.bits = shift
		return
	}
	if n > r.bits {
		// all cache bits needed, and it's not even enough so more will be read
		if r.bits > 0 {
			u = uint64(r.cache)
			n -= r.bits
		}
		// Read whole bytes
		for n >= 8 {
			b, err2 := r.in.ReadByte()
			if err2 != nil {
				return 0, err2
			}
			u = u<<8 + uint64(b)
			n -= 8
		}
		// Read last fraction, if any
		if n > 0 {
			if r.cache, err = r.in.ReadByte(); err != nil {
				return 0, err
			}
			// Take the top n bits of the fresh byte; the rest stays cached.
			shift := 8 - n
			u = u<<n + uint64(r.cache>>shift)
			r.cache &= 1<<shift - 1
			r.bits = shift
		} else {
			r.bits = 0
		}
		return u, nil
	}
	// cache has exactly as many as needed
	r.bits = 0 // no need to clear cache, will be overridden on next read
	return uint64(r.cache), nil
}
// ReadByte implements io.ByteReader, returning the next 8 bits as a byte.
func (r *reader) ReadByte() (byte, error) {
	if r.bits != 0 {
		// Mid-byte position: stitch the byte together from two partial bytes.
		return r.readUnalignedByte()
	}
	// Byte-aligned: delegate straight to the underlying reader.
	// (bits stays 0 after a whole byte is consumed, so no state update is needed.)
	return r.in.ReadByte()
}
// readUnalignedByte reads the next 8 bits which are (may be) unaligned and returns them as a byte.
// The cached bits become the high bits of the result; the top bits of a
// freshly read input byte fill in the remainder.
func (r *reader) readUnalignedByte() (b byte, err error) {
	// r.bits will be the same after reading 8 bits, so we don't need to update that.
	bits := r.bits
	b = r.cache << (8 - bits) // cached bits become the high bits of the result
	r.cache, err = r.in.ReadByte()
	if err != nil {
		return 0, err
	}
	b |= r.cache >> bits // top bits of the fresh byte fill the remainder
	r.cache &= 1<<bits - 1 // keep only the still-unread low bits
	return
}
// ReadBool reads the next single bit and returns true if it is 1.
func (r *reader) ReadBool() (b bool, err error) {
	if r.bits == 0 {
		// Cache empty: fetch a fresh byte and test its most significant bit.
		r.cache, err = r.in.ReadByte()
		if err != nil {
			return
		}
		b = (r.cache & 0x80) != 0
		r.cache, r.bits = r.cache&0x7f, 7 // drop the consumed bit; 7 remain
		return
	}
	r.bits--
	b = (r.cache & (1 << r.bits)) != 0 // test the highest unread bit
	r.cache &= 1<<r.bits - 1 // clear it, preserving the invariant
	return
}
func (r *reader) Align() (skipped byte) {
skipped = r.bits
r.bits = 0 // no need to clear cache, will be overwritten on next read
return
} | vendor/github.com/icza/bitio/reader.go | 0.637257 | 0.403949 | reader.go | starcoder |
package screen2d
import (
"fmt"
"time"
"github.com/veandco/go-sdl2/sdl"
)
// Color meets the color.Color interface required for CreateRGBSurface.
type Color struct {
	R, G, B, A uint32
}

// RGBA meets the color.Color interface required for CreateRGBSurface.
func (c Color) RGBA() (r, g, b, a uint32) {
	return c.R, c.G, c.B, c.A
}

// HexColorToRGBA converts a colour stored as 0xRRGGBBAA in an int to its
// four 8-bit RGBA components.
func HexColorToRGBA(color int) *Color {
	component := func(shift uint) uint32 { return uint32(color>>shift) & 0xFF }
	return &Color{
		R: component(24),
		G: component(16),
		B: component(8),
		A: component(0),
	}
}
// new1PTexture returns a new 1x1 texture holding a single pixel with the
// given RGBA components, configured for additive blending.
// NOTE(review): the error from CreateTexture is discarded; if creation fails,
// the SetBlendMode call below dereferences a nil texture — confirm intended.
func new1PTexture(rend *sdl.Renderer, r, g, b, a uint8) *sdl.Texture {
	tex, _ := rend.CreateTexture(sdl.PIXELFORMAT_RGBA8888, sdl.TEXTUREACCESS_STATIC, 1, 1)
	tex.SetBlendMode(sdl.BLENDMODE_ADD)
	// One RGBA8888 pixel is 4 bytes; the pitch (bytes per row) is therefore 4.
	pixels := make([]byte, 4)
	pixels[0] = r
	pixels[1] = g
	pixels[2] = b
	pixels[3] = a
	tex.Update(nil, pixels, 4)
	return tex
}
// RGBAPixels2Surface takes an array of 0xRRGGBBAA pixel data and returns an
// SDL Surface of the given dimensions. The length of rgbaData must equal w*h.
func RGBAPixels2Surface(rgbaData []int, w, h int32) (*sdl.Surface, error) {
	if int32(len(rgbaData)) != w*h {
		// Fixed: the original format string was missing the closing parenthesis.
		return nil, fmt.Errorf("bitmap does not have the correct number of pixels for surface (%d: %d*%d)", len(rgbaData), w, h)
	}
	surf, err := sdl.CreateRGBSurfaceWithFormat(0, w, h, 32, sdl.PIXELFORMAT_RGBA8888)
	if err != nil {
		return nil, err
	}
	// Unpack each 0xRRGGBBAA int into four consecutive bytes of the surface's
	// pixel buffer (RGBA8888 byte order).
	pixels := surf.Pixels()
	for i, p := range rgbaData {
		j := i * 4
		pixels[j+0] = byte((p & 0xFF000000) >> 24)
		pixels[j+1] = byte((p & 0x00FF0000) >> 16)
		pixels[j+2] = byte((p & 0x0000FF00) >> 8)
		pixels[j+3] = byte(p & 0x000000FF)
	}
	return surf, nil
}
// RGBAPixels2Texture takes an array of 0xRRGGBBAA pixel data and returns an
// SDL Texture of the given dimensions. The length of rgbaData must equal w*h.
func RGBAPixels2Texture(rend *sdl.Renderer, rgbaData []int, w, h int32) (*sdl.Texture, error) {
	// Delegate to RGBAPixels2Surface so the validation and pixel-unpacking
	// logic lives in exactly one place, then convert via the renderer.
	surf, err := RGBAPixels2Surface(rgbaData, w, h)
	if err != nil {
		return nil, err
	}
	return rend.CreateTextureFromSurface(surf)
}
// RGBAPixels2Mask takes an array of 0xRRGGBBAA pixel data and returns a
// collision mask with one entry per pixel: true wherever the pixel value is
// non-zero (i.e. not fully transparent black). The length of rgbaData must
// equal w*h.
func RGBAPixels2Mask(rgbaData []int, w, h int32) ([]bool, error) {
	if int32(len(rgbaData)) != w*h {
		// Fixed: the original format string was missing the closing parenthesis.
		return nil, fmt.Errorf("bitmap does not have the correct number of pixels for surface (%d: %d*%d)", len(rgbaData), w, h)
	}
	mask := make([]bool, len(rgbaData))
	for i, p := range rgbaData {
		mask[i] = p != 0x00000000
	}
	return mask, nil
}
// Surface2Texture takes a Surface and returns a Texture created from it by the renderer.
func Surface2Texture(rend *sdl.Renderer, surf *sdl.Surface) (*sdl.Texture, error) {
	return rend.CreateTextureFromSurface(surf)
}
// Box describes the bounding corners of a Box.
type Box struct {
	X1, Y1 int32 // top-left corner
	X2, Y2 int32 // bottom-right corner
	W, H int32 // width and height
}
// Hitter represents an item that can be checked to see if it has hit another item.
type Hitter interface {
	GetBox() Box // bounding box in world coordinates
	GetMask() []bool // row-major per-pixel opacity mask matching the box dimensions
}
// CheckBoxHit reports whether the bounding boxes of the two entities overlap.
func CheckBoxHit(entity1, entity2 Hitter) bool {
	a := entity1.GetBox()
	b := entity2.GetBox()
	// Boxes intersect only when they overlap on both axes.
	horizontal := a.X1 <= b.X2 && a.X2 >= b.X1
	vertical := a.Y1 <= b.Y2 && a.Y2 >= b.Y1
	return horizontal && vertical
}
// CheckBoxHitDebug is CheckBoxHit with diagnostic output: it prints both
// bounding boxes and the result of each axis check while testing for overlap.
func CheckBoxHitDebug(entity1, entity2 Hitter) bool {
	a := entity1.GetBox()
	fmt.Printf("R1 - X1: %d Y1: %d X2: %d Y2: %d\n", a.X1, a.Y1, a.X2, a.Y2)
	b := entity2.GetBox()
	fmt.Printf("R2 - X1: %d Y1: %d X2: %d Y2: %d\n", b.X1, b.Y1, b.X2, b.Y2)
	// Horizontal axis: disjoint when one box lies entirely left/right of the other.
	fmt.Printf("L/R Check - ")
	if a.X1 > b.X2 || a.X2 < b.X1 {
		fmt.Printf("Miss\n")
		return false
	}
	// Vertical axis: disjoint when one box lies entirely above/below the other.
	fmt.Printf(" On Target / H/L Check -- ")
	if a.Y1 > b.Y2 || a.Y2 < b.Y1 {
		fmt.Printf("Miss\n")
		return false
	}
	fmt.Printf("Hit\n")
	return true
}
// CheckPixelHit reports whether any opaque pixels of the two entities overlap.
// It walks every pixel of entity1's mask, translates it into entity2's local
// coordinates, and checks entity2's mask at the corresponding position.
func CheckPixelHit(entity1, entity2 Hitter) bool {
	b1 := entity1.GetBox()
	m1 := entity1.GetMask()
	b2 := entity2.GetBox()
	m2 := entity2.GetMask()
	i := int32(0)
	for i < int32(len(m1)) {
		// World coordinates of the i-th pixel of entity1 (row-major mask layout).
		px := b1.X1 + (i % b1.W)
		py := b1.Y1 + (i / b1.W)
		if px >= b2.X1 && px < b2.X2 && py >= b2.Y1 && py < b2.Y2 {
			// Translate into entity2's mask space and compare both masks.
			x := px - b2.X1
			y := py - b2.Y1
			i2 := (y * b2.W) + x
			if m1[i] == true && m2[i2] == true {
				return true
			}
		}
		i++
	}
	return false
}
// Counter holds various runtime counters used to track frame rate and
// per-frame elapsed time.
type Counter struct {
	FPS int // frames rendered during the last whole second
	fpsStart time.Time // start of the current one-second FPS window
	fpsFrames int // frames counted so far in the current window
	FrameElapsed float32 // duration of the last frame, in seconds
	frameStart time.Time // time at which the current frame began
	min, max float32 // NOTE(review): unused in the code visible here — confirm before removing
}
// Start the counters running.
func (c *Counter) Start() {
	c.fpsStart = time.Now()
	c.frameStart = time.Now()
	c.fpsFrames = 0
}
// FrameStart indicates a rendering frame has started.
func (c *Counter) FrameStart() {
	c.frameStart = time.Now()
}
// FrameEnd indicates the rendering frame has ended
func (c *Counter) FrameEnd() {
c.FrameElapsed = float32(time.Since(c.frameStart).Seconds())
c.fpsFrames++
if time.Since(c.fpsStart).Seconds() > 1 {
fmt.Printf("FPS - %d\n", c.fpsFrames)
c.FPS = c.fpsFrames
c.fpsStart = time.Now()
c.fpsFrames = 0
}
} | screen2d.go | 0.835114 | 0.519887 | screen2d.go | starcoder |
package protocol
import (
"sync"
)
/*This data structure maintains a map of the form [32]byte -> *Block. It stores the blocks received from the shards.
It is queried after every epoch block to check whether we can continue to the next epoch.
Because we need to remove the oldest entry and map iteration order is random in Go, we additionally keep a slice
that tracks the order in which blocks were included. Using the slice, we can remove the first received block once the
stash gets full.*/
type KeyBlock [32]byte // Key: Hash of the block
type ValueBlock *Block // Value: Block
// BlockStash pairs the block map with a slice that records insertion order,
// enabling FIFO eviction despite Go's randomized map iteration.
type BlockStash struct {
	M map[KeyBlock]ValueBlock
	Keys []KeyBlock
}
// blockMutex guards all BlockStash instances. NOTE(review): being package-level,
// it is shared across every stash, not per-instance.
var blockMutex = &sync.Mutex{}
// NewShardBlockStash returns an empty stash. Keys starts as a nil slice,
// which append handles transparently.
func NewShardBlockStash() *BlockStash {
	return &BlockStash{M: make(map[KeyBlock]ValueBlock)}
}
// Set inserts block v under key k and records the insertion order in Keys.
// Keys already present are left untouched. When the stash exceeds 50 entries
// the oldest entry is evicted (FIFO).
func (m *BlockStash) Set(k KeyBlock, v ValueBlock) {
	blockMutex.Lock()
	defer blockMutex.Unlock()
	/*Check if the map does not contain the key*/
	if _, ok := m.M[k]; !ok {
		m.Keys = append(m.Keys, k)
		m.M[k] = v
	}
	/*When length of stash is > 50 --> Remove first added Block*/
	if len(m.M) > 50 {
		m.DeleteFirstEntry()
	}
}
// BlockIncluded reports whether a block with hash k is present in the stash.
func (m *BlockStash) BlockIncluded(k KeyBlock) bool {
	blockMutex.Lock()
	defer blockMutex.Unlock()
	_, present := m.M[k]
	return present
}
/*DeleteFirstEntry removes the oldest entry: both the map value and the order-tracking key.
No locking here because it is only called from functions that already hold blockMutex.
NOTE(review): panics if Keys is empty; the sole visible caller (Set) only invokes it
when the stash holds more than 50 entries, so Keys is guaranteed non-empty there.*/
func (m *BlockStash) DeleteFirstEntry() {
	firstBlockHash := m.Keys[0]
	if _, ok := m.M[firstBlockHash]; ok {
		delete(m.M,firstBlockHash)
	}
	// Shift the key slice left by one, reusing the backing array.
	m.Keys = append(m.Keys[:0], m.Keys[1:]...)
}
// CheckForHeightBlock counts how many blocks in the stash have the given height.
func CheckForHeightBlock(blockStash *BlockStash, height uint32) int {
	blockMutex.Lock()
	defer blockMutex.Unlock()
	count := 0
	for _, b := range blockStash.M {
		if b.Height == height {
			count++
		}
	}
	return count
}
// ReturnBlockStashForHeight collects all blocks in the stash whose height
// matches the given height.
func ReturnBlockStashForHeight(blockStash *BlockStash, height uint32) []*Block {
	blockMutex.Lock()
	defer blockMutex.Unlock()
	matches := make([]*Block, 0)
	for _, b := range blockStash.M {
		if b.Height == height {
			matches = append(matches, b)
		}
	}
	return matches
}
// ReturnBlockHashesForHeight collects the hashes of all blocks in the stash
// that have the given height.
func ReturnBlockHashesForHeight(blockStash *BlockStash, height uint32) [][32]byte {
	blockMutex.Lock()
	defer blockMutex.Unlock()
	hashes := make([][32]byte, 0)
	for _, b := range blockStash.M {
		if b.Height == height {
			hashes = append(hashes, b.Hash)
		}
	}
	return hashes
}
func ReturnBlockForPosition(blockStash *BlockStash, position int) (stateHash [32]byte, block *Block) {
blockMutex.Lock()
defer blockMutex.Unlock()
if(position > len(blockStash.Keys)-1){
return [32]byte{}, nil
}
stateStashPos := blockStash.Keys[position]
return stateStashPos, blockStash.M[stateStashPos]
} | protocol/received_block_stash.go | 0.510985 | 0.440229 | received_block_stash.go | starcoder |
package reducer
// Reduce is a generic left-to-right fold. The state begins at start; combine
// is applied to the state and every input element in turn, and Complete
// returns the final state.
func Reduce[A, B any](start B, combine func(B, A) B) Reducer[A, B] {
	return func() ReducerInstance[A, B] {
		acc := start
		step := func(a A) bool {
			acc = combine(acc, a)
			return true
		}
		complete := func() B { return acc }
		return ReducerInstance[A, B]{Complete: complete, Step: step}
	}
}
// Reduce0 behaves like Reduce but seeds the fold with the first input element.
// If the input is empty, the zero value of A is returned.
func Reduce0[A any](combine func(A, A) A) Reducer[A, A] {
	return func() ReducerInstance[A, A] {
		var acc A
		seeded := false
		return ReducerInstance[A, A]{
			Complete: func() A { return acc },
			Step: func(a A) bool {
				if seeded {
					acc = combine(acc, a)
				} else {
					acc, seeded = a, true
				}
				return true
			},
		}
	}
}
// Number constrains type parameters to the built-in numeric types — every
// integer and floating-point kind — and, via the ~ terms, any type whose
// underlying type is one of them. This is a backward-compatible widening of
// the original set (byte | int | int32 | int64 | float32 | float64); note
// byte is an alias of uint8 and rune of int32.
type Number interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64 |
		~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 |
		~float32 | ~float64
}
// Sum reduces the input numbers to their total.
func Sum[N Number]() Reducer[N, N] {
	add := func(acc N, v N) N { return acc + v }
	return Reduce(0, add)
}

// Count reduces the input to the number of elements seen.
func Count[A any]() Reducer[A, int] {
	return Reduce(0, func(acc int, _ A) int { return acc + 1 })
}
// Product reduces the input to the multiplication of all values.
// Encountering a zero short-circuits: the result is fixed at 0 and input
// processing stops early.
func Product[N Number]() Reducer[N, N] {
	return func() ReducerInstance[N, N] {
		acc := N(1)
		return ReducerInstance[N, N]{
			Complete: func() N { return acc },
			Step: func(v N) bool {
				if v == 0 {
					acc = 0
					return false
				}
				acc *= v
				return true
			},
		}
	}
}
// Average calculates the arithmetic mean of the input values, or 0 for an
// empty input.
func Average[N Number]() Reducer[N, float64] {
	return func() ReducerInstance[N, float64] {
		sum := 0.0
		count := 0.0
		return ReducerInstance[N, float64]{
			Complete: func() float64 {
				// Guard on count (not sum, as before) so that an empty input
				// yields 0 instead of the NaN that 0/0 would produce. For
				// non-empty input with a zero sum the mean is 0 either way,
				// so observable behavior is unchanged.
				if count == 0. {
					return 0.
				}
				return sum / count
			},
			Step: func(a N) bool {
				sum += float64(a)
				count += 1
				return true
			},
		}
	}
}
// Max reduces the input to its largest value (the zero value for empty input).
func Max[N Number]() Reducer[N, N] {
	return Reduce0(func(best, v N) N {
		if v > best {
			return v
		}
		return best
	})
}

// Min reduces the input to its smallest value (the zero value for empty input).
func Min[N Number]() Reducer[N, N] {
	return Reduce0(func(best, v N) N {
		if v < best {
			return v
		}
		return best
	})
}
// Exists reports whether some element of the input satisfies cond,
// short-circuiting as soon as a match is found.
func Exists[T any](cond func(T) bool) Reducer[T, bool] {
	return func() ReducerInstance[T, bool] {
		found := false
		return ReducerInstance[T, bool]{
			Complete: func() bool { return found },
			Step: func(v T) bool {
				if !cond(v) {
					return true
				}
				found = true
				return false // stop early: the answer cannot change
			},
		}
	}
}

// Forall reports whether every element of the input satisfies cond,
// short-circuiting on the first counter-example.
func Forall[T any](cond func(T) bool) Reducer[T, bool] {
	return func() ReducerInstance[T, bool] {
		holds := true
		return ReducerInstance[T, bool]{
			Complete: func() bool { return holds },
			Step: func(v T) bool {
				if cond(v) {
					return true
				}
				holds = false
				return false // stop early: the answer cannot change
			},
		}
	}
}
// DoErr runs f on each input element until f returns a non-nil error, which
// becomes the result of the reduction (nil if every call succeeded).
func DoErr[A any](f func(A) error) Reducer[A, error] {
	return func() ReducerInstance[A, error] {
		var lastErr error
		return ReducerInstance[A, error]{
			Complete: func() error { return lastErr },
			Step: func(a A) bool {
				lastErr = f(a)
				return lastErr == nil // abort on the first failure
			},
		}
	}
}
// Do executes the function f for all elements in the input.
func Do[A any](f func(A)) Reducer[A, struct{}] {
return func() ReducerInstance[A, struct{}] {
return ReducerInstance[A, struct{}]{
Complete: func() struct{} {
return struct{}{}
},
Step: func(a A) bool {
f(a)
return true
},
}
}
} | reducer/aggregates.go | 0.785925 | 0.737442 | aggregates.go | starcoder |
package set
// predicatedFunctions is the code-generation template for the predicate-based
// helpers (Filter, Partition, CountBy, MinBy, MaxBy) of a generated set type.
// Placeholders: {{.TName}} is the set's type-name prefix and {{.PName}} its
// element type.
const predicatedFunctions = `
// Filter returns a new {{.TName}}Set whose elements return true for func.
func (set {{.TName}}Set) Filter(fn func({{.PName}}) bool) {{.TName}}Collection {
	result := make(map[{{.PName}}]struct{})
	for v := range set {
		if fn(v) {
			result[v] = struct{}{}
		}
	}
	return {{.TName}}Set(result)
}

// Partition returns two new {{.TName}}Lists whose elements return true or false for the predicate, p.
// The first result consists of all elements that satisfy the predicate and the second result consists of
// all elements that don't. The relative order of the elements in the results is the same as in the
// original set.
func (set {{.TName}}Set) Partition(p func({{.PName}}) bool) ({{.TName}}Collection, {{.TName}}Collection) {
	matching := make(map[{{.PName}}]struct{})
	others := make(map[{{.PName}}]struct{})
	for v := range set {
		if p(v) {
			matching[v] = struct{}{}
		} else {
			others[v] = struct{}{}
		}
	}
	return {{.TName}}Set(matching), {{.TName}}Set(others)
}

// CountBy gives the number elements of {{.TName}}Set that return true for the passed predicate.
func (set {{.TName}}Set) CountBy(predicate func({{.PName}}) bool) (result int) {
	for v := range set {
		if predicate(v) {
			result++
		}
	}
	return
}

// MinBy returns an element of {{.TName}}Set containing the minimum value, when compared to other elements
// using a passed func defining ‘less’. In the case of multiple items being equally minimal, the first such
// element is returned. Panics if there are no elements.
func (set {{.TName}}Set) MinBy(less func({{.PName}}, {{.PName}}) bool) (result {{.PName}}) {
	l := len(set)
	if l == 0 {
		panic("Cannot determine the minimum of an empty set.")
	}
	first := true
	for v := range set {
		if first {
			first = false
			result = v
		} else if less(v, result) {
			result = v
		}
	}
	return
}

// MaxBy returns an element of {{.TName}}Set containing the maximum value, when compared to other elements
// using a passed func defining ‘less’. In the case of multiple items being equally maximal, the last such
// element is returned. Panics if there are no elements.
func (set {{.TName}}Set) MaxBy(less func({{.PName}}, {{.PName}}) bool) (result {{.PName}}) {
	l := len(set)
	if l == 0 {
		panic("Cannot determine the maximum of an empty set.")
	}
	first := true
	for v := range set {
		if first {
			first = false
			result = v
		} else if less(result, v) {
			result = v
		}
	}
	return
}
`
package tonacity
import (
"fmt"
"sort"
)
// Interval The type to use when specifying an interval in a key.
type Interval uint8
const (
	// These are intervals in the context of a key, which is made up of seven notes.
	// First For specifying the root tone. Here for completeness, not actually useful.
	First Interval = 1
	// Second For specifying the interval of a Second.
	Second Interval = 2
	// Third For specifying the interval of a Third.
	Third Interval = 3
	// Fourth For specifying the interval of a Fourth.
	Fourth Interval = 4
	// Fifth For specifying the interval of a Fifth.
	Fifth Interval = 5
	// Sixth For specifying the interval of a Sixth.
	Sixth Interval = 6
	// Seventh For specifying the interval of a Seventh. Limit on a piano for average female hands.
	Seventh Interval = 7
	// Ninth For specifying the interval of a Ninth. Limit on a piano for average male hands.
	Ninth Interval = 9
	// Eleventh For specifying the interval of an Eleventh. You need large hands to reach this on a piano.
	Eleventh Interval = 11
	// Thirteenth For specifying the interval of a Thirteenth. On a piano this is 29cm on the white keys, Rachmaninov (6'6") and Liszt could manage it.
	Thirteenth Interval = 13
	// These are the half-step distances from the First.
	// They are expressed in terms of HalfStepValue/WholeStepValue, which are
	// defined elsewhere in this package.
	// MinorSecond The interval of a minor second, in half steps.
	MinorSecond = HalfStepValue
	// MajorSecond The interval of a major second, in half steps.
	MajorSecond = WholeStepValue
	// MinorThird The interval of a minor third, in half steps.
	MinorThird = HalfStepValue * 3
	// MajorThird The interval of a major third, in half steps.
	MajorThird = HalfStepValue * 4
	// PerfectFourth The interval of a perfect fourth, in half steps.
	PerfectFourth = HalfStepValue * 5
	// PerfectFifth The interval of a perfect fifth, in half steps.
	PerfectFifth = HalfStepValue * 7
)
// Chord A collection of specific pitches, making a chord.
type Chord struct {
	pitches []Pitch // The pitches that make up this chord
}

// MakeChord builds a Chord from the given pitches.
func MakeChord(pitches ...Pitch) *Chord {
	return &Chord{pitches: pitches}
}

// String renders the chord as the default formatting of its pitch slice.
func (c *Chord) String() string {
	return fmt.Sprint(c.pitches)
}
// ChordFactory The purpose of this class is to allow creating chords using scale intervals, without needing to worry
// about half steps, e.g., specify "third" without having to know if it's a major (2 steps) or minor (three half steps) third.
type ChordFactory struct {
	pattern Pattern // The pattern of the scale, which must be diatonic
	root    *Pitch  // The pitch to apply the pattern from
	offset  int     // The offset into the scale of the root (added to interval lookups — presumably in scale degrees)
}
// GetPitch Get the pitch that is the given interval from this factory's root. The interval should be 1 (a first,
// which will return the same pitch) or higher. One is zero? Yes, that's just how music works ¯\_(ツ)_/¯.
func (f *ChordFactory) GetPitch(interval Interval) *Pitch {
	// Accumulate the half steps spanned by the scale pattern from the pattern
	// start up to the requested interval (shifted by the factory's offset).
	var halfSteps HalfSteps
	for i := 1; i < int(interval)+f.offset; i++ {
		halfSteps += f.pattern.At(i - 1)
	}
	// NOTE(review): the summation always starts at pattern index 0 rather than
	// at f.offset, which is only correct if root coincides with the pattern
	// start — confirm against how ChordFactory instances are constructed.
	return f.root.GetTransposedCopy(halfSteps)
}
// GetIntervalSize Gets the size of the given interval in half steps,
// measured from this factory's root pitch.
func (f *ChordFactory) GetIntervalSize(interval Interval) HalfSteps {
	return f.root.GetDistanceTo(f.GetPitch(interval))
}

// ContainsInterval Returns true if the pitch at the given interval is the given number of half steps from its root.
func (f *ChordFactory) ContainsInterval(interval Interval, halfSteps HalfSteps) bool {
	return f.GetIntervalSize(interval) == halfSteps
}
// HasMajorThird Returns true if this factory's third is a major third, false if it is a minor third.
func (f *ChordFactory) HasMajorThird() bool {
	return f.ContainsInterval(Third, MajorThird)
}

// HasPerfectFourth Returns true if this factory's fourth is a perfect fourth,
// false otherwise (e.g. an augmented fourth).
func (f *ChordFactory) HasPerfectFourth() bool {
	return f.ContainsInterval(Fourth, PerfectFourth)
}

// HasPerfectFifth Returns true if this factory's fifth is a perfect fifth,
// false otherwise (e.g. a diminished fifth).
func (f *ChordFactory) HasPerfectFifth() bool {
	return f.ContainsInterval(Fifth, PerfectFifth)
}
// CreateChord Create a chord from the pitches found at the specified
// scale intervals of this factory.
func (f *ChordFactory) CreateChord(intervals ...Interval) *Chord {
	pitches := make([]Pitch, 0, len(intervals))
	for _, interval := range intervals {
		pitches = append(pitches, *f.GetPitch(interval))
	}
	return &Chord{pitches}
}
// Given a set of two or more pitch classes, how do we determine what the chord is?
// Does the key matter? Only in determining the Roman Numeral to use.
// Only the pattern of half step intervals matters.
// For each (type of) chord we know, we can iterate through all its inversions, adding them to a trie.
// createTriadPattern builds a two-interval pattern (root→third→fifth) from
// the two stacked thirds that define a triad.
func createTriadPattern(third1 HalfSteps, third2 HalfSteps) *Pattern {
	// Necessary to make a pattern that spans exactly an octave, as the third interval is required for chord inversions
	// NOTE(review): only two intervals are passed to MakePattern although the
	// comment above mentions a third — presumably MakePattern or Invert
	// completes the octave internally; confirm.
	return MakePattern(third1, third2)
}

// createTetrachordPattern builds a three-interval pattern for a seventh
// chord from its three stacked thirds.
func createTetrachordPattern(third1 HalfSteps, third2 HalfSteps, third3 HalfSteps) *Pattern {
	// Necessary to make a pattern that spans exactly an octave, as the third interval is required for chord inversions
	return MakePattern(third1, third2, third3)
}
// CreatePowerChordPattern Creates the pattern for a "power chord", consisting of the root and the fifth.
func CreatePowerChordPattern() *Pattern {
	return MakePattern(PerfectFifth)
}

// CreateMajorTriadPattern Creates the pattern for a Major Triad (a major
// third with a minor third stacked on top).
func CreateMajorTriadPattern() *Pattern {
	return createTriadPattern(MajorThird, MinorThird)
}

// CreateMinorTriadPattern Creates the pattern for a minor triad (a minor
// third with a major third stacked on top).
func CreateMinorTriadPattern() *Pattern {
	return createTriadPattern(MinorThird, MajorThird)
}

// CreateDiminishedTriadPattern Creates the pattern for a diminished triad (two stacked minor
// thirds). Only one of these exists in a key (VII in major, ii in minor).
func CreateDiminishedTriadPattern() *Pattern {
	return createTriadPattern(MinorThird, MinorThird)
}

// CreateAugmentedTriadPattern Creates the pattern for an augmented triad (two stacked major thirds).
func CreateAugmentedTriadPattern() *Pattern {
	return createTriadPattern(MajorThird, MajorThird)
}

// CreateSuspendedPattern Creates the pattern for a suspended second triad, where the third is omitted, and a fourth/second is added.
// Important: A suspended second is equivalent to a suspended fourth with the fifth as the root.
// NOTE(review): the intervals (perfect fourth, then major second) spell root,
// fourth, fifth from the root — i.e. a sus4 voicing; per the equivalence
// above the same pattern covers sus2 via inversion.
func CreateSuspendedPattern() *Pattern {
	return createTriadPattern(PerfectFourth, MajorSecond)
}

// CreateMajorSeventhPattern Creates the pattern for a Major Seventh Chord.
func CreateMajorSeventhPattern() *Pattern {
	return createTetrachordPattern(MajorThird, MinorThird, MajorThird)
}

// CreateDominantSeventhPattern Creates the pattern for a Dominant Seventh Chord.
func CreateDominantSeventhPattern() *Pattern {
	return createTetrachordPattern(MajorThird, MinorThird, MinorThird)
}

// CreateMinorSeventhPattern Creates the pattern for a Minor Seventh Chord.
func CreateMinorSeventhPattern() *Pattern {
	return createTetrachordPattern(MinorThird, MajorThird, MinorThird)
}

// CreateDiminishedSeventhPattern Creates the pattern for a Diminished Seventh Chord.
func CreateDiminishedSeventhPattern() *Pattern {
	return createTetrachordPattern(MinorThird, MinorThird, MinorThird)
}
type chordDictionaryEntry struct {
	name      string // suffix appended to the root pitch name, e.g. " Major"
	rootIndex int    // index of the chord's root among the sorted pitches; 0 = root position
}

// addChordWithInversionsToDict registers the chord pattern and each of its
// inversions under the same name, recording which pitch index is the root
// for every inversion.
func addChordWithInversionsToDict(dict *PatternDictionary, chord *Pattern, name string) {
	// Add the chord in root position
	dict.AddPattern(chord, &chordDictionaryEntry{name, 0})
	for i := chord.Length(); i > 0; i-- {
		// Get the next inversion of the chord
		chord.Invert()
		// dict doesn't keep reference to chord, so mutations after the call don't matter
		dict.AddPattern(chord, &chordDictionaryEntry{name, i})
	}
}

// addChordToDict registers the chord pattern in root position only; used for
// chords whose inversions are the same chord type with a different root.
func addChordToDict(dict *PatternDictionary, chord *Pattern, name string) {
	dict.AddPattern(chord, &chordDictionaryEntry{name, 0})
}
// CreateChordDictionary Creates a new dictionary for the purpose of naming chords based on the half step intervals between pitches.
func CreateChordDictionary() (dict *PatternDictionary) {
	// Trie keys are interval sizes: from one half step up to an octave.
	dict = &PatternDictionary{NewTrie(1, OctaveValue)}
	addChordWithInversionsToDict(dict, CreatePowerChordPattern(), "5")
	addChordWithInversionsToDict(dict, CreateMajorTriadPattern(), " Major")           // 4, 3
	addChordWithInversionsToDict(dict, CreateMinorTriadPattern(), " Minor")           // 3, 4
	addChordWithInversionsToDict(dict, CreateDiminishedTriadPattern(), " Diminished") // 3, 3
	// Inversion of an augmented chord is an augmented chord with a different root (because 12-(4+4)=4)
	addChordToDict(dict, CreateAugmentedTriadPattern(), " Augmented") // 4, 4
	addChordWithInversionsToDict(dict, CreateSuspendedPattern(), " Suspended")
	addChordWithInversionsToDict(dict, CreateMajorSeventhPattern(), " Major Seventh")
	addChordWithInversionsToDict(dict, CreateDominantSeventhPattern(), " Dominant Seventh")
	addChordWithInversionsToDict(dict, CreateMinorSeventhPattern(), " Minor Seventh")
	addChordWithInversionsToDict(dict, CreateDiminishedSeventhPattern(), " Diminished Seventh")
	return
}
// GetName will return the name of this chord, if its intervals are a valid pattern in the given dictionary. This function is
// specifically preferable for guitars or similar, where extended chords (those with ninths - a stretch on a piano, elevenths, and thirteenths) are used more.
// Note: the receiver's pitches are sorted in place as a side effect.
func (c *Chord) GetName(dict *PatternDictionary, pitchNamer *PitchNamer) (name string, ok bool) {
	sort.Sort(ByPitch(c.pitches))
	// Half-step intervals between adjacent (sorted) pitches.
	intervals := make([]HalfSteps, len(c.pitches)-1, len(c.pitches)-1)
	for i := 0; i < len(intervals); i++ {
		intervals[i] = c.pitches[i].GetDistanceTo(&c.pitches[i+1])
	}
	entries := dict.GetEntries(&Pattern{intervals})
	for _, entry := range entries {
		// Note: this `ok` shadows the named return; only the explicit
		// returns below set the caller-visible values.
		e, ok := entry.(*chordDictionaryEntry)
		if ok {
			firstNote := pitchNamer.Name(c.pitches[0].Class())
			if e.rootIndex == 0 {
				// Chord is in root position
				name = fmt.Sprintf("%s%s", firstNote, e.name)
			} else {
				// Chord is inverted
				root := pitchNamer.Name(c.pitches[e.rootIndex].Class())
				name = fmt.Sprintf("%s%s/%s", root, e.name, firstNote)
			}
			return name, true
		}
	}
	ok = false
	return
}
// GetChordName Given a set of unique pitch classes, returns the name of the produced chord. If the chord doesn't contain a name in
// the given dictionary, then ("", false) is returned.
// As this function takes pitch classes, it cannot determine extended chord names, as the pitches would loop back around (11th -> 4th).
// It also cannot give the "/<low note>" modifier on an inverted chord, as pitch ordering is lost.
// This function is useful for when distinct pitches are far apart (left and right hands on piano) but do combine to make a chord.
func GetChordName(dict *PatternDictionary, pitchNamer *PitchNamer, chord []PitchClass) (name string, ok bool) {
// Assumption: only unique pitch classes are in chord
if len(chord) < 2 {
// I've made the executive decision to allow power chords, even though, featuring only two pitches, they
// aren't technically chords
ok = false
return
}
// 1. Sort the pitches so their values are monotonically ascending
pitchIndices := make(sort.IntSlice, len(chord), len(chord))
for i, _ := range chord {
pitchIndices[i] = int(chord[i].value)
}
pitchIndices.Sort()
// 2. Create the pattern of the intervals between pitches
pattern := make([]HalfSteps, 0, len(pitchIndices))
for i, v := range pitchIndices {
if i+1 < len(pitchIndices) {
interval := HalfSteps(pitchIndices[(i+1)] - v)
pattern = append(pattern, interval)
}
}
// 3. Look up the pattern in the dictionary
// For now we're just taking the first name; at some point, getting all the possible names will need to be an option.
entries := dict.GetEntries(&Pattern{pattern})
for _, entry := range entries {
e, ok := entry.(*chordDictionaryEntry)
if ok {
root := pitchNamer.Name(PitchClass{HalfSteps(pitchIndices[e.rootIndex])})
name = fmt.Sprintf("%s%s", root, e.name)
return name, true
}
}
ok = false
return
} | chords.go | 0.88178 | 0.61659 | chords.go | starcoder |
package signal
//go:generate go run gen.go
import (
"math"
"reflect"
"time"
)
type (
// Signal is a buffer that contains a digital representation of a
// physical signal that is a sampled and quantized.
// Signal types have semantics of go slices. They can be sliced
// and appended to each other.
Signal interface {
Capacity() int
Channels() int
Length() int
Len() int
Cap() int
BufferIndex(channel int, index int) int
Free(*PoolAllocator)
}
// Fixed is a digital signal represented with fixed-point values.
Fixed interface {
Signal
BitDepth() BitDepth
}
// Signed is a digital signal represented with signed fixed-point values.
Signed interface {
Fixed
Slice(start int, end int) Signed
Channel(channel int) Signed
Append(Signed)
AppendSample(value int64)
Sample(index int) int64
SetSample(index int, value int64)
}
// Unsigned is a digital signal represented with unsigned fixed-point values.
Unsigned interface {
Fixed
Slice(start int, end int) Unsigned
Channel(channel int) Unsigned
Append(Unsigned)
AppendSample(value uint64)
Sample(index int) uint64
SetSample(index int, value uint64)
}
// Floating is a digital signal represented with floating-point values.
Floating interface {
Signal
Slice(start int, end int) Floating
Channel(channel int) Floating
Append(Floating)
AppendSample(value float64)
Sample(index int) float64
SetSample(index int, value float64)
}
// Allocator provides allocation of various signal buffers.
Allocator struct {
Channels int
Length int
Capacity int
}
)
// types for buffer properties.
type (
bitDepth BitDepth
channels int
)
// BitDepth is the number of bits of information in each sample.
type BitDepth uint8

const (
	// BitDepth4 is 4 bit depth.
	BitDepth4 BitDepth = 1 << (iota + 2)
	// BitDepth8 is 8 bit depth.
	BitDepth8
	// BitDepth16 is 16 bit depth.
	BitDepth16
	// BitDepth32 is 32 bit depth.
	BitDepth32
	// BitDepth64 is 64 bit depth.
	BitDepth64
	// BitDepth24 is 24 bit depth.
	BitDepth24 BitDepth = 24
	// MaxBitDepth is a maximum supported bit depth.
	MaxBitDepth BitDepth = BitDepth64
)

// MaxSignedValue returns the largest signed sample value representable at
// this bit depth, i.e. 2^(b-1)-1. A zero bit depth yields 0.
func (b BitDepth) MaxSignedValue() int64 {
	if b == 0 {
		return 0
	}
	return (int64(1) << (b - 1)) - 1
}

// MaxUnsignedValue returns the largest unsigned sample value representable
// at this bit depth, i.e. 2^b-1. A zero bit depth yields 0.
func (b BitDepth) MaxUnsignedValue() uint64 {
	if b == 0 {
		return 0
	}
	return (uint64(1) << b) - 1
}

// MinSignedValue returns the smallest signed sample value representable at
// this bit depth, i.e. -2^(b-1). A zero bit depth yields 0.
func (b BitDepth) MinSignedValue() int64 {
	switch b {
	case 0:
		return 0
	default:
		return -1 << (b - 1)
	}
}

// UnsignedValue clips the unsigned signal value to the given bit depth
// range.
func (b BitDepth) UnsignedValue(val uint64) uint64 {
	if limit := b.MaxUnsignedValue(); val > limit {
		return limit
	}
	return val
}

// SignedValue clips the signed signal value to the given bit depth range.
func (b BitDepth) SignedValue(val int64) int64 {
	lo, hi := b.MinSignedValue(), b.MaxSignedValue()
	if val < lo {
		return lo
	}
	if val > hi {
		return hi
	}
	return val
}

// Scale returns scale for bit depth requantization: 2^(high-low).
func Scale(high, low BitDepth) int64 {
	return int64(1) << (high - low)
}
// limitBitDepth caps b at max, substituting max when b is zero (unset).
func limitBitDepth(b, max BitDepth) bitDepth {
	if b == 0 || b > max {
		return bitDepth(max)
	}
	return bitDepth(b)
}
// Frequency in Hertz is the number of occurrences of a repeating event per
// second. It might represent sample rate or pitch.
type Frequency float64

// Duration returns a total time duration for a number of events occurring
// at this frequency, rounded to the nearest nanosecond.
func (f Frequency) Duration(events int) time.Duration {
	perEvent := float64(time.Second) / float64(f)
	return time.Duration(math.Round(perEvent * float64(events)))
}

// Events returns a number of events that occur over the given time
// duration at this frequency, rounded to the nearest whole event.
func (f Frequency) Events(d time.Duration) int {
	perNanosecond := float64(f) / float64(time.Second)
	return int(math.Round(perNanosecond * float64(d)))
}
// FloatingAsFloating appends floating-point samples to the floating-point
// destination buffer. Both buffers must have the same number of channels,
// otherwise function will panic. Returns a number of samples written per
// channel.
func FloatingAsFloating(src, dst Floating) int {
	mustSameChannels(src.Channels(), dst.Channels())
	// cap length to destination capacity.
	length := min(src.Len(), dst.Len())
	if length == 0 {
		return 0
	}
	// straight copy: both buffers are floating point, no conversion needed.
	for i := 0; i < length; i++ {
		dst.SetSample(i, src.Sample(i))
	}
	return min(src.Length(), dst.Length())
}

// FloatingAsSigned converts floating-point samples into signed fixed-point
// and appends them to the destination buffer. The floating sample range
// [-1,1] is mapped to signed [-2^(bitDepth-1), 2^(bitDepth-1)-1]. Floating
// values beyond the range will be clipped. Buffers must have the same
// number of channels, otherwise function will panic. Returns a number of
// samples written per channel.
func FloatingAsSigned(src Floating, dst Signed) int {
	mustSameChannels(src.Channels(), dst.Channels())
	// cap length to destination capacity.
	length := min(src.Len(), dst.Len())
	if length == 0 {
		return 0
	}
	// determine the multiplier for bit depth conversion
	msv := dst.BitDepth().MaxSignedValue()
	for i := 0; i < length; i++ {
		var sample int64
		if f := src.Sample(i); f > 0 {
			// detect overflow: int64(f) truncates toward zero, so it is 0 iff f < 1
			if int64(f) == 0 {
				sample = int64(f * float64(msv))
			} else {
				// f >= 1: clip to the maximum signed value
				sample = msv
			}
		} else {
			// no overflow here
			// NOTE(review): values below -1 are NOT clipped on this branch,
			// despite the doc comment promising clipping — confirm callers
			// guarantee f >= -1.
			sample = int64(f * (float64(msv) + 1))
		}
		dst.SetSample(i, sample)
	}
	return min(src.Length(), dst.Length())
}

// FloatingAsUnsigned converts floating-point samples into unsigned
// fixed-point and appends them to the destination buffer. The floating
// sample range [-1,1] is mapped to unsigned [0, 2^bitDepth-1]. Floating
// values beyond the range will be clipped. Buffers must have the same
// number of channels, otherwise function will panic.
func FloatingAsUnsigned(src Floating, dst Unsigned) int {
	mustSameChannels(src.Channels(), dst.Channels())
	// cap length to destination capacity.
	length := min(src.Len(), dst.Len())
	if length == 0 {
		return 0
	}
	// msv is half the unsigned range; offset shifts signed results into [0, 2^bitDepth-1].
	msv := uint64(dst.BitDepth().MaxSignedValue())
	offset := msv + 1
	for i := 0; i < length; i++ {
		var sample uint64
		if f := src.Sample(i); f > 0 {
			// detect overflow: int64(f) truncates toward zero, so it is 0 iff f < 1
			if int64(f) == 0 {
				sample = uint64(f*float64(msv)) + offset
			} else {
				// f >= 1: clip to the maximum unsigned value
				sample = msv + offset
			}
		} else {
			// no overflow here
			// NOTE(review): for f < 0 the product is negative, and converting a
			// negative float64 straight to uint64 is implementation-specific in
			// Go; computing in int64 first and then offsetting would be
			// portable — confirm on supported platforms.
			sample = uint64(f*(float64(msv)+1)) + offset
		}
		dst.SetSample(i, sample)
	}
	return min(src.Length(), dst.Length())
}
// SignedAsFloating converts signed fixed-point samples into floating-point
// and appends them to the destination buffer. The signed sample range
// [-2^(bitDepth-1), 2^(bitDepth-1)-1] is mapped to floating [-1,1].
// Buffers must have the same number of channels, otherwise function will
// panic.
func SignedAsFloating(src Signed, dst Floating) int {
	mustSameChannels(src.Channels(), dst.Channels())
	// cap length to destination capacity.
	length := min(src.Len(), dst.Len())
	if length == 0 {
		return 0
	}
	// determine the divider for bit depth conversion.
	// Positive values divide by msv, negatives by msv+1, because the signed
	// range is asymmetric; this keeps results within [-1, 1].
	msv := float64(src.BitDepth().MaxSignedValue())
	for i := 0; i < length; i++ {
		if sample := src.Sample(i); sample > 0 {
			dst.SetSample(i, float64(sample)/msv)
		} else {
			dst.SetSample(i, float64(sample)/(msv+1))
		}
	}
	return min(src.Length(), dst.Length())
}

// SignedAsSigned appends signed fixed-point samples to the signed
// fixed-point destination buffer. The samples are quantized to the
// destination bit depth. Buffers must have the same number of channels,
// otherwise function will panic.
func SignedAsSigned(src, dst Signed) int {
	mustSameChannels(src.Channels(), dst.Channels())
	// cap length to destination capacity.
	length := min(src.Len(), dst.Len())
	if length == 0 {
		return 0
	}
	// downscale
	if src.BitDepth() >= dst.BitDepth() {
		scale := Scale(src.BitDepth(), dst.BitDepth())
		for i := 0; i < length; i++ {
			dst.SetSample(i, src.Sample(i)/scale)
		}
		return min(src.Length(), dst.Length())
	}
	// upscale
	scale := Scale(dst.BitDepth(), src.BitDepth())
	for i := 0; i < length; i++ {
		if sample := src.Sample(i); sample > 0 {
			// (s+1)*scale-1 maps the source maximum exactly onto the
			// destination maximum, compensating for the asymmetric range.
			dst.SetSample(i, (src.Sample(i)+1)*scale-1)
		} else {
			dst.SetSample(i, src.Sample(i)*scale)
		}
	}
	return min(src.Length(), dst.Length())
}

// SignedAsUnsigned converts signed fixed-point samples into unsigned
// fixed-point and appends them to the destination buffer. The samples are
// quantized to the destination bit depth. The signed sample range
// [-2^(bitDepth-1), 2^(bitDepth-1)-1] is mapped to unsigned [0,
// 2^bitDepth-1]. Buffers must have the same number of channels, otherwise
// function will panic.
func SignedAsUnsigned(src Signed, dst Unsigned) int {
	mustSameChannels(src.Channels(), dst.Channels())
	// cap length to destination capacity.
	length := min(src.Len(), dst.Len())
	if length == 0 {
		return 0
	}
	// msv+1 is the offset that biases signed values into the unsigned range.
	msv := uint64(dst.BitDepth().MaxSignedValue())
	// downscale
	if src.BitDepth() >= dst.BitDepth() {
		scale := Scale(src.BitDepth(), dst.BitDepth())
		for i := 0; i < length; i++ {
			dst.SetSample(i, uint64(src.Sample(i)/scale)+msv+1)
		}
		return min(src.Length(), dst.Length())
	}
	// upscale
	scale := Scale(dst.BitDepth(), src.BitDepth())
	for i := 0; i < length; i++ {
		if sample := src.Sample(i); sample > 0 {
			dst.SetSample(i, uint64((src.Sample(i)+1)*scale)+msv)
		} else {
			dst.SetSample(i, uint64(src.Sample(i)*scale)+msv+1)
		}
	}
	return min(src.Length(), dst.Length())
}
// UnsignedAsFloating converts unsigned fixed-point samples into
// floating-point and appends them to the destination buffer. The unsigned
// sample range [0, 2^bitDepth-1] is mapped to floating [-1,1]. Buffers
// must have the same number of channels, otherwise function will panic.
func UnsignedAsFloating(src Unsigned, dst Floating) int {
	mustSameChannels(src.Channels(), dst.Channels())
	// cap length to destination capacity.
	length := min(src.Len(), dst.Len())
	if length == 0 {
		return 0
	}
	// determine the multiplier for bit depth conversion
	msv := float64(src.BitDepth().MaxSignedValue())
	for i := 0; i < length; i++ {
		// NOTE(review): the split below compares the raw unsigned sample
		// against 0 rather than against the midpoint msv+1, so below-midpoint
		// samples (except 0) divide by msv where SignedAsFloating would divide
		// by msv+1 — confirm this asymmetry is intended.
		if sample := src.Sample(i); sample > 0 {
			dst.SetSample(i, (float64(sample)-(msv+1))/msv)
		} else {
			dst.SetSample(i, (float64(sample)-(msv+1))/(msv+1))
		}
	}
	return min(src.Length(), dst.Length())
}

// UnsignedAsSigned converts unsigned fixed-point samples into signed
// fixed-point and appends them to the destination buffer. The samples are
// quantized to the destination bit depth. The unsigned sample range [0,
// 2^bitDepth-1] is mapped to signed [-2^(bitDepth-1), 2^(bitDepth-1)-1].
// Buffers must have the same number of channels, otherwise function will
// panic.
func UnsignedAsSigned(src Unsigned, dst Signed) int {
	mustSameChannels(src.Channels(), dst.Channels())
	// cap length to destination capacity.
	length := min(src.Len(), dst.Len())
	if length == 0 {
		return 0
	}
	// msv+1 is the midpoint of the unsigned range; subtracting it recenters
	// samples around zero.
	msv := uint64(src.BitDepth().MaxSignedValue())
	// downscale
	if src.BitDepth() >= dst.BitDepth() {
		scale := Scale(src.BitDepth(), dst.BitDepth())
		for i := 0; i < length; i++ {
			dst.SetSample(i, int64(src.Sample(i)-(msv+1))/scale)
		}
		return min(src.Length(), dst.Length())
	}
	// upscale
	scale := Scale(dst.BitDepth(), src.BitDepth())
	for i := 0; i < length; i++ {
		if sample := int64(src.Sample(i) - (msv + 1)); sample > 0 {
			dst.SetSample(i, (sample+1)*scale-1)
		} else {
			dst.SetSample(i, sample*scale)
		}
	}
	return min(src.Length(), dst.Length())
}

// UnsignedAsUnsigned appends unsigned fixed-point samples to the unsigned
// fixed-point destination buffer. The samples are quantized to the
// destination bit depth. Buffers must have the same number of channels,
// otherwise function will panic.
func UnsignedAsUnsigned(src, dst Unsigned) int {
	mustSameChannels(src.Channels(), dst.Channels())
	// cap length to destination capacity.
	length := min(src.Len(), dst.Len())
	if length == 0 {
		return 0
	}
	// downscale
	if src.BitDepth() >= dst.BitDepth() {
		scale := uint64(Scale(src.BitDepth(), dst.BitDepth()))
		for i := 0; i < length; i++ {
			dst.SetSample(i, src.Sample(i)/scale)
		}
		return min(src.Length(), dst.Length())
	}
	// upscale
	scale := uint64(Scale(dst.BitDepth(), src.BitDepth()))
	// msv+1 is the midpoint of the source's unsigned range.
	msv := uint64(src.BitDepth().MaxSignedValue())
	for i := 0; i < length; i++ {
		var sample uint64
		if sample = src.Sample(i); sample > msv+1 {
			// above midpoint: map full-scale to full-scale exactly
			dst.SetSample(i, (sample+1)*scale-1)
		} else {
			dst.SetSample(i, sample*scale)
		}
	}
	return min(src.Length(), dst.Length())
}
// BitDepth returns bit depth of the buffer. The bitDepth property type is
// presumably embedded by fixed-point buffer implementations to satisfy the
// Fixed interface.
func (bd bitDepth) BitDepth() BitDepth {
	return BitDepth(bd)
}

// Channels returns number of channels in the buffer. The channels property
// type is presumably embedded by buffer implementations to satisfy Signal.
func (c channels) Channels() int {
	return int(c)
}
// capFloat clamps v to the valid floating-point sample range [-1, 1].
func capFloat(v float64) float64 {
	switch {
	case v > 1:
		return 1
	case v < -1:
		return -1
	default:
		return v
	}
}

// min returns the smaller of two ints.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
// mustSameChannels panics when two buffers disagree on channel count.
func mustSameChannels(c1, c2 int) {
	if c1 != c2 {
		panic("different number of channels")
	}
}

// mustSameBitDepth panics when two buffers disagree on bit depth.
func mustSameBitDepth(bd1, bd2 BitDepth) {
	if bd1 != bd2 {
		panic("different bit depth")
	}
}

// mustSameCapacity panics when two buffers disagree on capacity.
func mustSameCapacity(c1, c2 int) {
	if c1 != c2 {
		panic("different buffer capacity")
	}
}
// ChannelLength calculates a channel length for provided buffer length and
// number of channels: the ceiling of sliceLen/channels. Computed in integer
// arithmetic so very large lengths do not lose precision to float64
// rounding (the previous math.Ceil-based version could). channels must be
// positive.
func ChannelLength(sliceLen, channels int) int {
	return (sliceLen + channels - 1) / channels
}
// BufferIndex calculates sample index in the buffer based on number of
// channels in the buffer, channel of the sample and sample index in the
// channel. Samples are interleaved: all channels of frame 0, then frame 1,
// and so on.
func (c channels) BufferIndex(channel, idx int) int {
	return int(c)*idx + channel
}
// WriteInt writes values from provided slice into the buffer.
// Returns a number of samples written per channel.
func WriteInt(src []int, dst Signed) int {
	// write no more samples than both the slice and the buffer hold.
	length := min(dst.Len(), len(src))
	for i := 0; i < length; i++ {
		dst.SetSample(i, int64(src[i]))
	}
	return ChannelLength(length, dst.Channels())
}

// WriteStripedInt writes values from provided slice into the buffer.
// The length of provided slice must be equal to the number of channels,
// otherwise function will panic. Nested slices can be nil, zero values for
// that channel will be written. Returns a number of samples written for
// the longest channel.
func WriteStripedInt(src [][]int, dst Signed) (written int) {
	mustSameChannels(dst.Channels(), len(src))
	// determine the length of longest nested slice
	for i := range src {
		if len(src[i]) > written {
			written = len(src[i])
		}
	}
	// limit a number of writes to the length of the buffer
	written = min(written, dst.Length())
	for c := 0; c < dst.Channels(); c++ {
		for i := 0; i < written; i++ {
			if i < len(src[c]) {
				dst.SetSample(dst.BufferIndex(c, i), int64(src[c][i]))
			} else {
				// channel shorter than the longest one: pad with zeros.
				dst.SetSample(dst.BufferIndex(c, i), 0)
			}
		}
	}
	return
}

// WriteUint writes values from provided slice into the buffer.
// Returns a number of samples written per channel.
func WriteUint(src []uint, dst Unsigned) int {
	length := min(dst.Len(), len(src))
	for i := 0; i < length; i++ {
		dst.SetSample(i, uint64(src[i]))
	}
	return ChannelLength(length, dst.Channels())
}

// WriteStripedUint writes values from provided slice into the buffer.
// The length of provided slice must be equal to the number of channels,
// otherwise function will panic. Nested slices can be nil, zero values for
// that channel will be written. Returns a number of samples written for
// the longest channel.
func WriteStripedUint(src [][]uint, dst Unsigned) (written int) {
	mustSameChannels(dst.Channels(), len(src))
	// determine the length of longest nested slice
	for i := range src {
		if len(src[i]) > written {
			written = len(src[i])
		}
	}
	// limit a number of writes to the length of the buffer
	written = min(written, dst.Length())
	for c := 0; c < dst.Channels(); c++ {
		for i := 0; i < written; i++ {
			if i < len(src[c]) {
				dst.SetSample(dst.BufferIndex(c, i), uint64(src[c][i]))
			} else {
				// channel shorter than the longest one: pad with zeros.
				dst.SetSample(dst.BufferIndex(c, i), 0)
			}
		}
	}
	return
}
// ReadInt reads values from the buffer into provided slice.
// Returns number of samples read per channel.
func ReadInt(src Signed, dst []int) int {
	length := min(src.Len(), len(dst))
	for i := 0; i < length; i++ {
		dst[i] = int(src.Sample(i))
	}
	return ChannelLength(length, src.Channels())
}

// ReadStripedInt reads values from the buffer into provided slice. The
// length of provided slice must be equal to the number of channels,
// otherwise function will panic. Nested slices can be nil, no values for
// that channel will be read. Returns a number of samples read for the
// longest channel.
func ReadStripedInt(src Signed, dst [][]int) (read int) {
	mustSameChannels(src.Channels(), len(dst))
	for c := 0; c < src.Channels(); c++ {
		// each channel reads at most as many samples as its slice holds.
		length := min(len(dst[c]), src.Length())
		if length > read {
			read = length
		}
		for i := 0; i < length; i++ {
			dst[c][i] = int(src.Sample(src.BufferIndex(c, i)))
		}
	}
	return
}

// ReadUint reads values from the buffer into provided slice.
// Returns number of samples read per channel.
func ReadUint(src Unsigned, dst []uint) int {
	length := min(src.Len(), len(dst))
	for i := 0; i < length; i++ {
		dst[i] = uint(src.Sample(i))
	}
	return ChannelLength(length, src.Channels())
}

// ReadStripedUint reads values from the buffer into provided slice. The
// length of provided slice must be equal to the number of channels,
// otherwise function will panic. Nested slices can be nil, no values for
// that channel will be read. Returns a number of samples read for the
// longest channel.
func ReadStripedUint(src Unsigned, dst [][]uint) (read int) {
	mustSameChannels(src.Channels(), len(dst))
	for c := 0; c < src.Channels(); c++ {
		length := min(len(dst[c]), src.Length())
		if length > read {
			read = length
		}
		for i := 0; i < length; i++ {
			dst[c][i] = uint(src.Sample(src.BufferIndex(c, i)))
		}
	}
	return
}
// alignCapacity ensures that buffer capacity is aligned with number of
// channels. s must be a pointer to a slice; the capacity is truncated down
// to the nearest multiple of channels via reflection.
func alignCapacity(s interface{}, channels, c int) {
	reflect.ValueOf(s).Elem().SetCap(c - c%channels)
}
package indicators
import (
"fmt"
)
// A collection of indicators and derived FSMs.
type FsmCollection struct {
	// Array of all FSMs
	Fsms []*FsmMap
	// Map from FSM to Indicator
	Indicators map[*FsmMap]*Indicator
	// Map from activator tokens to array of FSMs. The tokens include
	// all tokens which lead out of the 'init' state for each FSM. By
	// 'active', I'm referring to any FSM which has left the 'init'
	// state.
	Activators map[Token][]*FsmMap
	// The current state of all active FSMs, maps FSM to the current
	// state string.
	State map[*FsmMap]string
}
// Dump an FSM collection showing all tracked states, one line per active
// FSM, written to standard output. Note: map iteration order is random, so
// line order varies between calls.
func (c *FsmCollection) Dump() {
	fmt.Println("State:")
	for fsm, state := range c.State {
		fmt.Println(" ", c.Indicators[fsm].Id, " in state ", state)
	}
}
// Reset forgets all existing scanning history: every FSM reverts to the
// inactive state, as when beginning a scan of something new.
func (c *FsmCollection) Reset() {
	c.State = make(map[*FsmMap]string)
}
// Update an FSM collection for a new token.
func (c *FsmCollection) Update(token Token) {
	// If the token is an activator, activate all relevant FSMs to the
	// init state. The next code segment will apply the transition from
	// init to the next state.
	if fsms, ok := c.Activators[token]; ok {
		for _, fsm := range fsms {
			// Only activate FSMs that aren't already tracked, so in-flight
			// progress is never reset. (The `ok` from the outer `if` is
			// deliberately reused here.)
			if _, ok = c.State[fsm]; !ok {
				c.State[fsm] = "init"
			}
		}
	}
	// Iterate over all active FSMs, moving to the next state if necessary.
	for fsm, state := range c.State {
		event := FsmEvent{State: state, Token: token}
		if newstate, ok := (*fsm)[event]; ok {
			c.State[fsm] = newstate
		}
	}
}
// GetHits returns the indicator for every FSM currently in the "hit"
// state. Call once scanning is complete to collect results.
func (c *FsmCollection) GetHits() []*Indicator {
	hits := []*Indicator{}
	for fsm, state := range c.State {
		if state != "hit" {
			continue
		}
		hits = append(hits, c.Indicators[fsm])
	}
	return hits
}
// Create an FSM collection from a set of indicators.
func CreateFsmCollection(ii *Indicators) *FsmCollection {
	// Initialise the FSM collection to null state, and allocate all maps.
	fsmc := FsmCollection{}
	fsmc.Indicators = map[*FsmMap]*Indicator{}
	fsmc.Activators = map[Token][]*FsmMap{}
	fsmc.State = map[*FsmMap]string{}
	// Iterate over indicators
	for _, ind := range ii.Indicators {
		// Generate the FSM for this indicator.
		fsm := ind.GenerateFsm()
		// Convert the FSM to its 'map' form.
		fsmm := fsm.Mapify()
		// Add mapping from FSM to corresponding indicator.
		fsmc.Indicators[fsmm] = ind
		// Get activator terms
		activs := fsm.GetActivators()
		// Add activator terms to the activator map.
		for _, activ := range activs {
			// Note: this nil initialisation is redundant (append on a missing
			// key already starts a fresh slice) but harmless.
			if _, ok := fsmc.Activators[activ]; !ok {
				fsmc.Activators[activ] = nil
			}
			fsmc.Activators[activ] = append(fsmc.Activators[activ],
				fsmm)
		}
		// Append FSM to FSM list.
		fsmc.Fsms = append(fsmc.Fsms, fsmm)
	}
	return &fsmc
}
package feedforward
import (
"fmt"
"math"
)
// Represents a type which holds information about the current Iteration of an iterative algorithm.
// GetIteration returns the current Iteration number.
// GetScore returns a loss function score (lower is better).
type IterationStatistic interface {
	GetIteration() int
	GetScore() float64
}

// Scorer is a function which computes and returns a loss function score.
type Scorer func() float64

// iterationStatistic implements IterationStatistic, holding the current
// iteration number and a Scorer which computes the loss score on demand.
// A Scorer is used instead of a stored score to enable lazy evaluation:
// scores can be expensive to compute and not every consumer of
// IterationStatistic calls GetScore. The first GetScore call caches the
// computed value in scoreCache.
type iterationStatistic struct {
	iteration  int     // current iteration number
	scorer     Scorer  // lazily-invoked score computation
	scoreCache float64 // cached score; NaN until GetScore is first called
}

// NewIterationStatistic constructs an iterationStatistic whose score has
// not yet been computed; scoreCache starts as NaN, the "unset" sentinel.
func NewIterationStatistic(iteration int, scorer Scorer) IterationStatistic {
	return &iterationStatistic{iteration: iteration, scorer: scorer, scoreCache: math.NaN()}
}

// GetIteration gets the iteration number of this iterationStatistic.
func (i *iterationStatistic) GetIteration() int {
	return i.iteration
}

// GetScore returns the loss score, invoking the Scorer only on the first
// call and returning the cached value thereafter.
//
// Bug fix: the previous guard `!(i.scoreCache == math.NaN())` was always
// true, because NaN never compares equal to anything (including NaN), so
// the scorer ran on every call and the cache never took effect. math.IsNaN
// is the correct "unset" test. (If the scorer itself returns NaN it will be
// re-invoked on each call, which is consistent with NaN being the sentinel.)
func (i *iterationStatistic) GetScore() float64 {
	if math.IsNaN(i.scoreCache) {
		i.scoreCache = i.scorer()
	}
	return i.scoreCache
}
// ModelObserver is an observer of an iterative process; Update is invoked
// with the statistic published for each iteration.
type ModelObserver interface {
	Update(statistic IterationStatistic)
}

// ModelSubject is a publisher of iteration statistics.
// It provides methods for adding, removing and notifying any number of
// observers.
type ModelSubject interface {
	AddObserver(observer ModelObserver)
	RemoveObserver(observer ModelObserver)
	NotifyObservers(statistic IterationStatistic)
}

// BaseSubject implements the entire ModelSubject interface and is embedded
// by types which publish iteration statistics.
//
// NOTE(review): the observers slice is not synchronized; confirm callers
// never mutate the observer set from multiple goroutines concurrently.
type BaseSubject struct {
	observers []ModelObserver
}

// AddObserver appends an observer to the subject's observer list.
func (s *BaseSubject) AddObserver(observer ModelObserver) {
	s.observers = append(s.observers, observer)
}
// RemoveObserver detaches the given observer from the subject. The last
// observer is swapped into the removed slot, so observer order is not
// preserved. If the observer is not attached, the call is a no-op.
//
// Fixes two bugs in the original implementation:
//   - when the observer was not found, index stayed -1 and indexing
//     s.observers[-1] panicked;
//   - the final reslice used s.observers[0:len(s.observers)], which keeps
//     the full length, so the observer was never actually removed.
func (s *BaseSubject) RemoveObserver(observer ModelObserver) {
	for i, attached := range s.observers {
		if attached != observer {
			continue
		}
		last := len(s.observers) - 1
		s.observers[i] = s.observers[last]
		s.observers[last] = nil // drop the reference so it can be GC'd
		s.observers = s.observers[:last]
		return
	}
}
// NotifyObservers delivers the given statistic to every attached observer,
// in attachment order, by calling each observer's Update method.
func (s *BaseSubject) NotifyObservers(statistic IterationStatistic) {
	for i := range s.observers {
		s.observers[i].Update(statistic)
	}
}
// nthIterObserver wraps a ModelObserver and forwards a statistic to it only
// when the iteration number is a multiple of iter, effectively letting the
// underlying observer receive an update only every nth iteration.
type nthIterObserver struct {
	observer ModelObserver // the wrapped observer
	iter     int           // forwarding period; a zero value makes Update panic on the modulo
}

// NewNthIterationObserver wraps observer so that it only receives updates
// whose iteration number is a multiple of iter.
func NewNthIterationObserver(observer ModelObserver, iter int) ModelObserver {
	return &nthIterObserver{observer: observer, iter: iter}
}
// Update forwards the statistic to the wrapped observer when the current
// iteration number is divisible by iter; otherwise it is dropped.
func (s *nthIterObserver) Update(statistic IterationStatistic) {
	if statistic.GetIteration()%s.iter != 0 {
		return
	}
	s.observer.Update(statistic)
}
// stoutLogger is an observer which prints every statistic it receives to
// standard output. (Name presumably a typo for "stdoutLogger"; kept for
// compatibility.)
type stoutLogger struct{}

// NewStOutLogger returns an observer that logs statistics to stdout.
func NewStOutLogger() ModelObserver {
	return &stoutLogger{}
}

// Update prints the statistic's iteration number and score to stdout.
// Note: calling GetScore here forces the score to be computed.
func (s *stoutLogger) Update(statistic IterationStatistic) {
	fmt.Println(statistic.GetIteration(), statistic.GetScore())
} | observers.go | 0.86164 | 0.591989 | observers.go | starcoder |
package skillz
// Operator describes a single query predicate: the column it applies to,
// the operation to perform, and the operand value. Some operations (e.g.
// LimitOp, SkipOp, OrOp, AndOp) leave Column empty.
type Operator struct {
	Column    string    `json:"column"`
	Operation Operation `json:"operation"`
	Value     OpValue   `json:"value"`
}
// OpValue is the operand of an Operator; any value is accepted.
type OpValue interface{}

// Operation identifies a query operation supported by Operator.
type Operation int

// The set of supported operations.
const (
	EqualOp Operation = iota
	NotEqualOp
	GreaterThanOp
	GreaterThanEqualToOp
	LessThanOp
	LessThanEqualToOp
	InOp
	NotInOp
	LikeOp
	LimitOp
	OrderOp
	SkipOp
	OrOp
	AndOp
	ExistsOp
)

// AllOperations enumerates every defined Operation value.
var AllOperations = []Operation{
	EqualOp,
	NotEqualOp,
	GreaterThanOp,
	GreaterThanEqualToOp,
	LessThanOp,
	LessThanEqualToOp,
	InOp,
	NotInOp,
	LikeOp,
	LimitOp,
	OrderOp,
	SkipOp,
	OrOp,
	AndOp,
	ExistsOp,
}

// IsValid reports whether o is one of the defined Operation values.
func (o Operation) IsValid() bool {
	for _, known := range AllOperations {
		if o == known {
			return true
		}
	}
	return false
}
// NewEqualOperator builds an EqualOp predicate for column against value.
func NewEqualOperator(column string, value interface{}) *Operator {
	return &Operator{
		Column:    column,
		Operation: EqualOp,
		Value:     value,
	}
}

// NewNotEqualOperator builds a NotEqualOp predicate for column against value.
func NewNotEqualOperator(column string, value interface{}) *Operator {
	return &Operator{
		Column:    column,
		Operation: NotEqualOp,
		Value:     value,
	}
}

// NewGreaterThanOperator builds a GreaterThanOp predicate for column against value.
func NewGreaterThanOperator(column string, value interface{}) *Operator {
	return &Operator{
		Column:    column,
		Operation: GreaterThanOp,
		Value:     value,
	}
}

// NewGreaterThanEqualToOperator builds a GreaterThanEqualToOp predicate for column against value.
func NewGreaterThanEqualToOperator(column string, value interface{}) *Operator {
	return &Operator{
		Column:    column,
		Operation: GreaterThanEqualToOp,
		Value:     value,
	}
}

// NewLessThanOperator builds a LessThanOp predicate for column against value.
func NewLessThanOperator(column string, value interface{}) *Operator {
	return &Operator{
		Column:    column,
		Operation: LessThanOp,
		Value:     value,
	}
}

// NewLessThanEqualToOperator builds a LessThanEqualToOp predicate for column against value.
func NewLessThanEqualToOperator(column string, value interface{}) *Operator {
	return &Operator{
		Column:    column,
		Operation: LessThanEqualToOp,
		Value:     value,
	}
}
// Sort is a sort direction, encoded with the conventional 1 (ascending) /
// -1 (descending) values.
type Sort int

const (
	SortAsc  Sort = 1
	SortDesc Sort = -1
)

// AllSort enumerates both sort directions.
var AllSort = []Sort{
	SortAsc,
	SortDesc,
}

// IsValid reports whether e is a defined sort direction.
func (e Sort) IsValid() bool {
	switch e {
	case SortAsc, SortDesc:
		return true
	}
	return false
}

// Value returns the direction as its raw integer encoding.
func (e Sort) Value() int {
	return int(e)
}

// NewOrderOperator builds an OrderOp clause for column. It returns nil when
// sort is not a valid direction, so callers must check the result.
func NewOrderOperator(column string, sort Sort) *Operator {
	if !sort.IsValid() {
		return nil
	}
	return &Operator{
		Column:    column,
		Operation: OrderOp,
		Value:     sort.Value(),
	}
}
// NewInOperator builds an InOp predicate for column against the given values.
func NewInOperator(column string, value []interface{}) *Operator {
	return &Operator{
		Column:    column,
		Operation: InOp,
		Value:     value,
	}
}

// NewNotInOperator builds a NotInOp predicate for column against value.
func NewNotInOperator(column string, value interface{}) *Operator {
	return &Operator{
		Column:    column,
		Operation: NotInOp,
		Value:     value,
	}
}

// NewLimitOperator builds a LimitOp clause; no column applies.
func NewLimitOperator(value uint64) *Operator {
	return &Operator{
		Column:    "",
		Operation: LimitOp,
		Value:     value,
	}
}

// NewSkipOperator builds a SkipOp clause; no column applies.
func NewSkipOperator(value int64) *Operator {
	return &Operator{
		Column:    "",
		Operation: SkipOp,
		Value:     value,
	}
}

// NewOrOperator combines the given operators under an OrOp; no column applies.
func NewOrOperator(value ...*Operator) *Operator {
	return &Operator{
		Column:    "",
		Operation: OrOp,
		Value:     value,
	}
}

// NewAndOperator combines the given operators under an AndOp; no column applies.
func NewAndOperator(value ...*Operator) *Operator {
	return &Operator{
		Column:    "",
		Operation: AndOp,
		Value:     value,
	}
}

// NewExistsOperator builds an ExistsOp predicate for column with the given flag.
func NewExistsOperator(column string, value bool) *Operator {
	return &Operator{
		Column:    column,
		Operation: ExistsOp,
		Value:     value,
	}
}

// NewLikeOperator builds a LikeOp (pattern match) predicate for column.
func NewLikeOperator(column string, value string) *Operator {
	return &Operator{
		Column:    column,
		Operation: LikeOp,
		Value:     value,
	}
} | operator.go | 0.766031 | 0.436202 | operator.go | starcoder |
package books
import (
"database/sql"
"fmt"
_ "github.com/mattn/go-sqlite3" // Used with sql package.
)
const titleSearch = true // Used as a parameter when func query is called.
// Book represents a searchable book object.
type Book struct {
ID int // A different number for each book.
Title string // Book's title.
Authors string // Book's authors.
AverageRating float32 // Average rating (out of 5.)
ISBN string // 10 digit ISBN.
ISBN13 string // 13 digit ISBN.
LanguageCode string // 3-character language code.
Pages int // Number of book's pages.
RatingsCount int // Number of ratings (out of 5.)
ReviewsCount int // Number of text reviews.
}
// SearchIn contains the database and table's name to search in. Table's
// layout is specified in github.com/sudo-sturbia/bfr/internal/datastore.
type SearchIn struct {
Datastore *sql.DB // Datastore to search in.
BookTable string // Table to search in.
}
// SearchBy is a set of parameters to use when searching for books in
// a datastore.
// Not all fields have to be specifed, a search can be performed using
// only a sub-set of the fields. To ignore a string field when searching,
// leave it empty, to ignore a number set it to < 0.
// For floor/ceil values floor is exclusive, ceil is inclusive.
type SearchBy struct {
TitleHas string // A sub-string that must exist in the title.
Authors []string // Must have at least one of these authors. Ignored if nil or empty.
LanguageCode []string // Must be in at least one of these languages. Ignored if nil or empty.
ISBN string // 10 digit ISBN.
ISBN13 string // 13 digit ISBN.
RatingCeil float32 // Rating must be less than or equal.
RatingFloor float32 // Rating must be higher than.
PagesCeil int // Number of pages must be less than or equal.
PagesFloor int // Number of pages must be higher than.
RatingsCountCeil int // Number of ratings must be less than or equal.
RatingsCountFloor int // Number of ratings must be higher than.
ReviewsCountCeil int // Number of reviews must be less than or equal.
ReviewsCountFloor int // Number of reviews must be higher than.
}
// SearchByID searches for an ID in the table and database specified in
// searchIn, and returns a Book if one is found, or an error otherwise.
func SearchByID(searchIn *SearchIn, id int) (*Book, error) {
	search := fmt.Sprintf("select * from %s where id = ?;", searchIn.BookTable)
	rows, err := searchIn.Datastore.Query(search, id)
	if err != nil {
		return nil, err
	}
	// Release the result set's connection (the original leaked it).
	defer rows.Close()

	if rows.Next() {
		book := new(Book)
		// Column order must match the table layout specified in
		// github.com/sudo-sturbia/bfr/internal/datastore.
		if err := rows.Scan(
			&book.ID,
			&book.Title,
			&book.Authors,
			&book.AverageRating,
			&book.ISBN,
			&book.ISBN13,
			&book.LanguageCode,
			&book.Pages,
			&book.RatingsCount,
			&book.ReviewsCount,
		); err != nil {
			return nil, err
		}
		return book, nil
	}
	// Distinguish iteration errors from a genuinely empty result.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return nil, fmt.Errorf("failed to find id")
}
// SearchByTitle searches the table and database specified in searchIn and
// returns a list of books whose title exactly matches the given title.
func SearchByTitle(searchIn *SearchIn, title string) ([]*Book, error) {
	search := fmt.Sprintf("select * from %s where title = ?;", searchIn.BookTable)
	rows, err := searchIn.Datastore.Query(search, title)
	if err != nil {
		return nil, err
	}
	// Release the result set's connection (the original leaked it).
	defer rows.Close()

	books := make([]*Book, 0)
	for rows.Next() {
		book := new(Book)
		if err := rows.Scan(
			&book.ID,
			&book.Title,
			&book.Authors,
			&book.AverageRating,
			&book.ISBN,
			&book.ISBN13,
			&book.LanguageCode,
			&book.Pages,
			&book.RatingsCount,
			&book.ReviewsCount,
		); err != nil {
			return nil, err
		}
		books = append(books, book)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return books, nil
}
// Search searches the table and database specified in searchIn and returns
// a list of books that match the parameters given in searchBy.
func Search(searchIn *SearchIn, searchBy *SearchBy) ([]*Book, error) {
	query, parameters := query(searchIn, searchBy, !titleSearch)
	rows, err := searchIn.Datastore.Query(query, parameters...)
	if err != nil {
		return nil, err
	}
	// Release the result set's connection (the original leaked it).
	defer rows.Close()

	books := make([]*Book, 0)
	for rows.Next() {
		book := new(Book)
		if err := rows.Scan(
			&book.ID,
			&book.Title,
			&book.Authors,
			&book.AverageRating,
			&book.ISBN,
			&book.ISBN13,
			&book.LanguageCode,
			&book.Pages,
			&book.RatingsCount,
			&book.ReviewsCount,
		); err != nil {
			return nil, err
		}
		books = append(books, book)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return books, nil
}
// SearchForTitles works like Search but returns a list of titles (strings)
// instead of books. Titles can then be used to search for a specific book.
func SearchForTitles(searchIn *SearchIn, searchBy *SearchBy) ([]string, error) {
	query, parameters := query(searchIn, searchBy, titleSearch)
	rows, err := searchIn.Datastore.Query(query, parameters...)
	if err != nil {
		return nil, err
	}
	// Release the result set's connection (the original leaked it).
	defer rows.Close()

	titles := make([]string, 0)
	for rows.Next() {
		var title string
		if err := rows.Scan(&title); err != nil {
			return nil, err
		}
		titles = append(titles, title)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return titles, nil
} | pkg/books/books.go | 0.633183 | 0.444384 | books.go | starcoder |
package numbers
import (
"log"
"math"
//DEBUG: "fmt"
)
// LogIntegrate evaluates log(int_a^b f(x)dx) in cases where f returns
// log(f(x)). The interval [a, b] is split into n equal slices; each slice's
// area is estimated from the log-space midpoint of its two endpoint
// evaluations and the areas are summed in log space. Fatals if a >= b.
//
// NOTE(review): despite the original "rectangle rule" description, each
// slice averages its two endpoint values — confirm intended quadrature rule.
func LogIntegrate(f func(float64) float64, a float64, b float64, n int) float64 {
	if a >= b {
		log.Fatalf("logIntegrate failed, left bound must be smaller than right bound.")
	}
	var deltaX float64 = (b - a) / float64(n)
	var logDeltaX float64 = math.Log(deltaX)
	var currLeft float64 = a // left bound of the current slice
	var currRight float64 = a + deltaX
	var answer float64
	// First slice: initialise answer with its area.
	var nextLeftEval float64 = f(currRight)
	answer = MultiplyLog(MidpointLog(f(currLeft), nextLeftEval), logDeltaX)
	var rightEval float64
	for i := 1; i < n; i++ {
		currLeft += deltaX
		currRight += deltaX
		rightEval = f(currRight)
		answer = AddLog(answer, MultiplyLog(MidpointLog(nextLeftEval, rightEval), logDeltaX))
		// Reuse this evaluation as the next slice's left endpoint.
		nextLeftEval = rightEval
	}
	return answer
}
// LogIntegrateIterative evaluates LogIntegrate with successively finer grids
// (10x more slices each pass) until two successive estimates agree to
// within relativeError, or fatals after maxIter refinements.
func LogIntegrateIterative(f func(float64) float64, a float64, b float64, maxIter int, relativeError float64) float64 {
	if maxIter < 2 {
		log.Fatalf("maxIterations for LogIntegrateIterative must be at least 2.")
	}
	if relativeError <= 0 {
		log.Fatalf("relativeError for LogIntegrateIterative must be greater than 0.")
	}
	n := 1000
	var prev, curr float64
	prev = LogIntegrate(f, a, b, n)
	for i := 0; i < maxIter; i++ {
		// Fix: the original wrote `n := n * 10`, shadowing the outer n,
		// so every pass re-evaluated with the same grid size (10000)
		// and the refinement never progressed.
		n *= 10
		curr = LogIntegrate(f, a, b, n)
		if math.Abs(prev-curr)/curr < relativeError {
			return curr
		}
		prev = curr
	}
	log.Fatalf("LogIntegrateIterative failed to converge below relative error: %f in maxIter: %v.", relativeError, maxIter)
	return (0)
}
// rombergsMethod numerically evaluates int_a^b f(x)dx by Romberg
// integration. Refinement stops once the estimated error falls below
// estimatedError (absolute) or relativeEstError*|estimate| (relative),
// after at least minIter rows; fatals if maxIter rows do not converge.
//
// There are a number of ways to evaluate a definite integral computationally.
// Romberg's method seems like a good mix of accuracy and coding difficulty,
// but there are better methods out there if more speed or accuracy are needed.
// This code tries to follow the algorithm and variable names used here:
// https://en.wikipedia.org/wiki/Romberg's_method
func rombergsMethod(f func(float64) float64, a float64, b float64, estimatedError float64, relativeEstError float64, maxIter int) float64 {
	var n, m int
	var kMax, k, h, currEstError float64
	// Only the previous and current Romberg rows are kept; the two slices
	// are swapped at the end of each refinement step.
	var currR, prevR []float64 = make([]float64, maxIter), make([]float64, maxIter)
	var minIter int = 10
	prevR[0] = 0.5 * (f(a) + f(b))
	for n = 1; n < maxIter; n++ {
		// compute the current h value
		h = math.Exp2(float64(-n)) * (b - a)
		// compute R[n][0]
		currR[0] = 0 // needed because memory is being reused
		kMax = math.Exp2(float64(n - 1))
		for k = 1; k <= kMax; k++ {
			currR[0] += f(a + (2*k-1)*h)
		}
		currR[0] *= h
		currR[0] += 0.5 * prevR[0]
		// now that we have R[n][0], we can compute R[n][m] where m > 0
		for m = 1; m <= n; m++ {
			currR[m] = currR[m-1] + 1/(math.Pow(4, float64(m))-1)*(currR[m-1]-prevR[m-1])
		}
		// now checking to see if we have convergence
		// some people use R[n][n]-R[n][n-1]
		// and some use R[n][n]-R[n-1][n-1]
		// these appear to be related by a constant of 1/(4^n-1) with
		// R[n][n]-R[n-1][n-1] being more conservative, so we will use that one
		currEstError = math.Abs(currR[n] - prevR[n-1])
		if (currEstError < estimatedError || currEstError < relativeEstError*math.Abs(currR[n])) && n >= minIter {
			return currR[n]
		}
		// swap prev and curr so that current becomes prev
		prevR, currR = currR, prevR
	}
	log.Fatal("Error: Romberg's method did not converge.")
	return (0)
}
// DefiniteIntegral computes the definite integral of f(x) dx from start to
// end, converging on either 1e-8 absolute or 1e-8 relative error.
func DefiniteIntegral(f func(float64) float64, start float64, end float64) float64 {
	return rombergsMethod(f, start, end, 1e-8, 1e-8, 30)
}

// DefiniteSmallIntegral is like DefiniteIntegral with the absolute error
// set to zero, so only relative error defines the convergence condition.
// Slower than DefiniteIntegral, but more accurate for small values.
func DefiniteSmallIntegral(f func(float64) float64, start float64, end float64) float64 {
	return rombergsMethod(f, start, end, 0, 1e-6, 30)
}
// adaptiveSimpsonsHelper is the recursive core of AdaptiveSimpsons. It
// refines the Simpson estimate wholeEstimate for f over [a, b] (with cached
// evaluations fa, fb, fMidpoint at the interval's ends and midpoint) by
// splitting the interval in two; recursion stops when the half-interval
// estimates agree with the whole to within 15*errorThresh (Lyness 1969),
// applying a Richardson extrapolation correction. Fatals when maxDepth is
// exhausted or floating-point limits are reached.
func adaptiveSimpsonsHelper(f func(float64) float64, a, b, midpoint, fa, fb, fMidpoint, wholeEstimate, errorThresh float64, maxDepth int) float64 {
	var h, leftMidpoint, rightMidpoint, fLeftMidpoint, fRightMidpoint, leftEstimate, rightEstimate, delta float64
	h = (b - a) / 2
	leftMidpoint = (a + midpoint) / 2
	rightMidpoint = (midpoint + b) / 2
	if maxDepth < 0 {
		log.Fatalf("Error in integration: exceeded maximum depth\n")
	} else if errorThresh/2 == errorThresh {
		log.Fatalf("Error in integration: the error threshold has gotten too small after many recursive calls\n")
	} else if a == leftMidpoint {
		log.Fatalf("Error in integration: the left side and midpoint have gotten too close to each other\n")
	}
	fLeftMidpoint = f(leftMidpoint)
	fRightMidpoint = f(rightMidpoint)
	// Simpson estimates for the two half intervals.
	leftEstimate = (h / 6) * (fa + 4*fLeftMidpoint + fMidpoint)
	rightEstimate = (h / 6) * (fMidpoint + 4*fRightMidpoint + fb)
	delta = leftEstimate + rightEstimate - wholeEstimate
	// Lyness 1969 + Richardson extrapolation; see article
	if math.Abs(delta) <= 15*errorThresh {
		return leftEstimate + rightEstimate + delta/15
	} else {
		return adaptiveSimpsonsHelper(f, a, midpoint, leftMidpoint, fa, fMidpoint, fLeftMidpoint, leftEstimate, errorThresh/2, maxDepth-1) + adaptiveSimpsonsHelper(f, midpoint, b, rightMidpoint, fMidpoint, fb, fRightMidpoint, rightEstimate, errorThresh/2, maxDepth-1)
	}
}
// AdaptiveSimpsons returns the integral of f from a to b using adaptive
// Simpson quadrature. The error in the calculation should be less than or
// equal to errorThreshold; if this cannot be achieved within maxDepth
// recursions, the function aborts.
func AdaptiveSimpsons(f func(float64) float64, a float64, b float64, errorThreshold float64, maxDepth int) float64 {
	mid := (a + b) / 2
	fa, fb, fMid := f(a), f(b), f(mid)
	// Whole-interval Simpson estimate, refined recursively by the helper.
	whole := ((b - a) / 6) * (fa + 4*fMid + fb)
	return adaptiveSimpsonsHelper(f, a, b, mid, fa, fb, fMid, whole, errorThreshold, maxDepth)
}
// adaptiveSimpsonsLogHelper is the recursive core of AdaptiveSimpsonsLog.
// It mirrors adaptiveSimpsonsHelper but carries every quantity in log
// space, using the AddLog/SubtractLog/MultiplyLog/DivideLog helpers.
func adaptiveSimpsonsLogHelper(f func(float64) float64, a, b, midpoint, fa, fb, fMidpoint, wholeEstimate, errorThresh float64, maxDepth int) float64 {
	// Precomputed logs of constants used by Simpson's rule and the error test.
	const logFour float64 = 1.386294
	const logFifteen float64 = 2.70805
	const logHalf float64 = -0.6931472
	var estimateFromHalves, logHOverSix, h, leftMidpoint, rightMidpoint, fLeftMidpoint, fRightMidpoint, leftEstimate, rightEstimate, delta float64
	h = (b - a) / 2
	leftMidpoint = (a + midpoint) / 2
	rightMidpoint = (midpoint + b) / 2
	if maxDepth < 0 {
		log.Fatalf("Error in integration: exceeded maximum depth\n")
	} else if MultiplyLog(errorThresh, logHalf) == errorThresh {
		log.Fatalf("Error in integration: the error threshold has gotten too small after many recursive calls\n")
	} else if a == leftMidpoint {
		log.Fatalf("Error in integration: the left side and midpoint have gotten too close to each other. a: %e. b: %e. Midpoint: %e. LeftMidpoint:%e. Fa: %e. Fb: %e. MaxDepth: %d.\n", a, b, midpoint, leftMidpoint, fa, fb, maxDepth)
	}
	fLeftMidpoint = f(leftMidpoint)
	fRightMidpoint = f(rightMidpoint)
	logHOverSix = math.Log(h / 6)
	// Simpson estimates for the two half intervals, in log space.
	leftEstimate = MultiplyLog(logHOverSix, AddLog(AddLog(fa, MultiplyLog(logFour, fLeftMidpoint)), fMidpoint))
	rightEstimate = MultiplyLog(logHOverSix, AddLog(AddLog(fMidpoint, MultiplyLog(logFour, fRightMidpoint)), fb))
	estimateFromHalves = AddLog(leftEstimate, rightEstimate)
	// The log-space difference must be taken in whichever order keeps it
	// positive before comparison with the tolerance; when the estimates
	// are exactly equal both branches are skipped and recursion continues.
	if estimateFromHalves > wholeEstimate {
		delta = SubtractLog(estimateFromHalves, wholeEstimate)
		if delta <= MultiplyLog(logFifteen, errorThresh) {
			return AddLog(AddLog(leftEstimate, rightEstimate), DivideLog(delta, logFifteen))
		}
	} else if wholeEstimate > estimateFromHalves {
		delta = SubtractLog(wholeEstimate, estimateFromHalves)
		if delta <= MultiplyLog(logFifteen, errorThresh) {
			return AddLog(AddLog(leftEstimate, rightEstimate), DivideLog(delta, logFifteen))
		}
	}
	return AddLog(adaptiveSimpsonsLogHelper(f, a, midpoint, leftMidpoint, fa, fMidpoint, fLeftMidpoint, leftEstimate, MultiplyLog(errorThresh, logHalf), maxDepth-1), adaptiveSimpsonsLogHelper(f, midpoint, b, rightMidpoint, fMidpoint, fb, fRightMidpoint, rightEstimate, MultiplyLog(errorThresh, logHalf), maxDepth-1))
}
// AdaptiveSimpsonsLog returns the log of the integral from a to b of g(x),
// where f(x) = log(g(x)); all arithmetic is carried out in log space.
// The error in the calculation should be less than or equal to
// errorThreshold. If this can not be achieved within maxDepth number
// recursions, then the function aborts.
func AdaptiveSimpsonsLog(f func(float64) float64, a float64, b float64, errorThreshold float64, maxDepth int) float64 {
	const logFour float64 = 1.386294 // log(4)
	var midpoint, h, fa, fb, fMidpoint, s float64
	h = b - a
	midpoint = (a + b) / 2
	fa = f(a)
	fb = f(b)
	fMidpoint = f(midpoint)
	// Initial whole-interval Simpson estimate, in log space.
	s = MultiplyLog(math.Log(h/6), AddLog(AddLog(fa, MultiplyLog(logFour, fMidpoint)), fb))
	return adaptiveSimpsonsLogHelper(f, a, b, midpoint, fa, fb, fMidpoint, s, math.Log(errorThreshold), maxDepth)
} | numbers/integrate.go | 0.696784 | 0.557785 | integrate.go | starcoder |
package collections
import (
"reflect"
)
// IndexOf returns the index of the first element of slice that is deeply
// equal to element, or -1 if no such element exists.
func IndexOf[T any](slice []*T, element *T) int {
	for i := range slice {
		if reflect.DeepEqual(slice[i], element) {
			return i
		}
	}
	return -1
}

// LastIndexOf returns the index of the last element of slice that is deeply
// equal to element, or -1 if no such element exists.
func LastIndexOf[T any](slice []*T, element *T) int {
	for i := len(slice) - 1; i >= 0; i-- {
		if reflect.DeepEqual(slice[i], element) {
			return i
		}
	}
	return -1
}
// Map returns a new slice holding transform applied to each element of
// the original slice.
func Map[T, R any](slice []*T, transform func(*T) *R) []*R {
	out := make([]*R, len(slice))
	for i := range slice {
		out[i] = transform(slice[i])
	}
	return out
}

// MapIndexed returns a new slice holding transform applied to each element
// of the original slice together with its index.
func MapIndexed[T, R any](slice []*T, transform func(int, *T) *R) []*R {
	out := make([]*R, len(slice))
	for i := range slice {
		out[i] = transform(i, slice[i])
	}
	return out
}

// FlatMap applies transform to each element of slice and concatenates all
// resulting slices into a single slice.
func FlatMap[T, R any](slice []*T, transform func(*T) []*R) []*R {
	var out []*R
	for i := range slice {
		out = append(out, transform(slice[i])...)
	}
	return out
}
// Flatten returns a single slice containing all elements from all inner
// slices of the given slice, in order. (Also fixes the "RReturns" typo in
// the original comment.)
func Flatten[T any](slice [][]*T) []*T {
	var result []*T
	for _, inner := range slice {
		// Appending the whole inner slice at once replaces the
		// element-by-element loop of the original.
		result = append(result, inner...)
	}
	return result
}
// Partition splits the original slice into a pair of slices: the first
// contains the elements for which predicate is true, the second those for
// which it is false; relative order is preserved in both.
func Partition[T any](slice []*T, predicate func(*T) bool) ([]*T, []*T) {
	var matched, unmatched []*T
	for _, value := range slice {
		if predicate(value) {
			matched = append(matched, value)
		} else {
			unmatched = append(unmatched, value)
		}
	}
	return matched, unmatched
}

// Plus returns a new slice containing all elements of slice followed by
// all elements of other.
func Plus[T any](slice []*T, other []*T) []*T {
	result := make([]*T, len(slice)+len(other))
	// The builtin copy replaces the two manual element loops of the
	// original (and lowers to memmove).
	n := copy(result, slice)
	copy(result[n:], other)
	return result
}

// Reversed returns a new slice with the elements of slice in reverse order.
func Reversed[T any](slice []*T) []*T {
	result := make([]*T, len(slice))
	for i, v := range slice {
		result[len(slice)-1-i] = v
	}
	return result
}
// Filter returns a slice containing only the elements of slice matching
// the given predicate.
func Filter[T any](slice []*T, predicate func(*T) bool) []*T {
	return filter(slice, predicate, true)
}

// FilterNot returns a slice containing the elements of slice that do not
// match the given predicate.
func FilterNot[T any](slice []*T, predicate func(*T) bool) []*T {
	return filter(slice, predicate, false)
}

// FilterIndexed returns a slice containing only the elements matching the
// given predicate, which also receives each element's index.
func FilterIndexed[T any](slice []*T, predicate func(int, *T) bool) []*T {
	var result []*T
	for index, value := range slice {
		if predicate(index, value) {
			result = append(result, value)
		}
	}
	return result
}

// FilterNotNil returns a slice containing the elements of slice that are
// not nil.
func FilterNotNil[T any](slice []*T) []*T {
	return filter(slice, func(s *T) bool {
		return s != nil
	}, true)
}

// filter collects the elements for which predicate(value) equals want.
// Comparing the predicate result against want collapses the two
// near-identical loops of the original implementation into one.
func filter[T any](slice []*T, predicate func(*T) bool, want bool) []*T {
	var result []*T
	for _, value := range slice {
		if predicate(value) == want {
			result = append(result, value)
		}
	}
	return result
}
// Find returns the first element of slice for which predicate is true, or
// nil when no element matches.
func Find[T any](slice []*T, predicate func(*T) bool) *T {
	for i := range slice {
		if predicate(slice[i]) {
			return slice[i]
		}
	}
	return nil
}

// All reports whether predicate is true for every element of slice (and is
// vacuously true for an empty slice).
func All[T any](slice []*T, predicate func(*T) bool) bool {
	for i := range slice {
		if !predicate(slice[i]) {
			return false
		}
	}
	return true
}
// Any reports whether predicate is true for at least one element of slice.
//
// Fix: the original returned Find(slice, predicate) != nil, which is wrong
// when a matching element is itself a nil pointer — Find returns that nil
// and Any reported false. (Its doc comment was also inaccurate.)
func Any[T any](slice []*T, predicate func(*T) bool) bool {
	for _, value := range slice {
		if predicate(value) {
			return true
		}
	}
	return false
}

// None reports whether predicate is false for every element of slice.
//
// Fix: the original returned !All(slice, predicate), which means "not all
// elements match" rather than "no element matches" — whenever some but not
// all elements matched, it wrongly reported true.
func None[T any](slice []*T, predicate func(*T) bool) bool {
	for _, value := range slice {
		if predicate(value) {
			return false
		}
	}
	return true
}
// Fold accumulates a value by applying operation left-to-right to the
// current accumulator and each element, starting from the given
// accumulator, and returns the final accumulator.
func Fold[S, T any](slice []*T, accumulator *S, operation func(*S, *T) *S) *S {
	for _, element := range slice {
		accumulator = operation(accumulator, element)
	}
	return accumulator
} | collections/collections.go | 0.883575 | 0.734453 | collections.go | starcoder |
package storagehostmanager
import (
"time"
"github.com/DxChainNetwork/godx/common/unit"
"github.com/DxChainNetwork/godx/storage"
)
// StorageHostManager related constant
const (
saveFrequency = 2 * time.Minute
PersistStorageHostManagerHeader = "Storage Host Manager Settings"
PersistStorageHostManagerVersion = "1.0"
PersistFilename = "storagehostmanager.json"
)
// Scan related constants
const (
scanOnlineCheckDuration = 30 * time.Second
scanCheckDuration = 200 * time.Millisecond
scanQuantity = 2500
maxScanSleep = 6 * time.Hour
minScanSleep = time.Hour + time.Minute*30
maxWorkersAllowed = 80
)
const (
// scoreDefaultBase is the multiplier of the score as a base.
scoreDefaultBase = 1000
// minScore is the minimum score of a host evaluation. Host evaluation score starts at
// this value.
minScore = 1
)
// Presence factor related constants
const (
// lowValueLimit is the presenceScore when block difference between the current block
// height and host first seen is smaller than lowTimeLimit.
lowValueLimit = 0.50
// lowTimeLimit set the low limit of the block difference. If block passed is smaller than
// this value, the presenceScore is of value lowValueLimit.
lowTimeLimit = 0
// highValueLimit is the presenceScore when block difference between the current block
// height and host first seen is larger than highTimeLimit.
highValueLimit = 1.00
// highTimeLimit set the high limit of the block difference. If block passed is larger than
// this value, the presenceScore is of value highValueLimit.
highTimeLimit = 100 * unit.BlocksPerDay
)
// deposit factor related constants
const (
// depositBaseDivider is the parameter to be used in depositRemaining calculation.
// The larger the divider, the slower the function approaching asymptote y = 1 as
// deposit grows.
depositBaseDivider float64 = 3
)
// storage factor related constants
const (
// storageBaseDivider is the parameter to be used in storageRemainingScore calculation.
// The larger the divider, the slower the function approaching asymptote y = 1 as storage grows.
storageBaseDivider float64 = 10
)
// interaction related fields
const (
// initialSuccessfulInteractionFactor is the initial value for hostInfo.SuccessfulInteractionFactor.
// The value is in unit the same as interaction weight.
initialSuccessfulInteractionFactor = 10
// initialFailedInteractionFactor is the initial value for hostInfo.FailedInteractionFactor.
// The value is in unit the same as interaction weight. A low initial value is aimed to give
// a new host an initial boost in scores
initialFailedInteractionFactor = 0
// interactionDecay is the decay factor to be multiplied to hostInfo.SuccessfulInteractionFactor
// and hostInfo.FailedInteractionFactor each second. The value implies that the weight of
// record 7 days ago is halved, a.k.a, the half-life of the factor is about 7 days
interactionDecay float64 = 0.999999
// interactionExponentialIndex is the exponential index for calculating the interactionScore.
// Roughly, an interaction successful rate of 90% is about to give an interaction score of value
// 0.64
interactionExponentialIndex = 4
// maxNumInteractionRecord is the maximum number of interaction records to be saved in
// nodeInfo
maxNumInteractionRecord = 30
)
// uptime related fields
const (
// initialAccumulatedUptime is the initial value for hostInfo.AccumulatedUptimeFactor.
// The initial value is in unit second, thus the initial uptime factor has value 6 hours,
// which is the same as the maxScanSleep.
initialAccumulatedUptime = 21600
// initialAccumulatedDowntime is the initial value for hostInfo.AccumulatedDowntimeFactor.
// The value is in unit second and has value 0. The low initial downtime is aimed to
// give a boost for newly added hosts.
initialAccumulatedDowntime = 0
// uptimeDecay is the decay factor to be multiplied to hostInfo.AccumulatedUptimeFactor
// and hostInfo.AccumulatedDowntimeFactor each second. The value implies that the
// weight of the record 7 days ago is halved, a.k.a, the half-life of the factor is
// about 7 days.
uptimeDecay = 0.999999
// uptimeExponentialIndex is the exponential index for calculating the uptimeScore.
// Roughly, an uptimeRate of 90% is about to give an uptime score of value 0.64
uptimeExponentialIndex = 4
// uptimeCap is the upper cap of the uptimeRate. Any uptimeRate larger than this value
// will have a full score (1.00) in uptimeScore
uptimeCap = 0.98
// uptimeMaxNumScanRecords is the maximum number of ScanRecords to be saved in nodeInfo.
uptimeMaxNumScanRecords = 20
)
// host manager remove criteria
const (
// critIntercept is the criteria's intercept with y axis, which is the upRate criteria when
// upRate is 0
critIntercept = 0.30
// critRemoveBase is the parameter to be used in criteria calculation. The larger this
// parameter, the slower the criteria function approaching asymptote y = 1.
critRemoveBase = unit.BlocksPerDay * 3
)
// host market related constants
const (
	// priceUpdateInterval is the time to be passed before the host market price shall be
	// updated.
	priceUpdateInterval = 1 * time.Minute
	// floorRatio is the ratio below which the price does not count for the average
	floorRatio float64 = 0.2
	// ceilRatio is the ratio of total where the highest price does not count for the average
	ceilRatio float64 = 0.2
)

// defaultMarketPrice is a MarketPrice populated entirely from the storage
// package's default price and deposit values.
var defaultMarketPrice = storage.MarketPrice{
	ContractPrice: storage.DefaultContractPrice,
	StoragePrice:  storage.DefaultStoragePrice,
	UploadPrice:   storage.DefaultUploadBandwidthPrice,
	DownloadPrice: storage.DefaultDownloadBandwidthPrice,
	Deposit:       storage.DefaultDeposit,
	MaxDeposit:    storage.DefaultMaxDeposit,
} | storage/storageclient/storagehostmanager/defaults.go | 0.594904 | 0.406744 | defaults.go | starcoder |
package dataframe
import (
"fmt"
"time"
)
// Vector represents a collection of Elements.
//
// All element access goes through interface{}; PrimitiveType reports which
// concrete Go slice type the implementation stores (see VectorPType).
type Vector interface {
	Set(idx int, i interface{}) // overwrite the element at index idx
	Append(i interface{})       // add one element at the end
	At(i int) interface{}       // read the element at index i
	Len() int                   // number of elements
	PrimitiveType() VectorPType // underlying primitive slice type
}
// newVector constructs the concrete Vector implementation matching the
// dynamic type of t, sized by n (passed through to the concrete
// constructor). Only t's type is inspected; its value is never read.
// Supported element types are int64, uint64, float64, string, bool and
// time.Time, each in plain or nullable (pointer-element) slice form.
// Any other type panics, as it indicates a programming error in the caller.
func newVector(t interface{}, n int) (v Vector) {
	switch t.(type) {
	case []int64:
		v = newIntVector(n)
	case []*int64:
		v = newNullableIntVector(n)
	case []uint64:
		v = newUintVector(n)
	case []*uint64:
		v = newNullableUintVector(n)
	case []float64:
		v = newFloatVector(n)
	case []*float64:
		v = newNullableFloatVector(n)
	case []string:
		v = newStringVector(n)
	case []*string:
		v = newNullableStringVector(n)
	case []bool:
		v = newBoolVector(n)
	case []*bool:
		v = newNullableBoolVector(n)
	case []time.Time:
		v = newTimeVector(n)
	case []*time.Time:
		v = newNullableTimeVector(n)
	default:
		panic(fmt.Sprintf("unsupported vector type of %T", t))
	}
	return
}
// VectorPType indicates the go type underlying the Vector.
type VectorPType int
const (
// VectorPTypeInt64 indicates the underlying primitive is a []int64.
VectorPTypeInt64 VectorPType = iota
// VectorPTypeNullableInt64 indicates the underlying primitive is a []*int64.
VectorPTypeNullableInt64
// VectorPTypeUint64 indicates the underlying primitive is a []uint64.
VectorPTypeUint64
// VectorPTypeNullableUInt64 indicates the underlying primitive is a []*uint64.
VectorPTypeNullableUInt64
// VectorPTypeFloat64 indicates the underlying primitive is a []float64.
VectorPTypeFloat64
// VectorPTypeNullableFloat64 indicates the underlying primitive is a []*float64.
VectorPTypeNullableFloat64
// VectorPTypeString indicates the underlying primitive is a []string.
VectorPTypeString
// VectorPTypeNullableString indicates the underlying primitive is a []*string.
VectorPTypeNullableString
// VectorPTypeBool indicates the underlying primitive is a []bool.
VectorPTypeBool
// VectorPTypeNullableBool indicates the underlying primitive is a []*bool.
VectorPTypeNullableBool
// VectorPTypeTime indicates the underlying primitive is a []time.Time.
VectorPTypeTime
// VectorPTypeNullableTime indicates the underlying primitive is a []*time.Time.
VectorPTypeNullableTime
) | vendor/github.com/grafana/grafana-plugin-sdk-go/dataframe/vector.go | 0.659186 | 0.517327 | vector.go | starcoder |
package interpreter
import (
"fmt"
"github.com/smackem/ylang/internal/lang"
"reflect"
)
type Nilval lang.Nil
func (n Nilval) Compare(other Value) (Value, error) {
if _, ok := other.(Nilval); ok {
return Number(0), nil
}
return nil, nil
}
func (n Nilval) greaterThan(other Value) (Value, error) {
return nil, fmt.Errorf("type mismatch: nil > %s Not supported", reflect.TypeOf(other))
}
func (n Nilval) greaterThanOrEqual(other Value) (Value, error) {
return nil, fmt.Errorf("type mismatch: nil >= %s Not supported", reflect.TypeOf(other))
}
func (n Nilval) lessThan(other Value) (Value, error) {
return nil, fmt.Errorf("type mismatch: nil < %s Not supported", reflect.TypeOf(other))
}
func (n Nilval) lessThanOrEqual(other Value) (Value, error) {
return nil, fmt.Errorf("type mismatch: nil <= %s Not supported", reflect.TypeOf(other))
}
func (n Nilval) Add(other Value) (Value, error) {
return nil, fmt.Errorf("type mismatch: nil + %s Not supported", reflect.TypeOf(other))
}
func (n Nilval) Sub(other Value) (Value, error) {
return nil, fmt.Errorf("type mismatch: nil - %s Not supported", reflect.TypeOf(other))
}
func (n Nilval) Mul(other Value) (Value, error) {
return nil, fmt.Errorf("type mismatch: nil * %s Not supported", reflect.TypeOf(other))
}
func (n Nilval) Div(other Value) (Value, error) {
return nil, fmt.Errorf("type mismatch: nil / %s Not supported", reflect.TypeOf(other))
}
func (n Nilval) Mod(other Value) (Value, error) {
return nil, fmt.Errorf("type mismatch: nil %% %s Not supported", reflect.TypeOf(other))
}
func (n Nilval) In(other Value) (Value, error) {
return nil, fmt.Errorf("type mismatch: nil In %s Not supported", reflect.TypeOf(other))
}
func (n Nilval) Neg() (Value, error) {
return nil, fmt.Errorf("type mismatch: -nil Not supported")
}
func (n Nilval) Not() (Value, error) {
return nil, fmt.Errorf("type mismatch: Not nil Not supported")
}
func (n Nilval) At(bitmap BitmapContext) (Value, error) {
return nil, fmt.Errorf("type mismatch: @nil Not supported")
}
func (n Nilval) Property(ident string) (Value, error) {
return baseProperty(n, ident)
}
func (n Nilval) PrintStr() string {
return "nil"
}
func (n Nilval) Iterate(visit func(Value) error) error {
return fmt.Errorf("cannot Iterate over nil")
}
func (n Nilval) Index(index Value) (Value, error) {
return nil, fmt.Errorf("type mismatch: nil[Index] Not supported")
}
func (n Nilval) IndexRange(lower, upper Value) (Value, error) {
return nil, fmt.Errorf("type mismatch: nil[lower..upper] Not supported")
}
func (n Nilval) IndexAssign(index Value, val Value) error {
return fmt.Errorf("type mismatch: nil[%s] Not supported", reflect.TypeOf(index))
}
func (n Nilval) RuntimeTypeName() string {
return "nil"
}
func (n Nilval) Concat(val Value) (Value, error) {
return nil, fmt.Errorf("type mismatch: nil :: [%s] Not supported", reflect.TypeOf(val))
} | internal/interpreter/nilval.go | 0.78785 | 0.433382 | nilval.go | starcoder |
package packets
// This packet contains lap times and tyre usage for the session. This packet works slightly differently to other packets. To reduce CPU and bandwidth, each packet relates to a specific vehicle and is sent every 1/20 s, and the vehicle being sent is cycled through. Therefore in a 20 car race you should receive an update for each vehicle at least once per second.
// Note that at the end of the race, after the final classification packet has been sent, a final bulk update of all the session histories for the vehicles in that session will be sent.
// Frequency: 20 per second but cycling through cars
// Size: 1155 bytes
// Version: 1
// LapHistoryData holds the timing for one lap of the car this packet
// relates to.
type LapHistoryData struct {
	LapTimeInMS      uint32 // Lap time in milliseconds
	Sector1TimeInMS  uint16 // Sector 1 time in milliseconds
	Sector2TimeInMS  uint16 // Sector 2 time in milliseconds
	Sector3TimeInMS  uint16 // Sector 3 time in milliseconds
	LapValidBitFlags uint8  // 0x01 bit set-lap valid, 0x02 bit set-sector 1 valid, 0x04 bit set-sector 2 valid, 0x08 bit set-sector 3 valid
}

// TyreStintHistoryData describes one tyre stint of the car.
type TyreStintHistoryData struct {
	EndLap             uint8 // Lap the tyre usage ends on (255 if current tyre)
	TyreActualCompound uint8 // Actual tyres used by this driver
	TyreVisualCompound uint8 // Visual tyres used by this driver
}

// PacketSessionHistoryData carries the lap and tyre-stint history for a
// single car; successive packets cycle through the cars in the session.
type PacketSessionHistoryData struct {
	Header                PacketHeader            // Header
	CarIdx                uint8                   // Index of the car this lap data relates to
	NumLaps               uint8                   // Num laps in the data (including current partial lap)
	NumTyreStints         uint8                   // Number of tyre stints in the data
	BestLapTimeLapNum     uint8                   // Lap the best lap time was achieved on
	BestSector1LapNum     uint8                   // Lap the best Sector 1 time was achieved on
	BestSector2LapNum     uint8                   // Lap the best Sector 2 time was achieved on
	BestSector3LapNum     uint8                   // Lap the best Sector 3 time was achieved on
	LapHistoryData        [100]LapHistoryData     // 100 laps of data max
	TyreStintsHistoryData [8]TyreStintHistoryData // Up to 8 tyre stints
} | pkg/packets/session_history.go | 0.546496 | 0.487612 | session_history.go | starcoder |
package day9
import (
"strconv"
"strings"
)
// isSumOfNumberPair reports whether num equals the sum of a pair of
// entries of numbers holding two different values. Pairs of equal values
// never qualify: the two addends must differ.
func isSumOfNumberPair(num int64, numbers []int64) bool {
	if len(numbers) < 2 {
		return false
	}
	for _, a := range numbers[:len(numbers)-1] {
		for _, b := range numbers[1:] {
			if a != b && a+b == num {
				return true
			}
		}
	}
	return false
}
// sumOfContiguousNumbers looks for a contiguous run of at least two
// entries of numbers adding up to num. On success it returns the smallest
// value in the run, the largest value, and their sum; otherwise (or when
// the slice is too short) it returns three zeros.
func sumOfContiguousNumbers(num int64, numbers []int64) (int64, int64, int64) {
	if len(numbers) < 2 {
		return 0, 0, 0
	}
	for start := 0; start < len(numbers)-1; start++ {
		low, high := numbers[start], numbers[start]
		total := numbers[start]
		for _, v := range numbers[start+1:] {
			if total+v > num {
				// Overshot the target; give up on this starting point.
				break
			}
			if v < low {
				low = v
			}
			if v > high {
				high = v
			}
			if total+v == num {
				return low, high, low + high
			}
			total += v
		}
	}
	return 0, 0, 0
}
// findInvalidNumber returns the first entry that is not the sum of a pair
// of differing values among the preambleLen entries directly before it,
// or 0 when every entry checks out.
func findInvalidNumber(numbers []int64, preambleLen int) int64 {
	for i, n := range numbers {
		if i < preambleLen {
			continue // still inside the preamble
		}
		if !isSumOfNumberPair(n, numbers[i-preambleLen:i]) {
			return n
		}
	}
	return 0
}
// SolvePart1 takes input from a string and solves the Day 9, Part 1
// challenge: find the first number that is not a pair-sum of the 25
// numbers before it.
func SolvePart1(input string) int64 {
	lines := strings.Split(strings.TrimRight(input, "\n"), "\n")
	numbers := make([]int64, 0, len(lines))
	for _, line := range lines {
		// A malformed line parses as 0 (error deliberately ignored).
		n, _ := strconv.ParseInt(line, 10, 64)
		numbers = append(numbers, n)
	}
	return findInvalidNumber(numbers, 25)
}
// SolvePart2 takes input from a string
// and then solves the Day 7, Part 2 challenge
func SolvePart2(input string, preambleLen int) int64 {
lines := strings.Split(strings.TrimRight(input, "\n"), "\n")
numbers := make([]int64, len(lines))
for i := 0; i < len(lines); i++ {
numbers[i], _ = strconv.ParseInt(lines[i], 10, 64)
}
invNum := findInvalidNumber(numbers, preambleLen)
_, _, sum := sumOfContiguousNumbers(invNum, numbers)
return sum
} | 2020/day9/day9.go | 0.529507 | 0.446374 | day9.go | starcoder |
package main
import (
"encoding/json"
"fmt"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hclsyntax"
"github.com/zclconf/go-cty/cty"
ctyjson "github.com/zclconf/go-cty/cty/json"
)
// Value wraps a single AST node (or related value) so it can be
// serialized into JSON via MarshalJSON.
type Value struct {
	Value interface{}
}

// singleToValue wraps one value.
func singleToValue(v interface{}) Value {
	return Value{v}
}

// mapToValueMap wraps every entry of a supported map type into a
// map[string]Value; any other map type panics.
func mapToValueMap(v interface{}) map[string]Value {
	values := map[string]Value{}
	switch v := v.(type) {
	case hclsyntax.Attributes:
		for idx, v := range v {
			values[idx] = Value{v}
		}
	default:
		panic(fmt.Errorf("unknown map: %v", v))
	}
	return values
}
func sliceToValueSlice(v interface{}) []Value {
values := []Value{}
switch v := v.(type) {
case hclsyntax.Blocks:
values = make([]Value, len(v))
for idx, v := range v {
values[idx] = Value{v}
}
case []hclsyntax.Expression:
values = make([]Value, len(v))
for idx, v := range v {
values[idx] = Value{v}
}
case []hclsyntax.ObjectConsItem:
values = make([]Value, len(v))
for idx, v := range v {
values[idx] = Value{v}
}
case hcl.Diagnostics:
values = make([]Value, len(v))
for idx, v := range v {
values[idx] = Value{v}
}
case []hcl.Range:
values = make([]Value, len(v))
for idx, v := range v {
values[idx] = Value{v}
}
case hcl.Traversal:
values = make([]Value, len(v))
for idx, v := range v {
values[idx] = Value{v}
}
default:
panic(fmt.Errorf("unknown slice: %v", v))
}
return values
}
// MarshalJSON converts the value into json, recursively passing all children through the Value interface
func (v Value) MarshalJSON() ([]byte, error) {
if v.Value == nil {
return []byte("null"), nil
}
switch v := v.Value.(type) {
case *hclsyntax.AnonSymbolExpr:
return json.Marshal(map[string]interface{}{
"kind": "anonSymbolExpr",
"srcRange": singleToValue(v.SrcRange),
})
case *hclsyntax.Attribute:
return json.Marshal(map[string]interface{}{
"kind": "attribute",
"name": v.Name,
"expr": singleToValue(v.Expr),
"srcRange": singleToValue(v.SrcRange),
"nameRange": singleToValue(v.NameRange),
"equalsRange": singleToValue(v.EqualsRange),
})
case *hclsyntax.Block:
return json.Marshal(map[string]interface{}{
"kind": "block",
"type": v.Type,
"labels": v.Labels,
"body": singleToValue(v.Body),
"typeRange": singleToValue(v.TypeRange),
"labelRanges": sliceToValueSlice(v.LabelRanges),
"openBraceRange": singleToValue(v.OpenBraceRange),
"closeBraceRange": singleToValue(v.CloseBraceRange),
})
case *hclsyntax.BinaryOpExpr:
return json.Marshal(map[string]interface{}{
"kind": "binaryOpExpr",
"lhs": singleToValue(v.LHS),
"op": singleToValue(v.Op),
"rhs": singleToValue(v.RHS),
"srcRange": singleToValue(v.SrcRange),
})
case *hclsyntax.Body:
return json.Marshal(map[string]interface{}{
"kind": "body",
"attributes": mapToValueMap(v.Attributes),
"blocks": sliceToValueSlice(v.Blocks),
})
case *hclsyntax.ConditionalExpr:
return json.Marshal(map[string]interface{}{
"kind": "conditionalExpr",
"condition": singleToValue(v.Condition),
"trueResult": singleToValue(v.TrueResult),
"falseResult": singleToValue(v.FalseResult),
"srcRange": singleToValue(v.SrcRange),
})
case *hclsyntax.ForExpr:
return json.Marshal(map[string]interface{}{
"kind": "functionCallExpr",
"keyVar": v.KeyVar,
"valVar": v.ValVar,
"collExpr": singleToValue(v.CollExpr),
"keyExpr": singleToValue(v.KeyExpr),
"valExpr": singleToValue(v.ValExpr),
"condExpr": singleToValue(v.CondExpr),
"group": v.Group,
"srcRange": singleToValue(v.SrcRange),
"openRange": singleToValue(v.OpenRange),
"closeRange": singleToValue(v.CloseRange),
})
case *hclsyntax.FunctionCallExpr:
return json.Marshal(map[string]interface{}{
"kind": "functionCallExpr",
"name": v.Name,
"args": sliceToValueSlice(v.Args),
"expandFinal": v.ExpandFinal,
"nameRange": singleToValue(v.NameRange),
"openParenRange": singleToValue(v.OpenParenRange),
"closeParenRange": singleToValue(v.CloseParenRange),
})
case *hclsyntax.IndexExpr:
return json.Marshal(map[string]interface{}{
"kind": "indexExpr",
"collection": singleToValue(v.Collection),
"key": singleToValue(v.Key),
"srcRange": singleToValue(v.SrcRange),
"openRange": singleToValue(v.OpenRange),
"bracketRange": singleToValue(v.BracketRange),
})
case *hclsyntax.LiteralValueExpr:
return json.Marshal(map[string]interface{}{
"kind": "literalValueExpr",
"val": singleToValue(v.Val),
"type": v.Val.Type(),
"srcRange": singleToValue(v.SrcRange),
})
case *hclsyntax.ObjectConsExpr:
return json.Marshal(map[string]interface{}{
"kind": "objectConsExpr",
"items": sliceToValueSlice(v.Items),
"srcRange": singleToValue(v.SrcRange),
"openRange": singleToValue(v.OpenRange),
})
case hclsyntax.ObjectConsItem:
return json.Marshal(map[string]interface{}{
"kind": "objectConsItem",
"keyExpr": singleToValue(v.KeyExpr),
"valueExpr": singleToValue(v.ValueExpr),
})
case *hclsyntax.ObjectConsKeyExpr:
return json.Marshal(map[string]interface{}{
"kind": "objectConsKeyExpr",
"wrapped": singleToValue(v.Wrapped),
"forceNonLiteral": v.ForceNonLiteral,
})
case *hclsyntax.Operation:
return json.Marshal(map[string]interface{}{
"kind": "operation",
"type": v.Type,
})
case *hclsyntax.RelativeTraversalExpr:
return json.Marshal(map[string]interface{}{
"kind": "relativeTraversalExpr",
"source": singleToValue(v.Source),
"traversal": sliceToValueSlice(v.Traversal),
"srcRange": singleToValue(v.SrcRange),
})
case *hclsyntax.ScopeTraversalExpr:
return json.Marshal(map[string]interface{}{
"kind": "scopeTraversalExpr",
"traversal": sliceToValueSlice(v.Traversal),
"srcRange": singleToValue(v.SrcRange),
})
case *hclsyntax.SplatExpr:
return json.Marshal(map[string]interface{}{
"kind": "splatExpr",
"source": singleToValue(v.Source),
"each": singleToValue(v.Each),
"item": singleToValue(v.Item),
"srcRange": singleToValue(v.SrcRange),
"markerRange": singleToValue(v.MarkerRange),
})
case *hclsyntax.TemplateExpr:
return json.Marshal(map[string]interface{}{
"kind": "templateExpr",
"parts": sliceToValueSlice(v.Parts),
"srcRange": singleToValue(v.SrcRange),
})
case *hclsyntax.TemplateJoinExpr:
return json.Marshal(map[string]interface{}{
"kind": "templateJoinExpr",
"tuple": singleToValue(v.Tuple),
})
case *hclsyntax.TemplateWrapExpr:
return json.Marshal(map[string]interface{}{
"kind": "templateWrapExpr",
"wrapped": singleToValue(v.Wrapped),
"srcRange": singleToValue(v.SrcRange),
})
case *hclsyntax.TupleConsExpr:
return json.Marshal(map[string]interface{}{
"kind": "tupleConsExpr",
"exprs": sliceToValueSlice(v.Exprs),
"srcRange": singleToValue(v.SrcRange),
"openRange": singleToValue(v.OpenRange),
})
case *hclsyntax.UnaryOpExpr:
return json.Marshal(map[string]interface{}{
"kind": "unaryOpExpr",
"op": singleToValue(v.Op),
"val": singleToValue(v.Val),
"srcRange": singleToValue(v.SrcRange),
"symbolRange": singleToValue(v.SymbolRange),
})
case *hcl.Diagnostic:
subject := singleToValue(v.Subject)
context := singleToValue(v.Context)
if v.Subject == nil {
subject = Value{nil}
}
if v.Context == nil {
context = subject
}
return json.Marshal(map[string]interface{}{
"severity": v.Severity,
"summary": v.Summary,
"detail": v.Detail,
"subject": subject,
"context": context,
"expression": singleToValue(v.Expression),
})
case hcl.Pos:
return json.Marshal(map[string]interface{}{
"byte": v.Byte,
"column": v.Column,
"line": v.Line,
})
case hcl.Range:
return json.Marshal(map[string]interface{}{
"filename": v.Filename,
"start": singleToValue(v.Start),
"end": singleToValue(v.End),
})
case *hcl.Range:
return json.Marshal(map[string]interface{}{
"filename": v.Filename,
"start": singleToValue(v.Start),
"end": singleToValue(v.End),
})
case hcl.TraverseAttr:
return json.Marshal(map[string]interface{}{
"kind": "traverseAttr",
"name": v.Name,
"srcRange": singleToValue(v.SrcRange),
})
case hcl.TraverseIndex:
return json.Marshal(map[string]interface{}{
"kind": "traverseIndex",
"key": singleToValue(v.Key),
"keyType": v.Key.Type(),
"srcRange": singleToValue(v.SrcRange),
})
case hcl.TraverseRoot:
return json.Marshal(map[string]interface{}{
"kind": "traverseRoot",
"name": v.Name,
"srcRange": singleToValue(v.SrcRange),
})
case hcl.TraverseSplat:
return json.Marshal(map[string]interface{}{
"kind": "traverseSplat",
"each": sliceToValueSlice(v.Each),
"srcRange": singleToValue(v.SrcRange),
})
case cty.Value:
if !v.IsKnown() {
return json.Marshal(nil)
}
return json.Marshal(ctyjson.SimpleJSONValue{Value: v})
default:
return nil, fmt.Errorf("unable to marshal value: %v", v)
}
}
// ParseConfig parses HCL into an AST and returns it, together with any
// parse diagnostics, encoded as a JSON string.
func ParseConfig(filename, contents string) (interface{}, error) {
	file, diags := hclsyntax.ParseConfig([]byte(contents), filename, hcl.Pos{Line: 1, Column: 1})

	// NOTE(review): assumes hclsyntax.ParseConfig always yields a non-nil
	// file whose Body is a *hclsyntax.Body, even when diags is non-empty —
	// confirm for pathological inputs.
	data, err := json.Marshal(map[string]interface{}{
		"ast":         singleToValue(file.Body.(*hclsyntax.Body)),
		"diagnostics": sliceToValueSlice(diags),
	})
	if err != nil {
		return nil, err
	}

	return string(data), nil
}
// Package lgs handles input and output of Longer Orange 10 print files
package lgs
import (
"fmt"
"image"
"image/color"
)
func Rle4Encode(pic *image.Gray) (data []byte, err error) {
bounds := pic.Bounds()
addSpan := func(color uint8, span uint) (out []byte) {
for ; span > 0; span >>= 4 {
datum := uint8(span&0xf) | (color & 0xf0)
out = append([]byte{datum}, out...)
}
return
}
span := uint(0)
lc := uint8(0)
for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
for x := bounds.Min.X; x < bounds.Max.X; x++ {
c := pic.GrayAt(x, y).Y & 0xf0
if c == lc {
span++
} else {
data = append(data, addSpan(lc, span)...)
span = 1
}
lc = c
}
}
data = append(data, addSpan(lc, span)...)
return
}
func Rle4Decode(data []byte, bounds image.Rectangle) (gi *image.Gray) {
gi = image.NewGray(bounds)
last := uint8(0)
span := 0
index := 0
addSpan := func(color uint8, span int) {
for ; span > 0; span-- {
if index >= len(gi.Pix) {
panic(fmt.Sprintf("%v bytes too many", span))
return
}
gi.Pix[index] = color
index++
}
}
for _, b := range data {
color := (b & 0xf0) | (b >> 4)
if color == last {
span = (span << 4) | int(b&0xf)
} else {
addSpan(last, span)
span = int(b & 0xf)
}
last = color
}
addSpan(last, span)
if index != len(gi.Pix) {
panic(fmt.Sprintf("%v bytes missing of %v\n", len(gi.Pix)-index, len(gi.Pix)))
}
return
}
func RGB15Encode(pic image.Image) (data []byte) {
bounds := pic.Bounds()
for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
for x := bounds.Min.X; x < bounds.Max.X; x++ {
c := color.NRGBAModel.Convert(pic.At(x, y)).(color.NRGBA)
rgb15 := (uint16(c.R>>3) << 11) |
(uint16(c.G>>3) << 6) |
(uint16(c.B>>3) << 0)
data = append(data, uint8(rgb15>>8), uint8(rgb15&0xff))
}
}
return
}
func RGB15Decode(bounds image.Rectangle, data []byte) (pic image.Image) {
size := bounds.Size()
preview := image.NewNRGBA(bounds)
for y := 0; y < size.Y; y++ {
for x := 0; x < size.X; x++ {
n := (y*size.X + x) * 2
rgb15 := (uint16(data[n+0]) << 8) | uint16(data[n+1])
r := ((uint8(rgb15>>11) & 0x1f) << 3) | 0x7
g := ((uint8(rgb15>>6) & 0x1f) << 3) | 0x7
b := ((uint8(rgb15>>0) & 0x1f) << 3) | 0x7
preview.Set(x, y, color.NRGBA{r, g, b, 0xff})
}
}
pic = preview
return
} | lgs/rle.go | 0.694406 | 0.416915 | rle.go | starcoder |
package tda
import (
"image"
"image/color"
"image/jpeg"
"image/png"
"os"
"strings"
"github.com/kettek/apng"
"gonum.org/v1/gonum/floats"
"gonum.org/v1/plot"
"gonum.org/v1/plot/plotter"
"gonum.org/v1/plot/vg"
)
// GetImage returns the pixel levels of a jpeg or png file as greyscale
// values, along with the number of rows in the image.
//
// The decoder is picked from the (case-insensitive) file extension. Each
// pixel is reduced to one luminance value with the weights 0.21/0.72/0.07
// for R/G/B (approximately the Rec. 709 luma weights); since
// color.Color.RGBA reports 16-bit channels, the values are on a 0-65535
// scale, laid out row-major. Any I/O, decode or unknown-format error
// causes a panic.
func GetImage(filename string) ([]int, int) {
	fid, err := os.Open(filename)
	if err != nil {
		panic(err)
	}
	defer fid.Close()

	fnl := strings.ToLower(filename)
	var img image.Image
	switch {
	case strings.HasSuffix(fnl, ".jpg"), strings.HasSuffix(fnl, ".jpeg"):
		img, err = jpeg.Decode(fid)
		if err != nil {
			panic(err)
		}
	case strings.HasSuffix(fnl, ".png"):
		img, err = png.Decode(fid)
		if err != nil {
			panic(err)
		}
	default:
		panic("Unknown image format")
	}

	imb := img.Bounds()
	// NOTE(review): iteration runs from 0 to Max and allocation uses
	// Max.X*Max.Y, which assumes bounds start at (0,0); an image with a
	// non-zero Min would be mis-read — confirm inputs.
	imd := make([]int, imb.Max.X*imb.Max.Y)
	ii := 0
	for y := 0; y < imb.Max.Y; y++ {
		for x := 0; x < imb.Max.X; x++ {
			c := img.At(x, y)
			r, g, b, _ := c.RGBA()
			imd[ii] = int(0.21*float64(r) + 0.72*float64(g) + 0.07*float64(b))
			ii++
		}
	}

	return imd, imb.Max.Y
}
// AnimateThreshold constructs an animated PNG showing a sequence of thresholded
// versions of an image. The image pixel data are provided as a slice of integers,
// which must conform to a rectangular image with the given number of rows.
// The animation is based on a series of steps in which the image is thresholded
// at a linear sequence of values ranging from the minimum to the maximum
// pixel intensity. The image is
// written in animated png (.apng) format to the given file.
//
// Panics when len(img) is not an exact multiple of rows, or when the
// output file cannot be created.
func AnimateThreshold(img []int, rows, steps int, outfile string) {

	cols := len(img) / rows
	if len(img) != rows*cols {
		panic("image shape does not conform to a rectangle")
	}

	a := apng.APNG{
		Frames: make([]apng.Frame, steps),
	}

	mn, mx := iminmax(img)
	for i := 0; i < steps; i++ {
		// Frame i's threshold, stepped linearly from mn to mx.
		thresh := mn + int(float64(i*(mx-mn))/float64(steps-1))
		imb := image.NewGray16(image.Rect(0, 0, cols, rows))
		ii := 0
		for y := 0; y < rows; y++ {
			for x := 0; x < cols; x++ {
				// Pixels above the threshold become near-white, the rest black.
				if img[ii] > thresh {
					imb.Set(x, y, color.Gray16{65530})
				} else {
					imb.Set(x, y, color.Gray16{0})
				}
				ii++
			}
		}
		a.Frames[i].Image = imb
	}

	out, err := os.Create(outfile)
	if err != nil {
		panic(err)
	}
	defer out.Close()
	// NOTE(review): apng.Encode's error return is discarded here, so a
	// failed encode goes unnoticed — consider checking it.
	apng.Encode(out, a)
}
// iminmax returns the smallest and largest entries of x. It panics on an
// empty slice (x[0] is read unconditionally).
func iminmax(x []int) (int, int) {
	lo, hi := x[0], x[0]
	for _, v := range x {
		if v < lo {
			lo = v
		} else if v > hi {
			hi = v
		}
	}
	return lo, hi
}
// LandscapePlot supports creation of plots of landscape functions.
type LandscapePlot struct {
// Input image file name, should be a png or jpeg image file
Filename string
// Output filename for the plot, suffix determines format
Outfile string
// The number of image thresholding steps
Isteps int
// The number of steps along the landscape profile
Lsteps int
// Plot these landscape depths
Depth []int
}
// checkArgs panics when a required field of the plot request is unset.
func (lsp *LandscapePlot) checkArgs() {
	switch {
	case lsp.Filename == "":
		panic("Filename cannot be empty")
	case lsp.Outfile == "":
		panic("Outfile cannot be empty")
	case lsp.Isteps == 0:
		panic("Isteps must be positive")
	case lsp.Lsteps == 0:
		panic("Lsteps must be positive")
	case len(lsp.Depth) == 0:
		panic("Depth cannot be empty")
	}
}
// Plot generates a landscape plom a LandscapePlot value.
func (lsp *LandscapePlot) Plot() {
lsp.checkArgs()
img, rows := GetImage(lsp.Filename)
ps := NewPersistence(img, rows, lsp.Isteps)
birth, death := ps.BirthDeath()
ls := NewLandscape(birth, death)
d := floats.Min(birth)
r := floats.Max(death) - d
var lsc [][]float64
var tvals []float64
for i := 0; i < lsp.Lsteps; i++ {
t := d + float64(i)*r/float64(lsp.Lsteps-1)
tvals = append(tvals, t)
kp := ls.Eval(t, lsp.Depth)
lsc = append(lsc, kp)
}
lsp.diagram(birth, death, tvals, lsc)
}
func (lsp *LandscapePlot) diagram(birth, death, tvals []float64, lsc [][]float64) {
plt, err := plot.New()
if err != nil {
panic(err)
}
plt.Title.Text = "Landscape diagram"
plt.X.Label.Text = "(Birth+Death)/2"
plt.Y.Label.Text = "(Death-Birth)/2"
// Get the birth and death times for each object
pts := make(plotter.XYs, len(birth))
for i := range birth {
pts[i].X = (birth[i] + death[i]) / 2
pts[i].Y = (death[i] - birth[i]) / 2
}
s, err := plotter.NewScatter(pts)
if err != nil {
panic(err)
}
plt.Add(s)
// Plot a sequence of landscapes in red
for j := 0; j < len(lsp.Depth); j++ {
lpts := make(plotter.XYs, len(tvals))
for i := range tvals {
lpts[i].X = tvals[i]
lpts[i].Y = lsc[i][j]
}
l, err := plotter.NewLine(lpts)
if err != nil {
panic(err)
}
l.Color = color.RGBA{R: 255, A: 255}
plt.Add(l)
}
// Save the plot to a PNG file.
if err := plt.Save(5*vg.Inch, 4*vg.Inch, lsp.Outfile); err != nil {
panic(err)
}
}
// ConvexPeelPlot supports constructing plots of convex hull peels.
type ConvexPeelPlot struct {
// Input image file name, should be a png or jpeg image file
Filename string
// Output filename for the plot, suffix determines format
Outfile string
// The number of image thresholding steps
Isteps int
// Plot these convex peel fractions (e.g. Depth=0.95 trims off 5% of the data)
Depth []float64
}
func (cpp *ConvexPeelPlot) convexPeelDiagram(birth, death []float64) {
plt, err := plot.New()
if err != nil {
panic(err)
}
plt.Title.Text = "Persistence diagram"
plt.X.Label.Text = "Birth"
plt.Y.Label.Text = "Death"
// Get the birth and death times for each object
pts := make(plotter.XYs, len(birth))
for i := range birth {
pts[i].X = birth[i]
pts[i].Y = death[i]
}
s, err := plotter.NewScatter(pts)
if err != nil {
panic(err)
}
plt.Add(s)
// Plot a sequence of convex hull peels in red
cp := NewConvexPeel(birth, death)
for _, frac := range cpp.Depth {
cp.PeelTo(frac)
hp := cp.HullPoints(nil)
pts := make(plotter.XYs, len(hp))
for i := range hp {
pts[i].X = hp[i][0]
pts[i].Y = hp[i][1]
}
l, err := plotter.NewLine(pts)
if err != nil {
panic(err)
}
l.Color = color.RGBA{R: 255, A: 255}
plt.Add(l)
}
// Save the plot to a PNG file.
if err := plt.Save(5*vg.Inch, 4*vg.Inch, cpp.Outfile); err != nil {
panic(err)
}
}
// checkArgs panics when a required field of the plot request is unset.
func (cpp *ConvexPeelPlot) checkArgs() {
	switch {
	case cpp.Filename == "":
		panic("Filename cannot be empty")
	case cpp.Outfile == "":
		panic("Outfile cannot be empty")
	case cpp.Isteps == 0:
		panic("Isteps must be positive")
	case len(cpp.Depth) == 0:
		panic("Depth cannot be empty")
	}
}
// Plot generates a plot of a set of birth/death times along with several
// convex hull peels.
func (cpp *ConvexPeelPlot) Plot() {
cpp.checkArgs()
img, rows := GetImage(cpp.Filename)
// Calculate persistence trajectories using an
// increasing sequence of thresholds
ps := NewPersistence(img, rows, cpp.Isteps)
birth, death := ps.BirthDeath()
cpp.convexPeelDiagram(birth, death)
} | image_utils.go | 0.668988 | 0.407893 | image_utils.go | starcoder |
package dsu;
/*
A disjoint-set—also called union-find—data structure keeps track of
nonoverlapping partitions of a collection of data elements. Initially, each
data element belongs to its own, singleton, set. The following operations can
then be performed on these sets:
• Union merges two sets into a single set containing the union of their
elements.
• Find returns an arbitrary element from a set.
The critical feature is that Find returns the same element when given any
element in a set. The implication is that two elements A and B belong to the
same set if and only if A.Find() == B.Find().
Both Union and Find take as arguments elements of sets, not the sets
themselves. Because sets are mutually disjoint, an element uniquely identifies
a set. Ergo, there is no need to pass sets to those functions.
Disjoint sets are more limited in functionality than conventional sets. They
support only set union, not set intersection, set difference, or any other set
operation. They don't allow an element to reside in more than one set. They
don't even provide a way to enumerate the elements in a given set. What makes
them useful, though, is that they're extremely fast, especially for large sets;
both Union and Find run in amortized near-constant time. See
http://en.wikipedia.org/wiki/Disjoint-set_data_structure for more information.
Disjoint sets are often used in graph algorithms, for example to find a minimal
spanning tree for a graph or to determine if adding a given edge to a graph
would create a cycle.
*/
// Element is a node in a disjoint-set forest. Each set is identified by
// its root element: two elements belong to the same set exactly when
// their Find results coincide. Data is an arbitrary payload never touched
// by the set operations.
type Element struct {
	parent *Element
	rank   int
	Data   interface{}
}

// NewElement creates a singleton set containing only the new element.
func NewElement() *Element {
	e := &Element{}
	e.parent = e
	return e
}

// Find returns the canonical (root) element of the set containing e,
// shortening the path as it walks (path halving).
func (e *Element) Find() *Element {
	for e.parent != e {
		e.parent = e.parent.parent
		e = e.parent
	}
	return e
}

// Union merges the sets containing e1 and e2 using union by rank.
// Merging a set with itself is a no-op.
func Union(e1, e2 *Element) {
	r1, r2 := e1.Find(), e2.Find()
	if r1 == r2 {
		return
	}
	if r1.rank < r2.rank {
		// Attach the shallower tree under the deeper one.
		r1, r2 = r2, r1
	}
	r2.parent = r1
	if r1.rank == r2.rank {
		r1.rank++
	}
}
package camera
import (
"github.com/faiface/pixel"
"gotracer/vmath"
"math"
)
// CameraDefocus object describes how the objects are projected into the screen.
// The camera object is used to get the rays that need to be casted for each screen UV coordinate.
type Camera struct {
// Aspect ratio of the camera viewport (X / Y)
AspectRatio float64
// Field of view of the camera in degrees.
Fov float64
// World position of the camera
Position *vmath.Vector3
// Point where the camera is looking at
LookAt *vmath.Vector3
// Up direction to calculate the camera look direction
Up *vmath.Vector3
// The Lower left corner of the camera relative to the center considering the vertical and horizontal sizes.
// Calculated by the UpdateViewport method.
LowerLeftCorner *vmath.Vector3
// Vertical size of the camera (usually only uses Y).
// Calculated by the UpdateViewport method.
Vertical *vmath.Vector3
// Horizontal size of the camera (usually only uses X).
// Calculated by the UpdateViewport method.
Horizontal *vmath.Vector3
}
// NewCamera builds a camera from the viewport bounding box (used only for
// its aspect ratio) plus an explicit position, look-at target, up vector
// and field of view in degrees, then computes the projection fields.
func NewCamera(bounds pixel.Rect, position *vmath.Vector3, lookAt *vmath.Vector3, up *vmath.Vector3, fov float64) *Camera {
	var c = new(Camera)

	var size = bounds.Size()

	c.Fov = fov
	c.AspectRatio = size.X / size.Y
	c.Position = position
	c.LookAt = lookAt
	c.Up = up

	c.UpdateViewport()

	return c
}
// NewCameraBounds builds a camera from a viewport bounding box alone,
// using defaults: position (-2,2,1), look-at (0,0,-1), up (0,1,0) and a
// 70 degree field of view.
func NewCameraBounds(bounds pixel.Rect) *Camera {
	var c = new(Camera)

	var size = bounds.Size()

	c.Fov = 70
	c.AspectRatio = size.X / size.Y
	c.Position = vmath.NewVector3(-2.0, 2.0, 1.0)
	c.LookAt = vmath.NewVector3(0.0, 0.0, -1.0)
	c.Up = vmath.NewVector3(0.0, 1.0, 0.0)

	c.UpdateViewport()

	return c
}
// UpdateViewport recomputes the derived projection fields
// (LowerLeftCorner, Horizontal, Vertical) from Fov, AspectRatio,
// Position, LookAt and Up. Call it after changing any of those.
func (c *Camera) UpdateViewport() {
	// Half extents of the image plane at unit distance from the camera;
	// Fov drives the vertical extent, AspectRatio the horizontal one.
	var fovRad = c.Fov * (math.Pi / 180.0)
	var halfHeight = math.Tan(fovRad / 2.0)
	var halfWidth = c.AspectRatio * halfHeight

	// Camera basis: w points from the target back toward the camera,
	// u spans the horizontal axis, v the vertical axis.
	var direction = c.Position.Clone()
	direction.Sub(c.LookAt)

	var w = direction.UnitVector()
	var u = vmath.Cross(c.Up, w)
	var v = vmath.Cross(w, u)

	// Scale the basis vectors to the half extents (in place).
	u.MulScalar(halfWidth)
	v.MulScalar(halfHeight)

	// Bottom-left corner of the image plane in world space.
	c.LowerLeftCorner = c.Position.Clone()
	c.LowerLeftCorner.Sub(u)
	c.LowerLeftCorner.Sub(v)
	c.LowerLeftCorner.Sub(w)

	// Full-extent spans of the image plane.
	c.Horizontal = u.Clone()
	c.Horizontal.MulScalar(2.0)
	c.Vertical = v.Clone()
	c.Vertical.MulScalar(2.0)
}
// GetRay returns the ray cast from the camera position through the
// normalized screen coordinate (u, v), where (0,0) maps to the
// lower-left corner of the viewport and (1,1) to the opposite corner.
func (c *Camera) GetRay(u float64, v float64) *vmath.Ray {
	var hor = c.Horizontal.Clone()
	hor.MulScalar(u)

	var vert = c.Vertical.Clone()
	vert.MulScalar(v)

	// direction = LowerLeftCorner + u*Horizontal + v*Vertical - Position
	var direction = c.LowerLeftCorner.Clone()
	direction.Add(hor)
	direction.Add(vert)
	direction.Sub(c.Position)

	return vmath.NewRay(c.Position, direction)
}
// Copy data from another camera object
func (c *Camera) Copy(o *Camera) {
c.Fov = o.Fov
c.AspectRatio = o.AspectRatio
c.Position.Copy(o.Position)
c.LookAt.Copy(o.LookAt)
c.Up.Copy(o.Up)
}
// Clone the camera object
func (o *Camera) Clone() *Camera {
var c = new(Camera)
c.Fov = o.Fov
c.AspectRatio = o.AspectRatio
c.Position = o.Position.Clone()
c.LookAt = o.LookAt.Clone()
c.Up = o.Up.Clone()
c.UpdateViewport()
return c
} | camera/camera.go | 0.831725 | 0.634812 | camera.go | starcoder |
package aoc2021
import (
"fmt"
"strconv"
"strings"
utils "github.com/simonski/aoc/utils"
)
/*
--- Day 3: Binary Diagnostic ---
The submarine has been making some odd creaking noises, so you ask it to produce a diagnostic report just in case.
The diagnostic report (your puzzle input) consists of a list of binary numbers which, when decoded properly, can tell you many useful things about the conditions of the submarine. The first parameter to check is the power consumption.
You need to use the binary numbers in the diagnostic report to generate two new binary numbers (called the gamma rate and the epsilon rate). The power consumption can then be found by multiplying the gamma rate by the epsilon rate.
Each bit in the gamma rate can be determined by finding the most common bit in the corresponding position of all numbers in the diagnostic report. For example, given the following diagnostic report:
00100
11110
10110
10111
10101
01111
00111
11100
10000
11001
00010
01010
Considering only the first bit of each number, there are five 0 bits and seven 1 bits. Since the most common bit is 1, the first bit of the gamma rate is 1.
The most common second bit of the numbers in the diagnostic report is 0, so the second bit of the gamma rate is 0.
The most common value of the third, fourth, and fifth bits are 1, 1, and 0, respectively, and so the final three bits of the gamma rate are 110.
So, the gamma rate is the binary number 10110, or 22 in decimal.
The epsilon rate is calculated in a similar way; rather than use the most common bit, the least common bit from each position is used. So, the epsilon rate is 01001, or 9 in decimal. Multiplying the gamma rate (22) by the epsilon rate (9) produces the power consumption, 198.
Use the binary numbers in your diagnostic report to calculate the gamma rate and epsilon rate, then multiply them together. What is the power consumption of the submarine? (Be sure to represent your answer in decimal, not binary.)
*/
// Y2021D03P1 solves part 1 of AoC 2021 day 3 (power consumption), first on
// the worked example data, then on the real puzzle input.
func (app *Application) Y2021D03P1() {
	RunD1With(DAY_2021_03_TEST_DATA)
	fmt.Println()
	RunD1With(DAY_2021_03_DATA)
}
// RunD1With computes the gamma rate (majority bit per column) and epsilon
// rate (minority bit per column) for the newline-separated diagnostic
// report and prints the resulting power consumption.
func RunD1With(s string) {
	lines := strings.Split(s, "\n")
	width := len(lines[0])
	// oneCounts[col] is how many lines have a '1' in that column.
	oneCounts := make([]int, width)
	for _, line := range lines {
		for col := 0; col < len(line); col++ {
			bit, _ := strconv.Atoi(line[col : col+1])
			oneCounts[col] += bit
		}
	}
	// A column has a majority of ones when its count exceeds half the lines
	// (ties count as a zero-majority, as in the original).
	half := len(lines) / 2
	var gammaBuilder, epsilonBuilder strings.Builder
	for _, count := range oneCounts {
		if count > half {
			gammaBuilder.WriteString("1")
			epsilonBuilder.WriteString("0")
		} else {
			gammaBuilder.WriteString("0")
			epsilonBuilder.WriteString("1")
		}
	}
	gamma_str := gammaBuilder.String()
	epsilon_str := epsilonBuilder.String()
	fmt.Printf("gamma : %v\n", gamma_str)
	fmt.Printf("epsilon: %v\n", epsilon_str)
	dec_gamma := utils.BinaryStringToInt(gamma_str)
	dec_epsilon := utils.BinaryStringToInt(epsilon_str)
	fmt.Printf("dec_gamma : %v\n", dec_gamma)
	fmt.Printf("dec_epsilon: %v\n", dec_epsilon)
	result := int64(dec_gamma * dec_epsilon)
	fmt.Printf("result = %v * %v = %v, or %v\n", dec_gamma, dec_epsilon, result, strconv.FormatInt(result, 2))
}
/*
--- Part Two ---
Next, you should verify the life support rating, which can be determined by multiplying the oxygen generator rating by the CO2 scrubber rating.
Both the oxygen generator rating and the CO2 scrubber rating are values that can be found in your diagnostic report - finding them is the tricky part. Both values are located using a similar process that involves filtering out values until only one remains. Before searching for either rating value, start with the full list of binary numbers from your diagnostic report and consider just the first bit of those numbers. Then:
Keep only numbers selected by the bit criteria for the type of rating value for which you are searching. Discard numbers which do not match the bit criteria.
If you only have one number left, stop; this is the rating value for which you are searching.
Otherwise, repeat the process, considering the next bit to the right.
The bit criteria depends on which type of rating value you want to find:
To find oxygen generator rating, determine the most common value (0 or 1) in the current bit position, and keep only numbers with that bit in that position. If 0 and 1 are equally common, keep values with a 1 in the position being considered.
To find CO2 scrubber rating, determine the least common value (0 or 1) in the current bit position, and keep only numbers with that bit in that position. If 0 and 1 are equally common, keep values with a 0 in the position being considered.
For example, to determine the oxygen generator rating value using the same example diagnostic report from above:
Start with all 12 numbers and consider only the first bit of each number. There are more 1 bits (7) than 0 bits (5), so keep only the 7 numbers with a 1 in the first position: 11110, 10110, 10111, 10101, 11100, 10000, and 11001.
Then, consider the second bit of the 7 remaining numbers: there are more 0 bits (4) than 1 bits (3), so keep only the 4 numbers with a 0 in the second position: 10110, 10111, 10101, and 10000.
In the third position, three of the four numbers have a 1, so keep those three: 10110, 10111, and 10101.
In the fourth position, two of the three numbers have a 1, so keep those two: 10110 and 10111.
In the fifth position, there are an equal number of 0 bits and 1 bits (one each). So, to find the oxygen generator rating, keep the number with a 1 in that position: 10111.
As there is only one number left, stop; the oxygen generator rating is 10111, or 23 in decimal.
Then, to determine the CO2 scrubber rating value from the same example above:
Start again with all 12 numbers and consider only the first bit of each number. There are fewer 0 bits (5) than 1 bits (7), so keep only the 5 numbers with a 0 in the first position: 00100, 01111, 00111, 00010, and 01010.
Then, consider the second bit of the 5 remaining numbers: there are fewer 1 bits (2) than 0 bits (3), so keep only the 2 numbers with a 1 in the second position: 01111 and 01010.
In the third position, there are an equal number of 0 bits and 1 bits (one each). So, to find the CO2 scrubber rating, keep the number with a 0 in that position: 01010.
As there is only one number left, stop; the CO2 scrubber rating is 01010, or 10 in decimal.
Finally, to find the life support rating, multiply the oxygen generator rating (23) by the CO2 scrubber rating (10) to get 230.
Use the binary numbers in your diagnostic report to calculate the oxygen generator rating and CO2 scrubber rating, then multiply them together. What is the life support rating of the submarine? (Be sure to represent your answer in decimal, not binary.)
*/
// Y2021D03P2 solves part 2 of AoC 2021 day 3 (life support rating), first
// on the worked example data, then on the real puzzle input.
func (app *Application) Y2021D03P2() {
	RunD2With(DAY_2021_03_TEST_DATA)
	fmt.Println()
	RunD2With(DAY_2021_03_DATA)
}
// RunD2With computes the oxygen generator rating (most-common-bit filter)
// and CO2 scrubber rating (least-common-bit filter) for the diagnostic
// report and prints their product, the life support rating.
func RunD2With(data string) {
	candidates := strings.Split(data, "\n")
	oxygen := retainMostFunc(candidates, 0)
	co2 := retainLeastFunc(candidates, 0)
	oxygenValue := utils.BinaryStringToInt(oxygen[0])
	co2Value := utils.BinaryStringToInt(co2[0])
	fmt.Printf("Y2021-03/2 most:= %v (%v)\n", oxygen[0], oxygenValue)
	fmt.Printf("Y2021-03/2 least:= %v (%v)\n", co2[0], co2Value)
	fmt.Printf("Y2021-03/2 most*least=%v\n", oxygenValue*co2Value)
}
// retainMostFunc recursively filters lines: at each bit position it keeps
// only the lines whose bit equals the most common value at that position
// (ties keep '1'), stopping when a single line remains. This is the AoC
// 2021 day 3 oxygen-generator rule.
//
// Assumes every line is at least as wide as data[0].
func retainMostFunc(data []string, index int) []string {
	// Stop on zero or one candidate, or once every bit position has been
	// consumed (possible with duplicate input lines). The original panicked
	// with an index-out-of-range in both of those edge cases.
	if len(data) <= 1 || index >= len(data[0]) {
		return data
	}
	ones := 0
	for _, line := range data {
		if line[index] == '1' {
			ones++
		}
	}
	// Keep the most common bit; on a tie (ones == zeroes) keep '1'.
	keep := byte('0')
	if ones*2 >= len(data) {
		keep = '1'
	}
	retain := make([]string, 0, len(data))
	for _, line := range data {
		if line[index] == keep {
			retain = append(retain, line)
		}
	}
	return retainMostFunc(retain, index+1)
}
func retainLeastFunc(data []string, index int) []string {
if len(data) == 1 {
return data
}
// work out frequency
ones := 0
zeroes := 0
for _, line := range data {
value, _ := strconv.Atoi(line[index : index+1])
// fmt.Printf("line=%v, index=%v, value=%v\n", line, index, value)
if value == 0 {
zeroes++
} else {
ones++
}
}
// retain highest occurring
to_retain := -1
if ones < zeroes {
to_retain = 1
} else {
to_retain = 0
}
retain := make([]string, 0)
for _, line := range data {
value, _ := strconv.Atoi(line[index : index+1])
if value == to_retain {
retain = append(retain, line)
}
}
// reduce it then iterate onwards
return retainLeastFunc(retain, index+1)
}
// rename and uncomment this to the year and day in question once complete for a gold star!
// func (app *Application) Y20XXDXXP1Render() {
// }
// rename and uncomment this to the year and day in question once complete for a gold star!
// func (app *Application) Y20XXDXXP2Render() {
// }
// Y2021D03 is the reflected entry point for 2021 day 3: it runs both puzzle
// parts in order. Output formatting is left to the individual parts.
func (app *Application) Y2021D03() {
	app.Y2021D03P1()
	app.Y2021D03P2()
}
package integration
import (
"context"
"fmt"
"strconv"
"testing"
"time"
"github.com/m3db/m3/src/dbnode/client"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/m3ninx/idx"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
"github.com/stretchr/testify/require"
)
// TestIndexWrites holds index writes for testing.
type TestIndexWrites []TestIndexWrite
// TestSeriesIterator is a minimal subset of encoding.SeriesIterator.
type TestSeriesIterator interface {
	encoding.Iterator
	// ID gets the ID of the series.
	ID() ident.ID
	// Tags returns an iterator over the tags associated with the ID.
	Tags() ident.TagIterator
}
// TestSeriesIterators is an iterator over TestSeriesIterator.
type TestSeriesIterators interface {
	// Next moves to the next item.
	Next() bool
	// Current returns the current value.
	Current() TestSeriesIterator
}
// testSeriesIterators adapts encoding.SeriesIterators to the cursor-style
// TestSeriesIterators interface above.
type testSeriesIterators struct {
	encoding.SeriesIterators
	// idx is the cursor position; it points one past the iterator that
	// Current should return.
	idx int
}
// Next advances the cursor, reporting whether another iterator remains.
func (t *testSeriesIterators) Next() bool {
	if t.idx < t.Len() {
		t.idx++
		return true
	}
	return false
}
// Current returns the iterator the cursor was advanced onto by Next.
func (t *testSeriesIterators) Current() TestSeriesIterator {
	current := t.idx - 1
	return t.Iters()[current]
}
// MatchesSeriesIters asserts that the series iterators cover exactly the
// unique series IDs among the expected writes, and that each series'
// datapoints match the corresponding writes.
func (w TestIndexWrites) MatchesSeriesIters(
	t *testing.T,
	seriesIters encoding.SeriesIterators,
) {
	wrapped := &testSeriesIterators{SeriesIterators: seriesIters}
	matched := w.MatchesTestSeriesIters(t, wrapped)
	unique := make(map[string]struct{}, len(w))
	for _, write := range w {
		unique[write.ID.String()] = struct{}{}
	}
	require.Equal(t, len(unique), matched)
}
// MatchesTestSeriesIters asserts every series produced by the iterators has
// matching expected writes, and returns the number of series seen.
func (w TestIndexWrites) MatchesTestSeriesIters(
	t *testing.T,
	seriesIters TestSeriesIterators,
) int {
	// Group the expected writes by series ID for per-series comparison.
	byID := make(map[string]TestIndexWrites, len(w))
	for _, write := range w {
		key := write.ID.String()
		byID[key] = append(byID[key], write)
	}
	matched := 0
	for seriesIters.Next() {
		iter := seriesIters.Current()
		id := iter.ID().String()
		writes, ok := byID[id]
		require.True(t, ok, id)
		writes.matchesSeriesIter(t, iter)
		matched++
	}
	return matched
}
// matchesSeriesIter asserts that the iterator's tags match the expected
// writes and that each datapoint it yields pairs off with exactly one
// expected write (same timestamp and value), with none left unmatched.
func (w TestIndexWrites) matchesSeriesIter(t *testing.T, iter TestSeriesIterator) {
	// found[i] marks expected write i as already consumed by a datapoint.
	found := make([]bool, len(w))
	count := 0
	for iter.Next() {
		count++
		dp, _, _ := iter.Current()
		for i := 0; i < len(w); i++ {
			if found[i] {
				continue
			}
			wi := w[i]
			// Every expected write must carry the same tags as the series.
			if !ident.NewTagIterMatcher(wi.Tags.Duplicate()).Matches(iter.Tags().Duplicate()) {
				require.FailNow(t, "tags don't match provided id", iter.ID().String())
			}
			// Pair this datapoint with the first unmatched write it equals.
			if dp.Timestamp.Equal(wi.Timestamp) && dp.Value == wi.Value {
				found[i] = true
				break
			}
		}
	}
	// Exactly one datapoint per expected write, no iterator error, and
	// every expected write matched.
	require.Equal(t, len(w), count, iter.ID().String())
	require.NoError(t, iter.Err())
	for i := 0; i < len(found); i++ {
		require.True(t, found[i], iter.ID().String())
	}
}
// Write writes every expected index write through the session, failing the
// test on the first write error.
func (w TestIndexWrites) Write(t *testing.T, ns ident.ID, s client.Session) {
	for _, wi := range w {
		require.NoError(t, s.WriteTagged(ns,
			wi.ID,
			wi.Tags.Duplicate(),
			wi.Timestamp,
			wi.Value,
			xtime.Second,
			nil,
		), "%v", wi)
	}
}
// NumIndexed gets the number of writes whose series can currently be found
// via a tag query around the write's timestamp. It is best-effort: query
// errors and mismatches simply skip the write rather than failing the test,
// so callers can poll until the expected count is reached.
func (w TestIndexWrites) NumIndexed(t *testing.T, ns ident.ID, s client.Session) int {
	numFound := 0
	for i := 0; i < len(w); i++ {
		wi := w[i]
		q := newQuery(t, wi.Tags)
		// Query a 2s window straddling the write's timestamp.
		iter, _, err := s.FetchTaggedIDs(ContextWithDefaultTimeout(), ns,
			index.Query{Query: q},
			index.QueryOptions{
				StartInclusive: wi.Timestamp.Add(-1 * time.Second),
				EndExclusive: wi.Timestamp.Add(1 * time.Second),
				SeriesLimit: 10,
			})
		if err != nil {
			continue
		}
		// Only the first result is inspected; the write counts as indexed
		// when namespace, ID and tags all line up.
		if !iter.Next() {
			continue
		}
		cuNs, cuID, cuTag := iter.Current()
		if ns.String() != cuNs.String() {
			continue
		}
		if wi.ID.String() != cuID.String() {
			continue
		}
		if !ident.NewTagIterMatcher(wi.Tags).Matches(cuTag) {
			continue
		}
		numFound++
	}
	return numFound
}
// TestIndexWrite models a single tagged datapoint expected to be written
// to (and later found in) the index.
type TestIndexWrite struct {
	// ID is the series identifier.
	ID ident.ID
	// Tags iterates the tags written with the datapoint.
	Tags ident.TagIterator
	// Timestamp is the datapoint's time.
	Timestamp time.Time
	// Value is the datapoint's value.
	Value float64
}
// GenerateTestIndexWrite generates numWrites test index writes for the
// given period, spaced evenly (truncated to seconds) across the
// [startTime, endTime) window, each carrying numTags generated tags.
func GenerateTestIndexWrite(periodID, numWrites, numTags int, startTime, endTime time.Time) TestIndexWrites {
	interval := endTime.Sub(startTime) / time.Duration(numWrites+1)
	writes := make([]TestIndexWrite, numWrites)
	for n := 0; n < numWrites; n++ {
		id, tags := genIDTags(periodID, n, numTags)
		writes[n] = TestIndexWrite{
			ID:        id,
			Tags:      tags,
			Timestamp: startTime.Add(time.Duration(n) * interval).Truncate(time.Second),
			Value:     float64(n),
		}
	}
	return writes
}
// genIDTagsOption post-processes the generated tags before they are wrapped
// in an iterator.
type genIDTagsOption func(ident.Tags) ident.Tags
// genIDTags builds a deterministic series ID ("foo.<i>.<j>") plus numTags
// per-series tags, three common tags ("common_i", "common_j", "shared"),
// and applies any options to the resulting tag set.
func genIDTags(i int, j int, numTags int, opts ...genIDTagsOption) (ident.ID, ident.TagIterator) {
	id := fmt.Sprintf("foo.%d.%d", i, j)
	tags := make([]ident.Tag, 0, numTags)
	// Use a distinct loop variable here: the original shadowed the
	// parameter i (flagged by vet's shadow analyzer), which obscured the
	// fact that the common_i tag below uses the outer i.
	for tagIdx := 0; tagIdx < numTags; tagIdx++ {
		tags = append(tags, ident.StringTag(
			fmt.Sprintf("%s.tagname.%d", id, tagIdx),
			fmt.Sprintf("%s.tagvalue.%d", id, tagIdx),
		))
	}
	tags = append(tags,
		ident.StringTag("common_i", strconv.Itoa(i)),
		ident.StringTag("common_j", strconv.Itoa(j)),
		ident.StringTag("shared", "shared"))
	result := ident.NewTags(tags...)
	for _, fn := range opts {
		result = fn(result)
	}
	return ident.StringID(id), ident.NewTagsIterator(result)
}
// isIndexed reports whether the given series is currently findable in the
// index, treating any query error as "not indexed".
func isIndexed(t *testing.T, s client.Session, ns ident.ID, id ident.ID, tags ident.TagIterator) bool {
	indexed, err := isIndexedChecked(t, s, ns, id, tags)
	return err == nil && indexed
}
// isIndexedChecked queries the index for the series by its tags and reports
// whether the first result matches the expected namespace, ID and tags.
// A missing result is (false, nil); a mismatch is (false, err).
func isIndexedChecked(t *testing.T, s client.Session, ns ident.ID, id ident.ID, tags ident.TagIterator) (bool, error) {
	q := newQuery(t, tags)
	// Point-in-time query: start == end == now.
	iter, _, err := s.FetchTaggedIDs(ContextWithDefaultTimeout(), ns,
		index.Query{Query: q},
		index.QueryOptions{
			StartInclusive: time.Now(),
			EndExclusive: time.Now(),
			SeriesLimit: 10,
		})
	if err != nil {
		return false, err
	}
	defer iter.Finalize()
	if !iter.Next() {
		return false, nil
	}
	// Only the first result is checked against the expected series.
	cuNs, cuID, cuTag := iter.Current()
	if err := iter.Err(); err != nil {
		return false, fmt.Errorf("iter err: %v", err)
	}
	if ns.String() != cuNs.String() {
		return false, fmt.Errorf("namespace not matched")
	}
	if id.String() != cuID.String() {
		return false, fmt.Errorf("id not matched")
	}
	if !ident.NewTagIterMatcher(tags).Matches(cuTag) {
		return false, fmt.Errorf("tags did not match")
	}
	return true, nil
}
// newQuery builds a conjunction (AND) of exact term queries, one per tag.
func newQuery(t *testing.T, tags ident.TagIterator) idx.Query {
	dup := tags.Duplicate()
	terms := make([]idx.Query, 0, dup.Remaining())
	for dup.Next() {
		tag := dup.Current()
		terms = append(terms, idx.NewTermQuery(tag.Name.Bytes(), tag.Value.Bytes()))
	}
	return idx.NewConjunctionQuery(terms...)
}
// ContextWithDefaultTimeout returns a context with a default timeout
// set of one minute.
func ContextWithDefaultTimeout() context.Context {
ctx, _ := context.WithTimeout(context.Background(), time.Minute)
return ctx
} | src/dbnode/integration/index_helpers.go | 0.620277 | 0.443962 | index_helpers.go | starcoder |
package sqlparser
// EqualsSQLNode does deep equals between the two objects. Two nil nodes are
// equal; a nil and a non-nil node are not. Otherwise both nodes must have
// the same dynamic type and be deep-equal under that type's comparison.
func EqualsSQLNode(inA, inB SQLNode) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; each case asserts inB to the
	// same type (short-circuiting to false on mismatch) and delegates to
	// the type-specific equality, or uses == for simple value types.
	switch a := inA.(type) {
	case AccessMode:
		b, ok := inB.(AccessMode)
		return ok && a == b
	case *AddColumns:
		b, ok := inB.(*AddColumns)
		return ok && EqualsRefOfAddColumns(a, b)
	case *AddConstraintDefinition:
		b, ok := inB.(*AddConstraintDefinition)
		return ok && EqualsRefOfAddConstraintDefinition(a, b)
	case *AddIndexDefinition:
		b, ok := inB.(*AddIndexDefinition)
		return ok && EqualsRefOfAddIndexDefinition(a, b)
	case AlgorithmValue:
		b, ok := inB.(AlgorithmValue)
		return ok && a == b
	case *AliasedExpr:
		b, ok := inB.(*AliasedExpr)
		return ok && EqualsRefOfAliasedExpr(a, b)
	case *AliasedTableExpr:
		b, ok := inB.(*AliasedTableExpr)
		return ok && EqualsRefOfAliasedTableExpr(a, b)
	case *AlterCharset:
		b, ok := inB.(*AlterCharset)
		return ok && EqualsRefOfAlterCharset(a, b)
	case *AlterColumn:
		b, ok := inB.(*AlterColumn)
		return ok && EqualsRefOfAlterColumn(a, b)
	case *AlterDatabase:
		b, ok := inB.(*AlterDatabase)
		return ok && EqualsRefOfAlterDatabase(a, b)
	case *AlterMigration:
		b, ok := inB.(*AlterMigration)
		return ok && EqualsRefOfAlterMigration(a, b)
	case *AlterTable:
		b, ok := inB.(*AlterTable)
		return ok && EqualsRefOfAlterTable(a, b)
	case *AlterView:
		b, ok := inB.(*AlterView)
		return ok && EqualsRefOfAlterView(a, b)
	case *AlterVschema:
		b, ok := inB.(*AlterVschema)
		return ok && EqualsRefOfAlterVschema(a, b)
	case *AndExpr:
		b, ok := inB.(*AndExpr)
		return ok && EqualsRefOfAndExpr(a, b)
	case Argument:
		b, ok := inB.(Argument)
		return ok && a == b
	case *AutoIncSpec:
		b, ok := inB.(*AutoIncSpec)
		return ok && EqualsRefOfAutoIncSpec(a, b)
	case *Begin:
		b, ok := inB.(*Begin)
		return ok && EqualsRefOfBegin(a, b)
	case *BinaryExpr:
		b, ok := inB.(*BinaryExpr)
		return ok && EqualsRefOfBinaryExpr(a, b)
	case BoolVal:
		b, ok := inB.(BoolVal)
		return ok && a == b
	case *CallProc:
		b, ok := inB.(*CallProc)
		return ok && EqualsRefOfCallProc(a, b)
	case *CaseExpr:
		b, ok := inB.(*CaseExpr)
		return ok && EqualsRefOfCaseExpr(a, b)
	case *ChangeColumn:
		b, ok := inB.(*ChangeColumn)
		return ok && EqualsRefOfChangeColumn(a, b)
	case *CheckConstraintDefinition:
		b, ok := inB.(*CheckConstraintDefinition)
		return ok && EqualsRefOfCheckConstraintDefinition(a, b)
	case ColIdent:
		b, ok := inB.(ColIdent)
		return ok && EqualsColIdent(a, b)
	case *ColName:
		b, ok := inB.(*ColName)
		return ok && EqualsRefOfColName(a, b)
	case *CollateExpr:
		b, ok := inB.(*CollateExpr)
		return ok && EqualsRefOfCollateExpr(a, b)
	case *ColumnDefinition:
		b, ok := inB.(*ColumnDefinition)
		return ok && EqualsRefOfColumnDefinition(a, b)
	case *ColumnType:
		b, ok := inB.(*ColumnType)
		return ok && EqualsRefOfColumnType(a, b)
	case Columns:
		b, ok := inB.(Columns)
		return ok && EqualsColumns(a, b)
	case Comments:
		b, ok := inB.(Comments)
		return ok && EqualsComments(a, b)
	case *Commit:
		b, ok := inB.(*Commit)
		return ok && EqualsRefOfCommit(a, b)
	case *ComparisonExpr:
		b, ok := inB.(*ComparisonExpr)
		return ok && EqualsRefOfComparisonExpr(a, b)
	case *ConstraintDefinition:
		b, ok := inB.(*ConstraintDefinition)
		return ok && EqualsRefOfConstraintDefinition(a, b)
	case *ConvertExpr:
		b, ok := inB.(*ConvertExpr)
		return ok && EqualsRefOfConvertExpr(a, b)
	case *ConvertType:
		b, ok := inB.(*ConvertType)
		return ok && EqualsRefOfConvertType(a, b)
	case *ConvertUsingExpr:
		b, ok := inB.(*ConvertUsingExpr)
		return ok && EqualsRefOfConvertUsingExpr(a, b)
	case *CreateDatabase:
		b, ok := inB.(*CreateDatabase)
		return ok && EqualsRefOfCreateDatabase(a, b)
	case *CreateTable:
		b, ok := inB.(*CreateTable)
		return ok && EqualsRefOfCreateTable(a, b)
	case *CreateView:
		b, ok := inB.(*CreateView)
		return ok && EqualsRefOfCreateView(a, b)
	case *CurTimeFuncExpr:
		b, ok := inB.(*CurTimeFuncExpr)
		return ok && EqualsRefOfCurTimeFuncExpr(a, b)
	case *Default:
		b, ok := inB.(*Default)
		return ok && EqualsRefOfDefault(a, b)
	case *Delete:
		b, ok := inB.(*Delete)
		return ok && EqualsRefOfDelete(a, b)
	case *DerivedTable:
		b, ok := inB.(*DerivedTable)
		return ok && EqualsRefOfDerivedTable(a, b)
	case *DropColumn:
		b, ok := inB.(*DropColumn)
		return ok && EqualsRefOfDropColumn(a, b)
	case *DropDatabase:
		b, ok := inB.(*DropDatabase)
		return ok && EqualsRefOfDropDatabase(a, b)
	case *DropKey:
		b, ok := inB.(*DropKey)
		return ok && EqualsRefOfDropKey(a, b)
	case *DropTable:
		b, ok := inB.(*DropTable)
		return ok && EqualsRefOfDropTable(a, b)
	case *DropView:
		b, ok := inB.(*DropView)
		return ok && EqualsRefOfDropView(a, b)
	case *ExistsExpr:
		b, ok := inB.(*ExistsExpr)
		return ok && EqualsRefOfExistsExpr(a, b)
	case *ExplainStmt:
		b, ok := inB.(*ExplainStmt)
		return ok && EqualsRefOfExplainStmt(a, b)
	case *ExplainTab:
		b, ok := inB.(*ExplainTab)
		return ok && EqualsRefOfExplainTab(a, b)
	case Exprs:
		b, ok := inB.(Exprs)
		return ok && EqualsExprs(a, b)
	case *Flush:
		b, ok := inB.(*Flush)
		return ok && EqualsRefOfFlush(a, b)
	case *Force:
		b, ok := inB.(*Force)
		return ok && EqualsRefOfForce(a, b)
	case *ForeignKeyDefinition:
		b, ok := inB.(*ForeignKeyDefinition)
		return ok && EqualsRefOfForeignKeyDefinition(a, b)
	case *FuncExpr:
		b, ok := inB.(*FuncExpr)
		return ok && EqualsRefOfFuncExpr(a, b)
	case GroupBy:
		b, ok := inB.(GroupBy)
		return ok && EqualsGroupBy(a, b)
	case *GroupConcatExpr:
		b, ok := inB.(*GroupConcatExpr)
		return ok && EqualsRefOfGroupConcatExpr(a, b)
	case *IndexDefinition:
		b, ok := inB.(*IndexDefinition)
		return ok && EqualsRefOfIndexDefinition(a, b)
	case *IndexHints:
		b, ok := inB.(*IndexHints)
		return ok && EqualsRefOfIndexHints(a, b)
	case *IndexInfo:
		b, ok := inB.(*IndexInfo)
		return ok && EqualsRefOfIndexInfo(a, b)
	case *Insert:
		b, ok := inB.(*Insert)
		return ok && EqualsRefOfInsert(a, b)
	case *IntervalExpr:
		b, ok := inB.(*IntervalExpr)
		return ok && EqualsRefOfIntervalExpr(a, b)
	case *IsExpr:
		b, ok := inB.(*IsExpr)
		return ok && EqualsRefOfIsExpr(a, b)
	case IsolationLevel:
		b, ok := inB.(IsolationLevel)
		return ok && a == b
	case JoinCondition:
		b, ok := inB.(JoinCondition)
		return ok && EqualsJoinCondition(a, b)
	case *JoinTableExpr:
		b, ok := inB.(*JoinTableExpr)
		return ok && EqualsRefOfJoinTableExpr(a, b)
	case *KeyState:
		b, ok := inB.(*KeyState)
		return ok && EqualsRefOfKeyState(a, b)
	case *Limit:
		b, ok := inB.(*Limit)
		return ok && EqualsRefOfLimit(a, b)
	case ListArg:
		b, ok := inB.(ListArg)
		return ok && a == b
	case *Literal:
		b, ok := inB.(*Literal)
		return ok && EqualsRefOfLiteral(a, b)
	case *Load:
		b, ok := inB.(*Load)
		return ok && EqualsRefOfLoad(a, b)
	case *LockOption:
		b, ok := inB.(*LockOption)
		return ok && EqualsRefOfLockOption(a, b)
	case *LockTables:
		b, ok := inB.(*LockTables)
		return ok && EqualsRefOfLockTables(a, b)
	case *MatchExpr:
		b, ok := inB.(*MatchExpr)
		return ok && EqualsRefOfMatchExpr(a, b)
	case *ModifyColumn:
		b, ok := inB.(*ModifyColumn)
		return ok && EqualsRefOfModifyColumn(a, b)
	case *Nextval:
		b, ok := inB.(*Nextval)
		return ok && EqualsRefOfNextval(a, b)
	case *NotExpr:
		b, ok := inB.(*NotExpr)
		return ok && EqualsRefOfNotExpr(a, b)
	case *NullVal:
		b, ok := inB.(*NullVal)
		return ok && EqualsRefOfNullVal(a, b)
	case OnDup:
		b, ok := inB.(OnDup)
		return ok && EqualsOnDup(a, b)
	case *OptLike:
		b, ok := inB.(*OptLike)
		return ok && EqualsRefOfOptLike(a, b)
	case *OrExpr:
		b, ok := inB.(*OrExpr)
		return ok && EqualsRefOfOrExpr(a, b)
	case *Order:
		b, ok := inB.(*Order)
		return ok && EqualsRefOfOrder(a, b)
	case OrderBy:
		b, ok := inB.(OrderBy)
		return ok && EqualsOrderBy(a, b)
	case *OrderByOption:
		b, ok := inB.(*OrderByOption)
		return ok && EqualsRefOfOrderByOption(a, b)
	case *OtherAdmin:
		b, ok := inB.(*OtherAdmin)
		return ok && EqualsRefOfOtherAdmin(a, b)
	case *OtherRead:
		b, ok := inB.(*OtherRead)
		return ok && EqualsRefOfOtherRead(a, b)
	case *ParenSelect:
		b, ok := inB.(*ParenSelect)
		return ok && EqualsRefOfParenSelect(a, b)
	case *ParenTableExpr:
		b, ok := inB.(*ParenTableExpr)
		return ok && EqualsRefOfParenTableExpr(a, b)
	case *PartitionDefinition:
		b, ok := inB.(*PartitionDefinition)
		return ok && EqualsRefOfPartitionDefinition(a, b)
	case *PartitionSpec:
		b, ok := inB.(*PartitionSpec)
		return ok && EqualsRefOfPartitionSpec(a, b)
	case Partitions:
		b, ok := inB.(Partitions)
		return ok && EqualsPartitions(a, b)
	case *RangeCond:
		b, ok := inB.(*RangeCond)
		return ok && EqualsRefOfRangeCond(a, b)
	case ReferenceAction:
		b, ok := inB.(ReferenceAction)
		return ok && a == b
	case *ReferenceDefinition:
		b, ok := inB.(*ReferenceDefinition)
		return ok && EqualsRefOfReferenceDefinition(a, b)
	case *Release:
		b, ok := inB.(*Release)
		return ok && EqualsRefOfRelease(a, b)
	case *RenameIndex:
		b, ok := inB.(*RenameIndex)
		return ok && EqualsRefOfRenameIndex(a, b)
	case *RenameTable:
		b, ok := inB.(*RenameTable)
		return ok && EqualsRefOfRenameTable(a, b)
	case *RenameTableName:
		b, ok := inB.(*RenameTableName)
		return ok && EqualsRefOfRenameTableName(a, b)
	case *RevertMigration:
		b, ok := inB.(*RevertMigration)
		return ok && EqualsRefOfRevertMigration(a, b)
	case *Rollback:
		b, ok := inB.(*Rollback)
		return ok && EqualsRefOfRollback(a, b)
	case *SRollback:
		b, ok := inB.(*SRollback)
		return ok && EqualsRefOfSRollback(a, b)
	case *Savepoint:
		b, ok := inB.(*Savepoint)
		return ok && EqualsRefOfSavepoint(a, b)
	case *Select:
		b, ok := inB.(*Select)
		return ok && EqualsRefOfSelect(a, b)
	case SelectExprs:
		b, ok := inB.(SelectExprs)
		return ok && EqualsSelectExprs(a, b)
	case *SelectInto:
		b, ok := inB.(*SelectInto)
		return ok && EqualsRefOfSelectInto(a, b)
	case *Set:
		b, ok := inB.(*Set)
		return ok && EqualsRefOfSet(a, b)
	case *SetExpr:
		b, ok := inB.(*SetExpr)
		return ok && EqualsRefOfSetExpr(a, b)
	case SetExprs:
		b, ok := inB.(SetExprs)
		return ok && EqualsSetExprs(a, b)
	case *SetTransaction:
		b, ok := inB.(*SetTransaction)
		return ok && EqualsRefOfSetTransaction(a, b)
	case *Show:
		b, ok := inB.(*Show)
		return ok && EqualsRefOfShow(a, b)
	case *ShowBasic:
		b, ok := inB.(*ShowBasic)
		return ok && EqualsRefOfShowBasic(a, b)
	case *ShowCreate:
		b, ok := inB.(*ShowCreate)
		return ok && EqualsRefOfShowCreate(a, b)
	case *ShowFilter:
		b, ok := inB.(*ShowFilter)
		return ok && EqualsRefOfShowFilter(a, b)
	case *ShowLegacy:
		b, ok := inB.(*ShowLegacy)
		return ok && EqualsRefOfShowLegacy(a, b)
	case *ShowMigrationLogs:
		b, ok := inB.(*ShowMigrationLogs)
		return ok && EqualsRefOfShowMigrationLogs(a, b)
	case *StarExpr:
		b, ok := inB.(*StarExpr)
		return ok && EqualsRefOfStarExpr(a, b)
	case *Stream:
		b, ok := inB.(*Stream)
		return ok && EqualsRefOfStream(a, b)
	case *Subquery:
		b, ok := inB.(*Subquery)
		return ok && EqualsRefOfSubquery(a, b)
	case *SubstrExpr:
		b, ok := inB.(*SubstrExpr)
		return ok && EqualsRefOfSubstrExpr(a, b)
	case TableExprs:
		b, ok := inB.(TableExprs)
		return ok && EqualsTableExprs(a, b)
	case TableIdent:
		b, ok := inB.(TableIdent)
		return ok && EqualsTableIdent(a, b)
	case TableName:
		b, ok := inB.(TableName)
		return ok && EqualsTableName(a, b)
	case TableNames:
		b, ok := inB.(TableNames)
		return ok && EqualsTableNames(a, b)
	case TableOptions:
		b, ok := inB.(TableOptions)
		return ok && EqualsTableOptions(a, b)
	case *TableSpec:
		b, ok := inB.(*TableSpec)
		return ok && EqualsRefOfTableSpec(a, b)
	case *TablespaceOperation:
		b, ok := inB.(*TablespaceOperation)
		return ok && EqualsRefOfTablespaceOperation(a, b)
	case *TimestampFuncExpr:
		b, ok := inB.(*TimestampFuncExpr)
		return ok && EqualsRefOfTimestampFuncExpr(a, b)
	case *TruncateTable:
		b, ok := inB.(*TruncateTable)
		return ok && EqualsRefOfTruncateTable(a, b)
	case *UnaryExpr:
		b, ok := inB.(*UnaryExpr)
		return ok && EqualsRefOfUnaryExpr(a, b)
	case *Union:
		b, ok := inB.(*Union)
		return ok && EqualsRefOfUnion(a, b)
	case *UnionSelect:
		b, ok := inB.(*UnionSelect)
		return ok && EqualsRefOfUnionSelect(a, b)
	case *UnlockTables:
		b, ok := inB.(*UnlockTables)
		return ok && EqualsRefOfUnlockTables(a, b)
	case *Update:
		b, ok := inB.(*Update)
		return ok && EqualsRefOfUpdate(a, b)
	case *UpdateExpr:
		b, ok := inB.(*UpdateExpr)
		return ok && EqualsRefOfUpdateExpr(a, b)
	case UpdateExprs:
		b, ok := inB.(UpdateExprs)
		return ok && EqualsUpdateExprs(a, b)
	case *Use:
		b, ok := inB.(*Use)
		return ok && EqualsRefOfUse(a, b)
	case *VStream:
		b, ok := inB.(*VStream)
		return ok && EqualsRefOfVStream(a, b)
	case ValTuple:
		b, ok := inB.(ValTuple)
		return ok && EqualsValTuple(a, b)
	case *Validation:
		b, ok := inB.(*Validation)
		return ok && EqualsRefOfValidation(a, b)
	case Values:
		b, ok := inB.(Values)
		return ok && EqualsValues(a, b)
	case *ValuesFuncExpr:
		b, ok := inB.(*ValuesFuncExpr)
		return ok && EqualsRefOfValuesFuncExpr(a, b)
	case VindexParam:
		b, ok := inB.(VindexParam)
		return ok && EqualsVindexParam(a, b)
	case *VindexSpec:
		b, ok := inB.(*VindexSpec)
		return ok && EqualsRefOfVindexSpec(a, b)
	case *When:
		b, ok := inB.(*When)
		return ok && EqualsRefOfWhen(a, b)
	case *Where:
		b, ok := inB.(*Where)
		return ok && EqualsRefOfWhere(a, b)
	case *XorExpr:
		b, ok := inB.(*XorExpr)
		return ok && EqualsRefOfXorExpr(a, b)
	default:
		// this should never happen
		return false
	}
}
// EqualsRefOfAddColumns reports whether a and b are deeply equal.
func EqualsRefOfAddColumns(a, b *AddColumns) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsSliceOfRefOfColumnDefinition(a.Columns, b.Columns) &&
		EqualsRefOfColName(a.First, b.First) &&
		EqualsRefOfColName(a.After, b.After)
}
// EqualsRefOfAddConstraintDefinition reports whether a and b are deeply equal.
func EqualsRefOfAddConstraintDefinition(a, b *AddConstraintDefinition) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsRefOfConstraintDefinition(a.ConstraintDefinition, b.ConstraintDefinition)
}
// EqualsRefOfAddIndexDefinition reports whether a and b are deeply equal.
func EqualsRefOfAddIndexDefinition(a, b *AddIndexDefinition) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsRefOfIndexDefinition(a.IndexDefinition, b.IndexDefinition)
}
// EqualsRefOfAliasedExpr reports whether a and b are deeply equal.
func EqualsRefOfAliasedExpr(a, b *AliasedExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsExpr(a.Expr, b.Expr) &&
		EqualsColIdent(a.As, b.As)
}
// EqualsRefOfAliasedTableExpr reports whether a and b are deeply equal.
func EqualsRefOfAliasedTableExpr(a, b *AliasedTableExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsSimpleTableExpr(a.Expr, b.Expr) &&
		EqualsPartitions(a.Partitions, b.Partitions) &&
		EqualsTableIdent(a.As, b.As) &&
		EqualsRefOfIndexHints(a.Hints, b.Hints)
}
// EqualsRefOfAlterCharset reports whether a and b are deeply equal.
func EqualsRefOfAlterCharset(a, b *AlterCharset) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.CharacterSet == b.CharacterSet &&
		a.Collate == b.Collate
}
// EqualsRefOfAlterColumn reports whether a and b are deeply equal.
func EqualsRefOfAlterColumn(a, b *AlterColumn) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.DropDefault == b.DropDefault &&
		EqualsRefOfColName(a.Column, b.Column) &&
		EqualsExpr(a.DefaultVal, b.DefaultVal)
}
// EqualsRefOfAlterDatabase reports whether a and b are deeply equal.
func EqualsRefOfAlterDatabase(a, b *AlterDatabase) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.UpdateDataDirectory == b.UpdateDataDirectory &&
		a.FullyParsed == b.FullyParsed &&
		EqualsTableIdent(a.DBName, b.DBName) &&
		EqualsSliceOfCollateAndCharset(a.AlterOptions, b.AlterOptions)
}
// EqualsRefOfAlterMigration reports whether a and b are deeply equal.
func EqualsRefOfAlterMigration(a, b *AlterMigration) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.UUID == b.UUID &&
		a.Type == b.Type
}
// EqualsRefOfAlterTable reports whether a and b are deeply equal.
func EqualsRefOfAlterTable(a, b *AlterTable) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.FullyParsed == b.FullyParsed &&
		EqualsTableName(a.Table, b.Table) &&
		EqualsSliceOfAlterOption(a.AlterOptions, b.AlterOptions) &&
		EqualsRefOfPartitionSpec(a.PartitionSpec, b.PartitionSpec) &&
		EqualsComments(a.Comments, b.Comments)
}
// EqualsRefOfAlterView reports whether a and b are deeply equal.
func EqualsRefOfAlterView(a, b *AlterView) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Algorithm == b.Algorithm &&
		a.Definer == b.Definer &&
		a.Security == b.Security &&
		a.CheckOption == b.CheckOption &&
		EqualsTableName(a.ViewName, b.ViewName) &&
		EqualsColumns(a.Columns, b.Columns) &&
		EqualsSelectStatement(a.Select, b.Select)
}
// EqualsRefOfAlterVschema reports whether a and b are deeply equal.
func EqualsRefOfAlterVschema(a, b *AlterVschema) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Action == b.Action &&
		EqualsTableName(a.Table, b.Table) &&
		EqualsRefOfVindexSpec(a.VindexSpec, b.VindexSpec) &&
		EqualsSliceOfColIdent(a.VindexCols, b.VindexCols) &&
		EqualsRefOfAutoIncSpec(a.AutoIncSpec, b.AutoIncSpec)
}
// EqualsRefOfAndExpr reports whether a and b are deeply equal.
func EqualsRefOfAndExpr(a, b *AndExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsExpr(a.Left, b.Left) &&
		EqualsExpr(a.Right, b.Right)
}
// EqualsRefOfAutoIncSpec reports whether a and b are deeply equal.
func EqualsRefOfAutoIncSpec(a, b *AutoIncSpec) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsColIdent(a.Column, b.Column) &&
		EqualsTableName(a.Sequence, b.Sequence)
}
// EqualsRefOfBegin reports whether a and b are deeply equal.
// *Begin carries no fields, so equality is decided by nil-ness alone.
func EqualsRefOfBegin(a, b *Begin) bool {
	if a == nil || b == nil {
		return a == b
	}
	return true
}
// EqualsRefOfBinaryExpr reports whether a and b are deeply equal.
func EqualsRefOfBinaryExpr(a, b *BinaryExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Operator == b.Operator &&
		EqualsExpr(a.Left, b.Left) &&
		EqualsExpr(a.Right, b.Right)
}
// EqualsRefOfCallProc reports whether a and b are deeply equal.
func EqualsRefOfCallProc(a, b *CallProc) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsTableName(a.Name, b.Name) &&
		EqualsExprs(a.Params, b.Params)
}
// EqualsRefOfCaseExpr reports whether a and b are deeply equal.
func EqualsRefOfCaseExpr(a, b *CaseExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsExpr(a.Expr, b.Expr) &&
		EqualsSliceOfRefOfWhen(a.Whens, b.Whens) &&
		EqualsExpr(a.Else, b.Else)
}
// EqualsRefOfChangeColumn reports whether a and b are deeply equal.
func EqualsRefOfChangeColumn(a, b *ChangeColumn) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsRefOfColName(a.OldColumn, b.OldColumn) &&
		EqualsRefOfColumnDefinition(a.NewColDefinition, b.NewColDefinition) &&
		EqualsRefOfColName(a.First, b.First) &&
		EqualsRefOfColName(a.After, b.After)
}
// EqualsRefOfCheckConstraintDefinition reports whether a and b are deeply equal.
func EqualsRefOfCheckConstraintDefinition(a, b *CheckConstraintDefinition) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Enforced == b.Enforced &&
		EqualsExpr(a.Expr, b.Expr)
}
// EqualsColIdent does deep equals between the two objects.
// ColIdent is a value type, so there is no nil case; all three
// unexported fields are compared directly.
func EqualsColIdent(a, b ColIdent) bool {
	return a.val == b.val &&
		a.lowered == b.lowered &&
		a.at == b.at
}
// EqualsRefOfColName reports whether a and b are deeply equal.
func EqualsRefOfColName(a, b *ColName) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsColIdent(a.Name, b.Name) &&
		EqualsTableName(a.Qualifier, b.Qualifier)
}
// EqualsRefOfCollateExpr reports whether a and b are deeply equal.
func EqualsRefOfCollateExpr(a, b *CollateExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Charset == b.Charset &&
		EqualsExpr(a.Expr, b.Expr)
}
// EqualsRefOfColumnDefinition reports whether a and b are deeply equal.
func EqualsRefOfColumnDefinition(a, b *ColumnDefinition) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsColIdent(a.Name, b.Name) &&
		EqualsColumnType(a.Type, b.Type)
}
// EqualsRefOfColumnType reports whether a and b are deeply equal.
func EqualsRefOfColumnType(a, b *ColumnType) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Type == b.Type &&
		a.Unsigned == b.Unsigned &&
		a.Zerofill == b.Zerofill &&
		a.Charset == b.Charset &&
		a.Collate == b.Collate &&
		EqualsRefOfColumnTypeOptions(a.Options, b.Options) &&
		EqualsRefOfLiteral(a.Length, b.Length) &&
		EqualsRefOfLiteral(a.Scale, b.Scale) &&
		EqualsSliceOfString(a.EnumValues, b.EnumValues)
}
// EqualsColumns reports whether a and b have the same length and
// element-wise deeply equal entries.
func EqualsColumns(a, b Columns) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !EqualsColIdent(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsComments reports whether a and b have the same length and
// identical entries.
func EqualsComments(a, b Comments) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
// EqualsRefOfCommit reports whether a and b are deeply equal.
// *Commit carries no fields, so equality is decided by nil-ness alone.
func EqualsRefOfCommit(a, b *Commit) bool {
	if a == nil || b == nil {
		return a == b
	}
	return true
}
// EqualsRefOfComparisonExpr reports whether a and b are deeply equal.
func EqualsRefOfComparisonExpr(a, b *ComparisonExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Operator == b.Operator &&
		EqualsExpr(a.Left, b.Left) &&
		EqualsExpr(a.Right, b.Right) &&
		EqualsExpr(a.Escape, b.Escape)
}
// EqualsRefOfConstraintDefinition reports whether a and b are deeply equal.
func EqualsRefOfConstraintDefinition(a, b *ConstraintDefinition) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsColIdent(a.Name, b.Name) &&
		EqualsConstraintInfo(a.Details, b.Details)
}
// EqualsRefOfConvertExpr reports whether a and b are deeply equal.
func EqualsRefOfConvertExpr(a, b *ConvertExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsExpr(a.Expr, b.Expr) &&
		EqualsRefOfConvertType(a.Type, b.Type)
}
// EqualsRefOfConvertType reports whether a and b are deeply equal.
func EqualsRefOfConvertType(a, b *ConvertType) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Type == b.Type &&
		a.Charset == b.Charset &&
		EqualsRefOfLiteral(a.Length, b.Length) &&
		EqualsRefOfLiteral(a.Scale, b.Scale) &&
		a.Operator == b.Operator
}
// EqualsRefOfConvertUsingExpr reports whether a and b are deeply equal.
func EqualsRefOfConvertUsingExpr(a, b *ConvertUsingExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Type == b.Type &&
		EqualsExpr(a.Expr, b.Expr)
}
// EqualsRefOfCreateDatabase reports whether a and b are deeply equal.
func EqualsRefOfCreateDatabase(a, b *CreateDatabase) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.IfNotExists == b.IfNotExists &&
		a.FullyParsed == b.FullyParsed &&
		EqualsComments(a.Comments, b.Comments) &&
		EqualsTableIdent(a.DBName, b.DBName) &&
		EqualsSliceOfCollateAndCharset(a.CreateOptions, b.CreateOptions)
}
// EqualsRefOfCreateTable reports whether a and b are deeply equal.
func EqualsRefOfCreateTable(a, b *CreateTable) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Temp == b.Temp &&
		a.IfNotExists == b.IfNotExists &&
		a.FullyParsed == b.FullyParsed &&
		EqualsTableName(a.Table, b.Table) &&
		EqualsRefOfTableSpec(a.TableSpec, b.TableSpec) &&
		EqualsRefOfOptLike(a.OptLike, b.OptLike) &&
		EqualsComments(a.Comments, b.Comments)
}
// EqualsRefOfCreateView reports whether a and b are deeply equal.
func EqualsRefOfCreateView(a, b *CreateView) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Algorithm == b.Algorithm &&
		a.Definer == b.Definer &&
		a.Security == b.Security &&
		a.CheckOption == b.CheckOption &&
		a.IsReplace == b.IsReplace &&
		EqualsTableName(a.ViewName, b.ViewName) &&
		EqualsColumns(a.Columns, b.Columns) &&
		EqualsSelectStatement(a.Select, b.Select)
}
// EqualsRefOfCurTimeFuncExpr reports whether a and b are deeply equal.
func EqualsRefOfCurTimeFuncExpr(a, b *CurTimeFuncExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsColIdent(a.Name, b.Name) &&
		EqualsExpr(a.Fsp, b.Fsp)
}
// EqualsRefOfDefault reports whether a and b are deeply equal.
func EqualsRefOfDefault(a, b *Default) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.ColName == b.ColName
}
// EqualsRefOfDelete reports whether a and b are deeply equal.
func EqualsRefOfDelete(a, b *Delete) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Ignore == b.Ignore &&
		EqualsComments(a.Comments, b.Comments) &&
		EqualsTableNames(a.Targets, b.Targets) &&
		EqualsTableExprs(a.TableExprs, b.TableExprs) &&
		EqualsPartitions(a.Partitions, b.Partitions) &&
		EqualsRefOfWhere(a.Where, b.Where) &&
		EqualsOrderBy(a.OrderBy, b.OrderBy) &&
		EqualsRefOfLimit(a.Limit, b.Limit)
}
// EqualsRefOfDerivedTable reports whether a and b are deeply equal.
func EqualsRefOfDerivedTable(a, b *DerivedTable) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsSelectStatement(a.Select, b.Select)
}
// EqualsRefOfDropColumn reports whether a and b are deeply equal.
func EqualsRefOfDropColumn(a, b *DropColumn) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsRefOfColName(a.Name, b.Name)
}
// EqualsRefOfDropDatabase reports whether a and b are deeply equal.
func EqualsRefOfDropDatabase(a, b *DropDatabase) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.IfExists == b.IfExists &&
		EqualsComments(a.Comments, b.Comments) &&
		EqualsTableIdent(a.DBName, b.DBName)
}
// EqualsRefOfDropKey reports whether a and b are deeply equal.
func EqualsRefOfDropKey(a, b *DropKey) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Type == b.Type &&
		EqualsColIdent(a.Name, b.Name)
}
// EqualsRefOfDropTable reports whether a and b are deeply equal.
func EqualsRefOfDropTable(a, b *DropTable) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Temp == b.Temp &&
		a.IfExists == b.IfExists &&
		EqualsTableNames(a.FromTables, b.FromTables) &&
		EqualsComments(a.Comments, b.Comments)
}
// EqualsRefOfDropView reports whether a and b are deeply equal.
func EqualsRefOfDropView(a, b *DropView) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.IfExists == b.IfExists &&
		EqualsTableNames(a.FromTables, b.FromTables)
}
// EqualsRefOfExistsExpr reports whether a and b are deeply equal.
func EqualsRefOfExistsExpr(a, b *ExistsExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsRefOfSubquery(a.Subquery, b.Subquery)
}
// EqualsRefOfExplainStmt reports whether a and b are deeply equal.
func EqualsRefOfExplainStmt(a, b *ExplainStmt) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Type == b.Type &&
		EqualsStatement(a.Statement, b.Statement)
}
// EqualsRefOfExplainTab reports whether a and b are deeply equal.
func EqualsRefOfExplainTab(a, b *ExplainTab) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Wild == b.Wild &&
		EqualsTableName(a.Table, b.Table)
}
// EqualsExprs reports whether a and b have the same length and
// element-wise deeply equal entries.
func EqualsExprs(a, b Exprs) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !EqualsExpr(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfFlush reports whether a and b are deeply equal.
func EqualsRefOfFlush(a, b *Flush) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.IsLocal == b.IsLocal &&
		a.WithLock == b.WithLock &&
		a.ForExport == b.ForExport &&
		EqualsSliceOfString(a.FlushOptions, b.FlushOptions) &&
		EqualsTableNames(a.TableNames, b.TableNames)
}
// EqualsRefOfForce reports whether a and b are deeply equal.
// *Force carries no fields, so equality is decided by nil-ness alone.
func EqualsRefOfForce(a, b *Force) bool {
	if a == nil || b == nil {
		return a == b
	}
	return true
}
// EqualsRefOfForeignKeyDefinition reports whether a and b are deeply equal.
func EqualsRefOfForeignKeyDefinition(a, b *ForeignKeyDefinition) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsColumns(a.Source, b.Source) &&
		EqualsColIdent(a.IndexName, b.IndexName) &&
		EqualsRefOfReferenceDefinition(a.ReferenceDefinition, b.ReferenceDefinition)
}
// EqualsRefOfFuncExpr reports whether a and b are deeply equal.
func EqualsRefOfFuncExpr(a, b *FuncExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Distinct == b.Distinct &&
		EqualsTableIdent(a.Qualifier, b.Qualifier) &&
		EqualsColIdent(a.Name, b.Name) &&
		EqualsSelectExprs(a.Exprs, b.Exprs)
}
// EqualsGroupBy reports whether a and b have the same length and
// element-wise deeply equal entries.
func EqualsGroupBy(a, b GroupBy) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !EqualsExpr(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfGroupConcatExpr reports whether a and b are deeply equal.
func EqualsRefOfGroupConcatExpr(a, b *GroupConcatExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Distinct == b.Distinct &&
		a.Separator == b.Separator &&
		EqualsSelectExprs(a.Exprs, b.Exprs) &&
		EqualsOrderBy(a.OrderBy, b.OrderBy) &&
		EqualsRefOfLimit(a.Limit, b.Limit)
}
// EqualsRefOfIndexDefinition reports whether a and b are deeply equal.
func EqualsRefOfIndexDefinition(a, b *IndexDefinition) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsRefOfIndexInfo(a.Info, b.Info) &&
		EqualsSliceOfRefOfIndexColumn(a.Columns, b.Columns) &&
		EqualsSliceOfRefOfIndexOption(a.Options, b.Options)
}
// EqualsRefOfIndexHints reports whether a and b are deeply equal.
func EqualsRefOfIndexHints(a, b *IndexHints) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Type == b.Type &&
		EqualsSliceOfColIdent(a.Indexes, b.Indexes)
}
// EqualsRefOfIndexInfo reports whether a and b are deeply equal.
func EqualsRefOfIndexInfo(a, b *IndexInfo) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Type == b.Type &&
		a.Primary == b.Primary &&
		a.Spatial == b.Spatial &&
		a.Fulltext == b.Fulltext &&
		a.Unique == b.Unique &&
		EqualsColIdent(a.Name, b.Name) &&
		EqualsColIdent(a.ConstraintName, b.ConstraintName)
}
// EqualsRefOfInsert reports whether a and b are deeply equal.
func EqualsRefOfInsert(a, b *Insert) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Action == b.Action &&
		EqualsComments(a.Comments, b.Comments) &&
		a.Ignore == b.Ignore &&
		EqualsTableName(a.Table, b.Table) &&
		EqualsPartitions(a.Partitions, b.Partitions) &&
		EqualsColumns(a.Columns, b.Columns) &&
		EqualsInsertRows(a.Rows, b.Rows) &&
		EqualsOnDup(a.OnDup, b.OnDup)
}
// EqualsRefOfIntervalExpr reports whether a and b are deeply equal.
func EqualsRefOfIntervalExpr(a, b *IntervalExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Unit == b.Unit &&
		EqualsExpr(a.Expr, b.Expr)
}
// EqualsRefOfIsExpr reports whether a and b are deeply equal.
func EqualsRefOfIsExpr(a, b *IsExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsExpr(a.Left, b.Left) &&
		a.Right == b.Right
}
// EqualsJoinCondition does deep equals between the two objects.
// JoinCondition is a value type, so there is no nil case; the ON
// expression and the USING column list are compared in turn.
func EqualsJoinCondition(a, b JoinCondition) bool {
	return EqualsExpr(a.On, b.On) &&
		EqualsColumns(a.Using, b.Using)
}
// EqualsRefOfJoinTableExpr reports whether a and b are deeply equal.
func EqualsRefOfJoinTableExpr(a, b *JoinTableExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsTableExpr(a.LeftExpr, b.LeftExpr) &&
		a.Join == b.Join &&
		EqualsTableExpr(a.RightExpr, b.RightExpr) &&
		EqualsJoinCondition(a.Condition, b.Condition)
}
// EqualsRefOfKeyState reports whether a and b are deeply equal.
func EqualsRefOfKeyState(a, b *KeyState) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Enable == b.Enable
}
// EqualsRefOfLimit reports whether a and b are deeply equal.
func EqualsRefOfLimit(a, b *Limit) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsExpr(a.Offset, b.Offset) &&
		EqualsExpr(a.Rowcount, b.Rowcount)
}
// EqualsRefOfLiteral reports whether a and b are deeply equal.
func EqualsRefOfLiteral(a, b *Literal) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Val == b.Val &&
		a.Type == b.Type
}
// EqualsRefOfLoad reports whether a and b are deeply equal.
// *Load carries no fields, so equality is decided by nil-ness alone.
func EqualsRefOfLoad(a, b *Load) bool {
	if a == nil || b == nil {
		return a == b
	}
	return true
}
// EqualsRefOfLockOption reports whether a and b are deeply equal.
func EqualsRefOfLockOption(a, b *LockOption) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Type == b.Type
}
// EqualsRefOfLockTables reports whether a and b are deeply equal.
func EqualsRefOfLockTables(a, b *LockTables) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsTableAndLockTypes(a.Tables, b.Tables)
}
// EqualsRefOfMatchExpr reports whether a and b are deeply equal.
func EqualsRefOfMatchExpr(a, b *MatchExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsSelectExprs(a.Columns, b.Columns) &&
		EqualsExpr(a.Expr, b.Expr) &&
		a.Option == b.Option
}
// EqualsRefOfModifyColumn reports whether a and b are deeply equal.
func EqualsRefOfModifyColumn(a, b *ModifyColumn) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsRefOfColumnDefinition(a.NewColDefinition, b.NewColDefinition) &&
		EqualsRefOfColName(a.First, b.First) &&
		EqualsRefOfColName(a.After, b.After)
}
// EqualsRefOfNextval reports whether a and b are deeply equal.
func EqualsRefOfNextval(a, b *Nextval) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsExpr(a.Expr, b.Expr)
}
// EqualsRefOfNotExpr reports whether a and b are deeply equal.
func EqualsRefOfNotExpr(a, b *NotExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsExpr(a.Expr, b.Expr)
}
// EqualsRefOfNullVal reports whether a and b are deeply equal.
// *NullVal carries no fields, so equality is decided by nil-ness alone.
func EqualsRefOfNullVal(a, b *NullVal) bool {
	if a == nil || b == nil {
		return a == b
	}
	return true
}
// EqualsOnDup reports whether a and b have the same length and
// element-wise deeply equal entries.
func EqualsOnDup(a, b OnDup) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !EqualsRefOfUpdateExpr(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfOptLike reports whether a and b are deeply equal.
func EqualsRefOfOptLike(a, b *OptLike) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsTableName(a.LikeTable, b.LikeTable)
}
// EqualsRefOfOrExpr reports whether a and b are deeply equal.
func EqualsRefOfOrExpr(a, b *OrExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsExpr(a.Left, b.Left) &&
		EqualsExpr(a.Right, b.Right)
}
// EqualsRefOfOrder reports whether a and b are deeply equal.
func EqualsRefOfOrder(a, b *Order) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsExpr(a.Expr, b.Expr) &&
		a.Direction == b.Direction
}
// EqualsOrderBy reports whether a and b have the same length and
// element-wise deeply equal entries.
func EqualsOrderBy(a, b OrderBy) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !EqualsRefOfOrder(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfOrderByOption reports whether a and b are deeply equal.
func EqualsRefOfOrderByOption(a, b *OrderByOption) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsColumns(a.Cols, b.Cols)
}
// EqualsRefOfOtherAdmin reports whether a and b are deeply equal.
// *OtherAdmin carries no fields, so equality is decided by nil-ness alone.
func EqualsRefOfOtherAdmin(a, b *OtherAdmin) bool {
	if a == nil || b == nil {
		return a == b
	}
	return true
}
// EqualsRefOfOtherRead reports whether a and b are deeply equal.
// *OtherRead carries no fields, so equality is decided by nil-ness alone.
func EqualsRefOfOtherRead(a, b *OtherRead) bool {
	if a == nil || b == nil {
		return a == b
	}
	return true
}
// EqualsRefOfParenSelect reports whether a and b are deeply equal.
func EqualsRefOfParenSelect(a, b *ParenSelect) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsSelectStatement(a.Select, b.Select)
}
// EqualsRefOfParenTableExpr reports whether a and b are deeply equal.
func EqualsRefOfParenTableExpr(a, b *ParenTableExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsTableExprs(a.Exprs, b.Exprs)
}
// EqualsRefOfPartitionDefinition reports whether a and b are deeply equal.
func EqualsRefOfPartitionDefinition(a, b *PartitionDefinition) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Maxvalue == b.Maxvalue &&
		EqualsColIdent(a.Name, b.Name) &&
		EqualsExpr(a.Limit, b.Limit)
}
// EqualsRefOfPartitionSpec reports whether a and b are deeply equal.
func EqualsRefOfPartitionSpec(a, b *PartitionSpec) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.IsAll == b.IsAll &&
		a.WithoutValidation == b.WithoutValidation &&
		a.Action == b.Action &&
		EqualsPartitions(a.Names, b.Names) &&
		EqualsRefOfLiteral(a.Number, b.Number) &&
		EqualsTableName(a.TableName, b.TableName) &&
		EqualsSliceOfRefOfPartitionDefinition(a.Definitions, b.Definitions)
}
// EqualsPartitions reports whether a and b have the same length and
// element-wise deeply equal entries.
func EqualsPartitions(a, b Partitions) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !EqualsColIdent(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfRangeCond reports whether a and b are deeply equal.
func EqualsRefOfRangeCond(a, b *RangeCond) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Operator == b.Operator &&
		EqualsExpr(a.Left, b.Left) &&
		EqualsExpr(a.From, b.From) &&
		EqualsExpr(a.To, b.To)
}
// EqualsRefOfReferenceDefinition reports whether a and b are deeply equal.
func EqualsRefOfReferenceDefinition(a, b *ReferenceDefinition) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsTableName(a.ReferencedTable, b.ReferencedTable) &&
		EqualsColumns(a.ReferencedColumns, b.ReferencedColumns) &&
		a.OnDelete == b.OnDelete &&
		a.OnUpdate == b.OnUpdate
}
// EqualsRefOfRelease reports whether a and b are deeply equal.
func EqualsRefOfRelease(a, b *Release) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsColIdent(a.Name, b.Name)
}
// EqualsRefOfRenameIndex reports whether a and b are deeply equal.
func EqualsRefOfRenameIndex(a, b *RenameIndex) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsColIdent(a.OldName, b.OldName) &&
		EqualsColIdent(a.NewName, b.NewName)
}
// EqualsRefOfRenameTable reports whether a and b are deeply equal.
func EqualsRefOfRenameTable(a, b *RenameTable) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsSliceOfRefOfRenameTablePair(a.TablePairs, b.TablePairs)
}
// EqualsRefOfRenameTableName reports whether a and b are deeply equal.
func EqualsRefOfRenameTableName(a, b *RenameTableName) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsTableName(a.Table, b.Table)
}
// EqualsRefOfRevertMigration reports whether a and b are deeply equal.
func EqualsRefOfRevertMigration(a, b *RevertMigration) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.UUID == b.UUID &&
		EqualsComments(a.Comments, b.Comments)
}
// EqualsRefOfRollback reports whether a and b are deeply equal.
// *Rollback carries no fields, so equality is decided by nil-ness alone.
func EqualsRefOfRollback(a, b *Rollback) bool {
	if a == nil || b == nil {
		return a == b
	}
	return true
}
// EqualsRefOfSRollback reports whether a and b are deeply equal.
func EqualsRefOfSRollback(a, b *SRollback) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsColIdent(a.Name, b.Name)
}
// EqualsRefOfSavepoint reports whether a and b are deeply equal.
func EqualsRefOfSavepoint(a, b *Savepoint) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsColIdent(a.Name, b.Name)
}
// EqualsRefOfSelect reports whether a and b are deeply equal.
func EqualsRefOfSelect(a, b *Select) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Distinct == b.Distinct &&
		a.StraightJoinHint == b.StraightJoinHint &&
		a.SQLCalcFoundRows == b.SQLCalcFoundRows &&
		EqualsRefOfBool(a.Cache, b.Cache) &&
		EqualsSliceOfTableExpr(a.From, b.From) &&
		EqualsComments(a.Comments, b.Comments) &&
		EqualsSelectExprs(a.SelectExprs, b.SelectExprs) &&
		EqualsRefOfWhere(a.Where, b.Where) &&
		EqualsGroupBy(a.GroupBy, b.GroupBy) &&
		EqualsRefOfWhere(a.Having, b.Having) &&
		EqualsOrderBy(a.OrderBy, b.OrderBy) &&
		EqualsRefOfLimit(a.Limit, b.Limit) &&
		a.Lock == b.Lock &&
		EqualsRefOfSelectInto(a.Into, b.Into)
}
// EqualsSelectExprs reports whether a and b have the same length and
// element-wise deeply equal entries.
func EqualsSelectExprs(a, b SelectExprs) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !EqualsSelectExpr(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfSelectInto reports whether a and b are deeply equal.
func EqualsRefOfSelectInto(a, b *SelectInto) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.FileName == b.FileName &&
		a.Charset == b.Charset &&
		a.FormatOption == b.FormatOption &&
		a.ExportOption == b.ExportOption &&
		a.Manifest == b.Manifest &&
		a.Overwrite == b.Overwrite &&
		a.Type == b.Type
}
// EqualsRefOfSet reports whether a and b are deeply equal.
func EqualsRefOfSet(a, b *Set) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsComments(a.Comments, b.Comments) &&
		EqualsSetExprs(a.Exprs, b.Exprs)
}
// EqualsRefOfSetExpr reports whether a and b are deeply equal.
func EqualsRefOfSetExpr(a, b *SetExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Scope == b.Scope &&
		EqualsColIdent(a.Name, b.Name) &&
		EqualsExpr(a.Expr, b.Expr)
}
// EqualsSetExprs reports whether a and b have the same length and
// element-wise deeply equal entries.
func EqualsSetExprs(a, b SetExprs) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !EqualsRefOfSetExpr(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfSetTransaction reports whether a and b are deeply equal.
func EqualsRefOfSetTransaction(a, b *SetTransaction) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsSQLNode(a.SQLNode, b.SQLNode) &&
		EqualsComments(a.Comments, b.Comments) &&
		a.Scope == b.Scope &&
		EqualsSliceOfCharacteristic(a.Characteristics, b.Characteristics)
}
// EqualsRefOfShow reports whether a and b are deeply equal.
func EqualsRefOfShow(a, b *Show) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsShowInternal(a.Internal, b.Internal)
}
// EqualsRefOfShowBasic reports whether a and b are deeply equal.
func EqualsRefOfShowBasic(a, b *ShowBasic) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Full == b.Full &&
		a.Command == b.Command &&
		EqualsTableName(a.Tbl, b.Tbl) &&
		EqualsTableIdent(a.DbName, b.DbName) &&
		EqualsRefOfShowFilter(a.Filter, b.Filter)
}
// EqualsRefOfShowCreate reports whether a and b are deeply equal.
func EqualsRefOfShowCreate(a, b *ShowCreate) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Command == b.Command &&
		EqualsTableName(a.Op, b.Op)
}
// EqualsRefOfShowFilter reports whether a and b are deeply equal.
func EqualsRefOfShowFilter(a, b *ShowFilter) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Like == b.Like &&
		EqualsExpr(a.Filter, b.Filter)
}
// EqualsRefOfShowLegacy reports whether a and b are deeply equal.
func EqualsRefOfShowLegacy(a, b *ShowLegacy) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.Extended == b.Extended &&
		a.Type == b.Type &&
		EqualsTableName(a.OnTable, b.OnTable) &&
		EqualsTableName(a.Table, b.Table) &&
		EqualsRefOfShowTablesOpt(a.ShowTablesOpt, b.ShowTablesOpt) &&
		a.Scope == b.Scope &&
		EqualsExpr(a.ShowCollationFilterOpt, b.ShowCollationFilterOpt)
}
// EqualsRefOfShowMigrationLogs reports whether a and b are deeply equal.
func EqualsRefOfShowMigrationLogs(a, b *ShowMigrationLogs) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return a.UUID == b.UUID &&
		EqualsComments(a.Comments, b.Comments)
}
// EqualsRefOfStarExpr reports whether a and b are deeply equal.
func EqualsRefOfStarExpr(a, b *StarExpr) bool {
	switch {
	case a == b:
		return true
	case a == nil, b == nil:
		return false
	}
	return EqualsTableName(a.TableName, b.TableName)
}
// EqualsRefOfStream does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfStream(a, b *Stream) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsComments(a.Comments, b.Comments) &&
		EqualsSelectExpr(a.SelectExpr, b.SelectExpr) &&
		EqualsTableName(a.Table, b.Table)
}
// EqualsRefOfSubquery does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfSubquery(a, b *Subquery) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsSelectStatement(a.Select, b.Select)
}
// EqualsRefOfSubstrExpr does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
// Otherwise all fields must compare equal.
func EqualsRefOfSubstrExpr(a, b *SubstrExpr) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsRefOfColName(a.Name, b.Name) &&
		EqualsRefOfLiteral(a.StrVal, b.StrVal) &&
		EqualsExpr(a.From, b.From) &&
		EqualsExpr(a.To, b.To)
}
// EqualsTableExprs does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsTableExprs(a, b TableExprs) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsTableExpr(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsTableIdent does deep equals between the two objects.
// Identifiers are equal when their raw (case-preserved) values match.
func EqualsTableIdent(a, b TableIdent) bool {
	return a.v == b.v
}
// EqualsTableName does deep equals between the two objects.
// Both the table name and its qualifier must match.
func EqualsTableName(a, b TableName) bool {
	return EqualsTableIdent(a.Name, b.Name) &&
		EqualsTableIdent(a.Qualifier, b.Qualifier)
}
// EqualsTableNames does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsTableNames(a, b TableNames) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsTableName(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsTableOptions does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsTableOptions(a, b TableOptions) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsRefOfTableOption(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfTableSpec does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
// Otherwise columns, indexes, constraints, and options must all match.
func EqualsRefOfTableSpec(a, b *TableSpec) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsSliceOfRefOfColumnDefinition(a.Columns, b.Columns) &&
		EqualsSliceOfRefOfIndexDefinition(a.Indexes, b.Indexes) &&
		EqualsSliceOfRefOfConstraintDefinition(a.Constraints, b.Constraints) &&
		EqualsTableOptions(a.Options, b.Options)
}
// EqualsRefOfTablespaceOperation does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfTablespaceOperation(a, b *TablespaceOperation) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.Import == b.Import
}
// EqualsRefOfTimestampFuncExpr does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
// Otherwise all fields must compare equal.
func EqualsRefOfTimestampFuncExpr(a, b *TimestampFuncExpr) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.Name == b.Name &&
		a.Unit == b.Unit &&
		EqualsExpr(a.Expr1, b.Expr1) &&
		EqualsExpr(a.Expr2, b.Expr2)
}
// EqualsRefOfTruncateTable does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfTruncateTable(a, b *TruncateTable) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsTableName(a.Table, b.Table)
}
// EqualsRefOfUnaryExpr does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfUnaryExpr(a, b *UnaryExpr) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.Operator == b.Operator &&
		EqualsExpr(a.Expr, b.Expr)
}
// EqualsRefOfUnion does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
// Otherwise all fields must compare equal.
func EqualsRefOfUnion(a, b *Union) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsSelectStatement(a.FirstStatement, b.FirstStatement) &&
		EqualsSliceOfRefOfUnionSelect(a.UnionSelects, b.UnionSelects) &&
		EqualsOrderBy(a.OrderBy, b.OrderBy) &&
		EqualsRefOfLimit(a.Limit, b.Limit) &&
		a.Lock == b.Lock
}
// EqualsRefOfUnionSelect does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfUnionSelect(a, b *UnionSelect) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.Distinct == b.Distinct &&
		EqualsSelectStatement(a.Statement, b.Statement)
}
// EqualsRefOfUnlockTables does deep equals between the two objects.
// UnlockTables carries no fields, so any two non-nil values are equal;
// identical pointers (including two nils) are equal as well.
func EqualsRefOfUnlockTables(a, b *UnlockTables) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return true
}
// EqualsRefOfUpdate does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
// Otherwise all fields must compare equal.
func EqualsRefOfUpdate(a, b *Update) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsComments(a.Comments, b.Comments) &&
		a.Ignore == b.Ignore &&
		EqualsTableExprs(a.TableExprs, b.TableExprs) &&
		EqualsUpdateExprs(a.Exprs, b.Exprs) &&
		EqualsRefOfWhere(a.Where, b.Where) &&
		EqualsOrderBy(a.OrderBy, b.OrderBy) &&
		EqualsRefOfLimit(a.Limit, b.Limit)
}
// EqualsRefOfUpdateExpr does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfUpdateExpr(a, b *UpdateExpr) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsRefOfColName(a.Name, b.Name) &&
		EqualsExpr(a.Expr, b.Expr)
}
// EqualsUpdateExprs does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsUpdateExprs(a, b UpdateExprs) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsRefOfUpdateExpr(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfUse does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfUse(a, b *Use) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsTableIdent(a.DBName, b.DBName)
}
// EqualsRefOfVStream does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
// Otherwise all fields must compare equal.
func EqualsRefOfVStream(a, b *VStream) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsComments(a.Comments, b.Comments) &&
		EqualsSelectExpr(a.SelectExpr, b.SelectExpr) &&
		EqualsTableName(a.Table, b.Table) &&
		EqualsRefOfWhere(a.Where, b.Where) &&
		EqualsRefOfLimit(a.Limit, b.Limit)
}
// EqualsValTuple does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsValTuple(a, b ValTuple) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsExpr(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfValidation does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfValidation(a, b *Validation) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.With == b.With
}
// EqualsValues does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsValues(a, b Values) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsValTuple(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfValuesFuncExpr does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfValuesFuncExpr(a, b *ValuesFuncExpr) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsRefOfColName(a.Name, b.Name)
}
// EqualsVindexParam does deep equals between the two objects.
// Compares the value and the key identifier field by field.
func EqualsVindexParam(a, b VindexParam) bool {
	return a.Val == b.Val &&
		EqualsColIdent(a.Key, b.Key)
}
// EqualsRefOfVindexSpec does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfVindexSpec(a, b *VindexSpec) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsColIdent(a.Name, b.Name) &&
		EqualsColIdent(a.Type, b.Type) &&
		EqualsSliceOfVindexParam(a.Params, b.Params)
}
// EqualsRefOfWhen does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfWhen(a, b *When) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsExpr(a.Cond, b.Cond) &&
		EqualsExpr(a.Val, b.Val)
}
// EqualsRefOfWhere does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfWhere(a, b *Where) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.Type == b.Type &&
		EqualsExpr(a.Expr, b.Expr)
}
// EqualsRefOfXorExpr does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfXorExpr(a, b *XorExpr) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsExpr(a.Left, b.Left) &&
		EqualsExpr(a.Right, b.Right)
}
// EqualsAlterOption does deep equals between the two objects.
// Two AlterOption values are equal only when they share the same dynamic
// type and compare equal under that type's deep-equals helper.
func EqualsAlterOption(inA, inB AlterOption) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; inB must hold the same type.
	switch a := inA.(type) {
	case *AddColumns:
		b, ok := inB.(*AddColumns)
		if !ok {
			return false
		}
		return EqualsRefOfAddColumns(a, b)
	case *AddConstraintDefinition:
		b, ok := inB.(*AddConstraintDefinition)
		if !ok {
			return false
		}
		return EqualsRefOfAddConstraintDefinition(a, b)
	case *AddIndexDefinition:
		b, ok := inB.(*AddIndexDefinition)
		if !ok {
			return false
		}
		return EqualsRefOfAddIndexDefinition(a, b)
	case AlgorithmValue:
		b, ok := inB.(AlgorithmValue)
		if !ok {
			return false
		}
		return a == b
	case *AlterCharset:
		b, ok := inB.(*AlterCharset)
		if !ok {
			return false
		}
		return EqualsRefOfAlterCharset(a, b)
	case *AlterColumn:
		b, ok := inB.(*AlterColumn)
		if !ok {
			return false
		}
		return EqualsRefOfAlterColumn(a, b)
	case *ChangeColumn:
		b, ok := inB.(*ChangeColumn)
		if !ok {
			return false
		}
		return EqualsRefOfChangeColumn(a, b)
	case *DropColumn:
		b, ok := inB.(*DropColumn)
		if !ok {
			return false
		}
		return EqualsRefOfDropColumn(a, b)
	case *DropKey:
		b, ok := inB.(*DropKey)
		if !ok {
			return false
		}
		return EqualsRefOfDropKey(a, b)
	case *Force:
		b, ok := inB.(*Force)
		if !ok {
			return false
		}
		return EqualsRefOfForce(a, b)
	case *KeyState:
		b, ok := inB.(*KeyState)
		if !ok {
			return false
		}
		return EqualsRefOfKeyState(a, b)
	case *LockOption:
		b, ok := inB.(*LockOption)
		if !ok {
			return false
		}
		return EqualsRefOfLockOption(a, b)
	case *ModifyColumn:
		b, ok := inB.(*ModifyColumn)
		if !ok {
			return false
		}
		return EqualsRefOfModifyColumn(a, b)
	case *OrderByOption:
		b, ok := inB.(*OrderByOption)
		if !ok {
			return false
		}
		return EqualsRefOfOrderByOption(a, b)
	case *RenameIndex:
		b, ok := inB.(*RenameIndex)
		if !ok {
			return false
		}
		return EqualsRefOfRenameIndex(a, b)
	case *RenameTableName:
		b, ok := inB.(*RenameTableName)
		if !ok {
			return false
		}
		return EqualsRefOfRenameTableName(a, b)
	case TableOptions:
		b, ok := inB.(TableOptions)
		if !ok {
			return false
		}
		return EqualsTableOptions(a, b)
	case *TablespaceOperation:
		b, ok := inB.(*TablespaceOperation)
		if !ok {
			return false
		}
		return EqualsRefOfTablespaceOperation(a, b)
	case *Validation:
		b, ok := inB.(*Validation)
		if !ok {
			return false
		}
		return EqualsRefOfValidation(a, b)
	default:
		// this should never happen
		return false
	}
}
// EqualsCharacteristic does deep equals between the two objects.
// Two Characteristic values are equal only when they share the same
// dynamic type and compare equal under that type's comparison.
func EqualsCharacteristic(inA, inB Characteristic) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; inB must hold the same type.
	switch a := inA.(type) {
	case AccessMode:
		b, ok := inB.(AccessMode)
		if !ok {
			return false
		}
		return a == b
	case IsolationLevel:
		b, ok := inB.(IsolationLevel)
		if !ok {
			return false
		}
		return a == b
	default:
		// this should never happen
		return false
	}
}
// EqualsColTuple does deep equals between the two objects.
// Two ColTuple values are equal only when they share the same dynamic
// type and compare equal under that type's deep-equals helper.
func EqualsColTuple(inA, inB ColTuple) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; inB must hold the same type.
	switch a := inA.(type) {
	case ListArg:
		b, ok := inB.(ListArg)
		if !ok {
			return false
		}
		return a == b
	case *Subquery:
		b, ok := inB.(*Subquery)
		if !ok {
			return false
		}
		return EqualsRefOfSubquery(a, b)
	case ValTuple:
		b, ok := inB.(ValTuple)
		if !ok {
			return false
		}
		return EqualsValTuple(a, b)
	default:
		// this should never happen
		return false
	}
}
// EqualsConstraintInfo does deep equals between the two objects.
// Two ConstraintInfo values are equal only when they share the same
// dynamic type and compare equal under that type's deep-equals helper.
func EqualsConstraintInfo(inA, inB ConstraintInfo) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; inB must hold the same type.
	switch a := inA.(type) {
	case *CheckConstraintDefinition:
		b, ok := inB.(*CheckConstraintDefinition)
		if !ok {
			return false
		}
		return EqualsRefOfCheckConstraintDefinition(a, b)
	case *ForeignKeyDefinition:
		b, ok := inB.(*ForeignKeyDefinition)
		if !ok {
			return false
		}
		return EqualsRefOfForeignKeyDefinition(a, b)
	default:
		// this should never happen
		return false
	}
}
// EqualsDBDDLStatement does deep equals between the two objects.
// Two DBDDLStatement values are equal only when they share the same
// dynamic type and compare equal under that type's deep-equals helper.
func EqualsDBDDLStatement(inA, inB DBDDLStatement) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; inB must hold the same type.
	switch a := inA.(type) {
	case *AlterDatabase:
		b, ok := inB.(*AlterDatabase)
		if !ok {
			return false
		}
		return EqualsRefOfAlterDatabase(a, b)
	case *CreateDatabase:
		b, ok := inB.(*CreateDatabase)
		if !ok {
			return false
		}
		return EqualsRefOfCreateDatabase(a, b)
	case *DropDatabase:
		b, ok := inB.(*DropDatabase)
		if !ok {
			return false
		}
		return EqualsRefOfDropDatabase(a, b)
	default:
		// this should never happen
		return false
	}
}
// EqualsDDLStatement does deep equals between the two objects.
// Two DDLStatement values are equal only when they share the same
// dynamic type and compare equal under that type's deep-equals helper.
func EqualsDDLStatement(inA, inB DDLStatement) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; inB must hold the same type.
	switch a := inA.(type) {
	case *AlterTable:
		b, ok := inB.(*AlterTable)
		if !ok {
			return false
		}
		return EqualsRefOfAlterTable(a, b)
	case *AlterView:
		b, ok := inB.(*AlterView)
		if !ok {
			return false
		}
		return EqualsRefOfAlterView(a, b)
	case *CreateTable:
		b, ok := inB.(*CreateTable)
		if !ok {
			return false
		}
		return EqualsRefOfCreateTable(a, b)
	case *CreateView:
		b, ok := inB.(*CreateView)
		if !ok {
			return false
		}
		return EqualsRefOfCreateView(a, b)
	case *DropTable:
		b, ok := inB.(*DropTable)
		if !ok {
			return false
		}
		return EqualsRefOfDropTable(a, b)
	case *DropView:
		b, ok := inB.(*DropView)
		if !ok {
			return false
		}
		return EqualsRefOfDropView(a, b)
	case *RenameTable:
		b, ok := inB.(*RenameTable)
		if !ok {
			return false
		}
		return EqualsRefOfRenameTable(a, b)
	case *TruncateTable:
		b, ok := inB.(*TruncateTable)
		if !ok {
			return false
		}
		return EqualsRefOfTruncateTable(a, b)
	default:
		// this should never happen
		return false
	}
}
// EqualsExplain does deep equals between the two objects.
// Two Explain values are equal only when they share the same dynamic
// type and compare equal under that type's deep-equals helper.
func EqualsExplain(inA, inB Explain) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; inB must hold the same type.
	switch a := inA.(type) {
	case *ExplainStmt:
		b, ok := inB.(*ExplainStmt)
		if !ok {
			return false
		}
		return EqualsRefOfExplainStmt(a, b)
	case *ExplainTab:
		b, ok := inB.(*ExplainTab)
		if !ok {
			return false
		}
		return EqualsRefOfExplainTab(a, b)
	default:
		// this should never happen
		return false
	}
}
// EqualsExpr does deep equals between the two objects.
// Two Expr values are equal only when they share the same dynamic type
// and compare equal under that type's deep-equals helper.
func EqualsExpr(inA, inB Expr) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; inB must hold the same type.
	switch a := inA.(type) {
	case *AndExpr:
		b, ok := inB.(*AndExpr)
		if !ok {
			return false
		}
		return EqualsRefOfAndExpr(a, b)
	case Argument:
		b, ok := inB.(Argument)
		if !ok {
			return false
		}
		return a == b
	case *BinaryExpr:
		b, ok := inB.(*BinaryExpr)
		if !ok {
			return false
		}
		return EqualsRefOfBinaryExpr(a, b)
	case BoolVal:
		b, ok := inB.(BoolVal)
		if !ok {
			return false
		}
		return a == b
	case *CaseExpr:
		b, ok := inB.(*CaseExpr)
		if !ok {
			return false
		}
		return EqualsRefOfCaseExpr(a, b)
	case *ColName:
		b, ok := inB.(*ColName)
		if !ok {
			return false
		}
		return EqualsRefOfColName(a, b)
	case *CollateExpr:
		b, ok := inB.(*CollateExpr)
		if !ok {
			return false
		}
		return EqualsRefOfCollateExpr(a, b)
	case *ComparisonExpr:
		b, ok := inB.(*ComparisonExpr)
		if !ok {
			return false
		}
		return EqualsRefOfComparisonExpr(a, b)
	case *ConvertExpr:
		b, ok := inB.(*ConvertExpr)
		if !ok {
			return false
		}
		return EqualsRefOfConvertExpr(a, b)
	case *ConvertUsingExpr:
		b, ok := inB.(*ConvertUsingExpr)
		if !ok {
			return false
		}
		return EqualsRefOfConvertUsingExpr(a, b)
	case *CurTimeFuncExpr:
		b, ok := inB.(*CurTimeFuncExpr)
		if !ok {
			return false
		}
		return EqualsRefOfCurTimeFuncExpr(a, b)
	case *Default:
		b, ok := inB.(*Default)
		if !ok {
			return false
		}
		return EqualsRefOfDefault(a, b)
	case *ExistsExpr:
		b, ok := inB.(*ExistsExpr)
		if !ok {
			return false
		}
		return EqualsRefOfExistsExpr(a, b)
	case *FuncExpr:
		b, ok := inB.(*FuncExpr)
		if !ok {
			return false
		}
		return EqualsRefOfFuncExpr(a, b)
	case *GroupConcatExpr:
		b, ok := inB.(*GroupConcatExpr)
		if !ok {
			return false
		}
		return EqualsRefOfGroupConcatExpr(a, b)
	case *IntervalExpr:
		b, ok := inB.(*IntervalExpr)
		if !ok {
			return false
		}
		return EqualsRefOfIntervalExpr(a, b)
	case *IsExpr:
		b, ok := inB.(*IsExpr)
		if !ok {
			return false
		}
		return EqualsRefOfIsExpr(a, b)
	case ListArg:
		b, ok := inB.(ListArg)
		if !ok {
			return false
		}
		return a == b
	case *Literal:
		b, ok := inB.(*Literal)
		if !ok {
			return false
		}
		return EqualsRefOfLiteral(a, b)
	case *MatchExpr:
		b, ok := inB.(*MatchExpr)
		if !ok {
			return false
		}
		return EqualsRefOfMatchExpr(a, b)
	case *NotExpr:
		b, ok := inB.(*NotExpr)
		if !ok {
			return false
		}
		return EqualsRefOfNotExpr(a, b)
	case *NullVal:
		b, ok := inB.(*NullVal)
		if !ok {
			return false
		}
		return EqualsRefOfNullVal(a, b)
	case *OrExpr:
		b, ok := inB.(*OrExpr)
		if !ok {
			return false
		}
		return EqualsRefOfOrExpr(a, b)
	case *RangeCond:
		b, ok := inB.(*RangeCond)
		if !ok {
			return false
		}
		return EqualsRefOfRangeCond(a, b)
	case *Subquery:
		b, ok := inB.(*Subquery)
		if !ok {
			return false
		}
		return EqualsRefOfSubquery(a, b)
	case *SubstrExpr:
		b, ok := inB.(*SubstrExpr)
		if !ok {
			return false
		}
		return EqualsRefOfSubstrExpr(a, b)
	case *TimestampFuncExpr:
		b, ok := inB.(*TimestampFuncExpr)
		if !ok {
			return false
		}
		return EqualsRefOfTimestampFuncExpr(a, b)
	case *UnaryExpr:
		b, ok := inB.(*UnaryExpr)
		if !ok {
			return false
		}
		return EqualsRefOfUnaryExpr(a, b)
	case ValTuple:
		b, ok := inB.(ValTuple)
		if !ok {
			return false
		}
		return EqualsValTuple(a, b)
	case *ValuesFuncExpr:
		b, ok := inB.(*ValuesFuncExpr)
		if !ok {
			return false
		}
		return EqualsRefOfValuesFuncExpr(a, b)
	case *XorExpr:
		b, ok := inB.(*XorExpr)
		if !ok {
			return false
		}
		return EqualsRefOfXorExpr(a, b)
	default:
		// this should never happen
		return false
	}
}
// EqualsInsertRows does deep equals between the two objects.
// Two InsertRows values are equal only when they share the same dynamic
// type and compare equal under that type's deep-equals helper.
func EqualsInsertRows(inA, inB InsertRows) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; inB must hold the same type.
	switch a := inA.(type) {
	case *ParenSelect:
		b, ok := inB.(*ParenSelect)
		if !ok {
			return false
		}
		return EqualsRefOfParenSelect(a, b)
	case *Select:
		b, ok := inB.(*Select)
		if !ok {
			return false
		}
		return EqualsRefOfSelect(a, b)
	case *Union:
		b, ok := inB.(*Union)
		if !ok {
			return false
		}
		return EqualsRefOfUnion(a, b)
	case Values:
		b, ok := inB.(Values)
		if !ok {
			return false
		}
		return EqualsValues(a, b)
	default:
		// this should never happen
		return false
	}
}
// EqualsSelectExpr does deep equals between the two objects.
// Two SelectExpr values are equal only when they share the same dynamic
// type and compare equal under that type's deep-equals helper.
func EqualsSelectExpr(inA, inB SelectExpr) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; inB must hold the same type.
	switch a := inA.(type) {
	case *AliasedExpr:
		b, ok := inB.(*AliasedExpr)
		if !ok {
			return false
		}
		return EqualsRefOfAliasedExpr(a, b)
	case *Nextval:
		b, ok := inB.(*Nextval)
		if !ok {
			return false
		}
		return EqualsRefOfNextval(a, b)
	case *StarExpr:
		b, ok := inB.(*StarExpr)
		if !ok {
			return false
		}
		return EqualsRefOfStarExpr(a, b)
	default:
		// this should never happen
		return false
	}
}
// EqualsSelectStatement does deep equals between the two objects.
// Two SelectStatement values are equal only when they share the same
// dynamic type and compare equal under that type's deep-equals helper.
func EqualsSelectStatement(inA, inB SelectStatement) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; inB must hold the same type.
	switch a := inA.(type) {
	case *ParenSelect:
		b, ok := inB.(*ParenSelect)
		if !ok {
			return false
		}
		return EqualsRefOfParenSelect(a, b)
	case *Select:
		b, ok := inB.(*Select)
		if !ok {
			return false
		}
		return EqualsRefOfSelect(a, b)
	case *Union:
		b, ok := inB.(*Union)
		if !ok {
			return false
		}
		return EqualsRefOfUnion(a, b)
	default:
		// this should never happen
		return false
	}
}
// EqualsShowInternal does deep equals between the two objects.
// Two ShowInternal values are equal only when they share the same
// dynamic type and compare equal under that type's deep-equals helper.
func EqualsShowInternal(inA, inB ShowInternal) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; inB must hold the same type.
	switch a := inA.(type) {
	case *ShowBasic:
		b, ok := inB.(*ShowBasic)
		if !ok {
			return false
		}
		return EqualsRefOfShowBasic(a, b)
	case *ShowCreate:
		b, ok := inB.(*ShowCreate)
		if !ok {
			return false
		}
		return EqualsRefOfShowCreate(a, b)
	case *ShowLegacy:
		b, ok := inB.(*ShowLegacy)
		if !ok {
			return false
		}
		return EqualsRefOfShowLegacy(a, b)
	default:
		// this should never happen
		return false
	}
}
// EqualsSimpleTableExpr does deep equals between the two objects.
// Two SimpleTableExpr values are equal only when they share the same
// dynamic type and compare equal under that type's deep-equals helper.
func EqualsSimpleTableExpr(inA, inB SimpleTableExpr) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; inB must hold the same type.
	switch a := inA.(type) {
	case *DerivedTable:
		b, ok := inB.(*DerivedTable)
		if !ok {
			return false
		}
		return EqualsRefOfDerivedTable(a, b)
	case TableName:
		b, ok := inB.(TableName)
		if !ok {
			return false
		}
		return EqualsTableName(a, b)
	default:
		// this should never happen
		return false
	}
}
// EqualsStatement does deep equals between the two objects.
// Two Statement values are equal only when they share the same dynamic
// type and compare equal under that type's deep-equals helper.
func EqualsStatement(inA, inB Statement) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; inB must hold the same type.
	switch a := inA.(type) {
	case *AlterDatabase:
		b, ok := inB.(*AlterDatabase)
		if !ok {
			return false
		}
		return EqualsRefOfAlterDatabase(a, b)
	case *AlterMigration:
		b, ok := inB.(*AlterMigration)
		if !ok {
			return false
		}
		return EqualsRefOfAlterMigration(a, b)
	case *AlterTable:
		b, ok := inB.(*AlterTable)
		if !ok {
			return false
		}
		return EqualsRefOfAlterTable(a, b)
	case *AlterView:
		b, ok := inB.(*AlterView)
		if !ok {
			return false
		}
		return EqualsRefOfAlterView(a, b)
	case *AlterVschema:
		b, ok := inB.(*AlterVschema)
		if !ok {
			return false
		}
		return EqualsRefOfAlterVschema(a, b)
	case *Begin:
		b, ok := inB.(*Begin)
		if !ok {
			return false
		}
		return EqualsRefOfBegin(a, b)
	case *CallProc:
		b, ok := inB.(*CallProc)
		if !ok {
			return false
		}
		return EqualsRefOfCallProc(a, b)
	case *Commit:
		b, ok := inB.(*Commit)
		if !ok {
			return false
		}
		return EqualsRefOfCommit(a, b)
	case *CreateDatabase:
		b, ok := inB.(*CreateDatabase)
		if !ok {
			return false
		}
		return EqualsRefOfCreateDatabase(a, b)
	case *CreateTable:
		b, ok := inB.(*CreateTable)
		if !ok {
			return false
		}
		return EqualsRefOfCreateTable(a, b)
	case *CreateView:
		b, ok := inB.(*CreateView)
		if !ok {
			return false
		}
		return EqualsRefOfCreateView(a, b)
	case *Delete:
		b, ok := inB.(*Delete)
		if !ok {
			return false
		}
		return EqualsRefOfDelete(a, b)
	case *DropDatabase:
		b, ok := inB.(*DropDatabase)
		if !ok {
			return false
		}
		return EqualsRefOfDropDatabase(a, b)
	case *DropTable:
		b, ok := inB.(*DropTable)
		if !ok {
			return false
		}
		return EqualsRefOfDropTable(a, b)
	case *DropView:
		b, ok := inB.(*DropView)
		if !ok {
			return false
		}
		return EqualsRefOfDropView(a, b)
	case *ExplainStmt:
		b, ok := inB.(*ExplainStmt)
		if !ok {
			return false
		}
		return EqualsRefOfExplainStmt(a, b)
	case *ExplainTab:
		b, ok := inB.(*ExplainTab)
		if !ok {
			return false
		}
		return EqualsRefOfExplainTab(a, b)
	case *Flush:
		b, ok := inB.(*Flush)
		if !ok {
			return false
		}
		return EqualsRefOfFlush(a, b)
	case *Insert:
		b, ok := inB.(*Insert)
		if !ok {
			return false
		}
		return EqualsRefOfInsert(a, b)
	case *Load:
		b, ok := inB.(*Load)
		if !ok {
			return false
		}
		return EqualsRefOfLoad(a, b)
	case *LockTables:
		b, ok := inB.(*LockTables)
		if !ok {
			return false
		}
		return EqualsRefOfLockTables(a, b)
	case *OtherAdmin:
		b, ok := inB.(*OtherAdmin)
		if !ok {
			return false
		}
		return EqualsRefOfOtherAdmin(a, b)
	case *OtherRead:
		b, ok := inB.(*OtherRead)
		if !ok {
			return false
		}
		return EqualsRefOfOtherRead(a, b)
	case *ParenSelect:
		b, ok := inB.(*ParenSelect)
		if !ok {
			return false
		}
		return EqualsRefOfParenSelect(a, b)
	case *Release:
		b, ok := inB.(*Release)
		if !ok {
			return false
		}
		return EqualsRefOfRelease(a, b)
	case *RenameTable:
		b, ok := inB.(*RenameTable)
		if !ok {
			return false
		}
		return EqualsRefOfRenameTable(a, b)
	case *RevertMigration:
		b, ok := inB.(*RevertMigration)
		if !ok {
			return false
		}
		return EqualsRefOfRevertMigration(a, b)
	case *Rollback:
		b, ok := inB.(*Rollback)
		if !ok {
			return false
		}
		return EqualsRefOfRollback(a, b)
	case *SRollback:
		b, ok := inB.(*SRollback)
		if !ok {
			return false
		}
		return EqualsRefOfSRollback(a, b)
	case *Savepoint:
		b, ok := inB.(*Savepoint)
		if !ok {
			return false
		}
		return EqualsRefOfSavepoint(a, b)
	case *Select:
		b, ok := inB.(*Select)
		if !ok {
			return false
		}
		return EqualsRefOfSelect(a, b)
	case *Set:
		b, ok := inB.(*Set)
		if !ok {
			return false
		}
		return EqualsRefOfSet(a, b)
	case *SetTransaction:
		b, ok := inB.(*SetTransaction)
		if !ok {
			return false
		}
		return EqualsRefOfSetTransaction(a, b)
	case *Show:
		b, ok := inB.(*Show)
		if !ok {
			return false
		}
		return EqualsRefOfShow(a, b)
	case *ShowMigrationLogs:
		b, ok := inB.(*ShowMigrationLogs)
		if !ok {
			return false
		}
		return EqualsRefOfShowMigrationLogs(a, b)
	case *Stream:
		b, ok := inB.(*Stream)
		if !ok {
			return false
		}
		return EqualsRefOfStream(a, b)
	case *TruncateTable:
		b, ok := inB.(*TruncateTable)
		if !ok {
			return false
		}
		return EqualsRefOfTruncateTable(a, b)
	case *Union:
		b, ok := inB.(*Union)
		if !ok {
			return false
		}
		return EqualsRefOfUnion(a, b)
	case *UnlockTables:
		b, ok := inB.(*UnlockTables)
		if !ok {
			return false
		}
		return EqualsRefOfUnlockTables(a, b)
	case *Update:
		b, ok := inB.(*Update)
		if !ok {
			return false
		}
		return EqualsRefOfUpdate(a, b)
	case *Use:
		b, ok := inB.(*Use)
		if !ok {
			return false
		}
		return EqualsRefOfUse(a, b)
	case *VStream:
		b, ok := inB.(*VStream)
		if !ok {
			return false
		}
		return EqualsRefOfVStream(a, b)
	default:
		// this should never happen
		return false
	}
}
// EqualsTableExpr does deep equals between the two objects.
// Two TableExpr values are equal only when they share the same dynamic
// type and compare equal under that type's deep-equals helper.
func EqualsTableExpr(inA, inB TableExpr) bool {
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on the dynamic type of inA; inB must hold the same type.
	switch a := inA.(type) {
	case *AliasedTableExpr:
		b, ok := inB.(*AliasedTableExpr)
		if !ok {
			return false
		}
		return EqualsRefOfAliasedTableExpr(a, b)
	case *JoinTableExpr:
		b, ok := inB.(*JoinTableExpr)
		if !ok {
			return false
		}
		return EqualsRefOfJoinTableExpr(a, b)
	case *ParenTableExpr:
		b, ok := inB.(*ParenTableExpr)
		if !ok {
			return false
		}
		return EqualsRefOfParenTableExpr(a, b)
	default:
		// this should never happen
		return false
	}
}
// EqualsSliceOfRefOfColumnDefinition does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsSliceOfRefOfColumnDefinition(a, b []*ColumnDefinition) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsRefOfColumnDefinition(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsSliceOfCollateAndCharset does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsSliceOfCollateAndCharset(a, b []CollateAndCharset) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsCollateAndCharset(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsSliceOfAlterOption does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsSliceOfAlterOption(a, b []AlterOption) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsAlterOption(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsSliceOfColIdent does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsSliceOfColIdent(a, b []ColIdent) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsColIdent(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsSliceOfRefOfWhen does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsSliceOfRefOfWhen(a, b []*When) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsRefOfWhen(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfColIdent does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
// Otherwise all identifier fields must match.
func EqualsRefOfColIdent(a, b *ColIdent) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.val == b.val &&
		a.lowered == b.lowered &&
		a.at == b.at
}
// EqualsColumnType does deep equals between the two objects.
// All scalar fields and nested options/length/scale/enum values must match.
func EqualsColumnType(a, b ColumnType) bool {
	return a.Type == b.Type &&
		a.Unsigned == b.Unsigned &&
		a.Zerofill == b.Zerofill &&
		a.Charset == b.Charset &&
		a.Collate == b.Collate &&
		EqualsRefOfColumnTypeOptions(a.Options, b.Options) &&
		EqualsRefOfLiteral(a.Length, b.Length) &&
		EqualsRefOfLiteral(a.Scale, b.Scale) &&
		EqualsSliceOfString(a.EnumValues, b.EnumValues)
}
// EqualsRefOfColumnTypeOptions does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
// Otherwise all fields must compare equal.
func EqualsRefOfColumnTypeOptions(a, b *ColumnTypeOptions) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.Autoincrement == b.Autoincrement &&
		EqualsRefOfBool(a.Null, b.Null) &&
		EqualsExpr(a.Default, b.Default) &&
		EqualsExpr(a.OnUpdate, b.OnUpdate) &&
		EqualsExpr(a.As, b.As) &&
		EqualsRefOfLiteral(a.Comment, b.Comment) &&
		a.Storage == b.Storage &&
		EqualsRefOfReferenceDefinition(a.Reference, b.Reference) &&
		a.KeyOpt == b.KeyOpt
}
// EqualsSliceOfString does deep equals between the two objects.
// Two string slices are equal when their lengths match and the elements
// at each index are identical; nil and empty slices compare equal.
func EqualsSliceOfString(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i, s := range a {
		if s != b[i] {
			return false
		}
	}
	return true
}
// EqualsSliceOfRefOfIndexColumn does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsSliceOfRefOfIndexColumn(a, b []*IndexColumn) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsRefOfIndexColumn(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsSliceOfRefOfIndexOption does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsSliceOfRefOfIndexOption(a, b []*IndexOption) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsRefOfIndexOption(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfJoinCondition does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfJoinCondition(a, b *JoinCondition) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsExpr(a.On, b.On) &&
		EqualsColumns(a.Using, b.Using)
}
// EqualsTableAndLockTypes does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsTableAndLockTypes(a, b TableAndLockTypes) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsRefOfTableAndLockType(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsSliceOfRefOfPartitionDefinition does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsSliceOfRefOfPartitionDefinition(a, b []*PartitionDefinition) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsRefOfPartitionDefinition(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsSliceOfRefOfRenameTablePair does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsSliceOfRefOfRenameTablePair(a, b []*RenameTablePair) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsRefOfRenameTablePair(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfBool does deep equals between the two objects.
// Two *bool values are equal when they are the same pointer (covers
// both-nil), or when both are non-nil and point at the same boolean.
func EqualsRefOfBool(a, b *bool) bool {
	switch {
	case a == b:
		return true
	case a == nil || b == nil:
		return false
	default:
		return *a == *b
	}
}
// EqualsSliceOfTableExpr does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsSliceOfTableExpr(a, b []TableExpr) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsTableExpr(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsSliceOfCharacteristic does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsSliceOfCharacteristic(a, b []Characteristic) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsCharacteristic(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfShowTablesOpt does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfShowTablesOpt(a, b *ShowTablesOpt) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.Full == b.Full &&
		a.DbName == b.DbName &&
		EqualsRefOfShowFilter(a.Filter, b.Filter)
}
// EqualsRefOfTableIdent does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfTableIdent(a, b *TableIdent) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.v == b.v
}
// EqualsRefOfTableName does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
func EqualsRefOfTableName(a, b *TableName) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsTableIdent(a.Name, b.Name) &&
		EqualsTableIdent(a.Qualifier, b.Qualifier)
}
// EqualsRefOfTableOption does deep equals between the two objects.
// Identical pointers (including two nils) are equal; exactly one nil is not.
// Otherwise all fields must compare equal.
func EqualsRefOfTableOption(a, b *TableOption) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.Name == b.Name &&
		a.String == b.String &&
		EqualsRefOfLiteral(a.Value, b.Value) &&
		EqualsTableNames(a.Tables, b.Tables)
}
// EqualsSliceOfRefOfIndexDefinition does deep equals between the two objects.
// Slices are equal when their lengths match and elements are pairwise
// equal in order; nil and empty slices compare equal.
func EqualsSliceOfRefOfIndexDefinition(a, b []*IndexDefinition) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if !EqualsRefOfIndexDefinition(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsSliceOfRefOfConstraintDefinition reports whether a and b hold
// deeply equal *ConstraintDefinition elements in the same order.
func EqualsSliceOfRefOfConstraintDefinition(a, b []*ConstraintDefinition) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !EqualsRefOfConstraintDefinition(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsSliceOfRefOfUnionSelect reports whether a and b hold deeply equal
// *UnionSelect elements in the same order.
func EqualsSliceOfRefOfUnionSelect(a, b []*UnionSelect) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !EqualsRefOfUnionSelect(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsRefOfVindexParam reports whether two *VindexParam values are deeply
// equal.
func EqualsRefOfVindexParam(a, b *VindexParam) bool {
	switch {
	case a == b:
		return true
	case a == nil || b == nil:
		return false
	}
	return a.Val == b.Val && EqualsColIdent(a.Key, b.Key)
}
// EqualsSliceOfVindexParam reports whether a and b hold deeply equal
// VindexParam elements in the same order.
func EqualsSliceOfVindexParam(a, b []VindexParam) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !EqualsVindexParam(a[i], b[i]) {
			return false
		}
	}
	return true
}
// EqualsCollateAndCharset reports whether the two CollateAndCharset values
// have identical fields.
func EqualsCollateAndCharset(a, b CollateAndCharset) bool {
	return a.IsDefault == b.IsDefault && a.Value == b.Value && a.Type == b.Type
}
// EqualsRefOfIndexColumn reports whether two *IndexColumn values are deeply
// equal.
func EqualsRefOfIndexColumn(a, b *IndexColumn) bool {
	switch {
	case a == b:
		return true
	case a == nil || b == nil:
		return false
	}
	if a.Direction != b.Direction {
		return false
	}
	return EqualsColIdent(a.Column, b.Column) &&
		EqualsRefOfLiteral(a.Length, b.Length)
}
// EqualsRefOfIndexOption reports whether two *IndexOption values are deeply
// equal.
func EqualsRefOfIndexOption(a, b *IndexOption) bool {
	switch {
	case a == b:
		return true
	case a == nil || b == nil:
		return false
	}
	if a.Name != b.Name || a.String != b.String {
		return false
	}
	return EqualsRefOfLiteral(a.Value, b.Value)
}
// EqualsRefOfTableAndLockType reports whether two *TableAndLockType values
// are deeply equal.
func EqualsRefOfTableAndLockType(a, b *TableAndLockType) bool {
	switch {
	case a == b:
		return true
	case a == nil || b == nil:
		return false
	}
	return a.Lock == b.Lock && EqualsTableExpr(a.Table, b.Table)
}
// EqualsRefOfRenameTablePair reports whether two *RenameTablePair values
// are deeply equal.
func EqualsRefOfRenameTablePair(a, b *RenameTablePair) bool {
	switch {
	case a == b:
		return true
	case a == nil || b == nil:
		return false
	}
	if !EqualsTableName(a.FromTable, b.FromTable) {
		return false
	}
	return EqualsTableName(a.ToTable, b.ToTable)
}
// EqualsRefOfCollateAndCharset does deep equals between the two objects.
func EqualsRefOfCollateAndCharset(a, b *CollateAndCharset) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
return a.IsDefault == b.IsDefault &&
a.Value == b.Value &&
a.Type == b.Type
} | go/vt/sqlparser/ast_equals.go | 0.511717 | 0.479565 | ast_equals.go | starcoder |
package fork
import "math/big"
// CompactToBig expands the compact ("nBits") representation of a whole
// number N into a big.Int. The 32-bit compact form packs three fields,
// similar to an IEEE754 float:
//
//	bits [31-24]: unsigned base-256 exponent
//	bit  [23]   : sign
//	bits [22-00]: mantissa
//
// so N = (-1^sign) * mantissa * 256^(exponent-3). Bitcoin only encodes
// unsigned 256-bit difficulty targets this way, so the sign bit is never
// needed in practice; it is honored anyway to stay consistent with
// bitcoind.
func CompactToBig(compact uint32) *big.Int {
	var (
		mantissa = compact & 0x007fffff
		negative = compact&0x00800000 != 0
		exponent = uint(compact >> 24)
	)
	// The exponent acts as a byte count for the full number: exponents of
	// three or less shift the mantissa down, larger ones shift the
	// resulting integer up. Equivalent to N = mantissa * 256^(exponent-3).
	result := new(big.Int)
	if exponent <= 3 {
		result.SetInt64(int64(mantissa >> (8 * (3 - exponent))))
	} else {
		result.SetInt64(int64(mantissa))
		result.Lsh(result, 8*(exponent-3))
	}
	// Apply the sign bit last.
	if negative {
		result.Neg(result)
	}
	return result
}
// BigToCompact converts a whole number N to a compact representation using an unsigned 32-bit number. The compact representation only provides 23 bits of precision, so values larger than (2^23 - 1) only encode the most significant digits of the number. See CompactToBig for details.
func BigToCompact( n *big.Int) uint32 {
// No need to do any work if it's zero.
if n.Sign() == 0 {
return 0
}
// Since the base for the exponent is 256, the exponent can be treated as the number of bytes. So, shift the number right or left accordingly. This is equivalent to: mantissa = mantissa / 256^(exponent-3)
var mantissa uint32
exponent := uint(len(n.Bytes()))
if exponent <= 3 {
mantissa = uint32(n.Bits()[0])
mantissa <<= 8 * (3 - exponent)
} else {
// Use a copy to avoid modifying the caller's original number.
tn := new(big.Int).Set(n)
mantissa = uint32(tn.Rsh(tn, 8*(exponent-3)).Bits()[0])
}
// When the mantissa already has the sign bit set, the number is too large to fit into the available 23-bits, so divide the number by 256 and increment the exponent accordingly.
if mantissa&0x00800000 != 0 {
mantissa >>= 8
exponent++
}
// Pack the exponent, sign bit, and mantissa into an unsigned 32-bit int and return it.
compact := uint32(exponent<<24) | mantissa
if n.Sign() < 0 {
compact |= 0x00800000
}
return compact
} | pkg/chain/fork/bits.go | 0.679604 | 0.564279 | bits.go | starcoder |
package op
// ProjectedGradient minimizes an objective subject to constraints by
// taking gradient steps and projecting each iterate back onto the
// feasible set (see Minimize for the reference this follows).
type ProjectedGradient struct {
	projector *Projection // projection used to clip each iterate onto the feasible set
	// beta is the backtracking shrink factor for the step size, sigma the
	// sufficient-decrease tolerance, and alpha the current step size
	// (persisted across Minimize calls so later runs start warm).
	beta, sigma, alpha float32
}
// vgpair bundles an objective value with the gradient evaluated at the
// same point, so the pair can be swapped as one unit between iterations.
type vgpair struct {
	value float32
	gradient Parameter
}
// NewProjectedGradient builds a ProjectedGradient that clips iterates with
// projector and uses beta/sigma/alpha as the line-search parameters.
func NewProjectedGradient(projector *Projection, beta, sigma, alpha float32) *ProjectedGradient {
	pg := new(ProjectedGradient)
	pg.projector = projector
	pg.beta = beta
	pg.sigma = sigma
	pg.alpha = alpha
	return pg
}
// Minimize runs an improved projected gradient descent on loss, starting
// from (and writing the final point back into) vec, until stop reports
// convergence. It returns the final objective value. The implementation
// follows the improved projected gradient method described on page 10 of
// "Projected Gradient Methods for Non-negative Matrix Factorization".
func (pg *ProjectedGradient) Minimize(loss Function, stop StopCriteria, vec Parameter) (float32, error) {
	stt := vec
	// Remember to clip the point before we do anything.
	pg.projector.ClipPoint(stt)
	// nxt holds the candidate next iterate; cdd is a trial point used when
	// probing larger step sizes.
	nxt := stt.CloneWithoutCopy()
	cdd := stt.CloneWithoutCopy()
	Fill(nxt, 0)
	// Evaluate the objective and gradient once at the starting point.
	ovalgrad := &vgpair{value: 0, gradient: stt.CloneWithoutCopy()}
	evaluate(loss, stt, ovalgrad)
	// Scratch value/gradient pairs for the candidate and trial points.
	nvalgrad := &vgpair{value: 0, gradient: stt.CloneWithoutCopy()}
	tvalgrad := &vgpair{value: 0, gradient: stt.CloneWithoutCopy()}
	alpha := pg.alpha
	for k := 0; !stop.Done(stt, ovalgrad.value, ovalgrad.gradient); k += 1 {
		newPoint(stt, nxt, ovalgrad.gradient, alpha, pg.projector)
		evaluate(loss, nxt, nvalgrad)
		if pg.isGoodStep(stt, nxt, ovalgrad, nvalgrad) {
			// The step passed the sufficient-decrease test: probe a larger
			// step (alpha / beta) at the trial point cdd.
			newPoint(stt, cdd, ovalgrad.gradient, alpha/pg.beta, pg.projector)
			evaluate(loss, cdd, tvalgrad)
			for pg.isGoodStep(stt, cdd, ovalgrad, tvalgrad) {
				// The larger step also works: keep it by swapping buffers
				// (no copying) and keep growing.
				nxt, cdd = cdd, nxt
				nvalgrad, tvalgrad = tvalgrad, nvalgrad
				// Now increase alpha as much as we can.
				alpha /= pg.beta
				newPoint(stt, cdd, ovalgrad.gradient, alpha/pg.beta, pg.projector)
				evaluate(loss, cdd, tvalgrad)
			}
		} else {
			// Now we decrease alpha barely enough to make sufficient decrease
			// of the objective value.
			for !pg.isGoodStep(stt, nxt, ovalgrad, nvalgrad) {
				alpha *= pg.beta
				newPoint(stt, nxt, ovalgrad.gradient, alpha, pg.projector)
				evaluate(loss, nxt, nvalgrad)
			}
		}
		// Now we arrive at a point that satisfies the sufficient-decrease
		// condition. Swap the weights and gradient for the next round.
		stt, nxt = nxt, stt
		ovalgrad, nvalgrad = nvalgrad, ovalgrad
	}
	// Originally stt == vec, but stt may have been swapped with a newly
	// created buffer (nxt), so copy the final point into the caller's
	// output parameter.
	for it := vec.IndexIterator(); it.Next(); {
		i := it.Index()
		vec.Set(i, stt.Get(i))
	}
	// Persist the step size so the next call can reuse it.
	pg.alpha = alpha
	// Return the final loss func value and error.
	return ovalgrad.value, nil
}
// isGoodStep implements the sufficient-decrease test of Eq (13):
// f(new) - f(old) <= sigma * <grad_old, new - old>.
func (pg *ProjectedGradient) isGoodStep(owts, nwts Parameter, ovg, nvg *vgpair) bool {
	var dot float64
	for it := owts.IndexIterator(); it.Next(); {
		idx := it.Index()
		dot += float64(ovg.gradient.Get(idx) * (nwts.Get(idx) - owts.Get(idx)))
	}
	return nvg.value-ovg.value <= pg.sigma*float32(dot)
}
// newPoint writes owts - alpha*grad into nwts and projects the result
// back onto the feasible set.
func newPoint(owts, nwts, grad Parameter, alpha float32, projector *Projection) {
	for it := owts.IndexIterator(); it.Next(); {
		idx := it.Index()
		nwts.Set(idx, owts.Get(idx)-alpha*grad.Get(idx))
	}
	projector.ClipPoint(nwts)
}
func evaluate(loss Function, stt Parameter, ovalgrad *vgpair) {
Fill(ovalgrad.gradient, 0)
ovalgrad.value = loss.Evaluate(stt, ovalgrad.gradient)
} | op/projected_gradient.go | 0.867822 | 0.562898 | projected_gradient.go | starcoder |
package gameutils
import (
"github.com/hajimehoshi/ebiten/v2"
"image"
)
// BaseEntity is the basic interface every game entity implements.
type BaseEntity interface {
	Update() error // advance the entity by one frame
	Draw(screen *ebiten.Image) // render the entity onto screen
	CheckPosition(x int, y int) bool // whether (x, y) is inside the entity
	GetPosition() (float64, float64) // current (x, y) position
	SetPosition(x float64, y float64) // place at an absolute position
	Move(x float64, y float64) // shift by a relative offset
	GetImage() *ebiten.Image // backing image
}
// Sprite is a basic drawable implementation meant to be embedded by
// subtypes so they share rendering code; on its own it neither moves nor
// interacts.
type Sprite struct {
	// Shared reference so similar entities can reuse the same resource.
	image *ebiten.Image
	X float64
	Y float64
}
// Draw renders the sprite's image at its current (X, Y) position.
func (e *Sprite) Draw(screen *ebiten.Image) {
	// TODO: temporary; an Animation should eventually drive this.
	opts := &ebiten.DrawImageOptions{}
	opts.GeoM.Translate(e.X, e.Y)
	screen.DrawImage(e.image, opts)
}
// Update does nothing because a plain Sprite is static.
func (e *Sprite) Update() error { return nil }
// CheckPosition reports whether the point (x, y) lies inside the entity's
// image rectangle. Passing the mouse position tells whether the cursor is
// over the entity.
func (e *Sprite) CheckPosition(x int, y int) bool {
	w, h := e.image.Size()
	px, py := float64(x), float64(y)
	insideX := px >= e.X && px <= e.X+float64(w)
	insideY := py >= e.Y && py <= e.Y+float64(h)
	return insideX && insideY
}
// GetPosition returns the sprite's current (x, y) position.
func (e *Sprite) GetPosition() (float64, float64) {
	return e.X, e.Y
}
// SetPosition places the sprite at the absolute position (x, y).
func (e *Sprite) SetPosition(x float64, y float64) {
	e.X, e.Y = x, y
}
// Move shifts the sprite by the given (x, y) offset.
func (e *Sprite) Move(x float64, y float64) {
	e.X, e.Y = e.X+x, e.Y+y
}
// GetImage returns the sprite's backing image (shared between similar
// entities).
func (e *Sprite) GetImage() *ebiten.Image {
	return e.image
}
// MakeSolidSprite builds a SolidSprite at (x, y) whose hitbox is the
// rectangle (x0, y0)-(x1, y1) given relative to the sprite's origin.
func MakeSolidSprite(img *ebiten.Image, x, y float64, x0, y0, x1, y1 int) *SolidSprite {
	ix, iy := int(x), int(y)
	return &SolidSprite{
		Sprite: Sprite{img, x, y},
		Hbox:   image.Rect(x0+ix, y0+iy, x1+ix, y1+iy),
	}
}
// SolidSprite is still a static entity, but one that can interact with
// other elements through contact via its hitbox.
type SolidSprite struct {
	Sprite
	Hbox image.Rectangle // collision rectangle in absolute (world) coordinates
}
// SetPosition places the sprite at the absolute position (x, y) and shifts
// the hitbox by the same (integer) displacement, preserving its size and
// its offset relative to the sprite.
func (s *SolidSprite) SetPosition(x float64, y float64) {
	// Integer displacement from the old position to the new one.
	dx := int(x) - int(s.Sprite.X)
	dy := int(y) - int(s.Sprite.Y)
	// Move the sprite, then translate the box by the same amount.
	s.Sprite.SetPosition(x, y)
	s.Hbox = s.Hbox.Add(image.Pt(dx, dy))
}
// Move shifts the sprite and its hitbox by the given offset.
func (s *SolidSprite) Move(x float64, y float64) {
	s.SetPosition(s.Sprite.X+x, s.Sprite.Y+y)
}
// MakeCharacter builds a character at full health with the given movement,
// jumping, and interaction attributes.
func MakeCharacter(img *ebiten.Image, x, y float64, x0, y0, x1, y1 int, health int,
	moveSpeed, airFactor, jumpForce float64, canGrab, canShoot bool) *Character {
	solid := MakeSolidSprite(img, x, y, x0, y0, x1, y1)
	return &Character{
		SolidSprite: *solid,
		health:      health,
		maxHealth:   health,
		moveSpeed:   moveSpeed,
		airFactor:   airFactor,
		jumpForce:   jumpForce,
		canGrab:     canGrab,
		canShoot:    canShoot,
		actionQueue: []Action{},
	}
}
// Character represents any game character that can move, jump, and attack.
type Character struct {
	SolidSprite
	health int // current hit points
	maxHealth int // hit points when fully healed
	moveSpeed float64 // horizontal movement speed
	airFactor float64 // speed reduction applied while airborne
	jumpForce float64 // jump "strength"; determines jump height
	canGrab bool // whether it can grab boxes or bombs
	canShoot bool // whether it can fire cannons
	actionQueue []Action // actions the character should perform next frame (if able)
}
// Update advances the character one frame: it runs the base sprite update,
// applies every queued action, and clears the queue.
func (c *Character) Update() error {
	// The base update will matter once animations exist.
	c.SolidSprite.Update()
	// Apply queued actions. Only horizontal movement is handled for now,
	// since physics and animations are not implemented yet.
	for _, queued := range c.actionQueue {
		// TODO: handle the remaining actions (jump, attack, grab, shoot).
		if queued.action == MOVE_LEFT {
			c.Move(-c.moveSpeed, 0)
		} else if queued.action == MOVE_RIGHT {
			c.Move(c.moveSpeed, 0)
		}
	}
	// Empty the action queue for the next frame.
	c.actionQueue = c.actionQueue[:0]
	// TODO: collision checks against other entities and damage handling.
	// Death detection is the responsibility of the containing structures.
	return nil
}
// MakePlayer builds a player-controlled character wired to iManager for
// input; players start unable to grab or shoot.
func MakePlayer(img *ebiten.Image, x, y float64, x0, y0, x1, y1, health int,
	moveSpeed, airFactor, jumpForce float64, iManager *InputManager) *Player {
	base := MakeCharacter(img, x, y, x0, y0, x1, y1, health, moveSpeed, airFactor, jumpForce, false, false)
	return &Player{Character: *base, iManager: iManager}
}
// Player is the user-controlled Character; it reads its actions from an
// InputManager every frame.
type Player struct {
	Character
	iManager *InputManager // input source polled in Update
}
func (p *Player) Update() error {
// TODO: Añadir salto y ataque.
// Capturamos eventos y actualizamos el personaje.
if a := p.iManager.IsActionPressed(MOVE_LEFT); a != nil {
p.Character.actionQueue = append(p.Character.actionQueue, *a)
}
if a := p.iManager.IsActionPressed(MOVE_RIGHT); a != nil {
p.Character.actionQueue = append(p.Character.actionQueue, *a)
}
return p.Character.Update()
} | castlecleanup/gameutils/entities.go | 0.5083 | 0.576214 | entities.go | starcoder |
package seq
import (
"fmt"
"strings"
util "github.com/leesjensen/go-chart/util"
)
const (
	// bufferMinimumGrow is the minimum number of extra slots added when growing.
	bufferMinimumGrow = 4
	// bufferShrinkThreshold appears to be unreferenced in this file — TODO confirm.
	bufferShrinkThreshold = 32
	// bufferGrowFactor is the growth rate in percent (200 means double).
	bufferGrowFactor = 200
	// bufferDefaultCapacity is the initial capacity of a new buffer.
	bufferDefaultCapacity = 4
)
var (
	// emptyArray is a shared zero-length slice; it appears to have no
	// remaining references in this file — TODO confirm before removing.
	emptyArray = make([]float64, 0)
)
// NewBuffer creates a Buffer holding the given values (possibly none),
// with at least the default backing capacity.
func NewBuffer(values ...float64) *Buffer {
	capacity := util.Math.MaxInt(len(values), bufferDefaultCapacity)
	backing := make([]float64, capacity)
	copy(backing, values)
	return &Buffer{
		array: backing,
		head:  0,
		tail:  len(values),
		size:  len(values),
	}
}
// NewBufferWithCapacity creates an empty Buffer whose backing array is
// pre-allocated to the given capacity.
func NewBufferWithCapacity(capacity int) *Buffer {
	// head, tail, and size all start at their zero values.
	return &Buffer{array: make([]float64, capacity)}
}
// Buffer is a FIFO data structure backed by a pre-allocated ring array.
// Instead of allocating a new node object for each element, array slots
// are re-used (which saves GC churn).
// Enqueue can be O(n) when the array must grow, Dequeue is generally O(1).
// Buffer implements `seq.Provider`.
type Buffer struct {
	array []float64 // backing ring storage
	head int // index of the oldest element
	tail int // index one past the newest element (wraps around)
	size int // number of elements currently stored
}
// Len returns the number of elements currently in the Buffer.
// The actual memory footprint may be larger (see Capacity).
func (b *Buffer) Len() int {
	return b.size
}
// GetValue returns the index-th element counted from the logical head of
// the ring; it implements seq.Provider.
func (b *Buffer) GetValue(index int) float64 {
	return b.array[(b.head+index)%len(b.array)]
}
// Capacity returns the size of the backing array, including unused slots.
func (b *Buffer) Capacity() int {
	return len(b.array)
}
// SetCapacity resizes the backing array to capacity, linearizing any
// wrapped contents so the buffer's head lands back at index 0.
// capacity must be at least b.size or the tail of the contents is lost.
func (b *Buffer) SetCapacity(capacity int) {
	newArray := make([]float64, capacity)
	if b.size > 0 {
		if b.head < b.tail {
			// Contiguous contents: a single copy suffices.
			copy(newArray, b.array[b.head:b.head+b.size])
		} else {
			// Wrapped contents: copy the segment from head to the end of
			// the array, then the segment from the start up to tail.
			n := copy(newArray, b.array[b.head:])
			copy(newArray[n:], b.array[:b.tail])
		}
	}
	b.array = newArray
	b.head = 0
	if b.size == capacity {
		b.tail = 0
	} else {
		b.tail = b.size
	}
}
// Clear drops all contents and resets the buffer to its default capacity.
func (b *Buffer) Clear() {
	b.array = make([]float64, bufferDefaultCapacity)
	b.head, b.tail, b.size = 0, 0, 0
}
// Enqueue appends value at the logical back of the Buffer, growing the
// backing array by bufferGrowFactor percent (but at least
// bufferMinimumGrow slots) when it is full.
func (b *Buffer) Enqueue(value float64) {
	if b.size == len(b.array) {
		// The original expression wrapped this in redundant int()
		// conversions and pre-divided the constant; computing
		// len * factor / 100 keeps the growth math explicit.
		newCapacity := len(b.array) * bufferGrowFactor / 100
		if newCapacity < len(b.array)+bufferMinimumGrow {
			newCapacity = len(b.array) + bufferMinimumGrow
		}
		b.SetCapacity(newCapacity)
	}
	b.array[b.tail] = value
	b.tail = (b.tail + 1) % len(b.array)
	b.size++
}
// Dequeue removes and returns the element at the logical front of the
// Buffer; it returns 0 when the buffer is empty.
func (b *Buffer) Dequeue() float64 {
	if b.size == 0 {
		return 0
	}
	value := b.array[b.head]
	b.head = (b.head + 1) % len(b.array)
	b.size--
	return value
}
// Peek returns the first (oldest) element without removing it; it returns
// 0 when the buffer is empty.
func (b *Buffer) Peek() float64 {
	if b.size == 0 {
		return 0
	}
	return b.array[b.head]
}
// PeekBack returns the last (newest) element without removing it; it
// returns 0 when the buffer is empty.
func (b *Buffer) PeekBack() float64 {
	if b.size == 0 {
		return 0
	}
	// tail points one past the newest element; step back with wrap-around.
	last := (b.tail - 1 + len(b.array)) % len(b.array)
	return b.array[last]
}
// TrimExcess shrinks the backing array to exactly fit the contents, but
// only when the buffer is below the 90% occupancy threshold (avoids
// reallocating for a marginal saving).
func (b *Buffer) TrimExcess() {
	threshold := float64(len(b.array)) * 0.9
	// Note: the comparison truncates threshold to an int.
	if b.size < int(threshold) {
		b.SetCapacity(b.size)
	}
}
// Array returns the buffer contents, oldest first, as a newly allocated
// Array; the receiver is not modified.
func (b *Buffer) Array() Array {
	newArray := make([]float64, b.size)
	if b.size == 0 {
		return newArray
	}
	if b.head < b.tail {
		// Contiguous contents: one copy.
		copy(newArray, b.array[b.head:b.tail])
	} else {
		// Wrapped contents: head-to-end segment first, then start-to-tail.
		n := copy(newArray, b.array[b.head:])
		copy(newArray[n:], b.array[:b.tail])
	}
	return Array(newArray)
}
// Each invokes mapfn once per element in logical order, passing the
// element's 0-based position from the head together with its value.
func (b *Buffer) Each(mapfn func(int, float64)) {
	// A single modular walk covers both the contiguous and wrapped cases;
	// an empty buffer simply runs zero iterations.
	for i := 0; i < b.size; i++ {
		mapfn(i, b.array[(b.head+i)%len(b.array)])
	}
}
// String renders the buffer contents, oldest first, joined by " <= ".
func (b *Buffer) String() string {
	parts := make([]string, 0, b.size)
	for _, v := range b.Array() {
		parts = append(parts, fmt.Sprintf("%v", v))
	}
	return strings.Join(parts, " <= ")
}
// --------------------------------------------------------------------------------
// Util methods
// --------------------------------------------------------------------------------
// arrayClear zeroes length elements of source starting at index.
func arrayClear(source []float64, index, length int) {
	for i := index; i < index+length; i++ {
		source[i] = 0
	}
}
// arrayCopy copies length elements from source (starting at sourceIndex)
// into destination (starting at destinationIndex). The built-in copy
// lowers to memmove, which is both faster than an element loop and safe
// for overlapping ranges within the same slice.
func arrayCopy(source []float64, sourceIndex int, destination []float64, destinationIndex, length int) {
	copy(destination[destinationIndex:destinationIndex+length], source[sourceIndex:sourceIndex+length])
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.