package sipparser
// Imports from the go standard library
import (
"errors"
"fmt"
)
// pAssertedIdStateFn is one state of the P-Asserted-Identity parser
// state machine: it processes a stage of the header and returns the
// next state, or nil when parsing is finished or has failed.
type pAssertedIdStateFn func(p *PAssertedId) pAssertedIdStateFn
// PAssertedId holds a parsed P-Asserted-Identity header:
// -- Error holds any error encountered during parsing
// -- Val is the raw header value
// -- Name is the display name from the p-asserted-id hdr
// -- URI is the parsed uri from the p-asserted-id hdr
// -- Params is a slice of the *Params from the p-asserted-id hdr
type PAssertedId struct {
	Error  error
	Val    string
	Name   string
	URI    *URI
	Params []*Param
	// nameInt is the second value returned by getName; presumably an
	// offset/length related to the display name — confirm in getName.
	nameInt int
}
// addParam parses s into a *Param and appends it to the Params field.
// The explicit nil-slice initialization of the original was redundant:
// append handles a nil slice correctly.
func (p *PAssertedId) addParam(s string) {
	p.Params = append(p.Params, getParam(s))
}
// parse runs the state machine over the raw .Val field, advancing from
// state to state until one returns nil (done or error).
func (p *PAssertedId) parse() {
	state := parsePAssertedId
	for state != nil {
		state = state(p)
	}
}
// parsePAssertedId is the initial parser state: it bails out if an
// error is already set, extracts the display name from the raw value,
// and hands off to URI extraction.
func parsePAssertedId(p *PAssertedId) pAssertedIdStateFn {
	if p.Error != nil {
		return nil
	}
	p.Name, p.nameInt = getName(p.Val)
	return parsePAssertedIdGetUri
}
// parsePAssertedIdGetUri locates the first '<' and first '>' in the raw
// value, parses the text between them as a URI, and advances to param
// parsing.  If no well-ordered bracket pair is found (left must come
// before right), an error is set and parsing stops.
func parsePAssertedIdGetUri(p *PAssertedId) pAssertedIdStateFn {
	var left, right int
	for i := 0; i < len(p.Val); i++ {
		switch p.Val[i] {
		case '<':
			if left == 0 {
				left = i
			}
		case '>':
			if right == 0 {
				right = i
			}
		}
	}
	if left >= right {
		p.Error = errors.New("parsePAssertedIdGetUri err: could not locate bracks in uri")
		return nil
	}
	p.URI = ParseURI(p.Val[left+1 : right])
	if p.URI.Error != nil {
		p.Error = fmt.Errorf("parsePAssertedIdGetUri err: received err getting uri: %v", p.URI.Error)
		return nil
	}
	return parsePAssertedIdGetParams
}
func parsePAssertedIdGetParams(p *PAssertedId) pAssertedIdStateFn {
var pos []int
right := 0
for i := range p.Val {
if p.Val[i] == '>' && right == 0 {
right = i
}
}
if len(p.Val) > right+1 {
pos = make([]int, 0)
for i := range p.Val[right+1:] {
if p.Val[right+1:][i] == ';' {
pos = append(pos, i)
}
}
}
if pos == nil {
return nil
}
for i := range pos {
if len(pos)-1 == i {
if len(p.Val[right+1:])-1 > pos[i]+1 {
p.addParam(p.Val[right+1:][pos[i]+1:])
}
}
if len(pos)-1 > i {
p.addParam(p.Val[right+1:][pos[i]+1 : pos[i+1]])
}
}
return nil
} | passertedid.go | 0.507812 | 0.42185 | passertedid.go | starcoder |
package ensure
import (
"fmt"
"reflect"
"strings"
"testing"
)
// Testable represents a value under test together with the testing
// context used to report failures.  Exactly one of Error, String, or
// Value is populated, depending on the dynamic type passed to Ensure
// (see makeEnsure).
type Testable struct {
	Test        *testing.T   // The test this value belongs to.
	Error       error        // Set when the tested value is an error.
	String      string       // Set when the tested value is a string.
	Value       interface{}  // Set for any other tested value.
	ReturnValue *interface{} // Optional stored return value (see Ensure2).
}
// s is a package-local shorthand for fmt.Sprintf.
var s = fmt.Sprintf
// Fatal aborts the current test, reporting the test name, the caller's
// context strings, and the failure message.
func (t *Testable) Fatal(msg string, what []string) {
	formatted := s("%s: %v\n--> %s\n", t.Test.Name(), what, msg)
	t.Test.Fatal(formatted)
}
// Succeeds asserts that the Testable holds no error.
func (t *Testable) Succeeds(what ...string) *Testable {
	if t.Error == nil {
		return t
	}
	t.Fatal(s("fails with error '%v'", t.Error), what)
	return t
}
// Fails asserts that the Testable holds a non-nil error.
func (t *Testable) Fails(what ...string) *Testable {
	if t.Error != nil {
		return t
	}
	t.Fatal("should have failed", what)
	return t
}
// Return yields the stored return value, failing the test if none was
// stored (see Ensure2).
func (t *Testable) Return(what ...string) interface{} {
	if t.ReturnValue != nil {
		return *t.ReturnValue
	}
	t.Fatal("no return value stored", what)
	return nil // unreachable: Fatal does not return
}
// Contains asserts that the Testable's string contains sub.
func (t *Testable) Contains(sub string, what ...string) *Testable {
	if strings.Contains(t.String, sub) {
		return t
	}
	t.Fatal(s("'%s' should contain '%s'", t.String, sub), what)
	return t
}
// Is asserts that the Testable equals the expected value.  Strings are
// compared directly; other values via reflect.DeepEqual.  Errors are
// not supported — use Succeeds/Fails instead.
//
// Idiom fix: bind the type-switch variable instead of re-asserting
// v.(string) inside the case.
func (t *Testable) Is(v interface{}, what ...string) *Testable {
	switch expected := v.(type) {
	case error:
		t.Fatal("do not use 'Is' for errors", what)
	case string:
		if expected != t.String {
			t.Fatal(s("should have similar values (is: '%v', expected: '%v')", t.String, expected), what)
		}
	default:
		if !reflect.DeepEqual(v, t.Value) {
			t.Fatal(s("should have similar values (is: '%v', expected: '%v')", t.Value, v), what)
		}
	}
	return t
}
// IsNot asserts that the Testable differs from the given value.
// Strings are compared directly; other values via reflect.DeepEqual.
// Errors are not supported — use Succeeds/Fails instead.
//
// Bug fix: the error-case message previously said 'Is' (copy-paste
// from Is); it now correctly names 'IsNot'.
func (t *Testable) IsNot(v interface{}, what ...string) *Testable {
	switch unexpected := v.(type) {
	case error:
		t.Fatal("do not use 'IsNot' for errors", what)
	case string:
		if unexpected == t.String {
			t.Fatal(s("should have different values (value: '%v')", unexpected), what)
		}
	default:
		if reflect.DeepEqual(v, t.Value) {
			t.Fatal(s("should have different values (value: '%v')", v), what)
		}
	}
	return t
}
// IsNotEmpty asserts that the Testable's string is non-empty.
func (t *Testable) IsNotEmpty(what ...string) *Testable {
	if t.String == "" {
		t.Fatal("string should not be empty", what)
	}
	return t
}
// makeEnsure constructs a Testable, routing v into the Error, String,
// or Value field according to its dynamic type.
func makeEnsure(v interface{}, t *testing.T) *Testable {
	switch value := v.(type) {
	case error:
		return &Testable{Test: t, Error: value}
	case string:
		return &Testable{Test: t, String: value}
	default:
		return &Testable{Test: t, Value: v}
	}
}
// Ensure creates a Testable for v bound to test t (the plain entry
// point; see Make for the closure-based variant).
func Ensure(t *testing.T, v interface{}) *Testable {
	return makeEnsure(v, t)
}
// T bundles test-bound Ensure helpers created by Make, so callers need
// not pass *testing.T on every assertion.
type T struct {
	Ensure  func(v interface{}) *Testable      // Wraps a value for assertions.
	Ensure2 func(res, v interface{}) *Testable // Like Ensure, but also stores res for Return().
}
// Make returns the Ensure function integrated with testing.
func Make(t *testing.T) T {
return T{
Ensure: func(v interface{}) *Testable { return makeEnsure(v, t) },
Ensure2: func(res, v interface{}) *Testable {
e := makeEnsure(v, t)
e.ReturnValue = &res
return e
},
}
} | ensure.go | 0.694821 | 0.49646 | ensure.go | starcoder |
package scatter
import (
"math"
"strconv"
"github.com/knightjdr/prohits-viz-analysis/pkg/float"
customMath "github.com/knightjdr/prohits-viz-analysis/pkg/math"
"github.com/knightjdr/prohits-viz-analysis/pkg/types"
)
// formatData prepares a Scatter for rendering: it computes axis
// boundaries from the plotted points, derives tick positions and axis
// lines from those boundaries, and finally scales everything into
// pixel space of length axisLength.
func formatData(scatter *Scatter, axisLength float64) {
	axisBoundaries := defineAxisBoundaries(scatter.Plot, scatter.LogBase)
	scatter.Ticks = defineTicks(axisBoundaries, scatter.LogBase)
	scatter.Axes = defineAxes(scatter.Ticks)
	scaleData(scatter, axisLength)
}
// defineAxisBoundaries computes rounded tick limits for both axes,
// using log-based limits when a log base is set and linear limits
// otherwise.
func defineAxisBoundaries(plot []types.ScatterPoint, logBase string) boundaries {
	minMax := getAxisMinMax(plot)
	if logBase == "none" {
		return defineLinearTickLimits(minMax)
	}
	return defineLogTickLimits(logBase, minMax)
}
// getAxisMinMax finds the extreme X and Y values of the plot, then
// clamps each axis range so it always includes zero (max >= 0 and
// min <= 0).
func getAxisMinMax(plot []types.ScatterPoint) boundaries {
	minmax := boundaries{
		x: boundary{max: -math.MaxFloat64, min: math.MaxFloat64},
		y: boundary{max: -math.MaxFloat64, min: math.MaxFloat64},
	}
	update := func(b *boundary, v float64) {
		if v > b.max {
			b.max = v
		}
		if v < b.min {
			b.min = v
		}
	}
	for _, point := range plot {
		update(&minmax.x, point.X)
		update(&minmax.y, point.Y)
	}
	// Force each axis to include the origin.
	clampToOrigin := func(b *boundary) {
		if b.max < 0 {
			b.max = 0
		}
		if b.min > 0 {
			b.min = 0
		}
	}
	clampToOrigin(&minmax.x)
	clampToOrigin(&minmax.y)
	return minmax
}
// defineLinearTickLimits rounds the raw min/max of each axis outward to
// "nice" linear tick limits.
func defineLinearTickLimits(minMax boundaries) boundaries {
	return boundaries{
		x: defineLinearTickLimitsForAxis(minMax.x),
		y: defineLinearTickLimitsForAxis(minMax.y),
	}
}
// defineLinearTickLimitsForAxis expands an axis' raw extremes to
// rounded tick limits.  When the axis spans zero, the side with the
// smaller order of magnitude is first widened to the larger side's
// order of magnitude so both limits share a scale.
func defineLinearTickLimitsForAxis(axis boundary) boundary {
	axisMax := axis.max
	axisMin := axis.min
	if axisMax > 0 && axisMin < 0 {
		powerMax := math.Floor(math.Log10(math.Abs(axisMax)))
		powerMin := math.Floor(math.Log10(math.Abs(axisMin)))
		if powerMax < powerMin {
			axisMax = math.Pow(10, powerMin)
		}
		if powerMin < powerMax {
			axisMin = -math.Pow(10, powerMax)
		}
	}
	// The rounding direction is taken from the sign of the *original*
	// extreme (math.Signbit), not the possibly-widened value.
	return boundary{
		max: defineLinearTickLimit(axisMax, math.Signbit(axis.max)),
		min: defineLinearTickLimit(axisMin, math.Signbit(axis.min)),
	}
}
// defineLinearTickLimit rounds boundary away from zero (up for
// positive, down for negative) to one significant digit, e.g.
// 123 -> 200 and -123 -> -200.  Zero maps to zero.
func defineLinearTickLimit(boundary float64, isNegative bool) float64 {
	if boundary == 0 {
		return 0
	}
	// Magnitude of the boundary's leading digit, e.g. 100 for 123.
	magnitude := math.Pow(10, math.Floor(math.Log10(math.Abs(boundary))))
	round := math.Ceil
	if isNegative {
		round = math.Floor
	}
	return round(boundary/magnitude) * magnitude
}
// defineLogTickLimits rounds the raw min/max of each axis outward to
// powers of the log base suitable as tick limits.
func defineLogTickLimits(base string, minMax boundaries) boundaries {
	return boundaries{
		x: defineLogTickLimitsForAxis(base, minMax.x),
		y: defineLogTickLimitsForAxis(base, minMax.y),
	}
}
// defineLogTickLimitsForAxis expands an axis' extremes to log-friendly
// limits.  A non-zero extreme is rounded outward to the next power of
// the base (sign preserved); a zero extreme is replaced by a small
// fractional limit just past zero, since a log scale cannot represent
// zero.  Assumes axis.min <= 0 <= axis.max, which getAxisMinMax's
// clamping guarantees.
func defineLogTickLimitsForAxis(base string, axis boundary) boundary {
	limits := boundary{}
	if axis.max != 0 {
		limits.max = defineUpperLogTickLimit(base, math.Abs(axis.max))
	} else {
		// Degenerate all-negative axis: max sits just below zero.
		limits.max = -1 * defineLowerLogTickLimit(base, math.Abs(axis.max))
	}
	if axis.min != 0 {
		limits.min = -1 * defineUpperLogTickLimit(base, math.Abs(axis.min))
	} else {
		// Degenerate all-positive axis: min sits just above zero.
		limits.min = defineLowerLogTickLimit(base, math.Abs(axis.min))
	}
	return limits
}
// defineUpperLogTickLimit rounds value up to the next integer power of
// the log base (base 10 unless logBase is "2").
func defineUpperLogTickLimit(logBase string, value float64) float64 {
	base := 10.0
	logFn := math.Log10
	if logBase == "2" {
		base = 2
		logFn = math.Log2
	}
	return math.Pow(base, math.Ceil(logFn(value)))
}
// defineLowerLogTickLimit returns the smallest tick magnitude used near
// zero: one base-fraction below 1 (0.5 for base 2, 0.1 otherwise) when
// value is fractional, and 1 for anything else.
func defineLowerLogTickLimit(logBase string, value float64) float64 {
	fractionalLimit := 0.1
	if logBase == "2" {
		fractionalLimit = 0.5
	}
	if value < 1 {
		return fractionalLimit
	}
	return 1
}
// defineTicks computes tick positions for both axes, choosing
// log-spaced ticks when a log base is set and linear ticks otherwise.
func defineTicks(axisBoundaries boundaries, logBase string) Ticks {
	if logBase == "none" {
		return calculateLinearTicks(axisBoundaries)
	}
	return calculateLogTicks(logBase, axisBoundaries)
}
// calculateLinearTicks produces linear tick positions for both axes.
func calculateLinearTicks(axisBoundaries boundaries) Ticks {
	return Ticks{
		X: calculateLinearTicksForAxis(axisBoundaries.x),
		Y: calculateLinearTicksForAxis(axisBoundaries.y),
	}
}
// calculateLinearTicksForAxis generates evenly spaced ticks from the
// axis minimum to its maximum.  The step is one order of magnitude
// below the larger absolute limit; the -0.5 nudges exact powers of ten
// down one decade.  NOTE(review): a largest absolute limit <= 0.5 makes
// the Log10 argument non-positive — presumably limits are pre-rounded
// upstream so this cannot occur; confirm.
func calculateLinearTicksForAxis(axis boundary) []float64 {
	maxAbsoluteValue := math.Max(math.Abs(axis.max), math.Abs(axis.min))
	power := math.Floor(math.Log10(maxAbsoluteValue - 0.5))
	step := math.Pow(10, power)
	ticks := make([]float64, 0)
	for i := axis.min; i <= axis.max; i += step {
		ticks = append(ticks, i)
	}
	// Guarantee the maximum itself is the final tick (float stepping
	// may stop short of it).
	if ticks[len(ticks)-1] != axis.max {
		ticks = append(ticks, axis.max)
	}
	return ticks
}
// calculateLogTicks produces log-spaced tick positions for both axes.
func calculateLogTicks(logBase string, axisBoundaries boundaries) Ticks {
	return Ticks{
		X: calculateLogTicksForAxis(logBase, axisBoundaries.x),
		Y: calculateLogTicksForAxis(logBase, axisBoundaries.y),
	}
}
// calculateLogTicksForAxis generates multiplicative (log-spaced) ticks
// from axis.min up to axis.max.  On an axis crossing zero it walks the
// negative ticks toward zero (shrinking in magnitude each step), flips
// to the positive side once the innermost negative tick is reached,
// then grows outward to the maximum.
func calculateLogTicksForAxis(logBase string, axis boundary) []float64 {
	ticks := make([]float64, 0)
	// logBase is expected to be "2" or "10" here; a parse failure
	// yields 0 and is not checked — NOTE(review): confirm upstream
	// validation of LogBase.
	logBaseAsFloat, _ := strconv.ParseFloat(logBase, 64)
	stepMultiplier := logBaseAsFloat
	if axis.min <= 0 {
		// Starting from a negative minimum: each step divides the
		// magnitude by the base.
		stepMultiplier = 1 / logBaseAsFloat
	}
	if axis.min < 0 && axis.max > 0 {
		end := -1 / logBaseAsFloat
		for i := axis.min; i < axis.max; i *= stepMultiplier {
			ticks = append(ticks, i)
			lastTickIndex := len(ticks) - 1
			// Once the negative tick closest to zero has been emitted,
			// jump across zero and start multiplying outward.
			if ticks[lastTickIndex] >= end && ticks[lastTickIndex] < 0 {
				i = -ticks[lastTickIndex] / logBaseAsFloat
				stepMultiplier = logBaseAsFloat
			}
		}
	} else {
		for i := axis.min; i < axis.max; i *= stepMultiplier {
			ticks = append(ticks, i)
		}
	}
	// The exact maximum is always the final tick.
	ticks = append(ticks, axis.max)
	return ticks
}
// defineAxes determines where the x- and y-axis lines are drawn.  Each
// axis spans the full tick range of its own dimension and is placed, in
// the other dimension, at the origin: zero when zero lies on or within
// the tick range, otherwise the tick value closest to zero.
func defineAxes(ticks Ticks) Axes {
	defineOrigin := func(axis []float64) float64 {
		last := len(axis) - 1
		if axis[0] == 0 || axis[last] == 0 || (axis[0] < 0 && axis[last] > 0) {
			return 0
		}
		// Zero is outside the range: pick the tick nearest to it.
		origin := axis[0]
		for _, tick := range axis[1:] {
			if math.Abs(tick) < math.Abs(origin) {
				origin = tick
			}
		}
		return origin
	}
	xOrigin := defineOrigin(ticks.X)
	yOrigin := defineOrigin(ticks.Y)
	return Axes{
		X: Line{
			X1: ticks.X[0],
			X2: ticks.X[len(ticks.X)-1],
			Y1: yOrigin,
			Y2: yOrigin,
		},
		Y: Line{
			X1: xOrigin,
			X2: xOrigin,
			Y1: ticks.Y[0],
			Y2: ticks.Y[len(ticks.Y)-1],
		},
	}
}
// scaleData maps plot points, tick positions, and axis endpoints from
// data space into pixel space of length axisLength.  Tick labels are
// captured before the tick values are overwritten with scaled
// positions.  The axis-line y coordinates are flipped
// (axisLength - y), consistent with a top-left pixel origin.
func scaleData(scatter *Scatter, axisLength float64) {
	scaleXValue := getScaler(scatter.LogBase, axisLength, scatter.Ticks.X)
	scaleYValue := getScaler(scatter.LogBase, axisLength, scatter.Ticks.Y)
	for i := range scatter.Plot {
		// Clamp values below the first tick onto the axis start.
		scatter.Plot[i].X = scaleXValue(math.Max(scatter.Plot[i].X, scatter.Ticks.X[0]))
		scatter.Plot[i].Y = scaleYValue(math.Max(scatter.Plot[i].Y, scatter.Ticks.Y[0]))
	}
	scatter.Ticks.XLabel = make([]string, len(scatter.Ticks.X))
	for i := range scatter.Ticks.X {
		// Label from the data value, then overwrite with the pixel
		// position.
		scatter.Ticks.XLabel[i] = float.RemoveTrailingZeros(scatter.Ticks.X[i])
		scatter.Ticks.X[i] = scaleXValue(scatter.Ticks.X[i])
	}
	scatter.Ticks.YLabel = make([]string, len(scatter.Ticks.Y))
	for i := range scatter.Ticks.Y {
		scatter.Ticks.YLabel[i] = float.RemoveTrailingZeros(scatter.Ticks.Y[i])
		scatter.Ticks.Y[i] = scaleYValue(scatter.Ticks.Y[i])
	}
	scatter.Axes.X.X1 = scaleXValue(scatter.Axes.X.X1)
	scatter.Axes.X.X2 = scaleXValue(scatter.Axes.X.X2)
	scatter.Axes.X.Y1 = customMath.Round(axisLength-scaleYValue(scatter.Axes.X.Y1), 0.01)
	scatter.Axes.X.Y2 = customMath.Round(axisLength-scaleYValue(scatter.Axes.X.Y2), 0.01)
	scatter.Axes.Y.X1 = scaleXValue(scatter.Axes.Y.X1)
	scatter.Axes.Y.X2 = scaleXValue(scatter.Axes.Y.X2)
	scatter.Axes.Y.Y1 = customMath.Round(axisLength-scaleYValue(scatter.Axes.Y.Y1), 0.01)
	scatter.Axes.Y.Y2 = customMath.Round(axisLength-scaleYValue(scatter.Axes.Y.Y2), 0.01)
}
// getScaler returns a function mapping a data-space value onto the
// pixel axis [0, axisLength], given the final tick positions.
//
// Linear case: a simple proportional map between the first and last
// tick.  Log case: the negative and positive tick runs are each mapped
// log-linearly onto their share of the axis, and the small interval
// between the innermost negative and innermost positive tick (the
// "void space" containing zero) is mapped linearly.
func getScaler(logBase string, axisLength float64, ticks []float64) func(float64) float64 {
	first := ticks[0]
	last := ticks[len(ticks)-1]
	if logBase != "none" {
		logFunc := math.Log10
		if logBase == "2" {
			logFunc = math.Log2
		}
		segments := len(ticks) - 1
		numNegativeTicks := 0
		for _, tick := range ticks {
			if tick < 0 {
				numNegativeTicks += 1
			}
		}
		numPositiveTicks := len(ticks) - numNegativeTicks
		// Each side of zero gets axis length proportional to its number
		// of segments (tick count minus one).
		negAxisLength := float64(0)
		if numNegativeTicks > 0 {
			negAxisLength = axisLength * ((float64(numNegativeTicks) - 1) / float64(segments))
		}
		posAxisLength := float64(0)
		if numPositiveTicks > 0 {
			posAxisLength = axisLength * ((float64(numPositiveTicks) - 1) / float64(segments))
		}
		// Smallest and largest absolute tick magnitudes on each side.
		negativeExtremes := map[string]float64{
			"max": 0,
			"min": math.Inf(1),
		}
		positiveExtremes := map[string]float64{
			"max": 0,
			"min": math.Inf(1),
		}
		for _, tick := range ticks {
			absoluteTick := math.Abs(tick)
			if tick < 0 && absoluteTick > negativeExtremes["max"] {
				negativeExtremes["max"] = absoluteTick
			}
			if tick < 0 && absoluteTick < negativeExtremes["min"] {
				negativeExtremes["min"] = absoluteTick
			}
			if tick > positiveExtremes["max"] {
				positiveExtremes["max"] = tick
			}
			if tick > 0 && tick < positiveExtremes["min"] {
				positiveExtremes["min"] = tick
			}
		}
		// One segment's worth of axis is reserved for the interval that
		// straddles zero, when ticks exist on both sides.
		voidSpace := float64(0)
		if numPositiveTicks > 0 && numNegativeTicks > 0 {
			voidSpace = axisLength / (float64(len(ticks)) - 1)
		}
		// k*log(|v|) + c maps each side's magnitudes onto [0, side length].
		kNeg := float64(0)
		if negAxisLength > 0 {
			kNeg = negAxisLength / (logFunc(negativeExtremes["max"]) - logFunc(negativeExtremes["min"]))
		}
		kPos := float64(0)
		if posAxisLength > 0 {
			kPos = posAxisLength / (logFunc(positiveExtremes["max"]) - logFunc(positiveExtremes["min"]))
		}
		cNeg := -1 * kNeg * logFunc(negativeExtremes["min"])
		cPos := -1 * kPos * logFunc(positiveExtremes["min"])
		// Linear interpolation across the void space around zero.
		scaleLinear := func(point float64) float64 {
			return customMath.Round(
				negAxisLength+(((point+negativeExtremes["min"])/(positiveExtremes["min"]+negativeExtremes["min"]))*voidSpace),
				0.01,
			)
		}
		return func(point float64) float64 {
			if point >= 0 {
				if point < positiveExtremes["min"] {
					return scaleLinear(point)
				}
				return customMath.Round(kPos*logFunc(point)+cPos+negAxisLength+voidSpace, 0.01)
			}
			if point > -negativeExtremes["min"] {
				return scaleLinear(point)
			}
			return customMath.Round(negAxisLength-(kNeg*logFunc(math.Abs(point))+cNeg), 0.01)
		}
	}
	// Linear scale: proportional position between the first and last tick.
	return func(point float64) float64 {
		return customMath.Round(axisLength*(point-first)/(last-first), 0.01)
	}
}
package main
import (
"flag"
"fmt"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/sensorable/lblconv"
)
// Command-line configuration, populated from flags in init().
var (
	convertFrom format // The source format.
	convertTo format // The target format.
	imageDirPath string // The input directory with the labeled images.
	imageOutDirPath string // The output directory for images after processing.
	labelFileOrDirPath string // The input label directory or file, depending on the format.
	labelOutFileOrDirPaths []string // The output label dir or file path(s), depending on the format.
	labelOutSplits []int // The cumulative split percentages for the output datasets.
	tfRecordLabelMapFilePath string // The TFRecord label map file.
	numShardFiles int // The number of shard files to create.
	labelMappings string // A comma-separated string of label mappings.
	bboxScaleWidth float64 // A scale factor for the bounding box width.
	bboxScaleHeight float64 // A scale factor for the bounding box height.
	bboxAspectRatio float64 // The desired output aspect ratio for bounding boxes.
	filterLabels string // A comma-separated string of labels to keep (empty keeps all).
	filterAttributes string // A comma-separated string of attributes to keep (empty keeps all).
	filterRequiredAttrs string // A comma-sep. str of required attrs (present and not zero value).
	filterConfidence float64 // The min. confidence value.
	filterRequireLabel bool // Filter out files with no labels (after other filters).
	filterMinBboxWidth float64 // The minimum bounding box width.
	filterMinBboxHeight float64 // The minimum bounding box height.
	filterMinAspectRatio float64 // The minimum aspect ratio of bboxes (w/h).
	filterMaxAspectRatio float64 // The maximum aspect ratio of bboxes (w/h).
	imageOutEncoding string // The file type for image outputs.
	imageResizeLonger int // The target length for the longer side of the image.
	imageResizeShorter int // The target length for the shorter side of the image.
	imageDownsamplingFilter string // The algorithm to use when downsampling.
	imageUpsamplingFilter string // The algorithm to use when upsampling.
	imageJPEGQuality int // The JPEG quality for JPEG outputs.
	imageCropObjects bool // Crop individual objects from images and output these instead.
)
// format enumerates the supported annotation label formats.
type format int

// The known label formats.
const (
	Unknown format = iota // If an unknown format is specified.
	AWSDetectLabels
	AWSDetectText
	Kitti
	Sloth
	TFRecord
	VIA // VGG Image Annotator
)
// formatFrom maps a command-line format name to its format constant,
// returning Unknown for unrecognized names.
func formatFrom(s string) format {
	known := map[string]format{
		"aws-dl":   AWSDetectLabels,
		"aws-dt":   AWSDetectText,
		"kitti":    Kitti,
		"sloth":    Sloth,
		"tfrecord": TFRecord,
		"via":      VIA,
	}
	if f, ok := known[s]; ok {
		return f
	}
	return Unknown
}
// init defines all command-line flags, parses them, and validates the
// resulting configuration, printing usage and exiting on any invalid
// combination.  All package-level configuration variables are
// populated here.
func init() {
	flag.Usage = func() {
		_, _ = fmt.Fprintf(os.Stderr, "Usage: %s -from <format> -to <format> [<arg> ...]\n",
			filepath.Base(os.Args[0]))
		_, _ = fmt.Fprintln(os.Stderr)
		_, _ = fmt.Fprintln(os.Stderr, "The supported input (-from) and output (-to) formats and their"+
			" required arguments:")
		_, _ = fmt.Fprintln(os.Stderr, " AWS Rekognition detect-labels:")
		_, _ = fmt.Fprintln(os.Stderr, " -from aws-dl -labels <dir> -images <dir>")
		_, _ = fmt.Fprintln(os.Stderr, " AWS Rekognition detect-text:")
		_, _ = fmt.Fprintln(os.Stderr, " -from aws-dt -labels <dir> -images <dir>")
		_, _ = fmt.Fprintln(os.Stderr, " KITTI 2D object detection:")
		_, _ = fmt.Fprintln(os.Stderr, " -from kitti -labels <dir> -images <dir>")
		_, _ = fmt.Fprintln(os.Stderr, " -to kitti -labels-out <dir>")
		_, _ = fmt.Fprintln(os.Stderr, " Sloth:")
		_, _ = fmt.Fprintln(os.Stderr, " -from sloth -labels <file>")
		_, _ = fmt.Fprintln(os.Stderr, " -to sloth -labels-out <file>")
		_, _ = fmt.Fprintln(os.Stderr, " TensorFlow TFRecord:")
		_, _ = fmt.Fprintln(os.Stderr, " -to tfrecord -labels-out <file>"+
			" -tfrecord-label-map-file <file> [-num-shards <int>]")
		_, _ = fmt.Fprintln(os.Stderr, " VGG Image Annotator (VIA):")
		_, _ = fmt.Fprintln(os.Stderr, " -from via -labels <file>")
		_, _ = fmt.Fprintln(os.Stderr, " -to via -labels-out <file>")
		_, _ = fmt.Fprintln(os.Stderr)
		_, _ = fmt.Fprintln(os.Stderr, "Arguments:")
		flag.PrintDefaults()
	}
	// printUsageAndExit logs msg, prints usage, and terminates.
	printUsageAndExit := func(msg ...interface{}) {
		log.Print(msg...)
		flag.Usage()
		os.Exit(1)
	}
	// Format arguments.
	from := flag.String("from", "", "The source `format`")
	to := flag.String("to", "", "The target `format`")
	// Path arguments.
	flag.StringVar(&imageDirPath, "images", imageDirPath,
		"The `path` to the image input directory")
	flag.StringVar(&imageOutDirPath, "images-out", imageOutDirPath,
		"The `path` to the image output directory (only required when image processing"+
			" functionality is used")
	flag.StringVar(&labelFileOrDirPath, "labels", labelFileOrDirPath,
		"The `path` to the label input file (sloth, via) or directory (kitti, aws-dl, aws-dt)")
	outPaths := flag.String("labels-out", "",
		"The comma-separated paths (`path[,...]`) to the label output files (sloth, tfrecord, via)"+
			" or directories (kitti); must be one path per value in flag -split")
	outSplits := flag.String("split", "100",
		"The comma-separated output split percentages (`percent[,...]`) to divide labels into"+
			" (only sloth, tfrecord, and via output formats); must add up to 100%")
	flag.StringVar(&tfRecordLabelMapFilePath, "tfrecord-label-map-file", tfRecordLabelMapFilePath,
		"The TFRecord label map file `path`")
	flag.IntVar(&numShardFiles, "num-shards", 1,
		"The number of shard files to create (tfrecord only)")
	// Conversion and transformation arguments.
	flag.StringVar(&labelMappings, "map-labels", labelMappings,
		"Comma-separated list of old=new label (sub-)string replacements")
	flag.Float64Var(&bboxScaleWidth, "bbox-scale-x", 1,
		"A scale factor for the width of all bounding boxes")
	flag.Float64Var(&bboxScaleHeight, "bbox-scale-y", 1,
		"A scale factor for the height of all bounding boxes")
	flag.Float64Var(&bboxAspectRatio, "bbox-aspect-ratio", 0,
		"The output aspect `ratio` for object bounding boxes; bounding boxes are grown (not shrunk)"+
			" to match this ratio when it is > 0")
	// Filter arguments.
	flag.StringVar(&filterLabels, "filter-labels", filterLabels,
		"Comma-separated list of labels to keep (after map-labels; empty string keeps all)")
	flag.StringVar(&filterAttributes, "filter-attributes", filterAttributes,
		"Comma-separated list of attributes to keep (if the target format supports attributes;"+
			" empty string keeps all)")
	flag.StringVar(&filterRequiredAttrs, "filter-required-attrs", filterRequiredAttrs,
		"Comma-separated list of required attributes whose values must not be the Go zero value for"+
			" their type to keep the annotation")
	flag.Float64Var(&filterConfidence, "min-confidence", filterConfidence,
		"The minimum confidence value to keep a label; range [0.0, 1.0)")
	flag.BoolVar(&filterRequireLabel, "require-label", filterRequireLabel,
		"Require at least one label (after filters) to keep the file")
	flag.Float64Var(&filterMinBboxWidth, "min-bbox-width", filterMinBboxWidth,
		"The min. required width in `pixels` for object bounding boxes (before resizing)")
	flag.Float64Var(&filterMinBboxHeight, "min-bbox-height", filterMinBboxHeight,
		"The min. required height in `pixels` for object bounding boxes (before resizing)")
	flag.Float64Var(&filterMinAspectRatio, "min-bbox-aspect-ratio", filterMinAspectRatio,
		"The min. required aspect `ratio` (width/height) for object bounding boxes (before resizing;"+
			" zero disables the filter)")
	flag.Float64Var(&filterMaxAspectRatio, "max-bbox-aspect-ratio", filterMaxAspectRatio,
		"The max. required aspect `ratio` (width/height) for object bounding boxes (before resizing;"+
			" zero disables the filter)")
	// Image processing arguments.
	flag.StringVar(&imageOutEncoding, "image-enc", "jpg",
		"The `encoding` for output images {jpg, png}")
	flag.IntVar(&imageResizeLonger, "resize-longer", imageResizeLonger,
		"The target `length` for the longer side of the image (zero to keep aspect ratio)")
	flag.IntVar(&imageResizeShorter, "resize-shorter", imageResizeShorter,
		"The target `length` for the shorter side of the image (zero to keep aspect ratio)")
	flag.StringVar(&imageDownsamplingFilter, "downsample-filter", "box",
		"The filter to use when downsampling an image {nearest, box, linear, gaussian, lanczos}")
	flag.StringVar(&imageUpsamplingFilter, "upsample-filter", "linear",
		"The filter to use when upsampling an image {nearest, box, linear, gaussian, lanczos}")
	flag.IntVar(&imageJPEGQuality, "jpeg-quality", 90,
		"The quality to use when encoding JPEGs [1, 100]")
	flag.BoolVar(&imageCropObjects, "crop-objects", imageCropObjects,
		"Crop and output objects from images (image processing flags apply to the individual crops)")
	// Parse and validate flags.
	flag.Parse()
	convertFrom = formatFrom(*from)
	convertTo = formatFrom(*to)
	// Validate the conversion direction.
	validInFormat := false
	for _, f := range []format{AWSDetectLabels, AWSDetectText, Kitti, Sloth, VIA} {
		if f == convertFrom {
			validInFormat = true
			break
		}
	}
	validOutFormat := false
	for _, f := range []format{Kitti, Sloth, TFRecord, VIA} {
		if f == convertTo {
			validOutFormat = true
			break
		}
	}
	if !validInFormat {
		printUsageAndExit("Unsupported input format")
	} else if !validOutFormat {
		printUsageAndExit("Unsupported output format")
	}
	// Validate input arguments.
	if labelFileOrDirPath == "" ||
		(convertFrom == Kitti && imageDirPath == "") ||
		(convertFrom == AWSDetectLabels && imageDirPath == "") ||
		(convertFrom == AWSDetectText && imageDirPath == "") {
		printUsageAndExit("Missing label or image input path argument")
	}
	// Validate output split arguments.
	labelOutFileOrDirPaths = strings.Split(*outPaths, ",")
	splits := strings.Split(*outSplits, ",")
	if len(splits) != len(labelOutFileOrDirPaths) {
		printUsageAndExit("The number of output datasets defined by -split and the number of" +
			" paths in -labels-out must match")
	}
	if convertTo == Kitti && len(splits) > 1 {
		printUsageAndExit("Argument -split is not supported with output format \"kitti\"")
	}
	// Parse splits as cumulative int percentages.
	var splitSum int
	for _, v := range splits {
		if i, err := strconv.Atoi(v); err != nil || i < 0 || i > 100 {
			printUsageAndExit("Invalid value in -split: ", v)
		} else {
			splitSum += i
			labelOutSplits = append(labelOutSplits, splitSum)
		}
	}
	if splitSum != 100 {
		printUsageAndExit("The values in -split must add up to 100%")
	}
	// Validate other output arguments.
	if convertTo == TFRecord && tfRecordLabelMapFilePath == "" {
		printUsageAndExit("Missing label output path argument")
	}
	// Transformation arguments.
	if bboxScaleWidth <= 0 || bboxScaleHeight <= 0 {
		printUsageAndExit("Invalid bounding box scale factor")
	} else if bboxAspectRatio < 0 {
		printUsageAndExit("Invalid value for -bbox-aspect-ratio")
	}
	// Image processing arguments.
	if (imageResizeLonger > 0 || imageResizeShorter > 0 || imageCropObjects) &&
		imageOutDirPath == "" {
		printUsageAndExit("Missing image output directory path")
	}
	if imageJPEGQuality < 1 || imageJPEGQuality > 100 {
		imageJPEGQuality = 92
		log.Print("Invalid JPEG quality, setting it to ", imageJPEGQuality)
	}
	// Validate filter arguments.
	if filterConfidence < 0 || filterConfidence >= 1 {
		printUsageAndExit("Invalid -min-confidence, must be in [0.0, 1.0): ", filterConfidence)
	}
	// Clean path arguments.
	if imageDirPath != "" {
		imageDirPath = filepath.Clean(imageDirPath)
	}
	if imageOutDirPath != "" {
		imageOutDirPath = filepath.Clean(imageOutDirPath)
	}
	if imageDirPath != "" && imageDirPath == imageOutDirPath {
		printUsageAndExit("The image input and output paths cannot be identical")
	}
	labelFileOrDirPath = filepath.Clean(labelFileOrDirPath)
	for i, v := range labelOutFileOrDirPaths {
		labelOutFileOrDirPaths[i] = filepath.Clean(v)
		if labelFileOrDirPath == labelOutFileOrDirPaths[i] {
			printUsageAndExit("The label input and output paths cannot be identical")
		}
	}
	tfRecordLabelMapFilePath = filepath.Clean(tfRecordLabelMapFilePath)
}
// main drives the conversion pipeline: parse the input annotations,
// apply label mapping, transformations, filters, and image processing,
// split into output datasets, and write each dataset in the target
// format.
func main() {
	// Parse input.
	var data []lblconv.AnnotatedFile
	var err error
	switch convertFrom {
	case AWSDetectLabels:
		data, err = lblconv.FromAWSDetectLabels(labelFileOrDirPath, imageDirPath)
	case AWSDetectText:
		data, err = lblconv.FromAWSDetectText(labelFileOrDirPath, imageDirPath)
	case Kitti:
		data, err = lblconv.FromKitti(labelFileOrDirPath, imageDirPath)
	case Sloth:
		data, err = lblconv.FromSloth(labelFileOrDirPath)
	case VIA:
		data, err = lblconv.FromVIA(labelFileOrDirPath)
	default:
		err = fmt.Errorf("unsupported input format")
	}
	if err != nil {
		log.Fatal("Failed to parse the input: ", err)
	}
	af := lblconv.AnnotatedFiles(data)
	// Map labels.
	if len(labelMappings) > 0 {
		if err := af.MapLabels(strings.Split(labelMappings, ",")); err != nil {
			log.Fatal("Failed to map labels: ", err)
		}
	}
	// Perform transformations.
	if bboxScaleWidth != 1 || bboxScaleHeight != 1 || bboxAspectRatio > 0 {
		af.TransformBboxes(bboxScaleWidth, bboxScaleHeight, bboxAspectRatio)
	}
	// Apply filters (empty filter strings keep everything).
	var labelNames, attrNames, requiredAttrNames []string
	if filterLabels != "" {
		labelNames = strings.Split(filterLabels, ",")
	}
	if filterAttributes != "" {
		attrNames = strings.Split(filterAttributes, ",")
	}
	if filterRequiredAttrs != "" {
		requiredAttrNames = strings.Split(filterRequiredAttrs, ",")
	}
	af.Filter(labelNames, attrNames, requiredAttrNames, filterConfidence, filterRequireLabel,
		filterMinBboxWidth, filterMinBboxHeight, filterMinAspectRatio, filterMaxAspectRatio)
	// Process images.
	err = af.ProcessImages(imageOutDirPath, imageResizeLonger, imageResizeShorter,
		imageDownsamplingFilter, imageUpsamplingFilter, imageOutEncoding, imageJPEGQuality,
		imageCropObjects)
	if err != nil {
		log.Fatal("Image processing failed: ", err)
	}
	// Split data into output datasets.
	var datasets []lblconv.AnnotatedFiles
	if len(labelOutSplits) == 1 {
		datasets = []lblconv.AnnotatedFiles{af}
	} else {
		if datasets, err = af.Split(labelOutSplits); err != nil {
			log.Fatal("Failed to split the dataset: ", err)
		}
	}
	// Write output datasets, one per configured split.
	for i, data := range datasets {
		outPath := labelOutFileOrDirPaths[i]
		switch convertTo {
		case Kitti:
			kittiData := lblconv.ToKitti(data)
			err = lblconv.WriteKitti(outPath, kittiData)
		case Sloth:
			slothData := lblconv.ToSloth(data)
			err = lblconv.WriteSloth(outPath, slothData)
		case TFRecord:
			err = lblconv.WriteTFRecord(outPath, tfRecordLabelMapFilePath, data, numShardFiles)
		case VIA:
			viaData := lblconv.ToVIA(data)
			err = lblconv.WriteVIA(outPath, viaData)
		default:
			err = fmt.Errorf("unsupported output format")
		}
		if err != nil {
			log.Fatal("Conversion failed: ", err)
		}
		log.Printf("Successfully wrote labels for %d files to %s", len(data), outPath)
	}
	log.Print("Total number of labelled files: ", len(af))
}
package mlpack
/*
#cgo CFLAGS: -I./capi -Wall
#cgo LDFLAGS: -L. -lmlpack_go_decision_tree
#include <capi/decision_tree.h>
#include <stdlib.h>
*/
import "C"
import "gonum.org/v1/gonum/mat"
// DecisionTreeOptionalParam holds the optional parameters accepted by
// DecisionTree().  Nil/zero fields are treated as unset; see
// DecisionTreeOptions() for the defaults.
type DecisionTreeOptionalParam struct {
	InputModel            *decisionTreeModel // Pre-trained tree, used for prediction.
	Labels                *mat.Dense         // Training labels.
	MaximumDepth          int                // Maximum tree depth (0 means no limit).
	MinimumGainSplit      float64            // Minimum gain required to split a node.
	MinimumLeafSize       int                // Minimum number of points in a leaf.
	PrintTrainingAccuracy bool               // Print the training accuracy.
	PrintTrainingError    bool               // Print the training error (deprecated).
	Test                  *matrixWithInfo    // Testing dataset (may be categorical).
	TestLabels            *mat.Dense         // Test point labels, for accuracy calculation.
	Verbose               bool               // Display informational messages.
	Training              *matrixWithInfo    // Training dataset (may be categorical).
	Weights               *mat.Dense         // The weight of labels.
}
// DecisionTreeOptions returns a DecisionTreeOptionalParam populated
// with the default values expected by DecisionTree().
func DecisionTreeOptions() *DecisionTreeOptionalParam {
	return &DecisionTreeOptionalParam{
		InputModel: nil,
		Labels: nil,
		MaximumDepth: 0,
		MinimumGainSplit: 1e-07,
		MinimumLeafSize: 20,
		PrintTrainingAccuracy: false,
		PrintTrainingError: false,
		Test: nil,
		TestLabels: nil,
		Training: nil,
		Verbose: false,
		Weights: nil,
	}
}
/*
Train and evaluate using a decision tree. Given a dataset containing numeric
or categorical features, and associated labels for each point in the dataset,
this program can train a decision tree on that data.
The training set and associated labels are specified with the "Training" and
"Labels" parameters, respectively. The labels should be in the range [0,
num_classes - 1]. Optionally, if "Labels" is not specified, the labels are
assumed to be the last dimension of the training dataset.
When a model is trained, the "OutputModel" output parameter may be used to
save the trained model. A model may be loaded for predictions with the
"InputModel" parameter. The "InputModel" parameter may not be specified when
the "Training" parameter is specified. The "MinimumLeafSize" parameter
specifies the minimum number of training points that must fall into each leaf
for it to be split. The "MinimumGainSplit" parameter specifies the minimum
gain that is needed for the node to split. The "MaximumDepth" parameter
specifies the maximum depth of the tree. If "PrintTrainingError" is
specified, the training error will be printed.
Test data may be specified with the "Test" parameter, and if performance
numbers are desired for that test set, labels may be specified with the
"TestLabels" parameter. Predictions for each test point may be saved via the
"Predictions" output parameter. Class probabilities for each prediction may
be saved with the "Probabilities" output parameter.
For example, to train a decision tree with a minimum leaf size of 20 on the
dataset contained in data with labels labels, saving the output model to tree
and printing the training error, one could call
// Initialize optional parameters for DecisionTree().
param := mlpack.DecisionTreeOptions()
param.Training = data
param.Labels = labels
param.MinimumLeafSize = 20
param.MinimumGainSplit = 0.001
param.PrintTrainingAccuracy = true
tree, _, _ := mlpack.DecisionTree(param)
Then, to use that model to classify points in test_set and print the test
error given the labels test_labels using that model, while saving the
predictions for each point to predictions, one could call
// Initialize optional parameters for DecisionTree().
param := mlpack.DecisionTreeOptions()
param.InputModel = &tree
param.Test = test_set
param.TestLabels = test_labels
_, predictions, _ := mlpack.DecisionTree(param)
Input parameters:
- InputModel (decisionTreeModel): Pre-trained decision tree, to be used
with test points.
- Labels (mat.Dense): Training labels.
- MaximumDepth (int): Maximum depth of the tree (0 means no limit).
Default value 0.
- MinimumGainSplit (float64): Minimum gain for node splitting. Default
value 1e-07.
- MinimumLeafSize (int): Minimum number of points in a leaf. Default
value 20.
- PrintTrainingAccuracy (bool): Print the training accuracy.
- PrintTrainingError (bool): Print the training error (deprecated; will
be removed in mlpack 4.0.0).
- Test (matrixWithInfo): Testing dataset (may be categorical).
- TestLabels (mat.Dense): Test point labels, if accuracy calculation is
desired.
- Training (matrixWithInfo): Training dataset (may be categorical).
- Verbose (bool): Display informational messages and the full list of
parameters and timers at the end of execution.
- Weights (mat.Dense): The weight of labels
Output parameters:
- outputModel (decisionTreeModel): Output for trained decision tree.
- predictions (mat.Dense): Class predictions for each test point.
- probabilities (mat.Dense): Class probabilities for each test point.
*/
func DecisionTree(param *DecisionTreeOptionalParam) (decisionTreeModel, *mat.Dense, *mat.Dense) {
resetTimers()
enableTimers()
disableBacktrace()
disableVerbose()
restoreSettings("Decision tree")
// Detect if the parameter was passed; set if so.
if param.InputModel != nil {
setDecisionTreeModel("input_model", param.InputModel)
setPassed("input_model")
}
// Detect if the parameter was passed; set if so.
if param.Labels != nil {
gonumToArmaUrow("labels", param.Labels)
setPassed("labels")
}
// Detect if the parameter was passed; set if so.
if param.MaximumDepth != 0 {
setParamInt("maximum_depth", param.MaximumDepth)
setPassed("maximum_depth")
}
// Detect if the parameter was passed; set if so.
if param.MinimumGainSplit != 1e-07 {
setParamDouble("minimum_gain_split", param.MinimumGainSplit)
setPassed("minimum_gain_split")
}
// Detect if the parameter was passed; set if so.
if param.MinimumLeafSize != 20 {
setParamInt("minimum_leaf_size", param.MinimumLeafSize)
setPassed("minimum_leaf_size")
}
// Detect if the parameter was passed; set if so.
if param.PrintTrainingAccuracy != false {
setParamBool("print_training_accuracy", param.PrintTrainingAccuracy)
setPassed("print_training_accuracy")
}
// Detect if the parameter was passed; set if so.
if param.PrintTrainingError != false {
setParamBool("print_training_error", param.PrintTrainingError)
setPassed("print_training_error")
}
// Detect if the parameter was passed; set if so.
if param.Test != nil {
gonumToArmaMatWithInfo("test", param.Test)
setPassed("test")
}
// Detect if the parameter was passed; set if so.
if param.TestLabels != nil {
gonumToArmaUrow("test_labels", param.TestLabels)
setPassed("test_labels")
}
// Detect if the parameter was passed; set if so.
if param.Training != nil {
gonumToArmaMatWithInfo("training", param.Training)
setPassed("training")
}
// Detect if the parameter was passed; set if so.
if param.Verbose != false {
setParamBool("verbose", param.Verbose)
setPassed("verbose")
enableVerbose()
}
// Detect if the parameter was passed; set if so.
if param.Weights != nil {
gonumToArmaMat("weights", param.Weights)
setPassed("weights")
}
// Mark all output options as passed.
setPassed("output_model")
setPassed("predictions")
setPassed("probabilities")
// Call the mlpack program.
C.mlpackDecisionTree()
// Initialize result variable and get output.
var outputModel decisionTreeModel
outputModel.getDecisionTreeModel("output_model")
var predictionsPtr mlpackArma
predictions := predictionsPtr.armaToGonumUrow("predictions")
var probabilitiesPtr mlpackArma
probabilities := probabilitiesPtr.armaToGonumMat("probabilities")
// Clear settings.
clearSettings()
// Return output(s).
return outputModel, predictions, probabilities
} | decision_tree.go | 0.715325 | 0.593963 | decision_tree.go | starcoder |
package index
import (
"time"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/querier/astmapper"
"github.com/grafana/loki/pkg/storage/config"
)
type periodIndex struct {
time.Time
idx int // address of the index to use
}
type Multi struct {
periods []periodIndex
indices []Interface
}
func NewMultiInvertedIndex(periods []config.PeriodConfig, indexShards uint32) (*Multi, error) {
var (
err error
ii Interface // always stored in 0th index
bitPrefixed Interface // always stored in 1st index
periodIndices []periodIndex
)
for _, pd := range periods {
switch pd.IndexType {
case config.TSDBType:
if bitPrefixed == nil {
bitPrefixed, err = NewBitPrefixWithShards(indexShards)
if err != nil {
return nil, errors.Wrapf(err, "creating tsdb inverted index for period starting %v", pd.From)
}
}
periodIndices = append(periodIndices, periodIndex{
Time: pd.From.Time.Time(),
idx: 1, // tsdb inverted index is always stored in position one
})
default:
if ii == nil {
ii = NewWithShards(indexShards)
}
periodIndices = append(periodIndices, periodIndex{
Time: pd.From.Time.Time(),
idx: 0, // regular inverted index is always stored in position zero
})
}
}
return &Multi{
periods: periodIndices,
indices: []Interface{ii, bitPrefixed},
}, nil
}
func (m *Multi) Add(labels []logproto.LabelAdapter, fp model.Fingerprint) (result labels.Labels) {
for _, i := range m.indices {
if i != nil {
result = i.Add(labels, fp)
}
}
return
}
func (m *Multi) Delete(labels labels.Labels, fp model.Fingerprint) {
for _, i := range m.indices {
if i != nil {
i.Delete(labels, fp)
}
}
}
func (m *Multi) Lookup(t time.Time, matchers []*labels.Matcher, shard *astmapper.ShardAnnotation) ([]model.Fingerprint, error) {
return m.indexFor(t).Lookup(matchers, shard)
}
func (m *Multi) LabelNames(t time.Time, shard *astmapper.ShardAnnotation) ([]string, error) {
return m.indexFor(t).LabelNames(shard)
}
func (m *Multi) LabelValues(t time.Time, name string, shard *astmapper.ShardAnnotation) ([]string, error) {
return m.indexFor(t).LabelValues(name, shard)
}
// Query planning is responsible for ensuring no query spans more than one inverted index.
// Therefore we don't need to account for both `from` and `through`.
func (m *Multi) indexFor(t time.Time) Interface {
for i := range m.periods {
if !m.periods[i].Time.After(t) && (i+1 == len(m.periods) || t.Before(m.periods[i+1].Time)) {
return m.indices[m.periods[i].idx]
}
}
return noopInvertedIndex{}
}
type noopInvertedIndex struct{}
func (noopInvertedIndex) Add(labels []logproto.LabelAdapter, fp model.Fingerprint) labels.Labels {
return nil
}
func (noopInvertedIndex) Delete(labels labels.Labels, fp model.Fingerprint) {}
func (noopInvertedIndex) Lookup(matchers []*labels.Matcher, shard *astmapper.ShardAnnotation) ([]model.Fingerprint, error) {
return nil, nil
}
func (noopInvertedIndex) LabelNames(shard *astmapper.ShardAnnotation) ([]string, error) {
return nil, nil
}
func (noopInvertedIndex) LabelValues(name string, shard *astmapper.ShardAnnotation) ([]string, error) {
return nil, nil
} | pkg/ingester/index/multi.go | 0.653901 | 0.40539 | multi.go | starcoder |
package secp256k1
import (
"crypto/elliptic"
"math/big"
"sync"
)
type CurveParams struct {
P *big.Int
N *big.Int
B *big.Int
Gx, Gy *big.Int
BitSize int
Name string
}
func (curve *CurveParams) Params() *elliptic.CurveParams {
return &elliptic.CurveParams{
P: curve.P,
N: curve.N,
B: curve.B,
Gx: curve.Gx,
Gy: curve.Gy,
BitSize: curve.BitSize,
Name: curve.Name,
}
}
func (curve *CurveParams) IsOnCurve(x, y *big.Int) bool {
// y^2
y2 := new(big.Int).Mul(y, y)
y2.Mod(y2, curve.P)
// x^3 + b
x3 := new(big.Int).Mul(x, x)
x3.Mul(x3, x)
x3.Add(x3, curve.B)
x3.Mod(x3, curve.P)
return y2.Cmp(x3) == 0
}
func zForAffine(xA, yA *big.Int) (z *big.Int) {
z = new(big.Int)
if xA.Sign() != 0 || yA.Sign() != 0 {
return z.SetInt64(1)
}
return // point at infinity
}
func (curve *CurveParams) affineFromJacobian(x, y, z *big.Int) (xA, yA *big.Int) {
// point at infinity
if z.Sign() == 0 {
return new(big.Int), new(big.Int)
}
// 1/Z^1
zInv := new(big.Int).ModInverse(z, curve.P)
// 1/Z^2
zzInv := new(big.Int).Mul(zInv, zInv)
// 1/Z^3
zzzInv := new(big.Int).Mul(zzInv, zInv)
// x = X/Z^2
xA = new(big.Int).Mul(x, zzInv)
xA.Mod(xA, curve.P)
// y = Y/Z^3
yA = new(big.Int).Mul(y, zzzInv)
yA.Mod(yA, curve.P)
return
}
func (curve *CurveParams) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) {
z1 := zForAffine(x1, y1)
z2 := zForAffine(x2, y2)
return curve.affineFromJacobian(curve.addJacobian(x1, y1, z1, x2, y2, z2))
}
// ref. https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
func (curve *CurveParams) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (x3, y3, z3 *big.Int) {
x3, y3, z3 = new(big.Int), new(big.Int), new(big.Int)
// (x1, y1) is point at infinity in affine coordinates
if z1.Sign() == 0 {
// (x3, y3) = (x2, y2) in affine coordinates
x3.Set(x2)
y3.Set(y2)
z3.Set(z2)
return
}
// (x2, y2) is point at infinity in affine coordinates
if z2.Sign() == 0 {
// (x3, y3) = (x1, y1) in affine coordinates
x3.Set(x1)
y3.Set(y1)
z3.Set(z1)
return
}
// Z1Z1 = Z1^2
z1z1 := new(big.Int).Mul(z1, z1) // 1S
// Z2Z2 = Z2^2
z2z2 := new(big.Int).Mul(z2, z2) // 2S
// U1 = X1*Z2Z2
u1 := new(big.Int).Mul(x1, z2z2) // 1M
u1.Mod(u1, curve.P)
// U2 = X2*Z1Z1
u2 := new(big.Int).Mul(x2, z1z1) // 2M
u2.Mod(u2, curve.P)
// S1 = Y1*Z2*Z2Z2
s1 := new(big.Int).Mul(y1, z2) // 3M
s1.Mul(s1, z2z2) // 4M
s1.Mod(s1, curve.P)
// S2 = Y2*Z1*Z1Z1
s2 := new(big.Int).Mul(y2, z1) // 5M
s2.Mul(s2, z1z1) // 6M
s2.Mod(s2, curve.P)
// x1 == x2 and y1 == y2 in affine coordinates
if u1.Cmp(u2) == 0 && s1.Cmp(s2) == 0 {
return curve.doubleJacobian(x1, y1, z1)
}
// H = U2 - U1
h := new(big.Int).Sub(u2, u1) // 1add
// I = (2*H)^2
i := new(big.Int).Lsh(h, 1) // 1*2
i.Mul(i, i) // 3S
// J = H*I
j := new(big.Int).Mul(h, i) // 7M
// r = 2*(S2 - S1)
r := new(big.Int).Sub(s2, s1) // 2add
r.Lsh(r, 1) // 2*2
// V = U1*I
v := new(big.Int).Mul(u1, i) // 8M
// tmp1 = 2*V
tmp1 := new(big.Int).Lsh(v, 1) // 3*2
// tmp2 = 2*S1*J
tmp2 := new(big.Int).Mul(s1, j) // 9M
tmp2.Lsh(tmp2, 1) // 4*2
// X3 = r^2 - J - 2*V
x3.Mul(r, r) // 4S
x3.Sub(x3, j) // 3add
x3.Sub(x3, tmp1) // 4add
x3.Mod(x3, curve.P)
// Y3 = r*(V - X3) - 2*S1*J
y3.Sub(v, x3) // 5add
y3.Mul(r, y3) // 10M
y3.Sub(y3, tmp2) // 6add
y3.Mod(y3, curve.P)
// Z3 = ((Z1 + Z2)^2 - Z1Z1 - Z2Z2)*H
z3.Add(z1, z2) // 7add
z3.Mul(z3, z3) // 5S
z3.Sub(z3, z1z1) // 8add
z3.Sub(z3, z2z2) // 9add
z3.Mul(z3, h) // 11M
z3.Mod(z3, curve.P)
// cost: 11M + 5S + 9add + 4*2
return
}
func (curve *CurveParams) Double(x1, y1 *big.Int) (x, y *big.Int) {
z1 := zForAffine(x1, y1)
return curve.affineFromJacobian(curve.doubleJacobian(x1, y1, z1))
}
// ref. https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
func (curve *CurveParams) doubleJacobian(x1, y1, z1 *big.Int) (x3, y3, z3 *big.Int) {
x3, y3, z3 = new(big.Int), new(big.Int), new(big.Int)
// (x1, y1) is point at infinity in affine coordinates
if y1.Sign() == 0 || z1.Sign() == 0 {
return
}
// A = X1^2
a := new(big.Int).Mul(x1, x1) // 1S
// B = Y1^2
b := new(big.Int).Mul(y1, y1) // 2S
// C = B^2
c := new(big.Int).Mul(b, b) // 3S
// D = 2*((X1 + B)^2 - A - C)
d := new(big.Int).Add(x1, b) // 1add
d.Mul(d, d) // 4S
d.Sub(d, a) // 2add
d.Sub(d, c) // 3add
d.Lsh(d, 1) // 1*2
// E = 3*A
e := new(big.Int).Lsh(a, 1) // 2*2
e.Add(e, a) // 4add
// F = E^2
f := new(big.Int).Mul(e, e) // 5S
// tmp1 = 2*D
tmp1 := new(big.Int).Lsh(d, 1) // 3*2
// tmp2 = 8*C
tmp2 := new(big.Int).Lsh(c, 3) // 1*8
// X3 = F - 2*D
x3.Sub(f, tmp1) // 5add
x3.Mod(x3, curve.P)
// Y3 = E * (D - X3) - 8*C
y3.Sub(d, x3) // 6add
y3.Mul(e, y3) // 1M
y3.Sub(y3, tmp2) // 7add
y3.Mod(y3, curve.P)
// Z3 = 2 * Y1 * Z1
z3.Mul(y1, z1) // 2M
z3.Lsh(z3, 1) // 4*2
z3.Mod(z3, curve.P)
// cost: 2M + 5S + 7add + 4*2 + 1*8
return
}
func (curve *CurveParams) ScalarMult(x1, y1 *big.Int, k []byte) (x, y *big.Int) {
z1 := new(big.Int).SetInt64(1)
x, y, z := new(big.Int), new(big.Int), new(big.Int)
for _, byte := range k {
for bitNum := 0; bitNum < 8; bitNum++ {
x, y, z = curve.doubleJacobian(x, y, z)
if byte&0x80 == 0x80 {
x, y, z = curve.addJacobian(x1, y1, z1, x, y, z)
}
byte <<= 1
}
}
return curve.affineFromJacobian(x, y, z)
}
func (curve *CurveParams) ScalarBaseMult(k []byte) (x, y *big.Int) {
return curve.ScalarMult(curve.Gx, curve.Gy, k)
}
const (
p = "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"
n = "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141"
b = "0000000000000000000000000000000000000000000000000000000000000007"
gx = "<KEY>"
gy = "<KEY>"
bitSize = 256
)
var (
initonce sync.Once
secp256k1 *CurveParams
)
func initS256() {
secp256k1 = &CurveParams{Name: "secp256k1"}
secp256k1.P = hexToBigInt(p)
secp256k1.N = hexToBigInt(n)
secp256k1.B = hexToBigInt(b)
secp256k1.Gx = hexToBigInt(gx)
secp256k1.Gy = hexToBigInt(gy)
secp256k1.BitSize = bitSize
}
func S256() elliptic.Curve {
initonce.Do(initS256)
return secp256k1
} | secp256k1.go | 0.609175 | 0.612455 | secp256k1.go | starcoder |
package network
import (
"errors"
"fmt"
neatmath "github.com/yaricom/goNEAT/v2/neat/math"
"math"
)
var (
// ErrNetExceededMaxActivationAttempts The error to be raised when maximal number of network activation attempts exceeded
ErrNetExceededMaxActivationAttempts = errors.New("maximal network activation attempts exceeded")
// ErrNetUnsupportedSensorsArraySize The error to be raised when unsupported size of the sensor data array provided
ErrNetUnsupportedSensorsArraySize = errors.New("the sensors array size is unsupported by network solver")
// ErrMaximalNetDepthExceeded The error to be raised when depth of the network exceeds maximal allowed
ErrMaximalNetDepthExceeded = errors.New("depth of the network exceeds maximum allowed, fallback to maximal")
// ErrZeroActivationStepsRequested the error to be raised when zero activation steps requested
ErrZeroActivationStepsRequested = errors.New("zero activation steps requested")
)
// NodeType NNodeType defines the type of NNode to create
type NodeType byte
// Predefined NNode types
const (
// NeuronNode The neuron type
NeuronNode NodeType = iota
// SensorNode The sensor type
SensorNode
)
// NodeTypeName Returns human-readable NNode type name for given constant value
func NodeTypeName(nType NodeType) string {
switch nType {
case NeuronNode:
return "NEURON"
case SensorNode:
return "SENSOR"
default:
return "UNKNOWN NODE TYPE"
}
}
// NodeNeuronType NeuronType defines the type of neuron to create
type NodeNeuronType byte
// These are NNode layer type
const (
// HiddenNeuron The node is in hidden layer
HiddenNeuron NodeNeuronType = iota
// InputNeuron The node is in input layer
InputNeuron
// OutputNeuron The node is in output layer
OutputNeuron
// BiasNeuron The node is bias
BiasNeuron
)
const (
hiddenNeuronName = "HIDN"
inputNeuronName = "INPT"
outputNeuronName = "OUTP"
biasNeuronName = "BIAS"
unknownNeuroName = "UNKNOWN NEURON TYPE"
)
// NeuronTypeName Returns human-readable neuron type name for given constant
func NeuronTypeName(neuronType NodeNeuronType) string {
switch neuronType {
case HiddenNeuron:
return hiddenNeuronName
case InputNeuron:
return inputNeuronName
case OutputNeuron:
return outputNeuronName
case BiasNeuron:
return biasNeuronName
default:
return unknownNeuroName
}
}
// NeuronTypeByName Returns neuron node type from its name
func NeuronTypeByName(name string) (NodeNeuronType, error) {
switch name {
case hiddenNeuronName:
return HiddenNeuron, nil
case inputNeuronName:
return InputNeuron, nil
case outputNeuronName:
return OutputNeuron, nil
case biasNeuronName:
return BiasNeuron, nil
default:
return math.MaxInt8, errors.New("Unknown neuron type name: " + name)
}
}
// ActivateNode Method to calculate activation for specified neuron node based on it's ActivationType field value.
// Will return error and set -0.0 activation if unsupported activation type requested.
func ActivateNode(node *NNode, a *neatmath.NodeActivatorsFactory) error {
out, err := a.ActivateByType(node.ActivationSum, node.Params, node.ActivationType)
if err == nil {
node.setActivation(out)
}
return err
}
// ActivateModule Method to activate neuron module presented by provided node. As a result of execution the activation values of all
// input nodes will be processed by corresponding activation function and corresponding activation values of output nodes
// will be set. Will panic if unsupported activation type requested.
func ActivateModule(module *NNode, a *neatmath.NodeActivatorsFactory) error {
inputs := make([]float64, len(module.Incoming))
for i, v := range module.Incoming {
inputs[i] = v.InNode.GetActiveOut()
}
outputs, err := a.ActivateModuleByType(inputs, module.Params, module.ActivationType)
if err != nil {
return err
}
if len(outputs) != len(module.Outgoing) {
return fmt.Errorf(
"number of output parameters [%d] returned by module activator doesn't match "+
"the number of output neurons of the module [%d]", len(outputs), len(module.Outgoing))
}
// set outputs
for i, out := range outputs {
module.Outgoing[i].OutNode.setActivation(out)
module.Outgoing[i].OutNode.isActive = true // activate output node
}
return nil
}
// NodeIdGenerator definition of the unique IDs generator for network nodes.
type NodeIdGenerator interface {
// NextNodeId is to get next unique node ID
NextNodeId() int
} | neat/network/common.go | 0.701713 | 0.473718 | common.go | starcoder |
package cdk
// Point2I and Rectangle combined
import (
"fmt"
"regexp"
"strconv"
)
type Region struct {
Point2I
Rectangle
}
func NewRegion(x, y, w, h int) *Region {
r := MakeRegion(x, y, w, h)
return &r
}
func MakeRegion(x, y, w, h int) Region {
return Region{
Point2I{X: x, Y: y},
Rectangle{W: w, H: h},
}
}
func ParseRegion(value string) (point Region, ok bool) {
if rxParseRegion.MatchString(value) {
m := rxParseRegion.FindStringSubmatch(value)
if len(m) == 5 {
x, _ := strconv.Atoi(m[1])
y, _ := strconv.Atoi(m[2])
w, _ := strconv.Atoi(m[3])
h, _ := strconv.Atoi(m[4])
return MakeRegion(x, y, w, h), true
}
}
if rxParseFourDigits.MatchString(value) {
m := rxParseFourDigits.FindStringSubmatch(value)
if len(m) == 5 {
x, _ := strconv.Atoi(m[1])
y, _ := strconv.Atoi(m[2])
w, _ := strconv.Atoi(m[3])
h, _ := strconv.Atoi(m[4])
return MakeRegion(x, y, w, h), true
}
}
return Region{}, false
}
func (r Region) String() string {
return fmt.Sprintf("{x:%v,y:%v,w:%v,h:%v}", r.X, r.Y, r.H, r.W)
}
func (r Region) Clone() (clone Region) {
clone.X = r.X
clone.Y = r.Y
clone.W = r.W
clone.H = r.H
return
}
func (r Region) NewClone() (clone *Region) {
clone = NewRegion(r.X, r.Y, r.W, r.H)
return
}
func (r *Region) Set(x, y, w, h int) {
r.X, r.Y, r.W, r.H = x, y, w, h
}
func (r *Region) SetRegion(region Region) {
r.X, r.Y, r.W, r.H = region.X, region.Y, region.W, region.H
}
func (r Region) HasPoint(pos Point2I) bool {
if r.X <= pos.X {
if r.Y <= pos.Y {
if (r.X + r.W) >= pos.X {
if (r.Y + r.H) >= pos.Y {
return true
}
}
}
}
return false
}
func (r Region) Origin() Point2I {
return Point2I{r.X, r.Y}
}
func (r Region) FarPoint() Point2I {
return Point2I{
r.X + r.W,
r.Y + r.H,
}
}
func (r Region) Size() Rectangle {
return Rectangle{r.W, r.H}
}
var (
rxParseRegion = regexp.MustCompile(`(?:i)^{??(?:x:)??(\d+),(?:y:)??(\d+),(?:w:)??(\d+),(?:h:)??(\d+)}??$`)
rxParseFourDigits = regexp.MustCompile(`(?:i)^\s*(\d+)\s*(\d+)\s*(\d+)\s*(\d+)\s*$`)
) | region.go | 0.684053 | 0.478407 | region.go | starcoder |
package notes
import (
"math"
"time"
)
const (
// C is middle C frequency
C = 261.63
// Cs is middle C sharp frequency
Cs = 277.18
// D is middle D frequency
D = 293.66
// Ds is middle D sharp frequency
Ds = 311.13
// E is middle E frequency
E = 329.63
// F is middle F frequency
F = 349.23
// Fs is middle F sharp frequency
Fs = 369.99
// G is middle G frequency
G = 392.00
// Gs is middle G sharp frequency
Gs = 415.30
// A is middle A frequency
A = 440.00
// As is middle A sharp frequency
As = 466.16
// B is middle B frequency
B = 493.88
)
// Note wrapper around things needed for creating sine wave for a note
type Note struct {
Volume float64
Frequency []float64
Octave float64
Length time.Duration
}
// SilentNote generates a silent note of defined length
func SilentNote(length time.Duration) *Note {
return &Note{
Volume: 0.0,
Frequency: []float64{0.0},
Octave: 1.0,
Length: length,
}
}
// NewNote generates a new note object
func NewNote(vol float64, len time.Duration, freq ...float64) *Note {
return &Note{
Volume: vol,
Frequency: freq,
Octave: 1.0,
Length: len,
}
}
// NewNoteWithOctave generates a new note object with a specified octave from the middle note
func NewNoteWithOctave(vol, oct float64, len time.Duration, freq ...float64) *Note {
return &Note{
Volume: vol,
Frequency: freq,
Octave: oct,
Length: len,
}
}
// AtTime grabs sin value at time t
func (note Note) AtTime(t, sr int) float64 {
return NoteAtTime(t, sr, note)
}
// NoteAtTime grabs sin value at time t
func NoteAtTime(t, sr int, note Note) float64 {
sum := 0.0
multiplier := (2.0 * math.Pi) / float64(sr)
for i := 0; i < len(note.Frequency); i++ {
sum += math.Sin((multiplier * (note.Frequency[i] * note.Octave)) * float64(t))
}
return sum
}
// ToData generates a float64 value representation of the note
// Accounts for mutliple notes by dividing the volume by the number of notes
func (note Note) ToData(index, sr int) float64 {
freqLen := len(note.Frequency)
vol := note.Volume
if freqLen > 1 {
vol = vol / float64(freqLen)
}
return vol * note.AtTime(index, sr)
} | notes/single.go | 0.729327 | 0.434401 | single.go | starcoder |
package biguint
// An exercise to help me to learn the basics of Go.
// The arithmetic uses the algorithms I learned in primary school - long division etc.
// I resisted the urge to call it "bigunit" with great difficulty.
import (
"strconv"
)
// biguints are implemnted as slices of ints.
type biguint []int
// String is the standard string function for biguint.
func (b biguint) String() string {
ret := ""
for _, i := range b {
ret += strconv.Itoa(i)
}
return ret
}
// add adds biguints.
func (b1 biguint) add(bn ...biguint) biguint {
work := b1
for _, s := range bn {
work = add2(work, s)
}
return work
}
// add2 adds 2 biguints (not a method).
func add2(s1, s2 biguint) biguint {
var arg1, arg2 biguint
// Make the 2 slices equal length to simplify the addition.
switch {
case len(s1) == len(s2):
arg1 = s1
arg2 = s2
case len(s1) < len(s2):
arg1 = append(make([]int, len(s2)-len(s1)), s1...)
arg2 = s2
case len(s1) > len(s2):
arg1 = s1
arg2 = append(make([]int, len(s1)-len(s2)), s2...)
}
carry := 0
ans := make(biguint, len(arg1))
for i := len(arg1) - 1; i >= 0; i-- {
sum := arg1[i] + arg2[i] + carry
ans[i] = sum % 10
carry = sum / 10
}
if carry == 1 {
return append([]int{1}, ans...)
} else {
return ans
}
}
// z9 removes leading zeros from a biguint (z9 from the COBOL PICTURE clause).
func (b biguint) z9() biguint {
nzi := -1
for i, dig := range b {
if dig != 0 {
nzi = i
break
}
}
if nzi == -1 {
nzi = len(b) - 1
}
return b[nzi:]
}
// compare compares two biguints for <, >, =.
func (b1 biguint) compare(b2 biguint) string {
v1 := b1.z9()
v2 := b2.z9()
switch {
case len(v1) < len(v2):
return "<"
case len(v1) > len(v2):
return ">"
}
for i, d1 := range v1 {
switch {
case d1 < v2[i]:
return "<"
case d1 > v2[i]:
return ">"
}
}
return "="
}
// greater returns true if b1 > b2.
func (b1 biguint) greater(b2 biguint) bool {
if b1.compare(b2) == ">" {
return true
} else {
return false
}
}
// ge returns true if b1 >= b2.
func (b1 biguint) ge(b2 biguint) bool {
if b1.compare(b2) == ">" || b1.compare(b2) == "=" {
return true
} else {
return false
}
}
// less returns true if b1 < b2.
func (b1 biguint) less(b2 biguint) bool {
if b1.compare(b2) == "<" {
return true
} else {
return false
}
}
// le returns true if b1 <= b2.
func (b1 biguint) le(b2 biguint) bool {
if b1.compare(b2) == "<" || b1.compare(b2) == "=" {
return true
} else {
return false
}
}
// equal returns true if b1 = b2.
func (b1 biguint) equal(b2 biguint) bool {
if b1.compare(b2) == "=" {
return true
} else {
return false
}
}
// subtract subtracts biguints.
func (a1 biguint) subtract(a2 biguint) biguint {
var arg1, arg2 biguint
b1 := a1.z9()
b2 := a2.z9()
// Make the 2 slices equal length to simplify the subtraction.
// Also make arg1 the larger if the biguints are not equal.
switch {
case len(b1) == len(b2):
if b1.greater(b2) {
arg1 = b1
arg2 = b2
} else {
arg1 = b2
arg2 = b1
}
case len(b1) < len(b2):
arg2 = append(make([]int, len(b2)-len(b1)), b1...)
arg1 = b2
case len(b1) > len(b2):
arg1 = b1
arg2 = append(make([]int, len(b1)-len(b2)), b2...)
}
carry := 0
ans := make(biguint, len(arg1))
for i := len(arg1) - 1; i >= 0; i-- {
sum := arg2[i] + carry
if sum > arg1[i] {
carry = 1
ans[i] = 10 + arg1[i] - sum
} else {
carry = 0
ans[i] = arg1[i] - sum
}
}
return ans.z9()
}
// times multiplies two biguints.
func (b1 biguint) times(b2 biguint) biguint {
var m1, m2 biguint // m2 the multiplier, m1 the multiplicand
if len(b1.z9()) < len(b2.z9()) {
m2 = b1.z9()
m1 = b2.z9()
} else {
m1 = b1.z9()
m2 = b2.z9()
}
ans := make(biguint, len(m1)+len(m2))
for mx := len(m2) - 1; mx >= 0; mx-- {
work := make(biguint, len(m1)+len(m2))
carry := 0
wx := len(work) - (len(m2) - mx) // index into work - start one place further left each time.
for dx := len(m1) - 1; dx >= 0; dx-- {
prod := m1[dx]*m2[mx] + carry
work[wx] = prod % 10
carry = prod / 10
wx--
}
work[wx] = carry
ans = ans.add(work)
}
return ans.z9()
}
// divby divides two biguints.
func (b1 biguint) divby(b2 biguint) (biguint, bool) {
var dor, dend biguint // divisor and dividend
var work biguint
if b1.less(b2) {
dor = b1.z9()
dend = b2.z9()
} else {
dend = b1.z9()
dor = b2.z9()
}
if len(dor) == 1 && dor[0] == 0 {
return nil, false // division by zero
}
ans := make(biguint, len(dend))
start := len(dor) // start point of dividend the loop begins at
wk := dend[:len(dor)]
if dor.le(wk) {
start -= 1
}
if start > 0 {
work = dend[:start]
}
for _, dig := range dend[start:] {
var prod biguint
work = append(work, dig)
mult := biguint{0} // here we do division by multiplication (because the divisor might be too big)
for {
mult[0] += 1
prod = mult.times(dor)
if prod.greater(work) {
break
}
}
mult[0] -= 1
ans = append(ans, mult...)
work = work.subtract(mult.times(dor))
}
return ans.z9(), true
}
// exp is biguint exponentiation.
//TODO much too slow with very large exponents
func (b biguint) exp(e biguint) biguint {
bigzero := biguint{0}
bigone := biguint{1}
rslt := biguint{1}
count := make(biguint, len(e))
count = e
for !count.equal(bigzero) {
rslt = rslt.times(b)
count = count.subtract(bigone)
}
return rslt
}
// strToBig converts a string of digits to a biguint.
func strToBig(s string) (biguint, error) {
bi := make(biguint, len(s))
for i, _ := range s {
var err error
bi[i], err = strconv.Atoi(s[i : i+1])
if err != nil {
return nil, err
}
}
return bi, nil
} | biguint.go | 0.586168 | 0.47171 | biguint.go | starcoder |
package query
import (
"encoding/json"
"fmt"
"strconv"
"strings"
)
// isNumber takes an interface as input, and returns a float64 if the type is
// compatible (int* or float*).
func isNumber(n interface{}) (float64, bool) {
switch n := n.(type) {
case int:
return float64(n), true
case int8:
return float64(n), true
case int16:
return float64(n), true
case int32:
return float64(n), true
case int64:
return float64(n), true
case uint:
return float64(n), true
case uint8:
return float64(n), true
case uint16:
return float64(n), true
case uint32:
return float64(n), true
case uint64:
return float64(n), true
case float32:
return float64(n), true
case float64:
return n, true
default:
return 0, false
}
}
// quoteField return the field quoted if needed.
func quoteField(field string) string {
for i := 0; i < len(field); i++ {
b := field[i]
if (b >= '0' && b <= '9') ||
(b >= 'a' && b <= 'z') ||
(b >= 'A' && b <= 'Z') ||
b == '$' || b == '.' || b == '_' || b == '-' {
continue
}
return strconv.Quote(field)
}
return field
}
func valueString(v Value) string {
switch t := v.(type) {
case string:
return strconv.Quote(t)
case int:
return strconv.Itoa(t)
case int8:
return strconv.FormatInt(int64(t), 10)
case int16:
return strconv.FormatInt(int64(t), 10)
case int32:
return strconv.FormatInt(int64(t), 10)
case int64:
return strconv.FormatInt(t, 10)
case uint:
return strconv.FormatUint(uint64(t), 10)
case uint8:
return strconv.FormatUint(uint64(t), 10)
case uint16:
return strconv.FormatUint(uint64(t), 10)
case uint32:
return strconv.FormatUint(uint64(t), 10)
case uint64:
return strconv.FormatUint(t, 10)
case float32:
return strconv.FormatFloat(float64(t), 'f', -1, 32)
case float64:
return strconv.FormatFloat(t, 'f', -1, 64)
default:
if s, ok := v.(fmt.Stringer); ok {
return strconv.Quote(s.String())
}
b, _ := json.Marshal(v)
return string(b)
}
}
// getField gets the value of a given field by supporting sub-field path. A get
// on field.subfield is equivalent to payload["field"]["subfield].
func getField(payload map[string]interface{}, name string) interface{} {
val, found := getFieldExist(payload, name)
if !found {
return nil
}
return val
}
func getFieldExist(payload map[string]interface{}, name string) (interface{}, bool) {
// Split the name to get the current level name on first element and the
// rest of the path as second element if dot notation is used (i.e.:
// field.subfield.subsubfield -> field, subfield.subsubfield).
path := strings.SplitN(name, ".", 2)
if value, found := payload[path[0]]; found {
if len(path) == 2 {
if subPayload, ok := value.(map[string]interface{}); ok {
// Check next level.
return getFieldExist(subPayload, path[1])
}
// The requested depth does not exist.
return nil, false
}
// Full path has been found.
return value, true
}
return nil, false
} | schema/query/utils.go | 0.666822 | 0.429908 | utils.go | starcoder |
package pufferpanel
import (
"errors"
"fmt"
"github.com/spf13/cast"
"reflect"
"time"
)
//Converts the val parameter to the same type as the target
func Convert(val interface{}, target interface{}) (interface{}, error) {
switch target.(type) {
case string:
if val == nil {
return "", nil
}
return cast.ToStringE(val)
case int:
if val == nil {
return int(0), nil
}
return cast.ToIntE(val)
case int8:
if val == nil {
return int8(0), nil
}
return cast.ToInt8E(val)
case int16:
if val == nil {
return int16(0), nil
}
return cast.ToInt16E(val)
case int32:
if val == nil {
return int32(0), nil
}
return cast.ToInt32E(val)
case int64:
if val == nil {
return int64(0), nil
}
return cast.ToInt64E(val)
case uint:
if val == nil {
return uint(0), nil
}
return cast.ToUintE(val)
case uint8:
if val == nil {
return uint8(0), nil
}
return cast.ToUint8E(val)
case uint16:
if val == nil {
return uint16(0), nil
}
return cast.ToUint16E(val)
case uint32:
if val == nil {
return uint32(0), nil
}
return cast.ToUint32E(val)
case uint64:
if val == nil {
return uint64(0), nil
}
return cast.ToUint64E(val)
case bool:
if val == nil {
return false, nil
}
return cast.ToBoolE(val)
case time.Duration:
if val == nil {
return time.Duration(0), nil
}
return cast.ToDurationE(val)
case time.Time:
if val == nil {
return time.Time{}, nil
}
return cast.ToTimeE(val)
case float32:
if val == nil {
return float32(0), nil
}
return cast.ToFloat64E(val)
case float64:
if val == nil {
return float64(0), nil
}
return cast.ToFloat64E(val)
case map[string]string:
if val == nil {
return map[string]string{}, nil
}
return cast.ToStringMapStringE(val)
case map[string][]string:
if val == nil {
return map[string][]string{}, nil
}
return cast.ToStringMapStringSliceE(val)
case map[string]bool:
if val == nil {
return map[string]bool{}, nil
}
return cast.ToStringMapBoolE(val)
case map[string]interface{}:
if val == nil {
return map[string]interface{}{}, nil
}
return cast.ToStringMapE(val)
case map[string]int:
if val == nil {
return map[string]int{}, nil
}
return cast.ToStringMapIntE(val)
case map[string]int64:
if val == nil {
return map[string]int64{}, nil
}
return cast.ToStringMapInt64E(val)
case []interface{}:
if val == nil {
return []interface{}{}, nil
}
return cast.ToSliceE(val)
case []bool:
if val == nil {
return []bool{}, nil
}
return cast.ToBoolSliceE(val)
case []string:
if val == nil {
return []string{}, nil
}
return cast.ToStringSliceE(val)
case []int:
if val == nil {
return []int{}, nil
}
return cast.ToIntSliceE(val)
case []time.Duration:
if val == nil {
return []time.Duration{}, nil
}
return cast.ToDurationSliceE(val)
}
return nil, errors.New(fmt.Sprintf("cannot convert %s to %s", reflect.TypeOf(val), reflect.TypeOf(target)))
} | conversion.go | 0.507324 | 0.402069 | conversion.go | starcoder |
package intersect
import "math"
// Line is infinite in length and is defined by any two points.
// Internally the line is stored in slope-intercept form (y = slope*x + yInt);
// vertical lines cannot be expressed that way, so they are encoded with an
// infinite slope and their x position kept in xVal.
type Line struct {
	slope float64 // gradient; +/-Inf marks a vertical line
	yInt  float64 // y-axis intercept; not meaningful when the line is vertical
	xVal  float64 // only used if slope is inf
}

// ToLine returns the line itself; it exists only to satisfy the Edge interface.
func (l1 Line) ToLine() Line {
	return l1
}
// NewLine creates a new line passing through p1 and p2. If p1 == p2 the
// slope computes to NaN (0/0) and is normalized to +Inf, producing a
// vertical line through p1.X. A genuinely vertical line (p1.X == p2.X,
// p1.Y != p2.Y) yields an infinite slope directly; in both cases yInt is
// garbage (NaN/Inf) but unused, since xVal carries the line's position.
func NewLine(p1, p2 Vector) Line {
	xVal := p1.X
	slope := (p2.Y - p1.Y) / (p2.X - p1.X)
	if math.IsNaN(slope) {
		slope = math.Inf(1)
	}
	yInt := p1.Y - slope*p1.X
	return Line{slope, yInt, xVal}
}
// Equals checks if the two lines are the same, within the package tolerance
// (floatEquals, 1e-9). Two vertical lines compare by x position only, since
// their slope/yInt encodings are not meaningful.
func (l1 Line) Equals(l2 Line) bool {
	if l1.IsVertical() && l2.IsVertical() {
		return floatEquals(l1.xVal, l2.xVal)
	}
	return floatEquals(l1.slope, l2.slope) && floatEquals(l1.yInt, l2.yInt)
}

// IsPointOn returns true if the point lies on the line (within tolerance).
func (l1 Line) IsPointOn(p1 Vector) bool {
	if l1.IsVertical() {
		return floatEquals(p1.X, l1.xVal)
	}
	return floatEquals(l1.yInt+l1.slope*p1.X, p1.Y)
}

// IsParallel returns true if the slopes are equal. If the lines are the same
// line, it will still return true. Two vertical lines are always parallel.
func (l1 Line) IsParallel(l2 Line) bool {
	return floatEquals(l1.slope, l2.slope) || (l1.IsVertical() && l2.IsVertical())
}

// IsIntersect returns true if the two lines cross in exactly one point; it is
// false for parallel lines, including the case where both are the same line.
func (l1 Line) IsIntersect(l2 Line) bool {
	return !l1.IsParallel(l2)
}

// IsVertical returns true if the slope of the line is infinite (either sign).
func (l1 Line) IsVertical() bool {
	return math.IsInf(l1.slope, 0)
}
// EvalX returns the x for a given y on the line. For a horizontal line
// (slope 0) no single x exists, so NaN is returned.
// NOTE(review): a vertical line falls through to the division and produces
// NaN (via Inf arithmetic on yInt) rather than returning xVal — confirm
// whether callers ever evaluate vertical lines here before changing.
func (l1 Line) EvalX(y float64) float64 {
	if l1.slope == 0 {
		return math.NaN()
	}
	return (y - l1.yInt) / l1.slope
}

// EvalY returns the y for a given x on the line; NaN for vertical lines,
// where y is unconstrained.
func (l1 Line) EvalY(x float64) float64 {
	if l1.IsVertical() {
		return math.NaN()
	}
	return l1.yInt + l1.slope*x
}
// Intersect returns the point the line and edge intersect, and a boolean which determines if they intersect
func (l1 Line) Intersect(e2 Edge) (Vector, bool) {
return Intersect(l1, e2)
} | line.go | 0.853593 | 0.489626 | line.go | starcoder |
package rivescript
import "fmt"
/* Topic inheritance functions.
These are helper functions to assist with topic inheritance and includes.
*/
/*
getTopicTriggers recursively scans topics and collects triggers therein.
This function scans through a topic and collects its triggers, along with the
triggers belonging to any topic that's inherited by or included by the parent
topic. Some triggers will come out with an {inherits} tag to signify
inheritance depth.
Params:
topic: The name of the topic to scan through
thats: Whether to get only triggers that have %Previous.
`false` returns all triggers.
Each "trigger" returned from this function is actually an array, where index
0 is the trigger text and index 1 is the pointer to the trigger's data within
the original topic structure.
*/
// getTopicTriggers is the public entry point; it starts the recursive scan
// with zeroed depth/inheritance counters and no inherited status.
func (rs *RiveScript) getTopicTriggers(topic string, thats bool) []sortedTriggerEntry {
	return rs._getTopicTriggers(topic, thats, 0, 0, false)
}
/*
_getTopicTriggers implements the bulk of the logic for getTopicTriggers.
Additional private parameters used:
- depth: Recursion depth counter.
- inheritance: Inheritance counter.
- inherited: Inherited status.
Important info about the depth vs. inheritance params to this function:
depth increments by 1 each time this function recursively calls itself.
inheritance only increments by 1 when this topic inherits another topic.
This way, `> topic alpha includes beta inherits gamma` will have this effect:
- alpha and beta's triggers are combined together into one matching pool,
- and then those triggers have higher priority than gamma's.
The inherited option is true if this is a recursive call, from a topic that
inherits other topics. This forces the {inherits} tag to be added to the
triggers. This only applies when the topic 'includes' another topic.
*/
func (rs *RiveScript) _getTopicTriggers(topic string, thats bool, depth uint, inheritance int, inherited bool) []sortedTriggerEntry {
	// Break if we're in too deep.
	if depth > rs.Depth {
		rs.warn("Deep recursion while scanning topic inheritance!")
		return []sortedTriggerEntry{}
	}
	/*
		Keep in mind here that there is a difference between 'includes' and
		'inherits' -- topics that inherit other topics are able to OVERRIDE
		triggers that appear in the inherited topic. This means that if the top
		topic has a trigger of simply '*', then NO triggers are capable of
		matching in ANY inherited topic, because even though * has the lowest
		priority, it has an automatic priority over all inherited topics.
		The getTopicTriggers method takes this into account. All topics that
		inherit other topics will have their triggers prefixed with a fictional
		{inherits} tag, which would start at {inherits=0} and increment of this
		topic has other inheriting topics. So we can use this tag to make sure
		topics that inherit things will have their triggers always be on top of
		the stack, from inherits=0 to inherits=n.
	*/
	rs.say("Collecting trigger list for topic %s (depth=%d; inheritance=%d; inherited=%v)",
		topic, depth, inheritance, inherited)
	// Collect an array of triggers to return.
	triggers := []sortedTriggerEntry{}
	// Get those that exist in this topic directly.
	inThisTopic := []sortedTriggerEntry{}
	if _, ok := rs.topics[topic]; ok {
		for _, trigger := range rs.topics[topic].triggers {
			if !thats {
				// All triggers.
				entry := sortedTriggerEntry{trigger.trigger, trigger}
				inThisTopic = append(inThisTopic, entry)
			} else {
				// Only triggers that have %Previous.
				if trigger.previous != "" {
					inThisTopic = append(inThisTopic, sortedTriggerEntry{trigger.previous, trigger})
				}
			}
		}
	}
	// Does this topic include others? Included triggers join the same
	// matching pool; no {inherits} prefix is forced by this branch.
	if _, ok := rs.includes[topic]; ok {
		for includes := range rs.includes[topic] {
			rs.say("Topic %s includes %s", topic, includes)
			triggers = append(triggers, rs._getTopicTriggers(includes, thats, depth+1, inheritance+1, false)...)
		}
	}
	// Does this topic inherit others? Passing inherited=true forces the
	// recursive call to prefix those triggers with {inherits}.
	if _, ok := rs.inherits[topic]; ok {
		for inherits := range rs.inherits[topic] {
			rs.say("Topic %s inherits %s", topic, inherits)
			triggers = append(triggers, rs._getTopicTriggers(inherits, thats, depth+1, inheritance+1, true)...)
		}
	}
	// Collect the triggers for *this* topic. If this topic inherits any other
	// topics, it means that this topic's triggers have higher priority than
	// those in any inherited topics. Enforce this with an {inherits} tag.
	if len(rs.inherits[topic]) > 0 || inherited {
		for _, trigger := range inThisTopic {
			rs.say("Prefixing trigger with {inherits=%d} %s", inheritance, trigger.trigger)
			label := fmt.Sprintf("{inherits=%d}%s", inheritance, trigger.trigger)
			triggers = append(triggers, sortedTriggerEntry{label, trigger.pointer})
		}
	} else {
		for _, trigger := range inThisTopic {
			triggers = append(triggers, sortedTriggerEntry{trigger.trigger, trigger.pointer})
		}
	}
	return triggers
}
/*
getTopicTree returns an array of every topic related to a topic (all the
topics it inherits or includes, plus all the topics included or inherited
by those topics, and so on). The array includes the original topic, too.
*/
func (rs *RiveScript) getTopicTree(topic string, depth uint) []string {
// Break if we're in too deep.
if depth > rs.Depth {
rs.warn("Deep recursion while scanning topic tree!")
return []string{}
}
// Collect an array of all topics.
topics := []string{topic}
for includes := range rs.includes[topic] {
topics = append(topics, rs.getTopicTree(includes, depth+1)...)
}
for inherits := range rs.inherits[topic] {
topics = append(topics, rs.getTopicTree(inherits, depth+1)...)
}
return topics
} | inheritance.go | 0.78316 | 0.500854 | inheritance.go | starcoder |
package timeseries
import (
"math"
"sort"
"time"
)
// Align snaps every point's timestamp to the start of its interval frame and
// fills any empty frames between consecutive points with nil-valued points.
// Series shorter than two points, or a non-positive interval, are returned
// unchanged.
func (ts TimeSeries) Align(interval time.Duration) TimeSeries {
	if interval <= 0 || ts.Len() < 2 {
		return ts
	}
	alignedTs := NewTimeSeries()
	var frameTs = ts[0].GetTimeFrame(interval)
	var pointFrameTs time.Time
	var point TimePoint
	for i := 0; i < ts.Len(); i++ {
		point = ts[i]
		pointFrameTs = point.GetTimeFrame(interval)
		// Pad the gap between the previous frame and this point's frame
		// with explicit null points, one per missing interval.
		if pointFrameTs.After(frameTs) {
			for frameTs.Before(pointFrameTs) {
				alignedTs = append(alignedTs, TimePoint{Time: frameTs, Value: nil})
				frameTs = frameTs.Add(interval)
			}
		}
		alignedTs = append(alignedTs, TimePoint{Time: pointFrameTs, Value: point.Value})
		frameTs = frameTs.Add(interval)
	}
	return alignedTs
}
// FillTrendWithNulls fills missing points in a trend with null values, using
// a fixed one-hour frame. Unlike Align, real points keep their original
// timestamps; only the inserted gap fillers sit on the hour grid.
func (ts TimeSeries) FillTrendWithNulls() TimeSeries {
	if ts.Len() < 2 {
		return ts
	}
	interval := time.Hour
	alignedTs := NewTimeSeries()
	var frameTs = ts[0].GetTimeFrame(interval)
	var pointFrameTs time.Time
	var point TimePoint
	for i := 0; i < ts.Len(); i++ {
		point = ts[i]
		pointFrameTs = point.GetTimeFrame(interval)
		// Insert one null point for every whole hour missing before this point.
		if pointFrameTs.After(frameTs) {
			for frameTs.Before(pointFrameTs) {
				alignedTs = append(alignedTs, TimePoint{Time: frameTs, Value: nil})
				frameTs = frameTs.Add(interval)
			}
		}
		// Keep the original point (with its original, unaligned timestamp).
		alignedTs = append(alignedTs, point)
		frameTs = frameTs.Add(interval)
	}
	return alignedTs
}
// DetectInterval detects the interval between data points based on the
// median delta (in milliseconds) between consecutive timestamps; for an even
// number of deltas the upper of the two middle values is taken. Returns 0
// for series with fewer than two points.
func (ts TimeSeries) DetectInterval() time.Duration {
	if ts.Len() < 2 {
		return 0
	}
	deltas := make([]int, 0)
	for i := 1; i < ts.Len(); i++ {
		delta := ts[i].Time.Sub(ts[i-1].Time)
		deltas = append(deltas, int(delta.Milliseconds()))
	}
	sort.Ints(deltas)
	midIndex := int(math.Floor(float64(len(deltas)) * 0.5))
	return time.Duration(deltas[midIndex]) * time.Millisecond
}
// AlignSeriesIntervals brings multiple series onto a common time grid by
// interpolating every coarser series down to the smallest interval found.
// Series are returned unchanged when any of them lacks interval metadata or
// when the smallest interval is 0 (i.e. some series is not aligned).
func AlignSeriesIntervals(series []*TimeSeriesData) []*TimeSeriesData {
	if len(series) == 0 {
		return series
	}
	// Skip if interval not defined
	for _, s := range series {
		if s.Meta.Interval == nil {
			return series
		}
	}
	minInterval := *series[0].Meta.Interval
	for _, s := range series {
		if *s.Meta.Interval < minInterval {
			minInterval = *s.Meta.Interval
		}
	}
	// 0 interval means series is not aligned, so it's tricky to align multiple series
	if minInterval == 0 {
		return series
	}
	for _, s := range series {
		// Series already on the finest grid (or too short) need no work.
		if s.Len() < 2 || *s.Meta.Interval == minInterval {
			continue
		}
		s.TS = s.TS.Interpolate(minInterval)
	}
	return series
}
func (ts TimeSeries) Interpolate(interval time.Duration) TimeSeries {
if interval <= 0 || ts.Len() < 2 {
return ts
}
alignedTs := NewTimeSeries()
var frameTs = ts[0].Time
var pointFrameTs time.Time
var point TimePoint
var nextPoint TimePoint
for i := 0; i < ts.Len()-1; i++ {
point = ts[i]
nextPoint = ts[i+1]
pointFrameTs = point.Time
if point.Value != nil && nextPoint.Value != nil {
frameTs = pointFrameTs.Add(interval)
for frameTs.Before(nextPoint.Time) {
pointValue := linearInterpolation(frameTs, point, nextPoint)
alignedTs = append(alignedTs, TimePoint{Time: frameTs, Value: &pointValue})
frameTs = frameTs.Add(interval)
}
}
alignedTs = append(alignedTs, TimePoint{Time: pointFrameTs, Value: point.Value})
frameTs = frameTs.Add(interval)
}
return alignedTs
} | pkg/timeseries/align.go | 0.785309 | 0.589539 | align.go | starcoder |
package main
import (
"math"
"github.com/unixpickle/model3d/model2d"
"github.com/unixpickle/model3d/model3d"
"github.com/unixpickle/model3d/render3d"
"github.com/unixpickle/model3d/toolbox3d"
)
// main builds the scene (an engraved base plate plus a simple house),
// marches it into a triangle mesh, simplifies coplanar faces, and writes
// both an STL model and a grid of preview renderings.
func main() {
	solid := model3d.JoinedSolid{
		BaseSolid(),
		HouseSolid(),
	}
	// 0.01 grid resolution with 8 binary-search refinement steps.
	mesh := model3d.MarchingCubesSearch(solid, 0.01, 8)
	mesh = mesh.EliminateCoplanar(1e-5)
	mesh.SaveGroupedSTL("house.stl")
	render3d.SaveRandomGrid("rendering.png", mesh, 3, 3, 300, nil)
}
// BaseSolid returns a 4x4 base plate (0.1 thick, top at z=0) with the text
// from text.png embossed 0.1 units above it. The bitmap is mirrored,
// smoothed, scaled from pixel units to world units (1/256) and shifted so
// it is centered on the plate.
func BaseSolid() model3d.Solid {
	text := model2d.MustReadBitmap("text.png", nil).FlipX().Mesh().SmoothSq(30)
	text = text.Scale(1.0 / 256.0).Translate(model2d.XY(-2, -2))
	textSolid := model2d.NewColliderSolid(model2d.MeshToCollider(text))
	return model3d.JoinedSolid{
		model3d.NewRect(model3d.XYZ(-2, -2, -0.1), model3d.XYZ(2, 2, 0)),
		model3d.ProfileSolid(textSolid, 0, 0.1),
	}
}
// HouseSolid assembles the house: a 2x1x1 body, a triangular roof, a door,
// a chimney, and windows placed on both long faces (y = +/-0.5): one high
// central window per face plus four more in a 2x2 pattern.
func HouseSolid() model3d.Solid {
	window := WindowSolid()
	windows := model3d.JoinedSolid{}
	for _, y := range []float64{-0.5, 0.5} {
		// Central window near the roof line.
		windows = append(windows, model3d.TranslateSolid(window, model3d.YZ(y, 0.7)))
		// 2x2 grid of windows on each face.
		for _, z := range []float64{0.25, 0.7} {
			for _, x := range []float64{-0.6, 0.6} {
				windows = append(windows, model3d.TranslateSolid(window, model3d.XYZ(x, y, z)))
			}
		}
	}
	return model3d.JoinedSolid{
		// Body of house.
		model3d.NewRect(model3d.XYZ(-1, -0.5, 0), model3d.XYZ(1, 0.5, 1)),
		RoofSolid(),
		DoorSolid(),
		ChimneySolid(),
		windows,
	}
}
// RoofSolid returns a triangular prism roof: the two sloped constraints form
// a 45-degree ridge along the x axis, the z>=0 and |x|<=1 constraints close
// the polytope, and the whole prism is translated to sit on top of the
// walls at z=1.
func RoofSolid() model3d.Solid {
	prism := model3d.ConvexPolytope{
		&model3d.LinearConstraint{
			Normal: model3d.YZ(-1, 1).Normalize(),
			Max:    math.Sqrt2 / 4,
		},
		&model3d.LinearConstraint{
			Normal: model3d.YZ(1, 1).Normalize(),
			Max:    math.Sqrt2 / 4,
		},
		&model3d.LinearConstraint{
			Normal: model3d.Z(-1),
			Max:    0,
		},
		&model3d.LinearConstraint{
			Normal: model3d.X(-1),
			Max:    1,
		},
		&model3d.LinearConstraint{
			Normal: model3d.X(1),
			Max:    1,
		},
	}
	return model3d.TranslateSolid(prism.Solid(), model3d.Z(1))
}
// ChimneySolid returns a small rectangular chimney rising above the roof
// (top at z=1.6) near one end of the house.
func ChimneySolid() model3d.Solid {
	return model3d.NewRect(model3d.XY(0.5, 0.1), model3d.XYZ(0.65, 0.25, 1.6))
}

// WindowSolid returns a single window frame centered at the origin in the
// XZ plane: a square outline with a horizontal and a vertical crossbar, all
// built from thin triangular extrusions.
func WindowSolid() model3d.Solid {
	const size = 0.15
	const thickness = 0.015
	return model3d.JoinedSolid{
		toolbox3d.TriangularPolygon(
			thickness, true,
			model3d.XZ(-size, -size),
			model3d.XZ(-size, size),
			model3d.XZ(size, size),
			model3d.XZ(size, -size),
		),
		toolbox3d.TriangularLine(thickness, model3d.X(-size+thickness/2), model3d.X(size-thickness/2)),
		toolbox3d.TriangularLine(thickness, model3d.Z(-size+thickness/2), model3d.Z(size-thickness/2)),
	}
}

// DoorSolid returns the door on the front face (y=0.5): a three-sided frame
// (open at the bottom) plus a spherical doorknob.
func DoorSolid() model3d.Solid {
	const size = 0.15
	return model3d.JoinedSolid{
		toolbox3d.TriangularPolygon(
			0.02, false, model3d.XYZ(-size, 0.5, 0), model3d.XYZ(-size, 0.5, 0.45),
			model3d.XYZ(size, 0.5, 0.45), model3d.XYZ(size, 0.5, 0),
		),
		toolbox3d.TriangularBall(0.03, model3d.XYZ(-size+0.07, 0.5, 0.45/2)),
	}
}
package types
import (
"encoding/json"
"math/big"
"github.com/filecoin-project/go-filecoin/internal/pkg/encoding"
"github.com/filecoin-project/go-leb128"
"github.com/polydawn/refmt/obj/atlas"
)
// init registers the BlockHeight CBOR transform with the IPLD atlas so the
// type can be serialized to and from its LEB128 byte representation.
func init() {
	encoding.RegisterIpldCborType(blockHeightAtlasEntry)
}

// blockHeightAtlasEntry tells refmt how to marshal a BlockHeight to bytes
// (via Bytes) and unmarshal it back (via NewBlockHeightFromBytes).
var blockHeightAtlasEntry = atlas.BuildEntry(BlockHeight{}).Transform().
	TransformMarshal(atlas.MakeMarshalTransformFunc(
		func(i BlockHeight) ([]byte, error) {
			return i.Bytes(), nil
		})).
	TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(
		func(x []byte) (BlockHeight, error) {
			return *NewBlockHeightFromBytes(x), nil
		})).
	Complete()
// UnmarshalJSON parses the JSON representation of a big integer into z.
func (z *BlockHeight) UnmarshalJSON(b []byte) error {
	var i big.Int
	if err := json.Unmarshal(b, &i); err != nil {
		return err
	}
	*z = BlockHeight{val: &i}
	return nil
}

// MarshalJSON converts a BlockHeight to its JSON big-integer representation.
func (z BlockHeight) MarshalJSON() ([]byte, error) {
	return json.Marshal(z.val)
}
// A BlockHeight is a signed multi-precision integer wrapping big.Int.
// Construct values via the New* functions so the internal pointer is set.
type BlockHeight struct{ val *big.Int }

// NewBlockHeight allocates and returns a new BlockHeight set to x.
func NewBlockHeight(x uint64) *BlockHeight {
	return &BlockHeight{val: big.NewInt(0).SetUint64(x)}
}

// NewBlockHeightFromBytes allocates and returns a new BlockHeight set
// to the value of buf interpreted as a LEB128-encoded unsigned integer.
// An empty buffer yields zero.
func NewBlockHeightFromBytes(buf []byte) *BlockHeight {
	bh := NewBlockHeight(0)
	// TODO: fix leb128 https://github.com/filecoin-project/go-leb128/issues/7
	if len(buf) > 0 {
		bh.val = leb128.ToBigInt(buf)
	}
	return bh
}
// NewBlockHeightFromString allocates a new BlockHeight set to the value of s,
// interpreted in the given base, and returns it and a boolean indicating
// success. On failure a zero-valued BlockHeight is returned: the previous
// implementation assigned big.Int.SetString's result unconditionally, which
// is nil when parsing fails and made any later use of the value panic.
func NewBlockHeightFromString(s string, base int) (*BlockHeight, bool) {
	val, ok := new(big.Int).SetString(s, base)
	if !ok {
		return &BlockHeight{val: big.NewInt(0)}, false
	}
	return &BlockHeight{val: val}, true
}
// Bytes returns the absolute value of z as a LEB128-encoded byte slice.
func (z *BlockHeight) Bytes() []byte {
	return leb128.FromBigInt(z.val)
}

// Equal returns true if z == y.
func (z *BlockHeight) Equal(y *BlockHeight) bool {
	return z.val.Cmp(y.val) == 0
}

// String returns the decimal string representation of the height.
func (z *BlockHeight) String() string {
	return z.val.String()
}

// LessThan returns true if z < y.
func (z *BlockHeight) LessThan(y *BlockHeight) bool {
	return z.val.Cmp(y.val) < 0
}

// GreaterThan returns true if z > y.
func (z *BlockHeight) GreaterThan(y *BlockHeight) bool {
	return z.val.Cmp(y.val) > 0
}

// LessEqual returns true if z <= y.
func (z *BlockHeight) LessEqual(y *BlockHeight) bool {
	return z.val.Cmp(y.val) <= 0
}

// GreaterEqual returns true if z >= y.
func (z *BlockHeight) GreaterEqual(y *BlockHeight) bool {
	return z.val.Cmp(y.val) >= 0
}
// Add returns a new BlockHeight holding z + y; z and y are not modified.
func (z *BlockHeight) Add(y *BlockHeight) *BlockHeight {
	a := big.NewInt(0).Set(z.val)
	a = a.Add(a, y.val)
	return &BlockHeight{val: a}
}

// Sub returns a new BlockHeight holding z - y; z and y are not modified.
// The result may be negative (BlockHeight is signed).
func (z *BlockHeight) Sub(y *BlockHeight) *BlockHeight {
	a := big.NewInt(0).Set(z.val)
	a = a.Sub(a, y.val)
	return &BlockHeight{val: a}
}
// AsBigInt returns the blockheight as a big.Int
func (z *BlockHeight) AsBigInt() (out *big.Int) {
out = &big.Int{}
out.Set(z.val)
return
} | internal/pkg/types/block_height.go | 0.601125 | 0.50177 | block_height.go | starcoder |
package util
import (
"strconv"
"log"
"net"
"strings"
"os"
"bufio"
"dsgd/Math"
"fmt"
)
// Data_point encapsulates one row of the data set:
// Data holds the feature values stored in the data vector, and
// Label corresponds to the class of the datapoint.
type Data_point struct {
	Data Math.Vector
	Label float64
}

// LossT is an (iteration, loss) tuple used to write the output of each
// node to its results file.
type LossT struct {
	Iteration int
	Loss float64
}
// ReadPort parses a string of the form "<label>:<port>[,<port>...]" and
// returns the UDP address 127.0.0.1:<first port>.
// NOTE(review): the ResolveUDPAddr error is silently discarded, so a
// malformed port yields a dereference of a nil address — confirm inputs are
// always pre-validated.
func ReadPort (input string) (net.UDPAddr) {
	getPorts := strings.Split(strings.Split(input,":")[1],",")
	localHost := "127.0.0.1:" + getPorts[0]
	addr,_ := net.ResolveUDPAddr("udp",localHost)
	return *addr
}
// zero_matrix allocates a row_dim x col_dim matrix with every entry set to
// zero; it is the starting point for building the link matrix.
func zero_matrix(row_dim,col_dim int) ([][]float64) {
	rows := make([][]float64, row_dim)
	for r := range rows {
		rows[r] = make([]float64, col_dim)
	}
	return rows
}

// axis_sum returns the sum of all entries in one row of a matrix.
func axis_sum(matrix []float64) (float64){
	var total float64
	for _, v := range matrix {
		total += v
	}
	return total
}
// Stochastic_matrix builds the link matrix from the topology file and then
// normalizes each row by its sum, producing a row-stochastic matrix (each
// row sums to 1) as required for the weighted averaging step.
func Stochastic_matrix(fileName string,numPIS int) ([][] float64) {
	linkMatrix := readTopology(fileName,numPIS)
	for i:=0;i< numPIS;i++ {
		rowSum := axis_sum(linkMatrix[i])
		normalized := make([]float64,len(linkMatrix[i]))
		for j:=0;j<len(linkMatrix[i]);j++ {
			normalized[j] = linkMatrix[i][j]/rowSum
		}
		linkMatrix[i] = normalized
	}
	return linkMatrix
}
// readTopology reads the network topology from a text file.
// Each input line has the form "nodeID1 nodeID2", declaring a bidirectional
// link between the two nodes. A self-link is added for every node so the
// diagonal of the returned matrix is always 1.
func readTopology(fileName string,numPIS int) ([][] float64) {
	linkMatrix := zero_matrix(numPIS,numPIS)
	file, err := os.Open(fileName)
	if err != nil {
		log.Fatal(err)
	}
	// The deferred Close is sufficient; the previous version also called
	// Close explicitly, closing the file twice.
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		nodes := strings.Split(scanner.Text()," ")
		// Fail loudly on malformed node ids instead of silently using 0.
		outgoing, err := strconv.Atoi(nodes[0])
		if err != nil {
			log.Fatal(err)
		}
		incoming, err := strconv.Atoi(nodes[1])
		if err != nil {
			log.Fatal(err)
		}
		linkMatrix[outgoing][incoming] = 1.0
		linkMatrix[incoming][outgoing] = 1.0
	}
	// Report scanner failures instead of silently truncating the topology.
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	for i:=0;i<len(linkMatrix);i++{
		linkMatrix[i][i] = 1.0
	}
	return linkMatrix
}
// Parse_Neighbours parses a string of the form "<label>:<port>[,<port>...]"
// into a list of UDP addresses 127.0.0.1:<port>, one per listed port.
// NOTE(review): ResolveUDPAddr errors are silently discarded — confirm
// inputs are always pre-validated.
func Parse_Neighbours(input string) (NeighbourAddress []net.UDPAddr) {
	getPorts := strings.Split(strings.Split(input,":")[1],",")
	var peers []net.UDPAddr
	for i:=0;i<len(getPorts);i++ {
		localHost := "127.0.0.1:" + getPorts[i]
		addr,_ := net.ResolveUDPAddr("udp",localHost)
		peers = append(peers,*addr)
	}
	return peers
}
// ReadData reads the data file at dataPath.
// @output : datapoint array storing features and labels of each row in the
// data matrix (one Data_point per line, parsed by parseRow).
func ReadData(dataPath string) ([]Data_point) {
	var representation []Data_point
	inputD, err := os.Open(dataPath)
	if err != nil {
		log.Fatal("Error loading files")
	}
	// The deferred Close is sufficient; the previous version also closed the
	// file explicitly (a double close) and kept an unused row counter.
	defer inputD.Close()
	scanner := bufio.NewScanner(inputD)
	for scanner.Scan(){
		label, data := parseRow(scanner.Text())
		representation = append(representation, Data_point{
			Data:  Math.Vector{1, len(data), data},
			Label: label,
		})
	}
	// Surface scanner errors instead of silently returning a partial data set.
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	return representation
}
// parseRow is used in ReadData to split one comma-separated line of the
// data file into a label and a feature vector.
// The label is read from the last column of the row.
// NOTE(review): the feature loop starts at i=1 and stores parsing[i] into
// data[i], so parsing[0] is never read and data[0] always stays 0. This
// looks like it treats column 0 as a row id / bias slot — confirm against
// the data format before changing.
func parseRow(input string) (float64,[]float64) {
	parsing := strings.Split(input,",")
	dimension := len(parsing)
	label,_ := strconv.ParseFloat(parsing[dimension - 1],64)
	data := make([]float64,dimension-1)
	for i:=1;i<dimension-1;i++{
		parsed,_ := strconv.ParseFloat(parsing[i],64)
		data[i] = parsed
	}
	return label,data
}
// Non_zero counts the strictly positive entries in a row. It is used to
// check how many messages a process expects from its peers (one per
// non-zero link weight).
func Non_zero(row []float64) int {
	count := 0
	for _, v := range row {
		if v > 0.0 {
			count++
		}
	}
	return count
}

// CheckError prints err to stderr and terminates the program when err is
// non-nil; it is a no-op for a nil error.
func CheckError(err error) {
	if err != nil {
		// Fprint avoids treating '%' characters inside the error message as
		// format verbs; the previous Fprintf(os.Stderr, err.Error()) call was
		// a non-constant format string (flagged by go vet) and would mangle
		// any error text containing '%'.
		fmt.Fprint(os.Stderr, err.Error())
		os.Exit(1)
	}
}
// write loss to file
func OutputLoss(fileName string,loss []LossT) {
file , err := os.Create("./Results/" + fileName + ".txt")
CheckError(err)
defer file.Close()
for i:=0;i<len(loss) ;i++ {
file.WriteString(fmt.Sprintf("%d,%f\n",loss[i].Iteration,loss[i].Loss))
}
file.Close()
} | util/utils.go | 0.512205 | 0.527438 | utils.go | starcoder |
package f5api
// LtmMonitorWap describes an F5 BIG-IP LTM WAP health-monitor configuration
// message, sent to or received from the iControl REST API. All fields are
// optional in the JSON payload (omitempty).
type LtmMonitorWap struct {
	// Application service to which the object belongs.
	AppService string `json:"appService,omitempty"`
	// IP address and service port of the health-check destination; "*" parts
	// mean "use the pool member's address/port".
	Destination string `json:"destination,omitempty"`
	// RADIUS server providing authentication for the WAP target; if only
	// accounting-port is set, the WAP server itself is assumed to be the
	// RADIUS server.
	AccountingNode string `json:"accountingNode,omitempty"`
	// Check frequency (seconds) while the node is up; defaults to Interval.
	UpInterval int64 `json:"upInterval,omitempty"`
	// When enabled, a downed resource must be re-enabled manually rather
	// than automatically on the next successful check. Default: disabled.
	ManualResume string `json:"manualResume,omitempty"`
	// Port used for RADIUS accounting; "0" disables accounting. Default: none.
	AccountingPort string `json:"accountingPort,omitempty"`
	// Frequency (seconds) of the monitor check. Default: 10.
	Interval int64 `json:"interval,omitempty"`
	// Existing monitor from which settings are imported for this monitor.
	DefaultsFrom string `json:"defaultsFrom,omitempty"`
	// Text string the monitor looks for in the returned resource; without
	// both Send and Recv only a simple connect check is performed.
	Recv string `json:"recv,omitempty"`
	// User defined description.
	Description string `json:"description,omitempty"`
	// Kind of entity.
	Kind string `json:"kind,omitempty"`
	// Name of entity.
	Name string `json:"name,omitempty"`
	// 11-digit phone number for the RADIUS server (optional).
	CallId string `json:"callId,omitempty"`
	// Administrative partition within which the monitor resides.
	Partition string `json:"partition,omitempty"`
	// RADIUS session identification number (optional).
	SessionId string `json:"sessionId,omitempty"`
	// Text string sent to the target (e.g. "GET /"); when null, a successful
	// connection alone marks the service up and Recv is ignored.
	Send string `json:"send,omitempty"`
	// Seconds after the first successful response before the node is marked
	// up; 0 marks it up immediately. Default: 0.
	TimeUntilUp int64 `json:"timeUntilUp,omitempty"`
	// Password the monitor uses to access the resource.
	Secret string `json:"secret,omitempty"`
	// Seconds the target has to respond before being considered down; a
	// RESET response marks it down immediately. Default: 31.
	Timeout int64 `json:"timeout,omitempty"`
	// "yes" redirects error messages and diagnostics to a per-monitor log
	// file under /var/log/monitors/. Default: "no".
	Debug string `json:"debug,omitempty"`
	// RADIUS NAS-ID for this system.
	ServerId string `json:"serverId,omitempty"`
	// RADIUS framed IP address (optional).
	FramedAddress string `json:"framedAddress,omitempty"`
}
package money
import (
"strconv"
"strings"
)
// Currency represents the currency information for formatting: ISO 4217
// code, delimiters, exponent (number of minor-unit digits), symbol, and a
// display template where "1" is replaced by the formatted number and "$"
// by the symbol.
type Currency struct {
	code              string
	decimalDelimiter  string
	thousandDelimiter string
	exponent          int
	symbol            string
	template          string
}

// https://en.wikipedia.org/wiki/ISO_4217

// USD creates and returns a new Currency instance for USD.
func USD() *Currency {
	return &Currency{code: "USD", decimalDelimiter: ".", thousandDelimiter: ",", exponent: 2, symbol: "$", template: "$1"}
}

// EUR creates and returns a new Currency instance for EUR.
func EUR() *Currency {
	return &Currency{code: "EUR", decimalDelimiter: ",", thousandDelimiter: ".", exponent: 2, symbol: "€", template: "$1"}
}

// Add creates and returns a new Currency instance with the given settings.
func Add(code string, decimalDelimiter string, thousandDelimiter string, exponent int, symbol string, template string) *Currency {
	return &Currency{
		code:              code,
		decimalDelimiter:  decimalDelimiter,
		thousandDelimiter: thousandDelimiter,
		exponent:          exponent,
		symbol:            symbol,
		template:          template,
	}
}

// Format returns a formatted string for the given amount, where amount is
// expressed in the currency's minor unit (e.g. cents for USD).
func (c *Currency) Format(amount int64) string {
	// Compute the magnitude in uint64 space. Unsigned negation is correct
	// even for math.MinInt64, which the previous `amount * -1` overflowed on.
	neg := amount < 0
	mag := uint64(amount)
	if neg {
		mag = -mag
	}
	result := strconv.FormatUint(mag, 10)
	// Left-pad with zeros so at least one digit precedes the decimal delimiter.
	if len(result) <= c.exponent {
		result = strings.Repeat("0", c.exponent-len(result)+1) + result
	}
	// Insert thousand delimiters every three digits of the integer part.
	if c.thousandDelimiter != "" {
		for i := len(result) - c.exponent - 3; i > 0; i -= 3 {
			result = result[:i] + c.thousandDelimiter + result[i:]
		}
	}
	// Split off the fractional (minor-unit) digits.
	if c.exponent > 0 {
		result = result[:len(result)-c.exponent] + c.decimalDelimiter + result[len(result)-c.exponent:]
	}
	result = strings.Replace(c.template, "1", result, 1)
	result = strings.Replace(result, "$", c.symbol, 1)
	// Add minus sign for negative amounts (after the symbol/template).
	if neg {
		result = "-" + result
	}
	return result
}

// equals reports whether two currencies share the same ISO code.
func (c *Currency) equals(currency *Currency) bool {
	return c.code == currency.code
}
In chess, a queen can attack horizontally, vertically, and diagonally
*/
// Board cell markers. Named constants replace the repeated byte('Q') /
// byte('.') conversions scattered through the original code.
const (
	queenMark = 'Q'
	emptyMark = '.'
)

// solveNQueens returns every distinct placement of n queens on an n x n
// chessboard so that no two queens attack each other. In chess a queen
// attacks horizontally, vertically and diagonally, so each solution has
// exactly one 'Q' per row; when a row has no valid column we backtrack.
func solveNQueens(n int) [][]string {
	var res [][]string
	if n == 1 {
		res = append(res, []string{"Q"})
		return res
	}
	board := buildBaseBoard(n)
	solve(&res, board, 0, n, n)
	return res
}

// buildBaseBoard allocates an n x n board filled with the empty marker.
// (The original built a template row and copied it into every row; filling
// each row directly avoids the extra allocation and copy.)
func buildBaseBoard(n int) [][]byte {
	board := make([][]byte, n)
	for i := range board {
		board[i] = make([]byte, n)
		for j := range board[i] {
			board[i][j] = emptyMark
		}
	}
	return board
}

// isValid reports whether a queen may be placed at (row, col): no queen in
// the same column above, nor on either upper diagonal. Rows below row are
// still empty, so only the upper half of the board needs checking.
func isValid(board [][]byte, row, col, sideLen int) bool {
	// Same column, above the current row.
	for i := 0; i < row; i++ {
		if board[i][col] == queenMark {
			return false
		}
	}
	// Upper-right diagonal.
	for i, j := row-1, col+1; i >= 0 && j < sideLen; i, j = i-1, j+1 {
		if board[i][j] == queenMark {
			return false
		}
	}
	// Upper-left diagonal.
	for i, j := row-1, col-1; i >= 0 && j >= 0; i, j = i-1, j-1 {
		if board[i][j] == queenMark {
			return false
		}
	}
	return true
}

// solve backtracks row by row, placing one queen per row. nLeft counts the
// queens still to place; when it reaches zero the board is snapshotted into
// the result list.
func solve(res *[][]string, board [][]byte, currRow int, nLeft int, sideLen int) {
	if nLeft == 0 {
		snapshot := make([]string, sideLen)
		for i := range board {
			snapshot[i] = string(board[i])
		}
		*res = append(*res, snapshot)
		return
	}
	for col := 0; col < sideLen; col++ {
		if !isValid(board, currRow, col, sideLen) {
			continue
		}
		board[currRow][col] = queenMark
		solve(res, board, currRow+1, nLeft-1, sideLen)
		board[currRow][col] = emptyMark
	}
}
/*
if n == 2; []string{"Q.", ".Q"}
if n == 3; []string{"Q..", ".Q.", "..Q"}
if n == 4; []string{"Q...", ".Q..", "..Q.", "...Q"}
func buildOpts(n int) []string {
queen := "Q"
delim := "."
var opts []string
str := strings.Repeat(delim, n)
opts = append(opts, queen + str[:len(str)-1])
for i := 1; i < n-1; i++ {
opt := str[:i] + queen + str[i+1:]
opts = append(opts, opt)
}
lastOpt := strings.Repeat(delim, n-1) + queen
opts = append(opts, lastOpt)
return opts
}
*/
package asciitable
import (
"bytes"
"fmt"
"os"
"strings"
"text/tabwriter"
"golang.org/x/term"
)
// Column represents a column in the table.
type Column struct {
	Title         string // header text
	MaxCellLength int    // maximum printable cell width; longer cells get truncated
	FootnoteLabel string // label appended to truncated cells, referencing a footnote
	width         int    // widest cell seen so far (incl. header); maintained by AddRow
}

// Table holds tabular values in a rows and columns format.
type Table struct {
	columns   []Column
	rows      [][]string
	footnotes map[string]string // footnote label -> note text
}
// MakeHeadlessTable creates a new instance of the table without any column
// names. The number of columns is required.
func MakeHeadlessTable(columnCount int) Table {
	return Table{
		columns:   make([]Column, columnCount),
		rows:      make([][]string, 0),
		footnotes: make(map[string]string),
	}
}

// MakeTable creates a new instance of the table with given column names.
// Optionally rows to be added to the table may be included. Each column's
// initial width is the length of its header.
func MakeTable(headers []string, rows ...[]string) Table {
	t := MakeHeadlessTable(len(headers))
	for i := range t.columns {
		t.columns[i].Title = headers[i]
		t.columns[i].width = len(headers[i])
	}
	for _, row := range rows {
		t.AddRow(row)
	}
	return t
}
// MakeTableWithTruncatedColumn creates a table where the column
// matching truncatedColumn will be shortened to account for terminal
// width.
func MakeTableWithTruncatedColumn(columnOrder []string, rows [][]string, truncatedColumn string) Table {
width, _, err := term.GetSize(int(os.Stdin.Fd()))
if err != nil {
width = 80
}
truncatedColMinSize := 16
maxColWidth := (width - truncatedColMinSize) / (len(columnOrder) - 1)
t := MakeTable([]string{})
totalLen := 0
columns := []Column{}
for collIndex, colName := range columnOrder {
column := Column{
Title: colName,
MaxCellLength: len(colName),
}
if colName == truncatedColumn { // truncated column is handled separately in next loop
columns = append(columns, column)
continue
}
for _, row := range rows {
cellLen := row[collIndex]
if len(cellLen) > column.MaxCellLength {
column.MaxCellLength = len(cellLen)
}
}
if column.MaxCellLength > maxColWidth {
column.MaxCellLength = maxColWidth
totalLen += column.MaxCellLength + 4 // "...<space>"
} else {
totalLen += column.MaxCellLength + 1 // +1 for column separator
}
columns = append(columns, column)
}
for _, column := range columns {
if column.Title == truncatedColumn {
column.MaxCellLength = width - totalLen - len("... ")
}
t.AddColumn(column)
}
for _, row := range rows {
t.AddRow(row)
}
return t
}
// AddColumn adds a column to the table's structure.
// Note: the column's running width is (re)initialized from its title, so any
// width carried in by the caller is ignored.
func (t *Table) AddColumn(c Column) {
	c.width = len(c.Title)
	t.columns = append(t.columns, c)
}
// AddRow adds a row of cells to the table.
// Cells beyond the number of columns are dropped. Column widths are updated
// from the truncated cell text so the separator row matches the printed body.
func (t *Table) AddRow(row []string) {
	limit := min(len(row), len(t.columns))
	for i := 0; i < limit; i++ {
		cell, _ := t.truncateCell(i, row[i])
		t.columns[i].width = max(len(cell), t.columns[i].width)
	}
	t.rows = append(t.rows, row[:limit])
}
// AddFootnote adds a footnote for referencing from truncated cells.
func (t *Table) AddFootnote(label string, note string) {
	t.footnotes[label] = note
}
// truncateCell truncates cell contents to shorter than the column's
// MaxCellLength, and adds the footnote symbol if specified.
// The boolean result reports whether a footnote label was appended.
func (t *Table) truncateCell(colIndex int, cell string) (string, bool) {
	maxCellLength := t.columns[colIndex].MaxCellLength
	if maxCellLength == 0 || len(cell) <= maxCellLength {
		return cell, false // no limit configured, or the cell already fits
	}
	truncatedCell := fmt.Sprintf("%v...", cell[:maxCellLength])
	footnoteLabel := t.columns[colIndex].FootnoteLabel
	if footnoteLabel == "" {
		return truncatedCell, false
	}
	return fmt.Sprintf("%v %v", truncatedCell, footnoteLabel), true
}
// AsBuffer returns a *bytes.Buffer with the printed output of the table.
func (t *Table) AsBuffer() *bytes.Buffer {
	var buffer bytes.Buffer
	// tabwriter: minwidth=5, tabwidth=0, padding=1, padchar=' ' — columns are
	// aligned and separated by at least one space.
	writer := tabwriter.NewWriter(&buffer, 5, 0, 1, ' ', 0)
	template := strings.Repeat("%v\t", len(t.columns))
	// Header and separator.
	if !t.IsHeadless() {
		var colh []interface{}
		var cols []interface{}
		for _, col := range t.columns {
			colh = append(colh, col.Title)
			cols = append(cols, strings.Repeat("-", col.width)) // underline sized to widest cell
		}
		fmt.Fprintf(writer, template+"\n", colh...)
		fmt.Fprintf(writer, template+"\n", cols...)
	}
	// Body.
	footnoteLabels := make(map[string]struct{}) // set of labels actually used by truncated cells
	for _, row := range t.rows {
		var rowi []interface{}
		for i := range row {
			cell, addFootnote := t.truncateCell(i, row[i])
			if addFootnote {
				footnoteLabels[t.columns[i].FootnoteLabel] = struct{}{}
			}
			rowi = append(rowi, cell)
		}
		fmt.Fprintf(writer, template+"\n", rowi...)
	}
	// Footnotes.
	// NOTE(review): map iteration order is random, so with multiple footnotes
	// the printed footnote order is nondeterministic.
	for label := range footnoteLabels {
		fmt.Fprintln(writer)
		fmt.Fprintln(writer, label, t.footnotes[label])
	}
	writer.Flush()
	return &buffer
}
// IsHeadless returns true if none of the table title cells contains any text.
func (t *Table) IsHeadless() bool {
	for _, col := range t.columns {
		if col.Title != "" {
			return false
		}
	}
	return true
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
package assertion
import (
"fmt"
"reflect"
)
// Comparison operator codes used by compare().
// The ordering matters: each negated operator (cmpOpNotEqual, cmpOpLowerEqual,
// cmpOpGreaterEqual) is exactly one greater than the operator it negates
// (cmpOpEqual, cmpOpGreater, cmpOpLower); compare() relies on evaluating op-1
// and inverting the result.
const (
	cmpOpEqual = iota
	cmpOpNotEqual
	cmpOpGreater
	cmpOpLowerEqual
	cmpOpLower
	cmpOpGreaterEqual
)
// errMsgByOp maps each operator code to its failure-message format string
// (the errMsg* constants are declared elsewhere in this package).
var errMsgByOp = map[int]string{
	cmpOpEqual: errMsgNotEqual,
	cmpOpNotEqual: errMsgNotDifferent,
	cmpOpGreater: errMsgNotGreater,
	cmpOpLowerEqual: errMsgNotLowerEqual,
	cmpOpLower: errMsgNotLower,
	cmpOpGreaterEqual: errMsgNotGreaterEqual,
}
// compare returns true if a given value and other operand satisfy the compare
// operation determined by the operator. If the operation is not satisfied, this
// function also returns an error (only comparable types allowed).
// msgArgs, when present, customizes the error message (see buildError).
func compare(op int, value, other interface{}, msgArgs ...interface{}) (bool, error) {
	rv, ro := reflect.ValueOf(value), reflect.ValueOf(other)
	// Both operands must share the same reflect.Kind (e.g. int vs int32 fails here).
	if rv.Kind() != ro.Kind() {
		return false, buildError(fmt.Sprintf(errMsgNotSameType, value, other), msgArgs...)
	}
	switch op {
	case cmpOpNotEqual, cmpOpGreaterEqual, cmpOpLowerEqual:
		// Negated operators: evaluate the complementary operator (op-1, see the
		// const block ordering) and invert its outcome, swapping in the message
		// that matches the requested operator.
		ok, err := compare(op-1, value, other, msgArgs...)
		if ok {
			err = buildError(fmt.Sprintf(errMsgByOp[op], value, other), msgArgs...)
		}
		return !ok, err
	}
	// Direct operators, dispatched on the concrete type. bool supports equality
	// only; the numeric kinds and string additionally support > and <.
	switch value.(type) {
	case bool:
		v, o := rv.Bool(), ro.Bool()
		if op == cmpOpEqual && v == o {
			return true, nil
		}
	case int, int8, int16, int32, int64:
		v, o := rv.Int(), ro.Int()
		if (op == cmpOpEqual && v == o) || (op == cmpOpGreater && v > o) || (op == cmpOpLower && v < o) {
			return true, nil
		}
	case uint, uint8, uint16, uint32, uint64:
		v, o := rv.Uint(), ro.Uint()
		if (op == cmpOpEqual && v == o) || (op == cmpOpGreater && v > o) || (op == cmpOpLower && v < o) {
			return true, nil
		}
	case float32, float64:
		v, o := rv.Float(), ro.Float()
		if (op == cmpOpEqual && v == o) || (op == cmpOpGreater && v > o) || (op == cmpOpLower && v < o) {
			return true, nil
		}
	case string:
		v, o := rv.String(), ro.String()
		if (op == cmpOpEqual && v == o) || (op == cmpOpGreater && v > o) || (op == cmpOpLower && v < o) {
			return true, nil
		}
	}
	// Unsupported type or unsatisfied operation.
	return false, buildError(fmt.Sprintf(errMsgByOp[op], value, other), msgArgs...)
}
// validateArgsLength panics if args length is lower than minLength.
func validateArgsLength(minLength int, args ...interface{}) {
	if len(args) < minLength {
		panic(buildError(errMsgMissingArgs))
	}
}
// Nil returns true if the given value (args[0]) is nil — either literal nil or
// a nil chan, func, interface, map, pointer or slice. Extra args customize the
// failure message. Panics (via validateArgsLength) when no args are supplied.
func (a *Assertion) Nil(args ...interface{}) bool {
	validateArgsLength(1, args...)
	if args[0] == nil {
		return true
	}
	v := reflect.ValueOf(args[0])
	switch v.Kind() {
	case reflect.Chan, reflect.Func,
		reflect.Interface, reflect.Map,
		reflect.Ptr, reflect.Slice:
		// NOTE(review): reflect.UnsafePointer is also nil-able but not handled here.
		if v.IsNil() {
			return true
		}
	}
	a.addError(buildError(fmt.Sprintf(errMsgNot, args[0], nil), args[1:]...))
	return false
}
// Equal returns true if args[0] is equal to args[1]; any further args
// customize the failure message. Panics when fewer than two args are given.
func (a *Assertion) Equal(args ...interface{}) bool {
	validateArgsLength(2, args...)
	ok, err := compare(cmpOpEqual, args[0], args[1], args[2:]...)
	if !ok {
		a.addError(err)
	}
	return ok
}
// True returns true if a given bool value is true.
func (a *Assertion) True(value bool, msgArgs ...interface{}) bool {
	ok, err := compare(cmpOpEqual, value, true, msgArgs...)
	if !ok {
		a.addError(err)
	}
	return ok
}
// False returns true if a given bool value is false.
func (a *Assertion) False(value bool, msgArgs ...interface{}) bool {
	ok, err := compare(cmpOpEqual, value, false, msgArgs...)
	if !ok {
		a.addError(err)
	}
	return ok
}
// GreaterThan returns true if args[0] is strictly greater than args[1]
// (numeric or string operands of matching kinds; see compare).
func (a *Assertion) GreaterThan(args ...interface{}) bool {
	validateArgsLength(2, args...)
	ok, err := compare(cmpOpGreater, args[0], args[1], args[2:]...)
	if !ok {
		a.addError(err)
	}
	return ok
}
// GreaterThanOrEqual returns true if args[0] is greater than or equal to args[1].
func (a *Assertion) GreaterThanOrEqual(args ...interface{}) bool {
	validateArgsLength(2, args...)
	ok, err := compare(cmpOpGreaterEqual, args[0], args[1], args[2:]...)
	if !ok {
		a.addError(err)
	}
	return ok
}
// LowerThan returns true if args[0] is strictly lower than args[1].
func (a *Assertion) LowerThan(args ...interface{}) bool {
	validateArgsLength(2, args...)
	ok, err := compare(cmpOpLower, args[0], args[1], args[2:]...)
	if !ok {
		a.addError(err)
	}
	return ok
}
// LowerThanOrEqual returns true if args[0] is lower than or equal to args[1].
func (a *Assertion) LowerThanOrEqual(args ...interface{}) bool {
	validateArgsLength(2, args...)
	ok, err := compare(cmpOpLowerEqual, args[0], args[1], args[2:]...)
	if !ok {
		a.addError(err)
	}
	return ok
}
// Between returns true if args[0] lies between args[1] (lower) and args[2]
// (upper), including both bounds.
func (a *Assertion) Between(args ...interface{}) bool {
	validateArgsLength(3, args...)
	// Map iteration order is random, but both checks must pass and the emitted
	// error message is identical either way, so behavior stays deterministic.
	for op, v := range map[int]interface{}{cmpOpGreaterEqual: args[1], cmpOpLowerEqual: args[2]} {
		ok, _ := compare(op, args[0], v, args[3:]...)
		if !ok {
			a.addError(buildError(fmt.Sprintf(errMsgNotBetween, args[0], args[1], args[2]), args[3:]...))
			return false
		}
	}
	return true
}
// BetweenExclude returns true if args[0] lies strictly between args[1] and
// args[2] (excluding both bounds).
func (a *Assertion) BetweenExclude(args ...interface{}) bool {
	validateArgsLength(3, args...)
	for op, v := range map[int]interface{}{cmpOpGreater: args[1], cmpOpLower: args[2]} {
		ok, _ := compare(op, args[0], v, args[3:]...)
		if !ok {
			a.addError(buildError(fmt.Sprintf(errMsgNotBetweenExclude, args[0], args[1], args[2]), args[3:]...))
			return false
		}
	}
	return true
}
package game
// State is a current game state as perceived by the current turn.
type State struct {
	// Turn is a current turn number.
	// Note that turn number starts with one, not zero.
	Turn int
	// Round is a number of the current encounter.
	// Note that round number starts with one, not zero.
	Round int
	// RoundTurn is a round-local turn number.
	// If it's 1, then it's a first turn in the round.
	// Note that round turn number starts with one, not zero.
	RoundTurn int
	// Score is your current game score.
	Score int
	// Avatar contains information about your hero status.
	Avatar Avatar
	// Creep is an information about your current opponent.
	Creep Creep
	// NextCreep is a type of the next creep.
	// Next creep is encountered after the current creep is defeated.
	// If there is no next creep, a special type CreepNone indicates that.
	NextCreep CreepType
	// Deck is your cards collection.
	// It's keyed by a card type, like CardAttack.
	Deck map[CardType]Card
}
// Can reports whether it's legal to do a cardType move: the card must be
// available in the deck (Count != 0; note Count == -1 means unlimited) and the
// avatar must have enough mana to pay the card's MP cost.
func (st *State) Can(cardType CardType) bool {
	if st.Deck[cardType].Count == 0 {
		return false // Card is unavailable
	}
	if st.Avatar.MP < st.Deck[cardType].MP {
		return false // Not enougn mana
	}
	return true
}
// Creep is a particular creep information.
type Creep struct {
	Type CreepType
	HP int // current health points
	// Stun is a number of turns this creep is going to skip.
	// You probably want to use Creep.IsStunned() instead of this.
	Stun int
	CreepStats
}
// IsFull reports whether creep health is full.
func (c *Creep) IsFull() bool { return c.HP == c.MaxHP }
// IsStunned reports whether creep is currently stunned.
func (c *Creep) IsStunned() bool { return c.Stun > 0 }
// CreepStats is a set of creep statistics.
type CreepStats struct {
	MaxHP int
	Damage IntRange // damage dealt per attack, rolled within this range
	ScoreReward int // score granted when this creep is defeated
	CardsReward int // cards granted when this creep is defeated
	Traits CreepTraitList
}
// Avatar is a hero status information.
type Avatar struct {
	HP int // current health points
	MP int // current mana points
	AvatarStats
}
// AvatarStats is a set of avatar statistics.
type AvatarStats struct {
	MaxHP int
	MaxMP int
}
// Card is a hero deck card information.
type Card struct {
	// Type is a card type, like "CardAttack" or "CardMagicArrow".
	Type CardType
	// Count tells how many such cards you have.
	// -1 means "unlimited".
	Count int
	CardStats
}
// CardStats is a set of card statistics.
type CardStats struct {
	// MP is a card mana cost per usage.
	MP int
	// IsMagic tells whether this card effect is considered to be magical.
	IsMagic bool
	// Effect is a description-like string that explains the Power field meaning.
	Effect string
	// Power is a spell effectiveness.
	// For offensive spells, it's the damage they deal.
	// For other spells it can mean different things (see Effect field).
	Power IntRange
	// IsOffensive tells whether this card targets enemy.
	// If it's not, it either targets you or has some special effect like "Retreat".
	IsOffensive bool
}
// IntRange is an inclusive integer range from Low() to High().
type IntRange [2]int
// Low returns the inclusive lower bound of the range.
func (rng IntRange) Low() int { return rng[0] }
// High returns the inclusive upper bound of the range.
func (rng IntRange) High() int { return rng[1] }
// IsZero reports whether both bounds are zero (i.e. the range is unset).
func (rng IntRange) IsZero() bool { return rng.Low() == 0 && rng.High() == 0 }
// CardType is an enum-like type for cards.
type CardType int
// All card types.
//go:generate stringer -type=CardType -trimprefix=Card
const (
	// Infinite cards.
	CardAttack CardType = iota
	CardMagicArrow
	CardRetreat
	CardRest
	// Cards that need to be obtained during the gameplay.
	CardPowerAttack
	CardFirebolt
	CardStun
	CardHeal
	CardParry
)
// CreepType is an enum-like type for creeps.
type CreepType int
// All creep types. CreepNone (the zero value) means "no creep".
//go:generate stringer -type=CreepType -trimprefix=Creep
const (
	CreepNone CreepType = iota
	CreepCheepy
	CreepImp
	CreepLion
	CreepFairy
	CreepMummy
	CreepDragon
)
// CreepTraitList is convenience wrapper over a slice of creep traits.
type CreepTraitList []CreepTrait
// Has reports whether a creep trait list contains the specified trait.
func (list CreepTraitList) Has(x CreepTrait) bool {
	for _, trait := range list {
		if trait == x {
			return true
		}
	}
	return false
}
// CreepTrait is an enum-like type for creep special traits.
type CreepTrait int
// All creep traits.
//go:generate stringer -type=CreepTrait -trimprefix=Trait
const (
	TraitCoward CreepTrait = iota
	TraitMagicImmunity
	TraitWeakToFire
	TraitSlow
	TraitRanged
)
package workflow
import (
"fmt"
"reflect"
"sync"
"time"
)
// OpType identifies how a step in a Flow is wired to its predecessors.
type OpType int
const (
	CALL OpType = iota // independent call; the first step of a flow
	AND // independent call executed alongside previous steps
	APPLY // consumes the return values of the immediately preceding step
	COMBINE // consumes accumulated returns of steps since the last APPLY/COMBINE
)
// step is one node of a Flow pipeline.
type step struct {
	targetFunc interface{} // function to invoke asynchronously
	paramsPassed []interface{} // args for CALL/AND steps; APPLY/COMBINE take args from predecessors
	funcReturned []interface{} // NOTE(review): never written in this file — possibly dead
	voidReturn bool // true when targetFunc declares no return values
	op OpType
	future *Future // future tracking the async invocation; set by runFlow
}
// Flow represents a pipeline of steps that are executed asynchronously.
type Flow struct {
	steps []*step // ordered pipeline of steps
	future *Future // future wrapping runFlow; set by Execute()
	runOnce sync.Once // guarantees Execute() triggers the pipeline at most once
	es *ExecutorService // executor used to run each step's future
}
// SetExecutor replaces the flow's executor service and returns the flow so
// calls can be chained.
func (fl *Flow) SetExecutor(customExecutor *ExecutorService) *Flow {
	fl.es = customExecutor
	return fl
}
// NewFlow creates a new Flow whose first step represents the async execution
// of targetFunc with the given args. The target function is not invoked until
// Execute() is called on the returned flow.
// Returns nil when targetFunc is nil or not a function.
func NewFlow(targetFunc interface{}, args ...interface{}) *Flow {
	targetType := reflect.TypeOf(targetFunc)
	// Guard against nil (previously a panic) and non-function values.
	// The original code built an error with fmt.Errorf here and discarded it;
	// the only observable behavior was — and remains — the nil return.
	if targetType == nil || targetType.Kind() != reflect.Func {
		return nil
	}
	step0 := &step{
		targetFunc:   targetFunc,
		paramsPassed: args,
		voidReturn:   targetType.NumOut() == 0, // no return values to collect
		op:           CALL,
	}
	return &Flow{
		steps: []*step{step0},
		es:    default_es, // package default executor; override with SetExecutor
	}
}
// This method creates a new step in the Flow that represents the async execution of the target function.
// Step created using this method represents a target function is independent and can be triggered independently.
func (fl *Flow) AndCall(targetFunc interface{}, args ...interface{}) *Flow {
targetType := reflect.TypeOf(targetFunc)
switch targetType.Kind() {
case reflect.Func:
nxtStp := &step{}
nxtStp.targetFunc = targetFunc
nxtStp.paramsPassed = args
nxtStp.voidReturn = targetType.NumOut() == 0
nxtStp.op = AND
fl.steps = append(fl.steps, nxtStp)
return fl
default:
fmt.Errorf("%s un-supported type", targetType.Kind())
return nil
}
}
// This method creates a new step in the Flow that represents the async execution of the target function.
// Step created using this method represents a target function that combines the results of the previous steps.
// The argument of this target function should match the return types of the previous steps.
func (fl *Flow) ThenCombine(targetFunc interface{}) *Flow {
targetType := reflect.TypeOf(targetFunc)
switch targetType.Kind() {
case reflect.Func:
nxtStp := &step{}
nxtStp.targetFunc = targetFunc
nxtStp.voidReturn = targetType.NumOut() == 0
nxtStp.op = COMBINE
fl.steps = append(fl.steps, nxtStp)
return fl
default:
fmt.Errorf("%s un-supported type", targetType.Kind())
return nil
}
}
// This method creates a new step in the Flow that represents the async execution of the target function.
// Step created using this method represents a target function, this step runs after the execution of the previous step.
// The argument of this target function should match the return types of the previous step.
func (fl *Flow) ThenApply(targetFunc interface{}) *Flow {
targetType := reflect.TypeOf(targetFunc)
switch targetType.Kind() {
case reflect.Func:
nxtStp := &step{}
nxtStp.targetFunc = targetFunc
nxtStp.voidReturn = targetType.NumOut() == 0
nxtStp.op = APPLY
fl.steps = append(fl.steps, nxtStp)
return fl
default:
fmt.Errorf("%s un-supported type", targetType.Kind())
return nil
}
}
// runFlow drives the pipeline:
//   - CALL/AND steps are started with their explicitly supplied parameters;
//   - APPLY steps block on the previous step's result and feed it in;
//   - COMBINE steps block on, and concatenate, the results of the steps since
//     the most recent APPLY/COMBINE step.
// It returns the results of the final step, or an error if any step failed or
// the flow was aborted/timed out.
// NOTE(review): the bare fmt.Errorf(...) calls before each error return
// discard their result — they construct an error that is neither logged nor
// used; only the second fmt.Errorf (the returned one) matters.
func (fl *Flow) runFlow() ([]interface{}, error) {
	for i := range fl.steps {
		currentStp := fl.steps[i]
		switch currentStp.op {
		case CALL, AND:
			// Independent step: start it with its own parameters.
			if callFtr, err := RunAsync(currentStp.targetFunc, currentStp.paramsPassed...); err != nil {
				fmt.Errorf("%+v step (%d) failed with %s", fl.steps[i].targetFunc, fl.steps[i].op, err.Error())
				return nil, fmt.Errorf("%+v step (%d) failed with %s", fl.steps[i].targetFunc, fl.steps[i].op, err.Error())
			} else {
				currentStp.future = callFtr
				callFtr.SetExecutor(fl.es).Execute()
			}
		case APPLY:
			// Wait for the previous step's result (Get(0) waits without timeout)
			// and pass it as this step's arguments.
			if stepOutput, err := fl.steps[i-1].future.Get(0); err != nil {
				fmt.Errorf("%+v step (%d) failed with %s", fl.steps[i-1].targetFunc, fl.steps[i-1].op, err.Error())
				return nil, fmt.Errorf("%+v step (%d) failed with %s", fl.steps[i-1].targetFunc, fl.steps[i-1].op, err.Error())
			} else {
				if applyFtr, err := RunAsync(currentStp.targetFunc, stepOutput...); err != nil {
					fmt.Errorf("%+v step (%d) failed with %s", fl.steps[i].targetFunc, fl.steps[i].op, err.Error())
					return nil, fmt.Errorf("%+v step (%d) failed with %s", fl.steps[i].targetFunc, fl.steps[i].op, err.Error())
				} else {
					currentStp.future = applyFtr
					applyFtr.SetExecutor(fl.es).Execute()
				}
			}
		case COMBINE:
			var allResponses []interface{}
			// start ends up at the index of the last APPLY/COMBINE step before i
			// (or 0 if there is none); the results of steps start..i-1 are
			// concatenated and fed into this step.
			start := 0
			for p := 0; p < i; p++ {
				if fl.steps[p].op == APPLY || fl.steps[p].op == COMBINE {
					start = p
				}
			}
			for p := start; p < i; p++ {
				// Block until each contributing step has finished.
				if pCallFtr, err := fl.steps[p].future.Get(0); err != nil {
					fmt.Errorf("%+v step (%d) failed with %s", fl.steps[p].targetFunc, fl.steps[p].op, err.Error())
					return nil, fmt.Errorf("%+v step (%d) failed with %s", fl.steps[p].targetFunc, fl.steps[p].op, err.Error())
				} else {
					allResponses = append(allResponses, pCallFtr...)
				}
			}
			if combinedFtr, err := RunAsync(currentStp.targetFunc, allResponses...); err != nil {
				fmt.Errorf("%+v step (%d) failed with %s", fl.steps[i].targetFunc, fl.steps[i].op, err.Error())
				return nil, fmt.Errorf("%+v step (%d) failed with %s", fl.steps[i].targetFunc, fl.steps[i].op, err.Error())
			} else {
				currentStp.future = combinedFtr
				combinedFtr.SetExecutor(fl.es).Execute()
			}
		}
	}
	var (
		flowReturn []interface{}
		flowError error = fmt.Errorf("flow aborted/timedout")
	)
	// A nil future on the last step means the flow was aborted or timed out
	// before that step was started; keep the default error in that case.
	if fl.steps[len(fl.steps)-1].future != nil { //this is to check for aborts/timeout
		flowReturn, flowError = fl.steps[len(fl.steps)-1].future.Get(0)
	}
	fmt.Println("completed flow")
	return flowReturn, flowError
}
// Execute triggers the execution of the steps/pipeline represented by this flow.
// The target function(s) will not be executed unless this method is invoked.
// Each target function in the flow is executed by a separate goroutine, either
// in parallel or one after another depending on the step types.
// runOnce guarantees the pipeline is started at most once even if Execute is
// called repeatedly.
func (fl *Flow) Execute() *Flow {
	fl.runOnce.Do(func() {
		// NOTE(review): the fmt.Errorf result in the error branch is discarded;
		// a RunAsync failure here leaves fl.future nil (Get reports that).
		if flowFtr, err := RunAsync(fl.runFlow); err != nil {
			fmt.Errorf("error (%s) while creating future", err.Error())
		} else {
			fl.future = flowFtr
			flowFtr.SetExecutor(fl.es).Execute()
		}
	})
	return fl
}
// Get blocks until the target function(s) invocation is completed.
// Return from this method indicates successful execution of target function(s)
// or time-out or user aborted/cancelled this flow.
// error is returned in case of timeouts or aborted.
// This method always returns results of the last target function of the flow.
func (fl *Flow) Get(timeout time.Duration) ([]interface{}, error) {
	if fl.future == nil {
		return nil, fmt.Errorf("future not created for the flow")
	}
	if timeout > 0 {
		// Schedule a one-shot cancellation of every step that has not
		// completed by the deadline. (The fmt.Errorf result is discarded —
		// see NOTE in Execute.)
		time.AfterFunc(timeout, func() {
			for _, s := range fl.steps {
				if s.future != nil && s.future.Stage() != COMPLETED {
					fmt.Errorf("Timeout (%d) got triggered hence cancelling target (%+v)", timeout, reflect.TypeOf(s.targetFunc))
					s.future.Cancel()
				}
			}
		})
	}
	// runFlow returns ([]interface{}, error); its two results come back here
	// as get[0] and get[1].
	if get, e := fl.future.Get(timeout); e != nil {
		return nil, e
	} else {
		if get[1] != nil {
			return nil, get[1].(error)
		}
		return get[0].([]interface{}), nil
	}
}
// Cancel cancels the referred flow: it sends a signal to abort the call of all
// the referred target function(s). This method returns immediately.
// Call to Cancel() does not mean the target function(s) is aborted if it is
// running already. Target function(s) will not be called if it is not
// triggered yet when Cancel() is called.
// Any call to *Flow.Get() after Cancel() is invoked will return an error
// indicating aborted.
func (fl *Flow) Cancel() {
	if fl.future == nil {
		return
	}
	fl.future.Cancel()
	for _, s := range fl.steps {
		if s.future != nil {
			s.future.Cancel()
		}
	}
}
package utils
import (
"net"
"regexp"
"strings"
)
// ResolvesToIp resolves a given DNS name and checks whether the result matches
// the expected IP address. Performs a live DNS lookup (network I/O); the
// result therefore depends on the resolver environment.
func ResolvesToIp(hostname string, expectedIp string) bool {
	// Return false if expected IP is invalid
	if len(expectedIp) == 0 || net.ParseIP(expectedIp) == nil {
		return false
	}
	// Return false if given hostname is not valid
	if len(hostname) == 0 {
		return false
	}
	// Return false if hostname lookup failed
	ips, err := net.LookupIP(hostname)
	if err != nil {
		return false
	}
	// Return true if hostname lookup returned single IP which matches expected IP
	if len(ips) == 1 && ips[0].String() == expectedIp {
		return true
	}
	// Return false if lookup returned zero or more than one IPs
	// If zero: Hostname obviously not pointing to expected IP
	// If >1: Hostname not clearly pointing to expected IP
	return false
}
// ResolvesToHostname checks whether a given IP reverse resolves to the
// expected hostname. Performs a live reverse DNS lookup (network I/O).
func ResolvesToHostname(ip string, hostname string) bool {
	// Return false if IP is invalid
	if len(ip) == 0 || net.ParseIP(ip) == nil {
		return false
	}
	// Return false if given hostname is not valid
	if len(hostname) == 0 {
		return false
	}
	// Return false if reverse lookup failed
	resolvedHostnames, err := net.LookupAddr(ip)
	if err != nil {
		return false
	}
	// Return true if one of the resolved hostnames matches the given one.
	// LookupAddr typically returns fully-qualified names ending in ".", hence
	// the trailing-dot trim before comparing.
	for _, resolvedHostname := range resolvedHostnames {
		resolvedHostname = strings.TrimRight(resolvedHostname, ".")
		if resolvedHostname == hostname {
			return true
		}
	}
	// Return false if reverse lookup results do not contain hostname
	return false
}
// IsValidHostname determines whether a given hostname is a plausible one.
//
// Fix: the previous implementation's RFC1035 regex was mis-anchored — in
// `^A|B$` the alternation binds looser than the anchors, so the right branch
// (`[[:alpha:]]?$`) matched every string and the pattern rejected nothing
// (e.g. "a..b" and "ab_cd" were accepted). This version validates each
// dot-separated label explicitly: 1-63 characters, alphanumerics plus inner
// hyphens, starting and ending with an alphanumeric (RFC 1035/1123 labels).
func IsValidHostname(hostname string) bool {
	// Convert to lower case, as cases carry no semantics in domain names.
	hostname = strings.ToLower(hostname)
	// Return false on empty strings
	if len(hostname) == 0 {
		return false
	}
	// Return false if hostname is actually an IPv4/6 address
	if net.ParseIP(hostname) != nil {
		return false
	}
	// Each label: leading alnum, optional run of alnum/hyphen ending in alnum,
	// at most 63 chars total. An empty label (leading/trailing/double dot)
	// fails the match, as do spaces, underscores and other invalid characters.
	labelRegex := regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]{0,61}[[:alnum:]])?$`)
	for _, label := range strings.Split(hostname, ".") {
		if !labelRegex.MatchString(label) {
			return false
		}
	}
	// Return true as valid hostname
	return true
}
// IsValidIp determines whether a given string is a valid IPv4/IPv6 address
func IsValidIp(s string) bool {
	return net.ParseIP(s) != nil
}

// IsValidIpV4 determines whether a given string is a valid IPv4 address
func IsValidIpV4(s string) bool {
	// IPv4 textual form contains at most one ":" (none, in practice).
	return IsValidIp(s) && strings.Count(s, ":") < 2
}

// IsValidIpV6 determines whether a given string is a valid IPv6 address
func IsValidIpV6(s string) bool {
	// IPv6 textual form contains at least two ":" separators.
	return IsValidIp(s) && strings.Count(s, ":") >= 2
}

// IsValidIpRange determines whether a given string is a valid network range
// in CIDR notation.
func IsValidIpRange(s string) bool {
	_, _, err := net.ParseCIDR(s)
	return err == nil
}
// IsValidAddress determines whether a given string is a valid IPv4, IPv6 or hostname, but NOT a network range
func IsValidAddress(s string) bool {
if IsValidIp(s) {
return true
} else if IsValidHostname(s) {
return true
}
return false
} | utils/network.go | 0.829596 | 0.451327 | network.go | starcoder |
package evop
import "fmt"
type (
	// gene is one node of a binary tree encoded as a flat genome slice.
	gene struct {
		key string // payload; the empty string marks an empty/"miss" node
		weight int // node weight used by the eval fitness function
	}
	// operator should return a genome and information if passed genome was modified.
	operator func([]*gene) ([]*gene, bool)
	// constraint is kind of a filter which lets us eliminate incorrectly encoded trees.
	constraint func([]*gene) bool
)
// isEmpty returns true if gene has no information.
// A gene is empty if:
// 1) is nil,
// 2) key is an empty string (no information) -
// it represents a "miss" node in a tree (e.g. we couldn't find a key with certain information).
// "Miss" nodes are also empty genes (leaves), but with weight > 0.
func (g *gene) isEmpty() bool {
	return nil == g || "" == g.key
}
// equal reports whether g and gg are both nil, or both non-nil with identical
// key and weight (struct value comparison).
func (g *gene) equal(gg *gene) bool {
	if g == nil && gg == nil {
		return true
	}
	if g == nil || gg == nil {
		return false
	}
	return *g == *gg
}
// String returns a gene in "printing friendly" format:
// "(nil)" for nil, "(weight)" for a miss node, "(key/weight)" otherwise.
func (g *gene) String() string {
	if nil == g {
		return "(nil)"
	}
	if "" == g.key {
		return fmt.Sprintf("(%d)", g.weight)
	}
	return fmt.Sprintf("(%s/%d)", g.key, g.weight)
}
// clone returns a shallow copy of the given genome; a nil genome stays nil.
func clone(genome []*gene) []*gene {
	if genome == nil {
		return nil
	}
	dup := make([]*gene, len(genome))
	copy(dup, genome)
	return dup
}

// feasible reports whether the genome satisfies every supplied constraint.
// A genome with no constraints is trivially feasible.
func feasible(genome []*gene, consts ...constraint) bool {
	for _, check := range consts {
		if !check(genome) {
			return false
		}
	}
	return true
}
// eval evaluates a given genome with fitness function: ∑wi(hi + 1), i in [0, n)
// where each gene's weight is multiplied by the depth at which it sits in the
// preorder-encoded tree (root depth = 1). The inner closure mutates the
// captured genome slice header to advance through the encoding as it recurses.
func eval(genome []*gene) int {
	var fn func(int) int
	// fitness function
	fn = func(h int) int {
		if len(genome) == 0 || genome[0] == nil {
			return 0
		}
		if genome[0].isEmpty() {
			// miss node - no information, but it has a weight.
			return h * genome[0].weight
		}
		// Internal node: score it at the current depth, then descend.
		s := h * genome[0].weight
		genome = genome[1:] // advance past the current gene, walk the left subtree
		s += fn(h + 1)
		genome = genome[1:] // advance again (encoding detail), walk the right subtree
		s += fn(h + 1)
		return s
	}
	// start from the root (height: 0 + 1)
	return fn(1)
}
// (a, b, [c, d, e], f, g) -> (a, b, [e, d, c], f, g)
// inversion reverses a randomly chosen sub-range of the genome. Returns the
// genome unmodified (and false) when the two random indices coincide.
// randIntn is defined elsewhere in this package.
func inversion(genome []*gene) ([]*gene, bool) {
	n := len(genome)
	k1, k2 := randIntn(n), randIntn(n)
	if k1 == k2 {
		return genome, false
	}
	if k1 > k2 {
		k1, k2 = k2, k1
	}
	return inversionAt(genome, k1, k2), true
}
// inversionAt reverses genome[k1..k2] in place (k1 <= k2 expected).
func inversionAt(genome []*gene, k1, k2 int) []*gene {
	for k1 < k2 {
		genome[k1], genome[k2] = genome[k2], genome[k1]
		k1++
		k2--
	}
	return genome
}
// (a, [b], c, d, e, [f], g) -> (a, [f], c, d, e, [b], g)
// swap exchanges two randomly chosen genes (randNormIntn is defined elsewhere).
// Returns false when the two random indices coincide.
func swap(genome []*gene) ([]*gene, bool) {
	n := len(genome)
	k1, k2 := randNormIntn(n), randNormIntn(n)
	if k1 == k2 {
		return genome, false
	}
	return swapAt(genome, k1, k2), true
}
// swapAt exchanges genome[k1] and genome[k2] in place.
func swapAt(genome []*gene, k1, k2 int) []*gene {
	genome[k1], genome[k2] = genome[k2], genome[k1]
	return genome
}
// ([a, b, c, d] [e, f], g) -> ([e, f] [a, b, c, d], g)
// crossover rotates the genome around a random cut point, leaving the final
// two genes untouched.
func crossover(genome []*gene) ([]*gene, bool) {
	n := len(genome)
	k := randIntn(n - 3)
	return crossoverAt(genome, k), true
}
// crossoverAt swaps the head genome[0..k] with the middle genome[k+1..n-3]
// in place; the last two genes keep their positions.
func crossoverAt(genome []*gene, k int) []*gene {
	n := len(genome)
	head, tail := clone(genome[0:k+1]), clone(genome[k+1:n-2])
	copy(genome[0:len(tail)], tail)
	copy(genome[len(tail):], head)
	return genome
}
// splayLeft scans the genome (from index 1) keeping a balance of empty (cnt0)
// vs non-empty (cnt1) genes; in the preorder encoding the point where empties
// first outnumber non-empties marks the end of the root's encoding. If the
// gene just past that point is non-empty, it is rotated to the front.
// Returns false when no such rotation is possible.
func splayLeft(genome []*gene) ([]*gene, bool) {
	n := len(genome)
	cnt0, cnt1 := 0, 0
	for i := 1; i < n; i++ {
		if genome[i].isEmpty() {
			cnt0++
		} else {
			cnt1++
		}
		if cnt0 > cnt1 {
			k := i + 1
			if k < n && !genome[k].isEmpty() {
				return splayLeftAt(genome, k), true
			}
		}
	}
	return genome, false
}
// splayLeftAt moves genome[k] to the front, shifting genome[0..k-1] right by one.
func splayLeftAt(genome []*gene, k int) []*gene {
	g := genome[k]
	copy(genome[1:k+1], genome[0:k])
	genome[0] = g
	return genome
}
// splayLeftSubTree applies splayLeft to a randomly selected subtree.
func splayLeftSubTree(genome []*gene) ([]*gene, bool) {
	n := len(genome)
	return splayLeftSubTreeAt(genome, randIntn(n))
}
// splayLeftSubTreeAt locates the subtree enclosing index root (via
// subTreeIndex), finds where its encoding ends using the same empty/non-empty
// balance, and splays that sub-slice in place.
func splayLeftSubTreeAt(genome []*gene, root int) ([]*gene, bool) {
	n := len(genome)
	k1 := subTreeIndex(genome, root)
	cnt0, cnt1 := 0, 0
	for i := k1; i < n; i++ {
		if genome[i].isEmpty() {
			cnt0++
		} else {
			cnt1++
		}
		if cnt0 > cnt1 {
			k2 := i
			if k2 < n {
				_, b := splayLeft(genome[k1 : k2+1])
				return genome, b
			}
		}
	}
	return genome, false
}
// splayRight is the mirror operation: when the second gene is non-empty, the
// current root (genome[0]) is pushed to the end of its encoding span and the
// rest shifts left by one.
func splayRight(genome []*gene) ([]*gene, bool) {
	n := len(genome)
	if n > 1 && genome[1] != nil {
		k1 := 1
		cnt0, cnt1 := 0, 0
		for i := 2; i < n; i++ {
			if genome[i].isEmpty() {
				cnt0++
			} else {
				cnt1++
			}
			if cnt0 > cnt1 {
				k2 := i
				return splayRightAt(genome, k1, k2), true
			}
		}
	}
	return genome, false
}
// splayRightAt shifts genome[k1..k2] left by one and places the old genome[0]
// at position k2.
func splayRightAt(genome []*gene, k1, k2 int) []*gene {
	g := genome[0]
	copy(genome[0:], genome[k1:k2+1])
	genome[k2] = g
	return genome
}
// splayRightSubTree applies the splayRight rotation to a randomly selected
// subtree (no-op when the genome has fewer than two genes or genome[1] is nil).
func splayRightSubTree(genome []*gene) ([]*gene, bool) {
	n := len(genome)
	if n > 1 && genome[1] != nil {
		return splayRightSubTreeAt(genome, randIntn(n))
	}
	return genome, false
}
// splayRightSubTreeAt mirrors splayLeftSubTreeAt using splayRight on the
// located subtree span.
func splayRightSubTreeAt(genome []*gene, root int) ([]*gene, bool) {
	n := len(genome)
	k1 := subTreeIndex(genome, root)
	cnt0, cnt1 := 0, 0
	for i := k1; i < n; i++ {
		if genome[i].isEmpty() {
			cnt0++
		} else {
			cnt1++
		}
		if cnt0 > cnt1 {
			k2 := i
			if k2 < n {
				_, b := splayRight(genome[k1 : k2+1])
				return genome, b
			}
		}
	}
	return genome, false
}
// subTreeIndex walks backwards from root, counting empty genes up and
// non-empty genes down, to find the start index of the subtree whose encoding
// contains genome[root]; falls back to 0 (the whole genome).
func subTreeIndex(genome []*gene, root int) int {
	n := 0
	for k := root; k > 0; k-- {
		if genome[k].isEmpty() {
			n++
			continue
		}
		n--
		if n < 2 {
			return k
		}
	}
	return 0
}
// isBinTree is a constraint which checks if given genome is a correctly
// encoded binary tree: in every proper prefix the count of empty genes must
// not exceed the count of non-empty genes, and the final gene must be empty
// (the closing leaf of the preorder encoding).
func isBinTree(genome []*gene) bool {
	n := len(genome) - 1
	cnt0, cnt1 := 0, 0
	for i := 0; i < n; i++ {
		if genome[i].isEmpty() {
			cnt0++
		} else {
			cnt1++
		}
		if cnt0 > cnt1 {
			return false
		}
	}
	return genome[n].isEmpty()
}
// isBST is a constraint which checks if given genome is a correctly encoded
// binary search tree. The function assumes that genome is a correctly encoded
// binary tree, and only checks BST constraints (left < root < right) by
// performing an in-order walk and verifying the visited keys are ascending.
func isBST(genome []*gene) bool {
	var (
		keys []string
		inorder func() error
	)
	// inorder consumes the captured genome slice; keys holds the last two
	// visited keys so consecutive order violations can be detected.
	inorder = func() error {
		if len(genome) == 0 || genome[0].isEmpty() {
			return nil
		}
		root := genome[0].key
		genome = genome[1:]
		if err := inorder(); err != nil {
			return err
		}
		keys = append(keys, root)
		if n := len(keys); n > 1 {
			if keys[n-2] > keys[n-1] {
				return fmt.Errorf("isBST: %s > %s", keys[n-2], keys[n-1])
			}
			keys = keys[1:]
		}
		genome = genome[1:]
		if err := inorder(); err != nil {
			return err
		}
		return nil
	}
	err := inorder()
	return err == nil
}
func equal(g1 []*gene, g2 []*gene) bool {
if len(g1) != len(g2) {
return false
}
for i, g := range g1 {
if !g.equal(g2[i]) {
return false
}
}
return true
} | evop.go | 0.766992 | 0.475118 | evop.go | starcoder |
package godag
// DAG represents a directed acyclic graph.
type DAG struct {
	nodes map[string]*Node // label -> node
	weights map[string]int // label -> 1 + longest dependency chain; built lazily by Order
}
// New returns a new DAG instance.
func New() *DAG {
	return &DAG{nodes: make(map[string]*Node)}
}
// Insert creates a new node with the specified label and list of dependency labels.
// Inserting the same label twice overwrites the earlier node.
func (d *DAG) Insert(label string, dependencies []string) {
	d.nodes[label] = &Node{Label: label, Dependencies: dependencies}
}
// Order returns an ordered slice of strings for the DAG: nodes are emitted in
// ascending weight (weight 1 = no dependencies), so every node appears after
// all of its dependencies. Returns an error for missing nodes or cycles.
// NOTE(review): the order of labels within one weight bucket follows map
// iteration and is therefore nondeterministic.
func (d *DAG) Order() ([]string, error) {
	err := d.allWeights()
	if err != nil {
		return nil, err
	}
	// Bucket labels by weight...
	ows := make(map[int][]*Node)
	for label, weight := range d.weights {
		ows[weight] = append(ows[weight], d.nodes[label])
	}
	// ...then emit buckets 1, 2, 3, ... until a gap is hit (weights are
	// contiguous by construction).
	ordered := make([]string, 0, len(ows))
	i := 1
	for {
		list, ok := ows[i]
		if !ok {
			break
		}
		for _, item := range list {
			ordered = append(ordered, item.Label)
		}
		i++
	}
	return ordered, nil
}
// allWeights computes and caches the weight (dependency depth) of every
// node in d.weights, discarding any previous cache.
func (d *DAG) allWeights() error {
	d.weights = make(map[string]int)
	for label, node := range d.nodes {
		if _, ok := d.weights[label]; !ok {
			w, err := d.findWeight(node)
			if err != nil {
				return err
			}
			d.weights[label] = w
		}
	}
	return nil
}

// findWeight returns 1 plus the maximum weight of node's dependencies,
// so a node with no dependencies has weight 1. Results are memoized in
// d.weights. The value -1 is used as an "in progress" sentinel: seeing a
// dependency cached as -1 means the recursion re-entered a node that is
// still being resolved, i.e. the graph has a cycle.
func (d *DAG) findWeight(node *Node) (int, error) {
	var err error
	var max int
	d.weights[node.Label] = -1 // mark in-progress for cycle detection
	for _, dep := range node.Dependencies {
		w, ok := d.weights[dep]
		if !ok {
			n, ok := d.nodes[dep]
			if !ok {
				return 0, ErrMissingNode(dep)
			}
			w, err = d.findWeight(n)
			if err != nil {
				return 0, err
			}
			d.weights[dep] = w
		} else {
			if w == -1 {
				return 0, ErrCyclicLoop(dep)
			}
		}
		if w > max {
			max = w
		}
	}
	return 1 + max, nil
}
// Node is a labelled graph vertex together with the labels of the nodes
// it depends on.
type Node struct {
	Label        string
	Dependencies []string
}

// String returns the node's label.
func (n Node) String() string {
	return n.Label
}
// ErrMissingNode reports a dependency label that has no corresponding node.
type ErrMissingNode string

// Error implements the error interface.
func (e ErrMissingNode) Error() string {
	const prefix = "cannot find node: "
	return prefix + string(e)
}

// ErrCyclicLoop reports a dependency cycle detected at the named node.
type ErrCyclicLoop string

// Error implements the error interface.
func (e ErrCyclicLoop) Error() string {
	const prefix = "cyclic loop detected: "
	return prefix + string(e)
}
package forGraphBLASGo
import (
"github.com/intel/forGoParallel/pipeline"
"runtime"
"sync/atomic"
)
// transposedMatrix is a lazy transpose view: it swaps row/column
// coordinates on every access and delegates storage to base.
type transposedMatrix[T any] struct {
	nrows, ncols int                 // size of the transposed view (base is ncols x nrows)
	base         *matrixReference[T] // the underlying, untransposed matrix
}

// newTransposedMatrix wraps ref in a transpose view. Transposing a
// transpose unwraps it and returns the original base reference instead
// of stacking two views.
func newTransposedMatrix[T any](ref *matrixReference[T]) *matrixReference[T] {
	if baseReferent, ok := ref.get().(transposedMatrix[T]); ok {
		return baseReferent.base
	}
	ncols, nrows := ref.size()
	// The number of stored values is unchanged by transposition, so the
	// base's cached count is carried over to the new reference.
	n := atomic.LoadInt64(&ref.nvalues)
	return newMatrixReference[T](transposedMatrix[T]{
		nrows: nrows,
		ncols: ncols,
		base:  ref,
	}, n)
}

// newTransposedMatrixRaw builds the view struct directly, without the
// unwrap optimization or a wrapping reference.
func newTransposedMatrixRaw[T any](nrows, ncols int, base *matrixReference[T]) transposedMatrix[T] {
	return transposedMatrix[T]{nrows: nrows, ncols: ncols, base: base}
}
// resize resizes the view by resizing the base with swapped dimensions.
func (m transposedMatrix[T]) resize(_ *matrixReference[T], newNRows, newNCols int) *matrixReference[T] {
	return newTransposedMatrix[T](m.base.resize(newNCols, newNRows))
}

// size returns the dimensions of the transposed view.
func (m transposedMatrix[T]) size() (nrows, ncols int) {
	return m.nrows, m.ncols
}

// nvals returns the number of stored values (unchanged by transposition).
func (m transposedMatrix[T]) nvals() int {
	return m.base.nvals()
}

// setElement stores value at (row, col) of the view, i.e. at (col, row)
// of the base. The -1 appears to mark the value count as unknown for the
// new reference — confirm against newMatrixReference.
func (m transposedMatrix[T]) setElement(_ *matrixReference[T], value T, row, col int) *matrixReference[T] {
	return newMatrixReference[T](newTransposedMatrixRaw[T](m.nrows, m.ncols, m.base.setElement(value, col, row)), -1)
}

// removeElement deletes the entry at (row, col) of the view.
func (m transposedMatrix[T]) removeElement(_ *matrixReference[T], row, col int) *matrixReference[T] {
	return newMatrixReference[T](newTransposedMatrixRaw[T](m.nrows, m.ncols, m.base.removeElement(col, row)), -1)
}

// extractElement fetches the entry at (row, col) of the view.
func (m transposedMatrix[T]) extractElement(row, col int) (T, bool) {
	return m.base.extractElement(col, row)
}
// transposeMatrixPipeline builds a pipeline that produces the transpose
// of base: it runs each of base's column pipelines in sequence, relabels
// every column slice as a row slice, and funnels the results into a
// channel that feeds the returned pipeline.
func transposeMatrixPipeline[T any](base *matrixReference[T]) *pipeline.Pipeline[any] {
	colPipelines := base.getColPipelines()
	ch := make(chan any, runtime.GOMAXPROCS(0))
	var np pipeline.Pipeline[any]
	np.Source(pipeline.NewChan(ch))
	np.Notify(func() {
		// Column pipelines run one after another, so output arrives in
		// column order.
		for pi, p := range colPipelines {
			p.p.Add(
				pipeline.Par(pipeline.Receive(func(_ int, data any) any {
					// Convert a column slice of the base into a row slice of
					// the transpose, translating copy-on-write flags (the
					// cow0 flag becomes cow1; cowv is kept as-is).
					slice := data.(vectorSlice[T])
					ncow := slice.cow & cowv
					if slice.cow&cow0 != 0 {
						ncow |= cow1
					}
					result := matrixSlice[T]{
						cow:    ncow,
						rows:   make([]int, len(slice.values)),
						cols:   slice.indices,
						values: slice.values,
					}
					// Every entry of this slice lies in the same output row,
					// namely the base column index p.index.
					for i := range result.rows {
						result.rows[i] = p.index
					}
					return result
				})),
				pipeline.Ord(pipeline.Receive(func(_ int, data any) any {
					// Forward downstream unless either pipeline is cancelled.
					select {
					case <-p.p.Context().Done():
					case <-np.Context().Done():
					case ch <- data:
					}
					return nil
				})),
			)
			p.p.Run()
			if err := p.p.Err(); err != nil {
				panic(err)
			}
			colPipelines[pi].p = nil // release the finished pipeline
		}
		close(ch)
	})
	return &np
}
// getPipeline streams all entries of the transposed view.
func (m transposedMatrix[T]) getPipeline() *pipeline.Pipeline[any] {
	return transposeMatrixPipeline(m.base)
}

// getRowPipeline streams row `row` of the view — column `row` of the base.
func (m transposedMatrix[T]) getRowPipeline(row int) *pipeline.Pipeline[any] {
	return m.base.getColPipeline(row)
}

// getColPipeline streams column `col` of the view — row `col` of the base.
func (m transposedMatrix[T]) getColPipeline(col int) *pipeline.Pipeline[any] {
	return m.base.getRowPipeline(col)
}

// getRowPipelines returns per-row pipelines (the base's column pipelines).
func (m transposedMatrix[T]) getRowPipelines() []matrix1Pipeline {
	return m.base.getColPipelines()
}

// getColPipelines returns per-column pipelines (the base's row pipelines).
func (m transposedMatrix[T]) getColPipelines() []matrix1Pipeline {
	return m.base.getRowPipelines()
}

// optimized reports whether the underlying matrix is optimized.
func (m transposedMatrix[T]) optimized() bool {
	return m.base.optimized()
}

// optimize optimizes the underlying matrix in place and returns the view.
func (m transposedMatrix[T]) optimize() functionalMatrix[T] {
	m.base.optimize()
	return m
}
package indns
import (
"net"
)
// RecordType identifies a DNS resource record type.
type RecordType uint16

// Types of DNS records. The values must match the standard ones.
//
// All constants are explicitly typed RecordType: in the original block
// only TypeA carried the type, leaving the rest as untyped ints.
const (
	TypeA    RecordType = 1
	TypeNS   RecordType = 2
	TypeTXT  RecordType = 16
	TypeAAAA RecordType = 28
	TypeANY  RecordType = 255 // Only for matching against actual resource types.
)
// Record is a single DNS resource record value.
type Record interface {
	// DeepCopy returns an independent copy of the record.
	DeepCopy() Record
	// IsZero reports whether the record holds no value.
	IsZero() bool
	// Type returns the record's DNS type code.
	Type() RecordType
}

// Concrete record kinds, one per supported DNS type.
type RecordA IPRecord
type RecordNS StringRecord
type RecordTXT StringsRecord
type RecordAAAA IPRecord

// DeepCopy implementations delegate to the underlying record kind.
func (r RecordA) DeepCopy() Record    { return RecordA((*IPRecord)(&r).DeepCopy()) }
func (r RecordNS) DeepCopy() Record   { return RecordNS((*StringRecord)(&r).DeepCopy()) }
func (r RecordTXT) DeepCopy() Record  { return RecordTXT((*StringsRecord)(&r).DeepCopy()) }
func (r RecordAAAA) DeepCopy() Record { return RecordAAAA((*IPRecord)(&r).DeepCopy()) }

// IsZero reports emptiness of each record kind's value.
func (r RecordA) IsZero() bool    { return len(r.Value) == 0 }
func (r RecordNS) IsZero() bool   { return r.Value == "" }
func (r RecordTXT) IsZero() bool  { return len(r.Values) == 0 }
func (r RecordAAAA) IsZero() bool { return len(r.Value) == 0 }

// Type returns the DNS type code of each record kind.
func (RecordA) Type() RecordType    { return TypeA }
func (RecordNS) Type() RecordType   { return TypeNS }
func (RecordTXT) Type() RecordType  { return TypeTXT }
func (RecordAAAA) Type() RecordType { return TypeAAAA }
// Records contains Record*-type items (values, not pointers).
type Records []Record

// Addressable reports whether the set contains at least one address
// record (A or AAAA).
func (rs Records) Addressable() bool {
	for _, rec := range rs {
		t := rec.Type()
		if t == TypeA || t == TypeAAAA {
			return true
		}
	}
	return false
}

// DeepCopy returns an independent copy of the record set.
func (rs Records) DeepCopy() Records {
	out := make(Records, len(rs))
	for i := range rs {
		out[i] = rs[i].DeepCopy()
	}
	return out
}
type IPRecord struct {
Value net.IP
TTL uint32
}
func (r *IPRecord) DeepCopy() IPRecord {
return IPRecord{
Value: append(net.IP(nil), r.Value...),
TTL: r.TTL,
}
}
// StringRecord holds a single string value together with its TTL.
type StringRecord struct {
	Value string
	TTL   uint32
}

// DeepCopy returns a copy; strings are immutable, so a plain value copy
// is already fully independent.
func (r *StringRecord) DeepCopy() StringRecord {
	return StringRecord{Value: r.Value, TTL: r.TTL}
}
// StringsRecord holds a list of string values together with its TTL.
type StringsRecord struct {
	Values []string
	TTL    uint32
}

// DeepCopy returns a copy whose Values slice does not alias the
// original. A nil Values stays nil in the copy.
func (r *StringsRecord) DeepCopy() StringsRecord {
	var vals []string
	vals = append(vals, r.Values...)
	return StringsRecord{Values: vals, TTL: r.TTL}
}
package header
/**
* The To header field first and foremost specifies the desired "logical"
* recipient of the request, or the address-of-record of the user or resource
* that is the target of this request. This may or may not be the ultimate
* recipient of the request. Requests and Responses must contain a ToHeader,
* indicating the desired recipient of the Request. The UAS or redirect server
* copies the ToHeader into its Response.
* <p>
* The To header field MAY contain a SIP or SIPS URI, but it may also make use
 * of other URI schemes, i.e. the telURL, when appropriate. All SIP
* implementations MUST support the SIP URI scheme. Any implementation that
* supports TLS MUST support the SIPS URI scheme. Like the From header field,
* it contains a URI and optionally a display name, encapsulated in a
* {@link javax.sip.address.Address}.
* <p>
* A UAC may learn how to populate the To header field for a particular request
* in a number of ways. Usually the user will suggest the To header field
* through a human interface, perhaps inputting the URI manually or selecting
* it from some sort of address book. Using the string to form the user part
* of a SIP URI implies that the User Agent wishes the name to be resolved in the
* domain to the right-hand side (RHS) of the at-sign in the SIP URI. Using
* the string to form the user part of a SIPS URI implies that the User Agent wishes to
* communicate securely, and that the name is to be resolved in the domain to
* the RHS of the at-sign. The RHS will frequently be the home domain of the
* requestor, which allows for the home domain to process the outgoing request.
* This is useful for features like "speed dial" that require interpretation of
* the user part in the home domain.
* <p>
* The telURL may be used when the User Agent does not wish to specify the domain that
* should interpret a telephone number that has been input by the user. Rather,
* each domain through which the request passes would be given that opportunity.
* As an example, a user in an airport might log in and send requests through
* an outbound proxy in the airport. If they enter "411" (this is the phone
* number for local directory assistance in the United States), that needs to
* be interpreted and processed by the outbound proxy in the airport, not the
* user's home domain. In this case, tel:411 would be the right choice.
* <p>
* Two To header fields are equivalent if their URIs match, and their
* parameters match. Extension parameters in one header field, not present in
* the other are ignored for the purposes of comparison. This means that the
* display name and presence or absence of angle brackets do not affect
* matching.
* <ul>
* <li> The "Tag" parameter - is used in the To and From header fields of SIP
* messages. It serves as a general mechanism to identify a dialog, which is
* the combination of the Call-ID along with two tags, one from each
* participant in the dialog. When a UA sends a request outside of a dialog,
* it contains a From tag only, providing "half" of the dialog ID. The dialog
* is completed from the response(s), each of which contributes the second half
* in the To header field. When a tag is generated by a UA for insertion into
* a request or response, it MUST be globally unique and cryptographically
* random with at least 32 bits of randomness. Besides the requirement for
* global uniqueness, the algorithm for generating a tag is implementation
* specific. Tags are helpful in fault tolerant systems, where a dialog is to
* be recovered on an alternate server after a failure. A UAS can select the
* tag in such a way that a backup can recognize a request as part of a dialog
* on the failed server, and therefore determine that it should attempt to
* recover the dialog and any other state associated with it.
* </ul>
* A request outside of a dialog MUST NOT contain a To tag; the tag in the To
* field of a request identifies the peer of the dialog. Since no dialog is
* established, no tag is present.
* <p>
* For Example:<br>
* <code>To: Carol sip:<EMAIL><br>
* To: Duke sip:<EMAIL>;tag=287447</code>
*
* @see AddressHeader
*/
type ToHeader interface {
	AddressHeader
	ParametersHeader

	/**
	 * Sets the tag parameter of the ToHeader. The tag in the To field of a
	 * request identifies the peer of the dialog. If no dialog is established,
	 * no tag is present.
	 * <p>
	 * The To Header MUST contain a new "tag" parameter. When acting as a UAC
	 * the To "tag" is maintained by the SipProvider from the dialog layer,
	 * however when acting as a UAS the To "tag" is assigned by the application.
	 * That is, the tag assignment for outbound responses for messages in a
	 * dialog is only the responsibility of the application for the first
	 * outbound response. After dialog establishment, the stack will take care
	 * of the tag assignment.
	 *
	 * @param tag - the new tag of the To Header
	 * @throws ParseException which signals that an error has been reached
	 * unexpectedly while parsing the Tag value.
	 */
	SetTag(tag string) (ParseException error)

	/**
	 * Gets the tag of the ToHeader. The tag parameter identifies the peer of
	 * the dialog.
	 *
	 * @return the tag parameter of the ToHeader. Returns null if no tag is
	 * present, i.e. no dialog is established.
	 */
	GetTag() string
}
package shape
import (
"fmt"
"gioui.org/f32"
"gioui.org/op"
orderedmap "github.com/wk8/go-ordered-map"
"github.com/wrnrlr/wonderwall/wonder/colornames"
"github.com/wrnrlr/wonderwall/wonder/rtree"
)
// Plane is a two-dimensional surface that extends infinitely far.
type Plane struct {
	Elements *orderedmap.OrderedMap // shape identity -> Shape, in insertion order
	Index    *rtree.RTree           // spatial index over shape bounds
	Offset   f32.Point              // pan offset of the viewport in plane coordinates
	Scale    float32                // zoom factor (1 = unscaled)
	Width    float32                // viewport width in dp; set during View
	Height   float32                // viewport height in dp; set during View
}

// NewPlane returns an empty plane with zero offset and unit scale.
func NewPlane() *Plane {
	return &Plane{
		Elements: orderedmap.New(),
		Index:    &rtree.RTree{},
		Offset:   f32.Point{X: 0, Y: 0},
		Scale:    1,
	}
}
// View draws every shape whose indexed bounds intersect the current
// viewport, then draws a light-green rectangle around the bounds of
// every element (visible or not).
func (p *Plane) View(gtx C) {
	//p.printElements()
	pxs := gtx.Metric.PxPerDp
	cons := gtx.Constraints
	// Viewport size in dp.
	width, height := float32(cons.Max.X)/pxs, float32(cons.Max.Y)/pxs
	p.Width, p.Height = width, height
	center := f32.Pt(p.Offset.X+p.Width/2, p.Offset.Y+p.Height/2)
	//tr := f32.Affine2D{}.Offset(p.Offset).Scale(f32.Point{}, f32.Pt(p.Scale, p.Scale)) //.Offset(p.Center())
	//tr := p.GetTransform()
	defer op.Save(gtx.Ops).Load()
	//op.Affine(tr).Add(gtx.Ops)
	// Query rectangle: the viewport scaled about its center.
	scaledWidth, scaledHeight := width*p.Scale, height*p.Scale
	min := [2]float32{center.X - scaledWidth/2, center.Y - scaledHeight/2}
	max := [2]float32{center.X + scaledWidth/2, center.Y + scaledHeight/2}
	//fmt.Printf("Window: %f,%f, Offset: %v, Scale: %f\n", p.Width, p.Height, p.Offset, p.Scale)
	//fmt.Printf("Min, Max: %v %v\n", min, max)
	//minr, maxr := p.Index.Bounds()
	//fmt.Printf("RTRee Min, Max: %v %v\n", minr, maxr)
	p.Index.Search(min, max, func(min, max [2]float32, key interface{}) bool {
		value, _ := p.Elements.Get(key)
		s, ok := value.(Shape)
		if !ok {
			return true // skip entries without a stored shape, keep searching
		}
		s.Draw(gtx)
		return true
	})
	// NOTE(review): this loop draws bounds for ALL elements, not only the
	// visible ones — presumably a debugging overlay; confirm before release.
	for pair := p.Elements.Oldest(); pair != nil; pair = pair.Next() {
		s, _ := pair.Value.(Shape)
		Rectangle{s.Bounds(), nil, &colornames.Lightgreen, float32(1)}.Draw(gtx)
	}
}
// Within is intended to return the shapes lying inside r.
//
// NOTE(review): the search callback always returns false and the method
// always returns an empty Group — this looks unfinished.
func (p Plane) Within(r f32.Rectangle) Group {
	min, max := [2]float32{r.Min.X, r.Min.Y}, [2]float32{r.Max.X, r.Max.Y}
	p.Index.Search(min, max, func(min [2]float32, max [2]float32, value interface{}) bool {
		return false
	})
	return Group{}
}

// Intersects returns the shapes whose indexed bounds overlap r.
//
// NOTE(review): Insert stores shape IDs in the index, but here the
// callback value is asserted directly to Shape and the search aborts
// (returns false) when that fails — verify what the tree actually holds.
func (p Plane) Intersects(r f32.Rectangle) []Shape {
	var results []Shape
	min, max := [2]float32{r.Min.X, r.Min.Y}, [2]float32{r.Max.X, r.Max.Y}
	p.Index.Search(min, max, func(min [2]float32, max [2]float32, value interface{}) bool {
		s, ok := value.(Shape)
		if !ok {
			return false
		}
		results = append(results, s)
		return true
	})
	return results
}

// Hits returns a shape whose geometry contains pos, or nil when nothing
// is hit. Candidates come from a point query against the bounds index;
// s.Hit refines the test to the shape's actual outline.
func (p Plane) Hits(pos f32.Point) Shape {
	var result Shape
	min, max := [2]float32{pos.X, pos.Y}, [2]float32{pos.X, pos.Y}
	p.Index.Search(min, max, func(min [2]float32, max [2]float32, key interface{}) bool {
		value, found := p.Elements.Get(key)
		if !found {
			return false // TODO this should not be happening
		}
		s, ok := value.(Shape)
		if !ok {
			return false
		}
		if s.Hit(pos) {
			result = s
			return true
		}
		return false
	})
	return result
}
// Insert adds s to the ordered element map and indexes its bounds under
// the shape's identity.
func (p *Plane) Insert(s Shape) {
	p.Elements.Set(s.Identity(), s)
	bounds := s.Bounds()
	min, max := [2]float32{bounds.Min.X, bounds.Min.Y}, [2]float32{bounds.Max.X, bounds.Max.Y}
	p.Index.Insert(min, max, s.Identity())
}

// InsertAll inserts every shape in ss.
func (p *Plane) InsertAll(ss []Shape) {
	for _, s := range ss {
		p.Insert(s)
	}
}

// Update replaces a stored shape with s: the index entry keyed by the
// old bounds is removed and a new one is inserted under the new bounds.
// Shapes that were never inserted are ignored.
func (p *Plane) Update(s Shape) {
	id := s.Identity()
	old, found := p.Elements.Get(id)
	if !found {
		return
	}
	// Delete the index entry using the OLD shape's bounds and the id.
	olds := old.(Shape)
	bounds := olds.Bounds()
	min, max := [2]float32{bounds.Min.X, bounds.Min.Y}, [2]float32{bounds.Max.X, bounds.Max.Y}
	removed := p.Index.Delete(min, max, id)
	fmt.Printf("Removed element: %s: %v\n", id, removed) // NOTE(review): debug print to stdout
	p.Elements.Set(id, s)
	bounds = s.Bounds()
	min, max = [2]float32{bounds.Min.X, bounds.Min.Y}, [2]float32{bounds.Max.X, bounds.Max.Y}
	p.Index.Insert(min, max, id)
}

// UpdateAll updates every shape in ss.
func (p *Plane) UpdateAll(ss []Shape) {
	for _, s := range ss {
		p.Update(s)
	}
}

// Remove deletes s from both the element map and the spatial index.
func (p *Plane) Remove(s Shape) {
	p.Elements.Delete(s.Identity())
	bounds := s.Bounds()
	min, max := [2]float32{bounds.Min.X, bounds.Min.Y}, [2]float32{bounds.Max.X, bounds.Max.Y}
	removed := p.Index.Delete(min, max, s.Identity())
	fmt.Printf("Removed element: %s: %v\n", s.Identity(), removed) // NOTE(review): debug print to stdout
}

// RemoveAll removes every shape in ss.
func (p *Plane) RemoveAll(ss []Shape) {
	for _, s := range ss {
		p.Remove(s)
	}
}

// printElements dumps the bounds of every indexed entry (debug helper).
//
// NOTE(review): Insert stores shape IDs in the index, yet data is
// asserted to Shape here; a failed assertion leaves s nil before
// s.Bounds() is called — verify what the tree stores.
func (p Plane) printElements() {
	p.Index.Scan(func(min, max [2]float32, data interface{}) bool {
		s, _ := data.(Shape)
		fmt.Printf("shape: %v\n", s.Bounds())
		return true
	})
}
// intersects reports whether the two rectangles overlap.
//
// The previous implementation was broken: its first condition
// (r1.Min.X >= r2.Max.X || r2.Max.X >= r1.Min.X) is true for almost any
// pair of rectangles, and the second branch compared Y coordinates
// against X coordinates, so nearly every pair was reported as
// non-intersecting. This is the standard axis-aligned overlap test.
func intersects(r1, r2 f32.Rectangle) bool {
	if r1.Min.X >= r2.Max.X || r2.Min.X >= r1.Max.X {
		return false
	}
	if r1.Min.Y >= r2.Max.Y || r2.Min.Y >= r1.Max.Y {
		return false
	}
	return true
}
// RelativePoint converts a window point to plane coordinates.
//
// NOTE(review): currently the identity — offset and scale are not
// applied; looks unfinished.
func (p Plane) RelativePoint(point f32.Point, gtx C) f32.Point {
	return point
}

// Center returns the plane coordinate at the middle of the viewport.
func (p Plane) Center() f32.Point {
	return f32.Pt(p.Offset.X+p.Width/2, p.Offset.Y+p.Height/2)
}

// GetTransform2 scales about the viewport center, then offsets by it.
func (p Plane) GetTransform2() f32.Affine2D {
	return f32.Affine2D{}.Scale(p.Center(), f32.Pt(p.Scale, p.Scale)).Offset(p.Center())
}

// GetTransform offsets by the pan offset, then scales about the origin.
func (p Plane) GetTransform() f32.Affine2D {
	return f32.Affine2D{}.Offset(p.Offset).Scale(f32.Point{}, f32.Pt(p.Scale, p.Scale))
}
package main
import (
"flag"
"fmt"
"math"
)
// main parses the -i (puzzle input) and -v (part selector) flags and
// prints either the spiral Manhattan distance for square i (v=1) or the
// first spiral-sum value greater than i (v=2) — apparently Advent of
// Code 2017, day 3.
func main() {
	input := flag.Int("i", 1, "Define input to compute steps")
	version := flag.Int("v", 1, "Define a version")
	flag.Parse()
	switch *version {
	case 1:
		x, y := getUlamSpiralCoordinates(*input)
		// Manhattan distance from the spiral's origin.
		steps := int(math.Abs(x) + math.Abs(y))
		fmt.Printf("Steps: %d", steps)
		return
	case 2:
		sum := findPointWithGreaterSummValue(*input)
		fmt.Printf("Greater value: %d", sum)
	}
}
// getUlamSpiralCoordinates returns the (x, y) position of square n on a
// square (Ulam) spiral with square 1 at the origin.
//
// The previous closed-form float implementation (math.stackexchange.com
// /a/1707796) misplaced squares whose root fell exactly on a ring
// boundary: n=1 yielded Manhattan distance 1 instead of 0, and n=1024
// yielded 32 instead of 31. This version does the ring arithmetic with
// exact integers; only the return type remains float64 to preserve the
// original signature. The caller in this file uses |x|+|y| only, which
// is independent of the spiral's orientation.
func getUlamSpiralCoordinates(n int) (float64, float64) {
	if n <= 1 {
		return 0, 0
	}
	// Ring k holds squares in ((2k-1)^2, (2k+1)^2].
	k := int(math.Ceil((math.Sqrt(float64(n)) - 1) / 2))
	side := 2 * k                    // length of one ring side
	ringMax := (2*k + 1) * (2*k + 1) // largest square in ring k
	// Walk backwards from the ring's last square (a corner), one side at
	// a time: bottom, left, top, then right.
	if n >= ringMax-side {
		return float64(k - (ringMax - n)), float64(-k)
	}
	ringMax -= side
	if n >= ringMax-side {
		return float64(-k), float64(-k + (ringMax - n))
	}
	ringMax -= side
	if n >= ringMax-side {
		return float64(-k + (ringMax - n)), float64(k)
	}
	return float64(k), float64(k - (ringMax - n - side))
}
// probably weird solution with computation and memory overhead
//
// findPointWithGreaterSummValue walks the spiral ring by ring, filling
// each cell with the sum of its already-filled neighbours (the spiral
// "stress test" sequence: 1, 1, 2, 4, 5, 10, 11, 23, 25, ...), and
// returns the first value strictly greater than input.
func findPointWithGreaterSummValue(input int) int {
	search := true
	// values maps "x_y" cell coordinates to the stored sum.
	values := make(map[string]int)
	values["0_0"] = 1
	var x int
	var y int
	prevY := 0
	for r := 1; search; r++ {
		// Each ring starts at column x = r, on the row where the walk of
		// the previous ring ended.
		prevX := r
		for direction := 0; direction < 4; direction++ {
			x = prevX
			y = prevY
			for ; isInside(x, y, r); x, y = getNextCartesianCoordiantes(x, y, direction) {
				// Corner cells are revisited when the direction turns;
				// skip anything already filled.
				if _, ok := values[fmt.Sprintf("%d_%d", x, y)]; ok {
					continue
				}
				s := getNextSumm(x, y, direction, values)
				if s > input {
					search = false // dead store: the return below exits directly
					return s
				}
				values[fmt.Sprintf("%d_%d", x, y)] = s
				prevX = x
				prevY = y
			}
		}
	}
	return 0
}

// getNextSumm sums the neighbours of (x, y) selected for the current
// walking direction — the cells behind and beside the walk, which are
// the only ones that can already hold values (see getNeighbors).
func getNextSumm(x int, y int, direction int, grid map[string]int) int {
	sum := 0
	for _, n := range getNeighbors(x, y, direction) {
		if v, ok := grid[fmt.Sprintf("%d_%d", n.x, n.y)]; ok {
			sum += v
		}
	}
	return sum
}
// point is a two-dimensional integer grid coordinate.
type point struct {
	x int
	y int
}
//directions:
// 0 is for up
// 1 is for right
// 2 is for down
// 3 is for left
//
// NOTE(review): the labels above disagree with getNextCartesianCoordiantes,
// where direction 1 steps LEFT (x-1) and 3 steps RIGHT (x+1). The
// neighbour sets below are consistent with that code, not these labels:
// for each direction they pick the cell the walk just came from plus the
// three cells on the inner side of the ring.
func getNeighbors(px int, py int, direction int) []point {
	switch direction {
	case 0:
		return []point{
			point{x: px, y: py - 1},
			point{x: px - 1, y: py},
			point{x: px - 1, y: py - 1},
			point{x: px - 1, y: py + 1},
		}
	case 1:
		return []point{
			point{x: px + 1, y: py},
			point{x: px, y: py - 1},
			point{x: px + 1, y: py - 1},
			point{x: px - 1, y: py - 1},
		}
	case 2:
		return []point{
			point{x: px, y: py + 1},
			point{x: px + 1, y: py},
			point{x: px + 1, y: py + 1},
			point{x: px + 1, y: py - 1},
		}
	case 3:
		return []point{
			point{x: px - 1, y: py},
			point{x: px, y: py + 1},
			point{x: px - 1, y: py + 1},
			point{x: px + 1, y: py + 1},
		}
	default:
		return nil
	}
}
// isInside reports whether (x, y) lies inside the square ring bound r,
// i.e. |x| <= r and |y| <= r.
//
// The original round-tripped the int arguments through float64 and
// math.Abs; plain integer arithmetic is equivalent and avoids the
// conversions.
func isInside(x int, y int, r int) bool {
	if x < 0 {
		x = -x
	}
	if y < 0 {
		y = -y
	}
	return x <= r && y <= r
}
// getNextCartesianCoordiantes returns the grid cell one step away from
// (x, y) in the given direction:
//
//	0: up    (y+1)
//	1: left  (x-1)
//	2: down  (y-1)
//	3: right (x+1)
//
// Any other direction value returns (x, y) unchanged.
func getNextCartesianCoordiantes(x int, y int, direction int) (int, int) {
	var dx, dy int
	switch direction {
	case 0:
		dy = 1
	case 1:
		dx = -1
	case 2:
		dy = -1
	case 3:
		dx = 1
	}
	return x + dx, y + dy
}
package main
import (
"bufio"
"fmt"
"io"
"os"
"regexp"
"strconv"
"strings"
)
// Command represents a single stage in the pipeline of commands. It processes
// `data` between `start` and `end` and if it finds a match calls `match` with the
// start and end of the match.
type Command interface {
	Do(data io.ReaderAt, start, end int64, match func(start, end int64)) error
}

// Doner is implemented by commands that buffer state across Do calls and
// need a final flush once the whole input has been processed (see
// NCommand.Done).
type Doner interface {
	Done() error
}

// RegexpCommand is the shared base of the regexp-driven commands
// (x, y, z, g, v): it holds the compiled regexp and a buffered window
// over the input that is repositioned as matches are consumed.
type RegexpCommand struct {
	regexp         *regexp.Regexp
	secRdr         *io.SectionReader // window [start, end) over the input
	rdr            *bufio.Reader     // buffered view of secRdr
	start, _offset int64             // window start and current absolute position
}
// NewRegexpCommand returns a new Command that uses the specified Regexp.
// The `label` chooses which Command to build; i.e. 'x' creates an XCommand.
// Valid labels are 'x', 'g', 'y', 'v' and 'z'; any other label panics.
func NewRegexpCommand(label rune, re *regexp.Regexp) Command {
	switch label {
	case 'x':
		return &XCommand{RegexpCommand{regexp: re}}
	case 'g':
		return &GCommand{RegexpCommand{regexp: re}}
	case 'y':
		return &YCommand{RegexpCommand{regexp: re}}
	case 'v':
		return &VCommand{RegexpCommand{regexp: re}}
	case 'z':
		return &ZCommand{RegexpCommand{regexp: re}, -1}
	default:
		panic(fmt.Sprintf("NewRegexpCommand: called with invalid command rune %c", label))
	}
	// The trailing `return nil` was removed: the switch is a terminating
	// statement (every case returns; the default panics), so the return
	// was unreachable.
}
// reader positions the command over data[start:end) and returns a
// buffered rune reader beginning at start.
func (r *RegexpCommand) reader(data io.ReaderAt, start, end int64) io.RuneReader {
	r.secRdr = io.NewSectionReader(data, start, end-start)
	r.rdr = bufio.NewReader(r.secRdr)
	r.start = start
	r._offset = start
	return r.rdr
}

// offset returns the current absolute position within the input; regexp
// match indices are relative to this position.
func (r *RegexpCommand) offset() int64 {
	return r._offset
}

// updateOffset moves the absolute position to o and reseeks the buffered
// reader so the next regexp search starts there.
func (r *RegexpCommand) updateOffset(o int64) {
	r._offset = o
	r.secRdr.Seek(r._offset-r.start, io.SeekStart)
	r.rdr.Reset(r.secRdr)
}
// XCommand is like the sam editor's x command: loop over matches of this regexp
type XCommand struct {
	RegexpCommand
}

// Do reports each regexp match inside [start, end) to match, in order.
//
// NOTE(review): if the regexp can match the empty string, locs[1] equals
// locs[0] and the offset never advances, so this loops forever — confirm
// that callers never supply such patterns.
func (c XCommand) Do(data io.ReaderAt, start, end int64, match func(start, end int64)) error {
	if emptyRange(start, end) {
		return nil
	}
	rdr := c.reader(data, start, end)
	dbg("XCommand.Do: section reader from %d len %d\n", start, end-start)
	for {
		// Match indices are relative to the current reader position; add
		// offset() to convert them to absolute positions.
		locs := c.RegexpCommand.regexp.FindReaderSubmatchIndex(rdr)
		if locs == nil {
			break
		}
		dbg("XCommand.Do: match at %d-%d\n", locs[0], locs[1])
		match(c.offset()+int64(locs[0]), c.offset()+int64(locs[1]))
		c.updateOffset(c.offset() + int64(locs[1]))
	}
	return nil
}
// YCommand is like the sam editor's y command: loop over strings before, between, and after matches of this regexp
type YCommand struct {
	RegexpCommand
}

// Do reports the complement of the regexp matches inside [start, end):
// the text before the first match, between consecutive matches, and
// after the last one. The trailing region is only reported if non-empty.
func (c YCommand) Do(data io.ReaderAt, start, end int64, match func(start, end int64)) error {
	if emptyRange(start, end) {
		return nil
	}
	rdr := c.reader(data, start, end)
	dbg("YCommand.Do: section reader from %d len %d\n", start, end-start)
	for {
		locs := c.RegexpCommand.regexp.FindReaderSubmatchIndex(rdr)
		if locs == nil {
			break
		}
		dbg("YCommand.Do: re match at %d-%d\n", locs[0], locs[1])
		dbg("YCommand.Do: sending match %d-%d\n", c.offset(), c.offset()+int64(locs[0]))
		// Report the gap from the previous match (or start) to this match.
		match(c.offset(), c.offset()+int64(locs[0]))
		c.updateOffset(c.offset() + int64(locs[1]))
	}
	if c.offset() != end {
		match(c.offset(), end)
	}
	return nil
}
// ZCommand is like the sam editor's y command, but instead of omitting the matching part, it is included
// as part of the following match.
type ZCommand struct {
	RegexpCommand
	matchStart int64 // start of the pending range; -1 until the first regexp match
}

// Do splits [start, end) at the starts of the regexp matches: each
// reported range runs from one match's start to the next match's start,
// and the final range runs to end. Text before the first match is not
// reported.
//
// Do has a value receiver, so matchStart is reinitialized per call and
// never persists on the stored command.
func (c ZCommand) Do(data io.ReaderAt, start, end int64, match func(start, end int64)) error {
	if emptyRange(start, end) {
		return nil
	}
	c.matchStart = -1
	rdr := c.reader(data, start, end)
	dbg("ZCommand.Do: section reader from %d len %d\n", start, end-start)
	for {
		locs := c.RegexpCommand.regexp.FindReaderSubmatchIndex(rdr)
		if locs == nil {
			break
		}
		dbg("ZCommand.Do: match starting at %d\n", locs[0])
		if c.matchStart >= 0 {
			dbg("ZCommand.Do: match at %d-%d. offset=%d\n", c.matchStart, c.offset()+int64(locs[0]), c.offset())
			match(c.matchStart, c.offset()+int64(locs[0]))
			// (A dead store of locs[0] without the offset was removed
			// here; matchStart is assigned correctly just below.)
		}
		c.matchStart = c.offset() + int64(locs[0])
		c.updateOffset(c.offset() + int64(locs[1]))
	}
	if c.matchStart >= 0 && c.offset() != end {
		match(c.matchStart, end)
	}
	return nil
}
// GCommand is like the sam editor's g command: if the regexp matches the range, output the range, otherwise output no range.
type GCommand struct {
	RegexpCommand
}

// Do forwards the whole range [start, end) when the regexp matches
// anywhere inside it; otherwise it reports nothing.
func (c GCommand) Do(data io.ReaderAt, start, end int64, match func(start, end int64)) error {
	if emptyRange(start, end) {
		return nil
	}
	rdr := c.reader(data, start, end)
	dbg("GCommand.Do: section reader from %d len %d\n", start, end-start)
	if c.RegexpCommand.regexp.MatchReader(rdr) {
		dbg("GCommand.Do: match\n")
		match(start, end)
		return nil
	}
	return nil
}
// VCommand is like the sam editor's v command: if the regexp doesn't match the range, output the range, otherwise output no range.
// (The original comment and debug labels said "y command" / "GCommand.Do"
// — copy-paste leftovers from the sibling commands; corrected here.)
type VCommand struct {
	RegexpCommand
}

// Do forwards the whole range [start, end) only when the regexp does
// NOT match anywhere inside it.
func (c VCommand) Do(data io.ReaderAt, start, end int64, match func(start, end int64)) error {
	if emptyRange(start, end) {
		return nil
	}
	rdr := c.reader(data, start, end)
	dbg("VCommand.Do: section reader from %d len %d\n", start, end-start)
	if c.RegexpCommand.regexp.MatchReader(rdr) {
		dbg("VCommand.Do: match\n")
		return nil
	}
	match(start, end)
	return nil
}
// PrintCommand is like the sam editor's p command.
type PrintCommand struct {
	out      io.Writer
	sep      []byte // printed between consecutive matches, not before the first
	printSep bool   // set once the first match has been written
}

// Do writes the bytes of [start, end) to p.out, preceded by the
// separator on every call but the first.
func (p *PrintCommand) Do(data io.ReaderAt, start, end int64, match func(start, end int64)) error {
	buf, err := readRange(data, start, end)
	dbg("PrintCommand.Do(%s)\n", string(buf))
	if err != nil {
		return err
	}
	// Lazily default to stdout so the zero value is usable.
	if p.out == nil {
		p.out = os.Stdout
	}
	if p.printSep && len(p.sep) > 0 {
		p.out.Write(p.sep)
	}
	p.out.Write(buf)
	p.printSep = true
	return nil
}

// NewPrintCommand returns a new PrintCommand that writes to `out` and prints the separator `sep` between each match.
func NewPrintCommand(out io.Writer, sep string) *PrintCommand {
	return &PrintCommand{out: out, sep: []byte(sep)}
}
// PrintLineCommand is like the sam editor's = command.
type PrintLineCommand struct {
	fname string    // file name printed before each line reference
	out   io.Writer // destination for the "fname:line[,line]" output
}

// NewPrintLineCommand returns a command that writes "fname:n[,m]" for
// each range it receives, where n and m are 1-based line numbers.
func NewPrintLineCommand(fname string, out io.Writer) *PrintLineCommand {
	return &PrintLineCommand{fname: fname, out: out}
}

// Do prints the line number on which [start, end) begins, as "fname:n",
// followed by ",m" when the range ends on a different line.
func (p *PrintLineCommand) Do(data io.ReaderAt, start, end int64, match func(start, end int64)) error {
	dbg("PrintLineCommand.Do for %d-%d\n", start, end)
	nl := 1
	var (
		err error
		r   rune
	)
	sr := io.NewSectionReader(data, 0, start)
	rdr := bufio.NewReader(sr)
	// readAndCount drains rdr, incrementing nl for every newline; it
	// leaves the terminating error (io.EOF on success) in err.
	readAndCount := func() {
		for {
			r, _, err = rdr.ReadRune()
			if err != nil {
				break
			}
			if r == '\n' {
				nl++
			}
		}
	}
	// Count lines in data[0:start) to find the range's starting line.
	readAndCount()
	if err != io.EOF {
		return err
	}
	p.out.Write([]byte(fmt.Sprintf("%s:%d", p.fname, nl)))
	// Continue counting inside the range; print the end line only when it
	// differs from the start line.
	scnt := nl
	sr = io.NewSectionReader(data, start, end-start)
	rdr.Reset(sr)
	readAndCount()
	if err != io.EOF {
		return err
	}
	if nl != scnt {
		p.out.Write([]byte(fmt.Sprintf(",%d", nl)))
	}
	p.out.Write([]byte("\n"))
	return nil
}
// NCommand only allows ranges in the range [first,last] to pass. Ranges
// are counted starting from 0.
// Syntax:
// 5 sixth range
// 5:6 sixth and seventh ranges
// 5: sixth range to last
// 0:-2 first range to second-last
// -1 last
type NCommand struct {
	// end == -1 means end is the last possible range.
	// end == -2 means the second last range
	start, end int
	ranges     []Range                // ranges buffered by Do until Done runs
	match      func(start, end int64) // most recent callback passed to Do
}
// NewNCommand parses a range-selection spec such as "5", "5:6", "5:" or
// "0:-2" (see NCommand for the syntax) and returns the corresponding
// command, or the first number-parsing error encountered.
func NewNCommand(s string) (*NCommand, error) {
	parts := strings.Split(s, ":")

	first, err := strconv.Atoi(parts[0])
	if err != nil {
		return nil, err
	}

	// A bare number selects exactly that one range.
	cmd := &NCommand{start: first, end: first}
	if len(parts) > 1 {
		if parts[1] == "" {
			// "N:" selects from N through the last range.
			cmd.end = -1
		} else {
			last, err := strconv.Atoi(parts[1])
			if err != nil {
				return nil, err
			}
			cmd.end = last
		}
	}
	return cmd, nil
}

// MustNCommand is like NewNCommand but panics on a malformed spec.
func MustNCommand(s string) *NCommand {
	cmd, err := NewNCommand(s)
	if err != nil {
		panic(err)
	}
	return cmd
}
// Do buffers the incoming range; the actual filtering happens in Done,
// once the total number of ranges is known (needed to resolve negative
// indices).
func (p *NCommand) Do(data io.ReaderAt, start, end int64, match func(start, end int64)) error {
	p.saveRange(start, end)
	p.match = match
	return nil
}

// saveRange appends [start, end) to the buffered range list.
func (p *NCommand) saveRange(start, end int64) {
	if p.ranges == nil {
		p.ranges = make([]Range, 0, 20)
	}
	p.ranges = append(p.ranges, Range{start, end})
}

// Done resolves the selection bounds against the buffered ranges and
// replays the selected ones to the match callback. Out-of-bounds or
// empty selections produce no output rather than an error.
func (p *NCommand) Done() error {
	p.computeActualStart()
	p.computeActualEnd()
	if p.start > p.end {
		// Treat this as the empty set
		return nil
	}
	if p.start < 0 || p.end > len(p.ranges) {
		return nil
	}
	for _, r := range p.ranges[p.start:p.end] {
		p.match(r.Start, r.End)
	}
	return nil
}

// computeActualStart maps a negative start index to a position counted
// from the end of the buffered ranges.
func (p *NCommand) computeActualStart() {
	if p.start < 0 {
		p.start = len(p.ranges) + p.start
	}
}

// computeActualEnd converts the inclusive user-facing end index into an
// exclusive slice bound, counting negative values from the end.
func (p *NCommand) computeActualEnd() {
	if p.end >= 0 {
		p.end += 1
	} else {
		p.end = len(p.ranges) + p.end + 1
	}
}
package signatures
// SimpleSignature flags a file when one of its attributes (path, name,
// extension or full content) is exactly equal to `match`.
type SimpleSignature struct {
	part        string // which MatchFile field to compare (one of the Part* constants)
	match       string // value the chosen field must equal exactly
	description string // human-readable description of what was found
	comment     string // optional extra note shown with the finding
}
// Match checks whether the file's attribute selected by s.part is
// exactly equal to s.match. It returns a single MatchResult on a hit
// (with Line 0 and an empty LineContent, since no line is involved) and
// nil otherwise, including for unknown part values.
func (s SimpleSignature) Match(file MatchFile) []*MatchResult {
	var candidate string
	switch s.part {
	case PartPath:
		candidate = file.Path
	case PartFilename:
		candidate = file.Filename
	case PartExtension:
		candidate = file.Extension
	case PartContent:
		candidate = file.Content
	default:
		return nil
	}
	if candidate != s.match {
		return nil
	}
	return []*MatchResult{{
		Filename:    file.Filename,
		Path:        file.Path,
		Extension:   file.Extension,
		Line:        0,
		LineContent: "",
	}}
}
// Description returns the signature's human-readable description.
func (s SimpleSignature) Description() string {
	return s.description
}

// Comment returns the signature's optional comment.
func (s SimpleSignature) Comment() string {
	return s.comment
}

// Part returns which file attribute the signature inspects.
func (s SimpleSignature) Part() string {
	return s.part
}
// SimpleSignatures contains simple signatures
var SimpleSignatures = []Signature{
// Extensions
SimpleSignature{
part: PartExtension,
match: ".pem",
description: "Potential cryptographic private key",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".log",
description: "Log file",
comment: "Log files can contain secret HTTP endpoints, session IDs, API keys and other goodies",
},
SimpleSignature{
part: PartExtension,
match: ".pkcs12",
description: "Potential cryptographic key bundle",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".p12",
description: "Potential cryptographic key bundle",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".pfx",
description: "Potential cryptographic key bundle",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".asc",
description: "Potential cryptographic key bundle",
comment: "",
},
SimpleSignature{
part: PartFilename,
match: "otr.private_key",
description: "Pidgin OTR private key",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".ovpn",
description: "OpenVPN client configuration file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".cscfg",
description: "Azure service configuration schema file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".rdp",
description: "Remote Desktop connection file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".mdf",
description: "Microsoft SQL database file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".sdf",
description: "Microsoft SQL server compact database file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".sqlite",
description: "SQLite database file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".sqlite3",
description: "SQLite3 database file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".bek",
description: "Microsoft BitLocker recovery key file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".tpm",
description: "Microsoft BitLocker Trusted Platform Module password file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".fve",
description: "Windows BitLocker full volume encrypted data file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".jks",
description: "Java keystore file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".psafe3",
description: "Password Safe database file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".agilekeychain",
description: "1Password password manager database file",
comment: "Feed it to Hashcat and see if you're lucky",
},
SimpleSignature{
part: PartExtension,
match: ".keychain",
description: "Apple Keychain database file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".pcap",
description: "Network traffic capture file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".gnucash",
description: "GnuCash database file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".kwallet",
description: "KDE Wallet Manager database file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".tblk",
description: "Tunnelblick VPN configuration file",
comment: "",
},
SimpleSignature{
part: PartExtension,
match: ".dayone",
description: "Day One journal file",
comment: "Now it's getting creepy...",
},
// Filenames
SimpleSignature{
part: PartFilename,
match: "secret_token.rb",
description: "Ruby On Rails secret token configuration file",
comment: "If the Rails secret token is known, it can allow for remote code execution (http://www.exploit-db.com/exploits/27527/)",
},
SimpleSignature{
part: PartFilename,
match: "carrierwave.rb",
description: "Carrierwave configuration file",
comment: "Can contain credentials for cloud storage systems such as Amazon S3 and Google Storage",
},
SimpleSignature{
part: PartFilename,
match: "database.yml",
description: "Potential Ruby On Rails database configuration file",
comment: "Can contain database credentials",
},
SimpleSignature{
part: PartFilename,
match: "omniauth.rb",
description: "OmniAuth configuration file",
comment: "The OmniAuth configuration file can contain client application secrets",
},
SimpleSignature{
part: PartFilename,
match: "settings.py",
description: "Django configuration file",
comment: "Can contain database credentials, cloud storage system credentials, and other secrets",
},
SimpleSignature{
part: PartFilename,
match: "jenkins.plugins.publish_over_ssh.BapSshPublisherPlugin.xml",
description: "Jenkins publish over SSH plugin file",
comment: "",
},
SimpleSignature{
part: PartFilename,
match: "credentials.xml",
description: "Potential Jenkins credentials file",
comment: "",
},
SimpleSignature{
part: PartFilename,
match: "LocalSettings.php",
description: "Potential MediaWiki configuration file",
comment: "",
},
SimpleSignature{
part: PartFilename,
match: "Favorites.plist",
description: "Sequel Pro MySQL database manager bookmark file",
comment: "",
},
SimpleSignature{
part: PartFilename,
match: "configuration.user.xpl",
description: "Little Snitch firewall configuration file",
comment: "Contains traffic rules for applications",
},
SimpleSignature{
part: PartFilename,
match: "journal.txt",
description: "Potential jrnl journal file",
comment: "Now it's getting creepy...",
},
SimpleSignature{
part: PartFilename,
match: "knife.rb",
description: "Chef Knife configuration file",
comment: "Can contain references to Chef servers",
},
SimpleSignature{
part: PartFilename,
match: "proftpdpasswd",
description: "cPanel backup ProFTPd credentials file",
comment: "Contains usernames and password hashes for FTP accounts",
},
SimpleSignature{
part: PartFilename,
match: "robomongo.json",
description: "Robomongo MongoDB manager configuration file",
comment: "Can contain credentials for MongoDB databases",
},
SimpleSignature{
part: PartFilename,
match: "filezilla.xml",
description: "FileZilla FTP configuration file",
comment: "Can contain credentials for FTP servers",
},
SimpleSignature{
part: PartFilename,
match: "recentservers.xml",
description: "FileZilla FTP recent servers file",
comment: "Can contain credentials for FTP servers",
},
SimpleSignature{
part: PartFilename,
match: "ventrilo_srv.ini",
description: "Ventrilo server configuration file",
comment: "Can contain passwords",
},
SimpleSignature{
part: PartFilename,
match: "terraform.tfvars",
description: "Terraform variable config file",
comment: "Can contain credentials for terraform providers",
},
SimpleSignature{
part: PartFilename,
match: ".exports",
description: "Shell configuration file",
comment: "Shell configuration files can contain passwords, API keys, hostnames and other goodies",
},
SimpleSignature{
part: PartFilename,
match: ".functions",
description: "Shell configuration file",
comment: "Shell configuration files can contain passwords, API keys, hostnames and other goodies",
},
SimpleSignature{
part: PartFilename,
match: ".extra",
description: "Shell configuration file",
comment: "Shell configuration files can contain passwords, API keys, hostnames and other goodies",
},
} | scanner/signatures/simple.go | 0.677154 | 0.417212 | simple.go | starcoder |
package lengconv
// CmToFt converts centimeters to feet.
func CmToFt(c Centimeter) Foot {
	return Foot(c / FootC) // convert Centimeter to Foot
}

// CmToM converts centimeters to meters.
func CmToM(c Centimeter) Meter {
	return Meter(c / 100) // convert Centimeter to Meter
}

// CmToMm converts centimeters to millimeters.
func CmToMm(c Centimeter) Millimeter {
	return Millimeter(c * 10) // convert Centimeter to Millimeter
}

// CmToIn converts centimeters to inches.
func CmToIn(c Centimeter) Inch {
	return Inch(c / InchC) // convert Centimeter to Inch
}
// FtToCm converts feet to centimeters.
func FtToCm(f Foot) Centimeter {
	return Centimeter(Centimeter(f) * FootC) // convert Foot to Centimeter
}

// FtToM converts feet to meters.
func FtToM(f Foot) Meter {
	return Meter(Meter(f) / Meter(FootM)) // convert Foot to Meter
}

// FtToMm converts feet to millimeters.
func FtToMm(f Foot) Millimeter {
	return Millimeter(Millimeter(f) * FootMM) // convert Foot to Millimeter
}

// FtToIn converts feet to inches (12 inches per foot).
func FtToIn(f Foot) Inch {
	return Inch(Inch(f) * 12) // convert Foot to Inch
}
// MToFt converts meters to feet.
func MToFt(m Meter) Foot {
	return Foot(Foot(m) * FootM) // convert Meter to Foot
}

// MToCm converts meters to centimeters.
func MToCm(m Meter) Centimeter {
	return Centimeter(Centimeter(m) * 100) // convert Meter to Centimeter
}

// MToMm converts meters to millimeters.
func MToMm(m Meter) Millimeter {
	return Millimeter(Millimeter(m) * 1000) // convert Meter to Millimeter
}

// MToIn converts meters to inches.
// NOTE(review): uses the hard-coded literal 39.37 while every other
// conversion uses a named package constant — confirm and consider a constant.
func MToIn(m Meter) Inch {
	return Inch(Inch(m) * 39.37) // convert Meter to Inch
}
// MmToCm converts millimeters to centimeters.
func MmToCm(m Millimeter) Centimeter {
	return Centimeter(Centimeter(m) / 10) // convert Millimeter to Centimeter
}

// MmToFt converts millimeters to feet.
func MmToFt(m Millimeter) Foot {
	return Foot(Foot(m) / Foot(FootMM)) // convert Millimeter to Foot
}

// MmToM converts millimeters to meters.
func MmToM(m Millimeter) Meter {
	return Meter(Meter(m) / 1000) // convert Millimeter to Meter
}

// MmToIn converts millimeters to inches (dividing by InchMm, mm per inch).
func MmToIn(m Millimeter) Inch {
	return Inch(m / InchMm) // convert Millimeter to Inch
}
func InToMm(i Inch) Millimeter {
return Millimeter(Millimeter(i) + InchMm) // преобразование типа Inch в Millimeter
}
func InToCm(i Inch) Centimeter {
return Centimeter(Centimeter(i) * InchC) // преобразование типа Inch в Centimeter
}
func InToM(i Inch) Meter {
return Meter(Meter(i) * InchM) // преобразование типа Inch в Meter
}
func InToF(i Inch) Foot {
return Foot(Foot(i) * InchF) // преобразование типа Inch в Foot
} | conv.go | 0.550607 | 0.571348 | conv.go | starcoder |
package playbook
// ospot pairs an offensive player (O) with the half-court spot it occupies.
type ospot struct {
	o    O            // the player symbol
	spot HalfCourtPos // the player's current position
}

// oMoved returns a copy of the given ospot relocated to the spot "to";
// the input value is left unchanged.
func oMoved(o ospot, to HalfCourtPos) ospot {
	return ospot{
		o:    o.o,
		spot: to,
	}
}
// Setting represents the placement of the five Os on the half court at one
// moment of the play, plus which role currently has the ball.
type Setting struct {
	os   map[ONum]ospot // role number -> player symbol and current spot
	ball ONum           // role number of the ball handler
}

// Ball returns the O that currently has the ball.
func (s Setting) Ball() O {
	return s.os[s.ball].o
}

// OSpot returns, for the given role number, the O playing it together with
// the spot that O currently occupies on the court.
func (s Setting) OSpot(role ONum) (O, HalfCourtPos) {
	rv := s.os[role]
	return rv.o, rv.spot
}
// CreateSetting places the five Os at the given half-court spots and hands
// the ball to the role identified by ballHandler.
func CreateSetting(O1spot HalfCourtPos, O2spot HalfCourtPos, O3spot HalfCourtPos, O4spot HalfCourtPos, O5spot HalfCourtPos, ballHandler ONum) Setting {
	return Setting{
		ball: ballHandler,
		os: map[ONum]ospot{
			O1: {o: O1Sym, spot: O1spot},
			O2: {o: O2Sym, spot: O2spot},
			O3: {o: O3Sym, spot: O3spot},
			O4: {o: O4Sym, spot: O4spot},
			O5: {o: O5Sym, spot: O5spot},
		},
	}
}
// CreateSetup122 creates the 1-2-2 initial half-court setup
// (left and right blocks are filled); O1 starts with the ball.
func CreateSetup122() Setting {
	return CreateSetting(TopOfTheKey, RightWing, LeftWing, RightBlock, LeftBlock, O1)
}

// CreateSetup131 creates the 1-3-1 initial half-court setup.
func CreateSetup131() Setting {
	return CreateSetting(TopOfTheKey, RightWing, LeftWing, RightBlock, HighPost, O1)
}

// CreateSetup14High creates the high 1-4 initial half-court setup.
func CreateSetup14High() Setting {
	return CreateSetting(TopOfTheKey, RightWing, LeftWing, RightElbow, LeftElbow, O1)
}

// CreateSetup212 creates the 2-1-2 initial half-court setup.
func CreateSetup212() Setting {
	return CreateSetting(RightGuard, LeftGuard, RightCorner, LeftCorner, HighPost, O1)
}

// CreateSetup23 creates the 2-3 initial half-court setup.
func CreateSetup23() Setting {
	return CreateSetting(RightGuard, LeftGuard, RightWing, LeftWing, HighPost, O1)
}

// CreateSetup4Out creates the 4-out 1-in initial half-court setup.
func CreateSetup4Out() Setting {
	return CreateSetting(RightGuard, LeftGuard, RightCorner, LeftCorner, RightBlock, O1)
}

// CreateSetupDoubleElbow creates the double-elbow initial half-court setup.
func CreateSetupDoubleElbow() Setting {
	return CreateSetting(TopOfTheKey, RightBlock, LeftBlock, RightElbow, LeftElbow, O1)
}

// CreateSetupHighDoubleStack creates the high double-stack initial half-court setup.
func CreateSetupHighDoubleStack() Setting {
	return CreateSetting(TopOfTheKey, RightElbow, LeftElbow, RightElbow, LeftElbow, O1)
}

// CreateSetupLowDoubleStack creates the low double-stack initial half-court setup.
func CreateSetupLowDoubleStack() Setting {
	return CreateSetting(TopOfTheKey, RightBlock, LeftBlock, RightBlock, LeftBlock, O1)
}

// CreateSetupOpen creates the "Open" (1-2-2) initial half-court setup.
func CreateSetupOpen() Setting {
	return CreateSetting(TopOfTheKey, RightWing, LeftWing, RightCorner, LeftCorner, O1)
}
func copySetting(set Setting) Setting {
return CreateSetting(set.os[O1].spot, set.os[O2].spot, set.os[O3].spot, set.os[O4].spot, set.os[O5].spot, set.ball)
} | playbook/setting.go | 0.682045 | 0.504028 | setting.go | starcoder |
package stepper
import (
. "github.com/conclave/pcduino/core"
)
// Stepper drives a unipolar or bipolar stepper motor through 2 or 4
// control pins.
type Stepper struct {
	direction int    // spin direction: 1 counts up, 0 counts down (set by Step)
	speed     uint   // NOTE(review): never written by the visible methods; SetSpeed stores delay instead
	steps     uint   // steps per revolution of the motor
	delay     uint   // milliseconds between successive steps (set by SetSpeed)
	pins      []byte // control pins, length 2 or 4 (enforced by New)
	count     uint   // current step position within one revolution
	timestamp int64  // Millis() time of the most recent step
}
// New builds a Stepper with the given steps-per-revolution, wired to either
// 2 or 4 control pins (any other pin count returns nil). Every pin is
// configured as an output.
func New(steps uint, pins ...byte) *Stepper {
	if len(pins) != 2 && len(pins) != 4 {
		return nil
	}
	s := &Stepper{steps: steps}
	s.pins = make([]byte, len(pins))
	copy(s.pins, pins)
	for _, p := range s.pins {
		PinMode(p, OUTPUT)
	}
	return s
}
/*
Drives a unipolar or bipolar stepper motor using 2 wires or 4 wires
When wiring multiple stepper motors to a microcontroller,
you quickly run out of output pins, with each motor requiring 4 connections.
By making use of the fact that at any time two of the four motor
coils are the inverse of the other two, the number of
control connections can be reduced from 4 to 2.
A slightly modified circuit around a Darlington transistor array or an L293 H-bridge
connects to only 2 microcontroler pins, inverts the signals received,
and delivers the 4 (2 plus 2 inverted ones) output signals required
for driving a stepper motor.
The sequence of control signals for 4 control wires is as follows:
Step C0 C1 C2 C3
1 1 0 1 0
2 0 1 1 0
3 0 1 0 1
4 1 0 0 1
The sequence of controls signals for 2 control wires is as follows
(columns C1 and C2 from above):
Step C0 C1
1 0 1
2 1 1
3 1 0
4 0 0
The circuits can be found at
http://www.arduino.cc/en/Tutorial/Stepper
*/
// SetSpeed sets the rotation speed in revolutions per minute by deriving the
// millisecond delay between successive steps.
// NOTE(review): this is unsigned integer arithmetic — a large steps*speed
// product truncates the delay to zero; confirm inputs keep delay >= 1.
func (this *Stepper) SetSpeed(speed uint) {
	this.delay = 60 * 1000 / this.steps / speed
}
// Step moves the motor n steps; a negative n reverses the direction.
// The call blocks, busy-waiting on Millis, until all steps have been
// issued, pacing them by the delay computed in SetSpeed.
func (this *Stepper) Step(n int) {
	nn := n
	if nn < 0 {
		nn = -nn
		this.direction = 0 // count down through the step sequence
	} else {
		this.direction = 1 // count up through the step sequence
	}
	for nn > 0 {
		// Issue at most one step per delay window.
		if Millis()-this.timestamp >= int64(this.delay) {
			this.timestamp = Millis()
			// Advance/retreat the step counter, wrapping within one revolution.
			if this.direction == 1 {
				this.count++
				if this.count == this.steps {
					this.count = 0
				}
			} else {
				if this.count == 0 {
					this.count = this.steps
				}
				this.count--
			}
			nn--
			// Energize the coils for the current phase (0..3).
			this.step(byte(this.count % 4))
		}
	}
}

// Version returns the version number of this stepper driver.
func (this *Stepper) Version() int {
	return 4
}
func (this *Stepper) step(n byte) {
if len(this.pins) == 2 {
switch n & 0x03 {
case 0: // 01
DigitalWrite(this.pins[0], LOW)
DigitalWrite(this.pins[1], HIGH)
case 1: // 11
DigitalWrite(this.pins[0], HIGH)
DigitalWrite(this.pins[1], HIGH)
case 2: // 10
DigitalWrite(this.pins[0], HIGH)
DigitalWrite(this.pins[1], LOW)
case 3: // 00
DigitalWrite(this.pins[0], LOW)
DigitalWrite(this.pins[1], LOW)
}
} else {
switch n & 0x03 {
case 0: // 1010
DigitalWrite(this.pins[0], HIGH)
DigitalWrite(this.pins[1], LOW)
DigitalWrite(this.pins[2], HIGH)
DigitalWrite(this.pins[3], LOW)
case 1: // 0110
DigitalWrite(this.pins[0], LOW)
DigitalWrite(this.pins[1], HIGH)
DigitalWrite(this.pins[2], HIGH)
DigitalWrite(this.pins[3], LOW)
case 2: // 0101
DigitalWrite(this.pins[0], LOW)
DigitalWrite(this.pins[1], HIGH)
DigitalWrite(this.pins[2], LOW)
DigitalWrite(this.pins[3], HIGH)
case 3: // 1001
DigitalWrite(this.pins[0], HIGH)
DigitalWrite(this.pins[1], LOW)
DigitalWrite(this.pins[2], LOW)
DigitalWrite(this.pins[3], HIGH)
}
}
} | lib/stepper/stepper.go | 0.516108 | 0.509398 | stepper.go | starcoder |
package rti
import (
"encoding/binary"
"math"
)
// EarthVelocityDataSet holds earth-referenced water-profile velocity data
// in m/s. The matrix is sized from the base data set as Bin x Beam.
type EarthVelocityDataSet struct {
	Base     BaseDataSet      // base dataset (element counts, name length, ...)
	Velocity [][]float32      // velocity data in m/s, indexed [bin][beam]
	Vectors  []VelocityVector // per-bin velocity vector (magnitude and direction)
}
// Decode takes the binary data and decodes it into the ensemble data set.
// Velocity is rebuilt as a [bin][beam] matrix of little-endian float32
// values, and one velocity vector is computed per bin. If data is too
// short the arrays are left zero-valued.
func (vel *EarthVelocityDataSet) Decode(data []byte) {
	// Initialize the 2D array: [Bins][Beams].
	vel.Velocity = make([][]float32, vel.Base.NumElements)
	for i := range vel.Velocity {
		vel.Velocity[i] = make([]float32, vel.Base.ElementMultiplier)
	}
	vel.Vectors = make([]VelocityVector, vel.Base.NumElements)

	// Not enough data.
	if uint32(len(data)) < vel.Base.NumElements*vel.Base.ElementMultiplier*uint32(BytesInFloat) {
		return
	}

	// Decode every beam/bin cell.
	for beam := 0; beam < int(vel.Base.ElementMultiplier); beam++ {
		for bin := 0; bin < int(vel.Base.NumElements); bin++ {
			// Get the location of the data.
			ptr := GetBinBeamIndex(int(vel.Base.NameLen), int(vel.Base.NumElements), beam, bin)
			// Set the data to float.
			bits := binary.LittleEndian.Uint32(data[ptr : ptr+BytesInFloat])
			vel.Velocity[bin][beam] = math.Float32frombits(bits)
		}
	}

	// FIX: vectors were previously recomputed inside the beam loop, doing
	// beams-1 redundant passes per bin on partially-filled rows. Compute
	// each bin's vector once, after all beams are decoded. (Guarded so the
	// degenerate zero-beam case keeps its original zero-valued vectors.)
	if vel.Base.ElementMultiplier > 0 {
		for bin := range vel.Vectors {
			vel.Vectors[bin] = calcVV(vel.Velocity[bin])
		}
	}
}
// calcVV will calculate the velocity vector for each bin.
func calcVV(binData []float32) VelocityVector {
// Init the values
var vv = VelocityVector{Magnitude: BadVelocity, DirectionXNorth: BadVelocity, DirectionYNorth: BadVelocity}
// Need East, North, Vert
if len(binData) < 3 {
return vv
}
// All the data must be good
if binData[0] == BadVelocity || binData[1] == BadVelocity || binData[2] == BadVelocity {
return vv
}
// Magnitude
// Sqrt(East^2 + North^2 + Vert^2)
mag := math.Sqrt(math.Pow(float64(binData[0]), 2) + math.Pow(float64(binData[1]), 2) + math.Pow(float64(binData[2]), 2))
vv.Magnitude = math.Abs(mag)
// Direct X North
// atan2(east, north)
dirXNorth := math.Atan2(float64(binData[0]), float64(binData[1])) * (180.0 / math.Pi)
if dirXNorth < 0.0 {
dirXNorth = 360.0 + dirXNorth
}
vv.DirectionXNorth = dirXNorth
// Direct Y North
// atan2(north, east)
dirYNorth := math.Atan2(float64(binData[1]), float64(binData[2])) * (180.0 / math.Pi)
if dirYNorth < 0.0 {
dirYNorth = 360.0 + dirYNorth
}
vv.DirectionYNorth = dirYNorth
return vv
} | EarthVelocityDataSet.go | 0.671471 | 0.73029 | EarthVelocityDataSet.go | starcoder |
package matrix
import "fmt"
// InsightsVector is a fixed-size vector of float64 values.
// The zero value is not usable; construct one with NewInsightVector or
// NewInsightVectorWithData.
type InsightsVector struct {
	_m     int       // number of elements
	_data  []float64 // element storage, length _m
	_valid bool      // true once the vector has been properly constructed
}

// NewInsightVector creates a vector of m elements, each set to value.
// It panics if m is not positive.
func NewInsightVector(m int, value float64) InsightsVector {
	if m <= 0 {
		panic("[InsightsVector] invalid size")
	}
	data := make([]float64, m)
	for j := range data {
		data[j] = value
	}
	return InsightsVector{_m: m, _data: data, _valid: true}
}

// NewInsightVectorWithData creates a vector backed by data. When deepCopy
// is true the input is copied so later mutations of data do not affect the
// vector; otherwise the slice is aliased directly.
// It panics if data is nil or empty.
func NewInsightVectorWithData(data []float64, deepCopy bool) *InsightsVector {
	if len(data) == 0 {
		panic("[InsightsVector] invalid data")
	}
	vector := &InsightsVector{_m: len(data), _valid: true}
	if deepCopy {
		// BUG FIX: the copy arguments were reversed (copy(data, vector._data)),
		// which overwrote the caller's slice with zeros and left the vector
		// empty. copy(dst, src) copies src into dst.
		vector._data = make([]float64, len(data))
		copy(vector._data, data)
	} else {
		vector._data = data
	}
	return vector
}

// DeepCopy returns a fresh copy of the vector's underlying data slice.
func (iv *InsightsVector) DeepCopy() []float64 {
	dataDeepCopy := make([]float64, iv._m)
	copy(dataDeepCopy, iv._data)
	return dataDeepCopy
}

// Get returns element i.
// It panics if the vector is invalid or i is out of range.
func (iv *InsightsVector) Get(i int) float64 {
	if !iv._valid {
		panic("[InsightsVector] invalid Vector")
	} else if i >= iv._m {
		panic(fmt.Sprintf("[InsightsVector] Index: %d, Size: %d", i, iv._m))
	}
	return iv._data[i]
}

// Size returns the number of elements.
// It panics if the vector is invalid.
func (iv *InsightsVector) Size() int {
	if !iv._valid {
		panic("[InsightsVector] invalid Vector")
	}
	return iv._m
}

// Set stores val at index i.
// It panics if the vector is invalid or i is out of range.
func (iv *InsightsVector) Set(i int, val float64) {
	if !iv._valid {
		panic("[InsightsVector] invalid Vector")
	} else if i >= iv._m {
		panic(
			fmt.Sprintf("[InsightsVector] Index: %d, Size: %d", i, iv._m))
	}
	iv._data[i] = val
}

// Dot returns the dot product of iv and vector.
// It panics if either vector is invalid or their sizes differ.
func (iv *InsightsVector) Dot(vector *InsightsVector) float64 {
	if !iv._valid || !vector._valid {
		panic("[InsightsVector] invalid Vector")
	} else if iv._m != vector.Size() {
		panic("[InsightsVector][dot] invalid vector size.")
	}
	sumOfProducts := 0.
	for i := 0; i < iv._m; i++ {
		sumOfProducts += iv._data[i] * vector.Get(i)
	}
	return sumOfProducts
}
package goment
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/nleeper/goment/locales"
"github.com/nleeper/goment/regexps"
"github.com/tkuchiki/go-timezone"
)
// formatReplacementFunc produces the replacement text for one format token
// from a Goment instance.
type formatReplacementFunc func(*Goment) string

// formatPadding describes the zero-padded variant of a numeric token.
type formatPadding struct {
	token        string // padded token name, e.g. "MM"; empty when there is none
	targetLength int    // minimum number of digits after zero-filling
	forceSign    bool   // prepend "+" for non-negative values
}

// formatReplacements maps format tokens to their replacement functions,
// populated lazily by loadFormatReplacements.
var formatReplacements = map[string]formatReplacementFunc{}
// Format renders the Goment using the given token layout, replacing each
// token with its value. With no argument the ISO-8601-style layout
// "YYYY-MM-DDTHH:mm:ssZ" is used.
func (g *Goment) Format(args ...interface{}) string {
	format := "YYYY-MM-DDTHH:mm:ssZ"
	if len(args) >= 1 {
		format = args[0].(string)
	}
	return convertFormat(g, format)
}
// loadFormatReplacements lazily populates the token replacement table;
// it is a no-op once the map has been filled.
func loadFormatReplacements() {
	if len(formatReplacements) > 0 {
		return
	}
	// Month tokens.
	addFormatReplacement("M", padding("MM", 2), "Mo", func(g *Goment) string {
		return strconv.Itoa(g.Month())
	})
	addFormatReplacement("MMM", emptyPadding(), "", func(g *Goment) string {
		return g.locale.MonthsShort[g.Month()-1]
	})
	addFormatReplacement("MMMM", emptyPadding(), "", func(g *Goment) string {
		return g.locale.Months[g.Month()-1]
	})
	// Day-of-month and day-of-year tokens.
	addFormatReplacement("D", padding("DD", 2), "Do", func(g *Goment) string {
		return strconv.Itoa(g.Date())
	})
	addFormatReplacement("DDD", padding("DDDD", 3), "DDDo", func(g *Goment) string {
		return strconv.Itoa(g.DayOfYear())
	})
	// Year tokens.
	addFormatReplacement("Y", emptyPadding(), "", func(g *Goment) string {
		y := g.Year()
		if y <= 9999 {
			return zeroFill(y, 4, false)
		}
		// Years beyond four digits get an explicit leading "+".
		return "+" + strconv.Itoa(y)
	})
	addFormatReplacement("", padding("YY", 2), "", func(g *Goment) string {
		return strconv.Itoa(g.Year() % 100)
	})
	addFormatReplacement("", padding("YYYY", 4), "", func(g *Goment) string {
		return strconv.Itoa(g.Year())
	})
	addFormatReplacement("", padding("YYYYY", 5), "", func(g *Goment) string {
		return strconv.Itoa(g.Year())
	})
	addFormatReplacement("", padding("YYYYYY", 6, true), "", func(g *Goment) string {
		return strconv.Itoa(g.Year())
	})
	// Day-of-week tokens.
	addFormatReplacement("d", emptyPadding(), "do", func(g *Goment) string {
		return strconv.Itoa(g.Day())
	})
	addFormatReplacement("dd", emptyPadding(), "", func(g *Goment) string {
		return g.locale.WeekdaysMin[g.Day()]
	})
	addFormatReplacement("ddd", emptyPadding(), "", func(g *Goment) string {
		return g.locale.WeekdaysShort[g.Day()]
	})
	addFormatReplacement("dddd", emptyPadding(), "", func(g *Goment) string {
		return g.locale.Weekdays[g.Day()]
	})
	addFormatReplacement("e", emptyPadding(), "", func(g *Goment) string {
		return strconv.Itoa(g.Weekday())
	})
	addFormatReplacement("E", emptyPadding(), "", func(g *Goment) string {
		return strconv.Itoa(g.ISOWeekday())
	})
	// Week-of-year tokens (locale and ISO).
	addFormatReplacement("w", padding("ww", 2), "wo", func(g *Goment) string {
		return strconv.Itoa(g.Week())
	})
	addFormatReplacement("W", padding("WW", 2), "Wo", func(g *Goment) string {
		return strconv.Itoa(g.ISOWeek())
	})
	// Week-year tokens (locale and ISO).
	addFormatReplacement("", padding("gg", 2), "", func(g *Goment) string {
		return strconv.Itoa(g.WeekYear() % 100)
	})
	addFormatReplacement("", padding("gggg", 4), "", func(g *Goment) string {
		return strconv.Itoa(g.WeekYear())
	})
	addFormatReplacement("", padding("ggggg", 5), "", func(g *Goment) string {
		return strconv.Itoa(g.WeekYear())
	})
	addFormatReplacement("", padding("GG", 2), "", func(g *Goment) string {
		return strconv.Itoa(g.ISOWeekYear() % 100)
	})
	addFormatReplacement("", padding("GGGG", 4), "", func(g *Goment) string {
		return strconv.Itoa(g.ISOWeekYear())
	})
	addFormatReplacement("", padding("GGGGG", 5), "", func(g *Goment) string {
		return strconv.Itoa(g.ISOWeekYear())
	})
	// Quarter.
	addFormatReplacement("Q", emptyPadding(), "Qo", func(g *Goment) string {
		return strconv.Itoa(g.Quarter())
	})
	// Hour tokens: 24-hour (H), 12-hour (h), 1-24 (k).
	addFormatReplacement("H", padding("HH", 2), "", func(g *Goment) string {
		return strconv.Itoa(g.Hour())
	})
	addFormatReplacement("h", padding("hh", 2), "", func(g *Goment) string {
		val := 0
		mod := g.Hour() % 12
		if mod == 0 {
			// Hours 0 and 12 both display as 12 on a 12-hour clock.
			val = 12
		} else {
			val = mod
		}
		return strconv.Itoa(val)
	})
	addFormatReplacement("k", padding("kk", 2), "", func(g *Goment) string {
		return strconv.Itoa(g.Hour() + 1)
	})
	// Meridiem (am/pm) tokens: lowercase "a", uppercase "A".
	addFormatReplacement("a", emptyPadding(), "", func(g *Goment) string {
		return g.locale.MeridiemFunc(g.Hour(), g.Minute(), true)
	})
	addFormatReplacement("A", emptyPadding(), "", func(g *Goment) string {
		return g.locale.MeridiemFunc(g.Hour(), g.Minute(), false)
	})
	// Minute and second tokens.
	addFormatReplacement("m", padding("mm", 2), "", func(g *Goment) string {
		return strconv.Itoa(g.Minute())
	})
	addFormatReplacement("s", padding("ss", 2), "", func(g *Goment) string {
		return strconv.Itoa(g.Second())
	})
	// Unix timestamps: seconds (X) and milliseconds (x).
	addFormatReplacement("X", emptyPadding(), "", func(g *Goment) string {
		return fmt.Sprintf("%d", g.ToUnix())
	})
	addFormatReplacement("x", emptyPadding(), "", func(g *Goment) string {
		return fmt.Sprintf("%d", g.ToTime().UnixNano()/int64(time.Millisecond))
	})
	// UTC offset and time zone name tokens.
	addFormatReplacement("Z", emptyPadding(), "", func(g *Goment) string {
		return offset(g, ":")
	})
	addFormatReplacement("ZZ", emptyPadding(), "", func(g *Goment) string {
		return offset(g, "")
	})
	addFormatReplacement("z", emptyPadding(), "", func(g *Goment) string {
		return timezoneAbbr(g)
	})
	addFormatReplacement("zz", emptyPadding(), "", func(g *Goment) string {
		return timezoneAbbr(g)
	})
	addFormatReplacement("zzzz", emptyPadding(), "", func(g *Goment) string {
		return timezoneFullName(g)
	})
}
// addFormatReplacement registers the replacement function f for a token,
// plus (optionally) a zero-padded variant and an ordinal variant, both of
// which reinterpret f's output as an integer.
func addFormatReplacement(token string, padding formatPadding, ordinal string, f formatReplacementFunc) {
	if token != "" {
		formatReplacements[token] = f
	}
	if padding.token != "" {
		// Padded variant: parse f's numeric output and zero-fill it.
		formatReplacements[padding.token] = func(g *Goment) string {
			var val = f(g)
			i, _ := strconv.Atoi(val) // f is expected to emit digits; a parse error yields 0
			return zeroFill(i, padding.targetLength, padding.forceSign)
		}
	}
	if ordinal != "" {
		// Ordinal variant: delegate suffix selection to the locale.
		formatReplacements[ordinal] = func(g *Goment) string {
			var val = f(g)
			i, _ := strconv.Atoi(val)
			return g.locale.OrdinalFunc(i, token)
		}
	}
}
// padding builds a formatPadding for token with the given zero-fill width.
// An optional trailing bool forces a "+" sign on non-negative values.
func padding(token string, length int, forceSign ...bool) formatPadding {
	p := formatPadding{token: token, targetLength: length}
	if len(forceSign) > 0 {
		p.forceSign = forceSign[0]
	}
	return p
}

// emptyPadding returns the zero formatPadding, meaning "no padded variant".
func emptyPadding() formatPadding {
	return formatPadding{}
}
// expandLocaleFormats replaces locale-level format tokens (L, LL, LTS, ...)
// in layout with the locale's corresponding long date formats.
func expandLocaleFormats(layout string, locale locales.LocaleDetails) string {
	return replaceFormatTokens(
		layout,
		regexps.LocaleRegex.FindAllStringIndex(layout, -1),
		func(text string) (string, bool) {
			return locale.LongDateFormat(text)
		},
	)
}
// convertFormat renders layout for g: locale tokens are expanded first,
// bracketed literal text is shielded from substitution with placeholder
// tokens, every format token is replaced, and the literals are restored.
func convertFormat(g *Goment, layout string) string {
	// Replace any Goment locale specific format tokens (LTS, L, LL, etc).
	layout = expandLocaleFormats(layout, g.locale)
	// Replace any bracketed text in layout.
	bracketMatch := regexps.BracketRegex.FindAllString(layout, -1)
	bracketsFound := len(bracketMatch) > 0
	// Replace bracketed text with token like $1.
	// NOTE(review): if the layout itself already contains "$1"-style text,
	// this placeholder scheme could collide — confirm inputs never do.
	if bracketsFound {
		for i := range bracketMatch {
			layout = strings.Replace(layout, bracketMatch[i], makeBracketToken(i), 1)
		}
	}
	// Replace any Goment format tokens that are not standard to Go formatting (DDD, Mo, etc).
	layout = replaceFormatTokens(
		layout,
		regexps.TokenRegex.FindAllStringIndex(layout, -1),
		func(text string) (string, bool) {
			match, ok := formatReplacements[text]
			if ok {
				return match(g), ok
			}
			return "", false
		},
	)
	// Replace back any bracketed text, stripping the surrounding brackets.
	if bracketsFound {
		for i := range bracketMatch {
			layout = strings.Replace(layout, makeBracketToken(i), bracketMatch[i][1:len(bracketMatch[i])-1], 1)
		}
	}
	return layout
}
// replaceFormatTokens substitutes each matched span of layout (index pairs
// as produced by regexp FindAllStringIndex) with the text produced by
// replacementFunc. Spans for which replacementFunc reports false are kept
// unchanged. Because replacements can change the string's length, the
// remaining match indexes are shifted by the difference after each
// substitution. The matches slice is mutated in place.
func replaceFormatTokens(layout string, matches [][]int, replacementFunc func(string) (string, bool)) string {
	for i := range matches {
		start, end := matches[i][0], matches[i][1]
		matchText := layout[start:end]

		replaceText, ok := replacementFunc(matchText)
		if !ok {
			replaceText = matchText
		}

		// Idiom fix: layout[0:start] -> layout[:start], layout[end:len(layout)] -> layout[end:].
		layout = layout[:start] + replaceText + layout[end:]

		// If the replacement text is longer/shorter than the match, shift
		// the remaining indexes so they still point at the right spans.
		if diff := len(replaceText) - len(matchText); diff != 0 {
			for j := i + 1; j < len(matches); j++ {
				matches[j][0] += diff
				matches[j][1] += diff
			}
		}
	}
	return layout
}
// offset formats g's UTC offset as ±HH<sep>MM.
func offset(g *Goment, sep string) string {
	minutes := g.UTCOffset()
	sign := "+"
	if minutes < 0 {
		sign = "-"
		minutes = -minutes
	}
	return sign + zeroFill(minutes/60, 2, false) + sep + zeroFill(minutes%60, 2, false)
}
// timezoneAbbr returns the abbreviated zone name (e.g. "UTC") of g's
// underlying time.
func timezoneAbbr(g *Goment) string {
	tz, _ := g.ToTime().Zone()
	return tz
}

// timezoneFullName resolves the abbreviated zone name to its full name.
// NOTE(review): the lookup error is ignored and tzAbbrInfos[0] is indexed
// unconditionally — an unknown abbreviation would panic; confirm inputs.
func timezoneFullName(g *Goment) string {
	tz := timezone.New()
	tzAbbrInfos, _ := tz.GetTzAbbreviationInfo(timezoneAbbr(g))
	return tzAbbrInfos[0].Name()
}
// zeroFill renders val as a decimal string zero-padded to length digits,
// prefixing "-" for negatives and "+" for non-negatives when forceSign is
// set.
func zeroFill(val int, length int, forceSign bool) string {
	signValue := ""
	switch {
	case val < 0:
		signValue = "-"
	case forceSign:
		signValue = "+"
	}
	return signValue + fmt.Sprintf("%0"+strconv.Itoa(length)+"d", abs(val))
}
// makeBracketToken builds the placeholder ("$1", "$2", ...) that stands in
// for bracketed literal text while format tokens are being replaced.
func makeBracketToken(num int) string {
	return fmt.Sprintf("$%d", num+1)
}
package trees
import (
"fmt"
"math"
)
/*
Binary Search Tree Implementation
*/
// BinarySearchTreeNode is one node of a binary search tree holding an int
// value and optional left/right children.
type BinarySearchTreeNode struct {
	value       int
	Left, Right *BinarySearchTreeNode
}

// NewBinarySearchTreeNode creates a leaf node holding value.
func NewBinarySearchTreeNode(value int) *BinarySearchTreeNode {
	return &BinarySearchTreeNode{
		value: value,
		Left:  nil,
		Right: nil,
	}
}

// BinarySearchTree is a binary search tree of unique int values.
type BinarySearchTree struct {
	Root *BinarySearchTreeNode
}

// NewBinarySearchTree creates an empty tree.
func NewBinarySearchTree() *BinarySearchTree {
	return &BinarySearchTree{Root: nil}
}

// InsertRecur inserts value recursively; duplicates are ignored.
func (b *BinarySearchTree) InsertRecur(value int) {
	b.Root = insertRecur(b.Root, value)
}

// insertRecur inserts value into the subtree rooted at node and returns the
// subtree root.
func insertRecur(node *BinarySearchTreeNode, value int) *BinarySearchTreeNode {
	// If the tree is empty, return a new node.
	if node == nil {
		return NewBinarySearchTreeNode(value)
	}
	if value < node.value {
		node.Left = insertRecur(node.Left, value)
	} else if value > node.value {
		node.Right = insertRecur(node.Right, value)
	}
	return node
}

// Insert inserts value iteratively; duplicates are ignored.
//
// BUG FIX: the previous version attached new nodes to the wrong child
// (a greater value was stored in curr.Left and a smaller value in
// curr.Right) and its two direction checks were not mutually exclusive,
// so after stepping right it could hit the equality branch and exit the
// loop without inserting at all.
func (b *BinarySearchTree) Insert(value int) {
	if b.Root == nil {
		b.Root = NewBinarySearchTreeNode(value)
		return
	}
	curr := b.Root
	for {
		switch {
		case value > curr.value:
			if curr.Right == nil {
				curr.Right = NewBinarySearchTreeNode(value)
				return
			}
			curr = curr.Right
		case value < curr.value:
			if curr.Left == nil {
				curr.Left = NewBinarySearchTreeNode(value)
				return
			}
			curr = curr.Left
		default:
			// Value already present; the tree stores unique values.
			return
		}
	}
}
/*
Given the root node of a binary search tree (BST) and a value.
You need to find the node in the BST that the node's value equals the given value.
Return the subtree rooted with that node.
If such node doesn't exist, you should return NULL.
For example,
Given the tree:
4
/ \
2 7
/ \
1 3
And the value to search: 2
You should return this subtree:
2
/ \
1 3
In the example above, if we want to search the value 5, since there is no node with
value 5, we should return NULL.
Note that an empty tree is represented by NULL,
therefore you would see the expected output (serialized tree format) as [], not null.
*/
// Search returns the subtree rooted at the node whose value equals the
// given value, or nil when no such node exists.
func (b BinarySearchTree) Search(value int) *BinarySearchTreeNode {
	return b.search(b.Root, value)
}

// search walks down from node following BST ordering until the value is
// found or a nil child is reached.
func (b BinarySearchTree) search(node *BinarySearchTreeNode, value int) *BinarySearchTreeNode {
	for node != nil && node.value != value {
		if value < node.value {
			node = node.Left
		} else {
			node = node.Right
		}
	}
	return node
}
// LowestCommonAncestor returns the lowest common ancestor of the nodes
// holding x and y, starting the search at the root.
func (b *BinarySearchTree) LowestCommonAncestor(x,y int) *BinarySearchTreeNode {
	return b.lowestCommonAncestor(b.Root, x,y)
}

// lowestCommonAncestor descends while x and y lie strictly on the same side
// of node; the first node that separates them (or matches one) is the LCA.
// NOTE(review): the round-trip through float64 via math.Max/math.Min loses
// precision for |values| above 2^53 — plain int comparisons would be exact.
func (b *BinarySearchTree) lowestCommonAncestor(node *BinarySearchTreeNode, x, y int) *BinarySearchTreeNode {
	if node == nil {
		return nil
	}
	if math.Max(float64(x), float64(y)) < float64(node.value) {
		// If max of x and y is less than node value then LCA is located left of node
		return b.lowestCommonAncestor(node.Left, x, y)
	} else if math.Min(float64(x), float64(y)) > float64(node.value) {
		// If min of x and y is more than node value then LCA is located right of node
		return b.lowestCommonAncestor(node.Right, x, y)
	} else {
		// Otherwise this node is the LCA
		return node
	}
}
// Delete removes the node holding value (if present) from the subtree
// rooted at b.Root and returns the resulting subtree root.
// NOTE(review): b.Root itself is never reassigned here, so deleting the
// root value leaves b.Root pointing at the old node — confirm callers use
// the returned subtree.
func (b *BinarySearchTree) Delete(value int) *BinarySearchTreeNode {
	return b.delete(b.Root, value)
}

// delete removes value from the subtree rooted at node and returns the
// (possibly new) subtree root.
func (b *BinarySearchTree) delete(node *BinarySearchTreeNode, value int) *BinarySearchTreeNode {
	if node == nil {
		return nil
	} else if value < node.value {
		// Traverse left
		node.Left = b.delete(node.Left, value)
	} else if value > node.value {
		// Traverse right
		node.Right = b.delete(node.Right, value)
	} else {
		// Found!
		// case no child
		if node.Left == nil && node.Right == nil {
			return nil
		} else if node.Left == nil {
			// Bubble up right child
			node = node.Right
			return node
		} else if node.Right == nil {
			// Bubble up left child
			node = node.Left
			return node
		} else {
			/*
				If the node has two children, either find the maximum of the left
				subtree or find the minimum of the right subtree to replace that
				node.
			*/
			temp := findMin(node.Right)
			node.value = temp.value
			// find and remove that node. Then assign it to Right.
			node.Right = b.delete(node.Right, temp.value)
			return node
		}
	}
	return node
}

// findMin returns the leftmost (minimum-valued) node of the subtree.
// It assumes root is non-nil.
func findMin(root *BinarySearchTreeNode) *BinarySearchTreeNode {
	for root.Left != nil {
		root = root.Left
	}
	return root
}
/*
Given a root of a tree, and an integer k.
Print all the nodes which are at k distance from root.
Solution: Pass the k parameter on each children when we traverse down we subtract one.
If k == 0 then we print the node value
*/
func PrintNodesKDistanceRoot(node *BinarySearchTreeNode, k int) {
if node == nil {
return
}
if k == 0 {
fmt.Println(node.value)
return
} else {
PrintNodesKDistanceRoot(node.Right, k-1)
PrintNodesKDistanceRoot(node.Left, k-1)
}
} | trees/BinarySearchTree.go | 0.672869 | 0.437163 | BinarySearchTree.go | starcoder |
package memeduck
import (
"github.com/MakeNowJust/memefish/pkg/ast"
"github.com/pkg/errors"
"github.com/genkami/memeduck/internal"
)
// WhereCond is a conditional expression that appears in WHERE clauses.
type WhereCond interface {
	// ToASTWhere renders the condition as a memefish AST WHERE clause.
	ToASTWhere() (*ast.Where, error)
}

// ExprCond is a boolean expression used to filter records.
type ExprCond struct {
	expr ast.Expr // the underlying boolean AST expression
}

// ToASTWhere wraps the stored expression in an ast.Where clause.
func (c *ExprCond) ToASTWhere() (*ast.Where, error) {
	return &ast.Where{
		Expr: c.expr,
	}, nil
}

// Bool creates a new boolean literal condition.
func Bool(v bool) *ExprCond {
	return &ExprCond{expr: internal.BoolLit(v)}
}
// OpCond is a binary operator expression comparing lhs and rhs.
type OpCond struct {
	lhs, rhs interface{} // operands, converted to AST expressions on demand
	op       BinaryOp    // the comparison operator
}

// BinaryOp is a binary operator.
type BinaryOp ast.BinaryOp

// Supported binary operators, mirroring memefish's ast.BinaryOp values.
const (
	EQ BinaryOp = BinaryOp(ast.OpEqual)
	NE BinaryOp = BinaryOp(ast.OpNotEqual)
	LT BinaryOp = BinaryOp(ast.OpLess)
	GT BinaryOp = BinaryOp(ast.OpGreater)
	LE BinaryOp = BinaryOp(ast.OpLessEqual)
	GE BinaryOp = BinaryOp(ast.OpGreaterEqual)
	LIKE BinaryOp = BinaryOp(ast.OpLike)
	NOT_LIKE BinaryOp = BinaryOp(ast.OpNotLike)
)

// ToASTWhere converts both operands to AST expressions and combines them
// with the operator into a WHERE clause; conversion errors are returned
// unchanged.
func (c *OpCond) ToASTWhere() (*ast.Where, error) {
	lhs, err := internal.ToExpr(c.lhs)
	if err != nil {
		return nil, err
	}
	rhs, err := internal.ToExpr(c.rhs)
	if err != nil {
		return nil, err
	}
	return &ast.Where{
		Expr: &ast.BinaryExpr{
			Op:    ast.BinaryOp(c.op),
			Left:  lhs,
			Right: rhs,
		},
	}, nil
}
// Op creates a new binary operator expression comparing lhs and rhs.
func Op(lhs interface{}, op BinaryOp, rhs interface{}) *OpCond {
	return &OpCond{
		lhs: lhs,
		rhs: rhs,
		op:  op,
	}
}

// Eq(x, y) is a shorthand for Op(x, EQ, y).
func Eq(lhs, rhs interface{}) *OpCond {
	return Op(lhs, EQ, rhs)
}

// Ne(x, y) is a shorthand for Op(x, NE, y).
func Ne(lhs, rhs interface{}) *OpCond {
	return Op(lhs, NE, rhs)
}

// Lt(x, y) is a shorthand for Op(x, LT, y).
func Lt(lhs, rhs interface{}) *OpCond {
	return Op(lhs, LT, rhs)
}

// Gt(x, y) is a shorthand for Op(x, GT, y).
func Gt(lhs, rhs interface{}) *OpCond {
	return Op(lhs, GT, rhs)
}

// Le(x, y) is a shorthand for Op(x, LE, y).
func Le(lhs, rhs interface{}) *OpCond {
	return Op(lhs, LE, rhs)
}

// Ge(x, y) is a shorthand for Op(x, GE, y).
func Ge(lhs, rhs interface{}) *OpCond {
	return Op(lhs, GE, rhs)
}

// Like(x, y) is a shorthand for Op(x, LIKE, y).
func Like(lhs, rhs interface{}) *OpCond {
	return Op(lhs, LIKE, rhs)
}

// NotLike(x, y) is a shorthand for Op(x, NOT_LIKE, y).
func NotLike(lhs, rhs interface{}) *OpCond {
	return Op(lhs, NOT_LIKE, rhs)
}
// NullCond represents IS NULL or IS NOT NULL predicate.
type NullCond struct {
	not bool
	arg interface{}
}
// IsNull creates `x IS NULL` predicate.
func IsNull(arg interface{}) *NullCond {
	return &NullCond{arg: arg}
}
// IsNotNull creates `x IS NOT NULL` predicate.
func IsNotNull(arg interface{}) *NullCond {
	return &NullCond{arg: arg, not: true}
}
// ToASTWhere converts the predicate into an *ast.Where wrapping an
// *ast.IsNullExpr. It fails if the argument cannot be converted to an
// AST expression.
func (c *NullCond) ToASTWhere() (*ast.Where, error) {
	expr, err := internal.ToExpr(c.arg)
	if err != nil {
		return nil, err
	}
	return &ast.Where{
		Expr: &ast.IsNullExpr{
			Not: c.not,
			Left: expr,
		},
	}, nil
}
// BetweenCond represents BETWEEN or NOT BETWEEN predicates.
type BetweenCond struct {
	arg interface{}
	min interface{}
	max interface{}
	not bool
}
// Between(x, min, max) creates `x BETWEEN min AND max` predicate.
func Between(x, min, max interface{}) *BetweenCond {
	return &BetweenCond{arg: x, min: min, max: max}
}
// NotBetween(x, min, max) creates `x NOT BETWEEN min AND max` predicate.
func NotBetween(x, min, max interface{}) *BetweenCond {
	return &BetweenCond{arg: x, min: min, max: max, not: true}
}
// ToASTWhere converts the predicate into an *ast.Where wrapping an
// *ast.BetweenExpr. It fails if any of the three operands cannot be
// converted to an AST expression.
func (c *BetweenCond) ToASTWhere() (*ast.Where, error) {
	arg, err := internal.ToExpr(c.arg)
	if err != nil {
		return nil, err
	}
	min, err := internal.ToExpr(c.min)
	if err != nil {
		return nil, err
	}
	max, err := internal.ToExpr(c.max)
	if err != nil {
		return nil, err
	}
	return &ast.Where{
		Expr: &ast.BetweenExpr{
			Not: c.not,
			Left: arg,
			RightStart: min,
			RightEnd: max,
		},
	}, nil
}
// IdentExpr is an identifier.
type IdentExpr struct {
	names []string
}
// Ident creates a new IdentExpr.
// Path expression can be created by passing more than one elements.
func Ident(names ...string) *IdentExpr {
	return &IdentExpr{names: names}
}
// ToASTExpr converts the identifier into an *ast.Path with one *ast.Ident
// per path element. At least one element is required.
func (e *IdentExpr) ToASTExpr() (ast.Expr, error) {
	if len(e.names) == 0 {
		return nil, errors.New("empty identifier")
	}
	idents := make([]*ast.Ident, 0, len(e.names))
	for _, name := range e.names {
		idents = append(idents, &ast.Ident{Name: name})
	}
	return &ast.Path{Idents: idents}, nil
}
// ParamExpr is a query parameter.
type ParamExpr struct {
	name string
}
// Param creates a new ParamExpr referring to the query parameter @name.
func Param(name string) *ParamExpr {
	return &ParamExpr{name: name}
}
// ToASTExpr converts the parameter into an *ast.Param. It never fails.
func (e *ParamExpr) ToASTExpr() (ast.Expr, error) {
	return &ast.Param{Name: e.name}, nil
}
// LogicalOpCond represents AND/OR operator over one or more conditions.
type LogicalOpCond struct {
	op logicalOp
	conds []WhereCond
}
// logicalOp is the internal representation of AND/OR.
type logicalOp ast.BinaryOp
const (
	logicalOpAnd logicalOp = logicalOp(ast.OpAnd)
	logicalOpOr logicalOp = logicalOp(ast.OpOr)
)
// And concatenates one or more WhereConds with the AND operator.
func And(conds ...WhereCond) *LogicalOpCond {
	return &LogicalOpCond{
		op: logicalOpAnd,
		conds: conds,
	}
}
// Or concatenates one or more WhereConds with the OR operator.
func Or(conds ...WhereCond) *LogicalOpCond {
	return &LogicalOpCond{
		op: logicalOpOr,
		conds: conds,
	}
}
func (c *LogicalOpCond) ToASTWhere() (*ast.Where, error) {
if len(c.conds) <= 0 {
return nil, errors.New("no conditions")
}
where, err := c.conds[0].ToASTWhere()
if err != nil {
return nil, err
}
acc := where
for _, cond := range c.conds[1:] {
where, err = cond.ToASTWhere()
if err != nil {
return nil, err
}
acc = &ast.Where{
Expr: &ast.BinaryExpr{
Op: ast.BinaryOp(c.op),
Left: acc.Expr,
Right: where.Expr,
},
}
}
return acc, nil
} | where.go | 0.779406 | 0.441553 | where.go | starcoder |
package glmatrix
import (
"fmt"
"math"
"math/rand"
)
// NewVec2 creates a new vec2 initialized to (0, 0).
func NewVec2() []float64 {
	return make([]float64, 2)
}
// Vec2Create creates a new vec2 initialized to (0, 0).
func Vec2Create() []float64 {
	return NewVec2()
}
// Vec2Clone creates a new vec2 copied from an existing vector.
func Vec2Clone(a []float64) []float64 {
	out := NewVec2()
	out[0], out[1] = a[0], a[1]
	return out
}
// Vec2FromValues creates a new vec2 initialized with the given values.
func Vec2FromValues(x, y float64) []float64 {
	out := NewVec2()
	out[0], out[1] = x, y
	return out
}
// Vec2Copy copies the values from one vec2 to another.
func Vec2Copy(out, a []float64) []float64 {
	out[0], out[1] = a[0], a[1]
	return out
}
// Vec2Set sets the components of a vec2 to the given values.
func Vec2Set(out []float64, x, y float64) []float64 {
	out[0], out[1] = x, y
	return out
}
// Vec2Add adds two vec2's component-wise.
func Vec2Add(out, a, b []float64) []float64 {
	out[0], out[1] = a[0]+b[0], a[1]+b[1]
	return out
}
// Vec2Subtract subtracts vector b from vector a component-wise.
func Vec2Subtract(out, a, b []float64) []float64 {
	out[0], out[1] = a[0]-b[0], a[1]-b[1]
	return out
}
// Vec2Multiply multiplies two vec2's component-wise.
func Vec2Multiply(out, a, b []float64) []float64 {
	out[0], out[1] = a[0]*b[0], a[1]*b[1]
	return out
}
// Vec2Divide divides two vec2's component-wise.
func Vec2Divide(out, a, b []float64) []float64 {
	out[0], out[1] = a[0]/b[0], a[1]/b[1]
	return out
}
// Vec2Ceil applies math.Ceil to each component of a.
func Vec2Ceil(out, a []float64) []float64 {
	out[0], out[1] = math.Ceil(a[0]), math.Ceil(a[1])
	return out
}
// Vec2Floor applies math.Floor to each component of a.
func Vec2Floor(out, a []float64) []float64 {
	out[0], out[1] = math.Floor(a[0]), math.Floor(a[1])
	return out
}
// Vec2Min returns the component-wise minimum of two vec2's.
func Vec2Min(out, a, b []float64) []float64 {
	out[0], out[1] = math.Min(a[0], b[0]), math.Min(a[1], b[1])
	return out
}
// Vec2Max returns the component-wise maximum of two vec2's.
func Vec2Max(out, a, b []float64) []float64 {
	out[0], out[1] = math.Max(a[0], b[0]), math.Max(a[1], b[1])
	return out
}
// Vec2Round applies math.Round to each component of a.
func Vec2Round(out, a []float64) []float64 {
	out[0], out[1] = math.Round(a[0]), math.Round(a[1])
	return out
}
// Vec2Scale scales a vec2 by a scalar number.
func Vec2Scale(out, a []float64, scale float64) []float64 {
	out[0], out[1] = a[0]*scale, a[1]*scale
	return out
}
// Vec2ScaleAndAdd adds two vec2's after scaling the second operand by a
// scalar value: out = a + b*scale.
func Vec2ScaleAndAdd(out, a, b []float64, scale float64) []float64 {
	out[0], out[1] = a[0]+b[0]*scale, a[1]+b[1]*scale
	return out
}
// Vec2Distance calculates the euclidian distance between two vec2's.
func Vec2Distance(a, b []float64) float64 {
	return math.Hypot(b[0]-a[0], b[1]-a[1])
}
// Vec2SquaredDistance calculates the squared euclidian distance between
// two vec2's.
func Vec2SquaredDistance(a, b []float64) float64 {
	dx, dy := b[0]-a[0], b[1]-a[1]
	return dx*dx + dy*dy
}
// Vec2Length calculates the length of a vec2
func Vec2Length(out []float64) float64 {
x := out[0]
y := out[1]
return math.Hypot(x, y)
}
// Vec2SquaredLength calculates the squared length of a vec2
func Vec2SquaredLength(out []float64) float64 {
x := out[0]
y := out[1]
return x*x + y*y
}
// Vec2Negate negates the components of a vec2
func Vec2Negate(out, a []float64) []float64 {
out[0] = -a[0]
out[1] = -a[1]
return out
}
// Vec2Inverse returns the inverse of the components of a vec2
func Vec2Inverse(out, a []float64) []float64 {
out[0] = 1. / a[0]
out[1] = 1. / a[1]
return out
}
// Vec2Normalize normalize a vec2
func Vec2Normalize(out, a []float64) []float64 {
len := Vec2Length(a)
if 0 < len {
len = 1. / len
}
out[0] = a[0] * len
out[1] = a[1] * len
return out
}
// Vec2Dot calculates the dot product of two vec2's
func Vec2Dot(a, b []float64) float64 {
return a[0]*b[0] + a[1]*b[1]
}
// Vec2Cross computes the cross product of two vec2's
// Note that the cross product must by definition produce a 3D vector
func Vec2Cross(out, a, b []float64) []float64 {
z := a[0]*b[1] - a[1]*b[0]
out[0] = 0
out[1] = 0
out[2] = z
return out
}
// Vec2Lerp performs a linear interpolation between two vec2's
func Vec2Lerp(out, a, b []float64, t float64) []float64 {
ax := a[0]
ay := a[1]
out[0] = ax + t*(b[0]-ax)
out[1] = ay + t*(b[1]-ay)
return out
}
// Vec2Random generates a random vector with the given scale
func Vec2Random(out []float64, scale float64) []float64 {
r := rand.Float64() * 2.0 * math.Pi
out[0] = math.Cos(r) * scale
out[1] = math.Sin(r) * scale
return out
}
// Vec2TransformMat2 transforms the vec2 with a mat2
func Vec2TransformMat2(out, a, m []float64) []float64 {
x := a[0]
y := a[1]
out[0] = m[0]*x + m[2]*y
out[1] = m[1]*x + m[3]*y
return out
}
// Vec2TransformMat2d transforms the vec2 with a mat2d
func Vec2TransformMat2d(out, a, m []float64) []float64 {
x := a[0]
y := a[1]
out[0] = m[0]*x + m[2]*y + m[4]
out[1] = m[1]*x + m[3]*y + m[5]
return out
}
// Vec2TransformMat3 transforms the vec2 with a mat3
// 3rd vector component is implicitly '1'
func Vec2TransformMat3(out, a, m []float64) []float64 {
x := a[0]
y := a[1]
out[0] = m[0]*x + m[2]*y + m[6]
out[1] = m[1]*x + m[3]*y + m[7]
return out
}
// Vec2TransformMat4 transforms the vec2 with a mat4
// 3rd vector component is implicitly '0'
// 4th vector component is implicitly '1'
func Vec2TransformMat4(out, a, m []float64) []float64 {
x := a[0]
y := a[1]
out[0] = m[0]*x + m[2]*y + m[12]
out[1] = m[1]*x + m[3]*y + m[13]
return out
}
// Vec2Rotate rotate a 2D vector
func Vec2Rotate(out, p, c []float64, rad float64) []float64 {
p0 := p[0] - c[0]
p1 := p[1] - c[1]
sinC := math.Sin(rad)
cosC := math.Cos(rad)
out[0] = p0*cosC - p1*sinC + c[0]
out[1] = p0*sinC + p1*cosC + c[1]
return out
}
// Vec2Angle get the angle between two 2D vectors
func Vec2Angle(a, b []float64) float64 {
x1 := a[0]
y1 := a[1]
x2 := b[0]
y2 := b[1]
cosine := math.Sqrt(x1*x1+y1*y1) * math.Sqrt(x2*x2+y2*y2)
if cosine != 0 {
cosine = (x1*x2 + y1*y2) / cosine
}
return math.Acos(math.Min(math.Max(cosine, -1), 1))
}
// Vec2Zero set the components of a vec2 to zero
func Vec2Zero(out []float64) []float64 {
out[0] = 0.
out[1] = 0.
return out
}
// Vec2Str returns a string representation of a vector
func Vec2Str(out []float64) string {
return fmt.Sprintf("vec2(%v, %v)", out[0], out[1])
}
// Vec2ExactEquals reports whether a and b have bitwise-identical
// components (== comparison, no tolerance).
func Vec2ExactEquals(a, b []float64) bool {
	if a[0] != b[0] {
		return false
	}
	return a[1] == b[1]
}
// Vec2Equals reports whether a and b are component-wise equal within the
// package-wide floating point tolerance (see the equals helper).
func Vec2Equals(a, b []float64) bool {
	if !equals(a[0], b[0]) {
		return false
	}
	return equals(a[1], b[1])
}
// Vec2Len is an alias for Vec2Length.
var Vec2Len = Vec2Length
// Vec2Sub is an alias for Vec2Subtract.
var Vec2Sub = Vec2Subtract
// Vec2Mul is an alias for Vec2Multiply.
var Vec2Mul = Vec2Multiply
// Vec2Div is an alias for Vec2Divide.
var Vec2Div = Vec2Divide
// Vec2Dist is an alias for Vec2Distance.
var Vec2Dist = Vec2Distance
// Vec2SqrDist is an alias for Vec2SquaredDistance.
var Vec2SqrDist = Vec2SquaredDistance
// Vec2SqrLen is an alias for Vec2SquaredLength.
var Vec2SqrLen = Vec2SquaredLength
// Vec2ForEach performs fn over an array of vec2s packed into a.
// stride defaults to 2 when <= 0 and offset to 0 when negative; a
// positive count limits how many vectors are visited. fn receives the
// scratch vector as both output and input plus the user argument arg;
// results are written back into a, which is also returned.
func Vec2ForEach(a []float64, stride, offset, count int, fn func([]float64, []float64, []float64), arg []float64) []float64 {
	if stride <= 0 {
		stride = 2
	}
	if offset < 0 {
		offset = 0
	}
	limit := len(a)
	if count > 0 {
		if n := count*stride + offset; n < limit {
			limit = n
		}
	}
	// Reuse one scratch vector instead of allocating per iteration, and
	// require both components in range so a trailing odd element can no
	// longer cause an index-out-of-range panic.
	vec := make([]float64, 2)
	for i := offset; i < limit && i+1 < len(a); i += stride {
		vec[0], vec[1] = a[i], a[i+1]
		fn(vec, vec, arg)
		a[i], a[i+1] = vec[0], vec[1]
	}
	return a
}
package aes
import (
"bytes"
"fmt"
)
// ZeroPadding pads plainText with 0x00 bytes up to the next multiple of
// blockSize. Per the contract above, input whose length is already a
// multiple of blockSize is returned unchanged (the original appended a
// full extra block of zeros in that case, contradicting the comment).
// When padding with ZeroPadding, there is no way to distinguish between
// real data and padding data, so it is only suitable for encryption and
// decryption of strings ending with \0.
func ZeroPadding(plainText []byte, blockSize int) []byte {
	rem := len(plainText) % blockSize
	if rem == 0 {
		return plainText
	}
	return append(plainText, bytes.Repeat([]byte{0}, blockSize-rem)...)
}
// ZeroUnPadding strips 0x00 bytes from both ends of plainText (bytes.Trim
// with a NUL cutset is equivalent to the original TrimFunc on rune 0).
func ZeroUnPadding(plainText []byte) []byte {
	return bytes.Trim(plainText, "\x00")
}
// PKCS5Padding pads plainText to a multiple of blockSize using PKCS#5.
// PKCS#5 is defined only for 8-byte blocks, so any other blockSize is
// rejected; the padding byte value equals the number of padding bytes
// (1-8, a full block of 0x08 when the input is already aligned).
func PKCS5Padding(plainText []byte, blockSize int) ([]byte, error) {
	if blockSize != 8 {
		return nil, fmt.Errorf("blockSize = %d, must equal 8", blockSize)
	}
	n := blockSize - len(plainText)%blockSize
	for i := 0; i < n; i++ {
		plainText = append(plainText, byte(n))
	}
	return plainText, nil
}
func PKCS5UnPadding(plainText []byte) ([]byte, error) {
length := len(plainText)
if length <= 0 {
return nil, fmt.Errorf("plaintext len <= 0")
}
// get padding length
paddingSize := int(plainText[length-1])
if paddingSize >= length {
return nil, fmt.Errorf("padding len is big than plaintext")
}
// remove padding text
return plainText[:(length - paddingSize)], nil
}
// PKCS7Padding pads plainText to a multiple of blockSize using PKCS#7
// (compatible with PKCS#5 but defined for any block size); the padding
// byte value equals the number of padding bytes, a full block when the
// input is already aligned.
func PKCS7Padding(plainText []byte, blockSize int) []byte {
	n := blockSize - len(plainText)%blockSize
	for i := 0; i < n; i++ {
		plainText = append(plainText, byte(n))
	}
	return plainText
}
func PKCS7UnPadding(plainText []byte) ([]byte, error) {
length := len(plainText)
if length <= 0 {
return nil, fmt.Errorf("plaintext len <= 0")
}
paddingSize := int(plainText[length-1])
if paddingSize >= length {
return nil, fmt.Errorf("padding len is big than plaintext")
}
return plainText[:(length - paddingSize)], nil
} | crypto/aes/padding.go | 0.773473 | 0.435962 | padding.go | starcoder |
package level2
import (
"fmt"
"github.com/sfomuseum/go-edtf"
"github.com/sfomuseum/go-edtf/common"
"github.com/sfomuseum/go-edtf/re"
"strconv"
"strings"
)
/*
Significant digits
A year (expressed in any of the three allowable forms: four-digit, 'Y' prefix, or exponential) may be followed by 'S', followed by a positive integer indicating the number of significant digits.
Example 1 ‘1950S2’
some year between 1900 and 1999, estimated to be 1950
Example 2 ‘Y171010000S3’
some year between 171010000 and 171010999, estimated to be 171010000
Example 3 ‘Y3388E2S3’
some year between 338000 and 338999, estimated to be 338800.
*/
// IsSignificantDigits reports whether edtf_str matches the EDTF level 2
// significant-digits form described above.
func IsSignificantDigits(edtf_str string) bool {
	return re.SignificantDigits.MatchString(edtf_str)
}
func ParseSignificantDigits(edtf_str string) (*edtf.EDTFDate, error) {
/*
SIGN 5 1950S2,1950,,,2
SIGN 5 Y171010000S3,,171010000,,3
SIGN 5 Y-20E2S3,,,-20E2,3
SIGN 5 Y3388E2S3,,,3388E2,3
SIGN 5 Y-20E2S3,,,-20E2,3
*/
m := re.SignificantDigits.FindStringSubmatch(edtf_str)
if len(m) != 5 {
return nil, edtf.Invalid(SIGNIFICANT_DIGITS, edtf_str)
}
str_yyyy := m[1]
str_year := m[2]
notation := m[3]
str_digits := m[4]
var yyyy int
if str_yyyy != "" {
y, err := strconv.Atoi(str_yyyy)
if err != nil {
return nil, edtf.Invalid(SIGNIFICANT_DIGITS, edtf_str)
}
yyyy = y
} else if str_year != "" {
if len(str_year) > 4 {
return nil, edtf.Unsupported(SIGNIFICANT_DIGITS, edtf_str)
}
y, err := strconv.Atoi(str_year)
if err != nil {
return nil, edtf.Invalid(SIGNIFICANT_DIGITS, edtf_str)
}
yyyy = y
} else if notation != "" {
y, err := common.ParseExponentialNotation(notation)
if err != nil {
return nil, err
}
yyyy = y
} else {
return nil, edtf.Invalid(SIGNIFICANT_DIGITS, edtf_str)
}
if yyyy > edtf.MAX_YEARS {
return nil, edtf.Unsupported(SIGNIFICANT_DIGITS, edtf_str)
}
digits, err := strconv.Atoi(str_digits)
if err != nil {
return nil, edtf.Invalid(SIGNIFICANT_DIGITS, edtf_str)
}
if len(strconv.Itoa(digits)) > len(strconv.Itoa(yyyy)) {
return nil, edtf.Invalid(SIGNIFICANT_DIGITS, edtf_str)
}
str_yyyy = strconv.Itoa(yyyy)
prefix_yyyy := str_yyyy[0 : len(str_yyyy)-digits]
first := strings.Repeat("0", digits)
last := strings.Repeat("9", digits)
start_yyyy := prefix_yyyy + first
end_yyyy := prefix_yyyy + last
_str := fmt.Sprintf("%s/%s", start_yyyy, end_yyyy)
if strings.HasPrefix(start_yyyy, "-") && strings.HasPrefix(end_yyyy, "-") {
_str = fmt.Sprintf("%s/%s", end_yyyy, start_yyyy)
}
sp, err := common.DateSpanFromEDTF(_str)
if err != nil {
return nil, err
}
d := &edtf.EDTFDate{
Start: sp.Start,
End: sp.End,
EDTF: edtf_str,
Level: LEVEL,
Feature: SIGNIFICANT_DIGITS,
}
return d, nil
} | vendor/github.com/sfomuseum/go-edtf/level2/significant_digits.go | 0.685529 | 0.418935 | significant_digits.go | starcoder |
package models
import (
i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e "time"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// Process is the Graph API process model. All fields are pointers (or
// interfaces) so that an absent value can be distinguished from a zero
// value; they are populated via the Set* methods during deserialization.
type Process struct {
    // User account identifier (user account context the process ran under) for example, AccountName, SID, and so on.
    accountName *string
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // The full process invocation commandline including all parameters.
    commandLine *string
    // Time at which the process was started. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z.
    createdDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
    // Complex type containing file hashes (cryptographic and location-sensitive).
    fileHash FileHashable
    // The integrity level of the process. Possible values are: unknown, untrusted, low, medium, high, system.
    integrityLevel *ProcessIntegrityLevel
    // True if the process is elevated.
    isElevated *bool
    // The name of the process' Image file.
    name *string
    // DateTime at which the parent process was started. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z.
    parentProcessCreatedDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
    // The Process ID (PID) of the parent process.
    parentProcessId *int32
    // The name of the image file of the parent process.
    parentProcessName *string
    // Full path, including filename.
    path *string
    // The Process ID (PID) of the process.
    processId *int32
}
// NewProcess instantiates a new process and sets the default values.
func NewProcess()(*Process) {
    p := &Process{}
    p.SetAdditionalData(map[string]interface{}{})
    return p
}
// CreateProcessFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateProcessFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    // No subtypes to discriminate between; always return a plain Process.
    return NewProcess(), nil
}
// GetAccountName gets the accountName property value. User account identifier (user account context the process ran under) for example, AccountName, SID, and so on.
func (m *Process) GetAccountName()(*string) {
    if m == nil {
        return nil
    }
    return m.accountName
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *Process) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    }
    return m.additionalData
}
// GetCommandLine gets the commandLine property value. The full process invocation commandline including all parameters.
func (m *Process) GetCommandLine()(*string) {
    if m == nil {
        return nil
    }
    return m.commandLine
}
// GetCreatedDateTime gets the createdDateTime property value. Time at which the process was started (ISO 8601, always UTC).
func (m *Process) GetCreatedDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
    if m == nil {
        return nil
    }
    return m.createdDateTime
}
// GetFieldDeserializers the deserialization information for the current model.
// The repeated read-check-set closures of the generated original are
// factored into small per-type builders that bind directly to the setters.
func (m *Process) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    type parseNode = i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode
    // strField reads a string value and, when present, hands it to set.
    strField := func(set func(*string)) func(parseNode) error {
        return func(n parseNode) error {
            val, err := n.GetStringValue()
            if err != nil {
                return err
            }
            if val != nil {
                set(val)
            }
            return nil
        }
    }
    // timeField reads a timestamp value and, when present, hands it to set.
    timeField := func(set func(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)) func(parseNode) error {
        return func(n parseNode) error {
            val, err := n.GetTimeValue()
            if err != nil {
                return err
            }
            if val != nil {
                set(val)
            }
            return nil
        }
    }
    // int32Field reads an int32 value and, when present, hands it to set.
    int32Field := func(set func(*int32)) func(parseNode) error {
        return func(n parseNode) error {
            val, err := n.GetInt32Value()
            if err != nil {
                return err
            }
            if val != nil {
                set(val)
            }
            return nil
        }
    }
    return map[string]func(parseNode) error{
        "accountName": strField(m.SetAccountName),
        "commandLine": strField(m.SetCommandLine),
        "createdDateTime": timeField(m.SetCreatedDateTime),
        "fileHash": func(n parseNode) error {
            val, err := n.GetObjectValue(CreateFileHashFromDiscriminatorValue)
            if err != nil {
                return err
            }
            if val != nil {
                m.SetFileHash(val.(FileHashable))
            }
            return nil
        },
        "integrityLevel": func(n parseNode) error {
            val, err := n.GetEnumValue(ParseProcessIntegrityLevel)
            if err != nil {
                return err
            }
            if val != nil {
                m.SetIntegrityLevel(val.(*ProcessIntegrityLevel))
            }
            return nil
        },
        "isElevated": func(n parseNode) error {
            val, err := n.GetBoolValue()
            if err != nil {
                return err
            }
            if val != nil {
                m.SetIsElevated(val)
            }
            return nil
        },
        "name": strField(m.SetName),
        "parentProcessCreatedDateTime": timeField(m.SetParentProcessCreatedDateTime),
        "parentProcessId": int32Field(m.SetParentProcessId),
        "parentProcessName": strField(m.SetParentProcessName),
        "path": strField(m.SetPath),
        "processId": int32Field(m.SetProcessId),
    }
}
// GetFileHash gets the fileHash property value. Complex type containing file hashes (cryptographic and location-sensitive).
func (m *Process) GetFileHash()(FileHashable) {
    if m == nil {
        return nil
    }
    return m.fileHash
}
// GetIntegrityLevel gets the integrityLevel property value. The integrity level of the process. Possible values are: unknown, untrusted, low, medium, high, system.
func (m *Process) GetIntegrityLevel()(*ProcessIntegrityLevel) {
    if m == nil {
        return nil
    }
    return m.integrityLevel
}
// GetIsElevated gets the isElevated property value. True if the process is elevated.
func (m *Process) GetIsElevated()(*bool) {
    if m == nil {
        return nil
    }
    return m.isElevated
}
// GetName gets the name property value. The name of the process' Image file.
func (m *Process) GetName()(*string) {
    if m == nil {
        return nil
    }
    return m.name
}
// GetParentProcessCreatedDateTime gets the parentProcessCreatedDateTime property value. DateTime at which the parent process was started (ISO 8601, always UTC).
func (m *Process) GetParentProcessCreatedDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
    if m == nil {
        return nil
    }
    return m.parentProcessCreatedDateTime
}
// GetParentProcessId gets the parentProcessId property value. The Process ID (PID) of the parent process.
func (m *Process) GetParentProcessId()(*int32) {
    if m == nil {
        return nil
    }
    return m.parentProcessId
}
// GetParentProcessName gets the parentProcessName property value. The name of the image file of the parent process.
func (m *Process) GetParentProcessName()(*string) {
    if m == nil {
        return nil
    }
    return m.parentProcessName
}
// GetPath gets the path property value. Full path, including filename.
func (m *Process) GetPath()(*string) {
    if m == nil {
        return nil
    }
    return m.path
}
// GetProcessId gets the processId property value. The Process ID (PID) of the process.
func (m *Process) GetProcessId()(*int32) {
    if m == nil {
        return nil
    }
    return m.processId
}
// Serialize serializes information the current object. The write steps
// are kept in the same order as the generated original; the first error
// aborts serialization.
func (m *Process) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    steps := []func() error{
        func() error { return writer.WriteStringValue("accountName", m.GetAccountName()) },
        func() error { return writer.WriteStringValue("commandLine", m.GetCommandLine()) },
        func() error { return writer.WriteTimeValue("createdDateTime", m.GetCreatedDateTime()) },
        func() error { return writer.WriteObjectValue("fileHash", m.GetFileHash()) },
        func() error {
            // The enum is only serialized when set, as its string form.
            if m.GetIntegrityLevel() == nil {
                return nil
            }
            cast := (*m.GetIntegrityLevel()).String()
            return writer.WriteStringValue("integrityLevel", &cast)
        },
        func() error { return writer.WriteBoolValue("isElevated", m.GetIsElevated()) },
        func() error { return writer.WriteStringValue("name", m.GetName()) },
        func() error { return writer.WriteTimeValue("parentProcessCreatedDateTime", m.GetParentProcessCreatedDateTime()) },
        func() error { return writer.WriteInt32Value("parentProcessId", m.GetParentProcessId()) },
        func() error { return writer.WriteStringValue("parentProcessName", m.GetParentProcessName()) },
        func() error { return writer.WriteStringValue("path", m.GetPath()) },
        func() error { return writer.WriteInt32Value("processId", m.GetProcessId()) },
        func() error { return writer.WriteAdditionalData(m.GetAdditionalData()) },
    }
    for _, step := range steps {
        if err := step(); err != nil {
            return err
        }
    }
    return nil
}
// SetAccountName sets the accountName property value. User account identifier (user account context the process ran under) for example, AccountName, SID, and so on.
func (m *Process) SetAccountName(value *string)() {
if m != nil {
m.accountName = value
}
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *Process) SetAdditionalData(value map[string]interface{})() {
if m != nil {
m.additionalData = value
}
}
// SetCommandLine sets the commandLine property value. The full process invocation commandline including all parameters.
func (m *Process) SetCommandLine(value *string)() {
if m != nil {
m.commandLine = value
}
}
// SetCreatedDateTime sets the createdDateTime property value. Time at which the process was started. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z.
func (m *Process) SetCreatedDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
if m != nil {
m.createdDateTime = value
}
}
// SetFileHash sets the fileHash property value. Complex type containing file hashes (cryptographic and location-sensitive).
func (m *Process) SetFileHash(value FileHashable)() {
if m != nil {
m.fileHash = value
}
}
// SetIntegrityLevel sets the integrityLevel property value. The integrity level of the process. Possible values are: unknown, untrusted, low, medium, high, system.
func (m *Process) SetIntegrityLevel(value *ProcessIntegrityLevel)() {
if m != nil {
m.integrityLevel = value
}
}
// SetIsElevated sets the isElevated property value. True if the process is elevated.
func (m *Process) SetIsElevated(value *bool)() {
if m != nil {
m.isElevated = value
}
}
// SetName sets the name property value. The name of the process' Image file.
func (m *Process) SetName(value *string)() {
if m != nil {
m.name = value
}
}
// SetParentProcessCreatedDateTime sets the parentProcessCreatedDateTime property value. DateTime at which the parent process was started. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z.
func (m *Process) SetParentProcessCreatedDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
if m != nil {
m.parentProcessCreatedDateTime = value
}
}
// SetParentProcessId sets the parentProcessId property value. The Process ID (PID) of the parent process.
func (m *Process) SetParentProcessId(value *int32)() {
if m != nil {
m.parentProcessId = value
}
}
// SetParentProcessName sets the parentProcessName property value. The name of the image file of the parent process.
func (m *Process) SetParentProcessName(value *string)() {
if m != nil {
m.parentProcessName = value
}
}
// SetPath sets the path property value. Full path, including filename.
func (m *Process) SetPath(value *string)() {
if m != nil {
m.path = value
}
}
// SetProcessId sets the processId property value. The Process ID (PID) of the process.
func (m *Process) SetProcessId(value *int32)() {
if m != nil {
m.processId = value
}
} | models/process.go | 0.620392 | 0.410638 | process.go | starcoder |
package ext
import (
"fmt"
"xelf.org/xelf/cor"
"xelf.org/xelf/exp"
"xelf.org/xelf/lit"
"xelf.org/xelf/typ"
)
// Rule is a configurable helper for assigning tags to nodes.
type Rule struct {
	Prepper KeyPrepper
	Setter  KeySetter
}
// KeyPrepper resolves el and returns a value to be assigned to key or an error.
type KeyPrepper = func(p *exp.Prog, env exp.Env, n Node, key string, el exp.Exp) (lit.Val, error)
// KeySetter sets the property with key on node n to value v or returns an error.
type KeySetter = func(p *exp.Prog, n Node, key string, v lit.Val) error
// IdxKeyer returns a key for an unnamed argument at idx.
type IdxKeyer = func(n Node, idx int) string
// Default is an alias for the default rule.
type Default = Rule
// Rules is a collection of rules that assign tags to nodes.
type Rules struct {
	// Key holds optional per key rules
	Key map[string]Rule
	// IdxKeyer will map unnamed tags to a key, when null unnamed tags result in an error
	IdxKeyer
	// Default holds an optional default rule.
	// If neither specific nor default rule are found DynPrepper and PathSetter are used.
	Default
	// Tail holds optional rules for tail elements.
	Tail Rule
}
// Rule returns the rule for the given tag key. Missing prepper/setter
// slots are filled first from the rule set's defaults and finally from
// the package fallbacks DynPrepper and PathSetter.
func (rs *Rules) Rule(tag string) Rule {
	r := rs.Key[tag]
	if r.Prepper == nil {
		if rs.Prepper != nil {
			r.Prepper = rs.Prepper
		} else {
			r.Prepper = DynPrepper
		}
	}
	if r.Setter == nil {
		if rs.Setter != nil {
			r.Setter = rs.Setter
		} else {
			r.Setter = PathSetter
		}
	}
	return r
}
// ZeroKeyer is an index keyer without offset.
var ZeroKeyer = OffsetKeyer(0)
// OffsetKeyer returns an index keyer that looks up a field at the index plus the offset.
// NOTE(review): pb.Params[i+offset] is not bounds-checked and the type
// assertion on Body will panic for non-param bodies — presumably callers
// guarantee both; confirm against call sites.
func OffsetKeyer(offset int) IdxKeyer {
	return func(n Node, i int) string {
		pb := n.Type().Body.(*typ.ParamBody)
		p := pb.Params[i+offset]
		return p.Key
	}
}
// ListPrepper resolves args using p and env and returns a list or an error.
// Tuple arguments are evaluated element-wise, dropping zero values; any other
// argument becomes a single-element list.
func ListPrepper(p *exp.Prog, env exp.Env, _ Node, _ string, arg exp.Exp) (lit.Val, error) {
	list := &lit.List{}
	tup, ok := arg.(*exp.Tupl)
	if !ok {
		ev, err := p.Eval(env, arg)
		if err != nil {
			return nil, err
		}
		list.Vals = []lit.Val{ev.Val}
		return list, nil
	}
	list.Vals = make([]lit.Val, 0, len(tup.Els))
	for _, el := range tup.Els {
		ev, err := p.Eval(env, el)
		if err != nil {
			return nil, err
		}
		// Zero values are filtered out of the result list.
		if !ev.Value().Zero() {
			list.Vals = append(list.Vals, ev.Val)
		}
	}
	return list, nil
}
// DynPrepper resolves args using p and env and returns a value or an error.
// Empty args result in an untyped null value. Multiple args are resolved as call.
func DynPrepper(p *exp.Prog, env exp.Env, _ Node, _ string, arg exp.Exp) (_ lit.Val, err error) {
	switch a := arg.(type) {
	case nil:
		// A tag without any argument reads as a boolean flag.
		return lit.Bool(true), nil
	case *exp.Tupl:
		if len(a.Els) == 0 {
			return lit.Null{}, nil
		}
		if len(a.Els) == 1 {
			arg = a.Els[0]
		} else {
			// Multiple elements are wrapped into a call expression.
			arg = &exp.Call{Args: a.Els, Src: a.Src}
		}
		// Only tuple-derived expressions are resolved here; plain
		// arguments skip this step and are evaluated directly below.
		arg, err = p.Resl(env, arg, typ.Void)
		if err != nil {
			return nil, err
		}
	}
	a, err := p.Eval(env, arg)
	if err != nil {
		return nil, err
	}
	return a.Val, nil
}
// BitsPrepper returns a key prepper that tries to resolve a bits constant.
// The constant is selected by the tag key; the argument is still resolved so
// evaluation errors surface.
func BitsPrepper(consts []typ.Const) KeyPrepper {
	return func(p *exp.Prog, env exp.Env, n Node, key string, arg exp.Exp) (lit.Val, error) {
		if v, err := DynPrepper(p, env, n, key, arg); err != nil {
			return v, err
		}
		for _, c := range consts {
			if cor.Keyed(c.Name) == key {
				return lit.Int(c.Val), nil
			}
		}
		return nil, fmt.Errorf("no constant named %q", key)
	}
}
// PathSetter sets el to n using key as path or returns an error.
func PathSetter(p *exp.Prog, n Node, key string, v lit.Val) error {
	path, err := cor.ParsePath(key)
	if err != nil {
		return fmt.Errorf("parse %s: %w", key, err)
	}
	return lit.CreatePath(p.Reg, n, path, v)
}
// ExtraSetter returns a key setter that assigns v at the path given by key,
// falling back to the same path nested under the map field m when the direct
// assignment fails. The original error is kept when the fallback fails too.
func ExtraSetter(m string) KeySetter {
	return func(p *exp.Prog, n Node, key string, v lit.Val) error {
		path, err := cor.ParsePath(key)
		if err != nil {
			return fmt.Errorf("parse %s: %w", key, err)
		}
		v = v.Value()
		err = lit.CreatePath(p.Reg, n, path, v)
		if err == nil {
			return nil
		}
		// Retry below the extras map.
		fallback := append(cor.Path{{Key: m}}, path...)
		if lit.CreatePath(p.Reg, n, fallback, v) == nil {
			return nil
		}
		return err
	}
}
// BitsSetter returns a key setter that ors the integer value of v into the
// node's bits field named b.
func BitsSetter(b string) KeySetter {
	return func(p *exp.Prog, n Node, _ string, v lit.Val) error {
		cur, err := n.Key(b)
		if err != nil {
			return err
		}
		old, err := lit.ToInt(cur)
		if err != nil {
			return err
		}
		add, err := lit.ToInt(v)
		if err != nil {
			return err
		}
		return n.SetKey(b, lit.Int(uint64(old)|uint64(add)))
	}
}
var norule Rule | ext/node_rules.go | 0.608594 | 0.406803 | node_rules.go | starcoder |
package checker
import (
"fmt"
"github.com/twtiger/gosecco/tree"
)
// typeChecker walks an expression tree and records in result the first
// place where a boolean expression appears in numeric position or vice
// versa. expectBoolean states which kind the current expression must be.
type typeChecker struct {
	result error
	expectBoolean bool
}
// typeCheckExpectingBoolean checks that x is a well-typed boolean expression.
func typeCheckExpectingBoolean(x tree.Expression) error {
	checker := &typeChecker{expectBoolean: true}
	x.Accept(checker)
	return checker.result
}
// typeCheckExpectingNumeric checks that x is a well-typed numeric expression.
func typeCheckExpectingNumeric(x tree.Expression) error {
	checker := &typeChecker{}
	x.Accept(checker)
	return checker.result
}
// AcceptAnd implements Visitor.
// A logical 'and' is boolean-valued and requires boolean operands.
func (t *typeChecker) AcceptAnd(v tree.And) {
	if !t.expectBoolean {
		t.result = fmt.Errorf("expected numeric expression but found: %s", tree.ExpressionString(v))
		return
	}
	// either is defined elsewhere in this package; presumably it yields
	// the first non-nil error — TODO confirm.
	res := either(
		typeCheckExpectingBoolean(v.Left),
		typeCheckExpectingBoolean(v.Right))
	if res != nil {
		t.result = res
	}
}
// AcceptArgument implements Visitor.
// An argument reference is numeric, never boolean.
func (t *typeChecker) AcceptArgument(v tree.Argument) {
	if t.expectBoolean {
		t.result = fmt.Errorf("expected boolean expression but found: %s", tree.ExpressionString(v))
	}
}
// AcceptArithmetic implements Visitor.
// Arithmetic is numeric-valued with numeric operands.
func (t *typeChecker) AcceptArithmetic(v tree.Arithmetic) {
	if t.expectBoolean {
		t.result = fmt.Errorf("expected boolean expression but found: %s", tree.ExpressionString(v))
		return
	}
	res := either(
		typeCheckExpectingNumeric(v.Left),
		typeCheckExpectingNumeric(v.Right))
	if res != nil {
		t.result = res
	}
}
// AcceptBinaryNegation implements Visitor.
// Bitwise negation is numeric-valued with a numeric operand.
func (t *typeChecker) AcceptBinaryNegation(v tree.BinaryNegation) {
	if t.expectBoolean {
		t.result = fmt.Errorf("expected boolean expression but found: %s", tree.ExpressionString(v))
		return
	}
	res := typeCheckExpectingNumeric(v.Operand)
	if res != nil {
		t.result = res
	}
}
// AcceptBooleanLiteral implements Visitor.
func (t *typeChecker) AcceptBooleanLiteral(v tree.BooleanLiteral) {
	if !t.expectBoolean {
		t.result = fmt.Errorf("expected numeric expression but found: %s", tree.ExpressionString(v))
	}
}
// AcceptCall implements Visitor.
// Calls must be resolved away before type checking runs.
func (t *typeChecker) AcceptCall(v tree.Call) {
	t.result = fmt.Errorf("found unresolved call: %s", v.Name)
}
// AcceptComparison implements Visitor.
func (t *typeChecker) AcceptComparison(v tree.Comparison) {
	if !t.expectBoolean {
		t.result = fmt.Errorf("expected numeric expression but found: %s", tree.ExpressionString(v))
		return
	}
	// This language only accepts comparisons between numeric values, so we implement that here
	res := either(
		typeCheckExpectingNumeric(v.Left),
		typeCheckExpectingNumeric(v.Right))
	if res != nil {
		t.result = res
	}
}
// AcceptInclusion implements Visitor.
// All operands of an 'in'/'notIn' expression must be numeric; the first
// error encountered is the one reported.
func (t *typeChecker) AcceptInclusion(v tree.Inclusion) {
	if !t.expectBoolean {
		t.result = fmt.Errorf("expected numeric expression but found: %s", tree.ExpressionString(v))
		return
	}
	res := typeCheckExpectingNumeric(v.Left)
	for _, r := range v.Rights {
		res2 := typeCheckExpectingNumeric(r)
		if res == nil {
			res = res2
		}
	}
	if res != nil {
		t.result = res
	}
}
// AcceptNegation implements Visitor.
// Logical negation is boolean-valued with a boolean operand.
func (t *typeChecker) AcceptNegation(v tree.Negation) {
	if !t.expectBoolean {
		t.result = fmt.Errorf("expected numeric expression but found: %s", tree.ExpressionString(v))
		return
	}
	res := typeCheckExpectingBoolean(v.Operand)
	if res != nil {
		t.result = res
	}
}
// AcceptNumericLiteral implements Visitor.
func (t *typeChecker) AcceptNumericLiteral(v tree.NumericLiteral) {
	if t.expectBoolean {
		t.result = fmt.Errorf("expected boolean expression but found: %s", tree.ExpressionString(v))
	}
}
// AcceptOr implements Visitor.
// A logical 'or' is boolean-valued and requires boolean operands.
func (t *typeChecker) AcceptOr(v tree.Or) {
	if !t.expectBoolean {
		t.result = fmt.Errorf("expected numeric expression but found: %s", tree.ExpressionString(v))
		return
	}
	res := either(
		typeCheckExpectingBoolean(v.Left),
		typeCheckExpectingBoolean(v.Right))
	if res != nil {
		t.result = res
	}
}
// AcceptVariable implements Visitor.
// Variables must be resolved away before type checking runs.
func (t *typeChecker) AcceptVariable(v tree.Variable) {
	t.result = fmt.Errorf("found unresolved variable: %s", v.Name)
} | vendor/github.com/twtiger/gosecco/checker/type_checker.go | 0.716119 | 0.442516 | type_checker.go | starcoder |
package typedefs
// SceneItemTransform represents the complex type for SceneItemTransform.
// The field layout mirrors an obs-websocket payload — TODO confirm against
// the protocol version this package targets.
type SceneItemTransform struct {
	Bounds struct {
		// Alignment of the bounding box.
		Alignment int `json:"alignment"`
		// Type of bounding box. Can be "OBS_BOUNDS_STRETCH", "OBS_BOUNDS_SCALE_INNER", "OBS_BOUNDS_SCALE_OUTER",
		// "OBS_BOUNDS_SCALE_TO_WIDTH", "OBS_BOUNDS_SCALE_TO_HEIGHT", "OBS_BOUNDS_MAX_ONLY" or "OBS_BOUNDS_NONE".
		Type string `json:"type"`
		// Width of the bounding box.
		X float64 `json:"x"`
		// Height of the bounding box.
		Y float64 `json:"y"`
	} `json:"bounds"`
	Crop struct {
		// The number of pixels cropped off the bottom of the scene item before scaling.
		Bottom int `json:"bottom"`
		// The number of pixels cropped off the left of the scene item before scaling.
		Left int `json:"left"`
		// The number of pixels cropped off the right of the scene item before scaling.
		Right int `json:"right"`
		// The number of pixels cropped off the top of the scene item before scaling.
		Top int `json:"top"`
	} `json:"crop"`
	// List of children (if this item is a group)
	GroupChildren []SceneItemTransform `json:"groupChildren"`
	// Scene item height (base source height multiplied by the vertical scaling factor)
	Height float64 `json:"height"`
	// If the scene item is locked in position.
	Locked bool `json:"locked"`
	// Name of the item's parent (if this item belongs to a group)
	ParentGroupName string `json:"parentGroupName"`
	Position struct {
		// The point on the scene item that the item is manipulated from.
		Alignment float64 `json:"alignment"`
		// The x position of the scene item from the left.
		X float64 `json:"x"`
		// The y position of the scene item from the top.
		Y float64 `json:"y"`
	} `json:"position"`
	// The clockwise rotation of the scene item in degrees around the point of alignment.
	Rotation float64 `json:"rotation"`
	Scale struct {
		// The scale filter of the source. Can be "OBS_SCALE_DISABLE", "OBS_SCALE_POINT", "OBS_SCALE_BICUBIC",
		// "OBS_SCALE_BILINEAR", "OBS_SCALE_LANCZOS" or "OBS_SCALE_AREA".
		Filter string `json:"filter"`
		// The x-scale factor of the scene item.
		X float64 `json:"x"`
		// The y-scale factor of the scene item.
		Y float64 `json:"y"`
	} `json:"scale"`
	// Base source (without scaling) of the source
	SourceHeight int `json:"sourceHeight"`
	// Base width (without scaling) of the source
	SourceWidth int `json:"sourceWidth"`
	// If the scene item is visible.
	Visible bool `json:"visible"`
	// Scene item width (base source width multiplied by the horizontal scaling factor)
	Width float64 `json:"width"`
} | api/typedefs/xx_generated.sceneitemtransform.go | 0.830216 | 0.475057 | xx_generated.sceneitemtransform.go | starcoder |
package regression
import (
"errors"
"fmt"
"math"
"github.com/jung-kurt/etc/go/util"
"gonum.org/v1/gonum/optimize"
"gonum.org/v1/gonum/stat"
)
// LinearFitType groups together the slope, intercept, coefficient of
// determination, and root-mean-square average deviation of a regression line.
type LinearFitType struct {
	Eq util.LinearEquationType
	RSquared float64
	RMS float64
}
// f3 formats val with three significant digits for display in String.
func f3(val float64) string {
	return util.Float64ToStrSig(val, ".", ",", 3, 3)
}
// String implements the fmt Stringer interface, rendering the fitted line
// as an equation followed by its fit statistics.
func (fit LinearFitType) String() string {
	op, b := "+", fit.Eq.Intercept
	if b < 0 {
		// Fold the sign of the intercept into the printed operator.
		op, b = "-", -b
	}
	return fmt.Sprintf("y(x) = %s * x %s %s (r squared %s, RMS %s)",
		f3(fit.Eq.Slope), op, f3(b), f3(fit.RSquared), f3(fit.RMS))
}
// DownhillSimplex finds the lowest value reported by fnc. The number of
// dimensions is specified by the number of elements in init, the initial
// location. The same number of elements will be passed to fnc, the callback
// function, when probing. The final result, if err is nil, will contain this
// number of elements as well. Two parameters can be adjusted to avoid
// converging on suboptimal local minima: length specifies the simplex size,
// and expansion (some value greater than 1) specifies the multiplier used when
// expanding the simplex.
func DownhillSimplex(fnc func(x []float64) float64, init []float64, length, expansion float64) (res []float64, err error) {
	var prb optimize.Problem
	var r *optimize.Result
	prb.Func = fnc
	// Nelder-Mead needs no gradient, matching the derivative-free fnc.
	// NOTE(review): optimize.Local is the API of the pinned gonum version;
	// newer releases rename it — confirm before upgrading.
	r, err = optimize.Local(prb, init, nil, &optimize.NelderMead{
		SimplexSize: length,
		Expansion: expansion, // 1.25,
	})
	if err == nil {
		res = r.X
	}
	return
}
// Center uses the downhill simplex method to calculate the center of a circle
// of known radius to a set of observed points specified by pairs. An error is
// returned when pairs is empty or the optimizer fails to converge.
func Center(pairs []util.PairType, radius float64) (x, y float64, err error) {
	if len(pairs) == 0 {
		err = errors.New("insufficient number of points to locate center")
		return
	}
	// Objective: sum of squared radial errors between each observed point
	// and a circle of the given radius centered at (v[0], v[1]).
	centerFnc := func(v []float64) (val float64) {
		alpha, beta := v[0], v[1]
		for _, pr := range pairs {
			xa := pr.X - alpha
			yb := pr.Y - beta
			e := math.Sqrt(xa*xa+yb*yb) - radius
			val += e * e
		}
		return
	}
	lf, rt, tp, bt := util.BoundingBox(pairs)
	// Initial guess: horizontal middle of the bounding box, and the bottom
	// edge mirrored away from the top (bt + bt - tp) — presumably placing
	// the seed center beyond the observed arc; TODO confirm intent. The
	// simplex size scales with the box width.
	res, err := DownhillSimplex(centerFnc, []float64{(lf + rt) / 2, bt + bt - tp}, (rt-lf)/32, 1.2)
	if err == nil {
		x = res[0]
		y = res[1]
	}
	return
}
// LinearFit returns the slope, intercept, r-squared values, and RMS value for
// the least squares regression fit of the points specified by xList and yList.
func LinearFit(xList, yList []float64) (le LinearFitType) {
	// The false argument requests an unweighted fit with a free intercept.
	le.Eq.Intercept, le.Eq.Slope = stat.LinearRegression(xList, yList, nil, false)
	le.RSquared = stat.RSquared(xList, yList, nil, le.Eq.Intercept, le.Eq.Slope)
	le.RMS = util.RootMeanSquareLinear(xList, yList, le.Eq.Intercept, le.Eq.Slope)
	// logf("RSquared: Gonum %.3f, local %.3f", le.RSquared, rSquared(xList, yList, le.Eq.Intercept, le.Eq.Slope))
	return
} | go/regression/mth.go | 0.819063 | 0.576661 | mth.go | starcoder |
package chess
import (
"fmt"
"strings"
)
// ChessBoard holds the 8x8 grid of figures (nil marks an empty square,
// board[0] is rank 1) and the figures captured so far.
type ChessBoard struct {
	board [][]*Figure
	graveYard []*Figure
}
// NewCleanChessBoard returns an empty 8x8 board with no figures placed.
func NewCleanChessBoard() *ChessBoard {
	rows := make([][]*Figure, 8)
	for r := range rows {
		rows[r] = make([]*Figure, 8)
	}
	return &ChessBoard{
		board: rows,
		graveYard: make([]*Figure, 0),
	}
}
// NewChessBoard returns a board with both armies in their starting
// positions: white on rows 0-1, black on rows 6-7.
func NewChessBoard() *ChessBoard {
	b := NewCleanChessBoard()
	fillFigureRow(b.board[0], White)
	fillPawnRow(b.board[1], White)
	fillPawnRow(b.board[6], Black)
	fillFigureRow(b.board[7], Black)
	return b
}
// fillPawnRow populates every square of row with a pawn of the given color.
func fillPawnRow(row []*Figure, color Color) {
	for i := range row {
		row[i] = &Figure{Name: FigurePawn, Color: color}
	}
}
// fillFigureRow populates row with the standard back rank for color:
// rooks on the corners, then knights and bishops, with the queen on
// column 3 and the king on column 4 (correct for both colors).
func fillFigureRow(row []*Figure, color Color) {
	row[0] = &Figure{Name: FigureRook, Color: color}
	row[1] = &Figure{Name: FigureKnight, Color: color}
	row[2] = &Figure{Name: FigureBishop, Color: color}
	row[3] = &Figure{Name: FigureQueen, Color: color}
	row[4] = &Figure{Name: FigureKing, Color: color}
	row[5] = &Figure{Name: FigureBishop, Color: color}
	row[6] = &Figure{Name: FigureKnight, Color: color}
	row[7] = &Figure{Name: FigureRook, Color: color}
}
// GetFigure returns the figure at pos, or nil for an empty square.
func (b ChessBoard) GetFigure(pos Pos) *Figure {
	return b.board[pos.Row][pos.Col]
}
// MoveStr parses from and to as board coordinates and performs the move.
func (b *ChessBoard) MoveStr(from, to string) error {
	src, err := NewPosFromStr(from)
	if err != nil {
		return err
	}
	dst, err := NewPosFromStr(to)
	if err != nil {
		return err
	}
	return b.Move(src, dst)
}
// Move moves the figure at from to to. The destination must be among the
// figure's possible moves; any captured figure is appended to the graveyard.
func (b *ChessBoard) Move(from, to Pos) error {
	moves, err := b.GetPossibleMoves(from)
	if err != nil {
		return err
	}
	if !moves.Contains(to) {
		return fmt.Errorf("not possible move")
	}
	// Capture: keep the displaced figure in the graveyard.
	if toFig := b.GetFigure(to); toFig != nil {
		b.graveYard = append(b.graveYard, toFig)
	}
	removedFig := b.removeFigure(from)
	b.setFigure(removedFig, to)
	// Track the move count; pawns use it for the initial two-square step.
	removedFig.Moves++
	return nil
}
// removeFigure clears the square at pos and returns the figure that was there.
func (b *ChessBoard) removeFigure(pos Pos) *Figure {
	removed := b.board[pos.Row][pos.Col]
	b.board[pos.Row][pos.Col] = nil
	return removed
}
// setFigure places f on the square at pos, overwriting any previous figure.
func (b *ChessBoard) setFigure(f *Figure, pos Pos) {
	b.board[pos.Row][pos.Col] = f
}
// GetPossibleMoves returns every square the figure at pos may move to.
// Check, castling and en passant are not considered. An error is returned
// when pos is empty or holds an unknown figure.
func (b ChessBoard) GetPossibleMoves(pos Pos) (Moves, error) {
	f := b.GetFigure(pos)
	if f == nil {
		return nil, fmt.Errorf("no figure")
	}
	res := Moves{}
	switch f.Name {
	case FigureKing:
		// One step in any direction onto an empty or enemy-held square.
		moves := pos.adjacent()
		for _, movePos := range moves {
			if movePosFigure := b.GetFigure(movePos); movePosFigure == nil || !SameColor(f.Color, movePosFigure.Color) {
				res = append(res, movePos)
			}
		}
	case FigureQueen:
		// Rays in all eight directions. (0,0) is not a direction and is
		// skipped so pos.line is never asked for a zero-step ray.
		for rowOffset := -1; rowOffset <= 1; rowOffset++ {
			for colOffset := -1; colOffset <= 1; colOffset++ {
				if rowOffset == 0 && colOffset == 0 {
					continue
				}
				line := pos.line(rowOffset, colOffset)
				res = append(res, b.filterLine(f.Color, line)...)
			}
		}
	case FigureBishop:
		// Diagonal rays only.
		for _, offsets := range [][]int{{-1, -1}, {1, -1}, {-1, 1}, {1, 1}} {
			line := pos.line(offsets[0], offsets[1])
			res = append(res, b.filterLine(f.Color, line)...)
		}
	case FigureKnight:
		moves := pos.knightMoves()
		res = append(res, b.filterAdjacent(f.Color, moves)...)
	case FigurePawn:
		rowOffset := rowOffsetForColor(f.Color)
		// Forward moves: one square if empty, two from the start square.
		line := pos.line(rowOffset, 0)
		if len(line) > 0 {
			if b.GetFigure(line[0]) == nil {
				res = append(res, line[0])
				if f.Moves == 0 && len(line) > 1 && b.GetFigure(line[1]) == nil {
					res = append(res, line[1])
				}
			}
		}
		// Diagonal squares count only when capturing an enemy figure.
		diags := Moves{
			{Col: pos.Col - 1, Row: pos.Row + rowOffset},
			{Col: pos.Col + 1, Row: pos.Row + rowOffset},
		}
		diags = filterInvalidMoves(diags)
		for _, diag := range diags {
			if diagFigure := b.GetFigure(diag); diagFigure != nil && !SameColor(diagFigure.Color, f.Color) {
				res = append(res, diag)
			}
		}
	case FigureRook:
		// Horizontal and vertical rays.
		for _, offsets := range [][]int{{-1, 0}, {1, 0}, {0, -1}, {0, 1}} {
			line := pos.line(offsets[0], offsets[1])
			res = append(res, b.filterLine(f.Color, line)...)
		}
	default:
		return nil, fmt.Errorf("unknown figure")
	}
	return res, nil
}
// filterAdjacent keeps only those positions in line that are empty or hold
// a figure of a different color.
func (b ChessBoard) filterAdjacent(color Color, line Moves) Moves {
	keep := Moves{}
	for _, pos := range line {
		if f := b.GetFigure(pos); f == nil || !SameColor(f.Color, color) {
			keep = append(keep, pos)
		}
	}
	return keep
}
// filterLine keeps the empty squares of line up to the first occupied one,
// which is also kept when held by the opposing color (a capture).
func (b ChessBoard) filterLine(color Color, line Moves) Moves {
	keep := Moves{}
	for _, pos := range line {
		f := b.GetFigure(pos)
		if f == nil {
			keep = append(keep, pos)
			continue
		}
		if !SameColor(color, f.Color) {
			keep = append(keep, pos)
		}
		break
	}
	return keep
}
// header labels the files a-h above the rendered board.
const header = "  a b c d e f g h"
// String renders the board as text, rank 8 at the top, rank 1 at the bottom.
func (b ChessBoard) String() string {
	builder := strings.Builder{}
	builder.WriteString(header + "\n")
	for r := len(b.board) - 1; r >= 0; r-- {
		row := b.board[r]
		builder.WriteString(fmt.Sprintf("%d %s\n", r+1, b.rowToString(row)))
	}
	return builder.String()
}
// rowToString renders a single rank as space-separated glyphs, with a blank
// for every empty square.
func (b ChessBoard) rowToString(row []*Figure) string {
	glyphs := []string{}
	for _, f := range row {
		glyph := " "
		if f != nil {
			glyph = f.Glyph()
		}
		glyphs = append(glyphs, glyph)
	}
	return strings.Join(glyphs, " ")
} | chess/chessboard.go | 0.767777 | 0.441733 | chessboard.go | starcoder |
package instrumentation
import "time"
// MultiInstrumentation satisfies the Instrumentation interface by demuxing
// each call to multiple instrumentation targets.
type MultiInstrumentation struct {
	instrs []Instrumentation
}
// Satisfaction guaranteed.
var _ Instrumentation = MultiInstrumentation{}
// NewMultiInstrumentation creates a new MultiInstrumentation that will demux
// all calls to the provided Instrumentation targets.
func NewMultiInstrumentation(instrs ...Instrumentation) Instrumentation {
	return MultiInstrumentation{
		instrs: instrs,
	}
}
// InsertCall satisfies the Instrumentation interface.
func (i MultiInstrumentation) InsertCall() {
	for _, instr := range i.instrs {
		instr.InsertCall()
	}
}
// InsertRecordCount satisfies the Instrumentation interface.
func (i MultiInstrumentation) InsertRecordCount(n int) {
	for _, instr := range i.instrs {
		instr.InsertRecordCount(n)
	}
}
// InsertCallDuration satisfies the Instrumentation interface.
func (i MultiInstrumentation) InsertCallDuration(d time.Duration) {
	for _, instr := range i.instrs {
		instr.InsertCallDuration(d)
	}
}
// InsertRecordDuration satisfies the Instrumentation interface.
func (i MultiInstrumentation) InsertRecordDuration(d time.Duration) {
	for _, instr := range i.instrs {
		instr.InsertRecordDuration(d)
	}
}
// InsertQuorumFailure satisfies the Instrumentation interface.
func (i MultiInstrumentation) InsertQuorumFailure() {
	for _, instr := range i.instrs {
		instr.InsertQuorumFailure()
	}
}
// SelectCall satisfies the Instrumentation interface.
func (i MultiInstrumentation) SelectCall() {
	for _, instr := range i.instrs {
		instr.SelectCall()
	}
}
// SelectKeys satisfies the Instrumentation interface.
func (i MultiInstrumentation) SelectKeys(n int) {
	for _, instr := range i.instrs {
		instr.SelectKeys(n)
	}
}
// SelectSendTo satisfies the Instrumentation interface.
func (i MultiInstrumentation) SelectSendTo(n int) {
	for _, instr := range i.instrs {
		instr.SelectSendTo(n)
	}
}
// SelectFirstResponseDuration satisfies the Instrumentation interface.
func (i MultiInstrumentation) SelectFirstResponseDuration(d time.Duration) {
	for _, instr := range i.instrs {
		instr.SelectFirstResponseDuration(d)
	}
}
// SelectPartialError satisfies the Instrumentation interface.
func (i MultiInstrumentation) SelectPartialError() {
	for _, instr := range i.instrs {
		instr.SelectPartialError()
	}
}
// SelectBlockingDuration satisfies the Instrumentation interface.
func (i MultiInstrumentation) SelectBlockingDuration(d time.Duration) {
	for _, instr := range i.instrs {
		instr.SelectBlockingDuration(d)
	}
}
// SelectOverheadDuration satisfies the Instrumentation interface.
func (i MultiInstrumentation) SelectOverheadDuration(d time.Duration) {
	for _, instr := range i.instrs {
		instr.SelectOverheadDuration(d)
	}
}
// SelectDuration satisfies the Instrumentation interface.
func (i MultiInstrumentation) SelectDuration(d time.Duration) {
	for _, instr := range i.instrs {
		instr.SelectDuration(d)
	}
}
// SelectSendAllPermitGranted satisfies the Instrumentation interface.
func (i MultiInstrumentation) SelectSendAllPermitGranted() {
	for _, instr := range i.instrs {
		instr.SelectSendAllPermitGranted()
	}
}
// SelectSendAllPermitRejected satisfies the Instrumentation interface.
func (i MultiInstrumentation) SelectSendAllPermitRejected() {
	for _, instr := range i.instrs {
		instr.SelectSendAllPermitRejected()
	}
}
// SelectSendAllPromotion satisfies the Instrumentation interface.
func (i MultiInstrumentation) SelectSendAllPromotion() {
	for _, instr := range i.instrs {
		instr.SelectSendAllPromotion()
	}
}
// SelectRetrieved satisfies the Instrumentation interface.
func (i MultiInstrumentation) SelectRetrieved(n int) {
	for _, instr := range i.instrs {
		instr.SelectRetrieved(n)
	}
}
// SelectReturned satisfies the Instrumentation interface.
func (i MultiInstrumentation) SelectReturned(n int) {
	for _, instr := range i.instrs {
		instr.SelectReturned(n)
	}
}
// SelectRepairNeeded satisfies the Instrumentation interface.
func (i MultiInstrumentation) SelectRepairNeeded(n int) {
	for _, instr := range i.instrs {
		instr.SelectRepairNeeded(n)
	}
}
// DeleteCall satisfies the Instrumentation interface.
func (i MultiInstrumentation) DeleteCall() {
	for _, instr := range i.instrs {
		instr.DeleteCall()
	}
}
// DeleteRecordCount satisfies the Instrumentation interface.
func (i MultiInstrumentation) DeleteRecordCount(n int) {
	for _, instr := range i.instrs {
		instr.DeleteRecordCount(n)
	}
}
// DeleteCallDuration satisfies the Instrumentation interface.
func (i MultiInstrumentation) DeleteCallDuration(d time.Duration) {
	for _, instr := range i.instrs {
		instr.DeleteCallDuration(d)
	}
}
// DeleteRecordDuration satisfies the Instrumentation interface.
func (i MultiInstrumentation) DeleteRecordDuration(d time.Duration) {
	for _, instr := range i.instrs {
		instr.DeleteRecordDuration(d)
	}
}
// DeleteQuorumFailure satisfies the Instrumentation interface.
func (i MultiInstrumentation) DeleteQuorumFailure() {
	for _, instr := range i.instrs {
		instr.DeleteQuorumFailure()
	}
}
// RepairCall satisfies the Instrumentation interface by forwarding the call
// to every underlying target, like all other methods of this type.
func (i MultiInstrumentation) RepairCall() {
	for _, instr := range i.instrs {
		instr.RepairCall()
	}
}
// RepairRequest satisfies the Instrumentation interface.
func (i MultiInstrumentation) RepairRequest(n int) {
	for _, instr := range i.instrs {
		instr.RepairRequest(n)
	}
}
// RepairDiscarded satisfies the Instrumentation interface.
func (i MultiInstrumentation) RepairDiscarded(n int) {
	for _, instr := range i.instrs {
		instr.RepairDiscarded(n)
	}
}
// RepairWriteSuccess satisfies the Instrumentation interface.
func (i MultiInstrumentation) RepairWriteSuccess(n int) {
	for _, instr := range i.instrs {
		instr.RepairWriteSuccess(n)
	}
}
// RepairWriteFailure satisfies the Instrumentation interface.
func (i MultiInstrumentation) RepairWriteFailure(n int) {
	for _, instr := range i.instrs {
		instr.RepairWriteFailure(n)
	}
}
// WalkKeys satisfies the Instrumentation interface.
func (i MultiInstrumentation) WalkKeys(n int) {
	for _, instr := range i.instrs {
		instr.WalkKeys(n)
	}
} | instrumentation/multi_instrumentation.go | 0.757884 | 0.410461 | multi_instrumentation.go | starcoder |
package schema
import (
"math"
"strconv"
"github.com/ccbrown/api-fu/graphql/ast"
)
// coerceInt converts v to an int when it fits the signed 32-bit range that
// GraphQL defines for Int. Booleans become 0 or 1 and floats are accepted
// only when integral. Any other value yields nil.
func coerceInt(v interface{}) interface{} {
	switch v := v.(type) {
	case bool:
		if v {
			return 1
		}
		return 0
	case int8:
		return int(v)
	case uint8:
		return int(v)
	case int16:
		return int(v)
	case uint16:
		return int(v)
	case int32:
		return int(v)
	case uint32:
		if v > math.MaxInt32 {
			return nil
		}
		return int(v)
	case int64:
		if v < math.MinInt32 || v > math.MaxInt32 {
			return nil
		}
		return int(v)
	case uint64:
		if v > math.MaxInt32 {
			return nil
		}
		return int(v)
	case int:
		if v < math.MinInt32 || v > math.MaxInt32 {
			return nil
		}
		return v
	case uint:
		if v > math.MaxInt32 {
			return nil
		}
		return int(v)
	case float32:
		return coerceInt(float64(v))
	case float64:
		// Accept only whole numbers within the 32-bit range.
		if t := math.Trunc(v); t == v && t >= math.MinInt32 && t <= math.MaxInt32 {
			return int(t)
		}
	}
	return nil
}
// IntType implements the Int type as defined by the GraphQL spec.
// Literals must be integer tokens parseable as signed 32-bit values;
// variables and results share the coerceInt rules.
var IntType = &ScalarType{
	Name: "Int",
	LiteralCoercion: func(v ast.Value) interface{} {
		switch v := v.(type) {
		case *ast.IntValue:
			if n, err := strconv.ParseInt(v.Value, 10, 32); err == nil {
				return int(n)
			}
		}
		return nil
	},
	VariableValueCoercion: coerceInt,
	ResultCoercion: coerceInt,
}
// coerceFloat converts any Go numeric or boolean value to float64,
// returning nil for unsupported types.
func coerceFloat(v interface{}) interface{} {
	switch v := v.(type) {
	case bool:
		if v {
			return 1.0
		}
		return 0.0
	case float64:
		return v
	case float32:
		return float64(v)
	case int:
		return float64(v)
	case int8:
		return float64(v)
	case int16:
		return float64(v)
	case int32:
		return float64(v)
	case int64:
		return float64(v)
	case uint:
		return float64(v)
	case uint8:
		return float64(v)
	case uint16:
		return float64(v)
	case uint32:
		return float64(v)
	case uint64:
		return float64(v)
	}
	return nil
}
// FloatType implements the Float type as defined by the GraphQL spec.
// Both integer and float literals coerce to Float, per the spec.
var FloatType = &ScalarType{
	Name: "Float",
	LiteralCoercion: func(v ast.Value) interface{} {
		switch v := v.(type) {
		case *ast.IntValue:
			if n, err := strconv.ParseFloat(v.Value, 64); err == nil {
				return n
			}
		case *ast.FloatValue:
			if n, err := strconv.ParseFloat(v.Value, 64); err == nil {
				return n
			}
		}
		return nil
	},
	VariableValueCoercion: coerceFloat,
	ResultCoercion: coerceFloat,
}
// coerceString passes string values through unchanged and rejects
// everything else with nil.
func coerceString(v interface{}) interface{} {
	if _, ok := v.(string); ok {
		return v
	}
	return nil
}
// StringType implements the String type as defined by the GraphQL spec.
// Only string literals and string values coerce; everything else is nil.
var StringType = &ScalarType{
	Name: "String",
	LiteralCoercion: func(v ast.Value) interface{} {
		switch v := v.(type) {
		case *ast.StringValue:
			return v.Value
		}
		return nil
	},
	VariableValueCoercion: coerceString,
	ResultCoercion: coerceString,
}
// coerceBoolean passes bool values through unchanged and rejects
// everything else with nil.
func coerceBoolean(v interface{}) interface{} {
	if b, ok := v.(bool); ok {
		return b
	}
	return nil
}
// BooleanType implements the Boolean type as defined by the GraphQL spec.
var BooleanType = &ScalarType{
	Name: "Boolean",
	LiteralCoercion: func(v ast.Value) interface{} {
		switch v := v.(type) {
		case *ast.BooleanValue:
			return v.Value
		}
		return nil
	},
	VariableValueCoercion: coerceBoolean,
	ResultCoercion: coerceBoolean,
}
// IDType implements the ID type as defined by the GraphQL spec. It can be deserialized from a
// string or an integer type, but always serializes to a string.
var IDType = &ScalarType{
	Name: "ID",
	LiteralCoercion: func(v ast.Value) interface{} {
		switch v := v.(type) {
		case *ast.IntValue:
			// Bit size 0 means the platform int width.
			if n, err := strconv.ParseInt(v.Value, 10, 0); err == nil {
				return int(n)
			}
		case *ast.StringValue:
			return v.Value
		}
		return nil
	},
	VariableValueCoercion: func(v interface{}) interface{} {
		switch v := v.(type) {
		case int:
			return v
		case float64:
			// JSON numbers arrive as float64; accept only whole values.
			if n := int(math.Trunc(v)); float64(n) == v {
				return n
			}
		case string:
			return v
		}
		return nil
	},
	ResultCoercion: func(v interface{}) interface{} {
		// Integers of any width serialize to their decimal string form;
		// unsigned values wider than int64 are rejected.
		switch v := v.(type) {
		case int8:
			return strconv.FormatInt(int64(v), 10)
		case uint8:
			return strconv.FormatInt(int64(v), 10)
		case int16:
			return strconv.FormatInt(int64(v), 10)
		case uint16:
			return strconv.FormatInt(int64(v), 10)
		case int32:
			return strconv.FormatInt(int64(v), 10)
		case uint32:
			return strconv.FormatInt(int64(v), 10)
		case int64:
			return strconv.FormatInt(v, 10)
		case uint64:
			if v <= math.MaxInt64 {
				return strconv.FormatInt(int64(v), 10)
			}
		case int:
			return strconv.FormatInt(int64(v), 10)
		case uint:
			if v <= math.MaxInt64 {
				return strconv.FormatInt(int64(v), 10)
			}
		case string:
			return v
		}
		return nil
	},
}
// BuiltInTypes maps all of the built-in scalar types to their names.
var BuiltInTypes = map[string]*ScalarType{
	"Int": IntType,
	"Float": FloatType,
	"String": StringType,
	"Boolean": BooleanType,
	"ID": IDType,
}
// SkipDirective implements the @skip directive as defined by the GraphQL spec.
var SkipDirective = &DirectiveDefinition{
	Description: "The @skip directive may be provided for fields, fragment spreads, and inline fragments, and allows for conditional exclusion during execution as described by the if argument.",
	Arguments: map[string]*InputValueDefinition{
		"if": {
			Type: NewNonNullType(BooleanType),
		},
	},
	Locations: []DirectiveLocation{DirectiveLocationField, DirectiveLocationFragmentSpread, DirectiveLocationInlineFragment},
	// The field is collected (kept) only when if is false.
	FieldCollectionFilter: func(arguments map[string]interface{}) bool {
		return !arguments["if"].(bool)
	},
}
// IncludeDirective implements the @include directive as defined by the GraphQL spec.
var IncludeDirective = &DirectiveDefinition{
	Description: "The @include directive may be provided for fields, fragment spreads, and inline fragments, and allows for conditional inclusion during execution as described by the if argument.",
	Arguments: map[string]*InputValueDefinition{
		"if": {
			Type: NewNonNullType(BooleanType),
		},
	},
	Locations: []DirectiveLocation{DirectiveLocationField, DirectiveLocationFragmentSpread, DirectiveLocationInlineFragment},
	// The field is collected (kept) only when if is true.
	FieldCollectionFilter: func(arguments map[string]interface{}) bool {
		return arguments["if"].(bool)
	},
} | graphql/schema/builtins.go | 0.630116 | 0.442094 | builtins.go | starcoder |
package scroll
import (
"image"
"image/color"
"gioui.org/f32"
"gioui.org/gesture"
"gioui.org/io/pointer"
"gioui.org/layout"
"gioui.org/op"
"gioui.org/op/clip"
"gioui.org/op/paint"
"gioui.org/unit"
"gioui.org/widget"
)
// Shorthand aliases for the gio layout types used throughout this package.
type (
	C = layout.Context
	D = layout.Dimensions
)
// Scrollable holds state of a scrolling widget. The Scrolled() method is
// used to tell both whether a scroll operation occurred during the last frame
// as well as the progress through the scrollable region at the end of the
// scroll operation.
type Scrollable struct {
	// Track clicks.
	clickable widget.Clickable
	// Track drag events.
	drag gesture.Drag
	// Has the bar scrolled since the previous frame?
	scrolled bool
	// Cached length of scroll region after layout has been computed. This can be
	// off if the screen is being resized, but we have no better way to acquire
	// this data.
	length int
	// progress is how far along we are as a fraction between 0 and 1.
	progress float32
}
// Bar represents a scrolling indicator for a layout.List
type Bar struct {
	*Scrollable
	// Color of the scroll indicator.
	Color color.NRGBA
	// Progress tells the bar where to render the indicator as a fraction [0, 1].
	Progress float32
	// Scale tells the bar what fraction of the available axis space it should
	// occupy as a fraction between [0, 1].
	Scale float32
	// Axis along which the bar is oriented.
	Axis Axis
	// Axis independent size.
	Thickness unit.Value
	// MinLength is the minimum length of the scroll indicator. Regardless of
	// the scale of the bar, it will not be displayed shorter than this. If
	// the scale parameter isn't provided, the indicator will always have
	// this length.
	MinLength unit.Value
}
// Axis specifies the scroll bar orientation.
// Default to `Vertical`.
// NOTE(review): the constants below are untyped; consider declaring them
// as Axis (breaking only callers that use them as plain ints).
type Axis int
const (
	Vertical = 0
	Horizontal = 1
)
// DefaultBar returns a bar with a translucent gray background. The progress
// parameter tells the bar how far through its range of motion to draw itself.
// The scale parameter tells the bar what fraction of the scrollable space is
// visible. Scale may be left as zero to use a minimum-length scroll indicator
// that does not respond to changes in the length of the scrollable region.
func DefaultBar(state *Scrollable, progress, scale float32) Bar {
	bar := Bar{Scrollable: state, Progress: progress, Scale: scale}
	bar.Color = color.NRGBA{A: 200}
	bar.Thickness = unit.Dp(8)
	bar.MinLength = unit.Dp(16)
	return bar
}
// Update the internal state of the bar.
func (sb *Scrollable) Update(gtx C, axis Axis) {
sb.scrolled = false
// Restrict progress to [0, 1].
defer func() {
if sb.progress > 1 {
sb.progress = 1
} else if sb.progress < 0 {
sb.progress = 0
}
}()
pickAxis := func(pt f32.Point) (v float32) {
switch axis {
case Vertical:
v = pt.Y
case Horizontal:
v = pt.X
}
return v
}
if sb.clickable.Clicked() {
if presses := sb.clickable.History(); len(presses) > 0 {
press := presses[len(presses)-1]
sb.progress = float32(pickAxis(press.Position)) / float32(sb.length)
sb.scrolled = true
}
}
if drags := sb.drag.Events(gtx.Metric, gtx, axis.ToGesture()); len(drags) > 0 {
delta := pickAxis(drags[len(drags)-1].Position)
sb.progress = (sb.progress*float32(sb.length) + (delta / 2)) / float32(sb.length)
sb.scrolled = true
}
}
// Scrolled returns true if the scroll position changed within the last frame.
func (sb Scrollable) Scrolled() (didScroll bool, progress float32) {
return sb.scrolled, sb.progress
}
// Layout renders the bar into the provided context.
func (sb Bar) Layout(gtx C) D {
sb.Scrollable.progress = sb.Progress
sb.Update(gtx, sb.Axis)
if scrolled, _ := sb.Scrolled(); scrolled {
op.InvalidateOp{}.Add(gtx.Ops)
}
scaledLength := float32(0)
switch sb.Axis {
case Horizontal:
scaledLength = (sb.Scale * float32(gtx.Constraints.Max.X))
case Vertical:
scaledLength = (sb.Scale * float32(gtx.Constraints.Max.Y))
}
if int(scaledLength) > gtx.Px(sb.MinLength) {
sb.MinLength = unit.Dp(scaledLength / gtx.Metric.PxPerDp)
}
return sb.Axis.Layout(gtx, func(gtx C) D {
if sb.MinLength == (unit.Value{}) {
sb.MinLength = unit.Dp(16)
}
if sb.Thickness == (unit.Value{}) {
sb.Thickness = unit.Dp(8)
}
var (
total float32
size f32.Point
top = unit.Dp(2)
left = unit.Dp(2)
)
switch sb.Axis {
case Horizontal:
sb.length = gtx.Constraints.Max.X
size = f32.Point{
X: float32(gtx.Px(sb.MinLength)),
Y: float32(gtx.Px(sb.Thickness)),
}
total = float32(gtx.Constraints.Max.X) / gtx.Metric.PxPerDp
left = unit.Dp(total * sb.Progress)
if left.V+sb.MinLength.V > total {
left = unit.Dp(total - sb.MinLength.V)
}
case Vertical:
sb.length = gtx.Constraints.Max.Y
size = f32.Point{
X: float32(gtx.Px(sb.Thickness)),
Y: float32(gtx.Px(sb.MinLength)),
}
total = float32(gtx.Constraints.Max.Y) / gtx.Metric.PxPerDp
top = unit.Dp(total * sb.Progress)
if top.V+sb.MinLength.V > total {
top = unit.Dp(total - sb.MinLength.V)
}
}
return clickBox(gtx, &sb.clickable, func(gtx C) D {
barAreaDims := layout.Inset{
Top: top,
Right: unit.Dp(2),
Left: left,
Bottom: unit.Dp(2),
}.Layout(gtx, func(gtx C) D {
pointer.Rect(image.Rectangle{
Max: image.Point{
X: int(size.X),
Y: int(size.Y),
},
}).Add(gtx.Ops)
sb.drag.Add(gtx.Ops)
return rect{
Color: sb.Color,
Size: size,
Radii: float32(gtx.Px(unit.Dp(4))),
}.Layout(gtx)
})
switch sb.Axis {
case Vertical:
barAreaDims.Size.Y = gtx.Constraints.Max.Y
case Horizontal:
barAreaDims.Size.X = gtx.Constraints.Max.X
}
return barAreaDims
})
})
}
func (axis Axis) Layout(gtx C, widget layout.Widget) D {
if axis == Vertical {
return layout.NE.Layout(gtx, widget)
}
if axis == Horizontal {
return layout.SW.Layout(gtx, widget)
}
return layout.Dimensions{}
}
func (axis Axis) ToGesture() (g gesture.Axis) {
switch axis {
case Vertical:
g = gesture.Vertical
case Horizontal:
g = gesture.Horizontal
}
return g
}
// rect creates a rectangle of the provided background color with
// Dimensions specified by size and a corner radius (on all corners)
// specified by radii.
type rect struct {
Color color.NRGBA
Size f32.Point
Radii float32
}
// Layout renders the Rect into the provided context
func (r rect) Layout(gtx C) D {
return drawRect(gtx, r.Color, r.Size, r.Radii)
}
// drawRect creates a rectangle of the provided background color with
// Dimensions specified by size and a corner radius (on all corners)
// specified by radii.
func drawRect(gtx C, background color.NRGBA, size f32.Point, radii float32) D {
bounds := f32.Rectangle{
Max: size,
}
paint.FillShape(gtx.Ops, background, clip.UniformRRect(bounds, radii).Op(gtx.Ops))
return layout.Dimensions{Size: image.Pt(int(size.X), int(size.Y))}
}
// clickBox lays out a rectangular clickable widget without further
// decoration.
func clickBox(gtx layout.Context, button *widget.Clickable, w layout.Widget) layout.Dimensions {
return layout.Stack{}.Layout(gtx,
layout.Expanded(button.Layout),
layout.Expanded(func(gtx layout.Context) layout.Dimensions {
clip.RRect{
Rect: f32.Rectangle{Max: f32.Point{
X: float32(gtx.Constraints.Min.X),
Y: float32(gtx.Constraints.Min.Y),
}},
}.Add(gtx.Ops)
return layout.Dimensions{Size: gtx.Constraints.Min}
}),
layout.Stacked(w),
)
} | scroll/scroll.go | 0.637482 | 0.423756 | scroll.go | starcoder |
package fmp4parser
import (
"errors"
"sync/atomic"
)
// bufferCache is a simple byte queue for mp4Buffer used for fmp4parser.
const SlotEntry = 1024 // number of slots
const SlotSize = 4096  // bytes per slot

// The internal buffer size is 4M == SlotEntry*SlotSize bytes.
// How it works: slice b is divided into SlotEntry sub-slices ("slots") of
// SlotSize bytes each. readingSlot and writingSlot are the slots holding
// the read and write positions; readingIndex and writingIndex are the
// offsets of those positions within their slots. The most important
// invariant: the reading pointer never catches up with the writing
// pointer. Think of this structure as a variant of a circular buffer.
type bufferCache struct {
	b            [][]byte // SlotEntry slots x SlotSize bytes
	readingSlot  int      // the slot currently being read
	writingSlot  int      // the slot currently being written
	readingIndex int      // offset of the first unread byte in readingSlot
	writingIndex int      // offset of the next byte to write in writingSlot
	absPosition  int64    // start position in the origin file of the buffered data
	length       int32    // number of buffered bytes (accessed atomically)
}

// newBufferCache allocates all slots up front and returns an empty cache.
func newBufferCache() *bufferCache {
	b := make([][]byte, SlotEntry)
	for i := range b {
		b[i] = make([]byte, SlotSize)
	}
	return &bufferCache{b: b}
}

// Len is an alias of length: the number of buffered bytes.
func (p *bufferCache) Len() int {
	return int(atomic.LoadInt32(&p.length))
}

// Read copies up to len(b) buffered bytes into b, advancing the read
// position across slot boundaries as needed.
// Read implements the io.Reader interface; it returns an error when the
// cache is empty, and a short read (n < len(b)) when fewer bytes are
// buffered.
// Notice: Non-Thread-Safety (only the length field is accessed atomically).
func (p *bufferCache) Read(b []byte) (n int, e error) {
	currentLen := int(atomic.LoadInt32(&p.length))
	retrieveData := func() {
		leftToRead := n
		nRead := 0
		if leftToRead < SlotSize-p.readingIndex {
			// The whole read fits inside the current slot.
			nRead = copy(b, (p.b[p.readingSlot])[p.readingIndex:p.readingIndex+leftToRead])
			p.readingIndex += nRead
			p.readingIndex %= SlotSize
		} else {
			// Drain the rest of the current slot first.
			nRead = copy(b, (p.b[p.readingSlot])[p.readingIndex:])
			// BUGFIX: advance by the bytes actually consumed from this
			// slot (nRead), not by the full request (leftToRead); the old
			// code double-counted the residual below and corrupted the
			// read position whenever a read crossed a slot boundary.
			p.readingIndex += nRead
			p.readingIndex %= SlotSize // lands exactly on 0
			p.readingSlot++
			p.readingSlot %= SlotEntry
			leftToRead -= nRead
			leftSlotToRead := leftToRead / SlotSize
			residual := leftToRead % SlotSize
			// Copy all the full slots in between.
			for i := 0; i < leftSlotToRead; i++ {
				nRead += copy(b[nRead:], (p.b[p.readingSlot])[:])
				p.readingSlot++
				p.readingSlot %= SlotEntry
			}
			// Copy the trailing partial slot.
			if residual > 0 {
				copy(b[nRead:], (p.b[p.readingSlot])[:residual])
			}
			p.readingIndex += residual
			p.readingIndex %= SlotSize
		}
		atomic.AddInt32(&p.length, int32(-n))
	}
	n = 0
	e = nil
	if atomic.LoadInt32(&p.length) <= 0 {
		e = errors.New("no enough data to read")
	} else {
		if len(b) >= currentLen {
			n = currentLen
		} else {
			n = len(b)
		}
		retrieveData()
	}
	return n, e
}
// Write attaches up to len(b) bytes of data to the tail of bufferCache.b.
// As configured, the upper bound is 4M (SlotEntry*SlotSize), and len(b)
// is expected to be far smaller than that.
// Write implements the io.Writer interface.
// NOTE(review): when the cache is nearly full this returns a short write
// (n < len(b)) with a nil error, which deviates from the io.Writer
// contract — confirm callers check n.
// Notice: Non-Thread-Safety (only the length field is accessed atomically).
func (p *bufferCache) Write(b []byte) (n int, e error) {
	currentLen := int(atomic.LoadInt32(&p.length))
	// appendData copies b starting at the current write position,
	// wrapping across slot and ring boundaries as needed.
	appendData := func(b []byte) {
		currentWritingSlotLeft := SlotSize - p.writingIndex
		nWritten := 0
		if len(b) < currentWritingSlotLeft {
			// Everything fits in the current slot.
			nWritten = copy((p.b[p.writingSlot])[p.writingIndex:], b)
			p.writingIndex += nWritten
			p.writingIndex %= SlotSize
		} else {
			// Fill the remainder of the current slot, then advance.
			nWritten = copy((p.b[p.writingSlot])[p.writingIndex:], b)
			p.writingIndex += nWritten
			p.writingIndex %= SlotSize // lands exactly on 0
			p.writingSlot++
			p.writingSlot %= SlotEntry
			leftToWrite := len(b) - nWritten
			leftSlotToWrite := leftToWrite / SlotSize
			residual := leftToWrite % SlotSize
			// Copy the full slots in between.
			for i := 0; i < leftSlotToWrite; i++ {
				nWritten += copy(p.b[p.writingSlot], b[nWritten:])
				p.writingSlot++
				p.writingSlot %= SlotEntry
			}
			// Copy the trailing partial slot.
			if residual > 0 {
				_ = copy(p.b[p.writingSlot], b[nWritten:])
			}
			p.writingIndex += residual
			p.writingIndex %= SlotSize
		}
		atomic.AddInt32(&p.length, int32(len(b)))
	}
	n = 0
	e = nil
	if SlotSize*SlotEntry-currentLen > 0 {
		if SlotSize*SlotEntry-currentLen <= len(b) {
			// Not enough room for all of b: write only what fits.
			b2 := b[:SlotSize*SlotEntry-currentLen]
			appendData(b2)
			n = SlotSize*SlotEntry - currentLen
		} else {
			appendData(b)
			n = len(b)
		}
	} else {
		e = errors.New("no more space to write")
	}
	return n, e
}
// Reset will reset internal value
func (p *bufferCache) Reset() {
p.readingIndex = 0
p.readingSlot = 0
p.writingIndex = 0
p.writingSlot = 0
atomic.StoreInt32(&p.length, 0)
} | buffer_cache.go | 0.614972 | 0.427516 | buffer_cache.go | starcoder |
package algorithm
import "fmt"
// BinarySearchTree is a binary search tree node. A node whose value is 0
// is treated as an "empty" placeholder, so the tree cannot store the key
// 0 itself.
type BinarySearchTree struct {
	value      int
	leftChild  *BinarySearchTree
	rightChild *BinarySearchTree
}

// RecursionAdd inserts value recursively. Empty placeholder children are
// created on insertion so descent never dereferences a nil pointer.
func (tree *BinarySearchTree) RecursionAdd(value int) {
	if tree.value == 0 {
		tree.value = value
		tree.leftChild = &BinarySearchTree{}
		tree.rightChild = &BinarySearchTree{}
	} else {
		if value < tree.value {
			tree.leftChild.RecursionAdd(value)
		} else {
			tree.rightChild.RecursionAdd(value)
		}
	}
}

// NotRecursionAdd inserts value iteratively. Duplicate values are
// ignored.
//
// BUGFIX: the previous version created the new node in a local variable
// (`current = &BinarySearchTree{...}`) without ever linking it to its
// parent, so every value inserted into a non-empty subtree was silently
// lost. We now descend until we reach an empty placeholder node and fill
// it in place, mirroring RecursionAdd.
func (tree *BinarySearchTree) NotRecursionAdd(value int) {
	current := tree
	for {
		if current.value == 0 {
			// Empty placeholder reached: claim it for the new value.
			current.value = value
			current.leftChild = &BinarySearchTree{}
			current.rightChild = &BinarySearchTree{}
			return
		}
		if value < current.value {
			current = current.leftChild
		} else if value > current.value {
			current = current.rightChild
		} else {
			return // value already present
		}
	}
}
// Search returns the node whose value equals the given value, or nil if
// no such node exists in the tree.
func (tree *BinarySearchTree) Search(value int) *BinarySearchTree {
	node := tree
	if node.value == value {
		return node
	}
	for node != nil {
		switch {
		case value < node.value:
			node = node.leftChild
		case value > node.value:
			node = node.rightChild
		default:
			return node
		}
	}
	return nil
}
// Remove deletes the node holding value from the tree. It panics when
// the value is not present.
func (tree *BinarySearchTree) Remove(value int) {
	t := tree.Search(value)
	if t == nil {
		panic("value is not exist")
	}
	// Leaf node (both children are empty placeholders): just clear it.
	if t.leftChild.value == 0 && t.rightChild.value == 0 {
		destroy(t)
	} else if t.leftChild.value != 0 && t.rightChild.value != 0 { // Both LeftChild and RightChild are not empty.
		// Get the min-element in its RightChild.
		min := t.rightChild.GetMin()
		// Replace its value with the min-element's value.
		t.value = min.value
		// Remove the min-element.
		// NOTE(review): destroy only zeroes min itself; if min has a
		// non-empty right child it appears to be dropped — verify.
		destroy(min)
	} else { // LeftChild or RightChild is empty.
		// Replace it with its child which is not empty.
		if t.leftChild.value > 0 {
			t.replaceWith(t.leftChild)
		} else {
			t.replaceWith(t.rightChild)
		}
	}
}
// replaceWith copies t's value and children into tree and then clears t.
func (tree *BinarySearchTree) replaceWith(t *BinarySearchTree) {
	tree.value = t.value
	tree.leftChild = t.leftChild
	tree.rightChild = t.rightChild
	destroy(t)
}

// GetMin returns the left-most (minimum) node of the subtree; a child
// with value 0 is treated as an empty placeholder.
func (tree *BinarySearchTree) GetMin() *BinarySearchTree {
	if tree.leftChild.value == 0 {
		return tree
	} else {
		return tree.leftChild.GetMin()
	}
}

// destroy clears a node in place, marking it as empty (value 0) and
// dropping its children.
func destroy(tree *BinarySearchTree) {
	tree.value = 0
	tree.leftChild = nil
	tree.rightChild = nil
}
/**
 Deep returns the depth of the tree using an explicit stack
 (depth = the maximum level of any node in the tree).
 */
func (tree *BinarySearchTree) Deep() int {
	var stack *ArrayStack
	if tree == nil {
		return 0
	}
	tempTree := tree
	stack = Push(stack, tempTree)
	deep := 1
	for !IsEmpty(stack) {
		if tempTree.leftChild.value != 0 {
			// Keep descending left, counting one level per step.
			deep++
			stack = Push(stack, tempTree.leftChild)
			tempTree = tempTree.leftChild
		} else {
			tempTree = Pop(stack).(*BinarySearchTree)
			// NOTE(review): this bookkeeping looks suspect — deep is
			// adjusted while backtracking but no running maximum is
			// kept, so the returned value may not be the true depth for
			// all tree shapes. Verify against Height.
			if tempTree.leftChild.value != 0 {
				deep--
			}
			if tempTree.rightChild.value != 0 {
				deep++
				stack = Push(stack, tempTree.rightChild)
				tempTree = tempTree.rightChild
			}
		}
	}
	return deep
}
// Height returns the height of the tree counted in nodes along the
// longest root-to-leaf path; an empty tree has height 0.
//
// BUGFIX: the previous version returned 0 whenever leftChild was nil
// (and likewise for rightChild), completely ignoring the other subtree,
// so any unbalanced tree reported a wrong height.
func (tree *BinarySearchTree) Height() int {
	// Treat nil pointers and empty placeholder nodes (value == 0, as
	// created by RecursionAdd) as absent subtrees.
	if tree == nil || tree.value == 0 {
		return 0
	}
	l := 0
	if tree.leftChild != nil {
		l = tree.leftChild.Height()
	}
	r := 0
	if tree.rightChild != nil {
		r = tree.rightChild.Height()
	}
	if l > r {
		return l + 1
	}
	return r + 1
}
/**
 PreTraversalTree prints the array (interpreted as a complete binary
 tree) in pre-order without recursion.
 Time complexity O(n), space complexity O(log n).
 */
func PreTraversalTree(array []int) {
	stack := &LinkStack{}
	length := len(array)
	index := 0
	for index < length || stack.Size() > 0 {
		if index < length {
			// Visit the node first, then descend to the left child.
			fmt.Println(array[index])
			stack.Push(index)
			index = index*2 + 1
		} else {
			// Backtrack and descend into the right child.
			index = stack.Pop().(int)
			index = index*2 + 2
		}
	}
}
/**
 InTraversalTree prints the tree in-order without recursion.
 */
func InTraversalTree(array []int) {
	stack := &LinkStack{}
	length := len(array)
	index := 0
	for index < length || stack.Size() > 0 {
		if index < length {
			// Descend left without visiting yet.
			stack.Push(index)
			index = index*2 + 1
		} else {
			// Visit on the way back up, then go right.
			index = stack.Pop().(int)
			fmt.Println(array[index])
			index = index*2 + 2
		}
	}
}
/**
 PoTraversalTree prints the tree in post-order without recursion.
 */
func PoTraversalTree(array []int) {
	length := len(array)
	stack := &LinkStack{}
	index := 0
	preIndex := -1 // index of the last visited node, to detect backtracking
	for index < length {
		stack.Push(index)
		index = index*2 + 1
	}
	for stack.Size() > 0 {
		index = stack.Pop().(int)
		if (index*2+1) < length && (index*2+2) < length && (index*2+2) != preIndex {
			// The right subtree exists and has not been visited yet:
			// re-push this node and walk down the right subtree's left
			// spine.
			stack.Push(index)
			index = index*2 + 2
			for index < length {
				stack.Push(index)
				index = index*2 + 1
			}
		} else {
			fmt.Println(array[index])
			preIndex = index
		}
	}
}
// PreTraversalRecursionTree prints the array-backed binary tree in
// pre-order (node, left, right) using recursion.
func PreTraversalRecursionTree(array []int) {
	PrintPreArray(0, len(array), array)
}

// PrintPreArray recursively prints the subtree rooted at index in
// pre-order; children of node i live at 2i+1 and 2i+2.
func PrintPreArray(index, length int, array []int) {
	if index >= length {
		return
	}
	fmt.Println(array[index])
	PrintPreArray(index*2+1, length, array)
	PrintPreArray(index*2+2, length, array)
}

// InTraversalRecursionTree prints the tree in-order (left, node, right)
// using recursion.
func InTraversalRecursionTree(array []int) {
	PrintInArray(0, len(array), array)
}

// PrintInArray recursively prints the subtree rooted at index in-order.
func PrintInArray(index, length int, array []int) {
	if index >= length {
		return
	}
	PrintInArray(index*2+1, length, array)
	fmt.Println(array[index])
	PrintInArray(index*2+2, length, array)
}

// PoTraversalRecursionTree prints the tree in post-order (left, right,
// node) using recursion.
func PoTraversalRecursionTree(array []int) {
	PrintPoArray(0, len(array), array)
}

// PrintPoArray recursively prints the subtree rooted at index in
// post-order.
func PrintPoArray(index, length int, array []int) {
	if index >= length {
		return
	}
	PrintPoArray(index*2+1, length, array)
	PrintPoArray(index*2+2, length, array)
	fmt.Println(array[index])
}
package reflectutil
import (
"fmt"
"reflect"
"strings"
)
// T represents any type T: a non-pointer reflect.Type together with the
// number of pointer indirections that originally wrapped it.
type T struct {
	// The underlying non-pointer type T.
	reflect.Type
	// Counts how many times the given type was pointing on an underlying non-pointer type T.
	ptrCount int
}

// String renders the type with its original pointer depth, e.g. "**int".
func (t T) String() string {
	return strings.Repeat("*", t.ptrCount) + t.Type.String()
}
// New returns a T for the given type, unwrapping any pointer levels into
// ptrCount. Not all types are supported as T; if the type is not
// supported this function returns an error (arrays, maps, funcs, and
// slices other than []byte are rejected).
func New(tp reflect.Type) (T, error) {
	var t T
loop:
	for {
		switch tp.Kind() {
		case reflect.Ptr:
			// If the type is a pointer, get the underlying type and increment the pointer counter.
			tp = tp.Elem()
			t.ptrCount++
		case reflect.Slice:
			// Only allow slice of []byte.
			if tp.Elem().Kind() == reflect.Uint8 {
				break loop
			}
			return t, fmt.Errorf("slice (besides []byte) is not supported for T.")
		case reflect.Array, reflect.Map, reflect.Func:
			return t, fmt.Errorf("%v is not supported for T.", tp.Kind())
		default:
			break loop
		}
	}
	t.Type = tp
	return t, nil
}
// Convert returns the given value converted to T, re-applying T's
// pointer depth. It panics when the value can't be converted.
func (t T) Convert(v reflect.Value) reflect.Value {
	ok := t.convert(v.Type(), &v)
	if !ok {
		panic(fmt.Sprintf("type %v can't be converted to: %v", v.Type(), t.Type))
	}
	return v
}

// Check reports whether another type is convertible to T.
func (t T) Check(tp reflect.Type) bool {
	return t.convert(tp, nil)
}
// convert checks if src can be converted to T and, when v is non-nil,
// applies the conversion to v in place.
func (t T) convert(src reflect.Type, v *reflect.Value) (ok bool) {
	dst := t.Type
	// If the conversion was successful, rewrap v as a pointer chain of
	// depth t.ptrCount so the result matches T's original shape.
	defer func() {
		if !ok || v == nil {
			return
		}
		for i := 0; i < t.ptrCount; i++ {
			*v = ptrTo(*v)
		}
	}()
	for {
		switch {
		case src == dst:
			// Exactly the same types.
			ok = true
			return
		case kindConversionAllowed(src, dst):
			// The conversion between src to dst is allowed.
			if v != nil {
				*v = v.Convert(dst)
			}
			ok = true
			return
		case src.Kind() == reflect.Ptr:
			// src might be a pointer to dst; take the underlying object and keep looking for dst.
			if v != nil {
				*v = v.Elem()
			}
			src = src.Elem()
		default:
			return
		}
	}
}
// kindConversionAllowed checks if the conversion from src to dst is allowed.
func kindConversionAllowed(src reflect.Type, dst reflect.Type) bool {
	// If the same kind return true, with an exception for struct in which src should be
	// convertable to dst.
	if src.Kind() == dst.Kind() && (dst.Kind() != reflect.Struct || src.ConvertibleTo(dst)) {
		return true
	}
	// For numerical kinds, allow converting within the same numerical
	// group when dst has at least as many bits as src (no precision loss).
	srcKindGroup := numKindOf(src.Kind())
	dstKindGroup := numKindOf(dst.Kind())
	return srcKindGroup != numNot && srcKindGroup == dstKindGroup && src.Bits() <= dst.Bits()
}
// numKind represents a group of numerical kinds.
type numKind int
const (
numNot numKind = iota // Not a number
numInt // int* kinds.
numUint // uint* kinds.
numFloat // float* kinds.
numComplex // complex* kinds.
)
// numKindOf returns the numerical kind of a given kind.
func numKindOf(k reflect.Kind) numKind {
switch k {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return numInt
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return numUint
case reflect.Float32, reflect.Float64:
return numFloat
case reflect.Complex64, reflect.Complex128:
return numFloat
default:
return numNot
}
}
// ptrTo returns a value which is the pointer to the given value.
func ptrTo(v reflect.Value) reflect.Value {
p := reflect.New(v.Type())
p.Elem().Set(v)
return p
} | internal/reflectutil/t.go | 0.756807 | 0.50177 | t.go | starcoder |
package kalman
// See https://en.wikipedia.org/wiki/Kalman_filter
import (
"fmt"
"gonum.org/v1/gonum/mat"
)
const (
	_N = 6 // Size of the matrices and vectors (x, y, z, vx, vy, vz).
	// Symbolic names for rows/columns.
	_X = 0
	_Y = 1
	_Z = 2
	_VX = 3
	_VY = 4
	_VZ = 5
)

// ErrInvalidProcNoise is returned when we can't compute process noise.
var ErrInvalidProcNoise = fmt.Errorf("invalid process noise arguments")

// Filter is a Kalman filter holding the current state estimate, its
// covariance, and the per-unit-time process noise.
type Filter struct {
	state mat.Vector // State.
	cov mat.Matrix // Covariance.
	procNoise mat.Matrix // Process noise (per unit time).
}

// ProcessNoise represents process noise as random steps per time step ST.
type ProcessNoise struct {
	SX, SY, SZ float64 // Random step (coordinates).
	SVX, SVY, SVZ float64 // Random step (speed).
	ST float64 // Random step (time).
}

// Observed represents a single observation of position and speed; the
// accuracy fields are squared into variances by the filter.
type Observed struct {
	X, Y, Z float64 // Coordinates.
	VX, VY, VZ float64 // Speed.
	XA, YA, ZA float64 // Accuracy (coordinates).
	VXA, VYA, VZA float64 // Accuracy (speed).
}
// NewFilter creates and returns a new Kalman filter. It returns
// ErrInvalidProcNoise when any random step is set while the time step
// d.ST is zero (the noise could not be normalized per unit time).
func NewFilter(d *ProcessNoise) (*Filter, error) {
	if d.ST == 0 && (d.SX > 0 || d.SY > 0 || d.SZ > 0 || d.SVX > 0 || d.SVY > 0 || d.SVZ > 0) {
		return nil, ErrInvalidProcNoise
	}
	// Init process noise: diagonal matrix of variances per unit time
	// (step squared divided by the time step).
	procNoise := mat.NewDense(_N, _N, nil)
	if d.ST > 0 {
		procNoise.Set(_X, _X, d.SX*d.SX/d.ST)
		procNoise.Set(_Y, _Y, d.SY*d.SY/d.ST)
		procNoise.Set(_Z, _Z, d.SZ*d.SZ/d.ST)
		procNoise.Set(_VX, _VX, d.SVX*d.SVX/d.ST)
		procNoise.Set(_VY, _VY, d.SVY*d.SVY/d.ST)
		procNoise.Set(_VZ, _VZ, d.SVZ*d.SVZ/d.ST)
	}
	return &Filter{procNoise: procNoise}, nil
}
// initCov initializes the covariance matrix from the first observation's
// accuracies (a diagonal of squared accuracies, i.e. variances).
func (f *Filter) initCov(ob *Observed) {
	cov := mat.NewDense(_N, _N, nil)
	cov.Set(_X, _X, ob.XA*ob.XA)
	cov.Set(_Y, _Y, ob.YA*ob.YA)
	cov.Set(_Z, _Z, ob.ZA*ob.ZA)
	cov.Set(_VX, _VX, ob.VXA*ob.VXA)
	cov.Set(_VY, _VY, ob.VYA*ob.VYA)
	cov.Set(_VZ, _VZ, ob.VZA*ob.VZA)
	f.cov = cov
}
// predictState computes the a-priori state estimate for a time step of
// td: x' = F·x, where F is the constant-velocity transition matrix
// (identity plus td on the position/velocity coupling entries).
func (f *Filter) predictState(td float64) mat.Vector {
	m := mat.DenseCopyOf(eye(_N))
	m.Set(_X, _VX, td)
	m.Set(_Y, _VY, td)
	m.Set(_Z, _VZ, td)
	// Consistency fix: use the named size constant _N instead of the
	// magic literal 6 used previously.
	newState := mat.NewVecDense(_N, nil)
	newState.MulVec(m, f.state)
	return newState
}
// predictCov computes the a-priori covariance for a time step of td:
// P' = F·P·Fᵀ + Q·td, where F is the constant-velocity transition matrix
// and Q the process noise per unit time.
//
// BUGFIX: the previous version computed P·Fᵀ + Q·td, omitting the left
// multiplication by F required by the Kalman prediction step (it also
// used the magic literal 6 instead of _N).
func (f *Filter) predictCov(td float64) mat.Matrix {
	// State transition matrix F.
	m := mat.DenseCopyOf(eye(_N))
	m.Set(_X, _VX, td)
	m.Set(_Y, _VY, td)
	m.Set(_Z, _VZ, td)
	// Accumulated process noise over the step.
	var w mat.Dense
	w.Scale(td, f.procNoise)
	// F·P
	var fp mat.Dense
	fp.Mul(m, f.cov)
	// (F·P)·Fᵀ + Q·td
	var r mat.Dense
	r.Mul(&fp, m.T())
	r.Add(&r, &w)
	return &r
}
// kalmanGain computes the gain K = P'·(P' + R)⁻¹, where P' is the
// predicted covariance and R the observation noise built from the
// observation accuracies (the full state is observed directly, H = I).
// It returns an error when (P' + R) cannot be inverted.
func (f *Filter) kalmanGain(predCov mat.Matrix, ob *Observed) (mat.Matrix, error) {
	r := mat.NewDense(_N, _N, nil)
	r.Set(_X, _X, ob.XA*ob.XA)
	r.Set(_Y, _Y, ob.YA*ob.YA)
	r.Set(_Z, _Z, ob.ZA*ob.ZA)
	r.Set(_VX, _VX, ob.VXA*ob.VXA)
	r.Set(_VY, _VY, ob.VYA*ob.VYA)
	r.Set(_VZ, _VZ, ob.VZA*ob.VZA)
	var t mat.Dense
	t.Add(predCov, r)
	var it mat.Dense
	err := it.Inverse(&t)
	if err != nil {
		return nil, err
	}
	var q mat.Dense
	q.Mul(predCov, &it)
	return &q, nil
}
// Observe processes a single act of observation; td is the time since
// the last update. The first call only initializes state and covariance
// from the observation; subsequent calls run a full predict/update
// cycle. An error is returned when the gain computation fails.
func (f *Filter) Observe(td float64, ob *Observed) error {
	if f.state == nil {
		// First observation: take it as the initial state verbatim.
		f.initCov(ob)
		f.state = mat.NewVecDense(_N, []float64{ob.X, ob.Y, ob.Z, ob.VX, ob.VY, ob.VZ})
		return nil
	}
	// Predict step.
	predState := f.predictState(td)
	predCov := f.predictCov(td)
	k, err := f.kalmanGain(predCov, ob)
	if err != nil {
		return err
	}
	// Update step: state = prediction + K·(observation - prediction).
	obState := mat.NewVecDense(_N, []float64{ob.X, ob.Y, ob.Z, ob.VX, ob.VY, ob.VZ})
	var stateDif mat.VecDense
	stateDif.SubVec(obState, predState)
	var r mat.VecDense
	r.MulVec(k, &stateDif)
	r.AddVec(&r, predState)
	f.state = &r
	// Covariance update: P = (I - K)·P'.
	cov := mat.DenseCopyOf(eye(_N))
	cov.Sub(cov, k)
	cov.Mul(cov, predCov)
	f.cov = cov
	return nil
}
// eye returns an n by n identity matrix.
func eye(n int) mat.Matrix {
d := make([]float64, n)
for i := 0; i < n; i++ {
d[i] = 1.0
}
return mat.NewDiagDense(n, d)
} | filter.go | 0.795301 | 0.564098 | filter.go | starcoder |
package assertjson
import (
	"fmt"
	"math"

	"github.com/stretchr/testify/assert"
)
// IsInteger asserts that the JSON node has an integer value (a numeric
// value with no fractional part).
func (node *AssertNode) IsInteger(msgAndArgs ...interface{}) {
	node.t.Helper()
	if !node.exists() {
		return
	}
	float, ok := node.value.(float64)
	if !ok {
		// BUGFIX: assert.Failf's first string is the (unformatted)
		// failure message; the old code put the format there and the raw
		// path in the msg slot, so "%s" was printed literally and the
		// path was interpreted as a format string. Pre-format with
		// fmt.Sprintf and pass the user's msgAndArgs through unchanged.
		assert.Fail(
			node.t,
			fmt.Sprintf(`value at path "%s" is not numeric`, node.pathPrefix+node.path),
			msgAndArgs...,
		)
		return // stop: inspecting a non-number is meaningless
	}
	_, fractional := math.Modf(float)
	if fractional != 0 {
		assert.Fail(
			node.t,
			fmt.Sprintf(`value at path "%s" is float, not integer`, node.pathPrefix+node.path),
			msgAndArgs...,
		)
	}
}
// IsFloat asserts that the JSON node has a float value (decoded JSON
// numbers are float64).
func (node *AssertNode) IsFloat(msgAndArgs ...interface{}) {
	node.t.Helper()
	if node.exists() {
		assert.IsType(node.t, 0.0, node.value, msgAndArgs...)
	}
}
// EqualToTheInteger asserts that the JSON node holds an integer equal to
// expectedValue.
func (node *AssertNode) EqualToTheInteger(expectedValue int, msgAndArgs ...interface{}) {
	node.t.Helper()
	if !node.exists() {
		return
	}
	float, ok := node.value.(float64)
	if !ok {
		// BUGFIX: assert.Failf's first string is the (unformatted)
		// failure message; the old code put the format there and the raw
		// path in the msg slot, so "%s" was printed literally and the
		// path was interpreted as a format string.
		assert.Fail(
			node.t,
			fmt.Sprintf(`value at path "%s" is not numeric`, node.pathPrefix+node.path),
			msgAndArgs...,
		)
		return // stop: comparing against a non-number is meaningless
	}
	integer, fractional := math.Modf(float)
	if fractional != 0 {
		assert.Fail(
			node.t,
			fmt.Sprintf(`value at path "%s" is float, not integer`, node.pathPrefix+node.path),
			msgAndArgs...,
		)
		// BUGFIX: previously fell through and produced a confusing
		// second failure comparing expectedValue against 0.
		return
	}
	assert.Equal(node.t, expectedValue, int(integer), msgAndArgs...)
}
// EqualToTheFloat asserts that the JSON node has a float value equal to
// the given value (exact float64 comparison).
func (node *AssertNode) EqualToTheFloat(expectedValue float64, msgAndArgs ...interface{}) {
	node.t.Helper()
	if node.exists() {
		assert.IsType(node.t, 0.0, node.value, msgAndArgs...)
		assert.Equal(node.t, expectedValue, node.value, msgAndArgs...)
	}
}
// IsNumberGreaterThan asserts that the JSON node has a number strictly
// greater than the given value.
func (node *AssertNode) IsNumberGreaterThan(value float64, msgAndArgs ...interface{}) {
	node.t.Helper()
	if node.exists() {
		assert.IsType(node.t, 0.0, node.value, msgAndArgs...)
		assert.Greater(node.t, node.value, value, msgAndArgs...)
	}
}
// IsNumberGreaterThanOrEqual asserts that the JSON node has a number greater than or equal to the given value.
func (node *AssertNode) IsNumberGreaterThanOrEqual(value float64, msgAndArgs ...interface{}) {
	node.t.Helper()
	if node.exists() {
		assert.IsType(node.t, 0.0, node.value, msgAndArgs...)
		assert.GreaterOrEqual(node.t, node.value, value, msgAndArgs...)
	}
}
// IsNumberLessThan asserts that the JSON node has a number strictly less
// than the given value.
func (node *AssertNode) IsNumberLessThan(value float64, msgAndArgs ...interface{}) {
	node.t.Helper()
	if node.exists() {
		assert.IsType(node.t, 0.0, node.value, msgAndArgs...)
		assert.Less(node.t, node.value, value, msgAndArgs...)
	}
}
// IsNumberLessThanOrEqual asserts that the JSON node has a number less than or equal to the given value.
func (node *AssertNode) IsNumberLessThanOrEqual(value float64, msgAndArgs ...interface{}) {
	node.t.Helper()
	if node.exists() {
		assert.IsType(node.t, 0.0, node.value, msgAndArgs...)
		assert.LessOrEqual(node.t, node.value, value, msgAndArgs...)
	}
}
// IsNumberInRange asserts that the JSON node has a number with value in the given range.
func (node *AssertNode) IsNumberInRange(min float64, max float64, msgAndArgs ...interface{}) {
node.t.Helper()
if node.exists() {
assert.IsType(node.t, 0.0, node.value, msgAndArgs...)
assert.GreaterOrEqual(node.t, node.value, min, msgAndArgs...)
assert.LessOrEqual(node.t, node.value, max, msgAndArgs...)
}
} | assertjson/numeric.go | 0.734786 | 0.710716 | numeric.go | starcoder |
package behaviors
//--------------------
// IMPORTS
//--------------------
import (
"time"
"github.com/tideland/gocells/cells"
)
//--------------------
// CONSTANTS
//--------------------
const (
	// TopicPair signals a detected pair of events; the payload is a Pair
	// with both timestamps and both criterion datas filled.
	TopicPair = "pair"
	// TopicPairTimeout signals a timeout while waiting for the second
	// event of a pair.
	TopicPairTimeout = "pair:timeout"
)
//--------------------
// PAIR BEHAVIOR
//--------------------
// PairCriterion is used by the pair behavior and has to return true, if
// the passed event matches a criterion for rate measuring. The returned
// data in case of a first hit is stored and then passed as argument to
// each further call of the pair criterion. In case of a pair event both
// returned datas are part of the emitted event payload.
type PairCriterion func(event cells.Event, hit cells.Payload) (cells.Payload, bool)
// Pair contains event pair information.
type Pair struct {
	FirstTime time.Time // When the first matching event arrived.
	FirstPayload cells.Payload // Data returned by the criterion for the first hit.
	SecondTime time.Time // When the second matching event arrived (zero on timeout).
	SecondPayload cells.Payload // Data returned for the second hit (nil on timeout).
	Timeout time.Time // When the timeout fired (zero on success).
}
// pairBehavior checks if events occur in pairs.
type pairBehavior struct {
	cell cells.Cell // Cell this behavior runs in.
	matches PairCriterion // User-supplied matching criterion.
	duration time.Duration // Maximum allowed time between the two hits.
	hitTime *time.Time // Time of the pending first hit; nil when none.
	hitPayload cells.Payload // Payload of the pending first hit.
	timeout *time.Timer // Reminder timer for the pending first hit.
}
// NewPairBehavior creates a behavior checking if two events match a criterion
// defined by the PairCriterion function and the duration between them is not
// longer than the passed duration. In case of a positive pair match an according
// event containing both timestamps and both returned datas is emitted. In case
// of a timeout a timeout event is emitted. Its payload is the first timestamp,
// the first data, and the timestamp of the timeout.
func NewPairBehavior(criterion PairCriterion, duration time.Duration) cells.Behavior {
	return &pairBehavior{
		cell: nil,
		matches: criterion,
		duration: duration,
		hitTime: nil,
		hitPayload: nil,
		timeout: nil,
	}
}
// Init implements the cells.Behavior interface; it stores the cell for
// later event emitting.
func (b *pairBehavior) Init(c cells.Cell) error {
	b.cell = c
	return nil
}
// Terminate implements the cells.Behavior interface; nothing to clean up.
func (b *pairBehavior) Terminate() error {
	return nil
}
// ProcessEvent implements the cells.Behavior interface. It handles the
// internal TopicPairTimeout reminder and runs every other event through
// the user-supplied criterion, emitting a pair or timeout event.
func (b *pairBehavior) ProcessEvent(event cells.Event) error {
	switch event.Topic() {
	case TopicPairTimeout:
		if b.hitTime != nil && b.timeout != nil {
			// Received timeout event, check if the expected one.
			var first time.Time
			err := event.Payload().Unmarshal(&first)
			if err != nil {
				return err
			}
			if first.Equal(*b.hitTime) {
				b.emitTimeout()
				b.timeout = nil
			}
		}
	default:
		if payload, ok := b.matches(event, b.hitPayload); ok {
			now := time.Now()
			if b.hitTime == nil {
				// First hit, store time and data and start timeout reminder.
				b.hitTime = &now
				b.hitPayload = payload
				// NOTE(review): the AfterFunc callback runs on a timer
				// goroutine and emits into the environment — confirm the
				// environment serializes this with ProcessEvent.
				b.timeout = time.AfterFunc(b.duration, func() {
					b.cell.Environment().EmitNew(b.cell.ID(), TopicPairTimeout, now)
				})
			} else {
				// Second hit earlier than timeout event.
				// Check if it is in time.
				b.timeout.Stop()
				b.timeout = nil
				if now.Sub(*b.hitTime) > b.duration {
					b.emitTimeout()
				} else {
					b.emitPair(now, payload)
				}
			}
		}
	}
	return nil
}
// Recover implements the cells.Behavior interface; nothing needs
// restoring here.
func (b *pairBehavior) Recover(err interface{}) error {
	return nil
}
// emitPair emits the event for a successful pair and clears the pending
// first hit.
func (b *pairBehavior) emitPair(timestamp time.Time, payload cells.Payload) {
	b.cell.EmitNew(TopicPair, Pair{
		FirstTime: *b.hitTime,
		FirstPayload: b.hitPayload,
		SecondTime: timestamp,
		SecondPayload: payload,
	})
	b.hitTime = nil
}
// emitTimeout emits the event for a pairing timeout and clears the
// pending first hit.
func (b *pairBehavior) emitTimeout() {
	b.cell.EmitNew(TopicPairTimeout, Pair{
		FirstTime: *b.hitTime,
		FirstPayload: b.hitPayload,
		Timeout: time.Now(),
	})
	b.hitTime = nil
}
// EOF
package ray
import (
"image"
"math"
)
// Camera is the canonical camera object, tracing rays and writing to an image.
type Camera struct {
	Transform
	f float64 // Focal length.
	dx float64 // Sensor width in camera units.
	dy float64 // Sensor height in camera units.
	Width int // Image width in pixels.
	Height int // Image height in pixels.
	Image *image.RGBA // Render target.
}
// NewCamera creates a camera with focal length f, camera width dx, camera
// height dy, and image width w in pixels; the image height is derived from
// the dy/dx aspect ratio. The camera is located at the origin and points
// towards negative z in an orthonormal basis.
func NewCamera(f, dx, dy float64, w int) *Camera {
	h := int(math.Round(float64(w) * dy / dx))
	img := image.NewRGBA(image.Rect(0, 0, w, h))
	return &Camera{
		Transform: IDTransform, f: f, dx: dx, dy: dy,
		Width: w, Height: h, Image: img}
}
// BuildRay creates a Ray in global space through the given image pixel.
func (c *Camera) BuildRay(x, y int) Ray {
	// Map pixel coordinates to sensor coordinates in [-dx/2, dx/2] and
	// [-dy/2, dy/2], with y flipped (image y grows downwards).
	X := (float64(x)*c.dx)/float64(c.Width) - c.dx/2
	Y := c.dy/2 - float64(y)*c.dy/float64(c.Height)
	return c.RayToGlobal(Ray{pt: Origin, dir: Vector3{X, Y, -c.f}})
}
// Project maps a global-space point to image pixel coordinates using a
// pinhole projection. Points at or behind the camera plane (local z >= 0)
// are clamped to the sensor border on the side they fall on.
func (c *Camera) Project(p Point3) (int, int) {
	lp := c.PointToLocal(p)
	var x, y float64
	if lp[Z] < 0 {
		// Perspective divide by the distance along the view axis.
		x = c.f * lp[X] / -lp[Z]
		y = c.f * lp[Y] / -lp[Z]
	} else {
		x = math.Copysign(c.dx/2, lp[X])
		y = math.Copysign(c.dy/2, lp[Y])
	}
	// Sensor coordinates to pixel coordinates (y flipped).
	px := (x + c.dx/2) * float64(c.Width) / c.dx
	py := float64(c.Height) - (y+c.dy/2)*float64(c.Height)/c.dy
	return int(math.Round(px)), int(math.Round(py))
}
// Translate applies a translation and returns the camera for chaining.
func (c *Camera) Translate(x, y, z float64) *Camera {
	c.Transform.Translate(x, y, z)
	return c
}
// RotateX applies a rotation around the x-axis and returns the camera
// for chaining.
func (c *Camera) RotateX(x float64) *Camera {
	c.Transform.RotateX(x)
	return c
}
// RotateY applies a rotation around the y-axis and returns the camera
// for chaining.
func (c *Camera) RotateY(y float64) *Camera {
	c.Transform.RotateY(y)
	return c
}
// RotateZ applies a rotation around the z-axis and returns the camera
// for chaining.
func (c *Camera) RotateZ(z float64) *Camera {
	c.Transform.RotateZ(z)
	return c
}
// Scale applies a scaling transform and returns the camera for chaining.
func (c *Camera) Scale(x, y, z float64) *Camera {
	c.Transform.Scale(x, y, z)
	return c
}
package renderer
import (
"image"
"image/color"
"math"
"github.com/schollz/progressbar/v3"
"gonum.org/v1/gonum/mat"
)
// constrain clamps x to the closed interval [min, max].
func constrain(x, min, max float64) float64 {
	switch {
	case x < min:
		return min
	case x > max:
		return max
	default:
		return x
	}
}
// normalization scales v in place to unit length.
// NOTE(review): a zero-length vector divides by zero and fills v with
// NaNs — confirm callers never pass one.
func normalization(v *mat.VecDense) {
	v.ScaleVec(1/math.Sqrt(mat.Dot(v, v)), v)
}
func Render(width int, height int) (image.Image, error) {
eyePos := mat.NewVecDense(3, []float64{0, 0, -5})
spherePos := mat.NewVecDense(3, []float64{0, 0, 5})
sphereR := 1
lightPos := mat.NewVecDense(3, []float64{-5, 5, -5})
lightIntensity := 1.0
ambientIntensity := 0.1
kAmb := 0.01
kDif := 0.69
kSpe := 0.3
shininess := 8.0
pw := mat.NewVecDense(3, []float64{0, 0, 0})
img := image.NewRGBA(image.Rect(0, 0, width, height))
bar := progressbar.Default(int64(height))
for y := 0; y < height; y++ {
bar.Add(1)
pw.SetVec(1, float64(-2*y)/float64(height-1)+1.0)
for x := 0; x < width; x++ {
pw.SetVec(0, float64(2*x)/float64(width-1)-1.0)
eyeDir := mat.NewVecDense(3, nil)
eyeDir.SubVec(pw, eyePos)
tmp := mat.NewVecDense(3, nil)
tmp.SubVec(eyePos, spherePos)
a := mat.Dot(eyeDir, eyeDir)
b := 2 * mat.Dot(eyeDir, tmp)
c := mat.Dot(tmp, tmp) - float64(sphereR*sphereR)
d := b*b - 4*a*c
t := -1.0
if d == 0 {
t = -b / (2 * a)
} else if c > 0 {
t1 := (-b - math.Sqrt(d)) / (2 * a)
t2 := (-b + math.Sqrt(d)) / (2 * a)
if t1 > 0 && t2 > 0 {
t = math.Min(t1, t2)
} else {
t = math.Max(t1, t2)
}
}
col := color.RGBA{100, 149, 237, 255}
if t > 0 {
radianceAmb := kAmb * ambientIntensity
intPos := mat.NewVecDense(3, nil)
intPos.AddScaledVec(eyePos, t, eyeDir)
lightDir := mat.NewVecDense(3, nil)
lightDir.SubVec(lightPos, intPos)
normalization(lightDir)
sphereN := mat.NewVecDense(3, nil)
sphereN.SubVec(intPos, spherePos)
normalization(sphereN)
nlDot := constrain(mat.Dot(sphereN, lightDir), 0, 1)
radianceDif := kDif * lightIntensity * nlDot
radianceSpe := 0.0
if nlDot > 0 {
refDir := mat.NewVecDense(3, nil)
refDir.ScaleVec(2*nlDot, sphereN)
refDir.SubVec(refDir, lightDir)
invEyeDir := mat.NewVecDense(3, nil)
invEyeDir.ScaleVec(-1.0, eyeDir)
normalization(invEyeDir)
vrDot := constrain(mat.Dot(invEyeDir, refDir), 0, 1)
radianceSpe = kSpe * lightIntensity * math.Pow(vrDot, shininess)
}
radiance := constrain(radianceAmb+radianceDif+radianceSpe, 0, 1)
gray := (uint8)(255 * radiance)
col = color.RGBA{gray, gray, gray, 255}
}
img.Set(x, y, col)
}
}
return img, nil
} | renderer/render.go | 0.605099 | 0.401013 | render.go | starcoder |
package fnv
type (
	// Fnv64 holds the running state of a 64-bit FNV-1 hash.
	Fnv64 uint64
	// Fnv64a holds the running state of a 64-bit FNV-1a hash.
	Fnv64a uint64
)

const (
	// offset64 is the FNV-64 offset basis; prime64 is the FNV-64 prime.
	offset64 = 14695981039346656037
	prime64 = 1099511628211
)
// New64 returns a new 64-bit FNV-1 hash.
func New64() *Fnv64 {
	var s Fnv64 = offset64
	return &s
}

// Reset resets the Hash to its initial state.
func (s *Fnv64) Reset() {
	*s = offset64
}

// Size returns the number of bytes Sum will return.
func (s *Fnv64) Size() int {
	return 8
}

// Sum64 returns the current hash value.
func (s *Fnv64) Sum64() uint64 {
	return uint64(*s)
}

// BlockSize returns the hash's underlying block size.
func (s *Fnv64) BlockSize() int {
	return 1
}

// Sum appends the current hash to buf in big-endian byte order and returns the resulting slice.
func (s *Fnv64) Sum(buf []byte) []byte {
	return sum(uint64(*s), buf)
}

// Write adds the bytes in data to the running hash. It never returns an error.
func (s *Fnv64) Write(data []byte) (int, error) {
	hash := *s // avoid dereferencing s during loop because it significantly changes benchmark speed
	for _, c := range data {
		hash *= prime64
		hash ^= Fnv64(c)
	}
	*s = hash
	return len(data), nil
}

// WriteByte adds the single byte b to the running hash. It never returns an error.
func (s *Fnv64) WriteByte(b byte) error {
	*s *= prime64
	*s ^= Fnv64(b)
	return nil
}

// WriteString adds the bytes of string str to the running hash and returns the number of bytes written.
// It never returns an error.
func (s *Fnv64) WriteString(str string) (int, error) {
	hash := *s
	for i := 0; i < len(str); i++ {
		hash *= prime64
		hash ^= Fnv64(str[i])
	}
	*s = hash
	return len(str), nil
}

// WriteUint64 adds the 8 bytes of n to the running hash in big-endian byte order.
// The loop is unrolled by hand to keep the hot path allocation- and branch-free.
func (s *Fnv64) WriteUint64(n uint64) {
	hash := *s
	hash *= prime64
	hash ^= Fnv64(n >> 56)
	hash *= prime64
	hash ^= Fnv64((n >> 48) & 0xff)
	hash *= prime64
	hash ^= Fnv64((n >> 40) & 0xff)
	hash *= prime64
	hash ^= Fnv64((n >> 32) & 0xff)
	hash *= prime64
	hash ^= Fnv64((n >> 24) & 0xff)
	hash *= prime64
	hash ^= Fnv64((n >> 16) & 0xff)
	hash *= prime64
	hash ^= Fnv64((n >> 8) & 0xff)
	hash *= prime64
	hash ^= Fnv64(n & 0xff)
	*s = hash
}

// New64a returns a new 64-bit FNV-1a hash.
func New64a() *Fnv64a {
	var s Fnv64a = offset64
	return &s
}

// Reset resets the Hash to its initial state.
func (s *Fnv64a) Reset() {
	*s = offset64
}

// Size returns the number of bytes Sum will return.
func (s *Fnv64a) Size() int {
	return 8
}

// Sum64 returns the current hash value.
func (s *Fnv64a) Sum64() uint64 {
	return uint64(*s)
}

// BlockSize returns the hash's underlying block size.
func (s *Fnv64a) BlockSize() int {
	return 1
}

// Sum appends the current hash to buf in big-endian byte order and returns the resulting slice.
func (s *Fnv64a) Sum(buf []byte) []byte {
	return sum(uint64(*s), buf)
}

// Write adds the bytes in data to the running hash. It never returns an error.
// FNV-1a xors before multiplying, the reverse of FNV-1 above.
func (s *Fnv64a) Write(data []byte) (int, error) {
	hash := *s
	for _, c := range data {
		hash ^= Fnv64a(c)
		hash *= prime64
	}
	*s = hash
	return len(data), nil
}

// WriteByte adds the single byte b to the running hash. It never returns an error.
func (s *Fnv64a) WriteByte(b byte) error {
	*s ^= Fnv64a(b)
	*s *= prime64
	return nil
}

// WriteString adds the bytes of string str to the running hash and returns the number of bytes written.
// It never returns an error.
func (s *Fnv64a) WriteString(str string) (int, error) {
	hash := *s
	for i := 0; i < len(str); i++ {
		hash ^= Fnv64a(str[i])
		hash *= prime64
	}
	*s = hash
	return len(str), nil
}

// WriteUint64 adds the 8 bytes of n to the running hash in big-endian byte order.
func (s *Fnv64a) WriteUint64(n uint64) {
	hash := *s
	hash ^= Fnv64a(n >> 56)
	hash *= prime64
	hash ^= Fnv64a((n >> 48) & 0xff)
	hash *= prime64
	hash ^= Fnv64a((n >> 40) & 0xff)
	hash *= prime64
	hash ^= Fnv64a((n >> 32) & 0xff)
	hash *= prime64
	hash ^= Fnv64a((n >> 24) & 0xff)
	hash *= prime64
	hash ^= Fnv64a((n >> 16) & 0xff)
	hash *= prime64
	hash ^= Fnv64a((n >> 8) & 0xff)
	hash *= prime64
	hash ^= Fnv64a(n & 0xff)
	hash *= prime64
	*s = hash
}
// sum appends the eight bytes of n to buf in big-endian order and returns
// the resulting slice.
func sum(n uint64, buf []byte) []byte {
	for shift := 56; shift >= 0; shift -= 8 {
		buf = append(buf, byte(n>>uint(shift)))
	}
	return buf
}
// String64 returns a 64-bit hash of str using the FNV-1 algorithm.
func String64(str string) uint64 {
hash := uint64(offset64)
for i := 0; i < len(str); i++ {
hash *= prime64
hash ^= uint64(str[i])
}
return uint64(hash)
}
// String64a returns a 64-bit hash of str using the FNV-1a algorithm.
func String64a(str string) uint64 {
hash := uint64(offset64)
for i := 0; i < len(str); i++ {
hash ^= uint64(str[i])
hash *= prime64
}
return uint64(hash)
}
// Sum64 returns a 64-bit hash of buf using the FNV-1 algorithm.
func Sum64(buf []byte) uint64 {
hash := uint64(offset64)
for _, b := range buf {
hash *= prime64
hash ^= uint64(b)
}
return uint64(hash)
}
// Sum64a returns a 64-bit hash of buf using the FNV-1a algorithm.
func Sum64a(buf []byte) uint64 {
hash := uint64(offset64)
for _, b := range buf {
hash ^= uint64(b)
hash *= prime64
}
return uint64(hash)
} | internal/hash/fnv/fnv.go | 0.825343 | 0.404802 | fnv.go | starcoder |
package dcel
import "github.com/gonum/graph"
// Node is a graph node in the DCEL (doubly connected edge list) data structure.
type Node interface {
	graph.Node

	// Halfedge returns the outgoing halfedge from the node. When the node is
	// isolated, the returned halfedge is nil. When the node is at a boundary,
	// the halfedge's Face is nil.
	Halfedge() Halfedge
	// SetHalfedge sets the outgoing halfedge from the node.
	SetHalfedge(Halfedge)
}

// Halfedge is an oriented edge in the DCEL data structure. Each undirected
// Edge is represented by two twin halfedges of opposite orientation.
type Halfedge interface {
	// From returns the origin node.
	From() Node
	// SetFrom sets the origin node.
	SetFrom(Node)

	// Twin returns the twin halfedge in the same edge.
	Twin() Halfedge
	// SetTwin sets the twin halfedge in the same edge.
	SetTwin(Halfedge)

	// Next returns the next halfedge around the adjacent face.
	Next() Halfedge
	// SetNext sets the next halfedge around the adjacent face.
	SetNext(Halfedge)

	// Prev returns the previous halfedge around the adjacent face.
	Prev() Halfedge
	// SetPrev sets the previous halfedge around the adjacent face.
	SetPrev(Halfedge)

	// Edge returns the undirected edge to which the halfedge belongs.
	Edge() Edge
	// SetEdge sets the undirected edge to which the halfedge belongs.
	SetEdge(Edge)

	// Face returns the adjacent face.
	Face() Face
	// SetFace sets the adjacent face.
	SetFace(Face)
}

// Edge is an undirected edge in the DCEL data structure.
type Edge interface {
	graph.Edge

	// ID returns an edge identifier unique within the graph.
	ID() int
	// Halfedges returns the two halfedges that form the edge.
	Halfedges() (Halfedge, Halfedge)
	// SetHalfedges sets the two halfedges that form the edge.
	SetHalfedges(Halfedge, Halfedge)
}

// Face is a face in the DCEL data structure.
type Face interface {
	// ID returns a face identifier unique within the graph.
	ID() int
	// Halfedge returns an adjacent halfedge.
	Halfedge() Halfedge
	// SetHalfedge sets an adjacent halfedge.
	SetHalfedge(Halfedge)
}
// Items wraps methods for allocating graph entities that can be stored in the
// DCEL data structure.
type Items interface {
// NewNode returns a new node with the given id.
NewNode(int) Node
// NewHalfedge returns a new halfedge.
NewHalfedge() Halfedge
// NewEdge returns a new edge with the given id.
NewEdge(int) Edge
// NewFace returns a new face with the given id.
NewFace(int) Face
} | interfaces.go | 0.719581 | 0.631438 | interfaces.go | starcoder |
package quadtreego
import (
"fmt"
"errors"
)
// Point is a weighted 2-D point stored in the quadtree.
type Point struct {
	X float64
	Y float64
	// Weight is the arbitrary payload associated with the point.
	Weight interface{}
}

// Node is a single cell of the quadtree covering the rectangle that starts at
// (Left, Top) and extends by Width and Height.
type Node struct {
	Left float64
	Top float64
	Width float64
	Height float64
	Parent *Node
	NW *Node
	NE *Node
	SW *Node
	SE *Node
	// Point is only meaningful while NodeType is LEAF.
	Point Point
	NodeType NodeType
}

// NodeType describes what a Node currently holds.
type NodeType int

const (
	// EMPTY marks a cell holding no point and no children.
	EMPTY NodeType = iota
	// LEAF marks a cell holding exactly one point.
	LEAF
	// POINTER marks a subdivided cell whose four children hold the points.
	POINTER
)

// Quadtree is a point quadtree over a fixed rectangular region.
type Quadtree struct {
	Root Node
	// Count is the number of points currently stored.
	Count int
}
// NewQuadTree returns an empty quadtree covering the axis-aligned rectangle
// from (minX, minY) to (maxX, maxY).
func NewQuadTree(minX float64, minY float64, maxX float64, maxY float64) *Quadtree {
	return &Quadtree{
		Root: Node{
			Left:   minX,
			Top:    minY,
			Width:  maxX - minX,
			Height: maxY - minY,
		},
		Count: 0,
	}
}
// find walks the subtree rooted at node looking for a leaf that stores the
// point (x, y). It returns the zero Node (NodeType EMPTY) when no such leaf
// exists. Note that the result is a copy of the stored node.
func (tree *Quadtree) find(node Node, x float64, y float64) Node {
	if node.NodeType == LEAF {
		if node.Point.X == x && node.Point.Y == y {
			return node
		}
		return Node{}
	}
	if node.NodeType == POINTER {
		return tree.find(*getQuadrantForPoint(&node, x, y), x, y)
	}
	// EMPTY cell: nothing stored here.
	return Node{}
}
// set stores value at (x, y), returning an error when the point lies outside
// the tree's bounds. Count is incremented when insert reports that a new
// point was added.
func (tree *Quadtree) set(x float64, y float64, value interface{}) error {
	if x < tree.Root.Left || y < tree.Root.Top || x > tree.Root.Left+tree.Root.Width || y > tree.Root.Top+tree.Root.Height {
		// Fixed: the message previously used %d on float64 arguments, which
		// prints "%!d(float64=...)". errors.New is kept (rather than
		// fmt.Errorf) so the file's "errors" import stays in use.
		return errors.New(fmt.Sprintf("Out of bounds :( %g, %g )", x, y))
	}
	if insert(&tree.Root, Point{x, y, value}) {
		tree.Count++
	}
	return nil
}
// get returns the value stored at (x, y), or nil when no point is stored
// there (the zero Node's Point carries a nil Weight).
func (tree *Quadtree) get(x float64, y float64) interface{} {
	node := tree.find(tree.Root, x, y)
	return node.Point.Weight
}

// remove deletes the point at (x, y), if present, and rebalances the tree.
// NOTE(review): find returns a *copy* of the stored node, so setting
// NodeType on it does not change the node inside the tree; the point is not
// actually removed even though Count is decremented. Confirm whether
// find/remove were meant to work on *Node.
func (tree *Quadtree) remove(x float64, y float64) {
	node := tree.find(tree.Root, x, y)
	if node.NodeType != EMPTY {
		node.NodeType = EMPTY
		tree.Count--
		tree.balance(node)
	}
}
// clear resets the tree to an empty state, dropping all stored points and
// children of the root.
func (tree *Quadtree) clear() {
	root := &tree.Root
	root.NW, root.NE, root.SW, root.SE = nil, nil, nil, nil
	root.NodeType = EMPTY
	root.Point = Point{}
	tree.Count = 0
}
// traverse walks the subtree rooted at node depth-first and invokes TF once
// for every LEAF cell, passing the cell bounds and the stored point. EMPTY
// cells are skipped.
func (tree *Quadtree) traverse(node Node, TF traverseFunc) {
	switch node.NodeType {
	case LEAF:
		{
			TF(node.Left, node.Top, node.Width, node.Height, node.Point)
		}
	case POINTER:
		{
			tree.traverse(*node.NE, TF)
			tree.traverse(*node.SE, TF)
			tree.traverse(*node.SW, TF)
			tree.traverse(*node.NW, TF)
		}
	case EMPTY:
		{
			//TF(node.Left, node.Top, node.Width, node.Height, node.Point)
		}
	}
}

// balance collapses POINTER cells that have at most one non-empty child back
// into a LEAF (or EMPTY) cell, then recurses up through the parents.
// NOTE(review): node is received by value, so the mutations below act on a
// copy rather than on the node stored in the tree — confirm this is intended
// (remove depends on the collapse taking effect).
func (tree *Quadtree) balance(node Node) {
	switch node.NodeType {
	case EMPTY:
	case LEAF:
		{
			if node.Parent != nil {
				tree.balance(*node.Parent)
			}
		}
	case POINTER:
		{
			nw := node.NW
			ne := node.NE
			sw := node.SW
			se := node.SE
			// firstLeaf tracks the single non-empty child, if any. Finding a
			// second non-empty child aborts the collapse (break leaves the
			// switch).
			var firstLeaf *Node
			if nw.NodeType != EMPTY {
				firstLeaf = nw
			}
			if ne.NodeType != EMPTY {
				if firstLeaf != nil {
					break
				}
				firstLeaf = ne
			}
			if sw.NodeType != EMPTY {
				if firstLeaf != nil {
					break
				}
				firstLeaf = sw
			}
			if se.NodeType != EMPTY {
				if firstLeaf != nil {
					break
				}
				firstLeaf = se
			}
			if firstLeaf == nil {
				// All four children empty: the cell itself becomes empty.
				node.NodeType = EMPTY
				node.NW = nil
				node.NE = nil
				node.SW = nil
				node.SE = nil
			} else if firstLeaf.NodeType == POINTER {
				// The lone child is itself subdivided: nothing to collapse.
				break
			} else {
				// Exactly one leaf child: pull its point up into this cell.
				node.NodeType = LEAF
				node.NW = nil
				node.NE = nil
				node.SW = nil
				node.SE = nil
				node.Point = firstLeaf.Point
			}
			if node.Parent != nil {
				tree.balance(*node.Parent)
			}
		}
	}
}
/*============================ PRIVATE FUNCS ============================ */
// insert places point into the subtree rooted at parent, splitting leaves as
// needed. It reports whether a NEW point was added; overwriting an existing
// point at the same coordinates returns false.
// Fixed: the original had an unconditional `result = true` after the LEAF
// branch, so overwriting an existing point also reported true and set()
// inflated Count.
func insert(parent *Node, point Point) bool {
	var result = false
	switch parent.NodeType {
	case EMPTY:
		setPointForNode(parent, point)
		result = true
	case LEAF:
		if parent.Point.X == point.X && parent.Point.Y == point.Y {
			// Same coordinates: overwrite in place; no new point added.
			setPointForNode(parent, point)
		} else {
			// Different point: subdivide this leaf, then insert the new
			// point into the resulting POINTER cell.
			split(parent)
			result = insert(parent, point)
		}
	case POINTER:
		result = insert(getQuadrantForPoint(parent, point.X, point.Y), point)
	}
	return result
}
// traverseFunc is the callback invoked by Quadtree.traverse for each leaf:
// (left, top, width, height, point).
type traverseFunc func(float64, float64, float64, float64, Point)

// getQuadrantForPoint returns the child of parent whose quadrant contains
// the point (x, y). Points on the midlines go east/south.
func getQuadrantForPoint(parent *Node, x float64, y float64) *Node {
	midX := parent.Left + parent.Width/2
	midY := parent.Top + parent.Height/2
	west := x < midX
	north := y < midY
	switch {
	case west && north:
		return parent.NW
	case west:
		return parent.SW
	case north:
		return parent.NE
	}
	return parent.SE
}
// split subdivides a LEAF node into four equal child quadrants and re-inserts
// the point it previously held into the appropriate child.
func split(node *Node) {
	oldPoint := node.Point
	node.Point = Point{}
	node.NodeType = POINTER
	x := node.Left
	y := node.Top
	subWidth := node.Width / 2
	subHeight := node.Height / 2
	node.NW = &Node{Left: x, Top: y, Width: subWidth, Height: subHeight, Parent: node}
	node.NE = &Node{Left: x + subWidth, Top: y, Width: subWidth, Height: subHeight, Parent: node}
	node.SW = &Node{Left: x, Top: y + subHeight, Width: subWidth, Height: subHeight, Parent: node}
	node.SE = &Node{Left: x + subWidth, Top: y + subHeight, Width: subWidth, Height: subHeight, Parent: node}
	insert(node, oldPoint)
}
func setPointForNode(node *Node, point Point) {
if node.NodeType == POINTER {
panic("Can not set point for node of type POINTER");
}
node.NodeType = LEAF
node.Point = point
} | quadtree.go | 0.657428 | 0.484441 | quadtree.go | starcoder |
package cryptotest
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/inklabs/rangedb/pkg/crypto"
"github.com/inklabs/rangedb/pkg/shortuuid"
)
func VerifyKeyStore(t *testing.T, newStore func(t *testing.T) crypto.KeyStore) {
t.Helper()
t.Run("get", func(t *testing.T) {
t.Run("not found for unknown subjectID", func(t *testing.T) {
// Given
const subjectID = "de85370e9e0e449e9fba3afd4f2142b1"
store := newStore(t)
// When
key, err := store.Get(subjectID)
// Then
require.Equal(t, crypto.ErrKeyNotFound, err)
assert.Equal(t, "", key)
})
t.Run("returns previously saved key by subjectID", func(t *testing.T) {
// Given
const expectedKey = "cf050718ab934580a62eae7122a877d9"
subjectID := shortuuid.New().String()
store := newStore(t)
err := store.Set(subjectID, expectedKey)
require.NoError(t, err)
// When
key, err := store.Get(subjectID)
// Then
require.NoError(t, err)
assert.Equal(t, expectedKey, key)
})
})
t.Run("delete", func(t *testing.T) {
t.Run("removes an existing key by subjectID", func(t *testing.T) {
// Given
const expectedKey = "867ebb86ebad4dbea4252dfb5b00b0ce"
subjectID := shortuuid.New().String()
store := newStore(t)
err := store.Set(subjectID, expectedKey)
require.NoError(t, err)
// When
err = store.Delete(subjectID)
// Then
require.NoError(t, err)
key, err := store.Get(subjectID)
require.Equal(t, crypto.ErrKeyWasDeleted, err)
assert.Equal(t, "", key)
})
})
t.Run("set", func(t *testing.T) {
t.Run("errors due to existing key", func(t *testing.T) {
// Given
const (
key1 = "<KEY>"
key2 = "4aa8ba05416e4d1a97cf711ef8a5158b"
)
subjectID := shortuuid.New().String()
store := newStore(t)
err := store.Set(subjectID, key1)
require.NoError(t, err)
// When
err = store.Set(subjectID, key2)
// Then
require.Equal(t, crypto.ErrKeyExistsForSubjectID, err)
})
t.Run("errors from empty encryption key", func(t *testing.T) {
// Given
const emptyKey = ""
subjectID := shortuuid.New().String()
store := newStore(t)
// When
err := store.Set(subjectID, emptyKey)
// Then
require.Equal(t, crypto.ErrInvalidKey, err)
})
t.Run("errors due to duplicate encryption key for two subjectIDs", func(t *testing.T) {
t.Skip("TODO: add support for unique secrets")
// Given
const key = "<KEY>"
subjectID1 := shortuuid.New().String()
subjectID2 := shortuuid.New().String()
store := newStore(t)
err := store.Set(subjectID1, key)
require.NoError(t, err)
// When
err = store.Set(subjectID2, key)
// Then
require.Equal(t, crypto.ErrKeyAlreadyUsed, err)
})
})
} | pkg/crypto/cryptotest/verify_key_store.go | 0.538498 | 0.615781 | verify_key_store.go | starcoder |
package maps
import (
"bytes"
"io"
)
// Polyline represents a list of lat,lng points encoded as a byte array.
// See: https://developers.google.com/maps/documentation/utilities/polylinealgorithm
type Polyline struct {
	// Points is the encoded polyline string.
	Points string `json:"points"`
}
// DecodePolyline converts a polyline encoded string to an array of LatLng objects.
func DecodePolyline(poly string) ([]LatLng, error) {
	return (&Polyline{Points: poly}).Decode()
}
// Decode converts this encoded Polyline to an array of LatLng objects.
// Values are delta-encoded in units of 1e-5 degrees, latitude first.
func (p *Polyline) Decode() ([]LatLng, error) {
	input := bytes.NewBufferString(p.Points)
	var lat, lng int64
	path := make([]LatLng, 0, len(p.Points)/2)
	for {
		// NOTE(review): the latitude read's error is discarded; a clean EOF
		// is detected on the longitude read below, but a stream truncated
		// mid-latitude is only surfaced there as well — confirm that is the
		// intended handling of malformed input.
		dlat, _ := decodeInt(input)
		dlng, err := decodeInt(input)
		if err == io.EOF {
			return path, nil
		}
		if err != nil {
			return nil, err
		}
		lat, lng = lat+dlat, lng+dlng
		path = append(path, LatLng{
			Lat: float64(lat) * 1e-5,
			Lng: float64(lng) * 1e-5,
		})
	}
}
// Encode returns a new encoded Polyline from the given Path.
func Encode(path []LatLng) string {
	var buf bytes.Buffer
	buf.Grow(len(path) * 4)
	prevLat, prevLng := int64(0), int64(0)
	for _, p := range path {
		// Coordinates are scaled to 1e-5 degree units and delta-encoded
		// against the previous point.
		lat := int64(p.Lat * 1e5)
		lng := int64(p.Lng * 1e5)
		encodeInt(lat-prevLat, &buf)
		encodeInt(lng-prevLng, &buf)
		prevLat, prevLng = lat, lng
	}
	return buf.String()
}
// decodeInt reads an encoded int64 from the passed io.ByteReader.
func decodeInt(r io.ByteReader) (int64, error) {
result := int64(0)
var shift uint8
for {
raw, err := r.ReadByte()
if err != nil {
return 0, err
}
b := raw - 63
result += int64(b&0x1f) << shift
shift += 5
if b < 0x20 {
bit := result & 1
result >>= 1
if bit != 0 {
result = ^result
}
return result, nil
}
}
}
// encodeInt writes an encoded int64 to the passed io.ByteWriter.
func encodeInt(v int64, w io.ByteWriter) {
if v < 0 {
v = ^(v << 1)
} else {
v <<= 1
}
for v >= 0x20 {
w.WriteByte((0x20 | (byte(v) & 0x1f)) + 63)
v >>= 5
}
w.WriteByte(byte(v) + 63)
} | polyline.go | 0.855006 | 0.500244 | polyline.go | starcoder |
package storagetests
import (
"testing"
"github.com/anothermemory/storage"
"github.com/anothermemory/unit"
"github.com/stretchr/testify/assert"
)
// CreateFunc represents a function which must return a freshly created
// storage object for a single test case.
type CreateFunc func() storage.Storage

// Func represents a test function for a single test case; it receives the
// testing handle, the storage factory, and a per-case assertion helper.
type Func func(t *testing.T, c CreateFunc, is *assert.Assertions)
// RunStorageTests performs a full test run of every registered test case
// against the storage produced by c.
func RunStorageTests(t *testing.T, c CreateFunc) {
	for _, tc := range tests {
		tc := tc // pin the loop variable for the closure (pre-Go 1.22)
		t.Run(tc.title, func(t *testing.T) {
			tc.testFunc(t, c, assert.New(t))
		})
	}
}
var tests = []struct {
title string
testFunc Func
}{
{"Storage is not created initially when initialized first time with given arguments", func(t *testing.T, c CreateFunc, is *assert.Assertions) {
is.False(c().IsCreated())
}},
{"Storage can be successfully created", func(t *testing.T, c CreateFunc, is *assert.Assertions) {
s := c()
is.NoError(s.Create())
is.True(s.IsCreated())
}},
{"Storage can not be used before it will be created", func(t *testing.T, c CreateFunc, is *assert.Assertions) {
s := c()
u := unit.NewUnit()
is.Error(s.SaveUnit(u))
is.Error(s.RemoveUnit(u))
u, e := s.LoadUnit("123")
is.Error(e)
is.Nil(u)
}},
{"Storage can be removed if not created before", func(t *testing.T, c CreateFunc, is *assert.Assertions) {
s := c()
is.NoError(s.Remove())
}},
{"Storage can be removed if was created before", func(t *testing.T, c CreateFunc, is *assert.Assertions) {
s := c()
is.NoError(s.Create())
is.NoError(s.Remove())
}},
{"Storage is not created when removed", func(t *testing.T, c CreateFunc, is *assert.Assertions) {
s := c()
is.NoError(s.Create())
is.NoError(s.Remove())
is.False(c().IsCreated())
}},
{"Storage can handle all supported simple unit types", func(t *testing.T, c CreateFunc, is *assert.Assertions) {
unitUnit := unit.NewUnit(unit.OptionTitle("MyUnit"))
unitTextPlain := unit.NewTextPlain(unit.OptionTitle("MyUnit"), unit.OptionTextPlainData("MyData"))
unitTextMarkdown := unit.NewTextMarkdown(unit.OptionTitle("MyUnit"), unit.OptionTextMarkdownData("MyData"))
unitTextCode := unit.NewTextCode(unit.OptionTitle("MyUnit"), unit.OptionTextCodeData("MyData"), unit.OptionTextCodeLanguage("MyLang"))
unitTodo := unit.NewTodo(unit.OptionTitle("MyUnit"))
t1 := unitTodo.NewItem()
t1.SetData("Data1")
t1.SetDone(true)
t2 := unitTodo.NewItem()
t2.SetData("Data2")
t2.SetDone(false)
unitTodo.SetItems([]unit.TodoItem{t1, t2})
unitsTests := []unit.Unit{
unitUnit,
unitTextPlain,
unitTextMarkdown,
unitTextCode,
unitTodo,
}
for _, u := range unitsTests {
t.Run(u.Type().String(), func(t *testing.T) {
is := assert.New(t)
s := c()
is.NoError(s.Create())
is.NoError(s.SaveUnit(u))
l, e := s.LoadUnit(u.ID())
is.NoError(e)
is.True(unit.Equal(u, l))
is.NoError(s.RemoveUnit(l))
r, e := s.LoadUnit(l.ID())
is.Error(e)
is.Nil(r)
})
}
}},
{"Storage can handle list unit", func(t *testing.T, c CreateFunc, is *assert.Assertions) {
unitUnit := unit.NewUnit(unit.OptionTitle("MyUnit"))
unitTextPlain := unit.NewTextPlain(unit.OptionTitle("MyUnit"), unit.OptionTextPlainData("MyData"))
unitTextMarkdown := unit.NewTextMarkdown(unit.OptionTitle("MyUnit"), unit.OptionTextMarkdownData("MyData"))
unitTextCode := unit.NewTextCode(unit.OptionTitle("MyUnit"), unit.OptionTextCodeData("MyData"), unit.OptionTextCodeLanguage("MyLang"))
unitTodo := unit.NewTodo(unit.OptionTitle("MyUnit"))
t1 := unitTodo.NewItem()
t1.SetData("Data1")
t1.SetDone(true)
t2 := unitTodo.NewItem()
t2.SetData("Data2")
t2.SetDone(false)
unitTodo.SetItems([]unit.TodoItem{t1, t2})
unitList := unit.NewList(unit.OptionTitle("MyUnit"))
unitList.SetItems([]unit.Unit{
unitUnit,
unitTextPlain,
unitTextMarkdown,
unitTextCode,
unitTodo,
})
s := c()
is.NoError(s.Create())
is.NoError(s.SaveUnit(unitUnit))
is.NoError(s.SaveUnit(unitTextPlain))
is.NoError(s.SaveUnit(unitTextMarkdown))
is.NoError(s.SaveUnit(unitTextCode))
is.NoError(s.SaveUnit(unitTodo))
is.NoError(s.SaveUnit(unitList))
l, e := s.LoadUnit(unitList.ID())
is.NoError(e)
is.True(unit.Equal(unitList, l))
is.NoError(s.RemoveUnit(l))
r, e := s.LoadUnit(l.ID())
is.Error(e)
is.Nil(r)
}},
{"Nil unit cannot be saved", func(t *testing.T, c CreateFunc, is *assert.Assertions) {
s := c()
is.NoError(s.Create())
is.Error(s.SaveUnit(nil))
}},
{"Nil unit cannot be removed", func(t *testing.T, c CreateFunc, is *assert.Assertions) {
s := c()
is.NoError(s.Create())
is.Error(s.RemoveUnit(nil))
}},
{"Empty ID cannot be used to load unit", func(t *testing.T, c CreateFunc, is *assert.Assertions) {
s := c()
is.NoError(s.Create())
l, e := s.LoadUnit("")
is.Error(e)
is.Nil(l)
}},
} | storagetests.go | 0.524151 | 0.560493 | storagetests.go | starcoder |
package day10
import (
"math"
"sort"
)
// Asteroid is a single asteroid on the map. An Asteroid knows its location in
// Cartesian coordinates, and it also knows the location of all other
// asteroids in polar coordinates, relative to itself.
type Asteroid struct {
	X int
	Y int
	// angles maps a quantized polar angle (radians * 10000, truncated) to
	// the asteroids seen at that angle, ordered nearest first.
	angles map[int][]*AsteroidEdge
}
// newAsteroid returns an asteroid at (x, y) with no known neighbours.
func newAsteroid(x, y int) *Asteroid {
	return &Asteroid{
		X:      x,
		Y:      y,
		angles: map[int][]*AsteroidEdge{},
	}
}
// ConnectAll connects this asteroid to every asteroid in the provided list.
func (a *Asteroid) ConnectAll(others []*Asteroid) {
	for _, other := range others {
		a.Connect(other)
	}
}
// Connect ensures that the two asteroids know their relative positions to
// each other. Angles are normalized to [0, 2*pi), scaled by 10000 and
// truncated to an int for use as a map key; the reverse edge uses the
// opposite angle with the same radius.
func (a *Asteroid) Connect(b *Asteroid) {
	// these are intentionally swapped to reflect over the line y = x
	// this gives angles that match the rotation of the laser
	dx, dy := float64(a.Y-b.Y), float64(b.X-a.X)
	angle := math.Atan2(dy, dx)
	if angle < 0 {
		angle = math.Pi*2 + angle
	}
	radius := math.Sqrt(dx*dx + dy*dy)
	a.addEdge(b, int(angle*10000), radius)
	// NOTE(review): int(...) truncates, so the two directions of an edge can
	// land in different buckets near quantization boundaries — confirm this
	// precision is acceptable for the puzzle input.
	otherAngle := angle + math.Pi
	if otherAngle > math.Pi*2 {
		otherAngle -= math.Pi * 2
	}
	b.addEdge(a, int(otherAngle*10000), radius)
}

// addEdge records b at the given quantized angle, keeping the edge list for
// that angle sorted by ascending radius (nearest asteroid first).
func (a *Asteroid) addEdge(b *Asteroid, angle int, radius float64) {
	edge := &AsteroidEdge{
		Asteroid: b,
		Radius:   radius,
	}
	if edges, ok := a.angles[angle]; ok {
		for i, e := range edges {
			if radius < e.Radius {
				// insert into the edges slice
				edges = append(edges, nil)
				copy(edges[i+1:], edges[i:])
				edges[i] = edge
				a.angles[angle] = edges
				return
			}
		}
		// Farther than every existing edge at this angle: append at the end.
		a.angles[angle] = append(a.angles[angle], edge)
		return
	}
	// First asteroid seen at this angle.
	a.angles[angle] = []*AsteroidEdge{edge}
}
// VisibleAsteroids returns the asteroids visible from this one, i.e. the
// nearest asteroid along each angle; occluded asteroids are excluded.
func (a *Asteroid) VisibleAsteroids() []*Asteroid {
	const nearest = 0
	return a.AsteroidsAtDistance(nearest)
}
// AsteroidsAtDistance returns, for each angle in ascending order, the d-th
// nearest asteroid at that angle (0 = nearest). Angles with d or fewer
// asteroids are skipped.
// Cleanups: sort.Ints replaces the manual sort.IntSlice, the key slice is
// pre-sized, and the inner variable no longer shadows the receiver a.
func (a *Asteroid) AsteroidsAtDistance(d int) []*Asteroid {
	angles := make([]int, 0, len(a.angles))
	for angle := range a.angles {
		angles = append(angles, angle)
	}
	sort.Ints(angles)

	var asteroids []*Asteroid
	for _, angle := range angles {
		edges := a.angles[angle]
		if len(edges) > d {
			asteroids = append(asteroids, edges[d].Asteroid)
		}
	}
	return asteroids
}
// AsteroidEdge stores an asteroid and its distance from another astroid.
type AsteroidEdge struct {
Asteroid *Asteroid
Radius float64
} | day10/asteroid.go | 0.824885 | 0.57821 | asteroid.go | starcoder |
package ast
import "fmt"
// Exec returns an int Variant holding the literal's value.
func (n *IntegerLiteral) Exec(context *ExecContext) *Variant {
	return &Variant{
		Type: PrimitiveTypeInt,
		Int: n.Val,
	}
}

// Exec returns a bool Variant holding the literal's value.
func (n *BoolLiteral) Exec(context *ExecContext) *Variant {
	return &Variant{
		Type: PrimitiveTypeBool,
		Bool: n.Val,
	}
}

// Exec returns a string Variant holding the literal's value.
func (n *StringLiteral) Exec(context *ExecContext) *Variant {
	return &Variant{
		Type: PrimitiveTypeString,
		String: n.Str,
	}
}

// Exec returns an undefined Variant, the runtime representation of nil.
func (n *NilLiteral) Exec(context *ExecContext) *Variant {
	return &Variant{
		Type: PrimitiveTypeUndefined,
	}
}
// Exec evaluates the array's declared length and its element literals,
// producing an array Variant. Elements beyond the literal list are filled
// with undefined Variants. A non-integer length or a literal longer than the
// array records an error and yields an undefined Variant.
func (n *ArrayLiteral) Exec(context *ExecContext) *Variant {
	sizeNode := n.Type.Len.Exec(context)
	if sizeNode.Type != PrimitiveTypeInt {
		context.Errors = append(context.Errors, ExecutionError{
			Class: TypeErr,
			CreatingNode: n,
			Text: "Non-integer len used for array",
		})
		return &Variant{Type: PrimitiveTypeUndefined}
	}

	var values = make([]*Variant, sizeNode.Int)
	if len(values) != len(n.Literal) && len(n.Literal) != 0 {
		context.Errors = append(context.Errors, ExecutionError{
			Class: BoundsErr,
			CreatingNode: n,
			Text: "Literal used in array assignment does not match the size of the underlying array",
		})
		return &Variant{Type: PrimitiveTypeUndefined}
	}

	// Evaluate the provided literals, then pad the remainder with undefined.
	var i int
	for ; i < len(n.Literal); i++ {
		values[i] = n.Literal[i].Exec(context)
	}
	for ; i < len(values); i++ {
		values[i] = &Variant{Type: PrimitiveTypeUndefined}
	}

	return &Variant{
		Type: ComplexTypeArray,
		VectorData: values,
	}
}
// Exec resolves the values for the literals specified (if any). Fields of the
// struct type without an explicit value are populated with the default value
// for their declared type; a failure to build a default records an
// InternalErr but leaves the remaining fields intact.
func (n *StructLiteral) Exec(context *ExecContext) *Variant {
	o := &Variant{
		Type: ComplexTypeStruct,
		NamedData: map[string]*Variant{},
	}

	for _, field := range n.Type.Fields {
		if n.Values != nil && n.Values[field.Ident] != nil {
			o.NamedData[field.Ident] = n.Values[field.Ident].Exec(context)
		} else {
			var err error
			o.NamedData[field.Ident], err = DefaultVariantValue(field.Type)
			if err != nil {
				context.Errors = append(context.Errors, ExecutionError{
					Class: InternalErr,
					CreatingNode: n,
					Text: "Failed to create default value to populate field '" + field.Ident + "' with type: " + field.Type.String(),
				})
			}
		}
	}
	return o
}
// Exec runs each statement in order in a child context (with IsFuncContext
// cleared). Execution stops early when a statement produces a return value,
// which is propagated; otherwise an undefined Variant is returned. Errors
// raised by the statements are copied back into the calling context.
func (n *StatementList) Exec(context *ExecContext) *Variant {
	callingContext := (*context)
	newContext := callingContext
	newContext.IsFuncContext = false
	// newContext.Errors starts as a copy of the caller's slice header, so
	// remember its length and copy back only the errors appended during
	// execution. (The original appended the whole slice, duplicating every
	// pre-existing error in the calling context.)
	baseErrs := len(newContext.Errors)

	for _, node := range n.Stmts {
		v := node.Exec(&newContext)
		if v.IsReturn {
			context.Errors = append(context.Errors, newContext.Errors[baseErrs:]...)
			return v
		}
	}

	context.Errors = append(context.Errors, newContext.Errors[baseErrs:]...)
	return &Variant{
		Type: PrimitiveTypeUndefined,
	}
}
// Exec evaluates the return expression and propagates its value with the
// IsReturn flag set, without mutating the expression's own Variant.
func (n *ReturnStmt) Exec(context *ExecContext) *Variant {
	result := *n.Expr.Exec(context)
	result.IsReturn = true
	return &result
}
// Exec evaluates both operands and applies the binary operator n.Op.
// Supported operand pairings are int/int, string/string and bool/bool; any
// other combination, or an operator invalid for the pairing, records a
// TypeErr and yields an undefined Variant.
func (n *BinaryOp) Exec(context *ExecContext) *Variant {
	l := n.LHS.Exec(context)
	r := n.RHS.Exec(context)
	ret := Variant{
		Type: PrimitiveTypeUndefined,
	}

	if l.Type == PrimitiveTypeInt && r.Type == PrimitiveTypeInt {
		ret.Type = PrimitiveTypeInt
		switch n.Op {
		case BinOpAdd:
			ret.Int = l.Int + r.Int
		case BinOpSub:
			ret.Int = l.Int - r.Int
		case BinOpMul:
			ret.Int = l.Int * r.Int
		case BinOpDiv:
			// NOTE(review): a zero divisor panics here (and in BinOpMod)
			// rather than recording a runtime error — confirm desired
			// behaviour for interpreted programs.
			ret.Int = l.Int / r.Int
		case BinOpMod:
			ret.Int = l.Int % r.Int
		case BinOpEquality:
			ret.Type = PrimitiveTypeBool
			ret.Bool = l.Int == r.Int
		case BinOpNotEquality:
			ret.Type = PrimitiveTypeBool
			ret.Bool = l.Int != r.Int
		}
	} else if l.Type == PrimitiveTypeString && r.Type == PrimitiveTypeString {
		ret.Type = PrimitiveTypeString
		switch n.Op {
		case BinOpAdd:
			ret.String = l.String + r.String
		case BinOpEquality:
			ret.Type = PrimitiveTypeBool
			ret.Bool = l.String == r.String
		case BinOpNotEquality:
			ret.Type = PrimitiveTypeBool
			ret.Bool = l.String != r.String
		default:
			ret.Type = PrimitiveTypeUndefined
			context.Errors = append(context.Errors, ExecutionError{
				Class: TypeErr,
				CreatingNode: n,
				Text: "Invalid operation for string operands: " + n.Op.String(),
			})
		}
	} else if l.Type == PrimitiveTypeBool && r.Type == PrimitiveTypeBool {
		ret.Type = PrimitiveTypeBool
		switch n.Op {
		case BinOpEquality:
			// Fixed: this previously computed l.Bool && r.Bool, which
			// wrongly evaluated `false == false` to false. Logical AND is
			// already handled by BinOpLAnd below.
			ret.Bool = l.Bool == r.Bool
		case BinOpNotEquality:
			ret.Bool = l.Bool != r.Bool
		case BinOpLAnd:
			ret.Bool = l.Bool && r.Bool
		case BinOpLOr:
			ret.Bool = l.Bool || r.Bool
		default:
			ret.Type = PrimitiveTypeUndefined
			context.Errors = append(context.Errors, ExecutionError{
				Class: TypeErr,
				CreatingNode: n,
				Text: "Invalid operation for boolean operands: " + n.Op.String(),
			})
		}
	} else {
		context.Errors = append(context.Errors, ExecutionError{
			Class: TypeErr,
			CreatingNode: n,
			Text: "Invalid types for operands: " + l.Type.String() + " and " + r.Type.String(),
		})
	}
	return &ret
}
// Exec resolves the variable by name, consulting the function namespace
// first and then the global namespace. An unresolvable name yields an
// undefined Variant with VariableReferenceFailed set.
func (n *VariableReference) Exec(context *ExecContext) *Variant {
	if local, ok := context.FunctionNamespace[n.Name]; ok {
		return local
	}
	if context.GlobalNamespace != nil {
		if global, ok := context.GlobalNamespace[n.Name]; ok {
			return global
		}
	}
	return &Variant{
		Type:                    PrimitiveTypeUndefined,
		VariableReferenceFailed: true,
	}
}
// Exec carries out node-specific logic, which may include evaluation of subnodes and primitive operations depending on the nodes type.
//
// Assignment resolves the target and the value, then stores the value either
// by name (for plain variable references, choosing function vs global
// namespace) or by overwriting the addressed Variant in place (for other
// assignable targets such as subscripts/selectors).
func (n *Assign) Exec(context *ExecContext) *Variant {
	variable := n.Variable.Exec(context)
	v := n.Value.Exec(context)
	if ident, ok := n.Variable.(*VariableReference); ok {
		if n.NewLocal || v.VariableReferenceFailed {
			// Explicit locals (and values produced by a failed lookup)
			// always land in the function namespace.
			context.FunctionNamespace.Save(ident.Name, v)
		} else {
			if _, ok := context.FunctionNamespace[ident.Name]; ok && context.IsFuncContext {
				// Existing function-local variable.
				context.FunctionNamespace.Save(ident.Name, v)
			} else if _, ok := context.GlobalNamespace[ident.Name]; ok {
				// Existing global variable.
				context.GlobalNamespace.Save(ident.Name, v)
			} else {
				// New variable: scope follows the current execution context.
				if context.IsFuncContext {
					context.FunctionNamespace.Save(ident.Name, v)
				} else {
					context.GlobalNamespace.Save(ident.Name, v)
				}
			}
		}
	} else {
		// Non-reference target: copy the value over the addressed Variant,
		// clearing IsReturn so the copy does not unwind control flow.
		newValue := *v
		newValue.IsReturn = false
		*variable = newValue
	}
	return &Variant{
		Type: PrimitiveTypeUndefined,
	}
}
// Exec carries out node-specific logic, which may include evaluation of subnodes and primitive operations depending on the nodes type.
//
// The optional Init node runs first; the conditional must evaluate to a true
// boolean for the main body to run, otherwise the Else branch (if any) runs.
func (n *IfStmt) Exec(context *ExecContext) *Variant {
	if n.Init != nil {
		n.Init.Exec(context)
	}
	cond := n.Conditional.Exec(context)
	switch {
	case cond.Type == PrimitiveTypeBool && cond.Bool:
		return n.Code.Exec(context)
	case n.Else != nil:
		return n.Else.Exec(context)
	default:
		return &Variant{
			Type: PrimitiveTypeUndefined,
		}
	}
}
// Exec carries out node-specific logic, which may include evaluation of subnodes and primitive operations depending on the nodes type.
//
// The optional Init node runs once, then the conditional is re-evaluated at
// the top of every iteration; a non-boolean conditional aborts the loop with
// a type error. The optional PostIteration node runs after each body pass.
func (n *ForStmt) Exec(context *ExecContext) *Variant {
	if n.Init != nil {
		n.Init.Exec(context)
	}
	// Idiomatic `for` replaces the original `for true`; evaluating the
	// conditional once at the top of each iteration is equivalent to the
	// original pre-loop + bottom-of-loop evaluation pair.
	for {
		conditionResult := n.Conditional.Exec(context)
		if conditionResult.Type != PrimitiveTypeBool {
			context.Errors = append(context.Errors, ExecutionError{
				Class:        TypeErr,
				CreatingNode: n,
				Text:         "Non-bool used as loop conditional: " + conditionResult.Type.String(),
			})
			return &Variant{
				Type: PrimitiveTypeUndefined,
			}
		}
		if !conditionResult.Bool {
			break
		}
		n.Code.Exec(context)
		if n.PostIteration != nil {
			n.PostIteration.Exec(context)
		}
	}
	return &Variant{
		Type: PrimitiveTypeUndefined,
	}
}
// Exec carries out node-specific logic, which may include evaluation of subnodes and primitive operations depending on the nodes type.
//
// Boolean NOT is the only supported unary operation; any other combination
// records a type error and yields an undefined Variant.
func (n *UnaryOp) Exec(context *ExecContext) *Variant {
	upper := n.Expr.Exec(context)
	if upper.Type == PrimitiveTypeBool {
		switch n.Op {
		case UnOpNot:
			return &Variant{
				Type: PrimitiveTypeBool,
				Bool: !upper.Bool,
			}
		default:
			// NOTE(review): this branch means "unsupported operator on a
			// bool operand", yet the message reports the operand type, not
			// the operator — confirm the intended wording.
			context.Errors = append(context.Errors, ExecutionError{
				Class: TypeErr,
				CreatingNode: n,
				Text: "Cannot perform boolean unary operation on " + upper.Type.String(),
			})
		}
	} else {
		context.Errors = append(context.Errors, ExecutionError{
			Class: TypeErr,
			CreatingNode: n,
			Text: "Cannot perform unary operations on type " + upper.Type.String(),
		})
	}
	return &Variant{
		Type: PrimitiveTypeUndefined,
	}
}
// Exec carries out node-specific logic, which may include evaluation of subnodes and primitive operations depending on the nodes type.
//
// A subscript requires an array-typed base and an int-typed index; the
// element at that index is returned. Every failure mode records an
// ExecutionError on the context and yields an undefined Variant.
func (n *Subscript) Exec(context *ExecContext) *Variant {
	baseVar := n.Expr.Exec(context)
	subscript := n.Subscript.Exec(context)
	if baseVar.VariableReferenceFailed {
		context.Errors = append(context.Errors, ExecutionError{
			Class:        NotFoundErr,
			CreatingNode: n,
			Text:         "Could not resolve a value/variable for base data of type " + baseVar.Type.String(),
		})
		return &Variant{
			Type: PrimitiveTypeUndefined,
		}
	}
	if baseVar.Type != ComplexTypeArray {
		context.Errors = append(context.Errors, ExecutionError{
			Class:        TypeErr,
			CreatingNode: n,
			Text:         "Cannot perform subscript operation on type " + baseVar.Type.String(),
		})
		return &Variant{
			Type: PrimitiveTypeUndefined,
		}
	}
	if subscript.Type != PrimitiveTypeInt {
		// Report the subscript expression's type; the original mistakenly
		// reported the base type here.
		context.Errors = append(context.Errors, ExecutionError{
			Class:        TypeErr,
			CreatingNode: n,
			Text:         "Cannot perform subscript operation with index of type " + subscript.Type.String(),
		})
		return &Variant{
			Type: PrimitiveTypeUndefined,
		}
	}
	// Guard both ends of the range — a negative index would otherwise panic
	// when indexing VectorData below.
	if subscript.Int < 0 || int(subscript.Int) >= len(baseVar.VectorData) {
		context.Errors = append(context.Errors, ExecutionError{
			Class:        BoundsErr,
			CreatingNode: n,
			Text:         "Subscript out of bounds",
		})
		return &Variant{
			Type: PrimitiveTypeUndefined,
		}
	}
	return baseVar.VectorData[subscript.Int]
}
// Exec carries out node-specific logic, which may include evaluation of subnodes and primitive operations depending on the nodes type.
//
// Resolves a field by name on a struct-typed value, recording an execution
// error when the base is not a struct or the named element is missing.
func (n *NamedSelector) Exec(context *ExecContext) *Variant {
	baseVar := n.Expr.Exec(context)
	if baseVar.Type.Kind() != ComplexTypeStruct {
		context.Errors = append(context.Errors, ExecutionError{
			Class:        TypeErr,
			CreatingNode: n,
			Text:         "Cannot perform named selection operation on type " + baseVar.Type.String(),
		})
		return &Variant{
			Type: PrimitiveTypeUndefined,
		}
	}
	v, ok := baseVar.NamedData[n.Name]
	if !ok {
		context.Errors = append(context.Errors, ExecutionError{
			Class:        NotFoundErr,
			CreatingNode: n,
			Text:         "Cannot find named element " + n.Name,
		})
		return &Variant{
			Type: PrimitiveTypeUndefined,
		}
	}
	return v
}
// Exec represents the invocation of the FunctionCall - with the function pointer and arguments resolved from the contained nodes.
func (n *FunctionCall) Exec(context *ExecContext) *Variant {
fmt.Println(context.GlobalNamespace)
functionPointer := n.Function.Exec(context)
if functionPointer.Type.Kind() != ComplexTypeFunction {
context.Errors = append(context.Errors, ExecutionError{
Class: TypeErr,
CreatingNode: n,
Text: "Cannot call non-function type: " + functionPointer.Type.String(),
})
return &Variant{
Type: PrimitiveTypeUndefined,
}
}
fn := map[string]*Variant{}
execContext := &ExecContext{
IsFuncContext: true,
FunctionNamespace: fn,
GlobalNamespace: context.GlobalNamespace,
}
for i, paramNode := range functionPointer.Type.(FunctionType).Parameters {
pn := n.Args[i].Exec(context)
nt := paramNode.(NamedType)
fn[nt.Ident] = pn
}
return functionPointer.Type.(FunctionType).Code.Exec(execContext)
} | ast/exec.go | 0.708717 | 0.534795 | exec.go | starcoder |
/**
* An extended form of Bram Cohen's patience diff algorithm.
* <p>
* This implementation was derived by using the 4 rules that are outlined in
* Bram Cohen's <a href="http://bramcohen.livejournal.com/73318.html">blog</a>,
* and then was further extended to support low-occurrence common elements.
* <p>
* The basic idea of the algorithm is to create a histogram of occurrences for
* each element of sequence A. Each element of sequence B is then considered in
* turn. If the element also exists in sequence A, and has a lower occurrence
* count, the positions are considered as a candidate for the longest common
* subsequence (LCS). After scanning of B is complete the LCS that has the
* lowest number of occurrences is chosen as a split point. The region is split
* around the LCS, and the algorithm is recursively applied to the sections
* before and after the LCS.
* <p>
* By always selecting a LCS position with the lowest occurrence count, this
* algorithm behaves exactly like Bram Cohen's patience diff whenever there is a
* unique common element available between the two sequences. When no unique
* elements exist, the lowest occurrence element is chosen instead. This offers
* more readable diffs than simply falling back on the standard Myers' O(ND)
* algorithm would produce.
* <p>
* To prevent the algorithm from having an O(N^2) running time, an upper limit
* on the number of unique elements in a histogram bucket is configured by
* {@link #setMaxChainLength(int)}. If sequence A has more than this many
* elements that hash into the same hash bucket, the algorithm passes the region
* to {@link #setFallbackAlgorithm(DiffAlgorithm)}. If no fallback algorithm is
* configured, the region is emitted as a replace edit.
* <p>
* During scanning of sequence B, any element of A that occurs more than
* {@link #setMaxChainLength(int)} times is never considered for an LCS match
* position, even if it is common between the two sequences. This limits the
* number of locations in sequence A that must be considered to find the LCS,
* and helps maintain a lower running time bound.
* <p>
* So long as {@link #setMaxChainLength(int)} is a small constant (such as 64),
* the algorithm runs in O(N * D) time, where N is the sum of the input lengths
* and D is the number of edits in the resulting EditList. If the supplied
* {@link SequenceComparator} has a good hash function, this implementation
* typically out-performs {@link MyersDiff}, even though its theoretical running
* time is the same.
* <p>
* This implementation has an internal limitation that prevents it from handling
* sequences with more than 268,435,456 (2^28) elements.
*/
package histogramdiff
import (
"github.com/hattya/go.diff"
)
// MAX_OCCURRENCE caps how many times an element may occur in sequence A and
// still be considered as an LCS split point (cf. setMaxChainLength in the
// header comment).
const MAX_OCCURRENCE = 64

// HistIndex carries the scratch state for one diff region: the occurrence
// records for sequence A, the best LCS found so far and what was observed
// while scanning sequence B.
type HistIndex struct {
	rm map[string]*Record // element value -> its positions in sequence A
	count int // occurrence count of the current best LCS (lower is better)
	lcs Region // best common region found so far
	has_common bool // true once any element of B is also found in A
	has_lcs bool // true once lcs holds a usable split point
}

// Record lists the positions at which one element value occurs in sequence A.
type Record struct {
	lines []int
}

// Region is a pair of half-open index ranges, one into each sequence.
type Region struct {
	astart int
	aend int
	bstart int
	bend int
}
// Strings computes the differences between the string slices al and bl using
// the histogram algorithm and returns them as diff.Change edits over the
// full length of both inputs.
func Strings(al []string, bl []string) []diff.Change {
	return histogram_diff(al, 0, len(al), bl, 0, len(bl))
}
// histogram_diff recursively diffs al[astart:aend] against bl[bstart:bend]:
// it finds the lowest-occurrence longest common subsequence, splits around
// it and recurses on the regions before and after it.
func histogram_diff(al []string, astart int, aend int, bl []string, bstart int, bend int) []diff.Change {
	if astart == aend && bstart == bend {
		// Both regions empty: nothing changed.
		return []diff.Change{}
	} else if astart == aend || bstart == bend {
		// One side empty: the whole region is a pure insert/delete.
		return []diff.Change{diff.Change{A: astart, B: bstart, Del: aend - astart, Ins: bend - bstart}}
	}
	index := HistIndex{
		rm: map[string]*Record{},
		count: MAX_OCCURRENCE,
	}
	find_lcs(&index, al, astart, aend, bl, bstart, bend)
	if !index.has_lcs {
		if index.has_common {
			// Common elements exist but all are too frequent; delegate the
			// region to the fallback (Myers) algorithm.
			return fallback_diff(al, astart, aend, bl, bstart, bend)
		} else {
			// No common element at all: emit a single replace edit.
			return []diff.Change{diff.Change{A: astart, B: bstart, Del: aend - astart, Ins: bend - bstart}}
		}
	}
	cl := []diff.Change{}
	subcl := histogram_diff(al, astart, index.lcs.astart, bl, bstart, index.lcs.bstart)
	cl = append(cl, subcl...)
	subcl = histogram_diff(al, index.lcs.aend, aend, bl, index.lcs.bend, bend)
	cl = append(cl, subcl...)
	return cl
}
// fallback_diff diffs the two sub-regions with the Myers-based diff package
// and shifts the resulting change offsets back into whole-sequence
// coordinates.
func fallback_diff(al []string, astart int, aend int, bl []string, bstart int, bend int) []diff.Change {
	changes := diff.Strings(al[astart:aend], bl[bstart:bend])
	for i := range changes {
		changes[i].A += astart
		changes[i].B += bstart
	}
	return changes
}
// find_lcs builds the occurrence histogram for the A region, then scans the
// B region recording the best low-occurrence common subsequence in index.
func find_lcs(index *HistIndex, al []string, astart int, aend int, bl []string, bstart int, bend int) {
	scanA(index, al, astart, aend)
	for b := bstart; b < bend; {
		// try_lcs returns the next B position to consider; it may skip
		// past a region it has already matched.
		b = try_lcs(index, b, al, astart, aend, bl, bstart, bend)
	}
}
// scanA populates index.rm with the positions of every element of
// al[astart:aend], keyed by element value.
func scanA(index *HistIndex, al []string, astart int, aend int) {
	for a := astart; a < aend; a++ {
		// Single map lookup per element (the original looked the key up
		// three times on the append path).
		if rec, ok := index.rm[al[a]]; ok {
			rec.lines = append(rec.lines, a)
		} else {
			index.rm[al[a]] = &Record{lines: []int{a}}
		}
	}
}
// try_lcs extends the common element bl[b] (if it occurs in A) into the
// longest run of matching elements around each of its A positions and, when
// such a run is longer than the current best LCS or built from rarer
// elements, records it as the new split point. It returns the index in B at
// which scanning should resume.
func try_lcs(index *HistIndex, b int, al []string, astart int, aend int, bl []string, bstart int, bend int) int {
	b_next := b + 1
	r, ok := index.rm[bl[b]]
	if !ok {
		// bl[b] never occurs in A; nothing to extend here.
		return b_next
	}
	index.has_common = true
	if len(r.lines) > index.count {
		// Too common to beat the current best split point.
		return b_next
	}
	prev_ae := 0
	for _, a := range r.lines {
		if a < prev_ae {
			// Already swallowed by the previous extension.
			continue
		}
		as := a
		ae := a + 1
		bs := b
		be := b + 1
		rc := len(r.lines)
		// Grow the match backwards, tracking the rarest element seen.
		for astart < as && bstart < bs && al[as-1] == bl[bs-1] {
			as--
			bs--
			if len(index.rm[al[as]].lines) < rc {
				rc = len(index.rm[al[as]].lines)
			}
		}
		// Grow the match forwards the same way.
		for ae < aend && be < bend && al[ae] == bl[be] {
			ae++
			be++
			if len(index.rm[al[ae-1]].lines) < rc {
				rc = len(index.rm[al[ae-1]].lines)
			}
		}
		if b_next < be {
			b_next = be
		}
		// Keep the region if it is longer, or built from rarer elements.
		if index.lcs.aend-index.lcs.astart < ae-as || rc < index.count {
			index.lcs.astart = as
			index.lcs.aend = ae
			index.lcs.bstart = bs
			index.lcs.bend = be
			index.count = rc
			index.has_lcs = true
		}
		prev_ae = ae
	}
	return b_next
}
package graphblas
import (
"context"
"log"
"reflect"
"github.com/rossmerr/graphblas/constraints"
)
// init registers the concrete CSRMatrix element type with the package-level
// matrix registry so it can be constructed by reflected type.
func init() {
	RegisterMatrix(reflect.TypeOf((*CSRMatrix[float64])(nil)).Elem())
}
// CSRMatrix compressed storage by rows (CSR)
//
// Non-zero values are stored row by row in values, with cols holding the
// column index of each stored value and rowStart[r]..rowStart[r+1]
// delimiting the entries belonging to row r.
type CSRMatrix[T constraints.Number] struct {
	r int // number of rows in the sparse matrix
	c int // number of columns in the sparse matrix
	values []T // stored (non-zero) values in row-major order
	cols []int // column index of each stored value
	rowStart []int // per-row offsets into values/cols (length r+1)
}
// NewCSRMatrix returns a CSRMatrix
func NewCSRMatrix[T constraints.Number](r, c int) *CSRMatrix[T] {
	return newCSRMatrix[T](r, c, 0)
}

// NewCSRMatrixFromArray returns a CSRMatrix
//
// Dimensions are taken from the dense input slice; every element is offered
// to Set, which stores only the non-default values.
func NewCSRMatrixFromArray[T constraints.Number](data [][]T) *CSRMatrix[T] {
	r := len(data)
	c := len(data[0])
	s := newCSRMatrix[T](r, c, 0)
	for i := 0; i < r; i++ {
		for k := 0; k < c; k++ {
			s.Set(i, k, data[i][k])
		}
	}
	return s
}

// newCSRMatrix allocates an r x c matrix with storage for l stored values.
func newCSRMatrix[T constraints.Number](r, c int, l int) *CSRMatrix[T] {
	s := &CSRMatrix[T]{
		r: r,
		c: c,
		values: make([]T, l),
		cols: make([]int, l),
		rowStart: make([]int, r+1),
	}
	return s
}
// Columns the number of columns of the matrix
func (s *CSRMatrix[T]) Columns() int {
	return s.c
}

// Rows the number of rows of the matrix
func (s *CSRMatrix[T]) Rows() int {
	return s.r
}
// Update does a At and Set on the matrix element at r-th, c-th
//
// f receives the current value (the default/zero value when the element is
// not stored) and its result is written back; a result equal to the default
// value removes the entry from the sparse storage. Panics on out-of-range
// indices.
func (s *CSRMatrix[T]) Update(r, c int, f func(T) T) {
	if r < 0 || r >= s.r {
		log.Panicf("Row '%+v' is invalid", r)
	}
	if c < 0 || c >= s.c {
		log.Panicf("Column '%+v' is invalid", c)
	}
	pointerStart, pointerEnd := s.columnIndex(r, c)
	if pointerStart < pointerEnd && s.cols[pointerStart] == c {
		// Element exists: replace it, or drop it if it becomes the default.
		value := f(s.values[pointerStart])
		if value == Default[T]() {
			s.remove(pointerStart, r)
		} else {
			s.values[pointerStart] = value
		}
	} else {
		// Element missing: insert the result at its sorted position.
		s.insert(pointerStart, r, c, f(Default[T]()))
	}
}
// At returns the value of a matrix element at r-th, c-th
//
// Implemented via Update with an identity function; missing elements read
// as the default (zero) value.
func (s *CSRMatrix[T]) At(r, c int) (value T) {
	s.Update(r, c, func(v T) T {
		value = v
		return v
	})
	return
}

// Set sets the value at r-th, c-th of the matrix
func (s *CSRMatrix[T]) Set(r, c int, value T) {
	s.Update(r, c, func(v T) T {
		return value
	})
}
// ColumnsAt return the columns at c-th
//
// Builds a sparse vector of length Rows() holding column c. Panics on an
// out-of-range column.
func (s *CSRMatrix[T]) ColumnsAt(c int) Vector[T] {
	if c < 0 || c >= s.c {
		log.Panicf("Column '%+v' is invalid", c)
	}
	columns := NewSparseVector[T](s.r)
	// Probe every row for an entry in column c.
	for r := range s.rowStart[:s.r] {
		pointerStart, pointerEnd := s.columnIndex(r, c)
		if pointerStart < pointerEnd && s.cols[pointerStart] == c {
			columns.SetVec(r, s.values[pointerStart])
		}
	}
	return columns
}

// RowsAt return the rows at r-th
//
// Builds a sparse vector of length Columns() holding row r. Panics on an
// out-of-range row.
func (s *CSRMatrix[T]) RowsAt(r int) Vector[T] {
	if r < 0 || r >= s.r {
		log.Panicf("Row '%+v' is invalid", r)
	}
	rows := NewSparseVector[T](s.c)
	start := s.rowStart[r]
	end := s.rowStart[r+1]
	for i := start; i < end; i++ {
		rows.SetVec(s.cols[i], s.values[i])
	}
	return rows
}

// RowsAtToArray return the rows at r-th
//
// Like RowsAt but materialised as a dense []T of length Columns().
func (s *CSRMatrix[T]) RowsAtToArray(r int) []T {
	if r < 0 || r >= s.Rows() {
		log.Panicf("Row '%+v' is invalid", r)
	}
	rows := make([]T, s.c)
	start := s.rowStart[r]
	end := s.rowStart[r+1]
	for i := start; i < end; i++ {
		rows[s.cols[i]] = s.values[i]
	}
	return rows
}
// insert places value at position pointer in the value/col arrays for row r
// and shifts the row-start offsets of all later rows up by one. Default
// (zero) values are never stored.
func (s *CSRMatrix[T]) insert(pointer, r, c int, value T) {
	if value == Default[T]() {
		return
	}
	s.cols = append(s.cols[:pointer], append([]int{c}, s.cols[pointer:]...)...)
	s.values = append(s.values[:pointer], append([]T{value}, s.values[pointer:]...)...)
	for i := r + 1; i <= s.r; i++ {
		s.rowStart[i]++
	}
}

// remove deletes the entry at position pointer belonging to row r and shifts
// the row-start offsets of all later rows down by one.
func (s *CSRMatrix[T]) remove(pointer, r int) {
	s.cols = append(s.cols[:pointer], s.cols[pointer+1:]...)
	s.values = append(s.values[:pointer], s.values[pointer+1:]...)
	for i := r + 1; i <= s.r; i++ {
		s.rowStart[i]--
	}
}
// columnIndex binary-searches row r for column c. It returns a pair of
// pointers into the value/col arrays: the index where c is stored (or would
// be inserted) and an end bound; callers test pointerStart < pointerEnd &&
// cols[pointerStart] == c to detect a hit.
func (s *CSRMatrix[T]) columnIndex(r, c int) (int, int) {
	start := s.rowStart[r]
	end := s.rowStart[r+1]
	if start-end == 0 {
		// Empty row.
		return start, end
	}
	if c > s.cols[end-1] {
		// Beyond the last stored column of the row.
		return end, end
	}
	for start < end {
		p := (start + end) / 2
		if s.cols[p] > c {
			end = p
		} else if s.cols[p] < c {
			start = p + 1
		} else {
			return p, end
		}
	}
	return start, end
}
// Copy copies the matrix
//
// Produces an independent deep copy of the sparse storage with the same
// dimensions. Uses the builtin copy instead of element-by-element loops.
func (s *CSRMatrix[T]) Copy() Matrix[T] {
	matrix := newCSRMatrix[T](s.r, s.c, len(s.values))
	copy(matrix.values, s.values)
	copy(matrix.cols, s.cols)
	copy(matrix.rowStart, s.rowStart)
	return matrix
}
// Scalar multiplication of a matrix by alpha
func (s *CSRMatrix[T]) Scalar(alpha T) Matrix[T] {
	return Scalar[T](context.Background(), s, alpha)
}

// Multiply multiplies a matrix by another matrix
//
// The result has s.Rows() x m.Columns() dimensions.
func (s *CSRMatrix[T]) Multiply(m Matrix[T]) Matrix[T] {
	matrix := newCSRMatrix[T](s.Rows(), m.Columns(), 0)
	MatrixMatrixMultiply[T](context.Background(), s, m, nil, matrix)
	return matrix
}

// Add addition of a matrix by another matrix
func (s *CSRMatrix[T]) Add(m Matrix[T]) Matrix[T] {
	matrix := s.Copy()
	Add[T](context.Background(), s, m, nil, matrix)
	return matrix
}

// Subtract subtracts one matrix from another matrix
//
// NOTE(review): the output buffer starts from m.Copy() whereas Add starts
// from s.Copy() — confirm this asymmetry is intended by the package-level
// Subtract's semantics.
func (s *CSRMatrix[T]) Subtract(m Matrix[T]) Matrix[T] {
	matrix := m.Copy()
	Subtract[T](context.Background(), s, m, nil, matrix)
	return matrix
}

// Negative the negative of a matrix
func (s *CSRMatrix[T]) Negative() Matrix[T] {
	matrix := s.Copy()
	Negative[T](context.Background(), s, nil, matrix)
	return matrix
}

// Transpose swaps the rows and columns
func (s *CSRMatrix[T]) Transpose() Matrix[T] {
	matrix := newCSRMatrix[T](s.c, s.r, 0)
	Transpose[T](context.Background(), s, nil, matrix)
	return matrix
}

// Equal the two matrices are equal
func (s *CSRMatrix[T]) Equal(m Matrix[T]) bool {
	return Equal[T](context.Background(), s, m)
}

// NotEqual the two matrices are not equal
func (s *CSRMatrix[T]) NotEqual(m Matrix[T]) bool {
	return NotEqual[T](context.Background(), s, m)
}

// Size of the matrix
//
// Size is rows*columns, i.e. the capacity including zero elements.
func (s *CSRMatrix[T]) Size() int {
	return s.Rows() * s.Columns()
}

// Values the number of non-zero elements in the matrix
func (s *CSRMatrix[T]) Values() int {
	return len(s.values)
}
// Clear removes all elements from a matrix
//
// The backing slices are reallocated empty; the dimensions are preserved.
func (s *CSRMatrix[T]) Clear() {
	s.values = make([]T, 0)
	s.cols = make([]int, 0)
	s.rowStart = make([]int, s.r+1)
}

// Enumerate iterates through all non-zero elements, order is not guaranteed
func (s *CSRMatrix[T]) Enumerate() Enumerate[T] {
	return s.iterator()
}
// cSRMatrixIterator walks the stored (non-zero) entries of a CSRMatrix in
// row-major order, holding pointers into the row currently being scanned.
type cSRMatrixIterator[T constraints.Number] struct {
	matrix *CSRMatrix[T]
	size int // total number of stored values
	last int // number of values emitted so far
	c int // column of the current element
	r int // row of the current element
	cIndex int // candidate column while scanning the current row
	index int // index of the current element in matrix.values
	pointerStart int // next entry of the current row
	pointerEnd int // one past the last entry of the current row
}

// iterator builds a fresh iterator positioned before the first row.
func (s *CSRMatrix[T]) iterator() *cSRMatrixIterator[T] {
	i := &cSRMatrixIterator[T]{
		matrix: s,
		size: len(s.values),
		r: -1,
	}
	return i
}
// next advances the iterator to the next stored element, moving forward
// through the row-start table and the current row's sorted column list.
// Callers must check HasNext first.
func (s *cSRMatrixIterator[T]) next() {
	// Skip rows with no remaining entries.
	for s.pointerStart == s.pointerEnd {
		s.r++
		s.pointerStart = s.matrix.rowStart[s.r]
		s.pointerEnd = s.matrix.rowStart[s.r+1]
		s.cIndex = s.matrix.cols[s.pointerStart]
	}
	for s.pointerStart < s.pointerEnd {
		if s.matrix.cols[s.pointerStart] == s.cIndex {
			// Found the next stored entry: record its position and value
			// index, then advance the bookkeeping.
			s.index = s.pointerStart
			s.pointerStart++
			s.c = s.cIndex
			s.cIndex++
			s.last++
			return
		}
		// Columns are sorted; advance the candidate column until it
		// matches the next stored entry.
		s.cIndex++
	}
}
// HasNext checks the iterator has any more values
func (s *cSRMatrixIterator[T]) HasNext() bool {
	// Direct comparison replaces the original if/return-false/return-true.
	return s.last < s.size
}

// Next moves the iterator and returns the row, column and value
func (s *cSRMatrixIterator[T]) Next() (int, int, T) {
	s.next()
	return s.r, s.c, s.matrix.values[s.index]
}
// Map replace each element with the result of applying a function to its value
func (s *CSRMatrix[T]) Map() Map[T] {
	t := s.iterator()
	i := &cSRMatrixMap[T]{t}
	return i
}

// cSRMatrixMap adapts the element iterator into a mutating Map iterator.
type cSRMatrixMap[T constraints.Number] struct {
	*cSRMatrixIterator[T]
}

// HasNext checks the iterator has any more values
func (s *cSRMatrixMap[T]) HasNext() bool {
	return s.cSRMatrixIterator.HasNext()
}

// Map move the iterator and uses a higher order function to changes the elements current value
//
// Writing the default (zero) value removes the entry from the sparse storage.
func (s *cSRMatrixMap[T]) Map(f func(int, int, T) T) {
	s.next()
	value := f(s.r, s.c, s.matrix.values[s.index])
	if value != Default[T]() {
		s.matrix.values[s.index] = value
	} else {
		s.matrix.remove(s.index, s.r)
	}
}
// Element of the mask for each tuple that exists in the matrix for which the value of the tuple cast to Boolean is true
//
// Reports whether the value at (r, c) is greater than the default (zero)
// value; implemented via Update with an identity function.
func (s *CSRMatrix[T]) Element(r, c int) (b bool) {
	s.Update(r, c, func(v T) T {
		b = v > Default[T]()
		return v
	})
	return
}
package il
import (
"fmt"
"strconv"
)
// coerceLiteral statically converts a literal between the primitive types by
// round-tripping through its string representation (matching the string
// conversion semantics referenced by makeCoercion). The boolean result
// reports whether the conversion succeeded.
func coerceLiteral(lit *BoundLiteral, from, to Type) (*BoundLiteral, bool) {
	var str string
	switch from {
	case TypeBool:
		str = strconv.FormatBool(lit.Value.(bool))
	case TypeNumber:
		str = strconv.FormatFloat(lit.Value.(float64), 'g', -1, 64)
	case TypeString:
		str = lit.Value.(string)
	default:
		panic(fmt.Sprintf("unexpected literal type in coerceLiteral: %v", lit.Value))
	}
	switch to {
	case TypeBool:
		// The empty string coerces to false.
		if str == "" {
			return &BoundLiteral{ExprType: TypeBool, Value: false}, true
		}
		val, err := strconv.ParseBool(str)
		if err == nil {
			return &BoundLiteral{ExprType: TypeBool, Value: val}, true
		}
	case TypeNumber:
		// The empty string coerces to zero.
		if str == "" {
			return &BoundLiteral{ExprType: TypeNumber, Value: 0.0}, true
		}
		val, err := strconv.ParseFloat(str, 64)
		if err == nil {
			return &BoundLiteral{ExprType: TypeNumber, Value: val}, true
		}
	case TypeString:
		return &BoundLiteral{ExprType: TypeString, Value: str}, true
	}
	return nil, false
}
// canMakeCoerceCall reports whether a dynamic __coerce call can convert a
// value of type from into type to: bool/number convert to string, and
// string converts to bool or number.
func canMakeCoerceCall(from, to Type) bool {
	switch {
	case from == TypeBool || from == TypeNumber:
		return to == TypeString
	case from == TypeString:
		return to == TypeBool || to == TypeNumber
	}
	return false
}
// makeCoercion inserts a call to the `__coerce` intrinsic if one is required to convert the given expression to the
// given type. If the input node is statically coercable according to the semantics of
// "github.com/hashicorp/terraform/helper/schema.stringToPrimitive".
//
// Non-expression nodes, same-type conversions and unsupported conversions
// are returned unchanged.
func makeCoercion(n BoundNode, toType Type) BoundNode {
	// TODO: we really need dynamic coercions for the negative case.
	from, to := n.Type().ElementType(), toType.ElementType()
	e, ok := n.(BoundExpr)
	if !ok || from == to {
		return n
	}
	// If we're dealing with a literal, we can always try to convert through a string.
	if lit, ok := n.(*BoundLiteral); ok {
		if result, ok := coerceLiteral(lit, from, to); ok {
			return result
		}
	}
	// Otherwise, we will either do nothing (for conversions we don't support), or emit a call to the __coerce
	// intrinsic. That call will later be generated as an appropriate dynamic coercion.
	if !canMakeCoerceCall(from, to) {
		return n
	}
	return NewCoerceCall(e, toType)
}
// AddCoercions inserts calls to the `__coerce` intrinsic in cases where a list or map element's type disagrees with
// the element type present in the list or map's schema.
//
// The rewriter mutates list/map property nodes in place as the tree is
// visited; the (possibly replaced) root node is returned.
func AddCoercions(prop BoundNode) (BoundNode, error) {
	rewriter := func(n BoundNode) (BoundNode, error) {
		switch n := n.(type) {
		case *BoundListProperty:
			// Every list element must match the schema's element type.
			elemType := n.Schemas.ElemSchemas().Type()
			for i := range n.Elements {
				n.Elements[i] = makeCoercion(n.Elements[i], elemType)
			}
		case *BoundMapProperty:
			// Each map entry is checked against its per-key schema.
			for k := range n.Elements {
				n.Elements[k] = makeCoercion(n.Elements[k], n.Schemas.PropertySchemas(k).Type())
			}
		}
		return n, nil
	}
	return VisitBoundNode(prop, IdentityVisitor, rewriter)
}
package financial
import (
"errors"
"math"
)
// Fv Returns a float specifying the future value of an annuity based on
// periodic, fixed payments and a fixed interest rate.
//
// rate is the per-period interest rate, periods the number of payment
// periods, pmt the payment made each period and principal the present
// value. An error is returned for a negative number of periods.
func Fv(rate, periods, pmt, principal float64) (float64, error) {
	if periods < 0 {
		return 0, errors.New("Invalid payment period")
	}
	// With a zero rate there is no compounding: the annuity factor
	// (t-1)/rate degenerates to the number of periods. The original
	// expression divided by rate here and produced NaN/Inf.
	if rate == 0 {
		return -principal - pmt*periods, nil
	}
	t := math.Pow(rate+1, periods)
	return (-principal * t) - (pmt/rate)*(t-1), nil
}
// Pv Returns a float specifying the present value of an annuity based on periodic,
// fixed payments to be paid in the future and a fixed interest rate.
//
// When rate is zero the present value is simply the negated future value
// less the total of the payments.
func Pv(rate, periods, pmt, fv float64, endOfPeriod bool) (float64, error) {
	if rate == 0 {
		return -fv - pmt*periods, nil
	}
	adjust := 1.0
	if endOfPeriod {
		adjust += rate
	}
	growth := math.Pow(1.0+rate, periods)
	annuity := pmt * adjust * ((growth - 1.0) / rate)
	return -(fv + annuity) / growth, nil
}
// Pmt Returns a float specifying the payment for an annuity based on periodic,
// fixed payments and a fixed interest rate.
//
// Fix: the endOfPeriod=true path previously zeroed the rate, which made the
// growth factor 1 and the divisor (t-1) zero, so every such call returned
// NaN. It now applies the same (1+rate) payment-timing factor that Pv uses
// (a payment shifted by one period is discounted by 1+rate).
func Pmt(rate, periods, principal, fv float64, endOfPeriod bool) (float64, error) {
	if periods < 1 {
		return 0, errors.New("Invalid payment period")
	}
	if rate == 0 {
		// No interest: the principal is repaid in equal slices.
		return -(principal / periods), nil
	}
	t := math.Pow(rate+1, periods)
	pmt := ((-(fv + principal) * t) / (t - 1)) * rate
	if endOfPeriod {
		pmt /= 1 + rate
	}
	return pmt, nil
}
// IPmt Returns a float specifying the interest payment for an annuity based on
// periodic, fixed payments and a fixed interest rate.
//
// The interest portion for currentPeriod is the accumulated balance after
// currentPeriod-1 periods multiplied by the rate.
func IPmt(rate, currentPeriod, periods, principal float64) (float64, error) {
	if periods < 1 || currentPeriod > periods {
		return 0, errors.New("Invalid payment period")
	}
	if rate == 0 {
		return 0, nil
	}
	pmt, err := Pmt(rate, periods, principal, 0, false)
	if err != nil {
		return 0, err
	}
	fv, err := Fv(rate, currentPeriod-1, pmt, principal)
	if err != nil {
		return 0, err
	}
	return fv * rate, nil
}
// PPmt Returns a float specifying the principal payment for an annuity based on
// periodic, fixed payments and a fixed interest rate.
func PPmt(rate, currentPeriod, periods, principal float64) (float64, error) {
pmt, e := Pmt(rate, periods, principal, 0, false)
if e != nil {
return 0, e
}
ipmt, e := IPmt(rate, currentPeriod, periods, principal)
if e != nil {
return 0, e
}
return pmt - ipmt, nil
} | time_based.go | 0.871939 | 0.685334 | time_based.go | starcoder |
package wordchainsresolver
// GreedyWordTreeNode struct represents words tidy in a tree
//
// Each node stores one word, its distance score to the goal word, a link
// back to its parent (used to reconstruct the chain) and its children.
type GreedyWordTreeNode struct {
	Word string
	ScoreToGoal int
	PreviousElement *GreedyWordTreeNode
	NextElements []*GreedyWordTreeNode
}

// NewGreedyWordTreeElement is GreedyWordTreeNode constructor
// input : a word to be push in the tree, its score from the final word, its previous element
func NewGreedyWordTreeElement(word string, score int, previous *GreedyWordTreeNode) *GreedyWordTreeNode {
	return &GreedyWordTreeNode{
		Word: word,
		ScoreToGoal: score,
		PreviousElement: previous,
		NextElements: nil,
	}
}
// extractSolutionFromNode walks the parent links from this node back to the
// root and returns the words as a chain ordered root-first.
func (node *GreedyWordTreeNode) extractSolutionFromNode() []string {
	var wordChains []string
	tmpNode := node
	wordChains = append(wordChains, tmpNode.Word)
	for tmpNode.PreviousElement != nil {
		tmpNode = tmpNode.PreviousElement
		wordChains = append(wordChains, tmpNode.Word)
	}
	// Words were collected leaf-to-root, so reverse before returning.
	return flipStringSlice(wordChains)
}

// getNodeDepth counts the nodes on the path from this node up to the root
// (the root itself has depth 1).
func (node *GreedyWordTreeNode) getNodeDepth() int {
	depth := 1
	tmpNode := node
	for tmpNode.PreviousElement != nil {
		depth++
		tmpNode = tmpNode.PreviousElement
	}
	return depth
}
// GreedySolver is a implementation of Solver interface in order to find
// word chains with a greedy algorithm
//
// All fields are per-search scratch state and are reset by Clean.
type GreedySolver struct {
	wordList []string
	from string
	to string
	usefulWords []string
	wordTree *GreedyWordTreeNode
	matchingWordNode []*GreedyWordTreeNode
	solutionFoundAtDepth int
	maxDepth int
}

// NewGreedySolver is a simple GreedySolver constructor
func NewGreedySolver() *GreedySolver {
	return &GreedySolver{
		// int(^uint(0) >> 1) is the maximum int: "no solution found yet".
		solutionFoundAtDepth: int(^uint(0) >> 1),
	}
}

// NewGreedySolverWithParams is a GreedySolver constructor too but with params
// input : the first word of the futur word chains, the ending word of the futur word chains,
// the word list database
// /!\ Warning, using this constructor is unsafe and should be used in a testing purpose
func NewGreedySolverWithParams(from string, to string, wordList []string) *GreedySolver {
	return &GreedySolver{
		wordList: wordList,
		from: from,
		to: to,
		usefulWords: nil,
		wordTree: nil,
		matchingWordNode: nil,
		solutionFoundAtDepth: int(^uint(0) >> 1),
		// The search depth cap is a heuristic: three times the word length.
		maxDepth: len(from) * 3,
	}
}
// FindWordChains implements the Solver interface. The greedy solver generate a word chain
// using the greedy algorithm. It is not complete so it may not give any expected
//
// Returns ErrorWordLengthDoesNotMatch when from and to differ in length;
// internal state is reset via Clean before returning.
func (greedy *GreedySolver) FindWordChains(from string, to string, wordList []string) ([][]string, error) {
	// NOTE(review): this guard compares rune counts, but maxDepth below and
	// the filters in getUsefulWordOnly use byte length — confirm inputs are
	// ASCII or unify the two measures.
	if len([]rune(from)) != len([]rune(to)) {
		return nil, ErrorWordLengthDoesNotMatch
	}
	greedy.from = from
	greedy.to = to
	greedy.wordList = wordList
	greedy.maxDepth = len(from) * 3
	greedy.getUsefulWordOnly()
	solutions := greedy.getPath()
	greedy.Clean()
	return getBestSolution(solutions), nil
}
// getPath builds the greedy word tree rooted at the start word and converts
// every goal-matching node recorded during the search into a word chain.
func (greedy *GreedySolver) getPath() [][]string {
	root := NewGreedyWordTreeElement(greedy.from, getScoreBetweenTwoWord(greedy.from, greedy.to), nil)
	greedy.wordTree = greedy.generateTree(root, []string{greedy.from})
	var solutions [][]string
	for _, node := range greedy.matchingWordNode {
		solutions = append(solutions, node.extractSolutionFromNode())
	}
	return solutions
}
// generateTree recursively expands head with candidate next words, recording
// every node whose word equals the goal in greedy.matchingWordNode. wordList
// carries the words already used on the current path so they are not reused.
func (greedy *GreedySolver) generateTree(head *GreedyWordTreeNode, wordList []string) *GreedyWordTreeNode {
	// Ending condition
	if head.Word == greedy.to {
		greedy.solutionFoundAtDepth = head.getNodeDepth()
		greedy.matchingWordNode = append(greedy.matchingWordNode, head)
		return head
	}
	if head.getNodeDepth() > greedy.solutionFoundAtDepth {
		// A solution at least this short already exists; prune the branch.
		return head
	}
	possibleNextWords := greedy.listPossibleNextWords(head.Word)
	possibleNextWords = excludeStringsFromStrings(possibleNextWords, wordList)
	numberOfChildAdded := 1
	targetedScore := head.ScoreToGoal + 1
	// First try words that score strictly closer to the goal...
	head, numberOfChildAdded = greedy.createPopulation(head, possibleNextWords, wordList, targetedScore)
	depth := head.getNodeDepth()
	if numberOfChildAdded == 0 && depth < greedy.maxDepth {
		// ...and only when none exist, allow words keeping the same score,
		// subject to the depth cap.
		head, numberOfChildAdded = greedy.createPopulation(head, possibleNextWords, wordList, targetedScore-1)
	}
	return head
}
// createPopulation appends a child to head for every candidate word whose
// score to the goal equals targetedScore, recursing into generateTree for
// each. It returns the head and the number of children created.
func (greedy *GreedySolver) createPopulation(head *GreedyWordTreeNode, possibleNextWords, wordList []string, targetedScore int) (*GreedyWordTreeNode, int) {
	numberOfNodeCreated := 0
	for _, word := range possibleNextWords {
		scoreFromGoal := getScoreBetweenTwoWord(word, greedy.to)
		if scoreFromGoal == targetedScore {
			numberOfNodeCreated++
			newNode := NewGreedyWordTreeElement(word, scoreFromGoal, head)
			// Mark the word as used along this path before recursing.
			wordList = append(wordList, word)
			newNode = greedy.generateTree(newNode, wordList)
			head.NextElements = append(head.NextElements, newNode)
		}
	}
	return head, numberOfNodeCreated
}
// getUsefulWordOnly filters the dictionary down to words with the same
// (byte) length as the start word, excluding the start word itself.
func (greedy *GreedySolver) getUsefulWordOnly() {
	wordLength := len(greedy.from)
	for _, word := range greedy.wordList {
		if len(word) == wordLength && word != greedy.from {
			greedy.usefulWords = append(greedy.usefulWords, word)
		}
	}
}

// listPossibleNextWords returns every useful word whose score against word
// is len(word)-1 — presumably "all but one position match", i.e. one-letter
// neighbours (see getScoreBetweenTwoWord).
func (greedy *GreedySolver) listPossibleNextWords(word string) []string {
	var possibleNewWords []string
	for _, nextWord := range greedy.usefulWords {
		if getScoreBetweenTwoWord(nextWord, word) == len(word)-1 {
			possibleNewWords = append(possibleNewWords, nextWord)
		}
	}
	return possibleNewWords
}
// Clean delete all data stored in the current GreedySolver instance
func (greedy *GreedySolver) Clean() {
	greedy.from = ""
	greedy.to = ""
	greedy.usefulWords = nil
	greedy.wordTree = nil
	greedy.matchingWordNode = nil
	// Reset the best-depth sentinel to max int so the next search starts fresh.
	greedy.solutionFoundAtDepth = int(^uint(0) >> 1)
}
package main
/*
Day 13, Part A
Given a list of inputs in a file in the format `layer number: depth`, each on
a new line, which represent the number of firewall layers in position. If
there is no layer specified, it has no depth.
Each cycle a "scanner" will progress down in depth of each layer. When the
"scan" reaches the bottom, start over at the top.
After the scanner moves, progress through each layer to the end (at the top)
and record each layer where the scanner runs into us.
Move the position, then move the scanner. If we run into an active scan then
we're detected.
*/
import (
"bufio"
"flag"
"fmt"
"os"
"strconv"
"strings"
)
// Command-line flags selecting the puzzle input and the solver behavior.
var inputFile = flag.String("inputFile", "./inputs/day13-example.txt", "Input File")
var partB = flag.Bool("partB", false, "Perform part B solution?")
var debug = flag.Bool("debug", false, "Debug?")
var maxAttempts = flag.Int("maxAttempts", 4000000, "Max attempts for part B")
// Firewall models the layered scanner firewall; all three maps are keyed by
// layer number.
type Firewall struct {
	Rules map[int]int // layer # -> depth
	Positions map[int]int // current position in each layer
	MovementDirection map[int]bool // true=down, false=up
}
// Clone returns a deep copy of the firewall's rule, position and direction
// maps.
//
// Fix: the original iterated densely from layer 0 to HighestLayer(), which
// inserted zero-valued entries for layers absent from the source maps and
// inflated Len() on the clone. Ranging over the existing rules copies
// exactly what is present (identical behavior once gaps have been filled
// with 0-depth layers, faithful before that).
func (fw *Firewall) Clone() *Firewall {
	ret := NewFirewall()
	for layer, depth := range fw.Rules {
		ret.Rules[layer] = depth
		ret.Positions[layer] = fw.Positions[layer]
		ret.MovementDirection[layer] = fw.MovementDirection[layer]
	}
	return ret
}
// AddRuleAtPos registers a layer with the given depth, starting its scanner
// at the top (position 0) moving downward.
func (fw *Firewall) AddRuleAtPos(layer, depth int) {
	fw.Rules[layer] = depth
	fw.Positions[layer] = 0
	fw.MovementDirection[layer] = true
}

// how many layers in total?
func (fw *Firewall) Len() int {
	return len(fw.Rules)
}
/*
Advance moves every layer's scanner by one step. Scanners travel down to
position depth-1, then bounce back up to 0, and so on. It returns the
(live) Positions map describing where each scanner now sits; layers with no
scanner have no meaningful entry.

Fix: a depth-1 scanner previously fell through the "flip and step" logic and
was driven to the invalid position -1 (then oscillated between -1 and 0),
which made CheckCollision miss it half the time. A depth-1 scanner has only
one cell, so it must stay parked at position 0.
*/
func (fw *Firewall) Advance() map[int]int {
	for layerNumber := 0; layerNumber <= fw.HighestLayer(); layerNumber++ {
		depth := fw.Rules[layerNumber]
		// Skip if there's no scanner in this layer.
		if depth == 0 {
			continue
		}
		// A depth-1 scanner has nowhere to go: keep it at the top.
		if depth == 1 {
			fw.Positions[layerNumber] = 0
			continue
		}
		if *debug {
			fmt.Printf("Advancing to firewall position (layer) %d\n", layerNumber)
		}
		if fw.MovementDirection[layerNumber] {
			// Currently going down.
			if *debug {
				fmt.Printf("Currently going down ")
			}
			if fw.Positions[layerNumber]+1 >= depth {
				if *debug {
					fmt.Printf("Can't go down because fw.Positions[layerNumber]+1 >= fw.Rules[layerNumber] (%d>=%d)\n", fw.Positions[layerNumber]+1, depth)
				}
				// Hit the bottom: reverse direction and take one step up.
				fw.MovementDirection[layerNumber] = false
				fw.Positions[layerNumber] -= 1
			} else {
				if *debug {
					fmt.Printf("Keeping on going down\n")
				}
				fw.Positions[layerNumber] += 1
			}
		} else {
			// Currently going up.
			if *debug {
				fmt.Printf("Currently going up ")
			}
			if fw.Positions[layerNumber]-1 < 0 {
				if *debug {
					fmt.Printf("Can't keep going up because fw.Positions[layerNumber]-1 < 0 (%d<0)\n", fw.Positions[layerNumber]-1)
				}
				// Hit the top: reverse direction and take one step down.
				fw.MovementDirection[layerNumber] = true
				fw.Positions[layerNumber] += 1
			} else {
				if *debug {
					fmt.Printf("Keep on going up")
				}
				fw.Positions[layerNumber] -= 1
			}
		} // end: which way am i going?
	} // out of layers
	return fw.Positions
}
// HighestLayer returns the largest layer number present in the rule map,
// or -1 when the firewall has no layers at all.
func (fw *Firewall) HighestLayer() int {
	top := -1
	for layer := range fw.Rules {
		if layer > top {
			top = layer
		}
	}
	return top
}
// FillInGaps adds explicit 0-depth entries for every layer number between 0
// and the highest layer that has no rule, so the firewall has a contiguous
// run of layers.
//
// Note: `fw.Rules[i] = 0` looks like a no-op (a missing key already reads
// as 0), but it deliberately materialises a map entry for the layer, which
// makes Len() count gap layers too.
func (fw *Firewall) FillInGaps() {
	for i := 0; i <= fw.HighestLayer(); i++ {
		if fw.Rules[i] == 0 {
			fw.Rules[i] = 0 // materialise the entry so the layer exists in the map
			fw.MovementDirection[i] = true // gap layers nominally start "down" like real ones
		}
	}
}
// PrintMap dumps one line per layer describing its depth, current scanner
// position and travel direction. Debug aid only.
func (fw *Firewall) PrintMap() {
	top := fw.HighestLayer()
	for layer := 0; layer <= top; layer++ {
		fmt.Printf("Layer %d Depth: %d Pos: %d Down?: %t\n", layer, fw.Rules[layer], fw.Positions[layer], fw.MovementDirection[layer])
	}
}
// CheckCollision reports whether the scanner in layer layerPosition is at
// the top (position 0), i.e. whether a packet entering that layer right now
// would be caught. Layers without a scanner never collide.
func (fw *Firewall) CheckCollision(layerPosition int) bool {
	if fw.Rules[layerPosition] <= 0 {
		return false
	}
	return fw.Positions[layerPosition] == 0
}
// CollisionCost returns the severity of being caught in layer
// layerPosition: the layer number multiplied by that layer's depth.
func (fw *Firewall) CollisionCost(layerPosition int) int {
	severity := layerPosition * fw.Rules[layerPosition]
	return severity
}
/*
CheckRun tries to run a packet through the layers starting from the current
scanner configuration, advancing the scanners once per layer crossed.
Note that it mutates the receiver's scanner state; callers that need to
preserve state should run it on a Clone().
Returns: success/failure, failure position, collision cost.
Note: failure position and cost reflect only the FIRST collision, since the
loop breaks as soon as one is found.
*/
func (fw *Firewall) CheckRun() (bool, int, int) {
	collisionCost := 0
	failPosition := 0
	ret := true
	if *debug {
		fmt.Printf("Firewall at the start of CheckRun\n")
		fw.PrintMap()
	}
	// Layer 0 is special: the packet enters it BEFORE the scanners move,
	// so check it against the initial configuration.
	if fw.CheckCollision(0) {
		collisionCost += fw.CollisionCost(0)
		if *debug {
			fmt.Printf("Firewall at end of CheckRun (0 check)\n")
			fw.PrintMap()
		}
		return false, 0, collisionCost
	}
	// For every subsequent layer: scanners advance one picosecond, then the
	// packet occupies that layer and is checked for a collision.
	for position := 1; position <= fw.HighestLayer(); position++ {
		fw.Advance()
		if *debug {
			fmt.Printf("CheckRun - checking at layer %d\n", position)
		}
		if fw.CheckCollision(position) {
			failPosition = position
			collisionCost += fw.CollisionCost(position)
			ret = false
			break
		}
	}
	if *debug {
		fmt.Printf("Firewall at end of CheckRun\n")
		fw.PrintMap()
	}
	return ret, failPosition, collisionCost
}
// NewFirewall allocates an empty Firewall with all three layer maps
// initialised and ready for AddRuleAtPos.
func NewFirewall() *Firewall {
	fw := &Firewall{}
	fw.Rules = make(map[int]int)
	fw.Positions = make(map[int]int)
	fw.MovementDirection = make(map[int]bool)
	return fw
}
// main reads the firewall description from -inputFile, then solves either
// part A (total severity of a zero-delay trip) or, with -partB, the
// smallest departure delay that crosses the firewall uncaught.
func main() {
	flag.Parse()
	input, err := os.Open(*inputFile)
	if err != nil {
		fmt.Printf("Couldn't read file: %s\n", err)
		os.Exit(1)
	}
	defer input.Close()
	firewall := NewFirewall()
	lineReader := bufio.NewScanner(input)
	for lineReader.Scan() {
		line := lineReader.Text()
		var layer, depth int
		// Each line is "layer: depth"; the token index n selects which half
		// we're parsing.
		for n, token := range strings.Split(line, ":") {
			switch n {
			case 0:
				layer, err = strconv.Atoi(strings.Trim(token, " "))
				if err != nil {
					fmt.Printf("Couldn't convert %s to layer number.\n", token)
					os.Exit(1)
				}
			case 1:
				depth, err = strconv.Atoi(strings.Trim(token, " "))
				if err != nil {
					fmt.Printf("Couldn't convert %s to depth.\n", token)
					os.Exit(1)
				}
			default:
				fmt.Printf("Unknown item found at %d (%s) in %s\n", n, token, line)
				os.Exit(1)
			}
		} // EOL
		firewall.AddRuleAtPos(layer, depth)
	} // EOF
	firewall.FillInGaps()
	if *debug {
		fmt.Printf("Firewall after creation\n")
		firewall.PrintMap()
	}
	if *partB {
		success := false
		failPosition := -1
		cost := 0
		// Each attempt models one extra picosecond of delay before entering:
		// CheckRun is executed on a Clone (so the base state survives), and
		// on failure the base firewall is advanced one more step.
		for attempt := 1; attempt < *maxAttempts; attempt++ {
			if *debug {
				fmt.Println()
				fmt.Printf("Part B attempt with %d picosecond delay\n", attempt-1)
			}
			success, failPosition, cost = firewall.Clone().CheckRun()
			if success {
				fmt.Printf("Success after %d runs\n", attempt-1)
				return
			} else {
				if *debug {
					fmt.Printf("	Tried delaying for %d picoseconds, but failed on layer %d (cost: %d)\n", attempt-1, failPosition, cost)
				}
				firewall.Advance()
			}
			if *debug {
				fmt.Println()
			}
		}
		fmt.Printf("Out of attempts\n")
	} else { // end part B
		// Part A: walk the firewall once with no delay and sum the severity
		// of every collision (the walk never stops on a collision).
		collisionCost := 0
		// Layer 0 is checked against the initial configuration, before any
		// scanner movement.
		if firewall.CheckCollision(0) {
			collisionCost += firewall.CollisionCost(0)
		}
		if *debug {
			fmt.Printf("picosecond %d\n", 0)
			firewall.PrintMap()
			fmt.Printf("Collision at %d? => %t\n", 0, firewall.CheckCollision(0))
			fmt.Println()
		}
		for position := 1; position <= firewall.HighestLayer(); position++ {
			firewall.Advance()
			if *debug {
				fmt.Printf("picosecond %d\n", position)
				firewall.PrintMap()
				fmt.Printf("Collision at %d? => %t\n", position, firewall.CheckCollision(position))
			}
			if firewall.CheckCollision(position) {
				collisionCost += firewall.CollisionCost(position)
			}
			if *debug {
				fmt.Println()
			}
		}
		fmt.Printf("Made it! But at what cost...? Collision Cost: %d\n", collisionCost)
	}
}
package cheapruler
import (
"errors"
"math"
)
// CheapRuler is a collection of very fast approximations to common geodesic
// measurements, useful for performance-sensitive code that measures things
// on a city scale. Kx and Ky are the multipliers that convert one degree of
// longitude and latitude (respectively) into the ruler's distance unit at
// the latitude the ruler was created for.
type CheapRuler struct {
	Kx float64
	Ky float64
}
// PointOnLine describes the closest point on a line to a query point:
// Point is the closest point itself, Index is the start index of the
// segment containing it, and T is the interpolation parameter along that
// segment (0 at the segment start, 1 at its end).
type PointOnLine struct {
	Point []float64
	Index float64
	T float64
}
// New creates a cheap ruler calibrated for the given latitude (degrees),
// measuring in the unit m.
func New(lat float64, m Unit) CheapRuler {
	cr := CheapRuler{}
	cos := math.Cos(lat * math.Pi / 180)
	// Multiple-angle recurrence: cos2..cos5 are cos(2·lat)..cos(5·lat),
	// each derived from cos(n·lat) = 2·cos(lat)·cos((n-1)·lat) - cos((n-2)·lat).
	cos2 := 2*cos*cos - 1
	cos3 := 2*cos*cos2 - cos
	cos4 := 2*cos*cos3 - cos2
	cos5 := 2*cos*cos4 - cos3
	// multipliers for converting longitude and latitude degrees into distance
	// (truncated FCC ellipsoid series, see http://1.usa.gov/1Wb1bv7)
	cr.Kx = float64(m) * (111.41513*cos - 0.09455*cos3 + 0.00012*cos5)
	cr.Ky = float64(m) * (111.13209 - 0.56605*cos2 + 0.0012*cos4)
	return cr
}
// NewCheapruler returns a new cheap ruler instance for the given latitude,
// accepting the measurement unit by name (compatibility wrapper around New).
// Recognised names are lowercase, with British spellings accepted where
// they exist. An error is returned for an unknown unit name.
func NewCheapruler(lat float64, m string) (CheapRuler, error) {
	var u Unit
	switch m {
	case "kilometers", "kilometres":
		u = Kilometers
	case "miles":
		u = Miles
	case "nauticalmiles":
		u = Nauticalmiles
	case "meters", "metres":
		u = Meters
	case "yards":
		u = Yards
	case "feet":
		u = Feet
	// Fix: this case was previously capitalised ("Inches"), so the
	// lowercase unit name — the form every other case accepts — errored.
	case "inches":
		u = Inches
	default:
		// Fix: the message previously lacked the space after the unit name.
		return CheapRuler{}, errors.New(m + " is not a valid unit")
	}
	return New(lat, u), nil
}
// NewFromTile creates a CheapRuler calibrated for the latitude at the
// vertical centre of web-mercator tile row y at zoom z. Convenient in
// tile-reduce scripts.
func NewFromTile(y float64, z float64, m Unit) CheapRuler {
	rowCenter := y + 0.5
	n := math.Pi * (1 - 2*rowCenter/math.Pow(2, z))
	// 0.5*(e^n - e^-n) is sinh(n); atan(sinh(n)) inverts the mercator projection.
	sinh := 0.5 * (math.Exp(n) - math.Exp(-n))
	lat := math.Atan(sinh) * 180 / math.Pi
	return New(lat, m)
}
// NewCheaprulerFromTile creates a CheapRuler from tile coordinates (y, z),
// taking the unit by name like NewCheapruler. Convenient in tile-reduce
// scripts.
func NewCheaprulerFromTile(y float64, z float64, m string) (CheapRuler, error) {
	rowCenter := y + 0.5
	n := math.Pi * (1 - 2*rowCenter/math.Pow(2, z))
	sinh := 0.5 * (math.Exp(n) - math.Exp(-n))
	lat := math.Atan(sinh) * 180 / math.Pi
	return NewCheapruler(lat, m)
}
// Distance returns the approximate distance between points a and b
// ([lon, lat]) in the units of the ruler.
func (cr CheapRuler) Distance(a []float64, b []float64) float64 {
	dLon := (a[0] - b[0]) * cr.Kx
	dLat := (a[1] - b[1]) * cr.Ky
	return math.Sqrt(dLon*dLon + dLat*dLat)
}
// Bearing returns the bearing from a to b in degrees, measured clockwise
// from north. Coincident points report 0.
func (cr CheapRuler) Bearing(a []float64, b []float64) float64 {
	dx := (b[0] - a[0]) * cr.Kx
	dy := (b[1] - a[1]) * cr.Ky
	if dy == 0.0 && dx == 0.0 {
		return 0.0
	}
	deg := math.Atan2(dx, dy) * 180 / math.Pi
	// Defensive normalisation kept from the original port; math.Atan2
	// already yields a value in (-π, π], i.e. (-180, 180] degrees here.
	if deg > 180 {
		deg -= 360
	}
	return deg
}
// Destination returns the point reached by travelling dist ruler-units from
// p on the given bearing (degrees clockwise from north).
func (cr CheapRuler) Destination(p []float64, dist float64, bearing float64) []float64 {
	// Convert compass bearing to a mathematical angle in radians.
	theta := (90.0 - bearing) * math.Pi / 180.0
	dx := math.Cos(theta) * dist
	dy := math.Sin(theta) * dist
	return cr.Offset(p, dx, dy)
}
// Offset returns a new point displaced from p by the given easting (dx) and
// northing (dy) offsets, both in ruler units.
func (cr CheapRuler) Offset(p []float64, dx float64, dy float64) []float64 {
	return []float64{p[0] + dx/cr.Kx, p[1] + dy/cr.Ky}
}
// LineDistance returns the total length of a line (a slice of points),
// summing the distance of each consecutive segment.
func (cr CheapRuler) LineDistance(points [][]float64) float64 {
	total := 0.0
	for i := 1; i < len(points); i++ {
		total += cr.Distance(points[i-1], points[i])
	}
	return total
}
// Area returns the approximate area of a polygon (a slice of rings, where
// each ring is a slice of points). The first ring is the outer boundary;
// every subsequent ring is treated as a hole and subtracted.
//
// Fix: the previous vertex-pairing loop ran `j++` BEFORE `k = j`, so from
// the second iteration onward k always equalled j and every shoelace term
// except the first evaluated to zero, making the result wrong for any ring
// with more than one vertex. The pairing now matches the reference
// implementation: k trails j by one, wrapping the first vertex to the last.
func (cr CheapRuler) Area(polygon [][][]float64) float64 {
	sum := 0.0
	for i := 0; i < len(polygon); i++ {
		ring := polygon[i]
		ringlen := len(ring)
		// Holes contribute with opposite sign.
		sign := 1.0
		if i != 0 {
			sign = -1.0
		}
		// Shoelace formula over consecutive vertex pairs (j, k), where k is
		// the predecessor of j and the first vertex pairs with the last.
		k := ringlen - 1
		for j := 0; j < ringlen; j++ {
			sum += (ring[j][0] - ring[k][0]) * (ring[j][1] + ring[k][1]) * sign
			k = j
		}
	}
	return (math.Abs(sum) / 2) * cr.Kx * cr.Ky
}
// Along returns the point located dist ruler-units along the line from its
// start. A non-positive distance returns the first point; a distance past
// the end returns the last point.
func (cr CheapRuler) Along(line [][]float64, dist float64) []float64 {
	if dist <= 0 {
		return line[0]
	}
	traveled := 0.0
	for i := 0; i+1 < len(line); i++ {
		segStart, segEnd := line[i], line[i+1]
		segLen := cr.Distance(segStart, segEnd)
		traveled += segLen
		if traveled > dist {
			// The target sits inside this segment; interpolate within it.
			return interpolate(segStart, segEnd, (dist-(traveled-segLen))/segLen)
		}
	}
	return line[len(line)-1]
}
// PointOnLine returns a struct where Point is the closest point on the line
// to the given point p, Index is the start index of the segment containing
// that closest point, and T is the interpolation parameter within that
// segment.
//
// NOTE(review): t is declared outside the segment loop and only assigned
// when a segment is non-degenerate, so for a zero-length segment the
// previous iteration's t may be recorded — confirm this matches the
// reference implementation's intent.
func (cr CheapRuler) PointOnLine(line [][]float64, p []float64) PointOnLine {
	minDist := math.Inf(1)
	var minX float64
	var minY float64
	var minI float64
	var minT float64
	var t float64
	for i := 0; i < len(line)-1; i++ {
		// Start from the segment's first endpoint; x/y are then moved to
		// the projection of p clamped onto the segment.
		x := line[i][0]
		y := line[i][1]
		dx := (line[i+1][0] - x) * cr.Kx
		dy := (line[i+1][1] - y) * cr.Ky
		if dx != 0 || dy != 0 {
			// t is the normalised projection of p onto the segment.
			t = ((p[0]-x)*cr.Kx*dx + (p[1]-y)*cr.Ky*dy) / (dx*dx + dy*dy)
			if t > 1 {
				// Past the far endpoint: clamp to it.
				x = line[i+1][0]
				y = line[i+1][1]
			} else if t > 0 {
				// Interior of the segment: move to the projection.
				x += (dx / cr.Kx) * t
				y += (dy / cr.Ky) * t
			}
			// t <= 0 keeps the near endpoint.
		}
		// Squared ruler-distance from p to the candidate point.
		dx = (p[0] - x) * cr.Kx
		dy = (p[1] - y) * cr.Ky
		sqDist := dx*dx + dy*dy
		if sqDist < minDist {
			minDist = sqDist
			minX = x
			minY = y
			minI = float64(i)
			minT = t
		}
	}
	return PointOnLine{
		[]float64{minX, minY},
		minI,
		minT,
	}
}
// LineSlice returns the part of the given line between the start and stop
// points (or their closest points on the line). The endpoints are swapped
// if stop precedes start along the line, so the result always follows the
// line's direction.
func (cr CheapRuler) LineSlice(start []float64, stop []float64, line [][]float64) [][]float64 {
	p1 := cr.PointOnLine(line, start)
	p2 := cr.PointOnLine(line, stop)
	// Ensure p1 comes before p2 along the line (compare segment index,
	// then position within the segment).
	if p1.Index > p2.Index || (p1.Index == p2.Index && p1.T > p2.T) {
		tmp := p1
		p1 = p2
		p2 = tmp
	}
	sl := [][]float64{p1.Point}
	// Copy the interior vertices strictly between the two projections.
	// Indexing line[int(l)] before checking l <= r is safe: PointOnLine
	// only yields segment indices up to len(line)-2, so l <= len(line)-1.
	l := p1.Index + 1
	r := p2.Index
	if !equals(line[int(l)], sl[0]) && l <= r {
		sl = append(sl, line[int(l)])
	}
	for i := l + 1; i <= r; i++ {
		sl = append(sl, line[int(i)])
	}
	// Close with the stop projection unless it coincides with the last
	// copied vertex.
	if !equals(line[int(r)], p2.Point) {
		sl = append(sl, p2.Point)
	}
	return sl
}
// LineSliceAlong returns the part of the given line between the start and
// stop distances measured along the line from its first point.
func (cr CheapRuler) LineSliceAlong(start float64, stop float64, line [][]float64) [][]float64 {
	sum := 0.0
	var sl [][]float64
	for i := 0; i < len(line)-1; i++ {
		p0 := line[i]
		p1 := line[i+1]
		d := cr.Distance(p0, p1)
		sum += d
		// First point of the slice: interpolated at distance `start`
		// within the segment that crosses it.
		if sum > start && len(sl) == 0.0 {
			sl = append(sl, interpolate(p0, p1, (start-(sum-d))/d))
		}
		// Last point of the slice: interpolated at distance `stop`; the
		// slice is complete once we cross it.
		if sum >= stop {
			sl = append(sl, interpolate(p0, p1, (stop-(sum-d))/d))
			return sl
		}
		// Interior vertices between start and stop are copied verbatim.
		if sum > start {
			sl = append(sl, p1)
		}
	}
	return sl
}
// BufferPoint returns the bounding box ([]float64{w, s, e, n}) obtained by
// buffering the given point by `buffer` ruler-units in every direction.
func (cr CheapRuler) BufferPoint(p []float64, buffer float64) []float64 {
	dLat := buffer / cr.Ky
	dLon := buffer / cr.Kx
	return []float64{
		p[0] - dLon, // west
		p[1] - dLat, // south
		p[0] + dLon, // east
		p[1] + dLat, // north
	}
}
// BufferBBox returns the given bounding box ([w, s, e, n]) expanded by
// `buffer` ruler-units on every side.
func (cr CheapRuler) BufferBBox(bbox []float64, buffer float64) []float64 {
	dLat := buffer / cr.Ky
	dLon := buffer / cr.Kx
	return []float64{
		bbox[0] - dLon, // west
		bbox[1] - dLat, // south
		bbox[2] + dLon, // east
		bbox[3] + dLat, // north
	}
}
// InsideBBox reports whether the point p lies inside (or on the edge of)
// the bounding box bbox, given as [w, s, e, n].
func (cr CheapRuler) InsideBBox(p []float64, bbox []float64) bool {
	withinLon := p[0] >= bbox[0] && p[0] <= bbox[2]
	withinLat := p[1] >= bbox[1] && p[1] <= bbox[3]
	return withinLon && withinLat
}
// equals reports whether two points share both coordinates exactly.
func equals(a []float64, b []float64) bool {
	if a[0] != b[0] {
		return false
	}
	return a[1] == b[1]
}
// interpolate returns the point a fraction t of the way from a to b
// (t=0 yields a, t=1 yields b; values outside [0,1] extrapolate).
func interpolate(a []float64, b []float64, t float64) []float64 {
	x := a[0] + (b[0]-a[0])*t
	y := a[1] + (b[1]-a[1])*t
	return []float64{x, y}
}
package light
import (
"github.com/g3n/engine/core"
"github.com/g3n/engine/gls"
"github.com/g3n/engine/math32"
)
// Point is an omnidirectional light source. The udata struct mirrors the
// shader-side uniform layout: three vec3 slots holding the pre-multiplied
// color, the camera-space position, and the two decay factors (padded with
// a dummy float to fill the third vec3).
type Point struct {
	core.Node // Embedded node
	color math32.Color // Light color (as set by the user, before intensity scaling)
	intensity float32 // Light intensity
	uni gls.Uniform // Uniform location cache
	udata struct { // Combined uniform data in 3 vec3:
		color math32.Color // Light color pre-multiplied by intensity
		position math32.Vector3 // Light position in camera coordinates
		linearDecay float32 // Distance linear decay factor
		quadraticDecay float32 // Distance quadratic decay factor
		dummy float32 // Completes 3*vec3
	}
}
// NewPoint creates and returns a point light with the specified color and
// intensity. Both decay factors default to 1.0.
func NewPoint(color *math32.Color, intensity float32) *Point {
	p := new(Point)
	p.Node.Init(p)
	p.color = *color
	p.intensity = intensity
	// Create the uniform cache, then push the initial values through the
	// setters so the packed uniform data is populated consistently.
	p.uni.Init("PointLight")
	p.SetColor(color)
	p.SetIntensity(intensity)
	p.SetLinearDecay(1.0)
	p.SetQuadraticDecay(1.0)
	return p
}
// SetColor sets the color of this light. The uniform block stores the
// color pre-multiplied by the current intensity.
func (p *Point) SetColor(color *math32.Color) {
	p.color = *color
	scaled := p.color
	scaled.MultiplyScalar(p.intensity)
	p.udata.color = scaled
}

// Color returns the current (unscaled) color of this light.
func (p *Point) Color() math32.Color {
	return p.color
}
// SetIntensity sets the intensity of this light and refreshes the
// pre-multiplied color stored in the uniform block.
func (p *Point) SetIntensity(intensity float32) {
	p.intensity = intensity
	scaled := p.color
	scaled.MultiplyScalar(p.intensity)
	p.udata.color = scaled
}

// Intensity returns the current intensity of this light.
func (p *Point) Intensity() float32 {
	return p.intensity
}
// SetLinearDecay sets the linear distance-decay factor of this light.
func (p *Point) SetLinearDecay(decay float32) {
	p.udata.linearDecay = decay
}

// LinearDecay returns the current linear distance-decay factor.
func (p *Point) LinearDecay() float32 {
	return p.udata.linearDecay
}
// SetQuadraticDecay sets the quadratic distance-decay factor of this light.
func (p *Point) SetQuadraticDecay(decay float32) {
	p.udata.quadraticDecay = decay
}

// QuadraticDecay returns the current quadratic distance-decay factor.
func (p *Point) QuadraticDecay() float32 {
	return p.udata.quadraticDecay
}
// RenderSetup is called by the engine before rendering the scene. It
// transforms the light's world position into camera coordinates and
// transfers the packed uniform data (3 vec3: color, position, decays) to
// the GPU at the slot for light index idx.
func (lp *Point) RenderSetup(gs *gls.GLS, rinfo *core.RenderInfo, idx int) {
	// Calculates light position in camera coordinates and updates uniform
	var pos math32.Vector3
	lp.WorldPosition(&pos)
	pos4 := math32.Vector4{pos.X, pos.Y, pos.Z, 1.0}
	pos4.ApplyMatrix4(&rinfo.ViewMatrix)
	lp.udata.position.X = pos4.X
	lp.udata.position.Y = pos4.Y
	lp.udata.position.Z = pos4.Z
	// Transfer uniform data: the location is offset by 3 vec3 per light.
	const vec3count = 3
	location := lp.uni.LocationIdx(gs, vec3count*int32(idx))
	gs.Uniform3fv(location, vec3count, &lp.udata.color.R)
}
package solver
import "fmt"
// yWing removes candidates. If a cell has two candidates (AB) and in the same column as AB is a cell containing AC and in the same row as AB is a cell containing BC, then if AB evaluates to A, then AC must be C, and similarly, if AB evaluates to B, then BC must be B. Therefore, the cell at the other intersection of AC and BC cannot contain a C, since C must appear in either AC or BC, therefore C can be removed from that last cell. It returns true if it changes any cells.
//
// Implementation notes: cells are candidate bitmasks; count[mask] is
// assumed to be a popcount table (bivalue cells have count == 2) — confirm
// against the package's tables. box/col/row and verbose are package-level
// state.
//
// NOTE(review): the candidate-pair loops run c1i < len-2 and c2i < len-1,
// which never considers the LAST candidate as either wing; the usual pair
// iteration would be c1i < len-1, c2i < len. Confirm whether this is
// intentional.
func (gr *Grid) yWing() bool {
	res := false
	// Visit every cell once, using the boxes as the covering units.
	for _, b := range box.unit {
		for _, p := range b {
			// The pivot must be a bivalue cell (candidates AB).
			cl := *gr.pt(p)
			if count[cl] != 2 {
				continue
			}
			// Collect bivalue peers sharing exactly one candidate with the pivot.
			candidates := gr.findCandidates(p, p)
			for c1i := 0; c1i < len(candidates)-2; c1i++ {
				c1 := candidates[c1i]
				v1 := *gr.pt(c1)
				n1 := neighbors(c1)
				for c2i := c1i + 1; c2i < len(candidates)-1; c2i++ {
					c2 := candidates[c2i]
					v2 := *gr.pt(c2)
					// The two wings plus pivot must span exactly three
					// candidates, and together the wings must cover both
					// pivot candidates.
					if count[v1|v2] != 3 || cl&v1|cl&v2 != cl {
						continue
					}
					n2 := neighbors(c2)
					// overlap marks cells seen by BOTH wings (minus the pivot);
					// the shared wing candidate can be eliminated there.
					var overlap [9][9]bool
					for r := 0; r < 9; r++ {
						for c := 0; c < 9; c++ {
							if n1[r][c] && n2[r][c] {
								overlap[r][c] = true
							}
						}
					}
					overlap[p.r][p.c] = false
					for r := 0; r < 9; r++ {
						for c := 0; c < 9; c++ {
							if overlap[r][c] {
								// bits = candidates in the wings that are NOT pivot
								// candidates, i.e. the shared candidate C.
								bits := (*gr.pt(c1) | *gr.pt(c2)) &^ cl
								if (&gr[r][c]).andNot(bits) {
									res = true
									if verbose >= 1 {
										fmt.Printf("ywing: %s, %s, %s causes clearing %s from (%d, %d)\n",
											p, c1, c2, bits.digits(), r, c)
									}
									if verbose >= 3 {
										gr.Display()
									}
								}
							}
						}
					}
				}
			}
		}
	}
	if res && verbose == 2 {
		gr.Display()
	}
	return res
}
// findCandidates gathers candidate wing cells for the pivot p from its box,
// column and row (in that order), excluding the cell `skip`.
func (gr *Grid) findCandidates(p, skip point) []point {
	units := []*[9]point{
		&box.unit[boxOf(p.r, p.c)],
		&col.unit[p.c],
		&row.unit[p.r],
	}
	var ps []point
	for _, u := range units {
		ps = append(ps, gr.findCandidatesUnit(u, p, skip)...)
	}
	return ps
}
// findCandidatesUnit scans one unit for bivalue cells that share exactly
// one candidate with the pivot cell cp, skipping the cell `skip`.
func (gr *Grid) findCandidatesUnit(u *[9]point, cp, skip point) []point {
	var ps []point
	pivot := *gr.pt(cp)
	for _, p := range u {
		if p == skip {
			continue
		}
		cand := *gr.pt(p)
		// Keep only bivalue cells whose overlap with the pivot is a single
		// candidate.
		if count[cand] == 2 && count[pivot&cand] == 1 {
			ps = append(ps, p)
		}
	}
	return ps
}
func neighbors(pt point) *[9][9]bool {
var n [9][9]bool
for _, u := range []*[9]point{&box.unit[boxOf(pt.r, pt.c)], &col.unit[pt.c], &row.unit[pt.r]} {
for _, p := range u {
if p == pt {
continue
}
n[p.r][p.c] = true
}
}
return &n
} | solver/ywing.go | 0.70791 | 0.497925 | ywing.go | starcoder |
package slice
import (
"reflect"
)
// UnionGeneric returns the union of the two slices left and right, with
// right's new members appended after left. Duplicates already present in
// left are kept. A nil result is returned only when both inputs are nil.
func UnionGeneric(left, right interface{}) interface{} {
	if left == nil && right == nil {
		return nil
	}
	lv, rv := reflect.ValueOf(left), reflect.ValueOf(right)
	return UnionValue(lv, rv).Interface()
}
// UnionValue returns union set of left and right, with right follows left.
// The duplicate members in left are kept.
func UnionValue(left, right reflect.Value) reflect.Value {
	var result reflect.Value
	if left.IsValid() {
		// Copy left so the appends below never mutate the caller's slice.
		length := left.Len()
		result = reflect.MakeSlice(left.Type(), length, length)
		reflect.Copy(result, left)
	} else if right.IsValid() {
		// No left operand: start from the zero (nil) slice of right's type.
		result = reflect.Zero(right.Type())
	} else {
		// Neither side is valid: return the invalid zero Value.
		return result
	}
	if right.IsValid() {
		length := right.Len()
		for i := 0; i < length; i++ {
			// Append each member of right not already present in the result.
			v := right.Index(i)
			if !ContainsValue(result, v.Interface()) {
				result = reflect.Append(result, v)
			}
		}
	}
	return result
}
// UnionInterface returns the union of left and right, with right's new
// members appended after left. Duplicates already present in left are kept.
func UnionInterface(left, right []interface{}) []interface{} {
	result := make([]interface{}, len(left))
	copy(result, left)
	for _, item := range right {
		if ContainsInterface(result, item) {
			continue
		}
		result = append(result, item)
	}
	return result
}
// UnionString returns the union of left and right, with right's new
// members appended after left. Duplicates already present in left are kept.
func UnionString(left, right []string) []string {
	result := make([]string, len(left))
	copy(result, left)
	for _, item := range right {
		if ContainsString(result, item) {
			continue
		}
		result = append(result, item)
	}
	return result
}
// UnionBool returns the union of left and right, with right's new members
// appended after left. Duplicates already present in left are kept.
func UnionBool(left, right []bool) []bool {
	result := make([]bool, len(left))
	copy(result, left)
	for _, item := range right {
		if ContainsBool(result, item) {
			continue
		}
		result = append(result, item)
	}
	return result
}
// UnionInt returns the union of left and right, with right's new members
// appended after left. Duplicates already present in left are kept.
func UnionInt(left, right []int) []int {
	result := make([]int, len(left))
	copy(result, left)
	for _, item := range right {
		if ContainsInt(result, item) {
			continue
		}
		result = append(result, item)
	}
	return result
}

// UnionInt8 returns the union of left and right, with right's new members
// appended after left. Duplicates already present in left are kept.
func UnionInt8(left, right []int8) []int8 {
	result := make([]int8, len(left))
	copy(result, left)
	for _, item := range right {
		if ContainsInt8(result, item) {
			continue
		}
		result = append(result, item)
	}
	return result
}

// UnionInt16 returns the union of left and right, with right's new members
// appended after left. Duplicates already present in left are kept.
func UnionInt16(left, right []int16) []int16 {
	result := make([]int16, len(left))
	copy(result, left)
	for _, item := range right {
		if ContainsInt16(result, item) {
			continue
		}
		result = append(result, item)
	}
	return result
}

// UnionInt32 returns the union of left and right, with right's new members
// appended after left. Duplicates already present in left are kept.
func UnionInt32(left, right []int32) []int32 {
	result := make([]int32, len(left))
	copy(result, left)
	for _, item := range right {
		if ContainsInt32(result, item) {
			continue
		}
		result = append(result, item)
	}
	return result
}

// UnionInt64 returns the union of left and right, with right's new members
// appended after left. Duplicates already present in left are kept.
func UnionInt64(left, right []int64) []int64 {
	result := make([]int64, len(left))
	copy(result, left)
	for _, item := range right {
		if ContainsInt64(result, item) {
			continue
		}
		result = append(result, item)
	}
	return result
}
// UnionUint returns union set of left and right, with right follows left.
// The duplicate members in left are kept.
func UnionUint(left, right []uint) []uint {
result := make([]uint, len(left))
copy(result, left)
for _, v := range right {
if !ContainsUint(result, v) {
result = append(result, v)
}
}
return result
}
// UnionUint8 returns union set of left and right, with right follows left.
// The duplicate members in left are kept.
func UnionUint8(left, right []uint8) []uint8 {
result := make([]uint8, len(left))
copy(result, left)
for _, v := range right {
if !ContainsUint8(result, v) {
result = append(result, v)
}
}
return result
}
// UnionUint16 returns union set of left and right, with right follows left.
// The duplicate members in left are kept.
func UnionUint16(left, right []uint16) []uint16 {
result := make([]uint16, len(left))
copy(result, left)
for _, v := range right {
if !ContainsUint16(result, v) {
result = append(result, v)
}
}
return result
}
// UnionUint32 returns union set of left and right, with right follows left.
// The duplicate members in left are kept.
func UnionUint32(left, right []uint32) []uint32 {
result := make([]uint32, len(left))
copy(result, left)
for _, v := range right {
if !ContainsUint32(result, v) {
result = append(result, v)
}
}
return result
}
// UnionUint64 returns union set of left and right, with right follows left.
// The duplicate members in left are kept.
func UnionUint64(left, right []uint64) []uint64 {
result := make([]uint64, len(left))
copy(result, left)
for _, v := range right {
if !ContainsUint64(result, v) {
result = append(result, v)
}
}
return result
} | union.go | 0.854536 | 0.562477 | union.go | starcoder |
package tlv
import (
"bytes"
"fmt"
"io"
"sort"
"github.com/eacsuite/eacd/btcec"
)
// Type is a 64-bit identifier for a TLV record.
type Type uint64

// TypeMap is a map of parsed Types. The map values are byte slices. If the
// byte slice is nil, the type was successfully parsed. Otherwise the value
// is a byte slice containing the encoded data.
type TypeMap map[Type][]byte
// Encoder is a signature for methods that can encode TLV values. An error
// should be returned if the Encoder cannot support the underlying type of
// val. The provided scratch buffer must be non-nil.
type Encoder func(w io.Writer, val interface{}, buf *[8]byte) error

// Decoder is a signature for methods that can decode TLV values. l is the
// length in bytes of the encoded value. An error should be returned if the
// Decoder cannot support the underlying type of val. The provided scratch
// buffer must be non-nil.
type Decoder func(r io.Reader, val interface{}, buf *[8]byte, l uint64) error

// ENOP is an encoder that doesn't modify the io.Writer and never fails.
func ENOP(io.Writer, interface{}, *[8]byte) error { return nil }

// DNOP is a decoder that doesn't modify the io.Reader and never fails.
func DNOP(io.Reader, interface{}, *[8]byte, uint64) error { return nil }
// SizeFunc is a function that can compute the length of a given field.
// Since the size of the underlying field can change, this allows the size
// of the field to be evaluated at the time of encoding.
type SizeFunc func() uint64

// SizeVarBytes returns a SizeFunc reporting the current length of the byte
// slice pointed to by e (re-evaluated on every call).
func SizeVarBytes(e *[]byte) SizeFunc {
	return func() uint64 {
		n := len(*e)
		return uint64(n)
	}
}
// RecordProducer is an interface for objects that can produce a Record
// capable of encoding and/or decoding the producer as a Record.
type RecordProducer interface {
	// Record returns a Record that can be used to encode or decode the
	// backing object.
	Record() Record
}
// Record holds the required information to encode or decode a TLV record.
// Exactly one of staticSize (fixed-length values) or sizeFunc
// (variable-length values) is meaningful; Size() prefers sizeFunc when set.
type Record struct {
	value interface{}
	typ Type
	staticSize uint64
	sizeFunc SizeFunc
	encoder Encoder
	decoder Decoder
}
// Size returns the size of the record's value in bytes: the dynamically
// evaluated size when a sizeFunc is set, the static size otherwise.
func (rec *Record) Size() uint64 {
	if rec.sizeFunc != nil {
		return rec.sizeFunc()
	}
	return rec.staticSize
}
// Type returns the type identifier of the underlying TLV record.
func (rec *Record) Type() Type {
	return rec.typ
}
// Encode writes the raw encoding of this single TLV record to w. Useful
// when a caller wants the encoding of one record outside the context of
// the Stream struct.
func (rec *Record) Encode(w io.Writer) error {
	var scratch [8]byte
	return rec.encoder(w, rec.value, &scratch)
}
// Decode reads this single TLV record's value (l encoded bytes) from r.
// Useful when a caller wants to decode one record outside the context of
// the Stream struct.
func (rec *Record) Decode(r io.Reader, l uint64) error {
	var scratch [8]byte
	return rec.decoder(r, rec.value, &scratch, l)
}
// MakePrimitiveRecord creates a record for common types: unsigned integers,
// fixed-size byte arrays, public keys, and variable-length byte slices.
// Fixed-width types get a static size; []byte gets a dynamic SizeFunc.
// It panics on any unsupported type, so it must only be called with the
// types listed in the switch below.
func MakePrimitiveRecord(typ Type, val interface{}) Record {
	var (
		staticSize uint64
		sizeFunc SizeFunc
		encoder Encoder
		decoder Decoder
	)
	switch e := val.(type) {
	// Fixed-width unsigned integers.
	case *uint8:
		staticSize = 1
		encoder = EUint8
		decoder = DUint8
	case *uint16:
		staticSize = 2
		encoder = EUint16
		decoder = DUint16
	case *uint32:
		staticSize = 4
		encoder = EUint32
		decoder = DUint32
	case *uint64:
		staticSize = 8
		encoder = EUint64
		decoder = DUint64
	// Fixed-size byte arrays.
	case *[32]byte:
		staticSize = 32
		encoder = EBytes32
		decoder = DBytes32
	case *[33]byte:
		staticSize = 33
		encoder = EBytes33
		decoder = DBytes33
	// Compressed secp256k1 public keys (33 bytes on the wire).
	case **btcec.PublicKey:
		staticSize = 33
		encoder = EPubKey
		decoder = DPubKey
	case *[64]byte:
		staticSize = 64
		encoder = EBytes64
		decoder = DBytes64
	// Variable-length byte slices: size is evaluated at encoding time.
	case *[]byte:
		sizeFunc = SizeVarBytes(e)
		encoder = EVarBytes
		decoder = DVarBytes
	default:
		panic(fmt.Sprintf("unknown primitive type: %T", val))
	}
	return Record{
		value: val,
		typ: typ,
		staticSize: staticSize,
		sizeFunc: sizeFunc,
		encoder: encoder,
		decoder: decoder,
	}
}
// MakeStaticRecord creates a record for a fixed-size field with the given
// encoder and decoder.
func MakeStaticRecord(typ Type, val interface{}, size uint64, encoder Encoder,
	decoder Decoder) Record {

	rec := Record{
		typ:        typ,
		value:      val,
		staticSize: size,
	}
	rec.encoder = encoder
	rec.decoder = decoder
	return rec
}
// MakeDynamicRecord creates a record whose size may vary; the length is
// determined at encoding time by calling sizeFunc.
func MakeDynamicRecord(typ Type, val interface{}, sizeFunc SizeFunc,
	encoder Encoder, decoder Decoder) Record {

	rec := Record{
		typ:      typ,
		value:    val,
		sizeFunc: sizeFunc,
	}
	rec.encoder = encoder
	rec.decoder = decoder
	return rec
}
// RecordsToMap encodes a series of TLV records as raw key-value pairs in
// the form of a map keyed by record type. It fails on the first record
// whose encoder returns an error.
func RecordsToMap(records []Record) (map[uint64][]byte, error) {
	tlvMap := make(map[uint64][]byte, len(records))
	for _, rec := range records {
		var buf bytes.Buffer
		err := rec.Encode(&buf)
		if err != nil {
			return nil, err
		}
		tlvMap[uint64(rec.Type())] = buf.Bytes()
	}
	return tlvMap, nil
}
// StubEncoder builds a stub tlv.Encoder from a pre-serialized value: the
// returned encoder simply writes v verbatim. Useful for records whose true
// underlying value is unknown and only its serialization is available.
func StubEncoder(v []byte) Encoder {
	return func(w io.Writer, _ interface{}, _ *[8]byte) error {
		_, err := w.Write(v)
		return err
	}
}
// MapToRecords converts the passed raw TLV map into a slice of regular
// tlv.Record instances, returned sorted by type.
func MapToRecords(tlvMap map[uint64][]byte) []Record {
	records := make([]Record, 0, len(tlvMap))
	for typ, payload := range tlvMap {
		// No decoder is attached: the true type is unknown, so the Record
		// is only usable for display and re-encoding.
		rec := MakeStaticRecord(
			Type(typ), nil, uint64(len(payload)), StubEncoder(payload), nil,
		)
		records = append(records, rec)
	}
	SortRecords(records)
	return records
}
// SortRecords is a helper function that will sort a slice of records in place
// according to their type.
func SortRecords(records []Record) {
if len(records) == 0 {
return
}
sort.Slice(records, func(i, j int) bool {
return records[i].Type() < records[j].Type()
})
} | tlv/record.go | 0.796411 | 0.403949 | record.go | starcoder |
package continuous
import (
"math"
"sort"
pb "gopkg.in/cheggaaa/pb.v1"
)
// KraskovStoegbauerGrassberger1 is an implementation of the first algorithm
// presented in Kraskov, Stögbauer, and Grassberger, "Estimating mutual
// information", Phys. Rev. E, 69:066138, Jun 2004.
// xy holds the joint samples; xIndices/yIndices select the columns of the
// two marginal spaces; k is the neighbour count; eta toggles a progress
// bar. The function assumes that the data xy is normalised column-wise.
//
// Changes: boolean comparisons `eta == true` replaced by the idiomatic
// `eta`; behaviour is otherwise unchanged.
func KraskovStoegbauerGrassberger1(xy [][]float64, xIndices, yIndices []int, k int, eta bool) (r float64) {
	hk := Harmonic(k)        // h(k)
	hN := Harmonic(len(xy))  // h(N)

	var bar *pb.ProgressBar
	if eta {
		bar = pb.StartNew(len(xy))
	}

	for t := 0; t < len(xy); t++ {
		// epsilon is the max-norm distance to the k-th nearest neighbour.
		epsilon := ksgGetEpsilon(k, xy[t], xy, xIndices, yIndices)
		cNx := ksgCount(epsilon, xy[t], xy, xIndices) // N_x
		hNx := Harmonic(cNx + 1)                      // h(N_x)
		cNy := ksgCount(epsilon, xy[t], xy, yIndices) // N_y
		hNy := Harmonic(cNy + 1)                      // h(N_y)
		r -= hNx + hNy
		if eta {
			bar.Increment()
		}
	}
	if eta {
		bar.Finish()
	}

	r /= float64(len(xy))
	r += hk + hN

	return
}
// KraskovStoegbauerGrassberger2 is an implementation of the second
// algorithm presented in Kraskov, Stögbauer, and Grassberger, "Estimating
// mutual information", Phys. Rev. E, 69:066138, Jun 2004.
// xy holds the joint samples; xIndices/yIndices select the columns of the
// two marginal spaces; k is the neighbour count; eta toggles a progress
// bar. The function assumes that the data xy is normalised column-wise.
//
// Changes: boolean comparisons `eta == true` replaced by the idiomatic
// `eta`; behaviour is otherwise unchanged.
// NOTE(review): this variant finishes the bar with FinishPrint("Finished")
// while KSG1 uses a plain Finish() — confirm the asymmetry is intentional.
func KraskovStoegbauerGrassberger2(xy [][]float64, xIndices, yIndices []int, k int, eta bool) (r float64) {
	hk := Harmonic(k)
	hN := Harmonic(len(xy))

	var bar *pb.ProgressBar
	if eta {
		bar = pb.StartNew(len(xy))
	}

	for t := 0; t < len(xy); t++ {
		// epsilon is the max-norm distance to the k-th nearest neighbour.
		epsilon := ksgGetEpsilon(k, xy[t], xy, xIndices, yIndices)
		cNx := ksgCount(epsilon, xy[t], xy, xIndices)
		cNy := ksgCount(epsilon, xy[t], xy, yIndices)
		// Algorithm 2 uses h(N_x) and h(N_y) without the +1 of algorithm 1.
		r -= Harmonic(cNx) + Harmonic(cNy)
		if eta {
			bar.Increment()
		}
	}
	if eta {
		bar.FinishPrint("Finished")
	}

	r /= float64(len(xy))
	r += hk + hN - 1.0/float64(k)

	return
}
// ksgGetEpsilon calculates epsilon_k(t) as defined by Frenzel & Pompe, 2007:
// the distance to the k-th nearest neighbour of the point xy within data,
// where distance is the max-norm over the X and Y marginal sub-spaces.
func ksgGetEpsilon(k int, xy []float64, data [][]float64, xIndices, yIndices []int) float64 {
	// make with an explicit capacity equal to the length was redundant.
	distances := make([]float64, len(data))
	for t := range data {
		distances[t] = ksgMaxNorm2(xy, data[t], xIndices, yIndices)
	}
	sort.Float64s(distances)
	return distances[k-1] // we start to count at zero
}
// ksgMaxNorm2 returns the larger of the X-marginal and Y-marginal distances
// between points a and b, i.e. the max-norm over the two sub-spaces.
func ksgMaxNorm2(a, b []float64, xIndices, yIndices []int) float64 {
	return math.Max(Distance(a, b, xIndices), Distance(a, b, yIndices))
}
// ksgCount count the number of points for which the x or y coordinate is
// closer than epsilon, where the ksgDistance is measured by the max-norm
func ksgCount(epsilon float64, xy []float64, data [][]float64, indices []int) (c int) {
for t := 0; t < len(data); t++ {
if Distance(xy, data[t], indices) < epsilon {
c++
}
}
return
} | continuous/KraskovStoegbauerGrassberger.go | 0.657978 | 0.529932 | KraskovStoegbauerGrassberger.go | starcoder |
package entity
import (
"github.com/df-mc/dragonfly/server/entity/damage"
"github.com/df-mc/dragonfly/server/entity/healing"
"github.com/df-mc/dragonfly/server/world"
"github.com/go-gl/mathgl/mgl64"
)
// Living represents an entity that is alive and that has health. It is able to take damage and will die upon
// taking fatal damage. It embeds world.Entity, so implementations also carry
// the generic entity behaviour.
type Living interface {
	world.Entity
	// Health returns the health of the entity.
	Health() float64
	// MaxHealth returns the maximum health of the entity.
	MaxHealth() float64
	// SetMaxHealth changes the maximum health of the entity to the value passed.
	SetMaxHealth(v float64)
	// AttackImmune checks if the entity is currently immune to entity attacks. Entities typically turn
	// immune for half a second after being attacked.
	AttackImmune() bool
	// Hurt hurts the entity for a given amount of damage. The source passed represents the cause of the
	// damage, for example damage.SourceEntityAttack if the entity is attacked by another entity.
	// If the final damage exceeds the health that the player currently has, the entity is killed.
	Hurt(damage float64, source damage.Source)
	// Heal heals the entity for a given amount of health. The source passed represents the cause of the
	// healing, for example healing.SourceFood if the entity healed by having a full food bar. If the health
	// added to the original health exceeds the entity's max health, Heal may not add the full amount.
	Heal(health float64, source healing.Source)
	// KnockBack knocks the entity back with a given force and height. A source is passed which indicates the
	// source of the velocity, typically the position of an attacking entity. The source is used to calculate
	// the direction which the entity should be knocked back in.
	KnockBack(src mgl64.Vec3, force, height float64)
	// Speed returns the current speed of the living entity. The default value is different for each entity.
	Speed() float64
	// SetSpeed sets the speed of an entity to a new value.
	SetSpeed(float64)
}
package vm
import (
"fmt"
"math/big"
"github.com/tenderly/solidity-hmr/ethereum/common/math"
)
// Memory implements a simple memory model for the ethereum virtual machine.
type Memory struct {
	// store is the flat byte backing for EVM memory; it only ever grows
	// (see Resize).
	store []byte
	// lastGasCost caches the gas charged for the last expansion.
	lastGasCost uint64
}

// NewMemory returns a new memory model with an empty backing store.
func NewMemory() *Memory {
	return &Memory{}
}
// Set copies value into memory at [offset, offset+size). The store must have
// been resized to cover the target range before calling; a write past the end
// panics.
func (m *Memory) Set(offset, size uint64, value []byte) {
	// A zero size is a no-op even with a non-zero offset, because calcMemSize
	// (common.go) can return 0 for zero-sized operations.
	if size == 0 {
		return
	}
	if offset+size > uint64(len(m.store)) {
		panic("invalid memory: store empty")
	}
	copy(m.store[offset:offset+size], value)
}
// Set32 sets the 32 bytes starting at offset to the value of val, left-padded
// with zeroes to 32 bytes. The store must be resized prior to the call; an
// out-of-range write panics.
func (m *Memory) Set32(offset uint64, val *big.Int) {
	if offset+32 > uint64(len(m.store)) {
		panic("invalid memory: store empty")
	}
	// Zero the target word, then fill in the relevant bits.
	word := m.store[offset : offset+32]
	for i := range word {
		word[i] = 0
	}
	math.ReadBits(val, word)
}
// Resize grows the memory to at least size bytes; it never shrinks.
func (m *Memory) Resize(size uint64) {
	if cur := uint64(m.Len()); cur < size {
		m.store = append(m.store, make([]byte, size-cur)...)
	}
}
// Get returns a fresh copy of the size bytes at offset. A zero size yields
// nil, and an offset at or beyond the store yields a nil slice as well.
func (m *Memory) Get(offset, size int64) (cpy []byte) {
	if size == 0 {
		return nil
	}
	if int(offset) >= len(m.store) {
		return
	}
	cpy = make([]byte, size)
	copy(cpy, m.store[offset:offset+size])
	return
}
// GetPtr returns a view (not a copy) of the size bytes at offset, or nil when
// size is zero or offset lies at/beyond the end of the store.
func (m *Memory) GetPtr(offset, size int64) []byte {
	if size == 0 || int(offset) >= len(m.store) {
		return nil
	}
	return m.store[offset : offset+size]
}
// Len returns the length of the backing slice in bytes.
func (m *Memory) Len() int {
	return len(m.store)
}

// Data returns the backing slice itself (no copy); callers must not assume
// it stays valid across a Resize.
func (m *Memory) Data() []byte {
	return m.store
}
// Print dumps the content of the memory.
func (m *Memory) Print() {
fmt.Printf("### mem %d bytes ###\n", len(m.store))
if len(m.store) > 0 {
addr := 0
for i := 0; i+32 <= len(m.store); i += 32 {
fmt.Printf("%03d: % x\n", addr, m.store[i:i+32])
addr++
}
} else {
fmt.Println("-- empty --")
}
fmt.Println("####################")
} | ethereum/core/vm/memory.go | 0.687 | 0.466299 | memory.go | starcoder |
A chart repository is an HTTP server that provides information on charts. A local
repository cache is an on-disk representation of a chart repository.
There are two important file formats for chart repositories.
The first is the 'index.yaml' format, which is expressed like this:
apiVersion: v1
entries:
frobnitz:
- created: 2016-09-29T12:14:34.830161306-06:00
description: This is a frobniz.
digest: 587bd19a9bd9d2bc4a6d25ab91c8c8e7042c47b4ac246e37bf8e1e74386190f4
home: http://example.com
keywords:
- frobnitz
- sprocket
- dodad
maintainers:
- email: <EMAIL>
name: The Helm Team
- email: <EMAIL>
name: Someone Else
name: frobnitz
urls:
- http://example-charts.com/testdata/repository/frobnitz-1.2.3.tgz
version: 1.2.3
sprocket:
- created: 2016-09-29T12:14:34.830507606-06:00
description: This is a sprocket
digest: 8505ff813c39502cc849a38e1e4a8ac24b8e6e1dcea88f4c34ad9b7439685ae6
home: http://example.com
keywords:
- frobnitz
- sprocket
- dodad
maintainers:
- email: <EMAIL>
name: The Helm Team
- email: <EMAIL>
name: Someone Else
name: sprocket
urls:
- http://example-charts.com/testdata/repository/sprocket-1.2.0.tgz
version: 1.2.0
generated: 2016-09-29T12:14:34.829721375-06:00
An index.yaml file contains the necessary descriptive information about what
charts are available in a repository, and how to get them.
The second file format is the repositories.yaml file format. This file is for
facilitating local cached copies of one or more chart repositories.
The format of a repository.yaml file is:
apiVersion: v1
generated: TIMESTAMP
repositories:
- name: stable
url: http://example.com/charts
cache: stable-index.yaml
- name: incubator
url: http://example.com/incubator
cache: incubator-index.yaml
This file maps three bits of information about a repository:
- The name the user uses to refer to it
- The fully qualified URL to the repository (index.yaml will be appended)
- The name of the local cachefile
The format for both files was changed after Helm v2.0.0-Alpha.4. Helm is not
backwards compatible with those earlier versions.
*/
package repo | src/vendor/k8s.io/helm/pkg/repo/doc.go | 0.713432 | 0.532121 | doc.go | starcoder |
package main
import (
"github.com/guanyilun/go-sampling/sampling"
"fmt"
)
// Automata is a linear reward-inaction (L_R-I) learning automaton over a
// fixed set of actions, sampled according to an evolving probability vector.
type Automata struct {
	// limit is the maximum number of Enum draws before the automaton
	// deactivates itself.
	limit int
	// actions is the number of selectable actions.
	actions int
	// probs holds the per-action selection probabilities.
	probs []float64
	// active is false once counter reaches limit (or SetActive(false)).
	active bool
	sampling *sampling.Sampling // May not be necessary
	// counter tracks how many times Enum has drawn an action.
	counter int
	// delta is initialised to a large value; its use is not visible here.
	delta int
	// reward and penalize are the L_R-I learning rates (penalize is 0 for
	// the reward-inaction model).
	reward float64
	penalize float64
	// threshold is the probability above which IsStable reports true.
	threshold float64
}
// NewAutomata constructs a learning automaton with the given number of
// actions and a draw limit. Probabilities start uniform, and the defaults
// follow the L_R-I model of JA (2013): reward 0.09, penalty 0, stability
// threshold 0.9.
func NewAutomata(actions, limit int) *Automata {
	probs := make([]float64, actions)
	for i := range probs {
		probs[i] = 1 / float64(actions)
	}
	a := &Automata{
		limit:     limit,
		actions:   actions,
		probs:     probs,
		active:    true,
		counter:   0,
		sampling:  sampling.NewSampling(),
		delta:     100000, // A large number
		reward:    0.09,
		penalize:  0,
		threshold: 0.9,
	}
	a.sampling.AddBundleProbs(a.probs)
	return a
}
// Enum draws the next action while the automaton is active, counting the
// draw and deactivating once the limit is reached. When inactive it returns
// the sentinel 0.
func (a *Automata) Enum() int {
	if !a.active {
		// TODO: Error handling isn't done properly; 0 doubles as an action.
		return 0
	}
	a.counter++
	if a.counter == a.limit {
		a.active = false
	}
	return a.sampling.Sample()
}
// ReEnum draws another action sample without touching the draw counter or
// the active flag (unlike Enum).
func (a *Automata) ReEnum() int {
	return a.sampling.Sample()
}
// IsActive reports whether the automaton may still draw actions, i.e. its
// draw counter has not yet reached the configured limit.
func (a *Automata) IsActive() bool {
	return a.active
}
// Reward reinforces action j using the linear reward-inaction (L_R-I)
// scheme: the chosen action's probability moves towards 1 by rate r, all
// others shrink by factor (1-r), after which the vector is re-normalized.
// The previous implementation also accumulated the probabilities into a
// local `sum` that was never read; that dead code has been removed.
func (a *Automata) Reward(j int) {
	r := a.reward
	for i := range a.probs {
		if i == j {
			a.probs[i] = a.probs[i] + r*(1-a.probs[i])
		} else {
			a.probs[i] = (1 - r) * a.probs[i]
		}
	}
	// Normalize the probabilities after modifying
	a.Normalize()
}
// Print is a facilitating function for debug purposes: it writes the
// automaton's key state (delta, activity, probability vector and stability)
// to stdout on a single line.
func (a *Automata) Print() {
	fmt.Printf("[DEBUG] Automata: delta = %v; active = %v; probs = %f; stable = %v\n", a.delta, a.IsActive(), a.probs, a.IsStable())
}
// Penalize is intentionally a no-op: under the reward-inaction (L_R-I)
// scheme the penalty rate is 0, so the update below would leave the
// probabilities unchanged. The implementation is kept, commented out, in
// case a reward-penalty scheme is needed in the future.
func (a *Automata) Penalize(j int) {
	// Assuming learning reward-penalty (L_R-I) algorithm, r = 0, and
	// it can be seen that that Penalize() does nothing in such case,
	// hence we will comment this part unless required in the future.
	/*
	r := a.penalize
	for i := range a.probs {
		if i == j {
			a.probs[i] = (1 - r) * a.probs[i]
		} else {
			a.probs[i] = r / (a.actions - 1) + (1 - r) * a.probs[i]
		}
	}
	// Normalize the probabilities after modifying
	a.Normalize()
	*/
}
// Normalize rescales the probability vector so that it sums to 1.
func (a *Automata) Normalize() {
	var total float64
	for _, p := range a.probs {
		total += p
	}
	for i, p := range a.probs {
		a.probs[i] = p / total
	}
}
// IsStable reports whether any single action's probability has crossed the
// configured threshold, i.e. the automaton has converged on an action.
func (a *Automata) IsStable() bool {
	for _, p := range a.probs {
		if p > a.threshold {
			return true
		}
	}
	return false
}
// Reset reactivates the automaton and clears its draw counter, allowing a
// fresh round of Enum draws up to the limit. (Also drops the stray
// non-gofmt semicolon from the original.)
func (a *Automata) Reset() {
	a.counter = 0
	a.active = true
}
func (a *Automata) SetActive(val bool) {
a.active = val;
} | go/automata.go | 0.574634 | 0.428831 | automata.go | starcoder |
package output
import (
"fmt"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message/batch"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/output/writer"
"github.com/Jeffail/benthos/v3/lib/types"
)
//------------------------------------------------------------------------------
// init registers the kinesis output type with the package-level constructor
// registry, including its user-facing documentation, config sanitisation
// hook, and the async/batching capability flags.
func init() {
	Constructors[TypeKinesis] = TypeSpec{
		constructor: NewKinesis,
		Description: `
Sends messages to a Kinesis stream.
Both the ` + "`partition_key`" + `(required) and ` + "`hash_key`" + ` (optional)
fields can be dynamically set using function interpolations described
[here](/docs/configuration/interpolation#functions). When sending batched messages the
interpolations are performed per message part.
### Credentials
By default Benthos will use a shared credentials file when connecting to AWS
services. It's also possible to set them explicitly at the component level,
allowing you to transfer data across accounts. You can find out more
[in this document](/docs/guides/aws).`,
		// Strip defaults/batching noise from the config before display.
		sanitiseConfigFunc: func(conf Config) (interface{}, error) {
			return sanitiseWithBatch(conf.Kinesis, conf.Kinesis.Batching)
		},
		Async:   true,
		Batches: true,
	}
}
//------------------------------------------------------------------------------
// NewKinesis creates a new Kinesis output type. With MaxInFlight == 1 a
// synchronous writer is used, otherwise an async writer; if a batching
// policy is configured the writer is additionally wrapped in a Batcher.
func NewKinesis(conf Config, mgr types.Manager, log log.Modular, stats metrics.Type) (Type, error) {
	kin, err := writer.NewKinesis(conf.Kinesis, log, stats)
	if err != nil {
		return nil, err
	}
	var w Type
	if conf.Kinesis.MaxInFlight == 1 {
		w, err = NewWriter(
			TypeKinesis, kin, log, stats,
		)
	} else {
		w, err = NewAsyncWriter(
			TypeKinesis, conf.Kinesis.MaxInFlight, kin, log, stats,
		)
	}
	// Only wrap in a batcher when the writer was built successfully. The
	// inner error is named berr to avoid shadowing err, which the original
	// code did.
	if bconf := conf.Kinesis.Batching; err == nil && !bconf.IsNoop() {
		policy, berr := batch.NewPolicy(bconf, mgr, log.NewModule(".batching"), metrics.Namespaced(stats, "batching"))
		if berr != nil {
			return nil, fmt.Errorf("failed to construct batch policy: %v", berr)
		}
		w = NewBatcher(policy, w, log, stats)
	}
	return w, err
}
//------------------------------------------------------------------------------ | lib/output/kinesis.go | 0.73412 | 0.456168 | kinesis.go | starcoder |
package pipeline
import (
"fmt"
"strings"
"github.com/observiq/stanza/errors"
"github.com/observiq/stanza/operator"
"gonum.org/v1/gonum/graph/encoding/dot"
"gonum.org/v1/gonum/graph/simple"
"gonum.org/v1/gonum/graph/topo"
)
// Compile-time assertion that DirectedPipeline implements Pipeline.
var _ Pipeline = (*DirectedPipeline)(nil)

// DirectedPipeline is a pipeline backed by a directed graph
type DirectedPipeline struct {
	// Graph holds one OperatorNode per operator; edges point from an
	// operator to each of its outputs (see connectNode).
	Graph *simple.DirectedGraph
}
// Start will start the operators in a pipeline in reverse topological order,
// so that every operator's outputs are running before the operator itself.
// The local is named op rather than operator to avoid shadowing the imported
// operator package.
func (p *DirectedPipeline) Start() error {
	sortedNodes, _ := topo.Sort(p.Graph)
	for i := len(sortedNodes) - 1; i >= 0; i-- {
		op := sortedNodes[i].(OperatorNode).Operator()
		op.Logger().Debug("Starting operator")
		if err := op.Start(); err != nil {
			return err
		}
		op.Logger().Debug("Started operator")
	}
	return nil
}
// Stop will stop the operators in a pipeline in topological order, so that
// inputs stop before the outputs they feed. Stop errors are deliberately
// ignored to let every operator get a chance to shut down. The local is
// named op to avoid shadowing the imported operator package.
func (p *DirectedPipeline) Stop() error {
	sortedNodes, _ := topo.Sort(p.Graph)
	for _, node := range sortedNodes {
		op := node.(OperatorNode).Operator()
		op.Logger().Debug("Stopping operator")
		_ = op.Stop()
		op.Logger().Debug("Stopped operator")
	}
	return nil
}
// Render will render the pipeline as a dot (graphviz) graph, suitable for
// visual inspection of operator wiring.
func (p *DirectedPipeline) Render() ([]byte, error) {
	return dot.Marshal(p.Graph, "G", "", " ")
}
// Operators returns a slice of operators that make up the pipeline graph,
// in graph iteration (unspecified) order. The result is non-nil even when
// the graph is empty.
func (p *DirectedPipeline) Operators() []operator.Operator {
	ops := make([]operator.Operator, 0)
	for it := p.Graph.Nodes(); it.Next(); {
		ops = append(ops, it.Node().(OperatorNode).Operator())
	}
	return ops
}
// addNodes will add operators as nodes to the supplied graph, failing when
// two operators map to the same node ID (i.e. duplicate operator IDs). The
// loop variable is named op to avoid shadowing the imported operator package.
func addNodes(graph *simple.DirectedGraph, operators []operator.Operator) error {
	for _, op := range operators {
		node := createOperatorNode(op)
		if graph.Node(node.ID()) != nil {
			return errors.NewError(
				fmt.Sprintf("operator with id '%s' already exists in pipeline", node.Operator().ID()),
				"ensure that each operator has a unique `type` or `id`",
			)
		}
		graph.AddNode(node)
	}
	return nil
}
// connectNodes wires every node in the graph to its declared outputs and
// then verifies the result is acyclic, returning a descriptive error that
// lists the offending cycles otherwise.
func connectNodes(graph *simple.DirectedGraph) error {
	for nodes := graph.Nodes(); nodes.Next(); {
		if err := connectNode(graph, nodes.Node().(OperatorNode)); err != nil {
			return err
		}
	}
	if _, err := topo.Sort(graph); err != nil {
		return errors.NewError(
			"pipeline has a circular dependency",
			"ensure that all operators are connected in a straight, acyclic line",
			"cycles", unorderableToCycles(err.(topo.Unorderable)),
		)
	}
	return nil
}
// connectNode will connect a node to its outputs in the supplied graph. For
// every declared output it validates, in order, that the output node exists,
// that it can process entries, and that the edge is not a duplicate, before
// adding the edge.
func connectNode(graph *simple.DirectedGraph, inputNode OperatorNode) error {
	for outputOperatorID, outputNodeID := range inputNode.OutputIDs() {
		// The output must have been added to the graph beforehand.
		if graph.Node(outputNodeID) == nil {
			return errors.NewError(
				"operators cannot be connected, because the output does not exist in the pipeline",
				"ensure that the output operator is defined",
				"input_operator", inputNode.Operator().ID(),
				"output_operator", outputOperatorID,
			)
		}
		outputNode := graph.Node(outputNodeID).(OperatorNode)
		// Only operators that consume entries can be wired as outputs.
		if !outputNode.Operator().CanProcess() {
			return errors.NewError(
				"operators cannot be connected, because the output operator can not process logs",
				"ensure that the output operator can process logs (like a parser or destination)",
				"input_operator", inputNode.Operator().ID(),
				"output_operator", outputOperatorID,
			)
		}
		// Duplicate edges between the same pair are rejected.
		if graph.HasEdgeFromTo(inputNode.ID(), outputNodeID) {
			return errors.NewError(
				"operators cannot be connected, because a connection already exists",
				"ensure that only a single connection exists between the two operators",
				"input_operator", inputNode.Operator().ID(),
				"output_operator", outputOperatorID,
			)
		}
		edge := graph.NewEdge(inputNode, outputNode)
		graph.SetEdge(edge)
	}
	return nil
}
// setOperatorOutputs will set the outputs on operators that can output,
// passing each operator the full operator list so it can resolve its output
// IDs. The loop variable is named op to avoid shadowing the imported
// operator package.
func setOperatorOutputs(operators []operator.Operator) error {
	for _, op := range operators {
		if !op.CanOutput() {
			continue
		}
		if err := op.SetOutputs(operators); err != nil {
			return errors.WithDetails(err, "operator_id", op.ID())
		}
	}
	return nil
}
// NewDirectedPipeline creates a new directed pipeline from the given
// operators by resolving their outputs, adding one node per operator, and
// connecting the nodes (which also rejects cycles).
func NewDirectedPipeline(operators []operator.Operator) (*DirectedPipeline, error) {
	if err := setOperatorOutputs(operators); err != nil {
		return nil, err
	}
	g := simple.NewDirectedGraph()
	if err := addNodes(g, operators); err != nil {
		return nil, err
	}
	if err := connectNodes(g); err != nil {
		return nil, err
	}
	return &DirectedPipeline{Graph: g}, nil
}
func unorderableToCycles(err topo.Unorderable) string {
var cycles strings.Builder
for i, cycle := range err {
if i != 0 {
cycles.WriteByte(',')
}
cycles.WriteByte('(')
for _, node := range cycle {
cycles.WriteString(node.(OperatorNode).operator.ID())
cycles.Write([]byte(` -> `))
}
cycles.WriteString(cycle[0].(OperatorNode).operator.ID())
cycles.WriteByte(')')
}
return cycles.String()
} | pipeline/directed.go | 0.823151 | 0.409103 | directed.go | starcoder |
package main
import (
"fmt"
"strings"
)
// visualiseMatrix prints a single world matrix to stdout (passing nil for
// the expected matrix, so no side-by-side comparison is drawn).
func visualiseMatrix(given [][]uint8, width, height int) {
	fmt.Print(matricesToString(given, nil, width, height))
}
// in reports whether c1 appears in slice.
func (c1 cell) in(slice []cell) bool {
	for i := range slice {
		if slice[i] == c1 {
			return true
		}
	}
	return false
}
// aliveCellsToString renders two alive-cell lists side by side: cells are
// rasterised into byte matrices (0xFF = alive) and drawn with
// squaresToStrings under a comparison header.
func aliveCellsToString(given, expected []cell, width, height int) string {
	rasterise := func(alive []cell) [][]byte {
		matrix := make([][]byte, height)
		for y := 0; y < height; y++ {
			matrix[y] = make([]byte, width)
			for x := 0; x < width; x++ {
				if (cell{x, y}).in(alive) {
					matrix[y][x] = 0xFF
				}
			}
		}
		return matrix
	}
	output := []string{" Your alive cells: Expected alive cells:\n"}
	output = append(output, squaresToStrings(rasterise(given), rasterise(expected), width, height)...)
	return strings.Join(output, "")
}
// matricesToString renders one matrix, or two side by side when expected is
// non-nil, beneath an appropriate header line.
func matricesToString(given, expected [][]uint8, width, height int) string {
	header := " Your world matrix: "
	if expected != nil {
		header += "Expected world matrix:\n"
	} else {
		header += "\n"
	}
	parts := append([]string{header}, squaresToStrings(given, expected, width, height)...)
	return strings.Join(parts, "")
}
// getHorizontalBorder builds a horizontal box border: start, then width*2
// copies of middle (each matrix cell is rendered two characters wide), then
// end. The previous implementation ignored the middle parameter and
// hardcoded "─"; since every caller passes "─" this fix is
// behavior-preserving while honouring the signature.
func getHorizontalBorder(start, middle, end string, width int) string {
	return start + strings.Repeat(middle, width*2) + end
}
func squaresToStrings(given, expected [][]uint8, width, height int) []string {
var output []string
output = append(output, getHorizontalBorder(" ┌", "─", "┐ ", width))
if expected != nil {
output = append(output, getHorizontalBorder(" ┌", "─", "┐", width))
}
output = append(output, "\n")
for i := 0; i < height; i++ {
output = append(output, fmt.Sprintf("%2d│", i))
for j := 0; j < width; j++ {
if given[i][j] == 0xFF {
output = append(output, "██")
} else if given [i][j] == 0x00 {
output = append(output, " ")
}
}
if expected != nil {
output = append(output, fmt.Sprintf("│ %2d│", i))
for j := 0; j < width; j++ {
if expected[i][j] == 0xFF {
output = append(output, "██")
} else if expected[i][j] == 0x00 {
output = append(output, " ")
}
}
}
output = append(output, "│\n")
}
output = append(output, getHorizontalBorder(" └", "─", "┘ ", width))
if expected != nil {
output = append(output, getHorizontalBorder(" └", "─", "┘", width))
}
output = append(output, "\n")
return output
} | visualise.go | 0.516839 | 0.424114 | visualise.go | starcoder |
package trie
import (
"github.com/DgamesFoundation/subchain/fabric/core/ledger/statemgmt"
)
// levelDeltaMap maps an encoded trie-key string to its changed node at one
// trie level.
type levelDeltaMap map[string]*trieNode

// trieDelta accumulates changed trie nodes bucketed by trie level.
// lowestLevel tracks the numerically largest level recorded (see
// addTrieNode), i.e. the level furthest from the root.
type trieDelta struct {
	lowestLevel int
	deltaMap map[int]levelDeltaMap
}

// newLevelDeltaMap returns an empty level map.
func newLevelDeltaMap() levelDeltaMap {
	return levelDeltaMap(make(map[string]*trieNode))
}
// newTrieDelta builds a trieDelta from a state delta: deletions are recorded
// as nil values, and when rolling backwards the previous values are applied
// instead of the new ones.
func newTrieDelta(stateDelta *statemgmt.StateDelta) *trieDelta {
	delta := &trieDelta{lowestLevel: 0, deltaMap: make(map[int]levelDeltaMap)}
	for _, ccID := range stateDelta.GetUpdatedChaincodeIds(false) {
		for key, uv := range stateDelta.GetUpdates(ccID) {
			switch {
			case uv.IsDeleted():
				delta.delete(ccID, key)
			case stateDelta.RollBackwards:
				delta.set(ccID, key, uv.GetPreviousValue())
			default:
				delta.set(ccID, key, uv.GetValue())
			}
		}
	}
	return delta
}
// getLowestLevel returns the deepest trie level present in the delta (the
// field holds the maximum numeric level; in this trie deeper nodes have
// higher level numbers).
func (trieDelta *trieDelta) getLowestLevel() int {
	return trieDelta.lowestLevel
}
// getChangesAtLevel returns the changed nodes recorded at the given level.
// Bug fix: the original allocated the slice with make([]*trieNode,
// len(levelDelta)) and then appended, which produced len(levelDelta) leading
// nil entries before the real nodes. Allocating with zero length (and the
// map size as capacity) returns exactly the changed nodes.
func (trieDelta *trieDelta) getChangesAtLevel(level int) []*trieNode {
	levelDelta := trieDelta.deltaMap[level]
	changedNodes := make([]*trieNode, 0, len(levelDelta))
	for _, v := range levelDelta {
		changedNodes = append(changedNodes, v)
	}
	return changedNodes
}
// getParentOf returns the delta's cached parent of node, or nil when the
// parent's level map (or the parent itself) is absent. The parameter is
// renamed from trieNode to node so it no longer shadows the trieNode type.
func (trieDelta *trieDelta) getParentOf(node *trieNode) *trieNode {
	parentLevel := node.getParentLevel()
	parentTrieKey := node.getParentTrieKey()
	levelDeltaMap := trieDelta.deltaMap[parentLevel]
	if levelDeltaMap == nil {
		return nil
	}
	return levelDeltaMap[parentTrieKey.getEncodedBytesAsStr()]
}
// addTrieNode records node in the level map for its trie level, creating the
// level map on demand, and keeps lowestLevel at the maximum level seen. The
// parameter is renamed from trieNode to node so it no longer shadows the
// trieNode type.
func (trieDelta *trieDelta) addTrieNode(node *trieNode) {
	level := node.getLevel()
	levelDeltaMap := trieDelta.deltaMap[level]
	if levelDeltaMap == nil {
		levelDeltaMap = newLevelDeltaMap()
		trieDelta.deltaMap[level] = levelDeltaMap
	}
	levelDeltaMap[node.trieKey.getEncodedBytesAsStr()] = node
	if level > trieDelta.lowestLevel {
		trieDelta.lowestLevel = level
	}
}
// getTrieRootNode returns the changed root node (level 0), or nil when level
// 0 has no recorded changes.
func (trieDelta *trieDelta) getTrieRootNode() *trieNode {
	if levelZero := trieDelta.deltaMap[0]; levelZero != nil {
		return levelZero[rootTrieKeyStr]
	}
	return nil
}
// set records an updated value for (chaincodeID, key) in the delta. The
// parameter is renamed chaincodeId -> chaincodeID per Go initialism
// convention; callers are unaffected.
func (trieDelta *trieDelta) set(chaincodeID string, key string, value []byte) {
	node := newTrieNode(newTrieKey(chaincodeID, key), value, true)
	trieDelta.addTrieNode(node)
}
func (trieDelta *trieDelta) delete(chaincodeId string, key string) {
trieDelta.set(chaincodeId, key, nil)
} | fabric/core/ledger/statemgmt/trie/trie_delta.go | 0.620852 | 0.630088 | trie_delta.go | starcoder |
package analyzers
import (
summarypb "github.com/GoogleCloudPlatform/testgrid/pb/summary"
"github.com/GoogleCloudPlatform/testgrid/pkg/summarizer/common"
"github.com/golang/protobuf/ptypes/timestamp"
)
// IntString is for sorting, primarily intended for map[string]int as implemented below.
// NOTE(review): nothing in this chunk actually uses IntString; confirm it is
// referenced elsewhere before relying on it.
type IntString struct {
	s string
	i int
}

// BaseAnalyzer implements functions that calculate flakiness as a ratio of failed tests to total tests.
// It carries no state; it exists to satisfy an analyzer interface.
type BaseAnalyzer struct {
}
// GetFlakiness returns a HealthinessInfo message with data to display
// flakiness as a ratio of failed tests to total tests over the window
// [startDate, endDate]. Tests with fewer than minRuns non-infra runs are
// skipped. The tab argument is currently unused.
func (na *BaseAnalyzer) GetFlakiness(gridMetrics []*common.GridMetrics, minRuns int, startDate int, endDate int, tab string) *summarypb.HealthinessInfo {
	testInfoList := make([]*summarypb.TestInfo, 0, len(gridMetrics))
	for _, test := range gridMetrics {
		info, ok := calculateNaiveFlakiness(test, minRuns)
		if !ok {
			continue
		}
		// TODO (itsazhuhere@): Introduce name parsing into test name and env
		info.DisplayName = test.Name
		testInfoList = append(testInfoList, info)
	}
	return createHealthiness(startDate, endDate, testInfoList)
}
// createHealthiness assembles the HealthinessInfo envelope for the given
// window and per-test entries, computing the mean flakiness over all tests
// (zero when there are none).
func createHealthiness(startDate int, endDate int, testInfoList []*summarypb.TestInfo) *summarypb.HealthinessInfo {
	healthiness := &summarypb.HealthinessInfo{
		Start: intToTimestamp(startDate),
		End:   intToTimestamp(endDate),
		Tests: testInfoList,
	}
	if n := int32(len(healthiness.Tests)); n > 0 {
		var sum float32
		for _, info := range healthiness.Tests {
			sum += info.Flakiness
		}
		healthiness.AverageFlakiness = sum / float32(n)
	}
	return healthiness
}
// calculateNaiveFlakiness converts a test's grid metrics into a TestInfo
// whose Flakiness is the percentage of non-infra runs that failed. It
// returns ok == false when the test has no non-infra runs or fewer than
// minRuns of them.
func calculateNaiveFlakiness(test *common.GridMetrics, minRuns int) (*summarypb.TestInfo, bool) {
	passed := int32(test.Passed)
	failed := int32(test.Failed)
	total := passed + failed
	if total <= 0 || total < int32(minRuns) {
		return &summarypb.TestInfo{}, false
	}
	// Convert the infra-failure counts from int to the proto's int32.
	infraFailures := map[string]int32{}
	for key, value := range test.InfraFailures {
		infraFailures[key] = int32(value)
	}
	return &summarypb.TestInfo{
		Flakiness:          100 * float32(failed) / float32(total),
		TotalNonInfraRuns:  total,
		TotalRunsWithInfra: total + int32(test.FailedInfraCount),
		PassedNonInfraRuns: passed,
		FailedNonInfraRuns: failed,
		FailedInfraRuns:    int32(test.FailedInfraCount),
		InfraFailures:      infraFailures,
	}, true
}
func intToTimestamp(seconds int) *timestamp.Timestamp {
timestamp := ×tamp.Timestamp{
Seconds: int64(seconds),
}
return timestamp
} | pkg/summarizer/analyzers/baseanalyzer.go | 0.63114 | 0.406509 | baseanalyzer.go | starcoder |
package mipmap
import (
"fmt"
"math"
"github.com/MattSwanson/ebiten/v2/internal/affine"
"github.com/MattSwanson/ebiten/v2/internal/buffered"
"github.com/MattSwanson/ebiten/v2/internal/driver"
"github.com/MattSwanson/ebiten/v2/internal/graphics"
"github.com/MattSwanson/ebiten/v2/internal/shaderir"
)
// BeginFrame delegates frame setup to the buffered package.
func BeginFrame() error {
	return buffered.BeginFrame()
}

// EndFrame delegates frame teardown to the buffered package.
func EndFrame() error {
	return buffered.EndFrame()
}
// Mipmap is a set of buffered.Image sorted by the order of mipmap level.
// The level 0 image is a regular image and higher-level images are used for mipmap.
type Mipmap struct {
	// width/height are the dimensions of the level-0 image.
	width  int
	height int
	// volatile disables mipmap caching (see SetVolatile and DrawTriangles).
	volatile bool
	// orig is the level-0 image.
	orig *buffered.Image
	// imgs caches the downscaled images by level; nil entries mark levels
	// that degenerated to zero size or were deemed too large (see level).
	imgs map[int]*buffered.Image
}
// New creates a Mipmap whose level-0 image is a regular buffered image of
// the given size, with an empty level cache.
func New(width, height int) *Mipmap {
	m := &Mipmap{
		width:  width,
		height: height,
		orig:   buffered.NewImage(width, height),
	}
	m.imgs = make(map[int]*buffered.Image)
	return m
}
// NewScreenFramebufferMipmap creates a Mipmap whose level-0 image is the
// screen framebuffer image, with an empty level cache.
func NewScreenFramebufferMipmap(width, height int) *Mipmap {
	m := &Mipmap{
		width:  width,
		height: height,
		orig:   buffered.NewScreenFramebufferImage(width, height),
	}
	m.imgs = make(map[int]*buffered.Image)
	return m
}
// SetVolatile toggles the volatile flag and forwards it to the level-0
// image. Cached mipmap levels are dropped when turning volatile, since
// mipmaps are not maintained for volatile images (see DrawTriangles/level).
func (m *Mipmap) SetVolatile(volatile bool) {
	m.volatile = volatile
	if m.volatile {
		m.disposeMipmaps()
	}
	m.orig.SetVolatile(volatile)
}
// Dump delegates to the level-0 image's Dump, writing it out under the given
// name; blackbg is forwarded unchanged.
func (m *Mipmap) Dump(name string, blackbg bool) error {
	return m.orig.Dump(name, blackbg)
}
// ReplacePixels writes pix into the rectangle (x, y, width, height) of the
// level-0 image, then invalidates every cached mipmap level since they no
// longer match the new contents.
func (m *Mipmap) ReplacePixels(pix []byte, x, y, width, height int) error {
	err := m.orig.ReplacePixels(pix, x, y, width, height)
	if err != nil {
		return err
	}
	m.disposeMipmaps()
	return nil
}
// Pixels returns the pixels of the rectangle (x, y, width, height) of the
// level-0 image.
func (m *Mipmap) Pixels(x, y, width, height int) ([]byte, error) {
	return m.orig.Pixels(x, y, width, height)
}
// DrawTriangles draws the sources onto the level-0 image with the given
// vertices/indices. When the draw shrinks the sources enough, a cached
// smaller mipmap level of each source is substituted and the vertex source
// coordinates are rescaled to match. The destination's own cached mipmap
// levels are invalidated afterwards.
func (m *Mipmap) DrawTriangles(srcs [graphics.ShaderImageNum]*Mipmap, vertices []float32, indices []uint16, colorm *affine.ColorM, mode driver.CompositeMode, filter driver.Filter, address driver.Address, dstRegion, srcRegion driver.Region, subimageOffsets [graphics.ShaderImageNum - 1][2]float32, shader *Shader, uniforms []interface{}, canSkipMipmap bool) {
	if len(indices) == 0 {
		return
	}
	level := 0
	// TODO: Do we need to check all the sources' states of being volatile?
	if !canSkipMipmap && srcs[0] != nil && !srcs[0].volatile && filter != driver.FilterScreen {
		// Take the minimum level suggested by any triangle edge so that no
		// edge samples from a level that is too coarse for it.
		level = math.MaxInt32
		for i := 0; i < len(indices)/3; i++ {
			const n = graphics.VertexFloatNum
			dx0 := vertices[n*indices[3*i]+0]
			dy0 := vertices[n*indices[3*i]+1]
			sx0 := vertices[n*indices[3*i]+2]
			sy0 := vertices[n*indices[3*i]+3]
			dx1 := vertices[n*indices[3*i+1]+0]
			dy1 := vertices[n*indices[3*i+1]+1]
			sx1 := vertices[n*indices[3*i+1]+2]
			sy1 := vertices[n*indices[3*i+1]+3]
			dx2 := vertices[n*indices[3*i+2]+0]
			dy2 := vertices[n*indices[3*i+2]+1]
			sx2 := vertices[n*indices[3*i+2]+2]
			sy2 := vertices[n*indices[3*i+2]+3]
			if l := mipmapLevelFromDistance(dx0, dy0, dx1, dy1, sx0, sy0, sx1, sy1, filter); level > l {
				level = l
			}
			if l := mipmapLevelFromDistance(dx1, dy1, dx2, dy2, sx1, sy1, sx2, sy2, filter); level > l {
				level = l
			}
			if l := mipmapLevelFromDistance(dx2, dy2, dx0, dy0, sx2, sy2, sx0, sy0, filter); level > l {
				level = l
			}
		}
		if level == math.MaxInt32 {
			panic("mipmap: level must be calculated at least once but not")
		}
	}
	// A scale-only color matrix can be folded into the per-vertex colors,
	// avoiding a color-matrix pass entirely.
	if colorm != nil && colorm.ScaleOnly() {
		body, _ := colorm.UnsafeElements()
		cr := body[0]
		cg := body[5]
		cb := body[10]
		ca := body[15]
		colorm = nil
		const n = graphics.VertexFloatNum
		for i := 0; i < len(vertices)/n; i++ {
			vertices[i*n+4] *= cr
			vertices[i*n+5] *= cg
			vertices[i*n+6] *= cb
			vertices[i*n+7] *= ca
		}
	}
	var s *buffered.Shader
	if shader != nil {
		s = shader.shader
	}
	var imgs [graphics.ShaderImageNum]*buffered.Image
	for i, src := range srcs {
		if src == nil {
			continue
		}
		if level != 0 {
			// Substitute the cached mipmap level and rescale the source
			// coordinates by 2^level to match its reduced size.
			if img := src.level(level); img != nil {
				const n = graphics.VertexFloatNum
				s := float32(pow2(level))
				for i := 0; i < len(vertices)/n; i++ {
					vertices[i*n+2] /= s
					vertices[i*n+3] /= s
				}
				imgs[i] = img
				continue
			}
		}
		imgs[i] = src.orig
	}
	m.orig.DrawTriangles(imgs, vertices, indices, colorm, mode, filter, address, dstRegion, srcRegion, subimageOffsets, s, uniforms)
	m.disposeMipmaps()
}
// level returns the cached image for the given mipmap level (level >= 1),
// building it lazily by halving the previous level with linear filtering.
// A nil entry is cached (and returned) when the level degenerates to zero
// size or would exceed the 4096px safety bound.
func (m *Mipmap) level(level int) *buffered.Image {
	if level == 0 {
		panic("ebiten: level must be non-zero at level")
	}
	if m.volatile {
		panic("ebiten: mipmap images for a volatile image is not implemented yet")
	}
	if img, ok := m.imgs[level]; ok {
		return img
	}
	var src *buffered.Image
	var vs []float32
	var filter driver.Filter
	switch {
	case level == 1:
		// Level 1 is built directly from the original image at half scale.
		src = m.orig
		vs = graphics.QuadVertices(0, 0, float32(m.width), float32(m.height), 0.5, 0, 0, 0.5, 0, 0, 1, 1, 1, 1, false)
		filter = driver.FilterLinear
	case level > 1:
		// Deeper levels halve the previous level, built recursively.
		src = m.level(level - 1)
		if src == nil {
			m.imgs[level] = nil
			return nil
		}
		w := sizeForLevel(m.width, level-1)
		h := sizeForLevel(m.height, level-1)
		vs = graphics.QuadVertices(0, 0, float32(w), float32(h), 0.5, 0, 0, 0.5, 0, 0, 1, 1, 1, 1, false)
		filter = driver.FilterLinear
	default:
		panic(fmt.Sprintf("ebiten: invalid level: %d", level))
	}
	is := graphics.QuadIndices()
	w2 := sizeForLevel(m.width, level-1)
	h2 := sizeForLevel(m.height, level-1)
	if w2 == 0 || h2 == 0 {
		m.imgs[level] = nil
		return nil
	}
	// buffered.NewImage panics with a too big size when actual allocation happens.
	// 4096 should be a safe size in most environments (#1399).
	// Unfortunately a precise max image size cannot be obtained here since this requires GPU access.
	if w2 > 4096 || h2 > 4096 {
		m.imgs[level] = nil
		return nil
	}
	s := buffered.NewImage(w2, h2)
	s.SetVolatile(m.volatile)
	dstRegion := driver.Region{
		X:      0,
		Y:      0,
		Width:  float32(w2),
		Height: float32(h2),
	}
	s.DrawTriangles([graphics.ShaderImageNum]*buffered.Image{src}, vs, is, nil, driver.CompositeModeCopy, filter, driver.AddressUnsafe, dstRegion, driver.Region{}, [graphics.ShaderImageNum - 1][2]float32{}, nil, nil)
	m.imgs[level] = s
	return m.imgs[level]
}
// sizeForLevel returns x halved level times (integer division), or 0 as soon
// as any halving reaches zero.
func sizeForLevel(x int, level int) int {
	for i := 0; i < level; i++ {
		if x /= 2; x == 0 {
			return 0
		}
	}
	return x
}
// MarkDisposed disposes all cached mipmap levels and the level-0 image, then
// drops the reference to it. The Mipmap must not be used afterwards.
func (m *Mipmap) MarkDisposed() {
	m.disposeMipmaps()
	m.orig.MarkDisposed()
	m.orig = nil
}
// disposeMipmaps marks every cached mipmap level as disposed (skipping the
// nil placeholders) and empties the level cache. Deleting map entries while
// ranging is safe in Go.
func (m *Mipmap) disposeMipmaps() {
	for k, img := range m.imgs {
		if img != nil {
			img.MarkDisposed()
		}
		delete(m.imgs, k)
	}
}
// mipmapLevelFromDistance returns an appropriate mipmap level (0..6) for one
// triangle edge, comparing the squared destination edge length d against the
// squared source edge length s: each factor-of-4 shrink in d/s bumps the
// level by one. Level 0 (no mipmap) is returned for screen filtering,
// non-linear filters, and degenerate/overflowing scales.
func mipmapLevelFromDistance(dx0, dy0, dx1, dy1, sx0, sy0, sx1, sy1 float32, filter driver.Filter) int {
	const maxLevel = 6
	if filter == driver.FilterScreen {
		return 0
	}
	d := (dx1-dx0)*(dx1-dx0) + (dy1-dy0)*(dy1-dy0)
	s := (sx1-sx0)*(sx1-sx0) + (sy1-sy0)*(sy1-sy0)
	if s == 0 {
		return 0
	}
	scale := d / s
	// Scale can be infinite when the specified scale is extremely big (#1398).
	if math.IsInf(float64(scale), 0) {
		return 0
	}
	// Scale can be zero when the specified scale is extremely small (#1398).
	if scale == 0 {
		return 0
	}
	if filter != driver.FilterLinear {
		return 0
	}
	level := 0
	for scale < 0.25 {
		level++
		scale *= 4
	}
	if level > 0 {
		// If the image can be scaled into 0 size, adjust the level. (#839)
		w, h := int(sx1-sx0), int(sy1-sy0)
		for level >= 0 {
			s := 1 << uint(level)
			if (w > 0 && w/s == 0) || (h > 0 && h/s == 0) {
				level--
				continue
			}
			break
		}
		if level < 0 {
			// As the render source is too small, nothing is rendered.
			return 0
		}
	}
	if level > maxLevel {
		level = maxLevel
	}
	return level
}
// pow2 returns 2^power as a float32. power must be non-negative and small
// enough that the integer shift does not overflow.
func pow2(power int) float32 {
	return float32(int(1) << uint(power))
}
// Shader wraps a buffered shader, mirroring how Mipmap wraps a buffered image
// in this package.
type Shader struct {
	shader *buffered.Shader
}
// NewShader creates a Shader backed by a buffered shader built from program.
func NewShader(program *shaderir.Program) *Shader {
	s := &Shader{}
	s.shader = buffered.NewShader(program)
	return s
}
// MarkDisposed disposes the underlying shader; the Shader must not be used
// afterwards (s.shader becomes nil).
func (s *Shader) MarkDisposed() {
	s.shader.MarkDisposed()
	s.shader = nil
} | internal/mipmap/mipmap.go | 0.501709 | 0.453806 | mipmap.go | starcoder
package types
//------------------------------------------------------------------------------

// Message is a struct containing any relevant fields of a benthos message and
// helper functions. Parts holds the raw payload of each message part in order.
type Message struct {
	Parts [][]byte `json:"parts"`
}
// NewMessage initializes an empty message whose Parts slice is non-nil but
// has zero parts.
func NewMessage() Message {
	var m Message
	m.Parts = [][]byte{}
	return m
}
//------------------------------------------------------------------------------
/*
Internal message blob format:
- Four bytes containing number of message parts in big endian
- For each message part:
+ Four bytes containing length of message part in big endian
+ Content of message part
# Of bytes in message 2
|
# Of message parts (big endian) | Content of message 2
| | |
v v v
| 0| 0| 0| 2| 0| 0| 0| 5| h| e| l| l| o| 0| 0| 0| 5| w| o| r| l| d|
0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16 17 18 19 20 21 22
^ ^
| |
| Content of message 1
|
# Of bytes in message 1 (big endian)
*/
// intLen is the number of bytes used for every length field in the serialized
// form: a 32-bit (4-byte) big-endian unsigned integer.
var intLen uint32 = 4
// Bytes serialises the message into a single byte array using the blob format
// documented above: a big-endian part count followed by length-prefixed parts.
func (m *Message) Bytes() []byte {
	nParts := uint32(len(m.Parts))

	// Total size: one length prefix for the count, one per part, plus payloads.
	total := (nParts + 1) * intLen
	for _, part := range m.Parts {
		total += uint32(len(part))
	}

	buf := make([]byte, total)

	// Big-endian number of message parts.
	buf[0] = byte(nParts >> 24)
	buf[1] = byte(nParts >> 16)
	buf[2] = byte(nParts >> 8)
	buf[3] = byte(nParts)

	offset := intLen
	for _, part := range m.Parts {
		partLen := uint32(len(part))
		buf[offset] = byte(partLen >> 24)
		buf[offset+1] = byte(partLen >> 16)
		buf[offset+2] = byte(partLen >> 8)
		buf[offset+3] = byte(partLen)
		offset += intLen
		copy(buf[offset:], part)
		offset += partLen
	}
	return buf
}
// FromBytes deserialises a Message from a byte array produced by Bytes.
// The returned parts alias the input slice; no copies are made.
func FromBytes(b []byte) (Message, error) {
	var m Message

	if len(b) < 4 {
		return m, ErrBadMessageBytes
	}

	readUint32 := func(p []byte) uint32 {
		return uint32(p[0])<<24 | uint32(p[1])<<16 | uint32(p[2])<<8 | uint32(p[3])
	}

	numParts := readUint32(b)
	// Sanity bound: every part needs at least a 4-byte length prefix, so the
	// part count can never reach the total byte length.
	if numParts >= uint32(len(b)) {
		return m, ErrBadMessageBytes
	}

	m.Parts = make([][]byte, numParts)
	b = b[4:]
	for i := range m.Parts {
		if len(b) < 4 {
			return m, ErrBadMessageBytes
		}
		partSize := readUint32(b)
		b = b[4:]
		if uint32(len(b)) < partSize {
			return m, ErrBadMessageBytes
		}
		m.Parts[i] = b[:partSize]
		b = b[partSize:]
	}
	return m, nil
}
//------------------------------------------------------------------------------ | plugin/benthos/lib/types/message.go | 0.643665 | 0.555013 | message.go | starcoder |
package hoff
import (
"errors"
"github.com/google/go-cmp/cmp"
)
// Computation takes a NodeSystem and computes a Context against it.
type Computation struct {
	System  *NodeSystem           // the validated, activated graph to run
	Context *Context              // mutable data shared by every node
	Status  bool                  // true once Compute finished without error
	Report  map[Node]ComputeState // per-node result of the last Compute run
}
// NewComputation builds a Computation for the given NodeSystem and Context,
// rejecting a nil system, a system that is not activated, and a nil context.
func NewComputation(system *NodeSystem, context *Context) (*Computation, error) {
	switch {
	case system == nil:
		return nil, errors.New("must have a node system to work properly")
	case !system.IsActivated():
		return nil, errors.New("must have an activated node system to work properly")
	case context == nil:
		return nil, errors.New("must have a context to work properly")
	}
	return &Computation{
		System:  system,
		Context: context,
		Status:  false,
	}, nil
}
// Equal validates that the two Computation values are equal: status, context,
// system, and report must all compare equal under go-cmp.
func (cp Computation) Equal(o Computation) bool {
	return cmp.Equal(cp.Status, o.Status) && cmp.Equal(cp.Context, o.Context) && cmp.Equal(cp.System, o.System) && cmp.Equal(cp.Report, o.Report)
}
// Compute runs all nodes in the defined order to enhance the Context.
// A fresh Report is built for this run; on success Status is set to true and
// the compute state of every visited node can be read from Report.
func (cp *Computation) Compute() error {
	cp.Report = map[Node]ComputeState{}
	if err := cp.computeNodes(cp.System.InitialNodes()); err != nil {
		return err
	}
	cp.Status = true
	return nil
}
// computeNodes computes each node in order, stopping at the first failure.
func (cp *Computation) computeNodes(nodes []Node) error {
	for _, n := range nodes {
		if err := cp.computeNode(n); err != nil {
			return err
		}
	}
	return nil
}
// computeNode decides whether the node must run, be skipped, or wait, records
// the resulting state in the Report, then walks the node's outgoing branches.
func (cp *Computation) computeNode(node Node) error {
	switch cp.calculateComputeOrder(node) {
	case dontRunIt, alreadyRunOnce:
		// An ancestor is still pending, or this node already ran.
		return nil
	case skipIt:
		cp.Report[node] = NewSkipComputeState()
	case computeIt:
		state := node.Compute(cp.Context)
		cp.Report[node] = state
		if state.Value == AbortState {
			return state.Error
		}
	}
	return cp.computeFollowingNodes(node, nodeBranches(node)...)
}
// computeFollowingNodes computes the nodes reachable from node on each of the
// given branches, in order.
func (cp *Computation) computeFollowingNodes(node Node, branches ...*bool) error {
	for _, branch := range branches {
		// NOTE(review): the error from Follow is discarded — presumably safe
		// because the system was validated and activated at construction
		// time, so the lookup should not fail here; confirm before relying
		// on this.
		nextNodes, _ := cp.System.Follow(node, branch)
		err := cp.computeNodes(nextNodes)
		if err != nil {
			return err
		}
	}
	return nil
}
// calculateComputeOrder derives what to do with a node from its own report
// entry, its ancestors' progress, and the node's join mode.
func (cp *Computation) calculateComputeOrder(node Node) computeOrder {
	if _, done := cp.Report[node]; done {
		return alreadyRunOnce
	}

	total, computed, continued := cp.ansectorsComputationStatistics(node)
	switch {
	case total != computed:
		// At least one ancestor has not run yet.
		return dontRunIt
	case total == 0:
		// Entry node: nothing to wait for.
		return computeIt
	}

	switch cp.System.JoinModeOfNode(node) {
	case JoinAnd:
		if continued == total {
			return computeIt
		}
	case JoinOr:
		if continued > 0 {
			return computeIt
		}
	case JoinNone:
		if continued == 1 {
			return computeIt
		}
	}
	return skipIt
}
// ansectorsComputationStatistics aggregates ancestor statistics over the
// unconditional (nil) link and both conditional branches: total ancestors,
// how many have been computed, and how many ended in a ContinueState.
func (cp *Computation) ansectorsComputationStatistics(node Node) (int, int, int) {
	var total, computed, continued int
	for _, branch := range []*bool{nil, boolPointer(true), boolPointer(false)} {
		t, c, s := cp.ansectorsComputationStatisticsOnBranch(node, branch)
		total += t
		computed += c
		continued += s
	}
	return total, computed, continued
}
// ansectorsComputationStatisticsOnBranch counts, for one branch, how many
// ancestors exist, how many already have a report entry, and how many of
// those ended in a ContinueState on that same branch.
func (cp *Computation) ansectorsComputationStatisticsOnBranch(node Node, branch *bool) (int, int, int) {
	ancestors, _ := cp.System.Ancestors(node, branch)

	var computed, continued int
	for _, ancestor := range ancestors {
		state, ok := cp.Report[ancestor]
		if !ok {
			continue
		}
		computed++
		if state.Value == ContinueState && state.Branch == branch {
			continued++
		}
	}
	return len(ancestors), computed, continued
}
// computeOrder describes what Compute should do with a node at this point of
// the traversal.
//
// Fix: previously only computeIt carried the computeOrder type; the other
// three were untyped string constants, which defeated the named type (they
// were assignable to any string). All four are now explicitly typed; the
// values are unchanged, so this is fully backward compatible.
type computeOrder string

const (
	computeIt      computeOrder = "compute_it"       // all preconditions met: run the node
	skipIt         computeOrder = "skip_it"          // ancestors ran but the join condition failed
	dontRunIt      computeOrder = "dont_run_it"      // some ancestor has not run yet
	alreadyRunOnce computeOrder = "already_run_once" // the node already has a report entry
)
// nodeBranches returns the branches to follow from a node: both boolean
// branches when the node can decide, otherwise the single unconditional
// (nil) branch.
func nodeBranches(node Node) []*bool {
	if node.DecideCapability() {
		return []*bool{boolPointer(true), boolPointer(false)}
	}
	return []*bool{nil}
} | computation.go | 0.693265 | 0.462898 | computation.go | starcoder
package circuit
import (
"fmt"
"math"
"math/cmplx"
"math/rand"
"time"
"github.com/Quant-Team/qvm/pkg/circuit/gates"
m "github.com/Quant-Team/qvm/pkg/math/matrix"
v "github.com/Quant-Team/qvm/pkg/math/vector"
)
// Compile-time checks that *Qubit (as returned by Zero/One) satisfies Qubiter.
var _ Qubiter = Zero()
var _ Qubiter = One()

// Gate is a quantum gate represented as a matrix.
// NOTE(review): this alias appears unused in this file — Apply takes
// gates.Gate instead; confirm whether it is needed.
type Gate m.Matrix

// Qubiter - abstraction interface of a qubit.
type Qubiter interface {
	// Measure collapses the qubit to one basis state, picked at random
	// according to the amplitude probabilities, and returns the qubit.
	Measure() Qubiter
	// Probability returns the measurement probability of each basis state.
	Probability() []float64
	// Apply applies a gate to the state vector and returns the qubit.
	Apply(gates.Gate) Qubiter
	// Equal reports whether two qubits hold identical state vectors.
	Equal(Qubiter) bool
}

// Qubit is the concrete Qubiter backed by a complex amplitude vector.
type Qubit struct {
	vec *v.Vector
}
// Probability returns |amplitude|^2 for every basis state, in vector order.
func (q *Qubit) Probability() []float64 {
	probs := make([]float64, 0)
	it := q.vec.Iterator()
	for idx, err := it.Next(); err == nil; idx, err = it.Next() {
		amp, _ := q.vec.At(idx)
		probs = append(probs, math.Pow(cmplx.Abs(amp.(complex128)), 2))
	}
	return probs
}
// rng is the random source used by Measure, seeded once at startup.
// Fix: the previous code called rand.Seed(time.Now().UnixNano()) on every
// Measure call, re-seeding the deprecated global generator each time; rapid
// successive calls could reuse the same seed. This source is not safe for
// concurrent use — Measure was not documented as concurrency-safe before
// either.
var rng = rand.New(rand.NewSource(time.Now().UnixNano()))

// Measure collapses the qubit: a basis state is chosen at random with its
// amplitude probability, the state vector becomes that pure basis state, and
// the (mutated) qubit is returned.
func (q *Qubit) Measure() Qubiter {
	r := rng.Float64()

	var cumulative float64
	for i, p := range q.Probability() {
		// Pick the state whose cumulative probability interval contains r.
		if cumulative <= r && r < cumulative+p {
			q.vec = v.NewZero(q.vec.Size())
			q.vec.Set(i, 1)
			break
		}
		cumulative += p
	}
	return q
}
// String formats the underlying amplitude vector via its %s formatting.
func (q *Qubit) String() string {
	return fmt.Sprintf("%s", q.vec)
}
// Normalize rescales the amplitude vector to unit length (the squared
// magnitudes sum to 1) and returns the qubit.
func (q *Qubit) Normalize() *Qubit {
	sumSq := 0.0
	it := q.vec.Iterator()
	for idx, err := it.Next(); err == nil; idx, err = it.Next() {
		amp, _ := q.vec.At(idx)
		sumSq += math.Pow(cmplx.Abs(amp.(complex128)), 2)
	}
	factor := 1 / math.Sqrt(sumSq)
	q.vec = q.vec.MulScalar(v.NewScalar(complex(factor, 0)))
	return q
}
// Apply applies the gate to the state vector in place and returns the qubit.
func (q *Qubit) Apply(g gates.Gate) Qubiter {
	q.vec = q.vec.ApplyGate(g)
	return q
}
// Equal reports whether q is a *Qubit with an identical amplitude vector:
// same length and exact element-wise complex equality.
func (q0 *Qubit) Equal(q Qubiter) bool {
	other, ok := q.(*Qubit)
	if !ok {
		return false
	}
	if q0.vec.Shape()[0] != other.vec.Shape()[0] {
		return false
	}
	it := q0.vec.Iterator()
	for idx, err := it.Next(); err == nil; idx, err = it.Next() {
		a, _ := q0.vec.At(idx)
		b, _ := other.vec.At(idx)
		if a.(complex128) != b.(complex128) {
			return false
		}
	}
	return true
}
// NewQubit builds a qubit from the given amplitudes and normalizes it.
func NewQubit(z ...complex128) *Qubit {
	q := &Qubit{vec: v.New(z...)}
	return q.Normalize()
}
// Zero returns a qubit in the |0> basis state, i.e. amplitudes (1, 0).
// The previous cmplx.Sqrt(1+0i) / cmplx.Sqrt(0+0i) calls computed exactly
// these constants at runtime; the literals are equivalent and clearer.
func Zero() *Qubit {
	return &Qubit{
		vec: v.New(1, 0),
	}
}
// One returns a qubit in the |1> basis state, i.e. amplitudes (0, 1).
// (cmplx.Sqrt of the real constants 0 and 1 is the identity here.)
func One() *Qubit {
	return &Qubit{
		vec: v.New(cmplx.Sqrt(0+0i), cmplx.Sqrt(1+0i)),
	}
} | pkg/circuit/qubit.go | 0.724091 | 0.404713 | qubit.go | starcoder
package msgp
import (
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"reflect"
"strconv"
)
// Unpack reads a value from the io.Reader and assigns it to the value pointed
// to by 'ptr'. Because Unpack depends on the type of 'ptr' to extract a value,
// 'ptr' should be the address of a variable of a specific type. Where possible
// the read value is converted to that variable's type. If 'ptr' is a pointer
// to a pointer, a new value is allocated automatically.
// It is recommended to use this function for all types.
func Unpack(r io.Reader, ptr interface{}) error {
	kind := reflect.TypeOf(ptr).Elem().Kind()
	switch kind {
	case reflect.Bool:
		return UnpackBool(r, ptr)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return UnpackInt(r, ptr)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return UnpackUint(r, ptr)
	case reflect.Float32, reflect.Float64:
		return UnpackFloat(r, ptr)
	case reflect.String:
		return UnpackString(r, ptr)
	case reflect.Array:
		return UnpackArray(r, ptr)
	case reflect.Slice:
		return UnpackSlice(r, ptr)
	case reflect.Map:
		return UnpackMap(r, ptr)
	case reflect.Struct:
		return UnpackStruct(r, ptr)
	case reflect.Ptr:
		return UnpackPtr(r, ptr)
	case reflect.Interface:
		return UnpackInterface(r, ptr)
	default:
		return fmt.Errorf("msgp: specified type[%v] is not supported", kind)
	}
}
// UnpackBool reads a bool value from the io.Reader and assigns it to the
// value pointed to by 'ptr'. A nil in the stream resets the destination to
// its zero value.
func UnpackBool(r io.Reader, ptr interface{}) error {
	val, err := UnpackPrimitive(r)
	if err != nil {
		return err
	}
	dest := reflect.ValueOf(ptr).Elem()
	if val == nil {
		dest.Set(reflect.Zero(dest.Type()))
		return nil
	}
	b, ok := val.(bool)
	if !ok {
		return fmt.Errorf("msgp: unpacked value[%v] is not assignable to bool type", val)
	}
	dest.SetBool(b)
	return nil
}
// UnpackInt reads an integer value from the io.Reader and assigns it to the
// value pointed to by 'ptr'. Numeric types (int, uint, float) are compatible
// with each other, so even a float in the stream can be read into an int
// variable (truncated by conversion).
func UnpackInt(r io.Reader, ptr interface{}) error {
	val, err := UnpackPrimitive(r)
	if err != nil {
		return err
	}
	dest := reflect.ValueOf(ptr).Elem()
	if val == nil {
		dest.Set(reflect.Zero(dest.Type()))
		return nil
	}
	src := reflect.ValueOf(val)
	switch src.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		dest.SetInt(src.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		dest.SetInt(int64(src.Uint()))
	case reflect.Float32, reflect.Float64:
		dest.SetInt(int64(src.Float()))
	default:
		return fmt.Errorf("msgp: unpacked value[%v] is not assignable to integer type", val)
	}
	return nil
}
// UnpackUint reads a unsigned integer value from the io.Reader. And assigns it to the value pointed by 'ptr'.
// Numeric types(int, uint, float) are compatible with each other.
// Even float value can be read by a uint variable.
func UnpackUint(r io.Reader, ptr interface{}) error {
	var err error
	var val interface{}
	if val, err = UnpackPrimitive(r); err != nil {
		return err
	}
	if val == nil {
		// nil in the stream resets the destination to its zero value.
		reflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))
	} else {
		switch reflect.ValueOf(val).Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			// Negative values wrap around here (two's-complement conversion).
			reflect.ValueOf(ptr).Elem().SetUint(uint64(reflect.ValueOf(val).Int()))
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			reflect.ValueOf(ptr).Elem().SetUint(reflect.ValueOf(val).Uint())
		case reflect.Float32, reflect.Float64:
			reflect.ValueOf(ptr).Elem().SetUint(uint64(reflect.ValueOf(val).Float()))
		default:
			return fmt.Errorf("msgp: unpacked value[%v] is not assignable to unsigned integer type", val)
		}
	}
	return nil
}
// UnpackFloat reads a float value from the io.Reader. And assigns it to the value pointed by 'ptr'.
// Numeric types(int, uint, float) are compatible with each other.
// Even int value can be read by a float32 variable.
func UnpackFloat(r io.Reader, ptr interface{}) error {
	var err error
	var val interface{}
	if val, err = UnpackPrimitive(r); err != nil {
		return err
	}
	if val == nil {
		// nil in the stream resets the destination to its zero value.
		reflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))
	} else {
		switch reflect.ValueOf(val).Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			reflect.ValueOf(ptr).Elem().SetFloat(float64(reflect.ValueOf(val).Int()))
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			reflect.ValueOf(ptr).Elem().SetFloat(float64(reflect.ValueOf(val).Uint()))
		case reflect.Float32, reflect.Float64:
			reflect.ValueOf(ptr).Elem().SetFloat(reflect.ValueOf(val).Float())
		default:
			return fmt.Errorf("msgp: unpacked value[%v] is not assignable to float type", val)
		}
	}
	return nil
}
// UnpackString reads a string value from the io.Reader and assigns it to the
// value pointed to by 'ptr'. Both the str and bin format families are
// accepted; bin payloads are converted to a string.
func UnpackString(r io.Reader, ptr interface{}) error {
	val, err := UnpackPrimitive(r)
	if err != nil {
		return err
	}
	dest := reflect.ValueOf(ptr).Elem()
	if val == nil {
		dest.Set(reflect.Zero(dest.Type()))
		return nil
	}
	// UnpackPrimitive only produces built-in types, so an exact type switch
	// is equivalent to the reflect.Kind inspection it replaces.
	switch v := val.(type) {
	case string:
		dest.SetString(v)
	case []byte:
		dest.SetString(string(v))
	default:
		return fmt.Errorf("msgp: unpacked value[%v] is not assignable to string type", val)
	}
	return nil
}
// UnpackArray reads an array from the io.Reader. And assigns it to the value pointed by 'ptr'.
// The length of array type pointed by 'ptr' should be large enough.
// Bin format family value is also read by this function.
// Bin format family should be read with a pointer of '[n]byte' or '[n]uint8'.
// (Numeric types are compatible, but bin format family cannot be read with [n]int)
func UnpackArray(r io.Reader, ptr interface{}) error {
	var err error
	var head byte
	arrTyp := reflect.TypeOf(ptr).Elem()
	arrVal := reflect.ValueOf(ptr).Elem()
	arrLen := arrVal.Len()
	if err = binary.Read(r, binary.BigEndian, &head); err != nil {
		return err
	}
	if head == 0xc0 { // nil
		reflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))
		return nil
	}
	// handle bin format family (bin 8 / bin 16 / bin 32)
	if head == 0xc4 || head == 0xc5 || head == 0xc6 {
		if arrTyp.Elem().Kind() == reflect.Uint8 {
			var byteSlice []byte
			switch head {
			case 0xc4:
				byteSlice, err = unpackBin8(r)
			case 0xc5:
				byteSlice, err = unpackBin16(r)
			case 0xc6:
				byteSlice, err = unpackBin32(r)
			}
			if err != nil {
				return err
			}
			// NOTE(review): this builds a zero array sized to the payload
			// (reflect.ArrayOf(len(byteSlice), ...)); if the payload length
			// differs from the destination array length, Set may panic —
			// confirm the intended behavior for mismatched sizes.
			arrVal.Set(reflect.Zero(reflect.ArrayOf(len(byteSlice), reflect.TypeOf(byteSlice).Elem())))
			reflect.Copy(arrVal, reflect.ValueOf(byteSlice))
			return nil
		}
		return fmt.Errorf("msgp: byte array can't be assigned to other type[%v] array", arrTyp.Elem().Kind())
	}
	// handle array format family: fixarray, array 16, array 32
	var srcLen = 0
	if head&0xf0 == 0x90 { // fixarray: low nibble is the element count
		srcLen = int(head & 0x0f)
	} else if head == 0xdc { // array 16
		var temp uint16
		if err = binary.Read(r, binary.BigEndian, &temp); err != nil {
			return err
		}
		srcLen = int(temp)
	} else if head == 0xdd { // array 32
		var temp uint32
		if err = binary.Read(r, binary.BigEndian, &temp); err != nil {
			return err
		}
		srcLen = int(temp) // maybe overflow.
	} else {
		return fmt.Errorf("msgp: unpacked value is not an array")
	}
	if arrLen < srcLen {
		return fmt.Errorf("msgp: array size is too small")
	}
	arrVal.Set(reflect.Zero(reflect.ArrayOf(arrLen, arrTyp.Elem()))) // create the array.
	for inx := 0; inx < srcLen; inx++ {
		if err = Unpack(r, arrVal.Index(inx).Addr().Interface()); err != nil {
			return err
		}
	}
	return nil
}
// UnpackSlice reads an array from the io.Reader. And assigns it to the value pointed by 'ptr'.
// Bin format family value is also read by this function.
// Bin format family should be read with a pointer of '[]byte' or '[]uint8'.
// (Numeric types are compatible, but bin format family cannot be read with []int)
func UnpackSlice(r io.Reader, ptr interface{}) error {
	var err error
	var head byte
	sliceTyp := reflect.TypeOf(ptr).Elem()
	sliceVal := reflect.ValueOf(ptr).Elem()
	if err = binary.Read(r, binary.BigEndian, &head); err != nil {
		return err
	}
	if head == 0xc0 { // nil
		reflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))
		return nil
	}
	// handle bin format family (bin 8 / bin 16 / bin 32)
	if head == 0xc4 || head == 0xc5 || head == 0xc6 {
		if sliceTyp.Elem().Kind() == reflect.Uint8 {
			var byteSlice []byte
			switch head {
			case 0xc4:
				byteSlice, err = unpackBin8(r)
			case 0xc5:
				byteSlice, err = unpackBin16(r)
			case 0xc6:
				byteSlice, err = unpackBin32(r)
			}
			if err != nil {
				return err
			}
			// Allocate a fresh slice of the payload size, then copy in.
			sliceVal.Set(reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf(byteSlice).Elem()), len(byteSlice), len(byteSlice)))
			reflect.Copy(sliceVal, reflect.ValueOf(byteSlice))
			return nil
		}
		return fmt.Errorf("msgp: byte array can't be assigned to other type[%v] slice", sliceTyp.Elem().Kind())
	}
	// handle array format family: fixarray, array 16, array 32
	var srcLen = 0
	if head&0xf0 == 0x90 { // fixarray: low nibble is the element count
		srcLen = int(head & 0x0f)
	} else if head == 0xdc { // array 16
		var temp uint16
		if err = binary.Read(r, binary.BigEndian, &temp); err != nil {
			return err
		}
		srcLen = int(temp)
	} else if head == 0xdd { // array 32
		var temp uint32
		if err = binary.Read(r, binary.BigEndian, &temp); err != nil {
			return err
		}
		srcLen = int(temp) // maybe overflow.
	} else {
		return fmt.Errorf("msgp: unpacked value is not an array")
	}
	sliceVal.Set(reflect.MakeSlice(reflect.SliceOf(sliceTyp.Elem()), srcLen, srcLen)) // create the slice.
	for inx := 0; inx < srcLen; inx++ {
		if err = Unpack(r, sliceVal.Index(inx).Addr().Interface()); err != nil {
			return err
		}
	}
	return nil
}
// UnpackMap reads a map from the io.Reader. And assigns it to the value pointed by 'ptr'.
// Keys and values are decoded recursively into the map's declared key/element types.
func UnpackMap(r io.Reader, ptr interface{}) error {
	var err error
	var head byte
	mapTyp := reflect.TypeOf(ptr).Elem()
	mapVal := reflect.ValueOf(ptr).Elem()
	if err = binary.Read(r, binary.BigEndian, &head); err != nil {
		return err
	}
	if head == 0xc0 { // nil
		reflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))
		return nil
	}
	var srcLen = 0
	if head&0xf0 == 0x80 { // fixmap: low nibble is the entry count
		srcLen = int(head & 0x0f)
	} else if head == 0xde { // map 16
		var temp uint16
		if err = binary.Read(r, binary.BigEndian, &temp); err != nil {
			return err
		}
		srcLen = int(temp)
	} else if head == 0xdf { // map 32
		var temp uint32
		if err = binary.Read(r, binary.BigEndian, &temp); err != nil {
			return err
		}
		srcLen = int(temp)
	} else {
		return fmt.Errorf("msgp: unpacked value is not a map")
	}
	mapVal.Set(reflect.MakeMap(reflect.MapOf(mapTyp.Key(), mapTyp.Elem()))) // create the map.
	for inx := 0; inx < srcLen; inx++ {
		// Decode each key and value into fresh values of the declared types.
		keyPtr := reflect.New(mapTyp.Key())
		if err = Unpack(r, keyPtr.Interface()); err != nil {
			return err
		}
		valPtr := reflect.New(mapTyp.Elem())
		if err = Unpack(r, valPtr.Interface()); err != nil {
			return err
		}
		mapVal.SetMapIndex(keyPtr.Elem(), valPtr.Elem())
	}
	return nil
}
// UnpackStruct reads a struct value from the io.Reader and assigns it to the
// value pointed to by 'ptr'. The struct is deserialized from a msgpack map
// keyed by the (tag-resolved) field names.
// If a field of the struct is not compatible with the value read, an error is
// returned. Values for keys that match no field are read and discarded so
// that the stream stays in sync.
func UnpackStruct(r io.Reader, ptr interface{}) error {
	var err error
	var head byte
	if err = binary.Read(r, binary.BigEndian, &head); err != nil {
		return err
	}
	if head == 0xc0 { // nil
		reflect.ValueOf(ptr).Elem().Set(reflect.Zero(reflect.TypeOf(ptr).Elem()))
		return nil
	}
	var srcLen = 0
	if head&0xf0 == 0x80 { // fixmap: low nibble is the entry count
		srcLen = int(head & 0x0f)
	} else if head == 0xde { // map 16
		var temp uint16
		if err = binary.Read(r, binary.BigEndian, &temp); err != nil {
			return err
		}
		srcLen = int(temp)
	} else if head == 0xdf { // map 32
		var temp uint32
		if err = binary.Read(r, binary.BigEndian, &temp); err != nil {
			return err
		}
		srcLen = int(temp)
	} else {
		return fmt.Errorf("msgp: unpacked value is not a map")
	}

	// Index the struct's decodable fields by their serialized name.
	type StructField struct {
		Props FieldProps
		Val   reflect.Value
	}
	fieldMap := make(map[string]StructField)
	structTyp := reflect.TypeOf(ptr).Elem()
	structVal := reflect.ValueOf(ptr).Elem()
	structVal.Set(reflect.Zero(structTyp)) // init with zero value
	structNumField := structTyp.NumField()
	for inx := 0; inx < structNumField; inx++ {
		var fp FieldProps
		fieldTyp := structTyp.Field(inx)
		fieldVal := structVal.Field(inx)
		fp.parseTag(fieldTyp)
		if fp.Skip {
			continue
		}
		fieldMap[fp.Name] = StructField{fp, fieldVal}
	}

	for inx := 0; inx < srcLen; inx++ {
		var key string
		if err = Unpack(r, &key); err != nil {
			return err
		}
		structField, ok := fieldMap[key]
		if !ok {
			// Bug fix: previously the value of an unknown key was left
			// unread, desynchronizing every subsequent read from the stream.
			// Consume and discard it instead.
			if _, err = UnpackPrimitive(r); err != nil {
				return err
			}
			continue
		}
		if structField.Props.Skip {
			// Defensive only: skipped fields are never registered in
			// fieldMap, but if one were, its value must still be consumed.
			if _, err = UnpackPrimitive(r); err != nil {
				return err
			}
			continue
		}
		if structField.Props.String {
			// ",string"-style option: the value arrives as a string and is
			// parsed into the field's real type.
			var str string
			if err = Unpack(r, &str); err != nil {
				return err
			}
			if err = assignValueFromString(structField.Val, str); err != nil {
				return err
			}
		} else {
			if err = Unpack(r, structField.Val.Addr().Interface()); err != nil {
				return err
			}
		}
	}
	return nil
}
// UnpackPtr reads a value from the io.Reader and assigns it to the value
// pointed to by 'ptr', which must be a pointer to a pointer. A nil in the
// stream yields a nil pointer; otherwise a new value is allocated and filled.
func UnpackPtr(r io.Reader, ptr interface{}) error {
	br := NewPeekableReader(r)
	peek, err := br.Peek()
	if err != nil {
		return err
	}
	dest := reflect.ValueOf(ptr).Elem()
	if peek == 0xc0 { // nil value unpacked.
		dest.Set(reflect.Zero(dest.Type()))
		return nil
	}
	newVal := reflect.New(dest.Type().Elem())
	// The peeked byte is consumed again inside Unpack().
	if err = Unpack(br, newVal.Interface()); err != nil {
		return err
	}
	dest.Set(newVal)
	return nil
}
// UnpackInterface reads a value from the io.Reader and assigns it to the
// value pointed to by 'ptr' (*interface{}). Use this when the type is not
// known in advance; the caller must then inspect the decoded value via
// reflection or type switches.
func UnpackInterface(r io.Reader, ptr interface{}) error {
	pi, ok := ptr.(*interface{})
	if !ok {
		return fmt.Errorf("msgp: specified type[%v] is not supported", reflect.TypeOf(ptr).Elem())
	}
	val, err := UnpackPrimitive(r)
	*pi = val
	return err
}
// UnpackPrimitive reads a single value from the io.Reader without any type
// casting: it returns whichever Go type corresponds to the msgpack format
// actually found in the stream. It is generally recommended to use Unpack().
func UnpackPrimitive(r io.Reader) (interface{}, error) {
	var head byte
	if err := binary.Read(r, binary.BigEndian, &head); err != nil {
		return nil, err
	}
	// Cases are evaluated in the same order as the if/else chain they replace.
	switch {
	case head == 0xc0: // nil
		return nil, nil
	case head == 0xc2: // false
		return false, nil
	case head == 0xc3: // true
		return true, nil
	case head&0x80 == 0: // positive fixint
		return int8(head), nil
	case head&0xe0 == 0xe0: // negative fixint
		return int8(head), nil
	case head == 0xd0:
		return unpackInt8(r)
	case head == 0xd1:
		return unpackInt16(r)
	case head == 0xd2:
		return unpackInt32(r)
	case head == 0xd3:
		return unpackInt64(r)
	case head == 0xcc:
		return unpackUint8(r)
	case head == 0xcd:
		return unpackUint16(r)
	case head == 0xce:
		return unpackUint32(r)
	case head == 0xcf:
		return unpackUint64(r)
	case head == 0xca:
		return unpackFloat32(r)
	case head == 0xcb:
		return unpackFloat64(r)
	case head&0xe0 == 0xa0: // fixstr
		return unpackString5(r, int(head&0x1f))
	case head == 0xd9:
		return unpackString8(r)
	case head == 0xda:
		return unpackString16(r)
	case head == 0xdb:
		return unpackString32(r)
	case head == 0xc4: // bin
		return unpackBin8(r)
	case head == 0xc5:
		return unpackBin16(r)
	case head == 0xc6:
		return unpackBin32(r)
	case head&0xf0 == 0x90: // fixarray
		return unpackArray4(r, int(head&0x0f))
	case head == 0xdc:
		return unpackArray16(r)
	case head == 0xdd:
		return unpackArray32(r)
	case head&0xf0 == 0x80: // fixmap
		return unpackMap4(r, int(head&0x0f))
	case head == 0xde:
		return unpackMap16(r)
	case head == 0xdf:
		return unpackMap32(r)
	}
	return nil, errors.New("msgp: UnpackPrimitive() reads unsupported(array, map) format family")
}
// unpackInt8 reads the big-endian payload of an int 8 value (format 0xd0).
func unpackInt8(r io.Reader) (int8, error) {
	var val int8
	err := binary.Read(r, binary.BigEndian, &val)
	return val, err
}

// unpackInt16 reads the big-endian payload of an int 16 value (format 0xd1).
func unpackInt16(r io.Reader) (int16, error) {
	var val int16
	err := binary.Read(r, binary.BigEndian, &val)
	return val, err
}

// unpackInt32 reads the big-endian payload of an int 32 value (format 0xd2).
func unpackInt32(r io.Reader) (int32, error) {
	var val int32
	err := binary.Read(r, binary.BigEndian, &val)
	return val, err
}

// unpackInt64 reads the big-endian payload of an int 64 value (format 0xd3).
func unpackInt64(r io.Reader) (int64, error) {
	var val int64
	err := binary.Read(r, binary.BigEndian, &val)
	return val, err
}

// unpackUint8 reads the big-endian payload of a uint 8 value (format 0xcc).
func unpackUint8(r io.Reader) (uint8, error) {
	var val uint8
	err := binary.Read(r, binary.BigEndian, &val)
	return val, err
}

// unpackUint16 reads the big-endian payload of a uint 16 value (format 0xcd).
func unpackUint16(r io.Reader) (uint16, error) {
	var val uint16
	err := binary.Read(r, binary.BigEndian, &val)
	return val, err
}

// unpackUint32 reads the big-endian payload of a uint 32 value (format 0xce).
func unpackUint32(r io.Reader) (uint32, error) {
	var val uint32
	err := binary.Read(r, binary.BigEndian, &val)
	return val, err
}

// unpackUint64 reads the big-endian payload of a uint 64 value (format 0xcf).
func unpackUint64(r io.Reader) (uint64, error) {
	var val uint64
	err := binary.Read(r, binary.BigEndian, &val)
	return val, err
}
func unpackFloat32(r io.Reader) (float32, error) {
buf := make([]byte, 4)
if _, err := r.Read(buf); err != nil {
return 0, err
}
bits := binary.BigEndian.Uint32(buf)
return math.Float32frombits(bits), nil
}
func unpackFloat64(r io.Reader) (float64, error) {
buf := make([]byte, 8)
if _, err := r.Read(buf); err != nil {
return 0, err
}
bits := binary.BigEndian.Uint64(buf)
return math.Float64frombits(bits), nil
}
// unpackString5 reads the body of a fixstr value; its length came from the
// low bits of the head byte.
func unpackString5(r io.Reader, len int) (string, error) {
	return unpackStringBody(r, len)
}

// unpackString8 reads a str 8 value: a uint8 length followed by the body.
func unpackString8(r io.Reader) (string, error) {
	var len uint8
	if err := binary.Read(r, binary.BigEndian, &len); err != nil {
		return "", err
	}
	return unpackStringBody(r, int(len))
}

// unpackString16 reads a str 16 value: a big-endian uint16 length followed by
// the body.
func unpackString16(r io.Reader) (string, error) {
	var len uint16
	if err := binary.Read(r, binary.BigEndian, &len); err != nil {
		return "", err
	}
	return unpackStringBody(r, int(len))
}

// unpackString32 reads a str 32 value: a big-endian uint32 length followed by
// the body.
func unpackString32(r io.Reader) (string, error) {
	var len uint32
	if err := binary.Read(r, binary.BigEndian, &len); err != nil {
		return "", err
	}
	return unpackStringBody(r, int(len))
}
// unpackBin8 reads a bin 8 value: a uint8 length followed by raw bytes.
func unpackBin8(r io.Reader) ([]byte, error) {
	var len uint8
	if err := binary.Read(r, binary.BigEndian, &len); err != nil {
		return nil, err
	}
	return unpackBinBody(r, int(len))
}

// unpackBin16 reads a bin 16 value: a big-endian uint16 length followed by
// raw bytes.
func unpackBin16(r io.Reader) ([]byte, error) {
	var len uint16
	if err := binary.Read(r, binary.BigEndian, &len); err != nil {
		return nil, err
	}
	return unpackBinBody(r, int(len))
}

// unpackBin32 reads a bin 32 value: a big-endian uint32 length followed by
// raw bytes.
func unpackBin32(r io.Reader) ([]byte, error) {
	var len uint32
	if err := binary.Read(r, binary.BigEndian, &len); err != nil {
		return nil, err
	}
	return unpackBinBody(r, int(len))
}
// unpackArray4 reads the elements of a fixarray; the count came from the low
// bits of the head byte.
func unpackArray4(r io.Reader, len int) (interface{}, error) {
	return unpackArrayBody(r, len)
}

// unpackArray16 reads an array 16 value: a big-endian uint16 count followed
// by the elements.
func unpackArray16(r io.Reader) (interface{}, error) {
	var len uint16
	if err := binary.Read(r, binary.BigEndian, &len); err != nil {
		return nil, err
	}
	return unpackArrayBody(r, int(len))
}

// unpackArray32 reads an array 32 value: a big-endian uint32 count followed
// by the elements.
func unpackArray32(r io.Reader) (interface{}, error) {
	var len uint32
	if err := binary.Read(r, binary.BigEndian, &len); err != nil {
		return nil, err
	}
	return unpackArrayBody(r, int(len))
}
// unpackMap4 reads the entries of a fixmap; the count came from the low bits
// of the head byte.
func unpackMap4(r io.Reader, len int) (interface{}, error) {
	return unpackMapBody(r, len)
}

// unpackMap16 reads a map 16 value: a big-endian uint16 entry count followed
// by the key/value pairs.
func unpackMap16(r io.Reader) (interface{}, error) {
	var len uint16
	if err := binary.Read(r, binary.BigEndian, &len); err != nil {
		return nil, err
	}
	return unpackMapBody(r, int(len))
}

// unpackMap32 reads a map 32 value: a big-endian uint32 entry count followed
// by the key/value pairs.
func unpackMap32(r io.Reader) (interface{}, error) {
	var len uint32
	if err := binary.Read(r, binary.BigEndian, &len); err != nil {
		return nil, err
	}
	return unpackMapBody(r, int(len))
}
func unpackStringBody(r io.Reader, len int) (string, error) {
if len == 0 {
return "", nil
}
var n int
var err error
str := make([]byte, len)
if n, err = r.Read(str); err != nil {
return "", err
}
if n != len {
return "", errors.New("msgp: broken string format family was found")
}
return string(str), nil
}
func unpackBinBody(r io.Reader, len int) ([]byte, error) {
if len == 0 {
return nil, nil // nil as an empty slice
}
var n int
var err error
bin := make([]byte, len)
if n, err = r.Read(bin); err != nil {
return nil, err
}
if n != len {
return nil, errors.New("msgp: broken binary format family was found")
}
return bin, nil
}
// unpackArrayBody decodes len consecutive values into a []interface{}.
// A zero length yields nil (treated as an empty slice by callers).
func unpackArrayBody(r io.Reader, len int) (interface{}, error) {
	if len == 0 {
		return nil, nil // nil as an empty slice
	}
	slice := make([]interface{}, len)
	for i := range slice {
		val, err := UnpackPrimitive(r)
		if err != nil {
			return nil, err
		}
		slice[i] = val
	}
	return slice, nil
}
// unpackMapBody decodes len key/value pairs into a map[interface{}]interface{}.
// A zero length yields nil (treated as an empty map by callers).
func unpackMapBody(r io.Reader, len int) (interface{}, error) {
	if len == 0 {
		return nil, nil // nil as an empty map
	}
	var err error
	var key, val interface{}
	mapVal := make(map[interface{}]interface{})
	for inx := 0; inx < len; inx++ {
		if key, err = UnpackPrimitive(r); err != nil {
			return nil, err
		}
		if val, err = UnpackPrimitive(r); err != nil {
			return nil, err
		}
		// NOTE(review): if the decoded key is unhashable ([]byte from a bin
		// value, []interface{} from an array, or a nested map), this
		// assignment panics at runtime — confirm whether such keys can occur.
		mapVal[key] = val
	}
	return mapVal, nil
}
// assignValueFromString parses str and stores the converted value into dest.
//
// The literals "null" and "nil" reset dest to its type's zero value.
// Otherwise the conversion is chosen by dest's kind: bool, the signed and
// unsigned integer kinds, the float kinds, string, and pointer (which is
// allocated and then assigned recursively through its pointee). Kinds not
// covered by the switch are silently left untouched and nil is returned —
// NOTE(review): presumably callers only pass scalar-ish destinations;
// confirm whether an error would be preferable for unsupported kinds.
//
// A strconv parse failure is returned unchanged.
func assignValueFromString(dest reflect.Value, str string) error {
	if str == "null" || str == "nil" {
		dest.Set(reflect.Zero(dest.Type()))
		return nil
	}
	switch dest.Type().Kind() {
	case reflect.Bool:
		b, err := strconv.ParseBool(str)
		if err != nil {
			return err
		}
		dest.SetBool(b)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		i, err := strconv.ParseInt(str, 10, 64)
		if err != nil {
			return err
		}
		dest.SetInt(i)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		u, err := strconv.ParseUint(str, 10, 64)
		if err != nil {
			return err
		}
		dest.SetUint(u)
	case reflect.Float32, reflect.Float64:
		f, err := strconv.ParseFloat(str, 64)
		if err != nil {
			return err
		}
		dest.SetFloat(f)
	case reflect.String:
		dest.SetString(str)
	case reflect.Ptr:
		// Bug fix: the original used dest.Elem().Type(), which panics when
		// dest holds a nil pointer (Elem of a nil pointer is the zero
		// Value). The pointee type must come from the pointer's static
		// type, dest.Type().Elem(), before a fresh value is allocated.
		dest.Set(reflect.New(dest.Type().Elem()))
		return assignValueFromString(dest.Elem(), str)
	}
	return nil
}
package imaging
import (
"image"
"image/color"
"math"
)
// FlipH flips the image horizontally (from left to right) and returns the transformed image.
func FlipH(img image.Image) *image.NRGBA {
	s := newScanner(img)
	w, h := s.w, s.h
	stride := w * 4
	out := image.NewNRGBA(image.Rect(0, 0, w, h))
	parallel(0, h, func(rows <-chan int) {
		for y := range rows {
			start := y * out.Stride
			row := out.Pix[start : start+stride]
			// Scan the matching source row, then mirror it in place.
			s.scan(0, y, s.w, y+1, row)
			reverse(row)
		}
	})
	return out
}
// FlipV flips the image vertically (from top to bottom) and returns the transformed image.
func FlipV(img image.Image) *image.NRGBA {
	s := newScanner(img)
	w, h := s.w, s.h
	stride := w * 4
	out := image.NewNRGBA(image.Rect(0, 0, w, h))
	parallel(0, h, func(rows <-chan int) {
		for y := range rows {
			start := y * out.Stride
			// Destination row y comes from the vertically mirrored source row.
			s.scan(0, h-y-1, s.w, h-y, out.Pix[start:start+stride])
		}
	})
	return out
}
// Transpose flips the image horizontally and rotates 90 degrees counter-clockwise.
func Transpose(img image.Image) *image.NRGBA {
	s := newScanner(img)
	// Output dimensions are the source's, swapped.
	w, h := s.h, s.w
	stride := w * 4
	out := image.NewNRGBA(image.Rect(0, 0, w, h))
	parallel(0, h, func(rows <-chan int) {
		for y := range rows {
			start := y * out.Stride
			// Destination row y is source column y, scanned top to bottom.
			s.scan(y, 0, y+1, s.h, out.Pix[start:start+stride])
		}
	})
	return out
}
// Transverse flips the image vertically and rotates 90 degrees counter-clockwise.
func Transverse(img image.Image) *image.NRGBA {
	s := newScanner(img)
	// Output dimensions are the source's, swapped.
	w, h := s.h, s.w
	stride := w * 4
	out := image.NewNRGBA(image.Rect(0, 0, w, h))
	parallel(0, h, func(rows <-chan int) {
		for y := range rows {
			start := y * out.Stride
			row := out.Pix[start : start+stride]
			// Destination row y is the mirrored source column, then reversed.
			s.scan(h-y-1, 0, h-y, s.h, row)
			reverse(row)
		}
	})
	return out
}
// Rotate90 rotates the image 90 degrees counter-clockwise and returns the transformed image.
func Rotate90(img image.Image) *image.NRGBA {
	s := newScanner(img)
	// Output dimensions are the source's, swapped.
	w, h := s.h, s.w
	stride := w * 4
	out := image.NewNRGBA(image.Rect(0, 0, w, h))
	parallel(0, h, func(rows <-chan int) {
		for y := range rows {
			start := y * out.Stride
			// Destination row y is the mirrored source column.
			s.scan(h-y-1, 0, h-y, s.h, out.Pix[start:start+stride])
		}
	})
	return out
}
// Rotate180 rotates the image 180 degrees counter-clockwise and returns the transformed image.
func Rotate180(img image.Image) *image.NRGBA {
	s := newScanner(img)
	w, h := s.w, s.h
	stride := w * 4
	out := image.NewNRGBA(image.Rect(0, 0, w, h))
	parallel(0, h, func(rows <-chan int) {
		for y := range rows {
			start := y * out.Stride
			row := out.Pix[start : start+stride]
			// Vertically mirrored source row, then reversed horizontally.
			s.scan(0, h-y-1, s.w, h-y, row)
			reverse(row)
		}
	})
	return out
}
// Rotate270 rotates the image 270 degrees counter-clockwise and returns the transformed image.
func Rotate270(img image.Image) *image.NRGBA {
	s := newScanner(img)
	// Output dimensions are the source's, swapped.
	w, h := s.h, s.w
	stride := w * 4
	out := image.NewNRGBA(image.Rect(0, 0, w, h))
	parallel(0, h, func(rows <-chan int) {
		for y := range rows {
			start := y * out.Stride
			row := out.Pix[start : start+stride]
			// Destination row y is source column y, then reversed.
			s.scan(y, 0, y+1, s.h, row)
			reverse(row)
		}
	})
	return out
}
// Rotate rotates an image by the given angle counter-clockwise.
// The angle parameter is the rotation angle in degrees.
// The bgColor parameter specifies the color of the uncovered zone after the rotation.
func Rotate(img image.Image, angle float64, bgColor color.Color) *image.NRGBA {
	// Normalize the angle into [0, 360) and take the exact fast paths
	// for quarter turns.
	angle -= math.Floor(angle/360) * 360
	switch angle {
	case 0:
		return Clone(img)
	case 90:
		return Rotate90(img)
	case 180:
		return Rotate180(img)
	case 270:
		return Rotate270(img)
	}

	src := toNRGBA(img)
	srcW, srcH := src.Bounds().Max.X, src.Bounds().Max.Y
	dstW, dstH := rotatedSize(srcW, srcH, angle)
	dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
	if dstW <= 0 || dstH <= 0 {
		return dst
	}

	// Pixel-center offsets of the source and destination images.
	srcCX := float64(srcW)/2 - 0.5
	srcCY := float64(srcH)/2 - 0.5
	dstCX := float64(dstW)/2 - 0.5
	dstCY := float64(dstH)/2 - 0.5

	bg := color.NRGBAModel.Convert(bgColor).(color.NRGBA)
	sin, cos := math.Sincos(math.Pi * angle / 180)

	parallel(0, dstH, func(rows <-chan int) {
		for y := range rows {
			for x := 0; x < dstW; x++ {
				// Map the destination pixel back into source space and
				// sample it bilinearly.
				sx, sy := rotatePoint(float64(x)-dstCX, float64(y)-dstCY, sin, cos)
				interpolatePoint(dst, x, y, src, sx+srcCX, sy+srcCY, bg)
			}
		}
	})
	return dst
}
// rotatePoint applies the rotation given by sin/cos to the point (x, y)
// and returns the rotated coordinates.
func rotatePoint(x, y, sin, cos float64) (float64, float64) {
	rx := cos*x - sin*y
	ry := sin*x + cos*y
	return rx, ry
}

// rotatedSize returns the bounding-box dimensions of a w x h image
// rotated by angle degrees. Non-positive input dimensions yield 0, 0.
func rotatedSize(w, h int, angle float64) (int, int) {
	if w <= 0 || h <= 0 {
		return 0, 0
	}

	sin, cos := math.Sincos(math.Pi * angle / 180)

	// Rotate three corners of the image; the fourth, (0, 0), is fixed
	// by the rotation and participates as the constant 0 below.
	x1, y1 := rotatePoint(float64(w-1), 0, sin, cos)
	x2, y2 := rotatePoint(float64(w-1), float64(h-1), sin, cos)
	x3, y3 := rotatePoint(0, float64(h-1), sin, cos)

	minx := math.Min(x1, math.Min(x2, math.Min(x3, 0)))
	maxx := math.Max(x1, math.Max(x2, math.Max(x3, 0)))
	miny := math.Min(y1, math.Min(y2, math.Min(y3, 0)))
	maxy := math.Max(y1, math.Max(y2, math.Max(y3, 0)))

	// Widen each span by one when its fractional part exceeds a small
	// tolerance, so truncation below does not clip the image.
	neww := maxx - minx + 1
	if neww-math.Floor(neww) > 0.1 {
		neww++
	}
	newh := maxy - miny + 1
	if newh-math.Floor(newh) > 0.1 {
		newh++
	}
	return int(neww), int(newh)
}
// interpolatePoint writes the bilinearly interpolated color of src at
// the (possibly fractional) position (xf, yf) into the destination
// pixel (dstX, dstY). Samples falling outside the source bounds
// contribute bgColor instead. Channels are blended alpha-premultiplied
// and un-premultiplied at the end, so transparent samples do not bleed
// their RGB values into the result.
func interpolatePoint(dst *image.NRGBA, dstX, dstY int, src *image.NRGBA, xf, yf float64, bgColor color.NRGBA) {
	j := dstY*dst.Stride + dstX*4
	// Full-slice expression pins capacity so writes cannot spill past
	// this pixel's four bytes.
	d := dst.Pix[j : j+4 : j+4]
	x0 := int(math.Floor(xf))
	y0 := int(math.Floor(yf))
	bounds := src.Bounds()
	// If the top-left sample is more than one pixel outside the source,
	// all four samples miss it: paint the background and bail out.
	if !image.Pt(x0, y0).In(image.Rect(bounds.Min.X-1, bounds.Min.Y-1, bounds.Max.X, bounds.Max.Y)) {
		d[0] = bgColor.R
		d[1] = bgColor.G
		d[2] = bgColor.B
		d[3] = bgColor.A
		return
	}
	// Fractional offsets within the 2x2 sample cell.
	xq := xf - float64(x0)
	yq := yf - float64(y0)
	points := [4]image.Point{
		{x0, y0},
		{x0 + 1, y0},
		{x0, y0 + 1},
		{x0 + 1, y0 + 1},
	}
	// Standard bilinear weights; they sum to 1.
	weights := [4]float64{
		(1 - xq) * (1 - yq),
		xq * (1 - yq),
		(1 - xq) * yq,
		xq * yq,
	}
	var r, g, b, a float64
	for i := 0; i < 4; i++ {
		p := points[i]
		w := weights[i]
		if p.In(bounds) {
			// RGB is accumulated premultiplied by (alpha * weight).
			i := p.Y*src.Stride + p.X*4
			s := src.Pix[i : i+4 : i+4]
			wa := float64(s[3]) * w
			r += float64(s[0]) * wa
			g += float64(s[1]) * wa
			b += float64(s[2]) * wa
			a += wa
		} else {
			// Out-of-bounds sample: substitute the background color.
			wa := float64(bgColor.A) * w
			r += float64(bgColor.R) * wa
			g += float64(bgColor.G) * wa
			b += float64(bgColor.B) * wa
			a += wa
		}
	}
	// a == 0 means every sample was fully transparent; the destination
	// pixel is left as-is (zeroed by image.NewNRGBA in the callers here).
	if a != 0 {
		aInv := 1 / a
		d[0] = clamp(r * aInv)
		d[1] = clamp(g * aInv)
		d[2] = clamp(b * aInv)
		d[3] = clamp(a)
	}
}
package bloombits
import (
"sync"
)
// request represents a bloom retrieval task to prioritize and pull from the local
// database or remotely from the network.
type request struct {
	section uint64 // Section index to retrieve the bit-vector from
	bit     uint   // Bit index within the section to retrieve the vector of
}
// response represents the state of a requested bit-vector through a scheduler.
// A nil cached field means the request is still in flight; deliver fills it in
// and closes done exactly once when the data arrives.
type response struct {
	cached []byte        // Cached bits to dedup multiple requests
	done   chan struct{} // Channel to allow waiting for completion
}
// scheduler handles the scheduling of bloom-filter retrieval operations for
// entire section-batches belonging to a single bloom bit. Beside scheduling the
// retrieval operations, this struct also deduplicates the requests and caches
// the results to minimize network/database overhead even in complex filtering
// scenarios.
type scheduler struct {
	bit       uint                 // Index of the bit in the bloom filter this scheduler is responsible for
	responses map[uint64]*response // Currently pending retrieval requests or already cached responses
	lock      sync.Mutex           // Lock protecting the responses from concurrent access
}
// newScheduler creates a new bloom-filter retrieval scheduler for a specific
// bit index.
func newScheduler(idx uint) *scheduler {
	sched := &scheduler{
		bit:       idx,
		responses: make(map[uint64]*response),
	}
	return sched
}
// run creates a retrieval pipeline, receiving section indexes from sections and
// returning the results in the same order through the done channel. Concurrent
// runs of the same scheduler are allowed, leading to retrieval task deduplication.
func (s *scheduler) run(sections chan uint64, dist chan *request, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
	// The forwarding channel between the two stages gets the same
	// capacity as the distribution channel, since that bounds the
	// pipeline's throughput anyway.
	forward := make(chan uint64, cap(dist))

	// Two stages: user -> distributor (requests), distributor -> user
	// (ordered deliveries).
	wg.Add(2)
	go s.scheduleRequests(sections, dist, forward, quit, wg)
	go s.scheduleDeliveries(forward, done, quit, wg)
}
// reset cleans up any leftovers from previous runs. This is required before a
// restart to ensure that no previously requested but never delivered state will
// cause a lockup.
func (s *scheduler) reset() {
	s.lock.Lock()
	defer s.lock.Unlock()

	// Drop every still-pending entry; responses that were already
	// delivered (cached != nil) stay usable as a cache.
	for section, res := range s.responses {
		if res.cached != nil {
			continue
		}
		delete(s.responses, section)
	}
}
// scheduleRequests reads section retrieval requests from the input channel,
// deduplicates the stream and pushes unique retrieval tasks into the distribution
// channel for a database or network layer to honour.
//
// Every incoming section — duplicate or not — is forwarded to pend, so the
// delivery stage emits exactly one result per request in arrival order.
func (s *scheduler) scheduleRequests(reqs chan uint64, dist chan *request, pend chan uint64, quit chan struct{}, wg *sync.WaitGroup) {
	// Clean up the goroutine and pipeline when done. Closing pend lets the
	// delivery stage drain and terminate too.
	defer wg.Done()
	defer close(pend)

	// Keep reading and scheduling section requests
	for {
		select {
		case <-quit:
			return

		case section, ok := <-reqs:
			// New section retrieval requested
			if !ok {
				return
			}
			// Deduplicate retrieval requests: only the first requester of a
			// section creates the response entry and triggers a fetch.
			unique := false

			s.lock.Lock()
			if s.responses[section] == nil {
				s.responses[section] = &response{
					done: make(chan struct{}),
				}
				unique = true
			}
			s.lock.Unlock()

			// Schedule the section for retrieval and notify the deliverer to
			// expect this section. Both sends also select on quit so a
			// shutdown cannot deadlock against a full channel.
			if unique {
				select {
				case <-quit:
					return
				case dist <- &request{bit: s.bit, section: section}:
				}
			}
			select {
			case <-quit:
				return
			case pend <- section:
			}
		}
	}
}
// scheduleDeliveries reads section acceptance notifications and waits for them
// to be delivered, pushing them into the output data buffer.
//
// Because notifications arrive on pend in request order and each one is
// awaited before moving on, results are emitted on done in the same order
// the sections were requested.
func (s *scheduler) scheduleDeliveries(pend chan uint64, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
	// Clean up the goroutine and pipeline when done. Closing done signals
	// the consumer that no further results will follow.
	defer wg.Done()
	defer close(done)

	// Keep reading notifications and scheduling deliveries
	for {
		select {
		case <-quit:
			return

		case idx, ok := <-pend:
			// New section retrieval pending
			if !ok {
				return
			}
			// Wait until the request is honoured; res.done is closed by
			// deliver once the bit-vector arrives. Also watch quit so a
			// shutdown cannot strand this goroutine.
			s.lock.Lock()
			res := s.responses[idx]
			s.lock.Unlock()

			select {
			case <-quit:
				return
			case <-res.done:
			}
			// Deliver the result
			select {
			case <-quit:
				return
			case done <- res.cached:
			}
		}
	}
}
// deliver is called by the request distributor when a reply to a request arrives.
func (s *scheduler) deliver(sections []uint64, data [][]byte) {
s.lock.Lock()
defer s.lock.Unlock()
for i, section := range sections {
if res := s.responses[section]; res != nil && res.cached == nil { // Avoid non-requests and double deliveries
res.cached = data[i]
close(res.done)
}
}
} | core/bloombits/scheduler.go | 0.661486 | 0.480905 | scheduler.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.