code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package types
import (
"github.com/meshplus/gosdk/kvsql/util/hack"
"math"
"time"
)
// Kind constants.
const (
KindNull byte = 0
KindInt64 byte = 1
KindUint64 byte = 2
KindFloat32 byte = 3
KindFloat64 byte = 4
KindString byte = 5
KindBytes byte = 6
KindMysqlDuration byte = 9
KindMysqlEnum byte = 10
KindMysqlTime byte = 13
KindInterface byte = 14
)
// Datum is a data box holds different kind of data.
// It has better performance and is easier to use than `interface{}`.
//
// Exactly one storage field is meaningful at a time, selected by k:
// numeric kinds live in i, string/bytes kinds in b, everything else in x.
// nolint
type Datum struct {
	k       byte   // datum kind.
	decimal uint16 // decimal can hold uint16 values.
	length  uint32 // length can hold uint32 values.
	i       int64  // i can hold int64 uint64 float64 values.
	// The set of supported character sets still needs to be specified.
	collation string      // collation hold the collation information for string value.
	b         []byte      // b can hold string or []byte values.
	x         interface{} // x hold all other types.
}
// Collation gets the collation of the datum.
func (d *Datum) Collation() string {
return d.collation
}
// SetCollation sets the collation of the datum.
func (d *Datum) SetCollation(collation string) {
d.collation = collation
}
// Frac gets the frac of the datum.
func (d *Datum) Frac() int {
return int(d.decimal)
}
// SetFrac sets the frac of the datum.
func (d *Datum) SetFrac(frac int) {
d.decimal = uint16(frac)
}
// Length gets the length of the datum.
func (d *Datum) Length() int {
return int(d.length)
}
// SetLength sets the length of the datum.
func (d *Datum) SetLength(l int) {
d.length = uint32(l)
}
// Kind gets the kind of the datum.
func (d *Datum) Kind() byte {
return d.k
}
// GetInterface gets interface value.
func (d *Datum) GetInterface() interface{} {
return d.x
}
// SetInterface sets interface to datum.
func (d *Datum) SetInterface(x interface{}) {
d.k = KindInterface
d.x = x
}
// SetNull sets datum to nil.
func (d *Datum) SetNull() {
d.k = KindNull
d.x = nil
}
// GetInt64 gets int64 value.
func (d *Datum) GetInt64() int64 {
return d.i
}
// SetInt64 sets int64 value.
func (d *Datum) SetInt64(i int64) {
d.k = KindInt64
d.i = i
}
// GetUint64 gets uint64 value.
func (d *Datum) GetUint64() uint64 {
return uint64(d.i)
}
// SetUint64 sets uint64 value.
func (d *Datum) SetUint64(i uint64) {
d.k = KindUint64
d.i = int64(i)
}
// GetFloat32 gets float32 value.
func (d *Datum) GetFloat32() float32 {
return float32(math.Float64frombits(uint64(d.i)))
}
// SetFloat32 sets float32 value.
func (d *Datum) SetFloat32(f float32) {
d.k = KindFloat32
d.i = int64(math.Float64bits(float64(f)))
}
// GetFloat64 gets float64 value.
func (d *Datum) GetFloat64() float64 {
return math.Float64frombits(uint64(d.i))
}
// SetFloat64 sets float64 value.
func (d *Datum) SetFloat64(f float64) {
d.k = KindFloat64
d.i = int64(math.Float64bits(f))
}
// GetString gets string value.
func (d *Datum) GetString() string {
return string(hack.String(d.b))
}
// SetString sets string value.
// sink(s) is called before hack.Slice aliases the string's backing memory —
// presumably to force s to escape to the heap so the unsafe string→[]byte
// view stays valid; TODO confirm against the hack package's documentation.
func (d *Datum) SetString(s string, collation string) {
	d.k = KindString
	sink(s)
	d.b = hack.Slice(s)
	d.collation = collation
}
// sink prevents s from being allocated on the stack.
var sink = func(s string) {
}
// GetBytes gets bytes value.
func (d *Datum) GetBytes() []byte {
return d.b
}
// SetBytes sets bytes value to datum.
func (d *Datum) SetBytes(b []byte) {
d.k = KindBytes
d.b = b
}
// GetMysqlTime gets types.Time value
func (d *Datum) GetMysqlTime() Time {
return d.x.(Time)
}
// SetMysqlTime sets types.Time value
func (d *Datum) SetMysqlTime(b Time) {
d.k = KindMysqlTime
d.x = b
}
// GetMysqlDuration gets Duration value
func (d *Datum) GetMysqlDuration() Duration {
return Duration{Duration: time.Duration(d.i), Fsp: int8(d.decimal)}
}
// SetMysqlDuration sets Duration value
func (d *Datum) SetMysqlDuration(b Duration) {
d.k = KindMysqlDuration
d.i = int64(b.Duration)
d.decimal = uint16(b.Fsp)
}
// GetMysqlEnum gets Enum value
func (d *Datum) GetMysqlEnum() Enum {
str := string(hack.String(d.b))
return Enum{Name: str}
}
// SetMysqlEnum sets Enum value
func (d *Datum) SetMysqlEnum(b Enum, collation string) {
d.k = KindMysqlEnum
sink(b.Name)
d.collation = collation
d.b = hack.Slice(b.Name)
}
// IsNull checks if datum is null.
func (d *Datum) IsNull() bool {
return d.k == KindNull
} | kvsql/types/datum.go | 0.619817 | 0.445228 | datum.go | starcoder |
package ticketstats
import (
"fmt"
"log"
"time"
"github.com/montanaflynn/stats"
)
// Stats groups a average work statistics result for a ticket list.
type Stats struct {
Mean Work
Median Work
Count int
}
// ToString renders the statistics as a single human-readable line of the
// form "mean: <work>, median: <work>, count: <n>", with work values
// formatted by formatWork.
func (stats Stats) ToString() string {
	return fmt.Sprintf("mean: %s, median: %s, count: %d",
		formatWork(stats.Mean),
		formatWork(stats.Median),
		stats.Count)
}
// Timerages groups the time rages used for ticket statistics
type TimeRanges struct {
Week Stats
Month Stats
Quarter Stats
Year Stats
}
// ToString renders the week/month/quarter/year statistics as four
// human-readable lines. Formatted work values are truncated to at most
// 15 characters by the %.15s verb.
func (tr TimeRanges) ToString() string {
	str := ""
	stats := tr.Week
	str += fmt.Sprintf("last week: mean: %.15s, median: %.15s, %d issues\n",
		formatWork(stats.Mean),
		formatWork(stats.Median),
		stats.Count)
	stats = tr.Month
	str += fmt.Sprintf("last month: mean: %.15s, median: %.15s, %d issues\n",
		formatWork(stats.Mean),
		formatWork(stats.Median),
		stats.Count)
	stats = tr.Quarter
	str += fmt.Sprintf("last quarter: mean: %.15s, median: %.15s, %d issues\n",
		formatWork(stats.Mean),
		formatWork(stats.Median),
		stats.Count)
	stats = tr.Year
	str += fmt.Sprintf("last year: mean: %.15s, median: %.15s, %d issues\n",
		formatWork(stats.Mean),
		formatWork(stats.Median),
		stats.Count)
	return str
}
// OldBugs finds all unresolved bugs older than one month.
// Bug tickets are selected by the configured bug type, narrowed to those
// older than one month, and then filtered to the still-unresolved ones.
func OldBugs(issues []*Issue, config Config) []*Issue {
	oldBugs := make([]*Issue, 0)
	bugs := OlderThanOneMonth(FilterByType(issues, config.Types.Bug))
	for _, issue := range bugs {
		if !issue.IsResolved() {
			oldBugs = append(oldBugs, issue)
		}
	}
	return oldBugs
}
// ResolutionTime calculates resolution time statistics for a ticket list.
// Tickets without any booked time are excluded; when none remain, a zeroed
// Stats is returned. Errors from the stats package are logged and the
// affected value falls back to its zero result.
func ResolutionTime(issues []*Issue) Stats {
	var booked []float64
	for _, issue := range issues {
		// Skip tickets that have no time bookings at all.
		if issue.TimeSpend > 0.0 {
			booked = append(booked, float64(issue.TimeSpend))
		}
	}
	if len(booked) == 0 {
		// No ticket carried any booked work.
		return Stats{Mean: Work(0), Median: Work(0), Count: 0}
	}
	mean, err := stats.Mean(booked)
	if err != nil {
		log.Println("ERROR: mean of resolution time", err)
	}
	median, err := stats.Median(booked)
	if err != nil {
		log.Println("ERROR: median of resolution time", err)
	}
	return Stats{
		Mean:   Work(mean),
		Median: Work(median),
		Count:  len(booked),
	}
}
// ResultionTimesByType calculates the resolution time statistics for each
// ticket type in the given list, across the last week/month/quarter/year.
// Types whose tickets carry no booked work hours in the last year are
// omitted. Each narrower range is computed from the previous (wider)
// range's ticket set, so the filters nest.
// NOTE(review): "Resultion" is a typo for "Resolution", but this is
// exported API; renaming would break callers — consider a deprecation
// alias instead.
func ResultionTimesByType(issues []*Issue) map[string]TimeRanges {
	result := make(map[string]TimeRanges)
	for _, t := range Types(issues) {
		typeIssues := FilterByType(issues, t)
		var tr TimeRanges
		typeIssuesRange := ClosedLastYear(typeIssues)
		tr.Year = ResolutionTime(typeIssuesRange)
		if tr.Year.Count == 0 {
			// no booked work hours
			continue
		}
		typeIssuesRange = ClosedLastQuarter(typeIssuesRange)
		tr.Quarter = ResolutionTime(typeIssuesRange)
		typeIssuesRange = ClosedLastMonth(typeIssuesRange)
		tr.Month = ResolutionTime(typeIssuesRange)
		typeIssuesRange = ClosedLastWeek(typeIssuesRange)
		tr.Week = ResolutionTime(typeIssuesRange)
		result[t] = tr
	}
	return result
}
// WorkAfter sums all work done after a given start date.
func WorkAfter(issues []*Issue, start time.Time) Work {
var work Work
for _, issue := range issues {
for _, log := range issue.LogWorks {
if log.Date.After(start) {
work += log.Hours
}
}
}
return work
} | ticketstats/stats.go | 0.704872 | 0.421492 | stats.go | starcoder |
package dataframe
import (
"fmt"
"logarithmotechnia/vector"
"strconv"
)
type Column struct {
name string
vector vector.Vector
}
type Dataframe struct {
rowNum int
colNum int
columns []vector.Vector
columnNames []string
columnNamesVector vector.Vector
groupedBy []string
}
func (df *Dataframe) RowNum() int {
return df.rowNum
}
func (df *Dataframe) ColNum() int {
return df.colNum
}
func (df *Dataframe) Clone() *Dataframe {
return New(df.columns, df.OptionsWithNames()...)
}
func (df *Dataframe) Cn(name string) vector.Vector {
index := df.columnIndexByName(name)
if index > 0 {
return df.columns[index-1]
}
return nil
}
func (df *Dataframe) C(selector interface{}) vector.Vector {
if index, ok := selector.(int); ok {
return df.Ci(index)
}
if name, ok := selector.(string); ok {
return df.Cn(name)
}
return nil
}
func (df *Dataframe) Ci(index int) vector.Vector {
if df.IsValidColumnIndex(index) {
return df.columns[index-1]
}
return nil
}
func (df *Dataframe) Names() vector.Vector {
return df.columnNamesVector
}
func (df *Dataframe) NamesAsStrings() []string {
names := make([]string, df.colNum)
copy(names, df.columnNames)
return names
}
func (df *Dataframe) ByIndices(indices []int) *Dataframe {
newColumns := make([]vector.Vector, df.colNum)
for i, column := range df.columns {
newColumns[i] = column.ByIndices(indices)
}
return New(newColumns, df.OptionsWithNames()...)
}
func (df *Dataframe) Columns() []vector.Vector {
return df.columns
}
func (df *Dataframe) IsEmpty() bool {
return df.colNum == 0
}
// IsValidColumnIndex reports whether index addresses an existing column.
// Column indices are 1-based.
func (df *Dataframe) IsValidColumnIndex(index int) bool {
	return index >= 1 && index <= df.colNum
}
func (df *Dataframe) HasColumn(name string) bool {
return strPosInSlice(df.columnNames, name) != -1
}
// GroupBy groups the dataframe rows by the values of the given columns.
// Selectors may be column names (string) or lists of names ([]string);
// names not present in the dataframe are ignored. If no valid columns
// remain, or grouping yields no groups, the receiver is returned
// unchanged.
func (df *Dataframe) GroupBy(selectors ...interface{}) *Dataframe {
	columns := []string{}
	for _, selector := range selectors {
		// Idiomatic type switch: binds the asserted value directly
		// instead of re-asserting inside each case.
		switch s := selector.(type) {
		case string:
			columns = append(columns, s)
		case []string:
			columns = append(columns, s...)
		}
	}
	groupByColumns := []string{}
	for _, column := range columns {
		if df.Names().Has(column) {
			groupByColumns = append(groupByColumns, column)
		}
	}
	if len(groupByColumns) == 0 {
		return df
	}
	// Refine the grouping column by column.
	var groups [][]int
	for _, groupBy := range groupByColumns {
		groups = df.groupByColumn(groupBy, groups)
	}
	if len(groups) == 0 {
		return df
	}
	newColumns := make([]vector.Vector, df.colNum)
	for i, column := range df.columns {
		newColumns[i] = column.GroupByIndices(groups)
	}
	newDf := New(newColumns, df.OptionsWithNames()...)
	newDf.groupedBy = groupByColumns
	return newDf
}
// groupByColumn refines an existing row grouping by the values of one
// additional column. With no current groups it simply groups the whole
// column. Otherwise each existing group is sub-grouped by that column's
// values within the group, and the sub-group indices (relative to the
// group's selection) are translated back to absolute row indices.
// Single-row groups cannot split further and are kept as-is.
func (df *Dataframe) groupByColumn(groupBy string, curGroups [][]int) [][]int {
	if len(curGroups) == 0 {
		groups, _ := df.Cn(groupBy).Groups()
		return groups
	}
	newIndices := [][]int{}
	for _, indices := range curGroups {
		if len(indices) == 1 {
			// A one-element group cannot be split any further.
			newIndices = append(newIndices, indices)
			continue
		}
		subGroups, _ := df.Cn(groupBy).ByIndices(indices).Groups()
		replaceGroups := make([][]int, len(subGroups))
		for j, subIndices := range subGroups {
			newGroup := make([]int, len(subIndices))
			for k, idx := range subIndices {
				// idx is 1-based within the sub-selection; map it back to
				// the absolute row index it came from.
				newGroup[k] = indices[idx-1]
			}
			replaceGroups[j] = newGroup
		}
		newIndices = append(newIndices, replaceGroups...)
	}
	return newIndices
}
func (df *Dataframe) IsGrouped() bool {
return len(df.groupedBy) > 0
}
func (df *Dataframe) GroupedBy() []string {
groupedBy := make([]string, len(df.groupedBy))
copy(groupedBy, df.groupedBy)
return groupedBy
}
// Ungroup removes grouping from the dataframe and returns a new,
// ungrouped dataframe. An ungrouped receiver is returned unchanged.
func (df *Dataframe) Ungroup() *Dataframe {
	if !df.IsGrouped() {
		return df
	}
	columns := make([]vector.Vector, df.colNum)
	for i := 0; i < df.colNum; i++ {
		columns[i] = df.columns[i].Ungroup()
	}
	// Bug fix: the previous version passed the original (still grouped)
	// df.columns to New, silently discarding the ungrouped columns built
	// above.
	return New(columns, df.OptionsWithNames()...)
}
func (df *Dataframe) String() string {
var str string
for i, column := range df.columns {
str += fmt.Sprintf("%s: %v\n", df.columnNames[i], column)
}
return str
}
func (df *Dataframe) columnIndexByName(name string) int {
index := 1
for _, columnName := range df.columnNames {
if columnName == name {
return index
}
index++
}
return 0
}
func (df *Dataframe) OptionsWithNames() []vector.Option {
return append(df.Options(), vector.OptionColumnNames(df.columnNames))
}
func (df *Dataframe) Options() []vector.Option {
return []vector.Option{}
}
// generateColumnNames produces the default 1-based column names
// "1", "2", ..., up to length.
func generateColumnNames(length int) []string {
	names := make([]string, 0, length)
	for n := 1; n <= length; n++ {
		names = append(names, strconv.Itoa(n))
	}
	return names
}
func New(data interface{}, options ...vector.Option) *Dataframe {
var df *Dataframe
switch data.(type) {
case []vector.Vector:
df = dataframeFromVectors(data.([]vector.Vector), options...)
case []Column:
df = dataframeFromColumns(data.([]Column), options...)
default:
df = dataframeFromVectors([]vector.Vector{})
}
return df
}
func dataframeFromColumns(columns []Column, options ...vector.Option) *Dataframe {
vectors := []vector.Vector{}
names := []string{}
for _, column := range columns {
vectors = append(vectors, column.vector)
names = append(names, column.name)
}
options = append(options, vector.OptionColumnNames(names))
return dataframeFromVectors(vectors, options...)
}
func dataframeFromVectors(vectors []vector.Vector, options ...vector.Option) *Dataframe {
maxLen := 0
for _, v := range vectors {
if v.Len() > maxLen {
maxLen = v.Len()
}
}
for i, v := range vectors {
if v.Len() < maxLen {
vectors[i] = v.Append(vector.NA(maxLen - v.Len()))
} else {
vectors[i] = v
}
}
colNum := len(vectors)
conf := vector.MergeOptions(options)
columnNames := generateColumnNames(colNum)
if conf.HasOption(vector.KeyOptionColumnNames) {
names := conf.Value(vector.KeyOptionColumnNames).([]string)
names = renameDuplicateColumns(names)
if colNum >= len(names) {
copy(columnNames, names)
} else {
copy(columnNames, names[0:colNum])
}
}
return &Dataframe{
rowNum: maxLen,
colNum: colNum,
columns: vectors,
columnNames: columnNames,
columnNamesVector: vector.StringWithNA(columnNames, nil),
}
}
// renameDuplicateColumns makes every column name unique by appending
// "_1", "_2", ... to repeated names. The first occurrence of each name is
// kept unchanged; a generated candidate is itself re-checked against all
// names accepted so far.
func renameDuplicateColumns(names []string) []string {
	if len(names) == 0 {
		return names
	}
	unique := make([]string, len(names))
	unique[0] = names[0]
	// taken reports whether candidate collides with any of the first
	// limit accepted names.
	taken := func(candidate string, limit int) bool {
		for j := 0; j < limit; j++ {
			if unique[j] == candidate {
				return true
			}
		}
		return false
	}
	for i := 1; i < len(names); i++ {
		candidate := names[i]
		for suffix := 1; taken(candidate, i); suffix++ {
			candidate = names[i] + "_" + strconv.Itoa(suffix)
		}
		unique[i] = candidate
	}
	return unique
}
package predictiongame
import "math"
import "fmt"
// evaluateConfidence judges whether a player's stated confidence matches
// their actual share of correct answers, using a two-sided binomial test.
// correct of questions answers were right; statedConfidence is the
// success probability the player claimed (e.g. 0.8). Returns a
// human-readable verdict: overconfident, underconfident, or calibrated.
func evaluateConfidence(correct int, questions int, statedConfidence float64) string {
	var message string
	if correct < 0 || correct > questions {
		return "Assertion failed in evaluateConfidence: variable questions out of bounds!"
	}
	// Two-sided binomial test: each tail probability is doubled, so the
	// resulting pseudo p-values can exceed 1.
	pLeft := binomCDF(float64(correct), float64(questions), statedConfidence) * 2
	pRight := (1 - binomCDF(float64(correct-1), float64(questions), statedConfidence)) * 2
	// Ratio of observed accuracy to stated confidence, reported as the
	// estimated over/under-confidence factor.
	factor := (float64(correct) / float64(questions)) / statedConfidence
	if pLeft <= 0.2 {
		message = fmt.Sprintf("You're %s overconfident, by an estimated factor of %.1fX",
			degree(pLeft), 1/factor)
	} else if pRight <= 0.2 {
		message = fmt.Sprintf("You're %s underconfident, by an estimated factor of %.1fX",
			degree(pRight), factor)
	} else {
		message = fmt.Sprintf("Your quota of correct answers is statistically close to the expected %g. Congratulations, you seem to be well calibrated!", statedConfidence)
	}
	return message
}
// degree maps a p-value to a verbal strength-of-evidence label.
func degree(p float64) string {
	switch {
	case p > 0.2:
		return "not"
	case p > 0.05:
		return "likely"
	case p > 0.0001:
		return "very likely"
	default:
		return "definitely"
	}
}
// roundP rounds a p-value up to the nearest conventional significance
// cutoff (1, 0.05, 0.01, 0.001, 1e-4, 1e-5, 1e-6). Values above 1
// (possible because the two-sided test doubles a tail probability) clamp
// to 1; values at or below the smallest cutoff return that cutoff.
func roundP(p float64) float64 {
	cutoffs := [...]float64{1, 0.05, 0.01, 0.001, 1e-4, 1e-5, 1e-6}
	// Bug fix: the previous version started scanning at index 0, so any
	// p > 1 returned cutoffs[-1] — an index-out-of-range panic.
	for m := 1; m < len(cutoffs); m++ {
		if p > cutoffs[m] {
			return cutoffs[m-1]
		}
	}
	return cutoffs[len(cutoffs)-1]
}
// binomCDF returns P(X <= k) for X ~ Binomial(n, p), via the regularized
// incomplete beta function identity P(X <= k) = 1 - I_p(k+1, n-k).
func binomCDF(k float64, n float64, p float64) float64 {
	return 1.0 - betai(k+1, n-k, p)
}
func gammln(xx float64) float64 {
cof := [6]float64{76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5}
x := xx
y := xx
tmp := x + 5.5
tmp = tmp - (x+0.5)*math.Log(tmp)
ser := 1.000000000190015
j := 0
for j <= 5 {
y = y + 1
ser = ser + cof[j]/y
j = j + 1
}
return -tmp + math.Log(2.5066282746310005*ser/x)
}
func betacf(a float64, b float64, x float64) float64 {
var aa, c, d, del, h, qab, qam, qap float64
const FPMIN float64 = 1.0e-30
qab = a + b
qap = a + 1.0
qam = a - 1.0
c = 1.0
d = 1.0 - qab*x/qap
if math.Abs(d) < FPMIN {
d = FPMIN
}
d = 1.0 / d
h = d
m := 1
for m <= 100 {
m1 := float64(m)
m2 := 2 * m1
aa = m1 * (b - m1) * x / ((qam + m2) * (a + m2))
d = 1.0 + aa*d
if math.Abs(d) < FPMIN {
d = FPMIN
}
c = 1.0 + aa/c
if math.Abs(d) < FPMIN {
d = FPMIN
}
d = 1.0 / d
h *= d * c
aa = -(a + m1) * (qab + m1) * x / ((a + m2) * (qap + m2))
d = 1.0 + aa*d
if math.Abs(d) < FPMIN {
d = FPMIN
}
c = 1.0 + aa/c
if math.Abs(d) < FPMIN {
d = FPMIN
}
d = 1.0 / d
del = d * c
h *= del
if math.Abs(del-1.0) < 3.0e-7 {
break
}
m = m + 1
}
if m > 100 {
return math.NaN()
}
return h
}
// betai returns the regularized incomplete beta function I_x(a, b)
// (Numerical Recipes in C, §6.4). Returns NaN for x outside [0, 1].
func betai(a float64, b float64, x float64) float64 {
	var bt float64
	if x < 0.0 || x > 1.0 {
		return math.NaN()
	}
	if x == 0.0 || x == 1.0 {
		bt = 0.0
	} else {
		// Prefactor Γ(a+b)/(Γ(a)Γ(b)) · x^a (1-x)^b, computed in log
		// space to avoid overflow.
		bt = math.Exp(gammln(a+b) - gammln(a) - gammln(b) + a*math.Log(x) + b*math.Log(1.0-x))
	}
	// Use the continued fraction directly where it converges fastest;
	// otherwise apply the symmetry I_x(a,b) = 1 - I_{1-x}(b,a).
	if x < (a+1.0)/(a+b+2.0) {
		return bt * betacf(a, b, x) / a
	}
	return 1.0 - bt*betacf(b, a, 1.0-x)/b
}
// Package datastructure implements some data structure. eg. list, linklist, stack, queue, tree, graph.
package datastructure
import (
"reflect"
)
// List is a linear table, implemented with slice
type List[T any] struct {
data []T
}
// NewList return a pointer of List
func NewList[T any](data []T) *List[T] {
return &List[T]{data: data}
}
// Data return list data
func (l *List[T]) Data() []T {
return l.data
}
// ValueOf return the value pointer at index of list data.
func (l *List[T]) ValueOf(index int) (*T, bool) {
if index < 0 || index >= len(l.data) {
return nil, false
}
return &l.data[index], true
}
// IndexOf reture the index of value. if not found return -1
func (l *List[T]) IndexOf(value T) int {
index := -1
data := l.data
for i, v := range data {
if reflect.DeepEqual(v, value) {
index = i
break
}
}
return index
}
// Contain checks if the value in the list or not
func (l *List[T]) Contain(value T) bool {
data := l.data
for _, v := range data {
if reflect.DeepEqual(v, value) {
return true
}
}
return false
}
// Push append value to the list data
func (l *List[T]) Push(value T) {
l.data = append(l.data, value)
}
// InsertAtFirst insert value into list at first index
func (l *List[T]) InsertAtFirst(value T) {
l.InsertAt(0, value)
}
// InsertAtLast insert value into list at last index
func (l *List[T]) InsertAtLast(value T) {
l.InsertAt(len(l.data), value)
}
// InsertAt insert value into list at index
func (l *List[T]) InsertAt(index int, value T) {
data := l.data
size := len(data)
if index < 0 || index > size {
return
}
l.data = append(data[:index], append([]T{value}, data[index:]...)...)
}
// PopFirst delete the first value of list and return it
func (l *List[T]) PopFirst() (*T, bool) {
if len(l.data) == 0 {
return nil, false
}
v := l.data[0]
l.DeleteAt(0)
return &v, true
}
// PopLast delete the last value of list and return it
func (l *List[T]) PopLast() (*T, bool) {
size := len(l.data)
if size == 0 {
return nil, false
}
v := l.data[size-1]
l.DeleteAt(size - 1)
return &v, true
}
// DeleteAt deletes the value of the list at index.
// Out-of-range indices are ignored.
func (l *List[T]) DeleteAt(index int) {
	if index < 0 || index > len(l.data)-1 {
		return
	}
	// append handles the last-index case too (the tail is then empty), so
	// the previous special branch — which used `append(data[:index])`,
	// an append with no values that go vet flags — is unnecessary.
	l.data = append(l.data[:index], l.data[index+1:]...)
}
// UpdateAt updates the value of the list at index; index must be between
// 0 and list size - 1, otherwise the call is a no-op.
func (l *List[T]) UpdateAt(index int, value T) {
	if index < 0 || index >= len(l.data) {
		return
	}
	// Direct assignment replaces the previous append-based slice
	// reconstruction, which allocated just to overwrite one element.
	l.data[index] = value
}
// Equtal compare list to other list, use reflect.DeepEqual.
// Returns true only when both lists have the same length and every
// element is deeply equal, position by position.
// NOTE(review): the name is a typo for "Equal", but it is exported API;
// renaming would break callers — consider adding an Equal alias and
// deprecating this spelling.
func (l *List[T]) Equtal(other *List[T]) bool {
	if len(l.data) != len(other.data) {
		return false
	}
	for i := 0; i < len(l.data); i++ {
		if !reflect.DeepEqual(l.data[i], other.data[i]) {
			return false
		}
	}
	return true
}
// IsEmpty check if the list is empty or not
func (l *List[T]) IsEmpty() bool {
return len(l.data) == 0
}
// Clear drops all elements, leaving an empty (non-nil) data slice.
func (l *List[T]) Clear() {
	l.data = make([]T, 0, 0)
}
// Clone return a copy of list
func (l *List[T]) Clone() *List[T] {
cl := NewList(make([]T, len(l.data)))
copy(cl.data, l.data)
return cl
}
// Merge two lists into a new list; neither original list is changed.
func (l *List[T]) Merge(other *List[T]) *List[T] {
	// Build into a fresh, exactly-sized slice. The previous version
	// appended other.data onto l.data first, which could write into
	// l.data's spare capacity (aliasing another slice sharing that
	// backing array), and it also made a throwaway full-size allocation.
	merged := make([]T, 0, len(l.data)+len(other.data))
	merged = append(merged, l.data...)
	merged = append(merged, other.data...)
	return NewList(merged)
}
// Size return number of list data items
func (l *List[T]) Size() int {
return len(l.data)
}
// Swap the value of index i and j in list
func (l *List[T]) Swap(i, j int) {
size := len(l.data)
if i < 0 || i >= size || j < 0 || j >= size {
return
}
l.data[i], l.data[j] = l.data[j], l.data[i]
}
// Reverse the item order of list
func (l *List[T]) Reverse() {
for i, j := 0, len(l.data)-1; i < j; i, j = i+1, j-1 {
l.data[i], l.data[j] = l.data[j], l.data[i]
}
}
// Unique removes duplicate items from the list, keeping the first
// occurrence of each value (compared with reflect.DeepEqual) and
// preserving the original order.
func (l *List[T]) Unique() {
	deduped := make([]T, 0, 0)
	for _, candidate := range l.data {
		seen := false
		for _, kept := range deduped {
			if reflect.DeepEqual(candidate, kept) {
				seen = true
				break
			}
		}
		if !seen {
			deduped = append(deduped, candidate)
		}
	}
	l.data = deduped
}
// Union creates a new list contain all element in list l and other, remove duplicate element.
func (l *List[T]) Union(other *List[T]) *List[T] {
res := NewList([]T{})
res.data = append(res.data, l.data...)
res.data = append(res.data, other.data...)
res.Unique()
return res
}
// Intersection creates a new list whose element both be contained in list l and other
func (l *List[T]) Intersection(other *List[T]) *List[T] {
res := NewList(make([]T, 0, 0))
for _, v := range l.data {
if other.Contain(v) {
res.data = append(res.data, v)
}
}
return res
} | datastructure/list/list.go | 0.691185 | 0.531939 | list.go | starcoder |
package graphql
import (
"fmt"
)
// nonNullTypeCreator is given to newTypeImpl for creating a NonNull.
type nonNullTypeCreator struct {
typeDef NonNullTypeDefinition
}
// nonNullTypeCreator implements typeCreator.
var _ typeCreator = (*nonNullTypeCreator)(nil)
// TypeDefinition implements typeCreator.
func (creator *nonNullTypeCreator) TypeDefinition() TypeDefinition {
return creator.typeDef
}
// LoadDataAndNew implements typeCreator.
func (creator *nonNullTypeCreator) LoadDataAndNew() (Type, error) {
return &nonNull{}, nil
}
// Finalize implements typeCreator.
// It resolves the NonNull's inner (element) type from its definition and
// validates it: the element type must be non-nil and nullable — a NonNull
// cannot wrap another NonNull.
func (creator *nonNullTypeCreator) Finalize(t Type, typeDefResolver typeDefinitionResolver) error {
	// Resolve element type.
	elementType, err := typeDefResolver(creator.typeDef.InnerType())
	if err != nil {
		return err
	} else if elementType == nil {
		return NewError("Must provide an non-nil element type for NonNull.")
	} else if !IsNullableType(elementType) {
		return NewError(fmt.Sprintf("Expected a nullable type for NonNull but got an %s.", Inspect(elementType)))
	}
	nonNull := t.(*nonNull)
	nonNull.elementType = elementType
	return nil
}
// nonNullTypeDefinitionOf wraps a TypeDefinition of the element type and implements
// NonNullTypeDefinition.
type nonNullTypeDefinitionOf struct {
ThisIsTypeDefinition
elementTypeDef TypeDefinition
}
var _ NonNullTypeDefinition = nonNullTypeDefinitionOf{}
// InnerType implements NonNullTypeDefinition.
func (typeDef nonNullTypeDefinitionOf) InnerType() TypeDefinition {
return typeDef.elementTypeDef
}
// NonNullOf returns a NonNullTypeDefinition with the given TypeDefinition of element type.
func NonNullOf(elementTypeDef TypeDefinition) NonNullTypeDefinition {
return nonNullTypeDefinitionOf{
elementTypeDef: elementTypeDef,
}
}
// nonNullTypeDefinitionOfType wraps a Type of the element type and implements
// NonNullTypeDefinition.
type nonNullTypeDefinitionOfType struct {
ThisIsTypeDefinition
elementType Type
}
var _ NonNullTypeDefinition = nonNullTypeDefinitionOfType{}
// InnerType implements NonNullTypeDefinition.
func (typeDef nonNullTypeDefinitionOfType) InnerType() TypeDefinition {
return T(typeDef.elementType)
}
// NonNullOfType returns a NonNullTypeDefinition with the given Type of element type.
func NonNullOfType(elementType Type) NonNullTypeDefinition {
return nonNullTypeDefinitionOfType{
elementType: elementType,
}
}
// nonNull is our built-in implementation for NonNull. It is configured with and built from
// NonNullTypeDefinition.
type nonNull struct {
ThisIsNonNullType
elementType Type
}
var _ NonNull = (*nonNull)(nil)
// NewNonNullOfType defines a NonNull type from a given Type of element type.
func NewNonNullOfType(elementType Type) (NonNull, error) {
return NewNonNull(NonNullOfType(elementType))
}
// MustNewNonNullOfType is a panic-on-fail version of NewNonNullOfType.
func MustNewNonNullOfType(elementType Type) NonNull {
return MustNewNonNull(NonNullOfType(elementType))
}
// NewNonNullOf defines a NonNull type from a given TypeDefinition of element type.
func NewNonNullOf(elementTypeDef TypeDefinition) (NonNull, error) {
return NewNonNull(NonNullOf(elementTypeDef))
}
// MustNewNonNullOf is a panic-on-fail version of NewNonNullOf.
func MustNewNonNullOf(elementTypeDef TypeDefinition) NonNull {
return MustNewNonNull(NonNullOf(elementTypeDef))
}
// NewNonNull defines a NonNull type from a NonNullTypeDefinition.
func NewNonNull(typeDef NonNullTypeDefinition) (NonNull, error) {
t, err := newTypeImpl(&nonNullTypeCreator{
typeDef: typeDef,
})
if err != nil {
return nil, err
}
return t.(*nonNull), nil
}
// MustNewNonNull is a convenience function equivalent to NewNonNull but panics on failure instead of
// returning an error.
func MustNewNonNull(typeDef NonNullTypeDefinition) NonNull {
n, err := NewNonNull(typeDef)
if err != nil {
panic(err)
}
return n
}
// UnwrappedType implements WrappingType.
func (n *nonNull) UnwrappedType() Type {
return n.InnerType()
}
// InnerType implements NonNull.
func (n *nonNull) InnerType() Type {
return n.elementType
} | graphql/non_null.go | 0.74826 | 0.623692 | non_null.go | starcoder |
package geo
import (
"reflect"
"unsafe"
)
// Polygon represents a closed Polygon of vertices when
// the first and last vertices are equal.
type Polygon struct {
min, max LatLng // min and man LatLng
v []LatLng // Vertices
}
// NewPolygon returns a new empty Polygon
func NewPolygon() Polygon {
return Polygon{
max: NewLatLng(minLatitude, minLongitude),
min: NewLatLng(maxLatitude, maxLongitude),
}
}
// NewPolygonFromVertices returns a new Polygon with the LatLng vertices.
// updates the polygon's boundingbox.
func NewPolygonFromVertices(s []LatLng) Polygon {
p := NewPolygon()
p.v = s
p.UpdateBoundingBox()
return p
}
func NewPolygonFromBytes(b []byte) Polygon {
p := NewPolygon()
p.v = toLatLngSlice(b)
p.UpdateBoundingBox()
return p
}
// updateBounds expands the polygon's bounding box (min/max corners) so
// that it includes ll. Invalid coordinates are ignored.
func (p *Polygon) updateBounds(ll LatLng) {
	if ll.Valid() {
		if p.max.Lat < ll.Lat {
			p.max.Lat = ll.Lat
		}
		if p.max.Lng < ll.Lng {
			p.max.Lng = ll.Lng
		}
		if p.min.Lat > ll.Lat {
			p.min.Lat = ll.Lat
		}
		if p.min.Lng > ll.Lng {
			p.min.Lng = ll.Lng
		}
	}
}
// Length retuns the number of veritices in the Polygon
func (p *Polygon) Length() int {
return len(p.v)
}
// Min returns the bottom-left coordinate of the Polygon,
// corresponding to the minimum latitude and longitude values contained.
// (The previous comment said "Max" here — a copy-paste mistake.)
func (p *Polygon) Min() LatLng {
	return p.min
}
// Max returns the top-right coordinate of the Polygon.
// Correspoinding to the maximum latitude and longitude values contained.
func (p *Polygon) Max() LatLng {
return p.max
}
// Add adds a Latitude and Longitude in degrees to the Polygon.
// Maximum and minimum latitude is -90 and +90 respectively.
// Maximum and minimum longitude is -180 and +180 respectively.
func (p *Polygon) Add(latitude, longitude float64) {
p.AddVertex(LatLng{float32(latitude), float32(longitude)})
}
// AddVertex adds a LatLng Vertex to the Polygon. Updates polygon bounds with new Vertex.
func (p *Polygon) AddVertex(ll LatLng) {
if ll.Valid() {
p.v = append(p.v, ll)
p.updateBounds(ll)
}
}
// UpdateBoundingBox updates the max and min limits of the boundingBox using the contained ploygon vertices.
func (p *Polygon) UpdateBoundingBox() {
for _, v := range p.v {
p.updateBounds(v)
}
}
// ContainsLatLng reports whether query lies inside the polygon, using the
// ray-casting (even-odd) rule: the point is inside iff a ray from it
// crosses an odd number of polygon edges. A polygon with fewer than three
// vertices contains nothing. Points exactly on an edge may be classified
// either way (standard ray-casting caveat).
func (p *Polygon) ContainsLatLng(query LatLng) bool {
	if len(p.v) < 3 {
		return false
	}
	// Start with the closing edge (last vertex -> first vertex), then walk
	// the remaining edges, toggling on every crossing.
	in := rayIntersectsSegment(query, p.v[len(p.v)-1], p.v[0])
	for i := 1; i < len(p.v); i++ {
		if rayIntersectsSegment(query, p.v[i-1], p.v[i]) {
			in = !in
		}
	}
	return in
}
// rayIntersectsSegment reports whether a ray cast from p crosses the
// segment a-b: the segment must straddle p's longitude, and p's latitude
// must be below the segment's latitude at that longitude.
func rayIntersectsSegment(p, a, b LatLng) bool {
	return (a.Lng > p.Lng) != (b.Lng > p.Lng) &&
		p.Lat < (b.Lat-a.Lat)*(p.Lng-a.Lng)/(b.Lng-a.Lng)+a.Lat
}
// toByteSlice reinterprets a []LatLng as its raw bytes without copying.
// Each LatLng is assumed to occupy 8 bytes (two float32 fields).
// NOTE(review): reflect.SliceHeader is deprecated; on Go 1.17+ prefer
// unsafe.Slice((*byte)(unsafe.Pointer(&b[0])), len(b)*8).
// reference: https://go101.org/article/unsafe.html
func toByteSlice(b []LatLng) []byte {
	// Bug fix: &b[0] on an empty slice panics; return nil instead.
	if len(b) == 0 {
		return nil
	}
	var bs []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&bs))
	hdr.Len = len(b) * 8
	hdr.Cap = hdr.Len
	hdr.Data = uintptr(unsafe.Pointer(&b[0]))
	return bs
}
// toLatLngSlice reinterprets raw bytes as a []LatLng without copying.
// len(b) is assumed to be a multiple of 8 (two float32 fields per LatLng).
// NOTE(review): reflect.SliceHeader is deprecated; on Go 1.17+ prefer
// unsafe.Slice((*LatLng)(unsafe.Pointer(&b[0])), len(b)/8).
// reference: https://go101.org/article/unsafe.html
func toLatLngSlice(b []byte) []LatLng {
	// Bug fix: &b[0] on an empty slice panics; return nil instead.
	// (The previous signature also declared a named return "result" that
	// was never used.)
	if len(b) == 0 {
		return nil
	}
	var lls []LatLng
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&lls))
	hdr.Len = len(b) / 8
	hdr.Cap = hdr.Len
	hdr.Data = uintptr(unsafe.Pointer(&b[0]))
	return lls
}
func (p Polygon) ToByteSlice() []byte {
return toByteSlice(p.v)
}
func (p *Polygon) FromByteSlice(src []byte) {
p.v = toLatLngSlice(src)
} | v2/geo/polygon.go | 0.875853 | 0.657153 | polygon.go | starcoder |
package runtime
import (
"github.com/zouzhihao-994/gvm/config"
"github.com/zouzhihao-994/gvm/oops"
"github.com/zouzhihao-994/gvm/utils"
"math"
)
type OperandStack struct {
// record the top position of the stack
size uint32
slots utils.Slots
}
// NewOperandStack allocates an operand stack with room for maxStack
// slots. Returns nil when maxStack is 0 — callers must be prepared for a
// nil stack (a method that needs no operands).
func NewOperandStack(maxStack uint16) *OperandStack {
	if maxStack > 0 {
		operandStack := &OperandStack{
			slots: make([]utils.Slot, maxStack),
		}
		return operandStack
	}
	return nil
}
func (stack *OperandStack) GetByIdx(i int) utils.Slot {
return stack.slots[i]
}
func (stack *OperandStack) SetByIdx(i int, s utils.Slot) {
stack.slots[i] = s
}
func (stack *OperandStack) PushInt(val int32) {
stack.slots[stack.size].Num = val
stack.size++
}
func (stack *OperandStack) PopInt() int32 {
stack.size--
return stack.slots[stack.size].Num
}
func (stack *OperandStack) PushFloat(val float32) {
bits := math.Float32bits(val)
stack.slots[stack.size].Num = int32(bits)
stack.size++
}
func (stack *OperandStack) PopFloat() float32 {
stack.size--
bits := uint32(stack.slots[stack.size].Num)
return math.Float32frombits(bits)
}
func (stack *OperandStack) PushLong(val int64) {
stack.slots[stack.size].Num = int32(val)
stack.slots[stack.size+1].Num = int32(val >> 32)
stack.size += 2
}
func (stack *OperandStack) PopLong() int64 {
stack.size -= 2
low := uint32(stack.slots[stack.size].Num)
high := uint32(stack.slots[stack.size+1].Num)
return int64(high)<<32 | int64(low)
}
func (stack *OperandStack) PushDouble(val float64) {
bits := math.Float64bits(val)
stack.PushLong(int64(bits))
}
func (stack *OperandStack) PopDouble() float64 {
bits := uint64(stack.PopLong())
return math.Float64frombits(bits)
}
func (stack *OperandStack) PushRef(ref *oops.OopInstance) {
stack.slots[stack.size].Ref = ref
stack.size++
}
// PopRef pops a reference from the stack. The slot's Ref field is cleared
// so the popped object can be garbage collected; returns nil when the
// slot holds no reference.
func (stack *OperandStack) PopRef() *oops.OopInstance {
	stack.size--
	var ref *oops.OopInstance
	if stack.slots[stack.size].Ref != nil {
		ref = stack.slots[stack.size].Ref.(*oops.OopInstance)
		stack.slots[stack.size].Ref = nil
	}
	return ref
}
// PushSlot /*
func (stack *OperandStack) PushSlot(slot utils.Slot) {
stack.slots[stack.size] = slot
stack.size++
}
// PopSlot /*
func (stack *OperandStack) PopSlot() utils.Slot {
stack.size--
return stack.slots[stack.size]
}
func (stack *OperandStack) PushBoolean(val bool) {
if val {
stack.PushInt(1)
} else {
stack.PushInt(0)
}
}
func (stack *OperandStack) PopBoolean() bool {
return stack.PopInt() == 1
}
// PopByParamters todo: provide more parmes type
func (stack *OperandStack) PopByParamters(params []string, localVars *LocalVars, isStatic bool) {
i := len(params)
// method is storages <this.class.Ref> on localvars[0]
// but the static_method is different,don't storages Ref on [0]
if isStatic {
i--
}
for idx := range params {
switch params[idx] {
case config.Char:
panic("Does not support")
case config.Double:
localVars.SetDouble(uint(i-idx), stack.PopDouble())
break
case config.Float:
localVars.SetFloat(uint(i-idx), stack.PopFloat())
break
case config.Int:
localVars.SetInt(uint(i-idx), stack.PopInt())
break
case config.Long:
stack.PopLong()
localVars.SetLong(uint(i-idx), stack.PopLong())
break
case config.Boolean:
stack.PopBoolean()
localVars.SetBoolean(uint(i+idx), stack.PopBoolean())
break
case "L":
panic("Does not support")
case config.Ref:
stack.PopRef()
break
}
}
// save the invoke class Ref to localvars[0]
if !isStatic {
localVars.SetRef(0, stack.PopRef())
}
} | runtime/operandStack.go | 0.587943 | 0.436682 | operandStack.go | starcoder |
package build
// Union returns the graph union g1 ∪ g2, which
// consists of the union of the two vertex sets
// and the union of the two edge sets of g1 and g2.
// The edges of the new graph will have zero cost.
func (g1 *Virtual) Union(g2 *Virtual) *Virtual {
return g1.union(g2, false)
}
// If cost is true keep costs and use g1's cost for edges that belong to both g1 and g2.
func (g1 *Virtual) union(g2 *Virtual, cost bool) *Virtual {
switch {
case g1.order == 0:
if cost {
return g2
}
return g2.AddCost(0)
case g2.order == 0:
if cost {
return g1
}
return g1.AddCost(0)
}
newCost := zero
if cost {
newCost = func(v, w int) int64 {
if g2.edge(v, w) && !g1.edge(v, w) {
return g2.cost(v, w)
}
return g1.cost(v, w)
}
}
var res *Virtual
switch {
case g1.order == g2.order:
res = generic(g1.order, newCost, func(v, w int) bool {
return g1.edge(v, w) || g2.edge(v, w)
})
case g1.order < g2.order:
res = generic(g2.order, newCost, func(v, w int) bool {
return v < g1.order && w < g1.order && g1.edge(v, w) || g2.edge(v, w)
})
default:
res = generic(g1.order, newCost, func(v, w int) bool {
return v < g2.order && w < g2.order && g2.edge(v, w) || g1.edge(v, w)
})
}
res.visit = func(v int, a int, do func(w int, c int64) bool) (aborted bool) {
next := 0
if v < g1.order && g1.visit(v, a, func(w int, c int64) (skip bool) {
// First all neighbors from g2 that are less than w...
if next != w && v < g2.order && a <= w {
if more := false; g2.visit(v, max(next, a), func(w0 int, c0 int64) (skip bool) {
if w0 >= w {
more, skip = true, true
return
}
if cost {
return do(w0, c0)
}
return do(w0, 0)
}) && !more {
return true
}
}
// ...then w.
switch {
case cost && do(w, c):
return true
case !cost && do(w, 0):
return true
}
next = w + 1
return
}) {
return true
}
// When done with g1, produce any leftovers from g2.
return v < g2.order && g2.visit(v, max(next, a), func(w int, c int64) (skip bool) {
if cost {
return do(w, c)
}
return do(w, 0)
})
}
return res
} | build/union.go | 0.71602 | 0.550003 | union.go | starcoder |
package models
// Paragraph format element.
type ParagraphFormat struct {
Link *WordsApiLink `json:"link,omitempty"`
// Gets or sets a flag indicating whether inter-character spacing is automatically adjusted between regions of Latin text and regions of East Asian text in the current paragraph.
AddSpaceBetweenFarEastAndAlpha bool `json:"AddSpaceBetweenFarEastAndAlpha,omitempty"`
// Gets or sets a flag indicating whether inter-character spacing is automatically adjusted between regions of numbers and regions of East Asian text in the current paragraph.
AddSpaceBetweenFarEastAndDigit bool `json:"AddSpaceBetweenFarEastAndDigit,omitempty"`
// Gets or sets text alignment for the paragraph.
Alignment string `json:"Alignment,omitempty"`
// Gets or sets whether this is a right-to-left paragraph.
Bidi bool `json:"Bidi,omitempty"`
// Gets or sets the position for a drop cap text.
DropCapPosition string `json:"DropCapPosition,omitempty"`
// Gets or sets the value (in points) for a first line or hanging indent. Use a positive value to set a first-line indent, and use a negative value to set a hanging indent.
FirstLineIndent float64 `json:"FirstLineIndent,omitempty"`
// Gets or sets True when the paragraph is an item in a bulleted or numbered list.
IsListItem bool `json:"IsListItem,omitempty"`
// Gets or sets true if all lines in the paragraph are to remain on the same page.
KeepTogether bool `json:"KeepTogether,omitempty"`
// Gets or sets true if the paragraph is to remains on the same page as the paragraph that follows it.
KeepWithNext bool `json:"KeepWithNext,omitempty"`
// Gets or sets the value (in points) that represents the left indent for paragraph.
LeftIndent float64 `json:"LeftIndent,omitempty"`
// Gets or sets the line spacing (in points) for the paragraph.
LineSpacing float64 `json:"LineSpacing,omitempty"`
// Gets or sets the line spacing for the paragraph.
LineSpacingRule string `json:"LineSpacingRule,omitempty"`
// Gets or sets the number of lines of the paragraph text used to calculate the drop cap height.
LinesToDrop int32 `json:"LinesToDrop,omitempty"`
// Gets or sets when true, SpaceBefore and SpaceAfter will be ignored between the paragraphs of the same style.
NoSpaceBetweenParagraphsOfSameStyle bool `json:"NoSpaceBetweenParagraphsOfSameStyle,omitempty"`
// Gets or sets specifies the outline level of the paragraph in the document.
OutlineLevel string `json:"OutlineLevel,omitempty"`
// Gets or sets true if a page break is forced before the paragraph.
PageBreakBefore bool `json:"PageBreakBefore,omitempty"`
// Gets or sets the value (in points) that represents the right indent for paragraph.
RightIndent float64 `json:"RightIndent,omitempty"`
Shading *Shading `json:"Shading,omitempty"`
// Gets or sets the amount of spacing (in points) after the paragraph.
SpaceAfter float64 `json:"SpaceAfter,omitempty"`
// Gets or sets true if the amount of spacing after the paragraph is set automatically.
SpaceAfterAuto bool `json:"SpaceAfterAuto,omitempty"`
// Gets or sets the amount of spacing (in points) before the paragraph.
SpaceBefore float64 `json:"SpaceBefore,omitempty"`
// Gets or sets true if the amount of spacing before the paragraph is set automatically.
SpaceBeforeAuto bool `json:"SpaceBeforeAuto,omitempty"`
// Gets or sets the locale independent style identifier of the paragraph style applied to this formatting.
StyleIdentifier string `json:"StyleIdentifier,omitempty"`
// Gets or sets the name of the paragraph style applied to this formatting.
StyleName string `json:"StyleName,omitempty"`
// Gets or sets specifies whether the current paragraph should be exempted from any hyphenation which is applied in the document settings.
SuppressAutoHyphens bool `json:"SuppressAutoHyphens,omitempty"`
// Gets or sets specifies whether the current paragraph's lines should be exempted from line numbering which is applied in the parent section.
SuppressLineNumbers bool `json:"SuppressLineNumbers,omitempty"`
// Gets or sets true if the first and last lines in the paragraph are to remain on the same page as the rest of the paragraph.
WidowControl bool `json:"WidowControl,omitempty"`
}
type IParagraphFormat interface {
IsParagraphFormat() bool
}
func (ParagraphFormat) IsParagraphFormat() bool {
return true;
}
func (ParagraphFormat) IsLinkElement() bool {
return true;
} | v2005/api/models/paragraph_format.go | 0.784814 | 0.41253 | paragraph_format.go | starcoder |
package histogram
import (
"errors"
"github.com/kelvins/lbph/math"
"github.com/kelvins/lbph/metric"
)
// Calculate function generates a histogram based on the 'matrix' passed by parameter.
func Calculate(pixels [][]uint64, gridX, gridY uint8) ([]float64, error) {
var hist []float64
// Check the pixels 'matrix'
if len(pixels) == 0 {
return hist, errors.New("The pixels slice passed to the GetHistogram function is empty")
}
// Get the 'matrix' dimensions
rows := len(pixels)
cols := len(pixels[0])
// Check the grid (X and Y)
if gridX <= 0 || int(gridX) >= cols {
return hist, errors.New("Invalid grid X passed to the GetHistogram function")
}
if gridY <= 0 || int(gridX) >= rows {
return hist, errors.New("Invalid grid Y passed to the GetHistogram function")
}
// Get the size (width and height) of each region
gridWidth := cols / int(gridX)
gridHeight := rows / int(gridY)
// Calculates the histogram of each grid
for gX := 0; gX < int(gridX); gX++ {
for gY := 0; gY < int(gridY); gY++ {
// Create a slice with empty 256 positions
regionHistogram := make([]float64, 256)
// Define the start and end positions for the following loop
startPosX := gX * gridWidth
startPosY := gY * gridHeight
endPosX := (gX + 1) * gridWidth
endPosY := (gY + 1) * gridHeight
// Make sure that no pixel has been leave at the end
if gX == int(gridX)-1 {
endPosX = cols
}
if gY == int(gridY)-1 {
endPosY = rows
}
// Creates the histogram for the current region
for x := startPosX; x < endPosX; x++ {
for y := startPosY; y < endPosY; y++ {
// Make sure we are trying to access a valid position
if x < len(pixels) {
if y < len(pixels[x]) {
if int(pixels[x][y]) < len(regionHistogram) {
regionHistogram[pixels[x][y]] += 1
}
}
}
}
}
// Concatenate two slices
hist = append(hist, regionHistogram...)
}
}
return hist, nil
}
// Compare function is used to compare two histograms using a selected metric.
// Histogram comparison references:
// http://docs.opencv.org/2.4/doc/tutorials/imgproc/histograms/histogram_comparison/histogram_comparison.html
func Compare(hist1, hist2 []float64, selectedMetric string) (float64, error) {
switch selectedMetric {
case metric.ChiSquare:
return math.ChiSquare(hist1, hist2)
case metric.EuclideanDistance:
return math.EuclideanDistance(hist1, hist2)
case metric.NormalizedEuclideanDistance:
return math.NormalizedEuclideanDistance(hist1, hist2)
case metric.AbsoluteValue:
return math.AbsoluteValue(hist1, hist2)
}
return 0, errors.New("Invalid metric selected to compare the histograms")
} | histogram/histogram.go | 0.843863 | 0.602237 | histogram.go | starcoder |
package memory
import (
"fmt"
"reflect"
"github.com/google/gapid/core/math/u64"
"github.com/google/gapid/core/os/device"
)
// AlignOf returns the byte alignment of the type t.
func AlignOf(t reflect.Type, m *device.MemoryLayout) uint64 {
switch {
case t.Implements(tyPointer):
return uint64(m.GetPointer().GetAlignment())
case t.Implements(tyCharTy):
return uint64(m.GetChar().GetAlignment())
case t.Implements(tyIntTy), t.Implements(tyUintTy):
return uint64(m.GetInteger().GetAlignment())
case t.Implements(tySizeTy):
return uint64(m.GetSize().GetAlignment())
default:
switch t.Kind() {
case reflect.Bool, reflect.Int8, reflect.Uint8:
return uint64(m.GetI8().GetAlignment())
case reflect.Int16, reflect.Uint16:
return uint64(m.GetI16().GetAlignment())
case reflect.Int32, reflect.Uint32:
return uint64(m.GetI32().GetAlignment())
case reflect.Float32:
return uint64(m.GetF32().GetAlignment())
case reflect.Float64:
return uint64(m.GetF64().GetAlignment())
case reflect.Int64, reflect.Uint64:
return uint64(m.GetI64().GetAlignment())
case reflect.Int, reflect.Uint:
return uint64(m.GetInteger().GetAlignment())
case reflect.Array, reflect.Slice:
return AlignOf(t.Elem(), m)
case reflect.String:
return 1
case reflect.Struct:
alignment := uint64(1)
for i, c := 0, t.NumField(); i < c; i++ {
if a := AlignOf(t.Field(i).Type, m); alignment < a {
alignment = a
}
}
return alignment
default:
panic(fmt.Errorf("MemoryLayout.AlignOf not implemented for type %v (%v)", t, t.Kind()))
}
}
}
// SizeOf returns the byte size of the type t.
func SizeOf(t reflect.Type, m *device.MemoryLayout) uint64 {
switch {
case t.Implements(tyPointer):
return uint64(m.GetPointer().GetSize())
case t.Implements(tyCharTy):
return uint64(m.GetChar().GetSize())
case t.Implements(tyIntTy), t.Implements(tyUintTy):
return uint64(m.GetInteger().GetSize())
case t.Implements(tySizeTy):
return uint64(m.GetSize().GetSize())
default:
switch t.Kind() {
case reflect.Bool, reflect.Int8, reflect.Uint8:
return uint64(m.GetI8().GetSize())
case reflect.Int16, reflect.Uint16:
return uint64(m.GetI16().GetSize())
case reflect.Int32, reflect.Uint32:
return uint64(m.GetI32().GetSize())
case reflect.Float32:
return uint64(m.GetF32().GetSize())
case reflect.Float64:
return uint64(m.GetF64().GetSize())
case reflect.Int64, reflect.Uint64:
return uint64(m.GetI64().GetSize())
case reflect.Int, reflect.Uint:
return uint64(m.GetInteger().GetSize())
case reflect.Array:
return SizeOf(t.Elem(), m) * uint64(t.Len())
case reflect.String:
return 1
case reflect.Struct:
var size, align uint64
for i, c := 0, t.NumField(); i < c; i++ {
f := t.Field(i)
a := AlignOf(f.Type, m)
size = u64.AlignUp(size, a)
size += SizeOf(f.Type, m)
align = u64.Max(align, a)
}
size = u64.AlignUp(size, align)
return size
default:
panic(fmt.Errorf("MemoryLayout.SizeOf not implemented for type %v (%v)", t, t.Kind()))
}
}
} | gapis/memory/alignof_sizeof.go | 0.626696 | 0.449816 | alignof_sizeof.go | starcoder |
package solver
import "github.com/heustis/tsp-solver-go/model"
// FindShortestPathNPHeap finds the shortest path by using a heap to grow the shortest circuit until in includes all the vertices.
// It accepts an unordered set of vertices, and returns the ordered list of vertices.
// This has a complexity of O(n!) and a memory usage of O(n!).
func FindShortestPathNPHeap(vertices []model.CircuitVertex) ([]model.CircuitVertex, float64) {
// Prepare root of tree
treeRoot := createHeapNode(0, 1, nil)
numVertices := len(vertices)
heap := model.NewHeap(func(a interface{}) float64 {
return a.(*treeHeapNodeTSP).pathLength
})
// Add each child in the current node to the heap. The current node is the node in the heap with the shortest path so far.
node := treeRoot
for ; node.depth < numVertices; node = heap.PopHeap().(*treeHeapNodeTSP) {
node.createChildren(numVertices)
for _, c := range node.children {
c.pathLength = c.computePathLen(vertices)
heap.PushHeap(c)
}
}
// Create a path from the root to the current node.
pathLength := node.pathLength
path := make([]model.CircuitVertex, numVertices)
for n := node; n != nil; n = n.parent {
path[n.depth-1] = vertices[n.index]
}
// Clean up the heap and tree.
heap.Delete()
treeRoot.deleteNode()
return path, pathLength
}
type treeHeapNodeTSP struct {
parent *treeHeapNodeTSP
children []*treeHeapNodeTSP
index int
depth int
pathLength float64
}
func createHeapNode(index int, depth int, parent *treeHeapNodeTSP) *treeHeapNodeTSP {
return &treeHeapNodeTSP{
parent: parent,
children: []*treeHeapNodeTSP{},
index: index,
depth: depth,
pathLength: 0.0,
}
}
// createChildren creates one treeHeapNodeTSP for each index that is not already and ancester of the current node nor is the current node.
func (t *treeHeapNodeTSP) createChildren(numVertices int) {
existingIndices := t.computeExistingIndices()
for index := 0; index < numVertices; index++ {
if _, exists := existingIndices[index]; !exists {
t.children = append(t.children, createHeapNode(index, t.depth+1, t))
}
}
}
// computeExistingIndices returns a map of all indices that are along the path from the root of the tree through the current node.
func (t *treeHeapNodeTSP) computeExistingIndices() map[int]bool {
existingIndices := make(map[int]bool)
for current := t; current != nil; current = current.parent {
existingIndices[current.index] = true
}
return existingIndices
}
// computePathLen determines the length of the path from the root to this node.
// If the current node is a leaf node, the length of the path back to the root is added to the circuit length.
func (t *treeHeapNodeTSP) computePathLen(vertices []model.CircuitVertex) float64 {
if t.parent == nil {
return 0.0
} else if t.depth == len(vertices) {
// The root node is always index 0, so we can determine the path length by utilizing the parent node's path length (rather than navigating through the tree).
return t.parent.pathLength + vertices[t.parent.index].DistanceTo(vertices[t.index]) + vertices[0].DistanceTo(vertices[t.index])
} else {
return t.parent.pathLength + vertices[t.parent.index].DistanceTo(vertices[t.index])
}
}
// deleteNode cleans up the current node and any of the node's descendents.
func (t *treeHeapNodeTSP) deleteNode() {
t.parent = nil
if t.children != nil {
for _, c := range t.children {
c.deleteNode()
}
t.children = nil
}
} | solver/npsolverheap.go | 0.857231 | 0.754508 | npsolverheap.go | starcoder |
package distance
import (
"errors"
"github.com/ALTree/bigfloat"
v "github.com/cwchentw/algo-golang/vector/generics"
"math"
"math/big"
"reflect"
)
func Euclidean(p *v.Vector, q *v.Vector) (interface{}, error) {
return Minkowski(p, q, 2)
}
func Minkowski(p *v.Vector, q *v.Vector, n int) (interface{}, error) {
if n <= 0 {
panic("Exp should be larger than zero")
}
checkLength(p, q)
s, err := p.Sub(q)
if err != nil {
return nil, err
}
_len := s.Len()
vec := v.WithSize(_len)
for i := 0; i < _len; i++ {
e := s.GetAt(i)
ts := reflect.TypeOf(e).String()
switch ts {
case "int":
num := e.(int)
if num < 0 {
num = -num
}
vec.SetAt(i, math.Pow(float64(num), 1.0/float64(n)))
case "float64":
num := e.(float64)
if num < 0 {
num = -num
}
vec.SetAt(i, math.Pow(num, 1/float64(n)))
case reflect.TypeOf(big.NewInt(0)).String():
num := e.(*big.Int)
num = num.Abs(num)
_num := new(big.Float).SetInt(num)
result := big.NewFloat(1.0)
result = bigfloat.Pow(result, big.NewFloat(1.0).Quo(big.NewFloat(1.0), _num))
vec.SetAt(i, result)
case reflect.TypeOf(big.NewFloat(0.0)).String():
num := e.(*big.Float)
num = num.Abs(num)
result := big.NewFloat(1.0)
result = bigfloat.Pow(result, big.NewFloat(1.0).Quo(big.NewFloat(1.0), num))
vec.SetAt(i, result)
default:
return nil, errors.New("Unknown Type")
}
}
out, err := vec.Reduce(add)
if err != nil {
return nil, err
}
return out, nil
}
func Maximum(p *v.Vector, q *v.Vector) (interface{}, error) {
return Chebyshev(p, q)
}
func Chebyshev(p *v.Vector, q *v.Vector) (interface{}, error) {
checkLength(p, q)
s, err := p.Sub(q)
if err != nil {
return nil, err
}
_len := s.Len()
vec := v.WithSize(_len)
for i := 0; i < _len; i++ {
e := s.GetAt(i)
ts := reflect.TypeOf(e).String()
switch ts {
case "int":
num := e.(int)
if num < 0 {
num = -num
}
vec.SetAt(i, float64(num))
case "float64":
num := e.(float64)
if num < 0 {
num = -num
}
vec.SetAt(i, num)
case reflect.TypeOf(big.NewInt(0)).String():
num := e.(*big.Int)
num = num.Abs(num)
vec.SetAt(i, num)
case reflect.TypeOf(big.NewFloat(0.0)).String():
num := e.(*big.Float)
num = num.Abs(num)
vec.SetAt(i, num)
default:
return nil, errors.New("Unknown Type")
}
}
out, err := vec.Reduce(max)
if err != nil {
return nil, err
}
return out, nil
}
func Manhattan(p *v.Vector, q *v.Vector) (interface{}, error) {
checkLength(p, q)
s, err := p.Sub(q)
if err != nil {
return nil, err
}
_len := s.Len()
vec := v.WithSize(_len)
for i := 0; i < _len; i++ {
e := s.GetAt(i)
ts := reflect.TypeOf(e).String()
switch ts {
case "int":
num := e.(int)
if num < 0 {
num = -num
}
vec.SetAt(i, float64(num))
case "float64":
num := e.(float64)
if num < 0 {
num = -num
}
vec.SetAt(i, num)
case reflect.TypeOf(big.NewInt(0)).String():
num := e.(*big.Int)
num = num.Abs(num)
vec.SetAt(i, num)
case reflect.TypeOf(big.NewFloat(0.0)).String():
num := e.(*big.Float)
num = num.Abs(num)
vec.SetAt(i, num)
default:
return nil, errors.New("Unknown Type")
}
}
out, err := vec.Reduce(add)
if err != nil {
return nil, err
}
return out, nil
}
func Canberra(p *v.Vector, q *v.Vector) (interface{}, error) {
checkLength(p, q)
pAbs, err := p.Map(abs)
if err != nil {
return nil, err
}
qAbs, err := q.Map(abs)
if err != nil {
return nil, err
}
s, err := p.Sub(q)
if err != nil {
return nil, err
}
sAbs, err := s.Map(abs)
if err != nil {
return nil, err
}
_len := s.Len()
vec := v.WithSize(_len)
for i := 0; i < _len; i++ {
a := sAbs.GetAt(i)
ts := reflect.TypeOf(a).String()
b := pAbs.GetAt(i)
c := qAbs.GetAt(i)
switch ts {
case "int":
na := float64(a.(int))
nb := float64(b.(int))
nc := float64(c.(int))
vec.SetAt(i, na/(nb+nc))
case "float64":
na := a.(float64)
nb := b.(float64)
nc := c.(float64)
vec.SetAt(i, na/(nb+nc))
case reflect.TypeOf(big.NewInt(0)).String():
na := new(big.Float).SetInt(a.(*big.Int))
nb := new(big.Float).SetInt(b.(*big.Int))
nc := new(big.Float).SetInt(c.(*big.Int))
vec.SetAt(i, na.Quo(na, nb.Add(nb, nc)))
case reflect.TypeOf(big.NewFloat(0.0)).String():
na := a.(*big.Float)
nb := b.(*big.Float)
nc := c.(*big.Float)
vec.SetAt(i, na.Quo(na, nb.Add(nb, nc)))
default:
return nil, errors.New("Unknown Type")
}
}
out, err := vec.Reduce(add)
if err != nil {
return nil, err
}
return out, nil
}
func add(a interface{}, b interface{}) (interface{}, error) {
ta := reflect.TypeOf(a).String()
tb := reflect.TypeOf(b).String()
if !(ta == "float64" || ta == "int") &&
!(tb == "float64" || tb == "int") {
if ta != tb {
return nil, errors.New("Unequal Type")
}
}
switch ta {
case "int":
switch tb {
case "int":
na := a.(int)
nb := b.(int)
return float64(na + nb), nil
case "float64":
na := a.(int)
nb := b.(float64)
return float64(na) + nb, nil
default:
return nil, errors.New("Unknown Type")
}
case "float64":
switch tb {
case "int":
na := a.(float64)
nb := b.(int)
return na + float64(nb), nil
case "float64":
na := a.(float64)
nb := b.(float64)
return na + nb, nil
default:
return nil, errors.New("Unknown Type")
}
case reflect.TypeOf(big.NewInt(0)).String():
na := a.(*big.Int)
nb := b.(*big.Int)
return na.Add(na, nb), nil
case reflect.TypeOf(big.NewFloat(0.0)).String():
na := a.(*big.Float)
nb := b.(*big.Float)
return na.Add(na, nb), nil
default:
return nil, errors.New("Unknown Type")
}
}
func max(a interface{}, b interface{}) (interface{}, error) {
ta := reflect.TypeOf(a).String()
tb := reflect.TypeOf(b).String()
if !(ta == "float64" || ta == "int") &&
!(tb == "float64" || tb == "int") {
if ta != tb {
return nil, errors.New("Unequal Type")
}
}
switch ta {
case "int":
switch tb {
case "int":
na := a.(int)
nb := b.(int)
if na > nb {
return na, nil
} else {
return nb, nil
}
case "float64":
na := float64(a.(int))
nb := b.(float64)
if na > nb {
return na, nil
} else {
return nb, nil
}
default:
return nil, errors.New("Unknown Type")
}
case "float64":
switch tb {
case "int":
na := a.(float64)
nb := float64(b.(int))
if na > nb {
return na, nil
} else {
return nb, nil
}
case "float64":
na := a.(float64)
nb := b.(float64)
if na > nb {
return na, nil
} else {
return nb, nil
}
default:
return nil, errors.New("Unknown Type")
}
case reflect.TypeOf(big.NewInt(0)).String():
na := a.(*big.Int)
nb := b.(*big.Int)
if na.Cmp(nb) > 0 {
return na, nil
} else {
return nb, nil
}
case reflect.TypeOf(big.NewFloat(0.0)).String():
na := a.(*big.Float)
nb := b.(*big.Float)
if na.Cmp(nb) > 0 {
return na, nil
} else {
return nb, nil
}
default:
return nil, errors.New("Unknown Type")
}
}
func abs(a interface{}) (interface{}, error) {
ta := reflect.TypeOf(a).String()
switch ta {
case "int":
na := a.(int)
if na > 0 {
return na, nil
} else {
return -na, nil
}
case "float64":
na := a.(float64)
if na > 0 {
return na, nil
} else {
return -na, nil
}
case reflect.TypeOf(big.NewInt(0)).String():
na := a.(*big.Int)
return na.Abs(na), nil
case reflect.TypeOf(big.NewFloat(0.0)).String():
na := a.(*big.Float)
return na.Abs(na), nil
default:
return nil, errors.New("Unknown Type")
}
}
func checkLength(p *v.Vector, q *v.Vector) {
if p.Len() != q.Len() {
panic("Unequal Length")
}
} | distance/generics/distance.go | 0.663233 | 0.537466 | distance.go | starcoder |
package sortedintlistgentest
import (
"fmt"
"github.com/comdiv/golang_course_comdiv/internal/sortedintlist"
"github.com/stretchr/testify/assert"
"math/rand"
"testing"
)
func GenericTestSorted_GetUnique(l sortedintlist.IIntSetMutable, t *testing.T) {
sortedintlist.InsertAllVar(l, 8, 1, 2, 4, 5, 4, 4, 5, 6, 1)
unique := l.GetUnique()
expected := []int{1, 2, 4, 5, 6, 8}
assert.ElementsMatch(t, expected, unique)
}
func GenericTestSorted_GetAll(l sortedintlist.IIntListMutable, t *testing.T) {
sortedintlist.InsertAllVar(l, 8, 1, 2, 4, 5, 4, 4, 5, 6, 1)
all := l.GetAll()
expected := []int{1, 1, 2, 4, 4, 4, 5, 5, 6, 8}
assert.ElementsMatch(t, expected, all)
}
func GenericTestSorted_Size(l sortedintlist.IIntListMutable, t *testing.T) {
sortedintlist.InsertAllVar(l, 1, 2, 4, 4, 4)
assert.Equal(t, 5, l.Size())
}
func GenericTestSorted_UniqueSize(l sortedintlist.IIntSetMutable, t *testing.T) {
sortedintlist.InsertAllVar(l, 1, 2, 4, 4, 4)
assert.Equal(t, 3, l.UniqueSize())
}
func GenericTestSorted_InsertList(l sortedintlist.IIntListMutable, t *testing.T) {
var inserted bool
inserted = l.Insert(1)
assert.True(t, inserted)
assert.Equal(t, 1, l.Size())
inserted = l.Insert(10)
assert.True(t, inserted)
assert.Equal(t, 2, l.Size())
inserted = l.Insert(10)
assert.False(t, inserted)
assert.Equal(t, 3, l.Size())
}
func GenericTestSorted_InsertSet(l sortedintlist.IIntSetMutable, t *testing.T) {
var inserted bool
inserted = l.Insert(1)
assert.True(t, inserted)
assert.Equal(t, 1, l.UniqueSize())
inserted = l.Insert(10)
assert.True(t, inserted)
assert.Equal(t, 2, l.UniqueSize())
inserted = l.Insert(10)
assert.False(t, inserted)
assert.Equal(t, 2, l.UniqueSize())
}
func GenericTestSorted_DeleteList(l sortedintlist.IIntListMutable, t *testing.T) {
l.Insert(1)
l.Insert(10)
l.Insert(11)
l.Insert(12)
l.Insert(12)
l.Insert(12)
if !(l.Size() == 6) {
t.Errorf("%v", l.Size())
}
var deleted bool
deleted = l.Delete(10, true)
assert.True(t, deleted)
assert.Equal(t, 5, l.Size())
deleted = l.Delete(77777, true)
assert.False(t, deleted)
assert.Equal(t, 5, l.Size())
deleted = l.Delete(12, false)
assert.True(t, deleted)
assert.Equal(t, 4, l.Size())
deleted = l.Delete(12, true)
assert.True(t, deleted)
assert.Equal(t, 2, l.Size())
}
func GenericTestSorted_DeleteSet(l sortedintlist.IIntSetMutable, t *testing.T) {
l.Insert(1)
l.Insert(10)
l.Insert(11)
l.Insert(12)
l.Insert(12)
l.Insert(12)
if !(l.UniqueSize() == 4) {
t.Errorf("%v", l.UniqueSize())
}
var deleted bool
deleted = l.Delete(10, true)
assert.True(t, deleted)
assert.Equal(t, 3, l.UniqueSize())
deleted = l.Delete(77777, true)
assert.False(t, deleted)
assert.Equal(t, 3, l.UniqueSize())
deleted = l.Delete(12, false)
assert.True(t, deleted)
assert.Equal(t, 3, l.UniqueSize())
deleted = l.Delete(12, true)
assert.True(t, deleted)
assert.Equal(t, 2, l.UniqueSize())
}
func GenericTestSorted_MinMax(minmax sortedintlist.IIntMinMax, t *testing.T) {
l, ok := minmax.(sortedintlist.IIntListMutable)
if !ok {
panic(fmt.Sprintf("Not l list given for test! %v", minmax))
}
assert.False(t, minmax.IsIntRangeInitialized(), "Should not be initialized at start")
_, err := minmax.GetMin()
assert.NotNil(t, err, "Should be error to ask min from empty list")
_, err = minmax.GetMax()
assert.NotNil(t, err, "Should be error to ask max from empty list")
r := rand.New(rand.NewSource(DEFAULT_BENCH_DATA_SEED))
basevalue := r.Intn(DEFAULT_BENCH_DATA_SIZE)
l.Insert(basevalue)
assert.True(t, minmax.IsIntRangeInitialized(), "Should be initialized if has values")
min, err := minmax.GetMin()
assert.Nil(t, err, "Min should be working after initialization")
assert.Equal(t, basevalue, min)
max, err := minmax.GetMax()
assert.Nil(t, err, "Min should be working after initialization")
assert.Equal(t, basevalue, max)
delata := r.Intn(1000) + 500
expectedmin := basevalue - delata
l.Insert(expectedmin)
expectedmax := basevalue + delata
l.Insert(expectedmax)
min, err = minmax.GetMin()
assert.Nil(t, err, "Min should be working after initialization")
assert.Equal(t, expectedmin, min)
max, err = minmax.GetMax()
assert.Nil(t, err, "Min should be working after initialization")
assert.Equal(t, expectedmax, max)
} | internal/sortedintlistgentest/GenericSortedListTests.go | 0.690455 | 0.611411 | GenericSortedListTests.go | starcoder |
package dag
//go:generate stringer -type=Type,status -output=node.stringer.go
import (
"bytes"
"fmt"
"io"
"strings"
)
const (
// TypeMiddle represents the node type located in the middle of the graph.
TypeMiddle Type = iota
// TypeBeginning represents the node type that indicates beginning of the graph.
TypeBeginning
// TypeEnd represents the node type that indicates end of the graph.
TypeEnd
// TypeMiddleBeginning represents the node type that indicates the beginning of a subgraph.
TypeMiddleBeginning
// TypeMiddleEnd represents the node type that indicates the end of a subgraph.
TypeMiddleEnd
// TypeHidden represents the node type that is ignored by a drawer, other than that.
// It can be considered equal to TypeMiddle.
TypeHidden
)
const (
statusNotSeen status = iota
statusVisited
statusDone
statusFailed
)
type (
// Type is a kind of a node.
Type int
status int
)
// Node is a single vertex of a graph.
type Node struct {
// Data stores logic associated with the node that may be executed by a visitor.
Data interface{}
status status
kind Type
parents, children Nodes
beginning, end *Node
}
// New instantiate a new graph. It returns its beginning and its end.
// It's up to the caller how it wants to interact with the graph.
func New() (*Node, *Node) {
b := &Node{kind: TypeBeginning}
e := &Node{kind: TypeEnd}
b.children.add(e)
e.parents.add(b)
b.end = e
e.beginning = b
return b, e
}
// Hidden instantiates an "invisible" node.
func Hidden(d interface{}) *Node {
return &Node{
Data: d,
kind: TypeHidden,
}
}
func (n *Node) isGraph() bool {
return n.beginning != nil || n.end != nil
}
// Type returns node kind.
func (n *Node) Type() Type {
return n.kind
}
// Done returns true if node can be considered visited.
func (n *Node) Done() bool {
return n.status == statusDone || n.kind == TypeBeginning
}
// MarkAsDone marks node as already processed.
func (n *Node) MarkAsDone() {
n.status = statusDone
}
// MarkAsFailed marks node as failed.
func (n *Node) MarkAsFailed() {
n.status = statusFailed
}
// GoString implements fmt GoStringer interface.
func (n Node) GoString() string {
buf := bytes.NewBuffer(nil)
n.goString(buf, 0)
return buf.String()
}
func (n Node) goString(w io.Writer, i int) {
if dat, ok := n.Data.(interface{ Name() string }); ok {
_, _ = fmt.Fprintf(w, fmt.Sprintf("%%%ds %%s\n", i*2), "", dat.Name())
} else {
_, _ = fmt.Fprintf(w, fmt.Sprintf("%%%ds %%v\n", i*2), "", n.Data)
}
for _, child := range n.children {
child.goString(w, i+1)
}
}
// After allows to attach node one after another.
// It takes care of already existing connections receiver node has,
// so that node that is injected become a proper bridge.
// Injected node loses its own connections.
func (n *Node) After(node *Node) {
for _, child := range n.children {
if node.isGraph() {
child.parents.replace(n, node.end)
node.end.children.add(child)
} else {
child.parents.replace(n, node)
node.children.add(child)
}
n.children.remove(child)
}
if node.isGraph() {
if node.beginning != nil {
panic("cant pass last node of a group into after method")
}
node.kind = middleType(node.kind)
node.end.kind = middleType(node.end.kind)
}
node.parents.add(n)
n.children.add(node)
}
// Between allows to inject receiver node between given nodes.
// Receiver node loses its own connections.
func (n *Node) Between(beginning, end *Node) {
if n.isGraph() {
beginning.children.replace(end, n)
end.parents.replace(beginning, n.end)
n.parents.add(beginning)
n.end.children.add(end)
n.kind = middleType(n.kind)
n.end.kind = middleType(n.end.kind)
return
}
n.between(beginning, end)
}
func (n *Node) between(parent, child *Node) {
for i, c := range parent.children {
if c == child {
parent.children = append(parent.children[:i], parent.children[i+1:]...)
break
}
}
for i, p := range child.parents {
if p == parent {
child.parents = append(child.parents[:i], child.parents[i+1:]...)
break
}
}
parent.children.replace(child, n)
child.parents.replace(parent, n)
n.parents.add(parent)
n.children.add(child)
}
// Children returns a collection of nodes given node is the parent of.
func (n *Node) Children() Nodes {
return n.children
}
// Parents returns a collection of nodes given node is child of.
func (n *Node) Parents() Nodes {
return n.parents
}
// Nodes is a set of nodes.
type Nodes []*Node
// String implements fmt Stringer interface.
// Nodes whose Data exposes a Name() string use it; everything else falls
// back to fmt.Sprint of the data. Entries are joined with newlines.
func (n Nodes) String() string {
	// Pre-size to avoid repeated slice growth. Name() already returns a
	// string, so the original fmt.Sprint wrapper around it was redundant.
	parts := make([]string, 0, len(n))
	for _, nn := range n {
		if named, ok := nn.Data.(interface{ Name() string }); ok {
			parts = append(parts, named.Name())
		} else {
			parts = append(parts, fmt.Sprint(nn.Data))
		}
	}
	return strings.Join(parts, "\n")
}
// done reports whether every node in the set is Done.
// Done() is deliberately invoked on all nodes (no early exit), matching
// the original behaviour.
func (n Nodes) done() bool {
	all := true
	for _, node := range n {
		all = node.Done() && all
	}
	return all
}
// add appends node to the set unless it is already a member;
// it reports whether the node was actually added.
func (n *Nodes) add(node *Node) bool {
	if n.contains(node) {
		return false
	}
	*n = append(*n, node)
	return true
}
// replace swaps the first occurrence of before for after, preserving its
// position. If before is absent, after is appended instead.
func (n *Nodes) replace(before, after *Node) {
	for i := range *n {
		if (*n)[i] == before {
			(*n)[i] = after
			return
		}
	}
	n.add(after)
}
// remove deletes the first occurrence of node from the set; it is a no-op
// when node is not a member.
func (n *Nodes) remove(node *Node) {
	for i, candidate := range *n {
		if candidate != node {
			continue
		}
		*n = append((*n)[:i], (*n)[i+1:]...)
		return
	}
}
// contains reports whether node (compared by pointer identity) is a member
// of the set.
func (n Nodes) contains(node *Node) bool {
	for _, member := range n {
		if member == node {
			return true
		}
	}
	return false
}
// middleType maps a beginning/end node type to its "middle" counterpart;
// all other types pass through unchanged.
func middleType(t Type) Type {
	if t == TypeBeginning {
		return TypeMiddleBeginning
	}
	if t == TypeEnd {
		return TypeMiddleEnd
	}
	return t
}
func isMiddleType(t Type) bool {
switch t {
case TypeMiddle, TypeMiddleBeginning, TypeMiddleEnd, TypeHidden:
return true
default:
return false
}
} | pkg/dag/node.go | 0.686895 | 0.467332 | node.go | starcoder |
package sctp
import (
"encoding/binary"
"github.com/pkg/errors"
)
/*
chunkHeader represents a SCTP Chunk header, defined in https://tools.ietf.org/html/rfc4960#section-3.2
The figure below illustrates the field format for the chunks to be
transmitted in the SCTP packet. Each chunk is formatted with a Chunk
Type field, a chunk-specific Flag field, a Chunk Length field, and a
Value field.
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Chunk Type | Chunk Flags | Chunk Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| Chunk Value |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
// chunkHeader carries the decoded header fields plus the raw value bytes of
// a single SCTP chunk (see the RFC 4960 layout above).
type chunkHeader struct {
	typ   chunkType // Chunk Type: identifies the kind of chunk
	flags byte      // Chunk Flags: meaning depends on the chunk type
	raw   []byte    // Chunk Value: payload without header or trailing padding
}
const (
	// chunkHeaderSize is the fixed size in bytes of the chunk header:
	// 1 byte type + 1 byte flags + 2 bytes length.
	chunkHeaderSize = 4
)
// unmarshal parses one SCTP chunk from raw, filling typ, flags and the value
// bytes. Trailing padding (up to 3 zero bytes) is validated but excluded
// from c.raw.
func (c *chunkHeader) unmarshal(raw []byte) error {
	if len(raw) < chunkHeaderSize {
		return errors.Errorf("raw only %d bytes, %d is the minimum length for a SCTP chunk", len(raw), chunkHeaderSize)
	}
	c.typ = chunkType(raw[0])
	c.flags = raw[1]
	length := binary.BigEndian.Uint16(raw[2:])

	// Guard against a bogus length field: values below the header size would
	// otherwise underflow the unsigned subtraction and yield a huge
	// valueLength, producing a misleading error below.
	if length < chunkHeaderSize {
		return errors.Errorf("chunk length %d is smaller than the %d byte chunk header", length, chunkHeaderSize)
	}

	// Length includes Chunk header
	valueLength := int(length) - chunkHeaderSize
	lengthAfterValue := len(raw) - (chunkHeaderSize + valueLength)

	if lengthAfterValue < 0 {
		// remain = bytes actually available after the header,
		// req = bytes the length field claims (the original had them swapped).
		return errors.Errorf("Not enough data left in SCTP packet to satisfy requested length remain %d req %d ", len(raw)-chunkHeaderSize, valueLength)
	} else if lengthAfterValue < 4 {
		// https://tools.ietf.org/html/rfc4960#section-3.2
		// The Chunk Length field does not count any chunk padding.
		// Chunks (including Type, Length, and Value fields) are padded out
		// by the sender with all zero bytes to be a multiple of 4 bytes
		// long. This padding MUST NOT be more than 3 bytes in total. The
		// Chunk Length value does not include terminating padding of the
		// chunk. However, it does include padding of any variable-length
		// parameter except the last parameter in the chunk. The receiver
		// MUST ignore the padding.
		for i := lengthAfterValue; i > 0; i-- {
			paddingOffset := chunkHeaderSize + valueLength + (i - 1)
			if raw[paddingOffset] != 0 {
				return errors.Errorf("Chunk padding is non-zero at offset %d ", paddingOffset)
			}
		}
	}

	c.raw = raw[chunkHeaderSize : chunkHeaderSize+valueLength]
	return nil
}
// marshal serializes the chunk header plus value into wire format.
// The length field counts the header and the value, but not padding.
func (c *chunkHeader) marshal() ([]byte, error) {
	// Use the named constant instead of the magic number 4 so the layout
	// stays in sync with unmarshal.
	raw := make([]byte, chunkHeaderSize+len(c.raw))
	raw[0] = uint8(c.typ)
	raw[1] = c.flags
	binary.BigEndian.PutUint16(raw[2:], uint16(len(c.raw)+chunkHeaderSize))
	copy(raw[chunkHeaderSize:], c.raw)
	return raw, nil
}
// valueLength returns the number of bytes in the chunk value, i.e. the
// serialized chunk length minus the 4-byte header and any padding.
func (c *chunkHeader) valueLength() int {
	return len(c.raw)
}
// String makes chunkHeader printable by delegating to the chunk type's name.
func (c chunkHeader) String() string {
	return c.typ.String()
}
package onshape
import (
"encoding/json"
)
// BTPLiteralMap256 struct for BTPLiteralMap256.
// Generated API model; every field is an optional pointer so that an unset
// value can be distinguished from a zero value when (de)serializing JSON.
type BTPLiteralMap256 struct {
	Atomic *bool `json:"atomic,omitempty"`
	BtType *string `json:"btType,omitempty"`
	DocumentationType *string `json:"documentationType,omitempty"`
	EndSourceLocation *int32 `json:"endSourceLocation,omitempty"`
	Entries *[]BTPLiteralMapEntry257 `json:"entries,omitempty"`
	NodeId *string `json:"nodeId,omitempty"`
	ShortDescriptor *string `json:"shortDescriptor,omitempty"`
	SpaceAfter *BTPSpace10 `json:"spaceAfter,omitempty"`
	SpaceBefore *BTPSpace10 `json:"spaceBefore,omitempty"`
	SpaceDefault *bool `json:"spaceDefault,omitempty"`
	SpaceInEmptyList *BTPSpace10 `json:"spaceInEmptyList,omitempty"`
	StartSourceLocation *int32 `json:"startSourceLocation,omitempty"`
	TrailingComma *bool `json:"trailingComma,omitempty"`
}
// NewBTPLiteralMap256 instantiates a new BTPLiteralMap256 object.
// It assigns defaults to properties that define them and initialises the
// required properties; its argument list changes with the required set.
func NewBTPLiteralMap256() *BTPLiteralMap256 {
	return &BTPLiteralMap256{}
}
// NewBTPLiteralMap256WithDefaults instantiates a new BTPLiteralMap256 object,
// assigning defaults only to properties that define them; required
// properties are not guaranteed to be set.
func NewBTPLiteralMap256WithDefaults() *BTPLiteralMap256 {
	return &BTPLiteralMap256{}
}
// GetAtomic returns the Atomic field value if set, zero value otherwise.
func (o *BTPLiteralMap256) GetAtomic() bool {
	if o != nil && o.Atomic != nil {
		return *o.Atomic
	}
	var zero bool
	return zero
}

// GetAtomicOk returns the Atomic field pointer and true when set,
// or nil and false otherwise.
func (o *BTPLiteralMap256) GetAtomicOk() (*bool, bool) {
	if o == nil || o.Atomic == nil {
		return nil, false
	}
	return o.Atomic, true
}

// HasAtomic reports whether the Atomic field has been set.
func (o *BTPLiteralMap256) HasAtomic() bool {
	return o != nil && o.Atomic != nil
}

// SetAtomic stores v in the Atomic field.
func (o *BTPLiteralMap256) SetAtomic(v bool) {
	o.Atomic = &v
}
// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTPLiteralMap256) GetBtType() string {
	if o != nil && o.BtType != nil {
		return *o.BtType
	}
	var zero string
	return zero
}

// GetBtTypeOk returns the BtType field pointer and true when set,
// or nil and false otherwise.
func (o *BTPLiteralMap256) GetBtTypeOk() (*string, bool) {
	if o == nil || o.BtType == nil {
		return nil, false
	}
	return o.BtType, true
}

// HasBtType reports whether the BtType field has been set.
func (o *BTPLiteralMap256) HasBtType() bool {
	return o != nil && o.BtType != nil
}

// SetBtType stores v in the BtType field.
func (o *BTPLiteralMap256) SetBtType(v string) {
	o.BtType = &v
}
// GetDocumentationType returns the DocumentationType field value if set,
// zero value otherwise.
func (o *BTPLiteralMap256) GetDocumentationType() string {
	if o != nil && o.DocumentationType != nil {
		return *o.DocumentationType
	}
	var zero string
	return zero
}

// GetDocumentationTypeOk returns the DocumentationType field pointer and
// true when set, or nil and false otherwise.
func (o *BTPLiteralMap256) GetDocumentationTypeOk() (*string, bool) {
	if o == nil || o.DocumentationType == nil {
		return nil, false
	}
	return o.DocumentationType, true
}

// HasDocumentationType reports whether the DocumentationType field has been set.
func (o *BTPLiteralMap256) HasDocumentationType() bool {
	return o != nil && o.DocumentationType != nil
}

// SetDocumentationType stores v in the DocumentationType field.
func (o *BTPLiteralMap256) SetDocumentationType(v string) {
	o.DocumentationType = &v
}
// GetEndSourceLocation returns the EndSourceLocation field value if set,
// zero value otherwise.
func (o *BTPLiteralMap256) GetEndSourceLocation() int32 {
	if o != nil && o.EndSourceLocation != nil {
		return *o.EndSourceLocation
	}
	var zero int32
	return zero
}

// GetEndSourceLocationOk returns the EndSourceLocation field pointer and
// true when set, or nil and false otherwise.
func (o *BTPLiteralMap256) GetEndSourceLocationOk() (*int32, bool) {
	if o == nil || o.EndSourceLocation == nil {
		return nil, false
	}
	return o.EndSourceLocation, true
}

// HasEndSourceLocation reports whether the EndSourceLocation field has been set.
func (o *BTPLiteralMap256) HasEndSourceLocation() bool {
	return o != nil && o.EndSourceLocation != nil
}

// SetEndSourceLocation stores v in the EndSourceLocation field.
func (o *BTPLiteralMap256) SetEndSourceLocation(v int32) {
	o.EndSourceLocation = &v
}
// GetEntries returns the Entries field value if set, zero value (nil slice)
// otherwise.
func (o *BTPLiteralMap256) GetEntries() []BTPLiteralMapEntry257 {
	if o != nil && o.Entries != nil {
		return *o.Entries
	}
	var zero []BTPLiteralMapEntry257
	return zero
}

// GetEntriesOk returns the Entries field pointer and true when set,
// or nil and false otherwise.
func (o *BTPLiteralMap256) GetEntriesOk() (*[]BTPLiteralMapEntry257, bool) {
	if o == nil || o.Entries == nil {
		return nil, false
	}
	return o.Entries, true
}

// HasEntries reports whether the Entries field has been set.
func (o *BTPLiteralMap256) HasEntries() bool {
	return o != nil && o.Entries != nil
}

// SetEntries stores v in the Entries field.
func (o *BTPLiteralMap256) SetEntries(v []BTPLiteralMapEntry257) {
	o.Entries = &v
}
// GetNodeId returns the NodeId field value if set, zero value otherwise.
func (o *BTPLiteralMap256) GetNodeId() string {
	if o != nil && o.NodeId != nil {
		return *o.NodeId
	}
	var zero string
	return zero
}

// GetNodeIdOk returns the NodeId field pointer and true when set,
// or nil and false otherwise.
func (o *BTPLiteralMap256) GetNodeIdOk() (*string, bool) {
	if o == nil || o.NodeId == nil {
		return nil, false
	}
	return o.NodeId, true
}

// HasNodeId reports whether the NodeId field has been set.
func (o *BTPLiteralMap256) HasNodeId() bool {
	return o != nil && o.NodeId != nil
}

// SetNodeId stores v in the NodeId field.
func (o *BTPLiteralMap256) SetNodeId(v string) {
	o.NodeId = &v
}
// GetShortDescriptor returns the ShortDescriptor field value if set,
// zero value otherwise.
func (o *BTPLiteralMap256) GetShortDescriptor() string {
	if o != nil && o.ShortDescriptor != nil {
		return *o.ShortDescriptor
	}
	var zero string
	return zero
}

// GetShortDescriptorOk returns the ShortDescriptor field pointer and true
// when set, or nil and false otherwise.
func (o *BTPLiteralMap256) GetShortDescriptorOk() (*string, bool) {
	if o == nil || o.ShortDescriptor == nil {
		return nil, false
	}
	return o.ShortDescriptor, true
}

// HasShortDescriptor reports whether the ShortDescriptor field has been set.
func (o *BTPLiteralMap256) HasShortDescriptor() bool {
	return o != nil && o.ShortDescriptor != nil
}

// SetShortDescriptor stores v in the ShortDescriptor field.
func (o *BTPLiteralMap256) SetShortDescriptor(v string) {
	o.ShortDescriptor = &v
}
// GetSpaceAfter returns the SpaceAfter field value if set, zero value otherwise.
func (o *BTPLiteralMap256) GetSpaceAfter() BTPSpace10 {
	if o != nil && o.SpaceAfter != nil {
		return *o.SpaceAfter
	}
	var zero BTPSpace10
	return zero
}

// GetSpaceAfterOk returns the SpaceAfter field pointer and true when set,
// or nil and false otherwise.
func (o *BTPLiteralMap256) GetSpaceAfterOk() (*BTPSpace10, bool) {
	if o == nil || o.SpaceAfter == nil {
		return nil, false
	}
	return o.SpaceAfter, true
}

// HasSpaceAfter reports whether the SpaceAfter field has been set.
func (o *BTPLiteralMap256) HasSpaceAfter() bool {
	return o != nil && o.SpaceAfter != nil
}

// SetSpaceAfter stores v in the SpaceAfter field.
func (o *BTPLiteralMap256) SetSpaceAfter(v BTPSpace10) {
	o.SpaceAfter = &v
}
// GetSpaceBefore returns the SpaceBefore field value if set, zero value otherwise.
func (o *BTPLiteralMap256) GetSpaceBefore() BTPSpace10 {
	if o != nil && o.SpaceBefore != nil {
		return *o.SpaceBefore
	}
	var zero BTPSpace10
	return zero
}

// GetSpaceBeforeOk returns the SpaceBefore field pointer and true when set,
// or nil and false otherwise.
func (o *BTPLiteralMap256) GetSpaceBeforeOk() (*BTPSpace10, bool) {
	if o == nil || o.SpaceBefore == nil {
		return nil, false
	}
	return o.SpaceBefore, true
}

// HasSpaceBefore reports whether the SpaceBefore field has been set.
func (o *BTPLiteralMap256) HasSpaceBefore() bool {
	return o != nil && o.SpaceBefore != nil
}

// SetSpaceBefore stores v in the SpaceBefore field.
func (o *BTPLiteralMap256) SetSpaceBefore(v BTPSpace10) {
	o.SpaceBefore = &v
}
// GetSpaceDefault returns the SpaceDefault field value if set, zero value otherwise.
func (o *BTPLiteralMap256) GetSpaceDefault() bool {
	if o != nil && o.SpaceDefault != nil {
		return *o.SpaceDefault
	}
	var zero bool
	return zero
}

// GetSpaceDefaultOk returns the SpaceDefault field pointer and true when set,
// or nil and false otherwise.
func (o *BTPLiteralMap256) GetSpaceDefaultOk() (*bool, bool) {
	if o == nil || o.SpaceDefault == nil {
		return nil, false
	}
	return o.SpaceDefault, true
}

// HasSpaceDefault reports whether the SpaceDefault field has been set.
func (o *BTPLiteralMap256) HasSpaceDefault() bool {
	return o != nil && o.SpaceDefault != nil
}

// SetSpaceDefault stores v in the SpaceDefault field.
func (o *BTPLiteralMap256) SetSpaceDefault(v bool) {
	o.SpaceDefault = &v
}
// GetSpaceInEmptyList returns the SpaceInEmptyList field value if set,
// zero value otherwise.
func (o *BTPLiteralMap256) GetSpaceInEmptyList() BTPSpace10 {
	if o != nil && o.SpaceInEmptyList != nil {
		return *o.SpaceInEmptyList
	}
	var zero BTPSpace10
	return zero
}

// GetSpaceInEmptyListOk returns the SpaceInEmptyList field pointer and true
// when set, or nil and false otherwise.
func (o *BTPLiteralMap256) GetSpaceInEmptyListOk() (*BTPSpace10, bool) {
	if o == nil || o.SpaceInEmptyList == nil {
		return nil, false
	}
	return o.SpaceInEmptyList, true
}

// HasSpaceInEmptyList reports whether the SpaceInEmptyList field has been set.
func (o *BTPLiteralMap256) HasSpaceInEmptyList() bool {
	return o != nil && o.SpaceInEmptyList != nil
}

// SetSpaceInEmptyList stores v in the SpaceInEmptyList field.
func (o *BTPLiteralMap256) SetSpaceInEmptyList(v BTPSpace10) {
	o.SpaceInEmptyList = &v
}
// GetStartSourceLocation returns the StartSourceLocation field value if set,
// zero value otherwise.
func (o *BTPLiteralMap256) GetStartSourceLocation() int32 {
	if o != nil && o.StartSourceLocation != nil {
		return *o.StartSourceLocation
	}
	var zero int32
	return zero
}

// GetStartSourceLocationOk returns the StartSourceLocation field pointer and
// true when set, or nil and false otherwise.
func (o *BTPLiteralMap256) GetStartSourceLocationOk() (*int32, bool) {
	if o == nil || o.StartSourceLocation == nil {
		return nil, false
	}
	return o.StartSourceLocation, true
}

// HasStartSourceLocation reports whether the StartSourceLocation field has been set.
func (o *BTPLiteralMap256) HasStartSourceLocation() bool {
	return o != nil && o.StartSourceLocation != nil
}

// SetStartSourceLocation stores v in the StartSourceLocation field.
func (o *BTPLiteralMap256) SetStartSourceLocation(v int32) {
	o.StartSourceLocation = &v
}
// GetTrailingComma returns the TrailingComma field value if set,
// zero value otherwise.
func (o *BTPLiteralMap256) GetTrailingComma() bool {
	if o != nil && o.TrailingComma != nil {
		return *o.TrailingComma
	}
	var zero bool
	return zero
}

// GetTrailingCommaOk returns the TrailingComma field pointer and true when
// set, or nil and false otherwise.
func (o *BTPLiteralMap256) GetTrailingCommaOk() (*bool, bool) {
	if o == nil || o.TrailingComma == nil {
		return nil, false
	}
	return o.TrailingComma, true
}

// HasTrailingComma reports whether the TrailingComma field has been set.
func (o *BTPLiteralMap256) HasTrailingComma() bool {
	return o != nil && o.TrailingComma != nil
}

// SetTrailingComma stores v in the TrailingComma field.
func (o *BTPLiteralMap256) SetTrailingComma(v bool) {
	o.TrailingComma = &v
}
// MarshalJSON serializes only the fields that have been set; unset (nil)
// pointers are omitted entirely.
func (o BTPLiteralMap256) MarshalJSON() ([]byte, error) {
	out := map[string]interface{}{}
	// add stores v under key only when the field is set. The set flag is
	// computed before boxing v, since a typed nil pointer inside an
	// interface{} would not compare equal to nil.
	add := func(key string, set bool, v interface{}) {
		if set {
			out[key] = v
		}
	}
	add("atomic", o.Atomic != nil, o.Atomic)
	add("btType", o.BtType != nil, o.BtType)
	add("documentationType", o.DocumentationType != nil, o.DocumentationType)
	add("endSourceLocation", o.EndSourceLocation != nil, o.EndSourceLocation)
	add("entries", o.Entries != nil, o.Entries)
	add("nodeId", o.NodeId != nil, o.NodeId)
	add("shortDescriptor", o.ShortDescriptor != nil, o.ShortDescriptor)
	add("spaceAfter", o.SpaceAfter != nil, o.SpaceAfter)
	add("spaceBefore", o.SpaceBefore != nil, o.SpaceBefore)
	add("spaceDefault", o.SpaceDefault != nil, o.SpaceDefault)
	add("spaceInEmptyList", o.SpaceInEmptyList != nil, o.SpaceInEmptyList)
	add("startSourceLocation", o.StartSourceLocation != nil, o.StartSourceLocation)
	add("trailingComma", o.TrailingComma != nil, o.TrailingComma)
	return json.Marshal(out)
}
// NullableBTPLiteralMap256 wraps a BTPLiteralMap256 pointer together with an
// isSet flag, so an explicit JSON null can be distinguished from "absent".
type NullableBTPLiteralMap256 struct {
	value *BTPLiteralMap256 // wrapped value; may be nil even when set
	isSet bool              // true once Set or UnmarshalJSON has run
}
func (v NullableBTPLiteralMap256) Get() *BTPLiteralMap256 {
return v.value
}
func (v *NullableBTPLiteralMap256) Set(val *BTPLiteralMap256) {
v.value = val
v.isSet = true
}
func (v NullableBTPLiteralMap256) IsSet() bool {
return v.isSet
}
func (v *NullableBTPLiteralMap256) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableBTPLiteralMap256(val *BTPLiteralMap256) *NullableBTPLiteralMap256 {
return &NullableBTPLiteralMap256{value: val, isSet: true}
}
func (v NullableBTPLiteralMap256) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableBTPLiteralMap256) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | onshape/model_btp_literal_map_256.go | 0.701611 | 0.419648 | model_btp_literal_map_256.go | starcoder |
package kafkachannel
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"knative.dev/reconciler-test/pkg/feature"
)
// ConfigureDataPlane creates a Feature which sets up the specified KafkaChannel
// Subscription and EventsHub Receiver so that it is ready to receive CloudEvents.
func ConfigureDataPlane(ctx context.Context, t *testing.T) *feature.Feature {
	// Resolve the per-test resource names from the context.
	testName := TestName(ctx, t)
	receiverName := ReceiverName(ctx, t)

	// Assemble the data-plane feature: receiver, channel and subscription
	// setup, followed by readiness assertions.
	feat := feature.NewFeatureNamed("Configure Data-Plane")
	setupEventsHubReceiver(feat, receiverName)
	setupKafkaChannel(feat, testName)
	setupSubscription(feat, testName, receiverName)
	assertKafkaChannelReady(feat, testName)
	assertSubscriptionReady(feat, testName)

	return feat
}
// SendEvents creates a Feature which sends a number of CloudEvents to the
// specified KafkaChannel and verifies their receipt in the corresponding
// EventsHub Receiver. The backing KafkaChannel / Subscription / Receiver are
// assumed to already be in place and ready to receive these events.
func SendEvents(ctx context.Context, t *testing.T, eventCount int, startId int, endId int) *feature.Feature {
	// Resolve the per-test resource names from the context.
	testName := TestName(ctx, t)
	senderName := SenderName(ctx, t)
	senderSink := SenderSink(ctx, t)
	receiverName := ReceiverName(ctx, t)

	// Base CloudEvent to send (the EventsHub Sender assigns the IDs).
	event, err := newEvent(testName, senderName)
	assert.Nil(t, err)

	feat := feature.NewFeatureNamed("Send Events")
	setupEventsHubSender(feat, senderName, senderSink, event, eventCount)
	assertEventsReceived(feat, receiverName, event, eventCount, startId, endId)

	return feat
}
// ReplayEvents creates a Feature which adjusts a KafkaChannel Subscription to a specific
// offset time by creating a ResetOffset and verifying the expected final event count.
// The actual count is dependent upon the number of events in the KafkaChannel (Topic)
// related to the specified offsetTime.
func ReplayEvents(ctx context.Context, t *testing.T, offsetTime string, eventCount int, startId int, endId int) *feature.Feature {
// Get Test Names From Context
testName := TestName(ctx, t)
senderName := SenderName(ctx, t)
receiverName := ReceiverName(ctx, t)
// Create The Base CloudEvent To Send (ID will be set by the EventsHub Sender)
event, err := newEvent(testName, senderName)
assert.Nil(t, err)
// Create A New Feature To Replay Events And Verify Receipt
f := feature.NewFeatureNamed("Replay Events")
setupResetOffset(f, testName, offsetTime)
assertResetOffsetSucceeded(f, testName)
assertEventsReceived(f, receiverName, event, eventCount, startId, endId)
// Return The ReplayEvents Feature
return f
} | test/rekt/features/kafkachannel/dataplane.go | 0.765243 | 0.476032 | dataplane.go | starcoder |
package magica
import (
"github.com/mattkimber/gandalf/geometry"
"github.com/mattkimber/gandalf/magica/scenegraph"
"github.com/mattkimber/gandalf/magica/types"
"github.com/mattkimber/gandalf/utils"
)
// VoxelData is a dense [x][y][z] grid of palette indices; 0 means empty.
type VoxelData [][][]byte

// VoxelObject is a voxel model: its voxel grid, raw palette bytes and
// overall dimensions.
type VoxelObject struct {
	Voxels      [][][]byte     // palette index per voxel, indexed [x][y][z]; 0 = empty
	PaletteData []byte         // raw palette bytes (not interpreted by this type)
	Size        geometry.Point // grid dimensions along X, Y and Z
}
// GetPoints collects every non-empty voxel as a point/colour pair, in
// x-major, then y, then z iteration order.
func (v *VoxelObject) GetPoints() types.PointData {
	// First pass: count the non-empty voxels so the result can be
	// allocated exactly once.
	count := 0
	v.Iterate(func(x, y, z int) {
		if v.Voxels[x][y][z] != 0 {
			count++
		}
	})
	// Second pass: fill the pre-sized slice.
	points := make(types.PointData, 0, count)
	v.Iterate(func(x, y, z int) {
		if colour := v.Voxels[x][y][z]; colour != 0 {
			points = append(points, geometry.PointWithColour{
				Point:  geometry.Point{X: x, Y: y, Z: z},
				Colour: colour,
			})
		}
	})
	return points
}
// NewVoxelObject returns an empty voxel object of the specified size using
// the given palette bytes.
func NewVoxelObject(size geometry.Point, palette []byte) VoxelObject {
	voxels := make([][][]byte, size.X)
	for x := range voxels {
		plane := make([][]byte, size.Y)
		for y := range plane {
			plane[y] = make([]byte, size.Z)
		}
		voxels[x] = plane
	}
	return VoxelObject{
		Voxels:      voxels,
		PaletteData: palette,
		Size:        size,
	}
}
// Copy returns a deep copy of the voxel grid. The palette bytes are shared
// (shallow copy), since nothing in this package mutates them.
func (v *VoxelObject) Copy() VoxelObject {
	out := VoxelObject{
		Size:        v.Size,
		PaletteData: v.PaletteData,
		Voxels:      utils.Make3DByteSlice(types.Size{X: v.Size.X, Y: v.Size.Y, Z: v.Size.Z}),
	}
	v.Iterate(func(x, y, z int) {
		out.Voxels[x][y][z] = v.Voxels[x][y][z]
	})
	return out
}
// Set sets the voxel at loc to index i
// (no bounds checking; see SafeSet for the checked variant).
func (v *VoxelObject) Set(loc geometry.Point, i byte) {
	v.Voxels[loc.X][loc.Y][loc.Z] = i
}
// Get gets the voxel index at loc
// (no bounds checking; see SafeGet for the checked variant).
func (v *VoxelObject) Get(loc geometry.Point) byte {
	return v.Voxels[loc.X][loc.Y][loc.Z]
}
// SafeSet sets the voxel at loc to index i; out-of-bounds locations are
// silently ignored.
func (v *VoxelObject) SafeSet(loc geometry.Point, i byte) {
	if !loc.IsInBounds(geometry.Bounds{Max: v.Size}) {
		return
	}
	v.Voxels[loc.X][loc.Y][loc.Z] = i
}
// SafeGet gets the voxel index at loc, or 0 (empty) for out-of-bounds
// locations.
func (v *VoxelObject) SafeGet(loc geometry.Point) byte {
	if !loc.IsInBounds(geometry.Bounds{Max: v.Size}) {
		return 0
	}
	return v.Voxels[loc.X][loc.Y][loc.Z]
}
// Iterate invokes fn once for every voxel coordinate, in x-major, then y,
// then z order.
func (v *VoxelObject) Iterate(fn func(int, int, int)) {
	for x := 0; x < v.Size.X; x++ {
		for y := 0; y < v.Size.Y; y++ {
			for z := 0; z < v.Size.Z; z++ {
				fn(x, y, z)
			}
		}
	}
}
// max returns the larger of two ints.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}
// Split partitions the object into a grid of cubes of edge length `size`
// and returns a scene graph with one child node per cube.
// Each cube is trimmed to the bounding box of its non-empty voxels (rounded
// up to even dimensions), and the child's Location appears to be the cube
// origin plus half the trimmed extent — presumably a centre point; confirm
// against the scenegraph consumer.
func (v *VoxelObject) Split(size int) scenegraph.Node {
	// Number of cubes per axis, rounding up so partial cubes are kept.
	objectsX := (v.Size.X + (size - 1)) / size
	objectsY := (v.Size.Y + (size - 1)) / size
	objectsZ := (v.Size.Z + (size - 1)) / size
	nodes := make([]scenegraph.Node, 0)
	for x := 0; x < objectsX; x++ {
		for y := 0; y < objectsY; y++ {
			for z := 0; z < objectsZ; z++ {
				// Track the extent of the occupied region within this cube.
				maxX, maxY, maxZ := 0, 0, 0
				object := NewVoxelObject(geometry.Point{X: size, Y: size, Z:size}, v.PaletteData)
				object.Iterate(func(i,j,k int) {
					// (i0,j0,k0) is the voxel's position in the source object;
					// SafeGet returns 0 (empty) past the source bounds.
					i0 := i + (x * size)
					j0 := j + (y * size)
					k0 := k + (z * size)
					colour := v.SafeGet(geometry.Point{X: i0, Y: j0, Z: k0})
					if colour != 0 {
						object.Set(geometry.Point{X: i, Y: j, Z: k}, colour)
						maxX = max(maxX, i+1)
						maxY = max(maxY, j+1)
						maxZ = max(maxZ, k+1)
					}
				})
				// Make all sizes divisible by 2
				maxX = maxX + (maxX % 2)
				maxY = maxY + (maxY % 2)
				maxZ = maxZ + (maxZ % 2)
				node := scenegraph.Node{
					Location: geometry.Point{X: (x*size)+(maxX/2), Y: (y*size)+(maxY/2), Z: (z*size)+(maxZ/2)},
					Size: types.Size{X: maxX, Y: maxY, Z: maxZ},
					Models: []scenegraph.Model{{
						Points: object.GetPoints(),
						Size: types.Size{X: maxX, Y: maxY, Z: maxZ},
					}},
				}
				nodes = append(nodes, node)
			}
		}
	}
	// Root node spans the full (rounded-up) cube grid.
	return scenegraph.Node{
		Location: geometry.Point{X: 0, Y: 0, Z: 0},
		Size: types.Size{X: objectsX * size, Y: objectsY * size, Z: objectsZ * size},
		Children: nodes,
	}
}
package backoff
import (
"fmt"
"time"
)
const (
	// DefaultMaxIterations is the default number of iterations: 0 means infinite.
	DefaultMaxIterations uint64 = 0
	// DefaultMaxIterationTime is the default upper bound for a single sleep
	// between attempts.
	DefaultMaxIterationTime = 60 * time.Second
	// DefaultMaxTotalTime is the default total execution time: 0 means infinite.
	DefaultMaxTotalTime time.Duration = 0
	// DefaultMinIterationTime is the default initial sleep between attempts.
	DefaultMinIterationTime = 100 * time.Millisecond
	// DefaultMultipler is the default factor applied to the sleep after each
	// iteration. NOTE(review): the name is misspelled ("Multiplier") but it
	// is exported, so it is kept for backward compatibility.
	DefaultMultipler float64 = 1.5
)
// Backoff holds the configuration for backoff function retries
type Backoff struct {
	MaxIterations    uint64        // maximum attempts; 0 disables the limit
	MaxIterationTime time.Duration // upper bound for a single sleep between attempts
	MaxTotalTime     time.Duration // total wall-clock budget; 0 disables the limit
	MinIterationTime time.Duration // initial sleep between attempts
	Multiplier       float64       // factor applied to the sleep after each attempt
}
// NewBackoff creates a new Backoff configuration populated with the package
// default values (see the exported constants).
func NewBackoff() *Backoff {
	b := Backoff{
		MaxIterations:    DefaultMaxIterations,
		MaxIterationTime: DefaultMaxIterationTime,
		MaxTotalTime:     DefaultMaxTotalTime,
		MinIterationTime: DefaultMinIterationTime,
		Multiplier:       DefaultMultipler,
	}
	return &b
}
// Retry executes the function and waits for it to end successful or for the
// given limits to be reached. The returned error uses Go1.13 wrapping (%w)
// and can be unwrapped into the error of the function itself.
func (b Backoff) Retry(f Retryable) error {
	var (
		iterations uint64
		sleepTime  = b.MinIterationTime
		start      = time.Now()
	)

	for {
		err := f()
		if err == nil {
			return nil
		}

		iterations++
		// A MaxIterations / MaxTotalTime of zero means "no limit".
		// >= instead of == guards against ever stepping past the limit.
		// Error strings are lowercase per Go convention (staticcheck ST1005).
		if b.MaxIterations > 0 && iterations >= b.MaxIterations {
			return fmt.Errorf("maximum iterations reached: %w", err)
		}
		if b.MaxTotalTime > 0 && time.Since(start) >= b.MaxTotalTime {
			return fmt.Errorf("maximum execution time reached: %w", err)
		}

		time.Sleep(sleepTime)
		sleepTime = b.nextIterationSleep(sleepTime)
	}
}
// WithMaxIterations sets MaxIterations and returns the Backoff object for
// chained creation.
func (b *Backoff) WithMaxIterations(v uint64) *Backoff {
	b.MaxIterations = v
	return b
}

// WithMaxIterationTime sets MaxIterationTime and returns the Backoff object
// for chained creation.
func (b *Backoff) WithMaxIterationTime(v time.Duration) *Backoff {
	b.MaxIterationTime = v
	return b
}

// WithMaxTotalTime sets MaxTotalTime and returns the Backoff object for
// chained creation.
func (b *Backoff) WithMaxTotalTime(v time.Duration) *Backoff {
	b.MaxTotalTime = v
	return b
}

// WithMinIterationTime sets MinIterationTime and returns the Backoff object
// for chained creation.
func (b *Backoff) WithMinIterationTime(v time.Duration) *Backoff {
	b.MinIterationTime = v
	return b
}

// WithMultiplier sets Multiplier and returns the Backoff object for chained
// creation.
func (b *Backoff) WithMultiplier(v float64) *Backoff {
	b.Multiplier = v
	return b
}
// nextIterationSleep multiplies the current sleep by the configured
// Multiplier, capped at MaxIterationTime.
func (b Backoff) nextIterationSleep(currentSleep time.Duration) time.Duration {
	next := time.Duration(float64(currentSleep) * b.Multiplier)
	if next > b.MaxIterationTime {
		return b.MaxIterationTime
	}
	return next
}
// Retryable is a function which takes no parameters and yields an error
// when it should be retried and nil when it was successful.
type Retryable func() error

// Retry is a convenience wrapper which executes f with the default backoff
// configuration (see the exported constants).
func Retry(f Retryable) error { return NewBackoff().Retry(f) }
package main
import (
"fmt"
"sort"
"wheal-investments-algorithm/funds"
"wheal-investments-algorithm/ga"
)
// main runs a genetic algorithm over fund allocations: it evolves a
// population for a fixed number of generations, breeding each generation
// from an elite pool, and prints the fittest allocation found.
func main() {
	// GA hyper-parameters.
	sizeOfPopulation := 1000  // chromosomes per generation
	numGenerations := 1000    // generations to evolve
	numElites := 50           // elite members used for selection
	probMutation := 0.5       // probability of applying mutation/crossover
	probNewChromosome := 0.05 // probability of an entirely new chromosome

	// Create the initial population and remember the best chromosome seen.
	population := ga.NewPopulation(sizeOfPopulation)
	fittestEverChromosome := population.Fittest()

	for generation := 0; generation < numGenerations; generation++ {
		var newPopulation ga.Population

		// Sort the current population by descending fitness.
		sort.Slice(population.Chromosomes,
			func(i, j int) bool {
				return population.Chromosomes[i].Fitness > population.Chromosomes[j].Fitness
			})

		// Elite pool: top chromosomes plus the best chromosome ever seen.
		// The full slice expression caps the sub-slice so the append copies
		// instead of clobbering population.Chromosomes[numElites]
		// (slice aliasing bug in the original).
		elitePopulation := ga.Population{
			Chromosomes: append(population.Chromosomes[0:numElites:numElites], fittestEverChromosome),
		}

		// Fill the next generation with exactly sizeOfPopulation members
		// (the original <= comparison produced one extra chromosome).
		for len(newPopulation.Chromosomes) < sizeOfPopulation {
			// Select a random chromosome from the elite pool.
			chromosome := elitePopulation.SelectRoulette()

			randomNumber := ga.Random().Float64()
			if randomNumber < probMutation {
				// Equal probability of each mutation/crossover type.
				// Intn(4) yields 0..3, so the cases must start at 0: the
				// original switched on 1..4, never ran MultipleCrossover
				// and silently skipped a quarter of the mutations.
				switch ga.Random().Intn(4) {
				case 0:
					chromosome.MutateIncrement()
				case 1:
					chromosome.MutateSwap()
				case 2:
					chromosome = ga.SingleCrossover(chromosome, elitePopulation.SelectRoulette())
				case 3:
					chromosome = ga.MultipleCrossover(chromosome, elitePopulation.SelectRoulette())
				}
			}

			// Occasionally inject a completely fresh chromosome.
			if randomNumber > (1.0 - probNewChromosome) {
				chromosome = ga.GenerateRandomChromosome()
			}

			newPopulation.Chromosomes = append(newPopulation.Chromosomes, chromosome)
		}

		// Score the generation and track the best chromosome ever seen.
		newPopulation.CalculateFitness()
		fittest := newPopulation.Fittest()
		if fittest.Fitness > fittestEverChromosome.Fitness {
			fittestEverChromosome = fittest
		}

		// Report this generation's champion.
		generationText := fmt.Sprintf("Gen %04d:", generation)
		fmt.Println(generationText, parametersText(fittest.GetActualFundParameters()), fittest.Fitness)

		// The new population becomes the parent of the next generation.
		population = newPopulation
	}

	// Print the final answer: the allocation of the last generation's champion.
	fittest := population.Fittest()
	fmt.Println("Answer:", allocationText(fittest.GetFundAllocationPercentage()))
}
// parametersText renders the actual fund parameters in human readable
// form: each value formatted with two decimal places and a trailing space.
func parametersText(fundParameters funds.FundParameters) string {
	out := ""
	for _, v := range fundParameters {
		out = fmt.Sprintf("%s%.2f ", out, v)
	}
	return out
}
//Returns the allocation in human readable form
func allocationText(fundAllocation ga.FundAllocation) string {
var allocationsText string
for _, value := range fundAllocation {
allocationsText += fmt.Sprintf("%.2f ", value*100)
}
return allocationsText
} | main.go | 0.599837 | 0.44065 | main.go | starcoder |
package onshape
import (
"encoding/json"
)
// BTPStatementCompressedQuery1237 is a generated API model that extends
// the embedded BTPStatement269 with two optional fields; both are
// pointers so "unset" (nil, omitted from JSON) can be distinguished
// from an explicit empty string.
type BTPStatementCompressedQuery1237 struct {
	BTPStatement269
	BtType *string `json:"btType,omitempty"`
	Query *string `json:"query,omitempty"`
}
// NewBTPStatementCompressedQuery1237 instantiates a new
// BTPStatementCompressedQuery1237 object. Properties required by the
// API must still be assigned by the caller; optional fields keep their
// zero values.
func NewBTPStatementCompressedQuery1237() *BTPStatementCompressedQuery1237 {
	return &BTPStatementCompressedQuery1237{}
}
// NewBTPStatementCompressedQuery1237WithDefaults instantiates a new
// BTPStatementCompressedQuery1237 object with schema defaults applied.
// This model declares none, so it is equivalent to the plain constructor.
func NewBTPStatementCompressedQuery1237WithDefaults() *BTPStatementCompressedQuery1237 {
	return &BTPStatementCompressedQuery1237{}
}
// GetBtType returns the BtType field value if set, the zero value otherwise.
func (o *BTPStatementCompressedQuery1237) GetBtType() string {
	if o != nil && o.BtType != nil {
		return *o.BtType
	}
	return ""
}
// GetBtTypeOk returns a pointer to the BtType field and true when it has
// been set; otherwise (nil, false).
func (o *BTPStatementCompressedQuery1237) GetBtTypeOk() (*string, bool) {
	if o != nil && o.BtType != nil {
		return o.BtType, true
	}
	return nil, false
}
// HasBtType reports whether the BtType field has been set.
func (o *BTPStatementCompressedQuery1237) HasBtType() bool {
	return o != nil && o.BtType != nil
}
// SetBtType stores a copy of v and points the BtType field at it,
// marking the field as set.
func (o *BTPStatementCompressedQuery1237) SetBtType(v string) {
	o.BtType = &v
}
// GetQuery returns the Query field value if set, the zero value otherwise.
func (o *BTPStatementCompressedQuery1237) GetQuery() string {
	if o != nil && o.Query != nil {
		return *o.Query
	}
	return ""
}
// GetQueryOk returns a pointer to the Query field and true when it has
// been set; otherwise (nil, false).
func (o *BTPStatementCompressedQuery1237) GetQueryOk() (*string, bool) {
	if o != nil && o.Query != nil {
		return o.Query, true
	}
	return nil, false
}
// HasQuery reports whether the Query field has been set.
func (o *BTPStatementCompressedQuery1237) HasQuery() bool {
	return o != nil && o.Query != nil
}
// SetQuery stores a copy of v and points the Query field at it,
// marking the field as set.
func (o *BTPStatementCompressedQuery1237) SetQuery(v string) {
	o.Query = &v
}
// MarshalJSON flattens the embedded BTPStatement269 together with this
// type's own optional fields into a single JSON object: the embedded
// struct is marshaled and re-decoded into a map, then btType/query are
// layered on top when set.
func (o BTPStatementCompressedQuery1237) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	embedded, err := json.Marshal(o.BTPStatement269)
	if err != nil {
		return []byte{}, err
	}
	if err = json.Unmarshal(embedded, &toSerialize); err != nil {
		return []byte{}, err
	}
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	if o.Query != nil {
		toSerialize["query"] = o.Query
	}
	return json.Marshal(toSerialize)
}
// NullableBTPStatementCompressedQuery1237 wraps a value pointer with an
// explicit "has been set" flag, so callers can distinguish an unset
// field from one explicitly set to null.
type NullableBTPStatementCompressedQuery1237 struct {
	value *BTPStatementCompressedQuery1237
	isSet bool
}

// Get returns the wrapped value (nil when unset or explicitly null).
func (v NullableBTPStatementCompressedQuery1237) Get() *BTPStatementCompressedQuery1237 {
	return v.value
}

// Set stores val (which may be nil, meaning JSON null) and marks the
// wrapper as set.
func (v *NullableBTPStatementCompressedQuery1237) Set(val *BTPStatementCompressedQuery1237) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set has been called (or the value decoded).
func (v NullableBTPStatementCompressedQuery1237) IsSet() bool {
	return v.isSet
}

// Unset clears the value and returns the wrapper to the unset state.
func (v *NullableBTPStatementCompressedQuery1237) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableBTPStatementCompressedQuery1237 returns a wrapper already
// marked as set to val.
func NewNullableBTPStatementCompressedQuery1237(val *BTPStatementCompressedQuery1237) *NullableBTPStatementCompressedQuery1237 {
	return &NullableBTPStatementCompressedQuery1237{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (a nil value encodes as null).
func (v NullableBTPStatementCompressedQuery1237) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
func (v *NullableBTPStatementCompressedQuery1237) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | onshape/model_btp_statement_compressed_query_1237.go | 0.662469 | 0.423041 | model_btp_statement_compressed_query_1237.go | starcoder |
// Package rla provides an implementation of RLA (Recurrent Linear Attention).
// See: "Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention" by Katharopoulos et al., 2020.
package rla
import (
"encoding/gob"
"github.com/nlpodyssey/spago/ag"
"github.com/nlpodyssey/spago/mat"
"github.com/nlpodyssey/spago/mat/float"
"github.com/nlpodyssey/spago/nn"
)
var _ nn.Model = &Model{}
// Config provides configuration settings for a RLA Model.
type Config struct {
	// InputSize is the dimensionality of the input vectors; every
	// projection matrix in the model is InputSize x InputSize.
	InputSize int
}
// Model contains the serializable parameters for an RLA neural network:
// weight/bias pairs for the key (Wk/Bk), value (Wv/Bv) and query (Wq/Bq)
// projections used by Next.
type Model struct {
	nn.Module
	Config
	Wk nn.Param `spago:"type:weights"` // key projection weights
	Bk nn.Param `spago:"type:biases"` // key projection bias
	Wv nn.Param `spago:"type:weights"` // value projection weights
	Bv nn.Param `spago:"type:biases"` // value projection bias
	Wq nn.Param `spago:"type:weights"` // query projection weights
	Bq nn.Param `spago:"type:biases"` // query projection bias
}
// State represents a state of the RLA recurrent network.
type State struct {
	// S is the accumulated key-value memory: the running sum of
	// φ(key)·valueᵀ outer products over the steps seen so far.
	S ag.Node
	// Z is the running sum of mapped keys, used to normalize the output.
	Z ag.Node
	// Y is the output produced at this step.
	Y ag.Node
}
// init registers Model with encoding/gob so instances can be serialized
// through interface-typed values.
func init() {
	gob.Register(&Model{})
}
// New returns a new RLA Model, initialized according to the given configuration.
// All projection matrices are square (InputSize x InputSize) and start
// empty (zero-valued); training is expected to populate them.
func New[T float.DType](config Config) *Model {
	return &Model{
		Config: config,
		Wk: nn.NewParam(mat.NewEmptyDense[T](config.InputSize, config.InputSize)),
		Bk: nn.NewParam(mat.NewEmptyVecDense[T](config.InputSize)),
		Wv: nn.NewParam(mat.NewEmptyDense[T](config.InputSize, config.InputSize)),
		Bv: nn.NewParam(mat.NewEmptyVecDense[T](config.InputSize)),
		Wq: nn.NewParam(mat.NewEmptyDense[T](config.InputSize, config.InputSize)),
		Bq: nn.NewParam(mat.NewEmptyVecDense[T](config.InputSize)),
	}
}
// Forward runs the recurrence over the whole input sequence, threading
// the state through Next, and returns the output node of every step.
func (m *Model) Forward(xs ...ag.Node) []ag.Node {
	outputs := make([]ag.Node, len(xs))
	var state *State
	for i, x := range xs {
		state = m.Next(state, x)
		outputs[i] = state.Y
	}
	return outputs
}
// Next performs a single forward step, producing a new state.
// It implements causal linear attention (Katharopoulos et al., 2020):
// S_t = S_{t-1} + φ(k_t)·v_tᵀ (associative memory), Z_t = Z_{t-1} + φ(k_t)
// (normalizer), and y_t = (φ(q_t)ᵀ S_t)ᵀ / (φ(q_t)·Z_t + ε), where φ is
// defaultMappingFunction (ELU+1). prevState may be nil for the first step.
func (m *Model) Next(prevState *State, x ag.Node) (s *State) {
	s = new(State)
	// Project the input into key, value and query vectors.
	key := ag.Affine(m.Bk, m.Wk, x)
	value := ag.Affine(m.Bv, m.Wv, x)
	query := ag.Affine(m.Bq, m.Wq, x)
	// φ feature map keeps the attention weights positive.
	attKey := defaultMappingFunction(key)
	attQuery := defaultMappingFunction(query)
	if prevState != nil {
		// Accumulate the outer product φ(k)·vᵀ and the normalizer sum.
		s.S = ag.Add(prevState.S, ag.Mul(attKey, ag.T(value)))
		s.Z = ag.Add(prevState.Z, attKey)
	} else {
		s.S = ag.Mul(attKey, ag.T(value))
		s.Z = attKey
	}
	// Small ε guards the division against a zero denominator.
	e := ag.Var(s.Z.Value().NewScalar(1e-12))
	s.Y = ag.DivScalar(ag.T(ag.Mul(ag.T(attQuery), s.S)), ag.AddScalar(ag.Dot(attQuery, s.Z), e))
	return
}
// defaultMappingFunction returns ELU(x) + 1
// TODO: support arbitrary mapping functions
func defaultMappingFunction(x ag.Node) ag.Node {
return ag.PositiveELU(x)
} | nn/recurrent/rla/rla.go | 0.854019 | 0.530723 | rla.go | starcoder |
package main
import (
"container/heap"
"fmt"
"regexp"
"strconv"
"github.com/budavariam/advent_of_code/2018/utils"
)
func main() {
input := utils.LoadInput("23_2")
result := LNN(input)
fmt.Println(result)
}
// LNN gets the closest point that has the largest number of nanobot intersections. Based on a solution from reddit.
// For each bot, the code calculates d = manhattan distance to origin and adds (MAX(d-r,0), 1) and (d+r, -1) to a priority queue.
// The queue is holding entries for the start and end of each "line segment" as measured by manhattan distance from the origin. At the start of the segment the 1 adds to the total of overlapping segments. The -1 that marks the segment's end, and is used to decrease the counter.
// The final loop calculates the maximum number of overlapping segments, and the point where the maximum is hit, which is the answer.
// This is really a very nice and amazingly simple solution! Thanks, /u/EriiKKo
func LNN(input []string) int {
nanobots := parseInput(input)
queue := priQueueFromBots(nanobots)
count, maxCount, result := 0, 0, 0
for queue.Len() > 0 {
current := heap.Pop(queue).(*Item)
count += current.value
if count > maxCount {
result = current.priority
maxCount = count
}
}
return result
}
// priQueueFromBots converts each bot into two sweep events on the
// manhattan-distance axis: (max(0, d-r), +1) where its range begins and
// (d+r+1, -1) just past where it ends, then heapifies them.
func priQueueFromBots(nanobots []nanoBot) *PriorityQueue {
	pq := make(PriorityQueue, 2*len(nanobots))
	for i, bot := range nanobots {
		dist := abs(bot.position[0]) + abs(bot.position[1]) + abs(bot.position[2])
		pq[2*i] = &Item{
			priority: max(0, dist-bot.radius),
			value: 1,
		}
		pq[2*i+1] = &Item{
			priority: dist + bot.radius + 1,
			value: -1,
		}
	}
	heap.Init(&pq)
	return &pq
}
// parseInput decodes one nanoBot per line of the form
// "pos=<x,y,z>, r=n". Atoi errors are ignored because the regexp
// already guarantees the captures are (possibly signed) digit runs.
func parseInput(input []string) []nanoBot {
	botPattern := regexp.MustCompile(`^pos=<(-?\d+),(-?\d+),(-?\d+)>, r=(\d+)$`)
	bots := make([]nanoBot, len(input))
	for i, line := range input {
		m := botPattern.FindStringSubmatch(line)
		x, _ := strconv.Atoi(m[1])
		y, _ := strconv.Atoi(m[2])
		z, _ := strconv.Atoi(m[3])
		r, _ := strconv.Atoi(m[4])
		bots[i] = nanoBot{radius: r, position: coord{x, y, z}}
	}
	return bots
}
// nanoBot is a single nanobot: its signal radius and x/y/z position.
type nanoBot struct {
	radius int
	position coord
}

// coord is an x, y, z integer triple.
type coord [3]int
// abs returns the absolute value of x.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
// max returns the larger of x and y.
func max(x, y int) int {
	if x >= y {
		return x
	}
	return y
}
package main
import "fmt"
// Union returns the set union of a and b (bitwise OR of the bit sets).
func Union(a int, b int) int {
	combined := a | b
	return combined
}
// Intersection returns the set intersection of a and b (bitwise AND).
func Intersection(a int, b int) int {
	common := a & b
	return common
}
// Subtraction returns the set difference a \ b (bits of a not in b).
// This is set subtraction, not arithmetic subtraction; Go's AND NOT
// operator &^ expresses a & ^b directly.
func Subtraction(a int, b int) int {
	return a &^ b
}
// Negate returns the bitwise complement of a (every bit flipped).
// In two's complement ^a equals -a-1, so this is NOT sign inversion.
func Negate(a int) int {
	complement := ^a
	return complement
}
// SetBit returns a with the bit at position bit forced to 1.
func SetBit(a int, bit int) int {
	mask := 1 << uint(bit)
	return a | mask
}
// ClearBit returns a with the bit at position bit forced to 0.
func ClearBit(a int, bit int) int {
	mask := 1 << uint(bit)
	return a &^ mask
}
// TestBit reports whether the bit at position bit in a is 1.
func TestBit(a int, bit int) bool {
	mask := 1 << uint(bit)
	return a&mask != 0
}
// ExtractLastBit isolates the lowest set bit of a (two's-complement
// trick: a & -a keeps only that bit).
func ExtractLastBit(a int) int {
	lowest := a & -a
	return lowest
}
// RemoveLastBit clears the lowest set bit of a.
func RemoveLastBit(a int) int {
	cleared := a & (a - 1)
	return cleared
}
// AllOnes returns an int with every bit set, i.e. -1 in two's complement
// (the complement of zero).
func AllOnes() int {
	var zero int
	return ^zero
}
// CountOnes counts the set bits in num using Kernighan's method:
// each num &= num-1 clears exactly one (the lowest) set bit.
func CountOnes(num int) int {
	ones := 0
	for ; num != 0; num &= num - 1 {
		ones++
	}
	return ones
}
// PowerOfTwo reports whether num is a positive power of two.
// num&(num-1) clears the lowest set bit, so it is zero exactly when at
// most one bit is set. The num > 0 guard fixes the original bug where
// 0 (and, via wraparound, the minimum int) were misclassified as
// powers of two.
func PowerOfTwo(num int) bool {
	return num > 0 && num&(num-1) == 0
}
// PowerOfFour reports whether num is a power of four.
// A power of four is a power of two whose predecessor is divisible by
// three (4^k - 1 = 3 * (4^(k-1) + ... + 1)). This check is valid over
// the full int range, unlike the original fixed 32-bit 0x55555555 mask,
// which misclassified e.g. 4^17 = 2^34 on 64-bit ints.
func PowerOfFour(num int) bool {
	return num > 0 && num&(num-1) == 0 && (num-1)%3 == 0
}
// Sum adds a and b using only bitwise operations, mirroring a chain of
// hardware adders (https://en.wikipedia.org/wiki/Adder_(electronics)):
// a xor b is the digit-wise sum, a and b is the carry, which is shifted
// left and folded back in until no carry remains. The original tail
// recursion is rewritten as a loop, avoiding one stack frame per carry
// propagation step.
func Sum(a int, b int) int {
	for b != 0 {
		a, b = a^b, (a&b)<<1
	}
	return a
}
// LargestPowerOfTwo returns the largest power of two <= a (for a > 0).
// The or-cascade smears the highest set bit into every lower position;
// adding one and halving then isolates that bit. The a |= a>>32 step
// extends the original 32-bit cascade so the result is also correct for
// values above 2^32 on 64-bit ints (on 32-bit ints the extra shift is a
// harmless no-op for non-negative a).
func LargestPowerOfTwo(a int) int {
	a |= a >> 1
	a |= a >> 2
	a |= a >> 4
	a |= a >> 8
	a |= a >> 16
	a |= a >> 32
	return (a + 1) >> 1
}
// ReverseBits mirrors the 32 bits of a: bit 0 swaps with bit 31, bit 1
// with bit 30, and so on. Classic shift-and-accumulate: the result is
// shifted left while the lowest bit of a is appended.
func ReverseBits(a uint32) uint32 {
	var reversed uint32
	for bit := 0; bit < 32; bit++ {
		reversed <<= 1
		reversed |= a & 1
		a >>= 1
	}
	return reversed
}
// BitwiseAndAllBetween returns the AND of every integer in [m, n].
// Low bits are stripped until m and n share a common prefix; only that
// prefix (shifted back into place) survives ANDing the whole range,
// because every lower bit takes both values somewhere in between.
func BitwiseAndAllBetween(m int, n int) int {
	shift := uint(0)
	for m != n {
		m >>= 1
		n >>= 1
		shift++
	}
	return m << shift
}
// main demonstrates each bitwise helper once, printing every result on
// a single line.
func main() {
	fmt.Println(
		Union( 8, 16),
		Intersection( 8, 16),
		Subtraction(72, 56),
		// ^8 == -9: flipping every bit of x yields -x-1 in two's complement.
		Negate(8),
		SetBit(8, 2),
		ClearBit(12, 2),
		TestBit(8, 3),
		ExtractLastBit(10),
		RemoveLastBit(10),
		AllOnes(),
		CountOnes(42),
		PowerOfTwo(64),
		PowerOfFour(16777216),
		Sum(42, 42),
		LargestPowerOfTwo(46),
		ReverseBits(268435456),
		BitwiseAndAllBetween(5, 7),
	)
}
package gpsutil
import (
"fmt"
"math"
)
// GetDistance returns distance in meters between 2 given points.
// It uses the haversine great-circle formula; lat/lng are expected in
// decimal degrees. The result is scaled by EARTH_RADIUS — presumably in
// meters, confirm against that constant's definition.
func GetDistance(lng1, lat1, lng2, lat2 float64) float64 {
	// Coordinate deltas converted to radians.
	dLat := toRad(lat2 - lat1)
	dLng := toRad(lng2 - lng1)
	// Haversine term: sin²(Δlat/2) + sin²(Δlng/2)·cos(lat1)·cos(lat2).
	a := math.Pow(math.Sin(dLat/2), 2) + math.Pow(math.Sin(dLng/2), 2)*math.Cos(toRad(lat1))*math.Cos(toRad(lat2))
	// Central angle in radians.
	c := 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))
	return c * EARTH_RADIUS
}
// GetTotalDistance returns the total length in meters of the polyline
// through the given points (the sum of consecutive leg distances).
// Zero or one point yields 0.
func GetTotalDistance(points []*LatLng) float64 {
	total := 0.0
	for i := 1; i < len(points); i++ {
		prev, cur := points[i-1], points[i]
		total += GetDistance(prev.lng, prev.lat, cur.lng, cur.lat)
	}
	return total
}
// GetBoundingBox returns a bounding box for a point and certain distance in meters.
// The box is expressed as southwest/northeast corners in degrees. If the
// latitude band would spill past a pole, the box falls back to clamped
// latitudes over the full longitude range; longitudes crossing the
// antimeridian are wrapped by ±2π.
func GetBoundingBox(lat, lng, distance float64) *BBox {
	// Angular radius of the box, in radians.
	radDist := distance / EARTH_RADIUS
	radLat := toRad(lat)
	radLng := toRad(lng)
	var minLat, maxLat, minLng, maxLng, deltaLng float64
	minLat = radLat - radDist
	maxLat = radLat + radDist
	if minLat > MIN_LAT && maxLat < MAX_LAT {
		// Longitude delta grows with latitude as meridians converge.
		deltaLng = math.Asin(math.Sin(radDist) / math.Cos(radLat))
		minLng = radLng - deltaLng
		if minLng < MIN_LNG {
			minLng += 2 * math.Pi
		}
		maxLng = radLng + deltaLng
		if maxLng > MAX_LNG {
			maxLng -= 2 * math.Pi
		}
	} else {
		// A pole lies inside the box: clamp latitude, span all longitudes.
		minLat = math.Max(minLat, MIN_LAT)
		maxLat = math.Min(maxLat, MAX_LAT)
		minLng = MIN_LNG
		maxLng = MAX_LNG
	}
	return &BBox{
		southwest: LatLng{lat: toDegrees(minLat), lng: toDegrees(minLng)},
		northeast: LatLng{lat: toDegrees(maxLat), lng: toDegrees(maxLng)}}
}
// GetMidPoint returns center point calculated based on a list of points.
// Each point is mapped to a unit vector in 3D Cartesian space, the
// vectors are averaged, and the mean is converted back to lat/lng —
// giving the geographic midpoint rather than a naive coordinate average
// (which would break near the antimeridian). An error is returned only
// for an empty input.
func GetMidPoint(points []*LatLng) (*LatLng, error) {
	length := len(points)
	if length < 1 {
		return nil, fmt.Errorf("Points must not be empty")
	} else if length == 1 {
		return &LatLng{lat: points[0].lat, lng: points[0].lng}, nil
	}
	x := 0.0
	y := 0.0
	z := 0.0
	var lat, lng float64
	for i := 0; i < length; i++ {
		lat = toRad(points[i].lat)
		lng = toRad(points[i].lng)
		// Accumulate the Cartesian components of each unit vector.
		x += math.Cos(lat) * math.Cos(lng)
		y += math.Cos(lat) * math.Sin(lng)
		z += math.Sin(lat)
	}
	x = x / float64(length)
	y = y / float64(length)
	z = z / float64(length)
	// Convert the mean vector back to spherical coordinates.
	lng = math.Atan2(y, x)
	lat = math.Atan2(z, math.Sqrt(x*x+y*y))
	return &LatLng{lat: toDegrees(lat), lng: toDegrees(lng)}, nil
}
// GetPointByBearing returns a point calculated based on a given point, bearing and travelling distance
// lat, lng given in decimal degrees, bearing is given in degrees and distance is in meters
func GetPointByBearing(lat, lng, bearing, distance float64) *LatLng {
radLat := toRad(lat)
radLng := toRad(lng)
radBearing := toRad(bearing)
radDist := distance / EARTH_RADIUS
radLat2 := math.Asin(math.Sin(radLat)*math.Cos(radDist) + math.Cos(radLat)*math.Sin(radDist)*math.Cos(radBearing))
radLng2 := radLng + math.Atan2(math.Sin(radBearing)*math.Sin(radDist)*math.Cos(radLat), math.Cos(radDist)-math.Sin(radLat)*math.Sin(radLat2))
return &LatLng{lat: toDegrees(radLat2), lng: toDegrees(radLng2)}
} | distance.go | 0.82485 | 0.647269 | distance.go | starcoder |
package tuple
// Tuple1 is a tuple containing 1 value(s).
type Tuple1[T0 any] struct {
	V0 T0
}

// New1 builds a pointer to a tuple holding the single given value.
func New1[T0 any](v0 T0) *Tuple1[T0] {
	return &Tuple1[T0]{V0: v0}
}
// Tuple2 is a tuple containing 2 value(s).
type Tuple2[T0, T1 any] struct {
	V0 T0
	V1 T1
}

// New2 builds a pointer to a tuple holding the two given values in order.
func New2[T0, T1 any](v0 T0, v1 T1) *Tuple2[T0, T1] {
	return &Tuple2[T0, T1]{V0: v0, V1: v1}
}
// Tuple3 is a tuple containing 3 value(s).
type Tuple3[T0, T1, T2 any] struct {
	V0 T0
	V1 T1
	V2 T2
}

// New3 builds a pointer to a tuple holding the three given values in order.
func New3[T0, T1, T2 any](v0 T0, v1 T1, v2 T2) *Tuple3[T0, T1, T2] {
	return &Tuple3[T0, T1, T2]{V0: v0, V1: v1, V2: v2}
}
// Tuple4 is a tuple containing 4 value(s).
type Tuple4[T0, T1, T2, T3 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
}
// New4 creates a new tuple of 4 value(s).
func New4[T0, T1, T2, T3 any](v0 T0, v1 T1, v2 T2, v3 T3) *Tuple4[T0, T1, T2, T3] {
return &Tuple4[T0, T1, T2, T3]{v0, v1, v2, v3}
}
// Tuple5 is a tuple containing 5 value(s).
type Tuple5[T0, T1, T2, T3, T4 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
}
// New5 creates a new tuple of 5 value(s).
func New5[T0, T1, T2, T3, T4 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4) *Tuple5[T0, T1, T2, T3, T4] {
return &Tuple5[T0, T1, T2, T3, T4]{v0, v1, v2, v3, v4}
}
// Tuple6 is a tuple containing 6 value(s).
type Tuple6[T0, T1, T2, T3, T4, T5 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
}
// New6 creates a new tuple of 6 value(s).
func New6[T0, T1, T2, T3, T4, T5 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5) *Tuple6[T0, T1, T2, T3, T4, T5] {
return &Tuple6[T0, T1, T2, T3, T4, T5]{v0, v1, v2, v3, v4, v5}
}
// Tuple7 is a tuple containing 7 value(s).
type Tuple7[T0, T1, T2, T3, T4, T5, T6 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
}
// New7 creates a new tuple of 7 value(s).
func New7[T0, T1, T2, T3, T4, T5, T6 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6) *Tuple7[T0, T1, T2, T3, T4, T5, T6] {
return &Tuple7[T0, T1, T2, T3, T4, T5, T6]{v0, v1, v2, v3, v4, v5, v6}
}
// Tuple8 is a tuple containing 8 value(s).
type Tuple8[T0, T1, T2, T3, T4, T5, T6, T7 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
}
// New8 creates a new tuple of 8 value(s).
func New8[T0, T1, T2, T3, T4, T5, T6, T7 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7) *Tuple8[T0, T1, T2, T3, T4, T5, T6, T7] {
return &Tuple8[T0, T1, T2, T3, T4, T5, T6, T7]{v0, v1, v2, v3, v4, v5, v6, v7}
}
// Tuple9 is a tuple containing 9 value(s).
type Tuple9[T0, T1, T2, T3, T4, T5, T6, T7, T8 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
}
// New9 creates a new tuple of 9 value(s).
func New9[T0, T1, T2, T3, T4, T5, T6, T7, T8 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8) *Tuple9[T0, T1, T2, T3, T4, T5, T6, T7, T8] {
return &Tuple9[T0, T1, T2, T3, T4, T5, T6, T7, T8]{v0, v1, v2, v3, v4, v5, v6, v7, v8}
}
// Tuple10 is a tuple containing 10 value(s).
type Tuple10[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
}
// New10 creates a new tuple of 10 value(s).
func New10[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9) *Tuple10[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] {
return &Tuple10[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9}
}
// Tuple11 is a tuple containing 11 value(s).
type Tuple11[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
V10 T10
}
// New11 creates a new tuple of 11 value(s).
func New11[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10) *Tuple11[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10] {
return &Tuple11[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10}
}
// Tuple12 is a tuple containing 12 value(s).
type Tuple12[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
V10 T10
V11 T11
}
// New12 creates a new tuple of 12 value(s).
func New12[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11) *Tuple12[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11] {
return &Tuple12[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11}
}
// Tuple13 is a tuple containing 13 value(s).
type Tuple13[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
V10 T10
V11 T11
V12 T12
}
// New13 creates a new tuple of 13 value(s).
func New13[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12) *Tuple13[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12] {
return &Tuple13[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12}
}
// Tuple14 is a tuple containing 14 value(s).
type Tuple14[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
V10 T10
V11 T11
V12 T12
V13 T13
}
// New14 creates a new tuple of 14 value(s).
func New14[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13) *Tuple14[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13] {
return &Tuple14[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13}
}
// Tuple15 is a tuple containing 15 value(s).
type Tuple15[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
V10 T10
V11 T11
V12 T12
V13 T13
V14 T14
}
// New15 creates a new tuple of 15 value(s).
func New15[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14) *Tuple15[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14] {
return &Tuple15[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14}
}
// Tuple16 is a tuple containing 16 value(s).
type Tuple16[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
V10 T10
V11 T11
V12 T12
V13 T13
V14 T14
V15 T15
}
// New16 creates a new tuple of 16 value(s).
func New16[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15) *Tuple16[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15] {
return &Tuple16[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15}
}
// Tuple17 is a tuple containing 17 value(s).
type Tuple17[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
V10 T10
V11 T11
V12 T12
V13 T13
V14 T14
V15 T15
V16 T16
}
// New17 creates a new tuple of 17 value(s).
func New17[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16) *Tuple17[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16] {
return &Tuple17[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16}
}
// Tuple18 is a tuple containing 18 value(s).
type Tuple18[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
V10 T10
V11 T11
V12 T12
V13 T13
V14 T14
V15 T15
V16 T16
V17 T17
}
// New18 creates a new tuple of 18 value(s).
func New18[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16, v17 T17) *Tuple18[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17] {
return &Tuple18[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17}
}
// Tuple19 is a tuple containing 19 value(s).
type Tuple19[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
V10 T10
V11 T11
V12 T12
V13 T13
V14 T14
V15 T15
V16 T16
V17 T17
V18 T18
}
// New19 creates a new tuple of 19 value(s).
func New19[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16, v17 T17, v18 T18) *Tuple19[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18] {
return &Tuple19[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18}
}
// Tuple20 is a tuple containing 20 value(s).
type Tuple20[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
V10 T10
V11 T11
V12 T12
V13 T13
V14 T14
V15 T15
V16 T16
V17 T17
V18 T18
V19 T19
}
// New20 creates a new tuple of 20 value(s).
func New20[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16, v17 T17, v18 T18, v19 T19) *Tuple20[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19] {
return &Tuple20[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19}
}
// Tuple21 is a tuple containing 21 value(s).
type Tuple21[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
V10 T10
V11 T11
V12 T12
V13 T13
V14 T14
V15 T15
V16 T16
V17 T17
V18 T18
V19 T19
V20 T20
}
// New21 creates a new tuple of 21 value(s).
func New21[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16, v17 T17, v18 T18, v19 T19, v20 T20) *Tuple21[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20] {
return &Tuple21[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20}
}
// Tuple22 is a tuple containing 22 value(s).
type Tuple22[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
V10 T10
V11 T11
V12 T12
V13 T13
V14 T14
V15 T15
V16 T16
V17 T17
V18 T18
V19 T19
V20 T20
V21 T21
}
// New22 creates a new tuple of 22 value(s).
func New22[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16, v17 T17, v18 T18, v19 T19, v20 T20, v21 T21) *Tuple22[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21] {
return &Tuple22[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21}
}
// Tuple23 is a tuple containing 23 value(s).
type Tuple23[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
V10 T10
V11 T11
V12 T12
V13 T13
V14 T14
V15 T15
V16 T16
V17 T17
V18 T18
V19 T19
V20 T20
V21 T21
V22 T22
}
// New23 creates a new tuple of 23 value(s).
func New23[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16, v17 T17, v18 T18, v19 T19, v20 T20, v21 T21, v22 T22) *Tuple23[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22] {
return &Tuple23[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22}
}
// Tuple24 is a tuple containing 24 value(s).
type Tuple24[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
V10 T10
V11 T11
V12 T12
V13 T13
V14 T14
V15 T15
V16 T16
V17 T17
V18 T18
V19 T19
V20 T20
V21 T21
V22 T22
V23 T23
}
// New24 creates a new tuple of 24 value(s).
func New24[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16, v17 T17, v18 T18, v19 T19, v20 T20, v21 T21, v22 T22, v23 T23) *Tuple24[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23] {
return &Tuple24[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23}
}
// Tuple25 is a tuple containing 25 value(s).
type Tuple25[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24 any] struct {
V0 T0
V1 T1
V2 T2
V3 T3
V4 T4
V5 T5
V6 T6
V7 T7
V8 T8
V9 T9
V10 T10
V11 T11
V12 T12
V13 T13
V14 T14
V15 T15
V16 T16
V17 T17
V18 T18
V19 T19
V20 T20
V21 T21
V22 T22
V23 T23
V24 T24
}
// New25 creates a new tuple of 25 value(s).
func New25[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16, v17 T17, v18 T18, v19 T19, v20 T20, v21 T21, v22 T22, v23 T23, v24 T24) *Tuple25[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24] {
return &Tuple25[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24}
}
// Tuple26 is a tuple containing 26 value(s).
// Fields V0 through V25 hold the values in positional order.
type Tuple26[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25 any] struct {
	V0 T0
	V1 T1
	V2 T2
	V3 T3
	V4 T4
	V5 T5
	V6 T6
	V7 T7
	V8 T8
	V9 T9
	V10 T10
	V11 T11
	V12 T12
	V13 T13
	V14 T14
	V15 T15
	V16 T16
	V17 T17
	V18 T18
	V19 T19
	V20 T20
	V21 T21
	V22 T22
	V23 T23
	V24 T24
	V25 T25
}
// New26 creates a new tuple of 26 value(s).
// The arguments are stored positionally in the corresponding fields.
func New26[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16, v17 T17, v18 T18, v19 T19, v20 T20, v21 T21, v22 T22, v23 T23, v24 T24, v25 T25) *Tuple26[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25] {
	return &Tuple26[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25}
}
// Tuple27 is a tuple containing 27 value(s).
// Fields V0 through V26 hold the values in positional order.
type Tuple27[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26 any] struct {
	V0 T0
	V1 T1
	V2 T2
	V3 T3
	V4 T4
	V5 T5
	V6 T6
	V7 T7
	V8 T8
	V9 T9
	V10 T10
	V11 T11
	V12 T12
	V13 T13
	V14 T14
	V15 T15
	V16 T16
	V17 T17
	V18 T18
	V19 T19
	V20 T20
	V21 T21
	V22 T22
	V23 T23
	V24 T24
	V25 T25
	V26 T26
}
// New27 creates a new tuple of 27 value(s).
// The arguments are stored positionally in the corresponding fields.
func New27[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16, v17 T17, v18 T18, v19 T19, v20 T20, v21 T21, v22 T22, v23 T23, v24 T24, v25 T25, v26 T26) *Tuple27[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26] {
	return &Tuple27[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26}
}
// Tuple28 is a tuple containing 28 value(s).
// Fields V0 through V27 hold the values in positional order.
type Tuple28[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27 any] struct {
	V0 T0
	V1 T1
	V2 T2
	V3 T3
	V4 T4
	V5 T5
	V6 T6
	V7 T7
	V8 T8
	V9 T9
	V10 T10
	V11 T11
	V12 T12
	V13 T13
	V14 T14
	V15 T15
	V16 T16
	V17 T17
	V18 T18
	V19 T19
	V20 T20
	V21 T21
	V22 T22
	V23 T23
	V24 T24
	V25 T25
	V26 T26
	V27 T27
}
// New28 creates a new tuple of 28 value(s).
// The arguments are stored positionally in the corresponding fields.
func New28[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16, v17 T17, v18 T18, v19 T19, v20 T20, v21 T21, v22 T22, v23 T23, v24 T24, v25 T25, v26 T26, v27 T27) *Tuple28[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27] {
	return &Tuple28[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27}
}
// Tuple29 is a tuple containing 29 value(s).
// Fields V0 through V28 hold the values in positional order.
type Tuple29[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28 any] struct {
	V0 T0
	V1 T1
	V2 T2
	V3 T3
	V4 T4
	V5 T5
	V6 T6
	V7 T7
	V8 T8
	V9 T9
	V10 T10
	V11 T11
	V12 T12
	V13 T13
	V14 T14
	V15 T15
	V16 T16
	V17 T17
	V18 T18
	V19 T19
	V20 T20
	V21 T21
	V22 T22
	V23 T23
	V24 T24
	V25 T25
	V26 T26
	V27 T27
	V28 T28
}
// New29 creates a new tuple of 29 value(s).
// The arguments are stored positionally in the corresponding fields.
func New29[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16, v17 T17, v18 T18, v19 T19, v20 T20, v21 T21, v22 T22, v23 T23, v24 T24, v25 T25, v26 T26, v27 T27, v28 T28) *Tuple29[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28] {
	return &Tuple29[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28}
}
// Tuple30 is a tuple containing 30 value(s).
// Fields V0 through V29 hold the values in positional order.
type Tuple30[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29 any] struct {
	V0 T0
	V1 T1
	V2 T2
	V3 T3
	V4 T4
	V5 T5
	V6 T6
	V7 T7
	V8 T8
	V9 T9
	V10 T10
	V11 T11
	V12 T12
	V13 T13
	V14 T14
	V15 T15
	V16 T16
	V17 T17
	V18 T18
	V19 T19
	V20 T20
	V21 T21
	V22 T22
	V23 T23
	V24 T24
	V25 T25
	V26 T26
	V27 T27
	V28 T28
	V29 T29
}
// New30 creates a new tuple of 30 value(s).
// The arguments are stored positionally in the corresponding fields.
func New30[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16, v17 T17, v18 T18, v19 T19, v20 T20, v21 T21, v22 T22, v23 T23, v24 T24, v25 T25, v26 T26, v27 T27, v28 T28, v29 T29) *Tuple30[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29] {
	return &Tuple30[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29}
}
// Tuple31 is a tuple containing 31 value(s).
// Fields V0 through V30 hold the values in positional order.
type Tuple31[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30 any] struct {
	V0 T0
	V1 T1
	V2 T2
	V3 T3
	V4 T4
	V5 T5
	V6 T6
	V7 T7
	V8 T8
	V9 T9
	V10 T10
	V11 T11
	V12 T12
	V13 T13
	V14 T14
	V15 T15
	V16 T16
	V17 T17
	V18 T18
	V19 T19
	V20 T20
	V21 T21
	V22 T22
	V23 T23
	V24 T24
	V25 T25
	V26 T26
	V27 T27
	V28 T28
	V29 T29
	V30 T30
}
// New31 creates a new tuple of 31 value(s).
// The arguments are stored positionally in the corresponding fields.
func New31[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16, v17 T17, v18 T18, v19 T19, v20 T20, v21 T21, v22 T22, v23 T23, v24 T24, v25 T25, v26 T26, v27 T27, v28 T28, v29 T29, v30 T30) *Tuple31[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30] {
	return &Tuple31[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30}
}
// Tuple32 is a tuple containing 32 value(s).
// Fields V0 through V31 hold the values in positional order.
type Tuple32[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30, T31 any] struct {
	V0 T0
	V1 T1
	V2 T2
	V3 T3
	V4 T4
	V5 T5
	V6 T6
	V7 T7
	V8 T8
	V9 T9
	V10 T10
	V11 T11
	V12 T12
	V13 T13
	V14 T14
	V15 T15
	V16 T16
	V17 T17
	V18 T18
	V19 T19
	V20 T20
	V21 T21
	V22 T22
	V23 T23
	V24 T24
	V25 T25
	V26 T26
	V27 T27
	V28 T28
	V29 T29
	V30 T30
	V31 T31
}
// New32 creates a new tuple of 32 value(s).
// The arguments are stored positionally in the corresponding fields.
func New32[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30, T31 any](v0 T0, v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10, v11 T11, v12 T12, v13 T13, v14 T14, v15 T15, v16 T16, v17 T17, v18 T18, v19 T19, v20 T20, v21 T21, v22 T22, v23 T23, v24 T24, v25 T25, v26 T26, v27 T27, v28 T28, v29 T29, v30 T30, v31 T31) *Tuple32[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30, T31] {
	return &Tuple32[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30, T31]{v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31}
}
package neighbors
import (
"fmt"
"runtime"
"github.com/pa-m/sklearn/base"
"github.com/pa-m/sklearn/metrics"
"gonum.org/v1/gonum/mat"
"gonum.org/v1/gonum/stat"
)
// KNeighborsRegressor is a Regression based on k-nearest neighbors.
// The target is predicted by local interpolation of the targets
// associated with the nearest neighbors in the training set.
type KNeighborsRegressor struct {
	NearestNeighbors
	// K is the number of neighbors used for interpolation; validated (>0) in Fit.
	K int
	// Weights may be "uniform", "distance" or func(dstWeights, srcDists []float64)
	Weights interface{}
	// Scale is not referenced by the methods in this file — NOTE(review): confirm intended use.
	Scale bool
	// Distance is the metric used to compare samples; defaults to EuclideanDistance in Fit.
	Distance Distance
	// Runtime members
	Xscaled, Y *mat.Dense
}
// NewKNeighborsRegressor returns an initialized *KNeighborsRegressor.
// Weights may be "uniform", "distance" or func(dstWeights, srcDists []float64)
// (the signature Predict type-asserts). K is validated when Fit is called.
func NewKNeighborsRegressor(K int, Weights interface{}) base.Predicter {
	return &KNeighborsRegressor{NearestNeighbors: *NewNearestNeighbors(), K: K, Weights: Weights}
}
// PredicterClone return a (possibly unfitted) copy of predicter.
// The copy is shallow: pointer fields (Xscaled, Y) are shared with the receiver.
func (m *KNeighborsRegressor) PredicterClone() base.Predicter {
	clone := *m
	return &clone
}
// IsClassifier returns false for KNeighborsRegressor: this estimator predicts continuous targets.
func (*KNeighborsRegressor) IsClassifier() bool { return false }
// Fit memorizes the training set: X and Y are copied so later mutation by
// the caller cannot affect predictions. It fills in the default distance
// metric, panics if K is not positive, and indexes the data via the
// embedded NearestNeighbors.
func (m *KNeighborsRegressor) Fit(Xmatrix, Ymatrix mat.Matrix) base.Fiter {
	X, Y := base.ToDense(Xmatrix), base.ToDense(Ymatrix)
	m.Xscaled = mat.DenseCopyOf(X)
	m.Y = mat.DenseCopyOf(Y)
	if m.Distance == nil {
		m.Distance = EuclideanDistance
	}
	if m.K <= 0 {
		// K must be a positive neighbor count; anything else is a programming error.
		panic(fmt.Errorf("K<=0"))
	}
	m.NearestNeighbors.Fit(X, Y)
	return m
}
// GetNOutputs return Y width (the number of target columns recorded by Fit).
// It dereferences m.Y, so it must only be called after Fit.
func (m *KNeighborsRegressor) GetNOutputs() int { return m.Y.RawMatrix().Cols }
// Predict fills Ymutable with the regression targets for the samples in X.
// Each prediction is the (optionally weighted) mean of the targets of the
// K nearest training samples. If Ymutable is empty it is allocated to
// nSamples x GetNOutputs(). The filled matrix is also returned as *mat.Dense.
func (m *KNeighborsRegressor) Predict(X mat.Matrix, Ymutable mat.Mutable) *mat.Dense {
	Y := base.ToDense(Ymutable)
	nSamples, _ := X.Dims()
	if Y.IsEmpty() {
		*Y = *mat.NewDense(nSamples, m.GetNOutputs(), nil)
	}
	_, outputs := m.Y.Dims()
	NPROCS := runtime.GOMAXPROCS(0)
	// Resolve the weighting scheme once, outside the hot loop.
	var isWeightDistance bool
	type wfntype = func(dstWeights, srcDists []float64)
	var wfn wfntype
	if weightsstr, ok := m.Weights.(string); ok && weightsstr == "distance" {
		isWeightDistance = true
	} else if tmpfn, ok := m.Weights.(wfntype); ok {
		wfn = tmpfn
	}
	distances, indices := m.KNeighbors(X, m.K)
	base.Parallelize(NPROCS, nSamples, func(th, start, end int) {
		// Per-worker scratch buffers, reused across samples.
		weights := make([]float64, m.K)
		dists := make([]float64, m.K)
		ys := make([]float64, m.K)
		const epsilon = 1e-15 // guards against division by zero for exact matches
		for ik := range weights {
			weights[ik] = 1. // "uniform" weighting is the default
		}
		for sample := start; sample < end; sample++ {
			mat.Row(dists, sample, distances)
			// Weights depend only on the neighbor distances, so compute
			// them once per sample. (Previously this ran in the innermost
			// loop, invoking wfn K times per output for the same result.)
			if wfn != nil {
				wfn(weights, dists)
			} else if isWeightDistance {
				for ik := range weights {
					weights[ik] = 1. / (epsilon + dists[ik])
				}
			}
			// Set Y(sample, o) to the weighted average of the K nearest targets.
			for o := 0; o < outputs; o++ {
				for ik := range ys {
					ys[ik] = m.Y.At(int(indices.At(sample, ik)), o)
				}
				Y.Set(sample, o, stat.Mean(ys, weights))
			}
		}
	})
	return base.FromDense(Ymutable, Y)
}
// Score for KNeighborsRegressor: predicts on X and returns the R2 score
// (coefficient of determination) of the prediction against Y, taking the
// single aggregated value at (0, 0).
func (m *KNeighborsRegressor) Score(X, Y mat.Matrix) float64 {
	NSamples, NOutputs := Y.Dims()
	Ypred := mat.NewDense(NSamples, NOutputs, nil)
	m.Predict(X, Ypred)
	return metrics.R2Score(Y, Ypred, nil, "").At(0, 0)
}
package catalog
// ProjectTemplate is the markdown template for a project README:
// a list of links, a package index, and the two integration diagrams.
const ProjectTemplate = `
{{range $name, $link := .Links}}
[{{$name}}]({{$link}})
{{end}}
# {{Base .Title}}
| Package |
----|{{range $val := Packages .Module}}
[{{$val}}]({{$val}}/README.md)|{{end}}
## Integration Diagram
<img src="{{CreateIntegrationDiagram .Module .Title false}}">
## End Point Analysis Integration Diagram
<img src="{{CreateIntegrationDiagram .Module .Title true}}">
`
// MacroPackageProject is the markdown template for a macro-package project
// README; it differs from ProjectTemplate by indexing MacroPackages and
// omitting the link list.
const MacroPackageProject = `
# {{Base .Title}}
| Package |
----|{{range $val := MacroPackages .Module}}
[{{$val}}]({{$val}}/README.md)|{{end}}
## Integration Diagram
<img src="{{CreateIntegrationDiagram .Module .Title false}}">
## End Point Analysis Integration Diagram
<img src="{{CreateIntegrationDiagram .Module .Title true}}">
`
// NewPackageTemplate is the markdown template for a package README:
// a service index table, per-endpoint request/response sections with
// sequence and data-model diagrams, an optional database section, and a
// table of the package's types. Apps and endpoints carrying the "ignore"
// pattern are skipped.
const NewPackageTemplate = `
[Back](../README.md)
{{$packageName := ModulePackageName .}}
# {{$packageName}}
## Service Index
| Service Name | Method | Source Location |
----|----|----{{$Apps := .Apps}}{{range $appName := SortedKeys .Apps}}{{$app := index $Apps $appName}}{{if eq (hasPattern $app.Attrs "ignore") false}}{{$Endpoints := $app.Endpoints}}{{range $endpointName := SortedKeys $Endpoints}}{{$endpoint := index $Endpoints $endpointName}}{{if eq (hasPattern $endpoint.Attrs "ignore") false}}
{{$appName}} | [{{$endpoint.Name}}](#{{$appName}}-{{SanitiseOutputName $endpoint.Name}}) | [{{SourcePath $app}}]({{SourcePath $app}})| {{end}}{{end}}{{end}}{{end}}

{{range $appName := SortedKeys .Apps}}{{$app := index $Apps $appName}}
{{if eq (hasPattern $app.Attrs "ignore") false}}
{{if ne $appName $packageName}}
# {{$appName}}{{end}}
{{Attribute $app "description"}}
{{range $e := $app.Endpoints}}
{{if eq (hasPattern $e.Attrs "ignore") false}}
## {{$appName}} {{SanitiseOutputName $e.Name}}
{{Attribute $e "description"}}

### Request types
{{if and (eq (len $e.Param) 0) (not $e.RestParams) }}
No Request types
{{end}}
{{range $param := $e.Param}}
{{Attribute $param.Type "description"}}

{{end}}
{{if $e.RestParams}}{{if $e.RestParams.UrlParam}}
{{range $param := $e.RestParams.UrlParam}}
{{$pathDataModel := (CreateParamDataModel $app $param)}}
{{if ne $pathDataModel ""}}
### Path Parameter

{{end}}{{end}}{{end}}
{{if $e.RestParams.QueryParam}}
{{range $param := $e.RestParams.QueryParam}}
{{$queryDataModel := (CreateParamDataModel $app $param)}}
{{if ne $queryDataModel ""}}
### Query Parameter

{{end}}{{end}}{{end}}{{end}}
### Response types
{{$responses := false}}
{{range $s := $e.Stmt}}{{$diagram := CreateReturnDataModel $appName $s $e}}{{if ne $diagram ""}}
{{$responses = true}}
{{$ret := (GetReturnType $e $s)}}{{if $ret }}
{{Attribute $ret "description"}}{{end}}

{{end}}{{end}}
{{if eq $responses false}}
No Response Types
{{end}}{{end}}{{end}}{{end}}{{end}}
{{range $appName := SortedKeys .Apps}}{{$app := index $Apps $appName}}
{{if hasPattern $app.GetAttrs "db"}}
## Database
{{Attribute $app "description"}}

{{end}}{{end}}
### Types
<table>
<tr>
<th>App Name</th>
<th>Diagram</th>
<th>Description</th>
<th>Full Diagram</th>
{{range $appName := SortedKeys .Apps}}{{$app := index $Apps $appName}}{{$types := $app.Types}}
{{if ne (hasPattern $app.Attrs "db") true}}
</tr>
{{range $typeName := SortedKeys $types}}{{$type := index $types $typeName}}
<tr>
<td>
{{$appName}}.<br>{{$typeName}}
</td>
<td>
<img src="{{CreateTypeDiagram $appName $typeName $type false}}">
</td>
<td>
{{if ne (Attribute $type "description") ""}}<details closed><summary>Description</summary><br>{{Attribute $type "description"}}</details>{{end}}
</td>
<td>
<a href="{{CreateTypeDiagram $appName $typeName $type true}}">Link</a>
</td>
</tr>{{end}}{{end}}{{end}}
</table>
`
package exp
import (
"regexp"
"strings"
"unicode"
)
import (
"github.com/pkg/errors"
)
// ParseValue builds an expression whose left operand is the string
// literal s. If remainder is empty the literal alone is returned;
// otherwise remainder is parsed and the literal is attached as the Left
// operand of the resulting binary operator. A non-binary root is a
// syntax error.
func ParseValue(s string, remainder string) (Node, error) {
	if len(remainder) == 0 {
		return &Literal{Value: s}, nil
	}
	left := &Literal{Value: s}
	root, err := Parse(remainder)
	if err != nil {
		return root, err
	}
	// Attach the literal as the left operand of the parsed operator.
	switch r := root.(type) {
	case *And:
		r.Left = left
	case *Or:
		r.Left = left
	case *Xor:
		// *Xor was previously missing here, so "'x' xor 'y'" failed to parse.
		r.Left = left
	case *In:
		r.Left = left
	case *Like:
		r.Left = left
	case *ILike:
		r.Left = left
	case *LessThan:
		r.Left = left
	case *LessThanOrEqual:
		r.Left = left
	case *GreaterThan:
		r.Left = left
	case *GreaterThanOrEqual:
		r.Left = left
	default:
		return root, errors.New("Invalid expression syntax for " + s + ". Root is not a binary operator")
	}
	return root, nil
}
// ParseAttribute parses an expression beginning with an attribute
// reference of the form "@name". If nothing follows the attribute it is
// returned alone; otherwise the rest of the input is parsed and the
// attribute is attached as the Left operand of the resulting binary
// operator.
func ParseAttribute(in string) (Node, error) {
	// Work on the left-trimmed input throughout. The original computed the
	// index on the trimmed string but sliced the raw string, which mangled
	// the attribute name whenever the input had leading whitespace.
	trimmed := strings.TrimLeftFunc(in, unicode.IsSpace)
	end := strings.Index(trimmed, " ")
	if end == -1 {
		// "@name" with nothing after it.
		return &Attribute{Name: strings.TrimSpace(trimmed)[1:]}, nil
	}
	if len(strings.TrimSpace(trimmed[end:])) == 0 {
		// "@name" followed only by whitespace.
		return &Attribute{Name: trimmed[1:end]}, nil
	}
	left := &Attribute{Name: trimmed[1:end]}
	root, err := Parse(trimmed[end:])
	if err != nil {
		return root, err
	}
	// Attach the attribute as the left operand of the parsed operator.
	switch r := root.(type) {
	case *And:
		r.Left = left
	case *Or:
		r.Left = left
	case *Xor:
		// *Xor was previously missing here, so "@a xor ..." failed to parse.
		r.Left = left
	case *In:
		r.Left = left
	case *Like:
		r.Left = left
	case *ILike:
		r.Left = left
	case *LessThan:
		r.Left = left
	case *LessThanOrEqual:
		r.Left = left
	case *GreaterThan:
		r.Left = left
	case *GreaterThanOrEqual:
		r.Left = left
	default:
		return root, errors.New("Invalid expression syntax for " + in + ". Root is not a binary operator")
	}
	return root, nil
}
// ParseSub parses the parenthesized sub-expression s. If remainder is
// empty the sub-expression alone is returned; otherwise remainder is
// parsed and the sub-expression is attached as the Left operand of the
// resulting binary operator.
func ParseSub(s string, remainder string) (Node, error) {
	if len(remainder) == 0 {
		return Parse(s)
	}
	var root Node
	left, err := Parse(s)
	if err != nil {
		return root, err
	}
	root, err = Parse(remainder)
	if err != nil {
		return root, err
	}
	// Attach the sub-expression as the left operand of the parsed operator.
	switch r := root.(type) {
	case *And:
		r.Left = left
	case *Or:
		r.Left = left
	case *Xor:
		// *Xor was previously missing here, so "(...) xor ..." failed to parse.
		r.Left = left
	case *In:
		r.Left = left
	case *Like:
		r.Left = left
	case *ILike:
		r.Left = left
	case *LessThan:
		r.Left = left
	case *LessThanOrEqual:
		r.Left = left
	case *GreaterThan:
		r.Left = left
	case *GreaterThanOrEqual:
		r.Left = left
	default:
		return root, errors.New("Invalid expression syntax for " + s + ". Root is not a binary operator")
	}
	return root, nil
}
// Parse converts the expression string in into an expression tree.
//
// The scanner walks the input rune by rune, accumulating a candidate
// token while tracking parenthesis depth so nested groups are consumed
// whole. At each token boundary (end of input, or a space at depth
// zero) the token is classified: quoted strings become literals via
// ParseValue, "(...)" groups recurse via ParseSub, operator keywords
// wrap the parsed remainder in the matching node, and name(args) forms
// are delegated to ParseFunction. Inputs starting with "@" are handed
// to ParseAttribute up front.
func Parse(in string) (Node, error) {
	var root Node
	if len(in) == 0 {
		return root, errors.New("Error: Input string is empty.")
	}
	// Matches function-call syntax: name(args), with optional whitespace.
	re, err := regexp.Compile("(\\s*)(?P<name>([a-zA-Z_\\d]+))(\\s*)\\((\\s*)(?P<args>(.)*?)(\\s*)\\)(\\s*)")
	if err != nil {
		return root, err
	}
	if strings.HasPrefix(strings.TrimLeftFunc(in, unicode.IsSpace), "@") {
		return ParseAttribute(in)
	} else {
		// Depth of nested parentheses; tokens end only at depth zero.
		parentheses := 0
		for i, c := range in {
			// s is the token accumulated so far; remainder is what follows it.
			s := strings.TrimSpace(in[0:i+1])
			s_lc := strings.ToLower(s)
			remainder := strings.TrimSpace(in[i+1:])
			if c == '(' {
				parentheses += 1
			} else if c == ')' {
				parentheses -= 1
			}
			if parentheses == 0 && (len(remainder) == 0 || in[i+1] == ' ') {
				if len(s) >= 2 && ((strings.HasPrefix(s, "'") && strings.HasSuffix(s, "'")) || (strings.HasPrefix(s, "\"") && strings.HasSuffix(s, "\""))) {
					// Quoted string literal: strip the quotes.
					return ParseValue(s[1:len(s) - 1], remainder)
				} else if len(s) >= 2 && strings.HasPrefix(s, "(") && strings.HasSuffix(s, ")") {
					// Parenthesized sub-expression: strip the parentheses.
					return ParseSub(s[1: len(s) - 1], remainder)
				} else if s_lc == "and" {
					right, err := Parse(remainder)
					if err != nil {
						return right, err
					}
					return &And{&BinaryOperator{Right: right}}, nil
				} else if s_lc == "or" {
					right, err := Parse(remainder)
					if err != nil {
						return right, err
					}
					return &Or{&BinaryOperator{Right: right}}, nil
				} else if s_lc == "xor" {
					right, err := Parse(remainder)
					if err != nil {
						return right, err
					}
					return &Xor{&BinaryOperator{Right: right}}, nil
				} else if s_lc == "<" {
					right, err := Parse(remainder)
					if err != nil {
						return right, err
					}
					return &LessThan{&BinaryOperator{Right: right}}, nil
				} else if s_lc == "<=" {
					right, err := Parse(remainder)
					if err != nil {
						return right, err
					}
					return &LessThanOrEqual{&BinaryOperator{Right: right}}, nil
				} else if s_lc == ">" {
					right, err := Parse(remainder)
					if err != nil {
						return right, err
					}
					return &GreaterThan{&BinaryOperator{Right: right}}, nil
				} else if s_lc == ">=" {
					right, err := Parse(remainder)
					if err != nil {
						return right, err
					}
					return &GreaterThanOrEqual{&BinaryOperator{Right: right}}, nil
				} else if s_lc == "not" {
					// Unary: wraps the whole parsed remainder.
					node, err := Parse(remainder)
					if err != nil {
						return node, err
					}
					return &Not{&UnaryOperator{Node: node}}, nil
				} else if s_lc == "in" {
					right, err := Parse(remainder)
					if err != nil {
						return right, err
					}
					return &In{&BinaryOperator{Right: right}}, nil
				} else if s_lc == "like" {
					right, err := Parse(remainder)
					if err != nil {
						return right, err
					}
					return &Like{&BinaryOperator{Right: right}}, nil
				} else if s_lc == "ilike" {
					right, err := Parse(remainder)
					if err != nil {
						return right, err
					}
					return &ILike{&BinaryOperator{Right: right}}, nil
				} else if re.MatchString(s) {
					// name(args) function-call form.
					return ParseFunction(s, remainder, re)
				}
			}
		}
	}
	return root, errors.New("Invalid expression syntax for \""+in+"\".")
}
package gf
import (
"crypto/rand"
"encoding/binary"
"fmt"
)
// GF(64) under the irreducible polynomial R
// R = x^64 + x^4 + x^3 + x + 1
// R = x^64 + r
// gf64MOD is the lower part of the irreducible polynomial
// r = x^4 + x^3 + x + 1 (binary 11011 = 0x1b)
var gf64MOD uint64 = 0x1b
// randGF64 generates a new random GF(64) element.
// It draws 8 bytes from crypto/rand and retries until the value is
// non-zero; it panics if the system randomness source fails.
func randGF64() uint64 {
	buf := make([]byte, 8)
	var e uint64
	for e == 0 {
		_, err := rand.Read(buf)
		if err != nil {
			panic(err)
		}
		e = binary.BigEndian.Uint64(buf)
	}
	return e
}
// toHex formats e as a 0x-prefixed hexadecimal string zero-padded to 16 digits.
func toHex(e uint64) string {
	// "#" adds the 0x prefix; the .16 precision zero-pads to 16 hex digits.
	const format = "%#16.16x"
	return fmt.Sprintf(format, e)
}
// zero returns the additive identity of GF(64).
func zero() uint64 {
	return 0
}
// one returns the multiplicative identity of GF(64).
func one() uint64 {
	return 1
}
// inverse returns the multiplicative inverse of e0 in GF(64), computed
// as e0^(2^64-2) via the fixed addition chain below (the exponent is
// stated by the generator invocation in the comment). inverse(0)
// returns 0 since zero has no inverse.
func inverse(e0 uint64) uint64 {
	if e0 == 0 {
		return e0
	}
	// Chain is generated with tool below.
	// https://github.com/kwantam/addchain
	// Run:
	// $ ./addchain 2^64-2
	// NOTE: the sequence of squarings/multiplications below is
	// order-critical; do not reorder.
	t0 := e0
	t1 := square64(t0)
	t2 := mul64(t0, t1)
	t0 = square64(t2)
	squareassign64(&t0)
	mulassign64(&t1, t0)
	mulassign64(&t2, t0)
	t0 = square64(t2)
	squareassign64(&t0)
	squareassign64(&t0)
	squareassign64(&t0)
	mulassign64(&t1, t0)
	mulassign64(&t2, t0)
	t0 = square64(t2)
	for i := 14; i < 21; i++ {
		squareassign64(&t0)
	}
	mulassign64(&t1, t0)
	mulassign64(&t2, t0)
	t0 = square64(t2)
	for i := 24; i < 39; i++ {
		squareassign64(&t0)
	}
	mulassign64(&t1, t0)
	mulassign64(&t0, t2)
	for i := 41; i < 73; i++ {
		squareassign64(&t0)
	}
	mulassign64(&t0, t1)
	return t0
}
// exp returns a raised to the power e in GF(64), using binary
// (square-and-multiply) exponentiation over the bits of e, least
// significant bit first. exp(a, 0) is 1 by convention.
func exp(a uint64, e uint64) uint64 {
	if e == 0 {
		return 1
	}
	// Number of bits of e to scan.
	l := log2Ceil(int(e))
	// acc holds a^(2^i); r accumulates the product for the set bits of e.
	var acc = a
	var r uint64 = 1
	for i := 0; i < l+1; i++ {
		if (e>>i)&1 == 1 {
			mulassign64(&r, acc)
		}
		squareassign64(&acc)
	}
	return r
}
// mulNaive multiplies two GF(16) element with shift and add method
func mulNaive(e0, e1 uint64) uint64 {
var result uint64 = 0
var shifted = e0
var i uint64
for i = 0; i < 64; i++ {
if (e1)&(1<<i) != 0 {
result = result ^ shifted
}
if shifted&(0x8000000000000000) != 0 {
shifted <<= 1
shifted ^= gf64MOD
} else {
shifted = shifted << 1
}
}
return result
} | gf64_field.go | 0.557123 | 0.455925 | gf64_field.go | starcoder |
package clikit
import termbox "github.com/nsf/termbox-go"
// RelCoord is a coordinate relative to a Canvas's origin
type RelCoord int
// AbsCoord is a coordinate relative to the terminal's origin
type AbsCoord int
// Canvas represents an absolute rectange on the screen upon which drawing operations are applied.
// All drawing methods take canvas-relative coordinates and clip to this rectangle.
type Canvas struct {
	// Absolute X
	X AbsCoord
	// Absolute Y
	Y AbsCoord
	// Width
	W int
	// Height
	H int
}
// NewCanvasFullScreen constructs a BoundingBox encompassing the entire terminal.
// The size is queried from termbox at call time.
func NewCanvasFullScreen() Canvas {
	w, h := termbox.Size()
	return Canvas{
		X: 0,
		Y: 0,
		W: w,
		H: h,
	}
}
// relToAbs translates canvas-relative coordinates to absolute screen
// coordinates by offsetting them with the canvas origin.
func (cvs *Canvas) relToAbs(xr, yr RelCoord) (xa, ya AbsCoord) {
	return cvs.X + AbsCoord(xr), cvs.Y + AbsCoord(yr)
}
// ForChild creates a new derived canvas from the relative coordinates of a child component.
// The child's position is translated into absolute coordinates and its
// modeled width/height become the new canvas bounds.
func (cvs *Canvas) ForChild(pm PositionalModel) Canvas {
	x, y := cvs.relToAbs(pm.Position().X(), pm.Position().Y())
	return Canvas{
		X: x,
		Y: y,
		W: pm.Width().Value(),
		H: pm.Height().Value(),
	}
}
// Clone creates a new derived Canvas identical to this canvas.
func (cvs *Canvas) Clone() Canvas {
	// Canvas holds only value fields, so a shallow copy is a complete copy.
	c := *cvs
	return c
}
// isIn reports whether the absolute coordinates fall inside this canvas
// rectangle (left/top inclusive, right/bottom exclusive).
func (cvs *Canvas) isIn(xa, ya AbsCoord) bool {
	if xa < cvs.X || ya < cvs.Y {
		return false
	}
	return xa < AbsCoord(int(cvs.X)+cvs.W) && ya < AbsCoord(int(cvs.Y)+cvs.H)
}
// FillBgBox fills a box with a background color, preserving the foreground contents.
// Coordinates are canvas-relative; cells outside the canvas are skipped.
func (cvs *Canvas) FillBgBox(x, y RelCoord, w, h int, bg termbox.Attribute) {
	// Snapshot of the current cells so the existing rune/foreground can be kept.
	curBuf := termbox.CellBuffer()
	for xi := x; xi < RelCoord(int(x)+w); xi++ {
		for yi := y; yi < RelCoord(int(y)+h); yi++ {
			xa, ya := cvs.relToAbs(xi, yi)
			if cvs.isIn(xa, ya) {
				curCell := cvs.cellAt(curBuf, xa, ya)
				cvs.setCell(xa, ya, curCell.Ch, curCell.Fg, bg)
			}
		}
	}
}
// FillFgBox fills a box with a rune, preserving the background.
// Coordinates are canvas-relative; cells outside the canvas are skipped.
func (cvs *Canvas) FillFgBox(x, y RelCoord, w, h int, ch rune, fg termbox.Attribute) {
	// Snapshot of the current cells so the existing background can be kept.
	curBuf := termbox.CellBuffer()
	for xi := x; xi < RelCoord(int(x)+w); xi++ {
		for yi := y; yi < RelCoord(int(y)+h); yi++ {
			xa, ya := cvs.relToAbs(xi, yi)
			if cvs.isIn(xa, ya) {
				curCell := cvs.cellAt(curBuf, xa, ya)
				cvs.setCell(xa, ya, ch, fg, curCell.Bg)
			}
		}
	}
}
// DrawHorizLine draws a horizontal line with a given rune in the foreground,
// preserving the background. Cells outside the canvas are skipped.
func (cvs *Canvas) DrawHorizLine(x, y RelCoord, len int, ch rune, fg termbox.Attribute) {
	curBuf := termbox.CellBuffer()
	for xi := x; xi < RelCoord(int(x)+len); xi++ {
		xa, ya := cvs.relToAbs(xi, y)
		if cvs.isIn(xa, ya) {
			curCell := cvs.cellAt(curBuf, xa, ya)
			cvs.setCell(xa, ya, ch, fg, curCell.Bg)
		}
	}
}
// DrawCappedHorizLine draws a horizontal line with a given rune and endcaps in the foreground,
// preserving the background. The middle segment spans len-2 cells between the caps.
func (cvs *Canvas) DrawCappedHorizLine(x, y RelCoord, len int, left, middle, right rune, fg termbox.Attribute) {
	cvs.DrawRuneFg(x, y, left, fg)
	cvs.DrawHorizLine(x+1, y, len-2, middle, fg)
	cvs.DrawRuneFg(RelCoord(int(x)+len-1), y, right, fg)
}
// DrawHorizLineStyle draws a horizontal line with a given style in the foreground,
// preserving the background. The style supplies the cap and body runes.
func (cvs *Canvas) DrawHorizLineStyle(x, y RelCoord, len int, lineStyle *LineStyle, fg termbox.Attribute) {
	cvs.DrawCappedHorizLine(x, y, len, lineStyle.LeftCap, lineStyle.Horiz, lineStyle.RightCap, fg)
}
// DrawVertLine draws a vertical line with a given rune in the foreground,
// preserving the background. Cells outside the canvas are skipped.
func (cvs *Canvas) DrawVertLine(x, y RelCoord, len int, ch rune, fg termbox.Attribute) {
	curBuf := termbox.CellBuffer()
	for yi := y; yi < RelCoord(int(y)+len); yi++ {
		xa, ya := cvs.relToAbs(x, yi)
		if cvs.isIn(xa, ya) {
			curCell := cvs.cellAt(curBuf, xa, ya)
			cvs.setCell(xa, ya, ch, fg, curCell.Bg)
		}
	}
}
// DrawCappedVertLine draws a vertical line with a given rune and endcaps in the foreground,
// preserving the background. The middle segment spans len-2 cells between the caps.
func (cvs *Canvas) DrawCappedVertLine(x, y RelCoord, len int, top, middle, bottom rune, fg termbox.Attribute) {
	cvs.DrawRuneFg(x, y, top, fg)
	cvs.DrawVertLine(x, y+1, len-2, middle, fg)
	cvs.DrawRuneFg(x, RelCoord(int(y)+len-1), bottom, fg)
}
// DrawVertLineStyle draws a vertical line with a given style in the foreground,
// preserving the background. The style supplies the cap and body runes.
func (cvs *Canvas) DrawVertLineStyle(x, y RelCoord, len int, lineStyle *LineStyle, fg termbox.Attribute) {
	cvs.DrawCappedVertLine(x, y, len, lineStyle.TopCap, lineStyle.Vert, lineStyle.BottomCap, fg)
}
// DrawRuneFg draws a rune in the foreground, preserving the background.
// The cell is skipped entirely when it lies outside the canvas.
func (cvs *Canvas) DrawRuneFg(x, y RelCoord, ch rune, fg termbox.Attribute) {
	curBuf := termbox.CellBuffer()
	xa, ya := cvs.relToAbs(x, y)
	if cvs.isIn(xa, ya) {
		curCell := cvs.cellAt(curBuf, xa, ya)
		cvs.setCell(xa, ya, ch, fg, curCell.Bg)
	}
}
// DrawCellBg draws a cell's background, preserving the foreground content.
// The cell is skipped entirely when it lies outside the canvas.
func (cvs *Canvas) DrawCellBg(x, y RelCoord, bg termbox.Attribute) {
	curBuf := termbox.CellBuffer()
	xa, ya := cvs.relToAbs(x, y)
	if cvs.isIn(xa, ya) {
		curCell := cvs.cellAt(curBuf, xa, ya)
		cvs.setCell(xa, ya, curCell.Ch, curCell.Fg, bg)
	}
}
// DrawBorder draws a border in a given style, preserving the background.
// The four corners are drawn first, then the four edges between them.
func (cvs *Canvas) DrawBorder(x, y RelCoord, w, h int, borderStyle *BorderStyle, fg termbox.Attribute) {
	cvs.DrawRuneFg(x, y, borderStyle.TopLeft, fg)
	cvs.DrawRuneFg(RelCoord(int(x)+w-1), y, borderStyle.TopRight, fg)
	cvs.DrawRuneFg(x, RelCoord(int(y)+h-1), borderStyle.BottomLeft, fg)
	cvs.DrawRuneFg(RelCoord(int(x)+w-1), RelCoord(int(y)+h-1), borderStyle.BottomRight, fg)
	cvs.DrawHorizLine(x+1, y, w-2, borderStyle.Top, fg)
	cvs.DrawHorizLine(x+1, RelCoord(int(y)+h-1), w-2, borderStyle.Bottom, fg)
	cvs.DrawVertLine(x, y+1, h-2, borderStyle.Left, fg)
	cvs.DrawVertLine(RelCoord(int(x)+w-1), y+1, h-2, borderStyle.Right, fg)
}
// DrawStringFg draws a string in the foreground starting at (x, y),
// one rune per cell, preserving the background.
func (cvs *Canvas) DrawStringFg(x, y RelCoord, str string, fg termbox.Attribute) {
	offset := 0
	for _, glyph := range str {
		cvs.DrawRuneFg(x+RelCoord(offset), y, glyph, fg)
		offset++
	}
}
// setCell writes a cell at absolute screen coordinates via termbox.
func (cvs *Canvas) setCell(xa, ya AbsCoord, ch rune, fg, bg termbox.Attribute) {
	termbox.SetCell(int(xa), int(ya), ch, fg, bg)
}
// cellAt returns the cell at absolute screen coordinates from the given
// termbox cell buffer, which is laid out row-major at terminal width.
func (cvs *Canvas) cellAt(buf []termbox.Cell, x, y AbsCoord) termbox.Cell {
	w, _ := termbox.Size()
	lineOffset := AbsCoord(w * int(y))
	cellOffset := lineOffset + x
	return buf[cellOffset]
}
package main
import (
"github.com/ByteArena/box2d"
"github.com/wdevore/Ranger-Go-IGE/api"
"github.com/wdevore/Ranger-Go-IGE/engine/rendering/color"
"github.com/wdevore/Ranger-Go-IGE/extras/shapes"
)
// Build2 assembles the seesaw's visual nodes (polygon, circle, square)
// and its Box2D body at the given position. The circle and square are
// children of the polygon node.
func (p *seesawPhysicsComponent) Build2(world api.IWorld, parent api.INode, phyWorld *box2d.B2World, position api.IPoint) {
	p.position = position
	p.buildPolygon2(world, parent)
	p.buildCircle2(world, p.phyNode)
	p.buildSquare2(world, p.phyNode)
	p.buildPhysics2(phyWorld, position)
}
// buildPhysics2 creates a single dynamic Box2D body at position and
// attaches three fixtures mirroring the visual nodes: the polygon
// outline, the circle, and the square. The fixture shapes are read back
// from the already-built nodes so physics and rendering agree.
func (p *seesawPhysicsComponent) buildPhysics2(phyWorld *box2d.B2World, position api.IPoint) {
	// -------------------------------------------
	// A body def used to create bodies
	bDef := box2d.MakeB2BodyDef()
	bDef.Type = box2d.B2BodyType.B2_dynamicBody
	// Set the position of the Body
	px := position.X()
	py := position.Y()
	bDef.Position.Set(
		float64(px),
		float64(py),
	)
	// An instance of a body to contain Fixtures
	p.b2Body = phyWorld.CreateBody(&bDef)
	// Every Fixture has a shape
	b2Shape := box2d.MakeB2PolygonShape()
	// Box2D assumes the same is defined in unit-space which
	// means if the object is defined otherwise we need the object
	// to return the correct value
	tcc := p.phyNode.(*shapes.MonoPolygonNode)
	vertices := []box2d.B2Vec2{}
	verts := tcc.Vertices()
	// s := p.phyNode.Scale()
	// NOTE(review): the node's scale is intentionally not applied here —
	// buildPolygon2 bakes the scale into the vertices; confirm if changed.
	// Vertices are stored flat as x,y,z triples; only x and y are used.
	vertices = append(vertices, box2d.B2Vec2{X: float64((*verts)[0]), Y: float64((*verts)[1])})
	vertices = append(vertices, box2d.B2Vec2{X: float64((*verts)[3]), Y: float64((*verts)[4])})
	vertices = append(vertices, box2d.B2Vec2{X: float64((*verts)[6]), Y: float64((*verts)[7])})
	vertices = append(vertices, box2d.B2Vec2{X: float64((*verts)[9]), Y: float64((*verts)[10])})
	vertices = append(vertices, box2d.B2Vec2{X: float64((*verts)[12]), Y: float64((*verts)[13])})
	b2Shape.Set(vertices, len(vertices))
	fd := box2d.MakeB2FixtureDef()
	fd.Shape = &b2Shape
	fd.Density = 1.0
	p.b2Body.CreateFixtureFromDef(&fd) // attach Fixture to body
	// ---------------------------------------------------------------
	// Circle
	// ---------------------------------------------------------------
	// Every Fixture has a shape
	b2CircleShape := box2d.MakeB2CircleShape()
	b2CircleShape.M_p.Set(float64(p.circle.Position().X()), float64(p.circle.Position().Y())) // Relative to body position
	gcir := p.circle.(*shapes.MonoCircleNode)
	b2CircleShape.SetRadius(float64(gcir.Radius()))
	fd = box2d.MakeB2FixtureDef()
	fd.Shape = &b2CircleShape
	fd.Density = 1.0
	p.b2Body.CreateFixtureFromDef(&fd) // attach Fixture to body
	// ---------------------------------------------------------------
	// Square
	// ---------------------------------------------------------------
	// Every Fixture has a shape
	b2SquareShape := box2d.MakeB2PolygonShape()
	gss := p.square.(*shapes.MonoSquareNode)
	b2SquareShape.SetAsBoxFromCenterAndAngle(
		float64(gss.HalfSide()), float64(gss.HalfSide()),
		box2d.B2Vec2{X: float64(p.square.Position().X()), Y: float64(p.square.Position().Y())}, 0.0)
	fd = box2d.MakeB2FixtureDef()
	fd.Shape = &b2SquareShape
	fd.Density = 1.0
	p.b2Body.CreateFixtureFromDef(&fd) // attach Fixture to body
}
// buildPolygon2 creates the 5-vertex polygon node used by the seesaw,
// positions it at the component's position and colors it light orange.
func (p *seesawPhysicsComponent) buildPolygon2(world api.IWorld, parent api.INode) error {
	// Vertex layout: x,y,z triplets forming a 5-point polygon.
	verts := []float32{
		-1.0, 2.0, 0.0,
		-1.0, 0.0, 0.0,
		0.0, -3.0, 0.0,
		1.0, 0.0, 0.0,
		1.0, 1.0, 0.0,
	}
	// This build version bakes the scale directly into the vertex data
	// rather than scaling the node afterwards.
	const scale = float32(3.0)
	for i := range verts {
		verts[i] *= scale
	}
	idx := []uint32{0, 1, 2, 3, 4}

	node, err := shapes.NewMonoPolygonNode("Polygon", &verts, &idx, api.OUTLINED, world, parent)
	if err != nil {
		return err
	}
	p.phyNode = node
	p.phyNode.SetPosition(p.position.X(), p.position.Y())
	p.phyNode.(*shapes.MonoPolygonNode).SetColor(color.NewPaletteInt64(color.LightOrange))
	return nil
}
// buildCircle2 creates the filled/outlined circle payload node, scales and
// positions it, and colors it green.
func (p *seesawPhysicsComponent) buildCircle2(world api.IWorld, parent api.INode) error {
	circle, err := shapes.NewMonoCircleNode("Circle", api.FILLOUTLINED, 10, world, parent)
	if err != nil {
		return err
	}
	p.circle = circle
	p.circle.SetScale(5.0)
	p.circle.SetPosition(-8.0, 0.0)
	p.circle.(*shapes.MonoCircleNode).SetFilledColor(color.NewPaletteInt64(color.Green))
	return nil
}
// buildSquare2 creates the filled/outlined square payload node, scales and
// positions it, and colors it aqua.
// NOTE(review): the third argument is presumably "centered" — confirm
// against the shapes.NewMonoSquareNode API.
func (p *seesawPhysicsComponent) buildSquare2(world api.IWorld, parent api.INode) error {
	var err error
	p.square, err = shapes.NewMonoSquareNode("Square", api.FILLOUTLINED, true, world, parent)
	if err != nil {
		return err
	}
	p.square.SetScale(5.0)
	p.square.SetPosition(8.0, 0.0)
	gsq := p.square.(*shapes.MonoSquareNode)
	gsq.SetFilledColor(color.NewPaletteInt64(color.Aqua))
	return nil
} | examples/complex/physics/basic/p7_seesaw/seesaw_physics_component2.go | 0.625896 | 0.556761 | seesaw_physics_component2.go | starcoder
package serialization
import (
"reflect"
"strings"
"sync"
"github.com/modern-go/reflect2"
"github.com/pkg/errors"
)
// cache maps type descriptor strings (as produced by Type.String /
// serializeTypeInfo) to their wrapped Type, avoiding repeated resolution.
var cache sync.Map
// Type wraps reflect.Type with serialization support.
// To deserialize the type on the remote, it also needs be available and registered in the remote side.
type Type struct {
	// T is the wrapped reflect2 type. It is excluded from JSON; the textual
	// descriptor produced by MarshalText/String is used on the wire instead.
	T reflect2.Type `json:"-"`
}
// TypeOf returns a wrapped type of given value. It also has a side-effect that registers given type,
// which allows deserialization of the serialized given type on this process/application.
func TypeOf(v interface{}) Type {
	// reflect2.TypeOf additionally registers the type in reflect2's own cache.
	wrapped := Type{T: reflect2.TypeOf(v)}
	cache.Store(wrapped.String(), wrapped)
	return wrapped
}
// TypeFromString loads and returns type from given type descriptor.
// type descriptor is composed of <kind><pkgPath>.<typeName> (e.g. []*github.com/pkg/errors.Error).
// It returns ErrUnresolved if given type is not found on this process/application.
func TypeFromString(desc string) (Type, error) {
	if cached, ok := cache.Load(desc); ok {
		return cached.(Type), nil
	}
	rt, err := deserializeType(desc)
	if err != nil {
		return Type{}, err
	}
	wrapped := Type{T: reflect2.Type2(rt)}
	cache.Store(desc, wrapped)
	return wrapped, nil
}
// New returns a pointer to data of this type.
func (t Type) New() interface{} {
	return t.T.New()
}

// IsSameType reports whether v's dynamic type is exactly the wrapped type.
func (t Type) IsSameType(v interface{}) bool {
	return t.T == reflect2.TypeOf(v)
}

// String returns the textual type descriptor (e.g. "[]*pkg/path.Name"),
// or "nil" when no type is wrapped.
func (t Type) String() string {
	if t.T == nil {
		return "nil"
	}
	return serializeTypeInfo(t.T.Type1())
}

// MarshalText implements encoding.TextMarshaler using the descriptor form.
func (t Type) MarshalText() ([]byte, error) {
	return []byte(t.String()), nil
}
// UnmarshalText implements encoding.TextUnmarshaler by resolving the
// descriptor produced by MarshalText via TypeFromString.
//
// Bug fix: the previous version returned nil when TypeFromString failed,
// silently swallowing resolution errors (e.g. ErrUnresolved) and leaving
// the receiver set to the zero Type while reporting success.
func (t *Type) UnmarshalText(d []byte) (err error) {
	*t, err = TypeFromString(string(d))
	return err
}
// serializeTypeInfo renders a reflect.Type as a descriptor string of the form
// <kind-prefix><pkgPath>.<typeName>, e.g. "[]*github.com/pkg/errors.Error".
// A nil type renders as "nil"; slice and pointer kinds recurse on the element.
func serializeTypeInfo(typ reflect.Type) string {
	if typ == nil {
		return "nil"
	}
	switch typ.Kind() {
	case reflect.Slice:
		return "[]" + serializeTypeInfo(typ.Elem())
	case reflect.Ptr:
		return "*" + serializeTypeInfo(typ.Elem())
	}
	pkg := typ.PkgPath()
	if pkg == "" {
		// predeclared / builtin types carry no package path
		return typ.Name()
	}
	return pkg + "." + typ.Name()
}
// deserializeType is the inverse of serializeTypeInfo: it resolves a type
// descriptor back into a reflect.Type, peeling "[]" and "*" prefixes and then
// consulting the descriptor cache and reflect2's package/name registry.
func deserializeType(typ string) (reflect.Type, error) {
	if strings.HasPrefix(typ, "[]") {
		elem, err := deserializeType(typ[2:])
		if err != nil {
			return nil, err
		}
		return reflect.SliceOf(elem), nil
	}
	if strings.HasPrefix(typ, "*") {
		elem, err := deserializeType(typ[1:])
		if err != nil {
			return nil, err
		}
		return reflect.PtrTo(elem), nil
	}
	// primitives / types previously wrapped via TypeOf
	if cached, ok := cache.Load(typ); ok {
		return cached.(Type).T.Type1(), nil
	}
	dot := strings.LastIndex(typ, ".")
	if dot < 0 {
		return nil, errors.Errorf("invalid type: %s", typ)
	}
	pkg, name := typ[:dot], typ[dot+1:]
	if t := reflect2.TypeByPackageName(pkg, name); t != nil {
		return t.Type1(), nil
	}
	return nil, errors.Wrapf(ErrUnresolved, "resolve %s", typ)
}
// ErrUnresolved is returned when the type with given package path and name does not exist.
// It is usually caused by the type being unused: the Go toolchain erases unused and
// unimported types, so the receiver of the serialized struct must import (and
// reference) the referred type.
var ErrUnresolved = errors.New("unknown type")
// register primitive types
//
// Each TypeOf call seeds the descriptor cache so primitives resolve through
// TypeFromString without a package path. byte/uint8 and rune/int32 are
// aliases, so some registrations overlap harmlessly.
var (
	_ = TypeOf(nil)
	_ = TypeOf(0)
	_ = TypeOf("")
	_ = TypeOf(true)
	_ = TypeOf(byte(0))
	_ = TypeOf(rune(0))
	_ = TypeOf(int8(0))
	_ = TypeOf(int16(0))
	_ = TypeOf(int32(0))
	_ = TypeOf(int64(0))
	_ = TypeOf(uint8(0))
	_ = TypeOf(uint16(0))
	_ = TypeOf(uint32(0))
	_ = TypeOf(uint64(0))
	_ = TypeOf(float32(0))
	_ = TypeOf(float64(0))
	_ = TypeOf(complex64(0))
	_ = TypeOf(complex128(0))
) | internal/serialization/type.go | 0.719384 | 0.411879 | type.go | starcoder
package curves
import (
"math/big"
)
// CurveSystem is a set of parameters and functions for a pairing based cryptosystem
// It has everything necessary to support all bgls functionality which we use.
type CurveSystem interface {
	Name() string

	// Point construction from affine coordinates.
	// NOTE(review): the bool parameter's meaning (presumably validity
	// checking) should be confirmed against implementations.
	MakeG1Point([]*big.Int, bool) (Point, bool)
	MakeG2Point([]*big.Int, bool) (Point, bool)
	// GTToAffineCoords(PointT) (*big.Int, *big.Int)

	// Deserialization of compressed/serialized points; the bool reports success.
	UnmarshalG1([]byte) (Point, bool)
	UnmarshalG2([]byte) (Point, bool)
	UnmarshalGT([]byte) (PointT, bool)

	// Group generators and identity elements.
	GetG1() Point
	GetG2() Point
	GetGT() PointT
	GetG1Infinity() Point
	GetG2Infinity() Point
	GetGTIdentity() PointT

	HashToG1(message []byte) Point

	// Curve parameters.
	GetG1Q() *big.Int
	GetG1Order() *big.Int
	// getGTQ() *big.Int
	getG1Cofactor() *big.Int
	getG1A() *big.Int
	getG1B() *big.Int
	// Fouque-Tibouchi hash parameters, sqrt(-3), (-1 + sqrt(-3))/2 computed in F_q
	getFTHashParams() (*big.Int, *big.Int)
	g1XToYSquared(*big.Int) *big.Int

	Pair(Point, Point) (PointT, bool)
	// Product of Pairings
	PairingProduct([]Point, []Point) (PointT, bool)
}
// Point is a way to represent a point on G1 or G2, in the first two elliptic curves.
type Point interface {
	// Add returns the group sum of the two points; the bool reports success.
	Add(Point) (Point, bool)
	Copy() Point
	Equals(Point) bool
	Marshal() []byte
	MarshalUncompressed() []byte
	// Mul returns the scalar multiple of this point.
	Mul(*big.Int) Point
	ToAffineCoords() []*big.Int
}
// PointT is a way to represent a point on GT, in the target group
type PointT interface {
	// Add returns the group sum of the two points; the bool reports success.
	Add(PointT) (PointT, bool)
	Copy() PointT
	Equals(PointT) bool
	Marshal() []byte
	Mul(*big.Int) PointT
	// ToAffineCoords() (*big.Int, *big.Int)
}
// AggregatePoints takes the sum of points using a concurrent pairwise
// tree reduction.
//
// Bug fix: the previous implementation looped forever on an empty input
// (the reduction loop's len==1 exit condition was never reached). It now
// returns nil for empty input and the sole element for a single point.
func AggregatePoints(points []Point) Point {
	// Degenerate inputs handled up front; the reduction below assumes a
	// non-empty working set.
	if len(points) == 0 {
		return nil
	}
	if len(points) == 1 {
		return points[0]
	}
	if len(points) == 2 { // No parallelization needed
		aggPoint, _ := points[0].Add(points[1])
		return aggPoint
	}
	// Aggregate all the points together using concurrency.
	c := make(chan Point)
	// First pass: sum every pair of adjacent points.
	counter := 0
	for i := 0; i < len(points); i += 2 {
		go concurrentAggregatePoints(i, points, c)
		counter++
	}
	aggPoint := make([]Point, counter)
	for i := 0; i < counter; i++ {
		aggPoint[i] = <-c
	}
	// Keep halving the working set until one aggregate point remains.
	for len(aggPoint) > 1 {
		counter = 0
		for i := 0; i < len(aggPoint); i += 2 {
			go concurrentAggregatePoints(i, aggPoint, c)
			counter++
		}
		next := make([]Point, counter)
		for i := 0; i < counter; i++ {
			next[i] = <-c
		}
		aggPoint = next
	}
	return aggPoint[0]
}
// concurrentAggregatePoints handles the channel for concurrent Aggregation of points.
// It only adds the element at points[start] and points[start + 1], and sends it through the channel.
// When start is the final index (odd-length slice), the element is forwarded unchanged.
func concurrentAggregatePoints(start int, points []Point, c chan Point) {
	if start+1 >= len(points) {
		c <- points[start]
		return
	}
	summed, _ := points[start].Add(points[start+1])
	c <- summed
}
// concurrentPairingProduct computes a set of pairings in parallel,
// and then takes their product again using concurrency.
//
// Bug fixes: (1) the previous implementation looped forever on empty input;
// it now returns (nil, false). (2) It returned early on a failed pairing
// while sibling goroutines were still blocked sending on the unbuffered
// channel, leaking them; all results are now drained before checking.
func concurrentPairingProduct(curve CurveSystem, points1 []Point, points2 []Point) (PointT, bool) {
	if len(points1) != len(points2) || len(points1) == 0 {
		return nil, false
	}
	// Compute all the pairings in parallel.
	c := make(chan PointT)
	pairedPoints := make([]PointT, len(points1))
	for i := range pairedPoints {
		go concurrentPair(curve, points1[i], points2[i], c)
	}
	// Drain every result first so no sender is left blocked, then check
	// for failures (concurrentPair sends nil on failure).
	for i := range pairedPoints {
		pairedPoints[i] = <-c
	}
	for i := range pairedPoints {
		if pairedPoints[i] == nil {
			return nil, false
		}
	}
	// First reduction pass: sum every pair of adjacent points.
	counter := 0
	for i := 0; i < len(pairedPoints); i += 2 {
		go concurrentAggregatePointTs(i, pairedPoints, c)
		counter++
	}
	aggPairedPoints := make([]PointT, counter)
	for i := 0; i < counter; i++ {
		aggPairedPoints[i] = <-c
	}
	// Keep halving the working set until a single product remains.
	for len(aggPairedPoints) > 1 {
		counter = 0
		for i := 0; i < len(aggPairedPoints); i += 2 {
			go concurrentAggregatePointTs(i, aggPairedPoints, c)
			counter++
		}
		next := make([]PointT, counter)
		for i := 0; i < counter; i++ {
			next[i] = <-c
		}
		aggPairedPoints = next
	}
	return aggPairedPoints[0], true
}
// concurrentAggregatePointTs handles the channel for concurrent aggregation of
// target-group points. It only adds the element at points[start] and
// points[start+1], and sends the sum through the channel; the final element of
// an odd-length slice is forwarded unchanged.
func concurrentAggregatePointTs(start int, points []PointT, c chan PointT) {
	if start+1 >= len(points) {
		c <- points[start]
		return
	}
	summed, _ := points[start].Add(points[start+1])
	c <- summed
}
// indexedPoint pairs a Point with its original slice index so that results
// arriving out of order on a channel can be placed correctly (see ScalePoints).
type indexedPoint struct {
	index int
	pt    Point
}
// ScalePoints takes a set of points, and a set of multiples, and returns a
// new set of points multiplied by the corresponding factor.
// A nil factors slice returns pts unchanged; a length mismatch returns nil.
func ScalePoints(pts []Point, factors []*big.Int) (newKeys []Point) {
	if factors == nil {
		return pts
	}
	if len(pts) != len(factors) {
		return nil
	}
	newKeys = make([]Point, len(pts))
	results := make(chan *indexedPoint)
	for i := range pts {
		go concurrentScale(pts[i], factors[i], i, results)
	}
	// Results arrive in arbitrary order; the carried index restores position.
	for range pts {
		scaled := <-results
		newKeys[scaled.index] = scaled.pt
	}
	return newKeys
}
// concurrentScale multiplies key by factor (or copies it when factor is nil)
// and sends the result, tagged with its index, down the channel.
func concurrentScale(key Point, factor *big.Int, index int, c chan *indexedPoint) {
	var pt Point
	if factor == nil {
		pt = key.Copy()
	} else {
		pt = key.Mul(factor)
	}
	c <- &indexedPoint{index: index, pt: pt}
}
// concurrentPair pairs pt1 with pt2, and sends the result down the channel.
// A nil is sent when the pairing fails so the receiver can detect the error.
func concurrentPair(curve CurveSystem, pt1 Point, pt2 Point, c chan PointT) {
	if targetPoint, ok := curve.Pair(pt1, pt2); ok {
		c <- targetPoint
		return
	}
	c <- nil
} | curves/curve.go | 0.735357 | 0.629348 | curve.go | starcoder
package v1
import (
"encoding/json"
"time"
)
// Timeframe struct for Timeframe.
// Both bounds are required by the API; they marshal as RFC 3339 timestamps
// (the encoding/json default for time.Time).
type Timeframe struct {
	StartedAt time.Time `json:"started_at"`
	EndedAt   time.Time `json:"ended_at"`
}
// NewTimeframe instantiates a new Timeframe object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewTimeframe(startedAt time.Time, endedAt time.Time) *Timeframe {
	return &Timeframe{
		StartedAt: startedAt,
		EndedAt:   endedAt,
	}
}
// NewTimeframeWithDefaults instantiates a new Timeframe object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewTimeframeWithDefaults() *Timeframe {
	// Both fields remain zero time.Time values.
	this := Timeframe{}
	return &this
}
// Generated accessors: the nil-receiver guards return zero values rather than
// panicking, per the OpenAPI generator convention.

// GetStartedAt returns the StartedAt field value
func (o *Timeframe) GetStartedAt() time.Time {
	if o == nil {
		var ret time.Time
		return ret
	}
	return o.StartedAt
}

// GetStartedAtOk returns a tuple with the StartedAt field value
// and a boolean to check if the value has been set.
func (o *Timeframe) GetStartedAtOk() (*time.Time, bool) {
	if o == nil {
		return nil, false
	}
	return &o.StartedAt, true
}

// SetStartedAt sets field value
func (o *Timeframe) SetStartedAt(v time.Time) {
	o.StartedAt = v
}

// GetEndedAt returns the EndedAt field value
func (o *Timeframe) GetEndedAt() time.Time {
	if o == nil {
		var ret time.Time
		return ret
	}
	return o.EndedAt
}

// GetEndedAtOk returns a tuple with the EndedAt field value
// and a boolean to check if the value has been set.
func (o *Timeframe) GetEndedAtOk() (*time.Time, bool) {
	if o == nil {
		return nil, false
	}
	return &o.EndedAt, true
}

// SetEndedAt sets field value
func (o *Timeframe) SetEndedAt(v time.Time) {
	o.EndedAt = v
}
// MarshalJSON serializes both required fields. json.Marshal writes map keys
// in sorted order, matching the original generated output.
func (o Timeframe) MarshalJSON() ([]byte, error) {
	payload := map[string]interface{}{
		"started_at": o.StartedAt,
		"ended_at":   o.EndedAt,
	}
	return json.Marshal(payload)
}
// NullableTimeframe wraps a *Timeframe together with an explicit "set" flag,
// distinguishing "absent" from "present but null".
type NullableTimeframe struct {
	value *Timeframe
	isSet bool
}

// Get returns the wrapped value (may be nil).
func (v NullableTimeframe) Get() *Timeframe {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableTimeframe) Set(val *Timeframe) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value (possibly nil) has been assigned.
func (v NullableTimeframe) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullableTimeframe) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableTimeframe wraps val in an already-set NullableTimeframe.
func NewNullableTimeframe(val *Timeframe) *NullableTimeframe {
	return &NullableTimeframe{value: val, isSet: true}
}

// MarshalJSON serializes the wrapped value; a nil value encodes as JSON null.
func (v NullableTimeframe) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes into the wrapped value and marks it as set; a JSON
// null leaves value nil while isSet remains true.
func (v *NullableTimeframe) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | v1/model_timeframe.go | 0.78964 | 0.429549 | model_timeframe.go | starcoder
package render
import (
geometry "basic-ray/pkg/geometry"
_ "fmt"
pb "github.com/cheggaaa/pb/v3"
"math"
)
// Main renders the scene sequentially: it traces one primary ray per pixel
// from origin through the camera plane and stores the resulting color,
// driving a console progress bar as it goes.
func Main(origin geometry.Point, lightSources []LightSource, camera *Camera, triangles []*geometry.Triangle) {
	bar := pb.StartNew(len(*camera.Pixels) * len((*camera.Pixels)[0]))
	for i, row := range *camera.Pixels {
		for j := range row {
			ray := geometry.Ray{
				Origin: origin,
				Vector: geometry.Normalize(geometry.CreateVector(GetPoint(camera, i, j), origin)),
			}
			photon := Trace(&ray, triangles, lightSources, 0)
			(*camera.Pixels)[i][j] = photon.rgb
			bar.Increment()
		}
	}
	bar.Finish()
}
// MultiThreadedMain renders the scene like Main but fans the per-pixel work
// out to goroutines, collecting progress reports on a buffered channel.
func MultiThreadedMain(origin geometry.Point, lightSources []LightSource, camera *Camera, triangles []*geometry.Triangle) {
	progress := make(chan bool, 20)
	totalCount := len(*camera.Pixels) * len((*camera.Pixels)[0])
	bar := pb.StartNew(totalCount)
	go renderPixels(origin, triangles, lightSources, camera, progress)
	// Blocks until every pixel has reported, so closing afterwards is safe.
	reportProgress(bar, progress, totalCount)
	close(progress)
	bar.Finish()
}
// reportProgress advances the progress bar once per message, returning after
// totalCount pixels have reported.
func reportProgress(bar *pb.ProgressBar, progress <-chan bool, totalCount int) {
	for done := 0; done < totalCount; done++ {
		<-progress
		bar.Increment()
	}
}
// renderPixels spawns one goroutine per pixel, using the jobs channel as a
// counting semaphore that caps in-flight pixel renders at 5.
func renderPixels(origin geometry.Point, triangles []*geometry.Triangle, lightSources []LightSource, camera *Camera, progress chan<- bool) {
	jobs := make(chan bool, 5)
	for i, row := range *camera.Pixels {
		for j, _ := range row {
			// Acquire a job slot; blocks when 5 renders are already running.
			jobs <- true
			ray := geometry.Ray{
				Origin: origin,
				Vector: geometry.Normalize(geometry.CreateVector(GetPoint(camera, i, j), origin)),
			}
			go renderPixel(&ray, triangles, lightSources, camera, i, j, progress, jobs)
		}
	}
}
// renderPixel traces one primary ray, stores the resulting color at (i, j),
// releases its semaphore slot, and reports progress.
func renderPixel(ray *geometry.Ray, triangles []*geometry.Triangle, lightSources []LightSource, camera *Camera, i, j int, progress chan<- bool, jobs <-chan bool) {
	light := Trace(ray, triangles, lightSources, 0)
	(*camera.Pixels)[i][j] = light.rgb // GetWeightedColor()
	// complete job
	<-jobs
	// report progress
	progress <- true
}
// Trace follows ray through the scene and returns the photon for the nearest
// intersected triangle, or a default photon (facing back along the ray) when
// nothing is hit or the recursion depth limit of 3 is reached.
func Trace(ray *geometry.Ray, triangles []*geometry.Triangle, lightSources []LightSource, depth int) Photon {
	receiveVector := geometry.Normalize(geometry.ScalarProduct(ray.Vector, -1))
	photon := Photon{vector: receiveVector}
	if depth >= 3 {
		return photon
	}
	nearest := math.Inf(1)
	for _, triangle := range triangles {
		hit := geometry.GetIntersection(ray, triangle)
		if hit == nil {
			continue
		}
		dist := geometry.Distance(*hit, ray.Origin)
		if dist >= nearest {
			continue
		}
		// Closest intersection so far wins; later, closer hits overwrite.
		nearest = dist
		photon = GetColor(ray, *hit, triangle, lightSources, triangles, depth)
	}
	return photon
}
// GetColor shades the intersection point according to the triangle's material:
// reflective surfaces recurse via Trace, diffuse surfaces gather direct light
// and run the matching shader. Unknown materials yield a default photon.
func GetColor(
	ray *geometry.Ray,
	reflectionPoint geometry.Point,
	triangle *geometry.Triangle,
	lightSources []LightSource,
	triangles []*geometry.Triangle,
	depth int,
) Photon {
	receiveVector := geometry.Normalize(geometry.ScalarProduct(ray.Vector, -1))
	switch triangle.MaterialType {
	case geometry.REFLECTIVE:
		// Bounce: recurse with incremented depth (Trace caps depth at 3).
		reflectionRay := &geometry.Ray{Origin: reflectionPoint, Vector: GetReflectiveVector(ray.Vector, triangle)}
		return Trace(reflectionRay, triangles, lightSources, depth+1)
	case geometry.FLAT_DIFFUSE:
		photons := GetDirectLight(reflectionPoint, triangles, lightSources)
		return DiffuseShader(receiveVector, photons, triangle)
	case geometry.GOURAUD_DIFFUSE:
		photons := GetDirectLight(reflectionPoint, triangles, lightSources)
		return GouraudShader(receiveVector, reflectionPoint, photons, triangle)
	}
	return Photon{vector: receiveVector}
} | pkg/render/ray_trace.go | 0.693577 | 0.524943 | ray_trace.go | starcoder
package gohome
import (
"github.com/PucklaMotzer09/mathgl/mgl32"
"math"
)
// Indices of the view-frustum corner points returned by Projection.GetFrustum.
// NEAR/FAR selects the plane; LEFT/RIGHT and DOWN/UP select the corner on it.
const (
	NEAR_LEFT_DOWN  = 0
	NEAR_RIGHT_DOWN = 1
	NEAR_RIGHT_UP   = 2
	NEAR_LEFT_UP    = 3
	FAR_LEFT_DOWN   = 4
	FAR_RIGHT_DOWN  = 5
	FAR_RIGHT_UP    = 6
	FAR_LEFT_UP     = 7
)
// A projection that is used to correctly display the objects on the screen
type Projection interface {
	// Calculates the projection matrix used in the shader
	CalculateProjectionMatrix()
	// Returns the projection matrix
	GetProjectionMatrix() mgl32.Mat4
	// Updates the projection with the new viewport
	Update(newViewport Viewport)
	// Returns the view frustum corners, indexed by the NEAR_/FAR_ constants.
	GetFrustum() [8]mgl32.Vec3
}
// A 2-dimensional orthogonal projection
type Ortho2DProjection struct {
	// The left most value
	Left float32
	// The right most value
	Right float32
	// The bottom mose value
	Bottom float32
	// The top most value
	Top float32

	// old* cache the values last used to build projectionMatrix so
	// CalculateProjectionMatrix can skip redundant recomputation.
	oldLeft   float32
	oldRight  float32
	oldBottom float32
	oldTop    float32

	projectionMatrix mgl32.Mat4
}
// valuesChanged reports whether any edge differs from the values used for the
// currently cached projection matrix.
func (o2Dp *Ortho2DProjection) valuesChanged() bool {
	return o2Dp.Left != o2Dp.oldLeft || o2Dp.Right != o2Dp.oldRight || o2Dp.Bottom != o2Dp.oldBottom || o2Dp.Top != o2Dp.oldTop
}
// CalculateProjectionMatrix rebuilds the cached orthographic matrix, but only
// when one of the edges changed since the last build.
func (o2Dp *Ortho2DProjection) CalculateProjectionMatrix() {
	if !o2Dp.valuesChanged() {
		return
	}
	o2Dp.projectionMatrix = mgl32.Ortho2D(o2Dp.Left, o2Dp.Right, o2Dp.Bottom, o2Dp.Top)
	// Remember the inputs so the next call can skip an unchanged rebuild.
	o2Dp.oldLeft = o2Dp.Left
	o2Dp.oldRight = o2Dp.Right
	o2Dp.oldBottom = o2Dp.Bottom
	o2Dp.oldTop = o2Dp.Top
}
// GetProjectionMatrix returns the cached projection matrix.
func (o2Dp *Ortho2DProjection) GetProjectionMatrix() mgl32.Mat4 {
	return o2Dp.projectionMatrix
}

// Update rebuilds the projection from the viewport size. Top is 0 and Bottom
// is the height, i.e. the y axis grows downward in screen space.
func (o2Dp *Ortho2DProjection) Update(newViewport Viewport) {
	o2Dp.Left = 0.0
	o2Dp.Right = float32(newViewport.Width)
	o2Dp.Top = 0.0
	o2Dp.Bottom = float32(newViewport.Height)
	o2Dp.CalculateProjectionMatrix()
}

// GetFrustum returns an empty corner array; no 3D frustum is computed for
// the 2D orthographic projection.
func (o2Dp *Ortho2DProjection) GetFrustum() [8]mgl32.Vec3 {
	return [8]mgl32.Vec3{}
}
// A 3-dimensional perspective projection
type PerspectiveProjection struct {
	// The width of the viewport
	Width float32
	// The height of the viewport
	Height float32
	// The field of view, in degrees (converted via mgl32.DegToRad).
	FOV float32
	// The near plane at which objects start to clip away
	NearPlane float32
	// The far plane at which the objects start to clip away
	FarPlane float32

	// old* cache the values last used to build projectionMatrix so
	// CalculateProjectionMatrix can skip redundant recomputation.
	oldWidth     float32
	oldHeight    float32
	oldFOV       float32
	oldNearPlane float32
	oldFarPlane  float32

	projectionMatrix mgl32.Mat4
}
// valuesChanged reports whether any parameter differs from the values used
// for the currently cached projection matrix.
func (pp *PerspectiveProjection) valuesChanged() bool {
	return pp.Width != pp.oldWidth || pp.Height != pp.oldHeight || pp.FOV != pp.oldFOV || pp.NearPlane != pp.oldNearPlane || pp.FarPlane != pp.oldFarPlane
}
// CalculateProjectionMatrix rebuilds the cached perspective matrix, but only
// when one of the parameters changed since the last build.
func (pp *PerspectiveProjection) CalculateProjectionMatrix() {
	if !pp.valuesChanged() {
		return
	}
	pp.projectionMatrix = mgl32.Perspective(mgl32.DegToRad(pp.FOV), pp.Width/pp.Height, pp.NearPlane, pp.FarPlane)
	// Remember the inputs so the next call can skip an unchanged rebuild.
	pp.oldWidth = pp.Width
	pp.oldHeight = pp.Height
	pp.oldFOV = pp.FOV
	pp.oldNearPlane = pp.NearPlane
	pp.oldFarPlane = pp.FarPlane
}
// Update adopts the viewport dimensions and rebuilds the projection matrix.
func (pp *PerspectiveProjection) Update(newViewport Viewport) {
	pp.Width = float32(newViewport.Width)
	pp.Height = float32(newViewport.Height)
	pp.CalculateProjectionMatrix()
}

// GetProjectionMatrix returns the cached projection matrix.
func (pp *PerspectiveProjection) GetProjectionMatrix() mgl32.Mat4 {
	return pp.projectionMatrix
}
// GetFrustum computes the eight view-frustum corners in camera space (eye at
// the origin, looking down -Z, +Y up), indexed by the NEAR_/FAR_ constants.
//
// NOTE(review): the half-width uses tan(FOV), while standard frustum
// derivations use tan(FOV/2) — confirm whether FOV here is meant as the
// half-angle. Also note the "width" is derived from FOV and "height" from
// width / aspect.
func (pp *PerspectiveProjection) GetFrustum() [8]mgl32.Vec3 {
	var farPlaneHalfWidth, nearPlaneHalfWidth float32
	var farPlaneHalfHeight, nearPlaneHalfHeight float32
	var centerFarPlane, centerNearPlane mgl32.Vec3
	var points [8]mgl32.Vec3
	forward := mgl32.Vec3{0.0, 0.0, -1.0}
	up := mgl32.Vec3{0.0, 1.0, 0.0}
	down := up.Mul(-1.0)
	right := mgl32.Vec3{1.0, 0.0, 0.0}
	left := right.Mul(-1.0)
	// Half extents of the near/far plane rectangles (FOV converted to radians).
	farPlaneHalfWidth = float32(math.Tan(float64(pp.FOV)/180.0*math.Pi) * float64(pp.FarPlane))
	nearPlaneHalfWidth = float32(math.Tan(float64(pp.FOV)/180.0*math.Pi) * float64(pp.NearPlane))
	farPlaneHalfHeight = farPlaneHalfWidth / (pp.Width / pp.Height)
	nearPlaneHalfHeight = nearPlaneHalfWidth / (pp.Width / pp.Height)
	centerFarPlane = forward.Mul(pp.FarPlane)
	centerNearPlane = forward.Mul(pp.NearPlane)
	// Each corner = plane center offset by the half extents.
	points[NEAR_LEFT_DOWN] = centerNearPlane.Add(left.Mul(nearPlaneHalfWidth)).Add(down.Mul(nearPlaneHalfHeight))
	points[NEAR_RIGHT_DOWN] = centerNearPlane.Add(right.Mul(nearPlaneHalfWidth)).Add(down.Mul(nearPlaneHalfHeight))
	points[NEAR_RIGHT_UP] = centerNearPlane.Add(right.Mul(nearPlaneHalfWidth)).Add(up.Mul(nearPlaneHalfHeight))
	points[NEAR_LEFT_UP] = centerNearPlane.Add(left.Mul(nearPlaneHalfWidth)).Add(up.Mul(nearPlaneHalfHeight))
	points[FAR_LEFT_DOWN] = centerFarPlane.Add(left.Mul(farPlaneHalfWidth)).Add(down.Mul(farPlaneHalfHeight))
	points[FAR_RIGHT_DOWN] = centerFarPlane.Add(right.Mul(farPlaneHalfWidth)).Add(down.Mul(farPlaneHalfHeight))
	points[FAR_RIGHT_UP] = centerFarPlane.Add(right.Mul(farPlaneHalfWidth)).Add(up.Mul(farPlaneHalfHeight))
	points[FAR_LEFT_UP] = centerFarPlane.Add(left.Mul(farPlaneHalfWidth)).Add(up.Mul(farPlaneHalfHeight))
	return points
}
// A projection doing nothing: every method is a no-op and the matrix is
// always the identity. Useful when rendering coordinates already in clip space.
type IdentityProjection struct {
}

// CalculateProjectionMatrix is a no-op.
func (IdentityProjection) CalculateProjectionMatrix() {
}

// GetProjectionMatrix always returns the identity matrix.
func (IdentityProjection) GetProjectionMatrix() mgl32.Mat4 {
	return mgl32.Ident4()
}

// Update is a no-op.
func (IdentityProjection) Update(newViewport Viewport) {
}

// GetFrustum returns an empty corner array.
func (IdentityProjection) GetFrustum() [8]mgl32.Vec3 {
	return [8]mgl32.Vec3{}
}
// A 3-dimensional orthogonal projection
type Ortho3DProjection struct {
	// The left most point
	Left float32
	// The right most point
	Right float32
	// The bottom most point
	Bottom float32
	// The top most point
	Top float32
	// The near plane
	Near float32
	// The far plane
	Far float32

	// old* cache the values last used to build projectionMatrix so
	// CalculateProjectionMatrix can skip redundant recomputation.
	oldLeft   float32
	oldRight  float32
	oldBottom float32
	oldTop    float32
	oldNear   float32
	oldFar    float32

	projectionMatrix mgl32.Mat4
}
// valuesChanged reports whether any bound differs from the values used for
// the currently cached projection matrix.
func (this *Ortho3DProjection) valuesChanged() bool {
	return this.Left != this.oldLeft || this.Right != this.oldRight || this.Bottom != this.oldBottom || this.Top != this.oldTop || this.Near != this.oldNear || this.Far != this.oldFar
}
// CalculateProjectionMatrix rebuilds the cached orthographic matrix, but only
// when one of the bounds changed since the last build.
func (this *Ortho3DProjection) CalculateProjectionMatrix() {
	if !this.valuesChanged() {
		return
	}
	this.projectionMatrix = mgl32.Ortho(this.Left, this.Right, this.Bottom, this.Top, this.Near, this.Far)
	// Remember the inputs so the next call can skip an unchanged rebuild.
	this.oldLeft = this.Left
	this.oldRight = this.Right
	this.oldBottom = this.Bottom
	this.oldTop = this.Top
	this.oldNear = this.Near
	this.oldFar = this.Far
}
// GetProjectionMatrix returns the cached projection matrix.
func (this *Ortho3DProjection) GetProjectionMatrix() mgl32.Mat4 {
	return this.projectionMatrix
}

// Update rebuilds the projection from the viewport size. Top is 0 and Bottom
// is the height, i.e. the y axis grows downward; Near/Far are left unchanged.
func (this *Ortho3DProjection) Update(newViewport Viewport) {
	this.Left = 0.0
	this.Right = float32(newViewport.Width)
	this.Top = 0.0
	this.Bottom = float32(newViewport.Height)
	this.CalculateProjectionMatrix()
}
// GetFrustum returns an empty corner array; frustum extraction is not
// implemented for the 3D orthographic projection.
func (this *Ortho3DProjection) GetFrustum() [8]mgl32.Vec3 {
	return [8]mgl32.Vec3{}
} | src/gohome/projection.go | 0.786336 | 0.549761 | projection.go | starcoder
package gltf
import "encoding/json"
var (
	// DefaultMatrix defines an identity matrix.
	DefaultMatrix = [16]float64{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}
	// DefaultRotation defines a quaternion without rotation.
	DefaultRotation = [4]float64{0, 0, 0, 1}
	// DefaultScale defines a scaling that does not modify the size of the object.
	DefaultScale = [3]float64{1, 1, 1}
	// DefaultTranslation defines a translation that does not move the object.
	DefaultTranslation = [3]float64{0, 0, 0}
)

// Zero-filled sentinels used to detect unset transform fields.
var (
	emptyMatrix   = [16]float64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	emptyRotation = [4]float64{0, 0, 0, 0}
	emptyScale    = [3]float64{0, 0, 0}
)
// The ComponentType is the datatype of components in the attribute. All valid values correspond to WebGL enums.
// 5125 (UNSIGNED_INT) is only allowed when the accessor contains indices.
type ComponentType uint16
const (
	// Float corresponds to a Float32Array.
	Float ComponentType = iota
	// Byte corresponds to a Int8Array.
	Byte
	// UnsignedByte corresponds to a Uint8Array.
	UnsignedByte
	// Short corresponds to a Int16Array.
	Short
	// UnsignedShort corresponds to a Uint16Array.
	UnsignedShort
	// UnsignedInt corresponds to a Uint32Array.
	UnsignedInt
)

// UnmarshalJSON unmarshal the component type with the correct default values.
// Unrecognized WebGL codes fall back to the map's zero value, i.e. Float.
func (c *ComponentType) UnmarshalJSON(data []byte) error {
	var tmp uint16
	err := json.Unmarshal(data, &tmp)
	if err == nil {
		*c = map[uint16]ComponentType{
			5120: Byte,
			5121: UnsignedByte,
			5122: Short,
			5123: UnsignedShort,
			5125: UnsignedInt,
			5126: Float,
		}[tmp]
	}
	return err
}

// MarshalJSON marshal the component type with the correct default values,
// emitting the numeric WebGL enum code.
func (c *ComponentType) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[ComponentType]uint16{
		Byte:          5120,
		UnsignedByte:  5121,
		Short:         5122,
		UnsignedShort: 5123,
		UnsignedInt:   5125,
		Float:         5126,
	}[*c])
}
// AccessorType specifies if the attribute is a scalar, vector, or matrix.
type AccessorType uint8
const (
	// Scalar corresponds to a single dimension value.
	Scalar AccessorType = iota
	// Vec2 corresponds to a two dimensions array.
	Vec2
	// Vec3 corresponds to a three dimensions array.
	Vec3
	// Vec4 corresponds to a four dimensions array.
	Vec4
	// Mat2 corresponds to a 2x2 matrix.
	Mat2
	// Mat3 corresponds to a 3x3 matrix.
	Mat3
	// Mat4 corresponds to a 4x4 matrix.
	Mat4
)

// UnmarshalJSON unmarshal the accessor type with the correct default values.
// Unrecognized strings fall back to the map's zero value, i.e. Scalar.
func (a *AccessorType) UnmarshalJSON(data []byte) error {
	var tmp string
	err := json.Unmarshal(data, &tmp)
	if err == nil {
		*a = map[string]AccessorType{
			"SCALAR": Scalar,
			"VEC2":   Vec2,
			"VEC3":   Vec3,
			"VEC4":   Vec4,
			"MAT2":   Mat2,
			"MAT3":   Mat3,
			"MAT4":   Mat4,
		}[tmp]
	}
	return err
}

// MarshalJSON marshal the accessor type with the correct default values,
// emitting the glTF string form (e.g. "VEC3").
func (a *AccessorType) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[AccessorType]string{
		Scalar: "SCALAR",
		Vec2:   "VEC2",
		Vec3:   "VEC3",
		Vec4:   "VEC4",
		Mat2:   "MAT2",
		Mat3:   "MAT3",
		Mat4:   "MAT4",
	}[*a])
}
// The Target that the GPU buffer should be bound to.
// NOTE(review): None and ElementArrayBuffer are declared without an explicit
// Target type (untyped constants), unlike ArrayBuffer — likely unintentional.
type Target uint16
const (
	// None is used when the buffer should not bound to a target, for example when referenced by an sparce indices.
	None = 0
	// ArrayBuffer corresponds to an array buffer.
	ArrayBuffer Target = 34962
	// ElementArrayBuffer corresponds to an element array buffer.
	ElementArrayBuffer = 34963
)
// Attribute is a map that each key corresponds to mesh attribute semantic
// (e.g. "POSITION", "NORMAL") and each value is the index of the accessor
// containing attribute's data.
type Attribute = map[string]uint32
// PrimitiveMode defines the type of primitives to render. All valid values correspond to WebGL enums.
// The zero value is Triangles, matching the glTF default mode (4).
type PrimitiveMode uint8
const (
	// Triangles corresponds to a Triangle primitive.
	Triangles PrimitiveMode = iota
	// Points corresponds to a Point primitive.
	Points
	// Lines corresponds to a Line primitive.
	Lines
	// LineLoop corresponds to a Line Loop primitive.
	LineLoop
	// LineStrip corresponds to a Line Strip primitive.
	LineStrip
	// TriangleStrip corresponds to a Triangle Strip primitive.
	TriangleStrip
	// TriangleFan corresponds to a Triangle Fan primitive.
	TriangleFan
)

// UnmarshalJSON unmarshal the primitive mode with the correct default values.
// Unrecognized codes fall back to the map's zero value, i.e. Triangles.
func (p *PrimitiveMode) UnmarshalJSON(data []byte) error {
	var tmp uint8
	err := json.Unmarshal(data, &tmp)
	if err == nil {
		*p = map[uint8]PrimitiveMode{
			0: Points,
			1: Lines,
			2: LineLoop,
			3: LineStrip,
			4: Triangles,
			5: TriangleStrip,
			6: TriangleFan,
		}[tmp]
	}
	return err
}

// MarshalJSON marshal the primitive mode with the correct default values,
// emitting the numeric WebGL enum code.
func (p *PrimitiveMode) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[PrimitiveMode]uint8{
		Points:        0,
		Lines:         1,
		LineLoop:      2,
		LineStrip:     3,
		Triangles:     4,
		TriangleStrip: 5,
		TriangleFan:   6,
	}[*p])
}
// The AlphaMode enumeration specifying the interpretation of the alpha value of the main factor and texture.
// The zero value is Opaque, matching the glTF default alpha mode.
type AlphaMode uint8
const (
	// Opaque corresponds to an Opaque material.
	Opaque AlphaMode = iota
	// Mask corresponds to a masked material.
	Mask
	// Blend corresponds to a Blend material.
	Blend
)

// UnmarshalJSON unmarshal the alpha mode with the correct default values.
// Unrecognized strings fall back to the map's zero value, i.e. Opaque.
func (a *AlphaMode) UnmarshalJSON(data []byte) error {
	var tmp string
	err := json.Unmarshal(data, &tmp)
	if err == nil {
		*a = map[string]AlphaMode{
			"OPAQUE": Opaque,
			"MASK":   Mask,
			"BLEND":  Blend,
		}[tmp]
	}
	return err
}

// MarshalJSON marshal the alpha mode with the correct default values,
// emitting the glTF string form.
func (a *AlphaMode) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[AlphaMode]string{
		Opaque: "OPAQUE",
		Mask:   "MASK",
		Blend:  "BLEND",
	}[*a])
}
// MagFilter is the magnification filter.
// The zero value is MagLinear.
type MagFilter uint16
const (
	// MagLinear corresponds to a linear magnification filter.
	MagLinear MagFilter = iota
	// MagNearest corresponds to a nearest magnification filter.
	MagNearest
)

// UnmarshalJSON unmarshal the mag filter with the correct default values.
// Unrecognized codes fall back to the map's zero value, i.e. MagLinear.
func (m *MagFilter) UnmarshalJSON(data []byte) error {
	var tmp uint16
	err := json.Unmarshal(data, &tmp)
	if err == nil {
		*m = map[uint16]MagFilter{
			9728: MagNearest,
			9729: MagLinear,
		}[tmp]
	}
	return err
}

// MarshalJSON marshal the mag filter with the correct default values,
// emitting the numeric WebGL enum code.
func (m *MagFilter) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[MagFilter]uint16{
		MagNearest: 9728,
		MagLinear:  9729,
	}[*m])
}
// MinFilter is the minification filter.
// The zero value is MinLinear.
type MinFilter uint16
const (
	// MinLinear corresponds to a linear minification filter.
	MinLinear MinFilter = iota
	// MinNearestMipMapLinear corresponds to a nearest mipmap linear minification filter.
	MinNearestMipMapLinear
	// MinNearest corresponds to a nearest minification filter.
	MinNearest
	// MinNearestMipMapNearest corresponds to a nearest mipmap nearest minification filter.
	MinNearestMipMapNearest
	// MinLinearMipMapNearest corresponds to a linear mipmap nearest minification filter.
	MinLinearMipMapNearest
	// MinLinearMipMapLinear corresponds to a linear mipmap linear minification filter.
	MinLinearMipMapLinear
)

// UnmarshalJSON unmarshal the min filter with the correct default values.
// Unrecognized codes fall back to the map's zero value, i.e. MinLinear.
func (m *MinFilter) UnmarshalJSON(data []byte) error {
	var tmp uint16
	err := json.Unmarshal(data, &tmp)
	if err == nil {
		*m = map[uint16]MinFilter{
			9728: MinNearest,
			9729: MinLinear,
			9984: MinNearestMipMapNearest,
			9985: MinLinearMipMapNearest,
			9986: MinNearestMipMapLinear,
			9987: MinLinearMipMapLinear,
		}[tmp]
	}
	return err
}

// MarshalJSON marshal the min filter with the correct default values,
// emitting the numeric WebGL enum code.
func (m *MinFilter) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[MinFilter]uint16{
		MinNearest:              9728,
		MinLinear:               9729,
		MinNearestMipMapNearest: 9984,
		MinLinearMipMapNearest:  9985,
		MinNearestMipMapLinear:  9986,
		MinLinearMipMapLinear:   9987,
	}[*m])
}
// WrappingMode is the wrapping mode of a texture.
// Values are internal enums; the numeric glTF wire codes (10497, 33071,
// 33648) are translated by MarshalJSON/UnmarshalJSON below.
type WrappingMode uint16
const (
// Repeat corresponds to a repeat wrapping.
Repeat WrappingMode = iota
// ClampToEdge corresponds to a clamp to edge wrapping.
ClampToEdge
// MirroredRepeat corresponds to a mirrored repeat wrapping.
MirroredRepeat
)
// UnmarshalJSON decodes the wrapping mode from its numeric glTF code.
// Unknown codes map to the zero (default) value.
func (w *WrappingMode) UnmarshalJSON(data []byte) error {
	var code uint16
	if err := json.Unmarshal(data, &code); err != nil {
		return err
	}
	switch code {
	case 33071:
		*w = ClampToEdge
	case 33648:
		*w = MirroredRepeat
	case 10497:
		*w = Repeat
	default:
		*w = WrappingMode(0)
	}
	return nil
}
// MarshalJSON encodes the wrapping mode as its numeric glTF code.
func (w *WrappingMode) MarshalJSON() ([]byte, error) {
	var code uint16
	switch *w {
	case ClampToEdge:
		code = 33071
	case MirroredRepeat:
		code = 33648
	case Repeat:
		code = 10497
	}
	return json.Marshal(code)
}
// Interpolation algorithm.
// Values are internal enums; the glTF string names ("LINEAR", "STEP",
// "CUBICSPLINE") are translated by MarshalJSON/UnmarshalJSON below.
type Interpolation uint8
const (
// Linear corresponds to a linear interpolation.
Linear Interpolation = iota
// Step corresponds to a step interpolation.
Step
// CubicSpline corresponds to a cubic spline interpolation.
CubicSpline
)
// UnmarshalJSON decodes the interpolation from its glTF string name.
// Unknown names map to the zero (default) value.
func (i *Interpolation) UnmarshalJSON(data []byte) error {
	var name string
	if err := json.Unmarshal(data, &name); err != nil {
		return err
	}
	switch name {
	case "LINEAR":
		*i = Linear
	case "STEP":
		*i = Step
	case "CUBICSPLINE":
		*i = CubicSpline
	default:
		*i = Interpolation(0)
	}
	return nil
}
// MarshalJSON encodes the interpolation as its glTF string name; values
// outside the known enum encode as an empty string.
func (i *Interpolation) MarshalJSON() ([]byte, error) {
	var name string
	switch *i {
	case Linear:
		name = "LINEAR"
	case Step:
		name = "STEP"
	case CubicSpline:
		name = "CUBICSPLINE"
	}
	return json.Marshal(name)
}
// TRSProperty defines a local space transformation.
// TRSproperties are converted to matrices and postmultiplied in the T * R * S order to compose the transformation matrix.
// The lowercase glTF string names are translated by
// MarshalJSON/UnmarshalJSON below.
type TRSProperty uint8
const (
// Translation corresponds to a translation transform.
Translation TRSProperty = iota
// Rotation corresponds to a rotation transform.
Rotation
// Scale corresponds to a scale transform.
Scale
// Weights corresponds to a weights transform.
Weights
)
// UnmarshalJSON decodes the TRSProperty from its glTF string name.
// Unknown names map to the zero (default) value.
func (t *TRSProperty) UnmarshalJSON(data []byte) error {
	var name string
	if err := json.Unmarshal(data, &name); err != nil {
		return err
	}
	switch name {
	case "translation":
		*t = Translation
	case "rotation":
		*t = Rotation
	case "scale":
		*t = Scale
	case "weights":
		*t = Weights
	default:
		*t = TRSProperty(0)
	}
	return nil
}
// MarshalJSON encodes the TRSProperty as its glTF string name; values
// outside the known enum encode as an empty string.
func (t *TRSProperty) MarshalJSON() ([]byte, error) {
	var name string
	switch *t {
	case Translation:
		name = "translation"
	case Rotation:
		name = "rotation"
	case Scale:
		name = "scale"
	case Weights:
		name = "weights"
	}
	return json.Marshal(name)
}
const (
// glbHeaderMagic is "glTF" read as a little-endian uint32.
glbHeaderMagic = 0x46546c67
// glbChunkJSON is "JSON" read as a little-endian uint32.
glbChunkJSON = 0x4e4f534a
// glbChunkBIN is "BIN\x00" read as a little-endian uint32.
glbChunkBIN = 0x004e4942
)
// chunkHeader is the per-chunk header of a binary glTF (GLB) container.
type chunkHeader struct {
Length uint32 // chunk payload length in bytes
Type uint32 // chunk type — presumably glbChunkJSON or glbChunkBIN; confirm against the reader
}
// glbHeader is the fixed header at the start of a binary glTF (GLB) file,
// followed immediately by the header of the first (JSON) chunk.
type glbHeader struct {
Magic uint32 // expected to equal glbHeaderMagic
Version uint32
Length uint32 // total file length in bytes — TODO confirm against the reader
JSONHeader chunkHeader
}
// Data-URI prefixes recognized for base64-embedded glTF buffers and images.
// (The closing parenthesis of this block had dataset-extraction residue
// fused onto it, which was not valid Go.)
const (
	mimetypeApplicationOctet = "data:application/octet-stream;base64"
	mimetypeImagePNG         = "data:image/png;base64"
	mimetypeImageJPG         = "data:image/jpeg;base64"
)
package nomad
import (
"sort"
"sync"
"time"
"github.com/ugorji/go/codec"
)
// TimeTable is used to associate a Raft index with a timestamp.
// This is used so that we can quickly go from a timestamp to an
// index or visa versa.
type TimeTable struct {
granularity time.Duration // minimum time spacing between stored entries
limit time.Duration // retention window the table is sized for (limit/granularity slots)
table []TimeTableEntry // entries ordered newest-first (see Witness)
l sync.RWMutex // guards table
}
// TimeTableEntry is used to track a time and index
type TimeTableEntry struct {
Index uint64 // Raft index observed at Time
Time time.Time
}
// NewTimeTable creates a time table that stores one entry per granularity
// interval over the given retention limit, so the backing storage holds
// roughly limit/granularity entries (at least one).
func NewTimeTable(granularity time.Duration, limit time.Duration) *TimeTable {
	slots := limit / granularity
	if slots < 1 {
		slots = 1
	}
	return &TimeTable{
		granularity: granularity,
		limit:       limit,
		table:       make([]TimeTableEntry, 1, slots),
	}
}
// Serialize writes the table's entries to enc. The read lock is held so a
// concurrent Witness cannot shift entries mid-encode.
func (t *TimeTable) Serialize(enc *codec.Encoder) error {
t.l.RLock()
defer t.l.RUnlock()
return enc.Encode(t.table)
}
// Deserialize restores the table state from dec by replaying the decoded
// entries through Witness, oldest first, so the newest-first ordering
// invariant is rebuilt.
func (t *TimeTable) Deserialize(dec *codec.Decoder) error {
	var entries []TimeTableEntry
	if err := dec.Decode(&entries); err != nil {
		return err
	}
	for i := len(entries) - 1; i >= 0; i-- {
		t.Witness(entries[i].Index, entries[i].Time)
	}
	return nil
}
// Witness is used to witness a new index and time.
// Entries are kept newest-first in t.table. An observation is dropped when
// its index is older than the newest entry, or when it is within one
// granularity interval of the newest entry's time.
func (t *TimeTable) Witness(index uint64, when time.Time) {
t.l.Lock()
defer t.l.Unlock()
// Ensure monotonic indexes
if t.table[0].Index > index {
return
}
// Skip if we already have a recent enough entry
if when.Sub(t.table[0].Time) < t.granularity {
return
}
// Grow the table if we haven't reached the size
if len(t.table) < cap(t.table) {
t.table = append(t.table, TimeTableEntry{})
}
// Add this entry: shift everything right by one slot (dropping the
// oldest entry once at capacity) and write the new entry at the front.
copy(t.table[1:], t.table[:len(t.table)-1])
t.table[0].Index = index
t.table[0].Time = when
}
// NearestIndex returns the index of the newest entry whose time is at or
// before the given time, or 0 when every stored entry is newer.
func (t *TimeTable) NearestIndex(when time.Time) uint64 {
	t.l.RLock()
	defer t.l.RUnlock()
	// Entries are newest-first, so this locates the first entry at or
	// before `when`. sort.Search returns a value in [0, n]; the previous
	// `idx >= 0` check was always true and has been removed.
	n := len(t.table)
	idx := sort.Search(n, func(i int) bool {
		return !t.table[i].Time.After(when)
	})
	if idx == n {
		return 0
	}
	return t.table[idx].Index
}
// NearestTime returns the nearest time older than the given index
func (t *TimeTable) NearestTime(index uint64) time.Time {
t.l.RLock()
defer t.l.RUnlock()
n := len(t.table)
idx := sort.Search(n, func(i int) bool {
return t.table[i].Index <= index
})
if idx < n && idx >= 0 {
return t.table[idx].Time
}
return time.Time{}
} | vendor/github.com/hashicorp/nomad/nomad/timetable.go | 0.669096 | 0.409398 | timetable.go | starcoder |
package main
import (
"fmt"
"github.com/pkg/errors"
"gorgonia.org/gorgonia"
"gorgonia.org/tensor"
)
// convLayer holds one convolutional layer's hyperparameters, the gorgonia
// nodes it allocates, and the intermediate outputs produced by ToNode.
type convLayer struct {
filters int // number of output channels
padding int // spatial padding applied on both axes
kernelSize int // square kernel side length
stride int // stride applied on both axes
activation string // activation name; only "leaky" is acted upon in ToNode
batchNormalize int // >0 enables batch normalization in ToNode
bias bool // when true a bias tensor is allocated and added (non-batch-norm path)
layerIndex int // position in the network; used to name graph nodes
//learnables
means, vars *gorgonia.Node
//loadables
kernels, gamma, beta, biases *gorgonia.Node
convOut, bnOut, actOut *gorgonia.Node // convolution, batch-norm and activation outputs
outShape tensor.Shape // output shape, set by ToNode
bnOp *gorgonia.BatchNormOp // batch-norm op handle, set when batchNormalize > 0
}
// String renders the layer's hyperparameters for logging.
func (l *convLayer) String() string {
	const format = "Convolution layer: Filters->%[1]d Padding->%[2]d Kernel->%[3]dx%[3]d Stride->%[4]d Activation->%[5]s Batch->%[6]d Bias->%[7]t"
	return fmt.Sprintf(format, l.filters, l.padding, l.kernelSize, l.stride, l.activation, l.batchNormalize, l.bias)
}
// Type returns this layer's type identifier, "convolutional".
func (l *convLayer) Type() string {
	const layerType = "convolutional"
	return layerType
}
func (l *convLayer) ToNode(g *gorgonia.ExprGraph, input ...*gorgonia.Node) (*gorgonia.Node, error) {
l.kernels = gorgonia.NewTensor(g, tensor.Float32, 4, gorgonia.WithShape(l.filters, input[0].Shape()[1], l.kernelSize, l.kernelSize), gorgonia.WithName(fmt.Sprintf("conv_%d", l.layerIndex)))
var err error
l.convOut, err = gorgonia.Conv2d(input[0], l.kernels, tensor.Shape{l.kernelSize, l.kernelSize}, []int{l.padding, l.padding}, []int{l.stride, l.stride}, []int{1, 1})
if err != nil {
return &gorgonia.Node{}, errors.Wrap(err, "Can't prepare convolution operation")
}
if l.bias {
l.biases = gorgonia.NewTensor(g, tensor.Float32, 4, gorgonia.WithShape(l.convOut.Shape()...), gorgonia.WithName(fmt.Sprintf("bias_%d", l.layerIndex)))
}
if l.batchNormalize > 0 {
l.beta = gorgonia.NewTensor(g, tensor.Float32, 4, gorgonia.WithShape(l.convOut.Shape().Clone()...), gorgonia.WithName(fmt.Sprintf("beta_%d", l.layerIndex)))
l.gamma = gorgonia.NewTensor(g, tensor.Float32, 4, gorgonia.WithShape(l.convOut.Shape().Clone()...), gorgonia.WithName(fmt.Sprintf("gamma_%d", l.layerIndex)))
l.vars = gorgonia.NewTensor(g, tensor.Float32, 1, gorgonia.WithShape(l.filters), gorgonia.WithName(fmt.Sprintf("vars_%d", l.layerIndex)))
l.means = gorgonia.NewTensor(g, tensor.Float32, 1, gorgonia.WithShape(l.filters), gorgonia.WithName(fmt.Sprintf("means_%d", l.layerIndex)))
l.bnOut, l.gamma, l.beta, l.bnOp, err = gorgonia.BatchNorm(l.convOut, l.gamma, l.beta, 0.1, 10e-5)
if err != nil {
return &gorgonia.Node{}, errors.Wrap(err, "Can't prepare batch normalization operation")
}
l.outShape = l.bnOut.Shape()
} else {
fmt.Println("SHP:", l.biases.Shape(), l.convOut.Shape())
l.convOut, err = gorgonia.Add(l.convOut, l.biases)
if err != nil {
panic(err)
}
l.outShape = l.convOut.Shape()
l.bnOut = l.convOut
}
if l.activation == "leaky" {
var err error
l.actOut, err = gorgonia.LeakyRelu(l.bnOut, 0.1)
if err != nil {
return &gorgonia.Node{}, errors.Wrap(err, "Can't prepare activation operation")
}
return l.actOut, nil
}
return l.convOut, nil
} | examples/tiny-yolo-v3-coco/conv_layer.go | 0.533884 | 0.40928 | conv_layer.go | starcoder |
package main
import (
"fmt"
"log"
"sort"
"strings"
"sync"
"github.com/nfisher/mdindexer/edit"
)
var (
// ErrWordNotIndexed is returned by Index.Search when the index holds no words.
ErrWordNotIndexed = fmt.Errorf("index does not contain word")
)
// Document pairs a document's name with its per-word occurrence counts,
// as supplied to Index.Update.
type Document struct {
Name string
WordCount map[string]int
}
// New creates an empty index with capacity reserved for `size` document
// names.
func New(size int) *Index {
	idx := &Index{
		Words: map[string]*WordColumn{},
		Names: make([]string, 0, size),
	}
	return idx
}
// Index is a matrix that counts word occurrences in documents.
// It is an inverted index: each word maps to a sparse column of
// (document slot, count) pairs.
type Index struct {
Words map[string]*WordColumn // word -> per-document counts
Names []string // document slot -> name, in insertion order
sync.RWMutex `msg:"-"` // guards Words and Names; excluded from serialization
}
// Capacity reports how many document slots the index has allocated — the
// capacity of the backing name slice, not the number of documents stored
// (the previous comment claimed the latter).
func (z *Index) Capacity() int {
	z.RLock()
	defer z.RUnlock()
	return cap(z.Names)
}
// WordCount reports how many distinct words the index currently holds.
func (z *Index) WordCount() int {
	z.RLock()
	defer z.RUnlock()
	return len(z.Words)
}
// Update incorporates the document's word count frequencies into the index.
// A document already present (matched by name) is updated in place: its new
// counts are upserted, then words it no longer contains are cleaned out.
func (z *Index) Update(doc *Document) {
z.Lock()
defer z.Unlock()
var isNew bool
pos := z.byName(doc.Name)
if pos == nameNotFound {
// First sighting of this document: assign it the next slot.
pos = len(z.Names)
z.Names = append(z.Names, doc.Name)
isNew = true
}
cur := make(map[string]bool)
for word, count := range doc.WordCount {
cur[word] = true
col, ok := z.Words[word]
if !ok {
col = NewColumn(word)
}
col.Upsert(pos, count)
z.Words[word] = col
}
if !isNew {
// Drop counts for words this revision of the document no longer has.
z.clean(pos, cur)
}
}
// clean removes document pos's stale counts for every word absent from
// current, deleting word columns that become empty as a result.
func (z *Index) clean(pos int, current map[string]bool) {
	for word, column := range z.Words {
		if current[word] {
			continue
		}
		column.Remove(pos)
		if column.Empty() {
			delete(z.Words, word)
		}
	}
}
// Search returns the list of documents that contain needle. When needle is
// not indexed, the indexed words closest to it by edit distance are
// searched instead — all words tied for the minimum distance.
func (z *Index) Search(needle string) (DocList, error) {
	z.RLock()
	defer z.RUnlock()
	if 1 > len(z.Words) {
		return nil, ErrWordNotIndexed
	}
	var words = Words{{needle, 0}}
	_, ok := z.Words[needle]
	if !ok {
		// Fuzzy fallback: rank every indexed word by edit distance.
		words = Words{}
		for k := range z.Words {
			d := edit.Distance2(needle, k)
			words = append(words, WordDist{k, d})
		}
		sort.Sort(words)
		// Keep only the leading run of minimum-distance words. The old
		// loop assigned `end = i` after the break test, which both
		// dropped the last tied word and, when the second word was
		// farther away, kept it anyway.
		end := len(words)
		for i := 1; i < len(words); i++ {
			if words[i].Distance > words[0].Distance {
				end = i
				break
			}
		}
		words = words[:end]
	}
	pos := make(map[string]int)
	var docs = make(DocList, 0, len(words))
	for _, word := range words {
		col := z.Words[word.Word]
		col.Apply(func(id int, count int) {
			if count > 0 {
				doc := z.byId(id)
				relevance := DocRelevance{Document: doc}
				relevance.Count = count
				relevance.Distance = word.Distance
				p, ok := pos[doc]
				if !ok {
					p = len(docs)
					docs = append(docs, relevance)
					pos[doc] = p
					return
				}
				// Keep the best (smallest-distance) match per document.
				if docs[p].Distance < relevance.Distance {
					return
				}
				docs[p].Distance = relevance.Distance
				docs[p].Count = relevance.Count
			}
		})
	}
	sort.Slice(docs, func(i, j int) bool {
		return docs[i].Count < docs[j].Count
	})
	return docs, nil
}
// nameNotFound is returned by byName when the document is not indexed.
const nameNotFound = -1

// byName returns the slot of name in z.Names, or nameNotFound.
func (z *Index) byName(name string) int {
	for slot, known := range z.Names {
		if known == name {
			return slot
		}
	}
	return nameNotFound
}
// byId maps a document slot back to its name. Panics on an unknown slot.
func (z *Index) byId(id int) string {
	name := z.Names[id]
	return name
}
// NewColumn returns an empty word column for the given word, with a small
// initial document capacity.
func NewColumn(name string) *WordColumn {
	col := &WordColumn{Name: name}
	col.Docs = make([][2]int, 0, 64)
	return col
}
// WordColumn maintains the frequency a word occurs in the named document.
type WordColumn struct {
Name string // the indexed word
Docs [][2]int // sparse (document slot, count) pairs; count 0 marks a removed entry
// msgpack/json can't serialise map with int keys
idx map[int]int // document slot -> position in Docs; rebuilt lazily after deserialization
}
// Upsert records `count` occurrences of this column's word for document
// slot pos, inserting a new entry or overwriting an existing one.
// Counts below 1 are not stored to keep the matrix sparse; note that in
// that case an existing entry is left untouched — presumably Index.clean
// handles removal. TODO confirm.
func (z *WordColumn) Upsert(pos int, count int) {
// lazily build the index so a read from file does not break
if z.idx == nil {
z.lazyIndex()
}
i, ok := z.idx[pos]
// make a sparse matrices, don't store count < 1
if count < 1 {
return
}
tup := [2]int{pos, count}
if !ok {
// New document slot for this word: append and remember its position.
i := len(z.Docs)
z.idx[pos] = i
z.Docs = append(z.Docs, tup)
return
}
z.Docs[i] = tup
}
// lazyIndex rebuilds the slot->position map from Docs; required after
// deserialization since idx is not persisted.
func (z *WordColumn) lazyIndex() {
	z.idx = map[int]int{}
	for position, tup := range z.Docs {
		z.idx[tup[0]] = position
	}
}
// Apply invokes each(documentSlot, count) for every entry with a positive
// count, skipping removed (zero-count) placeholders.
func (z *WordColumn) Apply(each func(int, int)) {
	for _, entry := range z.Docs {
		if slot, count := entry[0], entry[1]; count > 0 {
			each(slot, count)
		}
	}
}
// Empty reports whether the column holds no live (positive-count) entries.
// It short-circuits on the first live entry instead of counting all of
// them through Apply as the previous implementation did.
func (z *WordColumn) Empty() bool {
	for _, tup := range z.Docs {
		if tup[1] > 0 {
			return false
		}
	}
	return true
}
// Remove zeroes out the count stored for document slot pos. The slot is
// kept as a zero-count placeholder rather than compacting the slice
// (compaction remains a possible future improvement).
func (z *WordColumn) Remove(pos int) {
	position, ok := z.idx[pos]
	if !ok {
		return
	}
	delete(z.idx, pos)
	z.Docs[position] = [2]int{pos, 0}
}
// Search executes the query against the index returning a document list.
// The query is lowercased and split on spaces; a document must match every
// term (terms whose index lookup errors are skipped). Results are ordered
// by ascending rank, ties broken by edit distance to the document name.
func Search(query string, index *Index) ScoreList {
var result = make(Scores)
if query == "" {
return ScoreList{}
}
query = strings.ToLower(query)
terms := strings.Split(query, " ")
union := make(StrSet)
for i, term := range terms {
docs, err := index.Search(term)
if err != nil {
log.Printf("search=failed query=%s error='%v'\n", term, err)
continue
}
s := make(StrSet)
for _, d := range docs {
n := d.Document
if i == 0 {
// The first term seeds the candidate set.
result[n] = d.Distance + d.Rank
s[n] = true
continue
}
if !union[n] {
// Document failed to match an earlier term; drop it.
continue
}
result[n] += d.Distance + d.Rank
s[n] = true
}
union = s
}
list := make(ScoreList, 0, len(union))
for n := range result {
if !union[n] {
continue
}
// Subtract the length difference so NameDistance reflects edits
// beyond a mere length mismatch.
length := len(n) - len(query)
dist := edit.Distance2(query, n) - length
list = append(list, Score{Document: n, Rank: result[n], NameDistance: dist})
}
sort.Slice(list, func(i, j int) bool {
return list[i].NameDistance < list[j].NameDistance
})
sort.SliceStable(list, func(i, j int) bool {
return list[i].Rank < list[j].Rank
})
return list
}
// Score is one ranked search hit.
type Score struct {
Document string
Rank int // accumulated distance-based rank; lower is better (primary sort key)
NameDistance int // length-adjusted edit distance to the document name (tie-breaker)
}
// ScoreList is an ordered list of search hits.
type ScoreList []Score
// Scores accumulates per-document rank totals while a search runs.
type Scores map[string]int
// StrSet is a simple string set.
type StrSet map[string]bool
// Union returns the keys present in both s and o.
//
// NOTE(review): despite its name this computes the set *intersection*.
// Renaming would change the public interface, so the behavior is only
// documented here — confirm no caller relies on the name's implication.
func (s StrSet) Union(o StrSet) StrSet {
r := make(StrSet)
for k := range s {
if o[k] {
r[k] = true
}
}
return r
}
// DocList is the result of Index.Search.
type DocList []DocRelevance
// DocRelevance scores one document for a single matched word.
type DocRelevance struct {
Document string // document name
Count int // occurrences of the matched word in the document
Distance int // edit distance between the query term and the matched word
Rank int // never assigned by Index.Search in this file; appears always zero — TODO confirm
}
type WordDist struct {
Word string
Distance int
}
type Words []WordDist
func (z Words) Len() int { return len(z) }
func (z Words) Swap(i, j int) { z[i], z[j] = z[j], z[i] }
func (z Words) Less(i, j int) bool { return z[i].Distance < z[j].Distance } | index.go | 0.576304 | 0.412234 | index.go | starcoder |
package basic
import "strings"
// DropWhilePtrTest returns the generated-test template for the
// DropWhile<FTYPE>Ptr helper; the code generator substitutes the <TYPE>
// (Go type) and <FTYPE> (name fragment) placeholders.
func DropWhilePtrTest() string {
return `
func TestDropWhile<FTYPE>Ptr(t *testing.T) {
// Test : drop the numbers as long as condition match and returns remaining number in the list once condition fails
var v2 <TYPE> = 2
var v3 <TYPE> = 3
var v4 <TYPE> = 4
var v5 <TYPE> = 5
expectedNewList := []*<TYPE>{&v3, &v4, &v5}
NewList := DropWhile<FTYPE>Ptr(isEven<FTYPE>Ptr, []*<TYPE>{&v4, &v2, &v3, &v4, &v5})
if *NewList[0] != *expectedNewList[0] || *NewList[1] != *expectedNewList[1] || *NewList[2] != *expectedNewList[2] {
t.Errorf("DropWhile<FTYPE>Ptr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
if len(DropWhile<FTYPE>Ptr(nil, nil)) > 0 {
t.Errorf("DropWhile<FTYPE>Ptr failed.")
}
if len(DropWhile<FTYPE>Ptr(nil, []*<TYPE>{})) > 0 {
t.Errorf("DropWhile<FTYPE>Ptr failed.")
t.Errorf(reflect.String.String())
}
NewList = DropWhile<FTYPE>Ptr(isEven<FTYPE>Ptr, []*<TYPE>{&v4})
if len(NewList) != 0 {
t.Errorf("DropWhile<FTYPE>Ptr failed")
}
}
`
}
// DropWhilePtrBoolTest returns the bool-specialized test template for
// DropWhile<FTYPE>Ptr. (The previous comment, "removes duplicates", was
// copied from another template and did not describe this function.)
func DropWhilePtrBoolTest() string {
return `
func TestDropWhile<FTYPE>Ptr(t *testing.T) {
var vt <TYPE> = true
var vf <TYPE> = false
expectedNewList := []*<TYPE>{&vf, &vt}
NewList := DropWhile<FTYPE>Ptr(isTrue<FTYPE>Ptr, []*<TYPE>{&vt, &vf, &vt})
if *NewList[0] != *expectedNewList[0] || *NewList[1] != *expectedNewList[1] {
t.Errorf("DropWhile<FTYPE>Ptr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
}
`
}
// ReplaceActivityDropWhilePtr post-processes the generated string-typed
// test (TestDropWhileStrPtr): it removes the stray
// `t.Errorf(reflect.String.String())` debug line that the generic
// template emits.
func ReplaceActivityDropWhilePtr(code string) string {
s1 := `func TestDropWhileStrPtr(t *testing.T) {
// Test : drop the numbers as long as condition match and returns remaining number in the list once condition fails
var v2 string = "2"
var v3 string = "3"
var v4 string = "4"
var v5 string = "5"
expectedNewList := []*string{&v3, &v4, &v5}
NewList := DropWhileStrPtr(isEvenStrPtr, []*string{&v4, &v2, &v3, &v4, &v5})
if *NewList[0] != *expectedNewList[0] || *NewList[1] != *expectedNewList[1] || *NewList[2] != *expectedNewList[2] {
t.Errorf("DropWhileStrPtr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
if len(DropWhileStrPtr(nil, nil)) > 0 {
t.Errorf("DropWhileStrPtr failed.")
}
if len(DropWhileStrPtr(nil, []*string{})) > 0 {
t.Errorf("DropWhileStrPtr failed.")
t.Errorf(reflect.String.String())
}
NewList = DropWhileStrPtr(isEvenStrPtr, []*string{&v4})
if len(NewList) != 0 {
t.Errorf("DropWhileStrPtr failed")
}
}`
s2 := `func TestDropWhileStrPtr(t *testing.T) {
// Test : drop the numbers as long as condition match and returns remaining number in the list once condition fails
var v2 string = "2"
var v3 string = "3"
var v4 string = "4"
var v5 string = "5"
expectedNewList := []*string{&v3, &v4, &v5}
NewList := DropWhileStrPtr(isEvenStrPtr, []*string{&v4, &v2, &v3, &v4, &v5})
if *NewList[0] != *expectedNewList[0] || *NewList[1] != *expectedNewList[1] || *NewList[2] != *expectedNewList[2] {
t.Errorf("DropWhileStrPtr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
if len(DropWhileStrPtr(nil, nil)) > 0 {
t.Errorf("DropWhileStrPtr failed.")
}
if len(DropWhileStrPtr(nil, []*string{})) > 0 {
t.Errorf("DropWhileStrPtr failed.")
}
NewList = DropWhileStrPtr(isEvenStrPtr, []*string{&v4})
if len(NewList) != 0 {
t.Errorf("DropWhileStrPtr failed")
}
}`
code = strings.Replace(code, s1, s2, -1)
return code
}
//**********DropWhilePtrErrTest************************
// DropWhilePtrErrTest returns the generated-test template for the
// error-returning DropWhile<FTYPE>PtrErr helper; <TYPE>/<FTYPE>
// placeholders are substituted by the code generator.
func DropWhilePtrErrTest() string {
return `
func TestDropWhile<FTYPE>PtrErr(t *testing.T) {
// Test : drop the numbers as long as condition match and returns remaining number in the list once condition fails
var v2 <TYPE> = 2
var v3 <TYPE> = 3
var v4 <TYPE> = 4
var v5 <TYPE> = 5
var v0 <TYPE>
expectedNewList := []*<TYPE>{&v3, &v4, &v5}
NewList, _ := DropWhile<FTYPE>PtrErr(isEven<FTYPE>PtrErr, []*<TYPE>{&v4, &v2, &v3, &v4, &v5})
if *NewList[0] != *expectedNewList[0] || *NewList[1] != *expectedNewList[1] || *NewList[2] != *expectedNewList[2] {
t.Errorf("DropWhile<FTYPE>PtrErr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
_, err := DropWhile<FTYPE>PtrErr(isEven<FTYPE>PtrErr, []*<TYPE>{&v4, &v2, &v0, &v4, &v5})
if err == nil {
t.Errorf("DropWhile<FTYPE>PtrErr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
r, _ := DropWhile<FTYPE>PtrErr(nil, nil)
if len(r) > 0 {
t.Errorf("DropWhile<FTYPE>Ptr failed.")
}
r, _ = DropWhile<FTYPE>PtrErr(nil, []*<TYPE>{})
if len(r) > 0 {
t.Errorf("DropWhile<FTYPE>Ptr failed.")
}
NewList, _ = DropWhile<FTYPE>PtrErr(isEven<FTYPE>PtrErr, []*<TYPE>{&v4})
if len(NewList) != 0 {
t.Errorf("DropWhile<FTYPE>PtrErr failed")
}
}
`
}
// DropWhilePtrErrBoolTest returns the bool-specialized test template for
// DropWhile<FTYPE>PtrErr, including the isTrueBoolPtrErr helper
// predicates the generated test relies on.
func DropWhilePtrErrBoolTest() string {
return `
func TestDropWhile<FTYPE>PtrErr(t *testing.T) {
var vt <TYPE> = true
var vf <TYPE> = false
expectedNewList := []*<TYPE>{&vf, &vt}
NewList, _ := DropWhile<FTYPE>PtrErr(isTrue<FTYPE>PtrErr, []*<TYPE>{&vt, &vf, &vt})
if *NewList[0] != *expectedNewList[0] || *NewList[1] != *expectedNewList[1] {
t.Errorf("DropWhile<FTYPE>PtrErr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
_, err := DropWhile<FTYPE>PtrErr(isTrue<FTYPE>PtrErr2, []*<TYPE>{&vt, &vf, &vt})
if err == nil {
t.Errorf("DropWhile<FTYPE>PtrErr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
r, _ := DropWhile<FTYPE>PtrErr(nil, nil)
if len(r) > 0 {
t.Errorf("DropWhile<FTYPE>Ptr failed.")
}
r, _ = DropWhile<FTYPE>PtrErr(nil, []*<TYPE>{})
if len(r) > 0 {
t.Errorf("DropWhile<FTYPE>Ptr failed.")
}
r, _ = DropWhile<FTYPE>PtrErr(isTrue<FTYPE>PtrErr2, []*<TYPE>{})
if len(r) > 0 {
t.Errorf("DropWhile<FTYPE>Ptr failed.")
}
}
func isTrueBoolPtrErr(v *bool) (bool, error) {
return *v == true, nil
}
func isTrueBoolPtrErr2(v *bool) (bool, error) {
if *v == false {
return false, errors.New("false is invalid for this test")
}
return *v == true, nil
}
`
}
// ReplaceActivityDropWhilePtrErr post-processes generated code in two
// steps: it fixes the import block of the IntPtrErr test (the generic
// template imports reflect and blanks errors, while this test needs
// errors and not reflect), and it gives the string-typed v0 an explicit
// "0" value so the error-triggering input is well-defined for strings.
func ReplaceActivityDropWhilePtrErr(code string) string {
s1 := `import (
_ "errors"
"reflect"
"testing"
)
func TestDropWhileIntPtrErr(t *testing.T) {`
s2 := `import (
"errors"
"testing"
)
func TestDropWhileIntPtrErr(t *testing.T) {`
code = strings.Replace(code, s1, s2, -1)
s1 = `func TestDropWhileStrPtrErr(t *testing.T) {
// Test : drop the numbers as long as condition match and returns remaining number in the list once condition fails
var v2 string = "2"
var v3 string = "3"
var v4 string = "4"
var v5 string = "5"
var v0 string
expectedNewList := []*string{&v3, &v4, &v5}
NewList, _ := DropWhileStrPtrErr(isEvenStrPtrErr, []*string{&v4, &v2, &v3, &v4, &v5})
if *NewList[0] != *expectedNewList[0] || *NewList[1] != *expectedNewList[1] || *NewList[2] != *expectedNewList[2] {
t.Errorf("DropWhileStrPtrErr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
_, err := DropWhileStrPtrErr(isEvenStrPtrErr, []*string{&v4, &v2, &v0, &v4, &v5})
if err == nil {
t.Errorf("DropWhileStrPtrErr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
r, _ := DropWhileStrPtrErr(nil, nil)
if len(r) > 0 {
t.Errorf("DropWhileStrPtr failed.")
}
r, _ = DropWhileStrPtrErr(nil, []*string{})
if len(r) > 0 {
t.Errorf("DropWhileStrPtr failed.")
}
NewList, _ = DropWhileStrPtrErr(isEvenStrPtrErr, []*string{&v4})
if len(NewList) != 0 {
t.Errorf("DropWhileStrPtrErr failed")
}
}`
s2 = `func TestDropWhileStrPtrErr(t *testing.T) {
// Test : drop the numbers as long as condition match and returns remaining number in the list once condition fails
var v2 string = "2"
var v3 string = "3"
var v4 string = "4"
var v5 string = "5"
var v0 string = "0"
expectedNewList := []*string{&v3, &v4, &v5}
NewList, _ := DropWhileStrPtrErr(isEvenStrPtrErr, []*string{&v4, &v2, &v3, &v4, &v5})
if *NewList[0] != *expectedNewList[0] || *NewList[1] != *expectedNewList[1] || *NewList[2] != *expectedNewList[2] {
t.Errorf("DropWhileStrPtrErr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
_, err := DropWhileStrPtrErr(isEvenStrPtrErr, []*string{&v4, &v2, &v0, &v4, &v5})
if err == nil {
t.Errorf("DropWhileStrPtrErr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
r, _ := DropWhileStrPtrErr(nil, nil)
if len(r) > 0 {
t.Errorf("DropWhileStrPtr failed.")
}
r, _ = DropWhileStrPtrErr(nil, []*string{})
if len(r) > 0 {
t.Errorf("DropWhileStrPtr failed.")
}
NewList, _ = DropWhileStrPtrErr(isEvenStrPtrErr, []*string{&v4})
if len(NewList) != 0 {
t.Errorf("DropWhileStrPtrErr failed")
}
}`
code = strings.Replace(code, s1, s2, -1)
return code
}
//**********DropWhileErrTest************************
// DropWhileErrTest returns the generated-test template for the
// value-slice, error-returning DropWhile<FTYPE>Err helper; <TYPE>/<FTYPE>
// placeholders are substituted by the code generator.
func DropWhileErrTest() string {
return `
func TestDropWhile<FTYPE>Err(t *testing.T) {
// Test : drop the numbers as long as condition match and returns remaining number in the list once condition fails
var v2 <TYPE> = 2
var v3 <TYPE> = 3
var v4 <TYPE> = 4
var v5 <TYPE> = 5
var v0 <TYPE>
expectedNewList := []<TYPE>{v3, v4, v5}
NewList, _ := DropWhile<FTYPE>Err(isEven<FTYPE>Err, []<TYPE>{v4, v2, v3, v4, v5})
if NewList[0] != expectedNewList[0] || NewList[1] != expectedNewList[1] || NewList[2] != expectedNewList[2] {
t.Errorf("DropWhile<FTYPE>Err failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
_, err := DropWhile<FTYPE>Err(isEven<FTYPE>Err, []<TYPE>{v4, v2, v0, v4, v5})
if err == nil {
t.Errorf("DropWhile<FTYPE>Err failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
r, _ := DropWhile<FTYPE>Err(nil, nil)
if len(r) > 0 {
t.Errorf("DropWhile<FTYPE>Err failed.")
}
r, _ = DropWhile<FTYPE>Err(nil, []<TYPE>{})
if len(r) > 0 {
t.Errorf("DropWhile<FTYPE>Err failed.")
}
NewList, _ = DropWhile<FTYPE>Err(isEven<FTYPE>Err, []<TYPE>{v4})
if len(NewList) != 0 {
t.Errorf("DropWhile<FTYPE>Err failed")
}
}
`
}
// DropWhileErrBoolTest returns the bool-specialized test template for
// DropWhile<FTYPE>Err, including the isTrueBoolErr helper predicates the
// generated test relies on.
func DropWhileErrBoolTest() string {
return `
func TestDropWhile<FTYPE>Err(t *testing.T) {
var vt <TYPE> = true
var vf <TYPE> = false
expectedNewList := []<TYPE>{vf, vt}
NewList, _ := DropWhile<FTYPE>Err(isTrue<FTYPE>Err, []<TYPE>{vt, vf, vt})
if NewList[0] != expectedNewList[0] || NewList[1] != expectedNewList[1] {
t.Errorf("DropWhile<FTYPE>Err failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
_, err := DropWhile<FTYPE>Err(isTrue<FTYPE>Err2, []<TYPE>{vt, vf, vt})
if err == nil {
t.Errorf("DropWhile<FTYPE>Err failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
r, _ := DropWhile<FTYPE>Err(nil, nil)
if len(r) > 0 {
t.Errorf("DropWhile<FTYPE>Err failed.")
}
r, _ = DropWhile<FTYPE>Err(nil, []<TYPE>{})
if len(r) > 0 {
t.Errorf("DropWhile<FTYPE>Err failed.")
}
r, _ = DropWhile<FTYPE>Err(isTrue<FTYPE>Err2, []<TYPE>{})
if len(r) > 0 {
t.Errorf("DropWhile<FTYPE>Err failed.")
}
}
func isTrueBoolErr(v bool) (bool, error) {
return v == true, nil
}
func isTrueBoolErr2(v bool) (bool, error) {
if v == false {
return false, errors.New("false is invalid for this test")
}
return v == true, nil
}
`
}
// ReplaceActivityDropWhileErr post-processes generated code in two steps:
// it fixes the import block of the IntErr test (the generic template
// imports reflect and blanks errors, while this test needs errors and not
// reflect), and it gives the string-typed v0 an explicit "0" value so the
// error-triggering input is well-defined for strings.
// (Code unchanged apart from the closing brace, which previously had
// dataset-extraction residue fused onto it.)
func ReplaceActivityDropWhileErr(code string) string {
	s1 := `import (
_ "errors"
"reflect"
"testing"
)
func TestDropWhileIntErr(t *testing.T) {`
	s2 := `import (
"errors"
"testing"
)
func TestDropWhileIntErr(t *testing.T) {`
	code = strings.Replace(code, s1, s2, -1)
	s1 = `func TestDropWhileStrErr(t *testing.T) {
// Test : drop the numbers as long as condition match and returns remaining number in the list once condition fails
var v2 string = "2"
var v3 string = "3"
var v4 string = "4"
var v5 string = "5"
var v0 string
expectedNewList := []string{v3, v4, v5}
NewList, _ := DropWhileStrErr(isEvenStrErr, []string{v4, v2, v3, v4, v5})
if NewList[0] != expectedNewList[0] || NewList[1] != expectedNewList[1] || NewList[2] != expectedNewList[2] {
t.Errorf("DropWhileStrErr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
_, err := DropWhileStrErr(isEvenStrErr, []string{v4, v2, v0, v4, v5})
if err == nil {
t.Errorf("DropWhileStrErr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
r, _ := DropWhileStrErr(nil, nil)
if len(r) > 0 {
t.Errorf("DropWhileStrErr failed.")
}
r, _ = DropWhileStrErr(nil, []string{})
if len(r) > 0 {
t.Errorf("DropWhileStrErr failed.")
}
NewList, _ = DropWhileStrErr(isEvenStrErr, []string{v4})
if len(NewList) != 0 {
t.Errorf("DropWhileStrErr failed")
}
}`
	s2 = `func TestDropWhileStrErr(t *testing.T) {
// Test : drop the numbers as long as condition match and returns remaining number in the list once condition fails
var v2 string = "2"
var v3 string = "3"
var v4 string = "4"
var v5 string = "5"
var v0 string = "0"
expectedNewList := []string{v3, v4, v5}
NewList, _ := DropWhileStrErr(isEvenStrErr, []string{v4, v2, v3, v4, v5})
if NewList[0] != expectedNewList[0] || NewList[1] != expectedNewList[1] || NewList[2] != expectedNewList[2] {
t.Errorf("DropWhileStrErr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
_, err := DropWhileStrErr(isEvenStrErr, []string{v4, v2, v0, v4, v5})
if err == nil {
t.Errorf("DropWhileStrErr failed. Expected New list=%v, actual list=%v", expectedNewList, NewList)
}
r, _ := DropWhileStrErr(nil, nil)
if len(r) > 0 {
t.Errorf("DropWhileStrErr failed.")
}
r, _ = DropWhileStrErr(nil, []string{})
if len(r) > 0 {
t.Errorf("DropWhileStrErr failed.")
}
NewList, _ = DropWhileStrErr(isEvenStrErr, []string{v4})
if len(NewList) != 0 {
t.Errorf("DropWhileStrErr failed")
}
}`
	code = strings.Replace(code, s1, s2, -1)
	return code
}
package grouper
import (
"fmt"
"strings"
"github.com/go-openapi/strfmt"
"github.com/semi-technologies/weaviate/entities/models"
"github.com/semi-technologies/weaviate/entities/search"
)
// valueType categorizes a property value so mergeValueGroups can choose
// the matching merge strategy.
type valueType int
const (
numerical valueType = iota // float64 values: averaged
textual // string values: deduplicated and joined for display
boolean // bool values: majority vote
reference // []interface{} cross-references: flattened and deduplicated
geo // *models.GeoCoordinates: coordinates averaged
unknown // anything else: skipped during merging
)
// valueGroup collects every value observed for one property name across a
// group's elements, with the type detected from the first value seen.
type valueGroup struct {
values []interface{}
valueType valueType
name string
}
// flattenMerge collapses the whole group into a single search.Result by
// merging each property's values and averaging the element vectors.
func (g group) flattenMerge() (search.Result, error) {
	grouped := g.makeValueGroups()
	schema, err := mergeValueGroups(grouped)
	if err != nil {
		return search.Result{}, fmt.Errorf("merge values: %v", err)
	}
	vector, err := g.mergeVectors()
	if err != nil {
		return search.Result{}, fmt.Errorf("merge vectors: %v", err)
	}
	return search.Result{
		ClassName: g.mergeGetClassName(),
		Schema:    schema,
		Vector:    vector,
	}, nil
}
// mergeGetClassName picks the class name of the group's first element, or
// an empty string for an empty group.
func (g group) mergeGetClassName() string {
	if len(g.Elements) == 0 {
		return ""
	}
	return g.Elements[0].ClassName
}
// makeValueGroups gathers every element's schema properties into per-name
// value groups; each group's type is inferred from its first value.
// Elements without a schema are skipped.
func (g group) makeValueGroups() map[string]valueGroup {
	grouped := map[string]valueGroup{}
	for _, elem := range g.Elements {
		if elem.Schema == nil {
			continue
		}
		for name, value := range elem.Schema.(map[string]interface{}) {
			vg, seen := grouped[name]
			if !seen {
				vg = valueGroup{
					name:      name,
					valueType: valueTypeOf(value),
				}
			}
			vg.values = append(vg.values, value)
			grouped[name] = vg
		}
	}
	return grouped
}
// mergeVectors returns the element-wise average of the group's vectors.
// An empty group yields nil, a single element its vector unchanged, and
// mismatched dimensions an error.
func (g group) mergeVectors() ([]float32, error) {
	switch len(g.Elements) {
	case 0:
		return nil, nil
	case 1:
		return g.Elements[0].Vector, nil
	}
	dims := len(g.Elements[0].Vector)
	avg := make([]float32, dims)
	for _, el := range g.Elements {
		if len(el.Vector) != dims {
			return nil, fmt.Errorf("vectors have different dimensions")
		}
		for d, component := range el.Vector {
			avg[d] += component
		}
	}
	count := float32(len(g.Elements))
	for d := range avg {
		avg[d] /= count
	}
	return avg, nil
}
// mergeValueGroups reduces every per-property value group to a single
// merged value, dispatching on the group's detected type. Properties of
// unknown type are silently dropped.
func mergeValueGroups(props map[string]valueGroup) (map[string]interface{}, error) {
	merged := map[string]interface{}{}
	for name, vg := range props {
		var (
			value interface{}
			err   error
		)
		switch vg.valueType {
		case textual:
			value, err = mergeTextualProps(vg.values)
		case numerical:
			value, err = mergeNumericalProps(vg.values)
		case boolean:
			value, err = mergeBooleanProps(vg.values)
		case geo:
			value, err = mergeGeoProps(vg.values)
		case reference:
			value, err = mergeReferenceProps(vg.values)
		case unknown:
			continue
		default:
			err = fmt.Errorf("unrecognized value type")
		}
		if err != nil {
			return nil, fmt.Errorf("prop '%s': %v", name, err)
		}
		merged[name] = value
	}
	return merged, nil
}
// valueTypeOf infers the merge category from a property value's dynamic
// type; unrecognized types map to unknown.
func valueTypeOf(in interface{}) valueType {
	switch in.(type) {
	case string:
		return textual
	case float64:
		return numerical
	case bool:
		return boolean
	case *models.GeoCoordinates:
		return geo
	case []interface{}:
		return reference
	}
	return unknown
}
// mergeTextualProps merges string values into one display string: the
// first distinct value, followed by the remaining distinct values in
// parentheses, e.g. "foo (bar, baz)". Duplicates are dropped; a non-string
// element yields an error.
func mergeTextualProps(in []interface{}) (string, error) {
	var values []string
	seen := make(map[string]struct{}, len(in))
	for i, elem := range in {
		asString, ok := elem.(string)
		if !ok {
			return "", fmt.Errorf("element %d: expected textual element to be string, but got %T", i, elem)
		}
		if _, ok := seen[asString]; ok {
			// this is a duplicate, don't append it again
			continue
		}
		seen[asString] = struct{}{}
		values = append(values, asString)
	}
	// Guard against empty input: the previous version indexed values[0]
	// unconditionally and panicked on an empty slice.
	if len(values) == 0 {
		return "", nil
	}
	if len(values) == 1 {
		return values[0], nil
	}
	return fmt.Sprintf("%s (%s)", values[0], strings.Join(values[1:], ", ")), nil
}
// mergeNumericalProps returns the arithmetic mean of the float64 values;
// a non-float element yields an error. An empty input returns 0 rather
// than NaN (the previous version divided the zero sum by zero).
func mergeNumericalProps(in []interface{}) (float64, error) {
	if len(in) == 0 {
		return 0, nil
	}
	var sum float64
	for i, elem := range in {
		asFloat, ok := elem.(float64)
		if !ok {
			return 0, fmt.Errorf("element %d: expected numerical element to be float64, but got %T", i, elem)
		}
		sum += asFloat
	}
	return sum / float64(len(in)), nil
}
// mergeBooleanProps merges a group of boolean values by majority vote;
// a tie resolves to true.
func mergeBooleanProps(in []interface{}) (bool, error) {
	var trues, falses uint
	for idx, raw := range in {
		b, ok := raw.(bool)
		if !ok {
			return false, fmt.Errorf("element %d: expected boolean element to be bool, but got %T", idx, raw)
		}
		if b {
			trues++
		} else {
			falses++
		}
	}
	return trues >= falses, nil
}
// mergeGeoProps merges a group of geo coordinates by averaging latitude and
// longitude independently; nil coordinate components contribute 0 to the sum
// but the divisor is always the total element count.
func mergeGeoProps(in []interface{}) (*models.GeoCoordinates, error) {
	var latTotal, lonTotal float32
	for idx, raw := range in {
		geoVal, ok := raw.(*models.GeoCoordinates)
		if !ok {
			return nil, fmt.Errorf("element %d: expected geo element to be *models.GeoCoordinates, but got %T", idx, raw)
		}
		if lat := geoVal.Latitude; lat != nil {
			latTotal += *lat
		}
		if lon := geoVal.Longitude; lon != nil {
			lonTotal += *lon
		}
	}
	count := float32(len(in))
	return &models.GeoCoordinates{
		Latitude:  ptFloat32(latTotal / count),
		Longitude: ptFloat32(lonTotal / count),
	}, nil
}
// ptFloat32 returns a pointer to a copy of the given float32 value.
func ptFloat32(in float32) *float32 {
	return &in
}
// mergeReferenceProps flattens a group of reference lists into a single
// slice of search.LocalRef values, de-duplicated by each ref's 'id' field.
// Entries that are not search.LocalRef are skipped silently; a LocalRef
// without an 'id' field is an error.
func mergeReferenceProps(in []interface{}) ([]interface{}, error) {
	var out []interface{}
	seenID := map[string]struct{}{}
	for i, elem := range in {
		asSlice, ok := elem.([]interface{})
		if !ok {
			return nil, fmt.Errorf("element %d: expected reference values to be slice, but got %T", i, elem)
		}
		for _, singleRef := range asSlice {
			asRef, ok := singleRef.(search.LocalRef)
			if !ok {
				// don't know what to do with this type, ignore
				continue
			}
			id, ok := asRef.Fields["id"]
			if !ok {
				return nil, fmt.Errorf("found a search.LocalRef, but 'id' field is missing: %#v", asRef)
			}
			idString, err := getIDString(id)
			if err != nil {
				return nil, err
			}
			if _, ok := seenID[idString]; ok {
				// duplicate
				continue
			}
			out = append(out, asRef)
			seenID[idString] = struct{}{} // make sure we skip this next time
		}
	}
	return out, nil
}
func getIDString(id interface{}) (string, error) {
switch v := id.(type) {
case strfmt.UUID:
return v.String(), nil
default:
return "", fmt.Errorf("found a search.LocalRef, 'id' field type expected to be strfmt.UUID but got %T", v)
}
} | usecases/traverser/grouper/merge_group.go | 0.586641 | 0.431764 | merge_group.go | starcoder |
package buckettree
import (
"bytes"
"sort"
"github.com/abchain/fabric/core/ledger/statemgmt"
)
// Code for managing changes in data nodes
type dataNodes []*dataNode

// Len reports the number of data nodes; part of sort.Interface.
func (d dataNodes) Len() int {
	return len(d)
}

// Swap exchanges the nodes at positions i and j; part of sort.Interface.
func (d dataNodes) Swap(i, j int) {
	d[i], d[j] = d[j], d[i]
}

// Less orders nodes by byte-wise comparison of their composite data keys;
// part of sort.Interface.
func (d dataNodes) Less(i, j int) bool {
	return bytes.Compare(d[i].dataKey.compositeKey, d[j].dataKey.compositeKey) < 0
}
// dataNodesDelta groups the changed data nodes of a state delta by the
// bucket each node's data key maps to.
type dataNodesDelta struct {
	byBucket map[bucketKeyLite]dataNodes
}
// newDataNodesDelta builds the per-bucket set of changed data nodes for a
// state delta. When the delta is being rolled backwards the previous values
// are recorded, otherwise the updated values. Each bucket's node list is
// sorted by composite key before the delta is returned.
func newDataNodesDelta(conf *config, stateDelta *statemgmt.StateDelta) *dataNodesDelta {
	dataNodesDelta := &dataNodesDelta{make(map[bucketKeyLite]dataNodes)}
	chaincodeIDs := stateDelta.GetUpdatedChaincodeIds(false)
	for _, chaincodeID := range chaincodeIDs {
		updates := stateDelta.GetUpdates(chaincodeID)
		for key, updatedValue := range updates {
			if stateDelta.RollBackwards {
				dataNodesDelta.add(conf, chaincodeID, key, updatedValue.GetPreviousValue())
			} else {
				dataNodesDelta.add(conf, chaincodeID, key, updatedValue.GetValue())
			}
		}
	}
	// sorting in place works because each map value is a slice header that
	// shares its backing array with the stored entry
	for _, dataNodes := range dataNodesDelta.byBucket {
		sort.Sort(dataNodes)
	}
	return dataNodesDelta
}
// add records one changed key/value pair under the bucket its data key maps to.
func (delta *dataNodesDelta) add(conf *config, chaincodeID string, key string, value []byte) {
	dk := newDataKey(conf, chaincodeID, key)
	bk := dk.getBucketKey(conf)
	node := newDataNode(dk, value)
	logger.Debugf("Adding dataNode=[%s] against bucketKey=[%s]", node, bk)
	delta.byBucket[*bk] = append(delta.byBucket[*bk], node)
}
func (dataNodesDelta *dataNodesDelta) getAffectedBuckets() []*bucketKeyLite {
changedBuckets := []*bucketKeyLite{}
for bucketKey := range dataNodesDelta.byBucket {
copyOfBucketKey := bucketKey.clone()
logger.Debugf("Adding changed bucket [%s]", copyOfBucketKey)
changedBuckets = append(changedBuckets, copyOfBucketKey)
}
logger.Debugf("Changed buckets are = [%s]", changedBuckets)
return changedBuckets
}
// getSortedDataNodesFor returns the (already sorted) data nodes recorded for
// the given bucket, or nil if the bucket has no recorded changes.
func (dataNodesDelta *dataNodesDelta) getSortedDataNodesFor(bucketKey *bucketKeyLite) dataNodes {
	return dataNodesDelta.byBucket[*bucketKey]
}
// This file must be kept in sync with index_no_bound_checks.go.
// +build bounds
package mat
// At returns the element at row i, column j.
func (m *Dense) At(i, j int) float64 {
	return m.at(i, j)
}

// at performs the bounds-checked read. The uint conversions fold the
// i < 0 and i >= bound tests into a single unsigned comparison.
func (m *Dense) at(i, j int) float64 {
	if uint(i) >= uint(m.mat.Rows) {
		panic(ErrRowAccess)
	}
	if uint(j) >= uint(m.mat.Cols) {
		panic(ErrColAccess)
	}
	return m.mat.Data[i*m.mat.Stride+j]
}

// Set sets the element at row i, column j to the value v.
func (m *Dense) Set(i, j int, v float64) {
	m.set(i, j, v)
}

// set performs the bounds-checked write.
func (m *Dense) set(i, j int, v float64) {
	if uint(i) >= uint(m.mat.Rows) {
		panic(ErrRowAccess)
	}
	if uint(j) >= uint(m.mat.Cols) {
		panic(ErrColAccess)
	}
	m.mat.Data[i*m.mat.Stride+j] = v
}
// At returns the element at row i, column j.
func (m *CDense) At(i, j int) complex128 {
	return m.at(i, j)
}

// at performs the bounds-checked read; the uint conversions also reject
// negative indices in a single comparison.
func (m *CDense) at(i, j int) complex128 {
	if uint(i) >= uint(m.mat.Rows) {
		panic(ErrRowAccess)
	}
	if uint(j) >= uint(m.mat.Cols) {
		panic(ErrColAccess)
	}
	return m.mat.Data[i*m.mat.Stride+j]
}

// Set sets the element at row i, column j to the value v.
func (m *CDense) Set(i, j int, v complex128) {
	m.set(i, j, v)
}

// set performs the bounds-checked write.
func (m *CDense) set(i, j int, v complex128) {
	if uint(i) >= uint(m.mat.Rows) {
		panic(ErrRowAccess)
	}
	if uint(j) >= uint(m.mat.Cols) {
		panic(ErrColAccess)
	}
	m.mat.Data[i*m.mat.Stride+j] = v
}
// At returns the element at row i.
// It panics if i is out of bounds or if j is not zero.
func (v *VecDense) At(i, j int) float64 {
	if j != 0 {
		panic(ErrColAccess)
	}
	return v.at(i)
}

// AtVec returns the element at row i.
// It panics if i is out of bounds.
func (v *VecDense) AtVec(i int) float64 {
	return v.at(i)
}

// at reads element i, accounting for the vector's stride (Inc).
func (v *VecDense) at(i int) float64 {
	if uint(i) >= uint(v.mat.N) {
		panic(ErrRowAccess)
	}
	return v.mat.Data[i*v.mat.Inc]
}

// SetVec sets the element at row i to the value val.
// It panics if i is out of bounds.
func (v *VecDense) SetVec(i int, val float64) {
	v.setVec(i, val)
}

// setVec writes element i. Note the write path panics with ErrVectorAccess
// while the read path (at) panics with ErrRowAccess.
func (v *VecDense) setVec(i int, val float64) {
	if uint(i) >= uint(v.mat.N) {
		panic(ErrVectorAccess)
	}
	v.mat.Data[i*v.mat.Inc] = val
}
// At returns the element at row i and column j.
func (t *SymDense) At(i, j int) float64 {
	return t.at(i, j)
}

// at reads element (i, j). Sub-diagonal accesses are mirrored onto the
// stored triangle by swapping the indices.
func (t *SymDense) at(i, j int) float64 {
	if uint(i) >= uint(t.mat.N) {
		panic(ErrRowAccess)
	}
	if uint(j) >= uint(t.mat.N) {
		panic(ErrColAccess)
	}
	if i > j {
		i, j = j, i
	}
	return t.mat.Data[i*t.mat.Stride+j]
}

// SetSym sets the elements at (i,j) and (j,i) to the value v.
func (t *SymDense) SetSym(i, j int, v float64) {
	t.set(i, j, v)
}

// set writes element (i, j), mirroring sub-diagonal accesses the same way
// the read path does.
func (t *SymDense) set(i, j int, v float64) {
	if uint(i) >= uint(t.mat.N) {
		panic(ErrRowAccess)
	}
	if uint(j) >= uint(t.mat.N) {
		panic(ErrColAccess)
	}
	if i > j {
		i, j = j, i
	}
	t.mat.Data[i*t.mat.Stride+j] = v
}
// At returns the element at row i, column j.
func (t *TriDense) At(i, j int) float64 {
	return t.at(i, j)
}

// at reads element (i, j). Entries in the half outside the stored triangle
// are implicitly zero.
func (t *TriDense) at(i, j int) float64 {
	if uint(i) >= uint(t.mat.N) {
		panic(ErrRowAccess)
	}
	if uint(j) >= uint(t.mat.N) {
		panic(ErrColAccess)
	}
	isUpper := t.isUpper()
	if (isUpper && i > j) || (!isUpper && i < j) {
		return 0
	}
	return t.mat.Data[i*t.mat.Stride+j]
}

// SetTri sets the element of the triangular matrix at row i, column j to the value v.
// It panics if the location is outside the appropriate half of the matrix.
func (t *TriDense) SetTri(i, j int, v float64) {
	t.set(i, j, v)
}

// set writes element (i, j). Unlike the read path, writing outside the
// stored triangle panics rather than being silently ignored.
func (t *TriDense) set(i, j int, v float64) {
	if uint(i) >= uint(t.mat.N) {
		panic(ErrRowAccess)
	}
	if uint(j) >= uint(t.mat.N) {
		panic(ErrColAccess)
	}
	isUpper := t.isUpper()
	if (isUpper && i > j) || (!isUpper && i < j) {
		panic(ErrTriangleSet)
	}
	t.mat.Data[i*t.mat.Stride+j] = v
}
// At returns the element at row i, column j.
func (b *BandDense) At(i, j int) float64 {
	return b.at(i, j)
}

// at reads element (i, j). pj is j's offset within row i's compact band
// storage; positions outside the band are implicitly zero.
func (b *BandDense) at(i, j int) float64 {
	if uint(i) >= uint(b.mat.Rows) {
		panic(ErrRowAccess)
	}
	if uint(j) >= uint(b.mat.Cols) {
		panic(ErrColAccess)
	}
	pj := j + b.mat.KL - i
	if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj {
		return 0
	}
	return b.mat.Data[i*b.mat.Stride+pj]
}

// SetBand sets the element at row i, column j to the value v.
// It panics if the location is outside the appropriate region of the matrix.
func (b *BandDense) SetBand(i, j int, v float64) {
	b.set(i, j, v)
}

// set writes element (i, j); writing outside the band panics.
func (b *BandDense) set(i, j int, v float64) {
	if uint(i) >= uint(b.mat.Rows) {
		panic(ErrRowAccess)
	}
	if uint(j) >= uint(b.mat.Cols) {
		panic(ErrColAccess)
	}
	pj := j + b.mat.KL - i
	if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj {
		panic(ErrBandSet)
	}
	b.mat.Data[i*b.mat.Stride+pj] = v
}
// At returns the element at row i, column j.
func (s *SymBandDense) At(i, j int) float64 {
	return s.at(i, j)
}

// at reads element (i, j), mirroring sub-diagonal accesses onto the stored
// triangle; positions beyond the bandwidth K are implicitly zero.
func (s *SymBandDense) at(i, j int) float64 {
	if uint(i) >= uint(s.mat.N) {
		panic(ErrRowAccess)
	}
	if uint(j) >= uint(s.mat.N) {
		panic(ErrColAccess)
	}
	if i > j {
		i, j = j, i
	}
	pj := j - i
	if s.mat.K+1 <= pj {
		return 0
	}
	return s.mat.Data[i*s.mat.Stride+pj]
}

// SetSymBand sets the element at row i, column j to the value v.
// It panics if the location is outside the appropriate region of the matrix.
func (s *SymBandDense) SetSymBand(i, j int, v float64) {
	s.set(i, j, v)
}

// set writes element (i, j); writing beyond the bandwidth panics.
func (s *SymBandDense) set(i, j int, v float64) {
	if uint(i) >= uint(s.mat.N) {
		panic(ErrRowAccess)
	}
	if uint(j) >= uint(s.mat.N) {
		panic(ErrColAccess)
	}
	if i > j {
		i, j = j, i
	}
	pj := j - i
	if s.mat.K+1 <= pj {
		panic(ErrBandSet)
	}
	s.mat.Data[i*s.mat.Stride+pj] = v
}
// At returns the element at row i, column j.
func (t *TriBandDense) At(i, j int) float64 {
	return t.at(i, j)
}

// at reads element (i, j). Positions outside the stored triangle or outside
// the band are implicitly zero.
func (t *TriBandDense) at(i, j int) float64 {
	// TODO(btracey): Support Diag field, see #692.
	if uint(i) >= uint(t.mat.N) {
		panic(ErrRowAccess)
	}
	if uint(j) >= uint(t.mat.N) {
		panic(ErrColAccess)
	}
	isUpper := t.isUpper()
	if (isUpper && i > j) || (!isUpper && i < j) {
		return 0
	}
	kl, ku := t.mat.K, 0
	if isUpper {
		kl, ku = 0, t.mat.K
	}
	pj := j + kl - i
	if pj < 0 || kl+ku+1 <= pj {
		return 0
	}
	return t.mat.Data[i*t.mat.Stride+pj]
}

// SetTriBand sets the element at row i, column j to the value v.
// It panics if the location is outside the appropriate region of the matrix.
func (t *TriBandDense) SetTriBand(i, j int, v float64) {
	t.setTriBand(i, j, v)
}

// setTriBand writes element (i, j); writing outside the stored triangle or
// outside the band panics.
func (t *TriBandDense) setTriBand(i, j int, v float64) {
	if uint(i) >= uint(t.mat.N) {
		panic(ErrRowAccess)
	}
	if uint(j) >= uint(t.mat.N) {
		panic(ErrColAccess)
	}
	isUpper := t.isUpper()
	if (isUpper && i > j) || (!isUpper && i < j) {
		panic(ErrTriangleSet)
	}
	kl, ku := t.mat.K, 0
	if isUpper {
		kl, ku = 0, t.mat.K
	}
	pj := j + kl - i
	if pj < 0 || kl+ku+1 <= pj {
		panic(ErrBandSet)
	}
	// TODO(btracey): Support Diag field, see #692.
	t.mat.Data[i*t.mat.Stride+pj] = v
}
// At returns the element at row i, column j.
func (d *DiagDense) At(i, j int) float64 {
	return d.at(i, j)
}

// at reads element (i, j); off-diagonal entries are implicitly zero.
func (d *DiagDense) at(i, j int) float64 {
	if uint(i) >= uint(d.mat.N) {
		panic(ErrRowAccess)
	}
	if uint(j) >= uint(d.mat.N) {
		panic(ErrColAccess)
	}
	if i != j {
		return 0
	}
	return d.mat.Data[i*d.mat.Inc]
}

// SetDiag sets the element at row i, column i to the value v.
// It panics if the location is outside the appropriate region of the matrix.
func (d *DiagDense) SetDiag(i int, v float64) {
	d.setDiag(i, v)
}

// setDiag writes diagonal element i, accounting for the storage stride Inc.
func (d *DiagDense) setDiag(i int, v float64) {
	if uint(i) >= uint(d.mat.N) {
		panic(ErrRowAccess)
	}
	d.mat.Data[i*d.mat.Inc] = v
}
package fcc
var HomeYML = map[string]string{
"2017-05-18T160934Z": `
name: home
modifiedon: 2017-05-18T160934Z
outputscheme:
show_unknown_devices: true
interval: 30
devices:
- id: 1
num: 0
addr: 0
type: JeeLink
alias: Schreibtisch
absent: false
locked: true
lon: 0
lat: 0
alt: 0
- id: 2
num: 1
addr: 46
type: TX29TDH-IT
alias: Bad
absent: false
locked: false
lon: 1.1
lat: 1.2
alt: 1.3
- id: 3
num: 2
addr: 42
type: TX29TDH-IT
alias: Küche
absent: false
locked: false
lon: 2.1
lat: 2.2
alt: 2.3
- id: 4
num: 3
addr: 11
type: TX29TDH-IT
alias: Kühlschrank
absent: false
locked: false
lon: 3.1
lat: 3.2
alt: 3.3
- id: 5
num: 4
addr: 50
type: TX29TDH-IT
alias: Schlafzimmer
absent: false
locked: false
lon: 4.1
lat: 4.2
alt: 4.3
- id: 6
num: 5
addr: 57
type: TX29TDH-IT
alias: Wohnzimmer
absent: false
locked: false
lon: 5.1
lat: 5.2
alt: 5.3
- id: 7
num: 6
addr: 12
type: TX29TDH-IT
alias: <NAME>
absent: true
locked: false
lon: 6.1
lat: 6.2
alt: 6.3
- id: 8
num: 7
addr: 24
type: TX25TP-IT
alias: Bei Aucoteam
absent: false
locked: false
lon: 7.1
lat: 7.2
alt: 7.3
- id: 9
num: 8
addr: 6
type: TX29TDH-IT
alias: Other-08
absent: false
locked: false
lon: 8.1
lat: 8.2
alt: 8.3
- id: 10
num: 9
addr: 14
type: TX29TDH-IT
alias: Other-09
absent: false
locked: false
lon: 9.1
lat: 9.2
alt: 9.3
- id: 11
num: 10
addr: 43
type: TX29TDH-IT
alias: Other-10
absent: false
locked: false
lon: 10.1
lat: 10.2
alt: 10.3
- id: 12
num: 11
addr: 28
type: TX29TDH-IT
alias: Other-11
absent: false
locked: false
lon: 11.1
lat: 11.2
alt: 11.3
- id: 13
num: 12
addr: 26
type: TX29TDH-IT
alias: Other-12
absent: false
locked: false
lon: 12.1
lat: 12.2
alt: 12.3
`,
}
var AletaYML = map[string]string{
"2017-05-15T202418Z": `
name: aleta
modifiedon: 2017-05-15T202418Z
outputscheme:
show_unknown_devices: false
interval: 60
devices:
- id: 1
num: 193
addr: 7
type: TX29TDH-IT
alias: R 2.7 - Schreibtisch
absent: false
locked: false
lon: 19.31
lat: 19.32
alt: 19.33
- id: 2
num: 194
addr: 44
type: TX29TDH-IT
alias: R 2.7 - Kühlschrank
absent: false
locked: false
lon: 19.41
lat: 19.42
alt: 19.43
- id: 3
num: 195
addr: 48
type: TX29TDH-IT
alias: R 2.7 - Wand
absent: false
locked: false
lon: 19.51
lat: 19.52
alt: 19.53
- id: 4
num: 7
addr: 24
type: TX25TP-IT
alias: R 2.7 - Fenster:Draußen - AUCOTEAM Nordseite
absent: false
locked: false
lon: 7.1
lat: 7.2
alt: 7.3
`,
"2017-05-17T155848Z": `
name: aleta
modifiedon: 2017-05-17T155848Z
outputscheme:
show_unknown_devices: false
interval: 60
devices:
- id: 1
num: 193
addr: 7
type: TX29TDH-IT
alias: R 2.7 - Schreibtisch
absent: false
locked: false
lon: 19.31
lat: 19.32
alt: 19.33
- id: 2
num: 194
addr: 44
type: TX29TDH-IT
alias: R 2.7 - Kühlschrank
absent: false
locked: false
lon: 19.41
lat: 19.42
alt: 19.43
- id: 3
num: 195
addr: 48
type: TX29TDH-IT
alias: R 2.7 - Wand
absent: false
locked: false
lon: 19.51
lat: 19.52
alt: 19.53
- id: 4
num: 7
addr: 24
type: TX25TP-IT
alias: R 2.7 - Fenster:Draußen - AUCOTEAM Nordseite
absent: false
locked: false
lon: 7.1
lat: 7.2
alt: 7.3
- id: 5
num: 10
addr: 0
type: xyz
alias: ""
absent: false
locked: false
lon: 0
lat: 0
alt: 0
`,
"2017-05-17T175852Z": `
name: aleta
modifiedon: 2017-05-17T175852Z
outputscheme:
show_unknown_devices: false
interval: 60
devices:
- id: 1
num: 193
addr: 7
type: TX29TDH-IT
alias: R 2.7 - Schreibtisch
absent: false
locked: false
lon: 19.31
lat: 19.32
alt: 19.33
- id: 2
num: 194
addr: 44
type: TX29TDH-IT
alias: R 2.7 - Kühlschrank
absent: false
locked: false
lon: 19.41
lat: 19.42
alt: 19.43
- id: 3
num: 195
addr: 48
type: TX29TDH-IT
alias: R 2.7 - Wand
absent: false
locked: false
lon: 19.51
lat: 19.52
alt: 19.53
- id: 4
num: 7
addr: 24
type: TX25TP-IT
alias: R 2.7 - Fenster:Draußen - AUCOTEAM Nordseite
absent: false
locked: false
lon: 7.1
lat: 7.2
alt: 7.3
- id: 5
num: 10
addr: 0
type: xyz
alias: ""
absent: false
locked: false
lon: 0
lat: 0
alt: 0
`,
"2017-05-17T175924Z": `
name: aleta
modifiedon: 2017-05-17T175924Z
outputscheme:
show_unknown_devices: false
interval: 60
devices:
- id: 1
num: 193
addr: 7
type: TX29TDH-IT
alias: R 2.7 - Schreibtisch
absent: false
locked: false
lon: 19.31
lat: 19.32
alt: 19.33
- id: 2
num: 194
addr: 44
type: TX29TDH-IT
alias: R 2.7 - Kühlschrank
absent: false
locked: false
lon: 19.41
lat: 19.42
alt: 19.43
- id: 3
num: 195
addr: 48
type: TX29TDH-IT
alias: R 2.7 - Wand
absent: false
locked: false
lon: 19.51
lat: 19.52
alt: 19.53
- id: 4
num: 7
addr: 24
type: TX25TP-IT
alias: R 2.7 - Fenster:Draußen - AUCOTEAM Nordseite
absent: false
locked: false
lon: 7.1
lat: 7.2
alt: 7.3
- id: 5
num: 10
addr: 0
type: xyz
alias: ""
absent: false
locked: false
lon: 0
lat: 0
alt: 0
`,
"2017-05-17T182418Z": `
name: aleta
modifiedon: 2017-05-17T182418Z
outputscheme:
show_unknown_devices: false
interval: 60
devices:
- id: 1
num: 193
addr: 7
type: TX29TDH-IT
alias: R 2.7 - Schreibtisch
absent: false
locked: false
lon: 19.31
lat: 19.32
alt: 19.33
- id: 2
num: 194
addr: 44
type: TX29TDH-IT
alias: R 2.7 - Kühlschrank
absent: false
locked: false
lon: 19.41
lat: 19.42
alt: 19.43
- id: 3
num: 195
addr: 48
type: TX29TDH-IT
alias: R 2.7 - Wand
absent: false
locked: false
lon: 19.51
lat: 19.52
alt: 19.53
- id: 4
num: 7
addr: 24
type: TX25TP-IT
alias: R 2.7 - Fenster:Draußen - AUCOTEAM Nordseite
absent: false
locked: false
lon: 7.1
lat: 7.2
alt: 7.3
- id: 5
num: 10
addr: 0
type: xyz
alias: ""
absent: false
locked: false
lon: 0
lat: 0
alt: 0
`,
"2017-05-20T194020Z": `
name: aleta
modifiedon: 2017-05-20T194020Z
outputscheme:
show_unknown_devices: false
interval: 60
devices:
- id: 1
num: 193
addr: 7
type: TX29TDH-IT
alias: R 2.7 - Schreibtisch
absent: false
locked: false
lon: 19.31
lat: 19.32
alt: 19.33
- id: 2
num: 194
addr: 44
type: TX29TDH-IT
alias: R 2.7 - Kühlschrank
absent: false
locked: false
lon: 19.41
lat: 19.42
alt: 19.43
- id: 3
num: 195
addr: 48
type: TX29TDH-IT
alias: R 2.7 - Wand
absent: false
locked: false
lon: 19.51
lat: 19.52
alt: 19.53
- id: 4
num: 7
addr: 24
type: TX25TP-IT
alias: R 2.7 - Fenster:Draußen - AUCOTEAM Nordseite
absent: false
locked: false
lon: 7.1
lat: 7.2
alt: 7.3
`,
}
Package bender makes it easy to build load testing applications for services using protocols like
HTTP, Thrift, Protocol Buffers and many others.
Bender provides two different approaches to load testing. The first, LoadTestThroughput, gives the
tester control over the throughput (QPS), but not over the concurrency (number of goroutines). The
second, LoadTestConcurrency, gives the tester control over the concurrency, but not over the
throughput.
LoadTestThroughput simulates the load caused by concurrent clients sending requests to a service. It
can be used to simulate a target throughput (QPS) and to measure the request latency and error rate
at that throughput. The load tester will keep spawning goroutines to send requests, even if the
service is sending errors or hanging, making this a good way to test the actual behavior of the
service under heavy load. This is the same approach used by Twitter's Iago library, and is nearly
always the right place to start when load testing services exposed (directly or indirectly) to the
Internet.
LoadTestConcurrency simulates a fixed number of clients, each of which sends a request, waits for a
response and then repeats. The downside to this approach is that increased latency from the service
results in decreased throughput from the load tester, as the simulated clients are all waiting for
responses. That makes this a poor way to test services, as real-world traffic doesn't behave this
way. The best use for this function is to test services that need to handle a lot of concurrent
connections, and for which you need to simulate many connections to test resource limits, latency
and other metrics. This approach is used by load testers like the Grinder and JMeter, and has been
critiqued well by <NAME> in his talk "How Not To Measure Latency".
The next two sections provide more detail on the implementations of LoadTestThroughput and
LoadTestConcurrency. The following sections provide descriptions for the common arguments to the
load testing functions, and how they work, including the interval generators, request generators,
request executors and event recorders.
LoadTestThroughput
The LoadTestThroughput function takes four arguments. The first is a function that generates
nanosecond intervals which are used as request arrival times. The second is a channel of requests.
The third is a function that knows how to send a request and validate the response. The inner loop
of LoadTestThroughput looks like this:
for {
    interval := intervals(time.Now().UnixNano())
    time.Sleep(time.Duration(interval))
request := <-requests
go func() {
err := requestExec(time.Now().UnixNano(), request)
}()
}
The fourth argument to LoadTestThroughput is a channel which is used to output events. There are
events for the start and end of the load test, the sending of each request and the receiving of
each response and the wait time between sending requests. The wait message includes an "overage"
time which is useful for monitoring the health of the load test program and underlying OS and host.
The overage time measures the difference between the expected wait time (the interval time) and the
actual wait time. On a heavily loaded host, or when there are long GC pauses, that difference can
be large. Bender attempts to compensate for the overage by reducing the subsequent wait times, but
under heavy load, the overage will continue to increase until it cannot be compensated for. At that
point the wait events will report a monotonically increasing overage which means the load test
isn't keeping up with the desired throughput.
A load test ends when the request channel is closed and all remaining requests in the channel have
been executed.
LoadTestConcurrency
The LoadTestConcurrency function takes four arguments. The first is a semaphore that controls the
maximum number of concurrently executing requests, and makes it possible to dynamically control that
number over the lifetime of the load test. The second, third and fourth arguments are identical to
those for LoadTestThroughput. The inner loop of LoadTestConcurrency does something like this:
for {
workerSem.Wait(1)
request := <-requests
go func() {
err := requestExec(time.Now().UnixNano(), request)
workerSem.Signal(1)
    }()
}
Reducing the semaphore count will reduce the number of running connections as existing connections
complete, so there can be some lag between calling workerSem.Wait(n) and the number of running
connections actually decreasing by n. The worker semaphore does not protect you from reducing the
number of workers below zero, which will cause undefined behavior from the load tester.
As with LoadTestThroughput, the load test ends when the request channel is closed and all remaining
requests have been executed.
Interval Generators
An IntervalGenerator is a function that takes the current Unix epoch time (in nanoseconds) and
returns a non-negative time (also in nanoseconds) until the next request should be sent. Bender
provides functions to create interval generators for uniform and exponential distributions, each
of which takes the target throughput (requests per second) and returns an IntervalGenerator. Neither
of the included generators makes use of the function argument, but it is there for cases in which
the simulated intervals are time dependent (you want to simulate the daily traffic variation of a
web site, for example).
Request Channels
The request channel decouples creation of requests from execution of requests and allows them to
run concurrently. A typical approach to creating a request channel is code like this:
c := make(chan *Request)
go func() {
for {
// create service request r with request ID rid
c <- &Request{rid, r}
}
close(c)
}()
Requests can be generated randomly, read from files (like access logs) or generated any other way
you like. The important part is that the request generation be done in a separate goroutine that
communicates with the load tester via a channel. In addition, the channel must be closed to indicate
that the load test is done.
The requests channel should almost certainly be buffered, unless you can generate requests much
faster than they are sent (and not just on average). The easiest way to miss your target throughput
with LoadTestThroughput is to be blocked waiting for requests to be generated, particularly when
testing a large throughput.
Request Executors
A request executor is a function that takes the current Unix Epoch time (in nanoseconds) and a
*Request, sends the request to the service, waits for the response, optionally validates it and
returns an error or nil. This function is timed by the load tester, so it should do as little else
as possible, and everything it does will be added to the reported service latency. Here, for
example, is a very simple request executor for HTTP requests:
func HttpRequestExecutor(_ int64, request *Request) error {
url := request.Request.(string)
_, err := http.Get(url)
return err
}
The http package in Bender provides a function that generates executors that make use of the http
packages Transport and Client classes and provide an easy way to validate the body of the http
request.
RequestExecutors are called concurrently from multiple goroutines, and must be concurrency-safe.
Event Messages
The LoadTestThroughput and LoadTestConcurrency functions both take a channel of events (represented
as interface{}) as a parameter. This channel is used to output events as they happen during the load
test, including the following events:
StartEvent: sent once at the start of the load test.
EndEvent: sent once at the end of the load test, no more events are sent after this.
WaitEvent: sent only for LoadTestThroughput, see below for details.
StartRequestEvent: sent before a request is sent to the service, includes the request and the
event time. Note that the event time is not the same as the start time for the request for
stupid performance reasons. If you need to know the actual start time, see the EndRequestEvent.
EndRequestEvent: sent after a request has finished, includes the response, the actual start and
end times for the request and any error returned by the RequestExecutor.
The WaitEvent includes the time until the next request is sent (in nanoseconds) and an "overage"
time. When the inner loop sleeps, it subtracts the total time slept from the time it intended to
sleep, and adds that to the overage. The overage, therefore, is a good proxy for how overloaded the
load testing host is. If it grows over time, that means the load test is falling behind, and can't
start enough goroutines to run all the requests it needs to. In that case you will need a more
powerful load testing host, or need to distribute the load test across more hosts.
The event channel doesn't need to be buffered, but it may help if you find that Bender isn't sending
as much throughput as you expect. In general, this depends a lot on how quickly you are consuming
events from the channel, and how quickly the load tester is running. It is a good practice to
proactively buffer this channel.
*/
package bender
package encoding
import (
"fmt"
"chromiumos/tast/local/coords"
"chromiumos/tast/local/media/videotype"
)
// StreamParams describes one input stream argument for
// video_encode_accelerator_unittest.
type StreamParams struct {
	// Name is the name of the input raw data file.
	Name string
	// Size is the width and height of the YUV image in the input raw data.
	Size coords.Size
	// Bitrate is the requested bitrate in bits per second. VideoEncodeAccelerator is forced to output
	// encoded video in the expected range around this bitrate.
	Bitrate int
	// FrameRate is the initial frame rate in the test. This value is optional, and will be set to
	// 30 if unspecified (i.e. left at its zero value).
	FrameRate int
	// SubseqBitrate is the bitrate to switch to in the middle of the stream in some test cases in
	// video_encode_accelerator_unittest. This value is optional, and will be set to two times of Bitrate if unspecified.
	SubseqBitrate int
	// SubseqFrameRate is the frame rate to switch to in the middle of the stream in some test cases in
	// video_encode_accelerator_unittest. This value is optional, and will be set to 30 if unspecified.
	SubseqFrameRate int
	// Level is the requested output level. This value is optional and currently only used by the H264 codec. The value
	// should be aligned with the H264LevelIDC enum in https://cs.chromium.org/chromium/src/media/video/h264_parser.h,
	// as well as level_idc(u8) definition of sequence parameter set data in official H264 spec.
	Level int
}
// CreateStreamDataArg creates an argument of video_encode_accelerator_unittest from profile, dataPath and outFile.
func CreateStreamDataArg(params StreamParams, profile videotype.CodecProfile, pixelFormat videotype.PixelFormat, dataPath, outFile string) string {
const (
defaultFrameRate = 30
defaultSubseqBitrateRatio = 2
)
// Fill default values if they are unsettled.
if params.FrameRate == 0 {
params.FrameRate = defaultFrameRate
}
if params.SubseqBitrate == 0 {
params.SubseqBitrate = params.Bitrate * defaultSubseqBitrateRatio
}
if params.SubseqFrameRate == 0 {
params.SubseqFrameRate = defaultFrameRate
}
streamDataArgs := fmt.Sprintf("--test_stream_data=%s:%d:%d:%d:%s:%d:%d:%d:%d:%d",
dataPath, params.Size.Width, params.Size.Height, int(profile), outFile,
params.Bitrate, params.FrameRate, params.SubseqBitrate,
params.SubseqFrameRate, int(pixelFormat))
if params.Level != 0 {
streamDataArgs += fmt.Sprintf(":%d", params.Level)
}
return streamDataArgs
} | src/chromiumos/tast/local/media/encoding/stream_params.go | 0.50293 | 0.448245 | stream_params.go | starcoder |
package golang
import (
srccore "evergreen/dub/core"
src "evergreen/dub/flow"
dstcore "evergreen/go/core"
dst "evergreen/go/flow"
"evergreen/graph"
)
// flowMapper bundles the state used while translating a single Dub flow
// function (original) into its Go flow equivalent via builder, keeping the
// source and destination graphs in correspondence through stitcher.
type flowMapper struct {
	ctx      *DubToGoContext
	stitcher *graph.EdgeStitcher
	builder  *dst.GoFlowBuilder
	original *src.LLFunc
}
// simpleExitFlow copies each outgoing edge of the source node onto the
// translated destination node, mapping Dub NORMAL/RETURN flow kinds to
// their Go counterparts. Any other flow kind is unexpected here and panics.
func (mapper *flowMapper) simpleExitFlow(srcID graph.NodeID, dstID graph.NodeID) {
	// Copy edges.
	srcG := mapper.original.CFG
	eit := srcG.ExitIterator(srcID)
	for eit.HasNext() {
		e, _ := eit.GetNext()
		flow := mapper.original.Edges[e]
		switch flow {
		case src.NORMAL:
			mapper.stitcher.MapEdge(e, mapper.builder.EmitEdge(dstID, dst.NORMAL))
		case src.RETURN:
			mapper.stitcher.MapEdge(e, mapper.builder.EmitEdge(dstID, dst.RETURN))
		default:
			panic(flow)
		}
	}
}
// simpleFlow maps both the incoming and the outgoing edges of srcID onto dstID.
func (mapper *flowMapper) simpleFlow(srcID graph.NodeID, dstID graph.NodeID) {
	mapper.stitcher.MapIncomingEdges(srcID, dstID)
	mapper.simpleExitFlow(srcID, dstID)
}
// handleFailEdge stitches a translated fail edge. If the original fail edge
// left the function (doesExit), a Return op is appended so that what was
// non-local failure flow in Dub becomes an explicit function exit in Go.
func (mapper *flowMapper) handleFailEdge(original graph.EdgeID, translated graph.EdgeID, doesExit bool) {
	if doesExit {
		// This fail edge exits the function.
		// Before translation, failure edges are non-local flow, but after translation they are
		// plain-old normal flow. Cap the edge with a return to exit the function with non-local
		// flow.
		builder := mapper.builder
		returnNode := builder.EmitOp(&dst.Return{})
		returnEdge := builder.EmitEdge(returnNode, dst.RETURN)
		mapper.builder.ConnectEdgeExit(translated, returnNode)
		mapper.stitcher.MapEdge(original, returnEdge)
	} else {
		mapper.stitcher.MapEdge(original, translated)
	}
}
// dubFlow translates the edges of a Dub node that may fail. When the node
// has both a normal and a fail successor, code is emitted that loads
// frame.Flow, compares it against 0 and switches on the result: the true
// branch carries the normal edge and the false branch the fail edge. A node
// with only one of the two successors maps that edge directly.
func (mapper *flowMapper) dubFlow(frameRef *dst.Register, srcID graph.NodeID, dstID graph.NodeID) {
	ctx := mapper.ctx
	stitcher := mapper.stitcher
	builder := mapper.builder
	srcG := mapper.original.CFG
	stitcher.MapIncomingEdges(srcID, dstID)
	// Find the normal and fail exits of the source node, if present.
	normal := graph.NoEdge
	fail := graph.NoEdge
	failExits := false
	eit := srcG.ExitIterator(srcID)
	for eit.HasNext() {
		e, dstID := eit.GetNext()
		flow := mapper.original.Edges[e]
		switch flow {
		case src.NORMAL:
			normal = e
		case src.FAIL:
			fail = e
			// Remember whether the fail edge targets an ExitOp.
			_, failExits = mapper.original.Ops[dstID].(*src.ExitOp)
		default:
			panic(flow)
		}
	}
	if normal != graph.NoEdge {
		if fail != graph.NoEdge {
			// Both successors: branch on frame.Flow == 0.
			flow := builder.MakeRegister("flow", ctx.index.Int)
			reference := builder.MakeRegister("normal", ctx.index.Int)
			cond := builder.MakeRegister("cond", ctx.index.Bool)
			attrID := builder.EmitOp(&dst.Attr{
				Expr: frameRef,
				Name: "Flow",
				Dst:  flow,
			})
			constID := builder.EmitOp(&dst.ConstantInt{
				Value: 0,
				Dst:   reference,
			})
			compareID := builder.EmitOp(&dst.BinaryOp{
				Left:  flow,
				Op:    "==",
				Right: reference,
				Dst:   cond,
			})
			switchID := builder.EmitOp(&dst.Switch{
				Cond: cond,
			})
			builder.EmitConnection(dstID, dst.NORMAL, attrID)
			builder.EmitConnection(attrID, dst.NORMAL, constID)
			builder.EmitConnection(constID, dst.NORMAL, compareID)
			builder.EmitConnection(compareID, dst.NORMAL, switchID)
			stitcher.MapEdge(normal, builder.EmitEdge(switchID, dst.COND_TRUE))
			mapper.handleFailEdge(fail, builder.EmitEdge(switchID, dst.COND_FALSE), failExits)
		} else {
			stitcher.MapEdge(normal, builder.EmitEdge(dstID, dst.NORMAL))
		}
	} else if fail != graph.NoEdge {
		mapper.handleFailEdge(fail, builder.EmitEdge(dstID, dst.NORMAL), failExits)
	} else {
		// Dead end should not happen?
		panic(srcID)
	}
}
// dstReg translates a source register reference into its Go counterpart,
// passing a nil reference through unchanged.
func dstReg(regMap []*dst.Register, reg *src.RegisterInfo) *dst.Register {
	if reg != nil {
		return regMap[reg.Index]
	}
	return nil
}
// multiDstReg is like dstReg but wraps the mapped register in a
// single-element slice; a nil source register yields a nil slice.
func multiDstReg(regMap []*dst.Register, reg *src.RegisterInfo) []*dst.Register {
	if reg == nil {
		return nil
	}
	mapped := regMap[reg.Index]
	return []*dst.Register{mapped}
}
// regList maps every source argument register to its Go register, in order.
func regList(regMap []*dst.Register, args []*src.RegisterInfo) []*dst.Register {
	out := make([]*dst.Register, len(args))
	for i := range args {
		out[i] = regMap[args[i].Index]
	}
	return out
}
// translateFlow translates a single Dub low-level function into a Go flow
// function. It remaps every source register/parameter/result into a Go
// register, then walks the source CFG in reverse postorder, emitting the Go
// equivalent of each op and stitching the edges of the two graphs together.
func translateFlow(srcF *src.LLFunc, ctx *DubToGoContext) *dst.FlowFunc {
	goFlowFunc := &dst.FlowFunc{
		Recv:           nil,
		Function:       ctx.functionMap[srcF.F.Index],
		Register_Scope: &dst.Register_Scope{},
	}
	builder := dst.MakeGoFlowBuilder(goFlowFunc)
	// Every generated function threads the parser state through an implicit
	// leading "frame" pointer parameter.
	frameReg := builder.MakeRegister("frame", &dstcore.PointerType{Element: ctx.state})
	// Remap registers
	num := srcF.RegisterInfo_Scope.Len()
	regMap := make([]*dst.Register, num)
	for i := 0; i < num; i++ {
		r := srcF.RegisterInfo_Scope.Get(src.RegisterInfo_Ref(i))
		regMap[i] = builder.MakeRegister(r.Name, goType(r.T, ctx))
	}
	// Remap parameters
	goFlowFunc.Params = make([]*dst.Register, len(srcF.Params)+1)
	goFlowFunc.Params[0] = frameReg
	for i, p := range srcF.Params {
		goFlowFunc.Params[i+1] = regMap[p.Index]
	}
	// Create result registers
	goFlowFunc.Results = make([]*dst.Register, len(srcF.ReturnTypes))
	for i, rt := range srcF.ReturnTypes {
		goFlowFunc.Results[i] = builder.MakeRegister("ret", goType(rt, ctx))
	}
	srcG := srcF.CFG
	dstG := goFlowFunc.CFG
	stitcher := graph.MakeEdgeStitcher(srcG, dstG)
	mapper := &flowMapper{
		ctx:      ctx,
		builder:  builder,
		stitcher: stitcher,
		original: srcF,
	}
	// Visit nodes in reverse postorder so each op is translated exactly once.
	order, _ := graph.ReversePostorder(srcG)
	nit := graph.OrderedIterator(order)
	for nit.HasNext() {
		srcID := nit.GetNext()
		op := srcF.Ops[srcID]
		switch op := op.(type) {
		case *src.EntryOp:
			// Entry node already exists
			dstID := srcID
			mapper.simpleExitFlow(srcID, dstID)
		case *src.ExitOp:
			// Exit node already exists
			dstID := srcID
			stitcher.MapIncomingEdges(srcID, dstID)
		case *src.SwitchOp:
			dstID := builder.EmitOp(&dst.Switch{
				Cond: regMap[op.Cond.Index],
			})
			stitcher.MapIncomingEdges(srcID, dstID)
			// Copy edges.
			eit := srcG.ExitIterator(srcID)
			for eit.HasNext() {
				e, _ := eit.GetNext()
				flow := mapper.original.Edges[e]
				switch flow {
				case src.COND_TRUE:
					stitcher.MapEdge(e, builder.EmitEdge(dstID, dst.COND_TRUE))
				case src.COND_FALSE:
					stitcher.MapEdge(e, builder.EmitEdge(dstID, dst.COND_FALSE))
				default:
					panic(flow)
				}
			}
		case *src.CallOp:
			mappedArgs := regList(regMap, op.Args)
			mappedDsts := regList(regMap, op.Dsts)
			switch c := op.Target.(type) {
			case *srccore.Function:
				// User-defined function: prepend the frame pointer argument.
				args := []*dst.Register{frameReg}
				args = append(args, mappedArgs...)
				dstID := builder.EmitOp(&dst.Call{
					Target: ctx.functionMap[c.Index],
					Args:   args,
					Dsts:   mappedDsts,
				})
				mapper.dubFlow(frameReg, srcID, dstID)
			case *srccore.IntrinsicFunction:
				// Intrinsics are lowered to either a plain call (append) or
				// a method call on the frame (position/slice).
				if c.Parent == ctx.core.Builtins.Append {
					if len(mappedArgs) != 2 {
						panic(op)
					}
					if len(mappedDsts) > 1 {
						panic(op)
					}
					dstID := builder.EmitOp(&dst.Call{
						Target: ctx.index.Append,
						Args:   mappedArgs,
						Dsts:   mappedDsts,
					})
					mapper.dubFlow(frameReg, srcID, dstID)
				} else {
					switch c {
					case ctx.core.Builtins.Position:
						dstID := builder.EmitOp(&dst.MethodCall{
							Expr: frameReg,
							Name: "Checkpoint",
							Args: mappedArgs,
							Dsts: mappedDsts,
						})
						mapper.dubFlow(frameReg, srcID, dstID)
					case ctx.core.Builtins.Slice:
						dstID := builder.EmitOp(&dst.MethodCall{
							Expr: frameReg,
							Name: "Slice",
							Args: mappedArgs,
							Dsts: mappedDsts,
						})
						mapper.dubFlow(frameReg, srcID, dstID)
					default:
						panic(c)
					}
				}
			default:
				panic(op.Target)
			}
		case *src.ConstructOp:
			t := ctx.link.GetType(op.Type, STRUCT)
			st, ok := t.(*dstcore.StructType)
			if !ok {
				panic(t)
			}
			args := make([]*dst.NamedArg, len(op.Args))
			for i, arg := range op.Args {
				args[i] = &dst.NamedArg{
					Name: arg.Key,
					Arg:  regMap[arg.Value.Index],
				}
			}
			// Each contained scope gets its own construct node chained
			// before the main struct construction.
			nodes := []graph.NodeID{}
			for _, c := range op.Type.Contains {
				scopeTA := ctx.link.GetType(c, SCOPE)
				scopeT, ok := scopeTA.(*dstcore.StructType)
				if !ok {
					panic(scopeTA)
				}
				scopeTP := &dstcore.PointerType{Element: scopeT}
				scope := builder.MakeRegister("scope", scopeTP)
				nodes = append(nodes, builder.EmitOp(&dst.ConstructStruct{
					Type:      scopeT,
					AddrTaken: true,
					Dst:       scope,
				}))
				args = append(args, &dst.NamedArg{
					Name: subtypeName(c, SCOPE),
					Arg:  scope,
				})
			}
			nodes = append(nodes, builder.EmitOp(&dst.ConstructStruct{
				Type:      st,
				AddrTaken: true,
				Args:      args,
				Dst:       dstReg(regMap, op.Dst),
			}))
			// Link the construct nodes into a straight line and splice them
			// into the graph where the source node was.
			stitcher.MapIncomingEdges(srcID, nodes[0])
			for i := 0; i < len(nodes)-1; i++ {
				builder.EmitConnection(nodes[i], dst.NORMAL, nodes[i+1])
			}
			mapper.simpleExitFlow(srcID, nodes[len(nodes)-1])
		case *src.ConstructListOp:
			dstID := builder.EmitOp(&dst.ConstructSlice{
				Type: goSliceType(op.Type, ctx),
				Args: regList(regMap, op.Args),
				Dst:  dstReg(regMap, op.Dst),
			})
			mapper.simpleFlow(srcID, dstID)
		case *src.TransferOp:
			dstID := builder.EmitOp(&dst.Transfer{
				Srcs: regList(regMap, op.Srcs),
				Dsts: regList(regMap, op.Dsts),
			})
			mapper.simpleFlow(srcID, dstID)
		case *src.ConstantRuneOp:
			dstID := builder.EmitOp(&dst.ConstantRune{
				Value: op.Value,
				Dst:   dstReg(regMap, op.Dst),
			})
			mapper.simpleFlow(srcID, dstID)
		case *src.ConstantStringOp:
			dstID := builder.EmitOp(&dst.ConstantString{
				Value: op.Value,
				Dst:   dstReg(regMap, op.Dst),
			})
			mapper.simpleFlow(srcID, dstID)
		case *src.ConstantIntOp:
			dstID := builder.EmitOp(&dst.ConstantInt{
				Value: op.Value,
				Dst:   dstReg(regMap, op.Dst),
			})
			mapper.simpleFlow(srcID, dstID)
		case *src.ConstantFloat32Op:
			dstID := builder.EmitOp(&dst.ConstantFloat32{
				Value: op.Value,
				Dst:   dstReg(regMap, op.Dst),
			})
			mapper.simpleFlow(srcID, dstID)
		case *src.ConstantBoolOp:
			dstID := builder.EmitOp(&dst.ConstantBool{
				Value: op.Value,
				Dst:   dstReg(regMap, op.Dst),
			})
			mapper.simpleFlow(srcID, dstID)
		case *src.ConstantNilOp:
			dstID := builder.EmitOp(&dst.ConstantNil{
				Dst: dstReg(regMap, op.Dst),
			})
			mapper.simpleFlow(srcID, dstID)
		case *src.BinaryOp:
			dstID := builder.EmitOp(&dst.BinaryOp{
				Left:  regMap[op.Left.Index],
				Op:    op.Op,
				Right: regMap[op.Right.Index],
				Dst:   dstReg(regMap, op.Dst),
			})
			mapper.simpleFlow(srcID, dstID)
		case *src.Checkpoint:
			dstID := builder.EmitOp(&dst.MethodCall{
				Expr: frameReg,
				Name: "Checkpoint",
				Dsts: multiDstReg(regMap, op.Dst),
			})
			mapper.simpleFlow(srcID, dstID)
		case *src.Fail:
			dstID := builder.EmitOp(&dst.MethodCall{
				Expr: frameReg,
				Name: "Fail",
			})
			mapper.dubFlow(frameReg, srcID, dstID)
		case *src.Recover:
			dstID := builder.EmitOp(&dst.MethodCall{
				Expr: frameReg,
				Name: "Recover",
				Args: []*dst.Register{regMap[op.Src.Index]},
			})
			mapper.simpleFlow(srcID, dstID)
		case *src.Peek:
			dstID := builder.EmitOp(&dst.MethodCall{
				Expr: frameReg,
				Name: "Peek",
				Dsts: multiDstReg(regMap, op.Dst),
			})
			mapper.dubFlow(frameReg, srcID, dstID)
		case *src.Consume:
			dstID := builder.EmitOp(&dst.MethodCall{
				Expr: frameReg,
				Name: "Consume",
			})
			mapper.dubFlow(frameReg, srcID, dstID)
		case *src.LookaheadBegin:
			dstID := builder.EmitOp(&dst.MethodCall{
				Expr: frameReg,
				Name: "LookaheadBegin",
				Dsts: multiDstReg(regMap, op.Dst),
			})
			mapper.simpleFlow(srcID, dstID)
		case *src.LookaheadEnd:
			// The method chosen depends on whether the lookahead failed.
			name := "LookaheadNormal"
			if op.Failed {
				name = "LookaheadFail"
			}
			dstID := builder.EmitOp(&dst.MethodCall{
				Expr: frameReg,
				Name: name,
				Args: []*dst.Register{regMap[op.Src.Index]},
			})
			mapper.dubFlow(frameReg, srcID, dstID)
		case *src.CoerceOp:
			dstID := builder.EmitOp(&dst.Coerce{
				Src:  regMap[op.Src.Index],
				Type: goType(op.T, ctx),
				Dst:  regMap[op.Dst.Index],
			})
			mapper.simpleFlow(srcID, dstID)
		case *src.ReturnOp:
			// Transfer the returned expressions into the result registers,
			// then emit the actual return node.
			transferID := builder.EmitOp(&dst.Transfer{
				Srcs: regList(regMap, op.Exprs),
				Dsts: goFlowFunc.Results, // TODO copy?
			})
			stitcher.MapIncomingEdges(srcID, transferID)
			returnID := builder.EmitOp(&dst.Return{})
			builder.EmitConnection(transferID, dst.NORMAL, returnID)
			mapper.simpleExitFlow(srcID, returnID)
		default:
			panic(op)
		}
	}
	return goFlowFunc
}
// createTagInternal recursively registers an empty "is<Parent>" tag method on
// selfType for parent and every ancestor it implements. The generated method
// bodies are empty (entry connected straight to exit); they exist only so the
// Go type system records the subtype relationship.
func createTagInternal(base *srccore.StructType, parent *srccore.StructType, goCoreProg *dstcore.CoreProgram, goFlowProg *dst.FlowProgram, p *dstcore.Package, selfType *dstcore.StructType) {
	if parent == nil {
		return
	}
	// Walk up the chain first so ancestors are registered before descendants.
	createTagInternal(base, parent.Implements, goCoreProg, goFlowProg, p, selfType)
	goCoreFunc := &dstcore.Function{
		Name:    "is" + parent.Name,
		Package: nil,
	}
	goFlowFunc := &dst.FlowFunc{
		Recv: nil,
		CFG:  graph.CreateGraph(),
		Ops: []dst.GoOp{
			&dst.Entry{},
			&dst.Exit{},
		},
		Register_Scope: &dst.Register_Scope{},
	}
	// The method receiver is a pointer to the concrete struct.
	goFlowFunc.Recv = goFlowFunc.Register_Scope.Register(&dst.Register{
		Name: "node",
		T: &dstcore.PointerType{
			Element: selfType,
		},
	})
	// Empty function.
	g := goFlowFunc.CFG
	g.ConnectEdge(g.Entry(), dst.AllocEdge(goFlowFunc, 0), g.Exit())
	f := goCoreProg.Function_Scope.Register(goCoreFunc)
	goFlowProg.FlowFunc_Scope.Register(goFlowFunc)
	// Index.
	dstcore.InsertFunctionIntoPackage(goCoreProg, p, f)
	selfType.Methods = append(selfType.Methods, f)
}
// Fake functions for enforcing type relationships.
// createTags emits tag methods for every concrete structure that implements
// a parent structure, binding it to its parent chain in the Go type system.
func createTags(dubCoreProg *srccore.CoreProgram, dubFlowProg *src.DubProgram, goCoreProg *dstcore.CoreProgram, goFlowProg *dst.FlowProgram, packages []*dstcore.Package, ctx *DubToGoContext) {
	for _, s := range dubCoreProg.Structures {
		// Only leaf structures that implement a parent need tag methods.
		if s.IsParent || s.Implements == nil {
			continue
		}
		p := packages[s.File.Package.Index]
		absSelfType := ctx.link.GetType(s, STRUCT)
		selfType, ok := absSelfType.(*dstcore.StructType)
		if !ok {
			panic(absSelfType)
		}
		createTagInternal(s, s.Implements, goCoreProg, goFlowProg, p, selfType)
	}
} | src/evergreen/dub/transform/golang/translate_flow.go | 0.539469 | 0.409988 | translate_flow.go | starcoder
package distance
import (
"errors"
"math"
)
type DistMetric func(x []float64, y []float64) (dist float64, err error)
func Binary(x []float64, y []float64) (dist float64, err error) {
if len(x) != len(y) {
err = errors.New("Vectors for calculating distance must have equal length")
return
}
denominator := float64(0)
numerator := float64(0)
for i := range x {
if x[i] > 0 && y[i] > 0 {
numerator++
}
// Ignore i when both x[i] and y[i] are zero.
if x[i] > 0 || y[i] > 0 {
denominator++
}
}
dist = 1 - (numerator / denominator)
return
}
func Canberra(x []float64, y []float64) (dist float64, err error) {
if len(x) != len(y) {
err = errors.New("Vectors for calculating distance must have equal length")
return
}
dist = 0
for i := range x {
// Ignore i when both x[i] and y[i] are zero.
if x[i] > 0 || y[i] > 0 {
dist += math.Abs(x[i]-y[i]) / (math.Abs(x[i]) + math.Abs(y[i]))
}
}
return
}
func Jaccard(x []float64, y []float64) (dist float64, err error) {
if len(x) != len(y) {
err = errors.New("Vectors for calculating distance must have equal length")
return
}
denominator := float64(0)
numerator := float64(0)
for i := range x {
// Ignore i when both x[i] and y[i] are zero.
if x[i] > 0 || y[i] > 0 {
numerator += math.Min(x[i], y[i])
denominator += math.Max(x[i], y[i])
}
}
dist = 1 - (numerator / denominator)
return
}
// Manhattan computes the L1 (city-block) distance: the sum of the absolute
// coordinate differences. Returns an error when the vectors differ in length.
func Manhattan(x []float64, y []float64) (dist float64, err error) {
	if len(x) != len(y) {
		return 0, errors.New("Vectors for calculating distance must have equal length")
	}
	for i, xi := range x {
		dist += math.Abs(xi - y[i])
	}
	return dist, nil
}
// Maximum computes the Chebyshev (L-infinity) distance: the largest absolute
// coordinate difference. Returns an error when the vectors differ in length.
func Maximum(x []float64, y []float64) (dist float64, err error) {
	if len(x) != len(y) {
		return 0, errors.New("Vectors for calculating distance must have equal length")
	}
	for i, xi := range x {
		if diff := math.Abs(xi - y[i]); diff > dist {
			dist = diff
		}
	}
	return dist, nil
}
// Euclidean computes the L2 distance: the square root of the sum of squared
// coordinate differences. Returns an error when the vectors differ in length.
func Euclidean(x []float64, y []float64) (dist float64, err error) {
	if len(x) != len(y) {
		return 0, errors.New("Vectors for calculating distance must have equal length")
	}
	var sum float64
	for i, xi := range x {
		d := xi - y[i]
		sum += d * d
	}
	return math.Sqrt(sum), nil
}
// Cosine computes the cosine distance (1 - cosine similarity) between x and
// y. Returns an error when the vectors differ in length or when either
// vector is all zeros, since the similarity is undefined there.
func Cosine(x []float64, y []float64) (dist float64, err error) {
	if len(x) != len(y) {
		err = errors.New("Vectors for calculating distance must have equal length")
		return
	}
	// Accumulate the dot product and both squared magnitudes in one pass.
	var ab float64
	var a2 float64
	var b2 float64
	for i := range x {
		ab += x[i] * y[i]
		a2 += math.Pow(x[i], 2)
		b2 += math.Pow(y[i], 2)
	}
	denominator := math.Sqrt(a2) * math.Sqrt(b2)
	if denominator == 0 {
		err = errors.New("Zero vector detected")
		return
	}
	dist = 1 - (ab / denominator)
	return
} | distance/distmetric.go | 0.616705 | 0.530723 | distmetric.go | starcoder
package exp
type (
	// sqlFunctionExpression represents a SQL function invocation (e.g.
	// COUNT(col)) by its name and argument list.
	sqlFunctionExpression struct {
		name string
		args []interface{}
	}
)

// Creates a new SQLFunctionExpression with the given name and arguments
func NewSQLFunctionExpression(name string, args ...interface{}) SQLFunctionExpression {
	return sqlFunctionExpression{name: name, args: args}
}

// Clone returns a copy of the expression (the args slice is shared).
func (sfe sqlFunctionExpression) Clone() Expression {
	return sqlFunctionExpression{name: sfe.name, args: sfe.args}
}

// Basic accessors required by the Expression interface.
func (sfe sqlFunctionExpression) Expression() Expression { return sfe }
func (sfe sqlFunctionExpression) Args() []interface{}    { return sfe.args }
func (sfe sqlFunctionExpression) Name() string           { return sfe.name }

// Each method below builds the corresponding SQL expression node with this
// function call as the left-hand operand.
func (sfe sqlFunctionExpression) As(val interface{}) AliasedExpression { return aliased(sfe, val) }
func (sfe sqlFunctionExpression) Eq(val interface{}) BooleanExpression { return eq(sfe, val) }
func (sfe sqlFunctionExpression) Neq(val interface{}) BooleanExpression { return neq(sfe, val) }
func (sfe sqlFunctionExpression) Gt(val interface{}) BooleanExpression { return gt(sfe, val) }
func (sfe sqlFunctionExpression) Gte(val interface{}) BooleanExpression { return gte(sfe, val) }
func (sfe sqlFunctionExpression) Lt(val interface{}) BooleanExpression { return lt(sfe, val) }
func (sfe sqlFunctionExpression) Lte(val interface{}) BooleanExpression { return lte(sfe, val) }
func (sfe sqlFunctionExpression) Between(val RangeVal) RangeExpression { return between(sfe, val) }
func (sfe sqlFunctionExpression) NotBetween(val RangeVal) RangeExpression {
	return notBetween(sfe, val)
}

// Pattern-matching operators (LIKE / ILIKE / REGEXP variants).
func (sfe sqlFunctionExpression) Like(val interface{}) BooleanExpression { return like(sfe, val) }
func (sfe sqlFunctionExpression) NotLike(val interface{}) BooleanExpression { return notLike(sfe, val) }
func (sfe sqlFunctionExpression) ILike(val interface{}) BooleanExpression { return iLike(sfe, val) }
func (sfe sqlFunctionExpression) NotILike(val interface{}) BooleanExpression {
	return notILike(sfe, val)
}
func (sfe sqlFunctionExpression) RegexpLike(val interface{}) BooleanExpression {
	return regexpLike(sfe, val)
}
func (sfe sqlFunctionExpression) RegexpNotLike(val interface{}) BooleanExpression {
	return regexpNotLike(sfe, val)
}
func (sfe sqlFunctionExpression) RegexpILike(val interface{}) BooleanExpression {
	return regexpILike(sfe, val)
}
func (sfe sqlFunctionExpression) RegexpNotILike(val interface{}) BooleanExpression {
	return regexpNotILike(sfe, val)
}

// Membership and identity operators.
func (sfe sqlFunctionExpression) In(vals ...interface{}) BooleanExpression { return in(sfe, vals...) }
func (sfe sqlFunctionExpression) NotIn(vals ...interface{}) BooleanExpression {
	return notIn(sfe, vals...)
}
func (sfe sqlFunctionExpression) Is(val interface{}) BooleanExpression { return is(sfe, val) }
func (sfe sqlFunctionExpression) IsNot(val interface{}) BooleanExpression { return isNot(sfe, val) }
func (sfe sqlFunctionExpression) IsNull() BooleanExpression { return is(sfe, nil) }
func (sfe sqlFunctionExpression) IsNotNull() BooleanExpression { return isNot(sfe, nil) }
func (sfe sqlFunctionExpression) IsTrue() BooleanExpression { return is(sfe, true) }
func (sfe sqlFunctionExpression) IsNotTrue() BooleanExpression { return isNot(sfe, true) }
func (sfe sqlFunctionExpression) IsFalse() BooleanExpression { return is(sfe, false) }
func (sfe sqlFunctionExpression) IsNotFalse() BooleanExpression { return isNot(sfe, false) }

// Window-function support: OVER with an inline window or a named window.
func (sfe sqlFunctionExpression) Over(we WindowExpression) SQLWindowFunctionExpression {
	return NewSQLWindowFunctionExpression(sfe, nil, we)
}
func (sfe sqlFunctionExpression) OverName(windowName IdentifierExpression) SQLWindowFunctionExpression {
	return NewSQLWindowFunctionExpression(sfe, windowName, nil)
} | exp/func.go | 0.776029 | 0.447762 | func.go | starcoder
package register
import (
"fmt"
"github.com/golang/glog"
)
const (
InitialSetup RegStep = "Initial Minikube Setup"
SelectingDriver RegStep = "Selecting Driver"
DownloadingArtifacts RegStep = "Downloading Artifacts"
StartingNode RegStep = "Starting Node"
RunningLocalhost RegStep = "Running on Localhost"
LocalOSRelease RegStep = "Local OS Release"
CreatingContainer RegStep = "Creating Container"
CreatingVM RegStep = "Creating VM"
ConfiguringLHEnv RegStep = "Configuring Localhost Environment"
PreparingKubernetes RegStep = "Preparing Kubernetes"
VerifyingKubernetes RegStep = "Verifying Kubernetes"
EnablingAddons RegStep = "Enabling Addons"
Done RegStep = "Done"
Stopping RegStep = "Stopping"
Deleting RegStep = "Deleting"
Pausing RegStep = "Pausing"
Unpausing RegStep = "Unpausing"
)
// RegStep is a type representing a distinct step of `minikube start`
type RegStep string
// Register holds all of the steps we could see in `minikube start`
// and keeps track of the current step
type Register struct {
steps map[RegStep][]RegStep
first RegStep
current RegStep
}
// Reg keeps track of all possible steps and the current step we are on
var Reg Register
// init seeds the global Reg with the expected step orderings, keyed by the
// step that can legally begin a run (start, stop, pause, unpause, delete).
func init() {
	Reg = Register{
		// Expected step orders, organized by the initial step seen
		steps: map[RegStep][]RegStep{
			InitialSetup: []RegStep{
				InitialSetup,
				SelectingDriver,
				DownloadingArtifacts,
				StartingNode,
				RunningLocalhost,
				LocalOSRelease,
				CreatingContainer,
				CreatingVM,
				PreparingKubernetes,
				ConfiguringLHEnv,
				VerifyingKubernetes,
				EnablingAddons,
				Done,
			},
			Stopping:  []RegStep{Stopping, Done},
			Pausing:   []RegStep{Pausing, Done},
			Unpausing: []RegStep{Unpausing, Done},
			Deleting:  []RegStep{Deleting, Done},
		},
	}
}
// totalSteps returns, as a string, the number of steps in the sequence
// selected by the first registered step, not counting the initial step.
func (r *Register) totalSteps() string {
	total := len(r.steps[r.first]) - 1
	return fmt.Sprintf("%d", total)
}
// currentStep returns the index (as a string) of the current step within the
// sequence selected by the first registered step. It returns "" before any
// step has been set, and "unknown" when the first step has no registered
// sequence.
func (r *Register) currentStep() string {
	if r.first == RegStep("") {
		return ""
	}
	steps, ok := r.steps[r.first]
	if !ok {
		return "unknown"
	}
	for i, s := range r.steps[r.first] {
		if r.current == s {
			return fmt.Sprintf("%d", i)
		}
	}
	// all steps should be registered so this shouldn't happen
	// can't call exit.WithError as it creates an import dependency loop
	glog.Errorf("%q was not found within the registered steps for %q: %v", r.current, r.first, steps)
	return ""
}
// SetStep records s as the current step. The first step ever registered also
// selects which expected step sequence this run follows; a first step with
// no registered sequence is logged and ignored for sequence selection.
func (r *Register) SetStep(s RegStep) {
	if r.first == RegStep("") {
		_, ok := r.steps[s]
		if ok {
			r.first = s
		} else {
			// Log the step we actually received; previously this logged
			// r.first, which is still empty at this point.
			glog.Errorf("unexpected first step: %q", s)
		}
	}
	r.current = s
}
// recordStep records the current step | pkg/minikube/out/register/register.go | 0.567457 | 0.404096 | register.go | starcoder |
package components
import (
"container/ring"
"context"
"math"
"sync"
"github.com/mitchellh/go-glint"
)
// SparklineComponent renders a sparkline graph.
// The embedded mutex guards values; all exported methods lock it.
type SparklineComponent struct {
	sync.Mutex
	// If set, this will style the peak value.
	PeakStyle []glint.StyleOption
	// values is a fixed-size ring buffer holding the most recent samples.
	values *ring.Ring
}
// Sparkline builds a SparklineComponent seeded with values. The length of
// the slice fixes the width of the sparkline until Set replaces the values.
func Sparkline(values []uint) *SparklineComponent {
	c := &SparklineComponent{}
	c.Set(values)
	return c
}
// Set replaces the entire value buffer with the given slice, resizing the
// sparkline to its length.
func (c *SparklineComponent) Set(values []uint) {
	c.Lock()
	defer c.Unlock()
	// Build a fresh ring and fill it; the ring ends back at its start.
	r := ring.New(len(values))
	for _, v := range values {
		r.Value = v
		r = r.Next()
	}
	c.values = r
}
// Append writes values into the ring buffer, overwriting the oldest entries.
// The buffer size was fixed by the slice given to Sparkline or Set.
func (c *SparklineComponent) Append(values ...uint) {
	c.Lock()
	defer c.Unlock()
	r := c.values
	for _, v := range values {
		r.Value = v
		r = r.Next()
	}
	c.values = r
}
// valuesSlice flattens the ring buffer into a slice, oldest value first. It
// walks the ring one full revolution, so c.values ends back at its starting
// element. Callers must hold c's mutex.
func (c *SparklineComponent) valuesSlice() []uint {
	result := make([]uint, c.values.Len())
	for i := range result {
		result[i] = c.values.Value.(uint)
		c.values = c.values.Next()
	}
	return result
}
// Body renders the sparkline: each value is scaled against the maximum and
// mapped to one of the eight block symbols; the first occurrence of the
// maximum is optionally styled with PeakStyle. Returns nil when empty.
func (c *SparklineComponent) Body(context.Context) glint.Component {
	c.Lock()
	defer c.Unlock()
	values := c.valuesSlice()
	// If we have nothing we render nothing
	if len(values) == 0 {
		return nil
	}
	// Find the max
	max := values[0]
	if len(values) > 1 {
		for _, v := range values[1:] {
			if v > max {
				max = v
			}
		}
	}
	// Build each symbol
	peak := false
	parts := make([]glint.Component, len(values))
	for i, v := range values {
		// Guard against an all-zero series: 0/0 is NaN and converting NaN
		// to int is undefined behavior in Go, which could index out of
		// range. Render the lowest bar in that case.
		symbolIdx := 0
		if max > 0 {
			symbolIdx = int(math.Ceil(float64(v) / float64(max) * float64(len(sparklineSymbols)-1)))
		}
		parts[i] = glint.Text(string(sparklineSymbols[symbolIdx]))
		if len(c.PeakStyle) > 0 && v == max && !peak {
			peak = true
			parts[i] = glint.Style(parts[i], c.PeakStyle...)
		}
	}
	// Render them in a row
	return glint.Layout(parts...).Row()
}
// sparklineSymbols are the eight Unicode block characters (U+2581..U+2588),
// ordered from the lowest one-eighth bar to the full bar; values are scaled
// into this range by Body.
var sparklineSymbols = []rune{
	'\u2581',
	'\u2582',
	'\u2583',
	'\u2584',
	'\u2585',
	'\u2586',
	'\u2587',
	'\u2588',
} | components/sparkline.go | 0.672332 | 0.411347 | sparkline.go | starcoder
package geom
// A MultiPolygon is a collection of Polygons.
type MultiPolygon struct {
	// geom3 supplies the flat-coordinate storage plus the nested end-index
	// structure (endss) shared by three-level geometries.
	geom3
}

// NewMultiPolygon returns a new MultiPolygon with no Polygons.
func NewMultiPolygon(Lay Layout) *MultiPolygon {
	return NewMultiPolygonFlat(Lay, nil, nil)
}
// NewMultiPolygonFlat returns a new MultiPolygon backed directly by the
// given flat coordinates and per-polygon ring end indices.
func NewMultiPolygonFlat(Lay Layout, FlatCoord []float64, endss [][]int) *MultiPolygon {
	mp := &MultiPolygon{}
	mp.Lay = Lay
	mp.Strd = Lay.Stride()
	mp.FlatCoord = FlatCoord
	mp.endss = endss
	return mp
}
// Area returns the sum of the area of the individual Polygons.
func (g *MultiPolygon) Area() float64 {
	// doubleArea3 accumulates twice the signed area across all rings.
	return doubleArea3(g.FlatCoord, 0, g.endss, g.Strd) / 2
}

// Clone returns a deep copy.
func (g *MultiPolygon) Clone() *MultiPolygon {
	return deriveCloneMultiPolygon(g)
}

// Length returns the sum of the perimeters of the Polygons.
func (g *MultiPolygon) Length() float64 {
	return length3(g.FlatCoord, 0, g.endss, g.Strd)
}

// MustSetCoords sets the coordinates and panics on any error.
func (g *MultiPolygon) MustSetCoords(coords [][][]Coord) *MultiPolygon {
	Must(g.SetCoords(coords))
	return g
}

// NumPolygons returns the number of Polygons.
func (g *MultiPolygon) NumPolygons() int {
	return len(g.endss)
}
// Polygon returns the ith Polygon as a view over the shared flat-coordinate
// slice (no coordinates are copied; only the end indices are re-based).
func (g *MultiPolygon) Polygon(i int) *Polygon {
	if len(g.endss[i]) == 0 {
		return NewPolygon(g.Lay)
	}
	// Find the offset from the previous non-empty polygon element.
	offset := 0
	lastNonEmptyIdx := i - 1
	for lastNonEmptyIdx >= 0 {
		ends := g.endss[lastNonEmptyIdx]
		if len(ends) > 0 {
			offset = ends[len(ends)-1]
			break
		}
		lastNonEmptyIdx--
	}
	// Re-base the ring end indices so they are relative to this polygon's
	// first coordinate instead of the whole MultiPolygon.
	ends := make([]int, len(g.endss[i]))
	if offset == 0 {
		copy(ends, g.endss[i])
	} else {
		for j, end := range g.endss[i] {
			ends[j] = end - offset
		}
	}
	return NewPolygonFlat(g.Lay, g.FlatCoord[offset:g.endss[i][len(g.endss[i])-1]], ends)
}
// Push appends a Polygon, returning ErrLayoutMismatch when its layout does
// not match the MultiPolygon's.
func (g *MultiPolygon) Push(p *Polygon) error {
	if p.Lay != g.Lay {
		return ErrLayoutMismatch{Got: p.Lay, Want: g.Lay}
	}
	// Shift the pushed polygon's ring end indices by the current coordinate
	// count so they index into the combined flat-coordinate slice.
	offset := len(g.FlatCoord)
	var ends []int
	if len(p.ends) > 0 {
		ends = make([]int, len(p.ends))
		if offset == 0 {
			copy(ends, p.ends)
		} else {
			for i, end := range p.ends {
				ends[i] = end + offset
			}
		}
	}
	g.FlatCoord = append(g.FlatCoord, p.FlatCoord...)
	g.endss = append(g.endss, ends)
	return nil
}
// SetCoords sets the coordinates.
// Returns g (for chaining) or nil with a non-nil error on failure.
func (g *MultiPolygon) SetCoords(coords [][][]Coord) (*MultiPolygon, error) {
	if err := g.setCoords(coords); err != nil {
		return nil, err
	}
	return g, nil
}

// SetSRID sets the SRID of g.
func (g *MultiPolygon) SetSRID(Srid int) *MultiPolygon {
	g.Srid = Srid
	return g
}

// Swap swaps the values of g and g2.
func (g *MultiPolygon) Swap(g2 *MultiPolygon) {
	*g, *g2 = *g2, *g
} | multipolygon.go | 0.790692 | 0.542379 | multipolygon.go | starcoder
package easings
import (
"math"
)
// Linear Easing functions
// All four linear variants are identical: b + c*t/d (constant velocity).
// LinearNone easing
func LinearNone(t, b, c, d float32) float32 {
	return c*t/d + b
}

// LinearIn easing
func LinearIn(t, b, c, d float32) float32 {
	return c*t/d + b
}

// LinearOut easing
func LinearOut(t, b, c, d float32) float32 {
	return c*t/d + b
}

// LinearInOut easing
func LinearInOut(t, b, c, d float32) float32 {
	return c*t/d + b
}
// Sine Easing functions
// SineIn easing: slow start, accelerating along a quarter cosine wave.
func SineIn(t, b, c, d float32) float32 {
	return -c*float32(math.Cos(float64(t/d)*(math.Pi/2))) + c + b
}

// SineOut easing: fast start, decelerating along a quarter sine wave.
func SineOut(t, b, c, d float32) float32 {
	return c*float32(math.Sin(float64(t/d)*(math.Pi/2))) + b
}

// SineInOut easing: accelerate then decelerate over a half cosine wave.
func SineInOut(t, b, c, d float32) float32 {
	return -c/2*(float32(math.Cos(math.Pi*float64(t/d)))-1) + b
}
// Circular Easing functions
// CircIn easing: quarter-circle arc, slow start: -c*(sqrt(1-(t/d)^2)-1)+b.
func CircIn(t, b, c, d float32) float32 {
	t = t / d
	return -c*(float32(math.Sqrt(float64(1-t*t)))-1) + b
}
// CircOut easing
func CircOut(t, b, c, d float32) float32 {
return c*float32(math.Sqrt(1-float64((t/d-1)*t))) + b
}
// CircInOut easing: circular ease-in on the first half of the duration,
// circular ease-out on the second half.
func CircInOut(t, b, c, d float32) float32 {
	t = t / d * 2
	if t < 1 {
		return -c/2*(float32(math.Sqrt(float64(1-t*t)))-1) + b
	}
	t = t - 2
	return c/2*(float32(math.Sqrt(1-float64(t*t)))+1) + b
}
// Cubic Easing functions
// CubicIn easing: c*(t/d)^3 + b.
func CubicIn(t, b, c, d float32) float32 {
	t = t / d
	return c*t*t*t + b
}

// CubicOut easing: c*((t/d-1)^3+1) + b.
func CubicOut(t, b, c, d float32) float32 {
	t = t/d - 1
	return c*(t*t*t+1) + b
}

// CubicInOut easing: cubic ease-in then ease-out across the two halves.
func CubicInOut(t, b, c, d float32) float32 {
	t = t / d * 2
	if t < 1 {
		return (c/2*t*t*t + b)
	}
	t = t - 2
	return c/2*(t*t*t+2) + b
}
// Quadratic Easing functions
// QuadIn easing: c*(t/d)^2 + b.
func QuadIn(t, b, c, d float32) float32 {
	t = t / d
	return c*t*t + b
}

// QuadOut easing: -c*(t/d)*((t/d)-2) + b.
func QuadOut(t, b, c, d float32) float32 {
	t = t / d
	return (-c*t*(t-2) + b)
}

// QuadInOut easing: quadratic ease-in then ease-out across the two halves.
func QuadInOut(t, b, c, d float32) float32 {
	t = t / d * 2
	if t < 1 {
		return ((c / 2) * (t * t)) + b
	}
	return -c/2*((t-1)*(t-3)-1) + b
}
// Exponential Easing functions
// ExpoIn easing: c*2^(10*(t/d-1)) + b; the t==0 guard pins the start exactly
// at b (the exponential itself never quite reaches it).
func ExpoIn(t, b, c, d float32) float32 {
	if t == 0 {
		return b
	}
	return (c*float32(math.Pow(2, 10*float64(t/d-1))) + b)
}

// ExpoOut easing: c*(1-2^(-10*t/d)) + b; the t==d guard pins the end at b+c.
func ExpoOut(t, b, c, d float32) float32 {
	if t == d {
		return (b + c)
	}
	return c*(-float32(math.Pow(2, -10*float64(t/d)))+1) + b
}

// ExpoInOut easing: exponential ease-in then ease-out; both endpoints are
// pinned exactly.
func ExpoInOut(t, b, c, d float32) float32 {
	if t == 0 {
		return b
	}
	if t == d {
		return (b + c)
	}
	t = t / d * 2
	if t < 1 {
		return (c/2*float32(math.Pow(2, 10*float64(t-1))) + b)
	}
	t = t - 1
	return (c/2*(-float32(math.Pow(2, -10*float64(t)))+2) + b)
}
// Back Easing functions
// s = 1.70158 is Penner's overshoot constant, tuned for a 10% overshoot.
// BackIn easing: pulls slightly backwards before moving forward.
func BackIn(t, b, c, d float32) float32 {
	s := float32(1.70158)
	t = t / d
	return c*t*t*((s+1)*t-s) + b
}

// BackOut easing: overshoots the target slightly, then settles back.
func BackOut(t, b, c, d float32) float32 {
	s := float32(1.70158)
	t = t/d - 1
	return c*(t*t*((s+1)*t+s)+1) + b
}

// BackInOut easing: back ease-in then ease-out; the overshoot constant is
// scaled by 1.525 to keep the overshoot consistent across the halves.
func BackInOut(t, b, c, d float32) float32 {
	s := float32(1.70158)
	s = s * 1.525
	t = t / d * 2
	if t < 1 {
		return c/2*(t*t*((s+1)*t-s)) + b
	}
	t = t - 2
	return c/2*(t*t*((s+1)*t+s)+2) + b
}
// Bounce Easing functions
// BounceIn easing: a time-reversed BounceOut.
func BounceIn(t, b, c, d float32) float32 {
	return (c - BounceOut(d-t, 0, c, d) + b)
}

// BounceOut easing: four parabolic arcs of decreasing height, split at
// 1/2.75, 2/2.75 and 2.5/2.75 of the normalized time.
func BounceOut(t, b, c, d float32) float32 {
	t = t / d
	if t < (1 / 2.75) {
		return (c*(7.5625*t*t) + b)
	} else if t < (2 / 2.75) {
		t = t - (1.5 / 2.75)
		return c*(7.5625*t*t+0.75) + b
	} else if t < (2.5 / 2.75) {
		t = t - (2.25 / 2.75)
		return c*(7.5625*t*t+0.9375) + b
	}
	t = t - (2.625 / 2.75)
	return c*(7.5625*t*t+0.984375) + b
}

// BounceInOut easing: BounceIn for the first half, BounceOut for the second.
func BounceInOut(t, b, c, d float32) float32 {
	if t < d/2 {
		return BounceIn(t*2, 0, c, d)*0.5 + b
	}
	return BounceOut(t*2-d, 0, c, d)*0.5 + c*0.5 + b
}
// Elastic Easing functions
// ElasticIn easing
func ElasticIn(t, b, c, d float32) float32 {
if t == 0 {
return b
}
t = t / d
if t == 1 {
return b + c
}
p := d * 0.3
a := c
s := p / 4
postFix := a * float32(math.Pow(2, 10*float64(t-1)))
return -(postFix * float32(math.Sin(float64(t*d-s)*(2*math.Pi)/float64(p)))) + b
}
// ElasticOut easing: an exponentially decaying, damped sine oscillation that
// overshoots and settles at b+c; the endpoints are pinned by the guards.
func ElasticOut(t, b, c, d float32) float32 {
	if t == 0 {
		return b
	}
	t = t / d
	if t == 1 {
		return b + c
	}
	// p is the oscillation period, a the amplitude, s the phase shift.
	p := d * 0.3
	a := c
	s := p / 4
	return a*float32(math.Pow(2, -10*float64(t)))*float32(math.Sin(float64(t*d-s)*(2*math.Pi)/float64(p))) + c + b
}
// ElasticInOut easing: elastic ease-in over the first half of the duration,
// elastic ease-out over the second; note the longer period (0.3*1.5).
func ElasticInOut(t, b, c, d float32) float32 {
	if t == 0 {
		return b
	}
	t = t / d * 2
	if t == 2 {
		return b + c
	}
	p := d * (0.3 * 1.5)
	a := c
	s := p / 4
	if t < 1 {
		t = t - 1
		postFix := a * float32(math.Pow(2, 10*float64(t)))
		return -0.5*(postFix*float32(math.Sin(float64(t*d-s)*(2*math.Pi)/float64(p)))) + b
	}
	t = t - 1
	postFix := a * float32(math.Pow(2, -10*(float64(t))))
	return postFix*float32(math.Sin(float64(t*d-s)*(2*math.Pi)/float64(p)))*0.5 + c + b
} | easings/easings.go | 0.830319 | 0.579638 | easings.go | starcoder
package assert
import (
"encoding/json"
)
// Equal checks if values equal to each other
func Equal(left, right interface{}) bool {
	return isCompareTrue(equal, left, right)
}

// NotEqual checks if values not equal to each other
func NotEqual(left, right interface{}) bool {
	return isCompareTrue(notEqual, left, right)
}

// LessThan checks if value "left" less than value "right"
func LessThan(left, right interface{}) bool {
	return isCompareTrue(lessThan, left, right)
}

// GreaterThan checks if value "left" greater than value "right"
func GreaterThan(left, right interface{}) bool {
	return isCompareTrue(greaterThan, left, right)
}

// LessEqual checks if value "left" less than value "right" or equal to it
func LessEqual(left, right interface{}) bool {
	return isCompareTrue(lessEqual, left, right)
}

// GreaterEqual checks if value "left" greater than value "right" or equal to it
func GreaterEqual(left, right interface{}) bool {
	return isCompareTrue(greaterEqual, left, right)
}

// In checks if array value "right" contains value "left"
func In(left, right interface{}) bool {
	return isCompareTrue(in, left, right)
}

// NotIn checks if array value "right" not contains value "left"
func NotIn(left, right interface{}) bool {
	return isCompareTrue(notIn, left, right)
}

// Comparison operator codes dispatched on by isCompareTrue.
const (
	equal = iota
	notEqual
	lessThan
	greaterThan
	lessEqual
	greaterEqual
	in
	notIn
)
// isCompareTrue reports whether the given comparison holds between
// valueLeft and valueRight.
//
// Supported left-hand types: json.Number, any numeric scalar type, string
// and bool. Numeric values (including json.Number) are normalized to
// float64 before comparing. For the in/notIn comparisons the right-hand
// value must be a slice ([]interface{}, a numeric slice, []json.Number or
// []string). Any unsupported type combination yields false, except notIn
// over a recognized slice type, which yields true when no element matches.
//
// Note: json.Number left-hand values now also support numeric-slice
// right-hand values, matching the behavior already provided for plain
// numeric left-hand values (previously this combination always returned
// false).
func isCompareTrue(comparison uint8, valueLeft, valueRight interface{}) bool {
	// Two nils are considered equal; any other comparison on nils fails.
	if valueLeft == nil && valueRight == nil && comparison == equal {
		return true
	}
	switch vLeft := valueLeft.(type) {
	case json.Number:
		left, err := vLeft.Float64()
		if err != nil {
			return false
		}
		return compareFloatWith(comparison, left, valueRight)
	case float32, float64, int, uint, int8, uint8, int16, uint16, int32, uint32, int64, uint64:
		return compareFloatWith(comparison, float(valueLeft), valueRight)
	case string:
		return compareStringWith(comparison, vLeft, valueRight)
	case bool:
		// Booleans only support (in)equality.
		if right, ok := valueRight.(bool); ok {
			switch comparison {
			case equal:
				return vLeft == right
			case notEqual:
				return vLeft != right
			}
		}
	}
	return false
}

// compareFloats applies an ordering comparison to two float64 values.
// Membership comparisons (in/notIn) always yield false here.
func compareFloats(comparison uint8, left, right float64) bool {
	switch comparison {
	case equal:
		return left == right
	case notEqual:
		return left != right
	case lessThan:
		return left < right
	case greaterThan:
		return left > right
	case lessEqual:
		return left <= right
	case greaterEqual:
		return left >= right
	}
	return false
}

// compareFloatWith compares a normalized numeric left value against an
// arbitrary right value: a scalar for ordering comparisons, or a slice for
// the in/notIn membership comparisons.
func compareFloatWith(comparison uint8, left float64, valueRight interface{}) bool {
	switch vRight := valueRight.(type) {
	case json.Number:
		if right, err := vRight.Float64(); err == nil {
			return compareFloats(comparison, left, right)
		}
	case float32, float64, int, uint, int8, uint8, int16, uint16, int32, uint32, int64, uint64:
		return compareFloats(comparison, left, float(valueRight))
	case []interface{}:
		switch comparison {
		case in:
			return interfaceSliceContainsFloat(vRight, left)
		case notIn:
			return !interfaceSliceContainsFloat(vRight, left)
		}
	case []float32, []float64, []int, []uint, []int8, []uint8, []int16, []uint16, []int32, []uint32, []int64, []uint64:
		switch comparison {
		case in:
			return floatSliceContains(floatArray(valueRight), left)
		case notIn:
			return !floatSliceContains(floatArray(valueRight), left)
		}
	case []json.Number:
		switch comparison {
		case in:
			return numberSliceContains(vRight, left)
		case notIn:
			return !numberSliceContains(vRight, left)
		}
	}
	return false
}

// compareStringWith compares a string left value against an arbitrary
// right value: a string for ordering comparisons, or a slice for the
// in/notIn membership comparisons.
func compareStringWith(comparison uint8, left string, valueRight interface{}) bool {
	switch vRight := valueRight.(type) {
	case string:
		switch comparison {
		case equal:
			return left == vRight
		case notEqual:
			return left != vRight
		case lessThan:
			return left < vRight
		case greaterThan:
			return left > vRight
		case lessEqual:
			return left <= vRight
		case greaterEqual:
			return left >= vRight
		}
	case []interface{}:
		switch comparison {
		case in:
			return interfaceSliceContainsString(vRight, left)
		case notIn:
			return !interfaceSliceContainsString(vRight, left)
		}
	case []string:
		switch comparison {
		case in:
			return stringSliceContains(vRight, left)
		case notIn:
			return !stringSliceContains(vRight, left)
		}
	}
	return false
}

// interfaceSliceContainsFloat reports whether values contains a numeric
// element (json.Number or numeric scalar) equal to target. Elements of
// other types are ignored.
func interfaceSliceContainsFloat(values []interface{}, target float64) bool {
	for _, v := range values {
		switch value := v.(type) {
		case json.Number:
			if item, err := value.Float64(); err == nil && item == target {
				return true
			}
		case float32, float64, int, uint, int8, uint8, int16, uint16, int32, uint32, int64, uint64:
			if float(v) == target {
				return true
			}
		}
	}
	return false
}

// numberSliceContains reports whether values contains a parseable
// json.Number equal to target.
func numberSliceContains(values []json.Number, target float64) bool {
	for _, value := range values {
		if item, err := value.Float64(); err == nil && item == target {
			return true
		}
	}
	return false
}

// floatSliceContains reports whether values contains target.
func floatSliceContains(values []float64, target float64) bool {
	for _, v := range values {
		if v == target {
			return true
		}
	}
	return false
}

// interfaceSliceContainsString reports whether values contains a string
// element equal to target. Elements of other types are ignored.
func interfaceSliceContainsString(values []interface{}, target string) bool {
	for _, v := range values {
		if item, ok := v.(string); ok && item == target {
			return true
		}
	}
	return false
}

// stringSliceContains reports whether values contains target.
func stringSliceContains(values []string, target string) bool {
	for _, v := range values {
		if v == target {
			return true
		}
	}
	return false
}
// float converts any supported numeric scalar to float64. It panics when
// value is not one of the supported numeric types; callers are expected to
// have type-switched on the value beforehand.
func float(value interface{}) float64 {
	switch n := value.(type) {
	case float64:
		return n
	case float32:
		return float64(n)
	case int:
		return float64(n)
	case int8:
		return float64(n)
	case int16:
		return float64(n)
	case int32:
		return float64(n)
	case int64:
		return float64(n)
	case uint:
		return float64(n)
	case uint8:
		return float64(n)
	case uint16:
		return float64(n)
	case uint32:
		return float64(n)
	case uint64:
		return float64(n)
	}
	panic("never happen in that implementation")
}
// floatArray converts any supported numeric slice to a []float64.
// Unsupported input types yield a nil slice.
func floatArray(value interface{}) []float64 {
	var result []float64
	switch v := value.(type) {
	case []float32:
		for _, item := range v {
			result = append(result, float64(item))
		}
	case []float64:
		for _, item := range v {
			result = append(result, float64(item))
		}
	case []int:
		for _, item := range v {
			result = append(result, float64(item))
		}
	case []uint:
		for _, item := range v {
			result = append(result, float64(item))
		}
	case []int8:
		for _, item := range v {
			result = append(result, float64(item))
		}
	case []uint8:
		for _, item := range v {
			result = append(result, float64(item))
		}
	case []int16:
		for _, item := range v {
			result = append(result, float64(item))
		}
	case []uint16:
		for _, item := range v {
			result = append(result, float64(item))
		}
	case []int32:
		for _, item := range v {
			result = append(result, float64(item))
		}
	case []uint32:
		for _, item := range v {
			result = append(result, float64(item))
		}
	case []int64:
		for _, item := range v {
			result = append(result, float64(item))
		}
	case []uint64:
		for _, item := range v {
			result = append(result, float64(item))
		}
	}
	return result
}
package driver
// AuthenticationType identifies the mechanism used to authenticate
// against the server.
type AuthenticationType int

const (
	// AuthenticationTypeBasic uses username+password basic authentication
	AuthenticationTypeBasic AuthenticationType = iota
	// AuthenticationTypeJWT uses username+password JWT token based authentication
	AuthenticationTypeJWT
	// AuthenticationTypeRaw uses a raw value for the Authorization header
	AuthenticationTypeRaw
)
// Authentication implements a kind of authentication.
type Authentication interface {
	// Type returns the type of authentication.
	Type() AuthenticationType
	// Get returns a configuration property of the authentication.
	// Supported properties depend on type of authentication; unknown
	// properties yield the empty string.
	Get(property string) string
}
// BasicAuthentication creates an Authentication that performs HTTP Basic
// authentication with the given username & password.
func BasicAuthentication(userName, password string) Authentication {
	auth := userNameAuthentication{
		authType: AuthenticationTypeBasic,
		userName: userName,
		password: password,
	}
	return &auth
}
// JWTAuthentication creates an Authentication that performs JWT token
// based authentication with the given username & password.
func JWTAuthentication(userName, password string) Authentication {
	auth := userNameAuthentication{
		authType: AuthenticationTypeJWT,
		userName: userName,
		password: password,
	}
	return &auth
}
// userNameAuthentication implements username/password based
// authentication. It backs both the Basic and JWT authentication types;
// the concrete behavior is selected by authType.
type userNameAuthentication struct {
	authType AuthenticationType
	userName string
	password string
}
// Type returns the type of authentication (Basic or JWT).
func (a *userNameAuthentication) Type() AuthenticationType {
	return a.authType
}
// Get returns a configuration property of the authentication.
// Supported properties are "username" and "password"; any other property
// yields the empty string.
func (a *userNameAuthentication) Get(property string) string {
	if property == "username" {
		return a.userName
	}
	if property == "password" {
		return a.password
	}
	return ""
}
// RawAuthentication creates an Authentication that passes the given value
// verbatim as the Authorization header.
func RawAuthentication(value string) Authentication {
	auth := rawAuthentication{value: value}
	return &auth
}
// rawAuthentication implements Raw authentication: value is sent verbatim
// as the Authorization header.
type rawAuthentication struct {
	value string
}
// Type returns the type of authentication (always AuthenticationTypeRaw).
func (a *rawAuthentication) Type() AuthenticationType {
	return AuthenticationTypeRaw
}
// Get returns a configuration property of the authentication.
// The only supported property is "value" (the raw Authorization header);
// any other property yields the empty string.
func (a *rawAuthentication) Get(property string) string {
	switch property {
	case "value":
		return a.value
	default:
		return ""
	}
} | deps/github.com/arangodb/go-driver/authentication.go | 0.784649 | 0.406833 | authentication.go | starcoder |
package api
import (
"encoding/json"
)
// Cell describes a single cell-tower observation. All fields are optional
// pointers so that "unset" can be distinguished from the zero value and
// omitted from the JSON output.
type Cell struct {
	// The Cell Identity (for 2G and 3G networks), a 16 bit value represented in decimal form as an integer. (See 3GPP TS 23.003 4.3)
	Ci *int32 `json:"ci,omitempty"`
	// The E-UTRAN Cell Identifier (for LTE networks), a 28 bit value represented in decimal form as a long. (See 3GPP TS 23.003 19.6)
	Eci *int32 `json:"eci,omitempty"`
	// The Location Area Code (for 2G and 3G networks), a 16 bit value represented in decimal form as an integer. (See 3GPP TS 23.003 4.1)
	Lac *int32 `json:"lac,omitempty"`
	// The Mobile Country Code, a 3 digit number.
	Mcc *int32 `json:"mcc,omitempty"`
	// The Mobile Network Code, a 2 or 3 digit number. If the value returned is only 1 digit in length, then you should prepend the value with a leading zero.
	Mnc *int32 `json:"mnc,omitempty"`
	// The Routing Area Code (for 2G and 3G networks), an 8 bit value represented in decimal form as an integer. (See 3GPP TS 23.003 4.2)
	Rac *int32 `json:"rac,omitempty"`
	// The Radio Access Technology or type of network that the device is connected to. Possible values are \"gsm\" for 2G or 3G networks, or \"lte\" for LTE networks. Unfortunately, it is not possible to differentiate 2G from 3G, or LTE from LTE Cat-M1.
	RadioType *string `json:"radioType,omitempty"`
	// The Service Area Code (for 2G and 3G networks), a 16 bit value represented in decimal form as an integer. (See 3GPP TS 23.003 12.5)
	Sac *int32 `json:"sac,omitempty"`
	// The Tracking Area Code (for LTE networks), a 16 bit value represented in decimal form as an integer. (See 3GPP TS 23.003 19.4.2.3)
	Tac *int32 `json:"tac,omitempty"`
}
// NewCell instantiates a new Cell object.
// This constructor will assign default values to properties that have it
// defined, and makes sure properties required by API are set.
func NewCell() *Cell {
	return &Cell{}
}
// NewCellWithDefaults instantiates a new Cell object.
// This constructor will only assign default values to properties that
// have it defined, but it doesn't guarantee that properties required by
// API are set.
func NewCellWithDefaults() *Cell {
	return &Cell{}
}
// GetCi returns the Ci field value if set, zero value otherwise.
func (o *Cell) GetCi() int32 {
	if o == nil || o.Ci == nil {
		var ret int32
		return ret
	}
	return *o.Ci
}

// GetCiOk returns a tuple with the Ci field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Cell) GetCiOk() (*int32, bool) {
	if o == nil || o.Ci == nil {
		return nil, false
	}
	return o.Ci, true
}

// HasCi reports whether the Ci field has been set.
func (o *Cell) HasCi() bool {
	// Simplified from if cond { return true } return false (staticcheck S1008).
	return o != nil && o.Ci != nil
}

// SetCi gets a reference to the given int32 and assigns it to the Ci field.
func (o *Cell) SetCi(v int32) {
	o.Ci = &v
}
// GetEci returns the Eci field value if set, zero value otherwise.
func (o *Cell) GetEci() int32 {
	if o == nil || o.Eci == nil {
		var ret int32
		return ret
	}
	return *o.Eci
}

// GetEciOk returns a tuple with the Eci field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Cell) GetEciOk() (*int32, bool) {
	if o == nil || o.Eci == nil {
		return nil, false
	}
	return o.Eci, true
}

// HasEci reports whether the Eci field has been set.
func (o *Cell) HasEci() bool {
	// Simplified from if cond { return true } return false (staticcheck S1008).
	return o != nil && o.Eci != nil
}

// SetEci gets a reference to the given int32 and assigns it to the Eci field.
func (o *Cell) SetEci(v int32) {
	o.Eci = &v
}
// GetLac returns the Lac field value if set, zero value otherwise.
func (o *Cell) GetLac() int32 {
	if o == nil || o.Lac == nil {
		var ret int32
		return ret
	}
	return *o.Lac
}

// GetLacOk returns a tuple with the Lac field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Cell) GetLacOk() (*int32, bool) {
	if o == nil || o.Lac == nil {
		return nil, false
	}
	return o.Lac, true
}

// HasLac reports whether the Lac field has been set.
func (o *Cell) HasLac() bool {
	// Simplified from if cond { return true } return false (staticcheck S1008).
	return o != nil && o.Lac != nil
}

// SetLac gets a reference to the given int32 and assigns it to the Lac field.
func (o *Cell) SetLac(v int32) {
	o.Lac = &v
}
// GetMcc returns the Mcc field value if set, zero value otherwise.
func (o *Cell) GetMcc() int32 {
	if o == nil || o.Mcc == nil {
		var ret int32
		return ret
	}
	return *o.Mcc
}

// GetMccOk returns a tuple with the Mcc field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Cell) GetMccOk() (*int32, bool) {
	if o == nil || o.Mcc == nil {
		return nil, false
	}
	return o.Mcc, true
}

// HasMcc reports whether the Mcc field has been set.
func (o *Cell) HasMcc() bool {
	// Simplified from if cond { return true } return false (staticcheck S1008).
	return o != nil && o.Mcc != nil
}

// SetMcc gets a reference to the given int32 and assigns it to the Mcc field.
func (o *Cell) SetMcc(v int32) {
	o.Mcc = &v
}
// GetMnc returns the Mnc field value if set, zero value otherwise.
func (o *Cell) GetMnc() int32 {
	if o == nil || o.Mnc == nil {
		var ret int32
		return ret
	}
	return *o.Mnc
}

// GetMncOk returns a tuple with the Mnc field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Cell) GetMncOk() (*int32, bool) {
	if o == nil || o.Mnc == nil {
		return nil, false
	}
	return o.Mnc, true
}

// HasMnc reports whether the Mnc field has been set.
func (o *Cell) HasMnc() bool {
	// Simplified from if cond { return true } return false (staticcheck S1008).
	return o != nil && o.Mnc != nil
}

// SetMnc gets a reference to the given int32 and assigns it to the Mnc field.
func (o *Cell) SetMnc(v int32) {
	o.Mnc = &v
}
// GetRac returns the Rac field value if set, zero value otherwise.
func (o *Cell) GetRac() int32 {
	if o == nil || o.Rac == nil {
		var ret int32
		return ret
	}
	return *o.Rac
}

// GetRacOk returns a tuple with the Rac field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Cell) GetRacOk() (*int32, bool) {
	if o == nil || o.Rac == nil {
		return nil, false
	}
	return o.Rac, true
}

// HasRac reports whether the Rac field has been set.
func (o *Cell) HasRac() bool {
	// Simplified from if cond { return true } return false (staticcheck S1008).
	return o != nil && o.Rac != nil
}

// SetRac gets a reference to the given int32 and assigns it to the Rac field.
func (o *Cell) SetRac(v int32) {
	o.Rac = &v
}
// GetRadioType returns the RadioType field value if set, zero value otherwise.
func (o *Cell) GetRadioType() string {
	if o == nil || o.RadioType == nil {
		var ret string
		return ret
	}
	return *o.RadioType
}

// GetRadioTypeOk returns a tuple with the RadioType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Cell) GetRadioTypeOk() (*string, bool) {
	if o == nil || o.RadioType == nil {
		return nil, false
	}
	return o.RadioType, true
}

// HasRadioType reports whether the RadioType field has been set.
func (o *Cell) HasRadioType() bool {
	// Simplified from if cond { return true } return false (staticcheck S1008).
	return o != nil && o.RadioType != nil
}

// SetRadioType gets a reference to the given string and assigns it to the RadioType field.
func (o *Cell) SetRadioType(v string) {
	o.RadioType = &v
}
// GetSac returns the Sac field value if set, zero value otherwise.
func (o *Cell) GetSac() int32 {
	if o == nil || o.Sac == nil {
		var ret int32
		return ret
	}
	return *o.Sac
}

// GetSacOk returns a tuple with the Sac field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Cell) GetSacOk() (*int32, bool) {
	if o == nil || o.Sac == nil {
		return nil, false
	}
	return o.Sac, true
}

// HasSac reports whether the Sac field has been set.
func (o *Cell) HasSac() bool {
	// Simplified from if cond { return true } return false (staticcheck S1008).
	return o != nil && o.Sac != nil
}

// SetSac gets a reference to the given int32 and assigns it to the Sac field.
func (o *Cell) SetSac(v int32) {
	o.Sac = &v
}
// GetTac returns the Tac field value if set, zero value otherwise.
func (o *Cell) GetTac() int32 {
	if o == nil || o.Tac == nil {
		var ret int32
		return ret
	}
	return *o.Tac
}

// GetTacOk returns a tuple with the Tac field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Cell) GetTacOk() (*int32, bool) {
	if o == nil || o.Tac == nil {
		return nil, false
	}
	return o.Tac, true
}

// HasTac reports whether the Tac field has been set.
func (o *Cell) HasTac() bool {
	// Simplified from if cond { return true } return false (staticcheck S1008).
	return o != nil && o.Tac != nil
}

// SetTac gets a reference to the given int32 and assigns it to the Tac field.
func (o *Cell) SetTac(v int32) {
	o.Tac = &v
}
// MarshalJSON encodes only the fields that have been set.
func (o Cell) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	// setInt adds an optional int32 field to the output when it is set.
	setInt := func(key string, v *int32) {
		if v != nil {
			toSerialize[key] = v
		}
	}
	setInt("ci", o.Ci)
	setInt("eci", o.Eci)
	setInt("lac", o.Lac)
	setInt("mcc", o.Mcc)
	setInt("mnc", o.Mnc)
	setInt("rac", o.Rac)
	if o.RadioType != nil {
		toSerialize["radioType"] = o.RadioType
	}
	setInt("sac", o.Sac)
	setInt("tac", o.Tac)
	return json.Marshal(toSerialize)
}
// NullableCell wraps a Cell pointer together with an explicit "set" flag,
// distinguishing an absent value from an explicit JSON null.
type NullableCell struct {
	value *Cell
	isSet bool
}

// Get returns the wrapped Cell (may be nil).
func (v NullableCell) Get() *Cell {
	return v.value
}

// Set stores val and marks the value as set.
func (v *NullableCell) Set(val *Cell) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value (possibly nil) has been stored.
func (v NullableCell) IsSet() bool {
	return v.isSet
}

// Unset clears the stored value and the set flag.
func (v *NullableCell) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableCell returns a NullableCell holding val, marked as set.
func NewNullableCell(val *Cell) *NullableCell {
	return &NullableCell{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (null when nil).
func (v NullableCell) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes src into the wrapped value and marks it as set.
func (v *NullableCell) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package skyhook
import (
"encoding/binary"
"encoding/json"
"io"
"os"
"strings"
)
// DataType identifies one of the supported data types.
type DataType string

// Supported data types. Every constant is explicitly typed as DataType;
// previously only ImageType was, leaving the rest as untyped string
// constants.
const (
	ImageType     DataType = "image"
	VideoType     DataType = "video"
	DetectionType DataType = "detection"
	ShapeType     DataType = "shape"
	IntType       DataType = "int"
	FloatsType    DataType = "floats"
	ImListType    DataType = "imlist"
	TextType      DataType = "text"
	StringType    DataType = "string"
	ArrayType     DataType = "array"
	FileType      DataType = "file"
	TableType     DataType = "table"
	GeoImageType  DataType = "geoimage"
	GeoJsonType   DataType = "geojson"
)
var DataTypes = map[DataType]string{
ImageType: "Image",
VideoType: "Video",
DetectionType: "Detection",
ShapeType: "Shape",
IntType: "Int",
FloatsType: "Floats",
ImListType: "Image List",
TextType: "Text",
StringType: "String",
ArrayType: "Array",
FileType: "File",
TableType: "Table",
GeoImageType: "Geo-Image",
GeoJsonType: "GeoJSON",
}
// EncodeTypes serializes a list of data types as a comma-separated string.
func EncodeTypes(types []DataType) string {
	var b strings.Builder
	for i, t := range types {
		if i > 0 {
			b.WriteByte(',')
		}
		b.WriteString(string(t))
	}
	return b.String()
}
// DecodeTypes parses a comma-separated string produced by EncodeTypes.
// Empty components are skipped, so "" decodes to a nil slice.
func DecodeTypes(s string) []DataType {
	var types []DataType
	for _, part := range strings.Split(s, ",") {
		if part != "" {
			types = append(types, DataType(part))
		}
	}
	return types
}
// Metadata can mostly be anything but must support Update.
type DataMetadata interface {
// Produce new metadata where fields that are specified by other overwrite
// fields in the current metadata.
Update(other DataMetadata) DataMetadata
}
// Specifies a data type.
type DataSpec interface {
// Decode JSON-encoded metadata into a metadata object.
DecodeMetadata(rawMetadata string) DataMetadata
// Read data that has been written via WriteStream.
ReadStream(r io.Reader) (data interface{}, err error)
// Write data for reading via ReadStream.
WriteStream(data interface{}, w io.Writer) error
// Read data from storage.
// format is the Item.Format which data types can use to describe how the data is stored.
Read(format string, metadata DataMetadata, r io.Reader) (data interface{}, err error)
// Write data to storage.
Write(data interface{}, format string, metadata DataMetadata, w io.Writer) error
// Given some data, return a suggested file extension and format to store it with.
GetDefaultExtAndFormat(data interface{}, metadata DataMetadata) (ext string, format string)
}
type FileDataSpec interface {
DataSpec
// Read data directly from a file.
ReadFile(format string, metadata DataMetadata, fname string) (data interface{}, err error)
// Write data directly to a file.
WriteFile(data interface{}, format string, metadata DataMetadata, fname string) error
}
type MetadataFromFileDataSpec interface {
DataSpec
// Given a filename, which should correspond to an actual file stored on disk,
// returns a suitable format and metadata for reading that file.
GetMetadataFromFile(fname string) (format string, metadata DataMetadata, err error)
}
type ExtFromFormatDataSpec interface {
DataSpec
// Given a format, return the standard extension corresponding to the format.
// If a DataSpec doesn't implement this function, callers should use the format
// as the file extension.
GetExtFromFormat(format string) (ext string)
}
// SequenceDataSpec describes sequence data types.
// These are any data types consisting of a sequence of elements.
// For example, Detections are sequences of []Detection, while videos are sequences
// of images.
type SequenceDataSpec interface {
DataSpec
// Initialize a SequenceReader for reading data from storage.
// The SequenceReader should read the data chunk by chunk.
Reader(format string, metadata DataMetadata, r io.Reader) SequenceReader
// Initialize a SequenceWriter to write chunk by chunk to storage.
Writer(format string, metadata DataMetadata, w io.Writer) SequenceWriter
// Slice operations on the sequence data.
Length(data interface{}) int
Append(data interface{}, more interface{}) interface{}
Slice(data interface{}, i int, j int) interface{}
}
// Sequence data types that want to have special functionality when reading from
// local disk can implement FileReader and FileWriter.
type FileSequenceDataSpec interface {
SequenceDataSpec
FileReader(format string, metadata DataMetadata, fname string) SequenceReader
FileWriter(format string, metadata DataMetadata, fname string) SequenceWriter
}
type RandomAccessDataSpec interface {
SequenceDataSpec
// Initialize a SequenceReader that starts reading at index i, and reads up to index j.
ReadSlice(format string, metadata DataMetadata, fname string, i, j int) SequenceReader
}
type SequenceReader interface {
Read(n int) (interface{}, error)
Close()
}
type SequenceWriter interface {
Write(data interface{}) error
Close() error
}
// DataSpecs maps each DataType to its registered DataSpec implementation.
var DataSpecs = make(map[DataType]DataSpec)

// ReadData reads data of the given type from r using the registered
// DataSpec for t. format and metadata describe how the data is stored.
func ReadData(t DataType, format string, metadata DataMetadata, r io.Reader) (data interface{}, err error) {
	return DataSpecs[t].Read(format, metadata, r)
}
// DecodeFile reads data of the given type directly from the file at fname.
// If the type's spec supports native file reads, that path is used;
// otherwise the file is opened and streamed through the generic Read path.
func DecodeFile(t DataType, format string, metadata DataMetadata, fname string) (data interface{}, err error) {
	spec := DataSpecs[t]
	if fs, ok := spec.(FileDataSpec); ok {
		return fs.ReadFile(format, metadata, fname)
	}
	f, err := os.Open(fname)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return spec.Read(format, metadata, f)
}
// WriteJsonData writes x to w JSON-encoded, preceded by a 4-byte
// big-endian length prefix so the payload can be framed by ReadJsonData.
func WriteJsonData(x interface{}, w io.Writer) error {
	bytes := JsonMarshal(x)
	blen := make([]byte, 4)
	binary.BigEndian.PutUint32(blen, uint32(len(bytes)))
	// Previously the error from writing the length prefix was discarded,
	// which could silently produce a truncated/corrupt stream.
	if _, err := w.Write(blen); err != nil {
		return err
	}
	_, err := w.Write(bytes)
	return err
}
// Reads data that was written by WriteJsonData.
func ReadJsonData(r io.Reader, x interface{}) error {
blen := make([]byte, 4)
if _, err := io.ReadFull(r, blen); err != nil {
return err
}
bytes := make([]byte, binary.BigEndian.Uint32(blen))
if _, err := io.ReadFull(r, bytes); err != nil {
return err
}
return json.Unmarshal(bytes, x)
}
// NoMetadata is a DataMetadata for data types that carry no metadata.
type NoMetadata struct{}

// Update implements DataMetadata; with no fields there is nothing to merge.
func (m NoMetadata) Update(other DataMetadata) DataMetadata { return m }
// GetExtFromFormat forwards to ExtFromFormatDataSpec.GetExtFromFormat when
// the registered spec implements it, and returns "" otherwise (callers
// then use the format itself as the extension).
func GetExtFromFormat(dtype DataType, format string) string {
	if extSpec, ok := DataSpecs[dtype].(ExtFromFormatDataSpec); ok {
		return extSpec.GetExtFromFormat(format)
	}
	return ""
}
// SequenceReader/SequenceWriter that return errors.
type ErrorSequenceReader struct {
Error error
}
func (r ErrorSequenceReader) Read(n int) (interface{}, error) {
return nil, r.Error
}
func (r ErrorSequenceReader) Close() {}
type ErrorSequenceWriter struct {
Error error
}
func (w ErrorSequenceWriter) Write(data interface{}) error {
return w.Error
}
func (w ErrorSequenceWriter) Close() error {
return w.Error
}
// SequenceReader for sequence data that has already been read into memory.
type SliceReader struct {
Data interface{}
Spec SequenceDataSpec
pos int
}
func (r *SliceReader) Read(n int) (interface{}, error) {
remaining := r.Spec.Length(r.Data) - r.pos
if remaining <= 0 {
return nil, io.EOF
}
if remaining < n {
n = remaining
}
data := r.Spec.Slice(r.Data, r.pos, r.pos+n)
r.pos += n
return data, nil
}
func (r *SliceReader) Close() {}
// NewSliceReader reads all sequence data from r eagerly and returns a
// SequenceReader over it; read failures yield an ErrorSequenceReader.
func NewSliceReader(spec SequenceDataSpec, format string, metadata DataMetadata, r io.Reader) SequenceReader {
	data, err := spec.Read(format, metadata, r)
	if err != nil {
		return ErrorSequenceReader{Error: err}
	}
	return &SliceReader{Spec: spec, Data: data}
}
// SequenceWriter that stores everything in-memory until Close.
type SliceWriter struct {
Spec SequenceDataSpec
Format string
Metadata DataMetadata
Writer io.Writer
data interface{}
}
func (w *SliceWriter) Write(data interface{}) error {
if w.data == nil {
w.data = data
} else {
w.data = w.Spec.Append(w.data, data)
}
return nil
}
func (w *SliceWriter) Close() error {
return w.Spec.Write(w.data, w.Format, w.Metadata, w.Writer)
}
// SequenceReader that closes an io.ReadCloser on Close.
type ClosingSequenceReader struct {
ReadCloser io.ReadCloser
Reader SequenceReader
}
func (r ClosingSequenceReader) Read(n int) (interface{}, error) {
return r.Reader.Read(n)
}
func (r ClosingSequenceReader) Close() {
r.Reader.Close()
r.ReadCloser.Close()
}
// SequenceWriter that closses an io.WriteCloser on Close.
type ClosingSequenceWriter struct {
WriteCloser io.WriteCloser
Writer SequenceWriter
}
func (w ClosingSequenceWriter) Write(data interface{}) error {
return w.Writer.Write(data)
}
func (w ClosingSequenceWriter) Close() error {
err1 := w.Writer.Close()
err2 := w.WriteCloser.Close()
if err1 != nil {
return err1
} else if err2 != nil {
return err2
}
return nil
}
package record
import (
"strings"
)
// Record wraps a nested map[string]interface{} ("composite map") and
// provides dotted-path access to its values.
type Record struct {
	data map[string]interface{}
}
// New creates an empty Record which is ready to have values set.
func New() *Record {
	return &Record{data: map[string]interface{}{}}
}
// Init creates a Record whose underlying data is the given map (not a
// copy; later mutations of m are visible through the Record).
func Init(m map[string]interface{}) *Record {
	return &Record{data: m}
}
// AsMap returns the underlying map-of-maps data structure of a Record
// (shared, not a copy).
func (r *Record) AsMap() map[string]interface{} {
	return r.data
}
// Get returns the element at the dotted path and whether it was present.
func (r *Record) Get(path string) (interface{}, bool) {
	return get(r.data, strings.Split(path, "."))
}
// Set inserts x into the Record at the dotted path, creating intermediate
// maps as needed.
func (r *Record) Set(path string, x interface{}) {
	set(r.data, strings.Split(path, "."), x)
}
// Filter returns a new Record that only includes the specified dotted
// paths; paths absent from the Record are simply omitted.
func (r *Record) Filter(paths []string) *Record {
	return Init(filter(r.data, paths))
}
// FilterMap returns a new composite map filtered to include only values
// (and nested maps) specified by the paths array.
func FilterMap(m map[string]interface{}, paths []string) map[string]interface{} {
	return filter(m, paths)
}
// filter produces a new nested-map structure containing only the values
// of m that are reachable via the given dotted paths.
func filter(m map[string]interface{}, paths []string) map[string]interface{} {
	// Create empty composite map.
	s := make(map[string]interface{})
	for _, pathStr := range paths {
		// Don't insert any value into s if the path in m doesn't exist.
		p := strings.Split(pathStr, ".")
		if v, prs := get(m, p); prs {
			set(s, p, v)
		}
	}
	return s
}
// get walks the nested map along path and returns the value at the final
// key plus whether it exists. An empty path, a missing key, or a non-map
// intermediate value yields (nil, false).
func get(m map[string]interface{}, path []string) (interface{}, bool) {
	if len(path) == 0 {
		return nil, false
	}
	current := m
	for i := 0; i < len(path)-1; i++ {
		next, ok := current[path[i]].(map[string]interface{})
		if !ok {
			// Intermediate key is missing or does not hold a nested map.
			return nil, false
		}
		current = next
	}
	x, prs := current[path[len(path)-1]]
	return x, prs
}
// set inserts x into the nested map m at the location named by path,
// creating intermediate maps as needed. Existing intermediate values that
// are not maps are replaced with fresh maps (previously this panicked on a
// failed type assertion). An empty path is a no-op.
func set(m map[string]interface{}, path []string, x interface{}) {
	switch len(path) {
	case 0:
		return
	case 1:
		m[path[0]] = x
	default:
		next, ok := m[path[0]].(map[string]interface{})
		if !ok {
			next = make(map[string]interface{})
			m[path[0]] = next
		}
		set(next, path[1:], x)
	}
}
// NOTE the following functions are not being used for anything.
// func getOld(x interface{}, path []string) (interface{}, bool) {
// l := len(path)
// switch l {
// case 0:
// return x, true
// default:
// switch x.(type) {
// case map[string]interface{}:
// key := path[0]
// newMap := x.(map[string]interface{})[key]
// return getOld(newMap, path[1:])
// default:
// // Invalid path.
// return nil, false
// }
// }
// }
package scraper
import (
"database/sql"
"errors"
"fmt"
"math"
"regexp"
"time"
pq "gitee.com/opengauss/openGauss-connector-go-pq"
"github.com/blang/semver"
"github.com/gogf/gf/util/gconv"
"github.com/prometheus/client_golang/prometheus"
"opengauss_exporter/internal/core/scrape"
"opengauss_exporter/internal/utils"
)
// ColumnUsage should be one of several enum values which describe how a
// queried row is to be converted to a Prometheus metric.
// NOTICE: this part of the code comes from PostgreSQL Exporter
type ColumnUsage int
const (
// DISCARD ignores a column
DISCARD ColumnUsage = iota
// LABEL identifies a column as a label
LABEL ColumnUsage = iota
// COUNTER identifies a column as a counter
COUNTER ColumnUsage = iota
// GAUGE identifies a column as a gauge
GAUGE ColumnUsage = iota
// MAPPEDMETRIC identifies a column as a mapping of text values
MAPPEDMETRIC ColumnUsage = iota
// DURATION identifies a column as a text duration (and converted to milliseconds)
DURATION ColumnUsage = iota
// HISTOGRAM identifies a column as a histogram
HISTOGRAM ColumnUsage = iota
)
// MappingOptions is a copy of ColumnMapping used only for parsing
// the user-facing YAML configuration (exported fields are required for yaml).
// NOTICE: this part of the code comes from PostgreSQL Exporter
type MappingOptions struct {
	Usage string `yaml:"usage"`
	Description string `yaml:"description"`
	Mapping map[string]float64 `yaml:"metric_mapping"` // Optional column mapping for MAPPEDMETRIC
	SupportedVersions semver.Range `yaml:"pg_version"` // Semantic version ranges which are supported. Unsupported columns are not queried (internally converted to DISCARD).
}

// Mapping represents a set of MappingOptions, keyed by column name.
// NOTICE: this part of the code comes from PostgreSQL Exporter
type Mapping map[string]MappingOptions

// Regex used to get the "short-version" from the postgres version field.
// Matches e.g. "PostgreSQL 9.2.4 ..." and captures "9.2.4" as submatch 1.
// NOTICE: this part of the code comes from PostgreSQL Exporter
var versionRegex = regexp.MustCompile(`^\w+ ((\d+)(\.\d+)?(\.\d+)?)`)
// parseVersion extracts the short semantic version from a server version
// string (see versionRegex) so it can be matched against per-column and
// per-query version ranges.
// NOTICE: this part of the code comes from PostgreSQL Exporter
func parseVersion(versionString string) (semver.Version, error) {
	submatches := versionRegex.FindStringSubmatch(versionString)
	if len(submatches) > 1 {
		return semver.ParseTolerant(submatches[1])
	}
	// fmt.Errorf instead of errors.New(fmt.Sprintln(...)): Sprintln appends a
	// trailing newline, which previously ended up embedded in the error text.
	return semver.Version{},
		fmt.Errorf("could not find a postgres version in string: %q", versionString)
}
// ColumnMapping is the user-friendly representation of a prometheus descriptor map
// NOTICE: this part of the code comes from PostgreSQL Exporter
// NOTE(review): all fields are unexported, so the yaml struct tags below are
// inert (yaml libraries cannot see unexported fields); YAML parsing goes
// through MappingOptions instead. Confirm before relying on these tags.
type ColumnMapping struct {
	usage ColumnUsage `yaml:"usage"`
	description string `yaml:"description"`
	mapping map[string]float64 `yaml:"metric_mapping"` // Optional column mapping for MAPPEDMETRIC
	supportedVersions semver.Range `yaml:"og_version"` // Semantic version ranges which are supported. Unsupported columns are not queried (internally converted to DISCARD).
}

// intermediateMetricMap holds the partially loaded metric map parsing.
// This is mainly so we can parse cacheSeconds around.
// NOTICE: this part of the code comes from PostgreSQL Exporter
type intermediateMetricMap struct {
	columnMappings map[string]ColumnMapping // per-column conversion rules, keyed by column name
	master bool // when true, the namespace is queried only against the primary
	cacheSeconds uint64 // number of seconds results may be cached; 0 disables caching
}

// MetricMapNamespace groups metric maps under a shared set of labels.
// NOTICE: this part of the code comes from PostgreSQL Exporter
type MetricMapNamespace struct {
	labels []string // Label names for this namespace
	columnMappings map[string]MetricMap // Column mappings in this namespace
	master bool // Call query only for master database
	cacheSeconds uint64 // Number of seconds this metric namespace can be cached. 0 disables.
}

// MetricMap stores the prometheus metric description which a given column will
// be mapped to by the collector
// NOTICE: this part of the code comes from PostgreSQL Exporter
type MetricMap struct {
	discard bool // Should metric be discarded during mapping?
	histogram bool // Should metric be treated as a histogram?
	vtype prometheus.ValueType // Prometheus valuetype
	desc *prometheus.Desc // Prometheus descriptor
	conversion func(interface{}) (float64, bool) // Conversion function to turn PG result into float64
}

// OverrideQuery 's are run in-place of simple namespace look ups, and provide
// advanced functionality. But they have a tendency to postgres version specific.
// There aren't too many versions, so we simply store customized versions using
// the semver matching we do for columns.
// NOTICE: this part of the code comes from PostgreSQL Exporter
type OverrideQuery struct {
	versionRange semver.Range // server versions this query applies to
	query string // SQL executed instead of "SELECT * FROM <namespace>"
}
// Metrics map from PostgreSQL Exporter
// NOTICE: this part of the code comes from PostgreSQL Exporter
// Each entry maps a namespace (pg_* view name) to its column mappings plus two
// positional fields: master (query only on the primary) and cacheSeconds
// (0 = caching disabled).
var builtinMetricMaps = map[string]intermediateMetricMap{
	"pg_stat_bgwriter": {
		map[string]ColumnMapping{
			"checkpoints_timed": {COUNTER, "Number of scheduled checkpoints that have been performed", nil, nil},
			"checkpoints_req": {COUNTER, "Number of requested checkpoints that have been performed", nil, nil},
			"checkpoint_write_time": {COUNTER, "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds", nil, nil},
			"checkpoint_sync_time": {COUNTER, "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds", nil, nil},
			"buffers_checkpoint": {COUNTER, "Number of buffers written during checkpoints", nil, nil},
			"buffers_clean": {COUNTER, "Number of buffers written by the background writer", nil, nil},
			"maxwritten_clean": {COUNTER, "Number of times the background writer stopped a cleaning scan because it had written too many buffers", nil, nil},
			"buffers_backend": {COUNTER, "Number of buffers written directly by a backend", nil, nil},
			"buffers_backend_fsync": {COUNTER, "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)", nil, nil},
			"buffers_alloc": {COUNTER, "Number of buffers allocated", nil, nil},
			"stats_reset": {COUNTER, "Time at which these statistics were last reset", nil, nil},
		},
		true, // master only
		0,    // cacheSeconds: caching disabled
	},
	"pg_stat_database": {
		map[string]ColumnMapping{
			"datid": {LABEL, "OID of a database", nil, nil},
			"datname": {LABEL, "Name of this database", nil, nil},
			"numbackends": {GAUGE, "Number of backends currently connected to this database. This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset.", nil, nil},
			"xact_commit": {COUNTER, "Number of transactions in this database that have been committed", nil, nil},
			"xact_rollback": {COUNTER, "Number of transactions in this database that have been rolled back", nil, nil},
			"blks_read": {COUNTER, "Number of disk blocks read in this database", nil, nil},
			"blks_hit": {COUNTER, "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)", nil, nil},
			"tup_returned": {COUNTER, "Number of rows returned by queries in this database", nil, nil},
			"tup_fetched": {COUNTER, "Number of rows fetched by queries in this database", nil, nil},
			"tup_inserted": {COUNTER, "Number of rows inserted by queries in this database", nil, nil},
			"tup_updated": {COUNTER, "Number of rows updated by queries in this database", nil, nil},
			"tup_deleted": {COUNTER, "Number of rows deleted by queries in this database", nil, nil},
			"conflicts": {COUNTER, "Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)", nil, nil},
			"temp_files": {COUNTER, "Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting.", nil, nil},
			"temp_bytes": {COUNTER, "Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting.", nil, nil},
			"deadlocks": {COUNTER, "Number of deadlocks detected in this database", nil, nil},
			"blk_read_time": {COUNTER, "Time spent reading data file blocks by backends in this database, in milliseconds", nil, nil},
			"blk_write_time": {COUNTER, "Time spent writing data file blocks by backends in this database, in milliseconds", nil, nil},
			"stats_reset": {COUNTER, "Time at which these statistics were last reset", nil, nil},
		},
		true, // master only
		0,    // cacheSeconds: caching disabled
	},
	"pg_stat_database_conflicts": {
		map[string]ColumnMapping{
			"datid": {LABEL, "OID of a database", nil, nil},
			"datname": {LABEL, "Name of this database", nil, nil},
			"confl_tablespace": {COUNTER, "Number of queries in this database that have been canceled due to dropped tablespaces", nil, nil},
			"confl_lock": {COUNTER, "Number of queries in this database that have been canceled due to lock timeouts", nil, nil},
			"confl_snapshot": {COUNTER, "Number of queries in this database that have been canceled due to old snapshots", nil, nil},
			"confl_bufferpin": {COUNTER, "Number of queries in this database that have been canceled due to pinned buffers", nil, nil},
			"confl_deadlock": {COUNTER, "Number of queries in this database that have been canceled due to deadlocks", nil, nil},
		},
		true, // master only
		0,    // cacheSeconds: caching disabled
	},
	"pg_locks": {
		map[string]ColumnMapping{
			"datname": {LABEL, "Name of this database", nil, nil},
			"mode": {LABEL, "Type of Lock", nil, nil},
			"count": {GAUGE, "Number of locks", nil, nil},
		},
		true, // master only
		0,    // cacheSeconds: caching disabled
	},
	"pg_stat_replication": {
		map[string]ColumnMapping{
			"pid": {DISCARD, "Process ID of a WAL sender process", nil, semver.MustParseRange(">=9.2.0")},
			"usesysid": {DISCARD, "OID of the user logged into this WAL sender process", nil, nil},
			"usename": {DISCARD, "Name of the user logged into this WAL sender process", nil, nil},
			"application_name": {LABEL, "Name of the application that is connected to this WAL sender", nil, nil},
			"client_addr": {LABEL, "IP address of the client connected to this WAL sender. If this field is null, it indicates that the client is connected via a Unix socket on the server machine.", nil, nil},
			"client_hostname": {DISCARD, "Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This field will only be non-null for IP connections, and only when log_hostname is enabled.", nil, nil},
			"client_port": {DISCARD, "TCP port number that the client is using for communication with this WAL sender, or -1 if a Unix socket is used", nil, nil},
			"backend_start": {DISCARD, "with time zone Time when this process was started, i.e., when the client connected to this WAL sender", nil, nil},
			"backend_xmin": {DISCARD, "The current backend's xmin horizon.", nil, nil},
			"state": {LABEL, "Current WAL sender state", nil, nil},
			"sent_location": {DISCARD, "Last transaction log position sent on this connection", nil, semver.MustParseRange("<10.0.0")},
			"write_location": {DISCARD, "Last transaction log position written to disk by this standby server", nil, semver.MustParseRange("<10.0.0")},
			"flush_location": {DISCARD, "Last transaction log position flushed to disk by this standby server", nil, semver.MustParseRange("<10.0.0")},
			"sync_priority": {DISCARD, "Priority of this standby server for being chosen as the synchronous standby", nil, nil},
			"sync_state": {DISCARD, "Synchronous state of this standby server", nil, nil},
			"slot_name": {LABEL, "A unique, cluster-wide identifier for the replication slot", nil, semver.MustParseRange(">=9.2.0")},
			"plugin": {DISCARD, "The base name of the shared object containing the output plugin this logical slot is using, or null for physical slots", nil, nil},
			"slot_type": {DISCARD, "The slot type - physical or logical", nil, nil},
			"datoid": {DISCARD, "The OID of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil},
			"database": {DISCARD, "The name of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil},
			"active": {DISCARD, "True if this slot is currently actively being used", nil, nil},
			"active_pid": {DISCARD, "Process ID of a WAL sender process", nil, nil},
			"xmin": {DISCARD, "The oldest transaction that this slot needs the database to retain. VACUUM cannot remove tuples deleted by any later transaction", nil, nil},
			"catalog_xmin": {DISCARD, "The oldest transaction affecting the system catalogs that this slot needs the database to retain. VACUUM cannot remove catalog tuples deleted by any later transaction", nil, nil},
			"restart_lsn": {DISCARD, "The address (LSN) of oldest WAL which still might be required by the consumer of this slot and thus won't be automatically removed during checkpoints", nil, nil},
			"pg_current_xlog_location": {DISCARD, "pg_current_xlog_location", nil, nil},
			"pg_xlog_location_diff": {GAUGE, "Lag in bytes between master and slave", nil, semver.MustParseRange(">=9.2.0 <10.0.0")},
			"confirmed_flush_lsn": {DISCARD, "LSN position a consumer of a slot has confirmed flushing the data received", nil, nil},
		},
		true, // master only
		0,    // cacheSeconds: caching disabled
	},
	"pg_replication_slots": {
		map[string]ColumnMapping{
			"slot_name": {LABEL, "Name of the replication slot", nil, nil},
			"database": {LABEL, "Name of the database", nil, nil},
			"active": {GAUGE, "Flag indicating if the slot is active", nil, nil},
			"pg_wal_lsn_diff": {GAUGE, "Replication lag in bytes", nil, nil},
		},
		true, // master only
		0,    // cacheSeconds: caching disabled
	},
	"pg_stat_activity": {
		map[string]ColumnMapping{
			"datname": {LABEL, "Name of this database", nil, nil},
			"state": {LABEL, "connection state", nil, semver.MustParseRange(">=9.2.0")},
			"count": {GAUGE, "number of connections in this state", nil, nil},
			"max_tx_duration": {GAUGE, "max duration in seconds any active transaction has been running", nil, nil},
		},
		true, // master only
		0,    // cacheSeconds: caching disabled
	},
}
// Overriding queries for namespaces above.
// When a namespace appears here, the matching query (by server version range)
// is executed instead of the default "SELECT * FROM <namespace>".
// NOTICE: this part of the code comes from PostgreSQL Exporter
var builtinQueryOverrides = map[string][]OverrideQuery{
	"pg_locks": {
		{
			semver.MustParseRange(">0.0.0"),
			`SELECT pg_database.datname,tmp.mode,COALESCE(count,0) as count
			FROM
				(
				  VALUES ('accesssharelock'),
				         ('rowsharelock'),
				         ('rowexclusivelock'),
				         ('shareupdateexclusivelock'),
				         ('sharelock'),
				         ('sharerowexclusivelock'),
				         ('exclusivelock'),
				         ('accessexclusivelock'),
						 ('sireadlock')
				) AS tmp(mode) CROSS JOIN pg_database
			LEFT JOIN
				(SELECT database, lower(mode) AS mode,count(*) AS count
				FROM pg_locks WHERE database IS NOT NULL
				GROUP BY database, lower(mode)
			) AS tmp2
			ON tmp.mode=tmp2.mode and pg_database.oid = tmp2.database ORDER BY 1`,
		},
	},

	"pg_replication_slots": {
		{
			semver.MustParseRange(">=9.4.0 <10.0.0"),
			`
			SELECT slot_name, database, active, pg_xlog_location_diff(pg_current_xlog_location(), restart_lsn)
			FROM pg_replication_slots
			`,
		},
	},

	"pg_stat_archiver": {
		{
			semver.MustParseRange(">=0.0.0"),
			`
			SELECT *,
				extract(epoch from now() - last_archived_time) AS last_archive_age
			FROM pg_stat_archiver
			`,
		},
	},

	"pg_stat_activity": {
		// This query only works on 9.2+ (the "state" column was introduced in 9.2).
		{
			semver.MustParseRange(">=9.2.0"),
			`
			SELECT
				pg_database.datname,
				tmp.state,
				COALESCE(count,0) as count,
				COALESCE(max_tx_duration,0) as max_tx_duration
			FROM
				(
				  VALUES ('active'),
				  		 ('idle'),
				  		 ('idle in transaction'),
				  		 ('idle in transaction (aborted)'),
				  		 ('fastpath function call'),
				  		 ('disabled')
				) AS tmp(state) CROSS JOIN pg_database
			LEFT JOIN
			(
				SELECT
					datname,
					state,
					count(*) AS count,
					MAX(EXTRACT(EPOCH FROM now() - xact_start))::float AS max_tx_duration
				FROM pg_stat_activity GROUP BY datname,state) AS tmp2
				ON tmp.state = tmp2.state AND pg_database.datname = tmp2.datname
			`,
		},
	},
}
// Turn the MetricMap column mapping into a prometheus descriptor mapping.
// For each namespace every column's usage is resolved into a MetricMap holding
// a Prometheus descriptor and a conversion function; columns whose supported
// version range does not match the server are forced to DISCARD.
// NOTICE: this part of the code comes from PostgreSQL Exporter
func makeDescMap(task *scrape.Task, metricMaps map[string]intermediateMetricMap) map[string]MetricMapNamespace {
	var metricMap = make(map[string]MetricMapNamespace)

	for namespace, intermediateMappings := range metricMaps {
		thisMap := make(map[string]MetricMap)

		// Get the variable labels.
		// NOTE(review): map iteration order makes the label order
		// nondeterministic between calls; it is consistent within one call
		// because the same variableLabels slice is shared by every descriptor.
		var variableLabels []string
		for columnName, columnMapping := range intermediateMappings.columnMappings {
			if columnMapping.usage == LABEL {
				variableLabels = append(variableLabels, columnName)
			}
		}

		for columnName, columnMapping := range intermediateMappings.columnMappings {
			// Shadow the loop variables: the conversion closures below are
			// stored in thisMap and invoked after the loop finishes. Before
			// Go 1.22 all iterations shared a single loop variable, so without
			// these copies every stored closure would observe the last
			// column's name/mapping (harmless no-op on Go 1.22+).
			columnName, columnMapping := columnName, columnMapping

			// Check column version compatibility for the current map
			// Force to discard if not compatible.
			if columnMapping.supportedVersions != nil {
				if !columnMapping.supportedVersions(task.PGVersion) {
					// It's very useful to be able to see what columns are being rejected.
					utils.GetLogger().Warnw("Column is being forced to discard due to version incompatibility", "column", columnName)
					thisMap[columnName] = MetricMap{
						discard: true,
						conversion: func(_ interface{}) (float64, bool) {
							return math.NaN(), true
						},
					}
					continue
				}
			}

			// Determine how to convert the column based on its usage.
			// nolint: dupl
			switch columnMapping.usage {
			case DISCARD, LABEL:
				// Labels and discarded columns produce no metric of their own.
				thisMap[columnName] = MetricMap{
					discard: true,
					conversion: func(_ interface{}) (float64, bool) {
						return math.NaN(), true
					},
				}
			case COUNTER:
				thisMap[columnName] = MetricMap{
					vtype: prometheus.CounterValue,
					desc:  prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, task.ConstLabels),
					conversion: func(in interface{}) (float64, bool) {
						return gconv.Float64(in), true
					},
				}
			case GAUGE:
				thisMap[columnName] = MetricMap{
					vtype: prometheus.GaugeValue,
					desc:  prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, task.ConstLabels),
					conversion: func(in interface{}) (float64, bool) {
						return gconv.Float64(in), true
					},
				}
			case HISTOGRAM:
				// The histogram column itself plus its companion _bucket/_sum/_count
				// columns; the companions are marked discard and consumed by
				// queryNamespaceMapping when the histogram is assembled.
				thisMap[columnName] = MetricMap{
					histogram: true,
					vtype:     prometheus.UntypedValue,
					desc:      prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, task.ConstLabels),
					conversion: func(in interface{}) (float64, bool) {
						return gconv.Float64(in), true
					},
				}
				thisMap[columnName+"_bucket"] = MetricMap{
					histogram: true,
					discard:   true,
				}
				thisMap[columnName+"_sum"] = MetricMap{
					histogram: true,
					discard:   true,
				}
				thisMap[columnName+"_count"] = MetricMap{
					histogram: true,
					discard:   true,
				}
			case MAPPEDMETRIC:
				// Text values are translated to numbers via the column's mapping table.
				thisMap[columnName] = MetricMap{
					vtype: prometheus.GaugeValue,
					desc:  prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, task.ConstLabels),
					conversion: func(in interface{}) (float64, bool) {
						text, ok := in.(string)
						if !ok {
							return math.NaN(), false
						}
						val, ok := columnMapping.mapping[text]
						if !ok {
							return math.NaN(), false
						}
						return val, true
					},
				}
			case DURATION:
				// Textual durations (e.g. "1h30m") are converted to milliseconds.
				thisMap[columnName] = MetricMap{
					vtype: prometheus.GaugeValue,
					desc:  prometheus.NewDesc(fmt.Sprintf("%s_%s_milliseconds", namespace, columnName), columnMapping.description, variableLabels, task.ConstLabels),
					conversion: func(in interface{}) (float64, bool) {
						var durationString string
						switch t := in.(type) {
						case []byte:
							durationString = string(t)
						case string:
							durationString = t
						default:
							utils.GetLogger().Warn("Duration conversion metric was not a string")
							return math.NaN(), false
						}

						// "-1" is the server's sentinel for "not applicable".
						if durationString == "-1" {
							return math.NaN(), false
						}

						d, err := time.ParseDuration(durationString)
						if err != nil {
							utils.GetLogger().Warnw("Failed converting result to metric", "column", columnName, "in", in, "err", err)
							return math.NaN(), false
						}
						return float64(d / time.Millisecond), true
					},
				}
			}
		}

		metricMap[namespace] = MetricMapNamespace{variableLabels, thisMap, intermediateMappings.master, intermediateMappings.cacheSeconds}
	}

	return metricMap
}
// Convert the query override file to the version-specific query override file
// for the exporter: for every namespace the first override whose version range
// matches the server is selected; namespaces with no match are mapped to the
// empty string, which disables them downstream.
// NOTICE: this part of the code comes from PostgreSQL Exporter
func makeQueryOverrideMap(task *scrape.Task, queryOverrides map[string][]OverrideQuery) map[string]string {
	result := make(map[string]string)
	for name, candidates := range queryOverrides {
		// Overlapping ranges are rejected at test-time, so at most one
		// candidate can match.
		chosen, ok := "", false
		for _, candidate := range candidates {
			if candidate.versionRange(task.PGVersion) {
				chosen, ok = candidate.query, true
				break
			}
		}
		if !ok {
			utils.GetLogger().Warnw("No matched query override, disabling metric space",
				"server", task.Fingerprint,
				"name", name,
			)
		}
		result[name] = chosen // "" when nothing matched
	}
	return result
}
// BuiltinSQLScraper scrapes the built-in metric namespaces (derived from
// PostgreSQL Exporter) from the target database. It is stateless.
type BuiltinSQLScraper struct{}

// NewBuiltinSQLScraper constructs a BuiltinSQLScraper.
func NewBuiltinSQLScraper() *BuiltinSQLScraper {
	return &BuiltinSQLScraper{}
}
// Scrape runs every built-in namespace query against the task's database and
// returns the collected metrics together with any non-fatal (per-row/parse)
// errors. A namespace that disappears entirely aborts the whole scrape.
func (b BuiltinSQLScraper) Scrape(task *scrape.Task) ([]prometheus.Metric, []error, error) {
	softErrs := make([]error, 0)
	metrics := make([]prometheus.Metric, 0)

	overrides := makeQueryOverrideMap(task, builtinQueryOverrides)
	for ns, nsMapping := range makeDescMap(task, builtinMetricMaps) {
		utils.GetLogger().Infow("Querying namespace",
			"server", task.Fingerprint,
			"namespace", ns,
		)
		// Master-only namespaces are skipped on standby targets.
		if nsMapping.master && !task.Config().Master {
			utils.GetLogger().Infow("Query skipped",
				"server", task.Fingerprint,
				"namespace", ns,
			)
			continue
		}

		nsMetrics, nsSoftErrs, err := queryNamespaceMapping(task, ns, nsMapping, overrides)
		if err != nil {
			// Serious error - a namespace disappeared.
			utils.GetLogger().Error("err", err)
			return nil, nil, err
		}
		// Non-serious errors - likely version or parsing problems.
		for _, soft := range nsSoftErrs {
			softErrs = append(softErrs, soft)
			utils.GetLogger().Error("err", soft)
		}
		metrics = append(metrics, nsMetrics...)
	}
	return metrics, softErrs, nil
}
// Query within a namespace mapping and emit metrics. Returns fatal errors if
// the scrape fails, and a slice of errors if they were non-fatal.
func queryNamespaceMapping(task *scrape.Task, namespace string, mapping MetricMapNamespace, queryOverrides map[string]string) ([]prometheus.Metric, []error, error) {
// Check for a query override for this namespace
query, found := queryOverrides[namespace]
// Was this query disabled (i.e. nothing sensible can be queried on cu
// version of PostgreSQL?
if query == "" && found {
// Return success (no pertinent data)
return []prometheus.Metric{}, []error{}, nil
}
// Don't fail on a bad scrape of one metric
var rows *sql.Rows
var err error
if !found {
rows, err = task.DB.Query(fmt.Sprintf("SELECT * FROM %s;", namespace)) // nolint: gas
} else {
rows, err = task.DB.Query(query)
}
if err != nil {
return []prometheus.Metric{}, []error{}, fmt.Errorf("Error running query on datasource %s: %s %v", task.Fingerprint, namespace, err)
}
defer rows.Close()
var columnNames []string
columnNames, err = rows.Columns()
if err != nil {
return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("error retrieving column list for: ", namespace, err))
}
// Make a lookup map for the column indices
var columnIdx = make(map[string]int, len(columnNames))
for i, n := range columnNames {
columnIdx[n] = i
}
var columnData = make([]interface{}, len(columnNames))
var scanArgs = make([]interface{}, len(columnNames))
for i := range columnData {
scanArgs[i] = &columnData[i]
}
nonfatalErrors := make([]error, 0)
metrics := make([]prometheus.Metric, 0)
for rows.Next() {
err = rows.Scan(scanArgs...)
if err != nil {
return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err))
}
// Get the label values for this row.
labels := make([]string, len(mapping.labels))
for idx, label := range mapping.labels {
labels[idx] = gconv.String(columnData[columnIdx[label]])
}
// Loop over column names, and match to scan data. Unknown columns
// will be filled with an untyped metric number *if* they can be
// converted to float64s. NULLs are allowed and treated as NaN.
for idx, columnName := range columnNames {
var metric prometheus.Metric
if metricMapping, ok := mapping.columnMappings[columnName]; ok {
// Is this a metricy metric?
if metricMapping.discard {
continue
}
if metricMapping.histogram {
var keys []float64
err = pq.Array(&keys).Scan(columnData[idx])
if err != nil {
return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving", columnName, "buckets:", namespace, err))
}
var values []int64
valuesIdx, ok := columnIdx[columnName+"_bucket"]
if !ok {
nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Missing column: ", namespace, columnName+"_bucket")))
continue
}
err = pq.Array(&values).Scan(columnData[valuesIdx])
if err != nil {
return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving", columnName, "bucket values:", namespace, err))
}
buckets := make(map[float64]uint64, len(keys))
for i, key := range keys {
if i >= len(values) {
break
}
buckets[key] = uint64(values[i])
}
idx, ok = columnIdx[columnName+"_sum"]
if !ok {
nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Missing column: ", namespace, columnName+"_sum")))
continue
}
sum, ok := gconv.Float64(columnData[idx]), true
if !ok {
nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName+"_sum", columnData[idx])))
continue
}
idx, ok = columnIdx[columnName+"_count"]
if !ok {
nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Missing column: ", namespace, columnName+"_count")))
continue
}
count, ok := gconv.Uint64(columnData[idx]), true
if !ok {
nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName+"_count", columnData[idx])))
continue
}
metric = prometheus.MustNewConstHistogram(
metricMapping.desc,
count, sum, buckets,
labels...,
)
} else {
value := gconv.Float64(columnData[idx])
// Generate the metric
metric = prometheus.MustNewConstMetric(metricMapping.desc, metricMapping.vtype, value, labels...)
}
} else {
// Unknown metric. Report as untyped if scan to float64 works, else note an error too.
metricLabel := fmt.Sprintf("%s_%s", namespace, columnName)
desc := prometheus.NewDesc(metricLabel, fmt.Sprintf("Unknown metric from %s", namespace), mapping.labels, task.ConstLabels)
// Its not an error to fail here, since the values are
// unexpected anyway.
value := gconv.Float64(columnData[idx])
metric = prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, value, labels...)
}
metrics = append(metrics, metric)
}
}
return metrics, nonfatalErrors, nil
} | internal/scraper/postgresql_exporter.go | 0.6508 | 0.434221 | postgresql_exporter.go | starcoder |
package vgmath
import (
"fmt"
"math"
)
// AxisX is the unit vector along the x axis.
//
// NOTE(review): the three axis vectors are exported mutable pointers; the
// mutating Vector3 methods (Add, Cross, ...) would silently corrupt these
// shared globals, so treat them as read-only.
var AxisX = NewVector3(1, 0, 0)

// AxisY is the unit vector along the y axis.
var AxisY = NewVector3(0, 1, 0)

// AxisZ is the unit vector along the z axis.
var AxisZ = NewVector3(0, 0, 1)

// Vector3 is a three-dimensional vector. All arithmetic methods mutate the
// receiver in place and return it to allow call chaining; use Clone first if
// the original value must be preserved.
type Vector3 struct {
	x, y, z float64
}

// NewVector3 returns a new vector with the given components.
func NewVector3(x, y, z float64) *Vector3 {
	return &Vector3{x: x, y: y, z: z}
}

// Add adds vr to v in place and returns v.
func (v *Vector3) Add(vr *Vector3) *Vector3 {
	v.x += vr.x
	v.y += vr.y
	v.z += vr.z
	return v
}

// Sub subtracts vr from v in place and returns v.
func (v *Vector3) Sub(vr *Vector3) *Vector3 {
	v.x -= vr.x
	v.y -= vr.y
	v.z -= vr.z
	return v
}

// Dot returns the dot (inner) product of v and vr. Neither vector is modified.
func (v *Vector3) Dot(vr *Vector3) float64 {
	return v.x*vr.x + v.y*vr.y + v.z*vr.z
}

// Cross replaces v with the cross product v x vr and returns v.
func (v *Vector3) Cross(vr *Vector3) *Vector3 {
	x := v.y*vr.z - vr.y*v.z
	y := vr.x*v.z - v.x*vr.z
	z := v.x*vr.y - vr.x*v.y
	v.x, v.y, v.z = x, y, z
	return v
}
// MulMatrix multiplies the vector by the matrix in place and returns v.
// Component i becomes the dot product of v with (mat.a[3i], mat.a[3i+1],
// mat.a[3i+2]).
// NOTE(review): assumes Matrix33 stores its 9 entries row-major in a, making
// this M*v with column-vector convention — confirm against the Matrix33 type.
func (v *Vector3) MulMatrix(mat *Matrix33) *Vector3 {
	x := v.x*mat.a[0] + v.y*mat.a[1] + v.z*mat.a[2]
	y := v.x*mat.a[3] + v.y*mat.a[4] + v.z*mat.a[5]
	z := v.x*mat.a[6] + v.y*mat.a[7] + v.z*mat.a[8]
	v.x, v.y, v.z = x, y, z
	return v
}
// MulNumber 向量乘以数字
func (v *Vector3) MulNumber(n float64) *Vector3 {
v.x *= n
v.y *= n
v.z *= n
return v
}
// DivNumber 向量除以数字
func (v *Vector3) DivNumber(n float64) *Vector3 {
rn := 1 / n
return v.MulNumber(rn)
}
// Negative 取反
func (v *Vector3) Negative() *Vector3 {
v.x, v.y, v.z = -v.x, -v.y, -v.z
return v
}
// ProjectLength 指定方向上的投影长度
func (v *Vector3) ProjectLength(dir *Vector3) float64 {
nd := dir.Clone()
nd.Normalize()
return v.Dot(nd)
}
// Project 投影向量
func (v *Vector3) Project(dir *Vector3) *Vector3 {
nd := dir.Clone()
nd.Normalize()
pLen := v.Dot(nd)
nd.MulNumber(pLen)
v.x, v.y, v.z = nd.x, nd.y, nd.z
return v
}
// Normalize 标准化
func (v *Vector3) Normalize() *Vector3 {
length := v.Length()
if math.Abs(length-1) > 0.000000001 {
v.DivNumber(length)
}
return v
}
// Clone 复制
func (v *Vector3) Clone() *Vector3 {
return NewVector3(v.x, v.y, v.z)
}
// LengthSquare 长度平方
func (v *Vector3) LengthSquare() float64 {
return v.x*v.x + v.y*v.y + v.z*v.z
}
// Length 长度
func (v *Vector3) Length() float64 {
return math.Sqrt(v.LengthSquare())
}
// ToString 转字符串
func (v *Vector3) ToString() string {
return fmt.Sprintf("[%f, %f, %f]", v.x, v.y, v.z)
} | util/vgmath/vector.go | 0.517815 | 0.584064 | vector.go | starcoder |
package iso20022
// CorporateActionRate77 specifies security rate details for a corporate
// action event (ISO 20022 message component). All fields are optional
// ("omitempty") and are lazily initialised via the Add*/Set* helpers below.
type CorporateActionRate77 struct {

	// Quantity of additional intermediate securities/new equities awarded for a given quantity of securities derived from subscription.
	AdditionalQuantityForSubscribedResultantSecurities *RatioFormat23Choice `xml:"AddtlQtyForSbcbdRsltntScties,omitempty"`

	// Quantity of additional securities for a given quantity of underlying securities where underlying securities are not exchanged or debited, for example, 1 for 1: 1 new equity credited for every 1 underlying equity = 2 resulting equities.
	AdditionalQuantityForExistingSecurities *RatioFormat23Choice `xml:"AddtlQtyForExstgScties,omitempty"`

	// Quantity of new securities for a given quantity of underlying securities, where the underlying securities will be exchanged or debited, for example, 2 for 1: 2 new equities credited for every 1 underlying equity debited = 2 resulting equities.
	NewToOld *RatioFormat24Choice `xml:"NewToOd,omitempty"`

	// Rate used to determine the cash consideration split across outturn settlement transactions that are the result of a transformation of the parent transaction.
	TransformationRate *PercentageRate `xml:"TrfrmatnRate,omitempty"`

	// Rate used to calculate the amount of the charges/fees that cannot be categorised.
	ChargesFees *RateAndAmountFormat46Choice `xml:"ChrgsFees,omitempty"`

	// Percentage of fiscal tax to apply.
	FiscalStamp *RateFormat3Choice `xml:"FsclStmp,omitempty"`

	// Rate applicable to the event announced, for example, redemption rate for a redemption event.
	ApplicableRate *RateFormat3Choice `xml:"AplblRate,omitempty"`

	// Amount of money per equity allocated as the result of a tax credit.
	TaxCreditRate []*TaxCreditRateFormat10Choice `xml:"TaxCdtRate,omitempty"`

	// Rate of financial transaction tax.
	FinancialTransactionTaxRate *RateFormat3Choice `xml:"FinTxTaxRate,omitempty"`
}
func (c *CorporateActionRate77) AddAdditionalQuantityForSubscribedResultantSecurities() *RatioFormat23Choice {
c.AdditionalQuantityForSubscribedResultantSecurities = new(RatioFormat23Choice)
return c.AdditionalQuantityForSubscribedResultantSecurities
}
func (c *CorporateActionRate77) AddAdditionalQuantityForExistingSecurities() *RatioFormat23Choice {
c.AdditionalQuantityForExistingSecurities = new(RatioFormat23Choice)
return c.AdditionalQuantityForExistingSecurities
}
func (c *CorporateActionRate77) AddNewToOld() *RatioFormat24Choice {
c.NewToOld = new(RatioFormat24Choice)
return c.NewToOld
}
func (c *CorporateActionRate77) SetTransformationRate(value string) {
c.TransformationRate = (*PercentageRate)(&value)
}
func (c *CorporateActionRate77) AddChargesFees() *RateAndAmountFormat46Choice {
c.ChargesFees = new(RateAndAmountFormat46Choice)
return c.ChargesFees
}
func (c *CorporateActionRate77) AddFiscalStamp() *RateFormat3Choice {
c.FiscalStamp = new(RateFormat3Choice)
return c.FiscalStamp
}
func (c *CorporateActionRate77) AddApplicableRate() *RateFormat3Choice {
c.ApplicableRate = new(RateFormat3Choice)
return c.ApplicableRate
}
func (c *CorporateActionRate77) AddTaxCreditRate() *TaxCreditRateFormat10Choice {
newValue := new (TaxCreditRateFormat10Choice)
c.TaxCreditRate = append(c.TaxCreditRate, newValue)
return newValue
}
func (c *CorporateActionRate77) AddFinancialTransactionTaxRate() *RateFormat3Choice {
c.FinancialTransactionTaxRate = new(RateFormat3Choice)
return c.FinancialTransactionTaxRate
} | CorporateActionRate77.go | 0.880579 | 0.532911 | CorporateActionRate77.go | starcoder |
package render
import (
"image"
"image/color"
"math"
"github.com/oakmound/oak/v4/alg/floatgeom"
"github.com/oakmound/oak/v4/alg/span"
)
// A Polygon is a renderable that is represented by a set of in order points
// on a plane.
type Polygon struct {
*Sprite
floatgeom.Polygon2
}
// NewPointsPolygon is a helper function for `NewPolygon(floatgeom.NewPolygon2(p1, p2, p3, pn...))`
func NewPointsPolygon(p1, p2, p3 floatgeom.Point2, pn ...floatgeom.Point2) *Polygon {
return NewPolygon(floatgeom.NewPolygon2(p1, p2, p3, pn...))
}
// NewPolygon constructs a renderable polygon. It will display nothing until
// Fill or FillInverse is called on it.
func NewPolygon(poly floatgeom.Polygon2) *Polygon {
return &Polygon{
Sprite: NewSprite(poly.Bounding.Min.X(), poly.Bounding.Min.Y(),
image.NewRGBA(image.Rect(0, 0, int(poly.Bounding.W()), int(poly.Bounding.H())))),
Polygon2: poly,
}
}
// GetOutline returns a set of lines of the given color along this polygon's outline
func (pg *Polygon) GetOutline(c color.Color) *CompositeM {
return pg.GetColoredOutline(IdentityColorer(c), 0)
}
// GetThickOutline returns a set of lines of the given color along this polygon's outline,
// at the given thickness.
func (pg *Polygon) GetThickOutline(c color.Color, thickness int) *CompositeM {
	return pg.GetColoredOutline(IdentityColorer(c), thickness)
}
// GetGradientOutline returns a set of lines of the given color along this polygon's outline,
// at the given thickness, ranging from c1 to c2 in color along each line.
func (pg *Polygon) GetGradientOutline(c1, c2 color.Color, thickness int) *CompositeM {
	return pg.GetColoredOutline(span.NewLinearColor(c1, c2).Percentile, thickness)
}
// GetColoredOutline returns a set of lines colored by the given Colorer
// along this polygon's outline, one line per polygon edge (including the
// closing edge from the last point back to the first).
func (pg *Polygon) GetColoredOutline(colorer Colorer, thickness int) *CompositeM {
	lines := NewCompositeM()
	pts := pg.Points
	prev := len(pts) - 1
	for cur := 0; cur < len(pts); cur++ {
		a, b := pts[prev], pts[cur]
		// Each line renderable is positioned at the edge's top-left corner.
		offX := math.Min(a.X(), b.X())
		offY := math.Min(a.Y(), b.Y())
		lines.AppendOffset(
			NewColoredLine(a.X(), a.Y(), b.X(), b.Y(), colorer, thickness),
			floatgeom.Point2{offX, offY})
		prev = cur
	}
	return lines
}
// FillInverse colors this polygon's exterior the given color, replacing the
// sprite's buffer with one where every pixel outside the polygon is set to c.
func (pg *Polygon) FillInverse(c color.Color) {
	// Start from a fresh, fully transparent buffer the size of the sprite.
	bounds := pg.r.Bounds()
	rect := image.Rect(0, 0, bounds.Max.X, bounds.Max.Y)
	rgba := image.NewRGBA(rect)
	for x := 0; x < bounds.Max.X; x++ {
		for y := 0; y < bounds.Max.Y; y++ {
			// NOTE(review): unlike Fill, the test point is not offset by
			// pg.Bounding.Min here, so x/y are compared against the polygon
			// in buffer-local coordinates — confirm this asymmetry is intended.
			if !pg.ConvexContains(float64(x), float64(y)) {
				rgba.Set(x, y, c)
			}
		}
	}
	pg.r = rgba
}
// Fill fills the inside of this polygon with the input color
func (pg *Polygon) Fill(c color.Color) {
// Reset the rgba of the polygon
bounds := pg.r.Bounds()
rect := image.Rect(0, 0, bounds.Max.X, bounds.Max.Y)
rgba := image.NewRGBA(rect)
minx := pg.Bounding.Min.X()
miny := pg.Bounding.Min.Y()
for x := 0; x < bounds.Max.X; x++ {
for y := 0; y < bounds.Max.Y; y++ {
if pg.Contains(float64(x)+minx, float64(y)+miny) {
rgba.Set(x, y, c)
}
}
}
pg.r = rgba
} | render/polygon.go | 0.843975 | 0.625667 | polygon.go | starcoder |
package container
import (
	"fmt"
	"io"
	"strings"
	"unsafe"

	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/stl"
)
// NewStrVector builds a variable-length-row vector backed by three std
// vectors: per-row lengths, per-row offsets into a shared byte buffer, and
// the byte buffer itself. At most one Options value is honored (opts[0]);
// it may supply a capacity, an allocator, and existing data to adopt.
func NewStrVector[T any](opts ...*Options) *strVector[T] {
	var capacity int
	var alloc stl.MemAllocator
	lenOpt := new(Options)
	offOpt := new(Options)
	dataOpt := new(Options)
	if len(opts) > 0 {
		opt := opts[0]
		capacity = opt.Capacity
		alloc = opt.Allocator
		if opt.HasData() {
			lenOpt.Data = new(stl.Bytes)
			offOpt.Data = new(stl.Bytes)
			dataOpt.Data = new(stl.Bytes)
			// Reinterpret the []uint32 length/offset headers as raw bytes
			// without copying: the new slices alias the same memory.
			// NOTE(review): &data[0] panics if Length/Offset are empty —
			// confirm HasData guarantees non-empty slices.
			data := opt.Data.Length
			lenOpt.Data.Data = unsafe.Slice((*byte)(unsafe.Pointer(&data[0])),
				len(data)*stl.Sizeof[uint32]())
			data = opt.Data.Offset
			offOpt.Data.Data = unsafe.Slice((*byte)(unsafe.Pointer(&data[0])),
				len(data)*stl.Sizeof[uint32]())
			// Adopted data dictates the row capacity.
			capacity = len(data)
			dataOpt.Data.Data = opt.Data.Data
		}
	}
	if alloc == nil {
		alloc = stl.DefaultAllocator
	}
	// Small default capacity when none was supplied.
	if capacity == 0 {
		capacity = 4
	}
	lenOpt.Capacity = capacity
	lenOpt.Allocator = alloc
	offOpt.Capacity = capacity
	offOpt.Allocator = alloc
	dataOpt.Capacity = capacity
	dataOpt.Allocator = alloc
	offsets := NewStdVector[uint32](offOpt)
	lengths := NewStdVector[uint32](lenOpt)
	data := NewStdVector[byte](dataOpt)
	return &strVector[T]{
		offsets: offsets,
		lengths: lengths,
		data:    data,
	}
}
// Close releases all three backing vectors; nil components are skipped so
// Close is safe on a partially constructed value.
func (vec *strVector[T]) Close() {
	if o := vec.offsets; o != nil {
		o.Close()
	}
	if l := vec.lengths; l != nil {
		l.Close()
	}
	if d := vec.data; d != nil {
		d.Close()
	}
}
// GetAllocator returns the allocator shared by the backing vectors.
func (vec *strVector[T]) GetAllocator() stl.MemAllocator {
	return vec.offsets.GetAllocator()
}

// Length returns the number of rows stored.
func (vec *strVector[T]) Length() int { return vec.lengths.Length() }

// Capacity returns the row capacity of the header vectors.
func (vec *strVector[T]) Capacity() int { return vec.lengths.Capacity() }

// Allocated returns the total bytes allocated across headers and payload.
func (vec *strVector[T]) Allocated() int {
	return vec.lengths.Allocated() + vec.offsets.Allocated() + vec.data.Allocated()
}

// IsView reports whether this vector is a view; strVector never is.
func (vec *strVector[T]) IsView() bool { return false }

// Data exposes the raw payload bytes (shared, not copied).
func (vec *strVector[T]) Data() []byte { return vec.data.Data() }

// Slice is unsupported for variable-length rows.
func (vec *strVector[T]) Slice() []T { panic("not support") }

// SliceWindow is unsupported for variable-length rows.
func (vec *strVector[T]) SliceWindow(_, _ int) []T { panic("not support") }

// DataWindow returns the payload bytes spanning rows
// [offset, offset+length). Assumes length >= 1 and contiguous storage.
func (vec *strVector[T]) DataWindow(offset, length int) []byte {
	start := vec.offsets.Get(offset)
	eoff := vec.offsets.Get(offset + length - 1)
	elen := vec.lengths.Get(offset + length - 1)
	return vec.data.Data()[start:(eoff + elen)]
}
// Desc returns a one-line summary of the vector's length, capacity and
// allocated bytes. (The "Allocted" spelling is preserved for output
// compatibility.)
func (vec *strVector[T]) Desc() string {
	return fmt.Sprintf("StrVector:Len=%d[Rows];Cap=%d[Rows];Allocted:%d[Bytes]",
		vec.Length(), vec.Capacity(), vec.Allocated())
}
// String renders the summary line followed by at most the first 100 rows.
// It uses a strings.Builder instead of the previous repeated
// fmt.Sprintf-on-itself pattern, which was O(n^2) in the rendered length.
func (vec *strVector[T]) String() string {
	var b strings.Builder
	b.WriteString(vec.Desc())
	end := vec.Length()
	if end > 100 {
		end = 100
	}
	if end == 0 {
		return b.String()
	}
	// Matches the historical output: one separating space after the
	// summary, then " <value>" per row.
	b.WriteString(" ")
	for i := 0; i < end; i++ {
		fmt.Fprintf(&b, " %v", vec.Get(i))
	}
	return b.String()
}
// Append adds one row. v must box a []byte; its bytes are copied onto the
// end of the payload and a new offset/length header pair is recorded.
func (vec *strVector[T]) Append(v T) {
	raw := any(v).([]byte)
	vec.lengths.Append(uint32(len(raw)))
	// The new row starts where the payload currently ends.
	vec.offsets.Append(uint32(vec.data.Length()))
	vec.data.AppendMany(raw...)
}
// Get returns row i as a sub-slice of the payload (aliasing, not a copy),
// boxed back into T.
func (vec *strVector[T]) Get(i int) T {
	s := vec.offsets.Get(i)
	l := vec.lengths.Get(i)
	return any(vec.data.Slice()[s : s+l]).(T)
}
// Update replaces row i with v (a []byte boxed in T).
func (vec *strVector[T]) Update(i int, v T) {
	val := any(v).([]byte)
	nlen := len(val)
	olen := vec.lengths.Get(i)
	offset := vec.offsets.Get(i)
	// Fast path: identical size — overwrite in place.
	if int(olen) == nlen {
		copy(vec.data.Slice()[offset:], val)
		return
	}
	// Slow path: splice. Capture everything after the old value, truncate
	// the payload at this row's offset, then re-append new value + tail.
	// NOTE(review): the append below may write into val's spare capacity,
	// mutating the caller's backing array — confirm callers do not rely on it.
	tail := vec.data.Slice()[olen+offset:]
	val = append(val, tail...)
	vec.data.RangeDelete(int(offset), vec.data.Length()-int(offset))
	vec.data.AppendMany(val...)
	vec.lengths.Update(i, uint32(nlen))
	// delta deliberately relies on uint32 wraparound when the new value is
	// shorter (nlen < olen): adding the wrapped delta shifts later offsets
	// down correctly modulo 2^32.
	delta := uint32(nlen) - olen
	for j := i + 1; j < vec.Length(); j++ {
		old := vec.offsets.Get(j)
		vec.offsets.Update(j, old+delta)
	}
}
// Delete removes row i, returning its bytes, and shifts all later offsets
// down by the removed length.
func (vec *strVector[T]) Delete(i int) (deleted T) {
	s := vec.offsets.Get(i)
	l := vec.lengths.Get(i)
	// NOTE(review): deleted aliases the payload's backing array captured
	// before the RangeDelete below; whether those bytes survive depends on
	// stdVector.RangeDelete's implementation — confirm callers copy if needed.
	deleted = any(vec.data.Slice()[s : s+l]).(T)
	vec.data.RangeDelete(int(s), int(l))
	vec.offsets.Delete(i)
	vec.lengths.Delete(i)
	// Rows after i moved left by l payload bytes; fix their offsets.
	for j := i; j < vec.Length(); j++ {
		old := vec.offsets.Get(j)
		vec.offsets.Update(j, old-l)
	}
	return
}
// RangeDelete removes rows [offset, offset+length), deleting from the end
// of the window backwards so earlier indices stay valid.
func (vec *strVector[T]) RangeDelete(offset, length int) {
	for removed := 0; removed < length; removed++ {
		vec.Delete(offset + length - 1 - removed)
	}
}
// AppendMany appends every value in vals, in order.
func (vec *strVector[T]) AppendMany(vals ...T) {
	for i := range vals {
		vec.Append(vals[i])
	}
}
// Clone copies rows [offset, offset+length) into a brand-new vector that
// shares no storage with the receiver. Assumes length >= 1.
func (vec *strVector[T]) Clone(offset, length int) stl.Vector[T] {
	opts := &Options{
		Capacity:  length,
		Allocator: vec.GetAllocator(),
	}
	cloned := NewStrVector[T](opts)
	if offset == 0 {
		// Window starts at row 0: offsets copy over verbatim.
		cloned.offsets.AppendMany(vec.offsets.Slice()[:length]...)
	} else {
		// Otherwise rebase each offset so the clone's payload starts at 0.
		delta := vec.offsets.Get(offset)
		slice := vec.offsets.Slice()[offset : offset+length]
		for _, off := range slice {
			cloned.offsets.Append(off - delta)
		}
	}
	cloned.lengths.AppendMany(vec.lengths.Slice()[offset : offset+length]...)
	// Copy the contiguous payload span covering the whole window.
	start := vec.offsets.Get(offset)
	eoff := vec.offsets.Get(offset + length - 1)
	elen := vec.lengths.Get(offset + length - 1)
	cloned.data.AppendMany(vec.data.Slice()[start : eoff+elen]...)
	return cloned
}
// Reset empties all three backing vectors without releasing their storage.
func (vec *strVector[T]) Reset() {
	vec.data.Reset()
	vec.offsets.Reset()
	vec.lengths.Reset()
}
// Bytes exposes the vector's three components as a stl.Bytes header. The
// returned slices alias the vector's storage; they are not copies.
func (vec *strVector[T]) Bytes() *stl.Bytes {
	return &stl.Bytes{
		Data:   vec.data.Slice(),
		Offset: vec.offsets.Slice(),
		Length: vec.lengths.Slice(),
	}
}
// ReadBytes loads payload, lengths, and offsets from bs. When share is
// true the backing vectors may alias bs rather than copy — the exact
// semantics are delegated to stdVector.ReadBytes. A nil bs is a no-op.
func (vec *strVector[T]) ReadBytes(bs *stl.Bytes, share bool) {
	if bs == nil {
		return
	}
	// One scratch header is reused, retargeting its Data field per component.
	bs1 := stl.NewBytes()
	bs1.Data = bs.Data
	vec.data.ReadBytes(bs1, share)
	bs1.Data = bs.LengthBuf()
	vec.lengths.ReadBytes(bs1, share)
	bs1.Data = bs.OffsetBuf()
	vec.offsets.ReadBytes(bs1, share)
}
// ReadFrom deserializes the vector from r in the fixed on-disk order:
// payload, then offsets, then lengths. It returns the total bytes read;
// on error, bytes read by the failing step are not counted (matching the
// previous behavior).
func (vec *strVector[T]) ReadFrom(r io.Reader) (n int64, err error) {
	steps := []func(io.Reader) (int64, error){
		vec.data.ReadFrom,
		vec.offsets.ReadFrom,
		vec.lengths.ReadFrom,
	}
	for _, step := range steps {
		var nr int64
		if nr, err = step(r); err != nil {
			return
		}
		n += nr
	}
	return
}
func (vec *strVector[T]) WriteTo(w io.Writer) (n int64, err error) {
var nr int64
if nr, err = vec.data.WriteTo(w); err != nil {
return
}
n += nr
if nr, err = vec.offsets.WriteTo(w); err != nil {
return
}
n += nr
if nr, err = vec.lengths.WriteTo(w); err != nil {
return
}
n += nr
return
} | pkg/vm/engine/tae/stl/container/strvec.go | 0.597256 | 0.407923 | strvec.go | starcoder |
// Command stats implements the stats Quick Start example from:
// https://opencensus.io/quickstart/go/metrics/
package main
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"log"
"os"
"time"
"net/http"
"go.opencensus.io/examples/exporter"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"go.opencensus.io/zpages"
)
const (
	// metricsLogFile is where the log exporter writes recorded metrics.
	metricsLogFile = "/tmp/metrics.log"
)

// Measures for the stats quickstart.
var (
	// The latency in milliseconds per REPL iteration.
	mLatencyMs = stats.Float64("repl/latency", "The latency in milliseconds per REPL loop", stats.UnitMilliseconds)
	// Counts the number of lines read in from standard input.
	mLinesIn = stats.Int64("repl/lines_in", "The number of lines read in", stats.UnitNone)
	// Counts the number of non-EOF (end-of-file) errors encountered.
	mErrors = stats.Int64("repl/errors", "The number of errors encountered", stats.UnitNone)
	// Counts/groups the lengths of lines read in.
	mLineLengths = stats.Int64("repl/line_lengths", "The distribution of line lengths", stats.UnitBytes)
)

// TagKeys for the stats quickstart.
var (
	keyMethod = tag.MustNewKey("method")
)

// Views for the stats quickstart. Each view pairs a measure with an
// aggregation and is what actually gets exported.
var (
	latencyView = &view.View{
		Name:        "demo/latency",
		Measure:     mLatencyMs,
		Description: "The distribution of the latencies",
		// Latency in buckets:
		// [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s]
		Aggregation: view.Distribution(25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000),
		TagKeys:     []tag.Key{keyMethod}}
	lineCountView = &view.View{
		Name:        "demo/lines_in",
		Measure:     mLinesIn,
		Description: "The number of lines from standard input",
		Aggregation: view.Count(),
	}
	errorCountView = &view.View{
		Name:        "demo/errors",
		Measure:     mErrors,
		Description: "The number of errors encountered",
		Aggregation: view.Count(),
	}
	lineLengthView = &view.View{
		Name:        "demo/line_lengths",
		Description: "Groups the lengths of keys in buckets",
		Measure:     mLineLengths,
		// Lengths: [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80, >=100B, >=200B, >=400, >=600, >=800, >=1000]
		Aggregation: view.Distribution(5, 10, 15, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000),
	}
)
// main wires up the zpages debug server, the log exporter, the stat views,
// and then runs the read-evaluate-print loop until EOF.
func main() {
	zpages.Handle(nil, "/debug")
	go func() {
		// ListenAndServe only returns on failure; log it instead of
		// silently discarding the error.
		log.Printf("zpages server exited: %v", http.ListenAndServe("localhost:8080", nil))
	}()

	// Using log exporter here to export metrics but you can choose any supported exporter.
	exporter, err := exporter.NewLogExporter(exporter.Options{
		ReportingInterval: 10 * time.Second, // already a Duration; no conversion needed
		MetricsLogFile:    metricsLogFile,
	})
	if err != nil {
		log.Fatalf("Error creating log exporter: %v", err)
	}
	exporter.Start()
	// Deferred calls run last-in-first-out: Stop (final flush) must happen
	// before Close (releases the log file), so Close is deferred first.
	defer exporter.Close()
	defer exporter.Stop()

	// Register the views
	if err := view.Register(latencyView, lineCountView, errorCountView, lineLengthView); err != nil {
		log.Fatalf("Failed to register views: %v", err)
	}

	// In a REPL:
	// 1. Read input
	// 2. process input
	br := bufio.NewReader(os.Stdin)

	// repl is the read, evaluate, print, loop
	for {
		if err := readEvaluateProcess(br); err != nil {
			if err == io.EOF {
				return
			}
			log.Fatal(err)
		}
	}
}
// readEvaluateProcess reads one line from br, processes it, and prints the
// result. Non-EOF failures are recorded on the error measure; EOF is
// returned untouched so the caller can terminate cleanly.
func readEvaluateProcess(br *bufio.Reader) error {
	tagged, err := tag.New(context.Background(), tag.Insert(keyMethod, "repl"))
	if err != nil {
		return err
	}

	fmt.Printf("> ")
	line, _, err := br.ReadLine()
	switch {
	case err == io.EOF:
		return err
	case err != nil:
		stats.Record(tagged, mErrors.M(1))
		return err
	}

	out, err := processLine(tagged, line)
	if err != nil {
		stats.Record(tagged, mErrors.M(1))
		return err
	}
	fmt.Printf("< %s\n\n", out)
	return nil
}
// processLine takes in a line of text and
// transforms it. Currently it just capitalizes it.
func processLine(ctx context.Context, in []byte) (out []byte, err error) {
startTime := time.Now()
defer func() {
ms := float64(time.Since(startTime).Nanoseconds()) / 1e6
stats.Record(ctx, mLinesIn.M(1), mLatencyMs.M(ms), mLineLengths.M(int64(len(in))))
}()
return bytes.ToUpper(in), nil
} | vendor/go.opencensus.io/examples/quickstart/stats.go | 0.630116 | 0.527682 | stats.go | starcoder |
package v1
// Pre-defined annotation keys from the OCI image specification. All keys
// live under the reverse-DNS "org.opencontainers.image" prefix.
const (
	// AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339).
	AnnotationCreated = "org.opencontainers.image.created"

	// AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string).
	AnnotationAuthors = "org.opencontainers.image.authors"

	// AnnotationURL is the annotation key for the URL to find more information on the image.
	AnnotationURL = "org.opencontainers.image.url"

	// AnnotationDocumentation is the annotation key for the URL to get documentation on the image.
	AnnotationDocumentation = "org.opencontainers.image.documentation"

	// AnnotationSource is the annotation key for the URL to get source code for building the image.
	AnnotationSource = "org.opencontainers.image.source"

	// AnnotationVersion is the annotation key for the version of the packaged software.
	// The version MAY match a label or tag in the source code repository.
	// The version MAY be Semantic-Versioning compatible.
	AnnotationVersion = "org.opencontainers.image.version"

	// AnnotationRevision is the annotation key for the source control revision identifier for the packaged software.
	AnnotationRevision = "org.opencontainers.image.revision"

	// AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual.
	AnnotationVendor = "org.opencontainers.image.vendor"

	// AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression.
	AnnotationLicenses = "org.opencontainers.image.licenses"

	// AnnotationRefName is the annotation key for the name of the reference for a target.
	// SHOULD only be considered valid when on descriptors on `index.json` within image layout.
	AnnotationRefName = "org.opencontainers.image.ref.name"

	// AnnotationTitle is the annotation key for the human-readable title of the image.
	AnnotationTitle = "org.opencontainers.image.title"

	// AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image.
	AnnotationDescription = "org.opencontainers.image.description"
)
package utils
import (
"errors"
"math/big"
"github.com/daoleno/uniswap-sdk-core/entities"
"github.com/daoleno/uniswapv3-sdk/constants"
)
// Sentinel errors returned by the sqrt-price math helpers.
var (
	ErrSqrtPriceLessThanZero = errors.New("sqrt price less than zero")
	ErrLiquidityLessThanZero = errors.New("liquidity less than zero")
	ErrInvariant             = errors.New("invariant violation")
)

// MaxUint160 is 2^160 - 1, the largest value representable in 160 bits.
var MaxUint160 = new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(160), nil), constants.One)
// multiplyIn256 multiplies x and y modulo 2^256, mimicking EVM uint256
// overflow semantics.
func multiplyIn256(x, y *big.Int) *big.Int {
	return new(big.Int).And(new(big.Int).Mul(x, y), entities.MaxUint256)
}
// addIn256 adds x and y modulo 2^256, mimicking EVM uint256 overflow
// semantics.
func addIn256(x, y *big.Int) *big.Int {
	return new(big.Int).And(new(big.Int).Add(x, y), entities.MaxUint256)
}
// GetAmount0Delta returns the token0 amount spanned between two Q64.96
// sqrt prices for the given liquidity. roundUp selects ceiling division
// (used when the pool must not undercharge) versus floor division.
func GetAmount0Delta(sqrtRatioAX96, sqrtRatioBX96, liquidity *big.Int, roundUp bool) *big.Int {
	// Normalize so A <= B.
	if sqrtRatioAX96.Cmp(sqrtRatioBX96) >= 0 {
		sqrtRatioAX96, sqrtRatioBX96 = sqrtRatioBX96, sqrtRatioAX96
	}
	// liquidity << 96 keeps full precision through the two divisions below.
	numerator1 := new(big.Int).Lsh(liquidity, 96)
	numerator2 := new(big.Int).Sub(sqrtRatioBX96, sqrtRatioAX96)
	if roundUp {
		return MulDivRoundingUp(MulDivRoundingUp(numerator1, numerator2, sqrtRatioBX96), constants.One, sqrtRatioAX96)
	}
	return new(big.Int).Div(new(big.Int).Div(new(big.Int).Mul(numerator1, numerator2), sqrtRatioBX96), sqrtRatioAX96)
}
// GetAmount1Delta returns the token1 amount spanned between two Q64.96
// sqrt prices for the given liquidity: liquidity * (sqrtB - sqrtA) / 2^96.
// roundUp selects ceiling versus floor division.
func GetAmount1Delta(sqrtRatioAX96, sqrtRatioBX96, liquidity *big.Int, roundUp bool) *big.Int {
	// Normalize so A <= B.
	if sqrtRatioAX96.Cmp(sqrtRatioBX96) >= 0 {
		sqrtRatioAX96, sqrtRatioBX96 = sqrtRatioBX96, sqrtRatioAX96
	}
	if roundUp {
		return MulDivRoundingUp(liquidity, new(big.Int).Sub(sqrtRatioBX96, sqrtRatioAX96), constants.Q96)
	}
	return new(big.Int).Div(new(big.Int).Mul(liquidity, new(big.Int).Sub(sqrtRatioBX96, sqrtRatioAX96)), constants.Q96)
}
func GetNextSqrtPriceFromInput(sqrtPX96, liquidity, amountIn *big.Int, zeroForOne bool) (*big.Int, error) {
if sqrtPX96.Cmp(constants.Zero) <= 0 {
return nil, ErrSqrtPriceLessThanZero
}
if liquidity.Cmp(constants.Zero) <= 0 {
return nil, ErrLiquidityLessThanZero
}
if zeroForOne {
return getNextSqrtPriceFromAmount0RoundingUp(sqrtPX96, liquidity, amountIn, true)
}
return getNextSqrtPriceFromAmount1RoundingDown(sqrtPX96, liquidity, amountIn, true)
}
func GetNextSqrtPriceFromOutput(sqrtPX96, liquidity, amountOut *big.Int, zeroForOne bool) (*big.Int, error) {
if sqrtPX96.Cmp(constants.Zero) <= 0 {
return nil, ErrSqrtPriceLessThanZero
}
if liquidity.Cmp(constants.Zero) <= 0 {
return nil, ErrLiquidityLessThanZero
}
if zeroForOne {
return getNextSqrtPriceFromAmount1RoundingDown(sqrtPX96, liquidity, amountOut, false)
}
return getNextSqrtPriceFromAmount0RoundingUp(sqrtPX96, liquidity, amountOut, false)
}
// getNextSqrtPriceFromAmount0RoundingUp computes the next sqrt price from
// a token0 amount, always rounding up. When add is true the amount is
// added to the pool's token0 reserves; otherwise it is removed. The
// removal path enforces invariants (no overflow, reserves stay positive)
// via ErrInvariant.
func getNextSqrtPriceFromAmount0RoundingUp(sqrtPX96, liquidity, amount *big.Int, add bool) (*big.Int, error) {
	if amount.Cmp(constants.Zero) == 0 {
		return sqrtPX96, nil
	}
	numerator1 := new(big.Int).Lsh(liquidity, 96)

	if add {
		// Overflow-checked fast path: only valid if amount*sqrtP fits
		// in 256 bits (verified by dividing back) and the denominator
		// did not wrap.
		product := multiplyIn256(amount, sqrtPX96)
		if new(big.Int).Div(product, amount).Cmp(sqrtPX96) == 0 {
			denominator := addIn256(numerator1, product)
			if denominator.Cmp(numerator1) >= 0 {
				return MulDivRoundingUp(numerator1, sqrtPX96, denominator), nil
			}
		}
		// Fallback formula, immune to the overflow above.
		return MulDivRoundingUp(numerator1, constants.One, new(big.Int).Add(new(big.Int).Div(numerator1, sqrtPX96), amount)), nil
	} else {
		product := multiplyIn256(amount, sqrtPX96)
		// The product must not have overflowed 256 bits...
		if new(big.Int).Div(product, amount).Cmp(sqrtPX96) != 0 {
			return nil, ErrInvariant
		}
		// ...and removing it must leave a positive denominator.
		if numerator1.Cmp(product) <= 0 {
			return nil, ErrInvariant
		}
		denominator := new(big.Int).Sub(numerator1, product)
		return MulDivRoundingUp(numerator1, sqrtPX96, denominator), nil
	}
}
// getNextSqrtPriceFromAmount1RoundingDown computes the next sqrt price
// from a token1 amount, always rounding down. When add is true the price
// rises by amount<<96/liquidity; otherwise it falls, and the result must
// stay positive or ErrInvariant is returned.
func getNextSqrtPriceFromAmount1RoundingDown(sqrtPX96, liquidity, amount *big.Int, add bool) (*big.Int, error) {
	if add {
		var quotient *big.Int
		// The shift form is cheaper but only safe while amount fits in
		// 160 bits (amount<<96 then stays within 256 bits).
		if amount.Cmp(MaxUint160) <= 0 {
			quotient = new(big.Int).Div(new(big.Int).Lsh(amount, 96), liquidity)
		} else {
			quotient = new(big.Int).Div(new(big.Int).Mul(amount, constants.Q96), liquidity)
		}
		return new(big.Int).Add(sqrtPX96, quotient), nil
	}

	quotient := MulDivRoundingUp(amount, constants.Q96, liquidity)
	// Removing more than the current price allows would drive it to zero
	// or below.
	if sqrtPX96.Cmp(quotient) <= 0 {
		return nil, ErrInvariant
	}
	return new(big.Int).Sub(sqrtPX96, quotient), nil
}
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file tests type lists & structural constraints.
// Note: This test has been adjusted to use the new
// type set notation rather than type lists.
package p
// Assignability of an unnamed pointer type to a type parameter that
// has a matching underlying type.
func _[T interface{}, PT interface{ ~*T }](x T) PT {
	return &x
}

// Indexing of generic types containing type parameters in their type list:
func at[T interface{ ~[]E }, E any](x T, i int) E {
	return x[i]
}

// A generic type inside a function acts like a named type. Its underlying
// type is itself, its "operational type" is defined by the type list in
// the type bound, if any.
func _[T interface{ ~int }](x T) {
	type myint int
	var _ int = int(x)
	var _ T = 42
	var _ T = T(myint(42))
}
// Indexing a generic type which has a structural constraint to be an array.
func _[T interface{ ~[10]int }](x T) {
	_ = x[9] // ok
}

// Dereference of a generic type which has a structural constraint to be a pointer.
func _[T interface{ ~*int }](p T) int {
	return *p
}

// Channel send and receive on a generic type which has a structural constraint to
// be a channel.
func _[T interface{ ~chan int }](ch T) int {
	// This would deadlock if executed (but ok for a compile test)
	ch <- 0
	return <-ch
}

// Calling of a generic type which has a structural constraint to be a function.
func _[T interface{ ~func() }](f T) {
	f()
	go f()
}

// Same, but function has a parameter and return value.
func _[T interface{ ~func(string) int }](f T) int {
	return f("hello")
}

// Map access of a generic type which has a structural constraint to be a map.
func _[V any, T interface{ ~map[string]V }](p T) V {
	return p["test"]
}
// Testing partial and full type inference, including the case where the types can
// be inferred without needing the types of the function arguments.

// Cannot embed stand-alone type parameters. Disabled for now.
/*
func f0[A any, B interface{type C}, C interface{type D}, D interface{type A}](A, B, C, D)
func f0x() {
	f := f0[string]
	f("a", "b", "c", "d")
	f0("a", "b", "c", "d")
}

func f1[A any, B interface{type A}](A, B)
func f1x() {
	f := f1[int]
	f(int(0), int(0))
	f1(int(0), int(0))
	f(0, 0)
	f1(0, 0)
}
*/

// f2 constrains B to slices of A; f2x checks that B can be inferred from
// an explicitly instantiated A as well as from the arguments.
func f2[A any, B interface{ ~[]A }](_ A, _ B) {}
func f2x() {
	f := f2[byte]
	f(byte(0), []byte{})
	f2(byte(0), []byte{})
	f(0, []byte{})
	// f2(0, []byte{}) - this one doesn't work
}
// Cannot embed stand-alone type parameters. Disabled for now.
/*
func f3[A any, B interface{type C}, C interface{type *A}](a A, _ B, c C)
func f3x() {
	f := f3[int]
	var x int
	f(x, &x, &x)
	f3(x, &x, &x)
}
*/

// f4 chains constraints: B is a slice of C, C a pointer to A. f4x checks
// inference of B and C from a partially instantiated call.
func f4[A any, B interface{ ~[]C }, C interface{ ~*A }](_ A, _ B, c C) {}
func f4x() {
	f := f4[int]
	var x int
	f(x, []*int{}, &x)
	f4(x, []*int{}, &x)
}

// f5 constrains A to a struct whose fields reference the other two type
// parameters; f5x checks full inference from a single argument.
func f5[A interface {
	~struct {
		b B
		c C
	}
}, B any, C interface{ ~*B }](x B) A { panic(0) }
func f5x() {
	x := f5(1.2)
	var _ float64 = x.b
	var _ float64 = *x.c
}

// f6 infers A from a struct-typed argument's field element type.
func f6[A any, B interface{ ~struct{ f []A } }](B) A { panic(0) }
func f6x() {
	x := f6(struct{ f []string }{})
	var _ string = x
}
package cf
// CF is a main struct of changefinder. It runs two cascaded SDAR models
// with a smoothing stage between and after them.
type CF struct {
	smooth int // first smoothing window size (rows)
	order  int // AR model order: history length fed to each SDAR stage
	r      float64 // forgetting rate passed to both SDAR models
	ts             []float64 // ring of the most recent `order` raw inputs
	firstScores    []float64 // ring of first-stage scores, length <= smooth
	smoothedScores []float64 // ring of smoothed first-stage scores, length <= order
	secondScores   []float64 // ring of second-stage scores, length <= smooth2
	convolve       []float64 // first smoothing kernel (all ones)
	smooth2        int       // second smoothing window size (smooth / 2)
	convolve2      []float64 // second smoothing kernel (all ones)
	sdarFirst      *sdar1Dim // stage-one SDAR model
	sdarSecond     *sdar1Dim // stage-two SDAR model
}
// ChangeFinder returns a new CF with forgetting rate r, AR model order
// `order`, and first smoothing window `smooth`. The second smoothing
// window is half of smooth.
func ChangeFinder(r float64, order, smooth int) *CF {
	cf := CF{
		smooth: smooth,
		order: order,
		r: r,
		ts: make([]float64, 0, 64),
		firstScores: make([]float64, 0, smooth),
		smoothedScores: make([]float64, 0, smooth),
		secondScores: make([]float64, 0, smooth),
		smooth2: int(smooth / 2.0),
		sdarFirst: newSDAR1Dim(r, order),
		sdarSecond: newSDAR1Dim(r, order),
	}
	// All-ones convolution kernels: "smoothing" is therefore a windowed
	// sum, not a mean. NOTE(review): confirm the unnormalized kernel is
	// intended.
	cf.convolve = make([]float64, cf.smooth)
	for i := range cf.convolve {
		cf.convolve[i] = 1.0
	}
	cf.convolve2 = make([]float64, cf.smooth2)
	for i := range cf.convolve2 {
		cf.convolve2[i] = 1.0
	}
	return &cf
}
// addOne appends one value to *ts, then drops the oldest element once the
// slice has grown one past size, keeping it a fixed-size sliding window.
func (cf *CF) addOne(one float64, ts *[]float64, size int) {
	grown := append(*ts, one)
	if len(grown) == size+1 {
		grown = grown[1:]
	}
	*ts = grown
}
// smoothing returns the dot product of ts with the first (all-ones)
// kernel, i.e. the windowed sum of the first `smooth` elements.
func (cf *CF) smoothing(ts []float64) float64 {
	var sum float64
	for i, w := range cf.convolve {
		sum += ts[i] * w
	}
	return sum
}
// smoothing2 returns the dot product of ts with the second (all-ones)
// kernel, i.e. the windowed sum of the first `smooth2` elements.
func (cf *CF) smoothing2(ts []float64) float64 {
	var sum float64
	for i, w := range cf.convolve2 {
		sum += ts[i] * w
	}
	return sum
}
// Update feeds one observation through the two-stage pipeline
// (SDAR -> smoothing -> SDAR -> smoothing) and returns the current
// change-point score, or 0.0 until enough history has accumulated.
func (cf *CF) Update(x float64) float64 {
	score := float64(0.0)
	// First step learning: only once `order` past inputs are available.
	if len(cf.ts) == cf.order {
		score = cf.sdarFirst.update(x, cf.ts)
		cf.addOne(score, &cf.firstScores, cf.smooth)
	}
	cf.addOne(x, &cf.ts, cf.order)
	// NOTE(review): 0.0 doubles as the "no value yet" sentinel below, so a
	// genuine smoothed score of exactly 0.0 is skipped — confirm acceptable.
	var secondTarget float64
	// Smoothing of the first-stage scores.
	if len(cf.firstScores) == cf.smooth {
		secondTarget = cf.smoothing(cf.firstScores)
	}
	// Second step learning on the smoothed first-stage scores.
	if secondTarget != 0.0 && len(cf.smoothedScores) == cf.order {
		score = cf.sdarSecond.update(secondTarget, cf.smoothedScores)
		cf.addOne(score, &cf.secondScores, cf.smooth2)
	}
	if secondTarget != 0.0 {
		cf.addOne(secondTarget, &cf.smoothedScores, cf.order)
	}
	// Final smoothing once the second-stage window is full.
	if len(cf.secondScores) == cf.smooth2 {
		score = cf.smoothing2(cf.secondScores)
		return score
	}
	return 0.0
}
package cef
import (
"net"
"strconv"
"time"
"github.com/pkg/errors"
"github.com/elastic/beats/v7/libbeat/common"
)
// DataType specifies one of the CEF extension field data types.
type DataType uint8

// List of DataTypes. Unset is the zero value, meaning no type was assigned.
const (
	Unset DataType = iota
	IntegerType
	LongType
	FloatType
	DoubleType
	StringType
	BooleanType
	IPType
	MACAddressType
	TimestampType
)
// ToType converts the given string value to the specified data type,
// dispatching to the matching to* helper. Unknown types yield an error.
func ToType(value string, typ DataType) (interface{}, error) {
	switch typ {
	case StringType:
		return value, nil
	case LongType:
		return toLong(value)
	case IntegerType:
		return toInteger(value)
	case FloatType:
		return toFloat(value)
	case DoubleType:
		return toDouble(value)
	case BooleanType:
		return toBoolean(value)
	case IPType:
		return toIP(value)
	case MACAddressType:
		return toMACAddress(value)
	case TimestampType:
		return toTimestamp(value)
	default:
		return nil, errors.Errorf("invalid data type: %v", typ)
	}
}
// toLong parses v as a 64-bit signed integer. Base 0 auto-detects the
// prefix (0x, 0, ...) per strconv.ParseInt.
func toLong(v string) (int64, error) {
	return strconv.ParseInt(v, 0, 64)
}
// toInteger parses v (base auto-detected) as a 32-bit signed integer. On
// error the partially converted value is still returned, matching
// strconv.ParseInt's contract.
func toInteger(v string) (int32, error) {
	parsed, err := strconv.ParseInt(v, 0, 32)
	return int32(parsed), err
}
// toFloat parses v as a 32-bit floating point number. On error the
// partially converted value is still returned, matching
// strconv.ParseFloat's contract.
func toFloat(v string) (float32, error) {
	parsed, err := strconv.ParseFloat(v, 32)
	return float32(parsed), err
}
// toDouble parses v as a 64-bit floating point number. The previous
// version copied ParseFloat's results through a needless temporary;
// returning the call directly is equivalent.
func toDouble(v string) (float64, error) {
	return strconv.ParseFloat(v, 64)
}
// toBoolean parses v as a boolean, accepting the forms recognized by
// strconv.ParseBool (1/t/T/TRUE/true/True and the false equivalents).
func toBoolean(v string) (bool, error) {
	return strconv.ParseBool(v)
}
// toIP validates that v is a textual IPv4 or IPv6 address and returns it
// unchanged; the value itself is not normalized.
func toIP(v string) (string, error) {
	if net.ParseIP(v) == nil {
		return "", errors.New("value is not a valid IP address")
	}
	return v, nil
}
// toMACAddress accepts a MAC address as hex characters separated by colon,
// dot, or dash. It returns lowercase hex characters separated by colons.
func toMACAddress(v string) (string, error) {
	// net.ParseMAC is deliberately liberal about separators; its String()
	// form is the canonical colon-separated lowercase representation.
	parsed, err := net.ParseMAC(v)
	if err != nil {
		return "", err
	}
	return parsed.String(), nil
}
// timeLayouts lists the Go reference-time layouts tried in order by
// toTimestamp. Year-less layouts come first; parsed times then get the
// current year substituted.
var timeLayouts = []string{
	// MMM dd HH:mm:ss.SSS zzz
	"Jan _2 15:04:05.000 MST",
	"Jan _2 15:04:05.000 Z0700",
	"Jan _2 15:04:05.000 Z07:00",
	"Jan _2 15:04:05.000 GMT-07:00",
	// MMM dd HH:mm:sss.SSS
	"Jan _2 15:04:05.000",
	// MMM dd HH:mm:ss zzz
	"Jan _2 15:04:05 MST",
	"Jan _2 15:04:05 Z0700",
	"Jan _2 15:04:05 Z07:00",
	"Jan _2 15:04:05 GMT-07:00",
	// MMM dd HH:mm:ss
	"Jan _2 15:04:05",
	// MMM dd yyyy HH:mm:ss.SSS zzz
	"Jan _2 2006 15:04:05.000 MST",
	"Jan _2 2006 15:04:05.000 Z0700",
	"Jan _2 2006 15:04:05.000 Z07:00",
	"Jan _2 2006 15:04:05.000 GMT-07:00",
	// MMM dd yyyy HH:mm:ss.SSS
	"Jan _2 2006 15:04:05.000",
	// MMM dd yyyy HH:mm:ss zzz
	"Jan _2 2006 15:04:05 MST",
	"Jan _2 2006 15:04:05 Z0700",
	"Jan _2 2006 15:04:05 Z07:00",
	"Jan _2 2006 15:04:05 GMT-07:00",
	// MMM dd yyyy HH:mm:ss
	"Jan _2 2006 15:04:05",
}
// toTimestamp converts v to a timestamp. A purely numeric value is
// interpreted as milliseconds since the Unix epoch; otherwise each layout
// in timeLayouts is tried in order, in UTC.
func toTimestamp(v string) (common.Time, error) {
	if unixMs, err := toLong(v); err == nil {
		return common.Time(time.Unix(0, unixMs*int64(time.Millisecond))), nil
	}

	for _, layout := range timeLayouts {
		ts, err := time.ParseInLocation(layout, v, time.UTC)
		if err == nil {
			// Layouts without a year component parse to year 0; shift the
			// result into the current year.
			if ts.Year() == 0 {
				currentYear := time.Now().In(ts.Location()).Year()
				ts = ts.AddDate(currentYear, 0, 0)
			}
			return common.Time(ts), nil
		}
	}

	return common.Time(time.Time{}), errors.New("value is not a valid timestamp")
}
// It is not always possible to be able to say the interface value itself will be enough context.
// Sometimes, it requires more context. For example, a networking problem can be really
// complicated. Error variables wouldn't work there.
// Only when the error variables wouldn't work, we should go ahead and start working with
// custom concrete type for the error.
// Below are two custom error types from the json package in the standard library and see how we
// can use those. This is type as context.
// http://golang.org/src/pkg/encoding/json/decode.go
package main
import (
"fmt"
"reflect"
)
// An UnmarshalTypeError describes a JSON value that was not appropriate for
// a value of a specific Go type.
// Naming convention: The word "Error" ends at the name of the type.
type UnmarshalTypeError struct {
Value string // description of JSON value
Type reflect.Type // type of Go value it could not be assigned to
}
// UnmarshalTypeError implements the error interface.
// We are using pointer semantic.
// In the implementation, we are validating all the fields are being used in the error message. If
// not, we have a problem. Because why would you add a field to the custom error type and not
// displaying on your log when this method would call. We only do this when we really need it.
func (e *UnmarshalTypeError) Error() string {
return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
}
// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
// (The argument to Unmarshal must be a non-nil pointer.)
// This concrete type is used when we don't pass the address of a value into Unmarshal function.
type InvalidUnmarshalError struct {
Type reflect.Type
}
// InvalidUnmarshalError implements the error interface.
func (e *InvalidUnmarshalError) Error() string {
if e.Type == nil {
return "json: Unmarshal(nil)"
}
if e.Type.Kind() != reflect.Ptr {
return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
}
return "json: Unmarshal(nil " + e.Type.String() + ")"
}
// user is a type for use in the Unmarshal call. Name is deliberately an
// int so unmarshaling the string "bill" into it would be a type mismatch.
type user struct {
	Name int
}
// main demonstrates "type as context": the caller switches on the concrete
// error type to decide how to react.
func main() {
	var u user
	err := Unmarshal([]byte(`{"name":"bill"}`), u) // Run with a value and pointer.
	if err != nil {
		// This is a special type assertion that only works on the switch.
		switch e := err.(type) {
		case *UnmarshalTypeError:
			fmt.Printf("UnmarshalTypeError: Value[%s] Type[%v]\n", e.Value, e.Type)
		case *InvalidUnmarshalError:
			fmt.Printf("InvalidUnmarshalError: Type[%v]\n", e.Type)
		default:
			fmt.Println(err)
		}
		return
	}

	fmt.Println("Name:", u.Name)
}
// Unmarshal simulates an unmarshal call that always fails.
// Notice the parameters here: the first one is a slice of bytes and the
// second one is an empty interface, so any value can be passed in.
// We reflect on the concrete type stored inside the interface and validate
// that it is a non-nil pointer, returning a different error type otherwise.
func Unmarshal(data []byte, v interface{}) error {
	rv := reflect.ValueOf(v)
	// Kind is checked first: IsNil would panic on a non-pointer value.
	if rv.Kind() != reflect.Ptr || rv.IsNil() {
		return &InvalidUnmarshalError{reflect.TypeOf(v)}
	}
	return &UnmarshalTypeError{"string", reflect.TypeOf(v)}
}
// There is one flaw when using type as context here. In this case, we are now going back to the
// concrete. We walk away from the decoupling because our code is now bound to these concrete
// types. If the developer who wrote the json package makes any changes to these concrete types,
// that will create a cascading effect all the way through our code. We are no longer protected
// by the decoupling of the error interface.
// This sometimes has to happen. Can we do something different so we don't lose the decoupling?
// This is where the idea of behavior as context comes in.
package parse
import (
"strings"
"unicode"
"unicode/utf8"
)
// token represents a unit of information found in an IDL file.
type token struct {
	t int    // The token type constant (T_XXX) defined in the parser. Mandatory.
	v string // Any additional token value for strings, integers and floats. May be an empty string.
}
// String is an implementation of the Stringer interface. Returns a string
// representing the token for printing purposes: the token name, plus the
// quoted value when one is present.
func (t token) String() string {
	// NOTE(review): IdlTokname is invoked before the EOF (t.t == 0) check,
	// so for EOF tokens it is called with index 0 - IdlPrivate + 2 —
	// confirm IdlTokname tolerates that argument.
	s := IdlTokname(t.t - IdlPrivate + 2)
	if t.t == 0 {
		s = "EOF"
	}
	if len(t.v) > 0 {
		s += " \"" + t.v + "\""
	}
	return s
}
// Returns a new token given the string "s".
func toToken(s string) token {
t := token{}
switch strings.ToLower(s) {
case "{":
t.t = T_LEFT_CURLY_BRACKET
case "}":
t.t = T_RIGHT_CURLY_BRACKET
case "[":
t.t = T_LEFT_SQUARE_BRACKET
case "]":
t.t = T_RIGHT_SQUARE_BRACKET
case "(":
t.t = T_LEFT_PARANTHESIS
case ")":
t.t = T_RIGHT_PARANTHESIS
case ":":
t.t = T_COLON
case ",":
t.t = T_COMMA
case ";":
t.t = T_SEMICOLON
case "=":
t.t = T_EQUAL
case ">>":
t.t = T_SHIFTRIGHT
case "<<":
t.t = T_SHIFTLEFT
case "+":
t.t = T_PLUS_SIGN
case "-":
t.t = T_MINUS_SIGN
case "*":
t.t = T_ASTERISK
case "/":
t.t = T_SOLIDUS
case "%":
t.t = T_PERCENT_SIGN
case "~":
t.t = T_TILDE
case "|":
t.t = T_VERTICAL_LINE
case "^":
t.t = T_CIRCUMFLEX
case "&":
t.t = T_AMPERSAND
case "<":
t.t = T_LESS_THAN_SIGN
case ">":
t.t = T_GREATER_THAN_SIGN
case "const":
t.t = T_CONST
case "typedef":
t.t = T_TYPEDEF
case "float":
t.t = T_FLOAT
case "double":
t.t = T_DOUBLE
case "char":
t.t = T_CHAR
case "wchar":
t.t = T_WCHAR
case "fixed":
t.t = T_FIXED
case "boolean":
t.t = T_BOOLEAN
case "string":
t.t = T_STRING
case "wstring":
t.t = T_WSTRING
case "void":
t.t = T_VOID
case "unsigned":
t.t = T_UNSIGNED
case "long":
t.t = T_LONG
case "short":
t.t = T_SHORT
case "false":
t.t = T_FALSE
case "true":
t.t = T_TRUE
case "struct":
t.t = T_STRUCT
case "union":
t.t = T_UNION
case "switch":
t.t = T_SWITCH
case "case":
t.t = T_CASE
case "default":
t.t = T_DEFAULT
case "enum":
t.t = T_ENUM
case "in":
t.t = T_IN
case "out":
t.t = T_OUT
case "interface":
t.t = T_INTERFACE
case "abstract":
t.t = T_ABSTRACT
case "valuetype":
t.t = T_VALUETYPE
case "truncatable":
t.t = T_TRUNCATABLE
case "supports":
t.t = T_SUPPORTS
case "custom":
t.t = T_CUSTOM
case "public":
t.t = T_PUBLIC
case "private":
t.t = T_PRIVATE
case "factory":
t.t = T_FACTORY
case "native":
t.t = T_NATIVE
case "valuebase":
t.t = T_VALUEBASE
case "::":
t.t = T_SCOPE
case "module":
t.t = T_MODULE
case "octet":
t.t = T_OCTET
case "any":
t.t = T_ANY
case "sequence":
t.t = T_SEQUENCE
case "readonly":
t.t = T_READONLY
case "attribute":
t.t = T_ATTRIBUTE
case "exception":
t.t = T_EXCEPTION
case "oneway":
t.t = T_ONEWAY
case "inout":
t.t = T_INOUT
case "raises":
t.t = T_RAISES
case "context":
t.t = T_CONTEXT
case "object":
t.t = T_OBJECT
case "principal":
t.t = T_PRINCIPAL
default:
t.t = T_IDENTIFIER
t.v = s
}
// Is this a number of some sort?
r, _ := utf8.DecodeRuneInString(s)
if unicode.IsDigit(r) {
// Is it a float (does it contain a period)?
if strings.ContainsRune(s, '.') {
t.t = T_FLOATING_PT_LITERAL
} else {
t.t = T_INTEGER_LITERAL
}
t.v = s
} else if r == '"' {
// It's a string literal. Store the string with the quotes removed.
t.t = T_STRING_LITERAL
t.v = strings.Trim(s, "\"")
}
return t
} | data/train/go/687245c94e9dd4704980a050f04e00520dc25093token.go | 0.530723 | 0.505493 | 687245c94e9dd4704980a050f04e00520dc25093token.go | starcoder |
package convert
import (
"errors"
"strconv"
)
// IntToBool -- Converts a value from int64 to boolean.
// Only the exact values 1 and 0 convert (to true and false respectively);
// any other input yields false plus a conversion error.
func IntToBool(value int64) (bool, error) {
	switch value {
	case 1:
		return true, nil
	case 0:
		return false, nil
	default:
		return false, errors.New(cannotConvertErrMsg)
	}
}
// IntToFloat32 -- Converts a value from int64 to float32.
// Inputs outside the precisely representable integer range of float32 are
// still returned (rounded), but together with a loss-of-precision error.
func IntToFloat32(value int64) (float32, error) {
	converted := float32(value)
	if value >= float32MinPreciseValue && value <= float32MaxPreciseValue {
		return converted, nil
	}
	return converted, errors.New(lossOfPrecisionErrorMsg)
}
// IntToFloat64 -- Converts a value from int64 to float64.
// Inputs outside the precisely representable integer range of float64 are
// still returned (rounded), but together with a loss-of-precision error.
func IntToFloat64(value int64) (float64, error) {
	converted := float64(value)
	if value >= float64MinPreciseValue && value <= float64MaxPreciseValue {
		return converted, nil
	}
	return converted, errors.New(lossOfPrecisionErrorMsg)
}
// IntToDefaultInt -- Converts a value from int64 to int.
// On platforms where int is narrower than 64 bits, an out-of-range value is
// returned truncated together with a loss-of-precision error.
func IntToDefaultInt(value int64) (int, error) {
	converted := int(value)
	if value == int64(converted) {
		return converted, nil
	}
	return converted, errors.New(lossOfPrecisionErrorMsg)
}
// IntToInt8 -- Converts a value from int64 to int8.
// Values outside [-128, 127] are returned truncated together with a
// loss-of-precision error.
func IntToInt8(value int64) (int8, error) {
	converted := int8(value)
	if value == int64(converted) {
		return converted, nil
	}
	return converted, errors.New(lossOfPrecisionErrorMsg)
}
// IntToInt16 -- Converts a value from int64 to int16.
// Values that do not fit in 16 bits are returned truncated together with a
// loss-of-precision error.
func IntToInt16(value int64) (int16, error) {
	converted := int16(value)
	if value == int64(converted) {
		return converted, nil
	}
	return converted, errors.New(lossOfPrecisionErrorMsg)
}
// IntToInt32 -- Converts a value from int64 to int32.
// Values that do not fit in 32 bits are returned truncated together with a
// loss-of-precision error.
func IntToInt32(value int64) (int32, error) {
	converted := int32(value)
	if value == int64(converted) {
		return converted, nil
	}
	return converted, errors.New(lossOfPrecisionErrorMsg)
}
// IntToUint -- Converts a value from int64 to uint.
// Negative inputs, and inputs that do not fit in uint on this platform,
// are returned wrapped/truncated together with a loss-of-precision error.
func IntToUint(value int64) (uint, error) {
	converted := uint(value)
	if value >= 0 && int64(converted) == value {
		return converted, nil
	}
	return converted, errors.New(lossOfPrecisionErrorMsg)
}
// IntToUint8 -- Converts a value from int64 to uint8.
// Negative inputs and inputs above 255 are returned wrapped/truncated
// together with a loss-of-precision error.
func IntToUint8(value int64) (uint8, error) {
	converted := uint8(value)
	if value >= 0 && int64(converted) == value {
		return converted, nil
	}
	return converted, errors.New(lossOfPrecisionErrorMsg)
}
// IntToUint16 -- Converts a value from int64 to uint16.
// Negative inputs and inputs above 65535 are returned wrapped/truncated
// together with a loss-of-precision error.
func IntToUint16(value int64) (uint16, error) {
	converted := uint16(value)
	if value >= 0 && int64(converted) == value {
		return converted, nil
	}
	return converted, errors.New(lossOfPrecisionErrorMsg)
}
// IntToUint32 -- Converts a value from int64 to uint32.
// Negative inputs and inputs that do not fit in 32 bits are returned
// wrapped/truncated together with a loss-of-precision error.
func IntToUint32(value int64) (uint32, error) {
	converted := uint32(value)
	if value >= 0 && int64(converted) == value {
		return converted, nil
	}
	return converted, errors.New(lossOfPrecisionErrorMsg)
}
// IntToUint64 -- Converts a value from int64 to uint64.
// Every non-negative int64 converts exactly; a negative input cannot be
// represented and is returned as its wrapped bit pattern together with a
// loss-of-precision error.
func IntToUint64(value int64) (uint64, error) {
	result := uint64(value)
	// int64 -> uint64 -> int64 round-trips bit-for-bit, so the round-trip
	// comparison the previous implementation performed could never fire;
	// negativity is the only lossy case.
	if value < 0 {
		return result, errors.New(lossOfPrecisionErrorMsg)
	}
	return result, nil
}
// IntToString -- Converts a value from int64 to its base-10 string form.
// The conversion is total, so the error result is always nil.
func IntToString(value int64) (string, error) {
	return string(strconv.AppendInt(nil, value, 10)), nil
}
package main
import "fmt"
// DeviceConverter is the representation of the wrist deviceConverter
type DeviceConverter struct {
	registers [6]int // the device's six registers
	instructions []Instruction // the loaded program
	ipBoundTo int // index of the register the instruction pointer is bound to
	ip int // current instruction pointer
}
// isEqual reports whether the two devices hold identical register contents.
// Only the six registers are compared; the instruction list and instruction
// pointer state are deliberately ignored, as in the original element-wise loop.
func (d DeviceConverter) isEqual(other DeviceConverter) bool {
	// Go arrays of comparable element type compare element-wise with ==,
	// which is exactly what the hand-written loop did.
	return d.registers == other.registers
}
// operationsConvertMapping dispatches each opcode mnemonic to the function
// that renders it as pseudo-code.
// NOTE(review): gtri and eqir have no entries — presumably those opcodes do
// not occur in the target program; confirm before reusing on other inputs.
var operationsConvertMapping = map[string]func(Instruction, int){
	"addr": addrConvert,
	"setr": setrConvert,
	"muli": muliConvert,
	"seti": setiConvert,
	"gtrr": gtrrConvert,
	"gtir": gtirConvert,
	"eqrr": eqrrConvert,
	"eqri": eqriConvert,
	"mulr": mulrConvert,
	"addi": addiConvert,
	"banr": banrConvert,
	"bani": baniConvert,
	"borr": borrConvert,
	"bori": boriConvert,
}
// n maps a register index to the single-letter name used in the emitted
// pseudo-code.
// NOTE(review): register 5 is rendered uppercase ("F") unlike the others —
// presumably to make the ip-bound register (ipBoundTo = 5) stand out;
// confirm this is intentional and not a typo for "f".
var n = map[int]string{
	0: "a",
	1: "b",
	2: "c",
	3: "d",
	4: "e",
	5: "F",
}

// ipBoundTo is the register index the instruction pointer is bound to.
var ipBoundTo = 5
// Below are the different operations that can be done on the deviceConverter
//Addition:
// addr (add register) stores into register C the result of adding register A and register B.
func addrConvert(i Instruction, lineNum int) {
	// A write to the ip-bound register is rendered as a computed jump; the
	// ip-bound operand contributes the current line number, so the jump base
	// is 1 + lineNum (the +1 accounts for the ip advancing after execution).
	if i.Cout == ipBoundTo {
		jump := 1 + lineNum
		if i.Ain == ipBoundTo {
			fmt.Printf("addr\t\tGOTO %d + %s \r\n", jump, n[i.Bin])
		} else {
			// NOTE(review): this branch prints operand A even when B (not A)
			// is the ip-bound register — it assumes one of the two inputs is
			// always the ip register; confirm against the target program.
			fmt.Printf("addr\t\tGOTO %d + %s \r\n", jump, n[i.Ain])
		}
	} else if i.Ain == ipBoundTo {
		// Reading the ip register yields the current line number.
		fmt.Printf("addr\t\t%s := %d + %s \r\n", n[i.Cout], lineNum, n[i.Bin])
	} else if i.Bin == ipBoundTo {
		fmt.Printf("addr\t\t%s := %s + %d \r\n", n[i.Cout], n[i.Ain], lineNum)
	} else {
		fmt.Printf("addr\t\t%s := %s + %s\r\n", n[i.Cout], n[i.Ain], n[i.Bin])
	}
}
// addi (add immediate) stores into register C the result of adding register A and value B.
// A write to the ip-bound register is rendered as an unconditional GOTO;
// anything else is a plain register assignment.
func addiConvert(i Instruction, lineNum int) {
	if i.Cout != ipBoundTo {
		fmt.Printf("addi\t\t%s := %s + %d \r\n", n[i.Cout], n[i.Ain], i.Bin)
		return
	}
	target := lineNum + 1
	if i.Ain == ipBoundTo {
		target += i.Bin
	}
	fmt.Printf("addi\t\tGOTO %d \r\n", target)
}
// Multiplication:
// mulr (multiply register) stores into register C the result of multiplying register A and register B.
func mulrConvert(i Instruction, lineNum int) {
	if i.Cout == ipBoundTo {
		// NOTE(review): a product written into the ip register is rendered as
		// program termination — this assumes the jump lands past the end of
		// the program, which is specific to the analysed input.
		fmt.Printf("mulr\t\tEXIT \r\n")
	} else {
		// Reading the ip-bound register yields the current line number.
		if i.Ain == ipBoundTo {
			fmt.Printf("mulr\t\t%s := %d * %s \r\n", n[i.Cout], lineNum, n[i.Bin])
		} else if i.Bin == ipBoundTo {
			fmt.Printf("mulr\t\t%s := %s * %d \r\n", n[i.Cout], n[i.Ain], lineNum)
		} else {
			fmt.Printf("mulr\t\t%s := %s * %s \r\n", n[i.Cout], n[i.Ain], n[i.Bin])
		}
	}
}
// muli (multiply immediate) stores into register C the result of multiplying register A and value B.
func muliConvert(i Instruction, lineNum int) {
	dst, src := n[i.Cout], n[i.Ain]
	fmt.Printf("muli\t\t%s := %s * %d\r\n", dst, src, i.Bin)
}
// Assignment:
// setr (set register) copies the contents of register A into register C. (Input B is ignored.)
// Writing to the ip-bound register becomes a GOTO; reading it yields the
// current line number.
func setrConvert(i Instruction, lineNum int) {
	switch {
	case i.Cout == ipBoundTo:
		fmt.Printf("setr\t\tGOTO %d \r\n", i.Ain)
	case i.Ain == ipBoundTo:
		fmt.Printf("setr\t\t%s = %d\r\n", n[i.Cout], lineNum)
	default:
		fmt.Printf("setr\t\t%s = %s\r\n", n[i.Cout], n[i.Ain])
	}
}
// seti (set immediate) stores value A into register C. (Input B is ignored.)
// Writing to the ip-bound register becomes a GOTO to line A+1.
func setiConvert(i Instruction, lineNum int) {
	if i.Cout == ipBoundTo {
		fmt.Printf("seti\t\tGOTO %d \r\n", i.Ain+1)
		return
	}
	fmt.Printf("seti\t\t%s = %d\r\n", n[i.Cout], i.Ain)
}
// Greater-than testing:
// gtrr (greater-than register/register) sets register C to 1 if register A is greater than register B. Otherwise, register C is set to 0.
func gtrrConvert(i Instruction, lineNum int) {
	dst, a, b := n[i.Cout], n[i.Ain], n[i.Bin]
	fmt.Printf("gtrr\t\t%s = (%s > %s) ? 1 : 0\r\n", dst, a, b)
}
// gtir (greater-than immediate/register) sets register C to 1 if value A is greater than register B. Otherwise, register C is set to 0.
func gtirConvert(i Instruction, lineNum int) {
	dst, b := n[i.Cout], n[i.Bin]
	fmt.Printf("gtir\t\t%s = (%d > %s) ? 1 : 0\r\n", dst, i.Ain, b)
}
// Equality testing:
// eqrr (equal register/register) sets register C to 1 if register A is equal to register B. Otherwise, register C is set to 0.
func eqrrConvert(i Instruction, lineNum int) {
	dst, a, b := n[i.Cout], n[i.Ain], n[i.Bin]
	fmt.Printf("eqrr\t\t%s = (%s == %s) ? 1 : 0\r\n", dst, a, b)
}
// eqri (equal register/immediate) sets register C to 1 if register A is equal to value B. Otherwise, register C is set to 0.
func eqriConvert(i Instruction, lineNum int) {
	dst, a := n[i.Cout], n[i.Ain]
	fmt.Printf("eqri\t\t%s = (%s == %d) ? 1 : 0\r\n", dst, a, i.Bin)
}
// Bitwise AND:
// banr (bitwise AND register) stores into register C the result of the bitwise AND of register A and register B.
func banrConvert(i Instruction, lineNum int) {
	dst, a, b := n[i.Cout], n[i.Ain], n[i.Bin]
	fmt.Printf("banr\t\t%s = %s & %s\r\n", dst, a, b)
}
// bani (bitwise AND immediate) stores into register C the result of the bitwise AND of register A and value B.
func baniConvert(i Instruction, lineNum int) {
	dst, a := n[i.Cout], n[i.Ain]
	fmt.Printf("bani\t\t%s = %s & %d\r\n", dst, a, i.Bin)
}
// Bitwise OR:
// borr (bitwise OR register) stores into register C the result of the bitwise OR of register A and register B.
func borrConvert(i Instruction, lineNum int) {
	dst, a, b := n[i.Cout], n[i.Ain], n[i.Bin]
	fmt.Printf("borr\t\t%s = %s | %s\r\n", dst, a, b)
}
// bori (bitwise OR immediate) stores into register C the result of the bitwise OR of register A and value B.
func boriConvert(i Instruction, lineNum int) {
fmt.Printf("bori\t\t%s = %s | %d\r\n", n[i.Cout], n[i.Ain], i.Bin)
} | 2018/21_1/deviceConverter.go | 0.527317 | 0.441974 | deviceConverter.go | starcoder |
package pgsql
import (
"database/sql"
"database/sql/driver"
"strconv"
)
// The NumRangeFrom*/NumRangeTo* constructors below pair each supported Go
// numeric array type with a private value/scanner adapter. Produced range
// literals use the half-open form "[lo,hi)".

// NumRangeFromIntArray2 returns a driver.Valuer that produces a PostgreSQL numrange from the given Go [2]int.
func NumRangeFromIntArray2(val [2]int) driver.Valuer {
	return numRangeFromIntArray2{val: val}
}

// NumRangeToIntArray2 returns an sql.Scanner that converts a PostgreSQL numrange into a Go [2]int and sets it to val.
func NumRangeToIntArray2(val *[2]int) sql.Scanner {
	return numRangeToIntArray2{val: val}
}

// NumRangeFromInt8Array2 returns a driver.Valuer that produces a PostgreSQL numrange from the given Go [2]int8.
func NumRangeFromInt8Array2(val [2]int8) driver.Valuer {
	return numRangeFromInt8Array2{val: val}
}

// NumRangeToInt8Array2 returns an sql.Scanner that converts a PostgreSQL numrange into a Go [2]int8 and sets it to val.
func NumRangeToInt8Array2(val *[2]int8) sql.Scanner {
	return numRangeToInt8Array2{val: val}
}

// NumRangeFromInt16Array2 returns a driver.Valuer that produces a PostgreSQL numrange from the given Go [2]int16.
func NumRangeFromInt16Array2(val [2]int16) driver.Valuer {
	return numRangeFromInt16Array2{val: val}
}

// NumRangeToInt16Array2 returns an sql.Scanner that converts a PostgreSQL numrange into a Go [2]int16 and sets it to val.
func NumRangeToInt16Array2(val *[2]int16) sql.Scanner {
	return numRangeToInt16Array2{val: val}
}

// NumRangeFromInt32Array2 returns a driver.Valuer that produces a PostgreSQL numrange from the given Go [2]int32.
func NumRangeFromInt32Array2(val [2]int32) driver.Valuer {
	return numRangeFromInt32Array2{val: val}
}

// NumRangeToInt32Array2 returns an sql.Scanner that converts a PostgreSQL numrange into a Go [2]int32 and sets it to val.
func NumRangeToInt32Array2(val *[2]int32) sql.Scanner {
	return numRangeToInt32Array2{val: val}
}

// NumRangeFromInt64Array2 returns a driver.Valuer that produces a PostgreSQL numrange from the given Go [2]int64.
func NumRangeFromInt64Array2(val [2]int64) driver.Valuer {
	return numRangeFromInt64Array2{val: val}
}

// NumRangeToInt64Array2 returns an sql.Scanner that converts a PostgreSQL numrange into a Go [2]int64 and sets it to val.
func NumRangeToInt64Array2(val *[2]int64) sql.Scanner {
	return numRangeToInt64Array2{val: val}
}

// NumRangeFromUintArray2 returns a driver.Valuer that produces a PostgreSQL numrange from the given Go [2]uint.
func NumRangeFromUintArray2(val [2]uint) driver.Valuer {
	return numRangeFromUintArray2{val: val}
}

// NumRangeToUintArray2 returns an sql.Scanner that converts a PostgreSQL numrange into a Go [2]uint and sets it to val.
func NumRangeToUintArray2(val *[2]uint) sql.Scanner {
	return numRangeToUintArray2{val: val}
}

// NumRangeFromUint8Array2 returns a driver.Valuer that produces a PostgreSQL numrange from the given Go [2]uint8.
func NumRangeFromUint8Array2(val [2]uint8) driver.Valuer {
	return numRangeFromUint8Array2{val: val}
}

// NumRangeToUint8Array2 returns an sql.Scanner that converts a PostgreSQL numrange into a Go [2]uint8 and sets it to val.
func NumRangeToUint8Array2(val *[2]uint8) sql.Scanner {
	return numRangeToUint8Array2{val: val}
}

// NumRangeFromUint16Array2 returns a driver.Valuer that produces a PostgreSQL numrange from the given Go [2]uint16.
func NumRangeFromUint16Array2(val [2]uint16) driver.Valuer {
	return numRangeFromUint16Array2{val: val}
}

// NumRangeToUint16Array2 returns an sql.Scanner that converts a PostgreSQL numrange into a Go [2]uint16 and sets it to val.
func NumRangeToUint16Array2(val *[2]uint16) sql.Scanner {
	return numRangeToUint16Array2{val: val}
}

// NumRangeFromUint32Array2 returns a driver.Valuer that produces a PostgreSQL numrange from the given Go [2]uint32.
func NumRangeFromUint32Array2(val [2]uint32) driver.Valuer {
	return numRangeFromUint32Array2{val: val}
}

// NumRangeToUint32Array2 returns an sql.Scanner that converts a PostgreSQL numrange into a Go [2]uint32 and sets it to val.
func NumRangeToUint32Array2(val *[2]uint32) sql.Scanner {
	return numRangeToUint32Array2{val: val}
}

// NumRangeFromUint64Array2 returns a driver.Valuer that produces a PostgreSQL numrange from the given Go [2]uint64.
func NumRangeFromUint64Array2(val [2]uint64) driver.Valuer {
	return numRangeFromUint64Array2{val: val}
}

// NumRangeToUint64Array2 returns an sql.Scanner that converts a PostgreSQL numrange into a Go [2]uint64 and sets it to val.
func NumRangeToUint64Array2(val *[2]uint64) sql.Scanner {
	return numRangeToUint64Array2{val: val}
}

// NumRangeFromFloat32Array2 returns a driver.Valuer that produces a PostgreSQL numrange from the given Go [2]float32.
func NumRangeFromFloat32Array2(val [2]float32) driver.Valuer {
	return numRangeFromFloat32Array2{val: val}
}

// NumRangeToFloat32Array2 returns an sql.Scanner that converts a PostgreSQL numrange into a Go [2]float32 and sets it to val.
func NumRangeToFloat32Array2(val *[2]float32) sql.Scanner {
	return numRangeToFloat32Array2{val: val}
}

// NumRangeFromFloat64Array2 returns a driver.Valuer that produces a PostgreSQL numrange from the given Go [2]float64.
func NumRangeFromFloat64Array2(val [2]float64) driver.Valuer {
	return numRangeFromFloat64Array2{val: val}
}

// NumRangeToFloat64Array2 returns an sql.Scanner that converts a PostgreSQL numrange into a Go [2]float64 and sets it to val.
func NumRangeToFloat64Array2(val *[2]float64) sql.Scanner {
	return numRangeToFloat64Array2{val: val}
}
// numRangeFromIntArray2 wraps a [2]int as a driver.Valuer producing the
// half-open numrange literal "[lo,hi)".
type numRangeFromIntArray2 struct {
	val [2]int
}

// Value encodes the wrapped pair as "[lo,hi)" in a byte slice.
func (v numRangeFromIntArray2) Value() (driver.Value, error) {
	buf := strconv.AppendInt([]byte{'['}, int64(v.val[0]), 10)
	buf = append(buf, ',')
	buf = strconv.AppendInt(buf, int64(v.val[1]), 10)
	return append(buf, ')'), nil
}
// numRangeToIntArray2 adapts a *[2]int as an sql.Scanner for numrange values.
type numRangeToIntArray2 struct {
	val *[2]int
}

// Scan parses a numrange literal into the wrapped array. An absent bound is
// stored as 0; a nil payload (presumably SQL NULL) leaves the destination
// untouched.
func (v numRangeToIntArray2) Scan(src interface{}) error {
	raw, err := srcbytes(src)
	if err != nil || raw == nil {
		return err
	}
	bounds := pgParseRange(raw)
	var lower, upper int64
	if len(bounds[0]) > 0 {
		if lower, err = strconv.ParseInt(string(bounds[0]), 10, 64); err != nil {
			return err
		}
	}
	if len(bounds[1]) > 0 {
		if upper, err = strconv.ParseInt(string(bounds[1]), 10, 64); err != nil {
			return err
		}
	}
	v.val[0], v.val[1] = int(lower), int(upper)
	return nil
}
// numRangeFromInt8Array2 wraps a [2]int8 as a driver.Valuer producing the
// half-open numrange literal "[lo,hi)".
type numRangeFromInt8Array2 struct {
	val [2]int8
}

// Value encodes the wrapped pair as "[lo,hi)" in a byte slice.
func (v numRangeFromInt8Array2) Value() (driver.Value, error) {
	buf := strconv.AppendInt([]byte{'['}, int64(v.val[0]), 10)
	buf = append(buf, ',')
	buf = strconv.AppendInt(buf, int64(v.val[1]), 10)
	return append(buf, ')'), nil
}
// numRangeToInt8Array2 adapts a *[2]int8 as an sql.Scanner for numrange values.
type numRangeToInt8Array2 struct {
	val *[2]int8
}

// Scan parses a numrange literal into the wrapped array. An absent bound is
// stored as 0; a nil payload (presumably SQL NULL) leaves the destination
// untouched.
func (v numRangeToInt8Array2) Scan(src interface{}) error {
	raw, err := srcbytes(src)
	if err != nil || raw == nil {
		return err
	}
	bounds := pgParseRange(raw)
	var lower, upper int64
	if len(bounds[0]) > 0 {
		if lower, err = strconv.ParseInt(string(bounds[0]), 10, 8); err != nil {
			return err
		}
	}
	if len(bounds[1]) > 0 {
		if upper, err = strconv.ParseInt(string(bounds[1]), 10, 8); err != nil {
			return err
		}
	}
	v.val[0], v.val[1] = int8(lower), int8(upper)
	return nil
}
// numRangeFromInt16Array2 wraps a [2]int16 as a driver.Valuer producing the
// half-open numrange literal "[lo,hi)".
type numRangeFromInt16Array2 struct {
	val [2]int16
}

// Value encodes the wrapped pair as "[lo,hi)" in a byte slice.
func (v numRangeFromInt16Array2) Value() (driver.Value, error) {
	buf := strconv.AppendInt([]byte{'['}, int64(v.val[0]), 10)
	buf = append(buf, ',')
	buf = strconv.AppendInt(buf, int64(v.val[1]), 10)
	return append(buf, ')'), nil
}
// numRangeToInt16Array2 adapts a *[2]int16 as an sql.Scanner for numrange values.
type numRangeToInt16Array2 struct {
	val *[2]int16
}

// Scan parses a numrange literal into the wrapped array. An absent bound is
// stored as 0; a nil payload (presumably SQL NULL) leaves the destination
// untouched.
func (v numRangeToInt16Array2) Scan(src interface{}) error {
	raw, err := srcbytes(src)
	if err != nil || raw == nil {
		return err
	}
	bounds := pgParseRange(raw)
	var lower, upper int64
	if len(bounds[0]) > 0 {
		if lower, err = strconv.ParseInt(string(bounds[0]), 10, 16); err != nil {
			return err
		}
	}
	if len(bounds[1]) > 0 {
		if upper, err = strconv.ParseInt(string(bounds[1]), 10, 16); err != nil {
			return err
		}
	}
	v.val[0], v.val[1] = int16(lower), int16(upper)
	return nil
}
// numRangeFromInt32Array2 wraps a [2]int32 as a driver.Valuer producing the
// half-open numrange literal "[lo,hi)".
type numRangeFromInt32Array2 struct {
	val [2]int32
}

// Value encodes the wrapped pair as "[lo,hi)" in a byte slice.
func (v numRangeFromInt32Array2) Value() (driver.Value, error) {
	buf := strconv.AppendInt([]byte{'['}, int64(v.val[0]), 10)
	buf = append(buf, ',')
	buf = strconv.AppendInt(buf, int64(v.val[1]), 10)
	return append(buf, ')'), nil
}
// numRangeToInt32Array2 adapts a *[2]int32 as an sql.Scanner for numrange values.
type numRangeToInt32Array2 struct {
	val *[2]int32
}

// Scan parses a numrange literal into the wrapped array. An absent bound is
// stored as 0; a nil payload (presumably SQL NULL) leaves the destination
// untouched.
func (v numRangeToInt32Array2) Scan(src interface{}) error {
	raw, err := srcbytes(src)
	if err != nil || raw == nil {
		return err
	}
	bounds := pgParseRange(raw)
	var lower, upper int64
	if len(bounds[0]) > 0 {
		if lower, err = strconv.ParseInt(string(bounds[0]), 10, 32); err != nil {
			return err
		}
	}
	if len(bounds[1]) > 0 {
		if upper, err = strconv.ParseInt(string(bounds[1]), 10, 32); err != nil {
			return err
		}
	}
	v.val[0], v.val[1] = int32(lower), int32(upper)
	return nil
}
// numRangeFromInt64Array2 wraps a [2]int64 as a driver.Valuer producing the
// half-open numrange literal "[lo,hi)".
type numRangeFromInt64Array2 struct {
	val [2]int64
}

// Value encodes the wrapped pair as "[lo,hi)" in a byte slice.
func (v numRangeFromInt64Array2) Value() (driver.Value, error) {
	buf := strconv.AppendInt([]byte{'['}, v.val[0], 10)
	buf = append(buf, ',')
	buf = strconv.AppendInt(buf, v.val[1], 10)
	return append(buf, ')'), nil
}
// numRangeToInt64Array2 adapts a *[2]int64 as an sql.Scanner for numrange values.
type numRangeToInt64Array2 struct {
	val *[2]int64
}

// Scan parses a numrange literal into the wrapped array. An absent bound is
// stored as 0; a nil payload (presumably SQL NULL) leaves the destination
// untouched.
func (v numRangeToInt64Array2) Scan(src interface{}) error {
	raw, err := srcbytes(src)
	if err != nil || raw == nil {
		return err
	}
	bounds := pgParseRange(raw)
	var lower, upper int64
	if len(bounds[0]) > 0 {
		if lower, err = strconv.ParseInt(string(bounds[0]), 10, 64); err != nil {
			return err
		}
	}
	if len(bounds[1]) > 0 {
		if upper, err = strconv.ParseInt(string(bounds[1]), 10, 64); err != nil {
			return err
		}
	}
	v.val[0], v.val[1] = lower, upper
	return nil
}
// numRangeFromUintArray2 wraps a [2]uint as a driver.Valuer producing the
// half-open numrange literal "[lo,hi)".
type numRangeFromUintArray2 struct {
	val [2]uint
}

// Value encodes the wrapped pair as "[lo,hi)" in a byte slice.
func (v numRangeFromUintArray2) Value() (driver.Value, error) {
	buf := strconv.AppendUint([]byte{'['}, uint64(v.val[0]), 10)
	buf = append(buf, ',')
	buf = strconv.AppendUint(buf, uint64(v.val[1]), 10)
	return append(buf, ')'), nil
}
// numRangeToUintArray2 adapts a *[2]uint as an sql.Scanner for numrange values.
type numRangeToUintArray2 struct {
	val *[2]uint
}

// Scan parses a numrange literal into the wrapped array. An absent bound is
// stored as 0; a nil payload (presumably SQL NULL) leaves the destination
// untouched.
func (v numRangeToUintArray2) Scan(src interface{}) error {
	raw, err := srcbytes(src)
	if err != nil || raw == nil {
		return err
	}
	bounds := pgParseRange(raw)
	var lower, upper uint64
	if len(bounds[0]) > 0 {
		if lower, err = strconv.ParseUint(string(bounds[0]), 10, 64); err != nil {
			return err
		}
	}
	if len(bounds[1]) > 0 {
		if upper, err = strconv.ParseUint(string(bounds[1]), 10, 64); err != nil {
			return err
		}
	}
	v.val[0], v.val[1] = uint(lower), uint(upper)
	return nil
}
// numRangeFromUint8Array2 wraps a [2]uint8 as a driver.Valuer producing the
// half-open numrange literal "[lo,hi)".
type numRangeFromUint8Array2 struct {
	val [2]uint8
}

// Value encodes the wrapped pair as "[lo,hi)" in a byte slice.
func (v numRangeFromUint8Array2) Value() (driver.Value, error) {
	buf := strconv.AppendUint([]byte{'['}, uint64(v.val[0]), 10)
	buf = append(buf, ',')
	buf = strconv.AppendUint(buf, uint64(v.val[1]), 10)
	return append(buf, ')'), nil
}
// numRangeToUint8Array2 adapts a *[2]uint8 as an sql.Scanner for numrange values.
type numRangeToUint8Array2 struct {
	val *[2]uint8
}

// Scan parses a numrange literal into the wrapped array. An absent bound is
// stored as 0; a nil payload (presumably SQL NULL) leaves the destination
// untouched.
func (v numRangeToUint8Array2) Scan(src interface{}) error {
	raw, err := srcbytes(src)
	if err != nil || raw == nil {
		return err
	}
	bounds := pgParseRange(raw)
	var lower, upper uint64
	if len(bounds[0]) > 0 {
		if lower, err = strconv.ParseUint(string(bounds[0]), 10, 8); err != nil {
			return err
		}
	}
	if len(bounds[1]) > 0 {
		if upper, err = strconv.ParseUint(string(bounds[1]), 10, 8); err != nil {
			return err
		}
	}
	v.val[0], v.val[1] = uint8(lower), uint8(upper)
	return nil
}
// numRangeFromUint16Array2 wraps a [2]uint16 as a driver.Valuer producing the
// half-open numrange literal "[lo,hi)".
type numRangeFromUint16Array2 struct {
	val [2]uint16
}

// Value encodes the wrapped pair as "[lo,hi)" in a byte slice.
func (v numRangeFromUint16Array2) Value() (driver.Value, error) {
	buf := strconv.AppendUint([]byte{'['}, uint64(v.val[0]), 10)
	buf = append(buf, ',')
	buf = strconv.AppendUint(buf, uint64(v.val[1]), 10)
	return append(buf, ')'), nil
}
// numRangeToUint16Array2 adapts a *[2]uint16 as an sql.Scanner for numrange values.
type numRangeToUint16Array2 struct {
	val *[2]uint16
}

// Scan parses a numrange literal into the wrapped array. An absent bound is
// stored as 0; a nil payload (presumably SQL NULL) leaves the destination
// untouched.
func (v numRangeToUint16Array2) Scan(src interface{}) error {
	raw, err := srcbytes(src)
	if err != nil || raw == nil {
		return err
	}
	bounds := pgParseRange(raw)
	var lower, upper uint64
	if len(bounds[0]) > 0 {
		if lower, err = strconv.ParseUint(string(bounds[0]), 10, 16); err != nil {
			return err
		}
	}
	if len(bounds[1]) > 0 {
		if upper, err = strconv.ParseUint(string(bounds[1]), 10, 16); err != nil {
			return err
		}
	}
	v.val[0], v.val[1] = uint16(lower), uint16(upper)
	return nil
}
// numRangeFromUint32Array2 wraps a [2]uint32 as a driver.Valuer producing the
// half-open numrange literal "[lo,hi)".
type numRangeFromUint32Array2 struct {
	val [2]uint32
}

// Value encodes the wrapped pair as "[lo,hi)" in a byte slice.
func (v numRangeFromUint32Array2) Value() (driver.Value, error) {
	buf := strconv.AppendUint([]byte{'['}, uint64(v.val[0]), 10)
	buf = append(buf, ',')
	buf = strconv.AppendUint(buf, uint64(v.val[1]), 10)
	return append(buf, ')'), nil
}
// numRangeToUint32Array2 adapts a *[2]uint32 as an sql.Scanner for numrange values.
type numRangeToUint32Array2 struct {
	val *[2]uint32
}

// Scan parses a numrange literal into the wrapped array. An absent bound is
// stored as 0; a nil payload (presumably SQL NULL) leaves the destination
// untouched.
func (v numRangeToUint32Array2) Scan(src interface{}) error {
	raw, err := srcbytes(src)
	if err != nil || raw == nil {
		return err
	}
	bounds := pgParseRange(raw)
	var lower, upper uint64
	if len(bounds[0]) > 0 {
		if lower, err = strconv.ParseUint(string(bounds[0]), 10, 32); err != nil {
			return err
		}
	}
	if len(bounds[1]) > 0 {
		if upper, err = strconv.ParseUint(string(bounds[1]), 10, 32); err != nil {
			return err
		}
	}
	v.val[0], v.val[1] = uint32(lower), uint32(upper)
	return nil
}
// numRangeFromUint64Array2 wraps a [2]uint64 as a driver.Valuer producing the
// half-open numrange literal "[lo,hi)".
type numRangeFromUint64Array2 struct {
	val [2]uint64
}

// Value encodes the wrapped pair as "[lo,hi)" in a byte slice.
func (v numRangeFromUint64Array2) Value() (driver.Value, error) {
	buf := strconv.AppendUint([]byte{'['}, v.val[0], 10)
	buf = append(buf, ',')
	buf = strconv.AppendUint(buf, v.val[1], 10)
	return append(buf, ')'), nil
}
// numRangeToUint64Array2 adapts a *[2]uint64 as an sql.Scanner for numrange values.
type numRangeToUint64Array2 struct {
	val *[2]uint64
}

// Scan parses a numrange literal into the wrapped array. An absent bound is
// stored as 0; a nil payload (presumably SQL NULL) leaves the destination
// untouched.
func (v numRangeToUint64Array2) Scan(src interface{}) error {
	raw, err := srcbytes(src)
	if err != nil || raw == nil {
		return err
	}
	bounds := pgParseRange(raw)
	var lower, upper uint64
	if len(bounds[0]) > 0 {
		if lower, err = strconv.ParseUint(string(bounds[0]), 10, 64); err != nil {
			return err
		}
	}
	if len(bounds[1]) > 0 {
		if upper, err = strconv.ParseUint(string(bounds[1]), 10, 64); err != nil {
			return err
		}
	}
	v.val[0], v.val[1] = lower, upper
	return nil
}
// numRangeFromFloat32Array2 wraps a [2]float32 as a driver.Valuer producing
// the half-open numrange literal "[lo,hi)".
type numRangeFromFloat32Array2 struct {
	val [2]float32
}

// Value encodes the wrapped pair as "[lo,hi)". Bounds are rendered with
// strconv.AppendFloat ('f', shortest decimal that round-trips at 32 bits);
// the previous AppendInt-based encoding silently truncated the fractional
// part of each bound.
func (v numRangeFromFloat32Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendFloat(out, float64(v.val[0]), 'f', -1, 32)
	out = append(out, ',')
	out = strconv.AppendFloat(out, float64(v.val[1]), 'f', -1, 32)
	out = append(out, ')')
	return out, nil
}
// numRangeToFloat32Array2 adapts a *[2]float32 as an sql.Scanner for numrange values.
type numRangeToFloat32Array2 struct {
	val *[2]float32
}

// Scan parses a numrange literal into the wrapped array. Bounds are parsed
// with strconv.ParseFloat (bitSize 32) — the previous ParseInt-based code
// rejected any bound carrying a fractional part, which numrange values may
// have. An absent bound is stored as 0; a nil payload leaves the
// destination untouched.
func (v numRangeToFloat32Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi float64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseFloat(string(elems[0]), 32); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseFloat(string(elems[1]), 32); err != nil {
			return err
		}
	}
	v.val[0] = float32(lo)
	v.val[1] = float32(hi)
	return nil
}
// numRangeFromFloat64Array2 wraps a [2]float64 as a driver.Valuer producing
// the half-open numrange literal "[lo,hi)".
type numRangeFromFloat64Array2 struct {
	val [2]float64
}

// Value encodes the wrapped pair as "[lo,hi)". Bounds are rendered with
// strconv.AppendFloat ('f', shortest decimal that round-trips at 64 bits);
// the previous AppendInt-based encoding silently truncated the fractional
// part of each bound.
func (v numRangeFromFloat64Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendFloat(out, v.val[0], 'f', -1, 64)
	out = append(out, ',')
	out = strconv.AppendFloat(out, v.val[1], 'f', -1, 64)
	out = append(out, ')')
	return out, nil
}
type numRangeToFloat64Array2 struct {
val *[2]float64
}
func (v numRangeToFloat64Array2) Scan(src interface{}) error {
data, err := srcbytes(src)
if err != nil {
return err
} else if data == nil {
return nil
}
var lo, hi int64
elems := pgParseRange(data)
if len(elems[0]) > 0 {
if lo, err = strconv.ParseInt(string(elems[0]), 10, 64); err != nil {
return err
}
}
if len(elems[1]) > 0 {
if hi, err = strconv.ParseInt(string(elems[1]), 10, 64); err != nil {
return err
}
}
v.val[0] = float64(lo)
v.val[1] = float64(hi)
return nil
} | pgsql/numrange.go | 0.801354 | 0.740597 | numrange.go | starcoder |
package streamstats
import "fmt"
// BoxPlot represents a BoxPlot with interquartile range and whiskers backed by a P2-Quantile tracking the median, P=0.5.
// The embedded P2Quantile supplies streaming estimates of the median and the
// surrounding quartiles as well as min, max and the observation count.
type BoxPlot struct {
	P2Quantile
}

// NewBoxPlot returns a new BoxPlot whose underlying P2 estimator is
// configured to track the median (p = 0.5).
func NewBoxPlot() BoxPlot {
	return BoxPlot{NewP2Quantile(0.5)}
}
// Median returns the estimated median (the tracked p = 0.5 quantile).
func (bp BoxPlot) Median() float64 {
	return bp.Quantile()
}

// UpperQuartile returns the estimated upper quartile (Q3).
func (bp BoxPlot) UpperQuartile() float64 {
	return bp.UpperQuantile()
}

// LowerQuartile returns the estimated lower quartile (Q1).
func (bp BoxPlot) LowerQuartile() float64 {
	return bp.LowerQuantile()
}

// InterQuartileRange returns the estimated interquartile range, Q3 - Q1.
func (bp BoxPlot) InterQuartileRange() float64 {
	return bp.UpperQuantile() - bp.LowerQuantile()
}
// UpperWhisker returns the estimated upper whisker, Q3 + 1.5 * IQR
// (the upper Tukey fence).
func (bp BoxPlot) UpperWhisker() float64 {
	return bp.UpperQuantile() + 1.5*bp.InterQuartileRange()
}

// LowerWhisker returns the estimated lower whisker, Q1 - 1.5 * IQR
// (the lower Tukey fence).
func (bp BoxPlot) LowerWhisker() float64 {
	return bp.LowerQuantile() - 1.5*bp.InterQuartileRange()
}

// IsOutlier returns true if the data is outside the whiskers (Tukey's rule).
func (bp BoxPlot) IsOutlier(x float64) bool {
	return x < bp.LowerWhisker() || x > bp.UpperWhisker()
}
// MidHinge returns the MidHinge of the data, the average of the upper and
// lower quartiles.
func (bp BoxPlot) MidHinge() float64 {
	return (bp.UpperQuartile() + bp.LowerQuartile()) / 2.0
}

// MidRange returns the MidRange of the data, the average of max and min.
func (bp BoxPlot) MidRange() float64 {
	return (bp.Max() + bp.Min()) / 2.0
}

// TriMean returns the TriMean of the data — the average of the median and
// the midhinge, computed in weighted form (Q1 + 2*median + Q3) / 4.
func (bp BoxPlot) TriMean() float64 {
	return (bp.UpperQuartile() + 2.0*bp.Median() + bp.LowerQuartile()) / 4.0
}
// String implements fmt.Stringer, printing the five-number summary
// (min, Q1, median, Q3, max) and the observation count.
func (bp BoxPlot) String() string {
	return fmt.Sprintf("Min: %0.3f LowerQuartile: %0.3f Median: %0.3f UpperQuartile: %0.3f Max: %0.3f N: %d", bp.Min(), bp.LowerQuartile(), bp.Median(), bp.UpperQuartile(), bp.Max(), bp.N())
}
package lzfse
// lzvnOpCode classifies the first byte of an LZVN instruction into the
// operation family the decoder must handle; opcode_table below maps every
// possible opcode byte to one of these values.
type lzvnOpCode byte

// Operation families. nop is the zero value.
const (
	nop lzvnOpCode = iota
	end_of_stream
	undefined
	small_literal
	large_literal
	small_match
	large_match
	small_distance
	large_distance
	medium_distance
	previous_distance
)
// opcode_table maps each possible first instruction byte (0x00-0xFF) to its
// lzvnOpCode operation family. Each source row below covers eight
// consecutive opcode values; the comments mark 16-opcode ranges.
var opcode_table = [256]lzvnOpCode{
	// 0x00 - 0x0F
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, end_of_stream, large_distance,
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, nop, large_distance,
	// 0x10 - 0x1F
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, nop, large_distance,
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, undefined, large_distance,
	// 0x20 - 0x2F
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, undefined, large_distance,
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, undefined, large_distance,
	// 0x30 - 0x3F
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, undefined, large_distance,
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, undefined, large_distance,
	// 0x40 - 0x4F
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, previous_distance, large_distance,
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, previous_distance, large_distance,
	// 0x50 - 0x5F
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, previous_distance, large_distance,
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, previous_distance, large_distance,
	// 0x60 - 0x6F
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, previous_distance, large_distance,
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, previous_distance, large_distance,
	// 0x70 - 0x7F
	undefined, undefined, undefined, undefined, undefined, undefined, undefined, undefined,
	undefined, undefined, undefined, undefined, undefined, undefined, undefined, undefined,
	// 0x80 - 0x8F
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, previous_distance, large_distance,
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, previous_distance, large_distance,
	// 0x90 - 0x9F
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, previous_distance, large_distance,
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, previous_distance, large_distance,
	// 0xA0 - 0xAF
	medium_distance, medium_distance, medium_distance, medium_distance, medium_distance, medium_distance, medium_distance, medium_distance,
	medium_distance, medium_distance, medium_distance, medium_distance, medium_distance, medium_distance, medium_distance, medium_distance,
	// 0xB0 - 0xBF
	medium_distance, medium_distance, medium_distance, medium_distance, medium_distance, medium_distance, medium_distance, medium_distance,
	medium_distance, medium_distance, medium_distance, medium_distance, medium_distance, medium_distance, medium_distance, medium_distance,
	// 0xC0 - 0xCF
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, previous_distance, large_distance,
	small_distance, small_distance, small_distance, small_distance, small_distance, small_distance, previous_distance, large_distance,
	// 0xD0 - 0xDF
	undefined, undefined, undefined, undefined, undefined, undefined, undefined, undefined,
	undefined, undefined, undefined, undefined, undefined, undefined, undefined, undefined,
	// 0xE0 - 0xEF
	large_literal, small_literal, small_literal, small_literal, small_literal, small_literal, small_literal, small_literal,
	small_literal, small_literal, small_literal, small_literal, small_literal, small_literal, small_literal, small_literal,
	// 0xF0 - 0xFF
	large_match, small_match, small_match, small_match, small_match, small_match, small_match, small_match,
	small_match, small_match, small_match, small_match, small_match, small_match, small_match, small_match,
}
package chaikin
import (
"math/big"
"github.com/MicahParks/go-ad"
"github.com/MicahParks/go-ma"
)
// BigChaikin represents the state of the Chaikin Oscillator.
//
// It composes a running Accumulation/Distribution line with a short and a
// long EMA over that line; the oscillator value is short EMA minus long EMA.
type BigChaikin struct {
	ad      *ad.BigAD  // running Accumulation/Distribution line
	short   *ma.BigEMA // short-period EMA of the A/D line
	long    *ma.BigEMA // long-period EMA of the A/D line
	prevBuy bool       // previous buy/sell state, used to detect flips in Calculate
}

// BigResult holds the results of a BigChaikin calculation.
type BigResult struct {
	ADLine      *big.Float // current Accumulation/Distribution line value
	BuySignal   *bool      // non-nil only when the buy/sell state flipped this period
	ChaikinLine *big.Float // current oscillator value (short EMA minus long EMA)
}
// NewBig creates a new Chaikin Oscillator and returns its first point along with the corresponding Accumulation
// Distribution Line point. The fixed-size array enforces exactly LongEMA initial inputs.
func NewBig(initial [LongEMA]ad.BigInput) (*BigChaikin, BigResult) {
	return NewBigCustom(initial[:], ShortEMA, nil, nil)
}

// NewBigCustom creates a new Chaikin Oscillator and returns its first point along with the corresponding Accumulation
// Distribution Line point. Custom (non-Chaikin approved) inputs are allowed. The length of the initial input slice is
// the length of the long EMA period.
//
// NOTE(review): latestShortEMA is only assigned when shortPeriod < len(initial);
// if the two were equal the Sub below would dereference nil. Callers appear to
// guarantee shortPeriod < len(initial) (ShortEMA < LongEMA) — confirm.
func NewBigCustom(initial []ad.BigInput, shortPeriod uint, shortSmoothing, longSmoothing *big.Float) (*BigChaikin, BigResult) {
	// One A/D-line point per initial input.
	adLinePoints := make([]*big.Float, len(initial))
	cha := &BigChaikin{}
	var adLine *big.Float
	cha.ad, adLine = ad.NewBig(initial[0])
	adLinePoints[0] = adLine
	for i, input := range initial[1:] {
		adLinePoints[i+1] = cha.ad.Calculate(input)
	}
	// Seed the short EMA with an SMA over the first shortPeriod points.
	_, shortSMA := ma.NewBigSMA(adLinePoints[:shortPeriod])
	cha.short = ma.NewBigEMA(shortPeriod, shortSMA, shortSmoothing)
	// Catch up the short EMA to where the long EMA will be.
	var latestShortEMA *big.Float
	for _, adLine = range adLinePoints[shortPeriod:] {
		latestShortEMA = cha.short.Calculate(adLine)
	}
	// The long EMA is seeded with an SMA over the whole initial window.
	_, longSMA := ma.NewBigSMA(adLinePoints)
	cha.long = ma.NewBigEMA(uint(len(initial)), longSMA, longSmoothing)
	result := BigResult{
		ADLine:      adLine,
		BuySignal:   nil, // no flip can be detected on the very first point
		ChaikinLine: new(big.Float).Sub(latestShortEMA, longSMA),
	}
	// Record the initial buy/sell state for later flip detection.
	cha.prevBuy = result.ChaikinLine.Cmp(adLine) == 1
	return cha, result
}
// Calculate produces the next point on the Chaikin Oscillator given the current period's information.
func (c *BigChaikin) Calculate(next ad.BigInput) BigResult {
	adLine := c.ad.Calculate(next)
	// Oscillator value: short EMA minus long EMA of the A/D line.
	result := new(big.Float).Sub(c.short.Calculate(adLine), c.long.Calculate(adLine))
	// expected is the Cmp result that matches the previous buy/sell state.
	expected := -1
	if c.prevBuy {
		expected = 1
	}
	var buySignal *bool
	// A state flip emits a non-nil signal carrying the new state.
	if result.Cmp(adLine) != expected {
		buy := !c.prevBuy
		c.prevBuy = buy
		buySignal = &buy
	}
	return BigResult{
		ADLine:      adLine,
		BuySignal:   buySignal,
		ChaikinLine: result,
	}
}
package model
import (
"path/filepath"
"github.com/gobwas/glob"
"github.com/pkg/errors"
)
// PathMatcher is implemented by anything that can decide whether a given
// file path (and whether it is a directory) matches some rule set.
type PathMatcher interface {
	Matches(f string, isDir bool) (bool, error)
}

// A Matcher that matches nothing.
type emptyMatcher struct{}

// Matches always reports false: the empty matcher matches no path.
func (m emptyMatcher) Matches(f string, isDir bool) (bool, error) {
	return false, nil
}

// EmptyMatcher is the shared, stateless instance of the match-nothing matcher.
var EmptyMatcher PathMatcher = emptyMatcher{}

// A matcher that matches against a set of files.
type fileMatcher struct {
	paths map[string]bool // set of absolute paths (see NewSimpleFileMatcher); values are always true
}

// Matches reports whether f is one of the matcher's known paths; a missing
// key yields false, so unknown paths never match.
func (m fileMatcher) Matches(f string, isDir bool) (bool, error) {
	return m.paths[f], nil
}
// NewSimpleFileMatcher builds a fileMatcher from the given paths, converting
// each one to an absolute path first, because PathMatchers always work with
// absolute paths.
func NewSimpleFileMatcher(paths ...string) (fileMatcher, error) {
	set := make(map[string]bool, len(paths))
	for _, p := range paths {
		abs, err := filepath.Abs(p)
		if err != nil {
			return fileMatcher{}, errors.Wrap(err, "NewSimplePathMatcher")
		}
		set[abs] = true
	}
	return fileMatcher{paths: set}, nil
}
// globMatcher matches a path when any of its compiled glob patterns does.
type globMatcher struct {
	globs []glob.Glob
}

// Matches reports whether any pattern in the set matches f.
func (gm globMatcher) Matches(f string, isDir bool) (bool, error) {
	matched := false
	for _, pattern := range gm.globs {
		if pattern.Match(f) {
			matched = true
			break
		}
	}
	return matched, nil
}

// NewGlobMatcher compiles the given glob patterns into a PathMatcher.
// It panics (via glob.MustCompile) if any pattern is invalid.
func NewGlobMatcher(globs ...string) PathMatcher {
	compiled := make([]glob.Glob, 0, len(globs))
	for _, pattern := range globs {
		compiled = append(compiled, glob.MustCompile(pattern))
	}
	return globMatcher{globs: compiled}
}
// PatternMatcher is a PathMatcher that can also describe itself as a list of
// match patterns.
type PatternMatcher interface {
	PathMatcher

	// Express this PathMatcher as a sequence of filepath.Match
	// patterns. These patterns are widely useful in Docker-land because
	// they're suitable in .dockerignore or Dockerfile ADD statements
	// https://docs.docker.com/engine/reference/builder/#add
	AsMatchPatterns() []string
}

// CompositePathMatcher matches when any of its child matchers does.
type CompositePathMatcher struct {
	Matchers []PathMatcher
}
// NewCompositeMatcher combines the given matchers into one that matches when
// any sub-matcher does. If every sub-matcher can also express itself as
// filepath.Match patterns (PatternMatcher), the result is a
// CompositePatternMatcher so the combined patterns remain available;
// otherwise a plain CompositePathMatcher is returned.
func NewCompositeMatcher(matchers []PathMatcher) PathMatcher {
	if len(matchers) == 0 {
		return EmptyMatcher
	}
	cMatcher := CompositePathMatcher{Matchers: matchers}
	pMatchers := make([]PatternMatcher, len(matchers))
	for i, m := range matchers {
		// Bug fix: assert against the PatternMatcher interface, not the
		// concrete CompositePatternMatcher type. The old assertion meant a
		// composite of pattern-capable matchers was only detected when the
		// children were themselves composites.
		pm, ok := m.(PatternMatcher)
		if !ok {
			return cMatcher
		}
		pMatchers[i] = pm
	}
	return CompositePatternMatcher{
		CompositePathMatcher: cMatcher,
		Matchers:             pMatchers,
	}
}
// Matches reports whether any sub-matcher matches f; the first error
// encountered aborts the scan.
func (c CompositePathMatcher) Matches(f string, isDir bool) (bool, error) {
	for _, matcher := range c.Matchers {
		matched, err := matcher.Matches(f, isDir)
		if err != nil {
			return false, err
		}
		if matched {
			return true, nil
		}
	}
	return false, nil
}
// CompositePatternMatcher is a CompositePathMatcher whose children can all
// express themselves as filepath.Match patterns, so the composite can too.
type CompositePatternMatcher struct {
	CompositePathMatcher
	Matchers []PatternMatcher
}

// AsMatchPatterns concatenates the match patterns of every child matcher.
func (c CompositePatternMatcher) AsMatchPatterns() []string {
	patterns := make([]string, 0)
	for _, matcher := range c.Matchers {
		patterns = append(patterns, matcher.AsMatchPatterns()...)
	}
	return patterns
}

var _ PathMatcher = CompositePathMatcher{}
var _ PatternMatcher = CompositePatternMatcher{}
package sprite
import (
"bytes"
"encoding/binary"
"github.com/silbinarywolf/gml-go/gml/internal/dt"
"github.com/silbinarywolf/gml-go/gml/internal/geom"
)
// spriteStateSerialize mirrors SpriteState with exported fields.
// NOTE(review): not referenced in this file — presumably consumed by an
// encoder elsewhere in the package; confirm before removing.
type spriteStateSerialize struct {
	SpriteIndex SpriteIndex
	ImageScale  geom.Vec
	ImageIndex  float64
}

// SpriteState tracks which sprite an instance shows and its animation state.
type SpriteState struct {
	spriteIndex SpriteIndex // currently assigned sprite
	ImageScale  geom.Vec    // draw scale
	imageIndex  float64     // current (fractional) animation frame
}
// GetCollisionMask returns the collision mask of the given kind for one frame
// of a sprite, or nil if the sprite cannot be resolved.
// NOTE(review): imageIndex and kind are not bounds-checked — out-of-range
// values panic; presumably callers pass validated indices. Confirm.
func GetCollisionMask(spriteIndex SpriteIndex, imageIndex int, kind int) *CollisionMask {
	spr := spriteIndex.get()
	if spr == nil {
		return nil
	}
	return &spr.frames[imageIndex].collisionMasks[kind]
}

// Accessors for the sprite and the current animation frame.
func (state *SpriteState) SpriteIndex() SpriteIndex { return state.spriteIndex }
func (state *SpriteState) sprite() SpriteIndex      { return state.spriteIndex }
func (state *SpriteState) ImageIndex() float64      { return state.imageIndex }

// ImageSpeed returns the animation speed of the assigned sprite,
// or 0 when no sprite is assigned.
func (state *SpriteState) ImageSpeed() float64 {
	if state.spriteIndex == SprUndefined {
		return 0
	}
	return state.spriteIndex.ImageSpeed()
}

// ImageNumber returns the number of frames of the assigned sprite,
// or 0 when no sprite is assigned.
func (state *SpriteState) ImageNumber() float64 {
	if state.spriteIndex == SprUndefined {
		return 0
	}
	spr := state.spriteIndex.get()
	return float64(len(spr.frames))
}
// SetSprite assigns a new sprite, lazily loading it on first use and
// rewinding the animation; assigning the current sprite is a no-op.
func (state *SpriteState) SetSprite(spriteIndex SpriteIndex) {
	if state.spriteIndex == spriteIndex {
		return
	}
	if !spriteIndex.isLoaded() {
		SpriteLoad(spriteIndex)
	}
	state.spriteIndex = spriteIndex
	state.imageIndex = 0
}

// SetImageIndex sets the animation frame, wrapping values past the end of
// the animation back into range and clamping negative values to 0.
func (state *SpriteState) SetImageIndex(imageIndex float64) {
	state.imageIndex = imageIndex
	frameCount := state.ImageNumber()
	if frameCount <= 0 {
		return
	}
	for state.imageIndex >= frameCount {
		state.imageIndex -= frameCount
	}
	if state.imageIndex < 0 {
		state.imageIndex = 0
	}
}
// ImageUpdate advances the animation frame by the sprite's image speed scaled
// by delta time, resetting to frame 0 once the animation runs past the end.
func (state *SpriteState) ImageUpdate() {
	imageNumber := state.ImageNumber()
	if imageNumber > 0 {
		imageSpeed := state.ImageSpeed() * dt.DeltaTime()
		state.imageIndex += imageSpeed
		if state.imageIndex >= state.ImageNumber() {
			// NOTE(Jake): 2019-04-03
			// Tested against Game Maker Studio 2, 2.2.2.326
			// It resets to zero after going over.
			// This is important as it allows us to test if the animation
			// has ended on the current frame without adding extra state.
			state.imageIndex = 0
		}
	}
}
func (state SpriteState) UnsafeSnapshotMarshalBinary(buf *bytes.Buffer) error {
if err := binary.Write(buf, binary.LittleEndian, state.spriteIndex); err != nil {
return err
}
if err := binary.Write(buf, binary.LittleEndian, state.imageIndex); err != nil {
return err
}
if err := binary.Write(buf, binary.LittleEndian, state.ImageScale); err != nil {
return err
}
return nil
}
func (state *SpriteState) UnsafeSnapshotUnmarshalBinary(buf *bytes.Buffer) error {
if err := binary.Read(buf, binary.LittleEndian, &state.spriteIndex); err != nil {
return err
}
if err := binary.Read(buf, binary.LittleEndian, &state.imageIndex); err != nil {
return err
}
if err := binary.Read(buf, binary.LittleEndian, &state.ImageScale); err != nil {
return err
}
return nil
} | gml/internal/sprite/sprite_state.go | 0.615203 | 0.427994 | sprite_state.go | starcoder |
package quicksort
import (
"math/rand"
)
// QuickSort sorts A[start..end] in place (ascending when isIncrement is true,
// descending otherwise) and returns A for convenience.
func QuickSort(A []int, start, end int, isIncrement bool) []int {
	if start >= end {
		return A
	}
	pivot := partition(A, start, end, isIncrement)
	QuickSort(A, start, pivot-1, isIncrement)
	QuickSort(A, pivot+1, end, isIncrement)
	return A
}

// partition rearranges A[start..end] around the pivot A[end]: elements that
// must precede the pivot in the requested order are moved to the left, the
// pivot is swapped into its final slot, and that slot's index is returned.
// During the sweep, [start, boundary] holds elements before the pivot,
// (boundary, cur) holds elements after it, and [cur, end-1] is unpartitioned.
func partition(A []int, start, end int, isIncrement bool) int {
	boundary, pivot := start-1, end
	for cur := start; cur <= end; cur++ {
		if (isIncrement && A[pivot] > A[cur]) || (!isIncrement && A[pivot] < A[cur]) {
			boundary++
			exchange(A, boundary, cur)
		}
	}
	exchange(A, boundary+1, pivot)
	return boundary + 1
}

// exchange swaps the elements of A at indices x and y.
func exchange(A []int, x, y int) {
	A[x], A[y] = A[y], A[x]
}
// RandomizedQuickSort sorts A[start..end] in place like QuickSort, but picks
// the partition pivot uniformly at random to avoid quadratic behavior on
// already-sorted input.
func RandomizedQuickSort(A []int, start, end int, isIncrement bool) []int {
	if start < end {
		r := randomizedPartition(A, start, end, isIncrement)
		RandomizedQuickSort(A, start, r-1, isIncrement)
		RandomizedQuickSort(A, r+1, end, isIncrement)
	}
	return A
}

// randomizedPartition swaps a uniformly chosen element of A[start..end] into
// the pivot position A[end] and then delegates to partition.
// If you set a random number seed for each partition, the performance is too
// low, so the package-level default rand source is used as-is.
func randomizedPartition(A []int, start, end int, isIncrement bool) int {
	// Bug fix: the candidate range must include end itself; the previous
	// rand.Intn(end-start) could never select the last element as pivot.
	index := rand.Intn(end-start+1) + start
	exchange(A, end, index)
	return partition(A, start, end, isIncrement)
}
// reviewQuick sorts a[p..r] in place in ascending order
// (practice re-implementation of quicksort).
func reviewQuick(a []int, p, r int) {
	if p >= r {
		return
	}
	mid := reviewPartition(a, p, r)
	reviewQuick(a, p, mid-1)
	reviewQuick(a, mid+1, r)
}

// reviewPartition partitions a[p..r] around the pivot a[r], sweeping from
// right to left and pushing every element greater than the pivot to the right
// of the returned index; the pivot ends up at that index.
func reviewPartition(a []int, p, r int) int {
	boundary, pivot := r, a[r]
	for i := r - 1; i >= p; i-- {
		if a[i] > pivot {
			boundary--
			a[boundary], a[i] = a[i], a[boundary]
		}
	}
	a[r] = a[boundary]
	a[boundary] = pivot
	return boundary
}
package geom
import (
"fmt"
"io"
"math"
"math/rand"
)
// Vec represents a 3-element vector
type Vec struct {
	E [3]float64 // components in x, y, z order
}

// NewVec creates a Vec from 3 float values
func NewVec(e0, e1, e2 float64) Vec {
	return Vec{E: [3]float64{e0, e1, e2}}
}

// RandVecInSphere creates a random Vec within a unit sphere
// (rejection sampling: draw from the cube [-1,1)^3 until inside the sphere).
// TODO: I don't like rejection methods. Isn't there a way to generate 2 angles and accomplish the same thing reliably?
func RandVecInSphere() Vec {
	for {
		v := NewVec(rand.Float64(), rand.Float64(), rand.Float64()).Scaled(2).Minus(NewVec(1, 1, 1))
		if v.LenSq() < 1 {
			return v
		}
	}
}

// RandVecInDisk creates a random Vec within a unit disk
// (z is always 0; rejection sampling in the xy square [-1,1)^2).
// TODO: more rejection methods :/
func RandVecInDisk() Vec {
	xy := NewVec(1, 1, 0)
	for {
		v := NewVec(rand.Float64(), rand.Float64(), 0).Scaled(2).Minus(xy)
		if v.Dot(v) < 1 {
			return v
		}
	}
}
// X returns the first element
func (v Vec) X() float64 {
	return v.E[0]
}

// Y returns the second element
func (v Vec) Y() float64 {
	return v.E[1]
}

// Z returns the third element
func (v Vec) Z() float64 {
	return v.E[2]
}

// Inv returns this vector's inverse (component-wise negation) as a new vector
func (v Vec) Inv() Vec {
	return NewVec(-v.E[0], -v.E[1], -v.E[2])
}

// Len returns the vector's length
func (v Vec) Len() float64 {
	return math.Sqrt(v.LenSq())
}

// LenSq returns the square of the vector's length
func (v Vec) LenSq() float64 {
	return v.E[0]*v.E[0] + v.E[1]*v.E[1] + v.E[2]*v.E[2]
}

// Unit converts this vector to a unit vector
// NOTE(review): a zero-length vector divides by zero, yielding Inf/NaN
// components — callers must ensure v is non-zero.
func (v Vec) Unit() (u Unit) {
	k := 1.0 / v.Len()
	u.E[0] = v.E[0] * k
	u.E[1] = v.E[1] * k
	u.E[2] = v.E[2] * k
	return
}
// IStream reads three space-separated values from r into the vector.
//
// Bug fix: fmt.Fscan requires pointers to its scan targets, and the receiver
// must be a pointer for the scanned values to persist; the previous version
// passed float64 values on a value receiver, so it always returned a scan
// error and never stored anything.
func (v *Vec) IStream(r io.Reader) error {
	_, err := fmt.Fscan(r, &v.E[0], &v.E[1], &v.E[2])
	return err
}
// OStream writes space-separated vector values to a Writer
// (fmt.Fprint inserts spaces between adjacent non-string operands,
// so the three floats come out space-separated).
func (v Vec) OStream(w io.Writer) error {
	_, err := fmt.Fprint(w, v.E[0], v.E[1], v.E[2])
	return err
}
// Plus returns the sum of two vectors
func (v Vec) Plus(v2 Vec) Vec {
return NewVec(v.E[0]+v2.E[0], v.E[1]+v2.E[1], v.E[2]+v2.E[2])
}
// Minus returns the difference of two vectors
func (v Vec) Minus(v2 Vec) Vec {
return NewVec(v.E[0]-v2.E[0], v.E[1]-v2.E[1], v.E[2]-v2.E[2])
}
// Times returns the multiplication of two vectors
func (v Vec) Times(v2 Vec) Vec {
return NewVec(v.E[0]*v2.E[0], v.E[1]*v2.E[1], v.E[2]*v2.E[2])
}
// Div returns the division of two vectors
func (v Vec) Div(v2 Vec) Vec {
return NewVec(v.E[0]/v2.E[0], v.E[1]/v2.E[1], v.E[2]/v2.E[2])
}
// Scaled returns a vector scaled by a scalar
func (v Vec) Scaled(n float64) Vec {
return NewVec(v.E[0]*n, v.E[1]*n, v.E[2]*n)
}
// Dot returns the dot product of two vectors
func (v Vec) Dot(v2 Vec) float64 {
return v.E[0]*v2.E[0] + v.E[1]*v2.E[1] + v.E[2]*v2.E[2]
}
// Cross returns the cross product of two vectors
func (v Vec) Cross(v2 Vec) Vec {
return NewVec(
v.E[1]*v2.E[2]-v.E[2]*v2.E[1],
v.E[2]*v2.E[0]-v.E[0]*v2.E[2],
v.E[0]*v2.E[1]-v.E[1]*v2.E[0],
)
} | src/renderer/pkg/geom/vec.go | 0.688678 | 0.635618 | vec.go | starcoder |
package gdnative
import (
"fmt"
"reflect"
"github.com/godot-go/godot-go/pkg/log"
)
// VariantToGoType will check the given variant type and convert it to its
// actual type. The value is returned as a reflect.Value.
//
// GODOT_VARIANT_TYPE_NIL yields reflect.ValueOf(nil), which is the zero
// (invalid) reflect.Value; an unrecognized type logs a panic.
func VariantToGoType(variant Variant) reflect.Value {
	v := variant.GetType()
	// One case per GDNative variant type, delegating to the matching
	// As* accessor on Variant.
	switch v {
	case GODOT_VARIANT_TYPE_NIL:
		return reflect.ValueOf(nil)
	case GODOT_VARIANT_TYPE_BOOL:
		return reflect.ValueOf(variant.AsBool())
	case GODOT_VARIANT_TYPE_INT:
		return reflect.ValueOf(variant.AsInt())
	case GODOT_VARIANT_TYPE_REAL:
		return reflect.ValueOf(variant.AsReal())
	case GODOT_VARIANT_TYPE_STRING:
		return reflect.ValueOf(variant.AsString())
	case GODOT_VARIANT_TYPE_VECTOR2:
		return reflect.ValueOf(variant.AsVector2())
	case GODOT_VARIANT_TYPE_RECT2:
		return reflect.ValueOf(variant.AsRect2())
	case GODOT_VARIANT_TYPE_VECTOR3:
		return reflect.ValueOf(variant.AsVector3())
	case GODOT_VARIANT_TYPE_TRANSFORM2D:
		return reflect.ValueOf(variant.AsTransform2D())
	case GODOT_VARIANT_TYPE_PLANE:
		return reflect.ValueOf(variant.AsPlane())
	case GODOT_VARIANT_TYPE_QUAT:
		return reflect.ValueOf(variant.AsQuat())
	case GODOT_VARIANT_TYPE_AABB:
		return reflect.ValueOf(variant.AsAABB())
	case GODOT_VARIANT_TYPE_BASIS:
		return reflect.ValueOf(variant.AsBasis())
	case GODOT_VARIANT_TYPE_TRANSFORM:
		return reflect.ValueOf(variant.AsTransform())
	case GODOT_VARIANT_TYPE_COLOR:
		return reflect.ValueOf(variant.AsColor())
	case GODOT_VARIANT_TYPE_NODE_PATH:
		return reflect.ValueOf(variant.AsNodePath())
	case GODOT_VARIANT_TYPE_RID:
		return reflect.ValueOf(variant.AsRID())
	case GODOT_VARIANT_TYPE_OBJECT:
		return reflect.ValueOf(variant.AsObject())
	case GODOT_VARIANT_TYPE_DICTIONARY:
		return reflect.ValueOf(variant.AsDictionary())
	case GODOT_VARIANT_TYPE_ARRAY:
		return reflect.ValueOf(variant.AsArray())
	case GODOT_VARIANT_TYPE_POOL_BYTE_ARRAY:
		return reflect.ValueOf(variant.AsPoolByteArray())
	case GODOT_VARIANT_TYPE_POOL_INT_ARRAY:
		return reflect.ValueOf(variant.AsPoolIntArray())
	case GODOT_VARIANT_TYPE_POOL_REAL_ARRAY:
		return reflect.ValueOf(variant.AsPoolRealArray())
	case GODOT_VARIANT_TYPE_POOL_STRING_ARRAY:
		return reflect.ValueOf(variant.AsPoolStringArray())
	case GODOT_VARIANT_TYPE_POOL_VECTOR2_ARRAY:
		return reflect.ValueOf(variant.AsPoolVector2Array())
	case GODOT_VARIANT_TYPE_POOL_VECTOR3_ARRAY:
		return reflect.ValueOf(variant.AsPoolVector3Array())
	case GODOT_VARIANT_TYPE_POOL_COLOR_ARRAY:
		return reflect.ValueOf(variant.AsPoolColorArray())
	}
	log.WithField("type", fmt.Sprintf("%d", variant.GetType())).Panic("variant to native built-in type version unhandled")
	return reflect.ValueOf(nil)
}
// GoTypeToVariant will check the given Go type and convert it to its
// Variant type. The value is returned as a Variant.
//
// An invalid reflect.Value maps to a nil Variant; an unhandled type
// logs a panic.
func GoTypeToVariant(value reflect.Value) Variant {
	if !value.IsValid() {
		return NewVariantNil()
	}
	valueInterface := value.Interface()
	switch v := valueInterface.(type) {
	case bool:
		return NewVariantBool(v)
	// All integer widths are widened to int64 for NewVariantInt.
	case int:
		return NewVariantInt(int64(v))
	case int16:
		return NewVariantInt(int64(v))
	case int32:
		return NewVariantInt(int64(v))
	case int64:
		return NewVariantInt(v)
	case float32:
		return NewVariantReal(float64(v))
	case float64:
		return NewVariantReal(v)
	// Native Go strings are rejected on purpose: callers must wrap them in
	// a Godot String first so ownership/conversion is explicit.
	case string:
		log.
			WithField("value", fmt.Sprintf("%v", value)).
			Panic("unable to handle native go string. please wrap string in a Godot String with gdnative.NewStringFromGoString")
	case String:
		return NewVariantString(v)
	case Vector2:
		return NewVariantVector2(v)
	case Rect2:
		return NewVariantRect2(v)
	case Vector3:
		return NewVariantVector3(v)
	case Transform2D:
		return NewVariantTransform2D(v)
	case Plane:
		return NewVariantPlane(v)
	case Quat:
		return NewVariantQuat(v)
	case AABB:
		return NewVariantAABB(v)
	case Basis:
		return NewVariantBasis(v)
	case Transform:
		return NewVariantTransform(v)
	case Color:
		return NewVariantColor(v)
	case NodePath:
		return NewVariantNodePath(v)
	case RID:
		return NewVariantRID(v)
	case *GodotObject:
		return NewVariantObject(v)
	case Dictionary:
		return NewVariantDictionary(v)
	case Array:
		return NewVariantArray(v)
	case PoolByteArray:
		return NewVariantPoolByteArray(v)
	case PoolIntArray:
		return NewVariantPoolIntArray(v)
	case PoolRealArray:
		return NewVariantPoolRealArray(v)
	case PoolStringArray:
		return NewVariantPoolStringArray(v)
	case PoolVector2Array:
		return NewVariantPoolVector2Array(v)
	case PoolVector3Array:
		return NewVariantPoolVector3Array(v)
	case PoolColorArray:
		return NewVariantPoolColorArray(v)
	// Already a Variant: pass through unchanged.
	case Variant:
		return v
	}
	log.WithField("value", fmt.Sprintf("%v", value)).Panic("value not handled")
	return NewVariantNil()
}
package befunge
import "math"
// https://en.wikipedia.org/wiki/Befunge
//
// Opcode constants for the Befunge instruction set. Digit characters 0-9
// are handled separately: they push their numeric value on the stack.
const (
	OpAdd         = '+'  // Addition: Pop a and b, then push a+b.
	OpSub         = '-'  // Subtraction: Pop a and b, then push b-a.
	OpMult        = '*'  // Multiplication: Pop a and b, then push a*b.
	OpDiv         = '/'  // Integer division: Pop a and b, then push b/a, rounded down. If a is zero, push zero.
	OpMod         = '%'  // Modulo: Pop a and b, then push the b%a. If a is zero, push zero.
	OpNot         = '!'  // Logical NOT: Pop a value. If the value is zero, push 1; otherwise, push zero.
	OpGreaterThan = '`'  // Greater than: Pop a and b, then push 1 if b>a, otherwise push zero.
	OpDup         = ':'  // Duplicate value on top of the stack. If there is nothing on top of the stack, push a 0.
	OpSwap        = '\\' // Swap two values on top of the stack. If there is only one value, pretend there is an extra 0 on bottom of the stack.
	OpStringMode  = '"'  // Start string mode: push each character's ASCII value all the way up to the next ".
	OpPop         = '$'  // Pop value from the stack and discard it.
	OpOutInt      = '.'  // Pop value and output as an integer.
	OpOutRune     = ','  // Pop value and output the ASCII character represented by the integer code that is stored in the value.
	OpInInt       = '&'  // Ask user for a number and push it
	OpInRune      = '~'  // Ask user for a character and push its ASCII value
	OpGetCode     = 'g'  // A "get" call (a way to retrieve data in storage). Pop y and x, then push ASCII value of the character at that position in the program
	OpPutCode     = 'p'  // A "put" call (a way to store a value for later use). Pop y, x and v, then change the character at the position (x,y) in the program to the character with ASCII value v.
	// NOTE(review): the next name breaks the OpMov* pattern of its neighbors
	// and its comment says "moving" — almost certainly a typo for OpMovRight,
	// but renaming the exported constant would break external callers.
	OpModRight  = '>' // Start moving right.
	OpMovLeft   = '<' // Start moving left.
	OpMovUp     = '^' // Start moving up.
	OpMovDown   = 'v' // Start moving down.
	OpMovRandom = '?' // Start moving in a random cardinal direction.
	OpIfHoriz   = '_' // Pop a value; move right if value = 0, left otherwise.
	OpIfVert    = '|' // Pop a value; move down if value = 0, up otherwise.
	OpBridge    = '#' // Trampoline: Skip next cell.
	OpBlank     = ' ' // (i.e. a space) No-op. Does nothing.
	OpEnd       = '@' // End program.

	OpOther = math.MaxUint8 // presumably a sentinel for unrecognized instructions — confirm against the interpreter
	OpNone  = 0             // presumably "no instruction" — confirm against the interpreter
)
package header
/**
* This interface represents the RAck header, as defined by
* <a href = "http://www.ietf.org/rfc/rfc3262.txt">RFC3262</a>, this
* header is not part of RFC3261.
* <p>
* The PRACK messages contain an RAck header field, which indicates the
* sequence number of the provisional response that is being acknowledged (each
* provisional response is given a sequence number, carried in the RSeq header
* field in the Provisional response). The acknowledgements are not cumulative,
* and the specifications recommend a single outstanding provisional response at
* a time, for purposes of congestion control.
* <p>
* The RAck header contains two numbers and a method tag. The first number is
* the sequence number from the RSeqHeader in the provisional response that is
* being acknowledged. The next number is the sequence number that is copied
* from the CSeqHeader along with the method tag, from the response that is being
* acknowledged.
* <p>
* For Example:<br>
* <code>RAck: 776656 1 INVITE</code>
* <p>
* A server must ignore Headers that it does not understand. A proxy must not
* remove or modify Headers that it does not understand.
*/
type RAckHeader interface {
	Header

	// SetMethod sets the method of this RAckHeader, which correlates to the
	// method of the CSeqHeader of the provisional response being
	// acknowledged. The named return follows the file's convention of
	// labelling the error with the exception it represents: a parse failure
	// of the method value yields a ParseException error.
	SetMethod(method string) (ParseException error)

	// GetMethod returns the method of this RAckHeader.
	GetMethod() string

	// SetCSeqNumber sets the sequence number copied from the CSeqHeader of
	// the provisional response being acknowledged. The value must be
	// expressible as a 32-bit unsigned integer and less than 2**31; a value
	// below zero yields an InvalidArgumentException error.
	SetCSeqNumber(cSeqNumber int) (InvalidArgumentException error)

	// GetCSeqNumber returns the CSeq sequence number of this RAckHeader.
	GetCSeqNumber() int

	// SetRSeqNumber sets the sequence number copied from the RSeqHeader of
	// the provisional response being acknowledged, with the same range
	// constraints and error behavior as SetCSeqNumber.
	SetRSeqNumber(rSeqNumber int) (InvalidArgumentException error)

	// GetRSeqNumber returns the RSeq sequence number of this RAckHeader.
	GetRSeqNumber() int
}
package engine
// Exchange interaction (Heisenberg + Dzyaloshinskii-Moriya)
// See also opencl/exchange.cl and opencl/dmi.cl
import (
"github.com/mumax/3cl/data"
"github.com/mumax/3cl/opencl"
"github.com/mumax/3cl/util"
"unsafe"
)
// Package-level parameters and derived quantities for the exchange and
// Dzyaloshinskii-Moriya interactions.
var (
	Aex   = NewScalarParam("Aex", "J/m", "Exchange stiffness", &lex2)
	Dind  = NewScalarParam("Dind", "J/m2", "Interfacial Dzyaloshinskii-Moriya strength", &din2)
	Dbulk = NewScalarParam("Dbulk", "J/m2", "Bulk Dzyaloshinskii-Moriya strength", &dbulk2)

	lex2   exchParam // inter-cell Aex
	din2   exchParam // inter-cell Dind
	dbulk2 exchParam // inter-cell Dbulk

	B_exch     = NewVectorField("B_exch", "T", "Exchange field", AddExchangeField)
	E_exch     = NewScalarValue("E_exch", "J", "Total exchange energy", GetExchangeEnergy)
	Edens_exch = NewScalarField("Edens_exch", "J/m3", "Total exchange energy density", AddExchangeEnergyDensity)

	// Average exchange coupling with neighbors. Useful to debug inter-region exchange
	ExchCoupling = NewScalarField("ExchCoupling", "arb.", "Average exchange coupling with neighbors", exchangeDecode)
	DindCoupling = NewScalarField("DindCoupling", "arb.", "Average DMI coupling with neighbors", dindDecode)
)

// Energy density adder with prefactor -0.5, matching GetExchangeEnergy below.
var AddExchangeEnergyDensity = makeEdensAdder(&B_exch, -0.5) // TODO: normal func
// init registers the exchange energy terms and exposes the inter-region
// coupling tweaks to the scripting layer, then wires each coupling table to
// its parent parameter.
func init() {
	registerEnergy(GetExchangeEnergy, AddExchangeEnergyDensity)
	DeclFunc("ext_ScaleExchange", ScaleInterExchange, "Re-scales exchange coupling between two regions.")
	DeclFunc("ext_InterExchange", InterExchange, "Sets exchange coupling between two regions.")
	DeclFunc("ext_ScaleDind", ScaleInterDind, "Re-scales Dind coupling between two regions.")
	DeclFunc("ext_InterDind", InterDind, "Sets Dind coupling between two regions.")
	lex2.init(Aex)
	din2.init(Dind)
	dbulk2.init(Dbulk)
}
// Adds the current exchange field to dst.
// The kernel is selected by which DMI terms are nonzero: plain Heisenberg
// exchange, exchange+interfacial DMI, or exchange+bulk DMI. Combining both
// DMI flavors is a fatal error.
func AddExchangeField(dst *data.Slice) {
	inter := !Dind.isZero()
	bulk := !Dbulk.isZero()
	ms := Msat.MSlice()
	defer ms.Recycle()
	switch {
	case !inter && !bulk:
		opencl.AddExchange(dst, M.Buffer(), lex2.Gpu(), ms, regions.Gpu(), M.Mesh())
	case inter && !bulk:
		Refer("mulkers2017")
		opencl.AddDMI(dst, M.Buffer(), lex2.Gpu(), din2.Gpu(), ms, regions.Gpu(), M.Mesh()) // dmi+exchange
	case bulk && !inter:
		opencl.AddDMIBulk(dst, M.Buffer(), lex2.Gpu(), dbulk2.Gpu(), ms, regions.Gpu(), M.Mesh()) // dmi+exchange
		// TODO: add ScaleInterDbulk and InterDbulk
	case inter && bulk:
		util.Fatal("Cannot have induced and interfacial DMI at the same time")
	}
}
// Set dst to the average exchange coupling per cell (average of lex2 with all neighbors).
func exchangeDecode(dst *data.Slice) {
	opencl.ExchangeDecode(dst, lex2.Gpu(), regions.Gpu(), M.Mesh())
}

// Set dst to the average dmi coupling per cell (average of din2 with all neighbors).
func dindDecode(dst *data.Slice) {
	opencl.ExchangeDecode(dst, din2.Gpu(), regions.Gpu(), M.Mesh())
}

// Returns the current exchange energy in Joules:
// -1/2 times the cell-volume-weighted dot product of M and B_exch.
func GetExchangeEnergy() float64 {
	return -0.5 * cellVolume() * dot(&M_full, &B_exch)
}
// Scales the heisenberg exchange interaction between region1 and 2.
// Scale = 1 means the harmonic mean over the regions of Aex.
func ScaleInterExchange(region1, region2 int, scale float64) {
	lex2.setScale(region1, region2, scale)
}

// Sets the exchange interaction between region 1 and 2 to an explicit value,
// overriding the harmonic mean.
func InterExchange(region1, region2 int, value float64) {
	lex2.setInter(region1, region2, value)
}

// Scales the DMI interaction between region 1 and 2.
// Scale = 1 means the harmonic mean over the regions of Dind.
func ScaleInterDind(region1, region2 int, scale float64) {
	din2.setScale(region1, region2, scale)
}

// Sets the DMI interaction between region 1 and 2 to an explicit value,
// overriding the harmonic mean.
func InterDind(region1, region2 int, value float64) {
	din2.setInter(region1, region2, value)
}
// stores interregion exchange stiffness and DMI.
// The interregion exchange/DMI by default is the harmonic mean of the two
// regions' values (scale=1, inter=0); lut holds the final per-pair coupling.
type exchParam struct {
	parent         *RegionwiseScalar                    // per-region source values (Aex, Dind, ...)
	lut            [NREGION * (NREGION + 1) / 2]float32 // harmonic mean of regions (i,j)
	scale          [NREGION * (NREGION + 1) / 2]float32 // extra scale factor for lut[SymmIdx(i, j)]
	inter          [NREGION * (NREGION + 1) / 2]float32 // extra term for lut[SymmIdx(i, j)]
	gpu            opencl.SymmLUT                       // gpu copy of lut, lazily transferred when needed
	gpu_ok, cpu_ok bool                                 // gpu cache up-to date with lut source
}

// to be called after Aex or scaling changed: marks both the CPU table and
// its GPU mirror as stale so they are rebuilt on next use.
func (p *exchParam) invalidate() {
	p.cpu_ok = false
	p.gpu_ok = false
}
// init installs the default coupling parameters: scale 1 (plain harmonic
// mean) and no additive inter-region term.
func (p *exchParam) init(parent *RegionwiseScalar) {
	p.parent = parent
	for i := range p.scale {
		p.scale[i] = 1
		p.inter[i] = 0
	}
}

// Gpu returns a GPU mirror of the look-up table,
// recomputing and uploading it first only when stale.
func (p *exchParam) Gpu() opencl.SymmLUT {
	p.update()
	if p.gpu_ok {
		return p.gpu
	}
	p.upload()
	return p.gpu
}

// setInter fixes the coupling between region1 and region2 to an explicit
// value; the harmonic-mean contribution is disabled by zeroing the scale.
func (p *exchParam) setInter(region1, region2 int, value float64) {
	idx := symmidx(region1, region2)
	p.scale[idx] = 0
	p.inter[idx] = float32(value)
	p.invalidate()
}

// setScale rescales the harmonic-mean coupling between region1 and region2;
// the additive term is reset to zero.
func (p *exchParam) setScale(region1, region2 int, scale float64) {
	idx := symmidx(region1, region2)
	p.scale[idx] = float32(scale)
	p.inter[idx] = 0
	p.invalidate()
}
// update recomputes the CPU look-up table if stale: for every region pair
// (i, j) it stores scale * (harmonic mean of the per-region values) + inter,
// then marks the GPU mirror stale.
func (p *exchParam) update() {
	if !p.cpu_ok {
		ex := p.parent.cpuLUT()
		for i := 0; i < NREGION; i++ {
			exi := ex[0][i]
			for j := i; j < NREGION; j++ {
				exj := ex[0][j]
				I := symmidx(i, j)
				// 2/(1/a + 1/b) is the harmonic mean of a and b.
				// NOTE(review): a zero per-region value makes 1/exi infinite,
				// driving the mean to 0 — presumably intended; confirm.
				p.lut[I] = p.scale[I]*2/(1/exi+1/exj) + p.inter[I]
			}
		}
		p.gpu_ok = false
		p.cpu_ok = true
	}
}

// upload copies the CPU look-up table to the GPU, allocating the GPU buffer
// on first use.
func (p *exchParam) upload() {
	// alloc if needed
	if p.gpu == nil {
		p.gpu = opencl.SymmLUT(opencl.MemAlloc(len(p.lut) * opencl.SIZEOF_FLOAT32))
	}
	lut := p.lut // Copy, to work around Go 1.6 cgo pointer limitations.
	opencl.MemCpyHtoD(unsafe.Pointer(p.gpu), unsafe.Pointer(&lut[0]), opencl.SIZEOF_FLOAT32*len(p.lut))
	p.gpu_ok = true
}
// symmidx maps the unordered pair (i, j) to its index in the packed
// triangular storage of a symmetric matrix where only one half is stored,
// so symmidx(i, j) == symmidx(j, i).
// (!) Code duplicated in exchange.cu
func symmidx(i, j int) int {
	if i < j {
		i, j = j, i // normalize so that i >= j
	}
	return i*(i+1)/2 + j
}
package projection
import (
"fmt"
"github.com/andrepxx/sydney/coordinates"
"math"
)
/*
 * Mathematical constants.
 */
const (
	MATH_HALF_PI    = 0.5 * math.Pi
	MATH_TWO_PI     = 2.0 * math.Pi
	MATH_QUARTER_PI = 0.25 * math.Pi
)

/*
 * Interface type representing a projection from geographic locations to points
 * in a plane (surface of a map) and the other way round.
 *
 * Forward maps geographic to planar coordinates and Inverse maps back; the
 * slice variants require src and dst to have equal length, while the Single
 * variants require both pointers to be non-nil.
 */
type Projection interface {
	Forward(dst []coordinates.Cartesian, src []coordinates.Geographic) error
	ForwardSingle(dst *coordinates.Cartesian, src *coordinates.Geographic) error
	Inverse(dst []coordinates.Geographic, src []coordinates.Cartesian) error
	InverseSingle(dst *coordinates.Geographic, src *coordinates.Cartesian) error
}
/*
 * Data structure representing the Mercator projection.
 *
 * The struct is empty: the projection carries no state and all behavior
 * lives in its methods.
 */
type mercatorProjectionStruct struct {
}
/*
 * Project geographic coordinates in longitude and latitude to points on a map
 * using the Mercator projection.
 *
 * Source and destination slices must have the same length.
 */
func (this *mercatorProjectionStruct) Forward(dst []coordinates.Cartesian, src []coordinates.Geographic) error {
	numSrc := len(src)
	numDst := len(dst)

	/*
	 * Check if source and destination have same length.
	 */
	if numSrc != numDst {
		return fmt.Errorf("%s", "Source and destination must have same length")
	}

	/*
	 * Project all data points, propagating errors instead of silently
	 * discarding them. (ForwardSingle only fails on nil pointers, which
	 * cannot occur here, so this does not change observable behavior.)
	 */
	for i := range src {
		if err := this.ForwardSingle(&dst[i], &src[i]); err != nil {
			return err
		}
	}

	return nil
}
/*
 * Project a single geographic coordinate (longitude, latitude) to a point on
 * a map using the Mercator projection:
 *
 *   x = lon / 2π
 *   y = ln(tan(π/4 + lat/2)) / 2π
 *
 * Both pointers must be non-nil.
 */
func (this *mercatorProjectionStruct) ForwardSingle(dst *coordinates.Cartesian, src *coordinates.Geographic) error {

	/*
	 * Make sure source and destination are valid.
	 */
	if src == nil || dst == nil {
		return fmt.Errorf("%s", "Src and dst must be non-nil")
	}

	lon := src.Longitude()
	lat := src.Latitude()
	x := lon / MATH_TWO_PI
	y := math.Log(math.Tan(MATH_QUARTER_PI+0.5*lat)) / MATH_TWO_PI
	*dst = coordinates.CreateCartesian(x, y)
	return nil
}
/*
 * Project points on a map to geographic coordinates in longitude and latitude
 * using the Mercator projection.
 *
 * Source and destination slices must have the same length.
 */
func (this *mercatorProjectionStruct) Inverse(dst []coordinates.Geographic, src []coordinates.Cartesian) error {
	numSrc := len(src)
	numDst := len(dst)

	/*
	 * Check if source and destination have same length.
	 */
	if numSrc != numDst {
		return fmt.Errorf("%s", "Source and destination must have same length")
	}

	/*
	 * Project all data points, propagating errors instead of silently
	 * discarding them. (InverseSingle only fails on nil pointers, which
	 * cannot occur here, so this does not change observable behavior.)
	 */
	for i := range src {
		if err := this.InverseSingle(&dst[i], &src[i]); err != nil {
			return err
		}
	}

	return nil
}
/*
 * Project a single point on a map back to geographic coordinates using the
 * inverse Mercator projection:
 *
 *   lon = 2π · x
 *   lat = 2 · atan(e^(2π·y)) − π/2
 *
 * Both pointers must be non-nil.
 */
func (this *mercatorProjectionStruct) InverseSingle(dst *coordinates.Geographic, src *coordinates.Cartesian) error {

	/*
	 * Make sure source and destination are valid.
	 */
	if src == nil || dst == nil {
		return fmt.Errorf("%s", "Src and dst must be non-nil")
	}

	x := src.X()
	y := src.Y()
	longitude := MATH_TWO_PI * x
	latitude := 2.0*math.Atan(math.Exp(MATH_TWO_PI*y)) - MATH_HALF_PI
	*dst = coordinates.CreateGeographic(longitude, latitude)
	return nil
}
/*
 * Create a Mercator projection.
 *
 * The returned value satisfies the Projection interface through the
 * Forward/Inverse (and *Single) methods defined above.
 */
func Mercator() Projection {
	proj := mercatorProjectionStruct{}
	return &proj
} | projection/projection.go | 0.694613 | 0.543893 | projection.go | starcoder
// Package costs gets billing information from an ElasticSearch.
package costs
import (
"fmt"
"strings"
"time"
"github.com/olivere/elastic"
)
// aggregationBuilder is an alias for the function type that is used in the
// aggregationBuilder functions.
// Each builder receives the ':'-split request parameter (e.g. ["tag", "<KEY>"])
// and returns the aggregation(s) that parameter contributes, in nesting order.
type aggregationBuilder func([]string) []paramAggrAndName

// paramNameToFuncPtr maps parameter names to functions building the aggregations.
// map of string keys and function pointers as values. For each possible param
// after parsing (removing the ':<TAG_KEY>' in the case of the tag), there is a
// function associated to it that creates the Aggregations needed by this param.
// If a new param, that only creates aggregations, needs to be added,
// a function with an aggregationBuilder prototype needs to be added to the list below.
var paramNameToFuncPtr = map[string]aggregationBuilder{
	"product":          createAggregationPerProduct,
	"availabilityzone": createAggregationPerAvailabilityZone,
	"region":           createAggregationPerRegion,
	"account":          createAggregationPerAccount,
	"tag":              createAggregationPerTag,
	"cost":             createCostSumAggregation,
	"day":              createAggregationPerDay,
	"week":             createAggregationPerWeek,
	"month":            createAggregationPerMonth,
	"year":             createAggregationPerYear,
}

// paramAggrAndName is a structure containing the name of the parameter and
// corresponding aggregation. A parameter is a string that is passed to the
// GetElasticSearchParams and represents an aggregation. A list of those
// parameters can be found in the paramNameToFuncPtr map.
type paramAggrAndName struct {
	name string // bucket name under which the aggregation is registered
	aggr elastic.Aggregation
}

// aggregationMaxSize is the maximum size of an Elastic Search Aggregation
// (max int32 — effectively "return every bucket").
const aggregationMaxSize = 0x7FFFFFFF
// createQueryAccountFilter builds a *elastic.TermsQuery matching any of the
// account IDs in accountList against the 'usageAccountId' field.
func createQueryAccountFilter(accountList []string) *elastic.TermsQuery {
	// NewTermsQuery takes variadic interface{} values, so the string slice
	// has to be widened element by element.
	values := make([]interface{}, 0, len(accountList))
	for _, account := range accountList {
		values = append(values, account)
	}
	return elastic.NewTermsQuery("usageAccountId", values...)
}
// createQueryTimeRange builds a *elastic.RangeQuery restricting
// 'usageStartDate' to the [durationBegin, durationEnd] interval.
func createQueryTimeRange(durationBegin time.Time, durationEnd time.Time) *elastic.RangeQuery {
	timeRange := elastic.NewRangeQuery("usageStartDate")
	return timeRange.From(durationBegin).To(durationEnd)
}
// createAggregationPerProduct returns a single-element []paramAggrAndName
// holding a bucket aggregation over the 'productCode' field.
func createAggregationPerProduct(_ []string) []paramAggrAndName {
	buckets := elastic.NewTermsAggregation().Field("productCode").Size(aggregationMaxSize)
	return []paramAggrAndName{{name: "by-product", aggr: buckets}}
}
// createAggregationPerAvailabilityZone returns a single-element
// []paramAggrAndName holding a bucket aggregation over 'availabilityZone'.
func createAggregationPerAvailabilityZone(_ []string) []paramAggrAndName {
	buckets := elastic.NewTermsAggregation().Field("availabilityZone").Size(aggregationMaxSize)
	return []paramAggrAndName{{name: "by-availabilityzone", aggr: buckets}}
}
// createAggregationPerRegion returns a single-element []paramAggrAndName
// holding a bucket aggregation over the 'region' field.
func createAggregationPerRegion(_ []string) []paramAggrAndName {
	buckets := elastic.NewTermsAggregation().Field("region").Size(aggregationMaxSize)
	return []paramAggrAndName{{name: "by-region", aggr: buckets}}
}
// createAggregationPerAccount returns a single-element []paramAggrAndName
// holding a bucket aggregation over the 'usageAccountId' field.
func createAggregationPerAccount(_ []string) []paramAggrAndName {
	buckets := elastic.NewTermsAggregation().Field("usageAccountId").Size(aggregationMaxSize)
	return []paramAggrAndName{{name: "by-account", aggr: buckets}}
}
// createAggregationPerDay returns a single-element []paramAggrAndName holding
// a date-histogram aggregation over 'usageStartDate' with daily buckets.
// MinDocCount(0) keeps empty days in the result.
func createAggregationPerDay(_ []string) []paramAggrAndName {
	histogram := elastic.NewDateHistogramAggregation().Field("usageStartDate").MinDocCount(0).Interval("day")
	return []paramAggrAndName{{name: "by-day", aggr: histogram}}
}
// createAggregationPerWeek returns a single-element []paramAggrAndName holding
// a date-histogram aggregation over 'usageStartDate' with weekly buckets.
// MinDocCount(0) keeps empty weeks in the result.
func createAggregationPerWeek(_ []string) []paramAggrAndName {
	histogram := elastic.NewDateHistogramAggregation().Field("usageStartDate").MinDocCount(0).Interval("week")
	return []paramAggrAndName{{name: "by-week", aggr: histogram}}
}
// createAggregationPerMonth returns a single-element []paramAggrAndName holding
// a date-histogram aggregation over 'usageStartDate' with monthly buckets.
// MinDocCount(0) keeps empty months in the result.
func createAggregationPerMonth(_ []string) []paramAggrAndName {
	histogram := elastic.NewDateHistogramAggregation().Field("usageStartDate").MinDocCount(0).Interval("month")
	return []paramAggrAndName{{name: "by-month", aggr: histogram}}
}
// createAggregationPerYear returns a single-element []paramAggrAndName holding
// a date-histogram aggregation over 'usageStartDate' with yearly buckets.
// MinDocCount(0) keeps empty years in the result.
func createAggregationPerYear(_ []string) []paramAggrAndName {
	histogram := elastic.NewDateHistogramAggregation().Field("usageStartDate").MinDocCount(0).Interval("year")
	return []paramAggrAndName{{name: "by-year", aggr: histogram}}
}
// createAggregationPerTag returns a []paramAggrAndName of size 2 — the two
// aggregations required by the "tag:<TAG_KEY>" param:
//   1. a FilterAggregation on 'tag.key' matching "user:<TAG_KEY>"
//      (paramSplit[1] is the part after the ':', so callers must sanitize);
//   2. a TermsAggregation bucketing on 'tag.value'.
// The two are not linked here; nestAggregation chains them via SubAggregation.
func createAggregationPerTag(paramSplit []string) []paramAggrAndName {
	tagKey := fmt.Sprintf("user:%v", paramSplit[1])
	keyFilter := elastic.NewFilterAggregation().Filter(elastic.NewTermQuery("tag.key", tagKey))
	valueBuckets := elastic.NewTermsAggregation().Field("tag.value").Size(aggregationMaxSize)
	return []paramAggrAndName{
		{name: "by-tag_key", aggr: keyFilter},
		{name: "tag_value", aggr: valueBuckets},
	}
}
// createCostSumAggregation returns a single-element []paramAggrAndName holding
// a sum aggregation over the 'unblendedCost' field.
func createCostSumAggregation(_ []string) []paramAggrAndName {
	costSum := elastic.NewSumAggregation().Field("unblendedCost")
	return []paramAggrAndName{{name: "value", aggr: costSum}}
}
// reverseAggregationArray reverses the given slice in place and returns it
// for call-chaining convenience.
func reverseAggregationArray(aggregationArray []paramAggrAndName) []paramAggrAndName {
	for left, right := 0, len(aggregationArray)-1; left < right; left, right = left+1, right-1 {
		aggregationArray[left], aggregationArray[right] = aggregationArray[right], aggregationArray[left]
	}
	return aggregationArray
}
// nestAggregation chains the given aggregations into one aggregation: each
// element becomes a SubAggregation of the element before it in the original
// slice order, so the first element ends up outermost and the last innermost.
// A type switch is required to recover a concrete type exposing
// SubAggregation from the elastic.Aggregation interface. Currently handled:
// TermsAggregation, FilterAggregation and DateHistogramAggregation (the
// SumAggregation produced for "cost" is always innermost so it never needs a
// sub-aggregation). Any aggregation type missing from the switch is silently
// dropped from the chain, so new builder functions must extend the switch.
func nestAggregation(allAggrSlice []paramAggrAndName) elastic.Aggregation {
	// Reverse so the innermost aggregation comes first and can be folded
	// into each enclosing aggregation one step at a time.
	allAggrSlice = reverseAggregationArray(allAggrSlice)
	nested := allAggrSlice[0]
	for _, outer := range allAggrSlice[1:] {
		switch parent := outer.aggr.(type) {
		case *elastic.TermsAggregation:
			nested = paramAggrAndName{name: outer.name, aggr: parent.SubAggregation(nested.name, nested.aggr)}
		case *elastic.FilterAggregation:
			nested = paramAggrAndName{name: outer.name, aggr: parent.SubAggregation(nested.name, nested.aggr)}
		case *elastic.DateHistogramAggregation:
			nested = paramAggrAndName{name: outer.name, aggr: parent.SubAggregation(nested.name, nested.aggr)}
		}
	}
	return nested.aggr
}
// GetElasticSearchParams is used to construct an ElasticSearch *elastic.SearchService used to perform a request on ES
// It takes as parameters :
// - accountList []string : A slice of strings representing aws account number, in the format of the field
// 'awsdetailedlineitem.linked_account_id'
// - durationBeing time.Time : A time.Time struct representing the beginning of the time range in the query
// - durationEnd time.Time : A time.Time struct representing the end of the time range in the query
// - param []string : A slice of strings representing the different parameters, in the nesting order,
// that will create aggregations.
// Those can be :
// - "product" : It will create a TermsAggregation on the field 'product_name'
// - "availabilityzone" : It will create a TermsAggregation on the field 'availability_zone'
// - "region" : It will create a TermsAggregation on the field 'region'
// - "account" : It will create a TermsAggregation on the field 'linked_account_id'
// - "tag:<TAG_KEY>" : It will create a FilterAggregation on the field 'tag.key',
// filtering on the value 'user:<TAG_KEY>'.
// It will then create a TermsAggregation on the field 'tag.value'
// - "[day|week|month|year]": It will create a DateHistogramAggregation on the specified duration on
// the field 'usage_start_date'
// - client *elastic.Client : an instance of *elastic.Client that represent an Elastic Search client.
// It needs to be fully configured and ready to execute a client.Search()
// - index string : The Elastic Search index on which to execute the query. In this context the default value
// should be "awsdetailedlineitems"
// This function excepts arguments passed to it to be sanitize. If they are not, the following cases will make
// it crash :
// - For the 'tag:<TAG_KEY>' param, if the separator is not present, or if there is no key that is passed to it,
// the program will crash
// - If a param in the slice is not present in the detailedLineItemsFieldsName, the program will crash.
// - If the client is nil or malconfigured, it will crash
// - If the index is not an index present in the ES, it will crash
// We are excluding AWSDataTransfer products because it's value is always zero.
// Data transfer costs are included in other products' costs.
func GetElasticSearchParams(accountList []string, durationBegin time.Time,
	durationEnd time.Time, params []string, client *elastic.Client, index string) *elastic.SearchService {
	query := elastic.NewBoolQuery()
	// Only filter on accounts when a non-empty account list was supplied.
	if len(accountList) > 0 {
		query = query.Filter(createQueryAccountFilter(accountList))
	}
	// Restrict to the requested time range and exclude AWSDataTransfer
	// (its cost is always zero; transfer costs are billed under other
	// products — see the function comment above).
	query = query.Filter(createQueryTimeRange(durationBegin, durationEnd),
		elastic.NewBoolQuery().MustNot(elastic.NewTermQuery("productCode", "AWSDataTransfer")))
	// Size(0): only aggregation buckets are needed, not raw documents.
	search := client.Search().Index(index).Size(0).Query(query)
	// The cost sum is always appended last, so it becomes the innermost
	// aggregation after nesting.
	params = append(params, "cost")
	var allAggregationSlice []paramAggrAndName
	for _, paramName := range params {
		// "tag:<KEY>"-style params carry their argument after the colon.
		paramNameSplit := strings.Split(paramName, ":")
		// NOTE: panics on unknown param names — callers must sanitize
		// (documented above).
		paramAggr := paramNameToFuncPtr[paramNameSplit[0]](paramNameSplit)
		allAggregationSlice = append(allAggregationSlice, paramAggr...)
	}
	aggregationParamName := allAggregationSlice[0].name
	nestedAggregation := nestAggregation(allAggregationSlice)
	search.Aggregation(aggregationParamName, nestedAggregation)
	return search
} | costs/es_request_constructor.go | 0.671578 | 0.406391 | es_request_constructor.go | starcoder
package client
// A StatefulSetSpec is the specification of a StatefulSet.
// NOTE(review): this appears to be generated Kubernetes client code (vendor
// path) — keep field order and json tags unchanged; both are part of the
// wire format.
type V1beta1StatefulSetSpec struct {
	// podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.
	PodManagementPolicy string `json:"podManagementPolicy,omitempty"`
	// replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.
	Replicas int32 `json:"replicas,omitempty"`
	// revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.
	RevisionHistoryLimit int32 `json:"revisionHistoryLimit,omitempty"`
	// selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
	Selector *V1LabelSelector `json:"selector,omitempty"`
	// serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.
	ServiceName string `json:"serviceName"`
	// template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.
	Template *V1PodTemplateSpec `json:"template"`
	// updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.
	UpdateStrategy *V1beta1StatefulSetUpdateStrategy `json:"updateStrategy,omitempty"`
	// volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.
	VolumeClaimTemplates []V1PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"`
} | vendor/github.com/kubernetes-client/go/kubernetes/client/v1beta1_stateful_set_spec.go | 0.791418 | 0.599573 | v1beta1_stateful_set_spec.go | starcoder
package graphic
import (
"github.com/adamlenda/engine/core"
"github.com/adamlenda/engine/geometry"
"github.com/adamlenda/engine/gls"
"github.com/adamlenda/engine/material"
"github.com/adamlenda/engine/math32"
)
// Sprite is a potentially animated image positioned in space that always faces the camera.
// See RenderSetup for how the camera-facing (billboard) behaviour is obtained.
type Sprite struct {
	Graphic             // Embedded graphic
	uniMVPM gls.Uniform // Model view projection matrix uniform location cache
}
// NewSprite creates and returns a pointer to a sprite with the specified
// dimensions and material. The sprite is a unit quad centered at the origin,
// built from two triangles with interleaved position and texture coordinates.
func NewSprite(width, height float32, imat material.IMaterial) *Sprite {
	sprite := new(Sprite)
	halfW := width / 2
	halfH := height / 2
	// Interleaved vertex data: (x, y, z, u, v) for each of the four corners.
	positions := math32.NewArrayF32(0, 12)
	positions.Append(
		-halfW, -halfH, 0, 0, 0,
		halfW, -halfH, 0, 1, 0,
		halfW, halfH, 0, 1, 1,
		-halfW, halfH, 0, 0, 1,
	)
	// Two triangles covering the quad.
	indices := math32.NewArrayU32(0, 6)
	indices.Append(0, 1, 2, 0, 2, 3)
	// Assemble the geometry from the buffers above.
	geom := geometry.NewGeometry()
	geom.SetIndices(indices)
	geom.AddVBO(
		gls.NewVBO(positions).
			AddAttrib(gls.VertexPosition).
			AddAttrib(gls.VertexTexcoord),
	)
	// Wire up the graphic, its material and the MVP uniform cache.
	sprite.Graphic.Init(sprite, geom, gls.TRIANGLES)
	sprite.AddMaterial(sprite, imat, 0, 0)
	sprite.uniMVPM.Init("MVP")
	return sprite
}
// RenderSetup sets up the rendering of the sprite.
// It rebuilds the model-view matrix so that the X and Y components of the
// sprite's rotation are cancelled (keeping the quad facing the camera) and
// then uploads the resulting model-view-projection matrix to the shader's
// "MVP" uniform.
func (s *Sprite) RenderSetup(gs *gls.GLS, rinfo *core.RenderInfo) {
	// Calculates model view matrix
	mw := s.MatrixWorld()
	var mvm math32.Matrix4
	mvm.MultiplyMatrices(&rinfo.ViewMatrix, &mw)
	// Decomposes model view matrix into translation, rotation and scale.
	var position math32.Vector3
	var quaternion math32.Quaternion
	var scale math32.Vector3
	mvm.Decompose(&position, &quaternion, &scale)
	// Removes any rotation in X and Y axes and composes a new model view
	// matrix — only the sprite's own Z rotation (in-plane spin) survives.
	rotation := s.Rotation()
	rotation.X = 0
	rotation.Y = 0
	quaternion.SetFromEuler(&rotation)
	var mvmNew math32.Matrix4
	mvmNew.Compose(&position, &quaternion, &scale)
	// Calculates final MVP and updates the uniform.
	var mvpm math32.Matrix4
	mvpm.MultiplyMatrices(&rinfo.ProjMatrix, &mvmNew)
	location := s.uniMVPM.Location(gs)
	gs.UniformMatrix4fv(location, 1, false, &mvpm[0])
} | graphic/sprite.go | 0.764716 | 0.4165 | sprite.go | starcoder
package enum
import (
"database/sql/driver"
"errors"
"github.com/mwbrown/nagbot/ndb/nbsql"
)
// SchedType is the 'sched_type' enum type from schema 'Public'.
// The zero value (UnknownSchedType) represents an invalid/unset value; the
// remaining values mirror the database enum (see DBString for the DB forms).
type SchedType uint16

const (
	// UnknownSchedType defines an invalid SchedType.
	UnknownSchedType SchedType = 0

	OneshotSchedType       SchedType = 1
	IntervalSchedType      SchedType = 2
	WeeklySchedType        SchedType = 3
	MonthDaySchedType      SchedType = 4
	MonthWeekdaySchedType  SchedType = 5
	AnnualSchedType        SchedType = 6
)
// DBString returns the DB string value of the SchedType.
// Unrecognised values (including UnknownSchedType) yield the sentinel
// "UnknownSchedType", which ParseSchedType will reject.
func (e SchedType) DBString() string {
	switch e {
	case OneshotSchedType:
		return "oneshot"
	case IntervalSchedType:
		return "interval"
	case WeeklySchedType:
		return "weekly"
	case MonthDaySchedType:
		return "month_day"
	case MonthWeekdaySchedType:
		return "month_weekday"
	case AnnualSchedType:
		return "annual"
	}
	return "UnknownSchedType"
}
// String returns the friendly string value of the SchedType.
// Unrecognised values (including UnknownSchedType) yield "UnknownSchedType".
func (e SchedType) String() string {
	switch e {
	case OneshotSchedType:
		return "Oneshot"
	case IntervalSchedType:
		return "Interval"
	case WeeklySchedType:
		return "Weekly"
	case MonthDaySchedType:
		return "MonthDay"
	case MonthWeekdaySchedType:
		return "MonthWeekday"
	case AnnualSchedType:
		return "Annual"
	}
	return "UnknownSchedType"
}
// MarshalText marshals SchedType into text.
// The textual form is the database representation (see DBString).
func (e SchedType) MarshalText() ([]byte, error) {
	text := e.DBString()
	return []byte(text), nil
}
// UnmarshalText unmarshals SchedType from text.
// On parse failure the receiver is left unchanged and the parse error is
// returned.
func (e *SchedType) UnmarshalText(text []byte) error {
	parsed, err := ParseSchedType(string(text))
	if err == nil {
		*e = parsed
	}
	return err
}
// ParseSchedType converts s into a SchedType if it is a valid
// stringified (database) value of SchedType; otherwise it returns
// UnknownSchedType and an error.
func ParseSchedType(s string) (SchedType, error) {
	var result SchedType
	switch s {
	case "oneshot":
		result = OneshotSchedType
	case "interval":
		result = IntervalSchedType
	case "weekly":
		result = WeeklySchedType
	case "month_day":
		result = MonthDaySchedType
	case "month_weekday":
		result = MonthWeekdaySchedType
	case "annual":
		result = AnnualSchedType
	default:
		return UnknownSchedType, errors.New("invalid SchedType")
	}
	return result, nil
}
// Value satisfies the sql/driver.Valuer interface for SchedType.
// The database receives the enum's DB string form.
func (e SchedType) Value() (driver.Value, error) {
	dbForm := e.DBString()
	return dbForm, nil
}
// Scan satisfies the database/sql.Scanner interface for SchedType.
// Drivers may deliver an enum column either as []byte or as string depending
// on the driver and its configuration, so both source types are accepted
// (previously only []byte was handled and string sources failed). Any other
// source type is rejected.
func (e *SchedType) Scan(src interface{}) error {
	switch v := src.(type) {
	case []byte:
		return e.UnmarshalText(v)
	case string:
		return e.UnmarshalText([]byte(v))
	default:
		return errors.New("invalid SchedType")
	}
}
// SchedTypeField is a component that returns a nbsql.WhereClause that contains a
// comparison based on its field and a strongly typed value.
// The string value of a SchedTypeField is the column name used in the
// generated clause.
type SchedTypeField string
// Equals returns a nbsql.WhereClause comparing this field for equality with v.
func (f SchedTypeField) Equals(v SchedType) nbsql.WhereClause {
	return nbsql.Where{Field: string(f), Comp: nbsql.CompEqual, Value: v}
}

// GreaterThan returns a nbsql.WhereClause comparing this field as greater than v.
func (f SchedTypeField) GreaterThan(v SchedType) nbsql.WhereClause {
	return nbsql.Where{Field: string(f), Comp: nbsql.CompGreater, Value: v}
}
// LessThan returns a nbsql.WhereClause comparing this field as less than v.
func (f SchedTypeField) LessThan(v SchedType) nbsql.WhereClause {
	return nbsql.Where{
		Field: string(f),
		// Bug fix: this previously used nbsql.CompEqual (copy-paste from
		// Equals), so LessThan generated an equality comparison instead of
		// a less-than. CompLess is assumed to be the counterpart of
		// CompGreater — verify the constant name against the nbsql package.
		Comp:  nbsql.CompLess,
		Value: v,
	}
}
// GreaterOrEqual returns a nbsql.WhereClause comparing this field as >= v.
func (f SchedTypeField) GreaterOrEqual(v SchedType) nbsql.WhereClause {
	return nbsql.Where{Field: string(f), Comp: nbsql.CompGTE, Value: v}
}

// LessOrEqual returns a nbsql.WhereClause comparing this field as <= v.
func (f SchedTypeField) LessOrEqual(v SchedType) nbsql.WhereClause {
	return nbsql.Where{Field: string(f), Comp: nbsql.CompLTE, Value: v}
}

// NotEqual returns a nbsql.WhereClause comparing this field as != v.
func (f SchedTypeField) NotEqual(v SchedType) nbsql.WhereClause {
	return nbsql.Where{Field: string(f), Comp: nbsql.CompNE, Value: v}
}
// In returns a nbsql.WhereClause matching rows whose field is any of vals.
// The typed values are widened to []interface{} because nbsql.InClause
// carries untyped values.
func (f SchedTypeField) In(vals []SchedType) nbsql.WhereClause {
	values := make([]interface{}, len(vals))
	for x := range vals {
		values[x] = vals[x]
	}
	return nbsql.InClause{
		Field: string(f),
		Vals: values,
	}
} | ndb/nbsql/enum/schedtype.go | 0.770206 | 0.42185 | schedtype.go | starcoder
package iso20022
// Instruction, initiated by the creditor, to debit a debtor's account in favour of the creditor. A direct debit can be pre-authorised or not. In most countries, authorisation is in the form of a mandate between the debtor and creditor.
// NOTE(review): this appears to be generated ISO 20022 message code — keep
// field order and xml tags unchanged; both are part of the wire format.
type DirectDebitMandate4 struct {

	// Unambiguous identification of the account of the debtor to which a debit entry will be made as a result of the transaction.
	DebtorAccount *AccountIdentificationAndName3 `xml:"DbtrAcct"`

	// Party that owes the cash to the creditor/final party. The debtor is also the debit account owner.
	Debtor *PartyIdentification2Choice `xml:"Dbtr,omitempty"`

	// Number assigned by a tax authority to an entity.
	DebtorTaxIdentificationNumber *Max35Text `xml:"DbtrTaxIdNb,omitempty"`

	// Number assigned by a national registration authority to an entity.
	DebtorNationalRegistrationNumber *Max35Text `xml:"DbtrNtlRegnNb,omitempty"`

	// Party that receives an amount of money from the debtor. In the context of the payment model, the creditor is also the credit account owner.
	Creditor *PartyIdentification2Choice `xml:"Cdtr,omitempty"`

	// Financial institution that receives the direct debit instruction from the creditor or other authorised party.
	DebtorAgent *FinancialInstitutionIdentification3Choice `xml:"DbtrAgt"`

	// Information identifying a specific branch of a financial institution.
	//
	// Usage : this component should be used in case the identification information in the financial institution component does not provide identification up to branch level.
	DebtorAgentBranch *BranchData `xml:"DbtrAgtBrnch,omitempty"`

	// Financial institution that receives the payment transaction on behalf of the creditor, or other nominated party, and credits the account.
	CreditorAgent *FinancialInstitutionIdentification3Choice `xml:"CdtrAgt,omitempty"`

	// Information identifying a specific branch of a financial institution.
	//
	// Usage : this component should be used in case the identification information in the financial institution component does not provide identification up to branch level.
	CreditorAgentBranch *BranchData `xml:"CdtrAgtBrnch,omitempty"`

	// Reference assigned to a creditor by its financial institution, or relevant authority, authorising the creditor to take part in a direct debit scheme.
	RegistrationIdentification *Max35Text `xml:"RegnId,omitempty"`

	// Reference of the direct debit mandate that has been agreed upon by the debtor and creditor.
	MandateIdentification *Max35Text `xml:"MndtId,omitempty"`
}
// AddDebtorAccount allocates the DebtorAccount block and returns it so the
// caller can populate it in place (builder pattern used by all Add* methods).
func (d *DirectDebitMandate4) AddDebtorAccount() *AccountIdentificationAndName3 {
	d.DebtorAccount = new(AccountIdentificationAndName3)
	return d.DebtorAccount
}

// AddDebtor allocates the Debtor block and returns it.
func (d *DirectDebitMandate4) AddDebtor() *PartyIdentification2Choice {
	d.Debtor = new(PartyIdentification2Choice)
	return d.Debtor
}

// SetDebtorTaxIdentificationNumber stores value as the debtor's tax ID.
func (d *DirectDebitMandate4) SetDebtorTaxIdentificationNumber(value string) {
	d.DebtorTaxIdentificationNumber = (*Max35Text)(&value)
}

// SetDebtorNationalRegistrationNumber stores value as the debtor's national registration number.
func (d *DirectDebitMandate4) SetDebtorNationalRegistrationNumber(value string) {
	d.DebtorNationalRegistrationNumber = (*Max35Text)(&value)
}

// AddCreditor allocates the Creditor block and returns it.
func (d *DirectDebitMandate4) AddCreditor() *PartyIdentification2Choice {
	d.Creditor = new(PartyIdentification2Choice)
	return d.Creditor
}

// AddDebtorAgent allocates the DebtorAgent block and returns it.
func (d *DirectDebitMandate4) AddDebtorAgent() *FinancialInstitutionIdentification3Choice {
	d.DebtorAgent = new(FinancialInstitutionIdentification3Choice)
	return d.DebtorAgent
}

// AddDebtorAgentBranch allocates the DebtorAgentBranch block and returns it.
func (d *DirectDebitMandate4) AddDebtorAgentBranch() *BranchData {
	d.DebtorAgentBranch = new(BranchData)
	return d.DebtorAgentBranch
}

// AddCreditorAgent allocates the CreditorAgent block and returns it.
func (d *DirectDebitMandate4) AddCreditorAgent() *FinancialInstitutionIdentification3Choice {
	d.CreditorAgent = new(FinancialInstitutionIdentification3Choice)
	return d.CreditorAgent
}

// AddCreditorAgentBranch allocates the CreditorAgentBranch block and returns it.
func (d *DirectDebitMandate4) AddCreditorAgentBranch() *BranchData {
	d.CreditorAgentBranch = new(BranchData)
	return d.CreditorAgentBranch
}

// SetRegistrationIdentification stores value as the creditor's scheme registration reference.
func (d *DirectDebitMandate4) SetRegistrationIdentification(value string) {
	d.RegistrationIdentification = (*Max35Text)(&value)
}

// SetMandateIdentification stores value as the mandate reference.
func (d *DirectDebitMandate4) SetMandateIdentification(value string) {
	d.MandateIdentification = (*Max35Text)(&value)
} | DirectDebitMandate4.go | 0.651133 | 0.557123 | DirectDebitMandate4.go | starcoder
QuadTrees
*/
//-----------------------------------------------------------------------------
package sdf
//-----------------------------------------------------------------------------
// Child positions within a quad tree node. ROOT (-1) marks the root node,
// which has no parent.
const (
	ROOT = iota - 1 // root node
	TL              // top left
	TR              // top right
	BL              // bottom left
	BR              // bottom right
)

// BASE_INC is the integer name increment of the root node (the top bit of a
// uint64); it is halved at each level of subdivision (see NewQTNode).
const BASE_INC = (1 << 63)
//-----------------------------------------------------------------------------
// Distance classification for a point relative to a polygon line segment.
const (
	VERTEX0 = iota // distance to vertex 0 on line
	VERTEX1        // distance to vertex 1 on line
	LINE           // distance to line body
)

// QTInfo records how a point relates to the polygon: which line (index) is
// closest, which feature of that line (dtype), and whether the point lies
// inside the polygon.
type QTInfo struct {
	dtype  int  // distance type (VERTEX0/VERTEX1/LINE)
	index  int  // line index
	inside bool // true if the point is inside the polygon
}

// Equals reports whether a and b carry the same classification.
func (a QTInfo) Equals(b QTInfo) bool {
	if a.dtype != b.dtype {
		return false
	}
	if a.index != b.index {
		return false
	}
	return a.inside == b.inside
}
// GetInfo classifies the corner of node n selected by posn (TL/TR/BL/BR)
// against the tree's polygon.
//
// NOTE(review): this is a stub — it always returns the zero QTInfo, which
// makes every corner comparison in NewQTNode succeed and every node a leaf.
// TODO: implement the real point classification (file is work in progress).
func (t *QTree) GetInfo(n *QTNode, posn int) QTInfo {
	return QTInfo{}
}
//-----------------------------------------------------------------------------
// QTNode is a single node of the quad tree. Each node covers an axis-aligned
// box and either has four children (one per quadrant) or is a leaf.
type QTNode struct {
	child  [4]*QTNode // pointers to the node children (indexed by TL/TR/BL/BR)
	leaf   bool       // true if this node is a leaf (no children)
	corner V2         // top left corner of node box
	size   V2         // size of node box in x,y directions
	xn, yn uint64     // top left corner x,y integer names
	inc    uint64     // integer increment (halved per subdivision level)
}

// QTree is the quad tree itself: a root node plus the bounding box it covers.
type QTree struct {
	root   *QTNode // root node
	corner V2      // top left corner of bounding box
	size   V2      // size of bounding box in x,y directions
}
//-----------------------------------------------------------------------------
// NewQTNode builds the quad tree node at position posn (TL/TR/BL/BR, or ROOT
// for the root) beneath parent, recursively subdividing until all four
// corners of a node classify identically (leaf condition).
//
// NOTE(review): there is no explicit maximum depth — recursion terminates
// only via the all-corners-equal test. With the current GetInfo stub that is
// always true, so every node is a leaf; once GetInfo is implemented a depth
// limit may be needed (file is work in progress).
func (t *QTree) NewQTNode(parent *QTNode, posn int) *QTNode {
	n := QTNode{}
	// work out the node position values
	if posn == ROOT {
		// we are the root node and have no parent
		// get the parameters from the tree
		n.corner = t.corner
		n.size = t.size
		n.xn = 0
		n.yn = 0
		n.inc = BASE_INC
	} else {
		// the node size and increment is half the parent size
		n.size = parent.size.MulScalar(0.5)
		n.inc = parent.inc / 2
		// offset the corner and integer names by the quadrant position
		switch posn {
		case TL:
			n.corner = parent.corner
			n.xn = parent.xn
			n.yn = parent.yn
		case TR:
			n.corner = parent.corner.Add(V2{n.size.X, 0})
			n.xn = parent.xn + n.inc
			n.yn = parent.yn
		case BL:
			n.corner = parent.corner.Add(V2{0, n.size.Y})
			n.xn = parent.xn
			n.yn = parent.yn + n.inc
		case BR:
			n.corner = parent.corner.Add(n.size)
			n.xn = parent.xn + n.inc
			n.yn = parent.yn + n.inc
		}
	}
	// evaluate the corner positions
	i0 := t.GetInfo(&n, TL)
	i1 := t.GetInfo(&n, TR)
	i2 := t.GetInfo(&n, BL)
	i3 := t.GetInfo(&n, BR)
	// create children if we have to
	if i0.Equals(i1) && i2.Equals(i3) && i0.Equals(i2) {
		// they are all the same, this is a leaf node
		n.leaf = true
	} else {
		// make the children
		n.child[TL] = t.NewQTNode(&n, TL)
		n.child[TR] = t.NewQTNode(&n, TR)
		n.child[BL] = t.NewQTNode(&n, BL)
		n.child[BR] = t.NewQTNode(&n, BR)
	}
	return &n
}
//-----------------------------------------------------------------------------
// NewQTree builds a quad tree over the given line segments and bounding box.
//
// NOTE(review): both parameters are currently ignored — t.corner and t.size
// are left at their zero values (so the root covers a zero-area box) and the
// lines are never stored. TODO: initialise corner/size from bb and retain
// the lines before this code is used (file is work in progress).
func NewQTree(lines []Line2, bb Box2) *QTree {
	t := QTree{}
	t.root = t.NewQTNode(nil, ROOT)
	return &t
}
//----------------------------------------------------------------------------- | .wip/quadtree.go | 0.612078 | 0.436562 | quadtree.go | starcoder |
// The datatypes/collection package provides new structures and
// behaviours to the iteration of non-sorted unique element and homogeneous
// lists accepting primitives types and complex user structs as well.
// This part of package contains the core behaviour
package collection
import (
"reflect"
"github.com/jaimelopez/datatypes/generic"
)
// Element represents a generic element stored in a Collection.
type Element interface{}

// ElementList is a generic elements collection.
// Used as parameter type in order to allow encapsulating any
// kind of iterable object, including []Element as well.
type ElementList interface{}

// Collection represents a non-sorted, unique-element, homogeneous list.
type Collection struct {
	definition reflect.Type // type of the first added element; later elements must match it
	elements   []Element    // backing storage (element order is not guaranteed stable)
}
// Add a single element to the collection.
// The collection must be homogeneous, so the element has to share its type
// with the elements already stored (ErrInvalidElementType otherwise). When
// the collection is empty, the element's type becomes the collection's type
// definition. Duplicates are rejected with ErrDuplicatedElement.
func (col *Collection) Add(element Element) error {
	switch {
	case col.IsEmpty():
		// First element fixes the collection's type.
		col.definition = reflect.TypeOf(element)
	case !col.isHomogeneousWith(element):
		return ErrInvalidElementType
	}
	if col.Contains(element) {
		return ErrDuplicatedElement
	}
	col.elements = append(col.elements, element)
	return nil
}
// AddRange inserts a range (slice) into the collection, element by element.
// It returns an error when the parameter cannot be converted to an iterable
// type, or when any single Add fails (type mismatch or duplicate); elements
// added before the failure remain in the collection.
func (col *Collection) AddRange(elements ElementList) error {
	slice, err := generic.ToSlice(elements)
	if err != nil {
		return err
	}
	for _, candidate := range slice {
		if addErr := col.Add(candidate); addErr != nil {
			return addErr
		}
	}
	return nil
}
// AddCollection adds the elements contained in the parameter collection to
// the receiver, with the same error semantics as AddRange.
func (col *Collection) AddCollection(collection *Collection) error {
	other := collection.elements
	return col.AddRange(other)
}
// First returns the first element without removing it from the collection.
// Panics if the collection is empty.
func (col *Collection) First() Element {
	return col.elements[0]
}

// Last returns the last element without removing it from the collection.
// Panics if the collection is empty.
func (col *Collection) Last() Element {
	return col.elements[len(col.elements)-1]
}

// ElementAt returns the element in the specified position.
// Although a collection is an unsorted data structure list and the position
// of the elements could be changed, this method allows to return an specific index position.
// Be aware that the order of elements could be changed constantly such it's described before.
// Panics if position is out of range.
func (col *Collection) ElementAt(position int) Element {
	return col.elements[position]
}

// Elements returns the stored collection elements as slice of this elements.
// This is the proper way to iterate over all the elements of the collection
// treating them as a normal range.
// Note: the returned slice aliases the collection's internal storage, so
// mutating it bypasses the uniqueness/homogeneity guarantees.
func (col *Collection) Elements() []Element {
	return col.elements
}
// Extract removes the first element from the collection and returns it.
// It panics when the collection is empty.
func (col *Collection) Extract() Element {
	head := col.First()
	col.elements = col.elements[1:]
	return head
}
// Set replaces the element stored at the given 0-based index.
// The new value must match the collection's element type.
// It panics when position is out of range.
func (col *Collection) Set(position int, element Element) error {
	if !col.isHomogeneousWith(element) {
		return ErrInvalidElementType
	}
	col.elements[position] = element
	return nil
}
// Delete removes the first stored element that is deeply equal to the
// given one. It returns ErrInvalidElementType when the element's type
// does not match the collection, and ErrElementNotFound when no stored
// element matches.
func (col *Collection) Delete(element Element) error {
	if !col.isHomogeneousWith(element) {
		return ErrInvalidElementType
	}
	for i := range col.elements {
		if !reflect.DeepEqual(col.elements[i], element) {
			continue
		}
		col.elements = append(col.elements[:i], col.elements[i+1:]...)
		return nil
	}
	return ErrElementNotFound
}
// DeleteRange removes every element of the given range (slice) from the
// collection. It stops at and returns the first error encountered,
// either from converting the parameter to a slice or from a failing Delete.
func (col *Collection) DeleteRange(elements ElementList) error {
	slice, err := generic.ToSlice(elements)
	if err != nil {
		return err
	}
	for _, element := range slice {
		if err := col.Delete(element); err != nil {
			return err
		}
	}
	return nil
}
// DeleteCollection removes every element of the given collection from
// the receiver, delegating to DeleteRange (and therefore inheriting its
// error behavior).
func (col *Collection) DeleteCollection(collection *Collection) error {
	return col.DeleteRange(collection.elements)
}
// Contains reports whether an element deeply equal to the given one is
// already stored in the collection.
func (col *Collection) Contains(element Element) bool {
	for i := range col.elements {
		if reflect.DeepEqual(col.elements[i], element) {
			return true
		}
	}
	return false
}
// ContainsAny reports whether any element of the given range is already
// stored in the collection. It returns false when the parameter cannot
// be converted to a slice.
func (col *Collection) ContainsAny(elements ElementList) bool {
	slice, err := generic.ToSlice(elements)
	if err != nil {
		return false
	}
	for _, candidate := range slice {
		if col.Contains(candidate) {
			return true
		}
	}
	return false
}
// Filter returns the elements for which the predicate f returns true.
// The collection itself is left untouched.
func (col *Collection) Filter(f func(Element) bool) []Element {
	var kept []Element
	for _, element := range col.Elements() {
		if f(element) {
			kept = append(kept, element)
		}
	}
	return kept
}
// Size returns the number of elements currently stored in the collection.
func (col *Collection) Size() int {
	return len(col.elements)
}
// IsEmpty reports whether the collection holds no elements.
func (col *Collection) IsEmpty() bool {
	return col.Size() == 0
}
// isHomogeneousWith reports whether the element's dynamic type matches
// the collection's recorded type definition.
func (col *Collection) isHomogeneousWith(element Element) bool {
	return reflect.TypeOf(element) == col.definition
}
// NewEmptyCollection instances a new collection with no elements and no
// type definition yet (the first Add will fix the type).
func NewEmptyCollection() *Collection {
	return &Collection{}
}
// NewCollection allows to instance a new Collection with a group of elements
// It accepts an enumerable
func NewCollection(elements ElementList) *Collection {
collection := new(Collection)
err := collection.AddRange(elements)
if err != nil {
collection.Add(elements)
}
return collection
} | collection/collection.go | 0.819965 | 0.650883 | collection.go | starcoder |
package main
import (
"bufio"
"encoding/binary"
"flag"
"fmt"
"github.com/RedisAI/redisai-go/redisai/implementations"
"github.com/cheggaaa/pb/v3"
"image"
"image/jpeg"
"io"
"io/ioutil"
"log"
"math"
"os"
"strconv"
)
// Program option vars:
var (
	inputDir string // directory containing the input validation images
	outputFileName string // destination file; empty means STDOUT
	batchSize int // input tensor batch size (reported in the summary only)
	limit int // max number of tensors to generate; <= 0 means unlimited
	defaultWriteSize = 4 << 20 // 4 MB
)
// Vars only for git sha and diff handling.
// NOTE(review): these look intended to be injected at build time via
// -ldflags "-X main.GitSHA1=... -X main.GitDirty=..." — confirm against the build scripts.
var GitSHA1 string = ""
var GitDirty string = "0"
// AibenchGitSHA1 returns the git SHA1 recorded in GitSHA1 (may be empty
// when the binary was built without the ldflags injection).
func AibenchGitSHA1() string {
	return GitSHA1
}
// AibenchGitDirty reports whether the build carried uncommitted changes:
// GitDirty holds a line count as text, and any non-zero parseable count
// marks the build dirty. Unparseable values are treated as clean.
func AibenchGitDirty() (dirty bool) {
	lines, err := strconv.Atoi(GitDirty)
	return err == nil && lines != 0
}
// rgbaToPixel narrows the four 16-bit channel values returned by
// img.At(x, y).RGBA() (range 0-65535) down to 8-bit channel values.
func rgbaToPixel(r uint32, g uint32, b uint32, a uint32) (R, G, B, A uint8) {
	const ratio = 257 // 65535 / 255
	return uint8(r / ratio), uint8(g / ratio), uint8(b / ratio), uint8(a / ratio)
}
// rgbaToPixelFloat32 narrows the four 16-bit channel values returned by
// img.At(x, y).RGBA() to float32 channel values multiplied by scale.
// Note the division happens in integer space first (mirroring the uint8
// conversion) and only then is the result converted and scaled.
func rgbaToPixelFloat32(r uint32, g uint32, b uint32, a uint32, scale float32) (R, G, B, A float32) {
	conv := func(c uint32) float32 { return float32(c/257) * scale }
	return conv(r), conv(g), conv(b), conv(a)
}
// getRGBAPos_CxHxW returns the flat indices of the R, G, B and A values
// for pixel (x, y) in a planar C x H x W layout: each channel occupies
// one height*width plane, with the pixel offset y*width+x inside it.
func getRGBAPos_CxHxW(y int, width int, x int, height int) (int, int, int, int) {
	plane := height * width
	base := y*width + x
	return base, plane + base, 2*plane + base, 3*plane + base
}
// JPEGImageTo_HxWxC_float32_AiTensor converts the image to a float32 tensor with a H x W x C layout.
// Channel values are produced by JP, which scales every channel by the given scale factor.
// It returns both the flat pixel slice and the tensor wrapping it.
func JPEGImageTo_HxWxC_float32_AiTensor(img image.Image, useAlpha bool, scale float32) ([]float32, *implementations.AITensor) {
	width, height, numChannels, pixels := JP(img, useAlpha, scale)
	// Build a tensor with shape [H, W, C] around the flattened pixels
	tensor := implementations.NewAiTensor()
	tensor.SetShape([]int64{int64(height), int64(width), numChannels})
	tensor.SetData(pixels)
	return pixels, tensor
}
// JP flattens img into a float32 pixel slice laid out H x W x C,
// passing every 16-bit channel through rgbaToPixelFloat32 with the given
// scale. It returns the image width, height, channel count (3, or 4
// when useAlpha is set) and the pixel data.
func JP(img image.Image, useAlpha bool, scale float32) (int, int, int64, []float32) {
	bounds := img.Bounds()
	width, height := bounds.Max.X, bounds.Max.Y
	channels := int64(3)
	if useAlpha {
		channels = 4
	}
	pixels := make([]float32, 0, height*width*int(channels))
	for row := 0; row < height; row++ {
		for col := 0; col < width; col++ {
			rr, gg, bb, aa := img.At(col, row).RGBA()
			r, g, b, a := rgbaToPixelFloat32(rr, gg, bb, aa, scale)
			pixels = append(pixels, r, g, b)
			if useAlpha {
				pixels = append(pixels, a)
			}
		}
	}
	return width, height, channels, pixels
}
// JPEGImageTo_HxWxC_uint8_AiTensor converts the image into a flat uint8
// pixel slice with an interleaved H x W x C layout and wraps it in an
// AITensor. When useAlpha is false only the R, G and B channels are emitted.
func JPEGImageTo_HxWxC_uint8_AiTensor(img image.Image, useAlpha bool) ([]uint8, *implementations.AITensor) {
	bounds := img.Bounds()
	width, height := bounds.Max.X, bounds.Max.Y
	channels := int64(3)
	if useAlpha {
		channels = 4
	}
	pixels := make([]uint8, 0, height*width*int(channels))
	for row := 0; row < height; row++ {
		for col := 0; col < width; col++ {
			r, g, b, a := rgbaToPixel(img.At(col, row).RGBA())
			pixels = append(pixels, r, g, b)
			if useAlpha {
				pixels = append(pixels, a)
			}
		}
	}
	// Build a tensor with shape [H, W, C] around the flattened pixels
	tensor := implementations.NewAiTensor()
	tensor.SetShape([]int64{int64(height), int64(width), channels})
	tensor.SetData(pixels)
	return pixels, tensor
}
// JPEGImageTo_CxHxW_uint8_AiTensor converts the image into a flat uint8
// pixel slice with a planar C x H x W layout (all R values, then all G,
// then all B, then — when useAlpha is set — all A) and wraps it in an AITensor.
func JPEGImageTo_CxHxW_uint8_AiTensor(img image.Image, useAlpha bool) ([]uint8, *implementations.AITensor) {
	bounds := img.Bounds()
	width, height := bounds.Max.X, bounds.Max.Y
	channels := int64(3)
	if useAlpha {
		channels = 4
	}
	pixels := make([]uint8, height*width*int(channels))
	for row := 0; row < height; row++ {
		for col := 0; col < width; col++ {
			r, g, b, a := rgbaToPixel(img.At(col, row).RGBA())
			rPos, gPos, bPos, aPos := getRGBAPos_CxHxW(row, width, col, height)
			pixels[rPos] = r
			pixels[gPos] = g
			pixels[bPos] = b
			if useAlpha {
				pixels[aPos] = a
			}
		}
	}
	// Build a tensor with shape [C, H, W] around the planar pixels
	tensor := implementations.NewAiTensor()
	tensor.SetShape([]int64{channels, int64(height), int64(width)})
	tensor.SetData(pixels)
	return pixels, tensor
}
// JPEGImageTo_CxHxW_float32_AiTensor converts the image into a flat
// float32 pixel slice with a planar C x H x W layout, scaling every
// channel by scale, and wraps it in an AITensor. When useAlpha is false
// only the R, G and B planes are filled.
func JPEGImageTo_CxHxW_float32_AiTensor(img image.Image, useAlpha bool, scale float32) ([]float32, *implementations.AITensor) {
	bounds := img.Bounds()
	width, height := bounds.Max.X, bounds.Max.Y
	channels := int64(3)
	if useAlpha {
		channels = 4
	}
	pixels := make([]float32, height*width*int(channels))
	for row := 0; row < height; row++ {
		for col := 0; col < width; col++ {
			rr, gg, bb, aa := img.At(col, row).RGBA()
			r, g, b, a := rgbaToPixelFloat32(rr, gg, bb, aa, scale)
			rPos, gPos, bPos, aPos := getRGBAPos_CxHxW(row, width, col, height)
			pixels[rPos] = r
			pixels[gPos] = g
			pixels[bPos] = b
			if useAlpha {
				pixels[aPos] = a
			}
		}
	}
	// Build a tensor with shape [C, H, W] around the planar pixels
	tensor := implementations.NewAiTensor()
	tensor.SetShape([]int64{channels, int64(height), int64(width)})
	tensor.SetData(pixels)
	return pixels, tensor
}
// GetBufferedWriter returns the buffered writer the generated output
// should go to: the named file when fileName is non-empty, otherwise
// STDOUT. The process is terminated when the file cannot be created.
func GetBufferedWriter(fileName string) *bufio.Writer {
	if fileName == "" {
		// No file requested — stream to STDOUT.
		return bufio.NewWriterSize(os.Stdout, defaultWriteSize)
	}
	file, err := os.Create(fileName)
	if err != nil {
		log.Fatalf("cannot open file for write %s: %v", fileName, err)
	}
	return bufio.NewWriterSize(file, defaultWriteSize)
}
// SerializeTensorData writes the raw tensor bytes to w, in a format that
// will be easy to embed in a RedisAI command.
func SerializeTensorData(pixels []byte, w io.Writer) (err error) {
	// Write the payload directly; the previous intermediate copy into a
	// scratch slice only cost an extra allocation without changing output.
	_, err = w.Write(pixels)
	return err
}
// Float32bytes returns the little-endian IEEE-754 encoding of the value
// as a 4-byte slice.
func Float32bytes(float float32) []byte {
	out := make([]byte, 4)
	binary.LittleEndian.PutUint32(out, math.Float32bits(float))
	return out
}
// SerializeTensorDataFloat32 writes the float32 tensor values to w as
// little-endian IEEE-754 bytes, issuing a single Write for the whole
// payload.
func SerializeTensorDataFloat32(pixels []float32, w io.Writer) (err error) {
	// Size the buffer up front (4 bytes per float32) and encode in place;
	// the previous revision allocated a fresh 4-byte slice per value and
	// grew the buffer repeatedly.
	buf := make([]byte, 0, len(pixels)*4)
	var scratch [4]byte
	for _, v := range pixels {
		binary.LittleEndian.PutUint32(scratch[:], math.Float32bits(v))
		buf = append(buf, scratch[:]...)
	}
	_, err = w.Write(buf)
	return err
}
func main() {
flag.StringVar(&inputDir, "input-val-dir", ".", fmt.Sprintf(""))
flag.StringVar(&outputFileName, "output-file", "", "File name to write generated data to")
flag.IntVar(&batchSize, "batch-size", 1, "Input tensor batch size")
flag.IntVar(&limit, "limit", -1, "limit the number of generated tensors. If < 0 no limit is applied")
version := flag.Bool("v", false, "Output version and exit")
flag.Parse()
if *version {
git_sha := AibenchGitSHA1()
git_dirty_str := ""
if AibenchGitDirty() {
git_dirty_str = "-dirty"
}
fmt.Fprintf(os.Stdout, "aibench_generate_data_vision (git_sha1:%s%s)\n", git_sha, git_dirty_str)
os.Exit(0)
}
// Get output writer
out := GetBufferedWriter(outputFileName)
defer func() {
err := out.Flush()
if err != nil {
log.Fatal(err.Error())
}
}()
items, _ := ioutil.ReadDir(inputDir)
log.Println(fmt.Sprintf("Reading images from: %s\n.Input tensor batch size %d.", inputDir, batchSize))
total_images_to_read := len(items)
if limit > 0 && total_images_to_read > limit {
total_images_to_read = limit
}
bar := pb.StartNew(total_images_to_read)
batchedPixels := make([]float32, 0, 0)
totalRows := 0
totalImages := 0
for _, item := range items {
if totalImages >= limit && limit > 0 {
log.Println(fmt.Sprintf("Reached limit of tensor generation %d.", limit))
break
}
if !item.IsDir() {
// Read image from file that already exists
imageFile, err := os.Open(fmt.Sprintf("%s/%s", inputDir, item.Name()))
if err != nil {
log.Fatal(err)
}
img, err := jpeg.Decode(imageFile)
pixels, _ := JPEGImageTo_HxWxC_float32_AiTensor(img, false, 1.0/255.0)
batchedPixels = append(batchedPixels, pixels...)
err = SerializeTensorDataFloat32(pixels, out)
if err != nil {
log.Fatal(err)
}
totalRows++
bar.Increment()
imageFile.Close()
out.Flush()
totalImages++
}
}
out.Flush()
bar.Finish()
fmt.Println(fmt.Sprintf("Read %d images. Generated a total of %d lines with %d image each. Total Bytes: %d", totalImages, totalRows, batchSize, out.Size()))
} | cmd/aibench_generate_data_vision/aibench_generate_data_vision.go | 0.623835 | 0.432243 | aibench_generate_data_vision.go | starcoder |
package checks
import (
model "github.com/DataDog/agent-payload/v5/process"
)
// payloadList is an abstract list of payloads subject to chunking.
type payloadList interface {
	// Len returns the length of the list.
	Len() int
	// WeightAt returns the weight of the payload at position `idx` in the list.
	WeightAt(idx int) int
	// ToChunk copies the [start, end) slice of the list to an abstract connected chunker, providing the cumulative weight of the chunk.
	ToChunk(start, end int, weight int)
}
// chunkAllocator abstracts management operations for chunk allocation.
type chunkAllocator interface {
	// TakenSize returns the size already allocated to the current chunk.
	TakenSize() int
	// TakenWeight returns the weight already allocated to the current chunk.
	TakenWeight() int
	// Append creates a new chunk at the end (for cases when it is known that no previously allocated chunk can fit the payload).
	Append()
	// Next moves to the next chunk, or allocates a new chunk if the current chunk is the last one.
	Next()
}
// chunkPayloadsBySizeAndWeight allocates chunks of payloads taking max allowed size and max allowed weight.
// The algorithm in a nutshell:
// - iterate through payloads in the `payloadList`
// - keep track of size and weight available for allocation (`TakenSize` and `TakenWeight`)
// - create a new chunk once we exceed these limits
// - consider the case when the current item exceeds the max allowed weight and create a new chunk at the end (`Append`)
// This implementation allows for multiple passes through the chunks, which can be useful in cases with different payload types
// being allocated within chunks.
func chunkPayloadsBySizeAndWeight(l payloadList, a chunkAllocator, maxChunkSize int, maxChunkWeight int) {
	start := 0
	chunkWeight := 0
	// Take available size and available weight by consulting the current chunk
	availableSize := maxChunkSize - a.TakenSize()
	availableWeight := maxChunkWeight - a.TakenWeight()
	for i := 0; i < l.Len(); i++ {
		itemWeight := l.WeightAt(i)
		// Evaluate size of the currently accumulated items (from the start of the candidate chunk)
		size := i - start
		// Track if we need to skip the item on the next chunk (large item chunked separately)
		skipItem := false
		if size >= availableSize || chunkWeight+itemWeight > availableWeight {
			// We are exceeding available size or available weight and it is time to create a new chunk
			if size > 0 {
				// We already accumulated some items - create a new chunk
				l.ToChunk(start, i, chunkWeight)
				a.Next()
			}
			// Reset chunk weight
			chunkWeight = 0
			// Reset chunk start position
			start = i
			// Check if the current item exceeds the max allowed chunk weight
			if itemWeight >= maxChunkWeight {
				// Current item is exceeding max allowed chunk weight and should be chunked separately
				if availableWeight < maxChunkWeight {
					// Currently considered chunk already has allocations - create a new chunk at the end
					a.Append()
				}
				// Chunk the single oversized item on its own
				l.ToChunk(i, i+1, itemWeight)
				a.Next()
				// Skip over this single item
				start = i + 1
				skipItem = true
			} else {
				// Find a chunk that can take the current items
				for maxChunkSize-a.TakenSize() < 1 || maxChunkWeight-a.TakenWeight() < itemWeight {
					a.Next()
				}
			}
			// Reset available size and available weight based on the current chunk
			availableSize = maxChunkSize - a.TakenSize()
			availableWeight = maxChunkWeight - a.TakenWeight()
		}
		if !skipItem {
			// Only include the current item if it hasn't been moved to a separate chunk
			chunkWeight += itemWeight
		}
	}
	// Chunk the remainder of payloads
	if start < l.Len() {
		l.ToChunk(start, l.Len(), chunkWeight)
	}
}
// chunkProcessesBySizeAndWeight chunks `model.Process` payloads by max allowed size and max allowed weight of a chunk.
func chunkProcessesBySizeAndWeight(procs []*model.Process, ctr *model.Container, maxChunkSize, maxChunkWeight int, chunker *collectorProcChunker) {
	// Use the last available chunk as it may have some space for payloads
	// (when only one chunk exists, index 0 already is the last one).
	chunker.idx = 0
	if len(chunker.collectorProcs) > 1 {
		chunker.idx = len(chunker.collectorProcs) - 1
	}
	// Processes that have a related container will add this container to every chunk they are split across.
	// This may result in the same container being sent in multiple payloads from the agent.
	// Note that this is necessary because container process tags (sent within `model.Container`) are only resolved from
	// containers seen within the same `model.ContainerProc` as processes.
	chunker.container = ctr
	list := &processList{
		procs: procs,
		chunker: chunker,
	}
	chunkPayloadsBySizeAndWeight(list, chunker, maxChunkSize, maxChunkWeight)
}
// processList is a payload list of `model.Process` payloads.
// It adapts a process slice to the payloadList interface, delegating
// chunk storage to the embedded processChunker.
type processList struct {
	procs []*model.Process
	chunker processChunker
}
// processChunker abstracts chunking of `model.Process` payloads.
type processChunker interface {
	// Accept takes a slice of `model.Process` and allocates them to the current chunk.
	Accept(procs []*model.Process, weight int)
}
// Len returns the number of processes in the list.
func (l *processList) Len() int {
	return len(l.procs)
}
// WeightAt returns the serialized weight of the process at idx, or 0
// when idx is out of range.
func (l *processList) WeightAt(idx int) int {
	if idx < len(l.procs) {
		return weighProcess(l.procs[idx])
	}
	return 0
}
// ToChunk hands the [start, end) process slice, together with its
// cumulative weight, to the underlying chunker.
func (l *processList) ToChunk(start, end int, weight int) {
	l.chunker.Accept(l.procs[start:end], weight)
}
// chunkProps is used to track the weight and size of a single chunk.
type chunkProps struct {
	weight int
	size int
}
// chunkPropsTracker tracks weight and size of chunked payloads.
type chunkPropsTracker struct {
	props []chunkProps // per-chunk accounting, parallel to the allocated chunks
	idx int // index of the chunk currently being filled
}
// TakenSize returns the size already allocated to the current chunk
// (0 when the chunk has not been materialized yet).
func (c *chunkPropsTracker) TakenSize() int {
	if c.idx >= len(c.props) {
		return 0
	}
	return c.props[c.idx].size
}
// TakenWeight returns the weight already allocated to the current chunk
// (0 when the chunk has not been materialized yet).
func (c *chunkPropsTracker) TakenWeight() int {
	if c.idx >= len(c.props) {
		return 0
	}
	return c.props[c.idx].weight
}
// Append creates a new chunk at the end (used when it is known that no
// previously allocated chunk can fit the payload). The chunk itself is
// materialized lazily by the consumer (see collectorProcChunker.Accept).
func (c *chunkPropsTracker) Append() {
	c.idx = len(c.props)
}
// Next moves to the next chunk; when the current chunk is the last one,
// the consumer allocates a new chunk on the next Accept.
func (c *chunkPropsTracker) Next() {
	c.idx++
}
// collectorProcChunker implements allocation of chunks to `model.CollectorProc`.
type collectorProcChunker struct {
	chunkPropsTracker
	container *model.Container
	collectorProcs []*model.CollectorProc
}

// collectorProcChunker implements both `chunkAllocator` and `processChunker`.
var _ processChunker = &collectorProcChunker{}
var _ chunkAllocator = &collectorProcChunker{}
// Accept allocates the given processes (with their cumulative weight) to
// the current chunk, creating the chunk on demand.
func (c *collectorProcChunker) Accept(procs []*model.Process, weight int) {
	if c.idx >= len(c.collectorProcs) {
		// Materialize the chunk (and its matching props entry) lazily.
		c.collectorProcs = append(c.collectorProcs, &model.CollectorProc{})
		c.props = append(c.props, chunkProps{})
	}
	current := c.collectorProcs[c.idx]
	if c.container != nil {
		// The related container rides along with every chunk its processes
		// are split across; container size/weight is not accounted for here.
		current.Containers = append(current.Containers, c.container)
	}
	current.Processes = append(current.Processes, procs...)
	props := &c.props[c.idx]
	props.size += len(procs)
	props.weight += weight
}
var (
	// procSizeofSampleProcess is a sample process used in sizeof/weight calculations.
	// The nested stat messages are instantiated so that their (empty) encoded size is included.
	procSizeofSampleProcess = &model.Process{
		Memory: &model.MemoryStat{},
		Cpu: &model.CPUStat{},
		IoStat: &model.IOStat{},
		Networks: &model.ProcessNetworks{},
	}
	// procSizeofProto is the serialized size of an empty process.
	procSizeofProto = procSizeofSampleProcess.Size()
)
// weighProcess weighs `model.Process` payloads using an approximation of a serialized size of the proto message
func weighProcess(proc *model.Process) int {
weight := procSizeofProto
if proc.Command != nil {
weight += len(proc.Command.Exe) + len(proc.Command.Cwd) + len(proc.Command.Root)
for _, arg := range proc.Command.Args {
weight += len(arg)
}
}
if proc.User != nil {
weight += len(proc.User.Name)
}
weight += len(proc.ContainerId)
return weight
} | pkg/process/checks/chunking.go | 0.71423 | 0.461927 | chunking.go | starcoder |
package table
import (
"errors"
"fmt"
"strings"
"unicode"
)
// Options represents the options that can be set when creating a new table.
type Options struct {
	// Data holds the initial table cells as rows of column values.
	Data [][]string
}
// New returns a table initialized from the given options.
func New(o Options) *Table {
	return &Table{data: o.Data}
}
// Table represents a table of string cells addressed by 1-based row and
// column numbers.
type Table struct {
	data [][]string // rows of column values
	colSizes map[int]int // widest value seen per 0-based column, for alignment
}
// Data returns the data of the table.
// Note that it returns the internal backing slice, not a copy, so
// mutations by the caller are visible to the table.
func (t *Table) Data() [][]string {
	return t.data
}
// SetData sets the table data by the given row, column and value.
// Row and column numbers are 1-based; the table grows automatically so
// that the addressed cell exists. The longest value seen per column is
// recorded for later alignment in FormattedData.
func (t *Table) SetData(row, col int, val string) error {
	// Check row and column (both are 1-based, so 0 and negatives are invalid)
	if row < 1 {
		return errors.New("invalid row")
	} else if col < 1 {
		return errors.New("invalid column")
	}
	// Increase the row capacity if it's necessary
	if row > len(t.data) {
		nt := make([][]string, row)
		copy(nt, t.data)
		t.data = nt
	}
	// Increase the column capacity if it's necessary
	if col > len(t.data[row-1]) {
		nr := make([]string, col)
		copy(nr, t.data[row-1])
		t.data[row-1] = nr
	}
	// Set the value
	t.data[row-1][col-1] = val
	// Set the column size for alignment (lazily created on first write)
	if t.colSizes == nil {
		t.colSizes = make(map[int]int)
	}
	// Column widths only ever grow; a shorter replacement value keeps the
	// previously recorded width.
	if len(val) > t.colSizes[col-1] {
		t.colSizes[col-1] = len(val)
	}
	return nil
}
// SetRow fills the given 1-based row with the provided column values,
// stopping at the first error.
func (t *Table) SetRow(row int, cols ...string) error {
	for i, val := range cols {
		if err := t.SetData(row, i+1, val); err != nil {
			return err
		}
	}
	return nil
}
// AddRow appends a row after the last existing row, filling it with the
// given column values. It returns the first error produced while setting
// a column value, if any.
func (t *Table) AddRow(cols ...string) error {
	// Delegate to SetRow targeting the first row past the current end;
	// the previous revision duplicated SetRow's loop verbatim.
	return t.SetRow(len(t.data)+1, cols...)
}
// FormattedData returns the formatted table data
func (t *Table) FormattedData() string {
// Iterate over the rows and prepare result
result := ""
rowVal := ""
colSize := ""
for _, row := range t.data {
rowVal = ""
for i, c := range row {
colSize = fmt.Sprintf("%d", t.colSizes[i])
rowVal += fmt.Sprintf("%-"+colSize+"s\t", c)
}
result += fmt.Sprintf("%s\n", strings.TrimRightFunc(rowVal, unicode.IsSpace))
}
return result
} | table/table.go | 0.68763 | 0.412116 | table.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.