code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package model
// DataZoomInside is the option set for the "inside"-type dataZoom component.
// The dataZoom component is used for zooming a specific area, which enables a user to
// investigate data in detail, or get an overview of the data, or get rid of outlier points.
// https://echarts.apache.org/en/option.html#dataZoom
type DataZoomInside struct {
	// Type of the data zoom component. Options: "inside", "slider"; defaults to "inside".
	Type DataZoomType `json:"type" default:"inside"`
	// Whether to show the component. If is set to be false, it will not show, but its data filtering function still works.
	//Disabled bool `json:"disabled"`
	// The start percentage of the window out of the data extent, in the range of 0 ~ 100.
	// default 0
	Start float32 `json:"start,omitempty"`
	// The end percentage of the window out of the data extent, in the range of 0 ~ 100.
	// default 100
	End float32 `json:"end,omitempty"`
	// Specify the frame rate of views refreshing, with unit millisecond (ms).
	// If animation set as true and animationDurationUpdate set as bigger than 0,
	// you can keep throttle as the default value 100 (or set it as a value bigger than 0),
	// otherwise it might be not smooth when dragging.
	// If animation set as false or animationDurationUpdate set as 0, and data size is not very large,
	// and it seems to be not smooth when dragging, you can set throttle as 0 to improve that.
	Throttle float32 `json:"throttle,omitempty"`
	// Specify which xAxis is/are controlled by the dataZoom-inside when Cartesian coordinate system is used.
	// By default the first xAxis that parallel to dataZoom are controlled when dataZoom-inside.
	// Orient is set as 'horizontal'. But it is recommended to specify it explicitly but not use default value.
	// If it is set as a single number, one axis is controlled, while if it is set as an Array,
	// multiple axes are controlled.
	XAxisIndex interface{} `json:"xAxisIndex,omitempty"`
	// Specify which yAxis is/are controlled by the dataZoom-inside when Cartesian coordinate system is used.
	// By default the first yAxis that parallel to dataZoom are controlled when dataZoom-inside.
	// Orient is set as 'vertical'. But it is recommended to specify it explicitly but not use default value.
	// If it is set as a single number, one axis is controlled, while if it is set as an Array,
	// multiple axes are controlled.
	YAxisIndex interface{} `json:"yAxisIndex,omitempty"`
}
// DataZoomSlider is the option set for the "slider"-type dataZoom component,
// which provides a visible slider bar for zooming or roaming the data window.
// https://echarts.apache.org/en/option.html#dataZoom-slider
type DataZoomSlider struct {
	// Type of the data zoom component. Options: "inside", "slider"; defaults to "slider".
	Type DataZoomType `json:"type" default:"slider"`
	// Whether to show the component. If is set to be false, it will not show, but its data filtering function still works.
	//Show bool `json:"show"`
	// The start percentage of the window out of the data extent, in the range of 0 ~ 100.
	// default 0
	Start float32 `json:"start,omitempty"`
	// The end percentage of the window out of the data extent, in the range of 0 ~ 100.
	// default 100
	End float32 `json:"end,omitempty"`
	// Specify the frame rate of views refreshing, with unit millisecond (ms).
	// If animation set as true and animationDurationUpdate set as bigger than 0,
	// you can keep throttle as the default value 100 (or set it as a value bigger than 0),
	// otherwise it might be not smooth when dragging.
	// If animation set as false or animationDurationUpdate set as 0, and data size is not very large,
	// and it seems to be not smooth when dragging, you can set throttle as 0 to improve that.
	Throttle float32 `json:"throttle,omitempty"`
	// Specify which xAxis is/are controlled by the dataZoom-slider when Cartesian coordinate system is used.
	// By default the first xAxis that parallel to dataZoom are controlled when dataZoom-slider.
	// Orient is set as 'horizontal'. But it is recommended to specify it explicitly but not use default value.
	// If it is set as a single number, one axis is controlled, while if it is set as an Array,
	// multiple axes are controlled.
	XAxisIndex interface{} `json:"xAxisIndex,omitempty"`
	// Specify which yAxis is/are controlled by the dataZoom-slider when Cartesian coordinate system is used.
	// By default the first yAxis that parallel to dataZoom are controlled when dataZoom-slider.
	// Orient is set as 'vertical'. But it is recommended to specify it explicitly but not use default value.
	// If it is set as a single number, one axis is controlled, while if it is set as an Array,
	// multiple axes are controlled.
	YAxisIndex interface{} `json:"yAxisIndex,omitempty"`
}
// DataZoomType identifies which flavor of dataZoom component is used.
type DataZoomType string

const (
	// DataZoomTypeInside: data zoom functionality is embedded inside coordinate
	// systems, enabling the user to zoom or roam the coordinate system by mouse
	// dragging, mouse move, or finger touch (on a touch screen).
	DataZoomTypeInside DataZoomType = "inside"
	// DataZoomTypeSlider: a special slider bar is provided, on which coordinate
	// systems can be zoomed or roamed by mouse dragging or finger touch (on a
	// touch screen).
	DataZoomTypeSlider DataZoomType = "slider"
)
package ocalver
import (
"fmt"
"path/filepath"
"regexp"
"strconv"
"time"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"golang.org/x/mod/semver"
)
const (
tagRegexp string = `^(\d+).(\d+).(\d+)$`
tagPreRegexp string = `^(\d+).(\d+).(\d+)-%s.(\d+)+[a-f0-9]{7,10}$`
)
// Generate computes the next calendar version for the repository described by
// cfg: it finds the highest existing release tag, derives today's version from
// it, and, when cfg.Pre is set, appends pre-release information.
func Generate(cfg Config) (string, error) {
	r, err := git.PlainOpen(filepath.Dir(cfg.RepositoryPath))
	if err != nil {
		return "", err
	}
	tags, err := r.Tags()
	if err != nil {
		return "", err
	}
	var mostRecentTagRef *plumbing.Reference
	// only consider tags matching the release pattern (no pre-release suffix)
	re := getRegexp("")
	_ = tags.ForEach(func(ref *plumbing.Reference) error {
		if re.Match([]byte(ref.Name().Short())) {
			// Prefix with "v" because golang.org/x/mod/semver only accepts
			// versions starting with "v".
			// NOTE(review): (mostRecentTagRef.Name())[10:] strips the
			// "refs/tags/" prefix by fixed offset — assumes that exact
			// prefix; ref.Name().Short() would be the safer equivalent.
			if mostRecentTagRef == nil || semver.Compare(fmt.Sprintf("v%s", ref.Name().Short()), fmt.Sprintf("v%s", (mostRecentTagRef.Name())[10:])) == 1 {
				mostRecentTagRef = ref
			}
		}
		return nil
	})
	tagInfo := TagInfo{}
	if mostRecentTagRef != nil {
		tagInfo, err = extractTagInfo(mostRecentTagRef, "")
		if err != nil {
			return "", err
		}
	}
	nextTagInfo := nextTagInfo(tagInfo)
	if len(cfg.Pre) > 0 {
		if err = nextTagInfo.GetPreInfo(r, tagInfo, cfg.Pre); err != nil {
			return "", err
		}
	}
	return nextTagInfo.String(), nil
}
func getRegexp(pre string) *regexp.Regexp {
if len(pre) > 0 {
return regexp.MustCompile(fmt.Sprintf(tagPreRegexp, pre))
}
return regexp.MustCompile(tagRegexp)
}
// extractTagInfo parses a tag reference's short name into its TagInfo
// components. With an empty pre the release pattern (year.yearday.iteration)
// is used; otherwise the pre-release pattern is used and PreInfo is populated
// with the pre-release name, iteration, and commit hash as well.
func extractTagInfo(ref *plumbing.Reference, pre string) (t TagInfo, err error) {
	t.Hash = ref.Hash()
	re := getRegexp(pre)
	d := re.FindStringSubmatch(ref.Name().Short())
	// FindStringSubmatch returns the full match plus one entry per capture
	// group: 3 groups for a release tag, 5 for a pre-release tag.
	// NOTE(review): this requires tagPreRegexp to capture five groups,
	// including the commit hash read from d[5] — verify the pattern agrees.
	expectedLength := 4
	if len(pre) > 0 {
		expectedLength = 6
	}
	if len(d) != expectedLength {
		err = fmt.Errorf("invalid tag format %v", d)
		return
	}
	if len(pre) > 0 {
		t.PreInfo = &PreInfo{}
		t.PreInfo.Name = pre
		if t.PreInfo.Iteration, err = strconv.Atoi(d[4]); err != nil {
			return
		}
		t.PreInfo.CommitHash = d[5]
	}
	if t.Year, err = strconv.Atoi(d[1]); err != nil {
		return
	}
	if t.YearDay, err = strconv.Atoi(d[2]); err != nil {
		return
	}
	t.Iteration, err = strconv.Atoi(d[3])
	return
}
// nextTagInfo derives the next release TagInfo from the most recent one: the
// version date is today (year offset from 2000, plus day of year), and the
// iteration is bumped only when the previous tag is from the same day.
func nextTagInfo(ori TagInfo) TagInfo {
	// Capture the clock once so Year and YearDay agree even if the call
	// straddles a midnight/new-year boundary (previously time.Now() was
	// evaluated twice). Also avoids shadowing the builtin `new`.
	now := time.Now()
	next := TagInfo{
		Year:    now.Year() - 2000,
		YearDay: now.YearDay(),
	}
	if TagsShareDate(ori, next) {
		next.Iteration = ori.Iteration + 1
	}
	return next
}
// TagsShareDate returns whether 2 TagInfo objects have the same date,
// i.e. the same year and the same day-of-year.
func TagsShareDate(a, b TagInfo) bool {
	return a.Year == b.Year && a.YearDay == b.YearDay
}
// GetPreInfo populates t.PreInfo for the pre-release named pre: the short
// hash of HEAD plus an iteration derived from the number of commits made
// today (UTC) since the previous tag ori.
func (t *TagInfo) GetPreInfo(r *git.Repository, ori TagInfo, pre string) error {
	t.PreInfo = &PreInfo{
		Name:      pre,
		Iteration: 0,
	}
	h, err := r.Head()
	if err != nil {
		return err
	}
	headCommit, err := r.CommitObject(h.Hash())
	if err != nil {
		return err
	}
	// short (8-character) hash of HEAD identifies the pre-release build
	t.PreInfo.CommitHash = headCommit.Hash.String()[:8]
	// list the hashes of all commits made since midnight UTC, newest first
	today := time.Date(time.Now().UTC().Year(), time.Now().UTC().Month(), time.Now().UTC().Day(), 0, 0, 0, 0, time.UTC)
	logOptions := &git.LogOptions{
		Order: git.LogOrderCommitterTime,
		Since: &today,
	}
	cIter, err := r.Log(logOptions)
	if err != nil {
		return err
	}
	var commits []plumbing.Hash
	_ = cIter.ForEach(func(commit *object.Commit) error {
		commits = append(commits, commit.Hash)
		return nil
	})
	if TagsShareDate(*t, ori) && ori.Hash != (plumbing.Hash{}) {
		// a tag already exists today: the iteration is the number of
		// commits made after that tag
		for _, commit := range commits {
			if ori.Hash == commit {
				break
			}
			t.PreInfo.Iteration++
		}
	} else {
		// first pre-release of the day: every commit made today counts
		t.PreInfo.Iteration = len(commits)
	}
	return nil
}
package hlsqlib
import (
"fmt"
"regexp"
"strings"
)
var (
	// numOps maps numeric comparison operators to predicates. The first
	// argument is the threshold parsed from the query; a comparison only
	// succeeds when the attribute value is numeric (AttrInt or AttrFloat).
	numOps = map[string]func(float64, AttrValue) bool{
		">":  func(t float64, a AttrValue) bool { f, ok := attrAsNum(a); return ok && f > t },
		">=": func(t float64, a AttrValue) bool { f, ok := attrAsNum(a); return ok && f >= t },
		"<":  func(t float64, a AttrValue) bool { f, ok := attrAsNum(a); return ok && f < t },
		"<=": func(t float64, a AttrValue) bool { f, ok := attrAsNum(a); return ok && f <= t },
		"=":  func(t float64, a AttrValue) bool { f, ok := attrAsNum(a); return ok && f == t },
		"!=": func(t float64, a AttrValue) bool { f, ok := attrAsNum(a); return ok && f != t },
	}
	// strOps maps string operators to predicates: "="/"!=" are
	// case-insensitive equality, "~"/"!~" are case-insensitive substring
	// containment, and "rlike" is a regular-expression match.
	strOps = map[string]func(string, AttrValue) bool{
		"=":  func(t string, a AttrValue) bool { s, ok := attrAsString(a); return ok && strings.EqualFold(s, t) },
		"!=": func(t string, a AttrValue) bool { s, ok := attrAsString(a); return ok && !strings.EqualFold(s, t) },
		"~": func(t string, a AttrValue) bool {
			t = strings.ToLower(t)
			s, ok := attrAsString(a)
			s = strings.ToLower(s)
			return ok && strings.Contains(s, t)
		},
		"!~": func(t string, a AttrValue) bool {
			t = strings.ToLower(t)
			s, ok := attrAsString(a)
			s = strings.ToLower(s)
			return ok && !strings.Contains(s, t)
		},
		"rlike": func(t string, a AttrValue) bool {
			// NOTE(review): the pattern is recompiled on every attribute
			// tested, and MustCompile panics on an invalid user-supplied
			// pattern — consider compiling once in ParseQuery and
			// surfacing the error there instead.
			regex := regexp.MustCompile(t)
			s, ok := attrAsString(a)
			return ok && regex.MatchString(s)
		},
	}
)
// attrAsNum converts a numeric attribute value (AttrInt or AttrFloat) to a
// float64; ok is false for every other value type.
func attrAsNum(t AttrValue) (float64, bool) {
	if i, ok := t.(AttrInt); ok {
		return float64(i), true
	}
	if f, ok := t.(AttrFloat); ok {
		return float64(f), true
	}
	return 0, false
}
// attrAsString converts a string-like attribute value (AttrString, AttrEnum,
// or AttrBool) to its string form; ok is false for every other value type.
func attrAsString(t AttrValue) (string, bool) {
	if s, ok := t.(AttrString); ok {
		return string(s), true
	}
	if e, ok := t.(AttrEnum); ok {
		return string(e), true
	}
	if b, ok := t.(AttrBool); ok {
		return b.String(), true
	}
	return "", false
}
// Query is a predicate over attributes: it returns true when the given
// attribute satisfies the parsed query expression.
type Query func(Attr) bool
// ParseQuery takes a string in the form "{attr} {op} {value}" and turns it
// into a Query func. The three parts must be single-space separated, so
// neither the attribute name nor the value may contain a space.
func ParseQuery(q string) (Query, error) {
	q = strings.TrimSpace(q)
	pieces := strings.Split(q, " ")
	if len(pieces) != 3 {
		return nil, fmt.Errorf("expected '{name} {op} {value}' got %q", q)
	}
	var (
		match func(AttrValue) bool
		t     = ParseAttr(pieces[0], pieces[2])
		op    = pieces[1]
	)
	// Pick the operator table that matches the parsed value's type: numeric
	// values consult numOps, string-like values consult strOps. match stays
	// nil when the operator is not defined for that type.
	switch v := t.Value.(type) {
	case AttrInt:
		if opFunc, ok := numOps[op]; ok {
			match = func(a AttrValue) bool { return opFunc(float64(v), a) }
		}
	case AttrFloat:
		if opFunc, ok := numOps[op]; ok {
			match = func(a AttrValue) bool { return opFunc(float64(v), a) }
		}
	case AttrBool:
		if opFunc, ok := strOps[op]; ok {
			match = func(a AttrValue) bool { return opFunc(v.String(), a) }
		}
	case AttrEnum:
		if opFunc, ok := strOps[op]; ok {
			match = func(a AttrValue) bool { return opFunc(string(v), a) }
		}
	case AttrString:
		if opFunc, ok := strOps[op]; ok {
			match = func(a AttrValue) bool { return opFunc(string(v), a) }
		}
	}
	if match == nil {
		return nil, fmt.Errorf("no operator %q defined on type %T", op, t.Value)
	}
	// the query matches when the attribute key equals the parsed name
	// (case-insensitively) and the value satisfies the operator
	return Query(func(a Attr) bool {
		return strings.EqualFold(a.Key, t.Key) && match(a.Value)
	}), nil
}
package i2ptunconf
// GetEncryptLeaseset takes an argument and a default. If the argument differs from the
// default, the argument is always returned. If the argument and default are
// the same and the key exists, the key is returned. If the key is absent, the
// default is returned.
func (c *Conf) GetEncryptLeaseset(arg, def bool, label ...string) bool {
	if arg != def {
		return arg
	}
	if c.Config == nil {
		// no config file loaded, nothing to override with
		return arg
	}
	if x, o := c.GetBool("i2cp.encryptLeaseSet", label...); o {
		return x
	}
	return arg
}

// SetEncryptLease sets EncryptLeaseSet from the config file, defaulting to
// false when the "i2cp.encryptLeaseSet" key is absent.
func (c *Conf) SetEncryptLease(label ...string) {
	if v, ok := c.GetBool("i2cp.encryptLeaseSet", label...); ok {
		c.EncryptLeaseSet = v
	} else {
		c.EncryptLeaseSet = false
	}
}

// GetLeasesetKey takes an argument and a default. If the argument differs from the
// default, the argument is always returned. If the argument and default are
// the same and the key exists, the key is returned. If the key is absent, the
// default is returned.
func (c *Conf) GetLeasesetKey(arg, def string, label ...string) string {
	if arg != def {
		return arg
	}
	if c.Config == nil {
		return arg
	}
	if x, o := c.Get("i2cp.leaseSetKey", label...); o {
		return x
	}
	return arg
}

// SetLeasesetKey sets LeaseSetKey from the config file, defaulting to the
// empty string when the "i2cp.leaseSetKey" key is absent.
func (c *Conf) SetLeasesetKey(label ...string) {
	if v, ok := c.Get("i2cp.leaseSetKey", label...); ok {
		c.LeaseSetKey = v
	} else {
		c.LeaseSetKey = ""
	}
}

// GetLeasesetPrivateKey takes an argument and a default. If the argument differs from the
// default, the argument is always returned. If the argument and default are
// the same and the key exists, the key is returned. If the key is absent, the
// default is returned.
func (c *Conf) GetLeasesetPrivateKey(arg, def string, label ...string) string {
	if arg != def {
		return arg
	}
	if c.Config == nil {
		return arg
	}
	if x, o := c.Get("i2cp.leaseSetPrivateKey", label...); o {
		return x
	}
	return arg
}

// SetLeasesetPrivateKey sets LeaseSetPrivateKey from the config file,
// defaulting to the empty string when the "i2cp.leaseSetPrivateKey" key is
// absent.
func (c *Conf) SetLeasesetPrivateKey(label ...string) {
	if v, ok := c.Get("i2cp.leaseSetPrivateKey", label...); ok {
		c.LeaseSetPrivateKey = v
	} else {
		c.LeaseSetPrivateKey = ""
	}
}

// GetLeasesetPrivateSigningKey takes an argument and a default. If the argument differs from the
// default, the argument is always returned. If the argument and default are
// the same and the key exists, the key is returned. If the key is absent, the
// default is returned.
func (c *Conf) GetLeasesetPrivateSigningKey(arg, def string, label ...string) string {
	if arg != def {
		return arg
	}
	if c.Config == nil {
		return arg
	}
	if x, o := c.Get("i2cp.leaseSetPrivateSigningKey", label...); o {
		return x
	}
	return arg
}
// SetLeasesetPrivateSigningKey sets LeaseSetPrivateSigningKey from the config
// file, defaulting to the empty string when the key is absent.
func (c *Conf) SetLeasesetPrivateSigningKey(label ...string) {
	// Bug fix: this previously read "i2cp.leaseSetPrivateKey" (a copy-paste
	// from SetLeasesetPrivateKey), silently loading the wrong key.
	if v, ok := c.Get("i2cp.leaseSetPrivateSigningKey", label...); ok {
		c.LeaseSetPrivateSigningKey = v
	} else {
		c.LeaseSetPrivateSigningKey = ""
	}
}
// GetLeaseSetEncType takes an argument and a default. If the argument differs from the
// default, the argument is always returned. If the argument and default are
// the same and the key exists, the key is returned. If the key is absent, the
// default is returned.
func (c *Conf) GetLeaseSetEncType(arg, def string, label ...string) string {
	if arg != def {
		return arg
	}
	if c.Config == nil {
		return arg
	}
	if x, o := c.Get("i2cp.leaseSetEncType", label...); o {
		return x
	}
	return arg
}

// SetLeaseSetEncType sets LeaseSetEncType from the config file, defaulting to
// the empty string when the "i2cp.leaseSetEncType" key is absent.
func (c *Conf) SetLeaseSetEncType(label ...string) {
	if v, ok := c.Get("i2cp.leaseSetEncType", label...); ok {
		c.LeaseSetEncType = v
	} else {
		c.LeaseSetEncType = ""
	}
}
package ptree
import (
"math"
"github.com/tidwall/geoindex/child"
)
const maxEntries = 256                   // max number of entries per node
const minEntries = maxEntries * 40 / 100 // min number of entries per node (compaction threshold)
const maxHeight = 16                     // a limit is needed to avoid infinite splits
const rows = 16                          // 16 = 256 child nodes, 8 = 64, 4 = 16, 2 = 4

// item is one stored point together with its associated user data.
type item struct {
	point [2]float64
	data  interface{}
}

// node is one tree node: a leaf holding items when nodes is nil, otherwise an
// inner node whose rows x rows child grid (slots allocated lazily) partitions
// its rectangle. count is the number of items in the whole subtree.
type node struct {
	nodes *[rows * rows]*node
	count int
	items []item
}

// PTree is a tree for storing points. min/max is the fixed maximum bounding
// rectangle every stored point must fall inside.
type PTree struct {
	min  [2]float64
	max  [2]float64
	root node
}
// New returns a new PTree with the provided maximum bounding rectangle.
func New(min, max [2]float64) *PTree {
	return &PTree{min: min, max: max}
}

// InBounds return true if the point can be contained in the tree's maximum
// bounding rectangle (boundaries inclusive).
func (tr *PTree) InBounds(point [2]float64) bool {
	return contains(tr.min, tr.max, point)
}

// Insert a point into the tree. It panics when the point lies outside the
// tree's maximum bounding rectangle.
func (tr *PTree) Insert(point [2]float64, data interface{}) {
	if !tr.InBounds(point) {
		panic("point out of bounds")
	}
	tr.root.insert(tr.min, tr.max, point, data, 1)
}
// split converts a full leaf into an inner node by allocating the child grid
// and redistributing the leaf's items into it; count is rebuilt by the
// re-insertions.
func (n *node) split(nmin, nmax [2]float64, depth int) {
	n.nodes = new([rows * rows]*node)
	n.count = 0
	for _, item := range n.items {
		n.insert(nmin, nmax, item.point, item.data, depth)
	}
	n.items = nil
}
// contains reports whether pt lies inside the axis-aligned rectangle
// [min, max], boundaries inclusive. (Kept in negated form so NaN coordinates
// behave the same as the original: all comparisons false means "inside".)
func contains(min, max, pt [2]float64) bool {
	outsideX := pt[0] < min[0] || pt[0] > max[0]
	outsideY := pt[1] < min[1] || pt[1] > max[1]
	return !(outsideX || outsideY)
}
// calcNodeIndex maps child grid coordinates (x, y) to the flat slot index in
// a node's child array (bottom-up row-major order).
func calcNodeIndex(x, y int) int {
	return y*rows + x
}
// fmin returns the smaller of a and b. When a is NaN the comparison is
// false, so b is returned.
func fmin(a, b float64) float64 {
	if !(a < b) {
		return b
	}
	return a
}

// fmax returns the larger of a and b. When a is NaN the comparison is
// false, so b is returned.
func fmax(a, b float64) float64 {
	if !(a > b) {
		return b
	}
	return a
}
// insert adds a point to the subtree rooted at n, whose rectangle is
// [nmin, nmax]. Leaves split once they exceed maxEntries, except beyond
// maxHeight where they grow unbounded to avoid infinite subdivision of
// coincident points.
func (n *node) insert(nmin, nmax, point [2]float64, data interface{}, depth int,
) {
	if n.nodes == nil {
		if len(n.items) < maxEntries || depth > maxHeight {
			n.items = append(n.items, item{point: point, data: data})
			n.count++
			return
		}
		n.split(nmin, nmax, depth)
	}
	// choose the coordinates of the child node to insert into
	cx := int((point[0] - nmin[0]) / (nmax[0] - nmin[0]) * rows) // node x index
	cy := int((point[1] - nmin[1]) / (nmax[1] - nmin[1]) * rows) // node y index
	// Bug fix: a point lying exactly on the max edge (allowed by InBounds)
	// maps to index == rows, which indexed the child grid out of range;
	// clamp like search() already does.
	if cx >= rows {
		cx = rows - 1
	}
	if cy >= rows {
		cy = rows - 1
	}
	cidx, cmin, cmax := n.getChildNodeIndex(nmin, nmax, cx, cy)
	// insert into the child, allocating its slot lazily
	if n.nodes[cidx] == nil {
		n.nodes[cidx] = new(node)
	}
	n.nodes[cidx].insert(cmin, cmax, point, data, depth+1)
	n.count++
}
// Search for points in the tree that are within the provided rectangle.
// Iteration stops early when iter returns false.
func (tr *PTree) Search(min, max [2]float64,
	iter func(point [2]float64, data interface{}) bool,
) {
	tr.root.search(tr.min, tr.max, min, max, iter)
}

// search visits every item of the subtree rooted at n (rectangle [nmin, nmax])
// that lies inside the search rectangle [smin, smax]; it returns false as
// soon as iter returns false so the early stop propagates upward.
func (n *node) search(
	nmin, nmax [2]float64, // node rectangle
	smin, smax [2]float64, // search rectangle
	iter func(point [2]float64, data interface{}) bool,
) bool {
	if n.nodes == nil {
		// leaf: test each stored item against the search rectangle
		for _, item := range n.items {
			if contains(smin, smax, item.point) {
				if !iter(item.point, item.data) {
					return false
				}
			}
		}
		return true
	}
	// clip the search rectangle to this node's rectangle
	smin[0] = fmax(smin[0], nmin[0])
	smin[1] = fmax(smin[1], nmin[1])
	smax[0] = fmin(smax[0], nmax[0])
	smax[1] = fmin(smax[1], nmax[1])
	// choose the coordinate range of child nodes overlapping the search rect
	cx1 := int((smin[0] - nmin[0]) / (nmax[0] - nmin[0]) * rows) // x min index
	cy1 := int((smin[1] - nmin[1]) / (nmax[1] - nmin[1]) * rows) // y min index
	cx2 := int((smax[0] - nmin[0]) / (nmax[0] - nmin[0]) * rows) // x max index
	cy2 := int((smax[1] - nmin[1]) / (nmax[1] - nmin[1]) * rows) // y max index
	// clip the max boundaries of the coordinates (a max edge exactly on the
	// node boundary maps to rows)
	if cx2 >= rows {
		cx2 = rows - 1
	}
	if cy2 >= rows {
		cy2 = rows - 1
	}
	// scan over all child nodes within the coordinates range
	for cy := cy1; cy <= cy2; cy++ {
		for cx := cx1; cx <= cx2; cx++ {
			cidx, cmin, cmax := n.getChildNodeIndex(nmin, nmax, cx, cy)
			cn := n.nodes[cidx]
			if cn != nil {
				if !cn.search(cmin, cmax, smin, smax, iter) {
					return false
				}
			}
		}
	}
	return true
}

// Delete a point for the tree. The point and data must both match an item
// for it to be removed.
func (tr *PTree) Delete(point [2]float64, data interface{}) {
	tr.root.delete(tr.min, tr.max, point, data)
}
// delete removes one item matching point and data from the subtree rooted at
// n (rectangle [nmin, nmax]); it reports whether an item was removed. When a
// subtree shrinks below minEntries it is compacted back into a single leaf.
func (n *node) delete(nmin, nmax, point [2]float64, data interface{}) bool {
	if n.nodes == nil {
		for i := 0; i < len(n.items); i++ {
			if n.items[i].point == point && n.items[i].data == data {
				// swap-remove; clear the vacated slot's data so the
				// backing array does not retain it
				n.items[i] = n.items[len(n.items)-1]
				n.items[len(n.items)-1].data = nil
				n.items = n.items[:len(n.items)-1]
				n.count--
				return true
			}
		}
		return false
	}
	// choose the coordinates of the child node to delete from
	cx := int((point[0] - nmin[0]) / (nmax[0] - nmin[0]) * rows) // node x index
	cy := int((point[1] - nmin[1]) / (nmax[1] - nmin[1]) * rows) // node y index
	// Bug fix: a point exactly on the max edge maps to index == rows; clamp
	// to stay inside the child grid (mirrors the clipping done in search).
	if cx >= rows {
		cx = rows - 1
	}
	if cy >= rows {
		cy = rows - 1
	}
	cidx, cmin, cmax := n.getChildNodeIndex(nmin, nmax, cx, cy)
	cn := n.nodes[cidx]
	if cn == nil {
		// Bug fix: the item cannot exist here; previously this path fell
		// through, decremented n.count anyway, and reported success,
		// corrupting subtree counts for points that were never stored.
		return false
	}
	if !cn.delete(cmin, cmax, point, data) {
		return false
	}
	if cn.count == 0 {
		n.nodes[cidx] = nil
	}
	n.count--
	if n.count < minEntries {
		// compact the sparse subtree back into a single leaf
		var items []item
		n.items = n.gather(items)
		n.nodes = nil
	}
	return true
}
// gather appends every item stored in the subtree rooted at n to items and
// returns the extended slice.
func (n *node) gather(items []item) []item {
	items = append(items, n.items...)
	if n.nodes == nil {
		return items
	}
	for _, cn := range n.nodes {
		if cn != nil {
			items = cn.gather(items)
		}
	}
	return items
}
// Len returns the number of points in the tree.
func (tr *PTree) Len() int {
	return tr.root.count
}

// Scan all items in tree. Iteration stops early when iter returns false.
func (tr *PTree) Scan(iter func(point [2]float64, data interface{}) bool) {
	tr.root.scan(iter)
}
// scan visits every item in the subtree rooted at n; it returns false as
// soon as iter returns false so the early stop propagates upward.
func (n *node) scan(iter func(point [2]float64, data interface{}) bool) bool {
	if n.nodes == nil {
		for i := 0; i < len(n.items); i++ {
			if !iter(n.items[i].point, n.items[i].data) {
				return false
			}
		}
		return true
	}
	for i := 0; i < len(n.nodes); i++ {
		// Bug fix: child slots are allocated lazily and nilled out on
		// delete, so a slot may legitimately be nil; the original
		// dereferenced n.nodes[i].count unconditionally and panicked on
		// sparse trees.
		if n.nodes[i] != nil && n.nodes[i].count > 0 {
			if !n.nodes[i].scan(iter) {
				return false
			}
		}
	}
	return true
}
// expand grows rectangle A ([amin, amax]) to also cover rectangle B
// ([bmin, bmax]) and returns the combined bounds.
func expand(amin, amax, bmin, bmax [2]float64) (min, max [2]float64) {
	min, max = amin, amax
	for axis := 0; axis < 2; axis++ {
		if bmin[axis] < min[axis] {
			min[axis] = bmin[axis]
		}
		if bmax[axis] > max[axis] {
			max[axis] = bmax[axis]
		}
	}
	return min, max
}
// MinBounds returns the minimum bounding rectangle of the stored points
// (the zero rectangle when the tree is empty).
func (tr *PTree) MinBounds() (min, max [2]float64) {
	if tr.Len() == 0 {
		return
	}
	min[0] = tr.root.minValue(0, math.Inf(+1))
	min[1] = tr.root.minValue(1, math.Inf(+1))
	max[0] = tr.root.maxValue(0, math.Inf(-1))
	max[1] = tr.root.maxValue(1, math.Inf(-1))
	return min, max
}
// minValue returns the smaller of value and the smallest coordinate (along
// axis coord: 0 = x, 1 = y) of any item in the subtree rooted at n.
func (n *node) minValue(coord int, value float64) float64 {
	if n.nodes == nil {
		for _, item := range n.items {
			if item.point[coord] < value {
				value = item.point[coord]
			}
		}
		return value
	}
	// Walk grid lines in ascending order along the requested axis. All items
	// in later lines have strictly larger coordinates, so the subtree minimum
	// lies in the first non-empty line and we can stop after it.
	// Bug fix: the loop previously broke whenever `value` was finite, which
	// includes values inherited from sibling subtrees — an inner node with an
	// empty first line then skipped its own items entirely and could miss a
	// smaller coordinate. Break on the first non-empty line instead.
	for ci := 0; ci < rows; ci++ {
		lineHasItems := false
		for cj := 0; cj < rows; cj++ {
			cx, cy := ci, cj
			if coord == 1 {
				cx, cy = cy, cx
			}
			cn := n.nodes[calcNodeIndex(cx, cy)]
			if cn != nil && cn.count > 0 {
				lineHasItems = true
				value = cn.minValue(coord, value)
			}
		}
		if lineHasItems {
			break
		}
	}
	return value
}

// maxValue returns the larger of value and the largest coordinate (along
// axis coord: 0 = x, 1 = y) of any item in the subtree rooted at n.
func (n *node) maxValue(coord int, value float64) float64 {
	if n.nodes == nil {
		for _, item := range n.items {
			if item.point[coord] > value {
				value = item.point[coord]
			}
		}
		return value
	}
	// Symmetric to minValue: walk lines in descending order and stop after
	// the first non-empty one, which bounds the subtree maximum.
	for ci := rows - 1; ci >= 0; ci-- {
		lineHasItems := false
		for cj := rows - 1; cj >= 0; cj-- {
			cx, cy := ci, cj
			if coord == 1 {
				cx, cy = cy, cx
			}
			cn := n.nodes[calcNodeIndex(cx, cy)]
			if cn != nil && cn.count > 0 {
				lineHasItems = true
				value = cn.maxValue(coord, value)
			}
		}
		if lineHasItems {
			break
		}
	}
	return value
}
// childNode carries a node pointer together with its rectangle so the
// geoindex child traversal can recurse without recomputing bounds.
type childNode struct {
	min, max [2]float64
	node     *node
}

// Children returns all children for parent node. If parent node is nil
// then the root nodes should be returned.
// The reuse buffer is an empty length slice that can optionally be used
// to avoid extra allocations.
func (tr *PTree) Children(parent interface{}, reuse []child.Child,
) (children []child.Child) {
	children = reuse[:0]
	var nmin, nmax [2]float64
	var n *node
	if parent == nil {
		// the tree itself is presented as the single root child
		children = append(children, child.Child{
			Min: tr.min, Max: tr.max,
			Data: childNode{tr.min, tr.max, &tr.root},
			Item: false,
		})
		return children
	}
	// parent is always a childNode produced by a previous call
	cnode := parent.(childNode)
	nmin, nmax = cnode.min, cnode.max
	n = cnode.node
	if n.nodes == nil {
		// leaf: each stored item becomes a zero-area child
		for _, item := range n.items {
			children = append(children, child.Child{
				Min: item.point, Max: item.point,
				Data: item.data, Item: true,
			})
		}
	} else {
		// inner node: emit every non-empty child cell of the grid
		for cy := 0; cy < rows; cy++ {
			for cx := 0; cx < rows; cx++ {
				cidx, cmin, cmax := n.getChildNodeIndex(nmin, nmax, cx, cy)
				cn := n.nodes[cidx]
				if cn == nil || cn.count == 0 {
					continue
				}
				children = append(children, child.Child{
					Min: cmin, Max: cmax,
					Data: childNode{cmin, cmax, cn},
					Item: false,
				})
			}
		}
	}
	return children
}
// getChildNodeIndex returns the flat slot index and the rectangle of the
// child cell at grid coordinates (cx, cy) inside node rectangle [nmin, nmax].
func (n *node) getChildNodeIndex(nmin, nmax [2]float64, cx, cy int,
) (cidx int, cmin, cmax [2]float64) {
	cnw := (nmax[0] - nmin[0]) / rows // width of each child cell
	cnh := (nmax[1] - nmin[1]) / rows // height of each child cell
	cmin = [2]float64{
		cnw*float64(cx) + nmin[0], // child min x
		cnh*float64(cy) + nmin[1], // child min y
	}
	cmax = [2]float64{
		cmin[0] + cnw, // child max x
		cmin[1] + cnh, // child max y
	}
	cidx = calcNodeIndex(cx, cy)
	return
}
package algebra
import (
"math"
)
// Matrix is a dense row-major matrix: tuples[row][col].
type Matrix struct {
	tuples [][]float64
}

// NewMatrix returns a new matrix with `col` columns and `row` rows, filled
// row-by-row from data. An ExpectedDimension error is returned when
// len(data) != col*row.
func NewMatrix(col, row int, data ...float64) (*Matrix, error) {
	if len(data) != col*row {
		return nil, ExpectedDimension(col * row)
	}
	tuples := make([][]float64, 0, 0)
	for i := 0; i < row; i++ {
		rows := make([]float64, col, col)
		for j := 0; j < col; j++ {
			rows[j] = data[i*col+j]
		}
		tuples = append(tuples, rows)
	}
	return &Matrix{tuples: tuples}, nil
}

// NewEmptyMatrix returns a new zero matrix with `col` columns and `row` rows.
func NewEmptyMatrix(col, row int) *Matrix {
	tuples := make([][]float64, 0, 0)
	for i := 0; i < row; i++ {
		rows := make([]float64, col, col)
		tuples = append(tuples, rows)
	}
	return &Matrix{tuples: tuples}
}

// Get returns the slices that contain the matrix data (row-major). The
// slices are not copied, so mutations are visible to the matrix.
func (m *Matrix) Get() [][]float64 {
	return m.tuples
}
// At returns the value at column col and row row (both 0-based). An
// InvalidMatrixIndex error is returned when either index is out of range.
func (m *Matrix) At(col, row int) (float64, error) {
	// Bug fix: the bounds checks used ">" instead of ">=", so
	// col == numCols / row == numRows slipped past the check and panicked
	// on the slice index below.
	if col < 0 || col >= getNumCols(m) {
		return 0.0, InvalidMatrixIndex(col)
	}
	if row < 0 || row >= getNumRows(m) {
		return 0.0, InvalidMatrixIndex(row)
	}
	return m.tuples[row][col], nil
}
// Equals reports whether m and m2 have the same dimensions and the same
// entries, compared with the package epsilon tolerance.
func (m *Matrix) Equals(m2 *Matrix) bool {
	if getNumCols(m) != getNumCols(m2) || getNumRows(m) != getNumRows(m2) {
		return false
	}
	for row := 0; row < getNumRows(m); row++ {
		for col := 0; col < getNumCols(m); col++ {
			// Bug fix: At takes (col, row); the arguments were previously
			// swapped, which indexed out of range on non-square matrices.
			v, err := m.At(col, row)
			if err != nil {
				return false
			}
			v2, err := m2.At(col, row)
			if err != nil {
				return false
			}
			if !equals(v, v2) {
				return false
			}
		}
	}
	return true
}
// Multiply returns the matrix product m1 x m2 (rows(m1) x cols(m2)). It
// panics when the inner dimensions disagree (cols(m1) != rows(m2)) or when
// an internal lookup fails.
func Multiply(m1 *Matrix, m2 *Matrix) *Matrix {
	if getNumCols(m1) != getNumRows(m2) {
		panic(ExpectedDimension(len(m1.Get()[0])))
	}
	res := make([]float64, 0, getNumRows(m1)*getNumCols(m2))
	for i := 0; i < getNumRows(m1); i++ {
		for j := 0; j < getNumCols(m2); j++ {
			// dot product of row i of m1 with column j of m2
			v := 0.0
			for k := 0; k < getNumRows(m2); k++ {
				Aik, err := m1.At(k, i)
				if err != nil {
					panic(err)
				}
				Bkj, err := m2.At(j, k)
				if err != nil {
					panic(err)
				}
				v += Aik * Bkj
			}
			res = append(res, v)
		}
	}
	// res is row-major with cols(m2) entries per row.
	// Bug fix: NewMatrix takes (col, row); the arguments were previously
	// (rows(m1), cols(m2)), producing a transposed shape for non-square
	// products (square products were unaffected).
	mMultiplied, err := NewMatrix(getNumCols(m2), getNumRows(m1), res...)
	if err != nil {
		panic(err)
	}
	return mMultiplied
}
// Multiply returns the matrix product m1 x m2, with the receiver on the
// left. It is the method form of the package-level Multiply and shares its
// semantics, including panicking on a dimension mismatch.
func (m1 *Matrix) Multiply(m2 *Matrix) *Matrix {
	// The body previously duplicated the package-level Multiply verbatim;
	// delegating keeps the two implementations from drifting apart.
	return Multiply(m1, m2)
}
// MultiplyByVec returns the matrix-vector product m * v, treating v as a
// column vector. It panics when cols(m) != len(v).
func (m *Matrix) MultiplyByVec(v *Vector) *Vector {
	if getNumCols(m) != len(v.tuple) {
		panic(ExpectedDimension(getNumRows(m)))
	}
	res := make([]float64, 0, getNumRows(m))
	for i := 0; i < getNumRows(m); i++ {
		// dot product of row i with v
		val := 0.0
		for j := 0; j < getNumCols(m); j++ {
			a, err := m.At(j, i)
			if err != nil {
				panic(err)
			}
			val += a * v.tuple[j]
		}
		res = append(res, val)
	}
	return &Vector{tuple: res}
}
// IdentityMatrix returns the size x size identity matrix.
func IdentityMatrix(size int) *Matrix {
	// Pre-size the zero-filled backing slice and set only the diagonal,
	// instead of branching on every cell.
	res := make([]float64, size*size)
	for i := 0; i < size; i++ {
		res[i*size+i] = 1.0
	}
	iden, err := NewMatrix(size, size, res...)
	if err != nil {
		panic(err)
	}
	return iden
}
// Transpose returns a new matrix that is the transpose of m
// (cols(m) rows x rows(m) columns).
func (m *Matrix) Transpose() *Matrix {
	rows := getNumRows(m)
	cols := getNumCols(m)
	res := make([]float64, rows*cols)
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			val, err := m.At(j, i)
			if err != nil {
				panic(err)
			}
			// Element (i, j) of m lands at row j, column i of the
			// transpose, i.e. flat index j*rows+i of a matrix with `rows`
			// columns.
			// Bug fix: the index was j*cols+i and the result was built as
			// NewMatrix(cols, rows), which panicked (or produced the wrong
			// shape) for non-square matrices; square behavior is unchanged.
			res[j*rows+i] = val
		}
	}
	mTranspose, err := NewMatrix(rows, cols, res...)
	if err != nil {
		panic(err)
	}
	return mTranspose
}
// Determinant returns the determinant of a square matrix (given as row-major
// slices) via cofactor (Laplace) expansion along the first row. An
// ExpectedSquareMatrix error is returned for non-square input.
// Note: the recursion is O(n!) — fine for the small matrices used here.
func Determinant(m [][]float64) (float64, error) {
	rows := len(m)
	cols := len(m[0])
	if rows != cols {
		return 0.0, ExpectedSquareMatrix([2]int{rows, cols})
	}
	if rows == 1 {
		return m[0][0], nil
	}
	if rows == 2 {
		// base case: ad - bc
		a := m[0][0]
		b := m[0][1]
		c := m[1][0]
		d := m[1][1]
		return a*d - b*c, nil
	}
	// expand along the first row with alternating signs; each minor drops
	// the first row and column i
	s := 0.0
	for i := 0; i < cols; i++ {
		sm := subMatrix(m[1:][:], i)
		z, err := Determinant(sm)
		if err == nil {
			if i%2 != 0 {
				s -= m[0][i] * z
			} else {
				s += m[0][i] * z
			}
		}
	}
	return s, nil
}
// subMatrix returns a copy of m with column p removed from every row.
func subMatrix(m [][]float64, p int) [][]float64 {
	out := make([][]float64, len(m))
	for i, row := range m {
		kept := make([]float64, 0, len(row))
		for j, v := range row {
			if j != p {
				kept = append(kept, v)
			}
		}
		out[i] = kept
	}
	return out
}
// IsInvertible reports whether m is a square matrix with a non-zero
// determinant, i.e. whether Inverse can succeed.
func (m *Matrix) IsInvertible() bool {
	if getNumCols(m) != getNumRows(m) {
		return false
	}
	det, err := Determinant(m.Get())
	return err == nil && det != 0
}
// Inverse returns the inverse of the matrix if applicable, otherwise nil.
// It uses the closed form for 2x2 matrices and the adjugate (transposed
// cofactor matrix divided by the determinant) for larger ones.
func (m *Matrix) Inverse() *Matrix {
	if !m.IsInvertible() {
		return nil
	}
	det, err := Determinant(m.Get())
	if err != nil {
		panic(err)
		return nil
	}
	if getNumRows(m) == 2 {
		// closed form for 2x2: [[d, -b], [-c, a]] / det
		a := m.Get()[0][0]
		b := m.Get()[0][1]
		c := m.Get()[1][0]
		d := m.Get()[1][1]
		mInverse, err := NewMatrix(getNumCols(m), getNumRows(m), d/det, -b/det, -c/det, a/det)
		if err != nil {
			panic(err)
		}
		return mInverse
	}
	res := make([]float64, 0, 0)
	// build the cofactor matrix: entry (i, j) is +/- the determinant of the
	// minor obtained by deleting row i and column j (the matrix is square
	// here, so both loops run over getNumRows)
	for i := 0; i < getNumRows(m); i++ {
		for j := 0; j < getNumRows(m); j++ {
			preDeletedRow := m.Get()[0:i][:]
			postDeletedRow := m.Get()[i+1:][:]
			tempMatrix := make([][]float64, 0, 0)
			tempMatrix = append(tempMatrix, preDeletedRow...)
			tempMatrix = append(tempMatrix, postDeletedRow...)
			sm := subMatrix(tempMatrix, j)
			detSm, err := Determinant(sm)
			if err != nil {
				panic(err)
				return nil
			}
			// checkerboard sign pattern
			d := 1.0
			if (i+j)%2 == 1 {
				d = -1.0
			}
			res = append(res, d*detSm/det)
		}
	}
	mInverse, err := NewMatrix(getNumCols(m), getNumRows(m), res...)
	if err != nil {
		panic(err)
		return nil
	}
	// transposing the cofactor matrix yields the adjugate, hence the inverse
	return mInverse.Transpose()
}
// TranslationMatrix returns a 4x4 translation matrix for 3d vectors/points.
func TranslationMatrix(x, y, z float64) *Matrix {
	m, err := NewMatrix(4, 4,
		1, 0, 0, x,
		0, 1, 0, y,
		0, 0, 1, z,
		0, 0, 0, 1)
	if err != nil {
		panic(err)
	}
	return m
}

// ScalingMatrix returns a 4x4 scaling matrix for 3d vectors/points.
func ScalingMatrix(x, y, z float64) *Matrix {
	m, err := NewMatrix(4, 4,
		x, 0, 0, 0,
		0, y, 0, 0,
		0, 0, z, 0,
		0, 0, 0, 1)
	if err != nil {
		panic(err)
	}
	return m
}

// RotationX returns a 4x4 matrix that rotates a 3d vector/point by r radians
// around the x-axis.
func RotationX(r float64) *Matrix {
	m, err := NewMatrix(4, 4,
		1, 0, 0, 0,
		0, math.Cos(r), -math.Sin(r), 0,
		0, math.Sin(r), math.Cos(r), 0,
		0, 0, 0, 1)
	if err != nil {
		panic(err)
	}
	return m
}

// RotationY returns a 4x4 matrix that rotates a 3d vector/point by r radians
// around the y-axis.
func RotationY(r float64) *Matrix {
	m, err := NewMatrix(4, 4,
		math.Cos(r), 0, math.Sin(r), 0,
		0, 1, 0, 0,
		-math.Sin(r), 0, math.Cos(r), 0,
		0, 0, 0, 1)
	if err != nil {
		panic(err)
	}
	return m
}

// RotationZ returns a 4x4 matrix that rotates a 3d vector/point by r radians
// around the z-axis.
func RotationZ(r float64) *Matrix {
	m, err := NewMatrix(4, 4,
		math.Cos(r), -math.Sin(r), 0, 0,
		math.Sin(r), math.Cos(r), 0, 0,
		0, 0, 1, 0,
		0, 0, 0, 1)
	if err != nil {
		panic(err)
	}
	return m
}

// Shearing returns a 4x4 shear matrix; each parameter moves one coordinate
// in proportion to another (e.g. xy shears x in proportion to y).
func Shearing(xy, xz, yx, yz, zx, zy float64) *Matrix {
	m, err := NewMatrix(4, 4,
		1, xy, xz, 0,
		yx, 1, yz, 0,
		zx, zy, 1, 0,
		0, 0, 0, 1)
	if err != nil {
		panic(err)
	}
	return m
}
// ViewTransform returns the world-to-view transformation matrix for a camera
// positioned at (fromX, fromY, fromZ), looking towards (toX, toY, toZ), with
// (upX, upY, upZ) giving the approximate up direction.
// It panics if the forward or up vector cannot be normalized, if a cross
// product fails, or if the matrix cannot be constructed.
func ViewTransform(fromX, fromY, fromZ, toX, toY, toZ, upX, upY, upZ float64) *Matrix {
	forward, err := NewVector(toX-fromX, toY-fromY, toZ-fromZ).Normalize()
	if err != nil {
		panic(err)
	}
	upn, err := NewVector(upX, upY, upZ).Normalize()
	if err != nil {
		panic(err)
	}
	// left and trueUp complete the orthogonal camera basis. Previously these
	// errors were silently discarded; a failed cross product is fatal here.
	left, err := CrossProduct(forward, upn)
	if err != nil {
		panic(err)
	}
	trueUp, err := CrossProduct(left, forward)
	if err != nil {
		panic(err)
	}
	// Orientation matrix rows: left, trueUp, -forward, homogeneous row.
	matValues := make([]float64, 0, 16)
	matValues = append(matValues, left.Get()...)
	matValues = append(matValues, trueUp.Get()...)
	matValues = append(matValues, forward.Negate().Get()...)
	matValues = append(matValues, 0, 0, 0, 1)
	m, err := NewMatrix(4, 4, matValues...)
	if err != nil {
		panic(err)
	}
	// Move the world so the camera sits at the origin, then orient it.
	return Multiply(m, TranslationMatrix(-fromX, -fromY, -fromZ))
}
// stack is a simple LIFO of float64 values used by matrix functions/methods.
type stack []float64

// isEmpty reports whether the stack holds no values.
func (s *stack) isEmpty() bool {
	return len(*s) == 0
}

// push places n on top of the stack.
func (s *stack) push(n float64) {
	*s = append(*s, n)
}

// pop removes and returns the top value. The boolean result is false when
// the stack is empty, in which case the returned value is zero.
func (s *stack) pop() (float64, bool) {
	if s.isEmpty() {
		return 0, false
	}
	last := len(*s) - 1
	top := (*s)[last]
	*s = (*s)[:last]
	return top, true
}

// ToSlice exposes the stack's contents as a plain slice, bottom first.
func (s *stack) ToSlice() []float64 {
	return *s
}
// other helper functions

// getNumRows returns the number of rows in m.
func getNumRows(m *Matrix) int {
	rows := m.Get()
	return len(rows)
}

// getNumCols returns the number of columns in m, taken from its first row.
func getNumCols(m *Matrix) int {
	firstRow := m.Get()[0]
	return len(firstRow)
}
// equals reports whether a and b are equal to within a fixed absolute
// tolerance, compensating for floating point rounding error.
func equals(a, b float64) bool {
	// The tolerance never changes at runtime, so declare it as a constant.
	const epsilon = 0.00001
	return math.Abs(a-b) < epsilon
}
package fastmath
// Fast, efficient 8-bit scaling functions specifically
// designed for high-performance LED programming.
// Scale8 scales one byte by a second one, which is treated as the numerator
// of a fraction whose denominator is 256. In other words, it computes
// i * (scale / 256).
func Scale8(i uint8, scale uint8) uint8 {
	product := uint16(i) * (uint16(scale) + 1)
	return uint8(product >> 8)
}
// Scale8Video is the "video" version of Scale8. It guarantees the output is
// zero only if one or both of the inputs are zero; if both inputs are
// non-zero, the output is guaranteed non-zero. This makes for better
// 'video'/LED dimming, at the cost of several additional cycles.
func Scale8Video(i, scale uint8) uint8 {
	scaled := (uint16(i) * uint16(scale)) >> 8
	if i != 0 && scale != 0 {
		scaled++
	}
	return uint8(scaled)
}
// NScale8x3 scales three one byte values in place by a fourth one, which is
// treated as the numerator of a fraction whose denominator is 256. In other
// words, it computes r,g,b * (scale / 256).
func NScale8x3(r, g, b *uint8, scale uint8) {
	factor := uint16(scale) + 1
	for _, channel := range []*uint8{r, g, b} {
		*channel = uint8((uint16(*channel) * factor) >> 8)
	}
}
// NScale8x3Video scales three one byte values in place by a fourth one, which
// is treated as the numerator of a fraction whose denominator is 256. In
// other words, it computes r,g,b * (scale / 256), ensuring that non-zero
// values passed in remain non-zero for any non-zero scale.
func NScale8x3Video(r, g, b *uint8, scale uint8) {
	var bump uint16
	if scale != 0 {
		bump = 1
	}
	scaleChannel := func(channel *uint8) {
		// Channels that are already zero stay zero.
		if *channel == 0 {
			return
		}
		*channel = uint8((uint16(*channel)*uint16(scale))>>8 + bump)
	}
	scaleChannel(r)
	scaleChannel(g)
	scaleChannel(b)
}
// Scale16By8 scales a 16-bit unsigned value by an 8-bit value, considered as
// the numerator of a fraction whose denominator is 256. In other words, it
// computes i * (scale / 256).
func Scale16By8(i uint16, scale uint8) uint16 {
	factor := uint32(scale) + 1
	return uint16((uint32(i) * factor) >> 8)
}
// Scale16 scales a 16-bit unsigned value by a 16-bit value, considered as
// the numerator of a fraction whose denominator is 65536. In other words,
// it computes i * (scale / 65536).
func Scale16(i, scale uint16) uint16 {
	product := uint32(i) * (uint32(scale) + 1)
	return uint16(product / 65536)
}
package conf
// Float64Var defines a float64 flag and environment variable with specified name, default value, and usage string.
// The argument p points to a float64 variable in which to store the value of the flag and/or environment variable.
func (c *Configurator) Float64Var(p *float64, name string, value float64, usage string) {
	// Register with both sources; which one wins is decided when the
	// configurator parses its inputs.
	c.env().Float64Var(p, name, value, usage)
	c.flag().Float64Var(p, name, value, usage)
}
// Float64 defines a float64 flag and environment variable with specified name, default value, and usage string.
// The return value is the address of a float64 variable that stores the value of the flag and/or environment variable.
func (c *Configurator) Float64(name string, value float64, usage string) *float64 {
	var v float64
	c.Float64Var(&v, name, value, usage)
	return &v
}
// Float64VarE defines a float64 environment variable with specified name, default value, and usage string.
// The argument p points to a float64 variable in which to store the value of the environment variable.
// Unlike Float64Var, no command-line flag is registered.
func (c *Configurator) Float64VarE(p *float64, name string, value float64, usage string) {
	c.env().Float64Var(p, name, value, usage)
}
// Float64E defines a float64 environment variable with specified name, default value, and usage string.
// The return value is the address of a float64 variable that stores the value of the environment variable.
func (c *Configurator) Float64E(name string, value float64, usage string) *float64 {
	var v float64
	c.Float64VarE(&v, name, value, usage)
	return &v
}
// Float64VarF defines a float64 flag with specified name, default value, and usage string.
// The argument p points to a float64 variable in which to store the value of the flag.
// Unlike Float64Var, no environment variable is registered.
func (c *Configurator) Float64VarF(p *float64, name string, value float64, usage string) {
	c.flag().Float64Var(p, name, value, usage)
}
// Float64F defines a float64 flag with specified name, default value, and usage string.
// The return value is the address of a float64 variable that stores the value of the flag.
func (c *Configurator) Float64F(name string, value float64, usage string) *float64 {
	var v float64
	c.Float64VarF(&v, name, value, usage)
	return &v
}
// Float64Var defines a float64 flag and environment variable with specified name, default value, and usage string.
// The argument p points to a float64 variable in which to store the value of the flag and/or environment variable.
// It operates on the package-level Global configurator.
func Float64Var(p *float64, name string, value float64, usage string) {
	Global.Float64Var(p, name, value, usage)
}
// Float64 defines a float64 flag and environment variable with specified name, default value, and usage string.
// The return value is the address of a float64 variable that stores the value of the flag and/or environment variable.
// It operates on the package-level Global configurator.
func Float64(name string, value float64, usage string) *float64 {
	return Global.Float64(name, value, usage)
}
// Float64VarE defines a float64 environment variable with specified name, default value, and usage string.
// The argument p points to a float64 variable in which to store the value of the environment variable.
// It operates on the package-level Global configurator.
func Float64VarE(p *float64, name string, value float64, usage string) {
	Global.Float64VarE(p, name, value, usage)
}
// Float64E defines a float64 environment variable with specified name, default value, and usage string.
// The return value is the address of a float64 variable that stores the value of the environment variable.
// It operates on the package-level Global configurator.
func Float64E(name string, value float64, usage string) *float64 {
	return Global.Float64E(name, value, usage)
}
// Float64VarF defines a float64 flag with specified name, default value, and usage string.
// The argument p points to a float64 variable in which to store the value of the flag.
// It operates on the package-level Global configurator.
func Float64VarF(p *float64, name string, value float64, usage string) {
	Global.Float64VarF(p, name, value, usage)
}
// Float64F defines a float64 flag with specified name, default value, and usage string.
// The return value is the address of a float64 variable that stores the value of the flag.
func Float64F(name string, value float64, usage string) *float64 {
return Global.Float64F(name, value, usage)
} | value_float64.go | 0.892243 | 0.826747 | value_float64.go | starcoder |
package proto
import (
"github.com/lyraproj/data-protobuf/datapb"
"github.com/lyraproj/pcore/px"
"github.com/lyraproj/pcore/types"
)
// A Consumer consumes values and produces a datapb.Data
// protobuf structure representing the consumed stream.
type Consumer interface {
	px.ValueConsumer
	// Value returns the created value. Must not be called until the consumption
	// of values is complete.
	Value() *datapb.Data
}
// protoConsumer implements Consumer. It keeps a stack of partially-built
// value slices: a new frame is pushed for every nested array/hash being
// built and popped when the container completes, so the finished root value
// accumulates in stack[0].
type protoConsumer struct {
	stack [][]*datapb.Data
}
// NewProtoConsumer creates a new Consumer backed by a protoConsumer.
func NewProtoConsumer() Consumer {
	pc := &protoConsumer{}
	// One frame for the root value; capacity 8 leaves room for nesting.
	pc.stack = make([][]*datapb.Data, 1, 8)
	return pc
}
// CanDoBinary reports that this consumer can represent binary data natively
// (see the Data_BinaryValue case in ToPBData).
func (pc *protoConsumer) CanDoBinary() bool {
	return true
}
// CanDoComplexKeys reports that hash keys are not restricted to strings.
func (pc *protoConsumer) CanDoComplexKeys() bool {
	return true
}
// StringDedupThreshold returns 0 — presumably disabling string
// deduplication; see px.ValueConsumer for the contract.
func (pc *protoConsumer) StringDedupThreshold() int {
	return 0
}
// AddArray collects the values produced by doer into a new array value.
// It pushes a fresh frame onto the stack, lets doer emit the elements into
// it, then pops the frame and appends the resulting DataArray to the parent
// frame. cap pre-sizes the element slice.
func (pc *protoConsumer) AddArray(cap int, doer px.Doer) {
	top := len(pc.stack)
	pc.stack = append(pc.stack, make([]*datapb.Data, 0, cap))
	doer()
	els := pc.stack[top]
	pc.stack = pc.stack[0:top]
	pc.add(&datapb.Data{Kind: &datapb.Data_ArrayValue{ArrayValue: &datapb.DataArray{Values: els}}})
}
// AddHash collects the values produced by doer into a new hash value.
// doer is expected to emit keys and values alternately (hence the frame is
// pre-sized to cap*2); the flat slice is then paired up into DataEntry
// key/value structs.
func (pc *protoConsumer) AddHash(cap int, doer px.Doer) {
	top := len(pc.stack)
	pc.stack = append(pc.stack, make([]*datapb.Data, 0, cap*2))
	doer()
	els := pc.stack[top]
	pc.stack = pc.stack[0:top]
	top = len(els)
	vs := make([]*datapb.DataEntry, top/2)
	for i := 0; i < top; i += 2 {
		vs[i/2] = &datapb.DataEntry{Key: els[i], Value: els[i+1]}
	}
	pc.add(&datapb.Data{Kind: &datapb.Data_HashValue{HashValue: &datapb.DataHash{Entries: vs}}})
}
// Add converts v to its protobuf representation and appends it to the
// current frame.
func (pc *protoConsumer) Add(v px.Value) {
	pc.add(ToPBData(v))
}
// AddRef appends a back reference to a previously emitted value.
func (pc *protoConsumer) AddRef(ref int) {
	pc.add(&datapb.Data{Kind: &datapb.Data_Reference{Reference: int64(ref)}})
}
// Value returns the root value accumulated in the bottom stack frame, or
// nil if nothing has been consumed yet.
func (pc *protoConsumer) Value() *datapb.Data {
	bs := pc.stack[0]
	if len(bs) > 0 {
		return bs[0]
	}
	return nil
}
// add appends value to the frame currently on top of the stack.
func (pc *protoConsumer) add(value *datapb.Data) {
	top := len(pc.stack) - 1
	pc.stack[top] = append(pc.stack[top], value)
}
// ToPBData converts a px.Value into its datapb.Data protobuf representation.
// Arrays and hashes are converted recursively; values of any unrecognized
// type degrade to undef rather than failing.
func ToPBData(v px.Value) (value *datapb.Data) {
	switch v := v.(type) {
	case px.Boolean:
		value = &datapb.Data{Kind: &datapb.Data_BooleanValue{BooleanValue: v.Bool()}}
	case px.Float:
		value = &datapb.Data{Kind: &datapb.Data_FloatValue{FloatValue: v.Float()}}
	case px.Integer:
		value = &datapb.Data{Kind: &datapb.Data_IntegerValue{IntegerValue: v.Int()}}
	case px.StringValue:
		value = &datapb.Data{Kind: &datapb.Data_StringValue{StringValue: v.String()}}
	case *types.UndefValue:
		value = &datapb.Data{Kind: &datapb.Data_UndefValue{}}
	case *types.Array:
		vs := make([]*datapb.Data, v.Len())
		v.EachWithIndex(func(elem px.Value, i int) {
			vs[i] = ToPBData(elem)
		})
		value = &datapb.Data{Kind: &datapb.Data_ArrayValue{ArrayValue: &datapb.DataArray{Values: vs}}}
	case *types.Hash:
		// Each hash entry becomes a DataEntry holding converted key and value.
		vs := make([]*datapb.DataEntry, v.Len())
		v.EachWithIndex(func(elem px.Value, i int) {
			entry := elem.(*types.HashEntry)
			vs[i] = &datapb.DataEntry{Key: ToPBData(entry.Key()), Value: ToPBData(entry.Value())}
		})
		value = &datapb.Data{Kind: &datapb.Data_HashValue{HashValue: &datapb.DataHash{Entries: vs}}}
	case *types.Binary:
		value = &datapb.Data{Kind: &datapb.Data_BinaryValue{BinaryValue: v.Bytes()}}
	default:
		value = &datapb.Data{Kind: &datapb.Data_UndefValue{}}
	}
	return
}
// ConsumePBData converts a datapb.Data into stream of values that are sent to a
// serialization.ValueConsumer. Containers are streamed recursively; hash
// entries emit key then value, matching protoConsumer.AddHash's expectation.
func ConsumePBData(v *datapb.Data, consumer px.ValueConsumer) {
	switch v.Kind.(type) {
	case *datapb.Data_BooleanValue:
		consumer.Add(types.WrapBoolean(v.GetBooleanValue()))
	case *datapb.Data_FloatValue:
		consumer.Add(types.WrapFloat(v.GetFloatValue()))
	case *datapb.Data_IntegerValue:
		consumer.Add(types.WrapInteger(v.GetIntegerValue()))
	case *datapb.Data_StringValue:
		consumer.Add(types.WrapString(v.GetStringValue()))
	case *datapb.Data_UndefValue:
		consumer.Add(px.Undef)
	case *datapb.Data_ArrayValue:
		av := v.GetArrayValue().GetValues()
		consumer.AddArray(len(av), func() {
			for _, elem := range av {
				ConsumePBData(elem, consumer)
			}
		})
	case *datapb.Data_HashValue:
		av := v.GetHashValue().Entries
		consumer.AddHash(len(av), func() {
			for _, val := range av {
				ConsumePBData(val.Key, consumer)
				ConsumePBData(val.Value, consumer)
			}
		})
	case *datapb.Data_BinaryValue:
		consumer.Add(types.WrapBinary(v.GetBinaryValue()))
	case *datapb.Data_Reference:
		consumer.AddRef(int(v.GetReference()))
	default:
		// Unknown kinds degrade to undef rather than failing.
		consumer.Add(px.Undef)
	}
}
func FromPBData(v *datapb.Data) (value px.Value) {
switch v.Kind.(type) {
case *datapb.Data_BooleanValue:
value = types.WrapBoolean(v.GetBooleanValue())
case *datapb.Data_FloatValue:
value = types.WrapFloat(v.GetFloatValue())
case *datapb.Data_IntegerValue:
value = types.WrapInteger(v.GetIntegerValue())
case *datapb.Data_StringValue:
value = types.WrapString(v.GetStringValue())
case *datapb.Data_UndefValue:
value = px.Undef
case *datapb.Data_ArrayValue:
av := v.GetArrayValue().GetValues()
vs := make([]px.Value, len(av))
for i, elem := range av {
vs[i] = FromPBData(elem)
}
value = types.WrapValues(vs)
case *datapb.Data_HashValue:
av := v.GetHashValue().Entries
vs := make([]*types.HashEntry, len(av))
for i, val := range av {
vs[i] = types.WrapHashEntry(FromPBData(val.Key), FromPBData(val.Value))
}
value = types.WrapHash(vs)
default:
value = px.Undef
}
return
} | proto/convert.go | 0.651133 | 0.581481 | convert.go | starcoder |
package radar
import (
"image"
"image/color"
"image/draw"
"github.com/oakmound/oak/render"
)
// Point is a tracked 2d position. The coordinates are pointers so the radar
// reads the current position each time it draws, letting it follow entities
// whose coordinates are updated elsewhere.
type Point struct {
	X, Y *float64
}
// Radar is a renderable minimap that draws tracked points relative to a
// center point, with distances compressed by ratio.
type Radar struct {
	render.LayeredPoint
	points        map[Point]color.Color // tracked points and their draw colors
	center        Point                 // the position the radar is centered on
	width, height int                   // radar dimensions in pixels
	r             *image.RGBA           // per-frame pixel buffer for the points
	outline       *render.Sprite        // translucent background/outline box
	ratio         float64               // world-distance to radar-pixel compression
}
var (
	// centerColor (yellow) marks the radar's own center position.
	centerColor = color.RGBA{255, 255, 0, 255}
)
// NewRadar creates a radar that will display at 0,0 with the given dimensions.
// The points given will be displayed on the radar relative to the center point,
// with the absolute distance reduced by the given ratio.
func NewRadar(w, h int, points map[Point]color.Color, center Point, ratio float64) *Radar {
	return &Radar{
		LayeredPoint: render.NewLayeredPoint(0, 0, 0),
		points:       points,
		center:       center,
		width:        w,
		height:       h,
		r:            image.NewRGBA(image.Rect(0, 0, w, h)),
		outline:      render.NewColorBox(w, h, color.RGBA{0, 0, 125, 125}),
		ratio:        ratio,
	}
}
// SetPos sets the position of the radar on the screen,
// keeping the outline sprite aligned with it.
func (r *Radar) SetPos(x, y float64) {
	r.LayeredPoint.SetPos(x, y)
	r.outline.SetPos(x, y)
}
// GetRGBA returns this radar's current pixel buffer.
func (r *Radar) GetRGBA() *image.RGBA {
	return r.r
}
// Draw draws the radar, satisfying render.Renderable.
// It is equivalent to DrawOffset with a zero offset.
func (r *Radar) Draw(buff draw.Image) {
	r.DrawOffset(buff, 0, 0)
}
// DrawOffset draws the radar at a given offset
func (r *Radar) DrawOffset(buff draw.Image, xOff, yOff float64) {
	// Draw each point p in r.points
	// at r.X() + center.X() - p.X(), r.Y() + center.Y() - p.Y()
	// IF that value is < r.width/2, > -r.width/2, < r.height/2, > -r.height/2
	for p, c := range r.points {
		// Project into radar space: distance from the tracked center,
		// compressed by ratio, relative to the radar's middle.
		x := int((*p.X-*r.center.X)/r.ratio) + r.width/2
		y := int((*p.Y-*r.center.Y)/r.ratio) + r.height/2
		// Draw a 2x2 blot so the point is visible.
		for x2 := x - 1; x2 < x+1; x2++ {
			for y2 := y - 1; y2 < y+1; y2++ {
				r.r.Set(x2, y2, c)
			}
		}
	}
	// Mark the radar's own center, then composite buffer and outline.
	r.r.Set(r.width/2, r.height/2, centerColor)
	render.ShinyDraw(buff, r.r, int(xOff+r.X()), int(yOff+r.Y()))
	r.outline.DrawOffset(buff, xOff, yOff)
	// Allocate a fresh buffer to clear this frame's points before the next
	// draw. NOTE(review): reusing and clearing one buffer would avoid a
	// per-frame allocation — confirm before changing.
	r.r = image.NewRGBA(image.Rect(0, 0, r.width, r.height))
}
// AddPoint adds an additional point to the radar to be tracked
func (r *Radar) AddPoint(loc Point, c color.Color) {
r.points[loc] = c
} | examples/radar-demo/radar/radar.go | 0.876568 | 0.413892 | radar.go | starcoder |
package imaging
import (
"image"
)
// ConvolveOptions are convolution parameters.
type ConvolveOptions struct {
	// If Normalize is true the kernel is normalized before convolution.
	Normalize bool
	// If Abs is true the absolute value of each color class is taken after convolution
	// (useful for edge-detection kernels whose output may be negative).
	Abs bool
	// Bias is added to each color class value after convolution.
	Bias int
}
// Convolve3x3 convolves the image with the specified 3x3 convolution kernel.
// Default parameters are used if a nil *ConvolveOptions is passed.
func Convolve3x3(img image.Image, kernel [9]float64, options *ConvolveOptions) *image.NRGBA {
	return convolve(img, kernel[:], options)
}
// Convolve5x5 convolves the image with the specified 5x5 convolution kernel.
// Default parameters are used if a nil *ConvolveOptions is passed.
func Convolve5x5(img image.Image, kernel [25]float64, options *ConvolveOptions) *image.NRGBA {
	return convolve(img, kernel[:], options)
}
// convolve applies the given flat kernel (length 9 for 3x3, 25 for 5x5) to
// img and returns the result as a new NRGBA image. Edge pixels are handled
// by clamping (replicating the border pixel); the alpha channel is copied
// through unchanged. Rows are processed in parallel.
func convolve(img image.Image, kernel []float64, options *ConvolveOptions) *image.NRGBA {
	src := toNRGBA(img)
	w := src.Bounds().Max.X
	h := src.Bounds().Max.Y
	dst := image.NewNRGBA(image.Rect(0, 0, w, h))
	if w < 1 || h < 1 {
		return dst
	}
	if options == nil {
		options = &ConvolveOptions{}
	}
	if options.Normalize {
		normalizeKernel(kernel)
	}
	// Precompute the non-zero kernel coefficients with their (x, y) offsets
	// so the inner loop skips zero entries entirely.
	type coef struct {
		x, y int
		k    float64
	}
	var coefs []coef
	var m int
	// m is the kernel radius: 1 for a 3x3 kernel, 2 for a 5x5 kernel.
	switch len(kernel) {
	case 9:
		m = 1
	case 25:
		m = 2
	}
	i := 0
	for y := -m; y <= m; y++ {
		for x := -m; x <= m; x++ {
			if kernel[i] != 0 {
				coefs = append(coefs, coef{x: x, y: y, k: kernel[i]})
			}
			i++
		}
	}
	parallel(0, h, func(ys <-chan int) {
		for y := range ys {
			for x := 0; x < w; x++ {
				var r, g, b float64
				for _, c := range coefs {
					// Clamp sample coordinates to the image bounds
					// (border replication).
					ix := x + c.x
					if ix < 0 {
						ix = 0
					} else if ix >= w {
						ix = w - 1
					}
					iy := y + c.y
					if iy < 0 {
						iy = 0
					} else if iy >= h {
						iy = h - 1
					}
					off := iy*src.Stride + ix*4
					s := src.Pix[off : off+3 : off+3]
					r += float64(s[0]) * c.k
					g += float64(s[1]) * c.k
					b += float64(s[2]) * c.k
				}
				if options.Abs {
					if r < 0 {
						r = -r
					}
					if g < 0 {
						g = -g
					}
					if b < 0 {
						b = -b
					}
				}
				if options.Bias != 0 {
					r += float64(options.Bias)
					g += float64(options.Bias)
					b += float64(options.Bias)
				}
				// Clamp to [0, 255] and copy the source alpha through.
				srcOff := y*src.Stride + x*4
				dstOff := y*dst.Stride + x*4
				d := dst.Pix[dstOff : dstOff+4 : dstOff+4]
				d[0] = clamp(r)
				d[1] = clamp(g)
				d[2] = clamp(b)
				d[3] = src.Pix[srcOff+3]
			}
		}
	})
	return dst
}
// normalizeKernel scales kernel in place so its coefficients sum to 1.
// If the coefficients sum to zero (e.g. edge-detection kernels), the sum of
// the positive coefficients is used as the divisor instead; an all-zero (or
// all non-positive, zero-sum) kernel is left unchanged.
func normalizeKernel(kernel []float64) {
	var sum, sumpos float64
	for i := range kernel {
		sum += kernel[i]
		if kernel[i] > 0 {
			sumpos += kernel[i]
		}
	}
	if sum != 0 {
		for i := range kernel {
			kernel[i] /= sum
		}
	} else if sumpos != 0 {
		for i := range kernel {
			kernel[i] /= sumpos
		}
	}
}
package merkletree2
import "fmt"
// Config defines the shape of the MerkleTree.
type Config struct {
	// An encoder is used to compute hashes in this configuration, and also
	// manages the blinding secrets (see UseBlindedValueHashes).
	Encoder Encoder
	// UseBlindedValueHashes controls whether this tree blinds hashes of
	// KeyValuePairs with a per (Key,Seqno) specific secret (which is itself
	// derived from a per Seqno specific secret which is stored together with
	// the tree). This ensures values stored in the tree are not leaked by
	// the membership proofs (but keys can leak, as well as the rough tree
	// size). If the tree is rebuilt at every Seqno, this also hides whether
	// values are changing (but not when a value is first inserted).
	UseBlindedValueHashes bool
	// The number of children per node. Must be a power of two. Some children
	// can be empty.
	ChildrenPerNode int
	// The maximum number of KeyValuePairs in a leaf node before we split
	MaxValuesPerLeaf int
	// The number of bits necessary to represent a ChildIndex, i.e.
	// log2(childrenPerNode)
	BitsPerIndex uint8
	// The length of all the keys which will be stored in the tree. For
	// simplicity, we enforce that all the keys have the same length and that
	// bitsPerIndex divides keyByteLength*8
	KeysByteLength int
	// The maximum depth of the tree. Should always equal keysByteLength*8/bitsPerIndex
	MaxDepth int
	// ConstructValueContainer constructs a new empty value for the value in a KeyValuePair, so that the
	// decoding routine has the correct type template.
	ConstructValueContainer func() interface{}
}
// NewConfig makes a new config object. It takes an Encoder, a flag selecting
// blinded value hashes, logChildrenPerNode which is the base 2 logarithm of
// the number of children per interior node, maxValuesPerLeaf the maximum
// number of entries in a leaf before the leaf is split into multiple nodes
// (at a lower level in the tree), keysByteLength the length of the Keys which
// the tree will store, and a constructValueFunc (so that typed values can be
// pulled out of the Merkle Tree).
func NewConfig(e Encoder, useBlindedValueHashes bool, logChildrenPerNode uint8, maxValuesPerLeaf int, keysByteLength int, constructValueFunc func() interface{}) (Config, error) {
	childrenPerNode := 1 << logChildrenPerNode
	if (keysByteLength*8)%int(logChildrenPerNode) != 0 {
		// It is logChildrenPerNode which must evenly divide the key bit
		// length, so that keys map to a whole number of tree levels.
		return Config{}, NewInvalidConfigError("logChildrenPerNode does not divide the key bit length")
	}
	if logChildrenPerNode > 63 {
		return Config{}, NewInvalidConfigError("This package does not support more than 2^63 children per internal node")
	}
	if logChildrenPerNode < 1 {
		return Config{}, NewInvalidConfigError(fmt.Sprintf("Need at least 2 children per node, but logChildrenPerNode = %v", logChildrenPerNode))
	}
	maxDepth := keysByteLength * 8 / int(logChildrenPerNode)
	return Config{
		Encoder:                 e,
		UseBlindedValueHashes:   useBlindedValueHashes,
		ChildrenPerNode:         childrenPerNode,
		MaxValuesPerLeaf:        maxValuesPerLeaf,
		BitsPerIndex:            logChildrenPerNode,
		KeysByteLength:          keysByteLength,
		MaxDepth:                maxDepth,
		ConstructValueContainer: constructValueFunc,
	}, nil
}
// MasterSecret is a secret used to hide whether a leaf value has changed between
// different versions (Seqnos) in a blinded merkle tree. One MasterSecret per
// tree is generated for each Seqno, and such secret is then used to generate a
// KeySpecificSecret per leaf.
type MasterSecret []byte
// KeySpecificSecret is the per-leaf blinding secret used to hide whether a
// leaf value has changed between different versions (Seqnos) in a blinded
// merkle tree. This is derived from a per-Seqno MasterSecret as specified by
// the Encoder.
type KeySpecificSecret []byte
// Encoder is an interface for cryptographically hashing MerkleTree data
// structures. It also manages blinding secrets.
type Encoder interface {
Decode(dest interface{}, src []byte) error
Encode(src interface{}) (dst []byte, err error)
EncodeAndHashGeneric(interface{}) (encoded []byte, hash Hash, err error)
GenerateMasterSecret(Seqno) (MasterSecret, error)
ComputeKeySpecificSecret(MasterSecret, Key) KeySpecificSecret
HashKeyValuePairWithKeySpecificSecret(KeyValuePair, KeySpecificSecret) (Hash, error)
HashKeyEncodedValuePairWithKeySpecificSecret(KeyEncodedValuePair, KeySpecificSecret) (Hash, error)
} | go/merkletree2/config.go | 0.784113 | 0.510863 | config.go | starcoder |
package xlsx
import (
"math"
"time"
)
const (
	// MJD_0 is the Julian Date of Modified Julian Day 0 (MJD = JD - 2400000.5).
	MJD_0 float64 = 2400000.5
	// MJD_JD2000 is the Modified Julian Day of the J2000.0 epoch.
	MJD_JD2000 float64 = 51544.5
	// secondsInADay and nanosInADay express one day in seconds / nanoseconds.
	secondsInADay = float64((24*time.Hour)/time.Second)
	nanosInADay = float64((24*time.Hour)/time.Nanosecond)
)
var (
	// timeLocationUTC is the cached UTC location; unixEpoc is Jan 1 1970 UTC.
	timeLocationUTC, _ = time.LoadLocation("UTC")
	unixEpoc = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC)
	// In 1900 mode, Excel takes dates in floating point numbers of days starting with Jan 1 1900.
	// The days are not zero indexed, so Jan 1 1900 would be 1.
	// Except that Excel pretends that Feb 29, 1900 occurred to be compatible with a bug in Lotus 123.
	// So, this constant uses Dec 30, 1899 instead of Jan 1, 1900, so the diff will be correct.
	// http://www.cpearson.com/excel/datetime.htm
	excel1900Epoc = time.Date(1899, time.December, 30, 0, 0, 0, 0, time.UTC)
	excel1904Epoc = time.Date(1904, time.January, 1, 0, 0, 0, 0, time.UTC)
	// Days between epocs, including both off by one errors for 1900.
	daysBetween1970And1900 = float64(unixEpoc.Sub(excel1900Epoc)/(24 * time.Hour))
	daysBetween1970And1904 = float64(unixEpoc.Sub(excel1904Epoc)/(24 * time.Hour))
)
// TimeToUTCTime reinterprets t's wall-clock fields (year through nanosecond)
// in the UTC location. Note this is not a zone conversion: unless t is
// already in UTC, the absolute instant changes while the clock reading stays
// the same.
func TimeToUTCTime(t time.Time) time.Time {
	return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), timeLocationUTC)
}
// shiftJulianToNoon normalizes a Julian date given as a whole-day part plus a
// fractional part. Julian days begin at noon, so the fraction is shifted by
// half a day and the whole-day count adjusted to keep the overall date
// unchanged.
func shiftJulianToNoon(julianDays, julianFraction float64) (float64, float64) {
	switch {
	case julianFraction >= 0.5:
		return julianDays + 1, julianFraction - 0.5
	case julianFraction <= -0.5:
		return julianDays - 1, julianFraction + 1.5
	default:
		// -0.5 < fraction < 0.5
		return julianDays, julianFraction + 0.5
	}
}
// fractionOfADay returns the integer hour, minute, second and nanosecond
// components of the given fraction of a day. Values are rounded to the
// nearest microsecond.
func fractionOfADay(fraction float64) (hours, minutes, seconds, nanoseconds int) {
	const (
		nsPerMicro = 1e3
		nsPerSec   = 1e9
		nsPerDay   = 24 * 60 * 60 * nsPerSec
	)
	// Round to the nearest microsecond before splitting into components.
	total := int64(nsPerDay*fraction + nsPerMicro/2)
	nanoseconds = int((total%nsPerSec)/nsPerMicro) * nsPerMicro
	total /= nsPerSec
	seconds = int(total % 60)
	total /= 60
	minutes = int(total % 60)
	hours = int(total / 60)
	return
}
// julianDateToGregorianTime converts a Julian date, supplied as two float64
// parts whose sum is the Julian date (splitting preserves precision), into a
// Gregorian time.Time in UTC.
func julianDateToGregorianTime(part1, part2 float64) time.Time {
	part1I, part1F := math.Modf(part1)
	part2I, part2F := math.Modf(part2)
	julianDays := part1I + part2I
	julianFraction := part1F + part2F
	// Julian days begin at noon; shift so the fraction measures from midnight.
	julianDays, julianFraction = shiftJulianToNoon(julianDays, julianFraction)
	day, month, year := doTheFliegelAndVanFlandernAlgorithm(int(julianDays))
	hours, minutes, seconds, nanoseconds := fractionOfADay(julianFraction)
	return time.Date(year, time.Month(month), day, hours, minutes, seconds, nanoseconds, time.UTC)
}
// doTheFliegelAndVanFlandernAlgorithm converts a Julian day number jd into a
// Gregorian calendar (day, month, year) using the pure integer-arithmetic
// algorithm published by Fliegel and Van Flandern in Communications of the
// ACM, volume 11, number 10, October 1968, p. 657. The magic constants
// encode the Gregorian leap cycles: 146097 days per 400 years and 1461 days
// per 4 years; the remaining constants shift the year to start in March so
// leap days fall at the end.
func doTheFliegelAndVanFlandernAlgorithm(jd int) (day, month, year int) {
	l := jd + 68569
	n := (4 * l) / 146097
	l -= (146097*n + 3) / 4
	i := (4000 * (l + 1)) / 1461001
	l = l - (1461*i)/4 + 31
	j := (80 * l) / 2447
	day = l - (2447*j)/80
	l = j / 11
	month = j + 2 - (12 * l)
	year = 100*(n-49) + i + l
	return
}
// Convert an excelTime representation (stored as a floating point number) to a time.Time.
// date1904 selects Excel's 1904 epoch (classic Mac) instead of the 1900 one.
func TimeFromExcelTime(excelTime float64, date1904 bool) time.Time {
	var date time.Time
	var wholeDaysPart = int(excelTime)
	// Excel uses Julian dates prior to March 1st 1900, and
	// Gregorian thereafter.
	if wholeDaysPart <= 61 {
		// The offsets translate the Excel serial number into a Modified
		// Julian Day count relative to MJD_0.
		const OFFSET1900 = 15018.0
		const OFFSET1904 = 16480.0
		// NOTE: this declaration shadows the outer date; it is returned
		// directly below, so the outer variable is untouched on this path.
		var date time.Time
		if date1904 {
			date = julianDateToGregorianTime(MJD_0, excelTime+OFFSET1904)
		} else {
			date = julianDateToGregorianTime(MJD_0, excelTime+OFFSET1900)
		}
		return date
	}
	// Gregorian path: split the serial number into whole days plus a
	// fractional-day remainder and add both to the selected epoch.
	var floatPart = excelTime - float64(wholeDaysPart)
	if date1904 {
		date = excel1904Epoc
	} else {
		date = excel1900Epoc
	}
	durationPart := time.Duration(nanosInADay * floatPart)
	return date.AddDate(0,0, wholeDaysPart).Add(durationPart)
}
// TimeToExcelTime will convert a time.Time into Excel's float representation, in either 1900 or 1904
// mode. If you don't know which to use, set date1904 to false.
// TODO should this should handle Julian dates?
func TimeToExcelTime(t time.Time, date1904 bool) float64 {
// Get the number of days since the unix epoc
daysSinceUnixEpoc := float64(t.Unix())/secondsInADay
// Get the number of nanoseconds in days since Unix() is in seconds.
nanosPart := float64(t.Nanosecond())/nanosInADay
// Add both together plus the number of days difference between unix and Excel epocs.
var offsetDays float64
if date1904 {
offsetDays = daysBetween1970And1904
} else {
offsetDays = daysBetween1970And1900
}
daysSinceExcelEpoc := daysSinceUnixEpoc + offsetDays + nanosPart
return daysSinceExcelEpoc
} | vendor/github.com/rentiansheng/xlsx/date.go | 0.625438 | 0.45417 | date.go | starcoder |
package scripts
import "errors"
// OperatorTree is a prefix tree (trie) containing all supported operators,
// keyed character by character so the longest matching operator can be found
// while scanning.
type OperatorTree struct {
	OperatorNode
}
// AddOperator adds an operator to the tree, creating one trie node per
// character of literal and storing the operator type on the final node.
func (tree *OperatorTree) AddOperator(literal string, operator OperatorType) {
	current := &tree.OperatorNode
	for _, character := range literal {
		node, found := current.GetChild(character)
		if !found {
			node = &OperatorNode{
				character: character,
				children: make(map[rune]*OperatorNode, 0)}
			current.children[character] = node
		}
		current = node
	}
	current.operator = operator
}
// ParseOperator parses an operator from tree, reading characters of *data
// starting at *index. *index is advanced past the consumed operator
// characters (and backed up one position when a character does not extend
// the current trie path). Increment/decrement operators are classified as
// pre- or post-unary by inspecting the adjacent characters.
func (tree *OperatorTree) ParseOperator(data *string, index *int) (*Operator, error) {
	current := &tree.OperatorNode
	parsestart := *index
loop:
	for *index < len(*data) {
		character := (*data)[*index]
		// Only operator characters are consumed; anything else ends the scan.
		switch character {
		case '=', '!', '~', '<', '>', '/', '+', '-', '*', '%', '&', '|', '^':
			(*index)++
		default:
			break loop
		}
		child, found := current.GetChild(rune(character))
		if !found {
			// The character does not extend the current operator; put it back.
			(*index)--
			break
		}
		current = child
		if !current.HasChildren() {
			// Longest possible operator reached.
			break
		}
	}
	// NOTE(review): current starts non-nil and is only ever replaced by
	// non-nil children, so this check looks unreachable — confirm.
	if current == nil {
		return nil, errors.New("Operator expected but nothing found")
	}
	switch current.operator {
	case OP_Inc, OP_Dec:
		// A non-space character immediately before the operator makes it
		// post-unary (e.g. "i++"); one immediately after makes it pre-unary.
		if *index-parsestart >= 3 && !isWhiteSpace((*data)[*index-3]) {
			return &Operator{Type: current.operator, Class: OP_PostUnary}, nil
		}
		if *index < len(*data) && !isWhiteSpace((*data)[*index]) {
			return &Operator{Type: current.operator, Class: OP_PreUnary}, nil
		}
		return nil, errors.New("Increment/Decrement without connected operand detected")
	case OP_Neg, OP_Not:
		return &Operator{Type: current.operator, Class: OP_PreUnary}, nil
	default:
		return &Operator{Type: current.operator, Class: OP_Binary}, nil
	}
}
// NewExpressionOperators creates a new operator tree containing all operators
// used for expression evaluation
func NewExpressionOperators() *OperatorTree {
tree := &OperatorTree{}
tree.children = make(map[rune]*OperatorNode, 0)
tree.AddOperator("+", OP_Add)
tree.AddOperator("-", OP_Sub)
tree.AddOperator("*", OP_Mul)
tree.AddOperator("/", OP_Div)
tree.AddOperator("=", OP_Assign)
tree.AddOperator("!", OP_Not)
tree.AddOperator("++", OP_Inc)
tree.AddOperator("--", OP_Dec)
tree.AddOperator("%", OP_Mod)
tree.AddOperator("<", OP_Less)
tree.AddOperator("<=", OP_LessEqual)
tree.AddOperator(">", OP_Greater)
tree.AddOperator(">=", OP_GreaterEqual)
tree.AddOperator("==", OP_Equal)
tree.AddOperator("!=", OP_NotEqual)
tree.AddOperator("~~", OP_Match)
tree.AddOperator("!~", OP_NotMatch)
tree.AddOperator("&", OP_BitAnd)
tree.AddOperator("|", OP_BitOr)
tree.AddOperator("^", OP_BitXor)
tree.AddOperator("&&", OP_And)
tree.AddOperator("||", OP_Or)
tree.AddOperator("^^", OP_Xor)
tree.AddOperator(">>", OP_Shr)
tree.AddOperator(">>>", OP_Ror)
tree.AddOperator("<<", OP_Shl)
tree.AddOperator("<<<", OP_Rol)
return tree
} | scripts/operatortree.go | 0.630457 | 0.47025 | operatortree.go | starcoder |
package msgraph
// RatingAustraliaMoviesType enumerates the Australian movie classification
// levels, plus the all-allowed/all-blocked sentinels.
type RatingAustraliaMoviesType int
const (
	// RatingAustraliaMoviesTypeVAllAllowed undocumented
	RatingAustraliaMoviesTypeVAllAllowed RatingAustraliaMoviesType = 0
	// RatingAustraliaMoviesTypeVAllBlocked undocumented
	RatingAustraliaMoviesTypeVAllBlocked RatingAustraliaMoviesType = 1
	// RatingAustraliaMoviesTypeVGeneral undocumented
	RatingAustraliaMoviesTypeVGeneral RatingAustraliaMoviesType = 2
	// RatingAustraliaMoviesTypeVParentalGuidance undocumented
	RatingAustraliaMoviesTypeVParentalGuidance RatingAustraliaMoviesType = 3
	// RatingAustraliaMoviesTypeVMature undocumented
	RatingAustraliaMoviesTypeVMature RatingAustraliaMoviesType = 4
	// RatingAustraliaMoviesTypeVAgesAbove15 undocumented
	RatingAustraliaMoviesTypeVAgesAbove15 RatingAustraliaMoviesType = 5
	// RatingAustraliaMoviesTypeVAgesAbove18 undocumented
	RatingAustraliaMoviesTypeVAgesAbove18 RatingAustraliaMoviesType = 6
)
// RatingAustraliaMoviesTypePAllAllowed returns a pointer to RatingAustraliaMoviesTypeVAllAllowed
func RatingAustraliaMoviesTypePAllAllowed() *RatingAustraliaMoviesType {
v := RatingAustraliaMoviesTypeVAllAllowed
return &v
}
// RatingAustraliaMoviesTypePAllBlocked returns a pointer to RatingAustraliaMoviesTypeVAllBlocked
func RatingAustraliaMoviesTypePAllBlocked() *RatingAustraliaMoviesType {
v := RatingAustraliaMoviesTypeVAllBlocked
return &v
}
// RatingAustraliaMoviesTypePGeneral returns a pointer to RatingAustraliaMoviesTypeVGeneral
func RatingAustraliaMoviesTypePGeneral() *RatingAustraliaMoviesType {
v := RatingAustraliaMoviesTypeVGeneral
return &v
}
// RatingAustraliaMoviesTypePParentalGuidance returns a pointer to RatingAustraliaMoviesTypeVParentalGuidance
func RatingAustraliaMoviesTypePParentalGuidance() *RatingAustraliaMoviesType {
v := RatingAustraliaMoviesTypeVParentalGuidance
return &v
}
// RatingAustraliaMoviesTypePMature returns a pointer to RatingAustraliaMoviesTypeVMature
func RatingAustraliaMoviesTypePMature() *RatingAustraliaMoviesType {
v := RatingAustraliaMoviesTypeVMature
return &v
}
// RatingAustraliaMoviesTypePAgesAbove15 returns a pointer to RatingAustraliaMoviesTypeVAgesAbove15
func RatingAustraliaMoviesTypePAgesAbove15() *RatingAustraliaMoviesType {
v := RatingAustraliaMoviesTypeVAgesAbove15
return &v
}
// RatingAustraliaMoviesTypePAgesAbove18 returns a pointer to RatingAustraliaMoviesTypeVAgesAbove18
func RatingAustraliaMoviesTypePAgesAbove18() *RatingAustraliaMoviesType {
v := RatingAustraliaMoviesTypeVAgesAbove18
return &v
} | v1.0/RatingAustraliaMoviesTypeEnum.go | 0.59561 | 0.48377 | RatingAustraliaMoviesTypeEnum.go | starcoder |
package gofa
/*
Fk425 Convert B1950.0 FK4 star catalog data to J2000.0 FK5
This function converts a star's catalog data from the old FK4
(Bessel-Newcomb) system to the later IAU 1976 FK5 (Fricke) system.
Given: (all B1950.0, FK4)
r1950,d1950 float64 B1950.0 RA,Dec (rad)
dr1950,dd1950 float64 B1950.0 proper motions (rad/trop.yr)
p1950 float64 parallax (arcsec)
v1950 float64 radial velocity (km/s, +ve = moving away)
Returned: (all J2000.0, FK5)
r2000,d2000 float64 J2000.0 RA,Dec (rad)
dr2000,dd2000 float64 J2000.0 proper motions (rad/Jul.yr)
p2000 float64 parallax (arcsec)
v2000 float64 radial velocity (km/s, +ve = moving away)
Notes:
1) The proper motions in RA are dRA/dt rather than cos(Dec)*dRA/dt,
and are per year rather than per century.
2) The conversion is somewhat complicated, for several reasons:
. Change of standard epoch from B1950.0 to J2000.0.
. An intermediate transition date of 1984 January 1.0 TT.
. A change of precession model.
. Change of time unit for proper motion (tropical to Julian).
. FK4 positions include the E-terms of aberration, to simplify
the hand computation of annual aberration. FK5 positions
assume a rigorous aberration computation based on the Earth's
barycentric velocity.
. The E-terms also affect proper motions, and in particular cause
objects at large distances to exhibit fictitious proper
motions.
The algorithm is based on Smith et al. (1989) and Yallop et al.
(1989), which presented a matrix method due to Standish (1982) as
developed by Aoki et al. (1983), using Kinoshita's development of
Andoyer's post-Newcomb precession. The numerical constants from
Seidelmann (1992) are used canonically.
3) Conversion from B1950.0 FK4 to J2000.0 FK5 only is provided for.
Conversions for different epochs and equinoxes would require
additional treatment for precession, proper motion and E-terms.
4) In the FK4 catalog the proper motions of stars within 10 degrees
of the poles do not embody differential E-terms effects and
should, strictly speaking, be handled in a different manner from
stars outside these regions. However, given the general lack of
homogeneity of the star data available for routine astrometry,
the difficulties of handling positions that may have been
determined from astrometric fields spanning the polar and non-
polar regions, the likelihood that the differential E-terms
effect was not taken into account when allowing for proper motion
in past astrometry, and the undesirability of a discontinuity in
the algorithm, the decision has been made in this SOFA algorithm
to include the effects of differential E-terms on the proper
motions for all stars, whether polar or not. At epoch J2000.0,
and measuring "on the sky" rather than in terms of RA change, the
errors resulting from this simplification are less than
1 milliarcsecond in position and 1 milliarcsecond per century in
proper motion.
Called:
Anp normalize angle into range 0 to 2pi
Pv2s pv-vector to spherical coordinates
Pdp scalar product of two p-vectors
Pvmpv pv-vector minus pv_vector
Pvppv pv-vector plus pv_vector
S2pv spherical coordinates to pv-vector
Sxp multiply p-vector by scalar
References:
Aoki, S. et al., 1983, "Conversion matrix of epoch B1950.0
FK4-based positions of stars to epoch J2000.0 positions in
accordance with the new IAU resolutions". Astron.Astrophys.
128, 263-267.
Seidelmann, P.K. (ed), 1992, "Explanatory Supplement to the
Astronomical Almanac", ISBN 0-935702-68-7.
Smith, C.A. et al., 1989, "The transformation of astrometric
catalog systems to the equinox J2000.0". Astron.J. 97, 265.
Standish, E.M., 1982, "Conversion of positions and proper motions
from B1950.0 to the IAU system at J2000.0". Astron.Astrophys.,
115, 1, 20-22.
Yallop, B.D. et al., 1989, "Transformation of mean star places
from FK4 B1950.0 to FK5 J2000.0 using matrices in 6-space".
Astron.J. 97, 274.
*/
func Fk425(r1950, d1950 float64, dr1950, dd1950 float64, p1950, v1950 float64,
	r2000, d2000 *float64, dr2000, dd2000 *float64, p2000, v2000 *float64) {
	/* Radians per year to arcsec per century */
	const PMF = 100.0 * DR2AS
	/* Small number to avoid arithmetic problems */
	const TINY = 1e-30
	/* Miscellaneous */
	var r, d, ur, ud, px, rv, pxvf, w, rd float64
	var i, j, k, l int
	/* Pv-vectors */
	var r0, pv1, pv2 [2][3]float64
	/*
	   CANONICAL CONSTANTS (Seidelmann 1992)
	*/
	/* Km per sec to AU per tropical century */
	/* = 86400 * 36524.2198782 / 149597870.7 */
	const VF = 21.095
	/* Constant pv-vector (cf. Seidelmann 3.591-2, vectors A and Adot) */
	a := [2][3]float64{
		{-1.62557e-6, -0.31919e-6, -0.13843e-6},
		{+1.245e-3, -1.580e-3, -0.659e-3},
	}
	/* 3x2 matrix of pv-vectors (cf. Seidelmann 3.591-4, matrix M) */
	em := [2][3][2][3]float64{
		{{{+0.9999256782, -0.0111820611, -0.0048579477},
			{+0.00000242395018, -0.00000002710663, -0.00000001177656}},
			{{+0.0111820610, +0.9999374784, -0.0000271765},
				{+0.00000002710663, +0.00000242397878, -0.00000000006587}},
			{{+0.0048579479, -0.0000271474, +0.9999881997},
				{+0.00000001177656, -0.00000000006582, +0.00000242410173}}},
		{{{-0.000551, -0.238565, +0.435739},
			{+0.99994704, -0.01118251, -0.00485767}},
			{{+0.238514, -0.002667, -0.008541},
				{+0.01118251, +0.99995883, -0.00002718}},
			{{-0.435623, +0.012254, +0.002117},
				{+0.00485767, -0.00002714, +1.00000956}}},
	}
	/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
	/* The FK4 data (units radians and arcsec per tropical century). */
	r = r1950
	d = d1950
	ur = dr1950 * PMF
	ud = dd1950 * PMF
	px = p1950
	rv = v1950
	/* Express as a pv-vector. */
	pxvf = px * VF
	w = rv * pxvf
	S2pv(r, d, 1.0, ur, ud, w, &r0)
	/* Allow for E-terms (cf. Seidelmann 3.591-2). */
	Pvmpv(r0, a, &pv1)
	Sxp(Pdp(r0[0], a[0]), r0[0], &pv2[0])
	Sxp(Pdp(r0[0], a[1]), r0[0], &pv2[1])
	Pvppv(pv1, pv2, &pv1)
	/* Convert pv-vector to Fricke system (cf. Seidelmann 3.591-3). */
	/* (Contraction of the 2x3 block matrix em with the pv-vector pv1 */
	/* over the trailing indices k,l.) */
	for i = 0; i < 2; i++ {
		for j = 0; j < 3; j++ {
			w = 0.0
			for k = 0; k < 2; k++ {
				for l = 0; l < 3; l++ {
					w += em[i][j][k][l] * pv1[k][l]
				}
			}
			pv2[i][j] = w
		}
	}
	/* Revert to catalog form. */
	Pv2s(pv2, &r, &d, &w, &ur, &ud, &rd)
	/* Rescale radial velocity and parallax only when the parallax is */
	/* non-negligible, avoiding division problems for distant objects. */
	if px > TINY {
		rv = rd / pxvf
		px = px / w
	}
	/* Return the results. */
	*r2000 = Anp(r)
	*d2000 = d
	*dr2000 = ur / PMF
	*dd2000 = ud / PMF
	*v2000 = rv
	*p2000 = px
}
/*
Fk45z Convert a B1950.0 FK4 star position to J2000.0 FK5, assuming zero proper motion in the FK5 system
This function converts a star's catalog data from the old FK4
(Bessel-Newcomb) system to the later IAU 1976 FK5 (Fricke) system,
in such a way that the FK5 proper motion is zero. Because such a
star has, in general, a non-zero proper motion in the FK4 system,
the function requires the epoch at which the position in the FK4
system was determined.
Given:
r1950,d1950 float64 B1950.0 FK4 RA,Dec at epoch (rad)
bepoch float64 Besselian epoch (e.g. 1979.3)
Returned:
r2000,d2000 float64 J2000.0 FK5 RA,Dec (rad)
Notes:
1) The epoch bepoch is strictly speaking Besselian, but if a
Julian epoch is supplied the result will be affected only to a
negligible extent.
2) The method is from Appendix 2 of Aoki et al. (1983), but using
the constants of Seidelmann (1992). See the function Fk425
for a general introduction to the FK4 to FK5 conversion.
3) Conversion from equinox B1950.0 FK4 to equinox J2000.0 FK5 only
is provided for. Conversions for different starting and/or
ending epochs would require additional treatment for precession,
proper motion and E-terms.
4) In the FK4 catalog the proper motions of stars within 10 degrees
of the poles do not embody differential E-terms effects and
should, strictly speaking, be handled in a different manner from
stars outside these regions. However, given the general lack of
homogeneity of the star data available for routine astrometry,
the difficulties of handling positions that may have been
determined from astrometric fields spanning the polar and non-
polar regions, the likelihood that the differential E-terms
effect was not taken into account when allowing for proper motion
in past astrometry, and the undesirability of a discontinuity in
the algorithm, the decision has been made in this SOFA algorithm
to include the effects of differential E-terms on the proper
motions for all stars, whether polar or not. At epoch 2000.0,
and measuring "on the sky" rather than in terms of RA change, the
errors resulting from this simplification are less than
1 milliarcsecond in position and 1 milliarcsecond per century in
proper motion.
References:
Aoki, S. et al., 1983, "Conversion matrix of epoch B1950.0
FK4-based positions of stars to epoch J2000.0 positions in
accordance with the new IAU resolutions". Astron.Astrophys.
128, 263-267.
Seidelmann, P.K. (ed), 1992, "Explanatory Supplement to the
Astronomical Almanac", ISBN 0-935702-68-7.
Called:
Anp normalize angle into range 0 to 2pi
C2s p-vector to spherical
Epb2jd Besselian epoch to Julian date
Epj Julian date to Julian epoch
Pdp scalar product of two p-vectors
Pmp p-vector minus p-vector
Ppsp p-vector plus scaled p-vector
Pvu update a pv-vector
S2c spherical to p-vector
*/
func Fk45z(r1950, d1950, bepoch float64, r2000, d2000 *float64) {
	/* Radians per year to arcsec per century */
	const PMF = 100.0 * DR2AS
	/* Position and position+velocity vectors */
	var r0, p [3]float64
	var pv [2][3]float64
	/* Miscellaneous */
	var w, djm0, djm float64
	var i, j, k int
	/*
	   CANONICAL CONSTANTS (Seidelmann 1992)
	*/
	/* Vectors A and Adot (Seidelmann 3.591-2) */
	a := [3]float64{-1.62557e-6, -0.31919e-6, -0.13843e-6}
	ad := [3]float64{+1.245e-3, -1.580e-3, -0.659e-3}
	/* 3x2 matrix of p-vectors (cf. Seidelmann 3.591-4, matrix M) */
	em := [2][3][3]float64{
		{{+0.9999256782, -0.0111820611, -0.0048579477},
			{+0.0111820610, +0.9999374784, -0.0000271765},
			{+0.0048579479, -0.0000271474, +0.9999881997}},
		{{-0.000551, -0.238565, +0.435739},
			{+0.238514, -0.002667, -0.008541},
			{-0.435623, +0.012254, +0.002117}},
	}
	/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
	/* Spherical coordinates to p-vector. */
	S2c(r1950, d1950, &r0)
	/* Adjust p-vector A to give zero proper motion in FK5. */
	w = (bepoch - 1950) / PMF
	Ppsp(a, w, ad, &p)
	/* Remove E-terms. */
	Ppsp(p, -Pdp(r0, p), r0, &p)
	Pmp(r0, p, &p)
	/* Convert to Fricke system pv-vector (cf. Seidelmann 3.591-3). */
	/* (Row i=0 of em yields the position, row i=1 the fictitious velocity.) */
	for i = 0; i < 2; i++ {
		for j = 0; j < 3; j++ {
			w = 0.0
			for k = 0; k < 3; k++ {
				w += em[i][j][k] * p[k]
			}
			pv[i][j] = w
		}
	}
	/* Allow for fictitious proper motion. */
	Epb2jd(bepoch, &djm0, &djm)
	w = (Epj(djm0, djm) - 2000.0) / PMF
	Pvu(w, pv, &pv)
	/* Revert to spherical coordinates. */
	C2s(pv[0], &w, d2000)
	*r2000 = Anp(w)
}
/*
Fk524 Convert J2000.0 FK5 star catalog data to B1950.0 FK4
Given: (all J2000.0, FK5)
r2000,d2000 float64 J2000.0 RA,Dec (rad)
dr2000,dd2000 float64 J2000.0 proper motions (rad/Jul.yr)
p2000 float64 parallax (arcsec)
v2000 float64 radial velocity (km/s, +ve = moving away)
Returned: (all B1950.0, FK4)
r1950,d1950 float64 B1950.0 RA,Dec (rad)
dr1950,dd1950 float64 B1950.0 proper motions (rad/trop.yr)
p1950 float64 parallax (arcsec)
v1950 float64 radial velocity (km/s, +ve = moving away)
Notes:
1) The proper motions in RA are dRA/dt rather than cos(Dec)*dRA/dt,
and are per year rather than per century.
2) The conversion is somewhat complicated, for several reasons:
. Change of standard epoch from J2000.0 to B1950.0.
. An intermediate transition date of 1984 January 1.0 TT.
. A change of precession model.
. Change of time unit for proper motion (Julian to tropical).
. FK4 positions include the E-terms of aberration, to simplify
the hand computation of annual aberration. FK5 positions
assume a rigorous aberration computation based on the Earth's
barycentric velocity.
. The E-terms also affect proper motions, and in particular cause
objects at large distances to exhibit fictitious proper
motions.
3) The algorithm is based on Smith et al. (1989) and Yallop et al.
(1989), which presented a matrix method due to Standish (1982) as
developed by Aoki et al. (1983), using Kinoshita's development of
Andoyer's post-Newcomb precession. The numerical constants from
Seidelmann (1992) are used canonically.
4) In the FK4 catalog the proper motions of stars within 10 degrees
of the poles do not embody differential E-terms effects and
should, strictly speaking, be handled in a different manner from
stars outside these regions. However, given the general lack of
homogeneity of the star data available for routine astrometry,
the difficulties of handling positions that may have been
determined from astrometric fields spanning the polar and non-
polar regions, the likelihood that the differential E-terms
effect was not taken into account when allowing for proper motion
in past astrometry, and the undesirability of a discontinuity in
the algorithm, the decision has been made in this SOFA algorithm
to include the effects of differential E-terms on the proper
motions for all stars, whether polar or not. At epoch J2000.0,
and measuring "on the sky" rather than in terms of RA change, the
errors resulting from this simplification are less than
1 milliarcsecond in position and 1 milliarcsecond per century in
proper motion.
Called:
Anp normalize angle into range 0 to 2pi
Pdp scalar product of two p-vectors
Pm modulus of p-vector
Pmp p-vector minus p-vector
Ppp p-vector plus p-vector
Pv2s pv-vector to spherical coordinates
S2pv spherical coordinates to pv-vector
Sxp multiply p-vector by scalar
References:
Aoki, S. et al., 1983, "Conversion matrix of epoch B1950.0
FK4-based positions of stars to epoch J2000.0 positions in
accordance with the new IAU resolutions". Astron.Astrophys.
128, 263-267.
Seidelmann, P.K. (ed), 1992, "Explanatory Supplement to the
Astronomical Almanac", ISBN 0-935702-68-7.
Smith, C.A. et al., 1989, "The transformation of astrometric
catalog systems to the equinox J2000.0". Astron.J. 97, 265.
Standish, E.M., 1982, "Conversion of positions and proper motions
from B1950.0 to the IAU system at J2000.0". Astron.Astrophys.,
115, 1, 20-22.
Yallop, B.D. et al., 1989, "Transformation of mean star places
from FK4 B1950.0 to FK5 J2000.0 using matrices in 6-space".
Astron.J. 97, 274.
*/
func Fk524(r2000, d2000 float64, dr2000, dd2000 float64, p2000, v2000 float64,
	r1950, d1950 *float64, dr1950, dd1950 *float64, p1950, v1950 *float64) {
	/* Radians per year to arcsec per century */
	const PMF = 100.0 * DR2AS
	/* Small number to avoid arithmetic problems */
	const TINY = 1e-30
	/* Miscellaneous */
	var r, d, ur, ud, px, rv, pxvf, w, rd float64
	var i, j, k, l int
	/* Vectors, p and pv */
	var r0, r1, pv [2][3]float64
	var p1, p2 [3]float64
	/*
	   CANONICAL CONSTANTS (Seidelmann 1992)
	*/
	/* Km per sec to AU per tropical century */
	/* = 86400 * 36524.2198782 / 149597870.7 */
	const VF = 21.095
	/* Constant pv-vector (cf. Seidelmann 3.591-2, vectors A and Adot) */
	a := [2][3]float64{
		{-1.62557e-6, -0.31919e-6, -0.13843e-6},
		{+1.245e-3, -1.580e-3, -0.659e-3},
	}
	/* 3x2 matrix of pv-vectors (cf. Seidelmann 3.592-1, matrix M^-1) */
	em := [2][3][2][3]float64{
		{{
			{+0.9999256795, +0.0111814828, +0.0048590039},
			{-0.00000242389840, -0.00000002710544, -0.00000001177742},
		}, {
			{-0.0111814828, +0.9999374849, -0.0000271771},
			{+0.00000002710544, -0.00000242392702, +0.00000000006585},
		}, {
			{-0.0048590040, -0.0000271557, +0.9999881946},
			{+0.00000001177742, +0.00000000006585, -0.00000242404995},
		}},
		{{
			{-0.000551, +0.238509, -0.435614},
			{+0.99990432, +0.01118145, +0.00485852},
		}, {
			{-0.238560, -0.002667, +0.012254},
			{-0.01118145, +0.99991613, -0.00002717},
		}, {
			{+0.435730, -0.008541, +0.002117},
			{-0.00485852, -0.00002716, +0.99996684},
		}},
	}
	/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
	/* The FK5 data (units radians and arcsec per Julian century). */
	r = r2000
	d = d2000
	ur = dr2000 * PMF
	ud = dd2000 * PMF
	px = p2000
	rv = v2000
	/* Express as a pv-vector. */
	pxvf = px * VF
	w = rv * pxvf
	S2pv(r, d, 1.0, ur, ud, w, &r0)
	/* Convert pv-vector to Bessel-Newcomb system (cf. Seidelmann 3.592-1). */
	/* (Contraction of the 2x3 block matrix em with the pv-vector r0.) */
	for i = 0; i < 2; i++ {
		for j = 0; j < 3; j++ {
			w = 0.0
			for k = 0; k < 2; k++ {
				for l = 0; l < 3; l++ {
					w += em[i][j][k][l] * r0[k][l]
				}
			}
			r1[i][j] = w
		}
	}
	/* Apply E-terms (equivalent to Seidelmann 3.592-3, one iteration). */
	/* Direction. */
	w = Pm(r1[0])
	Sxp(Pdp(r1[0], a[0]), r1[0], &p1)
	Sxp(w, a[0], &p2)
	Pmp(p2, p1, &p1)
	Ppp(r1[0], p1, &p1)
	/* Recompute length. */
	w = Pm(p1)
	/* Direction (second pass, using the recomputed length). */
	Sxp(Pdp(r1[0], a[0]), r1[0], &p1)
	Sxp(w, a[0], &p2)
	Pmp(p2, p1, &p1)
	Ppp(r1[0], p1, &pv[0])
	/* Derivative. */
	Sxp(Pdp(r1[0], a[1]), pv[0], &p1)
	Sxp(w, a[1], &p2)
	Pmp(p2, p1, &p1)
	Ppp(r1[1], p1, &pv[1])
	/* Revert to catalog form. */
	Pv2s(pv, &r, &d, &w, &ur, &ud, &rd)
	/* Rescale radial velocity and parallax only when the parallax is */
	/* non-negligible, avoiding division problems for distant objects. */
	if px > TINY {
		rv = rd / pxvf
		px = px / w
	}
	/* Return the results. */
	*r1950 = Anp(r)
	*d1950 = d
	*dr1950 = ur / PMF
	*dd1950 = ud / PMF
	*p1950 = px
	*v1950 = rv
}
/*
Fk52h Transform FK5 (J2000.0) star data into the Hipparcos frame
Given (all FK5, equinox J2000.0, epoch J2000.0):
r5 float64 RA (radians)
d5 float64 Dec (radians)
dr5 float64 proper motion in RA (dRA/dt, rad/Jyear)
dd5 float64 proper motion in Dec (dDec/dt, rad/Jyear)
px5 float64 parallax (arcsec)
rv5 float64 radial velocity (km/s, positive = receding)
Returned (all Hipparcos, epoch J2000.0):
rh float64 RA (radians)
dh float64 Dec (radians)
drh float64 proper motion in RA (dRA/dt, rad/Jyear)
ddh float64 proper motion in Dec (dDec/dt, rad/Jyear)
pxh float64 parallax (arcsec)
rvh float64 radial velocity (km/s, positive = receding)
Notes:
1) This function transforms FK5 star positions and proper motions
into the system of the Hipparcos catalog.
2) The proper motions in RA are dRA/dt rather than
cos(Dec)*dRA/dt, and are per year rather than per century.
3) The FK5 to Hipparcos transformation is modeled as a pure
rotation and spin; zonal errors in the FK5 catalog are not
taken into account.
4) See also H2fk5, Fk5hz, Hfk5z.
Called:
Starpv star catalog data to space motion pv-vector
Fk5hip FK5 to Hipparcos rotation and spin
Rxp product of r-matrix and p-vector
Pxp vector product of two p-vectors
Ppp p-vector plus p-vector
Pvstar space motion pv-vector to star catalog data
Reference:
F.Mignard & M.Froeschle, Astron.Astrophys., 354, 732-739 (2000).
*/
func Fk52h(r5, d5 float64, dr5, dd5, px5, rv5 float64,
	rh, dh *float64, drh, ddh, pxh, rvh *float64) {
	var i int
	var pv5, pvh [2][3]float64
	var r5h [3][3]float64
	var s5h, wxp, vv [3]float64
	/* FK5 barycentric position/velocity pv-vector (normalized). */
	Starpv(r5, d5, dr5, dd5, px5, rv5, &pv5)
	/* FK5 to Hipparcos orientation matrix and spin vector. */
	Fk5hip(&r5h, &s5h)
	/* Make spin units per day instead of per year. */
	/* (Julian year = 365.25 days.) */
	for i = 0; i < 3; i++ {
		s5h[i] /= 365.25
	}
	/* Orient the FK5 position into the Hipparcos system. */
	Rxp(r5h, pv5[0], &pvh[0])
	/* Apply spin to the position giving an extra space motion component. */
	Pxp(pv5[0], s5h, &wxp)
	/* Add this component to the FK5 space motion. */
	Ppp(wxp, pv5[1], &vv)
	/* Orient the FK5 space motion into the Hipparcos system. */
	Rxp(r5h, vv, &pvh[1])
	/* Hipparcos pv-vector to spherical. */
	Pvstar(pvh, rh, dh, drh, ddh, pxh, rvh)
}
/*
Fk54z Convert a J2000.0 FK5 star position to B1950.0 FK4, assuming zero proper motion in FK5 system and zero parallax
Given:
r2000,d2000 float64 J2000.0 FK5 RA,Dec (rad)
bepoch float64 Besselian epoch (e.g. 1950.0)
Returned:
r1950,d1950 float64 B1950.0 FK4 RA,Dec (rad) at epoch BEPOCH
dr1950,dd1950 float64 B1950.0 FK4 proper motions (rad/trop.yr)
Notes:
1) In contrast to the Fk524 function, here the FK5 proper
motions, the parallax and the radial velocity are presumed zero.
2) This function converts a star position from the IAU 1976 FK5
(Fricke) system to the former FK4 (Bessel-Newcomb) system, for
cases such as distant radio sources where it is presumed there is
zero parallax and no proper motion. Because of the E-terms of
aberration, such objects have (in general) non-zero proper motion
in FK4, and the present function returns those fictitious proper
motions.
3) Conversion from B1950.0 FK4 to J2000.0 FK5 only is provided for.
Conversions involving other equinoxes would require additional
treatment for precession.
4) The position returned by this function is in the B1950.0 FK4
reference system but at Besselian epoch BEPOCH. For comparison
with catalogs the BEPOCH argument will frequently be 1950.0. (In
this context the distinction between Besselian and Julian epoch
is insignificant.)
5) The RA component of the returned (fictitious) proper motion is
dRA/dt rather than cos(Dec)*dRA/dt.
Called:
Anp normalize angle into range 0 to 2pi
C2s p-vector to spherical
Fk524 FK4 to FK5
S2c spherical to p-vector
*/
func Fk54z(r2000, d2000, bepoch float64, r1950, d1950 *float64, dr1950, dd1950 *float64) {
	var r, d, pr, pd, px, rv, w float64
	var p, v [3]float64
	var i int
	/* FK5 equinox J2000.0 to FK4 equinox B1950.0. */
	/* (Zero FK5 proper motion, parallax and radial velocity are assumed.) */
	Fk524(r2000, d2000, 0.0, 0.0, 0.0, 0.0,
		&r, &d, &pr, &pd, &px, &rv)
	/* Spherical to Cartesian. */
	S2c(r, d, &p)
	/* Fictitious proper motion (radians per year). */
	/* (Cartesian velocity built from the spherical rates pr = dRA/dt */
	/* and pd = dDec/dt returned by Fk524.) */
	v[0] = -pr*p[1] - pd*cos(r)*sin(d)
	v[1] = pr*p[0] - pd*sin(r)*sin(d)
	v[2] = pd * cos(d)
	/* Apply the motion over the interval from B1950.0 to bepoch (years). */
	w = bepoch - 1950.0
	for i = 0; i < 3; i++ {
		p[i] += w * v[i]
	}
	/* Cartesian to spherical. */
	C2s(p, &w, d1950)
	*r1950 = Anp(w)
	/* Fictitious proper motion. */
	*dr1950 = pr
	*dd1950 = pd
}
/*
Fk5hip FK5 orientation and spin with respect to Hipparcos
Returned:
r5h [3][3]float64 r-matrix: FK5 rotation wrt Hipparcos (Note 2)
s5h [3]float64 r-vector: FK5 spin wrt Hipparcos (Note 3)
Notes:
1) This function models the FK5 to Hipparcos transformation as a
pure rotation and spin; zonal errors in the FK5 catalogue are
not taken into account.
2) The r-matrix r5h operates in the sense:
P_Hipparcos = r5h x P_FK5
where P_FK5 is a p-vector in the FK5 frame, and P_Hipparcos is
the equivalent Hipparcos p-vector.
3) The r-vector s5h represents the time derivative of the FK5 to
Hipparcos rotation. The units are radians per year (Julian,
TDB).
Called:
Rv2m r-vector to r-matrix
Reference:
F.Mignard & M.Froeschle, Astron.Astrophys., 354, 732-739 (2000).
*/
func Fk5hip(r5h *[3][3]float64, s5h *[3]float64) {
	/* FK5 wrt Hipparcos orientation expressed as an r-vector */
	/* (radians; components epsilon-x/y/z). */
	orient := [3]float64{
		-19.9e-3 * DAS2R,
		-9.1e-3 * DAS2R,
		22.9e-3 * DAS2R,
	}
	/* Re-express the orientation as an r-matrix. */
	Rv2m(orient, r5h)
	/* Hipparcos wrt FK5 spin expressed as an r-vector */
	/* (radians per year; components omega-x/y/z). */
	s5h[0] = -0.30e-3 * DAS2R
	s5h[1] = 0.60e-3 * DAS2R
	s5h[2] = 0.70e-3 * DAS2R
}
/*
Fk5hz FK5 to Hipparcos assuming zero Hipparcos proper motion
Transform an FK5 (J2000.0) star position into the system of the
Hipparcos catalogue, assuming zero Hipparcos proper motion.
Given:
r5 float64 FK5 RA (radians), equinox J2000.0, at date
d5 float64 FK5 Dec (radians), equinox J2000.0, at date
date1,date2 float64 TDB date (Notes 1,2)
Returned:
rh float64 Hipparcos RA (radians)
dh float64 Hipparcos Dec (radians)
Notes:
1) This function converts a star position from the FK5 system to
the Hipparcos system, in such a way that the Hipparcos proper
motion is zero. Because such a star has, in general, a non-zero
proper motion in the FK5 system, the function requires the date
at which the position in the FK5 system was determined.
2) The TT date date1+date2 is a Julian Date, apportioned in any
convenient way between the two arguments. For example,
JD(TT)=2450123.7 could be expressed in any of these ways,
among others:
date1 date2
2450123.7 0.0 (JD method)
2451545.0 -1421.3 (J2000 method)
2400000.5 50123.2 (MJD method)
2450123.5 0.2 (date & time method)
The JD method is the most natural and convenient to use in
cases where the loss of several decimal digits of resolution
is acceptable. The J2000 method is best matched to the way
the argument is handled internally and will deliver the
optimum resolution. The MJD method and the date & time methods
are both good compromises between resolution and convenience.
3) The FK5 to Hipparcos transformation is modeled as a pure
rotation and spin; zonal errors in the FK5 catalogue are not
taken into account.
4) The position returned by this function is in the Hipparcos
reference system but at date date1+date2.
5) See also Fk52h, H2fk5, Hfk5z.
Called:
S2c spherical coordinates to unit vector
Fk5hip FK5 to Hipparcos rotation and spin
Sxp multiply p-vector by scalar
Rv2m r-vector to r-matrix
Trxp product of transpose of r-matrix and p-vector
Pxp vector product of two p-vectors
C2s p-vector to spherical
Anp normalize angle into range 0 to 2pi
Reference:
F.Mignard & M.Froeschle, 2000, Astron.Astrophys. 354, 732-739.
*/
func Fk5hz(r5, d5 float64, date1, date2 float64, rh, dh *float64) {
	var t, w float64
	var p5e, s5h, vst, p5, ph [3]float64
	var r5h, rst [3][3]float64
	/* Interval from given date to fundamental epoch J2000.0 (JY). */
	/* (Note the sign: positive for dates before J2000.0.) */
	t = -((date1 - DJ00) + date2) / DJY
	/* FK5 barycentric position vector. */
	S2c(r5, d5, &p5e)
	/* FK5 to Hipparcos orientation matrix and spin vector. */
	Fk5hip(&r5h, &s5h)
	/* Accumulated Hipparcos wrt FK5 spin over that interval. */
	Sxp(t, s5h, &vst)
	/* Express the accumulated spin as a rotation matrix. */
	Rv2m(vst, &rst)
	/* Derotate the vector's FK5 axes back to date. */
	Trxp(rst, p5e, &p5)
	/* Rotate the vector into the Hipparcos system. */
	Rxp(r5h, p5, &ph)
	/* Hipparcos vector to spherical. */
	C2s(ph, &w, dh)
	*rh = Anp(w)
}
/*
H2fk5 Transform Hipparcos star data into the FK5 (J2000.0) frame
Given (all Hipparcos, epoch J2000.0):
rh float64 RA (radians)
dh float64 Dec (radians)
drh float64 proper motion in RA (dRA/dt, rad/Jyear)
ddh float64 proper motion in Dec (dDec/dt, rad/Jyear)
pxh float64 parallax (arcsec)
rvh float64 radial velocity (km/s, positive = receding)
Returned (all FK5, equinox J2000.0, epoch J2000.0):
r5 float64 RA (radians)
d5 float64 Dec (radians)
dr5 float64 proper motion in RA (dRA/dt, rad/Jyear)
dd5 float64 proper motion in Dec (dDec/dt, rad/Jyear)
px5 float64 parallax (arcsec)
rv5 float64 radial velocity (km/s, positive = receding)
Notes:
1) This function transforms Hipparcos star positions and proper
motions into FK5 J2000.0.
2) The proper motions in RA are dRA/dt rather than
cos(Dec)*dRA/dt, and are per year rather than per century.
3) The FK5 to Hipparcos transformation is modeled as a pure
rotation and spin; zonal errors in the FK5 catalog are not
taken into account.
4) See also Fk52h, Fk5hz, Hfk5z.
Called:
Starpv star catalog data to space motion pv-vector
Fk5hip FK5 to Hipparcos rotation and spin
Rv2m r-vector to r-matrix
Rxp product of r-matrix and p-vector
Trxp product of transpose of r-matrix and p-vector
Pxp vector product of two p-vectors
Pmp p-vector minus p-vector
Pvstar space motion pv-vector to star catalog data
Reference:
F.Mignard & M.Froeschle, Astron.Astrophys., 354, 732-739 (2000).
*/
func H2fk5(rh, dh float64, drh, ddh, pxh, rvh float64,
	r5, d5 *float64, dr5, dd5, px5, rv5 *float64) {
	var i int
	var pvh, pv5 [2][3]float64
	var r5h [3][3]float64
	var s5h, sh, wxp, vv [3]float64
	/* Hipparcos barycentric position/velocity pv-vector (normalized). */
	Starpv(rh, dh, drh, ddh, pxh, rvh, &pvh)
	/* FK5 to Hipparcos orientation matrix and spin vector. */
	Fk5hip(&r5h, &s5h)
	/* Make spin units per day instead of per year. */
	/* (Julian year = 365.25 days.) */
	for i = 0; i < 3; i++ {
		s5h[i] /= 365.25
	}
	/* Orient the spin into the Hipparcos system. */
	Rxp(r5h, s5h, &sh)
	/* De-orient the Hipparcos position into the FK5 system. */
	/* (Mirror of Fk52h: transpose rotation, and the spin term is */
	/* subtracted rather than added.) */
	Trxp(r5h, pvh[0], &pv5[0])
	/* Apply spin to the position giving an extra space motion component. */
	Pxp(pvh[0], sh, &wxp)
	/* Subtract this component from the Hipparcos space motion. */
	Pmp(pvh[1], wxp, &vv)
	/* De-orient the Hipparcos space motion into the FK5 system. */
	Trxp(r5h, vv, &pv5[1])
	/* FK5 pv-vector to spherical. */
	Pvstar(pv5, r5, d5, dr5, dd5, px5, rv5)
}
/*
Hfk5z Hipparcos to FK5 assuming zero Hipparcos proper motion
Transform a Hipparcos star position into FK5 J2000.0, assuming
zero Hipparcos proper motion.
Given:
rh float64 Hipparcos RA (radians)
dh float64 Hipparcos Dec (radians)
date1,date2 float64 TDB date (Note 1)
Returned (all FK5, equinox J2000.0, date date1+date2):
r5 float64 RA (radians)
d5 float64 Dec (radians)
dr5 float64 FK5 RA proper motion (rad/year, Note 4)
dd5 float64 Dec proper motion (rad/year, Note 4)
Notes:
1) The TT date date1+date2 is a Julian Date, apportioned in any
convenient way between the two arguments. For example,
JD(TT)=2450123.7 could be expressed in any of these ways,
among others:
date1 date2
2450123.7 0.0 (JD method)
2451545.0 -1421.3 (J2000 method)
2400000.5 50123.2 (MJD method)
2450123.5 0.2 (date & time method)
The JD method is the most natural and convenient to use in
cases where the loss of several decimal digits of resolution
is acceptable. The J2000 method is best matched to the way
the argument is handled internally and will deliver the
optimum resolution. The MJD method and the date & time methods
are both good compromises between resolution and convenience.
2) The proper motion in RA is dRA/dt rather than cos(Dec)*dRA/dt.
3) The FK5 to Hipparcos transformation is modeled as a pure rotation
and spin; zonal errors in the FK5 catalogue are not taken into
account.
4) It was the intention that Hipparcos should be a close
approximation to an inertial frame, so that distant objects have
zero proper motion; such objects have (in general) non-zero
proper motion in FK5, and this function returns those fictitious
proper motions.
5) The position returned by this function is in the FK5 J2000.0
reference system but at date date1+date2.
6) See also Fk52h, H2fk5, Fk5hz.
Called:
S2c spherical coordinates to unit vector
Fk5hip FK5 to Hipparcos rotation and spin
Rxp product of r-matrix and p-vector
Sxp multiply p-vector by scalar
Rxr product of two r-matrices
Trxp product of transpose of r-matrix and p-vector
Pxp vector product of two p-vectors
Pv2s pv-vector to spherical
Anp normalize angle into range 0 to 2pi
Reference:
F.Mignard & M.Froeschle, 2000, Astron.Astrophys. 354, 732-739.
*/
func Hfk5z(rh, dh float64, date1, date2 float64, r5, d5, dr5, dd5 *float64) {
	var t, w, r, v float64
	var ph, s5h, sh, vst, vv [3]float64
	var pv5e [2][3]float64
	var r5h, rst, r5ht [3][3]float64
	/* Time interval from fundamental epoch J2000.0 to given date (JY). */
	t = ((date1 - DJ00) + date2) / DJY
	/* Hipparcos barycentric position vector (normalized). */
	S2c(rh, dh, &ph)
	/* FK5 to Hipparcos orientation matrix and spin vector. */
	Fk5hip(&r5h, &s5h)
	/* Rotate the spin into the Hipparcos system. */
	Rxp(r5h, s5h, &sh)
	/* Accumulated Hipparcos wrt FK5 spin over that interval. */
	Sxp(t, s5h, &vst)
	/* Express the accumulated spin as a rotation matrix. */
	Rv2m(vst, &rst)
	/* Rotation matrix: accumulated spin, then FK5 to Hipparcos. */
	Rxr(r5h, rst, &r5ht)
	/* De-orient & de-spin the Hipparcos position into FK5 J2000.0. */
	Trxp(r5ht, ph, &pv5e[0])
	/* Apply spin to the position giving a space motion. */
	Pxp(sh, ph, &vv)
	/* De-orient & de-spin the Hipparcos space motion into FK5 J2000.0. */
	Trxp(r5ht, vv, &pv5e[1])
	/* FK5 position/velocity pv-vector to spherical. */
	/* (w receives the RA; the radial distance r and radial speed v */
	/* are computed but discarded.) */
	Pv2s(pv5e, &w, d5, &r, dr5, dd5, &v)
	*r5 = Anp(w)
}
package vespyr
import (
"fmt"
"math/rand"
"github.com/MaxHalford/gago"
)
// RSIStrategy is a trading strategy that buys and sells based on the
// Relative Strength Index (RSI): it buys when the RSI drops to or below
// BuyThreshold and sells when it rises to or above SellThreshold.
type RSIStrategy struct {
	// Period is the RSI look-back period used to build the indicator.
	Period uint `yaml:"period"`
	// BuyThreshold triggers a buy when RSI <= this value.
	BuyThreshold float64 `yaml:"buy_threshold"`
	// SellThreshold triggers a sell when RSI >= this value.
	SellThreshold float64 `yaml:"sell_threshold"`
	// strategy is the persisted model backing this strategy instance.
	strategy *TradingStrategyModel
}

// String returns a human-readable representation of the strategy and
// its three genes (period, buy threshold, sell threshold).
func (e *RSIStrategy) String() string {
	return fmt.Sprintf("RSIStrategy-p(%d)-(%f)-(%f)",
		e.Period, e.BuyThreshold, e.SellThreshold,
	)
}

// SetTradingStrategy sets the underlying trading strategy model.
func (e *RSIStrategy) SetTradingStrategy(t *TradingStrategyModel) {
	e.strategy = t
}

// Indicators returns the indicators required by the strategy: a single
// RSI indicator built from Period.
func (e *RSIStrategy) Indicators() []Indicator {
	var indicators []Indicator
	indicators = append(indicators, NewRSIIndicator(e.Period))
	return indicators
}
// Buy reports whether the currency should be bought, based on the RSI
// value at position current in the indicator history. It returns
// ErrNotEnoughData when the history is too short or the RSI value has
// not been produced yet.
func (e *RSIStrategy) Buy(history []*IndicatorSet, current int) (bool, error) {
	if current > len(history)-1 {
		return false, ErrNotEnoughData
	}
	value := history[current].Values[0]
	if value == nil {
		return false, ErrNotEnoughData
	}
	return value.Value <= e.BuyThreshold, nil
}

// Sell reports whether the currency should be sold, based on the RSI
// value at position current in the indicator history. It returns
// ErrNotEnoughData when the history is too short or the RSI value has
// not been produced yet.
func (e *RSIStrategy) Sell(history []*IndicatorSet, current int) (bool, error) {
	if current > len(history)-1 {
		return false, ErrNotEnoughData
	}
	value := history[current].Values[0]
	if value == nil {
		return false, ErrNotEnoughData
	}
	return value.Value >= e.SellThreshold, nil
}
// Rand creates a random version of the strategy: a period in [0,50),
// a sell threshold in [0,100), and a buy threshold at half the sell
// threshold (so buy < sell holds for fresh genomes).
func (e *RSIStrategy) Rand(rng *rand.Rand) {
	e.Period = uint(rng.Float64() * 50)
	e.SellThreshold = rng.Float64() * 100
	e.BuyThreshold = e.SellThreshold / 2
}

// Clone returns a clone of the current strategy.
// The unexported strategy model pointer is not copied: the clone starts
// detached and must have SetTradingStrategy called on it.
func (e *RSIStrategy) Clone() StrategyGenome {
	return &RSIStrategy{
		Period:        e.Period,
		BuyThreshold:  e.BuyThreshold,
		SellThreshold: e.SellThreshold,
	}
}

// Mutate mutates the underlying strategy. Each of the three genes is
// independently perturbed (with probability 0.8) by a multiplicative
// gaussian step x += x*N(0,1); the period is floored at 0 and both
// thresholds are clamped to [0,100].
// NOTE(review): because the step is proportional to the current value,
// a gene that reaches exactly 0 can never mutate away from 0.
func (e *RSIStrategy) Mutate(rng *rand.Rand) {
	mutateProb := 0.8
	if rng.Float64() < mutateProb {
		x := float64(e.Period)
		x += x * rng.NormFloat64()
		if x < 0 {
			e.Period = 0
		} else {
			e.Period = uint(x)
		}
	}
	if rng.Float64() < mutateProb {
		x := e.BuyThreshold
		x += x * rng.NormFloat64()
		e.BuyThreshold = x
		if x > 100 {
			e.BuyThreshold = 100
		}
		if x < 0 {
			e.BuyThreshold = 0
		}
	}
	if rng.Float64() < mutateProb {
		x := e.SellThreshold
		x += x * rng.NormFloat64()
		e.SellThreshold = x
		if x > 100 {
			e.SellThreshold = 100
		}
		if x < 0 {
			e.SellThreshold = 0
		}
	}
}

// Crossover crosses over an RSIStrategy with another one using uniform
// crossover on the (period, buy, sell) gene vectors, producing two
// children. It panics if m is not an *RSIStrategy.
func (e *RSIStrategy) Crossover(m StrategyGenome,
	r *rand.Rand) (StrategyGenome, StrategyGenome) {
	mate := m.(*RSIStrategy)
	p1 := []float64{float64(e.Period), e.BuyThreshold, e.SellThreshold}
	p2 := []float64{float64(mate.Period), mate.BuyThreshold, mate.SellThreshold}
	c1, c2 := gago.CrossUniformFloat64(p1, p2, r)
	s1 := &RSIStrategy{
		Period:        uint(c1[0]),
		BuyThreshold:  c1[1],
		SellThreshold: c1[2],
	}
	s2 := &RSIStrategy{
		Period:        uint(c2[0]),
		BuyThreshold:  c2[1],
		SellThreshold: c2[2],
	}
	return s1, s2
} | pkg/vespyr/rsi_strategy.go | 0.800458 | 0.440469 | rsi_strategy.go | starcoder
package rpmalloc
// #include "rpmalloc.h"
import "C"
// Config configures rpmalloc's memory mapping and sizing behavior.
// Field order and meaning appear to mirror the C configuration struct
// from rpmalloc.h — confirm against the header when changing.
type Config struct {
	//! Map memory pages for the given number of bytes. The returned address MUST be
	// aligned to the rpmalloc span size, which will always be a power of two.
	// Optionally the function can store an alignment offset in the offset variable
	// in case it performs alignment and the returned pointer is offset from the
	// actual start of the memory region due to this alignment. The alignment offset
	// will be passed to the memory unmap function. The alignment offset MUST NOT be
	// larger than 65535 (storable in an uint16_t), if it is you must use natural
	// alignment to shift it into 16 bits. If you set a memory_map function, you
	// must also set a memory_unmap function or else the default implementation will
	// be used for both.
	MemoryMap uintptr
	//! Unmap the memory pages starting at address and spanning the given number of bytes.
	// If release is set to non-zero, the unmap is for an entire span range as returned by
	// a previous call to memory_map and that the entire range should be released. The
	// release argument holds the size of the entire span range. If release is set to 0,
	// the unmap is a partial decommit of a subset of the mapped memory range.
	// If you set a memory_unmap function, you must also set a memory_map function or
	// else the default implementation will be used for both.
	MemoryUnmap uintptr
	//! Called when an assert fails, if asserts are enabled. Will use the standard assert()
	// if this is not set.
	ErrorCallback uintptr
	//! Called when a call to map memory pages fails (out of memory). If this callback is
	// not set or returns zero the library will return a null pointer in the allocation
	// call. If this callback returns non-zero the map call will be retried. The argument
	// passed is the number of bytes that was requested in the map call. Only used if
	// the default system memory map function is used (memory_map callback is not set).
	MapFailCallback uintptr
	//! Size of memory pages. The page size MUST be a power of two. All memory mapping
	// requests to memory_map will be made with size set to a multiple of the page size.
	// Used if RPMALLOC_CONFIGURABLE is defined to 1, otherwise system page size is used.
	PageSize uintptr
	//! Size of a span of memory blocks. MUST be a power of two, and in [4096,262144]
	// range (unless 0 - set to 0 to use the default span size). Used if RPMALLOC_CONFIGURABLE
	// is defined to 1.
	SpanSize uintptr
	//! Number of spans to map at each request to map new virtual memory blocks. This can
	// be used to minimize the system call overhead at the cost of virtual memory address
	// space. The extra mapped pages will not be written until actually used, so physical
	// committed memory should not be affected in the default implementation. Will be
	// aligned to a multiple of spans that match memory page size in case of huge pages.
	SpanMapCount uintptr
	//! Enable use of large/huge pages. If this flag is set to non-zero and page size is
	// zero, the allocator will try to enable huge pages and auto detect the configuration.
	// If this is set to non-zero and page_size is also non-zero, the allocator will
	// assume huge pages have been configured and enabled prior to initializing the
	// allocator.
	// For Windows, see https://docs.microsoft.com/en-us/windows/desktop/memory/large-page-support
	// For Linux, see https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
	EnableHugePages int32
	// Padding/reserved field to match the C struct layout.
	Unused int32
}

// GlobalStats reports allocator-wide memory statistics.
type GlobalStats struct {
	//! Current amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
	Mapped uintptr
	//! Peak amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
	MappedPeak uintptr
	//! Current amount of memory in global caches for small and medium sizes (<32KiB)
	Cached uintptr
	//! Current amount of memory allocated in huge allocations, i.e larger than LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
	HugeAlloc uintptr
	//! Peak amount of memory allocated in huge allocations, i.e larger than LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
	HugeAllocPeak uintptr
	//! Total amount of memory mapped since initialization (only if ENABLE_STATISTICS=1)
	MappedTotal uintptr
	//! Total amount of memory unmapped since initialization (only if ENABLE_STATISTICS=1)
	UnmappedTotal uintptr
}

// ThreadStats reports per-thread cache statistics.
type ThreadStats struct {
	//! Current number of bytes available in thread size class caches for small and medium sizes (<32KiB)
	SizeCache uintptr
	//! Current number of bytes available in thread span caches for small and medium sizes (<32KiB)
	SpanCache uintptr
	//! Total number of bytes transitioned from thread cache to global cache (only if ENABLE_STATISTICS=1)
	ThreadToGlobal uintptr
	//! Total number of bytes transitioned from global cache to thread cache (only if ENABLE_STATISTICS=1)
	GlobalToThread uintptr
	//! Per span count statistics (only if ENABLE_STATISTICS=1)
	SpanUse [64]SpanStats
	//! Per size class statistics (only if ENABLE_STATISTICS=1)
	SizeUse [128]SizeUse
}

// SpanStats reports usage counters for a single span class.
type SpanStats struct {
	//! Currently used number of spans
	Current uintptr
	//! High water mark of spans used
	Peak uintptr
	//! Number of spans transitioned to global cache
	ToGlobal uintptr
	//! Number of spans transitioned from global cache
	FromGlobal uintptr
	//! Number of spans transitioned to thread cache
	ToCache uintptr
	//! Number of spans transitioned from thread cache
	FromCache uintptr
	//! Number of spans transitioned to reserved state
	ToReserved uintptr
	//! Number of spans transitioned from reserved state
	FromReserved uintptr
	//! Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
	MapCalls uintptr
}

// SizeUse reports usage counters for a single size class.
type SizeUse struct {
	//! Current number of allocations
	AllocCurrent uintptr
	//! Peak number of allocations
	AllocPeak uintptr
	//! Total number of allocations
	AllocTotal uintptr
	//! Total number of frees
	FreeTotal uintptr
	//! Number of spans transitioned to cache
	SpansToCache uintptr
	//! Number of spans transitioned from cache
	SpansFromCache uintptr
	//! Number of spans transitioned from reserved state
	SpansFromReserved uintptr
	//! Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
	MapCalls uintptr
} | alloc/rpmalloc/tinygo/model.go | 0.555918 | 0.492371 | model.go | starcoder
package ml
import (
"errors"
"fmt"
"math"
"strings"
)
// Common erros.
// Common errors returned (or panicked) by the matrix/vector operations.
// Compare with == or errors.Is; messages fixed: "dimenstion" -> "dimension",
// "singuler" -> "singular".
var (
	ErrBadDim              = errors.New("bad dimension for matrix")
	ErrInconsistentData    = errors.New("matrix has different y dimension per x")
	ErrUninitialized       = errors.New("matrix not initialized")
	ErrIdentityInvalidSize = errors.New("current dimension of matrix does not have an identity")
	ErrNegativeIndex       = errors.New("negative index are not supported")
	ErrOutOfBound          = errors.New("index out of bound")
	ErrNotAVector          = errors.New("the current vector has invalid dimension for a vector")
	ErrSingularMatrix      = errors.New("the matrix is singular")
)
// MRow is the row type for matrix.
type MRow []float64

// Scale returns the result of the scalar multiplication of the given
// scalar and the current matrix row.
// NOTE: Does not change current row state.
func (mr MRow) Scale(n float64) MRow {
	return Matrix{mr}.Scale(n)[0]
}

// Add adds the given row to the current one and returns the result.
// NOTE: Does not change current row state; panics with ErrBadDim when
// the lengths differ.
func (mr MRow) Add(mr2 MRow) MRow {
	return Matrix{mr}.Add(Matrix{mr2})[0]
}

// ToVector returns the current row as a column vector.
// Note: the elements are copied — changes to the returned vector do NOT
// affect the row (the previous comment claimed the opposite).
func (mr MRow) ToVector() Vector {
	v := NewVector(len(mr))
	for i, elem := range mr {
		v[i][0] = elem
	}
	return v
}

// Matrix is a dense, row-major matrix of float64 values.
type Matrix []MRow

// NewMatrix instantiates a new zero-filled matrix of m rows by n columns.
func NewMatrix(m, n int) Matrix {
	ret := make(Matrix, m)
	for i := 0; i < m; i++ {
		ret[i] = make([]float64, n)
	}
	return ret
}

// ToVector asserts the current matrix as a vector and returns it
// (panics if it is not a valid (n,1) column vector).
func (ma Matrix) ToVector() Vector {
	return ToVector(ma)
}

// Dim returns the (rows, columns) dimension of the matrix.
// Quirk: a non-empty matrix whose first row is empty reports one
// column, not zero — callers throughout this file rely on it.
func (ma Matrix) Dim() (int, int) {
	if len(ma) == 0 {
		return 0, 0
	}
	if len(ma[0]) == 0 {
		return len(ma), 1
	}
	return len(ma), len(ma[0])
}
// Add returns the element-wise sum of the current matrix and ma2 as a
// new matrix. Panics with ErrBadDim when the dimensions differ.
// NOTE: Does not change current matrix state.
func (ma Matrix) Add(ma2 Matrix) Matrix {
	if !ma.DimMatch(ma2) {
		panic(ErrBadDim)
	}
	sum := NewMatrix(ma.Dim())
	for i := range ma {
		for j := range ma[i] {
			sum[i][j] = ma[i][j] + ma2[i][j]
		}
	}
	return sum
}

// Sub returns the element-wise difference (ma - ma2) as a new matrix.
// Panics with ErrBadDim when the dimensions differ.
// NOTE: Does not change current matrix state.
func (ma Matrix) Sub(ma2 Matrix) Matrix {
	if !ma.DimMatch(ma2) {
		panic(ErrBadDim)
	}
	diff := NewMatrix(ma.Dim())
	for i := range ma {
		for j := range ma[i] {
			diff[i][j] = ma[i][j] - ma2[i][j]
		}
	}
	return diff
}
// Mul returns the matrix product of the current (m1 x n1) matrix and
// the given (m2 x n2) matrix, i.e. an (m1 x n2) result. Panics with
// ErrBadDim when the inner dimensions do not match (n1 != m2).
// NOTE: Does not change current matrix state.
//
// BUG FIX: the result was previously allocated as NewMatrix(ma.Dim())
// — an (m1 x n1) matrix — which is the wrong shape whenever n1 != n2
// (out-of-range writes when n2 > n1, zero-padded columns otherwise).
func (ma Matrix) Mul(ma2 Matrix) Matrix {
	m1, n1 := ma.Dim()
	m2, n2 := ma2.Dim()
	if n1 != m2 {
		panic(ErrBadDim)
	}
	ret := NewMatrix(m1, n2)
	for i := range ma {
		if len(ma[i]) == 0 {
			continue
		}
		for j := range ma2[0] {
			sum := 0.
			for k := range ma[0] {
				sum += ma[i][k] * ma2[k][j]
			}
			ret[i][j] = sum
		}
	}
	return ret
}
// Scale returns a copy of the matrix with every element multiplied by n.
// NOTE: Does not change current matrix state.
func (ma Matrix) Scale(n float64) Matrix {
	scaled := NewMatrix(ma.Dim())
	for i := range ma {
		for j, elem := range ma[i] {
			scaled[i][j] = elem * n
		}
	}
	return scaled
}

// MulV multiplies the current matrix with the given column vector and
// returns the resulting vector.
// NOTE: Does not change current matrix state.
func (ma Matrix) MulV(v Vector) Vector {
	product := ma.Mul(Matrix(v))
	return Vector(product)
}

// Transpose returns a transposed copy of the current matrix.
// NOTE: Does not change current matrix state.
func (ma Matrix) Transpose() Matrix {
	rows, cols := ma.Dim()
	t := NewMatrix(cols, rows)
	for i := range ma {
		for j, elem := range ma[i] {
			t[j][i] = elem
		}
	}
	return t
}
// Inverse returns the inverted copy of the current matrix, computed by
// Gauss-Jordan elimination with partial pivoting on an augmented
// [A | I] matrix. Panics with ErrBadDim when the matrix is not square
// and with ErrSingularMatrix when no non-zero pivot exists.
// NOTE: Does not change current matrix state.
//
// BUG FIX: the pivot search previously compared math.Abs(ret[k][j]),
// where j is reassigned inside the loop — after the first swap
// candidate it read the wrong column. All pivot candidates live in
// column i, so the comparison must be math.Abs(ret[k][i]).
func (ma Matrix) Inverse() Matrix {
	m, n := ma.Dim()
	if m != n {
		panic(ErrBadDim)
	}
	// Step 1: Double the width of the matrix.
	ret := ma.Extend(0, n) // Add 0 rows and n cols.
	// Step 2: Set the right half of the matrix as the identity matrix.
	ret = ret.SetSubMatrix(ret.SubMatrix(0, n, m, n).Identity(), 0, n) // sub matrix starts at (0,n) and has a (m,n) size.
	for i := 0; i < len(ret); i++ {
		if len(ma[i]) == 0 {
			continue
		}
		// Partial pivoting: pick the row at or below i whose entry in
		// column i has the largest magnitude.
		j := i
		for k := i; k < len(ret); k++ {
			if math.Abs(ret[k][i]) > math.Abs(ret[j][i]) {
				j = k
			}
		}
		if j != i {
			// Swap rows so the best pivot sits on the diagonal.
			ret[i], ret[j] = ret[j], ret[i]
		}
		if ret[i][i] == 0 {
			panic(ErrSingularMatrix)
		}
		// Normalize the pivot row so the pivot becomes 1.
		ret[i] = ret[i].Scale(1 / ret[i][i])
		// Eliminate column i from every other row.
		for k := 0; k < n; k++ {
			if k == i {
				continue
			}
			ret[k] = ret[k].Add(ret[i].Scale(-ret[k][i]))
		}
	}
	// The right half of the augmented matrix now holds the inverse.
	return ret.SubMatrix(0, n, m, n)
}
// Identity returns the identity matrix of the same dimension as the
// current (square) matrix. Panics with ErrIdentityInvalidSize when the
// matrix is not square.
// NOTE: Does not change current matrix state.
func (ma Matrix) Identity() Matrix {
	m, n := ma.Dim()
	if m != n {
		panic(ErrIdentityInvalidSize)
	}
	id := NewMatrix(m, n) // zero everywhere except the diagonal
	for i := range id {
		id[i][i] = 1
	}
	return id
}

// Equal reports whether the given matrix has the same dimensions and
// the same elements as the current one.
func (ma Matrix) Equal(ma2 Matrix) bool {
	if !ma.DimMatch(ma2) {
		return false
	}
	for i := range ma {
		// Empty rows are only equal to empty rows.
		if len(ma[i]) == 0 {
			if len(ma2[i]) != 0 {
				return false
			}
			continue
		}
		for j := range ma[i] {
			if ma[i][j] != ma2[i][j] {
				return false
			}
		}
	}
	return true
}
// Validate checks that the current matrix is well formed: initialized
// and with every row of equal length. An empty (non-nil) matrix is valid.
// NOTE: When instantiating a matrix outside ml.NewMatrix, Validate
// should be called.
func (ma Matrix) Validate() error {
	if ma == nil {
		return ErrUninitialized
	}
	if len(ma) == 0 {
		return nil
	}
	want := len(ma[0])
	for _, row := range ma {
		if len(row) != want {
			return ErrInconsistentData
		}
	}
	return nil
}

// DimMatch reports whether the given matrix has the same dimension as
// the current one.
func (ma Matrix) DimMatch(ma2 Matrix) bool {
	r1, c1 := ma.Dim()
	r2, c2 := ma2.Dim()
	return r1 == r2 && c1 == c2
}

// Extend returns a copy of the current matrix with m1 extra rows and
// n1 extra columns, all zero-filled. Extend(0, 0) yields an identical
// copy; a nil receiver yields a fresh (m1 x n1) zero matrix.
// NOTE: Does not change state of current matrix.
func (ma Matrix) Extend(m1, n1 int) Matrix {
	if ma == nil {
		return NewMatrix(m1, n1)
	}
	m, n := ma.Dim()
	ret := NewMatrix(m+m1, n+n1)
	for i := range ma {
		copy(ret[i], ma[i])
	}
	return ret
}

// Copy returns a deep copy of the current matrix.
func (ma Matrix) Copy() Matrix {
	return ma.Extend(0, 0)
}
// SubMatrix returns a sub matrix view of the current matrix, starting
// at (m,n) (0-indexed) and of dimension (m1,n1). Panics with
// ErrOutOfBound when the requested region does not fit.
// NOTE: Changes to the sub matrix will change the parent one (rows
// share backing storage).
//
// BUG FIX: the row bound previously only checked m > len(ma); a region
// whose END overflowed (m+m1 > len(ma)) slipped past the guard and
// panicked with a raw runtime index error instead of ErrOutOfBound.
func (ma Matrix) SubMatrix(m, n, m1, n1 int) Matrix {
	if m < 0 || n < 0 || // Negative start index.
		m+m1 > len(ma) || // Rows m..m+m1 must all exist.
		n+n1 > len(ma[0]) { // Columns n..n+n1 must fit in a row.
		panic(ErrOutOfBound)
	}
	ret := make(Matrix, m1)
	for i := 0; i < len(ret); i++ {
		ret[i] = ma[m+i][n : n+n1]
	}
	return ret
}
// SetSubMatrix writes ma2 into the current matrix with its top-left
// corner at (m, n) and returns the receiver. Panics with ErrOutOfBound
// when ma2 would overflow the current matrix.
// NOTE: Changes the state of the current matrix.
func (ma Matrix) SetSubMatrix(ma2 Matrix, m, n int) Matrix {
	rows, cols := ma.Dim()
	subRows, subCols := ma2.Dim()
	if m < 0 || n < 0 || m+subRows > rows || n+subCols > cols {
		panic(ErrOutOfBound)
	}
	for i := range ma2 {
		for j, elem := range ma2[i] {
			ma[i+m][j+n] = elem
		}
	}
	return ma
}
// // Row returns the ith row of the current matrix.
// // NOTE: Changes to the row will change the parent matrix.
// func (ma Matrix) Row(i int) MRow {
// if i < 0 || i >= len(ma) {
// panic(ErrOutOfBound)
// }
// return ma[i]
// }
// // SetRow sets the given row as ith row of the current matrix.
// // NOTE: Changes the state of the current matrix.
// func (ma Matrix) SetRow(row MRow, i int) Matrix {
// if i < 0 || i >= len(ma) {
// panic(ErrOutOfBound)
// }
// if _, n := ma.Dim(); n != len(row) {
// panic(ErrBadDim)
// }
// ma[i] = row
// return ma
// }
// String pretty-prints the matrix: a "(rows,cols)" header followed by
// one line per row. Nil prints as "<nil>", an empty matrix as "||".
func (ma Matrix) String() string {
	if ma == nil {
		return "<nil>"
	}
	if len(ma) == 0 {
		return "||"
	}
	m, n := ma.Dim()
	var b strings.Builder
	fmt.Fprintf(&b, "(%d,%d)\n", m, n)
	for _, row := range ma {
		fmt.Fprintf(&b, "%4v\n", row)
	}
	return strings.TrimSpace(b.String())
}
// Vector is a matrix with 1 column, i.e. an (n,1) column vector.
type Vector Matrix

// NewVector instantiates a new zero-filled (n,1) vector with n rows.
func NewVector(n int) Vector {
	return Vector(NewMatrix(n, 1))
}

// ToVector converts the matrix type to vector and validates the
// result. Panics if the given matrix is not a valid (n,1) column vector.
func ToVector(m Matrix) Vector {
	v := Vector(m)
	if err := v.Validate(); err != nil {
		panic(err)
	}
	return v
}

// Dim returns the size of the vector: (n,1) — n rows, one column.
// (The previous comment stated (1,n), which was backwards.)
func (v Vector) Dim() (int, int) {
	return Matrix(v).Dim()
}

// Validate checks that the value is a well-formed column vector: a
// valid matrix with at least one row and exactly one column.
// NOTE: When instantiating a vector outside ml.NewVector, Validate
// should be called.
func (v Vector) Validate() error {
	if err := Matrix(v).Validate(); err != nil {
		return err
	}
	if len(v) == 0 || len(v[0]) != 1 {
		return ErrNotAVector
	}
	return nil
}

// Transpose returns a transposed copy of the vector as a (1,n) row
// matrix.
// NOTE: Does not change state of current vector.
func (v Vector) Transpose() Matrix {
	return Matrix(v).Transpose()
}
// Sum computes the sum of all the vector elements.
//
// BUG FIX: the previous implementation ranged over v[0] — the first
// (single-element) row — so it returned only the first element. A
// Vector is an (n,1) matrix: every row must be visited.
func (v Vector) Sum() float64 {
	sum := 0.0
	for _, row := range v {
		for _, elem := range row {
			sum += elem
		}
	}
	return sum
}
// SubV returns the result of v - v2 as a copy.
// NOTE: Does not change current vector state; panics with ErrBadDim on
// dimension mismatch (via Matrix.Sub).
func (v Vector) SubV(v2 Vector) Vector {
	return Vector(Matrix(v).Sub(Matrix(v2)))
}

// Scale returns the vector scaled element-wise by n, as a copy.
// NOTE: Does not change current vector state.
func (v Vector) Scale(n float64) Vector {
	return Vector(Matrix(v).Scale(n))
}

// String pretty-prints the vector using the Matrix representation.
func (v Vector) String() string {
	return Matrix(v).String()
} | matrix.go | 0.747155 | 0.5083 | matrix.go | starcoder
package matrix
// Matrix interface defines the default contract for an integer matrix.
type Matrix interface {
	// Dim returns number of rows (1st value) and columns (2nd value) of this Matrix.
	Dim() (int, int)
	// Entry returns Matrix entry value in the position of given row and col.
	Entry(row, col int) int
	// SetEntry sets given entry value to the position of given row and col.
	SetEntry(row, col, entry int)
	// ForEach iterates through all entries of this Matrix in row-major
	// order; given function receives row-index (1st argument),
	// column-index (2nd argument) and value of an entry (3rd argument),
	// return true to continue iteration.
	ForEach(func(int, int, int) bool)
	// Equals returns true if this Matrix equals to given Matrix m.
	Equals(m Matrix) bool
	// MultiplyScalar performs multiplication of this Matrix by a scalar "s" and returns result,
	// without changing properties of this Matrix (no side effects).
	MultiplyScalar(s int) Matrix
	// Add performs addition of the m Matrix to this Matrix and returns result,
	// without changing properties of this Matrix (no side effects).
	Add(m Matrix) Matrix
	// Multiply performs multiplication of this Matrix by given "m" Matrix and returns result,
	// without changing properties of this Matrix (no side effects).
	Multiply(m Matrix) Matrix
	// Transpose returns transposed version of this Matrix,
	// without changing properties of this Matrix (no side effects).
	Transpose() Matrix
	// IsIdentity returns true in case if this Matrix is an identity matrix.
	IsIdentity() bool
}

// matrix is the default implementation of Matrix, storing entries in a
// single row-major slice.
type matrix struct {
	// rs - number of rows
	// cs - number of columns
	rs, cs int
	// es - raw row-major slice of entries, len rs*cs
	es []int
}
// index converts (row, col) coordinates into the row-major slice offset.
func (mx *matrix) index(row, col int) int {
	return row*mx.cs + col
}

// newMatrix builds a rows x cols matrix from a defensive copy of e.
func newMatrix(rows, cols int, e []int) *matrix {
	es := make([]int, len(e))
	copy(es, e)
	return &matrix{rs: rows, cs: cols, es: es}
}

// DimM creates and returns a new zero-filled m*n-dimensional Matrix.
func DimM(m, n int) Matrix {
	return newMatrix(m, n, make([]int, m*n))
}

// IdentityM creates and returns a new n*n identity Matrix (ones on the
// diagonal, zeros elsewhere).
func IdentityM(n int) Matrix {
	id := newMatrix(n, n, make([]int, n*n))
	for i := 0; i < n; i++ {
		id.SetEntry(i, i, 1)
	}
	return id
}
// M creates and returns a new Matrix from the provided slice of bras
// (row-Vectors). Returns nil when no rows are given. All bras are
// assumed to have the same dimension as the first one.
func M(bras []Vector) Matrix {
	rows := len(bras)
	if rows == 0 {
		return nil
	}
	cols := bras[0].Dim()
	// Preallocate: the previous append loop started from an empty slice
	// and re-grew it on every entry.
	entries := make([]int, 0, rows*cols)
	for _, v := range bras {
		v.ForEach(func(idx, entry int) bool {
			entries = append(entries, entry)
			return true
		})
	}
	return newMatrix(rows, cols, entries)
}
// Dim returns the number of rows and columns of this matrix.
func (mx *matrix) Dim() (int, int) {
	return mx.rs, mx.cs
}

// Entry returns the entry stored at (row, col).
func (mx *matrix) Entry(row, col int) int {
	return mx.es[mx.index(row, col)]
}

// SetEntry stores entry at (row, col).
func (mx *matrix) SetEntry(row, col, entry int) {
	mx.es[mx.index(row, col)] = entry
}

// ForEach visits every entry in row-major order, passing (row, col,
// value) to fn; returning false from fn stops the iteration early.
func (mx *matrix) ForEach(fn func(int, int, int) bool) {
	for i, e := range mx.es {
		row := i / mx.cs
		col := i % mx.cs
		if !fn(row, col, e) {
			return
		}
	}
}
// Equals reports whether this matrix has the same dimensions and the
// same entries as m.
//
// BUG FIX: the accumulator previously started as the zero value
// (false), so two dimension-matched matrices with zero entries —
// ForEach never runs — compared unequal. It now starts true.
func (mx *matrix) Equals(m Matrix) bool {
	rows, cols := mx.Dim()
	rows2, cols2 := m.Dim()
	if rows != rows2 || cols != cols2 {
		return false
	}
	ok := true
	m.ForEach(func(r, c, e int) bool {
		ok = mx.Entry(r, c) == e
		return ok // stop at the first mismatch
	})
	return ok
}
// MultiplyScalar returns a new matrix with every entry multiplied by s;
// the receiver is unchanged.
func (mx *matrix) MultiplyScalar(s int) Matrix {
	res := newMatrix(mx.rs, mx.cs, mx.es)
	for i := range res.es {
		res.es[i] *= s
	}
	return res
}

// Add returns the element-wise sum of this matrix and m as a new
// matrix, or nil when the dimensions differ; the receiver is unchanged.
func (mx *matrix) Add(m Matrix) Matrix {
	rows, cols := mx.Dim()
	if r2, c2 := m.Dim(); rows != r2 || cols != c2 {
		return nil
	}
	res := newMatrix(rows, cols, mx.es)
	m.ForEach(func(r, c, e int) bool {
		res.SetEntry(r, c, res.Entry(r, c)+e)
		return true
	})
	return res
}

// Multiply returns the matrix product mx·m as a new matrix, or nil when
// the inner dimensions do not match; the receiver is unchanged.
func (mx *matrix) Multiply(m Matrix) Matrix {
	rows, cols := mx.Dim()
	rows2, cols2 := m.Dim()
	if cols != rows2 {
		return nil
	}
	res := DimM(rows, cols2)
	for r := 0; r < rows; r++ {
		for c := 0; c < cols2; c++ {
			var dot int
			for k := 0; k < cols; k++ {
				dot += mx.Entry(r, k) * m.Entry(k, c)
			}
			res.SetEntry(r, c, dot)
		}
	}
	return res
}
// Transpose returns a transposed copy of this matrix; the receiver is
// unchanged.
func (mx *matrix) Transpose() Matrix {
	res := newMatrix(mx.cs, mx.rs, make([]int, len(mx.es)))
	mx.ForEach(func(r, c, e int) bool {
		res.SetEntry(c, r, e)
		return true
	})
	return res
}
// IsIdentity returns true in case if this Matrix is an identity matrix:
// every diagonal entry is 1 and every off-diagonal entry is 0.
// NOTE(review): an empty matrix returns false because the zero-value
// accumulator is never updated, and squareness is not checked — a
// non-square matrix with the same 1/0 pattern (e.g. 2x3 [[1,0,0],[0,1,0]])
// would also report true. Confirm whether callers only pass square
// matrices before relying on this.
func (mx *matrix) IsIdentity() bool {
	var ok bool
	mx.ForEach(func(r, c, e int) bool {
		if r == c {
			ok = e == 1
		} else {
			ok = e == 0
		}
		return ok
	})
	return ok
} | matrix.go | 0.873309 | 0.74674 | matrix.go | starcoder
package detect
import (
"image"
"math"
)
// Feature is a Haar-like feature: a rectangle (in the cascade's trained
// coordinate space) whose pixel sum contributes Weight to a classifier
// score.
type Feature struct {
	Rect   image.Rectangle
	Weight float64
}

// Classifier is a set of features with a threshold; it evaluates to
// Left when the weighted feature sum falls below the (stddev-scaled)
// threshold and Right otherwise.
type Classifier struct {
	Feature   []Feature
	Threshold float64
	Left      float64
	Right     float64
}

// CascadeStage is a cascade of classifiers whose summed scores must
// reach Threshold for a window to pass the stage.
type CascadeStage struct {
	Classifier []Classifier
	Threshold  float64
}

// Cascade is a degenerate tree of Haar-like classifiers; Size is the
// detector window size the stages were trained at.
type Cascade struct {
	Stage []CascadeStage
	Size  image.Point
}

// Match returns true if the full image is classified as an object.
func (c *Cascade) Match(m image.Image) bool {
	return c.classify(newWindow(m))
}

// Find returns a set of areas of m that match the feature cascade c.
// The detector window starts at the trained Size and grows by 10% per
// scale step; at each scale the window slides across the image in steps
// of one tenth of the window size.
func (c *Cascade) Find(m image.Image) []image.Rectangle {
	// TODO(crawshaw): Consider de-duping strategies.
	matches := []image.Rectangle{}
	w := newWindow(m)
	b := m.Bounds()
	origScale := c.Size
	for s := origScale; s.X < b.Dx() && s.Y < b.Dy(); s = s.Add(s.Div(10)) {
		// translate region and classify
		tx := image.Pt(s.X/10, 0)
		ty := image.Pt(0, s.Y/10)
		for r := image.Rect(0, 0, s.X, s.Y).Add(b.Min); r.In(b); r = r.Add(ty) {
			for r1 := r; r1.In(b); r1 = r1.Add(tx) {
				if c.classify(w.subWindow(r1)) {
					matches = append(matches, r1)
				}
			}
		}
	}
	return matches
}

// window is a rectangular view over a pair of integral images (plain
// and squared), with cached normalization statistics for that view.
type window struct {
	mi      *integral       // integral image of pixel values
	miSq    *integral       // integral image of squared pixel values
	rect    image.Rectangle // the region this window covers
	invArea float64         // 1 / (width*height) of rect
	stdDev  float64         // standard deviation of pixels in rect
}

// init precomputes the statistics used to normalize classifier scores:
// the inverse window area and the standard deviation of pixel values
// inside the window, both derived from the two integral images.
func (w *window) init() {
	w.invArea = 1 / float64(w.rect.Dx()*w.rect.Dy())
	mean := float64(w.mi.sum(w.rect)) * w.invArea
	vr := float64(w.miSq.sum(w.rect))*w.invArea - mean*mean
	// Guard against a slightly negative variance from numeric error so
	// stdDev stays a usable multiplier.
	if vr < 0 {
		vr = 1
	}
	w.stdDev = math.Sqrt(vr)
}
// newWindow builds a window covering all of m, computing the integral
// images and normalization statistics for it.
func newWindow(m image.Image) *window {
	mi, miSq := newIntegrals(m)
	w := &window{mi: mi, miSq: miSq, rect: m.Bounds()}
	w.init()
	return w
}

// subWindow returns a window over region r that shares this window's
// integral images, with statistics recomputed for r.
func (w *window) subWindow(r image.Rectangle) *window {
	sub := &window{mi: w.mi, miSq: w.miSq, rect: r}
	sub.init()
	return sub
}
// classify scores the window against this weak classifier: the weighted
// sum of feature-rectangle sums (projected from trained coordinates to
// the window via pr, then normalized by window area) is compared to
// Threshold scaled by the window's standard deviation (lighting
// normalization), selecting the Left or Right leaf value.
func (c *Classifier) classify(w *window, pr *projector) float64 {
	s := 0.0
	for _, f := range c.Feature {
		s += float64(w.mi.sum(pr.rect(f.Rect))) * f.Weight
	}
	s *= w.invArea // normalize to maintain scale invariance
	if s < c.Threshold*w.stdDev {
		return c.Left
	}
	return c.Right
}

// classify sums the stage's weak-classifier scores and accepts the
// window when the total reaches the stage threshold.
func (s *CascadeStage) classify(w *window, pr *projector) bool {
	sum := 0.0
	for _, c := range s.Classifier {
		sum += c.classify(w, pr)
	}
	return sum >= s.Threshold
}

// classify runs the full cascade over the window: every stage must
// accept, and the first rejecting stage short-circuits with false.
func (c *Cascade) classify(w *window) bool {
	pr := newProjector(w.rect, image.Rectangle{image.Pt(0, 0), c.Size})
	for _, s := range c.Stage {
		if !s.classify(w, pr) {
			return false
		}
	}
	return true
} | detect/detect.go | 0.719088 | 0.437944 | detect.go | starcoder
package geo
import(
"fmt"
"math"
)
const kLineSnapKM = 0.3 // How far a trackpoint can be from a line, and still be on that line (km)
// NOTE(review): exported duplicate of kLineSnapKM; keep the two values
// in sync (or drop one) when next touching this file.
const KLineSnapKM = 0.3 // How far a trackpoint can be from a line, and still be on that line
// Always using two anchor points, and then derive the equation of the line: y = m.x + b
// LatlongLine is a line (segment) through From and To, caching its
// slope-intercept form (longitude as x, latitude as y).
type LatlongLine struct {
	From,To Latlong
	m,b float64
	I,J int // index values for the two points used to build this line
}
// String renders the line equation, the two endpoints, and the source
// point indices for debugging.
func (line LatlongLine)String() string {
	return fmt.Sprintf("[y=%.2f.x + %.2f] (%.3f,%.3f)->(%.3f,%.3f) [i=%d,j=%d]",
		line.m, line.b, line.From.x(), line.From.y(), line.To.x(), line.To.y(), line.I, line.J)
}
// {{{ calcM, calcB
// Helpers for line construction
// calcM returns the gradient of the line through p1 and p2; it is
// ±Inf for a vertical line (constant x).
func calcM(p1,p2 Latlong) float64 { return (p2.y() - p1.y()) / (p2.x() - p1.x()) }
// calcB returns the y-intercept for gradient m through point p, or NaN
// for vertical lines where the slope-intercept form does not apply.
func calcB(m float64, p Latlong) float64 {
	// Given a gradient(m) and a point, work out b (the value of y when x==0)
	if math.IsInf(m,0) { return math.NaN() } // Equation of line does not apply for vertical lines
	// y=m.x+b; so b=y-m.x for both points (x,y)
	return p.y() - (m * p.x())
}
// }}}
// {{{ latlong.LineTo (BuildLine)
// LineTo builds the line segment from the receiver to the given point,
// caching its gradient and y-intercept.
func (from Latlong)LineTo(to Latlong) LatlongLine {
	m := calcM(from,to)
	return LatlongLine{
		From: from,
		To: to,
		m: m,
		b: calcB(m,to),
	}
}
// BuildLine is an alias for LineTo.
func (from Latlong)BuildLine(to Latlong) LatlongLine { return from.LineTo(to) }
// }}}
// {{{ l.x, l.y, l.Box
// Apply equation of line: y=mx+b
func (line LatlongLine)y(x float64) float64 { return line.m * x + line.b }
func (line LatlongLine)x(y float64) float64 { return (y - line.b) / line.m }
// Box returns the bounding box spanned by the segment's endpoints.
func (l LatlongLine)Box() LatlongBox { return l.From.BoxTo(l.To) }
// IsVertical reports whether the line has infinite gradient (constant x).
func (l LatlongLine)IsVertical() bool { return math.IsInf(l.m,0) }
// IsDegenerate reports whether both endpoints are the same point.
func (l LatlongLine)IsDegenerate() bool {
	return l.From.Lat==l.To.Lat && l.From.Long==l.To.Long
}
// }}}
// {{{ l.intersectByLineEquations
// intersectByLineEquations intersects the two lines treated as
// infinite, using their cached m,b constants. For a vertical line the
// From anchor point supplies the fixed x; the To point is not needed.
// The returned bool is true when the lines are parallel (equal slope),
// in which case the returned point is the zero value.
// https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection#Given_the_equations_of_the_lines
func (l1 LatlongLine)intersectByLineEquations(l2 LatlongLine) (Latlong, bool) {
	if l1.m == l2.m {
		return Latlong{}, true // same slope; parallel (or coincident)
	}
	// 1. y=ax+c   2. y=bx+d
	a, c := l1.m, l1.b
	b, d := l2.m, l2.b
	switch {
	case math.IsInf(a, 0):
		// l1 is vertical: x is fixed by its anchor; evaluate l2 there.
		x := l1.From.x()
		return Latlong{l2.y(x), x}, false
	case math.IsInf(b, 0):
		// l2 is vertical: as above, with the roles swapped.
		x := l2.From.x()
		return Latlong{l1.y(x), x}, false
	default:
		x := (d - c) / (a - b)
		y := (a*d - b*c) / (a - b)
		return Latlong{y, x}, false
	}
}
// }}}
// {{{ l.PerpendicularTo
// PerpendicularTo builds the perpendicular from pos onto this line:
// the result runs From=pos to To=the foot of the perpendicular on orig.
// NOTE(review): when orig is horizontal (m==0), m here becomes -Inf and
// calcB yields NaN, but the vertical branch of intersectByLineEquations
// then keys off the anchor point, so the result still holds — confirm.
func (orig LatlongLine)PerpendicularTo(pos Latlong) LatlongLine {
	// The perpendicular has a gradient that is the negative inverse of the orig line
	m := -1 / orig.m
	perp := LatlongLine{
		From: pos,
		m: m,
		b: calcB(m, pos),
	}
	// Both lines have equations, and anchor points at l.From; we can intersect them to
	// derive the endpoint of the perpendicular. No chance of them being parallel :)
	perp.To,_ = orig.intersectByLineEquations(perp)
	return perp
}
// }}}
// {{{ l.ClosestTo
// Presumes infinite line
// ClosestTo returns the point on the (infinite) line nearest to pos.
func (line LatlongLine)ClosestTo(pos Latlong) Latlong {
	perp := line.PerpendicularTo(pos) // end point of this is the intersection point
	return perp.To
}
// }}}
// {{{ l.ClosestDistance
// ClosestDistance returns the distance (km) from pos to the nearest
// point on the (infinite) line.
func (line LatlongLine)ClosestDistance(pos Latlong) float64 {
	return pos.Dist(line.ClosestTo(pos))
}
// }}}
// {{{ l.DistAlongLine
// If one unit is the dist between .From and .To, and .From is zero; how far along the line is pos?
// Values < 0 or > 1 mean the projection of pos falls outside the segment.
func (line LatlongLine)DistAlongLine(pos Latlong) float64 {
	// Todo: project pos onto the line itself, before doing the d1,d2,dPos stuff
	// That means a perpendicular; measure its length and discard if close to zero
	// The perpendicular will connect 'pos' to 'line'; the perp's endpoint will lie on 'line'
	// (although, latlong geometry is skew, so this is never really 'perpendicular' :/
	perp := line.PerpendicularTo(pos)
	// If line is more horizontal than vertical, project onto X axis; else Y
	// (pick the axis with more spread, to avoid dividing by a tiny delta)
	d1,d2,dPos := 0.0,0.0,0.0
	if math.Abs(line.m) < 1.0 {
		d1,d2,dPos = line.From.x(),line.To.x(),perp.To.x()
	} else {
		d1,d2,dPos = line.From.y(),line.To.y(),perp.To.y()
	}
	// d1 represents 0.0; d2 represents 1.0. Where is pos ?
	return (dPos - d1) / (d2 - d1)
}
// }}}
// {{{ l.IntersectsUnbounded
// IntersectsUnbounded intersects the two lines treated as infinite.
// The boolean reports whether an intersection exists (false when the
// lines are parallel).
func (l1 LatlongLine)IntersectsUnbounded(l2 LatlongLine) (Latlong, bool) {
	pt, parallel := l1.intersectByLineEquations(l2)
	if parallel {
		return pt, false
	}
	return pt, true
}
// }}}
// {{{ l.Intersects
// Intersects returns the crossing point of the two bounded segments
// and whether such a crossing exists: the infinite-line intersection
// must fall inside both segments' bounding boxes.
func (l1 LatlongLine)Intersects(l2 LatlongLine) (Latlong, bool) {
	pt, parallel := l1.intersectByLineEquations(l2)
	switch {
	case parallel:
		return pt, false
	case !l1.Box().Contains(pt), !l2.Box().Contains(pt):
		return pt, false
	}
	return pt, true
}
// }}}
// {{{ l.WhichSide
// -ve == left, +ve == right, 0 == lies-on-line
// WhichSide classifies p relative to the directed line From->To using the
// sign of the z-component of the 2D cross product (To-From) x (p-From).
func (l LatlongLine)WhichSide(p Latlong) int {
	x,y := p.x(),p.y()
	x1,y1 := l.From.x(),l.From.y()
	x2,y2 := l.To.x(),l.To.y()
	// Cross product z-component; only its sign matters.
	d := (x - x1)*(y2 - y1) - (y - y1)*(x2 - x1)
	if d < 0.0 { return +1 }
	if d > 0.0 { return -1 }
	return 0 // Lies on the line
}
// }}}
// {{{ latlong.LiesOn
// LiesOn reports whether pos sits on the line segment: it must fall inside
// the segment's bounding box AND lie within kLineSnapKM of the infinite
// line through the segment.
func (pos Latlong)LiesOn(line LatlongLine) bool {
	insideBox := line.From.BoxTo(line.To).Contains(pos)
	return insideBox && line.ClosestDistance(pos) <= kLineSnapKM
}
// }}}
// {{{ -------------------------={ E N D }=----------------------------------
// Local variables:
// folded-file: t
// end:
// }}} | line.go | 0.79736 | 0.5169 | line.go | starcoder |
package videosource
import (
"image"
"time"
)
// ObjectInfo contains the object information
type ObjectInfo struct {
	Object Image // image holding the detected object
	Description string // human-readable label for the object
	Percentage int // detection confidence (presumably 0-100 — confirm with the detector)
}
// NewObjectInfo creates a new ObjectInfo wrapping img, with an empty
// description and a zero confidence percentage.
func NewObjectInfo(img Image) *ObjectInfo {
	return &ObjectInfo{
		Object:      img,
		Description: "",
		Percentage:  0,
	}
}
// Ref will reference the ObjectInfo and underlying SharedMat
// (delegates to Image.Ref) and returns the receiver for chaining.
func (o *ObjectInfo) Ref() *ObjectInfo {
	o.Object.Ref()
	return o
}
// Clone will clone the ObjectInfo; the contained Image is cloned too, so
// the copy carries its own reference.
func (o *ObjectInfo) Clone() *ObjectInfo {
	return &ObjectInfo{
		Object:      *o.Object.Clone(),
		Description: o.Description,
		Percentage:  o.Percentage,
	}
}
// Cleanup will cleanup the ObjectInfo
// (releases the contained Image and resets the metadata fields).
func (o *ObjectInfo) Cleanup() {
	o.Object.Cleanup()
	o.Description = ""
	o.Percentage = 0
}
// getHighestObjectPercentage returns the maximum Percentage across objs,
// or 0 for an empty slice.
func getHighestObjectPercentage(objs []ObjectInfo) (result int) {
	for i := range objs {
		if p := objs[i].Percentage; p > result {
			result = p
		}
	}
	return
}
// FaceInfo contains the face information
type FaceInfo struct {
	Face Image // image holding the detected face
	Percentage int // detection confidence (presumably 0-100 — confirm with the detector)
}
// NewFaceInfo creates a new FaceInfo wrapping img with a zero confidence
// percentage.
func NewFaceInfo(img Image) *FaceInfo {
	return &FaceInfo{
		Face:       img,
		Percentage: 0,
	}
}
// Ref will reference the FaceInfo and underlying SharedMat
// (delegates to Image.Ref) and returns the receiver for chaining.
func (f *FaceInfo) Ref() *FaceInfo {
	f.Face.Ref()
	return f
}
// Clone will clone the FaceInfo; the contained Image is cloned too, so the
// copy carries its own reference.
func (f *FaceInfo) Clone() *FaceInfo {
	c := &FaceInfo{
		Face: *f.Face.Clone(),
		Percentage: f.Percentage,
	}
	return c
}
// Cleanup will cleanup the FaceInfo
// (releases the contained Image and resets the confidence).
func (f *FaceInfo) Cleanup() {
	f.Face.Cleanup()
	f.Percentage = 0
}
// getHighestFacePercentage returns the maximum Percentage across faces,
// or 0 for an empty slice.
func getHighestFacePercentage(faces []FaceInfo) (result int) {
	for i := range faces {
		if p := faces[i].Percentage; p > result {
			result = p
		}
	}
	return
}
// ProcessedImage is the result of running through the processes
// (motion, object, and face detection stages): the source frame, optional
// highlighted overlays, and the per-stage detections with their rectangles.
type ProcessedImage struct {
	Original Image
	HighlightedMotion Image
	HighlightedObject Image
	HighlightedFace Image
	Motions []Image
	MotionRects []image.Rectangle
	Objects []ObjectInfo
	ObjectRects []image.Rectangle
	Faces []FaceInfo
	FaceRects []image.Rectangle
}
// NewProcessedImage creates a new ProcessedImage
// wrapping original. All slices are initialized empty but non-nil.
func NewProcessedImage(original Image) *ProcessedImage {
	p := &ProcessedImage{
		Original: original,
		HighlightedMotion: Image{},
		HighlightedObject: Image{},
		HighlightedFace: Image{},
		Motions: make([]Image, 0),
		MotionRects: make([]image.Rectangle, 0),
		Objects: make([]ObjectInfo, 0),
		ObjectRects: make([]image.Rectangle, 0),
		Faces: make([]FaceInfo, 0),
		FaceRects: make([]image.Rectangle, 0),
	}
	return p
}
// Ref will reference the ProcessedImage and underlying SharedMats
// and returns the receiver for chaining.
// NOTE(review): the range loops call Ref on element *copies*; this only
// works if Image.Ref mutates shared state through an internal pointer
// (the SharedMat) rather than the Image value itself — confirm.
func (p *ProcessedImage) Ref() *ProcessedImage {
	p.Original.Ref()
	p.HighlightedMotion.Ref()
	p.HighlightedObject.Ref()
	p.HighlightedFace.Ref()
	for _, cur := range p.Motions {
		cur.Ref()
	}
	for _, cur := range p.Objects {
		cur.Ref()
	}
	for _, cur := range p.Faces {
		cur.Ref()
	}
	return p
}
// Clone will clone the ProcessedImage
// (deep-copies all images and detection entries).
// NOTE(review): MotionRects/ObjectRects/FaceRects are assigned directly, so
// clone and source share those slices' backing arrays — a later append or
// in-place mutation on either aliases the other. Confirm this is intended.
func (p *ProcessedImage) Clone() *ProcessedImage {
	c := &ProcessedImage{
		Original: *p.Original.Clone(),
		HighlightedMotion: *p.HighlightedMotion.Clone(),
		HighlightedObject: *p.HighlightedObject.Clone(),
		HighlightedFace: *p.HighlightedFace.Clone(),
		Motions: make([]Image, 0),
		MotionRects: p.MotionRects,
		Objects: make([]ObjectInfo, 0),
		ObjectRects: p.ObjectRects,
		Faces: make([]FaceInfo, 0),
		FaceRects: p.FaceRects,
	}
	for _, cur := range p.Motions {
		c.Motions = append(c.Motions, *cur.Clone())
	}
	for _, cur := range p.Objects {
		c.Objects = append(c.Objects, *cur.Clone())
	}
	for _, cur := range p.Faces {
		c.Faces = append(c.Faces, *cur.Clone())
	}
	return c
}
// Cleanup will cleanup the ProcessedImage
// (releases every held image and resets all slices to empty).
// NOTE(review): as in Ref, the range loops invoke Cleanup on element
// copies; this relies on the underlying SharedMat being shared via an
// internal pointer — confirm.
func (p *ProcessedImage) Cleanup() {
	p.Original.Cleanup()
	p.HighlightedMotion.Cleanup()
	p.HighlightedObject.Cleanup()
	p.HighlightedFace.Cleanup()
	for _, cur := range p.Motions {
		cur.Cleanup()
	}
	p.Motions = make([]Image, 0)
	p.MotionRects = make([]image.Rectangle, 0)
	for _, cur := range p.Objects {
		cur.Cleanup()
	}
	p.Objects = make([]ObjectInfo, 0)
	p.ObjectRects = make([]image.Rectangle, 0)
	for _, cur := range p.Faces {
		cur.Cleanup()
	}
	p.Faces = make([]FaceInfo, 0)
	p.FaceRects = make([]image.Rectangle, 0)
}
// The five types below implement sort.Interface over []ProcessedImage with
// different orderings, for use with sort.Sort.
// ProcessedImageByCreatedTime sorting ascending order
type ProcessedImageByCreatedTime []ProcessedImage
func (b ProcessedImageByCreatedTime) Len() int { return len(b) }
func (b ProcessedImageByCreatedTime) Less(i, j int) bool {
	return b[i].Original.CreatedTime.Before(b[j].Original.CreatedTime)
}
func (b ProcessedImageByCreatedTime) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
// ProcessedImageByObjLen sorting descending order
type ProcessedImageByObjLen []ProcessedImage
func (b ProcessedImageByObjLen) Len() int { return len(b) }
func (b ProcessedImageByObjLen) Less(i, j int) bool {
	return len(b[i].Objects) > len(b[j].Objects)
}
func (b ProcessedImageByObjLen) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
// ProcessedImageByObjPercent sorting descending order
// (by the highest object confidence in each image)
type ProcessedImageByObjPercent []ProcessedImage
func (b ProcessedImageByObjPercent) Len() int { return len(b) }
func (b ProcessedImageByObjPercent) Less(i, j int) bool {
	return getHighestObjectPercentage(b[i].Objects) > getHighestObjectPercentage(b[j].Objects)
}
func (b ProcessedImageByObjPercent) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
// ProcessedImageByFaceLen sorting descending order
type ProcessedImageByFaceLen []ProcessedImage
func (b ProcessedImageByFaceLen) Len() int { return len(b) }
func (b ProcessedImageByFaceLen) Less(i, j int) bool {
	return len(b[i].Faces) > len(b[j].Faces)
}
func (b ProcessedImageByFaceLen) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
// ProcessedImageByFacePercent sorting descending order
// (by the highest face confidence in each image)
type ProcessedImageByFacePercent []ProcessedImage
func (b ProcessedImageByFacePercent) Len() int { return len(b) }
func (b ProcessedImageByFacePercent) Less(i, j int) bool {
	return getHighestFacePercentage(b[i].Faces) > getHighestFacePercentage(b[j].Faces)
}
func (b ProcessedImageByFacePercent) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
// ProcessedImageFpsChan will notify caller via ProcessedImage channel at given fps
type ProcessedImageFpsChan struct {
	outFps int // target output frames per second
	streamChan chan ProcessedImage // inbound frames (fed by Send, closed by Close)
	done chan bool // closed by the Start goroutine when it has fully shut down
}
// NewProcessedImageFpsChan creates a new ProcessedImageFpsChan
// with unbuffered channels; call Start to begin forwarding.
func NewProcessedImageFpsChan(outFps int) *ProcessedImageFpsChan {
	p := &ProcessedImageFpsChan{
		outFps: outFps,
		streamChan: make(chan ProcessedImage),
		done: make(chan bool),
	}
	return p
}
// Start runs the channel
// Start spawns the forwarding goroutine and returns the output channel.
// The goroutine keeps only the most recent image received on streamChan,
// cleaning up any frame superseded before the next tick, and emits the held
// frame on the returned channel once per tick (~outFps per second). It
// terminates when streamChan is closed (via Close), then stops the ticker,
// cleans up any still-held frame, and closes the output and done channels.
// NOTE(review): outFps == 0 causes an integer divide-by-zero panic in the
// ticker-duration expression — confirm callers validate outFps.
func (p *ProcessedImageFpsChan) Start() chan ProcessedImage {
	outChan := make(chan ProcessedImage)
	go func() {
		var curImage *ProcessedImage
		writeTick := time.NewTicker(time.Duration(1000/p.outFps) * time.Millisecond)
	Loop:
		for {
			select {
			case img, ok := <-p.streamChan:
				if !ok {
					// Channel closed: img is the zero value here; Cleanup on
					// it is presumably a harmless no-op — confirm.
					img.Cleanup()
					break Loop
				}
				if curImage != nil {
					// Drop the frame that was never emitted before this one.
					curImage.Cleanup()
				}
				curImage = &img
			case <-writeTick.C:
				if curImage != nil {
					outChan <- *curImage
					curImage = nil
				}
			}
		}
		writeTick.Stop()
		if curImage != nil {
			curImage.Cleanup()
		}
		close(outChan)
		close(p.done)
	}()
	return outChan
}
// Send ProcessedImage to channel
// (blocks until the Start goroutine receives it; panics if called after Close).
func (p *ProcessedImageFpsChan) Send(img ProcessedImage) {
	p.streamChan <- img
}
// Close notified by caller that input stream is done/closed
// (triggers shutdown of the Start goroutine; do not call Send afterwards).
func (p *ProcessedImageFpsChan) Close() {
	close(p.streamChan)
}
// Wait until done
// (blocks until the Start goroutine has closed its output and exited).
func (p *ProcessedImageFpsChan) Wait() {
	<-p.done
} | videosource/processedimage.go | 0.657098 | 0.460895 | processedimage.go | starcoder
package hdrhist
import "errors"
/*
This file was ported from the Java source of HdrHistogram written
by <NAME>. See https://hdrhistogram.github.io/HdrHistogram/.
This files provides encoding and decoding functions for writing and
reading ZigZag-encoded LEB128-64b9B-variant (Little Endian Base 128)
values to/from a byte slice. LEB128's variable length encoding
provides for using a smaller nuber of bytes for smaller values, and
the use of ZigZag encoding allows small (closer to zero) negative
values to use fewer bytes. Details on both LEB128 and ZigZag can be
readily found elsewhere.
The LEB128-64b9B-variant encoding used here diverges from the
"original" LEB128 as it extends to 64 bit values: In the original
LEB128, a 64 bit value can take up to 10 bytes in the stream, where
this variant's encoding of a 64 bit values will max out at 9 bytes.
As such, this encoder/decoder should NOT be used for encoding or
decoding "standard" LEB128 formats (e.g. Google Protocol Buffers).
*/
// encodeZigZag ZigZag-encodes i and returns its LEB128-64b9B byte
// representation: up to eight bytes carry 7 payload bits each with the high
// bit as a continuation flag, and a 9th byte (when needed) carries a full 8
// bits, so a 64-bit value never takes more than 9 bytes.
func encodeZigZag(i int64) []byte {
	b := make([]byte, 0, 8)
	value := uint64((i << 1) ^ (i >> 63))
	// Emit continuation bytes while more than 7 bits remain, capping at 8
	// such bytes; whatever is left (at most 8 bits) goes in the final byte
	// without a continuation flag.
	for n := 0; n < 8 && value>>7 != 0; n++ {
		b = append(b, byte(value&0x7F)|0x80)
		value >>= 7
	}
	return append(b, byte(value))
}
// decodeZigZagUnsafe decodes one ZigZag LEB128-64b9B value from the front of
// b, returning the decoded value and the number of bytes consumed (1-9).
// Bytes 1-8 each contribute 7 payload bits with the high bit as a
// continuation flag; the 9th byte, when present, contributes a full 8 bits.
// It panics (index out of range) when b ends mid-value; decodeZigZag wraps
// that panic into an error.
//
// Bug fix: the original read b[9] for the 9th byte of a maximal encoding,
// skipping b[8] — the encoder writes at most 9 bytes at indices 0-8, so a
// 9-byte value decoded incorrectly (or panicked on an exact-length slice).
func decodeZigZagUnsafe(b []byte) (int64, int) {
	var value int64
	vlen := 0
	for {
		v := int64(b[vlen])
		if vlen == 8 {
			// Final byte of a maximal 9-byte encoding: all 8 bits are payload,
			// with no continuation flag to check.
			value |= v << 56
			vlen++
			break
		}
		value |= (v & 0x7F) << (7 * uint(vlen))
		vlen++
		if v&0x80 == 0 {
			break
		}
	}
	// Undo the ZigZag transform.
	return int64((uint64(value) >> 1) ^ uint64(-(value & 1))), vlen
}
// decodeZigZag safely catch bounds errors due to
// missing data.
// It wraps decodeZigZagUnsafe: an index-out-of-range panic from a
// truncated buffer is recovered and converted into an error. On error the
// returned value and length are zero.
func decodeZigZag(b []byte) (v int64, vlen int, err error) {
	defer func() {
		// recover only fires for panics raised in decodeZigZagUnsafe below.
		if e := recover(); e != nil {
			err = errors.New("got incomplete data")
		}
	}()
	v, vlen = decodeZigZagUnsafe(b)
	return v, vlen, nil
} | v1/ao/internal/hdrhist/zigzag.go | 0.652906 | 0.608158 | zigzag.go | starcoder
package p384
import (
"crypto/elliptic"
"math/big"
)
var (
	// p is the order of the base field, represented as little-endian 64-bit words.
	p = gfP{0xffffffff, 0xffffffff00000000, 0xfffffffffffffffe, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff}
	// pp satisfies r*rp - p*pp = 1 where rp and pp are both integers.
	pp = gfP{0x100000001, 0x1, 0xfffffffbfffffffe, 0xfffffffcfffffffa, 0xc00000002, 0x1400000014}
	// r2 is R^2 where R = 2^384 mod p.
	// NOTE(review): only five limbs are written; the sixth defaults to zero.
	// Presumably intentional — verify against the reference constant.
	r2 = gfP{0xfffffffe00000001, 0x200000000, 0xfffffffe00000000, 0x200000000, 0x1}
	// r3 is R^3 where R = 2^384 mod p.
	r3 = gfP{0xfffffffc00000002, 0x300000002, 0xfffffffcfffffffe, 0x300000005, 0xfffffffdfffffffd, 0x300000002}
	// rN1 is R^-1 where R = 2^384 mod p.
	rN1 = gfP{0xffffffe100000006, 0xffffffebffffffd8, 0xfffffffbfffffffd, 0xfffffffcfffffffa, 0xc00000002, 0x1400000014}
	// b is the curve's B parameter, Montgomery encoded.
	b = gfP{0x81188719d412dcc, 0xf729add87a4c32ec, 0x77f2209b1920022e, 0xe3374bee94938ae2, 0xb62b21f41f022094, 0xcd08114b604fbff9}
	// baseMultiples has [2^i] * G at position i.
	// Populated once by init; read-only afterwards.
	baseMultiples [384]affinePoint
)
// init precomputes baseMultiples: baseMultiples[i] = [2^i]G for the P-384
// base point G, obtained by repeated doubling. ScalarBaseMult relies on
// this table to avoid doublings at multiplication time.
func init() {
	params := elliptic.P384().Params()
	baseMultiples[0] = *newAffinePoint(params.Gx, params.Gy)
	c := &Curve{}
	for i := 1; i < len(baseMultiples); i++ {
		pt := c.double(baseMultiples[i-1].ToJacobian()).ToAffine()
		baseMultiples[i] = *pt
	}
}
// Curve implements P-384 point arithmetic in Jacobian coordinates over the
// gfP Montgomery-form field; it satisfies the elliptic.Curve interface.
type Curve struct{}
// Params returns the standard P-384 curve parameters.
func (c *Curve) Params() *elliptic.CurveParams {
	return elliptic.P384().Params()
}
// IsOnCurve reports whether (X, Y) satisfies the P-384 curve equation
// y^2 = x^3 - 3x + b, evaluated entirely in Montgomery form.
// NOTE(review): copy(x[:], X.Bits()) assumes X and Y are already reduced
// mod p and that big.Word is 64 bits wide — confirm for target platforms.
func (c *Curve) IsOnCurve(X, Y *big.Int) bool {
	x, y := &gfP{}, &gfP{}
	copy(x[:], X.Bits())
	copy(y[:], Y.Bits())
	montEncode(x, x)
	montEncode(y, y)
	// y2 = y^2; x3 = x^3
	y2, x3 := &gfP{}, &gfP{}
	gfpMul(y2, y, y)
	gfpMul(x3, x, x)
	gfpMul(x3, x3, x)
	// x3 = x^3 - 3x + b
	threeX := &gfP{}
	gfpAdd(threeX, x, x)
	gfpAdd(threeX, threeX, x)
	gfpSub(x3, x3, threeX)
	gfpAdd(x3, x3, &b)
	return *y2 == *x3
}
// add computes a + b using mixed Jacobian/affine point addition (the
// structure matches the EFD "madd"-style formulas — verify against the
// reference). Identity inputs are handled up front, and equal
// x-coordinates dispatch to double (same point) or return the identity
// (inverse points).
func (c *Curve) add(a *jacobianPoint, b *affinePoint) *jacobianPoint {
	if a.IsZero() {
		return b.ToJacobian()
	} else if b.IsZero() {
		return a.Dup()
	}
	// z1z1 = Z1^2; u2 = X2*Z1^2
	z1z1, u2 := &gfP{}, &gfP{}
	gfpMul(z1z1, &a.z, &a.z)
	gfpMul(u2, &b.x, z1z1)
	// s2 = Y2*Z1^3
	s2 := &gfP{}
	gfpMul(s2, &b.y, &a.z)
	gfpMul(s2, s2, z1z1)
	if a.x == *u2 {
		if a.y != *s2 {
			// Same x, opposite y: the sum is the point at infinity.
			return &jacobianPoint{}
		}
		return c.double(a)
	}
	// h = U2-X1; r = S2-Y1
	h, r := &gfP{}, &gfP{}
	gfpSub(h, u2, &a.x)
	gfpSub(r, s2, &a.y)
	h2, h3 := &gfP{}, &gfP{}
	gfpMul(h2, h, h)
	gfpMul(h3, h2, h)
	h2x1 := &gfP{}
	gfpMul(h2x1, h2, &a.x)
	// X3 = r^2 - h^3 - 2*h^2*X1; Y3 = r*(h^2*X1 - X3) - h^3*Y1; Z3 = h*Z1
	x3, y3, z3 := &gfP{}, &gfP{}, &gfP{}
	gfpMul(x3, r, r)
	gfpSub(x3, x3, h3)
	gfpSub(x3, x3, h2x1)
	gfpSub(x3, x3, h2x1)
	gfpSub(y3, h2x1, x3)
	gfpMul(y3, y3, r)
	h3y1 := &gfP{}
	gfpMul(h3y1, h3, &a.y)
	gfpSub(y3, y3, h3y1)
	gfpMul(z3, h, &a.z)
	return &jacobianPoint{*x3, *y3, *z3}
}
// double computes 2*a in Jacobian coordinates. The variable names
// (delta, gamma, alpha, beta) follow the EFD "dbl-2001-b"-style doubling
// formulas for a = -3 curves — verify against the reference.
func (c *Curve) double(a *jacobianPoint) *jacobianPoint {
	// delta = Z^2; gamma = Y^2; alpha = 3*(X-delta)*(X+delta)
	delta, gamma, alpha, alpha2 := &gfP{}, &gfP{}, &gfP{}, &gfP{}
	gfpMul(delta, &a.z, &a.z)
	gfpMul(gamma, &a.y, &a.y)
	gfpSub(alpha, &a.x, delta)
	gfpAdd(alpha2, &a.x, delta)
	gfpMul(alpha, alpha, alpha2)
	*alpha2 = *alpha
	gfpAdd(alpha, alpha, alpha)
	gfpAdd(alpha, alpha, alpha2)
	// beta = X*gamma
	beta := &gfP{}
	gfpMul(beta, &a.x, gamma)
	// X3 = alpha^2 - 8*beta
	x3, beta8 := &gfP{}, &gfP{}
	gfpMul(x3, alpha, alpha)
	gfpAdd(beta8, beta, beta)
	gfpAdd(beta8, beta8, beta8)
	gfpAdd(beta8, beta8, beta8)
	gfpSub(x3, x3, beta8)
	// Z3 = (Y+Z)^2 - gamma - delta
	z3 := &gfP{}
	gfpAdd(z3, &a.y, &a.z)
	gfpMul(z3, z3, z3)
	gfpSub(z3, z3, gamma)
	gfpSub(z3, z3, delta)
	// Y3 = alpha*(4*beta - X3) - 8*gamma^2
	gfpAdd(beta, beta, beta)
	gfpAdd(beta, beta, beta)
	gfpSub(beta, beta, x3)
	y3 := &gfP{}
	gfpMul(y3, alpha, beta)
	gfpMul(gamma, gamma, gamma)
	gfpAdd(gamma, gamma, gamma)
	gfpAdd(gamma, gamma, gamma)
	gfpAdd(gamma, gamma, gamma)
	gfpSub(y3, y3, gamma)
	return &jacobianPoint{*x3, *y3, *z3}
}
// Add returns (x1,y1) + (x2,y2) in affine big.Int coordinates, converting
// through the internal Jacobian representation.
func (c *Curve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) {
	pt := c.add(newAffinePoint(x1, y1).ToJacobian(), newAffinePoint(x2, y2))
	return pt.ToAffine().ToInt()
}
// Double returns 2*(x1,y1) in affine big.Int coordinates.
func (c *Curve) Double(x1, y1 *big.Int) (x, y *big.Int) {
	pt := c.double(newAffinePoint(x1, y1).ToJacobian())
	return pt.ToAffine().ToInt()
}
// ScalarMult returns k*(x1,y1) using a plain left-to-right double-and-add
// over the big-endian bytes of k.
// NOTE(review): this is not constant-time — the add is conditional on each
// key bit, which leaks timing information. Confirm that is acceptable for
// the intended use.
func (c *Curve) ScalarMult(x1, y1 *big.Int, k []byte) (x, y *big.Int) {
	pt := newAffinePoint(x1, y1)
	sum := &jacobianPoint{}
	for i := 0; i < len(k); i++ {
		for j := 7; j >= 0; j-- {
			sum = c.double(sum)
			if (k[i]>>uint(j))&1 == 1 {
				sum = c.add(sum, pt)
			}
		}
	}
	return sum.ToAffine().ToInt()
}
// ScalarBaseMult returns k*G using the precomputed baseMultiples table:
// for the first (up to) 48 scalar bytes, bit j of byte i has weight
// 2^(8*(max-i-1)+j), so the matching table entry is added directly with no
// doublings. Scalars longer than 48 bytes fall back to double-and-add with
// G for the remaining bytes.
// NOTE(review): like ScalarMult this is not constant-time; also verify the
// >48-byte path (which doubles the accumulated sum) against a reference
// implementation if oversized scalars are ever supplied.
func (c *Curve) ScalarBaseMult(k []byte) (x, y *big.Int) {
	sum := &jacobianPoint{}
	max := 48
	if len(k) < 48 {
		max = len(k)
	}
	for i := 0; i < max; i++ {
		for j := 7; j >= 0; j-- {
			if (k[i]>>uint(j))&1 == 1 {
				sum = c.add(sum, &baseMultiples[8*(max-i-1)+j])
			}
		}
	}
	for i := 48; i < len(k); i++ {
		for j := 7; j >= 0; j-- {
			sum = c.double(sum)
			if (k[i]>>uint(j))&1 == 1 {
				sum = c.add(sum, &baseMultiples[0])
			}
		}
	}
	return sum.ToAffine().ToInt()
}
// CombinedMult returns baseScalar*G + scalar*(bigX,bigY) via Shamir's trick
// (interleaved double-scalar multiplication): G+P is precomputed once, then
// a single doubling chain scans both scalars' bits together, adding G, P,
// or G+P per bit pair. The shorter scalar is virtually left-padded with
// zero bytes (kb/ks offsets) so the bit positions align.
// NOTE(review): not constant-time, as with the other multipliers here.
func (c *Curve) CombinedMult(bigX, bigY *big.Int, baseScalar, scalar []byte) (x, y *big.Int) {
	ptA := baseMultiples[0]
	ptB := newAffinePoint(bigX, bigY)
	ptC := c.add(ptA.ToJacobian(), ptB).ToAffine()
	sum := &jacobianPoint{}
	kb, ks := 0, 0
	if len(baseScalar) < len(scalar) {
		kb = len(scalar) - len(baseScalar)
	} else if len(scalar) < len(baseScalar) {
		ks = len(baseScalar) - len(scalar)
	}
	for i := 0; i < len(baseScalar)+kb; i++ {
		for j := 7; j >= 0; j-- {
			sum = c.double(sum)
			var a, b byte
			if k := i - kb; k >= 0 && k < len(baseScalar) {
				a = (baseScalar[k] >> uint(j)) & 1
			}
			if k := i - ks; k >= 0 && k < len(scalar) {
				b = (scalar[k] >> uint(j)) & 1
			}
			if a == 1 && b == 0 {
				sum = c.add(sum, &ptA)
			} else if a == 0 && b == 1 {
				sum = c.add(sum, ptB)
			} else if a == 1 && b == 1 {
				sum = c.add(sum, ptC)
			}
		}
	}
	return sum.ToAffine().ToInt()
} | p384.go | 0.617051 | 0.451145 | p384.go | starcoder
package actions
import (
"encoding/json"
)
// InputFieldDefinition Configuration for an input field on the custom action
// (appears to be OpenAPI-generator output; the JSON tags are part of the
// wire contract and must not be renamed).
type InputFieldDefinition struct {
	TypeDefinition FieldTypeDefinition `json:"typeDefinition"`
	// Controls what kind of input a customer can use to specify the field value. Must contain exactly one of `STATIC_VALUE` or `OBJECT_PROPERTY`. If `STATIC_VALUE`, the customer will be able to choose a value when configuring the custom action; if `OBJECT_PROPERTY`, the customer will be able to choose a property from the enrolled workflow object that the field value will be copied from. In the future we may support more than one input control type here.
	SupportedValueTypes *[]string `json:"supportedValueTypes,omitempty"`
	// Whether the field is required for the custom action to be valid
	IsRequired bool `json:"isRequired"`
}
// NewInputFieldDefinition instantiates a new InputFieldDefinition object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewInputFieldDefinition(typeDefinition FieldTypeDefinition, isRequired bool) *InputFieldDefinition {
	this := InputFieldDefinition{}
	this.TypeDefinition = typeDefinition
	this.IsRequired = isRequired
	return &this
}
// NewInputFieldDefinitionWithDefaults instantiates a new InputFieldDefinition object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
// (SupportedValueTypes stays nil and IsRequired stays false).
func NewInputFieldDefinitionWithDefaults() *InputFieldDefinition {
	this := InputFieldDefinition{}
	return &this
}
// Generated accessor boilerplate: getters return zero values on a nil
// receiver, and the *Ok variants report whether the value was actually set.
// GetTypeDefinition returns the TypeDefinition field value
func (o *InputFieldDefinition) GetTypeDefinition() FieldTypeDefinition {
	if o == nil {
		var ret FieldTypeDefinition
		return ret
	}
	return o.TypeDefinition
}
// GetTypeDefinitionOk returns a tuple with the TypeDefinition field value
// and a boolean to check if the value has been set.
func (o *InputFieldDefinition) GetTypeDefinitionOk() (*FieldTypeDefinition, bool) {
	if o == nil {
		return nil, false
	}
	return &o.TypeDefinition, true
}
// SetTypeDefinition sets field value
func (o *InputFieldDefinition) SetTypeDefinition(v FieldTypeDefinition) {
	o.TypeDefinition = v
}
// GetSupportedValueTypes returns the SupportedValueTypes field value if set, zero value otherwise.
func (o *InputFieldDefinition) GetSupportedValueTypes() []string {
	if o == nil || o.SupportedValueTypes == nil {
		var ret []string
		return ret
	}
	return *o.SupportedValueTypes
}
// GetSupportedValueTypesOk returns a tuple with the SupportedValueTypes field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *InputFieldDefinition) GetSupportedValueTypesOk() (*[]string, bool) {
	if o == nil || o.SupportedValueTypes == nil {
		return nil, false
	}
	return o.SupportedValueTypes, true
}
// HasSupportedValueTypes returns a boolean if a field has been set.
func (o *InputFieldDefinition) HasSupportedValueTypes() bool {
	if o != nil && o.SupportedValueTypes != nil {
		return true
	}
	return false
}
// SetSupportedValueTypes gets a reference to the given []string and assigns it to the SupportedValueTypes field.
func (o *InputFieldDefinition) SetSupportedValueTypes(v []string) {
	o.SupportedValueTypes = &v
}
// GetIsRequired returns the IsRequired field value
func (o *InputFieldDefinition) GetIsRequired() bool {
	if o == nil {
		var ret bool
		return ret
	}
	return o.IsRequired
}
// GetIsRequiredOk returns a tuple with the IsRequired field value
// and a boolean to check if the value has been set.
func (o *InputFieldDefinition) GetIsRequiredOk() (*bool, bool) {
	if o == nil {
		return nil, false
	}
	return &o.IsRequired, true
}
// SetIsRequired sets field value
func (o *InputFieldDefinition) SetIsRequired(v bool) {
	o.IsRequired = v
}
// MarshalJSON serializes the InputFieldDefinition, emitting the optional
// supportedValueTypes key only when it is set. (The generator's dead
// `if true { ... }` guards around the required fields are removed.)
func (o InputFieldDefinition) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	toSerialize["typeDefinition"] = o.TypeDefinition
	if o.SupportedValueTypes != nil {
		toSerialize["supportedValueTypes"] = o.SupportedValueTypes
	}
	toSerialize["isRequired"] = o.IsRequired
	return json.Marshal(toSerialize)
}
// NullableInputFieldDefinition wraps an InputFieldDefinition so callers can
// distinguish "unset" from "explicitly null" when (un)marshaling JSON.
type NullableInputFieldDefinition struct {
	value *InputFieldDefinition
	isSet bool
}
// Get returns the wrapped value (nil when unset or null).
func (v NullableInputFieldDefinition) Get() *InputFieldDefinition {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableInputFieldDefinition) Set(val *InputFieldDefinition) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set (or UnmarshalJSON) has been called.
func (v NullableInputFieldDefinition) IsSet() bool {
	return v.isSet
}
// Unset clears the value and the set flag.
func (v *NullableInputFieldDefinition) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableInputFieldDefinition returns a wrapper already marked as set.
func NewNullableInputFieldDefinition(val *InputFieldDefinition) *NullableInputFieldDefinition {
	return &NullableInputFieldDefinition{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value (JSON null when the value is nil).
func (v NullableInputFieldDefinition) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes into the wrapped value and marks the wrapper set,
// even for an explicit JSON null.
func (v *NullableInputFieldDefinition) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | generated/actions/model_input_field_definition.go | 0.80969 | 0.425128 | model_input_field_definition.go | starcoder
package intree
import (
"math"
"math/rand"
)
// Bounds is the main interface expected by NewINTree(); requires Limits method to access interval limits.
// NOTE(review): StartingPoint is required by the interface but never called
// within this package — confirm external consumers need it.
type Bounds interface {
	Limits() (Lower, Upper float64)
	StartingPoint() (ID int, Name string)
}
// INTree is the main package object;
// holds Slice of reference indices and the respective interval limits.
// lmts stores three float64s per node — lower limit, upper limit, and the
// maximum upper limit of the node's subtree (filled by augment) — while
// idxs[i] maps node position i back to the original input index.
type INTree struct {
	idxs []int
	lmts []float64
}
// buildTree is the internal tree construction function;
// creates, sorts and augments nodes into Slices.
// The nodes are sorted by lower limit; the implicit tree root is the middle
// element, with left/right halves as subtrees (see Including).
func (inT *INTree) buildTree(bnds []Bounds) {
	inT.idxs = make([]int, len(bnds))
	inT.lmts = make([]float64, 3*len(bnds))
	for i, v := range bnds {
		inT.idxs[i] = i
		l, u := v.Limits()
		inT.lmts[3*i] = l
		inT.lmts[3*i+1] = u
		inT.lmts[3*i+2] = 0 // subtree max; filled in by augment below
	}
	sort(inT.lmts, inT.idxs)
	augment(inT.lmts, inT.idxs)
}
// Including is the main entry point for bounds searches;
// traverses the tree and collects intervals that overlap with the given value.
// Returns the ORIGINAL input indices (as passed to NewINTree) of every
// interval whose [lower, upper] range contains val. Traversal is iterative,
// using an explicit stack of (left, right) node-range pairs; the per-node
// subtree maximum (lmts[3*cn+2]) prunes left descents that cannot contain val.
func (inT *INTree) Including(val float64) []int {
	stk := []int{0, len(inT.idxs) - 1}
	res := []int{}
	for len(stk) > 0 {
		// Pop the (lb, rb) range off the stack (rb was pushed last).
		rb := stk[len(stk)-1]
		stk = stk[:len(stk)-1]
		lb := stk[len(stk)-1]
		stk = stk[:len(stk)-1]
		if lb == rb+1 {
			continue
		}
		// cn is this range's root; nm is the subtree's maximum upper limit.
		cn := int(math.Ceil(float64(lb+rb) / 2.0))
		nm := inT.lmts[3*cn+2]
		if val <= nm {
			stk = append(stk, lb)
			stk = append(stk, cn-1)
		}
		l := inT.lmts[3*cn]
		if l <= val {
			// Right subtree can still contain val (lower limits grow rightward).
			stk = append(stk, cn+1)
			stk = append(stk, rb)
			u := inT.lmts[3*cn+1]
			if val <= u {
				res = append(res, inT.idxs[cn])
			}
		}
	}
	return res
}
// NewINTree builds a static interval tree over the given bounds and
// returns it ready for Including queries.
func NewINTree(bnds []Bounds) *INTree {
	tree := &INTree{}
	tree.buildTree(bnds)
	return tree
}
// augment is an internal utility function, adding maximum value of all child nodes to the current node.
// Recursively stores, at each subtree root (the middle element r), the
// maximum upper limit found anywhere in that subtree (lmts[3*r+2]);
// Including uses this value to prune left descents.
func augment(lmts []float64, idxs []int) {
	if len(idxs) < 1 {
		return
	}
	// Maximum upper limit across this whole (sub)range.
	max := 0.0
	for idx := range idxs {
		if lmts[3*idx+1] > max {
			max = lmts[3*idx+1]
		}
	}
	r := len(idxs)>>1
	lmts[3*r+2] = max
	augment(lmts[:3*r], idxs[:r])
	augment(lmts[3*r+3:], idxs[r+1:])
}
// sort is an internal utility function, sorting the tree by lowest limits using Random Pivot QuickSearch
// (random-pivot quicksort over the parallel idxs/lmts arrays; keyed on each
// node's lower limit lmts[3*i]. Every swap moves the full 3-float triple so
// lmts stays aligned with idxs.)
func sort(lmts []float64, idxs []int) {
	if len(idxs) < 2 {
		return
	}
	// Move a random pivot to the right end.
	l, r := 0, len(idxs)-1
	p := rand.Int() % len(idxs)
	idxs[p], idxs[r] = idxs[r], idxs[p]
	lmts[3*p], lmts[3*p+1], lmts[3*p+2], lmts[3*r], lmts[3*r+1], lmts[3*r+2] = lmts[3*r], lmts[3*r+1], lmts[3*r+2], lmts[3*p], lmts[3*p+1], lmts[3*p+2]
	// Partition: elements with a smaller lower limit move before position l.
	for i := range idxs {
		if lmts[3*i] < lmts[3*r] {
			idxs[l], idxs[i] = idxs[i], idxs[l]
			lmts[3*l], lmts[3*l+1], lmts[3*l+2], lmts[3*i], lmts[3*i+1], lmts[3*i+2] = lmts[3*i], lmts[3*i+1], lmts[3*i+2], lmts[3*l], lmts[3*l+1], lmts[3*l+2]
			l++
		}
	}
	// Put the pivot in its final slot, then recurse on both halves.
	idxs[l], idxs[r] = idxs[r], idxs[l]
	lmts[3*l], lmts[3*l+1], lmts[3*l+2], lmts[3*r], lmts[3*r+1], lmts[3*r+2] = lmts[3*r], lmts[3*r+1], lmts[3*r+2], lmts[3*l], lmts[3*l+1], lmts[3*l+2]
	sort(lmts[:3*l], idxs[:l])
	sort(lmts[3*l+3:], idxs[l+1:])
} | intree.go | 0.644896 | 0.451568 | intree.go | starcoder
package d03
import (
"math"
"strconv"
"strings"
"github.com/jzimbel/adventofcode-go/solutions"
)
var (
	// origin is the central port both wires start from.
	origin = point{}
	// dirs maps an input letter to its unit vector. The y axis grows
	// downward, so 'D' is +y and 'U' is -y.
	dirs = map[byte]*dir{
		'D': &dir{0, 1},
		'L': &dir{-1, 0},
		'R': &dir{1, 0},
		'U': &dir{0, -1},
	}
)
// point is an integer grid coordinate.
type point struct {
	x int
	y int
}
// dir represents a unit vector in one of the cardinal directions.
type dir point
// record of whether a point has been visited and how long the path was when it was first visited.
type record struct {
	visited bool
	pathLength int
}
// cursor records a point and the path distance traveled to reach that point.
type cursor struct {
	p point
	dist int
}
// move a cursor 1 unit in the given direction.
// Also increments the traveled path distance.
func (c *cursor) move(d *dir) {
	c.p.x += d.x
	c.p.y += d.y
	c.dist++
}
// grid holds a map that records which points have been visited by which paths,
// as well as cursors for the two paths.
// g maps each visited point to a pair of records, one per wire (indexed by
// wire number 0 or 1); c holds the two wires' drawing cursors.
type grid struct {
	g map[point][2]record
	c [2]cursor
}
// newGrid returns an empty grid with both wire cursors at the origin.
func newGrid() *grid {
	return &grid{
		g: make(map[point][2]record),
		c: [2]cursor{},
	}
}
// moveCursor advances the given wire's cursor 1 unit in direction d.
func (g *grid) moveCursor(wireNum int, d *dir) {
	g.c[wireNum].move(d)
}
// getRecordsAtCursor returns the record pair at the wire's cursor position;
// ok is false when the point has never been visited.
func (g *grid) getRecordsAtCursor(wireNum int) (r [2]record, ok bool) {
	r, ok = g.g[g.c[wireNum].p]
	return
}
// setRecordsAtCursor stores the record pair at the wire's cursor position.
func (g *grid) setRecordsAtCursor(wireNum int, r [2]record) {
	g.g[g.c[wireNum].p] = r
}
// draw a new point on the grid for the given wire by moving that wire's cursor in the given direction.
// Only the FIRST visit's path length is kept per wire... actually the
// pathLength is overwritten on every visit — the stored value is the most
// recent distance at which the wire touched the point.
func (g *grid) draw(d *dir, wireNum int) {
	g.moveCursor(wireNum, d)
	records, ok := g.getRecordsAtCursor(wireNum)
	if !ok {
		records = [2]record{}
	}
	records[wireNum].visited = true
	records[wireNum].pathLength = g.c[wireNum].dist
	g.setRecordsAtCursor(wireNum, records)
}
// drawPath draws a full wire on the grid by applying each unit move in order.
func (g *grid) drawPath(moveSet []*dir, wireNum int) {
	for _, step := range moveSet {
		g.draw(step, wireNum)
	}
}
// intersections finds all points in the grid where the wires crossed,
// and returns a slice of cursors giving the intersection points and
// the total distances traveled by the wires at the time they crossed.
// (Each result's dist is the SUM of both wires' path lengths at the point.)
func (g *grid) intersections() []cursor {
	shared := make([]cursor, 0, len(g.g))
	for p, records := range g.g {
		if records[0].visited && records[1].visited {
			shared = append(shared, cursor{p, records[0].pathLength + records[1].pathLength})
		}
	}
	return shared
}
// getMoves decomposes the input into slices of 1-step movements.
// Input format: two newline-separated wire strings, each a comma-separated
// list of vectors like "R75" (direction letter + magnitude).
// NOTE(review): the strconv.Atoi error is silently ignored and an unknown
// direction letter yields a nil *dir — malformed input will misbehave
// downstream. Confirm input is trusted/pre-validated.
func getMoves(input string) (moves [2][]*dir) {
	wires := strings.Split(input, "\n")
	for i, wire := range wires {
		vecs := strings.Split(wire, ",")
		for _, vec := range vecs {
			mag, _ := strconv.Atoi(vec[1:])
			d := dirs[vec[0]]
			// Expand e.g. "R3" into three unit moves to the right.
			stroke := make([]*dir, mag)
			for j := 0; j < mag; j++ {
				stroke[j] = d
			}
			moves[i] = append(moves[i], stroke...)
		}
	}
	return moves
}
// manhattan calculates the Manhattan distance between two points.
// Uses pure integer arithmetic instead of the original float64 round-trip
// through math.Abs, which loses precision for coordinates beyond 2^53 and
// costs two conversions per call.
func manhattan(p1 point, p2 point) int {
	dx := p1.x - p2.x
	if dx < 0 {
		dx = -dx
	}
	dy := p1.y - p2.y
	if dy < 0 {
		dy = -dy
	}
	return dx + dy
}
// solve finds the answers to parts 1 and 2 simultaneously.
// minDist = part 1 solution
// minPathLength = part 2 solution
// NOTE(review): indexing crosses[0] panics when the wires never intersect —
// confirm puzzle inputs always produce at least one crossing.
func solve(moves [2][]*dir) (minDist, minPathLength int) {
	g := newGrid()
	for wireNum := range moves {
		g.drawPath(moves[wireNum], wireNum)
	}
	crosses := g.intersections()
	// Seed both minima from the first crossing, then scan the rest.
	minDist = manhattan(origin, crosses[0].p)
	minPathLength = crosses[0].dist
	for _, c := range crosses[1:] {
		if dist := manhattan(origin, c.p); dist < minDist {
			minDist = dist
		}
		if c.dist < minPathLength {
			minPathLength = c.dist
		}
	}
	return
}
// Solve provides the day 3 puzzle solution.
// The returned error is always nil in the current implementation.
func Solve(input string) (*solutions.Solution, error) {
	minDist, minPathLength := solve(getMoves(input))
	return &solutions.Solution{Part1: minDist, Part2: minPathLength}, nil
} | solutions/y2019/d03/solution.go | 0.68941 | 0.466603 | solution.go | starcoder
package iso20022
// Describes the amount, direction and parties involved in a payment obligation between two participants (and their netting group or trading party) of a netting service.
// (Appears to be generated from an ISO 20022 message schema; field comments
// come from the schema and the XML tags are part of the wire contract.)
type NetObligation1 struct {
	// Unique identification for the obligation.
	ObligationIdentification *Max35Text `xml:"OblgtnId"`
	// Amount and currency of the obligation
	Amount *ActiveCurrencyAndAmount `xml:"Amt"`
	// Describes the party or netting group (of the participant receiving the report) involved in the calculation of the obligation.
	ParticipantNettingIdentification *NettingIdentification1Choice `xml:"PtcptNetgId"`
	// Specifies the direction of the obligation.
	ObligationDirection *CreditDebit3Code `xml:"OblgtnDrctn"`
	// Describes the party or netting group (of the counterparty in the obligation) involved in the calculation of the obligation.
	CounterpartyNettingIdentification *NettingIdentification1Choice `xml:"CtrPtyNetgId"`
	// Describes the counterparty participant involved in the obligation.
	NetServiceCounterpartyIdentification *PartyIdentification73Choice `xml:"NetSvcCtrPtyId,omitempty"`
	// Specifies the standard settlement instructions used to issue payment to the counterparty in order to settle the obligation.
	CounterpartySettlementInstructions *SettlementParties29 `xml:"CtrPtySttlmInstrs,omitempty"`
	// Number of transactions used to calculate the obligation. This is used in reconciliation between the net report obligation and the previously provided transaction status updates.
	TransactionsNumber *Max10NumericText `xml:"TxsNb,omitempty"`
}
// Generated builder helpers: SetXxx assigns a value field, AddXxx allocates
// a nested message and returns it for further population.
func (n *NetObligation1) SetObligationIdentification(value string) {
	n.ObligationIdentification = (*Max35Text)(&value)
}
func (n *NetObligation1) SetAmount(value, currency string) {
	n.Amount = NewActiveCurrencyAndAmount(value, currency)
}
func (n *NetObligation1) AddParticipantNettingIdentification() *NettingIdentification1Choice {
	n.ParticipantNettingIdentification = new(NettingIdentification1Choice)
	return n.ParticipantNettingIdentification
}
func (n *NetObligation1) SetObligationDirection(value string) {
	n.ObligationDirection = (*CreditDebit3Code)(&value)
}
func (n *NetObligation1) AddCounterpartyNettingIdentification() *NettingIdentification1Choice {
	n.CounterpartyNettingIdentification = new(NettingIdentification1Choice)
	return n.CounterpartyNettingIdentification
}
func (n *NetObligation1) AddNetServiceCounterpartyIdentification() *PartyIdentification73Choice {
	n.NetServiceCounterpartyIdentification = new(PartyIdentification73Choice)
	return n.NetServiceCounterpartyIdentification
}
func (n *NetObligation1) AddCounterpartySettlementInstructions() *SettlementParties29 {
	n.CounterpartySettlementInstructions = new(SettlementParties29)
	return n.CounterpartySettlementInstructions
}
func (n *NetObligation1) SetTransactionsNumber(value string) {
	n.TransactionsNumber = (*Max10NumericText)(&value)
} | NetObligation1.go | 0.776284 | 0.437343 | NetObligation1.go | starcoder
package ent
import (
"fmt"
"strings"
"time"
"entgo.io/ent/dialect/sql"
"github.com/DanielTitkov/anomaly-detection-service/internal/repository/entgo/ent/anomaly"
"github.com/DanielTitkov/anomaly-detection-service/internal/repository/entgo/ent/detectionjobinstance"
)
// Anomaly is the model entity for the Anomaly schema.
// (ent-generated; the trailing unexported field holds the foreign key for
// the detection_job_instance edge.)
type Anomaly struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// CreateTime holds the value of the "create_time" field.
	CreateTime time.Time `json:"create_time,omitempty"`
	// UpdateTime holds the value of the "update_time" field.
	UpdateTime time.Time `json:"update_time,omitempty"`
	// Type holds the value of the "type" field.
	Type string `json:"type,omitempty"`
	// Value holds the value of the "value" field.
	Value float64 `json:"value,omitempty"`
	// Processed holds the value of the "processed" field.
	Processed bool `json:"processed,omitempty"`
	// PeriodStart holds the value of the "period_start" field.
	PeriodStart time.Time `json:"period_start,omitempty"`
	// PeriodEnd holds the value of the "period_end" field.
	PeriodEnd time.Time `json:"period_end,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the AnomalyQuery when eager-loading is set.
	Edges AnomalyEdges `json:"edges"`
	detection_job_instance_anomalies *int
}
// AnomalyEdges holds the relations/edges for other nodes in the graph.
type AnomalyEdges struct {
// DetectionJobInstance holds the value of the detection_job_instance edge.
DetectionJobInstance *DetectionJobInstance `json:"detection_job_instance,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
loadedTypes [1]bool
}
// DetectionJobInstanceOrErr returns the DetectionJobInstance value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e AnomalyEdges) DetectionJobInstanceOrErr() (*DetectionJobInstance, error) {
if e.loadedTypes[0] {
if e.DetectionJobInstance == nil {
// The edge detection_job_instance was loaded in eager-loading,
// but was not found.
return nil, &NotFoundError{label: detectionjobinstance.Label}
}
return e.DetectionJobInstance, nil
}
return nil, &NotLoadedError{edge: "detection_job_instance"}
}
// scanValues returns the types for scanning values from sql.Rows: one
// sql.NullXxx destination per requested column, matched by column name.
// An unrecognised column name yields an error so schema drift fails loudly.
func (*Anomaly) scanValues(columns []string) ([]interface{}, error) {
	values := make([]interface{}, len(columns))
	for i := range columns {
		switch columns[i] {
		case anomaly.FieldProcessed:
			values[i] = &sql.NullBool{}
		case anomaly.FieldValue:
			values[i] = &sql.NullFloat64{}
		case anomaly.FieldID:
			values[i] = &sql.NullInt64{}
		case anomaly.FieldType:
			values[i] = &sql.NullString{}
		case anomaly.FieldCreateTime, anomaly.FieldUpdateTime, anomaly.FieldPeriodStart, anomaly.FieldPeriodEnd:
			values[i] = &sql.NullTime{}
		case anomaly.ForeignKeys[0]: // detection_job_instance_anomalies
			values[i] = &sql.NullInt64{}
		default:
			return nil, fmt.Errorf("unexpected column %q for type Anomaly", columns[i])
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Anomaly fields. The destinations are expected to be the ones
// produced by scanValues, in the same column order; extra values beyond
// len(columns) are tolerated, fewer are an error.
func (a *Anomaly) assignValues(columns []string, values []interface{}) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case anomaly.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			// NOTE(review): unlike the other fields, ID is assigned without
			// checking value.Valid — a NULL id becomes 0. Generated code
			// quirk; confirm before relying on zero IDs.
			a.ID = int(value.Int64)
		case anomaly.FieldCreateTime:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field create_time", values[i])
			} else if value.Valid {
				a.CreateTime = value.Time
			}
		case anomaly.FieldUpdateTime:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field update_time", values[i])
			} else if value.Valid {
				a.UpdateTime = value.Time
			}
		case anomaly.FieldType:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field type", values[i])
			} else if value.Valid {
				a.Type = value.String
			}
		case anomaly.FieldValue:
			if value, ok := values[i].(*sql.NullFloat64); !ok {
				return fmt.Errorf("unexpected type %T for field value", values[i])
			} else if value.Valid {
				a.Value = value.Float64
			}
		case anomaly.FieldProcessed:
			if value, ok := values[i].(*sql.NullBool); !ok {
				return fmt.Errorf("unexpected type %T for field processed", values[i])
			} else if value.Valid {
				a.Processed = value.Bool
			}
		case anomaly.FieldPeriodStart:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field period_start", values[i])
			} else if value.Valid {
				a.PeriodStart = value.Time
			}
		case anomaly.FieldPeriodEnd:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field period_end", values[i])
			} else if value.Valid {
				a.PeriodEnd = value.Time
			}
		case anomaly.ForeignKeys[0]:
			// Edge foreign key: stored as *int so "no owner" (NULL) stays nil.
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for edge-field detection_job_instance_anomalies", value)
			} else if value.Valid {
				a.detection_job_instance_anomalies = new(int)
				*a.detection_job_instance_anomalies = int(value.Int64)
			}
		}
	}
	return nil
}
// QueryDetectionJobInstance queries the "detection_job_instance" edge of the Anomaly entity.
func (a *Anomaly) QueryDetectionJobInstance() *DetectionJobInstanceQuery {
	return (&AnomalyClient{config: a.config}).QueryDetectionJobInstance(a)
}

// Update returns a builder for updating this Anomaly.
// Note that you need to call Anomaly.Unwrap() before calling this method if this Anomaly
// was returned from a transaction, and the transaction was committed or rolled back.
func (a *Anomaly) Update() *AnomalyUpdateOne {
	return (&AnomalyClient{config: a.config}).UpdateOne(a)
}

// Unwrap unwraps the Anomaly entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// It panics if the entity's driver is not a transactional driver.
func (a *Anomaly) Unwrap() *Anomaly {
	tx, ok := a.config.driver.(*txDriver)
	if !ok {
		panic("ent: Anomaly is not a transactional entity")
	}
	a.config.driver = tx.drv
	return a
}

// String implements the fmt.Stringer. Fields are listed in declaration
// order; timestamps are rendered in time.ANSIC format.
func (a *Anomaly) String() string {
	var builder strings.Builder
	builder.WriteString("Anomaly(")
	builder.WriteString(fmt.Sprintf("id=%v", a.ID))
	builder.WriteString(", create_time=")
	builder.WriteString(a.CreateTime.Format(time.ANSIC))
	builder.WriteString(", update_time=")
	builder.WriteString(a.UpdateTime.Format(time.ANSIC))
	builder.WriteString(", type=")
	builder.WriteString(a.Type)
	builder.WriteString(", value=")
	builder.WriteString(fmt.Sprintf("%v", a.Value))
	builder.WriteString(", processed=")
	builder.WriteString(fmt.Sprintf("%v", a.Processed))
	builder.WriteString(", period_start=")
	builder.WriteString(a.PeriodStart.Format(time.ANSIC))
	builder.WriteString(", period_end=")
	builder.WriteString(a.PeriodEnd.Format(time.ANSIC))
	builder.WriteByte(')')
	return builder.String()
}
// Anomalies is a parsable slice of Anomaly, as returned by queries that
// select multiple rows.
type Anomalies []*Anomaly
func (a Anomalies) config(cfg config) {
for _i := range a {
a[_i].config = cfg
}
} | internal/repository/entgo/ent/anomaly.go | 0.675015 | 0.518485 | anomaly.go | starcoder |
Solver provides operations on packages: install, remove, upgrade,
check integrity and others.
A package is an object comprised of a unique key (tentative release name,
version, namespace, chart name), and digested information about the chart that
it relates to (dependency relations, chart and repo URL, current and desired
state..).
To perform a package operation, for example, "install packageA", we:
1. Build a database of all packages in the world, which contains:
- Packages deployed in cluster (releases).
- Packages in the known repositories.
- Requested changes to packages (to install, to remove, to upgrade).
The gophersat/solver MAXSAT/Pseudo-Boolean solver operates over unique strings:
in our case, the package string fingerprint, created from its unique key.
The database contains information on a package current state (unknown,
installed, removed) and desired state (unknown, installed, removed).
The database can also be queried to obtain a list of packages that differ
only in the version.
Adding packages to the database can happen in any order (e.g. first toModify,
then releases, and finally repos). This means that db.Add() will intelligently
merge new information into the package in the db, if the package is already
present.
2. Iterate through the package database and create pseudo-boolean
constraints for the package fingerprint:
- If package needs to be installed or not
- If it depends on another package(s)
- If it conflicts with other similar packages that differ from it only in
version.
- If we want to minimize or maximize the distance between present version
and wanted version (upgrade to major versions, never upgrade, etc)
3. Find a solution to the SAT dependency problem if one exists, or report the
contradiction if there is no solution.
The result is a list of tuple of:
- Fingerprints (each corresponding with a package), and
- Resulting state of the package (if the package should be present in the
system or not).
4. We then iterate through the result list, separating the packages into
different sets by checking their current, desired, and resulting state:
unchanged packages, packages to install, packages to remove.
*/
package solver | internal/solver/doc.go | 0.704872 | 0.684251 | doc.go | starcoder |
package ast
import (
"strings"
)
type (
	// Node is the interface shared by every AST node: it reports its kind
	// (Type) and its flattened textual content (Text).
	Node interface {
		Type() NodeType
		Text() string
	}

	// Parent is a Node that additionally exposes its child nodes.
	Parent interface {
		Node
		Nodes() []Node
	}

	// TextNode is a leaf node holding literal text.
	TextNode struct {
		NodeType
		Txt string
	}

	// ParentNode is an internal node whose textual content is the
	// concatenation of its children's text, in order.
	ParentNode struct {
		NodeType
		Children []Node
	}
)
// Type reports the kind of the leaf node.
func (n TextNode) Type() NodeType { return n.NodeType }

// Type reports the kind of the parent node.
func (n ParentNode) Type() NodeType { return n.NodeType }

// Text returns the literal text held by the leaf node.
func (n TextNode) Text() string { return n.Txt }

// Text returns the concatenation of the children's text, in order.
func (n ParentNode) Text() string {
	var sb strings.Builder
	for _, child := range n.Children {
		sb.WriteString(child.Text())
	}
	return sb.String()
}

// Nodes returns the node's children.
func (n ParentNode) Nodes() []Node { return n.Children }
// The Make* helpers below are one-per-NodeType convenience constructors.
// The first three build leaf (text) nodes; the rest build parent nodes from
// an optional list of children.
func MakeEmptyLine() TextNode { return makeTextNode(EmptyLine, "") }
func MakeText(s string) TextNode { return makeTextNode(Text, s) }
func MakeSnippet(s string) TextNode { return makeTextNode(Snippet, s) }
func MakeTopic(ns ...Node) ParentNode { return makeParentNode(Topic, ns) }
func MakeSubTopic(ns ...Node) ParentNode { return makeParentNode(SubTopic, ns) }
func MakeBulPoint(ns ...Node) ParentNode { return makeParentNode(BulPoint, ns) }
func MakeSubBulPoint(ns ...Node) ParentNode { return makeParentNode(SubBulPoint, ns) }
func MakeNumPoint(ns ...Node) ParentNode { return makeParentNode(NumPoint, ns) }
func MakeSubNumPoint(ns ...Node) ParentNode { return makeParentNode(SubNumPoint, ns) }
func MakeTextLine(ns ...Node) ParentNode { return makeParentNode(TextLine, ns) }
func MakeKeyPhrase(ns ...Node) ParentNode { return makeParentNode(KeyPhrase, ns) }
func MakePositive(ns ...Node) ParentNode { return makeParentNode(Positive, ns) }
func MakeNegative(ns ...Node) ParentNode { return makeParentNode(Negative, ns) }
func MakeStrong(ns ...Node) ParentNode { return makeParentNode(Strong, ns) }
func MakeQuote(ns ...Node) ParentNode { return makeParentNode(Quote, ns) }
func MakeArtifact(ns ...Node) ParentNode { return makeParentNode(Artifact, ns) }
// makeTextNode builds a leaf node of kind nt holding the text s.
func makeTextNode(nt NodeType, s string) TextNode {
	return TextNode{NodeType: nt, Txt: s}
}
func makeParentNode(nt NodeType, ns []Node) ParentNode {
if ns == nil {
return ParentNode{NodeType: nt, Children: []Node{}}
}
return ParentNode{NodeType: nt, Children: ns}
} | ast/node.go | 0.550849 | 0.419113 | node.go | starcoder |
package tin
import "math"
const (
	// EarthRadius is the sphere radius used by the Web Mercator projection,
	// in metres (the WGS84 semi-major axis).
	EarthRadius = 6378137.0
	// MinLatitude / MaxLatitude bound the latitudes representable in Web
	// Mercator; inputs are clamped to this range.
	MinLatitude = -85.05112878
	MaxLatitude = 85.05112878
	// MinLongitude / MaxLongitude bound valid longitudes, in degrees.
	MinLongitude = -180.0
	MaxLongitude = 180.0
	// TileSize is the width/height of a map tile, in pixels.
	TileSize = 256
	// MaxLevelOfDetail is the deepest supported zoom level.
	MaxLevelOfDetail = 38
	// HalfCircumference is pi*EarthRadius — half the equatorial
	// circumference in metres (the projection's extent from origin).
	HalfCircumference = 20037508.342789243076571549020
)
// clip limits n to the closed interval [minValue, maxValue].
func clip(n, minValue, maxValue float64) float64 {
	return math.Min(math.Max(n, minValue), maxValue)
}
// MapSize returns the width (and height) of the full map in pixels at the
// given level of detail: TileSize * 2^levelOfDetail.
// NOTE(review): levels beyond 55 overflow uint64; callers presumably stay
// within MaxLevelOfDetail — confirm.
func MapSize(levelOfDetail uint64) uint64 {
	return TileSize << levelOfDetail
}

// LatLongToPixelXY projects a latitude/longitude (degrees) into global
// pixel coordinates at the given level of detail. Inputs are clamped to the
// valid Web Mercator ranges, and the result is clamped to the map extent.
func LatLongToPixelXY(latitude, longitude float64, levelOfDetail uint64) (pixelX, pixelY int64) {
	latitude = clip(latitude, MinLatitude, MaxLatitude)
	longitude = clip(longitude, MinLongitude, MaxLongitude)
	// Normalised x in [0,1) from longitude; y via the Mercator formula.
	x := (longitude + 180) / 360
	sinLatitude := math.Sin(latitude * math.Pi / 180)
	y := 0.5 - math.Log((1+sinLatitude)/(1-sinLatitude))/(4*math.Pi)
	mapSize := float64(MapSize(levelOfDetail))
	pixelX = int64(clip(x*mapSize+0.5, 0, mapSize-1))
	pixelY = int64(clip(y*mapSize+0.5, 0, mapSize-1))
	return
}

// PixelXYToLatLong is the inverse of LatLongToPixelXY: it converts global
// pixel coordinates at the given level of detail back into latitude and
// longitude in degrees.
func PixelXYToLatLong(pixelX, pixelY int64, levelOfDetail uint64) (latitude, longitude float64) {
	mapSize := float64(MapSize(levelOfDetail))
	x := (clip(float64(pixelX), 0, mapSize-1) / mapSize) - 0.5
	y := 0.5 - (clip(float64(pixelY), 0, mapSize-1) / mapSize)
	latitude = 90 - 360*math.Atan(math.Exp(-y*2*math.Pi))/math.Pi
	longitude = 360 * x
	return
}
// PixelXYToTileXY returns the coordinates of the 256x256 tile containing
// the given global pixel (floor division by the tile size).
func PixelXYToTileXY(pixelX, pixelY int64) (tileX, tileY int64) {
	tileX = pixelX >> 8
	tileY = pixelY >> 8
	return
}

// TileXYToPixelXY returns the global pixel coordinates of the upper-left
// corner of the given tile.
func TileXYToPixelXY(tileX, tileY int64) (pixelX, pixelY int64) {
	pixelX = tileX << 8
	pixelY = tileY << 8
	return
}
// TileXYToQuadKey encodes tile coordinates at the given level of detail as
// a quadkey string. Each character interleaves one bit of tileX (value 1)
// and one bit of tileY (value 2), most significant bit first, giving digits
// '0'..'3'.
func TileXYToQuadKey(tileX, tileY int64, levelOfDetail uint64) string {
	digits := make([]byte, 0, levelOfDetail)
	for i := levelOfDetail; i > 0; i-- {
		mask := int64(1) << (i - 1)
		d := byte('0')
		if tileX&mask != 0 {
			d += 1
		}
		if tileY&mask != 0 {
			d += 2
		}
		digits = append(digits, d)
	}
	return string(digits)
}
// QuadKeyToTileXY decodes a quadkey string back into tile coordinates and
// its level of detail (the key length). Digit '1' sets an x bit, '2' a y
// bit, '3' both, '0' neither. It panics on any other character.
func QuadKeyToTileXY(quadKey string) (tileX, tileY int64, levelOfDetail uint64) {
	levelOfDetail = uint64(len(quadKey))
	for j := 0; j < len(quadKey); j++ {
		mask := int64(1) << (levelOfDetail - uint64(j) - 1)
		switch quadKey[j] {
		case '0':
			// neither bit set
		case '1':
			tileX |= mask
		case '2':
			tileY |= mask
		case '3':
			tileX |= mask
			tileY |= mask
		default:
			panic("Invalid QuadKey digit sequence.")
		}
	}
	return
}
// Res returns the ground resolution in metres per pixel at the given level
// of detail: the full projected extent (2*HalfCircumference) divided by the
// map width in pixels.
func Res(levelOfDetail uint64) float64 {
	invTileSize := 1.0 / TileSize
	dres := 2.0 * HalfCircumference * invTileSize
	res := dres / float64(uint64(1)<<levelOfDetail)
	return res
}

// PixelXYTToMeters converts global pixel coordinates at the given level of
// detail into projected metres.
// NOTE(review): the exported name contains a typo ("XYTTo"); it is kept as
// is for API compatibility.
func PixelXYTToMeters(pixelX, pixelY int64, levelOfDetail uint64) (meterX, meterY float64) {
	res := Res(levelOfDetail)
	meterX = float64(pixelX) * res
	meterY = float64(pixelY) * res
	return meterX, meterY
}
func TileBounds(tileX, tileY int64, levelOfDetail uint64) BBox2d {
minx, miny := PixelXYTToMeters(tileX*int64(TileSize), tileY*int64(TileSize), levelOfDetail)
maxx, maxy := PixelXYTToMeters((tileX+1)*int64(TileSize), (tileY+1)*int64(TileSize), levelOfDetail)
return BBox2d{minx, miny, maxx, maxy}
} | webmercator.go | 0.706089 | 0.45944 | webmercator.go | starcoder |
package iso20022
// Choice between formats for the identification of a financial instrument.
type SecurityIdentification1Choice struct {
	// International Securities Identification Number (ISIN). A numbering system designed by the United Nation's International Organisation for Standardisation (ISO). The ISIN is composed of a 2-character prefix representing the country of issue, followed by the national security number (if one exists), and a check digit. Each country has a national numbering agency that assigns ISIN numbers for securities in that country.
	ISIN *ISINIdentifier `xml:"ISIN"`
	// Proprietary identification of a security assigned by an institution or organisation.
	AlternateIdentification *AlternateSecurityIdentification1 `xml:"AltrnId"`
	// Reuters Identification Code (RIC). A numbering system used within the Reuters system to identify instruments worldwide. The RIC contains an X-character market specific code (can be the CUSIP or EPIC codes) followed by a full stop, then the two-digit ISO country code, eg, IBM in UK is IBM.UK.
	RIC *RICIdentifier `xml:"RIC"`
	// Letters that identify a stock traded on a stock exchange. The Ticker Symbol is a short and convenient way of identifying a stock, eg, RTR.L for Reuters quoted in London.
	TickerSymbol *TickerIdentifier `xml:"TckrSymb"`
	// Identifier of a security assigned by the Bloomberg organisation.
	Bloomberg *BloombergIdentifier `xml:"Blmbrg"`
	// Identifier of a security assigned by the Consolidated Tape Association.
	CTA *ConsolidatedTapeAssociationIdentifier `xml:"CTA"`
	// Identifier of securities issued in Luxembourg. The common code is a 9-digit code that replaces the CEDEL (Clearstream) and Euroclear codes.
	Common *EuroclearClearstreamIdentifier `xml:"Cmon"`
}

// SetISIN stores the given string as the ISIN identifier.
func (s *SecurityIdentification1Choice) SetISIN(value string) {
	s.ISIN = (*ISINIdentifier)(&value)
}

// AddAlternateIdentification allocates the AlternateIdentification field
// and returns it for population.
func (s *SecurityIdentification1Choice) AddAlternateIdentification() *AlternateSecurityIdentification1 {
	s.AlternateIdentification = new(AlternateSecurityIdentification1)
	return s.AlternateIdentification
}

// SetRIC stores the given string as the Reuters Identification Code.
func (s *SecurityIdentification1Choice) SetRIC(value string) {
	s.RIC = (*RICIdentifier)(&value)
}

// SetTickerSymbol stores the given string as the ticker symbol.
func (s *SecurityIdentification1Choice) SetTickerSymbol(value string) {
	s.TickerSymbol = (*TickerIdentifier)(&value)
}

// SetBloomberg stores the given string as the Bloomberg identifier.
func (s *SecurityIdentification1Choice) SetBloomberg(value string) {
	s.Bloomberg = (*BloombergIdentifier)(&value)
}

// SetCTA stores the given string as the Consolidated Tape Association identifier.
func (s *SecurityIdentification1Choice) SetCTA(value string) {
	s.CTA = (*ConsolidatedTapeAssociationIdentifier)(&value)
}
func (s *SecurityIdentification1Choice) SetCommon(value string) {
s.Common = (*EuroclearClearstreamIdentifier)(&value)
} | SecurityIdentification1Choice.go | 0.735262 | 0.611266 | SecurityIdentification1Choice.go | starcoder |
package parse
import (
"fmt"
"reflect"
"strconv"
"time"
)
// durationType caches the reflect.Type of time.Duration so parseNumber can
// special-case it inside the signed-integer branch.
var durationType = reflect.TypeOf(time.Duration(0))

// parseNumber parses strVal into a value of numberType and returns a
// reflect.Value holding a pointer to the parsed value.
//
// Supported kinds: all signed integers (time.Duration is parsed with
// time.ParseDuration), all unsigned integers, float32/64 and complex64/128
// (delegated to the package's Complex64/Complex128 helpers). A string that
// fails to parse yields a *NumberError; a value that parses but does not
// fit the destination type yields an *OverflowError. An unsupported kind
// yields a zero reflect.Value and a nil error.
func parseNumber(strVal string, numberType reflect.Type) (reflect.Value, error) {
	var castVal reflect.Value
	switch numberType.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		if numberType == durationType {
			convertedDuration, err := time.ParseDuration(strVal)
			if err != nil {
				// Wrapped in *NumberError for consistency with every other
				// parse failure in this function (previously returned raw).
				return reflect.Value{}, &NumberError{err: err}
			}
			return reflect.ValueOf(&convertedDuration), nil
		}
		converted, err := strconv.ParseInt(strVal, 0, 64)
		if err != nil {
			return reflect.Value{}, &NumberError{err: err}
		}
		// Check for overflow against the destination width.
		convertTo := reflect.Zero(numberType)
		if convertTo.OverflowInt(converted) {
			return reflect.Value{}, &OverflowError{err: fmt.Errorf("Overflow of %v type: %v", numberType, converted)}
		}
		switch numberType.Kind() {
		case reflect.Int:
			convertedInt := int(converted)
			castVal = reflect.ValueOf(&convertedInt)
		case reflect.Int8:
			converted8 := int8(converted)
			castVal = reflect.ValueOf(&converted8)
		case reflect.Int16:
			converted16 := int16(converted)
			castVal = reflect.ValueOf(&converted16)
		case reflect.Int32:
			converted32 := int32(converted)
			castVal = reflect.ValueOf(&converted32)
		case reflect.Int64:
			converted64 := int64(converted)
			castVal = reflect.ValueOf(&converted64)
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		converted, err := strconv.ParseUint(strVal, 0, 64)
		if err != nil {
			return reflect.Value{}, &NumberError{err: err}
		}
		// Check for overflow against the destination width.
		convertTo := reflect.Zero(numberType)
		if convertTo.OverflowUint(converted) {
			return reflect.Value{}, &OverflowError{err: fmt.Errorf("Overflow of %v type: %v", numberType, converted)}
		}
		switch numberType.Kind() {
		case reflect.Uint:
			uintConverted := uint(converted)
			castVal = reflect.ValueOf(&uintConverted)
		case reflect.Uint8:
			uintConverted := uint8(converted)
			castVal = reflect.ValueOf(&uintConverted)
		case reflect.Uint16:
			uintConverted := uint16(converted)
			castVal = reflect.ValueOf(&uintConverted)
		case reflect.Uint32:
			uintConverted := uint32(converted)
			castVal = reflect.ValueOf(&uintConverted)
		case reflect.Uint64:
			uintConverted := uint64(converted)
			castVal = reflect.ValueOf(&uintConverted)
		}
	case reflect.Float32:
		converted, err := strconv.ParseFloat(strVal, 32)
		if err != nil {
			return reflect.Value{}, &NumberError{err: err}
		}
		// Check for overflow
		convertTo := reflect.Zero(numberType)
		if convertTo.OverflowFloat(converted) {
			return reflect.Value{}, &OverflowError{err: fmt.Errorf("Overflow of %v type: %v", numberType, converted)}
		}
		fl32 := float32(converted)
		castVal = reflect.ValueOf(&fl32)
	case reflect.Float64:
		converted, err := strconv.ParseFloat(strVal, 64)
		if err != nil {
			return reflect.Value{}, &NumberError{err: err}
		}
		// Check for overflow
		convertTo := reflect.Zero(numberType)
		if convertTo.OverflowFloat(converted) {
			return reflect.Value{}, &OverflowError{err: fmt.Errorf("Overflow of %v type: %v", numberType, converted)}
		}
		castVal = reflect.ValueOf(&converted)
	case reflect.Complex64:
		converted, err := Complex64(strVal)
		if err != nil {
			return reflect.Value{}, &NumberError{err: err}
		}
		// Check for overflow
		convertTo := reflect.Zero(numberType)
		if convertTo.OverflowComplex(complex128(converted)) {
			return reflect.Value{}, &OverflowError{err: fmt.Errorf("Overflow of %v type: %v", numberType, converted)}
		}
		castVal = reflect.ValueOf(&converted)
	case reflect.Complex128:
		converted, err := Complex128(strVal)
		if err != nil {
			return reflect.Value{}, &NumberError{err: err}
		}
		// Check for overflow
		convertTo := reflect.Zero(numberType)
		if convertTo.OverflowComplex(converted) {
			return reflect.Value{}, &OverflowError{err: fmt.Errorf("Overflow of %v type: %v", numberType, converted)}
		}
		castVal = reflect.ValueOf(&converted)
	}
	return castVal, nil
}
// OverflowError represents an overflow when casting to a numeric type.
type OverflowError struct {
err error
}
func (e *OverflowError) Unwrap() error {
return e.err
}
func (e *OverflowError) Error() string {
return e.err.Error()
}
// NumberError represents an error when parsing a string to generate a numeric type.
type NumberError struct {
err error
}
func (e *NumberError) Error() string {
return e.err.Error()
}
func (e *NumberError) Unwrap() error {
return e.err
} | parse/number.go | 0.693161 | 0.441131 | number.go | starcoder |
package techan
import (
"github.com/sdcoffey/big"
"github.com/sdcoffey/techan"
)
// Position is a pair of two Order objects: orders[0] is the entrance
// (opening) order and orders[1] the exit (closing) order.
type Position struct {
	orders [2]*Order
}

// NewPosition returns a new Position with the passed-in order as the open order.
// The stored pointer refers to the function's local copy of openOrder.
func NewPosition(openOrder Order) (t *Position) {
	t = new(Position)
	t.orders[0] = &openOrder
	return t
}

// Enter sets the open order to the order passed in
func (p *Position) Enter(order Order) {
	p.orders[0] = &order
}

// Exit sets the exit order to the order passed in
func (p *Position) Exit(order Order) {
	p.orders[1] = &order
}

// IsLong returns true if the entrance order is a buy order
func (p *Position) IsLong() bool {
	return p.EntranceOrder() != nil && p.EntranceOrder().Side == BUY
}

// IsShort returns true if the entrance order is a sell order
func (p *Position) IsShort() bool {
	return p.EntranceOrder() != nil && p.EntranceOrder().Side == SELL
}

// IsOpen returns true if there is an entrance order but no exit order
func (p *Position) IsOpen() bool {
	return p.EntranceOrder() != nil && p.ExitOrder() == nil
}

// IsClosed returns true if there are both entrance and exit orders
func (p *Position) IsClosed() bool {
	return p.EntranceOrder() != nil && p.ExitOrder() != nil
}

// IsNew returns true if there is neither an entrance or exit order
func (p *Position) IsNew() bool {
	return p.EntranceOrder() == nil && p.ExitOrder() == nil
}

// EntranceOrder returns the entrance order of this position
func (p *Position) EntranceOrder() *Order {
	return p.orders[0]
}

// ExitOrder returns the exit order of this position
func (p *Position) ExitOrder() *Order {
	return p.orders[1]
}

// CostBasis returns the price to enter this order: entrance amount times
// entrance price, or big.ZERO when there is no entrance order.
func (p *Position) CostBasis() big.Decimal {
	if p.EntranceOrder() != nil {
		return p.EntranceOrder().Amount.Mul(p.EntranceOrder().Price)
	}
	return big.ZERO
}

// EvenPrice returns the even price (openPrice +- spread): the spread is
// added for a buy entrance and subtracted for a sell entrance. When the
// entrance order has no positive spread, the raw entrance price is
// returned; with no entrance order at all, big.ZERO.
func (p *Position) EvenPrice() big.Decimal {
	if p.EntranceOrder() != nil {
		if p.EntranceOrder().Spread.GT(big.ZERO) {
			var spread big.Decimal = big.ZERO
			switch p.EntranceOrder().Side {
			case OrderSide(techan.BUY):
				spread = p.EntranceOrder().Spread
			case OrderSide(techan.SELL):
				spread = p.EntranceOrder().Spread.Neg()
			}
			return p.EntranceOrder().Price.Add(spread)
		}
		return p.EntranceOrder().Price
	}
	return big.ZERO
}
// ExitValue returns the value accrued by closing the position
func (p *Position) ExitValue() big.Decimal {
if p.IsClosed() {
return p.ExitOrder().Amount.Mul(p.ExitOrder().Price)
}
return big.ZERO
} | position.go | 0.760917 | 0.431464 | position.go | starcoder |
package primitive
import (
"fmt"
"math"
"math/rand"
"github.com/fogleman/gg"
)
// Triangle is a mutable triangle shape whose vertices stay within a W x H
// canvas.
type Triangle struct {
	W, H int
	X1, Y1 int
	X2, Y2 int
	X3, Y3 int
}

// NewRandomTriangle returns a triangle whose vertices are placed uniformly
// at random on the canvas; Mutate is called once, which also guarantees the
// returned triangle satisfies Valid.
func NewRandomTriangle(w, h int) *Triangle {
	x1 := rand.Intn(w)
	y1 := rand.Intn(h)
	x2 := rand.Intn(w)
	y2 := rand.Intn(h)
	x3 := rand.Intn(w)
	y3 := rand.Intn(h)
	t := &Triangle{w, h, x1, y1, x2, y2, x3, y3}
	t.Mutate()
	return t
}

// Draw appends the triangle's closed path to the drawing context.
func (t *Triangle) Draw(dc *gg.Context) {
	dc.LineTo(float64(t.X1), float64(t.Y1))
	dc.LineTo(float64(t.X2), float64(t.Y2))
	dc.LineTo(float64(t.X3), float64(t.Y3))
	dc.ClosePath()
}

// SVG renders the triangle as an SVG polygon element carrying the given
// attribute string.
func (t *Triangle) SVG(attrs string) string {
	return fmt.Sprintf(
		"<polygon %s points=\"%d,%d %d,%d %d,%d\" />",
		attrs, t.X1, t.Y1, t.X2, t.Y2, t.X3, t.Y3)
}

// Copy returns an independent copy of the triangle.
func (t *Triangle) Copy() Shape {
	a := *t
	return &a
}

// Mutate nudges one randomly chosen vertex by up to +-10 pixels (clamped to
// the canvas) and repeats until the triangle passes Valid.
// NOTE(review): this loops forever if no valid single-vertex nudge exists;
// presumably the canvas is always large enough — confirm.
func (t *Triangle) Mutate() {
	for {
		switch rand.Intn(3) {
		case 0:
			t.X1 = clampInt(t.X1+rand.Intn(21)-10, 0, t.W-1)
			t.Y1 = clampInt(t.Y1+rand.Intn(21)-10, 0, t.H-1)
		case 1:
			t.X2 = clampInt(t.X2+rand.Intn(21)-10, 0, t.W-1)
			t.Y2 = clampInt(t.Y2+rand.Intn(21)-10, 0, t.H-1)
		case 2:
			t.X3 = clampInt(t.X3+rand.Intn(21)-10, 0, t.W-1)
			t.Y3 = clampInt(t.Y3+rand.Intn(21)-10, 0, t.H-1)
		}
		if t.Valid() {
			break
		}
	}
}
// Valid reports whether the triangle is non-degenerate: each interior angle
// must exceed minDegrees, which keeps mutated triangles from collapsing
// into slivers.
func (t *Triangle) Valid() bool {
	const minDegrees = 15
	a1 := vertexAngle(t.X1, t.Y1, t.X2, t.Y2, t.X3, t.Y3)
	a2 := vertexAngle(t.X2, t.Y2, t.X1, t.Y1, t.X3, t.Y3)
	a3 := 180 - a1 - a2
	return a1 > minDegrees && a2 > minDegrees && a3 > minDegrees
}

// vertexAngle returns the interior angle, in degrees, at vertex (vx, vy) of
// the triangle whose other two vertices are (ax, ay) and (bx, by): the two
// edge vectors are normalized and the arccosine of their dot product taken.
func vertexAngle(vx, vy, ax, ay, bx, by int) float64 {
	x1 := float64(ax - vx)
	y1 := float64(ay - vy)
	x2 := float64(bx - vx)
	y2 := float64(by - vy)
	d1 := math.Sqrt(x1*x1 + y1*y1)
	d2 := math.Sqrt(x2*x2 + y2*y2)
	x1 /= d1
	y1 /= d1
	x2 /= d2
	y2 /= d2
	return degrees(math.Acos(x1*x2 + y1*y2))
}
// Rasterize converts the triangle into horizontal scanlines.
func (t *Triangle) Rasterize() []Scanline {
	return rasterizeTriangle(t.X1, t.Y1, t.X2, t.Y2, t.X3, t.Y3)
}

// rasterizeTriangle sorts the vertices by y and splits the triangle at the
// middle vertex into a flat-bottom half and a flat-top half, concatenating
// their scanlines. The shared row at y2 is emitted only by the bottom half
// (the top half's loop stops before its lowest row).
func rasterizeTriangle(x1, y1, x2, y2, x3, y3 int) []Scanline {
	// Order the vertices so that y1 <= y2 <= y3.
	if y1 > y3 {
		x1, x3 = x3, x1
		y1, y3 = y3, y1
	}
	if y1 > y2 {
		x1, x2 = x2, x1
		y1, y2 = y2, y1
	}
	if y2 > y3 {
		x2, x3 = x3, x2
		y2, y3 = y3, y2
	}
	if y2 == y3 {
		// Already flat-bottomed.
		return rasterizeTriangleBottom(x1, y1, x2, y2, x3, y3)
	} else if y1 == y2 {
		// Already flat-topped.
		return rasterizeTriangleTop(x1, y1, x2, y2, x3, y3)
	} else {
		// General case: split at y2 with a fourth vertex interpolated on
		// the long edge (x1,y1)-(x3,y3).
		x4 := x1 + int((float64(y2-y1)/float64(y3-y1))*float64(x3-x1))
		y4 := y2
		bottom := rasterizeTriangleBottom(x1, y1, x2, y2, x4, y4)
		top := rasterizeTriangleTop(x2, y2, x4, y4, x3, y3)
		return append(bottom, top...)
	}
}

// rasterizeTriangleBottom rasterizes a triangle with a flat bottom edge
// (y2 == y3), emitting one scanline per row from y1 through y2 inclusive.
// s1 and s2 are the inverse slopes of the two non-flat edges.
func rasterizeTriangleBottom(x1, y1, x2, y2, x3, y3 int) []Scanline {
	s1 := float64(x2-x1) / float64(y2-y1)
	s2 := float64(x3-x1) / float64(y3-y1)
	ax := float64(x1)
	bx := float64(x1)
	lines := make([]Scanline, y2-y1+1)
	i := 0
	for y := y1; y <= y2; y++ {
		a := int(ax)
		b := int(bx)
		ax += s1
		bx += s2
		if a > b {
			a, b = b, a
		}
		lines[i] = Scanline{y, a, b}
		i++
	}
	return lines
}
func rasterizeTriangleTop(x1, y1, x2, y2, x3, y3 int) []Scanline {
s1 := float64(x3-x1) / float64(y3-y1)
s2 := float64(x3-x2) / float64(y3-y2)
ax := float64(x3)
bx := float64(x3)
lines := make([]Scanline, y3-y1)
i := 0
for y := y3; y > y1; y-- {
ax -= s1
bx -= s2
a := int(ax)
b := int(bx)
if a > b {
a, b = b, a
}
lines[i] = Scanline{y, a, b}
i++
}
return lines
} | primitive/triangle.go | 0.644001 | 0.487429 | triangle.go | starcoder |
package polygol
import (
"fmt"
"math"
)
// ringIn is one ingested linear ring of an input polygon: its parent
// polygon, whether it is the exterior ring, the segments built from its
// edges, and its bounding box.
type ringIn struct {
	poly *polyIn
	isExterior bool
	segments []*segment
	bbox bbox
}
// newRingIn validates and ingests one linear ring: every coordinate is
// rounded, consecutive duplicate points are dropped, a segment is created
// per edge (closing the ring back to the first point if the input does not
// repeat it), and the ring's bounding box is accumulated.
func (o *operation) newRingIn(ring [][]float64, poly *polyIn, isExterior bool) (*ringIn, error) {
	if len(ring) == 0 {
		return nil, fmt.Errorf(`Input geometry is not a valid polygon or multipolygon (empty).`)
	}
	if len(ring[0]) < 2 {
		// The first point lacks an x/y pair; report it with the same
		// message as the identical check inside the loop below (this
		// previously reused the "(empty)" message).
		return nil, fmt.Errorf(`Input geometry is not a valid polygon or multipolygon (missing coordinates).`)
	}
	ri := &ringIn{}
	ri.poly = poly
	ri.isExterior = isExterior
	// At most one segment per input point (plus the closing one, covered by
	// append growth); preallocate to avoid repeated reallocation.
	ri.segments = make([]*segment, 0, len(ring))
	firstPoint := o.rounder.round(ring[0][0], ring[0][1])
	ri.bbox = bbox{ll: *firstPoint, ur: *firstPoint}
	prevPoint := firstPoint
	for i := 1; i < len(ring); i++ {
		if len(ring[i]) < 2 {
			return nil, fmt.Errorf(`Input geometry is not a valid polygon or multipolygon (missing coordinates).`)
		}
		point := o.rounder.round(ring[i][0], ring[i][1])
		// skip repeated points
		if point.x == prevPoint.x && point.y == prevPoint.y {
			continue
		}
		segment, err := o.newSegmentFromRing(prevPoint, point, ri)
		if err != nil {
			return nil, err
		}
		ri.segments = append(ri.segments, segment)
		// Expand the ring's bounding box to cover the new point.
		if point.x < ri.bbox.ll.x {
			ri.bbox.ll.x = point.x
		}
		if point.y < ri.bbox.ll.y {
			ri.bbox.ll.y = point.y
		}
		if point.x > ri.bbox.ur.x {
			ri.bbox.ur.x = point.x
		}
		if point.y > ri.bbox.ur.y {
			ri.bbox.ur.y = point.y
		}
		prevPoint = point
	}
	// add segment from last to first if last is not the same as first
	if firstPoint.x != prevPoint.x || firstPoint.y != prevPoint.y {
		segment, err := o.newSegmentFromRing(prevPoint, firstPoint, ri)
		if err != nil {
			return nil, err
		}
		ri.segments = append(ri.segments, segment)
	}
	return ri, nil
}
// getSweepEvents collects the left and right sweep events of every segment
// in the ring, in segment order.
func (ri *ringIn) getSweepEvents() []*sweepEvent {
	// Exactly two events per segment; preallocate to avoid regrowth.
	sweepEvents := make([]*sweepEvent, 0, 2*len(ri.segments))
	for _, segment := range ri.segments {
		sweepEvents = append(sweepEvents, segment.leftSE, segment.rightSE)
	}
	return sweepEvents
}

// indexOf returns the position of ri within ringIns, or -1 if it is absent.
// A nil receiver never matches, so the check is hoisted out of the loop.
func (ri *ringIn) indexOf(ringIns []*ringIn) int {
	if ri == nil {
		return -1
	}
	for i, r := range ringIns {
		if r == ri {
			return i
		}
	}
	return -1
}
// polyIn is one ingested polygon: its parent multipolygon, its exterior
// ring, any interior rings (holes), and the union of their bounding boxes.
type polyIn struct {
	multiPoly *multiPolyIn
	exteriorRing *ringIn
	interiorRings []*ringIn
	bbox bbox
}

// newPolyIn ingests one polygon: poly[0] is the exterior ring, every
// remaining element an interior ring. The polygon's bounding box starts as
// the exterior ring's box and is expanded over each interior ring.
func (o *operation) newPolyIn(poly [][][]float64, multiPoly *multiPolyIn) (*polyIn, error) {
	if len(poly) == 0 {
		return nil, fmt.Errorf(`Input geometry is not a valid polygon or multipolygon (empty).`)
	}
	pi := &polyIn{}
	exteriorRing, err := o.newRingIn(poly[0], pi, true)
	if err != nil {
		return nil, err
	}
	pi.exteriorRing = exteriorRing
	pi.bbox = pi.exteriorRing.bbox
	pi.interiorRings = []*ringIn{}
	for i := 1; i < len(poly); i++ {
		ring, err := o.newRingIn(poly[i], pi, false)
		if err != nil {
			return nil, err
		}
		// Expand the polygon's bounding box to cover this interior ring.
		if ring.bbox.ll.x < pi.bbox.ll.x {
			pi.bbox.ll.x = ring.bbox.ll.x
		}
		if ring.bbox.ll.y < pi.bbox.ll.y {
			pi.bbox.ll.y = ring.bbox.ll.y
		}
		if ring.bbox.ur.x > pi.bbox.ur.x {
			pi.bbox.ur.x = ring.bbox.ur.x
		}
		if ring.bbox.ur.y > pi.bbox.ur.y {
			pi.bbox.ur.y = ring.bbox.ur.y
		}
		pi.interiorRings = append(pi.interiorRings, ring)
	}
	pi.multiPoly = multiPoly
	return pi, nil
}
// getSweepEvents collects the sweep events of the exterior ring followed by
// those of every interior ring.
func (pi *polyIn) getSweepEvents() []*sweepEvent {
	sweepEvents := pi.exteriorRing.getSweepEvents()
	for _, ring := range pi.interiorRings {
		sweepEvents = append(sweepEvents, ring.getSweepEvents()...)
	}
	return sweepEvents
}

// indexOf returns the position of pi within polyIns, or -1 if it is absent.
// A nil receiver never matches, so the check is hoisted out of the loop
// (consistent with ringIn.indexOf).
func (pi *polyIn) indexOf(polyIns []*polyIn) int {
	if pi == nil {
		return -1
	}
	for i, p := range polyIns {
		if p == pi {
			return i
		}
	}
	return -1
}
// multiPolyIn is one ingested multipolygon operand: its polygons, the union
// of their bounding boxes, and whether it is the subject (vs clipping)
// geometry of the operation.
type multiPolyIn struct {
	polys []*polyIn
	bbox bbox
	isSubject bool
}

// newMultiPolyIn ingests a multipolygon operand. The bounding box starts
// inverted (+Inf lower-left, -Inf upper-right) so the first polygon always
// expands it; an empty input keeps the inverted box.
func (o *operation) newMultiPolyIn(multiPoly [][][][]float64, isSubject bool) (*multiPolyIn, error) {
	mpi := &multiPolyIn{}
	mpi.polys = []*polyIn{}
	mpi.bbox = bbox{
		ll: point{x: math.Inf(1), y: math.Inf(1)},
		ur: point{x: math.Inf(-1), y: math.Inf(-1)},
	}
	for i := 0; i < len(multiPoly); i++ {
		poly, err := o.newPolyIn(multiPoly[i], mpi)
		if err != nil {
			return nil, err
		}
		// Expand the multipolygon's bounding box to cover this polygon.
		if poly.bbox.ll.x < mpi.bbox.ll.x {
			mpi.bbox.ll.x = poly.bbox.ll.x
		}
		if poly.bbox.ll.y < mpi.bbox.ll.y {
			mpi.bbox.ll.y = poly.bbox.ll.y
		}
		if poly.bbox.ur.x > mpi.bbox.ur.x {
			mpi.bbox.ur.x = poly.bbox.ur.x
		}
		if poly.bbox.ur.y > mpi.bbox.ur.y {
			mpi.bbox.ur.y = poly.bbox.ur.y
		}
		mpi.polys = append(mpi.polys, poly)
	}
	mpi.isSubject = isSubject
	return mpi, nil
}
// getSweepEvents collects the sweep events of every polygon in the
// multipolygon, in polygon order.
func (mpi *multiPolyIn) getSweepEvents() []*sweepEvent {
	sweepEvents := []*sweepEvent{}
	for _, poly := range mpi.polys {
		sweepEvents = append(sweepEvents, poly.getSweepEvents()...)
	}
	return sweepEvents
}
func (mpi *multiPolyIn) indexOf(multiPolyIns []*multiPolyIn) int {
for i, mp := range multiPolyIns {
if mpi == nil || mp == nil {
continue
}
if mpi == mp {
return i
}
}
return -1
} | geom-in.go | 0.642208 | 0.509764 | geom-in.go | starcoder |
package client
// Predefined query condition types. Args encodes the arity of each
// condition: 0 takes no value, 1 takes exactly one value, -1 takes a list.
var (
	COND_EQ = QueryConditionType{"eq", 1}
	COND_NE = QueryConditionType{"ne", 1}
	COND_NULL = QueryConditionType{"null", 0}
	COND_NOTNULL = QueryConditionType{"notnull", 0}
	COND_IN = QueryConditionType{"in", -1}
	COND_NOTIN = QueryConditionType{"notin", -1}
	COND_OR = QueryConditionType{"or", 1}
	COND_AND = QueryConditionType{"and", 1}
	// mods maps a condition name to its QueryConditionType for string lookup.
	mods = map[string]QueryConditionType{
		COND_EQ.Name: COND_EQ,
		COND_NE.Name: COND_NE,
		COND_NULL.Name: COND_NULL,
		COND_NOTNULL.Name: COND_NOTNULL,
		COND_IN.Name: COND_IN,
		COND_NOTIN.Name: COND_NOTIN,
		COND_OR.Name: COND_OR,
		COND_AND.Name: COND_AND,
	}
)
// QueryConditionType identifies a condition operator and its arity.
type QueryConditionType struct {
	Name string // wire name of the operator, e.g. "eq", "in"
	Args int // arity: 0 = no value, 1 = single value, -1 = value list
}
// QueryCondition is a node in a query condition tree: either a leaf
// (Field + values + operator) or a boolean combination of two subtrees
// via left/right (built by AND/OR).
type QueryCondition struct {
	Field string // field the condition applies to (leaf nodes)
	values []interface{} // operand values for the operator
	conditionType QueryConditionType // the operator of this node
	left, right *QueryCondition // children for and/or combinations
}
// ToCondition converts this node into a transport-level Condition.
// Single-argument conditions carry their first value, list conditions carry
// the whole slice, and zero-argument conditions carry no value.
func (q *QueryCondition) ToCondition() Condition {
	cond := Condition{Modifier: q.conditionType.Name}
	switch {
	case q.conditionType.Args == 1 && len(q.values) > 0:
		cond.Value = q.values[0]
	case q.conditionType.Args == -1:
		cond.Value = q.values
	}
	return cond
}
// ValidMod reports whether mod names one of the predefined condition types.
func ValidMod(mod string) bool {
	_, ok := mods[mod]
	return ok
}
// NewConditionFromString builds a QueryCondition for field using the
// condition type named by mod. An unknown mod silently yields the zero
// QueryConditionType (empty name, arity 0); callers should validate with
// ValidMod first.
func NewConditionFromString(field, mod string, values ...interface{}) *QueryCondition {
	return &QueryCondition{
		Field: field,
		values: values,
		conditionType: mods[mod],
	}
}
// NewCondition builds a field-less QueryCondition with the given operator
// and operand values.
func NewCondition(mod QueryConditionType, values ...interface{}) *QueryCondition {
	return &QueryCondition{
		values: values,
		conditionType: mod,
	}
}
// NE builds a "not equal" condition against value.
func NE(value interface{}) *QueryCondition {
	return NewCondition(COND_NE, value)
}
// EQ builds an "equal" condition against value.
func EQ(value interface{}) *QueryCondition {
	return NewCondition(COND_EQ, value)
}
// NULL builds an "is null" condition. The argument is ignored (null takes
// no operand) and is kept only for call-site compatibility; the blank
// identifier makes that explicit.
func NULL(_ interface{}) *QueryCondition {
	return NewCondition(COND_NULL)
}
// NOTNULL builds an "is not null" condition. The argument is ignored
// (notnull takes no operand) and is kept only for call-site compatibility;
// the blank identifier makes that explicit.
func NOTNULL(_ interface{}) *QueryCondition {
	return NewCondition(COND_NOTNULL)
}
// IN builds an "in list" condition against values.
func IN(values ...interface{}) *QueryCondition {
	return NewCondition(COND_IN, values...)
}
// NOTIN builds a "not in list" condition against values.
func NOTIN(values ...interface{}) *QueryCondition {
	return NewCondition(COND_NOTIN, values...)
}
// AND combines c and right into a conjunction node.
func (c *QueryCondition) AND(right *QueryCondition) *QueryCondition {
	combined := &QueryCondition{conditionType: COND_AND}
	combined.left = c
	combined.right = right
	return combined
}
func (c *QueryCondition) OR(right *QueryCondition) *QueryCondition {
return &QueryCondition{
conditionType: COND_OR,
left: c,
right: right,
}
} | vnext/condition.go | 0.560974 | 0.437703 | condition.go | starcoder |
package minimum_knight_moves
import (
"container/list"
)
// MinKnightMovesBFS returns the minimum number of knight moves needed to
// travel from the origin (0, 0) to the square (x, y) on an unbounded board.
//
// It runs a bidirectional BFS: one frontier expands from the origin, the
// other from the target, and the search stops as soon as a node popped from
// one frontier has already been reached by the other — the two partial
// distances then sum to the answer. Queue entries are {col, row, distance}.
func MinKnightMovesBFS(x int, y int) int {
	// The offsets of the eight possible knight moves.
	offsets := [][]int{
		{1, 2}, {2, 1}, {2, -1}, {1, -2},
		{-1, -2}, {-2, -1}, {-2, 1}, {-1, 2},
	}
	// Search state rooted at the origin.
	originQueue := list.New()
	originQueue.PushBack([]int{0, 0, 0})
	originDistance := make(map[string]int)
	originDistance[generateKey(0, 0)] = 0
	// Search state rooted at the target.
	targetQueue := list.New()
	targetQueue.PushBack([]int{x, y, 0})
	targetDistance := make(map[string]int)
	targetDistance[generateKey(x, y)] = 0
	for {
		// Pop from the origin frontier; if the target search has already
		// visited this square, the two distances add up to the answer.
		first := originQueue.Front()
		originQueue.Remove(first)
		origin := first.Value.([]int)
		if value, ok := targetDistance[generateKey(origin[0], origin[1])]; ok {
			return origin[2] + value
		}
		// Symmetric check for the target frontier.
		first = targetQueue.Front()
		targetQueue.Remove(first)
		target := first.Value.([]int)
		if value, ok := originDistance[generateKey(target[0], target[1])]; ok {
			return target[2] + value
		}
		for _, offset := range offsets {
			// Expand the origin frontier.
			nextOrigin := []int{origin[0] + offset[0], origin[1] + offset[1]}
			if _, ok := originDistance[generateKey(nextOrigin[0], nextOrigin[1])]; !ok {
				originQueue.PushBack([]int{nextOrigin[0], nextOrigin[1], origin[2] + 1})
				originDistance[generateKey(nextOrigin[0], nextOrigin[1])] = origin[2] + 1
			}
			// Expand the target frontier.
			nextTarget := []int{target[0] + offset[0], target[1] + offset[1]}
			if _, ok := targetDistance[generateKey(nextTarget[0], nextTarget[1])]; !ok {
				targetQueue.PushBack([]int{nextTarget[0], nextTarget[1], target[2] + 1})
				// BUG FIX: the distance recorded for the target frontier must
				// derive from target[2], not origin[2], or the returned move
				// count is wrong whenever the two frontiers are asymmetric.
				targetDistance[generateKey(nextTarget[0], nextTarget[1])] = target[2] + 1
			}
		}
	}
}
// unidirectional BFS
func minKnightMovesBFS(x int, y int) int {
offsets := [][]int{
{1, 2}, {2, 1}, {2, -1}, {1, -2},
{-1, -2}, {-2, -1}, {-2, 1}, {-1, 2},
}
bfs := func(x, y int) int {
visited := make(map[string]bool)
queue := list.New()
queue.PushBack([]int{0, 0})
steps := 0
for queue.Len() > 0 {
currLevel := queue.Len()
for i := 0; i < currLevel; i++ {
current := queue.Front()
queue.Remove(current)
curr := current.Value.([]int)
if curr[0] == x && curr[1] == y {
return steps
}
for _, offset := range offsets {
next := []int{
curr[0] + offset[0],
curr[1] + offset[1],
}
nextKey := generateKey(next[0]+302, next[1]+302)
if !visited[nextKey] {
visited[nextKey] = true
queue.PushBack(next)
}
}
}
steps++
}
return steps
}
return bfs(x, y)
} | golang/minimum_knight_moves/minimum_knight_moves_bfs.go | 0.592195 | 0.479138 | minimum_knight_moves_bfs.go | starcoder |
package datatype
import (
"fmt"
"math"
"github.com/i-sevostyanov/NanoDB/internal/sql"
)
// Float is the runtime representation of a SQL FLOAT value.
type Float struct {
	value float64 // underlying float64 payload
}
// NewFloat wraps a float64 in a Float value.
func NewFloat(v float64) Float {
	return Float{value: v}
}
// Raw returns the underlying float64.
func (f Float) Raw() interface{} {
	return f.value
}
// DataType reports that this value is of type sql.Float.
func (f Float) DataType() sql.DataType {
	return sql.Float
}
// Compare orders f against v. Integer operands are widened to float64 for
// consistency with the arithmetic and comparison operators below; NULL sorts
// before any float (Greater). Unsupported operand types yield an error with
// sql.Equal as a placeholder result.
func (f Float) Compare(v sql.Value) (sql.CompareType, error) {
	var other float64
	switch value := v.Raw().(type) {
	case float64:
		other = value
	case int64:
		// Previously int64 fell into the error branch even though every
		// other operator on Float accepts it; widen it here as well.
		other = float64(value)
	case nil:
		return sql.Greater, nil
	default:
		return sql.Equal, fmt.Errorf("unexpected arg type: %T", value)
	}
	switch {
	case f.value < other:
		return sql.Less, nil
	case f.value > other:
		return sql.Greater, nil
	default:
		return sql.Equal, nil
	}
}
// UnaryPlus returns a copy of f (the value of +f).
func (f Float) UnaryPlus() (sql.Value, error) {
	return Float{value: f.value}, nil
}
// UnaryMinus returns the negation of f (the value of -f).
func (f Float) UnaryMinus() (sql.Value, error) {
	return Float{value: -f.value}, nil
}
// Add returns f + v, widening int64 operands to float64. NULL yields NULL.
func (f Float) Add(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case float64:
		return Float{value: f.value + value}, nil
	case int64:
		return Float{value: f.value + float64(value)}, nil
	case nil:
		return Null{}, nil
	default:
		// Fixed misspelling ("unexptected") in the error message.
		return nil, fmt.Errorf("add: unexpected arg type: %T", value)
	}
}
// Sub returns f - v, widening int64 operands to float64. NULL yields NULL.
func (f Float) Sub(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case float64:
		return Float{value: f.value - value}, nil
	case int64:
		return Float{value: f.value - float64(value)}, nil
	case nil:
		return Null{}, nil
	default:
		// Fixed misspelling ("unexptected") in the error message.
		return nil, fmt.Errorf("sub: unexpected arg type: %T", value)
	}
}
// Mul returns f * v, widening int64 operands to float64. NULL yields NULL.
func (f Float) Mul(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case float64:
		return Float{value: f.value * value}, nil
	case int64:
		return Float{value: f.value * float64(value)}, nil
	case nil:
		return Null{}, nil
	default:
		// Fixed misspelling ("unexptected") in the error message.
		return nil, fmt.Errorf("mul: unexpected arg type: %T", value)
	}
}
// Div returns f / v, widening int64 operands to float64. Division by a zero
// operand is an error; NULL yields NULL.
func (f Float) Div(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case float64:
		if value == 0 {
			return nil, fmt.Errorf("division by zero")
		}
		return Float{value: f.value / value}, nil
	case int64:
		if value == 0 {
			return nil, fmt.Errorf("division by zero")
		}
		return Float{value: f.value / float64(value)}, nil
	case nil:
		return Null{}, nil
	default:
		// Fixed misspelling ("unexptected") in the error message.
		return nil, fmt.Errorf("div: unexpected arg type: %T", value)
	}
}
// Pow returns f raised to the power v, widening int64 operands to float64.
// NULL yields NULL.
func (f Float) Pow(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case float64:
		return Float{value: math.Pow(f.value, value)}, nil
	case int64:
		return Float{value: math.Pow(f.value, float64(value))}, nil
	case nil:
		return Null{}, nil
	default:
		// Fixed misspelling ("unexptected") in the error message.
		return nil, fmt.Errorf("pow: unexpected arg type: %T", value)
	}
}
// Mod returns the floating-point remainder of f / v (math.Mod semantics),
// widening int64 operands to float64. A zero operand is an error; NULL
// yields NULL.
func (f Float) Mod(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case float64:
		if value == 0 {
			return nil, fmt.Errorf("division by zero")
		}
		return Float{value: math.Mod(f.value, value)}, nil
	case int64:
		if value == 0 {
			return nil, fmt.Errorf("division by zero")
		}
		return Float{value: math.Mod(f.value, float64(value))}, nil
	case nil:
		return Null{}, nil
	default:
		// Fixed misspelling ("unexptected") in the error message.
		return nil, fmt.Errorf("mod: unexpected arg type: %T", value)
	}
}
// Equal returns the boolean f == v, widening int64 operands to float64.
// NULL yields NULL.
func (f Float) Equal(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case float64:
		return Boolean{value: f.value == value}, nil
	case int64:
		return Boolean{value: f.value == float64(value)}, nil
	case nil:
		return Null{}, nil
	default:
		// Fixed misspelling ("unexptected") in the error message.
		return nil, fmt.Errorf("equal: unexpected arg type: %T", value)
	}
}
// NotEqual returns the boolean f != v, widening int64 operands to float64.
// NULL yields NULL.
func (f Float) NotEqual(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case float64:
		return Boolean{value: f.value != value}, nil
	case int64:
		return Boolean{value: f.value != float64(value)}, nil
	case nil:
		return Null{}, nil
	default:
		// Fixed misspelling ("unexptected") in the error message.
		return nil, fmt.Errorf("not equal: unexpected arg type: %T", value)
	}
}
// GreaterThan returns the boolean f > v, widening int64 operands to float64.
// NULL yields NULL.
func (f Float) GreaterThan(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case float64:
		return Boolean{value: f.value > value}, nil
	case int64:
		return Boolean{value: f.value > float64(value)}, nil
	case nil:
		return Null{}, nil
	default:
		// Fixed misspelling ("unexptected") in the error message.
		return nil, fmt.Errorf("greater-than: unexpected arg type: %T", value)
	}
}
// LessThan returns the boolean f < v, widening int64 operands to float64.
// NULL yields NULL.
func (f Float) LessThan(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case float64:
		return Boolean{value: f.value < value}, nil
	case int64:
		return Boolean{value: f.value < float64(value)}, nil
	case nil:
		return Null{}, nil
	default:
		// Fixed misspelling ("unexptected") in the error message.
		return nil, fmt.Errorf("less-than: unexpected arg type: %T", value)
	}
}
// GreaterOrEqual returns the boolean f >= v, widening int64 operands to
// float64. NULL yields NULL.
func (f Float) GreaterOrEqual(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case float64:
		return Boolean{value: f.value >= value}, nil
	case int64:
		return Boolean{value: f.value >= float64(value)}, nil
	case nil:
		return Null{}, nil
	default:
		// Fixed misspelling ("unexptected") in the error message.
		return nil, fmt.Errorf("greater-or-equal: unexpected arg type: %T", value)
	}
}
// LessOrEqual returns the boolean f <= v, widening int64 operands to
// float64. NULL yields NULL.
func (f Float) LessOrEqual(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case float64:
		return Boolean{value: f.value <= value}, nil
	case int64:
		return Boolean{value: f.value <= float64(value)}, nil
	case nil:
		return Null{}, nil
	default:
		// Fixed misspelling ("unexptected") in the error message.
		return nil, fmt.Errorf("less-or-equal: unexpected arg type: %T", value)
	}
}
// And is not defined for floats and always returns an error.
func (f Float) And(_ sql.Value) (sql.Value, error) {
	return nil, fmt.Errorf("and: unsupported operation")
}
// Or is not defined for floats and always returns an error.
func (f Float) Or(_ sql.Value) (sql.Value, error) {
	return nil, fmt.Errorf("or: unsupported operation")
}
package spinix
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/rs/xid"
"github.com/mmadfox/geojson"
)
// Expr is implemented by every AST node of the rule expression language.
type Expr interface {
	String() string // textual (re-parseable) form of the node
	expr() // unexported marker restricting implementations to this package
}
// DistanceUnit identifies the unit of a distance literal.
type DistanceUnit int
const (
	DistanceUndefined DistanceUnit = 0 // no unit specified
	DistanceMeters DistanceUnit = 1 // meters ("m")
	DistanceKilometers DistanceUnit = 2 // kilometers ("km")
)
// String returns the textual suffix of the unit ("m" or "km"), or "#?" for
// an undefined unit.
func (u DistanceUnit) String() string {
	if u == DistanceMeters {
		return "m"
	}
	if u == DistanceKilometers {
		return "km"
	}
	return "#?"
}
// RepeatMode describes how often a trigger may fire.
type RepeatMode int
const (
	RepeatOnce RepeatMode = 1 // fire a single time
	RepeatEvery RepeatMode = 2 // fire on a fixed interval
	RepeatTimes RepeatMode = 3 // fire a bounded number of times
)
// String returns the keyword for the repeat mode, or "#?" for an unknown
// value.
func (rm RepeatMode) String() string {
	if rm == RepeatEvery {
		return "every"
	}
	if rm == RepeatOnce {
		return "once"
	}
	if rm == RepeatTimes {
		return "times"
	}
	return "#?"
}
type (
	// An IdentLit node represents an identifier.
	IdentLit struct {
		Name string
		Pos Pos
		Kind Token
	}
	// A BaseLit node wraps another expression under a leading token.
	BaseLit struct {
		Kind Token
		Expr Expr
		Pos Pos
	}
	// A BinaryExpr node represents a binary expression.
	BinaryExpr struct {
		LHS Expr // left operand
		Op Token // operator
		RHS Expr // right operand
	}
	// A ParenExpr node represents a parenthesized expression.
	ParenExpr struct {
		Expr Expr // parenthesized expression
	}
	// A PropExpr node attaches a list of property expressions to an expression.
	PropExpr struct {
		Expr Expr
		List []Expr
	}
	// A DeviceLit node represents a device reference with an optional
	// bbox/radius property.
	DeviceLit struct {
		Unit DistanceUnit
		Kind Token
		Value float64
		Pos Pos
	}
	// A DevicesLit node represents a set of device references ("@" for all),
	// with an optional bbox/radius property.
	DevicesLit struct {
		All bool
		Unit DistanceUnit
		Kind Token
		Value float64
		Pos Pos
		Ref []xid.ID
	}
	// An ObjectLit node represents a set of object references ("@" for all),
	// with an optional time duration/after property.
	ObjectLit struct {
		All bool
		Kind Token
		Ref []xid.ID
		DurVal time.Duration
		DurTyp Token
		Pos Pos
	}
	// A TriggerLit represents a repeat mode type.
	TriggerLit struct {
		Repeat RepeatMode
		Interval time.Duration
		Value time.Duration
		Times int
		Pos Pos
	}
	// A ResetLit node represents a "reset after <duration>" clause.
	ResetLit struct {
		Kind Token
		Pos Pos
		After time.Duration
	}
	// A PointLit node represents a coordinate pair literal.
	PointLit struct {
		Lat, Lon float64
		Pos Pos
		Kind Token
	}
	// A ListLit represents a list of int or float or string type.
	ListLit struct {
		Items []Expr
		Pos Pos
		Kind Token
		Typ Token
	}
	// An IDLit node represents an xid identifier literal.
	IDLit struct {
		Kind Token
		Value xid.ID
		Pos Pos
	}
	// A StringLit node represents a literal of string type.
	StringLit struct {
		Value string
		Pos Pos
	}
	// An IntLit node represents a literal of int type.
	IntLit struct {
		Value int
		Pos Pos
	}
	// A FloatLit node represents a literal of float type.
	FloatLit struct {
		Value float64
		Pos Pos
	}
	// A TimeLit node represents a literal of time type (hour and minute).
	TimeLit struct {
		Hour int
		Minute int
		Pos Pos
	}
	// A DistanceLit node represents a distance value with its unit.
	DistanceLit struct {
		Unit DistanceUnit
		Value float64
		Pos Pos
	}
	// A DurationLit node represents a time.Duration literal.
	DurationLit struct {
		Kind Token
		Value time.Duration
		Pos Pos
	}
	// A VarLit represents a variable literal.
	VarLit struct {
		Value Token
		Pos Pos
	}
	// A BooleanLit represents a boolean literal.
	BooleanLit struct {
		Value bool
		Pos Pos
	}
)
// Object pairs an identifier with its GeoJSON geometry.
type Object struct {
	ID string // external identifier of the object
	Data geojson.Object // the object's geometry
}
// String renders the inner expression wrapped in parentheses.
func (e *ParenExpr) String() string {
	return "(" + e.Expr.String() + ")"
}
// String renders the expression as "LHS op RHS".
func (e *BinaryExpr) String() string {
	return e.LHS.String() + " " + e.Op.String() + " " + e.RHS.String()
}
// String returns the literal's string value.
// (Was fmt.Sprintf("%s", e.Value) — a needless format call on a string.)
func (e *StringLit) String() string {
	return e.Value
}
// String renders the integer literal in base 10.
func (e *IntLit) String() string {
	return fmt.Sprintf("%d", e.Value)
}
// String renders the float literal with two decimal places.
// Pointer receiver for consistency with every other AST String method
// (expression nodes are handled as pointers throughout this file).
func (e *FloatLit) String() string {
	return fmt.Sprintf("%.2f", e.Value)
}
// String renders the variable literal as its token wrapped in braces.
func (e *VarLit) String() string {
	return fmt.Sprintf("{%s}", e.Value)
}
// String renders the list as "[a, b, c]" (or "[a .. b]" for RANGE lists).
// String items are double-quoted; all other items use their own String form.
func (e *ListLit) String() string {
	var sb strings.Builder
	li := len(e.Items) - 1 // index of the last item (no separator after it)
	sb.WriteString("[")
	for i, expr := range e.Items {
		if _, ok := expr.(*StringLit); ok {
			sb.WriteString(`"` + expr.String() + `"`)
		} else {
			sb.WriteString(expr.String())
		}
		if i != li {
			// RANGE lists use " .. " between bounds, plain lists use a comma.
			if e.Kind == RANGE {
				sb.WriteString(" .. ")
			} else {
				sb.WriteString(COMMA.String())
			}
		}
	}
	sb.WriteString("]")
	return sb.String()
}
// String returns "true" or "false".
func (e *BooleanLit) String() string {
	// Early return instead of an else branch after a terminating return.
	if e.Value {
		return "true"
	}
	return "false"
}
// String renders the device literal, appending a ":bbox"/":radius" property
// with its value and unit when the literal's kind carries one.
func (e *DeviceLit) String() string {
	var sb strings.Builder
	sb.WriteString("device")
	// writeProps appends " :<name> <value><unit>" to the output.
	writeProps := func(name string) {
		sb.WriteString(" :")
		sb.WriteString(name)
		sb.WriteString(" ")
		sb.WriteString(fmt.Sprintf("%.1f", e.Value))
		sb.WriteString(e.Unit.String())
	}
	switch e.Kind {
	case BBOX:
		writeProps("bbox")
	case RADIUS:
		writeProps("radius")
	}
	return sb.String()
}
// String renders the object literal as `kind(@,"ref1","ref2") :time ...`,
// where "@" marks the all-objects selector and the optional trailing
// property carries the duration/after clause.
func (e *ObjectLit) String() string {
	var sb strings.Builder
	sb.WriteString(e.Kind.String())
	sb.WriteString("(")
	last := len(e.Ref) - 1 // index of the last reference (no comma after it)
	if e.All {
		sb.WriteString("@")
	}
	if e.All && len(e.Ref) > 0 {
		sb.WriteString(",")
	}
	for i, ref := range e.Ref {
		sb.WriteString(`"`)
		sb.WriteString(ref.String())
		sb.WriteString(`"`)
		if i != last {
			sb.WriteString(",")
		}
	}
	// writeProps appends " :time <name> <duration>" to the output.
	writeProps := func(name string) {
		sb.WriteString(" :time ")
		sb.WriteString(name)
		sb.WriteString(" ")
		sb.WriteString(e.DurVal.String())
	}
	sb.WriteString(")")
	switch e.DurTyp {
	case DURATION:
		writeProps("duration")
	case AFTER:
		writeProps("after")
	}
	return sb.String()
}
// String returns the textual form of the identifier's token kind.
// Note: the Name field is not used here.
func (e *IdentLit) String() string {
	return e.Kind.String()
}
// String formats the time literal as zero-padded "HH:MM".
// Replaces the manual strconv + "0"-prefix padding with a single %02d
// format, which produces identical output for the valid 0-59/0-23 range.
func (e *TimeLit) String() string {
	return fmt.Sprintf("%02d:%02d", e.Hour, e.Minute)
}
// steps returns 12 for a RADIUS literal, 4 for a BBOX literal, and 0
// otherwise.
func (e *DevicesLit) steps() (steps int) {
	if e.Kind == RADIUS {
		return 12
	}
	if e.Kind == BBOX {
		return 4
	}
	return 0
}
// steps returns 12 for a RADIUS literal, 4 for a BBOX literal, and 0
// otherwise.
func (e *DeviceLit) steps() (steps int) {
	if e.Kind == RADIUS {
		return 12
	}
	if e.Kind == BBOX {
		return 4
	}
	return 0
}
// meters converts the literal's value to meters; unknown units yield 0.
func (e *DeviceLit) meters() float64 {
	if e.Unit == DistanceMeters {
		return e.Value
	}
	if e.Unit == DistanceKilometers {
		return e.Value * 1000
	}
	return 0
}
// meters converts the literal's value to meters; unknown units yield 0.
func (e *DevicesLit) meters() float64 {
	if e.Unit == DistanceMeters {
		return e.Value
	}
	if e.Unit == DistanceKilometers {
		return e.Value * 1000
	}
	return 0
}
// hasRadius reports whether the literal describes a usable search area:
// kind RADIUS or BBOX, a known distance unit, and a positive value.
func (e *DeviceLit) hasRadius() bool {
	if e.Kind != RADIUS && e.Kind != BBOX {
		return false
	}
	if e.Unit != DistanceMeters && e.Unit != DistanceKilometers {
		return false
	}
	return e.Value > 0
}
// hasRadius reports whether the literal describes a usable search area:
// kind RADIUS or BBOX, a known distance unit, and a positive value.
func (e *DevicesLit) hasRadius() bool {
	if e.Kind != RADIUS && e.Kind != BBOX {
		return false
	}
	if e.Unit != DistanceMeters && e.Unit != DistanceKilometers {
		return false
	}
	return e.Value > 0
}
// String renders the devices literal as `devices(@,"ref1",...) :radius ...`,
// where "@" marks the all-devices selector and the optional trailing
// property carries the bbox/radius value and unit.
func (e *DevicesLit) String() string {
	var sb strings.Builder
	sb.WriteString("devices")
	sb.WriteString("(")
	last := len(e.Ref) - 1 // index of the last reference (no comma after it)
	if e.All {
		sb.WriteString("@")
	}
	if e.All && len(e.Ref) > 0 {
		sb.WriteString(",")
	}
	for i, ref := range e.Ref {
		sb.WriteString(`"`)
		sb.WriteString(ref.String())
		sb.WriteString(`"`)
		if i != last {
			sb.WriteString(",")
		}
	}
	sb.WriteString(")")
	// writeProps appends " :<name> <value><unit>" to the output.
	writeProps := func(name string) {
		sb.WriteString(" :")
		sb.WriteString(name)
		sb.WriteString(" ")
		sb.WriteString(fmt.Sprintf("%.1f", e.Value))
		sb.WriteString(e.Unit.String())
	}
	switch e.Kind {
	case BBOX:
		writeProps("bbox")
	case RADIUS:
		writeProps("radius")
	}
	return sb.String()
}
// String renders the trigger literal, e.g. "trigger 3 times interval 5s",
// "trigger every 10s", or "trigger once". Unknown repeat modes fall back
// to "once".
func (e *TriggerLit) String() string {
	var sb strings.Builder
	sb.WriteString(TRIGGER.String())
	sb.WriteString(" ")
	switch e.Repeat {
	case RepeatTimes:
		sb.WriteString(strconv.Itoa(e.Times))
		sb.WriteString(" ")
		sb.WriteString("times")
		sb.WriteString(" ")
		sb.WriteString("interval")
		sb.WriteString(" ")
		sb.WriteString(e.Interval.String())
	case RepeatEvery:
		sb.WriteString("every")
		sb.WriteString(" ")
		sb.WriteString(e.Value.String())
	case RepeatOnce:
		sb.WriteString("once")
	default:
		// Treat any unrecognized mode as the safest (single-shot) form.
		sb.WriteString("once")
	}
	return sb.String()
}
// String renders the wrapped expression followed by its property list,
// producing `expr { :p1 :p2  }`.
func (e *PropExpr) String() string {
	var out strings.Builder
	out.WriteString(e.Expr.String())
	out.WriteString(" { ")
	for _, prop := range e.List {
		out.WriteString(":")
		out.WriteString(prop.String())
		out.WriteString(" ")
	}
	out.WriteString(" }")
	return out.String()
}
// String renders the literal as "reset after <duration>".
func (e *ResetLit) String() string {
	return fmt.Sprintf("%s after %s", RESET, e.After)
}
// String renders the point as "<kind> <lat> <lon>".
func (e *PointLit) String() string {
	return fmt.Sprintf("%s %f %f", e.Kind, e.Lat, e.Lon)
}
// String renders the distance with two decimals and its unit suffix.
func (e *DistanceLit) String() string {
	return fmt.Sprintf("%.2f%s", e.Value, e.Unit)
}
// String renders the literal as "<kind> <inner expression>".
func (e *BaseLit) String() string {
	return fmt.Sprintf("%s %s", e.Kind, e.Expr)
}
// String is not implemented yet and returns a placeholder.
// NOTE(review): this stub means DurationLit nodes cannot round-trip
// through String(); implement before relying on re-parsing.
func (e *DurationLit) String() string {
	return "duration todo"
}
// String returns the textual xid value of the identifier literal.
func (e *IDLit) String() string {
	return e.Value.String()
}
// Marker-method implementations: these make each node type satisfy the
// Expr interface while preventing implementations outside this package.
func (_ *ParenExpr) expr() {}
func (_ *BinaryExpr) expr() {}
func (_ *StringLit) expr() {}
func (_ *IntLit) expr() {}
func (_ *FloatLit) expr() {}
func (_ *VarLit) expr() {}
func (_ *BooleanLit) expr() {}
func (_ *DeviceLit) expr() {}
func (_ *ObjectLit) expr() {}
func (_ *IdentLit) expr() {}
func (_ *ListLit) expr() {}
func (_ *DevicesLit) expr() {}
func (_ *TimeLit) expr() {}
func (_ *PropExpr) expr() {}
func (_ *TriggerLit) expr() {}
func (_ *ResetLit) expr() {}
func (_ *PointLit) expr() {}
func (_ *DistanceLit) expr() {}
func (_ *DurationLit) expr() {}
func (_ *BaseLit) expr() {}
func (_ *IDLit) expr() {}
package zdb
import (
"github.com/ocdogan/rbt"
)
// dataColumn stores one typed column of a DataTable. Values are deduplicated:
// each distinct value is stored once in the embedded valueList and indexed
// by a red-black tree that maps a value key to the rows holding that value.
type dataColumn struct {
	valueList
	index int // position of this column within the table
	name string // column name
	valueCount int // number of distinct values stored
	valueType ValueType // type of every value in this column
	table *DataTable // owning table
	btree *rbt.RbTree // value key -> *dataNode (distinct-value index)
	nilRows map[int]*dataRow // row id -> row, for rows whose value is nil
}
// get returns the distinct value stored at valueIndex, boxed as interface{}.
// The bool result is false when the index is out of range or the column's
// type is not one of the handled value types. Dates are cloned so callers
// cannot mutate the stored value.
func (column *dataColumn) get(valueIndex int) (interface{}, bool) {
	if valueIndex > -1 && valueIndex < column.valueCount {
		switch column.valueType {
		case ValString:
			return *column.strings[valueIndex], true
		case ValInt:
			return column.integers[valueIndex], true
		case ValLong:
			return column.longs[valueIndex], true
		case ValFloat:
			return column.floats[valueIndex], true
		case ValDate:
			return column.dates[valueIndex].Clone(), true
		}
	}
	return nil, false
}
// set stores value for row in this column and returns the index of the
// stored distinct value (nilValueInt when the value is nil/unconvertible).
// Values are first converted to the column type, then deduplicated through
// the btree: an existing value just gains the row; a new value is appended
// to the typed storage and indexed.
func (column *dataColumn) set(row *dataRow, value interface{}) int {
	convertedVal, ok := toValueType(value, column.valueType)
	if !ok || convertedVal == nil {
		// Unconvertible or nil values are tracked separately per row.
		column.nilRows[row.id] = row
		return nilValueInt
	}
	key, ok := toValueKey(convertedVal, column.valueType)
	if !ok || key == nil || key == nilKey {
		column.nilRows[row.id] = row
		return nilValueInt
	}
	// Existing distinct value: just attach the row to its node.
	if irows, ok := column.btree.Get(key); ok {
		node := irows.(*dataNode)
		node.add(row)
		return node.valueIndex
	}
	// New distinct value: append to the typed storage for the column type.
	result := nilValueInt
	switch column.valueType {
	case ValString:
		data := convertedVal.(string)
		column.strings = append(column.strings, &data)
		result = len(column.strings)-1
	case ValInt:
		column.integers = append(column.integers, convertedVal.(int))
		result = len(column.integers)-1
	case ValLong:
		column.longs = append(column.longs, convertedVal.(int64))
		result = len(column.longs)-1
	case ValFloat:
		column.floats = append(column.floats, convertedVal.(float64))
		result = len(column.floats)-1
	case ValDate:
		data := convertedVal.(Date)
		column.dates = append(column.dates, &data)
		result = len(column.dates)-1
	}
	column.valueCount++
	// Index the new value so later rows with the same value reuse it.
	node := &dataNode{
		valueIndex: result,
	}
	node.add(row)
	column.btree.Insert(key, node)
	return result
}
// remove detaches row from this column's indexes. Rows holding a nil value
// are dropped from nilRows; otherwise the row is removed from the btree
// node of its distinct value. The value itself stays in the typed storage.
func (column *dataColumn) remove(row *dataRow) {
	valueIndex := row.data[column.index]
	if valueIndex < 0 {
		// Negative index marks a nil value for this row.
		delete(column.nilRows, row.id)
		return
	}
	// Rebuild the value key from the stored distinct value.
	var ivalue interface{}
	switch column.valueType {
	case ValString:
		ivalue = *column.strings[valueIndex]
	case ValInt:
		ivalue = column.integers[valueIndex]
	case ValLong:
		ivalue = column.longs[valueIndex]
	case ValFloat:
		ivalue = column.floats[valueIndex]
	case ValDate:
		ivalue = *column.dates[valueIndex]
	}
	key, _ := toValueKey(ivalue, column.valueType)
	if irows, ok := column.btree.Get(key); ok {
		irows.(*dataNode).remove(row)
	}
}
func (column *dataColumn) update(row *dataRow, value interface{}) int {
column.remove(row)
return column.set(row, value)
} | datacolumn.go | 0.558086 | 0.472683 | datacolumn.go | starcoder |
package treap
import (
"math/rand"
"time"
goheap "github.com/theodesp/go-heaps"
)
// MaxInt is the largest value representable by int on this platform.
const MaxInt = int(^uint(0) >> 1)
// Node is a treap node: ordered by Key like a BST and by Priority like a
// max-heap (a node's priority is >= its children's).
type Node struct {
	Priority goheap.Integer // random heap priority
	Key goheap.Item // BST ordering key (the stored item)
	Left, Right *Node // child subtrees
}
// split partitions treap t by key into two treaps:
//   - every key in the left result is <= key
//   - every key in the right result is > key
//
// Cleaned up: the original declared outer left/right variables and then
// shadowed one of them in each branch, which was confusing and left dead
// declarations; each branch now returns directly.
func split(t *Node, key goheap.Item) (*Node, *Node) {
	if t == nil {
		return nil, nil
	}
	if t.Key.Compare(key) <= 0 {
		// t and its left subtree belong on the left; only t's right
		// subtree may still contain keys greater than key.
		var right *Node
		t.Right, right = split(t.Right, key)
		return t, right
	}
	// Symmetric case: t and its right subtree belong on the right.
	var left *Node
	left, t.Left = split(t.Left, key)
	return left, t
}
// merge joins treaps x and y into one, assuming every key in x is <= the
// minimum key in y. The root with the higher priority wins, preserving the
// heap property.
func merge(x, y *Node) *Node {
	switch {
	case x == nil:
		return y
	case y == nil:
		return x
	case x.Priority.Compare(y.Priority) > 0:
		x.Right = merge(x.Right, y)
		return x
	default:
		y.Left = merge(x, y.Left)
		return y
	}
}
// insert places pnode into the subtree rooted at t and returns the new
// root. A node whose priority beats the current root becomes the new root,
// with the old subtree split around its key; otherwise the insertion
// descends by key order.
func (t *Node) insert(pnode *Node) *Node {
	if t == nil {
		return pnode
	}
	if pnode.Priority.Compare(t.Priority) > 0 {
		pnode.Left, pnode.Right = split(t, pnode.Key)
		return pnode
	}
	if t.Key.Compare(pnode.Key) == 1 {
		t.Left = t.Left.insert(pnode)
	} else {
		t.Right = t.Right.insert(pnode)
	}
	return t
}
// generatePriority returns a random priority in [0, MaxInt) for a new
// node; random priorities keep the treap balanced in expectation.
func generatePriority() goheap.Integer {
	return goheap.Integer(rand.Intn(MaxInt))
}
// Treap is a min-oriented heap of goheap.Items backed by a randomized
// binary search tree (treap); the minimum item is the leftmost node.
type Treap struct {
	Root *Node // root of the treap; nil when empty
}
// Init initializes or clears the Treap and returns it.
// BUG FIX: the previous implementation returned a brand-new Treap and left
// the receiver untouched, so h.Init() never actually cleared h as
// documented; it now resets the receiver's root and returns the receiver.
func (h *Treap) Init() *Treap {
	rand.Seed(time.Now().UnixNano())
	h.Root = nil
	return h
}
// New returns an initialized, empty Treap.
func New() *Treap { return new(Treap).Init() }
// Insert adds an item into the heap and returns it. The new node receives
// a random priority that decides its depth in the treap.
func (h *Treap) Insert(v goheap.Item) goheap.Item {
	node := &Node{Priority: generatePriority(), Key: v}
	if h.Root != nil {
		h.Root = h.Root.insert(node)
	} else {
		h.Root = node
	}
	return v
}
// DeleteMin deletes the minimum value and returns it, or nil when empty.
// The minimum lives at the leftmost node; it is spliced out by merging its
// subtrees back into its parent's left slot.
func (h *Treap) DeleteMin() goheap.Item {
	v := h.Root
	if v == nil {
		return nil
	}
	// The root itself is the minimum when it has no left child.
	if v.Left == nil {
		h.Root = v.Right
		return v.Key
	}
	// Walk down to the parent of the leftmost node.
	for ; v.Left.Left != nil; v = v.Left {
	}
	min := v.Left
	// Splice out the minimum, re-attaching its children in its place.
	v.Left = merge(v.Left.Left, v.Left.Right)
	return min.Key
}
// FindMin returns the minimum value without removing it, or nil when the
// treap is empty. The minimum is the leftmost node.
func (h *Treap) FindMin() goheap.Item {
	node := h.Root
	if node == nil {
		return nil
	}
	for node.Left != nil {
		node = node.Left
	}
	return node.Key
}
// Clear removes all items from the heap by dropping the root; the detached
// nodes become garbage.
func (h *Treap) Clear() {
	h.Root = nil
}
package main
import (
"context"
"fmt"
"strings"
"time"
)
// Move describes a chess move as a pair of 0-63 square indices,
// where index = row*8 + column and a1 = 0.
type Move struct {
	startIndex uint8 // square the piece moves from
	endIndex uint8 // square the piece moves to
}
// String renders the move in algebraic form, e.g. "e2 to e4".
func (m *Move) String() string {
	return indexToStringPos(m.startIndex) + " to " + indexToStringPos(m.endIndex)
}
// indexFromStringLoc converts algebraic notation ("a1".."h8") into a 0-63
// board index (row*8 + column). It returns the out-of-board sentinel 65
// together with an error for malformed positions.
func indexFromStringLoc(pos string) (uint8, error) {
	if len(pos) != 2 {
		return 65, fmt.Errorf("position string (%s) invalid length.", pos)
	}
	column := pos[0] - 'a'
	row := pos[1] - '1'
	// column and row are unsigned bytes, so any character below 'a'/'1'
	// wraps around to a large value; a single upper-bound check covers both
	// directions. (The previous `< 0` comparisons were always false.)
	if column > 7 {
		return 65, fmt.Errorf("position string (%s) invalid column.", pos)
	}
	if row > 7 {
		return 65, fmt.Errorf("position string (%s) invalid row.", pos)
	}
	return row*8 + column, nil
}
// MoveFromStringPos parses a move written as "e2 to e4" or "e2 e4".
func MoveFromStringPos(pos string) (*Move, error) {
	parts := strings.Split(pos, " to ")
	if len(parts) != 2 {
		// Fall back to the bare space-separated form.
		parts = strings.Split(pos, " ")
	}
	if len(parts) != 2 {
		return nil, fmt.Errorf("could not parse move string %s", pos)
	}
	start, err := indexFromStringLoc(parts[0])
	if err != nil {
		return nil, err
	}
	end, err := indexFromStringLoc(parts[1])
	if err != nil {
		return nil, err
	}
	return &Move{startIndex: start, endIndex: end}, nil
}
// indexToStringPos converts a 0-63 board index into algebraic notation
// ("a1".."h8"), the inverse of indexFromStringLoc.
func indexToStringPos(index uint8) string {
	row := index / 8
	column := index % 8
	// BUG FIX: the file letter must be printed as a character (%c, not %b,
	// which rendered its code point in binary), and the 0-based row must be
	// converted to the 1-based rank.
	return fmt.Sprintf("%c%d", 'a'+column, row+1)
}
// ColorIndex selects the color dimension of the board's bitboard array.
type ColorIndex int
// PieceIndex selects the piece dimension of the board's bitboard array.
type PieceIndex int
const (
	KingIndex PieceIndex = 0
	QueenIndex PieceIndex = 1
	BishopIndex PieceIndex = 2
	KnightIndex PieceIndex = 3
	RookIndex PieceIndex = 4
	PawnIndex PieceIndex = 5
	InvalidPieceIndex PieceIndex = 6 // one past the last piece; loop bound / "no piece"
	WhiteIndex ColorIndex = 0
	BlackIndex ColorIndex = 1
	InvalidColorIndex ColorIndex = 2 // one past the last color; loop bound / "no color"
)
// Board stores piece positions as bitboards: one uint64 per (color, piece)
// pair, where bit i corresponds to square i (a1 = bit 0, h8 = bit 63).
type Board struct {
	pieces [2][6]uint64
}
// convertPosToBitmap returns a bitboard with only the bit for square pos
// set.
func convertPosToBitmap(pos uint8) uint64 {
	var mask uint64 = 1
	return mask << pos
}
// NewStandardBoard returns a board with all 32 pieces on their standard
// starting squares (white on ranks 1-2, black on ranks 7-8).
func NewStandardBoard() *Board {
	b := &Board{}
	// Kings on e1/e8, queens on d1/d8.
	b.pieces[WhiteIndex][KingIndex] = convertPosToBitmap(4)
	b.pieces[BlackIndex][KingIndex] = convertPosToBitmap(60)
	b.pieces[WhiteIndex][QueenIndex] = convertPosToBitmap(3)
	b.pieces[BlackIndex][QueenIndex] = convertPosToBitmap(59)
	// Bishops on c/f files, knights on b/g files, rooks on a/h files.
	b.pieces[WhiteIndex][BishopIndex] = convertPosToBitmap(2) | convertPosToBitmap(5)
	b.pieces[BlackIndex][BishopIndex] = convertPosToBitmap(58) | convertPosToBitmap(61)
	b.pieces[WhiteIndex][KnightIndex] = convertPosToBitmap(1) | convertPosToBitmap(6)
	b.pieces[BlackIndex][KnightIndex] = convertPosToBitmap(57) | convertPosToBitmap(62)
	b.pieces[WhiteIndex][RookIndex] = 1 | convertPosToBitmap(7)
	b.pieces[BlackIndex][RookIndex] = convertPosToBitmap(56) | convertPosToBitmap(63)
	// Pawns fill rank 2 (squares 8-15) and rank 7 (squares 48-55).
	b.pieces[WhiteIndex][PawnIndex] = 0
	for i := uint8(8); i < 16; i++ {
		b.pieces[WhiteIndex][PawnIndex] |= convertPosToBitmap(i)
	}
	b.pieces[BlackIndex][PawnIndex] = 0
	for i := uint8(48); i < 56; i++ {
		b.pieces[BlackIndex][PawnIndex] |= convertPosToBitmap(i)
	}
	return b
}
// GetWhitePieces returns a bitboard with the union of all white pieces.
func (b *Board) GetWhitePieces() uint64 {
	var all uint64
	for p := KingIndex; p < InvalidPieceIndex; p++ {
		all |= b.pieces[WhiteIndex][p]
	}
	return all
}
// GetBlackPieces returns a bitboard with the union of all black pieces.
func (b *Board) GetBlackPieces() uint64 {
	var all uint64
	for p := KingIndex; p < InvalidPieceIndex; p++ {
		all |= b.pieces[BlackIndex][p]
	}
	return all
}
// GetColorAndPieceForPos reports which color and piece occupy square
// posIndex, or (InvalidColorIndex, InvalidPieceIndex) when it is empty.
func (b *Board) GetColorAndPieceForPos(posIndex uint8) (ColorIndex, PieceIndex) {
	mask := convertPosToBitmap(posIndex)
	color := InvalidColorIndex
	if b.GetWhitePieces()&mask != 0 {
		color = WhiteIndex
	}
	if b.GetBlackPieces()&mask != 0 {
		color = BlackIndex
	}
	if color == InvalidColorIndex {
		return InvalidColorIndex, InvalidPieceIndex
	}
	for piece := KingIndex; piece < InvalidPieceIndex; piece++ {
		if b.pieces[color][piece]&mask != 0 {
			return color, piece
		}
	}
	return InvalidColorIndex, InvalidPieceIndex
}
// GetAllPieces returns a bitboard with every occupied square of both
// colors.
func (b *Board) GetAllPieces() uint64 {
	return b.GetBlackPieces() | b.GetWhitePieces()
}
// String renders the board as ASCII art with rank 8 on top. White pieces
// are uppercase, black lowercase; knights use 'H'/'h' so they do not clash
// with the king's 'K'.
func (b *Board) String() string {
	// Build a flat 64-square character grid, blank by default.
	pieces := make([]byte, 64)
	for i := uint64(0); i < 64; i++ {
		pieces[i] = ' '
	}
	addPiece(b.pieces[WhiteIndex][KingIndex], 'K', pieces)
	addPiece(b.pieces[BlackIndex][KingIndex], 'k', pieces)
	addPiece(b.pieces[WhiteIndex][QueenIndex], 'Q', pieces)
	addPiece(b.pieces[BlackIndex][QueenIndex], 'q', pieces)
	addPiece(b.pieces[WhiteIndex][BishopIndex], 'B', pieces)
	addPiece(b.pieces[BlackIndex][BishopIndex], 'b', pieces)
	addPiece(b.pieces[WhiteIndex][KnightIndex], 'H', pieces)
	addPiece(b.pieces[BlackIndex][KnightIndex], 'h', pieces)
	addPiece(b.pieces[WhiteIndex][RookIndex], 'R', pieces)
	addPiece(b.pieces[BlackIndex][RookIndex], 'r', pieces)
	addPiece(b.pieces[WhiteIndex][PawnIndex], 'P', pieces)
	addPiece(b.pieces[BlackIndex][PawnIndex], 'p', pieces)
	// Emit ranks top-down with cell separators and horizontal rules.
	retval := "-----------------\n"
	for r := 7; r >= 0; r-- {
		for c := 0; c < 8; c++ {
			retval += fmt.Sprintf("|%c", pieces[r*8+c])
		}
		retval += "|\n-----------------\n"
	}
	return retval
}
// addPiece writes charCode into pieces at every square whose bit is set in
// bitmap.
func addPiece(bitmap uint64, charCode byte, pieces []byte) {
	for square := 0; square < 64; square++ {
		if bitmap&(uint64(1)<<uint(square)) != 0 {
			pieces[square] = charCode
		}
	}
}
// Player supplies moves for one side of the game.
type Player interface {
	// GetNextMove returns the player's chosen move for the given board.
	GetNextMove(ctx context.Context, board *Board) (*Move, error)
	// GetTimePerTurn returns the time budget for a single turn.
	GetTimePerTurn() time.Duration
}
// ChessGame holds the state of one game: both players, the board, and
// whose turn it is.
type ChessGame struct {
	whitePlayer Player
	blackPlayer Player
	board *Board
	turnIndex ColorIndex // color that moves next
}
// IsValidMove reports whether m is legal for the given color.
// NOTE(review): currently a stub that accepts every move; legality
// checking is not implemented yet.
func (b *Board) IsValidMove(m *Move, colorIndex ColorIndex) bool {
	return true
}
// applyMove moves the piece on m.startIndex to m.endIndex, capturing
// whatever occupied the destination. It fails when the start square is
// empty. No legality checking happens here.
func (b *Board) applyMove(m *Move) error {
	color, piece := b.GetColorAndPieceForPos(m.startIndex)
	if color == InvalidColorIndex || piece == InvalidPieceIndex {
		return fmt.Errorf("no piece at pos %d", m.startIndex)
	}
	endSpace := convertPosToBitmap(m.endIndex)
	// Clear the destination bit in every bitboard, removing any captured
	// piece regardless of its color or type.
	for i := WhiteIndex; i < InvalidColorIndex; i++ {
		for j := KingIndex; j < InvalidPieceIndex; j++ {
			b.pieces[i][j] = b.pieces[i][j] &^ endSpace
		}
	}
	// Move the piece: clear its start bit, then set its end bit.
	startSpace := convertPosToBitmap(m.startIndex)
	b.pieces[color][piece] = b.pieces[color][piece] &^ startSpace
	b.pieces[color][piece] = b.pieces[color][piece] | endSpace
	return nil
}
// IsFinished reports whether the game is over.
// NOTE(review): stub — always false, so PlayGame currently loops forever.
func (g *ChessGame) IsFinished() bool {
	return false
}
// GetWinner returns the winning player.
// NOTE(review): stub — always nil; no win detection is implemented yet.
func (g *ChessGame) GetWinner() Player {
	return nil
}
// NewStandardChessGame creates a game with the standard starting position,
// two human players, and white to move.
func NewStandardChessGame() *ChessGame {
	g := &ChessGame{}
	g.board = NewStandardBoard()
	g.whitePlayer = &HumanPlayer{}
	g.blackPlayer = &HumanPlayer{}
	g.turnIndex = WhiteIndex
	return g
}
// PlayGame runs the main game loop, alternating turns until IsFinished
// reports true. A failed turn is logged and retried without switching
// sides; the board is printed after every attempt.
func (g *ChessGame) PlayGame(ctx context.Context) {
	fmt.Printf("board:\n%s\n", g.board)
	var err error
	for !g.IsFinished() {
		if g.turnIndex == BlackIndex {
			err = g.TakeTurn(ctx, g.blackPlayer)
		} else {
			err = g.TakeTurn(ctx, g.whitePlayer)
		}
		if err != nil {
			// TakeTurn only flips the turn on success, so the same player
			// simply tries again on the next iteration.
			fmt.Printf("error taking turn, trying again. err: %s\n", err)
		}
		fmt.Printf("board:\n%s\n", g.board)
	}
}
// updateTurnIndex flips whose turn it is.
func (g *ChessGame) updateTurnIndex() {
	if g.turnIndex == WhiteIndex {
		g.turnIndex = BlackIndex
		return
	}
	g.turnIndex = WhiteIndex
}
// TakeTurn asks p for a move (bounded by the player's per-turn time
// budget), validates it, applies it to the board, and passes the turn to
// the other side. On any error the turn is NOT passed, so the same player
// moves again.
func (g *ChessGame) TakeTurn(ctx context.Context, p Player) error {
	// Give the player a bounded amount of time to decide.
	pCtx, cancel := context.WithTimeout(ctx, p.GetTimePerTurn())
	defer cancel()
	m, err := p.GetNextMove(pCtx, g.board)
	if err != nil {
		return err
	}
	if !g.board.IsValidMove(m, g.turnIndex) {
		return fmt.Errorf("move not valid. Move %s", m)
	}
	err = g.board.applyMove(m)
	if err != nil {
		return err
	}
	g.updateTurnIndex()
	return nil
}
package optimize
import (
"errors"
"log"
"net/http"
"os"
)
// ErrFormat indicates that optimize encountered an unknown format.
var ErrFormat = errors.New("optimize: unknown format")
type OptimizeSummary struct {
// Size in bytes before passing in the optimization function
SizeBefore int64
// Size in bytes after passing in the optimization function
SizeAfter int64
// Size in bytes after passing in the optimization function
SizeLossy int64
}
// Optimizer is interface for optimization
type optimizer struct {
name, mimeType string
// Uses a optimize function on the source and saves to the destination
// Returns a summary of the optimization
optimizeFile func(string, string, string) (OptimizeSummary, error)
// Uses a optimization function on input data
// Returns optimized data along with a summary of optimization
optimizeData func([]byte, string) ([]byte, OptimizeSummary, error)
}
var optimizers []optimizer
// RegisterOptimizer registers a format for use by the File and Data
// entry points.
// name is the name of the format, like "jpeg" or "png".
// mimetype is the mime type that identifies the format's encoding.
// optimizeFile optimizes from a source file into a destination file,
// returning a summary of the size change.
// optimizeData optimizes a byte slice and returns the optimized bytes
// along with a summary.
func RegisterOptimizer(name, mimetype string, optimizeFile func(string, string, string) (OptimizeSummary, error), optimizeData func([]byte, string) ([]byte, OptimizeSummary, error)) {
	log.Println("Registered optimizer: " + name)
	entry := optimizer{
		name:         name,
		mimeType:     mimetype,
		optimizeFile: optimizeFile,
		optimizeData: optimizeData,
	}
	optimizers = append(optimizers, entry)
}
// match returns the registered optimizer for mimeType, or the zero
// optimizer (all function fields nil) when none is registered.
func match(mimeType string) optimizer {
	for _, candidate := range optimizers {
		if candidate.mimeType != mimeType {
			continue
		}
		return candidate
	}
	return optimizer{}
}
// detectFileMimeType sniffs the content type of the named file by
// applying http.DetectContentType to its first 512 bytes (the maximum
// the sniffing algorithm considers).
func detectFileMimeType(filename string) (string, error) {
	file, err := os.Open(filename)
	if err != nil {
		return "", err
	}
	// BUG FIX: the file handle was previously leaked.
	defer file.Close()
	// Create a buffer to read the first 512 bytes, which is enough to detect the mimetype
	buffer := make([]byte, 512)
	// BUG FIX: files shorter than 512 bytes previously produced an
	// error (io.EOF for empty files) or sniffed trailing zero bytes;
	// only the bytes actually read are considered now.
	n, err := file.Read(buffer)
	if err != nil && err != io.EOF {
		return "", err
	}
	return http.DetectContentType(buffer[:n]), nil
}
// File optimizes the file at src and writes the result to dst,
// returning a summary of the size change. lossy selects lossy
// behaviour for optimizers that support it. ErrFormat is returned
// when no optimizer is registered for the file's detected mime type.
func File(src string, dst string, lossy string) (OptimizeSummary, error) {
	mimeType, err := detectFileMimeType(src)
	if err != nil {
		return OptimizeSummary{}, err
	}
	opt := match(mimeType)
	log.Println("Match:")
	log.Println(opt)
	if opt.optimizeFile == nil {
		return OptimizeSummary{}, ErrFormat
	}
	return opt.optimizeFile(src, dst, lossy)
}
// Data optimizes src in memory and returns the optimized bytes plus a
// summary of the size change. ErrFormat is returned when the detected
// mime type has no registered optimizer.
func Data(src []byte, lossy string) ([]byte, OptimizeSummary, error) {
	opt := match(http.DetectContentType(src))
	if opt.optimizeData == nil {
		return nil, OptimizeSummary{}, ErrFormat
	}
	return opt.optimizeData(src, lossy)
}
// fileStat returns the size in bytes of the file at src.
func fileStat(src string) (int64, error) {
	info, err := os.Stat(src)
	if err != nil {
		return 0, err
	}
	return info.Size(), nil
}
package sequences
import . "github.com/objecthub/containerkit"
// DependentSequence is a sequence whose contents are derived from
// (and read through to) one or more underlying sequences.
type DependentSequence interface {
	Sequence
}
// EmbeddedDependentSequence wraps obj in the standard sequence
// derivation machinery, providing the common base for all dependent
// sequence implementations in this package.
func EmbeddedDependentSequence(obj DependentSequence) SequenceDerived {
	return &dependentSequence{obj: obj, SequenceDerived: EmbeddedSequence(obj)}
}

// dependentSequence couples a DependentSequence with its derived
// Sequence implementation.
type dependentSequence struct {
	obj DependentSequence
	SequenceDerived
}

// ReadOnly exposes the wrapped object as a read-only view.
func (this *dependentSequence) ReadOnly() DependentSequence {
	return this.obj
}

// String renders the sequence wrapped in angle brackets to mark it as
// dependent.
func (this *dependentSequence) String() string {
	return "<" + this.SequenceDerived.String() + ">"
}
// Reversed sequences

// newReversedSequence returns a view of sequence with its element
// order reversed; it reads through to the underlying sequence.
func newReversedSequence(sequence Sequence) DependentSequence {
	rev := &reversedSequence{fst: sequence}
	rev.SequenceDerived = EmbeddedDependentSequence(rev)
	return rev
}

// reversedSequence presents fst back to front.
type reversedSequence struct {
	SequenceDerived
	fst Sequence
}

// Size is the size of the underlying sequence.
func (this *reversedSequence) Size() int {
	return this.fst.Size()
}

// At returns the element i positions from the end of the underlying
// sequence.
func (this *reversedSequence) At(i int) interface{} {
	last := this.fst.Size() - 1
	return this.fst.At(last - i)
}
// Subsequences

// newSubsequence returns a view of at most maxSize elements of
// sequence starting at index start; it reads through to the
// underlying sequence.
func newSubsequence(sequence Sequence,
                    start int,
                    maxSize int) DependentSequence {
	res := new(subsequence)
	res.SequenceDerived = EmbeddedDependentSequence(res)
	res.fst = sequence
	res.start = start
	res.maxSize = maxSize
	return res
}

// subsequence is a window [start, start+maxSize) over fst.
type subsequence struct {
	SequenceDerived
	fst     Sequence
	start   int
	maxSize int
}

// Size returns the number of elements visible through the window: the
// remainder of fst after start, capped at maxSize and floored at 0.
func (this *subsequence) Size() int {
	switch size := this.fst.Size() - this.start; {
	case size > this.maxSize:
		return this.maxSize
	case size >= 0:
		return size
	}
	return 0
}

// At returns element i of the window, i.e. element start+i of the
// underlying sequence.
//
// BUG FIX: the previous implementation indexed fst.At(i - start) and
// bounded the check with i-start < maxSize, which mis-addressed every
// element whenever start != 0 (e.g. At(0) with start=2 read fst.At(-2));
// the window offset must be added, not subtracted.
func (this *subsequence) At(i int) interface{} {
	switch {
	case i < 0:
		panic("subsequence.At: index below 0")
	case i < this.maxSize:
		return this.fst.At(i + this.start)
	}
	panic("subsequence.At: index above size")
}
// Mapped sequences

// newMappedSequence returns a view of sequence with f applied lazily
// to each element on access.
func newMappedSequence(sequence Sequence, f Mapping) DependentSequence {
	m := &mappedSequence{fst: sequence, f: f}
	m.SequenceDerived = EmbeddedDependentSequence(m)
	return m
}

// mappedSequence applies f to elements of fst on demand.
type mappedSequence struct {
	SequenceDerived
	fst Sequence
	f   Mapping
}

// Size is the size of the underlying sequence.
func (this *mappedSequence) Size() int {
	return this.fst.Size()
}

// At returns f applied to element i of the underlying sequence.
func (this *mappedSequence) At(i int) interface{} {
	return this.f(this.fst.At(i))
}
// Appended sequences

// newAppendedSequence returns a view of fst followed by snd; it reads
// through to both underlying sequences.
func newAppendedSequence(fst Sequence, snd Sequence) DependentSequence {
	app := &appendedSequence{fst: fst, snd: snd}
	app.SequenceDerived = EmbeddedDependentSequence(app)
	return app
}

// appendedSequence is the concatenation of fst and snd.
type appendedSequence struct {
	SequenceDerived
	fst Sequence
	snd Sequence
}

// Size is the combined size of both underlying sequences.
func (this *appendedSequence) Size() int {
	return this.fst.Size() + this.snd.Size()
}

// At indexes into fst for the first fst.Size() positions and into snd
// for the rest.
func (this *appendedSequence) At(i int) interface{} {
	firstLen := this.fst.Size()
	if i < firstLen {
		return this.fst.At(i)
	}
	return this.snd.At(i - firstLen)
}
// Wrapped sequences
func wrappedSequence(encapsulated Sequence, immutable bool) DependentSequence {
res := new(sequenceWrapper)
res.SequenceDerived = EmbeddedDependentSequence(res)
res.encapsulated = encapsulated
res.immutable = immutable
return res
}
type sequenceWrapper struct {
SequenceDerived
encapsulated Sequence
immutable bool
}
func (this *sequenceWrapper) Size() int {
return this.encapsulated.Size()
}
func (this *sequenceWrapper) At(index int) interface{} {
return this.encapsulated.At(index)
}
func (this *sequenceWrapper) Force() FiniteContainer {
return this
}
func (this *sequenceWrapper) Freeze() FiniteContainer {
if this.immutable {
return this
}
return this.SequenceDerived.Freeze()
} | sequences/dependentsequence.go | 0.795896 | 0.466724 | dependentsequence.go | starcoder |
package czml
// Material is a definition of how a surface is colored or shaded.
// All fields are pointers tagged omitempty, so unset material kinds
// are absent from the marshaled JSON.
// NOTE(review): presumably only one material kind should be set at a
// time — confirm against the CZML specification.
// https://github.com/AnalyticalGraphicsInc/czml-writer/wiki/Material
type Material struct {
	SolidColor *SolidColorMaterial `json:"solidColor,omitempty"`
	Image *ImageMaterial `json:"image,omitempty"`
	Grid *GridMaterial `json:"grid,omitempty"`
	Stripe *StripeMaterial `json:"stripe,omitempty"`
	Checkerboard *CheckerboardMaterial `json:"checkerboard,omitempty"`
}

// SolidColorMaterial is a material that fills the surface with a solid color
// https://github.com/AnalyticalGraphicsInc/czml-writer/wiki/SolidColorMaterial
type SolidColorMaterial struct {
	Color *Color `json:"color,omitempty"`
}

// ImageMaterial is a material that fills the surface with an image
// https://github.com/AnalyticalGraphicsInc/czml-writer/wiki/ImageMaterial
type ImageMaterial struct {
	Image *Uri `json:"image,omitempty"`
	Repeat *Repeat `json:"repeat,omitempty"`
	Color *Color `json:"color,omitempty"`
	Transparent *bool `json:"transparent,omitempty"`
}

// Repeat is the number of times an image repeats along each axis.
// https://github.com/AnalyticalGraphicsInc/czml-writer/wiki/Repeat
type Repeat struct {
	Cartesian2 *Cartesian2Value `json:"cartesian2,omitempty"`
	Reference ReferenceValue `json:"reference,omitempty"`
}

// GridMaterial is a material that fills the surface with a two-dimensional grid.
// https://github.com/AnalyticalGraphicsInc/czml-writer/wiki/GridMaterial
type GridMaterial struct {
	Color *Color `json:"color,omitempty"`
	CellAlpha *float64 `json:"cellAlpha,omitempty"`
	LineCount *LineCount `json:"lineCount,omitempty"`
	LineThickness *LineThickness `json:"lineThickness,omitempty"`
	LineOffset *LineOffset `json:"lineOffset,omitempty"`
}

// LineCount is the number of grid lines along each axis
// https://github.com/AnalyticalGraphicsInc/czml-writer/wiki/LineCount
type LineCount struct {
	Cartesian2 *Cartesian2Value `json:"cartesian2,omitempty"`
	Reference ReferenceValue `json:"reference,omitempty"`
}

// LineThickness is the thickness of grid lines along each axis, in pixels
// https://github.com/AnalyticalGraphicsInc/czml-writer/wiki/LineThickness
type LineThickness struct {
	Cartesian2 *Cartesian2Value `json:"cartesian2,omitempty"`
	Reference ReferenceValue `json:"reference,omitempty"`
}

// LineOffset is the offset of grid lines along each axis, as a percentage from 0 to 1
// https://github.com/AnalyticalGraphicsInc/czml-writer/wiki/LineOffset
type LineOffset struct {
	Cartesian2 *Cartesian2Value `json:"cartesian2,omitempty"`
	Reference ReferenceValue `json:"reference,omitempty"`
}

// StripeMaterial is a material that fills the surface with alternating colors
// https://github.com/AnalyticalGraphicsInc/czml-writer/wiki/StripeMaterial
type StripeMaterial struct {
	Orientation StripeOrientation `json:"orientation,omitempty"`
	EvenColor *Color `json:"evenColor,omitempty"`
	OddColor *Color `json:"oddColor,omitempty"`
	Offset *float64 `json:"offset,omitempty"`
	Repeat *float64 `json:"repeat,omitempty"`
}

// StripeOrientation describes the orientation of stripes in a stripe material
// Valid values are `HORIZONTAL` and `VERTICAL`
// https://github.com/AnalyticalGraphicsInc/czml-writer/wiki/StripeOrientation
type StripeOrientation string

// CheckerboardMaterial is a material that fills the surface with a checkerboard pattern.
// https://github.com/AnalyticalGraphicsInc/czml-writer/wiki/CheckerboardMaterial
type CheckerboardMaterial struct {
	EvenColor *Color `json:"evenColor,omitempty"`
	OddColor *Color `json:"oddColor,omitempty"`
	Repeat *Repeat `json:"repeat,omitempty"`
}
package gpu
import (
"fmt"
"github.com/goki/ki/kit"
)
// See: https://www.khronos.org/opengl/wiki/Data_Type_(GLSL)

// Types is a list of GPU data types
type Types int32

// Scalar GPU data types. TypesN is the count of defined types, used
// for enum registration below.
const (
	UndefType Types = iota
	Bool
	Int
	UInt
	Float32
	Float64
	TypesN
)

//go:generate stringer -type=Types

// KiT_Types registers the Types enum with the ki type registry.
var KiT_Types = kit.Enums.AddEnum(TypesN, kit.NotBitFlag, nil)

// TypeNames maps each scalar type to its GLSL type name.
var TypeNames = map[Types]string{
	UndefType: "none",
	Bool: "bool",
	Int: "int",
	UInt: "uint",
	Float32: "float",
	Float64: "double",
}
// TypeBytes returns the storage size in bytes for the given scalar
// type: 8 for Float64, 4 for everything else.
func TypeBytes(tp Types) int {
	switch tp {
	case Float64:
		return 8
	default:
		return 4
	}
}
// UniType represents a fully-specified GPU uniform type, including
// vectors and matrices.
// NOTE(review): Vec and Mat look mutually exclusive — Name() prefers
// the vector form when both are nonzero; confirm intended usage.
type UniType struct {
	Type Types `desc:"data type"`
	Vec int `desc:"if a vector, this is the length of the vector, 0 for scalar (valid values are 2,3,4)"`
	Mat int `desc:"square matrix dimensions, if a matrix (valid values are 3,4)"`
}

// Commonly-used types:

// FUniType is a single float32
var FUniType = UniType{Type: Float32}

// IUniType is a single int32
var IUniType = UniType{Type: Int}

// BUniType is a single bool
var BUniType = UniType{Type: Bool}

// Vec2fUniType is a 2-vector of float32
var Vec2fUniType = UniType{Type: Float32, Vec: 2}

// Vec3fUniType is a 3-vector of float32
var Vec3fUniType = UniType{Type: Float32, Vec: 3}

// Vec4fUniType is a 4-vector of float32
var Vec4fUniType = UniType{Type: Float32, Vec: 4}

// Mat3fUniType is a 3x3 matrix of float32
var Mat3fUniType = UniType{Type: Float32, Mat: 3}

// Mat4fUniType is a 4x4 matrix of float32
var Mat4fUniType = UniType{Type: Float32, Mat: 4}
// Name returns the full GLSL type name for the type, e.g. "vec3",
// "imat4", or the plain scalar name for non-vector, non-matrix types.
// When both Vec and Mat are set, the vector form wins.
func (ty *UniType) Name() string {
	scalar := TypeNames[ty.Type]
	if ty.Vec == 0 && ty.Mat == 0 {
		return scalar
	}
	// GLSL float vectors/matrices carry no prefix; other scalar types
	// prefix the first letter of their name (b, i, u, d).
	pfx := scalar[0:1]
	if ty.Type == Float32 {
		pfx = ""
	}
	if ty.Vec > 0 {
		return fmt.Sprintf("%svec%d", pfx, ty.Vec)
	}
	return fmt.Sprintf("%smat%d", pfx, ty.Mat)
}
// Bytes returns the actual size of this element in bytes: the scalar
// size, multiplied by the vector length or by the squared matrix
// dimension as appropriate.
func (ty *UniType) Bytes() int {
	elem := TypeBytes(ty.Type)
	switch {
	case ty.Vec > 0:
		return ty.Vec * elem
	case ty.Mat > 0:
		return ty.Mat * ty.Mat * elem
	}
	return elem
}
// StdBytes returns the number of bytes this element occupies under the
// std140 uniform layout, including alignment padding: scalars use
// their natural size, vec2 rounds to two components, vec3/vec4 to
// four, and each matrix column pads to four components.
// https://learnopengl.com/Advanced-OpenGL/Advanced-GLSL
func (ty *UniType) StdBytes() int {
	elem := TypeBytes(ty.Type)
	switch {
	case ty.Vec > 0:
		if ty.Vec <= 2 {
			return 2 * elem
		}
		return 4 * elem
	case ty.Mat > 0:
		return ty.Mat * 4 * elem
	}
	return elem
}
// VectorType represents a fully-specified GPU vector type, e.g., for
// inputs / outputs to shader programs.
type VectorType struct {
	Type Types `desc:"data type"`
	Vec int `desc:"length of vector (valid values are 2,3,4)"`
}

// commonly-used vector types:

// Vec2fVecType is a 2-vector of float32
var Vec2fVecType = VectorType{Type: Float32, Vec: 2}

// Vec3fVecType is a 3-vector of float32
var Vec3fVecType = VectorType{Type: Float32, Vec: 3}

// Vec4fVecType is a 4-vector of float32
var Vec4fVecType = VectorType{Type: Float32, Vec: 4}
// Bytes returns number of bytes per Vector element (len * 4 basically)
func (ty *VectorType) Bytes() int {
n := TypeBytes(ty.Type)
return n * ty.Vec
} | oswin/gpu/types.go | 0.840161 | 0.563498 | types.go | starcoder |
package game
// Tileish is the behaviour shared by all map tiles: access to the
// underlying Tile data, distance to another tile, a hook invoked when
// a monster steps onto the tile, and effect-sprite display.
type Tileish interface {
	tile() *Tile
	dist(tileish Tileish) Distance
	stepOn(p Platform, s *State, monster Monstrous) error
	setEffect(sprite SpriteIndex)
}
// Tile is the concrete map-tile state embedded by every tile kind.
type Tile struct {
	monster Monstrous // occupant of this tile, if any
	x, y Position // grid coordinates
	sprite SpriteIndex // sprite used to render the tile
	passable bool // whether monsters may enter this tile
	treasure bool // whether a treasure pickup sits on this tile
	effect SpriteIndex // overlay effect sprite, when active
	effectCounter counter // frames remaining in the effect fade-out
}
// NewTile constructs a bare tile at (x, y) rendered with the given
// sprite; passable controls whether monsters may enter it.
func NewTile(sprite SpriteIndex, x, y Position, passable bool) *Tile {
	return &Tile{
		sprite:   sprite,
		x:        x,
		y:        y,
		passable: passable,
	}
}
// dist returns the Manhattan (taxicab) distance between this tile and
// the given tile.
func (t *Tile) dist(tileish Tileish) Distance {
	other := tileish.tile()
	dx := abs(Distance(t.x) - Distance(other.x))
	dy := abs(Distance(t.y) - Distance(other.y))
	return dx + dy
}
// abs returns the absolute value of d. The minimum value (-2^N) has no
// positive counterpart and passes through unchanged.
func abs(d Distance) Distance {
	if d >= 0 {
		return d
	}
	return -d
}
// effectDuration is the number of frames an effect sprite stays
// visible on a tile while fading out (see Tile.draw).
const (
	effectDuration = 30
)
// draw renders the tile sprite — plus a treasure overlay and any
// active effect — at the tile's position, offset by screen shake.
func (t *Tile) draw(p Platform, shake shake) {
	sprite(p, t.sprite, t.x, t.y, shake)
	if t.treasure {
		// Sprite 12 is drawn on top of the tile as the treasure marker.
		sprite(p, 12, t.x, t.y, shake)
	}
	if t.effectCounter.isActive() {
		t.effectCounter.dec()
		// The effect fades linearly: alpha is the fraction of its
		// lifetime remaining.
		spriteWithAlpha(
			p,
			t.effect,
			t.x,
			t.y,
			shake,
			Alpha(float32(t.effectCounter.value)/effectDuration),
		)
	}
}
// setEffect starts displaying sprite as an overlay on this tile,
// fading it out over effectDuration frames.
func (t *Tile) setEffect(sprite SpriteIndex) {
	t.effect = sprite
	t.effectCounter = counter{effectDuration}
}

// tile returns the underlying Tile data (Tileish implementation).
func (t *Tile) tile() *Tile {
	return t
}

// stepOn is the default no-op hook for a monster entering a tile;
// specific tile kinds override it.
func (t *Tile) stepOn(_ Platform, s *State, monster Monstrous) (err error) {
	// Empty default implementation
	return
}
// TileMaker constructs a tile of a particular kind at a grid position.
type TileMaker = func(x, y Position) Tileish

// Floor is a walkable tile that may carry treasure.
type Floor struct {
	*Tile
}

// NewFloor returns a passable floor tile at (x, y), rendered with
// sprite 2.
func NewFloor(x, y Position) Tileish {
	return &Floor{Tile: NewTile(2, x, y, true)}
}
// stepOn handles a monster entering this floor tile. Only the player
// interacts with it: collecting treasure bumps the score, every third
// pickup grants a spell (capped at 9), and each pickup spawns one
// additional monster.
func (t *Floor) stepOn(p Platform, s *State, monster Monstrous) (err error) {
	if _, isPlayer := monster.(*Player); isPlayer && t.treasure {
		s.score++
		// Every third treasure grants a spell, up to a maximum of 9.
		if s.score%3 == 0 && s.numSpells < 9 {
			s.numSpells++
			s.player.addSpell(s.spells)
		}
		p.Sound(Treasure)
		t.treasure = false
		m, err := spawnMonster(s)
		if err != nil {
			return err
		}
		s.monsters = append(s.monsters, m)
	}
	return
}
// Wall is an impassable map tile.
type Wall struct {
	*Tile
}

// NewWall returns an impassable wall tile at (x, y), rendered with
// sprite 3.
func NewWall(x, y Position) Tileish {
	return &Wall{Tile: NewTile(3, x, y, false)}
}

// Exit is the tile that advances the player to the next level.
type Exit struct {
	*Tile
}

// NewExit returns a passable exit tile at (x, y), rendered with
// sprite 11.
func NewExit(x, y Position) Tileish {
	return &Exit{Tile: NewTile(11, x, y, true)}
}
// stepOn handles a monster entering the exit tile. When the player
// steps on it, the run either ends (final level reached: record the
// score as Won and return to the title screen) or advances to the
// next level with one HP restored, capped at maxHP.
func (t *Exit) stepOn(p Platform, s *State, monster Monstrous) (err error) {
	_, isPlayer := monster.(*Player)
	if isPlayer {
		p.Sound(NewLevel)
		if s.level == numLevels {
			addScore(p, s.score, Won)
			s.state = title
		} else {
			s.level++
			// Heal one HP between levels, capped at maxHP.
			newHP := s.player.hp + 1
			if newHP > maxHP {
				newHP = maxHP
			}
			err = startLevel(s, newHP, nil)
		}
	}
	return
}
package xiaobattery
import (
"machine"
)
// ChargeStatus describes the charging state of the battery.
type ChargeStatus uint8

// Charge status of the battery: discharging, charging, and fully charged.
// Values start at 1, so the zero value is "unknown"/unset.
const (
	Discharging ChargeStatus = iota + 1
	Charging
	FullyCharged
)

// voltagePercentPosition is one calibration point mapping a battery
// voltage (millivolts) to a charge percentage.
type voltagePercentPosition struct {
	millivolts int
	percent int
}

// The two pins connected to the power regulator chip, that indicate charging
// status and power presence.
const (
	pinBatteryCharging machine.Pin = 12
	pinPowerConnected machine.Pin = 19
)

// Voltage to percent mappings. Values in between can be linearly approximated.
// This is a rough fitting, better fits are likely possible.
// Entries must be ordered from full (100%) down to empty (0%), as
// required by voltageToPercent.
var voltagePercentPositions = []voltagePercentPosition{
	{3880, 100},
	{3780, 80},
	{3690, 60},
	{3640, 40},
	{3610, 20},
	{3520, 0},
}
// voltageToPercent converts a battery voltage in millivolts to a
// charge percentage by linear interpolation between the calibration
// points in pointsOnGraph. The points must be ordered from full to
// empty: the first entry has percent==100 and the last percent==0.
// Voltages outside the table clamp to 100 or 0.
func voltageToPercent(millivolts int, pointsOnGraph []voltagePercentPosition) int {
	first := pointsOnGraph[0]
	last := pointsOnGraph[len(pointsOnGraph)-1]
	if millivolts >= first.millivolts {
		return 100
	}
	if millivolts <= last.millivolts {
		return 0
	}
	for i := 1; i < len(pointsOnGraph); i++ {
		low := pointsOnGraph[i]
		if millivolts < low.millivolts {
			continue
		}
		// millivolts lies between low and the previous (higher) point:
		// interpolate linearly along that segment.
		high := pointsOnGraph[i-1]
		return high.percent + (high.percent-low.percent)*(millivolts-high.millivolts)/(high.millivolts-low.millivolts)
	}
	// unreachable: the clamps above guarantee an enclosing segment
	return 0
}
// ---------------------

// BatteryStatusRaw reads and returns the current battery voltage in millivolts
// (mV) and returns the current charging status of the battery (discharging,
// charging, full).
func BatteryStatusRaw() (millivolt int, status ChargeStatus) {
	// NOTE(review): both regulator status pins appear to be active-low
	// (asserted when pulled to ground) — confirm against the board
	// schematic.
	if !pinPowerConnected.Get() {
		// Power is connected.
		if !pinBatteryCharging.Get() {
			// Battery is charging.
			status = Charging
		} else {
			status = FullyCharged
		}
	} else {
		status = Discharging
	}
	value := machine.ADC{31}.Get()
	// Scale the 16-bit ADC reading to millivolts. The factor of 2
	// presumably compensates a voltage divider, with a 3.0 V-referenced
	// ADC — TODO confirm against the hardware documentation.
	return int(value) * 2000 / (65535 / 3), status
}
// BatteryStatus reads and returns the current battery status (percent and
// charge status).
func BatteryStatus() (millivolts, percent int, status ChargeStatus) {
millivolts, status = BatteryStatusRaw()
percent = voltageToPercent(millivolts, voltagePercentPositions)
return
} | utils/xiaoexpansion/battery/battery.go | 0.65368 | 0.544014 | battery.go | starcoder |
package main
/* Given the attached text file as an argument, your program will read the file, and output the 20 most frequently
used words in the file in order, along with their frequency.
The output should be the same to that of the following bash commands,
where the first argument is the text file to process As mentioned,
candidate needs to read the file and output the result of 20 most frequently used words in order and their frequency.
*/
import (
"fmt"
"os"
"sort"
"strings"
"time"
)
// main counts word frequencies in mobydick.txt and prints the 20 most
// frequent words (lowercased, punctuation-trimmed) with their counts,
// mirroring the reference shell pipeline.
func main() {
	defer elapsed()()
	// Filename and result count are hardcoded for this exercise.
	const mostRepeatedTwenty = 20
	const filename = "mobydick.txt"
	b, err := os.ReadFile(filename)
	if err != nil {
		// BUG FIX: the error message was previously built with
		// fmt.Errorf and silently discarded; report it on stderr.
		fmt.Fprintln(os.Stderr, "Unable to read given file:", err)
		return
	}
	// Split the file contents on whitespace, then tally each word
	// after lowercasing and trimming surrounding punctuation.
	m := make(map[string]int)
	for _, word := range strings.Fields(string(b)) {
		m[strings.Trim(strings.ToLower(word), "~`!@#$%^&*()_+-=[]{};':\"\\|,.<>/?")]++
	}
	// Sort (word, count) pairs by descending count and print the top N.
	sorted := sortByValue(m)
	// BUG FIX: guard against inputs with fewer than N distinct words,
	// which previously caused an index-out-of-range panic.
	for i := 0; i < mostRepeatedTwenty && i < len(sorted); i++ {
		fmt.Printf(" %4d %s\n", sorted[i].num, sorted[i].word)
	}
}
// elapsed returns a function that, when invoked, prints the wall-clock
// time since elapsed itself was called. Intended usage:
// defer elapsed()().
func elapsed() func() {
	start := time.Now()
	return func() {
		fmt.Printf("Took %v\n", time.Since(start))
	}
}
// kv pairs a word with the number of times it occurred.
type kv struct {
	word string
	num  int
}

// sortByValue flattens the word-count map into a slice of pairs sorted
// by descending count. Ties are left in whatever order sort.Slice
// produces (it is not stable).
// Ref : https://go.dev/play/p/y1_WBENH4N
// https://stackoverflow.com/questions/18695346/how-can-i-sort-a-mapstringint-by-its-values#There's%20a%20new%20sort.Slice%20function
func sortByValue(m map[string]int) []kv {
	var pairs []kv
	for word, count := range m {
		pairs = append(pairs, kv{word, count})
	}
	sort.Slice(pairs, func(i, j int) bool {
		return pairs[i].num > pairs[j].num
	})
	return pairs
}
// How to run the code?
// go run frequsedwords.go
/* The output should be equal to that of the following shell script
#!/usr/bin/env bash
cat mobydick.txt | tr -cs 'a-zA-Z' '[\n*]' | grep -v "^$" | tr '[:upper:]' '[:lower:]' | sort | uniq -c | sort -nr | head -20
*/
package secp256k1
import (
"crypto/cipher"
"fmt"
"io"
"math/big"
"go.dedis.ch/kyber/v3"
"go.dedis.ch/kyber/v3/util/key"
"golang.org/x/crypto/sha3"
)
// btcec's public interface uses this affine representation for points on the
// curve. This does not naturally accommodate the point at infinity. btcec
// represents it as (0, 0), which is not a point on {y²=x³+7}.
type secp256k1Point struct {
	X *fieldElt
	Y *fieldElt
}

// newPoint returns a fresh point initialized to (0, 0).
func newPoint() *secp256k1Point {
	return &secp256k1Point{newFieldZero(), newFieldZero()}
}

// String returns a string representation of P
func (P *secp256k1Point) String() string {
	return fmt.Sprintf("Secp256k1{X: %s, Y: %s}", P.X, P.Y)
}
// Equal reports whether P and pPrime have the same affine coordinates.
func (P *secp256k1Point) Equal(pPrime kyber.Point) bool {
	other := pPrime.(*secp256k1Point)
	return P.X.Equal(other.X) && P.Y.Equal(other.Y)
}

// Null sets P to the group identity. btcec represents the point at
// infinity as (0, 0), which is not itself on the curve.
func (P *secp256k1Point) Null() kyber.Point {
	P.X = fieldEltFromInt(0)
	P.Y = fieldEltFromInt(0)
	return P
}

// Base sets P to the secp256k1 generator point.
func (P *secp256k1Point) Base() kyber.Point {
	P.X.SetInt(s256.Gx)
	P.Y.SetInt(s256.Gy)
	return P
}
// Pick sets P to a random curve point derived from rand: random x
// ordinates are tried until one lies on the curve, and the resulting y
// is negated with probability 1/2 so both square roots of x³+7 are
// equally likely.
func (P *secp256k1Point) Pick(rand cipher.Stream) kyber.Point {
	for {
		P.X.Set(newFieldZero().Pick(rand))
		maybeRHS := rightHandSide(P.X)
		if maybeY := maybeSqrtInField(maybeRHS); maybeY != (*fieldElt)(nil) {
			P.Y.Set(maybeY)
			// Take the negative with 50% probability
			b := make([]byte, 1)
			rand.XORKeyStream(b, b)
			if b[0]&1 == 0 {
				P.Y.Neg(P.Y)
			}
			return P
		}
	}
}
// Set copies pPrime's coordinates into P.
func (P *secp256k1Point) Set(pPrime kyber.Point) kyber.Point {
	other := pPrime.(*secp256k1Point)
	P.X.Set(other.X)
	P.Y.Set(other.Y)
	return P
}

// Clone returns an independent copy of P.
func (P *secp256k1Point) Clone() kyber.Point {
	return &secp256k1Point{X: P.X.Clone(), Y: P.Y.Clone()}
}
// EmbedLen returns the number of bytes of data which can be embedded
// in a point (29 bytes).
func (*secp256k1Point) EmbedLen() int {
	// Reserve the most-significant 8 bits for pseudo-randomness.
	// Reserve the least-significant 8 bits for embedded data length.
	return (255 - 8 - 8) / 8
}
func (P *secp256k1Point) Embed(data []byte, r cipher.Stream) kyber.Point {
numEmbedBytes := P.EmbedLen()
if len(data) > numEmbedBytes {
panic("too much data to embed in a point")
}
numEmbedBytes = len(data)
var x [32]byte
randStart := 1 // First byte to fill with random data
if data != nil {
x[0] = byte(numEmbedBytes) // Encode length in low 8 bits
copy(x[1:1+numEmbedBytes], data) // Copy in data to embed
randStart = 1 + numEmbedBytes
}
maxAttempts := 10000
// Try random x ordinates satisfying the constraints, until one provides
// a point on secp256k1
for numAttempts := 0; numAttempts < maxAttempts; numAttempts++ {
// Fill the rest of the x ordinate with random data
r.XORKeyStream(x[randStart:], x[randStart:])
xOrdinate := newFieldZero().SetBytes(x)
// RHS of secp256k1 equation is x³+7 mod p. Success if square.
// We optimistically don't use btcec.IsOnCurve, here, because we
// hope to assign the intermediate result maybeY to P.Y
secp256k1RHS := rightHandSide(xOrdinate)
if maybeY := maybeSqrtInField(secp256k1RHS); maybeY != (*fieldElt)(nil) {
P.X = xOrdinate // success: found (x,y) s.t. y²=x³+7
P.Y = maybeY
return P
}
}
// Probability 2^{-maxAttempts}, under correct operation.
panic("failed to find point satisfying all constraints")
}
// Data extracts the bytes embedded in P by Embed: the first byte of
// the x ordinate's encoding gives the payload length, the following
// bytes are the payload. Errors if the recorded length exceeds
// EmbedLen().
func (P *secp256k1Point) Data() ([]byte, error) {
	b := P.X.Bytes()
	dataLength := int(b[0])
	if dataLength > P.EmbedLen() {
		return nil, fmt.Errorf("point specifies too much data")
	}
	return b[1 : dataLength+1], nil
}
// Add sets P to the sum a + b of two curve points.
func (P *secp256k1Point) Add(a, b kyber.Point) kyber.Point {
	X, Y := s256.Add(
		a.(*secp256k1Point).X.int(), a.(*secp256k1Point).Y.int(),
		b.(*secp256k1Point).X.int(), b.(*secp256k1Point).Y.int())
	P.X.SetInt(X)
	P.Y.SetInt(Y)
	return P
}

// Sub sets P to the difference a - b, computed as a + (-b) by negating
// b's y ordinate.
func (P *secp256k1Point) Sub(a, b kyber.Point) kyber.Point {
	X, Y := s256.Add(
		a.(*secp256k1Point).X.int(), a.(*secp256k1Point).Y.int(),
		b.(*secp256k1Point).X.int(),
		newFieldZero().Neg(b.(*secp256k1Point).Y).int()) // -b_y
	P.X.SetInt(X)
	P.Y.SetInt(Y)
	return P
}

// Neg sets P to -a (same x ordinate, negated y ordinate).
func (P *secp256k1Point) Neg(a kyber.Point) kyber.Point {
	P.X = a.(*secp256k1Point).X.Clone()
	P.Y = newFieldZero().Neg(a.(*secp256k1Point).Y)
	return P
}
// Mul sets P to the scalar multiple s·a. When a is nil, the curve's
// generator is used (s·G), taking the optimized base-point
// multiplication path.
func (P *secp256k1Point) Mul(s kyber.Scalar, a kyber.Point) kyber.Point {
	sBytes, err := s.(*secp256k1Scalar).MarshalBinary()
	if err != nil {
		panic(fmt.Errorf("failure while marshaling multiplier: %s",
			err))
	}
	var X, Y *big.Int
	// Checks both a typed-nil *secp256k1Point and a nil interface.
	if a == (*secp256k1Point)(nil) || a == nil {
		X, Y = s256.ScalarBaseMult(sBytes)
	} else {
		X, Y = s256.ScalarMult(a.(*secp256k1Point).X.int(),
			a.(*secp256k1Point).Y.int(), sBytes)
	}
	P.X.SetInt(X)
	P.Y.SetInt(Y)
	return P
}
// MarshalBinary returns the 33-byte compressed encoding of P: the
// 32-byte x ordinate followed by one byte giving the parity of y
// (0 even, 1 odd). Errors if P is not actually on the curve.
func (P *secp256k1Point) MarshalBinary() ([]byte, error) {
	// Verify P lies on the curve: y must be one of the two square
	// roots of x³+7.
	maybeSqrt := maybeSqrtInField(rightHandSide(P.X))
	if maybeSqrt == (*fieldElt)(nil) {
		return nil, fmt.Errorf("x³+7 not a square")
	}
	minusMaybeSqrt := newFieldZero().Neg(maybeSqrt)
	if !P.Y.Equal(maybeSqrt) && !P.Y.Equal(minusMaybeSqrt) {
		return nil, fmt.Errorf(
			"y ≠ ±maybeSqrt(x³+7), so not a point on the curve")
	}
	rv := make([]byte, P.MarshalSize())
	signByte := P.MarshalSize() - 1 // Last byte contains sign of Y.
	xordinate := P.X.Bytes()
	copyLen := copy(rv[:signByte], xordinate[:])
	if copyLen != P.MarshalSize()-1 {
		return []byte{}, fmt.Errorf("marshal of x ordinate too short")
	}
	if P.Y.isEven() {
		rv[signByte] = 0
	} else {
		rv[signByte] = 1
	}
	return rv, nil
}

// MarshalSize returns the length in bytes of the compressed encoding.
func (P *secp256k1Point) MarshalSize() int { return 33 }

// MarshalID returns an 8-byte identifier for this marshaling scheme.
func (P *secp256k1Point) MarshalID() [8]byte {
	return [8]byte{'s', 'p', '2', '5', '6', '.', 'p', 'o'}
}
// UnmarshalBinary reverses MarshalBinary: buf holds the 32-byte
// big-endian x ordinate followed by one parity byte for y (0 even,
// 1 odd). Errors on a wrong length, a bad parity byte, or an x
// ordinate that does not lie on the curve.
func (P *secp256k1Point) UnmarshalBinary(buf []byte) error {
	var err error
	if len(buf) != P.MarshalSize() {
		err = fmt.Errorf("wrong length for marshaled point")
	}
	if err == nil && !(buf[32] == 0 || buf[32] == 1) {
		err = fmt.Errorf("bad sign byte (the last one)")
	}
	if err != nil {
		return err
	}
	var xordinate [32]byte
	copy(xordinate[:], buf[:32])
	P.X = newFieldZero().SetBytes(xordinate)
	// Recover y from x via y² = x³ + 7; fail if x is not on the curve.
	secp256k1RHS := rightHandSide(P.X)
	maybeY := maybeSqrtInField(secp256k1RHS)
	if maybeY == (*fieldElt)(nil) {
		return fmt.Errorf("x ordinate does not correspond to a curve point")
	}
	isEven := maybeY.isEven()
	P.Y.Set(maybeY)
	// Flip y's sign when its parity disagrees with the sign byte.
	// (DEAD CODE REMOVED: the previous else branch re-validated
	// buf[32], which the checks above already guarantee is 0 or 1.)
	if (buf[32] == 0 && !isEven) || (buf[32] == 1 && isEven) {
		P.Y.Neg(P.Y)
	}
	return nil
}
// MarshalTo writes the compressed encoding of P to w, returning the
// number of bytes written.
func (P *secp256k1Point) MarshalTo(w io.Writer) (int, error) {
	buf, err := P.MarshalBinary()
	if err != nil {
		return 0, err
	}
	return w.Write(buf)
}

// UnmarshalFrom reads exactly MarshalSize() bytes from r and decodes
// them into P.
func (P *secp256k1Point) UnmarshalFrom(r io.Reader) (int, error) {
	buf := make([]byte, P.MarshalSize())
	n, err := io.ReadFull(r, buf)
	if err != nil {
		return 0, err
	}
	return n, P.UnmarshalBinary(buf)
}
// EthereumAddress returns the 20-byte Ethereum address corresponding
// to the public key p.
func EthereumAddress(p kyber.Point) (rv [20]byte) {
	// The Ethereum address of P is the bottom 160 bits of keccak256(P.X‖P.Y),
	// where P.X and P.Y are represented in 32 bytes as big-endian. See equations
	// (277, 284) of Ethereum Yellow Paper version 3e36772, or go-ethereum's
	// crypto.PubkeyToAddress.
	h := sha3.NewLegacyKeccak256()
	if _, err := h.Write(LongMarshal(p)); err != nil {
		panic(err)
	}
	copy(rv[:], h.Sum(nil)[12:])
	return rv
}
// IsSecp256k1Point reports whether p's concrete type is this package's
// secp256k1 point implementation.
func IsSecp256k1Point(p kyber.Point) bool {
	// IDIOM: a comma-ok type assertion replaces the previous
	// two-branch type switch; behavior is identical.
	_, ok := p.(*secp256k1Point)
	return ok
}
// Coordinates returns the affine (X, Y) coordinates of p as big.Ints.
func Coordinates(p kyber.Point) (*big.Int, *big.Int) {
	return p.(*secp256k1Point).X.int(), p.(*secp256k1Point).Y.int()
}
// ValidPublicKey reports whether p is a well-formed secp256k1 public
// key: a non-nil *secp256k1Point whose y ordinate is one of the two
// square roots of x³+7 (i.e. the point actually lies on the curve).
func ValidPublicKey(p kyber.Point) bool {
	// Rejects both a typed-nil *secp256k1Point and a nil interface.
	if p == (*secp256k1Point)(nil) || p == nil {
		return false
	}
	P, ok := p.(*secp256k1Point)
	if !ok {
		return false
	}
	maybeY := maybeSqrtInField(rightHandSide(P.X))
	return maybeY != nil && (P.Y.Equal(maybeY) || P.Y.Equal(maybeY.Neg(maybeY)))
}
// Generate creates a private/public key pair from the given randomness
// source, retrying until the derived public key is valid.
func Generate(random cipher.Stream) *key.Pair {
	p := key.Pair{}
	for !ValidPublicKey(p.Public) {
		p.Private = (&Secp256k1{}).Scalar().Pick(random)
		p.Public = (&Secp256k1{}).Point().Mul(p.Private, nil)
	}
	return &p
}
// LongMarshal returns the uncompressed 64-byte encoding of p: the
// 32-byte X ordinate followed by the 32-byte Y ordinate.
func LongMarshal(p kyber.Point) []byte {
	pt := p.(*secp256k1Point)
	x := pt.X.Bytes()
	y := pt.Y.Bytes()
	return append(x[:], y[:]...)
}
// LongUnmarshal parses the 64-byte uncompressed encoding produced by
// LongMarshal (32-byte X then 32-byte Y, big-endian), verifying the
// result is a valid curve point.
func LongUnmarshal(m []byte) (kyber.Point, error) {
	if len(m) != 64 {
		return nil, fmt.Errorf(
			"0x%x does not represent an uncompressed secp256k1Point. Should be length 64, but is length %d",
			m, len(m))
	}
	p := newPoint()
	p.X.SetInt(big.NewInt(0).SetBytes(m[:32]))
	p.Y.SetInt(big.NewInt(0).SetBytes(m[32:]))
	if !ValidPublicKey(p) {
		return nil, fmt.Errorf("%s is not a valid secp256k1 point", p)
	}
	return p, nil
}
// ScalarToPublicPoint returns the public point s·G for the secret
// scalar s, where G is the secp256k1 generator.
func ScalarToPublicPoint(s kyber.Scalar) kyber.Point {
	publicPoint := (&Secp256k1{}).Point()
	return publicPoint.Mul(s, nil)
}
// SetCoordinates constructs the point (x, y), panicking if the
// coordinates do not describe a valid point on the curve.
func SetCoordinates(x, y *big.Int) kyber.Point {
	rv := newPoint()
	rv.X.SetInt(x)
	rv.Y.SetInt(y)
	if !ValidPublicKey(rv) {
		panic("point requested from invalid coordinates")
	}
	return rv
}
package lib
import (
"errors"
"image"
"image/color"
)
// Offsets of channels within RGBA pixels:
const r = 0
const g = 1
const b = 2

// pattern is the RGGB bayer filter layout, addressable as [y][x].
var pattern = [][]int{
	{r, g},
	{g, b},
}

// getIndices returns the neighborhood {index-1, index, index+1}
// clipped to the half-open range [minIndex, maxIndex).
func getIndices(index int, minIndex int, maxIndex int) []int {
	result := []int{}
	for candidate := index - 1; candidate <= index+1; candidate++ {
		if candidate < minIndex || candidate >= maxIndex {
			continue
		}
		result = append(result, candidate)
	}
	return result
}

// getChannel reports which filter color (channel offset) of the RGGB
// bayer pattern lies over the sensor pixel at (x, y).
func getChannel(x int, y int) int {
	return pattern[y%2][x%2]
}

// pixCompAvg averages a summed channel value, guarding against a zero
// count (counts of 0 or 1 divide by 1, returning the sum unchanged).
func pixCompAvg(sum int, count int) uint8 {
	divisor := count
	if divisor < 2 {
		divisor = 1
	}
	return uint8(sum / divisor)
}
// Demosaic an RGGB bayer image.
func Demosaic(bayerImage *image.Gray) (image.Image, error) {
// The most obvious way to do this is via convolution kernels,
// but I'm finding it tedious to declare those.
bounds := bayerImage.Bounds()
result := image.NewRGBA(bounds)
for xImage := bounds.Min.X; xImage < bounds.Max.X; xImage++ {
xIndices := getIndices(xImage, bounds.Min.X, bounds.Max.X)
for yImage := bounds.Min.Y; yImage < bounds.Max.Y; yImage++ {
yIndices := getIndices(yImage, bounds.Min.Y, bounds.Max.Y)
sums := []int{0, 0, 0}
count := []int{0, 0, 0}
for _, x := range xIndices {
for _, y := range yIndices {
channel := getChannel(x-bounds.Min.X, y-bounds.Min.Y)
sums[channel] += int(bayerImage.GrayAt(x, y).Y)
count[channel] += 1
}
}
result.Set(xImage, yImage, color.RGBA{
// Avoid divide-by-zero:
pixCompAvg(sums[r], count[r]),
pixCompAvg(sums[g], count[g]),
pixCompAvg(sums[b], count[b]),
0xff,
})
}
}
return result, nil
}
// Demosaic a grayscale image that was stored as RGBA.
func DemosaicRGBGray(bayerImage image.Image) (image.Image, error) {
rgbaImage, ok := (bayerImage).(*image.RGBA)
if !ok {
return bayerImage, errors.New("bayerImage must be an RGBA image")
}
grayBayer := image.NewGray(rgbaImage.Bounds())
// It should suffice to copy out any channel - all channels should
// have the same values.
iDest := 0
for iSrc := 0; iSrc < len(rgbaImage.Pix); iSrc += 4 {
grayBayer.Pix[iDest] = rgbaImage.Pix[iSrc]
iDest += 1
}
return Demosaic(grayBayer)
} | lib/demosaic.go | 0.792263 | 0.437343 | demosaic.go | starcoder |
package opc
// Raver plaid
// A rainbowy pattern with moving diagonal black stripes
import (
"github.com/longears/pixelslinger/colorutils"
"github.com/longears/pixelslinger/config"
"github.com/longears/pixelslinger/midi"
"math"
"time"
)
// MakePatternRaverPlaid returns a ByteThread that renders a rainbowy pattern
// with moving diagonal black stripes. The locations slice is accepted for
// interface compatibility but is not used: the pattern depends only on each
// pixel's index along the strip.
func MakePatternRaverPlaid(locations []float64) ByteThread {
	return func(bytesIn chan []byte, bytesOut chan []byte, midiState *midi.MidiState) {
		// Variables we'll want to tweak later to adjust the pattern artistically
		var (
			// how many sine wave cycles are squeezed into our n_pixels
			// 24 happens to create nice diagonal stripes on the wall layout
			freq_r float64 = 24
			freq_g float64 = 24
			freq_b float64 = 24
			// how many seconds the color sine waves take to shift through a complete cycle
			speed_r float64 = 7
			speed_g float64 = -13
			speed_b float64 = 19
		)
		// This code is running in its own thread. It is receiving byte slices over
		// the bytesIn channel, filling them with pixel colors, and then sending them
		// back over the bytesOut channel.
		// A "slice" is Go's version of a list or array, loosely speaking.
		// The byte slice is in the following format:
		//     [r, g, b, r, g, b, ...]
		// where each value is a byte in the range 0-255.
		// This pattern doesn't care about the pixels' locations. If it did, it would
		// be using the locations slice which looks like this:
		//     [x, y, z, x, y, z, ...]
		// The "spatial-stripes" pattern is a good example of that.
		// Wait for the next incoming byte slice
		last_t := 0.0
		t := 0.0
		for bytes := range bytesIn {
			n_pixels := len(bytes) / 3
			// Get the current time in Unix seconds.
			// This requires some time and speed knob bookkeeping
			this_t := float64(time.Now().UnixNano())/1.0e9 - 9.4e8
			// Map the MIDI speed knob (0-127) onto a multiplier in [0, 4], with a
			// dead zone around the middle that pins the multiplier to exactly 1.
			speedKnob := float64(midiState.ControllerValues[config.SPEED_KNOB]) / 127.0
			if speedKnob < 0.5 {
				speedKnob = colorutils.RemapAndClamp(speedKnob, 0, 0.4, 0, 1)
			} else {
				speedKnob = colorutils.RemapAndClamp(speedKnob, 0.6, 1, 1, 4)
			}
			if midiState.KeyVolumes[config.SLOWMO_PAD] > 0 {
				speedKnob *= 0.25
			}
			// Advance pattern time by the scaled wall-clock delta. Skipped on the
			// very first frame, while last_t is still the zero sentinel.
			if last_t != 0 {
				t += (this_t - last_t) * speedKnob
			}
			last_t = this_t
			// For each pixel...
			for ii := 0; ii < n_pixels; ii++ {
				//--------------------------------------------------------------------------------
				// How far along the strip are we?
				pct := float64(ii) / float64(n_pixels)
				// Replicate a quirk in the original python version of this pattern
				pct /= 2
				// Make diagonal black stripes using a slowly shifting sine wave
				// For more details on the "colorutils" package:
				//    http://godoc.org/github.com/longears/pixelslinger/colorutils
				pct_jittered := colorutils.PosMod2((pct * 77), 37)
				blackstripes := colorutils.Cos(pct_jittered, t*0.05, 1, -1.5, 1.5) // offset, period, minn, maxx
				blackstripes_offset := colorutils.Cos(t, 0.9, 60, -0.5, 3)         // slowly change the width of the stripes over a minute
				blackstripes = colorutils.Clamp(blackstripes+blackstripes_offset, 0, 1)
				// 3 sine waves for r, g, b which are out of sync with each other
				r := blackstripes * colorutils.Remap(math.Cos((t/speed_r+pct*freq_r)*math.Pi*2), -1, 1, 0, 1)
				g := blackstripes * colorutils.Remap(math.Cos((t/speed_g+pct*freq_g)*math.Pi*2), -1, 1, 0, 1)
				b := blackstripes * colorutils.Remap(math.Cos((t/speed_b+pct*freq_b)*math.Pi*2), -1, 1, 0, 1)
				// Write into the byte slice
				bytes[ii*3+0] = colorutils.FloatToByte(r)
				bytes[ii*3+1] = colorutils.FloatToByte(g)
				bytes[ii*3+2] = colorutils.FloatToByte(b)
				//--------------------------------------------------------------------------------
			}
			// Send our completed byte slice over the output channel
			bytesOut <- bytes
		}
	}
} | opc/pattern-raver-plaid.go | 0.574992 | 0.595287 | pattern-raver-plaid.go | starcoder
package gohome
import (
"github.com/PucklaMotzer09/mathgl/mgl32"
)
// A 2D camera used for showing different parts of the world.
type Camera2D struct {
	// It's position in world space
	Position mgl32.Vec2
	// It's zoom (>1.0 -> Zoom In and <1.0 -> Zoom Out)
	Zoom float32
	// It's rotation in degrees
	Rotation float32
	// The Origin for rotating and zooming
	Origin mgl32.Vec2

	// Cached copies of Position/Zoom/Rotation from the last
	// CalculateViewMatrix call, used to skip redundant recomputation.
	oldPosition mgl32.Vec2
	oldZoom     float32
	oldRotation float32

	// The view matrix and its inverse, recomputed by CalculateViewMatrix.
	viewMatrix        mgl32.Mat3
	inverseViewMatrix mgl32.Mat3
}
// valuesChanged reports whether Position, Zoom or Rotation differs from the
// values cached by the last view-matrix calculation.
func (cam *Camera2D) valuesChanged() bool {
	samePos := cam.Position == cam.oldPosition
	sameZoom := cam.Zoom == cam.oldZoom
	sameRot := cam.Rotation == cam.oldRotation
	return !(samePos && sameZoom && sameRot)
}
// CalculateViewMatrix recomputes the cached view matrix (and its inverse)
// from Position, Zoom, Rotation and Origin for use by the shader. The work
// is skipped entirely when none of those values changed since the last call.
func (cam *Camera2D) CalculateViewMatrix() {
	// Early return replaces the original's inverted if/else flow.
	if !cam.valuesChanged() {
		return
	}
	// -OT S R OT T
	windowSize := Framew.WindowGetSize()
	ot := mgl32.Translate2D(-windowSize[0]*cam.Origin[0], -windowSize[1]*cam.Origin[1])
	not := mgl32.Translate2D(windowSize[0]*cam.Origin[0], windowSize[1]*cam.Origin[1])
	cam.viewMatrix = not.Mul3(mgl32.Scale2D(cam.Zoom, cam.Zoom)).Mul3(mgl32.Rotate2D(mgl32.DegToRad(cam.Rotation)).Mat3()).Mul3(ot).Mul3(mgl32.Translate2D(-cam.Position[0], -cam.Position[1]))
	cam.inverseViewMatrix = cam.viewMatrix.Inv()
	// Remember the inputs so the next call can detect "no change".
	cam.oldPosition = cam.Position
	cam.oldZoom = cam.Zoom
	cam.oldRotation = cam.Rotation
}
// GetViewMatrix returns the view matrix cached by the last
// CalculateViewMatrix call.
func (cam *Camera2D) GetViewMatrix() mgl32.Mat3 {
	return cam.viewMatrix
}
// GetInverseViewMatrix returns the inverse of the view matrix cached by the
// last CalculateViewMatrix call.
func (cam *Camera2D) GetInverseViewMatrix() mgl32.Mat3 {
	return cam.inverseViewMatrix
}
// AddPositionRotated adds pos to the camera position, interpreting pos in the
// camera's rotated frame: the offset is first rotated by -Rotation (degrees)
// and the rotated components are then added to Position.
func (cam *Camera2D) AddPositionRotated(pos mgl32.Vec2) {
	// Manual 2x2 matrix-vector product with the rotation matrix.
	mat := mgl32.Rotate2D(mgl32.DegToRad(-cam.Rotation))
	x := mat.At(0, 0)*pos[0] + mat.At(0, 1)*pos[1]
	y := mat.At(1, 0)*pos[0] + mat.At(1, 1)*pos[1]
	pos[0] = x
	pos[1] = y
	cam.Position[0] += pos[0]
	cam.Position[1] += pos[1]
} | src/gohome/camera2d.go | 0.695752 | 0.439326 | camera2d.go | starcoder
package entities
import (
"fmt"
"math"
"math/rand"
)
// Point represents a geographical location on the game map.
// Coordinates are persisted via gorm (indexed) and serialized as "x"/"y".
type Point struct {
	X float64 `gorm:"index" json:"x"`
	Y float64 `gorm:"index" json:"y"`
}
// NewPoint constructs a Point at the coordinates (x, y).
func NewPoint(x float64, y float64) Point {
	var p Point
	p.X = x
	p.Y = y
	return p
}
// IsIn returns true when Point is inside the square area of side 2^scale
// centered on (x, y); the lower bounds are inclusive, the upper exclusive.
func (p *Point) IsIn(x float64, y float64, scale float64) bool {
	// Renamed from "len", which shadowed the builtin len.
	side := math.Pow(2, scale)
	return p.X >= x-side/2 && p.X < x+side/2 && p.Y >= y-side/2 && p.Y < y+side/2
}
// IsInLine returns true when this point, the midpoint, or to lies in the
// area described by (x, y, scale).
func (p *Point) IsInLine(to *Point, x float64, y float64, scale float64) bool {
	if p.IsIn(x, y, scale) {
		return true
	}
	if p.Center(to).IsIn(x, y, scale) {
		return true
	}
	return to.IsIn(x, y, scale)
}
// Dist calculates the Euclidean distance between two Points.
// math.Hypot is used instead of the naive sqrt(dx*dx+dy*dy) because it
// avoids intermediate overflow/underflow for extreme coordinates.
func (p *Point) Dist(oth *Point) float64 {
	return math.Hypot(oth.X-p.X, oth.Y-p.Y)
}
// Center returns the midpoint between p and to (Div with ratio 0.5).
func (p *Point) Center(to *Point) *Point {
	return p.Div(to, 0.5)
}
// Div returns the dividing point between p and to at the given ratio.
// NOTE(review): progress = 1 yields p itself and progress = 0 yields to,
// i.e. the ratio is measured from to toward p — confirm callers expect this
// orientation (Center uses 0.5 and is symmetric, so it is unaffected).
func (p *Point) Div(to *Point, progress float64) *Point {
	return &Point{
		X: p.X*progress + to.X*(1-progress),
		Y: p.Y*progress + to.Y*(1-progress),
	}
}
// Rand generates another Point randomly within 'max' distance of p.
// Radius and angle are sampled uniformly, so results cluster toward p
// rather than being uniform over the disk.
func (p *Point) Rand(max float64) *Point {
	dist := rand.Float64() * max
	rad := rand.Float64() * math.Pi * 2
	return &Point{
		X: p.X + dist*math.Cos(rad),
		Y: p.Y + dist*math.Sin(rad),
	}
}
// Flat returns the position as the two values (X, Y).
func (p *Point) Flat() (float64, float64) {
	return p.X, p.Y
}
// Sub returns the component-wise difference p - to as a new Point.
func (p *Point) Sub(to *Point) *Point {
	return &Point{p.X - to.X, p.Y - to.Y}
}
// Unit returns the unit vector of this Point taken from the origin (0, 0).
// NOTE(review): a zero-length point divides by zero here, producing NaN/Inf
// components — confirm callers never pass the origin.
func (p *Point) Unit() *Point {
	length := p.Dist(&Point{})
	return &Point{p.X / length, p.Y / length}
}
// InnerProduct returns the dot product of p and to, both interpreted as
// vectors from the origin.
func (p *Point) InnerProduct(to *Point) float64 {
	return p.X*to.X + p.Y*to.Y
}
// Clone returns a new Point with the same coordinates but a distinct
// reference, so mutating the copy never affects the original.
func (p *Point) Clone() *Point {
	return &Point{p.X, p.Y}
}
// Logarithm divides num by 2^base and truncates the result to an int.
// A negative base therefore multiplies num by 2^(-base) before truncating.
func Logarithm(num float64, base int) int {
	if base < 0 {
		scaled := num * math.Pow(2, float64(-base))
		return int(scaled)
	}
	// Non-negative base: truncate first, then shift off 'base' bits.
	truncated := int(num)
	return truncated >> base
}
// DeLogarithm multiplies num by 2^base; a negative base instead divides num
// by 2^(-base) and returns the exact fractional quotient.
func DeLogarithm(num int, base int) float64 {
	if base >= 0 {
		// Upper-scale: shift the bits left and convert.
		return float64(num << base)
	}
	divisor := 1 << -base
	return float64(num) / float64(divisor)
}
// String implements fmt.Stringer, rendering the point as "(x,y)" with two
// decimal places.
func (p Point) String() string {
	return fmt.Sprintf("(%.2f,%.2f)", p.X, p.Y)
} | entities/point.go | 0.906943 | 0.621196 | point.go | starcoder
package glc
import "errors"
// MapT0T1 is a type wrapper, implements IMap interfaces.
type MapT0T1 map[T0]T1

// PairMpT0T1 is a type alias used only in Map methods to avoid code generation issues.
// X1 holds the key and X2 the associated value (see Entries/ToSlice).
type PairMpT0T1 = struct {
	X1 T0
	X2 T1
}
// Copy returns a shallow copy of the receiver; a nil receiver yields nil.
func (m MapT0T1) Copy() MapT0T1 {
	if m == nil {
		return nil
	}
	out := make(MapT0T1, len(m))
	for key, val := range m {
		out[key] = val
	}
	return out
}

// Entries returns the key/value pairs of the receiver as a slice; a nil
// receiver yields nil. Ordering follows map iteration and is unspecified.
func (m MapT0T1) Entries() []PairMpT0T1 {
	if m == nil {
		return nil
	}
	out := make([]PairMpT0T1, 0, len(m))
	for key, val := range m {
		out = append(out, PairMpT0T1{key, val})
	}
	return out
}

// Keys returns the keys of the receiver as a slice; a nil receiver yields nil.
func (m MapT0T1) Keys() []T0 {
	if m == nil {
		return nil
	}
	out := make([]T0, 0, len(m))
	for key := range m {
		out = append(out, key)
	}
	return out
}

// Values returns the values of the receiver as a slice; a nil receiver yields nil.
func (m MapT0T1) Values() []T1 {
	if m == nil {
		return nil
	}
	out := make([]T1, 0, len(m))
	for _, val := range m {
		out = append(out, val)
	}
	return out
}
// Length returns the number of items in the receiver.
func (m MapT0T1) Length() int {
	return len(m)
}

// Size returns the number of items in the receiver. Same as Length.
func (m MapT0T1) Size() int {
	return len(m)
}

// ContainsKey reports whether the given key is present in the receiver.
func (m MapT0T1) ContainsKey(k T0) bool {
	if _, found := m[k]; found {
		return true
	}
	return false
}

// ContainsValue reports whether at least one entry holds the given value.
func (m MapT0T1) ContainsValue(v T1) bool {
	for _, candidate := range m {
		if candidate == v {
			return true
		}
	}
	return false
}
// Count returns the number of entries in the receiver that satisfy the predicate.
func (m MapT0T1) Count(pred func(PairMpT0T1) bool) int {
	n := 0
	for key, val := range m {
		if pred(PairMpT0T1{key, val}) {
			n++
		}
	}
	return n
}

// Get looks up the value for a key, reporting whether the key was present.
func (m MapT0T1) Get(k T0) (T1, bool) {
	val, found := m[k]
	return val, found
}

// IsEmpty reports whether the receiver has no entries.
func (m MapT0T1) IsEmpty() bool {
	return len(m) == 0
}

// All reports whether every entry satisfies the predicate.
func (m MapT0T1) All(pred func(PairMpT0T1) bool) bool {
	for key, val := range m {
		if !pred(PairMpT0T1{key, val}) {
			return false
		}
	}
	return true
}

// Any reports whether at least one entry satisfies the predicate.
func (m MapT0T1) Any(pred func(PairMpT0T1) bool) bool {
	for key, val := range m {
		if pred(PairMpT0T1{key, val}) {
			return true
		}
	}
	return false
}
// ToSlice returns the entries as a slice of pairs; a nil receiver yields nil.
func (m MapT0T1) ToSlice() []PairMpT0T1 {
	if m == nil {
		return nil
	}
	pairs := make([]PairMpT0T1, 0, len(m))
	for key, val := range m {
		pairs = append(pairs, PairMpT0T1{key, val})
	}
	return pairs
}

// Filter returns a new map holding the entries that satisfy the predicate;
// a nil receiver yields nil.
func (m MapT0T1) Filter(pred func(PairMpT0T1) bool) MapT0T1 {
	if m == nil {
		return nil
	}
	kept := MapT0T1{}
	for key, val := range m {
		if pred(PairMpT0T1{key, val}) {
			kept[key] = val
		}
	}
	return kept
}

// FilterKeys returns a new map holding the entries whose key satisfies the
// predicate; a nil receiver yields nil.
func (m MapT0T1) FilterKeys(pred func(T0) bool) MapT0T1 {
	if m == nil {
		return nil
	}
	kept := MapT0T1{}
	for key, val := range m {
		if pred(key) {
			kept[key] = val
		}
	}
	return kept
}

// FilterNot returns a new map holding the entries that do NOT satisfy the
// predicate; a nil receiver yields nil.
func (m MapT0T1) FilterNot(pred func(PairMpT0T1) bool) MapT0T1 {
	if m == nil {
		return nil
	}
	kept := MapT0T1{}
	for key, val := range m {
		if !pred(PairMpT0T1{key, val}) {
			kept[key] = val
		}
	}
	return kept
}

// FilterValues returns a new map holding the entries whose value satisfies
// the predicate; a nil receiver yields nil.
func (m MapT0T1) FilterValues(pred func(T1) bool) MapT0T1 {
	if m == nil {
		return nil
	}
	kept := MapT0T1{}
	for key, val := range m {
		if pred(val) {
			kept[key] = val
		}
	}
	return kept
}
// ForEach applies f to every entry of the receiver (unspecified order).
func (m MapT0T1) ForEach(f func(PairMpT0T1)) {
	for key, val := range m {
		f(PairMpT0T1{key, val})
	}
}

// GetOrElse returns the value for k if present, otherwise the result of f(k).
func (m MapT0T1) GetOrElse(k T0, f func(T0) T1) T1 {
	val, found := m[k]
	if !found {
		return f(k)
	}
	return val
}

// IsNotEmpty reports whether the receiver has at least one entry.
func (m MapT0T1) IsNotEmpty() bool {
	return len(m) > 0
}
// MaxWith returns an entry in the map with maximum value, using a comparator
// function. Returns an error if the map is empty or nil.
func (m MapT0T1) MaxWith(comparator func(PairMpT0T1, PairMpT0T1) int) (PairMpT0T1, error) {
	var best PairMpT0T1
	if len(m) == 0 {
		return best, errors.New("empty or nil map")
	}
	started := false
	for key, val := range m {
		current := PairMpT0T1{key, val}
		if !started {
			// Seed the running maximum with the first entry encountered.
			best = current
			started = true
		} else if comparator(best, current) < 0 {
			best = current
		}
	}
	return best, nil
}

// MinusKey returns a new MapT0T1 without the entry associated with the given
// key. If the key is not in the receiver then it returns a copy of the receiver.
func (m MapT0T1) MinusKey(k T0) MapT0T1 {
	result := m.Copy()
	delete(result, k)
	return result
}

// MinusKeys returns a copy of the receiver with all the given keys removed.
func (m MapT0T1) MinusKeys(s []T0) MapT0T1 {
	result := m.Copy()
	for _, key := range s {
		delete(result, key)
	}
	return result
}

// MinWith returns an entry with minimum value according to the comparator,
// implemented by running MaxWith with the comparison inverted.
func (m MapT0T1) MinWith(comparator func(PairMpT0T1, PairMpT0T1) int) (PairMpT0T1, error) {
	flipped := func(a PairMpT0T1, b PairMpT0T1) int { return -comparator(a, b) }
	return m.MaxWith(flipped)
}
// PlusEntry returns a copy of the receiver with the given pair added
// (overwriting any existing value for that key). Never returns nil.
func (m MapT0T1) PlusEntry(entry PairMpT0T1) MapT0T1 {
	result := m.Copy()
	if result == nil {
		result = MapT0T1{}
	}
	result[entry.X1] = entry.X2
	return result
}

// PlusMap returns the union of the receiver and other; values from other win
// on key collisions. Returns nil only when both inputs are nil.
func (m MapT0T1) PlusMap(other MapT0T1) MapT0T1 {
	if m == nil && other == nil {
		return nil
	}
	result := m.Copy()
	if result == nil {
		result = MapT0T1{}
	}
	for key, val := range other {
		result[key] = val
	}
	return result
}

// PlusSlice returns a copy of the receiver extended with the given pairs;
// later pairs win on key collisions. Returns nil only when both inputs are nil.
func (m MapT0T1) PlusSlice(s []PairMpT0T1) MapT0T1 {
	if m == nil && s == nil {
		return nil
	}
	result := m.Copy()
	if result == nil {
		result = MapT0T1{}
	}
	for _, pair := range s {
		result[pair.X1] = pair.X2
	}
	return result
}
// Add returns a copy of the receiver with the entry (k, v) added, overwriting
// any existing value for k. Never returns nil, even for a nil receiver.
func (m MapT0T1) Add(k T0, v T1) MapT0T1 {
	m1 := m.Copy()
	if m1 == nil {
		m1 = MapT0T1{}
	}
	m1[k] = v
	return m1
} | pkg/glc/map_01.go | 0.808408 | 0.412234 | map_01.go | starcoder
package ttt
import (
"log"
)
// Board cells hold exactly one of these three values. In the minimax search
// below, X is the Max player and O is the Min player.

// O is the integer value for O player
const O int = 1

// X is the integer value for X player
const X int = 2

// EMPTY is the integer value for empty space
const EMPTY int = 0
// EvaluateGame evaluates the game with given state (1: Max player wins,
// -1: Min player wins, 0: Draw or Game is still on).
// Invalid cell values (not 0, 1 or 2) terminate the process via log.Fatal.
func EvaluateGame(state [9]int) int {
	// Winning cases' indexes
	winningCases := [8][3]int{
		{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, // Row
		{0, 3, 6}, {1, 4, 7}, {2, 5, 8}, // Column
		{0, 4, 8}, {2, 4, 6}, // Diagonal
	}
	for _, winningCase := range winningCases {
		// Count how many cells of this line each player occupies.
		// Counters are scoped to the loop; Go switch cases do not fall
		// through, so the original redundant break statements are removed.
		maxCount, minCount := 0, 0
		for _, winIdx := range winningCase {
			switch state[winIdx] {
			case X:
				maxCount++
			case O:
				minCount++
			case EMPTY:
				// no contribution
			default: // Error with state
				log.Fatal("[ERROR] There is something wrong with the state. Values should be 0,1,or 2.")
			}
		}
		// Check for winner
		if maxCount == 3 { // Player 1 (X) wins
			return 1
		}
		if minCount == 3 { // Player 2 (O) wins
			return -1
		}
	}
	// Game is still on || Game is a Draw
	return 0
}
// IsFinished reports whether the board is completely filled (no EMPTY cells).
// The original comment claimed it returned 1/0/-1, but the function has
// always returned a bool. It does not detect early wins; combine with
// EvaluateGame for that. Invalid cell values terminate via log.Fatal.
func IsFinished(state [9]int) bool {
	empty := 0
	for _, box := range state {
		switch box {
		case O, X:
			// occupied cell
		case EMPTY:
			empty++
		default: // Error state
			log.Fatal("[ERROR] There is something wrong with the state. Values should be 0,1,or 2.")
		}
	}
	// Game is finished only when no empty cells remain.
	return empty == 0
}
// isEmpty reports whether every cell of the board is EMPTY.
func isEmpty(state [9]int) bool {
	for _, cell := range state {
		if cell != EMPTY {
			return false
		}
	}
	return true
}
// GetNextAIMove gets the next best AI move (board index 0-8) based on the
// Minimax algorithm. botIsX selects the bot's side: true plays X (Max),
// false plays O (Min).
func GetNextAIMove(state [9]int, botIsX bool) int {
	if !botIsX && isEmpty(state) {
		// O opens on an empty board: take the center immediately instead
		// of running the full search.
		return 4
	}
	return minimax(state, botIsX)
}
// minimax returns the index (0-8) of the best move for the side to act:
// the Max player (X) when maxTurn is true, otherwise the Min player (O).
// On ties the earliest-index move wins, matching the strict comparison.
func minimax(state [9]int, maxTurn bool) int {
	action := 0
	if maxTurn { // Max player's turn: maximize the child values.
		best := -2000
		for i := 0; i < 9; i++ {
			if state[i] != EMPTY {
				continue // occupied cell, not a legal move
			}
			child := state
			child[i] = X
			if val := minValue(child, -2000, 2000); val > best {
				best = val
				action = i
			}
		}
		return action
	}
	// Min player's turn: minimize the child values.
	best := 2000
	for i := 0; i < 9; i++ {
		if state[i] != EMPTY {
			continue
		}
		child := state
		child[i] = O
		if val := maxValue(child, -2000, 2000); val < best {
			best = val
			action = i
		}
	}
	return action
}
// minValue returns the minimax value of state from the Min player's (O's)
// perspective: O moves next and picks the child with the smallest value.
// alpha/beta implement alpha-beta pruning; +/-2000 serve as "infinity"
// sentinels, well outside the -1..1 utility range.
func minValue(state [9]int, alpha int, beta int) int {
	// Terminal Test
	evaluation := EvaluateGame(state)
	if evaluation == 1 || evaluation == -1 { // Winning condition
		return EvaluateGame(state) // Returns 1 or -1
	} else if IsFinished(state) { // Draw
		return EvaluateGame(state)
	}
	utilityValue := 2000 // Arbitrary value acting as +infinity
	for i := 0; i < 9; i++ {
		if state[i] == EMPTY {
			newState := state
			newState[i] = O
			tmpValue := maxValue(newState, alpha, beta)
			if tmpValue < utilityValue {
				utilityValue = tmpValue
			}
			// Prune: Max already has an option worth >= alpha elsewhere.
			if utilityValue <= alpha {
				return utilityValue
			}
			// Tighten the upper bound for the remaining children.
			if beta > utilityValue {
				beta = utilityValue
			}
		}
	}
	return utilityValue
}
// maxValue returns the minimax value of state from the Max player's (X's)
// perspective: X moves next and picks the child with the largest value.
// Mirror image of minValue with the alpha/beta roles swapped.
func maxValue(state [9]int, alpha int, beta int) int {
	// Terminal Test
	evaluation := EvaluateGame(state)
	if evaluation == 1 || evaluation == -1 { // Winning condition
		return EvaluateGame(state) // Returns 1 or -1
	} else if IsFinished(state) { // Draw
		return EvaluateGame(state)
	}
	utilityValue := -2000 // Arbitrary value acting as -infinity
	for i := 0; i < 9; i++ {
		if state[i] == EMPTY {
			newState := state
			newState[i] = X
			tmpValue := minValue(newState, alpha, beta)
			if tmpValue > utilityValue {
				utilityValue = tmpValue
			}
			// Prune: Min already has an option worth <= beta elsewhere.
			if utilityValue >= beta {
				return utilityValue
			}
			// Tighten the lower bound for the remaining children.
			if alpha < utilityValue {
				alpha = utilityValue
			}
		}
	}
	return utilityValue
} | ttt/ttt.go | 0.58676 | 0.48871 | ttt.go | starcoder
package analytics
import (
"math"
"strconv"
"strings"
)
var (
	// nan is the fill value used for Window slots that have not received a
	// pushed sample yet (see expandTo).
	nan = math.NaN()
)
// Window is a fixed-capacity buffer of float64 samples; the newest value
// lives at index 0 and older values follow (see Push).
type Window struct {
	values []float64
}
// NewWindow returns a new, empty window able to hold up to capacity values.
func NewWindow(capacity int) *Window {
	return &Window{values: make([]float64, 0, capacity)}
}
// Len returns the number of values in the Window.
func (w Window) Len() int { return len(w.values) }

// Cap returns the maximum number of values that the Window can hold.
func (w Window) Cap() int { return cap(w.values) }

// Values returns the values as a slice of float64 values.
// The returned slice is the live backing store, not a copy (see Clone).
func (w Window) Values() []float64 { return w.values }
// String implements the fmt.Stringer interface. Windows with ten or more
// values are abbreviated to the first and last three.
func (w Window) String() string {
	join := func(fs []float64) string {
		// Bug fix: the slice must start at length 0. The original used
		// make([]string, len(fs)) and then appended, which produced len(fs)
		// leading empty entries and spurious ", " separators in the output.
		ss := make([]string, 0, len(fs))
		for _, f := range fs {
			ss = append(ss, strconv.FormatFloat(f, 'f', -1, 64))
		}
		return strings.Join(ss, ", ")
	}
	str := "Window{"
	if w.Len() < 10 {
		str += join(w.values)
	} else {
		str += join(w.values[:3]) + " ... " + join(w.values[len(w.values)-3:])
	}
	return str + "}"
}
// Push adds new values to the front of the Window. The Window's length
// increments until it reaches the Window's capacity. If the Window is at
// full capacity, existing values are shifted toward the end and the oldest
// values are discarded. Returns the receiver for chaining.
func (w *Window) Push(val ...float64) *Window {
	min := func(a, b int) int {
		if a < b {
			return a
		}
		return b
	}
	// Number of existing values that need to be shifted towards the end
	// of the slice (the rest would fall off the end anyway).
	nShift := min(w.Cap()-len(val), w.Len())
	// Increase the size of the array (expandTo clamps to the capacity).
	w.expandTo(w.Len() + len(val))
	// Shift values, walking from the back so no still-needed source slot is
	// overwritten: the first nShift old values move to the last nShift slots.
	for i := 0; i < nShift; i++ {
		w.values[w.Len()-1-i] = w.values[nShift-1-i]
	}
	// If more values were pushed than the Window can hold, only the newest
	// Cap() of them survive; skip the rest.
	if len(val) > w.Cap() {
		val = val[len(val)-w.Cap():]
	}
	// Write the new values so the last element of val lands at index 0
	// (index 0 always holds the newest sample).
	idx := len(val) - 1
	for _, v := range val {
		w.values[idx] = v
		idx--
	}
	return w
}
// Sum returns the sum of all values in the Window.
func (w Window) Sum() float64 {
	var total float64
	for _, v := range w.values {
		total += v
	}
	return total
}
// Slice returns a new Window that refers to a subrange of the original
// Window. Both Windows share the underlying data. A negative start or end
// leaves that bound open; an end beyond the current length is clamped.
func (w Window) Slice(start, end int) *Window {
	if end > w.Len() {
		end = w.Len()
	}
	switch {
	case start < 0 && end < 0:
		// Both bounds open: return a view over the whole window.
		return &w
	case start < 0:
		return &Window{w.values[:end]}
	case end < 0:
		return &Window{w.values[start:]}
	}
	return &Window{w.values[start:end]}
}
// Clone returns a deep copy of the Window: same length and capacity, but
// with its own backing array, so mutating the clone never affects w.
func (w Window) Clone() *Window {
	c := Window{
		values: make([]float64, w.Len(), w.Cap()),
	}
	// copy is the idiomatic, memmove-backed replacement for the original
	// element-by-element loop.
	copy(c.values, w.values)
	return &c
}
// expandTo grows the window's length to n (clamped to the capacity),
// filling the newly exposed slots with NaN placeholders.
func (w *Window) expandTo(n int) {
	if n > w.Cap() {
		n = w.Cap()
	}
	for i := w.Len(); i < n; i++ {
		w.values = append(w.values, nan)
	}
} | analytics/window.go | 0.804367 | 0.429908 | window.go | starcoder
package day9
import (
"fmt"
"sort"
"github.com/pietrodll/aoc2021/utils/collections"
"github.com/pietrodll/aoc2021/utils/grid"
)
// parseInput builds the height grid from the raw puzzle input: rows are
// separated by newlines and the empty cell separator splits each row into
// one cell per character.
func parseInput(input string) grid.Grid {
	return grid.NewGridFromString(input, "\n", "")
}
// findLowPoints returns every grid point whose value is strictly smaller
// than the values of all of its adjacent points.
func findLowPoints(g *grid.Grid) []grid.GridPoint {
	low := make([]grid.GridPoint, 0)
	for point := range g.StreamPoints() {
		val := g.GetValue(point)
		isLow := true
		for _, neighbor := range g.FindAdjacentPoints(point) {
			// Short-circuit mirrors the original: neighbors are only read
			// while the point is still a low-point candidate.
			if isLow && g.GetValue(neighbor) <= val {
				isLow = false
			}
		}
		if isLow {
			low = append(low, point)
		}
	}
	return low
}
// totalRiskLevel sums (1 + height) over all low points of the grid.
func totalRiskLevel(g *grid.Grid) int {
	total := 0
	for _, low := range findLowPoints(g) {
		total += g.GetValue(low) + 1
	}
	return total
}
// findBasins returns, for each low point, the points of its basin: every
// point reachable from the low point through non-decreasing heights < 9.
// NOTE(review): if a low point was already swallowed by an earlier basin,
// its slot in the result stays nil — confirm callers tolerate nil entries
// (findAndMultiplyThreeLargestBasins does, since len(nil) == 0).
func findBasins(g *grid.Grid) [][]grid.GridPoint {
	visited := collections.NewCodableSet(g)
	lowPoints := findLowPoints(g)
	basins := make([][]grid.GridPoint, len(lowPoints))
	for i, lowPoint := range lowPoints {
		if !visited.Contains(lowPoint) {
			// breadth-first search starting from the low point to explore the basin
			basin := make([]grid.GridPoint, 1)
			basin[0] = lowPoint
			toVisit := collections.NewQueue(lowPoint)
			visited.Add(lowPoint)
			for !toVisit.IsEmpty() {
				point := toVisit.Dequeue().(grid.GridPoint)
				pointVal := g.GetValue(point)
				for _, neighbor := range g.FindAdjacentPoints(point) {
					val := g.GetValue(neighbor)
					// Flow uphill (val >= pointVal) but stop at height 9,
					// which belongs to no basin.
					if val >= pointVal && val < 9 && !visited.Contains(neighbor) {
						toVisit.Enqueue(neighbor)
						basin = append(basin, neighbor)
						visited.Add(neighbor)
					}
				}
			}
			basins[i] = basin
		}
	}
	return basins
}
// findAndMultiplyThreeLargestBasins returns the product of the sizes of the
// three largest basins in the grid.
func findAndMultiplyThreeLargestBasins(g *grid.Grid) int {
	basins := findBasins(g)
	sizes := make([]int, 0, len(basins))
	for _, basin := range basins {
		sizes = append(sizes, len(basin))
	}
	sort.Sort(sort.Reverse(sort.IntSlice(sizes)))
	return sizes[0] * sizes[1] * sizes[2]
}
// Run parses the puzzle input and prints the answers for both parts of the
// day-9 puzzle: the total risk level and the product of the three largest
// basin sizes.
func Run(input string) {
	grid := parseInput(input)
	fmt.Println("Total risk level:", totalRiskLevel(&grid))
	fmt.Println("Three largest basins multiplied:", findAndMultiplyThreeLargestBasins(&grid))
} | advent-of-code-2021/day9/day9.go | 0.67662 | 0.426023 | day9.go | starcoder
package memfs
import "strings"
// Formatters for representing the date and time as a string.
const (
	// JSONDateTime is an RFC 3339-style reference layout for time.Format.
	JSONDateTime = "2006-01-02T15:04:05-07:00"
)
//===========================================================================
// String Helpers
//===========================================================================
// Regularize normalizes a string for comparison: leading/trailing whitespace
// is trimmed and the result is lowercased. Often applied to user input before
// comparing against constant strings like database drivers or hashing
// algorithm names.
func Regularize(value string) string {
	return strings.ToLower(strings.TrimSpace(value))
}
// Stride splits s into consecutive substrings of n runes each; the final
// element holds whatever remainder is left (possibly fewer than n runes).
func Stride(s string, n int) []string {
	runes := []rune(s)
	// Number of full-or-partial chunks: ceil(len(runes) / n).
	chunks := (len(runes) + n - 1) / n
	out := make([]string, 0, chunks)
	for start := 0; start < len(runes); start += n {
		end := start + n
		if end > len(runes) {
			end = len(runes)
		}
		out = append(out, string(runes[start:end]))
	}
	return out
}
// StrideFixed returns the n-rune chunks of s, excluding any trailing chunk
// with fewer than n runes (i.e. no remainder element).
func StrideFixed(s string, n int) []string {
	o := Stride(s, n)
	// Bug fix: guard the empty input. Stride("") yields no chunks at all,
	// and indexing o[len(o)-1] below would panic with index -1.
	if len(o) == 0 {
		return o
	}
	// Bug fix: compare in runes, not bytes. Stride chunks by runes, so a
	// multi-byte final chunk could pass a byte-length test (len(string) is
	// bytes) while actually holding fewer than n runes.
	if len([]rune(o[len(o)-1])) < n {
		return o[:len(o)-1]
	}
	return o
}
//===========================================================================
// String Collection Helpers
//===========================================================================
// ListContains reports whether value occurs in list, using a linear O(n) scan.
func ListContains(value string, list []string) bool {
	for _, candidate := range list {
		if candidate == value {
			return true
		}
	}
	return false
}
//===========================================================================
// Numeric Helpers
//===========================================================================
// MaxUInt64 returns the largest of the given values, or 0 when called with
// no arguments.
func MaxUInt64(values ...uint64) uint64 {
	var best uint64 // zero is a valid floor because the values are unsigned
	for _, v := range values {
		if v > best {
			best = v
		}
	}
	return best
}
// Blocks returns the number of 512 byte blocks required to hold value bytes,
// rounding up; zero bytes need zero blocks.
// NOTE(review): assumes minBlockSize (declared elsewhere in this package)
// is 512, per the comment above — confirm.
func Blocks(value uint64) uint64 {
	if value == 0 {
		return 0
	}
	blocks := value / minBlockSize
	if value%minBlockSize > 0 {
		// Partial final block still occupies a whole block.
		return blocks + 1
	}
	return blocks
} | utils.go | 0.757525 | 0.440108 | utils.go | starcoder
package onshape
import (
"encoding/json"
)
// BTPLiteralNumber258 struct for BTPLiteralNumber258.
// Generated-style OpenAPI model: every field is an optional pointer and is
// omitted from the JSON encoding when unset.
type BTPLiteralNumber258 struct {
	Atomic *bool `json:"atomic,omitempty"`
	BtType *string `json:"btType,omitempty"`
	DocumentationType *string `json:"documentationType,omitempty"`
	EndSourceLocation *int32 `json:"endSourceLocation,omitempty"`
	Integer *bool `json:"integer,omitempty"`
	NodeId *string `json:"nodeId,omitempty"`
	ShortDescriptor *string `json:"shortDescriptor,omitempty"`
	SpaceAfter *BTPSpace10 `json:"spaceAfter,omitempty"`
	SpaceBefore *BTPSpace10 `json:"spaceBefore,omitempty"`
	SpaceDefault *bool `json:"spaceDefault,omitempty"`
	StartSourceLocation *int32 `json:"startSourceLocation,omitempty"`
	Text *string `json:"text,omitempty"`
	Value *float64 `json:"value,omitempty"`
}
// NewBTPLiteralNumber258 instantiates a new BTPLiteralNumber258 object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBTPLiteralNumber258() *BTPLiteralNumber258 {
	this := BTPLiteralNumber258{}
	return &this
}

// NewBTPLiteralNumber258WithDefaults instantiates a new BTPLiteralNumber258 object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
// NOTE(review): no field currently has a default, so both constructors
// return an identical empty model.
func NewBTPLiteralNumber258WithDefaults() *BTPLiteralNumber258 {
	this := BTPLiteralNumber258{}
	return &this
}
// The accessors below follow the generated OpenAPI getter/setter pattern:
// Get<Field> returns the dereferenced value or the type's zero value,
// Get<Field>Ok additionally reports presence, Has<Field> tests presence,
// and Set<Field> stores a pointer to a copy of the argument. The nil
// receiver/field guards make all getters safe on an unset model.

// GetAtomic returns the Atomic field value if set, zero value otherwise.
func (o *BTPLiteralNumber258) GetAtomic() bool {
	if o == nil || o.Atomic == nil {
		var ret bool
		return ret
	}
	return *o.Atomic
}

// GetAtomicOk returns a tuple with the Atomic field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPLiteralNumber258) GetAtomicOk() (*bool, bool) {
	if o == nil || o.Atomic == nil {
		return nil, false
	}
	return o.Atomic, true
}

// HasAtomic returns a boolean if a field has been set.
func (o *BTPLiteralNumber258) HasAtomic() bool {
	if o != nil && o.Atomic != nil {
		return true
	}
	return false
}

// SetAtomic gets a reference to the given bool and assigns it to the Atomic field.
func (o *BTPLiteralNumber258) SetAtomic(v bool) {
	o.Atomic = &v
}

// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTPLiteralNumber258) GetBtType() string {
	if o == nil || o.BtType == nil {
		var ret string
		return ret
	}
	return *o.BtType
}

// GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPLiteralNumber258) GetBtTypeOk() (*string, bool) {
	if o == nil || o.BtType == nil {
		return nil, false
	}
	return o.BtType, true
}

// HasBtType returns a boolean if a field has been set.
func (o *BTPLiteralNumber258) HasBtType() bool {
	if o != nil && o.BtType != nil {
		return true
	}
	return false
}

// SetBtType gets a reference to the given string and assigns it to the BtType field.
func (o *BTPLiteralNumber258) SetBtType(v string) {
	o.BtType = &v
}

// GetDocumentationType returns the DocumentationType field value if set, zero value otherwise.
func (o *BTPLiteralNumber258) GetDocumentationType() string {
	if o == nil || o.DocumentationType == nil {
		var ret string
		return ret
	}
	return *o.DocumentationType
}

// GetDocumentationTypeOk returns a tuple with the DocumentationType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPLiteralNumber258) GetDocumentationTypeOk() (*string, bool) {
	if o == nil || o.DocumentationType == nil {
		return nil, false
	}
	return o.DocumentationType, true
}

// HasDocumentationType returns a boolean if a field has been set.
func (o *BTPLiteralNumber258) HasDocumentationType() bool {
	if o != nil && o.DocumentationType != nil {
		return true
	}
	return false
}

// SetDocumentationType gets a reference to the given string and assigns it to the DocumentationType field.
func (o *BTPLiteralNumber258) SetDocumentationType(v string) {
	o.DocumentationType = &v
}

// GetEndSourceLocation returns the EndSourceLocation field value if set, zero value otherwise.
func (o *BTPLiteralNumber258) GetEndSourceLocation() int32 {
	if o == nil || o.EndSourceLocation == nil {
		var ret int32
		return ret
	}
	return *o.EndSourceLocation
}

// GetEndSourceLocationOk returns a tuple with the EndSourceLocation field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPLiteralNumber258) GetEndSourceLocationOk() (*int32, bool) {
	if o == nil || o.EndSourceLocation == nil {
		return nil, false
	}
	return o.EndSourceLocation, true
}

// HasEndSourceLocation returns a boolean if a field has been set.
func (o *BTPLiteralNumber258) HasEndSourceLocation() bool {
	if o != nil && o.EndSourceLocation != nil {
		return true
	}
	return false
}

// SetEndSourceLocation gets a reference to the given int32 and assigns it to the EndSourceLocation field.
func (o *BTPLiteralNumber258) SetEndSourceLocation(v int32) {
	o.EndSourceLocation = &v
}

// GetInteger returns the Integer field value if set, zero value otherwise.
func (o *BTPLiteralNumber258) GetInteger() bool {
	if o == nil || o.Integer == nil {
		var ret bool
		return ret
	}
	return *o.Integer
}

// GetIntegerOk returns a tuple with the Integer field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPLiteralNumber258) GetIntegerOk() (*bool, bool) {
	if o == nil || o.Integer == nil {
		return nil, false
	}
	return o.Integer, true
}

// HasInteger returns a boolean if a field has been set.
func (o *BTPLiteralNumber258) HasInteger() bool {
	if o != nil && o.Integer != nil {
		return true
	}
	return false
}

// SetInteger gets a reference to the given bool and assigns it to the Integer field.
func (o *BTPLiteralNumber258) SetInteger(v bool) {
	o.Integer = &v
}

// GetNodeId returns the NodeId field value if set, zero value otherwise.
func (o *BTPLiteralNumber258) GetNodeId() string {
	if o == nil || o.NodeId == nil {
		var ret string
		return ret
	}
	return *o.NodeId
}

// GetNodeIdOk returns a tuple with the NodeId field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPLiteralNumber258) GetNodeIdOk() (*string, bool) {
	if o == nil || o.NodeId == nil {
		return nil, false
	}
	return o.NodeId, true
}

// HasNodeId returns a boolean if a field has been set.
func (o *BTPLiteralNumber258) HasNodeId() bool {
	if o != nil && o.NodeId != nil {
		return true
	}
	return false
}

// SetNodeId gets a reference to the given string and assigns it to the NodeId field.
func (o *BTPLiteralNumber258) SetNodeId(v string) {
	o.NodeId = &v
}

// GetShortDescriptor returns the ShortDescriptor field value if set, zero value otherwise.
func (o *BTPLiteralNumber258) GetShortDescriptor() string {
	if o == nil || o.ShortDescriptor == nil {
		var ret string
		return ret
	}
	return *o.ShortDescriptor
}

// GetShortDescriptorOk returns a tuple with the ShortDescriptor field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPLiteralNumber258) GetShortDescriptorOk() (*string, bool) {
	if o == nil || o.ShortDescriptor == nil {
		return nil, false
	}
	return o.ShortDescriptor, true
}

// HasShortDescriptor returns a boolean if a field has been set.
func (o *BTPLiteralNumber258) HasShortDescriptor() bool {
	if o != nil && o.ShortDescriptor != nil {
		return true
	}
	return false
}

// SetShortDescriptor gets a reference to the given string and assigns it to the ShortDescriptor field.
func (o *BTPLiteralNumber258) SetShortDescriptor(v string) {
	o.ShortDescriptor = &v
}

// GetSpaceAfter returns the SpaceAfter field value if set, zero value otherwise.
func (o *BTPLiteralNumber258) GetSpaceAfter() BTPSpace10 {
	if o == nil || o.SpaceAfter == nil {
		var ret BTPSpace10
		return ret
	}
	return *o.SpaceAfter
}

// GetSpaceAfterOk returns a tuple with the SpaceAfter field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPLiteralNumber258) GetSpaceAfterOk() (*BTPSpace10, bool) {
	if o == nil || o.SpaceAfter == nil {
		return nil, false
	}
	return o.SpaceAfter, true
}

// HasSpaceAfter returns a boolean if a field has been set.
func (o *BTPLiteralNumber258) HasSpaceAfter() bool {
	if o != nil && o.SpaceAfter != nil {
		return true
	}
	return false
}

// SetSpaceAfter gets a reference to the given BTPSpace10 and assigns it to the SpaceAfter field.
func (o *BTPLiteralNumber258) SetSpaceAfter(v BTPSpace10) {
	o.SpaceAfter = &v
}

// GetSpaceBefore returns the SpaceBefore field value if set, zero value otherwise.
func (o *BTPLiteralNumber258) GetSpaceBefore() BTPSpace10 {
	if o == nil || o.SpaceBefore == nil {
		var ret BTPSpace10
		return ret
	}
	return *o.SpaceBefore
}

// GetSpaceBeforeOk returns a tuple with the SpaceBefore field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPLiteralNumber258) GetSpaceBeforeOk() (*BTPSpace10, bool) {
	if o == nil || o.SpaceBefore == nil {
		return nil, false
	}
	return o.SpaceBefore, true
}

// HasSpaceBefore returns a boolean if a field has been set.
func (o *BTPLiteralNumber258) HasSpaceBefore() bool {
	if o != nil && o.SpaceBefore != nil {
		return true
	}
	return false
}

// SetSpaceBefore gets a reference to the given BTPSpace10 and assigns it to the SpaceBefore field.
func (o *BTPLiteralNumber258) SetSpaceBefore(v BTPSpace10) {
	o.SpaceBefore = &v
}

// GetSpaceDefault returns the SpaceDefault field value if set, zero value otherwise.
func (o *BTPLiteralNumber258) GetSpaceDefault() bool {
	if o == nil || o.SpaceDefault == nil {
		var ret bool
		return ret
	}
	return *o.SpaceDefault
}

// GetSpaceDefaultOk returns a tuple with the SpaceDefault field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPLiteralNumber258) GetSpaceDefaultOk() (*bool, bool) {
	if o == nil || o.SpaceDefault == nil {
		return nil, false
	}
	return o.SpaceDefault, true
}

// HasSpaceDefault returns a boolean if a field has been set.
func (o *BTPLiteralNumber258) HasSpaceDefault() bool {
	if o != nil && o.SpaceDefault != nil {
		return true
	}
	return false
}

// SetSpaceDefault gets a reference to the given bool and assigns it to the SpaceDefault field.
func (o *BTPLiteralNumber258) SetSpaceDefault(v bool) {
	o.SpaceDefault = &v
}

// GetStartSourceLocation returns the StartSourceLocation field value if set, zero value otherwise.
func (o *BTPLiteralNumber258) GetStartSourceLocation() int32 {
	if o == nil || o.StartSourceLocation == nil {
		var ret int32
		return ret
	}
	return *o.StartSourceLocation
}

// GetStartSourceLocationOk returns a tuple with the StartSourceLocation field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPLiteralNumber258) GetStartSourceLocationOk() (*int32, bool) {
	if o == nil || o.StartSourceLocation == nil {
		return nil, false
	}
	return o.StartSourceLocation, true
}

// HasStartSourceLocation returns a boolean if a field has been set.
func (o *BTPLiteralNumber258) HasStartSourceLocation() bool {
	if o != nil && o.StartSourceLocation != nil {
		return true
	}
	return false
}

// SetStartSourceLocation gets a reference to the given int32 and assigns it to the StartSourceLocation field.
func (o *BTPLiteralNumber258) SetStartSourceLocation(v int32) {
	o.StartSourceLocation = &v
}
// GetText returns the Text field value if set, zero value otherwise.
func (o *BTPLiteralNumber258) GetText() string {
if o == nil || o.Text == nil {
var ret string
return ret
}
return *o.Text
}
// GetTextOk returns a tuple with the Text field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPLiteralNumber258) GetTextOk() (*string, bool) {
if o == nil || o.Text == nil {
return nil, false
}
return o.Text, true
}
// HasText returns a boolean if a field has been set.
func (o *BTPLiteralNumber258) HasText() bool {
if o != nil && o.Text != nil {
return true
}
return false
}
// SetText gets a reference to the given string and assigns it to the Text field.
func (o *BTPLiteralNumber258) SetText(v string) {
o.Text = &v
}
// GetValue returns the Value field value if set, zero value otherwise.
func (o *BTPLiteralNumber258) GetValue() float64 {
if o == nil || o.Value == nil {
var ret float64
return ret
}
return *o.Value
}
// GetValueOk returns a tuple with the Value field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPLiteralNumber258) GetValueOk() (*float64, bool) {
if o == nil || o.Value == nil {
return nil, false
}
return o.Value, true
}
// HasValue returns a boolean if a field has been set.
func (o *BTPLiteralNumber258) HasValue() bool {
if o != nil && o.Value != nil {
return true
}
return false
}
// SetValue gets a reference to the given float64 and assigns it to the Value field.
func (o *BTPLiteralNumber258) SetValue(v float64) {
o.Value = &v
}
func (o BTPLiteralNumber258) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.Atomic != nil {
toSerialize["atomic"] = o.Atomic
}
if o.BtType != nil {
toSerialize["btType"] = o.BtType
}
if o.DocumentationType != nil {
toSerialize["documentationType"] = o.DocumentationType
}
if o.EndSourceLocation != nil {
toSerialize["endSourceLocation"] = o.EndSourceLocation
}
if o.Integer != nil {
toSerialize["integer"] = o.Integer
}
if o.NodeId != nil {
toSerialize["nodeId"] = o.NodeId
}
if o.ShortDescriptor != nil {
toSerialize["shortDescriptor"] = o.ShortDescriptor
}
if o.SpaceAfter != nil {
toSerialize["spaceAfter"] = o.SpaceAfter
}
if o.SpaceBefore != nil {
toSerialize["spaceBefore"] = o.SpaceBefore
}
if o.SpaceDefault != nil {
toSerialize["spaceDefault"] = o.SpaceDefault
}
if o.StartSourceLocation != nil {
toSerialize["startSourceLocation"] = o.StartSourceLocation
}
if o.Text != nil {
toSerialize["text"] = o.Text
}
if o.Value != nil {
toSerialize["value"] = o.Value
}
return json.Marshal(toSerialize)
}
type NullableBTPLiteralNumber258 struct {
value *BTPLiteralNumber258
isSet bool
}
func (v NullableBTPLiteralNumber258) Get() *BTPLiteralNumber258 {
return v.value
}
func (v *NullableBTPLiteralNumber258) Set(val *BTPLiteralNumber258) {
v.value = val
v.isSet = true
}
func (v NullableBTPLiteralNumber258) IsSet() bool {
return v.isSet
}
func (v *NullableBTPLiteralNumber258) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableBTPLiteralNumber258(val *BTPLiteralNumber258) *NullableBTPLiteralNumber258 {
return &NullableBTPLiteralNumber258{value: val, isSet: true}
}
func (v NullableBTPLiteralNumber258) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableBTPLiteralNumber258) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | onshape/model_btp_literal_number_258.go | 0.727007 | 0.432902 | model_btp_literal_number_258.go | starcoder |
package mat
import (
"math"
"math/rand"
"time"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func NewSphere() *Sphere {
return &Sphere{
Id: rand.Int63(),
Transform: New4x4(),
Inverse: New4x4(),
InverseTranspose: New4x4(),
Material: NewDefaultMaterial(),
savedVec: NewVector(0, 0, 0),
savedNormal: NewVector(0, 0, 0),
savedRay: NewRay(NewPoint(0, 0, 0), NewVector(0, 0, 0)),
xsCache: make([]Intersection, 2),
xsEmpty: make([]Intersection, 0),
originPoint: NewPoint(0, 0, 0),
CastShadow: true,
Label: "Sphere",
}
}
func NewGlassSphere() *Sphere {
s := NewSphere()
material := NewGlassMaterial(1.5)
s.SetMaterial(material)
return s
}
type Sphere struct {
Id int64
Transform Mat4x4
Inverse Mat4x4
InverseTranspose Mat4x4
Material Material
Label string
parent Shape
savedRay Ray
// cached stuff
originPoint Tuple4
savedVec Tuple4
xsCache []Intersection
xsEmpty []Intersection
savedNormal Tuple4
CastShadow bool
}
func (s *Sphere) CastsShadow() bool {
return s.CastShadow
}
func (s *Sphere) GetParent() Shape {
return s.parent
}
func (s *Sphere) NormalAtLocal(point Tuple4, intersection *Intersection) Tuple4 {
SubPtr(point, s.originPoint, &s.savedNormal)
return s.savedNormal
}
func (s *Sphere) GetLocalRay() Ray {
return s.savedRay
}
// IntersectLocal implements Sphere-ray intersection
func (s *Sphere) IntersectLocal(r Ray) []Intersection {
s.savedRay = r
//s.XsCache = s.XsCache[:0]
// this is a vector from the origin of the ray to the center of the sphere at 0,0,0
//SubPtr(r.Origin, s.originPoint, &s.savedVec)
// Note that doing the Subtraction inlined was much faster than letting SubPtr do it.
// Shouldn't the SubPtr be inlined by the compiler? Need to figure out what's going on here...
for i := 0; i < 4; i++ {
s.savedVec[i] = r.Origin[i] - s.originPoint[i]
}
// This dot product is
a := Dot(r.Direction, r.Direction)
// Take the dot of the direction and the vector from ray origin to sphere center times 2
b := 2.0 * Dot(r.Direction, s.savedVec)
// Take the dot of the two sphereToRay vectors and decrease by 1 (is that because the sphere is unit length 1?
c := Dot(s.savedVec, s.savedVec) - 1.0
// calculate the discriminant
discriminant := (b * b) - 4*a*c
if discriminant < 0.0 {
return s.xsEmpty
}
// finally, find the intersection distances on our ray. Some values:
t1 := (-b - math.Sqrt(discriminant)) / (2 * a)
t2 := (-b + math.Sqrt(discriminant)) / (2 * a)
s.xsCache[0].T = t1
s.xsCache[1].T = t2
s.xsCache[0].S = s
s.xsCache[1].S = s
return s.xsCache
}
func (s *Sphere) ID() int64 {
return s.Id
}
func (s *Sphere) GetTransform() Mat4x4 {
return s.Transform
}
func (s *Sphere) GetInverse() Mat4x4 {
return s.Inverse
}
func (s *Sphere) GetInverseTranspose() Mat4x4 {
return s.InverseTranspose
}
func (s *Sphere) GetMaterial() Material {
return s.Material
}
// SetTransform passes a pointer to the Sphere on which to apply the translation matrix
func (s *Sphere) SetTransform(translation Mat4x4) {
s.Transform = Multiply(s.Transform, translation)
s.Inverse = Inverse(s.Transform)
s.InverseTranspose = Transpose(s.Inverse)
}
// SetMaterial passes a pointer to the Sphere on which to set the material
func (s *Sphere) SetMaterial(m Material) {
s.Material = m
}
func (s *Sphere) SetParent(shape Shape) {
s.parent = shape
}
func (s *Sphere) Name() string {
return s.Label
} | internal/pkg/mat/sphere.go | 0.741674 | 0.507812 | sphere.go | starcoder |
package sdk
import (
"encoding/binary"
"math"
"time"
"unicode/utf16"
)
type IntGetter func(Record) (int, bool)
type FloatGetter func(Record) (float64, bool)
type BoolGetter func(Record) (bool, bool)
type TimeGetter func(Record) (time.Time, bool)
type StringGetter func(Record) (string, bool)
func bytesToByte(getBytes BytesGetter) IntGetter {
return func(record Record) (int, bool) {
bytes := getBytes(record)
if bytes[1] == 1 {
return 0, true
}
return int(bytes[0]), false
}
}
func bytesToInt16(getBytes BytesGetter) IntGetter {
return func(record Record) (int, bool) {
bytes := getBytes(record)
if bytes[2] == 1 {
return 0, true
}
return int(int16(binary.LittleEndian.Uint16(bytes))), false
}
}
func bytesToInt32(getBytes BytesGetter) IntGetter {
return func(record Record) (int, bool) {
bytes := getBytes(record)
if bytes[4] == 1 {
return 0, true
}
return int(int32(binary.LittleEndian.Uint32(bytes))), false
}
}
func bytesToInt64(getBytes BytesGetter) IntGetter {
return func(record Record) (int, bool) {
bytes := getBytes(record)
if bytes[8] == 1 {
return 0, true
}
return int(int64(binary.LittleEndian.Uint64(bytes))), false
}
}
func bytesToFloat(getBytes BytesGetter) FloatGetter {
return func(record Record) (float64, bool) {
bytes := getBytes(record)
if bytes[4] == 1 {
return 0, true
}
return float64(math.Float32frombits(binary.LittleEndian.Uint32(bytes))), false
}
}
func bytesToDouble(getBytes BytesGetter) FloatGetter {
return func(record Record) (float64, bool) {
bytes := getBytes(record)
if bytes[8] == 1 {
return 0, true
}
return math.Float64frombits(binary.LittleEndian.Uint64(bytes)), false
}
}
func bytesToString(getBytes BytesGetter, size int) StringGetter {
return func(record Record) (string, bool) {
bytes := getBytes(record)
if bytes[size] == 1 {
return ``, true
}
return string(truncateAtNullByte(bytes)), false
}
}
// truncateAtNullUtf16 returns raw truncated at the first zero UTF-16 code
// unit (the null terminator), or the full slice when no terminator exists.
func truncateAtNullUtf16(raw []uint16) []uint16 {
	for i, u := range raw {
		if u == 0 {
			return raw[:i]
		}
	}
	return raw
}
func bytesToWString(getBytes BytesGetter, size int) StringGetter {
return func(record Record) (string, bool) {
bytes := getBytes(record)
if bytes[size*2] == 1 {
return ``, true
}
utf16Bytes := bytesToUtf16(bytes)
utf16Bytes = truncateAtNullUtf16(utf16Bytes)
if len(utf16Bytes) == 0 {
return ``, false
}
value := string(utf16.Decode(utf16Bytes))
return value, false
}
}
func bytesToV_String(getBytes BytesGetter, _ int) StringGetter {
return func(record Record) (string, bool) {
bytes := getBytes(record)
if bytes == nil {
return ``, true
}
return string(bytes), false
}
}
func bytesToV_WString(getBytes BytesGetter, _ int) StringGetter {
return func(record Record) (string, bool) {
bytes := getBytes(record)
if bytes == nil {
return ``, true
}
if len(bytes) == 0 {
return ``, false
}
utf16Bytes := bytesToUtf16(bytes)
value := string(utf16.Decode(utf16Bytes))
return value, false
}
} | sdk/field_getters.go | 0.568176 | 0.408454 | field_getters.go | starcoder |
package activation
import (
"fmt"
"math"
"sort"
"strings"
)
//function types
const (
FuncTypeSigmoid = "sig"
FuncTypeTanh = "tanh"
FuncTypeRelu = "relu"
FuncTypeLeakyRelu = "leaky_relu"
FuncTypeElu = "elu"
FuncTypeIden = "iden"
FuncTypeCustom = "custom"
)
var validFtypes = map[string]struct{}{FuncTypeSigmoid: {}, FuncTypeTanh: {}, FuncTypeRelu: {}, FuncTypeLeakyRelu: {}, FuncTypeElu: {}, FuncTypeIden: {}, FuncTypeCustom: {}}
//ValidateFType checks if valid function type
func ValidateFType(ftype string) error {
if _, ok := validFtypes[ftype]; !ok {
return fmt.Errorf("invalid activation function type '%s': expected one of [%s]", ftype, strings.Join(getValidFTypes(), ", "))
}
return nil
}
//getValidFuncTypes returns valid activation function types
func getValidFTypes() []string {
valids := make([]string, len(validFtypes))
i := 0
for k := range validFtypes {
valids[i] = k
i++
}
sort.Strings(valids)
return valids
}
//GetF generates an instance of F for a pair of activation function type and parameters
func GetF(ftype string, params []float64) (F, error) {
if err := ValidateFType(ftype); err != nil {
return F{}, err
}
switch ftype {
case FuncTypeIden:
return Iden(), nil
case FuncTypeSigmoid:
return Sigmoid(), nil
case FuncTypeTanh:
return Tanh(), nil
case FuncTypeRelu:
return Relu(), nil
case FuncTypeLeakyRelu:
nparams := 1
if len(params) != nparams {
return F{}, fmt.Errorf("expected %d parameter(s) for func '%s'", nparams, ftype)
}
return LeakyRelu(params[0]), nil
case FuncTypeElu:
nparams := 1
if len(params) != nparams {
return F{}, fmt.Errorf("expected %d parameter(s) for func '%s'", nparams, ftype)
}
return Elu(params[0]), nil
default:
return F{}, fmt.Errorf("invalid activation function type '%s': expected one of [%s]", ftype, strings.Join(getValidFTypes(), ", "))
}
}
//F holds an activation function and its derivative
type F struct {
	Func  func(x float64) float64
	Deriv func(x float64) float64
}

//Sigmoid returns a sigmoid function with its derivative
func Sigmoid() F {
	return F{Func: sig, Deriv: derivSig}
}

//sig is the sigmoid or logistic activation function 1/(1+e^-x)
func sig(x float64) float64 {
	return 1 / (1 + math.Exp(-x))
}

//derivSig is sig's derivative: sig(x)*(1-sig(x)).
//sig is evaluated once instead of twice.
func derivSig(x float64) float64 {
	s := sig(x)
	return s * (1 - s)
}

//Tanh returns hyperbolic tangent and its derivative
func Tanh() F {
	return F{Func: tanh, Deriv: derivTanh}
}

//tanh is the hyperbolic tangent (e^x - e^-x)/(e^x + e^-x)
func tanh(x float64) float64 {
	return (math.Exp(x) - math.Exp(-x)) / (math.Exp(x) + math.Exp(-x))
}

//derivTanh is tanh's derivative: 1 - tanh(x)^2.
//Bug fix: the previous implementation returned 1 - tanh(x), which is not
//the derivative of tanh and would break gradient-based training.
func derivTanh(x float64) float64 {
	t := tanh(x)
	return 1 - t*t
}
//Elu returns an exponential linear unit with its derivative
func Elu(alpha float64) F {
return F{Func: newElu(alpha), Deriv: newDerivElu(alpha)}
}
//NewElu returns a parametrized Exponential Linear Unit
func newElu(alpha float64) func(x float64) float64 {
return func(x float64) float64 {
if x > 0 {
return x
}
return alpha * (math.Exp(x) - 1)
}
}
//NewDerivElu returns the derivative of a parametrized Exponential Linear Unit
func newDerivElu(alpha float64) func(x float64) float64 {
return func(x float64) float64 {
if x > 0 {
return 1
}
return alpha * math.Exp(x)
}
}
//Relu returns a rectified linear unit and its derivative
func Relu() F {
return F{Func: relu, Deriv: derivRelu}
}
//Relu rectified linear unit
func relu(x float64) float64 {
if x > 0 {
return x
}
return 0
}
//DerivRelu is Relu's derivative. Undefined for x=0
func derivRelu(x float64) float64 {
if x > 0 {
return 1
}
return 0
}
//LeakyRelu returns a leaky rectified linear unit and its derivative
func LeakyRelu(alpha float64) F {
return F{Func: newLeakyRelu(alpha), Deriv: newDerivLeakyRelu(alpha)}
}
//NewLeakyRelu adds a slight slope for x<=0
func newLeakyRelu(alpha float64) func(x float64) float64 {
return func(x float64) float64 {
if x > 0 {
return x
}
return alpha * x
}
}
//NewDerivLeakyRelu is the derivative of a parametrized LeakyRelu. Undefined for x=0
func newDerivLeakyRelu(alpha float64) func(x float64) float64 {
return func(x float64) float64 {
if x > 0 {
return 1
}
return alpha
}
}
//Iden returns the identity function and its derivative
func Iden() F {
return F{Func: iden, Deriv: derivIden}
}
//iden returns x
func iden(x float64) float64 {
return x
}
//derivIden is iden's derivative
func derivIden(x float64) float64 {
return 1
}
//Power returns x^n and its derivative
func Power(coef float64, n uint) F {
return F{Func: newPower(coef, n), Deriv: newDerivPower(coef, n)}
}
func newPower(coef float64, n uint) func(x float64) float64 {
return func(x float64) float64 {
return coef * math.Pow(x, float64(n))
}
}
// newDerivPower returns the derivative of x -> coef*x^n, i.e. coef*n*x^(n-1).
// n == 0 is handled explicitly: the derivative of a constant is 0. The naive
// formula would compute n-1 on a uint (wrapping to a huge exponent) and
// evaluate 0 * Pow(0, ...) = NaN at x == 0.
func newDerivPower(coef float64, n uint) func(x float64) float64 {
	if n == 0 {
		return func(float64) float64 { return 0 }
	}
	return func(x float64) float64 {
		return coef * float64(n) * math.Pow(x, float64(n-1))
	}
}
//Abs returnsthe absolute value function and its derivative
func Abs() F {
return F{Func: math.Abs, Deriv: derivAbs}
}
func derivAbs(x float64) float64 {
if x < 0 {
return -1.0
}
return 1.0
}
//Softmax? | internal/activation/activation.go | 0.808559 | 0.486941 | activation.go | starcoder |
package matrix
import (
"fmt"
"math"
"math/rand"
"os"
"time"
)
// Ternary simple function for simulating ternary operator.
// Note that, unlike a real ternary operator, both a and b are evaluated
// eagerly by the caller.
func Ternary(statement bool, a, b interface{}) interface{} {
	if statement {
		return a
	}
	return b
}

// AbsInt returns absolute value of input int
func AbsInt(n int) int {
	if n < 0 {
		return -n
	}
	return n
}

// MinInt returns minimum value of input two ints.
// Implemented with a direct comparison rather than Ternary to avoid
// boxing the ints through interface{} and the type assertion back.
func MinInt(x, y int) int {
	if x < y {
		return x
	}
	return y
}

// MaxInt returns maximum value of input two ints.
// See MinInt for why Ternary is not used here.
func MaxInt(x, y int) int {
	if x > y {
		return x
	}
	return y
}
// GetFloat64 converts any supported built-in Go numeric value to float64.
// It panics when x does not hold one of the supported numeric types.
// (Plain `uint` and `uintptr` are intentionally not handled, matching the
// original set of accepted types.)
func GetFloat64(x interface{}) float64 {
	switch n := x.(type) {
	case float64:
		return n
	case float32:
		return float64(n)
	case int:
		return float64(n)
	case int8:
		return float64(n)
	case int16:
		return float64(n)
	case int32:
		return float64(n)
	case int64:
		return float64(n)
	case uint8:
		return float64(n)
	case uint16:
		return float64(n)
	case uint32:
		return float64(n)
	case uint64:
		return float64(n)
	default:
		panic("invalid numeric type of input")
	}
}
// SortPair associates an integer key with a float64 value so that a
// collection of keys can be ordered by their values.
type SortPair struct {
	Key   int
	Value float64
}

// SortPairSlice implements sort.Interface over SortPair values,
// ordering ascending by Value.
type SortPairSlice []SortPair

// Len reports the number of pairs.
func (sps SortPairSlice) Len() int { return len(sps) }

// Swap exchanges the pairs at positions i and j.
func (sps SortPairSlice) Swap(i, j int) { sps[i], sps[j] = sps[j], sps[i] }

// Less orders pairs by ascending Value.
func (sps SortPairSlice) Less(i, j int) bool { return sps[i].Value < sps[j].Value }
// FloatEqual checks whether two float numbers are equal within the package
// threshold EPS, using relative error except near zero.
// https://floating-point-gui.de/errors/comparison/
func FloatEqual(x, y float64) bool {
	if x == y {
		// Also covers the case where both are exactly zero or infinite equal.
		return true
	}
	absX, absY := math.Abs(x), math.Abs(y)
	diff := math.Abs(x - y)
	if x == 0 || y == 0 || absX+absY < EPS {
		// Relative error is meaningless near zero; fall back to absolute.
		return diff < EPS
	}
	// Compare the difference relative to the mean magnitude of x and y.
	return diff/(math.Abs(x+y)/2.) < EPS
}
// VEqual checks whether two vector are equal, based on `FloatEqual`
func VEqual(v1, v2 *Vector) bool {
if len(*v1) != len(*v2) {
return false
}
for i, v := range *v1 {
if v != (*v2)[i] && !FloatEqual(v, (*v2)[i]) {
return false
}
}
return true
}
// MEqual checks whether two matrix are equal, based on `VEqual`
func MEqual(mat1, mat2 *Matrix) bool {
row1, col1 := mat1.Dims()
row2, col2 := mat2.Dims()
if [2]int{row1, col1} != [2]int{row2, col2} {
return false
}
for i, col := range mat1.Data {
if !VEqual(&col, mat2.Row(i)) {
return false
}
}
return true
}
// GenerateRandomFloat generates a pseudo-random float64 in (-1, 1).
//
// Fix: the global source is no longer reseeded on every call. Reseeding with
// time.Now().UnixNano() per call made consecutive calls within the same
// nanosecond return identical values, and repeated global reseeding is an
// anti-pattern (rand.Seed is deprecated since Go 1.20).
func GenerateRandomFloat() float64 {
	return rand.Float64() - rand.Float64()
}
// GenerateRandomVector generates a vector of `size` pseudo-random float64
// values, each in (-1, 1).
//
// Fix: the global source is no longer reseeded on every call — reseeding with
// time.Now().UnixNano() per call could make two calls in the same nanosecond
// produce identical vectors.
func GenerateRandomVector(size int) *Vector {
	slice := make(Vector, size)
	for i := 0; i < size; i++ {
		slice[i] = rand.Float64() - rand.Float64()
	}
	return &slice
}
// GenerateRandomSymmetric33Matrix generates a 3 x 3 matrix with random float64
func GenerateRandomSymmetric33Matrix() *Matrix {
entries := *GenerateRandomVector(6)
m := ZeroMatrix(3, 3)
m.Set(0, 0, entries[0])
m.Set(1, 1, entries[1])
m.Set(2, 2, entries[2])
m.Set(0, 1, entries[3])
m.Set(1, 0, entries[3])
m.Set(0, 2, entries[4])
m.Set(2, 0, entries[4])
m.Set(1, 2, entries[5])
m.Set(2, 1, entries[5])
return m
}
// GenerateRandomSquareMatrix generates a `size x size` square matrix with random float64
func GenerateRandomSquareMatrix(size int) *Matrix {
return GenerateRandomMatrix(size, size)
}
// GenerateRandomMatrix generates a `row x col` matrix with random float64
func GenerateRandomMatrix(row, col int) *Matrix {
rows := make(Data, row)
for i := range rows {
rows[i] = *GenerateRandomVector(col)
}
m := new(Matrix).Init(rows)
return m
}
// GenerateRandomSparseMatrix generates a `rows x cols` sparse matrix with `entriesNum` elements
func GenerateRandomSparseMatrix(rows, cols, entriesNum int) *SparseMatrix {
nsm := ZeroSparseMatrix(rows, cols)
rand.Seed(time.Now().UnixNano())
for i := 0; i < entriesNum; i++ {
nsm.Set(rand.Intn(rows), rand.Intn(cols), rand.Float64()-rand.Float64())
}
return nsm
}
/*
// Vector to iterable
func VectorIter(v *Vector) interface{} {
return *v
}
// Matrix to row iterable
func MatrixRowIter(t *Matrix) interface{} {
return t.Data
}
// Matrix to element iterable
// row-wise
func MatrixElementIter(t *Matrix) interface{} {
return *(t.Flat())
}
// Map function on iterable
func Map(input interface{}, mapper func(interface{}) interface{}) (output interface{}) {
val := reflect.ValueOf(input)
out := make([]interface{}, val.Len())
wg := &sync.WaitGroup{}
for i := 0; i < val.Len(); i++ {
wg.Add(1)
go func(i int) {
wg.Done()
out[i] = mapper(val.Index(i).Interface())
}(i)
}
wg.Wait()
return out
}
// Reduce iterable by function
func Reduce(input interface{}, reducer func(interface{}, interface{}) interface{}) interface{} {
val := reflect.ValueOf(input)
tmp := val.Index(0).Interface()
for i := 0; i < val.Len()-1; i++ {
tmp = reducer(tmp, val.Index(i).Interface())
}
return tmp
}
// Filter iterable by function
func Filter(input interface{}, filter func(interface{}) bool) interface{} {
val := reflect.ValueOf(input)
out := make([]interface{}, 0, val.Len())
for i := 0; i < val.Len(); i++ {
if filter(val.Index(i).Interface()) {
out = append(out, val.Index(i).Interface())
}
}
return out
}
*/
func getFileSize(filename string) int64 {
fileStat, err := os.Stat(filename)
if err != nil {
panic(err)
}
fileSize := fileStat.Size()
return fileSize
}
// Load3DToMatrix reads 3D data into matrix
func Load3DToMatrix(path string) (*Matrix, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
fileSize := getFileSize(path)
est := 3 * fileSize / (4*8*3 + 1*2)
lines := make(Data, 0, est)
var x, y, z float64
for {
rowNum, err := fmt.Fscanln(file, &x, &y, &z)
if rowNum == 0 || err != nil {
break
}
lines = append(lines, Vector{x, y, z})
}
return new(Matrix).Init(lines), err
}
// Load2DToMatrix reads 2D data into matrix
func Load2DToMatrix(path string) (*Matrix, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
fileSize := getFileSize(path)
est := 3 * fileSize / (4*8*2 + 1)
lines := make(Data, 0, est)
var x, y float64
for {
rowNum, err := fmt.Fscanln(file, &x, &y)
if rowNum == 0 || err != nil {
break
}
lines = append(lines, Vector{x, y})
}
return new(Matrix).Init(lines), err
}
// WriteMatrixToTxt writes matrix data into file
func WriteMatrixToTxt(path string, t *Matrix) error {
file, err := os.Create(path)
if err != nil {
return err
}
defer file.Close()
_, c := t.Dims()
for i := range t.Data {
for j := range t.Data[i] {
_, err = fmt.Fprintf(file, "%f ", t.Data[i][j])
if j == c-1 {
_, err = fmt.Fprintf(file, "\n")
}
}
}
return err
} | matrix/utils.go | 0.738198 | 0.538801 | utils.go | starcoder |
package dconv
import (
"github.com/osgochina/donkeygo/errors/derror"
"reflect"
)
// Scan automatically calls MapToMap, MapToMaps, Struct or Structs function according to
// the type of parameter `pointer` to implement the converting.
// It calls function MapToMap if `pointer` is type of *map to do the converting.
// It calls function MapToMaps if `pointer` is type of *[]map/*[]*map to do the converting.
// It calls function Struct if `pointer` is type of *struct/**struct to do the converting.
// It calls function Structs if `pointer` is type of *[]struct/*[]*struct to do the converting.
func Scan(params interface{}, pointer interface{}, mapping ...map[string]string) (err error) {
	pointerType := reflect.TypeOf(pointer)
	if kind := pointerType.Kind(); kind != reflect.Ptr {
		return derror.Newf("params should be type of pointer, but got: %v", kind)
	}
	pointerElem := pointerType.Elem()
	switch pointerElem.Kind() {
	case reflect.Map:
		// *map -> single map conversion.
		return MapToMap(params, pointer, mapping...)
	case reflect.Array, reflect.Slice:
		// Strip pointer indirections off the element type to see what the
		// slice actually holds (handles *[]*map, *[]**struct, etc.).
		elem := pointerElem.Elem()
		for elem.Kind() == reflect.Ptr {
			elem = elem.Elem()
		}
		if elem.Kind() == reflect.Map {
			return MapToMaps(params, pointer, mapping...)
		}
		return Structs(params, pointer, mapping...)
	default:
		// *struct / **struct and anything else falls through to Struct.
		return Struct(params, pointer, mapping...)
	}
}
// ScanDeep automatically calls StructDeep or StructsDeep function according to the type of
// parameter `pointer` to implement the converting..
// It calls function StructDeep if `pointer` is type of *struct/**struct to do the converting.
// It calls function StructsDeep if `pointer` is type of *[]struct/*[]*struct to do the converting.
// Deprecated, use Scan instead.
func ScanDeep(params interface{}, pointer interface{}, mapping ...map[string]string) (err error) {
t := reflect.TypeOf(pointer)
k := t.Kind()
if k != reflect.Ptr {
return derror.Newf("params should be type of pointer, but got: %v", k)
}
switch t.Elem().Kind() {
case reflect.Array, reflect.Slice:
return StructsDeep(params, pointer, mapping...)
default:
return StructDeep(params, pointer, mapping...)
}
} | util/dconv/dconv_scan.go | 0.687315 | 0.4206 | dconv_scan.go | starcoder |
package simpleflow
// BatchSlice partitions items into consecutive sub-slices of `size` elements.
// The final batch holds whatever remains (possibly fewer than size elements,
// and an empty slice when items is empty). Sub-slices alias the input's
// backing array; the three-index slice caps each batch so appends to one
// batch cannot stomp the next.
func BatchSlice[T any](items []T, size int) [][]T {
	out := make([][]T, 0, (len(items)+size-1)/size)
	for len(items) > size {
		head := items[0:size:size]
		items = items[size:]
		out = append(out, head)
	}
	return append(out, items)
}
// BatchMap partitions items into sub-maps of `size` keys each. Because map
// iteration order is random, which keys land in which batch is
// nondeterministic; only the batch sizes are guaranteed (all of `size`
// except possibly the last).
func BatchMap[K comparable, V any](items map[K]V, size int) []map[K]V {
	out := make([]map[K]V, 0, (len(items)+size-1)/size)
	current := make(map[K]V, size)
	for k, v := range items {
		current[k] = v
		if len(current) < size {
			continue
		}
		out = append(out, current)
		current = make(map[K]V, size)
	}
	if len(current) != 0 {
		out = append(out, current)
	}
	return out
}
// BatchChan drains the items channel, grouping values into slices of `size`
// elements and sending each full group on `to`. When the input channel is
// closed, any remaining partial group is flushed. The function returns only
// after items is closed and fully consumed; it does not close `to`.
func BatchChan[T any](items <-chan T, size int, to chan []T) {
	pending := make([]T, 0, size)
	for item := range items {
		pending = append(pending, item)
		if len(pending) < size {
			continue
		}
		to <- pending
		pending = make([]T, 0, size)
	}
	if len(pending) > 0 {
		to <- pending
	}
}
// IncrementalBatchSlice appends v to items and, once at least batchSize
// elements have accumulated, pops a single batch of batchSize elements from
// the head. It returns the leftover elements and the batch; batch is nil
// while fewer than batchSize elements are buffered.
func IncrementalBatchSlice[T any](items []T, batchSize int, v T) (remaining, batch []T) {
	remaining = append(items, v)
	if len(remaining) < batchSize {
		return remaining, nil
	}
	return remaining[batchSize:], remaining[:batchSize]
}
// IncrementalBatchMap inserts k=v into items and, once the map holds at
// least batchSize entries, removes batchSize of them into a freshly
// allocated batch map which it returns (nil otherwise). Because map
// iteration order is random, you cannot predict which keys end up in the
// batch. items is mutated in place.
func IncrementalBatchMap[K comparable, V any](items map[K]V, batchSize int, k K, v V) (batch map[K]V) {
	items[k] = v
	if len(items) < batchSize {
		return nil
	}
	batch = make(map[K]V, batchSize)
	for key, val := range items {
		batch[key] = val
		delete(items, key)
		if len(batch) == batchSize {
			break
		}
	}
	return batch
}
package zcl
import (
"errors"
"fmt"
"github.com/shimmeringbee/bytecodec"
"github.com/shimmeringbee/bytecodec/bitbuffer"
"github.com/shimmeringbee/zigbee"
"math"
)
func marshalZCLType(bb *bitbuffer.BitBuffer, ctx bytecodec.Context, dt AttributeDataType, v interface{}) error {
switch dt {
case TypeNull:
return nil
case TypeData8:
return marshalData(bb, v, 1)
case TypeData16:
return marshalData(bb, v, 2)
case TypeData24:
return marshalData(bb, v, 3)
case TypeData32:
return marshalData(bb, v, 4)
case TypeData40:
return marshalData(bb, v, 5)
case TypeData48:
return marshalData(bb, v, 6)
case TypeData56:
return marshalData(bb, v, 7)
case TypeData64:
return marshalData(bb, v, 8)
case TypeBoolean:
return marshalBoolean(bb, v)
case TypeBitmap8:
return marshalUint(bb, v, 8)
case TypeBitmap16:
return marshalUint(bb, v, 16)
case TypeBitmap24:
return marshalUint(bb, v, 24)
case TypeBitmap32:
return marshalUint(bb, v, 32)
case TypeBitmap40:
return marshalUint(bb, v, 40)
case TypeBitmap48:
return marshalUint(bb, v, 48)
case TypeBitmap56:
return marshalUint(bb, v, 56)
case TypeBitmap64:
return marshalUint(bb, v, 64)
case TypeUnsignedInt8:
return marshalUint(bb, v, 8)
case TypeUnsignedInt16:
return marshalUint(bb, v, 16)
case TypeUnsignedInt24:
return marshalUint(bb, v, 24)
case TypeUnsignedInt32:
return marshalUint(bb, v, 32)
case TypeUnsignedInt40:
return marshalUint(bb, v, 40)
case TypeUnsignedInt48:
return marshalUint(bb, v, 48)
case TypeUnsignedInt56:
return marshalUint(bb, v, 56)
case TypeUnsignedInt64:
return marshalUint(bb, v, 64)
case TypeSignedInt8:
return marshalInt(bb, v, 8)
case TypeSignedInt16:
return marshalInt(bb, v, 16)
case TypeSignedInt24:
return marshalInt(bb, v, 24)
case TypeSignedInt32:
return marshalInt(bb, v, 32)
case TypeSignedInt40:
return marshalInt(bb, v, 40)
case TypeSignedInt48:
return marshalInt(bb, v, 48)
case TypeSignedInt56:
return marshalInt(bb, v, 56)
case TypeSignedInt64:
return marshalInt(bb, v, 64)
case TypeEnum8:
return marshalUint(bb, v, 8)
case TypeEnum16:
return marshalUint(bb, v, 16)
case TypeStringOctet8:
return marshalString(bb, v, 8)
case TypeStringOctet16:
return marshalString(bb, v, 16)
case TypeStringCharacter8:
return marshalString(bb, v, 8)
case TypeStringCharacter16:
return marshalString(bb, v, 16)
case TypeTimeOfDay:
return marshalTimeOfDay(bb, v)
case TypeDate:
return marshalDate(bb, v)
case TypeUTCTime:
return marshalUTCTime(bb, v)
case TypeClusterID:
return marshalClusterID(bb, v)
case TypeAttributeID:
return marshalAttributeID(bb, v)
case TypeIEEEAddress:
return marshalIEEEAddress(bb, v)
case TypeSecurityKey128:
return marshalSecurityKey(bb, v)
case TypeBACnetOID:
return marshalBACnetOID(bb, v)
case TypeStructure:
return marshalStructure(bb, ctx, v)
case TypeArray, TypeSet, TypeBag:
return marshalSlice(bb, ctx, v)
case TypeFloatSingle:
return marshalFloatSingle(bb, v)
case TypeFloatDouble:
return marshalFloatDouble(bb, v)
default:
return fmt.Errorf("unsupported ZCL type to marshal: %d", dt)
}
}
func marshalData(bb *bitbuffer.BitBuffer, v interface{}, size int) error {
data, ok := v.([]byte)
if !ok {
return errors.New("could not cast value")
}
if len(data) != size {
return fmt.Errorf("data array provided does not match output size")
}
for i := size - 1; i >= 0; i-- {
if err := bb.WriteByte(data[i]); err != nil {
return err
}
}
return nil
}
func marshalBoolean(bb *bitbuffer.BitBuffer, v interface{}) error {
data, ok := v.(bool)
if !ok {
return errors.New("could not cast value")
}
if data {
return bb.WriteByte(0x01)
} else {
return bb.WriteByte(0x00)
}
}
// marshalUint writes any Go unsigned integer value to the bit buffer as a
// little-endian unsigned integer of the given bit width.
func marshalUint(bb *bitbuffer.BitBuffer, v interface{}, bitsize int) error {
	var value uint64
	switch t := v.(type) {
	case uint:
		value = uint64(t)
	case uint8:
		value = uint64(t)
	case uint16:
		value = uint64(t)
	case uint32:
		value = uint64(t)
	case uint64:
		value = t
	default:
		return errors.New("marshalling uint to ZCL type received unsupported value")
	}
	return bb.WriteUint(value, bitbuffer.LittleEndian, bitsize)
}

// marshalInt writes any Go signed integer value to the bit buffer as a
// little-endian signed integer of the given bit width.
func marshalInt(bb *bitbuffer.BitBuffer, v interface{}, bitsize int) error {
	var value int64
	switch t := v.(type) {
	case int:
		value = int64(t)
	case int8:
		value = int64(t)
	case int16:
		value = int64(t)
	case int32:
		value = int64(t)
	case int64:
		value = t
	default:
		return errors.New("marshalling int to ZCL type received unsupported value")
	}
	return bb.WriteInt(value, bitbuffer.LittleEndian, bitsize)
}
// marshalString writes a string with a little-endian length prefix of the
// given bit width; the prefix counts bytes.
func marshalString(bb *bitbuffer.BitBuffer, v interface{}, bitsize int) error {
	s, ok := v.(string)
	if !ok {
		return errors.New("could not cast value")
	}
	return bb.WriteStringLengthPrefixed(s, bitbuffer.LittleEndian, bitsize)
}

// marshalStringRune writes a string whose length prefix counts runes
// (characters) while the payload is still written out byte by byte.
func marshalStringRune(bb *bitbuffer.BitBuffer, v interface{}, bitsize int) error {
	s, ok := v.(string)
	if !ok {
		return errors.New("could not cast value")
	}
	runeCount := uint64(len([]rune(s)))
	if err := bb.WriteUint(runeCount, bitbuffer.LittleEndian, bitsize); err != nil {
		return err
	}
	for _, b := range []byte(s) {
		if err := bb.WriteByte(b); err != nil {
			return err
		}
	}
	return nil
}
// marshalTimeOfDay encodes a TimeOfDay value via the bytecodec marshaller.
func marshalTimeOfDay(bb *bitbuffer.BitBuffer, v interface{}) error {
	value, ok := v.(TimeOfDay)
	if !ok {
		return errors.New("could not cast value")
	}
	return bytecodec.MarshalToBitBuffer(bb, &value)
}

// marshalDate encodes a Date value via the bytecodec marshaller.
func marshalDate(bb *bitbuffer.BitBuffer, v interface{}) error {
	value, ok := v.(Date)
	if !ok {
		return errors.New("could not cast value")
	}
	return bytecodec.MarshalToBitBuffer(bb, &value)
}

// marshalUTCTime encodes a UTCTime as a 32 bit little-endian unsigned integer.
func marshalUTCTime(bb *bitbuffer.BitBuffer, v interface{}) error {
	value, ok := v.(UTCTime)
	if !ok {
		return errors.New("could not cast value")
	}
	return bb.WriteUint(uint64(value), bitbuffer.LittleEndian, 32)
}

// marshalClusterID encodes a cluster identifier as a 16 bit little-endian
// unsigned integer.
func marshalClusterID(bb *bitbuffer.BitBuffer, v interface{}) error {
	value, ok := v.(zigbee.ClusterID)
	if !ok {
		return errors.New("could not cast value")
	}
	return bb.WriteUint(uint64(value), bitbuffer.LittleEndian, 16)
}

// marshalAttributeID encodes an attribute identifier as a 16 bit
// little-endian unsigned integer.
func marshalAttributeID(bb *bitbuffer.BitBuffer, v interface{}) error {
	value, ok := v.(AttributeID)
	if !ok {
		return errors.New("could not cast value")
	}
	return bb.WriteUint(uint64(value), bitbuffer.LittleEndian, 16)
}

// marshalIEEEAddress encodes an IEEE (EUI-64) address as a 64 bit
// little-endian unsigned integer.
func marshalIEEEAddress(bb *bitbuffer.BitBuffer, v interface{}) error {
	value, ok := v.(zigbee.IEEEAddress)
	if !ok {
		return errors.New("could not cast value")
	}
	return bb.WriteUint(uint64(value), bitbuffer.LittleEndian, 64)
}

// marshalSecurityKey encodes a 128 bit network key via the bytecodec
// marshaller.
func marshalSecurityKey(bb *bitbuffer.BitBuffer, v interface{}) error {
	value, ok := v.(zigbee.NetworkKey)
	if !ok {
		return errors.New("could not cast value")
	}
	return bytecodec.MarshalToBitBuffer(bb, &value)
}

// marshalBACnetOID encodes a BACnet object identifier as a 32 bit
// little-endian unsigned integer.
func marshalBACnetOID(bb *bitbuffer.BitBuffer, v interface{}) error {
	value, ok := v.(BACnetOID)
	if !ok {
		return errors.New("could not cast value")
	}
	return bb.WriteUint(uint64(value), bitbuffer.LittleEndian, 32)
}
// marshalStructure writes a ZCL structure: a 16 bit little-endian element
// count followed by each attribute value marshalled in order.
func marshalStructure(bb *bitbuffer.BitBuffer, ctx bytecodec.Context, v interface{}) error {
	values, ok := v.([]AttributeDataTypeValue)
	if !ok {
		return errors.New("could not cast value")
	}
	if err := bb.WriteUint(uint64(len(values)), bitbuffer.LittleEndian, 16); err != nil {
		return err
	}
	for i := range values {
		if err := values[i].Marshal(bb, ctx); err != nil {
			return err
		}
	}
	return nil
}

// marshalSlice writes a ZCL array/set/bag: the element data type byte, a
// 16 bit little-endian element count, then each element marshalled with
// that shared data type.
func marshalSlice(bb *bitbuffer.BitBuffer, ctx bytecodec.Context, v interface{}) error {
	slice, ok := v.(AttributeSlice)
	if !ok {
		return errors.New("could not cast value")
	}
	if err := bb.WriteByte(byte(slice.DataType)); err != nil {
		return err
	}
	if err := bb.WriteUint(uint64(len(slice.Values)), bitbuffer.LittleEndian, 16); err != nil {
		return err
	}
	for _, value := range slice.Values {
		if err := marshalZCLType(bb, ctx, slice.DataType, value); err != nil {
			return err
		}
	}
	return nil
}
func marshalFloatSingle(bb *bitbuffer.BitBuffer, v interface{}) error {
value, ok := v.(float32)
if !ok {
return errors.New("could not cast value")
}
bits := math.Float32bits(value)
return bb.WriteUint(uint64(bits), bitbuffer.LittleEndian, 32)
}
func marshalFloatDouble(bb *bitbuffer.BitBuffer, v interface{}) error {
value, ok := v.(float64)
if !ok {
return errors.New("could not cast value")
}
bits := math.Float64bits(value)
return bb.WriteUint(bits, bitbuffer.LittleEndian, 64)
} | zcl_types_marshal.go | 0.522933 | 0.483283 | zcl_types_marshal.go | starcoder |
package iavl
import (
"bytes"
"fmt"
"strings"
"github.com/pkg/errors"
)
// pathWithLeaf is a path to a leaf node and the leaf node itself.
type pathWithLeaf struct {
	Path PathToLeaf    `json:"path"` // inner nodes; ordering documented on PathToLeaf
	Leaf proofLeafNode `json:"leaf"` // the leaf the path terminates at
}

// String implements fmt.Stringer with no indentation.
func (pwl pathWithLeaf) String() string {
	return pwl.StringIndented("")
}

// StringIndented renders the path and leaf over multiple lines, prefixing
// continuation lines with the given indent string.
func (pwl pathWithLeaf) StringIndented(indent string) string {
	return fmt.Sprintf(`pathWithLeaf{
%s Path: %v
%s Leaf: %v
%s}`,
		indent, pwl.Path.stringIndented(indent+" "),
		indent, pwl.Leaf.stringIndented(indent+" "),
		indent)
}
// verify checks that the leaf node's hash plus the inner nodes merkle-izes
// to the given root. If it returns an error, it means the leaf hash or the
// PathToLeaf is incorrect.
func (pwl pathWithLeaf) verify(root []byte) error {
	return pwl.Path.verify(pwl.Leaf.Hash(), root)
}

// computeRootHash computes the root hash with the leaf node.
// It does not verify the result against anything.
func (pwl pathWithLeaf) computeRootHash() []byte {
	return pwl.Path.computeRootHash(pwl.Leaf.Hash())
}
//----------------------------------------

// PathToLeaf represents an inner path to a leaf node.
// Note that the nodes are ordered such that the last one is closest
// to the root of the tree.
type PathToLeaf []proofInnerNode

// String implements fmt.Stringer with no indentation.
func (pl PathToLeaf) String() string {
	return pl.stringIndented("")
}
// stringIndented renders the path over multiple lines. To keep the output
// manageable only the first 20 inner nodes are rendered; longer paths are
// elided with a single summary entry.
func (pl PathToLeaf) stringIndented(indent string) string {
	if len(pl) == 0 {
		return "empty-PathToLeaf"
	}
	strs := make([]string, len(pl))
	for i, pin := range pl {
		if i == 20 {
			strs[i] = fmt.Sprintf("... (%v total)", len(pl))
			// Drop the never-rendered tail so the pre-sized empty
			// entries do not produce blank lines in the joined output.
			strs = strs[:i+1]
			break
		}
		strs[i] = fmt.Sprintf("%v:%v", i, pin.stringIndented(indent+" "))
	}
	return fmt.Sprintf(`PathToLeaf{
%s %v
%s}`,
		indent, strings.Join(strs, "\n"+indent+" "),
		indent)
}
// verify checks that the leaf node's hash plus the inner nodes merkle-izes
// to the given root. If it returns an error, it means the leafHash or the
// PathToLeaf is incorrect.
func (pl PathToLeaf) verify(leafHash []byte, root []byte) error {
	hash := leafHash
	for i := len(pl) - 1; i >= 0; i-- {
		hash = pl[i].Hash(hash)
	}
	if bytes.Equal(root, hash) {
		return nil
	}
	return errors.Wrap(ErrInvalidProof, "")
}

// computeRootHash computes the root hash assuming some leaf hash.
// It does not verify the result against anything.
func (pl PathToLeaf) computeRootHash(leafHash []byte) []byte {
	hash := leafHash
	for i := len(pl) - 1; i >= 0; i-- {
		hash = pl[i].Hash(hash)
	}
	return hash
}
// isLeftmost reports whether no inner node on the path carries a left
// sibling hash, ie. the path hugs the left edge of the tree.
func (pl PathToLeaf) isLeftmost() bool {
	for i := range pl {
		if len(pl[i].Left) > 0 {
			return false
		}
	}
	return true
}

// isRightmost reports whether no inner node on the path carries a right
// sibling hash, ie. the path hugs the right edge of the tree.
func (pl PathToLeaf) isRightmost() bool {
	for i := range pl {
		if len(pl[i].Right) > 0 {
			return false
		}
	}
	return true
}
// isEmpty reports whether the path contains no inner nodes. len covers both
// the nil and the zero-length case (len of a nil slice is 0 in Go), so the
// previous explicit nil check was redundant (staticcheck S1009).
func (pl PathToLeaf) isEmpty() bool {
	return len(pl) == 0
}
// dropRoot returns the path without its final (root-most) element.
func (pl PathToLeaf) dropRoot() PathToLeaf {
	if pl.isEmpty() {
		return pl
	}
	return pl[:len(pl)-1]
}

// hasCommonRoot reports whether the root-most inner nodes of both paths
// carry identical left and right sibling hashes.
func (pl PathToLeaf) hasCommonRoot(pl2 PathToLeaf) bool {
	if pl.isEmpty() || pl2.isEmpty() {
		return false
	}
	a, b := pl[len(pl)-1], pl2[len(pl2)-1]
	return bytes.Equal(a.Left, b.Left) && bytes.Equal(a.Right, b.Right)
}
// isLeftAdjacentTo reports whether pl leads to the leaf immediately to the
// left of the leaf pl2 leads to. It strips the common root-most prefix of
// the two paths (plus the node at which they diverge), then requires pl to
// be rightmost in its remaining subtree and pl2 leftmost in its.
func (pl PathToLeaf) isLeftAdjacentTo(pl2 PathToLeaf) bool {
	for pl.hasCommonRoot(pl2) {
		pl, pl2 = pl.dropRoot(), pl2.dropRoot()
	}
	// Drop the diverging node itself as well.
	pl, pl2 = pl.dropRoot(), pl2.dropRoot()
	return pl.isRightmost() && pl2.isLeftmost()
}

// Index returns the leaf index implied by the path, accumulated from the
// Size bookkeeping carried by the inner nodes.
// returns -1 if invalid (an inner node carrying both sibling hashes).
func (pl PathToLeaf) Index() (idx int64) {
	for i, node := range pl {
		if node.Left == nil {
			// No left sibling hash at this node: no leaves are
			// skipped here.
			continue
		} else if node.Right == nil {
			// No right sibling hash: the leaves in the sibling
			// subtree precede the target. Its size is this node's
			// Size minus the next path node's Size (or Size-1 at
			// the leaf level). NOTE(review): relies on Size being
			// the subtree leaf count — confirm against Node docs.
			if i < len(pl)-1 {
				idx += node.Size - pl[i+1].Size
			} else {
				idx += node.Size - 1
			}
		} else {
			return -1
		}
	}
	return idx
}
package rgb
import (
"image/color"
"math"
"math/rand"
"github.com/rrothenb/pbr/pkg/geom"
)
// sRGB is the gamma exponent used when converting energy to display bytes.
const sRGB = 1.8

// Energy stores RGB light energy as a 3D Vector.
// X, Y, Z carry the red, green, and blue channels respectively (see ToRGBA).
type Energy geom.Vec

// White is unit energy in every channel; Black is zero energy.
var White = Energy{1, 1, 1}
var Black = Energy{0, 0, 0}
// Merged merges energy b into energy a, attenuating b channel-wise by the
// given signal strength.
func (a Energy) Merged(b Energy, signal Energy) Energy {
	return Energy{
		X: a.X + b.X*signal.X,
		Y: a.Y + b.Y*signal.Y,
		Z: a.Z + b.Z*signal.Z,
	}
}

// Compressed scales the energy so its largest channel equals n, returning
// the compressed energy together with the factor needed to undo the scaling.
func (a Energy) Compressed(n float64) (b Energy, scale float64) {
	peak := math.Max(a.X, math.Max(a.Y, a.Z))
	return a.Scaled(n / peak), peak / n
}
// ToRGBA converts the energy to a fully opaque 8-bit RGBA color using the
// package gamma exponent.
func (a Energy) ToRGBA() color.RGBA {
	r, g, b := rgba(a.X, sRGB), rgba(a.Y, sRGB), rgba(a.Z, sRGB)
	return color.RGBA{R: r, G: g, B: b, A: 255}
}
// rgba gamma-maps a single channel value and clamps it to the 0-255 byte
// range.
func rgba(c, g float64) uint8 {
	mapped := gamma(c, g)
	return uint8(math.Max(0, math.Min(255, mapped)))
}

// gamma applies a power-law gamma curve to a value on the 0-255 scale.
func gamma(c, g float64) float64 {
	return 255 * math.Pow(c/255, 1/g)
}
// Scaled returns energy a with every channel multiplied by n.
func (a Energy) Scaled(n float64) Energy {
	return Energy{X: a.X * n, Y: a.Y * n, Z: a.Z * n}
}

// Zero reports whether every channel is exactly zero.
func (a Energy) Zero() bool {
	return a == Black
}

// Plus returns the channel-wise sum of a and b.
func (a Energy) Plus(b Energy) Energy {
	return Energy{X: a.X + b.X, Y: a.Y + b.Y, Z: a.Z + b.Z}
}

// Minus returns the channel-wise difference of a and b.
func (a Energy) Minus(b Energy) Energy {
	return Energy{X: a.X - b.X, Y: a.Y - b.Y, Z: a.Z - b.Z}
}

// Size returns the Euclidean length of the energy vector.
func (a Energy) Size() float64 {
	return math.Sqrt(a.X*a.X + a.Y*a.Y + a.Z*a.Z)
}

// Limit caps each channel of the energy at n.
func (a Energy) Limit(n float64) Energy {
	return Energy{
		X: math.Min(a.X, n),
		Y: math.Min(a.Y, n),
		Z: math.Min(a.Z, n),
	}
}
// RandomGain randomly amplifies or destroys a signal.
// Strong signals are less likely to be destroyed and gain less amplification.
// Weak signals are more likely to be destroyed but gain more amplification.
// This creates greater overall system throughput (higher energy per signal,
// fewer signals).
func (a Energy) RandomGain(rnd *rand.Rand) Energy {
	survival := geom.Vec(a).Greatest()
	if rnd.Float64() <= survival {
		return a.Scaled(1 / survival)
	}
	return Black
}
// Times returns energy a multiplied channel-wise by energy b.
func (a Energy) Times(b Energy) Energy {
	return Energy{X: a.X * b.X, Y: a.Y * b.Y, Z: a.Z * b.Z}
}

// Variance returns the squared Euclidean distance between two Energies.
func (a Energy) Variance(b Energy) float64 {
	dx := a.X - b.X
	dy := a.Y - b.Y
	dz := a.Z - b.Z
	return dx*dx + dy*dy + dz*dz
}

// Mean returns the average of the three channels.
func (a Energy) Mean() float64 {
	return (a.X + a.Y + a.Z) / 3
}

// Max returns the largest of the three channels.
func (a Energy) Max() float64 {
	return math.Max(math.Max(a.X, a.Y), a.Z)
}

// Lerp linearly interpolates between a and b by n.
func (a Energy) Lerp(b Energy, n float64) Energy {
	return Energy(geom.Vec(a).Lerp(geom.Vec(b), n))
}
func (a *Energy) Set(b Energy) {
a.X = b.X
a.Y = b.Y
a.Z = b.Z
}
func (a *Energy) UnmarshalText(b []byte) error {
v, err := geom.ParseVec(string(b))
if err != nil {
return err
}
a.Set(Energy(v))
return nil
}
func ParseEnergy(s string) (e Energy, err error) {
v, err := geom.ParseVec(s)
return Energy(v), err
} | pkg/rgb/energy.go | 0.88856 | 0.60644 | energy.go | starcoder |
*/
//-----------------------------------------------------------------------------
package sdf
import (
"errors"
"math"
)
//-----------------------------------------------------------------------------
// Box3 is a 3d bounding box.
type Box3 struct {
Min, Max V3
}
// Box2 is a 2d bounding box.
type Box2 struct {
Min, Max V2
}
//-----------------------------------------------------------------------------
// NewBox3 creates a 3d box with a given center and size.
func NewBox3(center, size V3) Box3 {
half := size.MulScalar(0.5)
return Box3{center.Sub(half), center.Add(half)}
}
// NewBox2 creates a 2d box with a given center and size.
func NewBox2(center, size V2) Box2 {
half := size.MulScalar(0.5)
return Box2{center.Sub(half), center.Add(half)}
}
//-----------------------------------------------------------------------------
// Equals test the equality of 3d boxes.
func (a Box3) Equals(b Box3, tolerance float64) bool {
return (a.Min.Equals(b.Min, tolerance) && a.Max.Equals(b.Max, tolerance))
}
// Equals test the equality of 2d boxes.
func (a Box2) Equals(b Box2, tolerance float64) bool {
return (a.Min.Equals(b.Min, tolerance) && a.Max.Equals(b.Max, tolerance))
}
//-----------------------------------------------------------------------------
// Extend returns a box that encloses two 3d boxes.
func (a Box3) Extend(b Box3) Box3 {
return Box3{a.Min.Min(b.Min), a.Max.Max(b.Max)}
}
// Extend returns a box that encloses two 2d boxes.
func (a Box2) Extend(b Box2) Box2 {
return Box2{a.Min.Min(b.Min), a.Max.Max(b.Max)}
}
//-----------------------------------------------------------------------------
// Translate translates a 3d box.
func (a Box3) Translate(v V3) Box3 {
return Box3{a.Min.Add(v), a.Max.Add(v)}
}
// Translate translates a 2d box.
func (a Box2) Translate(v V2) Box2 {
return Box2{a.Min.Add(v), a.Max.Add(v)}
}
//-----------------------------------------------------------------------------
// Size returns the size of a 3d box.
func (a Box3) Size() V3 {
return a.Max.Sub(a.Min)
}
// Size returns the size of a 2d box.
func (a Box2) Size() V2 {
return a.Max.Sub(a.Min)
}
// Center returns the center of a 3d box.
func (a Box3) Center() V3 {
return a.Min.Add(a.Size().MulScalar(0.5))
}
// Center returns the center of a 2d box.
func (a Box2) Center() V2 {
return a.Min.Add(a.Size().MulScalar(0.5))
}
//-----------------------------------------------------------------------------
// ScaleAboutCenter returns a new 2d box scaled about the center of a box.
func (a Box2) ScaleAboutCenter(k float64) Box2 {
return NewBox2(a.Center(), a.Size().MulScalar(k))
}
// ScaleAboutCenter returns a new 3d box scaled about the center of a box.
func (a Box3) ScaleAboutCenter(k float64) Box3 {
return NewBox3(a.Center(), a.Size().MulScalar(k))
}
//-----------------------------------------------------------------------------
// Enlarge returns a new 3d box enlarged by a size vector.
func (a Box3) Enlarge(v V3) Box3 {
v = v.MulScalar(0.5)
return Box3{a.Min.Sub(v), a.Max.Add(v)}
}
// Enlarge returns a new 2d box enlarged by a size vector.
func (a Box2) Enlarge(v V2) Box2 {
v = v.MulScalar(0.5)
return Box2{a.Min.Sub(v), a.Max.Add(v)}
}
//-----------------------------------------------------------------------------
// Vertices returns a slice of 2d box corner vertices.
func (a Box2) Vertices() V2Set {
v := make([]V2, 4)
v[0] = a.Min // bl
v[1] = V2{a.Max.X, a.Min.Y} // br
v[2] = V2{a.Min.X, a.Max.Y} // tl
v[3] = a.Max // tr
return v
}
// Vertices returns a slice of 3d box corner vertices.
func (a Box3) Vertices() V3Set {
v := make([]V3, 8)
v[0] = a.Min
v[1] = V3{a.Min.X, a.Min.Y, a.Max.Z}
v[2] = V3{a.Min.X, a.Max.Y, a.Min.Z}
v[3] = V3{a.Min.X, a.Max.Y, a.Max.Z}
v[4] = V3{a.Max.X, a.Min.Y, a.Min.Z}
v[5] = V3{a.Max.X, a.Min.Y, a.Max.Z}
v[6] = V3{a.Max.X, a.Max.Y, a.Min.Z}
v[7] = a.Max
return v
}
// BottomLeft returns the bottom left corner of a 2d bounding box.
func (a Box2) BottomLeft() V2 {
return a.Min
}
// TopLeft returns the top left corner of a 2d bounding box.
func (a Box2) TopLeft() V2 {
return V2{a.Min.X, a.Max.Y}
}
//-----------------------------------------------------------------------------
// Map2 maps a 2d region to integer grid coordinates.
type Map2 struct {
bb Box2 // bounding box
grid V2i // integral dimension
delta V2
flipy bool // flip the y-axis
}
// NewMap2 returns a 2d region to grid coordinates map.
func NewMap2(bb Box2, grid V2i, flipy bool) (*Map2, error) {
// sanity check the bounding box
bbSize := bb.Size()
if bbSize.X <= 0 || bbSize.Y <= 0 {
return nil, errors.New("bad bounding box")
}
// sanity check the integer dimensions
if grid[0] <= 0 || grid[1] <= 0 {
return nil, errors.New("bad grid dimensions")
}
m := Map2{}
m.bb = bb
m.grid = grid
m.flipy = flipy
m.delta = bbSize.Div(grid.ToV2())
return &m, nil
}
// ToV2 converts grid integer coordinates to 2d region float coordinates.
func (m *Map2) ToV2(p V2i) V2 {
ofs := p.ToV2().AddScalar(0.5).Mul(m.delta)
var origin V2
if m.flipy {
origin = m.bb.TopLeft()
ofs.Y = -ofs.Y
} else {
origin = m.bb.BottomLeft()
}
return origin.Add(ofs)
}
// ToV2i converts 2d region float coordinates to grid integer coordinates.
func (m *Map2) ToV2i(p V2) V2i {
var v V2
if m.flipy {
v = p.Sub(m.bb.TopLeft())
v.Y = -v.Y
} else {
v = p.Sub(m.bb.BottomLeft())
}
return v.Div(m.delta).ToV2i()
}
//-----------------------------------------------------------------------------
// Minimum/Maximum distances from a point to a box
// MinMaxDist2 returns the minimum and maximum dist * dist from a point to a box.
// Points within the box have minimum distance = 0.
func (a Box2) MinMaxDist2(p V2) V2 {
maxDist2 := 0.0
minDist2 := 0.0
// translate the box so p is at the origin
a = a.Translate(p.Neg())
// consider the vertices
vs := a.Vertices()
for i := range vs {
d2 := vs[i].Length2()
if i == 0 {
minDist2 = d2
} else {
minDist2 = math.Min(minDist2, d2)
}
maxDist2 = math.Max(maxDist2, d2)
}
// consider the sides (for the minimum)
withinX := a.Min.X < 0 && a.Max.X > 0
withinY := a.Min.Y < 0 && a.Max.Y > 0
if withinX && withinY {
minDist2 = 0
} else {
if withinX {
d := math.Min(math.Abs(a.Max.Y), math.Abs(a.Min.Y))
minDist2 = math.Min(minDist2, d*d)
}
if withinY {
d := math.Min(math.Abs(a.Max.X), math.Abs(a.Min.X))
minDist2 = math.Min(minDist2, d*d)
}
}
return V2{minDist2, maxDist2}
}
// MinMaxDist2 returns the minimum and maximum dist * dist from a point to a box.
// Points within the box have minimum distance = 0.
func (a Box3) MinMaxDist2(p V3) V2 {
maxDist2 := 0.0
minDist2 := 0.0
// translate the box so p is at the origin
a = a.Translate(p.Neg())
// consider the vertices
vs := a.Vertices()
for i := range vs {
d2 := vs[i].Length2()
if i == 0 {
minDist2 = d2
} else {
minDist2 = math.Min(minDist2, d2)
}
maxDist2 = math.Max(maxDist2, d2)
}
// consider the faces (for the minimum)
withinX := a.Min.X < 0 && a.Max.X > 0
withinY := a.Min.Y < 0 && a.Max.Y > 0
withinZ := a.Min.Z < 0 && a.Max.Z > 0
if withinX && withinY && withinZ {
minDist2 = 0
} else {
if withinX && withinY {
d := math.Min(math.Abs(a.Max.Z), math.Abs(a.Min.Z))
minDist2 = math.Min(minDist2, d*d)
}
if withinX && withinZ {
d := math.Min(math.Abs(a.Max.Y), math.Abs(a.Min.Y))
minDist2 = math.Min(minDist2, d*d)
}
if withinY && withinZ {
d := math.Min(math.Abs(a.Max.X), math.Abs(a.Min.X))
minDist2 = math.Min(minDist2, d*d)
}
}
return V2{minDist2, maxDist2}
}
//----------------------------------------------------------------------------- | sdf/box.go | 0.915949 | 0.637242 | box.go | starcoder |
package main
/*
Given a 2D matrix of zeros and ones that represent an image,
replace all ones that are not connected to a one that's in a
border of the image with zero.
1 represents black.
0 represents white.
Example:
Input:
[
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 1],
[0, 0, 1, 0, 1, 0],
[1, 1, 0, 0, 1, 0],
[1, 0, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 1],
]
Output:
[
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 1, 0],
[1, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 1],
]
*/
/*
Solution:
For each value that's in the border of the matrix,
if the value is a 1, mark it as value that should not be replaced with 0,
repeat the process for each neighbor of the current value.
After all ones that are connected to a one in a border are marked,
go through each element in the matrix and replace it with 0 if it is not marked.
*/
func rows(graph [][]int) int {
return len(graph)
}
func columns(graph [][]int) int {
return len(graph[0])
}
func isOutOfBounds(graph [][]int, rowIndex, columnIndex int) bool {
rows := rows(graph)
columns := columns(graph)
return rowIndex < 0 || rowIndex > rows-1 || columnIndex < 0 || columnIndex > columns-1
}
func dfs(graph [][]int, rowIndex int, columnIndex int, visited map[pair]bool, connectedToBorder map[pair]bool) {
if isOutOfBounds(graph, rowIndex, columnIndex) {
return
}
value := graph[rowIndex][columnIndex]
position := newPair(rowIndex, columnIndex)
if visited[position] || value == 0 {
return
}
// the first call to dfs() is always from a 1 that's in a border,
// so any time we find a 1, we know it is connected to another 1 that's
// connected to a 1 that's in a border.
visited[position] = true
connectedToBorder[position] = true
dfs(graph, rowIndex-1, columnIndex, visited, connectedToBorder)
dfs(graph, rowIndex+1, columnIndex, visited, connectedToBorder)
dfs(graph, rowIndex, columnIndex-1, visited, connectedToBorder)
dfs(graph, rowIndex, columnIndex+1, visited, connectedToBorder)
}
type pair struct {
first int
second int
}
func newPair(first, second int) pair {
return pair{
first: first,
second: second,
}
}
// time O(n * m + n * m)
// space O(n * m)
func RemoveIslands(graph [][]int) {
rows := rows(graph)
columns := columns(graph)
visited := make(map[pair]bool)
connectedToBorder := make(map[pair]bool)
// go through first and last rows and search for neighboring ones whenever a one is found
// [
// [1, 0, 0, 0, 0, 0], <-- visiting columns in this row
// [0, 1, 0, 1, 1, 1],
// [0, 0, 1, 0, 1, 0],
// [1, 1, 0, 0, 1, 0],
// [1, 0, 1, 1, 0, 0],
// [1, 0, 0, 0, 0, 1], <-- and this row
// ]
for columnIndex := 0; columnIndex < columns; columnIndex++ {
if graph[0][columnIndex] == 1 {
dfs(graph, 0, columnIndex, visited, connectedToBorder)
}
if graph[rows-1][columnIndex] == 1 {
dfs(graph, rows-1, columnIndex, visited, connectedToBorder)
}
}
// go through first and last columns and search for neighboring ones whenever a one is found
// [
// visiting rows in this column
// | and this column
// | |
// [1, 0, 0, 0, 0, 0],
// [0, 1, 0, 1, 1, 1],
// [0, 0, 1, 0, 1, 0],
// [1, 1, 0, 0, 1, 0],
// [1, 0, 1, 1, 0, 0],
// [1, 0, 0, 0, 0, 1],
// ]
for rowIndex := 0; rowIndex < rows; rowIndex++ {
if graph[rowIndex][0] == 1 {
dfs(graph, rowIndex, 0, visited, connectedToBorder)
}
if graph[rowIndex][columns-1] == 1 {
dfs(graph, rowIndex, columns-1, visited, connectedToBorder)
}
}
for rowIndex, row := range graph {
for columnIndex := range row {
if connectedToBorder[newPair(rowIndex, columnIndex)] {
continue
}
graph[rowIndex][columnIndex] = 0
}
}
}
func main() {
} | golang/algorithms/others/remove_islands/main.go | 0.647464 | 0.73874 | main.go | starcoder |
package resolver
import (
"context"
models "go.edusense.io/storage/models"
)
// PersonInferenceResolver resolves the query-agnostic PersonInference model.
type PersonInferenceResolver struct {
	Inference models.PersonInference // the inference record being resolved
}

// Posture extracts the collection of posture inferences from the given
// PersonInference resolver. It never returns an error.
func (vi *PersonInferenceResolver) Posture(ctx context.Context) (*PostureResolver, error) {
	return &PostureResolver{Posture: vi.Inference.Posture}, nil
}

// Face extracts the collection of face inferences from the given
// PersonInference resolver. It never returns an error.
func (vi *PersonInferenceResolver) Face(ctx context.Context) (*FaceResolver, error) {
	return &FaceResolver{Face: vi.Inference.Face}, nil
}

// Head extracts the collection of head inferences from the given
// PersonInference resolver. It never returns an error.
func (vi *PersonInferenceResolver) Head(ctx context.Context) (*HeadResolver, error) {
	return &HeadResolver{Head: vi.Inference.Head}, nil
}
// TrackingID extracts the tracking id field from the given PersonInference
// resolver. A non-positive id means "no tracking id" and resolves to nil
// (GraphQL null).
func (vi *PersonInferenceResolver) TrackingID(ctx context.Context) (*int32, error) {
	// Guard first: the original converted to int32 before checking
	// validity, doing dead work and obscuring the contract.
	if vi.Inference.TrackingID <= 0 {
		return nil, nil
	}
	id := int32(vi.Inference.TrackingID)
	return &id, nil
}
// PostureResolver resolves the query-agnostic Posture model.
type PostureResolver struct {
	Posture models.Posture // the posture record being resolved
}

// ArmPose extracts the arm pose (a.k.a. upper body pose) field from the
// given Posture resolver. An empty string resolves to nil (GraphQL null).
func (p *PostureResolver) ArmPose(ctx context.Context) (*string, error) {
	if p.Posture.ArmPose == "" {
		return nil, nil
	}
	return &p.Posture.ArmPose, nil
}

// SitStand extracts the sit vs. stand field from the given Posture
// resolver. An empty string resolves to nil (GraphQL null).
func (p *PostureResolver) SitStand(ctx context.Context) (*string, error) {
	if p.Posture.SitStand == "" {
		return nil, nil
	}
	return &p.Posture.SitStand, nil
}
// CentroidDelta extracts the centroid delta field from the given Posture
// resolver, converting each component to int32. An empty delta resolves to
// nil (GraphQL null).
func (p *PostureResolver) CentroidDelta(ctx context.Context) (*[]int32, error) {
	if len(p.Posture.CentroidDelta) == 0 {
		return nil, nil
	}
	out := make([]int32, 0, len(p.Posture.CentroidDelta))
	for _, c := range p.Posture.CentroidDelta {
		out = append(out, int32(c))
	}
	return &out, nil
}

// ArmDelta extracts the arm keypoint delta field from the given Posture
// resolver, converting each point's components to int32. An empty delta
// resolves to nil (GraphQL null).
func (p *PostureResolver) ArmDelta(ctx context.Context) (*[][]int32, error) {
	if len(p.Posture.ArmDelta) == 0 {
		return nil, nil
	}
	out := make([][]int32, 0, len(p.Posture.ArmDelta))
	for _, point := range p.Posture.ArmDelta {
		converted := make([]int32, 0, len(point))
		for _, c := range point {
			converted = append(converted, int32(c))
		}
		out = append(out, converted)
	}
	return &out, nil
}
// FaceResolver resolves the query-agnostic Face model.
type FaceResolver struct {
	Face models.Face // the face record being resolved
}
// BoundingBox extracts the bounding box of the face from the given Face
// resolver, converting each point's components to int32. An empty box
// resolves to nil (GraphQL null).
func (f *FaceResolver) BoundingBox(ctx context.Context) (*[][]int32, error) {
	if len(f.Face.BoundingBox) == 0 {
		return nil, nil
	}
	out := make([][]int32, 0, len(f.Face.BoundingBox))
	for _, point := range f.Face.BoundingBox {
		converted := make([]int32, 0, len(point))
		for _, c := range point {
			converted = append(converted, int32(c))
		}
		out = append(out, converted)
	}
	return &out, nil
}
// MouthOpen extracts the mouth open/close inference from the given Face
// resolver. An empty string resolves to nil (GraphQL null).
func (f *FaceResolver) MouthOpen(ctx context.Context) (*string, error) {
	if f.Face.MouthOpen == "" {
		return nil, nil
	}
	return &f.Face.MouthOpen, nil
}

// MouthSmile extracts the smile/no-smile inference from the given Face
// resolver. An empty string resolves to nil (GraphQL null).
func (f *FaceResolver) MouthSmile(ctx context.Context) (*string, error) {
	if f.Face.MouthSmile == "" {
		return nil, nil
	}
	return &f.Face.MouthSmile, nil
}
// Orientation extracts the face orientation front/back inference from the
// given Face resolver. An empty string resolves to nil (GraphQL null).
// The context parameter is renamed from the non-idiomatic, exported-style
// `Ctx` to `ctx` for consistency with every sibling resolver method.
func (f *FaceResolver) Orientation(ctx context.Context) (*string, error) {
	if f.Face.Orientation == "" {
		return nil, nil
	}
	return &f.Face.Orientation, nil
}
// HeadResolver resolves the query-agnostic Head model.
type HeadResolver struct {
	Head models.Head // the head record being resolved
}

// Roll extracts the roll angle of the head from the given Head resolver.
func (h *HeadResolver) Roll(ctx context.Context) (float64, error) {
	return float64(h.Head.Roll), nil
}

// Pitch extracts the pitch angle of the head from the given Head resolver.
func (h *HeadResolver) Pitch(ctx context.Context) (float64, error) {
	return float64(h.Head.Pitch), nil
}

// Yaw extracts the yaw angle of the head from the given Head resolver.
func (h *HeadResolver) Yaw(ctx context.Context) (float64, error) {
	return float64(h.Head.Yaw), nil
}
// TranslationVector extracts translation vector from given Head resolver.
func (h *HeadResolver) TranslationVector(ctx context.Context) (*[]float64, error) {
if len(h.Head.TranslationVector) == 0 {
return nil, nil
}
vector := make([]float64, len(h.Head.TranslationVector))
for i, k := range h.Head.TranslationVector {
vector[i] = float64(k)
}
return &vector, nil
}
// GazeVector extracts gaze vector from given Head resolver.
func (h *HeadResolver) GazeVector(ctx context.Context) (*[][]int32, error) {
if len(h.Head.GazeVector) == 0 {
return nil, nil
}
vector := make([][]int32, len(h.Head.GazeVector))
for i, p := range h.Head.GazeVector {
newPoint := make([]int32, len(p))
for j, k := range p {
newPoint[j] = int32(k)
}
vector[i] = newPoint
}
return &vector, nil
} | storage/query/resolver/videoinference.go | 0.802439 | 0.434881 | videoinference.go | starcoder |
package iso20022
// SwitchSubscriptionLegExecution2 is an ISO 20022 message component:
// execution of the subscription part, in a switch between investment funds
// or investment fund classes. Generated-style model code — only comments
// are maintained by hand.
type SwitchSubscriptionLegExecution2 struct {

	// Unique technical identifier for an instance of a leg within a switch.
	LegIdentification *Max35Text `xml:"LegId,omitempty"`

	// Unique identifier for an instance of a leg execution within a switch confirmation.
	LegExecutionIdentification *Max35Text `xml:"LegExctnId,omitempty"`

	// Security that is a sub-set of an investment fund, and is governed by the same investment fund policy, eg, dividend option or valuation currency.
	FinancialInstrumentDetails *FinancialInstrument6 `xml:"FinInstrmDtls"`

	// Number of investment fund units subscribed.
	UnitsNumber *FinancialInstrumentQuantity1 `xml:"UnitsNb"`

	// Net amount of money invested in a specific financial instrument by an investor, expressed in the currency requested by the investor.
	NetAmount *ActiveCurrencyAndAmount `xml:"NetAmt"`

	// Gross amount of money invested in a specific financial instrument by an investor, expressed in the currency requested by the investor.
	GrossAmount *ActiveCurrencyAndAmount `xml:"GrssAmt,omitempty"`

	// Date and time at which a price is applied, according to the terms stated in the prospectus.
	TradeDateTime *DateAndDateTimeChoice `xml:"TradDtTm"`

	// Price at which the order was executed.
	PriceDetails *UnitPrice5 `xml:"PricDtls"`

	// Indicates whether the dividend is included, ie, cum-dividend, in the executed price. When the dividend is not included, the price will be ex-dividend.
	CumDividendIndicator *YesNoIndicator `xml:"CumDvddInd"`

	// Part of the price deemed as accrued income or profit rather than capital. The interim profit amount is used for tax purposes.
	InterimProfitAmount *ProfitAndLoss1Choice `xml:"IntrmPrftAmt,omitempty"`

	// Dividend option chosen by the account owner based on the options offered in the prospectus.
	IncomePreference *IncomePreference1Code `xml:"IncmPref,omitempty"`

	// Currency requested for settlement of cash proceeds.
	RequestedSettlementCurrency *CurrencyCode `xml:"ReqdSttlmCcy,omitempty"`

	// Currency to be used for pricing the fund. This currency must be among the set of currencies in which the price may be expressed, as stated in the prospectus.
	RequestedNAVCurrency *CurrencyCode `xml:"ReqdNAVCcy,omitempty"`

	// Charge for the execution of an order.
	ChargeGeneralDetails *TotalCharges2 `xml:"ChrgGnlDtls,omitempty"`

	// Commission for the execution of an investment fund order.
	CommissionGeneralDetails *TotalCommissions2 `xml:"ComssnGnlDtls,omitempty"`

	// Tax applicable to execution of an investment fund order.
	TaxGeneralDetails *TotalTaxes2 `xml:"TaxGnlDtls,omitempty"`

	// Parameters used to execute the settlement of an investment fund order.
	SettlementAndCustodyDetails *FundSettlementParameters3 `xml:"SttlmAndCtdyDtls,omitempty"`

	// Indicates whether the financial instrument is to be physically delivered.
	PhysicalDeliveryIndicator *YesNoIndicator `xml:"PhysDlvryInd"`

	// Information related to physical delivery of the securities.
	PhysicalDeliveryDetails *DeliveryParameters3 `xml:"PhysDlvryDtls,omitempty"`
}
// SetLegIdentification records the unambiguous identifier of this switch leg.
func (s *SwitchSubscriptionLegExecution2) SetLegIdentification(value string) {
	id := Max35Text(value)
	s.LegIdentification = &id
}

// SetLegExecutionIdentification records the identifier assigned to the
// execution of this leg.
func (s *SwitchSubscriptionLegExecution2) SetLegExecutionIdentification(value string) {
	id := Max35Text(value)
	s.LegExecutionIdentification = &id
}

// AddFinancialInstrumentDetails allocates the financial-instrument details
// and returns the new value so the caller can populate it.
func (s *SwitchSubscriptionLegExecution2) AddFinancialInstrumentDetails() *FinancialInstrument6 {
	details := new(FinancialInstrument6)
	s.FinancialInstrumentDetails = details
	return details
}

// AddUnitsNumber allocates the units quantity and returns it for population.
func (s *SwitchSubscriptionLegExecution2) AddUnitsNumber() *FinancialInstrumentQuantity1 {
	units := new(FinancialInstrumentQuantity1)
	s.UnitsNumber = units
	return units
}

// SetNetAmount stores the net amount of money in the given currency.
func (s *SwitchSubscriptionLegExecution2) SetNetAmount(value, currency string) {
	s.NetAmount = NewActiveCurrencyAndAmount(value, currency)
}

// SetGrossAmount stores the gross amount of money in the given currency.
func (s *SwitchSubscriptionLegExecution2) SetGrossAmount(value, currency string) {
	s.GrossAmount = NewActiveCurrencyAndAmount(value, currency)
}

// AddTradeDateTime allocates the trade date/time choice and returns it for
// population.
func (s *SwitchSubscriptionLegExecution2) AddTradeDateTime() *DateAndDateTimeChoice {
	tradeDateTime := new(DateAndDateTimeChoice)
	s.TradeDateTime = tradeDateTime
	return tradeDateTime
}

// AddPriceDetails allocates the unit-price details and returns them for
// population.
func (s *SwitchSubscriptionLegExecution2) AddPriceDetails() *UnitPrice5 {
	price := new(UnitPrice5)
	s.PriceDetails = price
	return price
}

// SetCumDividendIndicator records whether the leg executes cum dividend.
func (s *SwitchSubscriptionLegExecution2) SetCumDividendIndicator(value string) {
	ind := YesNoIndicator(value)
	s.CumDividendIndicator = &ind
}

// AddInterimProfitAmount allocates the interim profit choice and returns it
// for population.
func (s *SwitchSubscriptionLegExecution2) AddInterimProfitAmount() *ProfitAndLoss1Choice {
	profit := new(ProfitAndLoss1Choice)
	s.InterimProfitAmount = profit
	return profit
}

// SetIncomePreference records the investor's income preference code.
func (s *SwitchSubscriptionLegExecution2) SetIncomePreference(value string) {
	pref := IncomePreference1Code(value)
	s.IncomePreference = &pref
}

// SetRequestedSettlementCurrency records the currency requested for
// settlement.
func (s *SwitchSubscriptionLegExecution2) SetRequestedSettlementCurrency(value string) {
	ccy := CurrencyCode(value)
	s.RequestedSettlementCurrency = &ccy
}

// SetRequestedNAVCurrency records the currency requested for pricing the
// fund (net asset value).
func (s *SwitchSubscriptionLegExecution2) SetRequestedNAVCurrency(value string) {
	ccy := CurrencyCode(value)
	s.RequestedNAVCurrency = &ccy
}

// AddChargeGeneralDetails allocates the charge details and returns them for
// population.
func (s *SwitchSubscriptionLegExecution2) AddChargeGeneralDetails() *TotalCharges2 {
	charges := new(TotalCharges2)
	s.ChargeGeneralDetails = charges
	return charges
}

// AddCommissionGeneralDetails allocates the commission details and returns
// them for population.
func (s *SwitchSubscriptionLegExecution2) AddCommissionGeneralDetails() *TotalCommissions2 {
	commissions := new(TotalCommissions2)
	s.CommissionGeneralDetails = commissions
	return commissions
}

// AddTaxGeneralDetails allocates the tax details and returns them for
// population.
func (s *SwitchSubscriptionLegExecution2) AddTaxGeneralDetails() *TotalTaxes2 {
	taxes := new(TotalTaxes2)
	s.TaxGeneralDetails = taxes
	return taxes
}

// AddSettlementAndCustodyDetails allocates the settlement parameters and
// returns them for population.
func (s *SwitchSubscriptionLegExecution2) AddSettlementAndCustodyDetails() *FundSettlementParameters3 {
	settlement := new(FundSettlementParameters3)
	s.SettlementAndCustodyDetails = settlement
	return settlement
}

// SetPhysicalDeliveryIndicator records whether the financial instrument is
// to be physically delivered.
func (s *SwitchSubscriptionLegExecution2) SetPhysicalDeliveryIndicator(value string) {
	ind := YesNoIndicator(value)
	s.PhysicalDeliveryIndicator = &ind
}
// AddPhysicalDeliveryDetails allocates the physical-delivery parameters and
// returns the new value so the caller can populate it.
func (s *SwitchSubscriptionLegExecution2) AddPhysicalDeliveryDetails() *DeliveryParameters3 {
	s.PhysicalDeliveryDetails = new(DeliveryParameters3)
	return s.PhysicalDeliveryDetails
} | SwitchSubscriptionLegExecution2.go | 0.789193 | 0.418222 | SwitchSubscriptionLegExecution2.go | starcoder
package compute
// MachineImagesClient is a client for the MachineImage functions of the Compute API.
// It embeds ResourceClient, which supplies the generic create/get/delete
// plumbing against the paths configured in MachineImages below.
type MachineImagesClient struct {
	ResourceClient
}
// MachineImages returns a MachineImagesClient that accesses the
// MachineImage functions of the Compute API through this compute client.
func (c *ComputeClient) MachineImages() *MachineImagesClient {
	client := &MachineImagesClient{
		ResourceClient: ResourceClient{
			ComputeClient:       c,
			ResourceDescription: "MachineImage",
			ContainerPath:       "/machineimage/",
			ResourceRootPath:    "/machineimage",
		},
	}
	return client
}
// MachineImage describes an existing Machine Image.
type MachineImage struct {
	// Account of the associated Object Storage Classic instance.
	Account string `json:"account"`
	// Dictionary of attributes to be made available to the instance.
	Attributes map[string]interface{} `json:"attributes"`
	// Last time when this image was audited.
	Audited string `json:"audited"`
	// Description of the image.
	Description string `json:"description"`
	// Description of the state of the machine image if there is an error.
	ErrorReason string `json:"error_reason"`
	// Dictionary of hypervisor-specific attributes.
	Hypervisor map[string]interface{} `json:"hypervisor"`
	// The format of the image.
	ImageFormat string `json:"image_format"`
	// Name of the machine image file uploaded to Object Storage Classic.
	File string `json:"file"`
	// Name of the machine image.
	Name string `json:"name"`
	// Indicates that the image file is available in Object Storage Classic.
	NoUpload bool `json:"no_upload"`
	// The OS platform of the image.
	Platform string `json:"platform"`
	// Size values of the image file.
	Sizes map[string]interface{} `json:"sizes"`
	// The state of the uploaded machine image.
	State string `json:"state"`
	// Uniform Resource Identifier.
	URI string `json:"uri"`
}
// CreateMachineImageInput defines a Machine Image to be created.
type CreateMachineImageInput struct {
	// Account of the associated Object Storage Classic instance.
	Account string `json:"account"`
	// Dictionary of attributes to be made available to the instance.
	Attributes map[string]interface{} `json:"attributes,omitempty"`
	// Description of the image.
	Description string `json:"description,omitempty"`
	// Name of the machine image file uploaded to Object Storage Classic.
	File string `json:"file,omitempty"`
	// Name of the machine image.
	Name string `json:"name"`
	// Indicates that the image file is available in Object Storage Classic.
	// CreateMachineImage forces this to true before submitting.
	NoUpload bool `json:"no_upload"`
	// Size values of the image file. Defaulted to {"total": 0} when nil.
	Sizes map[string]interface{} `json:"sizes"`
}
// CreateMachineImage creates a new Machine Image with the given parameters
// and returns the resulting image with its name unqualified.
func (c *MachineImagesClient) CreateMachineImage(createInput *CreateMachineImageInput) (*MachineImage, error) {
	// The API requires a sizes dictionary; default to {"total": 0} when the
	// caller left it unset.
	if createInput.Sizes == nil {
		createInput.Sizes = map[string]interface{}{"total": 0}
	}

	// `no_upload` must always be true.
	createInput.NoUpload = true
	createInput.Name = c.getQualifiedName(createInput.Name)

	var image MachineImage
	if err := c.createResource(createInput, &image); err != nil {
		return nil, err
	}
	return c.success(&image)
}
// DeleteMachineImageInput describes the MachineImage to delete.
type DeleteMachineImageInput struct {
	// The name of the MachineImage.
	Name string `json:"name"`
}
// DeleteMachineImage deletes the MachineImage with the given name.
// NOTE(review): unlike GetMachineImage, the name is not passed through
// getQualifiedName here — confirm whether deleteResource qualifies it
// internally, or whether callers must pass a fully qualified name.
func (c *MachineImagesClient) DeleteMachineImage(deleteInput *DeleteMachineImageInput) error {
	return c.deleteResource(deleteInput.Name)
}
// GetMachineImageInput describes the MachineImage to get.
type GetMachineImageInput struct {
	// Account of the associated Object Storage Classic instance.
	Account string `json:"account"`
	// The name of the Machine Image.
	Name string `json:"name"`
}
// GetMachineImage retrieves the MachineImage with the given name and
// returns it with its name unqualified.
func (c *MachineImagesClient) GetMachineImage(getInput *GetMachineImageInput) (*MachineImage, error) {
	name := c.getQualifiedName(getInput.Name)
	getInput.Name = name

	var image MachineImage
	if err := c.getResource(name, &image); err != nil {
		return nil, err
	}
	return c.success(&image)
}
// success strips the name qualification from a freshly fetched or created
// image before handing it back to the caller.
func (c *MachineImagesClient) success(result *MachineImage) (*MachineImage, error) {
	c.unqualify(&result.Name)
	return result, nil
} | vendor/github.com/mitchellh/packer/vendor/github.com/hashicorp/go-oracle-terraform/compute/machine_images.go | 0.847558 | 0.447038 | machine_images.go | starcoder
package days
const input2 = `
forward 5
down 8
down 6
down 7
down 8
forward 7
down 3
up 6
forward 6
down 2
forward 5
down 6
up 3
down 4
forward 4
down 6
down 1
up 5
forward 5
down 1
down 7
up 2
down 7
forward 1
forward 6
down 1
up 1
up 4
forward 3
forward 6
forward 1
forward 4
up 3
forward 1
forward 4
down 9
forward 4
forward 8
up 8
forward 5
up 4
up 3
down 8
forward 5
down 4
forward 1
forward 7
down 1
forward 8
down 4
forward 2
forward 7
forward 9
up 4
down 3
forward 7
forward 6
down 8
forward 2
forward 5
forward 4
down 6
forward 6
up 5
down 3
down 6
down 5
down 7
down 8
up 5
down 5
forward 5
forward 4
up 3
down 7
down 3
forward 4
down 2
forward 4
forward 3
forward 4
forward 9
forward 6
forward 8
up 8
down 8
up 5
down 4
down 8
up 7
up 8
down 6
down 3
forward 2
forward 7
up 1
up 2
forward 2
down 7
down 1
up 9
forward 6
forward 4
down 2
up 6
down 2
down 1
down 3
up 6
down 1
down 8
forward 7
up 8
forward 5
forward 8
down 8
forward 6
forward 8
down 3
down 4
down 6
up 2
forward 6
up 9
forward 4
forward 8
up 4
down 8
forward 8
down 8
down 4
down 5
forward 7
down 6
down 6
up 2
up 1
forward 7
forward 8
forward 4
forward 9
down 7
forward 4
up 5
down 3
up 4
down 9
down 2
down 8
forward 3
forward 5
forward 7
forward 9
forward 5
forward 8
forward 6
forward 4
forward 6
forward 7
forward 2
down 1
down 8
down 4
down 5
down 6
up 3
up 2
forward 4
down 4
forward 7
up 6
up 9
down 1
down 3
down 1
up 3
up 1
down 2
up 5
forward 1
down 7
forward 9
down 4
up 4
down 6
down 3
forward 4
up 6
up 4
forward 1
up 7
down 1
down 7
down 7
forward 9
down 3
down 3
forward 6
down 2
forward 7
up 4
up 8
down 8
forward 7
forward 6
down 7
forward 5
up 6
up 6
down 9
up 6
up 2
forward 9
forward 1
up 5
up 3
down 9
up 8
down 7
up 7
forward 5
down 7
down 4
forward 2
forward 3
forward 5
down 1
up 6
down 6
up 6
down 8
down 3
down 4
forward 9
down 3
forward 3
up 1
down 2
forward 8
down 7
up 9
forward 1
down 3
forward 1
forward 8
down 3
forward 8
forward 6
down 1
down 9
forward 2
down 1
down 6
up 1
up 7
down 9
forward 6
forward 5
forward 2
up 6
down 6
forward 6
up 3
down 7
down 8
forward 5
down 7
forward 8
down 8
forward 4
down 6
forward 4
down 7
up 5
down 5
down 5
down 4
down 3
forward 8
forward 1
down 8
down 2
forward 3
forward 7
forward 3
down 5
down 6
down 8
down 6
forward 9
forward 4
forward 8
down 5
down 7
forward 4
up 5
down 8
up 6
up 7
down 6
down 8
forward 3
up 6
forward 7
down 4
up 1
up 8
forward 3
down 6
down 1
forward 7
down 1
down 9
forward 6
down 4
forward 3
forward 1
down 5
down 9
down 9
down 5
down 8
down 7
forward 1
forward 5
down 2
forward 2
forward 1
down 8
forward 6
down 3
forward 4
up 2
up 8
forward 7
forward 4
down 8
up 6
forward 3
up 1
up 2
forward 5
forward 9
down 5
forward 2
forward 5
up 6
down 1
down 1
down 6
forward 6
down 7
forward 5
forward 8
down 7
down 5
forward 9
forward 1
up 6
down 7
forward 1
forward 4
down 5
down 6
up 3
up 8
up 5
down 8
down 8
down 6
down 2
down 3
down 9
forward 8
forward 7
forward 7
up 5
down 5
forward 9
up 8
up 5
forward 1
down 9
down 9
forward 9
forward 4
forward 6
up 9
up 5
up 3
down 9
up 7
up 1
down 3
down 9
down 7
forward 6
down 7
forward 7
forward 8
down 2
forward 5
up 1
down 6
up 9
forward 5
up 9
down 2
down 3
forward 5
down 9
forward 9
forward 2
forward 8
down 1
forward 8
up 1
forward 3
up 1
down 1
forward 9
down 2
forward 2
up 1
up 8
down 2
down 7
down 5
up 2
up 6
down 9
down 7
down 7
up 6
up 8
down 7
forward 5
down 4
down 5
up 8
up 6
down 6
forward 6
up 6
down 1
down 1
down 1
forward 1
down 8
down 4
down 5
down 2
down 5
up 8
up 8
down 3
down 6
down 1
forward 6
forward 5
forward 1
down 3
down 4
up 9
down 3
up 8
forward 5
down 5
forward 2
down 8
down 2
up 1
forward 7
up 8
forward 7
down 3
down 1
down 3
forward 4
down 5
down 8
forward 8
forward 3
forward 7
down 7
forward 4
down 1
forward 3
up 2
down 7
down 1
forward 4
forward 7
down 3
down 1
forward 4
down 3
forward 2
up 9
down 5
down 9
forward 5
up 5
down 3
up 6
up 8
down 7
down 3
down 9
forward 6
forward 8
forward 3
down 6
up 8
forward 8
forward 9
down 4
down 1
forward 2
down 2
up 2
down 5
down 1
down 3
forward 4
down 3
up 8
up 6
up 5
down 4
forward 3
up 6
forward 6
forward 2
down 8
down 5
forward 3
up 1
forward 5
forward 9
forward 5
down 5
forward 3
forward 6
forward 5
forward 3
down 1
down 1
down 1
down 9
forward 8
forward 2
forward 4
forward 8
down 1
up 8
down 1
down 6
down 5
up 8
down 4
forward 8
forward 6
down 6
forward 2
forward 7
forward 2
up 7
forward 4
up 1
up 8
down 3
down 2
down 3
up 7
down 9
up 5
down 1
down 3
up 5
down 6
up 9
down 4
down 7
down 6
down 4
forward 5
forward 6
down 8
forward 3
forward 8
up 5
up 6
up 8
forward 8
forward 1
down 6
forward 3
forward 3
forward 6
down 3
down 2
forward 5
down 5
forward 6
down 3
down 9
down 8
down 6
down 6
forward 1
up 5
down 9
forward 3
forward 3
down 2
forward 8
forward 3
forward 2
forward 5
down 4
down 1
up 2
down 1
down 1
forward 5
down 7
up 7
down 9
down 8
down 6
forward 3
forward 5
down 3
down 6
up 3
up 2
up 8
down 3
up 3
down 6
forward 7
forward 4
up 5
forward 1
up 3
forward 8
down 2
down 5
down 2
forward 4
forward 4
down 4
up 8
down 1
up 2
forward 2
forward 9
forward 4
down 3
down 7
forward 1
down 2
forward 8
down 8
forward 3
down 7
forward 9
forward 6
up 1
forward 3
up 2
up 3
forward 6
down 8
up 9
down 2
down 9
down 6
down 4
forward 5
forward 3
up 7
forward 7
up 7
up 6
down 7
down 2
up 7
down 5
up 9
forward 3
up 6
up 6
up 6
up 1
forward 5
forward 5
down 8
forward 6
forward 7
down 3
down 4
down 2
down 4
down 1
forward 7
down 7
down 5
forward 8
up 6
up 8
forward 8
forward 2
forward 4
down 6
down 4
down 2
down 3
forward 8
forward 6
down 3
forward 7
forward 4
up 8
down 9
forward 5
up 5
up 5
up 7
forward 3
up 1
down 2
forward 5
forward 5
up 1
forward 4
down 6
up 5
up 3
forward 9
down 9
down 6
down 1
down 2
down 4
down 7
forward 3
up 5
forward 2
down 3
forward 7
up 8
up 3
forward 6
up 7
up 1
up 2
down 5
forward 5
down 3
down 5
down 6
up 1
down 2
up 1
forward 3
down 3
down 4
down 6
down 1
down 3
forward 9
forward 1
down 1
up 3
forward 4
forward 7
forward 4
down 2
forward 6
forward 2
forward 7
down 9
forward 8
forward 3
up 8
down 9
up 8
forward 5
forward 9
down 4
forward 1
up 9
forward 2
down 6
up 3
forward 1
forward 3
forward 8
down 7
down 3
down 5
down 2
down 2
forward 4
forward 1
down 2
up 8
down 2
forward 3
down 2
down 6
down 1
up 1
down 7
down 3
forward 3
forward 1
forward 9
down 9
down 2
up 1
forward 9
up 2
down 2
forward 3
down 4
forward 9
forward 5
up 5
forward 2
up 3
forward 8
down 3
forward 5
forward 5
down 8
up 9
forward 7
up 2
up 2
up 1
up 7
down 8
forward 9
forward 9
up 6
down 5
forward 7
down 9
down 8
down 5
down 3
down 2
forward 6
down 7
forward 3
up 5
forward 1
up 7
forward 3
down 5
down 9
down 8
forward 2
up 4
forward 7
forward 5
forward 8
forward 7
up 7
forward 4
up 7
down 9
forward 1
forward 3
down 3
forward 4
down 3
forward 3
down 5
down 1
forward 6
down 4
down 3
down 2
up 1
down 1
down 6
down 6
forward 9
down 5
forward 1
up 4
forward 7
down 8
forward 1
forward 9
forward 7
down 1
down 3
up 2
down 5
up 6
forward 2
up 2
down 7
down 9
forward 3
up 5
up 7
down 4
forward 6
down 8
forward 7
up 1
up 4
forward 4
down 9
forward 9
forward 9
down 3
forward 5
forward 1
down 3
down 8
forward 7
down 4
forward 3
down 3
forward 8
forward 2
forward 6
up 9
forward 2
down 9
forward 2
down 1
forward 9
up 1
up 4
up 1
down 1
forward 4
up 9
up 8
down 1
down 3
down 2
forward 9
down 7
down 4
forward 2
up 9
down 7
down 1
down 9
forward 2
down 2
forward 9
down 5
up 1
down 3
up 6
down 4
forward 8
down 2
down 2
down 9
forward 9
forward 2
down 1
forward 6
down 2
up 4
down 8
up 4
down 6
down 2
forward 7
down 3
up 3
forward 1
up 4
forward 5
down 7
down 8
forward 7
forward 3
down 5
up 6
down 7
down 1
up 7
down 1
forward 6
forward 3
forward 3
forward 7
` | days/input2.go | 0.793826 | 0.492249 | input2.go | starcoder |
package onshape
import (
"encoding/json"
)
// BTPExpressionSwitch2632 struct for BTPExpressionSwitch2632.
// It extends BTPExpression9 with switch-specific fields; all pointer fields
// are optional and omitted from JSON when nil.
type BTPExpressionSwitch2632 struct {
	BTPExpression9
	// BtType — serialized type discriminator for this node.
	BtType *string `json:"btType,omitempty"`
	// Choices — presumably the map of case values to result expressions;
	// verify against the BTPLiteralMap256 definition.
	Choices *BTPLiteralMap256 `json:"choices,omitempty"`
	// Selector — the expression whose value selects among Choices.
	Selector *BTPExpression9 `json:"selector,omitempty"`
	// SpaceAfterSwitch — whitespace/formatting token following the switch
	// keyword.
	SpaceAfterSwitch *BTPSpace10 `json:"spaceAfterSwitch,omitempty"`
}
// NewBTPExpressionSwitch2632 constructs an empty BTPExpressionSwitch2632.
// Properties with schema defaults receive them; the set of required
// arguments may change if the API's required properties change.
func NewBTPExpressionSwitch2632() *BTPExpressionSwitch2632 {
	return &BTPExpressionSwitch2632{}
}

// NewBTPExpressionSwitch2632WithDefaults constructs a
// BTPExpressionSwitch2632 with only defaulted properties populated; it does
// not guarantee that properties required by the API are set.
func NewBTPExpressionSwitch2632WithDefaults() *BTPExpressionSwitch2632 {
	return &BTPExpressionSwitch2632{}
}
// GetBtType returns the BtType field value, or the zero value when unset.
func (o *BTPExpressionSwitch2632) GetBtType() string {
	if o != nil && o.BtType != nil {
		return *o.BtType
	}
	var zero string
	return zero
}

// GetBtTypeOk returns the BtType field value and an ok flag reporting
// whether it has been set.
func (o *BTPExpressionSwitch2632) GetBtTypeOk() (*string, bool) {
	if o != nil && o.BtType != nil {
		return o.BtType, true
	}
	return nil, false
}

// HasBtType reports whether the BtType field has been set.
func (o *BTPExpressionSwitch2632) HasBtType() bool {
	return o != nil && o.BtType != nil
}

// SetBtType stores a pointer to v in the BtType field.
func (o *BTPExpressionSwitch2632) SetBtType(v string) {
	o.BtType = &v
}

// GetChoices returns the Choices field value, or the zero value when unset.
func (o *BTPExpressionSwitch2632) GetChoices() BTPLiteralMap256 {
	if o != nil && o.Choices != nil {
		return *o.Choices
	}
	var zero BTPLiteralMap256
	return zero
}

// GetChoicesOk returns the Choices field value and an ok flag reporting
// whether it has been set.
func (o *BTPExpressionSwitch2632) GetChoicesOk() (*BTPLiteralMap256, bool) {
	if o != nil && o.Choices != nil {
		return o.Choices, true
	}
	return nil, false
}

// HasChoices reports whether the Choices field has been set.
func (o *BTPExpressionSwitch2632) HasChoices() bool {
	return o != nil && o.Choices != nil
}

// SetChoices stores a pointer to v in the Choices field.
func (o *BTPExpressionSwitch2632) SetChoices(v BTPLiteralMap256) {
	o.Choices = &v
}

// GetSelector returns the Selector field value, or the zero value when
// unset.
func (o *BTPExpressionSwitch2632) GetSelector() BTPExpression9 {
	if o != nil && o.Selector != nil {
		return *o.Selector
	}
	var zero BTPExpression9
	return zero
}

// GetSelectorOk returns the Selector field value and an ok flag reporting
// whether it has been set.
func (o *BTPExpressionSwitch2632) GetSelectorOk() (*BTPExpression9, bool) {
	if o != nil && o.Selector != nil {
		return o.Selector, true
	}
	return nil, false
}

// HasSelector reports whether the Selector field has been set.
func (o *BTPExpressionSwitch2632) HasSelector() bool {
	return o != nil && o.Selector != nil
}

// SetSelector stores a pointer to v in the Selector field.
func (o *BTPExpressionSwitch2632) SetSelector(v BTPExpression9) {
	o.Selector = &v
}

// GetSpaceAfterSwitch returns the SpaceAfterSwitch field value, or the zero
// value when unset.
func (o *BTPExpressionSwitch2632) GetSpaceAfterSwitch() BTPSpace10 {
	if o != nil && o.SpaceAfterSwitch != nil {
		return *o.SpaceAfterSwitch
	}
	var zero BTPSpace10
	return zero
}

// GetSpaceAfterSwitchOk returns the SpaceAfterSwitch field value and an ok
// flag reporting whether it has been set.
func (o *BTPExpressionSwitch2632) GetSpaceAfterSwitchOk() (*BTPSpace10, bool) {
	if o != nil && o.SpaceAfterSwitch != nil {
		return o.SpaceAfterSwitch, true
	}
	return nil, false
}

// HasSpaceAfterSwitch reports whether the SpaceAfterSwitch field has been
// set.
func (o *BTPExpressionSwitch2632) HasSpaceAfterSwitch() bool {
	return o != nil && o.SpaceAfterSwitch != nil
}

// SetSpaceAfterSwitch stores a pointer to v in the SpaceAfterSwitch field.
func (o *BTPExpressionSwitch2632) SetSpaceAfterSwitch(v BTPSpace10) {
	o.SpaceAfterSwitch = &v
}
// MarshalJSON flattens the embedded BTPExpression9 and this type's own
// optional fields into a single JSON object.
func (o BTPExpressionSwitch2632) MarshalJSON() ([]byte, error) {
	out := map[string]interface{}{}

	// Serialize the embedded parent first and merge its keys into out.
	parent, err := json.Marshal(o.BTPExpression9)
	if err != nil {
		return []byte{}, err
	}
	if err = json.Unmarshal(parent, &out); err != nil {
		return []byte{}, err
	}

	// Overlay this type's own fields, skipping unset ones.
	if o.BtType != nil {
		out["btType"] = o.BtType
	}
	if o.Choices != nil {
		out["choices"] = o.Choices
	}
	if o.Selector != nil {
		out["selector"] = o.Selector
	}
	if o.SpaceAfterSwitch != nil {
		out["spaceAfterSwitch"] = o.SpaceAfterSwitch
	}
	return json.Marshal(out)
}
// NullableBTPExpressionSwitch2632 wraps a BTPExpressionSwitch2632 pointer
// together with an explicit "was set" flag, so JSON null can be
// distinguished from an absent field.
type NullableBTPExpressionSwitch2632 struct {
	value *BTPExpressionSwitch2632
	isSet bool
}

// Get returns the wrapped value (possibly nil).
func (v NullableBTPExpressionSwitch2632) Get() *BTPExpressionSwitch2632 {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableBTPExpressionSwitch2632) Set(val *BTPExpressionSwitch2632) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set (or UnmarshalJSON) has been called.
func (v NullableBTPExpressionSwitch2632) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableBTPExpressionSwitch2632) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableBTPExpressionSwitch2632 returns a wrapper already marked as set.
func NewNullableBTPExpressionSwitch2632(val *BTPExpressionSwitch2632) *NullableBTPExpressionSwitch2632 {
	return &NullableBTPExpressionSwitch2632{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (JSON null when nil).
func (v NullableBTPExpressionSwitch2632) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks the wrapper as set,
// even when the payload is JSON null.
func (v *NullableBTPExpressionSwitch2632) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | onshape/model_btp_expression_switch_2632.go | 0.729231 | 0.417093 | model_btp_expression_switch_2632.go | starcoder
package main
import (
"github.com/g3n/engine/core"
"github.com/g3n/engine/geometry"
"github.com/g3n/engine/gls"
"github.com/g3n/engine/graphic"
"github.com/g3n/engine/light"
"github.com/g3n/engine/material"
"github.com/g3n/engine/math32"
"github.com/g3n/engine/window"
"math/rand"
)
// Raycast demonstrates picking scene objects with a mouse-driven raycaster.
type Raycast struct {
	rc *core.Raycaster // shared raycaster, re-aimed on every mouse click
}
// init registers this demo in the global test table under "other.raycast".
func init() {
	TestMap["other.raycast"] = &Raycast{}
}
// Initialize builds a scene containing one instance of every pickable
// graphic type (meshes, sprite, lines, points), creates the raycaster, and
// subscribes to mouse-down events. Statement order matters: each object is
// added to the scene as it is configured, and the raycaster must exist
// before the mouse callback can fire.
func (t *Raycast) Initialize(ctx *Context) {
	// Visual reference axes at the origin.
	axis := graphic.NewAxisHelper(1)
	ctx.Scene.Add(axis)
	// Directional white light so the lit materials are visible.
	l1 := light.NewDirectional(math32.NewColor(1, 1, 1), 1.0)
	l1.SetPosition(0, 0, 5)
	ctx.Scene.Add(l1)
	// Plane
	geom1 := geometry.NewPlane(1.5, 1, 1, 1)
	mat1 := material.NewStandard(math32.NewColor(0, 1, 0))
	mat1.SetSide(material.SideFront)
	mesh1 := graphic.NewMesh(geom1, mat1)
	mesh1.SetPosition(-1.2, 0, 0)
	ctx.Scene.Add(mesh1)
	// Box
	geom2 := geometry.NewBox(1, 1, 1, 1, 1, 1)
	mat2 := material.NewPhong(math32.NewColor(1, 0, 0))
	mat2.SetSide(material.SideFront)
	mesh2 := graphic.NewMesh(geom2, mat2)
	mesh2.SetPosition(1.2, 0, 0)
	ctx.Scene.Add(mesh2)
	// Sphere
	geom3 := geometry.NewSphere(0.5, 16, 16, 0, math32.Pi*2, 0, math32.Pi)
	mat3 := material.NewStandard(math32.NewColor(0, 1, 1))
	mesh3 := graphic.NewMesh(geom3, mat3)
	mesh3.SetPosition(0, 1, -1)
	ctx.Scene.Add(mesh3)
	// Open ended cylinder
	geom4 := geometry.NewCylinder(0.5, 0.5, 1, 16, 1, 0, 2*math32.Pi, false, false)
	mat4 := material.NewPhong(math32.NewColor(1, 1, 0))
	mat4.SetSide(material.SideDouble)
	mesh4 := graphic.NewMesh(geom4, mat4)
	mesh4.SetPosition(0, -1.2, -0.5)
	ctx.Scene.Add(mesh4)
	// Circle
	geom5 := geometry.NewCircle(0.6, 5, 0, 2*math32.Pi)
	mat5 := material.NewStandard(math32.NewColor(0.5, 0.5, 0.9))
	mat5.SetSide(material.SideDouble)
	mesh5 := graphic.NewMesh(geom5, mat5)
	mesh5.SetPosition(-1.2, -1.2, -0.5)
	mesh5.SetRotation(math32.Pi/4, 0, 0)
	ctx.Scene.Add(mesh5)
	// Torus
	geom6 := geometry.NewTorus(0.5, 0.2, 16, 16, math32.Pi)
	mat6 := material.NewStandard(math32.NewColor(0, 0, 0.5))
	mat6.SetSide(material.SideDouble)
	mesh6 := graphic.NewMesh(geom6, mat6)
	mesh6.SetPosition(1.5, -1.2, -1)
	ctx.Scene.Add(mesh6)
	// Cone (Cylinder), semi-transparent
	geom7 := geometry.NewCylinder(0, 0.5, 1, 16, 16, 0, 2*math32.Pi, true, true)
	mat7 := material.NewPhong(math32.NewColor(0.8, 0.7, 0.3))
	mat7.SetSide(material.SideFront)
	mat7.SetOpacity(0.6)
	mesh7 := graphic.NewMesh(geom7, mat7)
	mesh7.SetPosition(0, 0, 0)
	ctx.Scene.Add(mesh7)
	// Sprite
	mat8 := material.NewStandard(&math32.Color{0, 0.3, 1})
	mesh8 := graphic.NewSprite(1, 1, mat8)
	mesh8.SetPosition(2, -2, -2)
	mesh8.SetRotationZ(math32.Pi / 4)
	mesh8.SetScale(2, 1, 1)
	ctx.Scene.Add(mesh8)
	// Line strip: three horizontal bars, vertices packed as x,y,z triples
	geom9 := geometry.NewGeometry()
	positions := math32.NewArrayF32(0, 0)
	positions.Append(
		-1, 0, -1, 1, 0, -1,
		-1, 1, -1, 1, 1, -1,
		-1, 2, -1, 1, 2, -1,
	)
	geom9.AddVBO(gls.NewVBO().AddAttrib("VertexPosition", 3).SetBuffer(positions))
	mat9 := material.NewStandard(math32.NewColor(1, 0, 0))
	mesh9 := graphic.NewLineStrip(geom9, mat9)
	mesh9.SetPosition(-1.5, 0.5, -0.4)
	ctx.Scene.Add(mesh9)
	// Line segments: pairs of endpoints radiating from the origin.
	// NOTE(review): the (0,0,0)->(0,0,-1) segment appears twice below —
	// looks like a copy/paste duplicate; harmless visually, confirm intent.
	geom10 := geometry.NewGeometry()
	positions = math32.NewArrayF32(0, 0)
	positions.Append(
		0, 0, 0, 1, 0, 0,
		0, 0, 0, -1, 0, 0,
		0, 0, 0, 0, 1, 0,
		0, 0, 0, 0, -1, 0,
		0, 0, 0, 0, 0, -1,
		0, 0, 0, 0, 0, -1,
		0, 0, 0, 0, 0, 1,
		0.1, 0.1, 0.1, 0.5, 0.5, 0.5,
	)
	geom10.AddVBO(gls.NewVBO().AddAttrib("VertexPosition", 3).SetBuffer(positions))
	mat10 := material.NewStandard(math32.NewColor(0, 0, 1))
	mesh10 := graphic.NewLines(geom10, mat10)
	mesh10.SetScale(0.8, 0.8, 0.8)
	mesh10.SetPosition(1, 1.5, 0)
	ctx.Scene.Add(mesh10)
	// Points: 30 random positions in the unit cube (not seeded, so the
	// layout differs between runs)
	geom11 := geometry.NewGeometry()
	positions = math32.NewArrayF32(0, 0)
	for i := 0; i < 30; i++ {
		x := rand.Float32()
		y := rand.Float32()
		z := rand.Float32()
		positions.Append(x, y, z)
	}
	geom11.AddVBO(gls.NewVBO().AddAttrib("VertexPosition", 3).SetBuffer(positions))
	mat11 := material.NewPoint(math32.NewColor(0, 0, 0))
	mat11.SetSize(1000)
	mesh11 := graphic.NewPoints(geom11, mat11)
	mesh11.SetPosition(-2, -1, 0)
	ctx.Scene.Add(mesh11)
	// Creates the raycaster; precisions control how close a click must be
	// to count as hitting a line or a point.
	t.rc = core.NewRaycaster(&math32.Vector3{}, &math32.Vector3{})
	t.rc.LinePrecision = 0.05
	t.rc.PointPrecision = 0.05
	// Subscribe to mouse button down events
	ctx.Win.Subscribe(window.OnMouseDown, func(evname string, ev interface{}) {
		t.onMouse(ctx, ev)
	})
}
// onMouse handles mouse button presses: it casts a ray from the camera
// through the clicked point and toggles the emissive color of the first
// graphic object hit.
func (t *Raycast) onMouse(ctx *Context, ev interface{}) {
	me := ev.(*window.MouseEvent)

	// Normalize window coordinates to the [-1, 1] device range.
	width, height := ctx.Win.GetSize()
	ndcX := 2*(me.Xpos/float32(width)) - 1
	ndcY := -2*(me.Ypos/float32(height)) + 1

	// Aim the raycaster from the camera through the click position and
	// gather every intersected object in the scene (recursively).
	ctx.Camera.SetRaycaster(t.rc, ndcX, ndcY)
	intersects := t.rc.IntersectObjects(ctx.Scene.Children(), true)
	if len(intersects) == 0 {
		return
	}

	// Only the closest hit is of interest.
	obj := intersects[0].Object
	ig, ok := obj.(graphic.IGraphic)
	if !ok {
		log.Debug("Not graphic:%T", obj)
		return
	}
	imat := ig.GetGraphic().GetMaterial(0)

	// Toggle the emissive color between black and white on materials that
	// expose it.
	type emissive interface {
		EmissiveColor() math32.Color
		SetEmissiveColor(*math32.Color)
	}
	mat, ok := imat.(emissive)
	if !ok {
		return
	}
	if c := mat.EmissiveColor(); c.R == 1 && c.G == 1 && c.B == 1 {
		mat.SetEmissiveColor(&math32.Color{0, 0, 0})
	} else {
		mat.SetEmissiveColor(&math32.Color{1, 1, 1})
	}
}
// Render is called once per frame; this demo is entirely event driven, so
// there is nothing to update here.
func (t *Raycast) Render(ctx *Context) {
} | other_raycast.go | 0.553023 | 0.410993 | other_raycast.go | starcoder
package main
import (
"fmt"
"io/ioutil"
"log"
"strings"
"strconv"
"math"
)
// Pair is a 2-D grid coordinate (X grows right, Y grows up) used to record
// the points a wire passes through.
type Pair struct {
	X, Y int64
}
func main() {
// read input
data, err := ioutil.ReadFile("input.txt")
handleError(err)
// parse input
wires := strings.Split(string(data), "\n")
wire1 := wires[0]
wire2 := wires[1]
// follow and store path of wire 1
wire1visited := make(map[Pair]bool)
startingPoint := Pair{0, 0}
for _, step := range strings.Split(wire1, ",") {
direction := string(step[0])
stepsInt, err := strconv.Atoi(string(step[1:]))
steps := int64(stepsInt)
handleError(err)
switch direction {
case "U":
for y := startingPoint.Y+1; y <= startingPoint.Y+steps; y++ {
wire1visited[Pair{startingPoint.X, y}] = true
}
startingPoint = Pair{startingPoint.X, startingPoint.Y+steps}
case "R":
for x := startingPoint.X+1; x <= startingPoint.X+steps; x++ {
wire1visited[Pair{x, startingPoint.Y}] = true
}
startingPoint = Pair{startingPoint.X+steps, startingPoint.Y}
case "D":
for y := startingPoint.Y-1; y >= startingPoint.Y-steps; y-- {
wire1visited[Pair{startingPoint.X, y}] = true
}
startingPoint = Pair{startingPoint.X, startingPoint.Y-steps}
case "L":
for x := startingPoint.X-1; x >= startingPoint.X-steps; x-- {
wire1visited[Pair{x, startingPoint.Y}] = true
}
startingPoint = Pair{startingPoint.X-steps, startingPoint.Y}
}
}
// follow wire 2 and store intersections with wire 1
intersections := []Pair{}
startingPoint = Pair{0, 0}
for _, step := range strings.Split(wire2, ",") {
direction := string(step[0])
stepsInt, err := strconv.Atoi(string(step[1:]))
steps := int64(stepsInt)
handleError(err)
switch direction {
case "U":
for y := startingPoint.Y+1; y <= startingPoint.Y+steps; y++ {
if wire1visited[Pair{startingPoint.X, y}] {
intersections = append(intersections, Pair{startingPoint.X, y})
}
}
startingPoint = Pair{startingPoint.X, startingPoint.Y+steps}
case "R":
for x := startingPoint.X+1; x <= startingPoint.X+steps; x++ {
if wire1visited[Pair{x, startingPoint.Y}] {
intersections = append(intersections, Pair{x, startingPoint.Y})
}
}
startingPoint = Pair{startingPoint.X+steps, startingPoint.Y}
case "D":
for y := startingPoint.Y-1; y >= startingPoint.Y-steps; y-- {
if wire1visited[Pair{startingPoint.X, y}] {
intersections = append(intersections, Pair{startingPoint.X, y})
}
}
startingPoint = Pair{startingPoint.X, startingPoint.Y-steps}
case "L":
for x := startingPoint.X-1; x >= startingPoint.X-steps; x-- {
if wire1visited[Pair{x, startingPoint.Y}] {
intersections = append(intersections, Pair{x, startingPoint.Y})
}
}
startingPoint = Pair{startingPoint.X-steps, startingPoint.Y}
}
}
// calculate manhattan distance for each point
var minDist int64 = math.MaxInt64
minDistPoint := Pair{0, 0}
for _, intersection := range intersections {
dist := manhattanDistanceToOrigo(intersection.X, intersection.Y)
if dist < minDist {
minDistPoint = intersection
minDist = dist
}
}
fmt.Printf("Minimum distance of a wire intersection to central port: %d (intersection %v)", minDist, minDistPoint)
}
// handleError terminates the program via log.Fatal when err is non-nil;
// it is a no-op for a nil error.
func handleError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err)
}
// manhattanDistanceToOrigo returns the Manhattan (taxicab) distance from
// the point (x, y) to the origin: |x| + |y|.
func manhattanDistanceToOrigo(x int64, y int64) int64 {
	dist := abs(x)
	dist += abs(y)
	return dist
}
// abs returns the absolute value of x.
// Note: abs(math.MinInt64) overflows and returns math.MinInt64 itself —
// irrelevant for puzzle-sized inputs.
func abs(x int64) int64 {
	if x < 0 {
		return -x
	}
	return x
} | 2019/3/part1/main.go | 0.530236 | 0.50238 | main.go | starcoder
package header
/**
* The From header field indicates the logical identity of the initiator
* of the request, possibly the user's address-of-record. This may be different
* from the initiator of the dialog. Requests sent by the callee to the caller
* use the callee's address in the From header field.
* <p>
* Like the To header field, it contains a URI and optionally a display name,
* encapsulated in a {@link javax.sip.address.Address}. It is used by SIP
* elements to determine which processing rules to apply to a request (for
* example, automatic call rejection). As such, it is very important that the
* From URI not contain IP addresses or the FQDN of the host on which the UA is
* running, since these are not logical names.
* <p>
* The From header field allows for a display name. A UAC SHOULD use
* the display name "Anonymous", along with a syntactically correct, but
* otherwise meaningless URI (like sip:thisis@anonymous.invalid), if the
* identity of the client is to remain hidden.
* <p>
* Usually, the value that populates the From header field in requests
* generated by a particular UA is pre-provisioned by the user or by the
* administrators of the user's local domain. If a particular UA is used by
* multiple users, it might have switchable profiles that include a URI
* corresponding to the identity of the profiled user. Recipients of requests
* can authenticate the originator of a request in order to ascertain that
* they are who their From header field claims they are.
* <p>
* Two From header fields are equivalent if their URIs match, and their
* parameters match. Extension parameters in one header field, not present in
* the other are ignored for the purposes of comparison. This means that the
* display name and presence or absence of angle brackets do not affect
* matching.
* <ul>
* <li> The "Tag" parameter - is used in the To and From header fields of SIP
* messages. It serves as a general mechanism to identify a dialog, which is
* the combination of the Call-ID along with two tags, one from each
* participant in the dialog. When a User Agent sends a request outside of a dialog,
* it contains a From tag only, providing "half" of the dialog ID. The dialog
* is completed from the response(s), each of which contributes the second half
* in the To header field. When a tag is generated by a User Agent for insertion into
* a request or response, it MUST be globally unique and cryptographically
* random with at least 32 bits of randomness. Besides the requirement for
* global uniqueness, the algorithm for generating a tag is implementation
* specific. Tags are helpful in fault tolerant systems, where a dialog is to
* be recovered on an alternate server after a failure. A UAS can select the
* tag in such a way that a backup can recognize a request as part of a dialog
* on the failed server, and therefore determine that it should attempt to
* recover the dialog and any other state associated with it.
* </ul>
* For Example:<br>
* <code>From: "Bob" sips:<EMAIL> ;tag=a48s<br>
* From: sip:+<EMAIL>;tag=887s<br>
* From: Anonymous sip:<EMAIL>;tag=hyh8</code>
*/
type FromHeader interface {
	ParametersHeader
	AddressHeader
	// SetTag sets the tag parameter of the FromHeader. The tag in the From
	// field of a request identifies the peer of the dialog. When a UA sends
	// a request outside of a dialog, it contains a From tag only, providing
	// "half" of the dialog identifier.
	//
	// The From header MUST contain a new "tag" parameter, chosen by the UAC
	// application. Once the initial From "tag" is assigned it should not be
	// manipulated by the application: on the client side the application is
	// responsible for tag assignment on outbound requests, and after dialog
	// establishment the stack takes care of tag assignment.
	//
	// It returns an error which signals that a problem was reached
	// unexpectedly while parsing the tag value.
	// (The former signature named the result `ParseException`; result names
	// in Go interface methods are ignored for satisfaction, so dropping the
	// misleading Java-style name is fully compatible.)
	SetTag(tag string) error
	// GetTag returns the tag parameter of the FromHeader. The tag identifies
	// the peer of the dialog and must always be present.
	GetTag() string
} | sip/header/FromHeader.go | 0.893728 | 0.641605 | FromHeader.go | starcoder |
package bipbuffer
import "errors"
// BipBuffer is a spsc circular non-thread safe buffer that always
// supports writing a contiguous chunk of data. Write requests that
// cannot fit in an available contiguous area will be failed with
// an error.
type BipBuffer struct {
buf []byte // backing storage; spans are handed out as sub-slices of this
idxRegionA int // start index of the primary committed region (A)
sizeOfRegionA int // number of committed bytes in region A
idxRegionB int // start index of the secondary committed region (B); B always sits before A in the buffer
sizeOfRegionB int // number of committed bytes in region B
idxReserve int // start index of the currently reserved (not yet committed) span
sizeOfReserve int // length of the current reservation; 0 when none is active
}
// New creates an empty BipBuffer backed by a byte slice of the given size.
func New(size uint64) *BipBuffer {
	bb := &BipBuffer{}
	bb.buf = make([]byte, size)
	return bb
}
// Reserve reserves space for writing at an index in the buffer, of length
// equal to size, and returns that index together with the writable span as
// a byte slice. It returns a nil slice when no contiguous area of the
// requested size is available.
func (bb *BipBuffer) Reserve(size int) (int, []byte) {
	if bb.sizeOfRegionB != 0 {
		// Both regions exist: new data can only go into the gap between
		// the end of region B and the start of region A.
		availableSpace := bb.getFreeSpaceInRegionB()
		if availableSpace == 0 {
			return 0, nil
		}
		if size > availableSpace {
			return 0, nil
		}
		bb.sizeOfReserve = size
		bb.idxReserve = bb.idxRegionB + bb.sizeOfRegionB
	} else {
		// Only region A is present
		availableSpace := bb.getFreeSpaceAfterRegionA()
		if availableSpace >= bb.idxRegionA {
			// At least as much room after region A as before it: reserve
			// directly behind A so the region can keep growing in place.
			if availableSpace == 0 {
				return 0, nil
			}
			if size > availableSpace {
				return 0, nil
			}
			bb.sizeOfReserve = size
			bb.idxReserve = bb.idxRegionA + bb.sizeOfRegionA
		} else {
			// More room before region A than after it: start region B at
			// the front of the buffer, provided the request fits there.
			if bb.idxRegionA == 0 {
				return 0, nil
			}
			// The reservation [0, size) must fit into the idxRegionA free
			// bytes in front of region A. The previous check compared size
			// against the *size* of region A instead, which could hand out
			// a span overlapping committed data.
			if size > bb.idxRegionA {
				return 0, nil
			}
			bb.sizeOfReserve = size
			bb.idxReserve = 0
		}
	}
	return bb.idxReserve, bb.buf[bb.idxReserve : bb.idxReserve+bb.sizeOfReserve]
}
// Commit makes the first size bytes of the currently reserved space
// permanent in the buffer. If the asked size is more than the current
// reservation, only the reserved span is committed. The reservation is
// consumed either way.
func (bb *BipBuffer) Commit(size int) {
	if size > bb.sizeOfReserve {
		size = bb.sizeOfReserve
	}
	if bb.sizeOfRegionA == 0 && bb.sizeOfRegionB == 0 {
		// Buffer held no committed data: the committed span becomes region A.
		bb.idxRegionA = bb.idxReserve
		bb.sizeOfRegionA = size
		bb.idxReserve = 0
		bb.sizeOfReserve = 0
		return
	}
	if bb.idxReserve == bb.idxRegionA+bb.sizeOfRegionA {
		// Reservation is contiguous with region A: extend A.
		bb.sizeOfRegionA += size
	} else {
		// Otherwise the reservation belongs to (or starts) region B.
		bb.sizeOfRegionB += size
	}
	bb.idxReserve = 0
	bb.sizeOfReserve = 0
	// NOTE: the redundant bare `return` that used to end this function was
	// removed (staticcheck S1023); behavior is unchanged.
}
// Decommit frees already committed space of length size in the buffer.
// Data is consumed from the front of region A; once A is fully consumed,
// region B (if any) is promoted to become the new region A.
// NOTE(review): when size exceeds region A's length, the excess is ignored
// rather than carried over into region B — confirm callers never
// over-decommit.
func (bb *BipBuffer) Decommit(size int) {
if size >= bb.sizeOfRegionA {
bb.idxRegionA = bb.idxRegionB
bb.sizeOfRegionA = bb.sizeOfRegionB
bb.idxRegionB = 0
bb.sizeOfRegionB = 0
} else {
bb.idxRegionA += size
bb.sizeOfRegionA -= size
}
}
// GetContiguousBlock returns the bytes of the first committed region, or a
// nil slice when nothing is committed. Calling it in a loop (decommitting
// in between) drains all data out of the buffer.
func (bb *BipBuffer) GetContiguousBlock() []byte {
	if bb.sizeOfRegionA > 0 {
		return bb.buf[bb.idxRegionA : bb.idxRegionA+bb.sizeOfRegionA]
	}
	return nil
}
// PeekAt returns a byte slice representing a region starting at
// index idx of length size. It will throw error when idx doesn't
// belong to any region inside the buffer.
func (bb *BipBuffer) PeekAt(idx, size int) ([]byte, error) {
// The requested span must lie inside one of the committed regions;
// anything else would expose reserved or stale bytes.
if !bb.isAreaInRegionA(idx, size) && !bb.isAreaInRegionB(idx, size) {
return nil, errors.New("invalid index")
}
return bb.buf[idx : idx+size], nil
}
// Capacity returns capacity of the buffer.
// It deliberately reports cap, not len: after Grow the backing slice is
// re-sliced to the length of the copied data while its capacity is the
// doubled allocation, so cap is the true storage size.
func (bb *BipBuffer) Capacity() int {
return cap(bb.buf)
}
// CommittedSize returns the number of committed bytes across both regions.
func (bb *BipBuffer) CommittedSize() int {
	total := bb.sizeOfRegionA
	total += bb.sizeOfRegionB
	return total
}
// Grow will increase the underlying buffer size to twice
// of the current size.
// All committed data is compacted into one contiguous run at the front of
// the new buffer and re-committed as a single region A.
func (bb *BipBuffer) Grow() {
newBuf := make([]byte, 2*cap(bb.buf))
n := 0
// Drain every committed region in order, copying into the new buffer.
for {
b := bb.GetContiguousBlock()
if b == nil {
break
}
k := copy(newBuf[n:], b)
n += k
bb.Decommit(k)
}
// Only the copied prefix is kept as the slice length; later writes rely
// on slicing up to cap(bb.buf), which still reflects the doubled
// allocation (see Capacity, which also reads cap rather than len).
bb.buf = newBuf[:n]
bb.idxReserve = 0
bb.sizeOfReserve = n
bb.Commit(n)
}
// isAreaInRegionA reports whether the byte range [idx, idx+size) lies
// entirely inside committed region A.
func (bb *BipBuffer) isAreaInRegionA(idx, size int) bool {
	if bb.sizeOfRegionA == 0 {
		return false
	}
	// The whole span, not just its start, must fall inside the region. The
	// previous check only compared size against the region length and even
	// accepted idx == region end, which let PeekAt return bytes that run
	// past the committed area.
	return idx >= bb.idxRegionA &&
		idx+size <= bb.idxRegionA+bb.sizeOfRegionA
}
// isAreaInRegionB reports whether the byte range [idx, idx+size) lies
// entirely inside committed region B.
func (bb *BipBuffer) isAreaInRegionB(idx, size int) bool {
	if bb.sizeOfRegionB == 0 {
		return false
	}
	// Mirror of isAreaInRegionA: require the complete span to sit within
	// the region instead of only checking its starting index and size
	// against the region length.
	return idx >= bb.idxRegionB &&
		idx+size <= bb.idxRegionB+bb.sizeOfRegionB
}
// getFreeSpaceAfterRegionA reports how many bytes are free between the end
// of region A and the end of the underlying buffer.
func (bb *BipBuffer) getFreeSpaceAfterRegionA() int {
	endOfA := bb.idxRegionA + bb.sizeOfRegionA
	return cap(bb.buf) - endOfA
}
// getFreeSpaceInRegionB reports how many free bytes remain between the end
// of region B and the start of region A.
func (bb *BipBuffer) getFreeSpaceInRegionB() int {
// Region B stands before Region A
return bb.idxRegionA - (bb.idxRegionB + bb.sizeOfRegionB)
} | bipbuffer/bip_buffer.go | 0.810441 | 0.580857 | bip_buffer.go | starcoder |
package astits
import (
"fmt"
"time"
"github.com/asticode/go-astikit"
)
// parseDVBTime parses a DVB time
// This field is coded as 16 bits giving the 16 LSBs of MJD followed by 24 bits coded as 6 digits in 4 - bit Binary
// Coded Decimal (BCD). If the start time is undefined (e.g. for an event in a NVOD reference service) all bits of the
// field are set to "1".
// I apologize for the computation which is really messy but details are given in the documentation
// Page: 160 | Annex C | Link: https://www.dvb.org/resources/public/standards/a38_dvb-si_specification.pdf
func parseDVBTime(i *astikit.BytesIterator) (t time.Time, err error) {
	// Get next 2 bytes
	var bs []byte
	if bs, err = i.NextBytes(2); err != nil {
		err = fmt.Errorf("astits: fetching next bytes failed: %w", err)
		return
	}
	// Date: decode the Modified Julian Date per Annex C.
	var mjd = uint16(bs[0])<<8 | uint16(bs[1])
	var yt = int((float64(mjd) - 15078.2) / 365.25)
	var mt = int((float64(mjd) - 14956.1 - float64(int(float64(yt)*365.25))) / 30.6001)
	var d = int(float64(mjd) - 14956 - float64(int(float64(yt)*365.25)) - float64(int(float64(mt)*30.6001)))
	var k int
	if mt == 14 || mt == 15 {
		k = 1
	}
	var y = yt + k
	var m = mt - 1 - k*12
	// Annex C defines y as the offset from 1900, so build the date directly.
	// The previous round-trip through time.Parse with a two-digit-year,
	// zero-padded layout (and a discarded error) silently produced the zero
	// time for years >= 2000 and for single-digit months or days.
	t = time.Date(1900+y, time.Month(m), d, 0, 0, 0, 0, time.UTC)
	// Time
	var s time.Duration
	if s, err = parseDVBDurationSeconds(i); err != nil {
		err = fmt.Errorf("astits: parsing DVB duration seconds failed: %w", err)
		return
	}
	t = t.Add(s)
	return
}
// parseDVBDurationMinutes parses a minutes duration
// 16 bit field containing the duration of the event in hours, minutes. format: 4 digits, 4 - bit BCD = 16 bit
func parseDVBDurationMinutes(i *astikit.BytesIterator) (d time.Duration, err error) {
var bs []byte
if bs, err = i.NextBytes(2); err != nil {
err = fmt.Errorf("astits: fetching next bytes failed: %w", err)
return
}
// One BCD byte for hours, one for minutes.
d = parseDVBDurationByte(bs[0])*time.Hour + parseDVBDurationByte(bs[1])*time.Minute
return
}
// parseDVBDurationSeconds parses a seconds duration
// 24 bit field containing the duration of the event in hours, minutes, seconds. format: 6 digits, 4 - bit BCD = 24 bit
func parseDVBDurationSeconds(i *astikit.BytesIterator) (d time.Duration, err error) {
var bs []byte
if bs, err = i.NextBytes(3); err != nil {
err = fmt.Errorf("astits: fetching next bytes failed: %w", err)
return
}
// One BCD byte each for hours, minutes and seconds.
d = parseDVBDurationByte(bs[0])*time.Hour + parseDVBDurationByte(bs[1])*time.Minute + parseDVBDurationByte(bs[2])*time.Second
return
}
// parseDVBDurationByte parses a duration byte holding two BCD digits: the
// high nibble is the tens digit, the low nibble the units digit. Note that
// >> and * share precedence in Go, so i>>4*10 evaluates as (i>>4)*10.
func parseDVBDurationByte(i byte) time.Duration {
return time.Duration(uint8(i)>>4*10 + uint8(i)&0xf)
} | dvb.go | 0.605566 | 0.460168 | dvb.go | starcoder |
package datadog
import (
"encoding/json"
"fmt"
)
// LogsCategoryProcessor Use the Category Processor to add a new attribute (without spaces or special characters in the new attribute name) to a log matching a provided search query. Use categories to create groups for an analytical view. For example, URL groups, machine groups, environments, and response time buckets. **Notes**: - The syntax of the query is the one of Logs Explorer search bar. The query can be done on any log attribute or tag, whether it is a facet or not. Wildcards can also be used inside your query. - Once the log has matched one of the Processor queries, it stops. Make sure they are properly ordered in case a log could match several queries. - The names of the categories must be unique. - Once defined in the Category Processor, you can map categories to log status using the Log Status Remapper.
type LogsCategoryProcessor struct {
	// Array of filters to match or not a log and their corresponding `name`to assign a custom value to the log.
	Categories []LogsCategoryProcessorCategory `json:"categories"`
	// Whether or not the processor is enabled.
	IsEnabled *bool `json:"is_enabled,omitempty"`
	// Name of the processor.
	Name *string `json:"name,omitempty"`
	// Name of the target attribute which value is defined by the matching category.
	Target string `json:"target"`
	Type LogsCategoryProcessorType `json:"type"`
	// UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct.
	// The tag was the malformed `json:-` (not a legal key:"value" struct tag, flagged by go vet);
	// `json:"-"` is the correct form to exclude the field from reflection-based encoding.
	UnparsedObject map[string]interface{} `json:"-"`
}
// NewLogsCategoryProcessor instantiates a new LogsCategoryProcessor with
// the given required fields. Defaulted properties are assigned as well:
// is_enabled starts out as false. The argument list will change when the
// set of required API properties changes.
func NewLogsCategoryProcessor(categories []LogsCategoryProcessorCategory, target string, type_ LogsCategoryProcessorType) *LogsCategoryProcessor {
	isEnabled := false
	return &LogsCategoryProcessor{
		Categories: categories,
		IsEnabled:  &isEnabled,
		Target:     target,
		Type:       type_,
	}
}
// NewLogsCategoryProcessorWithDefaults instantiates a new LogsCategoryProcessor object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewLogsCategoryProcessorWithDefaults() *LogsCategoryProcessor {
this := LogsCategoryProcessor{}
// is_enabled defaults to false and type to the category_processor enum constant.
var isEnabled bool = false
this.IsEnabled = &isEnabled
var type_ LogsCategoryProcessorType = LOGSCATEGORYPROCESSORTYPE_CATEGORY_PROCESSOR
this.Type = type_
return &this
}
// Generated accessors: each optional (pointer-typed) field gets Get/GetOk/Has/Set,
// where Get returns the zero value when unset; required fields get Get/GetOk/Set
// only. The nil-receiver guards make every getter safe to call on a nil struct.
// GetCategories returns the Categories field value
func (o *LogsCategoryProcessor) GetCategories() []LogsCategoryProcessorCategory {
if o == nil {
var ret []LogsCategoryProcessorCategory
return ret
}
return o.Categories
}
// GetCategoriesOk returns a tuple with the Categories field value
// and a boolean to check if the value has been set.
func (o *LogsCategoryProcessor) GetCategoriesOk() (*[]LogsCategoryProcessorCategory, bool) {
if o == nil {
return nil, false
}
return &o.Categories, true
}
// SetCategories sets field value
func (o *LogsCategoryProcessor) SetCategories(v []LogsCategoryProcessorCategory) {
o.Categories = v
}
// GetIsEnabled returns the IsEnabled field value if set, zero value otherwise.
func (o *LogsCategoryProcessor) GetIsEnabled() bool {
if o == nil || o.IsEnabled == nil {
var ret bool
return ret
}
return *o.IsEnabled
}
// GetIsEnabledOk returns a tuple with the IsEnabled field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *LogsCategoryProcessor) GetIsEnabledOk() (*bool, bool) {
if o == nil || o.IsEnabled == nil {
return nil, false
}
return o.IsEnabled, true
}
// HasIsEnabled returns a boolean if a field has been set.
func (o *LogsCategoryProcessor) HasIsEnabled() bool {
if o != nil && o.IsEnabled != nil {
return true
}
return false
}
// SetIsEnabled gets a reference to the given bool and assigns it to the IsEnabled field.
func (o *LogsCategoryProcessor) SetIsEnabled(v bool) {
o.IsEnabled = &v
}
// GetName returns the Name field value if set, zero value otherwise.
func (o *LogsCategoryProcessor) GetName() string {
if o == nil || o.Name == nil {
var ret string
return ret
}
return *o.Name
}
// GetNameOk returns a tuple with the Name field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *LogsCategoryProcessor) GetNameOk() (*string, bool) {
if o == nil || o.Name == nil {
return nil, false
}
return o.Name, true
}
// HasName returns a boolean if a field has been set.
func (o *LogsCategoryProcessor) HasName() bool {
if o != nil && o.Name != nil {
return true
}
return false
}
// SetName gets a reference to the given string and assigns it to the Name field.
func (o *LogsCategoryProcessor) SetName(v string) {
o.Name = &v
}
// GetTarget returns the Target field value
func (o *LogsCategoryProcessor) GetTarget() string {
if o == nil {
var ret string
return ret
}
return o.Target
}
// GetTargetOk returns a tuple with the Target field value
// and a boolean to check if the value has been set.
func (o *LogsCategoryProcessor) GetTargetOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.Target, true
}
// SetTarget sets field value
func (o *LogsCategoryProcessor) SetTarget(v string) {
o.Target = v
}
// GetType returns the Type field value
func (o *LogsCategoryProcessor) GetType() LogsCategoryProcessorType {
if o == nil {
var ret LogsCategoryProcessorType
return ret
}
return o.Type
}
// GetTypeOk returns a tuple with the Type field value
// and a boolean to check if the value has been set.
func (o *LogsCategoryProcessor) GetTypeOk() (*LogsCategoryProcessorType, bool) {
if o == nil {
return nil, false
}
return &o.Type, true
}
// SetType sets field value
func (o *LogsCategoryProcessor) SetType(v LogsCategoryProcessorType) {
o.Type = v
}
// MarshalJSON serializes the processor. When the object failed to
// deserialize earlier, the preserved raw payload is re-emitted verbatim;
// otherwise required fields are always written and optional fields only
// when set. The generated `if true { ... }` scaffolding around the
// required fields was removed — behavior is unchanged.
func (o LogsCategoryProcessor) MarshalJSON() ([]byte, error) {
	if o.UnparsedObject != nil {
		return json.Marshal(o.UnparsedObject)
	}
	toSerialize := map[string]interface{}{
		"categories": o.Categories,
		"target":     o.Target,
		"type":       o.Type,
	}
	if o.IsEnabled != nil {
		toSerialize["is_enabled"] = o.IsEnabled
	}
	if o.Name != nil {
		toSerialize["name"] = o.Name
	}
	return json.Marshal(toSerialize)
}
// UnmarshalJSON deserializes the processor in two passes: the first decode
// validates that all required fields are present; the second decodes every
// known field. On a decode failure, or when the type enum value is not
// recognized, the raw payload is preserved in UnparsedObject instead of
// returning an error, so round-tripping unknown data is lossless.
func (o *LogsCategoryProcessor) UnmarshalJSON(bytes []byte) (err error) {
raw := map[string]interface{}{}
// Pointer-typed mirror of the required fields so absence is detectable.
required := struct {
Categories *[]LogsCategoryProcessorCategory `json:"categories"`
Target *string `json:"target"`
Type *LogsCategoryProcessorType `json:"type"`
}{}
all := struct {
Categories []LogsCategoryProcessorCategory `json:"categories"`
IsEnabled *bool `json:"is_enabled,omitempty"`
Name *string `json:"name,omitempty"`
Target string `json:"target"`
Type LogsCategoryProcessorType `json:"type"`
}{}
err = json.Unmarshal(bytes, &required)
if err != nil {
return err
}
if required.Categories == nil {
return fmt.Errorf("Required field categories missing")
}
if required.Target == nil {
return fmt.Errorf("Required field target missing")
}
if required.Type == nil {
return fmt.Errorf("Required field type missing")
}
err = json.Unmarshal(bytes, &all)
if err != nil {
// Full decode failed: keep the raw payload rather than erroring out.
err = json.Unmarshal(bytes, &raw)
if err != nil {
return err
}
o.UnparsedObject = raw
return nil
}
// An unrecognized enum value is also preserved as the raw payload.
if v := all.Type; !v.IsValid() {
err = json.Unmarshal(bytes, &raw)
if err != nil {
return err
}
o.UnparsedObject = raw
return nil
}
o.Categories = all.Categories
o.IsEnabled = all.IsEnabled
o.Name = all.Name
o.Target = all.Target
o.Type = all.Type
return nil
}
// NullableLogsCategoryProcessor distinguishes an absent value from an
// explicit JSON null: isSet records whether Set (or UnmarshalJSON) was
// called, independently of whether the stored pointer is nil.
type NullableLogsCategoryProcessor struct {
value *LogsCategoryProcessor
isSet bool
}
// Get returns the wrapped value, which may be nil.
func (v NullableLogsCategoryProcessor) Get() *LogsCategoryProcessor {
return v.value
}
// Set stores val and marks the value as set.
func (v *NullableLogsCategoryProcessor) Set(val *LogsCategoryProcessor) {
v.value = val
v.isSet = true
}
// IsSet reports whether a value (possibly nil) has been assigned.
func (v NullableLogsCategoryProcessor) IsSet() bool {
return v.isSet
}
// Unset clears both the value and the set flag.
func (v *NullableLogsCategoryProcessor) Unset() {
v.value = nil
v.isSet = false
}
// NewNullableLogsCategoryProcessor wraps val and marks it as set.
func NewNullableLogsCategoryProcessor(val *LogsCategoryProcessor) *NullableLogsCategoryProcessor {
return &NullableLogsCategoryProcessor{value: val, isSet: true}
}
// MarshalJSON emits the wrapped value; a nil value serializes as null.
func (v NullableLogsCategoryProcessor) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
// UnmarshalJSON marks the value as set even when the payload is null.
func (v *NullableLogsCategoryProcessor) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | api/v1/datadog/model_logs_category_processor.go | 0.75037 | 0.403038 | model_logs_category_processor.go | starcoder |
package iso20022
// Set of elements used to provide information on the original amount.
// All members are optional pointers/slices; use the Add* helpers below to
// allocate them lazily.
type AmountAndCurrencyExchange3 struct {
// Identifies the amount of money to be moved between the debtor and creditor, before deduction of charges, expressed in the currency as ordered by the initiating party and provides currency exchange information in case the instructed amount and/or currency is/are different from the entry amount and/or currency.
InstructedAmount *AmountAndCurrencyExchangeDetails3 `xml:"InstdAmt,omitempty"`
// Amount of the underlying transaction.
TransactionAmount *AmountAndCurrencyExchangeDetails3 `xml:"TxAmt,omitempty"`
// Set of elements used to provide the countervalue amount and currency exchange information.
// Usage: This can be either the counter amount quoted in an FX deal, or the result of the currency information applied to an instructed amount, before deduction of charges.
CounterValueAmount *AmountAndCurrencyExchangeDetails3 `xml:"CntrValAmt,omitempty"`
// Amount of money, based on terms of corporate action event and balance of underlying securities, entitled to/from the account owner.
// In some situations, this amount may alternatively be called entitled amount.
AnnouncedPostingAmount *AmountAndCurrencyExchangeDetails3 `xml:"AnncdPstngAmt,omitempty"`
// Set of elements used to provide information on the original amount and currency exchange.
ProprietaryAmount []*AmountAndCurrencyExchangeDetails4 `xml:"PrtryAmt,omitempty"`
}
// AddInstructedAmount allocates InstructedAmount (replacing any previous value) and returns it for population.
func (a *AmountAndCurrencyExchange3) AddInstructedAmount() *AmountAndCurrencyExchangeDetails3 {
a.InstructedAmount = new(AmountAndCurrencyExchangeDetails3)
return a.InstructedAmount
}
// AddTransactionAmount allocates TransactionAmount (replacing any previous value) and returns it.
func (a *AmountAndCurrencyExchange3) AddTransactionAmount() *AmountAndCurrencyExchangeDetails3 {
a.TransactionAmount = new(AmountAndCurrencyExchangeDetails3)
return a.TransactionAmount
}
// AddCounterValueAmount allocates CounterValueAmount (replacing any previous value) and returns it.
func (a *AmountAndCurrencyExchange3) AddCounterValueAmount() *AmountAndCurrencyExchangeDetails3 {
a.CounterValueAmount = new(AmountAndCurrencyExchangeDetails3)
return a.CounterValueAmount
}
// AddAnnouncedPostingAmount allocates AnnouncedPostingAmount (replacing any previous value) and returns it.
func (a *AmountAndCurrencyExchange3) AddAnnouncedPostingAmount() *AmountAndCurrencyExchangeDetails3 {
a.AnnouncedPostingAmount = new(AmountAndCurrencyExchangeDetails3)
return a.AnnouncedPostingAmount
}
// AddProprietaryAmount appends a fresh element to the ProprietaryAmount slice and returns it.
func (a *AmountAndCurrencyExchange3) AddProprietaryAmount() *AmountAndCurrencyExchangeDetails4 {
newValue := new(AmountAndCurrencyExchangeDetails4)
a.ProprietaryAmount = append(a.ProprietaryAmount, newValue)
return newValue
} | AmountAndCurrencyExchange3.go | 0.804367 | 0.666124 | AmountAndCurrencyExchange3.go | starcoder |
package rs485
import (
"math"
. "github.com/volkszaehler/mbmd/meters"
)
func init() {
// Register this producer under the "SEMTR" device id at package load time.
Register("SEMTR", NewSEMTRProducer)
}
// SEMTRProducer carries the opcode table for the SolarEdge SE-MTR-3Y meter.
type SEMTRProducer struct {
Opcodes
}
// NewSEMTRProducer builds the SE-MTR-3Y opcode table.
func NewSEMTRProducer() Producer {
/**
* Opcodes as defined by SolarEdge SE-MTR-3Y
* reverse engineered from: https://github.com/nmakel/solaredge_meterproxy/blob/master/semp-rtu.py
* Uses the modbus RTU protocol over RS485.
*
* These are only necessary, if you'd like to connect the power meter
* directly via rs485 (e.g. if you have no inverter from solaredge).
* Otherwise, the values can be accessed readily through the network
* connection of the inverter via the sunspec protocol.
*/
ops := Opcodes{
Sum: 0x03E8, // total active energy
Import: 0x03EA, // imported active energy
SumT1: 0x03EC, // total active energy non-reset
ImportT1: 0x03EE, // imported active energy non-reset
Power: 0x03F0, // total power
PowerL1: 0x03F2, // power l1
PowerL2: 0x03F4, // power l2
PowerL3: 0x03F6, // power l3
// VoltageLN: 0x03F8, // l-n voltage
VoltageL1: 0x03FA, // l1-n voltage
VoltageL2: 0x03FC, // l2-n voltage
VoltageL3: 0x03FE, // l3-n voltage
/* VoltageLL: 0x0400, // l-l voltage
VoltageL12: 0x0402, // l1-l2 voltage
VoltageL23: 0x0404, // l2-l3 voltage
VoltageL31: 0x0406, // l3-l1 voltage */
Frequency: 0x0408, // line frequency
SumL1: 0x044C, // total active energy l1
SumL2: 0x044E, // total active energy l2
SumL3: 0x0450, // total active energy l3
ImportL1: 0x0452, // imported active energy l1
ImportL2: 0x0454, // imported active energy l2
ImportL3: 0x0456, // imported active energy l3
Export: 0x0458, // total exported active energy
ExportT1: 0x045A, // total exported active energy non-reset
ExportL1: 0x045C, // exported energy l1
ExportL2: 0x045E, // exported energy l2
ExportL3: 0x0460, // exported energy l3
ReactiveSum: 0x0462, // total reactive energy
ReactiveSumL1: 0x0464, // reactive energy l1
// NOTE(review): the jump from 0x0464 to 0x0468 skips 0x0466, unlike the
// two-register stride used elsewhere in this table — verify on hardware.
ReactiveSumL2: 0x0468, // reactive energy l2
ReactiveSumL3: 0x046A, // reactive energy l3
// EnergyApparent: 0x046C, // total apparent energy
// EnergyApparentL1: 0x046E, // apparent energy l1
// EnergyApparentL2: 0x0470, // apparent energy l2
// EnergyApparentL3: 0x0472, // apparent energy l3
// NOTE(review): 0x0472 is also listed above as apparent energy l3 —
// confirm which assignment is correct before relying on Cosphi readings.
Cosphi: 0x0472, // power factor
CosphiL1: 0x0474, // power factor l1
CosphiL2: 0x0476, // power factor l2
CosphiL3: 0x0478, // power factor l3
ReactivePower: 0x047A, // total reactive power
ReactivePowerL1: 0x047C, // reactive power l1
ReactivePowerL2: 0x047e, // reactive power l2
ReactivePowerL3: 0x0480, // reactive power l3
ApparentPower: 0x0482, // total apparent power
ApparentPowerL1: 0x0484, // apparent power l1
ApparentPowerL2: 0x0486, // apparent power l2
ApparentPowerL3: 0x0488, // apparent power l3
CurrentL1: 0x048A, // current l1
CurrentL2: 0x048C, // current l2
CurrentL3: 0x048E, // current l3
// PowerDemand: 0x0490, // demand power
// MinimumPowerDemand: 0x0492, // minimum demand power
// MaximumPowerDemand: 0x0494, // maximum demand power
// ApparentPowerDemand: 0x0496, // apparent demand power
// PowerDemandL1: 0x0498, // demand power l1
// PowerDemandL2: 0x049A, // demand power l2
// PowerDemandL3: 0x049C, // demand power l3
}
return &SEMTRProducer{Opcodes: ops}
}
// Description returns the human-readable device name of this producer.
func (p *SEMTRProducer) Description() string {
	const description = "SolarEdge SE-MTR-3Y"
	return description
}
// RTUIeee754SolaredgeToFloat64 converts 32 bit IEEE 754 solar edge big endian float readings
// The wire protocol seems to have some strange byte ordering (?)
func RTUIeee754SolaredgeToFloat64(b []byte) float64 {
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
bits := uint32(b[1]) | uint32(b[0])<<8 | uint32(b[3])<<16 | uint32(b[2])<<24
f := math.Float32frombits(bits)
return float64(f)
}
// snip builds the read operation for one measurement: a two-register input
// read at the measurement's opcode, decoded with the SolarEdge float layout.
func (p *SEMTRProducer) snip(iec Measurement) Operation {
operation := Operation{
FuncCode: ReadInputReg,
OpCode: p.Opcode(iec),
ReadLen: 2,
IEC61850: iec,
Transform: RTUIeee754SolaredgeToFloat64,
}
return operation
}
// Probe returns a single read operation (the L1 voltage) used to detect
// whether a device of this type is present on the bus.
func (p *SEMTRProducer) Probe() Operation {
return p.snip(VoltageL1)
}
// Produce returns one read operation per measurement listed in p.Opcodes.
func (p *SEMTRProducer) Produce() (res []Operation) {
	for measurement := range p.Opcodes {
		res = append(res, p.snip(measurement))
	}
	return
} | meters/rs485/semtr3y.go | 0.742515 | 0.467271 | semtr3y.go | starcoder |
package mock
import (
"testing"
"time"
"github.com/ActiveState/cli/internal/progress"
"github.com/autarch/testify/assert"
)
// Compile-time check that *Incrementer satisfies progress.Incrementer.
var _ progress.Incrementer = &Incrementer{}
// TestProgress is wrapper around a Progress that can be used in test to ensure that the progress bar
// loop terminates after a configurable time-out.
// This construct has been added, due to numerous problems with hanging progress bar loops.
type TestProgress struct {
*progress.Progress // embedded so TestProgress exposes the full Progress API
}
// NewTestProgress returns a new testable progress-bar for unit tests.
// The underlying progress bar is created with a nil output writer.
func NewTestProgress() *TestProgress {
	underlying := progress.New(progress.WithOutput(nil))
	return &TestProgress{Progress: underlying}
}
// Close should be run at the end of the test. It ensures that all resources are released.
// Cancel is issued before the underlying Close; presumably this unblocks
// Close when bars never completed — see assertClose for the guarded variant.
func (tp *TestProgress) Close() {
tp.Cancel()
tp.Progress.Close()
}
// AssertProperClose asserts that the progress bar loop terminated normally (with all sub-bars completed at 100%)
// within a 2 second budget.
func (tp *TestProgress) AssertProperClose(t *testing.T) {
tp.assertClose(t, time.Second*2, true)
}
// AssertCloseAfterCancellation asserts that the progress bar loop terminated due to a cancellation event
// within a 2 second budget.
func (tp *TestProgress) AssertCloseAfterCancellation(t *testing.T) {
tp.assertClose(t, time.Second*2, false)
}
// assertClose closes the progress bar in a goroutine and fails the test if
// the close does not finish within the given duration. properShutdown
// selects whether a normal completion (no cancellation) or a
// cancellation-driven shutdown is expected.
func (tp *TestProgress) assertClose(t *testing.T, after time.Duration, properShutdown bool) {
done := make(chan struct{})
go func() {
tp.Progress.Close()
close(done)
}()
select {
case <-done:
// Close returned in time; verify the shutdown mode matches expectation.
if properShutdown {
assert.False(t, tp.HasBeenCancelled(), "progress bar shut down without cancellation")
} else {
assert.True(t, tp.HasBeenCancelled(), "progress bar shut down after cancellation")
}
case <-time.After(after):
// Close is stuck: cancel to unblock it, fail the test, then wait for
// the goroutine so it does not outlive the test.
tp.Cancel()
t.Error("Timed out waiting for progress bar to shut down. Either a bar did not complete, or you forgot to call the Cancel() method.")
<-done
}
}
// Incrementer implements a simple counter. It can be used to test
// functions that are expected to report their progress incrementally.
type Incrementer struct {
	Count int
}

// NewMockIncrementer returns a counter whose Count starts at zero.
func NewMockIncrementer() *Incrementer {
	counter := Incrementer{Count: 0}
	return &counter
}
// Increment increments the progress count by one.
// The variadic duration arguments are accepted only to match the
// progress.Incrementer signature and are ignored.
func (mi *Incrementer) Increment(_ ...time.Duration) {
mi.Count++
} | internal/progress/mock/mock.go | 0.618896 | 0.403156 | mock.go | starcoder |
Copyright 2016 https://github.com/AsynkronIT/protoactor-go
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*****************************************************/
/*
Package actor declares the types used to represent actors in the Actor Model.
The actor model provides a high-level abstraction for writing concurrent and distributed systems. This approach
relieves engineers of burdens such as explicit locks and concurrent access to shared state, because actors
receive messages synchronously.
The following quote from Wikipedia distills the definition of an actor down to its essence
In response to a message that it receives, an actor can: make local decisions, create more actors,
send more messages, and determine how to respond to the next message received.
Creating Actors
Props provide the building blocks for declaring how actors should be created. The following example defines an actor
using a function literal to process messages:
var props Props = actor.FromFunc(func(c Context) {
// process messages
})
Alternatively, a type which conforms to the Actor interface, by defining a single Receive method, can be used.
type MyActor struct {}
func (a *MyActor) Receive(c Context) {
// process messages
}
var props Props = actor.FromProducer(func() Actor { return &MyActor{} })
Spawn and SpawnNamed use the given props to create a running instances of an actor. Once spawned, the actor is
ready to process incoming messages. To spawn an actor with a unique name, use
pid := actor.Spawn(props)
The result of calling Spawn is a unique PID or process identifier.
Each time an actor is spawned, a new mailbox is created and associated with the PID. Messages are sent to the mailbox
and then forwarded to the actor to process.
Processing Messages
An actor processes messages via its Receive handler. The signature of this function is:
Receive(c actor.Context)
The actor system guarantees that this method is called synchronously, therefore there is no requirement to protect
shared state inside calls to this function.
Communicating With Actors
A PID is the primary interface for sending messages to actors. The PID.Tell method is used to send an asynchronous
message to the actor associated with the PID:
pid.Tell("Hello World")
Depending on the requirements, communication between actors can take place synchronously or asynchronously. Regardless
of the circumstances, actors always communicate via a PID.
When sending a message using PID.Request or PID.RequestFuture, the actor which receives the message will respond
using the Context.Sender method, which returns the PID of the sender.
For synchronous communication, an actor will use a Future and wait for the result before continuing. To send a message
to an actor and wait for a response, use the RequestFuture method, which returns a Future:
f := actor.RequestFuture(pid,"Hello", 50 * time.Millisecond)
res, err := f.Result() // waits for pid to reply */
package actor | actor/doc.go | 0.846356 | 0.601067 | doc.go | starcoder |
package convexhull
import "github.com/yukirin/algo/geo"
// Solve2 computes the convex hull of ps using the Quickhull algorithm.
// The result begins with the left- and right-most input points, followed by
// the remaining hull vertices produced by the recursive subdivision; the
// points are not emitted in perimeter order.
func Solve2(ps []complex128) []complex128 {
// The two horizontal extremes are always hull vertices; seed with them.
lp, rp := leftMost(ps), rightMost(ps)
ret := make([]complex128, 2, 500) // capacity 500 is a heuristic preallocation
ret[0], ret[1] = lp, rp
return append(ret, solve(lp, rp, ps)...)
}
// solve recursively finds the hull vertices among ps relative to the
// dividing line lp-rp, handling both the upper (left of lp->rp) and lower
// half-planes in one call.
func solve(lp, rp complex128, ps []complex128) []complex128 {
if len(ps) == 0 {
return nil
}
// Line through lp and rp in the form a*x + b*y + c = 0.
a, b, c := geo.EquationL(lp, rp)
// updown[0]/updown[1]: points strictly above/below the line; div[0]/div[1]
// track the farthest point on each side (the next hull vertex candidates).
updown, div := [][]complex128{make([]complex128, 0, 50), make([]complex128, 0, 50)}, make([]complex128, 2)
maxU, maxD := float64(0), float64(0)
ret := make([]complex128, 0, 100)
for _, p := range ps {
d := geo.Distance(a, b, c, p)
switch side := geo.PosL(p, lp, rp); {
case side > 0:
if d > maxU {
maxU, div[0] = d, p
}
updown[0] = append(updown[0], p)
case side < 0:
if d > maxD {
maxD, div[1] = d, p
}
updown[1] = append(updown[1], p)
}
// Points exactly on the line (side == 0) are dropped on purpose.
}
// The farthest point on each populated side is a hull vertex.
if len(updown[0]) > 0 {
ret = append(ret, div[0])
}
if len(updown[1]) > 0 {
ret = append(ret, div[1])
}
// left reuses the updown backing arrays as an in-place filter; this aliases
// updown[i], which is safe because each write lands at an index that has
// already been read by the range loop below.
left := [][]complex128{updown[0][:0], updown[1][:0]}
right := [][]complex128{make([]complex128, 0, 25), make([]complex128, 0, 25)}
// Points inside the triangle (lp, rp, div[i]) can never be hull vertices.
tri := [][]complex128{[]complex128{lp, rp, div[0]}, []complex128{lp, rp, div[1]}}
// inter[i] is div[i]'s projection onto lp-rp; the line inter[i]-div[i]
// splits the remaining candidates into the two recursive subproblems.
inter := []complex128{geo.NearestP(div[0], lp, rp), geo.NearestP(div[1], lp, rp)}
for i := 0; i < 2; i++ {
for _, p := range updown[i] {
if geo.InPolygon(p, tri[i]) {
continue
}
if geo.PosL(p, inter[i], div[i]) > 0 {
left[i] = append(left[i], p)
continue
}
right[i] = append(right[i], p)
}
}
// Recurse on the four sub-edges (two per half-plane).
ret = append(ret, solve(lp, div[0], left[0])...)
ret = append(ret, solve(div[0], rp, right[0])...)
ret = append(ret, solve(rp, div[1], left[1])...)
ret = append(ret, solve(div[1], lp, right[1])...)
return ret
}
// leftMost returns the point with the smallest real (x) coordinate.
// When several points share the minimum x, the earliest one wins.
// It assumes ps is non-empty (ps[0] is read unconditionally).
func leftMost(ps []complex128) complex128 {
	best := ps[0]
	for i := 1; i < len(ps); i++ {
		if real(ps[i]) < real(best) {
			best = ps[i]
		}
	}
	return best
}
// rightMost returns the point with the largest real (x) coordinate; when
// several points share the maximum x, the earliest one wins. It assumes ps
// is non-empty.
func rightMost(ps []complex128) complex128 {
p := ps[0]
for _, v := range ps[1:] {
if real(v) > real(p) {
p = v
}
}
return p
} | convexhull/quickhull.go | 0.584864 | 0.408336 | quickhull.go | starcoder |
package assert
import (
"testing"
"github.com/ppapapetrou76/go-testing/internal/pkg/values"
"github.com/ppapapetrou76/go-testing/types"
)
// AssertableMap is the structure to assert maps.
// It pairs the *testing.T used for failure reporting with the wrapped map
// value under test.
type AssertableMap struct {
t *testing.T
actual types.Map
}
// ThatMap returns a proper assertable structure based on the map key type.
func ThatMap(t *testing.T, actual interface{}) AssertableMap {
	wrapped := values.NewKeyStringMap(actual)
	return AssertableMap{t: t, actual: wrapped}
}
// IsEqualTo asserts that the assertable map equals the expected map.
// It errors the test if the values differ or if the asserted value is
// not a map at all.
func (a AssertableMap) IsEqualTo(expected interface{}) AssertableMap {
	switch {
	case !values.IsMap(a.actual.Value()):
		a.t.Error(shouldBeMap(a.actual))
	case !a.actual.IsEqualTo(expected):
		a.t.Error(shouldBeEqual(a.actual, expected))
	}
	return a
}
// IsNotEqualTo asserts that the assertable map differs from the expected
// map. It errors the test if the values are equal or if the asserted
// value is not a map at all.
func (a AssertableMap) IsNotEqualTo(expected interface{}) AssertableMap {
	switch {
	case !values.IsMap(a.actual.Value()):
		a.t.Error(shouldBeMap(a.actual))
	case a.actual.IsEqualTo(expected):
		a.t.Error(shouldNotBeEqual(a.actual, expected))
	}
	return a
}
// HasSize asserts that the assertable map contains exactly the given
// number of entries. It errors the test if the size differs or if the
// asserted value is not a map.
func (a AssertableMap) HasSize(size int) AssertableMap {
	switch {
	case !values.IsMap(a.actual.Value()):
		a.t.Error(shouldBeMap(a.actual))
	case !a.actual.HasSize(size):
		a.t.Error(shouldHaveSize(a.actual, size))
	}
	return a
}
// IsEmpty asserts that the assertable map has no entries. It errors the
// test if the map is non-empty or if the asserted value is not a map.
func (a AssertableMap) IsEmpty() AssertableMap {
	switch {
	case !values.IsMap(a.actual.Value()):
		a.t.Error(shouldBeMap(a.actual))
	case a.actual.IsNotEmpty():
		a.t.Error(shouldBeEmpty(a.actual))
	}
	return a
}
// IsNotEmpty asserts that the assertable map has at least one entry. It
// errors the test if the map is empty or if the asserted value is not a
// map.
func (a AssertableMap) IsNotEmpty() AssertableMap {
	switch {
	case !values.IsMap(a.actual.Value()):
		a.t.Error(shouldBeMap(a.actual))
	case a.actual.IsEmpty():
		a.t.Error(shouldNotBeEmpty(a.actual))
	}
	return a
}
// HasKey asserts that the assertable map contains the given key.
// It errors the test if
// * the key can't be found
// * the key is not comparable
// * the asserted type is not a map
func (a AssertableMap) HasKey(elements interface{}) AssertableMap {
	switch {
	case !values.IsMap(a.actual.Value()):
		a.t.Error(shouldBeMap(a.actual))
	case !a.actual.HasKey(elements):
		a.t.Error(shouldHaveKey(a.actual, elements))
	}
	return a
}
// HasValue asserts that the assertable map contains the given value.
// It errors the test if
// * the value can't be found
// * the value is not comparable
// * the asserted type is not a map
func (a AssertableMap) HasValue(elements interface{}) AssertableMap {
	switch {
	case !values.IsMap(a.actual.Value()):
		a.t.Error(shouldBeMap(a.actual))
	case !a.actual.HasValue(elements):
		a.t.Error(shouldHaveValue(a.actual, elements))
	}
	return a
}
// HasEntry asserts that the assertable map contains the given key/value
// entry. It errors the test if
// * the entry can't be found
// * the key is not comparable
// * the asserted type is not a map
func (a AssertableMap) HasEntry(value types.MapEntry) AssertableMap {
	switch {
	case !values.IsMap(a.actual.Value()):
		a.t.Error(shouldBeMap(a.actual))
	case !a.actual.HasEntry(value):
		a.t.Error(shouldHaveEntry(a.actual, value))
	}
	return a
}
// HasNotKey asserts that the assertable map does not contain the given
// key. It errors the test if
// * the key is found
// * the key is not comparable
// * the asserted type is not a map
func (a AssertableMap) HasNotKey(elements interface{}) AssertableMap {
	switch {
	case !values.IsMap(a.actual.Value()):
		a.t.Error(shouldBeMap(a.actual))
	case a.actual.HasKey(elements):
		a.t.Error(shouldNotHaveKey(a.actual, elements))
	}
	return a
}
// HasNotValue asserts that the assertable map does not contain the
// given value. It errors the test if
// * the value is found
// * the value is not comparable
// * the asserted type is not a map
func (a AssertableMap) HasNotValue(elements interface{}) AssertableMap {
	switch {
	case !values.IsMap(a.actual.Value()):
		a.t.Error(shouldBeMap(a.actual))
	case a.actual.HasValue(elements):
		a.t.Error(shouldNotHaveValue(a.actual, elements))
	}
	return a
}
// HasNotEntry asserts if the assertable map has not the given entry
// It errors the test if
// * they entry can be found
// * the key is not comparable
// * the asserted type is not a map
func (a AssertableMap) HasNotEntry(value types.MapEntry) AssertableMap {
if !values.IsMap(a.actual.Value()) {
a.t.Error(shouldBeMap(a.actual))
return a
}
if a.actual.HasEntry(value) {
a.t.Error(shouldNotHaveEntry(a.actual, value))
}
return a
} | assert/map.go | 0.766643 | 0.768255 | map.go | starcoder |
package game
import (
"snake/utils"
)
// direction identifies one of the four movement headings on the grid.
type direction int

// The four headings; the zero value is directionUp.
const (
	directionUp direction = iota
	directionRight
	directionLeft
	directionDown
)
// oppositeDirections maps each heading to its reverse; rotateHead uses
// it to reject 180-degree turns.
// Both tables are initialized with composite literals instead of the
// previous init() function — same values, less machinery.
var oppositeDirections = map[direction]direction{
	directionUp:    directionDown,
	directionRight: directionLeft,
	directionLeft:  directionRight,
	directionDown:  directionUp,
}

// moves maps each heading to the grid offset applied per step
// (Y grows downwards: up is Y-1).
var moves = map[direction]utils.Vector{
	directionUp:    {X: 0, Y: -1},
	directionRight: {X: 1, Y: 0},
	directionLeft:  {X: -1, Y: 0},
	directionDown:  {X: 0, Y: 1},
}
// snake holds the playing-field dimensions, the snake body and a
// free-cell index that allows O(1) collision checks.
type snake struct {
	fieldWidth  int
	fieldHeight int
	body []utils.Vector // the head is at the end
	headDirection direction // heading applied on the next move() call
	previousTurn direction // heading used on the last completed move
	freeCellsMap map[utils.Vector]bool // true => cell is unoccupied
	freeCells int // running count of free cells in freeCellsMap
}
// newSnake builds a snake on an empty fieldHeight x fieldWidth board:
// every cell starts out free, then the initial body is placed.
func newSnake(fieldHeight, fieldWidth int) *snake {
	s := &snake{
		fieldWidth:   fieldWidth,
		fieldHeight:  fieldHeight,
		freeCellsMap: make(map[utils.Vector]bool, fieldHeight*fieldWidth),
	}
	for y := 0; y < fieldHeight; y++ {
		for x := 0; x < fieldWidth; x++ {
			s.setCellOccupation(utils.Vector{X: x, Y: y}, true)
		}
	}
	s.initSnake()
	return s
}
// setCellOccupation records whether cell p is free and keeps the free
// cell counter in sync. The counter is only adjusted when the stored
// state actually changes; the previous bool-switch version adjusted it
// unconditionally, so repeated calls with the same state would have
// skewed the count.
func (s *snake) setCellOccupation(p utils.Vector, isFree bool) {
	if prev, known := s.freeCellsMap[p]; known && prev == isFree {
		return // state unchanged; nothing to count
	}
	s.freeCellsMap[p] = isFree
	if isFree {
		s.freeCells++
	} else {
		s.freeCells--
	}
}
// initSnake places the starting body: a vertical snake of length 4 in
// the middle of the field, heading up, with the head on top.
func (s *snake) initSnake() {
	const length = 4
	s.headDirection = directionUp
	s.previousTurn = directionUp
	midX := s.fieldWidth / 2
	baseY := s.fieldHeight / 2
	for i := 0; i < length; i++ {
		p := utils.Vector{X: midX, Y: baseY + length - i}
		s.body = append(s.body, p)
		s.setCellOccupation(p, false)
	}
}
// move advances the snake one cell in its current heading. It returns
// the head's previous and new positions plus the vacated tail cell.
// ok is false (with zero vectors) when the target cell is outside the
// field or occupied; in that case no state is modified.
func (s *snake) move() (previousHeadPosition, currentHeadPosition, previousTailPosition utils.Vector, ok bool) {
	offset := moves[s.headDirection]
	previousHeadPosition = s.body[len(s.body)-1]
	currentHeadPosition = previousHeadPosition.Add(offset)
	if isFreeCell, ok := s.freeCellsMap[currentHeadPosition]; !ok || !isFreeCell {
		return utils.Vector{}, utils.Vector{}, utils.Vector{}, false
	}
	previousTailPosition = s.body[0]
	// Shift the body left in place instead of re-slicing with
	// append(s.body[1:], ...). The result is identical, but the backing
	// array is reused forever, which resolves the old TODO about the
	// slice growing.
	copy(s.body, s.body[1:])
	s.body[len(s.body)-1] = currentHeadPosition
	s.previousTurn = s.headDirection
	s.setCellOccupation(currentHeadPosition, false)
	s.setCellOccupation(previousTailPosition, true)
	return previousHeadPosition, currentHeadPosition, previousTailPosition, true
}
// increaseTail grows the snake by prepending p as the new tail cell and
// marking it occupied. The append+copy shift reuses existing slice
// capacity instead of allocating a fresh slice on every call (the old
// TODO about extra memory allocation).
func (s *snake) increaseTail(p utils.Vector) {
	s.body = append(s.body, utils.Vector{}) // make room for one more element
	copy(s.body[1:], s.body)                // shift everything right (copy handles overlap)
	s.body[0] = p
	s.setCellOccupation(p, false)
}
func (s *snake) rotateHead(d direction) {
if s.previousTurn != oppositeDirections[d] {
s.headDirection = d
}
} | controller/game/snake.go | 0.560974 | 0.491151 | snake.go | starcoder |
package triangulate
import (
"github.com/go-spatial/geom"
"github.com/go-spatial/geom/planar/triangulate/quadedge"
)
/*
Segment models a constraint segment in a triangulation.
A constraint segment is an oriented straight line segment between a start point
and an end point.
Author <NAME>
Author <NAME>
Ported to Go by <NAME>
*/
type Segment struct {
	ls geom.Line // the oriented line from start to end point
	data interface{} // external data attached by the caller; not copied by DeepCopy
}
// NewSegment wraps the given line in a Segment with no attached data.
func NewSegment(l geom.Line) Segment {
	var seg Segment
	seg.ls = l
	return seg
}
/*
DeepCopy makes a deep copy of the line segment's coordinates; the data
field is deliberately not copied.
If seg is nil a panic will occur.
*/
func (seg *Segment) DeepCopy() Segment {
	var cp Segment
	for i := 0; i < 2; i++ {
		for j := 0; j < 2; j++ {
			cp.ls[i][j] = seg.ls[i][j]
		}
	}
	return cp
}
/**
* Creates a new instance for the given ordinates.
public Segment(double x1, double y1, double z1, double x2, double y2, double z2) {
this(new Coordinate(x1, y1, z1), new Coordinate(x2, y2, z2));
}
*/
/**
* Creates a new instance for the given ordinates, with associated external data.
public Segment(double x1, double y1, double z1, double x2, double y2, double z2, Object data) {
this(new Coordinate(x1, y1, z1), new Coordinate(x2, y2, z2), data);
}
*/
/**
* Creates a new instance for the given points.
*
* @param p0 the start point
* @param p1 the end point
public Segment(Coordinate p0, Coordinate p1) {
ls = new LineSegment(p0, p1);
}
*/
/*
GetStart returns the start coordinate of the segment as a Vertex.
If seg is nil a panic will occur.
*/
func (seg *Segment) GetStart() quadedge.Vertex {
	return quadedge.Vertex(seg.ls[0])
}
/*
GetEnd returns the end coordinate of the segment as a Vertex.
If seg is nil a panic will occur.
*/
func (seg *Segment) GetEnd() quadedge.Vertex {
	return quadedge.Vertex(seg.ls[1])
}
/**
* Gets the start X ordinate of the segment
*
* @return the X ordinate value
public double getStartX() {
Coordinate p = ls.getCoordinate(0);
return p.x;
}
*/
/**
* Gets the start Y ordinate of the segment
*
* @return the Y ordinate value
public double getStartY() {
Coordinate p = ls.getCoordinate(0);
return p.y;
}
*/
/**
* Gets the start Z ordinate of the segment
*
* @return the Z ordinate value
public double getStartZ() {
Coordinate p = ls.getCoordinate(0);
return p.z;
}
*/
/**
* Gets the end X ordinate of the segment
*
* @return the X ordinate value
public double getEndX() {
Coordinate p = ls.getCoordinate(1);
return p.x;
}
*/
/**
* Gets the end Y ordinate of the segment
*
* @return the Y ordinate value
public double getEndY() {
Coordinate p = ls.getCoordinate(1);
return p.y;
}
*/
/**
* Gets the end Z ordinate of the segment
*
* @return the Z ordinate value
public double getEndZ() {
Coordinate p = ls.getCoordinate(1);
return p.z;
}
*/
// GetLineSegment returns a copy of the Line modelling this segment.
// Panics if seg is nil.
func (seg *Segment) GetLineSegment() geom.Line {
	return seg.ls
}
/**
* Gets the external data associated with this segment
*
* @return a data object
public Object getData() {
return data;
}
*/
/**
* Sets the external data to be associated with this segment
*
* @param data a data object
public void setData(Object data) {
this.data = data;
}
*/
/**
* Determines whether two segments are topologically equal.
* I.e. equal up to orientation.
*
* @param s a segment
* @return true if the segments are topologically equal
public boolean equalsTopo(Segment s) {
return ls.equalsTopo(s.getLineSegment());
}
*/
/**
* Computes the intersection point between this segment and another one.
*
* @param s a segment
* @return the intersection point, or <code>null</code> if there is none
public Coordinate intersection(Segment s) {
return ls.intersection(s.getLineSegment());
}
*/
/**
* Computes a string representation of this segment.
*
* @return a string
public String toString() {
return ls.toString();
}
*/ | planar/triangulate/segment.go | 0.886297 | 0.707186 | segment.go | starcoder |
package game
import (
"github.com/mokiat/lacking-gl/internal"
"github.com/mokiat/lacking/game/graphics"
)
func newDirectionalLightShaderSet() graphics.ShaderSet {
vsBuilder := internal.NewShaderSourceBuilder(directionalLightVertexShader)
fsBuilder := internal.NewShaderSourceBuilder(directionalLightFragmentShader)
return graphics.ShaderSet{
VertexShader: vsBuilder.Build,
FragmentShader: fsBuilder.Build,
}
}
const directionalLightVertexShader = `
layout(location = 0) in vec3 coordIn;
noperspective out vec2 texCoordInOut;
void main()
{
texCoordInOut = (coordIn.xy + 1.0) / 2.0;
gl_Position = vec4(coordIn.xy, 0.0, 1.0);
}
`
const directionalLightFragmentShader = `
layout(location = 0) out vec4 fbColor0Out;
uniform sampler2D fbColor0TextureIn;
uniform sampler2D fbColor1TextureIn;
uniform sampler2D fbDepthTextureIn;
uniform mat4 projectionMatrixIn;
uniform mat4 viewMatrixIn;
uniform mat4 cameraMatrixIn;
uniform vec3 lightDirectionIn;
uniform vec3 lightIntensityIn = vec3(1.0, 1.0, 1.0);
noperspective in vec2 texCoordInOut;
const float pi = 3.141592;
struct fresnelInput {
vec3 reflectanceF0;
vec3 halfDirection;
vec3 lightDirection;
};
vec3 calculateFresnel(fresnelInput i) {
float halfLightDot = clamp(dot(i.halfDirection, i.lightDirection), 0.0, 1.0);
return i.reflectanceF0 + (1.0 - i.reflectanceF0) * pow(1.0 - halfLightDot, 5);
}
struct distributionInput {
float roughness;
vec3 normal;
vec3 halfDirection;
};
float calculateDistribution(distributionInput i) {
float sqrRough = i.roughness * i.roughness;
float halfNormDot = dot(i.normal, i.halfDirection);
float denom = halfNormDot * halfNormDot * (sqrRough - 1.0) + 1.0;
return sqrRough / (pi * denom * denom);
}
struct geometryInput {
float roughness;
};
float calculateGeometry(geometryInput i) {
// TODO: Use better model
return 1.0 / 4.0;
}
struct directionalSetup {
float roughness;
vec3 reflectedColor;
vec3 refractedColor;
vec3 viewDirection;
vec3 lightDirection;
vec3 normal;
vec3 lightIntensity;
};
vec3 calculateDirectionalHDR(directionalSetup s) {
vec3 halfDirection = normalize(s.lightDirection + s.viewDirection);
vec3 fresnel = calculateFresnel(fresnelInput(
s.reflectedColor,
halfDirection,
s.lightDirection
));
float distributionFactor = calculateDistribution(distributionInput(
s.roughness,
s.normal,
halfDirection
));
float geometryFactor = calculateGeometry(geometryInput(
s.roughness
));
vec3 reflectedHDR = fresnel * distributionFactor * geometryFactor;
vec3 refractedHDR = (vec3(1.0) - fresnel) * s.refractedColor / pi;
return (reflectedHDR + refractedHDR) * s.lightIntensity * clamp(dot(s.normal, s.lightDirection), 0.0, 1.0);
}
void main()
{
vec3 ndcPosition = vec3(
(texCoordInOut.x - 0.5) * 2.0,
(texCoordInOut.y - 0.5) * 2.0,
texture(fbDepthTextureIn, texCoordInOut).x * 2.0 - 1.0
);
vec3 clipPosition = vec3(
ndcPosition.x / projectionMatrixIn[0][0],
ndcPosition.y / projectionMatrixIn[1][1],
-1.0
);
vec3 viewPosition = clipPosition * projectionMatrixIn[3][2] / (projectionMatrixIn[2][2] + ndcPosition.z);
vec3 worldPosition = (cameraMatrixIn * vec4(viewPosition, 1.0)).xyz;
vec3 cameraPosition = cameraMatrixIn[3].xyz;
vec4 albedoMetalness = texture(fbColor0TextureIn, texCoordInOut);
vec4 normalRoughness = texture(fbColor1TextureIn, texCoordInOut);
vec3 baseColor = albedoMetalness.xyz;
vec3 normal = normalize(normalRoughness.xyz);
float metalness = albedoMetalness.w;
float roughness = normalRoughness.w;
vec3 refractedColor = baseColor * (1.0 - metalness);
vec3 reflectedColor = mix(vec3(0.02), baseColor, metalness);
vec3 hdr = calculateDirectionalHDR(directionalSetup(
roughness,
reflectedColor,
refractedColor,
normalize(cameraPosition - worldPosition),
normalize(lightDirectionIn),
normal,
lightIntensityIn
));
fbColor0Out = vec4(hdr, 1.0);
}
` | game/shader_dir_light.go | 0.632049 | 0.458409 | shader_dir_light.go | starcoder |
package cache
import (
"fmt"
"github.com/ghodss/yaml"
"github.com/intel/cri-resource-manager/pkg/cri/resource-manager/kubernetes"
"strings"
)
const (
// annotation key for specifying container affinity rules
keyAffinity = "affinity"
// annotation key for specifying container anti-affinity rules
keyAntiAffinity = "anti-affinity"
)
// simpleAffinity is an alternative, simplified syntax for intra-pod container affinity.
type simpleAffinity map[string][]string
// PodContainerAffinity defines a set of per-container affinities and anti-affinities.
type podContainerAffinity map[string][]*Affinity
// Affinity specifies a single container affinity.
type Affinity struct {
Scope *Expression `json:"scope,omitempty"` // scope for evaluating this affinity
Match *Expression `json:"match"` // affinity expression
Weight int32 `json:"weight,omitempty"` // (optional) weight for this affinity
}
// Expression is used to describe a criteria to select objects within a domain.
type Expression struct {
Key string `json:"key"` // key to check values of/against
Op Operator `json:"operator"` // operator to apply to value of Key and Values
Values []string `json:"values,omitempty"` // value(s) for domain key
}
// Operator defines the possible operators for an Expression.
type Operator string
const (
// Equals tests for equality with a single value.
Equals Operator = "Equals"
// NotEqual test for inequality with a single value.
NotEqual Operator = "NotEqual"
// In tests if the key's value is one of the specified set.
In Operator = "In"
// NotIn tests if the key's value is not one of the specified set.
NotIn Operator = "NotIn"
// Exists evalutes to true if the named key exists.
Exists Operator = "Exists"
// NotExist evalutes to true if the named key does not exist.
NotExist Operator = "NotExist"
// AlwaysTrue always evaluates to true.
AlwaysTrue = "AlwaysTrue"
)
// ImplicitAffinity is an affinity that gets implicitly added to all eligible containers.
type ImplicitAffinity struct {
Eligible func(Container) bool // function to determine if Affinity is added to a Container
Affinity *Affinity // the actual implicitly added Affinity
}
// Validate checks the affinity for (obvious) invalidity.
func (a *Affinity) Validate() error {
if err := a.Scope.Validate(); err != nil {
return cacheError("invalid affinity scope: %v", err)
}
if err := a.Match.Validate(); err != nil {
return cacheError("invalid affinity match: %v", err)
}
return nil
}
// Validate checks the expression for (obvious) invalidity.
func (e *Expression) Validate() error {
if e == nil {
return cacheError("nil expression")
}
switch e.Op {
case Equals, NotEqual:
if len(e.Values) != 1 {
return cacheError("invalid expression, '%s' requires a single value", e.Op)
}
case Exists, NotExist:
if e.Values != nil && len(e.Values) != 0 {
return cacheError("invalid expression, '%s' does not take any values", e.Op)
}
}
return nil
}
// EvaluateAffinity evaluates the given affinity against all known in-scope containers.
func (cch *cache) EvaluateAffinity(a *Affinity) map[string]int32 {
results := make(map[string]int32)
for _, c := range cch.FilterScope(a.Scope) {
if a.Match.Evaluate(c) {
id := c.GetCacheID()
results[id] += a.Weight
}
}
return results
}
// FilterScope returns the containers selected by the scope expression.
func (cch *cache) FilterScope(scope *Expression) []Container {
cch.Debug("calculating scope %s", scope.String())
result := []Container{}
for _, c := range cch.GetContainers() {
if scope.Evaluate(c) {
cch.Debug(" + container %s: IN scope", c.PrettyName())
result = append(result, c)
} else {
cch.Debug(" - container %s: NOT IN scope", c.PrettyName())
}
}
return result
}
// Evaluate evaluates an expression against a container.
func (e *Expression) Evaluate(container Container) bool {
value, ok := e.KeyValue(container)
result := false
switch e.Op {
case Equals:
result = ok && (value == e.Values[0] || e.Values[0] == "*")
case NotEqual:
result = !ok || value != e.Values[0]
case In:
result = false
if ok {
for _, v := range e.Values {
if value == v || v == "*" {
result = true
}
}
}
case NotIn:
result = true
if ok {
for _, v := range e.Values {
if value == v || v == "*" {
result = false
}
}
}
case Exists:
result = ok
case NotExist:
result = !ok
case AlwaysTrue:
result = true
}
return result
}
// KeyValue extracts the value of the expresssion key from a container.
func (e *Expression) KeyValue(c Container) (string, bool) {
value, ok, _ := c.(*container).resolveRef(e.Key)
return value, ok
}
// resolveRef walks an object trying to resolve a reference to a value.
func (c *container) resolveRef(path string) (string, bool, error) {
var obj interface{}
c.cache.Debug("resolving %s/%s...", c.PrettyName(), path)
obj = c
ref := strings.Split(path, "/")
if len(ref) == 1 {
ref = []string{"labels", path}
}
for len(ref) > 0 {
key := ref[0]
c.cache.Debug("* resolve: walking %s, @%s, obj %T...", path, key, obj)
switch v := obj.(type) {
case *container:
switch strings.ToLower(key) {
case "pod":
pod, ok := v.GetPod()
if !ok {
return "", false, cacheError("failed to find pod (%s) for container %s",
v.PodID, v.Name)
}
obj = pod
case "labels":
obj = v.Labels
case "tags":
obj = v.Tags
case "name":
obj = v.Name
case "namespace":
obj = v.Namespace
case "qosclass":
obj = string(v.QOSClass)
}
case *pod:
switch strings.ToLower(key) {
case "labels":
obj = v.Labels
case "name":
obj = v.Name
case "namespace":
obj = v.Namespace
case "qosclass":
obj = string(v.QOSClass)
}
case map[string]string:
value, ok := v[key]
if !ok {
return "", false, nil
}
obj = value
default:
return "", false, cacheError("can't handle object of type %T in reference %s",
obj, path)
}
ref = ref[1:]
}
str, ok := obj.(string)
if !ok {
return "", false, cacheError("reference %s resolved to non-string: %T", path, obj)
}
c.cache.Debug("%s/%s => %s", c.PrettyName(), path, str)
return str, true, nil
}
// String returns the affinity as a string.
func (a *Affinity) String() string {
kind := ""
if a.Weight < 0 {
kind = "anti-"
}
return fmt.Sprintf("<%saffinity: scope %s %s => %d>",
kind, a.Scope.String(), a.Match.String(), a.Weight)
}
// String returns the expression as a string.
func (e *Expression) String() string {
return fmt.Sprintf("<%s %s %s>", e.Key, e.Op, strings.Join(e.Values, ","))
}
// Try to parse affinities in simplified notation from the given annotation value.
func (pca *podContainerAffinity) parseSimple(pod *pod, value string, weight int32) bool {
parsed := simpleAffinity{}
if err := yaml.Unmarshal([]byte(value), &parsed); err != nil {
return false
}
podScope := pod.ScopeExpression()
for name, values := range parsed {
(*pca)[name] = append((*pca)[name],
&Affinity{
Scope: podScope,
Match: &Expression{
Key: kubernetes.ContainerNameLabel,
Op: In,
Values: values,
},
Weight: weight,
})
}
return true
}
// Try to parse affinities in full notation from the given annotation value.
func (pca *podContainerAffinity) parseFull(pod *pod, value string, weight int32) error {
parsed := podContainerAffinity{}
if err := yaml.Unmarshal([]byte(value), &parsed); err != nil {
return cacheError("failed to parse affinity annotation '%s': %v", value, err)
}
podScope := pod.ScopeExpression()
for name, pa := range parsed {
ca, ok := (*pca)[name]
if !ok {
ca = make([]*Affinity, 0, len(pa))
}
for _, a := range pa {
if a.Scope == nil {
a.Scope = podScope
}
if a.Weight == 0 {
a.Weight = weight
} else {
if weight < 0 {
a.Weight *= -1
}
}
if err := a.Validate(); err != nil {
return err
}
ca = append(ca, a)
}
(*pca)[name] = ca
}
return nil
}
// GlobalAffinity creates an affinity with all containers in scope.
func GlobalAffinity(key string, weight int32) *Affinity {
return &Affinity{
Scope: &Expression{
Op: AlwaysTrue, // evaluate against all containers
},
Match: &Expression{
Key: key,
Op: Exists,
},
Weight: weight,
}
}
// GlobalAntiAffinity creates an anti-affinity with all containers in scope.
func GlobalAntiAffinity(key string, weight int32) *Affinity {
return GlobalAffinity(key, -weight)
}
// AddImplicitAffinities registers a set of implicit affinities.
func (cch *cache) AddImplicitAffinities(implicit map[string]*ImplicitAffinity) error {
for name := range implicit {
if existing, ok := cch.implicit[name]; ok {
return cacheError("implicit affinity %s already defined (%s)",
name, existing.Affinity.String())
}
}
for name, a := range implicit {
cch.implicit[name] = a
}
return nil
}
// DeleteImplicitAffinities removes a previously registered set of implicit affinities.
func (cch *cache) DeleteImplicitAffinities(names []string) {
for _, name := range names {
delete(cch.implicit, name)
}
} | pkg/cri/resource-manager/cache/affinity.go | 0.765856 | 0.411998 | affinity.go | starcoder |
Regexp like a Perl Pumpking in Go!
Rationale:
#Perl5:
$str =~ m/This is how (?<we>party!)/g
$we = $1;
$we = $+{we}
//Golang:
M(str, `m/This is how (?P<we>party!)/g`)
we := R0.S[1] // access the first capture group
we = R0.Z["we"] // access the named capture group
Flags supported:
- g
- x
- m
- s
- i
Notes:
- Named capture groups are overwritten with the last capture group when using global matching
- Unicode might not work properly.
*/
package re
import (
"fmt"
"regexp"
"strings"
"sync"
)
// RE holds one parsed regex operation (match or substitute) together
// with its results. The exported fields (Matches, S, Z) are populated
// after the operation runs; everything else is internal parser state.
type RE struct {
	_orig string // original regex string
	f *string // altered flags string
	n *string // altered regex string
	s *string // substitution string in substitute-operation
	regex *regexp.Regexp // compiled regexp after preprocessing the needle parsed
	mode byte // s or m or tr
	separator byte // separate the mode/matcher/substituter/flags components
	captures bool // Enable capture group functionality
	nCaptures bool // Enable named capture groups functionality. The way Go regexp works with mixing named and non-named groups together makes it difficult to distinguish if named groups are actually used or not. This saves a lot of computation.
	g bool // flag g used. NOTE(review): never assigned in this file; flag presence is checked via *r.f instead — confirm whether this field is still needed.
	x bool // flag x used. NOTE(review): never assigned in this file; see g above.
	Matches int // how many times the regex matched
	S []string // $1, $2, ..., $n Captured subpatterns
	Z map[string]string // %+ Named capture buffers
}
// R0 holds the result of the latest regexp operation. Not thread-safe!
// It could be if Go had thread-local variables or a way to identify the running thread.
var R0 *RE = &RE{}

// UseRECache enables/disables transparent RE caching. With caching enabled, the performance of repeated regex operations is increased ~600%.
var UseRECache bool = true
/*
Inspired by https://github.com/patrickmn/go-cache
Transparently caches given regexps to save on the expensive computation
*/
type reCache struct {
	cache map[string]*RE // parsed/compiled REs keyed by the original regex string
	mu sync.RWMutex // guards cache
}
// newRECache creates an empty, ready-to-use regexp cache.
func newRECache() *reCache {
	c := &reCache{}
	c.cache = map[string]*RE{}
	return c
}
// PutStr caches re under the literal regex string; convenience wrapper around Put.
func (self *reCache) PutStr(regex string, re *RE) {
	self.Put(&regex, re)
}
func (self *reCache) Put(regex *string, re *RE) {
copy := *re
self.mu.Lock()
self.cache[*regex] = ©
self.mu.Unlock()
}
// GetStr looks up the cached RE for the literal regex string; convenience wrapper around Get.
func (self *reCache) GetStr(regex string) *RE {
	return self.Get(&regex)
}
func (self *reCache) Get(regex *string) *RE {
self.mu.Lock()
copyPtr := self.cache[*regex]
if copyPtr == nil {
self.mu.Unlock()
return nil
}
copy := *copyPtr
self.mu.Unlock()
return ©
}
// Flush drops every cached entry. The map swap now happens under the
// mutex; the original assigned the field without locking, which is a
// data race against concurrent Get/Put.
func (self *reCache) Flush() {
	self.mu.Lock()
	self.cache = make(map[string]*RE)
	self.mu.Unlock()
}
var regexpCache = newRECache()
/*
R dispatches to substitution or matching depending on the parsed regex
mode (s/.../.../ vs m/.../), so it is useful when the operation type is
only known at runtime. It reports whether the expression matched (or
substituted) at least once. The original duplicated the Matches check
in every branch with an unreachable trailing return; a single boolean
return expresses the same result.
*/
func R(haystack *string, needle string) bool {
	r := regexParser(&needle)
	if r.mode == 's' {
		s(haystack, r)
	} else {
		m(haystack, r)
	}
	return r.Matches > 0
}
func M(haystack string, needle string) bool {
r := regexParser(&needle)
m(&haystack, r)
if r.Matches > 0 {
return true
} else {
return false
}
}
/*
Mr runs the match expression needle against haystack and returns the
resulting *RE, which is thread-safe (unlike the package-global R0).
*/
func Mr(haystack string, needle string) *RE {
	return m(&haystack, regexParser(&needle))
}
func S(haystack *string, needle string) bool {
r := regexParser(&needle)
s(haystack, r)
if r.Matches > 0 {
return true
} else {
return false
}
}
/*
Sr applies the substitution expression needle to *haystack in place and
returns the resulting *RE, which is thread-safe (unlike the
package-global R0).
*/
func Sr(haystack *string, needle string) *RE {
	return s(haystack, regexParser(&needle))
}
/*
Ss applies the substitution expression needle to a copy of haystack and
returns the substituted string, leaving the caller's value untouched.
*/
func Ss(haystack string, needle string) string {
	s(&haystack, regexParser(&needle))
	return haystack
}
// m executes a match-mode regex against *haystack and records the
// results in r, which is also published as the package-global R0.
// With the g flag every match is collected; otherwise only the first.
// Capture group values go into r.S and, when named groups are present,
// into r.Z (later matches overwrite earlier ones under the g flag).
func m(haystack *string, r *RE) *RE {
	R0 = r
	if strings.Contains(*r.f, "g") {
		// Global match: gather all matches and their capture groups.
		captureGroups := r.regex.FindAllStringSubmatch(*haystack, -1)
		if captureGroups == nil {
			return r // no match; r.Matches stays 0
		}
		r.Matches = len(captureGroups)
		if r.captures {
			namedCaptureGroups := r.regex.SubexpNames()
			if r.nCaptures && len(namedCaptureGroups) > 1 {
				r.Z = make(map[string]string, len(namedCaptureGroups))
			}
			// One slot per capture group per match, plus the unused index 0
			// (r.S mirrors Perl's 1-based $1..$n numbering).
			r.S = make([]string, len(captureGroups)*(len(captureGroups[0])-1)+1)
			for i, captures := range captureGroups {
				captureGroup(r, captures, i, namedCaptureGroups)
			}
		}
	} else {
		// Single match: only the first occurrence is recorded.
		captures := r.regex.FindStringSubmatch(*haystack)
		if captures == nil {
			return r // no match; r.Matches stays 0
		}
		r.Matches = 1
		if r.captures {
			namedCaptureGroups := r.regex.SubexpNames()
			if r.nCaptures && len(namedCaptureGroups) > 1 {
				r.Z = make(map[string]string, len(namedCaptureGroups))
			}
			r.S = make([]string, len(captures))
			captureGroup(r, captures, 0, namedCaptureGroups)
		}
	}
	return r
}
// s executes a substitute-mode regex against *haystack, replacing
// matches in place with the parsed substitution string, and records the
// results in r (also published as the package-global R0). With the g
// flag every occurrence is replaced; otherwise only the first. Capture
// groups are collected into r.S/r.Z before the replacement happens.
func s(haystack *string, r *RE) *RE {
	R0 = r
	result := []byte{}
	if strings.Contains(*r.f, "g") {
		if r.captures {
			// Collect capture groups from the original text before replacing.
			captureGroups := r.regex.FindAllStringSubmatch(*haystack, -1)
			if captureGroups == nil {
				return r // no match; nothing to substitute
			}
			namedCaptureGroups := r.regex.SubexpNames()
			if r.nCaptures && len(namedCaptureGroups) > 1 {
				r.Z = make(map[string]string, len(namedCaptureGroups))
			}
			r.S = make([]string, len(captureGroups)*(len(captureGroups[0])-1)+1)
			for i, captures := range captureGroups {
				captureGroup(r, captures, i, namedCaptureGroups)
			}
		}
		// For each match of the regex in the content.
		r.Matches = len(r.regex.FindAllStringSubmatchIndex(*haystack, -1))
		if r.Matches > 0 {
			*haystack = r.regex.ReplaceAllString(*haystack, *r.s)
		}
	} else {
		if r.captures {
			captures := r.regex.FindStringSubmatch(*haystack)
			if captures == nil {
				return r // no match; nothing to substitute
			}
			namedCaptureGroups := r.regex.SubexpNames()
			if r.nCaptures && len(namedCaptureGroups) > 1 {
				r.Z = make(map[string]string, len(namedCaptureGroups))
			}
			r.S = make([]string, len(captures))
			captureGroup(r, captures, 0, namedCaptureGroups)
		}
		// Replace only the first match: expand the substitution template
		// and splice it over the matched span.
		captureIdxs := r.regex.FindStringSubmatchIndex(*haystack)
		if len(captureIdxs) > 0 {
			r.Matches = 1
			result = r.regex.ExpandString(nil, *r.s, *haystack, captureIdxs)
			*haystack = (*haystack)[:captureIdxs[0]] + string(result) + (*haystack)[captureIdxs[1]:]
		}
	}
	return r
}
// captureGroup copies one match's capture values into r.S (flattened
// 1-based: match k's group j lands at k*(groups)+j) and, for named
// groups, into r.Z. Empty named captures are skipped, so under global
// matching r.Z holds the last non-empty value of each named group.
func captureGroup(r *RE, captures []string, captureGroupsIteration int, namedCaptureGroups []string) {
	for j := 1; j < len(captures); j++ {
		r.S[(captureGroupsIteration*(len(captures)-1))+j] = captures[j]
		if j < len(namedCaptureGroups) {
			if namedCaptureGroup := namedCaptureGroups[j]; namedCaptureGroup != "" && captures[j] != "" {
				r.Z[namedCaptureGroup] = captures[j]
			}
		}
	}
}
// regexParser parses a Perl-style expression (m/.../flags, s/.../.../flags,
// tr/...) into an RE with a compiled Go regexp. The character right
// after the mode letter is taken as the separator. Results are cached
// in regexpCache when UseRECache is enabled. Panics (via MustCompile)
// on an invalid pattern.
func regexParser(needle *string) *RE {
	var r *RE
	if UseRECache {
		// Fast path: return a private copy of a previously parsed RE.
		r = regexpCache.Get(needle)
		if r != nil {
			return r
		}
	}
	r = &RE{
		_orig: *needle,
		mode: 'm',
	}
	// Separate builders for the match part, the substitution part and
	// the trailing flags.
	sbM := strings.Builder{}
	sbM.Grow(len(r._orig))
	sbS := strings.Builder{}
	sbS.Grow(len(r._orig))
	sbF := strings.Builder{}
	sbF.Grow(3)
	var sb *strings.Builder
	i := 0
	// Optional leading mode letter: m, s or tr (default is m).
	switch (r._orig)[i] {
	case 'm':
		r.mode = 'm'
		i++
	case 't':
		r.mode = 't'
		if (r._orig)[i+1] == 'r' {
			i++
		}
		i++
	case 's':
		r.mode = 's'
		i++
	}
	r.separator = (r._orig)[i]
	i++
	// mode tracks which component we are currently filling:
	// 'm' matcher, 's' substitution, 'f' flags.
	var mode byte = 'm'
	sb = &sbM
	for ; i < len(r._orig); i++ {
		switch (r._orig)[i] {
		case r.separator:
			// Advance to the next component.
			switch mode {
			case 'm':
				if r.mode == 'm' {
					mode = 'f'
					sb = &sbF
				} else {
					mode = 's'
					sb = &sbS
				}
			case 's':
				mode = 'f'
				sb = &sbF
			case 'f':
				fmt.Printf("Separator '%d' after flags-field in regex '%s'", r.separator, r._orig)
			}
			continue
		case '\\':
			sb.WriteByte((r._orig)[i])
			i++ // Skip the escaping backslash and the character being escaped
			sb.WriteByte((r._orig)[i])
			continue
		case '(':
			// Detect whether any (named) capture groups are used so the
			// match/substitute code can skip capture bookkeeping entirely.
			if (r._orig)[i+1] == '?' && (r._orig)[i+2] == ':' { // (?:) is a non-capturing group
			} else if (r._orig)[i+1] == '?' && (r._orig)[i+2] == 'P' && (r._orig)[i+3] == '<' { // (?P<) named capture groups
				r.nCaptures = true
				r.captures = true
			} else {
				r.captures = true
			}
		}
		sb.WriteByte((r._orig)[i])
	}
	// A flags-less expression must still end with its separator.
	if mode != 'f' && r._orig[len(r._orig)-1] != r.separator {
		fmt.Printf("Ending terminator '%c' not found for regexp string '%s'!\n", r.separator, r._orig)
	}
	rn, rs, rf := sbM.String(), sbS.String(), sbF.String()
	r.n, r.s, r.f = &rn, &rs, &rf
	// Rewrite the pattern for the x flag and Go-native (?ims) flags,
	// then compile.
	flagHandler_x(r)
	flagHandlerGoNative(r)
	r.regex = regexp.MustCompile(*r.n)
	if UseRECache {
		regexpCache.Put(needle, r)
	}
	return r
}
// flagHandlerGoNative translates the Perl-style i/m/s flags into Go's
// inline "(?ims)" prefix on the pattern about to be compiled.
func flagHandlerGoNative(r *RE) {
	var flags []byte
	for _, f := range []byte("ims") {
		if strings.IndexByte(*r.f, f) >= 0 {
			flags = append(flags, f)
		}
	}
	if len(flags) > 0 {
		*r.n = `(?` + string(flags) + `)` + *r.n
	}
}
// flagHandler_x applies the Perl x flag (extended/readable regexes):
// when present, insignificant whitespace and # comments are stripped
// from both the match pattern and the substitution string.
func flagHandler_x(r *RE) {
	if strings.Contains(*r.f, "x") {
		flagHandler_x_(r, r.n)
		flagHandler_x_(r, r.s)
	}
}
// flagHandler_x_ rewrites *regstr in place for the x flag: whitespace
// outside bracketed character classes is dropped, and '#' starts a
// comment running to end of line. Escaped characters and everything
// inside [...] are preserved verbatim.
func flagHandler_x_(r *RE, regstr *string) {
	sb := strings.Builder{}
	sb.Grow(len(*regstr))
	var inBracketedCharacterClass bool
	var inComment bool
	for i := 0; i < len(*regstr); i++ {
		if inComment {
			// Discard everything up to and including the newline.
			if (*regstr)[i] == '\n' {
				inComment = false
				continue
			}
			continue
		}
		switch (*regstr)[i] {
		case '[':
			if inBracketedCharacterClass {
				// This is an illegal regex, but keep removing whitespace to detect the real problem
			} else {
				inBracketedCharacterClass = true
			}
		case ']':
			if inBracketedCharacterClass {
				inBracketedCharacterClass = false
			} else {
				// This is an illegal regex, but keep removing whitespace to detect the real problem
			}
		case '\\':
			sb.WriteByte((*regstr)[i])
			i++ // Skip the escaping backslash and the character being escaped
			sb.WriteByte((*regstr)[i])
			continue
		case '#':
			if inBracketedCharacterClass {
				// '#' is a literal inside a character class.
			} else {
				inComment = true
				continue
			}
		case '\t', '\n', '\v', '\f', '\r', ' ':
			// Insignificant whitespace unless inside a character class.
			if !inBracketedCharacterClass {
				continue
			}
		}
		sb.WriteByte((*regstr)[i])
	}
	str := sb.String()
	*regstr = str
}
package types
import (
"strings"
"github.com/fabulousduck/proto/src/tokens"
)
// DetermineType classifies a single-character lexeme and returns a
// (type, value) pair:
//   - ("ignoreable", name) for whitespace the lexer skips,
//   - ("char", str) for ASCII letters and '_',
//   - ("integer", str) for ASCII digits,
//   - ("operator", name) for recognized single-character operators,
//   - ("", "undefined_character") for anything else.
func DetermineType(str string) (string, string) {
	singleOperators := map[string]string{
		"<": "left_arrow",
		">": "right_arrow",
		"|": "pipe",
		"{": "left_curly_brace",
		"}": "right_curly_brace",
		"[": "left_square_bracket",
		"]": "right_square_bracket",
		";": "semi_colon",
		":": "double_dot",
		"/": "slash",
		"\\": "back_slash",
		".": "dot",
		",": "comma",
		"'": "single_quote",
		"\"": "double_quote",
		"+": "plus",
		"-": "dash",
		"=": "equals",
		"#": "bang",
		"^": "carrot",
		"&": "logical_and",
		"%": "modulo",
		"*": "star",
		"(": "left_round_bracket",
		")": "right_round_bracket",
	}
	ignorables := map[string]string{
		"\r": "win_return",
		"\n": "newline",
		"\t": "tab",
		" ": "space",
	}
	if val, ok := ignorables[str]; ok {
		return "ignoreable", val
	}
	// Letters, '_' and digits are checked with byte-range comparisons
	// instead of scanning a slice of every allowed character.
	if len(str) == 1 {
		c := str[0]
		switch {
		case c == '_' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z'):
			return "char", str
		case '0' <= c && c <= '9':
			return "integer", str
		}
	}
	if val, ok := singleOperators[str]; ok {
		return "operator", val
	}
	// Note: the former `if str == " "` branch was unreachable — " " is
	// already handled by the ignorables map above.
	return "", "undefined_character"
}
// CheckKeywords rewrites token.Type when token.Value is one of the
// language's reserved keywords; other tokens are left untouched.
func CheckKeywords(token *tokens.Token) {
	switch token.Value {
	case "int":
		token.Type = "integer"
	case "bool":
		token.Type = "boolean"
	case "string":
		token.Type = "string_literal"
	case "float":
		token.Type = "floating_point_integer"
	case "class":
		token.Type = "class"
	}
}
// IsValidDoubleOperator reports whether base immediately followed by next
// forms a recognized two-character operator, returning its name on success
// and ("", false) otherwise.
func IsValidDoubleOperator(base string, next string) (string, bool) {
	operators := map[string]string{
		"==": "exact_equals",
		"=>": "equals_greater",
		"=<": "equals_smaller",
		"++": "increment",
		"--": "decrement",
		"->": "arrow",
		":=": "double_dick",
		"/*": "open_multiline_comment",
		"*/": "close_multiline_comment",
	}
	name, ok := operators[strings.Join([]string{base, next}, "")]
	if !ok {
		return "", false
	}
	return name, true
}
// Contains reports whether name is present in list.
func Contains(name string, list []string) bool {
	// Range loop; the original's string(list[i]) conversion was redundant
	// since the elements are already strings.
	for _, item := range list {
		if item == name {
			return true
		}
	}
	return false
}
// IsLitChar reports whether char is a single hexadecimal letter
// (A-F or a-f). Multi-character strings and digits return false,
// matching the original list-membership check.
func IsLitChar(char string) bool {
	if len(char) != 1 {
		return false
	}
	c := char[0]
	return ('A' <= c && c <= 'F') || ('a' <= c && c <= 'f')
}
package paint
import (
"encoding/binary"
"image"
"image/color"
"image/draw"
"math"
"gioui.org/f32"
"gioui.org/internal/opconst"
"gioui.org/op"
)
// ImageOp sets the material to an image.
type ImageOp struct {
	// uniform is true when the source was an *image.Uniform; in that case
	// only color is set and no backing image is allocated.
	uniform bool
	color color.RGBA
	// src holds the pixel data, copied into RGBA form by NewImageOp.
	src *image.RGBA
	// size is the pixel dimensions of src (zero for uniform materials).
	size image.Point
}
// ColorOp sets the material to a constant color.
type ColorOp struct {
	// Color is the RGBA color to paint with.
	Color color.RGBA
}
// PaintOp draws the current material, respecting the
// clip path and transformation.
type PaintOp struct {
	// Rect is the area to fill with the current material.
	Rect f32.Rectangle
}
// NewImageOp creates a material from src. A *image.Uniform source is
// stored as a single color; any other image is copied into a zero-origin
// RGBA image so the pixel layout is GPU friendly.
func NewImageOp(src image.Image) ImageOp {
	if uni, ok := src.(*image.Uniform); ok {
		return ImageOp{
			uniform: true,
			color:   color.RGBAModel.Convert(uni.C).(color.RGBA),
		}
	}
	bounds := src.Bounds()
	sz := bounds.Size()
	// Copy the image into a GPU friendly format.
	rgba := image.NewRGBA(image.Rectangle{Max: sz})
	draw.Draw(rgba, bounds, src, image.Point{}, draw.Src)
	return ImageOp{
		src:  rgba,
		size: sz,
	}
}
// Size returns the pixel dimensions of the image. For a uniform (single
// color) material the size was never set and is the zero Point.
func (i ImageOp) Size() image.Point {
	return i.size
}
// Add registers the image material in the op list. A uniform material is
// emitted as the equivalent ColorOp; otherwise the pixel data reference is
// written, followed by a one-byte type tag and the image width and height
// encoded as little-endian uint32s.
func (i ImageOp) Add(o *op.Ops) {
	if i.uniform {
		ColorOp{
			Color: i.color,
		}.Add(o)
		return
	}
	data := o.Write(opconst.TypeImageLen, i.src)
	data[0] = byte(opconst.TypeImage)
	bo := binary.LittleEndian
	bo.PutUint32(data[1:], uint32(i.size.X))
	bo.PutUint32(data[5:], uint32(i.size.Y))
}
// Add registers the color material in the op list as a one-byte type tag
// followed by the R, G, B and A channel bytes.
func (c ColorOp) Add(o *op.Ops) {
	data := o.Write(opconst.TypeColorLen)
	data[0] = byte(opconst.TypeColor)
	data[1] = c.Color.R
	data[2] = c.Color.G
	data[3] = c.Color.B
	data[4] = c.Color.A
}
// Add registers the paint operation in the op list: a one-byte type tag
// followed by Rect's corners (Min.X, Min.Y, Max.X, Max.Y) encoded as the
// little-endian bit patterns of their float32 values.
func (d PaintOp) Add(o *op.Ops) {
	data := o.Write(opconst.TypePaintLen)
	data[0] = byte(opconst.TypePaint)
	bo := binary.LittleEndian
	bo.PutUint32(data[1:], math.Float32bits(d.Rect.Min.X))
	bo.PutUint32(data[5:], math.Float32bits(d.Rect.Min.Y))
	bo.PutUint32(data[9:], math.Float32bits(d.Rect.Max.X))
	bo.PutUint32(data[13:], math.Float32bits(d.Rect.Max.Y))
}
// RectClip returns a ClipOp corresponding to a pixel aligned
// rectangular area, converting the integer rectangle to float32
// coordinates via toRectF.
func RectClip(r image.Rectangle) ClipOp {
	return ClipOp{bounds: toRectF(r)}
}
func toRectF(r image.Rectangle) f32.Rectangle {
return f32.Rectangle{
Min: f32.Point{X: float32(r.Min.X), Y: float32(r.Min.Y)},
Max: f32.Point{X: float32(r.Max.X), Y: float32(r.Max.Y)},
}
} | op/paint/paint.go | 0.796015 | 0.473292 | paint.go | starcoder |
package ut
import (
"reflect"
"strings"
"testing"
"runtime"
"bytes"
"fmt"
)
// test is the *testing.T that the Assert* helpers report failures against.
// It starts as an empty placeholder and is replaced by Run for each test.
var test *testing.T = &testing.T{}
// Run sets up an individual test by storing its *testing.T so subsequent
// Assert* calls report against it.
func Run(t *testing.T) {
	test = t
}
// Test returns the *testing.T previously stored by Run.
func Test() *testing.T {
	return test
}
// AssertTrue tests whether the given value is true. It reports a failure
// against the current test and returns false when actual is false.
func AssertTrue(actual bool) bool {
	if !actual {
		// %v formats the bool; %q is not a valid verb for booleans and
		// produced "%!q(bool=false)" in the failure message.
		Errorf("Failed asserting %v is true.", actual)
		return false
	}
	return true
}
// AssertFalse tests whether the given value is false. It reports a failure
// against the current test and returns false when actual is true.
func AssertFalse(actual bool) bool {
	if actual {
		// %v formats the bool; %q is not a valid verb for booleans and
		// produced "%!q(bool=true)" in the failure message.
		Errorf("Failed asserting %v is false.", actual)
		return false
	}
	return true
}
// AssertNotNil tests whether the given value is not nil.
// NOTE(review): this compares the interface value against literal nil, so a
// typed nil pointer stored in the interface counts as not-nil and passes —
// confirm that is the intended semantics.
func AssertNotNil(actual interface{}) bool {
	if actual == nil {
		Errorf("Failed asserting the value is not nil.")
		return false
	}
	return true
}
// AssertNil tests whether the given value is nil.
// NOTE(review): this compares the interface value against literal nil, so a
// typed nil pointer stored in the interface fails this assertion — confirm
// that is the intended semantics.
func AssertNil(actual interface{}) bool {
	if actual != nil {
		Errorf("Failed asserting %T is nil.", actual)
		return false
	}
	return true
}
// AssertEmpty tests whether the given value is empty, i.e. the zero value
// of its type as determined by isZero.
func AssertEmpty(actual interface{}) bool {
	if isZero(reflect.ValueOf(actual)) {
		return true
	}
	Errorf("Failed asserting %q is empty.", actual)
	return false
}
// AssertNotEmpty tests whether the given value is not empty, i.e. not the
// zero value of its type as determined by isZero.
func AssertNotEmpty(actual interface{}) bool {
	if !isZero(reflect.ValueOf(actual)) {
		return true
	}
	Errorf("Failed asserting %q is not empty.", actual)
	return false
}
// AssertEquals tests whether two values are deeply equal
// (reflect.DeepEqual semantics).
func AssertEquals(expected, actual interface{}) bool {
	if reflect.DeepEqual(expected, actual) {
		return true
	}
	Errorf("Failed asserting %q equals %q.", expected, actual)
	return false
}
// AssertNotEquals tests whether two values are not deeply equal
// (reflect.DeepEqual semantics).
func AssertNotEquals(expected, actual interface{}) bool {
	if !reflect.DeepEqual(expected, actual) {
		return true
	}
	Errorf("Failed asserting %q is not equal to %q.", expected, actual)
	return false
}
// AssertGreaterThan tests whether the actual value is strictly greater
// than the expected value.
func AssertGreaterThan(expected, actual int) bool {
	if actual > expected {
		return true
	}
	Errorf("Failed asserting %q is greater than %q.", actual, expected)
	return false
}
// AssertContains tests whether the actual string contains expected as a
// substring. (The previous comment stated the containment direction
// backwards; the code checks strings.Contains(actual, expected).)
func AssertContains(expected, actual string) bool {
	if !strings.Contains(actual, expected) {
		Errorf("Failed asserting %q contains %q.", actual, expected)
		return false
	}
	return true
}
// isZero returns if the value is zero.
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Func, reflect.Map, reflect.Slice:
return v.IsNil()
case reflect.Array:
z := true
for i := 0; i < v.Len(); i++ {
z = z && isZero(v.Index(i))
}
return z
case reflect.Struct:
z := true
for i := 0; i < v.NumField(); i++ {
z = z && isZero(v.Field(i))
}
return z
}
// Compare other types directly:
z := reflect.Zero(v.Type())
return v.Interface() == z.Interface()
}
// Errorf records a formatted failure against the current test: the format
// string is prefixed with the assertion call site's file:line (see
// decorate), logged, and the test is marked as failed. It uses Log+Fail
// rather than t.Errorf — presumably so the reported location is the
// assertion call site instead of this helper; verify.
func Errorf(format string, args ...interface{}) {
	format = decorate(format)
	test.Log(fmt.Sprintf(format, args...))
	test.Fail()
}
// decorate prefixes the string with the file and line of the call site
// and inserts the final newline if needed and indentation tabs for formatting.
func decorate(s string) string {
_, file, line, ok := runtime.Caller(3) // decorate + log + public function.
if ok {
// Truncate file name at last file name separator.
if index := strings.LastIndex(file, "/"); index >= 0 {
file = file[index+1:]
} else if index = strings.LastIndex(file, "\\"); index >= 0 {
file = file[index+1:]
}
} else {
file = "???"
line = 1
}
buf := new(bytes.Buffer)
// Every line is indented at least one tab.
buf.WriteByte('\t')
fmt.Fprintf(buf, "%s:%d: ", file, line)
lines := strings.Split(s, "\n")
if l := len(lines); l > 1 && lines[l-1] == "" {
lines = lines[:l-1]
}
for i, line := range lines {
if i > 0 {
// Second and subsequent lines are indented an extra tab.
buf.WriteString("\n\t\t")
}
buf.WriteString(line)
}
buf.WriteByte('\n')
return buf.String()
} | vendor/github.com/headzoo/ut/ut.go | 0.661814 | 0.519887 | ut.go | starcoder |
package ast
import (
"errors"
"fmt"
"strings"
)
// Package represents one zserio package: its name, the declarations it
// contains (keyed by declared name), and the packages it imports.
type Package struct {
	BaseScope
	// Name is the zserio package name; dotted names are mapped to Go
	// aliases by GoPackageAlias.
	Name string
	Comment string
	Imports []*Import
	// Declarations by kind, keyed by declared name.
	Consts map[string]*Const
	Subtypes map[string]*Subtype
	InstantiatedTypes map[string]*InstantiateType
	Enums map[string]*Enum
	Unions map[string]*Union
	Bitmasks map[string]*BitmaskType
	Structs map[string]*Struct
	Choices map[string]*Choice
	// ImportedPackages holds the resolved packages — presumably one per
	// entry in Imports; verify against the resolver.
	ImportedPackages []*Package
	// LocalSymbols is the symbol table built by CollectSymbols.
	LocalSymbols *SymbolScope
}
// NativeZserioTypeReference is a reference to the basic zserio type
// underlying a (possibly aliased) type, as resolved by
// Package.GetZserioNativeType.
type NativeZserioTypeReference struct {
	// Type is the resolved type reference.
	Type *TypeReference
	// RequiresCast is true when at least one alias level (subtype, field
	// or parameter type) was followed during resolution.
	RequiresCast bool
	// IsMarshaler is true for types that marshal themselves
	// (struct, enum, union, bitmask, choice).
	IsMarshaler bool
}
// NewPackage creates an empty Package named name, parented to the root
// scope, with all declaration maps initialized and ready for use.
func NewPackage(name string) *Package {
	pkg := &Package{
		Name:              name,
		Consts:            map[string]*Const{},
		Subtypes:          map[string]*Subtype{},
		InstantiatedTypes: map[string]*InstantiateType{},
		Structs:           map[string]*Struct{},
		Enums:             map[string]*Enum{},
		Unions:            map[string]*Union{},
		Bitmasks:          map[string]*BitmaskType{},
		Choices:           map[string]*Choice{},
	}
	pkg.BaseScope = BaseScope{Parent: rootScope}
	return pkg
}
// HasType checks if a type is defined *in this scope*. It does not check
// parent scopes.
func (p *Package) HasType(name string) bool {
	if _, err := p.LocalSymbols.GetType(name); err != nil {
		return false
	}
	return true
}
// CollectSymbols builds p.LocalSymbols from the package's declarations.
// All declared type names go into TypeScope; enum items additionally go
// into OtherScope; structs, unions and choices also build their own
// compound scopes. The first BuildScope error aborts and is returned.
func (p *Package) CollectSymbols() error {
	p.LocalSymbols = &SymbolScope{
		TypeScope:      map[string]any{},
		OtherScope:     map[string]any{},
		CompoundScopes: map[string]*CompoundScope{},
	}
	for name, subtype := range p.Subtypes {
		p.LocalSymbols.TypeScope[name] = subtype
	}
	for name, inst := range p.InstantiatedTypes {
		p.LocalSymbols.TypeScope[name] = inst
	}
	for name, constant := range p.Consts {
		p.LocalSymbols.TypeScope[name] = constant
	}
	for name, structure := range p.Structs {
		p.LocalSymbols.TypeScope[name] = structure
		if err := structure.BuildScope(p); err != nil {
			return err
		}
	}
	for name, enum := range p.Enums {
		p.LocalSymbols.TypeScope[name] = enum
		// Enum items are addressable as plain symbols as well.
		for _, item := range enum.Items {
			p.LocalSymbols.OtherScope[item.Name] = item
		}
	}
	for name, union := range p.Unions {
		p.LocalSymbols.TypeScope[name] = union
		if err := union.BuildScope(p); err != nil {
			return err
		}
	}
	for name, bitmask := range p.Bitmasks {
		p.LocalSymbols.TypeScope[name] = bitmask
	}
	for name, choice := range p.Choices {
		p.LocalSymbols.TypeScope[name] = choice
		if err := choice.BuildScope(p); err != nil {
			return err
		}
	}
	return nil
}
// GoType looks up a name in the scope, and provides the Go type name for
// it. Builtin types are delegated to the base scope; other types resolve
// to their exported (title-cased) Go name, qualified with the generated
// package alias when they live in a different package.
func (p *Package) GoType(t *TypeReference) (string, error) {
	if t.IsBuiltin {
		return p.BaseScope.GoType(t)
	}
	if t.Package == "" {
		return "", errors.New("type is not resolved")
	}
	name := strings.Title(t.Name)
	if t.Package == p.Name {
		// Same package: no qualifier needed.
		return name, nil
	}
	// Foreign package: qualify with its import alias. (A former
	// len(strings.Split(t.Package, ".")) == 0 check was unreachable —
	// strings.Split never returns an empty slice — and has been removed.)
	return fmt.Sprintf("%s.%s", GoPackageAlias(t.Package), name), nil
}
// GoArrayTraits returns the array traits object for non-basic zserio
// types, such as enums, subtypes, or structures. Consts and subtypes are
// resolved through their underlying type; all compound and enumerated
// types use object array traits.
func (p *Package) GoArrayTraits(t *TypeReference) (string, error) {
	if t.IsBuiltin {
		return p.BaseScope.GoArrayTraits(t)
	}
	scope, err := p.GetImportedScope(t.Package)
	if err != nil {
		return "", err
	}
	sym, err := scope.GetSymbol(t.Name)
	if err != nil {
		return "", fmt.Errorf("%w: %s", ErrUnknownType, t.Name)
	}
	switch s := sym.Symbol.(type) {
	case *Const:
		return p.GoArrayTraits(s.Type)
	case *Subtype:
		return p.GoArrayTraits(s.Type)
	case *InstantiateType, *Struct, *Enum, *Union, *BitmaskType, *Choice:
		return "ztype.ObjectArrayTraits", nil
	default:
		return "", fmt.Errorf("%w: %s", ErrUnknownType, t.Name)
	}
}
// IsDeltaPackable indicates if a non-basic zserio type (struct, enum,
// subtype, ...) can be delta-packed. Aliases are resolved through their
// underlying type; compound types are not delta-packable (they carry
// their own packed marshaling), while enums and bitmasks are.
func (p *Package) IsDeltaPackable(t *TypeReference) (bool, error) {
	if t.IsBuiltin {
		return p.BaseScope.Parent.IsDeltaPackable(t)
	}
	scope, err := p.GetImportedScope(t.Package)
	if err != nil {
		return false, err
	}
	sym, err := scope.GetSymbol(t.Name)
	if err != nil {
		return false, fmt.Errorf("%w: %s", ErrUnknownType, t.Name)
	}
	switch s := sym.Symbol.(type) {
	case *Const:
		return p.IsDeltaPackable(s.Type)
	case *Subtype:
		return p.IsDeltaPackable(s.Type)
	case *InstantiateType:
		return p.IsDeltaPackable(s.Type)
	case *Struct, *Union, *Choice:
		// compound types have their own packed marshaling function
		return false, nil
	case *Enum, *BitmaskType:
		return true, nil
	default:
		return false, fmt.Errorf("%w: %s", ErrUnknownType, t.Name)
	}
}
// SymbolReference points at a resolved symbol together with where it was
// found.
type SymbolReference struct {
	// Symbol is the AST node (*Struct, *Enum, *Field, *Subtype, ...).
	Symbol any
	// Name is the symbol's declared name.
	Name string
	// Package is the name of the package the symbol was resolved in;
	// filled in by the Get* lookup methods.
	Package string
	CompoundName string
}
// GetSymbol searches for a symbol in the current scope and all imported
// scopes. The lookup order is significant to avoid recursive lookups when
// a field and a type share the same name: types first, then fields of the
// currently active compound scopes, then all other symbols (enum items).
func (p *Package) GetSymbol(name string) (*SymbolReference, error) {
	lookups := []func(string) (*SymbolReference, error){
		p.GetType,
		p.GetCurrentCompoundType,
		p.GetOtherType,
	}
	for _, lookup := range lookups {
		if ref, err := lookup(name); err == nil {
			return ref, nil
		}
	}
	return nil, errors.New("symbol not found")
}
// GetType looks up a type name in the local scope first and then in each
// directly imported package, tagging the returned reference with the
// package it was found in.
func (p *Package) GetType(name string) (*SymbolReference, error) {
	for _, pkg := range append([]*Package{p}, p.ImportedPackages...) {
		if ref, err := pkg.LocalSymbols.GetType(name); err == nil {
			ref.Package = pkg.Name
			return ref, nil
		}
	}
	return nil, errors.New("type not found")
}
// GetCurrentCompoundType looks up name in the currently active compound
// scope (if any) of the local package and of every directly imported
// package, tagging the result with the package it was found in.
func (p *Package) GetCurrentCompoundType(name string) (*SymbolReference, error) {
	for _, pkg := range append([]*Package{p}, p.ImportedPackages...) {
		current := pkg.LocalSymbols.CurrentCompoundScope
		if current == nil {
			continue
		}
		if ref, err := pkg.LocalSymbols.GetCompoundType(*current, name); err == nil {
			ref.Package = pkg.Name
			return ref, nil
		}
	}
	return nil, errors.New("compound symbol not found")
}
// GetCompoundType looks up a member name inside the compound scope called
// compoundSymbolName, searching the local package and then every directly
// imported package.
func (p *Package) GetCompoundType(compoundSymbolName, name string) (*SymbolReference, error) {
	for _, pkg := range append([]*Package{p}, p.ImportedPackages...) {
		if ref, err := pkg.LocalSymbols.GetCompoundType(compoundSymbolName, name); err == nil {
			ref.Package = pkg.Name
			return ref, nil
		}
	}
	return nil, errors.New("compound symbol not found")
}
// GetOtherType looks up name among the non-type symbols (e.g. enum items)
// of the local package; failing that, it recurses into each imported
// package, which tags the reference with its own package name.
func (p *Package) GetOtherType(name string) (*SymbolReference, error) {
	if ref, err := p.LocalSymbols.GetOtherType(name); err == nil {
		ref.Package = p.Name
		return ref, nil
	}
	for _, imported := range p.ImportedPackages {
		if ref, err := imported.GetOtherType(name); err == nil {
			return ref, nil
		}
	}
	return nil, errors.New("other type not found")
}
// GetZserioNativeType follows subtypes, fields and parameters until it
// reaches either a builtin zserio type or a self-marshaling type (struct,
// enum, union, bitmask, choice). RequiresCast is set as soon as at least
// one alias level was followed. The lookup depth is bounded to guard
// against circular type definitions.
func (p *Package) GetZserioNativeType(typeRef *TypeReference) (*NativeZserioTypeReference, error) {
	const maxLookupDepth = 100 // guards against circular type definitions
	requiresCast := false
	currentScope := p
	for depth := 0; depth < maxLookupDepth; depth++ {
		if typeRef.IsBuiltin {
			return &NativeZserioTypeReference{
				Type:         typeRef,
				RequiresCast: requiresCast,
			}, nil
		}
		scope, err := currentScope.GetImportedScope(typeRef.Package)
		if err != nil {
			return nil, err
		}
		// Advance the lookup scope. (The previous code declared a new
		// currentScope with := inside the loop, shadowing the outer
		// variable, so the scope never actually advanced past p.)
		currentScope = scope
		symbol, err := currentScope.GetSymbol(typeRef.Name)
		if err != nil {
			return nil, err
		}
		switch n := symbol.Symbol.(type) {
		case *Enum, *Union, *BitmaskType, *Choice, *Struct:
			// Self-marshaling types terminate the resolution.
			return &NativeZserioTypeReference{
				Type:         typeRef,
				RequiresCast: requiresCast,
				IsMarshaler:  true,
			}, nil
		case *Field:
			typeRef = n.Type
		case *Parameter:
			typeRef = n.Type
		case *Subtype:
			typeRef = n.Type
		default:
			return nil, errors.New("unable to find the native zserio type")
		}
		requiresCast = true
	}
	return nil, errors.New("internal failure to resolve type (circular lookup)")
}
// GoPackageAlias derives the Go import alias for a zserio package name by
// replacing every dot with an underscore (e.g. "a.b.c" -> "a_b_c").
func GoPackageAlias(packageName string) string {
	return strings.ReplaceAll(packageName, ".", "_")
}
// GetTypeParameter returns the declared type parameters of a referenced
// parameterizable type (union, choice or struct), after resolving the
// reference to its native type. It fails when the type is not
// parameterizable or when the number of declared parameters does not
// match the number of type arguments supplied by the reference.
func (p *Package) GetTypeParameter(typeRef *TypeReference) ([]*Parameter, error) {
	nativeType, err := p.GetZserioNativeType(typeRef)
	if err != nil {
		return nil, err
	}
	typeScope, err := p.GetImportedScope(nativeType.Type.Package)
	if err != nil {
		return nil, err
	}
	symbol, err := typeScope.GetSymbol(nativeType.Type.Name)
	if err != nil {
		return nil, err
	}
	var parameters []*Parameter
	switch n := symbol.Symbol.(type) {
	case *Union:
		parameters = n.TypeParameters
	case *Choice:
		parameters = n.TypeParameters
	case *Struct:
		parameters = n.TypeParameters
	default:
		return nil, errors.New("type reference is not parameterizable")
	}
	// The declaration defines how many parameters are expected; the
	// reference supplies the arguments. (The previous message had the two
	// counts swapped.)
	if len(parameters) != len(typeRef.TypeArguments) {
		return nil, fmt.Errorf("expected %d type parameters, got %d", len(parameters), len(typeRef.TypeArguments))
	}
	return parameters, nil
}
// GetImportedScope returns a scope, if it can be found within the imported scope
func (p *Package) GetImportedScope(name string) (*Package, error) {
if name == "" {
return nil, errors.New("unresolved package scope")
}
if name == p.Name {
return p, nil
}
for _, pkg := range p.ImportedPackages {
if pkg.Name == name {
return pkg, nil
}
}
// still not found? try the imports of all imports
for _, pkg := range p.ImportedPackages {
obj, err := pkg.GetImportedScope(name)
if err == nil {
return obj, nil
}
}
return nil, errors.New("scope does not exist, or has not been imported")
} | internal/ast/package.go | 0.61173 | 0.457318 | package.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.