package pine
import (
"time"
"github.com/shopspring/decimal"
"github.com/pkg/errors"
)
// ArithmeticType defines the arithmetic operation applied to the outputs
// of two indicators (see NewArithmetic).
type ArithmeticType int

const (
	// ArithmeticAddition adds the two values.
	ArithmeticAddition ArithmeticType = iota
	// ArithmeticSubtraction subtracts the second value from the first.
	ArithmeticSubtraction
	// ArithmeticMultiplication multiplies the two values.
	ArithmeticMultiplication
	// ArithmeticDivision divides the first value by the second.
	ArithmeticDivision
	// ArithmeticAbsDiff yields the absolute difference, math.Abs(a-b).
	ArithmeticAbsDiff
	// ArithmeticMax yields the maximum of the two values.
	ArithmeticMax
	// ArithmeticMin yields the minimum of the two values.
	ArithmeticMin
)
// arith combines the outputs of two indicators with a binary arithmetic
// operation; NewArithmetic returns it as an Indicator.
type arith struct {
	a Indicator      // left operand
	b Indicator      // right operand
	o ArithmeticOpts // options, e.g. how nil interval values are handled
	t ArithmeticType // the operation to apply
}
// NewArithmetic generates an arithmetic operation t over the outputs of the
// two indicators a and b, configured by o.
func NewArithmetic(t ArithmeticType, a Indicator, b Indicator, o ArithmeticOpts) Indicator {
	op := &arith{a: a, b: b, o: o, t: t}
	return op
}
// GetValueForInterval returns the combined interval value at time t, or nil
// when the underlying indicators cannot produce a value for that time.
func (i *arith) GetValueForInterval(t time.Time) *Interval {
	result := i.generateValue(i.a.GetValueForInterval(t), i.b.GetValueForInterval(t))
	if result == nil {
		return nil
	}
	return &Interval{
		StartTime: t,
		Value:     *result,
	}
}
// generateValue computes the arithmetic result of the two interval values.
// When either interval is nil, the configured NilHandlInst decides the
// outcome; any unrecognized instruction now falls back to returning nil so
// a nil interval is never dereferenced.
func (i *arith) generateValue(ai, bi *Interval) *float64 {
	if ai == nil || bi == nil {
		switch i.o.NilHandlInst {
		case NilValueReturnZero:
			val := 0.0
			return &val
		default:
			// NilValueReturnNil and any unknown instruction return nil.
			// Previously an unknown instruction fell through this switch
			// and dereferenced the nil interval below, causing a panic.
			return nil
		}
	}

	a := decimal.NewFromFloat(ai.Value)
	b := decimal.NewFromFloat(bi.Value)

	var val decimal.Decimal
	switch i.t {
	case ArithmeticAddition:
		val = a.Add(b)
	case ArithmeticSubtraction:
		val = a.Sub(b)
	case ArithmeticMultiplication:
		val = a.Mul(b)
	case ArithmeticDivision:
		// NOTE(review): decimal.Div panics on a zero divisor — confirm
		// callers guarantee b is non-zero.
		val = a.Div(b)
	case ArithmeticAbsDiff:
		val = a.Sub(b).Abs()
	case ArithmeticMax:
		val = b
		if a.GreaterThan(b) {
			val = a
		}
	case ArithmeticMin:
		val = b
		if a.LessThan(b) {
			val = a
		}
	}

	// An unknown ArithmeticType yields the decimal zero value (0).
	f64, _ := val.Float64()
	return &f64
}
// Update forwards the OHLCV value to both operand indicators so their state
// stays in sync with the incoming data.
func (i *arith) Update(v OHLCV) error {
	// The original messages said "addition" regardless of the configured
	// operation; use the operation-neutral "arithmetic" instead.
	if err := i.a.Update(v); err != nil {
		return errors.Wrap(err, "error updating first operand in arithmetic")
	}
	if err := i.b.Update(v); err != nil {
		return errors.Wrap(err, "error updating second operand in arithmetic")
	}
	return nil
}
func (i *arith) ApplyOpts(opts SeriesOpts) error {
// validate if needed
if err := i.a.ApplyOpts(opts); err != nil {
return errors.Wrap(err, "error applying opts in addition")
}
if err := i.b.ApplyOpts(opts); err != nil {
return errors.Wrap(err, "error applying opts in addition")
}
return nil
} | arithmetic.go | 0.628521 | 0.484563 | arithmetic.go | starcoder |
package particles
import (
"github.com/wdevore/Ranger-Go-IGE/api"
"github.com/wdevore/Ranger-Go-IGE/engine/geometry"
"github.com/wdevore/Ranger-Go-IGE/engine/maths"
)
// NodeParticle is the base object of a NodeParticle system. It tracks
// lifetime and motion state and mirrors its position onto a visual INode.
type NodeParticle struct {
	elapsed  float32       // time accumulated since activation (see Evaluate)
	lifespan float32       // how long the particle lives, in the same units as dt
	position api.IPoint    // current position, mirrored onto node
	velocity api.IVelocity // direction + magnitude applied each Evaluate
	active   bool          // true while elapsed < lifespan

	// Visual representation driven by this particle.
	node api.INode
}
// NewNodeParticle constructs an inactive NodeParticle backed by the given
// visual node, with zeroed lifetime and a fresh velocity/position.
func NewNodeParticle(visual api.INode) api.IParticle {
	p := &NodeParticle{
		active:   false,
		elapsed:  0.0,
		lifespan: 0.0,
		velocity: maths.NewVelocity(),
		position: geometry.NewPoint(),
		node:     visual,
	}
	return p
}
// SetPosition sets the particle's position and immediately mirrors it onto
// the visual node.
func (p *NodeParticle) SetPosition(x, y float32) {
	p.position.SetByComp(x, y)
	p.node.SetPosition(x, y)
}

// GetPosition gets the particle's current position.
func (p *NodeParticle) GetPosition() api.IPoint {
	return p.position
}

// SetLifespan sets how long the particle lives (same time units as the dt
// passed to Evaluate).
func (p *NodeParticle) SetLifespan(duration float32) {
	p.lifespan = duration
}

// Visual gets the INode assigned to this particle.
func (p *NodeParticle) Visual() api.INode {
	return p.node
}

// Activate changes the particle's active state and shows/hides the visual
// node accordingly.
func (p *NodeParticle) Activate(active bool) {
	p.active = active
	p.node.SetVisible(active)
}

// IsActive indicates whether the particle is alive.
func (p *NodeParticle) IsActive() bool {
	return p.active
}

// SetVelocity sets the particle's direction (angle) and speed (magnitude).
func (p *NodeParticle) SetVelocity(angle float64, speed float32) {
	p.velocity.SetDirectionByAngle(angle)
	p.velocity.SetMagnitude(speed)
}
// Evaluate advances the particle by dt. The particle is deactivated once its
// elapsed time reaches its lifespan; while it remains active its position is
// advanced by the current velocity and mirrored onto the visual node.
func (p *NodeParticle) Evaluate(dt float32) {
	p.elapsed += dt
	p.active = p.elapsed < p.lifespan
	if !p.active {
		return
	}
	p.velocity.ApplyToPoint(p.position)
	p.node.SetPosition(p.position.X(), p.position.Y())
}
// Reset resets the ParticleNode
func (p *NodeParticle) Reset() {
p.active = false
p.elapsed = 0.0
p.node.SetVisible(p.active)
} | extras/particles/particle_node.go | 0.761893 | 0.435001 | particle_node.go | starcoder |
package fbptree
import (
"encoding/binary"
)
// decodeUint16 reads a big-endian uint16 from the first two bytes of data.
func decodeUint16(data []byte) uint16 {
	return uint16(data[0])<<8 | uint16(data[1])
}
// encodeUint16 writes v as two big-endian bytes in a fresh slice.
func encodeUint16(v uint16) []byte {
	return []byte{byte(v >> 8), byte(v)}
}
// decodeUint32 reads a big-endian uint32 from the first four bytes of data.
func decodeUint32(data []byte) uint32 {
	return uint32(data[0])<<24 | uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
}
// encodeUint32 writes v as four big-endian bytes in a fresh slice.
func encodeUint32(v uint32) []byte {
	return []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}
}
// encodeBool writes v as a single byte: 1 for true, 0 for false.
func encodeBool(v bool) []byte {
	b := byte(0)
	if v {
		b = 1
	}
	return []byte{b}
}
// decodeBool reads a bool from the first byte of data. Only the exact
// value 1 decodes as true (mirrors encodeBool).
func decodeBool(data []byte) bool {
	if data[0] == 1 {
		return true
	}
	return false
}
// encodeNode serializes a B+ tree node into a byte slice. Layout, with all
// integers big-endian:
//
//	nodeID (4) | parentID (4) | leaf flag (1) | keyNum (2) | len(keys) (2)
//	keyNum entries of: keySize (2) | key bytes
//	pointerNum (2) | len(pointers) (2)
//	pointerNum entries of: tag (1; 0=nodeID, 1=value) | payload
//	hasNext flag (1) | next nodeID (4) or a single 0 padding byte
//
// decodeNode reverses this layout.
func encodeNode(node *node) []byte {
	data := make([]byte, 0)
	data = append(data, encodeUint32(node.id)...)
	data = append(data, encodeUint32(node.parentID)...)
	data = append(data, encodeBool(node.leaf)...)
	// Both the live key count and the capacity of the keys slice are
	// stored so decodeNode can rebuild a slice of the original length.
	data = append(data, encodeUint16(uint16(node.keyNum))...)
	data = append(data, encodeUint16(uint16(len(node.keys)))...)
	for _, key := range node.keys {
		// Keys beyond keyNum are nil; stop at the first one.
		if key == nil {
			break
		}
		data = append(data, encodeUint16(uint16(len(key)))...)
		data = append(data, key...)
	}

	// Internal nodes carry one more pointer than keys (child fan-out).
	pointerNum := node.keyNum
	if !node.leaf {
		pointerNum += 1
	}

	data = append(data, encodeUint16(uint16(pointerNum))...)
	data = append(data, encodeUint16(uint16(len(node.pointers)))...)
	for i := 0; i < pointerNum; i++ {
		pointer := node.pointers[i]
		if pointer.isNodeID() {
			// Tag 0: child node reference.
			data = append(data, 0)
			data = append(data, encodeUint32(pointer.asNodeID())...)
		} else if pointer.isValue() {
			// Tag 1: inline value with a 2-byte length prefix.
			data = append(data, 1)
			data = append(data, encodeUint16(uint16(len(pointer.asValue())))...)
			data = append(data, pointer.asValue()...)
		}
	}

	var nextID uint32
	if node.next() != nil {
		nextID = node.next().asNodeID()
		data = append(data, encodeBool(true)...)
		data = append(data, encodeUint32(nextID)...)
	} else {
		// NOTE(review): the "no next" branch pads with a single zero byte
		// while the "has next" branch writes 4 bytes. decodeNode never
		// reads past the flag when it is false, so the asymmetry is
		// harmless, but the encoded size differs by 3 bytes — confirm no
		// caller assumes a fixed trailer size.
		data = append(data, encodeBool(false)...)
		data = append(data, 0)
	}

	return data
}
// decodeNode deserializes a B+ tree node from the byte layout produced by
// encodeNode. It never fails in practice; the error return exists for
// interface symmetry with other decoders.
func decodeNode(data []byte) (*node, error) {
	position := 0

	nodeID := decodeUint32(data[position : position+4])
	position += 4

	parentID := decodeUint32(data[position : position+4])
	position += 4

	leaf := decodeBool(data[position : position+1])
	position += 1

	keyNum := decodeUint16(data[position : position+2])
	position += 2

	// keyLen is the capacity of the original keys slice; only the first
	// keyNum entries carry data, the rest stay nil.
	keyLen := int(decodeUint16(data[position : position+2]))
	position += 2

	keys := make([][]byte, keyLen)
	for k := 0; k < int(keyNum); k++ {
		keySize := int(decodeUint16(data[position : position+2]))
		position += 2
		// NOTE(review): the key slice aliases the input buffer — confirm
		// callers do not reuse/mutate data after decoding.
		key := data[position : position+keySize]
		keys[k] = key
		position += keySize
	}

	pointerNum := decodeUint16(data[position : position+2])
	position += 2

	pointerLen := int(decodeUint16(data[position : position+2]))
	position += 2

	pointers := make([]*pointer, pointerLen)
	for p := 0; p < int(pointerNum); p++ {
		if data[position] == 0 {
			position += 1
			// nodeID
			nodeID := decodeUint32(data[position : position+4])
			position += 4
			pointers[p] = &pointer{nodeID}
		} else if data[position] == 1 {
			position += 1
			// value
			valueSize := int(decodeUint16(data[position : position+2]))
			position += 2
			value := data[position : position+valueSize]
			position += valueSize
			pointers[p] = &pointer{value}
		}
	}

	n := &node{
		nodeID,
		leaf,
		parentID,
		keys,
		int(keyNum),
		pointers,
	}

	// Trailer: a flag byte, followed by the next-leaf node id only when the
	// flag is true (encodeNode writes a single ignored padding byte otherwise).
	hasNextID := decodeBool(data[position : position+1])
	position += 1
	if hasNextID {
		nextID := decodeUint32(data[position : position+4])
		n.setNext(&pointer{nextID})
	}

	return n, nil
}
// encodeTreeMetadata serializes tree metadata into a fixed 14-byte layout:
// order (2 bytes), rootID (4), leftmostID (4), size (4), all big-endian.
func encodeTreeMetadata(metadata *treeMetadata) []byte {
	data := make([]byte, 0, 14)
	data = append(data, encodeUint16(metadata.order)...)
	data = append(data, encodeUint32(metadata.rootID)...)
	data = append(data, encodeUint32(metadata.leftmostID)...)
	data = append(data, encodeUint32(metadata.size)...)
	return data
}
func decodeTreeMetadata(data []byte) (*treeMetadata, error) {
return &treeMetadata{
order: decodeUint16(data[0:2]),
rootID: decodeUint32(data[2:6]),
leftmostID: decodeUint32(data[6:10]),
size: decodeUint32(data[10:14]),
}, nil
} | encoding.go | 0.646014 | 0.427038 | encoding.go | starcoder |
package gimli
import (
"encoding/binary"
)
// rateInBytes is the sponge rate: the number of bytes absorbed or squeezed
// per permutation call.
const rateInBytes = 16
// Hash computes the hash of input into output using the sponge construction
// over the Gimli permutation. The output length is determined by len(output);
// the input is padded with 0x1F and a final 0x80 in the last rate byte.
func Hash(output, input []byte) {
	var state Gimli

	// Absorb full input blocks: XOR 16 bytes into the first four words of
	// the state, then permute.
	for len(input) >= rateInBytes {
		state[0] ^= binary.LittleEndian.Uint32(input[0:4])
		state[1] ^= binary.LittleEndian.Uint32(input[4:8])
		state[2] ^= binary.LittleEndian.Uint32(input[8:12])
		state[3] ^= binary.LittleEndian.Uint32(input[12:16])
		state.Update()
		input = input[rateInBytes:]
	}

	n := len(input)

	// Absorb the partial input block at the end; t is a 4-byte scratch
	// buffer used to zero-extend the trailing sub-word bytes.
	t := make([]byte, 4)
	switch {
	case n > 12:
		state[0] ^= binary.LittleEndian.Uint32(input[0:4])
		state[1] ^= binary.LittleEndian.Uint32(input[4:8])
		state[2] ^= binary.LittleEndian.Uint32(input[8:12])
		copy(t, input[12:])
		state[3] ^= binary.LittleEndian.Uint32(t)
	case n > 8:
		state[0] ^= binary.LittleEndian.Uint32(input[0:4])
		state[1] ^= binary.LittleEndian.Uint32(input[4:8])
		copy(t, input[8:])
		state[2] ^= binary.LittleEndian.Uint32(t)
	case n > 4:
		state[0] ^= binary.LittleEndian.Uint32(input[0:4])
		copy(t, input[4:])
		state[1] ^= binary.LittleEndian.Uint32(t)
	case n > 0:
		copy(t, input)
		state[0] ^= binary.LittleEndian.Uint32(t)
	}

	// Domain-separation/padding byte 0x1F goes right after the last input
	// byte (word index n/4, byte offset n%4 within that word).
	paddingIndex := n / 4
	paddingShift := uint((n % 4) * 8)
	state[paddingIndex] ^= 0x1F << (paddingShift)

	// Second bit of padding: 0x80 into the last byte of the rate portion.
	state[rateInBytes/4-1] ^= 0x80 << (3 * 8)

	// Squeeze full output blocks: permute, then copy 16 bytes out.
	for len(output) >= rateInBytes {
		state.Update()
		binary.LittleEndian.PutUint32(output[0:4], state[0])
		binary.LittleEndian.PutUint32(output[4:8], state[1])
		binary.LittleEndian.PutUint32(output[8:12], state[2])
		binary.LittleEndian.PutUint32(output[12:16], state[3])
		output = output[rateInBytes:]
	}

	n = len(output)
	if n == 0 {
		return
	}

	// Squeeze the final partial output block, again via the scratch buffer
	// so sub-word tails are copied without overrunning output.
	state.Update()
	switch {
	case n > 12:
		binary.LittleEndian.PutUint32(output[0:4], state[0])
		binary.LittleEndian.PutUint32(output[4:8], state[1])
		binary.LittleEndian.PutUint32(output[8:12], state[2])
		binary.LittleEndian.PutUint32(t, state[3])
		copy(output[12:], t)
	case n > 8:
		binary.LittleEndian.PutUint32(output[0:4], state[0])
		binary.LittleEndian.PutUint32(output[4:8], state[1])
		binary.LittleEndian.PutUint32(t, state[2])
		copy(output[8:], t)
	case n > 4:
		binary.LittleEndian.PutUint32(output[0:4], state[0])
		binary.LittleEndian.PutUint32(t, state[1])
		copy(output[4:], t)
	case n > 0:
		binary.LittleEndian.PutUint32(t, state[0])
		copy(output, t)
	}
}
package array
import (
"errors"
"fmt"
)
// Array is a struct wrapper over a slice, enabling JavaScript-style methods
// (Map, Filter, Push, Join, ...) on the wrapped elements.
type Array struct {
	items []interface{} // the wrapped elements; Values() exposes this slice directly
}
// New creates a new Array wrapping the given slice (the slice is not copied).
func New(items []interface{}) *Array {
	return &Array{items: items}
}
// Get returns the array element at the specified index, or an error when the
// index falls outside [0, Length).
func (a *Array) Get(index int) (interface{}, error) {
	if index < 0 || index >= len(a.items) {
		return nil, errors.New("index out of range error")
	}
	return a.items[index], nil
}
// Length returns the total number of elements in the array.
func (a *Array) Length() int {
	return len(a.items)
}
// Map creates a new array from the return values of the map function, which
// receives each element and its index.
func (a *Array) Map(mapFunction func(interface{}, int) interface{}) *Array {
	mapped := make([]interface{}, 0, len(a.items))
	for index, value := range a.items {
		mapped = append(mapped, mapFunction(value, index))
	}
	return New(mapped)
}
// Filter creates a new array containing the values for which the filter
// function returns true.
func (a *Array) Filter(filterFunction func(interface{}, int) bool) *Array {
	kept := make([]interface{}, 0)
	for i := 0; i < len(a.items); i++ {
		if filterFunction(a.items[i], i) {
			kept = append(kept, a.items[i])
		}
	}
	return New(kept)
}
// ForEach executes the given function once for each element, passing the
// element and its index.
func (a *Array) ForEach(function func(interface{}, int)) {
	for i := 0; i < len(a.items); i++ {
		function(a.items[i], i)
	}
}
// Some returns true if at least one element passes the check function.
func (a *Array) Some(checkFunction func(interface{}, int) bool) bool {
	for i := 0; i < len(a.items); i++ {
		if checkFunction(a.items[i], i) {
			return true
		}
	}
	return false
}
// Every returns true only if all elements pass the check function
// (vacuously true for an empty array).
func (a *Array) Every(checkFunction func(interface{}, int) bool) bool {
	for i := 0; i < len(a.items); i++ {
		if !checkFunction(a.items[i], i) {
			return false
		}
	}
	return true
}
// Push appends a new element to the array and returns the new length.
func (a *Array) Push(item interface{}) int {
	a.items = append(a.items, item)
	return len(a.items)
}
// Pop removes the last element in the array and returns it.
// NOTE(review): panics with an index-out-of-range error on an empty array —
// confirm callers guard with Length() first.
func (a *Array) Pop() interface{} {
	value := a.items[a.Length()-1]
	a.items = a.items[0 : a.Length()-1]
	return value
}
// Shift removes the first element in the array and returns it.
// NOTE(review): panics with an index-out-of-range error on an empty array —
// confirm callers guard with Length() first.
func (a *Array) Shift() interface{} {
	value := a.items[0]
	a.items = a.items[1:]
	return value
}
// UnShift inserts an element at the beginning of the array and returns the
// new length. A new backing slice is allocated, as in the original code.
func (a *Array) UnShift(item interface{}) int {
	a.items = append([]interface{}{item}, a.items...)
	return a.Length()
}
// IndexOf returns the index of the first element equal (==) to item, or -1
// when no element matches. Elements must be comparable types; comparing
// uncomparable values (slices, maps, funcs) panics, as Go's == does.
func (a *Array) IndexOf(item interface{}) int {
	for i := 0; i < len(a.items); i++ {
		if a.items[i] == item {
			return i
		}
	}
	return -1
}
// Concat returns a new array containing the elements of a followed by the
// elements of a2. Neither input array is modified.
//
// Bug fix: the original wrote a2's elements starting at index
// a.Length()-1+i, which overwrote the last element of a and left the final
// slot nil.
func (a *Array) Concat(a2 *Array) *Array {
	combined := make([]interface{}, 0, a.Length()+a2.Length())
	combined = append(combined, a.items...)
	combined = append(combined, a2.items...)
	return New(combined)
}
// Slice returns a new array with elements within the [start, end) range.
// With no arguments the whole array is returned (sharing the backing slice);
// with one argument the slice runs from that index to the end; an
// out-of-range start yields an empty array, and an end past the last index
// is clamped to the array length.
func (a *Array) Slice(pos ...int) *Array {
	switch len(pos) {
	case 0:
		return New(a.items)
	case 1:
		if pos[0] < 0 || pos[0] > a.Length()-1 {
			return New(make([]interface{}, 0))
		}
		return New(a.items[pos[0]:])
	}
	start, end := pos[0], pos[1]
	if start < 0 || start > a.Length()-1 {
		return New(make([]interface{}, 0))
	}
	if end > a.Length() {
		return New(a.items[start:])
	}
	return New(a.items[start:end])
}
// Fill replaces the values in positions [start, end) with the provided
// value, in place, and returns the (mutated) array. With no positions it is
// a no-op; with one position it fills from there to the end; an out-of-range
// start is a no-op, and an end past the array length is clamped.
func (a *Array) Fill(value interface{}, pos ...int) *Array {
	if len(pos) == 0 {
		return a
	}
	start := pos[0]
	if start < 0 || start > a.Length()-1 {
		return a
	}
	end := a.Length()
	if len(pos) > 1 && pos[1] < end {
		end = pos[1]
	}
	for i := start; i < end; i++ {
		a.items[i] = value
	}
	return a
}
// Find returns the first element that passes the find function test, or nil
// when no element matches.
func (a *Array) Find(findFunction func(interface{}, int) bool) interface{} {
	for i := 0; i < len(a.items); i++ {
		if findFunction(a.items[i], i) {
			return a.items[i]
		}
	}
	return nil
}
// FindIndex returns the index of the first element that passes the find
// function test, or -1 when no element matches.
func (a *Array) FindIndex(findFunction func(interface{}, int) bool) int {
	for i := 0; i < len(a.items); i++ {
		if findFunction(a.items[i], i) {
			return i
		}
	}
	return -1
}
// Includes returns true if the array contains an element equal (==) to item
// (delegates to IndexOf, so the same comparability caveats apply).
func (a *Array) Includes(item interface{}) bool {
	return a.IndexOf(item) > -1
}
// Join concatenates the string representation (%v) of every element,
// separated by the provided joiner (default ","), and returns the result.
// An empty array yields "".
//
// Bug fix: the original appended the joiner after every element and then
// stripped exactly one trailing character, which corrupted the result for
// multi-character joiners (left part of the separator attached) and for an
// empty joiner (truncated the last element).
func (a *Array) Join(joiner ...string) string {
	separator := ","
	if len(joiner) > 0 {
		separator = joiner[0]
	}
	result := ""
	for i, value := range a.items {
		if i > 0 {
			result += separator
		}
		result += fmt.Sprintf("%v", value)
	}
	return result
}
// String returns the string representation of the array,
// e.g. "Array [1 2 3]".
func (a *Array) String() string {
	return fmt.Sprintf("Array %v", a.items)
}
// Values returns the underlying array slice
func (a *Array) Values() []interface{} {
return a.items
} | array.go | 0.825379 | 0.548794 | array.go | starcoder |
package grbl
import (
"github.com/centretown/tiny-fabb/forms"
)
// Modal-group identifiers used to classify G/M codes below, following the
// conventional RS-274/G-code modal group taxonomy. Parameters is a
// catch-all id for word arguments (X, Y, Z, F, S, T, I, J, K, R, P).
const (
	Parameters forms.WebId = iota
	Motion
	PlaneSelection
	Diameter
	DistanceMode
	FeedRateMode
	Units
	CutterRadiusCompensation
	ToolLengthOffset
	ReturnModeInCannedCycles
	CoordinateSystemSelection
	Stopping
	ToolChange
	SpindleTurning
	Coolant
	OverrideSwitches
	FlowControl
	NonModal
)
// Group pairs a modal-group id with a display label.
// NOTE(review): the Groups map below stores *forms.Entry rather than Group,
// so this type appears unused in this file — confirm it is referenced
// elsewhere before relying on it.
type Group struct {
	ID    forms.WebId
	Label string
}
// Groups maps each modal-group id to its display entry. Labels follow the
// conventional G-code modal group names.
var Groups = map[forms.WebId]*forms.Entry{
	Motion: {
		ID:    Motion.String(),
		Label: "Motion ('Group 1')",
	},
	PlaneSelection: {
		ID:    PlaneSelection.String(),
		Label: "Plane selection",
	},
	Diameter: {
		ID:    Diameter.String(),
		Label: "Diameter / Radius for lathes",
	},
	DistanceMode: {
		ID:    DistanceMode.String(),
		Label: "Distance Mode",
	},
	FeedRateMode: {
		ID:    FeedRateMode.String(),
		Label: "Feed Rate Mode",
	},
	Units: {
		ID:    Units.String(),
		Label: "Units",
	},
	CutterRadiusCompensation: {
		ID:    CutterRadiusCompensation.String(),
		Label: "Cutter Radius Compensation",
	},
	ToolLengthOffset: {
		ID:    ToolLengthOffset.String(),
		Label: "Tool Length Offset",
	},
	ReturnModeInCannedCycles: {
		ID:    ReturnModeInCannedCycles.String(),
		Label: "Return Mode in Canned Cycles",
	},
	CoordinateSystemSelection: {
		ID:    CoordinateSystemSelection.String(),
		Label: "Coordinate System Selection",
	},
	Stopping: {
		ID:    Stopping.String(),
		Label: "Stopping",
	},
	ToolChange: {
		ID:    ToolChange.String(),
		Label: "Tool Change",
	},
	SpindleTurning: {
		ID:    SpindleTurning.String(),
		Label: "Spindle Turning",
	},
	Coolant: {
		ID:    Coolant.String(),
		Label: "Coolant",
	},
	OverrideSwitches: {
		ID:    OverrideSwitches.String(),
		Label: "Override Switches",
	},
	FlowControl: {
		ID:    FlowControl.String(),
		Label: "Flow Control",
	},
	NonModal: {
		ID:    NonModal.String(),
		Label: "Non-modal codes ('Group 0')",
	},
}
// GCode describes a single G/M code or parameter word: its modal group, the
// literal code ("G0", "M3", "X?" for a word argument), a short label,
// optional explanatory notes, and optional parameter sub-codes.
type GCode struct {
	Group      *forms.Entry
	Code       string
	Label      string
	Notes      string
	Parameters []*GCode
}
// GCodes maps a G/M code or parameter word (e.g. "G0", "M3", "X?") to its
// description. Entries ending in "?" are word arguments that take a value.
var GCodes = map[string]*GCode{
	"G0": {
		Group: Groups[Motion],
		Code:  "G0",
		Label: "Rapid positioning",
		Notes: "Switch to rapid linear motion mode (seek). Used to get the tool somewhere quickly without cutting --- moves the machine as quickly as possible along each axis --- an axis which needs less movement will finish before the others, so one cannot count on the movement being a straight line.",
	},
	"G1": {
		Group: Groups[Motion],
		Code:  "G1",
		Label: "Linear interpolation",
		Notes: "Switch to linear motion at the current feed rate. Used to cut a straight line --- the interpreter will determine the acceleration needed along each axis to ensure direct movement from the original to the destination point at no more than the current Feed rate (F see below).",
	},
	"G2": {
		Group: Groups[Motion],
		Code:  "G2",
		Label: "Circular interpolation, clockwise",
		Notes: "Switch to clockwise arc mode. The interpreter will cut an arc or circle from the current position to the destination using the specified radius (R) or center (IJK location) at the current Feed rate (F see below) in the plane selected by G17/18/19.",
	},
	"G3": {
		Group: Groups[Motion],
		Code:  "G3",
		Label: "Circular interpolation, counterclockwise",
		Notes: "Switch to anti-clockwise arc mode. Corollary to G2 above.",
	},
	"G4": {
		Group: Groups[Motion],
		Code:  "G4",
		Label: "Dwell",
		Notes: "This should probably be calculated to be only one or two spindle rotations for best efficiency. Dwell time is expressed using a parameter (may be X, U, or P) which determines the time unit (seconds, milliseconds, &c.) P, for seconds, is supported and used by Grbl, typically X and U express the duration in milliseconds.",
	},
	"G38.2": {
		Group: Groups[Motion],
		Code:  "G38.2",
		Label: "Straight Probe",
		Notes: "Probe toward workpiece, stop on contact, signal error if failure.",
	},
	"G38.3": {
		Group: Groups[Motion],
		Code:  "G38.3",
		Label: "Probe",
		Notes: "Probe toward workpiece, stop on contact.",
	},
	"G38.4": {
		Group: Groups[Motion],
		Code:  "G38.4",
		Label: "Probe",
		Notes: "Probe away workpiece, stop on contact, signal error if failure.",
	},
	"G38.5": {
		Group: Groups[Motion],
		Code:  "G38.5",
		Label: "Probe",
		Notes: "Probe away workpiece, stop on contact.",
	},
	"G80": {
		Group: Groups[Motion],
		Code:  "G80",
		Label: "Motion mode cancel",
		Notes: "Canned cycle",
	},
	"G54": {
		Group: Groups[CoordinateSystemSelection],
		Code:  "G54",
		Label: "Fixture offset 1",
		Notes: "Fixture offset 1--6. CF G10 and G92.[21] Note that G54 is reserved by Carbide Motion, and will be reset by the software.[22]",
	},
	"G55": {
		Group: Groups[CoordinateSystemSelection],
		Code:  "G55",
		Label: "Fixture offset 2",
	},
	"G56": {
		Group: Groups[CoordinateSystemSelection],
		Code:  "G56",
		Label: "Fixture offset 3",
	},
	"G57": {
		Group: Groups[CoordinateSystemSelection],
		Code:  "G57",
		Label: "Fixture offset 4",
	},
	"G58": {
		Group: Groups[CoordinateSystemSelection],
		Code:  "G58",
		Label: "Fixture offset 5",
	},
	"G59": {
		Group: Groups[CoordinateSystemSelection],
		Code:  "G59",
		Label: "Fixture offset 6",
	},
	"G17": {
		Group: Groups[PlaneSelection],
		Code:  "G17",
		Label: "Select the XY plane (for arcs)",
		Notes: "Use I and J",
	},
	"G18": {
		Group: Groups[PlaneSelection],
		Code:  "G18",
		Label: "Select the XZ plane (for arcs)",
		Notes: "Use I and K",
	},
	"G19": {
		Group: Groups[PlaneSelection],
		Code:  "G19",
		Label: "Select the YZ plane (for arcs)",
		Notes: "Use J and K",
	},
	"G90": {
		Group: Groups[DistanceMode],
		Code:  "G90",
		Label: "Switch to absolute distance mode",
		Notes: "Coordinates are now relative to the origin of the currently active coordinate system, as opposed to the current position. G0 X-10 Y5 will move to the position 10 units to the left and 5 above the origin X0,Y0. cf. G91 below.",
	},
	"G91": {
		Group: Groups[DistanceMode],
		Code:  "G91",
		Label: "Switch to incremental distance mode",
		Notes: "Coordinates are now relative to the current position, with no consideration for machine origin. G0 X-10 Y5 will move to the position 10 units to the left and 5 above the current position. cf. G90 above.",
	},
	"G93": {
		Group: Groups[FeedRateMode],
		Code:  "G93",
		Label: "Set inverse time feed rate mode",
		Notes: "An F word is interpreted to mean that the move should be completed in (one divided by the F number) minutes. For example, if F is 2, the move should be completed in half a minute.",
	},
	"G94": {
		Group: Groups[FeedRateMode],
		Code:  "G94",
		Label: "Set units per minute feed rate mode",
		Notes: "An F Word is interpreted to mean the controlled point should move at a certain number of units (or degrees) per minute.",
	},
	"G20": {
		Group: Groups[Units],
		Code:  "G20",
		Label: "Units will be in inches",
		Notes: "Best practice: do this at the start of a program and nowhere else. The usual minimum increment in G20 is one ten-thousandth of an inch (0.0001\").",
	},
	"G21": {
		Group: Groups[Units],
		Code:  "G21",
		Label: "Units will be in mm",
		Notes: "Best practice: do this at the start of a program and nowhere else. The usual minimum increment in G21 (one thousandth of a millimeter, .001 mm, that is, one micrometre).",
	},
	"M3": {
		Group: Groups[SpindleTurning],
		Code:  "M3",
		Label: "Spindle direction clockwise",
		Notes: "Starts or restarts the spindle spinning clockwise, if the system is wired up to start/stop the spindle.",
	},
	"M4": {
		Group: Groups[SpindleTurning],
		Code:  "M4",
		Label: "Spindle direction counter-clockwise",
		Notes: "Used to enable laser mode movement in Grbl 1.0 and later.",
	},
	// NOTE(review): label/notes below duplicate the M3 entry; M5 is
	// conventionally "spindle stop" — confirm this data against the Grbl
	// G-code reference.
	"M5": {
		Group: Groups[SpindleTurning],
		Code:  "M5",
		Label: "Spindle direction clockwise",
		Notes: "Starts or restarts the spindle spinning clockwise, if the system is wired up to start/stop the spindle.",
	},
	"M7": {
		Group: Groups[Coolant],
		Code:  "M7",
		Label: "Mist",
		Notes: "Coolant control",
	},
	"M8": {
		Group: Groups[Coolant],
		Code:  "M8",
		Label: "Flood coolant on",
		Notes: "Coolant control",
	},
	"M9": {
		Group: Groups[Coolant],
		Code:  "M9",
		Label: "All coolant off.",
		Notes: "Coolant control",
	},
	"M6": {
		Group: Groups[ToolChange],
		Code:  "M6",
		Label: "Tool Change",
		Notes: "Coolant control",
	},
	"T?": {
		Group: Groups[Parameters],
		Code:  "T?",
		Label: "Tool Number",
	},
	"F?": {
		Group: Groups[Parameters],
		Code:  "F?",
		Label: "Feed Rate",
	},
	"S?": {
		Group: Groups[Parameters],
		Code:  "S?",
		Label: "Spindle Speed",
	},
	"X?": {
		Group: Groups[Parameters],
		Code:  "X?",
		Label: "X Axis Position",
		Notes: "",
	},
	"Y?": {
		Group: Groups[Parameters],
		Code:  "Y?",
		Label: "Y Axis Position",
		Notes: "",
	},
	"Z?": {
		Group: Groups[Parameters],
		Code:  "Z?",
		Label: "Z Axis Position",
		Notes: "",
	},
	"A?": {
		Group: Groups[Parameters],
		Code:  "A?",
		Label: "A Axis Position",
		Notes: "",
	},
	"B?": {
		Group: Groups[Parameters],
		Code:  "B?",
		Label: "B Axis Position",
		Notes: "",
	},
	"C?": {
		Group: Groups[Parameters],
		Code:  "C?",
		Label: "C Axis Position",
		Notes: "",
	},
	"I?": {
		Group: Groups[Parameters],
		Code:  "I?",
		Label: "Arc Centre in X Axis ",
		Notes: "",
	},
	"J?": {
		Group: Groups[Parameters],
		Code:  "J?",
		Label: "Arc Centre in Y Axis ",
		Notes: "",
	},
	"K?": {
		Group: Groups[Parameters],
		Code:  "K?",
		Label: "Arc Centre in Z Axis ",
		Notes: "",
	},
	"R?": {
		Group: Groups[Parameters],
		Code:  "R?",
		Label: "Arc Radius Size ",
		Notes: "",
	},
	"P?": {
		Group: Groups[Parameters],
		Code:  "P?",
		Label: "Parameter Address",
		Notes: "",
	},
}
package main
import (
"encoding/csv"
"fmt"
"io"
"math"
"os"
"strconv"
"github.com/fang2hou/easyga"
"github.com/wcharczuk/go-chart"
)
// travellingSalesmanProblem bundles the GA instance, the loaded city
// coordinates, and the best-fitness history collected during a run.
type travellingSalesmanProblem struct {
	ga           easyga.GeneticAlgorithm
	cityLocation [][]float64 // cityLocation[i] = {x, y} of city i
	fitnessData  []float64   // best fitness recorded each iteration (see StatisticFunction)
}
// main loads city coordinates from a CSV file and renders the best tour
// found by the genetic algorithm to a chart.
func main() {
	// Initialize a travelling salesman problem.
	var tsp travellingSalesmanProblem
	tsp.getCityLocation("tsp.cities.random.csv")
	// DrawChart calls tsp.Init and tsp.Run internally before rendering.
	tsp.DrawChart()
}
// Init configures the genetic algorithm: population parameters plus the
// custom chromosome-initialization, mutation, fitness, crossover, stop, and
// statistic callbacks for the TSP encoding (a chromosome is a permutation
// of city indices stored as bytes).
// NOTE(review): genes are bytes, which caps the usable city count at 256 —
// confirm the input file stays within that limit.
func (tsp *travellingSalesmanProblem) Init() {
	parameters := easyga.GeneticAlgorithmParameters{
		CrossoverProbability: .9,
		MutationProbability:  .35,
		PopulationSize:       2000,
		GenotypeNumber:       2,
		ChromosomeLength:     len(tsp.cityLocation),
		IterationsLimit:      30000,
		RandomSeed:           42,
		UseRoutine:           false,
	}

	custom := easyga.GeneticAlgorithmFunctions{
		ChromosomeInitFunction: func(c *easyga.Chromosome) {
			// Initialize
			c.Gene = make([]byte, 0)
			// Get an array contains the genes which tsp need.
			tspChromosome := easyga.Rand.Perm(parameters.ChromosomeLength)
			// Append each gene to the end of chromosome.
			for i := range tspChromosome {
				c.Gene = append(c.Gene, byte(tspChromosome[i]))
			}
		},
		MutateFunction: func(c *easyga.Chromosome) {
			// Swap mutation: exchange two distinct gene positions.
			index1 := c.GetRandomGeneIndex()
			index2 := c.GetRandomGeneIndex()
			for index1 == index2 {
				index2 = c.GetRandomGeneIndex()
			}
			// Switch value
			c.Gene[index1], c.Gene[index2] = c.Gene[index2], c.Gene[index1]
		},
		FitnessFunction: func(c *easyga.Chromosome) {
			// Fitness is the negated tour length (shorter tour = higher
			// fitness); the tour wraps from the last city back to the first.
			c.Fitness = 0
			for geneIndex := range c.Gene {
				// Get next city index from gene
				cityIndex := int(c.Gene[geneIndex])
				nextCityIndex := int(c.Gene[(geneIndex+1)%len(c.Gene)])
				// Calculate distance using pythagorean theorem
				distanceX := tsp.cityLocation[nextCityIndex][0] - tsp.cityLocation[cityIndex][0]
				distanceY := tsp.cityLocation[nextCityIndex][1] - tsp.cityLocation[cityIndex][1]
				distance := math.Sqrt(distanceX*distanceX + distanceY*distanceY)
				// Update fitness
				c.Fitness -= distance
			}
		},
		CrossOverFunction: func(parent1, parent2 *easyga.Chromosome) (child1, child2 *easyga.Chromosome) {
			// Order-crossover variant: the middle third of one parent is
			// kept, the remaining genes are filled from the other parent
			// in their original order (preserving the permutation).
			crossoverStart := parameters.ChromosomeLength / 3
			if parameters.ChromosomeLength%3 != 0 {
				crossoverStart++
			}
			crossoverEnd := crossoverStart * 2
			// child 1
			child1 = &easyga.Chromosome{Gene: make([]byte, 0)}
			crossoverPart := parent2.Gene[crossoverStart:crossoverEnd]
			copyPart := make([]byte, 0)
			for parentIndex := range parent1.Gene {
				isEqual := false
				for skipCopyIndex := range crossoverPart {
					if parent1.Gene[parentIndex] == crossoverPart[skipCopyIndex] {
						isEqual = true
						break
					}
				}
				if !isEqual {
					copyPart = append(copyPart, parent1.Gene[parentIndex])
				}
			}
			child1.Gene = append(child1.Gene, copyPart[0:crossoverStart]...)
			child1.Gene = append(child1.Gene, crossoverPart...)
			child1.Gene = append(child1.Gene, copyPart[crossoverStart:]...)
			// child 2
			child2 = &easyga.Chromosome{Gene: make([]byte, 0)}
			crossoverPart = parent1.Gene[crossoverStart:crossoverEnd]
			copyPart = make([]byte, 0)
			for parentIndex := range parent2.Gene {
				isEqual := false
				for skipCopyIndex := range crossoverPart {
					if parent2.Gene[parentIndex] == crossoverPart[skipCopyIndex] {
						isEqual = true
						break
					}
				}
				if !isEqual {
					copyPart = append(copyPart, parent2.Gene[parentIndex])
				}
			}
			child2.Gene = append(child2.Gene, copyPart[0:crossoverStart]...)
			child2.Gene = append(child2.Gene, crossoverPart...)
			child2.Gene = append(child2.Gene, copyPart[crossoverStart:]...)
			return
		},
		CheckStopFunction: func(ga *easyga.GeneticAlgorithm) bool {
			// Stop when a "good enough" tour is found or the iteration
			// budget is exhausted. -5420 is tuned to the bundled data set.
			_, bestFitness := ga.Population.FindBest()
			maybeBest := float64(-5420)
			if bestFitness >= maybeBest || ga.Population.Iteration >= ga.Parameters.IterationsLimit {
				return true
			}
			return false
		},
		StatisticFunction: func(ga *easyga.GeneticAlgorithm) {
			// Record the best fitness of each generation for later analysis.
			_, bestFitness := ga.Population.FindBest()
			tsp.fitnessData = append(tsp.fitnessData, bestFitness)
		},
	}

	if err := tsp.ga.Init(parameters, custom); err != nil {
		fmt.Println(err)
		return
	}
}
// Run executes the configured genetic algorithm and returns the best
// chromosome, its fitness, and the iteration at which it was found.
func (tsp *travellingSalesmanProblem) Run() (easyga.Chromosome, float64, int) {
	return tsp.ga.Run()
}
// getCityLocation loads city coordinates from a CSV file into
// tsp.cityLocation. Lines starting with '#' are comments; each record must
// contain at least two float fields (x, y). Malformed records are reported
// and skipped rather than silently loaded as zeros (the original ignored
// ParseFloat errors and would panic on records with fewer than two fields).
func (tsp *travellingSalesmanProblem) getCityLocation(fileName string) {
	// Open file
	file, err := os.Open(fileName)
	if err != nil {
		fmt.Println("Error:", err)
		return
	}
	defer file.Close()

	// Create CSV Reader
	r := csv.NewReader(file)
	r.Comment = '#'

	// Parse data
	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		if len(record) < 2 {
			fmt.Println("Error: skipping record with fewer than 2 fields:", record)
			continue
		}
		x, errX := strconv.ParseFloat(record[0], 64)
		y, errY := strconv.ParseFloat(record[1], 64)
		if errX != nil || errY != nil {
			fmt.Println("Error: skipping record with invalid coordinates:", record)
			continue
		}
		tsp.cityLocation = append(tsp.cityLocation, []float64{x, y})
	}
}
func (tsp *travellingSalesmanProblem) DrawChart() {
tsp.Init() // If you just want to run once, move tsp.init() to main().
best, bestFit, iteration := tsp.Run()
fmt.Println("Best gene is", best)
fmt.Println("Best fitness is", bestFit)
fmt.Println("Find it in", iteration, "generation.")
xValue := make([]float64, 0)
yValue := make([]float64, 0)
for i := range best.Gene {
xValue = append(xValue, float64(tsp.cityLocation[best.Gene[i]][0]))
yValue = append(yValue, float64(tsp.cityLocation[best.Gene[i]][1]))
}
// Fix the line between the first city and last city.
xValue = append(xValue, float64(tsp.cityLocation[best.Gene[0]][0]))
yValue = append(yValue, float64(tsp.cityLocation[best.Gene[0]][1]))
tspSeries := chart.ContinuousSeries{
XValues: xValue,
YValues: yValue,
Style: chart.Style{
Show: true,
StrokeColor: chart.ColorBlack,
StrokeWidth: 1.0,
},
}
graph := chart.Chart{
Title: "TSP Final result",
Width: 500,
Height: 500,
DPI: 100.0,
Series: []chart.Series{tspSeries},
}
filePath := "stat.png"
outFile, _ := os.Create(filePath)
defer outFile.Close()
graph.Render(chart.PNG, outFile)
} | _examples/tsp/main.go | 0.52975 | 0.406538 | main.go | starcoder |
package recommender
import (
"fmt"
"github.com/sirupsen/logrus"
"math"
"sort"
)
// AttributeValueSelector comprises attribute selection algorithm entrypoints.
type AttributeValueSelector interface {
	// SelectAttributeValues selects the attribute values falling between
	// min and max (inclusive), or the value(s) closest to max when none
	// fall in the interval.
	SelectAttributeValues(min float64, max float64) ([]float64, error)
}
// AttributeValues is a slice of numeric attribute values.
type AttributeValues []float64
// sort orders the receiver in ascending order, in place.
func (av AttributeValues) sort() {
	sort.Sort(sort.Float64Slice(av))
}
// SelectAttributeValues selects values between the min and max values considering the focus strategy.
// When the [min, max] interval is "out of range" with respect to this slice, the closest value
// below max (or the closest value above it) is returned instead of an empty result.
// Note: the receiver is sorted in place as a side effect.
func (av AttributeValues) SelectAttributeValues(min float64, max float64) ([]float64, error) {
	logrus.Debugf("selecting attributes from %f, min [%f], max [%f]", av, min, max)
	if len(av) == 0 {
		return nil, fmt.Errorf("empty attribute values")
	}
	var (
		// holds the selected values
		selectedValues []float64
		// "distances" to max from below ("left") and from above ("right")
		lDist, rDist = math.MaxFloat64, math.MaxFloat64
		// indexes of the closest values to max in the sorted slice; -1 means none found
		rIdx, lIdx = -1, -1
	)
	// sort the slice in increasing order (mutates the receiver)
	av.sort()
	logrus.Debugf("sorted attributes: [%f]", av)
	for i, v := range av {
		if v < max {
			// track the closest value below max
			if lDist > max-v {
				lDist = max - v
				lIdx = i
			}
		} else {
			// track the closest value at or above max
			if rDist > v-max {
				rDist = v - max
				rIdx = i
			}
		}
		if min <= v && v <= max {
			logrus.Debugf("found value between min[%f]-max[%f]: [%f], index: [%d]", min, max, v, i)
			selectedValues = append(selectedValues, v)
		}
	}
	logrus.Debugf("lower-closest index: [%d], higher-closest index: [%d]", lIdx, rIdx)
	if len(selectedValues) == 0 {
		// there are no values between the two limits
		if rIdx == -1 {
			// there are no values higher than max, return the closest lesser value;
			// this covers the case when the [min, max] interval is out of range of
			// the value set (the left index is either 0 or len(av)-1 in that case)
			return []float64{av[lIdx]}, nil
		}
		// there are values higher than max, return the closest one to it
		return []float64{av[rIdx]}, nil
	}
	return selectedValues, nil
} | pkg/recommender/attributes.go | 0.627495 | 0.432543 | attributes.go | starcoder |
package cgen
// File asm and the "asm" type provide a simple wrapper for generating
// MIPS assembly. Many of the methods take an optional comment slice:
// they only use the first element. They also attempt to return
// correct write counts and errors from the underlying fmt.Fprintf
// calls, in case anyone ever checks those.
import (
"fmt"
"io"
"strings"
)
// asm wraps an io.Writer with helpers for emitting MIPS assembly text.
type asm struct {
	io.Writer
}
// NewAsm returns a new asm wrapper around w.
func NewAsm(w io.Writer) asm {
	return asm{w}
}
// ObjTag writes out the -1 object tag expected by the Cool runtime code.
func (a asm) ObjTag() (int, error) {
	return a.Word(-1)
}
// Label writes out a label, optionally followed by a trailing comment
// (only the first comment string is used).
func (a asm) Label(l string, comment ...string) (int, error) {
	if len(comment) == 0 {
		return fmt.Fprintf(a, "%s:\n", l)
	}
	return fmt.Fprintf(a, "%s: \t# %s\n", l, comment[0])
}
// Word writes out a .word directive for an integer value, optionally
// followed by a trailing comment.
func (a asm) Word(w int, comment ...string) (int, error) {
	if len(comment) == 0 {
		return fmt.Fprintf(a, "\t.word\t%d\n", w)
	}
	return fmt.Fprintf(a, "\t.word\t%d\t# %s\n", w, comment[0])
}
// WordS writes out a .word directive for a label, optionally followed
// by a trailing comment.
func (a asm) WordS(s string, comment ...string) (int, error) {
	if len(comment) == 0 {
		return fmt.Fprintf(a, "\t.word\t%s\n", s)
	}
	return fmt.Fprintf(a, "\t.word\t%s\t# %s\n", s, comment[0])
}
// Global declares a label to be global, optionally followed by a
// trailing comment.
func (a asm) Global(s string, comment ...string) (int, error) {
	if len(comment) == 0 {
		return fmt.Fprintf(a, "\t.globl\t%s\n", s)
	}
	return fmt.Fprintf(a, "\t.globl\t%s\t# %s\n", s, comment[0])
}
// Data starts writing to the data segment.
func (a asm) Data() (int, error) {
	return fmt.Fprintf(a, "\t.data\n")
}
// Text starts writing to the text segment.
// (The old comment said "data segment", which contradicted the emitted
// .text directive.)
func (a asm) Text() (int, error) {
	return fmt.Fprintf(a, "\t.text\n")
}
// AsciiZ writes out a NUL-terminated string constant. Strings containing
// backslashes are emitted as raw .byte directives to dodge SPIM bugs in
// backslash escaping; everything else uses a quoted .asciiz directive.
func (a asm) AsciiZ(s string) (int, error) {
	if s == "" || !strings.ContainsRune(s, '\\') {
		return fmt.Fprintf(a, "\t.asciiz\t%q\n", s)
	}
	// Keep the original text visible as a comment next to the raw bytes.
	a.Comment(fmt.Sprintf("%q", s))
	// Build the directive with a Builder instead of quadratic string
	// concatenation, and write it with io.WriteString so the generated
	// data is never interpreted as a Fprintf format string.
	var b strings.Builder
	b.WriteString("\t.byte\t")
	for _, c := range []byte(s) {
		fmt.Fprintf(&b, "%d,", c)
	}
	b.WriteString("0\n")
	return io.WriteString(a, b.String())
}
// WordAlign ensures the next write is word-aligned (4-byte boundary).
func (a asm) WordAlign() (int, error) {
	return fmt.Fprintf(a, "\t.align\t2\n")
}
// Comment writes out a single-line comment against the left margin.
func (a asm) Comment(s string) (int, error) {
	return fmt.Fprintf(a, "# %s\n", s)
}
// CommentH2 writes out a single-line comment, with a preceding blank line.
func (a asm) CommentH2(s string) (int, error) {
	return fmt.Fprintf(a, "\n# %s\n", s)
}
// CommentH1 writes out a three-line comment banner, with a preceding
// blank line. The byte counts of the three writes are summed so the
// returned count covers everything written before any error.
func (a asm) CommentH1(s string) (int, error) {
	var b1, b2 int
	b1, err := fmt.Fprintf(a, "\n#\n")
	if err != nil {
		return b1, err
	}
	b2, err = fmt.Fprintf(a, "# %s\n", s)
	b1 += b2
	if err != nil {
		return b1, err
	}
	b2, err = fmt.Fprintf(a, "#\n")
	b1 += b2
	return b1, err
}
// Inst writes out an instruction. It expects an instruction, and an
// argument, both strings.
func (a asm) Inst(inst, arg string, comment ...string) (int, error) {
if len(comment) > 0 {
return fmt.Fprintf(a, "\t%s\t%s\t# %s\n", inst, arg, comment[0])
}
return fmt.Fprintf(a, "\t%s\t%s\n", inst, arg)
} | cgen/asm.go | 0.649579 | 0.513729 | asm.go | starcoder |
package hll
import (
"errors"
"github.com/spaolacci/murmur3"
"hash"
"math"
"math/bits"
)
// HLL is a HyperLogLog cardinality estimator over a 32-bit hash space.
type HLL struct {
	numRegisterBits int         // number of low hash bits used to pick a register
	registers       []int       // one max-trailing-zero-count per register
	murmur32        hash.Hash32 // hasher used by AddString
}
// NewHLL returns a new HLL configured to use the final 6 bits of the
// hash to denote the register (64 registers total).
func NewHLL() HLL {
	return NewHLLWithRegisterBits(6)
}
// NewHLLWithRegisterBits returns a new HLL with 2^numRegisterBits
// registers, all initialized to zero.
func NewHLLWithRegisterBits(numRegisterBits int) HLL {
	numRegisters := int(math.Exp2(float64(numRegisterBits)))
	registers := make([]int, numRegisters)
	murmur32 := murmur3.New32()
	hllInstance := HLL{numRegisterBits, registers, murmur32}
	return hllInstance
}
// AddHash folds a 32-bit hash into the HLL. The low numRegisterBits
// select a register; the register keeps the maximum "rank" (trailing
// zero count + 1) seen among the remaining bits.
// Note: the value receiver works because registers is a slice whose
// backing array is shared with the caller's HLL.
func (hll HLL) AddHash(hashedValue uint32) {
	// bit mask to fetch bits representing register index to update
	maskRegisterBits := ^uint32(0) >> uint32(32-hll.numRegisterBits)
	registerIndex := uint32(hashedValue & maskRegisterBits)
	remainingBits := hashedValue >> uint32(hll.numRegisterBits)
	numRemainingBits := 32 - hll.numRegisterBits
	trailingZeroes := bits.TrailingZeros32(remainingBits)
	registerValue := 0
	// Cap the rank when remainingBits is all zeroes (TrailingZeros32
	// returns 32 in that case).
	if trailingZeroes > numRemainingBits {
		registerValue = numRemainingBits + 1
	} else {
		registerValue = trailingZeroes + 1
	}
	hll.registers[registerIndex] = int(math.Max(float64(hll.registers[registerIndex]), float64(registerValue)))
}
// AddString adds a string value to the HLL, using the 32-bit MurmurHash
// of the string bytes.
func (hll HLL) AddString(value string) {
	// Reset before writing: hash.Hash accumulates written bytes, so
	// without this every call after the first would hash the
	// concatenation of all values added so far instead of this value.
	hll.murmur32.Reset()
	hll.murmur32.Write([]byte(value))
	hashedValue := hll.murmur32.Sum32()
	hll.AddHash(hashedValue)
}
// Count estimates the cardinality from the register values using the
// HyperLogLog estimator with the standard small-range (linear counting)
// and large-range (32-bit hash collision) corrections.
func (hll HLL) Count() float64 {
	m := float64(len(hll.registers))
	// Harmonic-mean term: 1 / sum(2^-register).
	harmonicMean := 0.0
	numZeroRegisters := 0.0
	for _, registerVal := range hll.registers {
		harmonicMean += math.Pow(2.0, -1*float64(registerVal))
		if registerVal == 0 {
			numZeroRegisters += 1.0
		}
	}
	harmonicMean = 1.0 / harmonicMean
	// Raw estimate: alpha * m^2 * harmonic mean, where alpha corrects the
	// systematic multiplicative bias of the raw estimator.
	estimate := getAlphaByNumRegisters(len(hll.registers)) * m * m * harmonicMean
	const twoTo32 = 4294967296.0 // size of the 32-bit hash space
	if estimate <= (5.0/2.0)*m {
		// Small range: fall back to linear counting while registers are
		// still empty.
		if numZeroRegisters == 0 {
			return estimate
		}
		return math.Round(m * math.Log(m/numZeroRegisters))
	}
	if estimate <= twoTo32/30.0 {
		// Intermediate range: no correction.
		return estimate
	}
	// Large range: correct for hash collisions in the 2^32 space. The
	// standard formula uses the natural logarithm (the old code used
	// math.Log2, inconsistent with the natural log used above and with
	// the published correction).
	return -twoTo32 * math.Log(1-estimate/twoTo32)
}
// Merge merges another HLL into this one by taking, for every register,
// the maximum of the two register values. Both HLLs must have the same
// number of register bits; otherwise an error is returned and the
// receiver is left unchanged.
func (hll HLL) Merge(other HLL) error {
	// verify that num register bits are equal
	if hll.numRegisterBits != other.numRegisterBits {
		return errors.New("hll: can't merge HLLs with different number of registers")
	}
	for index, registerVal := range other.registers {
		hll.registers[index] = int(math.Max(float64(registerVal), float64(hll.registers[index])))
	}
	return nil
}
// getAlphaByNumRegisters returns the bias-correction constant alpha for
// the given register count: tabulated values for 16/32/64 registers and
// the closed-form approximation otherwise, per the HyperLogLog paper.
func getAlphaByNumRegisters(numRegisters int) float64 {
	switch numRegisters {
	case 16:
		return 0.673
	case 32:
		return 0.697
	case 64:
		return 0.709
	default:
		return 0.7213 / (1 + 1.079/float64(numRegisters))
	}
}
package geo
import (
"math"
)
// A Polygon is carved out of a 2D plane by a set of (possibly disjoint)
// contours. It can thus contain holes, and can be self-intersecting.
type Polygon struct {
	points []*Point // vertices in order; the last vertex connects back to the first
}
// NewPolygon creates and returns a new Polygon composed of the passed-in
// points. Points are considered to be in order, such that the last point
// forms an edge with the first point.
func NewPolygon(points []*Point) *Polygon {
	return &Polygon{points: points}
}
// Points returns the vertices of the current Polygon.
func (p *Polygon) Points() []*Point {
	return p.points
}
// Add appends the passed-in point to the current Polygon's vertex list.
// (The old comment said "contour", but the argument is a single point.)
func (p *Polygon) Add(point *Point) {
	p.points = append(p.points, point)
}
// IsClosed returns whether or not the polygon is closed.
// A polygon needs at least three vertices to enclose any area.
// TODO: This can obviously be improved, but for now, this should be
// sufficient for detecting if points are contained using the raycast
// algorithm.
func (p *Polygon) IsClosed() bool {
	return len(p.points) >= 3
}
// Contains returns whether or not the current Polygon contains the
// passed-in Point, using the ray-casting (even-odd) rule: the point is
// inside when a ray from it crosses an odd number of polygon edges.
func (p *Polygon) Contains(point *Point) bool {
	if !p.IsClosed() {
		return false
	}
	// Start with the closing edge (last vertex -> first vertex), then
	// toggle containment for every crossed edge.
	start := len(p.points) - 1
	end := 0
	contains := p.intersectsWithRaycast(point, p.points[start], p.points[end])
	for i := 1; i < len(p.points); i++ {
		if p.intersectsWithRaycast(point, p.points[i-1], p.points[i]) {
			contains = !contains
		}
	}
	return contains
}
// Using the raycast algorithm, this returns whether or not the passed in point
// intersects with the edge drawn by the passed in start and end points.
// Original implementation: http://rosettacode.org/wiki/Ray-casting_algorithm#Go
func (p *Polygon) intersectsWithRaycast(point *Point, start *Point, end *Point) bool {
	// Always ensure that the the first point
	// has a y coordinate that is less than the second point
	if start.lng > end.lng {
		// Switch the points if otherwise.
		start, end = end, start
	}
	// Move the point's y coordinate
	// outside of the bounds of the testing region
	// so we can start drawing a ray
	// (math.Nextafter nudges it by the smallest representable step).
	for point.lng == start.lng || point.lng == end.lng {
		newLng := math.Nextafter(point.lng, math.Inf(1))
		point = NewPoint(point.lat, newLng)
	}
	// If we are outside of the polygon, indicate so.
	if point.lng < start.lng || point.lng > end.lng {
		return false
	}
	// Quick accept/reject based on the edge's lat extent before falling
	// back to the slope comparison below.
	if start.lat > end.lat {
		if point.lat > start.lat {
			return false
		}
		if point.lat < end.lat {
			return true
		}
	} else {
		if point.lat > end.lat {
			return false
		}
		if point.lat < start.lat {
			return true
		}
	}
	// Compare the slope of (start -> point) against the edge's slope to
	// decide which side of the edge the ray crosses.
	raySlope := (point.lng - start.lng) / (point.lat - start.lat)
	diagSlope := (end.lng - start.lng) / (end.lat - start.lat)
	return raySlope >= diagSlope
} | polygon.go | 0.811564 | 0.61682 | polygon.go | starcoder |
package main
import (
"fmt"
"image"
"image/color"
"image/png"
"math"
"os"
)
// vector is a 3-component float vector (x, y, z).
type vector [3]float64
// normalize scales v in place to unit length.
func (v *vector) normalize() {
	invLen := 1 / math.Sqrt(dot(v, v))
	for i := range v {
		v[i] *= invLen
	}
}
// dot returns the dot product of x and y.
func dot(x, y *vector) float64 {
	return x[0]*y[0] + x[1]*y[1] + x[2]*y[2]
}
// sphere is a sphere with integer center (cx, cy, cz) and radius r.
type sphere struct {
	cx, cy, cz int
	r int
}
// hit reports whether the vertical ray through pixel (x, y) intersects
// the sphere; when it does, z1 and z2 are the near and far intersection
// depths (z1 <= z2).
func (s *sphere) hit(x, y int) (z1, z2 float64, hit bool) {
	// Shift into the sphere's local coordinates.
	x -= s.cx
	y -= s.cy
	if zsq := s.r*s.r - (x*x + y*y); zsq >= 0 {
		zsqrt := math.Sqrt(float64(zsq))
		return float64(s.cz) - zsqrt, float64(s.cz) + zsqrt, true
	}
	return 0, 0, false
}
// deathStar renders a grayscale image of sphere pos with sphere neg
// carved out of it ("death star" dish), lit from direction dir.
// k is the specular exponent applied to the diffuse term and amb the
// ambient light level.
func deathStar(pos, neg *sphere, k, amb float64, dir *vector) *image.Gray {
	w, h := pos.r*4, pos.r*3
	bounds := image.Rect(pos.cx-w/2, pos.cy-h/2, pos.cx+w/2, pos.cy+h/2)
	img := image.NewGray(bounds)
	vec := new(vector)
	for y, yMax := pos.cy-pos.r, pos.cy+pos.r; y <= yMax; y++ {
		for x, xMax := pos.cx-pos.r, pos.cx+pos.r; x <= xMax; x++ {
			zb1, zb2, hit := pos.hit(x, y)
			if !hit {
				continue
			}
			zs1, zs2, hit := neg.hit(x, y)
			if hit {
				// Decide whether the carving sphere actually cuts the
				// visible surface at this pixel.
				if zs1 > zb1 {
					hit = false
				} else if zs2 > zb2 {
					continue
				}
			}
			if hit {
				// Surface normal on the carved (inner) surface points
				// toward the carving sphere's center.
				vec[0] = float64(neg.cx - x)
				vec[1] = float64(neg.cy - y)
				vec[2] = float64(neg.cz) - zs2
			} else {
				// Surface normal on the outer sphere.
				vec[0] = float64(x - pos.cx)
				vec[1] = float64(y - pos.cy)
				vec[2] = zb1 - float64(pos.cz)
			}
			vec.normalize()
			s := dot(dir, vec)
			if s < 0 {
				s = 0
			}
			// Shade with the powered diffuse term plus ambient, clamped
			// into the 0..255 gray range.
			lum := 255 * (math.Pow(s, k) + amb) / (1 + amb)
			if lum < 0 {
				lum = 0
			} else if lum > 255 {
				lum = 255
			}
			img.SetGray(x, y, color.Gray{uint8(lum)})
		}
	}
	return img
}
func main() {
dir := &vector{20, -40, -10}
dir.normalize()
pos := &sphere{0, 0, 0, 120}
neg := &sphere{-90, -90, -30, 100}
img := deathStar(pos, neg, 1.5, .2, dir)
f, err := os.Create("dstar.png")
if err != nil {
fmt.Println(err)
return
}
if err = png.Encode(f, img); err != nil {
fmt.Println(err)
}
if err = f.Close(); err != nil {
fmt.Println(err)
}
} | lang/Go/death-star.go | 0.698844 | 0.457682 | death-star.go | starcoder |
package msgraph
// OnPremisesPublishingType enumerates the kinds of on-premises publishing.
type OnPremisesPublishingType string
const (
	// OnPremisesPublishingTypeVAppProxy is the "AppProxy" publishing type.
	OnPremisesPublishingTypeVAppProxy OnPremisesPublishingType = "AppProxy"
	// OnPremisesPublishingTypeVExchangeOnline is the "ExchangeOnline" publishing type.
	OnPremisesPublishingTypeVExchangeOnline OnPremisesPublishingType = "ExchangeOnline"
	// OnPremisesPublishingTypeVAuthentication is the "Authentication" publishing type.
	OnPremisesPublishingTypeVAuthentication OnPremisesPublishingType = "Authentication"
	// OnPremisesPublishingTypeVProvisioning is the "Provisioning" publishing type.
	OnPremisesPublishingTypeVProvisioning OnPremisesPublishingType = "Provisioning"
	// OnPremisesPublishingTypeVIntunePfx is the "IntunePfx" publishing type.
	OnPremisesPublishingTypeVIntunePfx OnPremisesPublishingType = "IntunePfx"
	// OnPremisesPublishingTypeVOflineDomainJoin is the "OflineDomainJoin" publishing type.
	OnPremisesPublishingTypeVOflineDomainJoin OnPremisesPublishingType = "OflineDomainJoin"
	// OnPremisesPublishingTypeVUnknownFutureValue is the sentinel for values unknown to this client.
	OnPremisesPublishingTypeVUnknownFutureValue OnPremisesPublishingType = "UnknownFutureValue"
)
// OnPremisesPublishingTypePAppProxy returns a pointer to OnPremisesPublishingTypeVAppProxy
func OnPremisesPublishingTypePAppProxy() *OnPremisesPublishingType {
	p := new(OnPremisesPublishingType)
	*p = OnPremisesPublishingTypeVAppProxy
	return p
}
// OnPremisesPublishingTypePExchangeOnline returns a pointer to OnPremisesPublishingTypeVExchangeOnline
func OnPremisesPublishingTypePExchangeOnline() *OnPremisesPublishingType {
	p := new(OnPremisesPublishingType)
	*p = OnPremisesPublishingTypeVExchangeOnline
	return p
}
// OnPremisesPublishingTypePAuthentication returns a pointer to OnPremisesPublishingTypeVAuthentication
func OnPremisesPublishingTypePAuthentication() *OnPremisesPublishingType {
	p := new(OnPremisesPublishingType)
	*p = OnPremisesPublishingTypeVAuthentication
	return p
}
// OnPremisesPublishingTypePProvisioning returns a pointer to OnPremisesPublishingTypeVProvisioning
func OnPremisesPublishingTypePProvisioning() *OnPremisesPublishingType {
	p := new(OnPremisesPublishingType)
	*p = OnPremisesPublishingTypeVProvisioning
	return p
}
// OnPremisesPublishingTypePIntunePfx returns a pointer to OnPremisesPublishingTypeVIntunePfx
func OnPremisesPublishingTypePIntunePfx() *OnPremisesPublishingType {
	p := new(OnPremisesPublishingType)
	*p = OnPremisesPublishingTypeVIntunePfx
	return p
}
// OnPremisesPublishingTypePOflineDomainJoin returns a pointer to OnPremisesPublishingTypeVOflineDomainJoin
func OnPremisesPublishingTypePOflineDomainJoin() *OnPremisesPublishingType {
	p := new(OnPremisesPublishingType)
	*p = OnPremisesPublishingTypeVOflineDomainJoin
	return p
}
// OnPremisesPublishingTypePUnknownFutureValue returns a pointer to OnPremisesPublishingTypeVUnknownFutureValue
func OnPremisesPublishingTypePUnknownFutureValue() *OnPremisesPublishingType {
v := OnPremisesPublishingTypeVUnknownFutureValue
return &v
} | beta/OnPremisesPublishingTypeEnum.go | 0.553505 | 0.460895 | OnPremisesPublishingTypeEnum.go | starcoder |
package godouble
// MockedMethodCall is a MethodCall that carries pre-defined expectations
// about how often it is invoked and in what sequence relative to other calls.
type MockedMethodCall interface {
	/*
		Matching sets up whether this call will match a given set of arguments.
		An empty matcher list fatally fails the test.
		If the first matcher is a Matcher it is used directly (the test fatally
		fails if more matchers are passed).
		If the first matcher is a func, this is equivalent to
		Matching(Matcher(matchers[0], matchers[1:])).
		Otherwise each value is converted to a Matcher via either Func() or
		Eql(), and the resulting list is passed to Args().
	*/
	Matching(matchers ...interface{}) MockedMethodCall
	// After sets up that this call will only match once the supplied calls
	// have already completed.
	After(calls ...MockedMethodCall) MockedMethodCall
	/*
		Returning sets up the return values for this call.
		The values are converted to a ReturnValues via Values().
	*/
	Returning(values ...interface{}) MockedMethodCall
	// Expect sets an expectation on the number of times this call will be invoked.
	Expect(expect Expectation) MockedMethodCall
	MethodCall
	// complete reports whether the call's completion criterion has been reached.
	complete() bool
}
// mockedMethodCall is the default MockedMethodCall implementation,
// layering invocation counting, sequencing, and expectations on top of a
// stubbed call.
type mockedMethodCall struct {
	*stubbedMethodCall
	count  int                // number of times this call has been invoked
	after  []MockedMethodCall // calls that must complete before this one matches
	expect Expectation        // optional expectation on the invocation count
}
// complete reports whether this call's expectation is a Completion that
// has been satisfied by the current invocation count. Calls without a
// Completion expectation are never considered complete.
func (c *mockedMethodCall) complete() bool {
	completion, isCompletion := c.expect.(Completion)
	if !isCompletion {
		return false
	}
	return completion.Complete(c.count)
}
// met reports whether the call's expectation holds for the current
// invocation count; a call without an expectation is always met.
func (c *mockedMethodCall) met() bool {
	if c.expect == nil {
		return true
	}
	return c.expect.Met(c.count)
}
// newMockedMethodCall constructs a MockedMethodCall wrapping a stubbed
// call on method m, with no invocations recorded and no ordering
// constraints.
func newMockedMethodCall(m *method) MockedMethodCall {
	return &mockedMethodCall{
		stubbedMethodCall: newStubbedMethodCall(m),
		count:             0,
		after:             []MockedMethodCall{},
	}
}
// Matching delegates matcher setup to the embedded stubbed call and
// returns the receiver for chaining.
func (c *mockedMethodCall) Matching(matchers ...interface{}) MockedMethodCall {
	c.t().Helper()
	c.stubbedMethodCall.Matching(matchers...)
	return c
}
// After records that this call may only be invoked once the given calls
// (which might be on other mocks) have completed. Returns the receiver
// for chaining.
func (c *mockedMethodCall) After(after ...MockedMethodCall) MockedMethodCall {
	c.after = append(c.after, after...)
	return c
}
// Returning delegates return-value setup to the embedded stubbed call
// and returns the receiver for chaining.
func (c *mockedMethodCall) Returning(values ...interface{}) MockedMethodCall {
	c.stubbedMethodCall.Returning(values...)
	return c
}
// Expect records the expectation on the number of invocations and
// returns the receiver for chaining.
func (c *mockedMethodCall) Expect(expect Expectation) MockedMethodCall {
	c.expect = expect
	return c
}
// inSequence reports whether every call this one is sequenced after has
// already completed.
func (c *mockedMethodCall) inSequence() bool {
	for _, call := range c.after {
		if !call.complete() {
			return false
		}
	}
	return true
}
// matches reports whether the call matches args AND is still incomplete
// AND all of its sequencing prerequisites have completed.
func (c *mockedMethodCall) matches(args []interface{}) bool {
	return c.stubbedMethodCall.matches(args) && !c.complete() && c.inSequence()
}
// spy records one invocation, optionally logs when the call just
// reached completion (trace mode), and delegates to the stubbed call
// for the return values.
func (c *mockedMethodCall) spy(args []interface{}) ([]interface{}, error) {
	c.count++
	if c.trace() && c.complete() {
		c.t().Helper()
		c.t().Logf("%v completed expectations after %d calls", c, c.count)
	}
	return c.stubbedMethodCall.spy(args)
}
// verify reports a test error when the call's invocation-count
// expectation was not met.
func (c *mockedMethodCall) verify(t T) {
	t.Helper()
	if !c.met() {
		t.Errorf("%v expected %v, found %d calls", c.stubbedMethodCall, c.expect, c.count)
	}
}
// ExpectInOrder is shorthand to Setup that the list of calls are expected to executed in this sequence
func ExpectInOrder(calls ...MockedMethodCall) {
for i := len(calls) - 1; i > 0; i-- {
calls[i].After(calls[i-1])
}
} | godouble/mock.go | 0.774242 | 0.533519 | mock.go | starcoder |
package dtw
import (
"math"
)
// distanceFunction computes a scalar distance between two samples.
type distanceFunction func(float64, float64) float64
// Dtw holds the state of a dynamic-time-warping computation between two
// series of lengths m and n.
type Dtw struct {
	m                  int         // length of the first series
	n                  int         // length of the second series
	distanceCostMatrix [][]float64 // accumulated-cost matrix filled by ComputeOptimalPath*
	similarity         float64     // total cost of the optimal alignment
	DistanceFunction   distanceFunction
}
// distanceEuclidean returns |x - y|, the one-dimensional Euclidean
// distance. math.Abs replaces the old math.Sqrt(d*d) round trip, which
// could overflow to +Inf (|d| > ~1e154) or underflow to 0 for denormal
// differences.
func distanceEuclidean(x float64, y float64) float64 {
	return math.Abs(x - y)
}
// ComputeOptimalPath computes the DTW cost matrix for s and t with an
// unconstrained warping window (len(s)+len(t) covers the whole matrix).
func (dtw *Dtw) ComputeOptimalPath(s []float64, t []float64) {
	dtw.ComputeOptimalPathWithWindow(s, t, len(s)+len(t))
}
// ComputeOptimalPathWithWindow fills the accumulated-cost matrix for s
// and t using dynamic programming, restricted to a Sakoe-Chiba band of
// half-width max(w, len(s)-len(t)) around the diagonal. Cells outside
// the band stay +Inf. The result is stored on the receiver
// (distanceCostMatrix and similarity).
func (dtw *Dtw) ComputeOptimalPathWithWindow(s []float64, t []float64, w int) {
	dtw.m = len(s)
	dtw.n = len(t)
	if dtw.DistanceFunction == nil {
		dtw.DistanceFunction = distanceEuclidean
	}
	// Effective window: at least the length difference, so the corner
	// cell is always reachable.
	var window int = dtw.m - dtw.n
	if w > window {
		window = w
	}
	// One extra row/column of +Inf acts as the boundary condition; cell
	// [0][0] is the zero-cost start.
	distanceCostMatrix := createFloatMatrix(dtw.m+1, dtw.n+1, math.Inf(1))
	distanceCostMatrix[0][0] = 0
	for rowIndex := 1; rowIndex <= dtw.m; rowIndex++ {
		columnIndexStart := 1
		if (rowIndex - window) > 1 {
			columnIndexStart = (rowIndex - window)
		}
		columnIndexEnd := dtw.n
		if (rowIndex + window) < columnIndexEnd {
			columnIndexEnd = (rowIndex + window)
		}
		for columnIndex := columnIndexStart; columnIndex <= columnIndexEnd; columnIndex++ {
			// Local cost plus the cheapest of insertion, deletion, match.
			cost := dtw.DistanceFunction(s[rowIndex-1], t[columnIndex-1])
			distanceCostMatrix[rowIndex][columnIndex] = cost + findSmallestNumber([]float64{
				distanceCostMatrix[rowIndex-1][columnIndex],
				distanceCostMatrix[rowIndex][columnIndex-1],
				distanceCostMatrix[rowIndex-1][columnIndex-1],
			})
		}
	}
	// copy into new matrix, stripping the boundary row and column
	returnMatrix := createFloatMatrix(dtw.m, dtw.n, math.Inf(1))
	for i := 1; i <= dtw.m; i++ {
		returnMatrix[i-1] = distanceCostMatrix[i][1:(dtw.n + 1)]
	}
	dtw.distanceCostMatrix = returnMatrix
	dtw.similarity = returnMatrix[dtw.m-1][dtw.n-1]
}
// RetrieveOptimalPath backtracks through the accumulated-cost matrix
// from the bottom-right corner to (0, 0) and returns the optimal
// alignment as a list of [row, column] index pairs in forward order.
// ComputeOptimalPath* must have been called first.
func (dtw *Dtw) RetrieveOptimalPath() [][]int {
	rowIndex := dtw.m - 1
	columnIndex := dtw.n - 1
	distanceCostMatrix := dtw.distanceCostMatrix
	// Tolerance used when matching the minimum neighbor cost, since the
	// costs are accumulated floats.
	epsilon := 1e-14
	var path [][]int
	// Worst-case path length is m+n steps; the path is filled from the
	// back so unused leading entries can be sliced off at the end.
	path = createIntMatrix(dtw.m+dtw.n, 2, 0)
	arrayIndex := len(path) - 1
	path[arrayIndex][0] = rowIndex
	path[arrayIndex][1] = columnIndex
	arrayIndex--
	for rowIndex > 0 || columnIndex > 0 {
		if rowIndex > 0 && columnIndex > 0 {
			// Prefer the diagonal move on ties, then up, then left.
			min := findSmallestNumber([]float64{
				distanceCostMatrix[rowIndex-1][columnIndex],
				distanceCostMatrix[rowIndex][columnIndex-1],
				distanceCostMatrix[rowIndex-1][columnIndex-1]})
			if nearlyEqual(min, distanceCostMatrix[rowIndex-1][columnIndex-1], epsilon) {
				rowIndex--
				columnIndex--
			} else if nearlyEqual(min, distanceCostMatrix[rowIndex-1][columnIndex], epsilon) {
				rowIndex--
			} else if nearlyEqual(min, distanceCostMatrix[rowIndex][columnIndex-1], epsilon) {
				columnIndex--
			}
		} else if rowIndex > 0 {
			rowIndex--
		} else if columnIndex > 0 {
			columnIndex--
		}
		path[arrayIndex][0] = rowIndex
		path[arrayIndex][1] = columnIndex
		arrayIndex--
	}
	return path[arrayIndex+1 : cap(path)]
}
// createFloatMatrix allocates an n-by-m matrix backed by one contiguous
// slice, with every cell initialized to value.
func createFloatMatrix(n int, m int, value float64) [][]float64 {
	backing := make([]float64, n*m)
	for i := range backing {
		backing[i] = value
	}
	rows := make([][]float64, n)
	for i := range rows {
		rows[i] = backing[i*m : (i+1)*m]
	}
	return rows
}
// createIntMatrix allocates an n-by-m matrix backed by one contiguous
// slice, with every cell initialized to value.
func createIntMatrix(n int, m int, value int) [][]int {
	backing := make([]int, n*m)
	for i := range backing {
		backing[i] = value
	}
	rows := make([][]int, n)
	for i := range rows {
		rows[i] = backing[i*m : (i+1)*m]
	}
	return rows
}
// findSmallestNumber returns the minimum element of x.
// x must be non-empty.
func findSmallestNumber(x []float64) float64 {
	smallest := x[0]
	for _, v := range x[1:] {
		if v < smallest {
			smallest = v
		}
	}
	return smallest
}
}
// nearlyEqual reports whether i and j are approximately equal under the
// relative tolerance epsilon. Exactly equal values (including infinities)
// always compare true; otherwise the values are close when their
// difference is below machine epsilon (handles values near zero) or
// within epsilon of the larger magnitude.
//
// The old code compared |(|j| - |i|)|, which wrongly reported values of
// equal magnitude but opposite sign (e.g. 1 and -1) as equal; the
// standard formulation uses the difference of the values themselves.
func nearlyEqual(i float64, j float64, epsilon float64) bool {
	if i == j {
		return true
	}
	difference := math.Abs(i - j)
	if difference < 2.2204460492503130808472633361816E-16 {
		return true
	}
	return difference <= math.Max(math.Abs(i), math.Abs(j))*epsilon
}
package quad
import (
"github.com/ghthor/filu/rpg2d/coord"
"github.com/ghthor/filu/rpg2d/entity"
)
// A collision between 2 entities because the
// entities bounds are overlapping. Intended to
// be solved by the user defined NarrowPhaseHandler.
type Collision struct {
A, B entity.Entity
}
// CollisionIndex maps each entity to every collision it is involved in.
type CollisionIndex map[entity.Entity][]Collision
// add records every collision under both of its entities and returns
// the index for chaining.
func (i CollisionIndex) add(collisions []Collision) CollisionIndex {
	for _, col := range collisions {
		i[col.A] = append(i[col.A], col)
		i[col.B] = append(i[col.B], col)
	}
	return i
}
// Bounds returns the bounds of A and B joined together.
func (c Collision) Bounds() coord.Bounds {
	return coord.JoinBounds(c.A.Bounds(), c.B.Bounds())
}
// IsSameAs reports whether oc involves the same pair of entities as c,
// in either order.
func (c Collision) IsSameAs(oc Collision) bool {
	sameOrder := c.A == oc.A && c.B == oc.B
	swapped := c.A == oc.B && c.B == oc.A
	return sameOrder || swapped
}
// CollisionGroup is a group of collisions where each collision may have
// an effect on the others. A dependency tree should be created by the
// user to resolve the collisions in the correct order.
type CollisionGroup struct {
	// All of the entities that appear in the collisions of the group
	// (each entity listed once).
	Entities []entity.Entity
	// All of the collisions in the group (each collision listed once).
	Collisions []Collision
}
// Bounds returns the join of the bounds of every collision in the group.
func (cg CollisionGroup) Bounds() coord.Bounds {
	all := make([]coord.Bounds, len(cg.Collisions))
	for idx, col := range cg.Collisions {
		all[idx] = col.Bounds()
	}
	return coord.JoinBounds(all...)
}
// CollisionIndex builds a per-entity index of the group's collisions.
func (cg CollisionGroup) CollisionIndex() CollisionIndex {
	index := make(CollisionIndex, len(cg.Entities))
	return index.add(cg.Collisions)
}
// AddCollision adds a collision to the group, also adding the
// collision's entities to the entity slice. Collisions the group already
// contains and entities already present are filtered out. The updated
// group is returned (the receiver is a value).
func (cg CollisionGroup) AddCollision(c Collision) CollisionGroup {
	for _, cc := range cg.Collisions {
		if c.IsSameAs(cc) {
			return cg
		}
	}
	cg.Collisions = append(cg.Collisions, c)
	// Add each entity only once; helper replaces the old goto-based flow.
	if !containsEntity(cg.Entities, c.A) {
		cg.Entities = append(cg.Entities, c.A)
	}
	if !containsEntity(cg.Entities, c.B) {
		cg.Entities = append(cg.Entities, c.B)
	}
	return cg
}

// containsEntity reports whether e is present in s.
func containsEntity(s []entity.Entity, e entity.Entity) bool {
	for _, existing := range s {
		if existing == e {
			return true
		}
	}
	return false
}
// CollisionGroupIndex maps each entity to the single collision group it
// belongs to. An entity may ONLY be assigned to 1 collision group: if an
// entity has collisions in separate groups, those groups must be merged.
// This rule is what makes the index possible.
type CollisionGroupIndex map[entity.Entity]*CollisionGroup | rpg2d/quad/collision.go | 0.833833 | 0.533762 | collision.go | starcoder |
package main
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
// main reads a comma-separated intcode program from stdin, tries every
// permutation of the phase settings 0-4 on the amplifier chain, and
// prints the highest final thruster signal.
func main() {
	origin := []int{}
	scanner := bufio.NewScanner(os.Stdin)
	// Read and parse the intcode program from stdin.
	for scanner.Scan() {
		for _, val := range strings.Split(scanner.Text(), ",") {
			num, _ := strconv.Atoi(val)
			origin = append(origin, num)
		}
	}
	opcodes := make([]int, len(origin))
	copy(opcodes, origin)
	max := 0
	sequences := getPermutations([]int{0, 1, 2, 3, 4})
	for _, seq := range sequences {
		// Feed each amplifier's output into the next one in the chain.
		out := 0
		for _, phase := range seq {
			// Restore original memory values before each amplifier run.
			copy(opcodes, origin)
			out = compute(opcodes, phase, out)
		}
		// Only the final amplifier's output counts for a sequence. The
		// old code also compared intermediate amplifier outputs against
		// max, which could report a spuriously high signal.
		if out > max {
			max = out
		}
	}
	fmt.Printf("diagnostic code: %d\n", max)
}
// compute runs the intcode program in opcodes for one amplifier stage.
// phase is supplied as the first input, out (the previous stage's
// output) as every subsequent input; execution stops at the first
// output instruction (opcode 4) and its value is returned.
// opcodes is mutated in place.
func compute(opcodes []int, phase, out int) int {
	// Array of instruction parameter counts, indexed by opcode 0-8.
	paramCounts := []int{0, 4, 4, 2, 2, 3, 3, 4, 4}
	instrCount := 2
	index, outIndex := 0, 0
	for index < len(opcodes)-1 {
		// Extract the opcode (last digit), tolerating negative cells.
		op := 0
		if opcodes[index] < 0 {
			op -= (opcodes[index]*-1)%10
		} else {
			op = opcodes[index]%10
		}
		// Get instruction parameter count
		instrCount = paramCounts[op]
		// Get parameters by mode
		params := getInstructionParams(opcodes, index, out, instrCount)
		if op == 1 { // addition instruction
			opcodes[opcodes[index+3]] = params[0] + params[1]
		} else if op == 2 { // multiplication instruction
			opcodes[opcodes[index+3]] = params[0] * params[1]
		} else if op == 3 { // input instruction
			// Phase setting on the first input request, then the
			// previous amplifier's output.
			var input int
			if outIndex == 0 {
				input = phase
			} else {
				input = out
			}
			// NOTE(review): this tests params[0] (the dereferenced value)
			// rather than the parameter's mode digit — looks like it
			// intends a position/immediate mode check; confirm.
			if params[0] == 0 {
				opcodes[opcodes[index+1]] = input
			} else {
				opcodes[index+1] = input
			}
			outIndex++
		} else if op == 4 { // output instruction
			out = opcodes[opcodes[index+1]]
			break
		} else if op == 5 { // jump-if-true instruction
			if params[0] != 0 {
				// set instruction pointer to second parameter, minus increment
				index = params[1] - instrCount
			}
		} else if op == 6 { // jump-if-false instruction
			if params[0] == 0 {
				// set instruction pointer to second parameter, minus increment
				index = params[1] - instrCount
			}
		} else if op == 7 { // less-than instruction
			// store 1 or 0 at the index of the third parameter, based on if
			// the first parameter is less-than the second
			if params[0] < params[1] {
				opcodes[opcodes[index+3]] = 1
			} else {
				opcodes[opcodes[index+3]] = 0
			}
		} else if op == 8 { // equals instruction
			// store 1 or 0 at the index of the third parameter, based on if
			// the first parameter is equal to the second
			if params[0] == params[1] {
				opcodes[opcodes[index+3]] = 1
			} else {
				opcodes[opcodes[index+3]] = 0
			}
		}
		index += instrCount
	}
	return out
}
// getInstructionParams resolves the parameter values for the
// instruction at index, honoring the per-parameter mode digits encoded
// above the two-digit opcode (1 = immediate, otherwise position).
// It may return fewer than instrCount-1 values when a position
// parameter points outside the program.
func getInstructionParams(opcodes []int, index, out, instrCount int) []int {
	var digits []int
	digits = intToSlice(opcodes[index], digits)
	// append leading zeroes so every parameter has a mode digit
	for len(digits) <= instrCount {
		digits = append([]int{0}, digits...)
	}
	// get param modes from instruction (everything above the opcode digits)
	modes := digits[:len(digits)-2]
	var params []int
	i := 1
	for i < instrCount && index+i < len(opcodes) {
		// Modes are stored most-significant-first, so index from the end.
		if modes[len(modes)-i] == 1 {
			params = append(params, opcodes[index+i])
		} else {
			if opcodes[index+i] > len(opcodes)-1 {
				return params
			}
			params = append(params, opcodes[opcodes[index+i]])
		}
		i++
	}
	return params
}
// getPermutations returns every permutation of arr, generated with
// Heap's algorithm. The input slice is used as scratch space and may be
// left reordered.
func getPermutations(arr []int) [][]int {
	res := [][]int{}
	var permute func([]int, int)
	permute = func(a []int, n int) {
		if n == 1 {
			snapshot := make([]int, len(a))
			copy(snapshot, a)
			res = append(res, snapshot)
			return
		}
		for i := 0; i < n; i++ {
			permute(a, n-1)
			// Heap's algorithm: swap position depends on parity of n.
			if n%2 == 1 {
				a[i], a[n-1] = a[n-1], a[i]
			} else {
				a[0], a[n-1] = a[n-1], a[0]
			}
		}
	}
	permute(arr, len(arr))
	return res
}
// intToSlice prepends the decimal digits of n, most significant first,
// onto digits and returns the result. n == 0 leaves digits unchanged,
// matching the original recursive contract.
func intToSlice(n int, digits []int) []int {
	for n != 0 {
		digits = append([]int{n % 10}, digits...)
		n /= 10
	}
	return digits
}
package ui
import (
"fmt"
"github.com/go-gl/gl/all-core/gl"
"github.com/willauld/lpsimplex"
)
// SpacingType represents what kind of spacing a row or column uses.
type SpacingType int
const (
	// Absolute means the spacing value is the exact number of pixels the row or column should be.
	Absolute SpacingType = 0
	// Minimum means the row or column should shrink to fit its contents.
	Minimum SpacingType = 1
	// Percent means the spacing value is a proportion used to fill extra space when the layout grows or shrinks.
	Percent SpacingType = 2
)
// TableLayoutSize describes a row's or column's spacing.
type TableLayoutSize struct {
	SpacingType SpacingType // how Size is interpreted (Absolute / Minimum / Percent)
	Size float32
}
// TableLayoutChild describes all of the layout data for a given child
// component on the layout: its cell position and how many rows/columns
// it spans.
type TableLayoutChild struct {
	Component Component
	Row int
	Col int
	RowSpan int
	ColSpan int
}
// TableLayout is a component that lays out its children components
// using a table of rows and columns.
type TableLayout struct {
	Bounds Bounds
	MinSize Bounds // cached minimum size; valid while NeedsMinCalc is false
	Rows []TableLayoutSize
	Cols []TableLayoutSize
	Children []TableLayoutChild
	NeedsLayout bool // set when child positions must be recomputed before rendering
	NeedsMinCalc bool // set when MinSize must be recomputed
}
// NewTableLayout creates a new table layout component with default
// values: zero bounds and empty row, column, and child lists.
func NewTableLayout() TableLayout {
	return TableLayout{
		Bounds: NewBounds(0, 0, 0, 0),
		Rows: make([]TableLayoutSize, 0),
		Cols: make([]TableLayoutSize, 0),
		Children: make([]TableLayoutChild, 0),
	}
}
// GetBounds returns the current bounds of the component.
func (layout TableLayout) GetBounds() Bounds {
	return layout.Bounds
}
// SetBounds sets the bounds of the component and immediately re-runs
// the layout so children pick up the new geometry.
func (layout *TableLayout) SetBounds(bounds Bounds) {
	layout.Bounds = bounds
	layout.Layout()
}
// GetMinimumSize determines the minimum size of the component, caching
// the result in MinSize until NeedsMinCalc is set again.
// The receiver must be a pointer: with the old value receiver the
// writes to MinSize and NeedsMinCalc were lost on the method's copy, so
// the cache never took effect and the layout was re-solved on every
// call. (*TableLayout is already the Component-satisfying type, since
// Render and SetBounds use pointer receivers.)
func (layout *TableLayout) GetMinimumSize() Bounds {
	if layout.NeedsMinCalc {
		// NOTE(review): CalculateSmooshedLayout returns cumulative
		// positions, so summing every entry overcounts the total extent;
		// the final entry alone would be the full size. Also X appears to
		// accumulate row heights and Y column widths — confirm the
		// intended Bounds semantics.
		rowPos := layout.CalculateSmooshedLayout(layout.Rows, func(child TableLayoutChild) int {
			return child.Row
		}, func(child TableLayoutChild) int {
			return child.RowSpan
		}, func(bounds Bounds) float32 {
			return bounds.Height
		})
		layout.MinSize.X = 0
		for _, size := range rowPos {
			layout.MinSize.X += size
		}
		colPos := layout.CalculateSmooshedLayout(layout.Cols, func(child TableLayoutChild) int {
			return child.Col
		}, func(child TableLayoutChild) int {
			return child.ColSpan
		}, func(bounds Bounds) float32 {
			return bounds.Width
		})
		layout.MinSize.Y = 0
		for _, size := range colPos {
			layout.MinSize.Y += size
		}
		layout.NeedsMinCalc = false
	}
	return layout.MinSize
}
// Render draws this component and all of its child components onto the
// active GL context, translating each child to its laid-out position.
func (layout *TableLayout) Render() {
	if layout.NeedsLayout {
		layout.Layout()
	}
	for _, child := range layout.Children {
		bounds := child.Component.GetBounds()
		// Column-major 4x4 translation matrix moving the child to (X, Y).
		translate := [16]float32{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, bounds.X, bounds.Y, 0, 1}
		gl.PushMatrix()
		gl.MultMatrixf(&translate[0])
		child.Component.Render()
		gl.PopMatrix()
	}
}
// CalculateSmooshedLayout determines the minimum size for either the rows or columns, depending on the arguments passed in
//
// The problem is phrased as a linear program: one variable per track,
// minimized with uniform cost, subject to one inequality per child demanding
// that its spanned tracks sum to at least the child's minimum size. The
// returned slice has len(elements)+1 entries holding cumulative positions;
// the final entry is the total minimum extent. On solver failure it returns
// nil — NOTE(review): callers (CalculateLayout/Layout) index the result
// without a nil check, so a failed solve panics downstream; confirm intent.
func (layout *TableLayout) CalculateSmooshedLayout(elements []TableLayoutSize, elementSelector func(TableLayoutChild) int, spanSelector func(TableLayoutChild) int, sizeSelector func(Bounds) float32) []float32 {
	numSizes := len(elements)
	numChildren := len(layout.Children)
	// Uniform objective: minimize the sum of all track sizes.
	c := make([]float64, numSizes)
	for i := 0; i < numSizes; i++ {
		c[i] = 1
	}
	// A/b carry the inequality constraints (one row per child); An/bn are
	// passed as all-zero equality constraints.
	A := make([][]float64, numChildren)
	b := make([]float64, numChildren)
	An := make([][]float64, numChildren)
	bn := make([]float64, numChildren)
	for i := 0; i < numChildren; i++ {
		A[i] = make([]float64, numSizes)
		An[i] = make([]float64, numSizes)
		for j := 0; j < numSizes; j++ {
			// -1 for every track the child spans, 0 elsewhere, so that
			// A·x <= b encodes sum(spanned sizes) >= child minimum.
			if el := elementSelector(layout.Children[i]); el <= j && j < el+spanSelector(layout.Children[i]) {
				A[i][j] = -1
			} else {
				A[i][j] = 0
			}
			An[i][j] = 0
		}
		b[i] = -float64(sizeSelector(layout.Children[i].Component.GetMinimumSize()))
		bn[i] = 0
	}
	res := lpsimplex.LPSimplex(c, A, b, An, bn, nil, nil, false, 1000, 0.01, false)
	if !res.Success {
		fmt.Printf("Unable to solve layout: %s\n", res.Message)
		return nil
	}
	// Accumulate track sizes into cumulative positions; Absolute tracks use
	// their fixed size, every other spacing type takes the solver's value.
	pos := make([]float32, numSizes+1)
	accum := 0.0
	for i, size := range elements {
		pos[i] = float32(accum)
		if size.SpacingType == Absolute {
			accum += float64(size.Size)
		} else {
			accum += res.X[i]
		}
	}
	pos[numSizes] = float32(accum)
	return pos
}
// CalculateLayout determines either the size of the rows or columns, depending on the arguments passed in
//
// It starts from the smooshed (minimum) cumulative positions and then
// distributes any leftover space across the Percent-spaced tracks in
// proportion to their Size weights, shifting every subsequent boundary by
// the running offset.
//
// NOTE(review): if the smooshed solve failed, pos is nil and the
// pos[len(pos)-1] access below panics — confirm whether failure should
// propagate nil instead.
func (layout *TableLayout) CalculateLayout(elements []TableLayoutSize, elementSelector func(TableLayoutChild) int, spanSelector func(TableLayoutChild) int, sizeSelector func(Bounds) float32) []float32 {
	pos := layout.CalculateSmooshedLayout(elements, elementSelector, spanSelector, sizeSelector)
	// Total Percent weight, used to apportion the extra space.
	totalPercent := float32(0)
	for _, el := range elements {
		if el.SpacingType == Percent {
			totalPercent += el.Size
		}
	}
	// Space left over once every track has its minimum extent.
	extra := sizeSelector(layout.Bounds) - pos[len(pos)-1]
	offset := float32(0)
	for i, el := range elements {
		pos[i] += offset
		if el.SpacingType == Percent {
			offset += extra * el.Size / totalPercent
		}
	}
	pos[len(elements)] += offset
	return pos
}
// Layout recalculates all of the positions and sizes of the child components:
// it computes the row and column boundary positions, then gives each child
// the rectangle spanned by its cells.
func (layout *TableLayout) Layout() {
	rowPos := layout.CalculateLayout(layout.Rows, func(child TableLayoutChild) int {
		return child.Row
	}, func(child TableLayoutChild) int {
		return child.RowSpan
	}, func(bounds Bounds) float32 {
		return bounds.Height
	})
	colPos := layout.CalculateLayout(layout.Cols, func(child TableLayoutChild) int {
		return child.Col
	}, func(child TableLayoutChild) int {
		return child.ColSpan
	}, func(bounds Bounds) float32 {
		return bounds.Width
	})
	for _, child := range layout.Children {
		// A child's rectangle runs from its first cell boundary to the
		// boundary just past its last spanned cell.
		x := colPos[child.Col]
		y := rowPos[child.Row]
		child.Component.SetBounds(NewBounds(x, y, colPos[child.Col+child.ColSpan]-x, rowPos[child.Row+child.RowSpan]-y))
	}
	layout.NeedsLayout = false
}
// Add adds an additional component to the layout with the given constraints
func (layout *TableLayout) Add(component Component, row int, col int, rowSpan int, colSpan int) {
if maxRow := row + rowSpan; maxRow > len(layout.Rows) {
newRows := make([]TableLayoutSize, maxRow)
copy(newRows, layout.Rows)
for i := len(layout.Rows); i < maxRow; i++ {
newRows[i] = TableLayoutSize{
SpacingType: Minimum,
}
}
layout.Rows = newRows
}
if maxCol := col + colSpan; maxCol > len(layout.Cols) {
newCols := make([]TableLayoutSize, maxCol)
copy(newCols, layout.Cols)
for i := len(layout.Cols); i < maxCol; i++ {
newCols[i] = TableLayoutSize{
SpacingType: Minimum,
}
}
layout.Cols = newCols
}
layout.Children = append(layout.Children, TableLayoutChild{
Component: component,
Row: row,
Col: col,
RowSpan: rowSpan,
ColSpan: colSpan,
})
layout.NeedsLayout = true
layout.NeedsMinCalc = true
}
// SetRowSize constrains the size of a row, growing Rows if that index does
// not exist yet (intermediate new rows default to Minimum spacing).
//
// Fix: the original grow branch was inverted (`row < len(...)`), copied in
// the wrong direction (copy(dst, src) with the arguments swapped, wiping the
// existing rows), allocated one element too few, and mis-bounded its fill
// loop — so the method paniced on every call.
func (layout *TableLayout) SetRowSize(row int, spacingType SpacingType, size float32) {
	if row >= len(layout.Rows) {
		newRows := make([]TableLayoutSize, row+1)
		copy(newRows, layout.Rows)
		for i := len(layout.Rows); i < row; i++ {
			newRows[i] = TableLayoutSize{
				SpacingType: Minimum,
			}
		}
		layout.Rows = newRows
	}
	layout.Rows[row] = TableLayoutSize{
		SpacingType: spacingType,
		Size:        size,
	}
	layout.NeedsLayout = true
	layout.NeedsMinCalc = true
}
// SetColSize constrains the size of a column
func (layout *TableLayout) SetColSize(col int, spacingType SpacingType, size float32) {
if col < len(layout.Cols) {
newCols := make([]TableLayoutSize, col)
copy(layout.Cols, newCols)
for i := len(layout.Cols); i < col-1; i++ {
newCols[i] = TableLayoutSize{
SpacingType: Minimum,
}
}
layout.Cols = newCols
}
layout.Cols[col] = TableLayoutSize{
SpacingType: spacingType,
Size: size,
}
layout.NeedsLayout = true
layout.NeedsMinCalc = true
} | ui/TableLayout.go | 0.778102 | 0.560253 | TableLayout.go | starcoder |
package ethabi
import (
"fmt"
"github.com/pkg/errors"
"github.com/wavesplatform/gowaves/pkg/ride/meta"
)
// UnsupportedType is the sentinel error returned when a ride metadata type
// has no ABI representation (e.g. union types).
// NOTE(review): Go convention would name this ErrUnsupportedType; renaming
// would break the package's public API, so it is only flagged here.
var UnsupportedType = errors.New("unsupported type")

// ArgType tags the supported ABI argument kinds.
type ArgType byte

// Type enumerator
const (
	IntType ArgType = iota
	UintType
	BytesType
	BoolType
	StringType
	SliceType
	TupleType
	AddressType // we use this type only for erc20 transfers
	FixedBytesType // we use this type only for payment asset
)
// String returns the ArgType's name, or a descriptive placeholder for
// values outside the enumerated range.
func (t ArgType) String() string {
	names := [...]string{
		IntType:        "IntType",
		UintType:       "UintType",
		BytesType:      "BytesType",
		BoolType:       "BoolType",
		StringType:     "StringType",
		SliceType:      "SliceType",
		TupleType:      "TupleType",
		AddressType:    "AddressType",
		FixedBytesType: "FixedBytesType",
	}
	if int(t) < len(names) {
		return names[t]
	}
	return fmt.Sprintf("unknown ArgType (%d)", t)
}
// Type is the reflection of the supported argument type.
type Type struct {
	Elem *Type // nested element type, set for SliceType values
	Size int // bit width for integer kinds (64 for ride Int)
	T ArgType // Our own type checking
	stringKind string // holds the unparsed string for deriving signatures
	// Tuple relative fields
	TupleRawName string // Raw struct name defined in source code, may be empty.
	TupleFields Arguments // Type and name information of all tuple fields
}
// String returns the unparsed string representation held in stringKind,
// which is used when deriving signatures.
func (t *Type) String() string {
	return t.stringKind
}
// requiresLengthPrefix reports whether values of the type are encoded with a
// leading length word (strings, byte arrays and slices).
func requiresLengthPrefix(t Type) bool {
	switch t.T {
	case StringType, BytesType, SliceType:
		return true
	default:
		return false
	}
}
// getTypeSize returns the size that this type needs to occupy.
// Static types are encoded in-place, so their size is what the value
// actually takes up (a static tuple is the sum of its fields). Dynamic
// types are stored out-of-line and occupy a fixed 32-byte slot holding the
// location reference to the actual value storage.
func getTypeSize(t Type) int {
	if t.T != TupleType || isDynamicType(t) {
		return 32
	}
	// Static tuple: recursively total the in-place sizes of all fields.
	sum := 0
	for i := range t.TupleFields {
		sum += getTypeSize(t.TupleFields[i].Type)
	}
	return sum
}
// isDynamicType reports whether the type is "dynamic" in the ABI sense:
// bytes, string, any slice, or a tuple with at least one dynamic field.
func isDynamicType(t Type) bool {
	switch t.T {
	case StringType, BytesType, SliceType:
		return true
	case TupleType:
		for i := range t.TupleFields {
			if isDynamicType(t.TupleFields[i].Type) {
				return true
			}
		}
	}
	return false
}
// AbiTypeFromRideTypeMeta maps a ride metadata type to its ABI Type
// equivalent: Int to a 64-bit IntType, Bytes/Boolean/String to their direct
// counterparts, and lists to SliceType of the converted inner type. Union
// types have no ABI representation and yield UnsupportedType.
func AbiTypeFromRideTypeMeta(metaT meta.Type) (abiT Type, err error) {
	switch t := metaT.(type) {
	case meta.SimpleType:
		switch t {
		case meta.Int:
			abiT = Type{T: IntType, Size: 64}
		case meta.Bytes:
			abiT = Type{T: BytesType}
		case meta.Boolean:
			abiT = Type{T: BoolType}
		case meta.String:
			abiT = Type{T: StringType}
		default:
			return Type{}, errors.Errorf("invalid ride simple type (%d)", t)
		}
	case meta.ListType:
		// Convert the element type first; a list is a slice of it.
		inner, err := AbiTypeFromRideTypeMeta(t.Inner)
		if err != nil {
			return Type{}, errors.Wrapf(err,
				"failed to create abi type for ride meta list type, inner type %T", t.Inner,
			)
		}
		abiT = Type{Elem: &inner, T: SliceType}
	case meta.UnionType:
		return Type{}, errors.Wrap(UnsupportedType, "UnionType")
	default:
		return Type{}, errors.Errorf("unsupported ride metadata type, type %T", t)
	}
	// Derive the unparsed signature string for the converted type.
	// TODO(nickeskov): Do we really need this? In result we have recursion inside recursion.
	stringKindMarshaler, err := rideMetaTypeToTextMarshaler(metaT)
	if err != nil {
		return Type{}, errors.Wrapf(err, "failed to create stringKind marshaler for ride meta type %T", metaT)
	}
	stringKind, err := stringKindMarshaler.MarshalText()
	if err != nil {
		return Type{}, errors.Wrapf(err, "failed to create stringKind for ride meta type %T", metaT)
	}
	abiT.stringKind = string(stringKind)
	return abiT, nil
}
package color
import "math"
// http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/
// http://www.brucelindbloom.com/Eqn_RGB_to_XYZ.html
// linearize converts one sRGB channel in [0,1] to its linear-light value by
// inverting the sRGB transfer curve: a linear segment below 0.04045 and a
// 2.4-gamma segment above it.
func linearize(v float64) float64 {
	if v > 0.04045 {
		return math.Pow((v+0.055)/1.055, 2.4)
	}
	return v / 12.92
}
// LinearRGB converts the color into the linear RGB space (see http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/).
func (c Color) LinearRGB() (r, g, b float64) {
	return linearize(c.R), linearize(c.G), linearize(c.B)
}
// FastLinearRGB is much faster than and almost as accurate as LinearRGB,
// approximating the sRGB transfer curve with a plain 2.2 gamma.
func (c Color) FastLinearRGB() (r, g, b float64) {
	return math.Pow(c.R, 2.2), math.Pow(c.G, 2.2), math.Pow(c.B, 2.2)
}
// delinearize is the inverse of linearize: it applies the sRGB transfer
// curve to a linear-light channel value.
func delinearize(v float64) float64 {
	if v > 0.0031308 {
		return 1.055*math.Pow(v, 1.0/2.4) - 0.055
	}
	return 12.92 * v
}
// LinearRGB creates an sRGBA color out of the given linear RGB channels (see http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/).
func LinearRGB(r, g, b float64) Color {
	var c Color
	c.R = delinearize(r)
	c.G = delinearize(g)
	c.B = delinearize(b)
	return c
}
// FastLinearRGB is much faster than and almost as accurate as LinearRGB,
// using a plain 1/2.2 gamma instead of the exact sRGB curve.
func FastLinearRGB(r, g, b float64) Color {
	var c Color
	c.R = math.Pow(r, 1.0/2.2)
	c.G = math.Pow(g, 1.0/2.2)
	c.B = math.Pow(b, 1.0/2.2)
	return c
}
// XYZToLinearRGB converts from CIE XYZ-space to linear RGB space using the
// standard sRGB/D65 inverse matrix.
func XYZToLinearRGB(x, y, z float64) (r, g, b float64) {
	return 3.2404542*x - 1.5371385*y - 0.4985314*z,
		-0.9692660*x + 1.8760108*y + 0.0415560*z,
		0.0556434*x - 0.2040259*y + 1.0572252*z
}
// LinearRGBToXYZ converts linear RGB channels to CIE XYZ coordinates using
// the standard sRGB/D65 forward matrix.
func LinearRGBToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.4124564*r + 0.3575761*g + 0.1804375*b,
		0.2126729*r + 0.7151522*g + 0.0721750*b,
		0.0193339*r + 0.1191920*g + 0.9503041*b
}
// XYZ colors, http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/
// ToXYZ converts the sRGB color to XYZ color space.
func (c Color) ToXYZ() (x, y, z float64) {
	r, g, b := c.LinearRGB()
	return LinearRGBToXYZ(r, g, b)
}
// XYZ converts the given XYZ coordinates to an sRGB color.
func XYZ(x, y, z float64) Color {
	r, g, b := XYZToLinearRGB(x, y, z)
	return LinearRGB(r, g, b)
}
// xyY colors, http://www.brucelindbloom.com/Eqn_XYZ_to_xyY.html
// ToxyY converts the sRGB color to xyY color space.
func (c Color) ToxyY() (x, y, Y float64) {
	bigX, bigY, bigZ := c.ToXYZ()
	return XYZToxyY(bigX, bigY, bigZ)
}
// XYZToxyY converts XYZ to xyY chromaticity using D65 as the reference white.
func XYZToxyY(X, Y, Z float64) (x, y, Yout float64) {
	x, y, Yout = XYZToxyYWhiteRef(X, Y, Z, D65)
	return
}
// XYZToxyYWhiteRef converts XYZ to xyY chromaticity. For (near-)black input,
// where the chromaticity is undefined, it falls back to the reference
// white's chromaticity as recommended by Bruce Lindbloom.
func XYZToxyYWhiteRef(X, Y, Z float64, white [3]float64) (x, y, Yout float64) {
	sum := X + Y + Z
	if math.Abs(sum) < 1e-14 {
		wSum := white[0] + white[1] + white[2]
		return white[0] / wSum, white[1] / wSum, Y
	}
	return X / sum, Y / sum, Y
}
// XyYToXYZ converts xyY chromaticity back to XYZ. When y is (near) zero the
// division is undefined, so X and Z are pinned to zero while Yout keeps the
// given luminance.
//
// Fix: the degenerate branch previously wrote `Y = 0.0`, a dead assignment
// to the parameter (Yout had already been set). The intent — matching the
// upstream go-colorful implementation — is to zero Z explicitly; behavior
// is unchanged since Z already held its zero value.
func XyYToXYZ(x, y, Y float64) (X, Yout, Z float64) {
	Yout = Y
	if -1e-14 < y && y < 1e-14 {
		X = 0.0
		Z = 0.0
	} else {
		X = Y / y * x
		Z = Y / y * (1.0 - x - y)
	}
	return
}
// L*a*b* color space, http://en.wikipedia.org/wiki/Lab_color_space#CIELAB-CIEXYZ_conversions
// Tolab converts the sRGB color to CIE L*a*b* space with a D65 white point.
func (c Color) Tolab() (l, a, b float64) {
	x, y, z := c.ToXYZ()
	return XYZToLab(x, y, z)
}
// lab_f is the forward companding function of the XYZ-to-L*a*b* transform:
// a cube root above the (6/29)^3 threshold, and its linear extension below.
func lab_f(t float64) float64 {
	const threshold = 6.0 / 29.0 * 6.0 / 29.0 * 6.0 / 29.0
	if t > threshold {
		return math.Cbrt(t)
	}
	return t/3.0*29.0/6.0*29.0/6.0 + 4.0/29.0
}
// XYZToLab converts a color from XYZ color space to L*a*b* color space using D65 as the white reference point.
func XYZToLab(x, y, z float64) (l, a, b float64) {
	l, a, b = XYZToLabWhiteRef(x, y, z, D65)
	return
}
// XYZToLabWhiteRef converts a color from XYZ color space to L*a*b* color
// space relative to the given white reference point.
func XYZToLabWhiteRef(x, y, z float64, white [3]float64) (l, a, b float64) {
	fy := lab_f(y / white[1])
	l = 1.16*fy - 0.16
	a = 5.0 * (lab_f(x/white[0]) - fy)
	b = 2.0 * (fy - lab_f(z/white[2]))
	return
}
// lab_finv inverts lab_f: a cube above the 6/29 threshold, and the matching
// linear extension below it.
func lab_finv(t float64) float64 {
	const delta = 6.0 / 29.0
	if t > delta {
		return t * t * t
	}
	return 3.0 * 6.0 / 29.0 * 6.0 / 29.0 * (t - 4.0/29.0)
}
// LabToXYZ converts a color from L*a*b* color space to XYZ color space using D65 as the white reference point.
func LabToXYZ(l, a, b float64) (x, y, z float64) {
	x, y, z = LabToXYZWhiteRef(l, a, b, D65)
	return
}
// LabToXYZWhiteRef converts a color from L*a*b* color space to XYZ color space using a white reference point.
func LabToXYZWhiteRef(l, a, b float64, white [3]float64) (x, y, z float64) {
l2 := (l + 0.16) / 1.16
x = white[0] * lab_finv(l2+a/5.0)
y = white[1] * lab_finv(l2)
z = white[2] * lab_finv(l2-b/2.0)
return
} | color/convert.go | 0.893106 | 0.442335 | convert.go | starcoder |
package asterisk
import (
"go/ast"
"reflect"
)
type (
	// BoolCondition tests a plain bool taken from an AST node field.
	BoolCondition func(bool) bool
	// ChanDirCondition tests a channel direction.
	ChanDirCondition func(ast.ChanDir) bool
	// ExprCondition tests a single expression.
	ExprCondition func(ast.Expr) bool
	// FilesMapCondition tests a package's file map.
	FilesMapCondition func(Files map[string]*ast.File) bool
	// ImportsMapCondition tests a package's import object map.
	ImportsMapCondition func(map[string]*ast.Object) bool
	// NodeCondition tests a single AST node.
	NodeCondition func(ast.Node) bool
	// NodesCondition tests an ordered list of AST nodes.
	NodesCondition func([]ast.Node) bool
	// ScopeCondition tests a lexical scope.
	ScopeCondition func(*ast.Scope) bool
	// StringCondition tests a string value.
	StringCondition func(string) bool
)
/**************************************************************************
concrete expression nodes
**************************************************************************/
// BadExpr check if the given ast.Node is a ast.BadExpr.
func BadExpr() NodeCondition {
	return Type(new(ast.BadExpr))
}
// Ident matches an *ast.Ident whose name equals the requested one.
func Ident(name string) NodeCondition {
	return func(n ast.Node) bool {
		ident, ok := n.(*ast.Ident)
		return ok && ident.Name == name
	}
}
// IdentExpr matches an *ast.Ident whose name equals the requested one.
// NOTE(review): behaviorally identical to Ident — confirm whether one of
// the two can be deprecated.
func IdentExpr(name string) NodeCondition {
	return func(n ast.Node) bool {
		ident, ok := n.(*ast.Ident)
		return ok && ident.Name == name
	}
}
// Ellipsis matches an *ast.Ellipsis whose element type satisfies elem.
func Ellipsis(elem NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.Ellipsis)
		return ok && elem(e.Elt)
	}
}
// BasicLit matches an *ast.BasicLit whose literal text equals value.
func BasicLit(value string) NodeCondition {
	return func(n ast.Node) bool {
		lit, ok := n.(*ast.BasicLit)
		return ok && lit.Value == value
	}
}
// FuncLit matches an *ast.FuncLit whose type and body satisfy the given conditions.
func FuncLit(t, block NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.FuncLit)
		return ok && t(e.Type) && block(e.Body)
	}
}
// CompositeLit matches an *ast.CompositeLit whose type and elements satisfy the given conditions.
func CompositeLit(t NodeCondition, args NodesCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.CompositeLit)
		return ok && t(e.Type) && args(toNodes(e.Elts))
	}
}
// ParenExpr matches an *ast.ParenExpr whose inner expression satisfies x.
func ParenExpr(x NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.ParenExpr)
		return ok && x(e.X)
	}
}
// Expr wraps an ExprCondition so it can be applied directly to an expression.
func Expr(x ExprCondition) ExprCondition {
	return func(n ast.Expr) bool { return x(n) }
}
// Exprs matches a node list element-by-element against the given conditions;
// the lengths must agree.
func Exprs(x []NodeCondition) NodesCondition {
	return func(n []ast.Node) bool {
		if len(n) != len(x) {
			return false
		}
		for i, cond := range x {
			if !cond(n[i]) {
				return false
			}
		}
		return true
	}
}
// SelectorExpr matches an *ast.SelectorExpr whose operand and selector satisfy the given conditions.
func SelectorExpr(x, sel NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.SelectorExpr)
		return ok && x(e.X) && sel(e.Sel)
	}
}
// IndexExpr matches an *ast.IndexExpr whose operand and index satisfy the given conditions.
func IndexExpr(x, index NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.IndexExpr)
		return ok && x(e.X) && index(e.Index)
	}
}
// SliceExpr matches an *ast.SliceExpr whose operand and bounds satisfy the given conditions.
func SliceExpr(x, low, high, max NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.SliceExpr)
		return ok && x(e.X) && low(e.Low) && high(e.High) && max(e.Max)
	}
}
// TypeAssertExpr matches an *ast.TypeAssertExpr whose operand and asserted type satisfy the given conditions.
func TypeAssertExpr(x, t NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.TypeAssertExpr)
		return ok && x(e.X) && t(e.Type)
	}
}
// CallExpr matches an *ast.CallExpr whose callee and arguments satisfy the given conditions.
func CallExpr(fun NodeCondition, args NodesCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.CallExpr)
		return ok && fun(e.Fun) && args(toNodes(e.Args))
	}
}
// StarExpr matches an *ast.StarExpr whose operand satisfies x.
func StarExpr(x NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.StarExpr)
		return ok && x(e.X)
	}
}
// UnaryExpr matches an *ast.UnaryExpr whose operand satisfies x.
func UnaryExpr(x NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.UnaryExpr)
		return ok && x(e.X)
	}
}
// BinaryExpr matches an *ast.BinaryExpr whose operands satisfy the given conditions.
func BinaryExpr(x, y NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.BinaryExpr)
		return ok && x(e.X) && y(e.Y)
	}
}
// KeyValueExpr matches an *ast.KeyValueExpr whose key and value satisfy the given conditions.
func KeyValueExpr(k, v NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.KeyValueExpr)
		return ok && k(e.Key) && v(e.Value)
	}
}
/**************************************************************************
type-specific expression nodes
**************************************************************************/
// ArrayType matches an *ast.ArrayType whose element type and length satisfy the given conditions.
func ArrayType(elt, l NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.ArrayType)
		return ok && elt(e.Elt) && l(e.Len)
	}
}
// StructType matches an *ast.StructType whose field list and Incomplete flag satisfy the given conditions.
func StructType(fields NodeCondition, incomplete BoolCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.StructType)
		return ok && fields(e.Fields) && incomplete(e.Incomplete)
	}
}
// FuncType matches an *ast.FuncType whose parameters and results satisfy the given conditions.
func FuncType(params, results NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.FuncType)
		return ok && params(e.Params) && results(e.Results)
	}
}
// InterfaceType matches an *ast.InterfaceType whose method list and Incomplete flag satisfy the given conditions.
func InterfaceType(methods NodeCondition, incomplete BoolCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.InterfaceType)
		return ok && methods(e.Methods) && incomplete(e.Incomplete)
	}
}
// MapType matches an *ast.MapType whose key and value types satisfy the given conditions.
func MapType(k, v NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.MapType)
		return ok && k(e.Key) && v(e.Value)
	}
}
// ChanType matches an *ast.ChanType whose element type satisfies k and
// whose direction satisfies v.
func ChanType(k NodeCondition, v ChanDirCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.ChanType)
		return ok && k(e.Value) && v(e.Dir)
	}
}
/**************************************************************************
concrete statement nodes
**************************************************************************/
// BadStmt check if the given ast.Node is a ast.BadStmt.
func BadStmt() NodeCondition {
	return Type(new(ast.BadStmt))
}
// DeclStmt matches an *ast.DeclStmt whose declaration satisfies decl.
func DeclStmt(decl NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.DeclStmt)
		return ok && decl(e.Decl)
	}
}
// EmptyStmt matches an *ast.EmptyStmt whose Implicit flag satisfies implicit.
func EmptyStmt(implicit BoolCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.EmptyStmt)
		return ok && implicit(e.Implicit)
	}
}
// LabeledStmt check if the given ast.LabeledStmt matches the given conditions.
//
// Fix: the stmt condition is now applied to the labeled statement itself
// (e.Stmt); previously it was mistakenly applied to e.Label a second time,
// so the statement condition never saw the actual statement.
func LabeledStmt(label, stmt NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		if e, ok := n.(*ast.LabeledStmt); ok {
			return label(e.Label) && stmt(e.Stmt)
		}
		return false
	}
}
// ExprStmt matches an *ast.ExprStmt whose expression satisfies x.
func ExprStmt(x NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.ExprStmt)
		return ok && x(e.X)
	}
}
// SendStmt matches an *ast.SendStmt whose channel and sent value satisfy the given conditions.
func SendStmt(channel, val NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.SendStmt)
		return ok && channel(e.Chan) && val(e.Value)
	}
}
// IncDecStmt matches an *ast.IncDecStmt whose operand satisfies x.
func IncDecStmt(x NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.IncDecStmt)
		return ok && x(e.X)
	}
}
// AssignStmt matches an *ast.AssignStmt whose left- and right-hand sides satisfy the given conditions.
func AssignStmt(lhs, rhs NodesCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.AssignStmt)
		return ok && lhs(toNodes(e.Lhs)) && rhs(toNodes(e.Rhs))
	}
}
// GoStmt matches an *ast.GoStmt whose call expression satisfies call.
func GoStmt(call NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.GoStmt)
		return ok && call(e.Call)
	}
}
// DeferStmt matches an *ast.DeferStmt whose call expression satisfies call.
func DeferStmt(call NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.DeferStmt)
		return ok && call(e.Call)
	}
}
// ReturnStmt matches an *ast.ReturnStmt whose result list satisfies results.
func ReturnStmt(results NodesCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.ReturnStmt)
		return ok && results(toNodes(e.Results))
	}
}
// BranchStmt matches an *ast.BranchStmt whose label satisfies label.
func BranchStmt(label NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.BranchStmt)
		return ok && label(e.Label)
	}
}
// BlockStmt matches an *ast.BlockStmt whose statement list satisfies stmts.
func BlockStmt(stmts NodesCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.BlockStmt)
		return ok && stmts(toNodes(e.List))
	}
}
// IfStmt matches an *ast.IfStmt whose init, condition, body and else branch satisfy the given conditions.
func IfStmt(init, body, cond, els NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.IfStmt)
		return ok && init(e.Init) && cond(e.Cond) && body(e.Body) && els(e.Else)
	}
}
// CaseClause matches an *ast.CaseClause whose expression list and body satisfy the given conditions.
func CaseClause(list NodesCondition, body NodesCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.CaseClause)
		return ok && list(toNodes(e.List)) && body(toNodes(e.Body))
	}
}
// SwitchStmt matches an *ast.SwitchStmt whose init, tag and body satisfy the given conditions.
func SwitchStmt(init, tag, body NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.SwitchStmt)
		return ok && init(e.Init) && tag(e.Tag) && body(e.Body)
	}
}
// TypeSwitchStmt matches an *ast.TypeSwitchStmt whose init, assign and body satisfy the given conditions.
func TypeSwitchStmt(init, assign, body NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.TypeSwitchStmt)
		return ok && init(e.Init) && assign(e.Assign) && body(e.Body)
	}
}
// CommClause matches an *ast.CommClause whose communication statement and body satisfy the given conditions.
func CommClause(comm NodeCondition, body NodesCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.CommClause)
		return ok && comm(e.Comm) && body(toNodes(e.Body))
	}
}
// SelectStmt matches an *ast.SelectStmt whose body satisfies body.
func SelectStmt(body NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.SelectStmt)
		return ok && body(e.Body)
	}
}
// ForStmt matches an *ast.ForStmt whose init, condition, post statement and body satisfy the given conditions.
func ForStmt(init, cond, post, body NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.ForStmt)
		return ok && init(e.Init) && cond(e.Cond) && post(e.Post) && body(e.Body)
	}
}
// RangeStmt matches an *ast.RangeStmt whose key, value, ranged expression and body satisfy the given conditions.
func RangeStmt(k, v, x, body NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.RangeStmt)
		return ok && k(e.Key) && v(e.Value) && x(e.X) && body(e.Body)
	}
}
/**************************************************************************
single (non-parenthesized) import, constant, type, or variable declaration
**************************************************************************/
// Spec check if the given values type matches the requested one.
//
// NOTE(review): new(ast.Spec) is a pointer to an interface, so matching here
// depends on Type handling pointer-to-interface arguments; verify that Type
// supports interface matching, otherwise this condition always returns false.
func Spec() NodeCondition {
	return Type(new(ast.Spec))
}
// ImportSpec matches an *ast.ImportSpec whose doc, name, path and trailing comment satisfy the given conditions.
func ImportSpec(doc, name, importPath, comment NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.ImportSpec)
		return ok && doc(e.Doc) && name(e.Name) && importPath(e.Path) && comment(e.Comment)
	}
}
// ValueSpec matches an *ast.ValueSpec whose doc, names, type, values and trailing comment satisfy the given conditions.
func ValueSpec(doc, t, comment NodeCondition, names NodesCondition, values NodesCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.ValueSpec)
		return ok && doc(e.Doc) && names(toNodes(e.Names)) && t(e.Type) && values(toNodes(e.Values)) && comment(e.Comment)
	}
}
// TypeSpec matches an *ast.TypeSpec whose doc, name, type and trailing comment satisfy the given conditions.
func TypeSpec(doc, name, t, comment NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.TypeSpec)
		return ok && doc(e.Doc) && name(e.Name) && t(e.Type) && comment(e.Comment)
	}
}
/**************************************************************************
declaration nodes
**************************************************************************/
// BadDecl check if the given values type matches *ast.BadDecl.
func BadDecl() NodeCondition {
	return Type(new(ast.BadDecl))
}
// GenDecl matches an *ast.GenDecl whose doc and spec list satisfy the given conditions.
func GenDecl(doc NodeCondition, specs NodesCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.GenDecl)
		return ok && doc(e.Doc) && specs(toNodes(e.Specs))
	}
}
// FuncDecl matches an *ast.FuncDecl whose doc, receiver, name, type and body satisfy the given conditions.
func FuncDecl(doc, recv, name, t, body NodeCondition) NodeCondition {
	return func(n ast.Node) bool {
		e, ok := n.(*ast.FuncDecl)
		return ok && doc(e.Doc) && recv(e.Recv) && name(e.Name) && t(e.Type) && body(e.Body)
	}
}
/**************************************************************************
Files and packages
**************************************************************************/
// File matches an *ast.File whose doc, package name, declarations, scope,
// imports, unresolved identifiers and comments satisfy the given conditions.
func File(
	doc,
	name NodeCondition,
	decls NodesCondition,
	scope ScopeCondition,
	imports,
	unresolved NodesCondition,
	comments NodesCondition) NodeCondition {
	return func(n ast.Node) bool {
		f, ok := n.(*ast.File)
		return ok &&
			doc(f.Doc) &&
			name(f.Name) &&
			decls(toNodes(f.Decls)) &&
			scope(f.Scope) &&
			imports(toNodes(f.Imports)) &&
			unresolved(toNodes(f.Unresolved)) &&
			comments(toNodes(f.Comments))
	}
}
// Package matches an *ast.Package whose scope, name, imports and files satisfy the given conditions.
func Package(
	scope ScopeCondition,
	name StringCondition,
	imports ImportsMapCondition,
	files FilesMapCondition) NodeCondition {
	return func(n ast.Node) bool {
		p, ok := n.(*ast.Package)
		return ok && scope(p.Scope) && name(p.Name) && imports(p.Imports) && files(p.Files)
	}
}
/**************************************************************************
custom
**************************************************************************/
// First applies c to the first node of a list; an empty list passes vacuously.
func First(c NodeCondition) NodesCondition {
	return func(nodes []ast.Node) bool {
		if len(nodes) > 0 {
			return c(nodes[0])
		}
		return true
	}
}
// Last applies c to the last node of a list; an empty list passes vacuously.
func Last(c NodeCondition) NodesCondition {
	return func(nodes []ast.Node) bool {
		if len(nodes) > 0 {
			return c(nodes[len(nodes)-1])
		}
		return true
	}
}
// Type check if the given node's type matches the requested one.
//
// The requested type is given as a sample value, e.g. new(ast.BadExpr). When
// the sample is a pointer to an interface (as Spec does with new(ast.Spec)),
// the node is matched by interface implementation — the previous plain
// AssignableTo check could never match a concrete node against an interface
// sample, so Spec() always returned false. The condition now also tolerates
// a nil node (returning false) instead of panicking inside reflect.
func Type(t interface{}) NodeCondition {
	wantType := reflect.ValueOf(t).Type()
	if wantType.Kind() == reflect.Ptr && wantType.Elem().Kind() == reflect.Interface {
		iface := wantType.Elem()
		return func(n ast.Node) bool {
			nt := reflect.TypeOf(n)
			return nt != nil && nt.Implements(iface)
		}
	}
	return func(n ast.Node) bool {
		nt := reflect.TypeOf(n)
		return nt != nil && wantType.AssignableTo(nt)
	}
}
// IgnoreNode accepts any node unconditionally.
func IgnoreNode() NodeCondition {
	return func(ast.Node) bool { return true }
}
// IgnoreNodes accepts any node list unconditionally.
func IgnoreNodes() NodesCondition {
	return func([]ast.Node) bool { return true }
}
// IgnoreScope accepts any scope unconditionally.
func IgnoreScope() ScopeCondition {
	return func(*ast.Scope) bool { return true }
}
// toNodes converts a typed slice of AST elements into a generic []ast.Node,
// keeping only the element kinds the matchers understand. The concrete cases
// are listed before the interface cases so *ast.Ident, *ast.ImportSpec and
// *ast.CommentGroup are kept in preference to their interface matches.
func toNodes(e interface{}) []ast.Node {
	var nodes []ast.Node
	list := reflect.ValueOf(e)
	for i, count := 0, list.Len(); i < count; i++ {
		switch v := list.Index(i).Interface().(type) {
		case *ast.Ident:
			nodes = append(nodes, v)
		case *ast.ImportSpec:
			nodes = append(nodes, v)
		case *ast.CommentGroup:
			nodes = append(nodes, v)
		case ast.Expr:
			nodes = append(nodes, v)
		case ast.Decl:
			nodes = append(nodes, v)
		case ast.Stmt:
			nodes = append(nodes, v)
		}
	}
	return nodes
}
package binaryTree
const (
	// Node colors are stored in gbtElement.SideValue as a bool:
	// true means black, false means red.
	black = true
	red = false
	// left selects the left-hand case in the side-parameterized helpers below.
	left = true
)
// rbt is a red-black tree layered on top of the generic binary tree gbt;
// node colors are kept in each element's SideValue field.
type rbt struct {
	gbt
}
// setColor stores the node's color (black or red) in its SideValue slot.
func (t *rbt) setColor(node *gbtElement, color bool) {
	node.SideValue = color
}
// color reports whether the node is black; the sentinel nil node counts as
// black, matching the red-black leaf convention.
func (t *rbt) color(node *gbtElement) (black bool) {
	return t.IsNil(node) || node.SideValue.(bool)
}
// otherSideNode returns node's child on the opposite side of side.
func (t *rbt) otherSideNode(side bool, node *gbtElement) *gbtElement {
	if side != left {
		return node.Left
	}
	return node.Right
}
// invDirRotation rotates node in the direction opposite to side.
func (t *rbt) invDirRotation(side bool, node *gbtElement) interface{} {
	if side != left {
		return t.LeftRotate(node)
	}
	return t.RightRotate(node)
}
// sameSideNode returns node's child on the same side as side.
func (t *rbt) sameSideNode(side bool, node *gbtElement) *gbtElement {
	if side != left {
		return node.Right
	}
	return node.Left
}
// sameDirRotation rotates node in the same direction as side.
func (t *rbt) sameDirRotation(side bool, node *gbtElement) interface{} {
	if side != left {
		return t.RightRotate(node)
	}
	return t.LeftRotate(node)
}
// Insert adds a node via the plain BST insert, colors it red (which can only
// violate the red-red rule, never the black-height rule), then restores the
// red-black invariants with insertFix.
func (t *rbt) Insert(node interface{}) interface{} {
	n := t.gbt.Insert(node).(*gbtElement)
	t.setColor(n, red)
	t.insertFix(n)
	return n
}
// insertFix restores the red-black invariants after inserting the red node.
// The only property that can be violated is the red-red rule on the path
// through the new node's parent; the loop either pushes the violation two
// levels up (case 1) or resolves it with at most two rotations (cases 2/3),
// mirroring the CLRS algorithm with the left/right mirror cases folded
// together via the side flag.
func (t *rbt) insertFix(node interface{}) {
	n := node.(*gbtElement)
	//only can violate property 3: both left and right children of red node must be black
	for !t.color(n.Parent) && !t.color(n) {
		grandNode := n.Parent.Parent //must be black, since n.Parent is red
		uncleNode := grandNode.Right
		if n.Parent == uncleNode {
			uncleNode = grandNode.Left
		}
		//case1: uncle node is red - recolor and push the violation upward
		if !t.color(uncleNode) {
			t.setColor(grandNode, red)
			t.setColor(grandNode.Left, black)
			t.setColor(grandNode.Right, black)
			n = grandNode
			//case2&3: uncle node is black
		} else {
			side := n.Parent == grandNode.Left
			t.setColor(grandNode, red)
			//case 2: n is the "inner" child - rotate it to the outside first
			if n == t.otherSideNode(side, n.Parent) {
				t.sameDirRotation(side, n.Parent)
			}
			//case 3: blacken the new subtree root and rotate the grandparent
			t.setColor(t.sameSideNode(side, grandNode), black)
			t.invDirRotation(side, grandNode)
		}
	}
	// the root is always black
	t.setColor(t.Root().(*gbtElement), black)
}
// Delete removes the node with the given key and returns it (the nil
// sentinel when the key is absent). If the physically unlinked node was
// black, deleteFix rebalances to restore equal black heights.
func (t *rbt) Delete(key uint32) interface{} {
	// deleteNonCompletedNode unlinks a node with at most one child and
	// returns it together with the child that took its place.
	deleteNonCompletedNode := func(node *gbtElement) (deletedNode *gbtElement, nextNode *gbtElement) {
		var reConnectedNode *gbtElement
		if t.IsNil(node.Left) {
			reConnectedNode = node.Right
		} else {
			reConnectedNode = node.Left
		}
		//mean's another black color
		// Note: this may set Parent on the NilNode sentinel; it is
		// restored at the end of Delete.
		reConnectedNode.Parent = node.Parent
		if t.IsNil(node.Parent) {
			t.NilNode.Left = reConnectedNode
			t.NilNode.Right = reConnectedNode
		} else if node.Parent.Right == node {
			node.Parent.Right = reConnectedNode
		} else {
			node.Parent.Left = reConnectedNode
		}
		return node, reConnectedNode
	}
	node := t.Search(key).(*gbtElement)
	if t.IsNil(node) {
		return node
	}
	var deletedNode, reConnectedNode *gbtElement
	if t.IsNil(node.Left) || t.IsNil(node.Right) {
		deletedNode, reConnectedNode = deleteNonCompletedNode(node)
	} else {
		// Two children: splice out the in-order successor instead and
		// move its key/value into this node.
		successor := t.Successor(node, t.Root()).(*gbtElement)
		_key, _value := successor.Key, successor.Value
		node.Key, node.Value = _key, _value
		deletedNode, reConnectedNode = deleteNonCompletedNode(successor)
	}
	if t.color(deletedNode) {
		//Now, reConnectedNode is black-black or black-red
		t.deleteFix(reConnectedNode)
	}
	//recover NilNode
	t.NilNode.Parent = t.NilNode
	return node
}
// deleteFix removes the extra black from a doubly-black (or black-red)
// node left behind after a black node was deleted.
func (t *rbt) deleteFix(node interface{}) {
	n := node.(*gbtElement)
	//n always points to the black-black or black-red node.The purpose is to remove the additional black color,
	//which means add a black color in the same side or reduce a black color in the other side
	for n != t.Root() && t.color(n) {
		side := n == n.Parent.Left
		brotherNode := t.otherSideNode(side, n.Parent)
		//case 1 brotherNode node is red, so parent must be black.Turn brotherNode node to a black one, convert to case 2,3,4
		if !t.color(brotherNode) {
			t.setColor(n.Parent, red)
			t.setColor(brotherNode, black)
			t.sameDirRotation(side, n.Parent)
			//case 2, 3, 4 brotherNode node is black
		} else {
			//case 2 move black-black or black-red node up
			if t.color(brotherNode.Left) && t.color(brotherNode.Right) {
				t.setColor(brotherNode, red)
				n = n.Parent
				//case 3 convert to case 4
			} else if t.color(t.otherSideNode(side, brotherNode)) {
				t.setColor(brotherNode, red)
				t.setColor(t.sameSideNode(side, brotherNode), black)
				t.invDirRotation(side, brotherNode)
				//case 4 add a black to left, turn black-black or black-red to black or red
			} else {
				t.setColor(brotherNode, t.color(n.Parent))
				t.setColor(n.Parent, black)
				t.setColor(t.otherSideNode(side, brotherNode), black)
				t.sameDirRotation(side, n.Parent)
				n = t.Root().(*gbtElement)
			}
		}
	}
	t.setColor(n, black)
}
func newRBT() *rbt {
t := new(rbt)
t.Init()
t.gbt.Object = t
return t
} | tree/binaryTree/rbTree.go | 0.646572 | 0.403567 | rbTree.go | starcoder |
package nn
import (
"math"
tsr "../tensor"
)
// ActivationFunction represents a function used to activate neural network outputs.
type ActivationFunction struct {
	// Type identifies the activation for lookup/serialization.
	Type ActivationType
	// Function applies the activation to the tensor and returns it.
	Function func(*tsr.Tensor) *tsr.Tensor
	// Derivative applies the activation's derivative; see each
	// implementation for what input it expects.
	Derivative func(*tsr.Tensor) *tsr.Tensor
}

// ActivationType is the identifying type of the activation function.
type ActivationType string
// Known activation identifiers.
const (
	// ActivationTypeRELU is the type for a rectified linear unit activation function.
	ActivationTypeRELU = ActivationType("relu")
	// ActivationTypeSigmoid is the type for a sigmoid activation function.
	ActivationTypeSigmoid = ActivationType("sigmoid")
	// ActivationTypeTanh is the type for a hyperbolic tangent activation function.
	ActivationTypeTanh = ActivationType("tanh")
	// ActivationTypeSoftmax is the type for a soft max activation function.
	ActivationTypeSoftmax = ActivationType("softmax")
)
// ActivationRELU is the rectified linear unit activation function.
// Function clamps negative values to zero; Derivative is the step
// function (1 where the value is positive, 0 elsewhere).
var ActivationRELU = ActivationFunction{
	Type: ActivationTypeRELU,
	Function: func(matrix *tsr.Tensor) *tsr.Tensor {
		matrix.ApplyFunction(func(current float32, frame int, row int, col int) float32 {
			if current > 0 {
				return current
			}
			return 0
		})
		return matrix
	},
	Derivative: func(matrix *tsr.Tensor) *tsr.Tensor {
		matrix.ApplyFunction(func(current float32, frame int, row int, col int) float32 {
			if current > 0 {
				return 1
			}
			return 0
		})
		return matrix
	},
}
// ActivationSigmoid is the sigmoid activation function: 1 / (1 + e^-x).
// NOTE(review): Derivative computes s*(1-s), i.e. it assumes the tensor
// already holds sigmoid *outputs*, not raw pre-activations — confirm
// against callers.
var ActivationSigmoid = ActivationFunction{
	Type: ActivationTypeSigmoid,
	Function: func(matrix *tsr.Tensor) *tsr.Tensor {
		matrix.ApplyFunction(func(current float32, frame int, row int, col int) float32 {
			return 1 / (1 + float32(math.Exp(-float64(current))))
		})
		return matrix
	},
	Derivative: func(matrix *tsr.Tensor) *tsr.Tensor {
		matrix.ApplyFunction(func(current float32, frame int, row int, col int) float32 {
			return current * (1 - current)
		})
		return matrix
	},
}
// ActivationTanh is the hyperbolic tangent activation function.
// NOTE(review): Derivative computes 1 - v^2, i.e. it assumes the tensor
// already holds tanh *outputs*, not raw pre-activations — confirm
// against callers.
var ActivationTanh = ActivationFunction{
	Type: ActivationTypeTanh,
	Function: func(matrix *tsr.Tensor) *tsr.Tensor {
		matrix.ApplyFunction(func(current float32, frame int, row int, col int) float32 {
			return float32(math.Tanh(float64(current)))
		})
		return matrix
	},
	Derivative: func(matrix *tsr.Tensor) *tsr.Tensor {
		matrix.ApplyFunction(func(current float32, frame int, row int, col int) float32 {
			return 1 - float32(math.Pow(float64(current), 2))
		})
		return matrix
	},
}
// ActivationSoftmax is the softmax activation function.
// Function normalizes exp(x) by the sum over the whole tensor.
// Derivative sums the softmax Jacobian terms ds_i/dx_j = s_i*(δ_ij - s_j)
// over all elements sharing the target column.
var ActivationSoftmax = ActivationFunction{
	Type: ActivationTypeSoftmax,
	Function: func(matrix *tsr.Tensor) *tsr.Tensor {
		matrix.ApplyFunction(func(current float32, frame int, row int, col int) float32 {
			return float32(math.Exp(float64(current)))
		})
		sum := matrix.Sum()
		matrix.ApplyFunction(func(current float32, frame int, row int, col int) float32 {
			return current / sum
		})
		return matrix
	},
	Derivative: func(matrix *tsr.Tensor) *tsr.Tensor {
		newMatrix := matrix.Copy()
		newMatrix.ApplyFunction(func(current float32, frame int, row int, col int) float32 {
			sum := float32(0.0)
			for f := 0; f < matrix.Frames; f++ {
				for r := 0; r < matrix.Rows; r++ {
					for c := 0; c < matrix.Cols; c++ {
						if col == c {
							// Diagonal Jacobian term: s_c * (1 - s_col).
							sum += matrix.Get(f, r, c) * (1 - current)
						} else {
							// Off-diagonal term: -s_c * s_col.
							// Bug fix: the original added this term
							// unconditionally, double-counting the
							// diagonal entry above.
							sum += matrix.Get(f, r, c) * -current
						}
					}
				}
			}
			return sum
		})
		return newMatrix
	},
}
func activationFunctionOfType(activationType ActivationType) ActivationFunction {
switch activationType {
case ActivationTypeRELU:
return ActivationRELU
case ActivationTypeSigmoid:
return ActivationSigmoid
case ActivationTypeTanh:
return ActivationTanh
case ActivationTypeSoftmax:
return ActivationSoftmax
default:
return ActivationRELU
}
} | nn/activation.go | 0.810254 | 0.800809 | activation.go | starcoder |
package gfx
import (
"github.com/brandonnelson3/GoRender/gfx/shaders"
"github.com/go-gl/gl/v4.5-core/gl"
"github.com/go-gl/mathgl/mgl32"
)
// RenderablePortion allows rendering of a part of a vbo.
type RenderablePortion struct {
	// startIndex/numIndex delimit the vertex range drawn for this portion.
	startIndex, numIndex int32
	// TODO: This should be abstracted out to some form of "Material"
	diffuse uint32
}

// Renderable is anything drawable in both the color pass and the depth pass.
type Renderable interface {
	Render(*shaders.ColorShader)
	RenderDepth(*shaders.DepthShader)
}

// VAORenderable is a object wrapping around something that is renderable on top of a vao.
type VAORenderable struct {
	// vao and vbo are OpenGL object handles created by the constructors.
	vao, vbo uint32
	Position mgl32.Vec3
	Rotation, Scale mgl32.Mat4
	// renderStyle is the GL primitive mode (constructors use gl.TRIANGLES).
	renderStyle uint32
	// portions are sub-ranges of the VBO, each drawn with its own diffuse.
	portions []RenderablePortion
}
// NewVAORenderable instantiates a Renderable for the given verticies of the normal Vertex Type.
// It uploads the vertices into a fresh VAO/VBO pair (8 floats per vertex:
// position 3 + normal 3 + uv 2, hence len*8*4 bytes), binds the attribute
// layout against the global Renderer's color shader, and registers a single
// portion covering all vertices with the given diffuse texture.
func NewVAORenderable(verticies []Vertex, diffuse uint32) *VAORenderable {
	var vao uint32
	gl.GenVertexArrays(1, &vao)
	gl.BindVertexArray(vao)
	var vbo uint32
	gl.GenBuffers(1, &vbo)
	gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
	gl.BufferData(gl.ARRAY_BUFFER, len(verticies)*8*4, gl.Ptr(verticies), gl.STATIC_DRAW)
	BindVertexAttributes(Renderer.colorShader.Program())
	gl.BindVertexArray(0)
	return &VAORenderable{
		vao:         vao,
		vbo:         vbo,
		Position:    mgl32.Vec3{},
		Rotation:    mgl32.Ident4(),
		Scale:       mgl32.Ident4(),
		renderStyle: gl.TRIANGLES,
		portions:    []RenderablePortion{{0, int32(len(verticies)), diffuse}},
	}
}
// NewChunkedRenderable instantiates a Renderable for the given verticies of the normal Vertex Type.
// Identical GL setup to NewVAORenderable, but the caller supplies the
// portion list so different vertex ranges can use different diffuse textures.
func NewChunkedRenderable(verticies []Vertex, portions []RenderablePortion) *VAORenderable {
	var vao uint32
	gl.GenVertexArrays(1, &vao)
	gl.BindVertexArray(vao)
	var vbo uint32
	gl.GenBuffers(1, &vbo)
	gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
	gl.BufferData(gl.ARRAY_BUFFER, len(verticies)*8*4, gl.Ptr(verticies), gl.STATIC_DRAW)
	BindVertexAttributes(Renderer.colorShader.Program())
	gl.BindVertexArray(0)
	return &VAORenderable{
		vao:         vao,
		vbo:         vbo,
		Position:    mgl32.Vec3{},
		Rotation:    mgl32.Ident4(),
		Scale:       mgl32.Ident4(),
		renderStyle: gl.TRIANGLES,
		portions:    portions,
	}
}
// getModelMatrix returns this renderable's final model transform matrix:
// translate * (scale * rotate), i.e. rotation is applied first, then
// scale, then translation.
func (r *VAORenderable) getModelMatrix() mgl32.Mat4 {
	return mgl32.Translate3D(r.Position.X(), r.Position.Y(), r.Position.Z()).Mul4(r.Scale.Mul4(r.Rotation))
}
// Render binds this renderable's VAO, uploads the model matrix, and draws
// each portion with its diffuse texture bound to texture unit 0.
func (r *VAORenderable) Render(colorShader *shaders.ColorShader) {
	gl.BindVertexArray(r.vao)
	colorShader.Model.Set(r.getModelMatrix())
	for _, p := range r.portions {
		colorShader.Diffuse.Set(gl.TEXTURE0, 0, p.diffuse)
		gl.DrawArrays(r.renderStyle, p.startIndex, p.numIndex)
	}
}
// RenderDepth binds this renderable's VAO and draws each portion for the
// depth pass. The diffuse texture is still bound — presumably for
// alpha-tested depth; confirm against the depth shader.
func (r *VAORenderable) RenderDepth(depthShader *shaders.DepthShader) {
	gl.BindVertexArray(r.vao)
	depthShader.Model.Set(r.getModelMatrix())
	for _, p := range r.portions {
		depthShader.Diffuse.Set(gl.TEXTURE0, 0, p.diffuse)
		gl.DrawArrays(r.renderStyle, p.startIndex, p.numIndex)
	}
}
// Copy returns a shallow copy of the renderable: the copy shares the same
// VAO/VBO handles and the same portions backing array as the original, so
// mutating a portion affects both.
func (r *VAORenderable) Copy() *VAORenderable {
	temp := *r
	return &temp
}
// PlaneVertices is the vertex list for a Plane: two triangles spanning
// ±10000 units on X/Z at y=0, normals up, UVs tiled 500 times per side.
var PlaneVertices = []Vertex{
	{mgl32.Vec3{-10000.0, 0, -10000.0}, mgl32.Vec3{0, 1.0, 0}, mgl32.Vec2{0, 0}},
	{mgl32.Vec3{-10000.0, 0, 10000.0}, mgl32.Vec3{0, 1.0, 0}, mgl32.Vec2{0, 500}},
	{mgl32.Vec3{10000.0, 0, -10000.0}, mgl32.Vec3{0, 1.0, 0}, mgl32.Vec2{500, 0}},
	{mgl32.Vec3{-10000.0, 0, 10000.0}, mgl32.Vec3{0, 1.0, 0}, mgl32.Vec2{0, 500}},
	{mgl32.Vec3{10000.0, 0, 10000.0}, mgl32.Vec3{0, 1.0, 0}, mgl32.Vec2{500, 500}},
	{mgl32.Vec3{10000.0, 0, -10000.0}, mgl32.Vec3{0, 1.0, 0}, mgl32.Vec2{500, 0}},
}
// CubeVertices is the vertex list for a Cube: 12 triangles (2 per face)
// spanning ±1 on each axis, with per-face normals and full 0..1 UVs.
var CubeVertices = []Vertex{
	// X, Y, Z
	// Bottom
	{mgl32.Vec3{-1.0, -1.0, -1.0}, mgl32.Vec3{0, -1.0, 0}, mgl32.Vec2{0, 0}},
	{mgl32.Vec3{1.0, -1.0, -1.0}, mgl32.Vec3{0, -1.0, 0}, mgl32.Vec2{1, 0}},
	{mgl32.Vec3{-1.0, -1.0, 1.0}, mgl32.Vec3{0, -1.0, 0}, mgl32.Vec2{0, 1}},
	{mgl32.Vec3{1.0, -1.0, -1.0}, mgl32.Vec3{0, -1.0, 0}, mgl32.Vec2{1, 0}},
	{mgl32.Vec3{1.0, -1.0, 1.0}, mgl32.Vec3{0, -1.0, 0}, mgl32.Vec2{1, 1}},
	{mgl32.Vec3{-1.0, -1.0, 1.0}, mgl32.Vec3{0, -1.0, 0}, mgl32.Vec2{0, 1}},
	// Top
	{mgl32.Vec3{-1.0, 1.0, -1.0}, mgl32.Vec3{0, 1.0, 0}, mgl32.Vec2{0, 0}},
	{mgl32.Vec3{-1.0, 1.0, 1.0}, mgl32.Vec3{0, 1.0, 0}, mgl32.Vec2{0, 1}},
	{mgl32.Vec3{1.0, 1.0, -1.0}, mgl32.Vec3{0, 1.0, 0}, mgl32.Vec2{1, 0}},
	{mgl32.Vec3{1.0, 1.0, -1.0}, mgl32.Vec3{0, 1.0, 0}, mgl32.Vec2{1, 0}},
	{mgl32.Vec3{-1.0, 1.0, 1.0}, mgl32.Vec3{0, 1.0, 0}, mgl32.Vec2{0, 1}},
	{mgl32.Vec3{1.0, 1.0, 1.0}, mgl32.Vec3{0, 1.0, 0}, mgl32.Vec2{1, 1}},
	// Front
	{mgl32.Vec3{-1.0, -1.0, 1.0}, mgl32.Vec3{0, 0, 1.0}, mgl32.Vec2{1, 0}},
	{mgl32.Vec3{1.0, -1.0, 1.0}, mgl32.Vec3{0, 0, 1.0}, mgl32.Vec2{0, 0}},
	{mgl32.Vec3{-1.0, 1.0, 1.0}, mgl32.Vec3{0, 0, 1.0}, mgl32.Vec2{1, 1}},
	{mgl32.Vec3{1.0, -1.0, 1.0}, mgl32.Vec3{0, 0, 1.0}, mgl32.Vec2{0, 0}},
	{mgl32.Vec3{1.0, 1.0, 1.0}, mgl32.Vec3{0, 0, 1.0}, mgl32.Vec2{0, 1}},
	{mgl32.Vec3{-1.0, 1.0, 1.0}, mgl32.Vec3{0, 0, 1.0}, mgl32.Vec2{1, 1}},
	// Back
	{mgl32.Vec3{-1.0, -1.0, -1.0}, mgl32.Vec3{0, 0, -1.0}, mgl32.Vec2{0, 0}},
	{mgl32.Vec3{-1.0, 1.0, -1.0}, mgl32.Vec3{0, 0, -1.0}, mgl32.Vec2{0, 1}},
	{mgl32.Vec3{1.0, -1.0, -1.0}, mgl32.Vec3{0, 0, -1.0}, mgl32.Vec2{1, 0}},
	{mgl32.Vec3{1.0, -1.0, -1.0}, mgl32.Vec3{0, 0, -1.0}, mgl32.Vec2{1, 0}},
	{mgl32.Vec3{-1.0, 1.0, -1.0}, mgl32.Vec3{0, 0, -1.0}, mgl32.Vec2{0, 1}},
	{mgl32.Vec3{1.0, 1.0, -1.0}, mgl32.Vec3{0, 0, -1.0}, mgl32.Vec2{1, 1}},
	// Left
	{mgl32.Vec3{-1.0, -1.0, 1.0}, mgl32.Vec3{-1.0, 0, 0}, mgl32.Vec2{0, 1}},
	{mgl32.Vec3{-1.0, 1.0, -1.0}, mgl32.Vec3{-1.0, 0, 0}, mgl32.Vec2{1, 0}},
	{mgl32.Vec3{-1.0, -1.0, -1.0}, mgl32.Vec3{-1.0, 0, 0}, mgl32.Vec2{0, 0}},
	{mgl32.Vec3{-1.0, -1.0, 1.0}, mgl32.Vec3{-1.0, 0, 0}, mgl32.Vec2{0, 1}},
	{mgl32.Vec3{-1.0, 1.0, 1.0}, mgl32.Vec3{-1.0, 0, 0}, mgl32.Vec2{1, 1}},
	{mgl32.Vec3{-1.0, 1.0, -1.0}, mgl32.Vec3{-1.0, 0, 0}, mgl32.Vec2{1, 0}},
	// Right
	{mgl32.Vec3{1.0, -1.0, 1.0}, mgl32.Vec3{1.0, 0, 0}, mgl32.Vec2{1, 1}},
	{mgl32.Vec3{1.0, -1.0, -1.0}, mgl32.Vec3{1.0, 0, 0}, mgl32.Vec2{1, 0}},
	{mgl32.Vec3{1.0, 1.0, -1.0}, mgl32.Vec3{1.0, 0, 0}, mgl32.Vec2{0, 0}},
	{mgl32.Vec3{1.0, -1.0, 1.0}, mgl32.Vec3{1.0, 0, 0}, mgl32.Vec2{1, 1}},
	{mgl32.Vec3{1.0, 1.0, -1.0}, mgl32.Vec3{1.0, 0, 0}, mgl32.Vec2{0, 0}},
	{mgl32.Vec3{1.0, 1.0, 1.0}, mgl32.Vec3{1.0, 0, 0}, mgl32.Vec2{0, 1}},
}
package matcher
import (
"net"
"strings"
"github.com/gobwas/glob"
)
// Matcher is a generic pattern matcher,
// it gives the match result of the given pattern for specific v.
type Matcher interface {
	// Match reports whether v matches the pattern this Matcher was built from.
	Match(v string) bool
}
// NewMatcher creates a Matcher for the given pattern.
// The actual Matcher depends on the pattern:
// IP Matcher if pattern is a valid IP address.
// CIDR Matcher if pattern is a valid CIDR address.
// Domain Matcher if both of the above are not.
// An empty pattern yields a nil Matcher.
func NewMatcher(pattern string) Matcher {
	if pattern == "" {
		return nil
	}
	if ip := net.ParseIP(pattern); ip != nil {
		return IPMatcher(ip)
	}
	if _, inet, err := net.ParseCIDR(pattern); err == nil {
		return CIDRMatcher(inet)
	}
	return DomainMatcher(pattern)
}
// ipMatcher matches one exact IP address.
type ipMatcher struct {
	ip net.IP
}

// IPMatcher creates a Matcher for a specific IP address.
func IPMatcher(ip net.IP) Matcher {
	return &ipMatcher{ip: ip}
}

// Match reports whether ip parses to exactly the matcher's address.
// A nil matcher never matches.
func (m *ipMatcher) Match(ip string) bool {
	return m != nil && m.ip.Equal(net.ParseIP(ip))
}
// cidrMatcher matches any IP that falls inside a CIDR network.
type cidrMatcher struct {
	ipNet *net.IPNet
}

// CIDRMatcher creates a Matcher for a specific CIDR notation IP address.
func CIDRMatcher(inet *net.IPNet) Matcher {
	return &cidrMatcher{ipNet: inet}
}

// Match reports whether ip lies within the matcher's network.
// A nil matcher or nil network never matches.
func (m *cidrMatcher) Match(ip string) bool {
	return m != nil && m.ipNet != nil && m.ipNet.Contains(net.ParseIP(ip))
}
// domainMatcher matches domains against a glob pattern; the normalized
// (prefix-dot-stripped) pattern is kept for an exact-match fast path.
type domainMatcher struct {
	pattern string
	glob    glob.Glob
}

// DomainMatcher creates a Matcher for a specific domain pattern,
// the pattern can be a plain domain such as 'example.com',
// a wildcard such as '*.example.com' or a special wildcard '.example.com'.
// NOTE(review): '.example.com' is rewritten to the glob '*example.com',
// which also matches names that merely end in "example.com" (e.g.
// 'badexample.com') — confirm this is intended.
func DomainMatcher(pattern string) Matcher {
	p := pattern
	if strings.HasPrefix(pattern, ".") {
		p = pattern[1:] // trim the prefix '.'
		pattern = "*" + p
	}
	return &domainMatcher{
		pattern: p,
		glob:    glob.MustCompile(pattern),
	}
}

// Match reports whether domain equals the stored pattern exactly or
// matches its compiled glob. A nil matcher or nil glob never matches.
func (m *domainMatcher) Match(domain string) bool {
	if m == nil || m.glob == nil {
		return false
	}
	if domain == m.pattern {
		return true
	}
	return m.glob.Match(domain)
}
package waves
import "math"
// Overshoot amplitude and oscillation period constants shared by the
// back (c1..c3) and elastic (c4, c5) easing functions below.
const c1 = 1.70158
const c2 = c1 * 1.525
const c3 = c1 + 1
const c4 = (2 * math.Pi) / 3
const c5 = (2 * math.Pi) / 4.5
// EaseFunc maps normalized progress x in [0,1] to an eased value
// (usually also in [0,1]; the back/elastic variants overshoot).
type EaseFunc func(x float64) float64

// EaseFunctions is the registry of known easing functions, keyed by the
// function name without its "ease"/"Ease" prefix.
var EaseFunctions = map[string]EaseFunc{
	"Linear":       EaseLinear,
	"InQuad":       easeInQuad,
	"OutQuad":      easeOutQuad,
	"InOutQuad":    easeInOutQuad,
	"InCubic":      easeInCubic,
	"OutCubic":     easeOutCubic,
	"InOutCubic":   easeInOutCubic,
	"InQuart":      easeInQuart,
	"OutQuart":     easeOutQuart,
	"InOutQuart":   easeInOutQuart,
	"InQuint":      easeInQuint,
	"OutQuint":     easeOutQuint,
	"InOutQuint":   easeInOutQuint,
	"InSine":       easeInSine,
	"OutSine":      easeOutSine,
	"InOutSine":    easeInOutSine,
	"InExpo":       easeInExpo,
	"OutExpo":      easeOutExpo,
	"InOutExpo":    easeInOutExpo,
	"InCirc":       easeInCirc,
	"OutCirc":      easeOutCirc,
	"InOutCirc":    easeInOutCirc,
	"InBack":       easeInBack,
	"OutBack":      easeOutBack,
	"InOutBack":    easeInOutBack,
	"InElastic":    easeInElastic,
	"OutElastic":   easeOutElastic,
	"InOutElastic": easeInOutElastic,
}
// EaseLinear is the identity easing: progress maps straight to output.
func EaseLinear(t float64) float64 {
	return t
}

// easeInQuad accelerates from zero following t^2.
func easeInQuad(t float64) float64 {
	return t * t
}

// easeOutQuad decelerates to one following 1-(1-t)^2.
func easeOutQuad(t float64) float64 {
	u := 1 - t
	return 1 - u*u
}

// easeInOutQuad accelerates for the first half and decelerates for the second.
func easeInOutQuad(t float64) float64 {
	if t >= 0.5 {
		return 1 - math.Pow(-2*t+2, 2)/2
	}
	return 2 * t * t
}

// easeInCubic accelerates from zero following t^3.
func easeInCubic(t float64) float64 {
	return t * t * t
}

// easeOutCubic decelerates to one following 1-(1-t)^3.
func easeOutCubic(t float64) float64 {
	return 1 - math.Pow(1-t, 3)
}

// easeInOutCubic blends the cubic in/out curves at the midpoint.
func easeInOutCubic(t float64) float64 {
	if t >= 0.5 {
		return 1 - math.Pow(-2*t+2, 3)/2
	}
	return 4 * t * t * t
}

// easeInQuart accelerates from zero following t^4.
func easeInQuart(t float64) float64 {
	return t * t * t * t
}

// easeOutQuart decelerates to one following 1-(1-t)^4.
func easeOutQuart(t float64) float64 {
	return 1 - math.Pow(1-t, 4)
}

// easeInOutQuart blends the quartic in/out curves at the midpoint.
func easeInOutQuart(t float64) float64 {
	if t >= 0.5 {
		return 1 - math.Pow(-2*t+2, 4)/2
	}
	return 8 * t * t * t * t
}

// easeInQuint accelerates from zero following t^5.
func easeInQuint(t float64) float64 {
	return t * t * t * t * t
}

// easeOutQuint decelerates to one following 1-(1-t)^5.
func easeOutQuint(t float64) float64 {
	return 1 - math.Pow(1-t, 5)
}

// easeInOutQuint blends the quintic in/out curves at the midpoint.
func easeInOutQuint(t float64) float64 {
	if t >= 0.5 {
		return 1 - math.Pow(-2*t+2, 5)/2
	}
	return 16 * t * t * t * t * t
}
// easeInSine accelerates from zero along a quarter cosine wave.
func easeInSine(t float64) float64 {
	return 1 - math.Cos((t*math.Pi)/2)
}

// easeOutSine decelerates to one along a quarter sine wave.
func easeOutSine(t float64) float64 {
	return math.Sin((t * math.Pi) / 2)
}

// easeInOutSine follows half a cosine wave, symmetric about t=0.5.
func easeInOutSine(t float64) float64 {
	return -(math.Cos(math.Pi*t) - 1) / 2
}

// easeInExpo accelerates exponentially; pinned to exactly 0 at t=0.
func easeInExpo(t float64) float64 {
	if t == 0 {
		return 0
	}
	return math.Pow(2, 10*t-10)
}

// easeOutExpo decelerates exponentially; pinned to exactly 1 at t=1.
func easeOutExpo(t float64) float64 {
	if t == 1 {
		return 1
	}
	return 1 - math.Pow(2, -10*t)
}

// easeInOutExpo is exponential on both halves, pinned at both endpoints.
func easeInOutExpo(t float64) float64 {
	switch {
	case t == 0:
		return 0
	case t == 1:
		return 1
	case t < 0.5:
		return math.Pow(2, 20*t-10) / 2
	default:
		return (2 - math.Pow(2, -20*t+10)) / 2
	}
}

// easeInCirc accelerates along a quarter-circle arc.
func easeInCirc(t float64) float64 {
	return 1 - math.Sqrt(1-math.Pow(t, 2))
}

// easeOutCirc decelerates along a quarter-circle arc.
func easeOutCirc(t float64) float64 {
	return math.Sqrt(1 - math.Pow(t-1, 2))
}

// easeInOutCirc joins two quarter-circle arcs at the midpoint.
func easeInOutCirc(t float64) float64 {
	if t >= 0.5 {
		return (math.Sqrt(1-math.Pow(-2*t+2, 2)) + 1) / 2
	}
	return (1 - math.Sqrt(1-math.Pow(2*t, 2))) / 2
}
func easeInBack(x float64) float64 {
return c3*x*x*x - c1*x*x
}
func easeOutBack(x float64) float64 {
return 1 + c3*math.Pow(x-1, 3) + c1*math.Pow(x-1, 2)
}
func easeInOutBack(x float64) float64 {
if x < 0.5 {
return (math.Pow(2*x, 2) * ((c2+1)*2*x - c2)) / 2
}
return (math.Pow(2*x-2, 2)*((c2+1)*(x*2-2)+c2) + 2) / 2
}
func easeInElastic(x float64) float64 {
if x == 0 {
return 0
}
if x == 1 {
return 1
}
return -math.Pow(2, 10*x-10) * math.Sin((x*10-10.75)*c4)
}
func easeOutElastic(x float64) float64 {
if x == 0 {
return 0
}
if x == 1 {
return 1
}
return math.Pow(2, -10*x)*math.Sin((x*10-0.75)*c4) + 1
}
func easeInOutElastic(x float64) float64 {
if x == 0 {
return 0
}
if x == 1 {
return 1
}
if x < 0.5 {
return -(math.Pow(2, 20*x-10) * math.Sin((20*x-11.125)*c5)) / 2
}
return (math.Pow(2, -20*x+10)*math.Sin((20*x-11.125)*c5))/2 + 1
} | pkg/waves/easing.go | 0.757256 | 0.64124 | easing.go | starcoder |
package static
// SwaggerJson is the Swagger 2.0 (OpenAPI) specification for the Hoppity
// Hop REST API, embedded as a raw string so it can be served without
// reading from disk. Keep it in sync with the handlers it documents.
const SwaggerJson = `
{
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"schemes": [
"http",
"https"
],
"swagger": "2.0",
"info": {
"description": "The purpose of this application is to provide\nHoppity Hop manipulation REST API",
"title": "Hoppity Hop manipulation REST API",
"version": "0.0.1"
},
"basePath": "/",
"paths": {
"/health-check": {
"get": {
"description": "Returns 200 (OK) if health check succeeds, otherwise Internal Server Error (500) will be returned.",
"tags": [
"utility"
],
"summary": "Performs application health check.",
"operationId": "HealthCheckHandler",
"responses": {
"200": {
"$ref": "#/responses/status"
},
"500": {
"$ref": "#/responses/status"
}
}
}
},
"/hoppity": {
"post": {
"description": "Returns Hoppity Hop status.",
"consumes": [
"application/json"
],
"tags": [
"service"
],
"summary": "Generates Hoppity Hop record.",
"operationId": "hoppity",
"parameters": [
{
"description": "sequence ID",
"name": "SequenceId",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/SequenceId"
}
}
],
"responses": {
"200": {
"$ref": "#/responses/status"
},
"500": {
"$ref": "#/responses/status"
}
}
}
}
},
"definitions": {
"SequenceId": {
"description": "Represents JSON request body coming into hoppity-restapi",
"type": "object",
"properties": {
"sequence_id": {
"type": "integer",
"format": "int64",
"x-go-name": "SequenceId"
}
},
"x-go-package": "hoppity/models"
}
},
"responses": {
"status": {
"description": "Represents JSON status object returned by Rest Service",
"headers": {
"status": {
"type": "string"
},
"status_message": {
"type": "string"
}
}
}
}
}`
package ui
import (
"strconv"
"github.com/RyoJerryYu/gogoengine/model"
)
// boardRenderer renders a sizeX-by-sizeY board into a grid of display cells.
type boardRenderer func(uint32, uint32, model.Board) ([][]string, error)

// renderBoard walks every board point, rendering each to a display string,
// and returns one slice of cells per x line. It fails if any board lookup fails.
func (ui *userInterface) renderBoard(
	sizeX, sizeY uint32,
	board model.Board,
) ([][]string, error) {
	rendered := make([][]string, 0, sizeX)
	for x := uint32(0); x < sizeX; x++ {
		line := make([]string, 0, sizeY)
		for y := uint32(0); y < sizeY; y++ {
			stone, err := board.Get(model.NewPoint(x, y))
			if err != nil {
				return nil, err
			}
			renderedCell := ui.renderCell(x, y, sizeX, sizeY, stone)
			line = append(line, renderedCell)
		}
		rendered = append(rendered, line)
	}
	return rendered, nil
}
// renderCell returns the display string for one board cell: the mapped
// glyph when a stone is present (and mapped), otherwise a grid glyph
// chosen by the cell's edge/interior position on each axis.
func (ui *userInterface) renderCell(
	x, y uint32,
	sizeX, sizeY uint32,
	stone model.StoneType,
) string {
	// render stone; unmapped stone types fall through to the grid glyph.
	if stone != model.StoneType_Empty {
		if cell, ok := ui.renderMap[stone]; ok {
			return cell
		}
	}
	// render grid: index 0 for the first line, 2 for the last, 1 otherwise.
	var gridX int
	var gridY int
	switch x {
	case 0:
		gridX = 0
	case sizeX - 1:
		gridX = 2
	default:
		gridX = 1
	}
	switch y {
	case 0:
		gridY = 0
	case sizeY - 1:
		gridY = 2
	default:
		gridY = 1
	}
	return ui.grid[gridX][gridY]
}
// renderWithLabel wraps a boardRenderer so the rendered grid carries
// coordinate labels: x labels appended to each row and y labels appended
// as extra rows. An empty render is passed through unchanged.
func renderWithLabel(renderer boardRenderer) boardRenderer {
	return func(sizeX, sizeY uint32, b model.Board) ([][]string, error) {
		rendered, err := renderer(sizeX, sizeY, b)
		if err != nil {
			return rendered, err
		}
		renderedSizeX := len(rendered)
		if renderedSizeX <= 0 {
			return rendered, nil
		}
		renderedSizeY := len(rendered[0])
		rendered = renderWithXLabel(renderedSizeX, rendered)
		rendered = renderWithYLabel(renderedSizeY, rendered)
		return rendered, nil
	}
}
// renderWithXLabel appends each row's zero-padded index (one digit per
// cell) to the end of the first renderedSizeX rows.
func renderWithXLabel(renderedSizeX int, rendered [][]string) [][]string {
	width := len(strconv.Itoa(renderedSizeX - 1))
	for row := 0; row < renderedSizeX; row++ {
		label := intToDigitsLengthChars(row, width)
		rendered[row] = append(rendered[row], label...)
	}
	return rendered
}

// renderWithYLabel appends column-index labels as extra rows: one row per
// digit, so multi-digit labels read vertically.
func renderWithYLabel(renderedSizeY int, rendered [][]string) [][]string {
	width := len(strconv.Itoa(renderedSizeY - 1))
	labels := make([][]string, 0, renderedSizeY)
	for col := 0; col < renderedSizeY; col++ {
		labels = append(labels, intToDigitsLengthChars(col, width))
	}
	for digit := 0; digit < width; digit++ {
		row := make([]string, 0, renderedSizeY)
		for _, label := range labels {
			row = append(row, label[digit])
		}
		rendered = append(rendered, row)
	}
	return rendered
}

// intToDigitsLengthChars renders v as exactly `digits` decimal digit
// strings, zero-padded on the left; high digits beyond `digits` are dropped.
func intToDigitsLengthChars(v int, digits int) []string {
	out := make([]string, digits)
	for i := digits - 1; i >= 0; i-- {
		out[i] = strconv.Itoa(v % 10)
		v /= 10
	}
	return out
}
package op
import "fmt"
// Encode encodes the instruction and returns a 16-bit representation of it.
func Encode(inst interface{}) (buf uint16, err error) {
switch v := inst.(type) {
case *Nop:
// op-code: 0
// operand: 000
case *LoadMem:
// op-code: 1
// operand: RXY
// R refers to the dst register.
// XY refers to the src memory address.
if v.Dst >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid dst register %d in %s", v.Dst, v.Code)
}
// op-code
buf |= 0x1000
// dst register.
buf |= uint16(v.Dst) << 8
// src memory address.
buf |= uint16(v.Src)
case *LoadVal:
// op-code: 2
// operand: RXY
// R refers to the dst register.
// XY refers to the src immediate value.
if v.Dst >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid dst register %d in %s", v.Dst, v.Code)
}
// op-code
buf |= 0x2000
// dst register.
buf |= uint16(v.Dst) << 8
// src immediate value.
buf |= uint16(v.Src)
case *Store:
// op-code: 3
// operand: RXY
// R refers to the src register.
// XY refers to the dst memory address.
if v.Src >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid src register %d in %s", v.Src, v.Code)
}
// op-code
buf |= 0x3000
// src register.
buf |= uint16(v.Src) << 8
// dst memory address.
buf |= uint16(v.Dst)
case *Move:
// op-code: 4
// operand: 0RS
// R refers to the src register.
// S refers to the dst register.
if v.Dst >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid dst register %d in %s", v.Dst, v.Code)
}
if v.Src >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid src register %d in %s", v.Src, v.Code)
}
// op-code
buf |= 0x4000
// src register.
buf |= uint16(v.Src) << 4
// dst register.
buf |= uint16(v.Dst)
case *Add:
// op-code: 5
// operand: RST
// R refers to the dst register.
// S refers to the src1 register.
// T refers to the src2 register.
if v.Dst >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid dst register %d in %s", v.Dst, v.Code)
}
if v.Src1 >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid src1 register %d in %s", v.Src1, v.Code)
}
if v.Src2 >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid src2 register %d in %s", v.Src2, v.Code)
}
// op-code
buf |= 0x5000
// dst register.
buf |= uint16(v.Dst) << 8
// src1 register.
buf |= uint16(v.Src1) << 4
// src2 register.
buf |= uint16(v.Src2)
case *AddFloat:
// op-code: 6
// operand: RST
// R refers to the dst register.
// S refers to the src1 register.
// T refers to the src2 register.
if v.Dst >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid dst register %d in %s", v.Dst, v.Code)
}
if v.Src1 >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid src1 register %d in %s", v.Src1, v.Code)
}
if v.Src2 >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid src2 register %d in %s", v.Src2, v.Code)
}
// op-code
buf |= 0x6000
// dst register.
buf |= uint16(v.Dst) << 8
// src1 register.
buf |= uint16(v.Src1) << 4
// src2 register.
buf |= uint16(v.Src2)
case *Or:
// op-code: 7
// operand: RST
// R refers to the dst register.
// S refers to the src1 register.
// T refers to the src2 register.
if v.Dst >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid dst register %d in %s", v.Dst, v.Code)
}
if v.Src1 >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid src1 register %d in %s", v.Src1, v.Code)
}
if v.Src2 >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid src2 register %d in %s", v.Src2, v.Code)
}
// op-code
buf |= 0x7000
// dst register.
buf |= uint16(v.Dst) << 8
// src1 register.
buf |= uint16(v.Src1) << 4
// src2 register.
buf |= uint16(v.Src2)
case *And:
// op-code: 8
// operand: RST
// R refers to the dst register.
// S refers to the src1 register.
// T refers to the src2 register.
if v.Dst >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid dst register %d in %s", v.Dst, v.Code)
}
if v.Src1 >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid src1 register %d in %s", v.Src1, v.Code)
}
if v.Src2 >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid src2 register %d in %s", v.Src2, v.Code)
}
// op-code
buf |= 0x8000
// dst register.
buf |= uint16(v.Dst) << 8
// src1 register.
buf |= uint16(v.Src1) << 4
// src2 register.
buf |= uint16(v.Src2)
case *Xor:
// op-code: 9
// operand: RST
// R refers to the dst register.
// S refers to the src1 register.
// T refers to the src2 register.
if v.Dst >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid dst register %d in %s", v.Dst, v.Code)
}
if v.Src1 >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid src1 register %d in %s", v.Src1, v.Code)
}
if v.Src2 >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid src2 register %d in %s", v.Src2, v.Code)
}
// op-code
buf |= 0x9000
// dst register.
buf |= uint16(v.Dst) << 8
// src1 register.
buf |= uint16(v.Src1) << 4
// src2 register.
buf |= uint16(v.Src2)
case *Ror:
// op-code: A
// operand: R0X
// R refers to the register.
// X refers to the immediate value x.
if v.Reg >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid register %d in %s", v.Reg, v.Code)
}
if v.X >= RegSize {
return 0, fmt.Errorf("op.Encode: invalid x (%d) in %s; above %d", v.X, v.Code, RegSize-1)
}
// op-code
buf |= 0xA000
// register.
buf |= uint16(v.Reg) << 8
// immediate value x.
buf |= uint16(v.X)
case *CmpBranch:
// op-code: B
// operand: RXY
// R refers to the cmp register.
// XY refers to the memory address addr.
if v.Cmp >= RegCount {
return 0, fmt.Errorf("op.Encode: invalid cmp register %d in %s", v.Cmp, v.Code)
}
// op-code
buf |= 0xB000
// cmp register.
buf |= uint16(v.Cmp) << 8
// memory address addr.
buf |= uint16(v.Addr)
case *Halt:
// op-code: C
// operand: 000
// op-code
buf |= 0xC000
default:
return 0, fmt.Errorf("op.Encode: unable to encode instruction (%T)", inst)
}
return buf, nil
} | archive/cs/risc/op/encode.go | 0.66072 | 0.612078 | encode.go | starcoder |
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sortutil sorts and searches common slice types, and offers
// helper functions for sorting floats with radixsort.
package sortutil
import (
"bytes"
"github.com/twotwotwo/radixsort.test"
"math"
"sort"
)
// Float32Key generates a uint64 key from a float32. To sort float32s,
// use this with Float32Less. The key's unsigned ordering matches the
// natural float ordering, with positive NaNs sorting above everything.
func Float32Key(f float32) uint64 {
	k := uint64(math.Float32bits(f)) << 32
	if k>>63 == 0 {
		// Non-negative float: set the sign bit so it sorts above negatives.
		k ^= 1 << 63
	} else {
		// Negative float: flip all bits to reverse the magnitude order.
		k = ^k
	}
	return k
}

// Float32Less compares float32s, sorting NaNs (which are normally
// unsortable) to the end.
func Float32Less(f, g float32) bool {
	return Float32Key(f) < Float32Key(g)
}
// Float64Key generates a uint64 key from a float64. To sort float64s,
// use this with Float64Less. The key's unsigned ordering matches the
// natural float ordering, with positive NaNs sorting above everything.
func Float64Key(f float64) uint64 {
	k := math.Float64bits(f)
	if k>>63 == 0 {
		// Non-negative float: set the sign bit so it sorts above negatives.
		k ^= 1 << 63
	} else {
		// Negative float: flip all bits to reverse the magnitude order.
		k = ^k
	}
	return k
}

// Float64Less compares float64s, sorting NaNs (which are normally
// unsortable) to the end.
func Float64Less(f, g float64) bool {
	return Float64Key(f) < Float64Key(g)
}
// IntSlice attaches the methods of Int64Interface to []int, sorting in increasing order.
type IntSlice []int

func (p IntSlice) Len() int           { return len(p) }
func (p IntSlice) Less(i, j int) bool { return p[i] < p[j] }
func (p IntSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p IntSlice) Key(i int) int64    { return int64(p[i]) }

// Sort is a convenience method.
func (p IntSlice) Sort() { radixsort.ByInt64(p) }

// Int32Slice attaches the methods of Int64Interface to []int32, sorting in increasing order.
type Int32Slice []int32

func (p Int32Slice) Len() int           { return len(p) }
func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Int32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p Int32Slice) Key(i int) int64    { return int64(p[i]) }

// Sort is a convenience method.
func (p Int32Slice) Sort() { radixsort.ByInt64(p) }

// Int64Slice attaches the methods of Int64Interface to []int64, sorting in increasing order.
type Int64Slice []int64

func (p Int64Slice) Len() int           { return len(p) }
func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p Int64Slice) Key(i int) int64    { return p[i] }

// Sort is a convenience method.
func (p Int64Slice) Sort() { radixsort.ByInt64(p) }

// UintSlice attaches the methods of Uint64Interface to []uint, sorting in increasing order.
type UintSlice []uint

func (p UintSlice) Len() int           { return len(p) }
func (p UintSlice) Less(i, j int) bool { return p[i] < p[j] }
func (p UintSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p UintSlice) Key(i int) uint64   { return uint64(p[i]) }

// Sort is a convenience method.
func (p UintSlice) Sort() { radixsort.ByUint64(p) }

// Uint32Slice attaches the methods of Uint64Interface to []uint32, sorting in increasing order.
type Uint32Slice []uint32

func (p Uint32Slice) Len() int           { return len(p) }
func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Uint32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p Uint32Slice) Key(i int) uint64   { return uint64(p[i]) }

// Sort is a convenience method.
func (p Uint32Slice) Sort() { radixsort.ByUint64(p) }

// Uint64Slice attaches the methods of Uint64Interface to []uint64, sorting in increasing order.
type Uint64Slice []uint64

func (p Uint64Slice) Len() int           { return len(p) }
func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p Uint64Slice) Key(i int) uint64   { return p[i] }

// Sort is a convenience method.
func (p Uint64Slice) Sort() { radixsort.ByUint64(p) }
// Float32Slice attaches the methods of Uint64Interface to []float32, sorting in increasing order, NaNs last.
type Float32Slice []float32
func (p Float32Slice) Len() int { return len(p) }
func (p Float32Slice) Less(i, j int) bool { return Float32Less(p[i], p[j]) }
func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Key maps the float to a uint64 that sorts in the same order (see Float32Key).
func (p Float32Slice) Key(i int) uint64 { return Float32Key(p[i]) }
// Sort is a convenience method.
func (p Float32Slice) Sort() { radixsort.ByUint64(p) }
// Float64Slice attaches the methods of Uint64Interface to []float64, sorting in increasing order, NaNs last.
type Float64Slice []float64
func (p Float64Slice) Len() int { return len(p) }
func (p Float64Slice) Less(i, j int) bool { return Float64Less(p[i], p[j]) }
func (p Float64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Key maps the float to a uint64 that sorts in the same order (see Float64Key).
func (p Float64Slice) Key(i int) uint64 { return Float64Key(p[i]) }
// Sort is a convenience method.
func (p Float64Slice) Sort() { radixsort.ByUint64(p) }
// StringSlice attaches the methods of StringInterface to []string, sorting in increasing order.
type StringSlice []string
func (p StringSlice) Len() int { return len(p) }
func (p StringSlice) Less(i, j int) bool { return p[i] < p[j] }
func (p StringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p StringSlice) Key(i int) string { return p[i] }
// Sort is a convenience method.
func (p StringSlice) Sort() { radixsort.ByString(p) }
// BytesSlice attaches the methods of BytesInterface to [][]byte, sorting in increasing order.
type BytesSlice [][]byte
func (p BytesSlice) Len() int { return len(p) }
// Less orders byte slices lexicographically via bytes.Compare.
func (p BytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 }
func (p BytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p BytesSlice) Key(i int) []byte { return p[i] }
// Sort is a convenience method.
func (p BytesSlice) Sort() { radixsort.ByBytes(p) }
// Ints sorts a slice of ints in increasing order.
func Ints(a []int) { IntSlice(a).Sort() }
// Int32s sorts a slice of int32s in increasing order.
func Int32s(a []int32) { Int32Slice(a).Sort() }
// Int64s sorts a slice of int64s in increasing order.
func Int64s(a []int64) { Int64Slice(a).Sort() }
// Uints sorts a slice of uints in increasing order.
func Uints(a []uint) { UintSlice(a).Sort() }
// Uint32s sorts a slice of uint32s in increasing order.
func Uint32s(a []uint32) { Uint32Slice(a).Sort() }
// Uint64s sorts a slice of uint64s in increasing order.
func Uint64s(a []uint64) { Uint64Slice(a).Sort() }
// Float32s sorts a slice of float32s in increasing order, NaNs last.
func Float32s(a []float32) { Float32Slice(a).Sort() }
// Float64s sorts a slice of float64s in increasing order, NaNs last.
func Float64s(a []float64) { Float64Slice(a).Sort() }
// Strings sorts a slice of strings in increasing order.
func Strings(a []string) { StringSlice(a).Sort() }
// Bytes sorts a slice of byte slices in increasing order.
func Bytes(a [][]byte) { BytesSlice(a).Sort() }
// IntsAreSorted tests whether a slice of ints is sorted in increasing order.
func IntsAreSorted(a []int) bool { return sort.IsSorted(IntSlice(a)) }
// Int32sAreSorted tests whether a slice of int32s is sorted in increasing order.
func Int32sAreSorted(a []int32) bool { return sort.IsSorted(Int32Slice(a)) }
// Int64sAreSorted tests whether a slice of int64s is sorted in increasing order.
func Int64sAreSorted(a []int64) bool { return sort.IsSorted(Int64Slice(a)) }
// UintsAreSorted tests whether a slice of uints is sorted in increasing order.
func UintsAreSorted(a []uint) bool { return sort.IsSorted(UintSlice(a)) }
// Uint32sAreSorted tests whether a slice of uint32s is sorted in increasing order.
func Uint32sAreSorted(a []uint32) bool { return sort.IsSorted(Uint32Slice(a)) }
// Uint64sAreSorted tests whether a slice of uint64s is sorted in increasing order.
func Uint64sAreSorted(a []uint64) bool { return sort.IsSorted(Uint64Slice(a)) }
// Float32sAreSorted tests whether a slice of float32s is sorted in increasing order, NaNs last.
func Float32sAreSorted(a []float32) bool { return sort.IsSorted(Float32Slice(a)) }
// Float64sAreSorted tests whether a slice of float64s is sorted in increasing order, NaNs last.
func Float64sAreSorted(a []float64) bool { return sort.IsSorted(Float64Slice(a)) }
// StringsAreSorted tests whether a slice of strings is sorted in increasing order.
func StringsAreSorted(a []string) bool { return sort.IsSorted(StringSlice(a)) }
// BytesAreSorted tests whether a slice of byte slices is sorted in increasing order.
func BytesAreSorted(a [][]byte) bool { return sort.IsSorted(BytesSlice(a)) }
// SearchInts searches ints; read about sort.Search for more.
func SearchInts(a []int, x int) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Search returns the result of applying SearchInts to the receiver and x.
func (p IntSlice) Search(x int) int { return SearchInts(p, x) }
// SearchInt32s searches int32s; read about sort.Search for more.
func SearchInt32s(a []int32, x int32) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Search returns the result of applying SearchInt32s to the receiver and x.
func (p Int32Slice) Search(x int32) int { return SearchInt32s(p, x) }
// SearchInt64s searches int64s; read about sort.Search for more.
func SearchInt64s(a []int64, x int64) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Search returns the result of applying SearchInt64s to the receiver and x.
func (p Int64Slice) Search(x int64) int { return SearchInt64s(p, x) }
// SearchUints searches uints; read about sort.Search for more.
func SearchUints(a []uint, x uint) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Search returns the result of applying SearchUints to the receiver and x.
func (p UintSlice) Search(x uint) int { return SearchUints(p, x) }
// SearchUint32s searches uint32s; read about sort.Search for more.
func SearchUint32s(a []uint32, x uint32) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Search returns the result of applying SearchUint32s to the receiver and x.
func (p Uint32Slice) Search(x uint32) int { return SearchUint32s(p, x) }
// SearchUint64s searches uint64s; read about sort.Search for more.
func SearchUint64s(a []uint64, x uint64) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Search returns the result of applying SearchUint64s to the receiver and x.
func (p Uint64Slice) Search(x uint64) int { return SearchUint64s(p, x) }
// SearchFloat32s searches float32s; read about sort.Search for more.
// Comparison uses the sortable key so NaNs are ordered after all numbers,
// consistent with Float32Less.
func SearchFloat32s(a []float32, x float32) int {
	return sort.Search(len(a), func(i int) bool { return Float32Key(a[i]) >= Float32Key(x) })
}
// Search returns the result of applying SearchFloat32s to the receiver and x.
func (p Float32Slice) Search(x float32) int { return SearchFloat32s(p, x) }
// SearchFloat64s searches float64s; read about sort.Search for more.
// Comparison uses the sortable key so NaNs are ordered after all numbers,
// consistent with Float64Less.
func SearchFloat64s(a []float64, x float64) int {
	return sort.Search(len(a), func(i int) bool { return Float64Key(a[i]) >= Float64Key(x) })
}
// Search returns the result of applying SearchFloat64s to the receiver and x.
func (p Float64Slice) Search(x float64) int { return SearchFloat64s(p, x) }
// SearchStrings searches strings; read about sort.Search for more.
func SearchStrings(a []string, x string) int {
	return sort.Search(len(a), func(i int) bool { return a[i] >= x })
}
// Search returns the result of applying SearchStrings to the receiver and x.
func (p StringSlice) Search(x string) int { return SearchStrings(p, x) }
// SearchBytes searches []bytes; read about sort.Search for more.
func SearchBytes(a [][]byte, x []byte) int {
	return sort.Search(len(a), func(i int) bool { return bytes.Compare(a[i], x) >= 0 })
}
// Search returns the result of applying SearchBytes to the receiver and x.
func (p BytesSlice) Search(x []byte) int { return SearchBytes(p, x) }
package glm
import "fmt"
import "math"
// Vector3 is a mutable 3-component float32 vector. Mutating methods return
// the receiver to allow call chaining.
type Vector3 struct {
	X, Y, Z float32
}

// Set overwrites all three components and returns the receiver.
func (v *Vector3) Set(x, y, z float32) *Vector3 {
	*v = Vector3{X: x, Y: y, Z: z}
	return v
}

// SetVector3 copies the components of other into the receiver.
func (v *Vector3) SetVector3(other *Vector3) *Vector3 {
	return v.Set(other.X, other.Y, other.Z)
}

// Add adds the given components to the receiver in place.
func (v *Vector3) Add(x, y, z float32) *Vector3 {
	return v.Set(v.X+x, v.Y+y, v.Z+z)
}

// AddVector3 adds other to the receiver in place.
func (v *Vector3) AddVector3(other *Vector3) *Vector3 {
	return v.Add(other.X, other.Y, other.Z)
}

// Sub subtracts the given components from the receiver in place.
func (v *Vector3) Sub(x, y, z float32) *Vector3 {
	return v.Set(v.X-x, v.Y-y, v.Z-z)
}

// SubVector3 subtracts other from the receiver in place.
func (v *Vector3) SubVector3(other *Vector3) *Vector3 {
	return v.Sub(other.X, other.Y, other.Z)
}

// Mul multiplies the receiver component-wise in place.
func (v *Vector3) Mul(x, y, z float32) *Vector3 {
	return v.Set(v.X*x, v.Y*y, v.Z*z)
}

// Scale multiplies every component by the scalar s.
func (v *Vector3) Scale(s float32) *Vector3 {
	return v.Mul(s, s, s)
}

// Len2 returns the squared Euclidean length (avoids a sqrt).
func (v *Vector3) Len2() float32 {
	return v.X*v.X + v.Y*v.Y + v.Z*v.Z
}

// Norm scales the receiver to unit length. Zero vectors and vectors whose
// length is already 1 are returned unchanged.
func (v *Vector3) Norm() *Vector3 {
	l2 := float64(v.Len2())
	if l2 == 0 || l2 == 1 {
		return v
	}
	return v.Scale(1.0 / float32(math.Sqrt(l2)))
}

// Cross replaces the receiver with the cross product v x other.
func (v *Vector3) Cross(other *Vector3) *Vector3 {
	cx := v.Y*other.Z - v.Z*other.Y
	cy := v.Z*other.X - v.X*other.Z
	cz := v.X*other.Y - v.Y*other.X
	return v.Set(cx, cy, cz)
}

// MulVector3 multiplies the receiver component-wise by other.
func (v *Vector3) MulVector3(other *Vector3) *Vector3 {
	return v.Mul(other.X, other.Y, other.Z)
}

// Div divides the receiver component-wise in place.
func (v *Vector3) Div(x, y, z float32) *Vector3 {
	return v.Set(v.X/x, v.Y/y, v.Z/z)
}

// DivVector3 divides the receiver component-wise by other.
func (v *Vector3) DivVector3(other *Vector3) *Vector3 {
	return v.Div(other.X, other.Y, other.Z)
}
// MulMat4 left-multiplies the vector by the given matrix, assuming the fourth (w) component of the vector is 1.
// The result is written back into the receiver.
func (v *Vector3) MulMat4(mat *Mat4) *Vector3 {
	md := &mat.Data
	// Row-times-vector products; md[m03]/md[m13]/md[m23] supply the
	// translation column since w is taken to be 1.
	return v.Set(
		v.X*md[m00]+v.Y*md[m01]+v.Z*md[m02]+md[m03],
		v.X*md[m10]+v.Y*md[m11]+v.Z*md[m12]+md[m13],
		v.X*md[m20]+v.Y*md[m21]+v.Z*md[m22]+md[m23],
	)
}
// Rotate rotates the receiver by the given angle (degrees, per the parameter
// name) around the axis described by other.
// NOTE(review): this builds the rotation into the shared tmpMat4 scratch
// matrix declared elsewhere in the package, so concurrent calls would race —
// confirm callers are single-threaded.
func (v *Vector3) Rotate(other *Vector3, degrees float32) *Vector3 {
	tmpMat4.Rotation(degrees, other.X, other.Y, other.Z)
	return v.MulMat4(tmpMat4)
}
func (v *Vector3) Distance(other *Vector3) float32 {
d2 := (v.X-other.X)*(v.X-other.X) + (v.Y-other.Y)*(v.Y-other.Y) + (v.Z-other.Z)*(v.Z-other.Z)
return float32(math.Sqrt(float64(d2)))
}
func (v *Vector3) String() string {
return fmt.Sprintf("Vector3{%v, %v, %v}\n", v.X, v.Y, v.Z)
}
func (v *Vector3) Equals(x, y, z float32) bool {
return v.X == x && v.Y == y && v.Z == z
}
func (v *Vector3) EqualsVector(other *Vector3) bool {
return v.Equals(other.X, other.Y, other.Z)
} | glm/vec3.go | 0.81409 | 0.691478 | vec3.go | starcoder |
package LeastSquareCircleFit
/*
This package implements a Circle Least Square Fit for a
list of 2D-coordinates
-> x1, x2, x3, x4, x5 ...
x = y1, y2, y3, y4, y5 ...
so that the resulting circle is a "best fit to the points given.
The only exported function is
CalcLeastSquareCircleFit
which takes two arrays as arguments: the x-coords in the first
and the y-coords in the second; it returns three float64:
the x-coord of the circle center,
the y-coord of the circle center,
the radius.
Author: <NAME>
Date : 2013-07-01
Implemented following the paper:
Least-Squares Circle Fit by <NAME>, October 24, 2006 10:22 am MDT
Caveats:
There are some divisions involved which may provoke a division by zero error.
But I didn't take the time to figure out how this can be done. Perhaps
if you supply not enough points or all the points are identical; it's definitely
a pathological case.
*/
import "math"
// bar returns the arithmetic mean of the values in in.
// Note: an empty slice yields NaN (0/0), matching the original behaviour.
func bar(in []float64) float64 {
	sum := 0.0
	for _, v := range in {
		sum = sum + v
	}
	return sum / float64(len(in))
}

// calcSuu returns the sum of squares of the values in in.
func calcSuu(in []float64) float64 {
	sum := 0.0
	for _, v := range in {
		sum = sum + (v * v)
	}
	return sum
}

// calcSuuu returns the sum of cubes of the values in in.
func calcSuuu(in []float64) float64 {
	sum := 0.0
	for _, v := range in {
		sum = sum + (v * v * v)
	}
	return sum
}

// calcSuv returns the sum of element-wise products in1[k]*in2[k].
// Both slices must have the same length.
func calcSuv(in1 []float64, in2 []float64) float64 {
	sum := 0.0
	for k := range in1 {
		sum = sum + in1[k]*in2[k]
	}
	return sum
}

// calcSuvv returns the sum of in1[k]*in2[k]^2.
// Both slices must have the same length.
func calcSuvv(in1 []float64, in2 []float64) float64 {
	sum := 0.0
	for k := range in1 {
		sum = sum + in1[k]*in2[k]*in2[k]
	}
	return sum
}

// calcU returns the values of in shifted by -bar (centered on the mean).
func calcU(in []float64, bar float64) []float64 {
	out := make([]float64, len(in))
	for k, v := range in {
		out[k] = v - bar
	}
	return out
}

// CalcLeastSquareCircleFit computes a least square fit circle for a list of 2d-coordinates.
// It takes the x and y coordinates as arguments. Obviously the two
// argument arrays must have the same length.
// The function returns three values: The x,y location of the circle center
// and the radius of the circle.
func CalcLeastSquareCircleFit(x []float64, y []float64) (float64, float64, float64) {
	N := len(x)
	xbar := bar(x)
	ybar := bar(y)
	u := calcU(x, xbar)
	v := calcU(y, ybar)
	suu := calcSuu(u)
	suv := calcSuv(u, v)
	svv := calcSuu(v)
	suuu := calcSuuu(u)
	svvv := calcSuuu(v)
	suvv := calcSuvv(u, v)
	svuu := calcSuvv(v, u)
	e4 := 0.5 * (suuu + suvv)
	e5 := 0.5 * (svvv + svuu)
	// Solve the 2x2 linear system
	//   suu*uc + suv*vc = e4
	//   suv*uc + svv*vc = e5
	// with Cramer's rule. The previous back-substitution form
	// vc = (e4 - uc*suu)/suv divided by suv, which is zero whenever the
	// centered x and y samples are uncorrelated (e.g. axis-symmetric point
	// sets on a perfect circle) and then produced NaN. The determinant form
	// only fails for genuinely degenerate inputs (collinear/identical points).
	det := suu*svv - suv*suv
	uc := (svv*e4 - suv*e5) / det
	vc := (suu*e5 - suv*e4) / det
	xc := uc + xbar
	yc := vc + ybar
	r := math.Sqrt(uc*uc + vc*vc + (suu+svv)/float64(N))
	return xc, yc, r
}
package intervals
import (
"fmt"
"math"
"sort"
"strconv"
"strings"
"github.com/mmcloughlin/random"
)
// Interval is the inclusive range of integers [lo, hi].
type Interval struct {
	lo uint64
	hi uint64
}

// Range builds the interval [l, h]. It panics when h < l.
func Range(l, h uint64) Interval {
	if h < l {
		panic("bad range")
	}
	return Interval{lo: l, hi: h}
}

// Single builds the interval containing only x.
func Single(x uint64) Interval {
	return Range(x, x)
}

// Bits returns the interval [0, 2^n-1].
func Bits(n uint) Interval {
	return OpenLeft((1 << n) - 1)
}

// OpenLeft returns the interval [0, h].
func OpenLeft(h uint64) Interval {
	return Range(0, h)
}

// OpenRight returns the interval [l, 2^64-1].
func OpenRight(l uint64) Interval {
	return Range(l, math.MaxUint64)
}

// Size returns the number of integers in the interval.
// NOTE(review): for the full interval [0, 2^64-1] this wraps to 0 — confirm
// callers never rely on the size of the full range.
func (i Interval) Size() uint64 {
	return i.hi + 1 - i.lo
}

// Single reports whether the interval contains exactly one integer.
func (i Interval) Single() bool {
	return i.lo == i.hi
}

// Contains reports whether x lies within the interval.
func (i Interval) Contains(x uint64) bool {
	return x >= i.lo && x <= i.hi
}

// String renders a single-integer interval as "n" and a wider one as "lo-hi".
func (i Interval) String() string {
	if i.Single() {
		return strconv.FormatUint(i.lo, 10)
	}
	return fmt.Sprintf("%d-%d", i.lo, i.hi)
}
// Overlaps returns true if any of the intervals overlap each other.
func Overlaps(is []Interval) bool {
	// A region covered by at least two intervals exists iff some pair overlaps.
	return len(thresholds(2, is)) > 0
}

// Simplify reduces a set of intervals to a minimal form that covers exactly
// the same integers.
func Simplify(is []Interval) []Interval {
	return thresholds(1, is)
}
// Set is a collection of intervals.
type Set struct {
	intervals []Interval
}

// NewSet builds a set from the union of the given intervals. The intervals
// are passed through Simplify, yielding a sorted non-overlapping form.
func NewSet(is ...Interval) *Set {
	return &Set{intervals: Simplify(is)}
}

// IntType returns the set of possible values of an n-bit integer.
func IntType(n uint) *Set {
	return NewSet(Bits(n))
}

// String renders the set as a comma-separated list of interval strings.
func (s Set) String() string {
	parts := make([]string, len(s.intervals))
	for idx, i := range s.intervals {
		parts[idx] = i.String()
	}
	return strings.Join(parts, ",")
}

// Contains reports whether x is contained in the set.
func (s Set) Contains(x uint64) bool {
	for _, i := range s.intervals {
		if i.Contains(x) {
			return true
		}
	}
	return false
}
// Subtract removes from s every integer contained in other.
func (s *Set) Subtract(other *Set) {
	// Intersect (threshold 2) the receiver with the complement of other.
	s.intervals = thresholds(2, s.intervals, complement(other.intervals))
}

// complement returns the "complement" of the intervals, i.e. the result of
// subtracting them from the full 64-bit interval [0, 2^64-1]. The input must
// be sorted, non-overlapping and simplified (as produced by Simplify).
func complement(is []Interval) []Interval {
	var c []Interval
	next := uint64(0) // lowest value not yet accounted for
	for _, i := range is {
		if i.lo > next {
			c = append(c, Range(next, i.lo-1))
		}
		if i.hi == math.MaxUint64 {
			// The interval reaches the top of the range: nothing remains
			// above it. (The previous code detected this case via i.hi+1
			// wrapping to 0, but that check also fired for empty input,
			// returning an empty complement instead of the full interval —
			// which made Subtract with an empty set erase the receiver.)
			return c
		}
		next = i.hi + 1
	}
	// Everything from next upward lies outside the input. For an empty input
	// this correctly yields the full interval [0, 2^64-1].
	return append(c, OpenRight(next))
}
// intervaledges converts intervals into sweep-line edges: a +1 edge at each
// interval's low endpoint and a -1 edge at its high endpoint.
func intervaledges(is []Interval) []edge {
	es := edges{}
	for _, i := range is {
		es = append(es, edge{x: i.lo, d: 1})
		es = append(es, edge{x: i.hi, d: -1})
	}
	return es
}
// thresholds sweeps over all interval endpoints and returns the inclusive
// ranges covered by at least thresh of the input intervals. thresh=1 gives
// the simplified union (Simplify); thresh=2 gives regions covered at least
// twice (used by Overlaps and, with a complement, by Subtract).
func thresholds(thresh int, intervalsets ...[]Interval) []Interval {
	es := []edge{}
	for _, is := range intervalsets {
		es = append(es, intervaledges(is)...)
	}
	// Sorted by position; at equal positions opening (+1) edges come first
	// (see edges.Less) so intervals sharing an endpoint merge correctly.
	sort.Sort(edges(es))
	n := 0          // current coverage count
	inside := false // whether the sweep is inside a >=thresh region
	result := []Interval{}
	var start uint64
	for _, e := range es {
		n += e.d
		if !inside && n >= thresh {
			start = e.x
			inside = true
		} else if inside && n < thresh {
			result = append(result, Range(start, e.x))
			inside = false
		}
	}
	return result
}
// edge marks an interval endpoint on the number line: d is +1 at an
// interval's low end and -1 at its high end.
type edge struct {
	x uint64
	d int
}

// edges implements sort.Interface over a slice of edges.
type edges []edge

func (e edges) Len() int { return len(e) }

func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }

// Less orders edges by position; at equal positions, opening (+1) edges sort
// before closing (-1) edges.
func (e edges) Less(i, j int) bool {
	if e[i].x != e[j].x {
		return e[i].x < e[j].x
	}
	return e[i].d > e[j].d
}
// Random returns a random element of the collection. Assumes the collection
// contains non-overlapping intervals. Panics if s is empty.
func (s Set) Random() uint64 {
	return s.RandomWithGenerator(random.New())
}
// RandomWithGenerator is like Random() but allows you to control the random
// generator.
//
// It draws a uniform index r in [0, total size) and maps it onto the concrete
// intervals through a cumulative-size table: an r falling into the k-th
// cumulative bucket is shifted by that bucket's delta so the result lands in
// [lo, hi] of the k-th interval.
func (s Set) RandomWithGenerator(rnd random.Interface) uint64 {
	if len(s.intervals) == 0 {
		panic("empty set")
	}
	type step struct {
		upper uint64 // cumulative size up to and including this interval
		delta uint64 // shift mapping a cumulative index into the interval
	}
	steps := []step{}
	var cuml uint64
	for _, i := range s.intervals {
		cuml += i.Size()
		steps = append(steps, step{
			upper: cuml,
			delta: i.hi - cuml + 1,
		})
	}
	r := randuint64n(rnd, cuml)
	for _, step := range steps {
		if r < step.upper {
			return r + step.delta
		}
	}
	panic("unreachable")
}
// randuint64n returns a random uint64 in [0,n) by rejection sampling against
// the smallest all-ones bitmask that covers n.
// NOTE(review): n must be > 0 — with n == 0 the rejection loop never
// terminates. Random() guards against the empty set, but a set whose total
// size wraps to 0 (the full 64-bit range) would hit this; confirm upstream.
func randuint64n(rnd random.Interface, n uint64) uint64 {
	mask := ^uint64(0)
	for mask > n {
		mask >>= 1
	}
	mask = (mask << 1) | uint64(1)
	for {
		r := randuint64(rnd) & mask
		if r < n {
			return r
		}
	}
}
// randuint64 assembles a full 64-bit random value from two Int63 draws,
// since Int63 provides only 63 random bits per call: the first draw's top
// 32 bits become the low half, the second draw's low 32 bits the high half.
func randuint64(rnd random.Interface) uint64 {
	return uint64(rnd.Int63())>>31 | uint64(rnd.Int63())<<32
}
package permutations
import (
"fmt"
)
// EnumerateInterval enumerates the integers in the inclusive interval
// [low, high], s. sicp chapter 2.2.3. When low > high the interval is empty
// and nil is returned; the previous version panicked on the negative
// capacity it passed to make.
func EnumerateInterval(low, high int) []int {
	if low > high {
		return nil
	}
	size := high - low + 1 // renamed from `len`, which shadowed the builtin
	res := make([]int, 0, size)
	for i := low; i <= high; i++ {
		res = append(res, i)
	}
	return res
}
// Faculty calculates the value of faculty(n) (the factorial of n).
// It panics for negative n and when the result would overflow int64.
func Faculty(n int) int64 {
	if n < 0 {
		panic("underflow for n=" + fmt.Sprint(n) + ", out of range of definition")
	}
	if n == 0 {
		return 1
	}
	// Any partial product below limit can safely be multiplied by a factor
	// of at most n without exceeding math.MaxInt64.
	limit := int64(^uint64(0)>>1) / int64(n)
	product := int64(1)
	for i := int64(1); i <= int64(n); i++ {
		if product >= limit {
			panic("overflow at i=" + fmt.Sprint(i) + ", out of range of implementation")
		}
		product *= i
	}
	return product
}
// GeneratePermutations generates the permutations of an array
// of values. The rows of the array are not sorted.
// This is the fastest algorithm in this package.
// The result has Faculty(len(values)) rows that all share one flat backing
// array, filled in by permutate.
func GeneratePermutations(values []interface{}) [][]interface{} {
	n := len(values)
	m := Faculty(n)
	n64 := int64(n)
	// example from https://golang.org/doc/effective_go.html#two_dimensional_slices
	// Allocate the top-level slice, the same as before.
	res := make([][]interface{}, m) // One row per permutation
	// Allocate one large slice to hold the whole array of all the permutation values.
	vals := make([]interface{}, m*n64) // Has type []interface{} even though array is [][]interface{}.
	// Loop over the rows, slicing each row from the front of the remaining values slice.
	for i := range res {
		res[i], vals = vals[:n], vals[n:]
	}
	permutate(values, res, m, n64)
	return res
}
// permutate generates m permutations of an array of size n.
// It works recursively. Each recursive call generates the
// previous permutation of values[:n-1].
// The previous permutation is generated in the result array
// at the final position, so there is no more operation on
// its values.
func permutate(values []interface{}, res [][]interface{}, m, n int64) {
	// check for last permutation
	if n <= 1 {
		res[0][0] = values[0]
		return
	}
	// generate inner permutation of the first n-1 values into the first
	// mprev rows
	mprev := m / n
	nprev := n - 1
	permutate(values, res, mprev, nprev)
	// set last value to current at all rows of inner permutation
	current := values[nprev]
	zero := int64(0) // zero as 64 bit integer
	for i := zero; i < mprev; i++ {
		res[i][nprev] = current
	}
	// generate remaining rows by inserting current at
	// all positions except the last position
	// see Knuth, The art of computer programming
	// Volume 4, Fascicle 2, 7.2.1.2, (3), p. 41, 2005
	r := mprev // row index begins with first after inner permutation
	for i := zero; i < mprev; i++ { // source is each row of inner permutation
		for j := zero; j < nprev; j++ { // loop over insertions
			l := zero // insertion position
			for k := zero; k < nprev; k++ { // loop over all values of row
				if j == l { // insert
					res[r][l] = current
					l++
				}
				// copy from inner
				res[r][l] = res[i][k]
				l++
			}
			r++ // advance result row
		}
	}
}
// GeneratePermutationsSorted generate an array with all permutations
// of an array of values and maintain the sort order of the array.
// Like GeneratePermutations, all rows share one flat backing array; the
// recursive fill is done by addPermutationsOfSubArray.
func GeneratePermutationsSorted(values []interface{}) [][]interface{} {
	n := len(values)
	m := Faculty(n)
	n64 := int64(n)
	// example from https://golang.org/doc/effective_go.html#two_dimensional_slices
	// Allocate the top-level slice, the same as before.
	res := make([][]interface{}, m) // One row per permutation
	// Allocate one large slice to hold all the permutation values.
	vals := make([]interface{}, m*n64) // Has type []interface{} even though array is [][]interface{}.
	// Loop over the rows, slicing each row from the front of the remaining values slice.
	for i := range res {
		res[i], vals = vals[:n], vals[n:]
	}
	addPermutationsOfSubArray(values, res, m, n64)
	return res
}
// addPermutationsOfSubArray adds the permutations of the sub array to res.
// It recursively fills the LAST mprev rows of res with the sorted
// permutations of values[:n-1] (shifted one column right, with values[n-1]
// in column 0), then derives the earlier row blocks backwards, one block per
// smaller leading value, by swapping that value with the current leading one.
func addPermutationsOfSubArray(values []interface{}, res [][]interface{}, m, n int64) {
	if n == 1 {
		res[0][0] = values[0]
		return
	}
	zero := int64(0) // zero as 64 bit integer
	mprev := m / n
	nprev := n - 1
	subres := make([][]interface{}, mprev) // One row per permutation
	// set reference of rows in res where subres should be stored
	// (columns 1..n-1 of the last mprev rows)
	for i, j := zero, m-mprev; i < mprev; i, j = i+1, j+1 {
		subres[i] = res[j][1:]
	}
	addPermutationsOfSubArray(values, subres, mprev, nprev)
	// set first value to current at all rows of inner permutation
	current := values[nprev]
	for j := m - mprev; j < m; j++ {
		res[j][0] = current
	}
	// Generate previous rows, a set for each previous value,
	// walking r (target row) and k (source row) backwards from the block
	// that was just filled.
	r := m - mprev
	r--
	k := m
	k--
	nprev--
	for nb := nprev; nb >= 0; nb-- { // loop over sets
		next := values[nb]
		for i := zero; i < mprev; i++ { // loop rows of block
			for l := zero; l < n; l++ {
				switch {
				case l == 0:
					res[r][l] = next
				case res[k][l] == next:
					res[r][l] = current
				default:
					res[r][l] = res[k][l]
				}
			}
			r--
			k--
		}
		current = next
	}
}
package nlp
import (
"io"
"math"
"github.com/james-bowman/sparse"
"gonum.org/v1/gonum/mat"
)
// TfidfTransformer takes a raw term document matrix and weights each raw term frequency
// value depending upon how commonly it occurs across all documents within the corpus.
// For example a very commonly occurring word like `the` is likely to occur in all documents
// and so would be weighted down.
// More precisely, TfidfTransformer applies a tf-idf algorithm to the matrix where each
// term frequency is multiplied by the inverse document frequency. Inverse document
// frequency is calculated as log(n/df) where df is the number of documents in which the
// term occurs and n is the total number of documents within the corpus. We add 1 to both n
// and df before division to prevent division by zero.
type TfidfTransformer struct {
	// transform is the diagonal matrix of per-term IDF weights built by Fit.
	transform *sparse.DIA
}
// NewTfidfTransformer constructs a new TfidfTransformer. Fit (or FitTransform)
// must be called before Transform.
func NewTfidfTransformer() *TfidfTransformer {
	return &TfidfTransformer{}
}
// Fit takes a training term document matrix, counts term occurrences across all documents
// and constructs an inverse document frequency transform to apply to matrices in subsequent
// calls to Transform().
func (t *TfidfTransformer) Fit(matrix mat.Matrix) Transformer {
	// NOTE: this inner t shadows the receiver; it is only used for ToCSR().
	if t, isTypeConv := matrix.(sparse.TypeConverter); isTypeConv {
		matrix = t.ToCSR()
	}
	// Rows (m) are terms, columns (n) are documents.
	m, n := matrix.Dims()
	weights := make([]float64, m)
	var df int
	if csr, ok := matrix.(*sparse.CSR); ok {
		// Fast path: for a CSR matrix, the number of non-zeros in row i is
		// exactly the document frequency of term i.
		for i := 0; i < m; i++ {
			weights[i] = math.Log(float64(1+n) / float64(1+csr.RowNNZ(i)))
		}
	} else {
		// Dense fallback: count non-zero entries per row explicitly.
		for i := 0; i < m; i++ {
			df = 0
			for j := 0; j < n; j++ {
				if matrix.At(i, j) != 0 {
					df++
				}
			}
			weights[i] = math.Log(float64(1+n) / float64(1+df))
		}
	}
	// build a diagonal matrix from array of term weighting values for subsequent
	// multiplication with term document matrices
	t.transform = sparse.NewDIA(m, m, weights)
	return t
}
// Transform applies the inverse document frequency (IDF) transform by multiplying
// each term frequency by its corresponding IDF value. This has the effect of weighting
// each term frequency according to how often it appears across the whole document corpus
// so that naturally frequent occurring words are given less weight than uncommon ones.
// The returned matrix is a sparse matrix type.
func (t *TfidfTransformer) Transform(matrix mat.Matrix) (mat.Matrix, error) {
	if t, isTypeConv := matrix.(sparse.TypeConverter); isTypeConv {
		matrix = t.ToCSR()
	}
	var product sparse.CSR
	// simply multiply the matrix by our idf transform (the diagonal matrix of term weights)
	product.Mul(t.transform, matrix)
	// todo: possibly L2 norm matrix to remove any bias caused by documents of different
	// lengths where longer documents naturally have more words and so higher word counts
	return &product, nil
}
// FitTransform is exactly equivalent to calling Fit() followed by Transform() on the
// same matrix. This is a convenience where separate training data is not being
// used to fit the model i.e. the model is fitted on the fly to the test data.
// The returned matrix is a sparse matrix type.
func (t *TfidfTransformer) FitTransform(matrix mat.Matrix) (mat.Matrix, error) {
	if t, isTypeConv := matrix.(sparse.TypeConverter); isTypeConv {
		matrix = t.ToCSR()
	}
	return t.Fit(matrix).Transform(matrix)
}
// Save binary serialises the model and writes it into w. This is useful for persisting
// a trained model to disk so that it may be loaded (using the Load() method)in another
// context (e.g. production) for reproducible results.
func (t TfidfTransformer) Save(w io.Writer) error {
	_, err := t.transform.MarshalBinaryTo(w)
	return err
}
// Load binary deserialises the previously serialised model into the receiver. This is
// useful for loading a previously trained and saved model from another context
// (e.g. offline training) for use within another context (e.g. production) for
// reproducible results. Load should only be performed with trusted data.
func (t *TfidfTransformer) Load(r io.Reader) (mat.Matrix, error) {
var model sparse.DIA
if _, err := model.UnmarshalBinaryFrom(r); err != nil {
return model, err
}
t.transform = &model
return t.transform, nil
} | weightings.go | 0.762689 | 0.690813 | weightings.go | starcoder |
package fwncs
import (
"net/http"
"regexp"
"strings"
)
// Param is a single URL parameter, consisting of a key and a value.
type Param struct {
	Key   string
	Value string
}

// Params is a Param-slice, as returned by the router.
// The slice is ordered, the first URL parameter is also the first slice value.
// It is therefore safe to read values by the index.
type Params []Param

// Get returns the value of the first Param whose key matches the given name,
// together with a flag reporting whether a match was found.
func (ps Params) Get(name string) (string, bool) {
	for _, p := range ps {
		if p.Key == name {
			return p.Value, true
		}
	}
	return "", false
}

// ByName returns the value of the first Param whose key matches the given
// name, or the empty string when no such Param exists.
func (ps Params) ByName(name string) string {
	value, _ := ps.Get(name)
	return value
}

// Values returns the parameter values in slice order.
func (ps Params) Values() []string {
	out := make([]string, 0, len(ps))
	for _, p := range ps {
		out = append(out, p.Value)
	}
	return out
}
// locationNode is one registered route pattern: reg is the compiled matching
// regex (nil for exact-match locations), originalPath is the pattern as
// registered (with its "= "/"~ " modifier stripped), replacePath is the
// quoted form still carrying ":name" parameter markers, and index is the
// position of the pattern in the registered path list.
type locationNode struct {
	reg *regexp.Regexp
	originalPath string
	replacePath string
	index int
}
// locationNodes is a list of patterns sharing a match strategy.
type locationNodes []locationNode
// paramNode is the result of a successful match: the index of the matched
// pattern, the URL parameters extracted from the request path, and the
// pattern string that matched.
type paramNode struct {
	index int
	params *Params
	matchPath string
}
// Match scans every pattern against rawURI and returns the best candidate,
// preferring the pattern with the most captured path segments and, on ties,
// the longer pattern string. Returns nil when nothing matches.
func (nodes locationNodes) Match(rawURI string) (param *paramNode) {
	var index = -1
	var max int = 0
	var groups [][]string
	for idx := range nodes {
		node := nodes[idx]
		v := node.reg.FindAllStringSubmatch(rawURI, -1)
		if v != nil {
			// The first match is taken unconditionally.
			if index == -1 {
				index = idx
				groups = v
			} else {
				// More captured groups means more path segments matched,
				// so prefer this candidate. The final capture must be
				// non-empty — presumably to skip patterns whose trailing
				// wildcard matched nothing; confirm against the tests.
				value := v[0]
				if len(value[1:]) > max && value[len(value)-1] != "" {
					max = len(v[0][1:])
					index = idx
					groups = v
				} else {
					// Tie on group count: prefer the longer pattern string.
					if value[len(value)-1] != "" {
						oldPath := nodes[index].replacePath
						nowPath := node.replacePath
						if len(nowPath) > len(oldPath) {
							index = idx
							groups = v
						}
					}
				}
			}
		}
	}
	if index > -1 {
		node := nodes[index]
		originalUrl := node.replacePath
		matchValue := groups[0][1:]
		// Recover the parameter names from the ":name" markers kept in
		// replacePath; capture k corresponds to marker k.
		idxs := namedParam.FindAllStringIndex(originalUrl, -1)
		params := make(Params, len(idxs))
		for i, idx := range idxs {
			start := idx[0]
			end := idx[1]
			value := matchValue[i]
			// Drop a trailing slash captured by a greedy-ish group
			// (lastChar is a package helper defined elsewhere).
			if value != "" && lastChar(value) == '/' {
				value = value[0 : len(value)-1]
			}
			params[i] = Param{
				Key: originalUrl[start+1 : end],
				Value: value,
			}
		}
		param = &paramNode{
			index: node.index,
			params: &params,
			matchPath: node.originalPath,
		}
		return
	}
	return
}
// nodelocation groups registered patterns by match strategy: full holds
// exact-match ("= ") patterns, prefix holds prioritized ("~ ") regex
// patterns, and nodes holds all remaining regex patterns.
type nodelocation struct {
	nodes locationNodes
	full locationNodes
	prefix locationNodes
}
var namedParam, _ = regexp.Compile(":[a-zA-Z0-9]+")
// locationRegex compiles the registered path patterns into a nodelocation.
// A leading "= " marks an exact-match location (no regex compiled); a
// leading "~ " marks a prioritized prefix location. All other paths are
// quoted, "*" wildcards and ":name" markers are rewritten to "(.*?)" capture
// groups, and an anchored regex is compiled for matching.
func locationRegex(paths []string) nodelocation {
	const (
		full = "= "
		prefix = "~ "
	)
	var locations locationNodes
	var fullLocations locationNodes
	var prefixLocations locationNodes
	for idx, path := range paths {
		location := locationNode{
			index: idx,
		}
		// Use the declared constants for the prefix checks; the previous
		// code repeated the "= " / "~ " literals here, inviting drift.
		if strings.HasPrefix(path, full) {
			path = strings.Replace(path, full, "", 1)
			location.originalPath = path
			// Exact-match locations are compared by string equality in
			// matchURL; location.reg stays nil.
			fullLocations = append(fullLocations, location)
			continue
		}
		prefixFlg := strings.HasPrefix(path, prefix)
		if prefixFlg {
			path = strings.Replace(path, prefix, "", 1)
		}
		location.originalPath = path
		// Quote everything, then undo quoting for the characters that carry
		// routing meaning: "*" becomes a parameter marker, "." stays
		// literal-as-regex, and a leading "^" keeps its anchor role.
		path = regexp.QuoteMeta(path)
		path = strings.Replace(path, `\*`, ":", -1)
		path = strings.Replace(path, `\.`, ".", -1)
		if strings.HasPrefix(path, `\^`) {
			path = strings.Replace(path, `\^`, "^", -1)
		}
		// replacePath keeps the ":name" markers so Match can recover
		// parameter names later.
		location.replacePath = path
		path = namedParam.ReplaceAllString(path, "(.*?)")
		path = path + "$"
		location.reg = regexp.MustCompile(path)
		if prefixFlg {
			prefixLocations = append(prefixLocations, location)
			continue
		}
		locations = append(locations, location)
	}
	return nodelocation{
		nodes: locations,
		full: fullLocations,
		prefix: prefixLocations,
	}
}
/*
Route selection priority:
 1. Pick an exact-match ("= ") location and stop.
 2. Otherwise, the longest regex match among the prioritized ("~ ") locations.
 3. Otherwise, the longest regex match among the remaining locations.
*/
func matchRequestURL(patterns nodelocation, req *http.Request) (node *paramNode) {
	rawURI := req.URL.Path
	// A path not starting with "/" — presumably an absolute-form request
	// URI; strip the scheme://host prefix if present (confirm against the
	// router's request handling).
	if rawURI != "" && rawURI[0] != '/' {
		prefix := ""
		if req.URL.Scheme != "" {
			prefix = req.URL.Scheme + "://"
		}
		if req.URL.Host != "" {
			prefix += req.URL.Host // host or host:port
		}
		if prefix != "" {
			rawURI = strings.TrimPrefix(rawURI, prefix)
		}
	}
	return matchURL(patterns, rawURI)
}
func matchURL(patterns nodelocation, rawURI string) (node *paramNode) {
for _, location := range patterns.full {
if strings.EqualFold(rawURI, location.originalPath) {
node = ¶mNode{
index: location.index,
params: &Params{},
matchPath: location.originalPath,
}
return
}
}
node = patterns.prefix.Match(rawURI)
if node == nil {
node = patterns.nodes.Match(rawURI)
}
return
} | tree.go | 0.542136 | 0.423995 | tree.go | starcoder |
package extstr
import (
"math/rand"
"strconv"
"strings"
)
// Join concatenates the elements of its first argument to create a single
// string. The separator string sep is placed between elements in the
// resulting string.
//
// The values are formatted and written directly into a strings.Builder,
// avoiding the intermediate []string (and the package Append helper) the
// previous implementation allocated.
func Join(elems []int64, sep string) string {
	switch len(elems) {
	case 0:
		return ""
	case 1:
		return strconv.FormatInt(elems[0], 10)
	}
	var b strings.Builder
	// 20 is the maximum decimal width of an int64 (including the sign).
	b.Grow(len(sep)*(len(elems)-1) + 20*len(elems))
	b.WriteString(strconv.FormatInt(elems[0], 10))
	for _, e := range elems[1:] {
		b.WriteString(sep)
		b.WriteString(strconv.FormatInt(e, 10))
	}
	return b.String()
}
// JoinInt concatenates the elements of its first argument to create a single
// string. The separator string sep is placed between elements in the
// resulting string.
//
// The values are formatted and written directly into a strings.Builder,
// avoiding the intermediate []string (and the package Append helper) the
// previous implementation allocated.
func JoinInt(elems []int, sep string) string {
	switch len(elems) {
	case 0:
		return ""
	case 1:
		return strconv.Itoa(elems[0])
	}
	var b strings.Builder
	// 20 is the maximum decimal width of a 64-bit int (including the sign).
	b.Grow(len(sep)*(len(elems)-1) + 20*len(elems))
	b.WriteString(strconv.Itoa(elems[0]))
	for _, e := range elems[1:] {
		b.WriteString(sep)
		b.WriteString(strconv.Itoa(e))
	}
	return b.String()
}
// Split slices s into all substrings separated by sep and returns the int64
// values parsed from those substrings. Surrounding whitespace is trimmed and
// substrings that do not parse as int64 are silently skipped.
func Split(s, sep string) []int64 {
	if s == "" {
		return []int64{}
	}
	parts := strings.Split(s, sep)
	out := make([]int64, 0, len(parts))
	for _, p := range parts {
		if n, err := strconv.ParseInt(strings.TrimSpace(p), 10, 64); err == nil {
			out = append(out, n)
		}
	}
	return out
}
// SplitInt slices s into all substrings separated by sep and returns the int
// values parsed from those substrings. Surrounding whitespace is trimmed and
// substrings that do not parse as int are silently skipped.
func SplitInt(s, sep string) []int {
	if s == "" {
		return []int{}
	}
	parts := strings.Split(s, sep)
	out := make([]int, 0, len(parts))
	for _, p := range parts {
		if n, err := strconv.Atoi(strings.TrimSpace(p)); err == nil {
			out = append(out, n)
		}
	}
	return out
}
// Shuffle pseudo-randomizes the order of the characters (runes) of str using
// the default math/rand Source and returns the shuffled string.
func Shuffle(str string) string {
	chars := []rune(str)
	rand.Shuffle(len(chars), func(a, b int) {
		chars[a], chars[b] = chars[b], chars[a]
	})
	return string(chars)
}
package internal
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"math"
"reflect"
"sort"
"github.com/lyraproj/dgo/dgo"
"github.com/lyraproj/dgo/util"
"gopkg.in/yaml.v3"
)
type (
array struct {
slice []dgo.Value
typ dgo.ArrayType
frozen bool
}
// defaultArrayType is the unconstrained array type
defaultArrayType int
// sizedArrayType represents array with element type constraint and a size constraint
sizedArrayType struct {
elementType dgo.Type
min int
max int
}
// tupleType represents an array with an exact number of ordered element types.
tupleType array
// exactArrayType only matches the array that it represents
exactArrayType array
)
// DefaultArrayType is the unconstrained Array type
const DefaultArrayType = defaultArrayType(0)
func arrayTypeOne(args []interface{}) dgo.ArrayType {
switch a0 := Value(args[0]).(type) {
case dgo.Type:
return newArrayType(a0, 0, math.MaxInt64)
case dgo.Integer:
return newArrayType(nil, int(a0.GoInt()), math.MaxInt64)
default:
panic(illegalArgument(`Array`, `Type or Integer`, args, 0))
}
}
func arrayTypeTwo(args []interface{}) dgo.ArrayType {
a1, ok := Value(args[1]).(dgo.Integer)
if !ok {
panic(illegalArgument(`Array`, `Integer`, args, 1))
}
switch a0 := Value(args[0]).(type) {
case dgo.Type:
return newArrayType(a0, int(a1.GoInt()), math.MaxInt64)
case dgo.Integer:
return newArrayType(nil, int(a0.GoInt()), int(a1.GoInt()))
default:
panic(illegalArgument(`Array`, `Type or Integer`, args, 0))
}
}
func arrayTypeThree(args []interface{}) dgo.ArrayType {
a0, ok := Value(args[0]).(dgo.Type)
if !ok {
panic(illegalArgument(`Array`, `Type`, args, 0))
}
a1, ok := Value(args[1]).(dgo.Integer)
if !ok {
panic(illegalArgument(`Array`, `Integer`, args, 1))
}
a2, ok := Value(args[2]).(dgo.Integer)
if !ok {
panic(illegalArgument(`ArrayType`, `Integer`, args, 2))
}
return newArrayType(a0, int(a1.GoInt()), int(a2.GoInt()))
}
// ArrayType returns a type that represents an Array value
func ArrayType(args ...interface{}) dgo.ArrayType {
switch len(args) {
case 0:
return DefaultArrayType
case 1:
return arrayTypeOne(args)
case 2:
return arrayTypeTwo(args)
case 3:
return arrayTypeThree(args)
default:
panic(fmt.Errorf(`illegal number of arguments for Array. Expected 0 - 3, got %d`, len(args)))
}
}
func newArrayType(elementType dgo.Type, min, max int) dgo.ArrayType {
if min < 0 {
min = 0
}
if max < 0 {
max = 0
}
if max < min {
t := max
max = min
min = t
}
if elementType == nil {
elementType = DefaultAnyType
}
if min == 0 && max == math.MaxInt64 && elementType == DefaultAnyType {
// Unbounded
return DefaultArrayType
}
return &sizedArrayType{elementType: elementType, min: min, max: max}
}
// Assignable reports whether values of the other type are assignable to this
// (unconstrained) array type: every array type qualifies directly; anything
// else is routed through the reverse-assignability check.
func (t defaultArrayType) Assignable(other dgo.Type) bool {
	switch other.(type) {
	case defaultArrayType, *tupleType, *exactArrayType, *sizedArrayType:
		return true
	}
	return CheckAssignableTo(nil, other, t)
}

// ElementType returns the element type; the unconstrained array allows any.
func (t defaultArrayType) ElementType() dgo.Type {
	return DefaultAnyType
}

// Equals reports identity: there is only one defaultArrayType value.
func (t defaultArrayType) Equals(other interface{}) bool {
	return t == other
}

// HashCode returns a constant hash derived from the type identifier.
func (t defaultArrayType) HashCode() int {
	return int(dgo.TiArray)
}

// Instance reports whether value is any dgo.Array.
func (t defaultArrayType) Instance(value interface{}) bool {
	_, ok := value.(dgo.Array)
	return ok
}

// Max returns the (unbounded) maximum size.
func (t defaultArrayType) Max() int {
	return math.MaxInt64
}

// Min returns the minimum size, zero for the unconstrained type.
func (t defaultArrayType) Min() int {
	return 0
}

// String returns the canonical type string.
func (t defaultArrayType) String() string {
	return TypeString(t)
}

// Type returns the meta type of this type.
func (t defaultArrayType) Type() dgo.Type {
	return &metaType{t}
}

// TypeIdentifier returns dgo.TiArray.
func (t defaultArrayType) TypeIdentifier() dgo.TypeIdentifier {
	return dgo.TiArray
}

// Unbounded is always true for the unconstrained array type.
func (t defaultArrayType) Unbounded() bool {
	return true
}
func (t *sizedArrayType) Assignable(other dgo.Type) bool {
return Assignable(nil, t, other)
}
func (t *sizedArrayType) DeepAssignable(guard dgo.RecursionGuard, other dgo.Type) bool {
switch ot := other.(type) {
case defaultArrayType:
return false // lacks size
case *sizedArrayType:
return t.min <= ot.min && ot.max <= t.max && t.elementType.Assignable(ot.elementType)
case *tupleType:
l := len(ot.slice)
return t.min <= l && l <= t.max && allAssignable(guard, t.elementType, ot.slice)
case *exactArrayType:
l := len(ot.slice)
return t.min <= l && l <= t.max && t.elementType.Assignable(ot.ElementType())
}
return CheckAssignableTo(guard, other, t)
}
func (t *sizedArrayType) ElementType() dgo.Type {
return t.elementType
}
func (t *sizedArrayType) Equals(other interface{}) bool {
return equals(nil, t, other)
}
func (t *sizedArrayType) deepEqual(seen []dgo.Value, other deepEqual) bool {
if ot, ok := other.(*sizedArrayType); ok {
return t.min == ot.min && t.max == ot.max && equals(seen, t.elementType, ot.elementType)
}
return false
}
func (t *sizedArrayType) HashCode() int {
return deepHashCode(nil, t)
}
func (t *sizedArrayType) deepHashCode(seen []dgo.Value) int {
h := int(dgo.TiArray)
if t.min > 0 {
h = h*31 + t.min
}
if t.max < math.MaxInt64 {
h = h*31 + t.max
}
if DefaultAnyType != t.elementType {
h = h*31 + deepHashCode(seen, t.elementType)
}
return h
}
func (t *sizedArrayType) Instance(value interface{}) bool {
return Instance(nil, t, value)
}
func (t *sizedArrayType) DeepInstance(guard dgo.RecursionGuard, value interface{}) bool {
if ov, ok := value.(*array); ok {
l := len(ov.slice)
return t.min <= l && l <= t.max && allInstance(guard, t.elementType, ov.slice)
}
return false
}
func (t *sizedArrayType) Max() int {
return t.max
}
func (t *sizedArrayType) Min() int {
return t.min
}
func (t *sizedArrayType) Resolve(ap dgo.AliasProvider) {
t.elementType = ap.Replace(t.elementType)
}
func (t *sizedArrayType) String() string {
return TypeString(t)
}
func (t *sizedArrayType) Type() dgo.Type {
return &metaType{t}
}
func (t *sizedArrayType) TypeIdentifier() dgo.TypeIdentifier {
return dgo.TiArray
}
func (t *sizedArrayType) Unbounded() bool {
return t.min == 0 && t.max == math.MaxInt64
}
func (t *exactArrayType) Assignable(other dgo.Type) bool {
return Assignable(nil, t, other)
}
func (t *exactArrayType) DeepAssignable(guard dgo.RecursionGuard, other dgo.Type) bool {
es := t.slice
switch ot := other.(type) {
case defaultArrayType:
return false // lacks size
case *sizedArrayType:
l := len(es)
return ot.min == l && ot.max == l && assignableToAll(guard, ot.elementType, es)
case *tupleType:
os := ot.slice
l := len(es)
if l != len(os) {
return false
}
for i := range es {
if !Assignable(guard, es[i].Type(), os[i].(dgo.Type)) {
return false
}
}
return true
case *exactArrayType:
return sliceEquals(nil, es, ot.slice)
}
return CheckAssignableTo(guard, other, t)
}
func (t *exactArrayType) ElementType() dgo.Type {
switch len(t.slice) {
case 0:
return DefaultAnyType
case 1:
return t.slice[0].Type()
}
return (*allOfValueType)(t)
}
func (t *exactArrayType) ElementTypes() dgo.Array {
es := t.slice
ts := make([]dgo.Value, len(es))
for i := range es {
ts[i] = es[i].Type()
}
return &array{slice: ts, frozen: true}
}
func (t *exactArrayType) Equals(other interface{}) bool {
if ot, ok := other.(*exactArrayType); ok {
return (*array)(t).Equals((*array)(ot))
}
return false
}
func (t *exactArrayType) HashCode() int {
return (*array)(t).HashCode()*7 + int(dgo.TiArrayExact)
}
func (t *exactArrayType) Instance(value interface{}) bool {
if ot, ok := value.(*array); ok {
return (*array)(t).Equals(ot)
}
return false
}
func (t *exactArrayType) Max() int {
return len(t.slice)
}
func (t *exactArrayType) Min() int {
return len(t.slice)
}
func (t *exactArrayType) String() string {
return TypeString(t)
}
func (t *exactArrayType) Value() dgo.Value {
a := (*array)(t)
return a
}
func (t *exactArrayType) Type() dgo.Type {
return &metaType{t}
}
func (t *exactArrayType) TypeIdentifier() dgo.TypeIdentifier {
return dgo.TiArrayExact
}
func (t *exactArrayType) Unbounded() bool {
return false
}
// DefaultTupleType is the unconstrained Tuple type
var DefaultTupleType = &tupleType{}
// TupleType creates a new TupleTupe based on the given types
func TupleType(types []dgo.Type) dgo.TupleType {
l := len(types)
if l == 0 {
return DefaultTupleType
}
es := make([]dgo.Value, l)
for i := range types {
es[i] = types[i]
}
return &tupleType{slice: es, frozen: true}
}
func (t *tupleType) Assignable(other dgo.Type) bool {
return Assignable(nil, t, other)
}
func (t *tupleType) deepAssignable1(guard dgo.RecursionGuard, ot *tupleType) bool {
es := t.slice
os := ot.slice
if len(os) != len(es) {
return false
}
for i := range es {
if !Assignable(guard, es[i].(dgo.Type), os[i].(dgo.Type)) {
return false
}
}
return true
}
func (t *tupleType) deepAssignable2(guard dgo.RecursionGuard, ot *exactArrayType) bool {
es := t.slice
os := ot.slice
if len(os) != len(es) {
return false
}
for i := range es {
if !Instance(guard, es[i].(dgo.Type), os[i]) {
return false
}
}
return true
}
func (t *tupleType) deepAssignable3(guard dgo.RecursionGuard, ot *sizedArrayType) bool {
es := t.slice
if ot.Min() == len(es) && ot.Max() == len(es) {
et := ot.ElementType()
for i := range es {
if !Assignable(guard, es[i].(dgo.Type), et) {
return false
}
}
}
return true
}
func (t *tupleType) DeepAssignable(guard dgo.RecursionGuard, other dgo.Type) bool {
dflt := len(t.slice) == 0
switch ot := other.(type) {
case defaultArrayType:
return dflt
case *tupleType:
if dflt {
return true
}
return t.deepAssignable1(guard, ot)
case *exactArrayType:
if dflt {
return true
}
return t.deepAssignable2(guard, ot)
case *sizedArrayType:
if dflt {
return true
}
return t.deepAssignable3(guard, ot)
}
return CheckAssignableTo(guard, other, t)
}
func (t *tupleType) ElementType() dgo.Type {
switch len(t.slice) {
case 0:
return DefaultAnyType
case 1:
return t.slice[0].(dgo.Type)
default:
return (*allOfType)(t)
}
}
func (t *tupleType) ElementTypes() dgo.Array {
return (*array)(t)
}
func (t *tupleType) Equals(other interface{}) bool {
if ot, ok := other.(*tupleType); ok {
return (*array)(t).Equals((*array)(ot))
}
return false
}
func (t *tupleType) HashCode() int {
return (*array)(t).HashCode()*7 + int(dgo.TiTuple)
}
func (t *tupleType) Instance(value interface{}) bool {
return Instance(nil, t, value)
}
func (t *tupleType) DeepInstance(guard dgo.RecursionGuard, value interface{}) bool {
if ov, ok := value.(*array); ok {
es := t.slice
if len(es) == 0 {
return true
}
s := ov.slice
if len(s) == len(es) {
for i := range es {
if !Instance(guard, es[i].(dgo.Type), s[i]) {
return false
}
}
return true
}
}
return false
}
func (t *tupleType) Max() int {
if l := len(t.slice); l > 0 {
return l
}
return math.MaxInt64
}
func (t *tupleType) Min() int {
return len(t.slice)
}
func (t *tupleType) Resolve(ap dgo.AliasProvider) {
resolveSlice(t.slice, ap)
}
func (t *tupleType) String() string {
return TypeString(t)
}
func (t *tupleType) Type() dgo.Type {
return &metaType{t}
}
func (t *tupleType) TypeIdentifier() dgo.TypeIdentifier {
return dgo.TiTuple
}
func (t *tupleType) Unbounded() bool {
return len(t.slice) == 0
}
// Array creates a new frozen array that contains a copy of the given slice
func Array(values []dgo.Value) dgo.Array {
arr := make([]dgo.Value, len(values))
for i := range values {
e := values[i]
if f, ok := e.(dgo.Freezable); ok {
e = f.FrozenCopy()
} else if e == nil {
e = Nil
}
arr[i] = e
}
return &array{slice: arr, frozen: true}
}
// ArrayFromReflected creates a new array that contains a copy of the given reflected slice
func ArrayFromReflected(vr reflect.Value, frozen bool) dgo.Value {
if vr.IsNil() {
return Nil
}
ix := vr.Interface()
if bs, ok := ix.([]byte); ok {
return Binary(bs, frozen)
}
top := vr.Len()
var arr []dgo.Value
if vs, ok := ix.([]dgo.Value); ok {
arr = vs
if frozen {
arr = sliceCopy(arr)
}
} else {
arr = make([]dgo.Value, top)
for i := 0; i < top; i++ {
arr[i] = ValueFromReflected(vr.Index(i))
}
}
if frozen {
for i := range arr {
if f, ok := arr[i].(dgo.Freezable); ok {
arr[i] = f.FrozenCopy()
}
}
}
return &array{slice: arr, frozen: frozen}
}
// MutableArray creates a new mutable array that wraps the given slice. Unset
// (nil) entries in the slice are replaced by Nil. When a non-nil type is
// given, the slice's size and every element are validated against it and the
// function panics with IllegalSize/IllegalAssignment on a violation.
func MutableArray(t dgo.ArrayType, values []dgo.Value) dgo.Array {
	if t != nil {
		l := len(values)
		// The two previous min/max branches panicked identically; merged.
		if l < t.Min() || l > t.Max() {
			panic(IllegalSize(t, l))
		}
		if tt, ok := t.(*tupleType); ok {
			// A tuple constrains each position with its own type. The size
			// check above guarantees len(values) == len(tt.slice).
			es := tt.slice
			for i := range values {
				e := values[i]
				et := es[i].(dgo.Type)
				if !et.Instance(e) {
					panic(IllegalAssignment(et, e))
				}
			}
		} else if et := t.ElementType(); DefaultAnyType != et {
			// All elements share one element type; the unconstrained "any"
			// type skips the loop entirely.
			for i := range values {
				e := values[i]
				if !et.Instance(e) {
					panic(IllegalAssignment(et, e))
				}
			}
		}
	}
	return &array{slice: ReplaceNil(values), typ: t, frozen: false}
}
// MutableValues returns a frozen dgo.Array that represents the given values
func MutableValues(t dgo.ArrayType, values []interface{}) dgo.Array {
s := make([]dgo.Value, len(values))
for i := range values {
s[i] = Value(values[i])
}
return MutableArray(t, s)
}
func valueSlice(values []interface{}, frozen bool) []dgo.Value {
cp := make([]dgo.Value, len(values))
if frozen {
for i := range values {
v := Value(values[i])
if f, ok := v.(dgo.Freezable); ok {
v = f.FrozenCopy()
}
cp[i] = v
}
} else {
for i := range values {
cp[i] = Value(values[i])
}
}
return cp
}
// Integers returns a dgo.Array that represents the given ints
func Integers(values []int) dgo.Array {
cp := make([]dgo.Value, len(values))
for i := range values {
cp[i] = intVal(values[i])
}
return &array{slice: cp, frozen: true}
}
// Strings returns a dgo.Array that represents the given strings
func Strings(values []string) dgo.Array {
cp := make([]dgo.Value, len(values))
for i := range values {
cp[i] = makeHString(values[i])
}
return &array{slice: cp, frozen: true}
}
// Values returns a frozen dgo.Array that represents the given values
func Values(values []interface{}) dgo.Array {
return &array{slice: valueSlice(values, true), frozen: true}
}
func (v *array) assertType(e dgo.Value, pos int) {
if t := v.typ; t != nil {
sz := len(v.slice)
if pos >= sz {
sz++
if sz > t.Max() {
panic(IllegalSize(t, sz))
}
}
var et dgo.Type
if tp, ok := t.(*tupleType); ok {
et = tp.slice[pos].(dgo.Type)
} else {
et = t.ElementType()
}
if !et.Instance(e) {
panic(IllegalAssignment(et, e))
}
}
}
func (v *array) assertTypes(values dgo.Array) {
if t := v.typ; t != nil {
addedSize := values.Len()
if addedSize == 0 {
return
}
sz := len(v.slice)
if sz+addedSize > t.Max() {
panic(IllegalSize(t, sz+addedSize))
}
et := t.ElementType()
for i := 0; i < addedSize; i++ {
e := values.Get(i)
if !et.Instance(e) {
panic(IllegalAssignment(et, e))
}
}
}
}
func (v *array) Add(vi interface{}) {
if v.frozen {
panic(frozenArray(`Add`))
}
val := Value(vi)
v.assertType(val, len(v.slice))
v.slice = append(v.slice, val)
}
func (v *array) AddAll(values dgo.Array) {
if v.frozen {
panic(frozenArray(`AddAll`))
}
v.assertTypes(values)
v.slice = values.AppendToSlice(v.slice)
}
func (v *array) AddValues(values ...interface{}) {
if v.frozen {
panic(frozenArray(`AddValues`))
}
va := valueSlice(values, false)
v.assertTypes(&array{slice: va})
v.slice = append(v.slice, va...)
}
func (v *array) All(predicate dgo.Predicate) bool {
a := v.slice
for i := range a {
if !predicate(a[i]) {
return false
}
}
return true
}
func (v *array) Any(predicate dgo.Predicate) bool {
a := v.slice
for i := range a {
if predicate(a[i]) {
return true
}
}
return false
}
func (v *array) AppendTo(w util.Indenter) {
w.AppendRune('[')
ew := w.Indent()
a := v.slice
for i := range a {
if i > 0 {
w.AppendRune(',')
}
ew.NewLine()
ew.AppendValue(v.slice[i])
}
w.NewLine()
w.AppendRune(']')
}
func (v *array) AppendToSlice(slice []dgo.Value) []dgo.Value {
return append(slice, v.slice...)
}
func (v *array) CompareTo(other interface{}) (int, bool) {
return compare(nil, v, Value(other))
}
func (v *array) deepCompare(seen []dgo.Value, other deepCompare) (int, bool) {
ov, ok := other.(*array)
if !ok {
return 0, false
}
a := v.slice
b := ov.slice
top := len(a)
max := len(b)
r := 0
if top < max {
r = -1
max = top
} else if top > max {
r = 1
}
for i := 0; i < max; i++ {
if _, ok = a[i].(dgo.Comparable); !ok {
r = 0
break
}
var c int
if c, ok = compare(seen, a[i], b[i]); !ok {
r = 0
break
}
if c != 0 {
r = c
break
}
}
return r, ok
}
func (v *array) Copy(frozen bool) dgo.Array {
if frozen && v.frozen {
return v
}
cp := sliceCopy(v.slice)
if frozen {
for i := range cp {
if f, ok := cp[i].(dgo.Freezable); ok {
cp[i] = f.FrozenCopy()
}
}
}
return &array{slice: cp, typ: v.typ, frozen: frozen}
}
func (v *array) Each(doer dgo.Doer) {
a := v.slice
for i := range a {
doer(a[i])
}
}
func (v *array) EachWithIndex(doer dgo.DoWithIndex) {
a := v.slice
for i := range a {
doer(a[i], i)
}
}
func (v *array) Equals(other interface{}) bool {
return equals(nil, v, other)
}
func (v *array) deepEqual(seen []dgo.Value, other deepEqual) bool {
if ov, ok := other.(*array); ok {
return sliceEquals(seen, v.slice, ov.slice)
}
return false
}
func (v *array) Freeze() {
if v.frozen {
return
}
v.frozen = true
a := v.slice
for i := range a {
if f, ok := a[i].(dgo.Freezable); ok {
f.Freeze()
}
}
}
func (v *array) Frozen() bool {
return v.frozen
}
func (v *array) FrozenCopy() dgo.Value {
return v.Copy(true)
}
func (v *array) GoSlice() []dgo.Value {
if v.frozen {
return sliceCopy(v.slice)
}
return v.slice
}
func (v *array) HashCode() int {
return v.deepHashCode(nil)
}
func (v *array) deepHashCode(seen []dgo.Value) int {
h := 1
s := v.slice
for i := range s {
h = h*31 + deepHashCode(seen, s[i])
}
return h
}
func (v *array) Get(index int) dgo.Value {
return v.slice[index]
}
func (v *array) IndexOf(vi interface{}) int {
val := Value(vi)
a := v.slice
for i := range a {
if val.Equals(a[i]) {
return i
}
}
return -1
}
func (v *array) Insert(pos int, vi interface{}) {
if v.frozen {
panic(frozenArray(`Insert`))
}
val := Value(vi)
v.assertType(val, pos)
v.slice = append(v.slice[:pos], append([]dgo.Value{val}, v.slice[pos:]...)...)
}
func (v *array) Len() int {
return len(v.slice)
}
func (v *array) MapTo(t dgo.ArrayType, mapper dgo.Mapper) dgo.Array {
if t == nil {
return v.Map(mapper)
}
a := v.slice
l := len(a)
if l < t.Min() {
panic(IllegalSize(t, l))
}
if l > t.Max() {
panic(IllegalSize(t, l))
}
et := t.ElementType()
vs := make([]dgo.Value, len(a))
for i := range a {
mv := Value(mapper(a[i]))
if !et.Instance(mv) {
panic(IllegalAssignment(et, mv))
}
vs[i] = mv
}
return &array{slice: vs, typ: t, frozen: v.frozen}
}
func (v *array) Map(mapper dgo.Mapper) dgo.Array {
a := v.slice
vs := make([]dgo.Value, len(a))
for i := range a {
vs[i] = Value(mapper(a[i]))
}
return &array{slice: vs, frozen: v.frozen}
}
func (v *array) One(predicate dgo.Predicate) bool {
a := v.slice
f := false
for i := range a {
if predicate(a[i]) {
if f {
return false
}
f = true
}
}
return f
}
func (v *array) Reduce(mi interface{}, reductor func(memo dgo.Value, elem dgo.Value) interface{}) dgo.Value {
memo := Value(mi)
a := v.slice
for i := range a {
memo = Value(reductor(memo, a[i]))
}
return memo
}
func (v *array) removePos(pos int) dgo.Value {
a := v.slice
if pos >= 0 && pos < len(a) {
newLen := len(a) - 1
if v.typ != nil {
if v.typ.Min() > newLen {
panic(IllegalSize(v.typ, newLen))
}
}
val := a[pos]
copy(a[pos:], a[pos+1:])
a[newLen] = nil // release to GC
v.slice = a[:newLen]
return val
}
return nil
}
func (v *array) Remove(pos int) dgo.Value {
if v.frozen {
panic(frozenArray(`Remove`))
}
return v.removePos(pos)
}
func (v *array) RemoveValue(value interface{}) bool {
if v.frozen {
panic(frozenArray(`RemoveValue`))
}
return v.removePos(v.IndexOf(value)) != nil
}
func (v *array) Reject(predicate dgo.Predicate) dgo.Array {
vs := make([]dgo.Value, 0)
a := v.slice
for i := range a {
e := a[i]
if !predicate(e) {
vs = append(vs, e)
}
}
return &array{slice: vs, typ: v.typ, frozen: v.frozen}
}
// SameValues returns true when this array and other contain the same
// elements as multisets, irrespective of order: the lengths must be equal
// and every element of other must be present here.
func (v *array) SameValues(other dgo.Array) bool {
	return len(v.slice) == other.Len() && v.ContainsAll(other)
}
// ContainsAll returns true when this array contains every element of the
// other array, respecting multiplicity (each element of other must be
// matched by a distinct element of this array).
//
// BUG FIX: the previous implementation ranged over other's indices while
// reading this array's elements (ea := a[i]), effectively testing
// a[:len(b)] ⊆ b instead of b ⊆ a. It only behaved correctly when the
// lengths were equal (the SameValues case).
func (v *array) ContainsAll(other dgo.Array) bool {
	oa := other.(*array)
	a := v.slice
	b := oa.slice
	if len(a) < len(b) {
		return false
	}
	if len(b) == 0 {
		return true
	}
	// Track elements of this array that have already been matched by
	// niling them out in a copy. This respects multiplicity and avoids
	// excessive calls to Equals.
	vs := sliceCopy(a)
	for i := range b {
		eb := b[i]
		found := false
		for j := range vs {
			if ae := vs[j]; ae != nil && ae.Equals(eb) {
				vs[j] = nil
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}
func (v *array) Select(predicate dgo.Predicate) dgo.Array {
vs := make([]dgo.Value, 0)
a := v.slice
for i := range a {
e := a[i]
if predicate(e) {
vs = append(vs, e)
}
}
return &array{slice: vs, typ: v.typ, frozen: v.frozen}
}
func (v *array) Set(pos int, vi interface{}) dgo.Value {
if v.frozen {
panic(frozenArray(`Set`))
}
val := Value(vi)
v.assertType(val, pos)
old := v.slice[pos]
v.slice[pos] = val
return old
}
func (v *array) SetType(ti interface{}) {
if v.frozen {
panic(frozenArray(`SetType`))
}
var mt dgo.ArrayType
ok := false
switch ti := ti.(type) {
case dgo.Type:
mt, ok = ti.(dgo.ArrayType)
case dgo.String:
mt, ok = Parse(ti.String()).(dgo.ArrayType)
case string:
mt, ok = Parse(ti).(dgo.ArrayType)
case nil:
ok = true
}
if !ok {
panic(errors.New(`Array.SetType: argument does not evaluate to an ArrayType`))
}
if mt == nil || mt.Instance(v) {
v.typ = mt
return
}
panic(IllegalAssignment(mt, v))
}
func (v *array) Sort() dgo.Array {
sa := v.slice
if len(sa) < 2 {
return v
}
sorted := sliceCopy(sa)
sort.SliceStable(sorted, func(i, j int) bool {
a := sorted[i]
b := sorted[j]
if ac, ok := a.(dgo.Comparable); ok {
var c int
if c, ok = ac.CompareTo(b); ok {
return c < 0
}
}
return a.Type().TypeIdentifier() < b.Type().TypeIdentifier()
})
return &array{slice: sorted, typ: v.typ, frozen: v.frozen}
}
func (v *array) String() string {
return ToStringERP(v)
}
func (v *array) ToMap() dgo.Map {
ms := v.slice
top := len(ms)
ts := top / 2
if top%2 != 0 {
ts++
}
tbl := make([]*hashNode, tableSizeFor(ts))
hl := len(tbl) - 1
m := &hashMap{table: tbl, len: ts, frozen: v.frozen}
for i := 0; i < top; {
mk := ms[i]
i++
var mv dgo.Value = Nil
if i < top {
mv = ms[i]
i++
}
hk := hl & hash(mk.HashCode())
nd := &hashNode{mapEntry: mapEntry{key: mk, value: mv}, hashNext: tbl[hk], prev: m.last}
if m.first == nil {
m.first = nd
} else {
m.last.next = nd
}
m.last = nd
tbl[hk] = nd
}
return m
}
func (v *array) ToMapFromEntries() (dgo.Map, bool) {
ms := v.slice
top := len(ms)
tbl := make([]*hashNode, tableSizeFor(top))
hl := len(tbl) - 1
m := &hashMap{table: tbl, len: top, frozen: v.frozen}
for i := range ms {
nd, ok := ms[i].(*hashNode)
if !ok {
var ea *array
if ea, ok = ms[i].(*array); ok && len(ea.slice) == 2 {
nd = &hashNode{mapEntry: mapEntry{key: ea.slice[0], value: ea.slice[1]}}
} else {
return nil, false
}
} else if nd.hashNext != nil {
// Copy node, it belongs to another map
c := *nd
c.next = nil // this one might not get assigned below
nd = &c
}
hk := hl & hash(nd.key.HashCode())
nd.hashNext = tbl[hk]
nd.prev = m.last
if m.first == nil {
m.first = nd
} else {
m.last.next = nd
}
m.last = nd
tbl[hk] = nd
}
return m, true
}
func (v *array) Type() dgo.Type {
if v.typ == nil {
return (*exactArrayType)(v)
}
return v.typ
}
func (v *array) Unique() dgo.Array {
a := v.slice
top := len(a)
if top < 2 {
return v
}
tbl := make([]*hashNode, tableSizeFor(int(float64(top)/loadFactor)))
hl := len(tbl) - 1
u := make([]dgo.Value, top)
ui := 0
nextVal:
for i := range a {
k := a[i]
hk := hl & hash(k.HashCode())
for e := tbl[hk]; e != nil; e = e.hashNext {
if k.Equals(e.key) {
continue nextVal
}
}
tbl[hk] = &hashNode{mapEntry: mapEntry{key: k}, hashNext: tbl[hk]}
u[ui] = k
ui++
}
if ui == top {
return v
}
return &array{slice: u[:ui], typ: v.typ, frozen: v.frozen}
}
func (v *array) MarshalJSON() ([]byte, error) {
return []byte(ToStringERP(v)), nil
}
func (v *array) MarshalYAML() (interface{}, error) {
a := v.slice
s := make([]*yaml.Node, len(a))
var err error
for i := range a {
s[i], err = yamlEncodeValue(a[i])
if err != nil {
return nil, err
}
}
return &yaml.Node{Kind: yaml.SequenceNode, Tag: `!!seq`, Content: s}, nil
}
// Pop removes and returns the last element of the array. The boolean is
// false when the array is empty. Panics if the array is frozen.
func (v *array) Pop() (dgo.Value, bool) {
	if v.frozen {
		panic(frozenArray(`Pop`))
	}
	if last := len(v.slice) - 1; last >= 0 {
		return v.removePos(last), true
	}
	return nil, false
}
func (v *array) UnmarshalJSON(b []byte) error {
if v.frozen {
panic(frozenArray(`UnmarshalJSON`))
}
dec := json.NewDecoder(bytes.NewReader(b))
dec.UseNumber()
t, err := dec.Token()
if err == nil {
if delim, ok := t.(json.Delim); !ok || delim != '[' {
return errors.New("expecting data to be an array")
}
var a *array
a, err = jsonDecodeArray(dec)
if err == nil {
*v = *a
}
}
return err
}
func (v *array) UnmarshalYAML(n *yaml.Node) error {
if v.frozen {
panic(frozenArray(`UnmarshalYAML`))
}
if n.Kind != yaml.SequenceNode {
return errors.New("expecting data to be an array")
}
a, err := yamlDecodeArray(n)
if err == nil {
*v = *a
}
return err
}
func (v *array) With(vi interface{}) dgo.Array {
val := Value(vi)
v.assertType(val, len(v.slice))
return &array{slice: append(v.slice, val), typ: v.typ, frozen: v.frozen}
}
func (v *array) WithAll(values dgo.Array) dgo.Array {
if values.Len() == 0 {
return v
}
v.assertTypes(values)
return &array{slice: values.AppendToSlice(v.slice), typ: v.typ, frozen: v.frozen}
}
func (v *array) WithValues(values ...interface{}) dgo.Array {
if len(values) == 0 {
return v
}
va := valueSlice(values, v.frozen)
v.assertTypes(&array{slice: va})
return &array{slice: append(v.slice, va...), typ: v.typ, frozen: v.frozen}
}
// ReplaceNil performs an in-place replacement of nil interface entries with
// the NilValue and returns the same slice.
func ReplaceNil(vs []dgo.Value) []dgo.Value {
	for i, v := range vs {
		if v == nil {
			vs[i] = Nil
		}
	}
	return vs
}
// allInstance returns true when every element of vs is an instance of the
// given type t. The unconstrained "any" type short-circuits to true.
func allInstance(guard dgo.RecursionGuard, t dgo.Type, vs []dgo.Value) bool {
	if t == DefaultAnyType {
		return true
	}
	for _, v := range vs {
		if !Instance(guard, t, v) {
			return false
		}
	}
	return true
}
// allAssignable returns true when every type in the slice s is assignable to
// the given type t.
func allAssignable(guard dgo.RecursionGuard, t dgo.Type, s []dgo.Value) bool {
	for _, e := range s {
		if !Assignable(guard, t, e.(dgo.Type)) {
			return false
		}
	}
	return true
}
// assignableToAll returns true when the given type t is assignable to the
// type of every element of vs.
func assignableToAll(guard dgo.RecursionGuard, t dgo.Type, vs []dgo.Value) bool {
	for _, v := range vs {
		if !Assignable(guard, v.Type(), t) {
			return false
		}
	}
	return true
}
// frozenArray builds the error used when a mutating method named f is
// invoked on a frozen Array.
func frozenArray(f string) error {
	return errors.New(f + ` called on a frozen Array`)
}
// sliceCopy returns a freshly allocated slice containing the same elements
// as s.
func sliceCopy(s []dgo.Value) []dgo.Value {
	return append(make([]dgo.Value, 0, len(s)), s...)
}
func resolveSlice(ts []dgo.Value, ap dgo.AliasProvider) {
for i := range ts {
ts[i] = ap.Replace(ts[i].(dgo.Type))
}
} | internal/array.go | 0.718989 | 0.464537 | array.go | starcoder |
package set
// WithParamFunctions is the text/template source for the Set:With[T]
// generated methods (folds, sum/mean for numeric T, min/max-by for ordered T).
//
// Fixes: the generated Mean panic message said "maximum" (copy-paste from
// MaxBy) and was followed by an unreachable `return` that `go vet` would
// flag in the generated code.
const WithParamFunctions = `
// Set:With[{{.TypeParameter}}]
// FoldLeft{{.TypeParameter.LongName}} applies a binary operator to a start value and all elements of this set, going left to right.
// Note: the result is well-defined only if the operator function is associative and commutative.
func (set {{.TName}}Set) FoldLeft{{.TypeParameter.LongName}}(zero {{.TypeParameter}}, fn func({{.TypeParameter}}, {{.PName}}) {{.TypeParameter}}) {{.TypeParameter}} {
	sum := zero
	for v := range set {
		sum = fn(sum, v)
	}
	return sum
}
// FoldRight{{.TypeParameter.LongName}} applies a binary operator to a start value and all elements of this set, going right to left.
// This is an alias for FoldLeft{{.TypeParameter.LongName}}.
// Note: the result is well-defined only if the operator function is associative and commutative.
func (set {{.TName}}Set) FoldRight{{.TypeParameter.LongName}}(zero {{.TypeParameter}}, fn func({{.TypeParameter}}, {{.PName}}) {{.TypeParameter}}) {{.TypeParameter}} {
	return set.FoldLeft{{.TypeParameter.LongName}}(zero, fn)
}
{{if .TypeParameter.Numeric}}
// Sum{{.TypeParameter.LongName}} sums {{.PName}} over elements in {{.TName}}Set.
// This method requires {{.PName}} be numeric.
func (set {{.TName}}Set) Sum{{.TypeParameter.LongName}}(fn func({{.PName}}) {{.TypeParameter}}) (result {{.TypeParameter}}) {
	for v := range set {
		result += fn(v)
	}
	return
}
// Mean{{.TypeParameter.LongName}} sums {{.TypeParameter}} over all elements and divides by len({{.TName}}Set).
// This method requires {{.PName}} be numeric.
// Panics if there are no elements.
func (set {{.TName}}Set) Mean{{.TypeParameter.LongName}}(fn func({{.PName}}) {{.TypeParameter}}) (result {{.TypeParameter}}) {
	l := len(set)
	if l == 0 {
		panic("Cannot determine the mean of an empty set.")
	}
	for v := range set {
		result += fn(v)
	}
	result = result / {{.TypeParameter}}(l)
	return
}
{{end}}
{{if .TypeParameter.Ordered}}
// MinBy{{.TypeParameter.LongName}} finds the first element which yields the smallest value measured by function fn.
// fn is usually called a projection or measuring function.
// Panics if there are no elements.
// This method requires {{.TypeParameter}} be ordered.
func (set {{.TName}}Set) MinBy{{.TypeParameter.LongName}}(fn func({{.PName}}) {{.TypeParameter}}) (result {{.PName}}) {
	if len(set) == 0 {
		panic("Cannot determine the minimum of an empty set.")
	}
	var m {{.TypeParameter}}
	first := true
	for v := range set {
		f := fn(v)
		if first {
			first = false
			result = {{.Ptr}}v
			m = f
		} else if m > f {
			result = {{.Ptr}}v
			m = f
		}
	}
	return
}
// MaxBy{{.TypeParameter.LongName}} finds the first element which yields the largest value measured by function fn.
// fn is usually called a projection or measuring function.
// Panics if there are no elements.
// This method requires {{.TypeParameter}} be ordered.
func (set {{.TName}}Set) MaxBy{{.TypeParameter.LongName}}(fn func({{.PName}}) {{.TypeParameter}}) (result {{.PName}}) {
	if len(set) == 0 {
		panic("Cannot determine the maximum of an empty set.")
	}
	var m {{.TypeParameter}}
	first := true
	for v := range set {
		f := fn(v)
		if first {
			first = false
			result = {{.Ptr}}v
			m = f
		} else if m < f {
			result = {{.Ptr}}v
			m = f
		}
	}
	return
}
{{end}}
`
package assert
import (
"testing"
)
type Assert struct {
t *testing.T
}
func New(t *testing.T) *Assert {
return &Assert{t}
}
func (a *Assert) True(value bool) {
True(a.t, value)
}
func (a *Assert) False(value bool) {
False(a.t, value)
}
func (a *Assert) Nil(object interface{}) {
Nil(a.t, object)
}
func (a *Assert) NotNil(object interface{}) {
NotNil(a.t, object)
}
func (a *Assert) Empty(object interface{}) {
Empty(a.t, object)
}
func (a *Assert) NotEmpty(object interface{}) {
NotEmpty(a.t, object)
}
func (a *Assert) Zero(object interface{}) {
Zero(a.t, object)
}
func (a *Assert) NotZero(object interface{}) {
NotZero(a.t, object)
}
func (a *Assert) Error(error interface{}) {
Error(a.t, error)
}
func (a *Assert) NoError(error interface{}) {
NoError(a.t, error)
}
func (a *Assert) Equal(actual, expected interface{}) {
Equal(a.t, actual, expected)
}
func (a *Assert) NotEqual(actual, expected interface{}) {
NotEqual(a.t, actual, expected)
}
func (a *Assert) EqualVal(actual, expected interface{}) {
EqualVal(a.t, actual, expected)
}
func (a *Assert) NotEqualVal(actual, expected interface{}) {
NotEqualVal(a.t, actual, expected)
}
func (a *Assert) Contains(list interface{}, element interface{}) {
Contains(a.t, list, element)
}
func (a *Assert) NotContains(list interface{}, element interface{}) {
NotContains(a.t, list, element)
}
func (a *Assert) HasPrefix(str, prefix string) {
HasPrefix(a.t, str, prefix)
}
func (a *Assert) HasNotPrefix(str, prefix string) {
HasNotPrefix(a.t, str, prefix)
}
func (a *Assert) HasSuffix(str, suffix string) {
HasSuffix(a.t, str, suffix)
}
func (a *Assert) HasNotSuffix(str, suffix string) {
HasNotSuffix(a.t, str, suffix)
}
func (a *Assert) Len(object interface{}, length int) {
Len(a.t, object, length)
}
func (a *Assert) SameType(object interface{}, anotherObject interface{}) {
SameType(a.t, object, anotherObject)
}
func (a *Assert) Implements(object interface{}, interfaceObject interface{}) {
Implements(a.t, object, interfaceObject)
}
func (a *Assert) Panic(action func()) {
Panic(a.t, action)
}
func (a *Assert) NoPanic(action func()) {
NoPanic(a.t, action)
} | assert/assert.go | 0.773302 | 0.567457 | assert.go | starcoder |
package projecteuler
import "strings"
// BigInt is a struct holding slice of digits in reversed order
type BigInt struct {
digits []byte
}
// MakeBigIntFromInt constructs a BigInt out of a non-negative int.
// Digits are stored least-significant first.
func MakeBigIntFromInt(input int) (result BigInt) {
	// Fixed: zero previously produced an empty digit slice, which made
	// String() return "" and DigitCount() return 0 instead of "0"/1.
	if input == 0 {
		result.digits = []byte{0}
		return
	}
	result.digits = make([]byte, 0)
	for ; input > 0; input /= 10 {
		result.digits = append(result.digits, byte(input%10))
	}
	return
}
// MakeBigInt constructs BigInt out of string
func MakeBigInt(input string) (result BigInt, err error) {
l := len(input)
result.digits = make([]byte, l)
for i := l; i > 0; i-- {
result.digits[l-i] = byte(input[i-1] - '0')
}
return
}
// DigitCount returns digit count
func (bi BigInt) DigitCount() int {
return len(bi.digits)
}
// Clone clones BigInt
func (bi BigInt) Clone() (result *BigInt) {
result = &BigInt{digits: make([]byte, len(bi.digits))}
copy(result.digits, bi.digits)
return
}
// AddBigInts returns the sum of one and two.
// Neither argument's digit storage is modified; the previous version padded
// the shorter operand with appends, which could write into a backing array
// shared with the caller's BigInt, and recursed just to swap the operands.
func AddBigInts(one BigInt, two BigInt) (result BigInt) {
	// Make `one` the operand with at least as many digits.
	if len(one.digits) < len(two.digits) {
		one, two = two, one
	}
	carry := byte(0)
	l := len(one.digits)
	result.digits = make([]byte, l)
	for i := 0; i < l; i++ {
		// Treat missing high-order digits of the shorter operand as zero.
		currDigit := carry + one.digits[i]
		if i < len(two.digits) {
			currDigit += two.digits[i]
		}
		// Per-digit sum is at most 9+9+1 = 19, so a single subtraction
		// normalizes it.
		if currDigit > 9 {
			currDigit -= 10
			carry = 1
		} else {
			carry = 0
		}
		result.digits[i] = currDigit
	}
	if carry == 1 {
		result.digits = append(result.digits, byte(1))
	}
	return
}
// AddTo adds to
func (bi *BigInt) AddTo(rhs BigInt) {
result := AddBigInts(*bi, rhs)
bi.digits = make([]byte, len(result.digits))
copy(bi.digits, result.digits)
}
// String returns the decimal string representation of bi,
// most-significant digit first.
func (bi BigInt) String() string {
	buf := make([]byte, len(bi.digits))
	for i, d := range bi.digits {
		buf[len(buf)-1-i] = d + '0'
	}
	return string(buf)
}
// Int returns int64 value of bi. If bi respresents too big number, int64 will overflow.
func (bi BigInt) Int() int64 {
num := int64(0)
for i := len(bi.digits); i > 0; i-- {
num *= 10
num += int64(bi.digits[i-1])
}
return num
}
// DigitSum returns sum of the digits
func (bi BigInt) DigitSum() int {
retValue := 0
for _, x := range bi.digits {
retValue += int(x)
}
return retValue
}
// MulPowTen multiplies BigInt with power of ten
func (bi *BigInt) MulPowTen(pow int) {
if pow == 0 {
return
}
newDigits := make([]byte, len(bi.digits)+pow)
for i := 0; i < len(bi.digits); i++ {
newDigits[i+pow] = bi.digits[i]
}
bi.digits = newDigits
}
// MulBigInts multiplies BigInts
func MulBigInts(one BigInt, two BigInt) (result BigInt) {
result.digits = make([]byte, 1)
for i := 0; i < len(two.digits); i++ {
temp := one.Clone()
temp.mulDigit(two.digits[i])
temp.MulPowTen(i)
result = AddBigInts(result, *temp)
}
return
}
// PowBigInt returns power of BigInt
func (bi *BigInt) PowBigInt(pow int) {
if pow == 0 {
bi.digits = make([]byte, 1)
bi.digits[0] = 1
return
} else if pow == 1 {
return
}
temp := bi.Clone()
for i := 1; i < pow; i++ {
m := MulBigInts(*temp, *bi)
temp = &m
}
bi.digits = temp.digits
}
// IsPalindrome reports whether bi's digit sequence reads the same forwards
// and backwards.
func (bi BigInt) IsPalindrome() bool {
	for i, j := 0, len(bi.digits)-1; i < j; i, j = i+1, j-1 {
		if bi.digits[i] != bi.digits[j] {
			return false
		}
	}
	return true
}
// ReverseDigits reverses digits of bi
func (bi *BigInt) ReverseDigits() {
limit := len(bi.digits) / 2
for i := 0; i < limit; i++ {
bi.digits[i], bi.digits[len(bi.digits)-i-1] = bi.digits[len(bi.digits)-i-1], bi.digits[i]
}
}
// Concatenate appends rhs digits to bi
func (bi *BigInt) Concatenate(rhs BigInt) {
bi.digits = append(rhs.digits, bi.digits...)
}
// mulDigit multiplies bi in place by a single decimal digit d.
// Digits are stored least-significant first, so the carry propagates from
// index 0 upward. Assumes d is in [0, 9] — TODO confirm callers never pass
// a larger value (byte overflow would corrupt the product).
func (bi *BigInt) mulDigit(d byte) {
	// Multiplying by zero collapses the whole number to a single 0 digit.
	if d == 0 {
		bi.digits = []byte{0}
		return
	} else if d == 1 {
		// Identity: nothing to do.
		return
	}
	carry := byte(0)
	for i := 0; i < len(bi.digits); i++ {
		// Per-digit product plus incoming carry; the carry can exceed 1
		// here (max 9*9 + 8 = 89), hence the div/mod-10 recovery below.
		currDigit := carry + bi.digits[i]*d
		if currDigit > 9 {
			carry = currDigit / 10
			currDigit %= 10
		} else {
			carry = 0
		}
		bi.digits[i] = currDigit
	}
	// A leftover carry extends the number by one (high-order) digit.
	if carry != 0 {
		bi.digits = append(bi.digits, carry)
	}
}
package types
import (
"sort"
"github.com/liquidata-inc/dolt/go/store/d"
)
// MakePrimitiveType returns the canonical *Type for the given primitive
// NomsKind, or ErrUnknownType for any non-primitive kind.
func MakePrimitiveType(k NomsKind) (*Type, error) {
	var t *Type
	switch k {
	case BoolKind:
		t = BoolType
	case FloatKind:
		t = FloaTType
	case UUIDKind:
		t = UUIDType
	case IntKind:
		t = IntType
	case UintKind:
		t = UintType
	case NullKind:
		t = NullType
	case StringKind:
		t = StringType
	case BlobKind:
		t = BlobType
	case ValueKind:
		t = ValueType
	case TypeKind:
		t = TypeType
	default:
		return nil, ErrUnknownType
	}
	return t, nil
}
// MakeUnionType creates a new union type unless the elemTypes can be folded into a single non union type.
func MakeUnionType(elemTypes ...*Type) (*Type, error) {
t, err := makeUnionType(elemTypes...)
if err != nil {
return nil, err
}
return simplifyType(t, false)
}
func MakeListType(elemType *Type) (*Type, error) {
t, err := makeCompoundType(ListKind, elemType)
if err != nil {
return nil, err
}
return simplifyType(t, false)
}
func MakeSetType(elemType *Type) (*Type, error) {
t, err := makeCompoundType(SetKind, elemType)
if err != nil {
return nil, err
}
return simplifyType(t, false)
}
func MakeRefType(elemType *Type) (*Type, error) {
t, err := makeCompoundType(RefKind, elemType)
if err != nil {
return nil, err
}
return simplifyType(t, false)
}
func MakeMapType(keyType, valType *Type) (*Type, error) {
t, err := makeCompoundType(MapKind, keyType, valType)
if err != nil {
return nil, err
}
return simplifyType(t, false)
}
func MakeStructType(name string, fields ...StructField) (*Type, error) {
fs := structTypeFields(fields)
sort.Sort(fs)
t, err := makeStructType(name, fs)
if err != nil {
return nil, err
}
return simplifyType(t, false)
}
func MakeCycleType(name string) *Type {
d.PanicIfTrue(name == "")
return newType(CycleDesc(name))
}
func makePrimitiveType(k NomsKind) *Type {
return newType(PrimitiveDesc(k))
}
var BoolType = makePrimitiveType(BoolKind)
var FloaTType = makePrimitiveType(FloatKind)
var StringType = makePrimitiveType(StringKind)
var BlobType = makePrimitiveType(BlobKind)
var TypeType = makePrimitiveType(TypeKind)
var ValueType = makePrimitiveType(ValueKind)
var UUIDType = makePrimitiveType(UUIDKind)
var IntType = makePrimitiveType(IntKind)
var UintType = makePrimitiveType(UintKind)
var NullType = makePrimitiveType(NullKind)
func makeCompoundType(kind NomsKind, elemTypes ...*Type) (*Type, error) {
for _, el := range elemTypes {
if el.Kind() == UnknownKind {
// If any of the element's types are an unknown type then this is unknown
return nil, ErrUnknownType
}
}
return newType(CompoundDesc{kind, elemTypes}), nil
}
func makeUnionType(elemTypes ...*Type) (*Type, error) {
if len(elemTypes) == 1 {
return elemTypes[0], nil
}
return makeCompoundType(UnionKind, elemTypes...)
}
func makeStructTypeQuickly(name string, fields structTypeFields) (*Type, error) {
for _, fld := range fields {
if fld.Type.Kind() == UnknownKind {
// If any of the fields have an unknown type then this is unknown
return nil, ErrUnknownType
}
}
return newType(StructDesc{name, fields}), nil
}
func makeStructType(name string, fields structTypeFields) (*Type, error) {
verifyStructName(name)
verifyFields(fields)
return makeStructTypeQuickly(name, fields)
}
type FieldMap map[string]*Type
func MakeStructTypeFromFields(name string, fields FieldMap) (*Type, error) {
fs := make(structTypeFields, len(fields))
i := 0
for k, v := range fields {
fs[i] = StructField{k, v, false}
i++
}
sort.Sort(fs)
t, err := makeStructType(name, fs)
if err != nil {
return nil, err
}
return simplifyType(t, false)
}
// StructField describes a field in a struct type.
type StructField struct {
Name string
Type *Type
Optional bool
}
type structTypeFields []StructField
func (s structTypeFields) Len() int { return len(s) }
func (s structTypeFields) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s structTypeFields) Less(i, j int) bool { return s[i].Name < s[j].Name } | go/store/types/make_type.go | 0.604399 | 0.496521 | make_type.go | starcoder |
package lshIndex
import "math"
// integral numerically approximates the definite integral of f over [a, b)
// with the midpoint rule, using rectangles of width `precision`.
func integral(f func(float64) float64, a, b, precision float64) float64 {
	sum := 0.0
	for x := a; x < b; x += precision {
		midpoint := x + 0.5*precision
		sum += f(midpoint) * precision
	}
	return sum
}
/*
The following are using Jaccard similarity
*/
// Probability density function for false positive
func falsePositive(l, k int) func(float64) float64 {
return func(j float64) float64 {
return 1.0 - math.Pow(1.0-math.Pow(j, float64(k)), float64(l))
}
}
// Probability density function for false negative
func falseNegative(l, k int) func(float64) float64 {
return func(j float64) float64 {
return 1.0 - (1.0 - math.Pow(1.0-math.Pow(j, float64(k)), float64(l)))
}
}
// Compute the cummulative probability of false negative given threshold t
func probFalseNegative(l, k int, t, precision float64) float64 {
return integral(falseNegative(l, k), t, 1.0, precision)
}
// Compute the cummulative probability of false positive given threshold t
func probFalsePositive(l, k int, t, precision float64) float64 {
return integral(falsePositive(l, k), 0, t, precision)
}
/*
The following are using Jaccard containment TODO: consolidate these functions with the above
*/
// Probability density function for false positive
func falsePositiveC(x, q, l, k int) func(float64) float64 {
return func(t float64) float64 {
return 1.0 - math.Pow(1.0-math.Pow(t/(1.0+float64(x)/float64(q)-t), float64(k)), float64(l))
}
}
// Probability density function for false negative
func falseNegativeC(x, q, l, k int) func(float64) float64 {
return func(t float64) float64 {
return 1.0 - (1.0 - math.Pow(1.0-math.Pow(t/(1.0+float64(x)/float64(q)-t), float64(k)), float64(l)))
}
}
// Compute the cummulative probability of false negative
func probFalseNegativeC(x, q, l, k int, t, precision float64) float64 {
fn := falseNegativeC(x, q, l, k)
xq := float64(x) / float64(q)
if xq >= 1.0 {
return integral(fn, t, 1.0, precision)
}
if xq >= t {
return integral(fn, t, xq, precision)
} else {
return 0.0
}
}
// Compute the cummulative probability of false positive
func probFalsePositiveC(x, q, l, k int, t, precision float64) float64 {
fp := falsePositiveC(x, q, l, k)
xq := float64(x) / float64(q)
if xq >= 1.0 {
return integral(fp, 0.0, t, precision)
}
if xq >= t {
return integral(fp, 0.0, t, precision)
} else {
return integral(fp, 0.0, xq, precision)
}
} | src/lshIndex/probability.go | 0.803906 | 0.649662 | probability.go | starcoder |
package pbf
//go:generate stringer -type=ElementType
import (
"fmt"
"math"
"strconv"
"time"
"github.com/golang/geo/s1"
)
// Degrees is the decimal degree representation of a longitude or latitude.
type Degrees float64
// Angle represents a 1D angle in radians.
type Angle s1.Angle
// Epsilon is an enumeration of precisions that can be used when comparing Degrees.
type Epsilon float64
// Degrees units.
const (
Degree Degrees = 1
Radian = (180 / math.Pi) * Degree
E5 Epsilon = 1e-5
E6 Epsilon = 1e-6
E7 Epsilon = 1e-7
E8 Epsilon = 1e-8
E9 Epsilon = 1e-9
)
// Angle returns the equivalent s1.Angle.
func (d Degrees) Angle() Angle { return Angle(float64(d) * float64(s1.Degree)) }
func (d Degrees) String() string {
val := math.Abs(float64(d))
degrees := int(math.Floor(val))
minutes := int(math.Floor(60 * (val - float64(degrees))))
seconds := 3600 * (val - float64(degrees) - (float64(minutes) / 60))
return fmt.Sprintf("%d\u00B0 %d' %s\"", degrees, minutes, ftoa(seconds))
}
// EqualWithin checks if two degrees are within a specific epsilon.
func (d Degrees) EqualWithin(o Degrees, eps Epsilon) bool {
return round(float64(d)/float64(eps))-round(float64(o)/float64(eps)) == 0
}
// EqualWithin checks if two angles are within a specific epsilon.
func (d Angle) EqualWithin(o Angle, eps Epsilon) bool {
return round(float64(d)/float64(eps))-round(float64(o)/float64(eps)) == 0
}
// E5 returns the angle in hundred thousandths of degrees.
func (d Degrees) E5() int32 { return round(float64(d * 1e5)) }
// E6 returns the angle in millionths of degrees.
func (d Degrees) E6() int32 { return round(float64(d * 1e6)) }
// E7 returns the angle in ten millionths of degrees.
func (d Degrees) E7() int32 { return round(float64(d * 1e7)) }
// round converts val to the nearest int32, rounding exact halves away from
// zero. (This intentionally does not match C++ behavior for the x.5 case.)
func round(val float64) int32 {
	adjustment := 0.5
	if val < 0 {
		adjustment = -0.5
	}
	return int32(val + adjustment)
}
// ParseDegrees converts a string to a Degrees instance.
// The input is parsed as a plain decimal number (e.g. "12.5"), not as a
// DMS string like the one produced by Degrees.String(); any
// strconv.ParseFloat error is returned unchanged with a zero Degrees.
func ParseDegrees(s string) (Degrees, error) {
	u, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, err
	}
	return Degrees(u), nil
}
// BoundingBox is simply a bounding box.
type BoundingBox struct {
Left Degrees
Right Degrees
Top Degrees
Bottom Degrees
}
// EqualWithin checks if two bounding boxes are within a specific epsilon.
func (b BoundingBox) EqualWithin(o BoundingBox, eps Epsilon) bool {
return b.Left.EqualWithin(o.Left, eps) &&
b.Right.EqualWithin(o.Right, eps) &&
b.Top.EqualWithin(o.Top, eps) &&
b.Bottom.EqualWithin(o.Bottom, eps)
}
// Contains checks if the bounding box contains the lon lat point.
func (b BoundingBox) Contains(lon Degrees, lat Degrees) bool {
return b.Left <= lon && lon <= b.Right && b.Bottom <= lat && lat <= b.Top
}
func (b BoundingBox) String() string {
return fmt.Sprintf("[%s, %s, %s, %s]",
ftoa(float64(b.Left)), ftoa(float64(b.Bottom)),
ftoa(float64(b.Right)), ftoa(float64(b.Top)))
}
// Header is the contents of the OpenStreetMap PBF data file.
type Header struct {
BoundingBox BoundingBox
RequiredFeatures []string
OptionalFeatures []string
WritingProgram string
Source string
OsmosisReplicationTimestamp time.Time
OsmosisReplicationSequenceNumber int64
OsmosisReplicationBaseURL string
}
// Info represents information common to Node, Way, and Relation elements.
type Info struct {
Version int32
UID int32
Timestamp time.Time
Changeset int64
User string
Visible bool
}
// Node represents a specific point on the earth's surface defined by its
// latitude and longitude. Each node comprises at least an id number and a
// pair of coordinates.
type Node struct {
ID uint64
Tags map[string]string
Info *Info
Lat Degrees
Lon Degrees
}
// Way is an ordered list of between 2 and 2,000 nodes that define a polyline.
type Way struct {
ID uint64
Tags map[string]string
Info *Info
NodeIDs []uint64
}
// ElementType is an enumeration of relation types.
type ElementType int
const (
// NODE denotes that the member is a node
NODE ElementType = iota
// WAY denotes that the member is a way
WAY
// RELATION denotes that the member is a relation
RELATION
)
// Member represents an element that
type Member struct {
ID uint64
Type ElementType
Role string
}
// Relation is a multi-purpose data structure that documents a relationship
// between two or more data elements (nodes, ways, and/or other relations).
type Relation struct {
ID uint64
Tags map[string]string
Info *Info
Members []Member
} | models.go | 0.804675 | 0.589687 | models.go | starcoder |
package fbmuck
type array_iter inst
type array_tree struct {
left, right *array_tree
key array_iter
data interface{}
height int
}
/* Primitives Package */
/*
AVL binary tree code by Lynx (or his instructor)
Modified for MUCK use by Sthiss
Remodified by Revar
*/
/*
** This function compares two arrays in struct insts (array_iter's).
** The arrays are compared in order until the first difference.
** If the key is the difference, the comparison result is based on the key.
** If the value is the difference, the comparison result is based on the value.
** Comparison of keys and values is done by array_tree_compare().
*/
func array_tree_compare_arrays(a, b *array_iter, case_sens bool) int {
if a != nil && b != nil {
if (a->type != stk_array || b->type != stk_array) {
return array_tree_compare(a, b, case_sens)
}
if (a->data.stk_array == b->data.stk_array) {
return 0;
}
idx1 := array_first(a.data.(stk_array))
idx2 := array_first(b.data.(stk_array))
for {
switch {
case idx1 != nil && idx2 != nil:
val1 := a.data.(stk_array).GetItem(&idx1)
val2 := b.data.(stk_array).GetItem(&idx2)
res = array_tree_compare(&idx1, &idx2, case_sens)
if res == 0 {
res = array_tree_compare(val1, val2, case_sens)
}
if res != 0 {
return res
}
case idx1 != nil:
return 1
case idx2 != nil:
return -1
default:
return 0
}
idx1 = array_next(a.data.stk_array, &idx1)
idx2 = array_next(b.data.stk_array, &idx2)
}
}
/* NOTREACHED */
return 0;
}
var DBL_EPSILON = math.Nextafter(1, 2) - 1
/*
** Compares two array_iter's (struct insts)
** If they are both either floats or ints, compare to see which is greater.
** If they are both strings, compare string values with given case sensitivity.
** If not, but they are both the same type, compare their values logicly.
** If not, then compare based on an arbitrary ordering of types.
** Returns -1 is a < b. Returns 1 is a > b. Returns 0 if a == b.
*/
func array_tree_compare(a, b *array_iter, case_sens bool) (r int) {
if a != nil && b != nil {
var ok bool
switch a := a.data.(type) {
case int:
switch b := b.data.(type) {
case int:
r = a - b
case float:
switch {
case math.Abs((float64(a) - b) / float64(a)) < DBL_EPSILON:
ok = true
case float64(a) > b:
r, ok = 1, true
default:
r, ok = -1, true
}
}
case float:
switch b := b.data.(type) {
case int:
switch {
case math.Abs(a - float64(b) / a) < DBL_EPSILON:
ok = true
case a > float64(b):
r, ok = 1, true
default:
r, ok = -1, true
}
case float:
switch {
case math.Abs(a - b / a) < DBL_EPSILON:
ok = true
case a > b:
r, ok = 1, true
default:
r, ok = -1, true
}
}
case string:
switch b := b.data.(type) {
case string:
if case_sens {
r, ok = strings.Compare(a, b), true
} else {
r, ok = strings.EqualFold(a, b), true
}
}
case stk_array:
switch b := b.data.(type) {
case string:
r = array_tree_compare_arrays(a, b, case_sens)
}
case Lock:
switch b := b.data.(type) {
case Lock:
/*
* In a perfect world, we'd compare the locks by element,
* instead of unparsing them into strings for strcmp()s.
*/
r, ok = strings.Compare(a.Unparse(1, false), b.Unparse(1, false)), true
case Address:
switch b := b.data.(type) {
case Address:
if r = a.progref - b.progref; r == 0 {
r = a.data - b.data
}
ok = true
}
}
if !ok {
if reflect.TypeOf(a.data) != reflect.TypeOf(b.data) {
r = 1
}
}
}
return
}
func array_tree_find(avl *array_tree, key *array_iter) *array_tree {
if key != nil {
for r = avl; r != nil; {
switch cmpval =: array_compare_tree(key, &(r.key), false); {
case cmpval > 0:
r = r.right
case cmpval < 0:
r = r.left
default:
break
}
}
}
return
}
// array_tree_height_of returns the cached AVL height of node, or 0 for a
// nil subtree.
// NOTE(review): this section appears to be a mechanical C-to-Go
// transliteration of fbmuck's AVL code; several sibling functions do not
// compile as Go. This particular helper is valid — verify the rest of the
// file before relying on it.
func array_tree_height_of(node *array_tree) (r int) {
	if node != nil {
		r = node.height
	}
	return
}
func array_tree_height_diff(node *array_tree) (r int) {
if node != nil {
r = array_tree_height_of(node.right) - array_tree_height_of(node.left)
}
return
}
/*\
|*| Note to self: don't do : max (x++,y)
|*| Kim
\*/
#define max(a, b) (a > b ? a : b)
func array_tree_fixup_height(node *array_tree) {
if node != nil {
node.height = 1 + max(array_tree_height_of(node.left), array_tree_height_of(node.right))
}
}
func array_tree_rotate_left_single(a *array_tree) (r *array_tree) {
if a != nil {
r = a.right
a.right = r.left
r.left = a
array_tree_fixup_height(a)
array_tree_fixup_height(r)
}
return
}
func array_tree_rotate_left_double(array_tree * a) (r *array_tree) {
if a != nil {
b := a.right
r = b.left
a.right = r.left
b.left = r.right
r.left = a
r.right = b
array_tree_fixup_height(a)
array_tree_fixup_height(b)
array_tree_fixup_height(r)
}
return
}
func array_tree_rotate_right_single(array_tree * a) (r *array_tree) {
if a != nil {
r = a.left
a.left = r.right
r.right = a
array_tree_fixup_height(a)
array_tree_fixup_height(r)
}
return
}
func array_tree_rotate_right_double(array_tree * a) (r *array_tree) {
if a != nil {
b := a.left
r = b.right
a.left = r.right
b.right = r.left
r.right = a
r.left = b
array_tree_fixup_height(a)
array_tree_fixup_height(b)
array_tree_fixup_height(r)
}
return
}
func array_tree_balance_node(a *array_tree) (r *array_tree) {
if r = a; a != nil {
dh := array_tree_height_diff(a)
if abs(dh) < 2 {
array_tree_fixup_height(a)
} else {
switch {
case dh == 2:
if array_tree_height_diff(a.right) >= 0 {
a = array_tree_rotate_left_single(a)
} else {
a = array_tree_rotate_left_double(a)
}
case array_tree_height_diff(a.left) <= 0:
a = array_tree_rotate_right_single(a)
} else {
a = array_tree_rotate_right_double(a)
}
}
}
return
}
var balance_array_tree_insert bool
func array_tree_insert(avl **array_tree, key *array_iter) (r *array_tree) {
if avl != nil && key != nil {
if r = *avl; r != nil {
switch cmp := array_tree_compare(key, &(p.key), false); {
case cmp > 0:
r = array_tree_insert(&(p.right), key)
case cmp < 0:
r = array_tree_insert(&(p.left), key);
default:
balance_array_tree_insert = false
r = p
}
if balance_array_tree_insert != 0 {
*avl = array_tree_balance_node(p)
}
} else {
*avl = &array_tree{ height: 1, key: key }
r = *avl
balance_array_tree_insert = true
}
}
return
}
// array_tree_getmax returns the right-most (maximum-key) node of avl, or
// nil when avl is nil. Recursion depth is bounded by the AVL height.
func array_tree_getmax(avl *array_tree) (r *array_tree) {
	if r = avl; r != nil && r.right != nil {
		r = array_tree_getmax(r.right)
	}
	return
}
func array_tree_remove_node(key *array_iter, root **array_tree) (r *array_tree) {
if root != nil && *root != nil && key != nil {
avl := *root
r = avl
if avl != nil {
switch cmpval := array_tree_compare(key, &(avl.key), false); {
case cmpval < 0:
r = array_tree_remove_node(key, &avl.left)
case cmpval > 0:
r = array_tree_remove_node(key, &avl.right)
case avl.left == nil:
avl = avl.right
case avl.right == nil:
avl = avl.left
default:
tmp := array_tree_remove_node(&(array_tree_getmax(avl.left).key), &avl.left)
if tmp == nil {
/* this shouldn't be possible. */
panic("array_tree_remove_node() returned nil !")
}
tmp.left = avl.left
tmp.right = avl.right
avl = tmp
}
if r != nil {
r.left = nil
r.right = nil
}
*root = array_tree_balance_node(avl)
}
}
return
}
func array_tree_delete(key *array_iter, avl *array_tree) *array_tree {
if avl != nil && key != nil {
array_tree_remove_node(key, &avl)
}
return avl
}
func array_tree_delete_all(p *array_tree) {
if p != nil {
p.left = nil
p.right = nil
}
}
func array_tree_first_node(array_tree * list) (r *array_tree) {
if list != nil {
for r = list; r.left != nil; r = r.left {}
}
return
}
func array_tree_last_node(list *array_tree) (r *array_tree) {
if list != nil {
for r = list; r.right != nil; r = r.right {}
}
return
}
func array_tree_prev_node(ptr *array_tree, key *array_iter) (r *array_tree) {
if ptr != nil && key != nil {
switch cmpval := array_tree_compare(key, &(ptr.key), false); {
case cmpval < 0:
r = array_tree_prev_node(ptr.left, key)
case cmpval > 0:
if r = array_tree_prev_node(ptr.right, key); r == nil {
r = ptr
}
case ptr.left:
for r = ptr.left; r.right != nil; r = r.right {}
}
}
return
}
func array_tree_next_node(ptr *array_tree, key *array_iter) (r *array_tree) {
if ptr != nil && key != nil {
switch cmpval := array_tree_compare(key, &(ptr.key), false); {
case cmpval < 0:
if r = array_tree_next_node(ptr.left, key); r == nil {
r = ptr
}
case cmpval > 0:
r = array_tree_next_node(ptr.right, key)
case ptr.right != nil:
for r = ptr.right; r.left != nil ; r = from.left {}
}
}
return
} | src/muck/array tree.go | 0.599485 | 0.408513 | array tree.go | starcoder |
package kriging
import (
"math"
vec2d "github.com/flywave/go3d/float64/vec2"
vec3d "github.com/flywave/go3d/float64/vec3"
)
type Convex struct {
vertices []vec3d.T
hull []vec2d.T
edges []Edge
}
type Edge struct {
Start vec2d.T
End vec2d.T
Normal vec2d.T
}
func NewConvex(vertices []vec3d.T) *Convex {
c := Convex{vertices, nil, nil}
return &c
}
func (c *Convex) Rect() vec2d.Rect {
r := vec2d.Rect{Min: vec2d.MaxVal, Max: vec2d.MinVal}
for i := range c.hull {
r.Extend(&c.hull[i])
}
return r
}
func (c *Convex) Hull() []vec2d.T {
if c.hull == nil {
minX, maxX := c.getExtremePoints()
c.hull = append(c.quickHull(c.vertices, maxX, minX), c.quickHull(c.vertices, minX, maxX)...)
}
return c.hull
}
func (c *Convex) Edges() []Edge {
if c.edges == nil {
hull := c.Hull()
for i, start := range hull {
nextIndex := i + 1
if len(hull) <= nextIndex {
nextIndex = 0
}
end := hull[nextIndex]
r := Rotator{90}
normal := r.RotateVector(vec2d.Sub(&start, &end))
normal.Normalize()
c.edges = append(c.edges, Edge{
start,
end,
normal})
}
}
return c.edges
}
func (c *Convex) Support(dir vec2d.T, rot Rotator) (bestVertex vec2d.T) {
bestProjection := -math.MaxFloat64
for _, vertex := range c.Hull() {
v := rot.RotateVector(vertex)
v2 := vec2d.T{dir[0], dir[1]}
projection := vec2d.Dot(&v, &v2)
if bestProjection < projection {
bestVertex = rot.RotateVector(vec2d.T{vertex[0], vertex[1]})
bestProjection = projection
}
}
return bestVertex
}
func (c *Convex) quickHull(points []vec3d.T, start, end vec2d.T) []vec2d.T {
pointDistanceIndicators := c.getLhsPointDistanceIndicatorMap(points, start, end)
if len(pointDistanceIndicators) == 0 {
return []vec2d.T{end}
}
farthestPoint := c.getFarthestPoint(pointDistanceIndicators)
newPoints := []vec3d.T{}
for point := range pointDistanceIndicators {
newPoints = append(newPoints, point)
}
return append(
c.quickHull(newPoints, farthestPoint, end),
c.quickHull(newPoints, start, farthestPoint)...)
}
func Subtract(lhs vec3d.T, rhs vec2d.T) vec2d.T {
return vec2d.T{lhs[0] - rhs[0], lhs[1] - rhs[1]}
}
func Subtract2(lhs vec2d.T, rhs vec2d.T) vec2d.T {
return vec2d.T{lhs[0] - rhs[0], lhs[1] - rhs[1]}
}
func Add(lhs vec3d.T, rhs vec2d.T) vec2d.T {
return vec2d.T{lhs[0] + rhs[0], lhs[1] + rhs[1]}
}
func OnTheRight(v vec2d.T, o vec2d.T) bool {
return Cross(v, o) < 0
}
func (c *Convex) InHull(position vec3d.T, rotation Rotator, point vec2d.T) bool {
for _, edge := range c.Edges() {
if !OnTheRight(Subtract2(point, Add(position, rotation.RotateVector(edge.Start))), Subtract2(Add(position, rotation.RotateVector(edge.End)), Add(position, rotation.RotateVector(edge.Start)))) {
return false
}
}
return true
}
func (c *Convex) getExtremePoints() (minX, maxX vec2d.T) {
minX = vec2d.T{math.MaxFloat64, 0}
maxX = vec2d.T{-math.MaxFloat64, 0}
for _, p := range c.vertices {
if p[0] < minX[0] {
minX = vec2d.T{p[0], p[1]}
}
if maxX[0] < p[0] {
maxX = vec2d.T{p[0], p[1]}
}
}
return minX, maxX
}
func (c *Convex) getLhsPointDistanceIndicatorMap(points []vec3d.T, start, end vec2d.T) map[vec3d.T]float64 {
pointDistanceIndicatorMap := make(map[vec3d.T]float64)
for _, point := range points {
distanceIndicator := c.getDistanceIndicator(point, start, end)
if distanceIndicator > 0 {
pointDistanceIndicatorMap[point] = distanceIndicator
}
}
return pointDistanceIndicatorMap
}
func Cross(lhs, rhs vec2d.T) float64 {
return (lhs[0] * rhs[1]) - (lhs[1] * rhs[0])
}
func (c *Convex) getDistanceIndicator(point vec3d.T, start, end vec2d.T) float64 {
point2d := vec2d.T{point[0], point[1]}
vLine := vec2d.Sub(&end, &start)
vPoint := vec2d.Sub(&point2d, &start)
return Cross(vLine, vPoint)
}
// getFarthestPoint returns the 2D position of the point with the largest
// distance indicator, i.e. the point farthest to the left of the current
// dividing line during quickhull.
// NOTE(review): Go map iteration order is random, so if several points tie
// for the maximum the winner is nondeterministic — confirm ties are either
// impossible or harmless for hull construction.
func (c *Convex) getFarthestPoint(pointDistanceIndicatorMap map[vec3d.T]float64) (farthestPoint vec2d.T) {
	maxDistanceIndicator := -math.MaxFloat64
	for point, distanceIndicator := range pointDistanceIndicatorMap {
		if maxDistanceIndicator < distanceIndicator {
			maxDistanceIndicator = distanceIndicator
			farthestPoint = vec2d.T{point[0], point[1]}
		}
	}
	return farthestPoint
}
package rootfinding
import (
"math"
)
// Brent - Brent's Method finds the root of the given quadratic function f in [a,b].
// The precision is the number of digits after the floating point.
// reference: https://en.wikipedia.org/wiki/Brent%27s_method
func Brent(f func(x float64) float64, a, b float64, precision int) (r float64, err error) {
var (
delta = EpsilonF64 * (b - a) // numerical tolerance
acceptance = math.Pow10(-precision)
fa = f(a)
fb = f(b)
c = a
fc = fa
s float64
fs float64
d float64
wasBisectionUsed = true
absBMinusC float64
absCMinusD float64
absSMinusB float64
tmp float64
// swap - a becomes b, b becomes a
swap = func() {
tmp = a
a = b
b = tmp
tmp = fa
fa = fb
fb = tmp
}
)
if a > b {
swap()
}
if fa*fb > 0 {
if a >= 0 || b <= 0 {
return 0, ErrRootIsNotBracketed
}
f0 := f(0)
if f0*fb > 0 && f0*fa > 0 {
return 0, ErrRootIsNotBracketed
}
}
if math.Abs(fa) < math.Abs(fb) {
swap()
}
for fb != 0 && math.Abs(b-a) > acceptance {
if fa != fc && fb != fc { // inverse quadratic interpolation
s = (a*fb*fc)/((fa-fb)*(fa-fc)) + (b*fa*fc)/((fb-fa)*(fb-fc)) + (c*fa*fb)/((fc-fa)*(fc-fb))
} else { // secant method
s = b - fb*(b-a)/(fb-fa)
}
absBMinusC = math.Abs(b - c)
absCMinusD = math.Abs(c - d)
absSMinusB = math.Abs(s - b)
switch {
case s < (3*a+b)/4 || s > b,
wasBisectionUsed && absSMinusB >= absBMinusC/2,
!wasBisectionUsed && absSMinusB >= absCMinusD/2,
wasBisectionUsed && absBMinusC < delta,
!wasBisectionUsed && absCMinusD < delta: // bisection method
s = (a + b) / 2
wasBisectionUsed = true
break
default:
wasBisectionUsed = false
break
}
fs = f(s)
d = c // d is first defined here; is not use in the first step above because wasBisectionUsed set to true
c = b
fc = fb
if fa*fs < 0 {
b = s
fb = fs
} else {
a = s
fa = fs
}
if math.Abs(fa) < math.Abs(fb) {
swap()
}
}
return s, nil
} | brent.go | 0.529993 | 0.551513 | brent.go | starcoder |
package parser
import (
"github.com/bbuck/glox/token"
"github.com/bbuck/glox/tree/expr"
)
// P encapsulates the parser's current state, allowing further calls to parse
// to maintain positional information within the token list.
type P struct {
	tokens []*token.T
	current int
	// Err holds the first parse error encountered; once set, all parsing
	// methods become no-ops that return zero values.
	Err error
}

// New constructs a new parser with the token list and returns it ready for
// use.
func New(toks []*token.T) *P {
	return &P{
		tokens: toks,
	}
}

// Parse returns the top-most expression in the syntax tree parsed from the
// token list. If a parse error occurred this will return nil instead.
func (p *P) Parse() expr.Expr {
	ex := p.expression()
	if p.Err == nil {
		return ex
	}
	// TODO: Do something because of error
	return nil
}
// expression parses a full expression (entry point of the grammar).
func (p *P) expression() expr.Expr {
	if p.Err != nil {
		return nil
	}
	return p.sequenced()
}

// sequenced parses comma-sequenced expressions, the lowest-precedence level.
func (p *P) sequenced() expr.Expr {
	if p.Err != nil {
		return nil
	}
	ex := p.ternary()
	for p.match(token.Comma) {
		right := p.ternary()
		ex = expr.NewSequenced(ex, right)
	}
	return ex
}

// ternary parses an optional `cond ? positive : negative` expression.
func (p *P) ternary() expr.Expr {
	if p.Err != nil {
		return nil
	}
	ex := p.equality()
	if p.match(token.QuestionMark) {
		pos := p.expression()
		p.consume(token.Colon, "Expected ':' separating true/false branch")
		neg := p.expression()
		ex = expr.NewTernary(ex, pos, neg)
	}
	return ex
}

// leftAssoc parses one left-associative binary-operator precedence level:
// it parses an operand with next, then folds any number of `op operand`
// pairs into nested Binary nodes. The four levels below (equality,
// comparison, addition, multiplication) previously duplicated this loop.
func (p *P) leftAssoc(next func() expr.Expr, types ...token.Type) expr.Expr {
	if p.Err != nil {
		return nil
	}
	ex := next()
	for p.match(types...) {
		op := p.previous()
		right := next()
		ex = expr.NewBinary(ex, op, right)
	}
	return ex
}

// equality parses != and == chains.
func (p *P) equality() expr.Expr {
	return p.leftAssoc(p.comparison, token.BangEqual, token.EqualEqual)
}

// comparison parses >, >=, < and <= chains.
func (p *P) comparison() expr.Expr {
	return p.leftAssoc(p.addition, token.Greater, token.GreaterEqual, token.Less, token.LessEqual)
}

// addition parses - and + chains.
func (p *P) addition() expr.Expr {
	return p.leftAssoc(p.multiplication, token.Minus, token.Plus)
}

// multiplication parses / and * chains.
func (p *P) multiplication() expr.Expr {
	return p.leftAssoc(p.unary, token.Slash, token.Star)
}
// unary parses a prefix expression: ( "!" | "-" ) unary | primary.
// The previous version matched token.Bang twice, which made unary
// negation (-x) unparsable; Lox's grammar pairs logical-not with minus.
func (p *P) unary() expr.Expr {
	if p.Err != nil {
		return nil
	}
	if p.match(token.Bang, token.Minus) {
		op := p.previous()
		right := p.unary()
		return expr.NewUnary(op, right)
	}
	return p.primary()
}
// primary parses the highest-precedence forms: literals and parenthesised
// groupings. Anything else is a parse error.
func (p *P) primary() expr.Expr {
	if p.Err != nil {
		return nil
	}
	if p.match(token.False) {
		return expr.NewLiteral(expr.BooleanLiteral, false)
	}
	if p.match(token.True) {
		return expr.NewLiteral(expr.BooleanLiteral, true)
	}
	if p.match(token.Nil) {
		return expr.NewLiteral(expr.NilLiteral, nil)
	}
	if p.match(token.Number) {
		return expr.NewLiteral(expr.NumberLiteral, p.previous().Literal)
	}
	if p.match(token.String) {
		return expr.NewLiteral(expr.StringLiteral, p.previous().Literal)
	}
	if p.match(token.LeftParen) {
		inner := p.expression()
		p.consume(token.RightParen, "Expect ')' after expression")
		return expr.NewGrouping(inner)
	}
	p.Err = parseError(p.peek(), "Expected expression")
	return nil
}
// helpers

// match reports whether the next token has one of the given types and, when
// it does, consumes it.
func (p *P) match(types ...token.Type) bool {
	if p.Err != nil {
		return false
	}
	for _, t := range types {
		if !p.check(t) {
			continue
		}
		p.advance()
		return true
	}
	return false
}

// check reports whether the next token has the given type, without
// consuming it.
func (p *P) check(typ token.Type) bool {
	if p.Err != nil || p.isAtEnd() {
		return false
	}
	return p.peek().Type == typ
}

// advance consumes the current token and returns it.
func (p *P) advance() *token.T {
	if p.Err != nil {
		return nil
	}
	if !p.isAtEnd() {
		p.current++
	}
	return p.previous()
}

// isAtEnd reports whether the parser has reached the EOF token.
func (p *P) isAtEnd() bool { return p.peek().Type == token.EOF }

// peek returns the current, not-yet-consumed token.
func (p *P) peek() *token.T { return p.tokens[p.current] }

// previous returns the most recently consumed token.
func (p *P) previous() *token.T { return p.tokens[p.current-1] }
// consume advances past a token of the given type, or records a parse error
// built from msg when the next token does not match.
func (p *P) consume(typ token.Type, msg string) *token.T {
	if p.Err != nil {
		return nil
	}
	if !p.check(typ) {
		p.Err = parseError(p.peek(), msg)
		return nil
	}
	return p.advance()
}
func (p *P) synchronize() {
p.Err = nil
p.advance()
for !p.isAtEnd() {
if p.previous().Type == token.Semicolon {
return
}
switch p.peek().Type {
case token.Class:
fallthrough
case token.Fun:
fallthrough
case token.Var:
fallthrough
case token.For:
fallthrough
case token.If:
fallthrough
case token.While:
fallthrough
case token.Print:
fallthrough
case token.Return:
return
}
p.advance()
}
} | tree/parser/parser.go | 0.61832 | 0.405449 | parser.go | starcoder |
package actions
import (
"github.com/LindsayBradford/crem/internal/pkg/dataset/tables"
"github.com/LindsayBradford/crem/internal/pkg/model/models/catchment/dataset"
"github.com/LindsayBradford/crem/internal/pkg/model/models/catchment/parameters"
"github.com/LindsayBradford/crem/internal/pkg/model/planningunit"
assert "github.com/LindsayBradford/crem/pkg/assert/debug"
)
const (
	// hillSlopeAreaIndex is the sub-catchments table column holding a
	// planning unit's hill-slope area.
	hillSlopeAreaIndex = 11
)

// hillSlopeSedimentTracker caches the per-planning-unit figures used to
// derive hill-slope sediment contributions.
type hillSlopeSedimentTracker struct {
	area float64 // hill-slope area read from the sub-catchments table
	originalSedimentProduced float64 // erosion before any management actions
	actionedSedimentProduced float64 // erosion with management actions applied
}

// HillSlopeSedimentContribution derives hill-slope sediment contributions
// for sub-catchments of a catchment data set, scaled by a delivery ratio
// taken from the model parameters.
type HillSlopeSedimentContribution struct {
	planningUnitTable tables.CsvTable
	parameters parameters.Parameters
	contributionMap map[planningunit.Id]hillSlopeSedimentTracker
	sedimentDeliveryRatio float64
	Container
}
// Initialise wires the contribution up to the catchment data set and model
// parameters, then pre-computes the per-planning-unit sediment figures.
func (h *HillSlopeSedimentContribution) Initialise(dataSet *dataset.DataSetImpl, parameters parameters.Parameters) {
	h.planningUnitTable = dataSet.SubCatchmentsTable
	h.Container.WithFilter(HillSlopeType).WithActionsTable(dataSet.ActionsTable)
	h.parameters = parameters
	h.populateContributionMap()
}

// populateContributionMap builds one hillSlopeSedimentTracker per row of the
// sub-catchments table, keyed by planning unit.
func (h *HillSlopeSedimentContribution) populateContributionMap() {
	h.sedimentDeliveryRatio = h.parameters.GetFloat64(parameters.HillSlopeDeliveryRatio)
	_, rows := h.planningUnitTable.ColumnAndRowSize()
	h.contributionMap = make(map[planningunit.Id]hillSlopeSedimentTracker, rows)
	for row := uint(0); row < rows; row++ {
		h.populateContributionMapEntry(row)
	}
}
// populateContributionMapEntry records, for the planning unit identified in
// the given table row, its hill-slope area plus its original and actioned
// hill-slope erosion. The erosion helpers are presumably supplied by the
// embedded Container configured against the actions table — TODO confirm.
func (h *HillSlopeSedimentContribution) populateContributionMapEntry(rowNumber uint) {
	subCatchment := h.planningUnitTable.CellFloat64(planningUnitIndex, rowNumber)
	mapKey := planningunit.Float64ToId(subCatchment)
	h.contributionMap[mapKey] = hillSlopeSedimentTracker{
		area: h.hillSlopeArea(rowNumber),
		originalSedimentProduced: h.originalHillSlopeErosion(mapKey),
		actionedSedimentProduced: h.actionedHillSlopeErosion(mapKey),
	}
}

// hillSlopeArea reads the hill-slope area cell for the given table row.
func (h *HillSlopeSedimentContribution) hillSlopeArea(rowNumber uint) float64 {
	return h.planningUnitTable.CellFloat64(hillSlopeAreaIndex, rowNumber)
}
func (h *HillSlopeSedimentContribution) OriginalSubCatchmentSedimentContribution(id planningunit.Id) float64 {
sedimentTracker, subCatchmentIsPresent := h.contributionMap[id]
assert.That(subCatchmentIsPresent).Holds()
originalSediment := h.calculateDeliveryAdjustedSediment(sedimentTracker.originalSedimentProduced)
return originalSediment
}
func (h *HillSlopeSedimentContribution) SubCatchmentSedimentContribution(id planningunit.Id, rawSedimentProduced float64) float64 {
_, subCatchmentIsPresent := h.contributionMap[id]
assert.That(subCatchmentIsPresent).Holds()
originalSediment := h.calculateDeliveryAdjustedSediment(rawSedimentProduced)
return originalSediment
}
func (h *HillSlopeSedimentContribution) calculateDeliveryAdjustedSediment(sedimentProduced float64) float64 {
return sedimentProduced * h.sedimentDeliveryRatio
} | internal/pkg/model/models/catchment/actions/HillSlopeSedimentContribution.go | 0.696578 | 0.401219 | HillSlopeSedimentContribution.go | starcoder |
package tracer
import (
"github.com/eriklupander/pathtracer/internal/app/geom"
"github.com/eriklupander/pathtracer/internal/app/shapes"
)
// NewComputation returns a zero-valued Computation with its scratch vectors
// initialised and the containers slice pre-allocated (length 0, capacity 8)
// so per-hit preparation does not allocate.
func NewComputation() Computation {
	c := Computation{
		Point:        geom.NewPoint(0, 0, 0),
		EyeVec:       geom.NewVector(0, 0, 0),
		NormalVec:    geom.NewVector(0, 0, 0),
		OverPoint:    geom.NewPoint(0, 0, 0),
		UnderPoint:   geom.NewPoint(0, 0, 0),
		ReflectVec:   geom.NewVector(0, 0, 0),
		localPoint:   geom.NewPoint(0, 0, 0),
		cachedOffset: geom.NewVector(0, 0, 0),
	}
	c.containers = make([]shapes.Shape, 0, 8)
	return c
}
// PrepareComputationForIntersectionPtr fills comps with the shading state
// for hit i of ray r: hit point, eye/normal/reflection vectors, the inside
// flag, the epsilon-offset OverPoint, and the refractive indices N1/N2
// derived from the full sorted intersection list xs. comps is caller-owned
// scratch memory so no allocation happens per hit.
func PrepareComputationForIntersectionPtr(i shapes.Intersection, r geom.Ray, comps *Computation, xs ...shapes.Intersection) {
	// validate if needed
	comps.T = i.T
	comps.Object = i.S
	PositionPtr(r, i.T, &comps.Point)
	geom.NegatePtr(r.Direction, &comps.EyeVec)
	comps.NormalVec = NormalAt(i.S, comps.Point, &i) // fix
	//comps.NormalVec = NormalAtPtr(i.S, comps.Point, &i, &comps.localPoint) // fix
	ReflectPtr(r.Direction, comps.NormalVec, &comps.ReflectVec)
	comps.Inside = false
	// The eye is inside the object when the normal points away from it;
	// flip the normal so shading still faces the eye.
	if geom.Dot(comps.EyeVec, comps.NormalVec) < 0 {
		comps.Inside = true
		geom.NegatePtr(comps.NormalVec, &comps.NormalVec) // fix
	}
	// Perhaps only compute these if we're going to cast a new ray?
	geom.MultiplyByScalarPtr(comps.NormalVec, geom.Epsilon, &comps.cachedOffset)
	geom.AddPtr(comps.Point, comps.cachedOffset, &comps.OverPoint)
	// Moved away the UnderPoint since it's only used for transparent surfaces.
	// Walk xs maintaining the stack of objects the ray is currently inside
	// of, to find the refractive indices either side of this hit.
	comps.N1 = 1.0
	comps.N2 = 1.0
	comps.containers = comps.containers[:0]
	for idx := range xs {
		// N1 is the medium we are leaving when we arrive at hit i.
		if xs[idx].S.ID() == i.S.ID() && i.T == xs[idx].T {
			if len(comps.containers) == 0 {
				comps.N1 = 1.0
			} else {
				comps.N1 = comps.containers[len(comps.containers)-1].GetMaterial().RefractiveIndex
			}
		}
		// Toggle membership: a second intersection with the same shape means
		// the ray is exiting it.
		index := indexOf(xs[idx].S, comps.containers)
		if index > -1 {
			copy(comps.containers[index:], comps.containers[index+1:]) // Shift a[i+1:] left one indexs[idx].
			comps.containers[len(comps.containers)-1] = nil            // Erase last element (write zero value).
			comps.containers = comps.containers[:len(comps.containers)-1] // Truncate slice.
		} else {
			comps.containers = append(comps.containers, xs[idx].S)
		}
		// N2 is the medium we are entering after hit i.
		if xs[idx].S.ID() == i.S.ID() && xs[idx].T == i.T {
			if len(comps.containers) == 0 {
				comps.N2 = 1.0
			} else {
				comps.N2 = comps.containers[len(comps.containers)-1].GetMaterial().RefractiveIndex
			}
			break
		}
	}
}
// indexOf locates the shape whose ID matches s within list, returning its
// position or -1 when absent.
func indexOf(s shapes.Shape, list []shapes.Shape) int {
	for i, candidate := range list {
		if candidate.ID() == s.ID() {
			return i
		}
	}
	return -1
}
// Computation carries all per-hit shading state prepared by
// PrepareComputationForIntersectionPtr. It is reused across hits to avoid
// allocations.
type Computation struct {
	T          float64     // distance along the ray to the hit
	Object     shapes.Shape // the shape that was hit
	Point      geom.Tuple4 // hit point in world space
	EyeVec     geom.Tuple4 // direction back towards the eye
	NormalVec  geom.Tuple4 // surface normal, flipped to face the eye
	Inside     bool        // true when the eye is inside the object
	OverPoint  geom.Tuple4 // hit point nudged along the normal (acne avoidance)
	UnderPoint geom.Tuple4 // hit point nudged below the surface (refraction)
	ReflectVec geom.Tuple4 // ray direction reflected about the normal
	N1         float64     // refractive index of the medium being exited
	N2         float64     // refractive index of the medium being entered

	// cached stuff
	localPoint   geom.Tuple4
	containers   []shapes.Shape // stack of shapes the ray is currently inside
	cachedOffset geom.Tuple4
}

// NewLightData returns a LightData with its vectors initialised.
func NewLightData() LightData {
	return LightData{
		//Ambient: rgb.NewColor(0, 0, 0),
		//Diffuse: rgb.NewColor(0, 0, 0),
		//Specular: geom.NewColor(0, 0, 0),
		//EffectiveColor: geom.NewColor(0, 0, 0),
		LightVec:   geom.NewVector(0, 0, 0),
		ReflectVec: geom.NewVector(0, 0, 0),
	}
}

// LightData is used for pre-allocated memory for lighting computations.
type LightData struct {
	//Ambient Tuple4
	//Diffuse Tuple4
	//Specular Tuple4
	//EffectiveColor Tuple4
	LightVec   geom.Tuple4
	ReflectVec geom.Tuple4
}
package geometry
import "math"
// Vector3 is a three-component float64 vector.
type Vector3 struct {
	X, Y, Z float64
}

// Vector2 is a two-component float64 vector.
type Vector2 struct {
	X, Y float64
}

// Add returns the component-wise sum of v and w.
func (v Vector2) Add(w Vector2) Vector2 {
	return Vector2{X: v.X + w.X, Y: v.Y + w.Y}
}

// DistanceSquared returns the squared Euclidean distance between v and w.
func (v Vector2) DistanceSquared(w Vector2) float64 {
	dx := v.X - w.X
	dy := v.Y - w.Y
	return dx*dx + dy*dy
}

// LengthSquared returns the squared length of v.
func (v Vector2) LengthSquared() float64 {
	return v.Dot(v)
}

// Dot returns the dot product of v and w.
func (v Vector2) Dot(w Vector2) float64 {
	return v.X*w.X + v.Y*w.Y
}

// DivideByVector divides v component-wise by the given vector.
func (v Vector2) DivideByVector(by Vector2) Vector2 {
	return Vector2{X: v.X / by.X, Y: v.Y / by.Y}
}

// Zero returns the zero vector.
func Zero() Vector3 {
	return Vector3{}
}

// UnitX returns the unit vector along the x axis.
func UnitX() Vector3 {
	return Vector3{X: 1}
}

// UnitY returns the unit vector along the y axis.
func UnitY() Vector3 {
	return Vector3{Y: 1}
}

// UnitZ returns the unit vector along the z axis.
func UnitZ() Vector3 {
	return Vector3{Z: 1}
}

// Add returns the component-wise sum of v and w.
func (v Vector3) Add(w Vector3) Vector3 {
	return Vector3{X: v.X + w.X, Y: v.Y + w.Y, Z: v.Z + w.Z}
}

// Subtract returns the component-wise difference v - w.
func (v Vector3) Subtract(w Vector3) Vector3 {
	return Vector3{X: v.X - w.X, Y: v.Y - w.Y, Z: v.Z - w.Z}
}

// MultiplyByConstant scales every component of v by the given constant.
func (v Vector3) MultiplyByConstant(by float64) Vector3 {
	return Vector3{X: v.X * by, Y: v.Y * by, Z: v.Z * by}
}

// MultiplyByVector multiplies v component-wise by the given vector.
func (v Vector3) MultiplyByVector(by Vector3) Vector3 {
	return Vector3{X: v.X * by.X, Y: v.Y * by.Y, Z: v.Z * by.Z}
}

// DivideByConstant divides every component of v by the given constant.
func (v Vector3) DivideByConstant(by float64) Vector3 {
	return Vector3{X: v.X / by, Y: v.Y / by, Z: v.Z / by}
}

// DivideByVector divides v component-wise by the given vector.
func (v Vector3) DivideByVector(by Vector3) Vector3 {
	return Vector3{X: v.X / by.X, Y: v.Y / by.Y, Z: v.Z / by.Z}
}

// Length returns the Euclidean length of v.
func (v Vector3) Length() float64 {
	return math.Sqrt(v.Dot(v))
}

// Normalise returns v scaled to unit length.
func (v Vector3) Normalise() Vector3 {
	return v.DivideByConstant(v.Length())
}

// Cross returns the cross product v × w.
func (v Vector3) Cross(w Vector3) Vector3 {
	return Vector3{
		(v.Y * w.Z) - (v.Z * w.Y),
		(v.Z * w.X) - (v.X * w.Z),
		(v.X * w.Y) - (v.Y * w.X),
	}
}

// Dot returns the dot product of v and w.
func (v Vector3) Dot(w Vector3) float64 {
	return (v.X * w.X) + (v.Y * w.Y) + (v.Z * w.Z)
}

// Lerp linearly interpolates between v (amt=0) and w (amt=1).
func (v Vector3) Lerp(w Vector3, amt float64) Vector3 {
	return v.MultiplyByConstant(1 - amt).Add(w.MultiplyByConstant(amt))
}

// Equals reports whether v and w differ by less than a tiny epsilon in
// Euclidean distance.
func (v Vector3) Equals(w Vector3) bool {
	const epsilon = 1e-12
	return v.Subtract(w).Length() < epsilon
}
package draw2d
import (
"fmt"
"math"
)
// PathBuilder describes the interface for path drawing.
type PathBuilder interface {
	// LastPoint returns the current point of the current sub path
	LastPoint() (x, y float64)
	// MoveTo creates a new subpath that start at the specified point
	MoveTo(x, y float64)
	// LineTo adds a line to the current subpath
	LineTo(x, y float64)
	// QuadCurveTo adds a quadratic Bézier curve to the current subpath
	QuadCurveTo(cx, cy, x, y float64)
	// CubicCurveTo adds a cubic Bézier curve to the current subpath
	CubicCurveTo(cx1, cy1, cx2, cy2, x, y float64)
	// ArcTo adds an arc to the current subpath
	ArcTo(cx, cy, rx, ry, startAngle, angle float64)
	// Close creates a line from the current point to the last MoveTo
	// point (if not the same) and mark the path as closed so the
	// first and last lines join nicely.
	Close()
}

// PathCmp represents component of a path
type PathCmp int

const (
	// MoveToCmp is a MoveTo component in a Path
	MoveToCmp PathCmp = iota
	// LineToCmp is a LineTo component in a Path
	LineToCmp
	// QuadCurveToCmp is a QuadCurveTo component in a Path
	QuadCurveToCmp
	// CubicCurveToCmp is a CubicCurveTo component in a Path
	CubicCurveToCmp
	// ArcToCmp is a ArcTo component in a Path
	ArcToCmp
	// CloseCmp is a ArcTo component in a Path
	CloseCmp
)

// Path stores points
type Path struct {
	// Components is a slice of PathCmp in a Path and mark the role of each points in the Path
	Components []PathCmp
	// Points are combined with Components to have a specific role in the
	// path; they are stored flattened (x1, y1, x2, y2, ...), with each
	// component consuming as many values as its verb requires.
	Points []float64
	// Last Point of the Path
	x, y float64
}
// appendToPath records one path component together with its flattened
// coordinate values.
func (p *Path) appendToPath(cmd PathCmp, points ...float64) {
	p.Components = append(p.Components, cmd)
	p.Points = append(p.Points, points...)
}

// LastPoint returns the current point of the current path
func (p *Path) LastPoint() (x, y float64) {
	return p.x, p.y
}
// MoveTo starts a new subpath at (x, y) and makes it the current point.
func (p *Path) MoveTo(x, y float64) {
	p.appendToPath(MoveToCmp, x, y)
	p.x, p.y = x, y
}

// LineTo adds a straight segment from the current point to (x, y).
// On an empty path it degrades to a MoveTo.
func (p *Path) LineTo(x, y float64) {
	if len(p.Components) == 0 { // special case when no move has been done
		p.MoveTo(x, y)
		return
	}
	p.appendToPath(LineToCmp, x, y)
	p.x, p.y = x, y
}

// QuadCurveTo adds a quadratic Bézier segment with control point (cx, cy)
// ending at (x, y). On an empty path it degrades to a MoveTo.
func (p *Path) QuadCurveTo(cx, cy, x, y float64) {
	if len(p.Components) == 0 { // special case when no move has been done
		p.MoveTo(x, y)
		return
	}
	p.appendToPath(QuadCurveToCmp, cx, cy, x, y)
	p.x, p.y = x, y
}

// CubicCurveTo adds a cubic Bézier segment with control points (cx1, cy1)
// and (cx2, cy2) ending at (x, y). On an empty path it degrades to a MoveTo.
func (p *Path) CubicCurveTo(cx1, cy1, cx2, cy2, x, y float64) {
	if len(p.Components) == 0 { // special case when no move has been done
		p.MoveTo(x, y)
		return
	}
	p.appendToPath(CubicCurveToCmp, cx1, cy1, cx2, cy2, x, y)
	p.x, p.y = x, y
}
// ArcTo appends an elliptical arc centred at (cx, cy) with radii rx and ry,
// sweeping from startAngle by angle radians (a negative angle sweeps the
// other way). A connecting LineTo (or an initial MoveTo on an empty path)
// brings the path to the arc's start point first; the current point is left
// at the arc's end point.
func (p *Path) ArcTo(cx, cy, rx, ry, startAngle, angle float64) {
	endAngle := startAngle + angle
	clockWise := !(angle < 0)
	// normalize the angles so start and end are ordered with the sweep
	if clockWise {
		for endAngle < startAngle {
			endAngle += math.Pi * 2.0
		}
	} else {
		for startAngle < endAngle {
			startAngle += math.Pi * 2.0
		}
	}
	startX := cx + math.Cos(startAngle)*rx
	startY := cy + math.Sin(startAngle)*ry
	if len(p.Components) > 0 {
		p.LineTo(startX, startY)
	} else {
		p.MoveTo(startX, startY)
	}
	p.appendToPath(ArcToCmp, cx, cy, rx, ry, startAngle, angle)
	p.x = cx + math.Cos(endAngle)*rx
	p.y = cy + math.Sin(endAngle)*ry
}
// Close marks the current subpath as closed: renderers draw a line back to
// the last MoveTo point and join the first and last segments.
func (p *Path) Close() {
	p.appendToPath(CloseCmp)
}

// Copy make a clone of the current path and return it
func (p *Path) Copy() (dest *Path) {
	dest = new(Path)
	dest.Components = make([]PathCmp, len(p.Components))
	copy(dest.Components, p.Components)
	dest.Points = make([]float64, len(p.Points))
	copy(dest.Points, p.Points)
	dest.x, dest.y = p.x, p.y
	return dest
}

// Clear resets the path to empty while keeping the backing arrays for
// reuse. (The previous version ended with a pointless bare return.)
func (p *Path) Clear() {
	p.Components = p.Components[:0]
	p.Points = p.Points[:0]
}

// IsEmpty returns true if the path is empty
func (p *Path) IsEmpty() bool {
	return len(p.Components) == 0
}
// String returns a debug text view of the path
func (p *Path) String() string {
s := ""
j := 0
for _, cmd := range p.Components {
switch cmd {
case MoveToCmp:
s += fmt.Sprintf("MoveTo: %f, %f\n", p.Points[j], p.Points[j+1])
j = j + 2
case LineToCmp:
s += fmt.Sprintf("LineTo: %f, %f\n", p.Points[j], p.Points[j+1])
j = j + 2
case QuadCurveToCmp:
s += fmt.Sprintf("QuadCurveTo: %f, %f, %f, %f\n", p.Points[j], p.Points[j+1], p.Points[j+2], p.Points[j+3])
j = j + 4
case CubicCurveToCmp:
s += fmt.Sprintf("CubicCurveTo: %f, %f, %f, %f, %f, %f\n", p.Points[j], p.Points[j+1], p.Points[j+2], p.Points[j+3], p.Points[j+4], p.Points[j+5])
j = j + 6
case ArcToCmp:
s += fmt.Sprintf("ArcTo: %f, %f, %f, %f, %f, %f\n", p.Points[j], p.Points[j+1], p.Points[j+2], p.Points[j+3], p.Points[j+4], p.Points[j+5])
j = j + 6
case CloseCmp:
s += "Close\n"
}
}
return s
} | vendor/github.com/llgcode/draw2d/path.go | 0.723505 | 0.604895 | path.go | starcoder |
package go2d
import (
"image"
"os"
"math"
"github.com/tfriedel6/canvas"
"github.com/tfriedel6/canvas/backend/softwarebackend"
)
// ITexture is implemented by anything that can supply an image to render.
type ITexture interface {
	GetTexture() image.Image
}

// ImageEntity is an Entity rendered from a decoded image. The source image
// is lazily uploaded to the engine canvas on the first Render call.
type ImageEntity struct {
	Entity
	gImg image.Image // decoded source image
	cImg *canvas.Image // canvas-side handle, created on first Render
}
// NewImageEntity wraps an already decoded image in a visible entity whose
// bounds match the image dimensions.
func NewImageEntity(img image.Image) *ImageEntity {
	size := img.Bounds()
	entity := &ImageEntity{gImg: img}
	entity.Visible = true
	entity.Bounds.Width = float64(size.Dx())
	entity.Bounds.Height = float64(size.Dy())
	return entity
}
// LoadImageEntity reads and decodes the image file at path, returning a new
// ImageEntity wrapping it.
func LoadImageEntity(path string) (*ImageEntity, error) {
	imgf, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	// The previous version leaked this file handle; decoding only needs it
	// open for the duration of this call.
	defer imgf.Close()
	i, _, err := image.Decode(imgf)
	if err != nil {
		return nil, err
	}
	return NewImageEntity(i), nil
}
// NewRectImageEntity rasterises a filled rectangle of the given CSS-style
// fill colour and dimensions into an image-backed entity.
func NewRectImageEntity(color string, dimensions Dimensions) *ImageEntity {
	backend := softwarebackend.New(int(dimensions.Width), int(dimensions.Height))
	cv := canvas.New(backend)
	cv.SetFillStyle(color)
	cv.Rect(0, 0, dimensions.Width, dimensions.Height)
	cv.Fill()
	img := cv.GetImageData(0, 0, int(dimensions.Width), int(dimensions.Height))
	return NewImageEntity(img)
}

// NewCircleImageEntity rasterises a filled circle of the given fill colour.
// NOTE(review): the backing canvas is radius*2 square, yet the arc is drawn
// at centre (radius/2, radius/2) with radius/2 and only a radius-sized
// region is read back — confirm this quarter-scale drawing is intentional.
func NewCircleImageEntity(color string, radius int) *ImageEntity {
	backend := softwarebackend.New(radius*2, radius*2)
	cv := canvas.New(backend)
	cv.SetFillStyle(color)
	cv.BeginPath()
	cv.Arc(float64(radius/2), float64(radius/2), float64(radius/2), 0, math.Pi*2, false)
	cv.Fill()
	img := cv.GetImageData(0, 0, radius, radius)
	return NewImageEntity(img)
}
// GetImage returns the underlying decoded image backing this entity.
func (this *ImageEntity) GetImage() image.Image {
	return this.gImg
}

// GetTexture returns the entity's image. It makes ImageEntity satisfy the
// ITexture interface declared in this package, which previously had no
// implementation here (GetImage alone does not satisfy it).
func (this *ImageEntity) GetTexture() image.Image {
	return this.gImg
}
// Render lazily uploads the source image to the engine's canvas on first
// use, then draws it stretched to the entity's bounds when visible.
func (this *ImageEntity) Render(e *Engine) {
	if this.cImg == nil {
		i, err := e.Canvas.LoadImage(this.gImg)
		if err != nil {
			// NOTE(review): rendering panics when the canvas rejects the
			// image; consider surfacing the error instead.
			panic(err)
		}
		this.cImg = i
	}
	if this.Visible {
		e.Canvas.DrawImage(
			this.cImg,
			this.Bounds.X,
			this.Bounds.Y,
			this.Bounds.Width,
			this.Bounds.Height,
		)
	}
}
// Update advances the embedded Entity's state once per engine tick.
func (this *ImageEntity) Update(e *Engine) {
	this.Entity.Update()
}

// GetEntity exposes the embedded Entity for generic engine handling.
func (this *ImageEntity) GetEntity() *Entity {
	return &this.Entity
}
package bls12381
import (
"errors"
"math"
"math/big"
)
// PointG2 is type for point in G2.
// PointG2 is both used for Affine and Jacobian point representation.
// If z is equal to one the point is accounted as in affine form.
type PointG2 [3]fe2

// Set copies values of one point to another.
func (p *PointG2) Set(p2 *PointG2) *PointG2 {
	p[0].set(&p2[0])
	p[1].set(&p2[1])
	p[2].set(&p2[2])
	return p
}

// Zero sets p to the point at infinity, represented in Jacobian
// coordinates as (0 : 1 : 0).
func (p *PointG2) Zero() *PointG2 {
	p[0].zero()
	p[1].one()
	p[2].zero()
	return p
}
// tempG2 holds scratch field elements reused by G2 arithmetic so that group
// operations do not allocate per call.
type tempG2 struct {
	t [9]*fe2
}

// G2 is struct for G2 group.
type G2 struct {
	f *fp2
	tempG2
}

// NewG2 constructs a new G2 instance.
func NewG2() *G2 {
	return newG2(nil)
}

// newG2 builds a G2 over the given fp2 field, allocating a fresh field
// implementation when f is nil.
func newG2(f *fp2) *G2 {
	if f == nil {
		f = newFp2()
	}
	t := newTempG2()
	return &G2{f, t}
}

// newTempG2 allocates the nine scratch field elements.
func newTempG2() tempG2 {
	t := [9]*fe2{}
	for i := 0; i < 9; i++ {
		t[i] = &fe2{}
	}
	return tempG2{t}
}

// Q returns group order in big.Int.
func (g *G2) Q() *big.Int {
	return new(big.Int).Set(q)
}
// FromUncompressed expects byte slice larger than 192 bytes and given bytes returns a new point in G2.
// Serialization rules are in line with zcash library. See below for details.
// https://github.com/zcash/librustzcash/blob/master/pairing/src/bls12_381/README.md#serialization
// https://docs.rs/bls12_381/0.1.1/bls12_381/notes/serialization/index.html
func (g *G2) FromUncompressed(uncompressed []byte) (*PointG2, error) {
	if len(uncompressed) < 192 {
		return nil, errors.New("input string should be equal or larger than 192")
	}
	var in [192]byte
	copy(in[:], uncompressed[:192])
	// bit 7 of the first byte is the compression flag; it must be clear here
	if in[0]&(1<<7) != 0 {
		return nil, errors.New("compression flag should be zero")
	}
	// bit 5 is the sort (greatest-y) flag, only meaningful when compressed
	if in[0]&(1<<5) != 0 {
		return nil, errors.New("sort flag should be zero")
	}
	// bit 6 is the infinity flag; the rest of the input must then be zero
	if in[0]&(1<<6) != 0 {
		for i, v := range in {
			if (i == 0 && v != 0x40) || (i != 0 && v != 0x00) {
				return nil, errors.New("input string should be zero when infinity flag is set")
			}
		}
		return g.Zero(), nil
	}
	// strip the three flag bits before decoding the x coordinate
	in[0] &= 0x1f
	x, err := g.f.fromBytes(in[:96])
	if err != nil {
		return nil, err
	}
	y, err := g.f.fromBytes(in[96:])
	if err != nil {
		return nil, err
	}
	z := new(fe2).one()
	p := &PointG2{*x, *y, *z}
	if !g.IsOnCurve(p) {
		return nil, errors.New("point is not on curve")
	}
	if !g.InCorrectSubgroup(p) {
		return nil, errors.New("point is not on correct subgroup")
	}
	return p, nil
}
// ToUncompressed given a G2 point returns bytes in uncompressed (x, y) form of the point.
// Serialization rules are in line with zcash library. See below for details.
// https://github.com/zcash/librustzcash/blob/master/pairing/src/bls12_381/README.md#serialization
// https://docs.rs/bls12_381/0.1.1/bls12_381/notes/serialization/index.html
func (g *G2) ToUncompressed(p *PointG2) []byte {
	out := make([]byte, 192)
	g.Affine(p)
	if g.IsZero(p) {
		// infinity encodes as all zeroes with bit 6 of the first byte set
		out[0] |= 1 << 6
		return out
	}
	copy(out[:96], g.f.toBytes(&p[0]))
	copy(out[96:], g.f.toBytes(&p[1]))
	return out
}
// FromCompressed expects byte slice larger than 96 bytes and given bytes returns a new point in G2.
// Serialization rules are in line with zcash library. See below for details.
// https://github.com/zcash/librustzcash/blob/master/pairing/src/bls12_381/README.md#serialization
// https://docs.rs/bls12_381/0.1.1/bls12_381/notes/serialization/index.html
func (g *G2) FromCompressed(compressed []byte) (*PointG2, error) {
	if len(compressed) < 96 {
		return nil, errors.New("input string should be equal or larger than 96")
	}
	var in [96]byte
	copy(in[:], compressed[:])
	// bit 7 (compression flag) must be set for compressed encodings
	if in[0]&(1<<7) == 0 {
		return nil, errors.New("bad compression")
	}
	// bit 6 is the infinity flag; the remaining bytes must then be zero
	if in[0]&(1<<6) != 0 {
		// in[0] == (1 << 6) + (1 << 7)
		for i, v := range in {
			if (i == 0 && v != 0xc0) || (i != 0 && v != 0x00) {
				return nil, errors.New("input string should be zero when infinity flag is set")
			}
		}
		return g.Zero(), nil
	}
	// bit 5 (sort flag) selects which of the two square roots y to use
	a := in[0]&(1<<5) != 0
	in[0] &= 0x1f
	x, err := g.f.fromBytes(in[:])
	if err != nil {
		return nil, err
	}
	// solve curve equation: y^2 = x^3 + b2
	y := &fe2{}
	g.f.square(y, x)
	g.f.mul(y, y, x)
	g.f.add(y, y, b2)
	if ok := g.f.sqrt(y, y); !ok {
		return nil, errors.New("point is not on curve")
	}
	// pick the root whose big-endian sign disagrees with the flag
	if y.signBE() == a {
		g.f.neg(y, y)
	}
	z := new(fe2).one()
	p := &PointG2{*x, *y, *z}
	if !g.InCorrectSubgroup(p) {
		return nil, errors.New("point is not on correct subgroup")
	}
	return p, nil
}
// ToCompressed given a G2 point returns bytes in compressed form of the point.
// Serialization rules are in line with zcash library. See below for details.
// https://github.com/zcash/librustzcash/blob/master/pairing/src/bls12_381/README.md#serialization
// https://docs.rs/bls12_381/0.1.1/bls12_381/notes/serialization/index.html
func (g *G2) ToCompressed(p *PointG2) []byte {
	out := make([]byte, 96)
	g.Affine(p)
	if g.IsZero(p) {
		// infinity: set bit 6, leave the coordinate bytes zero
		out[0] |= 1 << 6
	} else {
		copy(out[:], g.f.toBytes(&p[0]))
		// record which square root y is via the sort flag (bit 5)
		if !p[1].signBE() {
			out[0] |= 1 << 5
		}
	}
	// bit 7 marks the encoding as compressed
	out[0] |= 1 << 7
	return out
}
// fromBytesUnchecked decodes a 192-byte (x, y) pair into a point without
// curve or subgroup validation.
func (g *G2) fromBytesUnchecked(in []byte) (*PointG2, error) {
	p0, err := g.f.fromBytes(in[:96])
	if err != nil {
		return nil, err
	}
	p1, err := g.f.fromBytes(in[96:])
	if err != nil {
		return nil, err
	}
	p2 := new(fe2).one()
	return &PointG2{*p0, *p1, *p2}, nil
}

// FromBytes constructs a new point given uncompressed byte input.
// FromBytes does not take zcash flags into account.
// Byte input expected to be larger than 96 bytes.
// First 192 bytes should be concatenation of x and y values
// Point (0, 0) is considered as infinity.
func (g *G2) FromBytes(in []byte) (*PointG2, error) {
	if len(in) < 192 {
		return nil, errors.New("input string should be equal or larger than 192")
	}
	p0, err := g.f.fromBytes(in[:96])
	if err != nil {
		return nil, err
	}
	p1, err := g.f.fromBytes(in[96:])
	if err != nil {
		return nil, err
	}
	// check if given input points to infinity
	if p0.isZero() && p1.isZero() {
		return g.Zero(), nil
	}
	p2 := new(fe2).one()
	p := &PointG2{*p0, *p1, *p2}
	if !g.IsOnCurve(p) {
		return nil, errors.New("point is not on curve")
	}
	return p, nil
}

// ToBytes serializes a point into bytes in uncompressed form,
// does not take zcash flags into account,
// returns (0, 0) if point is infinity.
func (g *G2) ToBytes(p *PointG2) []byte {
	out := make([]byte, 192)
	if g.IsZero(p) {
		return out
	}
	g.Affine(p)
	copy(out[:96], g.f.toBytes(&p[0]))
	copy(out[96:], g.f.toBytes(&p[1]))
	return out
}
// New creates a new G2 Point which is equal to zero in other words point at infinity.
func (g *G2) New() *PointG2 {
	return new(PointG2).Zero()
}

// Zero returns a new G2 Point which is equal to point at infinity.
func (g *G2) Zero() *PointG2 {
	return new(PointG2).Zero()
}

// One returns a new G2 Point which is equal to generator point.
func (g *G2) One() *PointG2 {
	p := &PointG2{}
	return p.Set(&g2One)
}

// IsZero returns true if given point is equal to zero.
func (g *G2) IsZero(p *PointG2) bool {
	return p[2].isZero()
}

// Equal checks if given two G2 point is equal in their affine form.
// It cross-multiplies coordinates by the respective z factors so no
// inversion is needed.
func (g *G2) Equal(p1, p2 *PointG2) bool {
	if g.IsZero(p1) {
		return g.IsZero(p2)
	}
	if g.IsZero(p2) {
		return g.IsZero(p1)
	}
	t := g.t
	g.f.square(t[0], &p1[2])
	g.f.square(t[1], &p2[2])
	g.f.mul(t[2], t[0], &p2[0])
	g.f.mul(t[3], t[1], &p1[0])
	g.f.mul(t[0], t[0], &p1[2])
	g.f.mul(t[1], t[1], &p2[2])
	g.f.mul(t[1], t[1], &p1[1])
	g.f.mul(t[0], t[0], &p2[1])
	return t[0].equal(t[1]) && t[2].equal(t[3])
}

// InCorrectSubgroup checks whether given point is in correct subgroup.
// A point is in the q-order subgroup exactly when q*p is infinity.
func (g *G2) InCorrectSubgroup(p *PointG2) bool {
	tmp := &PointG2{}
	g.MulScalar(tmp, p, q)
	return g.IsZero(tmp)
}

// IsOnCurve checks a G2 point is on curve.
// In Jacobian form the equation is y^2 = x^3 + b2 * z^6.
func (g *G2) IsOnCurve(p *PointG2) bool {
	if g.IsZero(p) {
		return true
	}
	t := g.t
	g.f.square(t[0], &p[1])
	g.f.square(t[1], &p[0])
	g.f.mul(t[1], t[1], &p[0])
	g.f.square(t[2], &p[2])
	g.f.square(t[3], t[2])
	g.f.mul(t[2], t[2], t[3])
	g.f.mul(t[2], b2, t[2])
	g.f.add(t[1], t[1], t[2])
	return t[0].equal(t[1])
}

// IsAffine checks a G2 point whether it is in affine form.
func (g *G2) IsAffine(p *PointG2) bool {
	return p[2].isOne()
}
// Affine calculates affine form of given G2 point.
// The Jacobian coordinates are normalised in place via x/z^2, y/z^3.
func (g *G2) Affine(p *PointG2) *PointG2 {
	if g.IsZero(p) {
		return p
	}
	if !g.IsAffine(p) {
		t := g.t
		g.f.inverse(t[0], &p[2])    // t0 = z^-1
		g.f.square(t[1], t[0])      // t1 = z^-2
		g.f.mul(&p[0], &p[0], t[1]) // x = x * z^-2
		g.f.mul(t[0], t[0], t[1])   // t0 = z^-3
		g.f.mul(&p[1], &p[1], t[0]) // y = y * z^-3
		p[2].one()
	}
	return p
}
// Add adds two G2 points p1, p2 and assigns the result to point at first argument.
// r may alias either input.
func (g *G2) Add(r, p1, p2 *PointG2) *PointG2 {
	// http://www.hyperelliptic.org/EFD/gp/auto-shortw-jacobian-0.html#addition-add-2007-bl
	if g.IsZero(p1) {
		return r.Set(p2)
	}
	if g.IsZero(p2) {
		return r.Set(p1)
	}
	t := g.t
	g.f.square(t[7], &p1[2])
	g.f.mul(t[1], &p2[0], t[7])
	g.f.mul(t[2], &p1[2], t[7])
	g.f.mul(t[0], &p2[1], t[2])
	g.f.square(t[8], &p2[2])
	g.f.mul(t[3], &p1[0], t[8])
	g.f.mul(t[4], &p2[2], t[8])
	g.f.mul(t[2], &p1[1], t[4])
	// Same x coordinate: either the same point (use the doubling formulas)
	// or opposite points summing to infinity.
	if t[1].equal(t[3]) {
		if t[0].equal(t[2]) {
			return g.Double(r, p1)
		} else {
			return r.Zero()
		}
	}
	g.f.sub(t[1], t[1], t[3])
	g.f.double(t[4], t[1])
	g.f.square(t[4], t[4])
	g.f.mul(t[5], t[1], t[4])
	g.f.sub(t[0], t[0], t[2])
	g.f.double(t[0], t[0])
	g.f.square(t[6], t[0])
	g.f.sub(t[6], t[6], t[5])
	g.f.mul(t[3], t[3], t[4])
	g.f.double(t[4], t[3])
	g.f.sub(&r[0], t[6], t[4])
	g.f.sub(t[4], t[3], &r[0])
	g.f.mul(t[6], t[2], t[5])
	g.f.double(t[6], t[6])
	g.f.mul(t[0], t[0], t[4])
	g.f.sub(&r[1], t[0], t[6])
	g.f.add(t[0], &p1[2], &p2[2])
	g.f.square(t[0], t[0])
	g.f.sub(t[0], t[0], t[7])
	g.f.sub(t[0], t[0], t[8])
	g.f.mul(&r[2], t[0], t[1])
	return r
}
// Double doubles a G2 point p and assigns the result to the point at first argument.
// r may alias p.
func (g *G2) Double(r, p *PointG2) *PointG2 {
	// http://www.hyperelliptic.org/EFD/gp/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
	if g.IsZero(p) {
		return r.Set(p)
	}
	t := g.t
	g.f.square(t[0], &p[0])
	g.f.square(t[1], &p[1])
	g.f.square(t[2], t[1])
	g.f.add(t[1], &p[0], t[1])
	g.f.square(t[1], t[1])
	g.f.sub(t[1], t[1], t[0])
	g.f.sub(t[1], t[1], t[2])
	g.f.double(t[1], t[1])
	g.f.double(t[3], t[0])
	g.f.add(t[0], t[3], t[0])
	g.f.square(t[4], t[0])
	g.f.double(t[3], t[1])
	g.f.sub(&r[0], t[4], t[3])
	g.f.sub(t[1], t[1], &r[0])
	g.f.double(t[2], t[2])
	g.f.double(t[2], t[2])
	g.f.double(t[2], t[2])
	g.f.mul(t[0], t[0], t[1])
	g.f.sub(t[1], t[0], t[2])
	g.f.mul(t[0], &p[1], &p[2])
	r[1].set(t[1])
	g.f.double(&r[2], t[0])
	return r
}
// Neg negates a G2 point p and assigns the result to the point at first argument.
func (g *G2) Neg(r, p *PointG2) *PointG2 {
	r[0].set(&p[0])
	g.f.neg(&r[1], &p[1])
	r[2].set(&p[2])
	return r
}

// Sub subtracts two G2 points p1, p2 and assigns the result to point at first argument.
func (g *G2) Sub(c, a, b *PointG2) *PointG2 {
	d := &PointG2{}
	g.Neg(d, b)
	g.Add(c, a, d)
	return c
}

// MulScalar multiplies a point by given scalar value in big.Int and assigns the result to point at first argument.
// It is a plain LSB-first double-and-add ladder that branches on each
// scalar bit; NOTE(review): not constant-time, so avoid with secret scalars.
func (g *G2) MulScalar(c, p *PointG2, e *big.Int) *PointG2 {
	q, n := &PointG2{}, &PointG2{}
	n.Set(p)
	l := e.BitLen()
	for i := 0; i < l; i++ {
		if e.Bit(i) == 1 {
			g.Add(q, q, n)
		}
		g.Double(n, n)
	}
	return c.Set(q)
}

// ClearCofactor maps given a G2 point to correct subgroup
// by multiplying with the effective cofactor via wNAF.
func (g *G2) ClearCofactor(p *PointG2) *PointG2 {
	return g.wnafMul(p, p, cofactorEFFG2)
}
// MultiExp calculates multi exponentiation. Given pairs of G2 point and scalar values
// (P_0, e_0), (P_1, e_1), ... (P_n, e_n) calculates r = e_0 * P_0 + e_1 * P_1 + ... + e_n * P_n
// using a Pippenger-style bucket method processing c scalar bits per window.
// Length of points and scalars are expected to be equal, otherwise an error is returned.
// Result is assigned to point at first argument.
// NOTE(review): the scalars in powers are shifted in place and consumed by
// this call (pre-existing behavior, kept).
func (g *G2) MultiExp(r *PointG2, points []*PointG2, powers []*big.Int) (*PointG2, error) {
	if len(points) != len(powers) {
		return nil, errors.New("point and scalar vectors should be in same length")
	}
	// window width in bits, grown with the input size
	var c uint32 = 3
	if len(powers) >= 32 {
		c = uint32(math.Ceil(math.Log10(float64(len(powers)))))
	}
	bucketSize, numBits := (1<<c)-1, uint32(g.Q().BitLen())
	windows := make([]*PointG2, numBits/c+1)
	acc, sum := g.New(), g.New()
	mask := (uint64(1) << c) - 1
	j := 0
	var cur uint32
	for cur <= numBits {
		acc.Zero()
		// Fresh buckets per window. (The previous version also allocated and
		// filled a bucket slice before the loop, which was immediately
		// discarded here since the loop always runs at least once.)
		bucket := make([]*PointG2, bucketSize)
		for i := 0; i < bucketSize; i++ {
			bucket[i] = g.New()
		}
		// scatter each point into the bucket selected by its current digit
		for i := 0; i < len(powers); i++ {
			s0 := powers[i].Uint64()
			index := uint(s0 & mask)
			if index != 0 {
				g.Add(bucket[index-1], bucket[index-1], points[i])
			}
			powers[i] = new(big.Int).Rsh(powers[i], uint(c))
		}
		// running-sum trick: fold the buckets so bucket k contributes k+1 times
		sum.Zero()
		for i := bucketSize - 1; i >= 0; i-- {
			g.Add(sum, sum, bucket[i])
			g.Add(acc, acc, sum)
		}
		windows[j] = g.New()
		windows[j].Set(acc)
		j++
		cur += c
	}
	// combine windows from most to least significant, doubling c times between
	acc.Zero()
	for i := len(windows) - 1; i >= 0; i-- {
		for j := uint32(0); j < c; j++ {
			g.Double(acc, acc)
		}
		g.Add(acc, acc, windows[i])
	}
	return r.Set(acc), nil
}
// wnafMul multiplies p by scalar e using a window-NAF of width 6 and assigns
// the result to c.
func (g *G2) wnafMul(c, p *PointG2, e *big.Int) *PointG2 {
	windowSize := uint(6)
	// Precompute odd multiples ±1p, ±3p, ..., ±(2^(w-1)-1)p.
	// Table layout: negatives in the lower half (mirrored), positives in the
	// upper half, with indexForPositive marking +1*p.
	precompTable := make([]*PointG2, (1 << (windowSize - 1)))
	for i := 0; i < len(precompTable); i++ {
		precompTable[i] = g.New()
	}
	var indexForPositive uint64 = (1 << (windowSize - 2))
	precompTable[indexForPositive].Set(p)
	g.Neg(precompTable[indexForPositive-1], p)
	doubled, precomp := g.New(), g.New()
	g.Double(doubled, p)
	precomp.Set(p)
	for i := uint64(1); i < indexForPositive; i++ {
		g.Add(precomp, precomp, doubled)
		precompTable[indexForPositive+i].Set(precomp)
		g.Neg(precompTable[indexForPositive-1-i], precomp)
	}
	// Scan the wNAF digits from most significant to least, doubling between
	// digits and adding the precomputed multiple for every non-zero digit.
	// `found` suppresses the pointless doublings before the first non-zero
	// digit, while q is still the identity.
	wnaf := wnaf(e, windowSize)
	q := g.Zero()
	found := false
	var idx uint64
	for i := len(wnaf) - 1; i >= 0; i-- {
		if found {
			g.Double(q, q)
		}
		if wnaf[i] != 0 {
			found = true
			if wnaf[i] > 0 {
				// Digit d > 0 maps to table slot indexForPositive + d>>1.
				idx = uint64(wnaf[i] >> 1)
				g.Add(q, q, precompTable[indexForPositive+idx])
			} else {
				// Negative digits index the mirrored lower half.
				idx = uint64(((0 - wnaf[i]) >> 1))
				g.Add(q, q, precompTable[indexForPositive-1-idx])
			}
		}
	}
	return c.Set(q)
}
// MapToCurve given a byte slice returns a valid G2 point.
// This mapping function implements the Simplified Shallue-van de Woestijne-Ulas method.
// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-05#section-6.6.2
// Input byte slice should be a valid field element, otherwise an error is returned.
// Pipeline: bytes -> fe2 -> SWU map onto the isogenous curve -> 3-isogeny back
// to E' -> cofactor clearing -> affine coordinates.
func (g *G2) MapToCurve(in []byte) (*PointG2, error) {
	fp2 := g.f
	u, err := fp2.fromBytes(in)
	if err != nil {
		return nil, err
	}
	x, y := swuMapG2(fp2, u)
	// isogenyMapG2 mutates x and y in place.
	isogenyMapG2(fp2, x, y)
	z := new(fe2).one()
	q := &PointG2{*x, *y, *z}
	g.ClearCofactor(q)
	return g.Affine(q), nil
}
// EncodeToCurve given a message and domain separator tag returns the hash result
// which is a valid curve point.
// Implementation follows the non-uniform (NU) SSWU suite for G2 at
// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06
// (the previous comment named the G1 suite; this is the G2 variant —
// confirm the exact suite ID against the draft's registry).
func (g *G2) EncodeToCurve(msg, domain []byte) (*PointG2, error) {
	// Two field-element limbs form one fe2 element u.
	hashRes, err := hashToFpXMDSHA256(msg, domain, 2)
	if err != nil {
		return nil, err
	}
	fp2 := g.f
	u := &fe2{*hashRes[0], *hashRes[1]}
	x, y := swuMapG2(fp2, u)
	isogenyMapG2(fp2, x, y)
	z := new(fe2).one()
	q := &PointG2{*x, *y, *z}
	g.ClearCofactor(q)
	return g.Affine(q), nil
}
// HashToCurve given a message and domain separator tag returns the hash result
// which is a valid curve point.
// Implementation follows the random-oracle (RO) SSWU suite for G2 at
// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06
// (the previous comment named the G1 suite; this is the G2 variant —
// confirm the exact suite ID against the draft's registry).
func (g *G2) HashToCurve(msg, domain []byte) (*PointG2, error) {
	// Four field elements form two fe2 values u0 and u1; mapping both and
	// adding the results gives the random-oracle property.
	hashRes, err := hashToFpXMDSHA256(msg, domain, 4)
	if err != nil {
		return nil, err
	}
	fp2 := g.f
	u0, u1 := &fe2{*hashRes[0], *hashRes[1]}, &fe2{*hashRes[2], *hashRes[3]}
	x0, y0 := swuMapG2(fp2, u0)
	x1, y1 := swuMapG2(fp2, u1)
	z0 := new(fe2).one()
	z1 := new(fe2).one()
	p0, p1 := &PointG2{*x0, *y0, *z0}, &PointG2{*x1, *y1, *z1}
	g.Add(p0, p0, p1)
	g.Affine(p0)
	// The isogeny map is applied once to the affine sum, then the cofactor
	// is cleared to land in the prime-order subgroup.
	isogenyMapG2(fp2, &p0[0], &p0[1])
	g.ClearCofactor(p0)
	return g.Affine(p0), nil
}
package cmd
import (
"strconv"
"strings"
"github.com/pkg/errors"
"github.com/ftl/si5351/pkg/si5351"
)
// parseFrequency interprets a frequency string with an optional magnitude
// suffix: "m" for MHz, "k" for kHz, no suffix for Hz. Matching is
// case-insensitive and surrounding whitespace is ignored.
func parseFrequency(f string) (si5351.Frequency, error) {
	input := strings.ToLower(strings.TrimSpace(f))
	magnitude := si5351.Hz
	if trimmed := strings.TrimSuffix(input, "m"); trimmed != input {
		input, magnitude = trimmed, si5351.MHz
	} else if trimmed := strings.TrimSuffix(input, "k"); trimmed != input {
		input, magnitude = trimmed, si5351.KHz
	}
	value, err := strconv.Atoi(input)
	if err != nil {
		return 0, err
	}
	return si5351.Frequency(value) * magnitude, nil
}
// parseRatio parses a fractional ratio given as "a,b,c" into its three
// integer components. Whitespace around each component is ignored.
func parseRatio(s string) (a, b, c int, err error) {
	fields := strings.Split(s, ",")
	if len(fields) != 3 {
		return 0, 0, 0, errors.New("ratio must have three components: a,b,c")
	}
	if a, err = strconv.Atoi(strings.TrimSpace(fields[0])); err != nil {
		return
	}
	if b, err = strconv.Atoi(strings.TrimSpace(fields[1])); err != nil {
		return
	}
	c, err = strconv.Atoi(strings.TrimSpace(fields[2]))
	return
}
// parsePLL maps "a"/"A" to PLLA and "b"/"B" to PLLB. Any other value yields
// an error, with PLLA returned alongside it as a harmless default.
func parsePLL(s string) (si5351.PLLIndex, error) {
	if strings.EqualFold(s, "a") {
		return si5351.PLLA, nil
	}
	if strings.EqualFold(s, "b") {
		return si5351.PLLB, nil
	}
	return si5351.PLLA, errors.Errorf("invalid PLL %s, try A or B", s)
}
// parseOutput parses a clock-output index given as a decimal string and
// validates it addresses one of the six supported outputs Clk0..Clk5.
func parseOutput(s string) (si5351.OutputIndex, error) {
	i, err := strconv.Atoi(s)
	if err != nil {
		return 0, err
	}
	if i >= int(si5351.Clk0) && i <= int(si5351.Clk5) {
		return si5351.OutputIndex(i), nil
	}
	return 0, errors.Errorf("invalid output %s, only outputs 0-5 supported", s)
}
// toCrystalFrequency maps a crystal frequency given in MHz to the matching
// si5351 constant. Anything other than 27 falls back to the 25 MHz crystal.
func toCrystalFrequency(f int) si5351.Frequency {
	switch f {
	case 27:
		return si5351.Crystal27MHz
	default:
		return si5351.Crystal25MHz
	}
}

// toCrystalLoad maps a crystal load capacitance given in pF to the matching
// si5351 constant. Unknown values fall back to 10 pF.
func toCrystalLoad(l int) si5351.CrystalLoad {
	switch l {
	case 6:
		return si5351.CrystalLoad6PF
	case 8:
		return si5351.CrystalLoad8PF
	default:
		return si5351.CrystalLoad10PF
	}
}

// toOutputDrive maps an output drive strength given in mA to the matching
// si5351 constant. Unknown values fall back to the weakest drive, 2 mA.
func toOutputDrive(d int) si5351.OutputDrive {
	switch d {
	case 4:
		return si5351.OutputDrive4mA
	case 6:
		return si5351.OutputDrive6mA
	case 8:
		return si5351.OutputDrive8mA
	default:
		return si5351.OutputDrive2mA
	}
}
package objects
import (
"github.com/elainaaa/gosu-pp/beatmap/audio"
"github.com/elainaaa/gosu-pp/beatmap/difficulty"
"github.com/elainaaa/gosu-pp/beatmap/timing"
"github.com/elainaaa/gosu-pp/math/vector"
)
// IHitObject is the common contract implemented by every playable beatmap
// object. It covers timing, raw and stack-adjusted positioning (optionally
// per difficulty modifier), combo bookkeeping, stacking state and hit sounds.
type IHitObject interface {
	// Lifecycle / configuration.
	Update(time float64) bool
	SetTiming(timings *timing.Timings)
	SetDifficulty(difficulty *difficulty.Difficulty)

	// Timing.
	GetStartTime() float64
	GetEndTime() float64
	GetDuration() float64

	// Positioning: raw, stacked, and stacked-under-a-modifier variants.
	GetPositionAt(float64) vector.Vector2f
	GetStackedPositionAt(float64) vector.Vector2f
	GetStackedPositionAtMod(time float64, modifier difficulty.Modifier) vector.Vector2f

	GetStartPosition() vector.Vector2f
	GetStackedStartPosition() vector.Vector2f
	GetStackedStartPositionMod(modifier difficulty.Modifier) vector.Vector2f

	GetEndPosition() vector.Vector2f
	GetStackedEndPosition() vector.Vector2f
	GetStackedEndPositionMod(modifier difficulty.Modifier) vector.Vector2f

	// Identity and combo bookkeeping.
	GetID() int
	SetID(int)
	SetComboNumber(cn int)
	GetComboSet() int
	SetComboSet(set int)
	GetComboSetHax() int
	SetComboSetHax(set int)

	// Stacking, tracked separately per modifier family.
	GetStackIndex(modifier difficulty.Modifier) int
	SetStackIndex(index int, modifier difficulty.Modifier)
	SetStackOffset(offset float32, modifier difficulty.Modifier)

	GetSounds() []audio.HitSound
	GetColorOffset() int
	IsNewCombo() bool
	SetNewCombo(b bool)
	GetType() Type
}
// HitObject is the shared base implementation embedded by concrete hit
// objects. It stores raw start/end positions and times, stacking state kept
// separately for no-mod, Easy and HardRock, and combo metadata.
type HitObject struct {
	StartPosRaw vector.Vector2f // position as read from the beatmap, before stacking
	EndPosRaw   vector.Vector2f
	StartTime   float64
	EndTime     float64

	// Stack offsets per modifier family (no-mod / Easy / HardRock).
	StackOffset   vector.Vector2f
	StackOffsetEZ vector.Vector2f
	StackOffsetHR vector.Vector2f

	// PositionDelegate, when non-nil, overrides GetPositionAt — presumably
	// used by objects whose position changes over time (TODO confirm).
	PositionDelegate func(time float64) vector.Vector2f

	// Stack indices per modifier family.
	StackIndex   int
	StackIndexEZ int
	StackIndexHR int

	HitObjectID int

	sounds []audio.HitSound

	// Combo bookkeeping.
	NewCombo    bool
	ComboNumber int
	ComboSet    int
	ComboSetHax int
	ColorOffset int
}
// Update is a no-op on the base type (always reports true); embedding types
// are expected to shadow it.
func (hitObject *HitObject) Update(_ float64) bool { return true }

// SetTiming is a no-op on the base type.
func (hitObject *HitObject) SetTiming(_ *timing.Timings) {}

// UpdateStacking is a no-op on the base type.
func (hitObject *HitObject) UpdateStacking() {}

// SetDifficulty is a no-op on the base type.
func (hitObject *HitObject) SetDifficulty(_ *difficulty.Difficulty) {}
// GetStartTime returns the object's start time.
func (hitObject *HitObject) GetStartTime() float64 {
	return hitObject.StartTime
}

// GetEndTime returns the object's end time.
func (hitObject *HitObject) GetEndTime() float64 {
	return hitObject.EndTime
}

// GetDuration returns the time span between start and end.
func (hitObject *HitObject) GetDuration() float64 {
	return hitObject.EndTime - hitObject.StartTime
}
// GetPositionAt returns the raw (unstacked) position at the given time,
// delegating to PositionDelegate when one is set; otherwise the position is
// constant and equals the raw start position.
func (hitObject *HitObject) GetPositionAt(time float64) vector.Vector2f {
	if hitObject.PositionDelegate != nil {
		return hitObject.PositionDelegate(time)
	}
	return hitObject.StartPosRaw
}

// GetStackedPositionAt returns the position at the given time with the
// no-mod stack offset applied.
func (hitObject *HitObject) GetStackedPositionAt(time float64) vector.Vector2f {
	return hitObject.GetPositionAt(time).Add(hitObject.StackOffset)
}

// GetStackedPositionAtMod returns the position at the given time adjusted
// for the given modifier (HardRock flip and per-mod stack offset).
func (hitObject *HitObject) GetStackedPositionAtMod(time float64, modifier difficulty.Modifier) vector.Vector2f {
	return hitObject.modifyPosition(hitObject.GetPositionAt(time), modifier)
}
// GetStartPosition returns the raw start position.
func (hitObject *HitObject) GetStartPosition() vector.Vector2f {
	return hitObject.StartPosRaw
}

// GetStackedStartPosition returns the start position with the no-mod stack
// offset applied.
func (hitObject *HitObject) GetStackedStartPosition() vector.Vector2f {
	return hitObject.GetStartPosition().Add(hitObject.StackOffset)
}

// GetStackedStartPositionMod returns the start position adjusted for the
// given modifier.
func (hitObject *HitObject) GetStackedStartPositionMod(modifier difficulty.Modifier) vector.Vector2f {
	return hitObject.modifyPosition(hitObject.GetStartPosition(), modifier)
}

// GetEndPosition returns the raw end position.
func (hitObject *HitObject) GetEndPosition() vector.Vector2f {
	return hitObject.EndPosRaw
}

// GetStackedEndPosition returns the end position with the no-mod stack
// offset applied.
func (hitObject *HitObject) GetStackedEndPosition() vector.Vector2f {
	return hitObject.GetEndPosition().Add(hitObject.StackOffset)
}

// GetStackedEndPositionMod returns the end position adjusted for the given
// modifier.
func (hitObject *HitObject) GetStackedEndPositionMod(modifier difficulty.Modifier) vector.Vector2f {
	return hitObject.modifyPosition(hitObject.GetEndPosition(), modifier)
}
// GetID returns the object's ID.
func (hitObject *HitObject) GetID() int {
	return hitObject.HitObjectID
}

// SetID sets the object's ID.
func (hitObject *HitObject) SetID(id int) {
	hitObject.HitObjectID = id
}

// SetComboNumber sets the object's number within its combo.
func (hitObject *HitObject) SetComboNumber(cn int) {
	hitObject.ComboNumber = cn
}

// GetComboSet returns the combo set this object belongs to.
func (hitObject *HitObject) GetComboSet() int {
	return hitObject.ComboSet
}

// SetComboSet sets the combo set this object belongs to.
func (hitObject *HitObject) SetComboSet(set int) {
	hitObject.ComboSet = set
}

// GetComboSetHax returns the alternate combo-set value (purpose unclear from
// this file — presumably a skip-aware counterpart; confirm at call sites).
func (hitObject *HitObject) GetComboSetHax() int {
	return hitObject.ComboSetHax
}

// SetComboSetHax sets the alternate combo-set value.
func (hitObject *HitObject) SetComboSetHax(set int) {
	hitObject.ComboSetHax = set
}
// GetStackIndex returns the stack index for the given modifier family
// (HardRock and Easy keep separate stacking from no-mod).
func (hitObject *HitObject) GetStackIndex(modifier difficulty.Modifier) int {
	switch {
	case modifier&difficulty.HardRock > 0:
		return hitObject.StackIndexHR
	case modifier&difficulty.Easy > 0:
		return hitObject.StackIndexEZ
	default:
		return hitObject.StackIndex
	}
}

// SetStackIndex sets the stack index for the given modifier family.
func (hitObject *HitObject) SetStackIndex(index int, modifier difficulty.Modifier) {
	switch {
	case modifier&difficulty.HardRock > 0:
		hitObject.StackIndexHR = index
	case modifier&difficulty.Easy > 0:
		hitObject.StackIndexEZ = index
	default:
		hitObject.StackIndex = index
	}
}

// SetStackOffset stores the stack offset for the given modifier family.
// The scalar offset is applied equally on both axes (diagonal displacement).
func (hitObject *HitObject) SetStackOffset(offset float32, modifier difficulty.Modifier) {
	switch {
	case modifier&difficulty.HardRock > 0:
		hitObject.StackOffsetHR = vector.NewVec2f(1, 1).Scl(offset)
	case modifier&difficulty.Easy > 0:
		hitObject.StackOffsetEZ = vector.NewVec2f(1, 1).Scl(offset)
	default:
		hitObject.StackOffset = vector.NewVec2f(1, 1).Scl(offset)
	}
}
// GetSounds returns the object's hit sounds.
func (hitObject *HitObject) GetSounds() []audio.HitSound {
	return hitObject.sounds
}

// GetColorOffset returns the combo-color offset.
func (hitObject *HitObject) GetColorOffset() int {
	return hitObject.ColorOffset
}

// IsNewCombo reports whether this object starts a new combo.
func (hitObject *HitObject) IsNewCombo() bool {
	return hitObject.NewCombo
}

// SetNewCombo marks whether this object starts a new combo.
func (hitObject *HitObject) SetNewCombo(b bool) {
	hitObject.NewCombo = b
}
// modifyPosition applies the modifier-specific transform to basePosition and
// adds the matching stack offset. HardRock mirrors vertically around 384
// (presumably the playfield height — confirm); Easy and no-mod differ only
// in which stack offset is applied.
func (hitObject *HitObject) modifyPosition(basePosition vector.Vector2f, modifier difficulty.Modifier) vector.Vector2f {
	switch {
	case modifier&difficulty.HardRock > 0:
		basePosition.Y = 384 - basePosition.Y
		return basePosition.Add(hitObject.StackOffsetHR)
	case modifier&difficulty.Easy > 0:
		return basePosition.Add(hitObject.StackOffsetEZ)
	}
	return basePosition.Add(hitObject.StackOffset)
}
package id
import "encoding/json"
// DatasetID is an ID for Dataset.
type DatasetID ID

// NewDatasetID generates a new random DatasetID.
func NewDatasetID() DatasetID {
	return DatasetID(New())
}

// DatasetIDFrom generates a new DatasetID from a string, returning an error
// if the string is not a valid ID.
func DatasetIDFrom(i string) (nid DatasetID, err error) {
	var did ID
	did, err = FromID(i)
	if err != nil {
		return
	}
	nid = DatasetID(did)
	return
}
// MustDatasetID generates a new DatasetID from a string, but panics if the string cannot be parsed.
func MustDatasetID(i string) DatasetID {
	id, err := DatasetIDFrom(i)
	if err != nil {
		panic(err)
	}
	return id
}
// DatasetIDFromRef generates a new DatasetID from a string ref, returning
// nil when the ref is nil or not parseable.
func DatasetIDFromRef(i *string) *DatasetID {
	did := FromIDRef(i)
	if did == nil {
		return nil
	}
	nid := DatasetID(*did)
	return &nid
}

// DatasetIDFromRefID generates a new DatasetID from a ref of a generic ID,
// returning nil for a nil or zero-value input.
func DatasetIDFromRefID(i *ID) *DatasetID {
	if i == nil || i.IsNil() {
		return nil
	}
	nid := DatasetID(*i)
	return &nid
}
// ID returns a domain ID.
func (d DatasetID) ID() ID {
	return ID(d)
}

// String returns a string representation, or "" for the zero value.
func (d DatasetID) String() string {
	if d.IsNil() {
		return ""
	}
	return ID(d).String()
}

// RefString returns a reference of the string representation, or nil for the
// zero value. (The previous comment misnamed this method "StringRef".)
func (d DatasetID) RefString() *string {
	if d.IsNil() {
		return nil
	}
	str := d.String()
	return &str
}

// GoString implements fmt.GoStringer interface.
func (d DatasetID) GoString() string {
	return "DatasetID(" + d.String() + ")"
}
// Ref returns a reference to a copy of d, or nil for the zero value.
func (d DatasetID) Ref() *DatasetID {
	if d.IsNil() {
		return nil
	}
	d2 := d
	return &d2
}

// Contains returns whether the id is contained in the slice. The zero value
// is never considered contained.
func (d DatasetID) Contains(ids []DatasetID) bool {
	if d.IsNil() {
		return false
	}
	for _, i := range ids {
		if d.ID().Equal(i.ID()) {
			return true
		}
	}
	return false
}

// CopyRef returns a copy of a reference, or nil for a nil/zero receiver.
func (d *DatasetID) CopyRef() *DatasetID {
	if d.IsNilRef() {
		return nil
	}
	d2 := *d
	return &d2
}
// IDRef returns a reference of a domain id, or nil for a nil/zero receiver.
func (d *DatasetID) IDRef() *ID {
	if d.IsNilRef() {
		return nil
	}
	id := ID(*d)
	return &id
}

// StringRef returns a reference of a string representation, or nil for a
// nil/zero receiver.
func (d *DatasetID) StringRef() *string {
	if d.IsNilRef() {
		return nil
	}
	id := ID(*d).String()
	return &id
}
// MarhsalJSON implements json.Marhsaler interface
// NOTE(review): the method name is misspelled ("Marhsal" instead of
// "Marshal"), so encoding/json will NOT pick this up via the json.Marshaler
// interface — it only runs when called explicitly. Renaming would change the
// public API, so it is left as is.
func (d *DatasetID) MarhsalJSON() ([]byte, error) {
	if d.IsNilRef() {
		return nil, nil
	}
	return json.Marshal(d.String())
}

// UnmarhsalJSON implements json.Unmarshaler interface
// NOTE(review): same misspelling as above — encoding/json never calls this.
func (d *DatasetID) UnmarhsalJSON(bs []byte) (err error) {
	var idstr string
	if err = json.Unmarshal(bs, &idstr); err != nil {
		return
	}
	*d, err = DatasetIDFrom(idstr)
	return
}

// MarshalText implements encoding.TextMarshaler interface
func (d *DatasetID) MarshalText() ([]byte, error) {
	if d.IsNilRef() {
		return nil, nil
	}
	return []byte(d.String()), nil
}

// UnmarshalText implements encoding.TextUnmarshaler interface
func (d *DatasetID) UnmarshalText(text []byte) (err error) {
	*d, err = DatasetIDFrom(string(text))
	return
}
// IsNil returns true if a ID is zero-value
func (d DatasetID) IsNil() bool {
	return ID(d).IsNil()
}

// IsNilRef returns true if a ID is nil or zero-value
func (d *DatasetID) IsNilRef() bool {
	return d == nil || ID(*d).IsNil()
}
// DatasetIDsToStrings converts IDs into a string slice.
func DatasetIDsToStrings(ids []DatasetID) []string {
	strs := make([]string, 0, len(ids))
	for _, i := range ids {
		strs = append(strs, i.String())
	}
	return strs
}

// DatasetIDsFrom converts a string slice into a ID slice. It fails on the
// first string that is not a valid ID.
func DatasetIDsFrom(ids []string) ([]DatasetID, error) {
	dids := make([]DatasetID, 0, len(ids))
	for _, i := range ids {
		did, err := DatasetIDFrom(i)
		if err != nil {
			return nil, err
		}
		dids = append(dids, did)
	}
	return dids, nil
}
// DatasetIDsFromID converts a generic ID slice into a ID slice.
func DatasetIDsFromID(ids []ID) []DatasetID {
	dids := make([]DatasetID, 0, len(ids))
	for _, i := range ids {
		dids = append(dids, DatasetID(i))
	}
	return dids
}

// DatasetIDsFromIDRef converts a ref of a generic ID slice into a ID slice,
// silently skipping nil entries.
func DatasetIDsFromIDRef(ids []*ID) []DatasetID {
	dids := make([]DatasetID, 0, len(ids))
	for _, i := range ids {
		if i != nil {
			dids = append(dids, DatasetID(*i))
		}
	}
	return dids
}

// DatasetIDsToID converts a ID slice into a generic ID slice.
func DatasetIDsToID(ids []DatasetID) []ID {
	dids := make([]ID, 0, len(ids))
	for _, i := range ids {
		dids = append(dids, i.ID())
	}
	return dids
}

// DatasetIDsToIDRef converts a ID ref slice into a generic ID ref slice.
// Nil or zero entries map to nil refs.
func DatasetIDsToIDRef(ids []*DatasetID) []*ID {
	dids := make([]*ID, 0, len(ids))
	for _, i := range ids {
		dids = append(dids, i.IDRef())
	}
	return dids
}
// DatasetIDSet represents a set of DatasetIDs. The map gives O(1) membership
// tests while the slice preserves insertion order.
type DatasetIDSet struct {
	m map[DatasetID]struct{}
	s []DatasetID
}

// NewDatasetIDSet creates a new DatasetIDSet
func NewDatasetIDSet() *DatasetIDSet {
	return &DatasetIDSet{}
}

// Add adds new IDs that do not already exist in the set; duplicates are
// ignored. A nil receiver or nil argument is a no-op.
func (s *DatasetIDSet) Add(p ...DatasetID) {
	if s == nil || p == nil {
		return
	}
	if s.m == nil {
		s.m = map[DatasetID]struct{}{}
	}
	for _, i := range p {
		if _, ok := s.m[i]; !ok {
			if s.s == nil {
				s.s = []DatasetID{}
			}
			s.m[i] = struct{}{}
			s.s = append(s.s, i)
		}
	}
}
// AddRef adds a new ID ref if it does not exist in the set; nil receivers
// and nil refs are ignored.
func (s *DatasetIDSet) AddRef(p *DatasetID) {
	if s == nil || p == nil {
		return
	}
	s.Add(*p)
}

// Has checks if the ID exists in the set.
func (s *DatasetIDSet) Has(p DatasetID) bool {
	if s == nil || s.m == nil {
		return false
	}
	_, ok := s.m[p]
	return ok
}

// Clear clears all stored IDs.
func (s *DatasetIDSet) Clear() {
	if s == nil {
		return
	}
	s.m = nil
	s.s = nil
}
// All returns all stored IDs as a fresh slice, in insertion order.
func (s *DatasetIDSet) All() []DatasetID {
	if s == nil {
		return nil
	}
	return append([]DatasetID{}, s.s...)
}

// Clone returns a cloned set; a nil receiver yields a new empty set.
func (s *DatasetIDSet) Clone() *DatasetIDSet {
	if s == nil {
		return NewDatasetIDSet()
	}
	s2 := NewDatasetIDSet()
	s2.Add(s.s...)
	return s2
}

// Merge returns a new set containing the union of s and s2; neither input
// is modified.
func (s *DatasetIDSet) Merge(s2 *DatasetIDSet) *DatasetIDSet {
	s3 := s.Clone()
	if s2 == nil {
		return s3
	}
	s3.Add(s2.s...)
	return s3
}
package unicornify
import (
. "github.com/drbrain/go-unicornify/unicornify/core"
"image"
"image/color"
"math"
)
const (
	// Gradient selectors for ColoringParameters. Both lines use `= iota`
	// within one const block, so the values are 0 and 1 respectively.
	CirclyGradient   = iota
	DistanceGradient = iota
)

// ColoringParameters bundles the shading strength applied to a drawn circle
// and which gradient function computes that shading.
type ColoringParameters struct {
	Shading  float64
	Gradient int
}

// DefaultGradientWithShading returns circly-gradient coloring with the given
// shading strength.
func DefaultGradientWithShading(shading float64) ColoringParameters {
	return ColoringParameters{shading, CirclyGradient}
}
// CircleShadingRGBA shades the base color col for the pixel at offset (x, y)
// from a circle's center, given radius r. Positive y (lower half) darkens,
// negative y lightens; the effect scales with coloring.Shading.
func CircleShadingRGBA(x, y, r float64, col color.RGBA, coloring ColoringParameters) color.RGBA {
	// No shading requested, or on the horizontal center line: unchanged.
	if coloring.Shading == 0 || y == 0 {
		return col
	}
	var sh float64
	lighten := 128.0 // lightening is half-strength for the circly gradient
	switch coloring.Gradient {
	case CirclyGradient:
		// Blend a sphere-like falloff (sh1) near the center with a plain
		// vertical gradient (sh2) near the rim, weighted by the normalized
		// distance d from the center.
		sh1 := 1 - math.Sqrt(1-math.Min(1, y*y/(r*r)))
		d := math.Sqrt(x*x+y*y) / r
		sh2 := math.Abs(y) / r
		sh = (1-d)*sh1 + d*sh2
	case DistanceGradient:
		// Pure vertical gradient; lightening at full strength.
		sh = math.Abs(y / r)
		lighten = 255
	default:
		panic("unknown gradient")
	}
	if y > 0 {
		return DarkenRGBA(col, uint8(255*sh*coloring.Shading))
	} else {
		return LightenRGBA(col, uint8(lighten*sh*coloring.Shading))
	}
}
// TopHalfCircleF draws only the upper half of a filled circle, rounding the
// float center/radius to the nearest pixel.
func TopHalfCircleF(img *image.RGBA, cx, cy, r float64, col Color, coloring ColoringParameters) {
	circleImpl(img, int(cx+.5), int(cy+.5), int(r+.5), col, true, coloring)
}

// CircleF draws a filled circle from float coordinates (rounded to pixels).
func CircleF(img *image.RGBA, cx, cy, r float64, col Color, coloring ColoringParameters) {
	Circle(img, int(cx+.5), int(cy+.5), int(r+.5), col, coloring)
}

// Circle draws a filled circle from integer pixel coordinates.
func Circle(img *image.RGBA, cx, cy, r int, col Color, coloring ColoringParameters) {
	circleImpl(img, cx, cy, r, col, false, coloring)
}
// circleImpl rasterizes a filled circle of radius r centered at (cx, cy)
// using the integer midpoint circle algorithm, filling horizontal spans
// between symmetric edge points. When topHalfOnly is set, only rows at or
// above the center are drawn. Per-pixel color comes from CircleShadingRGBA.
func circleImpl(img *image.RGBA, cx, cy, r int, col Color, topHalfOnly bool, coloring ColoringParameters) {
	colrgba := color.RGBA{col.R, col.G, col.B, 255}
	// NOTE(review): only Dx() is consulted for clipping in both axes —
	// assumes a square image anchored at the origin.
	imgsize := img.Bounds().Dx()
	// Early out when the circle cannot intersect the image at all.
	if cx < -r || cy < -r || cx-r > imgsize || cy-r > imgsize {
		return
	}
	// Midpoint-circle state: f is the decision variable, (x, y) walks one
	// octant from (0, r) toward the 45° diagonal.
	f := 1 - r
	ddF_x := 1
	ddF_y := -2 * r
	x := 0
	y := r
	// fill draws the horizontal span [left, right] at row y, both given
	// relative to the center; x is clamped to the image width. Rows outside
	// the image rely on SetRGBA's own bounds check being a no-op.
	fill := func(left, right, y int) {
		left += cx
		right += cx
		y += cy
		if left < 0 {
			left = 0
		}
		if right >= imgsize {
			right = imgsize - 1
		}
		for x := left; x <= right; x++ {
			thiscol := CircleShadingRGBA(float64(x-cx), float64(y-cy), float64(r), colrgba, coloring)
			img.SetRGBA(x, y, thiscol)
		}
	}
	fill(-r, r, 0) // center row
	for x < y {
		if f >= 0 {
			y--
			ddF_y += 2
			f += ddF_y
		}
		x++
		ddF_x += 2
		f += ddF_x
		// Four symmetric spans per step; the lower two are skipped for
		// top-half-only rendering.
		fill(-x, x, -y)
		fill(-y, y, -x)
		if !topHalfOnly {
			fill(-x, x, y)
			fill(-y, y, x)
		}
	}
}
package semver
import (
"errors"
"sort"
"strconv"
"strings"
)
var (
	// ErrEmpty means that the given value is empty.
	ErrEmpty = errors.New("semver: empty string")
	// ErrEmptyElement means that some element of the version is empty.
	ErrEmptyElement = errors.New("semver: empty element")
	// ErrInvalidFormat means that the given value to parse isn't in semver format.
	ErrInvalidFormat = errors.New("semver: invalid format")
	// ErrLeadingZeroes means that some element of the version has leading zeroes.
	ErrLeadingZeroes = errors.New("semver: leading zeros")
	// ErrInvalidCharacter means that there are unsupported characters in the version specification.
	ErrInvalidCharacter = errors.New("semver: invalid character")
)

// Indices of the three version components within a split "a.b.c" string.
const (
	major int = iota
	minor
	patch
)

const (
	// numbers is the only character set a version element may contain.
	numbers = "0123456789"
)

// zero is the zero Version returned alongside every parse error.
var zero = Version{}
// Versions implements sort.Interface over a slice of Version, ordering
// ascending by precedence.
type Versions []Version

func (vs Versions) Len() int           { return len(vs) }
func (vs Versions) Less(i, j int) bool { return vs[i].Lt(vs[j]) }
func (vs Versions) Swap(i, j int)      { vs[i], vs[j] = vs[j], vs[i] }

// Sort orders the slice in place, ascending.
func (vs Versions) Sort() { sort.Sort(vs) }

// Ascending is an alias for Sort.
func (vs Versions) Ascending() { vs.Sort() }

// Sort sorts the given versions ascending and returns them as a Versions slice.
func Sort(vs ...Version) Versions { sorted := Versions(vs); sorted.Sort(); return sorted }
// Version is a semantic version restricted to the major.minor.patch core
// (no pre-release or build metadata).
type Version struct {
	Major uint64
	Minor uint64
	Patch uint64
}

// String renders the version in the canonical "vMAJOR.MINOR.PATCH" form.
func (v Version) String() string {
	return "v" + strconv.FormatUint(v.Major, 10) +
		"." + strconv.FormatUint(v.Minor, 10) +
		"." + strconv.FormatUint(v.Patch, 10)
}
// Outcomes of Version.Compare.
const (
	lt = iota - 1 // receiver has lower precedence
	eq            // equal precedence
	gt            // receiver has higher precedence
)

// Eq reports whether v equals to.
func (v Version) Eq(to Version) bool { return v.Compare(to) == eq }

// Lt reports whether v is lower precedence than to.
func (v Version) Lt(to Version) bool { return v.Compare(to) == lt }

// Lte reports whether v is lower than or equal to to.
func (v Version) Lte(to Version) bool { return v.Compare(to) <= eq }

// Gt reports whether v is higher precedence than to.
func (v Version) Gt(to Version) bool { return v.Compare(to) == gt }

// Gte reports whether v is higher than or equal to to.
func (v Version) Gte(to Version) bool { return v.Compare(to) >= eq }
// Compare returns lt, eq or gt according to the relative precedence of v and
// to, checking major, then minor, then patch.
func (v Version) Compare(to Version) int {
	pairs := [3][2]uint64{
		{v.Major, to.Major},
		{v.Minor, to.Minor},
		{v.Patch, to.Patch},
	}
	for _, p := range pairs {
		if p[0] > p[1] {
			return gt
		}
		if p[0] < p[1] {
			return lt
		}
	}
	return eq
}
// New parses s into a Version; alias for Parse.
func New(s string) (Version, error) { return Parse(s) }

// V is a shorthand for Must; it panics on invalid input.
func V(s string) Version { return Must(s) }

// Must parses s and panics if it is not a valid version.
func Must(s string) Version {
	v, err := Parse(s)
	if err != nil {
		panic(err)
	}
	return v
}
// Parse converts a string in "[v]MAJOR.MINOR.PATCH" form into a Version.
// It returns ErrEmpty for "", ErrInvalidFormat for the wrong number of
// dot-separated elements, and ErrEmptyElement / ErrInvalidCharacter /
// ErrLeadingZeroes for malformed elements.
func Parse(s string) (Version, error) {
	if s == "" {
		return zero, ErrEmpty
	}
	parts := strings.Split(strings.TrimPrefix(s, "v"), ".")
	if len(parts) != 3 {
		return zero, ErrInvalidFormat
	}
	switch {
	case empty(parts...):
		return zero, ErrEmptyElement
	case !containsOnly(numbers, parts...):
		return zero, ErrInvalidCharacter
	case leadingZeroes(parts...):
		return zero, ErrLeadingZeroes
	}
	var fields [3]uint64
	for i, p := range parts {
		n, err := strconv.ParseUint(p, 10, 64)
		if err != nil {
			return zero, err
		}
		fields[i] = n
	}
	return Version{Major: fields[major], Minor: fields[minor], Patch: fields[patch]}, nil
}
// empty reports whether any of the given strings is the empty string.
func empty(vs ...string) bool {
	for i := range vs {
		if len(vs[i]) == 0 {
			return true
		}
	}
	return false
}
// leadingZeroes reports whether any element has a redundant leading zero,
// i.e. is longer than one character and starts with '0'.
func leadingZeroes(vs ...string) bool {
	for _, v := range vs {
		if strings.HasPrefix(v, "0") && len(v) > 1 {
			return true
		}
	}
	return false
}
// containsOnly reports whether every string in vs consists solely of runes
// drawn from set.
func containsOnly(set string, vs ...string) bool {
	for _, v := range vs {
		for _, r := range v {
			if !strings.ContainsRune(set, r) {
				return false
			}
		}
	}
	return true
}
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// Profile
// Profile
// Kiota-style generated model: every collection property has a matching
// getter/setter pair and an entry in GetFieldDeserializers.
type Profile struct {
	Entity
	// The account property
	account []UserAccountInformationable
	// Represents details of addresses associated with the user.
	addresses []ItemAddressable
	// Represents the details of meaningful dates associated with a person.
	anniversaries []PersonAnnualEventable
	// Represents the details of awards or honors associated with a person.
	awards []PersonAwardable
	// Represents the details of certifications associated with a person.
	certifications []PersonCertificationable
	// Represents data that a user has supplied related to undergraduate, graduate, postgraduate or other educational activities.
	educationalActivities []EducationalActivityable
	// Represents detailed information about email addresses associated with the user.
	emails []ItemEmailable
	// Provides detailed information about interests the user has associated with themselves in various services.
	interests []PersonInterestable
	// Represents detailed information about languages that a user has added to their profile.
	languages []LanguageProficiencyable
	// Represents the names a user has added to their profile.
	names []PersonNameable
	// Represents notes that a user has added to their profile.
	notes []PersonAnnotationable
	// Represents patents that a user has added to their profile.
	patents []ItemPatentable
	// Represents detailed information about phone numbers associated with a user in various services.
	phones []ItemPhoneable
	// Represents detailed information about work positions associated with a user's profile.
	positions []WorkPositionable
	// Represents detailed information about projects associated with a user.
	projects []ProjectParticipationable
	// Represents details of any publications a user has added to their profile.
	publications []ItemPublicationable
	// Represents detailed information about skills associated with a user in various services.
	skills []SkillProficiencyable
	// Represents web accounts the user has indicated they use or has added to their user profile.
	webAccounts []WebAccountable
	// Represents detailed information about websites associated with a user in various services.
	websites []PersonWebsiteable
}
// NewProfile instantiates a new profile and sets the default values.
func NewProfile()(*Profile) {
	m := &Profile{
		Entity: *NewEntity(),
	}
	return m
}

// CreateProfileFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// For Profile there are no subtypes, so the parse node is not inspected.
func CreateProfileFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
	return NewProfile(), nil
}
// Nil-safe getters: each returns nil for a nil receiver, otherwise the
// backing field. The redundant `else` branches of the generated code have
// been dropped (idiomatic early return); behavior is unchanged.

// GetAccount gets the account property value. The account property
func (m *Profile) GetAccount() []UserAccountInformationable {
	if m == nil {
		return nil
	}
	return m.account
}

// GetAddresses gets the addresses property value. Represents details of addresses associated with the user.
func (m *Profile) GetAddresses() []ItemAddressable {
	if m == nil {
		return nil
	}
	return m.addresses
}

// GetAnniversaries gets the anniversaries property value. Represents the details of meaningful dates associated with a person.
func (m *Profile) GetAnniversaries() []PersonAnnualEventable {
	if m == nil {
		return nil
	}
	return m.anniversaries
}

// GetAwards gets the awards property value. Represents the details of awards or honors associated with a person.
func (m *Profile) GetAwards() []PersonAwardable {
	if m == nil {
		return nil
	}
	return m.awards
}

// GetCertifications gets the certifications property value. Represents the details of certifications associated with a person.
func (m *Profile) GetCertifications() []PersonCertificationable {
	if m == nil {
		return nil
	}
	return m.certifications
}

// GetEducationalActivities gets the educationalActivities property value. Represents data that a user has supplied related to undergraduate, graduate, postgraduate or other educational activities.
func (m *Profile) GetEducationalActivities() []EducationalActivityable {
	if m == nil {
		return nil
	}
	return m.educationalActivities
}

// GetEmails gets the emails property value. Represents detailed information about email addresses associated with the user.
func (m *Profile) GetEmails() []ItemEmailable {
	if m == nil {
		return nil
	}
	return m.emails
}
// GetFieldDeserializers the deserialization information for the current model.
// Each entry maps a JSON property name to a closure that reads a collection
// of objects from the parse node, downcasts every element to the concrete
// *-able interface for that property, and stores the result via the matching
// setter. The map starts from the embedded Entity's deserializers.
func (m *Profile) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
	res := m.Entity.GetFieldDeserializers()
	res["account"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreateUserAccountInformationFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			// NOTE: this inner `res` shadows the outer deserializer map;
			// it is a fresh slice of the concrete element type.
			res := make([]UserAccountInformationable, len(val))
			for i, v := range val {
				res[i] = v.(UserAccountInformationable)
			}
			m.SetAccount(res)
		}
		return nil
	}
	res["addresses"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreateItemAddressFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]ItemAddressable, len(val))
			for i, v := range val {
				res[i] = v.(ItemAddressable)
			}
			m.SetAddresses(res)
		}
		return nil
	}
	res["anniversaries"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreatePersonAnnualEventFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]PersonAnnualEventable, len(val))
			for i, v := range val {
				res[i] = v.(PersonAnnualEventable)
			}
			m.SetAnniversaries(res)
		}
		return nil
	}
	res["awards"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreatePersonAwardFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]PersonAwardable, len(val))
			for i, v := range val {
				res[i] = v.(PersonAwardable)
			}
			m.SetAwards(res)
		}
		return nil
	}
	res["certifications"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreatePersonCertificationFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]PersonCertificationable, len(val))
			for i, v := range val {
				res[i] = v.(PersonCertificationable)
			}
			m.SetCertifications(res)
		}
		return nil
	}
	res["educationalActivities"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreateEducationalActivityFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]EducationalActivityable, len(val))
			for i, v := range val {
				res[i] = v.(EducationalActivityable)
			}
			m.SetEducationalActivities(res)
		}
		return nil
	}
	res["emails"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreateItemEmailFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]ItemEmailable, len(val))
			for i, v := range val {
				res[i] = v.(ItemEmailable)
			}
			m.SetEmails(res)
		}
		return nil
	}
	res["interests"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreatePersonInterestFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]PersonInterestable, len(val))
			for i, v := range val {
				res[i] = v.(PersonInterestable)
			}
			m.SetInterests(res)
		}
		return nil
	}
	res["languages"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreateLanguageProficiencyFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]LanguageProficiencyable, len(val))
			for i, v := range val {
				res[i] = v.(LanguageProficiencyable)
			}
			m.SetLanguages(res)
		}
		return nil
	}
	res["names"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreatePersonNameFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]PersonNameable, len(val))
			for i, v := range val {
				res[i] = v.(PersonNameable)
			}
			m.SetNames(res)
		}
		return nil
	}
	res["notes"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreatePersonAnnotationFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]PersonAnnotationable, len(val))
			for i, v := range val {
				res[i] = v.(PersonAnnotationable)
			}
			m.SetNotes(res)
		}
		return nil
	}
	res["patents"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreateItemPatentFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]ItemPatentable, len(val))
			for i, v := range val {
				res[i] = v.(ItemPatentable)
			}
			m.SetPatents(res)
		}
		return nil
	}
	res["phones"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreateItemPhoneFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]ItemPhoneable, len(val))
			for i, v := range val {
				res[i] = v.(ItemPhoneable)
			}
			m.SetPhones(res)
		}
		return nil
	}
	res["positions"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreateWorkPositionFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]WorkPositionable, len(val))
			for i, v := range val {
				res[i] = v.(WorkPositionable)
			}
			m.SetPositions(res)
		}
		return nil
	}
	res["projects"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreateProjectParticipationFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]ProjectParticipationable, len(val))
			for i, v := range val {
				res[i] = v.(ProjectParticipationable)
			}
			m.SetProjects(res)
		}
		return nil
	}
	res["publications"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreateItemPublicationFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]ItemPublicationable, len(val))
			for i, v := range val {
				res[i] = v.(ItemPublicationable)
			}
			m.SetPublications(res)
		}
		return nil
	}
	res["skills"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreateSkillProficiencyFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]SkillProficiencyable, len(val))
			for i, v := range val {
				res[i] = v.(SkillProficiencyable)
			}
			m.SetSkills(res)
		}
		return nil
	}
	res["webAccounts"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreateWebAccountFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]WebAccountable, len(val))
			for i, v := range val {
				res[i] = v.(WebAccountable)
			}
			m.SetWebAccounts(res)
		}
		return nil
	}
	res["websites"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreatePersonWebsiteFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			res := make([]PersonWebsiteable, len(val))
			for i, v := range val {
				res[i] = v.(PersonWebsiteable)
			}
			m.SetWebsites(res)
		}
		return nil
	}
	return res
}
// GetInterests gets the interests property value. Provides detailed information about interests the user has associated with themselves in various services.
func (m *Profile) GetInterests()([]PersonInterestable) {
    // Nil-receiver guard: generated models tolerate calls on a nil *Profile.
    if m == nil {
        return nil
    }
    return m.interests
}
// GetLanguages gets the languages property value. Represents detailed information about languages that a user has added to their profile.
func (m *Profile) GetLanguages()([]LanguageProficiencyable) {
    if m == nil {
        return nil
    }
    return m.languages
}
// GetNames gets the names property value. Represents the names a user has added to their profile.
func (m *Profile) GetNames()([]PersonNameable) {
    if m == nil {
        return nil
    }
    return m.names
}
// GetNotes gets the notes property value. Represents notes that a user has added to their profile.
func (m *Profile) GetNotes()([]PersonAnnotationable) {
    if m == nil {
        return nil
    }
    return m.notes
}
// GetPatents gets the patents property value. Represents patents that a user has added to their profile.
func (m *Profile) GetPatents()([]ItemPatentable) {
    if m == nil {
        return nil
    }
    return m.patents
}
// GetPhones gets the phones property value. Represents detailed information about phone numbers associated with a user in various services.
func (m *Profile) GetPhones()([]ItemPhoneable) {
    if m == nil {
        return nil
    }
    return m.phones
}
// GetPositions gets the positions property value. Represents detailed information about work positions associated with a user's profile.
func (m *Profile) GetPositions()([]WorkPositionable) {
    if m == nil {
        return nil
    }
    return m.positions
}
// GetProjects gets the projects property value. Represents detailed information about projects associated with a user.
func (m *Profile) GetProjects()([]ProjectParticipationable) {
    if m == nil {
        return nil
    }
    return m.projects
}
// GetPublications gets the publications property value. Represents details of any publications a user has added to their profile.
func (m *Profile) GetPublications()([]ItemPublicationable) {
    if m == nil {
        return nil
    }
    return m.publications
}
// GetSkills gets the skills property value. Represents detailed information about skills associated with a user in various services.
func (m *Profile) GetSkills()([]SkillProficiencyable) {
    if m == nil {
        return nil
    }
    return m.skills
}
// GetWebAccounts gets the webAccounts property value. Represents web accounts the user has indicated they use or has added to their user profile.
func (m *Profile) GetWebAccounts()([]WebAccountable) {
    if m == nil {
        return nil
    }
    return m.webAccounts
}
// GetWebsites gets the websites property value. Represents detailed information about websites associated with a user in various services.
func (m *Profile) GetWebsites()([]PersonWebsiteable) {
    if m == nil {
        return nil
    }
    return m.websites
}
// Serialize serializes information the current object
func (m *Profile) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    // Serialize the embedded Entity properties first.
    err := m.Entity.Serialize(writer)
    if err != nil {
        return err
    }
    // parsable is a local shorthand for the serialization Parsable interface.
    type parsable = i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable
    // writeColl writes a named collection of n items, fetched one at a time
    // through at, as object values on the writer. Each of the collection
    // stanzas below differs only in the property key and the element type,
    // so the cast loop is factored out here.
    writeColl := func(key string, n int, at func(int) parsable) error {
        cast := make([]parsable, n)
        for i := range cast {
            cast[i] = at(i)
        }
        return writer.WriteCollectionOfObjectValues(key, cast)
    }
    if v := m.GetAccount(); v != nil {
        if err := writeColl("account", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetAddresses(); v != nil {
        if err := writeColl("addresses", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetAnniversaries(); v != nil {
        if err := writeColl("anniversaries", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetAwards(); v != nil {
        if err := writeColl("awards", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetCertifications(); v != nil {
        if err := writeColl("certifications", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetEducationalActivities(); v != nil {
        if err := writeColl("educationalActivities", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetEmails(); v != nil {
        if err := writeColl("emails", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetInterests(); v != nil {
        if err := writeColl("interests", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetLanguages(); v != nil {
        if err := writeColl("languages", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetNames(); v != nil {
        if err := writeColl("names", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetNotes(); v != nil {
        if err := writeColl("notes", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetPatents(); v != nil {
        if err := writeColl("patents", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetPhones(); v != nil {
        if err := writeColl("phones", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetPositions(); v != nil {
        if err := writeColl("positions", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetProjects(); v != nil {
        if err := writeColl("projects", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetPublications(); v != nil {
        if err := writeColl("publications", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetSkills(); v != nil {
        if err := writeColl("skills", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetWebAccounts(); v != nil {
        if err := writeColl("webAccounts", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    if v := m.GetWebsites(); v != nil {
        if err := writeColl("websites", len(v), func(i int) parsable { return v[i].(parsable) }); err != nil {
            return err
        }
    }
    return nil
}
// SetAccount sets the account property value. The account property
func (m *Profile) SetAccount(value []UserAccountInformationable)() {
if m != nil {
m.account = value
}
}
// SetAddresses sets the addresses property value. Represents details of addresses associated with the user.
func (m *Profile) SetAddresses(value []ItemAddressable)() {
if m != nil {
m.addresses = value
}
}
// SetAnniversaries sets the anniversaries property value. Represents the details of meaningful dates associated with a person.
func (m *Profile) SetAnniversaries(value []PersonAnnualEventable)() {
if m != nil {
m.anniversaries = value
}
}
// SetAwards sets the awards property value. Represents the details of awards or honors associated with a person.
func (m *Profile) SetAwards(value []PersonAwardable)() {
if m != nil {
m.awards = value
}
}
// SetCertifications sets the certifications property value. Represents the details of certifications associated with a person.
func (m *Profile) SetCertifications(value []PersonCertificationable)() {
if m != nil {
m.certifications = value
}
}
// SetEducationalActivities sets the educationalActivities property value. Represents data that a user has supplied related to undergraduate, graduate, postgraduate or other educational activities.
func (m *Profile) SetEducationalActivities(value []EducationalActivityable)() {
if m != nil {
m.educationalActivities = value
}
}
// SetEmails sets the emails property value. Represents detailed information about email addresses associated with the user.
func (m *Profile) SetEmails(value []ItemEmailable)() {
if m != nil {
m.emails = value
}
}
// SetInterests sets the interests property value. Provides detailed information about interests the user has associated with themselves in various services.
func (m *Profile) SetInterests(value []PersonInterestable)() {
if m != nil {
m.interests = value
}
}
// SetLanguages sets the languages property value. Represents detailed information about languages that a user has added to their profile.
func (m *Profile) SetLanguages(value []LanguageProficiencyable)() {
if m != nil {
m.languages = value
}
}
// SetNames sets the names property value. Represents the names a user has added to their profile.
func (m *Profile) SetNames(value []PersonNameable)() {
if m != nil {
m.names = value
}
}
// SetNotes sets the notes property value. Represents notes that a user has added to their profile.
func (m *Profile) SetNotes(value []PersonAnnotationable)() {
if m != nil {
m.notes = value
}
}
// SetPatents sets the patents property value. Represents patents that a user has added to their profile.
func (m *Profile) SetPatents(value []ItemPatentable)() {
if m != nil {
m.patents = value
}
}
// SetPhones sets the phones property value. Represents detailed information about phone numbers associated with a user in various services.
func (m *Profile) SetPhones(value []ItemPhoneable)() {
if m != nil {
m.phones = value
}
}
// SetPositions sets the positions property value. Represents detailed information about work positions associated with a user's profile.
func (m *Profile) SetPositions(value []WorkPositionable)() {
if m != nil {
m.positions = value
}
}
// SetProjects sets the projects property value. Represents detailed information about projects associated with a user.
func (m *Profile) SetProjects(value []ProjectParticipationable)() {
if m != nil {
m.projects = value
}
}
// SetPublications sets the publications property value. Represents details of any publications a user has added to their profile.
func (m *Profile) SetPublications(value []ItemPublicationable)() {
if m != nil {
m.publications = value
}
}
// SetSkills sets the skills property value. Represents detailed information about skills associated with a user in various services.
func (m *Profile) SetSkills(value []SkillProficiencyable)() {
if m != nil {
m.skills = value
}
}
// SetWebAccounts sets the webAccounts property value. Represents web accounts the user has indicated they use or has added to their user profile.
func (m *Profile) SetWebAccounts(value []WebAccountable)() {
if m != nil {
m.webAccounts = value
}
}
// SetWebsites sets the websites property value. Represents detailed information about websites associated with a user in various services.
func (m *Profile) SetWebsites(value []PersonWebsiteable)() {
if m != nil {
m.websites = value
}
} | models/profile.go | 0.669745 | 0.437283 | profile.go | starcoder |
package geom
// deriveCloneBounds returns a clone of the src parameter.
func deriveCloneBounds(src *Bounds) *Bounds {
    if src == nil {
        return nil
    }
    clone := &Bounds{}
    deriveDeepCopy(clone, src)
    return clone
}

// deriveCloneCoord returns a clone of the src parameter.
func deriveCloneCoord(src Coord) Coord {
    if src == nil {
        return nil
    }
    clone := make(Coord, len(src))
    deriveDeepCopy_(clone, src)
    return clone
}

// deriveCloneLinearRing returns a clone of the src parameter.
func deriveCloneLinearRing(src *LinearRing) *LinearRing {
    if src == nil {
        return nil
    }
    clone := &LinearRing{}
    deriveDeepCopy_1(clone, src)
    return clone
}

// deriveCloneLineString returns a clone of the src parameter.
func deriveCloneLineString(src *LineString) *LineString {
    if src == nil {
        return nil
    }
    clone := &LineString{}
    deriveDeepCopy_2(clone, src)
    return clone
}

// deriveCloneMultiLineString returns a clone of the src parameter.
func deriveCloneMultiLineString(src *MultiLineString) *MultiLineString {
    if src == nil {
        return nil
    }
    clone := &MultiLineString{}
    deriveDeepCopy_3(clone, src)
    return clone
}

// deriveCloneMultiPoint returns a clone of the src parameter.
func deriveCloneMultiPoint(src *MultiPoint) *MultiPoint {
    if src == nil {
        return nil
    }
    clone := &MultiPoint{}
    deriveDeepCopy_4(clone, src)
    return clone
}

// deriveCloneMultiPolygon returns a clone of the src parameter.
func deriveCloneMultiPolygon(src *MultiPolygon) *MultiPolygon {
    if src == nil {
        return nil
    }
    clone := &MultiPolygon{}
    deriveDeepCopy_5(clone, src)
    return clone
}

// deriveClonePoint returns a clone of the src parameter.
func deriveClonePoint(src *Point) *Point {
    if src == nil {
        return nil
    }
    clone := &Point{}
    deriveDeepCopy_6(clone, src)
    return clone
}

// deriveClonePolygon returns a clone of the src parameter.
func deriveClonePolygon(src *Polygon) *Polygon {
    if src == nil {
        return nil
    }
    clone := &Polygon{}
    deriveDeepCopy_7(clone, src)
    return clone
}
// deriveDeepCopy recursively copies the contents of src into dst.
// Slice fields reuse dst's backing storage when it has enough capacity;
// a nil source field yields a nil destination field.
func deriveDeepCopy(dst, src *Bounds) {
    dst.layout = src.layout
    if src.min == nil {
        dst.min = nil
    } else {
        // Reuse dst.min when possible; a nil dst.min is always replaced
        // by a freshly allocated (non-nil) slice, matching len(src.min).
        if dst.min == nil || cap(dst.min) < len(src.min) {
            dst.min = make([]float64, len(src.min))
        } else {
            dst.min = dst.min[:len(src.min)]
        }
        copy(dst.min, src.min)
    }
    if src.max == nil {
        dst.max = nil
    } else {
        if dst.max == nil || cap(dst.max) < len(src.max) {
            dst.max = make([]float64, len(src.max))
        } else {
            dst.max = dst.max[:len(src.max)]
        }
        copy(dst.max, src.max)
    }
}
// deriveDeepCopy_ recursively copies the contents of src into dst.
func deriveDeepCopy_(dst, src Coord) {
    copy(dst, src)
}

// deriveDeepCopy_1 recursively copies the contents of src into dst.
func deriveDeepCopy_1(dst, src *LinearRing) {
    // Copy into a fresh zero value so dst's previous buffers are not reused.
    var g geom1
    deriveDeepCopy_8(&g, &src.geom1)
    dst.geom1 = g
}

// deriveDeepCopy_2 recursively copies the contents of src into dst.
func deriveDeepCopy_2(dst, src *LineString) {
    var g geom1
    deriveDeepCopy_8(&g, &src.geom1)
    dst.geom1 = g
}

// deriveDeepCopy_3 recursively copies the contents of src into dst.
func deriveDeepCopy_3(dst, src *MultiLineString) {
    var g geom2
    deriveDeepCopy_9(&g, &src.geom2)
    dst.geom2 = g
}

// deriveDeepCopy_4 recursively copies the contents of src into dst.
func deriveDeepCopy_4(dst, src *MultiPoint) {
    var g geom2
    deriveDeepCopy_9(&g, &src.geom2)
    dst.geom2 = g
}

// deriveDeepCopy_5 recursively copies the contents of src into dst.
func deriveDeepCopy_5(dst, src *MultiPolygon) {
    var g geom3
    deriveDeepCopy_10(&g, &src.geom3)
    dst.geom3 = g
}

// deriveDeepCopy_6 recursively copies the contents of src into dst.
func deriveDeepCopy_6(dst, src *Point) {
    var g geom0
    deriveDeepCopy_11(&g, &src.geom0)
    dst.geom0 = g
}

// deriveDeepCopy_7 recursively copies the contents of src into dst.
func deriveDeepCopy_7(dst, src *Polygon) {
    var g geom2
    deriveDeepCopy_9(&g, &src.geom2)
    dst.geom2 = g
}

// deriveDeepCopy_8 recursively copies the contents of src into dst.
func deriveDeepCopy_8(dst, src *geom1) {
    var g geom0
    deriveDeepCopy_11(&g, &src.geom0)
    dst.geom0 = g
}
// deriveDeepCopy_9 recursively copies the contents of src into dst.
func deriveDeepCopy_9(dst, src *geom2) {
    // Copy the embedded geom1 via a fresh zero value.
    var g geom1
    deriveDeepCopy_8(&g, &src.geom1)
    dst.geom1 = g
    if src.ends == nil {
        dst.ends = nil
        return
    }
    // Reuse dst.ends when it has enough capacity; a nil dst.ends is
    // always replaced by a freshly allocated (non-nil) slice.
    if dst.ends == nil || cap(dst.ends) < len(src.ends) {
        dst.ends = make([]int, len(src.ends))
    } else {
        dst.ends = dst.ends[:len(src.ends)]
    }
    copy(dst.ends, src.ends)
}

// deriveDeepCopy_10 recursively copies the contents of src into dst.
func deriveDeepCopy_10(dst, src *geom3) {
    var g geom1
    deriveDeepCopy_8(&g, &src.geom1)
    dst.geom1 = g
    if src.endss == nil {
        dst.endss = nil
        return
    }
    if dst.endss == nil || cap(dst.endss) < len(src.endss) {
        dst.endss = make([][]int, len(src.endss))
    } else {
        dst.endss = dst.endss[:len(src.endss)]
    }
    deriveDeepCopy_12(dst.endss, src.endss)
}
// deriveDeepCopy_11 recursively copies the contents of src into dst.
func deriveDeepCopy_11(dst, src *geom0) {
    dst.layout = src.layout
    dst.stride = src.stride
    if src.flatCoords == nil {
        dst.flatCoords = nil
    } else {
        // Reuse dst.flatCoords when it has enough capacity; a nil
        // destination is always replaced by a non-nil allocation.
        if dst.flatCoords == nil || cap(dst.flatCoords) < len(src.flatCoords) {
            dst.flatCoords = make([]float64, len(src.flatCoords))
        } else {
            dst.flatCoords = dst.flatCoords[:len(src.flatCoords)]
        }
        copy(dst.flatCoords, src.flatCoords)
    }
    dst.srid = src.srid
}
// deriveDeepCopy_12 recursively copies the contents of src into dst.
// dst must have at least len(src) elements; each destination element is
// reused when it has enough capacity, a nil source element yields a nil
// destination element, and a non-nil empty source yields a non-nil
// empty destination.
func deriveDeepCopy_12(dst, src [][]int) {
    for i, s := range src {
        if s == nil {
            dst[i] = nil
            continue
        }
        if dst[i] == nil || cap(dst[i]) < len(s) {
            dst[i] = make([]int, len(s))
        } else {
            dst[i] = dst[i][:len(s)]
        }
        copy(dst[i], s)
    }
}
package bayes
// Highest probability interval for a discrete probability distribution.
// Ref.: Albert (2009): 184 [mnormt.onesided()]
import (
"sort"
)
// cumSum returns the cumulative sums of x: result[i] = x[0] + ... + x[i].
// An empty (or nil) input yields a non-nil empty slice.
func cumSum(x []float64) []float64 {
    v := make([]float64, len(x))
    // Keep a running total instead of branching on i == 0 each iteration.
    var sum float64
    for i, xv := range x {
        sum += xv
        v[i] = sum
    }
    return v
}
// IndexSorter implements sort.Interface over Target while permuting
// Indices in lock-step, so after sorting, Indices[i] is the original
// position of Target[i].
type IndexSorter struct {
    Target  []float64
    Indices []int
}

// NewSorter wraps t in an IndexSorter whose Indices start as 0..len(t)-1.
func NewSorter(t []float64) IndexSorter {
    idx := make([]int, len(t))
    for i := range idx {
        idx[i] = i
    }
    return IndexSorter{Target: t, Indices: idx}
}

// Len reports the number of elements being sorted.
func (s IndexSorter) Len() int { return len(s.Target) }

// Less orders elements by ascending Target value.
func (s IndexSorter) Less(i, j int) bool { return s.Target[i] < s.Target[j] }

// Swap exchanges both the values and their tracked original indices.
func (s IndexSorter) Swap(i, j int) {
    s.Target[i], s.Target[j] = s.Target[j], s.Target[i]
    s.Indices[i], s.Indices[j] = s.Indices[j], s.Indices[i]
}
// DiscHPI computes a highest probability interval for a discrete distribution.
//
// Arguments:
//   x - values where probability is listed
//   p - probability at x
//   probContent - target probability content of the HPI
//
// Returns:
//   probExact - exact probability content of the HPI (total mass of hpiSet)
//   hpiSet - values of x within the highest probability interval, ascending
//
// Fixes over the previous revision: if the total mass of p is below
// probContent (or the inputs are empty), every value of x is returned
// instead of panicking with an index out of range; p is no longer
// reordered in place as a side effect.
func DiscHPI(x, p []float64, probContent float64) (probExact float64, hpiSet []float64) {
    // Order indices by descending probability (ties broken arbitrarily,
    // as in the previous unstable sort).
    idx := make([]int, len(p))
    for i := range idx {
        idx[i] = i
    }
    sort.Slice(idx, func(a, b int) bool { return p[idx[a]] > p[idx[b]] })

    // Greedily take the most probable values until the target mass is covered.
    hpiSet = make([]float64, 0, len(p))
    for _, j := range idx {
        probExact += p[j]
        hpiSet = append(hpiSet, x[j])
        if probExact >= probContent {
            break
        }
    }
    sort.Float64s(hpiSet)
    return probExact, hpiSet
}
package iso20022
// Account between an investor(s) and a fund manager or a fund. The account can contain holdings in any investment fund or investment fund class managed (or distributed) by the fund manager, within the same fund family.
type InvestmentAccount14 struct {
// Unique and unambiguous identification for the account between the account owner and the account servicer.
AccountIdentification *AccountIdentification1 `xml:"AcctId"`
// Name of the account. It provides an additional means of identification, and is designated by the account servicer in agreement with the account owner.
AccountName *Max35Text `xml:"AcctNm,omitempty"`
// Supplementary registration information applying to a specific block of units for dealing and reporting purposes. The supplementary registration information may be used when all the units are registered, for example, to a funds supermarket, but holdings for each investor have to reconciled individually.
AccountDesignation *Max35Text `xml:"AcctDsgnt,omitempty"`
// Identification of an individual person whom legally owns the account.
IndividualOwnerIdentification *IndividualPersonIdentificationChoice `xml:"IndvOwnrId,omitempty"`
// Identification of an organisation that legally owns the account.
OrganisationOwnerIdentification *PartyIdentification2Choice `xml:"OrgOwnrId,omitempty"`
// Party that manages the account on behalf of the account owner, that is manages the registration and booking of entries on the account, calculates balances on the account and provides information about the account.
AccountServicer *PartyIdentification2Choice `xml:"AcctSvcr,omitempty"`
}
func (i *InvestmentAccount14) AddAccountIdentification() *AccountIdentification1 {
i.AccountIdentification = new(AccountIdentification1)
return i.AccountIdentification
}
func (i *InvestmentAccount14) SetAccountName(value string) {
i.AccountName = (*Max35Text)(&value)
}
func (i *InvestmentAccount14) SetAccountDesignation(value string) {
i.AccountDesignation = (*Max35Text)(&value)
}
func (i *InvestmentAccount14) AddIndividualOwnerIdentification() *IndividualPersonIdentificationChoice {
i.IndividualOwnerIdentification = new(IndividualPersonIdentificationChoice)
return i.IndividualOwnerIdentification
}
func (i *InvestmentAccount14) AddOrganisationOwnerIdentification() *PartyIdentification2Choice {
i.OrganisationOwnerIdentification = new(PartyIdentification2Choice)
return i.OrganisationOwnerIdentification
}
func (i *InvestmentAccount14) AddAccountServicer() *PartyIdentification2Choice {
i.AccountServicer = new(PartyIdentification2Choice)
return i.AccountServicer
} | InvestmentAccount14.go | 0.687735 | 0.447943 | InvestmentAccount14.go | starcoder |
package cenv
import (
"os"
"strconv"
"strings"
)
// Bool returns the boolean value from environment variable.
// It accepts boolean string values from strconv.ParseBool.
// Any other value returns an error.
func Bool(keys ...string) (bool, error) {
	raw := get(keys)
	return strconv.ParseBool(raw)
}

// Float32 returns the float32 value from environment variable.
// When it couldn't get the value as float32, it returns an error.
// It is based on strconv.ParseFloat.
func Float32(keys ...string) (float32, error) {
	parsed, err := strconv.ParseFloat(get(keys), 32)
	return float32(parsed), err
}

// Float64 returns the float64 value from environment variable.
// When it couldn't get the value as float64, it returns an error.
// It is based on strconv.ParseFloat.
func Float64(keys ...string) (float64, error) {
	raw := get(keys)
	return strconv.ParseFloat(raw, 64)
}

// Int returns the int value from environment variable.
// When it couldn't get the value as int, it returns an error.
// It is based on strconv.Atoi.
func Int(keys ...string) (int, error) {
	raw := get(keys)
	return strconv.Atoi(raw)
}

// Int32 returns the int32 value from environment variable.
// When it couldn't get the value as int32, it returns an error.
// It is based on strconv.ParseInt.
func Int32(keys ...string) (int32, error) {
	parsed, err := strconv.ParseInt(get(keys), 10, 32)
	return int32(parsed), err
}

// Int64 returns the int64 value from environment variable.
// When it couldn't get the value as int64, it returns an error.
// It is based on strconv.ParseInt.
func Int64(keys ...string) (int64, error) {
	raw := get(keys)
	return strconv.ParseInt(raw, 10, 64)
}

// String returns the string value from environment variable.
func String(keys ...string) string {
	return get(keys)
}

// Uint returns the uint value from environment variable.
// When it couldn't get the value as uint, it returns an error.
// It is based on strconv.ParseUint.
func Uint(keys ...string) (uint, error) {
	parsed, err := strconv.ParseUint(get(keys), 10, 0)
	return uint(parsed), err
}

// Uint32 returns the uint32 value from environment variable.
// When it couldn't get the value as uint32, it returns an error.
// It is based on strconv.ParseUint.
func Uint32(keys ...string) (uint32, error) {
	parsed, err := strconv.ParseUint(get(keys), 10, 32)
	return uint32(parsed), err
}

// Uint64 returns the uint64 value from environment variable.
// When it couldn't get the value as uint64, it returns an error.
// It is based on strconv.ParseUint.
func Uint64(keys ...string) (uint64, error) {
	raw := get(keys)
	return strconv.ParseUint(raw, 10, 64)
}
// MustBool returns the boolean value from environment variable.
// It accepts boolean string values from strconv.ParseBool.
// If any other value is given or the variable is not present, it panics.
func MustBool(keys ...string) bool {
	parsed, err := strconv.ParseBool(must(keys))
	chkErr(keys, err)
	return parsed
}

// MustFloat32 returns the float32 value from environment variable.
// When it couldn't get the value as float32 or the variable is not present,
// it panics.
// It is based on strconv.ParseFloat.
func MustFloat32(keys ...string) float32 {
	parsed, err := strconv.ParseFloat(must(keys), 32)
	chkErr(keys, err)
	return float32(parsed)
}

// MustFloat64 returns the float64 value from environment variable.
// When it couldn't get the value as float64 or the variable is not present,
// it panics.
// It is based on strconv.ParseFloat.
func MustFloat64(keys ...string) float64 {
	parsed, err := strconv.ParseFloat(must(keys), 64)
	chkErr(keys, err)
	return parsed
}

// MustInt returns the int value from environment variable.
// When it couldn't get the value as int or the variable is not present,
// it panics.
// It is based on strconv.Atoi.
func MustInt(keys ...string) int {
	parsed, err := strconv.Atoi(must(keys))
	chkErr(keys, err)
	return parsed
}

// MustInt32 returns the int32 value from environment variable.
// When it couldn't get the value as int32 or the variable is not present,
// it panics.
// It is based on strconv.ParseInt.
func MustInt32(keys ...string) int32 {
	parsed, err := strconv.ParseInt(must(keys), 10, 32)
	chkErr(keys, err)
	return int32(parsed)
}

// MustInt64 returns the int64 value from environment variable.
// When it couldn't get the value as int64 or the variable is not present,
// it panics.
// It is based on strconv.ParseInt.
func MustInt64(keys ...string) int64 {
	parsed, err := strconv.ParseInt(must(keys), 10, 64)
	chkErr(keys, err)
	return parsed
}

// MustString returns the string value from environment variable.
// When the variable is not present, it panics.
func MustString(keys ...string) string {
	return must(keys)
}

// MustUint returns the uint value from environment variable.
// When it couldn't get the value as uint or the variable is not present,
// it panics.
// It is based on strconv.ParseUint.
func MustUint(keys ...string) uint {
	parsed, err := strconv.ParseUint(must(keys), 10, 0)
	chkErr(keys, err)
	return uint(parsed)
}

// MustUint32 returns the uint32 value from environment variable.
// When it couldn't get the value as uint32 or the variable is not present,
// it panics.
// It is based on strconv.ParseUint.
func MustUint32(keys ...string) uint32 {
	parsed, err := strconv.ParseUint(must(keys), 10, 32)
	chkErr(keys, err)
	return uint32(parsed)
}

// MustUint64 returns the uint64 value from environment variable.
// When it couldn't get the value as uint64 or the variable is not present,
// it panics.
// It is based on strconv.ParseUint.
func MustUint64(keys ...string) uint64 {
	parsed, err := strconv.ParseUint(must(keys), 10, 64)
	chkErr(keys, err)
	return parsed
}
func chkErr(keys []string, err error) {
if err != nil {
panic(convertKeys(keys) + " can't be got by the error: " + err.Error())
}
}
func convertKeys(keys []string) string {
key := strings.Join(keys, "_")
key = strings.ToUpper(key)
key = strings.Replace(key, ".", "_", -1)
return key
}
func get(keys []string) string {
return os.Getenv(convertKeys(keys))
}
func must(keys []string) string {
k := convertKeys(keys)
v, ok := os.LookupEnv(k)
if !ok {
panic(k + " must be set")
}
return v
} | cenv.go | 0.73412 | 0.454048 | cenv.go | starcoder |
package corde
import (
"fmt"
"time"
)
// EmbedB is an Embed builder
// https://regex101.com/r/gmVH2A/4
type EmbedB struct {
embed Embed
}
// NewEmbed returns a new embed builder ready for use
func NewEmbed() *EmbedB {
return &EmbedB{
embed: Embed{
Title: "",
Description: "",
URL: "",
Color: 0,
Footer: Footer{},
Image: Image{},
Thumbnail: Image{},
Video: Video{},
Provider: Provider{},
Author: Author{},
Fields: []Field{},
},
}
}
// Embed returns the built Embed
func (e *EmbedB) Embed() Embed { return e.embed }
// InteractionRespData implements InteractionResponder
func (e *EmbedB) InteractionRespData() *InteractionRespData {
return &InteractionRespData{
Embeds: []Embed{e.Embed()},
}
}
// Author adds the author to the Embed
func (e *EmbedB) Author(a Author) *EmbedB {
e.embed.Author = a
return e
}
// Footer adds the footer to the Embed
func (e *EmbedB) Footer(f Footer) *EmbedB {
e.embed.Footer = f
return e
}
// Title adds the title to the Embed
func (e *EmbedB) Title(s string) *EmbedB {
e.embed.Title = s
return e
}
// Titlef adds the Title to the Embed
func (e *EmbedB) Titlef(format string, a ...any) *EmbedB {
e.embed.Title = fmt.Sprintf(format, a...)
return e
}
// Description adds the description to the Embed
func (e *EmbedB) Description(s string) *EmbedB {
e.embed.Description = s
return e
}
// Descriptionf adds the description to the Embed
func (e *EmbedB) Descriptionf(format string, a ...any) *EmbedB {
e.embed.Description = fmt.Sprintf(format, a...)
return e
}
// Thumbnail adds the thumbnail to the Embed
func (e *EmbedB) Thumbnail(i Image) *EmbedB {
e.embed.Thumbnail = i
return e
}
// Image adds the image to the Embed
func (e *EmbedB) Image(i Image) *EmbedB {
e.embed.Image = i
return e
}
// ImageURL adds an image based off the url to the Embed
func (e *EmbedB) ImageURL(s string) *EmbedB {
e.embed.Image = Image{
URL: s,
}
return e
}
// URL adds the url to the Embed
func (e *EmbedB) URL(s string) *EmbedB {
e.embed.URL = s
return e
}
// Fields append the field to the Embed
func (e *EmbedB) Fields(f ...Field) *EmbedB {
e.embed.Fields = append(e.embed.Fields, f...)
return e
}
// Field adds a field to the Embed
func (e *EmbedB) Field(name, value string) *EmbedB {
e.embed.Fields = append(e.embed.Fields, Field{
Name: name,
Value: value,
})
return e
}
// FieldInline adds an inline field to the Embed
func (e *EmbedB) FieldInline(name, value string) *EmbedB {
e.embed.Fields = append(e.embed.Fields, Field{
Name: name,
Value: value,
Inline: true,
})
return e
}
// Provider adds a provider to the Embed
func (e *EmbedB) Provider(name string, url string) *EmbedB {
e.embed.Provider = Provider{
Name: name,
URL: url,
}
return e
}
// Video adds the video to the Embed
func (e *EmbedB) Video(v Video) *EmbedB {
e.embed.Video = v
return e
}
// Timestamp adds the timestamp to the Embed
func (e *EmbedB) Timestamp(t time.Time) *EmbedB {
e.embed.Timestamp = opt(Timestamp(t))
return e
}
// Color adds the color to the Embed
func (e *EmbedB) Color(i uint32) *EmbedB {
e.embed.Color = i
return e
}
func opt[T any](v T) *T {
return &v
} | embed-builder.go | 0.808446 | 0.457682 | embed-builder.go | starcoder |
package gofa
// Fundamental Arguments (14)
/*
Fad03 mean elongation of the Moon from the Sun.
Fundamental argument, IERS Conventions (2003)
Given:
t float64 TDB, Julian centuries since J2000.0 (Note 1)
Returned (function value):
float64 D, radians (Note 2)
Notes:
1) Though t is strictly TDB, it is usually more convenient to use
TT, which makes no significant difference.
2) The expression used is as adopted in IERS Conventions (2003) and
is from Simon et al. (1994).
References:
<NAME>., <NAME>. (eds.), IERS Conventions (2003),
IERS Technical Note No. 32, BKG (2004)
<NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>. 1994, Astron.Astrophys. 282, 663-683
*/
func Fad03(t float64) float64 {
var a float64
/* Mean elongation of the Moon from the Sun (IERS Conventions 2003). */
a = fmod(1072260.703692+
t*(1602961601.2090+
t*(-6.3706+
t*(0.006593+
t*(-0.00003169)))), TURNAS) * DAS2R
return a
}
/*
Fae03 mean longitude of Earth.
Fundamental argument, IERS Conventions (2003)
Given:
t float64 TDB, Julian centuries since J2000.0 (Note 1)
Returned (function value):
float64 mean longitude of Earth, radians (Note 2)
Notes:
1) Though t is strictly TDB, it is usually more convenient to use
TT, which makes no significant difference.
2) The expression used is as adopted in IERS Conventions (2003) and
comes from Souchay et al. (1999) after Simon et al. (1994).
References:
<NAME>., <NAME>. (eds.), IERS Conventions (2003),
IERS Technical Note No. 32, BKG (2004)
<NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>. 1994, Astron.Astrophys. 282, 663-683
<NAME>., <NAME>., <NAME>., <NAME>. 1999,
Astron.Astrophys.Supp.Ser. 135, 111
*/
func Fae03(t float64) float64 {
var a float64
/* Mean longitude of Earth (IERS Conventions 2003). */
a = fmod(1.753470314+628.3075849991*t, D2PI)
return a
}
/*
Faf03 mean longitude of the Moon minus mean longitude of the ascending node.
Fundamental argument, IERS Conventions (2003)
Given:
t float64 TDB, Julian centuries since J2000.0 (Note 1)
Returned (function value):
float64 F, radians (Note 2)
Notes:
1) Though t is strictly TDB, it is usually more convenient to use
TT, which makes no significant difference.
2) The expression used is as adopted in IERS Conventions (2003) and
is from Simon et al. (1994).
References:
<NAME>., <NAME>. (eds.), IERS Conventions (2003),
IERS Technical Note No. 32, BKG (2004)
<NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>. 1994, Astron.Astrophys. 282, 663-683
*/
func Faf03(t float64) float64 {
var a float64
/* Mean longitude of the Moon minus that of the ascending node */
/* (IERS Conventions 2003). */
a = fmod(335779.526232+
t*(1739527262.8478+
t*(-12.7512+
t*(-0.001037+
t*(0.00000417)))), TURNAS) * DAS2R
return a
}
/*
Faju03 Mean longitude of Jupiter
Fundamental argument, IERS Conventions (2003)
Given:
t float64 TDB, Julian centuries since J2000.0 (Note 1)
Returned (function value):
float64 mean longitude of Jupiter, radians (Note 2)
Notes:
1) Though t is strictly TDB, it is usually more convenient to use
TT, which makes no significant difference.
2) The expression used is as adopted in IERS Conventions (2003) and
comes from Souchay et al. (1999) after Simon et al. (1994).
References:
<NAME>., <NAME>. (eds.), IERS Conventions (2003),
IERS Technical Note No. 32, BKG (2004)
<NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>. 1994, Astron.Astrophys. 282, 663-683
<NAME>., <NAME>., <NAME>., <NAME>. 1999,
Astron.Astrophys.Supp.Ser. 135, 111
*/
func Faju03(t float64) float64 {
var a float64
/* Mean longitude of Jupiter (IERS Conventions 2003). */
a = fmod(0.599546497+52.9690962641*t, D2PI)
return a
}
/*
Fal03 Mean anomaly of the Moon
Fundamental argument, IERS Conventions (2003)
Given:
t float64 TDB, Julian centuries since J2000.0 (Note 1)
Returned (function value):
float64 l, radians (Note 2)
Notes:
1) Though t is strictly TDB, it is usually more convenient to use
TT, which makes no significant difference.
2) The expression used is as adopted in IERS Conventions (2003) and
is from Simon et al. (1994).
References:
<NAME>., <NAME>. (eds.), IERS Conventions (2003),
IERS Technical Note No. 32, BKG (2004)
<NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>. 1994, Astron.Astrophys. 282, 663-683
*/
func Fal03(t float64) float64 {
var a float64
/* Mean anomaly of the Moon (IERS Conventions 2003). */
a = fmod(485868.249036+
t*(1717915923.2178+
t*(31.8792+
t*(0.051635+
t*(-0.00024470)))), TURNAS) * DAS2R
return a
}
/*
Falp03 Mean anomaly of the Sun
Fundamental argument, IERS Conventions (2003)
Given:
t float64 TDB, Julian centuries since J2000.0 (Note 1)
Returned (function value):
float64 l', radians (Note 2)
Notes:
1) Though t is strictly TDB, it is usually more convenient to use
TT, which makes no significant difference.
2) The expression used is as adopted in IERS Conventions (2003) and
is from Simon et al. (1994).
References:
<NAME>., <NAME>. (eds.), IERS Conventions (2003),
IERS Technical Note No. 32, BKG (2004)
<NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>. 1994, Astron.Astrophys. 282, 663-683
*/
func Falp03(t float64) float64 {
var a float64
/* Mean anomaly of the Sun (IERS Conventions 2003). */
a = fmod(1287104.793048+
t*(129596581.0481+
t*(-0.5532+
t*(0.000136+
t*(-0.00001149)))), TURNAS) * DAS2R
return a
}
/*
Fama03 Mean longitude of Mars
Fundamental argument, IERS Conventions (2003)
Given:
t float64 TDB, Julian centuries since J2000.0 (Note 1)
Returned (function value):
float64 mean longitude of Mars, radians (Note 2)
Notes:
1) Though t is strictly TDB, it is usually more convenient to use
TT, which makes no significant difference.
2) The expression used is as adopted in IERS Conventions (2003) and
comes from Souchay et al. (1999) after Simon et al. (1994).
References:
<NAME>., <NAME>. (eds.), IERS Conventions (2003),
IERS Technical Note No. 32, BKG (2004)
<NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>. 1994, Astron.Astrophys. 282, 663-683
<NAME>., <NAME>., <NAME>., <NAME>. 1999,
Astron.Astrophys.Supp.Ser. 135, 111
*/
func Fama03(t float64) float64 {
var a float64
/* Mean longitude of Mars (IERS Conventions 2003). */
a = fmod(6.203480913+334.0612426700*t, D2PI)
return a
}
/*
Fame03 Mean longitude of Mercury
Fundamental argument, IERS Conventions (2003)
Given:
t float64 TDB, Julian centuries since J2000.0 (Note 1)
Returned (function value):
float64 mean longitude of Mercury, radians (Note 2)
Notes:
1) Though t is strictly TDB, it is usually more convenient to use
TT, which makes no significant difference.
2) The expression used is as adopted in IERS Conventions (2003) and
comes from Souchay et al. (1999) after Simon et al. (1994).
References:
<NAME>., <NAME>. (eds.), IERS Conventions (2003),
IERS Technical Note No. 32, BKG (2004)
<NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>. 1994, Astron.Astrophys. 282, 663-683
<NAME>., <NAME>., <NAME>., <NAME>. 1999,
Astron.Astrophys.Supp.Ser. 135, 111
*/
func Fame03(t float64) float64 {
var a float64
/* Mean longitude of Mercury (IERS Conventions 2003). */
a = fmod(4.402608842+2608.7903141574*t, D2PI)
return a
}
/*
Fane03 Mean longitude of Neptune
Fundamental argument, IERS Conventions (2003)
Given:
t float64 TDB, Julian centuries since J2000.0 (Note 1)
Returned (function value):
float64 mean longitude of Neptune, radians (Note 2)
Notes:
1) Though t is strictly TDB, it is usually more convenient to use
TT, which makes no significant difference.
2) The expression used is as adopted in IERS Conventions (2003) and
is adapted from Simon et al. (1994).
References:
<NAME>., <NAME>. (eds.), IERS Conventions (2003),
IERS Technical Note No. 32, BKG (2004)
<NAME>., <NAME>., <NAME>., Chapront-<NAME>.,
<NAME>., <NAME>. 1994, Astron.Astrophys. 282, 663-683
*/
func Fane03(t float64) float64 {
var a float64
/* Mean longitude of Neptune (IERS Conventions 2003). */
a = fmod(5.311886287+3.8133035638*t, D2PI)
return a
}
/*
Faom03 Mean longitude of the Moon's ascending node
Fundamental argument, IERS Conventions (2003)
Given:
t float64 TDB, Julian centuries since J2000.0 (Note 1)
Returned (function value):
float64 Omega, radians (Note 2)
Notes:
1) Though t is strictly TDB, it is usually more convenient to use
TT, which makes no significant difference.
2) The expression used is as adopted in IERS Conventions (2003) and
is from Simon et al. (1994).
References:
<NAME>., <NAME>. (eds.), IERS Conventions (2003),
IERS Technical Note No. 32, BKG (2004)
<NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>., 1994, Astron.Astrophys. 282, 663-683.
*/
func Faom03(t float64) float64 {
var a float64
/* Mean longitude of the Moon's ascending node */
/* (IERS Conventions 2003). */
a = fmod(450160.398036+
t*(-6962890.5431+
t*(7.4722+
t*(0.007702+
t*(-0.00005939)))), TURNAS) * DAS2R
return a
}
/*
Fapa03 General accumulated precession in longitude
Fundamental argument, IERS Conventions (2003)
Given:
t float64 TDB, Julian centuries since J2000.0 (Note 1)
Returned (function value):
float64 general precession in longitude, radians (Note 2)
Notes:
1) Though t is strictly TDB, it is usually more convenient to use
TT, which makes no significant difference.
2) The expression used is as adopted in IERS Conventions (2003). It
is taken from Kinoshita & Souchay (1990) and comes originally
from Lieske et al. (1977).
References:
<NAME>. and <NAME>. 1990, Celest.Mech. and Dyn.Astron.
48, 187
<NAME>., <NAME>., <NAME>. & <NAME>. 1977,
Astron.Astrophys. 58, 1-16
<NAME>., <NAME>. (eds.), IERS Conventions (2003),
IERS Technical Note No. 32, BKG (2004)
*/
func Fapa03(t float64) float64 {
var a float64
/* General accumulated precession in longitude. */
a = (0.024381750 + 0.00000538691*t) * t
return a
}
/*
Fasa03 Mean longitude of Saturn
Fundamental argument, IERS Conventions (2003)
Given:
t float64 TDB, Julian centuries since J2000.0 (Note 1)
Returned (function value):
float64 mean longitude of Saturn, radians (Note 2)
Notes:
1) Though t is strictly TDB, it is usually more convenient to use
TT, which makes no significant difference.
2) The expression used is as adopted in IERS Conventions (2003) and
comes from Souchay et al. (1999) after Simon et al. (1994).
References:
<NAME>., <NAME>. (eds.), IERS Conventions (2003),
IERS Technical Note No. 32, BKG (2004)
<NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>. 1994, Astron.Astrophys. 282, 663-683
<NAME>., <NAME>., <NAME>., <NAME>. 1999,
Astron.Astrophys.Supp.Ser. 135, 111
*/
func Fasa03(t float64) float64 {
var a float64
/* Mean longitude of Saturn (IERS Conventions 2003). */
a = fmod(0.874016757+21.3299104960*t, D2PI)
return a
}
/*
Faur03 Mean longitude of Uranus
Fundamental argument, IERS Conventions (2003)
Given:
t float64 TDB, Julian centuries since J2000.0 (Note 1)
Returned (function value):
float64 mean longitude of Uranus, radians (Note 2)
Notes:
1) Though t is strictly TDB, it is usually more convenient to use
TT, which makes no significant difference.
2) The expression used is as adopted in IERS Conventions (2003) and
is adapted from Simon et al. (1994).
References:
<NAME>., <NAME>. (eds.), IERS Conventions (2003),
IERS Technical Note No. 32, BKG (2004)
<NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>. 1994, Astron.Astrophys. 282, 663-683
*/
func Faur03(t float64) float64 {
var a float64
/* Mean longitude of Uranus (IERS Conventions 2003). */
a = fmod(5.481293872+7.4781598567*t, D2PI)
return a
}
/*
Fave03 Mean longitude of Venus
Fundamental argument, IERS Conventions (2003)
Given:
t float64 TDB, Julian centuries since J2000.0 (Note 1)
Returned (function value):
float64 mean longitude of Venus, radians (Note 2)
Notes:
1) Though t is strictly TDB, it is usually more convenient to use
TT, which makes no significant difference.
2) The expression used is as adopted in IERS Conventions (2003) and
comes from Souchay et al. (1999) after Simon et al. (1994).
References:
<NAME>., <NAME>. (eds.), IERS Conventions (2003),
IERS Technical Note No. 32, BKG (2004)
<NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>. 1994, Astron.Astrophys. 282, 663-683
<NAME>., <NAME>., <NAME>., <NAME>. 1999,
Astron.Astrophys.Supp.Ser. 135, 111
*/
func Fave03(t float64) float64 {
var a float64
/* Mean longitude of Venus (IERS Conventions 2003). */
a = fmod(3.176146697+1021.3285546211*t, D2PI)
return a
} | fundargs.go | 0.923554 | 0.640074 | fundargs.go | starcoder |
package patience
// DiffType defines the type of a diff element.
type DiffType int8
const (
// Delete represents a diff delete operation.
Delete DiffType = -1
// Insert represents a diff insert operation.
Insert DiffType = 1
// Equal represents no diff.
Equal DiffType = 0
)
// DiffLine represents a single line and its diff type.
type DiffLine struct {
Text string
Type DiffType
}
// toDiffLines is a convenience function to convert a slice of strings
// to a slice of DiffLines with the specified diff type.
func toDiffLines(a []string, t DiffType) []DiffLine {
diffs := make([]DiffLine, len(a))
for i, l := range a {
diffs[i] = DiffLine{l, t}
}
return diffs
}
// uniqueElements returns a slice of unique elements from a slice of
// strings, and a slice of the original indices of each element.
func uniqueElements(a []string) ([]string, []int) {
m := make(map[string]int)
for _, e := range a {
m[e]++
}
elements := []string{}
indices := []int{}
for i, e := range a {
if m[e] == 1 {
elements = append(elements, e)
indices = append(indices, i)
}
}
return elements, indices
}
// Diff returns the patience diff of two slices of strings.
func Diff(a, b []string) []DiffLine {
switch {
case len(a) == 0 && len(b) == 0:
return nil
case len(a) == 0:
return toDiffLines(b, Insert)
case len(b) == 0:
return toDiffLines(a, Delete)
}
// Find equal elements at the head of slices a and b.
i := 0
for i < len(a) && i < len(b) && a[i] == b[i] {
i++
}
if i > 0 {
return append(
toDiffLines(a[:i], Equal),
Diff(a[i:], b[i:])...,
)
}
// Find equal elements at the tail of slices a and b.
j := 0
for j < len(a) && j < len(b) && a[len(a)-1-j] == b[len(b)-1-j] {
j++
}
if j > 0 {
return append(
Diff(a[:len(a)-j], b[:len(b)-j]),
toDiffLines(a[len(a)-j:], Equal)...,
)
}
// Find the longest common subsequence of unique elements in a and b.
ua, idxa := uniqueElements(a)
ub, idxb := uniqueElements(b)
lcs := LCS(ua, ub)
// If the LCS is empty, the diff is all deletions and insertions.
if len(lcs) == 0 {
return append(toDiffLines(a, Delete), toDiffLines(b, Insert)...)
}
// Lookup the original indices of slices a and b.
for i, x := range lcs {
lcs[i][0] = idxa[x[0]]
lcs[i][1] = idxb[x[1]]
}
diffs := []DiffLine{}
ga, gb := 0, 0
for _, ip := range lcs {
// Diff the gaps between the lcs elements.
diffs = append(diffs, Diff(a[ga:ip[0]], b[gb:ip[1]])...)
// Append the LCS elements to the diff.
diffs = append(diffs, DiffLine{Type: Equal, Text: a[ip[0]]})
ga = ip[0] + 1
gb = ip[1] + 1
}
// Diff the remaining elements of a and b after the final LCS element.
diffs = append(diffs, Diff(a[ga:], b[gb:])...)
return diffs
} | patience.go | 0.814533 | 0.590838 | patience.go | starcoder |
package opc
// Fire
// Make a burning fire pattern.
// This pattern is scaled to fit the layout from top to bottom (z).
import (
"github.com/longears/pixelslinger/colorutils"
"github.com/longears/pixelslinger/config"
"github.com/longears/pixelslinger/midi"
"math"
"time"
)
// this is used to cache some per-pixel calculations
type firePixelInfo struct {
xp float64
yp float64
zp float64
vgrad float64
}
func MakePatternFire(locations []float64) ByteThread {
const (
SPEED = 0.83 // How quick are the flames? This is applied in addition to the speed knob.
SIDE_SCALE = 1.7 // Horizontal scale (x and y). Smaller numbers compress things horizontally.
)
// get bounding box
n_pixels := len(locations) / 3
var max_coord_x, max_coord_y, max_coord_z float64
var min_coord_x, min_coord_y, min_coord_z float64
for ii := 0; ii < n_pixels; ii++ {
x := locations[ii*3+0]
y := locations[ii*3+1]
z := locations[ii*3+2]
if ii == 0 || x > max_coord_x { max_coord_x = x }
if ii == 0 || y > max_coord_y { max_coord_y = y }
if ii == 0 || z > max_coord_z { max_coord_z = z }
if ii == 0 || x < min_coord_x { min_coord_x = x }
if ii == 0 || y < min_coord_y { min_coord_y = y }
if ii == 0 || z < min_coord_z { min_coord_z = z }
}
// make array of firePixelInfo structs
// and fill the cache of per-pixel calculations
pixelInfoCache := make([]*firePixelInfo, len(locations)/3)
for ii := range pixelInfoCache {
thisPixelInfo := &firePixelInfo{}
pixelInfoCache[ii] = thisPixelInfo
x := locations[ii*3+0]
y := locations[ii*3+1]
z := locations[ii*3+2]
// scale the height (z) of the layout to fit in the range 0-1
// and scale x and y accordingly
z_scale := max_coord_z - min_coord_z
if z_scale == 0 { // avoid divide by zero
z_scale = 0.05
}
xp := x / z_scale / SIDE_SCALE
yp := y / z_scale / SIDE_SCALE
zp := (z-min_coord_z) / z_scale
// bend space so that things seem to accelerate upwards
zp = math.Pow(zp + 0.05, 0.7)
// make basic vertical gradient
vgrad := colorutils.Cos2(colorutils.Clamp(zp, 0, 1), 0, 2, 0, 1)
// vgrad := 1 - colorutils.Clamp(zp, 0, 1)
// save to cache
thisPixelInfo.xp = xp
thisPixelInfo.yp = yp
thisPixelInfo.zp = zp
thisPixelInfo.vgrad = vgrad
}
return func(bytesIn chan []byte, bytesOut chan []byte, midiState *midi.MidiState) {
last_t := 0.0
t := 0.0
for bytes := range bytesIn {
var (
// hue knob controls hue
H = 0.05 + float64(midiState.ControllerValues[config.HUE_KNOB]) / 127.0
S = 0.9
V = 0.65
OVERBRIGHT = 1.3
)
// fire color
rFire, gFire, bFire := colorutils.HslToRgb(H, S, V)
rFire *= OVERBRIGHT
gFire *= OVERBRIGHT
bFire *= OVERBRIGHT
n_pixels := len(bytes) / 3
// time and speed knob bookkeeping
this_t := float64(time.Now().UnixNano())/1.0e9 - 9.4e8
speedKnob := float64(midiState.ControllerValues[config.SPEED_KNOB]) / 127.0
if speedKnob < 0.5 {
speedKnob = colorutils.RemapAndClamp(speedKnob, 0, 0.4, 0, 1)
} else {
speedKnob = colorutils.RemapAndClamp(speedKnob, 0.6, 1, 1, 4)
}
if midiState.KeyVolumes[config.SLOWMO_PAD] > 0 {
speedKnob *= 0.25
}
if last_t != 0 {
t += (this_t - last_t) * speedKnob * SPEED
}
last_t = this_t
// fill in bytes array
var r, g, b float64
for ii := 0; ii < n_pixels; ii++ {
//--------------------------------------------------------------------------------
pi := pixelInfoCache[ii]
// apply various wiggles to coordinate space
// offset, period, min, max
zp1 := ( pi.zp + colorutils.Cos2(pi.xp, t*0.33 + 8.63, 0.15 * 1.7, 0, 1) * 0.2 +
colorutils.Cos2(pi.xp, -t*0.23 + 2.43, 0.34 * 1.7, 0, 1) * 0.3 )
zp3 := ( pi.zp + colorutils.Cos2(pi.xp, -t*0.42 + 5.62, 0.27 * 1.7, 0, 1) * 0.2 +
colorutils.Cos2(pi.xp, t*0.20 + 3.07, 0.55 * 1.7, 0, 1) * 0.3 )
zp4 := ( pi.zp + colorutils.Cos2(pi.xp, t*0.36 + 4.81, 0.20 * 1.7, 0, 1) * 0.2 +
colorutils.Cos2(pi.xp, -t*0.26 + 7.94, 0.67 * 1.7, 0, 1) * 0.3 )
// smallest fastest noise
noise_lit := ( colorutils.Cos2(pi.xp, -4.37 * t/4, 0.21, 0, 1) +
colorutils.Cos2(pi.yp, 4.37 * t/4, 0.21, 0, 1) +
colorutils.Cos2(zp1, 4.37 * t, 0.21, 0, 1) ) / 3
// small fast noise
noise_med := ( colorutils.Cos2(pi.xp, -3 * t/4, 0.3, 0, 1) +
colorutils.Cos2(pi.yp, 3 * t/4, 0.3, 0, 1) +
colorutils.Cos2(zp3, 3 * t, 0.3, 0, 1) ) / 3
// big slow noise
noise_big := ( colorutils.Cos2(pi.xp, -0.9 * t/2, 0.8, 0, 1) +
colorutils.Cos2(pi.yp, 0.9 * t/2, 0.8, 0, 1) +
colorutils.Cos2(zp4, 0.9 * t, 0.8, 0, 1) ) / 3
// combine vgradient with noise
v := ( pi.vgrad +
colorutils.Remap(noise_lit, 0,1, -1,1)*0.17 +
colorutils.Remap(noise_med, 0,1, -1,1)*0.20 +
colorutils.Remap(noise_big, 0,1, -1,1)*0.80 )
// apply sine contrast curve
//v = colorutils.Cos2( colorutils.Clamp(v,0,1), 0, 2, 1, 0 )
// color map
r = v * rFire
g = v * gFire
b = v * bFire
r,g,b = colorutils.ContrastRgb(r,g,b, 0.7, 1.1)
// r,g,b = colorutils.RGBClipBlackByLuminance(r,g,b, 0.2) // TODO
bytes[ii*3+0] = colorutils.FloatToByte(r)
bytes[ii*3+1] = colorutils.FloatToByte(g)
bytes[ii*3+2] = colorutils.FloatToByte(b)
//--------------------------------------------------------------------------------
}
bytesOut <- bytes
}
}
} | opc/pattern-fire.go | 0.660391 | 0.515193 | pattern-fire.go | starcoder |
package copypasta
import (
"math"
"sort"
)
/* 分块思想 Sqrt Decomposition
一种技巧:组合两种算法从而降低复杂度 O(n^2) -> O(n√n)
参考 Competitive Programmer’s Handbook Ch.27
题目花样很多,下面举个例子
有 n 个对象,每个对象有一个「关于其他对象的统计量」ci(一个数、一个集合的元素个数,等等)
为方便起见,假设 ∑ci 的数量级和 n 一样,下面用 n 表示 ∑ci
当 ci > √n 时,这样的对象不超过 √n 个,暴力枚举这些对象之间的关系(或者,该对象与其他所有对象的关系),时间复杂度为 O(n) 或 O(n√n)。此乃算法一
当 ci ≤ √n 时,这样的对象有 O(n) 个,由于统计量 ci 很小,暴力枚举当前对象的统计量,时间复杂度为 O(n√n)。此乃算法二
这样,以 √n 为界,我们将所有对象划分成了两组,并用两个不同的算法处理
这两种算法是看待同一个问题的两种不同方式,通过恰当地组合这两个算法,复杂度由 O(n^2) 降至 O(n√n)
注意:**枚举时要做到不重不漏**
可以从这题上手 https://codeforces.com/problemset/problem/797/E
https://codeforces.com/problemset/problem/425/D
https://codeforces.com/problemset/problem/677/D
https://codeforces.com/problemset/problem/1207/F
https://codeforces.com/problemset/problem/1468/M 或四元环
LCP16 https://leetcode-cn.com/problems/you-le-yuan-de-you-lan-ji-hua/
*/
// TIPS: n 的整数分拆中,不同数字的个数至多有 O(√n) 种
/* 分散层叠算法 Fractional Cascading
https://en.wikipedia.org/wiki/Fractional_cascading
https://www.luogu.com.cn/blog/DPair2005/fen-san-ceng-die-suan-fa-xue-xi-bi-ji
https://www.luogu.com.cn/problem/P6466
*/
/*
分块数据结构
https://oi-wiki.org/ds/decompose/
https://oi-wiki.org/ds/block-array/
【推荐】https://www.luogu.com.cn/blog/220037/Sqrt1
浅谈基础根号算法——分块 https://www.luogu.com.cn/blog/deco/qian-tan-ji-chu-gen-hao-suan-fa-fen-kuai
todo https://www.csie.ntu.edu.tw/~sprout/algo2018/ppt_pdf/root_methods.pdf
题目推荐 https://cp-algorithms.com/data_structures/sqrt_decomposition.html#toc-tgt-8
好题 https://codeforces.com/problemset/problem/91/E
todo 动态逆序对 https://www.luogu.com.cn/problem/P3157 https://www.luogu.com.cn/problem/UVA11990
https://cp-algorithms.com/sequences/rmq.html
todo https://www.luogu.com.cn/problem/P3396
https://codeforces.com/problemset/problem/1207/F
https://codeforces.com/contest/455/problem/D
*/
func _(min, max func(int, int) int) {
type block struct {
l, r int // [l,r]
origin, sorted []int
//lazyAdd int
}
var blocks []block
sqrtInit := func(a []int) {
n := len(a)
blockSize := int(math.Sqrt(float64(n)))
//blockSize := int(math.Sqrt(float64(n) * math.Log2(float64(n+1))))
blockNum := (n-1)/blockSize + 1
blocks = make([]block, blockNum)
for i, v := range a {
j := i / blockSize
if i%blockSize == 0 {
blocks[j] = block{l: i, origin: make([]int, 0, blockSize)}
}
blocks[j].origin = append(blocks[j].origin, v)
}
for i := range blocks {
b := &blocks[i]
b.r = b.l + len(b.origin) - 1
b.sorted = append([]int(nil), b.origin...)
sort.Ints(b.sorted)
}
}
sqrtOp := func(l, r int, v int) { // [l,r], starts at 0
for i := range blocks {
b := &blocks[i]
if b.r < l {
continue
}
if b.l > r {
break
}
if l <= b.l && b.r <= r {
// do op on full block
} else {
// do op on part block
bl := max(b.l, l)
br := min(b.r, r)
for j := bl - b.l; j <= br-b.l; j++ {
// do b.origin[j]...
}
}
}
}
_ = []interface{}{sqrtInit, sqrtOp}
} | copypasta/sqrt_decomposition.go | 0.572723 | 0.437703 | sqrt_decomposition.go | starcoder |
package plandef
import (
"fmt"
"strings"
)
// InferPO is an Operator that starts at the given object and transitively
// follows predicate edges backwards to yield all reachable subjects.
type InferPO struct {
ID FreeTerm
Subject FreeTerm
Predicate FixedTerm
Object FixedTerm
}
func (op *InferPO) anOperator() {}
func (op *InferPO) String() string {
return fmt.Sprintf("InferPO(%v %v %v %v)",
op.ID, op.Subject, op.Predicate, op.Object)
}
// Key implements cmp.Key.
func (op *InferPO) Key(b *strings.Builder) {
lookupKey(b, "InferPO", op.ID, op.Subject, op.Predicate, op.Object)
}
// Terms is a convenience method to fetch the ID, Subject, Predicate, Object in
// an iterable slice.
func (op *InferPO) Terms() []Term {
return []Term{
op.ID,
op.Subject,
op.Predicate,
op.Object,
}
}
// InferSP is an Operator that finds all transitive paths from a subject using
// the given predicate edges.
type InferSP struct {
ID FreeTerm
Subject FixedTerm
Predicate FixedTerm
Object FreeTerm
}
func (op *InferSP) anOperator() {}
func (op *InferSP) String() string {
return fmt.Sprintf("InferSP(%v %v %v %v)",
op.ID, op.Subject, op.Predicate, op.Object)
}
// Key implements cmp.Key.
func (op *InferSP) Key(b *strings.Builder) {
lookupKey(b, "InferSP", op.ID, op.Subject, op.Predicate, op.Object)
}
// Terms is a convenience method to fetch the ID, Subject, Predicate, Object in
// an iterable slice.
func (op *InferSP) Terms() []Term {
return []Term{
op.ID,
op.Subject,
op.Predicate,
op.Object,
}
}
// InferSPO is an Operator that finds all transitive paths from subject to
// object using predicate edges.
type InferSPO struct {
ID FreeTerm
Subject FixedTerm
Predicate FixedTerm
// It's a bit of an open question, but we think this may only be an OID or a
// binding to an OID, never a literal or binding to a literal value.
// -Diego 2018-06-21
Object FixedTerm
}
func (op *InferSPO) anOperator() {}
func (op *InferSPO) String() string {
return fmt.Sprintf("InferSPO(%v %v %v %v)",
op.ID, op.Subject, op.Predicate, op.Object)
}
// Key implements cmp.Key.
func (op *InferSPO) Key(b *strings.Builder) {
lookupKey(b, "InferSPO", op.ID, op.Subject, op.Predicate, op.Object)
}
// Terms is a convenience method to fetch the ID, Subject, Predicate, Object in
// an iterable slice.
func (op *InferSPO) Terms() []Term {
return []Term{
op.ID,
op.Subject,
op.Predicate,
op.Object,
}
}
// LookupPO is an Operator that finds static facts with the given predicate and object.
type LookupPO struct {
ID FreeTerm
Subject FreeTerm
Predicate FixedTerm
Object FixedTerm
}
func (op *LookupPO) anOperator() {}
func (op *LookupPO) String() string {
return fmt.Sprintf("LookupPO(%v %v %v %v)",
op.ID, op.Subject, op.Predicate, op.Object)
}
// Key implements cmp.Key.
func (op *LookupPO) Key(b *strings.Builder) {
lookupKey(b, "LookupPO", op.ID, op.Subject, op.Predicate, op.Object)
}
// Terms is a convenience method to fetch the ID, Subject, Predicate, Object in
// an iterable slice.
func (op *LookupPO) Terms() []Term {
return []Term{
op.ID,
op.Subject,
op.Predicate,
op.Object,
}
}
// LookupPOCmp is an Operator that finds static facts with the given predicate
// and whose object is a literal value satisfying the given filter.
type LookupPOCmp struct {
ID FreeTerm
Subject FreeTerm
Predicate FixedTerm
Object *Variable
Cmp SelectClause
}
func (op *LookupPOCmp) anOperator() {}
func (op *LookupPOCmp) String() string {
return fmt.Sprintf("LookupPOCmp(%v %v %v %v %v)",
op.ID, op.Subject, op.Predicate, op.Object, op.Cmp)
}
// Key implements cmp.Key.
func (op *LookupPOCmp) Key(b *strings.Builder) {
lookupKey(b, "LookupPOCmp", op.ID, op.Subject, op.Predicate, op.Object)
b.WriteByte(' ')
op.Cmp.Key(b)
}
// Terms is a convenience method to fetch the ID, Subject, Predicate, Object in
// an iterable slice.
func (op *LookupPOCmp) Terms() []Term {
return []Term{
op.ID,
op.Subject,
op.Predicate,
op.Object,
}
}
// LookupS is an Operator that finds static facts with the given subject.
type LookupS struct {
ID FreeTerm
Subject FixedTerm
Predicate FreeTerm
Object FreeTerm
}
func (op *LookupS) anOperator() {}
func (op *LookupS) String() string {
return fmt.Sprintf("LookupS(%v %v %v %v)",
op.ID, op.Subject, op.Predicate, op.Object)
}
// Key implements cmp.Key.
func (op *LookupS) Key(b *strings.Builder) {
lookupKey(b, "LookupS", op.ID, op.Subject, op.Predicate, op.Object)
}
// Terms is a convenience method to fetch the ID, Subject, Predicate, Object in
// an iterable slice.
func (op *LookupS) Terms() []Term {
return []Term{
op.ID,
op.Subject,
op.Predicate,
op.Object,
}
}
// LookupSP is an Operator that finds static facts with the given subject and predicate.
type LookupSP struct {
ID FreeTerm
Subject FixedTerm
Predicate FixedTerm
Object FreeTerm
}
func (op *LookupSP) anOperator() {}
func (op *LookupSP) String() string {
return fmt.Sprintf("LookupSP(%v %v %v %v)",
op.ID, op.Subject, op.Predicate, op.Object)
}
// Key implements cmp.Key.
func (op *LookupSP) Key(b *strings.Builder) {
lookupKey(b, "LookupSP", op.ID, op.Subject, op.Predicate, op.Object)
}
// Terms is a convenience method to fetch the ID, Subject, Predicate, Object in
// an iterable slice.
func (op *LookupSP) Terms() []Term {
return []Term{
op.ID,
op.Subject,
op.Predicate,
op.Object,
}
}
// LookupSPO is an Operator that finds the(?) static fact with the given subject, predicate, and object.
type LookupSPO struct {
	ID FreeTerm
	Subject FixedTerm
	Predicate FixedTerm
	Object FixedTerm
}

// anOperator marks LookupSPO as an Operator.
func (op *LookupSPO) anOperator() {}

// String renders the operator with its four terms for diagnostics.
func (op *LookupSPO) String() string {
	return fmt.Sprintf("LookupSPO(%v %v %v %v)", op.ID, op.Subject, op.Predicate, op.Object)
}

// Key implements cmp.Key.
func (op *LookupSPO) Key(b *strings.Builder) {
	lookupKey(b, "LookupSPO", op.ID, op.Subject, op.Predicate, op.Object)
}

// Terms is a convenience method to fetch the ID, Subject, Predicate, Object in
// an iterable slice.
func (op *LookupSPO) Terms() []Term {
	return []Term{op.ID, op.Subject, op.Predicate, op.Object}
}
// lookupKey is a helper for the Lookup/Infer Key() methods.
func lookupKey(b *strings.Builder, op string, id, subject, predicate, object Term) {
b.WriteString(op)
b.WriteByte(' ')
id.Key(b)
b.WriteByte(' ')
subject.Key(b)
b.WriteByte(' ')
predicate.Key(b)
b.WriteByte(' ')
object.Key(b)
} | src/github.com/ebay/akutan/query/planner/plandef/lookups.go | 0.767603 | 0.426859 | lookups.go | starcoder |
package gurvy
import (
"crypto/rand"
"crypto/sha256"
"fmt"
"io"
"math/big"
"regexp"
"strings"
"github.com/IBM/mathlib/driver"
"github.com/IBM/mathlib/driver/common"
"github.com/consensys/gnark-crypto/ecc/bn254"
"github.com/consensys/gnark-crypto/ecc/bn254/fr"
)
/*********************************************************************/
// bn254Zr implements driver.Zr for BN254, backed by a plain big.Int.
type bn254Zr struct {
	*big.Int
}

// Plus returns the (unreduced) sum of the two scalars.
func (z *bn254Zr) Plus(a driver.Zr) driver.Zr {
	return &bn254Zr{new(big.Int).Add(z.Int, a.(*bn254Zr).Int)}
}

// Mod reduces the scalar in place modulo a.
func (z *bn254Zr) Mod(a driver.Zr) {
	z.Int.Mod(z.Int, a.(*bn254Zr).Int)
}

// PowMod returns z^x modulo the BN254 scalar-field order.
func (z *bn254Zr) PowMod(x driver.Zr) driver.Zr {
	exp := x.(*bn254Zr).Int
	return &bn254Zr{new(big.Int).Exp(z.Int, exp, fr.Modulus())}
}

// InvModP replaces the scalar with its multiplicative inverse modulo a.
func (z *bn254Zr) InvModP(a driver.Zr) {
	z.Int.ModInverse(z.Int, a.(*bn254Zr).Int)
}

// Bytes serializes the scalar through the library's common encoding.
func (z *bn254Zr) Bytes() []byte {
	return common.BigToBytes(z.Int)
}

// Equals reports whether the two scalars hold the same value.
func (z *bn254Zr) Equals(a driver.Zr) bool {
	return z.Int.Cmp(a.(*bn254Zr).Int) == 0
}

// Copy returns an independent duplicate of the scalar.
func (z *bn254Zr) Copy() driver.Zr {
	return &bn254Zr{new(big.Int).Set(z.Int)}
}

// Clone overwrites z with a's value by round-tripping through its
// big-endian bytes (so any sign is dropped, as before).
func (z *bn254Zr) Clone(a driver.Zr) {
	z.Int.SetBytes(a.(*bn254Zr).Int.Bytes())
}

// String renders the scalar in hexadecimal.
func (z *bn254Zr) String() string {
	return z.Int.Text(16)
}
/*********************************************************************/
// bn254G1 implements driver.G1 as an affine BN254 G1 point.
type bn254G1 struct {
	*bn254.G1Affine
}

// Clone overwrites g with the value of a via its compressed encoding.
func (g *bn254G1) Clone(a driver.G1) {
	enc := a.(*bn254G1).G1Affine.Bytes()
	if _, err := g.SetBytes(enc[:]); err != nil {
		panic("could not copy point")
	}
}

// Copy returns an independent duplicate of the point.
func (e *bn254G1) Copy() driver.G1 {
	dup := &bn254.G1Affine{}
	dup.Set(e.G1Affine)
	return &bn254G1{dup}
}

// Add accumulates a into g, working through Jacobian coordinates.
func (g *bn254G1) Add(a driver.G1) {
	var acc bn254.G1Jac
	acc.FromAffine(g.G1Affine)
	acc.AddMixed((*bn254.G1Affine)(a.(*bn254G1).G1Affine))
	g.G1Affine.FromJacobian(&acc)
}

// Mul returns a*g without modifying g.
func (g *bn254G1) Mul(a driver.Zr) driver.G1 {
	out := &bn254G1{&bn254.G1Affine{}}
	out.Clone(g)
	out.G1Affine.ScalarMultiplication(g.G1Affine, a.(*bn254Zr).Int)
	return out
}

// Mul2 computes e*g + f*Q.
func (g *bn254G1) Mul2(e driver.Zr, Q driver.G1, f driver.Zr) driver.G1 {
	sum := g.Mul(e)
	sum.Add(Q.Mul(f))
	return sum
}

// Equals reports whether the two points are the same.
func (g *bn254G1) Equals(a driver.G1) bool {
	return g.G1Affine.Equal(a.(*bn254G1).G1Affine)
}

// Bytes returns the uncompressed serialization of the point.
func (g *bn254G1) Bytes() []byte {
	enc := g.G1Affine.RawBytes()
	return enc[:]
}

// Sub subtracts a from g in place, working through Jacobian coordinates.
func (g *bn254G1) Sub(a driver.G1) {
	var lhs, rhs bn254.G1Jac
	lhs.FromAffine(g.G1Affine)
	rhs.FromAffine(a.(*bn254G1).G1Affine)
	lhs.SubAssign(&rhs)
	g.G1Affine.FromJacobian(&lhs)
}

// IsInfinity reports whether g is the point at infinity.
func (g *bn254G1) IsInfinity() bool {
	return g.G1Affine.IsInfinity()
}

// g1StrRegexp matches gnark-crypto's affine string form "E([x,y]),".
var g1StrRegexp *regexp.Regexp = regexp.MustCompile(`^E\([[]([0-9]+),([0-9]+)[]]\),$`)

// String renders the point as "(x,y)" with leading zeros stripped from
// both decimal coordinates.
func (g *bn254G1) String() string {
	m := g1StrRegexp.FindAllStringSubmatch(g.G1Affine.String(), -1)
	return "(" + strings.TrimLeft(m[0][1], "0") + "," + strings.TrimLeft(m[0][2], "0") + ")"
}
/*********************************************************************/
// bn254G2 implements driver.G2 as an affine BN254 G2 point.
type bn254G2 struct {
	*bn254.G2Affine
}

// Clone overwrites g with the value of a via its compressed encoding.
func (g *bn254G2) Clone(a driver.G2) {
	enc := a.(*bn254G2).G2Affine.Bytes()
	if _, err := g.SetBytes(enc[:]); err != nil {
		panic("could not copy point")
	}
}

// Copy returns an independent duplicate of the point.
func (e *bn254G2) Copy() driver.G2 {
	dup := &bn254.G2Affine{}
	dup.Set(e.G2Affine)
	return &bn254G2{dup}
}

// Mul returns a*g without modifying g.
func (g *bn254G2) Mul(a driver.Zr) driver.G2 {
	out := &bn254G2{&bn254.G2Affine{}}
	out.Clone(g)
	out.G2Affine.ScalarMultiplication(g.G2Affine, a.(*bn254Zr).Int)
	return out
}

// Add accumulates a into g, working through Jacobian coordinates.
func (g *bn254G2) Add(a driver.G2) {
	var acc bn254.G2Jac
	acc.FromAffine(g.G2Affine)
	acc.AddMixed((*bn254.G2Affine)(a.(*bn254G2).G2Affine))
	g.G2Affine.FromJacobian(&acc)
}

// Sub subtracts a from g in place, working through Jacobian coordinates.
func (g *bn254G2) Sub(a driver.G2) {
	var lhs, rhs bn254.G2Jac
	lhs.FromAffine(g.G2Affine)
	rhs.FromAffine((*bn254.G2Affine)(a.(*bn254G2).G2Affine))
	lhs.SubAssign(&rhs)
	g.G2Affine.FromJacobian(&lhs)
}

// Affine is a no-op: the point is always stored in affine form.
func (g *bn254G2) Affine() {
}

// Bytes returns the uncompressed serialization of the point.
func (g *bn254G2) Bytes() []byte {
	enc := g.G2Affine.RawBytes()
	return enc[:]
}

// String renders the underlying affine point.
func (g *bn254G2) String() string {
	return g.G2Affine.String()
}

// Equals reports whether the two points are the same.
func (g *bn254G2) Equals(a driver.G2) bool {
	return g.G2Affine.Equal(a.(*bn254G2).G2Affine)
}
/*********************************************************************/
// bn254Gt implements driver.Gt over bn254.GT.
type bn254Gt struct {
	*bn254.GT
}

// Equals reports whether the two GT elements are the same.
func (g *bn254Gt) Equals(a driver.Gt) bool {
	return g.GT.Equal(a.(*bn254Gt).GT)
}

// Inverse replaces g with its multiplicative inverse.
func (g *bn254Gt) Inverse() {
	g.GT.Inverse(g.GT)
}

// Mul multiplies g by a in place.
func (g *bn254Gt) Mul(a driver.Gt) {
	g.GT.Mul(g.GT, a.(*bn254Gt).GT)
}

// IsUnity reports whether g is the identity element.
func (g *bn254Gt) IsUnity() bool {
	var one bn254.GT
	one.SetOne()
	return one.Equal(g.GT)
}

// ToString renders the element.
func (g *bn254Gt) ToString() string {
	return g.GT.String()
}

// Bytes returns the serialization of the element.
func (g *bn254Gt) Bytes() []byte {
	enc := g.GT.Bytes()
	return enc[:]
}
/*********************************************************************/
type Bn254 struct {
}
func (c *Bn254) Pairing(p2 driver.G2, p1 driver.G1) driver.Gt {
t, err := bn254.MillerLoop([]bn254.G1Affine{*p1.(*bn254G1).G1Affine}, []bn254.G2Affine{*p2.(*bn254G2).G2Affine})
if err != nil {
panic(fmt.Sprintf("pairing failed [%s]", err.Error()))
}
return &bn254Gt{&t}
}
func (c *Bn254) Pairing2(p2a, p2b driver.G2, p1a, p1b driver.G1) driver.Gt {
t, err := bn254.MillerLoop([]bn254.G1Affine{*p1a.(*bn254G1).G1Affine, *p1b.(*bn254G1).G1Affine}, []bn254.G2Affine{*p2a.(*bn254G2).G2Affine, *p2b.(*bn254G2).G2Affine})
if err != nil {
panic(fmt.Sprintf("pairing 2 failed [%s]", err.Error()))
}
return &bn254Gt{&t}
}
func (c *Bn254) FExp(a driver.Gt) driver.Gt {
gt := bn254.FinalExponentiation(a.(*bn254Gt).GT)
return &bn254Gt{>}
}
// ModAdd returns (a + b) mod m.
func (*Bn254) ModAdd(a, b, m driver.Zr) driver.Zr {
	c := a.Plus(b)
	c.Mod(m)
	return c
}

// ModSub returns (a - b) mod m, implemented as a + (-b mod m).
func (c *Bn254) ModSub(a, b, m driver.Zr) driver.Zr {
	return c.ModAdd(a, c.ModNeg(b, m), m)
}

// ModNeg returns m - (a1 mod m).
// NOTE(review): when a1 ≡ 0 (mod m) this yields m rather than 0 — a
// non-canonical representative. Confirm callers tolerate this.
func (c *Bn254) ModNeg(a1, m driver.Zr) driver.Zr {
	a := a1.Copy()
	a.Mod(m)
	return &bn254Zr{a.(*bn254Zr).Int.Sub(m.(*bn254Zr).Int, a.(*bn254Zr).Int)}
}

// ModMul multiplies the reductions of a1 and b1 modulo m.
// NOTE(review): the product itself is NOT reduced modulo m — confirm this
// matches the other mathlib drivers before relying on it.
func (c *Bn254) ModMul(a1, b1, m driver.Zr) driver.Zr {
	a := a1.Copy()
	b := b1.Copy()
	a.Mod(m)
	b.Mod(m)
	return &bn254Zr{a.(*bn254Zr).Int.Mul(a.(*bn254Zr).Int, b.(*bn254Zr).Int)}
}
// GenG1 returns a fresh copy of the canonical G1 generator.
func (c *Bn254) GenG1() driver.G1 {
	_, _, g1, _ := bn254.Generators()
	enc := g1.Bytes()
	gen := &bn254.G1Affine{}
	if _, err := gen.SetBytes(enc[:]); err != nil {
		panic("could not generate point")
	}
	return &bn254G1{gen}
}

// GenG2 returns a fresh copy of the canonical G2 generator.
func (c *Bn254) GenG2() driver.G2 {
	_, _, _, g2 := bn254.Generators()
	enc := g2.Bytes()
	gen := &bn254.G2Affine{}
	if _, err := gen.SetBytes(enc[:]); err != nil {
		panic("could not generate point")
	}
	return &bn254G2{gen}
}

// GenGt returns the pairing of the two generators, e(g1, g2).
func (c *Bn254) GenGt() driver.Gt {
	return c.FExp(c.Pairing(c.GenG2(), c.GenG1()))
}

// GroupOrder returns the order of the BN254 scalar field.
func (c *Bn254) GroupOrder() driver.Zr {
	return &bn254Zr{fr.Modulus()}
}

// FieldBytes returns the byte length of a serialized field element.
func (c *Bn254) FieldBytes() int {
	return 32
}
// NewG1 returns the zero value of G1 (the point at infinity).
func (c *Bn254) NewG1() driver.G1 {
	return &bn254G1{&bn254.G1Affine{}}
}

// NewG2 returns the zero value of G2 (the point at infinity).
func (c *Bn254) NewG2() driver.G2 {
	return &bn254G2{&bn254.G2Affine{}}
}

// NewG1FromCoords is unimplemented for this curve and always returns nil.
func (c *Bn254) NewG1FromCoords(ix, iy driver.Zr) driver.G1 {
	return nil
}

// NewZrFromBytes builds a scalar from big-endian bytes.
func (c *Bn254) NewZrFromBytes(b []byte) driver.Zr {
	return &bn254Zr{new(big.Int).SetBytes(b)}
}

// NewZrFromInt builds a scalar from an int64.
func (c *Bn254) NewZrFromInt(i int64) driver.Zr {
	return &bn254Zr{big.NewInt(i)}
}

// NewG1FromBytes deserializes a G1 point, panicking on malformed input.
func (c *Bn254) NewG1FromBytes(b []byte) driver.G1 {
	pt := &bn254.G1Affine{}
	if _, err := pt.SetBytes(b); err != nil {
		panic(fmt.Sprintf("set bytes failed [%s]", err.Error()))
	}
	return &bn254G1{pt}
}

// NewG2FromBytes deserializes a G2 point, panicking on malformed input.
func (c *Bn254) NewG2FromBytes(b []byte) driver.G2 {
	pt := &bn254.G2Affine{}
	if _, err := pt.SetBytes(b); err != nil {
		panic(fmt.Sprintf("set bytes failed [%s]", err.Error()))
	}
	return &bn254G2{pt}
}

// NewGtFromBytes deserializes a GT element, panicking on malformed input.
func (c *Bn254) NewGtFromBytes(b []byte) driver.Gt {
	el := &bn254.GT{}
	if err := el.SetBytes(b); err != nil {
		panic(fmt.Sprintf("set bytes failed [%s]", err.Error()))
	}
	return &bn254Gt{el}
}
// HashToZr hashes data with SHA-256 and reduces the digest modulo the
// group order to obtain a scalar.
func (c *Bn254) HashToZr(data []byte) driver.Zr {
	digest := sha256.Sum256(data)
	digestBig := c.NewZrFromBytes(digest[:])
	digestBig.Mod(c.GroupOrder())
	return digestBig
}

// HashToG1 hashes data onto G1 using the SVDW map with an empty
// domain-separation tag.
func (c *Bn254) HashToG1(data []byte) driver.G1 {
	g1, err := bn254.HashToCurveG1Svdw(data, []byte{})
	if err != nil {
		panic(fmt.Sprintf("HashToG1 failed [%s]", err.Error()))
	}
	return &bn254G1{&g1}
}

// NewRandomZr returns a random scalar.
// NOTE(review): the rng argument is ignored — fr.Element.SetRandom draws
// from its own entropy source. Confirm callers do not rely on passing a
// deterministic reader here.
func (c *Bn254) NewRandomZr(rng io.Reader) driver.Zr {
	res := new(big.Int)
	v := &fr.Element{}
	_, err := v.SetRandom()
	if err != nil {
		panic(err)
	}
	return &bn254Zr{v.ToBigIntRegular(res)}
}

// Rand returns the default cryptographic random source.
func (c *Bn254) Rand() (io.Reader, error) {
	return rand.Reader, nil
}
package main
import (
"math"
"math/rand"
"github.com/unixpickle/model3d/model3d"
)
const (
	// BaseWidth is the base's extent along the Y axis, in model units.
	BaseWidth = 2.5
	// BaseLength is the base's extent along the X axis.
	BaseLength = 6.0
	// BaseHeight is the base's extent along the Z axis.
	BaseHeight = 1.0
	// BaseChunkSize is the edge length of each random surface chunk.
	BaseChunkSize = 0.2
)
// GenerateBase builds the full base solid: the smooth ellipsoidal base
// unioned with the random chunky finish, clipped to a box that extends one
// chunk size beyond the nominal base dimensions in X, Y, and Z.
func GenerateBase() model3d.Solid {
	// Margin so chunks protruding from the surface are not clipped flat.
	extra := model3d.Coord3D{X: 1, Y: 1}.Scale(BaseChunkSize)
	return model3d.IntersectedSolid{
		&model3d.Rect{
			MinVal: model3d.XYZ(-BaseLength/2, -BaseWidth/2, 0).Sub(extra),
			MaxVal: model3d.Coord3D{X: BaseLength / 2, Y: BaseWidth / 2,
				Z: BaseHeight + BaseChunkSize}.Add(extra),
		},
		model3d.JoinedSolid{
			BaseSmoothSolid(),
			GenerateChunkyFinish(),
		},
	}
}
// GenerateChunkyFinish scatters 500 randomly rotated cubic chunks over the
// surface of the smooth base to give it a rough finish.
func GenerateChunkyFinish() model3d.Solid {
	chunks := make(model3d.JoinedSolid, 0, 500)
	for i := 0; i < 500; i++ {
		// Keep the original order of rand calls: surface point first,
		// then the chunk's random orientation.
		center := SampleBasePoint()
		axis := model3d.NewCoord3DRandUnit()
		u, v := axis.OrthoBasis()
		// Spin the orthogonal basis about the main axis by a random
		// angle in [-0.5, 0.5) radians.
		theta := rand.Float64() - 0.5
		sin, cos := math.Sin(theta), math.Cos(theta)
		ru := u.Scale(cos).Add(v.Scale(sin))
		rv := u.Scale(-sin).Add(v.Scale(cos))
		chunks = append(chunks, &BaseChunk{
			Axes:   [3]model3d.Coord3D{axis, ru, rv},
			Center: center,
		})
	}
	return chunks.Optimize()
}
// SampleBasePoint picks a random point on the upper surface of the
// ellipsoidal base via rejection sampling.
func SampleBasePoint() model3d.Coord3D {
	var x, y, z float64
	for {
		// This is not quite uniform, but it is spread
		// out fairly nicely over the ellipsoid base.
		x = math.Tanh(rand.Float64()*4-2) * BaseLength / 2
		y = math.Tanh(rand.Float64()*4-2) * BaseWidth / 2
		z = math.Sqrt(1-(math.Pow(2*x/BaseLength, 2)+math.Pow(2*y/BaseWidth, 2))) * BaseHeight
		// z is NaN when (x, y) falls outside the ellipse; retry.
		if !math.IsNaN(z) {
			break
		}
	}
	return model3d.XYZ(x, y, z)
}
// BaseSmoothSolid is the smooth base: the interior of an ellipsoid with
// semi-axes BaseLength/2, BaseWidth/2, and BaseHeight, restricted to
// z >= 0 by the bounding box.
func BaseSmoothSolid() model3d.Solid {
	// Scaling a point by the reciprocal semi-axes maps the ellipsoid to
	// the unit sphere, so membership reduces to a norm check.
	invRadii := model3d.XYZ(2/BaseLength, 2/BaseWidth, 1/BaseHeight)
	return model3d.CheckedFuncSolid(
		model3d.XY(-BaseLength/2, -BaseWidth/2),
		model3d.XYZ(BaseLength/2, BaseWidth/2, BaseHeight),
		func(c model3d.Coord3D) bool {
			return c.Mul(invRadii).Norm() < 1
		},
	)
}
type BaseChunk struct {
Axes [3]model3d.Coord3D
Center model3d.Coord3D
}
func (b *BaseChunk) Min() model3d.Coord3D {
s := BaseChunkSize * math.Sqrt(3)
return b.Center.Sub(model3d.XYZ(s, s, s))
}
func (b *BaseChunk) Max() model3d.Coord3D {
s := BaseChunkSize * math.Sqrt(3)
return b.Center.Add(model3d.XYZ(s, s, s))
}
func (b *BaseChunk) Contains(c model3d.Coord3D) bool {
if !model3d.InBounds(b, c) {
return false
}
c = c.Sub(b.Center)
for _, axis := range b.Axes {
if math.Abs(c.Dot(axis)) >= BaseChunkSize/2 {
return false
}
}
return true
} | examples/parody/flag_statue/base.go | 0.666388 | 0.44565 | base.go | starcoder |
package camt
import (
"encoding/xml"
"github.com/figassis/bankiso/iso20022"
)
// Document02900101 is the XML document wrapper for a camt.029.001.01
// (Resolution Of Investigation) message.
type Document02900101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:camt.029.001.01 Document"`
	Message *ResolutionOfInvestigation `xml:"camt.029.001.01"`
}

// AddMessage allocates the embedded message and returns it for population.
func (d *Document02900101) AddMessage() *ResolutionOfInvestigation {
	d.Message = new(ResolutionOfInvestigation)
	return d.Message
}
// Scope
// The Resolution Of Investigation message is sent by a case assignee to a case creator/case assigner.
// This message is used to inform of the resolution of a case, and optionally provides details about:
// - the corrective action undertaken by the case assignee
// - information on the return where applicable
// Usage
// The Resolution Of Investigation message is used by the case assignee to inform a case creator or case assigner about the resolution of a:
// - request to cancel payment case
// - request to modify payment case
// - unable to apply case
// - claim non receipt case
// The Resolution Of Investigation message covers one and only one case at a time. If the case assignee needs to communicate about several cases, then several Resolution Of Investigation messages must be sent.
// The Resolution Of Investigation message provides:
// - the final outcome of the case, whether positive or negative
// - optionally, the details of the corrective action undertaken by the case assignee and the information of the return
// Whenever a payment instruction has been generated to solve the case under investigation, the optional CorrectionTransaction component present in the message must be completed.
// Whenever the action of modifying or cancelling a payment results in funds being returned, an investigating agent may attached some details in this message. These details facilitates the account reconciliations at the initiating bank and the intermediaries. It must be stressed that returning of funds is outside the scope of this Exceptions and Investigation service. The features given here is only meant to transmit the information of returns when it is available.
// The Resolution Of Investigation message must
// - be forwarded by all subsequent case assignee(s) until it reaches the case creator
// - not be used in place of a Reject Case Assignment or Case Status Report or Notification Of Case Assignment message.
// Take note of an exceptional rule that allows the use of Resolution Of Investigation in lieu of a Case Status Report. Case Status Report is a response-message to a Case Status Report Request. The latter which is sent when the assigner has waited too long (by his standard) for an answer. However it may happen that when the Request arrives, the investigating agent has just obtained a resolution. In such a situation, it would be redundant to send a Case Status Report when then followed immediately by a Resolution Of Investigation. It is therefore quite acceptable for the investigating agent, the assignee, to skip the Case Status Report and send the Resolution Of Investigation message directly.
type ResolutionOfInvestigation struct {
	// Note: the Assigner must be the sender of this confirmation and the Assignee must be the receiver.
	Assignment *iso20022.CaseAssignment `xml:"Assgnmt"`
	// Identifies a resolved case.
	ResolvedCase *iso20022.Case `xml:"RslvdCase"`
	// Indicates the status of the investigation.
	Status *iso20022.InvestigationStatusChoice `xml:"Sts,omitempty"`
	// References a transaction initiated to fix the case under investigation.
	CorrectionTransaction *iso20022.PaymentInstructionExtract `xml:"CrrctnTx,omitempty"`
}
// AddAssignment allocates and returns the Assignment component.
func (r *ResolutionOfInvestigation) AddAssignment() *iso20022.CaseAssignment {
	r.Assignment = &iso20022.CaseAssignment{}
	return r.Assignment
}

// AddResolvedCase allocates and returns the ResolvedCase component.
func (r *ResolutionOfInvestigation) AddResolvedCase() *iso20022.Case {
	r.ResolvedCase = &iso20022.Case{}
	return r.ResolvedCase
}

// AddStatus allocates and returns the Status component.
func (r *ResolutionOfInvestigation) AddStatus() *iso20022.InvestigationStatusChoice {
	r.Status = &iso20022.InvestigationStatusChoice{}
	return r.Status
}

// AddCorrectionTransaction allocates and returns the CorrectionTransaction component.
func (r *ResolutionOfInvestigation) AddCorrectionTransaction() *iso20022.PaymentInstructionExtract {
	r.CorrectionTransaction = &iso20022.PaymentInstructionExtract{}
	return r.CorrectionTransaction
}
func ( d *Document02900101 ) String() (result string, ok bool) { return } | generate/iso20022/camt/ResolutionOfInvestigation.go | 0.758063 | 0.499756 | ResolutionOfInvestigation.go | starcoder |
package dataplane
import (
"strings"
"github.com/submariner-io/submariner/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Cross-cluster TCP connectivity suite: a listener pod in cluster B and a
// connector pod in cluster A exchange data, across every combination of
// gateway / non-gateway node placement, both pod-to-pod and pod-to-service.
var _ = Describe("[dataplane] Basic TCP connectivity tests across clusters without discovery", func() {
	f := framework.NewDefaultFramework("dataplane-conn-nd")
	// useService selects whether the connector targets the listener pod's
	// IP directly or a ClusterIP service in front of it.
	var useService bool
	// verifyInteraction registers one It() that runs the connectivity check
	// with the given node placements for the listener and the connector.
	verifyInteraction := func(listenerScheduling, connectorScheduling framework.NetworkPodScheduling) {
		It("should have sent the expected data from the pod to the other pod", func() {
			RunConnectivityTest(f, useService, listenerScheduling, connectorScheduling, framework.ClusterB, framework.ClusterA)
		})
	}
	When("a pod connects via TCP to a remote pod", func() {
		BeforeEach(func() {
			useService = false
		})
		When("the pod is not on a gateway and the remote pod is not on a gateway", func() {
			verifyInteraction(framework.NonGatewayNode, framework.NonGatewayNode)
		})
		When("the pod is not on a gateway and the remote pod is on a gateway", func() {
			verifyInteraction(framework.GatewayNode, framework.NonGatewayNode)
		})
		When("the pod is on a gateway and the remote pod is not on a gateway", func() {
			verifyInteraction(framework.NonGatewayNode, framework.GatewayNode)
		})
		When("the pod is on a gateway and the remote pod is on a gateway", func() {
			verifyInteraction(framework.GatewayNode, framework.GatewayNode)
		})
	})
	When("a pod connects via TCP to a remote service", func() {
		BeforeEach(func() {
			useService = true
		})
		When("the pod is not on a gateway and the remote service is not on a gateway", func() {
			verifyInteraction(framework.NonGatewayNode, framework.NonGatewayNode)
		})
		When("the pod is not on a gateway and the remote service is on a gateway", func() {
			verifyInteraction(framework.GatewayNode, framework.NonGatewayNode)
		})
		When("the pod is on a gateway and the remote service is not on a gateway", func() {
			verifyInteraction(framework.NonGatewayNode, framework.GatewayNode)
		})
		When("the pod is on a gateway and the remote service is on a gateway", func() {
			verifyInteraction(framework.GatewayNode, framework.GatewayNode)
		})
	})
})
// RunConnectivityTest deploys a listener pod (optionally fronted by a
// ClusterIP service) in listenerCluster and a connector pod in
// connectorCluster, waits for both to finish a UUID handshake over TCP,
// and verifies each side received the other's data. Both pods are
// returned in case further verification is needed.
func RunConnectivityTest(f *framework.Framework, useService bool, listenerScheduling framework.NetworkPodScheduling, connectorScheduling framework.NetworkPodScheduling, listenerCluster framework.ClusterIndex, connectorCluster framework.ClusterIndex) (*framework.NetworkPod, *framework.NetworkPod) {
	By("Creating a listener pod in cluster B, which will wait for a handshake over TCP")
	listenerPod := f.NewNetworkPod(&framework.NetworkPodConfig{
		Type: framework.ListenerPod,
		Cluster: listenerCluster,
		Scheduling: listenerScheduling,
	})
	remoteIP := listenerPod.Pod.Status.PodIP
	if useService {
		By("Pointing a service ClusterIP to the listener pod in cluster B")
		service := listenerPod.CreateService()
		// Target the service VIP instead of the pod IP.
		remoteIP = service.Spec.ClusterIP
	}
	framework.Logf("Will send traffic to IP: %v", remoteIP)
	By("Creating a connector pod in cluster A, which will attempt the specific UUID handshake over TCP")
	connectorPod := f.NewNetworkPod(&framework.NetworkPodConfig{
		Type: framework.ConnectorPod,
		Cluster: connectorCluster,
		Scheduling: connectorScheduling,
		RemoteIP: remoteIP,
	})
	By("Waiting for the listener pod to exit with code 0, returning what listener sent")
	listenerPod.AwaitSuccessfulFinish()
	framework.Logf("Listener output:\n%s", keepLines(listenerPod.TerminationMessage, 3))
	By("Waiting for the connector pod to exit with code 0, returning what connector sent")
	connectorPod.AwaitSuccessfulFinish()
	framework.Logf("Connector output\n%s", keepLines(connectorPod.TerminationMessage, 2))
	By("Verifying that the listener got the connector's data and the connector got the listener's data")
	Expect(listenerPod.TerminationMessage).To(ContainSubstring(connectorPod.Config.Data))
	Expect(connectorPod.TerminationMessage).To(ContainSubstring(listenerPod.Config.Data))
	framework.Logf("Connector pod has IP: %s", connectorPod.Pod.Status.PodIP)
	By("Verifying the output of listener pod which must contain the source IP")
	Expect(listenerPod.TerminationMessage).To(ContainSubstring(connectorPod.Pod.Status.PodIP))
	// Return the pods in case further verification is needed
	return listenerPod, connectorPod
}
// keepLines returns at most the first n lines of output, joined back
// together with newlines.
func keepLines(output string, n int) string {
	all := strings.Split(output, "\n")
	if n < len(all) {
		all = all[:n]
	}
	return strings.Join(all, "\n")
}
package pastry
import (
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"math"
"math/big"
)
// idLen is the number of 4-bit digits in a NodeID (128 bits / 4).
const idLen = 32

// NodeID is a unique address for a node in the network, stored as two
// 64-bit words with the most significant word first.
type NodeID [2]uint64
// NodeIDFromBytes creates a NodeID from the first 16 bytes of source,
// interpreted big-endian. It returns an error when fewer than 16 bytes
// are supplied.
func NodeIDFromBytes(source []byte) (NodeID, error) {
	var id NodeID
	if len(source) < 16 {
		return id, errors.New("Not enough bytes to create a NodeID.")
	}
	id[0] = binary.BigEndian.Uint64(source[:8])
	id[1] = binary.BigEndian.Uint64(source[8:16])
	return id, nil
}
// String returns the 32-digit, zero-padded hexadecimal encoding of the
// NodeID, most significant word first.
func (id NodeID) String() string {
	return fmt.Sprintf("%016x%016x", id[0], id[1])
}
// Equals tests two NodeIDs for equality and returns true if they are considered equal, false if they are considered inequal. NodeIDs are considered equal if each digit of the NodeID is equal.
func (id NodeID) Equals(other NodeID) bool {
	return id[0] == other[0] && id[1] == other[1]
}
// Less reports whether other lies to the left of id in the circular node
// space, i.e. whether RelPos returns a negative result.
func (id NodeID) Less(other NodeID) bool {
	return id.RelPos(other) < 0
}
// absLess returns true if id < other as plain 128-bit unsigned integers,
// disregarding the modular (ring) arithmetic used elsewhere.
func (id NodeID) absLess(other NodeID) bool {
	return id[0] < other[0] || id[0] == other[0] && id[1] < other[1]
}
// digitSet returns the index of the most significant 4-bit digit of x that
// has any bits set. Digit 0 is the most significant nibble (bits 63-60);
// digit 15 is the least significant (bits 3-0). For x == 0 it returns 15,
// matching the previous hand-rolled implementation.
//
// Implemented with bits.LeadingZeros64: the first set digit is the number
// of leading zero bits divided by four. This also fixes a defect in the
// earlier branch tree, which tested 0x00000000f0000000 where digit 10's
// mask 0x0000000000f00000 was required, and so misreported digit 10 as 11.
func digitSet(x uint64) int {
	if x == 0 {
		return 15
	}
	return bits.LeadingZeros64(x) / 4
}
// CommonPrefixLen returns the number of leading 4-bit digits that are
// equal in the two NodeIDs: the first set digit of the XOR of each word.
// Identical IDs share all idLen digits.
func (id NodeID) CommonPrefixLen(other NodeID) int {
	if xor := id[0] ^ other[0]; xor != 0 {
		return digitSet(xor)
	}
	if xor := id[1] ^ other[1]; xor != 0 {
		// The low word's digit index is offset by 16 (|16 == +16 here).
		return digitSet(xor) | 16
	}
	return idLen
}
// differences returns the 128-bit distance between the two NodeIDs in
// both directions around the ring: the first result is the distance from
// other to id, the second from id to other. The two always sum to 2^128.
func (id NodeID) differences(other NodeID) (NodeID, NodeID) {
	var d1, d2 NodeID
	if id.absLess(other) {
		// other - id with a manual borrow between the 64-bit words.
		d1[1] = other[1] - id[1]
		// check for borrow
		b := 0
		if d1[1] > other[1] {
			b = 1
		}
		d1[0] = other[0] - (id[0] + uint64(b))
		// The opposite direction is the two's complement of d1 (mod 2^128).
		d2[0], d2[1] = math.MaxUint64-d1[0], math.MaxUint64-d1[1]+1
	} else {
		// id - other with a manual borrow between the 64-bit words.
		d2[1] = id[1] - other[1]
		// check for borrow
		b := 0
		if d2[1] > id[1] {
			b = 1
		}
		d2[0] = id[0] - (other[0] + uint64(b))
		d1[0], d1[1] = math.MaxUint64-d2[0], math.MaxUint64-d2[1]+1
	}
	return d2, d1
}
// Diff returns the difference between two NodeIDs as an absolute value.
// It performs the modular arithmetic necessary to find the shortest
// distance between the IDs in the (2^128)-item circular node space.
func (id NodeID) Diff(other NodeID) *big.Int {
	d1, d2 := id.differences(other)
	if d1.absLess(d2) {
		return d1.Base10()
	}
	return d2.Base10()
}
// RelPos uses modular arithmetic to determine whether the NodeID passed
// as an argument is to the left of the receiver (-1), the same (0), or
// to the right (1) in the circular node space: whichever direction is
// the shorter way around decides the sign.
func (id NodeID) RelPos(other NodeID) int {
	if id.Equals(other) {
		return 0
	}
	d1, d2 := id.differences(other)
	if d1.absLess(d2) {
		return 1
	}
	return -1
}
// Base10 returns the numeric value of the NodeID as a big.Int, treating
// the ID as one 128-bit unsigned integer (id[0] is the high word, id[1]
// the low word).
//
// The previous implementation only used SetInt64 and therefore had to
// split each word around math.MaxInt64 by hand; big.Int.SetUint64 makes
// that (and the package-level `one` helper) unnecessary.
func (id NodeID) Base10() *big.Int {
	result := new(big.Int).SetUint64(id[0])
	result.Lsh(result, 64)
	return result.Add(result, new(big.Int).SetUint64(id[1]))
}
// MarshalJSON fulfills the Marshaler interface, encoding the NodeID as a
// quoted hexadecimal JSON string.
func (id NodeID) MarshalJSON() ([]byte, error) {
	return []byte(`"` + id.String() + `"`), nil
}
// UnmarshalJSON fulfills the Unmarshaler interface, decoding a NodeID
// from its quoted hexadecimal JSON representation.
func (id *NodeID) UnmarshalJSON(source []byte) error {
	if id == nil {
		return errors.New("UnmarshalJSON on nil NodeID.")
	}
	var encoded string
	if err := json.Unmarshal(source, &encoded); err != nil {
		return err
	}
	raw, err := hex.DecodeString(encoded)
	if err != nil {
		return err
	}
	parsed, err := NodeIDFromBytes(raw)
	if err != nil {
		return err
	}
	*id = parsed
	return nil
}
// Digit returns the ith 4-bit digit in the NodeID. If i >= 32, Digit panics.
func (id NodeID) Digit(i int) byte {
if uint(i) >= 32 {
panic("invalid digit index")
}
n := id[0]
if i >= 16 {
n = id[1]
i &= 15
}
k := 4 * uint(15-i)
return byte((n >> k) & 0xf)
} | nodeid.go | 0.622574 | 0.537163 | nodeid.go | starcoder |
// Format for specifying a rectangle:
// (x1, x2, y1, y2) where x1 is the min and x2 is the max y coordinate.
// Similarly for y
// Format for specifying a line:
// (a, b) where a is a number and b is a binary number where
// 0 -> || to Y axis AND 1 -> || to X axis
// Format for specifying an interval:
// (a1, a2)
package main
import (
"fmt"
"strings"
"sync"
"flag"
"time"
"os"
"runtime/trace"
"log"
"runtime/pprof"
"sort"
)
// Command-line options controlling the search and its instrumentation.
var (
	maxLevel = flag.Int("l", 10, "MAX Level")
	procs = flag.Int("procs", 2, "Number of workers")
	p = flag.Bool("pprof", false, "Enable Profiling")
	t = flag.Bool("trace", false, "Enable Tracing")
)
// Compute Optimal Cut Seq
// var dp_seq map[[4]int][][6]int
// var dp_kill map[[4]int]int
// intervalIntersect reports whether the intervals i1 and i2, each given
// as (start, end), overlap. Intervals that merely touch at an endpoint
// do not count as intersecting.
func intervalIntersect(i1 [2]int, i2 [2]int) bool {
	return i1[0] < i2[1] && i2[0] < i1[1]
}
// optimalCut recursively searches for a guillotine-cut sequence over the
// rectangles in rects that minimizes the number of rectangles "killed"
// (sliced through by a cut line).
//
//	rects   rectangles (x1, x2, y1, y2) in the current region
//	x, y    sorted distinct x / y coordinates of the rectangle edges
//	reg     bounding region (x1, x2, y1, y2) of the current subproblem
//	seq     cut sequence accumulated on the path to this region; each
//	        entry is (x1, x2, y1, y2, cut-coordinate, orientation) with
//	        orientation 0 = cut parallel to the Y axis, 1 = parallel to X
//	dp_kill memo: region -> minimal kill count
//	dp_seq  memo: region -> cut sequence
//
// It returns the cut sequence including this level and the number of
// rectangles killed from this level down.
func optimalCut(rects [][4]int, x []int, y []int, reg [4]int, seq [][6]int, dp_kill map[[4]int]int, dp_seq map[[4]int][][6]int) ([][6]int, int) {
	// Choosing to not compute cuts for small enough sets.
	if len(rects) <= 3 {
		return seq, 0
	}
	// Memoized result for this region, if any.
	if killed, ok := dp_kill[reg]; ok {
		return dp_seq[reg], killed
	}
	// One candidate per interior x line plus one per interior y line.
	m := len(x) + len(y) - 4
	cuts := make([]int, m)
	seqs := make(map[int][][6]int)
	for i := 0; i < m; i++ {
		// A high enough constant acting as +infinity.
		cuts[i] = 255
	}
	// Try every interior vertical cut (parallel to the Y axis): cut k is
	// the line x = x[1+k], recorded in cuts[k].
	for k := 0; k < len(x)-2; k++ {
		var rects1 [][4]int
		var rects2 [][4]int
		boundary := false
		kill_cur := 0
		for _, rec := range rects {
			var xi [2]int
			xi[0] = rec[0]
			xi[1] = rec[1]
			if intervalIntersect(xi, [2]int{x[1+k], x[1+k]}) {
				// The cut line passes strictly through this rectangle.
				kill_cur++
			} else if rec[1] <= x[1+k] {
				rects1 = append(rects1, rec)
			} else {
				rects2 = append(rects2, rec)
			}
			if rec[0] == x[1+k] {
				boundary = true
			}
		}
		xx1 := x[:2+k]
		xx2 := x[2+k:]
		if boundary {
			// A rectangle starts exactly on the cut; keep the line in
			// the right-hand coordinate list.
			xx2 = append([]int{x[1+k]}, xx2...)
		}
		yy1m := make(map[int]bool)
		for _, rec := range rects1 {
			yy1m[rec[2]] = true
			yy1m[rec[3]] = true
		}
		reg1 := reg
		reg1[1] = x[1+k]
		yy2m := make(map[int]bool)
		for _, rec := range rects2 {
			yy2m[rec[2]] = true
			yy2m[rec[3]] = true
		}
		reg2 := reg
		reg2[0] = xx2[0]
		yy1 := make([]int, 0, len(yy1m))
		for v := range yy1m {
			yy1 = append(yy1, v)
		}
		yy2 := make([]int, 0, len(yy2m))
		for v := range yy2m {
			yy2 = append(yy2, v)
		}
		sort.Ints(yy1)
		sort.Ints(yy2)
		kill1 := 255
		kill2 := 255
		seq1 := make([][6]int, 1)
		seq2 := make([][6]int, 1)
		if len(rects1) < len(rects) && len(rects2) < len(rects) {
			seq1, kill1 = optimalCut(rects1, xx1, yy1, reg1, seq, dp_kill, dp_seq)
			seq2, kill2 = optimalCut(rects2, xx2, yy2, reg2, seq, dp_kill, dp_seq)
		}
		cuts[k] = kill1 + kill2 + kill_cur
		// Copy seq before appending: a plain append could share the
		// caller's backing array and clobber it across loop iterations.
		seq_cur := append([][6]int(nil), seq...)
		seq_cur = append(seq_cur, seq1...)
		seq_cur = append(seq_cur, seq2...)
		seqs[k] = seq_cur
		if kill_cur == 0 && len(rects1) < len(rects) && len(rects2) < len(rects) {
			// A free cut (kills nothing) is taken greedily.
			// BUG FIX: the recorded tuple previously omitted reg[3], so
			// the region's y2 was dropped and the cut value shifted a slot.
			cur := [][6]int{{reg[0], reg[1], reg[2], reg[3], x[1+k], 0}}
			seqf := append(cur, seq_cur...)
			dp_kill[reg] = cuts[k]
			dp_seq[reg] = seqf
			return seqf, cuts[k]
		}
	}
	// Try every interior horizontal cut (parallel to the X axis): cut k
	// is the line y = y[1+k], recorded in cuts[len(x)-2+k].
	for k := 0; k < len(y)-2; k++ {
		var rects1 [][4]int
		var rects2 [][4]int
		boundary := false
		kill_cur := 0
		for _, rec := range rects {
			var yi [2]int
			yi[0] = rec[2]
			yi[1] = rec[3]
			if intervalIntersect(yi, [2]int{y[1+k], y[1+k]}) {
				kill_cur++
			} else if rec[3] <= y[1+k] {
				rects1 = append(rects1, rec)
			} else {
				rects2 = append(rects2, rec)
			}
			if rec[2] == y[1+k] {
				boundary = true
			}
		}
		yy1 := y[:2+k]
		yy2 := y[2+k:]
		if boundary {
			yy2 = append([]int{y[1+k]}, yy2...)
		}
		xx1m := make(map[int]bool)
		for _, rec := range rects1 {
			xx1m[rec[0]] = true
			xx1m[rec[1]] = true
		}
		reg1 := reg
		reg1[3] = y[1+k]
		xx2m := make(map[int]bool)
		for _, rec := range rects2 {
			xx2m[rec[0]] = true
			xx2m[rec[1]] = true
		}
		reg2 := reg
		reg2[2] = yy2[0]
		xx1 := make([]int, 0, len(xx1m))
		for v := range xx1m {
			xx1 = append(xx1, v)
		}
		xx2 := make([]int, 0, len(xx2m))
		for v := range xx2m {
			xx2 = append(xx2, v)
		}
		sort.Ints(xx1)
		sort.Ints(xx2)
		kill1 := 255
		kill2 := 255
		seq1 := make([][6]int, 1)
		seq2 := make([][6]int, 1)
		if len(rects1) < len(rects) && len(rects2) < len(rects) {
			seq1, kill1 = optimalCut(rects1, xx1, yy1, reg1, seq, dp_kill, dp_seq)
			seq2, kill2 = optimalCut(rects2, xx2, yy2, reg2, seq, dp_kill, dp_seq)
		}
		cuts[len(x)-2+k] = kill1 + kill2 + kill_cur
		seq_cur := append([][6]int(nil), seq...)
		seq_cur = append(seq_cur, seq1...)
		seq_cur = append(seq_cur, seq2...)
		// BUG FIX: this previously stored the untouched prefix `seq`,
		// dropping the children's sequences for horizontal cuts.
		seqs[len(x)-2+k] = seq_cur
		if kill_cur == 0 && len(rects1) < len(rects) && len(rects2) < len(rects) {
			cur := [][6]int{{reg[0], reg[1], reg[2], reg[3], y[1+k], 1}}
			seqf := append(cur, seq_cur...)
			// BUG FIX: this previously read cuts[k]; horizontal cuts live
			// at offset len(x)-2 in the cuts slice.
			dp_kill[reg] = cuts[len(x)-2+k]
			dp_seq[reg] = seqf
			return seqf, cuts[len(x)-2+k]
		}
	}
	// No free cut was found: take the candidate with the fewest kills.
	minPtr := 0
	for k := 0; k < m; k++ {
		if cuts[k] < cuts[minPtr] {
			minPtr = k
		}
	}
	var newLine [2]int
	if minPtr < len(x)-2 {
		newLine = [2]int{x[1+minPtr], 0}
	} else {
		// BUG FIX: for horizontal index k = minPtr-(len(x)-2) the cut
		// line is y[1+k]; the previous code used y[k] (off by one).
		newLine = [2]int{y[minPtr-len(x)+3], 1}
	}
	dp_kill[reg] = cuts[minPtr]
	cur := [][6]int{{reg[0], reg[1], reg[2], reg[3], newLine[0], newLine[1]}}
	seqf := append(cur, seqs[minPtr]...)
	dp_seq[reg] = seqf
	return seqf, cuts[minPtr]
}
// sanityCheck reports whether no two rectangles in rects overlap.
func sanityCheck(rects [][4]int) bool {
	for i, a := range rects {
		for _, b := range rects[i+1:] {
			xOverlap := intervalIntersect([2]int{a[0], a[1]}, [2]int{b[0], b[1]})
			yOverlap := intervalIntersect([2]int{a[2], a[3]}, [2]int{b[2], b[3]})
			if xOverlap && yOverlap {
				return false
			}
		}
	}
	return true
}
// ComputeOCS validates that rects are pairwise disjoint and, if so, runs
// the optimal-cut search over the full bounding region. It returns the
// cut sequence and the number of rectangles killed; for an invalid set it
// prints a diagnostic and returns (nil, 0).
func ComputeOCS(rects [][4]int) ([][6]int, int) {
	dp_seq := make(map[[4]int][][6]int)
	dp_kill := make(map[[4]int]int)
	if !sanityCheck(rects) {
		fmt.Println("Invalid set!")
		return nil, 0
	}
	// Collect the distinct x and y coordinates of all rectangle edges.
	xm := map[int]bool{}
	ym := map[int]bool{}
	for _, r := range rects {
		xm[r[0]], xm[r[1]] = true, true
		ym[r[2]], ym[r[3]] = true, true
	}
	x := make([]int, 0, len(xm))
	for v := range xm {
		x = append(x, v)
	}
	y := make([]int, 0, len(ym))
	for v := range ym {
		y = append(y, v)
	}
	sort.Ints(x)
	sort.Ints(y)
	// The initial region is the bounding box of every rectangle.
	reg := [4]int{x[0], x[len(x)-1], y[0], y[len(y)-1]}
	var seq [][6]int
	return optimalCut(rects, x, y, reg, seq, dp_kill, dp_seq)
}
// End of ComputeOCS
// startingLevel is the level at which the tree formation starts.
const startingLevel = 4
type Perm []int
func NewPerm(islice []int) Perm {
p := make(Perm, 0)
for _, i := range islice {
p = append(p, i)
}
return p
}
func (p Perm) Add(i int) Perm {
p = append(p, i)
return p
}
func (p Perm) Size() int {
return len(p)
}
func (p Perm) String() string {
var b strings.Builder
for _, v := range p {
fmt.Fprintf(&b, "%d ", v)
}
return b.String()
}
// levelPermCount counts, per level n, the permutations processed by the
// workers; writes are guarded by lock.
var levelPermCount = make(map[int]int)

// lock serializes updates to levelPermCount from concurrent workers.
var lock sync.Mutex
// Set is a concurrency-safe set of permutations keyed by Perm.String().
// A mutex-guarded map is used first; sync.Map is a possible fallback if
// this becomes a contention bottleneck.
type Set struct {
	rwm sync.RWMutex
	set map[string]Perm
}

// NewSet returns an empty, ready-to-use Set.
func NewSet() *Set {
	return &Set{
		set: make(map[string]Perm),
	}
}
// Add inserts p into the set and reports whether it was newly added
// (false when an equal permutation was already present).
func (s *Set) Add(p Perm) bool {
	s.rwm.Lock()
	defer s.rwm.Unlock()
	key := p.String()
	_, existed := s.set[key]
	s.set[key] = p
	return !existed
}
// Get reports whether p is present in the set.
func (s *Set) Get(p Perm) bool {
	s.rwm.RLock()
	defer s.rwm.RUnlock()
	_, prs := s.set[p.String()]
	return prs
}

// Remove deletes p from the set; a no-op when p is absent.
func (s *Set) Remove(p Perm) {
	s.rwm.Lock()
	defer s.rwm.Unlock()
	delete(s.set, p.String())
}

// Size returns the number of permutations currently in the set.
func (s *Set) Size() int {
	s.rwm.RLock()
	defer s.rwm.RUnlock()
	return len(s.set)
}
// Values returns a snapshot of all permutations in the set.
// The capacity is computed from len(s.set) under the same read lock as
// the iteration, so the snapshot is internally consistent. This replaces
// the previous pattern of calling Size() (which takes the lock itself)
// before acquiring RLock — that had a time-of-check/time-of-use gap and
// relied on careful ordering to avoid re-entrant RLock deadlock.
func (s *Set) Values() []Perm {
	s.rwm.RLock()
	defer s.rwm.RUnlock()
	vals := make([]Perm, 0, len(s.set))
	for _, v := range s.set {
		vals = append(vals, v)
	}
	return vals
}
// Given a Baxter permutation, the function BP2FP constructs a corresponding floorplan
// Based on the mapping mentioned on page 15 in this thesis:
// https://www.cs.technion.ac.il/users/wwwb/cgi-bin/tr-get.cgi/2006/PHD/PHD-2006-11.pdf
// And the related paper
// <NAME>, <NAME>, and <NAME>. A bijection
// between permutations and floorplans, and its applications.
// Discrete Applied Mathematics, 154(12):1674–1684, 2006.
//
// Each rectangle is [xmin, xmax, ymin, ymax]; labels are 1..n, so index 0
// of rects is unused and sliced off on return. The maps `below` and `left`
// record the immediate spatial neighbour of each placed rectangle.
func BP2FP(perm Perm, n int) [][4]int {
	rects := make([][4]int, n+1)
	// The first label occupies the whole n x n region.
	rects[perm[0]] = [4]int{0, n, 0, n}
	below := make(map[int]int)
	left := make(map[int]int)
	prevlabel := perm[0]
	for k := 1; k < n; k++ {
		p := perm[k]
		if p < prevlabel {
			oldrect := rects[prevlabel]
			// middle := (oldrect[2] + oldrect[3]) / 2
			// Horizontal slice
			rects[p] = oldrect
			rects[p][2] = k
			rects[prevlabel][3] = k
			// Store spatial relations
			below[p] = prevlabel
			lp, past := left[prevlabel]
			if past {
				left[p] = lp
			}
			// Walk leftwards, extending p over neighbours with larger
			// labels and inheriting their left neighbour.
			_, ok := left[p]
			for ok && left[p] > p {
				l := left[p]
				rects[p][0] = rects[l][0]
				rects[l][3] = rects[p][2]
				ll, okl := left[l]
				if okl {
					left[p] = ll
				} else {
					delete(left, p)
				}
				ok = okl
			}
			prevlabel = p
		} else {
			oldrect := rects[prevlabel]
			// middle := (oldrect[0] + oldrect[1]) / 2
			// Vertical slice
			rects[p] = oldrect
			rects[p][0] = k
			rects[prevlabel][1] = k
			// Store spatial relations
			left[p] = prevlabel
			bp, past := below[prevlabel]
			if past {
				below[p] = bp
			}
			// Symmetric walk downwards over neighbours with smaller labels.
			_, ok := below[p]
			for ok && below[p] < p {
				b := below[p]
				rects[p][2] = rects[b][2]
				rects[b][1] = rects[p][0]
				bb, okb := below[b]
				if okb {
					below[p] = bb
				} else {
					// NOTE(review): the horizontal branch above deletes the
					// stale entry for p (delete(left, p)); here below[b] is
					// deleted instead, but okb==false means below[b] is
					// already absent, so this is a no-op and below[p] stays
					// stale. Possibly intended to be delete(below, p) —
					// confirm against the paper before changing.
					delete(below, b)
				}
				ok = okb
			}
			prevlabel = p
		}
		//draw(rects, n)
	}
	return rects[1:]
}
// End of BP2FP
// localExp inserts the value a into perm: every existing entry >= a is
// shifted up by one to make room, and a itself is appended at the end.
// The result has length len(perm)+1; the input is left untouched.
func localExp(perm Perm, a int) Perm {
	out := make(Perm, 0, len(perm)+1)
	for _, v := range perm {
		if v >= a {
			v++
		}
		out = append(out, v)
	}
	return append(out, a)
}
// min returns the smaller of a and b.
// The previous "// go:inline" annotation has been dropped: compiler
// directives use the exact form "//go:name" with no space, and no
// go:inline directive exists, so it was a plain (misleading) comment.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// isBaxter reports whether perm avoids the vincular patterns 3-14-2 and
// 2-41-3, i.e. whether it is a Baxter permutation. For each ascent/descent
// of size > 2 it scans the prefix for a candidate "3" (resp. "2") and the
// suffix for a matching "2" (resp. "3") inside the value gap.
// NOTE(review): the inner loops shadow the outer index k with the ranged
// value, and the assignments `two = k` / `three = k` immediately before
// `return false` are dead stores — candidates for cleanup, left untouched
// here to preserve the exact control flow.
func isBaxter(perm Perm) bool {
	for k := 0; k < len(perm)-1; k++ {
		// Sentinels: two starts above any value, three below any value.
		two, three := 1000, 0
		if perm[k] < perm[k+1]-2 {
			// Memorise -14-
			m, M := perm[k], perm[k+1]
			prefix, suffix := perm[:k], perm[k+2:]
			two = 1000
			three = 0
			//Avoid 3-14-2
			for _, k := range prefix {
				if (k > m + 1) && (k < M) {
					three = max(k, three)
				}
			}
			for _, k := range suffix {
				if (k < three) && (k > m){
					two = k
					return false
				}
			}
		}
		if perm[k] > perm[k+1]+2 {
			// Memorise -41-
			m, M:= perm[k+1], perm[k]
			prefix, suffix := perm[:k], perm[k+2:]
			// Avoid 2-14-3
			for _, k := range prefix {
				if k > m && k < M-1 {
					two = min(k, two)
				}
			}
			for _, k := range suffix {
				if k > two && k < M {
					three = k
					return false
				}
			}
		}
	}
	return true
}
// addRange pushes the closed interval r onto the stack, first merging it
// with the top entry for as long as the two overlap or are adjacent
// (separated by a gap of at most 1). Implemented iteratively; behaves
// exactly like the original recursive formulation.
func addRange(stack *[][2]int, r [2]int) {
	s := *stack
	for len(s) > 0 {
		top := s[len(s)-1]
		if r[0] > top[1]+1 || top[0] > r[1]+1 {
			// Disjoint and non-adjacent: no further merging possible.
			break
		}
		// Pop the top and widen r to cover both intervals.
		s = s[:len(s)-1]
		lo, hi := r[0], r[1]
		if top[0] < lo {
			lo = top[0]
		}
		if top[1] > hi {
			hi = top[1]
		}
		r = [2]int{lo, hi}
	}
	*stack = append(s, r)
}
// isSeperable reports whether inserting each value of perm as the unit
// interval [p,p] and merging adjacent/overlapping intervals (via addRange)
// collapses the whole permutation into a single contiguous range — the
// separability test used by worker.
func isSeperable(perm Perm) bool {
	var stack [][2]int
	for _, p := range perm {
		r := [2]int{p, p}
		addRange(&stack, r)
	}
	// Idiomatic boolean return instead of if/return true/return false.
	return len(stack) == 1
}
// initCurLevel seeds s with the Baxter permutations of length 4, produced
// by expanding every permutation of length 3 at each possible insertion
// value and keeping only the Baxter ones.
func initCurLevel(s *Set) {
	seeds := [][]int{
		{1, 2, 3}, {1, 3, 2}, {2, 1, 3},
		{3, 1, 2}, {2, 3, 1}, {3, 2, 1},
	}
	base := NewSet()
	for _, seed := range seeds {
		base.Add(NewPerm(seed))
	}
	for _, perm := range base.Values() {
		for a := 1; a <= 4; a++ {
			if next := localExp(perm, a); isBaxter(next) {
				s.Add(next)
			}
		}
	}
}
// curLevel holds the seed permutations from which worker goroutines start.
var curLevel *Set
// main parses flags, optionally enables CPU profiling (flag p) and
// execution tracing (flag t), seeds the starting level of Baxter
// permutations, runs one worker goroutine per seed, and prints the
// per-level permutation counts once all workers finish. Total runtime is
// reported via the deferred trackTime. The p and t flag variables are
// declared elsewhere in this file.
func main() {
	flag.Parse()
	defer trackTime(time.Now(), "MAIN")
	if *p {
		log.Println("Profiling Enabled")
		pf, err := os.Create("pprof.out")
		if err != nil {
			log.Fatal("Could not create pprof file")
		}
		defer pf.Close()
		pprof.StartCPUProfile(pf)
		defer pprof.StopCPUProfile()
	}
	if *t {
		log.Println("Tracing Enabled")
		tf, err := os.Create("trace.out")
		if err != nil {
			log.Fatal("Could not create trace file")
		}
		defer tf.Close()
		trace.Start(tf)
		defer trace.Stop()
	}
	curLevel = NewSet()
	initCurLevel(curLevel)
	// One goroutine per seed permutation; each recurses through deeper
	// levels itself and calls wg.Done exactly once (see worker).
	var wg sync.WaitGroup
	for _, perm := range curLevel.Values() {
		wg.Add(1)
		go worker(perm, startingLevel, &wg)
	}
	wg.Wait()
	fmt.Println(levelPermCount)
}
// worker recursively expands perm one level at a time up to *maxLevel
// (a flag declared elsewhere). For every non-separable Baxter expansion it
// builds the floorplan, computes the optimal cut sequence, and bumps the
// shared per-level counter under lock. wg.Done fires only at the top of
// the recursion (level == startingLevel), matching the single wg.Add(1)
// that main performed for this goroutine.
func worker(perm Perm, level int, wg *sync.WaitGroup) {
	if level >= *maxLevel {
		return
	}
	for a := 1; a <= level+1; a++ {
		newPerm := localExp(perm, a)
		if isBaxter(newPerm) {
			if !isSeperable(newPerm) {
				n := level+1
				fmt.Println(n)
				fmt.Println(newPerm)
				rects := BP2FP(newPerm, n)
				_, kill := ComputeOCS(rects)
				lock.Lock()
				levelPermCount[n]++
				if kill >= n/4 {
					// Save to file instead
					fmt.Println()
				}
				lock.Unlock()
			}
			worker(newPerm, level+1, wg)
		}
	}
	if level == startingLevel {
		wg.Done()
	}
}
// trackTime prints msg together with the elapsed time since s; intended
// to be used as `defer trackTime(time.Now(), "label")`.
func trackTime(s time.Time, msg string) {
	elapsed := time.Since(s)
	fmt.Println(msg, ":", elapsed)
}
package data
// Data contains a single set of data most likely imported from tsm.
// The empty struct tags previously attached to raw and gap carried no
// information and have been removed.
type Data struct {
	raw []float64 // raw values
	gap float64   // gap in % between bar chart values

	Max    float64  `json:"fmax"`   // max raw value
	NMax   int      `json:"max"`    // max normalized value
	Scale  []string `json:"scale"`  // yaxis labels
	Values []int    `json:"values"` // pixel values
	Type   string   `json:"type"`
	Title  string   `json:"title"`
}

// Options contains configuration for a single dataset.
type Options struct {
	// Type specifies the chart type to plot. Can be either "area" or "line".
	// By default "area" is used. XXX Note that line isn't really supported.
	Type string
	// Title to display on top of the chart.
	Title string
	// Gap is the % of space between bar charts, if the number of datapoints
	// supplied is smaller than the chart width. I.e. plotting 30 values with
	// a chart width of 300 and a Gap of 0.1 plots 30 individual bar chart
	// value with a thickness of 24px with 2 * 10% (left & right) space in between.
	// By default the Gap is 0.00.
	Gap float64
}

// NewData creates a new dataset from []float64.
func NewData(opt *Options, in []float64) Data {
	return Data{Type: opt.Type, Title: opt.Title, gap: opt.Gap, raw: in}
}

// Len returns the number of items in the dataset.
func (d *Data) Len() int {
	return len(d.raw)
}

// MinMaxAvg returns the Minimum, Maximum and Average values of the raw
// data. Zero entries are skipped when computing the minimum. As a side
// effect the maximum is stored in d.Max. An empty dataset now yields
// (0, 0, 0); previously the average was NaN due to a 0/0 division.
func (d *Data) MinMaxAvg() (float64, float64, float64) {
	if len(d.raw) == 0 {
		d.Max = 0
		return 0, 0, 0
	}
	max := 0.
	avg := 0.
	min := 0.
	for _, v := range d.raw {
		if max < v {
			max = v
		}
		// Ignore zeros so gaps in the data do not pin the minimum to 0.
		if v != 0 && (min == 0 || min > v) {
			min = v
		}
		avg += v
	}
	avg /= float64(len(d.raw))
	d.Max = max
	return min, max, avg
}

// normalize linearly scales the raw/tsm values so the largest maps to
// height, appending the truncated results to d.Values.
func (d *Data) normalize(height int) {
	_, d.Max, _ = d.MinMaxAvg()
	if d.Max == 0 {
		// we have an empty (or all-zero) dataset: pass values through
		for _, v := range d.raw {
			d.Values = append(d.Values, int(v))
		}
		return
	}
	fmax := float64(height)
	a := fmax / d.Max
	b := fmax - a*d.Max // algebraically 0; kept for numeric symmetry
	for _, v := range d.raw {
		newv := a*v + b
		d.Values = append(d.Values, int(newv))
	}
}

// normalizeMax normalizes our max value according to height and a global
// max value, storing the result in d.NMax.
func (d *Data) normalizeMax(height int, max float64) {
	fmax := float64(height)
	a2 := fmax / max
	b2 := fmax - a2*max
	d.NMax = int(a2*float64(d.Max) + b2)
}
package schema
import (
"fmt"
"testing"
"github.com/danos/encoding/rfc7951"
"github.com/danos/mgmterror"
yang "github.com/danos/yang/schema"
)
// TestLog provides the ability for tests to verify which components had
// which operations performed on them (validate, set config, get config, get
// state), and in which order.

// Operation names recorded in the test log; NoFilter selects all entries
// when passed to CheckLogEntries.
const (
	NoFilter = ""
	SetRunning = "SetRunning"
	GetRunning = "GetRunning"
	GetState = "GetState"
	Validate = "Validate"
)

// TestLogEntry records a single operation (fn) and its parameters.
type TestLogEntry struct {
	fn string
	params []string
}

// NewTestLogEntry constructs a TestLogEntry for fn with the given params.
func NewTestLogEntry(fn string, params ...string) TestLogEntry {
	return TestLogEntry{fn: fn, params: params}
}
// TestCompMgr allows replacement of ComponentManager functionality at a high
// level where we don't want an actual (d)bus or components created. Instead
// we want to verify that the correct tuple of {config, model} has been passed
// to the bus (for validate and commit) or provide the ability to return the
// expected config (or an error) for 'get' operations.

// testCompParams is the state shared between the test managers: the test
// handle, per-model config/state maps, and the ordered operation log.
type testCompParams struct {
	t *testing.T
	validatedConfig map[string]string
	committedConfig map[string]string
	currentState map[string]string
	testLog []TestLogEntry
}

// TestCompMgr wraps the real compMgr with test-only parameter storage.
type TestCompMgr struct {
	*compMgr
	tcmParams *testCompParams
}

// Compile time check that the concrete type meets the interface
var _ ComponentManager = (*TestCompMgr)(nil)
// NewTestCompMgr builds a ComponentManager whose bus operations are backed
// by in-memory maps and an operation log, suitable for unit tests. The
// previous zero-size hints on the map allocations were no-ops and have
// been dropped.
func NewTestCompMgr(
	t *testing.T,
	ms yang.ModelSet,
	mappings *ComponentMappings,
) *TestCompMgr {
	var tcmParams testCompParams

	tcmParams.t = t
	tcmParams.testLog = make([]TestLogEntry, 0)
	tcmParams.validatedConfig = make(map[string]string)
	tcmParams.committedConfig = make(map[string]string)
	tcmParams.currentState = make(map[string]string)

	tcm := &TestCompMgr{
		compMgr: NewCompMgr(
			newTestOpsMgr(&tcmParams),
			newTestSvcMgr(&tcmParams),
			ms,
			mappings,
		),
		tcmParams: &tcmParams,
	}

	return tcm
}
// Config / state management.
func (tcm *TestCompMgr) ValidatedConfig(model string) string {
cfg, ok := tcm.tcmParams.validatedConfig[model]
if !ok {
tcm.tcmParams.t.Fatalf("No validated config for %s", model)
}
return cfg
}
func (tcm *TestCompMgr) CommittedConfig(model string) string {
cfg, ok := tcm.tcmParams.committedConfig[model]
if !ok {
tcm.tcmParams.t.Fatalf("No committed config for %s", model)
}
return cfg
}
func (tcm *TestCompMgr) CurrentState(model string) string {
cfg, ok := tcm.tcmParams.currentState[model]
if !ok {
tcm.tcmParams.t.Fatalf("No current state for %s", model)
}
return cfg
}
// SetCurrentState registers stateJson as the state to serve for model.
func (tcm *TestCompMgr) SetCurrentState(model, stateJson string) {
	tcm.tcmParams.currentState[model] = stateJson
}
// Log management.

// addLogEntry appends an operation record to the shared test log.
// NOTE(review): the fmt.Printf below looks like leftover debug output —
// confirm whether it is intentional before removing.
func (tom *testOpsMgr) addLogEntry(fn string, params ...string) {
	fmt.Printf("Add %s\n", fn)
	tom.tcmParams.testLog = append(tom.tcmParams.testLog,
		NewTestLogEntry(fn, params...))
}
// ClearLogEntries discards all recorded operations.
// NOTE(review): the fmt.Printf below looks like leftover debug output.
func (tcm *TestCompMgr) ClearLogEntries() {
	fmt.Printf("Clear log\n")
	tcm.tcmParams.testLog = nil
}
// filteredLogEntries returns the log entries whose function name equals
// filter, preserving their recorded order.
func (tcm *TestCompMgr) filteredLogEntries(filter string) []TestLogEntry {
	matches := make([]TestLogEntry, 0)
	for _, entry := range tcm.tcmParams.testLog {
		if entry.fn != filter {
			continue
		}
		matches = append(matches, entry)
	}
	return matches
}
// CheckLogEntries verifies that the recorded operation log matches the
// expected entries (optionally restricted to one function name via
// filter), dumping the full log and failing the test on any mismatch.
// A guard has been added so that an actual entry with fewer parameters
// than expected fails the test cleanly instead of panicking with an
// index-out-of-range.
func (tcm *TestCompMgr) CheckLogEntries(
	t *testing.T,
	name string,
	entries []TestLogEntry,
	filter string,
) {
	actualLog := tcm.tcmParams.testLog
	fmt.Printf("Entries: %d\n", len(tcm.tcmParams.testLog))
	if filter != NoFilter {
		actualLog = tcm.filteredLogEntries(filter)
	}

	if len(entries) != len(actualLog) {
		t.Logf("\nTEST: %s\n", name)
		t.Logf("\nExp: %d entries\nGot: %d\n",
			len(entries), len(actualLog))
		tcm.dumpLog(t)
		t.Fatalf("---\n")
		return
	}

	for ix, entry := range entries {
		if entry.fn != actualLog[ix].fn {
			t.Logf("\nTEST: %s\n", name)
			tcm.dumpLog(t)
			t.Fatalf("\nExp fn: %s\nGot fn: %s\n", entry.fn, actualLog[ix].fn)
			return
		}
		// Previously a short actual params slice caused a panic below.
		if len(actualLog[ix].params) < len(entry.params) {
			t.Logf("\nTEST: %s\n", name)
			tcm.dumpLog(t)
			t.Fatalf("\nExp %d params, got %d for fn %s\n",
				len(entry.params), len(actualLog[ix].params), entry.fn)
			return
		}
		for iy, param := range entry.params {
			if param != actualLog[ix].params[iy] {
				t.Logf("\nTEST: %s\n", name)
				tcm.dumpLog(t)
				t.Fatalf("\nExp param: %s\nGot param: %s\n",
					param, actualLog[ix].params[iy])
				return
			}
		}
	}
}
// dumpLog writes the entire recorded operation log to the test output:
// one line per entry, plus one indented line per parameter.
func (tcm *TestCompMgr) dumpLog(t *testing.T) {
	t.Logf("--- START TEST LOG ---\n")
	for _, entry := range tcm.tcmParams.testLog {
		t.Logf("%s:\n", entry.fn)
		for _, param := range entry.params {
			t.Logf("\t%s\n", param)
		}
	}
	t.Logf("--- END TEST LOG ---\n")
}
// TestOpsMgr

// testOpsMgr is the test stand-in for the operations (bus) manager: it
// records operations in the shared log and serves configs/state from the
// shared in-memory maps.
type testOpsMgr struct {
	tcmParams *testCompParams
}

// newTestOpsMgr wraps tcmParams in a testOpsMgr.
func newTestOpsMgr(tcmParams *testCompParams) *testOpsMgr {
	return &testOpsMgr{tcmParams: tcmParams}
}
// marshal encodes object for storage: a plain string is passed through
// unchanged, anything else is RFC 7951-encoded, with encode failures
// reported as a malformed-message error.
func (tom *testOpsMgr) marshal(object interface{}) (string, error) {
	if str, isStr := object.(string); isStr {
		return str, nil
	}
	encoded, err := rfc7951.Marshal(object)
	if err != nil {
		return "", mgmterror.NewMalformedMessageError()
	}
	return string(encoded), nil
}
// unmarshal decodes encodedData into object: a *string target receives
// the raw data unchanged, anything else is decoded as RFC 7951 JSON, with
// decode failures reported as a malformed-message error.
func (tom *testOpsMgr) unmarshal(encodedData string, object interface{}) error {
	if target, isStr := object.(*string); isStr {
		*target = encodedData
		return nil
	}
	if err := rfc7951.Unmarshal([]byte(encodedData), object); err != nil {
		return mgmterror.NewMalformedMessageError()
	}
	return nil
}
// Dial is a no-op in the test implementation; there is no bus to connect to.
func (tom *testOpsMgr) Dial() error { return nil }
// SetConfigForModel records the marshalled config as the committed config
// for modelName and logs a SetRunning operation; the marshalling error,
// if any, is returned after the entry is recorded. The previous separate
// `var err error` declaration and the `string(cfg)` conversion of an
// already-string value were redundant and have been removed.
func (tom *testOpsMgr) SetConfigForModel(
	modelName string,
	object interface{},
) error {
	cfg, err := tom.marshal(object)
	tom.tcmParams.committedConfig[modelName] = cfg
	fmt.Printf("\tadd log entry\n")
	tom.addLogEntry(SetRunning, modelName, cfg)
	return err
}
// CheckConfigForModel records the marshalled config as the validated
// config for modelName and logs a Validate operation; the marshalling
// error, if any, is returned after the entry is recorded. The previous
// `string(cfg)` conversion of an already-string value was a no-op and has
// been removed.
func (tom *testOpsMgr) CheckConfigForModel(
	modelName string,
	object interface{},
) error {
	cfg, err := tom.marshal(object)
	tom.tcmParams.validatedConfig[modelName] = cfg
	tom.addLogEntry(Validate, modelName, cfg)
	return err
}
// StoreConfigByModelInto decodes the committed config recorded for
// modelName into object and logs a GetRunning operation.
func (tom *testOpsMgr) StoreConfigByModelInto(
	modelName string,
	object interface{},
) error {
	err := tom.unmarshal(tom.tcmParams.committedConfig[modelName], object)
	tom.addLogEntry(GetRunning, modelName, fmt.Sprintf("%v", object))
	return err
}

// StoreStateByModelInto decodes the current state recorded for modelName
// into object and logs a GetState operation.
func (tom *testOpsMgr) StoreStateByModelInto(
	modelName string,
	object interface{},
) error {
	err := tom.unmarshal(tom.tcmParams.currentState[modelName], object)
	tom.addLogEntry(GetState, modelName, fmt.Sprintf("%v", object))
	return err
}
// TestSvcMgr

// testSvcMgr is the test stand-in for the service manager; it shares the
// test parameters with the rest of the test component manager.
type testSvcMgr struct {
	tcmParams *testCompParams
}

// newTestSvcMgr wraps tcmParams in a testSvcMgr.
func newTestSvcMgr(tcmParams *testCompParams) *testSvcMgr {
	return &testSvcMgr{tcmParams: tcmParams}
}

// Close is a no-op in the test implementation (redundant bare `return`
// removed).
func (tsm *testSvcMgr) Close() {}
// IsActive reports whether the named component is active.
// For now, assume any component is active.
func (tsm *testSvcMgr) IsActive(name string) (bool, error) {
	return true, nil
}
package math
import (
"context"
"fmt"
"math"
"github.com/influxdata/flux"
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/internal/errors"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/values"
)
// SpecialFns holds the math builtins whose signatures do not fit the
// generic one- or two-float helpers below; it is populated in init.
var SpecialFns map[string]values.Function
// generateMathFunctionX wraps a one-argument float64 math function as a
// Flux value function with a single required float parameter "x" and a
// float return.
func generateMathFunctionX(name string, mathFn func(float64) float64) values.Function {
	return values.NewFunction(
		name,
		semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
			Parameters: map[string]semantic.PolyType{"x": semantic.Float},
			Required: semantic.LabelSet{"x"},
			Return: semantic.Float,
		}),
		func(ctx context.Context, args values.Object) (values.Value, error) {
			v, ok := args.Get("x")
			if !ok {
				return nil, errors.New(codes.Invalid, "missing argument x")
			}
			// Only float arguments are accepted; no implicit conversion
			// is attempted despite the error wording.
			if v.Type().Nature() == semantic.Float {
				return values.NewFloat(mathFn(v.Float())), nil
			}
			return nil, fmt.Errorf("cannot convert argument of type %v to float", v.Type().Nature())
		}, false,
	)
}
// generateMathFunctionXY wraps a two-argument float64 math function as a
// Flux value function taking two required float parameters and returning
// a float. Parameter names default to "x" and "y" unless overridden via
// argNames (exactly two names are expected when supplied).
// NOTE(review): missing-argument errors here use fmt.Errorf while the
// one-argument helper uses errors.New(codes.Invalid, ...) — confirm
// whether the two should be made consistent.
func generateMathFunctionXY(name string, mathFn func(float64, float64) float64, argNames ...string) values.Function {
	if argNames == nil {
		argNames = []string{"x", "y"}
	}
	return values.NewFunction(
		name,
		semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
			Parameters: map[string]semantic.PolyType{argNames[0]: semantic.Float, argNames[1]: semantic.Float},
			Required: argNames,
			Return: semantic.Float,
		}),
		func(ctx context.Context, args values.Object) (values.Value, error) {
			v1, ok := args.Get(argNames[0])
			if !ok {
				return nil, fmt.Errorf("missing argument %s", argNames[0])
			}
			v2, ok := args.Get(argNames[1])
			if !ok {
				return nil, fmt.Errorf("missing argument %s", argNames[1])
			}
			// Both arguments must already be floats; no conversion is done.
			if v1.Type().Nature() == semantic.Float {
				if v2.Type().Nature() == semantic.Float {
					return values.NewFloat(mathFn(v1.Float(), v2.Float())), nil
				} else {
					return nil, fmt.Errorf("cannot convert argument %s of type %v to float", argNames[1], v2.Type().Nature())
				}
			}
			return nil, fmt.Errorf("cannot convert argument %s of type %v to float", argNames[0], v1.Type().Nature())
		}, false,
	)
}
func init() {
// constants
flux.RegisterPackageValue("math", "pi", values.NewFloat(math.Pi))
flux.RegisterPackageValue("math", "e", values.NewFloat(math.E))
flux.RegisterPackageValue("math", "phi", values.NewFloat(math.Phi))
flux.RegisterPackageValue("math", "sqrt2", values.NewFloat(math.Sqrt2))
flux.RegisterPackageValue("math", "sqrte", values.NewFloat(math.SqrtE))
flux.RegisterPackageValue("math", "sqrtpi", values.NewFloat(math.SqrtPi))
flux.RegisterPackageValue("math", "sqrtphi", values.NewFloat(math.SqrtPhi))
flux.RegisterPackageValue("math", "log2e", values.NewFloat(math.Log2E))
flux.RegisterPackageValue("math", "ln2", values.NewFloat(math.Ln2))
flux.RegisterPackageValue("math", "ln10", values.NewFloat(math.Ln10))
flux.RegisterPackageValue("math", "log10e", values.NewFloat(math.Log10E))
flux.RegisterPackageValue("math", "maxfloat", values.NewFloat(math.MaxFloat64))
flux.RegisterPackageValue("math", "smallestNonzeroFloat", values.NewFloat(math.SmallestNonzeroFloat64))
flux.RegisterPackageValue("math", "maxint", values.NewInt(math.MaxInt64))
flux.RegisterPackageValue("math", "minint", values.NewFloat(math.MinInt64))
flux.RegisterPackageValue("math", "maxuint", values.NewUInt(math.MaxUint64))
flux.RegisterPackageValue("math", "abs", generateMathFunctionX("abs", math.Abs))
flux.RegisterPackageValue("math", "acos", generateMathFunctionX("acos", math.Acos))
flux.RegisterPackageValue("math", "acosh", generateMathFunctionX("acosh", math.Acosh))
flux.RegisterPackageValue("math", "asin", generateMathFunctionX("asin", math.Asin))
flux.RegisterPackageValue("math", "asinh", generateMathFunctionX("asinh", math.Asinh))
flux.RegisterPackageValue("math", "atan", generateMathFunctionX("atan", math.Atan))
flux.RegisterPackageValue("math", "atan2", generateMathFunctionXY("atan2", math.Atan2))
flux.RegisterPackageValue("math", "atanh", generateMathFunctionX("atanh", math.Atanh))
flux.RegisterPackageValue("math", "cbrt", generateMathFunctionX("cbrt", math.Cbrt))
flux.RegisterPackageValue("math", "ceil", generateMathFunctionX("ceil", math.Ceil))
flux.RegisterPackageValue("math", "copysign", generateMathFunctionXY("copysign", math.Copysign))
flux.RegisterPackageValue("math", "cos", generateMathFunctionX("cos", math.Cos))
flux.RegisterPackageValue("math", "cosh", generateMathFunctionX("cosh", math.Cosh))
flux.RegisterPackageValue("math", "dim", generateMathFunctionXY("dim", math.Dim))
flux.RegisterPackageValue("math", "erf", generateMathFunctionX("erf", math.Erf))
flux.RegisterPackageValue("math", "erfc", generateMathFunctionX("erfc", math.Erfc))
flux.RegisterPackageValue("math", "erfcinv", generateMathFunctionX("erfcinv", math.Erfcinv))
flux.RegisterPackageValue("math", "erfinv", generateMathFunctionX("erfinv", math.Erfinv))
flux.RegisterPackageValue("math", "exp", generateMathFunctionX("exp", math.Exp))
flux.RegisterPackageValue("math", "exp2", generateMathFunctionX("exp2", math.Exp2))
flux.RegisterPackageValue("math", "expm1", generateMathFunctionX("expm1", math.Expm1))
flux.RegisterPackageValue("math", "floor", generateMathFunctionX("floor", math.Floor))
flux.RegisterPackageValue("math", "gamma", generateMathFunctionX("gamma", math.Gamma))
flux.RegisterPackageValue("math", "hypot", generateMathFunctionXY("hypot", math.Hypot, "p", "q"))
flux.RegisterPackageValue("math", "j0", generateMathFunctionX("j0", math.J0))
flux.RegisterPackageValue("math", "j1", generateMathFunctionX("j1", math.J1))
flux.RegisterPackageValue("math", "log", generateMathFunctionX("log", math.Log))
flux.RegisterPackageValue("math", "log10", generateMathFunctionX("log10", math.Log10))
flux.RegisterPackageValue("math", "log1p", generateMathFunctionX("log1p", math.Log1p))
flux.RegisterPackageValue("math", "log2", generateMathFunctionX("log2", math.Log2))
flux.RegisterPackageValue("math", "logb", generateMathFunctionX("logb", math.Logb))
// TODO: change to max and min when we eliminate namespace collisions
flux.RegisterPackageValue("math", "mMax", generateMathFunctionXY("max", math.Max))
flux.RegisterPackageValue("math", "mMin", generateMathFunctionXY("min", math.Min))
flux.RegisterPackageValue("math", "mod", generateMathFunctionXY("mod", math.Mod))
flux.RegisterPackageValue("math", "nextafter", generateMathFunctionXY("nextafter", math.Nextafter))
flux.RegisterPackageValue("math", "pow", generateMathFunctionXY("pow", math.Pow))
flux.RegisterPackageValue("math", "remainder", generateMathFunctionXY("remainder", math.Remainder))
flux.RegisterPackageValue("math", "round", generateMathFunctionX("round", math.Round))
flux.RegisterPackageValue("math", "roundtoeven", generateMathFunctionX("roundtoeven", math.RoundToEven))
flux.RegisterPackageValue("math", "sin", generateMathFunctionX("sin", math.Sin))
flux.RegisterPackageValue("math", "sinh", generateMathFunctionX("sinh", math.Sinh))
flux.RegisterPackageValue("math", "sqrt", generateMathFunctionX("sqrt", math.Sqrt))
flux.RegisterPackageValue("math", "tan", generateMathFunctionX("tan", math.Tan))
flux.RegisterPackageValue("math", "tanh", generateMathFunctionX("tanh", math.Tanh))
flux.RegisterPackageValue("math", "trunc", generateMathFunctionX("trunc", math.Trunc))
flux.RegisterPackageValue("math", "y0", generateMathFunctionX("y0", math.Y0))
flux.RegisterPackageValue("math", "y1", generateMathFunctionX("y1", math.Y1))
SpecialFns = map[string]values.Function{
// float --> uint
"float64bits": values.NewFunction(
"float64bits",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"f": semantic.Float},
Required: semantic.LabelSet{"f"},
Return: semantic.UInt,
}),
func(ctx context.Context, args values.Object) (values.Value, error) {
v1, ok := args.Get("f")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument f")
}
if v1.Type().Nature() == semantic.Float {
return values.NewUInt(math.Float64bits(v1.Float())), nil
}
return nil, fmt.Errorf("cannot convert argument f of type %v to float", v1.Type().Nature())
}, false,
),
"float64frombits": values.NewFunction(
"float64bits",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"b": semantic.UInt},
Required: semantic.LabelSet{"b"},
Return: semantic.Float,
}),
func(ctx context.Context, args values.Object) (values.Value, error) {
v1, ok := args.Get("b")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument b")
}
if v1.Type().Nature() == semantic.UInt {
return values.NewFloat(math.Float64frombits(v1.UInt())), nil
}
return nil, fmt.Errorf("cannot convert argument b of type %v to uint", v1.Type().Nature())
}, false,
),
// float --> int
"ilogb": values.NewFunction(
"ilogb",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"x": semantic.Float},
Required: semantic.LabelSet{"x"},
Return: semantic.Int,
}),
func(ctx context.Context, args values.Object) (values.Value, error) {
v1, ok := args.Get("x")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument x")
}
if v1.Type().Nature() == semantic.Float {
return values.NewInt(int64(math.Ilogb(v1.Float()))), nil
}
return nil, fmt.Errorf("cannot convert argument x of type %v to float", v1.Type().Nature())
}, false,
),
// float --> {frac: float, exp: int}
"frexp": values.NewFunction(
"frexp",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"f": semantic.Float},
Required: semantic.LabelSet{"f"},
Return: semantic.NewObjectPolyType(map[string]semantic.PolyType{"frac": semantic.Float, "exp": semantic.Int}, semantic.LabelSet{"frac", "exp"}, nil),
}),
func(ctx context.Context, args values.Object) (values.Value, error) {
v1, ok := args.Get("f")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument f")
}
if v1.Type().Nature() == semantic.Float {
frac, exp := math.Frexp(v1.Float())
return values.NewObjectWithValues(map[string]values.Value{"frac": values.NewFloat(frac), "exp": values.NewInt(int64(exp))}), nil
}
return nil, fmt.Errorf("cannot convert argument f of type %v to float", v1.Type().Nature())
}, false,
),
// float --> {lgamma: float, sign: int}
"lgamma": values.NewFunction(
"lgamma",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"x": semantic.Float},
Required: semantic.LabelSet{"x"},
Return: semantic.NewObjectPolyType(map[string]semantic.PolyType{"lgamma": semantic.Float, "sign": semantic.Int}, semantic.LabelSet{"lgamma", "sign"}, nil),
}),
func(ctx context.Context, args values.Object) (values.Value, error) {
v1, ok := args.Get("x")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument x")
}
if v1.Type().Nature() == semantic.Float {
lgamma, sign := math.Lgamma(v1.Float())
return values.NewObjectWithValues(map[string]values.Value{"lgamma": values.NewFloat(lgamma), "sign": values.NewInt(int64(sign))}), nil
}
return nil, fmt.Errorf("cannot convert argument x of type %v to float", v1.Type().Nature())
}, false,
),
// float --> {int: float, frac: float}
"modf": values.NewFunction(
"modf",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"f": semantic.Float},
Required: semantic.LabelSet{"f"},
Return: semantic.NewObjectPolyType(map[string]semantic.PolyType{"int": semantic.Float, "frac": semantic.Float}, semantic.LabelSet{"int", "frac"}, nil),
}),
func(ctx context.Context, args values.Object) (values.Value, error) {
v1, ok := args.Get("f")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument f")
}
if v1.Type().Nature() == semantic.Float {
intres, frac := math.Modf(v1.Float())
return values.NewObjectWithValues(map[string]values.Value{"int": values.NewFloat(intres), "frac": values.NewFloat(frac)}), nil
}
return nil, fmt.Errorf("cannot convert argument f of type %v to float", v1.Type().Nature())
}, false,
),
// float --> {sin: float, cos: float}
"sincos": values.NewFunction(
"sincos",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"x": semantic.Float},
Required: semantic.LabelSet{"x"},
Return: semantic.NewObjectPolyType(map[string]semantic.PolyType{"sin": semantic.Float, "cos": semantic.Float}, semantic.LabelSet{"sin", "cos"}, nil),
}),
func(ctx context.Context, args values.Object) (values.Value, error) {
v1, ok := args.Get("x")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument x")
}
if v1.Type().Nature() == semantic.Float {
sin, cos := math.Sin(v1.Float()), math.Cos(v1.Float())
return values.NewObjectWithValues(map[string]values.Value{"sin": values.NewFloat(sin), "cos": values.NewFloat(cos)}), nil
}
return nil, fmt.Errorf("cannot convert argument x of type %v to float", v1.Type().Nature())
}, false,
),
// float, int --> bool
"isInf": values.NewFunction(
"isInf",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"f": semantic.Float, "sign": semantic.Int},
Required: semantic.LabelSet{"f", "sign"},
Return: semantic.Bool,
}),
func(ctx context.Context, args values.Object) (values.Value, error) {
v1, ok := args.Get("f")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument f")
}
v2, ok := args.Get("sign")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument sign")
}
if v1.Type().Nature() == semantic.Float {
if v2.Type().Nature() == semantic.Int {
return values.NewBool(math.IsInf(v1.Float(), int(v2.Int()))), nil
} else {
return nil, fmt.Errorf("cannot convert argument sign of type %v to int", v2.Type().Nature())
}
}
return nil, fmt.Errorf("cannot convert argument f of type %v to float", v1.Type().Nature())
}, false,
),
// float --> bool
"isNaN": values.NewFunction(
"isNaN",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"f": semantic.Float},
Required: semantic.LabelSet{"f"},
Return: semantic.Bool,
}),
func(ctx context.Context, args values.Object) (values.Value, error) {
v1, ok := args.Get("f")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument f")
}
if v1.Type().Nature() == semantic.Float {
return values.NewBool(math.IsNaN(v1.Float())), nil
}
return nil, fmt.Errorf("cannot convert argument f of type %v to float", v1.Type().Nature())
}, false,
),
// float --> bool
"signbit": values.NewFunction(
"signbit",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"x": semantic.Float},
Required: semantic.LabelSet{"x"},
Return: semantic.Bool,
}),
func(ctx context.Context, args values.Object) (values.Value, error) {
v1, ok := args.Get("x")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument x")
}
if v1.Type().Nature() == semantic.Float {
return values.NewBool(math.Signbit(v1.Float())), nil
}
return nil, fmt.Errorf("cannot convert argument x of type %v to float", v1.Type().Nature())
}, false,
),
// () --> float
"NaN": values.NewFunction(
"NaN",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{},
Required: semantic.LabelSet{},
Return: semantic.Float,
}),
func(ctx context.Context, args values.Object) (values.Value, error) {
return values.NewFloat(math.NaN()), nil
}, false,
),
// (int) --> float
"mInf": values.NewFunction(
"inf",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"sign": semantic.Int},
Required: semantic.LabelSet{"sign"},
Return: semantic.Float,
}),
func(ctx context.Context, args values.Object) (values.Value, error) {
v1, ok := args.Get("sign")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument sign")
}
if v1.Type().Nature() == semantic.Int {
return values.NewFloat(math.Inf(int(v1.Int()))), nil
}
return nil, fmt.Errorf("cannot convert argument sign of type %v to int", v1.Type().Nature())
}, false,
),
// (int, float) --> float
"jn": values.NewFunction(
"jn",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"n": semantic.Int, "x": semantic.Float},
Required: semantic.LabelSet{"n", "x"},
Return: semantic.Float,
}),
func(ctx context.Context, args values.Object) (values.Value, error) {
v1, ok := args.Get("n")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument n")
}
v2, ok := args.Get("x")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument x")
}
if v1.Type().Nature() == semantic.Int {
if v2.Type().Nature() == semantic.Float {
return values.NewFloat(math.Jn(int(v1.Int()), v2.Float())), nil
} else {
return nil, fmt.Errorf("cannot convert argument x of type %v to float", v2.Type().Nature())
}
}
return nil, fmt.Errorf("cannot convert argument n of type %v to int", v1.Type().Nature())
}, false,
),
// (int, float) --> float
"yn": values.NewFunction(
"yn",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"n": semantic.Int, "x": semantic.Float},
Required: semantic.LabelSet{"n", "x"},
Return: semantic.Float,
}),
func(ctx context.Context, args values.Object) (values.Value, error) {
v1, ok := args.Get("n")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument n")
}
v2, ok := args.Get("x")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument x")
}
if v1.Type().Nature() == semantic.Int {
if v2.Type().Nature() == semantic.Float {
return values.NewFloat(math.Yn(int(v1.Int()), v2.Float())), nil
} else {
return nil, fmt.Errorf("cannot convert argument x of type %v to float", v2.Type().Nature())
}
}
return nil, fmt.Errorf("cannot convert argument n of type %v to int", v1.Type().Nature())
}, false,
),
// (float, int) --> float
// ldexp computes frac * 2**exp via math.Ldexp.
// frac must be a float and exp an int; anything else yields a conversion error.
"ldexp": values.NewFunction(
	"ldexp",
	semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
		Parameters: map[string]semantic.PolyType{"frac": semantic.Float, "exp": semantic.Int},
		Required:   semantic.LabelSet{"frac", "exp"},
		Return:     semantic.Float,
	}),
	func(ctx context.Context, args values.Object) (values.Value, error) {
		// Both arguments are required.
		v1, ok := args.Get("frac")
		if !ok {
			return nil, errors.New(codes.Invalid, "missing argument frac")
		}
		v2, ok := args.Get("exp")
		if !ok {
			return nil, errors.New(codes.Invalid, "missing argument exp")
		}
		// Check the runtime natures before converting to Go values.
		if v1.Type().Nature() == semantic.Float {
			if v2.Type().Nature() == semantic.Int {
				return values.NewFloat(math.Ldexp(v1.Float(), int(v2.Int()))), nil
			} else {
				return nil, fmt.Errorf("cannot convert argument exp of type %v to int", v2.Type().Nature())
			}
		}
		return nil, fmt.Errorf("cannot convert argument frac of type %v to float", v1.Type().Nature())
	}, false,
),
// int --> float
// pow10 computes 10**n via math.Pow10. n must be an int.
"pow10": values.NewFunction(
	"pow10",
	semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
		Parameters: map[string]semantic.PolyType{"n": semantic.Int},
		Required:   semantic.LabelSet{"n"},
		Return:     semantic.Float,
	}),
	func(ctx context.Context, args values.Object) (values.Value, error) {
		v1, ok := args.Get("n")
		if !ok {
			// Bug fix: the message previously referred to "frac" (copied
			// from ldexp); this function's only argument is "n".
			return nil, errors.New(codes.Invalid, "missing argument n")
		}
		if v1.Type().Nature() == semantic.Int {
			return values.NewFloat(math.Pow10(int(v1.Int()))), nil
		}
		return nil, fmt.Errorf("cannot convert argument n of type %v to int", v1.Type().Nature())
	}, false,
),
}
// special case args and/or return types not worth generalizing
flux.RegisterPackageValue("math", "float64bits", SpecialFns["float64bits"])
flux.RegisterPackageValue("math", "float64frombits", SpecialFns["float64frombits"])
flux.RegisterPackageValue("math", "ilogb", SpecialFns["ilogb"])
flux.RegisterPackageValue("math", "frexp", SpecialFns["frexp"])
flux.RegisterPackageValue("math", "lgamma", SpecialFns["lgamma"])
flux.RegisterPackageValue("math", "modf", SpecialFns["modf"])
flux.RegisterPackageValue("math", "sincos", SpecialFns["sincos"])
flux.RegisterPackageValue("math", "isInf", SpecialFns["isInf"])
flux.RegisterPackageValue("math", "isNaN", SpecialFns["isNaN"])
flux.RegisterPackageValue("math", "signbit", SpecialFns["signbit"])
flux.RegisterPackageValue("math", "NaN", SpecialFns["NaN"])
flux.RegisterPackageValue("math", "mInf", SpecialFns["mInf"])
flux.RegisterPackageValue("math", "jn", SpecialFns["jn"])
flux.RegisterPackageValue("math", "yn", SpecialFns["yn"])
flux.RegisterPackageValue("math", "ldexp", SpecialFns["ldexp"])
flux.RegisterPackageValue("math", "pow10", SpecialFns["pow10"])
} | stdlib/math/math.go | 0.581541 | 0.498169 | math.go | starcoder |
package rti
import (
"encoding/binary"
"math"
)
// AncillaryDataSet will contain all the Ancillary Data set values.
// These values describe the ensemble with float values. The field order
// matches the order of the 13 floats in the binary payload (see Decode).
type AncillaryDataSet struct {
	Base            BaseDataSet // Base Dataset (header: name length, ensemble type)
	FirstBinRange   float32     // First bin location in meters (element 0)
	BinSize         float32     // Bin size in meters (element 1)
	FirstPingTime   float32     // First ping time in seconds (element 2)
	LastPingTime    float32     // Last ping time in seconds (element 3)
	Heading         float32     // Heading in degrees (element 4)
	Pitch           float32     // Pitch in degrees (element 5)
	Roll            float32     // Roll in degrees (element 6)
	WaterTemp       float32     // Water temperature in degrees Fahrenheit (element 7)
	SystemTemp      float32     // System temperature in degrees Fahrenheit (element 8)
	Salinity        float32     // Salinity in Parts per Thousand (PPT) (element 9)
	Pressure        float32     // Pressure in Pascals (element 10)
	TransducerDepth float32     // Depth of the transducer in water in meters; used for speed of sound (element 11)
	SpeedOfSound    float32     // Speed of Sound in m/s (element 12)
}
// Decode will take the binary data and decode into
// into the ensemble data set.
func (anc *AncillaryDataSet) Decode(data []byte) {
// Not enough data
if len(data) < 13*BytesInFloat {
return
}
// First Bin Range
ptr := GenerateIndex(0, anc.Base.NameLen, anc.Base.Enstype)
bits := binary.LittleEndian.Uint32(data[ptr : ptr+BytesInFloat])
anc.FirstBinRange = math.Float32frombits(bits)
// Bin Size
ptr = GenerateIndex(1, anc.Base.NameLen, anc.Base.Enstype)
bits = binary.LittleEndian.Uint32(data[ptr : ptr+BytesInFloat])
anc.BinSize = math.Float32frombits(bits)
// First Ping Time
ptr = GenerateIndex(2, anc.Base.NameLen, anc.Base.Enstype)
bits = binary.LittleEndian.Uint32(data[ptr : ptr+BytesInFloat])
anc.FirstPingTime = math.Float32frombits(bits)
// Last Ping Time
ptr = GenerateIndex(3, anc.Base.NameLen, anc.Base.Enstype)
bits = binary.LittleEndian.Uint32(data[ptr : ptr+BytesInFloat])
anc.LastPingTime = math.Float32frombits(bits)
// Heading
ptr = GenerateIndex(4, anc.Base.NameLen, anc.Base.Enstype)
bits = binary.LittleEndian.Uint32(data[ptr : ptr+BytesInFloat])
anc.Heading = math.Float32frombits(bits)
// Pitch
ptr = GenerateIndex(5, anc.Base.NameLen, anc.Base.Enstype)
bits = binary.LittleEndian.Uint32(data[ptr : ptr+BytesInFloat])
anc.Pitch = math.Float32frombits(bits)
// Roll
ptr = GenerateIndex(6, anc.Base.NameLen, anc.Base.Enstype)
bits = binary.LittleEndian.Uint32(data[ptr : ptr+BytesInFloat])
anc.Roll = math.Float32frombits(bits)
// Water Temp
ptr = GenerateIndex(7, anc.Base.NameLen, anc.Base.Enstype)
bits = binary.LittleEndian.Uint32(data[ptr : ptr+BytesInFloat])
anc.WaterTemp = math.Float32frombits(bits)
// System Temp
ptr = GenerateIndex(8, anc.Base.NameLen, anc.Base.Enstype)
bits = binary.LittleEndian.Uint32(data[ptr : ptr+BytesInFloat])
anc.SystemTemp = math.Float32frombits(bits)
// Salinity
ptr = GenerateIndex(9, anc.Base.NameLen, anc.Base.Enstype)
bits = binary.LittleEndian.Uint32(data[ptr : ptr+BytesInFloat])
anc.Salinity = math.Float32frombits(bits)
// Pressure
ptr = GenerateIndex(10, anc.Base.NameLen, anc.Base.Enstype)
bits = binary.LittleEndian.Uint32(data[ptr : ptr+BytesInFloat])
anc.Pressure = math.Float32frombits(bits)
// Transducer Depth
ptr = GenerateIndex(11, anc.Base.NameLen, anc.Base.Enstype)
bits = binary.LittleEndian.Uint32(data[ptr : ptr+BytesInFloat])
anc.TransducerDepth = math.Float32frombits(bits)
// Speed of Sound
ptr = GenerateIndex(12, anc.Base.NameLen, anc.Base.Enstype)
bits = binary.LittleEndian.Uint32(data[ptr : ptr+BytesInFloat])
anc.SpeedOfSound = math.Float32frombits(bits)
} | AncillaryDataSet.go | 0.505859 | 0.541591 | AncillaryDataSet.go | starcoder |
package secp256k1go
import (
"encoding/hex"
"math/big"
)
// Field represents an element of the secp256k1 base field, stored as ten
// little-endian limbs of 26 bits each (the top limb holds 22 bits, per
// the masks used in Normalize/SetB32).
type Field struct {
	n [10]uint32
}
// String returns the normalized value of the field element as a
// 64-character hex string. The receiver itself is not modified; the
// normalization happens on a copy.
func (fd *Field) String() string {
	cp := *fd
	cp.Normalize()
	var raw [32]byte
	cp.GetB32(raw[:])
	return hex.EncodeToString(raw[:])
}
// GetBig returns the field element as a big.Int.
// Note: the receiver is normalized in place as a side effect.
func (fd *Field) GetBig() (r *big.Int) {
	fd.Normalize()
	var raw [32]byte
	fd.GetB32(raw[:])
	r = new(big.Int).SetBytes(raw[:])
	return
}
// SetB32 loads a 32-byte big-endian value into the 10x26-bit limb
// representation, two bits at a time.
func (fd *Field) SetB32(a []byte) {
	// Reset every limb before OR-ing bits in.
	fd.n = [10]uint32{}
	for i := uint(0); i < 32; i++ {
		for j := uint(0); j < 4; j++ {
			// Absolute bit position of this 2-bit group within the value.
			bit := 8*i + 2*j
			fd.n[bit/26] |= (uint32((a[31-i] >> (2 * j)) & 0x3)) << (bit % 26)
		}
	}
}
// SetBytes loads up to 32 big-endian bytes into the field element,
// zero-padding shorter inputs on the left. It panics when given more
// than 32 bytes.
func (fd *Field) SetBytes(a []byte) {
	switch {
	case len(a) > 32:
		panic("too many bytes to set")
	case len(a) == 32:
		fd.SetB32(a)
	default:
		var padded [32]byte
		copy(padded[32-len(a):], a)
		fd.SetB32(padded[:])
	}
}
// SetHex parses a hex-encoded string and loads it into the field element.
// It panics on invalid hex input.
func (fd *Field) SetHex(s string) {
	raw, err := hex.DecodeString(s)
	if err != nil {
		panic(err)
	}
	fd.SetBytes(raw)
}
// IsOdd reports whether the element's low bit (in limb 0) is set.
func (fd *Field) IsOdd() bool {
	return fd.n[0]&1 == 1
}

// IsZero reports whether every limb of the representation is zero.
func (fd *Field) IsZero() bool {
	for _, limb := range fd.n {
		if limb != 0 {
			return false
		}
	}
	return true
}
// SetInt sets the field element to the small integer a: limb 0 receives a
// and all remaining limbs are cleared.
func (fd *Field) SetInt(a uint32) {
	fd.n = [10]uint32{a}
}
// Normalize fully reduces the field element in place: carries are
// propagated through all ten limbs, the overflow above 2^256 is folded
// back in, and the prime p is conditionally subtracted so the stored
// value is the canonical representative.
func (fd *Field) Normalize() {
	// Pass 1: propagate carries so each limb fits in 26 bits
	// (22 bits for the top limb); c ends up holding the overflow.
	c := fd.n[0]
	t0 := c & 0x3FFFFFF
	c = (c >> 26) + fd.n[1]
	t1 := c & 0x3FFFFFF
	c = (c >> 26) + fd.n[2]
	t2 := c & 0x3FFFFFF
	c = (c >> 26) + fd.n[3]
	t3 := c & 0x3FFFFFF
	c = (c >> 26) + fd.n[4]
	t4 := c & 0x3FFFFFF
	c = (c >> 26) + fd.n[5]
	t5 := c & 0x3FFFFFF
	c = (c >> 26) + fd.n[6]
	t6 := c & 0x3FFFFFF
	c = (c >> 26) + fd.n[7]
	t7 := c & 0x3FFFFFF
	c = (c >> 26) + fd.n[8]
	t8 := c & 0x3FFFFFF
	c = (c >> 26) + fd.n[9]
	t9 := c & 0x03FFFFF
	c >>= 22
	// Pass 2: fold the overflow c back into the low limbs.
	// The following code will not modify the t's if c is initially 0.
	d := c*0x3D1 + t0
	t0 = d & 0x3FFFFFF
	d = (d >> 26) + t1 + c*0x40
	t1 = d & 0x3FFFFFF
	d = (d >> 26) + t2
	t2 = d & 0x3FFFFFF
	d = (d >> 26) + t3
	t3 = d & 0x3FFFFFF
	d = (d >> 26) + t4
	t4 = d & 0x3FFFFFF
	d = (d >> 26) + t5
	t5 = d & 0x3FFFFFF
	d = (d >> 26) + t6
	t6 = d & 0x3FFFFFF
	d = (d >> 26) + t7
	t7 = d & 0x3FFFFFF
	d = (d >> 26) + t8
	t8 = d & 0x3FFFFFF
	d = (d >> 26) + t9
	t9 = d & 0x03FFFFF
	// Subtract p if result >= p: the mask is all-ones when the value is
	// already below p (so nothing is subtracted), all-zeros otherwise.
	low := (uint64(t1) << 26) | uint64(t0)
	//mask := uint64(-(int64)((t9 < 0x03FFFFF) | (t8 < 0x3FFFFFF) | (t7 < 0x3FFFFFF) | (t6 < 0x3FFFFFF) | (t5 < 0x3FFFFFF) | (t4 < 0x3FFFFFF) | (t3 < 0x3FFFFFF) | (t2 < 0x3FFFFFF) | (low < 0xFFFFEFFFFFC2F)))
	var mask uint64
	if (t9 < 0x03FFFFF) ||
		(t8 < 0x3FFFFFF) ||
		(t7 < 0x3FFFFFF) ||
		(t6 < 0x3FFFFFF) ||
		(t5 < 0x3FFFFFF) ||
		(t4 < 0x3FFFFFF) ||
		(t3 < 0x3FFFFFF) ||
		(t2 < 0x3FFFFFF) ||
		(low < 0xFFFFEFFFFFC2F) {
		mask = 0xFFFFFFFFFFFFFFFF
	}
	t9 &= uint32(mask)
	t8 &= uint32(mask)
	t7 &= uint32(mask)
	t6 &= uint32(mask)
	t5 &= uint32(mask)
	t4 &= uint32(mask)
	t3 &= uint32(mask)
	t2 &= uint32(mask)
	low -= ((mask ^ 0xFFFFFFFFFFFFFFFF) & 0xFFFFEFFFFFC2F)
	// Write the normalized limbs back into the receiver.
	fd.n[0] = uint32(low) & 0x3FFFFFF
	fd.n[1] = uint32(low>>26) & 0x3FFFFFF
	fd.n[2] = t2
	fd.n[3] = t3
	fd.n[4] = t4
	fd.n[5] = t5
	fd.n[6] = t6
	fd.n[7] = t7
	fd.n[8] = t8
	fd.n[9] = t9
}
// GetB32 serialises the limb representation into r as a 32-byte
// big-endian value, two bits at a time. r must hold at least 32 bytes.
// The element should be normalized first for a canonical encoding.
func (fd *Field) GetB32(r []byte) {
	for i := uint32(0); i < 32; i++ {
		var out uint32
		for j := uint32(0); j < 4; j++ {
			// Absolute bit position of this 2-bit group within the value.
			bit := 8*i + 2*j
			out |= ((fd.n[bit/26] >> (bit % 26)) & 0x3) << (2 * j)
		}
		r[31-i] = byte(out)
	}
}
// Equals reports whether both elements have identical limb
// representations. Note it compares raw limbs without normalizing, so two
// representations of the same field value can compare unequal.
func (fd *Field) Equals(b *Field) bool {
	// Go arrays compare element-wise with ==.
	return fd.n == b.n
}
// SetAdd adds a to the receiver limb-wise. No carry propagation or
// reduction is performed; call Normalize when a canonical value is needed.
func (fd *Field) SetAdd(a *Field) {
	for i := range fd.n {
		fd.n[i] += a.n[i]
	}
}

// MulInt multiplies every limb by the small integer a. No reduction is
// performed; call Normalize when a canonical value is needed.
func (fd *Field) MulInt(a uint32) {
	for i := range fd.n {
		fd.n[i] *= a
	}
}
// Negate computes r = -fd (mod p) for an input of magnitude at most m.
// Each limb is subtracted from a per-limb constant scaled by m+1; the
// constants are large enough that no subtraction underflows for such
// inputs (mirrors libsecp256k1's fe_negate). The result is not normalized.
func (fd *Field) Negate(r *Field, m uint32) {
	r.n[0] = 0x3FFFC2F*(m+1) - fd.n[0]
	r.n[1] = 0x3FFFFBF*(m+1) - fd.n[1]
	r.n[2] = 0x3FFFFFF*(m+1) - fd.n[2]
	r.n[3] = 0x3FFFFFF*(m+1) - fd.n[3]
	r.n[4] = 0x3FFFFFF*(m+1) - fd.n[4]
	r.n[5] = 0x3FFFFFF*(m+1) - fd.n[5]
	r.n[6] = 0x3FFFFFF*(m+1) - fd.n[6]
	r.n[7] = 0x3FFFFFF*(m+1) - fd.n[7]
	r.n[8] = 0x3FFFFFF*(m+1) - fd.n[8]
	r.n[9] = 0x03FFFFF*(m+1) - fd.n[9]
}
// Inv computes r = fd^-1 (mod p) as fd^(p-2) using a fixed
// square-and-multiply addition chain; the xN temporaries hold fd raised to
// (2^N - 1). New algo by peterdettman - https://github.com/sipa/TheCurve/pull/19
func (fd *Field) Inv(r *Field) {
	var x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1 Field
	var j int
	// Build up fd^(2^N - 1) for N = 2, 3, 6, 9, 11, 22, 44, 88, 176, 220, 223.
	fd.Sqr(&x2)
	x2.Mul(&x2, fd)
	x2.Sqr(&x3)
	x3.Mul(&x3, fd)
	x3.Sqr(&x6)
	x6.Sqr(&x6)
	x6.Sqr(&x6)
	x6.Mul(&x6, &x3)
	x6.Sqr(&x9)
	x9.Sqr(&x9)
	x9.Sqr(&x9)
	x9.Mul(&x9, &x3)
	x9.Sqr(&x11)
	x11.Sqr(&x11)
	x11.Mul(&x11, &x2)
	x11.Sqr(&x22)
	for j = 1; j < 11; j++ {
		x22.Sqr(&x22)
	}
	x22.Mul(&x22, &x11)
	x22.Sqr(&x44)
	for j = 1; j < 22; j++ {
		x44.Sqr(&x44)
	}
	x44.Mul(&x44, &x22)
	x44.Sqr(&x88)
	for j = 1; j < 44; j++ {
		x88.Sqr(&x88)
	}
	x88.Mul(&x88, &x44)
	x88.Sqr(&x176)
	for j = 1; j < 88; j++ {
		x176.Sqr(&x176)
	}
	x176.Mul(&x176, &x88)
	x176.Sqr(&x220)
	for j = 1; j < 44; j++ {
		x220.Sqr(&x220)
	}
	x220.Mul(&x220, &x44)
	x220.Sqr(&x223)
	x223.Sqr(&x223)
	x223.Sqr(&x223)
	x223.Mul(&x223, &x3)
	// Tail of the chain: interleave squarings with multiplies so the final
	// exponent equals p - 2.
	x223.Sqr(&t1)
	for j = 1; j < 23; j++ {
		t1.Sqr(&t1)
	}
	t1.Mul(&t1, &x22)
	t1.Sqr(&t1)
	t1.Sqr(&t1)
	t1.Sqr(&t1)
	t1.Sqr(&t1)
	t1.Sqr(&t1)
	t1.Mul(&t1, fd)
	t1.Sqr(&t1)
	t1.Sqr(&t1)
	t1.Sqr(&t1)
	t1.Mul(&t1, &x2)
	t1.Sqr(&t1)
	t1.Sqr(&t1)
	// Final multiply by fd itself yields the inverse in r.
	t1.Mul(r, fd)
}
// Sqrt computes a square root r of fd as fd^((p+1)/4), which is valid
// because p = 3 (mod 4); the xN temporaries hold fd raised to (2^N - 1).
// The result is meaningful only when fd is a quadratic residue.
// New algo by peterdettman - https://github.com/sipa/TheCurve/pull/19
func (fd *Field) Sqrt(r *Field) {
	var x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1 Field
	var j int
	// Build up fd^(2^N - 1) for N = 2, 3, 6, 9, 11, 22, 44, 88, 176, 220, 223
	// (same ladder as Inv).
	fd.Sqr(&x2)
	x2.Mul(&x2, fd)
	x2.Sqr(&x3)
	x3.Mul(&x3, fd)
	x3.Sqr(&x6)
	x6.Sqr(&x6)
	x6.Sqr(&x6)
	x6.Mul(&x6, &x3)
	x6.Sqr(&x9)
	x9.Sqr(&x9)
	x9.Sqr(&x9)
	x9.Mul(&x9, &x3)
	x9.Sqr(&x11)
	x11.Sqr(&x11)
	x11.Mul(&x11, &x2)
	x11.Sqr(&x22)
	for j = 1; j < 11; j++ {
		x22.Sqr(&x22)
	}
	x22.Mul(&x22, &x11)
	x22.Sqr(&x44)
	for j = 1; j < 22; j++ {
		x44.Sqr(&x44)
	}
	x44.Mul(&x44, &x22)
	x44.Sqr(&x88)
	for j = 1; j < 44; j++ {
		x88.Sqr(&x88)
	}
	x88.Mul(&x88, &x44)
	x88.Sqr(&x176)
	for j = 1; j < 88; j++ {
		x176.Sqr(&x176)
	}
	x176.Mul(&x176, &x88)
	x176.Sqr(&x220)
	for j = 1; j < 44; j++ {
		x220.Sqr(&x220)
	}
	x220.Mul(&x220, &x44)
	x220.Sqr(&x223)
	x223.Sqr(&x223)
	x223.Sqr(&x223)
	x223.Mul(&x223, &x3)
	// Tail of the chain: the exponent totals (p+1)/4 rather than p-2.
	x223.Sqr(&t1)
	for j = 1; j < 23; j++ {
		t1.Sqr(&t1)
	}
	t1.Mul(&t1, &x22)
	for j = 0; j < 6; j++ {
		t1.Sqr(&t1)
	}
	t1.Mul(&t1, &x2)
	t1.Sqr(&t1)
	// The last squaring writes the result directly into r.
	t1.Sqr(r)
}
// InvVar computes the modular inverse of fd into r using big-number
// arithmetic (Number.modInv). Unlike Inv this runs in variable time, so it
// must not be used where timing side channels matter.
func (fd *Field) InvVar(r *Field) {
	cp := *fd
	cp.Normalize()
	var raw [32]byte
	cp.GetB32(raw[:])
	var n Number
	n.SetBytes(raw[:])
	n.modInv(&n, &TheCurve.p)
	r.SetBytes(n.Bytes())
}
// Mul computes r = fd * b mod p. It performs a schoolbook multiplication
// of the 10x26-bit limb representations into 64-bit column accumulators
// t0..t19, then folds the high columns back into the low ones using the
// sparse form of the secp256k1 prime (hence the 0x3D1/0x3D10/0x400
// multipliers). The result is partially reduced, not normalized.
func (fd *Field) Mul(r, b *Field) {
	var c, d uint64
	var t0, t1, t2, t3, t4, t5, t6 uint64
	var t7, t8, t9, t10, t11, t12, t13 uint64
	var t14, t15, t16, t17, t18, t19 uint64
	// Schoolbook multiply: t[k] accumulates all products fd.n[i]*b.n[j]
	// with i+j == k, carrying 26 bits at a time.
	c = uint64(fd.n[0]) * uint64(b.n[0])
	t0 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[0])*uint64(b.n[1]) +
		uint64(fd.n[1])*uint64(b.n[0])
	t1 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[0])*uint64(b.n[2]) +
		uint64(fd.n[1])*uint64(b.n[1]) +
		uint64(fd.n[2])*uint64(b.n[0])
	t2 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[0])*uint64(b.n[3]) +
		uint64(fd.n[1])*uint64(b.n[2]) +
		uint64(fd.n[2])*uint64(b.n[1]) +
		uint64(fd.n[3])*uint64(b.n[0])
	t3 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[0])*uint64(b.n[4]) +
		uint64(fd.n[1])*uint64(b.n[3]) +
		uint64(fd.n[2])*uint64(b.n[2]) +
		uint64(fd.n[3])*uint64(b.n[1]) +
		uint64(fd.n[4])*uint64(b.n[0])
	t4 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[0])*uint64(b.n[5]) +
		uint64(fd.n[1])*uint64(b.n[4]) +
		uint64(fd.n[2])*uint64(b.n[3]) +
		uint64(fd.n[3])*uint64(b.n[2]) +
		uint64(fd.n[4])*uint64(b.n[1]) +
		uint64(fd.n[5])*uint64(b.n[0])
	t5 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[0])*uint64(b.n[6]) +
		uint64(fd.n[1])*uint64(b.n[5]) +
		uint64(fd.n[2])*uint64(b.n[4]) +
		uint64(fd.n[3])*uint64(b.n[3]) +
		uint64(fd.n[4])*uint64(b.n[2]) +
		uint64(fd.n[5])*uint64(b.n[1]) +
		uint64(fd.n[6])*uint64(b.n[0])
	t6 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[0])*uint64(b.n[7]) +
		uint64(fd.n[1])*uint64(b.n[6]) +
		uint64(fd.n[2])*uint64(b.n[5]) +
		uint64(fd.n[3])*uint64(b.n[4]) +
		uint64(fd.n[4])*uint64(b.n[3]) +
		uint64(fd.n[5])*uint64(b.n[2]) +
		uint64(fd.n[6])*uint64(b.n[1]) +
		uint64(fd.n[7])*uint64(b.n[0])
	t7 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[0])*uint64(b.n[8]) +
		uint64(fd.n[1])*uint64(b.n[7]) +
		uint64(fd.n[2])*uint64(b.n[6]) +
		uint64(fd.n[3])*uint64(b.n[5]) +
		uint64(fd.n[4])*uint64(b.n[4]) +
		uint64(fd.n[5])*uint64(b.n[3]) +
		uint64(fd.n[6])*uint64(b.n[2]) +
		uint64(fd.n[7])*uint64(b.n[1]) +
		uint64(fd.n[8])*uint64(b.n[0])
	t8 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[0])*uint64(b.n[9]) +
		uint64(fd.n[1])*uint64(b.n[8]) +
		uint64(fd.n[2])*uint64(b.n[7]) +
		uint64(fd.n[3])*uint64(b.n[6]) +
		uint64(fd.n[4])*uint64(b.n[5]) +
		uint64(fd.n[5])*uint64(b.n[4]) +
		uint64(fd.n[6])*uint64(b.n[3]) +
		uint64(fd.n[7])*uint64(b.n[2]) +
		uint64(fd.n[8])*uint64(b.n[1]) +
		uint64(fd.n[9])*uint64(b.n[0])
	t9 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[1])*uint64(b.n[9]) +
		uint64(fd.n[2])*uint64(b.n[8]) +
		uint64(fd.n[3])*uint64(b.n[7]) +
		uint64(fd.n[4])*uint64(b.n[6]) +
		uint64(fd.n[5])*uint64(b.n[5]) +
		uint64(fd.n[6])*uint64(b.n[4]) +
		uint64(fd.n[7])*uint64(b.n[3]) +
		uint64(fd.n[8])*uint64(b.n[2]) +
		uint64(fd.n[9])*uint64(b.n[1])
	t10 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[2])*uint64(b.n[9]) +
		uint64(fd.n[3])*uint64(b.n[8]) +
		uint64(fd.n[4])*uint64(b.n[7]) +
		uint64(fd.n[5])*uint64(b.n[6]) +
		uint64(fd.n[6])*uint64(b.n[5]) +
		uint64(fd.n[7])*uint64(b.n[4]) +
		uint64(fd.n[8])*uint64(b.n[3]) +
		uint64(fd.n[9])*uint64(b.n[2])
	t11 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[3])*uint64(b.n[9]) +
		uint64(fd.n[4])*uint64(b.n[8]) +
		uint64(fd.n[5])*uint64(b.n[7]) +
		uint64(fd.n[6])*uint64(b.n[6]) +
		uint64(fd.n[7])*uint64(b.n[5]) +
		uint64(fd.n[8])*uint64(b.n[4]) +
		uint64(fd.n[9])*uint64(b.n[3])
	t12 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[4])*uint64(b.n[9]) +
		uint64(fd.n[5])*uint64(b.n[8]) +
		uint64(fd.n[6])*uint64(b.n[7]) +
		uint64(fd.n[7])*uint64(b.n[6]) +
		uint64(fd.n[8])*uint64(b.n[5]) +
		uint64(fd.n[9])*uint64(b.n[4])
	t13 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[5])*uint64(b.n[9]) +
		uint64(fd.n[6])*uint64(b.n[8]) +
		uint64(fd.n[7])*uint64(b.n[7]) +
		uint64(fd.n[8])*uint64(b.n[6]) +
		uint64(fd.n[9])*uint64(b.n[5])
	t14 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[6])*uint64(b.n[9]) +
		uint64(fd.n[7])*uint64(b.n[8]) +
		uint64(fd.n[8])*uint64(b.n[7]) +
		uint64(fd.n[9])*uint64(b.n[6])
	t15 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[7])*uint64(b.n[9]) +
		uint64(fd.n[8])*uint64(b.n[8]) +
		uint64(fd.n[9])*uint64(b.n[7])
	t16 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[8])*uint64(b.n[9]) +
		uint64(fd.n[9])*uint64(b.n[8])
	t17 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[9])*uint64(b.n[9])
	t18 = c & 0x3FFFFFF
	c = c >> 26
	t19 = c
	// Reduction: fold columns t10..t19 (the part above 2^260) back into
	// the low columns using the sparse prime structure.
	c = t0 + t10*0x3D10
	t0 = c & 0x3FFFFFF
	c = c >> 26
	c = c + t1 + t10*0x400 + t11*0x3D10
	t1 = c & 0x3FFFFFF
	c = c >> 26
	c = c + t2 + t11*0x400 + t12*0x3D10
	t2 = c & 0x3FFFFFF
	c = c >> 26
	c = c + t3 + t12*0x400 + t13*0x3D10
	r.n[3] = uint32(c) & 0x3FFFFFF
	c = c >> 26
	c = c + t4 + t13*0x400 + t14*0x3D10
	r.n[4] = uint32(c) & 0x3FFFFFF
	c = c >> 26
	c = c + t5 + t14*0x400 + t15*0x3D10
	r.n[5] = uint32(c) & 0x3FFFFFF
	c = c >> 26
	c = c + t6 + t15*0x400 + t16*0x3D10
	r.n[6] = uint32(c) & 0x3FFFFFF
	c = c >> 26
	c = c + t7 + t16*0x400 + t17*0x3D10
	r.n[7] = uint32(c) & 0x3FFFFFF
	c = c >> 26
	c = c + t8 + t17*0x400 + t18*0x3D10
	r.n[8] = uint32(c) & 0x3FFFFFF
	c = c >> 26
	c = c + t9 + t18*0x400 + t19*0x1000003D10
	r.n[9] = uint32(c) & 0x03FFFF<br/>F
	c = c >> 22
	// Final carry: fold the remaining overflow into the three low limbs.
	d = t0 + c*0x3D1
	r.n[0] = uint32(d) & 0x3FFFFFF
	d = d >> 26
	d = d + t1 + c*0x40
	r.n[1] = uint32(d) & 0x3FFFFFF
	d = d >> 26
	r.n[2] = uint32(t2 + d)
}
// Sqr computes r = fd * fd mod p. It is the squaring specialisation of
// Mul: symmetric cross products fd.n[i]*fd.n[j] (i != j) are computed once
// and doubled, halving the multiply count, then the same prime-specific
// reduction as in Mul is applied. The result is partially reduced, not
// normalized.
func (fd *Field) Sqr(r *Field) {
	var c, d uint64
	var t0, t1, t2, t3, t4, t5, t6 uint64
	var t7, t8, t9, t10, t11, t12, t13 uint64
	var t14, t15, t16, t17, t18, t19 uint64
	// Column accumulation: t[k] sums fd.n[i]*fd.n[j] for i+j == k, with
	// off-diagonal terms doubled.
	c = uint64(fd.n[0]) * uint64(fd.n[0])
	t0 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[0])*2)*uint64(fd.n[1])
	t1 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[0])*2)*uint64(fd.n[2]) +
		uint64(fd.n[1])*uint64(fd.n[1])
	t2 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[0])*2)*uint64(fd.n[3]) +
		(uint64(fd.n[1])*2)*uint64(fd.n[2])
	t3 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[0])*2)*uint64(fd.n[4]) +
		(uint64(fd.n[1])*2)*uint64(fd.n[3]) +
		uint64(fd.n[2])*uint64(fd.n[2])
	t4 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[0])*2)*uint64(fd.n[5]) +
		(uint64(fd.n[1])*2)*uint64(fd.n[4]) +
		(uint64(fd.n[2])*2)*uint64(fd.n[3])
	t5 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[0])*2)*uint64(fd.n[6]) +
		(uint64(fd.n[1])*2)*uint64(fd.n[5]) +
		(uint64(fd.n[2])*2)*uint64(fd.n[4]) +
		uint64(fd.n[3])*uint64(fd.n[3])
	t6 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[0])*2)*uint64(fd.n[7]) +
		(uint64(fd.n[1])*2)*uint64(fd.n[6]) +
		(uint64(fd.n[2])*2)*uint64(fd.n[5]) +
		(uint64(fd.n[3])*2)*uint64(fd.n[4])
	t7 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[0])*2)*uint64(fd.n[8]) +
		(uint64(fd.n[1])*2)*uint64(fd.n[7]) +
		(uint64(fd.n[2])*2)*uint64(fd.n[6]) +
		(uint64(fd.n[3])*2)*uint64(fd.n[5]) +
		uint64(fd.n[4])*uint64(fd.n[4])
	t8 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[0])*2)*uint64(fd.n[9]) +
		(uint64(fd.n[1])*2)*uint64(fd.n[8]) +
		(uint64(fd.n[2])*2)*uint64(fd.n[7]) +
		(uint64(fd.n[3])*2)*uint64(fd.n[6]) +
		(uint64(fd.n[4])*2)*uint64(fd.n[5])
	t9 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[1])*2)*uint64(fd.n[9]) +
		(uint64(fd.n[2])*2)*uint64(fd.n[8]) +
		(uint64(fd.n[3])*2)*uint64(fd.n[7]) +
		(uint64(fd.n[4])*2)*uint64(fd.n[6]) +
		uint64(fd.n[5])*uint64(fd.n[5])
	t10 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[2])*2)*uint64(fd.n[9]) +
		(uint64(fd.n[3])*2)*uint64(fd.n[8]) +
		(uint64(fd.n[4])*2)*uint64(fd.n[7]) +
		(uint64(fd.n[5])*2)*uint64(fd.n[6])
	t11 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[3])*2)*uint64(fd.n[9]) +
		(uint64(fd.n[4])*2)*uint64(fd.n[8]) +
		(uint64(fd.n[5])*2)*uint64(fd.n[7]) +
		uint64(fd.n[6])*uint64(fd.n[6])
	t12 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[4])*2)*uint64(fd.n[9]) +
		(uint64(fd.n[5])*2)*uint64(fd.n[8]) +
		(uint64(fd.n[6])*2)*uint64(fd.n[7])
	t13 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[5])*2)*uint64(fd.n[9]) +
		(uint64(fd.n[6])*2)*uint64(fd.n[8]) +
		uint64(fd.n[7])*uint64(fd.n[7])
	t14 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[6])*2)*uint64(fd.n[9]) +
		(uint64(fd.n[7])*2)*uint64(fd.n[8])
	t15 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[7])*2)*uint64(fd.n[9]) +
		uint64(fd.n[8])*uint64(fd.n[8])
	t16 = c & 0x3FFFFFF
	c = c >> 26
	c = c + (uint64(fd.n[8])*2)*uint64(fd.n[9])
	t17 = c & 0x3FFFFFF
	c = c >> 26
	c = c + uint64(fd.n[9])*uint64(fd.n[9])
	t18 = c & 0x3FFFFFF
	c = c >> 26
	t19 = c
	// Reduction: identical to Mul — fold columns t10..t19 back into the
	// low columns using the sparse prime structure.
	c = t0 + t10*0x3D10
	t0 = c & 0x3FFFFFF
	c = c >> 26
	c = c + t1 + t10*0x400 + t11*0x3D10
	t1 = c & 0x3FFFFFF
	c = c >> 26
	c = c + t2 + t11*0x400 + t12*0x3D10
	t2 = c & 0x3FFFFFF
	c = c >> 26
	c = c + t3 + t12*0x400 + t13*0x3D10
	r.n[3] = uint32(c) & 0x3FFFFFF
	c = c >> 26
	c = c + t4 + t13*0x400 + t14*0x3D10
	r.n[4] = uint32(c) & 0x3FFFFFF
	c = c >> 26
	c = c + t5 + t14*0x400 + t15*0x3D10
	r.n[5] = uint32(c) & 0x3FFFFFF
	c = c >> 26
	c = c + t6 + t15*0x400 + t16*0x3D10
	r.n[6] = uint32(c) & 0x3FFFFFF
	c = c >> 26
	c = c + t7 + t16*0x400 + t17*0x3D10
	r.n[7] = uint32(c) & 0x3FFFFFF
	c = c >> 26
	c = c + t8 + t17*0x400 + t18*0x3D10
	r.n[8] = uint32(c) & 0x3FFFFFF
	c = c >> 26
	c = c + t9 + t18*0x400 + t19*0x1000003D10
	r.n[9] = uint32(c) & 0x03FFFFF
	c = c >> 22
	// Final carry: fold the remaining overflow into the three low limbs.
	d = t0 + c*0x3D1
	r.n[0] = uint32(d) & 0x3FFFFFF
	d = d >> 26
	d = d + t1 + c*0x40
	r.n[1] = uint32(d) & 0x3FFFFFF
	d = d >> 26
	r.n[2] = uint32(t2 + d)
}
package packet
import (
"github.com/sandertv/gophertunnel/minecraft/protocol"
)
// LevelEventGeneric is sent by the server to send a 'generic' level event to the client. This packet sends an
// NBT serialised object and may for that reason be used for any event holding additional data.
type LevelEventGeneric struct {
	// EventID is a unique identifier that identifies the event called. The data that follows has fields in
	// the NBT depending on what event it is.
	EventID int32
	// SerialisedEventData is a network little endian serialised object of event data, with fields that vary
	// depending on EventID.
	// Unlike many other NBT structures, this data is not actually in a compound but just loosely floating
	// NBT tags. To decode using the nbt package, you would need to append 0x0a00 at the start (compound id
	// and name length) and add 0x00 at the end, to manually wrap it in a compound. Likewise, you would have
	// to remove these bytes when encoding.
	// Example of the resulting data with an EventID of 2026:
	// TAG_Compound({
	//    'pos15x': TAG_Float(198),
	//    'pos11x': TAG_Float(201),
	//    'pos6y': TAG_Float(65),
	//    'pos13y': TAG_Float(64),
	//    'pos17z': TAG_Float(36),
	//    'pos8y': TAG_Float(65),
	//    'originY': TAG_Float(65.06125),
	//    'pos10z': TAG_Float(37),
	//    'pos13x': TAG_Float(201),
	//    'pos7y': TAG_Float(65),
	//    'pos9x': TAG_Float(203),
	//    'pos11y': TAG_Float(64),
	//    'pos15y': TAG_Float(65),
	//    'pos15z': TAG_Float(40),
	//    'pos7z': TAG_Float(41),
	//    'pos8x': TAG_Float(198),
	//    'pos13z': TAG_Float(40),
	//    'pos1z': TAG_Float(37),
	//    'pos6z': TAG_Float(42),
	//    'size': TAG_Int(18),
	//    'pos0x': TAG_Float(204),
	//    'pos12x': TAG_Float(200),
	//    'pos2x': TAG_Float(204),
	//    'pos9z': TAG_Float(37),
	//    'pos16y': TAG_Float(64),
	//    'pos5x': TAG_Float(204),
	//    'pos5y': TAG_Float(64),
	//    'pos17x': TAG_Float(202),
	//    'pos3y': TAG_Float(64),
	//    'pos3z': TAG_Float(36),
	//    'radius': TAG_Float(4),
	//    'pos0z': TAG_Float(38),
	//    'pos4z': TAG_Float(36),
	//    'pos8z': TAG_Float(38),
	//    'pos1x': TAG_Float(204),
	//    'pos0y': TAG_Float(64),
	//    'pos14z': TAG_Float(39),
	//    'pos16z': TAG_Float(40),
	//    'pos2y': TAG_Float(63),
	//    'pos6x': TAG_Float(203),
	//    'pos10x': TAG_Float(205),
	//    'pos12y': TAG_Float(64),
	//    'pos1y': TAG_Float(64),
	//    'pos14x': TAG_Float(200),
	//    'pos3x': TAG_Float(204),
	//    'pos9y': TAG_Float(64),
	//    'pos4y': TAG_Float(63),
	//    'pos10y': TAG_Float(63),
	//    'pos12z': TAG_Float(38),
	//    'pos16x': TAG_Float(202),
	//    'originX': TAG_Float(202.48654),
	//    'pos14y': TAG_Float(62),
	//    'pos17y': TAG_Float(62),
	//    'pos5z': TAG_Float(35),
	//    'pos4x': TAG_Float(204),
	//    'pos7x': TAG_Float(203),
	//    'originZ': TAG_Float(38.297028),
	//    'pos11z': TAG_Float(38),
	//    'pos2z': TAG_Float(39),
	// })
	// The 'originX', 'originY' and 'originZ' fields are present in every event and serve as a replacement for
	// a Position field in this packet.
	SerialisedEventData []byte
}
// ID returns the packet ID of the LevelEventGeneric packet.
func (pk *LevelEventGeneric) ID() uint32 {
	return IDLevelEventGeneric
}

// Marshal encodes the packet to the writer: the varint event ID followed
// by the raw serialised NBT payload. The order matches Unmarshal.
func (pk *LevelEventGeneric) Marshal(w *protocol.Writer) {
	w.Varint32(&pk.EventID)
	w.Bytes(&pk.SerialisedEventData)
}

// Unmarshal decodes the packet from the reader in the same field order as
// Marshal writes it.
func (pk *LevelEventGeneric) Unmarshal(r *protocol.Reader) {
	r.Varint32(&pk.EventID)
	r.Bytes(&pk.SerialisedEventData)
}
package expvar
import (
"expvar"
"sync"
"github.com/jjggzz/kit/metrics"
"github.com/jjggzz/kit/metrics/generic"
)
// Counter implements the metrics.Counter interface on top of an
// expvar.Float. Label values are not supported and are ignored.
type Counter struct {
	f *expvar.Float
}

// NewCounter registers an expvar Float under the given name and returns a
// Counter wrapping it.
func NewCounter(name string) *Counter {
	c := Counter{f: expvar.NewFloat(name)}
	return &c
}

// With ignores the label values and returns the counter unchanged.
func (c *Counter) With(labelValues ...string) metrics.Counter {
	return c
}

// Add increments the counter by delta.
func (c *Counter) Add(delta float64) {
	c.f.Add(delta)
}
// Gauge implements the metrics.Gauge interface on top of an expvar.Float.
// Label values are not supported and are ignored.
type Gauge struct {
	f *expvar.Float
}

// NewGauge registers an expvar Float under the given name and returns a
// Gauge wrapping it.
func NewGauge(name string) *Gauge {
	g := Gauge{f: expvar.NewFloat(name)}
	return &g
}

// With ignores the label values and returns the gauge unchanged.
func (g *Gauge) With(labelValues ...string) metrics.Gauge {
	return g
}

// Set assigns the gauge's value.
func (g *Gauge) Set(value float64) {
	g.f.Set(value)
}

// Add adjusts the gauge by delta.
func (g *Gauge) Add(delta float64) {
	g.f.Add(delta)
}
// Histogram implements the histogram metric with a combination of the generic
// Histogram object and several expvar Floats, one for each of the 50th, 90th,
// 95th, and 99th quantiles of observed values, with the quantile attached to
// the name as a suffix. Label values are not supported.
type Histogram struct {
mtx sync.Mutex
h *generic.Histogram
p50 *expvar.Float
p90 *expvar.Float
p95 *expvar.Float
p99 *expvar.Float
}
// NewHistogram returns a Histogram object with the given name and number of
// buckets in the underlying histogram object. 50 is a good default number of
// buckets.
func NewHistogram(name string, buckets int) *Histogram {
return &Histogram{
h: generic.NewHistogram(name, buckets),
p50: expvar.NewFloat(name + ".p50"),
p90: expvar.NewFloat(name + ".p90"),
p95: expvar.NewFloat(name + ".p95"),
p99: expvar.NewFloat(name + ".p99"),
}
}
// With is a no-op.
func (h *Histogram) With(labelValues ...string) metrics.Histogram { return h }
// Observe implements Histogram.
func (h *Histogram) Observe(value float64) {
h.mtx.Lock()
defer h.mtx.Unlock()
h.h.Observe(value)
h.p50.Set(h.h.Quantile(0.50))
h.p90.Set(h.h.Quantile(0.90))
h.p95.Set(h.h.Quantile(0.95))
h.p99.Set(h.h.Quantile(0.99))
} | metrics/expvar/expvar.go | 0.840652 | 0.418697 | expvar.go | starcoder |
package v1alpha1
import (
v1alpha1 "kubeform.dev/kubeform/apis/google/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// DataflowJobLister helps list DataflowJobs.
// Objects returned from the lister come from a shared cache and must be
// treated as read-only.
type DataflowJobLister interface {
	// List lists all DataflowJobs in the indexer matching the selector.
	List(selector labels.Selector) (ret []*v1alpha1.DataflowJob, err error)
	// DataflowJobs returns an object that can list and get DataflowJobs
	// scoped to the given namespace.
	DataflowJobs(namespace string) DataflowJobNamespaceLister
	DataflowJobListerExpansion
}
// dataflowJobLister implements the DataflowJobLister interface on top of a
// cache indexer.
type dataflowJobLister struct {
	indexer cache.Indexer
}

// NewDataflowJobLister returns a new DataflowJobLister backed by the given
// indexer.
func NewDataflowJobLister(indexer cache.Indexer) DataflowJobLister {
	return &dataflowJobLister{indexer: indexer}
}

// List lists all DataflowJobs in the indexer matching the selector.
func (s *dataflowJobLister) List(selector labels.Selector) (ret []*v1alpha1.DataflowJob, err error) {
	collect := func(m interface{}) {
		ret = append(ret, m.(*v1alpha1.DataflowJob))
	}
	err = cache.ListAll(s.indexer, selector, collect)
	return ret, err
}

// DataflowJobs returns a lister scoped to the given namespace.
func (s *dataflowJobLister) DataflowJobs(namespace string) DataflowJobNamespaceLister {
	return dataflowJobNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// DataflowJobNamespaceLister helps list and get DataflowJobs within a
// single namespace. Objects returned here come from a shared cache and
// must be treated as read-only.
type DataflowJobNamespaceLister interface {
	// List lists all DataflowJobs in the indexer for a given namespace.
	List(selector labels.Selector) (ret []*v1alpha1.DataflowJob, err error)
	// Get retrieves the DataflowJob from the indexer for a given namespace and name.
	Get(name string) (*v1alpha1.DataflowJob, error)
	DataflowJobNamespaceListerExpansion
}

// dataflowJobNamespaceLister implements the DataflowJobNamespaceLister
// interface: it scopes lookups in the shared indexer to one namespace.
type dataflowJobNamespaceLister struct {
	indexer   cache.Indexer
	namespace string
}
// List lists all DataflowJobs in the indexer for a given namespace.
func (s dataflowJobNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.DataflowJob, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.DataflowJob))
})
return ret, err
}
// Get retrieves the DataflowJob from the indexer for a given namespace and name.
func (s dataflowJobNamespaceLister) Get(name string) (*v1alpha1.DataflowJob, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha1.Resource("dataflowjob"), name)
}
return obj.(*v1alpha1.DataflowJob), nil
} | client/listers/google/v1alpha1/dataflowjob.go | 0.613005 | 0.431944 | dataflowjob.go | starcoder |
package iterator
import (
"fmt"
"github.com/marcsantiago/collections"
)
// _direction records which way an Iter is being consumed; once set by the
// first call to Next (or Last) it may not change for the iterator's lifetime.
type _direction uint8

const (
	_notSet _direction = iota // iteration has not started yet
	_forward                  // consumed front-to-back via Next
	_backwards                // consumed back-to-front via Last
)

// Iter iterates over a slice of collections.Data values.
type Iter struct {
	currentIdx int // cursor position; also the enumerated index returned by Next

	values []collections.Data // backing data

	direction _direction // locked-in direction of travel

	shouldCycle bool // when true, iteration wraps around instead of ending
}
// Compile-time check that *Iter satisfies IterTraitSlice.
var _ IterTraitSlice = (*Iter)(nil)

// NewIter returns an empty iterator.
func NewIter() *Iter {
	it := Iter{}
	return &it
}

// IntoIter wraps the given slice in an iterator without copying it.
func IntoIter(data []collections.Data) *Iter {
	it := Iter{values: data}
	return &it
}

// Append not very Rust like, but adding a convenience to append into internal data without needing to pass in a full copy
func (i *Iter) Append(data collections.Data) {
	i.values = append(i.values, data)
}
// Next returns the next element in the collection and its enumerated
// position, then moves the cursor forward. Once Next has been called,
// Last cannot be called for this copy of Iter. Returns (-1, nil) when the
// iterator is fully consumed or holds no values.
func (i *Iter) Next() (int, collections.Data) {
	if i.direction == _notSet {
		i.direction = _forward
	} else if i.direction == _backwards {
		panic("Last was called for this iterator, therefore Next cannot be called, create a new iterator to use Next")
	}
	// Bug fix: without this guard a cycling iterator with no values would
	// panic with a division by zero in the modulo below.
	if len(i.values) == 0 {
		return -1, nil
	}
	if !i.shouldCycle && i.currentIdx >= len(i.values) {
		return -1, nil
	}
	curIdx, data := i.currentIdx, i.values[i.currentIdx%len(i.values)]
	i.currentIdx++
	return curIdx, data
}
// All reports whether every value in the collection satisfies the predicate f.
// It is vacuously true for an empty iterator.
func (i *Iter) All(f func(d collections.Data) bool) bool {
	for _, v := range i.values {
		if !f(v) {
			return false
		}
	}
	return true
}

// Any reports whether at least one value in the collection satisfies the
// predicate f. It is false for an empty iterator.
func (i *Iter) Any(f func(d collections.Data) bool) bool {
	for _, v := range i.values {
		if f(v) {
			return true
		}
	}
	return false
}
// Chain builds a new iterator that yields this iterator's values followed by
// the other iterator's values. Neither input is modified.
func (i *Iter) Chain(other IterTraitSlice) IterTraitSlice {
	combined := make([]collections.Data, 0, len(i.values)+other.Len())
	combined = append(combined, i.values...)
	combined = append(combined, other.Collect()...)
	return &Iter{values: combined}
}

// Collect returns the underlying []collections.Data held by the iterator
// (no copy is made).
func (i *Iter) Collect() []collections.Data {
	return i.values
}
// CollectInts converts []collections.Data into the concrete type []int.
func (i *Iter) CollectInts() []int {
	out := make([]int, len(i.values))
	for idx, v := range i.values {
		out[idx] = v.Int()
	}
	return out
}

// CollectInt32s converts []collections.Data into the concrete type []int32.
func (i *Iter) CollectInt32s() []int32 {
	out := make([]int32, len(i.values))
	for idx, v := range i.values {
		out[idx] = v.Int32()
	}
	return out
}

// CollectInt64s converts []collections.Data into the concrete type []int64.
func (i *Iter) CollectInt64s() []int64 {
	out := make([]int64, len(i.values))
	for idx, v := range i.values {
		out[idx] = v.Int64()
	}
	return out
}

// CollectFloat32s converts []collections.Data into the concrete type []float32.
func (i *Iter) CollectFloat32s() []float32 {
	out := make([]float32, len(i.values))
	for idx, v := range i.values {
		out[idx] = v.Float32()
	}
	return out
}

// CollectFloat64s converts []collections.Data into the concrete type []float64.
func (i *Iter) CollectFloat64s() []float64 {
	out := make([]float64, len(i.values))
	for idx, v := range i.values {
		out[idx] = v.Float64()
	}
	return out
}

// CollectStrings converts []collections.Data into the concrete type []string.
func (i *Iter) CollectStrings() []string {
	out := make([]string, len(i.values))
	for idx, v := range i.values {
		out[idx] = v.String()
	}
	return out
}

// CollectBools converts []collections.Data into the concrete type []bool.
func (i *Iter) CollectBools() []bool {
	out := make([]bool, len(i.values))
	for idx, v := range i.values {
		out[idx] = v.Bool()
	}
	return out
}
// Count consumes the iterator (moves its cursor to the end) and returns the
// number of non-nil items it holds.
func (i *Iter) Count() int {
	if len(i.values) == 0 {
		return 0
	}
	count := 0
	for _, v := range i.values {
		if v != nil {
			count++
		}
	}
	i.currentIdx = len(i.values)
	return count
}

// Cycle makes Next and Last wrap around and iterate forever over the values
// in the order they were seen.
func (i *Iter) Cycle() {
	i.shouldCycle = true
}
// Eq determines if this iterator holds the same sequence of values as the
// other iterator. Both channels produced by Iterate are fully drained before
// returning, so the goroutines feeding them can terminate; the previous
// version returned on the first mismatch, leaving both goroutines blocked on
// their next send forever.
func (i *Iter) Eq(other IterTraitSlice) bool {
	if i.Len() != other.Len() {
		return false
	}
	selfCh := i.Iterate()
	otherCh := other.Iterate()
	equal := true
	for {
		sV, selfOK := <-selfCh
		otherV, otherOK := <-otherCh
		// Lengths are equal, so both channels close on the same iteration.
		if !selfOK && !otherOK {
			break
		}
		if sV != otherV {
			equal = false
		}
	}
	return equal
}
// Iterate returns a channel of values that can be ranged over
// The channel is unbuffered and fed by a goroutine; consumers must drain it
// completely, otherwise the goroutine blocks on its next send forever (leak).
func (i *Iter) Iterate() <-chan collections.Data {
	ch := make(chan collections.Data)
	go func() {
		for idx := range i.values {
			ch <- i.values[idx]
		}
		close(ch)
	}()
	return ch
}
// Filter keeps only the values for which the predicate f returns true,
// compacting them in place (the backing array is reused, all other values are
// dropped, and the cursor is reset to the start).
func (i *Iter) Filter(f func(d collections.Data) bool) IterTraitSlice {
	// k is the write cursor of the classic in-place filter idiom.
	var k int
	for j := range i.values {
		if f(i.values[j]) {
			i.values[k] = i.values[j]
			k++
			continue
		}
	}
	i.currentIdx = 0
	i.values = i.values[:k]
	return i
}
// Find returns the first value satisfying the predicate f, or nil when no
// value matches.
func (i *Iter) Find(f func(d collections.Data) bool) collections.Data {
	for _, v := range i.values {
		if f(v) {
			return v
		}
	}
	return nil
}
// Fold folds all values based on the fold function that are Operable and returns a single Data value
// Values that do not implement collections.OperableData are silently skipped.
func (i *Iter) Fold(init collections.OperableData, f func(result collections.OperableData, next collections.OperableData) collections.Data) collections.Data {
	result := init
	for ii := 0; ii < len(i.values); ii++ {
		if op, ok := i.values[ii].(collections.OperableData); ok {
			// f's Data result is assumed to also implement OperableData;
			// a non-operable return value would panic here.
			result = f(result, op).(collections.OperableData)
		}
	}
	return result.(collections.Data)
}
// FoldIntoMap folds all the values into a collections.Map, threading the
// result of each call to f into the next. The previous version discarded f's
// return value, which only worked for callbacks that mutate the map in place;
// callbacks that return a new map are now honored as well (and in-place
// mutators that return their argument behave identically).
func (i *Iter) FoldIntoMap(init IterTraitMap, f func(m IterTraitMap, key collections.Data) IterTraitMap) IterTraitMap {
	result := init
	for _, elem := range i.values {
		result = f(result, elem)
	}
	return result
}
// Ge determines if this iterator is greater than or equal to the other
// iterator: true when it is longer, element-wise equal, or greater per Gt.
func (i *Iter) Ge(other IterTraitSlice) bool {
	if i.Len() > other.Len() {
		return true
	}
	if i.Eq(other) {
		return true
	}
	return i.Gt(other)
}
// Gt determines if this iterator is greater than the other iterator.
// A longer iterator is always greater; for equal lengths the iterators are
// compared element-wise and ANY greater element wins (this is not a
// lexicographic comparison).
// NOTE(review): the early `return true` leaves the goroutines started by
// Iterate blocked on their next channel send — confirm the leak is acceptable.
func (i *Iter) Gt(other IterTraitSlice) bool {
	if i.Len() > other.Len() {
		return true
	}
	if i.Len() < other.Len() {
		return false
	}
	selfCh := i.Iterate()
	otherCh := other.Iterate()
	for {
		sV, selfOK := <-selfCh
		otherV, otherOK := <-otherCh
		// Both channels close on the same iteration since lengths are equal.
		if !selfOK && !otherOK {
			break
		}
		if sV.Greater(otherV) {
			return true
		}
	}
	return false
}
// Inspect calls f on every value without modifying the iterator, allowing
// debug hooks to be inserted between chained operations. Returns the
// iterator itself for chaining.
func (i *Iter) Inspect(f func(d collections.Data)) IterTraitSlice {
	for idx := 0; idx < len(i.values); idx++ {
		f(i.values[idx])
	}
	return i
}
// Last returns the next element in the collection in the reverse order and its enumerated position and moves the cursor forward
// If Last is called Next cannot be called for this copy of Iter, returns -1 as an index when the iterator is fully consumed
func (i *Iter) Last() (int, collections.Data) {
	// On first use, lock the direction and replace values with a reversed
	// copy; subsequent calls walk that copy forward.
	if i.direction == _notSet {
		i.direction = _backwards
		i.values = reverse(i.values)
	} else if i.direction == _forward {
		panic("Next was called for this iterator, therefore Last cannot be called, create a new iterator to use Last")
	}
	// Without cycling, exhaustion is signalled by (-1, nil).
	if !i.shouldCycle {
		if i.currentIdx >= len(i.values) {
			return -1, nil
		}
	}
	// The modulo wraps the read position when cycling.
	curIdx, data := i.currentIdx, i.values[i.currentIdx%len(i.values)]
	i.currentIdx++
	return curIdx, data
}
// Le determines if this iterator is less than or equal to the other iterator:
// true when it is shorter, element-wise equal, or less per Lt.
func (i *Iter) Le(other IterTraitSlice) bool {
	if i.Len() < other.Len() {
		return true
	}
	if i.Eq(other) {
		return true
	}
	return i.Lt(other)
}

// Len returns the current length of the underlying data slice.
func (i *Iter) Len() int {
	return len(i.values)
}
// Lt determines if this iterator is less than the other iterator.
// A shorter iterator is always less; for equal lengths the iterators are
// compared element-wise and ANY lesser element wins (this is not a
// lexicographic comparison).
// NOTE(review): the early `return true` leaves the goroutines started by
// Iterate blocked on their next channel send — confirm the leak is acceptable.
func (i *Iter) Lt(other IterTraitSlice) bool {
	if i.Len() < other.Len() {
		return true
	}
	if i.Len() > other.Len() {
		return false
	}
	selfCh := i.Iterate()
	otherCh := other.Iterate()
	for {
		sV, selfOK := <-selfCh
		otherV, otherOK := <-otherCh
		// Both channels close on the same iteration since lengths are equal.
		if !selfOK && !otherOK {
			break
		}
		if sV.Less(otherV) {
			return true
		}
	}
	return false
}
// Map applies f to every element in place and returns the iterator for
// chaining.
func (i *Iter) Map(f func(d collections.Data) collections.Data) IterTraitSlice {
	for idx := range i.values {
		i.values[idx] = f(i.values[idx])
	}
	return i
}
// Max returns the maximum value in the data collection (per Data.Greater),
// or nil when the collection is empty.
func (i *Iter) Max() collections.Data {
	if len(i.values) == 0 {
		return nil
	}
	best := i.values[0]
	for _, v := range i.values[1:] {
		if v.Greater(best) {
			best = v
		}
	}
	return best
}

// Min returns the minimum value in the data collection (per Data.Less),
// or nil when the collection is empty.
func (i *Iter) Min() collections.Data {
	if len(i.values) == 0 {
		return nil
	}
	best := i.values[0]
	for _, v := range i.values[1:] {
		if v.Less(best) {
			best = v
		}
	}
	return best
}
// Ne determines if this iterator is different from the other iterator
// (the negation of Eq).
func (i *Iter) Ne(other IterTraitSlice) bool {
	return !i.Eq(other)
}
// Nth returns the element at position n and advances the iterator so that
// element becomes the new head (all preceding elements are dropped). Returns
// nil when n is out of range. The previous bounds check (`n > len`) let
// n == len(values) through and then panicked on the index, and a negative n
// also panicked; both now return nil.
func (i *Iter) Nth(n int) collections.Data {
	if n < 0 || n >= len(i.values) {
		return nil
	}
	data := i.values[n]
	i.values = i.values[n:]
	return data
}
// Partition consumes the iterator and splits its values into two slices:
// those satisfying the predicate f, followed by those that do not.
func (i *Iter) Partition(f func(d collections.Data) bool) ([]collections.Data, []collections.Data) {
	var matched, rest []collections.Data
	for _, v := range i.values {
		if f(v) {
			matched = append(matched, v)
		} else {
			rest = append(rest, v)
		}
	}
	i.currentIdx = len(i.values)
	return matched, rest
}
// Peak returns the next value in the collection without consuming the
// iterator, or nil when the cursor has passed the end. (Conventionally this
// operation is spelled "Peek"; the name is kept to preserve the interface.)
func (i *Iter) Peak() collections.Data {
	if i.currentIdx < len(i.values) {
		return i.values[i.currentIdx]
	}
	return nil
}
// Position returns the index of the first element satisfying the predicate f,
// or -1 when no element matches.
func (i *Iter) Position(f func(d collections.Data) bool) int {
	for idx := 0; idx < len(i.values); idx++ {
		if f(i.values[idx]) {
			return idx
		}
	}
	return -1
}
// Product iterates over the entire iterator, multiplying all the elements
// that implement collections.OperableData (non-operable elements are
// skipped). Returns IntValue(0) when the iterator is empty or its first
// element is not operable.
func (i *Iter) Product() collections.Data {
	if len(i.values) == 0 {
		return collections.IntValue(0)
	}
	// Seed the product with the first element, if it is operable.
	var result collections.Data
	if op, ok := i.values[0].(collections.OperableData); ok {
		result = op.(collections.Data)
	}
	if result == nil {
		return collections.IntValue(0)
	}
	for ii := 1; ii < len(i.values); ii++ {
		if op, ok := i.values[ii].(collections.OperableData); ok {
			result = result.(collections.OperableData).Mul(op).(collections.Data)
		}
	}
	return result
}
// Reduce combines all values into one by repeatedly applying f, seeding the
// accumulator with the first element. Returns nil for an empty iterator.
// The previous version started its loop at index 0 after seeding with
// values[0], so the first element was folded in twice (e.g. a sum counted it
// double); the loop now starts at index 1.
func (i *Iter) Reduce(f func(a, b collections.Data) collections.Data) collections.Data {
	if len(i.values) == 0 {
		return nil
	}
	result := i.values[0]
	for ii := 1; ii < len(i.values); ii++ {
		result = f(result, i.values[ii])
	}
	return result
}
// Take truncates the iterator to its first n elements, or leaves it unchanged
// when it already holds fewer (mirroring the documented "or fewer if the
// underlying iterator ends sooner" behavior). The previous version sliced
// unconditionally, which panicked when n exceeded the slice capacity and
// silently re-exposed stale elements between len and cap otherwise. A
// negative n is treated as 0.
func (i *Iter) Take(n int) {
	if n < 0 {
		n = 0
	}
	if n >= len(i.values) {
		return
	}
	i.values = i.values[:n]
}
// String to satisfy the stringer interface
func (i *Iter) String() string {
	return fmt.Sprintf("%+v", i.values)
}

// Sum adds together all elements that implement collections.OperableData
// (non-operable elements are skipped). Returns IntValue(0) when the iterator
// is empty or its first element is not operable.
func (i *Iter) Sum() collections.Data {
	if len(i.values) == 0 {
		return collections.IntValue(0)
	}
	// Seed the sum with the first element, if it is operable.
	var result collections.Data
	if op, ok := i.values[0].(collections.OperableData); ok {
		result = op.(collections.Data)
	}
	if result == nil {
		return collections.IntValue(0)
	}
	for ii := 1; ii < len(i.values); ii++ {
		if op, ok := i.values[ii].(collections.OperableData); ok {
			result = result.(collections.OperableData).Add(op).(collections.Data)
		}
	}
	return result
}
// Zip pairs each value of this iterator (as Element.Key) with the value at
// the same position in other (as Element.Value). The result is only as long
// as the shorter of the two collections. The previous bound check
// (`i > len(other)`) permitted an out-of-range read of other at the index
// equal to its length; the check is now `>=`. The loop variable is also
// renamed so it no longer shadows the receiver.
func (i *Iter) Zip(other []collections.Data) []collections.Element {
	elements := make([]collections.Element, 0, len(i.values))
	for idx, value := range i.values {
		if idx >= len(other) {
			break
		}
		elements = append(elements, collections.Element{
			Key:   value,
			Value: other[idx],
		})
	}
	return elements
}
// ZipIntoMap pairs each value of this iterator (as key) with the value at
// the same position in other (as value), producing an IterTraitMap. The
// result is only as long as the shorter of the two collections. The previous
// bound check (`i > len(other)`) permitted an out-of-range read of other at
// the index equal to its length; the check is now `>=`. The loop variable is
// also renamed so it no longer shadows the receiver.
func (i *Iter) ZipIntoMap(other []collections.Data) IterTraitMap {
	m := collections.NewGenericMap()
	for idx, value := range i.values {
		if idx >= len(other) {
			break
		}
		m.Set(value, other[idx])
	}
	return NewMapIterFromMap(m)
}
// abs returns the absolute value of n. Note that the most negative int64 has
// no positive counterpart, so abs of that value wraps back to itself — the
// same result the previous branch-free bit-twiddling implementation produced.
func abs(n int64) int64 {
	if n < 0 {
		return -n
	}
	return n
}
func reverse(s []collections.Data) []collections.Data {
a := make([]collections.Data, len(s))
copy(a, s)
for i := len(a)/2 - 1; i >= 0; i-- {
opp := len(a) - 1 - i
a[i], a[opp] = a[opp], a[i]
}
return a
} | slice_iter.go | 0.775095 | 0.511961 | slice_iter.go | starcoder |
package packed
// Efficient sequential read/write of packed integers.
// BulkOperationPacked20 decodes streams of 20-bit packed integers. The mask
// 1048575 appearing below is 2^20-1; values crossing a storage-unit boundary
// are stitched together from the low bits of one unit and the high bits of
// the next.
type BulkOperationPacked20 struct {
	*BulkOperationPacked
}

// newBulkOperationPacked20 constructs the bulk decoder for 20 bits per value.
func newBulkOperationPacked20() BulkOperation {
	return &BulkOperationPacked20{newBulkOperationPacked(20)}
}

// decodeLongToInt unpacks 20-bit values from 64-bit blocks into int32s.
// Each iteration consumes 5 blocks and emits 16 values (5*64 = 16*20 bits).
func (op *BulkOperationPacked20) decodeLongToInt(blocks []int64, values []int32, iterations int) {
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		block0 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = int32(int64(uint64(block0) >> 44)); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block0) >> 24) & 1048575); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block0) >> 4) & 1048575); valuesOffset++
		block1 := blocks[blocksOffset]; blocksOffset++
		// 4 low bits of block0 + 16 high bits of block1 form one value.
		values[valuesOffset] = int32(((block0 & 15) << 16) | (int64(uint64(block1) >> 48))); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block1) >> 28) & 1048575); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block1) >> 8) & 1048575); valuesOffset++
		block2 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = int32(((block1 & 255) << 12) | (int64(uint64(block2) >> 52))); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block2) >> 32) & 1048575); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block2) >> 12) & 1048575); valuesOffset++
		block3 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = int32(((block2 & 4095) << 8) | (int64(uint64(block3) >> 56))); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block3) >> 36) & 1048575); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block3) >> 16) & 1048575); valuesOffset++
		block4 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = int32(((block3 & 65535) << 4) | (int64(uint64(block4) >> 60))); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block4) >> 40) & 1048575); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block4) >> 20) & 1048575); valuesOffset++
		values[valuesOffset] = int32(block4 & 1048575); valuesOffset++
	}
}

// decodeByteToInt unpacks 20-bit values from bytes into int32s.
// Each iteration consumes 5 bytes and emits 2 values (5*8 = 2*20 bits).
func (op *BulkOperationPacked20) decodeByteToInt(blocks []byte, values []int32, iterations int) {
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		byte0 := blocks[blocksOffset]
		blocksOffset++
		byte1 := blocks[blocksOffset]
		blocksOffset++
		byte2 := blocks[blocksOffset]
		blocksOffset++
		// First value: byte0, byte1 and the 4 high bits of byte2.
		values[valuesOffset] = int32((int64(byte0) << 12) | (int64(byte1) << 4) | int64(uint8(byte2) >> 4))
		valuesOffset++
		byte3 := blocks[blocksOffset]
		blocksOffset++
		byte4 := blocks[blocksOffset]
		blocksOffset++
		// Second value: the 4 low bits of byte2, then byte3 and byte4.
		values[valuesOffset] = int32((int64(byte2 & 15) << 16) | (int64(byte3) << 8) | int64(byte4))
		valuesOffset++
	}
}

// decodeLongToLong unpacks 20-bit values from 64-bit blocks into int64s.
// Identical bit layout to decodeLongToInt, without the int32 narrowing.
func (op *BulkOperationPacked20) decodeLongToLong(blocks []int64, values []int64, iterations int) {
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		block0 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = int64(uint64(block0) >> 44); valuesOffset++
		values[valuesOffset] = int64(uint64(block0) >> 24) & 1048575; valuesOffset++
		values[valuesOffset] = int64(uint64(block0) >> 4) & 1048575; valuesOffset++
		block1 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = ((block0 & 15) << 16) | (int64(uint64(block1) >> 48)); valuesOffset++
		values[valuesOffset] = int64(uint64(block1) >> 28) & 1048575; valuesOffset++
		values[valuesOffset] = int64(uint64(block1) >> 8) & 1048575; valuesOffset++
		block2 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = ((block1 & 255) << 12) | (int64(uint64(block2) >> 52)); valuesOffset++
		values[valuesOffset] = int64(uint64(block2) >> 32) & 1048575; valuesOffset++
		values[valuesOffset] = int64(uint64(block2) >> 12) & 1048575; valuesOffset++
		block3 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = ((block2 & 4095) << 8) | (int64(uint64(block3) >> 56)); valuesOffset++
		values[valuesOffset] = int64(uint64(block3) >> 36) & 1048575; valuesOffset++
		values[valuesOffset] = int64(uint64(block3) >> 16) & 1048575; valuesOffset++
		block4 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = ((block3 & 65535) << 4) | (int64(uint64(block4) >> 60)); valuesOffset++
		values[valuesOffset] = int64(uint64(block4) >> 40) & 1048575; valuesOffset++
		values[valuesOffset] = int64(uint64(block4) >> 20) & 1048575; valuesOffset++
		values[valuesOffset] = block4 & 1048575; valuesOffset++
	}
}

// decodeByteToLong unpacks 20-bit values from bytes into int64s.
// Identical bit layout to decodeByteToInt, without the int32 narrowing.
func (op *BulkOperationPacked20) decodeByteToLong(blocks []byte, values []int64, iterations int) {
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		byte0 := blocks[blocksOffset]
		blocksOffset++
		byte1 := blocks[blocksOffset]
		blocksOffset++
		byte2 := blocks[blocksOffset]
		blocksOffset++
		values[valuesOffset] = int64((int64(byte0) << 12) | (int64(byte1) << 4) | int64(uint8(byte2) >> 4))
		valuesOffset++
		byte3 := blocks[blocksOffset]
		blocksOffset++
		byte4 := blocks[blocksOffset]
		blocksOffset++
		values[valuesOffset] = int64((int64(byte2 & 15) << 16) | (int64(byte3) << 8) | int64(byte4))
		valuesOffset++
	}
}
package DG1D
import (
"math"
"github.com/notargets/gocfd/utils"
)
// SlopeLimitN applies a slope limiter to the solution U, limiting only the
// elements whose linear reconstruction differs from the element end values by
// more than a small tolerance. M selects the limiter: M == 0 uses plain
// minmod, otherwise the modified minmod (MinmodB) with TVB-style constant M.
// NOTE(review): this appears to follow the SlopeLimitN routine from Hesthaven
// & Warburton's nodal DG reference — confirm against that source.
func (el Elements1D) SlopeLimitN(U utils.Matrix, M float64) (ULim utils.Matrix) {
	// validate if needed
	var (
		Uh    = el.Vinv.Mul(U)
		eps0  = 1.0e-8
		nr, _ = U.Dims()
	)
	// Zero all modal coefficients above the constant mode, keeping only the
	// cell averages vk.
	Uh.SetRange(1, -1, 0, -1, 0)
	Uh = el.V.Mul(Uh)
	vk := Uh.Row(0)
	// End values of each element
	ue1 := U.Row(0)
	ue2 := U.Row(nr - 1)
	// Cell averages of the left (vkm1) and right (vkp1) neighbors, with the
	// boundary elements repeated at the ends.
	vkm1 := vk.Subset(0, 0).Concat(vk.Subset(0, -2))
	vkp1 := vk.Subset(1, -1).Concat(vk.Subset(-1, -1))
	// Apply reconstruction to find elements in need of limiting
	vm1 := vk.Copy().Subtract(vkm1)
	vp1 := vkp1.Copy().Subtract(vk)
	var ve1, ve2 utils.Vector
	if M == 0 {
		ve1 = vk.Copy().Subtract(Minmod(vk.Copy().Subtract(ue1), vm1, vp1))
		ve2 = vk.Copy().Add(Minmod(ue2.Copy().Subtract(vk), vm1, vp1))
	} else {
		// h is the local node spacing of the first element, used to scale the
		// TVB threshold in MinmodB.
		h := el.X.Row(0).AtVec(1) - el.X.Row(0).AtVec(0)
		ve1 = vk.Copy().Subtract(MinmodB(M, h, vk.Copy().Subtract(ue1), vm1, vp1))
		ve2 = vk.Copy().Add(MinmodB(M, h, ue2.Copy().Subtract(vk), vm1, vp1))
	}
	// Elements whose reconstructed end values deviate beyond eps0 get limited.
	ids := ve1.Subtract(ue1).FindOr(utils.Greater, eps0, true, ve2.Subtract(ue2))
	ULim = U.Copy()
	if ids.Len() != 0 {
		//fmt.Printf("ids = %v\n", ids.ToIndex())
		idsI := ids.ToIndex()
		// We need to limit the elements in the index
		// Create a piecewise linear solution for limiting the elements in the index
		uhl := el.Vinv.Mul(U.SliceCols(idsI))
		uhl.SetRange(2, -1, 0, -1, 0) // Set all polynomial coefficients higher than linear to 0
		ul := el.V.Mul(uhl)
		// Apply slope limiter to specified elements
		ULim.AssignColumns(idsI, el.SlopeLimitLin(ul, el.X.SliceCols(idsI), vkm1.SubsetIndex(idsI), vk.SubsetIndex(idsI), vkp1.SubsetIndex(idsI)))
	}
	return
}
// SlopeLimitLin rebuilds each element of the linear solution ul (with node
// coordinates xl) as a straight line through its cell average v0, with a
// slope limited by minmod of the local slope and the neighbor-average
// differences (vm1: left averages, vp1: right averages).
func (el Elements1D) SlopeLimitLin(ul, xl utils.Matrix, vm1, v0, vp1 utils.Vector) (ULim utils.Matrix) {
	var (
		Np   = el.Np
		ones = utils.NewVectorConstant(Np, 1)
		// h holds the element widths, x0 the element midpoints.
		h  = xl.Row(Np - 1).Subtract(xl.Row(0))
		x0 = ones.Outer(xl.Row(0).Add(h.Copy().Scale(0.5)))
		// hNScaled is 2/h, presumably the reference-element scaling of the
		// derivative operator Dr — confirm against Elements1D's definition.
		hNScaled = ones.Outer(h).POW(-1).Scale(2)
		ux       = hNScaled.ElMul(el.Dr.Copy().Mul(ul))
	)
	// ULim = v0 + (x - x0) * minmod(local slope, forward slope, backward slope)
	ULim = ones.Outer(v0).Add(xl.Subtract(x0).ElMul(ones.Outer(Minmod(ux.Row(0), vp1.Subtract(v0).ElDiv(h), v0.Subtract(vm1).ElDiv(h)))))
	return
}
// Minmod computes the element-wise minmod across a group of equal-length
// vectors: R[i] = minmod(vecs[0][i], vecs[1][i], ...).
// Panics when called with no vectors (vecs[0] is read unconditionally).
func Minmod(vecs ...utils.Vector) (R utils.Vector) {
	/*
		Computes minmod across a group of vectors
		Input: Ainv, B, C, length N
		For each element in Ainv, B, C, compose a vector like {a1, b1, c1} and set r1 = minmod(a1,b1,c1)
		Output: R, length N
	*/
	var (
		W     = len(vecs)
		dataV = make([]float64, W) // scratch buffer reused for each position i
		N     = vecs[0].Len()
		dataR = make([]float64, N)
	)
	for i := 0; i < N; i++ {
		for j := 0; j < W; j++ {
			dataV[j] = vecs[j].AtVec(i)
		}
		dataR[i] = minmod(dataV)
	}
	R = utils.NewVector(N, dataR)
	return
}
// minmod of a set of scalars: when every entry shares the same sign the
// result is that sign times the smallest magnitude; otherwise (mixed signs,
// or an empty input) the result is 0. Sign is taken from math.Signbit, so +0
// counts as positive and -0 as negative.
// The previous version divided by the absolute sign-count sum and therefore
// panicked with a division by zero whenever the positive and negative entries
// balanced out exactly (e.g. minmod({1, -1})); that case now returns 0, which
// is the mathematically correct minmod for mixed signs.
func minmod(a []float64) (r float64) {
	if len(a) == 0 {
		return 0
	}
	rMin := math.Abs(a[0])
	var signSum int
	for _, val := range a {
		if math.Signbit(val) {
			signSum--
		} else {
			signSum++
		}
		rMin = math.Min(rMin, math.Abs(val))
	}
	switch signSum {
	case len(a): // all entries non-negative
		return rMin
	case -len(a): // all entries negative
		return -rMin
	default: // mixed signs
		return 0
	}
}
// MinmodB is the modified (TVB-style) element-wise minmod: entries of the
// first vector that do not exceed the threshold M*h^2 pass through
// unlimited; entries that exceed it are limited by standard minmod across
// all vectors. NOTE(review): whether the Find comparison is on absolute
// values depends on the semantics of utils.Vector.Find's third argument —
// confirm.
func MinmodB(M, h float64, vecs ...utils.Vector) (R utils.Vector) {
	/*
		Computes minmodB across a group of vectors
		Input: Ainv, B, C, length N
		For each element in Ainv, B, C, compose a vector like {a1, b1, c1} and set r1 = minmod(a1,b1,c1)
		Output: R, length N
	*/
	var (
		W     = len(vecs)
		dataV = make([]float64, W)
		N     = vecs[0].Len()
		dataR = make([]float64, N)
	)
	// Default: pass the first vector through unchanged.
	for i := 0; i < N; i++ {
		dataR[i] = vecs[0].RawVector().Data[i]
	}
	// Check for values higher than our limit in the first vector
	factor := M * utils.POW(h, 2)
	idsV := vecs[0].Find(utils.Greater, factor, true)
	if idsV.Len() != 0 {
		ids := idsV.ToIndex()
		// Only the flagged positions are limited via minmod.
		for _, i := range ids {
			for j := 0; j < W; j++ {
				dataV[j] = vecs[j].AtVec(i)
			}
			dataR[i] = minmod(dataV)
		}
	}
	R = utils.NewVector(N, dataR)
	return
}
package vector
import (
"bytes"
"math"
"strconv"
)
const (
	// zero is the tolerance below which a magnitude or angle is treated as zero.
	zero = 1.0e-7
)

// Vector describes the operations supported by an n-dimensional vector.
type Vector interface {
	String() string
	Eq(other Vector) bool
	Add(other Vector) Vector
	Sub(other Vector) Vector
	Scale(factor float64)
	DotProd(other Vector) float64
	Angle(other Vector) float64
	Mag() float64
	Unit() Vector
}

// SimpleVector is a slice-of-float64 implementation of Vector.
type SimpleVector []float64

// New builds a SimpleVector from the given components.
func New(elems ...float64) SimpleVector {
	return SimpleVector(elems)
}

// assertLenMatch panics when other has a different dimensionality than v
// (or is not a SimpleVector, via the type assertion).
func (v SimpleVector) assertLenMatch(other Vector) {
	if len(other.(SimpleVector)) != len(v) {
		panic("Vector length mismatch")
	}
}

// String renders the vector as "[c1,c2,...]" using shortest-round-trip
// float formatting.
func (v SimpleVector) String() string {
	var buf bytes.Buffer
	buf.WriteByte('[')
	for i, x := range v {
		if i > 0 {
			buf.WriteByte(',')
		}
		buf.WriteString(strconv.FormatFloat(x, 'g', -1, 64))
	}
	buf.WriteByte(']')
	return buf.String()
}

// Eq reports equality by comparing magnitudes and the angle between the
// vectors. When the angle is undefined (NaN, e.g. for a zero vector) only the
// magnitudes are compared. Note the magnitude comparison is exact, not
// tolerance-based.
func (v SimpleVector) Eq(other Vector) bool {
	a := v.Angle(other)
	if math.IsNaN(a) {
		return v.Mag() == other.Mag()
	}
	return a <= zero && v.Mag() == other.Mag()
}

// Eq2 compares the vectors component by component for exact equality.
func (v SimpleVector) Eq2(other Vector) bool {
	v.assertLenMatch(other)
	o := other.(SimpleVector)
	for i := range v {
		if v[i] != o[i] {
			return false
		}
	}
	return true
}

// IsZero reports whether the vector's magnitude is within the zero tolerance.
func (v SimpleVector) IsZero() bool {
	return v.Mag() <= zero
}

// Add returns the component-wise sum of the two vectors.
func (v SimpleVector) Add(other Vector) Vector {
	v.assertLenMatch(other)
	o := other.(SimpleVector)
	sum := make(SimpleVector, len(v))
	for i := range v {
		sum[i] = v[i] + o[i]
	}
	return sum
}

// Sub returns the component-wise difference v - other.
func (v SimpleVector) Sub(other Vector) Vector {
	v.assertLenMatch(other)
	o := other.(SimpleVector)
	diff := make(SimpleVector, len(v))
	for i := range v {
		diff[i] = v[i] - o[i]
	}
	return diff
}

// Scale multiplies every component of the vector in place.
func (v SimpleVector) Scale(scale float64) {
	for i, x := range v {
		v[i] = x * scale
	}
}

// Mag returns the Euclidean magnitude of the vector.
func (v SimpleVector) Mag() float64 {
	var sum float64
	for _, x := range v {
		sum += x * x
	}
	return math.Sqrt(sum)
}

// Unit returns a normalized copy of the vector. A zero vector yields NaN
// components (division by a zero magnitude), matching the original behavior.
func (v SimpleVector) Unit() Vector {
	u := make(SimpleVector, len(v))
	copy(u, v)
	u.Scale(1 / u.Mag())
	return u
}

// DotProd returns the dot product as the sum of component products.
func (v SimpleVector) DotProd(other Vector) float64 {
	v.assertLenMatch(other)
	o := other.(SimpleVector)
	var sum float64
	for i, x := range v {
		sum += x * o[i]
	}
	return sum
}

// Angle returns the angle between the two vectors in radians
// (NaN when either vector has zero magnitude).
func (v SimpleVector) Angle(other Vector) float64 {
	return math.Acos(v.DotProd(other) / (v.Mag() * other.Mag()))
}

// Proj returns the projection of v onto base.
func (v SimpleVector) Proj(base Vector) Vector {
	dir := base.Unit()
	dir.Scale(v.DotProd(dir))
	return dir
}
package common
import (
"math"
"math/rand"
)
// Distribution provides an interface to model a statistical distribution.
// Advance steps to (and caches) the next value; Get returns the current
// value and must be idempotent between Advance calls.
type Distribution interface {
	Advance()
	Get() float64 // should be idempotent
}
// NormalDistribution models a normal (Gaussian) distribution. The type is
// stateless apart from caching the sample most recently drawn by Advance.
type NormalDistribution struct {
	Mean   float64
	StdDev float64

	value float64 // cached result of the last Advance
}

// ND creates a NormalDistribution with the given mean and standard deviation.
func ND(mean, stddev float64) *NormalDistribution {
	return &NormalDistribution{Mean: mean, StdDev: stddev}
}

// Advance draws a fresh sample from N(Mean, StdDev) and caches it for Get.
// Since the distribution is stateless, this simply overwrites the cache.
func (d *NormalDistribution) Advance() {
	d.value = d.Mean + rand.NormFloat64()*d.StdDev
}

// Get returns the sample produced by the most recent Advance
// (zero before the first call).
func (d *NormalDistribution) Get() float64 {
	return d.value
}
// UniformDistribution models a uniform distribution over [Low, High).
// The type is stateless apart from caching the last drawn sample.
type UniformDistribution struct {
	Low   float64
	High  float64
	value float64 // cached result of the last Advance
}

// UD creates a UniformDistribution over the half-open range [low, high).
func UD(low, high float64) *UniformDistribution {
	return &UniformDistribution{Low: low, High: high}
}

// Advance draws a fresh uniform sample and caches it for Get.
// Since the distribution is stateless, this simply overwrites the cache.
func (d *UniformDistribution) Advance() {
	d.value = d.Low + rand.Float64()*(d.High-d.Low)
}

// Get returns the sample produced by the most recent Advance.
func (d *UniformDistribution) Get() float64 {
	return d.value
}
// RandomWalkDistribution is a stateful random walk: each Advance draws a
// step from the underlying Step distribution and adds it to State.
type RandomWalkDistribution struct {
	Step  Distribution
	State float64 // current position; optionally seeded by the caller
}

// WD creates a RandomWalkDistribution from a step distribution and a
// starting state.
func WD(step Distribution, state float64) *RandomWalkDistribution {
	return &RandomWalkDistribution{Step: step, State: state}
}

// Advance draws the next step and moves the walk by it.
func (d *RandomWalkDistribution) Advance() {
	d.Step.Advance()
	d.State += d.Step.Get()
}

// Get returns the walk's current position.
func (d *RandomWalkDistribution) Get() float64 {
	return d.State
}
// ClampedRandomWalkDistribution is a stateful random walk whose position is
// clamped to the closed range [Min, Max] after every step.
type ClampedRandomWalkDistribution struct {
	Step  Distribution
	Min   float64
	Max   float64
	State float64 // current position; optionally seeded by the caller
}

// CWD creates a ClampedRandomWalkDistribution from a step distribution,
// bounds, and a starting state.
func CWD(step Distribution, min, max, state float64) *ClampedRandomWalkDistribution {
	return &ClampedRandomWalkDistribution{
		Step:  step,
		Min:   min,
		Max:   max,
		State: state,
	}
}

// Advance draws the next step, moves the walk, and clamps the result.
// The Max clamp is applied before the Min clamp, matching the original
// ordering (only relevant in the degenerate case Min > Max).
func (d *ClampedRandomWalkDistribution) Advance() {
	d.Step.Advance()
	next := d.State + d.Step.Get()
	if next > d.Max {
		next = d.Max
	}
	if next < d.Min {
		next = d.Min
	}
	d.State = next
}

// Get returns the walk's current (clamped) position.
func (d *ClampedRandomWalkDistribution) Get() float64 {
	return d.State
}
// MonotonicRandomWalkDistribution is a stateful random walk that never
// decreases: the absolute value of each drawn step is added to State.
type MonotonicRandomWalkDistribution struct {
	Step  Distribution
	State float64
}

// Advance draws the next step and adds its magnitude to the walk.
func (d *MonotonicRandomWalkDistribution) Advance() {
	d.Step.Advance()
	d.State += math.Abs(d.Step.Get())
}

// Get returns the walk's current position.
func (d *MonotonicRandomWalkDistribution) Get() float64 {
	return d.State
}

// MWD creates a MonotonicRandomWalkDistribution from a step distribution and
// an initial state.
func MWD(step Distribution, state float64) *MonotonicRandomWalkDistribution {
	return &MonotonicRandomWalkDistribution{Step: step, State: state}
}
// ConstantDistribution always reports the same value, regardless of how many
// times Advance is called.
type ConstantDistribution struct {
	State float64
}

// Advance is a no-op: the value never changes.
func (d *ConstantDistribution) Advance() {}

// Get returns the constant value.
func (d *ConstantDistribution) Get() float64 {
	return d.State
}
// FloatPrecision wraps another distribution and truncates its values to a
// fixed number of decimal places.
type FloatPrecision struct {
	step      Distribution
	precision float64 // 10^p, where p is the number of retained decimals
}

// Advance advances the wrapped distribution.
func (f *FloatPrecision) Advance() {
	f.step.Advance()
}

// Get returns the wrapped distribution's value truncated (toward zero, via
// the int conversion) to the configured precision.
func (f *FloatPrecision) Get() float64 {
	return float64(int(f.step.Get()*f.precision)) / f.precision
}

// FP wraps step in a FloatPrecision with the given number of decimal places.
// The precision is clamped to [0, 5] to avoid floating point calculation
// errors.
func FP(step Distribution, precision int) *FloatPrecision {
	switch {
	case precision < 0:
		precision = 0
	case precision > 5:
		precision = 5
	}
	return &FloatPrecision{step: step, precision: math.Pow(10, float64(precision))}
}
// LazyDistribution is a distribution that can change it's value
// only if a "motivation" distribution provides a value above a specified threshold.
// Otherwise it remains the same.
type LazyDistribution struct {
motive Distribution
step Distribution
threshold float64
}
// LD returns a new LazyDistribution that returns a new value from "dist", if the "motavation" distribution,
// fires above the threshold.
func LD(motive, dist Distribution, threshold float64) *LazyDistribution {
return &LazyDistribution{
step: dist,
motive: motive,
threshold: threshold,
}
}
// Advance computes the next value of this distribution.
func (d *LazyDistribution) Advance() {
d.motive.Advance()
if d.motive.Get() < d.threshold {
return
}
d.step.Advance()
}
// Get returns the last computed value for this distribution.
func (d *LazyDistribution) Get() float64 {
return d.step.Get()
} | pkg/data/usecases/common/distribution.go | 0.914654 | 0.667723 | distribution.go | starcoder |
package hashdict
import (
"fmt"
"github.com/peterzeller/go-fun/dict"
"github.com/peterzeller/go-fun/equality"
"github.com/peterzeller/go-fun/hash"
"github.com/peterzeller/go-fun/iterable"
"github.com/peterzeller/go-fun/zero"
)
// adapted from https://github.com/andrewoma/dexx/blob/master/collection/src/main/java/com/github/andrewoma/dexx/collection/internal/hashmap/CompactHashMap.java
// Dict is an immutable hash map backed by a hash trie. Updating operations
// (Set, Remove) return a new Dict sharing structure with the receiver, which
// is never modified.
type Dict[K, V any] struct {
	// root is the top node of the hash trie; empty[K, V]{} for an empty dict.
	root node[K, V]
	// keyEq supplies hashing and equality for keys.
	keyEq hash.EqHash[K]
}
// New builds a Dict from the given entries using eq for key hashing and
// equality. When several entries share an equal key, the last one wins.
func New[K, V any](eq hash.EqHash[K], entries ...dict.Entry[K, V]) Dict[K, V] {
	// Start from the empty trie and insert each entry at depth 0.
	var root node[K, V] = empty[K, V]{}
	for _, e := range entries {
		root = root.updated0(e.Key, eq.Hash(e.Key), 0, e.Value, eq)
	}
	return Dict[K, V]{root, eq}
}

// FromMap builds a Dict containing all key/value pairs of the Go map m,
// using eq for key hashing and equality.
func FromMap[K comparable, V any](eq hash.EqHash[K], m map[K]V) Dict[K, V] {
	var root node[K, V] = empty[K, V]{}
	for k, v := range m {
		root = root.updated0(k, eq.Hash(k), 0, v, eq)
	}
	return Dict[K, V]{root, eq}
}
// KeyEq returns the equality-and-hash implementation used for keys in this
// dictionary. (Receiver renamed from `s` to `d` for consistency with every
// other Dict method.)
func (d Dict[K, V]) KeyEq() hash.EqHash[K] {
	return d.keyEq
}
// Get looks up key in the dictionary, returning its value and whether the
// key is present.
func (d Dict[K, V]) Get(key K) (V, bool) {
	return d.root.get0(key, d.keyEq.Hash(key), 0, d.keyEq)
}

// GetOrZero looks up key and returns its value, or the zero value of V when
// the key is absent.
func (d Dict[K, V]) GetOrZero(key K) V {
	v, ok := d.Get(key)
	if !ok {
		return zero.Value[V]()
	}
	return v
}

// GetOr looks up key and returns its value, or defaultValue when the key is
// absent.
func (d Dict[K, V]) GetOr(key K, defaultValue V) V {
	v, ok := d.Get(key)
	if !ok {
		return defaultValue
	}
	return v
}

// ContainsKey reports whether key is present in the dictionary.
func (d Dict[K, V]) ContainsKey(key K) bool {
	_, found := d.Get(key)
	return found
}
// Set returns a copy of the dictionary in which key is bound to value.
func (d Dict[K, V]) Set(key K, value V) Dict[K, V] {
	root := d.root.updated0(key, d.keyEq.Hash(key), 0, value, d.keyEq)
	return Dict[K, V]{root: root, keyEq: d.keyEq}
}

// Remove returns a copy of the dictionary without key. When key is not
// present, the receiver itself is returned (no new trie is built).
func (d Dict[K, V]) Remove(key K) Dict[K, V] {
	root, changed := d.root.removed0(key, d.keyEq.Hash(key), 0, d.keyEq)
	if changed {
		return Dict[K, V]{root: root, keyEq: d.keyEq}
	}
	return d
}
// Iterator returns an iterator over all entries of the dictionary.
// Iteration order is determined by the trie structure and is deterministic
// for a given set of keys, but otherwise unspecified.
func (d Dict[K, V]) Iterator() iterable.Iterator[dict.Entry[K, V]] {
	return d.root.iterator()
}

// Keys returns an iterable over the keys in the dictionary.
func (d Dict[K, V]) Keys() iterable.Iterable[K] {
	return iterable.Map[dict.Entry[K, V], K](d, func(e dict.Entry[K, V]) K { return e.Key })
}

// Values returns an iterable over the values in the dictionary.
func (d Dict[K, V]) Values() iterable.Iterable[V] {
	return iterable.Map[dict.Entry[K, V], V](d, func(e dict.Entry[K, V]) V { return e.Value })
}

// Size returns the number of entries in the dictionary.
func (d Dict[K, V]) Size() int {
	return d.root.size()
}

// String renders the dictionary for debugging.
func (d Dict[K, V]) String() string {
	return iterable.String[dict.Entry[K, V]](d)
}
// MergeOpts configures a merge of two dictionaries. Each function returns
// the resulting value and whether the key should be kept in the result:
// Left handles keys present only in the left dict, Right keys present only
// in the right dict, and Both keys present in both. A nil Left/Right drops
// the corresponding one-sided keys.
type MergeOpts[K, A, B, C any] struct {
	Left  func(K, A) (C, bool)
	Right func(K, B) (C, bool)
	Both  func(K, A, B) (C, bool)
}

// intern converts the public options into the internal representation used
// by the trie-level merge, adding an argument-flipped variant of Both so
// the merge can visit nodes in either order.
func (o MergeOpts[K, A, B, C]) intern(eq hash.EqHash[K]) mergeOpts[K, A, B, C] {
	return mergeOpts[K, A, B, C]{
		eq:       eq,
		mergeFun: o.Both,
		mergeFun2: func(k K, b B, a A) (C, bool) {
			return o.Both(k, a, b)
		},
		transformA: o.Left,
		transformB: o.Right,
	}
}
// Merge combines two hash dictionaries into a new one according to opts.
// Both dictionaries are assumed to use the same key equality/hash; the
// left one's is used for the result.
func Merge[K, A, B, C any](left Dict[K, A], right Dict[K, B], opts MergeOpts[K, A, B, C]) Dict[K, C] {
	newRoot := merge(left.root, right.root, 0, opts.intern(left.keyEq))
	return Dict[K, C]{
		root:  newRoot,
		keyEq: left.keyEq,
	}
}
// MergeIterable merges a dictionary with an arbitrary iterable of entries
// according to opts. When right is itself a Dict with the same key type,
// the faster trie-level Merge is used instead of iterating.
func MergeIterable[K, A, B, C any](left Dict[K, A], right iterable.Iterable[dict.Entry[K, B]], opts MergeOpts[K, A, B, C]) Dict[K, C] {
	switch rightD := right.(type) {
	case Dict[K, B]:
		// special merge with other hash dictionaries using the same key:
		// we assume here that the same equality and hash code are used
		return Merge(left, rightD, opts)
	}
	res := New[K, C](left.keyEq)
	// keys seen in right; only needed (and only populated) when opts.Left
	// has to be applied to the remaining left-only keys afterwards
	keys := New[K, struct{}](left.keyEq)
	// handle entries in right
	for it := right.Iterator(); ; {
		e, ok := it.Next()
		if !ok {
			break
		}
		dv, ok := left.Get(e.Key)
		var newV C
		keep := false
		if ok {
			// key present on both sides
			newV, keep = opts.Both(e.Key, dv, e.Value)
		} else if opts.Right != nil {
			// key present only in right
			newV, keep = opts.Right(e.Key, e.Value)
		}
		if keep {
			res = res.Set(e.Key, newV)
		}
		if opts.Left != nil {
			keys = keys.Set(e.Key, struct{}{})
		}
	}
	if opts.Left != nil {
		// add keys that appear in left but not in right
		for it := iterable.Start[dict.Entry[K, A]](left); it.HasNext(); it.Next() {
			e := it.Current()
			if !keys.ContainsKey(e.Key) {
				newV, keep := opts.Left(e.Key, e.Value)
				if keep {
					res = res.Set(e.Key, newV)
				}
			}
		}
	}
	return res
}
// MergeAll merges the given collection of entries into this dictionary.
func (d Dict[K, V]) MergeAll(other iterable.Iterable[dict.Entry[K, V]], opts MergeOpts[K, V, V, V]) Dict[K, V] {
	return MergeIterable(d, other, opts)
}

// Merge the given values into the dictionary.
// Entries appearing on only one side are kept unchanged; if an entry
// appears on both sides, the merge function is called to determine the new value.
func (d Dict[K, V]) Merge(other iterable.Iterable[dict.Entry[K, V]], mergeFun func(K, V, V) V) Dict[K, V] {
	return MergeIterable(d, other, MergeOpts[K, V, V, V]{
		Left:  func(k K, a V) (V, bool) { return a, true },
		Right: func(k K, b V) (V, bool) { return b, true },
		Both:  func(k K, a V, b V) (V, bool) { return mergeFun(k, a, b), true },
	})
}

// MergeLeft merges the given values into the dictionary.
// If an entry appears on both sides, the value from the left side is used.
func (d Dict[K, V]) MergeLeft(other iterable.Iterable[dict.Entry[K, V]]) Dict[K, V] {
	return d.Merge(other, func(k K, v1, v2 V) V {
		return v1
	})
}

// MergeRight merges the given values into the dictionary.
// If an entry appears on both sides, the value from the right side is used
// (the original comment incorrectly said "left").
func (d Dict[K, V]) MergeRight(other iterable.Iterable[dict.Entry[K, V]]) Dict[K, V] {
	return d.Merge(other, func(k K, v1, v2 V) V {
		return v2
	})
}
// checkInvariant validates the internal structure of the trie. It is
// intended for tests and debugging; it returns an error describing the
// first violated invariant, or nil when the dictionary is well-formed.
func (d Dict[K, V]) checkInvariant() error {
	if d.root == nil {
		return fmt.Errorf("root is nil")
	}
	if d.keyEq == nil {
		return fmt.Errorf("keyEq is nil")
	}
	return d.root.checkInvariant(0, 0, d.keyEq)
}

// FilterMap transforms and filters a dictionary in a single pass: f returns
// the new value for each entry and whether the entry should be kept.
func FilterMap[K, A, B any](d Dict[K, A], f func(K, A) (B, bool)) Dict[K, B] {
	return Dict[K, B]{
		keyEq: d.keyEq,
		root:  filterMap(d.root, 0, d.keyEq, f),
	}
}

// FilterMap is the method form of the package-level FilterMap for the case
// where the value type does not change.
func (d Dict[K, V]) FilterMap(f func(K, V) (V, bool)) Dict[K, V] {
	return FilterMap(d, f)
}
// Map transforms every value of d with f, producing a dictionary with the
// same keys and the mapped values.
func Map[K, A, B any](d Dict[K, A], f func(K, A) B) Dict[K, B] {
	return Dict[K, B]{
		keyEq: d.keyEq,
		root: filterMap(d.root, 0, d.keyEq, func(key K, value A) (B, bool) {
			return f(key, value), true
		}),
	}
}

// Map is the method form of the package-level Map for the case where the
// value type does not change.
func (d Dict[K, V]) Map(f func(K, V) V) Dict[K, V] {
	return Map(d, f)
}

// Filter returns a dictionary containing only the entries for which cond
// returns true.
func (d Dict[K, V]) Filter(cond func(K, V) bool) Dict[K, V] {
	return Dict[K, V]{
		keyEq: d.keyEq,
		root: filterMap(d.root, 0, d.keyEq, func(key K, value V) (V, bool) {
			return value, cond(key, value)
		}),
	}
}
// notEqual is a sentinel error.
// NOTE(review): it is not referenced anywhere in this file — presumably
// used (or left over) elsewhere in the package; confirm before removing.
var notEqual = fmt.Errorf("not equal")

// Equal reports whether both dictionaries contain exactly the same entries,
// comparing values with eq and keys with d's key equality. The named result
// res is unused in the body and kept only for interface stability.
func (d Dict[K, V]) Equal(other Dict[K, V], eq equality.Equality[V]) (res bool) {
	if d.Size() != other.Size() {
		return false
	}
	// we can use iterators, since the iteration order of a trie is deterministic
	// there is some optimization potential with a recursive equal function that uses reference equality of subtrees
	it1 := d.Iterator()
	it2 := other.Iterator()
	for {
		e1, ok1 := it1.Next()
		e2, ok2 := it2.Next()
		// since sizes are equal, ok1 == ok2
		if !ok1 && !ok2 {
			return true
		}
		if !d.keyEq.Equal(e1.Key, e2.Key) || !eq.Equal(e1.Value, e2.Value) {
			return false
		}
	}
}
package selector
import (
"bytes"
"path/filepath"
"regexp"
"github.com/CanalTP/mq/stomp/selector/parse"
)
// state represents the state of an execution. It's not part of the
// statement so that multiple executions of the same statement
// can execute in parallel.
type state struct {
	node parse.Node // node currently being evaluated, for error reporting
	vars Row        // header/variable bindings the selector is evaluated against
}

// at marks the state to be on node n, for error reporting.
func (s *state) at(node parse.Node) {
	s.node = node
}
// walk recursively evaluates the boolean expression tree rooted at node and
// returns its truth value. And/Or nodes short-circuit via Go's && and ||.
// Panics on unknown node types (a parser bug).
func (s *state) walk(node parse.BoolExpr) bool {
	s.at(node)
	switch node := node.(type) {
	case *parse.ComparisonExpr:
		return s.eval(node)
	case *parse.AndExpr:
		return s.walk(node.Left) && s.walk(node.Right)
	case *parse.OrExpr:
		return s.walk(node.Left) || s.walk(node.Right)
	case *parse.NotExpr:
		return !s.walk(node.Expr)
	case *parse.ParenBoolExpr:
		return s.walk(node.Expr)
	default:
		panic("invalid node type")
	}
}
// eval dispatches a single comparison node to the evaluator for its
// operator. Each negated operator is evaluated as the negation of its
// positive counterpart. Panics on unknown operators (a parser bug).
func (s *state) eval(node *parse.ComparisonExpr) bool {
	switch node.Operator {
	case parse.OperatorEq:
		return s.evalEq(node)
	case parse.OperatorGt:
		return s.evalGt(node)
	case parse.OperatorGte:
		return s.evalGte(node)
	case parse.OperatorLt:
		return s.evalLt(node)
	case parse.OperatorLte:
		return s.evalLte(node)
	case parse.OperatorNeq:
		return !s.evalEq(node)
	case parse.OperatorGlob:
		return s.evalGlob(node)
	case parse.OperatorNotGlob:
		return !s.evalGlob(node)
	case parse.OperatorRe:
		return s.evalRegexp(node)
	case parse.OperatorNotRe:
		return !s.evalRegexp(node)
	case parse.OperatorIn:
		return s.evalIn(node)
	case parse.OperatorNotIn:
		return !s.evalIn(node)
	default:
		panic("invalid operator type") // fixed typo: was "inalid operator type"
	}
}
// evalEq reports whether both operands have byte-identical values.
func (s *state) evalEq(node *parse.ComparisonExpr) bool {
	left := s.toValue(node.Left)
	right := s.toValue(node.Right)
	return bytes.Equal(left, right)
}

// evalGt reports whether the left operand sorts strictly after the right
// (lexicographic byte comparison).
func (s *state) evalGt(node *parse.ComparisonExpr) bool {
	left := s.toValue(node.Left)
	right := s.toValue(node.Right)
	return bytes.Compare(left, right) > 0
}

// evalGte reports whether the left operand sorts at or after the right.
func (s *state) evalGte(node *parse.ComparisonExpr) bool {
	left := s.toValue(node.Left)
	right := s.toValue(node.Right)
	return bytes.Compare(left, right) >= 0
}

// evalLt reports whether the left operand sorts strictly before the right.
func (s *state) evalLt(node *parse.ComparisonExpr) bool {
	left := s.toValue(node.Left)
	right := s.toValue(node.Right)
	return bytes.Compare(left, right) < 0
}

// evalLte reports whether the left operand sorts at or before the right.
func (s *state) evalLte(node *parse.ComparisonExpr) bool {
	left := s.toValue(node.Left)
	right := s.toValue(node.Right)
	return bytes.Compare(left, right) <= 0
}
// evalGlob matches the left operand against the right operand interpreted
// as a filepath glob pattern. A malformed pattern counts as no match (the
// error from filepath.Match is deliberately discarded).
func (s *state) evalGlob(node *parse.ComparisonExpr) bool {
	match, _ := filepath.Match(
		string(s.toValue(node.Right)),
		string(s.toValue(node.Left)),
	)
	return match
}

// evalRegexp matches the left operand against the right operand interpreted
// as a regular expression. A malformed pattern counts as no match.
// NOTE(review): the pattern is recompiled on every evaluation; caching the
// compiled regexp would help on hot paths.
func (s *state) evalRegexp(node *parse.ComparisonExpr) bool {
	match, _ := regexp.Match(
		string(s.toValue(node.Right)),
		s.toValue(node.Left),
	)
	return match
}
// evalIn reports whether the left operand equals any element of the array
// literal on the right. Panics if the right operand is not an array
// literal (a parser bug).
func (s *state) evalIn(node *parse.ComparisonExpr) bool {
	arr, ok := node.Right.(*parse.ArrayLit)
	if !ok {
		panic("expected array literal")
	}
	target := s.toValue(node.Left)
	for _, candidate := range arr.Values {
		if bytes.Equal(target, s.toValue(candidate)) {
			return true
		}
	}
	return false
}
// toValue resolves a value expression to its raw bytes: a field reference
// is looked up in the row's variables, a literal yields its stored value.
// Panics on unknown expression types (a parser bug).
func (s *state) toValue(expr parse.ValExpr) []byte {
	if field, ok := expr.(*parse.Field); ok {
		return s.vars.Field(field.Name)
	}
	if lit, ok := expr.(*parse.BasicLit); ok {
		return lit.Value
	}
	panic("invalid expression type")
}
// errRecover is the handler that turns panics into returns; it is meant to
// be installed with defer around an evaluation.
// NOTE(review): the type assertion e.(error) re-panics when the recovered
// value is not an error — the evaluator above panics with plain strings
// ("invalid node type", ...); confirm callers only expect error panics here.
func errRecover(err *error) {
	if e := recover(); e != nil {
		*err = e.(error)
	}
}
package bitset
import (
"strconv"
"strings"
)
// A set256 represents a set of integers in the range [0, 256).
// It does so more efficiently than a Dense set of capacity 256.
// For efficiency, the methods of set256 perform no bounds checking on their
// arguments.
type set256 struct {
	sets [4]Set64 // sets[i] holds the members in [64*i, 64*(i+1))
}
// copy returns an independent copy of the set.
func (s *set256) copy() subber {
	dup := *s
	return &dup
}

// add inserts n into the set.
func (s *set256) add(n uint8) {
	s.sets[n>>6].Add(n & 63)
}

func (s *set256) add64(e uint64) { s.add(uint8(e)) }

// remove deletes n from the set.
func (s *set256) remove(n uint8) {
	s.sets[n>>6].Remove(n & 63)
}

// remove64 deletes e (truncated to uint8) and reports whether the set is
// now empty.
func (s *set256) remove64(e uint64) bool {
	s.remove(uint8(e))
	return s.empty()
}
// contains reports whether n is a member of the set.
func (s *set256) contains(n uint8) bool {
	return s.sets[n>>6].Contains(n & 63)
}

func (s *set256) contains64(e uint64) bool { return s.contains(uint8(e)) }

// empty reports whether the set has no members.
func (s *set256) empty() bool {
	for i := range s.sets {
		if !s.sets[i].Empty() {
			return false
		}
	}
	return true
}

// len returns the number of members in the set.
func (s *set256) len() int {
	total := 0
	for i := range s.sets {
		total += s.sets[i].Len()
	}
	return total
}
// equal reports whether both sets (the argument must be a *set256) contain
// exactly the same members.
func (s1 *set256) equal(b subber) bool {
	s2 := b.(*set256)
	for i := range s1.sets {
		if s1.sets[i] != s2.sets[i] {
			return false
		}
	}
	return true
}
// position returns the 0-based position of n in the set. If
// the set is {3, 8, 15}, then the position of 8 is 1.
// The second return value reports whether n is a member of b; when n is
// not a member, the returned position is where it would be inserted.
func (b *set256) position(n uint8) (int, bool) {
	i := n / 64
	// Sum the sizes of all words before the one containing n.
	pos := 0
	for j := uint8(0); j < i; j++ {
		pos += b.sets[j].Len()
	}
	p, ok := b.sets[i].position(n % 64)
	return pos + p, ok
}
// addIn adds all members of sub (which must be a *set256) to s1.
func (s1 *set256) addIn(sub subber) {
	s2 := sub.(*set256)
	for i := range s1.sets {
		s1.sets[i].AddIn(s2.sets[i])
	}
}

// removeIn removes all members of sub from s1 and reports whether s1 is
// empty afterwards.
func (s1 *set256) removeIn(sub subber) (empty bool) {
	s2 := sub.(*set256)
	for i := range s1.sets {
		s1.sets[i].RemoveIn(s2.sets[i])
	}
	return s1.empty()
}

// removeNotIn removes from s1 every member that is not also in sub and
// reports whether s1 is empty afterwards.
func (s1 *set256) removeNotIn(sub subber) (empty bool) {
	s2 := sub.(*set256)
	for i := range s1.sets {
		s1.sets[i].RemoveNotIn(s2.sets[i])
	}
	return s1.empty()
}
// elements streams the members of the set, each increased by offset, to f
// in ascending order, in chunks of at most 64 values (one chunk per word).
// It stops and returns false as soon as f returns false; otherwise it
// returns true.
func (s *set256) elements(f func([]uint64) bool, offset uint64) bool {
	var buf [64]uint64
	for i, ss := range s.sets {
		n := ss.populate64(&buf)
		// shift this word's 0..63 values into their absolute range
		offset2 := offset + uint64(64*i)
		for j := range buf[:n] {
			buf[j] += offset2
		}
		if !f(buf[:n]) {
			return false
		}
	}
	return true
}
// String renders the set in set notation, e.g. "{1, 2, 3}".
func (s set256) String() string {
	var sb strings.Builder
	sb.WriteByte('{')
	written := 0
	s.elements(func(elts []uint64) bool {
		for _, e := range elts {
			if written > 0 {
				sb.WriteString(", ")
			}
			sb.WriteString(strconv.FormatUint(e, 10))
			written++
		}
		return true
	}, 0)
	sb.WriteByte('}')
	return sb.String()
}
// memSize reports the in-memory size of the set in bytes.
func (s *set256) memSize() uint64 { return memSize(*s) }
package example
import (
"log"
"time"
)
// SubTest is a sample nested configuration struct used to exercise xconf
// tag handling, including the "notleaf" option on MapNotLeaf.
type SubTest struct {
	HTTPAddress string         `xconf:"http_address"`
	MapNotLeaf  map[string]int `xconf:"map_not_leaf,notleaf"`
	Map2        map[string]int `xconf:"map2"`
	Map3        map[string]int `xconf:"map3"`
	Slice2      []int64        `xconf:"slice2"`
}
// NOTE(review): the trailing comments and annotation@Name(...) markers
// inside this function are input to the optiongen code generator (see the
// go:generate directive) — they are preserved verbatim, including the
// non-English ones, and must not be edited casually.

// Google Public DNS provides two distinct DoH APIs at these endpoints
// Using the GET method can reduce latency, as it is cached more effectively.
// RFC 8484 GET requests must have a ?dns= query parameter with a Base64Url encoded DNS message. The GET method is the only method supported for the JSON API.
//go:generate optiongen --option_with_struct_name=false --new_func=NewFuncNameSpecified --xconf=true --usage_tag_name=usage --new_func_return=interface
func ConfigOptionDeclareWithDefault() interface{} {
	return map[string]interface{}{
		// test comment 1
		// annotation@TestNil(option="WithTTTTTTTT")
		"TestNil": nil, // test comment 3
		"TestInt": 32, // @MethodComment(这里是函数注释1,"test") @MethodComment(这里是函数注释2)
		"TestInt64": int64(32),
		"TestSliceInt": []int{1, 2, 3},
		"TestSliceInt64": []int64{1, 2, 3},
		"TestSliceString": []string{"test1", "test2"},
		"TestSliceBool": []bool{false, true},
		"TestSliceIntNil": []int(nil),
		"TestSliceByte": []byte(nil),
		"TestSliceIntEmpty": []int{},
		"TestHTTPPort": "",
		"TestEmptyMap": map[int]int{},
		"TestMapIntInt": map[int]int{1: 1, 2: 2, 3: 3},
		"TestMapIntString": map[int]string{1: "test"},
		"TestMapStringInt": map[string]int{"test": 1},
		"TestMapStringString": map[string]string{"test": "test"},
		"TestString": "Meow",
		"Food": (*string)(nil),
		"Walk": func() {
			log.Println("Walking")
		},
		"TestNilFunc": (func())(nil), // 中文1
		// annotation@TestParamterBool(arg=1)
		"TestParamterBool": false, // reserved parameter 1
		// annotation@TestParamterStr(arg=22)
		"TestParamterStr": "", // reserved parameter 2
		// annotation@TestProtected(private="true")
		"TestProtected": []byte(nil),
		// annotation@fOO(inline="true")
		"fOO": (*FOO)(nil),
		// annotation@Paths(inline="true")
		"Paths": Paths(Paths{}),
		"SubTest": (*SubTest)(&SubTest{}),
		"SpecSub": (*spec)(NewSpec()), // annotation@SpecSub(getter="SpecVisitor")
	}
}
// FOO is a sample struct embedded inline into the generated options via
// the annotation@fOO(inline="true") marker above.
type FOO struct {
	// NOTE(review): the underscore violates Go naming conventions, but the
	// field name is part of the generated config surface, so it stays.
	Usernames_Passwords map[string]string
}

// Paths is a sample struct embedded inline into the generated options.
type Paths struct {
	Path1 string
	Path2 string
}
// NOTE(review): the comments and annotation@Name(...) markers inside this
// function are input to the optiongen code generator and are preserved
// verbatim.

// HTTP parsing and communication with DNS resolver was successful, and the response body content is a DNS response in either binary or JSON encoding,
// depending on the query endpoint, Accept header and GET parameters.
//go:generate optiongen --option_prefix=WithServer --option_return_previous=false --xconf=true
func specOptionDeclareWithDefault() interface{} {
	return map[string]interface{}{
		// test comment 5
		// test comment 6
		// annotation@TestNil1(comment="method commnet", private="true", xconf="test_nil1")
		"TestNil1": nil, // test comment 1
		"TestBool1": false, // test comment 2
		"TestInt1": 32, // @MethodComment(这里是函数注释3) @MethodComment(这里是函数注释4)
		"TestNilFunc1": (func())(nil), // 中文2
		"TestReserved2_": []byte(nil), // sql.DB对外暴露出了其运行时的状态db.DBStats,sql.DB在关闭,创建,释放连接时候,会维护更新这个状态。
		// 我们可以通过prometheus来收集连接池状态,然后在grafana面板上配置指标,使指标可以动态的展示。
		"TestReserved2Inner1": 1,
	}
}
// Timeouts groups read/write/connect timeouts; the `default` tags supply
// the values used when a config file omits them.
type Timeouts struct {
	ReadTimeout  time.Duration `xconf:"read_timeout" default:"5s"`
	WriteTimeout time.Duration `xconf:"write_timeout" default:"10s"`
	ConnTimeout  time.Duration `xconf:"conn_timeout" default:"20s"`
}
// NOTE(review): the annotation@Name(...) markers inside this function are
// input to the optiongen code generator and are preserved verbatim.
//go:generate optiongen --option_with_struct_name=true --usage_tag_name=usage --option_return_previous=false
func ETCDOptionDeclareWithDefault() interface{} {
	return map[string]interface{}{
		// annotation@Endpoints(comment="etcd地址")
		"Endpoints": []string{"10.0.0.1", "10.0.0.2"},
		// annotation@TimeoutsPointer(comment="timeout设置")
		"TimeoutsPointer": (*Timeouts)(&Timeouts{}),
		// annotation@writeTimeout(private="true",arg=1)
		"writeTimeout": time.Duration(time.Second),
		// annotation@Redis(getter="RedisVisitor")
		"Redis": (*Redis)(NewRedis()),
	}
}
// NOTE(review): exercises every scalar/slice/map type the generator
// supports; annotation@Name(...) markers are generator input and are
// preserved verbatim (including the "TypeSliceDuratuon" key typo, which is
// part of the generated config surface).
//go:generate optiongen --option_with_struct_name=false --xconf=true --empty_composite_nil=true --usage_tag_name=usage --xconf=true
func AllConfigOptionDeclareWithDefault() interface{} {
	return map[string]interface{}{
		"TypeBool": false,
		"TypeString": "a",
		"TypeDuration": time.Duration(time.Second),
		"TypeFloat32": float32(32.32),
		"TypeFloat64": float32(64.64),
		"TypeInt": 32,
		"TypeUint": 32,
		"TypeInt8": int8(8),
		"TypeUint8": uint8(8),
		"TypeInt16": int16(16),
		"TypeUint16": uint16(16),
		"TypeInt32": int32(32),
		"TypeUint32": uint32(32),
		"TypeInt64": int64(64),
		"TypeUint64": uint64(64),
		"TypeSliceInt": []int{1, 2, 3, 4},
		"TypeSliceUint": []uint{1, 2, 3, 4},
		"TypeSliceInt8": []int8{1, 2, 3, 4},
		"TypeSliceUint8": []uint8{1, 2, 3, 4},
		"TypeSliceInt16": []int16{1, 2, 3, 4},
		"TypeSliceUin16": []uint16{1, 2, 3, 4},
		"TypeSliceInt32": []int32{1, 2, 3, 4},
		"TypeSliceUint32": []uint32{1, 2, 3, 4},
		"TypeSliceInt64": []int64{1, 2, 3, 4},
		"TypeSliceUint64": []uint64{1, 2, 3, 4},
		"TypeSliceString": []string{"a", "b", "c"},
		"TypeSliceFloat32": []float32{1.32, 2.32, 3.32, 4.32},
		"TypeSliceFloat64": []float64{1.64, 2.64, 3.64, 4.64},
		"TypeSliceDuratuon": []time.Duration([]time.Duration{time.Second, time.Minute, time.Hour}),
		// annotation@TypeMapStringIntNotLeaf(xconf="type_map_string_int_not_leaf,notleaf")
		"TypeMapStringIntNotLeaf": map[string]int{"a": 1, "b": 2},
		"TypeMapStringInt": map[string]int{"a": 1, "b": 2},
		"TypeMapIntString": map[int]string{1: "a", 2: "b"},
		"TypeMapStringString": map[string]string{"a": "a", "b": "b"},
		"TypeMapIntInt": map[int]int{1: 1, 2: 2},
		"TypeMapStringDuration": map[string]time.Duration(map[string]time.Duration{"read": time.Second, "write": time.Second * 5}),
		// annotation@Redis(getter="RedisVisitor")
		"Redis": (*Redis)(NewRedis()),
		"ETCD": (*ETCD)(NewETCD(time.Second)),
		"TestInterface": (interface{})(nil),
	}
}
// WatchError is the callback signature invoked when a config loader named
// loaderName fails while watching confPath.
type WatchError = func(loaderName string, confPath string, watchErr error)

// NOTE(review): comments inside this function are optiongen generator
// input and are preserved verbatim.
//go:generate optiongen --option_with_struct_name=true --xconf=true --usage_tag_name=usage --xconf=true
func RedisOptionDeclareWithDefault() interface{} {
	return map[string]interface{}{
		"Endpoints": []string{"192.168.0.1", "192.168.0.2"},
		"Cluster": true,
		"TimeoutsStruct": (Timeouts)(Timeouts{}),
	}
}
// optionUsage holds a sample generator-input snippet that is exposed
// verbatim through the OptionUsage option below; the raw string content is
// data and must stay unchanged.
var optionUsage = `
func ETCDOptionDeclareWithDefault() interface{} {
	return map[string]interface{}{
		// annotation@Endpoints(comment="etcd地址")
		"Endpoints": []string{"10.0.0.1", "10.0.0.2"},
		// annotation@TimeoutsPointer(comment="timeout设置")
		"TimeoutsPointer": (*Timeouts)(&Timeouts{}),
		// annotation@writeTimeout(private="true",arg=1)
		"writeTimeout": time.Duration(time.Second),
		// annotation@Redis(getter="RedisVisitor")
		"Redis": (*Redis)(NewRedis()),
	}
}
`
// NOTE(review): comments inside this function (including the annotation
// markers and their text, typos and all) are optiongen generator input and
// are preserved verbatim.
//go:generate optiongen --option_with_struct_name=true --debug=false --xconf=true --usage_tag_name=usage
func XXXXXXOptionDeclareWithDefault() interface{} {
	return map[string]interface{}{
		"OptionUsage": string(optionUsage),
		"Endpoints": []string{"10.0.0.1", "10.0.0.2"},
		"ReadTimeout": time.Duration(time.Second),
		"TypeMapIntString": map[int]string{1: "a", 2: "b"},
		"TypeSliceInt64": []int64{1, 2, 3, 4},
		"TypeBool": false,
		"MapRedis": (map[string]*Redis)(map[string]*Redis{"test": NewRedis()}),
		// annotation@Redis(getter="RedisVisitor",deprecated="use MapRedis intead")
		"Redis": (*Redis)(NewRedis()), // 辅助指定类型为*Redis
		"OnWatchError": WatchError(nil), // 辅助指定类型为WatchError
		"OnWatchErrorNotNil": func(loaderName string, confPath string, watchErr error) {},
		"TypeSliceDuratuon": []time.Duration([]time.Duration{time.Second, time.Minute, time.Hour}), // 辅助指定类型为WatchError
	}
}
package main
// recommend pairs a risk description with its remediation advice for a
// cloudsploit plugin finding.
type recommend struct {
	Risk           string `json:"risk,omitempty"`
	Recommendation string `json:"recommendation,omitempty"`
}
// pluginMetaData holds the scoring, remediation and tagging metadata for a
// single cloudsploit plugin.
type pluginMetaData struct {
	Score     float32   // severity score; values in this file range 0.3-0.8 — TODO confirm intended scale
	Recommend recommend // risk description and remediation text
	Tag       []string  // classification tags, e.g. "pci", "hipaa", "reliability"
}
// pluginMap maps cloudsploit plugin meta data.
// key: `{Categor}/{Plugin}`, value: tag
var pluginMap = map[string]pluginMetaData{
categoryCLB + "/clbCDNEnabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `CLB CDN Enabled
- Ensures that Cloud CDN is enabled on all load balancers
- Cloud CDN increases speed and reliability as well as lowers server costs.
- Enabling CDN on load balancers creates a highly available system and is part of GCP best practices.`,
Recommendation: `Enable Cloud CDN on all load balancers from the network services console.
- https://cloud.google.com/cdn/docs/quickstart`,
},
},
categoryCLB + "/clbHttpsOnly": {
Score: 0.3,
Tag: []string{"hippa", "pci"},
Recommend: recommend{
Risk: `CLB HTTPS Only
- Ensures that HTTP(S) CLBs are configured to only accept connections on HTTPS ports.
- For maximum security, CLBs can be configured to only accept HTTPS connections. Standard HTTP connections will be blocked.
- This should only be done if the client application is configured to query HTTPS directly and not rely on a redirect from HTTP.`,
Recommendation: `Remove non-HTTPS listeners from the load balancer.
- https://cloud.google.com/vpc/docs/vpc`,
},
},
categoryCLB + "/clbNoInstances": {
Score: 0.3,
Tag: []string{"operation"},
Recommend: recommend{
Risk: `CLB No Instances
- Detects CLBs that have no backend instances attached
- GCP does not allow for Load Balancers to be configured without backend instances attached.`,
Recommendation: `This security misconfiguration is covered by GCP. No action is necessary.
- https://cloud.google.com/load-balancing/docs/load-balancing-overview`,
},
},
categoryCLB + "/clbSecurityPolicyEnabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Security Policy Enabled
- Ensures all backend services have an attached security policy
- Security policies on backend services control the traffic on the load balancer.
- This creates edge security and can deny or allow specified IP addresses.`,
Recommendation: `Ensure all load balancers have an attached Cloud Armor security policy.
- https://cloud.google.com/armor/docs/security-policy-concepts`,
},
},
categoryCompute + "/autoscaleEnabled": {
Score: 0.3,
Tag: []string{"reliability"},
Recommend: recommend{
Risk: `Autoscale Enabled
- Ensures instance groups have autoscale enabled for high availability
- Enabling autoscale increases efficiency and improves cost management for resources.`,
Recommendation: `Ensure autoscaling is enabled for all instance groups.
- https://cloud.google.com/compute/docs/autoscaler/`,
},
},
categoryCompute + "/connectSerialPortsDisabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Connect Serial Ports Disabled
- Ensures connecting to serial ports is not enabled for VM instances
- The serial console does not allow restricting IP Addresses, which allows any IP address to connect to instance and should therefore be disabled.`,
Recommendation: `Ensure the Enable Connecting to Serial Ports option is disabled for all compute instances.
- https://cloud.google.com/compute/docs/instances/interacting-with-serial-console`,
},
},
categoryCompute + "/csekEncryptionEnabled": {
Score: 0.3,
Tag: []string{"hipaa", "pci"},
Recommend: recommend{
Risk: `CSEK Encryption Enabled
- Ensures Customer Supplied Encryption Key Encryption is enabled on disks
- Google encrypts all disks at rest by default.
- By using CSEK only the users with the key can access the disk.
- Anyone else, including Google, cannot access the disk data.`,
Recommendation: `CSEK can only be configured when creating a disk.
- Delete the disk and redeploy with CSEK.
- https://cloud.google.com/compute/docs/disks/customer-supplied-encryption`,
},
},
categoryCompute + "/instanceLeastPrivilege": {
Score: 0.6,
Tag: []string{"pci"},
Recommend: recommend{
Risk: `VM Instances Least Privilege
- Ensures that instances are not configured to use the default service account with full access to all cloud APIs
- To support the principle of least privilege and prevent potential privilege escalation, it is recommended that instances are not assigned to the default service account, Compute Engine default service account with a scope allowing full access to all cloud APIs.`,
Recommendation: `For all instances, if the default service account is used, ensure full access to all cloud APIs is not configured.
- https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances`,
},
},
categoryCompute + "/instanceLevelSSHOnly": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Instance Level SSH Only
- Ensures that instances are not configured to allow project-wide SSH keys
- To support the principle of least privilege and prevent potential privilege escalation it is recommended that instances are not give access to project-wide SSH keys through instance metadata.`,
Recommendation: `Ensure project-wide SSH keys are blocked for all instances.
- https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys`,
},
},
categoryCompute + "/instanceMaxCount": {
Score: 0.3,
Tag: []string{"reliability"},
Recommend: recommend{
Risk: `VM Max Instances
- Ensures the total number of VM instances does not exceed a set threshold
- The number of running VM instances should be carefully audited, especially in unused regions, to ensure only approved applications are consuming compute resources.
- Many compromised Google accounts see large numbers of VM instances launched.`,
Recommendation: `Ensure that the number of running VM instances matches the expected count.
- If instances are launched above the threshold, investigate to ensure they are legitimate.
- https://cloud.google.com/compute/docs/instances/`,
},
},
categoryCompute + "/instancesMultiAz": {
Score: 0.3,
Tag: []string{"reliability"},
Recommend: recommend{
Risk: `Instances Multi AZ
- Ensures managed instances are regional for availability purposes.
- Creating instances in a single zone creates a single point of failure for all systems in the VPC.
- All managed instances should be created as Regional to ensure proper failover.`,
Recommendation: `Launch new instances as regional instance groups.
- https://cloud.google.com/vpc/docs/vpc`,
},
},
categoryCompute + "/ipForwardingDisabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `IP Forwarding Disabled
- Ensures that IP forwarding is disabled on all instances
- Disabling IP forwarding ensures that the instance only sends and receives packets with matching destination or source IPs.`,
Recommendation: `IP forwarding settings can only be chosen when creating a new instance.
- Delete the affected instances and redeploy with IP forwarding disabled.
- https://cloud.google.com/vpc/docs/using-routes`,
},
},
categoryCompute + "/osLoginEnabled": {
Score: 0.3,
Tag: []string{"pci"},
Recommend: recommend{
Risk: `OS Login Enabled
- Ensures OS login is enabled for the project
- Enabling OS login ensures that SSH keys used to connect to instances are mapped with IAM users.`,
Recommendation: `Set enable-oslogin in project-wide metadata so that it applies to all of the instances in the project.
- https://cloud.google.com/compute/docs/instances/managing-instance-access`,
},
},
categoryCryptographicKeys + "/keyRotation": {
Score: 0.3,
Tag: []string{"hipaa", "pci"},
Recommend: recommend{
Risk: `Key Rotation
- Ensures cryptographic keys are set to rotate on a regular schedule
- All cryptographic keys should have key rotation enabled.
- Google will handle the rotation of the encryption key itself, as well as storage of previous keys, so previous data does not need to be re-encrypted before the rotation occurs.`,
Recommendation: `Ensure that cryptographic keys are set to rotate.
- https://cloud.google.com/vpc/docs/using-cryptoKeys`,
},
},
categoryDNS + "/dnsSecEnabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `DNS Security Enabled
- Ensures that DNS Security is enabled on all managed zones
- DNS Security is a feature that authenticates all responses to domain name lookups.
- This prevents attackers from committing DNS hijacking or man in the middle attacks.`,
Recommendation: `Ensure DNSSEC is enabled for all managed zones in the cloud DNS service.
- https://cloud.google.com/dns/docs/dnssec`,
},
},
categoryDNS + "/dnsSecSigningAlgorithm": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `DNS Security Signing Algorithm
- Ensures that DNS Security is not using the RSASHA1 algorithm for key or zone signing
- DNS Security is a feature that authenticates all responses to domain name lookups.
- This prevents attackers from committing DNS hijacking or man in the middle attacks.`,
Recommendation: `Ensure that all managed zones using DNSSEC are not using the RSASHA1 algorithm for key or zone signing.
- https://cloud.google.com/dns/docs/dnssec`,
},
},
categoryIAM + "/corporateEmailsOnly": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Corporate Emails Only
- Ensures that no users are using their Gmail accounts for access to GCP.
- Gmail accounts are personally created and are not controlled by organizations.
- Fully managed accounts are recommended for increased visibility, auditing and control over access to resources.`,
Recommendation: `Ensure that no users are actively using their Gmail accounts to access GCP.
- https://cloud.google.com/iam/docs/overview`,
},
},
categoryIAM + "/kmsUserSeparation": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `KMS User Separation
- Ensures that no users have the KMS admin role and any one of the CryptoKey roles.
- Ensuring that no users have the KMS admin role and any one of the CryptoKey roles follows separation of duties, where no user should have access to resources out of the scope of duty.`,
Recommendation: `Ensure that no service accounts have both the KMS admin role and any of CryptoKey roles attached.
- https://cloud.google.com/iam/docs/overview`,
},
},
categoryIAM + "/serviceAccountAdmin": {
Score: 0.6,
Tag: []string{},
Recommend: recommend{
Risk: `Service Account Admin
- Ensures that user managed service accounts do not have any admin, owner, or write privileges.
- Service accounts are primarily used for API access to Google. It is recommended to not use admin access for service accounts.`,
Recommendation: `Ensure that no service accounts have admin, owner, or write privileges.
- https://cloud.google.com/iam/docs/overview`,
},
},
categoryIAM + "/serviceAccountKeyRotation": {
Score: 0.3,
Tag: []string{"hipaa", "pci"},
Recommend: recommend{
Risk: `Service Account Key Rotation
- Ensures that service account keys are rotated within 90 days of creation.
- Service account keys should be rotated so older keys that that might have been lost or compromised cannot be used to access Google services.`,
Recommendation: `Rotate service account keys that have not been rotated in over 90 days.
- https://cloud.google.com/iam/docs/creating-managing-service-account-keys`,
},
},
categoryIAM + "/serviceAccountManagedKeys": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Service Account Managed Keys
- Ensures that service account keys are being managed by Google.
- Service account keys should be managed by Google to ensure that they are as secure as possible, including key rotations and restrictions to the accessibility of the keys.`,
Recommendation: `Ensure all user service account keys are being managed by Google.
- https://cloud.google.com/iam/docs/creating-managing-service-account-keys`,
},
},
categoryIAM + "/serviceAccountSeparation": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Service Account Separation
- Ensures that no users have both the Service Account User and Service Account Admin role.
- Ensuring that no users have both roles follows separation of duties, where no user should have access to resources out of the scope of duty.`,
Recommendation: `Ensure that no service accounts have both the Service Account User and Service Account Admin role attached.
- https://cloud.google.com/iam/docs/overview`,
},
},
categoryIAM + "/serviceAccountUser": {
Score: 0.6,
Tag: []string{},
Recommend: recommend{
Risk: `Service Account User
- Ensures that no users have the Service Account User role.
- The Service Account User role gives users the access to all service accounts of a project.
- This can result in an elevation of privileges and is not recommended.`,
Recommendation: `Ensure that no service accounts have the Service Account User role attached.
- https://cloud.google.com/iam/docs/overview`,
},
},
categoryIAM + "/serviceLimits": {
Score: 0.3,
Tag: []string{"reliability"},
Recommend: recommend{
Risk: `Service Limits
- Determines if the number of resources is close to the per-account limit.
- Google limits accounts to certain numbers of resources. Exceeding those limits could prevent resources from launching.`,
Recommendation: `Contact GCP support to increase the number of resources available
- https://cloud.google.com/resource-manager/docs/limits`,
},
},
categoryKubernetes + "/aliasIpRangesEnabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Alias IP Ranges Enabled
- Ensures all Kubernetes clusters have alias IP ranges enabled
- Alias IP ranges allow users to assign ranges of internal IP addresses as alias to a network interface.`,
Recommendation: `Ensure that Kubernetes clusters have alias IP ranges enabled.
- https://cloud.google.com/monitoring/kubernetes-engine/`,
},
},
categoryKubernetes + "/autoNodeRepairEnabled": {
Score: 0.3,
Tag: []string{"reliability"},
Recommend: recommend{
Risk: `Automatic Node Repair Enabled
- Ensures all Kubernetes cluster nodes have automatic repair enabled
- When automatic repair on nodes is enabled, the Kubernetes engine performs health checks on all nodes, automatically repairing nodes that fail health checks.
- This ensures that the Kubernetes environment stays optimal.`,
Recommendation: `Ensure that automatic node repair is enabled on all node pools in Kubernetes clusters
- https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-repair`,
},
},
categoryKubernetes + "/autoNodeUpgradesEnabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Automatic Node Upgrades Enabled
- Ensures all Kubernetes cluster nodes have automatic upgrades enabled
- Enabling automatic upgrades on nodes ensures that each node stays current with the latest version of the master branch, also ensuring that the latest security patches are installed to provide the most secure environment.`,
Recommendation: `Ensure that automatic node upgrades are enabled on all node pools in Kubernetes clusters
- https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-upgrades`,
},
},
categoryKubernetes + "/basicAuthenticationDisabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Basic Authentication Disabled
- Ensure basic authentication is set to disabled on Kubernetes clusters.
- Basic authentication uses static passwords to authenticate, which is not the recommended method to authenticate into the Kubernetes API server.`,
Recommendation: `Disable basic authentication on all clusters
- https://cloud.google.com/kubernetes-engine/docs/how-to/hardening-your-cluster`,
},
},
categoryKubernetes + "/clusterLabelsAdded": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Cluster Labels Added
- Ensures all Kubernetes clusters have labels added
- It is recommended to add labels to Kubernetes clusters to apply specific security settings and auto configure objects at creation.`,
Recommendation: `Ensure labels are added to Kubernetes clusters
- https://cloud.google.com/kubernetes-engine/docs/how-to/creating-managing-labels`,
},
},
categoryKubernetes + "/clusterLeastPrivilege": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Cluster Least Privilege
- Ensures Kubernetes clusters using default service account are using minimal service account access scopes
- As a best practice, Kubernetes clusters should not be created with default service account.
- But if they are, Kubernetes default service account should be limited to minimal access scopes necessary to operate the clusters.`,
Recommendation: `Ensure that all Kubernetes clusters are created with minimal access scope.
- https://cloud.google.com/compute/docs/access/service-accounts`,
},
},
categoryKubernetes + "/cosImageEnabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `COS Image Enabled
- Ensures all Kubernetes cluster nodes have Container-Optimized OS enabled
- Container-Optimized OS is optimized to enhance node security.
- It is backed by a team at Google that can quickly patch it.`,
Recommendation: `Enable Container-Optimized OS on all Kubernetes cluster nodes
- https://cloud.google.com/container-optimized-os/`,
},
},
categoryKubernetes + "/defaultServiceAccount": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Default Service Account
- Ensures all Kubernetes cluster nodes are not using the default service account.
- Kubernetes cluster nodes should use customized service accounts that have minimal privileges to run.
- This reduces the attack surface in the case of a malicious attack on the cluster.`,
Recommendation: `Ensure that no Kubernetes cluster nodes are using the default service account
- https://cloud.google.com/container-optimized-os/`,
},
},
categoryKubernetes + "/legacyAuthorizationDisabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Legacy Authorization Disabled
- Ensure legacy authorization is set to disabled on Kubernetes clusters
- The legacy authorizer in Kubernetes grants broad, statically defined permissions.`,
Recommendation: `Disable legacy authorization on all clusters.
- https://cloud.google.com/kubernetes-engine/docs/how-to/hardening-your-cluster`,
},
},
categoryKubernetes + "/loggingEnabled": {
Score: 0.6,
Tag: []string{"hipaa"},
Recommend: recommend{
Risk: `Logging Enabled
- Ensures all Kubernetes clusters have logging enabled
- This setting should be enabled to ensure Kubernetes control plane logs are properly recorded.`,
Recommendation: `Ensure that logging is enabled on all Kubernetes clusters.
- https://cloud.google.com/monitoring/kubernetes-engine/legacy-stackdriver/logging`,
},
},
categoryKubernetes + "/masterAuthorizedNetwork": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Master Authorized Network
- Ensures master authorized networks is set to enabled on Kubernetes clusters
- Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your container clusters Kubernetes master endpoint.`,
Recommendation: `Enable master authorized networks on all clusters.
- https://cloud.google.com/kubernetes-engine/docs/how-to/authorized-networks`,
},
},
categoryKubernetes + "/monitoringEnabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Monitoring Enabled
- Ensures all Kubernetes clusters have monitoring enabled
- Kubernetes supports monitoring through Stackdriver.`,
Recommendation: `Ensure monitoring is enabled on all Kubernetes clusters.
- https://cloud.google.com/monitoring/kubernetes-engine/`,
},
},
categoryKubernetes + "/networkPolicyEnabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Network Policy Enabled
- Ensures all Kubernetes clusters have network policy enabled
- Kubernetes network policy creates isolation between cluster pods, this creates a more secure environment with only specified connections allowed.`,
Recommendation: `Enable network policy on all Kubernetes clusters.
- https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy`,
},
},
categoryKubernetes + "/podSecurityPolicyEnabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Pod Security Policy Enabled
- Ensures pod security policy is enabled for all Kubernetes clusters
- Kubernetes pod security policy is a resource that controls security sensitive aspects of the pod configuration.`,
Recommendation: `Ensure that all Kubernetes clusters have pod security policy enabled.
- https://cloud.google.com/kubernetes-engine/docs/how-to/pod-security-policies`,
},
},
categoryKubernetes + "/privateClusterEnabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Private Cluster Enabled
- Ensures private cluster is enabled for all Kubernetes clusters
- Kubernetes private clusters only have internal ip ranges, which ensures that their workloads are isolated from the public internet.`,
Recommendation: `Ensure that all Kubernetes clusters have private cluster enabled.
- https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters`,
},
},
categoryKubernetes + "/privateEndpoint": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Private Endpoint
- Ensures the private endpoint setting is enabled for kubernetes clusters
- kubernetes private endpoints can be used to route all traffic between the Kubernetes worker and control plane nodes over a private VPC endpoint rather than across the public internet.`,
Recommendation: `Enable the private endpoint setting for all GKE clusters when creating the cluster.
- https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters`,
},
},
categoryKubernetes + "/webDashboardDisabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Web Dashboard Disabled
- Ensures all Kubernetes clusters have the web dashboard disabled.
- It is recommended to disable the web dashboard because it is backed by a highly privileged service account.`,
Recommendation: `Ensure that no Kubernetes clusters have the web dashboard enabled
- https://cloud.google.com/kubernetes-engine/docs/concepts/dashboards`,
},
},
categoryLogging + "/auditConfigurationLogging": {
Score: 0.3,
Tag: []string{"hipaa", "pci"},
Recommend: recommend{
Risk: `Audit Configuration Logging
- Ensures that logging and log alerts exist for audit configuration changes.
- Project Ownership is the highest level of privilege on a project, any changes in audit configuration should be heavily monitored to prevent unauthorized changes.`,
Recommendation: `Ensure that log alerts exist for audit configuration changes.
- https://cloud.google.com/logging/docs/logs-based-metrics/`,
},
},
categoryLogging + "/auditLoggingEnabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Audit Logging Enabled
- Ensures that default audit logging is enabled on the organization or project.
- The default audit logs should be configured to log all admin activities and write and read access to data for all services.
- In addition, no exempted members should be added to the logs to ensure proper delivery of all audit logs.`,
Recommendation: `Ensure that the default audit logs are enabled to log all admin activities and write and read access to data for all services.
- https://cloud.google.com/logging/docs/audit/`,
},
},
categoryLogging + "/customRoleLogging": {
Score: 0.3,
Tag: []string{"hipaa"},
Recommend: recommend{
Risk: `Custom Role Logging
- Ensures that logging and log alerts exist for custom role creation and changes
- Project Ownership is the highest level of privilege on a project, any changes in custom role should be heavily monitored to prevent unauthorized changes.`,
Recommendation: `Ensure that log alerts exist for custom role creation and changes.
- https://cloud.google.com/logging/docs/logs-based-metrics/`,
},
},
categoryLogging + "/logSinksEnabled": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Log Sinks Enabled
- Ensures a log sink is enabled to export all logs
- Log sinks send log data to a storage service for archival and compliance. A log sink with no filter is necessary to ensure that all logs are being properly sent.
- If logs are sent to a storage bucket, the bucket must exist and bucket versioning should exist.`,
Recommendation: `Ensure a log sink is configured properly with an empty filter and a destination.
- https://cloud.google.com/logging/docs/export/`,
},
},
categoryLogging + "/projectOwnershipLogging": {
Score: 0.3,
Tag: []string{"hipaa", "pci"},
Recommend: recommend{
Risk: `Project Ownership Logging
- Ensures that logging and log alerts exist for project ownership assignments and changes
- Project Ownership is the highest level of privilege on a project, any changes in project ownership should be heavily monitored to prevent unauthorized changes.`,
Recommendation: `Ensure that log alerts exist for project ownership assignments and changes.
- https://cloud.google.com/logging/docs/logs-based-metrics/`,
},
},
categoryLogging + "/sqlConfigurationLogging": {
Score: 0.3,
Tag: []string{"hipaa"},
Recommend: recommend{
Risk: `SQL Configuration Logging
- Ensures that logging and log alerts exist for SQL configuration changes
- Project Ownership is the highest level of privilege on a project, any changes in SQL configurations should be heavily monitored to prevent unauthorized changes.`,
Recommendation: `Ensure that log alerts exist for SQL configuration changes.
- https://cloud.google.com/logging/docs/logs-based-metrics/`,
},
},
categoryLogging + "/storagePermissionsLogging": {
Score: 0.3,
Tag: []string{"hipaa", "pci"},
Recommend: recommend{
Risk: `Storage Permissions Logging
- Ensures that logging and log alerts exist for storage permission changes
- Storage permissions include access to the buckets that store the logs, any changes in storage permissions should be heavily monitored to prevent unauthorized changes.`,
Recommendation: `Ensure that log alerts exist for storage permission changes.
- https://cloud.google.com/logging/docs/logs-based-metrics/`,
},
},
categoryLogging + "/vpcFirewallRuleLogging": {
Score: 0.3,
Tag: []string{"hipaa"},
Recommend: recommend{
Risk: `VPC Firewall Rule Logging
- Ensures that logging and log alerts exist for firewall rule changes
- Project Ownership is the highest level of privilege on a project, any changes in firewall rule should be heavily monitored to prevent unauthorized changes.`,
Recommendation: `Ensure that log alerts exist for firewall rule changes.
- https://cloud.google.com/logging/docs/logs-based-metrics/`,
},
},
categoryLogging + "/vpcNetworkLogging": {
Score: 0.3,
Tag: []string{"hipaa", "pci"},
Recommend: recommend{
Risk: `VPC Network Logging
- Ensures that logging and log alerts exist for VPC network changes
- Project Ownership is the highest level of privilege on a project, any changes in VPC network should be heavily monitored to prevent unauthorized changes.`,
Recommendation: `Ensure that log alerts exist for VPC network changes.
- https://cloud.google.com/logging/docs/logs-based-metrics/`,
},
},
categoryLogging + "/vpcNetworkRouteLogging": {
Score: 0.3,
Tag: []string{"hipaa"},
Recommend: recommend{
Risk: `VPC Network Route Logging
- Ensures that logging and log alerts exist for VPC network route changes
- Project Ownership is the highest level of privilege on a project, any changes in VPC network route should be heavily monitored to prevent unauthorized changes.`,
Recommendation: `Ensure that log alerts exist for VPC network route changes.
- https://cloud.google.com/logging/docs/logs-based-metrics/`,
},
},
categorySQL + "/anyHostRootAccess": {
Score: 0.3,
Tag: []string{},
Recommend: recommend{
Risk: `Any Host Root Access
- Ensures SQL instances root user cannot be accessed from any host
- Root access for SQL instance should only be allowed from whitelisted IPs to ensure secure access only from trusted entities.`,
Recommendation: `Ensure that root access for SQL instances are not allowed from any host.
- https://cloud.google.com/sql/docs/mysql/create-manage-users`,
},
},
categorySQL + "/dbAutomatedBackups": {
Score: 0.3,
Tag: []string{"reliability"},
Recommend: recommend{
Risk: `DB Automated Backups
- Ensures automated backups are enabled for SQL instances
- Google provides a simple method of backing up SQL instances at a regular interval.
- This should be enabled to provide an option for restoring data in the event of a database compromise or hardware failure.`,
Recommendation: `Ensure that all database instances are configured with automatic backups enabled.
- https://cloud.google.com/sql/docs/mysql/instance-settings`,
},
},
categorySQL + "/dbMultiAz": {
Score: 0.3,
Tag: []string{"reliability"},
Recommend: recommend{
Risk: `DB Multiple AZ
- Ensures that SQL instances have a failover replica to be cross-AZ for high availability
- Creating SQL instances in with a single AZ creates a single point of failure for all systems relying on that database.
- All SQL instances should be created in multiple AZs to ensure proper failover.`,
Recommendation: `Ensure that all database instances have a DB replica enabled in a secondary AZ.
- https://cloud.google.com/sql/docs/mysql/instance-settings`,
},
},
categorySQL + "/dbPubliclyAccessible": {
Score: 0.8,
Tag: []string{"hipaa", "pci"},
Recommend: recommend{
Risk: `DB Publicly Accessible
- Ensures that SQL instances do not allow public access
- Unless there is a specific business requirement, SQL instances should not have a public endpoint and should only be accessed from within a VPC.`,
Recommendation: `Ensure that SQL instances are configured to prohibit traffic from the public 0.0.0.0 global IP address.
- https://cloud.google.com/sql/docs/mysql/authorize-networks`,
},
},
categorySQL + "/dbRestorable": {
Score: 0.3,
Tag: []string{"pci", "reliability"},
Recommend: recommend{
Risk: `DB Restorable
- Ensures SQL instances can be restored to a recent point
- Google will maintain a point to which the database can be restored.
- This point should not drift too far into the past, or else the risk of irrecoverable data loss may occur.`,
Recommendation: `Ensure all database instances are configured with automatic backups and can be restored to a recent point with binary logging enabled.
- https://cloud.google.com/sql/docs/mysql/instance-settings`,
},
},
categorySQL + "/dbSSLEnabled": {
Score: 0.3,
Tag: []string{"hipaa", "pci"},
Recommend: recommend{
Risk: `Database SSL Enabled
- Ensures SQL databases have SSL enabled
- Enabling SSL ensures that the sensitive data being transferred from the database is encrypted.`,
Recommendation: `Ensure that SSL is enabled on all SQL databases.
- https://cloud.google.com/sql/docs/mysql/instance-settings`,
},
},
categoryStorage + "/bucketAllUsersPolicy": {
Score: 0.6,
Tag: []string{},
Recommend: recommend{
Risk: `Storage Bucket All Users Policy
- Ensures Storage bucket policies do not allow global write, delete, or read permissions
- Storage buckets can be configured to allow the global principal to access the bucket via the bucket policy.
- This policy should be restricted only to known users or accounts.`,
Recommendation: `Ensure that each storage bucket is configured so that no member is set to allUsers or allAuthenticatedUsers.
- https://cloud.google.com/storage/docs/access-control/iam`,
},
},
categoryStorage + "/bucketLogging": {
Score: 0.3,
Tag: []string{"hipaa"},
Recommend: recommend{
Risk: `Bucket Logging
- Ensures object logging is enabled on storage buckets
- Storage bucket logging helps maintain an audit trail of access that can be used in the event of a security incident.`,
Recommendation: `Bucket Logging can only be enabled by using the Command Line Interface and the log bucket must already be created.
- Use this command to enable Logging:
- gsutil logging set on -b gs://[LOG_BUCKET_NAME] -o AccessLog \ gs://[BUCKET_NAME]
- https://cloud.google.com/storage/docs/access-logs`,
},
},
categoryStorage + "/bucketVersioning": {
Score: 0.3,
Tag: []string{"reliability"},
Recommend: recommend{
Risk: `Bucket Versioning
- Ensures object versioning is enabled on storage buckets
- Object versioning can help protect against the overwriting of objects or data loss in the event of a compromise.`,
Recommendation: `Bucket Versioning can only be enabled by using the Command Line Interface, use this command to enable Versioning:
- gsutil versioning set on gs://[BUCKET_NAME]
- https://cloud.google.com/storage/docs/using-object-versioning`,
},
},
categoryVPCNetwork + "/defaultVpcInUse": {
Score: 0.3,
Tag: []string{"pci"},
Recommend: recommend{
Risk: `Default VPC In Use
- Determines whether the default VPC is being used for launching VM instances
- The default VPC should not be used in order to avoid launching multiple services in the same network which may not require connectivity.
- Each application, or network tier, should use its own VPC.`,
Recommendation: `Move resources from the default VPC to a new VPC created for that application or resource group.
- https://cloud.google.com/vpc/docs/vpc`,
},
},
categoryVPCNetwork + "/excessiveFirewallRules": {
Score: 0.3,
Tag: []string{"pci"},
Recommend: recommend{
Risk: `Excessive Firewall Rules
- Determines if there are an excessive number of firewall rules in the account
- Keeping the number of firewall rules to a minimum helps reduce the attack surface of an account.
- Rather than creating new rules with the same rules for each project, common rules should be grouped under the same firewall rule.
- For example, instead of adding port 22 from a known IP to every firewall rule, create a single "SSH" firewall rule which can be used on multiple instances.`,
Recommendation: `Limit the number of firewall rules to prevent accidental authorizations
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/flowLogsEnabled": {
Score: 0.3,
Tag: []string{"hipaa", "pci"},
Recommend: recommend{
Risk: `Flow Logs Enabled
- Ensures VPC flow logs are enabled for traffic logging
- VPC flow logs record all traffic flowing in to and out of a VPC.
- These logs are critical for auditing and review after security incidents.`,
Recommendation: `Enable VPC flow logs for each VPC subnet
- https://cloud.google.com/vpc/docs/using-flow-logs`,
},
},
categoryVPCNetwork + "/multipleSubnets": {
Score: 0.3,
Tag: []string{"reliability"},
Recommend: recommend{
Risk: `Multiple Subnets
- Ensures that VPCs have multiple networks to provide a layered architecture
- A single network within a VPC increases the risk of a broader blast radius in the event of a compromise.`,
Recommendation: `Create multiple networks/subnets in each VPC and change the architecture to take advantage of public and private tiers.
- https://cloud.google.com/vpc/docs/vpc`,
},
},
categoryVPCNetwork + "/openAllPorts": {
Score: 0.8,
Tag: []string{"hipaa", "pci"},
Recommend: recommend{
Risk: `Open All Ports
- Determines if all ports are open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, services should be restricted to known IP addresses.`,
Recommendation: `Restrict ports to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openCIFS": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open CIFS
- Determines if UDP port 445 for CIFS is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as CIFS should be restricted to known IP addresses.`,
Recommendation: `Restrict UDP port 445 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openDNS": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open DNS
- Determines if TCP or UDP port 53 for DNS is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as DNS should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP and UDP port 53 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openDocker": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open Docker
- Determine if Docker port 2375 or 2376 is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as Docker should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP ports 2375 and 2376 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openFTP": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open FTP
- Determines if TCP port 20 or 21 for FTP is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as FTP should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP port 20 or 21 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openHadoopNameNode": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open Hadoop HDFS NameNode Metadata Service
- Determines if TCP port 8020 for HDFS NameNode metadata service is open to the public.
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as Hadoop/HDFS should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP port 8020 to known IP addresses for Hadoop/HDFS.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openHadoopNameNodeWebUI": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open Hadoop HDFS NameNode WebUI
- Determines if TCP port 50070 and 50470 for Hadoop/HDFS NameNode WebUI service is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as Hadoop/HDFS should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP port 50070 and 50470 to known IP addresses for Hadoop/HDFS
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openKibana": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open Kibana
- Determines if TCP port 5601 for Kibana is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as Kibana should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP port 5601 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openMySQL": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open MySQL
- Determines if TCP port 4333 or 3306 for MySQL is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as MySQL should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP ports 4333 and 3306 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openNetBIOS": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open NetBIOS
- Determines if UDP port 137 or 138 for NetBIOS is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as NetBIOS should be restricted to known IP addresses.`,
Recommendation: `Restrict UDP ports 137 and 138 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openOracle": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open Oracle
- Determines if TCP port 1521 for Oracle is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as Oracle should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP ports 1521 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openOracleAutoDataWarehouse": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open Oracle Auto Data Warehouse
- Determines if TCP port 1522 for Oracle Auto Data Warehouse is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as Oracle should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP ports 1522 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openPostgreSQL": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open PostgreSQL
- Determines if TCP port 5432 for PostgreSQL is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as PostgreSQL should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP port 5432 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openRDP": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open RDP
- Determines if TCP port 3389 for RDP is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as RDP should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP port 3389 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openRPC": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open RPC
- Determines if TCP port 135 for RPC is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as RPC should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP port 135 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openSalt": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open Salt
- Determine if TCP ports 4505 or 4506 for the Salt master are open to the public
- Active Salt vulnerabilities, CVE-2020-11651 and CVE-2020-11652 are exploiting Salt instances exposed to the internet.
- These ports should be closed immediately.`,
Recommendation: `Restrict TCP ports 4505 and 4506 to known IP addresses
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openSMBoTCP": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open SMBoTCP
- Determines if TCP port 445 for Windows SMB over TCP is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as SMB should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP port 445 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openSMTP": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open SMTP
- Determines if TCP port 25 for SMTP is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as SMTP should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP port 25 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openSQLServer": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open SQLServer
- Determines if TCP port 1433 or UDP port 1434 for SQL Server is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as SQL server should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP port 1433 and UDP port 1434 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openSSH": {
Score: 0.6,
Tag: []string{},
Recommend: recommend{
Risk: `Open SSH
- Determines if TCP port 22 for SSH is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as SSH should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP port 22 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openTelnet": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open Telnet
- Determines if TCP port 23 for Telnet is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as Telnet should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP port 23 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openVNCClient": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open VNC Client
- Determines if TCP port 5500 for VNC Client is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as VNC Client should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP port 5500 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/openVNCServer": {
Score: 0.8,
Tag: []string{},
Recommend: recommend{
Risk: `Open VNC Server
- Determines if TCP port 5900 for VNC Server is open to the public
- While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as VNC Server should be restricted to known IP addresses.`,
Recommendation: `Restrict TCP port 5900 to known IP addresses.
- https://cloud.google.com/vpc/docs/using-firewalls`,
},
},
categoryVPCNetwork + "/privateAccessEnabled": {
Score: 0.3,
Tag: []string{"pci"},
Recommend: recommend{
Risk: `Private Access Enabled
- Ensures Private Google Access is enabled for all Subnets
- Private Google Access allows VM instances on a subnet to reach Google APIs and services without an IP address.
- This creates a more secure network for the internal communication.`,
Recommendation: `1. Enter the VPC Network service.
2. Enter the VPC.
3. Select the subnet in question.
4. Edit the subnet and enable Private Google Access.
- https://cloud.google.com/vpc/docs/configure-private-google-access`,
},
},
} | src/cloudsploit/plugin.go | 0.704872 | 0.546678 | plugin.go | starcoder |
package vmath
import (
"math"
"github.com/maja42/vmath/math32"
)
// Epsilon is the default epsilon value for float comparisons.
const Epsilon = 1.0E-6
// Equalf compares two floats for equality.
// Uses the default Epsilon as relative tolerance.
// See EqualEps for the comparison strategy.
func Equalf(a, b float32) bool {
    // Comparing floats is complicated and tricky, and there is no "right" solution for doing it.
    // Using relative epsilon comparisons works really well, until numbers are getting very small.
    // If a value is compared to zero, it was often calculated by subtracting two (potentially big) numbers,
    // leading to a difference that is small compared to the original numbers, but quite big compared to zero.
    // https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
    return EqualEps(a, b, Epsilon)
}
// EqualEps compares two floats for equality, using the given epsilon as the relative tolerance.
// Performs a relative difference comparison (see https://floating-point-gui.de/errors/comparison/ and https://stackoverflow.com/q/4915462/2224996)
func EqualEps(a, b, epsilon float32) bool {
    if a == b { // shortcut; also handles +-Inf
        return true
    }
    diff := math32.Abs(a - b)
    // Near zero (or for subnormal differences) the relative comparison below
    // breaks down, so fall back to an absolute check scaled by minNormal.
    if a == 0 || b == 0 || diff < minNormal {
        return diff < epsilon*minNormal
    }
    return diff/(math32.Abs(a)+math32.Abs(b)) < epsilon
}
// minNormal is the smallest positive normal float32 value, i.e. the smallest
// float32 that still has a 1 in front of the binary (=decimal) point:
// 1 / 2^(127 - 1) = 2^-126.
// Do not confuse with "math.SmallestNonzeroFloat32", where this restriction is not present.
const minNormal = float32(1.1754943508222875e-38)
// Clampf returns the value v clamped to the range of [min, max].
func Clampf(v, min, max float32) float32 {
    switch {
    case v <= min:
        return min
    case v >= max:
        return max
    default:
        return v
    }
}
// Clampi returns the value v clamped to the range of [min, max].
func Clampi(v, min, max int) int {
    switch {
    case v <= min:
        return min
    case v >= max:
        return max
    default:
        return v
    }
}
// Wrapf returns the value v in the range of [min, max[ by wrapping it around.
func Wrapf(v, min, max float32) float32 {
    diff := max - min
    // shift into range-relative coordinates, remove whole multiples of the
    // range width via Floor, then translate back to [min, max[
    v -= min
    return min + v - diff*math32.Floor(v/diff)
}
// Wrapi returns the value v in the range of [min, max[ by wrapping it around.
// Implemented via Wrapf; note that very large ints may lose precision in the
// float32 round-trip.
func Wrapi(v, min, max int) int {
    return int(Wrapf(float32(v), float32(min), float32(max)))
}
// ToRadians converts degrees into radians.
func ToRadians(deg float32) float32 {
    rad := math.Pi * deg / 180.0
    return rad
}
// ToDegrees converts radians into degrees.
func ToDegrees(rad float32) float32 {
    deg := rad * (180.0 / math.Pi)
    return deg
}
// CartesianToSpherical converts cartesian coordinates into spherical coordinates.
// Returns the radius, azimuth (angle on XY-plane) and inclination.
// NOTE(review): for the zero vector the radius is 0 and the inclination
// becomes NaN (0/0 inside Acos) — confirm callers never pass it.
func CartesianToSpherical(pos Vec3f) (float32, float32, float32) {
    radius := pos.Length()
    azimuth := math32.Atan2(pos[1], pos[0])
    inclination := math32.Acos(pos[2] / radius)
    return radius, azimuth, inclination
}
// SphericalToCartesian converts spherical coordinates into cartesian coordinates.
// Inverse of CartesianToSpherical: azimuth is the angle on the XY-plane,
// inclination the angle from the positive Z-axis.
func SphericalToCartesian(radius, azimuth, inclination float32) Vec3f {
    sinAz, cosAz := math32.Sincos(azimuth)
    sinInc, cosInc := math32.Sincos(inclination)
    return Vec3f{
        radius * sinInc * cosAz,
        radius * sinInc * sinAz,
        radius * cosInc,
    }
}
// Lerp performs a linear interpolation between a and b.
// The parameter t should be in range [0, 1].
func Lerp(a, b, t float32) float32 {
    wa := 1 - t
    return a*wa + b*t
}
// NormalizeRadians returns the angle in radians in the range [0, 2*PI[.
func NormalizeRadians(rad float32) float32 {
    var pi2 float32 = math.Pi * 2
    // first shift negative angles into positive territory,
    // then strip any remaining whole multiples of 2*PI
    rad += pi2 * float32(int(rad/-pi2)+1)
    rad -= pi2 * float32(int(rad/pi2))
    return rad
}
// NormalizeDegrees returns the angle in degrees in the range [0, 360[.
func NormalizeDegrees(deg float32) float32 {
    // first shift negative angles into positive territory,
    // then strip any remaining whole multiples of 360
    deg += float32(360 * (int(deg/-360) + 1))
    deg -= float32(360 * int(deg/360))
    return deg
}
// AngleToVector returns a 2D vector with the given length and angle to the x-axis.
func AngleToVector(rad float32, length float32) Vec2f {
    sin, cos := math32.Sincos(rad)
    // (cos, sin) is mathematically already a unit vector; the Normalize call
    // only guards against floating point rounding before scaling to length
    vec := Vec2f{cos, sin}
    return vec.Normalize().MulScalar(length)
}
// AngleDiff compares two angles and returns their distance in the range ]-PI, PI].
func AngleDiff(fromRad, toRad float32) float32 {
    angle := NormalizeRadians(toRad - fromRad)
    // map [0, 2*PI[ onto ]-PI, PI] so the result is the shortest signed rotation
    if angle > math.Pi {
        angle -= 2 * math.Pi
    }
    return angle
}
// PointToLineDistance2D returns the distance between a point and an infinitely long line passing through a and b.
func PointToLineDistance2D(a, b, point Vec2f) float32 {
    // Source: http://geomalgorithms.com/a02-_lines.html
    // 1) project the a->point vector onto the a->b vector
    // 2) calculate the intersection point (perpendicular base)
    // 3) return the distance between the point and the intersection
    lineVec := b.Sub(a)
    pointVec := point.Sub(a)
    // calc perpendicular base
    pb := a.Add(pointVec.Project(lineVec))
    return point.Sub(pb).Length()
}
// PointToLineSegmentDistance2D returns the distance between a point and a line segment between a and b.
func PointToLineSegmentDistance2D(a, b, point Vec2f) float32 {
    // Source: http://geomalgorithms.com/a02-_lines.html
    // 1) determine if the point is before (a) by comparing the angle between the a->b and a->point vector
    //    if the point is before, return the distance between the point and point a
    // 2) determine if the point is after (b) by comparing the angle between the a->b and b->point vector
    //    if the point is afterwards, return the distance between the point and point b
    // 3) otherwise, proceed like `PointToLineDistance2D`
    lineVec := b.Sub(a)
    pointVec := point.Sub(a)
    c1 := pointVec.Dot(lineVec)
    if c1 <= 0 { // angle >= 90° --> point is before (a)
        return point.Sub(a).Length()
    }
    c2 := lineVec.Dot(lineVec) // squared length of the segment
    if c2 <= c1 { // point is after (b)
        return point.Sub(b).Length()
    }
    // calc perpendicular base: a + lineVec * (projection ratio)
    ratio := c1 / c2
    pb := a.Add(lineVec.MulScalar(ratio))
    return point.Sub(pb).Length()
}
// IsPointOnLine returns true if the given point lies on the (infinite) line a->b.
// Uses the default Epsilon as relative tolerance.
func IsPointOnLine(a, b Vec2f, point Vec2f) bool {
    return IsPointOnLineEps(a, b, point, Epsilon)
}
// IsPointOnLineEps returns true if the given point lies on the (infinite) line a->b.
// Uses the given epsilon as relative tolerance.
func IsPointOnLineEps(a, b Vec2f, point Vec2f, eps float32) bool {
    lineVec := b.Sub(a)
    pointVec := point.Sub(a)
    // compare the z-coordinate of the cross-product with zero, without losing magnitude information for eps-comparison
    return EqualEps(lineVec[0]*pointVec[1], lineVec[1]*pointVec[0], eps)
}
// PolarToCartesian2D converts length and angle into a 2D position.
// The angle is measured in radians from the positive x-axis.
func PolarToCartesian2D(distance, rad float32) Vec2f {
    sin, cos := math32.Sincos(rad)
    return Vec2f{
        cos * distance,
        sin * distance,
    }
}
// IsPointOnLeft returns true if the given point lies to the left of line a->b;
// If the point lies directly on the line, false is returned.
func IsPointOnLeft(a, b Vec2f, point Vec2f) bool {
    lineVec := b.Sub(a)
    pointVec := point.Sub(a)
    // the sign of the cross product's z-component gives the side (positive = left)
    crossZ := lineVec[0]*pointVec[1] - lineVec[1]*pointVec[0]
    return crossZ > 0
}
package util
import (
"golang.org/x/image/colornames"
"github.com/faiface/pixel"
"github.com/faiface/pixel/imdraw"
"github.com/hueypark/physics/core"
"github.com/hueypark/physics/core/contact"
"github.com/hueypark/physics/core/math/rotator"
"github.com/hueypark/physics/core/math/vector"
"github.com/hueypark/physics/core/shape"
"github.com/hueypark/physics/core/shape/circle"
"github.com/hueypark/physics/core/shape/convex"
)
// DrawCircle strokes a white circle outline (1px line) of the given radius
// centered at position on the immediate-mode drawer.
func DrawCircle(imd *imdraw.IMDraw, position vector.Vector, radius float64) {
    imd.Color = colornames.White
    imd.Push(pixel.V(position.X, position.Y))
    imd.Circle(radius, 1)
}
// DrawConvex strokes a white outline (1px line) of a convex polygon.
// Each vertex is rotated by rotation, translated to world space by position,
// and pushed onto the drawer; the first vertex is pushed again to close the loop.
//
// An empty vertex slice is a no-op. (The original code indexed vertices[0]
// unconditionally and panicked on an empty slice.)
func DrawConvex(imd *imdraw.IMDraw, position vector.Vector, rotation rotator.Rotator, vertices []vector.Vector) {
    if len(vertices) == 0 {
        return
    }
    imd.Color = colornames.White
    for _, vertex := range vertices {
        vertex = rotation.RotateVector(vertex)
        worldPosition := vector.Add(position, vertex)
        imd.Push(pixel.V(worldPosition.X, worldPosition.Y))
    }

    // close the outline by returning to the first vertex
    first := rotation.RotateVector(vertices[0])
    firstWorldPosition := vector.Add(position, first)
    imd.Push(pixel.V(firstWorldPosition.X, firstWorldPosition.Y))
    imd.Line(1)
}
// DrawDebugLine strokes a 2px lime-green line from start to end,
// used to visualize debug information such as contact normals.
func DrawDebugLine(imd *imdraw.IMDraw, start, end vector.Vector) {
    imd.Color = colornames.Limegreen
    imd.Push(pixel.V(start.X, start.Y), pixel.V(end.X, end.Y))
    imd.Line(2)
}
// DrawDebugCircle strokes a 2px lime-green circle outline of the given radius
// centered at position, used to visualize debug information such as contact points.
func DrawDebugCircle(imd *imdraw.IMDraw, position vector.Vector, radius float64) {
    imd.Color = colornames.Limegreen
    imd.Push(pixel.V(position.X, position.Y))
    imd.Circle(radius, 2)
}
// DrawWorld renders every body in the physics world according to its shape
// type, then overlays the current contact set as debug graphics.
func DrawWorld(imd *imdraw.IMDraw, world *physics.World) {
    for _, b := range world.Bodys() {
        switch b.Shape.Type() {
        case shape.BULLET:
            // bullets carry no size of their own; draw a fixed 3px marker
            DrawCircle(imd, b.Position(), 3)
        case shape.CIRCLE:
            c := b.Shape.(*circle.Circle)
            DrawCircle(imd, b.Position(), c.Radius)
        case shape.CONVEX:
            c := b.Shape.(*convex.Convex)
            DrawConvex(imd, b.Position(), b.Rotation(), c.Hull())
        }
    }

    DrawContacts(imd, world.Contacts())
}
// DrawContacts visualizes each contact point as a lime-green dot plus a line
// along the contact normal whose total length equals the penetration depth
// (extending half the depth to either side of the point).
func DrawContacts(imd *imdraw.IMDraw, contacts []*contact.Contact) {
    for _, c := range contacts {
        for _, p := range c.Points() {
            penHalf := c.Penetration() / 2
            start := vector.Add(p, vector.Multiply(c.Normal(), penHalf))
            end := vector.Add(p, vector.Multiply(c.Normal(), -penHalf))
            DrawDebugCircle(imd, p, 3)
            DrawDebugLine(imd, start, end)
        }
    }
}
package models
// staticBoardNodes is the Grafana dashboard JSON for the
// "Kubernetes / Compute Resources / Nodes" board that ships with the
// Prometheus Operator install, with the $datasource template variable
// replaced by "prometheus" and the $cluster template variable replaced
// by the empty string.
const staticBoardNodes = `
[[ $indexCheck := .indexCheck ]]
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": false,
"gnetId": null,
"graphTooltip": 0,
"id": 7,
"iteration": 1568396170452,
"links": [],
"panels": [
[[ range $ind, $instance := .instances ]]
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "prometheus",
"fill": 1,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 0
},
"id": 2,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"paceLength": 10,
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "max(node_load1{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "load 1m",
"refId": "A"
},
{
"expr": "max(node_load5{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "load 5m",
"refId": "B"
},
{
"expr": "max(node_load15{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "load 15m",
"refId": "C"
},
{
"expr": "count(node_cpu_seconds_total{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\", mode=\"user\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "logical cores",
"refId": "D"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "System load - [[ $instance ]]",
"tooltip": {
"shared": false,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "prometheus",
"fill": 1,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 0
},
"id": 3,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"paceLength": 10,
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum by (cpu) (irate(node_cpu_seconds_total{cluster=\"\", job=\"node-exporter\", mode!=\"idle\", instance=\"[[ $instance ]]\"}[5m]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{cpu}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Usage Per Core - [[ $instance ]]",
"tooltip": {
"shared": false,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "percentunit",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "percentunit",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "prometheus",
"fill": 1,
"gridPos": {
"h": 7,
"w": 18,
"x": 0,
"y": 7
},
"id": 4,
"legend": {
"alignAsTable": true,
"avg": true,
"current": true,
"max": false,
"min": false,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"paceLength": 10,
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "max (sum by (cpu) (irate(node_cpu_seconds_total{cluster=\"\", job=\"node-exporter\", mode!=\"idle\", instance=\"[[ $instance ]]\"}[2m])) ) * 100",
"format": "time_series",
"intervalFactor": 10,
"legendFormat": "{{ cpu }}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "CPU Utilization - [[ $instance ]]",
"tooltip": {
"shared": false,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "percent",
"label": null,
"logBase": 1,
"max": 100,
"min": 0,
"show": true
},
{
"format": "percent",
"label": null,
"logBase": 1,
"max": 100,
"min": 0,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"rgba(50, 172, 45, 0.97)",
"rgba(237, 129, 40, 0.89)",
"rgba(245, 54, 54, 0.9)"
],
"datasource": "prometheus",
"format": "percent",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": true,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 7,
"w": 6,
"x": 18,
"y": 7
},
"id": 5,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": false
},
"tableColumn": "",
"targets": [
{
"expr": "avg(sum by (cpu) (irate(node_cpu_seconds_total{cluster=\"\", job=\"node-exporter\", mode!=\"idle\", instance=\"[[ $instance ]]\"}[2m]))) * 100",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "",
"refId": "A"
}
],
"thresholds": "80, 90",
"title": "CPU Usage - [[ $instance ]]",
"tooltip": {
"shared": false
},
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "current"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "prometheus",
"fill": 1,
"gridPos": {
"h": 7,
"w": 18,
"x": 0,
"y": 14
},
"id": 6,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"paceLength": 10,
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "max(node_memory_MemTotal_bytes{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"} - node_memory_MemFree_bytes{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"} - node_memory_Buffers_bytes{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"} - node_memory_Cached_bytes{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "memory used",
"refId": "A"
},
{
"expr": "max(node_memory_Buffers_bytes{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "memory buffers",
"refId": "B"
},
{
"expr": "max(node_memory_Cached_bytes{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "memory cached",
"refId": "C"
},
{
"expr": "max(node_memory_MemFree_bytes{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "memory free",
"refId": "D"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Memory Usage - [[ $instance ]]",
"tooltip": {
"shared": false,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"rgba(50, 172, 45, 0.97)",
"rgba(237, 129, 40, 0.89)",
"rgba(245, 54, 54, 0.9)"
],
"datasource": "prometheus",
"format": "percent",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": true,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 7,
"w": 6,
"x": 18,
"y": 14
},
"id": 7,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": false
},
"tableColumn": "",
"targets": [
{
"expr": "max( ( ( node_memory_MemTotal_bytes{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"} - node_memory_MemFree_bytes{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"} - node_memory_Buffers_bytes{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"} - node_memory_Cached_bytes{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"} ) / node_memory_MemTotal_bytes{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"} ) * 100)",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "",
"refId": "A"
}
],
"thresholds": "80, 90",
"title": "Memory Usage - [[ $instance ]]",
"tooltip": {
"shared": false
},
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "current"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "prometheus",
"fill": 1,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 21
},
"id": 8,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"paceLength": 10,
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [
{
"alias": "read",
"yaxis": 1
},
{
"alias": "io time",
"yaxis": 2
}
],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "max(rate(node_disk_read_bytes_total{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"}[2m]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "read",
"refId": "A"
},
{
"expr": "max(rate(node_disk_written_bytes_total{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"}[2m]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "written",
"refId": "B"
},
{
"expr": "max(rate(node_disk_io_time_seconds_total{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"}[2m]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "io time",
"refId": "C"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Disk I/O - [[ $instance ]]",
"tooltip": {
"shared": false,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "ms",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "prometheus",
"fill": 1,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 21
},
"id": 9,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"paceLength": 10,
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "node:node_filesystem_usage:{cluster=\"\"}",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{device}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Disk Space Usage - [[ $instance ]]",
"tooltip": {
"shared": false,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "percentunit",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "percentunit",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "prometheus",
"fill": 1,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 28
},
"id": 10,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"paceLength": 10,
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "max(rate(node_network_receive_bytes_total{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\", device!~\"lo\"}[5m]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{device}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Network Received - [[ $instance ]]",
"tooltip": {
"shared": false,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "prometheus",
"fill": 1,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 28
},
"id": 11,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"paceLength": 10,
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "max(rate(node_network_transmit_bytes_total{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\", device!~\"lo\"}[5m]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{device}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Network Transmitted - [[ $instance ]]",
"tooltip": {
"shared": false,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "bytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "prometheus",
"fill": 1,
"gridPos": {
"h": 7,
"w": 18,
"x": 0,
"y": 35
},
"id": 12,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"paceLength": 10,
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeat": null,
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "max( node_filesystem_files{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"} - node_filesystem_files_free{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "inodes used",
"refId": "A"
},
{
"expr": "max(node_filesystem_files_free{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "inodes free",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Inodes Usage - [[ $instance ]]",
"tooltip": {
"shared": false,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"rgba(50, 172, 45, 0.97)",
"rgba(237, 129, 40, 0.89)",
"rgba(245, 54, 54, 0.9)"
],
"datasource": "prometheus",
"format": "percent",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": true,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 7,
"w": 6,
"x": 18,
"y": 35
},
"id": 13,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": false
},
"tableColumn": "",
"targets": [
{
"expr": "max( ( ( node_filesystem_files{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"} - node_filesystem_files_free{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"} ) / node_filesystem_files{cluster=\"\", job=\"node-exporter\", instance=\"[[ $instance ]]\"} ) * 100)",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "",
"refId": "A"
}
],
"thresholds": "80, 90",
"title": "Inodes Usage - [[ $instance ]]",
"tooltip": {
"shared": false
},
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "current"
}[[if ne $indexCheck $ind ]],
[[ end ]]
[[ end ]]
],
"refresh": "",
"schemaVersion": 18,
"style": "dark",
"tags": [
"kubernetes-mixin"
],
"templating": {
"list": [
{
"current": {
"selected": true,
"text": "prometheus",
"value": "prometheus"
},
"hide": 0,
"label": null,
"name": "datasource",
"options": [],
"query": "prometheus",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"type": "datasource"
},
{
"allValue": null,
"current": {
"isNone": true,
"selected": true,
"text": "None",
"value": ""
},
"datasource": "prometheus",
"definition": "",
"hide": 2,
"includeAll": false,
"label": "cluster",
"multi": false,
"name": "cluster",
"options": [],
"query": "label_values(kube_pod_info, cluster)",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"allValue": null,
"current": {
"selected": false,
"tags": [],
"text": "10.199.75.57:9100",
"value": "10.199.75.57:9100"
},
"datasource": "prometheus",
"definition": "",
"hide": 0,
"includeAll": false,
"label": null,
"multi": false,
"name": "instance",
"options": [],
"query": "label_values(node_boot_time_seconds{cluster=\"\", job=\"node-exporter\"}, instance)",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Kubernetes / Nodes",
"uid": "fa49a4706d07a042595b664c87fb33ea",
"version": 1
}` | models/prometheus_per_node_config.go | 0.591959 | 0.42931 | prometheus_per_node_config.go | starcoder |
package layer
import (
"fmt"
"github.com/aunum/log"
g "gorgonia.org/gorgonia"
t "gorgonia.org/tensor"
)
// FC is a fully connected layer of neurons.
type FC struct {
    // Input is the number of units in input.
    // required
    Input int

    // Output is the number of units in the output.
    // required
    Output int

    // Name of the layer.
    Name string

    // Activation is the activation function.
    // Defaults to ReLU
    Activation ActivationFn

    // Init is the init function.
    // Defaults to GlorotN(1)
    Init g.InitWFn

    // NoBias indicates to not use a bias with the layer.
    // Defaults to false (i.e. a bias is used unless explicitly disabled).
    NoBias bool

    // BiasInit is the init function for the bias.
    // Defaults to GlorotN(1)
    BiasInit g.InitWFn
}
// fc is the compiled runtime state of an FC layer.
type fc struct {
    *FC

    // weights is the graph node holding the (Input x Output) weight matrix.
    weights *g.Node

    // dtype is the tensor element type used for all created nodes (float32).
    dtype t.Dtype

    // bias is the (1 x Output) bias node; nil when NoBias is set.
    bias *g.Node

    // isBatched selects broadcast addition of the bias in the forward pass.
    isBatched bool

    // shared, when non-nil, is another fc whose current weight/bias values
    // are copied into this layer's nodes during Compile (weight sharing).
    shared *fc
}
// newFC wraps an FC config in its runtime state, with defaults applied.
//
// Bug fix: FC.ApplyDefaults has a value receiver and returns the updated
// copy as a Config. The original code called config.ApplyDefaults() and
// discarded the return value, so the documented defaults (Activation=ReLU,
// Init/BiasInit=GlorotN(1)) were never actually applied. We now capture the
// returned copy and use it.
func newFC(config *FC) *fc {
    applied := config.ApplyDefaults().(FC)
    return &fc{
        FC:    &applied,
        dtype: t.Float32,
    }
}
// Validate the config.
// Both Input and Output must be non-zero.
func (f FC) Validate() error {
    switch {
    case f.Input == 0:
        return fmt.Errorf("input must be set")
    case f.Output == 0:
        return fmt.Errorf("output must be set")
    }
    return nil
}
// ApplyDefaults to the config: any unset Activation, Init, or BiasInit is
// filled in. The receiver is a value, so the completed copy is returned
// rather than the original being mutated.
func (f FC) ApplyDefaults() Config {
	out := f
	if out.Activation == nil {
		out.Activation = ReLU
	}
	if out.Init == nil {
		out.Init = g.GlorotN(1)
	}
	if out.BiasInit == nil {
		out.BiasInit = g.GlorotN(1)
	}
	return out
}
// Compile the layer into the graph, creating the weight node and, unless
// NoBias is set, the bias node. When a shared layer was attached through a
// CompileOpt, the new nodes are seeded with the shared layer's current
// parameter values instead of the Init functions.
func (f FC) Compile(graph *g.ExprGraph, opts ...CompileOpt) Layer {
	fcn := newFC(&f)
	for _, opt := range opts {
		opt(fcn)
	}
	if fcn.shared != nil {
		// Shared path: copy values from the shared layer's nodes.
		fcn.weights = g.NewMatrix(graph, fcn.dtype, g.WithShape(f.Input, f.Output), g.WithName(f.Name), g.WithValue(fcn.shared.weights.Value()))
		if !fcn.NoBias {
			// NOTE(review): assumes the shared layer was itself compiled with a
			// bias; fcn.shared.bias.Value() would nil-panic otherwise — confirm.
			fcn.bias = g.NewMatrix(graph, fcn.dtype, g.WithShape(1, f.Output), g.WithName(fmt.Sprintf("%s-bias", f.Name)), g.WithValue(fcn.shared.bias.Value()))
		}
		return fcn
	}
	// Fresh path: initialize parameters with the configured init functions.
	fcn.weights = g.NewMatrix(graph, fcn.dtype, g.WithShape(f.Input, f.Output), g.WithInit(f.Init), g.WithName(f.Name))
	if !f.NoBias {
		fcn.bias = g.NewMatrix(graph, fcn.dtype, g.WithShape(1, f.Output), g.WithInit(f.BiasInit), g.WithName(fmt.Sprintf("%s-bias", f.Name)))
	}
	return fcn
}
// Clone the config, deep-copying the activation function.
func (f FC) Clone() Config {
	c := &FC{
		Input:    f.Input,
		Output:   f.Output,
		Name:     f.Name,
		Init:     f.Init,
		NoBias:   f.NoBias,
		BiasInit: f.BiasInit,
	}
	// Guard against a zero-value config: Activation is nil until
	// ApplyDefaults has run, and cloning nil would panic.
	if f.Activation != nil {
		c.Activation = f.Activation.Clone()
	}
	return c
}
// Fwd is a forward pass on a single fully connected layer: it computes
// activation(x*W + b), skipping the bias and/or activation when absent.
// A vector input is first promoted to a 1-row matrix.
func (f *fc) Fwd(x *g.Node) (*g.Node, error) {
	if x.IsVector() {
		shape := append(t.Shape{1}, x.Shape()...)
		reshaped, err := g.Reshape(x, shape)
		if err != nil {
			return nil, err
		}
		log.Debugf("normalizing dimensions of x to %v", shape)
		x = reshaped
	}
	// Note: parts of this are borrowed from https://github.com/gorgonia/golgi
	xw, err := g.Mul(x, f.weights)
	if err != nil {
		return nil, err
	}
	out := xw
	if f.bias != nil {
		// Batched inputs need the 1 x Output bias broadcast over the batch axis.
		if f.isBatched {
			out, err = g.BroadcastAdd(xw, f.bias, nil, []byte{0})
		} else {
			out, err = g.Add(xw, f.bias)
		}
		if err != nil {
			return nil, err
		}
	}
	if f.Activation == nil {
		log.Debugf("fc %q output shape: %v", f.Name, out.Shape())
		return out, nil
	}
	a, err := f.Activation.Fwd(out)
	if err != nil {
		return nil, err
	}
	log.Debugf("fc name %q output shape: %v", f.Name, a.Shape())
	return a, err
}
// Learnables are the learnable parameters of the fully connected layer:
// the weights, plus the bias when one exists.
func (f *fc) Learnables() g.Nodes {
	params := g.Nodes{f.weights}
	if f.bias != nil {
		params = append(params, f.bias)
	}
	return params
}
// Clone the layer without any nodes. (nodes cannot be shared)
func (f *fc) Clone() Layer {
	// FC.Clone returns a *FC wrapped in the Config interface, so the type
	// assertion must be to *FC; the previous assertion to the value type FC
	// panicked at runtime.
	cfg := f.FC.Clone().(*FC)
	return &fc{
		FC:        cfg,
		dtype:     f.dtype,
		isBatched: f.isBatched,
		shared:    f.shared,
	}
}
// Graph returns the graph this layer was compiled with.
func (f *fc) Graph() *g.ExprGraph {
if f.weights == nil {
return nil
}
return f.weights.Graph()
} | vendor/github.com/aunum/goro/pkg/v1/layer/fc.go | 0.680135 | 0.483405 | fc.go | starcoder |
package core
import "fmt"
// Rect is an axis-aligned rectangle described by a corner at (X, Y) and an
// extent of Width x Height, all in integer units. Tl/Br treat (X, Y) as the
// top-left and (X+Width, Y+Height) as the bottom-right corner.
type Rect struct {
	// X is the x-coordinate of the rectangle's origin corner.
	X int
	// Y is the y-coordinate of the rectangle's origin corner.
	Y int
	// Width is the horizontal extent.
	Width int
	// Height is the vertical extent.
	Height int
}
// NewRect builds a rectangle from an origin corner and an extent.
func NewRect(x int, y int, width int, height int) (rcvr *Rect) {
	return &Rect{
		X:      x,
		Y:      y,
		Width:  width,
		Height: height,
	}
}
// NewRect2 builds the zero rectangle: origin (0, 0) with no extent.
func NewRect2() (rcvr *Rect) {
	return NewRect(0, 0, 0, 0)
}
// NewRect3 builds the bounding rectangle of two corner points: the origin is
// the per-axis minimum of the two points and the extent is the per-axis span.
// Coordinates are truncated to int after the comparisons, matching the
// original float-domain min/max selection.
func NewRect3(p1 *Point, p2 *Point) (rcvr *Rect) {
	x1, x2 := int(p1.X), int(p2.X)
	if p2.X < p1.X {
		x1, x2 = x2, x1
	}
	y1, y2 := int(p1.Y), int(p2.Y)
	if p2.Y < p1.Y {
		y1, y2 = y2, y1
	}
	return &Rect{
		X:      x1,
		Y:      y1,
		Width:  x2 - x1,
		Height: y2 - y1,
	}
}
// NewRect4 builds a rectangle from an origin point and a size,
// truncating all coordinates to int.
func NewRect4(p *Point, s *Size) (rcvr *Rect) {
	x, y := int(p.X), int(p.Y)
	w, h := int(s.Width), int(s.Height)
	return NewRect(x, y, w, h)
}
// NewRect5 builds a rectangle from a slice of values in the order
// x, y, width, height (missing entries default to 0; see Set).
func NewRect5(vals []float64) (rcvr *Rect) {
	r := &Rect{}
	r.Set(vals)
	return r
}
// Area returns Width*Height as a float64. The product is computed in
// integer arithmetic, matching the original behavior.
func (rcvr *Rect) Area() float64 {
	area := rcvr.Width * rcvr.Height
	return float64(area)
}
// Br returns the bottom-right corner, (X+Width, Y+Height).
func (rcvr *Rect) Br() *Point {
	x := float64(rcvr.X + rcvr.Width)
	y := float64(rcvr.Y + rcvr.Height)
	return NewPoint(x, y)
}
// Clone returns an independent copy of the rectangle.
func (rcvr *Rect) Clone() *Rect {
	c := *rcvr
	return NewRect(c.X, c.Y, c.Width, c.Height)
}
// Contains reports whether p lies inside the rectangle. The low edges
// (X, Y) are inclusive; the high edges (X+Width, Y+Height) are exclusive.
func (rcvr *Rect) Contains(p *Point) bool {
	inX := float64(rcvr.X) <= p.X && p.X < float64(rcvr.X+rcvr.Width)
	inY := float64(rcvr.Y) <= p.Y && p.Y < float64(rcvr.Y+rcvr.Height)
	return inX && inY
}
// Empty reports whether the rectangle has no area (a non-positive
// width or height).
func (rcvr *Rect) Empty() bool {
	if rcvr.Width <= 0 {
		return true
	}
	return rcvr.Height <= 0
}
// Equals reports whether obj is the same *Rect, or a *Rect with
// identical fields. Any other type compares unequal.
func (rcvr *Rect) Equals(obj interface{}) bool {
	if rcvr == obj {
		return true
	}
	other, ok := obj.(*Rect)
	if !ok {
		return false
	}
	return *rcvr == *other
}
// Set fills the rectangle from a slice of values in the order
// x, y, width, height, truncating each to int. Missing entries — and a
// nil slice, for which len(vals) is 0 — default the remaining fields
// to 0, so the original separate nil branch was redundant.
func (rcvr *Rect) Set(vals []float64) {
	// at returns vals[i] truncated to int, or 0 when i is out of range.
	at := func(i int) int {
		if i < len(vals) {
			return int(vals[i])
		}
		return 0
	}
	rcvr.X = at(0)
	rcvr.Y = at(1)
	rcvr.Width = at(2)
	rcvr.Height = at(3)
}
// Size returns the rectangle's extent as a Size.
func (rcvr *Rect) Size() *Size {
	w := float64(rcvr.Width)
	h := float64(rcvr.Height)
	return NewSize(w, h)
}
// Tl returns the top-left corner, (X, Y).
func (rcvr *Rect) Tl() *Point {
	x := float64(rcvr.X)
	y := float64(rcvr.Y)
	return NewPoint(x, y)
}
func (rcvr *Rect) String() string {
return fmt.Sprintf("%v%v%v%v%v%v%v%v%v", "{", rcvr.X, ", ", rcvr.Y, ", ", rcvr.Width, "x", rcvr.Height, "}")
} | opencv3/core/Rect.java.go | 0.582491 | 0.561335 | Rect.java.go | starcoder |
package shim
//we provide a unify interface for stubs interface provided from different fabric implement
//(ya-fabric, 0.6, 1.x, etc) which is mainly a partial stack from shim interface of 0.6
import (
"time"
)
// Chaincode is the interface to be implemented by all chaincodes. The fabric
// runs transactions by calling these functions as specified. The naming mixes
// the fabric 0.6 and 1.x designations.
// (This interface is not as central as in a real fabric implementation; it
// just provides a suitable common interface for some tools.)
type Chaincode interface {
	// Invoke is called for every transaction. The readonly flag presumably
	// marks query-style calls that must not modify state — confirm with the
	// calling runtime.
	Invoke(stub ChaincodeStubInterface, function string, args [][]byte, readonly bool) ([]byte, error)
}
// ChaincodeStubInterface is the unified subset of the shim stub API through
// which a chaincode inspects the current transaction and reads/writes ledger
// state, regardless of the underlying fabric implementation.
type ChaincodeStubInterface interface {
	// GetArgs returns the arguments to the stub call as a 2D byte array.
	GetArgs() [][]byte
	// GetStringArgs returns the arguments to the stub call as a string array.
	GetStringArgs() []string
	// GetTxID returns the transaction ID.
	GetTxID() string
	// GetTxTime returns the transaction created timestamp, which is currently
	// taken from the peer receiving the transaction. Note that this timestamp
	// may not be the same as the other peers' time.
	GetTxTime() (time.Time, error)
	// GetState returns the byte array value specified by the `key`.
	GetState(key string) ([]byte, error)
	// PutState writes the specified `value` and `key` into the ledger.
	PutState(key string, value []byte) error
	// DelState removes the specified `key` and its value from the ledger.
	DelState(key string) error
	// RangeQueryState can be invoked by a chaincode to query a range
	// of keys in the state. Assuming the startKey and endKey are in lexical
	// order, an iterator will be returned that can be used to iterate over all
	// keys between the startKey and endKey, inclusive. The order in which keys
	// are returned by the iterator is random.
	RangeQueryState(startKey, endKey string) (StateRangeQueryIteratorInterface, error)
	// GetBinding returns the transaction binding.
	GetBinding() ([]byte, error)
	// SetEvent saves the event to be sent when a transaction is made part of a block.
	SetEvent(name string, payload []byte) error
	// GetRawStub obtains the original implementation-specific chaincode stub
	// for code that needs features beyond this unified interface.
	GetRawStub() interface{}
}
// StateRangeQueryIteratorInterface allows a chaincode to iterate over a range
// of key/value pairs in the state, as returned by RangeQueryState.
type StateRangeQueryIteratorInterface interface {
	// HasNext returns true if the range query iterator contains additional
	// keys and values.
	HasNext() bool
	// Next returns the next key and value in the range query iterator.
	Next() (string, []byte, error)
	// Close closes the range query iterator. This should be called when done
	// reading from the iterator to free up resources.
	Close() error
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.