code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package zipcodes
import (
"bufio"
"errors"
"fmt"
"io"
"log"
"math"
"os"
"strconv"
"strings"
)
const (
earthRadiusKm = 6371
earthRadiusMi = 3958
)
// ZipCodeLocation struct represents each line of the dataset.
type ZipCodeLocation struct {
	ZipCode              string  // postal code; also the key into Zipcodes.DatasetList
	PlaceName            string  // city / locality name
	AdminName            string  // full administrative (state/province) name
	AbbreviatedAdminName string  // abbreviated administrative (state/province) name
	Lat                  float64 // latitude in decimal degrees
	Lon                  float64 // longitude in decimal degrees
}
// Zipcodes contains the whole list of structs representing
// the zipcode dataset.
type Zipcodes struct {
	DatasetList            map[string]ZipCodeLocation   // zipcode -> location record
	CityStateToZip         map[string][]ZipCodeLocation // lowercased state name + city name -> locations
	stateFullToAbbreviated map[string]string            // lowercased full state name -> abbreviation
	stateAbbreviatedToFull map[string]string            // lowercased abbreviation -> full state name
}
// New loads the dataset file at datasetPath and returns a Zipcodes
// struct wrapping it, or an error if the dataset cannot be read.
func New(datasetPath string) (*Zipcodes, error) {
	dataset, err := LoadDataset(datasetPath)
	if err != nil {
		return nil, err
	}
	return &dataset, nil
}
// Lookup looks for a zipcode inside the dataset map and returns its
// location record. It returns an error when the zipcode is not present.
func (zc *Zipcodes) Lookup(zipCode string) (*ZipCodeLocation, error) {
	// Use the comma-ok idiom instead of comparing against the zero struct:
	// the old comparison would also report "not found" for an entry whose
	// fields all happen to be zero values.
	location, ok := zc.DatasetList[zipCode]
	if !ok {
		return &ZipCodeLocation{}, fmt.Errorf("zipcodes: zipcode %s not found", zipCode)
	}
	return &location, nil
}
// DistanceInKm returns the line of sight (great-circle) distance between two zipcodes in Kilometers.
func (zc *Zipcodes) DistanceInKm(zipCodeA string, zipCodeB string) (float64, error) {
	return zc.CalculateDistance(zipCodeA, zipCodeB, earthRadiusKm)
}
// DistanceInMiles returns the line of sight (great-circle) distance between two zipcodes in Miles.
func (zc *Zipcodes) DistanceInMiles(zipCodeA string, zipCodeB string) (float64, error) {
	return zc.CalculateDistance(zipCodeA, zipCodeB, earthRadiusMi)
}
// CalculateDistance returns the line of sight distance between two
// zipcodes, in the unit implied by the given sphere radius (e.g. pass
// the Earth's radius in kilometers to get kilometers back).
// It returns an error when either zipcode is unknown.
func (zc *Zipcodes) CalculateDistance(zipCodeA string, zipCodeB string, radius float64) (float64, error) {
	pointA, err := zc.Lookup(zipCodeA)
	if err != nil {
		return 0, err
	}
	pointB, err := zc.Lookup(zipCodeB)
	if err != nil {
		return 0, err
	}
	return DistanceBetweenPoints(pointA.Lat, pointA.Lon, pointB.Lat, pointB.Lon, radius), nil
}
// DistanceInKmToZipCode calculates the distance between a zipcode and a given lat/lon in Kilometers.
func (zc *Zipcodes) DistanceInKmToZipCode(zipCode string, latitude, longitude float64) (float64, error) {
	location, errLoc := zc.Lookup(zipCode)
	if errLoc != nil {
		return 0, errLoc
	}
	return DistanceBetweenPoints(location.Lat, location.Lon, latitude, longitude, earthRadiusKm), nil
}
// DistanceInMilToZipCode calculates the distance between a zipcode and a given lat/lon in Miles.
func (zc *Zipcodes) DistanceInMilToZipCode(zipCode string, latitude, longitude float64) (float64, error) {
	location, errLoc := zc.Lookup(zipCode)
	if errLoc != nil {
		return 0, errLoc
	}
	return DistanceBetweenPoints(location.Lat, location.Lon, latitude, longitude, earthRadiusMi), nil
}
// GetZipcodesWithinKmRadius returns all zipcodes within the given radius
// (in kilometers) of the given zipcode, excluding the zipcode itself.
func (zc *Zipcodes) GetZipcodesWithinKmRadius(zipCode string, radius float64) ([]string, error) {
	location, err := zc.Lookup(zipCode)
	if err != nil {
		return []string{}, err
	}
	return zc.FindZipcodesWithinRadius(location, radius, earthRadiusKm), nil
}
// GetZipcodesWithinMlRadius returns all zipcodes within the given radius
// (in miles) of the given zipcode, excluding the zipcode itself.
func (zc *Zipcodes) GetZipcodesWithinMlRadius(zipCode string, radius float64) ([]string, error) {
	location, err := zc.Lookup(zipCode)
	if err != nil {
		return []string{}, err
	}
	return zc.FindZipcodesWithinRadius(location, radius, earthRadiusMi), nil
}
// FindZipcodesWithinRadius scans the whole dataset and collects the
// zipcodes whose distance from location is strictly less than maxRadius.
// earthRadius selects the distance unit; location itself is skipped.
// Note: this is a linear scan over every entry in the dataset.
func (zc *Zipcodes) FindZipcodesWithinRadius(location *ZipCodeLocation, maxRadius float64, earthRadius float64) []string {
	matches := []string{}
	for _, candidate := range zc.DatasetList {
		if candidate.ZipCode == location.ZipCode {
			continue
		}
		if DistanceBetweenPoints(location.Lat, location.Lon, candidate.Lat, candidate.Lon, earthRadius) < maxRadius {
			matches = append(matches, candidate.ZipCode)
		}
	}
	return matches
}
// LookupByCityState lists the zipcode locations in a city. state may be
// either the full administrative name or its abbreviation; the lookup is
// case-insensitive. It returns nil when no match is found.
func (zc *Zipcodes) LookupByCityState(city, state string) []ZipCodeLocation {
	lcity := strings.ToLower(city)
	lstate := strings.ToLower(state)
	if list, ok := zc.CityStateToZip[lstate+lcity]; ok {
		return list
	}
	// Retry assuming the state was given as an abbreviation.
	if list, ok := zc.CityStateToZip[zc.stateAbbreviatedToFull[lstate]+lcity]; ok {
		return list
	}
	return nil
}
func hsin(t float64) float64 {
return math.Pow(math.Sin(t/2), 2)
}
// degreesToRadians converts degrees to radians
func degreesToRadians(d float64) float64 {
return d * math.Pi / 180
}
// DistanceBetweenPoints returns the distance between two lat/lon points
// using the haversine distance formula, rounded to two decimal places.
// radius is the sphere radius and selects the unit of the result
// (e.g. the Earth's radius in kilometers or miles).
func DistanceBetweenPoints(latitude1, longitude1, latitude2, longitude2 float64, radius float64) float64 {
	phi1 := degreesToRadians(latitude1)
	lambda1 := degreesToRadians(longitude1)
	phi2 := degreesToRadians(latitude2)
	lambda2 := degreesToRadians(longitude2)
	// a is the squared half-chord length between the two points.
	a := hsin(phi2-phi1) + math.Cos(phi1)*math.Cos(phi2)*hsin(lambda2-lambda1)
	// c is the angular distance in radians.
	c := 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))
	distance := c * radius
	// Round to two decimal places.
	return math.Round(distance*100) / 100
}
// LoadDataset opens the dataset file at datasetPath and loads it into a
// Zipcodes struct.
func LoadDataset(datasetPath string) (Zipcodes, error) {
	file, err := os.Open(datasetPath)
	if err != nil {
		// The original code called log.Fatal(err) here, which terminates
		// the whole process and made the error return unreachable.
		// Log and return the error to the caller instead.
		log.Printf("zipcodes: error while opening file %v", err)
		return Zipcodes{}, fmt.Errorf("zipcodes: error while opening file %v", err)
	}
	defer file.Close()
	return LoadDatasetReader(file)
}
// LoadDatasetReader reads and loads the dataset into a map interface
func LoadDatasetReader(r io.Reader) (Zipcodes, error) {
if r == nil {
return Zipcodes{}, errors.New("zipcodes: unexpected nil reader")
}
scanner := bufio.NewScanner(r)
zipcodeMap := Zipcodes{
DatasetList: make(map[string]ZipCodeLocation),
CityStateToZip: make(map[string][]ZipCodeLocation),
stateFullToAbbreviated: map[string]string{},
stateAbbreviatedToFull: map[string]string{},
}
for scanner.Scan() {
splittedLine := strings.Split(scanner.Text(), "\t")
if len(splittedLine) != 12 {
return Zipcodes{}, fmt.Errorf("zipcodes: file line does not have 12 fields")
}
lat, errLat := strconv.ParseFloat(splittedLine[9], 64)
if errLat != nil {
return Zipcodes{}, fmt.Errorf("zipcodes: error while converting %s to Latitude", splittedLine[9])
}
lon, errLon := strconv.ParseFloat(splittedLine[10], 64)
if errLon != nil {
return Zipcodes{}, fmt.Errorf("zipcodes: error while converting %s to Longitude", splittedLine[10])
}
zipcodeMap.DatasetList[splittedLine[1]] = ZipCodeLocation{
ZipCode: splittedLine[1],
PlaceName: splittedLine[2],
AdminName: splittedLine[3],
AbbreviatedAdminName: splittedLine[4],
Lat: lat,
Lon: lon,
}
}
for _, v := range zipcodeMap.DatasetList {
ladmin := strings.ToLower(v.AdminName)
labadmin := strings.ToLower(v.AbbreviatedAdminName)
key := strings.ToLower(ladmin + v.PlaceName)
if zipcodeMap.CityStateToZip[key] == nil {
zipcodeMap.CityStateToZip[key] = make([]ZipCodeLocation, 0)
}
zipcodeMap.CityStateToZip[key] = append(zipcodeMap.CityStateToZip[key], v)
zipcodeMap.stateFullToAbbreviated[ladmin] = labadmin
zipcodeMap.stateAbbreviatedToFull[labadmin] = ladmin
}
if err := scanner.Err(); err != nil {
return Zipcodes{}, fmt.Errorf("zipcodes: error while opening file %v", err)
}
return zipcodeMap, nil
} | zipcodes.go | 0.78785 | 0.500549 | zipcodes.go | starcoder |
package rotate
import (
"fluorescence/geometry"
"fluorescence/geometry/primitive"
"fluorescence/geometry/primitive/aabb"
"fluorescence/shading/material"
"fmt"
"strings"
"github.com/go-gl/mathgl/mgl64"
)
// Quaternion is a quaternion rotation wrapping another primitive.
// It rotates incoming rays into the wrapped primitive's local frame and
// rotates resulting normals and bounding boxes back into world space.
type Quaternion struct {
	AxisAngles [3]float64 `json:"axis_angles"` // Euler angles in degrees, applied per Order
	Order      string     `json:"order"`       // rotation order, e.g. "XYZ" (case-insensitive; uppercased by Setup)
	TypeName   string     `json:"type"`
	Data       interface{} `json:"data"`
	Primitive  primitive.Primitive // the wrapped primitive being rotated
	quaternion mgl64.Quat          // rotation computed by Setup from AxisAngles/Order
	inverse    mgl64.Quat          // cached inverse of quaternion
}
// Setup validates the rotation order, builds the rotation quaternion from
// the configured axis angles (given in degrees), caches its inverse, and
// returns the receiver. It returns an error for an unrecognized order.
func (q *Quaternion) Setup() (*Quaternion, error) {
	q.Order = strings.ToUpper(q.Order)
	// All twelve valid Euler rotation orders.
	orders := map[string]mgl64.RotationOrder{
		"XYX": mgl64.XYX, "XYZ": mgl64.XYZ, "XZX": mgl64.XZX, "XZY": mgl64.XZY,
		"YXY": mgl64.YXY, "YXZ": mgl64.YXZ, "YZX": mgl64.YZX, "YZY": mgl64.YZY,
		"ZXY": mgl64.ZXY, "ZXZ": mgl64.ZXZ, "ZYX": mgl64.ZYX, "ZYZ": mgl64.ZYZ,
	}
	rotationOrder, ok := orders[q.Order]
	if !ok {
		return nil, fmt.Errorf("invalid order (%s) for quaternion", q.Order)
	}
	q.quaternion = mgl64.AnglesToQuat(
		mgl64.DegToRad(q.AxisAngles[0]),
		mgl64.DegToRad(q.AxisAngles[1]),
		mgl64.DegToRad(q.AxisAngles[2]),
		rotationOrder,
	)
	q.inverse = q.quaternion.Inverse()
	return q, nil
}
// Intersection computes the intersection of this object and a given ray, if it exists.
// The ray is rotated by the inverse quaternion into the wrapped primitive's
// local frame, intersected there, and the resulting surface normal is rotated
// back into world space. The returned RayHit carries the ORIGINAL (unrotated)
// ray together with the unrotated normal.
func (q *Quaternion) Intersection(ray geometry.Ray, tMin, tMax float64) (*material.RayHit, bool) {
	rotatedRay := ray
	// Convert the ray into mgl64 vectors so the quaternion can rotate them.
	originMGL := mgl64.Vec3{rotatedRay.Origin.X, rotatedRay.Origin.Y, rotatedRay.Origin.Z}
	directionMGL := mgl64.Vec3{rotatedRay.Direction.X, rotatedRay.Direction.Y, rotatedRay.Direction.Z}
	// Rotating the ray by the inverse is equivalent to rotating the primitive forward.
	rotatedOriginMGL := q.inverse.Rotate(originMGL)
	rotatedDirectionMGL := q.inverse.Rotate(directionMGL)
	rotatedRay.Origin = geometry.Point{
		X: rotatedOriginMGL.X(),
		Y: rotatedOriginMGL.Y(),
		Z: rotatedOriginMGL.Z(),
	}
	rotatedRay.Direction = geometry.Vector{
		X: rotatedDirectionMGL.X(),
		Y: rotatedDirectionMGL.Y(),
		Z: rotatedDirectionMGL.Z(),
	}
	rayHit, wasHit := q.Primitive.Intersection(rotatedRay, tMin, tMax)
	if wasHit {
		// Rotate the local-space normal back into world space using the
		// forward quaternion.
		rotatedNormalMGL := mgl64.Vec3{rayHit.NormalAtHit.X, rayHit.NormalAtHit.Y, rayHit.NormalAtHit.Z}
		unrotatedNormalMGL := q.quaternion.Rotate(rotatedNormalMGL)
		unrotatedNormal := geometry.Vector{
			X: unrotatedNormalMGL.X(),
			Y: unrotatedNormalMGL.Y(),
			Z: unrotatedNormalMGL.Z(),
		}
		return &material.RayHit{
			Ray:         ray,
			NormalAtHit: unrotatedNormal,
			Time:        rayHit.Time,
			U:           rayHit.U,
			V:           rayHit.V,
			Material:    rayHit.Material,
		}, true
	}
	return nil, false
}
// BoundingBox returns an AABB for this object in world space.
// It rotates all eight corners of the wrapped primitive's bounding box by
// the quaternion and takes the componentwise min/max of the results, so the
// returned box conservatively encloses the rotated primitive.
func (q *Quaternion) BoundingBox(t0, t1 float64) (*aabb.AABB, bool) {
	box, ok := q.Primitive.BoundingBox(t0, t1)
	if !ok {
		return nil, false
	}
	// Start min at +max and max at -max so the first corner initializes both.
	minPoint := geometry.PointMax
	maxPoint := geometry.PointMax.Negate()
	// Enumerate the 8 corners of the box: each of i, j, k selects box.A or
	// box.B on one axis (float loop variables used directly as blend weights).
	for i := 0.0; i < 2; i++ {
		for j := 0.0; j < 2; j++ {
			for k := 0.0; k < 2; k++ {
				x := i*box.B.X + (1-i)*box.A.X
				y := j*box.B.Y + (1-j)*box.A.Y
				z := k*box.B.Z + (1-k)*box.A.Z
				unrotatedCornerMGL := mgl64.Vec3{x, y, z}
				rotatedCornerMGL := q.quaternion.Rotate(unrotatedCornerMGL)
				rotatedCorner := geometry.Point{
					X: rotatedCornerMGL.X(),
					Y: rotatedCornerMGL.Y(),
					Z: rotatedCornerMGL.Z(),
				}
				maxPoint = geometry.MaxComponents(maxPoint, rotatedCorner)
				minPoint = geometry.MinComponents(minPoint, rotatedCorner)
			}
		}
	}
	return &aabb.AABB{
		A: minPoint,
		B: maxPoint,
	}, true
}
// SetMaterial sets the material of this object
func (q *Quaternion) SetMaterial(m material.Material) {
q.Primitive.SetMaterial(m)
}
// IsInfinite returns whether this object is infinite
func (q *Quaternion) IsInfinite() bool {
return q.Primitive.IsInfinite()
}
// IsClosed returns whether this object is closed
func (q *Quaternion) IsClosed() bool {
return q.Primitive.IsClosed()
}
// Copy returns a shallow copy of this object
func (q *Quaternion) Copy() primitive.Primitive {
newRX := *q
return &newRX
} | geometry/primitive/transform/rotate/quaternion.go | 0.865452 | 0.562777 | quaternion.go | starcoder |
package dna
import (
"log"
)
// ToUpper changes the input base to uppercase.
// Bases that are not one of the lowercase codes (including bases that are
// already uppercase, gaps, dots, etc.) are returned unchanged.
func ToUpper(b Base) Base {
	if b == LowerA {
		return A
	}
	if b == LowerC {
		return C
	}
	if b == LowerG {
		return G
	}
	if b == LowerT {
		return T
	}
	if b == LowerN {
		return N
	}
	return b
}
// ToLower changes the input base to lowercase.
// Bases that are not one of the uppercase codes (including bases that are
// already lowercase, gaps, dots, etc.) are returned unchanged.
func ToLower(b Base) Base {
	if b == A {
		return LowerA
	}
	if b == C {
		return LowerC
	}
	if b == G {
		return LowerG
	}
	if b == T {
		return LowerT
	}
	if b == N {
		return LowerN
	}
	return b
}
// RangeToUpper changes the bases in a set range to uppercase, in place.
// start is closed, end is open, both are zero-based.
func RangeToUpper(bases []Base, start int, end int) {
	for pos := start; pos < end; pos++ {
		bases[pos] = ToUpper(bases[pos])
	}
}
// RangeToLower changes the bases in a set range to lowercase, in place.
// start is closed, end is open, both are zero-based.
func RangeToLower(bases []Base, start int, end int) {
	for pos := start; pos < end; pos++ {
		bases[pos] = ToLower(bases[pos])
	}
}
// AllToUpper changes all bases in a sequence to uppercase, in place.
func AllToUpper(bases []Base) {
	RangeToUpper(bases, 0, len(bases))
}
// AllToLower changes all bases in a sequence to lowercase, in place.
func AllToLower(bases []Base) {
	RangeToLower(bases, 0, len(bases))
}
// complementArray is an efficient lookup for the complement of the input base,
// indexed by the Base value itself.
// intended to remain as a private array to help the Complement functions.
// NOTE(review): the element order must stay in sync with the numeric values
// of the Base constants (A, C, G, T, N, LowerA..LowerN, Gap, Dot, Nil)
// declared elsewhere in this package — confirm when those constants change.
// panics if value input is not a valid Base.
var complementArray = []Base{T, G, C, A, N, LowerT, LowerG, LowerC, LowerA, LowerN, Gap, Dot, Nil}
// ComplementSingleBase returns the nucleotide complementary to the input base,
// preserving case. Gap, Dot and Nil map to themselves; any other value
// panics via log.Panicf.
func ComplementSingleBase(b Base) Base {
	switch b {
	case A:
		return T
	case C:
		return G
	case G:
		return C
	case T:
		return A
	case N:
		return N
	case LowerA:
		return LowerT
	case LowerC:
		return LowerG
	case LowerG:
		return LowerC
	case LowerT:
		return LowerA
	case LowerN:
		return LowerN
	case Gap:
		return Gap
	case Dot:
		return Dot
	case Nil:
		return Nil
	default:
		log.Panicf("unrecognized base %v", b)
		// unreachable; Panicf never returns, but Go requires a return here
		return Nil
	}
}
// ReverseComplement reverses a sequence of bases in place and complements
// each base. Used to switch strands and maintain 5' -> 3' orientation.
func ReverseComplement(bases []Base) {
	// Complementing every base first and then reversing produces the same
	// result as complementing while swapping.
	Complement(bases)
	for left, right := 0, len(bases)-1; left < right; left, right = left+1, right-1 {
		bases[left], bases[right] = bases[right], bases[left]
	}
}
// Complement replaces every base in the sequence with its complement, in place.
func Complement(bases []Base) {
	for i := 0; i < len(bases); i++ {
		bases[i] = complementArray[bases[i]]
	}
}
// RemoveGaps returns a new sequence of bases with all gaps removed.
func RemoveGaps(bases []Base) []Base {
	return RemoveBase(bases, Gap)
}
// RemoveBase returns a new sequence of bases without any occurrence of the
// designated base. The input slice is not modified.
func RemoveBase(bases []Base, baseToRemove Base) []Base {
	kept := make([]Base, 0, len(bases))
	for _, b := range bases {
		if b != baseToRemove {
			kept = append(kept, b)
		}
	}
	return kept
}
// Delete removes bases from a sequence of bases and returns the result.
// The input slice's backing array is reused (and therefore modified).
// All base positions are zero based and left closed, right open.
// Panics when the interval is empty, inverted, or out of range.
func Delete(seq []Base, delStart int, delEnd int) []Base {
	if delStart >= delEnd || delStart < 0 || delEnd > len(seq) {
		// Fixed message: delEnd is an end index, not a length.
		log.Panicf("a deletion on sequence of length %d with start of %d and end of %d is invalid", len(seq), delStart, delEnd)
	}
	return append(seq[:delStart], seq[delEnd:]...)
}
// Insert adds bases to a sequence of bases and returns the result as a new
// slice. Base position is zero-based; the insertion happens before the
// specified base, and giving the length of the sequence puts the insertion
// at the end. Panics when insPos is out of range.
func Insert(seq []Base, insPos int, insSeq []Base) []Base {
	if insPos < 0 || insPos > len(seq) {
		log.Panicf("an insertion on sequence of length %d with start of %d is invalid", len(seq), insPos)
	}
	// Build the result in a fresh slice. The previous implementation used
	// append(seq[:insPos], append(insSeq, seq[insPos:]...)...), where the
	// inner append could write past len(insSeq) into insSeq's backing array
	// when it had spare capacity, corrupting the caller's data.
	ans := make([]Base, 0, len(seq)+len(insSeq))
	ans = append(ans, seq[:insPos]...)
	ans = append(ans, insSeq...)
	return append(ans, seq[insPos:]...)
}
// Replace performs both a deletion and an insertion,
// replacing the input interval with the input insSeq.
// all base positions are zero based and left closed, right open.
func Replace(seq []Base, start int, end int, insSeq []Base) []Base {
return append(seq[:start], append(insSeq, seq[end:]...)...)
} | dna/modify.go | 0.696062 | 0.504089 | modify.go | starcoder |
package ratelimiter
import (
"sync"
"time"
)
// MultiLimiter is a token-bucket rate limiter shared by multiple tenants.
// Each tenant has its own bucket, all refilled by a single background ticker.
type MultiLimiter struct {
	mu           *sync.RWMutex             // guards limitChanMap
	limitChanMap map[string]*singleLimiter // tenant ID -> that tenant's bucket
	ticker       *time.Ticker              // drives the periodic token refill
	cancelChan   chan struct{}             // closed by Stop to end the refill goroutine
}
// singleLimiter is one tenant's token bucket.
type singleLimiter struct {
	chunk     int           // size of increase: tokens added per refill tick
	limitChan chan struct{} // buffered channel acting as the token bucket
}
// NewMultiLimiter returns a MultiLimiter. calls is the number of calls to
// limit and burst is the allowed burst (it fills up over the unused calls).
// timeFrame specifies the period the calls are spread over, and tolerance is
// a fraction of extra calls to be tolerated (e.g. 0.05 allows 105% of calls
// per timeFrame).
//
// To use the MultiLimiter you must first register tenants with AddTenant.
// Allowed calls are filled up for all tenants periodically; each tenant
// receives an integer multiple (its chunk) of the basic rate per tick.
// Panics when calls < 1; a negative burst is treated as 0.
func NewMultiLimiter(calls, burst int, timeFrame time.Duration, tolerance float64) *MultiLimiter {
	if calls < 1 {
		panic("MultiLimiter needs a positive number of calls")
	}
	if burst < 0 {
		burst = 0
	}
	// Interval between single-token refills, stretched by the tolerance.
	rate := time.Duration(float64(timeFrame) / (float64(calls) * (1 + tolerance)))
	tick := time.NewTicker(rate)
	cChan := make(chan struct{})
	ml := &MultiLimiter{
		mu:           &sync.RWMutex{},
		limitChanMap: make(map[string]*singleLimiter),
		ticker:       tick,
		cancelChan:   cChan,
	}
	// Background refill loop. The previous version had a `default:` case in
	// this select, which turned the loop into a busy spin consuming a full
	// CPU core; blocking on the two channels gives the same behavior
	// without the spin.
	go func(mLimiter *MultiLimiter) {
		for {
			select {
			case <-tick.C:
				mLimiter.mu.RLock()
				for _, sl := range mLimiter.limitChanMap {
					sl.incTenant()
				}
				mLimiter.mu.RUnlock()
			case <-cChan:
				return
			}
		}
	}(ml)
	return ml
}
// Wait is the rate limiting call, use it to 'consume' limit.
// It blocks until a token is available for the tenant and returns true, or
// returns false immediately when the tenant has not been registered.
//
// NOTE(review): Wait blocks on the token channel while holding the read
// lock, so AddTenant (which needs the write lock) can stall while any
// caller is waiting for a token — confirm this is acceptable.
func (mrl *MultiLimiter) Wait(tenantID string) bool {
	mrl.mu.RLock()
	defer mrl.mu.RUnlock()
	singleLimiter, ok := mrl.limitChanMap[tenantID]
	if !ok {
		return false
	}
	// Consume one token; blocks until the refill goroutine provides one.
	<-singleLimiter.limitChan
	return true
}
// HasTenant reports whether a tenant with the given ID has been set up.
func (mrl *MultiLimiter) HasTenant(tenantID string) bool {
	// The map must be read under the lock: the previous version read it
	// unsynchronized, racing with AddTenant's writes.
	mrl.mu.RLock()
	defer mrl.mu.RUnlock()
	_, ok := mrl.limitChanMap[tenantID]
	return ok
}
// AddTenant adds a tenant with a config.
// It also updates a tenant's limits with a new config if it exists.
// burst is the token-bucket capacity and chunk is the number of tokens
// added to this tenant on every refill tick.
func (mrl *MultiLimiter) AddTenant(tenantID string, burst, chunk int) {
	mrl.mu.Lock()
	defer mrl.mu.Unlock()
	slOld, ok := mrl.limitChanMap[tenantID]
	if ok {
		// Reconfigure an existing tenant: swap in a channel with the new
		// capacity, carrying over at most `burst` unused tokens.
		slOld.chunk = chunk
		newChan := make(chan struct{}, burst)
		// Closing the old channel lets the range below drain its buffered
		// tokens and then terminate. No sender can race with this close:
		// the refill goroutine only sends while holding the read lock.
		close(slOld.limitChan)
		c := 0
		for v := range slOld.limitChan {
			c++
			if c > burst {
				// More leftover tokens than the new capacity; drop the rest.
				break
			}
			newChan <- v
		}
		slOld.limitChan = newChan
		return
	}
	sl := singleLimiter{
		limitChan: make(chan struct{}, burst),
		chunk:     chunk,
	}
	mrl.limitChanMap[tenantID] = &sl
}
// Stop shuts down the limiter: it stops the refill ticker and signals the
// background refill goroutine to exit. The limiter must not be used after
// Stop has been called.
func (mrl *MultiLimiter) Stop() {
	mrl.ticker.Stop()
	close(mrl.cancelChan)
}
func (sl *singleLimiter) incTenant() {
for i := 0; i < sl.chunk; i++ {
select {
case sl.limitChan <- struct{}{}:
default:
}
}
} | multi-tenant-limiter.go | 0.578329 | 0.400251 | multi-tenant-limiter.go | starcoder |
package pgo
// PairFlags is a bitmask of per-collision-pair behavior flags for the
// physics simulation (mirrors PhysX's PxPairFlag). Combine values with
// bitwise OR.
type PairFlags uint32
const (
	// PairFlags_eSOLVE_CONTACT makes the dynamics solver process the contacts
	// of this collision pair. Only takes effect if the colliding actors are
	// rigid bodies.
	PairFlags_eSOLVE_CONTACT PairFlags = (1 << 0)
	// PairFlags_eMODIFY_CONTACTS calls the contact modification callback for
	// this collision pair. Only takes effect for rigid bodies.
	// See PxContactModifyCallback.
	PairFlags_eMODIFY_CONTACTS PairFlags = (1 << 1)
	// PairFlags_eNOTIFY_TOUCH_FOUND calls the contact report or trigger
	// callback when this pair starts to be in contact (or, for trigger
	// shapes, when the other object enters the trigger volume).
	// Rigid bodies only; requires eDETECT_DISCRETE_CONTACT or
	// eDETECT_CCD_CONTACT to be raised.
	PairFlags_eNOTIFY_TOUCH_FOUND PairFlags = (1 << 2)
	// PairFlags_eNOTIFY_TOUCH_PERSISTS calls the contact report callback while
	// this pair remains in contact. Triggers do not support this event; track
	// persistent trigger contacts via eNOTIFY_TOUCH_FOUND/eNOTIFY_TOUCH_LOST.
	// No report is sent while the objects are sleeping. Rigid bodies only;
	// requires eDETECT_DISCRETE_CONTACT or eDETECT_CCD_CONTACT. If enabled
	// while a pair is already touching, no events fire until touch is lost
	// and regained.
	PairFlags_eNOTIFY_TOUCH_PERSISTS PairFlags = (1 << 3)
	// PairFlags_eNOTIFY_TOUCH_LOST calls the contact report or trigger
	// callback when this pair stops being in contact (or the other object
	// leaves the trigger volume). Also fires if one of the objects is
	// deleted. Rigid bodies only; requires eDETECT_DISCRETE_CONTACT or
	// eDETECT_CCD_CONTACT.
	PairFlags_eNOTIFY_TOUCH_LOST PairFlags = (1 << 4)
	// PairFlags_eNOTIFY_TOUCH_CCD calls the contact report callback when this
	// pair is detected in contact during CCD passes. With multi-pass CCD the
	// same pair may be reported multiple times per step, and the system
	// cannot always tell whether a contact is new or persisted. Rigid bodies
	// only; trigger shapes unsupported; requires eDETECT_CCD_CONTACT.
	PairFlags_eNOTIFY_TOUCH_CCD PairFlags = (1 << 5)
	// PairFlags_eNOTIFY_THRESHOLD_FORCE_FOUND calls the contact report
	// callback when the contact force between the actors exceeds one of the
	// actor-defined force thresholds. Rigid bodies only; requires
	// eDETECT_DISCRETE_CONTACT or eDETECT_CCD_CONTACT.
	PairFlags_eNOTIFY_THRESHOLD_FORCE_FOUND PairFlags = (1 << 6)
	// PairFlags_eNOTIFY_THRESHOLD_FORCE_PERSISTS calls the contact report
	// callback while the contact force continues to exceed an actor-defined
	// force threshold. After re-filtering with this flag previously disabled,
	// the report may be skipped for one frame unless
	// eNOTIFY_THRESHOLD_FORCE_FOUND was set in the previous frame. Rigid
	// bodies only; requires eDETECT_DISCRETE_CONTACT or eDETECT_CCD_CONTACT.
	PairFlags_eNOTIFY_THRESHOLD_FORCE_PERSISTS PairFlags = (1 << 7)
	// PairFlags_eNOTIFY_THRESHOLD_FORCE_LOST calls the contact report callback
	// when the contact force falls below an actor-defined force threshold
	// (including when the pair stops being in contact). Same re-filtering
	// caveat as eNOTIFY_THRESHOLD_FORCE_PERSISTS. Rigid bodies only; requires
	// eDETECT_DISCRETE_CONTACT or eDETECT_CCD_CONTACT.
	PairFlags_eNOTIFY_THRESHOLD_FORCE_LOST PairFlags = (1 << 8)
	// PairFlags_eNOTIFY_CONTACT_POINTS provides contact points in contact
	// reports for this pair. Rigid bodies only; used in combination with the
	// eNOTIFY_TOUCH_* or eNOTIFY_THRESHOLD_FORCE_* flags; requires
	// eDETECT_DISCRETE_CONTACT or eDETECT_CCD_CONTACT.
	// See PxContactPair.extractContacts().
	PairFlags_eNOTIFY_CONTACT_POINTS PairFlags = (1 << 9)
	// PairFlags_eDETECT_DISCRETE_CONTACT indicates that this pair generates
	// discrete collision detection contacts. Contacts are only responded to
	// if eSOLVE_CONTACT is enabled.
	PairFlags_eDETECT_DISCRETE_CONTACT PairFlags = (1 << 10)
	// PairFlags_eDETECT_CCD_CONTACT indicates that this pair generates CCD
	// contacts, responded to only if eSOLVE_CONTACT is enabled. Requires
	// PxSceneFlag::eENABLE_CCD on the scene and PxRigidBodyFlag::eENABLE_CCD
	// on non-static bodies. Not supported with trigger shapes; CCD trigger
	// events can be emulated with non-trigger shapes plus
	// eNOTIFY_TOUCH_FOUND/eNOTIFY_TOUCH_LOST and no eSOLVE_CONTACT.
	PairFlags_eDETECT_CCD_CONTACT PairFlags = (1 << 11)
	// PairFlags_ePRE_SOLVER_VELOCITY provides the rigid bodies' velocities
	// before contacts were solved in contact reports for this pair (no data
	// when the pair lost touch). Useful when a body's velocity is set while
	// the simulation is running, in which case querying the actor would
	// return the new value rather than the one the simulation used.
	// See PxContactPairVelocity, PxContactPairHeader.extraDataStream.
	PairFlags_ePRE_SOLVER_VELOCITY PairFlags = (1 << 12)
	// PairFlags_ePOST_SOLVER_VELOCITY provides the rigid bodies' velocities
	// after contacts were solved in contact reports for this pair (no data
	// when the pair lost touch).
	// See PxContactPairVelocity, PxContactPairHeader.extraDataStream.
	PairFlags_ePOST_SOLVER_VELOCITY PairFlags = (1 << 13)
	// PairFlags_eCONTACT_EVENT_POSE provides the rigid body poses at the
	// contact event in contact reports for this pair (no data when the pair
	// lost touch). Useful when poses are set mid-simulation, or with
	// multi-pass CCD to get the pose at each impact.
	// See PxContactPairPose, PxContactPairHeader.extraDataStream.
	PairFlags_eCONTACT_EVENT_POSE PairFlags = (1 << 14)
	PairFlags_eNEXT_FREE PairFlags = (1 << 15) //!< For internal use only.
	// PairFlags_eCONTACT_DEFAULT is the default flag combination for simple
	// contact processing for a collision pair.
	PairFlags_eCONTACT_DEFAULT PairFlags = PairFlags_eSOLVE_CONTACT | PairFlags_eDETECT_DISCRETE_CONTACT
	// PairFlags_eTRIGGER_DEFAULT is the default flag combination for commonly
	// used trigger behavior for a collision pair.
	PairFlags_eTRIGGER_DEFAULT PairFlags = PairFlags_eNOTIFY_TOUCH_FOUND | PairFlags_eNOTIFY_TOUCH_LOST | PairFlags_eDETECT_DISCRETE_CONTACT
)
package fastbytes
import (
"reflect"
"github.com/yehan2002/errors"
"github.com/yehan2002/fastbytes/v2/internal"
)
const (
	// ErrUnsupported is returned when the given type is not supported.
	// All signed and unsigned integers except uint and int, and floats, are
	// supported. uint and int are unsupported since their size is platform
	// dependent.
	ErrUnsupported = errors.Error("bytes: unsupported target/source type")
	// ErrUnadressable is returned when the given reflect.Value cannot be
	// addressed.
	// NOTE(review): the identifier keeps its historical misspelling of
	// "unaddressable" because renaming it would break the exported API.
	ErrUnadressable = errors.Error("bytes: un-addressable value")
)
var (
	// BigEndian copies bytes to and from big endian byte slices.
	BigEndian = bytes{rotate: rotateBigEndian}
	// LittleEndian copies bytes to and from little endian byte slices.
	LittleEndian = bytes{rotate: !rotateBigEndian}
	// Compile-time checks that both implementations satisfy ByteOrder.
	_ ByteOrder = &BigEndian
	_ ByteOrder = &LittleEndian
)
// ByteOrder converts numeric slices to and from byte slices in a fixed
// byte order. All copy methods truncate to the size of the smaller of the
// two buffers and return the number of bytes copied.
type ByteOrder interface {
	// FromI8 converts and copies []int8 from `src` into `dst`.
	// The number of bytes copied is min(len(src), len(dst))
	FromI8(src []int8, dst []byte) (n int)
	// FromI16 converts and copies []int16 from `src` into `dst`.
	// The number of bytes copied is min(len(src)*2, len(dst))
	FromI16(src []int16, dst []byte) (n int)
	// FromU16 converts and copies []uint16 from `src` into `dst`.
	// The number of bytes copied is min(len(src)*2, len(dst))
	FromU16(src []uint16, dst []byte) (n int)
	// FromI32 converts and copies []int32 from `src` into `dst`.
	// The number of bytes copied is min(len(src)*4, len(dst))
	FromI32(src []int32, dst []byte) (n int)
	// FromU32 converts and copies []uint32 from `src` into `dst`.
	// The number of bytes copied is min(len(src)*4, len(dst))
	FromU32(src []uint32, dst []byte) (n int)
	// FromF32 converts and copies []float32 from `src` into `dst`.
	// The number of bytes copied is min(len(src)*4, len(dst))
	FromF32(src []float32, dst []byte) (n int)
	// FromI64 converts and copies []int64 from `src` into `dst`.
	// The number of bytes copied is min(len(src)*8, len(dst))
	FromI64(src []int64, dst []byte) (n int)
	// FromU64 converts and copies []uint64 from `src` into `dst`.
	// The number of bytes copied is min(len(src)*8, len(dst))
	FromU64(src []uint64, dst []byte) (n int)
	// FromF64 converts and copies []float64 from `src` into `dst`.
	// The number of bytes copied is min(len(src)*8, len(dst))
	FromF64(src []float64, dst []byte) (n int)
	// ToI8 converts and copies bytes from `src` into `dst`
	// The number of bytes copied is min(len(src), len(dst))
	ToI8(src []byte, dst []int8) (n int)
	// ToI16 converts and copies bytes form `src` into `dst`
	// The number of bytes copied is min(len(src), len(dst)*2)
	ToI16(src []byte, dst []int16) (n int)
	// ToU16 converts and copies bytes from `src` into `dst`
	// The number of bytes copied is min(len(src), len(dst)*2)
	ToU16(src []byte, dst []uint16) (n int)
	// ToI32 converts and copies bytes from `src` into `dst`
	// The number of bytes copied is min(len(src), len(dst)*4)
	ToI32(src []byte, dst []int32) (n int)
	// ToU32 converts and copies bytes from `src` into `dst`
	// The number of bytes copied is min(len(src), len(dst)*4)
	ToU32(src []byte, dst []uint32) (n int)
	// ToF32 converts and copies bytes from `src` into `dst`
	// The number of bytes copied is min(len(src), len(dst)*4)
	ToF32(src []byte, dst []float32) (n int)
	// ToI64 converts and copies bytes from `src` into `dst`
	// The number of bytes copied is min(len(src), len(dst)*8)
	ToI64(src []byte, dst []int64) (n int)
	// ToU64 converts and copies bytes from `src` into `dst`
	// The number of bytes copied is min(len(src), len(dst)*8)
	ToU64(src []byte, dst []uint64) (n int)
	// ToF64 converts and copies bytes from `src` into `dst`
	// The number of bytes copied is min(len(src), len(dst)*8)
	ToF64(src []byte, dst []float64) (n int)
	// To copies bytes from `src` into the given slice.
	// The given interface must be a type that can be safely written to.
	// The number of bytes copied is min(len(src), len(dst)* element size of dst)
	To(src []byte, dst interface{}) (n int, err error)
	// From copies bytes from the given interface.
	// The provided interface must be a type that can be safely copied.
	// The number of bytes copied is min(len(src)* element size of dst, len(dst))
	From(src interface{}, dst []byte) (n int, err error)
	// ToValue copies bytes from `src` into the given value
	// The given interface must be a type that can be safely written to.
	// The number of bytes copied is min(len(src), len(dst)* element size of dst)
	ToValue(src []byte, dst reflect.Value) (n int, err error)
	// FromValue copies bytes from the given value.
	// The provided value must be a type that can be safely converted to bytes.
	// The number of bytes copied is min(len(src)* element size of dst, len(dst))
	FromValue(src reflect.Value, dst []byte) (n int, err error)
}
var _ internal.Provider = provider{}
// export errors to `internal`
func init() {
internal.ErrUnsupported, internal.ErrUnaddressable = ErrUnsupported, ErrUnadressable
} | bytes.go | 0.63023 | 0.423637 | bytes.go | starcoder |
package filebuf
// node is one node of a binary (splay) tree over data segments. The tree is
// keyed implicitly by byte offset: size caches the total byte length of the
// subtree rooted here, so offsets can be resolved without storing keys.
type node struct {
	left, right, parent *node
	data data
	size int64 //left.size + data.size + right.size
}
// mkNode wraps a data segment in a fresh, unlinked tree node whose cached
// size equals the segment's size.
func mkNode(d data) *node {
	return &node{data: d, size: d.Size()}
}

// Copy deep-copies the subtree rooted at t, including each node's data.
// Copying a nil node yields nil.
func (t *node) Copy() *node {
	if t == nil {
		return nil
	}
	n := *t // shallow copy; data and children are replaced below
	n.data = n.data.Copy()
	n.setLeft(n.left.Copy())
	n.setRight(n.right.Copy())
	return &n
}
/* The set{Left, Right, Parent} functions should be used,
 * because they take into account updating the size field */

// setLeft installs l as t's left child (re-parenting l when non-nil) and
// recomputes t's cached subtree size.
func (t *node) setLeft(l *node) {
	t.left = l
	if t.left != nil {
		t.left.parent = t
	}
	t.resetSize()
}

// setRight installs r as t's right child (re-parenting r when non-nil) and
// recomputes t's cached subtree size.
func (t *node) setRight(r *node) {
	t.right = r
	if t.right != nil {
		t.right.parent = t
	}
	t.resetSize()
}

// setParent attaches t under p and refreshes p's cached subtree size.
// Note it does not update p's child pointers; callers do that themselves.
func (t *node) setParent(p *node) {
	t.parent = p
	if t.parent != nil {
		t.parent.resetSize()
	}
}

// resetSize recomputes t's cached subtree size from its children and its
// own data segment.
func (t *node) resetSize() {
	t.size = nodesize(t.left) + t.data.Size() + nodesize(t.right)
}
// nodesize reports t's cached subtree size, treating a nil node as size 0.
func nodesize(t *node) int64 {
	if t == nil {
		return 0
	}
	return t.size
}
// first returns the leftmost (minimum-offset) node of the subtree rooted at n.
func (n *node) first() *node {
	cur := n
	for cur.left != nil {
		cur = cur.left
	}
	return cur
}

// last returns the rightmost (maximum-offset) node of the subtree rooted at n.
func (n *node) last() *node {
	cur := n
	for cur.right != nil {
		cur = cur.right
	}
	return cur
}
// iter walks the subtree rooted at n in-order, invoking cb on each node.
// Traversal stops as soon as cb returns true; iter reports whether it was
// stopped early. A nil subtree reports false.
func (n *node) iter(cb func(*node) bool) bool {
	if n == nil {
		return false
	}
	// Short-circuit OR preserves the early-exit behavior of the original
	// three-step traversal: left subtree, then this node, then right.
	return n.left.iter(cb) || cb(n) || n.right.iter(cb)
}
//helper functions for determining where to go in the tree based on offset

// goleft reports whether offset falls inside t's left subtree.
func goleft(offset int64, t *node) bool {
	return offset < nodesize(t.left)
}

// goright reports whether offset falls past t's own data segment, i.e.
// inside the right subtree.
func goright(offset int64, t *node) bool {
	nodeOff := offset - nodesize(t.left)
	return nodeOff >= t.data.Size()
}
// get descends from node to the node whose data segment covers the requested
// offset, returning that node plus the offset relative to the start of its
// segment. Panics when offset lies beyond the subtree's total size.
func (node *node) get(offset int64) (*node, int64) {
	if offset > node.size {
		panic("node.get; offset > node.size")
	}
	offsetInNode := offset - nodesize(node.left)
	nodeSize := node.data.Size()
	switch {
	case offsetInNode < 0:
		// offset lies in the left subtree
		return node.left.get(offset)
	case offsetInNode < nodeSize:
		// offset lies within this node's own data segment
		return node, offsetInNode
	default:
		// offset lies in the right subtree
		return node.right.get(offsetInNode - nodeSize)
	}
}
// stats accumulates summary statistics about a tree during a walk; it is
// filled in by (*node).stats.
type stats struct {
	size int64
	numnodes, filenodes, datanodes, fixeddata int64
	maxdist int64 //max distance to root
	avgdist float64 //avg distance to root
	maxsz, minsz int64 //max/min nodesize
	avgsz float64 //average nodesize
}
// updateAvg folds one more sample into a running mean: avg is the mean over
// n_ samples and val_ the new sample; the result is the mean over n_+1.
func updateAvg(avg float64, n_, val_ int64) float64 {
	count := float64(n_)
	sample := float64(val_)
	runningSum := avg * count
	return (runningSum + sample) / (count + 1)
}
// stats walks the subtree rooted at t, accumulating node counts plus size
// and depth statistics into st. depth is t's distance from the walk's root.
// NOTE(review): st.minsz only ever shrinks here, so unless the caller seeds
// it with a large value the reported minimum stays at 0 — confirm callers.
func (t *node) stats(st *stats, depth int64) {
	if t != nil {
		t.left.stats(st, depth+1)
		t.right.stats(st, depth+1)
		// Classify the node by its data kind.
		switch t.data.(type) {
		case *fileData:
			st.filenodes++
		case *bufData:
			st.datanodes++
			if t.data.(*bufData).frozen {
				st.fixeddata++
			}
		}
		if depth > st.maxdist {
			st.maxdist = depth
		}
		// Running averages must be updated before numnodes is incremented.
		st.avgdist = updateAvg(st.avgdist, st.numnodes, depth)
		tsz := t.data.Size()
		st.avgsz = updateAvg(st.avgsz, st.numnodes, tsz)
		st.size += tsz
		if tsz > st.maxsz {
			st.maxsz = tsz
		}
		if tsz < st.minsz {
			st.minsz = tsz
		}
		st.numnodes++
	}
}
//splay functions from wikipedia
//take care to adjust the size fields
/* Cool ascii art illustration:
 *                 y
 *   x            / \
 *  / \    -->   x   c
 * a   y        / \
 *    / \      a   b
 *   b   c
 */
// rotateLeft rotates the subtree rooted at x to the left, promoting x's
// right child y into x's place. All links go through the set* helpers so
// the cached subtree sizes stay consistent.
func rotateLeft(x *node) {
	y := x.right
	if y != nil {
		x.setRight(y.left)
		y.setParent(x.parent)
	}
	// Re-point the parent's child link (nothing to do when x was the root).
	if x.parent == nil {
	} else if x == x.parent.left {
		x.parent.setLeft(y)
	} else {
		x.parent.setRight(y)
	}
	if y != nil {
		y.setLeft(x)
	}
	x.setParent(y)
}
/* Cool ascii art illustration:
 *                 x
 *   y            / \
 *  / \    <--   y   c
 * a   x        / \
 *    / \      a   b
 *   b   c
 */
// rotateRight is the mirror of rotateLeft: it promotes x's left child y
// into x's place, again using the set* helpers to keep sizes consistent.
func rotateRight(x *node) {
	y := x.left
	if y != nil {
		x.setLeft(y.right)
		y.setParent(x.parent)
	}
	// Re-point the parent's child link (nothing to do when x was the root).
	if x.parent == nil {
	} else if x == x.parent.right {
		x.parent.setRight(y)
	} else {
		x.parent.setLeft(y)
	}
	if y != nil {
		y.setRight(x)
	}
	x.setParent(y)
}
//see https://en.wikipedia.org/wiki/Splay_tree
func splay(x *node) *node {
for x.parent != nil {
if x.parent.parent == nil {
if x == x.parent.left {
rotateRight(x.parent)
} else {
rotateLeft(x.parent)
}
} else if x.parent.left == x && x.parent.parent.left == x.parent {
rotateRight(x.parent.parent)
rotateRight(x.parent)
} else if x.parent.right == x && x.parent.parent.right == x.parent {
rotateLeft(x.parent.parent)
rotateLeft(x.parent)
} else if x.parent.left == x && x.parent.parent.right == x.parent {
rotateRight(x.parent)
rotateLeft(x.parent)
} else {
rotateLeft(x.parent)
rotateRight(x.parent)
}
}
return x
} | node.go | 0.607547 | 0.438725 | node.go | starcoder |
package bsonkit
import (
"fmt"
"sort"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// MustConvert is like Convert but panics when the conversion fails.
func MustConvert(v interface{}) Doc {
	d, err := Convert(v)
	if err == nil {
		return d
	}
	panic("bsonkit: " + err.Error())
}
// Convert will convert the provided value to a document. The value is expected
// to be a bson.M or bson.D composed of standard types. The conversion is
// recursive; the root must convert to a bson.D or an error is returned.
func Convert(v interface{}) (Doc, error) {
	// convert value recursively
	res, err := ConvertValue(v)
	if err != nil {
		return nil, err
	}
	// the converted root must itself be a document
	doc, ok := res.(bson.D)
	if !ok {
		return nil, fmt.Errorf(`expected conversion to result in a "bson.D"`)
	}
	return &doc, nil
}
// MustConvertList is like ConvertList but panics when the conversion fails.
func MustConvertList(v interface{}) List {
	l, err := ConvertList(v)
	if err == nil {
		return l
	}
	panic("bsonkit: " + err.Error())
}
// ConvertList will convert an array to a list. The value is expected to be an
// array of bson.M or bson.D elements composed of standard types. Every
// element must convert to a document or an error is returned.
func ConvertList(v interface{}) (List, error) {
	// convert value recursively
	doc, err := ConvertValue(v)
	if err != nil {
		return nil, err
	}
	// the converted root must be an array
	array, ok := doc.(bson.A)
	if !ok {
		return nil, fmt.Errorf(`expected array`)
	}
	// build list, requiring every element to be a document
	list := make(List, 0, len(array))
	for _, item := range array {
		doc, ok := item.(bson.D)
		if !ok {
			return nil, fmt.Errorf(`expected array of documents`)
		}
		list = append(list, &doc)
	}
	return list, nil
}
// MustConvertValue will call ConvertValue and panic on errors.
func MustConvertValue(v interface{}) interface{} {
	// convert value
	res, err := ConvertValue(v)
	if err != nil {
		// prefix the package name like MustConvert and MustConvertList do
		panic("bsonkit: " + err.Error())
	}
	return res
}
// ConvertValue will convert the provided type to a standard type: maps
// become key-sorted bson.D documents, slice types become bson.A arrays,
// int widens to int64, time values become primitive.DateTime, and nil
// pointers become nil. Unsupported types yield an error.
func ConvertValue(v interface{}) (interface{}, error) {
	// convert recursively
	var err error
	switch value := v.(type) {
	case bson.M:
		return convertMap(value)
	case map[string]interface{}:
		return convertMap(value)
	case bson.A:
		a := make(bson.A, len(value))
		for i, item := range value {
			a[i], err = ConvertValue(item)
			if err != nil {
				return nil, err
			}
		}
		return a, nil
	case []interface{}:
		a := make(bson.A, len(value))
		for i, item := range value {
			a[i], err = ConvertValue(item)
			if err != nil {
				return nil, err
			}
		}
		return a, nil
	case []string:
		// strings are already standard; copy without recursion
		a := make(bson.A, len(value))
		for i, item := range value {
			a[i] = item
		}
		return a, nil
	case []bson.M:
		a := make(bson.A, len(value))
		for i, item := range value {
			a[i], err = ConvertValue(item)
			if err != nil {
				return nil, err
			}
		}
		return a, nil
	case bson.D:
		d := make(bson.D, len(value))
		for i, item := range value {
			d[i].Key = item.Key
			d[i].Value, err = ConvertValue(item.Value)
			if err != nil {
				return nil, err
			}
		}
		return d, nil
	case []bson.D:
		a := make(bson.A, len(value))
		for i, item := range value {
			a[i], err = ConvertValue(item)
			if err != nil {
				return nil, err
			}
		}
		return a, nil
	case []primitive.ObjectID:
		a := make(bson.A, len(value))
		for i, item := range value {
			a[i] = item
		}
		return a, nil
	case nil, int32, int64, float64, string, bool:
		// already standard BSON scalar types
		return value, nil
	case int:
		// normalize platform-sized int to int64
		return int64(value), nil
	case primitive.Null, primitive.ObjectID, primitive.DateTime,
		primitive.Timestamp, primitive.Regex, primitive.Decimal128,
		primitive.Binary:
		return value, nil
	case *primitive.ObjectID:
		if value != nil {
			return *value, nil
		}
		return nil, nil
	case time.Time:
		return primitive.NewDateTimeFromTime(value.UTC()), nil
	case *time.Time:
		if value != nil {
			return primitive.NewDateTimeFromTime(value.UTC()), nil
		}
		return nil, nil
	default:
		return nil, fmt.Errorf("unsupported type %T", v)
	}
}
func convertMap(m bson.M) (bson.D, error) {
// prepare document
d := make(bson.D, 0, len(m))
// copy keys
for key, field := range m {
v, err := ConvertValue(field)
if err != nil {
return nil, err
}
d = append(d, bson.E{
Key: key,
Value: v,
})
}
// sort document
sort.Slice(d, func(i, j int) bool {
return d[i].Key < d[j].Key
})
return d, nil
} | bsonkit/convert.go | 0.675229 | 0.414543 | convert.go | starcoder |
package scenario
import (
"fmt"
"math"
"math/rand"
"sort"
)
// Generate builds the Execution for the named scenario generator, or
// reports an error when no generator with that name is registered.
func Generate(name string) (Execution, error) {
	gen, found := generators[name]
	if !found {
		return Execution{}, fmt.Errorf("generator %q not found", name)
	}
	return generate(gen()), nil
}
// Generators returns the names of all registered scenario generators in
// sorted order.
func Generators() []string {
	// pre-size to avoid repeated growth while collecting the keys
	names := make([]string, 0, len(generators))
	for name := range generators {
		names = append(names, name)
	}
	sort.Strings(names)
	return names
}
// generate materializes an exec specification into an Execution by sampling
// every parameter stream once per cycle. Rates are clamped non-negative,
// the scannable fraction is clamped to [0, 1], stack bytes are quantized to
// 2048-byte units, and the heap target is truncated to a whole number.
func generate(e exec) Execution {
	c := make([]Cycle, 0, e.length)
	for i := 0; i < e.length; i++ {
		c = append(c, Cycle{
			AllocRate: e.allocRate.min(0)(),
			ScanRate: e.scanRate.min(0)(),
			GrowthRate: e.growthRate.min(0)(),
			ScannableFrac: e.scannableFrac.limit(0, 1)(),
			StackBytes: uint64(e.stackBytes.quantize(2048).min(0)()),
			HeapTargetBytes: int64(e.heapTargetBytes.quantize(1)()),
		})
	}
	return Execution{
		Globals: e.globals,
		Cycles: c,
	}
}
// exec is a generator's specification: fixed Globals plus one parameter
// stream per per-cycle quantity. generate samples each stream length times
// to build the concrete Execution.
type exec struct {
	globals Globals
	allocRate stream
	scanRate stream
	growthRate stream
	scannableFrac stream
	stackBytes stream
	heapTargetBytes stream
	length int
}
// generators maps scenario names to factory functions producing the exec
// specification for that scenario. The keys are the names reported by
// Generators and accepted by Generate.
var generators = map[string]func() exec{
	"steady": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: constant(1.0),
			scanRate: constant(31.0),
			growthRate: constant(2.0).mix(ramp(-1.0, 8)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(-1),
			length: 50,
		}
	},
	"step-alloc": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: constant(1.0).mix(ramp(1.0, 1).delay(50)),
			scanRate: constant(31.0),
			growthRate: constant(2.0).mix(ramp(-1.0, 8)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(-1),
			length: 100,
		}
	},
	"big-stacks": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: constant(4.0),
			scanRate: constant(31.0),
			growthRate: constant(2.0).mix(ramp(-1.0, 8)),
			scannableFrac: constant(1.0),
			stackBytes: constant(2048).mix(ramp(128<<20, 8)),
			heapTargetBytes: constant(-1),
			length: 50,
		}
	},
	"big-globals": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 128 << 20,
				InitialHeap: 2 << 20,
			},
			allocRate: constant(4.0),
			scanRate: constant(31.0),
			growthRate: constant(2.0).mix(ramp(-1.0, 8)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(-1),
			length: 50,
		}
	},
	"osc-alloc": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: oscillate(0.4, 0, 8).offset(2),
			scanRate: constant(31.0),
			growthRate: constant(2.0).mix(ramp(-1.0, 8)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(-1),
			length: 50,
		}
	},
	"jitter-alloc": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: random(0.4).offset(4),
			scanRate: constant(31.0),
			growthRate: constant(2.0).mix(ramp(-1.0, 8), random(0.01)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(-1),
			length: 50,
		}
	},
	"high-GOGC": func() exec {
		return exec{
			globals: Globals{
				Gamma: 16,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: random(0.2).offset(5),
			scanRate: constant(31.0),
			growthRate: constant(2.0).mix(ramp(-1.0, 8), random(0.01), unit(14).delay(25)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(-1),
			length: 50,
		}
	},
	"heavy-jitter-alloc": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: random(1.0).offset(10),
			scanRate: constant(31.0),
			growthRate: constant(2.0).mix(ramp(-1.0, 8), random(0.01)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(-1),
			length: 50,
		}
	},
	"heavy-step-alloc": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: constant(1.0).mix(ramp(10.0, 1).delay(50)),
			scanRate: constant(31.0),
			growthRate: constant(2.0).mix(ramp(-1.0, 8)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(-1),
			length: 100,
		}
	},
	"high-heap-target": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: random(0.2).offset(5),
			scanRate: constant(31.0),
			growthRate: constant(2.0).mix(ramp(-1.0, 8), random(0.01), unit(14).delay(25)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(2 << 30),
			length: 50,
		}
	},
	"low-heap-target": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: random(0.1).offset(4),
			scanRate: constant(31.0),
			growthRate: constant(1.5).mix(ramp(-0.5, 4), random(0.01), unit(3).delay(25)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(64 << 20),
			length: 50,
		}
	},
	"very-low-heap-target": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: random(0.1).offset(4),
			scanRate: constant(31.0),
			growthRate: constant(2.0).mix(ramp(-1.0, 20), random(0.01)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(64 << 20),
			length: 50,
		}
	},
	"step-heap-target": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: random(0.1).offset(4),
			scanRate: constant(31.0),
			growthRate: constant(2.0).mix(ramp(-1.0, 8), random(0.01)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(-1).mix(constant((256 << 20) + 1).delay(25)),
			length: 50,
		}
	},
	"heavy-step-alloc-high-heap-target": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: constant(1.0).mix(ramp(10.0, 1).delay(25)),
			scanRate: constant(31.0),
			growthRate: constant(2.0).mix(ramp(-1.0, 8), random(0.01)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(2 << 30),
			length: 50,
		}
	},
	"exceed-heap-target": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: random(0.1).offset(4),
			scanRate: constant(31.0),
			growthRate: constant(1.5).mix(ramp(-0.5, 4), random(0.01), unit(6).delay(25)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(64 << 20),
			length: 50,
		}
	},
	"exceed-heap-target-high-GOGC": func() exec {
		return exec{
			globals: Globals{
				Gamma: 16,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: random(0.1).offset(4),
			scanRate: constant(31.0),
			growthRate: constant(1.5).mix(ramp(-0.5, 4), random(0.01), unit(14).delay(25)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(64 << 20),
			length: 50,
		}
	},
	"low-noise-high-heap-target": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: random(0.2).offset(5),
			scanRate: constant(31.0),
			growthRate: constant(2.0).mix(ramp(-1.0, 8), random(0.01), unit(14).delay(25)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(2 << 30).mix(random(64 << 20)),
			length: 50,
		}
	},
	"high-noise-high-heap-target": func() exec {
		return exec{
			globals: Globals{
				Gamma: 2,
				GlobalsBytes: 32 << 10,
				InitialHeap: 2 << 20,
			},
			allocRate: random(0.2).offset(5),
			scanRate: constant(31.0),
			growthRate: constant(2.0).mix(ramp(-1.0, 8), random(0.01), unit(14).delay(25)),
			scannableFrac: constant(1.0),
			stackBytes: constant(8192),
			heapTargetBytes: constant(2 << 30).mix(random(1 << 30)),
			length: 50,
		}
	},
}
type stream func() float64
// constant returns a stream that yields c on every call.
func constant(c float64) stream {
	return func() float64 { return c }
}
// unit returns a stream that yields amp exactly once, then 0 forever.
func unit(amp float64) stream {
	fired := false
	return func() float64 {
		if fired {
			return 0
		}
		fired = true
		return amp
	}
}
// oscillate returns a sine-wave stream with the given amplitude and phase
// (radians) whose period spans `period` samples. The cycle counter wraps
// at period so the wave repeats indefinitely.
func oscillate(amp, phase float64, period int) stream {
	var cycle int
	return func() float64 {
		// position in the current period, before advancing the counter
		p := float64(cycle)/float64(period)*2*math.Pi + phase
		cycle++
		if cycle == period {
			cycle = 0
		}
		return math.Sin(p) * amp
	}
}
// ramp returns a stream that rises linearly from 0 toward height over
// `length` samples and then holds at height.
func ramp(height float64, length int) stream {
	var cycle int
	return func() float64 {
		h := height * float64(cycle) / float64(length)
		// stop advancing once the ramp has completed
		if cycle < length {
			cycle++
		}
		return h
	}
}
// random returns a stream of uniform noise spanning [-amp, amp).
func random(amp float64) stream {
	return func() float64 {
		noise := (rand.Float64() - 0.5) * 2
		return noise * amp
	}
}
// delay returns a stream that emits 0 for the first `cycles` samples and
// thereafter replays f's output delayed by `cycles` samples. A ring buffer
// stores f's past values; next is the combined read/write position once the
// buffer has filled.
func (f stream) delay(cycles int) stream {
	buf := make([]float64, 0, cycles)
	next := 0
	return func() float64 {
		// always advance f so its internal state keeps moving
		old := f()
		if len(buf) < cap(buf) {
			// still filling: buffer the sample and emit silence
			buf = append(buf, old)
			return 0
		}
		// steady state: emit the oldest sample, overwrite it with the newest
		res := buf[next]
		buf[next] = old
		next++
		if next == len(buf) {
			next = 0
		}
		return res
	}
}
// vga modulates f by a variable gain stream, sample by sample
// (voltage-controlled-amplifier style).
func (f stream) vga(gain stream) stream {
	return func() float64 { return f() * gain() }
}

// scale multiplies every sample of f by the fixed factor amt.
func (f stream) scale(amt float64) stream {
	return f.vga(constant(amt))
}
// offset shifts every sample of f up by amt.
func (f stream) offset(amt float64) stream {
	return func() float64 { return f() + amt }
}
// mix sums f with any number of additional streams, sample by sample.
func (f stream) mix(fs ...stream) stream {
	return func() float64 {
		total := f()
		for _, other := range fs {
			total += other()
		}
		return total
	}
}
// quantize rounds each sample of f toward zero to a multiple of mult.
func (f stream) quantize(mult float64) stream {
	return func() float64 {
		// Trunc rounds toward zero, matching Ceil for negative quotients
		// and Floor for non-negative ones.
		return math.Trunc(f()/mult) * mult
	}
}
// min clamps f's samples from below: each sample is at least `min`.
// (Implemented with math.Max — the method name refers to the floor value.)
func (f stream) min(min float64) stream {
	return func() float64 {
		return math.Max(min, f())
	}
}

// max clamps f's samples from above: each sample is at most `max`.
func (f stream) max(max float64) stream {
	return func() float64 {
		return math.Min(max, f())
	}
}
func (f stream) limit(min, max float64) stream {
return func() float64 {
v := f()
if v < min {
v = min
} else if v > max {
v = max
}
return v
}
} | scenario/generators.go | 0.515132 | 0.411052 | generators.go | starcoder |
package tok
import (
"strings"
)
// Tracker is an interface that can be coupled with a Scanner to track the
// movement of its read position; Update is called with the current Marker.
type Tracker interface {
	Update(m Marker)
}
// NewBasket returns a new empty Basket and installs it as the scanner's
// Tracker.
func (s *Scanner) NewBasket() *Basket {
	b := &Basket{}
	s.Tracker = b
	return b
}
// NewBasketFor returns a new empty Basket, registers it to pick the Segment
// of every Rule of the Grammar g, and installs it as the scanner's Tracker.
func (s *Scanner) NewBasketFor(g Grammar) *Basket {
	b := &Basket{}
	b.PickWith(g.Grammar()...)
	s.Tracker = b
	return b
}
//------------------------------------------------------------------------------

// Basket collects ("picks") the Segments read while scanning.
type Basket struct {
	segments []Segment
}
// Add appends a Segment to the Basket.
func (b *Basket) Add(seg Segment) {
	b.segments = append(b.segments, seg)
}
// Update implements Tracker: when the scanner's position moves to m, it
// scans the picked segments from newest to oldest and truncates the slice
// at the first segment ending at or before m; if none qualifies the basket
// is emptied.
// NOTE(review): the truncation keeps b.segments[:i], i.e. includes the
// segment with seg.to <= m — confirm the intended keep/drop boundary.
func (b *Basket) Update(m Marker) {
	for i := len(b.segments); i > 0; i-- {
		seg := b.segments[i-1]
		if seg.to <= m {
			b.segments = b.segments[:i]
			return
		}
	}
	b.segments = []Segment{}
}
// Picked returns the currently picked Segments.
func (b *Basket) Picked() []Segment {
	return b.segments
}
// String renders the picked segments joined by ";".
func (b *Basket) String() string {
	// pre-size to avoid repeated growth while formatting
	segs := make([]string, 0, len(b.segments))
	for _, seg := range b.segments {
		segs = append(segs, seg.String())
	}
	return strings.Join(segs, ";")
}
// PickWith calls Pick on all rules with the Basket as parameter, so every
// matching rule adds its Segment to this Basket.
func (b *Basket) PickWith(rules ...*Rule) {
	for _, r := range rules {
		r.Pick(b)
	}
}
//------------------------------------------------------------------------------

// pickReader wraps a Reader and records every successfully read token as a
// Segment (tagged with info) in the associated basket.
type pickReader struct {
	info string
	basket *Basket
	sub Reader
}
// Read tokenizes the scanner input with the wrapped reader; on success the
// produced token is stored in the basket as a Segment tagged with the
// reader's info string. The tokenize error (nil on success) is passed
// through unchanged.
func (r *pickReader) Read(s *Scanner) error {
	t, err := s.TokenizeUse(r.sub)
	if err == nil {
		r.basket.Add(Segment{
			Info: r.info,
			Token: t,
		})
	}
	return err
}
// What reports what the wrapped reader reads (delegates to the sub-reader).
func (r *pickReader) What() string {
	return r.sub.What()
}
// Pick creates a Reader that appends the Segments that r reads forward to the Basket with info as Info value.
func Pick(r Reader, b *Basket, info string) Reader {
return &pickReader{
info: info,
basket: b,
sub: r,
}
} | tracker.go | 0.791015 | 0.450239 | tracker.go | starcoder |
package neuron
import (
"fmt"
)
// A Net is a neural network consisting of a sequence of layers, each of which
// contains one or more Units. Arch defines the layer sizes. Layers points to
// each of the units in each of the layers.
type Net struct {
	// Size of each layer
	Arch []int
	// Pointers to the units in each layer
	Layers [][](*Unit)
	// Units signal here after completing a forward/backward/step sequence;
	// sync drains one message per unit.
	stepDone chan int
}
// NewMLP constructs a new fully-connected network with the given architecture.
// arch gives the unit count per layer (input, hidden..., output); opt is
// cloned per unit via opt.New() so every unit owns its optimizer state.
func NewMLP(arch []int, opt Optimizer) *Net {
	// Check for valid architecture: at least input, hidden and output layers.
	numLayers := len(arch)
	if numLayers < 3 {
		// TODO: These should probably be errors, not panics. Also, add error
		// handling elsewhere as needed.
		// The message matches the `< 3` check above (it used to claim ">= 2").
		panic(fmt.Sprintf("MLP architectures need >= 3 layers; got %d",
			numLayers))
	}
	for _, sz := range arch {
		if sz < 1 {
			panic(fmt.Sprintf("Each layer >= 1 unit; got %d", sz))
		}
	}
	n := Net{
		Arch:     make([]int, len(arch)),
		Layers:   make([][](*Unit), numLayers),
		stepDone: make(chan int),
	}
	logf(1, "Building a %d layer network.\n Arch=%v\n", numLayers, arch)
	copy(n.Arch, arch)
	// Make layers; unit IDs encode layer and position, e.g. "002_000013".
	const idFormStr = "%03d_%06d"
	var id string
	var u *Unit
	for ii := 0; ii < numLayers; ii++ {
		l := make([]*Unit, arch[ii])
		for jj := 0; jj < arch[ii]; jj++ {
			id = fmt.Sprintf(idFormStr, ii, jj)
			switch ii {
			case 0:
				// Need a new opt for each unit so that each gets their own buffer data.
				u = newInputUnit(id, opt.New(), n.stepDone)
			case numLayers - 1:
				u = newOutputUnit(id, opt.New(), n.stepDone)
			default:
				u = newHiddenUnit(id, opt.New(), n.stepDone)
			}
			l[jj] = u
		}
		n.Layers[ii] = l
	}
	// Connect all the layers in a fully-connected pattern.
	for ii := 0; ii < numLayers-1; ii++ {
		for _, u1 := range n.Layers[ii] {
			for _, u2 := range n.Layers[ii+1] {
				u1.connect(u2)
			}
		}
	}
	return &n
}
// Forward pass through the network. The input is a single data sample; the
// returned slice holds one activation per output unit. Panics when the
// input length does not match the input layer width.
func (n *Net) Forward(data []float64) (output []float64) {
	inDim := len(data)
	if inDim != n.Arch[0] {
		panic(fmt.Sprintf("Input dim (%d) not equal to number of input units (%d)",
			inDim, n.Arch[0]))
	}
	logf(2, "MLP Forward\n")
	// Feed in: one signal per input unit.
	for ii, v := range data {
		n.Layers[0][ii].input <- signal{id: inputID, value: v}
	}
	numLayers := len(n.Arch)
	outDim := n.Arch[numLayers-1]
	output = make([]float64, outDim)
	// Feed out: collect one signal from each output unit (blocks until
	// the forward pass has propagated through the network).
	var s signal
	for ii := 0; ii < outDim; ii++ {
		s = <-n.Layers[numLayers-1][ii].output[outputID]
		output[ii] = s.value
	}
	return
}
// Backward pass a loss gradient through the network. Input grad should be a
// gradient with respect to each of the network outputs. Panics when grad's
// length does not match the output layer width; blocks until every unit has
// completed its backward/step for this iteration.
func (n *Net) Backward(grad []float64) {
	outDim := n.Arch[len(n.Arch)-1]
	gradDim := len(grad)
	if gradDim != outDim {
		panic(fmt.Sprintf("Grad dim (%d) not equal to number of output units (%d)",
			gradDim, outDim))
	}
	logf(2, "MLP Backward\n")
	// Feed in (backward): one gradient signal per output unit.
	numLayers := len(n.Arch)
	for ii, v := range grad {
		n.Layers[numLayers-1][ii].inputB <- signal{id: inputID, value: v}
	}
	// Wait for all units to finish backward and step to avoid a race.
	n.sync()
}
// sync blocks until every unit in the network has signaled completion of
// its forward/backward/step sequence on stepDone.
func (n *Net) sync() {
	total := 0
	for _, sz := range n.Arch {
		total += sz
	}
	for ; total > 0; total-- {
		<-n.stepDone
	}
}
// Start running each unit's forward/backward/step loop concurrently. Neuron
// weights and biases are updated every updateFreq iterations. By setting
// updateFreq > 1, we can simulate mini-batch optimization.
func (n *Net) Start(train bool, updateFreq int) {
for _, l := range n.Layers {
for _, u := range l {
go u.start(train, updateFreq)
logf(2, "Start %s\n", u.ID)
}
}
} | net.go | 0.60743 | 0.511412 | net.go | starcoder |
package interp
import (
"fmt"
"math"
"math/cmplx"
"strconv"
"strings"
)
// valueType discriminates what kind of data a value holds.
type valueType uint8

const (
	typeNil valueType = iota // unset/uninitialized value
	typeStr // string value (possibly a "numeric string")
	typeNum // numeric (complex128) value
)
// An AWK value (these are passed around by value). Exactly one of s or n is
// authoritative depending on typ; numeric strings keep both.
type value struct {
	typ valueType // Value type
	isNumStr bool // An AWK "numeric string" from user input
	s string // String value (for typeStr)
	n complex128 // Numeric value (for typeNum and numeric strings)
}
// Create a new number value holding the complex number n
func num(n complex128) value {
	return value{typ: typeNum, n: n}
}

// Create a new string value holding s (no numeric interpretation)
func str(s string) value {
	return value{typ: typeStr, s: s}
}
// Create a new value for a "numeric string" context, converting the
// string to a number if possible. The string form is kept either way;
// isNumStr records whether the trimmed text parsed as a number.
func numStr(s string) value {
	f, ok := parseComplex(strings.TrimSpace(s))
	return value{typ: typeStr, isNumStr: ok, s: s, n: f}
}
// parseComplex parses a complex number written as magnitude@angle, with the
// angle given in degrees (e.g. "10.234@92.5"). A bare number is treated as
// a real value (angle 0). It reports whether the input was a valid number.
func parseComplex(s string) (complex128, bool) {
	var magStr, degStr string
	var hasAngle bool
	if at := strings.Index(s, "@"); at >= 0 {
		magStr, degStr, hasAngle = s[:at], s[at+1:], true
	} else {
		magStr = s
	}
	mag, err := strconv.ParseFloat(magStr, 64)
	if err != nil {
		return 0, false
	}
	deg := 0.0
	if hasAngle {
		deg, err = strconv.ParseFloat(degStr, 64)
		if err != nil {
			return 0, false
		}
	}
	// The four axis angles are handled exactly, avoiding rounding error
	// from the trigonometric conversion.
	switch deg {
	case 0:
		return complex(mag, 0), true
	case 90:
		return complex(0, mag), true
	case 180:
		return complex(-mag, 0), true
	case 270:
		return complex(0, -mag), true
	}
	return cmplx.Rect(mag, deg/180.0*math.Pi), true
}
// Create a numeric value from a Go bool: 1 for true, 0 for false
func boolean(b bool) value {
	if b {
		return num(1)
	}
	return num(0)
}
// Return true if value is a "true string" (a string that is not a
// "numeric string", i.e. it did not parse as a number)
func (v value) isTrueStr() bool {
	return v.typ == typeStr && !v.isNumStr
}

// Return true if the stored number has no imaginary part
func (v value) isReal() bool {
	return imag(v.n) == 0
}
// Return Go bool value of AWK value. For numbers or numeric strings,
// zero is false and everything else is true. For strings, empty
// string is false and everything else is true.
func (v value) boolean() bool {
	if v.isTrueStr() {
		return v.s != ""
	}
	return v.n != 0
}
// Return value's string value, or convert to a string using given
// format if a number value. Integers are a special case and don't
// use floatFormat. Non-real numbers render as magnitude@angle with the
// angle in degrees normalized into [0, 360).
func (v value) str(floatFormat string) string {
	switch v.typ {
	case typeNum:
		if cmplx.IsNaN(v.n) {
			return "nan"
		} else if cmplx.IsInf(v.n) {
			return "inf" // ignore -inf for real numbers
		} else if v.n == complex(float64(int(real(v.n))), 0) {
			// real-valued integer: print without floatFormat
			return strconv.Itoa(int(real(v.n)))
		} else if v.isReal() {
			return fmt.Sprintf(floatFormat, v.n)
		} else {
			deg := cmplx.Phase(v.n) / math.Pi * 180.0
			if deg < 0 {
				deg += 360
			}
			return fmt.Sprintf(floatFormat+"@"+floatFormat, cmplx.Abs(v.n), deg)
		}
	case typeStr:
		return v.s
	default:
		// typeNil
		return ""
	}
}
// Return value's number value, converting from string if necessary
// (conversion failures yield 0; use numChecked to detect them)
func (v value) num() complex128 {
	f, _ := v.numChecked()
	return f
}
// Return value's number value and a success flag, converting from a
// string if necessary. Nil values report 0 with success.
func (v value) numChecked() (complex128, bool) {
	switch v.typ {
	case typeNum:
		return v.n, true
	case typeStr:
		if v.isNumStr {
			// If it's a numeric string, we already have the float
			// value from the numStr() call
			return v.n, true
		}
		// Otherwise ensure string starts with a float and convert it
		return parseFloatPrefix(v.s)
	default:
		// typeNil behaves as numeric zero
		return 0, true
	}
}
// Like strconv.ParseFloat, but parses at the start of the string and
// allows trailing garbage, e.g. "1.5foo" parses as 1.5. The value is
// returned as a complex128 with zero imaginary part, together with a flag
// reporting whether a number was found.
func parseFloatPrefix(s string) (complex128, bool) {
	// Skip whitespace at start
	i := 0
	for i < len(s) && (s[i] == ' ' || s[i] == '\t' || s[i] == '\n' || s[i] == '\r') {
		i++
	}
	start := i
	// Parse mantissa: optional sign, initial digit(s), optional '.',
	// then more digits
	gotDigit := false
	if i < len(s) && (s[i] == '+' || s[i] == '-') {
		i++
	}
	for i < len(s) && s[i] >= '0' && s[i] <= '9' {
		gotDigit = true
		i++
	}
	if i < len(s) && s[i] == '.' {
		i++
	}
	for i < len(s) && s[i] >= '0' && s[i] <= '9' {
		gotDigit = true
		i++
	}
	if !gotDigit {
		return 0, false
	}
	// Parse exponent: "1e" and similar are allowed here but rejected by
	// ParseFloat, so `end` only advances past the 'e' once a digit follows.
	end := i
	if i < len(s) && (s[i] == 'e' || s[i] == 'E') {
		i++
		if i < len(s) && (s[i] == '+' || s[i] == '-') {
			i++
		}
		for i < len(s) && s[i] >= '0' && s[i] <= '9' {
			i++
			end = i
		}
	}
	// TODO parse complex (magnitude@angle) prefixes as well
	floatStr := s[start:end]
	f, err := strconv.ParseFloat(floatStr, 64)
	return complex(f, 0), err == nil // May be "value out of range" error
}
package mandelbrot
import (
"image"
"image/color"
"image/draw"
"image/gif"
"image/png"
"math"
"os"
"sync"
)
// Drawer renders Mandelbrot images: Draw produces a single frame for a view
// rectangle, Gif produces a zoom animation, and SetSize/SetIterations
// configure the renderer.
type Drawer interface {
	Draw(minX, maxX, minY, maxY float64, colors []color.Color) *image.RGBA
	Gif(frames uint16, x, y, scaleIn float64, colors []color.Color) *gif.GIF
	SetSize(sizeX, sizeY uint16)
	SetIterations(maxIterations uint8)
}
// MandelbrotBuilder holds render settings: the output size in pixels and an
// iteration cap.
// NOTE(review): MaxIterations is not referenced by Draw/ColorRow, which use
// a hard-coded cap of 300 — confirm whether it should be threaded through.
type MandelbrotBuilder struct {
	SizeX, SizeY uint16
	MaxIterations uint8
}
// SetSize records the output image dimensions in pixels.
func (bb *MandelbrotBuilder) SetSize(sizeX, sizeY uint16) {
	bb.SizeX, bb.SizeY = sizeX, sizeY
}

// SetIterations records the iteration cap for coloring points.
func (bb *MandelbrotBuilder) SetIterations(maxIterations uint8) {
	bb.MaxIterations = maxIterations
}
// FloatFunction maps a float64 to a float64.
type FloatFunction func(a float64) float64
// Gif returns the gif containing frames and delays for a mandelbrot
// animation. It renders `frames` frames centered on (x, y); each frame the
// view's half-width and half-height are multiplied by scaleIn, so
// scaleIn < 1 zooms in. All frame delays are 0.
func (bb MandelbrotBuilder) Gif(frames uint16, x, y, scaleIn float64, colors []color.Color) *gif.GIF {
	var images []*image.Paletted
	var delays []int
	xShift := 1.0
	yShift := 1.0
	xMin, xMax, yMin, yMax := ExtentFromPoint(x, y, xShift, yShift)
	for frame := uint16(0); frame < frames; frame++ {
		img := bb.Draw(xMin, xMax, yMin, yMax, colors)
		// quantize the RGBA frame into the provided palette for GIF output
		palettedImage := image.NewPaletted(img.Bounds(), colors)
		draw.Draw(palettedImage, palettedImage.Rect, img, img.Bounds().Min, draw.Over)
		images = append(images, palettedImage)
		delays = append(delays, 0)
		// shrink the view for the next frame
		xShift *= scaleIn
		yShift *= scaleIn
		xMin, xMax, yMin, yMax = ExtentFromPoint(x, y, xShift, yShift)
	}
	return &gif.GIF{
		Image: images,
		Delay: delays,
	}
}
// ExtentFromPoint converts a center point (x, y) plus half-extents xShift
// and yShift into per-axis min/max ranges.
func ExtentFromPoint(x, y, xShift, yShift float64) (xMin, xMax, yMin, yMax float64) {
	xMin, xMax = x-xShift, x+xShift
	yMin, yMax = y-yShift, y+yShift
	return
}
// ColorRow fills in one row of Mandelbrot values for an image and marks wg
// done when finished. xScale/yScale map the pixel indices (row, j) to a
// point in the complex plane.
// NOTE(review): the iteration cap is hard-coded to 300, so
// MandelbrotBuilder.MaxIterations is never consulted here — confirm intent.
// NOTE(review): the local variable `color` shadows the image/color package
// inside the loop.
func ColorRow(img *image.RGBA, row, length uint16, xScale, yScale FloatFunction, colors []color.Color, wg *sync.WaitGroup) {
	defer wg.Done()
	for j := uint16(0); j < length; j++ {
		pointX := xScale(float64(row))
		pointY := yScale(float64(j))
		iterations := EscapeIterations(pointX, pointY, 300)
		color := colors[iterations%len(colors)]
		img.Set(int(row), int(j), color)
	}
}
// Draw draws a Mandelbrot image of a given size with a given domain and range.
// It renders each pixel column concurrently (one goroutine per column) and
// waits for all columns before returning.
func (bb MandelbrotBuilder) Draw(minX, maxX, minY, maxY float64, colors []color.Color) *image.RGBA {
	var wg sync.WaitGroup
	img := image.NewRGBA(image.Rect(0, 0, int(bb.SizeX), int(bb.SizeY)))
	// Map pixel coordinates [0, Size) onto the requested plane extents.
	// (minX etc. are already float64; the float64(...) conversions are no-ops.)
	xScale := Scale(0, float64(bb.SizeX), float64(minX), float64(maxX))
	yScale := Scale(0, float64(bb.SizeY), float64(minY), float64(maxY))
	for i := uint16(0); i < bb.SizeX; i++ {
		wg.Add(1)
		go ColorRow(img, i, bb.SizeY, xScale, yScale, colors, &wg)
	}
	wg.Wait()
	return img
}
// NewPalette returns a list of colors to use as a palette, one entry per
// iteration count, ramping the green/blue channels from 0 up toward 255.
// NOTE(review): red and alpha are both fixed at 1 — alpha 1 is almost fully
// transparent; 255 (opaque) may have been intended. Confirm before changing,
// as it alters rendered output.
func NewPalette(maxIterations uint8) []color.Color {
	colors := make([]color.Color, 0, maxIterations)
	colorScale := Scale(0, float64(maxIterations), 0, 255)
	for x := uint8(0); x < maxIterations; x++ {
		value := uint8(colorScale(float64(x)))
		colors = append(colors, color.RGBA{1, value, value, 1})
	}
	return colors
}
// WritePng writes an image to a filename
// Is there a good way to test this without deleting and recreating the file?
func WritePng(img *image.RGBA, filename string) {
f, _ := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0600)
defer f.Close()
png.Encode(f, img)
}
// EscapeIterations calculates how many iterations it takes for the point
// (x0, y0) to escape Mandelbrot iteration (z -> z^2 + c), with a cap of
// maxIterations. Points that never escape return maxIterations.
func EscapeIterations(x0, y0 float64, maxIterations int) (iterations int) {
	x, y := 0.0, 0.0
	for !HasEscaped(x, y) && iterations < maxIterations {
		// x*x is cheaper and exact compared to math.Pow(x, 2); the tuple
		// assignment replaces the xTemp staging variable.
		x, y = x*x-y*y+x0, 2*x*y+y0
		iterations++
	}
	return
}
// HasEscaped reports whether a point has escaped under Mandelbrot iteration,
// i.e. its distance from the origin exceeds 2 (squared distance > 4).
func HasEscaped(x, y float64) bool {
	// x*x avoids the overhead of math.Pow(x, 2) for integer exponents.
	return x*x+y*y > 4
}
// Scale returns a linear scaling function that maps [inputMin, inputMax]
// onto [outputMin, outputMax], clamping inputs outside the input range to
// the corresponding output endpoint.
func Scale(inputMin, inputMax, outputMin, outputMax float64) func(a float64) float64 {
	lo := math.Min(inputMin, inputMax)
	hi := math.Max(inputMin, inputMax)
	span := (outputMax - outputMin) / (inputMax - inputMin)
	return func(a float64) float64 {
		switch {
		case a < lo:
			return outputMin
		case a > hi:
			return outputMax
		default:
			return outputMin + span*(a-inputMin)
		}
	}
}
package projecteuler
import (
"fmt"
"math"
"math/big"
)
// ChakravalaTriplet holds X, Y, K which are solution to X^2 - N*Y^2 = K
type ChakravalaTriplet struct {
	X *big.Int
	Y *big.Int
	K int64
}
// assign deep-copies rhs into c, reusing c's existing big.Int allocations
// (as opposed to replacing the pointers).
func (c *ChakravalaTriplet) assign(rhs ChakravalaTriplet) {
	c.X.Set(rhs.X)
	c.Y.Set(rhs.Y)
	c.K = rhs.K
}
// string formats the triplet as "[X, Y, K]" for debug output.
// (Unexported on purpose; it is not the fmt.Stringer interface.)
func (c ChakravalaTriplet) string() string {
	return fmt.Sprintf("[%s, %s, %d]", c.X.String(), c.Y.String(), c.K)
}
// Chakravala returns the minimal solution to X^2 - N*Y^2 = 1.
// It uses the Chakravala method, commonly attributed to Bhāskara II, a 12th
// century mathematician, building on earlier work by Jayadeva (10th century)
// and Brahmagupta (7th century). The quality and mathematical depth of work
// by Indian mathematicians on numbers and algebra was only reached by
// European mathematicians much, much later.
func Chakravala(n int) ChakravalaTriplet {
	current := startingTriplet(n)
	for current.K != 1 {
		current = nextTriplet(n, current)
	}
	return current
}
// startingTriplet picks the initial triplet (x, 1, k) with x = ceil(sqrt(n)),
// so that x^2 - n*1^2 = k holds by construction.
//
// NOTE(review): if n is a perfect square this yields k == 0, which later
// steps use as a modulus/divisor — confirm callers only pass non-square n
// (the Pell equation has no nontrivial solution for square n anyway).
func startingTriplet(n int) ChakravalaTriplet {
	var x, k int64
	// x^2 - n = k, with x the smallest integer >= sqrt(n)
	xFloat := math.Ceil(math.Sqrt(float64(n)))
	x = int64(xFloat)
	k = x*x - int64(n)
	return ChakravalaTriplet{X: big.NewInt(x), Y: big.NewInt(1), K: k}
}
// nextTriplet applies one step of the Chakravala method: choose the best
// multiplier m for prev, then compose via the samasa identity.
func nextTriplet(n int, prev ChakravalaTriplet) ChakravalaTriplet {
	return samasa(n, calcM(n, prev), prev)
}
// samasa composes the triplet prev = (x, y, k) with (m, 1, m^2 - n) using
// Brahmagupta's identity, producing the next Chakravala triplet.
// The temp/temp2/temp3 big.Ints are scratch space reused across the three
// divisions to limit allocations.
func samasa(n int, m *big.Int, prev ChakravalaTriplet) ChakravalaTriplet {
	// samasa of prev (x, y, k) and (m, 1, m^2 - n) gives
	// [math.Abs((xm + ny)/k), math.Abs((my + x)/k), (m^2 - n)/k] with application of Bhaskara's lemma
	// my =(mod k) -x, minimizing m^2 - n
	// iff gcd(y, k) == 1, there is a unique modular multiplicative inverse for b (b^-1)
	// myy^-1 =(mod k) -xy^-1 => m =(mod k) -xy^-1
	kInt, nInt := big.NewInt(prev.K), big.NewInt(int64(n))
	// x = |(m*prev.X + n*prev.Y) / k|
	x, temp, temp2, temp3 := &big.Int{}, &big.Int{}, &big.Int{}, &big.Int{}
	temp.Add(temp2.Mul(m, prev.X), temp3.Mul(nInt, prev.Y))
	x.Div(temp, kInt)
	if x.Sign() == -1 {
		x.Neg(x)
	}
	// y = |(m*prev.Y + prev.X) / k|
	y := &big.Int{}
	y.Div(temp2.Add(temp.Mul(m, prev.Y), prev.X), kInt)
	if y.Sign() == -1 {
		y.Neg(y)
	}
	// z = (m^2 - n) / k, the new K (kept signed)
	z := &big.Int{}
	z.Div(temp.Sub(temp2.Mul(m, m), nInt), kInt)
	retValue := ChakravalaTriplet{X: x, Y: y, K: z.Int64()}
	//fmt.Printf("(%d) %s x [%s, 1, %s] = %s\n", n, prev.string(), m.String(), temp.String(), retValue.string())
	return retValue
}
// calcM selects the multiplier m for the next Chakravala step: m must satisfy
// m = -x*y^-1 (mod k) and |m^2 - n| should be minimal, which is achieved by
// picking the candidate nearest to sqrt(n) in the right congruence class.
// Returns 0 when gcd(y, k) == 0, i.e. no modular inverse exists.
func calcM(n int, prev ChakravalaTriplet) *big.Int {
	kInt := big.NewInt(int64(prev.K))
	a := &big.Int{}
	a.GCD(nil, nil, prev.Y, kInt)
	if a.Int64() == 0 {
		return big.NewInt(0)
	}
	// y^-1 (modular inverse of prev.Y modulo k)
	yInverse := &big.Int{}
	yInverse.ModInverse(prev.Y, kInt)
	// m congruence class, e.g. m =(mod 3) 1
	mCInt := &big.Int{}
	mCInt.Mul(yInverse, prev.X)
	mCInt.Neg(mCInt)
	mCongruent := a.Mod(mCInt, kInt).Int64()
	// number less-equal than root n floor, minus what is neccessary to make it same congruence class as m
	nRootFloor := int64(math.Floor(math.Sqrt(float64(n))))
	rfCongruent := a.Mod(big.NewInt(nRootFloor), kInt).Int64()
	nRootFloor -= rfCongruent - mCongruent
	// number greater-equal than root n floor, plus what is neccessary to make it same congruence class as m
	nRootCeil := nRootFloor + prev.K
	// pick whichever candidate minimizes |m^2 - n|
	f := math.Abs(float64(nRootFloor*nRootFloor - int64(n)))
	c := math.Abs(float64(nRootCeil*nRootCeil - int64(n)))
	mCongruent = nRootFloor
	if c < f {
		mCongruent = nRootCeil
	}
	return big.NewInt(mCongruent)
}
package backend
// Instruction is a single VM instruction that can serialize itself into the
// raw bytecode representation: a one-byte opcode followed by its operands.
type Instruction interface {
	Generate() []byte
}
// Halt
// - takes no arguments, unconditionally stops program execution
// - typically appended to the end of the top-level main function
type Halt struct{}
// Generate converts this instruction to raw bytes
func (inst Halt) Generate() (blob []byte) {
	blob = append(blob, OpcodeHalt)
	return blob
}
// BoolConst <bool encoded as a 32 bit integer: 1 true, 0 false> <destination register>
type BoolConst struct {
	Value bool
	Dest  RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst BoolConst) Generate() (blob []byte) {
	blob = append(blob, OpcodeBoolConst)
	if inst.Value {
		blob = append(blob, int32ToBytes(1)...)
	} else {
		blob = append(blob, int32ToBytes(0)...)
	}
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// IntConst <32 bit integer value> <destination register>
type IntConst struct {
	Value int32
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst IntConst) Generate() (blob []byte) {
	blob = append(blob, OpcodeIntConst)
	blob = append(blob, int32ToBytes(inst.Value)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// DecConst <32 bit floating point value> <destination register>
type DecConst struct {
	Value float32
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst DecConst) Generate() (blob []byte) {
	blob = append(blob, OpcodeDecConst)
	blob = append(blob, float32ToBytes(inst.Value)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// StrConst <constant pool index> <destination register>
type StrConst struct {
	ConstantIndex uint32
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst StrConst) Generate() (blob []byte) {
	blob = append(blob, OpcodeStrConst)
	blob = append(blob, uint32ToBytes(inst.ConstantIndex)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// FuncConst <function pool index> <destination register>
type FuncConst struct {
	ConstantIndex uint32
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst FuncConst) Generate() (blob []byte) {
	blob = append(blob, OpcodeFuncConst)
	blob = append(blob, uint32ToBytes(inst.ConstantIndex)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// Move <source register> <destination register>
// - copies the value in the source register into the destination register
type Move struct {
	Source RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst Move) Generate() (blob []byte) {
	blob = append(blob, OpcodeMove)
	blob = append(blob, registerToBytes(inst.Source)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// LoadUpVal <enclosing closure lookup index> <destination register>
// - value is copied from enclosing closure's upvalue into destination register
type LoadUpVal struct {
	Index int32
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst LoadUpVal) Generate() (blob []byte) {
	blob = append(blob, OpcodeLoadUpVal)
	blob = append(blob, int32ToBytes(inst.Index)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// SetUpVal <source register> <enclosing closure lookup index>
// - value is copied from source register and used to update the upvalue in the
//   enclosing closure
type SetUpVal struct {
	Source RegisterAddress
	Index int32
}
// Generate converts this instruction to raw bytes
func (inst SetUpVal) Generate() (blob []byte) {
	blob = append(blob, OpcodeSetUpVal)
	blob = append(blob, registerToBytes(inst.Source)...)
	blob = append(blob, int32ToBytes(inst.Index)...)
	return blob
}
// BrAlways <bytecode address to jump to>
// - will unconditionally jump to a given address
type BrAlways struct {
	Addr BytecodeAddress
}
// Generate converts this instruction to raw bytes
// - Addr field MUST BE LAST 4 BYTES OF INSTRUCTION (see compiler.go @ computeJumps)
func (inst BrAlways) Generate() (blob []byte) {
	blob = append(blob, OpcodeBrAlways)
	blob = append(blob, addressToBytes(inst.Addr)...)
	return blob
}
// BrTrue <decision register> <bytecode address>
// - will jump to the given address if the value in the decision register is 1
type BrTrue struct {
	Test RegisterAddress
	Addr BytecodeAddress
}
// Generate converts this instruction to raw bytes
// - Addr field MUST BE LAST 4 BYTES OF INSTRUCTION (see compiler.go @ computeJumps)
func (inst BrTrue) Generate() (blob []byte) {
	blob = append(blob, OpcodeBrTrue)
	blob = append(blob, registerToBytes(inst.Test)...)
	blob = append(blob, addressToBytes(inst.Addr)...)
	return blob
}
// BrFalse <decision register> <bytecode address>
// - will jump to the given address if the value in the decision register is 0
// NOTE(review): the decision-register field is named Source here but Test on
// BrTrue — renaming would break callers, but consider unifying eventually.
type BrFalse struct {
	Source RegisterAddress
	Addr BytecodeAddress
}
// Generate converts this instruction to raw bytes
// - Addr field MUST BE LAST 4 BYTES OF INSTRUCTION (see compiler.go @ computeJumps)
func (inst BrFalse) Generate() (blob []byte) {
	blob = append(blob, OpcodeBrFalse)
	blob = append(blob, registerToBytes(inst.Source)...)
	blob = append(blob, addressToBytes(inst.Addr)...)
	return blob
}
// Comparison instructions. Each takes two operand registers and writes 1
// (true) or 0 (false) into the destination register. Int* variants operate
// on integer registers, Dec* variants on floating point registers.
// IntLT <left operand register> <right operand register> <destination register>
// - if left < right, load 1 into the destination register, else load 0
type IntLT struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst IntLT) Generate() (blob []byte) {
	blob = append(blob, OpcodeIntLT)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// IntLTEq <left operand register> <right operand register> <destination register>
// - if left <= right, load 1 into the destination register, else load 0
type IntLTEq struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst IntLTEq) Generate() (blob []byte) {
	blob = append(blob, OpcodeIntLTEq)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// IntGT <left operand register> <right operand register> <destination register>
// - if left > right, load 1 into the destination register, else load 0
type IntGT struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst IntGT) Generate() (blob []byte) {
	blob = append(blob, OpcodeIntGT)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// IntGTEq <left operand register> <right operand register> <destination register>
// - if left >= right, load 1 into the destination register, else load 0
type IntGTEq struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst IntGTEq) Generate() (blob []byte) {
	blob = append(blob, OpcodeIntGTEq)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// IntEq <left operand register> <right operand register> <destination register>
// - if left == right, load 1 into the destination register, else load 0
type IntEq struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst IntEq) Generate() (blob []byte) {
	blob = append(blob, OpcodeIntEq)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// DecLT <left operand register> <right operand register> <destination register>
// - if left < right, load 1 into the destination register, else load 0
type DecLT struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst DecLT) Generate() (blob []byte) {
	blob = append(blob, OpcodeDecLT)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// DecLTEq <left operand register> <right operand register> <destination register>
// - if left <= right, load 1 into the destination register, else load 0
type DecLTEq struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst DecLTEq) Generate() (blob []byte) {
	blob = append(blob, OpcodeDecLTEq)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// DecGT <left operand register> <right operand register> <destination register>
// - if left > right, load 1 into the destination register, else load 0
type DecGT struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst DecGT) Generate() (blob []byte) {
	blob = append(blob, OpcodeDecGT)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// DecGTEq <left operand register> <right operand register> <destination register>
// - if left >= right, load 1 into the destination register, else load 0
type DecGTEq struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst DecGTEq) Generate() (blob []byte) {
	blob = append(blob, OpcodeDecGTEq)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// DecEq <left operand register> <right operand register> <destination register>
// - if left == right, load 1 into the destination register, else load 0
type DecEq struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst DecEq) Generate() (blob []byte) {
	blob = append(blob, OpcodeDecEq)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// Dispatch <source register storing closure> <register with first argument>
// - after the first argument register, any other arguments are assumed to be
//   sequential in the register array
type Dispatch struct {
	Source RegisterAddress
	FirstArgRegister RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst Dispatch) Generate() (blob []byte) {
	blob = append(blob, OpcodeDispatch)
	blob = append(blob, registerToBytes(inst.Source)...)
	blob = append(blob, registerToBytes(inst.FirstArgRegister)...)
	return blob
}
// Return <source register holding value to return>
type Return struct {
	Source RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst Return) Generate() (blob []byte) {
	blob = append(blob, OpcodeReturn)
	blob = append(blob, registerToBytes(inst.Source)...)
	return blob
}
// Integer arithmetic instructions: result of <left> op <right> (or the
// negation of a single operand) is written to the destination register.
// IntAdd <left operand> <right operand> <destination register>
type IntAdd struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst IntAdd) Generate() (blob []byte) {
	blob = append(blob, OpcodeIntAdd)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// IntSub <left operand> <right operand> <destination register>
type IntSub struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst IntSub) Generate() (blob []byte) {
	blob = append(blob, OpcodeIntSub)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// IntMul <left operand> <right operand> <destination register>
type IntMul struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst IntMul) Generate() (blob []byte) {
	blob = append(blob, OpcodeIntMul)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// IntDiv <left operand> <right operand> <destination register>
type IntDiv struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst IntDiv) Generate() (blob []byte) {
	blob = append(blob, OpcodeIntDiv)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// IntNeg <operand> <destination register>
type IntNeg struct {
	Operand RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst IntNeg) Generate() (blob []byte) {
	blob = append(blob, OpcodeIntNeg)
	blob = append(blob, registerToBytes(inst.Operand)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// Floating point arithmetic instructions, mirroring the Int* family above.
// DecAdd <left operand> <right operand> <destination register>
type DecAdd struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst DecAdd) Generate() (blob []byte) {
	blob = append(blob, OpcodeDecAdd)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// DecSub <left operand> <right operand> <destination register>
type DecSub struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst DecSub) Generate() (blob []byte) {
	blob = append(blob, OpcodeDecSub)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// DecMul <left operand> <right operand> <destination register>
type DecMul struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst DecMul) Generate() (blob []byte) {
	blob = append(blob, OpcodeDecMul)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// DecDiv <left operand> <right operand> <destination register>
type DecDiv struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst DecDiv) Generate() (blob []byte) {
	blob = append(blob, OpcodeDecDiv)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// DecNeg <operand> <destination register>
type DecNeg struct {
	Operand RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst DecNeg) Generate() (blob []byte) {
	blob = append(blob, OpcodeDecNeg)
	blob = append(blob, registerToBytes(inst.Operand)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// StrConcat <left operand> <right operand> <destination register>
type StrConcat struct {
	Left RegisterAddress
	Right RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst StrConcat) Generate() (blob []byte) {
	blob = append(blob, OpcodeStrConcat)
	blob = append(blob, registerToBytes(inst.Left)...)
	blob = append(blob, registerToBytes(inst.Right)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
// Print <register holding value to output to `stdout`>
type Print struct {
	Source RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst Print) Generate() (blob []byte) {
	blob = append(blob, OpcodePrint)
	blob = append(blob, registerToBytes(inst.Source)...)
	return blob
}
// CastToStr <source register> <destination register>
// - converts the value in the source register to its string representation
//   and stores it in the destination register
type CastToStr struct {
	Source RegisterAddress
	Dest RegisterAddress
}
// Generate converts this instruction to raw bytes
func (inst CastToStr) Generate() (blob []byte) {
	blob = append(blob, OpcodeCastToStr)
	blob = append(blob, registerToBytes(inst.Source)...)
	blob = append(blob, registerToBytes(inst.Dest)...)
	return blob
}
package resourcefilter
import (
"fmt"
"cloud.google.com/go/spanner/spansql"
"github.com/google/cel-go/common/operators"
expr "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// exprToSpannerSQL recursively converts a CEL expression tree into the
// equivalent Spanner SQL expression. Supported nodes: `in`, `&&`, `||`, `!`
// calls, identifiers, field selections (only the final field name is used),
// and string constants. Everything else returns an error.
func exprToSpannerSQL(e *expr.Expr) (spansql.Expr, error) {
	switch e := e.ExprKind.(type) {
	case *expr.Expr_CallExpr:
		switch e.CallExpr.Function {
		case operators.In:
			return inExprToSpannerSQL(e.CallExpr.Args)
		case operators.LogicalAnd:
			return binaryLogicalExprToSpannerSQL(spansql.And, e.CallExpr.Args)
		case operators.LogicalOr:
			return binaryLogicalExprToSpannerSQL(spansql.Or, e.CallExpr.Args)
		case operators.LogicalNot:
			return notExprToSpannerSQL(e.CallExpr.Args)
		default:
			return nil, fmt.Errorf("unsupported function: %s", e.CallExpr.Function)
		}
	case *expr.Expr_IdentExpr:
		return spansql.ID(e.IdentExpr.Name), nil
	case *expr.Expr_SelectExpr:
		// NOTE(review): only the leaf field name survives — a.b.c becomes
		// column `c`; confirm that nested selections are not expected.
		return spansql.ID(e.SelectExpr.Field), nil
	case *expr.Expr_ConstExpr:
		switch k := e.ConstExpr.ConstantKind.(type) {
		case *expr.Constant_StringValue:
			return spansql.StringLiteral(k.StringValue), nil
		default:
			return nil, fmt.Errorf("unsupported const expr: %v", k)
		}
	default:
		return nil, fmt.Errorf("unsupported expr: %v", e)
	}
}
// notExprToSpannerSQL converts a one-argument CEL `!` call into a Spanner SQL
// NOT expression. The single argument must itself convert to a bool expression.
func notExprToSpannerSQL(args []*expr.Expr) (spansql.BoolExpr, error) {
	if len(args) != 1 {
		return nil, fmt.Errorf("unexpected number of arguments to `!` expression: %d", len(args))
	}
	inner, err := exprToSpannerSQL(args[0])
	if err != nil {
		return nil, err
	}
	innerBool, ok := inner.(spansql.BoolExpr)
	if !ok {
		return nil, fmt.Errorf("unexpected argument to `!`: not a bool expr")
	}
	return spansql.LogicalOp{Op: spansql.Not, RHS: innerBool}, nil
}
// binaryLogicalExprToSpannerSQL converts a two-argument CEL logical call
// (`&&` or `||`, selected by op) into the corresponding parenthesized
// Spanner SQL expression.
//
// Bug fix: the original error messages hard-coded "&&" even when converting
// an `||` expression; they are now operator-agnostic.
func binaryLogicalExprToSpannerSQL(op spansql.LogicalOperator, args []*expr.Expr) (spansql.BoolExpr, error) {
	if len(args) != 2 {
		return nil, fmt.Errorf("unexpected number of arguments to binary logical expression: %d", len(args))
	}
	lhsExpr, err := exprToSpannerSQL(args[0])
	if err != nil {
		return nil, err
	}
	rhsExpr, err := exprToSpannerSQL(args[1])
	if err != nil {
		return nil, err
	}
	lhsBoolExpr, ok := lhsExpr.(spansql.BoolExpr)
	if !ok {
		return nil, fmt.Errorf("unexpected arguments to binary logical expression: lhs not a bool expr")
	}
	rhsBoolExpr, ok := rhsExpr.(spansql.BoolExpr)
	if !ok {
		return nil, fmt.Errorf("unexpected arguments to binary logical expression: rhs not a bool expr")
	}
	// Parenthesize so operator precedence is preserved when embedded in a
	// larger expression.
	return spansql.Paren{
		Expr: spansql.LogicalOp{
			Op:  op,
			LHS: lhsBoolExpr,
			RHS: rhsBoolExpr,
		},
	}, nil
}
// inExprToSpannerSQL converts a CEL `in` call (needle in [list...]) into a
// parenthesized chain of equality comparisons joined by OR:
// (needle = e1 OR needle = e2 OR ...). The list must be non-empty.
func inExprToSpannerSQL(args []*expr.Expr) (spansql.BoolExpr, error) {
	if len(args) != 2 {
		return nil, fmt.Errorf("unexpected number of arguments to `in` expression: %d", len(args))
	}
	needle, err := exprToSpannerSQL(args[0])
	if err != nil {
		return nil, err
	}
	list, ok := args[1].ExprKind.(*expr.Expr_ListExpr)
	if !ok {
		return nil, fmt.Errorf("arg of `in` expression must be list")
	}
	if len(list.ListExpr.Elements) == 0 {
		return nil, fmt.Errorf("`in` expression requires at least one element in list")
	}
	var boolExpr spansql.BoolExpr
	// Fold the element comparisons left-to-right into an OR chain; the first
	// element seeds the accumulator.
	for i, element := range list.ListExpr.Elements {
		elementExpr, err := exprToSpannerSQL(element)
		if err != nil {
			return nil, err
		}
		eq := spansql.ComparisonOp{
			LHS: needle,
			Op: spansql.Eq,
			RHS: elementExpr,
		}
		if i == 0 {
			boolExpr = eq
		} else {
			boolExpr = spansql.LogicalOp{
				LHS: boolExpr,
				Op: spansql.Or,
				RHS: eq,
			}
		}
	}
	parenExpr := spansql.Paren{Expr: boolExpr}
	return parenExpr, nil
}
package batch
import "github.com/Jeffail/benthos/v3/lib/x/docs"
// FieldSpec returns a spec for a common batching field.
func FieldSpec() docs.FieldSpec {
return docs.FieldSpec{
Name: "batching",
Description: `
Allows you to configure a [batching policy](/docs/configuration/batching).`,
Examples: []interface{}{
map[string]interface{}{
"byte_size": 5000,
"period": "1s",
},
map[string]interface{}{
"count": 10,
"period": "1s",
},
map[string]interface{}{
"period": "1m",
"condition": map[string]interface{}{
"text": map[string]interface{}{
"operator": "contains",
"arg": "END BATCH",
},
},
},
},
Children: docs.FieldSpecs{
docs.FieldCommon("count", "A number of messages at which the batch should be flushed. If `0` disables count based batching."),
docs.FieldCommon("byte_size", "An amount of bytes at which the batch should be flushed. If `0` disables size based batching."),
docs.FieldCommon("period", "A period in which an incomplete batch should be flushed regardless of its size.", "1s", "1m", "500ms"),
docs.FieldAdvanced("condition", "A [condition](/docs/components/conditions/about) to test against each message entering the batch, if this condition resolves to `true` then the batch is flushed."),
docs.FieldAdvanced(
"processors", "A list of [processors](/docs/components/processors/about) to apply to a batch as it is flushed. This allows you to aggregate and archive the batch however you see fit. Please note that all resulting messages are flushed as a single batch, therefore splitting the batch into smaller batches using these processors is a no-op.",
[]map[string]interface{}{
{
"archive": map[string]interface{}{
"format": "lines",
},
},
},
[]map[string]interface{}{
{
"archive": map[string]interface{}{
"format": "json_array",
},
},
},
[]map[string]interface{}{
{
"merge_json": struct{}{},
},
},
),
},
}
} | lib/message/batch/docs.go | 0.788909 | 0.44065 | docs.go | starcoder |
package series
import (
"fmt"
"reflect"
"github.com/go-gota/gota/series"
"github.com/gojek/merlin/pkg/transformer/types/converter"
)
// Type identifies the element type of a Series.
type Type string
// Supported series element types, mirroring gota's series types.
const (
	String Type = "string"
	Int Type = "int"
	Float Type = "float"
	Bool Type = "bool"
)
// numericTypes lists the types accepted by IsNumeric.
var numericTypes = []Type{Int, Float}
// Series wraps a gota series.Series.
type Series struct {
	series *series.Series
}
// contentType tracks which Go element kinds were observed while inferring a
// series type; the highest-priority flag set wins (string > bool > float > int).
type contentType struct {
	hasFloat bool
	hasInt bool
	hasBool bool
	hasString bool
}
// NewSeries wraps an existing gota series.
func NewSeries(s *series.Series) *Series {
	return &Series{s}
}
// New builds a Series of the given type and name from values.
func New(values interface{}, t Type, name string) *Series {
	s := series.New(values, series.Type(t), name)
	return &Series{&s}
}
// NewInferType builds a Series named seriesName from values, inferring the
// element type from the values. If values is already a *Series, it is copied
// and renamed instead.
func NewInferType(values interface{}, seriesName string) (*Series, error) {
	s, ok := values.(*Series)
	if ok {
		newSeries := s.Series().Copy()
		newSeries.Name = seriesName
		return NewSeries(&newSeries), nil
	}
	seriesType := detectType(values)
	seriesValues, err := castValues(values, seriesType)
	if err != nil {
		return nil, err
	}
	return New(seriesValues, seriesType, seriesName), nil
}
// Series returns the underlying gota series.
func (s *Series) Series() *series.Series {
	return s.series
}
// Type returns the element type of the series.
func (s *Series) Type() Type {
	return Type(s.series.Type())
}
// IsNumeric returns nil when the series holds a numeric type (int or float),
// or an error describing the actual type otherwise.
func (s *Series) IsNumeric() error {
	t := s.Type()
	for _, numericType := range numericTypes {
		if t == numericType {
			return nil
		}
	}
	return fmt.Errorf("this series type is not numeric but %s", t)
}
// GetRecords returns every element of the series as a []interface{}.
func (s *Series) GetRecords() []interface{} {
	records := make([]interface{}, 0, s.series.Len())
	for i := 0; i < s.series.Len(); i++ {
		records = append(records, s.series.Val(i))
	}
	return records
}
// Get returns the element at index.
func (s *Series) Get(index int) interface{} {
	return s.series.Elem(index).Val()
}
// detectType inspects values (a slice or a scalar) and infers the series
// type. Priority when kinds are mixed: string > bool > float > int, so any
// non-numeric, non-bool element forces the whole series to string.
// Nil elements are skipped and do not influence the result.
func detectType(values interface{}) Type {
	contentType := &contentType{}
	v := reflect.ValueOf(values)
	switch v.Kind() {
	case reflect.Slice:
		for i := 0; i < v.Len(); i++ {
			if v.Index(i).Interface() == nil {
				continue
			}
			contentType = hasType(v.Index(i).Interface(), contentType)
		}
	default:
		contentType = hasType(values, contentType)
	}
	switch {
	case contentType.hasString:
		return String
	case contentType.hasBool:
		return Bool
	case contentType.hasFloat:
		return Float
	case contentType.hasInt:
		return Int
	default:
		// Nothing observed (e.g. empty slice or all nils): default to string.
		return String
	}
}
// hasType marks in ct which kind of value was observed and returns ct.
// Anything that is not a float, int, or bool counts as a string.
func hasType(value interface{}, ct *contentType) *contentType {
	switch value.(type) {
	case bool:
		ct.hasBool = true
	case int, int8, int16, int32, int64:
		ct.hasInt = true
	case float32, float64:
		ct.hasFloat = true
	default:
		ct.hasString = true
	}
	return ct
}
// castValues converts values (a slice or a scalar) into a []interface{}
// whose elements are all cast to colType. Nil elements are passed through
// unchanged; a scalar input yields a one-element slice.
// NOTE(review): if values itself is nil (untyped), reflect.ValueOf yields an
// invalid Value and v.Interface() in the default branch would panic — confirm
// callers never pass a bare nil.
func castValues(values interface{}, colType Type) ([]interface{}, error) {
	v := reflect.ValueOf(values)
	var seriesValues []interface{}
	switch v.Kind() {
	case reflect.Slice:
		seriesValues = make([]interface{}, v.Len())
		for i := 0; i < v.Len(); i++ {
			iVal := v.Index(i).Interface()
			if iVal == nil {
				// Preserve missing values rather than casting them.
				seriesValues[i] = iVal
				continue
			}
			cVal, err := castValue(iVal, colType)
			if err != nil {
				return nil, err
			}
			seriesValues[i] = cVal
		}
	default:
		seriesValues = make([]interface{}, 1)
		iVal := v.Interface()
		if iVal == nil {
			return seriesValues, nil
		}
		v, err := castValue(iVal, colType)
		if err != nil {
			return nil, err
		}
		seriesValues[0] = v
	}
	return seriesValues, nil
}
func castValue(singleValue interface{}, seriesType Type) (interface{}, error) {
switch seriesType {
case Int:
return converter.ToInt(singleValue)
case Float:
return converter.ToFloat64(singleValue)
case Bool:
return converter.ToBool(singleValue)
case String:
return converter.ToString(singleValue)
default:
return nil, fmt.Errorf("unknown series type %s", seriesType)
}
} | api/pkg/transformer/types/series/series.go | 0.665193 | 0.470068 | series.go | starcoder |
package dyff
import (
"regexp"
"github.com/gonvenience/ytbx"
)
// filter returns a copy of the report containing only the diffs whose path
// satisfies the hasPath predicate. From/To document references are kept.
func (r Report) filter(hasPath func(*ytbx.Path) bool) (result Report) {
	result = Report{
		From: r.From,
		To: r.To,
	}
	for _, diff := range r.Diffs {
		if hasPath(diff.Path) {
			result.Diffs = append(result.Diffs, diff)
		}
	}
	return result
}
// Filter accepts YAML paths as input and returns a new report with differences
// for those paths only. Path strings that fail to parse are silently ignored
// (matching the original behavior).
func (r Report) Filter(paths ...string) (result Report) {
	if len(paths) == 0 {
		return r
	}
	// Parse each provided path once up front instead of re-parsing every
	// path string for every diff, as the original closure did.
	parsed := make([]string, 0, len(paths))
	for _, pathString := range paths {
		if path, err := ytbx.ParsePathStringUnsafe(pathString); err == nil {
			parsed = append(parsed, path.String())
		}
	}
	return r.filter(func(filterPath *ytbx.Path) bool {
		target := filterPath.String()
		for _, p := range parsed {
			if p == target {
				return true
			}
		}
		return false
	})
}
// Exclude accepts YAML paths as input and returns a new report with differences without those paths
func (r Report) Exclude(paths ...string) (result Report) {
	if len(paths) == 0 {
		return r
	}
	// Parse the provided paths once up front rather than re-parsing every
	// path for every difference in the report. Unparsable paths are silently
	// ignored, matching the previous behavior.
	lookup := make(map[string]struct{}, len(paths))
	for _, pathString := range paths {
		if path, err := ytbx.ParsePathStringUnsafe(pathString); err == nil {
			lookup[path.String()] = struct{}{}
		}
	}
	return r.filter(func(filterPath *ytbx.Path) bool {
		_, ok := lookup[filterPath.String()]
		return !ok
	})
}
// FilterRegexp accepts regular expressions as input and returns a new report with differences for matching those patterns
func (r Report) FilterRegexp(pattern ...string) (result Report) {
	if len(pattern) == 0 {
		return r
	}
	// MustCompile panics on an invalid pattern, matching previous behavior.
	regexps := make([]*regexp.Regexp, len(pattern))
	for i := range pattern {
		regexps[i] = regexp.MustCompile(pattern[i])
	}
	return r.filter(func(filterPath *ytbx.Path) bool {
		// Compute the path string once per diff instead of per pattern, and
		// name the loop variable `re` so it no longer shadows the regexp
		// package.
		s := filterPath.String()
		for _, re := range regexps {
			if re.MatchString(s) {
				return true
			}
		}
		return false
	})
}
// ExcludeRegexp accepts regular expressions as input and returns a new report with differences for not matching those patterns
func (r Report) ExcludeRegexp(pattern ...string) (result Report) {
if len(pattern) == 0 {
return r
}
regexps := make([]*regexp.Regexp, len(pattern))
for i := range pattern {
regexps[i] = regexp.MustCompile(pattern[i])
}
return r.filter(func(filterPath *ytbx.Path) bool {
for _, regexp := range regexps {
if regexp.MatchString(filterPath.String()) {
return false
}
}
return true
})
} | pkg/dyff/reports.go | 0.743634 | 0.419291 | reports.go | starcoder |
package nn
import (
"fmt"
"math/rand"
"sync"
)
// Layer is a layer of neural network.
type Layer interface {
	// InputShape reports the shape this layer accepts.
	InputShape() Shape
	// OutputShape reports the shape this layer produces.
	OutputShape() Shape
	// Init prepares the layer (weights, optimizers) for the given input shape.
	Init(inputShape Shape, factory OptimizerFactory) error
	// Call runs an inference-only pass.
	Call(inputs []*Tensor) []*Tensor
	// Forward runs the training pass, caching whatever Backward needs.
	Forward(inputs []*Tensor) []*Tensor
	// Backward propagates output gradients and returns input gradients.
	Backward(douts []*Tensor) []*Tensor
	// Params returns the trainable tensors, if any.
	Params() []*Tensor
	// Update applies the gradients accumulated by Backward.
	Update()
}
// inputLayer is a no-op passthrough layer that only records shapes.
type inputLayer struct {
	inputShape Shape
	outputShape Shape
}

// Init records the input shape; the output shape is identical.
func (i *inputLayer) Init(inputShape Shape, _ OptimizerFactory) error {
	i.inputShape = inputShape
	i.outputShape = inputShape
	return nil
}

// Call passes the inputs through unchanged.
func (i *inputLayer) Call(inputs []*Tensor) []*Tensor {
	return inputs
}

// Forward passes the inputs through unchanged.
func (i *inputLayer) Forward(inputs []*Tensor) []*Tensor {
	return inputs
}

// Backward passes the gradients through unchanged.
func (i *inputLayer) Backward(douts []*Tensor) []*Tensor {
	return douts
}

func (i *inputLayer) InputShape() Shape {
	return i.inputShape
}

func (i *inputLayer) OutputShape() Shape {
	return i.outputShape
}

// Params returns nil: the input layer has no trainable parameters.
func (i *inputLayer) Params() []*Tensor {
	return nil
}

// Update is a no-op: there is nothing to train.
func (i *inputLayer) Update() {}
// dense holds the state of a fully connected layer.
type dense struct {
	units int // number of output units
	weight *Tensor // weight matrix, shape (in, units)
	bias *Tensor // bias vector, shape (units)
	inputs []*Tensor // inputs cached by Forward for Backward
	dw []*Tensor // per-sample weight gradients from Backward
	db []*Tensor // per-sample bias gradients from Backward
	optW Optimizer
	optB Optimizer
	inputShape Shape
	outputShape Shape
}

// Dense is a fully connected layer.
func Dense(units int) Layer {
	return &dense{units: units}
}
// Init validates that the input is rank 1, allocates the weight matrix with
// small random values and a zero bias, and creates an optimizer for each
// parameter tensor.
func (d *dense) Init(inputShape Shape, factory OptimizerFactory) error {
	if inputShape.Rank() != 1 {
		return fmt.Errorf("invalid rank %v", inputShape.Rank())
	}
	d.inputShape = inputShape
	d.outputShape = Shape{d.units}
	wShape := Shape{inputShape[0], d.units}
	d.weight = NewTensor(wShape)
	// Random initialization scaled by 0.01 keeps initial activations small.
	d.weight = d.weight.BroadCast(func(_ float64) float64 {
		return rand.Float64() * 0.01
	})
	d.bias = NewTensor(d.outputShape)
	d.optW = factory.Create(wShape)
	d.optB = factory.Create(d.outputShape)
	return nil
}
// Call computes y = x·W + b for each input tensor, one goroutine per input.
func (d *dense) Call(inputs []*Tensor) []*Tensor {
	outputs := make([]*Tensor, len(inputs))
	wg := new(sync.WaitGroup)
	wg.Add(len(inputs))
	for i, input := range inputs {
		go func(i int, input *Tensor) {
			// Treat the input as a 1×n row vector, multiply by the weight
			// matrix, then reshape to the output shape and add the bias.
			outputs[i] = input.ReShape(Shape{1, input.shape[0]}).Dot(d.weight).ReShape(d.outputShape).AddTensor(d.bias)
			wg.Done()
		}(i, input)
	}
	wg.Wait()
	return outputs
}
// Forward computes the same outputs as Call, additionally caching the
// inputs so Backward can compute the weight gradients. The previous body
// duplicated Call's computation verbatim; delegating removes the duplicate.
func (d *dense) Forward(inputs []*Tensor) []*Tensor {
	d.inputs = make([]*Tensor, len(inputs))
	copy(d.inputs, inputs)
	return d.Call(inputs)
}
// Backward computes, per sample: the bias gradient (the incoming gradient
// itself), the input gradient dout·Wᵀ, and the weight gradient xᵀ·dout.
// The parameter gradients are stored for Update; the input gradients are
// returned for the previous layer.
func (d *dense) Backward(douts []*Tensor) []*Tensor {
	d.dw = make([]*Tensor, len(douts))
	d.db = make([]*Tensor, len(douts))
	dx := make([]*Tensor, len(douts))
	wg := new(sync.WaitGroup)
	wg.Add(len(douts))
	for i, dout := range douts {
		go func(i int, dout *Tensor) {
			// Clone before reshaping: db keeps the original (flat) shape.
			d.db[i] = dout.Clone()
			dout = dout.ReShape(Shape{1, dout.shape[0]})
			dx[i] = dout.Dot(d.weight.Transpose())
			dx[i] = dx[i].ReShape(Shape{dx[i].shape[1]})
			d.dw[i] = d.inputs[i].ReShape(Shape{1, d.inputs[i].shape[0]}).Transpose().Dot(dout)
			wg.Done()
		}(i, dout)
	}
	wg.Wait()
	return dx
}
// Params returns the trainable weight and bias tensors.
func (d *dense) Params() []*Tensor {
	return []*Tensor{d.weight, d.bias}
}

// Update averages the per-sample gradients accumulated by Backward over the
// batch and applies them to the weights and bias via the optimizers.
func (d *dense) Update() {
	dw := NewTensor(d.dw[0].shape)
	db := NewTensor(d.db[0].shape)
	for i := 0; i < len(d.dw); i++ {
		dw = dw.AddTensor(d.dw[i])
		db = db.AddTensor(d.db[i])
	}
	// Mean gradient over the batch.
	dw = dw.DivBroadCast(float64(len(d.dw)))
	db = db.DivBroadCast(float64(len(d.db)))
	d.weight = d.optW.Update(d.weight, dw)
	d.bias = d.optB.Update(d.bias, db)
}

func (d *dense) InputShape() Shape {
	return d.inputShape
}

func (d *dense) OutputShape() Shape {
	return d.outputShape
}
// flatten collapses each input tensor to a rank-1 tensor.
type flatten struct {
	inputShape Shape
	outputShape Shape
}

// Flatten flattens the inputs.
func Flatten() Layer {
	return &flatten{}
}

// Init records the input shape; the output is a rank-1 shape with the same
// total number of elements.
func (f *flatten) Init(inputShape Shape, _ OptimizerFactory) error {
	f.inputShape = inputShape
	f.outputShape = Shape{inputShape.Elements()}
	return nil
}

// Call clones each input and overwrites the clone's shape with the flat
// output shape; the underlying data is untouched.
func (f *flatten) Call(inputs []*Tensor) []*Tensor {
	outputs := make([]*Tensor, len(inputs))
	for i, input := range inputs {
		outputs[i] = input.Clone()
		outputs[i].shape = f.outputShape.Clone()
	}
	return outputs
}

func (f *flatten) Forward(inputs []*Tensor) []*Tensor {
	return f.Call(inputs)
}

// Backward passes the gradients through unchanged.
// NOTE(review): the gradients keep their flat shape here — confirm the
// previous layer tolerates that rather than the original input shape.
func (f *flatten) Backward(douts []*Tensor) []*Tensor {
	return douts
}

func (f *flatten) InputShape() Shape {
	return f.inputShape
}

func (f *flatten) OutputShape() Shape {
	return f.outputShape
}

// Params returns nil: flatten has no trainable parameters.
func (f *flatten) Params() []*Tensor {
	return nil
}

// Update is a no-op: there is nothing to train.
func (f *flatten) Update() {}
// dropout randomly zeroes values of each input during training.
type dropout struct {
	rate float64 // configured dropout rate (see NOTE on Forward)
	mask [][]bool // per-input mask of zeroed indices, set by Forward
	inputShape Shape
	outputShape Shape
}

// Dropout dropouts inputs.
func Dropout(rate float64) Layer {
	return &dropout{rate: rate}
}

func (d *dropout) Init(inputShape Shape, _ OptimizerFactory) error {
	d.inputShape = inputShape
	d.outputShape = inputShape
	return nil
}

// Call (inference) passes inputs through unchanged.
func (d *dropout) Call(inputs []*Tensor) []*Tensor {
	return inputs
}

// Forward zeroes randomly chosen values of each input, mutating the input
// tensors in place, and records which indices were zeroed so Backward can
// zero the corresponding gradients.
// NOTE(review): `active` is units*(1-rate) and that many entries are zeroed,
// i.e. rate behaves as a keep-probability here, not a drop fraction — confirm
// this is intentional. Also note there is no inverted-dropout rescaling of
// the surviving activations.
func (d *dropout) Forward(inputs []*Tensor) []*Tensor {
	d.mask = make([][]bool, len(inputs))
	units := inputs[0].shape.Elements()
	active := int(float64(units) * (1 - d.rate))
	for i, input := range inputs {
		mask := make([]bool, units)
		// Rejection-sample distinct indices until `active` have been zeroed.
		for n := 0; n < active; {
			index := rand.Intn(units)
			if mask[index] {
				continue
			}
			input.rawData[index] = 0
			mask[index] = true
			n++
		}
		d.mask[i] = mask
	}
	return inputs
}

// Backward zeroes the gradient entries at the indices dropped by Forward.
func (d *dropout) Backward(douts []*Tensor) []*Tensor {
	for i, dout := range douts {
		for j, drop := range d.mask[i] {
			if drop {
				dout.rawData[j] = 0
			}
		}
	}
	return douts
}

func (d *dropout) InputShape() Shape {
	return d.inputShape
}

func (d *dropout) OutputShape() Shape {
	return d.outputShape
}

// Params returns nil: dropout has no trainable parameters.
func (d *dropout) Params() []*Tensor {
	return nil
}

// Update is a no-op: there is nothing to train.
func (d *dropout) Update() {}
// lambda wraps a user-supplied tensor function as a layer.
type lambda struct {
	function func(*Tensor) *Tensor // applied to every input tensor
	calcOutputShape func(inputShape Shape) Shape // derives the output shape
	inputShape Shape
	outputShape Shape
}

// Lambda is a user defined function layer.
func Lambda(f func(*Tensor) *Tensor, outputShape func(inputShape Shape) Shape) Layer {
	return &lambda{function: f, calcOutputShape: outputShape}
}

func (l *lambda) Init(inputShape Shape, _ OptimizerFactory) error {
	l.inputShape = inputShape
	l.outputShape = l.calcOutputShape(inputShape)
	return nil
}

// Call applies the wrapped function to each input, one goroutine per input.
func (l *lambda) Call(inputs []*Tensor) []*Tensor {
	outputs := make([]*Tensor, len(inputs))
	wg := new(sync.WaitGroup)
	wg.Add(len(inputs))
	for i, input := range inputs {
		go func(i int, input *Tensor) {
			outputs[i] = l.function(input)
			wg.Done()
		}(i, input)
	}
	wg.Wait()
	return outputs
}

func (l *lambda) Forward(inputs []*Tensor) []*Tensor {
	return l.Call(inputs)
}

// Backward passes the gradients through unchanged: the wrapped function is
// not differentiated.
func (l *lambda) Backward(douts []*Tensor) []*Tensor {
	return douts
}

func (l *lambda) InputShape() Shape {
	return l.inputShape
}

func (l *lambda) OutputShape() Shape {
	return l.outputShape
}

// Params returns nil: lambda has no trainable parameters.
func (l *lambda) Params() []*Tensor {
	return nil
}

// Update is a no-op: there is nothing to train.
func (l *lambda) Update() {} | nn/layer.go | 0.728941 | 0.534309 | layer.go | starcoder
package doudizhu
import (
"game/internal/poker"
"reflect"
)
// cardPatternQuadrupletWithPairs is the doudizhu "four with two pairs"
// pattern: a quadruplet accompanied by two pairs.
type cardPatternQuadrupletWithPairs struct {
	cardPatternBase
}

// Name returns the pattern's type name via reflection.
func (r cardPatternQuadrupletWithPairs) Name() string {
	return reflect.TypeOf(r).Name()
}
// Valid reports whether the cards form the pattern: exactly eight cards
// consisting of one quadruplet plus two pairs, or two quadruplets (each
// quadruplet counting as two pairs).
func (r cardPatternQuadrupletWithPairs) Valid() bool {
	r.Cards().Sort(DoudizhuValueRanks)
	if r.Cards().Length() != 8 {
		return false
	}
	counts := r.Cards().Counts()
	// count4: number of cards whose value occurs exactly four times.
	count4 := r.Cards().Count(func(c *poker.Card) bool {
		for value, count := range counts {
			if count == 4 && c.Value == value {
				return true
			}
		}
		return false
	})
	if !(count4 == 4 || count4 == 8) {
		return false
	}
	if count4 == 4 {
		// A single quadruplet: the remaining four cards must be two pairs.
		count2 := r.Cards().Count(func(c *poker.Card) bool {
			for value, count := range counts {
				if count == 2 && c.Value == value {
					return true
				}
			}
			return false
		})
		if count2 != 4 {
			return false
		}
	}
	// Every value must occur exactly four or exactly two times, and the
	// occurrence counts must add up to all eight cards.
	sum := 0
	for _, count := range counts {
		sum += count
		if count != 4 && count != 2 {
			return false
		}
	}
	return sum == 8
}
// Same reports whether s is the same kind of pattern.
func (r cardPatternQuadrupletWithPairs) Same(s poker.CardPattern) bool {
	return r.Name() == s.Name()
}

// Equal always reports false for this pattern kind.
func (r cardPatternQuadrupletWithPairs) Equal(s poker.CardPattern) bool {
	return false
}

// Greeter reports whether r beats s by comparing the ranks of the two
// quadruplets. Both hands must be valid instances of the same pattern.
// NOTE(review): "Greeter" looks like a typo for "Greater", but the name is
// part of the CardPattern interface and cannot be changed here.
func (r cardPatternQuadrupletWithPairs) Greeter(s poker.CardPattern) bool {
	if !r.Same(s) || !r.Valid() || !s.Valid() {
		return false
	}
	// Locate a card belonging to each hand's quadruplet.
	rCard := r.Cards().First(func(c1 *poker.Card) bool {
		return r.Cards().Count(func(c2 *poker.Card) bool {
			return c1.Value == c2.Value
		}) == 4
	})
	sCard := s.Cards().First(func(c1 *poker.Card) bool {
		return s.Cards().Count(func(c2 *poker.Card) bool {
			return c1.Value == c2.Value
		}) == 4
	})
	return DoudizhuValueRanks.Rank(rCard) > DoudizhuValueRanks.Rank(sCard)
}

// Lesser reports whether s beats r.
func (r cardPatternQuadrupletWithPairs) Lesser(s poker.CardPattern) bool {
	return s.Greeter(r)
}

// String returns an empty textual representation.
func (r cardPatternQuadrupletWithPairs) String() string {
	return ""
}

// Factory builds a pattern instance wrapping the given cards.
func (r cardPatternQuadrupletWithPairs) Factory(cards poker.Cards) poker.CardPattern {
	return cardPatternQuadrupletWithPairs{cardPatternBase: cardPatternBase{cards: cards}}
}

// FactoryCardPatternQuadrupletWithPairs builds the pattern from cards.
func FactoryCardPatternQuadrupletWithPairs(cards poker.Cards) poker.CardPattern {
	return cardPatternQuadrupletWithPairs{}.Factory(cards)
} | internal/poker/doudizhu/cardPatternQuadrupletWithPairs.go | 0.524395 | 0.445952 | cardPatternQuadrupletWithPairs.go | starcoder
package elastic
// The geo_distance facet is a facet providing information for ranges of
// distances from a provided geo_point including count of the number of hits
// that fall within each range, and aggregation information (like total).
// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-geo-distance-facet.html
type GeoDistanceFacet struct {
	facetFilter Filter // NOTE(review): the FacetFilter setter takes a Facet — confirm Facet is assignable to Filter
	global *bool
	nested string
	mode string
	fieldName string // document field holding the geo_point
	valueFieldName string // serialized as "value_field"
	lat float64
	lon float64
	geoHash string // alternative origin given as a geohash
	geoDistance string // serialized as "distance_type"
	unit string // distance unit, serialized as "unit"
	params map[string]interface{} // script parameters for valueScript
	valueScript string
	lang string // script language for valueScript
	entries []geoDistanceFacetEntry // configured distance ranges
}
// NewGeoDistanceFacet creates a facet with empty script parameters and no
// distance ranges configured.
func NewGeoDistanceFacet() GeoDistanceFacet {
	return GeoDistanceFacet{
		params: make(map[string]interface{}),
		entries: make([]geoDistanceFacetEntry, 0),
	}
}
// FacetFilter restricts the facet to documents matching the given filter.
// All setters below use the value receiver, returning a modified copy for
// fluent chaining.
// NOTE(review): the parameter is typed Facet while the facetFilter field is
// declared as Filter — confirm these are assignment-compatible.
func (f GeoDistanceFacet) FacetFilter(filter Facet) GeoDistanceFacet {
	f.facetFilter = filter
	return f
}

// Global toggles computing the facet over all documents rather than the
// query scope.
func (f GeoDistanceFacet) Global(global bool) GeoDistanceFacet {
	f.global = &global
	return f
}

// Nested sets the nested path for the facet.
func (f GeoDistanceFacet) Nested(nested string) GeoDistanceFacet {
	f.nested = nested
	return f
}

// Mode sets the facet execution mode.
func (f GeoDistanceFacet) Mode(mode string) GeoDistanceFacet {
	f.mode = mode
	return f
}

// Field sets the document field holding the geo_point.
func (f GeoDistanceFacet) Field(fieldName string) GeoDistanceFacet {
	f.fieldName = fieldName
	return f
}

// ValueField sets the field used for aggregation values.
func (f GeoDistanceFacet) ValueField(valueFieldName string) GeoDistanceFacet {
	f.valueFieldName = valueFieldName
	return f
}

// ValueScript sets a script producing the aggregation value.
func (f GeoDistanceFacet) ValueScript(valueScript string) GeoDistanceFacet {
	f.valueScript = valueScript
	return f
}

// Lang sets the script language for the value script.
func (f GeoDistanceFacet) Lang(lang string) GeoDistanceFacet {
	f.lang = lang
	return f
}

// ScriptParam adds a named parameter passed to the value script.
func (f GeoDistanceFacet) ScriptParam(name string, value interface{}) GeoDistanceFacet {
	f.params[name] = value
	return f
}

// Point sets the origin latitude and longitude in one call.
func (f GeoDistanceFacet) Point(lat, lon float64) GeoDistanceFacet {
	f.lat = lat
	f.lon = lon
	return f
}

// Lat sets the origin latitude.
func (f GeoDistanceFacet) Lat(lat float64) GeoDistanceFacet {
	f.lat = lat
	return f
}

// Lon sets the origin longitude.
func (f GeoDistanceFacet) Lon(lon float64) GeoDistanceFacet {
	f.lon = lon
	return f
}

// GeoHash sets the origin as a geohash; when set it takes precedence over
// the lat/lon pair in Source.
func (f GeoDistanceFacet) GeoHash(geoHash string) GeoDistanceFacet {
	f.geoHash = geoHash
	return f
}

// GeoDistance sets the distance computation type ("distance_type").
func (f GeoDistanceFacet) GeoDistance(geoDistance string) GeoDistanceFacet {
	f.geoDistance = geoDistance
	return f
}

// AddRange appends a bounded distance range [from, to].
func (f GeoDistanceFacet) AddRange(from, to float64) GeoDistanceFacet {
	f.entries = append(f.entries, geoDistanceFacetEntry{From: from, To: to})
	return f
}

// AddUnboundedTo appends a range with a lower bound only (no upper bound).
func (f GeoDistanceFacet) AddUnboundedTo(from float64) GeoDistanceFacet {
	f.entries = append(f.entries, geoDistanceFacetEntry{From: from, To: nil})
	return f
}

// AddUnboundedFrom appends a range with an upper bound only (no lower bound).
func (f GeoDistanceFacet) AddUnboundedFrom(to float64) GeoDistanceFacet {
	f.entries = append(f.entries, geoDistanceFacetEntry{From: nil, To: to})
	return f
}

// Unit sets the distance unit.
func (f GeoDistanceFacet) Unit(distanceUnit string) GeoDistanceFacet {
	f.unit = distanceUnit
	return f
}
// addFilterFacetAndGlobal copies the generic facet options (facet_filter,
// nested, global, mode) into the given source map when they are set.
func (f GeoDistanceFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
	if f.facetFilter != nil {
		source["facet_filter"] = f.facetFilter.Source()
	}
	if f.nested != "" {
		source["nested"] = f.nested
	}
	if f.global != nil {
		source["global"] = *f.global
	}
	if f.mode != "" {
		source["mode"] = f.mode
	}
}
// Source returns the facet definition as a JSON-serializable map, in the
// shape expected by the Elasticsearch geo_distance facet API.
func (f GeoDistanceFacet) Source() interface{} {
	source := make(map[string]interface{})
	f.addFilterFacetAndGlobal(source)
	opts := make(map[string]interface{})
	source["geo_distance"] = opts
	// The origin is either a geohash string or an explicit [lat, lon] pair.
	if f.geoHash != "" {
		opts[f.fieldName] = f.geoHash
	} else {
		opts[f.fieldName] = []float64{f.lat, f.lon}
	}
	if f.valueFieldName != "" {
		opts["value_field"] = f.valueFieldName
	}
	if f.valueScript != "" {
		opts["value_script"] = f.valueScript
		if f.lang != "" {
			opts["lang"] = f.lang
		}
		if len(f.params) > 0 {
			opts["params"] = f.params
		}
	}
	// Pre-size the ranges slice and merge the previously duplicated numeric
	// and string switch cases, which produced identical assignments.
	// NOTE(review): int8/uint variants and nil bounds are silently dropped,
	// preserving the original behavior.
	ranges := make([]interface{}, 0, len(f.entries))
	for _, ent := range f.entries {
		r := make(map[string]interface{})
		if ent.From != nil {
			switch from := ent.From.(type) {
			case int, int16, int32, int64, float32, float64, string:
				r["from"] = from
			}
		}
		if ent.To != nil {
			switch to := ent.To.(type) {
			case int, int16, int32, int64, float32, float64, string:
				r["to"] = to
			}
		}
		ranges = append(ranges, r)
	}
	opts["ranges"] = ranges
	if f.unit != "" {
		opts["unit"] = f.unit
	}
	if f.geoDistance != "" {
		opts["distance_type"] = f.geoDistance
	}
	return source
}
// geoDistanceFacetEntry is a single distance range; a nil bound means the
// range is unbounded on that side.
type geoDistanceFacetEntry struct {
	From interface{}
	To interface{}
} | search_facets_geo_distance.go | 0.899689 | 0.52616 | search_facets_geo_distance.go | starcoder
package output
import (
"fmt"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message/batch"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/output/writer"
"github.com/Jeffail/benthos/v3/lib/types"
)
//------------------------------------------------------------------------------
// init registers the DynamoDB output plugin, its documentation string, and
// its config sanitiser with the output constructor registry.
func init() {
	Constructors[TypeDynamoDB] = TypeSpec{
		constructor: NewDynamoDB,
		Description: `
Inserts items into a DynamoDB table.
The field ` + "`string_columns`" + ` is a map of column names to string values,
where the values are
[function interpolated](/docs/configuration/interpolation#functions) per message of a
batch. This allows you to populate string columns of an item by extracting
fields within the document payload or metadata like follows:
` + "``` yaml" + `
string_columns:
  id: ${!json_field:id}
  title: ${!json_field:body.title}
  topic: ${!metadata:kafka_topic}
  full_content: ${!content}
` + "```" + `
The field ` + "`json_map_columns`" + ` is a map of column names to json paths,
where the [dot path](/docs/configuration/field_paths) is extracted from each document and
converted into a map value. Both an empty path and the path ` + "`.`" + ` are
interpreted as the root of the document. This allows you to populate map columns
of an item like follows:
` + "``` yaml" + `
json_map_columns:
  user: path.to.user
  whole_document: .
` + "```" + `
A column name can be empty:
` + "``` yaml" + `
json_map_columns:
  "": .
` + "```" + `
In which case the top level document fields will be written at the root of the
item, potentially overwriting previously defined column values. If a path is not
found within a document the column will not be populated.
### Credentials
By default Benthos will use a shared credentials file when connecting to AWS
services. It's also possible to set them explicitly at the component level,
allowing you to transfer data across accounts. You can find out more
[in this document](/docs/guides/aws).`,
		sanitiseConfigFunc: func(conf Config) (interface{}, error) {
			return sanitiseWithBatch(conf.DynamoDB, conf.DynamoDB.Batching)
		},
		Async: true,
		Batches: true,
	}
}
//------------------------------------------------------------------------------
// NewDynamoDB creates a new DynamoDB output type. The writer is wrapped for
// asynchronous delivery when more than one message may be in flight, and
// optionally wrapped again with a batching policy.
func NewDynamoDB(conf Config, mgr types.Manager, log log.Modular, stats metrics.Type) (Type, error) {
	dyn, err := writer.NewDynamoDB(conf.DynamoDB, log, stats)
	if err != nil {
		return nil, err
	}
	var w Type
	if conf.DynamoDB.MaxInFlight == 1 {
		w, err = NewWriter(
			TypeDynamoDB, dyn, log, stats,
		)
	} else {
		w, err = NewAsyncWriter(
			TypeDynamoDB, conf.DynamoDB.MaxInFlight, dyn, log, stats,
		)
	}
	// Early return avoids the previous err shadowing inside the batching
	// branch and never returns a writer value alongside a non-nil error.
	if err != nil {
		return nil, err
	}
	if bconf := conf.DynamoDB.Batching; !bconf.IsNoop() {
		policy, err := batch.NewPolicy(bconf, mgr, log.NewModule(".batching"), metrics.Namespaced(stats, "batching"))
		if err != nil {
			return nil, fmt.Errorf("failed to construct batch policy: %v", err)
		}
		w = NewBatcher(policy, w, log, stats)
	}
	return w, nil
}
//------------------------------------------------------------------------------ | lib/output/dynamodb.go | 0.796094 | 0.790369 | dynamodb.go | starcoder |
package gomovie
import "io"
//SampleInt16 describes a single 16 bit sample
type SampleInt16 int16

//Float normalizes the sample to a value between -1 and 1
//(the doc comment previously said "ToFloat"; the method is named Float)
func (p SampleInt16) Float() float32 {
	return float32(p) / float32(32768.)
}

//SampleInt32 describes a single 32 bit sample
type SampleInt32 int32

//Float normalizes the sample to a value between -1 and 1
func (p SampleInt32) Float() float32 {
	return float32(p) / float32(2147483648.)
}

//SampleFormat describes the format of a SampleBlock
type SampleFormat struct {
	Depth int // bits per sample (16 or 32)
	BlockSize int // samples per block
}

//NewSampleFormat creates a SampleFormat with the default values
func NewSampleFormat() *SampleFormat {
	return &SampleFormat{Depth: 16, BlockSize: GlobalConfig.SampleBlockSize}
}
//SampleBlock describes a chunk of sample values
type SampleBlock struct {
	*SampleFormat
	Data interface{} // []SampleInt16 or []SampleInt32 depending on Depth
	Time float32
	Duration float32
}

//Bytes serializes the sample data to little-endian bytes: two bytes per
//16-bit sample, four per 32-bit sample. Unrecognized Data types yield nil.
func (sb *SampleBlock) Bytes() (bd []byte) {
	switch d := sb.Data.(type) {
	case []SampleInt16:
		bd = make([]byte, len(d)*2)
		for i, x := range d {
			v := uint16(x)
			// Each sample occupies two bytes, so the byte index is 2*i.
			// The previous code wrote bd[i]/bd[i+1], making consecutive
			// samples overwrite each other and leaving the second half of
			// the buffer unwritten.
			bd[2*i] = byte(v)
			bd[2*i+1] = byte(v >> 8)
		}
	case []SampleInt32:
		bd = make([]byte, len(d)*4)
		for i, x := range d {
			v := uint32(x)
			// Four bytes per sample: byte index is 4*i (same fix as above).
			bd[4*i] = byte(v)
			bd[4*i+1] = byte(v >> 8)
			bd[4*i+2] = byte(v >> 16)
			bd[4*i+3] = byte(v >> 24)
		}
	}
	return
}
//ConvertFloats converts each value to a float (normalized between -1 and 1,
//matching Float) and passes it to the given callback. The callback is
//expected to return a modified float value, which is scaled back and stored
//in place.
func (sb *SampleBlock) ConvertFloats(fn func(i int, f float32) float32) {
	switch t := sb.Data.(type) {
	case []SampleInt16:
		for i, v := range t {
			// 32768 = 2^15, the magnitude of the int16 range.
			t[i] = SampleInt16(fn(i, v.Float()) * 32768.)
		}
	case []SampleInt32:
		for i, v := range t {
			// 2147483648 = 2^31, the magnitude of the int32 range.
			t[i] = SampleInt32(fn(i, v.Float()) * 2147483648.)
		}
	}
}
//SampleReaderInfo contains information about an Audio stream in a video file
type SampleReaderInfo struct {
	CodecName string
	Duration float32 // duration in seconds
	SampleRate int // samples per second
	Channels int
}

//SampleReader describes an interface to read audio sample blocks
type SampleReader interface {
	io.ReadCloser
	// Slice returns a reader restricted to the given range.
	Slice(r *Range) SampleReader
	// Range reports the reader's current range.
	Range() *Range
	//read a single sample block in the format SampleInt16 or SampleInt32 (depending on SampleDepth)
	ReadSampleBlock() (*SampleBlock, error)
	// SampleFormat reports the depth and block size of produced blocks.
	SampleFormat() *SampleFormat
	//information about the sample reader
	Info() *SampleReaderInfo
} | sample.go | 0.786582 | 0.562657 | sample.go | starcoder
package enhanced
import "github.com/samuel/go-zookeeper/zk"
// nsBasicOperations wraps basicOperations so that every path is prefixed
// with the configured namespace before reaching ZooKeeper.
type nsBasicOperations struct {
	basicOperations
	*namespace
}

// newNSBasicOperations builds the namespaced operations over the given
// connection provider and namespace.
func newNSBasicOperations(conner Conner, ns *namespace) nsBasicOperations {
	return nsBasicOperations{
		basicOperations: newBasicOperations(conner),
		namespace: ns,
	}
}
// Get fetches value and stat of given znode. The path is namespaced first,
// as in every method below.
func (nb *nsBasicOperations) Get(p string) ([]byte, *zk.Stat, error) {
	return nb.get(nb.namespaced(p))
}

// Exist returns true and stat of given znode if it exists.
func (nb *nsBasicOperations) Exist(p string) (bool, *zk.Stat, error) {
	return nb.exist(nb.namespaced(p))
}

// GetChildren fetches children of given path.
func (nb *nsBasicOperations) GetChildren(p string) ([]string, *zk.Stat, error) {
	return nb.getChildren(nb.namespaced(p))
}

// Set sets the value on given znode at the expected version.
func (nb *nsBasicOperations) Set(p string, value []byte, version int32) (*zk.Stat, error) {
	return nb.set(nb.namespaced(p), value, version)
}

// Create creates given znode with value set to nil.
func (nb *nsBasicOperations) Create(p string) error {
	return nb.create(nb.namespaced(p))
}

// CreateValue creates given znode with value.
func (nb *nsBasicOperations) CreateValue(p string, value []byte) error {
	return nb.createValue(nb.namespaced(p), value)
}

// Delete deletes given znode at the expected version.
func (nb *nsBasicOperations) Delete(p string, version int32) error {
	return nb.delete(nb.namespaced(p), version)
}

// DeleteWithChildren deletes given znode with its children if any.
func (nb *nsBasicOperations) DeleteWithChildren(p string) error {
	return nb.deleteWithChildren(nb.namespaced(p))
}

// CreateWithParents create path with its parents created if missing.
func (nb *nsBasicOperations) CreateWithParents(p string) error {
	return nb.createWithParents(nb.namespaced(p))
}
// CreateValueWithParents create path with value and its parents created if missing.
func (nb *nsBasicOperations) CreateValueWithParents(p string, value []byte) error {
return nb.createValueWithParents(p, value)
} | enhanced/ns_basic_operations.go | 0.736874 | 0.466846 | ns_basic_operations.go | starcoder |
package modis
import (
"errors"
"fmt"
"math"
"github.com/nordicsense/gdal"
)
// ModisWKT defines the MODIS sinusoidal projection in WKT form.
// (The comment previously read "ModisKWT", a typo for the constant's name.)
const ModisWKT = `PROJCS["MODIS",
    GEOGCS["Unknown datum based upon the custom spheroid",
        DATUM["Not specified (based on custom spheroid)",
            SPHEROID["Custom spheroid",6371007.181,0]],
        PRIMEM["Greenwich",0],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Sinusoidal"],
    PARAMETER["longitude_of_center",0],
    PARAMETER["false_easting",0],
    PARAMETER["false_northing",0],
    UNIT["Meter",1]]`
// LatLon represents a latitude/longitude pair (index 0 = lat, index 1 = lon).
type LatLon [2]float64

// Box defines an area of raster: x,y offset and x,y size.
type Box [4]int
// Transform converts the coordinate pair from the source EPSG-coded
// projection into the target one. Both spatial references are destroyed
// before returning.
// NOTE(review): "ESPG" throughout this API is presumably a typo for "EPSG";
// the names are kept for compatibility with callers.
func (ll LatLon) Transform(fromESPG, toESPG int) (LatLon, error) {
	from, err := ll.CSRFromESPG(fromESPG)
	if err != nil {
		return ll, err
	}
	defer from.Destroy()
	to, err := ll.CSRFromESPG(toESPG)
	if err != nil {
		return ll, err
	}
	defer to.Destroy()
	return ll.transform(from, to)
}
// transform applies a GDAL coordinate transformation between the two given
// spatial references. On failure the (possibly partially transformed)
// coordinates are returned together with a non-nil error.
func (ll LatLon) transform(from, to gdal.SpatialReference) (LatLon, error) {
	t := gdal.CreateCoordinateTransform(from, to)
	defer t.Destroy()
	// GDAL transforms in place via parallel coordinate slices.
	lat := []float64{ll[0]}
	lon := []float64{ll[1]}
	z := []float64{0.0}
	if ok := t.Transform(1, lon, lat, z); ok {
		return LatLon{lat[0], lon[0]}, nil
	}
	return LatLon{lat[0], lon[0]}, errors.New("transformation failed")
}
// CSRFromESPG builds a spatial reference for the given EPSG code. Callers
// own the returned reference and must Destroy it.
// NOTE(review): the receiver is unused; this could be a plain function.
func (ll LatLon) CSRFromESPG(espg int) (gdal.SpatialReference, error) {
	res := gdal.CreateSpatialReference("")
	err := res.FromEPSG(espg)
	return res, err
}

// MODIS_CSR builds the spatial reference for the MODIS sinusoidal
// projection (ModisWKT). Callers own the returned reference.
func (ll LatLon) MODIS_CSR() gdal.SpatialReference {
	return gdal.CreateSpatialReference(ModisWKT)
}
// Degrees2Sin transforms coordinates from the World Geodetic System (WGS84,
// given in degrees, EPSG 4326) into Sphere Sinusoidal.
func (ll LatLon) Degrees2Sin() (LatLon, error) {
	from, err := ll.CSRFromESPG(4326)
	if err != nil {
		return ll, err
	}
	to := ll.MODIS_CSR()
	defer from.Destroy()
	defer to.Destroy()
	return ll.transform(from, to)
}

// Sin2Degree transforms coordinates from the Sphere Sinusoidal system into
// the World Geodetic System (WGS84, EPSG 4326). (The comment previously
// said "Sin2Degrees"; the method is named Sin2Degree.)
func (ll LatLon) Sin2Degree() (LatLon, error) {
	from := ll.MODIS_CSR()
	defer from.Destroy()
	to, err := ll.CSRFromESPG(4326)
	if err != nil {
		return ll, err
	}
	defer to.Destroy()
	return ll.transform(from, to)
}

// String renders the pair as "(lat,lon)" with two decimal places.
func (ll LatLon) String() string {
	return fmt.Sprintf("(%.2f,%.2f)", ll[0], ll[1])
}
// AffineTransform defines the transformation of the projection
// (GDAL-style six-coefficient geotransform).
type AffineTransform [6]float64

// Pixels2LatLonSin performs the direct affine transform from image pixels
// to World Sinusoidal coordinates.
func (at AffineTransform) Pixels2LatLonSin(x, y int) LatLon {
	lat := float64(y)*at[5] + at[3]
	lon := float64(x)*at[1] + at[0]
	return LatLon{lat, lon}
}

// Pixels2LatLon performs the direct affine transform from image pixels to
// lat/lon in degrees.
// NOTE(review): the error from Sin2Degree is discarded; on failure the
// partially converted coordinates are returned silently.
func (at AffineTransform) Pixels2LatLon(x, y int) LatLon {
	lat := float64(y)*at[5] + at[3]
	lon := float64(x)*at[1] + at[0]
	res, _ := LatLon{lat, lon}.Sin2Degree()
	return res
}

// LatLonSin2Pixels performs the inverse affine transform from World
// Sinusoidal coordinates to (rounded) image pixels.
func (at AffineTransform) LatLonSin2Pixels(ll LatLon) (int, int) {
	x := int(math.Round((ll[1] - at[0]) / at[1]))
	y := int(math.Round((ll[0] - at[3]) / at[5]))
	return x, y
}

// LatLon2Pixels performs the inverse affine transform from lat/lon in
// degrees to image pixels (error from Degrees2Sin is discarded).
func (at AffineTransform) LatLon2Pixels(ll LatLon) (int, int) {
	ll, _ = ll.Degrees2Sin()
	return at.LatLonSin2Pixels(ll)
}
// ModisLST2UTC converts a MODIS time value in hours from Local Solar Time
// to UTC, using the longitude in degrees to derive the solar offset (15
// degrees of longitude per hour). Out-of-range inputs are wrapped by ±24h.
func ModisLST2UTC(lst, lonDegree float64) float64 {
	var offset float64
	switch {
	case lst < 0.0:
		offset = 24.0
	case lst >= 24.0:
		offset = -24.0 // FIXME should this be next day?
	}
	return lst - lonDegree/15.0 + offset
}
package main
/*
1. think of pre-processing steps: sort, arrange the data, index the data, prefix sums!
2. split into small functions which you will implement later
3. solution scanning and offer alternatives (always talk about complexity in space and time)
1. pattern matching (find similar problems)
2. simplify and generalize (start with a simpler problem)
3. iterate through programming paradigms (greedy, divide and conquer, dynamic programming)
4. iterate through all data structures (lists, arrays, stacks, queues, heap, hash, tree, trie, bloom filter, union_find)
5. try free primitive and see if you make progress (sorting, bfs, dfs, strongly connected components, shortest path)
4. BUD optimisation:
1. bottleneck
2. unnecessary work
3. duplicate work
5. identify pain points: array indices, loop termination conditions.
*/
import "fmt"
// Solution solves the Codility "Fish" task: A[i] is the size of fish i,
// B[i] its direction (1 = downstream, 0 = upstream). Downstream fish are
// kept on a stack; an upstream fish eats every smaller downstream fish it
// meets and survives only if it empties the stack. The return value is the
// number of fish still alive. Runs in O(n): each fish is pushed/popped at
// most once.
func Solution(A []int, B []int) int {
	count := 0
	downstream := empty
	// gofmt fix: the original loop used "i ++"; a range loop is idiomatic.
	for i, size := range A {
		if B[i] == 1 {
			downstream = downstream.push(i)
			continue
		}
		// Upstream fish: pop every smaller downstream fish it eats.
		var isEmpty bool
		var j int
		for {
			isEmpty, j = downstream.peek()
			if isEmpty || A[j] >= size {
				break
			}
			isEmpty, _, downstream = downstream.pop()
		}
		if isEmpty || A[j] == size {
			count++
		}
	}
	// Remaining downstream fish were never eaten.
	return count + downstream.size()
}

// node is an immutable linked-list cell used as a stack of fish indices.
type node struct {
	val  int
	next *node
}

// empty is the sentinel marking the bottom of the stack.
var empty = &node{0, nil}

// debug renders the stack as "v1->v2->...->|" for troubleshooting.
func (n *node) debug() string {
	if n == empty {
		return "|"
	}
	return fmt.Sprintf("%d->%s", n.val, n.next.debug())
}

// peek returns the top value without removing it.
func (n *node) peek() (isEmpty bool, val int) {
	if n == empty {
		return true, 0
	}
	return false, n.val
}

// pop returns the top value and the new head.
func (n *node) pop() (isEmpty bool, val int, head *node) {
	if n == empty {
		return true, 0, empty
	}
	return false, n.val, n.next
}

// push returns a new head holding val.
func (n *node) push(val int) *node {
	return &node{val, n}
}

// size counts the elements on the stack iteratively (the original was
// recursive, risking deep call chains for large stacks).
func (n *node) size() int {
	count := 0
	for ; n != empty; n = n.next {
		count++
	}
	return count
}

func main() {}
package main
// The accessors below pull positional fields out of the raw event row and
// type-assert them; a wrong dynamic type panics.
// NOTE(review): comments and some identifiers in this file were garbled by
// an "int" -> "float64" find-and-replace ("float64ernet" == "internet");
// comments are fixed here, method names are kept to avoid breaking callers.
func (e *event) date() string {
	// 0 - date
	// example: ["09/Jul/2020 16:34:27",
	return e.raw[0].(string)
}

func (e *event) device() string {
	// 1 - device
	// example: "RockBLOCK 18388",
	return e.raw[1].(string)
}

func (e *event) direction() string {
	// 2 - direction. MO: from device to the internet, MT: from the internet to device.
	// example: "MO",
	return e.raw[2].(string)
}

func (e *event) hex() string {
	// 3: hex-encoded payload data
	// example: "372c392c31362c33342c31372c33362e39363039382c2d3132322e30303135312c34352c20342c32312c31332e34312c302e30302c31332e34302c302e31342c31342e35392c302e33302c313030302c302c33302e302c312c33362e39333536302c2d3132322e30303037392c3235322c2d3133302c323832342c322e30332c3431",
	return e.raw[3].(string)
}
// Lat returns the Iridium-derived latitude; very imprecise.
func (e *event) Lat() float64 {
	// 4: Iridium lat; very imprecise.
	// example: 36.973366666666664,
	return e.raw[4].(float64)
}

// Long returns the Iridium-derived longitude; very imprecise.
func (e *event) Long() float64 {
	// 5: Iridium long; very imprecise.
	// example: -121.98155,
	return e.raw[5].(float64)
}
// month through second expose the timestamp broken into components.
func (e *event) month() float64 {
	// 6: month
	// example: 7,
	return e.raw[6].(float64)
}

func (e *event) day() float64 {
	// 7: day
	// example: 9,
	return e.raw[7].(float64)
}

func (e *event) hour() float64 {
	// 8: hour
	// example: 16,
	return e.raw[8].(float64)
}

func (e *event) minute() float64 {
	// 9: minute
	// example: 34,
	return e.raw[9].(float64)
}

func (e *event) second() float64 {
	// 10: second
	// example: 17,
	return e.raw[10].(float64)
}
func (e *event) latitude() float64 {
// 11: latitude
// example: 36.96098,
return e.raw[11].(float64)
}
func (e *event) longitude() float64 {
// 12: longitude
// example: -122.00151,
return e.raw[12].(float64)
}
func (e *event) heading() float64 {
// 13: heading
// example: 45,
return e.raw[13].(float64)
}
func (e *event) pitch() float64 {
// 14: pitch
// example: 4,
return e.raw[14].(float64)
}
func (e *event) roll() float64 {
// 15: roll
// example: 21,
return e.raw[15].(float64)
}
func (e *event) thrusterV() float64 {
// 16: thrusterV
// example: 13.41,
return e.raw[16].(float64)
}
func (e *event) thrusterA() float64 {
// 17: thrusterA
// example: 0.00,
return e.raw[17].(float64)
}
func (e *event) hotelV() float64 {
// 18: hotelV
// example: 13.40,
return e.raw[18].(float64)
}
func (e *event) hotelA() float64 {
// 19: hotelA
// example: 0.14,
return e.raw[19].(float64)
}
func (e *event) solarV() float64 {
// 20: solarV
// example: 14.59,
return e.raw[20].(float64)
}
func (e *event) solarA() float64 {
// 21: solarA
// example: 0.30,
return e.raw[21].(float64)
}
func (e *event) throttle() float64 {
// 22: throttle
// example: 1000,
return e.raw[22].(float64)
}
func (e *event) rpm() float64 {
// 23: rpm
// example: 0,
return e.raw[23].(float64)
}
func (e *event) rudderAngle() float64 {
// 24: rudderAngle
// example: 30.0,
return e.raw[24].(float64)
}
func (e *event) nextWaypofloat64Number() float64 {
// 25: nextWaypofloat64Number
// example: 1,
return e.raw[25].(float64)
}
func (e *event) nextWaypofloat64Lat() float64 {
// 26: nextWaypofloat64Lat
// example: 36.93560,
return e.raw[26].(float64)
}
func (e *event) nextWaypofloat64Long() float64 {
// 27: nextWaypofloat64Long
// example: -122.00079,
return e.raw[27].(float64)
}
func (e *event) targetHeading() float64 {
// 28: targetHeading
// example: 252,
return e.raw[28].(float64)
}
func (e *event) crossTrackError() float64 {
// 29: crossTrackError
// example: -130,
return e.raw[29].(float64)
}
func (e *event) nextWaypofloat64Distance() float64 {
// 30: nextWaypofloat64Distance
// example: 2824,
return e.raw[30].(float64)
}
func (e *event) averageSpeed() float64 {
// 31: averageSpeed
// example: 2.03,
return e.raw[31].(float64)
}
func (e *event) uptimeMins() float64 {
// 32: uptimeMins
// example: 41,
return e.raw[32].(float64)
}
package packet
import (
"bytes"
"github.com/sandertv/gophertunnel/minecraft/protocol"
)
// Transition directions carried in UpdateBlockSynced.TransitionType.
const (
	BlockToEntityTransition = iota + 1
	EntityToBlockTransition
)

// UpdateBlockSynced is sent by the server to synchronise the falling of a falling block entity with the
// transitioning back and forth from and to a solid block. It is used to prevent the entity from flickering,
// and is used in places such as the pushing of blocks with pistons.
type UpdateBlockSynced struct {
	// Position is the block position at which a block is updated.
	Position protocol.BlockPos
	// NewBlockRuntimeID is the runtime ID of the block that is placed at Position after sending the packet
	// to the client. The runtime ID must point to a block sent in the list in the StartGame packet.
	NewBlockRuntimeID uint32
	// Flags is a combination of flags that specify the way the block is updated client-side. It is a
	// combination of the flags above, but typically sending only the BlockUpdateNetwork flag is sufficient.
	Flags uint32
	// Layer is the world layer on which the block is updated. For most blocks, this is the first layer, as
	// that layer is the default layer to place blocks on, but for blocks inside of each other, this differs.
	Layer uint32
	// EntityUniqueID is the unique ID of the falling block entity that the block transitions to or that the
	// entity transitions from.
	// Note that for both possible values for TransitionType, the EntityUniqueID should point to the falling
	// block entity involved.
	EntityUniqueID int64
	// TransitionType is the type of the transition that happened. It is either BlockToEntityTransition, when
	// a block placed becomes a falling entity, or EntityToBlockTransition, when a falling entity hits the
	// ground and becomes a solid block again.
	TransitionType uint64
}

// ID returns the network ID of the packet.
func (*UpdateBlockSynced) ID() uint32 {
	return IDUpdateBlockSynced
}

// Marshal encodes the packet fields into buf. The write order must mirror
// Unmarshal exactly. Write errors are deliberately discarded: writes go to an
// in-memory bytes.Buffer, which presumably cannot fail — confirm the protocol
// helpers follow that contract.
func (pk *UpdateBlockSynced) Marshal(buf *bytes.Buffer) {
	_ = protocol.WriteUBlockPosition(buf, pk.Position)
	_ = protocol.WriteVaruint32(buf, pk.NewBlockRuntimeID)
	_ = protocol.WriteVaruint32(buf, pk.Flags)
	_ = protocol.WriteVaruint32(buf, pk.Layer)
	_ = protocol.WriteVarint64(buf, pk.EntityUniqueID)
	_ = protocol.WriteVaruint64(buf, pk.TransitionType)
}

// Unmarshal decodes the packet fields from buf in the same order Marshal
// writes them. Note that all six reads are evaluated eagerly as arguments
// before chainErr combines their errors.
func (pk *UpdateBlockSynced) Unmarshal(buf *bytes.Buffer) error {
	return chainErr(
		protocol.UBlockPosition(buf, &pk.Position),
		protocol.Varuint32(buf, &pk.NewBlockRuntimeID),
		protocol.Varuint32(buf, &pk.Flags),
		protocol.Varuint32(buf, &pk.Layer),
		protocol.Varint64(buf, &pk.EntityUniqueID),
		protocol.Varuint64(buf, &pk.TransitionType),
	)
}
package protocol
import (
"errors"
"fmt"
"io"
"math/big"
"github.com/ethereum/go-ethereum/common"
solsha3 "github.com/miguelmota/go-solidity-sha3"
"github.com/offchainlabs/arbitrum/packages/arb-util/value"
)
// Identity is a 32-byte identifier.
type Identity [32]byte

// TokenType is a 21-byte token identifier: a 20-byte payload plus a trailing
// kind byte inspected by IsToken.
type TokenType [21]byte

// TokenTypeFromIntValue extracts a TokenType from the first 21 bytes of the
// integer value's 32-byte representation (byte order per value.IntValue.ToBytes).
func TokenTypeFromIntValue(val value.IntValue) TokenType {
	var tokType TokenType
	tokBytes := val.ToBytes()
	copy(tokType[:], tokBytes[:])
	return tokType
}

// ToIntValue widens the 21 token-type bytes into a 32-byte buffer
// (zero-padded at the end) and interprets it as a big-endian integer value.
func (t TokenType) ToIntValue() value.IntValue {
	var bigtok [32]byte
	copy(bigtok[:], t[:])
	return value.NewIntValue(new(big.Int).SetBytes(bigtok[:]))
}

// IsToken reports whether the trailing kind byte (byte 20) is zero.
func (t TokenType) IsToken() bool {
	return t[20] == 0
}

// tokenTypeEncoded right-pads the 21-byte token type to 21 bytes (a no-op
// pad) for Solidity-style hashing in Message.Hash.
func tokenTypeEncoded(input [21]byte) []byte {
	return common.RightPadBytes(input[:], 21)
}
// TokenTypeArrayEncoded encodes a slice of 21-byte token types by
// right-padding each element to 32 bytes and concatenating the results.
//
// NOTE(review): elements are padded to 32 bytes here while the single-value
// helper tokenTypeEncoded pads to 21; confirm that asymmetry is intentional.
func TokenTypeArrayEncoded(input [][21]byte) []byte {
	// Pre-size the output: exactly one 32-byte word per element, avoiding
	// repeated growth reallocations of the accumulating slice.
	values := make([]byte, 0, len(input)*32)
	for _, val := range input {
		values = append(values, common.RightPadBytes(val[:], 32)...)
	}
	return values
}
// Message is a value transfer: a data payload plus token type, amount
// (Currency), and a 32-byte destination.
type Message struct {
	Data        value.Value
	TokenType   [21]byte
	Currency    *big.Int
	Destination [32]byte
}

// NewMessage builds a Message. Currency is defensively copied because
// callers have been observed to mutate it afterwards (see the comment in
// Clone).
func NewMessage(data value.Value, tokenType [21]byte, currency *big.Int, destination [32]byte) Message {
	return Message{data, tokenType, new(big.Int).Set(currency), destination}
}
// NewSimpleMessage builds a Message destined for a 20-byte Ethereum address,
// left-aligning the address bytes in the 32-byte Destination field.
//
// It now delegates to NewMessage so that Currency is defensively copied the
// same way as in every other constructor; previously the caller's *big.Int
// was stored directly, an aliasing hazard given that Clone notes something
// mutates Currency elsewhere.
func NewSimpleMessage(data value.Value, tokenType [21]byte, currency *big.Int, sender common.Address) Message {
	senderArr := [32]byte{}
	copy(senderArr[:], sender.Bytes())
	return NewMessage(data, tokenType, currency, senderArr)
}
// NewMessageFromReader unmarshals a Message from rd in the same order that
// Message.Marshal writes it: data value, 21-byte token type, currency
// integer, then the 32-byte destination.
func NewMessageFromReader(rd io.Reader) (Message, error) {
	data, err := value.UnmarshalValue(rd)
	if err != nil {
		return Message{}, err
	}
	// io.ReadFull guarantees the fixed-size fields are fully populated; a
	// bare rd.Read may legally return fewer bytes than requested.
	tokenType := [21]byte{}
	if _, err := io.ReadFull(rd, tokenType[:]); err != nil {
		return Message{}, fmt.Errorf("error unmarshalling OutgoingMessage: %v", err)
	}
	currency, err := value.NewIntValueFromReader(rd)
	if err != nil {
		return Message{}, fmt.Errorf("error unmarshalling OutgoingMessage: %v", err)
	}
	// BUG FIX: the destination used to be read into tokenType a second time,
	// which both corrupted the token type and left dest as all zeroes.
	dest := [32]byte{}
	if _, err := io.ReadFull(rd, dest[:]); err != nil {
		return Message{}, fmt.Errorf("error unmarshalling OutgoingMessage: %v", err)
	}
	return NewMessage(data, tokenType, currency.BigInt(), dest), nil
}
// NewMessageFromValue converts a 4-tuple value laid out as
// (data, destination, amount, token type) — the layout produced by
// Message.AsValue — back into a Message.
func NewMessageFromValue(val value.Value) (Message, error) {
	tup, ok := val.(value.TupleValue)
	if !ok {
		return Message{}, errors.New("msg must be tuple value")
	}
	if tup.Len() != 4 {
		// Fixed: the message previously claimed a tuple of length 5 (and
		// misspelled "received") even though the check is for length 4.
		return Message{}, fmt.Errorf("advise expected tuple of length 4, but received %v", tup)
	}
	data, _ := tup.GetByInt64(0)
	destVal, _ := tup.GetByInt64(1)
	amountVal, _ := tup.GetByInt64(2)
	typeVal, _ := tup.GetByInt64(3)
	typeInt, ok := typeVal.(value.IntValue)
	if !ok {
		return Message{}, errors.New("type must be an int")
	}
	amountInt, ok := amountVal.(value.IntValue)
	if !ok {
		// Fixed: this and the next check shared the message "value must be
		// an int", making failures indistinguishable.
		return Message{}, errors.New("amount must be an int")
	}
	destInt, ok := destVal.(value.IntValue)
	if !ok {
		return Message{}, errors.New("destination must be an int")
	}
	// Truncate the 32-byte integer representation to the 21 token-type bytes.
	typeBytes := typeInt.ToBytes()
	var tokenType [21]byte
	copy(tokenType[:], typeBytes[:21])
	return NewMessage(
		data,
		tokenType,
		amountInt.BigInt(),
		destInt.ToBytes(),
	), nil
}
// Marshal writes the message to w in the order data, token type, currency,
// destination — the same order NewMessageFromReader reads it.
func (msg Message) Marshal(w io.Writer) error {
	if err := value.MarshalValue(msg.Data, w); err != nil {
		return err
	}
	_, err := w.Write(msg.TokenType[:])
	if err != nil {
		return err
	}
	err = value.NewIntValue(msg.Currency).Marshal(w)
	if err != nil {
		return err
	}
	_, err = w.Write(msg.Destination[:])
	if err != nil {
		return err
	}
	return nil
}

// Hash computes a Solidity-style keccak hash over the message fields.
//
// NOTE(review): msg.Data.Hash is passed without parentheses. If Hash is a
// method on value.Value (as the usage elsewhere in this file suggests), this
// passes a method value (a func), not its [32]byte result, to
// solsha3.Bytes32 — verify against the value package; it likely should be
// msg.Data.Hash().
func (msg Message) Hash() [32]byte {
	var ret [32]byte
	hashVal := solsha3.SoliditySHA3(
		solsha3.Bytes32(msg.Data.Hash),
		tokenTypeEncoded(msg.TokenType),
		solsha3.Uint256(msg.Currency),
		solsha3.Bytes32(msg.Destination),
	)
	copy(ret[:], hashVal)
	return ret
}

// Clone returns a deep copy of the message. The fixed-size array fields are
// copied by value; Data and Currency are cloned explicitly.
func (msg Message) Clone() Message {
	// Message shouldn't require cloning currency, but something is mutating that variable elsewhere in the code
	return Message{
		msg.Data.Clone(),
		msg.TokenType,
		new(big.Int).Set(msg.Currency),
		msg.Destination,
	}
}

// AsValue encodes the message as a 4-tuple (data, destination, amount,
// token type) — the layout NewMessageFromValue expects.
func (msg Message) AsValue() value.Value {
	destination := big.NewInt(0)
	destination.SetBytes(msg.Destination[:])
	tokTypeBytes := [32]byte{}
	copy(tokTypeBytes[:], msg.TokenType[:])
	tokTypeInt := big.NewInt(0)
	tokTypeInt.SetBytes(tokTypeBytes[:])
	// Error ignored: the slice has exactly 4 elements, which presumably is
	// always a valid tuple size — confirm against value.NewTupleFromSlice.
	newTup, _ := value.NewTupleFromSlice([]value.Value{
		msg.Data,
		value.NewIntValue(destination),
		value.NewIntValue(msg.Currency),
		value.NewIntValue(tokTypeInt),
	})
	return newTup
}
func (msg Message) Equals(b Message) bool {
if msg.TokenType != b.TokenType {
return false
}
if !value.Eq(msg.Data, b.Data) {
return false
}
if msg.Currency.Cmp(b.Currency) != 0 {
return false
}
if msg.Destination != b.Destination {
return false
}
return true
} | packages/arb-util/protocol/message.go | 0.621656 | 0.409162 | message.go | starcoder |
package stick
// Registry is a multi-key index of typed values. Each value is stored once
// in insertion order and indexed under one key per indexer function.
type Registry[T any] struct {
	validator func(T) error
	indexer   []func(T) string
	indexes   []map[string]T
	list      []T
}

// NewRegistry will create and return a new registry using the specified index
// functions that must return unique keys.
func NewRegistry[T any](values []T, validator func(T) error, indexer ...func(T) string) *Registry[T] {
	reg := &Registry[T]{
		validator: validator,
		indexer:   indexer,
		indexes:   make([]map[string]T, len(indexer)),
	}
	// one empty index per indexer function
	for i := range reg.indexes {
		reg.indexes[i] = map[string]T{}
	}
	reg.Add(values...)
	return reg
}

// Add will add the specified values to the registry. It panics if a value
// fails validation, if an indexer yields an empty key, or if a key collides
// with an already registered value.
func (r *Registry[T]) Add(values ...T) {
	for _, v := range values {
		if r.validator != nil {
			if err := r.validator(v); err != nil {
				panic("stick: invalid value: " + err.Error())
			}
		}
		for i, idx := range r.indexer {
			key := idx(v)
			if key == "" {
				panic("stick: missing key")
			}
			if _, exists := r.indexes[i][key]; exists {
				panic("stick: value already added")
			}
			r.indexes[i][key] = v
		}
		r.list = append(r.list, v)
	}
}

// Get will attempt to look up a value using the specified predicate. The
// indexes are consulted in order; the first hit wins.
func (r *Registry[T]) Get(predicate T) (T, bool) {
	for i, index := range r.indexes {
		if v, ok := index[r.indexer[i](predicate)]; ok {
			return v, true
		}
	}
	var zero T
	return zero, false
}

// MustGet will call Get and panic if no value has been found.
func (r *Registry[T]) MustGet(predicate T) T {
	v, ok := r.Get(predicate)
	if !ok {
		panic("stick: missing value")
	}
	return v
}

// All will return a copy of the list of all added values, in insertion order.
func (r *Registry[T]) All() []T {
	out := make([]T, len(r.list))
	copy(out, r.list)
	return out
}
package caseconversion
import (
"fmt"
"go/token"
"strings"
"unicode"
"unicode/utf8"
)
// DecodeCasingFunc takes in an identifier in a case such as camelCase or
// snake_case and splits it up into a DecodedIdentifier for encoding by an
// EncodeCasingFunc into a different case.
type DecodeCasingFunc func(string) (DecodedIdentifier, error)

// EncodeCasingFunc combines the contents of a DecodedIdentifier into an
// identifier in a case such as camelCase or snake_case.
type EncodeCasingFunc func(DecodedIdentifier) string

// DecodedIdentifier is a slice of lowercase words (e.g., []string{"test",
// "string"}) produced by a DecodeCasingFunc, which can be encoded by an
// EncodeCasingFunc into a string in the specified case (e.g., with
// EncodeLowerCamelCase, "testString"). The encoders below rely on the words
// already being lowercase.
type DecodedIdentifier []string
// decodeCamelCase splits a camel-cased string into lowercase words, starting
// a new word at every uppercase rune. typeName is only used in error
// messages; case restrictions on the first rune are enforced by the callers.
func decodeCamelCase(typeName, s string) (DecodedIdentifier, error) {
	// Reject empty input (RuneError) and a leading digit up front.
	first, _ := utf8.DecodeRuneInString(s)
	if first == utf8.RuneError || unicode.IsDigit(first) {
		return nil, fmt.Errorf("Converting case of %q: %s strings can't start with characters of the Decimal Digit category", s, typeName)
	}
	var words []string
	start := 0
	for i, r := range s {
		switch {
		case !unicode.IsLetter(r) && !unicode.IsDigit(r):
			return nil, fmt.Errorf("Converting case of %q: Only characters of the Letter and Decimal Digit categories can appear in %s strings: %c at byte offset %d",
				s, typeName, r, i)
		case unicode.IsUpper(r):
			// An uppercase rune starts a new word; flush the pending one.
			if start < i {
				words = append(words, strings.ToLower(s[start:i]))
			}
			start = i
		}
	}
	// Flush the final word (always non-empty here).
	return append(words, strings.ToLower(s[start:])), nil
}
// DecodeUpperCamelCase decodes UpperCamelCase strings into a slice of lower-cased sub-strings.
// The first rune must be an uppercase letter; the rest is delegated to
// decodeCamelCase.
func DecodeUpperCamelCase(s string) (DecodedIdentifier, error) {
	// ignore the size of the rune
	r, _ := utf8.DecodeRuneInString(s)
	if !unicode.IsLetter(r) || !unicode.IsUpper(r) {
		return nil, fmt.Errorf("Converting case of %q: First character of upperCamelCase string must be an uppercase character of the Letter category", s)
	}
	return decodeCamelCase("UpperCamelCase", s)
}

// DecodeLowerCamelCase decodes lowerCamelCase strings into a slice of lower-cased sub-strings.
// The first rune must be a lowercase letter; the rest is delegated to
// decodeCamelCase.
func DecodeLowerCamelCase(s string) (DecodedIdentifier, error) {
	// ignore the size of the rune
	r, _ := utf8.DecodeRuneInString(s)
	if !unicode.IsLetter(r) || !unicode.IsLower(r) {
		return nil, fmt.Errorf("Converting case of %q: First character of lowerCamelCase string must be a lowercase character of the Letter category", s)
	}
	return decodeCamelCase("lowerCamelCase", s)
}
// firstCharOfInitialism, as used in DecodeGoCamelCase, reports whether the
// rune starting at byte offset i is the first character of an initialism
// (e.g., json*A*PI): an uppercase rune immediately preceded by a lowercase
// rune.
func firstCharOfInitialism(s string, i int) bool {
	// The first rune of the string can never start an initialism.
	if i < 1 {
		return false
	}
	cur, _ := utf8.DecodeRuneInString(s[i:])
	prev, _ := utf8.DecodeLastRuneInString(s[:i])
	// The original also required len(s) >= i+rl1, but DecodeRuneInString
	// never decodes past the end of s, so that condition was always true
	// and has been removed.
	return unicode.IsUpper(cur) && unicode.IsLower(prev)
}
// firstCharAfterInitialism, as used in DecodeGoCamelCase, attempts to
// detect when the indexed rune is the first character of a non-initialism after
// an initialism (e.g., JSON*F*ile): an uppercase rune followed by a lowercase
// rune, not at the very end of the string.
func firstCharAfterInitialism(s string, i int) bool {
	r1, rl1 := utf8.DecodeRuneInString(s[i:])
	// ensure the rune isn't the last character of the string
	if i+rl1 >= len(s) {
		return false
	}
	r2, rl2 := utf8.DecodeRuneInString(s[i+rl1:])
	// NOTE(review): the extra i+rl1+rl2 < len(s) condition makes a
	// two-rune upper+lower tail (e.g. the "Ab" in "XAb") report false here;
	// confirm that is intentional (EOS handling is done by
	// lastCharOfInitialismAtEOS).
	return i+rl1+rl2 < len(s) && unicode.IsUpper(r1) && unicode.IsLower(r2)
}

// lastCharOfInitialismAtEOS, as used in DecodeGoCamelCase, attempts to
// detect when the indexed rune is the last character of an initialism at the
// end of a string (e.g., jsonAP*I*): an uppercase rune whose encoding ends
// exactly at the end of s.
func lastCharOfInitialismAtEOS(s string, i int) bool {
	s1 := s[i:]
	r, rl := utf8.DecodeRuneInString(s1)
	return i+rl == len(s) && unicode.IsUpper(r)
}

// decodeGoCamelCase splits up a string in a slice of lower cased sub-strings by
// splitting after fully capitalized acronyms and after the characters that
// signal word boundaries as specified in the passed isWordBoundary function.
func decodeGoCamelCase(s string, isWordBoundary func(rune) bool) (DecodedIdentifier, error) {
	words := []string{}
	lastBoundary := 0
	for i, char := range s {
		if firstCharOfInitialism(s, i) || firstCharAfterInitialism(s, i) || isWordBoundary(char) {
			if lastBoundary < i {
				word := s[lastBoundary:i]
				// Fully-uppercase segments may contain several glued
				// acronyms (e.g. "JSONAPI"); split them apart.
				if word == strings.ToUpper(word) {
					words = append(words, extractInitialisms(word)...)
				} else {
					words = append(words, strings.ToLower(word))
				}
			}
			switch {
			case isWordBoundary(char):
				// Separator characters are dropped from the output.
				lastBoundary = i + 1
			default:
				lastBoundary = i
			}
		} else if lastCharOfInitialismAtEOS(s, i) {
			if lastBoundary < i {
				word := s[lastBoundary:]
				// The string ends in an all-caps acronym: split it and
				// return early, since nothing can follow.
				if word == strings.ToUpper(word) {
					words = append(words, extractInitialisms(word)...)
					return words, nil
				}
			}
			lastBoundary = i
		}
	}
	// Flush whatever remains after the last boundary, if non-empty.
	if last := strings.ToLower(s[lastBoundary:]); len(last) > 0 {
		words = append(words, strings.ToLower(s[lastBoundary:]))
	}
	return words, nil
}
// TODO: Add EncodeGoCamelCase function and set as default name encoder in
// FlattenMangler
// DecodeGoCamelCase decodes UpperCamelCase and lowerCamelCase strings with
// fully capitalized acronyms (e.g., "jsonAPIDocs") into a slice of lower-cased
// sub-strings. Input must be a valid Go identifier (letters, digits, '_').
func DecodeGoCamelCase(s string) (DecodedIdentifier, error) {
	if !token.IsIdentifier(s) {
		return nil, fmt.Errorf("Only characters of the Letter category or '_' can appear in strings")
	}
	return decodeGoCamelCase(s, func(r rune) bool {
		return r == '_'
	})
}

// DecodeGoTags decodes CamelCase, snake_case, and kebab-case strings with fully
// capitalized acronyms into a slice of lower cased strings. Unlike
// DecodeGoCamelCase it performs no identifier validation and also treats '-'
// as a word boundary.
func DecodeGoTags(s string) (DecodedIdentifier, error) {
	return decodeGoCamelCase(s, func(r rune) bool {
		return r == '_' || r == '-'
	})
}

// List from https://github.com/golang/lint/blob/master/lint.go
var commonInitialisms = []string{"ACL", "API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "LHS", "QPS", "RAM", "RHS", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS"}

// Given an entirely uppercase string, extract any initialisms sequentially from the start of the string and return them with the remainder of the string.
// NOTE(review): matching repeatedly scans commonInitialisms in list order and
// restarts via the outer loop after each match, so the result can depend on
// list order for ambiguous inputs; any unmatched remainder is appended
// lower-cased as a single word.
func extractInitialisms(s string) []string {
	words := []string{}
	for {
		initialismFound := false
		for _, initialism := range commonInitialisms {
			if len(s) >= len(initialism) && initialism == s[:len(initialism)] {
				initialismFound = true
				words = append(words, strings.ToLower(initialism))
				s = s[len(initialism):]
			}
		}
		if !initialismFound {
			break
		}
	}
	if len(s) > 0 {
		words = append(words, strings.ToLower(s))
	}
	return words
}
// decodeLowerCaseWithSplitChar splits s into words on splitChar, requiring
// every other rune to be a lowercase letter or a digit. Empty segments
// (leading, trailing, or doubled separators) are skipped. typeName is only
// used in error messages.
func decodeLowerCaseWithSplitChar(splitChar rune, typeName, s string) (DecodedIdentifier, error) {
	// Reject empty input (RuneError) and a leading digit up front.
	first, _ := utf8.DecodeRuneInString(s)
	if first == utf8.RuneError || unicode.IsDigit(first) {
		return nil, fmt.Errorf("Converting case of %q: %s strings can't start with characters of the Decimal Digit category", s, typeName)
	}
	words := []string{}
	start := 0
	for i, r := range s {
		if r == splitChar {
			// Flush the word accumulated since the previous separator.
			if start < i {
				words = append(words, s[start:i])
			}
			start = i + utf8.RuneLen(splitChar)
			continue
		}
		if (!unicode.IsLetter(r) && !unicode.IsDigit(r)) || (!unicode.IsLower(r) && !unicode.IsDigit(r)) {
			return nil, fmt.Errorf("Converting case of %q: Only lower-case letter-category characters, digits, and '%c' can appear in `%s` strings: %c at byte-offset %d does not comply", s, splitChar, typeName, r, i)
		}
	}
	// Flush the tail, if any (already lowercase; ToLower kept for parity
	// with the original defensive behavior).
	if tail := strings.ToLower(s[start:]); len(tail) > 0 {
		words = append(words, tail)
	}
	return words, nil
}
// DecodeLowerSnakeCase decodes lower_snake_case into a slice of lower-cased sub-strings.
func DecodeLowerSnakeCase(s string) (DecodedIdentifier, error) {
	return decodeLowerCaseWithSplitChar('_', "lower_snake_case", s)
}

// DecodeKebabCase decodes kebab-case into a slice of lower-cased sub-strings.
func DecodeKebabCase(s string) (DecodedIdentifier, error) {
	return decodeLowerCaseWithSplitChar('-', "kebab-case", s)
}

// DecodeUpperSnakeCase decodes UPPER_SNAKE_CASE (sometimes called
// SCREAMING_SNAKE_CASE) into a slice of lower-cased sub-strings. Runes must
// be uppercase letters, digits, or '_'; empty segments are skipped.
func DecodeUpperSnakeCase(s string) (DecodedIdentifier, error) {
	// ignore the size of the rune; RuneError doubles as the empty-input check
	r, _ := utf8.DecodeRuneInString(s)
	if r == utf8.RuneError || unicode.IsDigit(r) {
		return nil, fmt.Errorf("Converting case of %q: UPPER_SNAKE_CASE strings can't start with characters of the Decimal Digit category", s)
	}
	words := []string{}
	lastBoundary := 0
	for z, char := range s {
		if char == '_' {
			// flush the word accumulated since the previous separator
			if lastBoundary < z {
				words = append(words, strings.ToLower(s[lastBoundary:z]))
			}
			lastBoundary = z + 1
		} else if (!unicode.IsLetter(char) && !unicode.IsDigit(char)) || (!unicode.IsUpper(char) && !unicode.IsDigit(char)) {
			return nil, fmt.Errorf("Converting case of %q: Only uppercase characters of the Letter category and '_' can appear in UPPER_SNAKE_CASE strings: %c in at byte-offset %d does not comply", s, char, z)
		}
	}
	// flush one last time to get the remainder of the string, if non-empty
	if last := strings.ToLower(s[lastBoundary:]); len(last) > 0 {
		words = append(words, strings.ToLower(s[lastBoundary:]))
	}
	return words, nil
}
// DecodeCasePreservingSnakeCase decodes Case_Preserving_Snake_Case into a
// slice of lower-cased sub-strings. Runes must be letters, digits, or '_'.
//
// Fixed: a trailing separator ("Foo_") previously produced a spurious empty
// final word; the last flush is now guarded the same way as in the other
// snake/kebab decoders.
func DecodeCasePreservingSnakeCase(s string) (DecodedIdentifier, error) {
	// Reject empty input (RuneError) and a leading digit up front.
	r, _ := utf8.DecodeRuneInString(s)
	if r == utf8.RuneError || unicode.IsDigit(r) {
		return nil, fmt.Errorf("Converting case of %q: Case_Preserving_Snake_Case strings can't start with characters of the Decimal Digit category", s)
	}
	words := []string{}
	lastBoundary := 0
	for z, char := range s {
		if char == '_' {
			// flush the word accumulated since the previous separator
			if lastBoundary < z {
				words = append(words, strings.ToLower(s[lastBoundary:z]))
			}
			lastBoundary = z + 1
		} else if !unicode.IsLetter(char) && !unicode.IsDigit(char) {
			return nil, fmt.Errorf("Converting case of %q: Only characters of the Letter category and '_' can appear in Preserving_Snake_Case strings: %c at byte-offset %d does not comply", s, char, z)
		}
	}
	// Guard the final flush so "Foo_" decodes to ["foo"], not ["foo", ""].
	if last := strings.ToLower(s[lastBoundary:]); len(last) > 0 {
		words = append(words, last)
	}
	return words, nil
}
// aggregateStringLen returns the total byte length of all words combined,
// used to pre-size string builders in the encoders.
func aggregateStringLen(words DecodedIdentifier) int {
	n := 0
	for _, word := range words {
		n += len(word)
	}
	return n
}
// EncodeUpperCamelCase encodes a slice of words into UpperCamelCase.
//
// NOTE(review): strings.Title is deprecated in modern Go. It is word-boundary
// based, so a word starting with a digit gets its first letter capitalized
// too ("2fa" -> "2Fa"); replacing it with a manual first-rune upper-case
// would change that edge case, so it is left as is.
func EncodeUpperCamelCase(words DecodedIdentifier) string {
	b := strings.Builder{}
	// Pre-size for the concatenated words.
	b.Grow(aggregateStringLen(words))
	for _, w := range words {
		b.WriteString(strings.Title(w))
	}
	return b.String()
}

// EncodeLowerCamelCase encodes a slice of words into lowerCamelCase: the
// first word is kept as is and each subsequent word is Title-cased (see the
// strings.Title note on EncodeUpperCamelCase).
func EncodeLowerCamelCase(words DecodedIdentifier) string {
	if len(words) == 0 {
		return ""
	}
	b := strings.Builder{}
	b.Grow(aggregateStringLen(words))
	b.WriteString(words[0])
	for _, w := range words[1:] {
		b.WriteString(strings.Title(w))
	}
	return b.String()
}
// EncodeKebabCase encodes a slice of words into kebab-case, preserving each
// word's case.
func EncodeKebabCase(words DecodedIdentifier) string {
	return strings.Join(words, "-")
}

// EncodeLowerSnakeCase encodes a slice of words into lower_snake_case,
// lowercasing each word defensively.
func EncodeLowerSnakeCase(words DecodedIdentifier) string {
	if len(words) == 0 {
		return ""
	}
	b := strings.Builder{}
	// Pre-size: all word bytes plus one separator between each pair.
	b.Grow(aggregateStringLen(words) + len(words) - 1)
	for i, w := range words {
		b.WriteString(strings.ToLower(w))
		if i != len(words)-1 {
			b.WriteRune('_')
		}
	}
	return b.String()
}

// EncodeUpperSnakeCase encodes a slice of words into UPPER_SNAKE_CASE (AKA
// SCREAMING_SNAKE_CASE), uppercasing each word.
func EncodeUpperSnakeCase(words DecodedIdentifier) string {
	if len(words) == 0 {
		return ""
	}
	b := strings.Builder{}
	// Pre-size: all word bytes plus one separator between each pair.
	b.Grow(aggregateStringLen(words) + len(words) - 1)
	for i, w := range words {
		b.WriteString(strings.ToUpper(w))
		if i != len(words)-1 {
			b.WriteRune('_')
		}
	}
	return b.String()
}
// EncodeCasePreservingSnakeCase encodes a slice of words into case_Preserving_snake_case
func EncodeCasePreservingSnakeCase(words DecodedIdentifier) string {
return strings.Join(words, "_")
} | tagformat/caseconversion/case_conversion.go | 0.525125 | 0.447521 | case_conversion.go | starcoder |
package sparql
import (
"io"
)
// reader represents a buffered rune reader used by the scanner.
// It provides a fixed-length circular buffer that can be unread,
// so at most len(buf) == 3 consecutive reads can be pushed back.
type reader struct {
	r   io.RuneScanner
	i   int // buffer index
	n   int // buffer char count (number of pending unread runes)
	pos Pos // last read rune position
	buf [3]struct {
		ch  rune
		pos Pos
	}
	eof bool // true if reader has ever seen eof.
}

// ReadRune reads the next rune from the reader.
// This is a wrapper function to implement the io.RuneReader interface.
// Note that this function does not return size.
func (r *reader) ReadRune() (ch rune, size int, err error) {
	ch, _ = r.read()
	if ch == eof {
		err = io.EOF
	}
	return
}

// UnreadRune pushes the previously read rune back onto the buffer.
// This is a wrapper function to implement the io.RuneScanner interface.
// It always reports success.
func (r *reader) UnreadRune() error {
	r.unread()
	return nil
}

// read reads the next rune from the reader, normalizing "\r" and "\r\n"
// line endings to a single '\n' and mapping any underlying error
// (including io.EOF) to the eof sentinel rune.
func (r *reader) read() (ch rune, pos Pos) {
	// If we have unread characters then read them off the buffer first.
	if r.n > 0 {
		r.n--
		return r.curr()
	}
	// Read next rune from underlying reader.
	// Any error (including io.EOF) should return as EOF.
	ch, _, err := r.r.ReadRune()
	if err != nil {
		ch = eof
	} else if ch == '\r' {
		// Collapse CRLF into a single newline; a lone CR also becomes '\n'.
		if ch, _, err := r.r.ReadRune(); err != nil {
			// nop
		} else if ch != '\n' {
			_ = r.r.UnreadRune()
		}
		ch = '\n'
	}
	// Save character and position to the buffer.
	r.i = (r.i + 1) % len(r.buf)
	buf := &r.buf[r.i]
	buf.ch, buf.pos = ch, r.pos
	// Update position.
	// Only count EOF once.
	if ch == '\n' {
		r.pos.Line++
		r.pos.Char = 0
	} else if !r.eof {
		r.pos.Char++
	}
	// Mark the reader as EOF.
	// This is used so we don't double count EOF characters.
	if ch == eof {
		r.eof = true
	}
	return r.curr()
}

// unread pushes the previously read rune back onto the buffer.
// NOTE(review): there is no guard against n exceeding len(buf); more than
// three consecutive unreads silently wrap the circular buffer and replay
// stale runes — confirm callers never unread more than three times.
func (r *reader) unread() {
	r.n++
}

// curr returns the last read character and position, offset back by the
// number of pending unreads.
func (r *reader) curr() (ch rune, pos Pos) {
	i := (r.i - r.n + len(r.buf)) % len(r.buf)
	buf := &r.buf[i]
	return buf.ch, buf.pos
}
// eof is a marker code point to signify that the reader can't read any more.
const eof = rune(0)
package calculator
import (
"fmt"
"math"
)
// Add returns the sum of all inputs; the sum of no inputs is 0.
func Add(inputs ...float64) float64 {
	sum := 0.0
	for _, v := range inputs {
		sum += v
	}
	return sum
}

// Substract returns the first input minus each of the remaining inputs;
// with no inputs it returns 0. (The exported name keeps its original
// spelling so existing callers continue to compile.)
func Substract(inputs ...float64) float64 {
	if len(inputs) == 0 {
		return 0
	}
	result := inputs[0]
	for _, v := range inputs[1:] {
		result -= v
	}
	return result
}

// Multiply returns the product of all inputs; the product of no inputs is 1.
func Multiply(inputs ...float64) float64 {
	product := 1.0
	for _, v := range inputs {
		product *= v
	}
	return product
}
// Divide returns the result of successively dividing the first input by each
// remaining input. Dividing by zero yields an error; with no inputs the
// result is 1.
func Divide(inputs ...float64) (float64, error) {
	if len(inputs) == 0 {
		return 1, nil
	}
	result := inputs[0]
	for _, v := range inputs[1:] {
		if v == 0 {
			return 0, fmt.Errorf("bad input: %v, %f (division by zero is undefined)", inputs, v)
		}
		result /= v
	}
	return result, nil
}
// Sqrt returns the square root of a non-negative number.
//
// Fixed: the guard was previously a > 0, rejecting 0 even though sqrt(0) is
// well defined — contradicting its own error message about "negative
// numbers".
func Sqrt(a float64) (float64, error) {
	if a < 0 {
		return 0, fmt.Errorf("bad input: %f sqrt of negative numbers is not allowed", a)
	}
	return math.Sqrt(a), nil
}
// Evaluate receives a string with an arithmetic operation and returns the result.
// Only expressions with a floating point value followed by one or more spaces,
// followed by an arithmetic operator *,+,/,-, followed by one or more spaces,
// followed by a floating point value are accepted.
func Evaluate(expr string) (float64, error) {
	var a float64
	var b float64
	var op string
	// Sscanf's %s consumes a whitespace-delimited token, so the operator
	// must be separated from both operands by spaces (e.g. "2 + 3").
	_, err := fmt.Sscanf(expr, "%f%s%f\n", &a, &op, &b)
	if err != nil {
		return 0, fmt.Errorf("%s Unexpected error %s", expr, err)
	}
	switch operation := op; operation {
	case "+":
		return Add(a, b), nil
	case "-":
		return Substract(a, b), nil
	case "*":
		return Multiply(a, b), nil
	case "/":
		// Divide reports division by zero itself; propagate its error.
		result, err := Divide(a, b)
		if err != nil {
			return 0, err
		}
		return result, nil
	default:
		return 0, fmt.Errorf("%s Invalid operator %s", expr, op)
	}
}
package statsrunner
import (
"fmt"
"sort"
"gonum.org/v1/gonum/stat"
)
// calcRelativeFrequency calculates the relative frequency of each distinct
// observation in obs. An empty input yields an empty (non-nil) map.
func calcRelativeFrequency(obs []string) map[string]float64 {
	freq := make(map[string]float64)
	if len(obs) == 0 {
		return freq
	}
	// First pass: absolute counts.
	for _, o := range obs {
		freq[o]++
	}
	// Second pass: normalize by the total number of observations.
	total := float64(len(obs))
	for val, count := range freq {
		freq[val] = count / total
	}
	return freq
}
// calcMeanStdDev returns the weighted mean and standard deviation of x,
// delegating to gonum's stat.MeanStdDev. A nil weights slice means every
// observation is weighted equally.
func calcMeanStdDev(x, weights []float64) (mean, std float64) {
	return stat.MeanStdDev(x, weights)
}
// calcMeanStdDevUint16 converts x and weights to float64 and delegates to
// calcMeanStdDev. A nil weights slice means unweighted.
//
// Fixed: the converted weights slice was previously declared with := inside
// the if block, shadowing the outer variable, so the weights were silently
// dropped and the result was always unweighted.
func calcMeanStdDevUint16(x, weights []uint16) (mean, std float64) {
	xFloat64 := make([]float64, 0, len(x))
	for _, val := range x {
		xFloat64 = append(xFloat64, float64(val))
	}
	var weightsFloat64 []float64
	if weights != nil {
		weightsFloat64 = make([]float64, 0, len(weights))
		for _, val := range weights {
			weightsFloat64 = append(weightsFloat64, float64(val))
		}
	}
	return calcMeanStdDev(xFloat64, weightsFloat64)
}
// calcMeanStdDevUint32 converts x and weights to float64 and delegates to
// calcMeanStdDev. A nil weights slice means unweighted.
//
// Fixed: the converted weights slice was previously declared with := inside
// the if block, shadowing the outer variable, so the weights were silently
// dropped and the result was always unweighted.
func calcMeanStdDevUint32(x, weights []uint32) (mean, std float64) {
	xFloat64 := make([]float64, 0, len(x))
	for _, val := range x {
		xFloat64 = append(xFloat64, float64(val))
	}
	var weightsFloat64 []float64
	if weights != nil {
		weightsFloat64 = make([]float64, 0, len(weights))
		for _, val := range weights {
			weightsFloat64 = append(weightsFloat64, float64(val))
		}
	}
	return calcMeanStdDev(xFloat64, weightsFloat64)
}
// calcMedian returns the weighted median (0.5 empirical quantile) of x.
// NOTE: x is sorted in place, so the caller's slice order is mutated.
// It errors on an empty slice because the quantile is undefined there.
func calcMedian(x, weights []float64) (float64, error) {
	if len(x) == 0 {
		return 0.0, fmt.Errorf("Cannot calculate Median: No elements in slice")
	}
	sort.Float64s(x)
	return stat.Quantile(0.5, stat.Empirical, x, weights), nil
}
// calcMedianUint16 converts x and weights to float64 and delegates to
// calcMedian. A nil weights slice means unweighted.
//
// Fixed: the converted weights slice was previously declared with := inside
// the if block, shadowing the outer variable, so the weights were silently
// ignored.
func calcMedianUint16(x, weights []uint16) (float64, error) {
	if len(x) == 0 {
		return 0.0, fmt.Errorf("Cannot calculate Median: No elements in slice")
	}
	xFloat64 := make([]float64, 0, len(x))
	for _, val := range x {
		xFloat64 = append(xFloat64, float64(val))
	}
	var weightsFloat64 []float64
	if weights != nil {
		weightsFloat64 = make([]float64, 0, len(weights))
		for _, val := range weights {
			weightsFloat64 = append(weightsFloat64, float64(val))
		}
	}
	return calcMedian(xFloat64, weightsFloat64)
}
func calcMedianUint32(x, weights []uint32) (float64, error) {
if len(x) == 0 {
return 0.0, fmt.Errorf("Cannot calculate Median: No elements in slice")
}
xFloat64 := make([]float64, 0, len(x))
for _, val := range x {
xFloat64 = append(xFloat64, float64(val))
}
var weightsFloat64 []float64
if weights != nil {
weightsFloat64 := make([]float64, 0, len(weights))
for _, val := range weights {
weightsFloat64 = append(weightsFloat64, float64(val))
}
}
return calcMedian(xFloat64, weightsFloat64)
}
package epochdate
import (
"database/sql/driver"
"errors"
"time"
)
const (
	// day is the number of seconds in a civil day (leap seconds ignored).
	day = 60 * 60 * 24
	// nsPerSec converts seconds to nanoseconds.
	nsPerSec = 1e9
	// maxUnix is the last representable Unix second: Date is a uint16 day
	// count, so the range spans 1<<16 days starting at 1970-01-01.
	maxUnix = (1<<16)*day - 1
)

// Common date layout strings usable with Parse and Format.
const (
	RFC3339        = "2006-01-02"
	AmericanShort  = "1-2-06"
	AmericanCommon = "01-02-06"
)

// ErrOutOfRange is returned when a date falls outside the uint16 day range.
var ErrOutOfRange = errors.New("The given date is out of range")

// Date is a count of days since 1970-01-01, stored in 16 bits.
type Date uint16

// Today returns the local date at this instant. If the local date does not
// fall within the representable range, then the zero value will be returned
// (1970-01-01).
func Today() Date {
	date, _ := NewFromTime(time.Now())
	return date
}

// TodayUTC returns the date at this instant, relative to UTC. If the UTC
// date does not fall within the representable range, then the zero value
// will be returned (1970-01-01).
func TodayUTC() Date {
	date, _ := NewFromTime(time.Now().UTC())
	return date
}
// Parse follows the same semantics as time.Parse, but ignores time-of-day
// information and returns a Date value.
func Parse(layout, value string) (d Date, err error) {
	t, err := time.Parse(layout, value)
	if err == nil {
		d, err = NewFromTime(t)
	}
	return
}

// NewFromTime returns a Date equivalent to NewFromDate(t.Date()),
// where t is a time.Time object. The zone offset is added to the Unix
// timestamp so the calendar date in t's location is preserved.
func NewFromTime(t time.Time) (Date, error) {
	s := t.Unix()
	_, offset := t.Zone()
	return NewFromUnix(s + int64(offset))
}

// NewFromDate returns a Date value corresponding to the supplied
// year, month, and day (interpreted in UTC).
func NewFromDate(year int, month time.Month, day int) (Date, error) {
	return NewFromUnix(time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Unix())
}

// NewFromUnix creates a Date from a Unix timestamp, relative to any location
// Specifically, if you pass in t.Unix(), where t is a time.Time value with a
// non-UTC zone, you may receive an unexpected Date. Unless this behavior is
// specifically desired (returning the date in one location at the given time
// instant in another location), it's best to use epochdate.NewFromTime(t),
// which normalizes the resulting Date value by adjusting for zone offsets.
func NewFromUnix(seconds int64) (d Date, err error) {
	if UnixInRange(seconds) {
		d = Date(seconds / day)
	} else {
		err = ErrOutOfRange
	}
	return
}

// UnixInRange is true if the provided Unix timestamp is in Date's
// representable range. The timestamp is interpreted according to the semantics
// used by NewFromUnix. You probably won't need to use this, since this will
// only return false if NewFromUnix returns an error of ErrOutOfRange.
func UnixInRange(seconds int64) bool {
	return seconds >= 0 && seconds <= maxUnix
}
// String returns an RFC3339/ISO-8601 date string, of the form "2006-01-02".
func (d Date) String() string {
	return d.Format(RFC3339)
}

// Unix returns the number of seconds elapsed since Jan 1 1970 UTC, from the
// start of the given date value. In this case, the date is considered to be
// a UTC date, rather than a location-independent date.
func (d Date) Unix() int64 {
	return int64(d) * day
}

// UnixNano is semantically identical to the Unix method, except that it
// returns elapsed nanoseconds.
func (d Date) UnixNano() int64 {
	return int64(d) * day * nsPerSec
}

// Format is identical to time.Time.Format, except that any time-of-day format
// specifiers that are used will be equivalent to "00:00:00Z".
func (d Date) Format(layout string) string {
	return d.UTC().Format(layout)
}

// Date is semantically identical to the behavior of t.Date(), where t is a
// time.Time value.
func (d Date) Date() (year int, month time.Month, day int) {
	return d.UTC().Date()
}

// AddDate is semantically identical to the behavior of t.AddDate(), where t is a
// time.Time value. Note that the range error is discarded: an out-of-range
// result silently becomes the zero date (1970-01-01).
func (d Date) AddDate(years int, months int, days int) Date {
	r, _ := NewFromUnix(d.UTC().AddDate(years, months, days).Unix())
	return r
}

// UTC returns a UTC Time object set to 00:00:00 on the given date.
func (d Date) UTC() time.Time {
	return time.Unix(int64(d)*day, 0).UTC()
}
// UTC returns a UTC Time object set to 00:00:00 on the given date.
func (d Date) UTCTime(hour int, min int, sec int, nsec int) time.Time {
t := d.UTC()
return time.Date(t.Year(), t.Month(), t.Day(), hour, min, sec, nsec, time.UTC)
}
// Local returns a local Time object set to 00:00:00 on the given date.
func (d Date) Local() time.Time {
return d.In(time.Local)
}
// In returns a location-relative Time object set to 00:00:00 on the given date.
func (d Date) In(loc *time.Location) time.Time {
t := time.Unix(int64(d)*day, 0).In(loc)
_, offset := t.Zone()
return t.Add(time.Duration(-offset) * time.Second)
}
// Returns whether the dates are equals
func (d Date) Equals(date Date) bool {
return d == date
}
// EqualsTime reports whether the instant t falls within the day d in t's
// location, i.e. within [00:00:00, 24:00:00) of that date.
//
// The previous implementation used strict After/Before bounds, which
// excluded both exactly midnight and the final second of the day, contrary
// to the documented "between 00:00:00 and 23:59:59" semantics.
func (d Date) EqualsTime(t time.Time) bool {
	dtz := d.In(t.Location())
	return !t.Before(dtz) && t.Before(dtz.Add(24*time.Hour))
}
// After reports whether the date d is after date.
func (d Date) After(date Date) bool {
	return d > date
}

// AfterTime reports whether the whole day d lies after the instant t.
// NOTE(review): boundary instants (exact midnight, last second of the day)
// depend on EqualsTime's open/closed bounds — verify the three predicates
// Equals/After/Before partition the timeline as intended.
func (d Date) AfterTime(t time.Time) bool {
	// check if equals
	if d.EqualsTime(t) {
		return false
	}
	dtz := d.In(t.Location())
	return t.Before(dtz)
}

// Before reports whether the date d is before date.
func (d Date) Before(date Date) bool {
	return d < date
}

// BeforeTime reports whether the whole day d lies before the instant t.
func (d Date) BeforeTime(t time.Time) bool {
	// check if equals
	if d.EqualsTime(t) {
		return false
	}
	dtz := d.In(t.Location())
	return t.After(dtz.Add(time.Hour*24 - time.Second))
}

// MarshalJSON encodes the date as a quoted "2006-01-02" string.
func (d Date) MarshalJSON() ([]byte, error) {
	return []byte(d.Format(`"` + RFC3339 + `"`)), nil
}

// UnmarshalJSON decodes a quoted "2006-01-02" string into d.
func (d *Date) UnmarshalJSON(data []byte) (err error) {
	*d, err = Parse(`"`+RFC3339+`"`, string(data))
	return
}
// Scan implements the database/sql Scanner interface. Only time.Time
// source values are supported.
func (d *Date) Scan(value interface{}) error {
	// Name the asserted value t — the original named it "time", which
	// shadowed the time package within this function.
	t, ok := value.(time.Time)
	if !ok {
		return errors.New("Only time.Time is supported")
	}
	// convert to date
	nd, err := NewFromTime(t)
	if err != nil {
		return err
	}
	*d = nd
	return nil
}
// Value implements the driver Valuer interface, returning the date as a
// UTC midnight time.Time.
func (d Date) Value() (driver.Value, error) {
	return d.UTC(), nil
}

// Weekday returns the day of the week for d.
func (d Date) Weekday() time.Weekday {
	// Day 0 (1970-01-01) was a Thursday; offset the day count and wrap mod 7.
	return time.Weekday((uint16(d) + uint16(time.Thursday)) % 7)
}
} | epochdate.go | 0.770206 | 0.462776 | epochdate.go | starcoder |
package random
import (
"math"
"math/rand"
"github.com/DexterLB/traytor/maths"
)
// Random is a random generator with convenient methods
type Random struct {
	generator *rand.Rand
}

// New returns a new Random object initialized with the given seed
func New(seed int64) *Random {
	r := &Random{}
	source := rand.NewSource(seed)
	r.generator = rand.New(source)
	return r
}

// Vec3Sphere returns a random unit vector, uniformly distributed on the
// unit sphere (z chosen uniformly in [-1, 1], azimuth uniformly in [0, 2π)).
func (r *Random) Vec3Sphere() *maths.Vec3 {
	u := r.FloatAB(-1, 1)
	theta := r.Float02Pi()
	return maths.NewVec3(
		math.Sqrt(1-u*u)*math.Cos(theta),
		math.Sqrt(1-u*u)*math.Sin(theta),
		u,
	)
}

// Vec3Hemi returns a random unit vector in the hemisphere defined by normal
func (r *Random) Vec3Hemi(normal *maths.Vec3) *maths.Vec3 {
	vec := r.Vec3Sphere()
	// Flip vectors that fall in the opposite hemisphere.
	if maths.DotProduct(vec, normal) < 0 {
		vec.Negate()
	}
	return vec
}

// Vec3HemiCos returns a random unit vector chosen on a
// cosine-weighed hemisphere defined by normal
func (r *Random) Vec3HemiCos(normal *maths.Vec3) *maths.Vec3 {
	// Build an orthonormal basis (ox, oy, normal). The arbitrary seed
	// vector (42, 56, -15) only fails when parallel to normal, in which
	// case random vectors are tried until the cross product is usable.
	ox := maths.CrossProduct(maths.NewVec3(42, 56, -15), normal)
	for math.Abs(ox.Length()) < maths.Epsilon {
		ox = maths.CrossProduct(r.Vec3Sphere(), normal)
	}
	oy := maths.CrossProduct(ox, normal)
	ox.Normalise()
	oy.Normalise()
	// Sample a disk (radius = sqrt(u)) and project up onto the hemisphere,
	// which yields the cosine-weighted distribution.
	u := r.Float01()
	radius := math.Sqrt(u)
	theta := r.Float02Pi()
	vec := normal.Scaled(math.Sqrt(math.Max(0, 1-u)))
	vec.Add(ox.Scaled(radius * math.Cos(theta)))
	vec.Add(oy.Scaled(radius * math.Sin(theta)))
	return vec
}
// Float01 returns a random float between 0 and 1
func (r *Random) Float01() float64 {
	return r.generator.Float64()
}

// Float0Pi returns a random float between 0 and Pi
func (r *Random) Float0Pi() float64 {
	return r.generator.Float64() * math.Pi
}

// Float02Pi returns a random float between 0 and 2*Pi
func (r *Random) Float02Pi() float64 {
	return r.generator.Float64() * 2 * math.Pi
}

// Float0A returns a random float between 0 and a
func (r *Random) Float0A(a float64) float64 {
	return r.Float01() * a
}

// FloatAB returns a random float between a and b
func (r *Random) FloatAB(a, b float64) float64 {
	return r.Float0A(b-a) + a
}
// Int640N returns a random int64 within [0..n] (inclusive of n).
func (r *Random) Int640N(n int64) int64 {
	return r.generator.Int63n(n + 1)
}

// Int320N returns a random int32 within [0..n] (inclusive of n).
func (r *Random) Int320N(n int32) int32 {
	return r.generator.Int31n(n + 1)
}

// Int0N returns a random int within [0..n] (inclusive of n).
func (r *Random) Int0N(n int) int {
	return r.generator.Intn(n + 1)
}

// Int64AB returns a random int64 within [a..b]
func (r *Random) Int64AB(a, b int64) int64 {
	return r.Int640N(b-a) + a
}

// Int32AB returns a random int32 within [a..b]
func (r *Random) Int32AB(a, b int32) int32 {
	return r.Int320N(b-a) + a
}

// IntAB returns a random int within [a..b]
func (r *Random) IntAB(a, b int) int {
	return r.Int0N(b-a) + a
}

// Bool returns true or false at random
func (r *Random) Bool() bool {
	return (r.Int0N(1) == 0)
}

// Sign returns -1 or 1 at random
func (r *Random) Sign() int {
	if r.Bool() {
		return 1
	}
	return -1
}

// Sign32 returns -1 or 1 at random
func (r *Random) Sign32() int32 {
	if r.Bool() {
		return 1
	}
	return -1
}

// Sign64 returns -1 or 1 at random
func (r *Random) Sign64() int64 {
	if r.Bool() {
		return 1
	}
	return -1
}

// NewSeed returns a random seed
func (r *Random) NewSeed() int64 {
	return r.generator.Int63()
}
package stcdetail
import (
"github.com/xdrpp/goxdr/xdr"
"reflect"
"strings"
)
// trivSprintf provides a no-op Sprintf, satisfying the formatter part of
// the marshaling interfaces where formatted output is not needed.
type trivSprintf struct{}

func (trivSprintf) Sprintf(f string, args ...interface{}) string {
	return ""
}

// Marshal an XDR type to the raw binary bytes defined in RFC4506.
// The return value is binary, not UTF-8. For most marshaling
// purposes you might prefer a []byte (so see XdrOut), but this
// function is handy if you want to convert the contents of an XDR
// structure into a map key or compare two XDR structures for
// equality.
func XdrToBin(t xdr.XdrType) string {
	out := strings.Builder{}
	t.XdrMarshal(&xdr.XdrOut{&out}, "")
	return out.String()
}

// Unmarshal an XDR type from the raw binary bytes defined in RFC4506.
func XdrFromBin(t xdr.XdrType, input string) (err error) {
	// The goxdr unmarshaler reports failures by panicking with an
	// xdr.XdrError; convert that back into an ordinary error. Any other
	// panic value is re-raised.
	defer func() {
		if i := recover(); i != nil {
			if xe, ok := i.(xdr.XdrError); ok {
				err = xe
				return
			}
			panic(i)
		}
	}()
	in := strings.NewReader(input)
	t.XdrMarshal(&xdr.XdrIn{in}, "")
	return
}

// forEachXdr adapts a visitor callback to the goxdr marshaling interface.
type forEachXdr struct {
	fn func(xdr.XdrType) bool
	trivSprintf
}

func (fex forEachXdr) Marshal(_ string, val xdr.XdrType) {
	// Recurse into aggregates only when the callback declines to prune.
	if !fex.fn(val) {
		if xa, ok := val.(xdr.XdrAggregate); ok {
			xa.XdrRecurse(fex, "")
		}
	}
}

// Calls fn, recursively, on every value inside an XdrType. Prunes
// the recursion if fn returns true.
func ForEachXdr(t xdr.XdrType, fn func(xdr.XdrType) bool) {
	t.XdrMarshal(forEachXdr{fn: fn}, "")
}
// Calls fn on each instance of a type encountered while traversing a
// data structure. fn should be of type func(*T) or func(*T)bool
// where T is an XDR structure. By default, the traversal does not
// recurse into T. In the case that T is part of a linked list (or
// otherwise contains a pointer to T internally), if the function
// returns false then fields within T will continue to be examined
// recursively.
func ForEachXdrType(a xdr.XdrType, fn interface{}) {
	fnv := reflect.ValueOf(fn)
	fnt := fnv.Type()
	// Validate the callback shape: exactly one argument, and at most one
	// result which, if present, must be bool.
	if fnt.Kind() != reflect.Func || fnt.NumIn() != 1 || fnt.NumOut() > 1 ||
		(fnt.NumOut() == 1 && fnt.Out(0).Kind() != reflect.Bool) {
		panic("ForEachXdrType: invalid function")
	}
	argt := fnt.In(0)
	argv := reflect.New(argt).Elem()
	ForEachXdr(a, func(t xdr.XdrType) bool {
		p := t.XdrPointer()
		if p != nil && reflect.TypeOf(p).AssignableTo(argt) {
			argv.Set(reflect.ValueOf(p))
			res := fnv.Call([]reflect.Value{argv})
			// Prune recursion unless the callback explicitly returned false.
			if len(res) == 0 || len(res) == 1 && res[0].Bool() {
				return true
			}
		}
		return false
	})
}

// xdrExtract captures the first traversed value assignable to out.
type xdrExtract struct {
	out  reflect.Value
	done bool
	trivSprintf
}

func (x *xdrExtract) Marshal(_ string, t xdr.XdrType) {
	if x.done {
		return
	}
	p := t.XdrPointer()
	if p != nil && reflect.TypeOf(p).AssignableTo(x.out.Type()) {
		x.out.Set(reflect.ValueOf(p))
		x.done = true
		return
	} else if a, ok := t.(xdr.XdrAggregate); ok {
		a.XdrRecurse(x, "")
	}
}

// If out is of type **T, then *out is set to point to the first
// instance of T found when traversing t. Returns true if a match was found.
func XdrExtract(t xdr.XdrType, out interface{}) bool {
	x := xdrExtract{out: reflect.ValueOf(out).Elem()}
	t.XdrMarshal(&x, "")
	return x.done
}
package grid
import (
	"fmt"
	"math"
	"sort"
	"strconv"
	"strings"
)
// stringToDir maps a direction letter ("u"/"r"/"d"/"l", case-insensitive)
// to its direction constant. Unrecognized input silently falls back to up.
func stringToDir(token string) direction {
	switch strings.ToLower(token) {
	case "u":
		return up
	case "r":
		return right
	case "d":
		return down
	case "l":
		return left
	default:
		return up
	}
}
// parse converts a comma-separated list of movement tokens (e.g.
// "U7,R6,D4,L4") into a movementSlice. Each token is a direction letter
// followed by a decimal distance.
func parse(arg string) (movementSlice, error) {
	split := strings.Split(arg, ",")
	mov := make(movementSlice, 0, len(split))
	for _, token := range split {
		// Reject empty or direction-only tokens (e.g. from a trailing
		// comma); the previous code panicked on token[0] for them.
		if len(token) < 2 {
			return movementSlice{}, fmt.Errorf("invalid movement token %q", token)
		}
		dir := stringToDir(string(token[0]))
		dist, err := strconv.Atoi(token[1:])
		if err != nil {
			return movementSlice{}, err
		}
		mov = append(mov, movement{
			direction: dir,
			distance:  dist,
		})
	}
	return mov, nil
}
// New parses a movement description string into a movementSlice.
func New(data string) (movementSlice, error) {
	return parse(data)
}

// toCornerPoints expands the movements into the sequence of corner points
// the wire visits, starting from the origin (the initial single element).
func (ms movementSlice) toCornerPoints() path {
	grid := make([]point, 1) // one element here is on purpose
	for _, m := range ms {
		lastElem := grid[len(grid)-1]
		switch m.direction {
		case up:
			{
				newGridElem := point{
					x: lastElem.x,
					y: lastElem.y + m.distance,
				}
				grid = append(grid, newGridElem)
			}
		case down:
			{
				newGridElem := point{
					x: lastElem.x,
					y: lastElem.y - m.distance,
				}
				grid = append(grid, newGridElem)
			}
		case right:
			{
				newGridElem := point{
					x: lastElem.x + m.distance,
					y: lastElem.y,
				}
				grid = append(grid, newGridElem)
			}
		case left:
			{
				newGridElem := point{
					x: lastElem.x - m.distance,
					y: lastElem.y,
				}
				grid = append(grid, newGridElem)
			}
		}
	}
	return grid
}
// getPathBetweenPoints returns the intermediate unit-step points strictly
// between the axis-aligned points p1 and p2 (neither endpoint is included).
// Exactly one of dx/dy is non-zero for corner points produced by
// toCornerPoints; equal points are an error.
func getPathBetweenPoints(p1, p2 point) (path, error) {
	dx := p2.x - p1.x
	dy := p2.y - p1.y
	if dx == 0 && dy == 0 {
		return path{}, twoSamePointsNextToEachOtherErr
	}
	var out path
	if dx != 0 {
		out = make(path, 0, abs(dx))
	} else {
		out = make(path, 0, abs(dy))
	}
	// Walk toward p2 one unit at a time, stopping one short of it so the
	// endpoint itself is excluded.
	if dx > 0 {
		for m := 0; m < dx-1; m++ {
			out = append(out, point{y: p1.y, x: p1.x + m + 1})
		}
	} else {
		for m := 0; m > dx+1; m-- {
			out = append(out, point{y: p1.y, x: p1.x + m - 1})
		}
	}
	if dy > 0 {
		for m := 0; m < dy-1; m++ {
			out = append(out, point{y: p1.y + m + 1, x: p1.x})
		}
	} else {
		for m := 0; m > dy+1; m-- {
			out = append(out, point{y: p1.y + m - 1, x: p1.x})
		}
	}
	return out, nil
}
// abs returns the absolute value of x.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
// GetPath expands the movements into the full, unit-spaced list of points
// the wire passes through, starting at the origin and including every
// corner and intermediate point in order.
func (ms movementSlice) GetPath() (path, error) {
	cp := ms.toCornerPoints()
	cornerPointLength := len(cp)
	out := make(path, 0, cornerPointLength*2)
	for i := 0; i < cornerPointLength-1; i++ {
		out = append(out, cp[i])
		p, err := getPathBetweenPoints(cp[i], cp[i+1])
		if err != nil {
			return path{}, err
		}
		out = append(out, p...)
	}
	// The loop stops before the final corner; append it explicitly.
	out = append(out, cp[cornerPointLength-1])
	return out, nil
}
// FindIntersections returns every point that appears in both p and next,
// excluding the shared origin. The comparison is a brute-force O(n*m) scan.
func (p path) FindIntersections(next path) []point {
	out := make([]point, 0) // redundant zero capacity argument removed
	for _, pt := range p {
		for _, nextPt := range next {
			// Both wires always start at the origin; it never counts
			// as an intersection.
			if pt.isCentral() && nextPt.isCentral() {
				continue
			}
			if pt.x == nextPt.x && pt.y == nextPt.y {
				out = append(out, pt)
			}
		}
	}
	return out
}

// isCentral reports whether p is the origin (0, 0).
func (p point) isCentral() bool {
	return p.x == 0 && p.y == 0
}
// FindManhattanDistanceOfNearestPoint returns the smallest Manhattan
// distance from the origin among pts.
// NOTE(review): panics on an empty slice (distances[0]); a full sort is
// used where a single min scan would do — confirm callers before changing.
func FindManhattanDistanceOfNearestPoint(pts []point) int {
	distances := make([]int, 0, len(pts))
	for _, pt := range pts {
		distances = append(distances, abs(pt.x)+abs(pt.y))
	}
	sort.Ints(distances)
	return distances[0]
}

// findDistanceToIntersection returns the wire length from the start of p
// to the intersection point inter, walking segment by segment.
func (p path) findDistanceToIntersection(inter point) int {
	dist := 0
	for i := 0; i < len(p)-1; i++ {
		firstPoint := p[i]
		secondPoint := p[i+1]
		// If the intersection lies on this segment, add the partial
		// distance and stop.
		if isBetween := inter.isBetweenTwoPoints(firstPoint, secondPoint); isBetween {
			dist += firstPoint.manhattanDistance(inter)
			break
		}
		dist += firstPoint.manhattanDistance(secondPoint)
	}
	return dist
}

// manhattanDistance returns the Manhattan distance between p and next.
func (p point) manhattanDistance(next point) int {
	return abs(p.x-next.x) + abs(p.y-next.y)
}

// isBetweenTwoPoints reports whether p lies strictly between the
// axis-aligned points p1 and p2 (on the same horizontal or vertical line,
// in either direction).
func (p point) isBetweenTwoPoints(p1, p2 point) bool {
	// there could be test for p1==p2==p or something like this, but come on...
	if p1.x < p.x && p.x < p2.x && p.y == p1.y && p.y == p2.y {
		// p is between p1 and p2 on X axis
		return true
	} else if p.x < p1.x && p.x > p2.x && p.y == p1.y && p.y == p2.y {
		// p is between p1 and p2 on X axis when p1 is on left side of p2
		return true
	} else if p1.y < p.y && p.y < p2.y && p.x == p1.x && p.x == p2.x {
		// p is between p1 and p2 on Y axis
		return true
	} else if p.y < p1.y && p.y > p2.y && p.x == p1.x && p.x == p2.x {
		// p is between p1 and p2 on X axis when p1 is on top of p2
		return true
	}
	return false
}

// FindDistanceToIntersections maps each intersection point to its distance
// along p. The index i is used as the distance, which assumes p is the
// unit-spaced path produced by GetPath.
func (p path) FindDistanceToIntersections(inter []point) map[point]int {
	out := make(map[point]int, len(inter))
	for i, point := range p {
		if i == 0 {
			continue
		}
		for _, intersectionPoint := range inter {
			if point.x == intersectionPoint.x && point.y == intersectionPoint.y {
				out[point] = i
			}
		}
	}
	return out
}

// FindDistanceToClosesIntersection returns the minimum combined wire
// distance over intersections common to both maps.
// NOTE(review): math.MaxInt64 overflows int on 32-bit platforms — consider
// math.MaxInt if the toolchain allows.
func FindDistanceToClosesIntersection(path1, path2 map[point]int) int {
	min := math.MaxInt64
	for inter1, dist1 := range path1 {
		for inter2, dist2 := range path2 {
			if inter1.x == inter2.x && inter1.y == inter2.y && dist1+dist2 < min {
				min = dist1 + dist2
			}
		}
	}
	return min
}
package main
import (
"fmt"
"math"
"math/rand"
)
// Vec3 is a vector of three items X, Y, Z.
type Vec3 struct {
	X float64
	Y float64
	Z float64
}

// NewRandomVec3 generates a random Vec3 with components in [0, 1).
func NewRandomVec3() *Vec3 {
	return &Vec3{
		X: rand.Float64(),
		Y: rand.Float64(),
		Z: rand.Float64(),
	}
}

// NewRandomInRangeVec3 generates a random Vec3 with components in the
// range [min, max).
func NewRandomInRangeVec3(min, max float64) *Vec3 {
	return &Vec3{
		X: RandFloat(min, max),
		Y: RandFloat(min, max),
		Z: RandFloat(min, max),
	}
}
// NewRandomInUnitSphereVec3 generates a random vector strictly inside the
// unit sphere by rejection sampling: draw candidates in the unit cube and
// keep the first one whose squared length is below 1.
//
// The original used `for true` and carried an unreachable `return nil`
// after the loop; both are replaced by an idiomatic infinite for.
func NewRandomInUnitSphereVec3() *Vec3 {
	for {
		p := NewRandomInRangeVec3(-1, 1)
		if p.SquaredLen() < 1 {
			return p
		}
	}
}
// NewRandomUnitVec3 generates a random unit vector.
func NewRandomUnitVec3() *Vec3 {
	return NewRandomInUnitSphereVec3().UnitVector()
}

// NewRandomInHemisphereVec3 generates a random vector in the hemisphere
// around normal.
func NewRandomInHemisphereVec3(normal *Vec3) *Vec3 {
	inUnitSphere := NewRandomInUnitSphereVec3()
	// In the same hemisphere as the normal.
	if inUnitSphere.Dot(normal) > 0.0 {
		return inUnitSphere
	}
	return inUnitSphere.Neg()
}
// NewRandomInUnitDiskVec3 generates a random vector strictly inside the
// unit disk (z = 0) by rejection sampling.
//
// The unreachable `return nil` that followed the infinite loop has been
// removed.
func NewRandomInUnitDiskVec3() *Vec3 {
	for {
		p := &Vec3{RandFloat(-1, 1), RandFloat(-1, 1), 0}
		if p.SquaredLen() < 1 {
			return p
		}
	}
}
// ToPoint3 converts v to a Point3.
func (v *Vec3) ToPoint3() *Point3 {
	return &Point3{
		X: v.X,
		Y: v.Y,
		Z: v.Z,
	}
}

// ToColor converts v to a Color.
func (v *Vec3) ToColor() *Color {
	return &Color{
		R: v.X,
		G: v.Y,
		B: v.Z,
	}
}

// Neg returns the component-wise negation of v.
func (v *Vec3) Neg() *Vec3 {
	return &Vec3{
		X: -v.X,
		Y: -v.Y,
		Z: -v.Z,
	}
}

// Add returns the component-wise sum v + v2.
func (v *Vec3) Add(v2 *Vec3) *Vec3 {
	return &Vec3{
		X: v.X + v2.X,
		Y: v.Y + v2.Y,
		Z: v.Z + v2.Z,
	}
}

// Sub returns the component-wise difference v - v2.
func (v *Vec3) Sub(v2 *Vec3) *Vec3 {
	return &Vec3{
		X: v.X - v2.X,
		Y: v.Y - v2.Y,
		Z: v.Z - v2.Z,
	}
}

// Mul returns the component-wise (Hadamard) product of v and v2.
func (v *Vec3) Mul(v2 *Vec3) *Vec3 {
	return &Vec3{
		X: v.X * v2.X,
		Y: v.Y * v2.Y,
		Z: v.Z * v2.Z,
	}
}

// Mult returns v scaled by the scalar t.
func (v *Vec3) Mult(t float64) *Vec3 {
	return &Vec3{
		X: v.X * t,
		Y: v.Y * t,
		Z: v.Z * t,
	}
}

// Div returns v scaled by 1/t.
func (v *Vec3) Div(t float64) *Vec3 {
	return v.Mult(1 / t)
}

// SquaredLen returns the squared Euclidean length of v.
func (v *Vec3) SquaredLen() float64 {
	return v.X*v.X + v.Y*v.Y + v.Z*v.Z
}

// Len returns the Euclidean length of v.
func (v *Vec3) Len() float64 {
	return math.Sqrt(v.SquaredLen())
}

// Dot calculates the dot product of v and v2.
func (v *Vec3) Dot(v2 *Vec3) float64 {
	return v.X*v2.X + v.Y*v2.Y + v.Z*v2.Z
}

// Cross calculates the cross product of v and v2.
func (v *Vec3) Cross(v2 *Vec3) *Vec3 {
	return &Vec3{
		X: v.Y*v2.Z - v.Z*v2.Y,
		Y: v.Z*v2.X - v.X*v2.Z,
		Z: v.X*v2.Y - v.Y*v2.X,
	}
}

// UnitVector returns the unit vector of v.
func (v *Vec3) UnitVector() *Vec3 {
	return v.Div(v.Len())
}

// NearZero returns true if all items of vector are near zero.
func (v *Vec3) NearZero() bool {
	const s = 1e-8
	return (math.Abs(v.X) < s) && (math.Abs(v.Y) < s) && (math.Abs(v.Z) < s)
}

// Reflect calculates the reflection of v about the normal n.
func (v *Vec3) Reflect(n *Vec3) *Vec3 {
	return v.Sub(n.Mult(2 * v.Dot(n)))
}

// Refract calculates the refraction of v through a surface with normal n,
// using Snell's law with the refraction ratio etaiOverEtat.
func (v *Vec3) Refract(n *Vec3, etaiOverEtat float64) *Vec3 {
	cosTheta := math.Min(v.Neg().Dot(n), 1.0)
	rOutPerp := v.Add(n.Mult(cosTheta)).Mult(etaiOverEtat)
	rOutParallel := n.Mult(-math.Sqrt(math.Abs(1.0 - rOutPerp.SquaredLen())))
	return rOutPerp.Add(rOutParallel)
}

// String renders the vector as "X Y Z".
func (v Vec3) String() string {
	return fmt.Sprintf("%f %f %f", v.X, v.Y, v.Z)
}
package expectation
import (
"fmt"
"reflect"
"github.com/goldenspider/gspec/errors"
)
// Checker is the type of function that checks between actual and expected value
// then returns an Error if the expectation fails.
type Checker func(actual, expected interface{}, name string, skip int) error
// Equal checks for the equality of contents and is tolerant of type differences.
func Equal(actual, expected interface{}, name string, skip int) error {
if reflect.DeepEqual(actual, expected) {
return nil
}
if fmt.Sprint(actual) == fmt.Sprint(expected) {
return nil
}
return errors.Compare(actual, expected, "to equal", name, skip+1)
}
// NotEqual is the reverse of Equal.
func NotEqual(actual, expected interface{}, name string, skip int) error {
if Equal(actual, expected, name, skip+1) != nil {
return nil
}
return errors.Compare(actual, expected, "not to equal", name, skip+1)
}
// Panic checks if a function panics. actual must be a func(); the check
// fails if it is not, or if calling it does not panic.
func Panic(actual, expected interface{}, name string, skip int) (ret error) {
	f, ok := actual.(func())
	if !ok {
		// Return immediately — the previous version fell through and
		// invoked the nil f(), relying on the recover path by accident.
		return errors.Expect("the argument of Panic has to be a function of type func().", skip)
	}
	defer func() {
		// No panic occurred: the expectation fails.
		if err := recover(); err == nil {
			ret = errors.Expect("panicking", skip+1)
		}
	}()
	f()
	return nil
}
// IsType checks if the actual value is of the same type as the expected value.
func IsType(actual, expected interface{}, name string, skip int) error {
	if reflect.TypeOf(actual) != reflect.TypeOf(expected) {
		return errors.Compare(actual, expected, "to have type of", name, skip+1)
	}
	return nil
}

// Equal is the fluent method for checker Equal.
func (a *Actual) Equal(expected interface{}) {
	a.to(Equal, expected, 1)
}

// NotEqual is the fluent method for checker NotEqual.
func (a *Actual) NotEqual(expected interface{}) {
	a.to(NotEqual, expected, 1)
}

// Panic is the fluent method for checker Panic.
func (a *Actual) Panic() {
	a.to(Panic, nil, 1)
}

// IsType is the fluent method for checker IsType.
func (a *Actual) IsType(expected interface{}) {
	a.to(IsType, expected, 1)
}
package jsonassert
import (
"fmt"
"reflect"
)
// JSONComparator compares expected and actual JSON trees, accumulating
// differences into a JSONCompareResult.
type JSONComparator interface {
	CompareJSONObject(expected, actual JSONNode) *JSONCompareResult
	CompareJSONArray(expected, actual JSONNode) *JSONCompareResult
	CompareJSONObjectWithPrefix(prefix string, expected, actual JSONNode, result *JSONCompareResult)
	CompareJSONArrayWithPrefix(prefix string, expected, actual JSONNode, result *JSONCompareResult)
	CompareValues(prefix string, expected interface{}, actual interface{}, result *JSONCompareResult)
}

// DefaultComparator implements JSONComparator for a given compare mode.
type DefaultComparator struct {
	compareMode JSONCompareMode
}

// CompareJSONObjectWithPrefix checks expected keys against actual; in
// NON_EXTENSIBLE/STRICT modes, extra keys in actual also fail.
func (comp *DefaultComparator) CompareJSONObjectWithPrefix(prefix string, expected, actual JSONNode, result *JSONCompareResult) {
	comp.CheckJsonObjectKeysExpectedInActual(prefix, expected, actual, result)
	if comp.compareMode == NON_EXTENSIBLE || comp.compareMode == STRICT {
		comp.CheckJsonObjectKeysActualInExpected(prefix, expected, actual, result)
	}
}
// CompareJSONArrayWithPrefix compares two JSON arrays: sizes must match;
// element order matters only in STRICT/STRICT_ORDER modes.
func (comp *DefaultComparator) CompareJSONArrayWithPrefix(prefix string, expected, actual JSONNode, result *JSONCompareResult) {
	if expected.GetSize() != actual.GetSize() {
		// Fixed message typo: "vallues" -> "values".
		result.FailWithMessage(fmt.Sprintf("%s[]: Expected %d values but got %d", prefix, expected.GetSize(), actual.GetSize()))
	} else if expected.GetSize() == 0 {
		return
	}
	if comp.compareMode == STRICT || comp.compareMode == STRICT_ORDER {
		comp.CompareJSONArrayWithStrictOrder(prefix, expected, actual, result)
	} else {
		comp.RecursivelyCompareJSONArray(prefix, expected, actual, result)
	}
}
// CompareValues compares a single expected/actual value pair, dispatching
// on nil-ness, dynamic type, and container kind (JSONNode, slice, map).
func (comp *DefaultComparator) CompareValues(prefix string, expected, actual interface{}, result *JSONCompareResult) {
	// Two nils are equal. Bail out before reflect.TypeOf(nil), which
	// returns a nil Type and would panic on .Name() below.
	if expected == nil && actual == nil {
		return
	}
	if actual != nil && expected == nil || actual == nil && expected != nil {
		result.Fail(prefix, expected, actual)
	} else if reflect.TypeOf(actual).Name() != reflect.TypeOf(expected).Name() ||
		reflect.TypeOf(actual).Kind() != reflect.TypeOf(expected).Kind() {
		// Different dynamic types never compare equal.
		result.Fail(prefix, expected, actual)
	} else {
		if expectedElementSafe, actualElementSafe, ok := safeGetJSONNode(expected, actual); ok {
			comp.CompareValuesJSONNode(prefix, expectedElementSafe, actualElementSafe, result)
		} else {
			expectedKind := reflect.TypeOf(expected).Kind()
			actualKind := reflect.TypeOf(actual).Kind()
			if expectedKind == reflect.Slice && actualKind == reflect.Slice {
				// Wrap raw slices as JSONNodes and recurse.
				expectedElementSafe := expected.([]interface{})
				actualElementSafe := actual.([]interface{})
				newExpected := NewJSONNode()
				newExpected.SetArray(expectedElementSafe)
				newActual := NewJSONNode()
				newActual.SetArray(actualElementSafe)
				if comp.CompareJSONArray(newExpected, newActual).Failed() {
					result.Fail(prefix, expected, actual)
				}
			} else if expectedKind == reflect.Map && actualKind == reflect.Map {
				// Wrap raw maps as JSONNodes and recurse.
				expectedElementSafe := expected.(map[string]interface{})
				actualElementSafe := actual.(map[string]interface{})
				newExpected := NewJSONNodeFromMap(expectedElementSafe)
				newActual := NewJSONNodeFromMap(actualElementSafe)
				if comp.CompareJSONObject(newExpected, newActual).Failed() {
					result.Fail(prefix, expected, actual)
				}
			} else if expected != actual {
				result.Fail(prefix, expected, actual)
			}
		}
	}
}
// CompareValuesJSONNode compares two JSONNode values, dispatching on
// whether the expected node is a map, an array, or a scalar.
func (comp *DefaultComparator) CompareValuesJSONNode(prefix string, expected, actual JSONNode, result *JSONCompareResult) {
	if expected.IsMap() {
		if actual.IsMap() {
			comp.CompareJSONObjectWithPrefix(prefix, expected, actual, result)
		} else {
			result.Fail(prefix, expected, actual)
		}
	} else if expected.IsArray() {
		if actual.IsArray() {
			comp.CompareJSONArrayWithPrefix(prefix, expected, actual, result)
		} else {
			result.Fail(prefix, expected, actual)
		}
	} else {
		// Scalars: compare underlying data directly.
		if expected.GetData() != actual.GetData() {
			result.Fail(prefix, expected, actual)
		}
	}
}

// CompareJSONObject compares two JSON objects from a fresh result.
func (comp *DefaultComparator) CompareJSONObject(expected, actual JSONNode) *JSONCompareResult {
	result := NewJSONCompareResult()
	comp.CompareJSONObjectWithPrefix("", expected, actual, result)
	return result
}

// CompareJSONArray compares two JSON arrays from a fresh result.
func (comp *DefaultComparator) CompareJSONArray(expected, actual JSONNode) *JSONCompareResult {
	result := NewJSONCompareResult()
	comp.CompareJSONArrayWithPrefix("", expected, actual, result)
	return result
}

// CompareJSONArrayWithStrictOrder compares array elements pairwise by
// index; a missing actual element compares against nil.
func (comp *DefaultComparator) CompareJSONArrayWithStrictOrder(key string, expected, actual JSONNode, result *JSONCompareResult) {
	for i, expectedValue := range expected.GetArray() {
		actualValues := actual.GetArray()
		var actualValue interface{}
		if i < len(actualValues) {
			actualValue = actualValues[i]
		}
		comp.CompareValues(fmt.Sprintf("%s[%d]", key, i), expectedValue, actualValue, result)
	}
}
// RecursivelyCompareJSONArray performs order-insensitive array comparison:
// each expected element must find an unmatched, equal element in actual.
// Already-matched actual indices are tracked so each element is used once.
func (comp *DefaultComparator) RecursivelyCompareJSONArray(key string, expected, actual JSONNode, result *JSONCompareResult) {
	matched := []int{}
	for i, expectedElement := range expected.GetArray() {
		matchFound := false
		for j, actualElement := range actual.GetArray() {
			// Skip consumed elements and type mismatches outright.
			if contains(matched, j) ||
				reflect.TypeOf(actualElement).Name() != reflect.TypeOf(expectedElement).Name() ||
				reflect.TypeOf(actualElement).Kind() != reflect.TypeOf(expectedElement).Kind() {
				continue
			}
			if expectedElementSafe, actualElementSafe, ok := safeGetJSONNode(expectedElement, actualElement); ok {
				if expectedElementSafe.IsMap() && actualElementSafe.IsMap() {
					if comp.CompareJSONObject(expectedElementSafe, actualElementSafe).Passed() {
						matched = append(matched, j)
						matchFound = true
						break
					}
				} else if expectedElementSafe.IsArray() && actualElementSafe.IsArray() {
					if comp.CompareJSONArray(expectedElementSafe, actualElementSafe).Passed() {
						matched = append(matched, j)
						matchFound = true
						break
					}
				}
			} else {
				// Raw (non-JSONNode) values: wrap containers and recurse,
				// or fall back to direct comparison for scalars.
				expectedKind := reflect.TypeOf(expectedElement).Kind()
				actualKind := reflect.TypeOf(actualElement).Kind()
				if expectedKind == reflect.Slice && actualKind == reflect.Slice {
					expectedElementSafe := expectedElement.([]interface{})
					actualElementSafe := actualElement.([]interface{})
					newExpected := NewJSONNodeFromArray(expectedElementSafe)
					newActual := NewJSONNodeFromArray(actualElementSafe)
					if comp.CompareJSONArray(newExpected, newActual).Passed() {
						matched = append(matched, j)
						matchFound = true
						break
					}
				} else if expectedKind == reflect.Map && actualKind == reflect.Map {
					expectedElementSafe := expectedElement.(map[string]interface{})
					actualElementSafe := actualElement.(map[string]interface{})
					newExpected := NewJSONNodeFromMap(expectedElementSafe)
					newActual := NewJSONNodeFromMap(actualElementSafe)
					if comp.CompareJSONObject(newExpected, newActual).Passed() {
						matched = append(matched, j)
						matchFound = true
						break
					}
				} else if expectedElement == actualElement {
					matched = append(matched, j)
					matchFound = true
					break
				}
			}
		}
		if !matchFound {
			result.FailWithMessage(fmt.Sprintf("%s[%d] Could not find match for element %s", key, i, expectedElement))
		}
	}
}
// CheckJsonObjectKeysActualInExpected reports every key present in actual
// but absent from expected as unexpected.
func (comp *DefaultComparator) CheckJsonObjectKeysActualInExpected(prefix string, expected, actual JSONNode, result *JSONCompareResult) {
	// `for key := range` — the redundant `, _` (flagged by gofmt/vet) is gone.
	for key := range actual.GetMap() {
		if _, ok := expected.CheckGet(key); !ok {
			result.Unexpected(prefix, key)
		}
	}
}

// CheckJsonObjectKeysExpectedInActual compares the value of every expected
// key against actual, reporting keys missing from actual.
func (comp *DefaultComparator) CheckJsonObjectKeysExpectedInActual(prefix string, expected, actual JSONNode, result *JSONCompareResult) {
	for key := range expected.GetMap() {
		expectedValue, _ := expected.CheckGet(key)
		if actualValue, ok := actual.CheckGet(key); ok {
			comp.CompareValues(qualify(prefix, key), expectedValue, actualValue, result)
		} else {
			result.Missing(prefix, key)
		}
	}
}
// contains reports whether e occurs in s.
func contains(s []int, e int) bool {
	for i := range s {
		if s[i] == e {
			return true
		}
	}
	return false
}
// qualify joins prefix and key with a dot; an empty prefix yields key alone.
func qualify(prefix string, key string) string {
	if prefix == "" {
		return key
	}
	return prefix + "." + key
}
func safeGetJSONNode(expected, actual interface{}) (JSONNode, JSONNode, bool) {
expectedSafe, ok1 := expected.(JSONNode)
actualSafe, ok2 := actual.(JSONNode)
if ok1 && ok2 {
return expectedSafe, actualSafe, true
} else {
return nil, nil, false
}
} | comparators.go | 0.610453 | 0.534248 | comparators.go | starcoder |
Goom Voice
https://www.quinapalus.com/goom.html
*/
//-----------------------------------------------------------------------------
package goom
import (
"github.com/deadsy/babi/core"
"github.com/deadsy/babi/module/env"
"github.com/deadsy/babi/module/filter"
"github.com/deadsy/babi/module/osc"
"github.com/deadsy/babi/utils/log"
)
//-----------------------------------------------------------------------------
var voiceGoomInfo = core.ModuleInfo{
Name: "voiceGoom",
In: []core.PortInfo{
// overall control
{"note", "note value", core.PortTypeFloat, voiceGoomNote},
{"gate", "voice gate, attack(>0) or release(=0)", core.PortTypeFloat, voiceGoomGate},
{"midi", "midi input", core.PortTypeMIDI, voiceGoomMidiIn},
/*
{"omode", "oscillator combine mode (0,1,2)", core.PortTypeInt, goomPortOscillatorMode},
{"fmode", "frequency mode (0,1,2)", core.PortTypeInt, goomPortFrequencyMode},
// amplitude envelope
{"amp_attack", "amplitude attack time (secs)", core.PortTypeFloat, goomPortAmplitudeAttack},
{"amp_decay", "amplitude decay time (secs)", core.PortTypeFloat, goomPortAmplitudeDecay},
{"amp_sustain", "amplitude sustain level 0..1", core.PortTypeFloat, goomPortAmplitudeSustain},
{"amp_release", "amplitude release time (secs)", core.PortTypeFloat, goomPortAmplitudeRelease},
// wave oscillator
{"wav_duty", "wave duty cycle (0..1)", core.PortTypeFloat, goomPortWaveDuty},
{"wav_slope", "wave slope (0..1)", core.PortTypeFloat, goomPortWaveSlope},
// modulation envelope
{"mod_attack", "modulation attack time (secs)", core.PortTypeFloat, goomPortModulationAttack},
{"mod_decay", "modulation decay time (secs)", core.PortTypeFloat, goomPortModulationDecay},
// modulation oscillator
{"mod_duty", "modulation duty cycle (0..1)", core.PortTypeFloat, goomPortModulationDuty},
{"mod_slope", "modulation slope (0..1)", core.PortTypeFloat, goomPortModulationSlope},
// modulation control
{"mod_tuning", "modulation tuning (0..1)", core.PortTypeFloat, goomPortModulationTuning},
{"mod_level", "modulation level (0..1)", core.PortTypeFloat, goomPortModulationLevel},
// filter envelope
{"flt_attack", "filter attack time (secs)", core.PortTypeFloat, goomPortFilterAttack},
{"flt_decay", "filter decay time (secs)", core.PortTypeFloat, goomPortFilterDecay},
{"flt_sustain", "filter sustain level 0..1", core.PortTypeFloat, goomPortFilterSustain},
{"flt_release", "filter release time (secs)", core.PortTypeFloat, goomPortFilterRelease},
// filter control
{"flt_sensitivity", "low pass filter sensitivity", core.PortTypeFloat, goomPortFilterSensitivity},
{"flt_cutoff", "low pass filter cutoff frequency (Hz)", core.PortTypeFloat, goomPortFilterCutoff},
{"flt_resonance", "low pass filter resonance (0..1)", core.PortTypeFloat, goomPortFilterResonance},
*/
},
Out: []core.PortInfo{
{"out", "output", core.PortTypeAudio, nil},
},
}
// Info returns the module information.
func (m *voiceGoom) Info() *core.ModuleInfo {
return &m.info
}
//-----------------------------------------------------------------------------
type oModeType uint
const (
oMode0 oModeType = iota
oMode1
oMode2
)
type fModeType uint
const (
fModeNote fModeType = iota
fModeHigh
fModeLow
)
type voiceGoom struct {
info core.ModuleInfo // module info
wavOsc core.Module // wave oscillator
ampEnv core.Module // amplitude envelope generator
lpf core.Module // low pass filter
fltEnv core.Module // filter envelope generator
oMode oModeType // oscillator mode
fMode fModeType // frequency mode
modEnv core.Module // modulation envelope generator
modOsc core.Module // modulation oscillator
modTuning float32 // modulation tuning
modLevel float32 // modulation level
fltSensitivity float32 // filter sensitivity
fltCutoff float32 // filter cutoff
velocity float32 // note velocity
}
// NewVoice returns a Goom voice registered with the synth. The voice is a
// wave oscillator run through a low pass filter, with ADSR envelopes for
// amplitude, modulation and filter control, plus a modulation oscillator.
func NewVoice(s *core.Synth) core.Module {
	// Note: the sub-modules are created in the same order as before so any
	// registration ordering inside the constructors is preserved.
	m := &voiceGoom{
		info:   voiceGoomInfo,
		ampEnv: env.NewADSR(s),
		wavOsc: osc.NewGoom(s),
		modEnv: env.NewADSR(s),
		modOsc: osc.NewGoom(s),
		fltEnv: env.NewADSR(s),
		lpf:    filter.NewSVFilterTrapezoidal(s),
	}
	return s.Register(m)
}
// Child returns the child modules of this module: all of the voice's
// envelopes, oscillators and the low pass filter.
func (m *voiceGoom) Child() []core.Module {
	return []core.Module{m.ampEnv, m.wavOsc, m.modEnv, m.modOsc, m.fltEnv, m.lpf}
}
// Stop performs any cleanup of a module. The goom voice has no resources
// of its own to release.
func (m *voiceGoom) Stop() {
}
//-----------------------------------------------------------------------------
// Port Events
// voiceGoomNote handles a note event: it sets the wave oscillator frequency
// from the MIDI note number. The modulation oscillator handling below is
// currently disabled.
func voiceGoomNote(cm core.Module, e *core.Event) {
	m := cm.(*voiceGoom)
	note := e.GetEventFloat().Val
	// set the wave oscillator frequency
	core.EventInFloat(m.wavOsc, "frequency", core.MIDIToFrequency(note))
	/*
		// set the modulation oscillator frequency
		switch m.fMode {
		case fModeLow:
			note = 10
		case fModeHigh:
			note = 100
		}
		note += m.modTuning * 2 // +/- 2 semitones
		core.EventInFloat(m.modOsc, "frequency", core.MIDIToFrequency(note))
	*/
}
// voiceGoomGate handles a gate event. A positive gate triggers the
// amplitude, modulation and filter envelopes and records the note velocity;
// otherwise all three envelopes are released.
func voiceGoomGate(cm core.Module, e *core.Event) {
	m := cm.(*voiceGoom)
	gate := e.GetEventFloat().Val
	log.Info.Printf("gate %f", gate)
	// a non-positive gate releases the envelopes
	level := gate
	if gate <= 0 {
		level = 0
	}
	for _, eg := range []core.Module{m.ampEnv, m.modEnv, m.fltEnv} {
		core.EventInFloat(eg, "gate", level)
	}
	if gate > 0 {
		// record the note velocity
		m.velocity = gate
	}
}
// voiceGoomMidiIn handles MIDI events sent to the voice. Only control
// change messages are acted upon; all other MIDI events are ignored.
func voiceGoomMidiIn(cm core.Module, e *core.Event) {
	m := cm.(*voiceGoom)
	me := e.GetEventMIDI()
	if me != nil {
		if me.GetType() == core.EventMIDIControlChange {
			val := me.GetCcInt()    // raw integer CC value (used for mode selection)
			fval := me.GetCcFloat() // CC value as a float (used for continuous controls)
			switch me.GetCcNum() {
			case midiWaveDutyCC: // wave oscillator duty cycle
				core.EventInFloat(m.wavOsc, "duty", fval)
			case midiWaveSlopeCC: // wave oscillator slope
				core.EventInFloat(m.wavOsc, "slope", fval)
			case midiAmpAttackCC: // amplitude attack (secs)
				core.EventInFloat(m.ampEnv, "attack", core.MapLin(fval, 0.01, 0.4))
			case midiAmpDecayCC: // amplitude decay (secs)
				core.EventInFloat(m.ampEnv, "decay", core.MapLin(fval, 0.01, 2.0))
			case midiAmpSustainCC: // amplitude sustain (0..1)
				core.EventInFloat(m.ampEnv, "sustain", fval)
			case midiAmpReleaseCC: // amplitude release (secs)
				core.EventInFloat(m.ampEnv, "release", core.MapLin(fval, 0.02, 2.0))
			case midiFltSensitivityCC: // filter sensitivity
				// TODO
			case midiFltCutoffCC: // filter cutoff (0 .. Nyquist)
				core.EventInFloat(m.lpf, "cutoff", core.MapLin(fval, 0, 0.5*core.AudioSampleFrequency))
			case midiFltResonanceCC: // filter resonance
				core.EventInFloat(m.lpf, "resonance", fval)
			case midiFltAttackCC: // filter attack (secs)
				core.EventInFloat(m.fltEnv, "attack", core.MapLin(fval, 0.01, 0.4))
			case midiFltDecayCC: // filter decay (secs)
				core.EventInFloat(m.fltEnv, "decay", core.MapLin(fval, 0.01, 2.0))
			case midiFltSustainCC: // filter sustain (0..1)
				core.EventInFloat(m.fltEnv, "sustain", fval)
			case midiFltReleaseCC: // filter release (secs)
				core.EventInFloat(m.fltEnv, "release", core.MapLin(fval, 0.02, 2.0))
			case midiOscillatorModeCC: // oscillator combine mode (0,1,2)
				log.Info.Printf("set oscillator mode %d", val)
				m.oMode = oModeType(val)
			case midiFrequencyModeCC: // frequency mode (0,1,2)
				log.Info.Printf("set frequency mode %d", val)
				m.fMode = fModeType(val)
			default:
				// ignore
			}
		}
	}
}
/*
func goomPortModulationAttack(cm core.Module, e *core.Event) {
m := cm.(*voiceGoom)
core.EventIn(m.modEnv, "attack", e)
}
func goomPortModulationDecay(cm core.Module, e *core.Event) {
m := cm.(*voiceGoom)
core.EventIn(m.modEnv, "decay", e)
}
func goomPortModulationDuty(cm core.Module, e *core.Event) {
m := cm.(*voiceGoom)
core.EventIn(m.modOsc, "duty", e)
}
func goomPortModulationSlope(cm core.Module, e *core.Event) {
m := cm.(*voiceGoom)
core.EventIn(m.modOsc, "slope", e)
}
func goomPortModulationTuning(cm core.Module, e *core.Event) {
m := cm.(*voiceGoom)
tune := core.Clamp(e.GetEventFloat().Val, 0, 1)
tune = core.Map(tune, -1, 1)
log.Info.Printf("set modulation tuning %f", tune)
m.modTuning = tune
}
func goomPortModulationLevel(cm core.Module, e *core.Event) {
m := cm.(*voiceGoom)
m.modLevel = core.Clamp(e.GetEventFloat().Val, 0, 1)
log.Info.Printf("set modulation level %f", m.modLevel)
}
func goomPortFilterSensitivity(cm core.Module, e *core.Event) {
m := cm.(*voiceGoom)
sensitivity := core.Clamp(e.GetEventFloat().Val, 0, 1)
log.Info.Printf("set filter sensitivity %f", sensitivity)
m.fltSensitivity = sensitivity
}
func goomPortFilterCutoff(cm core.Module, e *core.Event) {
m := cm.(*voiceGoom)
cutoff := core.Clamp(e.GetEventFloat().Val, 0, 1)
log.Info.Printf("set filter cutoff %f", cutoff)
m.fltCutoff = cutoff
}
*/
//-----------------------------------------------------------------------------
// Process runs the module DSP. It returns false once the amplitude
// envelope is no longer active, meaning the voice has finished sounding.
func (m *voiceGoom) Process(buf ...*core.Buf) bool {
	// run the amplitude envelope; stop processing once it goes inactive
	var ampBuf core.Buf
	if active := m.ampEnv.Process(&ampBuf); !active {
		return false
	}
	out := buf[0]
	// run the wave oscillator
	var oscBuf core.Buf
	m.wavOsc.Process(&oscBuf)
	// low pass filter the oscillator output into the output buffer
	m.lpf.Process(&oscBuf, out)
	// scale the output by the amplitude envelope
	out.Mul(&ampBuf)
	return true
}
//----------------------------------------------------------------------------- | module/goom/voice.go | 0.63307 | 0.469885 | voice.go | starcoder |
package test_clients
import (
"testing"
cdata "github.com/pip-services3-go/pip-services3-commons-go/data"
cerr "github.com/pip-services3-go/pip-services3-commons-go/errors"
tdata "github.com/pip-services3-go/pip-services3-rpc-go/test/data"
"github.com/stretchr/testify/assert"
)
// DummyClientFixture is a reusable test fixture that exercises the CRUD
// operations of any IDummyClient implementation.
type DummyClientFixture struct {
	client IDummyClient // the client under test
}
// NewDummyClientFixture creates a fixture wrapping the given client.
func NewDummyClientFixture(client IDummyClient) *DummyClientFixture {
	return &DummyClientFixture{client: client}
}
func (c *DummyClientFixture) TestCrudOperations(t *testing.T) {
dummy1 := tdata.Dummy{Id: "", Key: "Key 1", Content: "Content 1"}
dummy2 := tdata.Dummy{Id: "", Key: "Key 2", Content: "Content 2"}
// Create one dummy
dummy, err := c.client.CreateDummy("ClientFixture", dummy1)
assert.Nil(t, err)
assert.NotNil(t, dummy)
assert.Equal(t, dummy.Content, dummy1.Content)
assert.Equal(t, dummy.Key, dummy1.Key)
dummy1 = *dummy
// Create another dummy
dummy, err = c.client.CreateDummy("ClientFixture", dummy2)
assert.Nil(t, err)
assert.NotNil(t, dummy)
assert.Equal(t, dummy.Content, dummy2.Content)
assert.Equal(t, dummy.Key, dummy2.Key)
dummy2 = *dummy
// Get all dummies
dummies, err := c.client.GetDummies("ClientFixture", cdata.NewEmptyFilterParams(), cdata.NewPagingParams(0, 5, false))
assert.Nil(t, err)
assert.NotNil(t, dummies)
assert.Len(t, dummies.Data, 2)
// Update the dummy
dummy1.Content = "Updated Content 1"
dummy, err = c.client.UpdateDummy("ClientFixture", dummy1)
assert.Nil(t, err)
assert.NotNil(t, dummy)
assert.Equal(t, dummy.Content, "Updated Content 1")
assert.Equal(t, dummy.Key, dummy1.Key)
dummy1 = *dummy
// Delete dummy
dummy, err = c.client.DeleteDummy("ClientFixture", dummy1.Id)
assert.Nil(t, err)
// Try to get delete dummy
dummy, err = c.client.GetDummyById("ClientFixture", dummy1.Id)
assert.Nil(t, err)
assert.Nil(t, dummy)
// Check correlation id propagation
values, err := c.client.CheckCorrelationId("test_cor_id")
assert.Nil(t, err)
assert.Equal(t, values["correlationId"], "test_cor_id")
values, err = c.client.CheckCorrelationId("test cor id")
assert.Nil(t, err)
assert.Equal(t, values["correlationId"], "test cor id")
// Check error propagation
err = c.client.CheckErrorPropagation("test_error_propagation")
appErr, ok := err.(*cerr.ApplicationError)
assert.True(t, ok)
assert.Equal(t, appErr.CorrelationId, "test_error_propagation")
assert.Equal(t, appErr.Status, 404)
assert.Equal(t, appErr.Code, "NOT_FOUND_TEST")
assert.Equal(t, appErr.Message, "Not found error")
} | test/clients/DummyClientFixture.go | 0.590543 | 0.454593 | DummyClientFixture.go | starcoder |
package master
import (
"math/rand"
"sync"
"time"
"github.com/chubaofs/chubaofs/proto"
"github.com/chubaofs/chubaofs/util"
)
// DataNode stores all the information the master tracks about a data node:
// capacity, liveness, partition reports and placement bookkeeping. The
// embedded RWMutex guards the mutable fields.
type DataNode struct {
	Total                     uint64 `json:"TotalWeight"` // total space
	Used                      uint64 `json:"UsedWeight"`  // used space
	AvailableSpace            uint64 // space still available for new partitions
	ID                        uint64
	CellName                  string `json:"Cell"`
	Addr                      string // node address; also used as the task manager target
	ReportTime                time.Time // time of the last heartbeat report
	isActive                  bool      // false once the node misses the heartbeat timeout
	sync.RWMutex
	UsageRatio                float64 // used / total space
	SelectedTimes             uint64  // number times that this datanode has been selected as the location for a data partition.
	Carry                     float64 // carry is a factor used in cacluate the node's weight
	TaskManager               *AdminTaskManager
	DataPartitionReports      []*proto.PartitionReport
	DataPartitionCount        uint32
	NodeSetID                 uint64
	PersistenceDataPartitions []uint64
	BadDisks                  []string // disk paths reported bad in the last heartbeat
}
// newDataNode returns a DataNode for the given address with a random
// initial carry factor and an admin task manager bound to that address.
func newDataNode(addr, clusterID string) (dataNode *DataNode) {
	return &DataNode{
		Carry:       rand.Float64(),
		Total:       1,
		Addr:        addr,
		TaskManager: newAdminTaskManager(addr, clusterID),
	}
}
// checkLiveness marks the node inactive when it has not reported a
// heartbeat within the node timeout.
func (dataNode *DataNode) checkLiveness() {
	dataNode.Lock()
	defer dataNode.Unlock()
	timeout := time.Second * time.Duration(defaultNodeTimeOutSec)
	if time.Since(dataNode.ReportTime) > timeout {
		dataNode.isActive = false
	}
}
// badPartitions collects, across every volume in the cluster, the data
// partitions of this node that live on the given bad disk.
func (dataNode *DataNode) badPartitions(diskPath string, c *Cluster) (partitions []*DataPartition) {
	partitions = make([]*DataPartition, 0)
	for _, vol := range c.copyVols() {
		bad := vol.dataPartitions.checkBadDiskDataPartitions(diskPath, dataNode.Addr)
		partitions = append(partitions, bad...)
	}
	return
}
// updateNodeMetric refreshes the node statistics from a heartbeat response,
// recomputes the usage ratio and marks the node active.
func (dataNode *DataNode) updateNodeMetric(resp *proto.DataNodeHeartbeatResponse) {
	dataNode.Lock()
	defer dataNode.Unlock()
	dataNode.Total = resp.Total
	dataNode.Used = resp.Used
	dataNode.AvailableSpace = resp.Available
	dataNode.CellName = resp.CellName
	dataNode.DataPartitionCount = resp.CreatedPartitionCnt
	dataNode.DataPartitionReports = resp.PartitionReports
	dataNode.BadDisks = resp.BadDisks
	// guard against division by zero when no capacity has been reported
	dataNode.UsageRatio = 0.0
	if dataNode.Total != 0 {
		dataNode.UsageRatio = float64(dataNode.Used) / float64(dataNode.Total)
	}
	dataNode.ReportTime = time.Now()
	dataNode.isActive = true
}
// isWriteAble reports whether the node can accept new data: it must be
// active and have more than 10GB of available space.
func (dataNode *DataNode) isWriteAble() (ok bool) {
	dataNode.RLock()
	defer dataNode.RUnlock()
	// return the condition directly rather than comparing against `true`
	// and mutating the named result
	return dataNode.isActive && dataNode.AvailableSpace > 10*util.GB
}
// isAvailCarryNode reports whether the node's carry factor has accumulated
// to at least one, making it eligible for selection.
func (dataNode *DataNode) isAvailCarryNode() (ok bool) {
	dataNode.RLock()
	defer dataNode.RUnlock()
	return dataNode.Carry >= 1
}
// SetCarry implements "SetCarry" in the Node interface. It replaces the
// node's carry factor under the write lock.
func (dataNode *DataNode) SetCarry(carry float64) {
	dataNode.Lock()
	defer dataNode.Unlock()
	dataNode.Carry = carry
}
// SelectNodeForWrite implements "SelectNodeForWrite" in the Node interface.
// It refreshes the usage ratio, bumps the selection count and consumes one
// unit of the carry factor.
func (dataNode *DataNode) SelectNodeForWrite() {
	dataNode.Lock()
	defer dataNode.Unlock()
	dataNode.UsageRatio = float64(dataNode.Used) / float64(dataNode.Total)
	dataNode.SelectedTimes++
	dataNode.Carry--
}
// clean signals the node's admin task manager to exit.
func (dataNode *DataNode) clean() {
	dataNode.TaskManager.exitCh <- struct{}{}
}
func (dataNode *DataNode) createHeartbeatTask(masterAddr string) (task *proto.AdminTask) {
request := &proto.HeartBeatRequest{
CurrTime: time.Now().Unix(),
MasterAddr: masterAddr,
}
task = proto.NewAdminTask(proto.OpDataNodeHeartbeat, dataNode.Addr, request)
return
} | master/data_node.go | 0.52342 | 0.403508 | data_node.go | starcoder |
package chunk
import (
"github.com/df-mc/dragonfly/server/block/cube"
"sync"
)
// Chunk is a segment in the world with a size of 16x16x256 blocks. A chunk contains multiple sub chunks
// and stores other information such as biomes.
// It is not safe to call methods on Chunk simultaneously from multiple goroutines.
type Chunk struct {
	sync.Mutex
	// r holds the (vertical) range of the Chunk. It includes both the minimum and maximum coordinates.
	r cube.Range
	// air is the runtime ID of air.
	air uint32
	// recalculateHeightMap is true if the chunk's height map should be recalculated on the next call to the HeightMap
	// function. It is set whenever SetBlock is called.
	recalculateHeightMap bool
	// heightMap is the height map of the chunk: one entry per column.
	heightMap HeightMap
	// sub holds all sub chunks part of the chunk. The pointers held by the array are nil if no sub chunk is
	// allocated at the indices.
	sub []*SubChunk
	// biomes is an array of biome IDs. There is one biome ID for every column in the chunk.
	biomes []*PalettedStorage
}
// New initialises a new chunk covering the vertical range r and returns it,
// so that it may be used. Every sub chunk is pre-allocated filled with air,
// with one empty biome storage per sub chunk.
func New(air uint32, r cube.Range) *Chunk {
	// one sub chunk per 16 blocks of height
	n := (r.Height() >> 4) + 1
	c := &Chunk{
		r:                    r,
		air:                  air,
		sub:                  make([]*SubChunk, n),
		biomes:               make([]*PalettedStorage, n),
		recalculateHeightMap: true,
		heightMap:            make(HeightMap, 256),
	}
	for i := 0; i < n; i++ {
		c.sub[i] = NewSubChunk(air)
		c.biomes[i] = emptyStorage(0)
	}
	return c
}
// Range returns the cube.Range of the Chunk as passed to New.
func (chunk *Chunk) Range() cube.Range {
	return chunk.r
}

// Sub returns a list of all sub chunks present in the chunk, ordered from
// the bottom of the chunk upwards.
func (chunk *Chunk) Sub() []*SubChunk {
	return chunk.sub
}
// Block returns the runtime ID of the block at a given x, y and z in a chunk at the given layer. If no
// sub chunk exists at the given y, or the sub chunk has no storage for the
// layer, the block is assumed to be air.
func (chunk *Chunk) Block(x uint8, y int16, z uint8, layer uint8) uint32 {
	sub := chunk.SubChunk(y)
	if sub.Empty() || uint8(len(sub.storages)) <= layer {
		return chunk.air
	}
	// only the low 4 bits of y index into the sub chunk storage
	return sub.storages[layer].At(x, uint8(y), z)
}
// SetBlock sets the runtime ID of a block at a given x, y and z in a chunk at the given layer. If no
// SubChunk exists at the given y, a new SubChunk is created and the block is set.
// Setting any block invalidates the cached height map.
func (chunk *Chunk) SetBlock(x uint8, y int16, z uint8, layer uint8, block uint32) {
	sub := chunk.sub[chunk.SubIndex(y)]
	if uint8(len(sub.storages)) <= layer && block == chunk.air {
		// Air was set at n layer, but there were less than n layers, so there already was air there.
		// Don't do anything with this, just return.
		return
	}
	sub.Layer(layer).Set(x, uint8(y), z, block)
	chunk.recalculateHeightMap = true
}
// Biome returns the biome ID at a specific column in the chunk.
func (chunk *Chunk) Biome(x uint8, y int16, z uint8) uint32 {
	return chunk.biomes[chunk.SubIndex(y)].At(x, uint8(y), z)
}

// SetBiome sets the biome ID at a specific column in the chunk.
func (chunk *Chunk) SetBiome(x uint8, y int16, z uint8, biome uint32) {
	chunk.biomes[chunk.SubIndex(y)].Set(x, uint8(y), z, biome)
}
// Light returns the light level at a specific position in the chunk: the
// maximum of the skylight and block light values there.
func (chunk *Chunk) Light(x uint8, y int16, z uint8) uint8 {
	// mask the coordinates into the local 16x16x16 sub chunk space
	ux, uy, uz, sub := x&0xf, uint8(y&0xf), z&0xf, chunk.SubChunk(y)
	sky := sub.SkyLight(ux, uy, uz)
	if sky == 15 {
		// The skylight was already on the maximum value, so return it without checking block light.
		return sky
	}
	if block := sub.BlockLight(ux, uy, uz); block > sky {
		return block
	}
	return sky
}
// SkyLight returns the skylight level at a specific position in the chunk.
func (chunk *Chunk) SkyLight(x uint8, y int16, z uint8) uint8 {
	return chunk.SubChunk(y).SkyLight(x&15, uint8(y&15), z&15)
}
// HighestLightBlocker iterates from the highest non-empty sub chunk downwards to find the Y value of the
// highest block that completely blocks any light from going through. If none is found, the value returned is
// the minimum height.
func (chunk *Chunk) HighestLightBlocker(x, z uint8) int16 {
	for index := int16(len(chunk.sub) - 1); index >= 0; index-- {
		if sub := chunk.sub[index]; !sub.Empty() {
			for y := 15; y >= 0; y-- {
				// a filtering level of 15 blocks all light
				if FilteringBlocks[sub.storages[0].At(x, uint8(y), z)] == 15 {
					// combine the local y with the sub chunk base to get the absolute y
					return int16(y) | chunk.SubY(index)
				}
			}
		}
	}
	return int16(chunk.r[0])
}
// HighestBlock iterates from the highest non-empty sub chunk downwards to find the Y value of the highest
// non-air block at an x and z. If no blocks are present in the column, the minimum height is returned.
func (chunk *Chunk) HighestBlock(x, z uint8) int16 {
	for index := int16(len(chunk.sub) - 1); index >= 0; index-- {
		if sub := chunk.sub[index]; !sub.Empty() {
			for y := 15; y >= 0; y-- {
				if rid := sub.storages[0].At(x, uint8(y), z); rid != chunk.air {
					// combine the local y with the sub chunk base to get the absolute y
					return int16(y) | chunk.SubY(index)
				}
			}
		}
	}
	return int16(chunk.r[0])
}
// HeightMap returns the height map of the chunk. If the chunk is edited, the height map will be recalculated on the
// next call to this function. Entries hold the highest light-blocking block
// per column (see HighestLightBlocker).
func (chunk *Chunk) HeightMap() HeightMap {
	if chunk.recalculateHeightMap {
		for x := uint8(0); x < 16; x++ {
			for z := uint8(0); z < 16; z++ {
				chunk.heightMap.Set(x, z, chunk.HighestLightBlocker(x, z))
			}
		}
		chunk.recalculateHeightMap = false
	}
	return chunk.heightMap
}
// Compact compacts the chunk as much as possible, getting rid of any sub chunks that are empty, and compacts
// all storages in the sub chunks to occupy as little space as possible.
// Compact should be called right before the chunk is saved in order to optimise the storage space.
func (chunk *Chunk) Compact() {
	for _, sub := range chunk.sub {
		sub.compact()
	}
}
// SubChunk finds the correct SubChunk in the Chunk by a Y value.
func (chunk *Chunk) SubChunk(y int16) *SubChunk {
	return chunk.sub[chunk.SubIndex(y)]
}

// SubIndex returns the sub chunk Y index matching the y value passed,
// offset so that the chunk's minimum height maps to index 0.
func (chunk *Chunk) SubIndex(y int16) int16 {
	return (y - int16(chunk.r[0])) >> 4
}

// SubY returns the sub chunk Y value matching the index passed; it is the
// inverse of SubIndex for y values on a 16-block boundary.
func (chunk *Chunk) SubY(index int16) int16 {
	return (index << 4) + int16(chunk.r[0])
}
// Package pointer provides the access to the pointing device of the device,
// either of the mouse or the touch.
package pointer
import (
"context"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/mouse"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/touch"
"chromiumos/tast/local/coords"
"chromiumos/tast/testing"
)
// Context provides the interface to control a pointing device, abstracting
// over mouse and touch implementations.
type Context interface {
	// Close cleans up its internal resource.
	Close() error
	// Click returns a function to cause a click or a tap on the node.
	Click(finder *nodewith.Finder) uiauto.Action
	// ClickAt returns a function to cause a click or a tap on the specified
	// location.
	ClickAt(p coords.Point) uiauto.Action
	// MenuClick returns a function to cause a right-click or a long-press on the
	// node to cause the secondary behavior (i.e. opening context menu).
	MenuClick(finder *nodewith.Finder) uiauto.Action
	// Drag returns a function which initiates a dragging session, and conducts
	// the specified gestures. It ensures that the dragging session ends properly
	// at end.
	// Example:
	//   // Start from p0, move to p1, and then move to p2.
	//   pc.Drag(p0, pc.DragTo(p1, time.Second), pc.DragTo(p2, time.Second))
	Drag(initLoc coords.Point, gestures ...uiauto.Action) uiauto.Action
	// DragTo returns a function to cause a drag to the specified location.
	DragTo(p coords.Point, duration time.Duration) uiauto.Action
	// DragToNode returns a function to cause a drag to the specified node.
	DragToNode(f *nodewith.Finder, duration time.Duration) uiauto.Action
}
// MouseContext is a Context implemented with the mouse.
type MouseContext struct {
	ac    *uiauto.Context   // automation context used to resolve nodes
	tconn *chrome.TestConn  // test connection used to drive the mouse
}

// NewMouse creates a new instance of MouseContext.
func NewMouse(tconn *chrome.TestConn) *MouseContext {
	return &MouseContext{ac: uiauto.New(tconn), tconn: tconn}
}
// Close implements Context.Close. The mouse holds no resources, so this is
// a no-op.
func (mc *MouseContext) Close() error {
	return nil
}

// Click implements Context.Click with a left click on the node.
func (mc *MouseContext) Click(finder *nodewith.Finder) uiauto.Action {
	return mc.ac.LeftClick(finder)
}

// ClickAt implements Context.ClickAt with a left click at the location.
func (mc *MouseContext) ClickAt(loc coords.Point) uiauto.Action {
	return mouse.Click(mc.tconn, loc, mouse.LeftButton)
}

// MenuClick implements Context.MenuClick with a right click on the node.
func (mc *MouseContext) MenuClick(finder *nodewith.Finder) uiauto.Action {
	return mc.ac.RightClick(finder)
}
// Drag implements Context.Drag. It moves to the initial location, presses
// the left button, runs the given gestures, and guarantees the button is
// released even when a gesture fails.
func (mc *MouseContext) Drag(loc coords.Point, gestures ...uiauto.Action) uiauto.Action {
	gestureAction := uiauto.Combine("drag gesture", gestures...)
	return func(ctx context.Context) (err error) {
		pressed := false
		// always release the button if it was pressed, preserving the first
		// error encountered
		defer func() {
			if !pressed {
				return
			}
			releaseErr := mouse.Release(mc.tconn, mouse.LeftButton)(ctx)
			if releaseErr != nil {
				testing.ContextLog(ctx, "Failed to release the mouse button: ", releaseErr)
				if err == nil {
					err = releaseErr
				}
			}
		}()
		// reserve time at the end of ctx so the deferred release can still run
		sctx, cancel := ctxutil.Shorten(ctx, 2*time.Second)
		defer cancel()
		if err := uiauto.Combine(
			"start drag",
			mouse.Move(mc.tconn, loc, 0),
			mouse.Press(mc.tconn, mouse.LeftButton),
		)(sctx); err != nil {
			return errors.Wrap(err, "failed to start dragging")
		}
		pressed = true
		return gestureAction(sctx)
	}
}
// DragTo implements Context.DragTo by moving the mouse to the point over
// the given duration.
func (mc *MouseContext) DragTo(p coords.Point, duration time.Duration) uiauto.Action {
	return mouse.Move(mc.tconn, p, duration)
}

// DragToNode implements Context.DragToNode. The node's location is resolved
// when the action runs, then the mouse moves to its center point.
func (mc *MouseContext) DragToNode(f *nodewith.Finder, duration time.Duration) uiauto.Action {
	return func(ctx context.Context) error {
		loc, err := mc.ac.Location(ctx, f)
		if err != nil {
			return err
		}
		return mouse.Move(mc.tconn, loc.CenterPoint(), duration)(ctx)
	}
}
// TouchContext is a Context implemented with the touchscreen; it delegates
// every operation to the wrapped touch.Context.
type TouchContext struct {
	tc *touch.Context
}

// NewTouch creates a new TouchContext instance.
func NewTouch(ctx context.Context, tconn *chrome.TestConn) (*TouchContext, error) {
	tc, err := touch.New(ctx, tconn)
	if err != nil {
		return nil, err
	}
	return &TouchContext{tc: tc}, nil
}
// Close implements Context.Close by closing the underlying touch context.
func (tc *TouchContext) Close() error {
	return tc.tc.Close()
}

// Click implements Context.Click with a tap on the node.
func (tc *TouchContext) Click(finder *nodewith.Finder) uiauto.Action {
	return tc.tc.Tap(finder)
}

// ClickAt implements Context.ClickAt with a tap at the location.
func (tc *TouchContext) ClickAt(loc coords.Point) uiauto.Action {
	return tc.tc.TapAt(loc)
}

// MenuClick implements Context.MenuClick with a long press on the node.
func (tc *TouchContext) MenuClick(finder *nodewith.Finder) uiauto.Action {
	return tc.tc.LongPress(finder)
}

// Drag implements Context.Drag with a swipe gesture sequence.
func (tc *TouchContext) Drag(loc coords.Point, gestures ...uiauto.Action) uiauto.Action {
	return tc.tc.Swipe(loc, gestures...)
}

// DragTo implements Context.DragTo with a swipe to the location.
func (tc *TouchContext) DragTo(loc coords.Point, duration time.Duration) uiauto.Action {
	return tc.tc.SwipeTo(loc, duration)
}

// DragToNode implements Context.DragToNode with a swipe to the node.
func (tc *TouchContext) DragToNode(f *nodewith.Finder, duration time.Duration) uiauto.Action {
	return tc.tc.SwipeToNode(f, duration)
}
//go:generate go run gen-benchmarks.go
// Package mathNodes defines the floating-point function collection available for the GEP algorithm.
package mathNodes
import (
"log"
"math"
"github.com/gmlewis/gep/v2/functions"
)
// MathNode is a floating-point function used for the formation of GEP expressions.
type MathNode struct {
	index     int    // index of the node within the collection
	symbol    string // Karva symbol for this function
	terminals int    // number of input terminals the function consumes
	function  func(x []float64) float64 // the floating-point implementation
}
// Symbol returns the Karva symbol for this floating-point function.
func (n MathNode) Symbol() string {
	return n.symbol
}

// Terminals returns the number of input terminals for this floating-point function.
func (n MathNode) Terminals() int {
	return n.terminals
}

// BoolFunction is unused in this package; it logs an error and returns false.
func (n MathNode) BoolFunction([]bool) bool {
	log.Println("error calling BoolFunction on MathNode model.")
	return false
}

// IntFunction is unused in this package; it logs an error and returns 0.
func (n MathNode) IntFunction([]int) int {
	log.Println("error calling IntFunction on MathNode model.")
	return 0
}

// Float64Function calls the floating-point function and returns the result.
func (n MathNode) Float64Function(x []float64) float64 {
	return n.function(x)
}

// VectorIntFunction allows FuncMap to implement interace functions.FuncMap.
// It is unused in this package and returns an empty vector.
func (n MathNode) VectorIntFunction([]functions.VectorInt) functions.VectorInt {
	return functions.VectorInt{}
}
// Math lists all the available floating-point functions for this package.
//
// Each entry maps a Karva symbol to a MathNode{index, symbol, terminals, function}.
// The index values are non-contiguous; they presumably form a stable external
// numbering — TODO confirm against the serialization format.
var Math = functions.FuncMap{
	// TODO(gmlewis): Change functions to operate on the entire length of the slice.

	// Arithmetic operators.
	"+": MathNode{0, "+", 2, func(x []float64) float64 { return (x[0] + x[1]) }},
	"-": MathNode{1, "-", 2, func(x []float64) float64 { return (x[0] - x[1]) }},
	"*": MathNode{2, "*", 2, func(x []float64) float64 { return (x[0] * x[1]) }},
	"/": MathNode{3, "/", 2, func(x []float64) float64 { return (x[0] / x[1]) }},
	"Mod": MathNode{4, "Mod", 2, func(x []float64) float64 { return gepMod(x[0], x[1]) }},
	// Powers, roots, exponentials, and logarithms.
	"Pow": MathNode{5, "Pow", 2, func(x []float64) float64 { return math.Pow(x[0], x[1]) }},
	"Sqrt": MathNode{6, "Sqrt", 1, func(x []float64) float64 { return math.Sqrt(x[0]) }},
	"Exp": MathNode{7, "Exp", 1, func(x []float64) float64 { return math.Exp(x[0]) }},
	"Pow10": MathNode{8, "Pow10", 1, func(x []float64) float64 { return math.Pow(10.0, x[0]) }},
	"Ln": MathNode{9, "Ln", 1, func(x []float64) float64 { return math.Log(x[0]) }},
	"Log": MathNode{10, "Log", 1, func(x []float64) float64 { return math.Log10(x[0]) }},
	"Log2": MathNode{83, "Log2", 2, func(x []float64) float64 { return gepLog2(x[0], x[1]) }}, // log of x[0] in base x[1], despite the name
	// Rounding, absolute value, and simple unary transforms.
	"Floor": MathNode{12, "Floor", 1, func(x []float64) float64 { return math.Floor(x[0]) }},
	"Ceil": MathNode{13, "Ceil", 1, func(x []float64) float64 { return math.Ceil(x[0]) }},
	"Abs": MathNode{14, "Abs", 1, func(x []float64) float64 { return math.Abs(x[0]) }},
	"Inv": MathNode{15, "Inv", 1, func(x []float64) float64 { return (1.0 / (x[0])) }},
	"Neg": MathNode{17, "Neg", 1, func(x []float64) float64 { return (-(x[0])) }},
	"Nop": MathNode{16, "Nop", 1, func(x []float64) float64 { return (x[0]) }},
	// Small integer powers and odd/even roots.
	"X2": MathNode{76, "X2", 1, func(x []float64) float64 { return math.Pow(x[0], 2.0) }},
	"X3": MathNode{77, "X3", 1, func(x []float64) float64 { return math.Pow(x[0], 3.0) }},
	"X4": MathNode{78, "X4", 1, func(x []float64) float64 { return math.Pow(x[0], 4.0) }},
	"X5": MathNode{79, "X5", 1, func(x []float64) float64 { return math.Pow(x[0], 5.0) }},
	"3Rt": MathNode{80, "3Rt", 1, func(x []float64) float64 { return gep3Rt(x[0]) }},
	"4Rt": MathNode{81, "4Rt", 1, func(x []float64) float64 { return math.Pow(x[0], (1.0 / 4.0)) }},
	"5Rt": MathNode{82, "5Rt", 1, func(x []float64) float64 { return gep5Rt(x[0]) }},
	// Multi-argument arithmetic.
	"Add3": MathNode{84, "Add3", 3, func(x []float64) float64 { return (x[0] + x[1] + x[2]) }},
	"Sub3": MathNode{86, "Sub3", 3, func(x []float64) float64 { return (x[0] - x[1] - x[2]) }},
	"Mul3": MathNode{88, "Mul3", 3, func(x []float64) float64 { return (x[0] * x[1] * x[2]) }},
	"Div3": MathNode{90, "Div3", 3, func(x []float64) float64 { return (x[0] / x[1] / x[2]) }},
	"Add4": MathNode{85, "Add4", 4, func(x []float64) float64 { return (x[0] + x[1] + x[2] + x[3]) }},
	"Sub4": MathNode{87, "Sub4", 4, func(x []float64) float64 { return (x[0] - x[1] - x[2] - x[3]) }},
	"Mul4": MathNode{89, "Mul4", 4, func(x []float64) float64 { return (x[0] * x[1] * x[2] * x[3]) }},
	"Div4": MathNode{91, "Div4", 4, func(x []float64) float64 { return (x[0] / x[1] / x[2] / x[3]) }},
	// Minimum, maximum, and average.
	"Min2": MathNode{92, "Min2", 2, func(x []float64) float64 { return gepMin2(x[0], x[1]) }},
	"Min3": MathNode{93, "Min3", 3, func(x []float64) float64 { return gepMin3(x[0], x[1], x[2]) }},
	"Min4": MathNode{94, "Min4", 4, func(x []float64) float64 { return gepMin4(x[0], x[1], x[2], x[3]) }},
	"Max2": MathNode{95, "Max2", 2, func(x []float64) float64 { return gepMax2(x[0], x[1]) }},
	"Max3": MathNode{96, "Max3", 3, func(x []float64) float64 { return gepMax3(x[0], x[1], x[2]) }},
	"Max4": MathNode{97, "Max4", 4, func(x []float64) float64 { return gepMax4(x[0], x[1], x[2], x[3]) }},
	"Avg2": MathNode{98, "Avg2", 2, func(x []float64) float64 { return ((x[0] + x[1]) / 2.0) }},
	"Avg3": MathNode{99, "Avg3", 3, func(x []float64) float64 { return ((x[0] + x[1] + x[2]) / 3.0) }},
	"Avg4": MathNode{100, "Avg4", 4, func(x []float64) float64 { return ((x[0] + x[1] + x[2] + x[3]) / 4.0) }},
	// Logistic (sigmoid) functions of the summed inputs.
	"Logi": MathNode{11, "Logi", 1, func(x []float64) float64 { return gepLogi(x[0]) }},
	"Logi2": MathNode{101, "Logi2", 2, func(x []float64) float64 { return gepLogi2(x[0], x[1]) }},
	"Logi3": MathNode{102, "Logi3", 3, func(x []float64) float64 { return gepLogi3(x[0], x[1], x[2]) }},
	"Logi4": MathNode{103, "Logi4", 4, func(x []float64) float64 { return gepLogi4(x[0], x[1], x[2], x[3]) }},
	// Gaussian functions of the summed inputs.
	"Gau": MathNode{104, "Gau", 1, func(x []float64) float64 { return gepGau(x[0]) }},
	"Gau2": MathNode{105, "Gau2", 2, func(x []float64) float64 { return gepGau2(x[0], x[1]) }},
	"Gau3": MathNode{106, "Gau3", 3, func(x []float64) float64 { return gepGau3(x[0], x[1], x[2]) }},
	"Gau4": MathNode{107, "Gau4", 4, func(x []float64) float64 { return gepGau4(x[0], x[1], x[2], x[3]) }},
	// Constant-valued functions (their inputs are ignored).
	"Zero": MathNode{70, "Zero", 1, func(x []float64) float64 { return (0.0) }},
	"One": MathNode{71, "One", 1, func(x []float64) float64 { return (1.0) }},
	"Zero2": MathNode{72, "Zero2", 2, func(x []float64) float64 { return (0.0) }},
	"One2": MathNode{73, "One2", 2, func(x []float64) float64 { return (1.0) }},
	"Pi": MathNode{74, "Pi", 1, func(x []float64) float64 { return (math.Pi) }},
	"E": MathNode{75, "E", 1, func(x []float64) float64 { return (math.E) }},
	// Trigonometric functions and their inverses.
	"Sin": MathNode{18, "Sin", 1, func(x []float64) float64 { return math.Sin(x[0]) }},
	"Cos": MathNode{19, "Cos", 1, func(x []float64) float64 { return math.Cos(x[0]) }},
	"Tan": MathNode{20, "Tan", 1, func(x []float64) float64 { return math.Tan(x[0]) }},
	"Csc": MathNode{21, "Csc", 1, func(x []float64) float64 { return (1.0 / math.Sin(x[0])) }},
	"Sec": MathNode{22, "Sec", 1, func(x []float64) float64 { return (1.0 / math.Cos(x[0])) }},
	"Cot": MathNode{23, "Cot", 1, func(x []float64) float64 { return (1.0 / math.Tan(x[0])) }},
	"Asin": MathNode{24, "Asin", 1, func(x []float64) float64 { return math.Asin(x[0]) }},
	"Acos": MathNode{25, "Acos", 1, func(x []float64) float64 { return math.Acos(x[0]) }},
	"Atan": MathNode{26, "Atan", 1, func(x []float64) float64 { return math.Atan(x[0]) }},
	"Acsc": MathNode{27, "Acsc", 1, func(x []float64) float64 { return gepAcsc(x[0]) }},
	"Asec": MathNode{28, "Asec", 1, func(x []float64) float64 { return gepAsec(x[0]) }},
	"Acot": MathNode{29, "Acot", 1, func(x []float64) float64 { return gepAcot(x[0]) }},
	// Hyperbolic functions and their inverses.
	"Sinh": MathNode{30, "Sinh", 1, func(x []float64) float64 { return math.Sinh(x[0]) }},
	"Cosh": MathNode{31, "Cosh", 1, func(x []float64) float64 { return math.Cosh(x[0]) }},
	"Tanh": MathNode{32, "Tanh", 1, func(x []float64) float64 { return math.Tanh(x[0]) }},
	"Csch": MathNode{33, "Csch", 1, func(x []float64) float64 { return (1.0 / math.Sinh(x[0])) }},
	"Sech": MathNode{34, "Sech", 1, func(x []float64) float64 { return (1.0 / math.Cosh(x[0])) }},
	"Coth": MathNode{35, "Coth", 1, func(x []float64) float64 { return (1.0 / math.Tanh(x[0])) }},
	"Asinh": MathNode{36, "Asinh", 1, func(x []float64) float64 { return gepAsinh(x[0]) }},
	"Acosh": MathNode{37, "Acosh", 1, func(x []float64) float64 { return gepAcosh(x[0]) }},
	"Atanh": MathNode{38, "Atanh", 1, func(x []float64) float64 { return gepAtanh(x[0]) }},
	"Acsch": MathNode{39, "Acsch", 1, func(x []float64) float64 { return gepAcsch(x[0]) }},
	"Asech": MathNode{40, "Asech", 1, func(x []float64) float64 { return gepAsech(x[0]) }},
	"Acoth": MathNode{41, "Acoth", 1, func(x []float64) float64 { return gepAcoth(x[0]) }},
	// Fuzzy-logic operators returning 1.0 (true) or 0.0 (false); each
	// numbered variant uses a different threshold/comparison (see helpers).
	"NOT": MathNode{108, "NOT", 1, func(x []float64) float64 { return (1.0 - x[0]) }},
	"OR1": MathNode{42, "OR1", 2, func(x []float64) float64 { return gepOR1(x[0], x[1]) }},
	"OR2": MathNode{43, "OR2", 2, func(x []float64) float64 { return gepOR2(x[0], x[1]) }},
	"OR3": MathNode{109, "OR3", 2, func(x []float64) float64 { return gepOR3(x[0], x[1]) }},
	"OR4": MathNode{110, "OR4", 2, func(x []float64) float64 { return gepOR4(x[0], x[1]) }},
	"OR5": MathNode{111, "OR5", 2, func(x []float64) float64 { return gepOR5(x[0], x[1]) }},
	"OR6": MathNode{112, "OR6", 2, func(x []float64) float64 { return gepOR6(x[0], x[1]) }},
	"AND1": MathNode{44, "AND1", 2, func(x []float64) float64 { return gepAND1(x[0], x[1]) }},
	"AND2": MathNode{45, "AND2", 2, func(x []float64) float64 { return gepAND2(x[0], x[1]) }},
	"AND3": MathNode{113, "AND3", 2, func(x []float64) float64 { return gepAND3(x[0], x[1]) }},
	"AND4": MathNode{114, "AND4", 2, func(x []float64) float64 { return gepAND4(x[0], x[1]) }},
	"AND5": MathNode{115, "AND5", 2, func(x []float64) float64 { return gepAND5(x[0], x[1]) }},
	"AND6": MathNode{116, "AND6", 2, func(x []float64) float64 { return gepAND6(x[0], x[1]) }},
	// Two-input comparisons. The suffix letter selects what is returned:
	// A: x or y; B: 1 or 0; C: x+y or x-y; D: x*y or x/y; E: x+y or x*y;
	// F: x+y or sin(x*y); G: see the gep*2G helpers (not shown here).
	"LT2A": MathNode{46, "LT2A", 2, func(x []float64) float64 { return gepLT2A(x[0], x[1]) }},
	"GT2A": MathNode{47, "GT2A", 2, func(x []float64) float64 { return gepGT2A(x[0], x[1]) }},
	"LOE2A": MathNode{48, "LOE2A", 2, func(x []float64) float64 { return gepLOE2A(x[0], x[1]) }},
	"GOE2A": MathNode{49, "GOE2A", 2, func(x []float64) float64 { return gepGOE2A(x[0], x[1]) }},
	"ET2A": MathNode{50, "ET2A", 2, func(x []float64) float64 { return gepET2A(x[0], x[1]) }},
	"NET2A": MathNode{51, "NET2A", 2, func(x []float64) float64 { return gepNET2A(x[0], x[1]) }},
	"LT2B": MathNode{52, "LT2B", 2, func(x []float64) float64 { return gepLT2B(x[0], x[1]) }},
	"GT2B": MathNode{53, "GT2B", 2, func(x []float64) float64 { return gepGT2B(x[0], x[1]) }},
	"LOE2B": MathNode{54, "LOE2B", 2, func(x []float64) float64 { return gepLOE2B(x[0], x[1]) }},
	"GOE2B": MathNode{55, "GOE2B", 2, func(x []float64) float64 { return gepGOE2B(x[0], x[1]) }},
	"ET2B": MathNode{56, "ET2B", 2, func(x []float64) float64 { return gepET2B(x[0], x[1]) }},
	"NET2B": MathNode{57, "NET2B", 2, func(x []float64) float64 { return gepNET2B(x[0], x[1]) }},
	"LT2C": MathNode{117, "LT2C", 2, func(x []float64) float64 { return gepLT2C(x[0], x[1]) }},
	"GT2C": MathNode{118, "GT2C", 2, func(x []float64) float64 { return gepGT2C(x[0], x[1]) }},
	"LOE2C": MathNode{119, "LOE2C", 2, func(x []float64) float64 { return gepLOE2C(x[0], x[1]) }},
	"GOE2C": MathNode{120, "GOE2C", 2, func(x []float64) float64 { return gepGOE2C(x[0], x[1]) }},
	"ET2C": MathNode{121, "ET2C", 2, func(x []float64) float64 { return gepET2C(x[0], x[1]) }},
	"NET2C": MathNode{122, "NET2C", 2, func(x []float64) float64 { return gepNET2C(x[0], x[1]) }},
	"LT2D": MathNode{123, "LT2D", 2, func(x []float64) float64 { return gepLT2D(x[0], x[1]) }},
	"GT2D": MathNode{124, "GT2D", 2, func(x []float64) float64 { return gepGT2D(x[0], x[1]) }},
	"LOE2D": MathNode{125, "LOE2D", 2, func(x []float64) float64 { return gepLOE2D(x[0], x[1]) }},
	"GOE2D": MathNode{126, "GOE2D", 2, func(x []float64) float64 { return gepGOE2D(x[0], x[1]) }},
	"ET2D": MathNode{127, "ET2D", 2, func(x []float64) float64 { return gepET2D(x[0], x[1]) }},
	"NET2D": MathNode{128, "NET2D", 2, func(x []float64) float64 { return gepNET2D(x[0], x[1]) }},
	"LT2E": MathNode{129, "LT2E", 2, func(x []float64) float64 { return gepLT2E(x[0], x[1]) }},
	"GT2E": MathNode{130, "GT2E", 2, func(x []float64) float64 { return gepGT2E(x[0], x[1]) }},
	"LOE2E": MathNode{131, "LOE2E", 2, func(x []float64) float64 { return gepLOE2E(x[0], x[1]) }},
	"GOE2E": MathNode{132, "GOE2E", 2, func(x []float64) float64 { return gepGOE2E(x[0], x[1]) }},
	"ET2E": MathNode{133, "ET2E", 2, func(x []float64) float64 { return gepET2E(x[0], x[1]) }},
	"NET2E": MathNode{134, "NET2E", 2, func(x []float64) float64 { return gepNET2E(x[0], x[1]) }},
	"LT2F": MathNode{135, "LT2F", 2, func(x []float64) float64 { return gepLT2F(x[0], x[1]) }},
	"GT2F": MathNode{136, "GT2F", 2, func(x []float64) float64 { return gepGT2F(x[0], x[1]) }},
	"LOE2F": MathNode{137, "LOE2F", 2, func(x []float64) float64 { return gepLOE2F(x[0], x[1]) }},
	"GOE2F": MathNode{138, "GOE2F", 2, func(x []float64) float64 { return gepGOE2F(x[0], x[1]) }},
	"ET2F": MathNode{139, "ET2F", 2, func(x []float64) float64 { return gepET2F(x[0], x[1]) }},
	"NET2F": MathNode{140, "NET2F", 2, func(x []float64) float64 { return gepNET2F(x[0], x[1]) }},
	"LT2G": MathNode{141, "LT2G", 2, func(x []float64) float64 { return gepLT2G(x[0], x[1]) }},
	"GT2G": MathNode{142, "GT2G", 2, func(x []float64) float64 { return gepGT2G(x[0], x[1]) }},
	"LOE2G": MathNode{143, "LOE2G", 2, func(x []float64) float64 { return gepLOE2G(x[0], x[1]) }},
	"GOE2G": MathNode{144, "GOE2G", 2, func(x []float64) float64 { return gepGOE2G(x[0], x[1]) }},
	"ET2G": MathNode{145, "ET2G", 2, func(x []float64) float64 { return gepET2G(x[0], x[1]) }},
	"NET2G": MathNode{146, "NET2G", 2, func(x []float64) float64 { return gepNET2G(x[0], x[1]) }},
	// Three-input comparison families (see the gep*3* helpers).
	"LT3A": MathNode{58, "LT3A", 3, func(x []float64) float64 { return gepLT3A(x[0], x[1], x[2]) }},
	"GT3A": MathNode{59, "GT3A", 3, func(x []float64) float64 { return gepGT3A(x[0], x[1], x[2]) }},
	"LOE3A": MathNode{60, "LOE3A", 3, func(x []float64) float64 { return gepLOE3A(x[0], x[1], x[2]) }},
	"GOE3A": MathNode{61, "GOE3A", 3, func(x []float64) float64 { return gepGOE3A(x[0], x[1], x[2]) }},
	"ET3A": MathNode{62, "ET3A", 3, func(x []float64) float64 { return gepET3A(x[0], x[1], x[2]) }},
	"NET3A": MathNode{63, "NET3A", 3, func(x []float64) float64 { return gepNET3A(x[0], x[1], x[2]) }},
	"LT3B": MathNode{147, "LT3B", 3, func(x []float64) float64 { return gepLT3B(x[0], x[1], x[2]) }},
	"GT3B": MathNode{148, "GT3B", 3, func(x []float64) float64 { return gepGT3B(x[0], x[1], x[2]) }},
	"LOE3B": MathNode{149, "LOE3B", 3, func(x []float64) float64 { return gepLOE3B(x[0], x[1], x[2]) }},
	"GOE3B": MathNode{150, "GOE3B", 3, func(x []float64) float64 { return gepGOE3B(x[0], x[1], x[2]) }},
	"ET3B": MathNode{151, "ET3B", 3, func(x []float64) float64 { return gepET3B(x[0], x[1], x[2]) }},
	"NET3B": MathNode{152, "NET3B", 3, func(x []float64) float64 { return gepNET3B(x[0], x[1], x[2]) }},
	"LT3C": MathNode{153, "LT3C", 3, func(x []float64) float64 { return gepLT3C(x[0], x[1], x[2]) }},
	"GT3C": MathNode{154, "GT3C", 3, func(x []float64) float64 { return gepGT3C(x[0], x[1], x[2]) }},
	"LOE3C": MathNode{155, "LOE3C", 3, func(x []float64) float64 { return gepLOE3C(x[0], x[1], x[2]) }},
	"GOE3C": MathNode{156, "GOE3C", 3, func(x []float64) float64 { return gepGOE3C(x[0], x[1], x[2]) }},
	"ET3C": MathNode{157, "ET3C", 3, func(x []float64) float64 { return gepET3C(x[0], x[1], x[2]) }},
	"NET3C": MathNode{158, "NET3C", 3, func(x []float64) float64 { return gepNET3C(x[0], x[1], x[2]) }},
	"LT3D": MathNode{159, "LT3D", 3, func(x []float64) float64 { return gepLT3D(x[0], x[1], x[2]) }},
	"GT3D": MathNode{160, "GT3D", 3, func(x []float64) float64 { return gepGT3D(x[0], x[1], x[2]) }},
	"LOE3D": MathNode{161, "LOE3D", 3, func(x []float64) float64 { return gepLOE3D(x[0], x[1], x[2]) }},
	"GOE3D": MathNode{162, "GOE3D", 3, func(x []float64) float64 { return gepGOE3D(x[0], x[1], x[2]) }},
	"ET3D": MathNode{163, "ET3D", 3, func(x []float64) float64 { return gepET3D(x[0], x[1], x[2]) }},
	"NET3D": MathNode{164, "NET3D", 3, func(x []float64) float64 { return gepNET3D(x[0], x[1], x[2]) }},
	"LT3E": MathNode{165, "LT3E", 3, func(x []float64) float64 { return gepLT3E(x[0], x[1], x[2]) }},
	"GT3E": MathNode{166, "GT3E", 3, func(x []float64) float64 { return gepGT3E(x[0], x[1], x[2]) }},
	"LOE3E": MathNode{167, "LOE3E", 3, func(x []float64) float64 { return gepLOE3E(x[0], x[1], x[2]) }},
	"GOE3E": MathNode{168, "GOE3E", 3, func(x []float64) float64 { return gepGOE3E(x[0], x[1], x[2]) }},
	"ET3E": MathNode{169, "ET3E", 3, func(x []float64) float64 { return gepET3E(x[0], x[1], x[2]) }},
	"NET3E": MathNode{170, "NET3E", 3, func(x []float64) float64 { return gepNET3E(x[0], x[1], x[2]) }},
	"LT3F": MathNode{171, "LT3F", 3, func(x []float64) float64 { return gepLT3F(x[0], x[1], x[2]) }},
	"GT3F": MathNode{172, "GT3F", 3, func(x []float64) float64 { return gepGT3F(x[0], x[1], x[2]) }},
	"LOE3F": MathNode{173, "LOE3F", 3, func(x []float64) float64 { return gepLOE3F(x[0], x[1], x[2]) }},
	"GOE3F": MathNode{174, "GOE3F", 3, func(x []float64) float64 { return gepGOE3F(x[0], x[1], x[2]) }},
	"ET3F": MathNode{175, "ET3F", 3, func(x []float64) float64 { return gepET3F(x[0], x[1], x[2]) }},
	"NET3F": MathNode{176, "NET3F", 3, func(x []float64) float64 { return gepNET3F(x[0], x[1], x[2]) }},
	"LT3G": MathNode{177, "LT3G", 3, func(x []float64) float64 { return gepLT3G(x[0], x[1], x[2]) }},
	"GT3G": MathNode{178, "GT3G", 3, func(x []float64) float64 { return gepGT3G(x[0], x[1], x[2]) }},
	"LOE3G": MathNode{179, "LOE3G", 3, func(x []float64) float64 { return gepLOE3G(x[0], x[1], x[2]) }},
	"GOE3G": MathNode{180, "GOE3G", 3, func(x []float64) float64 { return gepGOE3G(x[0], x[1], x[2]) }},
	"ET3G": MathNode{181, "ET3G", 3, func(x []float64) float64 { return gepET3G(x[0], x[1], x[2]) }},
	"NET3G": MathNode{182, "NET3G", 3, func(x []float64) float64 { return gepNET3G(x[0], x[1], x[2]) }},
	"LT3H": MathNode{183, "LT3H", 3, func(x []float64) float64 { return gepLT3H(x[0], x[1], x[2]) }},
	"GT3H": MathNode{184, "GT3H", 3, func(x []float64) float64 { return gepGT3H(x[0], x[1], x[2]) }},
	"LOE3H": MathNode{185, "LOE3H", 3, func(x []float64) float64 { return gepLOE3H(x[0], x[1], x[2]) }},
	"GOE3H": MathNode{186, "GOE3H", 3, func(x []float64) float64 { return gepGOE3H(x[0], x[1], x[2]) }},
	"ET3H": MathNode{187, "ET3H", 3, func(x []float64) float64 { return gepET3H(x[0], x[1], x[2]) }},
	"NET3H": MathNode{188, "NET3H", 3, func(x []float64) float64 { return gepNET3H(x[0], x[1], x[2]) }},
	"LT3I": MathNode{189, "LT3I", 3, func(x []float64) float64 { return gepLT3I(x[0], x[1], x[2]) }},
	"GT3I": MathNode{190, "GT3I", 3, func(x []float64) float64 { return gepGT3I(x[0], x[1], x[2]) }},
	"LOE3I": MathNode{191, "LOE3I", 3, func(x []float64) float64 { return gepLOE3I(x[0], x[1], x[2]) }},
	"GOE3I": MathNode{192, "GOE3I", 3, func(x []float64) float64 { return gepGOE3I(x[0], x[1], x[2]) }},
	"ET3I": MathNode{193, "ET3I", 3, func(x []float64) float64 { return gepET3I(x[0], x[1], x[2]) }},
	"NET3I": MathNode{194, "NET3I", 3, func(x []float64) float64 { return gepNET3I(x[0], x[1], x[2]) }},
	"LT3J": MathNode{195, "LT3J", 3, func(x []float64) float64 { return gepLT3J(x[0], x[1], x[2]) }},
	"GT3J": MathNode{196, "GT3J", 3, func(x []float64) float64 { return gepGT3J(x[0], x[1], x[2]) }},
	"LOE3J": MathNode{197, "LOE3J", 3, func(x []float64) float64 { return gepLOE3J(x[0], x[1], x[2]) }},
	"GOE3J": MathNode{198, "GOE3J", 3, func(x []float64) float64 { return gepGOE3J(x[0], x[1], x[2]) }},
	"ET3J": MathNode{199, "ET3J", 3, func(x []float64) float64 { return gepET3J(x[0], x[1], x[2]) }},
	"NET3J": MathNode{200, "NET3J", 3, func(x []float64) float64 { return gepNET3J(x[0], x[1], x[2]) }},
	"LT3K": MathNode{201, "LT3K", 3, func(x []float64) float64 { return gepLT3K(x[0], x[1], x[2]) }},
	"GT3K": MathNode{202, "GT3K", 3, func(x []float64) float64 { return gepGT3K(x[0], x[1], x[2]) }},
	"LOE3K": MathNode{203, "LOE3K", 3, func(x []float64) float64 { return gepLOE3K(x[0], x[1], x[2]) }},
	"GOE3K": MathNode{204, "GOE3K", 3, func(x []float64) float64 { return gepGOE3K(x[0], x[1], x[2]) }},
	"ET3K": MathNode{205, "ET3K", 3, func(x []float64) float64 { return gepET3K(x[0], x[1], x[2]) }},
	"NET3K": MathNode{206, "NET3K", 3, func(x []float64) float64 { return gepNET3K(x[0], x[1], x[2]) }},
	"LT3L": MathNode{207, "LT3L", 3, func(x []float64) float64 { return gepLT3L(x[0], x[1], x[2]) }},
	"GT3L": MathNode{208, "GT3L", 3, func(x []float64) float64 { return gepGT3L(x[0], x[1], x[2]) }},
	"LOE3L": MathNode{209, "LOE3L", 3, func(x []float64) float64 { return gepLOE3L(x[0], x[1], x[2]) }},
	"GOE3L": MathNode{210, "GOE3L", 3, func(x []float64) float64 { return gepGOE3L(x[0], x[1], x[2]) }},
	"ET3L": MathNode{211, "ET3L", 3, func(x []float64) float64 { return gepET3L(x[0], x[1], x[2]) }},
	"NET3L": MathNode{212, "NET3L", 3, func(x []float64) float64 { return gepNET3L(x[0], x[1], x[2]) }},
	// Four-input comparison families (see the gep*4* helpers).
	"LT4A": MathNode{64, "LT4A", 4, func(x []float64) float64 { return gepLT4A(x[0], x[1], x[2], x[3]) }},
	"GT4A": MathNode{65, "GT4A", 4, func(x []float64) float64 { return gepGT4A(x[0], x[1], x[2], x[3]) }},
	"LOE4A": MathNode{66, "LOE4A", 4, func(x []float64) float64 { return gepLOE4A(x[0], x[1], x[2], x[3]) }},
	"GOE4A": MathNode{67, "GOE4A", 4, func(x []float64) float64 { return gepGOE4A(x[0], x[1], x[2], x[3]) }},
	"ET4A": MathNode{68, "ET4A", 4, func(x []float64) float64 { return gepET4A(x[0], x[1], x[2], x[3]) }},
	"NET4A": MathNode{69, "NET4A", 4, func(x []float64) float64 { return gepNET4A(x[0], x[1], x[2], x[3]) }},
	"LT4B": MathNode{213, "LT4B", 4, func(x []float64) float64 { return gepLT4B(x[0], x[1], x[2], x[3]) }},
	"GT4B": MathNode{214, "GT4B", 4, func(x []float64) float64 { return gepGT4B(x[0], x[1], x[2], x[3]) }},
	"LOE4B": MathNode{215, "LOE4B", 4, func(x []float64) float64 { return gepLOE4B(x[0], x[1], x[2], x[3]) }},
	"GOE4B": MathNode{216, "GOE4B", 4, func(x []float64) float64 { return gepGOE4B(x[0], x[1], x[2], x[3]) }},
	"ET4B": MathNode{217, "ET4B", 4, func(x []float64) float64 { return gepET4B(x[0], x[1], x[2], x[3]) }},
	"NET4B": MathNode{218, "NET4B", 4, func(x []float64) float64 { return gepNET4B(x[0], x[1], x[2], x[3]) }},
	"LT4C": MathNode{219, "LT4C", 4, func(x []float64) float64 { return gepLT4C(x[0], x[1], x[2], x[3]) }},
	"GT4C": MathNode{220, "GT4C", 4, func(x []float64) float64 { return gepGT4C(x[0], x[1], x[2], x[3]) }},
	"LOE4C": MathNode{221, "LOE4C", 4, func(x []float64) float64 { return gepLOE4C(x[0], x[1], x[2], x[3]) }},
	"GOE4C": MathNode{222, "GOE4C", 4, func(x []float64) float64 { return gepGOE4C(x[0], x[1], x[2], x[3]) }},
	"ET4C": MathNode{223, "ET4C", 4, func(x []float64) float64 { return gepET4C(x[0], x[1], x[2], x[3]) }},
	"NET4C": MathNode{224, "NET4C", 4, func(x []float64) float64 { return gepNET4C(x[0], x[1], x[2], x[3]) }},
	"LT4D": MathNode{225, "LT4D", 4, func(x []float64) float64 { return gepLT4D(x[0], x[1], x[2], x[3]) }},
	"GT4D": MathNode{226, "GT4D", 4, func(x []float64) float64 { return gepGT4D(x[0], x[1], x[2], x[3]) }},
	"LOE4D": MathNode{227, "LOE4D", 4, func(x []float64) float64 { return gepLOE4D(x[0], x[1], x[2], x[3]) }},
	"GOE4D": MathNode{228, "GOE4D", 4, func(x []float64) float64 { return gepGOE4D(x[0], x[1], x[2], x[3]) }},
	"ET4D": MathNode{229, "ET4D", 4, func(x []float64) float64 { return gepET4D(x[0], x[1], x[2], x[3]) }},
	"NET4D": MathNode{230, "NET4D", 4, func(x []float64) float64 { return gepNET4D(x[0], x[1], x[2], x[3]) }},
	"LT4E": MathNode{231, "LT4E", 4, func(x []float64) float64 { return gepLT4E(x[0], x[1], x[2], x[3]) }},
	"GT4E": MathNode{232, "GT4E", 4, func(x []float64) float64 { return gepGT4E(x[0], x[1], x[2], x[3]) }},
	"LOE4E": MathNode{233, "LOE4E", 4, func(x []float64) float64 { return gepLOE4E(x[0], x[1], x[2], x[3]) }},
	"GOE4E": MathNode{234, "GOE4E", 4, func(x []float64) float64 { return gepGOE4E(x[0], x[1], x[2], x[3]) }},
	"ET4E": MathNode{235, "ET4E", 4, func(x []float64) float64 { return gepET4E(x[0], x[1], x[2], x[3]) }},
	"NET4E": MathNode{236, "NET4E", 4, func(x []float64) float64 { return gepNET4E(x[0], x[1], x[2], x[3]) }},
	"LT4F": MathNode{237, "LT4F", 4, func(x []float64) float64 { return gepLT4F(x[0], x[1], x[2], x[3]) }},
	"GT4F": MathNode{238, "GT4F", 4, func(x []float64) float64 { return gepGT4F(x[0], x[1], x[2], x[3]) }},
	"LOE4F": MathNode{239, "LOE4F", 4, func(x []float64) float64 { return gepLOE4F(x[0], x[1], x[2], x[3]) }},
	"GOE4F": MathNode{240, "GOE4F", 4, func(x []float64) float64 { return gepGOE4F(x[0], x[1], x[2], x[3]) }},
	"ET4F": MathNode{241, "ET4F", 4, func(x []float64) float64 { return gepET4F(x[0], x[1], x[2], x[3]) }},
	"NET4F": MathNode{242, "NET4F", 4, func(x []float64) float64 { return gepNET4F(x[0], x[1], x[2], x[3]) }},
	"LT4G": MathNode{243, "LT4G", 4, func(x []float64) float64 { return gepLT4G(x[0], x[1], x[2], x[3]) }},
	"GT4G": MathNode{244, "GT4G", 4, func(x []float64) float64 { return gepGT4G(x[0], x[1], x[2], x[3]) }},
	"LOE4G": MathNode{245, "LOE4G", 4, func(x []float64) float64 { return gepLOE4G(x[0], x[1], x[2], x[3]) }},
	"GOE4G": MathNode{246, "GOE4G", 4, func(x []float64) float64 { return gepGOE4G(x[0], x[1], x[2], x[3]) }},
	"ET4G": MathNode{247, "ET4G", 4, func(x []float64) float64 { return gepET4G(x[0], x[1], x[2], x[3]) }},
	"NET4G": MathNode{248, "NET4G", 4, func(x []float64) float64 { return gepNET4G(x[0], x[1], x[2], x[3]) }},
	"LT4H": MathNode{249, "LT4H", 4, func(x []float64) float64 { return gepLT4H(x[0], x[1], x[2], x[3]) }},
	"GT4H": MathNode{250, "GT4H", 4, func(x []float64) float64 { return gepGT4H(x[0], x[1], x[2], x[3]) }},
	"LOE4H": MathNode{251, "LOE4H", 4, func(x []float64) float64 { return gepLOE4H(x[0], x[1], x[2], x[3]) }},
	"GOE4H": MathNode{252, "GOE4H", 4, func(x []float64) float64 { return gepGOE4H(x[0], x[1], x[2], x[3]) }},
	"ET4H": MathNode{253, "ET4H", 4, func(x []float64) float64 { return gepET4H(x[0], x[1], x[2], x[3]) }},
	"NET4H": MathNode{254, "NET4H", 4, func(x []float64) float64 { return gepNET4H(x[0], x[1], x[2], x[3]) }},
	"LT4I": MathNode{255, "LT4I", 4, func(x []float64) float64 { return gepLT4I(x[0], x[1], x[2], x[3]) }},
	"GT4I": MathNode{256, "GT4I", 4, func(x []float64) float64 { return gepGT4I(x[0], x[1], x[2], x[3]) }},
	"LOE4I": MathNode{257, "LOE4I", 4, func(x []float64) float64 { return gepLOE4I(x[0], x[1], x[2], x[3]) }},
	"GOE4I": MathNode{258, "GOE4I", 4, func(x []float64) float64 { return gepGOE4I(x[0], x[1], x[2], x[3]) }},
	"ET4I": MathNode{259, "ET4I", 4, func(x []float64) float64 { return gepET4I(x[0], x[1], x[2], x[3]) }},
	"NET4I": MathNode{260, "NET4I", 4, func(x []float64) float64 { return gepNET4I(x[0], x[1], x[2], x[3]) }},
	"LT4J": MathNode{261, "LT4J", 4, func(x []float64) float64 { return gepLT4J(x[0], x[1], x[2], x[3]) }},
	"GT4J": MathNode{262, "GT4J", 4, func(x []float64) float64 { return gepGT4J(x[0], x[1], x[2], x[3]) }},
	"LOE4J": MathNode{263, "LOE4J", 4, func(x []float64) float64 { return gepLOE4J(x[0], x[1], x[2], x[3]) }},
	"GOE4J": MathNode{264, "GOE4J", 4, func(x []float64) float64 { return gepGOE4J(x[0], x[1], x[2], x[3]) }},
	"ET4J": MathNode{265, "ET4J", 4, func(x []float64) float64 { return gepET4J(x[0], x[1], x[2], x[3]) }},
	"NET4J": MathNode{266, "NET4J", 4, func(x []float64) float64 { return gepNET4J(x[0], x[1], x[2], x[3]) }},
	"LT4K": MathNode{267, "LT4K", 4, func(x []float64) float64 { return gepLT4K(x[0], x[1], x[2], x[3]) }},
	"GT4K": MathNode{268, "GT4K", 4, func(x []float64) float64 { return gepGT4K(x[0], x[1], x[2], x[3]) }},
	"LOE4K": MathNode{269, "LOE4K", 4, func(x []float64) float64 { return gepLOE4K(x[0], x[1], x[2], x[3]) }},
	"GOE4K": MathNode{270, "GOE4K", 4, func(x []float64) float64 { return gepGOE4K(x[0], x[1], x[2], x[3]) }},
	"ET4K": MathNode{271, "ET4K", 4, func(x []float64) float64 { return gepET4K(x[0], x[1], x[2], x[3]) }},
	"NET4K": MathNode{272, "NET4K", 4, func(x []float64) float64 { return gepNET4K(x[0], x[1], x[2], x[3]) }},
	"LT4L": MathNode{273, "LT4L", 4, func(x []float64) float64 { return gepLT4L(x[0], x[1], x[2], x[3]) }},
	"GT4L": MathNode{274, "GT4L", 4, func(x []float64) float64 { return gepGT4L(x[0], x[1], x[2], x[3]) }},
	"LOE4L": MathNode{275, "LOE4L", 4, func(x []float64) float64 { return gepLOE4L(x[0], x[1], x[2], x[3]) }},
	"GOE4L": MathNode{276, "GOE4L", 4, func(x []float64) float64 { return gepGOE4L(x[0], x[1], x[2], x[3]) }},
	"ET4L": MathNode{277, "ET4L", 4, func(x []float64) float64 { return gepET4L(x[0], x[1], x[2], x[3]) }},
	"NET4L": MathNode{278, "NET4L", 4, func(x []float64) float64 { return gepNET4L(x[0], x[1], x[2], x[3]) }},
}
// gep3Rt returns the real cube root of x. Negative inputs are reflected
// through the origin because math.Pow would return NaN for a negative base.
func gep3Rt(x float64) float64 {
	const exp = 1.0 / 3.0
	if x >= 0.0 {
		return math.Pow(x, exp)
	}
	return -math.Pow(-x, exp)
}
// gep5Rt returns the real fifth root of x. Negative inputs are reflected
// through the origin because math.Pow would return NaN for a negative base.
func gep5Rt(x float64) float64 {
	const exp = 1.0 / 5.0
	if x >= 0.0 {
		return math.Pow(x, exp)
	}
	return -math.Pow(-x, exp)
}
// gepLog2 returns the logarithm of x in base y (log(x)/log(y)),
// or 0 when y is exactly zero.
func gepLog2(x, y float64) float64 {
	if y != 0.0 {
		return math.Log(x) / math.Log(y)
	}
	return 0.0
}
// gepMod returns the floating-point remainder of x/y using the
// truncated-division convention (the result carries the sign of x).
// The built-in math.Mod was avoided upstream for cases such as -1.0 and 0.2.
// math.Trunc replaces the previous int(x/y) conversion: converting a float
// whose value exceeds the int range is implementation-defined in Go, while
// Trunc is well-defined for all finite quotients.
func gepMod(x, y float64) float64 {
	q := x / y
	return (q - math.Trunc(q)) * y
}
// gepLogi returns the logistic (sigmoid) function 1/(1+e^-x).
//
// No overflow guard is needed in Go: math.Exp saturates to +Inf for very
// large arguments (giving 0) and underflows to 0 for very negative ones
// (giving 1), so the direct formula is valid for all x. The previous
// |x| > 709 guard computed Exp(+709) for large positive x, inverting the
// asymptotes (returning ~0 where the sigmoid approaches 1, and vice versa).
func gepLogi(x float64) float64 {
	return 1.0 / (1.0 + math.Exp(-x))
}
// gepLogi2 returns the logistic (sigmoid) function of the sum x+y.
// math.Exp saturates safely for huge arguments, so no overflow guard is
// needed; the previous |x+y| > 709 guard inverted the asymptotes.
func gepLogi2(x, y float64) float64 {
	return 1.0 / (1.0 + math.Exp(-(x + y)))
}
// gepLogi3 returns the logistic (sigmoid) function of the sum x+y+z.
// math.Exp saturates safely for huge arguments, so no overflow guard is
// needed; the previous |sum| > 709 guard inverted the asymptotes.
func gepLogi3(x, y, z float64) float64 {
	return 1.0 / (1.0 + math.Exp(-(x + y + z)))
}
// gepLogi4 returns the logistic (sigmoid) function of the sum a+b+c+d.
// math.Exp saturates safely for huge arguments, so no overflow guard is
// needed; the previous |sum| > 709 guard inverted the asymptotes.
func gepLogi4(a, b, c, d float64) float64 {
	return 1.0 / (1.0 + math.Exp(-(a + b + c + d)))
}
// gepGau returns the Gaussian function e^(-x^2).
func gepGau(x float64) float64 {
	sq := math.Pow(x, 2.0)
	return math.Exp(-sq)
}
// gepGau2 returns the Gaussian function e^(-(x+y)^2).
func gepGau2(x, y float64) float64 {
	sum := x + y
	return math.Exp(-math.Pow(sum, 2.0))
}
// gepGau3 returns the Gaussian function e^(-(x+y+z)^2).
func gepGau3(x, y, z float64) float64 {
	sum := x + y + z
	return math.Exp(-math.Pow(sum, 2.0))
}
// gepGau4 returns the Gaussian function e^(-(a+b+c+d)^2).
func gepGau4(a, b, c, d float64) float64 {
	sum := a + b + c + d
	return math.Exp(-math.Pow(sum, 2.0))
}
// gepAcsc returns the inverse cosecant of x as atan(sign(x)/sqrt(x^2-1)).
// For |x| < 1 the Sqrt argument is negative and the result is NaN, matching
// the real domain of arccsc.
func gepAcsc(x float64) float64 {
	var sign float64
	switch {
	case x < 0.0:
		sign = -1.0
	case x > 0.0:
		sign = 1.0
	}
	return math.Atan(sign / math.Sqrt(x*x-1.0))
}
// gepAsec returns the inverse secant of x. The exact endpoints are handled
// explicitly (arcsec(1) = 0, arcsec(-1) = pi); otherwise the result is
// pi/2 - atan(sign(x)/sqrt(x^2-1)). For |x| < 1 the result is NaN, matching
// the real domain of arcsec.
func gepAsec(x float64) float64 {
	var sign float64
	switch {
	case x < 0.0:
		sign = -1.0
	case x > 0.0:
		sign = 1.0
	}
	if x == 1.0 {
		return 0.0
	}
	if x == -1.0 {
		return 4.0 * math.Atan(1.0) // pi
	}
	return 2.0*math.Atan(1.0) - math.Atan(sign/math.Sqrt(x*x-1.0))
}
// gepAcot returns the inverse cotangent of x, computed as atan(1/x).
func gepAcot(x float64) float64 {
	inv := 1.0 / x
	return math.Atan(inv)
}
// gepAsinh returns the inverse hyperbolic sine of x.
//
// Uses math.Asinh rather than the hand-rolled log(x+sqrt(x*x+1)): the manual
// formula overflows (x*x -> +Inf) for |x| greater than about 1e154 and loses
// precision to cancellation for large negative x, while the library routine
// is accurate over the whole range.
func gepAsinh(x float64) float64 {
	return math.Asinh(x)
}
// gepAcosh returns the inverse hyperbolic cosine of x (NaN for x < 1).
//
// Uses math.Acosh rather than the hand-rolled log(x+sqrt(x*x-1)): the manual
// formula overflows (x*x -> +Inf) for x greater than about 1e154, while the
// library routine is accurate over the whole domain.
func gepAcosh(x float64) float64 {
	return math.Acosh(x)
}
// gepAtanh returns the inverse hyperbolic tangent of x (+/-Inf at x = +/-1,
// NaN for |x| > 1).
//
// Uses math.Atanh rather than the hand-rolled log((1+x)/(1-x))/2: the manual
// formula suffers catastrophic cancellation for |x| near zero, while the
// library routine is accurate over the whole domain.
func gepAtanh(x float64) float64 {
	return math.Atanh(x)
}
// gepAcsch returns the inverse hyperbolic cosecant of x, computed as
// log((sign(x)*sqrt(x^2+1)+1)/x).
func gepAcsch(x float64) float64 {
	var sign float64
	switch {
	case x < 0.0:
		sign = -1.0
	case x > 0.0:
		sign = 1.0
	}
	return math.Log((sign*math.Sqrt(x*x+1.0) + 1.0) / x)
}
// gepAsech returns the inverse hyperbolic secant of x, computed as
// log((sqrt(1-x^2)+1)/x). The result is NaN outside the domain (0, 1].
func gepAsech(x float64) float64 {
	num := math.Sqrt(-x*x+1.0) + 1.0
	return math.Log(num / x)
}
// gepAcoth returns the inverse hyperbolic cotangent of x, computed as
// log((x+1)/(x-1))/2. The result is NaN for |x| < 1.
func gepAcoth(x float64) float64 {
	ratio := (x + 1.0) / (x - 1.0)
	return math.Log(ratio) / 2.0
}
// gepMin2 returns the smaller of x and y (x when the comparison fails,
// e.g. for NaN operands).
func gepMin2(x, y float64) float64 {
	if y < x {
		return y
	}
	return x
}
// gepMin3 returns the smallest of its three arguments.
func gepMin3(x, y, z float64) float64 {
	m := x
	if y < m {
		m = y
	}
	if z < m {
		m = z
	}
	return m
}
// gepMin4 returns the smallest of its four arguments.
func gepMin4(a, b, c, d float64) float64 {
	m := a
	if b < m {
		m = b
	}
	if c < m {
		m = c
	}
	if d < m {
		m = d
	}
	return m
}
// gepMax2 returns the larger of x and y (x when the comparison fails,
// e.g. for NaN operands).
func gepMax2(x, y float64) float64 {
	if y > x {
		return y
	}
	return x
}
// gepMax3 returns the largest of its three arguments.
func gepMax3(x, y, z float64) float64 {
	m := x
	if y > m {
		m = y
	}
	if z > m {
		m = z
	}
	return m
}
// gepMax4 returns the largest of its four arguments.
func gepMax4(a, b, c, d float64) float64 {
	m := a
	if b > m {
		m = b
	}
	if c > m {
		m = c
	}
	if d > m {
		m = d
	}
	return m
}
// gepOR1 returns 1.0 when either argument is negative, otherwise 0.0.
func gepOR1(x, y float64) float64 {
	result := 0.0
	if x < 0.0 || y < 0.0 {
		result = 1.0
	}
	return result
}

// gepOR2 returns 1.0 when either argument is non-negative, otherwise 0.0.
func gepOR2(x, y float64) float64 {
	result := 0.0
	if x >= 0.0 || y >= 0.0 {
		result = 1.0
	}
	return result
}

// gepOR3 returns 1.0 when either argument is at most zero, otherwise 0.0.
func gepOR3(x, y float64) float64 {
	result := 0.0
	if x <= 0.0 || y <= 0.0 {
		result = 1.0
	}
	return result
}

// gepOR4 returns 1.0 when either argument is less than one, otherwise 0.0.
func gepOR4(x, y float64) float64 {
	result := 0.0
	if x < 1.0 || y < 1.0 {
		result = 1.0
	}
	return result
}

// gepOR5 returns 1.0 when either argument is at least one, otherwise 0.0.
func gepOR5(x, y float64) float64 {
	result := 0.0
	if x >= 1.0 || y >= 1.0 {
		result = 1.0
	}
	return result
}

// gepOR6 returns 1.0 when either argument is at most one, otherwise 0.0.
func gepOR6(x, y float64) float64 {
	result := 0.0
	if x <= 1.0 || y <= 1.0 {
		result = 1.0
	}
	return result
}
// gepAND1 returns 1.0 when both arguments are negative, otherwise 0.0.
func gepAND1(x, y float64) float64 {
	result := 0.0
	if x < 0.0 && y < 0.0 {
		result = 1.0
	}
	return result
}

// gepAND2 returns 1.0 when both arguments are non-negative, otherwise 0.0.
func gepAND2(x, y float64) float64 {
	result := 0.0
	if x >= 0.0 && y >= 0.0 {
		result = 1.0
	}
	return result
}

// gepAND3 returns 1.0 when both arguments are at most zero, otherwise 0.0.
func gepAND3(x, y float64) float64 {
	result := 0.0
	if x <= 0.0 && y <= 0.0 {
		result = 1.0
	}
	return result
}

// gepAND4 returns 1.0 when both arguments are less than one, otherwise 0.0.
func gepAND4(x, y float64) float64 {
	result := 0.0
	if x < 1.0 && y < 1.0 {
		result = 1.0
	}
	return result
}

// gepAND5 returns 1.0 when both arguments are at least one, otherwise 0.0.
func gepAND5(x, y float64) float64 {
	result := 0.0
	if x >= 1.0 && y >= 1.0 {
		result = 1.0
	}
	return result
}

// gepAND6 returns 1.0 when both arguments are at most one, otherwise 0.0.
func gepAND6(x, y float64) float64 {
	result := 0.0
	if x <= 1.0 && y <= 1.0 {
		result = 1.0
	}
	return result
}
// gepLT2A returns x when x < y, otherwise y.
func gepLT2A(x, y float64) float64 {
	result := y
	if x < y {
		result = x
	}
	return result
}

// gepGT2A returns x when x > y, otherwise y.
func gepGT2A(x, y float64) float64 {
	result := y
	if x > y {
		result = x
	}
	return result
}

// gepLOE2A returns x when x <= y, otherwise y.
func gepLOE2A(x, y float64) float64 {
	result := y
	if x <= y {
		result = x
	}
	return result
}

// gepGOE2A returns x when x >= y, otherwise y.
func gepGOE2A(x, y float64) float64 {
	result := y
	if x >= y {
		result = x
	}
	return result
}

// gepET2A returns x when x == y, otherwise y.
func gepET2A(x, y float64) float64 {
	result := y
	if x == y {
		result = x
	}
	return result
}

// gepNET2A returns x when x != y, otherwise y.
func gepNET2A(x, y float64) float64 {
	result := y
	if x != y {
		result = x
	}
	return result
}
// gepLT2B returns 1.0 when x < y, otherwise 0.0.
func gepLT2B(x, y float64) float64 {
	result := 0.0
	if x < y {
		result = 1.0
	}
	return result
}

// gepGT2B returns 1.0 when x > y, otherwise 0.0.
func gepGT2B(x, y float64) float64 {
	result := 0.0
	if x > y {
		result = 1.0
	}
	return result
}

// gepLOE2B returns 1.0 when x <= y, otherwise 0.0.
func gepLOE2B(x, y float64) float64 {
	result := 0.0
	if x <= y {
		result = 1.0
	}
	return result
}

// gepGOE2B returns 1.0 when x >= y, otherwise 0.0.
func gepGOE2B(x, y float64) float64 {
	result := 0.0
	if x >= y {
		result = 1.0
	}
	return result
}

// gepET2B returns 1.0 when x == y, otherwise 0.0.
func gepET2B(x, y float64) float64 {
	result := 0.0
	if x == y {
		result = 1.0
	}
	return result
}

// gepNET2B returns 1.0 when x != y, otherwise 0.0.
func gepNET2B(x, y float64) float64 {
	result := 0.0
	if x != y {
		result = 1.0
	}
	return result
}
func gepLT2C(x, y float64) float64 {
if x < y {
return (x + y)
}
return (x - y)
}
func gepGT2C(x, y float64) float64 {
if x > y {
return (x + y)
}
return (x - y)
}
func gepLOE2C(x, y float64) float64 {
if x <= y {
return (x + y)
}
return (x - y)
}
func gepGOE2C(x, y float64) float64 {
if x >= y {
return (x + y)
}
return (x - y)
}
func gepET2C(x, y float64) float64 {
if x == y {
return (x + y)
}
return (x - y)
}
func gepNET2C(x, y float64) float64 {
if x != y {
return (x + y)
}
return (x - y)
}
func gepLT2D(x, y float64) float64 {
if x < y {
return (x * y)
}
return (x / y)
}
func gepGT2D(x, y float64) float64 {
if x > y {
return (x * y)
}
return (x / y)
}
func gepLOE2D(x, y float64) float64 {
if x <= y {
return (x * y)
}
return (x / y)
}
func gepGOE2D(x, y float64) float64 {
if x >= y {
return (x * y)
}
return (x / y)
}
func gepET2D(x, y float64) float64 {
if x == y {
return (x * y)
}
return (x / y)
}
func gepNET2D(x, y float64) float64 {
if x != y {
return (x * y)
}
return (x / y)
}
func gepLT2E(x, y float64) float64 {
if x < y {
return (x + y)
}
return (x * y)
}
func gepGT2E(x, y float64) float64 {
if x > y {
return (x + y)
}
return (x * y)
}
func gepLOE2E(x, y float64) float64 {
if x <= y {
return (x + y)
}
return (x * y)
}
func gepGOE2E(x, y float64) float64 {
if x >= y {
return (x + y)
}
return (x * y)
}
func gepET2E(x, y float64) float64 {
if x == y {
return (x + y)
}
return (x * y)
}
func gepNET2E(x, y float64) float64 {
if x != y {
return (x + y)
}
return (x * y)
}
func gepLT2F(x, y float64) float64 {
if x < y {
return (x + y)
}
return math.Sin(x * y)
}
func gepGT2F(x, y float64) float64 {
if x > y {
return (x + y)
}
return math.Sin(x * y)
}
func gepLOE2F(x, y float64) float64 {
if x <= y {
return (x + y)
}
return math.Sin(x * y)
}
func gepGOE2F(x, y float64) float64 {
if x >= y {
return (x + y)
}
return math.Sin(x * y)
}
func gepET2F(x, y float64) float64 {
if x == y {
return (x + y)
}
return math.Sin(x * y)
}
func gepNET2F(x, y float64) float64 {
if x != y {
return (x + y)
}
return math.Sin(x * y)
}
func gepLT2G(x, y float64) float64 {
if x < y {
return (x + y)
}
return math.Atan(x * y)
}
func gepGT2G(x, y float64) float64 {
if x > y {
return (x + y)
}
return math.Atan(x * y)
}
func gepLOE2G(x, y float64) float64 {
if x <= y {
return (x + y)
}
return math.Atan(x * y)
}
func gepGOE2G(x, y float64) float64 {
if x >= y {
return (x + y)
}
return math.Atan(x * y)
}
func gepET2G(x, y float64) float64 {
if x == y {
return (x + y)
}
return math.Atan(x * y)
}
func gepNET2G(x, y float64) float64 {
if x != y {
return (x + y)
}
return math.Atan(x * y)
}
// The gep*3A family compares x against 0 and selects y when "x OP 0"
// holds, otherwise z.
func gepLT3A(x, y, z float64) float64 {
	if x < 0.0 {
		return y
	}
	return z
}
func gepGT3A(x, y, z float64) float64 {
	if x > 0.0 {
		return y
	}
	return z
}
func gepLOE3A(x, y, z float64) float64 {
	if x <= 0.0 {
		return y
	}
	return z
}
func gepGOE3A(x, y, z float64) float64 {
	if x >= 0.0 {
		return y
	}
	return z
}
func gepET3A(x, y, z float64) float64 {
	if x == 0.0 {
		return y
	}
	return z
}
func gepNET3A(x, y, z float64) float64 {
	if x != 0.0 {
		return y
	}
	return z
}

// The gep*3B family returns x+y when "(x+y) OP z" holds and z otherwise.
func gepLT3B(x, y, z float64) float64 {
	if (x + y) < z {
		return (x + y)
	}
	return z
}
func gepGT3B(x, y, z float64) float64 {
	if (x + y) > z {
		return (x + y)
	}
	return z
}
func gepLOE3B(x, y, z float64) float64 {
	if (x + y) <= z {
		return (x + y)
	}
	return z
}
func gepGOE3B(x, y, z float64) float64 {
	if (x + y) >= z {
		return (x + y)
	}
	return z
}
func gepET3B(x, y, z float64) float64 {
	if (x + y) == z {
		return (x + y)
	}
	return z
}
func gepNET3B(x, y, z float64) float64 {
	if (x + y) != z {
		return (x + y)
	}
	return z
}

// The gep*3C family returns x+y when "(x+y) OP z" holds and x+z otherwise.
func gepLT3C(x, y, z float64) float64 {
	if (x + y) < z {
		return (x + y)
	}
	return (x + z)
}
func gepGT3C(x, y, z float64) float64 {
	if (x + y) > z {
		return (x + y)
	}
	return (x + z)
}
func gepLOE3C(x, y, z float64) float64 {
	if (x + y) <= z {
		return (x + y)
	}
	return (x + z)
}
func gepGOE3C(x, y, z float64) float64 {
	if (x + y) >= z {
		return (x + y)
	}
	return (x + z)
}
func gepET3C(x, y, z float64) float64 {
	if (x + y) == z {
		return (x + y)
	}
	return (x + z)
}
func gepNET3C(x, y, z float64) float64 {
	if (x + y) != z {
		return (x + y)
	}
	return (x + z)
}

// The gep*3D family returns x+y when "(x+y) OP z" holds and x-z otherwise.
func gepLT3D(x, y, z float64) float64 {
	if (x + y) < z {
		return (x + y)
	}
	return (x - z)
}
func gepGT3D(x, y, z float64) float64 {
	if (x + y) > z {
		return (x + y)
	}
	return (x - z)
}
func gepLOE3D(x, y, z float64) float64 {
	if (x + y) <= z {
		return (x + y)
	}
	return (x - z)
}
func gepGOE3D(x, y, z float64) float64 {
	if (x + y) >= z {
		return (x + y)
	}
	return (x - z)
}
func gepET3D(x, y, z float64) float64 {
	if (x + y) == z {
		return (x + y)
	}
	return (x - z)
}
func gepNET3D(x, y, z float64) float64 {
	if (x + y) != z {
		return (x + y)
	}
	return (x - z)
}

// The gep*3E family returns x+y when "(x+y) OP z" holds and x*z otherwise.
func gepLT3E(x, y, z float64) float64 {
	if (x + y) < z {
		return (x + y)
	}
	return (x * z)
}
func gepGT3E(x, y, z float64) float64 {
	if (x + y) > z {
		return (x + y)
	}
	return (x * z)
}
func gepLOE3E(x, y, z float64) float64 {
	if (x + y) <= z {
		return (x + y)
	}
	return (x * z)
}
func gepGOE3E(x, y, z float64) float64 {
	if (x + y) >= z {
		return (x + y)
	}
	return (x * z)
}
func gepET3E(x, y, z float64) float64 {
	if (x + y) == z {
		return (x + y)
	}
	return (x * z)
}
func gepNET3E(x, y, z float64) float64 {
	if (x + y) != z {
		return (x + y)
	}
	return (x * z)
}

// The gep*3F family returns x+y when "(x+y) OP z" holds and x/z otherwise.
// Division by zero follows IEEE-754 float semantics (±Inf or NaN).
func gepLT3F(x, y, z float64) float64 {
	if (x + y) < z {
		return (x + y)
	}
	return (x / z)
}
func gepGT3F(x, y, z float64) float64 {
	if (x + y) > z {
		return (x + y)
	}
	return (x / z)
}
func gepLOE3F(x, y, z float64) float64 {
	if (x + y) <= z {
		return (x + y)
	}
	return (x / z)
}
func gepGOE3F(x, y, z float64) float64 {
	if (x + y) >= z {
		return (x + y)
	}
	return (x / z)
}
func gepET3F(x, y, z float64) float64 {
	if (x + y) == z {
		return (x + y)
	}
	return (x / z)
}
func gepNET3F(x, y, z float64) float64 {
	if (x + y) != z {
		return (x + y)
	}
	return (x / z)
}

// The gep*3G family returns x*y when "(x+y) OP z" holds and x+z otherwise.
func gepLT3G(x, y, z float64) float64 {
	if (x + y) < z {
		return (x * y)
	}
	return (x + z)
}
func gepGT3G(x, y, z float64) float64 {
	if (x + y) > z {
		return (x * y)
	}
	return (x + z)
}
func gepLOE3G(x, y, z float64) float64 {
	if (x + y) <= z {
		return (x * y)
	}
	return (x + z)
}
func gepGOE3G(x, y, z float64) float64 {
	if (x + y) >= z {
		return (x * y)
	}
	return (x + z)
}
func gepET3G(x, y, z float64) float64 {
	if (x + y) == z {
		return (x * y)
	}
	return (x + z)
}
func gepNET3G(x, y, z float64) float64 {
	if (x + y) != z {
		return (x * y)
	}
	return (x + z)
}

// The gep*3H family returns x*y when "(x+y) OP z" holds and x-z otherwise.
func gepLT3H(x, y, z float64) float64 {
	if (x + y) < z {
		return (x * y)
	}
	return (x - z)
}
func gepGT3H(x, y, z float64) float64 {
	if (x + y) > z {
		return (x * y)
	}
	return (x - z)
}
func gepLOE3H(x, y, z float64) float64 {
	if (x + y) <= z {
		return (x * y)
	}
	return (x - z)
}
func gepGOE3H(x, y, z float64) float64 {
	if (x + y) >= z {
		return (x * y)
	}
	return (x - z)
}
func gepET3H(x, y, z float64) float64 {
	if (x + y) == z {
		return (x * y)
	}
	return (x - z)
}
func gepNET3H(x, y, z float64) float64 {
	if (x + y) != z {
		return (x * y)
	}
	return (x - z)
}

// The gep*3I family returns x*y when "(x+y) OP z" holds and x*z otherwise.
func gepLT3I(x, y, z float64) float64 {
	if (x + y) < z {
		return (x * y)
	}
	return (x * z)
}
func gepGT3I(x, y, z float64) float64 {
	if (x + y) > z {
		return (x * y)
	}
	return (x * z)
}
func gepLOE3I(x, y, z float64) float64 {
	if (x + y) <= z {
		return (x * y)
	}
	return (x * z)
}
func gepGOE3I(x, y, z float64) float64 {
	if (x + y) >= z {
		return (x * y)
	}
	return (x * z)
}
func gepET3I(x, y, z float64) float64 {
	if (x + y) == z {
		return (x * y)
	}
	return (x * z)
}
func gepNET3I(x, y, z float64) float64 {
	if (x + y) != z {
		return (x * y)
	}
	return (x * z)
}

// The gep*3J family returns x*y when "(x+y) OP z" holds and x/z otherwise.
// Division by zero follows IEEE-754 float semantics (±Inf or NaN).
func gepLT3J(x, y, z float64) float64 {
	if (x + y) < z {
		return (x * y)
	}
	return (x / z)
}
func gepGT3J(x, y, z float64) float64 {
	if (x + y) > z {
		return (x * y)
	}
	return (x / z)
}
func gepLOE3J(x, y, z float64) float64 {
	if (x + y) <= z {
		return (x * y)
	}
	return (x / z)
}
func gepGOE3J(x, y, z float64) float64 {
	if (x + y) >= z {
		return (x * y)
	}
	return (x / z)
}
func gepET3J(x, y, z float64) float64 {
	if (x + y) == z {
		return (x * y)
	}
	return (x / z)
}
func gepNET3J(x, y, z float64) float64 {
	if (x + y) != z {
		return (x * y)
	}
	return (x / z)
}

// The gep*3K family returns x+y+z when "(x+y) OP z" holds and
// sin(x*y*z) otherwise.
func gepLT3K(x, y, z float64) float64 {
	if (x + y) < z {
		return (x + y + z)
	}
	return math.Sin(x * y * z)
}
func gepGT3K(x, y, z float64) float64 {
	if (x + y) > z {
		return (x + y + z)
	}
	return math.Sin(x * y * z)
}
func gepLOE3K(x, y, z float64) float64 {
	if (x + y) <= z {
		return (x + y + z)
	}
	return math.Sin(x * y * z)
}
func gepGOE3K(x, y, z float64) float64 {
	if (x + y) >= z {
		return (x + y + z)
	}
	return math.Sin(x * y * z)
}
func gepET3K(x, y, z float64) float64 {
	if (x + y) == z {
		return (x + y + z)
	}
	return math.Sin(x * y * z)
}
func gepNET3K(x, y, z float64) float64 {
	if (x + y) != z {
		return (x + y + z)
	}
	return math.Sin(x * y * z)
}

// The gep*3L family returns x+y+z when "(x+y) OP z" holds and
// atan(x*y*z) otherwise.
func gepLT3L(x, y, z float64) float64 {
	if (x + y) < z {
		return (x + y + z)
	}
	return math.Atan(x * y * z)
}
func gepGT3L(x, y, z float64) float64 {
	if (x + y) > z {
		return (x + y + z)
	}
	return math.Atan(x * y * z)
}
func gepLOE3L(x, y, z float64) float64 {
	if (x + y) <= z {
		return (x + y + z)
	}
	return math.Atan(x * y * z)
}
func gepGOE3L(x, y, z float64) float64 {
	if (x + y) >= z {
		return (x + y + z)
	}
	return math.Atan(x * y * z)
}
func gepET3L(x, y, z float64) float64 {
	if (x + y) == z {
		return (x + y + z)
	}
	return math.Atan(x * y * z)
}
func gepNET3L(x, y, z float64) float64 {
	if (x + y) != z {
		return (x + y + z)
	}
	return math.Atan(x * y * z)
}
// The gep*4A family compares a against b and selects c when "a OP b"
// holds, otherwise d.
func gepLT4A(a, b, c, d float64) float64 {
	if a < b {
		return c
	}
	return d
}
func gepGT4A(a, b, c, d float64) float64 {
	if a > b {
		return c
	}
	return d
}
func gepLOE4A(a, b, c, d float64) float64 {
	if a <= b {
		return c
	}
	return d
}
func gepGOE4A(a, b, c, d float64) float64 {
	if a >= b {
		return c
	}
	return d
}
func gepET4A(a, b, c, d float64) float64 {
	if a == b {
		return c
	}
	return d
}
func gepNET4A(a, b, c, d float64) float64 {
	if a != b {
		return c
	}
	return d
}

// The gep*4B family selects c when "(a+b) OP (c+d)" holds, otherwise d.
func gepLT4B(a, b, c, d float64) float64 {
	if (a + b) < (c + d) {
		return c
	}
	return d
}
func gepGT4B(a, b, c, d float64) float64 {
	if (a + b) > (c + d) {
		return c
	}
	return d
}
func gepLOE4B(a, b, c, d float64) float64 {
	if (a + b) <= (c + d) {
		return c
	}
	return d
}
func gepGOE4B(a, b, c, d float64) float64 {
	if (a + b) >= (c + d) {
		return c
	}
	return d
}
func gepET4B(a, b, c, d float64) float64 {
	if (a + b) == (c + d) {
		return c
	}
	return d
}
func gepNET4B(a, b, c, d float64) float64 {
	if (a + b) != (c + d) {
		return c
	}
	return d
}

// The gep*4C family returns a+b when "(a+b) OP (c+d)" holds and c+d otherwise.
func gepLT4C(a, b, c, d float64) float64 {
	if (a + b) < (c + d) {
		return (a + b)
	}
	return (c + d)
}
func gepGT4C(a, b, c, d float64) float64 {
	if (a + b) > (c + d) {
		return (a + b)
	}
	return (c + d)
}
func gepLOE4C(a, b, c, d float64) float64 {
	if (a + b) <= (c + d) {
		return (a + b)
	}
	return (c + d)
}
func gepGOE4C(a, b, c, d float64) float64 {
	if (a + b) >= (c + d) {
		return (a + b)
	}
	return (c + d)
}
func gepET4C(a, b, c, d float64) float64 {
	if (a + b) == (c + d) {
		return (a + b)
	}
	return (c + d)
}
func gepNET4C(a, b, c, d float64) float64 {
	if (a + b) != (c + d) {
		return (a + b)
	}
	return (c + d)
}

// The gep*4D family returns a+b when "(a+b) OP (c+d)" holds and c-d otherwise.
func gepLT4D(a, b, c, d float64) float64 {
	if (a + b) < (c + d) {
		return (a + b)
	}
	return (c - d)
}
func gepGT4D(a, b, c, d float64) float64 {
	if (a + b) > (c + d) {
		return (a + b)
	}
	return (c - d)
}
func gepLOE4D(a, b, c, d float64) float64 {
	if (a + b) <= (c + d) {
		return (a + b)
	}
	return (c - d)
}
func gepGOE4D(a, b, c, d float64) float64 {
	if (a + b) >= (c + d) {
		return (a + b)
	}
	return (c - d)
}
func gepET4D(a, b, c, d float64) float64 {
	if (a + b) == (c + d) {
		return (a + b)
	}
	return (c - d)
}
func gepNET4D(a, b, c, d float64) float64 {
	if (a + b) != (c + d) {
		return (a + b)
	}
	return (c - d)
}

// The gep*4E family returns a+b when "(a+b) OP (c+d)" holds and c*d otherwise.
func gepLT4E(a, b, c, d float64) float64 {
	if (a + b) < (c + d) {
		return (a + b)
	}
	return (c * d)
}
func gepGT4E(a, b, c, d float64) float64 {
	if (a + b) > (c + d) {
		return (a + b)
	}
	return (c * d)
}
func gepLOE4E(a, b, c, d float64) float64 {
	if (a + b) <= (c + d) {
		return (a + b)
	}
	return (c * d)
}
func gepGOE4E(a, b, c, d float64) float64 {
	if (a + b) >= (c + d) {
		return (a + b)
	}
	return (c * d)
}
func gepET4E(a, b, c, d float64) float64 {
	if (a + b) == (c + d) {
		return (a + b)
	}
	return (c * d)
}
func gepNET4E(a, b, c, d float64) float64 {
	if (a + b) != (c + d) {
		return (a + b)
	}
	return (c * d)
}

// The gep*4F family returns a+b when "(a+b) OP (c+d)" holds and c/d otherwise.
// Division by zero follows IEEE-754 float semantics (±Inf or NaN).
func gepLT4F(a, b, c, d float64) float64 {
	if (a + b) < (c + d) {
		return (a + b)
	}
	return (c / d)
}
func gepGT4F(a, b, c, d float64) float64 {
	if (a + b) > (c + d) {
		return (a + b)
	}
	return (c / d)
}
func gepLOE4F(a, b, c, d float64) float64 {
	if (a + b) <= (c + d) {
		return (a + b)
	}
	return (c / d)
}
func gepGOE4F(a, b, c, d float64) float64 {
	if (a + b) >= (c + d) {
		return (a + b)
	}
	return (c / d)
}
func gepET4F(a, b, c, d float64) float64 {
	if (a + b) == (c + d) {
		return (a + b)
	}
	return (c / d)
}
func gepNET4F(a, b, c, d float64) float64 {
	if (a + b) != (c + d) {
		return (a + b)
	}
	return (c / d)
}

// The gep*4G family returns a*b when "(a+b) OP (c+d)" holds and c+d otherwise.
func gepLT4G(a, b, c, d float64) float64 {
	if (a + b) < (c + d) {
		return (a * b)
	}
	return (c + d)
}
func gepGT4G(a, b, c, d float64) float64 {
	if (a + b) > (c + d) {
		return (a * b)
	}
	return (c + d)
}
func gepLOE4G(a, b, c, d float64) float64 {
	if (a + b) <= (c + d) {
		return (a * b)
	}
	return (c + d)
}
func gepGOE4G(a, b, c, d float64) float64 {
	if (a + b) >= (c + d) {
		return (a * b)
	}
	return (c + d)
}
func gepET4G(a, b, c, d float64) float64 {
	if (a + b) == (c + d) {
		return (a * b)
	}
	return (c + d)
}
func gepNET4G(a, b, c, d float64) float64 {
	if (a + b) != (c + d) {
		return (a * b)
	}
	return (c + d)
}

// The gep*4H family returns a*b when "(a+b) OP (c+d)" holds and c-d otherwise.
func gepLT4H(a, b, c, d float64) float64 {
	if (a + b) < (c + d) {
		return (a * b)
	}
	return (c - d)
}
func gepGT4H(a, b, c, d float64) float64 {
	if (a + b) > (c + d) {
		return (a * b)
	}
	return (c - d)
}
func gepLOE4H(a, b, c, d float64) float64 {
	if (a + b) <= (c + d) {
		return (a * b)
	}
	return (c - d)
}
func gepGOE4H(a, b, c, d float64) float64 {
	if (a + b) >= (c + d) {
		return (a * b)
	}
	return (c - d)
}
func gepET4H(a, b, c, d float64) float64 {
	if (a + b) == (c + d) {
		return (a * b)
	}
	return (c - d)
}
func gepNET4H(a, b, c, d float64) float64 {
	if (a + b) != (c + d) {
		return (a * b)
	}
	return (c - d)
}

// The gep*4I family returns a*b when "(a+b) OP (c+d)" holds and c*d otherwise.
func gepLT4I(a, b, c, d float64) float64 {
	if (a + b) < (c + d) {
		return (a * b)
	}
	return (c * d)
}
func gepGT4I(a, b, c, d float64) float64 {
	if (a + b) > (c + d) {
		return (a * b)
	}
	return (c * d)
}
func gepLOE4I(a, b, c, d float64) float64 {
	if (a + b) <= (c + d) {
		return (a * b)
	}
	return (c * d)
}
func gepGOE4I(a, b, c, d float64) float64 {
	if (a + b) >= (c + d) {
		return (a * b)
	}
	return (c * d)
}
func gepET4I(a, b, c, d float64) float64 {
	if (a + b) == (c + d) {
		return (a * b)
	}
	return (c * d)
}
func gepNET4I(a, b, c, d float64) float64 {
	if (a + b) != (c + d) {
		return (a * b)
	}
	return (c * d)
}

// The gep*4J family returns a*b when "(a+b) OP (c+d)" holds and c/d otherwise.
// Division by zero follows IEEE-754 float semantics (±Inf or NaN).
func gepLT4J(a, b, c, d float64) float64 {
	if (a + b) < (c + d) {
		return (a * b)
	}
	return (c / d)
}
func gepGT4J(a, b, c, d float64) float64 {
	if (a + b) > (c + d) {
		return (a * b)
	}
	return (c / d)
}
func gepLOE4J(a, b, c, d float64) float64 {
	if (a + b) <= (c + d) {
		return (a * b)
	}
	return (c / d)
}
func gepGOE4J(a, b, c, d float64) float64 {
	if (a + b) >= (c + d) {
		return (a * b)
	}
	return (c / d)
}
func gepET4J(a, b, c, d float64) float64 {
	if (a + b) == (c + d) {
		return (a * b)
	}
	return (c / d)
}
func gepNET4J(a, b, c, d float64) float64 {
	if (a + b) != (c + d) {
		return (a * b)
	}
	return (c / d)
}

// The gep*4K family returns sin(a*b) when "(a+b) OP (c+d)" holds and
// sin(c*d) otherwise.
func gepLT4K(a, b, c, d float64) float64 {
	if (a + b) < (c + d) {
		return math.Sin(a * b)
	}
	return math.Sin(c * d)
}
func gepGT4K(a, b, c, d float64) float64 {
	if (a + b) > (c + d) {
		return math.Sin(a * b)
	}
	return math.Sin(c * d)
}
func gepLOE4K(a, b, c, d float64) float64 {
	if (a + b) <= (c + d) {
		return math.Sin(a * b)
	}
	return math.Sin(c * d)
}
func gepGOE4K(a, b, c, d float64) float64 {
	if (a + b) >= (c + d) {
		return math.Sin(a * b)
	}
	return math.Sin(c * d)
}
func gepET4K(a, b, c, d float64) float64 {
	if (a + b) == (c + d) {
		return math.Sin(a * b)
	}
	return math.Sin(c * d)
}
func gepNET4K(a, b, c, d float64) float64 {
	if (a + b) != (c + d) {
		return math.Sin(a * b)
	}
	return math.Sin(c * d)
}

// The gep*4L family returns atan(a*b) when "(a+b) OP (c+d)" holds and
// atan(c*d) otherwise.
func gepLT4L(a, b, c, d float64) float64 {
	if (a + b) < (c + d) {
		return math.Atan(a * b)
	}
	return math.Atan(c * d)
}
func gepGT4L(a, b, c, d float64) float64 {
	if (a + b) > (c + d) {
		return math.Atan(a * b)
	}
	return math.Atan(c * d)
}
func gepLOE4L(a, b, c, d float64) float64 {
	if (a + b) <= (c + d) {
		return math.Atan(a * b)
	}
	return math.Atan(c * d)
}
func gepGOE4L(a, b, c, d float64) float64 {
	if (a + b) >= (c + d) {
		return math.Atan(a * b)
	}
	return math.Atan(c * d)
}
func gepET4L(a, b, c, d float64) float64 {
	if (a + b) == (c + d) {
		return math.Atan(a * b)
	}
	return math.Atan(c * d)
}
func gepNET4L(a, b, c, d float64) float64 {
	if (a + b) != (c + d) {
		return math.Atan(a * b)
	}
	return math.Atan(c * d)
}
package distuv
import (
"math"
"golang.org/x/exp/rand"
)
// Bernoulli represents a random variable whose value is 1 with probability P and
// value of zero with probability 1-P. The value of P must be between 0 and 1.
// More information at https://en.wikipedia.org/wiki/Bernoulli_distribution.
type Bernoulli struct {
	// P is the probability of drawing 1; it must lie in [0, 1].
	P float64
	// Src is the source of randomness; when nil the global rand source is used.
	Src rand.Source
}
// CDF computes the value of the cumulative density function at x.
func (b Bernoulli) CDF(x float64) float64 {
	switch {
	case x < 0:
		return 0
	case x < 1:
		return 1 - b.P
	default:
		return 1
	}
}

// Entropy returns the entropy of the distribution. It is 0 when the
// outcome is certain (P == 0 or P == 1).
func (b Bernoulli) Entropy() float64 {
	p := b.P
	if p == 0 || p == 1 {
		return 0
	}
	return -p*math.Log(p) - (1-p)*math.Log(1-p)
}

// ExKurtosis returns the excess kurtosis of the distribution.
func (b Bernoulli) ExKurtosis() float64 {
	variance := b.P * (1 - b.P)
	return (1 - 6*variance) / variance
}
// LogProb computes the natural logarithm of the value of the probability
// density function at x. Values other than 0 and 1 carry zero
// probability, so their log-probability is -Inf.
func (b Bernoulli) LogProb(x float64) float64 {
	switch x {
	case 0:
		return math.Log(1 - b.P)
	case 1:
		return math.Log(b.P)
	default:
		return math.Inf(-1)
	}
}

// Mean returns the mean of the probability distribution.
func (b Bernoulli) Mean() float64 {
	return b.P
}

// Median returns the median of the probability distribution.
func (b Bernoulli) Median() float64 {
	if b.P < 0.5 {
		return 0
	}
	if b.P > 0.5 {
		return 1
	}
	return 0.5
}

// NumParameters returns the number of parameters in the distribution.
func (Bernoulli) NumParameters() int {
	return 1
}
// Prob computes the value of the probability distribution at x.
// Only 0 and 1 have non-zero probability.
func (b Bernoulli) Prob(x float64) float64 {
	switch x {
	case 0:
		return 1 - b.P
	case 1:
		return b.P
	default:
		return 0
	}
}

// Quantile returns the minimum value of x from amongst all those values
// whose CDF value exceeds or equals p. It panics when p lies outside [0, 1].
func (b Bernoulli) Quantile(p float64) float64 {
	if p < 0 || p > 1 {
		panic(badPercentile)
	}
	if p > 1-b.P {
		return 1
	}
	return 0
}

// Rand returns a random sample drawn from the distribution. A nil Src
// falls back to the package-global random source.
func (b Bernoulli) Rand() float64 {
	sample := rand.Float64
	if b.Src != nil {
		sample = rand.New(b.Src).Float64
	}
	if sample() < b.P {
		return 1
	}
	return 0
}
// Skewness returns the skewness of the distribution.
func (b Bernoulli) Skewness() float64 {
return (1 - 2*b.P) / math.Sqrt(b.P*(1-b.P))
}
// StdDev returns the standard deviation of the probability distribution.
func (b Bernoulli) StdDev() float64 {
return math.Sqrt(b.Variance())
}
// Survival returns the survival function (complementary CDF) at x.
func (b Bernoulli) Survival(x float64) float64 {
if x < 0 {
return 1
}
if x < 1 {
return b.P
}
return 0
}
// Variance returns the variance of the probability distribution.
func (b Bernoulli) Variance() float64 {
return b.P * (1 - b.P)
} | stat/distuv/bernoulli.go | 0.919737 | 0.787278 | bernoulli.go | starcoder |
package common
import (
"github.com/keshav-kk/tsbs/pkg/data"
"time"
)
// SubsystemMeasurement represents a collection of measurement distributions and a start time.
type SubsystemMeasurement struct {
	// Timestamp is the simulated clock for this measurement; Tick advances it.
	Timestamp time.Time
	// Distributions holds one value generator per measured field.
	Distributions []Distribution
}
// NewSubsystemMeasurement creates a SubsystemMeasurement whose clock
// starts at start and that holds numDistributions (initially nil)
// distribution slots.
func NewSubsystemMeasurement(start time.Time, numDistributions int) *SubsystemMeasurement {
	m := &SubsystemMeasurement{Timestamp: start}
	m.Distributions = make([]Distribution, numDistributions)
	return m
}
// NewSubsystemMeasurementWithDistributionMakers creates a
// SubsystemMeasurement starting at start, with one distribution per
// maker, each constructed by calling the maker's DistributionMaker.
func NewSubsystemMeasurementWithDistributionMakers(start time.Time, makers []LabeledDistributionMaker) *SubsystemMeasurement {
	m := NewSubsystemMeasurement(start, len(makers))
	for idx, maker := range makers {
		m.Distributions[idx] = maker.DistributionMaker()
	}
	return m
}
// Tick advances every distribution one step and moves the
// measurement's clock forward by d.
func (m *SubsystemMeasurement) Tick(d time.Duration) {
	for idx := range m.Distributions {
		m.Distributions[idx].Advance()
	}
	m.Timestamp = m.Timestamp.Add(d)
}
// ToPoint fills the provided serialize.Point with measurements from the
// SubsystemMeasurement: the measurement name, the current timestamp, and
// one field per distribution. Field labels come from the labels slice,
// which is assumed to be in the same order as m.Distributions.
func (m *SubsystemMeasurement) ToPoint(p *data.Point, measurementName []byte, labels []LabeledDistributionMaker) {
	p.SetMeasurementName(measurementName)
	p.SetTimestamp(&m.Timestamp)
	for i, d := range m.Distributions {
		p.AppendField(labels[i].Label, d.Get())
	}
}
// ToPointAllInt64 fills in a serialize.Point with a given measurementName and
// all vales from the distributions stored as int64 (each float value is
// truncated toward zero). The labels for each field
// are given by the supplied []LabeledDistributionMaker, assuming that the distributions
// are in the same order.
func (m *SubsystemMeasurement) ToPointAllInt64(p *data.Point, measurementName []byte, labels []LabeledDistributionMaker) {
	p.SetMeasurementName(measurementName)
	p.SetTimestamp(&m.Timestamp)
	for i, d := range m.Distributions {
		p.AppendField(labels[i].Label, int64(d.Get()))
	}
}
// LabeledDistributionMaker combines a distribution maker with a label.
type LabeledDistributionMaker struct {
Label []byte
DistributionMaker func() Distribution
} | pkg/data/usecases/common/measurement.go | 0.779448 | 0.48377 | measurement.go | starcoder |
package mapper
import (
"fmt"
"strconv"
"strings"
"github.com/7vars/leikari/query"
)
// ApplyFilter evaluates the query AST rooted at node against the value v
// and reports whether v matches. Identifier values are extracted from v
// via the Int64Value/Float64Value/BoolValue/StringValue helpers, with the
// optional mapname functions controlling how identifier names map onto
// v's fields. Any node whose operands cannot be resolved to a typed
// value evaluates to false.
func ApplyFilter(node query.Node, v interface{}, mapname ...MapName) bool {
	switch n := node.(type) {
	case query.All:
		// The "match everything" node.
		return true
	case query.Comparsion:
		// Dispatch on the literal's declared type; the identifier's value
		// must resolve to the same type or the comparison is false.
		switch n.Value().Type() {
		case query.INT:
			if va, ok := Int64Value(n.Identifier().Name(), v, mapname...); ok {
				if vb, ok := n.Value().IntValue(); ok {
					return compareInt(va, n.Operator(), vb)
				}
			}
		case query.FLOAT:
			if va, ok := Float64Value(n.Identifier().Name(), v, mapname...); ok {
				if vb, ok := n.Value().FloatValue(); ok {
					return compareFloat(va, n.Operator(), vb)
				}
			}
		case query.BOOL:
			if va, ok := BoolValue(n.Identifier().Name(), v, mapname...); ok {
				if vb, ok := n.Value().BoolValue(); ok {
					return compareBool(va, n.Operator(), vb)
				}
			}
		case query.STRING:
			if va, ok := StringValue(n.Identifier().Name(), v, mapname...); ok {
				if vb, ok := n.Value().StringValue(); ok {
					return compareString(va, n.Operator(), vb)
				}
			}
		}
	case query.PrefixCondition:
		switch n.Prefix() {
		case query.GROUP:
			// Parenthesized group: evaluate its contents.
			return ApplyFilter(n.Right(), v, mapname...)
		case query.NOT:
			return !ApplyFilter(n.Right(), v, mapname...)
		case query.PR:
			// "present" test: true when the identifier resolves to any value.
			if ident, ok := n.Right().(query.Identifier); ok {
				_, ok := Value(ident.Name(), v, mapname...)
				return ok
			}
		}
	case query.LogicalCondition:
		switch n.Logical() {
		case query.AND:
			return ApplyFilter(n.Left(), v, mapname...) && ApplyFilter(n.Right(), v, mapname...)
		case query.OR:
			return ApplyFilter(n.Left(), v, mapname...) || ApplyFilter(n.Right(), v, mapname...)
		}
	}
	// Unknown node kinds and unresolvable operands never match.
	return false
}
// compareInt evaluates "a op b" for int64 operands. The CO/SW/EW
// (contains/starts-with/ends-with) operators compare the operands'
// base-10 string representations. Unknown operators never match.
func compareInt(a int64, op query.Operator, b int64) bool {
	switch op {
	case query.EQ:
		return a == b
	case query.NE:
		return a != b
	case query.CO:
		// FormatInt instead of Itoa(int(...)): converting int64 to int
		// would truncate values on 32-bit platforms.
		return strings.Contains(strconv.FormatInt(a, 10), strconv.FormatInt(b, 10))
	case query.SW:
		return strings.HasPrefix(strconv.FormatInt(a, 10), strconv.FormatInt(b, 10))
	case query.EW:
		return strings.HasSuffix(strconv.FormatInt(a, 10), strconv.FormatInt(b, 10))
	case query.GT:
		return a > b
	case query.GE:
		return a >= b
	case query.LT:
		return a < b
	case query.LE:
		return a <= b
	}
	return false
}
// compareFloat evaluates "a op b" for float64 operands. The CO/SW/EW
// (contains/starts-with/ends-with) operators compare the operands'
// "%f" string representations. Unknown operators never match.
func compareFloat(a float64, op query.Operator, b float64) bool {
	switch op {
	case query.EQ:
		return a == b
	case query.NE:
		return a != b
	case query.CO:
		return strings.Contains(fmt.Sprintf("%f", a), fmt.Sprintf("%f", b))
	case query.SW:
		return strings.HasPrefix(fmt.Sprintf("%f", a), fmt.Sprintf("%f", b))
	case query.EW:
		return strings.HasSuffix(fmt.Sprintf("%f", a), fmt.Sprintf("%f", b))
	case query.GT:
		return a > b
	case query.GE:
		return a >= b
	case query.LT:
		return a < b
	case query.LE:
		// Fixed: previously returned a >= b, which inverted less-or-equal
		// (and disagreed with compareInt/compareString).
		return a <= b
	}
	return false
}
// compareBool evaluates "a op b" for bool operands. Equality-like
// operators (EQ, CO, SW, EW) all reduce to a == b, and GE/LE are always
// true. GT and LT both reduce to a != b — i.e. any inequality counts as
// both "greater" and "less". NOTE(review): that symmetric GT/LT
// behavior looks deliberate but is unusual; confirm against the query
// language's intended boolean ordering.
func compareBool(a bool, op query.Operator, b bool) bool {
	switch op {
	case query.EQ:
		return a == b
	case query.NE:
		return a != b
	case query.CO:
		return a == b
	case query.SW:
		return a == b
	case query.EW:
		return a == b
	case query.GT:
		return a != b
	case query.GE:
		return true
	case query.LT:
		return a != b
	case query.LE:
		return true
	}
	return false
}
func compareString(a string, op query.Operator, b string) bool {
switch op {
case query.EQ:
return a == b
case query.NE:
return a != b
case query.CO:
return strings.Contains(a, b)
case query.SW:
return strings.HasPrefix(a, b)
case query.EW:
return strings.HasSuffix(a, b)
case query.GT:
return a > b
case query.GE:
return a >= b
case query.LT:
return a < b
case query.LE:
return a <= b
}
return false
} | mapper/node.go | 0.541166 | 0.429071 | node.go | starcoder |
package fastjson
import (
"fmt"
"strconv"
)
// Size returns the number of elements in the array.
func (arr *jsonArray) Size() int {
	return len(arr.value)
}

// IsZero reports whether the array holds no elements.
func (arr *jsonArray) IsZero() bool {
	return len(arr.value) == 0
}

// ContainsIndex reports whether index is below the array's length.
// NOTE(review): negative indexes also pass this check and would panic
// on access — confirm callers never pass them.
func (arr *jsonArray) ContainsIndex(index int) bool {
	return index < len(arr.value)
}
// GetInt converts the element at index to an int. Strings and []byte
// are parsed with strconv.Atoi; numeric types are converted directly
// (floats are truncated toward zero). Any other element type yields an
// error.
func (arr *jsonArray) GetInt(index int) (int, error) {
	if !arr.ContainsIndex(index) {
		return 0, fmt.Errorf("index %d out of range", index)
	}
	switch t := arr.value[index].(type) {
	case string:
		return strconv.Atoi(t)
	case []byte:
		return strconv.Atoi(string(t))
	case int:
		return t, nil
	case int32:
		return int(t), nil
	case int64:
		return int(t), nil
	case float32:
		return int(t), nil
	case float64:
		return int(t), nil
	}
	return 0, fmt.Errorf("data is %T, not int", arr.value[index])
}
// GetInt32 converts the element at index to an int32. Strings and
// []byte are parsed as base-10 32-bit integers; numeric types are
// converted directly (floats are truncated toward zero). Any other
// element type yields an error.
func (arr *jsonArray) GetInt32(index int) (int32, error) {
	if !arr.ContainsIndex(index) {
		return 0, fmt.Errorf("index %d out of range", index)
	}
	v := arr.value[index]
	switch t := v.(type) {
	case string:
		i, err := strconv.ParseInt(t, 10, 32)
		if err != nil {
			return 0, err
		}
		return int32(i), nil
	case []byte:
		i, err := strconv.ParseInt(string(t), 10, 32)
		if err != nil {
			return 0, err
		}
		return int32(i), nil
	case int:
		return int32(t), nil
	case int32:
		return t, nil
	case int64:
		return int32(t), nil
	case float32:
		return int32(t), nil
	case float64:
		return int32(t), nil
	default:
		return 0, fmt.Errorf("data is %T, not int32", v)
	}
}
// GetInt64 converts the element at index to an int64. Strings and
// []byte are parsed as base-10 64-bit integers; numeric types are
// converted directly (floats are truncated toward zero). Any other
// element type yields an error.
func (arr *jsonArray) GetInt64(index int) (int64, error) {
	if !arr.ContainsIndex(index) {
		return 0, fmt.Errorf("index %d out of range", index)
	}
	switch t := arr.value[index].(type) {
	case string:
		return strconv.ParseInt(t, 10, 64)
	case []byte:
		return strconv.ParseInt(string(t), 10, 64)
	case int:
		return int64(t), nil
	case int32:
		return int64(t), nil
	case int64:
		return t, nil
	case float32:
		return int64(t), nil
	case float64:
		return int64(t), nil
	}
	return 0, fmt.Errorf("data is %T, not int64", arr.value[index])
}
// GetFloat32 converts the element at index to a float32. Strings and
// []byte are parsed as 32-bit floats; numeric types are converted
// directly. Any other element type yields an error.
func (arr *jsonArray) GetFloat32(index int) (float32, error) {
	if !arr.ContainsIndex(index) {
		return 0, fmt.Errorf("index %d out of range", index)
	}
	v := arr.value[index]
	switch t := v.(type) {
	case string:
		f, err := strconv.ParseFloat(t, 32)
		if err != nil {
			return 0, err
		}
		return float32(f), nil
	case []byte:
		f, err := strconv.ParseFloat(string(t), 32)
		if err != nil {
			return 0, err
		}
		return float32(f), nil
	case int:
		return float32(t), nil
	case int32:
		return float32(t), nil
	case int64:
		return float32(t), nil
	case float32:
		return t, nil
	case float64:
		return float32(t), nil
	default:
		return 0, fmt.Errorf("data is %T, not float32", v)
	}
}
// GetFloat64 converts the element at index to a float64. Strings and
// []byte are parsed as 64-bit floats; numeric types are converted
// directly. Any other element type yields an error.
func (arr *jsonArray) GetFloat64(index int) (float64, error) {
	if !arr.ContainsIndex(index) {
		return 0, fmt.Errorf("index %d out of range", index)
	}
	switch t := arr.value[index].(type) {
	case string:
		return strconv.ParseFloat(t, 64)
	case []byte:
		return strconv.ParseFloat(string(t), 64)
	case int:
		return float64(t), nil
	case int32:
		return float64(t), nil
	case int64:
		return float64(t), nil
	case float32:
		return float64(t), nil
	case float64:
		return t, nil
	}
	return 0, fmt.Errorf("data is %T, not float64", arr.value[index])
}
// GetString converts the element at index to a string. []byte is
// converted directly and numeric values are formatted with fmt.Sprint.
// Any other element type yields an error.
func (arr *jsonArray) GetString(index int) (string, error) {
	if !arr.ContainsIndex(index) {
		return "", fmt.Errorf("index %d out of range", index)
	}
	switch t := arr.value[index].(type) {
	case string:
		return t, nil
	case []byte:
		return string(t), nil
	case int, int32, int64, float32, float64:
		return fmt.Sprint(t), nil
	}
	return "", fmt.Errorf("data is %T, not string", arr.value[index])
}
// GetBool returns the element at index as a bool. Unlike the numeric
// getters, no conversions are attempted: only a bool element succeeds.
func (arr *jsonArray) GetBool(index int) (bool, error) {
	if !arr.ContainsIndex(index) {
		return false, fmt.Errorf("index %d out of range", index)
	}
	if b, ok := arr.value[index].(bool); ok {
		return b, nil
	}
	return false, fmt.Errorf("data is %T, not bool", arr.value[index])
}
// GetJSONObject returns the element at index as a JSONObject. A string
// or []byte element is parsed as JSON; an existing JSONObject is
// returned as-is; a map[string]interface{} is wrapped without copying.
// Any other element type yields an error.
func (arr *jsonArray) GetJSONObject(index int) (JSONObject, error) {
	if !arr.ContainsIndex(index) {
		return nil, fmt.Errorf("index %d out of range", index)
	}
	v := arr.value[index]
	switch t := v.(type) {
	case string:
		return NewJSONObjectFrom([]byte(t))
	case []byte:
		return NewJSONObjectFrom(t)
	case JSONObject:
		return t, nil
	case map[string]interface{}:
		return &jsonObject{
			value: t,
		}, nil
	default:
		return nil, fmt.Errorf("data is %T, not string/[]byte/JSONObject/map[string]interface{}", v)
	}
}
// GetJSONArray returns the element at index as a JSONArray. A string
// or []byte element is parsed as JSON; an existing JSONArray is
// returned as-is; a []interface{} is wrapped without copying. Any
// other element type yields an error.
func (arr *jsonArray) GetJSONArray(index int) (JSONArray, error) {
	if !arr.ContainsIndex(index) {
		return nil, fmt.Errorf("index %d out of range", index)
	}
	v := arr.value[index]
	switch t := v.(type) {
	case string:
		return NewJSONArrayFrom([]byte(t))
	case []byte:
		return NewJSONArrayFrom(t)
	case JSONArray:
		return t, nil
	case []interface{}:
		return &jsonArray{
			value: t,
		}, nil
	default:
		return nil, fmt.Errorf("data is %T, not string/[]byte/JSONArray/[]interface{}", v)
	}
}
// Put appends value to the end of the array. It always returns nil;
// the error result exists to satisfy the interface.
func (arr *jsonArray) Put(value interface{}) error {
	arr.value = append(arr.value, value)
	return nil
}
// GetValue returns the array's backing slice directly (no copy), so
// mutations by the caller are visible to the array. It always returns
// a nil error.
func (arr *jsonArray) GetValue() ([]interface{}, error) {
	return arr.value, nil
}
// MarshalJSON encodes the backing slice as a JSON array.
// NOTE(review): the identifier "json" is not in this file's import
// block — presumably a package-level alias declared elsewhere in the
// package; confirm.
func (arr *jsonArray) MarshalJSON() ([]byte, error) {
	return json.Marshal(arr.value)
}
func (arr *jsonArray) Scan(dest []interface{}) error {
v, err := arr.MarshalJSON()
if err != nil {
return err
}
return json.Unmarshal(v, &dest)
} | json_array.go | 0.539226 | 0.429908 | json_array.go | starcoder |
package internal
import (
"github.com/Vale-sail/maroto/pkg/color"
"github.com/Vale-sail/maroto/pkg/consts"
"github.com/Vale-sail/maroto/pkg/props"
)
// MarotoGridPart is the abstraction to deal with the grid system inside the table list.
// It exposes the subset of the PDF builder that Create needs: grid
// layout, page geometry, and the text/line primitives.
type MarotoGridPart interface {
	// Grid System
	Row(height float64, closure func())
	Col(width uint, closure func())
	ColSpace(width uint)

	// Helpers
	SetBackgroundColor(color color.Color)
	GetCurrentOffset() float64
	GetPageSize() (width float64, height float64)
	GetPageMargins() (left float64, top float64, right float64, bottom float64)

	// Outside Col/Row Components
	Line(spaceHeight float64)

	// Inside Col/Row Components
	Text(text string, prop ...props.Text)
}
// TableList is the abstraction to create a table with header and contents.
// BindGrid supplies the grid implementation that Create draws on.
type TableList interface {
	Create(header []string, contents [][]string, prop ...props.TableList)
	BindGrid(part MarotoGridPart)
}
// tableList is the default TableList implementation. pdf is nil until
// BindGrid is called.
type tableList struct {
	pdf  MarotoGridPart
	text Text
	font Font
}
// NewTableList creates a tableList bound to the given text and font
// helpers. BindGrid must be called before Create, since Create draws
// through the bound grid.
func NewTableList(text Text, font Font) *tableList {
	return &tableList{
		text: text,
		font: font,
	}
}
// BindGrid binds the grid system that Create uses to draw the table.
func (s *tableList) BindGrid(pdf MarotoGridPart) {
	s.pdf = pdf
}
// Create renders a table: one header row built from header, followed by one
// row per entry in contents. prop optionally customizes fonts, grid sizes,
// alignment, alternated background and separator lines.
// Both header and contents must be non-empty, otherwise nothing is drawn.
func (s *tableList) Create(header []string, contents [][]string, prop ...props.TableList) {
	if len(header) == 0 || len(contents) == 0 {
		return
	}

	tableProp := props.TableList{}
	if len(prop) > 0 {
		tableProp = prop[0]
	}
	tableProp.MakeValid(header, contents)

	headerHeight := s.calcLinesHeight(header, tableProp.HeaderProp, tableProp.Align)

	// Draw the header row. Each cell text is copied into a per-iteration
	// variable so the closure passed to Col captures this iteration's value.
	s.pdf.Row(headerHeight+1, func() {
		for i, h := range header {
			hs := h
			s.pdf.Col(tableProp.HeaderProp.GridSizes[i], func() {
				s.pdf.Text(hs, tableProp.HeaderProp.ToTextProp(tableProp.Align, 0, false, 0.0))
			})
		}
	})

	// Vertical space between the header and the first content row.
	s.pdf.Row(tableProp.HeaderContentSpace, func() {
		s.pdf.ColSpace(0)
	})

	// Draw the content rows.
	for index, content := range contents {
		contentHeight := s.calcLinesHeight(content, tableProp.ContentProp, tableProp.Align)

		// Alternated background is applied to even-indexed rows only.
		if tableProp.AlternatedBackground != nil && index%2 == 0 {
			s.pdf.SetBackgroundColor(*tableProp.AlternatedBackground)
		}

		s.pdf.Row(contentHeight+1, func() {
			for i, c := range content {
				cs := c // per-iteration copy for the closure below
				s.pdf.Col(tableProp.ContentProp.GridSizes[i], func() {
					s.pdf.Text(cs, tableProp.ContentProp.ToTextProp(tableProp.Align, 0, false, 0.0))
				})
			}
		})

		// Restore the default background after an alternated row.
		if tableProp.AlternatedBackground != nil && index%2 == 0 {
			s.pdf.SetBackgroundColor(color.NewWhite())
		}

		if tableProp.Line {
			s.pdf.Line(1.0)
		}
	}
}
func (s *tableList) calcLinesHeight(textList []string, contentProp props.TableListContent, align consts.Align) float64 {
maxLines := 1.0
left, _, right, _ := s.pdf.GetPageMargins()
width, _ := s.pdf.GetPageSize()
usefulWidth := float64(width - left - right)
textProp := contentProp.ToTextProp(align, 0, false, 0.0)
for i, text := range textList {
gridSize := float64(contentProp.GridSizes[i])
percentSize := gridSize / consts.MaxGridSum
colWidth := usefulWidth * percentSize
qtdLines := float64(s.text.GetLinesQuantity(text, textProp, colWidth))
if qtdLines > maxLines {
maxLines = qtdLines
}
}
_, _, fontSize := s.font.GetFont()
// Font size corrected by the scale factor from "mm" inside gofpdf f.k
fontHeight := fontSize / s.font.GetScaleFactor()
return fontHeight * maxLines
} | internal/tablelist.go | 0.577972 | 0.448849 | tablelist.go | starcoder |
package stripe
import "encoding/json"
// A unit of time.
type ShippingRateDeliveryEstimateMaximumUnit string

// List of values that ShippingRateDeliveryEstimateMaximumUnit can take
const (
	ShippingRateDeliveryEstimateMaximumUnitBusinessDay ShippingRateDeliveryEstimateMaximumUnit = "business_day"
	ShippingRateDeliveryEstimateMaximumUnitDay         ShippingRateDeliveryEstimateMaximumUnit = "day"
	ShippingRateDeliveryEstimateMaximumUnitHour        ShippingRateDeliveryEstimateMaximumUnit = "hour"
	ShippingRateDeliveryEstimateMaximumUnitMonth       ShippingRateDeliveryEstimateMaximumUnit = "month"
	ShippingRateDeliveryEstimateMaximumUnitWeek        ShippingRateDeliveryEstimateMaximumUnit = "week"
)

// A unit of time.
type ShippingRateDeliveryEstimateMinimumUnit string

// List of values that ShippingRateDeliveryEstimateMinimumUnit can take
const (
	ShippingRateDeliveryEstimateMinimumUnitBusinessDay ShippingRateDeliveryEstimateMinimumUnit = "business_day"
	ShippingRateDeliveryEstimateMinimumUnitDay         ShippingRateDeliveryEstimateMinimumUnit = "day"
	ShippingRateDeliveryEstimateMinimumUnitHour        ShippingRateDeliveryEstimateMinimumUnit = "hour"
	ShippingRateDeliveryEstimateMinimumUnitMonth       ShippingRateDeliveryEstimateMinimumUnit = "month"
	ShippingRateDeliveryEstimateMinimumUnitWeek        ShippingRateDeliveryEstimateMinimumUnit = "week"
)

// Specifies whether the rate is considered inclusive of taxes or exclusive of taxes. One of `inclusive`, `exclusive`, or `unspecified`.
type ShippingRateTaxBehavior string

// List of values that ShippingRateTaxBehavior can take
const (
	ShippingRateTaxBehaviorExclusive   ShippingRateTaxBehavior = "exclusive"
	ShippingRateTaxBehaviorInclusive   ShippingRateTaxBehavior = "inclusive"
	ShippingRateTaxBehaviorUnspecified ShippingRateTaxBehavior = "unspecified"
)

// The type of calculation to use on the shipping rate. Can only be `fixed_amount` for now.
type ShippingRateType string

// List of values that ShippingRateType can take
const (
	ShippingRateTypeFixedAmount ShippingRateType = "fixed_amount"
)

// ShippingRateListParams is the set of parameters for listing your shipping rates.
type ShippingRateListParams struct {
	ListParams `form:"*"`
	// Only return shipping rates that are active or inactive.
	Active *bool `form:"active"`
	// A filter on the list, based on the object `created` field. The value can be a string with an integer Unix timestamp, or it can be a dictionary with a number of different query options.
	Created *int64 `form:"created"`
	// A filter on the list, based on the object `created` field. The value can be a string with an integer Unix timestamp, or it can be a dictionary with a number of different query options.
	CreatedRange *RangeQueryParams `form:"created"`
	// Only return shipping rates for the given currency.
	Currency *string `form:"currency"`
}

// The upper bound of the estimated range. If empty, represents no upper bound i.e., infinite.
type ShippingRateDeliveryEstimateMaximumParams struct {
	// A unit of time.
	Unit *string `form:"unit"`
	// Must be greater than 0.
	Value *int64 `form:"value"`
}

// The lower bound of the estimated range. If empty, represents no lower bound.
type ShippingRateDeliveryEstimateMinimumParams struct {
	// A unit of time.
	Unit *string `form:"unit"`
	// Must be greater than 0.
	Value *int64 `form:"value"`
}

// The estimated range for how long shipping will take, meant to be displayable to the customer. This will appear on CheckoutSessions.
type ShippingRateDeliveryEstimateParams struct {
	// The upper bound of the estimated range. If empty, represents no upper bound i.e., infinite.
	Maximum *ShippingRateDeliveryEstimateMaximumParams `form:"maximum"`
	// The lower bound of the estimated range. If empty, represents no lower bound.
	Minimum *ShippingRateDeliveryEstimateMinimumParams `form:"minimum"`
}

// Describes a fixed amount to charge for shipping. Must be present if type is `fixed_amount`.
type ShippingRateFixedAmountParams struct {
	// A non-negative integer in cents representing how much to charge.
	Amount *int64 `form:"amount"`
	// Three-letter [ISO currency code](https://www.iso.org/iso-4217-currency-codes.html), in lowercase. Must be a [supported currency](https://stripe.com/docs/currencies).
	Currency *string `form:"currency"`
}

// ShippingRateParams is the set of parameters for creating a new shipping rate object.
type ShippingRateParams struct {
	Params `form:"*"`
	// Whether the shipping rate can be used for new purchases. Defaults to `true`.
	Active *bool `form:"active"`
	// The estimated range for how long shipping will take, meant to be displayable to the customer. This will appear on CheckoutSessions.
	DeliveryEstimate *ShippingRateDeliveryEstimateParams `form:"delivery_estimate"`
	// The name of the shipping rate, meant to be displayable to the customer. This will appear on CheckoutSessions.
	DisplayName *string `form:"display_name"`
	// Describes a fixed amount to charge for shipping. Must be present if type is `fixed_amount`.
	FixedAmount *ShippingRateFixedAmountParams `form:"fixed_amount"`
	// Specifies whether the rate is considered inclusive of taxes or exclusive of taxes. One of `inclusive`, `exclusive`, or `unspecified`.
	TaxBehavior *string `form:"tax_behavior"`
	// A [tax code](https://stripe.com/docs/tax/tax-codes) ID. The Shipping tax code is `txcd_92010001`.
	TaxCode *string `form:"tax_code"`
	// The type of calculation to use on the shipping rate. Can only be `fixed_amount` for now.
	Type *string `form:"type"`
}

// The upper bound of the estimated range. If empty, represents no upper bound i.e., infinite.
type ShippingRateDeliveryEstimateMaximum struct {
	// A unit of time.
	Unit ShippingRateDeliveryEstimateMaximumUnit `json:"unit"`
	// Must be greater than 0.
	Value int64 `json:"value"`
}

// The lower bound of the estimated range. If empty, represents no lower bound.
type ShippingRateDeliveryEstimateMinimum struct {
	// A unit of time.
	Unit ShippingRateDeliveryEstimateMinimumUnit `json:"unit"`
	// Must be greater than 0.
	Value int64 `json:"value"`
}

// The estimated range for how long shipping will take, meant to be displayable to the customer. This will appear on CheckoutSessions.
type ShippingRateDeliveryEstimate struct {
	// The upper bound of the estimated range. If empty, represents no upper bound i.e., infinite.
	Maximum *ShippingRateDeliveryEstimateMaximum `json:"maximum"`
	// The lower bound of the estimated range. If empty, represents no lower bound.
	Minimum *ShippingRateDeliveryEstimateMinimum `json:"minimum"`
}

// ShippingRateFixedAmount describes the fixed amount charged for shipping
// when the rate's type is `fixed_amount`.
type ShippingRateFixedAmount struct {
	// A non-negative integer in cents representing how much to charge.
	Amount int64 `json:"amount"`
	// Three-letter [ISO currency code](https://www.iso.org/iso-4217-currency-codes.html), in lowercase. Must be a [supported currency](https://stripe.com/docs/currencies).
	Currency Currency `json:"currency"`
}

// Shipping rates describe the price of shipping presented to your customers and can be
// applied to [Checkout Sessions](https://stripe.com/docs/payments/checkout/shipping) to collect shipping costs.
type ShippingRate struct {
	APIResource
	// Whether the shipping rate can be used for new purchases. Defaults to `true`.
	Active bool `json:"active"`
	// Time at which the object was created. Measured in seconds since the Unix epoch.
	Created int64 `json:"created"`
	// The estimated range for how long shipping will take, meant to be displayable to the customer. This will appear on CheckoutSessions.
	DeliveryEstimate *ShippingRateDeliveryEstimate `json:"delivery_estimate"`
	// The name of the shipping rate, meant to be displayable to the customer. This will appear on CheckoutSessions.
	DisplayName string `json:"display_name"`
	// The fixed amount charged; present when Type is `fixed_amount`.
	FixedAmount *ShippingRateFixedAmount `json:"fixed_amount"`
	// Unique identifier for the object.
	ID string `json:"id"`
	// Has the value `true` if the object exists in live mode or the value `false` if the object exists in test mode.
	Livemode bool `json:"livemode"`
	// Set of [key-value pairs](https://stripe.com/docs/api/metadata) that you can attach to an object. This can be useful for storing additional information about the object in a structured format.
	Metadata map[string]string `json:"metadata"`
	// String representing the object's type. Objects of the same type share the same value.
	Object string `json:"object"`
	// Specifies whether the rate is considered inclusive of taxes or exclusive of taxes. One of `inclusive`, `exclusive`, or `unspecified`.
	TaxBehavior ShippingRateTaxBehavior `json:"tax_behavior"`
	// A [tax code](https://stripe.com/docs/tax/tax-codes) ID. The Shipping tax code is `txcd_92010001`.
	TaxCode *TaxCode `json:"tax_code"`
	// The type of calculation to use on the shipping rate. Can only be `fixed_amount` for now.
	Type ShippingRateType `json:"type"`
}

// ShippingRateList is a list of ShippingRates as retrieved from a list endpoint.
type ShippingRateList struct {
	APIResource
	ListMeta
	Data []*ShippingRate `json:"data"`
}
// UnmarshalJSON handles deserialization of a ShippingRate.
// This custom unmarshaling is needed because the resulting
// property may be an id or the full struct if it was expanded.
func (s *ShippingRate) UnmarshalJSON(data []byte) error {
if id, ok := ParseID(data); ok {
s.ID = id
return nil
}
type shippingRate ShippingRate
var v shippingRate
if err := json.Unmarshal(data, &v); err != nil {
return err
}
*s = ShippingRate(v)
return nil
} | shippingrate.go | 0.886948 | 0.459501 | shippingrate.go | starcoder |
package runtime
import (
"fmt"
"github.com/onflow/cadence"
"github.com/onflow/cadence/runtime/common"
"github.com/onflow/cadence/runtime/sema"
"github.com/onflow/cadence/runtime/stdlib"
)
// exportType converts a runtime (sema) type to its corresponding Go (cadence)
// representation. results memoizes conversions by type ID; it both avoids
// repeated work and lets the recursive composite/interface exporters handle
// self-referential types. Panics on a sema type with no cadence counterpart.
func exportType(t sema.Type, results map[sema.TypeID]cadence.Type) cadence.Type {
	typeID := t.ID()
	if result, ok := results[typeID]; ok {
		return result
	}
	result := func() cadence.Type {
		switch t := t.(type) {
		case *sema.AnyType:
			return cadence.AnyType{}
		case *sema.AnyStructType:
			return cadence.AnyStructType{}
		case *sema.AnyResourceType:
			return cadence.AnyResourceType{}
		case *sema.VoidType:
			return cadence.VoidType{}
		case *sema.NeverType:
			return cadence.NeverType{}
		case *sema.MetaType:
			return cadence.MetaType{}
		case *sema.OptionalType:
			return exportOptionalType(t, results)
		case *sema.BoolType:
			return cadence.BoolType{}
		case *sema.StringType:
			return cadence.StringType{}
		case *sema.CharacterType:
			return cadence.CharacterType{}
		case *sema.NumberType:
			return cadence.NumberType{}
		case *sema.SignedNumberType:
			return cadence.SignedNumberType{}
		case *sema.IntegerType:
			return cadence.IntegerType{}
		case *sema.SignedIntegerType:
			return cadence.SignedIntegerType{}
		case *sema.FixedPointType:
			return cadence.FixedPointType{}
		case *sema.SignedFixedPointType:
			return cadence.SignedFixedPointType{}
		case *sema.IntType:
			return cadence.IntType{}
		case *sema.Int8Type:
			return cadence.Int8Type{}
		case *sema.Int16Type:
			return cadence.Int16Type{}
		case *sema.Int32Type:
			return cadence.Int32Type{}
		case *sema.Int64Type:
			return cadence.Int64Type{}
		case *sema.Int128Type:
			return cadence.Int128Type{}
		case *sema.Int256Type:
			return cadence.Int256Type{}
		case *sema.UIntType:
			return cadence.UIntType{}
		case *sema.UInt8Type:
			return cadence.UInt8Type{}
		case *sema.UInt16Type:
			return cadence.UInt16Type{}
		case *sema.UInt32Type:
			return cadence.UInt32Type{}
		case *sema.UInt64Type:
			return cadence.UInt64Type{}
		case *sema.UInt128Type:
			return cadence.UInt128Type{}
		case *sema.UInt256Type:
			return cadence.UInt256Type{}
		case *sema.Word8Type:
			return cadence.Word8Type{}
		case *sema.Word16Type:
			return cadence.Word16Type{}
		case *sema.Word32Type:
			return cadence.Word32Type{}
		case *sema.Word64Type:
			return cadence.Word64Type{}
		case *sema.Fix64Type:
			return cadence.Fix64Type{}
		case *sema.UFix64Type:
			return cadence.UFix64Type{}
		case *sema.VariableSizedType:
			return exportVariableSizedType(t, results)
		case *sema.ConstantSizedType:
			return exportConstantSizedType(t, results)
		case *sema.CompositeType:
			return exportCompositeType(t, results)
		case *sema.InterfaceType:
			return exportInterfaceType(t, results)
		case *sema.DictionaryType:
			return exportDictionaryType(t, results)
		case *sema.FunctionType:
			return exportFunctionType(t, results)
		case *sema.AddressType:
			return cadence.AddressType{}
		case *sema.ReferenceType:
			return exportReferenceType(t, results)
		case *sema.RestrictedType:
			return exportRestrictedType(t, results)
		case *stdlib.BlockType:
			return cadence.BlockType{}
		case *sema.PathType:
			return cadence.PathType{}
		case *sema.CheckedFunctionType:
			// A checked function exports identically to its underlying function type.
			return exportFunctionType(t.FunctionType, results)
		case *sema.CapabilityType:
			return exportCapabilityType(t, results)
		case *sema.AuthAccountType:
			return cadence.AuthAccountType{}
		case *sema.PublicAccountType:
			return cadence.PublicAccountType{}
		}
		panic(fmt.Sprintf("cannot export type of type %T", t))
	}()
	results[typeID] = result
	return result
}
// exportOptionalType converts a sema optional type to its cadence form.
func exportOptionalType(t *sema.OptionalType, results map[sema.TypeID]cadence.Type) cadence.Type {
	return cadence.OptionalType{
		Type: exportType(t.Type, results),
	}
}
// exportVariableSizedType converts a sema variable-sized array type to its
// cadence form, recursively exporting the element type.
func exportVariableSizedType(t *sema.VariableSizedType, results map[sema.TypeID]cadence.Type) cadence.Type {
	convertedElement := exportType(t.Type, results)
	return cadence.VariableSizedArrayType{
		ElementType: convertedElement,
	}
}
// exportConstantSizedType converts a sema constant-sized array type to its
// cadence form, carrying the fixed size across.
func exportConstantSizedType(t *sema.ConstantSizedType, results map[sema.TypeID]cadence.Type) cadence.Type {
	convertedElement := exportType(t.Type, results)
	return cadence.ConstantSizedArrayType{
		Size:        uint(t.Size),
		ElementType: convertedElement,
	}
}
// exportCompositeType converts a sema composite (struct, resource, event, or
// contract) type to its cadence form. Fields flagged IgnoreInSerialization
// are dropped. Panics on an unknown composite kind.
func exportCompositeType(t *sema.CompositeType, results map[sema.TypeID]cadence.Type) (result cadence.CompositeType) {
	id := string(t.ID())
	fieldMembers := make([]*sema.Member, 0, len(t.Fields))
	for _, identifier := range t.Fields {
		member := t.Members[identifier]
		if member.IgnoreInSerialization {
			continue
		}
		fieldMembers = append(fieldMembers, member)
	}
	// fields is filled in below, after the result has been memoized.
	fields := make([]cadence.Field, len(fieldMembers))
	switch t.Kind {
	case common.CompositeKindStructure:
		result = &cadence.StructType{
			TypeID:     id,
			Identifier: t.Identifier,
			Fields:     fields,
		}
	case common.CompositeKindResource:
		result = &cadence.ResourceType{
			TypeID:     id,
			Identifier: t.Identifier,
			Fields:     fields,
		}
	case common.CompositeKindEvent:
		result = &cadence.EventType{
			TypeID:     id,
			Identifier: t.Identifier,
			Fields:     fields,
		}
	case common.CompositeKindContract:
		result = &cadence.ContractType{
			TypeID:     id,
			Identifier: t.Identifier,
			Fields:     fields,
		}
	default:
		panic(fmt.Sprintf("cannot export composite type %v of unknown kind %v", t, t.Kind))
	}
	// NOTE: ensure to set the result before recursively exporting field types,
	// so a self-referential field finds this type in the memo instead of recursing forever.
	results[t.ID()] = result
	for i, member := range fieldMembers {
		convertedFieldType := exportType(member.TypeAnnotation.Type, results)
		fields[i] = cadence.Field{
			Identifier: member.Identifier.Identifier,
			Type:       convertedFieldType,
		}
	}
	return
}
// exportInterfaceType converts a sema interface (struct, resource, or
// contract interface) type to its cadence form. Fields flagged
// IgnoreInSerialization are dropped. Panics on an unknown composite kind.
func exportInterfaceType(t *sema.InterfaceType, results map[sema.TypeID]cadence.Type) (result cadence.InterfaceType) {
	id := string(t.ID())
	fieldMembers := make([]*sema.Member, 0, len(t.Fields))
	for _, identifier := range t.Fields {
		member := t.Members[identifier]
		if member.IgnoreInSerialization {
			continue
		}
		fieldMembers = append(fieldMembers, member)
	}
	// fields is filled in below, after the result has been memoized.
	fields := make([]cadence.Field, len(fieldMembers))
	switch t.CompositeKind {
	case common.CompositeKindStructure:
		result = &cadence.StructInterfaceType{
			TypeID:     id,
			Identifier: t.Identifier,
			Fields:     fields,
		}
	case common.CompositeKindResource:
		result = &cadence.ResourceInterfaceType{
			TypeID:     id,
			Identifier: t.Identifier,
			Fields:     fields,
		}
	case common.CompositeKindContract:
		result = &cadence.ContractInterfaceType{
			TypeID:     id,
			Identifier: t.Identifier,
			Fields:     fields,
		}
	default:
		panic(fmt.Sprintf("cannot export interface type %v of unknown kind %v", t, t.CompositeKind))
	}
	// NOTE: ensure to set the result before recursively exporting field types,
	// so a self-referential field finds this type in the memo instead of recursing forever.
	results[t.ID()] = result
	for i, member := range fieldMembers {
		convertedFieldType := exportType(member.TypeAnnotation.Type, results)
		fields[i] = cadence.Field{
			Identifier: member.Identifier.Identifier,
			Type:       convertedFieldType,
		}
	}
	return
}
// exportDictionaryType converts a sema dictionary type to its cadence form.
func exportDictionaryType(t *sema.DictionaryType, results map[sema.TypeID]cadence.Type) cadence.Type {
	return cadence.DictionaryType{
		KeyType:     exportType(t.KeyType, results),
		ElementType: exportType(t.ValueType, results),
	}
}
// exportFunctionType converts a sema function type to its cadence form,
// exporting each parameter's annotated type and the return type.
func exportFunctionType(t *sema.FunctionType, results map[sema.TypeID]cadence.Type) cadence.Type {
	convertedParameters := make([]cadence.Parameter, len(t.Parameters))
	for i, parameter := range t.Parameters {
		convertedParameterType := exportType(parameter.TypeAnnotation.Type, results)
		convertedParameters[i] = cadence.Parameter{
			Label:      parameter.Label,
			Identifier: parameter.Identifier,
			Type:       convertedParameterType,
		}
	}
	convertedReturnType := exportType(t.ReturnTypeAnnotation.Type, results)
	return cadence.Function{
		Parameters: convertedParameters,
		ReturnType: convertedReturnType,
	}.WithID(string(t.ID()))
}
// exportReferenceType converts a sema reference type to its cadence form,
// preserving its authorized flag.
func exportReferenceType(t *sema.ReferenceType, results map[sema.TypeID]cadence.Type) cadence.ReferenceType {
	convertedType := exportType(t.Type, results)
	return cadence.ReferenceType{
		Authorized: t.Authorized,
		Type:       convertedType,
	}.WithID(string(t.ID()))
}
// exportRestrictedType converts a sema restricted type to its cadence form,
// exporting the restricted type and each of its restrictions.
func exportRestrictedType(t *sema.RestrictedType, results map[sema.TypeID]cadence.Type) cadence.RestrictedType {
	convertedType := exportType(t.Type, results)
	restrictions := make([]cadence.Type, len(t.Restrictions))
	for i, restriction := range t.Restrictions {
		restrictions[i] = exportType(restriction, results)
	}
	return cadence.RestrictedType{
		Type:         convertedType,
		Restrictions: restrictions,
	}.WithID(string(t.ID()))
}
// exportCapabilityType converts a sema capability type to its cadence form.
// The borrow type is optional and stays nil when absent.
func exportCapabilityType(t *sema.CapabilityType, results map[sema.TypeID]cadence.Type) cadence.CapabilityType {
	var borrowType cadence.Type
	if t.BorrowType != nil {
		borrowType = exportType(t.BorrowType, results)
	}
	return cadence.CapabilityType{
		BorrowType: borrowType,
	}.WithID(string(t.ID()))
}
package stats
// Coverage represents REST API coverage statistics: aggregate hit counts and
// per-path, per-method endpoint details.
type Coverage struct {
	UniqueHits         int                             `json:"uniqueHits"`
	ExpectedUniqueHits int                             `json:"expectedUniqueHits"`
	Percent            float64                         `json:"percent"`
	Endpoints          map[string]map[string]*Endpoint `json:"endpoints"` // path -> HTTP method -> stats
}

// Endpoint represents a basic statistics structure which is used to calculate
// REST API coverage for a single path + method pair.
type Endpoint struct {
	Params             `json:"params"`
	UniqueHits         int     `json:"uniqueHits"`
	ExpectedUniqueHits int     `json:"expectedUniqueHits"`
	Percent            float64 `json:"percent"`
	MethodCalled       bool    `json:"methodCalled"`
	Path               string  `json:"path"`
	Method             string  `json:"method"`
}

// Params represents body and query parameters, each tracked by its own trie.
type Params struct {
	Body  *Trie `json:"body"`
	Query *Trie `json:"query"`
}

// Trie represents coverage data for one parameter tree.
type Trie struct {
	Root               *Node `json:"root"`
	UniqueHits         int   `json:"uniqueHits"`
	ExpectedUniqueHits int   `json:"expectedUniqueHits"`
	Size               int   `json:"size"` // number of nodes added (root excluded)
	Height             int   `json:"height"`
}
// NewTrie returns an empty Trie with an initialized root node.
func NewTrie() *Trie {
	root := &Node{
		Children: make(map[string]*Node),
		Key:      "root",
	}
	return &Trie{Root: root}
}
// Add inserts a new child node under node (or under the root when node is
// nil or the trie is still empty) and returns the newly created node.
// Leaf nodes count toward ExpectedUniqueHits.
func (t *Trie) Add(key string, node *Node, leaf bool) *Node {
	// NOTE(review): the Size == 0 check forces the root as parent on the very
	// first insert even when a node is supplied — presumably intentional for
	// bootstrapping; confirm against callers.
	if t.Size == 0 || node == nil {
		node = t.Root
	}
	depth := node.Depth + 1
	// Track the deepest node seen so far.
	if depth > t.Height {
		t.Height = depth
	}
	node.Children[key] = &Node{
		Parent:   node,
		Children: make(map[string]*Node),
		Depth:    depth,
		Key:      key,
		IsLeaf:   leaf,
	}
	t.Size++
	if leaf {
		t.ExpectedUniqueHits++
	}
	return node.Children[key]
}
// IncreaseHits increments the hit counter of node and every ancestor up to
// the root. The first hit on a leaf also bumps the trie's unique-hit count.
func (t *Trie) IncreaseHits(node *Node) {
	for current := node; current != nil; current = current.Parent {
		current.Hits++
		if current.IsLeaf && current.Hits == 1 {
			t.UniqueHits++
		}
	}
}
// Node represents a single data unit for the coverage report: one segment of
// a parameter path, with its hit count and children keyed by segment name.
type Node struct {
	Key      string           `json:"-"`
	Hits     int              `json:"hits"`
	Depth    int              `json:"-"`
	IsLeaf   bool             `json:"-"`
	Parent   *Node            `json:"-"`
	Children map[string]*Node `json:"items,omitempty"`
}
// GetChild returns the child stored under key, or nil when absent.
// A map lookup of a *Node already yields nil for a missing key, so the
// explicit comma-ok check in the original was redundant.
func (n *Node) GetChild(key string) *Node {
	return n.Children[key]
}
// String implements fmt.Stringer, identifying the node by its key.
func (n *Node) String() string {
	return n.Key
}
package byteutil
// GfnDouble computes 2 * input in the field of 2^n elements.
// The irreducible polynomial in the finite field for n=128 is
// x^128 + x^7 + x^2 + x + 1 (equals 0x87).
// Constant-time execution in order to avoid side-channel attacks:
// the conditional XOR is expressed as an unconditional multiply by the
// top bit instead of a branch.
func GfnDouble(input []byte) []byte {
	if len(input) != 16 {
		panic("Doubling in GFn only implemented for n = 128")
	}
	// If the first bit is zero, return 2L = L << 1
	// Else return (L << 1) xor 0^120 10000111
	shifted := ShiftBytesLeft(input)
	shifted[15] ^= ((input[0] >> 7) * 0x87)
	return shifted
}
// ShiftBytesLeft returns a new byte array corresponding to x << 1 in binary;
// the bit shifted out of the first byte is discarded. x must be non-empty.
func ShiftBytesLeft(x []byte) []byte {
	n := len(x)
	out := make([]byte, n)
	for i := 0; i+1 < n; i++ {
		out[i] = (x[i] << 1) | (x[i+1] >> 7)
	}
	out[n-1] = x[n-1] << 1
	return out
}
// ShiftNBytesLeft puts into dst the byte array corresponding to x << n in
// binary, filling the vacated low-order bits with zeroes. dst and x are
// expected to have the same length.
func ShiftNBytesLeft(dst, x []byte, n int) {
	// Shift by whole bytes first: drop the first n/8 bytes of x and zero the
	// tail of dst that copy does not reach. (The original appended zeroes to
	// a local slice header instead — invisible to the caller — and left
	// stale bytes in dst's tail.)
	copied := copy(dst, x[n/8:])
	for i := copied; i < len(dst); i++ {
		dst[i] = 0
	}
	// Shift the remaining n % 8 bits across byte boundaries. When bits == 0
	// the >> 8 yields 0 for a byte, so the loop is a no-op by design.
	bits := uint(n % 8)
	l := len(dst)
	for i := 0; i < l-1; i++ {
		dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8-bits))
	}
	dst[l-1] = dst[l-1] << bits
}
// XorBytesMut XORs Y into X in place; X and Y must have equal length.
func XorBytesMut(X, Y []byte) {
	for i := range X {
		X[i] ^= Y[i]
	}
}
// XorBytes writes X XOR Y into Z; all three must have equal length.
func XorBytes(Z, X, Y []byte) {
	for i := range X {
		Z[i] = X[i] ^ Y[i]
	}
}
// RightXor XORs the shorter input Y into the rightmost bytes of the longer
// input X and returns the result as a fresh slice; X itself is not modified.
func RightXor(X, Y []byte) []byte {
	xored := append([]byte(nil), X...)
	offset := len(X) - len(Y)
	for i, b := range Y {
		xored[offset+i] ^= b
	}
	return xored
}
// SliceForAppend takes a slice and a requested number of bytes. It returns a
// slice whose contents are the given slice followed by that many bytes, and a
// second slice aliasing into it that covers only the extra bytes. When the
// original slice already has sufficient capacity, no allocation is performed.
func SliceForAppend(in []byte, n int) (head, tail []byte) {
	total := len(in) + n
	if cap(in) >= total {
		head = in[:total]
	} else {
		head = make([]byte, total)
		copy(head, in)
	}
	tail = head[len(in):]
	return head, tail
}
package graph
import (
i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization"
)
// EducationCourse models a Microsoft Graph education course resource.
type EducationCourse struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{};
    // Unique identifier for the course.
    courseNumber *string;
    // Description of the course.
    description *string;
    // Name of the course.
    displayName *string;
    // ID of the course from the syncing system.
    externalId *string;
    // Subject of the course.
    subject *string;
}
// NewEducationCourse instantiates a new educationCourse and sets the default values.
func NewEducationCourse()(*EducationCourse) {
    course := &EducationCourse{}
    course.SetAdditionalData(make(map[string]interface{}))
    return course
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
// Each getter below is nil-receiver safe; the `else` after a terminating
// `if` in the generated original is replaced with an early return (Go idiom).
func (m *EducationCourse) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    }
    return m.additionalData
}
// GetCourseNumber gets the courseNumber property value. Unique identifier for the course.
func (m *EducationCourse) GetCourseNumber()(*string) {
    if m == nil {
        return nil
    }
    return m.courseNumber
}
// GetDescription gets the description property value. Description of the course.
func (m *EducationCourse) GetDescription()(*string) {
    if m == nil {
        return nil
    }
    return m.description
}
// GetDisplayName gets the displayName property value. Name of the course.
func (m *EducationCourse) GetDisplayName()(*string) {
    if m == nil {
        return nil
    }
    return m.displayName
}
// GetExternalId gets the externalId property value. ID of the course from the syncing system.
func (m *EducationCourse) GetExternalId()(*string) {
    if m == nil {
        return nil
    }
    return m.externalId
}
// GetSubject gets the subject property value. Subject of the course.
func (m *EducationCourse) GetSubject()(*string) {
    if m == nil {
        return nil
    }
    return m.subject
}
// GetFieldDeserializers returns the deserialization information for the
// current model: one setter-backed parse function per JSON property name.
func (m *EducationCourse) GetFieldDeserializers()(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) {
    res := make(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error))
    res["courseNumber"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetCourseNumber(val)
        }
        return nil
    }
    res["description"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetDescription(val)
        }
        return nil
    }
    res["displayName"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetDisplayName(val)
        }
        return nil
    }
    res["externalId"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetExternalId(val)
        }
        return nil
    }
    res["subject"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetSubject(val)
        }
        return nil
    }
    return res
}
// IsNil reports whether the receiver is nil.
func (m *EducationCourse) IsNil()(bool) {
    return m == nil
}
// Serialize writes the current object's properties, followed by any
// additional data, to writer. It stops at the first write error.
func (m *EducationCourse) Serialize(writer i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.SerializationWriter)(error) {
    {
        err := writer.WriteStringValue("courseNumber", m.GetCourseNumber())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("description", m.GetDescription())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("displayName", m.GetDisplayName())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("externalId", m.GetExternalId())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("subject", m.GetSubject())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteAdditionalData(m.GetAdditionalData())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *EducationCourse) SetAdditionalData(value map[string]interface{})() {
if m != nil {
m.additionalData = value
}
}
// SetCourseNumber sets the courseNumber property value. Unique identifier for the course.
func (m *EducationCourse) SetCourseNumber(value *string)() {
if m != nil {
m.courseNumber = value
}
}
// SetDescription sets the description property value. Description of the course.
func (m *EducationCourse) SetDescription(value *string)() {
if m != nil {
m.description = value
}
}
// SetDisplayName sets the displayName property value. Name of the course.
func (m *EducationCourse) SetDisplayName(value *string)() {
if m != nil {
m.displayName = value
}
}
// SetExternalId sets the externalId property value. ID of the course from the syncing system.
func (m *EducationCourse) SetExternalId(value *string)() {
if m != nil {
m.externalId = value
}
}
// SetSubject sets the subject property value. Subject of the course.
func (m *EducationCourse) SetSubject(value *string)() {
if m != nil {
m.subject = value
}
} | models/microsoft/graph/education_course.go | 0.650023 | 0.408218 | education_course.go | starcoder |
package kate
// dASFFTExtension is the recursive core of DASFFTExtension: given the even-
// index evaluations in ab, it computes (in place) the odd-index evaluations
// that make the right half of the polynomial's coefficients zero.
// domainStride selects which roots of unity this recursion level uses.
// warning: the values in `ab` are modified in-place to become the outputs.
// Make a deep copy first if you need to use them later.
// NOTE(review): this helper skips the divide-by-2 at each butterfly level;
// the caller compensates by dividing by len once at the end.
func (fs *FFTSettings) dASFFTExtension(ab []Big, domainStride uint64) {
	// Base case: a single butterfly on two values.
	if len(ab) == 2 {
		aHalf0 := &ab[0]
		aHalf1 := &ab[1]
		var x Big
		addModBig(&x, aHalf0, aHalf1)
		var y Big
		subModBig(&y, aHalf0, aHalf1)
		var tmp Big
		mulModBig(&tmp, &y, &fs.expandedRootsOfUnity[domainStride])
		addModBig(&ab[0], &x, &tmp)
		subModBig(&ab[1], &x, &tmp)
		return
	}
	if len(ab) < 2 {
		panic("bad usage")
	}
	half := uint64(len(ab))
	halfHalf := half >> 1
	abHalf0s := ab[:halfHalf]
	abHalf1s := ab[halfHalf:half]
	// Instead of allocating L0 and L1, just modify a in-place.
	//L0[i] = (((a_half0 + a_half1) % modulus) * inv2) % modulus
	//R0[i] = (((a_half0 - L0[i]) % modulus) * inverse_domain[i * 2]) % modulus
	var tmp1, tmp2 Big
	for i := uint64(0); i < halfHalf; i++ {
		aHalf0 := &abHalf0s[i]
		aHalf1 := &abHalf1s[i]
		addModBig(&tmp1, aHalf0, aHalf1)
		subModBig(&tmp2, aHalf0, aHalf1)
		mulModBig(aHalf1, &tmp2, &fs.reverseRootsOfUnity[i*2*domainStride])
		CopyBigNum(aHalf0, &tmp1)
	}
	// L will be the left half of out
	fs.dASFFTExtension(abHalf0s, domainStride<<1)
	// R will be the right half of out
	fs.dASFFTExtension(abHalf1s, domainStride<<1)
	// The odd deduced outputs are written to the output array already, but then updated in-place
	// L1 = b[:halfHalf]
	// R1 = b[halfHalf:]
	// Half the work of a regular FFT: only deal with uneven-index outputs
	var yTimesRoot Big
	var x, y Big
	for i := uint64(0); i < halfHalf; i++ {
		// Temporary copies, so that writing to output doesn't conflict with input.
		// Note that one hand is from L1, the other R1
		CopyBigNum(&x, &abHalf0s[i])
		CopyBigNum(&y, &abHalf1s[i])
		root := &fs.expandedRootsOfUnity[(1+2*i)*domainStride]
		mulModBig(&yTimesRoot, &y, root)
		// write outputs in place, avoid unnecessary list allocations
		addModBig(&abHalf0s[i], &x, &yTimesRoot)
		subModBig(&abHalf1s[i], &x, &yTimesRoot)
	}
}
// DASFFTExtension takes vals as input, the values of the even indices.
// Then computes the values for the odd indices, which combined would make the right half of coefficients zero.
// Warning: the odd results are written back to the vals slice.
// Panics when the configured FFT domain is too small to hold 2*len(vals) points.
func (fs *FFTSettings) DASFFTExtension(vals []Big) {
	if uint64(len(vals))*2 > fs.maxWidth {
		panic("domain too small for extending requested values")
	}
	fs.dASFFTExtension(vals, 1)
	// The above function didn't perform the divide by 2 on every layer.
	// So now do it all at once, by dividing by 2**depth (=length).
	var invLen Big
	asBig(&invLen, uint64(len(vals)))
	invModBig(&invLen, &invLen)
	for i := 0; i < len(vals); i++ {
		mulModBig(&vals[i], &vals[i], &invLen)
	}
} | das_extension.go | 0.52829 | 0.450057 | das_extension.go | starcoder
package evaluator
import (
"math"
"strings"
"github.com/mzrimsek/zip-lang/src/zip/ast"
"github.com/mzrimsek/zip-lang/src/zip/object"
)
// evalPrefixExpression dispatches a prefix operator to its evaluator,
// producing an error object for operators it does not recognize.
func evalPrefixExpression(operator string, right object.Object) object.Object {
	switch operator {
	case "!":
		return evalBangOperatorExpression(right)
	case "-":
		return evalMinusPrefixOperatorExpression(right)
	case "++":
		return evalIncrementPrefixOperatorExpression(right)
	case "--":
		return evalDecrementPrefixOperatorExpression(right)
	}
	return newError("unknown operator: %s%s", operator, right.Type())
}
// evalBangOperatorExpression implements logical negation: TRUE becomes
// FALSE, while FALSE and NULL become TRUE. Every other value is truthy,
// so its negation is FALSE.
func evalBangOperatorExpression(right object.Object) object.Object {
	if right == TRUE {
		return FALSE
	}
	if right == FALSE || right == NULL {
		return TRUE
	}
	return FALSE
}
// evalMinusPrefixOperatorExpression negates an integer or float operand.
func evalMinusPrefixOperatorExpression(right object.Object) object.Object {
	switch obj := right.(type) {
	case *object.Integer:
		return &object.Integer{Value: -obj.Value}
	case *object.Float:
		return &object.Float{Value: -obj.Value}
	}
	return newError("unknown operator: -%s", right.Type())
}
// evalIncrementPrefixOperatorExpression adds one to the operand in place
// and yields the updated value (pre-increment semantics).
func evalIncrementPrefixOperatorExpression(right object.Object) object.Object {
	switch obj := right.(type) {
	case *object.Integer:
		obj.Value++
		return &object.Integer{Value: obj.Value}
	case *object.Float:
		obj.Value++
		return &object.Float{Value: obj.Value}
	}
	return newError("unknown operator: ++%s", right.Type())
}
// evalDecrementPrefixOperatorExpression subtracts one from the operand in
// place and yields the updated value (pre-decrement semantics).
func evalDecrementPrefixOperatorExpression(right object.Object) object.Object {
	switch obj := right.(type) {
	case *object.Integer:
		obj.Value--
		return &object.Integer{Value: obj.Value}
	case *object.Float:
		obj.Value--
		return &object.Float{Value: obj.Value}
	}
	return newError("unknown operator: --%s", right.Type())
}
// evalInfixExpression routes an infix expression to the evaluator matching
// its operand types: numeric, string, mixed string/number-or-char, or
// boolean. Anything else is reported as a type mismatch or unknown operator.
func evalInfixExpression(operator string, left, right object.Object) object.Object {
	_, lNum := left.(object.Number)
	_, rNum := right.(object.Number)
	_, lChar := left.(*object.Character)
	_, rChar := right.(*object.Character)
	lStr := left.Type() == object.STRING_OBJ
	rStr := right.Type() == object.STRING_OBJ
	switch {
	case lNum && rNum:
		return evalNumberInfixExpression(operator, left, right)
	case lStr && rStr:
		return evalStringInfixExpression(operator, left, right)
	case (lStr || rStr) && (lNum || rNum || lChar || rChar):
		return evalMixedTypeInfixExpression(operator, left, right)
	case left.Type() == object.BOOLEAN_OBJ && right.Type() == object.BOOLEAN_OBJ:
		return evalBooleanInfixExpression(operator, left, right)
	case left.Type() != right.Type():
		return newError("type mismatch: %s %s %s", left.Type(), operator, right.Type())
	default:
		return newError("unknown operator: %s %s %s", left.Type(), operator, right.Type())
	}
}
// evalNumberInfixExpression evaluates arithmetic and comparison operators on
// two numeric operands. Both operands are widened to float64 first; when both
// are integers the arithmetic result is truncated back to int64.
// NOTE(review): routing int64 math through float64 loses precision for
// values above 2^53 — confirm whether the language is expected to support
// integers that large.
func evalNumberInfixExpression(operator string, left, right object.Object) object.Object {
	var leftVal float64
	var rightVal float64
	// An integer result is produced only when BOTH operands are integers.
	isInt := left.Type() == object.INTEGER_OBJ && right.Type() == object.INTEGER_OBJ
	if left.Type() == object.INTEGER_OBJ {
		leftVal = float64(left.(*object.Integer).Value)
	} else {
		leftVal = left.(*object.Float).Value
	}
	if right.Type() == object.INTEGER_OBJ {
		rightVal = float64(right.(*object.Integer).Value)
	} else {
		rightVal = right.(*object.Float).Value
	}
	switch operator {
	case "+":
		val := leftVal + rightVal
		if isInt {
			return &object.Integer{Value: int64(val)}
		}
		return &object.Float{Value: val}
	case "-":
		val := leftVal - rightVal
		if isInt {
			return &object.Integer{Value: int64(val)}
		}
		return &object.Float{Value: val}
	case "*":
		val := leftVal * rightVal
		if isInt {
			return &object.Integer{Value: int64(val)}
		}
		return &object.Float{Value: val}
	case "/":
		// Integer division truncates toward zero via the float->int conversion.
		// NOTE(review): division by integer zero yields Inf here and the
		// int64(Inf) conversion is not well-defined — confirm intended behavior.
		val := leftVal / rightVal
		if isInt {
			return &object.Integer{Value: int64(val)}
		}
		return &object.Float{Value: val}
	case "%":
		// Modulo uses true integer arithmetic for int operands and
		// math.Mod for floats.
		if isInt {
			return &object.Integer{Value: int64(leftVal) % int64(rightVal)}
		}
		return &object.Float{Value: math.Mod(leftVal, rightVal)}
	case "**":
		val := math.Pow(leftVal, rightVal)
		if isInt {
			return &object.Integer{Value: int64(val)}
		}
		return &object.Float{Value: val}
	case "<":
		return nativeBoolToBooleanObject(leftVal < rightVal)
	case ">":
		return nativeBoolToBooleanObject(leftVal > rightVal)
	case "<=":
		return nativeBoolToBooleanObject(leftVal <= rightVal)
	case ">=":
		return nativeBoolToBooleanObject(leftVal >= rightVal)
	case "==":
		return nativeBoolToBooleanObject(leftVal == rightVal)
	case "!=":
		return nativeBoolToBooleanObject(leftVal != rightVal)
	default:
		return newError("unknown operator: %s %s %s", left.Type(), operator, right.Type())
	}
}
// evalStringInfixExpression handles the operators defined for two strings:
// "+" concatenation plus "==" and "!=" comparisons.
func evalStringInfixExpression(operator string, left, right object.Object) object.Object {
	lhs := left.(*object.String).Value
	rhs := right.(*object.String).Value
	switch operator {
	case "+":
		return &object.String{Value: lhs + rhs}
	case "==":
		return nativeBoolToBooleanObject(lhs == rhs)
	case "!=":
		return nativeBoolToBooleanObject(lhs != rhs)
	}
	return newError("unknown operator: %s %s %s", left.Type(), operator, right.Type())
}
// evalMixedTypeInfixExpression evaluates infix expressions that combine a
// string with a number or character operand.
//
// "+" concatenates the Inspect() renderings of both operands.
// "*" repeats the string-side operand by an integer count. A negative count
// previously panicked inside strings.Repeat and crashed the interpreter; it
// is now reported as an ordinary evaluation error.
func evalMixedTypeInfixExpression(operator string, left, right object.Object) object.Object {
	switch operator {
	case "+":
		return &object.String{Value: left.Inspect() + right.Inspect()}
	case "*":
		if left.Type() == object.INTEGER_OBJ {
			count := left.(*object.Integer).Value
			if count < 0 {
				return newError("negative repeat count: %d", count)
			}
			return &object.String{Value: strings.Repeat(right.Inspect(), int(count))}
		}
		if right.Type() == object.INTEGER_OBJ {
			count := right.(*object.Integer).Value
			if count < 0 {
				return newError("negative repeat count: %d", count)
			}
			return &object.String{Value: strings.Repeat(left.Inspect(), int(count))}
		}
		return newError("unknown operator: %s %s %s", left.Type(), operator, right.Type())
	default:
		return newError("unknown operator: %s %s %s", left.Type(), operator, right.Type())
	}
}
// evalBooleanInfixExpression handles the logical and equality operators
// defined for two booleans: &&, ||, == and !=.
func evalBooleanInfixExpression(operator string, left, right object.Object) object.Object {
	lhs := left.(*object.Boolean).Value
	rhs := right.(*object.Boolean).Value
	switch operator {
	case "&&":
		return nativeBoolToBooleanObject(lhs && rhs)
	case "||":
		return nativeBoolToBooleanObject(lhs || rhs)
	case "==":
		return nativeBoolToBooleanObject(lhs == rhs)
	case "!=":
		return nativeBoolToBooleanObject(lhs != rhs)
	}
	return newError("unknown operator: %s %s %s", left.Type(), operator, right.Type())
}
// evalIfExpression evaluates the condition and then either the consequence,
// the alternative (when present), or NULL.
func evalIfExpression(ie *ast.IfExpression, env *object.Environment) object.Object {
	cond := Eval(ie.Condition, env)
	if isError(cond) {
		return cond
	}
	if isTruthy(cond) {
		return Eval(ie.Consequence, env)
	}
	if ie.Alternative != nil {
		return Eval(ie.Alternative, env)
	}
	return NULL
}
// evalExpressions evaluates each expression in order. On the first error it
// returns a single-element slice containing only that error object.
func evalExpressions(exps []ast.Expression, env *object.Environment) []object.Object {
	results := make([]object.Object, 0, len(exps))
	for _, exp := range exps {
		value := Eval(exp, env)
		if isError(value) {
			return []object.Object{value}
		}
		results = append(results, value)
	}
	return results
}
// evalIndexExpression dispatches array and hash indexing; other receiver
// types are not indexable and produce an error object.
func evalIndexExpression(left, index object.Object) object.Object {
	if left.Type() == object.ARRAY_OBJ && index.Type() == object.INTEGER_OBJ {
		return evalArrayIndexExpression(left, index)
	}
	if left.Type() == object.HASH_OBJ {
		return evalHashIndexExpression(left, index)
	}
	return newError("index operator not supported: %s", left.Type())
}
// evalArrayIndexExpression returns the element at the given position, or
// NULL when the index falls outside the array bounds.
func evalArrayIndexExpression(left, index object.Object) object.Object {
	arr := left.(*object.Array)
	pos := index.(*object.Integer).Value
	if pos < 0 || pos >= int64(len(arr.Elements)) {
		return NULL
	}
	return arr.Elements[pos]
}
// evalHashIndexExpression looks up a key in a hash. Unhashable keys produce
// an error object; missing keys evaluate to NULL.
func evalHashIndexExpression(hash, index object.Object) object.Object {
	h := hash.(*object.Hash)
	hashable, ok := index.(object.Hashable)
	if !ok {
		return newError("unusable as hash key: %s", index.Type())
	}
	if pair, found := h.Pairs[hashable.HashKey()]; found {
		return pair.Value
	}
	return NULL
}
// evalWhileExpression repeatedly evaluates the loop body while the condition
// is truthy and returns the value of the last body evaluation.
//
// Fix: when the condition is false on entry the loop body never runs; the
// previous implementation then returned an untyped nil object.Object, which
// callers expecting a real object could dereference and panic on. A loop
// that executes zero times now evaluates to NULL instead.
func evalWhileExpression(we *ast.WhileExpression, env *object.Environment) object.Object {
	condition := Eval(we.Condition, env)
	if isError(condition) {
		return condition
	}
	// Default to NULL so a zero-iteration loop still yields a valid object.
	var result object.Object = NULL
	for isTruthy(condition) {
		result = Eval(we.Block, env)
		condition = Eval(we.Condition, env)
		if isError(condition) {
			return condition
		}
	}
	return result
}
// evalPostfixExpression dispatches the postfix ++ and -- operators.
func evalPostfixExpression(left object.Object, operator string) object.Object {
	if operator == "++" {
		return evalIncrementPostfixOperatorExpression(left)
	}
	if operator == "--" {
		return evalDecrementPostfixOperatorExpression(left)
	}
	return newError("unknown operator: %s%s", left.Type(), operator)
}
// evalIncrementPostfixOperatorExpression increments the operand in place but
// yields its value from BEFORE the increment (post-increment semantics).
func evalIncrementPostfixOperatorExpression(left object.Object) object.Object {
	switch obj := left.(type) {
	case *object.Integer:
		previous := &object.Integer{Value: obj.Value}
		obj.Value++
		return previous
	case *object.Float:
		previous := &object.Float{Value: obj.Value}
		obj.Value++
		return previous
	}
	return newError("unknown operator: %s++", left.Type())
}
// evalDecrementPostfixOperatorExpression decrements the operand in place but
// returns its value from BEFORE the decrement (post-decrement semantics).
func evalDecrementPostfixOperatorExpression(left object.Object) object.Object {
	switch left.Type() {
	case object.INTEGER_OBJ:
		leftObj := left.(*object.Integer)
		// Snapshot the current value: it is what the expression evaluates to.
		returnVal := &object.Integer{Value: leftObj.Value}
		leftObj.Value = leftObj.Value - 1
		return returnVal
	case object.FLOAT_OBJ:
		leftObj := left.(*object.Float)
		returnVal := &object.Float{Value: leftObj.Value}
		leftObj.Value = leftObj.Value - 1
		return returnVal
	default:
		return newError("unknown operator: %s--", left.Type())
	}
} | src/zip/evaluator/expression_evaluating.go | 0.582016 | 0.513181 | expression_evaluating.go | starcoder
// +build go1.14,!go1.15
package symbols
import (
"gonum.org/v1/plot"
"gonum.org/v1/plot/vg/draw"
"reflect"
)
// init registers the exported names of gonum.org/v1/plot in the global
// Symbols table so an embedded interpreter can resolve them by name.
// This file is generated-style glue; the map entries mirror the package API.
func init() {
	Symbols["gonum.org/v1/plot"] = map[string]reflect.Value{
		// function, constant and variable definitions
		"Align": reflect.ValueOf(plot.Align),
		"DefaultFont": reflect.ValueOf(&plot.DefaultFont).Elem(),
		"New": reflect.ValueOf(plot.New),
		"NewLegend": reflect.ValueOf(plot.NewLegend),
		"UTCUnixTime": reflect.ValueOf(&plot.UTCUnixTime).Elem(),
		"UnixTimeIn": reflect.ValueOf(plot.UnixTimeIn),
		"Version": reflect.ValueOf(plot.Version),
		// type definitions (typed nil pointers carry the reflect.Type)
		"Axis": reflect.ValueOf((*plot.Axis)(nil)),
		"ConstantTicks": reflect.ValueOf((*plot.ConstantTicks)(nil)),
		"DataRanger": reflect.ValueOf((*plot.DataRanger)(nil)),
		"DefaultTicks": reflect.ValueOf((*plot.DefaultTicks)(nil)),
		"GlyphBox": reflect.ValueOf((*plot.GlyphBox)(nil)),
		"GlyphBoxer": reflect.ValueOf((*plot.GlyphBoxer)(nil)),
		"InvertedScale": reflect.ValueOf((*plot.InvertedScale)(nil)),
		"Legend": reflect.ValueOf((*plot.Legend)(nil)),
		"LinearScale": reflect.ValueOf((*plot.LinearScale)(nil)),
		"LogScale": reflect.ValueOf((*plot.LogScale)(nil)),
		"LogTicks": reflect.ValueOf((*plot.LogTicks)(nil)),
		"Normalizer": reflect.ValueOf((*plot.Normalizer)(nil)),
		"Plot": reflect.ValueOf((*plot.Plot)(nil)),
		"Plotter": reflect.ValueOf((*plot.Plotter)(nil)),
		"Thumbnailer": reflect.ValueOf((*plot.Thumbnailer)(nil)),
		"Tick": reflect.ValueOf((*plot.Tick)(nil)),
		"Ticker": reflect.ValueOf((*plot.Ticker)(nil)),
		"TickerFunc": reflect.ValueOf((*plot.TickerFunc)(nil)),
		"TimeTicks": reflect.ValueOf((*plot.TimeTicks)(nil)),
		// interface wrapper definitions (see the _gonum_org_... types below)
		"_DataRanger": reflect.ValueOf((*_gonum_org_v1_plot_DataRanger)(nil)),
		"_GlyphBoxer": reflect.ValueOf((*_gonum_org_v1_plot_GlyphBoxer)(nil)),
		"_Normalizer": reflect.ValueOf((*_gonum_org_v1_plot_Normalizer)(nil)),
		"_Plotter": reflect.ValueOf((*_gonum_org_v1_plot_Plotter)(nil)),
		"_Thumbnailer": reflect.ValueOf((*_gonum_org_v1_plot_Thumbnailer)(nil)),
		"_Ticker": reflect.ValueOf((*_gonum_org_v1_plot_Ticker)(nil)),
	}
}
// The wrapper types below adapt function fields to the corresponding plot
// interfaces, so interpreted code can implement those interfaces by
// supplying plain functions.
// _gonum_org_v1_plot_DataRanger is an interface wrapper for DataRanger type
type _gonum_org_v1_plot_DataRanger struct {
	WDataRange func() (xmin float64, xmax float64, ymin float64, ymax float64)
}
func (W _gonum_org_v1_plot_DataRanger) DataRange() (xmin float64, xmax float64, ymin float64, ymax float64) {
	return W.WDataRange()
}
// _gonum_org_v1_plot_GlyphBoxer is an interface wrapper for GlyphBoxer type
type _gonum_org_v1_plot_GlyphBoxer struct {
	WGlyphBoxes func(a0 *plot.Plot) []plot.GlyphBox
}
func (W _gonum_org_v1_plot_GlyphBoxer) GlyphBoxes(a0 *plot.Plot) []plot.GlyphBox {
	return W.WGlyphBoxes(a0)
}
// _gonum_org_v1_plot_Normalizer is an interface wrapper for Normalizer type
type _gonum_org_v1_plot_Normalizer struct {
	WNormalize func(min float64, max float64, x float64) float64
}
func (W _gonum_org_v1_plot_Normalizer) Normalize(min float64, max float64, x float64) float64 {
	return W.WNormalize(min, max, x)
}
// _gonum_org_v1_plot_Plotter is an interface wrapper for Plotter type
type _gonum_org_v1_plot_Plotter struct {
	WPlot func(a0 draw.Canvas, a1 *plot.Plot)
}
func (W _gonum_org_v1_plot_Plotter) Plot(a0 draw.Canvas, a1 *plot.Plot) { W.WPlot(a0, a1) }
// _gonum_org_v1_plot_Thumbnailer is an interface wrapper for Thumbnailer type
type _gonum_org_v1_plot_Thumbnailer struct {
	WThumbnail func(c *draw.Canvas)
}
func (W _gonum_org_v1_plot_Thumbnailer) Thumbnail(c *draw.Canvas) { W.WThumbnail(c) }
// _gonum_org_v1_plot_Ticker is an interface wrapper for Ticker type
type _gonum_org_v1_plot_Ticker struct {
	WTicks func(min float64, max float64) []plot.Tick
}
func (W _gonum_org_v1_plot_Ticker) Ticks(min float64, max float64) []plot.Tick {
	return W.WTicks(min, max)
} | pkg/internal/runtime/symbols/go1_14_gonum.org_v1_plot.go | 0.630571 | 0.429609 | go1_14_gonum.org_v1_plot.go | starcoder
package parser
import (
"github.com/mzrimsek/zip-lang/src/zip/ast"
"github.com/mzrimsek/zip-lang/src/zip/token"
)
// parseStatement parses a single statement. An identifier followed by an
// assignment token becomes a plain or shortcut assignment; everything not
// matched below falls through to an expression statement.
func (p *Parser) parseStatement() ast.Statement {
	switch p.curToken.Type {
	case token.LET:
		return p.parseLetStatement()
	case token.RETURN:
		return p.parseReturnStatement()
	case token.IDENT:
		switch next := p.peekToken.Type; next {
		case token.ASSIGN:
			return p.parseAssignStatement()
		case token.ADD_ASSIGN, token.SUB_ASSIGN, token.MULT_ASSIGN,
			token.DIV_ASSIGN, token.MOD_ASSIGN, token.POW_ASSIGN:
			return p.parseShortcutAssignStatement(next)
		}
	}
	return p.parseExpressionStatement()
}
// parseLetStatement parses `let <ident> = <expression>;`, returning nil on
// a malformed header so the caller can keep scanning.
func (p *Parser) parseLetStatement() *ast.LetStatement {
	letStmt := &ast.LetStatement{Token: p.curToken}
	if !p.expectPeek(token.IDENT) {
		return nil
	}
	letStmt.Name = &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
	if !p.expectPeek(token.ASSIGN) {
		return nil
	}
	p.nextToken()
	letStmt.Value = p.parseExpression(LOWEST)
	if p.peekTokenIs(token.SEMICOLON) {
		p.nextToken()
	}
	return letStmt
}
// parseReturnStatement parses `return <expression>;`.
func (p *Parser) parseReturnStatement() *ast.ReturnStatement {
	retStmt := &ast.ReturnStatement{Token: p.curToken}
	p.nextToken()
	retStmt.ReturnValue = p.parseExpression(LOWEST)
	if p.peekTokenIs(token.SEMICOLON) {
		p.nextToken()
	}
	return retStmt
}
// parseExpressionStatement parses a bare expression used as a statement.
// The trailing semicolon is optional.
func (p *Parser) parseExpressionStatement() *ast.ExpressionStatement {
	exprStmt := &ast.ExpressionStatement{Token: p.curToken}
	exprStmt.Expression = p.parseExpression(LOWEST)
	if p.peekTokenIs(token.SEMICOLON) {
		p.nextToken()
	}
	return exprStmt
}
// parseBlockStatement collects statements until the closing brace or EOF.
func (p *Parser) parseBlockStatement() *ast.BlockStatement {
	blockStmt := &ast.BlockStatement{Token: p.curToken, Statements: []ast.Statement{}}
	p.nextToken()
	for !p.curTokenIs(token.RBRACE) && !p.curTokenIs(token.EOF) {
		if parsed := p.parseStatement(); parsed != nil {
			blockStmt.Statements = append(blockStmt.Statements, parsed)
		}
		p.nextToken()
	}
	return blockStmt
}
// parseAssignStatement parses `<ident> = <expression>;`.
//
// Fix: the expected token is now the token.ASSIGN constant rather than the
// raw string literal "=", matching how every other token is referenced in
// this parser and keeping the code correct if the token's literal ever
// changes.
func (p *Parser) parseAssignStatement() *ast.AssignStatement {
	stmt := &ast.AssignStatement{Token: p.curToken}
	stmt.Name = &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
	if !p.expectPeek(token.ASSIGN) {
		return nil
	}
	p.nextToken()
	stmt.Value = p.parseExpression(LOWEST)
	if p.peekTokenIs(token.SEMICOLON) {
		p.nextToken()
	}
	return stmt
}
// parseShortcutAssignStatement parses compound assignments such as
// `x += <expression>;`. tokenType is the compound token (e.g. "+=");
// the stored Operator is that token with the trailing '=' stripped
// (e.g. "+"), so the evaluator can reuse the plain infix operators.
func (p *Parser) parseShortcutAssignStatement(tokenType token.TokenType) *ast.ShortcutAssignStatement {
	operator := string(tokenType)
	stmt := &ast.ShortcutAssignStatement{Token: p.curToken}
	stmt.Name = &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
	// Drop the final '=' of the compound token to get the bare operator.
	stmt.Operator = operator[0 : len(operator)-1]
	if !p.expectPeek(tokenType) {
		return nil
	}
	p.nextToken()
	stmt.Value = p.parseExpression(LOWEST)
	if p.peekTokenIs(token.SEMICOLON) {
		p.nextToken()
	}
	return stmt
} | src/zip/parser/statement_parsing.go | 0.526099 | 0.447581 | statement_parsing.go | starcoder
package query
import (
"bytes"
"encoding/gob"
"log"
"strings"
)
// A ManyManyNode is an association node that links one table to another table with a many-to-many relationship.
// Some of the columns have overloaded meanings depending on SQL or NoSQL mode.
type ManyManyNode struct {
	nodeAlias
	nodeCondition
	nodeLink
	// Which database in the global list of databases does the node belong to
	dbKey string
	// NoSQL: The originating table. SQL: The association table
	dbTable string
	// NoSQL: The table storing the array of ids on the other end. SQL: the table in the association table pointing towards us.
	dbColumn string
	// Property in the original object used to ref to this object or node.
	goPropName string
	// NoSQL & SQL: The table we are joining to
	refTable string
	// NoSQL: table point backwards to us. SQL: Column in association table pointing forwards to refTable
	refColumn string
	// Are we expanding as an array, or one item at a time.
	// true (the default set by NewManyManyNode) means array expansion; Expand() flips it.
	isArray bool
	// Is this pointing to a type table item?
	isTypeTable bool
}
// NewManyManyNode is used internally by the framework to return a new ManyMany node.
// The returned node defaults to array expansion (isArray = true); call Expand()
// to switch to one-joined-item-per-row expansion.
func NewManyManyNode(
	dbKey string,
	// NoSQL: The originating table. SQL: The association table
	dbTable string,
	// NoSQL: The table storing the array of ids on the other end. SQL: the table in the association table pointing towards us.
	dbColumn string,
	// Property in the original object used to ref to this object or node.
	goName string,
	// NoSQL & SQL: The table we are joining to
	refTableName string,
	// NoSQL: table point backwards to us. SQL: Column in association table pointing forwards to refTable
	refColumn string,
	// Are we pointing to a type table
	isType bool,
) *ManyManyNode {
	n := &ManyManyNode{
		dbKey: dbKey,
		dbTable: dbTable,
		dbColumn: dbColumn,
		goPropName: goName,
		refTable: refTableName,
		refColumn: refColumn,
		isArray: true,
		isTypeTable: isType,
	}
	return n
}
// copy returns a field-by-field duplicate of the node, including its alias
// and condition (the condition value itself is shared, not deep-copied).
func (n *ManyManyNode) copy() NodeI {
	return &ManyManyNode{
		dbKey:         n.dbKey,
		dbTable:       n.dbTable,
		dbColumn:      n.dbColumn,
		goPropName:    n.goPropName,
		refTable:      n.refTable,
		refColumn:     n.refColumn,
		isArray:       n.isArray,
		isTypeTable:   n.isTypeTable,
		nodeAlias:     nodeAlias{n.alias},
		nodeCondition: nodeCondition{n.condition}, // shouldn't need to duplicate condition
	}
}
// nodeType identifies this node kind to the query builder.
func (n *ManyManyNode) nodeType() NodeType {
	return ManyManyNodeType
}
// Expand tells this node to create multiple original objects with a single link for each joined item, rather than to create one original with an array of joined items
func (n *ManyManyNode) Expand() {
	n.isArray = false
}
// isExpanded reports whether this node is creating a new object for each joined item (true), or creating an array of
// joined items (false).
func (n *ManyManyNode) isExpanded() bool {
	return !n.isArray
}
// isExpander reports that this node type is capable of being expanded.
func (n *ManyManyNode) isExpander() bool {
	return true
}
// Equals is used internally by the framework to test if the node is the same as another node.
// Two nodes match when they share the same table and Go property name, and
// their aliases are either equal or at least one of them is unset.
func (n *ManyManyNode) Equals(n2 NodeI) bool {
	if tn, ok := n2.(TableNodeI); !ok {
		return false
	} else if cn, ok2 := tn.EmbeddedNode_().(*ManyManyNode); !ok2 {
		return false
	} else {
		return cn.dbTable == n.dbTable &&
			cn.goPropName == n.goPropName &&
			(cn.alias == "" || n.alias == "" || cn.alias == n.alias)
	}
}
// tableName returns the name of the table this node joins to.
func (n *ManyManyNode) tableName() string {
	return n.refTable
}
// databaseKey returns the key of the database this node belongs to.
func (n *ManyManyNode) databaseKey() string {
	return n.dbKey
}
// log prints a debug representation of the node, indented by level.
func (n *ManyManyNode) log(level int) {
	tabs := strings.Repeat("\t", level)
	log.Print(tabs + "MM: " + n.dbTable + "." + n.dbColumn + "." + n.refTable + "." + n.refColumn + " AS " + n.GetAlias())
}
// goName returns the name as a capitalized object name.
func (n *ManyManyNode) goName() string {
	return n.goPropName
}
// manyManyNodeEncoded mirrors ManyManyNode with exported fields so the
// gob package can serialize it (gob ignores unexported fields).
type manyManyNodeEncoded struct {
	Alias string
	Condition NodeI
	Parent NodeI
	DbKey string
	DbTable string
	DbColumn string
	GoPropName string
	RefTable string
	RefColumn string
	IsArray bool
	IsTypeTable bool
}
// GobEncode serializes the node via the exported manyManyNodeEncoded mirror
// so it can round-trip through encoding/gob.
//
// Fix: an encoding failure was previously turned into a panic even though
// the method signature already declares an error return (and gob callers
// expect one); the error is now returned instead.
func (n *ManyManyNode) GobEncode() (data []byte, err error) {
	var buf bytes.Buffer
	e := gob.NewEncoder(&buf)
	s := manyManyNodeEncoded{
		Alias:       n.alias,
		Condition:   n.condition,
		Parent:      n.parentNode,
		DbKey:       n.dbKey,
		DbTable:     n.dbTable,
		DbColumn:    n.dbColumn,
		GoPropName:  n.goPropName,
		RefTable:    n.refTable,
		RefColumn:   n.refColumn,
		IsArray:     n.isArray,
		IsTypeTable: n.isTypeTable,
	}
	if err = e.Encode(s); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// GobDecode restores the node from the exported manyManyNodeEncoded mirror
// produced by GobEncode, and re-links the parent node.
//
// Fix: a decoding failure was previously turned into a panic even though
// the method signature already declares an error return; the error is now
// returned instead, leaving the receiver unmodified.
func (n *ManyManyNode) GobDecode(data []byte) (err error) {
	buf := bytes.NewBuffer(data)
	dec := gob.NewDecoder(buf)
	var s manyManyNodeEncoded
	if err = dec.Decode(&s); err != nil {
		return err
	}
	n.alias = s.Alias
	n.condition = s.Condition
	n.dbKey = s.DbKey
	n.dbTable = s.DbTable
	n.dbColumn = s.DbColumn
	n.goPropName = s.GoPropName
	n.refTable = s.RefTable
	n.refColumn = s.RefColumn
	n.isArray = s.IsArray
	n.isTypeTable = s.IsTypeTable
	SetParentNode(n, s.Parent)
	return nil
}
// init registers the node type with gob so it can be serialized through
// NodeI interface values.
func init() {
	gob.Register(&ManyManyNode{})
}
// ManyManyNodeIsArray is used internally by the framework to return whether the node creates an array, or just a link to a single item.
func ManyManyNodeIsArray(n *ManyManyNode) bool {
	return n.isArray
}
// ManyManyNodeIsTypeTable is used internally by the framework to return whether the node points to a type table
func ManyManyNodeIsTypeTable(n *ManyManyNode) bool {
	return n.isTypeTable
}
// ManyManyNodeRefTable is used internally by the framework to return the table name on the other end of the link
func ManyManyNodeRefTable(n *ManyManyNode) string {
	return n.refTable
}
// ManyManyNodeRefColumn is used internally by the framework to return the column name on the other end of the link
func ManyManyNodeRefColumn(n *ManyManyNode) string {
	return n.refColumn
}
// ManyManyNodeDbTable is used internally by the framework to return the table name of the table the node belongs to
func ManyManyNodeDbTable(n *ManyManyNode) string {
	return n.dbTable
}
// ManyManyNodeDbColumn is used internally by the framework to return the column name in the table the node belongs to
func ManyManyNodeDbColumn(n *ManyManyNode) string {
	return n.dbColumn
} | pkg/orm/query/manyManyNode.go | 0.717309 | 0.498657 | manyManyNode.go | starcoder
package geojson
import (
"encoding/json"
"github.com/ctessum/geom"
)
// decodeCoordinates converts a decoded JSON array into a flat []float64,
// panicking with *InvalidGeometryError on any shape or type mismatch.
func decodeCoordinates(jsonCoordinates interface{}) []float64 {
	elements, ok := jsonCoordinates.([]interface{})
	if !ok {
		panic(&InvalidGeometryError{})
	}
	result := make([]float64, len(elements))
	for i, element := range elements {
		value, isFloat := element.(float64)
		if !isFloat {
			panic(&InvalidGeometryError{})
		}
		result[i] = value
	}
	return result
}
// decodeCoordinates2 decodes a rank-2 JSON coordinate array.
func decodeCoordinates2(jsonCoordinates interface{}) [][]float64 {
	elements, ok := jsonCoordinates.([]interface{})
	if !ok {
		panic(&InvalidGeometryError{})
	}
	result := make([][]float64, len(elements))
	for i, element := range elements {
		result[i] = decodeCoordinates(element)
	}
	return result
}
// decodeCoordinates3 decodes a rank-3 JSON coordinate array.
func decodeCoordinates3(jsonCoordinates interface{}) [][][]float64 {
	elements, ok := jsonCoordinates.([]interface{})
	if !ok {
		panic(&InvalidGeometryError{})
	}
	result := make([][][]float64, len(elements))
	for i, element := range elements {
		result[i] = decodeCoordinates2(element)
	}
	return result
}
// decodeCoordinates4 decodes a rank-4 JSON coordinate array.
func decodeCoordinates4(jsonCoordinates interface{}) [][][][]float64 {
	elements, ok := jsonCoordinates.([]interface{})
	if !ok {
		panic(&InvalidGeometryError{})
	}
	result := make([][][][]float64, len(elements))
	for i, element := range elements {
		result[i] = decodeCoordinates3(element)
	}
	return result
}
// makeLinearRing turns a list of [x, y] pairs into a geom.Path, panicking
// with *InvalidGeometryError when any pair is not exactly two values.
func makeLinearRing(coordinates [][]float64) geom.Path {
	ring := make(geom.Path, len(coordinates))
	for i, pair := range coordinates {
		if len(pair) != 2 {
			panic(&InvalidGeometryError{})
		}
		ring[i].X, ring[i].Y = pair[0], pair[1]
	}
	return ring
}
// makeLinearRings converts each coordinate list into a geom.Path.
func makeLinearRings(coordinates [][][]float64) []geom.Path {
	rings := make([]geom.Path, len(coordinates))
	for i, ringCoords := range coordinates {
		rings[i] = makeLinearRing(ringCoords)
	}
	return rings
}
// doFromGeoJSON converts a GeoJSON Geometry into the corresponding geom
// type. Malformed or unsupported input is signalled by panicking with an
// error value; FromGeoJSON recovers these panics and returns them as errors.
// Only 2D (x, y) coordinates are accepted; any other arity panics.
func doFromGeoJSON(g *Geometry) geom.Geom {
	switch g.Type {
	case "Point":
		coordinates := decodeCoordinates(g.Coordinates)
		switch len(coordinates) {
		case 2:
			return geom.Point{coordinates[0], coordinates[1]}
		default:
			panic(&InvalidGeometryError{})
		}
	case "MultiPoint":
		coordinates := decodeCoordinates2(g.Coordinates)
		// The first element's length determines the expected dimensionality.
		if len(coordinates) == 0 {
			panic(&InvalidGeometryError{})
		}
		switch len(coordinates[0]) {
		case 2:
			return geom.MultiPoint(makeLinearRing(coordinates))
		default:
			panic(&InvalidGeometryError{})
		}
	case "LineString":
		coordinates := decodeCoordinates2(g.Coordinates)
		if len(coordinates) == 0 {
			panic(&InvalidGeometryError{})
		}
		switch len(coordinates[0]) {
		case 2:
			return geom.LineString(makeLinearRing(coordinates))
		default:
			panic(&InvalidGeometryError{})
		}
	case "MultiLineString":
		coordinates := decodeCoordinates3(g.Coordinates)
		if len(coordinates) == 0 || len(coordinates[0]) == 0 {
			panic(&InvalidGeometryError{})
		}
		switch len(coordinates[0][0]) {
		case 2:
			multiLineString := make(geom.MultiLineString, len(coordinates))
			for i, coord := range coordinates {
				multiLineString[i] = geom.LineString(makeLinearRing(coord))
			}
			return multiLineString
		default:
			panic(&InvalidGeometryError{})
		}
	case "Polygon":
		coordinates := decodeCoordinates3(g.Coordinates)
		if len(coordinates) == 0 || len(coordinates[0]) == 0 {
			panic(&InvalidGeometryError{})
		}
		switch len(coordinates[0][0]) {
		case 2:
			return geom.Polygon(makeLinearRings(coordinates))
		default:
			panic(&InvalidGeometryError{})
		}
	case "MultiPolygon":
		coordinates := decodeCoordinates4(g.Coordinates)
		if len(coordinates) == 0 || len(coordinates[0]) == 0 || len(coordinates[0][0]) == 0 {
			panic(&InvalidGeometryError{})
		}
		switch len(coordinates[0][0][0]) {
		case 2:
			multiPolygon := make(geom.MultiPolygon, len(coordinates))
			for i, coord := range coordinates {
				multiPolygon[i] = makeLinearRings(coord)
			}
			return multiPolygon
		default:
			panic(&InvalidGeometryError{})
		}
	default:
		panic(&UnsupportedGeometryError{g.Type})
	}
}
// FromGeoJSON converts a decoded GeoJSON Geometry into a geom.Geom.
// The conversion helpers signal malformed input by panicking with an error
// value; this wrapper recovers that panic and returns it as an error.
//
// Fixes: the parameter was previously named `geom`, shadowing the imported
// geom package inside the body; and the recovered value was converted with
// an unchecked `e.(error)` assertion, which would itself panic (masking the
// original failure) if a non-error value were ever raised. A non-error panic
// is now re-raised unchanged.
func FromGeoJSON(geometry *Geometry) (g geom.Geom, err error) {
	defer func() {
		if e := recover(); e != nil {
			g = nil
			var ok bool
			if err, ok = e.(error); !ok {
				// Not one of our error panics: a programmer bug. Propagate it.
				panic(e)
			}
		}
	}()
	return doFromGeoJSON(geometry), nil
}
// Decode unmarshals raw GeoJSON bytes and converts them to a geom.Geom.
// NOTE(review): the local variable `geom` shadows the imported geom package
// inside this body; harmless here but worth renaming.
func Decode(data []byte) (geom.Geom, error) {
	var geom Geometry
	if err := json.Unmarshal(data, &geom); err == nil {
		return FromGeoJSON(&geom)
	} else {
		return nil, err
	}
} | encoding/geojson/decode.go | 0.589362 | 0.464841 | decode.go | starcoder
package analyze
import (
"github.com/Rhymond/go-money"
"github.com/ed-fx/go-soft4fx/internal/simulator"
"github.com/pkg/errors"
"math"
"strconv"
"time"
)
// Day aggregates trade statistics for one weekday across a simulation.
type Day struct {
	day time.Weekday
	NoOfTrades int
	NoOfProfitTrades int
	ProfitTradesInPips float64
	ProfitTradesInMoney *money.Money
	NoOfLossTrades int
	LossTradesInPips float64
	LossTradesInMoney *money.Money
	// totalTradesDuration is the summed open-to-close duration of all trades.
	totalTradesDuration time.Duration
	simulatorGainPct float64
	// netProfit is computed in postConstruct as profit + loss money.
	netProfit *money.Money
	profitInPipsPct float64
	lossInPipsPct float64
	netGainInMoneyPct float64
}
// Weekday holds the per-day aggregates for the five trading days.
type Weekday struct {
	monday *Day
	tuesday *Day
	wednesday *Day
	thursday *Day
	friday *Day
}
// Monday returns the Monday aggregate.
func (w Weekday) Monday() *Day { return w.monday }

// Tuesday returns the Tuesday aggregate.
func (w Weekday) Tuesday() *Day { return w.tuesday }

// Wednesday returns the Wednesday aggregate.
func (w Weekday) Wednesday() *Day { return w.wednesday }

// Thursday returns the Thursday aggregate.
func (w Weekday) Thursday() *Day { return w.thursday }

// Friday returns the Friday aggregate.
func (w Weekday) Friday() *Day { return w.friday }

// getByDayOfWeek maps a time.Weekday to its aggregate, or nil for
// non-trading days (Saturday/Sunday).
func (w *Weekday) getByDayOfWeek(weekday time.Weekday) *Day {
	switch weekday {
	case time.Monday:
		return w.monday
	case time.Tuesday:
		return w.tuesday
	case time.Wednesday:
		return w.wednesday
	case time.Thursday:
		return w.thursday
	case time.Friday:
		return w.friday
	default:
		return nil
	}
}

// Days returns the five trading-day aggregates in Monday-to-Friday order.
func (w Weekday) Days() []*Day {
	return []*Day{w.monday, w.tuesday, w.wednesday, w.thursday, w.friday}
}
// append routes an order to the aggregate of its open-time weekday.
// Orders opened on a non-trading day (weekend) produce an error.
func (w *Weekday) append(order *simulator.Order) (err error) {
	openTime := order.OpenTime
	day := w.getByDayOfWeek(openTime.Weekday())
	if day == nil {
		err = errors.New("Invalid day for order [" + strconv.Itoa(order.Id) + "] Open time [" + openTime.String() + "]")
	} else {
		err = day.append(order)
	}
	return
}
// analyzeByWeekday buckets every closed (non-balance) order of the
// simulation by the weekday it was opened on, then finalizes each day's
// derived percentages against the simulation-wide totals.
func analyzeByWeekday(sim *simulator.Simulator) (weekday *Weekday, err error) {
	weekday = &Weekday{
		monday: newDay(time.Monday),
		tuesday: newDay(time.Tuesday),
		wednesday: newDay(time.Wednesday),
		thursday: newDay(time.Thursday),
		friday: newDay(time.Friday),
	}
	for _, order := range sim.ClosedOrders {
		// Balance entries are deposits/withdrawals, not trades.
		if order.Type == simulator.Balance {
			continue
		}
		if err = weekday.append(order); err != nil {
			return
		}
	}
	netProfit := sim.Details.TotalNetProfit
	for _, day := range weekday.Days() {
		day.postConstruct(sim.ProfitInPips(), sim.LossInPips(), netProfit)
	}
	return
}
// newDay builds an empty aggregate for the given weekday. All counters and
// percentages start at their zero values.
func newDay(day time.Weekday) *Day {
	return &Day{day: day}
}

// Day returns which weekday this aggregate covers.
func (d Day) Day() time.Weekday {
	return d.day
}

// WinPct returns the share of profitable trades as a percentage, rounded to
// two decimal places. A day without trades reports 0.
func (d Day) WinPct() float64 {
	if d.NoOfTrades == 0 {
		return 0
	}
	winRatio := float64(d.NoOfProfitTrades) / float64(d.NoOfTrades)
	return math.Round(winRatio*10000) / 100
}
// append folds one order into the day's counters. Winning and losing trades
// update the respective pip and money totals; break-even trades only count
// toward NoOfTrades and the total duration.
//
// Fix: the errors returned by money.Money.Add (e.g. on a currency mismatch)
// were silently discarded even though this method already declares an error
// return; they are now propagated to the caller.
func (d *Day) append(o *simulator.Order) error {
	d.NoOfTrades++
	switch {
	case o.IsWin():
		d.NoOfProfitTrades++
		d.ProfitTradesInPips += o.ProfitPips()
		if d.ProfitTradesInMoney == nil {
			d.ProfitTradesInMoney = o.Profit
		} else {
			sum, err := d.ProfitTradesInMoney.Add(o.Profit)
			if err != nil {
				return err
			}
			d.ProfitTradesInMoney = sum
		}
	case o.IsLoss():
		d.NoOfLossTrades++
		d.LossTradesInPips += o.ProfitPips()
		if d.LossTradesInMoney == nil {
			d.LossTradesInMoney = o.Profit
		} else {
			sum, err := d.LossTradesInMoney.Add(o.Profit)
			if err != nil {
				return err
			}
			d.LossTradesInMoney = sum
		}
	}
	d.totalTradesDuration += o.Duration()
	return nil
}
func (d *Day) postConstruct(simProfitInPips float64, simLossProfitInPips float64, netProfit *money.Money) {
currencyCode := netProfit.Currency().Code
d.ProfitTradesInMoney = initWithZeroMoney(d.ProfitTradesInMoney, currencyCode)
d.LossTradesInMoney = initWithZeroMoney(d.LossTradesInMoney, currencyCode)
d.netProfit = money.New(d.ProfitTradesInMoney.Amount()+d.LossTradesInMoney.Amount(), currencyCode)
d.profitInPipsPct = pct(d.ProfitTradesInPips / simProfitInPips)
if simLossProfitInPips != 0 {
d.lossInPipsPct = pct(d.LossTradesInPips / simLossProfitInPips)
}
d.netGainInMoneyPct = pctInt64(d.netProfit.Amount(), netProfit.Amount())
}
// initWithZeroMoney returns m unchanged when it is already set;
// otherwise it returns a zero amount in the given currency so callers
// can do arithmetic without nil checks.
func initWithZeroMoney(m *money.Money, currencyCode string) *money.Money {
	if m == nil {
		m = money.New(0, currencyCode)
	}
	return m
}
func (d Day) ProfitInPipsPct() float64 {
return d.profitInPipsPct
}
func (d Day) LossInPipsPct() float64 {
return d.lossInPipsPct
}
func (d Day) NetProfitInMoney() *money.Money {
return d.netProfit
}
func (d Day) NetGainInMoneyPct() float64 {
return d.netGainInMoneyPct
} | internal/simulator/analyze/weekday.go | 0.625438 | 0.501221 | weekday.go | starcoder |
package mathval
import (
"errors"
"io"
"math/big"
)
// Parser is a recursive-descent parser over the token stream produced
// by Scanner. It keeps a single-token buffer so the most recent token
// can be pushed back (unscan) or inspected without being consumed
// (peek); there is no deeper lookahead.
type Parser struct {
	s *Scanner
	buf struct {
		tok Token // last read token
		lit string // last read literal
		n int // buffer size. Currently max=1 as no lookahead
	}
}

// NewParser returns a new Parser reading tokens from r.
func NewParser(r io.Reader) *Parser {
	return &Parser{s: NewScanner(r)}
}
// scan returns the next token: the buffered one if unscan was called
// since the last read, otherwise a fresh token from the scanner.
func (p *Parser) scan() (tok Token, lit string) {
	// If we have a token on the buffer, then return it.
	if p.buf.n != 0 {
		p.buf.n = 0
		return p.buf.tok, p.buf.lit
	}
	// Otherwise read the next token from the scanner.
	tok, lit = p.s.Scan()
	// Save it to the buffer in case we unscan later.
	p.buf.tok, p.buf.lit = tok, lit
	return
}

// unscan pushes the previously read token back onto the buffer.
func (p *Parser) unscan() { p.buf.n = 1 }

// peek returns the next token without consuming it, skipping a single
// leading whitespace token.
// NOTE(review): only one WS token is skipped here (and in
// scanIgnoreWhitespace) — presumably the Scanner coalesces runs of
// whitespace into a single token; confirm against the scanner.
func (p *Parser) peek() (Token, string) {
	if p.buf.n == 0 {
		p.buf.tok, p.buf.lit = p.s.Scan()
		p.buf.n = 1
	}
	// Ignore whitespace
	if p.buf.tok == WS {
		p.buf.tok, p.buf.lit = p.s.Scan()
	}
	return p.buf.tok, p.buf.lit
}

// scanIgnoreWhitespace scans the next token, skipping a single leading
// whitespace token (see the note on peek).
func (p *Parser) scanIgnoreWhitespace() (tok Token, lit string) {
	tok, lit = p.scan()
	if tok == WS {
		tok, lit = p.scan()
	}
	return
}
// Parse parses the scanner's token stream into an expression tree.
func (p *Parser) Parse() (*Expression, error) {
	return p.parseExpression()
}

// parseExpression parses EXPRESSION := FACTOR [ADDOP EXPRESSION].
// NOTE(review): the right-recursive grammar makes additive operators
// right-associative (a-b-c groups as a-(b-c)); verify the evaluator
// compensates for this, otherwise subtraction/division of three or
// more terms evaluates incorrectly.
func (p *Parser) parseExpression() (exp *Expression, err error) {
	if tok, _ := p.peek(); tok == EOF {
		return nil, errors.New("Unexpected EOF")
	}
	exp = &Expression{}
	exp.factor, err = p.parseFactor()
	if err != nil {
		return
	}
	// Check for an additive operator
	if tok, _ := p.peek(); tok >= additive_begin && tok <= additive_end {
		exp.op, err = p.parseAddOp()
		if err != nil {
			return
		}
		exp.expression, err = p.parseExpression()
	}
	return
}

// parseFactor parses FACTOR := POWER [MULOP FACTOR]. The same
// right-associativity caveat as parseExpression applies.
func (p *Parser) parseFactor() (fac *Factor, err error) {
	if tok, _ := p.peek(); tok == EOF {
		return nil, errors.New("Unexpected EOF")
	}
	fac = &Factor{}
	fac.power, err = p.parsePower()
	if err != nil {
		return
	}
	// Check for a multiplicative operator
	if tok, _ := p.peek(); tok >= multiplicative_begin && tok <= multiplicative_end {
		fac.op, err = p.parseMultiplyOp()
		if err != nil {
			return
		}
		fac.factor, err = p.parseFactor()
	}
	return
}

// parsePower parses POWER := TERM [EXPOP POWER]. Right associativity is
// the conventional choice for exponentiation, so the recursion here is
// correct as written.
func (p *Parser) parsePower() (pow *Power, err error) {
	if tok, _ := p.peek(); tok == EOF {
		return nil, errors.New("Unexpected EOF")
	}
	pow = &Power{}
	pow.term, err = p.parseTerm()
	if err != nil {
		return
	}
	// Check for an exponentiation operator
	if tok, _ := p.peek(); tok >= exponentiation_begin && tok <= exponentiation_end {
		pow.op, err = p.parseExponentOp()
		if err != nil {
			return
		}
		pow.power, err = p.parsePower()
	}
	return
}
// parseTerm parses TERM := '(' EXPRESSION ')' | NUMBER.
func (p *Parser) parseTerm() (term *Term, err error) {
	if tok, _ := p.peek(); tok == EOF {
		return nil, errors.New("Unexpected EOF")
	}
	term = &Term{}
	// '(' EXPRESSION ')'
	if tok, _ := p.peek(); tok == LPAREN {
		p.scanIgnoreWhitespace()
		term.exp, err = p.parseExpression()
		if err != nil {
			// Propagate the inner parse error directly. Previously the
			// code fell through to the RPAREN check, masking the real
			// error with a misleading "Expected RPAREN".
			return term, err
		}
		if tok, _ = p.peek(); tok != RPAREN {
			return term, errors.New("Expected RPAREN")
		}
		p.scanIgnoreWhitespace()
	} else if tok == DIGITS {
		term.number, err = p.parseNumber()
	} else {
		// TODO add helper in tokens.go to convert token values to names for errors
		return nil, errors.New("Unexpected TOKEN")
	}
	return
}
// parseNumber parses NUMBER := DIGITS [DOT DIGITS] and stores both the
// textual form and an exact big.Rat value on the returned Number.
func (p *Parser) parseNumber() (num *Number, err error) {
	if tok, _ := p.peek(); tok == EOF {
		return nil, errors.New("Unexpected EOF")
	}
	num = &Number{}
	if tok, _ := p.peek(); tok != DIGITS {
		return nil, errors.New("Expected decimal or floating digits")
	}
	_, integral := p.scanIgnoreWhitespace()
	fractional := ""
	// Optional fractional part: DOT must be followed by more digits.
	if tok, _ := p.peek(); tok == DOT {
		p.scanIgnoreWhitespace()
		if tok, _ = p.peek(); tok != DIGITS {
			return nil, errors.New("Expected fractional digits")
		}
		_, fractional = p.scanIgnoreWhitespace()
	}
	num.str = integral
	if fractional != "" {
		num.str += "." + fractional
	}
	// big.Rat parses decimal strings like "3.14" exactly (no float64
	// rounding).
	num.val = new(big.Rat)
	if _, ok := num.val.SetString(num.str); !ok {
		return num, errors.New("Error parsing value")
	}
	return
}

// parseAddOp consumes the next token, which must be an additive
// operator (in the additive token range).
func (p *Parser) parseAddOp() (add *AddOp, err error) {
	if tok, _ := p.peek(); tok == EOF {
		return nil, errors.New("Unexpected EOF")
	}
	add = &AddOp{}
	if tok, _ := p.peek(); tok < additive_begin || tok > additive_end {
		return nil, errors.New("Expected additive operator")
	}
	add.op, _ = p.scanIgnoreWhitespace()
	return
}

// parseMultiplyOp consumes the next token, which must be a
// multiplicative operator (in the multiplicative token range).
func (p *Parser) parseMultiplyOp() (mul *MultiplyOp, err error) {
	if tok, _ := p.peek(); tok == EOF {
		return nil, errors.New("Unexpected EOF")
	}
	mul = &MultiplyOp{}
	if tok, _ := p.peek(); tok < multiplicative_begin || tok > multiplicative_end {
		return nil, errors.New("Expected multiplicative operator")
	}
	mul.op, _ = p.scanIgnoreWhitespace()
	return
}
// parseExponentOp recursively parses an ExponentOp starting at the next Token
func (p *Parser) parseExponentOp() (exp *ExponentOp, err error) {
if tok, _ := p.peek(); tok == EOF {
return nil, errors.New("Unexpected EOF")
}
exp = &ExponentOp{}
if tok, _ := p.peek(); tok < exponentiation_begin || tok > exponentiation_end {
return nil, errors.New("Expected exponentiation operator")
}
exp.op, _ = p.scanIgnoreWhitespace()
return
} | parser.go | 0.649023 | 0.456531 | parser.go | starcoder |
package wordvectors
import (
"bufio"
"encoding/gob"
"io"
"os"
"strconv"
"strings"
"sync"
log "github.com/sirupsen/logrus"
)
// mutex guards concurrent access to VectorMap.Map (see Vector).
var mutex = &sync.RWMutex{}

// WordVectorsFile is the default file name for the serialized vector map.
const WordVectorsFile = "wv.gob"

// Config contains configuration for fasttext word vectors
type Config struct {
	// WordVectorsFile is the path to the word vectors file
	WordVectorsFile string `mapstructure:"file_name"`
	// Truncate is a number between 0 and 1, which represents how many
	// words will be used from the word vector
	Truncate float32 `mapstructure:"truncate"`
	// SkipOOV makes the out-of-vocabulary words to be omitted
	SkipOOV bool `mapstructure:"skip_oov"`
}
// VectorMap contains a map of words and their vector, as well as the vector size
type VectorMap struct {
	Map map[string][]float64
	vectorSize int
	skipOOV bool
}
// serializableVectorMap mirrors VectorMap with exported fields so gob
// can encode/decode it (gob ignores unexported fields).
type serializableVectorMap struct {
	Map map[string][]float64
	VectorSize int
	SkipOOV bool
}
// SaveToFile gob-encodes the vector map into the named file, creating
// it if necessary and replacing any previous contents.
func (m *VectorMap) SaveToFile(name string) error {
	// O_TRUNC is required: without it, writing a smaller encoding over a
	// larger existing file leaves stale trailing bytes behind, which can
	// corrupt a later decode.
	file, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	defer file.Close()
	enc := gob.NewEncoder(file)
	return enc.Encode(&serializableVectorMap{m.Map, m.vectorSize, m.skipOOV})
}
// NewVectorMapFromFile loads a VectorMap previously written by
// SaveToFile from the named gob file.
func NewVectorMapFromFile(name string) (*VectorMap, error) {
	file, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	var sm serializableVectorMap
	if err := gob.NewDecoder(file).Decode(&sm); err != nil {
		// Return nil on failure instead of a partially decoded map; the
		// value is meaningless when the error is non-nil.
		return nil, err
	}
	return &VectorMap{sm.Map, sm.VectorSize, sm.SkipOOV}, nil
}
// Vector returns the vector for word. The second result reports whether
// the word is in the vocabulary; out-of-vocabulary words yield a fresh
// zero vector of length m.vectorSize.
func (m *VectorMap) Vector(word string) (vector []float64, inVocabulary bool) {
	// A map lookup is a read, so a read lock suffices; the previous
	// exclusive Lock needlessly serialized all concurrent lookups.
	mutex.RLock()
	vector, inVocabulary = m.Map[word]
	mutex.RUnlock()
	if inVocabulary {
		return
	}
	vector = make([]float64, m.vectorSize)
	return
}
// Vectors returns the vectors for a slice of words. When skipOOV is
// set, out-of-vocabulary words are omitted; otherwise they contribute
// zero vectors.
func (m *VectorMap) Vectors(words []string) [][]float64 {
	// Capacity should track the number of words, not the per-word vector
	// size used before (an unrelated quantity).
	vecs := make([][]float64, 0, len(words))
	for _, word := range words {
		emb, voc := m.Vector(word)
		if m.skipOOV && !voc {
			continue
		}
		vecs = append(vecs, emb)
	}
	return vecs
}
// SumVectors returns the element-wise sum of the given vectors as a
// new slice of length m.vectorSize.
func (m *VectorMap) SumVectors(vecs [][]float64) []float64 {
	sum := make([]float64, m.vectorSize)
	for _, vec := range vecs {
		for i := 0; i < len(vec); i++ {
			sum[i] += vec[i]
		}
	}
	return sum
}
// AverageVectors returns the element-wise average of the given vectors
// as a slice of length m.vectorSize. An empty input yields a zero
// vector (previously it produced a 0/0 division).
func (m *VectorMap) AverageVectors(vecs [][]float64) []float64 {
	sum := m.SumVectors(vecs)
	if len(vecs) == 0 {
		return sum
	}
	n := float64(len(vecs))
	// Bug fix: iterate over the summed vector (length m.vectorSize),
	// not over vecs (the number of input vectors). The two lengths
	// differ in general, so the old loop divided only a prefix — or
	// indexed past the end — of sum.
	for i := range sum {
		sum[i] /= n
	}
	return sum
}
// PadVectors returns vecs adjusted to exactly length entries: longer
// inputs are truncated, shorter ones are padded with zero vectors of
// size m.vectorSize.
func (m *VectorMap) PadVectors(vecs [][]float64, length int) [][]float64 {
	if len(vecs) > length {
		return vecs[:length]
	}
	for len(vecs) < length {
		vecs = append(vecs, make([]float64, m.vectorSize))
	}
	return vecs
}
// FlattenVectors concatenates an array of vectors into a single flat
// slice, preserving order.
func (m *VectorMap) FlattenVectors(vecs [][]float64) []float64 {
	// Pre-size the result to avoid repeated growth copies during append.
	total := 0
	for _, vec := range vecs {
		total += len(vec)
	}
	flat := make([]float64, 0, total)
	for _, vec := range vecs {
		flat = append(flat, vec...)
	}
	return flat
}
// stringSliceToFloat64Slice converts each decimal string in ar to a
// float64. Unparseable entries become 0 — ParseFloat errors are
// deliberately ignored to stay tolerant of malformed vector lines.
func stringSliceToFloat64Slice(ar []string) []float64 {
	out := make([]float64, len(ar))
	for i, s := range ar {
		f, _ := strconv.ParseFloat(s, 64)
		out[i] = f
	}
	return out
}
// NewVectorMap loads word vector from a file, up to
// trunc percentage of words and returns a new VectorMap
func NewVectorMap(config *Config) (*VectorMap, error) {
file, err := os.Open(config.WordVectorsFile)
if err != nil {
return nil, err
}
defer file.Close()
var numWords int
var vecSize int
vMap := make(map[string][]float64)
reader := bufio.NewReader(file)
num := 0
for {
line, err := reader.ReadString('\n')
if err == io.EOF {
break
}
line = strings.TrimSuffix(line, "\n")
if num == 0 {
lineVals := strings.SplitN(line, " ", 2)
numWordsStr, vecSizeStr := lineVals[0], lineVals[1]
numWords, _ = strconv.Atoi(numWordsStr)
vecSize, _ = strconv.Atoi(vecSizeStr)
log.Debugf("Vector file dimensions: %d, %d", numWords, vecSize)
} else if num <= int(float32(numWords)*config.Truncate) {
lineVals := strings.SplitN(line, " ", vecSize+1)
word, vector := lineVals[0], stringSliceToFloat64Slice(lineVals[1:])
vMap[word] = vector
} else {
break
}
num++
}
vectorMap := &VectorMap{vMap, vecSize, config.SkipOOV}
log.Debugf("Vector map length: %d", len(vectorMap.Map))
return vectorMap, nil
} | internal/clf/wordvectors/wordvectors.go | 0.692226 | 0.516047 | wordvectors.go | starcoder |
package docs
import (
"bytes"
"encoding/json"
"strings"
"github.com/alecthomas/template"
"github.com/swaggo/swag"
)
var doc = `{
"schemes": {{ marshal .Schemes }},
"swagger": "2.0",
"info": {
"description": "{{.Description}}",
"title": "{{.Title}}",
"contact": {},
"version": "{{.Version}}"
},
"host": "{{.Host}}",
"basePath": "{{.BasePath}}",
"paths": {
"/ping": {
"get": {
"description": "Ping",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"system"
],
"summary": "Ping",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/health.PingResponse"
}
}
}
}
},
"/prices": {
"get": {
"description": "Latest Prices",
"tags": [
"prices"
],
"summary": "Latest Prices",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/pricing.LatestPrices"
}
}
}
}
}
},
"/prices/config": {
"get": {
"description": "price config",
"tags": [
"prices"
],
"summary": "Price config",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/pricing.Config"
}
}
}
}
},
"post": {
"description": "update price config",
"tags": [
"prices"
],
"summary": "update price config",
"parameters": [
{
"description": "config object",
"name": "config",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/pricing.Config"
}
}
],
"responses": {
"202": {
"description": ""
}
}
}
},
"/proposals": {
"get": {
"description": "List proposals",
"consumes": [
"application/json"
],
"tags": [
"proposals"
],
"summary": "List proposals",
"parameters": [
{
"type": "string",
"description": "Consumer country",
"name": "from",
"in": "query"
},
{
"type": "string",
"description": "Provider ID",
"name": "provider_id",
"in": "query"
},
{
"type": "string",
"description": "Service type",
"name": "service_type",
"in": "query"
},
{
"type": "string",
"description": "Provider country",
"name": "location_country",
"in": "query"
},
{
"type": "string",
"description": "IP type (residential, datacenter, etc.)",
"name": "ip_type",
"in": "query"
},
{
"type": "string",
"description": "Access policy. When empty, returns only public proposals (default). Use 'all' to return all.",
"name": "access_policy",
"in": "query"
},
{
"type": "string",
"description": "Access policy source",
"name": "access_policy_source",
"in": "query"
},
{
"type": "number",
"description": "Minimum compatibility. When empty, will not filter by it.",
"name": "compatibility_min",
"in": "query"
},
{
"type": "number",
"description": "Maximum compatibility. When empty, will not filter by it.",
"name": "compatibility_max",
"in": "query"
},
{
"type": "number",
"description": "Minimal quality threshold. When empty will be defaulted to 0. Quality ranges from [0.0; 3.0]",
"name": "quality_min",
"in": "query"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/v3.Proposal"
}
}
}
}
}
},
"/proposals-metadata": {
"get": {
"description": "List proposals' metadata",
"consumes": [
"application/json"
],
"summary": "List proposals' metadata.",
"parameters": [
{
"type": "string",
"description": "Provider ID",
"name": "provider_id",
"in": "query"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/v3.Metadata"
}
}
}
}
}
},
"/status": {
"get": {
"description": "Status",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"system"
],
"summary": "Status",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/health.StatusResponse"
}
}
}
}
}
},
"definitions": {
"health.PingResponse": {
"type": "object",
"properties": {
"message": {
"type": "string"
}
}
},
"health.StatusResponse": {
"type": "object",
"properties": {
"cache_ok": {
"type": "boolean"
},
"db_ok": {
"type": "boolean"
}
}
},
"pricing.Config": {
"type": "object",
"properties": {
"base_prices": {
"$ref": "#/definitions/pricing.PriceByTypeUSD"
},
"country_modifiers": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/pricing.Modifier"
}
}
}
},
"pricing.LatestPrices": {
"type": "object",
"properties": {
"current_valid_until": {
"type": "string"
},
"defaults": {
"$ref": "#/definitions/pricing.PriceHistory"
},
"per_country": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/pricing.PriceHistory"
}
},
"previous_valid_until": {
"type": "string"
}
}
},
"pricing.Modifier": {
"type": "object",
"properties": {
"other": {
"type": "number"
},
"residential": {
"type": "number"
}
}
},
"pricing.Price": {
"type": "object",
"properties": {
"price_per_gib": {
"type": "integer"
},
"price_per_gib_human_readable": {
"type": "number"
},
"price_per_hour": {
"type": "integer"
},
"price_per_hour_human_readable": {
"type": "number"
}
}
},
"pricing.PriceByType": {
"type": "object",
"properties": {
"other": {
"$ref": "#/definitions/pricing.Price"
},
"residential": {
"$ref": "#/definitions/pricing.Price"
}
}
},
"pricing.PriceByTypeUSD": {
"type": "object",
"properties": {
"other": {
"$ref": "#/definitions/pricing.PriceUSD"
},
"residential": {
"$ref": "#/definitions/pricing.PriceUSD"
}
}
},
"pricing.PriceHistory": {
"type": "object",
"properties": {
"current": {
"$ref": "#/definitions/pricing.PriceByType"
},
"previous": {
"$ref": "#/definitions/pricing.PriceByType"
}
}
},
"pricing.PriceUSD": {
"type": "object",
"properties": {
"price_per_gib_usd": {
"type": "number"
},
"price_per_hour_usd": {
"type": "number"
}
}
},
"v3.AccessPolicy": {
"type": "object",
"properties": {
"id": {
"type": "string"
},
"source": {
"type": "string"
}
}
},
"v3.Contact": {
"type": "object",
"properties": {
"definition": {
"type": "object"
},
"type": {
"type": "string"
}
}
},
"v3.Location": {
"type": "object",
"properties": {
"asn": {
"type": "integer"
},
"city": {
"type": "string"
},
"continent": {
"type": "string"
},
"country": {
"type": "string"
},
"ip_type": {
"type": "string"
},
"isp": {
"type": "string"
}
}
},
"v3.Metadata": {
"type": "object",
"properties": {
"country": {
"type": "string"
},
"ip_type": {
"type": "string"
},
"isp": {
"type": "string"
},
"provider_id": {
"type": "string"
},
"service_type": {
"type": "string"
},
"updated_at": {
"type": "string"
},
"whitelist": {
"type": "boolean"
}
}
},
"v3.Proposal": {
"type": "object",
"properties": {
"access_policies": {
"type": "array",
"items": {
"$ref": "#/definitions/v3.AccessPolicy"
}
},
"compatibility": {
"type": "integer"
},
"contacts": {
"type": "array",
"items": {
"$ref": "#/definitions/v3.Contact"
}
},
"format": {
"type": "string"
},
"id": {
"type": "integer"
},
"location": {
"$ref": "#/definitions/v3.Location"
},
"provider_id": {
"type": "string"
},
"quality": {
"$ref": "#/definitions/v3.Quality"
},
"service_type": {
"type": "string"
},
"tags": {
"type": "array",
"items": {
"type": "string"
}
}
}
},
"v3.Quality": {
"type": "object",
"properties": {
"bandwidth": {
"description": "Bandwidth in Mbps.",
"type": "number"
},
"latency": {
"description": "Latency in ms.",
"type": "number"
},
"quality": {
"description": "Quality valuation from the oracle.",
"type": "number"
}
}
}
}
}`
// swaggerInfo holds the values substituted into the swagger template
// above when the spec is read.
type swaggerInfo struct {
	Version string
	Host string
	BasePath string
	Schemes []string
	Title string
	Description string
}
// SwaggerInfo holds exported Swagger Info so clients can modify it
var SwaggerInfo = swaggerInfo{
	Version: "3.0",
	Host: "",
	BasePath: "/api/v3",
	Schemes: []string{},
	Title: "Discovery API",
	Description: "Discovery API for Mysterium Network",
}
// s implements swag.Swagger by rendering the doc template.
type s struct{}
// ReadDoc renders the swagger template with the current SwaggerInfo.
// On any template error it falls back to returning the raw template.
func (s *s) ReadDoc() string {
	sInfo := SwaggerInfo
	// Escape newlines so the description stays valid inside the JSON string.
	sInfo.Description = strings.Replace(sInfo.Description, "\n", "\\n", -1)
	t, err := template.New("swagger_info").Funcs(template.FuncMap{
		"marshal": func(v interface{}) string {
			a, _ := json.Marshal(v)
			return string(a)
		},
	}).Parse(doc)
	if err != nil {
		return doc
	}
	var tpl bytes.Buffer
	if err := t.Execute(&tpl, sInfo); err != nil {
		return doc
	}
	return tpl.String()
}
func init() {
swag.Register(swag.Name, &s{})
} | docs/docs.go | 0.523664 | 0.426441 | docs.go | starcoder |
// Package image implements functions to validate that the fields of image entities being passed
// into the API meet our requirements.
package image
import (
"errors"
"fmt"
ipb "github.com/grafeas/grafeas/proto/v1/image_go_proto"
)
// ValidateBasis validates that an image basis has all its required
// fields filled in, returning one error per violation (empty slice when
// valid). Nested fingerprint errors are prefixed with "fingerprint.".
func ValidateBasis(b *ipb.Basis) []error {
	errs := []error{}
	if b.GetResourceUrl() == "" {
		errs = append(errs, errors.New("resource_url is required"))
	}
	if f := b.GetFingerprint(); f == nil {
		errs = append(errs, errors.New("fingerprint is required"))
	} else {
		for _, err := range validateFingerprint(f) {
			errs = append(errs, fmt.Errorf("fingerprint.%s", err))
		}
	}
	return errs
}
// validateFingerprint checks that a fingerprint carries a v1 name and a
// non-empty v2 blob without empty entries; it returns one error per
// violation (empty slice when valid).
func validateFingerprint(f *ipb.Fingerprint) []error {
	errs := []error{}
	if f.GetV1Name() == "" {
		errs = append(errs, errors.New("v1_name is required"))
	}
	blob := f.GetV2Blob()
	switch {
	case blob == nil:
		errs = append(errs, errors.New("v2_blob is required"))
	case len(blob) == 0:
		errs = append(errs, errors.New("v2_blob requires at least 1 element"))
	default:
		for i, b := range blob {
			if b == "" {
				errs = append(errs, fmt.Errorf("v2_blob[%d] cannot be empty", i))
			}
		}
	}
	return errs
}
// ValidateDetails validates that a details has all its required fields
// filled in, returning one error per violation. Nested errors are
// prefixed with "derived_image.".
func ValidateDetails(d *ipb.Details) []error {
	errs := []error{}
	if d := d.GetDerivedImage(); d == nil {
		errs = append(errs, errors.New("derived_image is required"))
	} else {
		for _, err := range validateDerived(d) {
			errs = append(errs, fmt.Errorf("derived_image.%s", err))
		}
	}
	return errs
}
// validateDerived checks a derived image: its fingerprint is required
// and each layer must itself validate. Errors are prefixed with the
// field path ("fingerprint." / "layer_info[i].").
func validateDerived(d *ipb.Derived) []error {
	errs := []error{}
	if f := d.GetFingerprint(); f == nil {
		errs = append(errs, errors.New("fingerprint is required"))
	} else {
		for _, err := range validateFingerprint(f) {
			errs = append(errs, fmt.Errorf("fingerprint.%s", err))
		}
	}
	for i, l := range d.GetLayerInfo() {
		if l == nil {
			errs = append(errs, fmt.Errorf("layer_info[%d] layer cannot be null", i))
		} else {
			for _, err := range validateLayer(l) {
				errs = append(errs, fmt.Errorf("layer_info[%d].%s", i, err))
			}
		}
	}
	return errs
}
func validateLayer(l *ipb.Layer) []error {
errs := []error{}
if l.GetDirective() == ipb.Layer_DIRECTIVE_UNSPECIFIED {
errs = append(errs, errors.New("directive is required"))
}
return errs
} | go/v1/api/validators/image/image.go | 0.627837 | 0.404272 | image.go | starcoder |
package main
import (
"math"
"strconv"
"github.com/TomasCruz/projecteuler"
)
/*
Problem 37; Truncatable primes
The number 3797 has an interesting property. Being prime itself, it is possible to continuously remove digits
from left to right, and remain prime at each stage: 3797, 797, 97, and 7.
Similarly we can work from right to left: 3797, 379, 37, and 3.
Find the sum of the only eleven primes that are both truncatable from left to right and right to left.
NOTE: 2, 3, 5, and 7 are not considered to be truncatable primes.
*/
// main times the solution via the projecteuler helper.
func main() {
	// I'm cheating here, as I know the solution and limit to truncatable primes (<10^8).
	// So, I'm finding right truncatables, then left truncatable not greater than biggest right truncatable,
	// and sum numbers with both properties
	projecteuler.Timed(calc)
}

// calc computes the sum of the eleven primes truncatable from both
// sides, excluding the single-digit primes 2, 3, 5, 7 per the problem
// statement.
func calc(args ...interface{}) (result string, err error) {
	// A sieve up to 10^5 supplies trial divisors up to sqrt of any
	// candidate below 10^10, comfortably covering the <10^8 bound above.
	limit := 100000
	primes, primeSet := projecteuler.PrimeSet(limit)
	rtp := rightTruncatablePrimes(primes, primeSet)
	biggest := biggestInSet(rtp)
	ltp := leftTruncatablePrimes(primes, primeSet, biggest)
	// Sum the intersection of the two sets.
	sum := 0
	for x := range rtp {
		if _, ok := ltp[x]; ok {
			sum += x
		}
	}
	sum -= 17 // deduce one digits results as per requirement
	result = strconv.Itoa(sum)
	return
}
// rightTruncatablePrimes returns the set of primes that remain prime
// under repeated removal of the rightmost digit, built breadth first:
//   - the first digit must itself be prime, i.e. in {2,3,5,7};
//   - later digits must be in {1,3,7,9}, since an even digit or 5 would
//     make some right-truncation divisible by 2 or 5;
//   - an (x+1)-digit right truncatable truncates to an x-digit one, so
//     each generation extends the previous generation by one digit.
func rightTruncatablePrimes(primes []int, primeSet map[int]struct{}) (rtp map[int]struct{}) {
	rtp = map[int]struct{}{2: {}, 3: {}, 5: {}, 7: {}}
	lastBatch := []int{2, 3, 5, 7}
	possibleDigits := []int{1, 3, 7, 9}
	for len(lastBatch) > 0 {
		// The freshly built batch simply becomes the next generation;
		// this replaces the previous numAdded/make/copy bookkeeping.
		var nextBatch []int
		for _, prev := range lastBatch {
			base := 10 * prev
			for _, digit := range possibleDigits {
				curr := base + digit
				if isPrime(curr, primes, primeSet) {
					rtp[curr] = struct{}{}
					nextBatch = append(nextBatch, curr)
				}
			}
		}
		lastBatch = nextBatch
	}
	return
}
// leftTruncatablePrimes returns a set of primes that stay prime under
// repeated removal of the leftmost digit, generated breadth first until
// a candidate exceeds biggest:
//   - the last digit must itself be prime, i.e. in {2,3,5,7};
//   - a prepended digit must be non-zero;
//   - an (x+1)-digit left truncatable truncates to an x-digit one.
// Members above biggest can still appear within the final generation;
// that is harmless for callers that intersect the result with a set
// bounded by biggest.
func leftTruncatablePrimes(primes []int, primeSet map[int]struct{}, biggest int) (ltp map[int]struct{}) {
	ltp = map[int]struct{}{2: {}, 3: {}, 5: {}, 7: {}}
	lastPowerTen := 1
	lastBatch := []int{2, 3, 5, 7}
	limitExceded := false
	// Also stop when a generation produces no primes: previously an
	// empty batch before the limit was exceeded spun this loop forever.
	for !limitExceded && len(lastBatch) > 0 {
		lastPowerTen *= 10
		var nextBatch []int
		for _, prev := range lastBatch {
			for j := 1; j < 10; j++ {
				curr := prev + j*lastPowerTen
				if curr > biggest {
					limitExceded = true
				}
				if isPrime(curr, primes, primeSet) {
					ltp[curr] = struct{}{}
					nextBatch = append(nextBatch, curr)
				}
			}
		}
		lastBatch = nextBatch
	}
	return
}
// biggestInSet returns the largest key in numSet, or 0 for an empty
// set. The previous math.MinInt32 sentinel was wrong on 64-bit
// platforms: any set whose members are all below -2^31 returned the
// sentinel instead of its true maximum.
func biggestInSet(numSet map[int]struct{}) int {
	first := true
	var ret int
	for x := range numSet {
		if first || x > ret {
			ret = x
			first = false
		}
	}
	return ret
}
// isPrime reports whether x is prime by trial division against the
// supplied primes, memoizing positive results in primeSet. The caller
// is responsible for providing a primes slice covering every prime up
// to sqrt(x); otherwise composites can be misreported as prime.
func isPrime(x int, primes []int, primeSet map[int]struct{}) bool {
	// Guard the degenerate range: previously x < 2 (e.g. 1) was
	// reported — and memoized — as prime.
	if x < 2 {
		return false
	}
	if _, ok := primeSet[x]; ok {
		return true
	}
	rt := int(math.Sqrt(float64(x)))
	for i := 0; i < len(primes) && primes[i] <= rt; i++ {
		if x%primes[i] == 0 {
			return false
		}
	}
	primeSet[x] = struct{}{}
	return true
}
package mdutil
import (
"fmt"
"strings"
"github.com/gomarkdown/markdown/ast"
)
// RenderCode renders one-line code as inline markdown code (wrapped in
// backticks).
func RenderCode(code string) string {
	return "`" + code + "`"
}
// RenderCodeNode renders an *ast.Code node's literal as inline markdown
// code via RenderCode.
func RenderCodeNode(node *ast.Code) string {
	return RenderCode(string(node.Literal))
}
// RenderCodeBlock renders a multiline code block as fenced markdown,
// tagging the opening fence with the given language.
func RenderCodeBlock(language, block string) string {
	return "```" + language + "\n" + block + "\n```"
}
// RenderCodeBlockNode renders an *ast.CodeBlock node as fenced markdown,
// using the node's Info string as the language tag.
func RenderCodeBlockNode(node *ast.CodeBlock) string {
	return RenderCodeBlock(string(node.Info), string(node.Literal))
}
// RenderParagraphNode flattens an *ast.Paragraph into markdown text,
// re-emitting emphasis (_x_), strong (**x**) and inline code spans.
// Unhandled node kinds are descended into, contributing only their text
// children. Returns "" for a nil node.
func RenderParagraphNode(node *ast.Paragraph) (result string) {
	if node == nil {
		return ""
	}
	ast.Walk(node, ast.NodeVisitorFunc(func(node ast.Node, entering bool) ast.WalkStatus {
		if !entering {
			return ast.GoToNext
		}
		switch v := node.(type) {
		case *ast.Text:
			result += string(v.Literal)
		case *ast.Emph:
			// NOTE(review): assumes the first child of Emph/Strong is a
			// leaf Text node; AsLeaf() would return nil (and this would
			// panic) for container children — confirm inputs.
			result += "_" + string(ast.GetFirstChild(v).AsLeaf().Literal) + "_"
		case *ast.Strong:
			result += "**" + string(ast.GetFirstChild(v).AsLeaf().Literal) + "**"
		case *ast.Code:
			result += RenderCodeNode(v)
		default:
			return ast.GoToNext
		}
		return ast.SkipChildren
	}))
	return result
}
// RenderBlockQuote renders content as a markdown block quote; when
// multiline is true the ">>>" multiline syntax is used instead of ">".
func RenderBlockQuote(content string, multiline bool) string {
	prefix := "> "
	if multiline {
		prefix = ">>> "
	}
	return prefix + content
}
// RenderBlockQuoteNode renders an *ast.BlockQuote as a single-line
// markdown quote. Only the quote's first child is considered, and only
// paragraph or code-block children produce content; anything else
// yields an empty quote.
func RenderBlockQuoteNode(node *ast.BlockQuote) (res string) {
	switch v := ast.GetFirstChild(node).(type) {
	case *ast.Paragraph:
		res = RenderParagraphNode(v)
	case *ast.CodeBlock:
		res = RenderCodeBlockNode(v)
	}
	return RenderBlockQuote(res, false)
}
// HintKindMapping maps a hint-kind marker (the first line of a quoted
// paragraph) to the label emitted in its place.
type HintKindMapping map[string]string
// RenderHintNode renders an *ast.BlockQuote as a hint: when the quote's
// first paragraph starts with a known kind marker, the marker line is
// replaced by "<mapped label>: " plus the marker's second line, and the
// result is rendered as a block quote.
// NOTE(review): only split[1] is kept, so any text on the third and
// later lines of the marker literal is dropped — confirm this is the
// intended behavior for multi-line hints.
func RenderHintNode(node *ast.BlockQuote, kindMappings HintKindMapping) (res string) {
	if p, ok := ast.GetFirstChild(node).(*ast.Paragraph); ok {
		prefixed := false
		for k := range kindMappings {
			if strings.HasPrefix(string(ast.GetFirstChild(p).AsLeaf().Literal), k) {
				prefixed = true
			}
		}
		if prefixed {
			kind := string(ast.GetFirstChild(p).AsLeaf().Literal)
			split := strings.Split(kind, "\n")
			kind = split[0]
			literal := kindMappings[kind] + ": "
			if len(split) > 1 {
				literal += split[1]
			}
			ast.GetFirstChild(p).AsLeaf().Literal = []byte(literal)
		}
	}
	return RenderBlockQuoteNode(node)
}
// RenderStringNode renders node into a string.
func RenderStringNode(node ast.Node) string {
switch v := node.(type) {
case *ast.Paragraph:
return RenderParagraphNode(v)
case *ast.CodeBlock:
return RenderCodeBlockNode(v)
case *ast.BlockQuote:
return RenderBlockQuoteNode(v)
}
return ""
} | pkg/markdown/render.go | 0.738858 | 0.796213 | render.go | starcoder |
package main
import "fmt"
// main demonstrates Go's built-in types, the zero-value concept,
// declaration styles and type conversion, printing each example.
func main() {
	// --------------
	// Built-in types
	// --------------
	// Type provides integrity and readability.
	// - What is the amount of memory that we allocate?
	// - What does that memory represent?
	// Type can be specific such as int32 or int64.
	// For example,
	// - uint8 contains a base 10 number using one byte of memory
	// - int32 contains a base 10 number using 4 bytes of memory.
	// When we declare a type without being very specific, such as uint or int, it gets mapped
	// based on the architecture we are building the code against.
	// On a 64-bit OS, int will map to int64. Similarly, on a 32 bit OS, it becomes int32.
	// The word size is the number of bytes in a word, which matches our address size.
	// For example, in 64-bit architecture, the word size is 64 bit (8 bytes), address size is 64
	// bit then our integer should be 64 bit.
	// ------------------
	// Zero value concept
	// ------------------
	// Every single value we create must be initialized. If we don't specify it, it will be set to
	// the zero value. The entire allocation of memory, we reset that bit to 0.
	// - Boolean false
	// - Integer 0
	// - Floating Point 0
	// - Complex 0i
	// - String "" (empty string)
	// - Pointer nil
	// Strings are a series of uint8 types.
	// A string is a two word data structure: first word represents a pointer to a backing array, the
	// second word represents its length.
	// If it is a zero value then the first word is nil, the second word is 0.
	// ----------------------
	// Declare and initialize
	// ----------------------
	// var is the only guarantee to initialize a zero value for a type.
	var a int
	var b string
	var c float64
	var d bool
	fmt.Printf("var a int \t %T [%v]\n", a, a)
	fmt.Printf("var b string \t %T [%v]\n", b, b)
	fmt.Printf("var c float64 \t %T [%v]\n", c, c)
	fmt.Printf("var d bool \t %T [%v]\n\n", d, d)
	// Using the short variable declaration operator, we can define and initialize at the same time.
	aa := 10
	bb := "hello" // 1st word points to a array of characters, 2nd word is 5 bytes
	cc := 3.14159
	dd := true
	fmt.Printf("aa := 10 \t %T [%v]\n", aa, aa)
	fmt.Printf("bb := \"hello\" \t %T [%v]\n", bb, bb)
	fmt.Printf("cc := 3.14159 \t %T [%v]\n", cc, cc)
	fmt.Printf("dd := true \t %T [%v]\n\n", dd, dd)
	// ---------------------
	// Conversion vs casting
	// ---------------------
	// Go doesn't have casting, but conversion.
	// Instead of telling a compiler to pretend to have some more bytes, we have to allocate more
	// memory.
	// Specify type and perform a conversion.
	aaa := int32(10)
	fmt.Printf("aaa := int32(10) %T [%v]\n\n", aaa, aaa)
	// The labels below previously said "aaa := int32(10)" — a
	// copy-paste mistake that mislabeled the string demonstration.
	sss := "hello bobr"
	fmt.Printf("sss := \"hello bobr\" %T [%v] addr %v\n", sss, sss, &sss)
	sss += "z"
	fmt.Printf("sss += \"z\" \t %T [%v] addr %v\n", sss, sss, &sss)
}
package render
import (
"image"
"image/color"
"math"
"github.com/oakmound/oak/alg/floatgeom"
"github.com/oakmound/oak/oakerr"
)
// A Polygon is a renderable represented by a set of in-order points on
// a plane. It embeds a Sprite whose pixel buffer covers the polygon's
// bounding rectangle (Rect2).
type Polygon struct {
	*Sprite
	Rect2 floatgeom.Rect2
	points []floatgeom.Point2
}
// NewStrictPolygon will draw a polygon of points within a given rectangle,
// and if the input points lie outside of that rectangle the polygon will clip
// into and not be drawn outside of that border. At least three points
// are required; fewer returns an InsufficientInputs error.
func NewStrictPolygon(bounds floatgeom.Rect2, points ...floatgeom.Point2) (*Polygon, error) {
	if len(points) < 3 {
		return nil, oakerr.InsufficientInputs{AtLeast: 3, InputName: "points"}
	}
	// The sprite is positioned at the rectangle's minimum corner and its
	// pixel buffer spans the rectangle's width and height.
	return &Polygon{
		Sprite: NewSprite(bounds.Min.X(), bounds.Min.Y(),
			image.NewRGBA(image.Rect(0, 0, int(bounds.W()), int(bounds.H())))),
		Rect2: bounds,
		points: points,
	}, nil
}
// NewPolygon takes in a set of points and returns a polygon. At least three points
// must be provided.
func NewPolygon(points ...floatgeom.Point2) (*Polygon, error) {
	if len(points) < 3 {
		return nil, oakerr.InsufficientInputs{AtLeast: 3, InputName: "points"}
	}
	// Calculate the bounding rectangle of the polygon by
	// finding the maximum and minimum x and y values of the given points
	return NewStrictPolygon(floatgeom.NewBoundingRect2(points...), points...)
}
// UpdatePoints resets the points of this polygon to be the passed in points
func (pg *Polygon) UpdatePoints(points ...floatgeom.Point2) error {
if len(points) < 3 {
return oakerr.InsufficientInputs{AtLeast: 3, InputName: "points"}
}
pg.points = points
pg.Rect2 = floatgeom.NewBoundingRect2(points...)
return nil
}
// Fill fills the inside of this polygon with the input color
func (pg *Polygon) Fill(c color.Color) {
	// Reset the rgba of the polygon
	bounds := pg.r.Bounds()
	rect := image.Rect(0, 0, bounds.Max.X, bounds.Max.Y)
	rgba := image.NewRGBA(rect)
	// Contains works in world coordinates, so offset each pixel by the
	// bounding rect's min corner before testing.
	minx := pg.Rect2.Min.X()
	miny := pg.Rect2.Min.Y()
	for x := 0; x < bounds.Max.X; x++ {
		for y := 0; y < bounds.Max.Y; y++ {
			if pg.Contains(float64(x)+minx, float64(y)+miny) {
				rgba.Set(x, y, c)
			}
		}
	}
	pg.r = rgba
}
// GetOutline returns a set of lines of the given color along this polygon's outline
func (pg *Polygon) GetOutline(c color.Color) *CompositeM {
	sl := NewCompositeM()
	// j trails i by one vertex, starting at the last vertex so that the
	// closing edge (last -> first) is included.
	j := len(pg.points) - 1
	for i, p2 := range pg.points {
		p1 := pg.points[j]
		// Each line renderable is offset to the min corner of its segment.
		MinX := math.Min(p1.X(), p2.X())
		MinY := math.Min(p1.Y(), p2.Y())
		sl.AppendOffset(NewLine(p1.X(), p1.Y(), p2.X(), p2.Y(), c), floatgeom.Point2{MinX, MinY})
		j = i
	}
	return sl
}
// FillInverse colors this polygon's exterior the given color
//
// NOTE(review): unlike Fill, the containment test here is not offset by
// Rect2.Min, and it uses ConvexContains rather than Contains — confirm
// whether that is intentional for non-convex or offset polygons.
func (pg *Polygon) FillInverse(c color.Color) {
	bounds := pg.r.Bounds()
	rect := image.Rect(0, 0, bounds.Max.X, bounds.Max.Y)
	rgba := image.NewRGBA(rect)
	for x := 0; x < bounds.Max.X; x++ {
		for y := 0; y < bounds.Max.Y; y++ {
			if !pg.ConvexContains(float64(x), float64(y)) {
				rgba.Set(x, y, c)
			}
		}
	}
	pg.r = rgba
}
// Todo: almost all of this junk below should be in alg, under floatgeom or something.

// Contains returns whether or not the current Polygon contains the passed in Point.
// It is the default containment function, versus wrapping and convex.
//
// This is a standard ray-casting test: each edge that straddles the
// horizontal line through y toggles `contains` when the crossing lies to
// the right of x. It works for non-convex polygons.
func (pg *Polygon) Contains(x, y float64) (contains bool) {
	// Fast reject: outside the bounding rectangle.
	if !pg.Rect2.Contains(floatgeom.Point2{x, y}) {
		return
	}
	j := len(pg.points) - 1
	for i := 0; i < len(pg.points); i++ {
		tp1 := pg.points[i]
		tp2 := pg.points[j]
		if (tp1.Y() > y) != (tp2.Y() > y) { // Three comparisons
			if x < (tp2.X()-tp1.X())*(y-tp1.Y())/(tp2.Y()-tp1.Y())+tp1.X() { // One Comparison, Four add/sub, Two mult/div
				contains = !contains
			}
		}
		j = i
	}
	return
}
// ConvexContains returns whether the given point is contained by the input polygon.
// It assumes the polygon is convex. It outperforms the alternatives.
//
// The point is inside a convex polygon iff it lies on the same side of
// every edge. Points exactly on an edge (cur == 0) are treated as outside.
func (pg *Polygon) ConvexContains(x, y float64) bool {
	p := floatgeom.Point2{x, y}
	// Fast reject: outside the bounding rectangle.
	if !pg.Rect2.Contains(p) {
		return false
	}
	prev := 0
	for i := 0; i < len(pg.points); i++ {
		tp1 := pg.points[i]
		tp2 := pg.points[(i+1)%len(pg.points)]
		// Edge vector and vector from the edge start to the query point.
		tp3 := tp2.Sub(tp1)
		tp4 := p.Sub(tp1)
		cur := getSide(tp3, tp4)
		if cur == 0 {
			// Collinear with an edge: counted as not contained.
			return false
		} else if prev == 0 {
			// First non-zero side seen; nothing to compare against yet.
		} else if prev != cur {
			// Side flipped: the point is outside this edge.
			return false
		}
		prev = cur
	}
	return true
}
func getSide(a, b floatgeom.Point2) int {
x := a.X()*b.Y() - a.Y()*b.X()
if x == 0 {
return 0
} else if x < 1 {
return -1
} else {
return 1
}
} | render/polygon.go | 0.79166 | 0.67452 | polygon.go | starcoder |
package core
import "math"
const STEP_MAX = 26 /* 26*2 = 52 bits. */

/* Limits from EPSG:900913 / EPSG:3785 / OSGEO:41001 */
const LAT_MIN = -85.05112878
const LAT_MAX = 85.05112878
const LONG_MIN = -180
const LONG_MAX = 180

// D_R converts degrees to radians when multiplied.
const D_R = (math.Pi / 180.0)

/// @brief Earth's quatratic mean radius for WGS-84
const EARTH_RADIUS_IN_METERS = 6372797.560856

// Mercator projection extent in meters, used to size the geohash precision.
const MERCATOR_MAX = 20037726.37
const MERCATOR_MIN = -20037726.37

// HashFix52Bits is an interleaved geohash aligned to the full 52 bits.
type HashFix52Bits = uint64

// HashVarBits is an interleaved geohash of variable precision.
type HashVarBits = uint64

// HashBits is an interleaved geohash: `bits` holds step*2 interleaved
// latitude/longitude bits, `step` is the per-axis precision.
type HashBits struct {
	bits uint64
	step uint8
}

// HashRange is a closed interval over one coordinate axis.
type HashRange struct {
	min float64
	max float64
}

// HashArea is a decoded geohash cell: the hash plus the lon/lat intervals
// it covers.
type HashArea struct {
	hash      HashBits
	longitude HashRange
	latitude  HashRange
}

// HashRadius is the result of a radius query: the center cell, its decoded
// area, and its eight neighboring cells.
type HashRadius struct {
	hash      HashBits
	area      HashArea
	neighbors HashNeighbors
}

// HashNeighbors holds the geohashes of the eight cells surrounding a cell.
type HashNeighbors struct {
	north      HashBits
	east       HashBits
	west       HashBits
	south      HashBits
	north_east HashBits
	south_east HashBits
	north_west HashBits
	south_west HashBits
}
// deg_rad converts an angle in degrees to radians.
func deg_rad(ang float64) float64 {
	return ang * (math.Pi / 180.0)
}

// rad_deg converts an angle in radians to degrees.
func rad_deg(ang float64) float64 {
	return ang / (math.Pi / 180.0)
}
// hashEncodeWGS84 encodes a WGS-84 lon/lat pair into hash at the given
// per-axis precision (step). Returns 1 on success, 0 on failure.
func hashEncodeWGS84(longitude float64, latitude float64, step uint8, hash *HashBits) int {
	return hashEncodeType(longitude, latitude, step, hash)
}

// hashEncodeType encodes using the default (WGS-84 / Web Mercator) coordinate
// ranges. Returns 1 on success, 0 on failure.
func hashEncodeType(longitude float64, latitude float64, step uint8, hash *HashBits) int {
	r := [2]HashRange{}
	// r[0] is the longitude range, r[1] the latitude range.
	hashGetCoordRange(&r[0], &r[1])
	return hashEncode(&r[0], &r[1], longitude, latitude, step, hash)
}

// hashGetCoordRange fills the output ranges with the valid Web Mercator
// longitude/latitude limits (the poles are not representable).
func hashGetCoordRange(long_range *HashRange, lat_range *HashRange) {
	long_range.max = LONG_MAX
	long_range.min = LONG_MIN
	lat_range.max = LAT_MAX
	lat_range.min = LAT_MIN
}
// hashEncode quantizes (longitude, latitude) within the given ranges into
// `step` bits per axis and interleaves them into hash.bits. Returns 1 on
// success, 0 if the coordinates are out of range (C-style status code).
func hashEncode(long_range *HashRange, lat_range *HashRange, longitude float64, latitude float64, step uint8,
	hash *HashBits) int {
	// Hard-coded Web Mercator limits, checked in addition to the caller's
	// ranges below.
	if longitude > 180 || longitude < -180 ||
		latitude > 85.05112878 || latitude < -85.05112878 {
		return 0
	}
	hash.bits = 0
	hash.step = step
	if latitude < lat_range.min || latitude > lat_range.max ||
		longitude < long_range.min || longitude > long_range.max {
		return 0
	}
	// Normalize each coordinate to [0, 1) within its range.
	var lat_offset float64
	var long_offset float64
	lat_offset =
		(latitude - lat_range.min) / (lat_range.max - lat_range.min)
	long_offset =
		(longitude - long_range.min) / (long_range.max - long_range.min)
	/* convert to fixed point based on the step size */
	mask := 1 << step
	lat_offset = lat_offset * float64(mask)
	long_offset = long_offset * float64(mask)
	// Latitude occupies the even bits, longitude the odd bits.
	hash.bits = interleave64(int32(lat_offset), int32(long_offset))
	return 1
}
// interleave64 spreads the bits of latOffset into the even bit positions and
// the bits of lngOffset into the odd bit positions of the result (a Morton /
// Z-order interleave), using the classic mask-and-shift doubling trick.
func interleave64(latOffset int32, lngOffset int32) uint64 {
	masks := [5]uint64{
		0x0000FFFF0000FFFF,
		0x00FF00FF00FF00FF,
		0x0F0F0F0F0F0F0F0F,
		0x3333333333333333,
		0x5555555555555555,
	}
	shifts := [5]uint{16, 8, 4, 2, 1}
	// spread moves bit i of v to bit 2i.
	spread := func(v uint64) uint64 {
		for i := 0; i < 5; i++ {
			v = (v | (v << shifts[i])) & masks[i]
		}
		return v
	}
	return spread(uint64(latOffset)) | (spread(uint64(lngOffset)) << 1)
}
// deinterleave64 is the inverse of interleave64: it compacts the even bits of
// `interleaved` into the low 32 bits of the result and the odd bits into the
// high 32 bits.
func deinterleave64(interleaved uint64) uint64 {
	masks := [6]uint64{
		0x5555555555555555,
		0x3333333333333333,
		0x0F0F0F0F0F0F0F0F,
		0x00FF00FF00FF00FF,
		0x0000FFFF0000FFFF,
		0x00000000FFFFFFFF,
	}
	shifts := [6]uint{0, 1, 2, 4, 8, 16}
	// compact moves bit 2i of v to bit i, discarding odd bits.
	compact := func(v uint64) uint64 {
		for i := 0; i < 6; i++ {
			v = (v | (v >> shifts[i])) & masks[i]
		}
		return v
	}
	return compact(interleaved) | (compact(interleaved>>1) << 32)
}
// hashAlign52Bits left-justifies a variable-precision hash into the full
// 52-bit space so hashes of different steps can be compared.
func hashAlign52Bits(hash HashBits) uint64 {
	bits := hash.bits
	bits <<= (52 - hash.step*2)
	return bits
}

// decodehash decodes a full-precision (STEP_MAX) hash, given as a float64,
// into xy = [longitude, latitude]. A float64 represents integers up to 2^53
// exactly, so a 52-bit hash survives the conversion.
func decodehash(bits float64, xy *[2]float64) bool {
	hash := HashBits{bits: uint64(bits), step: STEP_MAX}
	return hashDecodeToLongLatWGS84(hash, xy)
}

// hashDecodeToLongLatWGS84 decodes hash into xy = [longitude, latitude]
// using the WGS-84 ranges.
func hashDecodeToLongLatWGS84(hash HashBits, xy *[2]float64) bool {
	return hashDecodeToLongLatType(hash, xy)
}

// hashDecodeToLongLatType decodes hash to its cell's center point.
func hashDecodeToLongLatType(hash HashBits, xy *[2]float64) bool {
	area := new(HashArea)
	if xy == nil || !hashDecodeType(hash, area) {
		return false
	}
	return hashDecodeAreaToLongLat(area, xy)
}

// hashDecodeType decodes hash into an area using the default coordinate
// ranges.
func hashDecodeType(hash HashBits, area *HashArea) bool {
	r := [2]HashRange{}
	hashGetCoordRange(&r[0], &r[1])
	return hashDecode(r[0], r[1], hash, area)
}

// hashDecodeWGS84 decodes hash into an area using the WGS-84 ranges.
func hashDecodeWGS84(hash HashBits, area *HashArea) bool {
	return hashDecodeType(hash, area)
}

// hashDecodeAreaToLongLat writes the center point of area into
// xy = [longitude, latitude].
func hashDecodeAreaToLongLat(area *HashArea, xy *[2]float64) bool {
	if xy == nil {
		return false
	}
	xy[0] = (area.longitude.min + area.longitude.max) / 2
	xy[1] = (area.latitude.min + area.latitude.max) / 2
	return true
}

// hashIsZero reports whether hash is the zero value (used as "invalid").
func hashIsZero(hash HashBits) bool {
	return hash.bits == 0 && hash.step == 0
}

// rangeIsZero reports whether r is the zero value (used as "invalid").
func rangeIsZero(r HashRange) bool {
	return r.max == 0 && r.min == 0
}
// hashDecode is the inverse of hashEncode: it deinterleaves hash.bits into
// per-axis cell indices and scales them back into the given coordinate
// ranges, filling area with the cell's lon/lat intervals. Returns false on
// invalid input.
func hashDecode(long_range HashRange, lat_range HashRange, hash HashBits, area *HashArea) bool {
	if hashIsZero(hash) || area == nil || rangeIsZero(lat_range) || rangeIsZero(long_range) {
		return false
	}
	area.hash = hash
	step := hash.step
	hash_sep := deinterleave64(hash.bits)
	lat_scale := lat_range.max - lat_range.min
	long_scale := long_range.max - long_range.min
	// Low 32 bits hold the latitude index, high 32 bits the longitude index.
	ilato := uint32(hash_sep)
	ilono := uint32(hash_sep >> 32)
	// Cell i at precision `step` covers [i/2^step, (i+1)/2^step) of the range.
	area.latitude.min = lat_range.min + (float64(ilato)*1.0/float64(uint64(1)<<step))*lat_scale
	area.latitude.max = lat_range.min + ((float64(ilato)+1)*1.0/float64(uint64(1)<<step))*lat_scale
	area.longitude.min = long_range.min + (float64(ilono)*1.0/float64(uint64(1)<<step))*long_scale
	area.longitude.max = long_range.min + ((float64(ilono)+1)*1.0/float64(uint64(1)<<step))*long_scale
	return true
}
// hashGetDistance returns the great-circle distance in meters between two
// lon/lat points (degrees), using the haversine formula with the WGS-84
// mean earth radius.
func hashGetDistance(lon1d float64, lat1d float64, lon2d float64, lat2d float64) float64 {
	var lat1r, lon1r, lat2r, lon2r, u, v float64
	lat1r = deg_rad(lat1d)
	lon1r = deg_rad(lon1d)
	lat2r = deg_rad(lat2d)
	lon2r = deg_rad(lon2d)
	u = math.Sin((lat2r - lat1r) / 2)
	v = math.Sin((lon2r - lon1r) / 2)
	return 2.0 * EARTH_RADIUS_IN_METERS *
		math.Asin(math.Sqrt(u*u+math.Cos(lat1r)*math.Cos(lat2r)*v*v))
}
// hashGetAreasByRadiusWGS84 returns the 3x3 block of geohash cells covering
// a circle of radius_meters around (longitude, latitude).
func hashGetAreasByRadiusWGS84(longitude float64, latitude float64, radius_meters float64) HashRadius {
	return hashGetAreasByRadius(longitude, latitude, radius_meters)
}

// hashGetAreasByRadius picks a precision whose center cell plus neighbors
// cover the search circle, dropping to a coarser precision when the circle
// spills past the immediate neighbors, and zeroing neighbors that cannot
// intersect the circle's bounding box.
func hashGetAreasByRadius(longitude float64, latitude float64, radius_meters float64) HashRadius {
	var long_range, lat_range HashRange
	var radius HashRadius
	var hash HashBits
	var neighbors HashNeighbors
	var area HashArea
	var min_lon, max_lon, min_lat, max_lat float64
	var bounds [4]float64
	var steps int
	// Axis-aligned bounding box of the search circle.
	hashBoundingBox(longitude, latitude, radius_meters, &bounds)
	min_lon = bounds[0]
	min_lat = bounds[1]
	max_lon = bounds[2]
	max_lat = bounds[3]
	steps = int(hashEstimateStepsByRadius(radius_meters, latitude))
	hashGetCoordRange(&long_range, &lat_range)                                     // get the valid lon/lat range; the poles cannot be encoded
	hashEncode(&long_range, &lat_range, longitude, latitude, (uint8(steps)), &hash) // hash of the center cell
	hashNeighbors(&hash, &neighbors)                                               // compute the hashes of the 8 surrounding cells
	hashDecode(long_range, lat_range, hash, &area)
	// If the circle reaches beyond any directly adjacent cell, the chosen
	// precision is too fine; flag it so we can drop one step below.
	decrease_step := 0
	{
		var north, south, east, west HashArea
		hashDecode(long_range, lat_range, neighbors.north, &north)
		hashDecode(long_range, lat_range, neighbors.south, &south)
		hashDecode(long_range, lat_range, neighbors.east, &east)
		hashDecode(long_range, lat_range, neighbors.west, &west)
		if hashGetDistance(longitude, latitude, longitude, north.latitude.max) < radius_meters {
			decrease_step = 1
		}
		if hashGetDistance(longitude, latitude, longitude, south.latitude.min) < radius_meters {
			decrease_step = 1
		}
		if hashGetDistance(longitude, latitude, east.longitude.max, latitude) < radius_meters {
			decrease_step = 1
		}
		if hashGetDistance(longitude, latitude, west.longitude.min, latitude) < radius_meters {
			decrease_step = 1
		}
	}
	if steps > 1 && decrease_step > 0 {
		// Re-encode one step coarser so the 3x3 block covers the circle.
		steps--
		hashEncode(&long_range, &lat_range, longitude, latitude, uint8(steps), &hash)
		hashNeighbors(&hash, &neighbors)
		hashDecode(long_range, lat_range, hash, &area)
	}
	/* Exclude the search areas that are useless. */
	if steps >= 2 {
		if area.latitude.min < min_lat {
			GZERO(&neighbors.south)
			GZERO(&neighbors.south_west)
			GZERO(&neighbors.south_east)
		}
		if area.latitude.max > max_lat {
			GZERO(&neighbors.north)
			GZERO(&neighbors.north_east)
			GZERO(&neighbors.north_west)
		}
		if area.longitude.min < min_lon {
			GZERO(&neighbors.west)
			GZERO(&neighbors.south_west)
			GZERO(&neighbors.north_west)
		}
		if area.longitude.max > max_lon {
			GZERO(&neighbors.east)
			GZERO(&neighbors.south_east)
			GZERO(&neighbors.north_east)
		}
	}
	radius.hash = hash
	radius.neighbors = neighbors
	radius.area = area
	return radius
}
// GZERO resets a hash to the zero (invalid) value.
func GZERO(s *HashBits) {
	s.bits = 0
	s.step = 0
}

// hashBoundingBox computes the bounding box of a search area centered at the
// given longitude/latitude, writing [min_lon, min_lat, max_lon, max_lat] to
// bounds. The longitude delta is widened by 1/cos(lat) to account for
// meridians converging away from the equator.
func hashBoundingBox(longitude float64, latitude float64, radius_meters float64, bounds *[4]float64) bool {
	if bounds == nil {
		return false
	}
	bounds[0] = longitude - rad_deg(radius_meters/EARTH_RADIUS_IN_METERS/math.Cos(deg_rad(latitude)))
	bounds[2] = longitude + rad_deg(radius_meters/EARTH_RADIUS_IN_METERS/math.Cos(deg_rad(latitude)))
	bounds[1] = latitude - rad_deg(radius_meters/EARTH_RADIUS_IN_METERS)
	bounds[3] = latitude + rad_deg(radius_meters/EARTH_RADIUS_IN_METERS)
	return true
}
// hashEstimateStepsByRadius estimates the per-axis bit precision (step) whose
// cell size roughly matches range_meters, coarsening near the poles where
// Mercator cells are distorted. The result is clamped to [1, 26].
func hashEstimateStepsByRadius(range_meters float64, lat float64) uint8 {
	if range_meters == 0 {
		return 26
	}
	// Bug fix: step was a uint8, so `step -= 2` (and the latitude
	// adjustments) could wrap around to ~255 for very large radii, and the
	// `> 26` clamp then returned the FINEST precision instead of the
	// coarsest. Computing in int lets the `< 1` clamp work as intended.
	step := 1
	for range_meters < MERCATOR_MAX {
		range_meters *= 2
		step++
	}
	step -= 2 /* Make sure range is included in most of the base cases. */
	// High latitudes: widen the cells (Mercator stretches toward the poles).
	if lat > 66 || lat < -66 {
		step--
		if lat > 80 || lat < -80 {
			step--
		}
	}
	/* Frame to valid range. */
	if step < 1 {
		step = 1
	}
	if step > 26 {
		step = 26
	}
	return uint8(step)
}
// hashNeighbors fills neighbors with the hashes of the 8 cells surrounding
// hash, by shifting the center hash one cell in each direction.
func hashNeighbors(hash *HashBits, neighbors *HashNeighbors) {
	// Start every neighbor as a copy of the center cell.
	neighbors.east = *hash
	neighbors.west = *hash
	neighbors.north = *hash
	neighbors.south = *hash
	neighbors.south_east = *hash
	neighbors.south_west = *hash
	neighbors.north_east = *hash
	neighbors.north_west = *hash // assign the center hash to all 8 directions
	// Then offset each by +/-1 cell on each axis.
	hash_move_x(&neighbors.east, 1)
	hash_move_y(&neighbors.east, 0)
	hash_move_x(&neighbors.west, -1)
	hash_move_y(&neighbors.west, 0)
	hash_move_x(&neighbors.south, 0)
	hash_move_y(&neighbors.south, -1)
	hash_move_x(&neighbors.north, 0)
	hash_move_y(&neighbors.north, 1)
	hash_move_x(&neighbors.north_west, -1)
	hash_move_y(&neighbors.north_west, 1)
	hash_move_x(&neighbors.north_east, 1)
	hash_move_y(&neighbors.north_east, 1)
	hash_move_x(&neighbors.south_east, 1)
	hash_move_y(&neighbors.south_east, -1)
	hash_move_x(&neighbors.south_west, -1)
	hash_move_y(&neighbors.south_west, -1)
}
// hash_move_x shifts hash one cell east (d > 0) or west (d < 0) without
// deinterleaving: the longitude index lives in the odd bits (see
// interleave64), so filling the even-bit "holes" with ones lets a single
// add/subtract carry across only the odd positions.
func hash_move_x(hash *HashBits, d int8) {
	if d == 0 {
		return
	}
	// x: odd bits (longitude index), y: even bits (latitude index).
	x := hash.bits & 0xaaaaaaaaaaaaaaaa
	y := hash.bits & 0x5555555555555555
	// Mask of the even-bit positions in use at this precision.
	zz := uint64(0x5555555555555555 >> (64 - hash.step*2))
	if d > 0 {
		x = x + (zz + 1)
	} else {
		x = x | zz
		x = x - (zz + 1)
	}
	// Drop overflow past the precision and any carry residue in even bits.
	x &= (0xaaaaaaaaaaaaaaaa >> (64 - hash.step*2))
	hash.bits = (x | y)
}

// hash_move_y shifts hash one cell north (d > 0) or south (d < 0); the
// latitude index lives in the even bits, mirroring hash_move_x.
func hash_move_y(hash *HashBits, d int8) {
	if d == 0 {
		return
	}
	x := hash.bits & 0xaaaaaaaaaaaaaaaa
	y := hash.bits & 0x5555555555555555
	// Mask of the odd-bit positions in use at this precision.
	zz := uint64(0xaaaaaaaaaaaaaaaa >> (64 - hash.step*2))
	if d > 0 {
		y = y + (zz + 1)
	} else {
		y = y | zz
		y = y - (zz + 1)
	}
	y &= (0x5555555555555555 >> (64 - hash.step*2))
	hash.bits = (x | y)
}
// hashGetDistanceIfInRadius computes the haversine distance between
// (x1, y1) and (x2, y2) (lon, lat in degrees), writes it to *distance, and
// reports whether it is within radius. *distance is set even when the
// function returns false.
func hashGetDistanceIfInRadius(x1 float64, y1 float64, x2 float64, y2 float64, radius float64, distance *float64) bool {
	*distance = hashGetDistance(x1, y1, x2, y2)
	if *distance > radius {
		return false
	}
	return true
}

// hashGetDistanceIfInRadiusWGS84 is the WGS-84 alias of
// hashGetDistanceIfInRadius.
func hashGetDistanceIfInRadiusWGS84(x1 float64, y1 float64, x2 float64, y2 float64, radius float64, distance *float64) bool {
	return hashGetDistanceIfInRadius(x1, y1, x2, y2, radius, distance)
}
package stats
import (
"context"
"sync"
"sync/atomic" //lint:ignore faillint we can't use go.uber.org/atomic with a protobuf struct without wrapping it.
"time"
"github.com/dustin/go-humanize"
"github.com/go-kit/log"
)
type (
	// ctxKeyType is a private key type so this package's context value
	// cannot collide with other packages' keys.
	ctxKeyType string
	// Component identifies a query-path component.
	Component int64
)

const (
	// statsKey is the context key under which a *Context is stored.
	statsKey ctxKeyType = "stats"
)

// Context is the statistics context. It is passed through the query path and accumulates statistics.
type Context struct {
	querier  Querier
	ingester Ingester

	// store is the store statistics collected across the query path
	store Store

	// result accumulates results for JoinResult.
	result Result

	// mtx guards result (and ingester merges); the Add* methods use
	// atomics instead.
	mtx sync.Mutex
}
// NewContext creates a new statistics context and returns it along with a
// derived context.Context carrying it.
func NewContext(ctx context.Context) (*Context, context.Context) {
	contextData := &Context{}
	ctx = context.WithValue(ctx, statsKey, contextData)
	return contextData, ctx
}

// FromContext returns the statistics context.
// A throwaway zero-value Context is returned when none is attached, so
// callers may record into it unconditionally (the data is then discarded).
func FromContext(ctx context.Context) *Context {
	v, ok := ctx.Value(statsKey).(*Context)
	if !ok {
		return &Context{}
	}
	return v
}
// Ingester returns the ingester statistics accumulated so far, with the
// store statistics attached.
//
// NOTE(review): the fields are read without mtx or atomics here; presumably
// this is only called after collection has quiesced — confirm.
func (c *Context) Ingester() Ingester {
	return Ingester{
		TotalReached:       c.ingester.TotalReached,
		TotalChunksMatched: c.ingester.TotalChunksMatched,
		TotalBatches:       c.ingester.TotalBatches,
		TotalLinesSent:     c.ingester.TotalLinesSent,
		Store:              c.store,
	}
}

// Reset clears the statistics.
func (c *Context) Reset() {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.store.Reset()
	c.querier.Reset()
	c.ingester.Reset()
	c.result.Reset()
}

// Result calculates the summary based on store and ingester data, merging
// the accumulated sub-results with this context's own store/ingester stats.
func (c *Context) Result(execTime time.Duration, queueTime time.Duration) Result {
	r := c.result

	r.Merge(Result{
		Querier: Querier{
			Store: c.store,
		},
		Ingester: c.ingester,
	})
	r.ComputeSummary(execTime, queueTime)

	return r
}
// JoinResults merges a Result with the embedded Result in a context in a concurrency-safe manner.
func JoinResults(ctx context.Context, res Result) {
	stats := FromContext(ctx)
	stats.mtx.Lock()
	defer stats.mtx.Unlock()

	stats.result.Merge(res)
}

// JoinIngesters joins the ingester result statistics in a concurrency-safe manner.
func JoinIngesters(ctx context.Context, inc Ingester) {
	stats := FromContext(ctx)
	stats.mtx.Lock()
	defer stats.mtx.Unlock()

	stats.ingester.Merge(inc)
}
// ComputeSummary compute the summary of the statistics.
// Totals are the sum of the querier-side and ingester-side chunk byte/line
// counts; per-second rates are derived from execTime when it is non-zero.
func (r *Result) ComputeSummary(execTime time.Duration, queueTime time.Duration) {
	r.Summary.TotalBytesProcessed = r.Querier.Store.Chunk.DecompressedBytes + r.Querier.Store.Chunk.HeadChunkBytes +
		r.Ingester.Store.Chunk.DecompressedBytes + r.Ingester.Store.Chunk.HeadChunkBytes
	r.Summary.TotalLinesProcessed = r.Querier.Store.Chunk.DecompressedLines + r.Querier.Store.Chunk.HeadChunkLines +
		r.Ingester.Store.Chunk.DecompressedLines + r.Ingester.Store.Chunk.HeadChunkLines
	r.Summary.ExecTime = execTime.Seconds()
	if execTime != 0 {
		r.Summary.BytesProcessedPerSecond = int64(float64(r.Summary.TotalBytesProcessed) /
			execTime.Seconds())
		r.Summary.LinesProcessedPerSecond = int64(float64(r.Summary.TotalLinesProcessed) /
			execTime.Seconds())
	}
	// Note: when queueTime is zero the previous QueueTime value is left
	// untouched rather than being reset.
	if queueTime != 0 {
		r.Summary.QueueTime = queueTime.Seconds()
	}
}
// Merge accumulates another Store's counters into s.
func (s *Store) Merge(m Store) {
	s.TotalChunksRef += m.TotalChunksRef
	s.TotalChunksDownloaded += m.TotalChunksDownloaded
	s.ChunksDownloadTime += m.ChunksDownloadTime
	s.Chunk.HeadChunkBytes += m.Chunk.HeadChunkBytes
	s.Chunk.HeadChunkLines += m.Chunk.HeadChunkLines
	s.Chunk.DecompressedBytes += m.Chunk.DecompressedBytes
	s.Chunk.DecompressedLines += m.Chunk.DecompressedLines
	s.Chunk.CompressedBytes += m.Chunk.CompressedBytes
	s.Chunk.TotalDuplicates += m.Chunk.TotalDuplicates
}

// Merge accumulates another Querier's statistics into q.
func (q *Querier) Merge(m Querier) {
	q.Store.Merge(m.Store)
}

// Merge accumulates another Ingester's statistics into i.
func (i *Ingester) Merge(m Ingester) {
	i.Store.Merge(m.Store)
	i.TotalBatches += m.TotalBatches
	i.TotalLinesSent += m.TotalLinesSent
	i.TotalChunksMatched += m.TotalChunksMatched
	i.TotalReached += m.TotalReached
}

// Merge merges two results of statistics.
// This will increase the total number of Subqueries.
// The summary is recomputed over the combined exec/queue times.
func (r *Result) Merge(m Result) {
	r.Summary.Subqueries++
	r.Querier.Merge(m.Querier)
	r.Ingester.Merge(m.Ingester)
	r.ComputeSummary(ConvertSecondsToNanoseconds(r.Summary.ExecTime+m.Summary.ExecTime),
		ConvertSecondsToNanoseconds(r.Summary.QueueTime+m.Summary.QueueTime))
}
// ConvertSecondsToNanoseconds converts a number of seconds expressed as a
// float64 into a time.Duration (nanosecond resolution).
func ConvertSecondsToNanoseconds(seconds float64) time.Duration {
	nanos := int64(seconds * float64(time.Second))
	return time.Duration(nanos)
}
// ChunksDownloadTime returns the combined querier+ingester chunk download time.
func (r Result) ChunksDownloadTime() time.Duration {
	return time.Duration(r.Querier.Store.ChunksDownloadTime + r.Ingester.Store.ChunksDownloadTime)
}

// TotalDuplicates returns the combined querier+ingester duplicate count.
func (r Result) TotalDuplicates() int64 {
	return r.Querier.Store.Chunk.TotalDuplicates + r.Ingester.Store.Chunk.TotalDuplicates
}

// TotalChunksDownloaded returns the combined querier+ingester chunks downloaded.
func (r Result) TotalChunksDownloaded() int64 {
	return r.Querier.Store.TotalChunksDownloaded + r.Ingester.Store.TotalChunksDownloaded
}

// TotalChunksRef returns the combined querier+ingester chunk references.
func (r Result) TotalChunksRef() int64 {
	return r.Querier.Store.TotalChunksRef + r.Ingester.Store.TotalChunksRef
}

// TotalDecompressedBytes returns the combined querier+ingester decompressed bytes.
func (r Result) TotalDecompressedBytes() int64 {
	return r.Querier.Store.Chunk.DecompressedBytes + r.Ingester.Store.Chunk.DecompressedBytes
}

// TotalDecompressedLines returns the combined querier+ingester decompressed lines.
func (r Result) TotalDecompressedLines() int64 {
	return r.Querier.Store.Chunk.DecompressedLines + r.Ingester.Store.Chunk.DecompressedLines
}
// The Add* methods below record statistics atomically, so they are safe to
// call from concurrent goroutines without taking c.mtx.

// AddIngesterBatch records one ingester batch of `size` lines.
func (c *Context) AddIngesterBatch(size int64) {
	atomic.AddInt64(&c.ingester.TotalBatches, 1)
	atomic.AddInt64(&c.ingester.TotalLinesSent, size)
}

// AddIngesterTotalChunkMatched adds to the matched-chunk counter.
func (c *Context) AddIngesterTotalChunkMatched(i int64) {
	atomic.AddInt64(&c.ingester.TotalChunksMatched, i)
}

// AddIngesterReached adds to the reached-ingesters counter.
func (c *Context) AddIngesterReached(i int32) {
	atomic.AddInt32(&c.ingester.TotalReached, i)
}

// AddHeadChunkLines adds to the head-chunk line counter.
func (c *Context) AddHeadChunkLines(i int64) {
	atomic.AddInt64(&c.store.Chunk.HeadChunkLines, i)
}

// AddHeadChunkBytes adds to the head-chunk byte counter.
func (c *Context) AddHeadChunkBytes(i int64) {
	atomic.AddInt64(&c.store.Chunk.HeadChunkBytes, i)
}

// AddCompressedBytes adds to the compressed-byte counter.
func (c *Context) AddCompressedBytes(i int64) {
	atomic.AddInt64(&c.store.Chunk.CompressedBytes, i)
}

// AddDecompressedBytes adds to the decompressed-byte counter.
func (c *Context) AddDecompressedBytes(i int64) {
	atomic.AddInt64(&c.store.Chunk.DecompressedBytes, i)
}

// AddDecompressedLines adds to the decompressed-line counter.
func (c *Context) AddDecompressedLines(i int64) {
	atomic.AddInt64(&c.store.Chunk.DecompressedLines, i)
}

// AddDuplicates adds to the duplicate-entry counter.
func (c *Context) AddDuplicates(i int64) {
	atomic.AddInt64(&c.store.Chunk.TotalDuplicates, i)
}

// AddChunksDownloadTime adds to the cumulative chunk download duration.
func (c *Context) AddChunksDownloadTime(i time.Duration) {
	atomic.AddInt64(&c.store.ChunksDownloadTime, int64(i))
}

// AddChunksDownloaded adds to the downloaded-chunk counter.
func (c *Context) AddChunksDownloaded(i int64) {
	atomic.AddInt64(&c.store.TotalChunksDownloaded, i)
}

// AddChunksRef adds to the referenced-chunk counter.
func (c *Context) AddChunksRef(i int64) {
	atomic.AddInt64(&c.store.TotalChunksRef, i)
}
// Log logs a query statistics result.
func (r Result) Log(log log.Logger) {
	_ = log.Log(
		"Ingester.TotalReached", r.Ingester.TotalReached,
		"Ingester.TotalChunksMatched", r.Ingester.TotalChunksMatched,
		"Ingester.TotalBatches", r.Ingester.TotalBatches,
		"Ingester.TotalLinesSent", r.Ingester.TotalLinesSent,
		"Ingester.TotalChunksRef", r.Ingester.Store.TotalChunksRef,
		"Ingester.TotalChunksDownloaded", r.Ingester.Store.TotalChunksDownloaded,
		"Ingester.ChunksDownloadTime", time.Duration(r.Ingester.Store.ChunksDownloadTime),
		"Ingester.HeadChunkBytes", humanize.Bytes(uint64(r.Ingester.Store.Chunk.HeadChunkBytes)),
		"Ingester.HeadChunkLines", r.Ingester.Store.Chunk.HeadChunkLines,
		"Ingester.DecompressedBytes", humanize.Bytes(uint64(r.Ingester.Store.Chunk.DecompressedBytes)),
		"Ingester.DecompressedLines", r.Ingester.Store.Chunk.DecompressedLines,
		"Ingester.CompressedBytes", humanize.Bytes(uint64(r.Ingester.Store.Chunk.CompressedBytes)),
		"Ingester.TotalDuplicates", r.Ingester.Store.Chunk.TotalDuplicates,
		"Querier.TotalChunksRef", r.Querier.Store.TotalChunksRef,
		"Querier.TotalChunksDownloaded", r.Querier.Store.TotalChunksDownloaded,
		"Querier.ChunksDownloadTime", time.Duration(r.Querier.Store.ChunksDownloadTime),
		"Querier.HeadChunkBytes", humanize.Bytes(uint64(r.Querier.Store.Chunk.HeadChunkBytes)),
		"Querier.HeadChunkLines", r.Querier.Store.Chunk.HeadChunkLines,
		"Querier.DecompressedBytes", humanize.Bytes(uint64(r.Querier.Store.Chunk.DecompressedBytes)),
		"Querier.DecompressedLines", r.Querier.Store.Chunk.DecompressedLines,
		"Querier.CompressedBytes", humanize.Bytes(uint64(r.Querier.Store.Chunk.CompressedBytes)),
		"Querier.TotalDuplicates", r.Querier.Store.Chunk.TotalDuplicates,
	)
	// The summary is logged as a separate line.
	r.Summary.Log(log)
}
// Log emits the summary statistics as a single structured log line.
func (s Summary) Log(log log.Logger) {
	_ = log.Log(
		"Summary.BytesProcessedPerSecond", humanize.Bytes(uint64(s.BytesProcessedPerSecond)),
		"Summary.LinesProcessedPerSecond", s.LinesProcessedPerSecond,
		"Summary.TotalBytesProcessed", humanize.Bytes(uint64(s.TotalBytesProcessed)),
		"Summary.TotalLinesProcessed", s.TotalLinesProcessed,
		"Summary.ExecTime", ConvertSecondsToNanoseconds(s.ExecTime),
		"Summary.QueueTime", ConvertSecondsToNanoseconds(s.QueueTime),
	)
}
package storage
import (
"bytes"
"github.com/mkawserm/flamed/pkg/iface"
"os"
"testing"
)
// setKeyValuePair writes each string in inputDataTable as both key and value
// in a single committed transaction, failing the test on any error.
// (t.Fatal stops the test immediately; the returns after it are defensive.)
func setKeyValuePair(t *testing.T, stateStorage iface.IStateStorage, inputDataTable []string) {
	txn := stateStorage.NewTransaction()
	if txn == nil {
		t.Fatal("unexpected nil pointer")
		return
	}

	for _, v := range inputDataTable {
		if err := txn.Set([]byte(v), []byte(v)); err != nil {
			t.Fatal("unexpected error: ", err)
			return
		}
	}

	if err := txn.Commit(); err != nil {
		t.Fatal("unexpected error: ", err)
		return
	}
}
// forwardIteratorCheck walks a forward iterator and asserts that the stored
// addresses appear exactly in the order of expectedForwardDataTable, and
// that each value equals its key (the suite writes value == key).
func forwardIteratorCheck(t *testing.T, stateStorage iface.IStateStorage, expectedForwardDataTable []string) {
	txn := stateStorage.NewTransaction()
	forwardIterator := txn.ForwardIterator()
	if forwardIterator == nil {
		t.Fatal("unexpected nil forward iterator")
		return
	}

	var i = 0
	for forwardIterator.Rewind(); forwardIterator.Valid(); forwardIterator.Next() {
		state := forwardIterator.StateSnapshot()
		currentData := expectedForwardDataTable[i]
		if !bytes.EqualFold([]byte(currentData), state.Address) {
			t.Fatal("address ordering is not correct")
		}

		if !bytes.EqualFold([]byte(currentData), state.Data) {
			t.Fatal("data mismatch")
		}
		i = i + 1
	}
	forwardIterator.Close()
}

// reverseIteratorCheck mirrors forwardIteratorCheck for the reverse
// iteration order.
func reverseIteratorCheck(t *testing.T, stateStorage iface.IStateStorage, expectedReverseDataTable []string) {
	txn := stateStorage.NewTransaction()
	reverseIterator := txn.ReverseIterator()
	if reverseIterator == nil {
		t.Error("unexpected nil reverse iterator")
		return
	}

	var i = 0
	for reverseIterator.Rewind(); reverseIterator.Valid(); reverseIterator.Next() {
		state := reverseIterator.StateSnapshot()
		currentData := expectedReverseDataTable[i]
		if !bytes.EqualFold([]byte(currentData), state.Address) {
			t.Fatal("address ordering is not correct")
		}

		if !bytes.EqualFold([]byte(currentData), state.Data) {
			t.Fatal("data mismatch")
		}
		i = i + 1
	}
	reverseIterator.Close()
}
// forwardIteratorKeyOnlyCheck walks a key-only forward iterator and asserts
// the same ordering as forwardIteratorCheck.
//
// NOTE(review): state.Data is still compared here even though the iterator
// is key-only; presumably the snapshot still carries the value (or the
// suite relies on value == key) — confirm against the iterator contract.
func forwardIteratorKeyOnlyCheck(t *testing.T, stateStorage iface.IStateStorage, expectedForwardDataTable []string) {
	txn := stateStorage.NewTransaction()
	forwardIterator := txn.KeyOnlyForwardIterator()
	if forwardIterator == nil {
		t.Fatal("unexpected nil forward iterator")
		return
	}

	var i = 0
	for forwardIterator.Rewind(); forwardIterator.Valid(); forwardIterator.Next() {
		state := forwardIterator.StateSnapshot()
		currentData := expectedForwardDataTable[i]
		if !bytes.EqualFold([]byte(currentData), state.Address) {
			t.Fatal("address ordering is not correct")
		}

		if !bytes.EqualFold([]byte(currentData), state.Data) {
			t.Fatal("data mismatch")
		}
		i = i + 1
	}
	forwardIterator.Close()
}

// reverseIteratorCheckKeyOnly is intended as the key-only counterpart of
// reverseIteratorCheck.
//
// NOTE(review): this calls txn.ReverseIterator(), not a key-only reverse
// iterator — it looks like a copy-paste from reverseIteratorCheck; confirm
// whether the interface offers a KeyOnlyReverseIterator that should be used.
func reverseIteratorCheckKeyOnly(t *testing.T, stateStorage iface.IStateStorage, expectedReverseDataTable []string) {
	txn := stateStorage.NewTransaction()
	reverseIterator := txn.ReverseIterator()
	if reverseIterator == nil {
		t.Error("unexpected nil reverse iterator")
		return
	}

	var i = 0
	for reverseIterator.Rewind(); reverseIterator.Valid(); reverseIterator.Next() {
		state := reverseIterator.StateSnapshot()
		currentData := expectedReverseDataTable[i]
		if !bytes.EqualFold([]byte(currentData), state.Address) {
			t.Fatal("address ordering is not correct")
		}

		if !bytes.EqualFold([]byte(currentData), state.Data) {
			t.Fatal("data mismatch")
		}
		i = i + 1
	}
	reverseIterator.Close()
}
// StateStorageTestSuite is helpful for developer
// to implement new key value plugin for state storage
// correctly
//
// It writes a set of mixed keys and verifies that forward and reverse
// iteration visit them in byte order (ASCII: digits < uppercase < lowercase).
// The storage is opened under /tmp and removed when the test finishes.
func StateStorageTestSuite(t *testing.T, stateStorage iface.IStateStorage) {
	path := "/tmp/test_db_1"
	defer func() {
		_ = os.RemoveAll(path)
	}()

	stateStorage.Setup(path, nil, nil)

	if err := stateStorage.Open(); err != nil {
		t.Fatal("unexpected error: ", err)
		return
	}

	defer func() {
		_ = stateStorage.Close()
	}()

	// Keys deliberately mix digits, upper case, lower case, and multi-byte
	// strings to exercise lexicographic ordering.
	inputDataTable := []string{
		"z",
		"a",
		"Z",
		"A",
		"9",
		"0",
		"5",
		"1",
		"Ab",
		"1ba",
		"1ab",
	}

	// Expected byte-order (ascending) traversal.
	expectedForwardDataTable := []string{
		"0",
		"1",
		"1ab",
		"1ba",
		"5",
		"9",
		"A",
		"Ab",
		"Z",
		"a",
		"z",
	}

	// Expected byte-order (descending) traversal.
	expectedReverseDataTable := []string{
		"z",
		"a",
		"Z",
		"Ab",
		"A",
		"9",
		"5",
		"1ba",
		"1ab",
		"1",
		"0",
	}

	setKeyValuePair(t, stateStorage, inputDataTable)
	forwardIteratorCheck(t, stateStorage, expectedForwardDataTable)
	forwardIteratorKeyOnlyCheck(t, stateStorage, expectedForwardDataTable)
	reverseIteratorCheck(t, stateStorage, expectedReverseDataTable)
	reverseIteratorCheckKeyOnly(t, stateStorage, expectedReverseDataTable)
}
package continuous
import (
"github.com/jtejido/ggsl/specfunc"
"github.com/jtejido/stats"
"github.com/jtejido/stats/err"
smath "github.com/jtejido/stats/math"
"math"
"math/rand"
)
// F-distribution
// https://en.wikipedia.org/wiki/F-distribution
//
// d1 and d2 are the numerator and denominator degrees of freedom.
type F struct {
	baseContinuousWithSource
	d1, d2 int
}

// NewF constructs an F distribution with the given degrees of freedom and
// no explicit random source.
func NewF(d1, d2 int) (*F, error) {
	return NewFWithSource(d1, d2, nil)
}

// NewFWithSource constructs an F distribution with the given degrees of
// freedom and random source. Both d1 and d2 must be positive.
func NewFWithSource(d1, d2 int, src rand.Source) (*F, error) {
	if d1 <= 0 || d2 <= 0 {
		return nil, err.Invalid()
	}

	f := new(F)
	f.d1 = d1
	f.d2 = d2
	f.src = src

	return f, nil
}
// String returns a human-readable description of the distribution.
func (f *F) String() string {
	return "F: Parameters - " + f.Parameters().String() + ", Support(x) - " + f.Support().String()
}

// d₁ ∈ (0,∞)
// d₂ ∈ (0,∞)
func (f *F) Parameters() stats.Limits {
	return stats.Limits{
		"d₁": stats.Interval{0, math.Inf(1), true, true},
		"d₂": stats.Interval{0, math.Inf(1), true, true},
	}
}

// x ∈ [0,∞)
func (f *F) Support() stats.Interval {
	return stats.Interval{0, math.Inf(1), false, true}
}
// Probability returns the PDF at x:
// sqrt((d1 x)^d1 d2^d2 / (d1 x + d2)^(d1+d2)) / (x B(d1/2, d2/2)),
// and 0 outside the support.
func (f *F) Probability(x float64) float64 {
	if f.Support().IsWithinInterval(x) {
		a := math.Pow(float64(f.d1)*x, float64(f.d1)) * math.Pow(float64(f.d2), float64(f.d2))
		b := math.Pow(float64(f.d1)*x+float64(f.d2), float64(f.d1)+float64(f.d2))
		num := math.Sqrt(a / b)
		denom := x * specfunc.Beta(float64(f.d1)/2, float64(f.d2)/2)
		return num / denom
	}

	return 0
}

// Distribution returns the CDF at x via the regularized incomplete beta
// function I_{d1 x/(d1 x + d2)}(d1/2, d2/2); 0 outside the support.
func (f *F) Distribution(x float64) float64 {
	if f.Support().IsWithinInterval(x) {
		return specfunc.Beta_inc(float64(f.d1)/2, float64(f.d2)/2, (float64(f.d1)*x)/(float64(f.d1)*x+float64(f.d2)))
	}

	return 0
}
// Inverse returns the quantile function (inverse CDF) at probability p.
// p <= 0 maps to 0 and p >= 1 to +Inf. The two branches invert the
// incomplete beta from whichever tail is better conditioned numerically.
func (f *F) Inverse(p float64) float64 {
	if p <= 0 {
		return 0
	}

	if p >= 1 {
		return math.Inf(1)
	}

	res := 0.

	// w is the CDF mass at the median split point, used to pick the tail.
	w := specfunc.Beta_inc(0.5*float64(f.d2), 0.5*float64(f.d1), 0.5)

	if w > p || p < 0.001 {
		w = smath.InverseRegularizedIncompleteBeta(0.5*float64(f.d1), 0.5*float64(f.d2), p)
		res = float64(f.d2) * w / (float64(f.d1) * (1.0 - w))
	} else {
		w = smath.InverseRegularizedIncompleteBeta(0.5*float64(f.d2), 0.5*float64(f.d1), 1.0-p)
		res = (float64(f.d2) - float64(f.d2)*w) / (float64(f.d1) * w)
	}

	return res
}
// Entropy returns the differential entropy of the distribution, expressed
// through log-gamma and digamma terms of d1/2 and d2/2.
func (f *F) Entropy() float64 {
	lgd1 := specfunc.Lngamma(float64(f.d1) / 2.)
	lgd2 := specfunc.Lngamma(float64(f.d2) / 2.)
	lgd1pd2 := specfunc.Lngamma((float64(f.d1) + float64(f.d2)) / 2.)
	return lgd1 + lgd2 - lgd1pd2 + (1-(float64(f.d1)/2.))*specfunc.Psi(1+(float64(f.d1)/2.)) - (1+(float64(f.d2)/2.))*specfunc.Psi(1+(float64(f.d2)/2.)) + ((float64(f.d1)+float64(f.d2))/2.)*specfunc.Psi((float64(f.d1)+float64(f.d2))/2.) + math.Log(float64(f.d1)/float64(f.d2))
}

// ExKurtosis returns the excess kurtosis; it is undefined (NaN) for d2 <= 8.
func (f *F) ExKurtosis() float64 {
	if float64(f.d2) <= 8 {
		return math.NaN()
	}

	return (12 / (float64(f.d2) - 6)) * ((5*float64(f.d2)-22)/(float64(f.d2)-8) + ((float64(f.d2)-4)/float64(f.d1))*((float64(f.d2)-2)/(float64(f.d2)-8))*((float64(f.d2)-2)/(float64(f.d1)+float64(f.d2)-2)))
}

// Skewness returns the skewness; it is undefined (NaN) for d2 <= 6.
func (f *F) Skewness() float64 {
	if float64(f.d2) <= 6 {
		return math.NaN()
	}

	num := (2*float64(f.d1) + float64(f.d2) - 2) * math.Sqrt(8*(float64(f.d2)-4))
	den := (float64(f.d2) - 6) * math.Sqrt(float64(f.d1)*(float64(f.d1)+float64(f.d2)-2))
	return num / den
}
// Mean returns d2/(d2-2) for d2 > 2; otherwise the mean does not exist (NaN).
func (f *F) Mean() float64 {
	if float64(f.d2) > 2 {
		return float64(f.d2) / (float64(f.d2) - 2)
	}

	return math.NaN()
}

// Median returns the 0.5 quantile (no closed form; computed via Inverse).
func (f *F) Median() float64 {
	return f.Inverse(.5)
}

// Mode returns ((d1-2)/d1)*(d2/(d2+2)) for d1 > 2; otherwise NaN.
func (f *F) Mode() float64 {
	if float64(f.d1) <= 2 {
		return math.NaN()
	}

	return ((float64(f.d1) - 2) / float64(f.d1)) * (float64(f.d2) / (float64(f.d2) + 2))
}
// Variance returns 2 d2^2 (d1+d2-2) / (d1 (d2-2)^2 (d2-4)) for d2 > 4;
// otherwise the variance does not exist (NaN).
func (f *F) Variance() float64 {
	if float64(f.d2) <= 4 {
		return math.NaN()
	}

	twoD2pow2d1pd2m2 := (2 * (float64(f.d2) * float64(f.d2))) * (float64(f.d1) + float64(f.d2) - 2)
	d1d2m2Pow2d2m4 := (float64(f.d1) * ((float64(f.d2) - 2) * (float64(f.d2) - 2))) * (float64(f.d2) - 4)
	return twoD2pow2d1pd2m2 / d1d2m2Pow2d2m4
}

// Rand draws a variate as the ratio of two scaled chi-squared variates:
// (X1/d1)/(X2/d2) with Xi ~ ChiSquared(di), sharing this distribution's
// random source.
func (f *F) Rand() float64 {
	c1 := ChiSquared{dof: f.d1}
	c2 := ChiSquared{dof: f.d2}
	c1.src = f.src
	c2.src = f.src
	return (c1.Rand() / float64(f.d1)) / (c2.Rand() / float64(f.d2))
}
package main
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
// bot is the painting robot: its grid position and facing direction.
type bot struct {
	x         int
	y         int
	direction direct
}

// direct is a unit step vector; exactly one of x/y is ±1 (per turn's tables).
type direct struct {
	x int
	y int
}
// main reads a comma-separated Intcode program from stdin, runs the painting
// robot over a panel grid, prints the grid, and reports how many panels were
// painted. (`compute` is defined elsewhere in this file.)
func main() {
	memory := []int{}
	dimension := 0
	scanner := bufio.NewScanner(os.Stdin)
	// Read and process input from stdin
	for scanner.Scan() {
		for _, val := range strings.Split(scanner.Text(), ",") {
			num, _ := strconv.Atoi(val)
			// Heuristic grid sizing: count values ending in 4 (output
			// opcodes) to bound how far the robot can travel.
			if num%10 == 4 {
				dimension++
			}
			memory = append(memory, num)
		}
	}
	dimension *= 3
	// Add buffer space to memory
	count := len(memory)
	for len(memory) < 50000 {
		memory = append(memory, 0)
	}
	panels := createPanels(dimension)
	// Start in the center of the grid, facing up.
	robot := bot{len(panels) / 2, len(panels) / 2, direct{0, -1}}
	// Robot starts on a white panel
	panels[robot.y][robot.x] = true
	// Get number of painted panels
	// NOTE(review): compute's signature/return is not visible here;
	// `painted` is ranged over for bool values — presumably a visited map.
	painted := compute(memory, count, robot, panels)
	pCount := 0
	for _, val := range painted {
		if val {
			pCount++
		}
	}
	printer(panels, robot)
	fmt.Printf("%d panels painted\n", pCount)
}
// createPanels allocates a dimension x dimension grid of panels, all
// initially unpainted (false).
func createPanels(dimension int) [][]bool {
	// Allocate each row directly instead of growing slices element by
	// element with append: same result, fewer allocations, clearer intent.
	panels := make([][]bool, dimension)
	for i := range panels {
		panels[i] = make([]bool, dimension)
	}
	return panels
}
// printer renders the panel grid: '#' painted, '.' unpainted, and the robot
// drawn as ^ < > v according to its facing direction.
func printer(panels [][]bool, robot bot) {
	fmt.Print("\n")
	for y, row := range panels {
		for x, painted := range row {
			switch {
			case robot.y == y && robot.x == x:
				// The robot occupies this cell; show its heading.
				switch robot.direction {
				case direct{0, -1}:
					fmt.Print("^")
				case direct{-1, 0}:
					fmt.Print("<")
				case direct{1, 0}:
					fmt.Print(">")
				default:
					fmt.Print("v")
				}
			case painted:
				fmt.Print("#")
			default:
				fmt.Print(".")
			}
		}
		fmt.Print("\n")
	}
	fmt.Print("\n")
}
// paint colors the panel under the robot: 0 paints it black (false),
// 1 paints it white (true); any other value leaves the panel unchanged.
func paint(panels *[][]bool, robot bot, val int) {
	switch val {
	case 0:
		(*panels)[robot.y][robot.x] = false
	case 1:
		(*panels)[robot.y][robot.x] = true
	}
}
// turn rotates the robot 90 degrees (val == 0: left, otherwise: right) and
// advances it one step in its new direction.
func turn(robot bot, val int) bot {
	// Rotate with plain arithmetic instead of rebuilding a direction lookup
	// map on every call (the old version allocated a map + four slices per
	// turn). A left turn maps (x, y) -> (y, -x); a right turn maps
	// (x, y) -> (-y, x). E.g. up {0,-1} turns left to {-1,0} and right to {1,0}.
	if val == 0 {
		robot.direction = direct{robot.direction.y, -robot.direction.x}
	} else {
		robot.direction = direct{-robot.direction.y, robot.direction.x}
	}
	robot.x += robot.direction.x
	robot.y += robot.direction.y
	return robot
}
/*******************************************************
/* Intcode computer
********************************************************/
// compute executes the Intcode program in memory, driving the painting
// robot: input instructions (op 3) read the color of the panel under the
// robot, and output instructions (op 4) alternate between painting the
// current panel and turning/moving the robot. It returns the set of
// coordinates that were painted at least once.
func compute(memory []int, instCount int, robot bot, panels [][]bool) map[direct]bool {
	// Array of instruction parameter counts, indexed by opcode; the count
	// includes the opcode cell itself, so it is also the instruction length.
	paramCounts := []int{2, 4, 4, 2, 2, 3, 3, 4, 4, 2}
	painted := make(map[direct]bool)
	index, relBase := 0, 0
	output := []int{}
	for index < instCount-2 {
		op := 0
		// Split the instruction into decimal digits: the last two digits are
		// the opcode, the remaining leading digits are parameter modes.
		digits := intToSlice(memory[index], []int{})
		// Get operation from instruction
		if (len(digits) > 1) {
			op = (digits[len(digits)-2]*10) + (digits[len(digits)-1])
		} else {
			op = digits[len(digits)-1]
		}
		// Exit on 99
		if op == 99 {
			break
		}
		// Get instruction parameter count
		paramCount := paramCounts[op]
		// Get parameters by mode
		params := resolveParams(memory, digits, index, relBase, paramCount)
		if op == 1 { // addition instruction
			memory[params[2]] = params[0] + params[1]
		} else if op == 2 { // multiplication instruction
			memory[params[2]] = params[0] * params[1]
		} else if op == 3 { // input instruction
			// The camera input is the color under the robot: 1 white, 0 black.
			inputVal := 1
			if panels[robot.y][robot.x] == false {
				inputVal = 0
			}
			memory[params[0]] = inputVal
		} else if op == 4 { // output instruction
			temp := params[0]
			output = append(output, temp)
			// Outputs come in pairs: first paint the current panel,
			// then turn and move the robot.
			if len(output) % 2 == 1 {
				painted[direct{robot.x, robot.y}] = true
				paint(&panels, robot, temp)
			} else {
				robot = turn(robot, temp)
			}
		} else if op == 5 { // jump-if-true instruction
			// Subtract paramCount because it is re-added after the dispatch.
			if params[0] != 0 {
				index = params[1] - paramCount
			}
		} else if op == 6 { // jump-if-false instruction
			if params[0] == 0 {
				index = params[1] - paramCount
			}
		} else if op == 7 { // less-than instruction
			if params[0] < params[1] {
				memory[params[2]] = 1
			} else {
				memory[params[2]] = 0
			}
		} else if op == 8 { // equals instruction
			if params[0] == params[1] {
				memory[params[2]] = 1
			} else {
				memory[params[2]] = 0
			}
		} else if op == 9 { // increment relative base
			relBase += params[0]
		}
		index += paramCount
	}
	return painted
}
// resolveParams decodes the parameters of the instruction at pointer,
// honoring each parameter's mode: 0 = position (dereference the address),
// 1 = immediate (use the literal value), 2 = relative (address offset by
// relBase). Parameters that are write targets (per the writeParam table)
// yield an address instead of a dereferenced value.
func resolveParams(memory []int, digits []int, pointer int, relBase int, paramCount int) []int {
	// For each opcode, the 1-based index of its write parameter (-1 = none).
	// Note: op below is only the final digit of the instruction, which is
	// sufficient for opcodes 1-9 (99 never reaches this function).
	writeParam := []int{-1, 3, 3, 1, -1, -1, -1, 3, 3, -1}
	op := digits[len(digits)-1]
	// append leading zeroes to instruction
	for len(digits) <= paramCount {
		digits = append([]int{0}, digits...)
	}
	// get param modes from instruction
	modes := digits[:len(digits)-2]
	var params []int
	i := 1
	for i < paramCount {
		// Modes are read right-to-left: the digit closest to the opcode
		// belongs to the first parameter.
		mode := modes[len(modes)-i]
		// Get param values based on parameter modes
		if mode == 0 { // position mode
			if i == writeParam[op] {
				params = append(params, memory[pointer+i])
			} else {
				params = append(params, memory[memory[pointer+i]])
			}
		} else if mode == 1 { // immediate mode
			params = append(params, memory[pointer+i])
		} else if mode == 2 { // relative mode
			if i == writeParam[op] {
				params = append(params, relBase+memory[pointer+i])
			} else {
				params = append(params, memory[relBase+memory[pointer+i]])
			}
		}
		i++
	}
	return params
}
// intToSlice prepends the decimal digits of n (most significant first) to
// digits and returns the result. For n == 0 the input slice is returned
// unchanged (no digits are added).
func intToSlice(n int, digits []int) []int {
	for n != 0 {
		digits = append([]int{n % 10}, digits...)
		n /= 10
	}
	return digits
}
// Package skein1024 implements the Skein1024 hash function
// based on the Threefish1024 tweakable block cipher.
package skein1024
import (
"github.com/esrrhs/go-engine/src/crypto/cryptonight/inter/skein"
"hash"
)
// Sum512 computes the 512 bit Skein1024 checksum (or MAC if key is set) of msg
// and writes it to out. The key is optional and can be nil.
func Sum512(out *[64]byte, msg, key []byte) {
	h := new(hashFunc)
	h.initialize(64, &skein.Config{Key: key})
	h.Write(msg)
	h.finalizeHash()
	var full [128]byte
	h.output(&full, 0)
	copy(out[:], full[:64])
}
// Sum384 computes the 384 bit Skein1024 checksum (or MAC if key is set) of msg
// and writes it to out. The key is optional and can be nil.
func Sum384(out *[48]byte, msg, key []byte) {
	h := new(hashFunc)
	h.initialize(48, &skein.Config{Key: key})
	h.Write(msg)
	h.finalizeHash()
	var full [128]byte
	h.output(&full, 0)
	copy(out[:], full[:48])
}
// Sum256 computes the 256 bit Skein1024 checksum (or MAC if key is set) of msg
// and writes it to out. The key is optional and can be nil.
func Sum256(out *[32]byte, msg, key []byte) {
	h := new(hashFunc)
	h.initialize(32, &skein.Config{Key: key})
	h.Write(msg)
	h.finalizeHash()
	var full [128]byte
	h.output(&full, 0)
	copy(out[:], full[:32])
}
// Sum160 computes the 160 bit Skein1024 checksum (or MAC if key is set) of msg
// and writes it to out. The key is optional and can be nil.
func Sum160(out *[20]byte, msg, key []byte) {
	h := new(hashFunc)
	h.initialize(20, &skein.Config{Key: key})
	h.Write(msg)
	h.finalizeHash()
	var full [128]byte
	h.output(&full, 0)
	copy(out[:], full[:20])
}
// Sum returns the Skein1024 checksum with the given hash size of msg using the
// (optional) conf for configuration. The hashsize must be > 0.
func Sum(msg []byte, hashsize int, conf *skein.Config) []byte {
	h := New(hashsize, conf)
	h.Write(msg)
	return h.Sum(nil)
}
// New512 returns a hash.Hash computing the Skein1024 512 bit checksum.
// The key is optional and turns the hash into a MAC.
func New512(key []byte) hash.Hash {
	h := new(hashFunc)
	h.initialize(64, &skein.Config{Key: key})
	return h
}
// New256 returns a hash.Hash computing the Skein1024 256 bit checksum.
// The key is optional and turns the hash into a MAC.
func New256(key []byte) hash.Hash {
	h := new(hashFunc)
	h.initialize(32, &skein.Config{Key: key})
	return h
}
// New returns a hash.Hash computing the Skein1024 checksum with the given hash size.
// The conf is optional and configurates the hash.Hash
func New(hashsize int, conf *skein.Config) hash.Hash {
s := new(hashFunc)
s.initialize(hashsize, conf)
return s
} | src/crypto/cryptonight/inter/skein/skein1024/skein.go | 0.835986 | 0.404743 | skein.go | starcoder |
// Package databaseio provides transformations and utilities to interact with
// generic database database/sql API. See also: https://golang.org/pkg/database/sql/
package databaseio
import (
"database/sql"
"fmt"
"reflect"
"strings"
"time"
)
//rowMapper maps a record value to the slice of scan/write targets expected
//by database/sql.
type rowMapper func(value reflect.Value) ([]interface{}, error)

//newQueryMapper selects a row mapper implementation for recordType: types
//implementing MapLoader get the loader mapper, plain structs get the
//field-pointer mapper, and anything else is rejected.
func newQueryMapper(columns []string, columnTypes []*sql.ColumnType, recordType reflect.Type) (rowMapper, error) {
	sample := reflect.New(recordType).Interface()
	if _, isLoader := sample.(MapLoader); isLoader {
		return newQueryLoaderMapper(columns, columnTypes)
	}
	if recordType.Kind() == reflect.Struct {
		return newQueryStructMapper(columns, recordType)
	}
	return nil, fmt.Errorf("unsupported type %s", recordType)
}
//newQueryStructMapper creates a new record mapper for the supplied struct
//type. The returned mapper yields, for each mapped column, a pointer to the
//corresponding struct field, suitable for sql.Rows.Scan. The scan-target
//slice is reused across calls for efficiency.
func newQueryStructMapper(columns []string, recordType reflect.Type) (rowMapper, error) {
	mappedFieldIndex, err := mapFields(columns, recordType)
	if err != nil {
		return nil, err
	}
	// Size the reusable record by the number of mapped columns rather than
	// by the struct's field count: a struct with more fields than selected
	// columns would otherwise hand trailing nil scan targets to Scan.
	var record = make([]interface{}, len(mappedFieldIndex))
	var mapper = func(value reflect.Value) ([]interface{}, error) {
		value = value.Elem() //T = *T
		for i, fieldIndex := range mappedFieldIndex {
			record[i] = value.Field(fieldIndex).Addr().Interface()
		}
		return record, nil
	}
	return mapper, nil
}
//newQueryLoaderMapper creates a row mapper for types implementing MapLoader.
//For each column it selects, based on the database type name, a provider
//that allocates a fresh scan target of the appropriate Go type.
func newQueryLoaderMapper(columns []string, columnTypes []*sql.ColumnType) (rowMapper, error) {
	var record = make([]interface{}, len(columns))
	var valueProviders = make([]func(index int, values []interface{}), len(columns))
	defaultProvider := func(index int, values []interface{}) {
		val := new(interface{})
		values[index] = &val
	}
	for i := range columns {
		valueProviders[i] = defaultProvider
		if len(columnTypes) == 0 {
			continue
		}
		// Capture the column type in a per-iteration variable: the provider
		// closures run after this loop has finished, so referencing
		// columnTypes[i] inside a closure (as the fallback branch used to)
		// would make every closure see the final value of i on Go versions
		// before 1.22 — the classic loop-variable capture bug.
		columnType := columnTypes[i]
		dbTypeName := strings.ToLower(columnType.DatabaseTypeName())
		if strings.Contains(dbTypeName, "char") || strings.Contains(dbTypeName, "string") || strings.Contains(dbTypeName, "text") {
			valueProviders[i] = func(index int, values []interface{}) {
				val := ""
				values[index] = &val
			}
		} else if strings.Contains(dbTypeName, "int") {
			valueProviders[i] = func(index int, values []interface{}) {
				val := 0
				values[index] = &val
			}
		} else if strings.Contains(dbTypeName, "decimal") || strings.Contains(dbTypeName, "numeric") || strings.Contains(dbTypeName, "float") {
			valueProviders[i] = func(index int, values []interface{}) {
				val := 0.0
				values[index] = &val
			}
		} else if strings.Contains(dbTypeName, "time") || strings.Contains(dbTypeName, "date") {
			valueProviders[i] = func(index int, values []interface{}) {
				val := time.Now()
				values[index] = &val
			}
		} else if strings.Contains(dbTypeName, "bool") {
			valueProviders[i] = func(index int, values []interface{}) {
				val := false
				values[index] = &val
			}
		} else {
			valueProviders[i] = func(index int, values []interface{}) {
				val := reflect.New(columnType.ScanType()).Elem().Interface()
				values[index] = &val
			}
		}
	}
	mapper := func(value reflect.Value) ([]interface{}, error) {
		for i := range columns {
			valueProviders[i](i, record)
		}
		return record, nil
	}
	return mapper, nil
}
//newQueryMapper creates a new record mapped
func newWriterRowMapper(columns []string, recordType reflect.Type) (rowMapper, error) {
mappedFieldIndex, err := mapFields(columns, recordType)
if err != nil {
return nil, err
}
columnCount := len(columns)
mapper := func(value reflect.Value) ([]interface{}, error) {
var record = make([]interface{}, columnCount)
if value.Kind() == reflect.Ptr {
value = value.Elem() //T = *T
}
for i, fieldIndex := range mappedFieldIndex {
record[i] = value.Field(fieldIndex).Interface()
}
return record, nil
}
return mapper, nil
} | sdks/go/pkg/beam/io/databaseio/mapper.go | 0.643105 | 0.47098 | mapper.go | starcoder |
package main
import (
"image/color"
"image/jpeg"
"image/png"
"strings"
"strconv"
"image"
"bufio"
"math"
"flag"
"time"
"log"
"fmt"
"os"
)
const BlockSize = 16
const BlockMaxOffset = 8
const BlockMaxFractOffset = 0.5
const BlockFractStep = 0.25
/*
 * struct ImageData
 */
// ImageData stores an image as a flat float64 buffer with Size channel
// values per pixel, laid out row-major: (y*Width + x)*Size + channel.
type ImageData struct {
	Width, Height, Size int
	Pixels []float64
}
// Allocate creates the pixel buffer for the current dimensions.
func (i *ImageData) Allocate() {
	i.Pixels = make([]float64, i.Width * i.Height * i.Size)
}
// GetPixel returns the channel value at integer coordinates (x, y).
func (i *ImageData) GetPixel(x int, y int, index int) float64 {
	return i.Pixels[(y * i.Width + x) * i.Size + index]
}
// GetPixelBilinear samples the channel at fractional coordinates (x, y)
// using bilinear interpolation of the four surrounding pixels. When x (or y)
// is a whole number, floor == ceil and the interpolation degenerates to an
// exact lookup.
func (i *ImageData) GetPixelBilinear(x float64, y float64, index int) float64 {
	x1 := math.Floor(x)
	x2 := math.Ceil(x)
	y1 := math.Floor(y)
	y2 := math.Ceil(y)
	f11 := i.GetPixel(int(x1), int(y1), index)
	f12 := i.GetPixel(int(x1), int(y2), index)
	f21 := i.GetPixel(int(x2), int(y1), index)
	f22 := i.GetPixel(int(x2), int(y2), index)
	// Interpolate along x on both rows, then blend the rows along y.
	m1 := f11 * (x2 - x) + f21 * (1 - (x2 - x))
	m2 := f12 * (x2 - x) + f22 * (1 - (x2 - x))
	return m1 * (y2 - y) + m2 * (1 - (y2 - y))
}
// SetPixel writes the channel value at integer coordinates (x, y).
func (i *ImageData) SetPixel(x int, y int, index int, value float64) {
	i.Pixels[(y * i.Width + x) * i.Size + index] = value
}
/*
 * struct Block, BlockData
 */
// Block holds the motion vector (DtX, DtY, in pixels, possibly fractional)
// and the index of the source image that best matches one target block.
type Block struct {
	DtX, DtY float64
	SourceIndex int
}
// BlockData is a Width x Height grid of Blocks, one per BlockSize x
// BlockSize tile of the image.
type BlockData struct {
	Width, Height int
	Blocks []*Block
}
// Allocate creates and zero-initializes every Block in the grid.
func (b *BlockData) Allocate() {
	b.Blocks = make([]*Block, b.Width * b.Height)
	for i := 0; i < len(b.Blocks); i++ {
		b.Blocks[i] = new(Block)
	}
}
// GetBlock returns the block at grid coordinates (x, y).
func (b *BlockData) GetBlock(x int, y int) *Block {
	return b.Blocks[y * b.Width + x]
}
/*
* misc math functions
*/
// Min returns the smaller of a and b.
func Min(a int, b int) int {
	if a < b {
		return a
	}
	return b
}
// Max returns the larger of a and b.
func Max(a int, b int) int {
	if b > a {
		return b
	}
	return a
}
// Abs returns the absolute value of d.
func Abs(d float64) float64 {
	if d >= 0 {
		return d
	}
	return -d
}
// LoadImageJPEG decodes the JPEG file at filename into an ImageData with
// three channels per pixel (R, G, B, in the 0..65535 range produced by
// color.Color.RGBA). The program exits on open/decode failure.
func LoadImageJPEG(filename string) *ImageData {
	file, err := os.Open(filename)
	if err != nil {
		log.Fatalf("[error] Cannot open %s\n", filename)
	}
	// Close the handle when done; the original leaked it.
	defer file.Close()
	reader := bufio.NewReader(file)
	decoded, err := jpeg.Decode(reader)
	if err != nil {
		log.Fatalf("[error] Cannot decode %s as JPEG file\n", filename)
	}
	outputImage := ImageData{Width: decoded.Bounds().Max.X, Height: decoded.Bounds().Max.Y, Size: 3}
	outputImage.Allocate()
	for x := 0; x < outputImage.Width; x++ {
		for y := 0; y < outputImage.Height; y++ {
			iR, iG, iB, _ := decoded.At(x, y).RGBA()
			outputImage.SetPixel(x, y, 0, float64(iR))
			outputImage.SetPixel(x, y, 1, float64(iG))
			outputImage.SetPixel(x, y, 2, float64(iB))
		}
	}
	return &outputImage
}
// LoadImagePNG decodes the PNG file at filename into an ImageData with
// three channels per pixel (R, G, B, in the 0..65535 range produced by
// color.Color.RGBA). The program exits on open/decode failure.
func LoadImagePNG(filename string) *ImageData {
	file, err := os.Open(filename)
	if err != nil {
		log.Fatalf("[error] Cannot open %s\n", filename)
	}
	// Close the handle when done; the original leaked it.
	defer file.Close()
	reader := bufio.NewReader(file)
	decoded, err := png.Decode(reader)
	if err != nil {
		log.Fatalf("[error] Cannot decode %s as PNG file\n", filename)
	}
	outputImage := ImageData{Width: decoded.Bounds().Max.X, Height: decoded.Bounds().Max.Y, Size: 3}
	outputImage.Allocate()
	for x := 0; x < outputImage.Width; x++ {
		for y := 0; y < outputImage.Height; y++ {
			iR, iG, iB, _ := decoded.At(x, y).RGBA()
			outputImage.SetPixel(x, y, 0, float64(iR))
			outputImage.SetPixel(x, y, 1, float64(iG))
			outputImage.SetPixel(x, y, 2, float64(iB))
		}
	}
	return &outputImage
}
// GetBlockDiff returns the sum of absolute differences (SAD) over a
// BlockSize x BlockSize block between sourceImage at (sourceX, sourceY) and
// targetImage at (targetX, targetY). If the running sum exceeds minBlockDiff
// the scan aborts early and math.MaxInt32 is returned as a "worse than the
// current best" sentinel.
// Note: source pixel offsets are computed with targetImage's Width/Size, so
// both images must have identical dimensions (checked in main).
func GetBlockDiff(sourceImage *ImageData, sourceX int, sourceY int, targetImage *ImageData, targetX int, targetY int, minBlockDiff int) int {
	minDiff := float64(minBlockDiff)
	blockDiff := 0.0
	imageWidth := targetImage.Width
	imageSize := targetImage.Size
	sourcePixels := sourceImage.Pixels
	targetPixels := targetImage.Pixels
	for blockX := 0; blockX < BlockSize; blockX++ {
		for blockY := 0; blockY < BlockSize; blockY++ {
			sourceIndex := ((sourceY + blockY) * imageWidth + sourceX + blockX) * imageSize
			targetIndex := ((targetY + blockY) * imageWidth + targetX + blockX) * imageSize
			// Manually inlined for better performance
			sR := sourcePixels[sourceIndex]
			sG := sourcePixels[sourceIndex + 1]
			sB := sourcePixels[sourceIndex + 2]
			tR := targetPixels[targetIndex]
			tG := targetPixels[targetIndex + 1]
			tB := targetPixels[targetIndex + 2]
			blockDiff += Abs(tR - sR) + Abs(tG - sG) + Abs(tB - sB)
			if (blockDiff > minDiff) {
				return math.MaxInt32
			}
		}
	}
	return int(blockDiff)
}
// GetBlockDiffBilinear is the sub-pixel variant of GetBlockDiff: the source
// block origin (sourceX, sourceY) may be fractional, and each source pixel
// is bilinearly interpolated from its four integer neighbors. As in
// GetBlockDiff, the scan aborts with math.MaxInt32 once the running SAD
// exceeds minBlockDiff.
func GetBlockDiffBilinear(sourceImage *ImageData, sourceX float64, sourceY float64, targetImage *ImageData, targetX int, targetY int, minBlockDiff int) int {
	minDiff := float64(minBlockDiff)
	blockDiff := 0.0
	imageWidth := targetImage.Width
	imageSize := targetImage.Size
	sourcePixels := sourceImage.Pixels
	targetPixels := targetImage.Pixels
	// Interpolation weights are constant across the block because the
	// fractional part of the origin is the same for every pixel.
	sourceX1 := int(math.Floor(sourceX))
	sourceX2 := int(math.Ceil(sourceX))
	sourceY1 := int(math.Floor(sourceY))
	sourceY2 := int(math.Ceil(sourceY))
	factorX := float64(sourceX2) - sourceX
	factorY := float64(sourceY2) - sourceY
	for blockX := 0; blockX < BlockSize; blockX++ {
		for blockY := 0; blockY < BlockSize; blockY++ {
			sourceIndex11 := ((sourceY1 + blockY) * imageWidth + sourceX1 + blockX) * imageSize
			sourceIndex12 := ((sourceY2 + blockY) * imageWidth + sourceX1 + blockX) * imageSize
			sourceIndex21 := ((sourceY1 + blockY) * imageWidth + sourceX2 + blockX) * imageSize
			sourceIndex22 := ((sourceY2 + blockY) * imageWidth + sourceX2 + blockX) * imageSize
			targetIndex := ((targetY + blockY) * imageWidth + targetX + blockX) * imageSize
			// Manually inlined for better performance
			sR11 := sourcePixels[sourceIndex11]
			sG11 := sourcePixels[sourceIndex11 + 1]
			sB11 := sourcePixels[sourceIndex11 + 2]
			sR12 := sourcePixels[sourceIndex12]
			sG12 := sourcePixels[sourceIndex12 + 1]
			sB12 := sourcePixels[sourceIndex12 + 2]
			sR21 := sourcePixels[sourceIndex21]
			sG21 := sourcePixels[sourceIndex21 + 1]
			sB21 := sourcePixels[sourceIndex21 + 2]
			sR22 := sourcePixels[sourceIndex22]
			sG22 := sourcePixels[sourceIndex22 + 1]
			sB22 := sourcePixels[sourceIndex22 + 2]
			// Bilinear blend: along x on both rows, then along y.
			sR1 := sR11 * factorX + sR21 * (1 - factorX)
			sG1 := sG11 * factorX + sG21 * (1 - factorX)
			sB1 := sB11 * factorX + sB21 * (1 - factorX)
			sR2 := sR12 * factorX + sR22 * (1 - factorX)
			sG2 := sG12 * factorX + sG22 * (1 - factorX)
			sB2 := sB12 * factorX + sB22 * (1 - factorX)
			sR := sR1 * factorY + sR2 * (1 - factorY)
			sG := sG1 * factorY + sG2 * (1 - factorY)
			sB := sB1 * factorY + sB2 * (1 - factorY)
			tR := targetPixels[targetIndex]
			tG := targetPixels[targetIndex + 1]
			tB := targetPixels[targetIndex + 2]
			blockDiff += Abs(tR - sR) + Abs(tG - sG) + Abs(tB - sB)
			if (blockDiff > minDiff) {
				return math.MaxInt32
			}
		}
	}
	return int(blockDiff)
}
// CreateDelta performs block-matching motion estimation. For every
// BlockSize x BlockSize tile of targetImage it searches all source images
// within +/-BlockMaxOffset pixels for the integer offset minimizing the SAD,
// then refines that offset around the best match with bilinear sampling in
// BlockFractStep increments within +/-BlockMaxFractOffset. The result is one
// motion vector (DtX, DtY) plus source image index per tile.
func CreateDelta(sourceImages []*ImageData, targetImage *ImageData, verbose bool) *BlockData {
	outputBlocks := BlockData{ Width: targetImage.Width / BlockSize, Height: targetImage.Height / BlockSize }
	outputBlocks.Allocate()
	for x := 0; x < targetImage.Width; x += BlockSize {
		for y := 0; y < targetImage.Height; y += BlockSize {
			minBlockDiff := math.MaxInt32
			blockDtX := 0.0
			blockDtY := 0.0
			blockSourceIndex := 0
			// Iterate over source images to find best match for target image block
			// (search window clamped to the image so blocks never read out of bounds).
			minX := Max(x - BlockMaxOffset, 0)
			maxX := Min(x + BlockMaxOffset, targetImage.Width - BlockSize)
			minY := Max(y - BlockMaxOffset, 0)
			maxY := Min(y + BlockMaxOffset, targetImage.Height - BlockSize)
			for sourceIndex := 0; sourceIndex < len(sourceImages); sourceIndex++ {
				for sourceX := minX; sourceX <= maxX; sourceX++ {
					for sourceY := minY; sourceY <= maxY; sourceY++ {
						blockDiff := GetBlockDiff(sourceImages[sourceIndex], sourceX, sourceY, targetImage, x, y, minBlockDiff)
						if (blockDiff < minBlockDiff) {
							minBlockDiff = blockDiff
							blockDtX = float64(sourceX - x)
							blockDtY = float64(sourceY - y)
							blockSourceIndex = sourceIndex
						}
					}
				}
			}
			// Improve output with subpixel precision
			// (refine only around the best integer match, in the best source image).
			minXs := math.Max(float64(x) + blockDtX - BlockMaxFractOffset, float64(minX))
			maxXs := math.Min(float64(x) + blockDtX + BlockMaxFractOffset, float64(maxX))
			minYs := math.Max(float64(y) + blockDtY - BlockMaxFractOffset, float64(minY))
			maxYs := math.Min(float64(y) + blockDtY + BlockMaxFractOffset, float64(maxY))
			for sourceX := minXs; sourceX <= maxXs; sourceX += BlockFractStep {
				for sourceY := minYs; sourceY <= maxYs; sourceY += BlockFractStep {
					blockDiff := GetBlockDiffBilinear(sourceImages[blockSourceIndex], sourceX, sourceY, targetImage, x, y, minBlockDiff)
					if (blockDiff < minBlockDiff) {
						minBlockDiff = blockDiff
						blockDtX = sourceX - float64(x)
						blockDtY = sourceY - float64(y)
					}
				}
			}
			if verbose {
				fmt.Printf("[%d, %d] | %.2f %.2f (%d)\n", x, y, blockDtX, blockDtY, blockSourceIndex)
			}
			block := outputBlocks.GetBlock(x / BlockSize, y / BlockSize)
			block.DtX = blockDtX
			block.DtY = blockDtY
			block.SourceIndex = blockSourceIndex
		}
	}
	return &outputBlocks
}
// GetSSIMChannel computes a global (single-window) SSIM index for one color
// channel between the motion-compensated reconstruction (source blocks
// sampled at their delta offsets) and the target image. Pixels are scaled
// to roughly 0..255 (divided by 256 from the 16-bit range) and the standard
// constants C1 = (0.01*255)^2 and C2 = (0.03*255)^2 are used.
func GetSSIMChannel(sourceImages []*ImageData, targetImage *ImageData, blocks *BlockData, index int) float64 {
	// First pass: channel means of the reconstruction (s) and target (t).
	sSum := 0.0
	tSum := 0.0
	for x := 0; x < targetImage.Width; x += BlockSize {
		for y := 0; y < targetImage.Height; y += BlockSize {
			block := blocks.GetBlock(x / BlockSize, y / BlockSize)
			for blockX := 0; blockX < BlockSize; blockX++ {
				for blockY := 0; blockY < BlockSize; blockY++ {
					sSum += sourceImages[block.SourceIndex].GetPixelBilinear(float64(x + blockX) + block.DtX, float64(y + blockY) + block.DtY, index) / 256
					tSum += targetImage.GetPixel(x + blockX, y + blockY, index) / 256
				}
			}
		}
	}
	sMean := sSum / float64(targetImage.Width * targetImage.Height)
	tMean := tSum / float64(targetImage.Width * targetImage.Height)
	// Second pass: standard deviations and covariance about those means.
	sStSum := 0.0
	tStSum := 0.0
	covSum := 0.0
	for x := 0; x < targetImage.Width; x += BlockSize {
		for y := 0; y < targetImage.Height; y += BlockSize {
			block := blocks.GetBlock(x / BlockSize, y / BlockSize)
			for blockX := 0; blockX < BlockSize; blockX++ {
				for blockY := 0; blockY < BlockSize; blockY++ {
					s := sourceImages[block.SourceIndex].GetPixelBilinear(float64(x + blockX) + block.DtX, float64(y + blockY) + block.DtY, index) / 256
					t := targetImage.GetPixel(x + blockX, y + blockY, index) / 256
					sStSum += math.Pow(s - sMean, 2)
					tStSum += math.Pow(t - tMean, 2)
					covSum += (s - sMean) * (t - tMean)
				}
			}
		}
	}
	sStDev := math.Sqrt(sStSum / float64(targetImage.Width * targetImage.Height))
	tStDev := math.Sqrt(tStSum / float64(targetImage.Width * targetImage.Height))
	cov := covSum / float64(targetImage.Width * targetImage.Height)
	C1 := math.Pow(0.01 * 255.0, 2)
	C2 := math.Pow(0.03 * 255.0, 2)
	return ((2.0 * sMean * tMean + C1) * (2.0 * cov + C2)) / ((math.Pow(sMean, 2) + math.Pow(tMean, 2) + C1) * (math.Pow(sStDev, 2) + math.Pow(tStDev, 2) + C2))
}
// GetSSIM returns the mean of the per-channel SSIM indices (R, G, B)
// between the motion-compensated reconstruction and the target image.
func GetSSIM(sourceImages []*ImageData, targetImage *ImageData, blocks *BlockData) float64 {
	r := GetSSIMChannel(sourceImages, targetImage, blocks, 0)
	g := GetSSIMChannel(sourceImages, targetImage, blocks, 1)
	b := GetSSIMChannel(sourceImages, targetImage, blocks, 2)
	return (r + g + b) / 3.0
}
// GetPSNR computes the peak signal-to-noise ratio in dB between the
// motion-compensated reconstruction and the target image, accumulating
// squared error over all three channels with pixel values scaled to ~0..255
// (divided by 256 from the 16-bit range).
func GetPSNR(sourceImages []*ImageData, targetImage *ImageData, blocks *BlockData) float64 {
	mseSum := 0.0
	for x := 0; x < targetImage.Width; x += BlockSize {
		for y := 0; y < targetImage.Height; y += BlockSize {
			block := blocks.GetBlock(x / BlockSize, y / BlockSize)
			for blockX := 0; blockX < BlockSize; blockX++ {
				for blockY := 0; blockY < BlockSize; blockY++ {
					sR := sourceImages[block.SourceIndex].GetPixelBilinear(float64(x + blockX) + block.DtX, float64(y + blockY) + block.DtY, 0) / 256
					sG := sourceImages[block.SourceIndex].GetPixelBilinear(float64(x + blockX) + block.DtX, float64(y + blockY) + block.DtY, 1) / 256
					sB := sourceImages[block.SourceIndex].GetPixelBilinear(float64(x + blockX) + block.DtX, float64(y + blockY) + block.DtY, 2) / 256
					tR := targetImage.GetPixel(x + blockX, y + blockY, 0) / 256
					tG := targetImage.GetPixel(x + blockX, y + blockY, 1) / 256
					tB := targetImage.GetPixel(x + blockX, y + blockY, 2) / 256
					mseSum += math.Pow(tR - sR, 2) + math.Pow(tG - sG, 2) + math.Pow(tB - sB, 2)
				}
			}
		}
	}
	// PSNR = 20*log10(MAX) - 10*log10(MSE), with MAX = 255.
	return 20 * math.Log10(255) - 10 * math.Log10(mseSum / float64(targetImage.Width * targetImage.Height * targetImage.Size));
}
// SaveReconstructedTarget rebuilds the target image from the source images
// and the per-block motion vectors, then writes it to filename as PNG
// (used for visual comparison/debugging). The program exits on I/O failure.
func SaveReconstructedTarget(sourceImages []*ImageData, outputBlocks *BlockData, filename string) {
	outputImage := image.NewRGBA(image.Rect(0, 0, sourceImages[0].Width, sourceImages[0].Height))
	for x := 0; x < sourceImages[0].Width; x++ {
		for y := 0; y < sourceImages[0].Height; y++ {
			block := outputBlocks.GetBlock(x/BlockSize, y/BlockSize)
			// Sample the chosen source at the block's (fractional) offset,
			// scaling 16-bit channel values down to 8 bits.
			sR := sourceImages[block.SourceIndex].GetPixelBilinear(float64(x)+block.DtX, float64(y)+block.DtY, 0) / 256
			sG := sourceImages[block.SourceIndex].GetPixelBilinear(float64(x)+block.DtX, float64(y)+block.DtY, 1) / 256
			sB := sourceImages[block.SourceIndex].GetPixelBilinear(float64(x)+block.DtX, float64(y)+block.DtY, 2) / 256
			outputImage.Set(x, y, color.RGBA{uint8(sR), uint8(sG), uint8(sB), 255})
		}
	}
	// The original ignored Create/Encode errors and leaked the file handle.
	file, err := os.Create(filename)
	if err != nil {
		log.Fatalf("[error] Cannot create debug image %s\n", filename)
	}
	defer file.Close()
	if err := png.Encode(file, outputImage); err != nil {
		log.Fatalf("[error] Cannot encode debug image %s\n", filename)
	}
}
// SaveDeltaImage encodes the per-block delta data as a PNG with one pixel
// per block: R = source image index, G/B = x/y offsets in quarter-pixel
// steps biased by 128. The program exits on I/O failure.
func SaveDeltaImage(outputBlocks *BlockData, sourceIndexes []int, filename string) {
	outputImage := image.NewRGBA(image.Rect(0, 0, outputBlocks.Width, outputBlocks.Height))
	for x := 0; x < outputBlocks.Width; x++ {
		for y := 0; y < outputBlocks.Height; y++ {
			block := outputBlocks.GetBlock(x, y)
			sourceIndex := uint8(sourceIndexes[block.SourceIndex])
			dtX := uint8(128 + block.DtX*(1/BlockFractStep))
			dtY := uint8(128 + block.DtY*(1/BlockFractStep))
			outputImage.Set(x, y, color.RGBA{sourceIndex, dtX, dtY, 255})
		}
	}
	file, err := os.Create(filename)
	if err != nil {
		log.Fatalf("[error] Cannot create output image")
	}
	// Close the handle and surface encode errors (previously ignored).
	defer file.Close()
	if err := png.Encode(file, outputImage); err != nil {
		log.Fatalf("[error] Cannot encode output image")
	}
}
// main parses flags, loads the source (JPEG) and target (PNG) images, runs
// block-matching motion estimation, reports PSNR/SSIM, and writes the delta
// image. Source format: -s "filename|index,filename|index,...".
func main() {
	// Command line input
	optImageSources := flag.String("s", "none", "Source JPEG images filenames, indexes")
	optImageTarget := flag.String("t", "none", "Target PNG image filename")
	optImageOutput := flag.String("o", "none", "Output PNG image filename")
	optImageDebug := flag.String("d", "none", "Debug PNG image filename")
	optVerbose := flag.Bool("v", false, "Verbose mode")
	flag.Parse()
	// Load source images (format: "filename|index,filename|index, ...")
	sources := strings.Split(*optImageSources, ",")
	sourceImages := make([]*ImageData, len(sources))
	sourceIndexes := make([]int, len(sources))
	for i := 0; i < len(sources); i++ {
		source := strings.Split(sources[i], "|")
		sourceImages[i] = LoadImageJPEG(source[0])
		sourceIndexes[i], _ = strconv.Atoi(source[1])
	}
	// Load target image
	targetImage := LoadImagePNG(*optImageTarget)
	imageWidth := targetImage.Width
	imageHeight := targetImage.Height
	// Check equal image dimensions
	for i := 0; i < len(sourceImages); i++ {
		if imageWidth != sourceImages[i].Width || imageHeight != sourceImages[i].Height {
			fmt.Fprintf(os.Stderr, "[error] Image dimensions are not equal\n")
			os.Exit(15)
		}
	}
	// Check dimensions divisible by 16 (required by the BlockSize tiling)
	if math.Mod(float64(imageWidth), BlockSize) != 0 || math.Mod(float64(imageHeight), BlockSize) != 0 {
		fmt.Fprintf(os.Stderr, "[error] Image dimensions must be divisible by 16\n")
		os.Exit(16)
	}
	// Print image info
	fmt.Printf("[info] Size: %dx%d\n", imageWidth, imageHeight)
	// Create delta data (timed in milliseconds)
	timeStart := time.Now().UnixNano() / 1e6
	outputBlocks := CreateDelta(sourceImages, targetImage, *optVerbose)
	timeEnd := time.Now().UnixNano() / 1e6
	fmt.Printf("[info] Elapsed time: %d ms\n", timeEnd - timeStart)
	// If defined, save reconstructed image (for comparison)
	if *optImageDebug != "none" {
		SaveReconstructedTarget(sourceImages, outputBlocks, *optImageDebug)
	}
	// Print PSNR, SSIM and save output data
	fmt.Printf("PSNR: %.2fdB\n", GetPSNR(sourceImages, targetImage, outputBlocks))
	fmt.Printf("SSIM: %.3f\n", GetSSIM(sourceImages, targetImage, outputBlocks))
	SaveDeltaImage(outputBlocks, sourceIndexes, *optImageOutput)
}
package integration
import (
"fmt"
json "github.com/bitly/go-simplejson"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
//validateRuleEquals validates that an access rule entry matches the expected
//one. `actual` is a representation of an entry from the ConfigMap handled by
//the Controller. It first asserts deep equality, then checks the structural
//shape of each section so failures point at the offending field.
func validateRuleEquals(actual *json.Json, expected *json.Json) {
	Expect(actual).To(Equal(expected))
	expectOnlyKeys(actual, "id", "upstream", "match", "authenticators", "authorizer", "mutators")
	expectString(actual, "id")
	compareUpstreams(actual.Get("upstream"), expected.Get("upstream"))
	compareMatches(actual.Get("match"), expected.Get("match"))
	compareHandlerArrays(actual.Get("authenticators"), expected.Get("authenticators"))
	compareHandlers(actual.Get("authorizer"), expected.Get("authorizer"))
	compareHandlerArrays(actual.Get("mutators"), expected.Get("mutators"))
}
// compareUpstreams validates the `upstream` section: it must deep-equal the
// expected value and contain exactly `url` (string) and `preserve_host` (bool).
func compareUpstreams(actual *json.Json, expected *json.Json) {
	Expect(actual).To(Equal(expected))
	expectOnlyKeys(actual, "url", "preserve_host")
	expectString(actual, "url")
	expectBoolean(actual.Get("preserve_host"))
}
// compareMatches validates the `match` section: it must deep-equal the
// expected value and contain exactly `url` (string) and `methods` (array of
// strings).
func compareMatches(actual *json.Json, expected *json.Json) {
	Expect(actual).To(Equal(expected))
	expectOnlyKeys(actual, "url", "methods")
	expectString(actual, "url")
	expectStringArray(actual, "methods")
}
// compareHandlerArrays validates arrays of `handler` objects (used by the
// `authenticators` and `mutators` sections): the arrays must deep-equal each
// other and every element must be a well-formed handler.
func compareHandlerArrays(actual *json.Json, expected *json.Json) {
	//both are equal
	Expect(actual).To(Equal(expected))
	//expected must be an Array
	expectedArray, err := expected.Array()
	Expect(err).To(BeNil())
	//each element pair must be a proper handler
	for i := range expectedArray {
		compareHandlers(actual.GetIndex(i), expected.GetIndex(i))
	}
}
//compareHandlers compares `handler` objects, a common type for
//`authenticators`, `authorizer`, and `mutator` configurations.
//The object consists of two properties: `handler` (string) and `config`
//(object, may be nil).
func compareHandlers(actual *json.Json, expected *json.Json) {
	Expect(actual).To(Equal(expected))
	expectAllowedKeys(actual, "handler", "config")
	expectString(actual, "handler")
	expectObjectOrNil(actual, "config")
}
// expectBoolean asserts that data holds a value convertible to bool.
func expectBoolean(data *json.Json) {
	_, err := data.Bool()
	Expect(err).To(BeNil())
}
// expectString asserts that the given attribute of data is convertible to a
// string; on failure the assertion message names the offending attribute.
func expectString(data *json.Json, attributeName string) {
	_, err := data.Get(attributeName).String()
	errMsg := ""
	if err != nil {
		errMsg = fmt.Sprintf("Cannot convert %s to string. Details: %v", attributeName, err)
	}
	Expect(errMsg).To(BeEmpty())
}
// expectStringArray asserts that the given attribute of data is an array
// whose every element is a string; failures name the attribute (and index).
func expectStringArray(data *json.Json, attributeName string) {
	arr, err := data.Get(attributeName).Array()
	errMsg := ""
	if err != nil {
		errMsg = fmt.Sprintf("Cannot convert %s to slice/array. Details: %v", attributeName, err)
	}
	Expect(errMsg).To(BeEmpty())
	for i, elem := range arr {
		if _, ok := elem.(string); !ok {
			errMsg = fmt.Sprintf("Cannot convert element of %s array [%d] to string. Details: %v", attributeName, i, err)
		}
		Expect(errMsg).To(BeEmpty())
	}
}
// expectObjectOrNil asserts that the named attribute is either absent/nil or
// a JSON object.
// Bug fix: the attribute name was previously passed as the literal string
// "attributeName" in the nil check, and the map conversion was applied to
// the parent object (data.Map()) instead of the attribute itself — so the
// function never actually validated the named attribute.
func expectObjectOrNil(data *json.Json, attributeName string) {
	if data.Get(attributeName).Interface() == nil {
		return
	}
	errMsg := ""
	_, err := data.Get(attributeName).Map()
	if err != nil {
		errMsg = fmt.Sprintf("Cannot convert %s to an object. Details: %v", attributeName, err)
	}
	Expect(errMsg).To(BeEmpty())
}
// expectAllowedKeys asserts that genericMap is a JSON object and that every
// key it contains is one of allowedKeys (keys may be missing, but none may
// be extra).
func expectAllowedKeys(genericMap *json.Json, allowedKeys ...string) {
	asMap, ok := genericMap.Interface().(map[string]interface{})
	Expect(ok).To(BeTrue())
	for _, key := range getKeysOf(asMap) {
		Expect(allowedKeys).To(ContainElement(key))
	}
}
// expectOnlyKeys asserts that genericMap is a JSON object containing exactly
// the given keys (no more, no fewer, in any order).
func expectOnlyKeys(genericMap *json.Json, keys ...string) {
	asMap, ok := genericMap.Interface().(map[string]interface{})
	Expect(ok).To(BeTrue())
	Expect(getKeysOf(asMap)).To(ConsistOf(keys))
}
// getKeysOf returns the keys of input in unspecified (map iteration) order.
func getKeysOf(input map[string]interface{}) []string {
	keys := make([]string, 0, len(input))
	for k := range input {
		keys = append(keys, k)
	}
	return keys
}
//wrapSpecAsJson converts from the dynamic client representation to
//*json.Json. "spec" must be a top-level attribute of dynamicObject.
func wrapSpecAsJson(dynamicObject *unstructured.Unstructured) *json.Json {
	//A little trick since go-simplejson doesn't offer a constructor for
	//arbitrary data: stash the content under a temporary key and descend.
	res := json.New()
	res.Set("data", dynamicObject.UnstructuredContent())
	return res.GetPath("data", "spec")
}
package htest
import (
"bytes"
"encoding/json"
"encoding/xml"
"github.com/basgys/goxml2json"
"github.com/stretchr/testify/assert"
"github.com/tidwall/gjson"
"io/ioutil"
"testing"
"time"
)
type (
	// JSON wraps a response body and asserts on it using gjson key paths;
	// failures are reported through the embedded *testing.T.
	JSON struct {
		body []byte
		*testing.T
	}
	// XML wraps an XML body; assertions are delegated to the embedded JSON
	// view produced by converting the XML to JSON (see NewXML).
	XML struct {
		*JSON
		body []byte
	}
	// MD5 holds a body and test context — presumably for MD5 digest
	// assertions defined elsewhere in the package (methods not shown here).
	MD5 struct {
		body []byte
		*testing.T
	}
	// SHA1 holds a body and test context — presumably for SHA1 digest
	// assertions defined elsewhere in the package (methods not shown here).
	SHA1 struct {
		body []byte
		*testing.T
	}
)
// NewJSON wraps a JSON body and the test context for chained assertions.
func NewJSON(body []byte, t *testing.T) *JSON {
	return &JSON{T: t, body: body}
}
// NewXML converts the XML body to JSON (via xml2json) and wraps it so the
// JSON assertion helpers can be reused on XML responses.
// NOTE(review): conversion and read errors are silently discarded — a
// malformed XML body yields an empty JSON view rather than a test failure;
// confirm this is intended.
func NewXML(body []byte, t *testing.T) *XML {
	jsonBuf, _ := xml2json.Convert(bytes.NewBuffer(body))
	jsonBody, _ := ioutil.ReadAll(jsonBuf)
	return &XML{
		body: body,
		JSON: NewJSON(jsonBody, t),
	}
}
// NewMD5 wraps body and the test context for MD5 digest assertions.
func NewMD5(body []byte, t *testing.T) *MD5 {
	return &MD5{T: t, body: body}
}

// NewSHA1 wraps body and the test context for SHA1 digest assertions.
func NewSHA1(body []byte, t *testing.T) *SHA1 {
	return &SHA1{T: t, body: body}
}
// GetKey looks up key in the JSON body using gjson path syntax and
// reports whether it exists.
func (j *JSON) GetKey(key string) (result gjson.Result, exist bool) {
	result = gjson.GetBytes(j.body, key)
	exist = result.Exists()
	return
}

// Exist asserts that key is present in the body.
func (j *JSON) Exist(key string) *JSON {
	_, exist := j.GetKey(key)
	assert.True(j.T, exist)
	return j
}

// NotExist asserts that key is absent from the body.
func (j *JSON) NotExist(key string) *JSON {
	_, exist := j.GetKey(key)
	assert.False(j.T, exist)
	return j
}

// String asserts the value at key equals expect as a string.
func (j *JSON) String(key, expect string) *JSON {
	result, _ := j.GetKey(key)
	assert.Equal(j.T, expect, result.String())
	return j
}

// Int asserts the value at key equals expect as an int64.
func (j *JSON) Int(key string, expect int64) *JSON {
	result, _ := j.GetKey(key)
	assert.Equal(j.T, expect, result.Int())
	return j
}

// True asserts the value at key is true.
func (j *JSON) True(key string) *JSON {
	result, _ := j.GetKey(key)
	assert.True(j.T, result.Bool())
	return j
}

// False asserts the value at key is false.
func (j *JSON) False(key string) *JSON {
	result, _ := j.GetKey(key)
	assert.False(j.T, result.Bool())
	return j
}

// Uint asserts the value at key equals expect as a uint64.
func (j *JSON) Uint(key string, expect uint64) *JSON {
	result, _ := j.GetKey(key)
	assert.Equal(j.T, expect, result.Uint())
	return j
}

// Time asserts the value at key equals expect parsed as a time.
func (j *JSON) Time(key string, expect time.Time) *JSON {
	result, _ := j.GetKey(key)
	assert.Equal(j.T, expect, result.Time())
	return j
}

// Float asserts the value at key equals expect as a float64.
func (j *JSON) Float(key string, expect float64) *JSON {
	result, _ := j.GetKey(key)
	assert.Equal(j.T, expect, result.Float())
	return j
}
// Empty asserts the body is empty once surrounding quotes and newlines
// are trimmed.
func (j *JSON) Empty() *JSON {
	body := bytes.Trim(j.Body(), "\"\n")
	assert.Equal(j.T, "", string(body))
	return j
}

// NotEmpty asserts the body is non-empty once surrounding quotes and
// newlines are trimmed.
func (j *JSON) NotEmpty() *JSON {
	body := bytes.Trim(j.Body(), "\"\n")
	assert.NotEqual(j.T, "", string(body))
	return j
}

// Body returns the raw response body.
func (j *JSON) Body() []byte {
	return j.body
}

// Bind unmarshals the JSON body into obj.
func (j *JSON) Bind(obj interface{}) error {
	return json.Unmarshal(j.body, obj)
}
// The XML helpers below delegate key assertions to the embedded JSON
// view created by NewXML, returning the receiver to keep the fluent
// chain typed as *XML.

// Exist asserts that key is present.
func (x *XML) Exist(key string) *XML {
	x.JSON.Exist(key)
	return x
}

// NotExist asserts that key is absent.
func (x *XML) NotExist(key string) *XML {
	x.JSON.NotExist(key)
	return x
}

// String asserts the value at key equals expect as a string.
func (x *XML) String(key, expect string) *XML {
	x.JSON.String(key, expect)
	return x
}

// Int asserts the value at key equals expect as an int64.
func (x *XML) Int(key string, expect int64) *XML {
	x.JSON.Int(key, expect)
	return x
}

// True asserts the value at key is true.
func (x *XML) True(key string) *XML {
	x.JSON.True(key)
	return x
}

// False asserts the value at key is false.
func (x *XML) False(key string) *XML {
	x.JSON.False(key)
	return x
}

// Uint asserts the value at key equals expect as a uint64.
func (x *XML) Uint(key string, expect uint64) *XML {
	x.JSON.Uint(key, expect)
	return x
}

// Time asserts the value at key equals expect parsed as a time.
func (x *XML) Time(key string, expect time.Time) *XML {
	x.JSON.Time(key, expect)
	return x
}

// Float asserts the value at key equals expect as a float64.
func (x *XML) Float(key string, expect float64) *XML {
	x.JSON.Float(key, expect)
	return x
}

// Empty asserts the raw XML body is empty (no trimming, unlike JSON).
func (x *XML) Empty() *XML {
	assert.Equal(x.T, "", string(x.Body()))
	return x
}

// NotEmpty asserts the raw XML body is non-empty.
func (x *XML) NotEmpty() *XML {
	assert.NotEqual(x.T, "", string(x.Body()))
	return x
}

// Body returns the raw (original XML) response body, not the JSON view.
func (x *XML) Body() []byte {
	return x.body
}

// Bind unmarshals the XML body into obj.
func (x *XML) Bind(obj interface{}) error {
	return xml.Unmarshal(x.body, obj)
}
// Expect asserts that the body equals the expected MD5 digest string.
func (m *MD5) Expect(expect string) *MD5 {
	assert.Equal(m.T, expect, string(m.Body()))
	return m
}

// Body returns the raw response body.
func (m *MD5) Body() []byte {
	return m.body
}

// Expect asserts that the body equals the expected SHA1 digest string.
func (s *SHA1) Expect(expect string) *SHA1 {
	assert.Equal(s.T, expect, string(s.Body()))
	return s
}

// Body returns the raw response body.
func (s *SHA1) Body() []byte {
	return s.body
}
package timeseries
import (
"fmt"
"strconv"
"time"
"github.com/grokify/gocharts/v2/data/table"
)
// ParseTableTimeSeriesSetMatrix creates a `TimeSeriesSet` from a `table.Table` using the least
// amount of input to populate the data structure. The time must be in column 0 and the series
// names must be in the column headers. A nil timeParseFunc defaults to
// RFC 3339 parsing.
func ParseTableTimeSeriesSetMatrix(tbl table.Table, isFloat bool, timeParseFunc func(s string) (time.Time, error)) (TimeSeriesSet, error) {
	if timeParseFunc == nil {
		timeParseFunc = ParseTimeFuncRFC3339
	}
	tss := NewTimeSeriesSet("")
	tss.IsFloat = isFloat
	for y, row := range tbl.Rows {
		// Rows without at least one data column are skipped.
		if len(row) <= 1 {
			continue
		}
		dt, err := timeParseFunc(row[0])
		if err != nil {
			// Include the underlying parse error, consistent with
			// ParseTableTimeSeriesSetFlat (previously the detail was lost).
			return tss, fmt.Errorf("cannot parse time [%s] in row [%d] err [%s]", row[0], y, err.Error())
		}
		for x := 1; x < len(row); x++ {
			if x >= len(tbl.Columns) {
				return tss, fmt.Errorf("no column header for column [%d] on row [%d]", x, y)
			}
			seriesName := tbl.Columns[x]
			countString := row[x]
			// Counts are stored as float64 or int64 depending on isFloat.
			if isFloat {
				countFloat, err := strconv.ParseFloat(countString, 64)
				if err != nil {
					return tss, fmt.Errorf("cannot parse count as float64 [%s] in row [%d] err [%s]", row[x], y, err.Error())
				}
				tss.AddFloat64(seriesName, dt, countFloat)
			} else {
				countInt, err := strconv.Atoi(countString)
				if err != nil {
					return tss, fmt.Errorf("cannot parse count as int [%s] in row [%d] err [%s]", row[x], y, err.Error())
				}
				tss.AddInt64(seriesName, dt, int64(countInt))
			}
		}
	}
	tss.Times = tss.TimeSlice(true)
	return tss, nil
}
// ParseTableTimeSeriesSetFlat creates a `TimeSeriesSet` from a "flat" `table.Table`
// where each row carries (time, series name, count) in the given column
// indices. The set's Name is taken from the table. It does not set
// `Interval`, which must be set manually. A nil timeParseFunc defaults
// to RFC 3339 parsing.
func ParseTableTimeSeriesSetFlat(tbl table.Table, timeColIdx, seriesNameColIdx, countColIdx uint, isFloat bool, timeParseFunc func(s string) (time.Time, error)) (TimeSeriesSet, error) {
	if timeParseFunc == nil {
		timeParseFunc = ParseTimeFuncRFC3339
	}
	tss := NewTimeSeriesSet(tbl.Name)
	tss.IsFloat = isFloat
	for i, row := range tbl.Rows {
		// Validate all three column indices against this row's width.
		if int(seriesNameColIdx) >= len(row) {
			return tss, fmt.Errorf("colIdx [%d] not present in row [%d]", seriesNameColIdx, i)
		}
		if int(timeColIdx) >= len(row) {
			return tss, fmt.Errorf("colIdx [%d] not present in row [%d]", timeColIdx, i)
		}
		if int(countColIdx) >= len(row) {
			return tss, fmt.Errorf("colIdx [%d] not present in row [%d]", countColIdx, i)
		}
		seriesName := row[seriesNameColIdx]
		dt, err := timeParseFunc(row[timeColIdx])
		if err != nil {
			return tss, fmt.Errorf("cannot parse time [%s] in row [%d] err [%s]", row[timeColIdx], i, err.Error())
		}
		countString := row[countColIdx]
		// Counts are stored as float64 or int64 depending on isFloat.
		if isFloat {
			countFloat, err := strconv.ParseFloat(countString, 64)
			if err != nil {
				return tss, fmt.Errorf("cannot parse count as float64 [%s] in row [%d] err [%s]", row[countColIdx], i, err.Error())
			}
			tss.AddFloat64(seriesName, dt, countFloat)
		} else {
			countInt, err := strconv.Atoi(countString)
			if err != nil {
				return tss, fmt.Errorf("cannot parse count as int [%s] in row [%d] err [%s]", row[countColIdx], i, err.Error())
			}
			tss.AddInt64(seriesName, dt, int64(countInt))
		}
	}
	return tss, nil
}
// ParseTimeFuncMonthYear parses strings like "January 2006".
func ParseTimeFuncMonthYear(s string) (time.Time, error) {
	const layout = "January 2006"
	return time.Parse(layout, s)
}
// ParseTimeFuncRFC3339 parses RFC 3339 timestamps such as
// "2006-01-02T15:04:05Z".
func ParseTimeFuncRFC3339(s string) (time.Time, error) {
	const layout = time.RFC3339
	return time.Parse(layout, s)
}
// ParseTimeFuncYearDotMonth parses strings like "2006.01".
func ParseTimeFuncYearDotMonth(s string) (time.Time, error) {
	const layout = "2006.01"
	return time.Parse(layout, s)
}
package data
import (
"encoding/csv"
"fmt"
. "github.com/woojiahao/govid-19/pkg/utility"
"io"
"os"
"strings"
"time"
)
// TimeSeriesPaths maps each time series type to the repository path of
// its CSV dataset.
var TimeSeriesPaths = map[TimeSeriesType]RepoPath{
	Confirmed: ConfirmedTimeSeries,
	Deaths:    DeathsTimeSeries,
	Recovered: RecoveredTimeSeries,
}
type (
	// TimeSeriesRecordData is a single day of data for a specific
	// country/region. Value holds the daily increment, not the
	// cumulative total (see getTimeSeries).
	TimeSeriesRecordData struct {
		Date  time.Time `json:"date"`
		Value int       `json:"value"`
	}
	// TimeSeriesRecord is a single row in the time series dataset.
	TimeSeriesRecord struct {
		TimeSeriesType       TimeSeriesType         `json:"-"`
		State                string                 `json:"state"`
		Country              string                 `json:"country"`
		Longitude            float32                `json:"long"`
		Latitude             float32                `json:"lat"`
		Total                int                    `json:"total"`
		Data                 []TimeSeriesRecordData `json:"data"`
	}
	// Series is the complete collection of records for one series type.
	Series struct {
		TimeSeriesType TimeSeriesType     `json:"-"`
		Records        []TimeSeriesRecord `json:"records"`
	}
)
// GetValueOfDate returns the recorded value for the given date from the
// first record whose country or state matches case-insensitively.
// It returns 0 when no record matches at all, and -1 when a record
// matches but holds no data point for the requested date.
// NOTE(review): the match is country OR state — presumably a state
// filter should apply only within the matching country; confirm against
// callers before tightening.
func (s *Series) GetValueOfDate(country, state string, date time.Time) int {
	records := make([]TimeSeriesRecord, 0)
	for _, record := range s.Records {
		// EqualFold avoids allocating two lowered copies per comparison.
		isCountryMatching := strings.EqualFold(record.Country, country)
		isStateMatching := strings.EqualFold(record.State, state)
		if isCountryMatching || isStateMatching {
			records = append(records, record)
		}
	}
	if len(records) <= 0 {
		return 0
	}
	for _, record := range records[0].Data {
		// time.Time must be compared with Equal, not ==: == also compares
		// wall clock representation and Location, so identical instants
		// can fail to match.
		if date.Equal(record.Date) {
			return record.Value
		}
	}
	return -1
}
// getTimeSeries loads the CSV dataset for the given series type and
// converts each cumulative daily column into per-day increments.
// The CSV layout is: state, country, longitude, latitude, then one
// column per day with "M/D/YY" headers. Any I/O or parse failure aborts
// via Check.
// NOTE(review): the opened file is never closed — a deferred
// file.Close() is missing; confirm and fix alongside the Check helper.
func getTimeSeries(seriesType TimeSeriesType) Series {
	file, err := os.Open(string(TimeSeriesPaths[seriesType]))
	Check(err)
	r := csv.NewReader(file)
	idx, headers, records := 0, make([]string, 0), make([]TimeSeriesRecord, 0)
	for {
		idx++
		record, err := r.Read()
		if err == io.EOF {
			break
		}
		// The first row holds the column headers, including the dates.
		if idx == 1 {
			for _, header := range record {
				headers = append(headers, header)
			}
		} else {
			// Columns 0-3 are metadata; the rest are cumulative counts.
			rawData, timeHeaders, data := record[4:], headers[4:], make([]TimeSeriesRecordData, 0)
			for i, d := range rawData {
				// Convert the cumulative count into a daily increment by
				// subtracting the previous day's cumulative value.
				prev := 0
				if i != 0 {
					prev = ToInt(rawData[i-1])
				}
				increment := ToInt(d) - prev
				// Headers are "M/D/YY"; normalize to "MM/DD/20YY".
				date := strings.Split(timeHeaders[i], "/")
				month, day, year := date[0], date[1], date[2]
				const timeLayout = "01/02/2006"
				formattedDate, err := time.Parse(timeLayout, fmt.Sprintf("%02s/%02s/20%s", month, day, year))
				Check(err)
				data = append(data, TimeSeriesRecordData{
					Date:  formattedDate,
					Value: increment,
				})
			}
			timeSeriesRecord := TimeSeriesRecord{
				TimeSeriesType: seriesType,
				State:          record[0],
				Country:        record[1],
				Longitude:      ToFloat32(record[2]),
				Latitude:       ToFloat32(record[3]),
				// The final cumulative column is the running total.
				Total: ToInt(rawData[len(rawData)-1]),
				Data:  data,
			}
			records = append(records, timeSeriesRecord)
		}
		Check(err)
	}
	return Series{
		TimeSeriesType: seriesType,
		Records:        records,
	}
}
package function
// Simple implementation of a Run Length Codec
// Length is transmitted as 1 or 2 bytes (minus 1 bit for the mask that indicates
// whether a second byte is used). The run threshold can be provided.
// For a run threshold of 2:
// EG input: 0x10 0x11 0x11 0x17 0x13 0x13 0x13 0x13 0x13 0x13 0x12 (160 times) 0x14
// output: 0x10 0x11 0x11 0x17 0x13 0x13 0x13 0x05 0x12 0x12 0x80 0xA0 0x14
import (
"errors"
"kanzi"
)
const (
	// TWO_BYTE_RLE_MASK flags (in the first length byte) that the run
	// length is encoded on two bytes.
	TWO_BYTE_RLE_MASK = 0x80
	// RLT_MAX_RUN is the largest run length representable in the 15
	// usable bits of a two-byte length.
	RLT_MAX_RUN = 0x7FFF
	// DEFAULT_RLE_THRESHOLD is the default minimum run length that
	// triggers run-length encoding.
	DEFAULT_RLE_THRESHOLD = 3
)
// RLT is a simple Run Length codec. Runs of identical bytes at least
// runThreshold long are collapsed to threshold literal copies followed
// by a 1- or 2-byte remaining length.
type RLT struct {
	size         uint
	runThreshold uint
}

// NewRLT creates a Run Length codec for sz bytes (0 means "whole input
// slice") with a run threshold in [2, 256].
func NewRLT(sz, threshold uint) (*RLT, error) {
	switch {
	case threshold < 2:
		return nil, errors.New("Invalid run threshold parameter (must be at least 2)")
	case threshold > 256:
		return nil, errors.New("Invalid run threshold parameter (must be at most 256)")
	}
	return &RLT{size: sz, runThreshold: threshold}, nil
}
// Size returns the configured block size (0 means "whole slice").
func (this *RLT) Size() uint {
	return this.size
}

// RunTheshold returns the configured run threshold.
// NOTE(review): the exported name misspells "Threshold"; renaming would
// break callers, so it is kept as-is.
func (this *RLT) RunTheshold() uint {
	return this.runThreshold
}
// Forward run-length encodes src into dst and returns the number of
// bytes read and written. A run of at least runThreshold identical
// bytes is emitted as threshold literal copies followed by the
// remaining run length on one byte, or two bytes with the MSB of the
// first byte set.
func (this *RLT) Forward(src, dst []byte) (uint, uint, error) {
	if src == nil {
		return uint(0), uint(0), errors.New("Invalid null source buffer")
	}
	if dst == nil {
		return uint(0), uint(0), errors.New("Invalid null destination buffer")
	}
	if kanzi.SameByteSlices(src, dst, false) {
		return 0, 0, errors.New("Input and output buffers cannot be equal")
	}
	// A configured size of 0 means "encode the whole slice".
	srcEnd := this.size
	if this.size == 0 {
		srcEnd = uint(len(src))
	}
	dstEnd := uint(len(dst))
	run := 1
	threshold := int(this.runThreshold)
	maxThreshold := RLT_MAX_RUN + int(this.runThreshold)
	srcIdx := uint(0)
	dstIdx := uint(0)
	// Initialize with a value different from the first data
	prev := ^src[srcIdx]
	for srcIdx < srcEnd && dstIdx < dstEnd {
		val := byte(src[srcIdx])
		srcIdx++
		// Encode up to 0x7FFF repetitions in the 'length' information
		if prev == val && run < maxThreshold {
			run++
			// Below the threshold, bytes are copied through literally.
			if run < threshold {
				dst[dstIdx] = prev
				dstIdx++
			}
			continue
		}
		// Run ended: flush the final literal plus the remaining length.
		if run >= threshold {
			dst[dstIdx] = prev
			dstIdx++
			run -= threshold
			// Force MSB to indicate a 2 byte encoding of the length
			if run >= TWO_BYTE_RLE_MASK {
				dst[dstIdx] = byte((run >> 8) | TWO_BYTE_RLE_MASK)
				dstIdx++
			}
			dst[dstIdx] = byte(run)
			dstIdx++
			run = 1
		}
		dst[dstIdx] = val
		dstIdx++
		if prev != val {
			prev = val
			run = 1
		}
	}
	// Fill up the destination array
	if run >= threshold {
		dst[dstIdx] = prev
		dstIdx++
		run -= threshold
		// Force MSB to indicate a 2 byte encoding of the length
		if run >= TWO_BYTE_RLE_MASK {
			dst[dstIdx] = byte((run >> 8) | TWO_BYTE_RLE_MASK)
			dstIdx++
		}
		dst[dstIdx] = byte(run & 0xFF)
		dstIdx++
	}
	return srcIdx, dstIdx, nil
}
// Inverse decodes data produced by Forward from src into dst and
// returns the number of bytes read and written.
func (this *RLT) Inverse(src, dst []byte) (uint, uint, error) {
	if src == nil {
		return uint(0), uint(0), errors.New("Invalid null source buffer")
	}
	if dst == nil {
		return uint(0), uint(0), errors.New("Invalid null destination buffer")
	}
	if kanzi.SameByteSlices(src, dst, false) {
		return 0, 0, errors.New("Input and output buffers cannot be equal")
	}
	// A configured size of 0 means "decode the whole slice".
	srcEnd := this.size
	if this.size == 0 {
		srcEnd = uint(len(src))
	}
	dstEnd := uint(len(dst))
	run := 0
	threshold := int(this.runThreshold)
	srcIdx := uint(0)
	dstIdx := uint(0)
	// Initialize with a value different from the first data
	prev := ^src[srcIdx]
	for srcIdx < srcEnd && dstIdx < dstEnd {
		val := src[srcIdx]
		srcIdx++
		if prev == val {
			run++
			// threshold literal copies signal an encoded run length next.
			if run >= threshold {
				// Read the length
				run = int(src[srcIdx])
				srcIdx++
				// If the length is encoded in 2 bytes, process next byte
				if run&TWO_BYTE_RLE_MASK != 0 {
					run = ((run & (^TWO_BYTE_RLE_MASK)) << 8) | int(src[srcIdx])
					srcIdx++
				}
				// Emit length times the previous byte
				for run > 0 {
					dst[dstIdx] = prev
					dstIdx++
					run--
				}
			}
		} else {
			prev = val
			run = 1
		}
		dst[dstIdx] = val
		dstIdx++
	}
	return srcIdx, dstIdx, nil
}
// MaxEncodedLen returns the worst-case encoded size for srcLen input
// bytes. The required output buffer size is unknown for this transform,
// so -1 is returned.
func (this RLT) MaxEncodedLen(srcLen int) int {
	return -1
}
package graphmatrix
import (
"errors"
"fmt"
"sort"
)
// GraphMatrix holds a row index and vector of column pointers.
// If a point is defined at a particular row i and column j, an
// edge exists between vertex i and vertex j.
// GraphMatrices thus represent directed graphs; undirected graphs
// must explicitly set the reverse edge from j to i.
// The layout is CSR-like: row r's columns live in
// Indices[IndPtr[r]:IndPtr[r+1]], kept sorted ascending.
type GraphMatrix struct {
	IndPtr  []uint64 // indexes into Indices - must be twice the width of Indices.
	Indices []uint32 // contains the row values for each column. A stride represents the outneighbors of a vertex at col j.
}
// String renders the index vectors and dimension for pretty printing.
func (g GraphMatrix) String() string {
	return fmt.Sprintf("GraphMatrix %v, %v, size %d", g.IndPtr, g.Indices, g.Dim())
}
// GetRow returns the out-neighbor list of row r, or an error when r is
// out of range. The returned slice aliases the matrix storage; callers
// must not mutate it.
func (g GraphMatrix) GetRow(r uint32) ([]uint32, error) {
	// Row r needs both IndPtr[r] and IndPtr[r+1], so valid rows are
	// 0..len(IndPtr)-2. The old check (r > len-1) let r == len(IndPtr)-1
	// through and panicked on the IndPtr[r+1] access below; it also
	// underflowed for an empty IndPtr.
	if uint64(r)+1 >= uint64(len(g.IndPtr)) {
		return []uint32{}, fmt.Errorf("Row %d out of bounds (max %d)", r, len(g.IndPtr)-2)
	}
	rowStart := g.IndPtr[r]
	rowEnd := g.IndPtr[r+1]
	return g.Indices[rowStart:rowEnd], nil
}
// cumsum replaces the first n entries of c with their exclusive prefix
// sums, mirrors those sums into p (with the grand total at p[n]), and
// returns the total. Adapted from github.com/james-bowman/sparse.
func cumsum(p []uint64, c []uint64, n uint64) uint64 {
	var total uint64
	for i := uint64(0); i < n; i++ {
		p[i] = total
		total += c[i]
		c[i] = p[i]
	}
	p[n] = total
	return total
}

// compress builds CSR-style row pointers (ia) and column indices (ja)
// from parallel row/col coordinate lists for an n-row matrix.
// Adapted from github.com/james-bowman/sparse.
func compress(row []uint32, col []uint32, n uint64) (ia []uint64, ja []uint32) {
	counts := make([]uint64, n+1)
	ia = make([]uint64, n+1)
	ja = make([]uint32, len(col))
	// Count entries per row, then turn counts into insertion cursors.
	for _, r := range row {
		counts[r]++
	}
	cumsum(ia, counts, n)
	// Scatter each column value into its row's next free slot.
	for j, c := range col {
		slot := counts[row[j]]
		ja[slot] = c
		counts[row[j]]++
	}
	return ia, ja
}
// inRange reports whether (r, c) is a valid index into the matrix.
func (g *GraphMatrix) inRange(r, c uint32) bool {
	dim := g.Dim()
	return r < dim && c < dim
}
// Dim returns the (single-axis) dimension of the GraphMatrix, i.e. the
// number of rows/columns.
func (g GraphMatrix) Dim() uint32 {
	return uint32(len(g.IndPtr) - 1)
}

// N returns the number of defined values (edges) in the GraphMatrix.
func (g *GraphMatrix) N() uint64 {
	return uint64(len(g.Indices))
}
// GetIndex reports whether the entry at (r, c) is defined.
func (g GraphMatrix) GetIndex(r, c uint32) bool {
	// Row r requires IndPtr[r] and IndPtr[r+1]. The previous bounds
	// check compared c+1 against len(IndPtr), so an out-of-range r
	// panicked on the accesses below; now out-of-range rows report
	// false. Out-of-range columns still report false via the search.
	if uint64(r)+1 >= uint64(len(g.IndPtr)) {
		return false
	}
	r1 := g.IndPtr[r]
	r2 := g.IndPtr[r+1]
	if r1 >= r2 {
		// Empty row: no columns defined.
		return false
	}
	_, found := SearchSorted32(g.Indices, c, r1, r2)
	return found
}
// SetIndex sets the value at (r, c) to true.
// This can be a relatively expensive operation as it can force
// reallocation as the vectors increase in size.
func (g *GraphMatrix) SetIndex(r, c uint32) error {
	if !g.inRange(r, c) {
		return errors.New("index out of range")
	}
	rowStartIdx := g.IndPtr[r] // this is the pointer into the Indices for column c
	rowEndIdx := g.IndPtr[r+1]
	i, found := SearchSorted32(g.Indices, c, rowStartIdx, rowEndIdx)
	if found { // already set
		return nil
	}
	// Insert c at position i, shifting the tail right by one element to
	// keep the row's columns sorted.
	g.Indices = append(g.Indices, 0)
	copy(g.Indices[i+1:], g.Indices[i:])
	g.Indices[i] = c
	// Every subsequent row now starts one element further along.
	for i := int(r + 1); i < len(g.IndPtr); i++ {
		g.IndPtr[i]++
	}
	return nil
}
// maxUint32 returns the largest value in v and its first position.
// For an empty vector it returns (0, -1).
func maxUint32(v []uint32) (max uint32, maxPos int) {
	if len(v) == 0 {
		return 0, -1
	}
	for i, n := range v {
		if n > max {
			max, maxPos = n, i
		}
	}
	return max, maxPos
}
// NewZero creates an empty m x m sparse matrix.
func NewZero(m int) (GraphMatrix, error) {
	if m < 0 {
		return GraphMatrix{}, errors.New("dimensions must be non-negative")
	}
	return GraphMatrix{
		IndPtr:  make([]uint64, m+1),
		Indices: make([]uint32, 0),
	}, nil
}
// NewFromSortedIJ builds a GraphMatrix from parallel source/destination
// index slices. s must be sorted ascending (its last element is taken
// as its maximum).
func NewFromSortedIJ(s []uint32, d []uint32) (GraphMatrix, error) {
	if len(s) != len(d) {
		return GraphMatrix{}, fmt.Errorf("graph inputs must be of the same length (got %d, %d)", len(s), len(d))
	}
	// Guard the s[len(s)-1] access below: empty input used to panic.
	if len(s) == 0 {
		return GraphMatrix{}, errors.New("graph inputs must be non-empty")
	}
	m1 := s[len(s)-1]     // max s - this is O(1) since s is sorted
	m2, _ := maxUint32(d) // max d - this is O(n)
	m := m1
	if m2 > m1 {
		m = m2
	}
	m++ // m is the number of rows/cols for this matrix.
	ia, ja := compress(s, d, uint64(m))
	// ja, data = dedupe(ia, ja, data, c.r, c.c)
	return GraphMatrix{ia, ja}, nil
}
// SortIJ sorts two vectors s and d by s, then by d, and eliminates any duplicate pairs.
// Modifies s and d (they may be truncated after deduplication).
func SortIJ(s, d *[]uint32) error {
	if len(*s) != len(*d) {
		return errors.New("inputs must be of the same length")
	}
	// Pack each (s, d) pair into one uint64 (s in the high 32 bits) so
	// that a single sort orders by s, then d.
	sd := make([]uint64, len(*s))
	for i := 0; i < len(*s); i++ {
		sd[i] = uint64((*s)[i])<<32 + uint64((*d)[i])
	}
	sort.Slice(sd, func(i, j int) bool { return sd[i] < sd[j] })
	UniqSorted(&sd)
	// Unpack in place; sd may now be shorter than the original inputs.
	for i := 0; i < len(sd); i++ {
		(*s)[i] = uint32(sd[i] >> 32)
		(*d)[i] = uint32(sd[i] & 0x00000000ffffffff)
	}
	(*s) = (*s)[:len(sd)]
	(*d) = (*d)[:len(sd)]
	return nil
}
// UniqSorted deduplicates a sorted vector in place, keeping the first
// occurrence of each value and truncating the slice.
func UniqSorted(a *[]uint64) {
	// An empty input used to panic on the final (*a)[:j+1] reslice.
	if len(*a) == 0 {
		return
	}
	j := 0
	for i := 1; i < len(*a); i++ {
		if (*a)[j] == (*a)[i] {
			continue
		}
		j++
		// preserve the original data
		// in[i], in[j] = in[j], in[i]
		// only set what is required
		(*a)[j] = (*a)[i]
	}
	(*a) = (*a)[:j+1]
}
// SearchSorted32 finds a value x in sorted vector v.
// Returns index and true/false indicating found.
// lo and hi constrains search to these Indices.
// If lo/hi are out of bounds, return -1 and false unless
// the vector is empty, in which case return 0 and false.
func SearchSorted32(v []uint32, x uint32, lo, hi uint64) (int, bool) {
	ulen := uint64(len(v))
	if ulen == 0 {
		return 0, false
	}
	// An empty window reports "not found" at its own position, which is
	// the correct insertion point for SetIndex.
	if lo == hi {
		return int(lo), false
	}
	if ulen < lo || ulen < hi || lo > hi {
		return -1, false
	}
	// Binary-search the [lo, hi) window; sort.Search works on offsets,
	// so shift by lo.
	s := sort.Search(int(hi-lo), func(i int) bool { return v[int(lo)+i] >= x }) + int(lo)
	found := (s < len(v)) && (v[s] == x)
	return s, found
}
package data
import (
"fmt"
"time"
)
// vector represents a Field's collection of Elements. It abstracts over
// the typed slice implementations (int8Vector, nullableInt8Vector, ...)
// so Field code can be written once. Method semantics follow those
// concrete implementations (not shown in this file).
type vector interface {
	Set(idx int, i interface{})
	Append(i interface{})
	Extend(i int)
	At(i int) interface{}
	Len() int
	Type() FieldType
	PointerAt(i int) interface{}
	CopyAt(i int) interface{}
	ConcreteAt(i int) (val interface{}, ok bool)
}
// newVector returns the concrete vector implementation matching the
// given slice type t, pre-sized to n elements. It panics when t is not
// one of the supported primitive slice types (see ValidFieldType).
func newVector(t interface{}, n int) (v vector) {
	switch t.(type) {
	// ints
	case []int8:
		v = newInt8Vector(n)
	case []*int8:
		v = newNullableInt8Vector(n)
	case []int16:
		v = newInt16Vector(n)
	case []*int16:
		v = newNullableInt16Vector(n)
	case []int32:
		v = newInt32Vector(n)
	case []*int32:
		v = newNullableInt32Vector(n)
	case []int64:
		v = newInt64Vector(n)
	case []*int64:
		v = newNullableInt64Vector(n)
	// uints
	case []uint8:
		v = newUint8Vector(n)
	case []*uint8:
		v = newNullableUint8Vector(n)
	case []uint16:
		v = newUint16Vector(n)
	case []*uint16:
		v = newNullableUint16Vector(n)
	case []uint32:
		v = newUint32Vector(n)
	case []*uint32:
		v = newNullableUint32Vector(n)
	case []uint64:
		v = newUint64Vector(n)
	case []*uint64:
		v = newNullableUint64Vector(n)
	// floats
	case []float32:
		v = newFloat32Vector(n)
	case []*float32:
		v = newNullableFloat32Vector(n)
	case []float64:
		v = newFloat64Vector(n)
	case []*float64:
		v = newNullableFloat64Vector(n)
	// other supported primitives
	case []string:
		v = newStringVector(n)
	case []*string:
		v = newNullableStringVector(n)
	case []bool:
		v = newBoolVector(n)
	case []*bool:
		v = newNullableBoolVector(n)
	case []time.Time:
		v = newTimeTimeVector(n)
	case []*time.Time:
		v = newNullableTimeTimeVector(n)
	default:
		panic(fmt.Sprintf("unsupported vector type of %T", t))
	}
	return
}
// ValidFieldType returns if a primitive slice is a valid supported
// Field type. Supported element types are the fixed-size signed and
// unsigned integers, float32/float64, string, bool, time.Time, and
// pointers to each of those.
func ValidFieldType(t interface{}) bool {
	switch t.(type) {
	case []int8, []*int8, []int16, []*int16,
		[]int32, []*int32, []int64, []*int64,
		[]uint8, []*uint8, []uint16, []*uint16,
		[]uint32, []*uint32, []uint64, []*uint64,
		[]float32, []*float32, []float64, []*float64,
		[]string, []*string,
		[]bool, []*bool,
		[]time.Time, []*time.Time:
		return true
	default:
		return false
	}
}
// FieldType indicates the Go type underlying the Field.
// NOTE(review): values are positional (iota); reordering the constants
// would change any numeric values compared or persisted elsewhere.
type FieldType int

const (
	// FieldTypeInt8 indicates the underlying primitive is a []int8.
	FieldTypeInt8 FieldType = iota
	// FieldTypeNullableInt8 indicates the underlying primitive is a []*int8.
	FieldTypeNullableInt8
	// FieldTypeInt16 indicates the underlying primitive is a []Int16.
	FieldTypeInt16
	// FieldTypeNullableInt16 indicates the underlying primitive is a []*Int16.
	FieldTypeNullableInt16
	// FieldTypeInt32 indicates the underlying primitive is a []int32.
	FieldTypeInt32
	// FieldTypeNullableInt32 indicates the underlying primitive is a []*int32.
	FieldTypeNullableInt32
	// FieldTypeInt64 indicates the underlying primitive is a []int64.
	FieldTypeInt64
	// FieldTypeNullableInt64 indicates the underlying primitive is a []*int64.
	FieldTypeNullableInt64
	// FieldTypeUint8 indicates the underlying primitive is a []uint8.
	FieldTypeUint8
	// FieldTypeNullableUint8 indicates the underlying primitive is a []*uint8.
	FieldTypeNullableUint8
	// FieldTypeUint16 indicates the underlying primitive is a []uint16.
	FieldTypeUint16
	// FieldTypeNullableUint16 indicates the underlying primitive is a []*uint16.
	FieldTypeNullableUint16
	// FieldTypeUint32 indicates the underlying primitive is a []uint32.
	FieldTypeUint32
	// FieldTypeNullableUint32 indicates the underlying primitive is a []*uint32.
	FieldTypeNullableUint32
	// FieldTypeUint64 indicates the underlying primitive is a []uint64.
	FieldTypeUint64
	// FieldTypeNullableUint64 indicates the underlying primitive is a []*uint64.
	FieldTypeNullableUint64
	// FieldTypeFloat32 indicates the underlying primitive is a []float32.
	FieldTypeFloat32
	// FieldTypeNullableFloat32 indicates the underlying primitive is a []*float32.
	FieldTypeNullableFloat32
	// FieldTypeFloat64 indicates the underlying primitive is a []float64.
	FieldTypeFloat64
	// FieldTypeNullableFloat64 indicates the underlying primitive is a []*float64.
	FieldTypeNullableFloat64
	// FieldTypeString indicates the underlying primitive is a []string.
	FieldTypeString
	// FieldTypeNullableString indicates the underlying primitive is a []*string.
	FieldTypeNullableString
	// FieldTypeBool indicates the underlying primitive is a []bool.
	FieldTypeBool
	// FieldTypeNullableBool indicates the underlying primitive is a []*bool.
	FieldTypeNullableBool
	// FieldTypeTime indicates the underlying primitive is a []time.Time.
	FieldTypeTime
	// FieldTypeNullableTime indicates the underlying primitive is a []*time.Time.
	FieldTypeNullableTime
)
// vectorFieldType returns the FieldType corresponding to a concrete
// vector implementation, or FieldType(-1) if the implementation is
// unknown.
func vectorFieldType(v vector) FieldType {
	switch v.(type) {
	case *int8Vector:
		return FieldTypeInt8
	case *nullableInt8Vector:
		return FieldTypeNullableInt8
	case *int16Vector:
		return FieldTypeInt16
	case *nullableInt16Vector:
		return FieldTypeNullableInt16
	case *int32Vector:
		return FieldTypeInt32
	case *nullableInt32Vector:
		return FieldTypeNullableInt32
	case *int64Vector:
		return FieldTypeInt64
	case *nullableInt64Vector:
		return FieldTypeNullableInt64
	case *uint8Vector:
		return FieldTypeUint8
	case *nullableUint8Vector:
		return FieldTypeNullableUint8
	case *uint16Vector:
		return FieldTypeUint16
	case *nullableUint16Vector:
		return FieldTypeNullableUint16
	case *uint32Vector:
		return FieldTypeUint32
	case *nullableUint32Vector:
		return FieldTypeNullableUint32
	case *uint64Vector:
		return FieldTypeUint64
	case *nullableUint64Vector:
		return FieldTypeNullableUint64
	case *float32Vector:
		return FieldTypeFloat32
	case *nullableFloat32Vector:
		return FieldTypeNullableFloat32
	case *float64Vector:
		return FieldTypeFloat64
	case *nullableFloat64Vector:
		return FieldTypeNullableFloat64
	case *stringVector:
		return FieldTypeString
	case *nullableStringVector:
		return FieldTypeNullableString
	case *boolVector:
		return FieldTypeBool
	case *nullableBoolVector:
		return FieldTypeNullableBool
	case *timeTimeVector:
		return FieldTypeTime
	case *nullableTimeTimeVector:
		return FieldTypeNullableTime
	}
	// Sentinel for "not a known vector implementation".
	return FieldType(-1)
}
// fieldTypeFromVal returns the FieldType whose element type matches a
// single scalar value v, or FieldType(-1) when v is not a supported
// element type.
func fieldTypeFromVal(v interface{}) FieldType {
	switch v.(type) {
	case int8:
		return FieldTypeInt8
	case *int8:
		return FieldTypeNullableInt8
	case int16:
		return FieldTypeInt16
	case *int16:
		return FieldTypeNullableInt16
	case int32:
		return FieldTypeInt32
	case *int32:
		return FieldTypeNullableInt32
	case int64:
		return FieldTypeInt64
	case *int64:
		return FieldTypeNullableInt64
	case uint8:
		return FieldTypeUint8
	case *uint8:
		return FieldTypeNullableUint8
	case uint16:
		return FieldTypeUint16
	case *uint16:
		return FieldTypeNullableUint16
	case uint32:
		return FieldTypeUint32
	case *uint32:
		return FieldTypeNullableUint32
	case uint64:
		return FieldTypeUint64
	case *uint64:
		return FieldTypeNullableUint64
	case float32:
		return FieldTypeFloat32
	case *float32:
		return FieldTypeNullableFloat32
	case float64:
		return FieldTypeFloat64
	case *float64:
		return FieldTypeNullableFloat64
	case string:
		return FieldTypeString
	case *string:
		return FieldTypeNullableString
	case bool:
		return FieldTypeBool
	case *bool:
		return FieldTypeNullableBool
	case time.Time:
		return FieldTypeTime
	case *time.Time:
		return FieldTypeNullableTime
	}
	// Sentinel for "not a supported element type".
	return FieldType(-1)
}
// String returns a Go-like representation of the Field's slice type
// (e.g. "[]*int64"), or "invalid/unsupported" for negative values.
func (p FieldType) String() string {
	if p < 0 {
		return "invalid/unsupported"
	}
	return fmt.Sprintf("[]%v", p.ItemTypeString())
}
// NewFieldFromFieldType creates a new Field of the given pType of
// length n. It panics when p is not a supported FieldType.
func NewFieldFromFieldType(p FieldType, n int) *Field {
	f := &Field{}
	switch p {
	// ints
	case FieldTypeInt8:
		f.vector = newInt8Vector(n)
	case FieldTypeNullableInt8:
		f.vector = newNullableInt8Vector(n)
	case FieldTypeInt16:
		f.vector = newInt16Vector(n)
	case FieldTypeNullableInt16:
		f.vector = newNullableInt16Vector(n)
	case FieldTypeInt32:
		f.vector = newInt32Vector(n)
	case FieldTypeNullableInt32:
		f.vector = newNullableInt32Vector(n)
	case FieldTypeInt64:
		f.vector = newInt64Vector(n)
	case FieldTypeNullableInt64:
		f.vector = newNullableInt64Vector(n)
	// uints
	case FieldTypeUint8:
		f.vector = newUint8Vector(n)
	case FieldTypeNullableUint8:
		f.vector = newNullableUint8Vector(n)
	case FieldTypeUint16:
		f.vector = newUint16Vector(n)
	case FieldTypeNullableUint16:
		f.vector = newNullableUint16Vector(n)
	case FieldTypeUint32:
		f.vector = newUint32Vector(n)
	case FieldTypeNullableUint32:
		f.vector = newNullableUint32Vector(n)
	case FieldTypeUint64:
		f.vector = newUint64Vector(n)
	case FieldTypeNullableUint64:
		f.vector = newNullableUint64Vector(n)
	// floats
	case FieldTypeFloat32:
		f.vector = newFloat32Vector(n)
	case FieldTypeNullableFloat32:
		f.vector = newNullableFloat32Vector(n)
	case FieldTypeFloat64:
		f.vector = newFloat64Vector(n)
	case FieldTypeNullableFloat64:
		f.vector = newNullableFloat64Vector(n)
	// other
	case FieldTypeString:
		f.vector = newStringVector(n)
	case FieldTypeNullableString:
		f.vector = newNullableStringVector(n)
	case FieldTypeBool:
		f.vector = newBoolVector(n)
	case FieldTypeNullableBool:
		f.vector = newNullableBoolVector(n)
	case FieldTypeTime:
		f.vector = newTimeTimeVector(n)
	case FieldTypeNullableTime:
		f.vector = newNullableTimeTimeVector(n)
	default:
		panic(fmt.Sprint("unsupported vector ptype"))
	}
	return f
}
// ItemTypeString returns the string representation of the type of
// element within in the vector (e.g. "uint8", "*time.Time").
func (p FieldType) ItemTypeString() string {
	switch p {
	case FieldTypeInt8:
		return "int8"
	case FieldTypeNullableInt8:
		return "*int8"
	case FieldTypeInt16:
		return "int16"
	case FieldTypeNullableInt16:
		return "*int16"
	case FieldTypeInt32:
		return "int32"
	case FieldTypeNullableInt32:
		return "*int32"
	case FieldTypeInt64:
		return "int64"
	case FieldTypeNullableInt64:
		return "*int64"
	case FieldTypeUint8:
		// Fixed typo: this previously returned "unit8".
		return "uint8"
	case FieldTypeNullableUint8:
		return "*uint8"
	case FieldTypeUint16:
		return "uint16"
	case FieldTypeNullableUint16:
		return "*uint16"
	case FieldTypeUint32:
		return "uint32"
	case FieldTypeNullableUint32:
		return "*uint32"
	case FieldTypeUint64:
		return "uint64"
	case FieldTypeNullableUint64:
		return "*uint64"
	case FieldTypeFloat32:
		return "float32"
	case FieldTypeNullableFloat32:
		return "*float32"
	case FieldTypeFloat64:
		return "float64"
	case FieldTypeNullableFloat64:
		return "*float64"
	case FieldTypeString:
		return "string"
	case FieldTypeNullableString:
		return "*string"
	case FieldTypeBool:
		return "bool"
	case FieldTypeNullableBool:
		return "*bool"
	case FieldTypeTime:
		return "time.Time"
	case FieldTypeNullableTime:
		return "*time.Time"
	}
	return "invalid/unsupported type"
}
// Nullable reports whether p is one of the pointer-element (nullable)
// field types.
func (p FieldType) Nullable() bool {
	switch p {
	case FieldTypeNullableInt8, FieldTypeNullableInt16, FieldTypeNullableInt32, FieldTypeNullableInt64,
		FieldTypeNullableUint8, FieldTypeNullableUint16, FieldTypeNullableUint32, FieldTypeNullableUint64,
		FieldTypeNullableFloat32, FieldTypeNullableFloat64,
		FieldTypeNullableString,
		FieldTypeNullableBool,
		FieldTypeNullableTime:
		return true
	default:
		return false
	}
}
// numericFieldTypes is an array of FieldTypes that are numeric
// (integers and floats, nullable and non-nullable).
var numericFieldTypes = [...]FieldType{
	FieldTypeInt8, FieldTypeInt16, FieldTypeInt32, FieldTypeInt64,
	FieldTypeNullableInt8, FieldTypeNullableInt16, FieldTypeNullableInt32, FieldTypeNullableInt64,
	FieldTypeUint8, FieldTypeUint16, FieldTypeUint32, FieldTypeUint64,
	FieldTypeNullableUint8, FieldTypeNullableUint16, FieldTypeNullableUint32, FieldTypeNullableUint64,
	FieldTypeFloat32, FieldTypeFloat64,
	FieldTypeNullableFloat32, FieldTypeNullableFloat64}

// NumericFieldTypes returns a slice of FieldTypes that are numeric.
func NumericFieldTypes() []FieldType {
	return numericFieldTypes[:]
}
package recurrence
import (
"math"
"time"
)
const (
	// day is the nominal 24-hour length of a calendar day used for the
	// date arithmetic in this package.
	day = 24 * time.Hour
)
// round converts a duration to an int, rounding a fractional part of
// at least one half upward and anything smaller downward.
func round(valD time.Duration) int {
	v := float64(valD)
	if _, frac := math.Modf(v); frac >= 0.5 {
		return int(math.Ceil(v))
	}
	return int(math.Floor(v))
}
// GetNextDate returns the first occurrence of the recurrence strictly
// after d, or the zero time when there is none (unknown frequency, a
// non-repeating event already past, or a recurrence that has ended).
func (p Recurrence) GetNextDate(d time.Time) time.Time {
	// Normalize defaults on the local copy: a zero interval means
	// "every period" and a nil location falls back to UTC.
	if p.Interval == 0 {
		p.Interval = 1
	}
	if p.Location == nil {
		p.Location = time.UTC
	}
	// End <= Start is treated as "no end"; otherwise an ended
	// recurrence yields no further dates.
	if p.End.After(p.Start) && !p.End.After(d) {
		return time.Time{}
	}
	switch p.Frequence {
	case NotRepeating:
		if p.Start.After(d) {
			return p.Start.In(p.Location)
		}
	case Daily:
		return p.ndDaily(d)
	case Weekly:
		return p.ndWeekly(d)
	case MonthlyXth:
		return p.ndMonthlyX(d)
	case Monthly:
		return p.ndMonthly(d)
	case Yearly:
		return p.ndYearly(d)
	}
	return time.Time{}
}
// dateOf truncates t to midnight in the recurrence's location.
func (p Recurrence) dateOf(t time.Time) time.Time {
	local := t.In(p.Location)
	return time.Date(local.Year(), local.Month(), local.Day(), 0, 0, 0, 0, p.Location)
}
// ndDaily returns the next daily occurrence strictly after d, honoring
// the "every Interval days" cycle and the optional end time.
func (p Recurrence) ndDaily(d time.Time) time.Time {
	start := p.Start.In(p.Location)
	end := p.End.In(p.Location)
	if d.Before(start) {
		return start
	}
	startDate := p.dateOf(start)
	// Clock-time offset of the occurrence within its day.
	timeOfDay := start.Sub(startDate)
	d = d.In(p.Location)
	dateOfD := p.dateOf(d)
	// Whole days from the series start to d's date.
	daysBetween := round(dateOfD.Sub(startDate) / day)
	freq := int(p.Interval)
	// Days until the next cycle boundary (0 when d's date is on-cycle).
	daysToAdd := (freq - (daysBetween % freq)) % freq
	res := dateOfD.AddDate(0, 0, daysToAdd).Add(timeOfDay)
	// If that occurrence is not after d, jump one full interval ahead.
	if !res.After(d) {
		res = res.AddDate(0, 0, freq)
	}
	// End <= Start means "no end"; otherwise respect it.
	if end.After(start) && res.After(end) {
		return time.Time{}
	}
	return res
}
// ndWeekly returns the next occurrence of a weekly recurrence after d.
// The low 8 bits of p.Pattern select the weekdays (bit i = weekday i);
// IntToWeeklyPattern also yields the weekday the week starts on. The
// scan walks interval-sized week cycles day by day until it finds a
// selected weekday strictly after d and not past End.
func (p Recurrence) ndWeekly(d time.Time) time.Time {
	start := p.Start.In(p.Location)
	end := p.End.In(p.Location)
	d = d.In(p.Location)
	startDate := p.dateOf(start)
	timeOfDay := start.Sub(startDate)
	startOfWeek, _ := IntToWeeklyPattern(p.Pattern)
	days := p.Pattern & 255
	// No weekdays selected: the recurrence can never fire.
	if days == 0 {
		return time.Time{}
	}
	// Move startDate back to the beginning of its week; offset is in
	// (-7, 0] because Go's % keeps the dividend's sign.
	offset := -(7 + int(start.Weekday()-startOfWeek)) % 7
	weekStart := startDate.AddDate(0, 0, offset)
	if d.Before(weekStart) {
		d = weekStart
	}
	cycleLength := time.Duration(p.Interval*7) * day
	// Skip already passed cycles.
	weekStart = p.dateOf(weekStart.Add(time.Duration(int(d.Sub(weekStart)/cycleLength)) * cycleLength))
	dayOfD := p.dateOf(d)
outerLoop:
	// end.Before(start) means "no valid end": iterate unbounded.
	for ws := weekStart; end.Before(start) || !end.Before(ws); ws = ws.AddDate(0, 0, int(p.Interval*7)) {
		for i := 0; i < 7; i++ {
			dat := ws.AddDate(0, 0, i)
			if dat.Before(dayOfD) || dat.Before(startDate) {
				continue
			}
			// Reject weekdays not selected in the bitmask.
			wd := int(1 << uint(dat.Weekday()))
			if (days & wd) != wd {
				continue
			}
			dat = dat.Add(timeOfDay)
			if !dat.After(d) {
				continue
			}
			if end.After(start) && dat.After(end) {
				break outerLoop
			}
			return dat
		}
	}
	return time.Time{}
}
// ndMonthlyX returns the next occurrence of a "same day-of-month every
// N months" recurrence after d. Candidates whose day of month differs
// from Start's (AddDate normalization, e.g. Jan 31 + 1 month) are
// skipped entirely.
func (p Recurrence) ndMonthlyX(d time.Time) time.Time {
	start := p.Start.In(p.Location)
	end := p.End.In(p.Location)
	d = d.In(p.Location)
	if d.Before(start) {
		return start
	}
	dy := d.Year()
	dm := int(d.Month())
	sy := start.Year()
	sm := int(start.Month())
	interval := int(p.Interval)
	monthsBetween := ((dy - sy) * 12) + (dm - sm)
	// Snap to the last interval boundary at or before d's month.
	monthsToAdd := (monthsBetween / interval) * interval
	extraIntervals := 0
	// end.Before(start) means "no valid end": iterate unbounded.
	for dat := start.AddDate(0, monthsToAdd, 0); end.Before(start) || !end.Before(dat); dat = start.AddDate(0, monthsToAdd+(extraIntervals*interval), 0) {
		extraIntervals += 1
		// AddDate normalized past month end (day changed): not a real hit.
		if dat.Day() != start.Day() {
			continue
		}
		if !dat.After(d) {
			continue
		}
		return dat
	}
	return time.Time{}
}
// ndMonthly returns the next occurrence of an "Nth weekday of the
// month" recurrence (e.g. second Tuesday) after d. The occurrence
// ordinal and weekday are decoded from p.Pattern.
func (p Recurrence) ndMonthly(d time.Time) time.Time {
	occ, wd := IntToMonthlyPattern(p.Pattern)
	start := p.Start.In(p.Location)
	timeOfDay := start.Sub(p.dateOf(start))
	// Anchor on the first of Start's month; the concrete day is derived
	// from the pattern below.
	start = time.Date(start.Year(), start.Month(), 1, 0, 0, 0, 0, p.Location)
	end := p.End.In(p.Location)
	dStart := d.In(p.Location)
	if d.Before(start) {
		dStart = start
	}
	dy := dStart.Year()
	dm := int(dStart.Month())
	sy := start.Year()
	sm := int(start.Month())
	interval := int(p.Interval)
	monthsBetween := ((dy - sy) * 12) + (dm - sm)
	// Snap to the last interval boundary at or before d's month.
	monthsToAdd := (monthsBetween / interval) * interval
	start = start.AddDate(0, monthsToAdd, 0)
	dat := start
	// getNthDayFromMonth finds the occ-th weekday wd in dat's month,
	// clamping to the last such weekday when the month has fewer.
	getNthDayFromMonth := func(dat time.Time) time.Time {
		for dat.Weekday() != wd {
			dat = dat.AddDate(0, 0, 1)
		}
		for i := Second; i <= occ; i++ {
			next := dat.AddDate(0, 0, 7)
			if next.Month() != dat.Month() {
				return dat
			}
			dat = next
		}
		return dat
	}
	// Advance month by month (in interval steps) until the candidate is
	// strictly after d and not before the original Start.
	for dat = getNthDayFromMonth(start); !dat.Add(timeOfDay).After(d) || dat.Add(timeOfDay).Before(p.Start); {
		start = start.AddDate(0, interval, 0)
		dat = getNthDayFromMonth(start)
	}
	if !end.Before(p.Start) && end.Before(dat) {
		return time.Time{}
	}
	return dat.Add(timeOfDay)
}
// ndYearly returns the next occurrence of a yearly recurrence after d.
// Candidate years where AddDate normalized the day (Feb 29 anchors on
// non-leap years) are skipped via the day != dat.Day() check.
func (p Recurrence) ndYearly(d time.Time) time.Time {
	start := p.Start.In(p.Location)
	end := p.End.In(p.Location)
	if d.Before(start) {
		return start
	}
	d = d.In(p.Location)
	interval := int(p.Interval)
	yearsBetween := d.Year() - start.Year()
	day := start.Day()
	// Snap to the last interval boundary at or before d's year.
	yearsToAdd := (yearsBetween / interval) * interval
	dat := start.AddDate(yearsToAdd, 0, 0)
	i := 0
	// Advance interval years at a time until strictly after d and on the
	// anchor day of month. The first body iteration (i=0) recomputes the
	// initial candidate, which is redundant but harmless.
	for !dat.After(d) || day != dat.Day() {
		dat = start.AddDate(yearsToAdd+(i*interval), 0, 0)
		i += 1
	}
	if !end.Before(p.Start) && end.Before(dat) {
		return time.Time{}
	}
	return dat
} | calculator.go | 0.646906 | 0.433442 | calculator.go | starcoder
package main
import (
. "github.com/gocircuit/circuit/gocircuit.org/render"
)
// RenderIndexPage renders the site landing page: the indexBody template
// expanded with its figures, wrapped in the standard HTML chrome.
func RenderIndexPage() string {
	figures := A{
		"FigFacade": RenderFigurePngSvg(
			"Circuit API provides a dynamic hierarchical view of a compute cluster.", "view", "550px"),
	}
	body := Render(indexBody, figures)
	return RenderHtml(
		"Circuit: Self-managed infrastructure, programmatic monitoring and orchestration",
		body,
	)
}
// indexBody is the landing-page body template; {{.FigFacade}} is filled
// in by RenderIndexPage. NOTE(review): the "Submit issues" anchor below
// has an empty href — confirm the intended URL.
const indexBody = `
<p>The circuit is a minimal distributed operating system that enables programmatic, reactive control
over hosts, processes and connections within a compute cluster.

{{.FigFacade}}

<p>The circuit is unique in one respect: Once a circuit cluster is formed, the circuit system itself cannot
fail—only individual hosts can. In contrast, comparable systems
(like
<a href="https://coreos.com/">CoreOS</a>,
<a href="https://www.consul.io/">Consul</a> and
<a href="http://mesosphere.com/">Mesosphere</a>)
can fail if the hardware hosting the system's own software fails.

<h2>Sources</h2>

<p><a href="https://drone.io/github.com/gocircuit/circuit/latest"><img src="https://drone.io/github.com/gocircuit/circuit/status.png" /></a>
<a href="https://godoc.org/github.com/gocircuit/circuit/client"><img src="https://godoc.org/github.com/gocircuit/circuit/client?status.png" /></a>

<p>Find the source repository for <a href="https://github.com/gocircuit/circuit">Circuit on GitHub</a>.
Follow us on Twitter <a href="https://twitter.com/gocircuit">@gocircuit</a>.

<p>Submit <a href="">issues</a> to our GitHub repo. For discussions about using and developing
the Circuit visit <a href="https://groups.google.com/forum/#!forum/gocircuit-user">the Circuit User Group</a> and
<a href="https://groups.google.com/forum/#!forum/gocircuit-dev">the Circuit Developer Group</a>, respectively.

<h2>Documentation</h2>

<ul>
<li><a href="install.html">Building and installing</a></li>
<li><a href="run.html">Running Circuit servers</a></li>
<li><a href="metaphor.html">Programming metaphor</a></li>
<li><a href="cmd.html">Command-line client</a>
	<ul>
	<li><a href="element-process.html">Using processes</a></li>
	<li><a href="element-container.html">Using containers</a></li>
	<li><a href="element-subscription.html">Using subscriptions</a></li>
	<li><a href="element-dns.html">Using name servers</a></li>
	<li><a href="element-server.html">Using servers</a></li>
	<li><a href="element-channel.html">Using channel</a></li>
	</ul>
<li><a href="api.html">Go client</a>
	<ul>
	<li><a href="api-connect.html">Connecting to a circuit cluster</a></li>
	<li><a href="api-anchor.html">Navigating and using the anchor hierarchy</a></li>
	<li><a href="api-process.html">Using processes</a></li>
	<li><a href="api-container.html">Using containers</a></li>
	<li><a href="api-subscription.html">Using subscriptions</a></li>
	<li><a href="api-name.html">Using name servers</a></li>
	<li><a href="api-server.html">Using servers</a></li>
	<li><a href="api-channel.html">Using channels</a></li>
	</ul>
</li>
</li>
<li><a href="security.html">Security and networking</a></li>
<li><a href="history.html">History and bibliography</a></li>
</ul>

<h2>Tutorials</h2>

<h3>Orchestrating a typical web app: Node.js using MySQL running on Amazon EC2</h3>

<ul>
<li><a href="tutorial-mysql-nodejs-overview.html">Overview</a></li>
<li><a href="tutorial-mysql-nodejs-image.html">Prepare host images</a></li>
<li><a href="tutorial-mysql-nodejs-boot.html">Boot the circuit cluster</a></li>
<li><a href="tutorial-mysql-nodejs-app.html">Write the circuit app</a></li>
<li><a href="tutorial-mysql-nodejs-run.html">Run the app on the cluster</a></li>
</ul>

<p>
` | gocircuit.org/build/index.go | 0.716814 | 0.65624 | index.go | starcoder
package inverted
import (
"sort"
"github.com/pkg/errors"
"github.com/semi-technologies/weaviate/entities/filters"
)
// mergeAndOptimized intersects the doc-id sets of all children (AND
// semantics). It merges each child's own ids first, short-circuits when
// all sets are identical, and then intersects pairwise starting from
// the smallest set so the lookup maps stay as small as possible.
func mergeAndOptimized(children []*propValuePair,
	acceptDuplicates bool) (*docPointers, error) {
	sets := make([]*docPointers, len(children))

	// Since the nested filter could have further children which are AND/OR
	// filters, we need to merge the innermost of them first

	// Part 1: Merge Children if any
	// -----------------------------
	// If the given operands are Value filters, merge will simply return the
	// respective values
	for i, child := range children {
		docIDs, err := child.mergeDocIDs(acceptDuplicates)
		if err != nil {
			return nil, errors.Wrapf(err, "retrieve doc ids of child %d", i)
		}

		sets[i] = docIDs
	}

	// Potential early exit condition
	// ------------------------------
	if len(sets) == 1 || checksumsIdentical(sets) {
		// all children are identical, no need to merge, simply return the first
		// set
		return sets[0], nil
	}

	// Compute the combined checksum before merging, while all input sets
	// are still available.
	checksum := combineSetChecksums(sets, filters.OperatorAnd)

	// Part 2: Recursively intersect sets
	// ----------------------------------
	// The idea is that we pick the smallest list first and check against it, as
	// building a map is considerably more expensive than a map lookup. So we
	// build a small map first. Since the overall strategy is AND, we know that
	// we will never have more items than on the smallest list (since an id would
	// have to be present on all lists to make it through the merge)

	// Thus we must start by sorting the respective sets by their length in ASC
	// order
	sort.Slice(sets, func(a, b int) bool {
		return len(sets[a].docIDs) < len(sets[b].docIDs)
	})

	// Now we start a recursive merge where merge element 0 and element 1, then
	// remove both from the list. If there are elements left we merge the result
	// of the first round with the next smallest set (originally element 2, now
	// element 0) and so on until we are left with only a single element
	for len(sets) > 1 {
		merged := intersectAnd(sets[0], sets[1])
		sets[0] = nil // set to nil to avoid mem leak, as we are cutting from * slice
		sets[1] = nil // set to nil to avoid mem leak, as we are cutting from * slice
		sets = append([]*docPointers{merged}, sets[2:]...)
	}

	sets[0].checksum = checksum
	return sets[0], nil
}
// intersectAnd computes the intersection of two docPointers sets. The
// smaller set is loaded into a hash set, then the larger set is
// streamed against it. Each matched id is deleted from the set so that
// duplicates in the larger list cannot match twice (compare [1] vs
// [1,1]: without the delete the result would have two entries).
func intersectAnd(smaller, larger *docPointers) *docPointers {
	seen := make(map[uint64]struct{}, len(smaller.docIDs))
	for i := range smaller.docIDs {
		seen[smaller.docIDs[i].id] = struct{}{}
	}

	// The result can never exceed the smaller set's length.
	result := docPointers{
		docIDs: make([]docPointer, len(smaller.docIDs)),
	}
	matched := 0
	for i := range larger.docIDs {
		id := larger.docIDs[i].id
		if _, ok := seen[id]; !ok {
			continue
		}
		result.docIDs[matched] = docPointer{id: id}
		delete(seen, id)
		matched++
	}

	result.docIDs = result.docIDs[:matched]
	result.count = uint64(matched)
	return &result
}
// mergeOrAcceptDuplicates concatenates the doc pointers of all input
// sets without any deduplication (OR semantics with duplicates allowed)
// and tags the result with the combined OR checksum. The error return
// is always nil and kept only for signature symmetry with other merge
// helpers.
func mergeOrAcceptDuplicates(in []*docPointers) (*docPointers, error) {
	total := 0
	for _, set := range in {
		total += len(set.docIDs)
	}

	merged := docPointers{
		docIDs:   make([]docPointer, 0, total),
		checksum: combineSetChecksums(in, filters.OperatorOr),
	}
	for _, set := range in {
		merged.docIDs = append(merged.docIDs, set.docIDs...)
	}

	return &merged, nil
} | adapters/repos/db/inverted/merge.go | 0.676086 | 0.532364 | merge.go | starcoder
package geom
// Union returns a geometry that represents the parts from either geometry A or
// geometry B (or both). An error may be returned in pathological cases of
// numerical degeneracy. GeometryCollections are not supported.
// Union returns a geometry that represents the parts from either
// geometry A or geometry B (or both). An error may be returned in
// pathological cases of numerical degeneracy. GeometryCollections are
// not supported.
func Union(a, b Geometry) (Geometry, error) {
	// Empty operands short-circuit without touching the overlay machinery.
	switch {
	case a.IsEmpty() && b.IsEmpty():
		return Geometry{}, nil
	case a.IsEmpty():
		return b, nil
	case b.IsEmpty():
		return a, nil
	}
	result, err := setOp(a, b, selectUnion)
	return result, wrap(err, "executing union")
}
// Intersection returns a geometry that represents the parts common to
// both geometry A and geometry B. An error may be returned in
// pathological cases of numerical degeneracy. GeometryCollections are
// not supported.
func Intersection(a, b Geometry) (Geometry, error) {
	// Intersection with an empty geometry is always empty.
	if a.IsEmpty() || b.IsEmpty() {
		return Geometry{}, nil
	}
	result, err := setOp(a, b, selectIntersection)
	return result, wrap(err, "executing intersection")
}
// Difference returns a geometry that represents the parts of input
// geometry A that are not part of input geometry B. An error may be
// returned in pathological cases of numerical degeneracy.
// GeometryCollections are not supported.
func Difference(a, b Geometry) (Geometry, error) {
	switch {
	case a.IsEmpty():
		// Nothing to subtract from.
		return Geometry{}, nil
	case b.IsEmpty():
		// Subtracting nothing leaves A intact.
		return a, nil
	}
	result, err := setOp(a, b, selectDifference)
	return result, wrap(err, "executing difference")
}
// SymmetricDifference returns a geometry that represents the parts of
// geometry A and B that are not in common. An error may be returned in
// pathological cases of numerical degeneracy. GeometryCollections are
// not supported.
func SymmetricDifference(a, b Geometry) (Geometry, error) {
	switch {
	case a.IsEmpty() && b.IsEmpty():
		return Geometry{}, nil
	case a.IsEmpty():
		return b, nil
	case b.IsEmpty():
		return a, nil
	}
	result, err := setOp(a, b, selectSymmetricDifference)
	return result, wrap(err, "executing symmetric difference")
}
// setOp runs the shared overlay machinery for all binary set
// operations: it builds the overlay of a and b, then extracts the
// geometry whose labelled faces satisfy the include predicate.
func setOp(a, b Geometry, include func([2]label) bool) (Geometry, error) {
	overlay, err := createOverlay(a, b)
	if err != nil {
		return Geometry{}, wrap(err, "internal error creating overlay")
	}
	g, err := overlay.extractGeometry(include)
	if err != nil {
		return Geometry{}, wrap(err, "internal error extracting geometry")
	}
	return g, nil
} | geom/alg_set_op.go | 0.898697 | 0.567038 | alg_set_op.go | starcoder
package fn
import (
"github.com/nlpodyssey/spago/mat"
)
// MaxPooling is an operator to perform max pooling over non-overlapping
// rows x cols windows of the input matrix.
type MaxPooling[T mat.DType, O Operand[T]] struct {
	x    O   // input operand
	rows int // pooling window height
	cols int // pooling window width
	// initialized during the forward pass
	y        mat.Matrix[T] // pooled output
	argmaxI  [][]int       // row index of each window's maximum
	argmaxJ  [][]int       // column index of each window's maximum
	operands []O
}

// NewMaxPooling returns a new MaxPooling Function with an r x c pooling
// window.
func NewMaxPooling[T mat.DType, O Operand[T]](x O, r, c int) *MaxPooling[T, O] {
	return &MaxPooling[T, O]{
		x:        x,
		rows:     r,
		cols:     c,
		y:        nil,
		argmaxI:  nil,
		argmaxJ:  nil,
		operands: []O{x},
	}
}
// Operands returns the list of operands (the single input x).
func (r *MaxPooling[T, O]) Operands() []O {
	return r.operands
}
// Forward computes the max pooling of the input matrix: the input is
// partitioned into non-overlapping r.rows x r.cols windows and the
// maximum of each window becomes one output cell. The argmax
// coordinates of every window are recorded for use in Backward.
// Panics when the input dimensions are not exact multiples of the
// window size.
func (r *MaxPooling[T, O]) Forward() mat.Matrix[T] {
	xv := r.x.Value()
	if !(xv.Rows()%r.rows == 0 && xv.Columns()%r.cols == 0) {
		panic("fn: size mismatch")
	}

	r.y = mat.NewEmptyDense[T](xv.Rows()/r.rows, xv.Columns()/r.cols)
	r.argmaxI = makeIntMatrix(r.y.Dims()) // output argmax row index
	r.argmaxJ = makeIntMatrix(r.y.Dims()) // output argmax column index

	for row := 0; row < r.y.Rows(); row++ {
		for col := 0; col < r.y.Columns(); col++ {
			// Seed the search with the window's first element rather than
			// mat.SmallestNonzero: the latter is a tiny *positive* value,
			// which produced wrong maxima (and argmax 0,0) for windows
			// containing only negative values.
			maximum := xv.At(row*r.rows, col*r.cols)
			r.argmaxI[row][col] = row * r.rows
			r.argmaxJ[row][col] = col * r.cols
			for i := row * r.rows; i < (row*r.rows)+r.rows; i++ {
				// BUG FIX: the column bound previously used r.rows, which
				// scanned the wrong window width whenever rows != cols.
				for j := col * r.cols; j < (col*r.cols)+r.cols; j++ {
					val := xv.At(i, j)
					if val > maximum {
						maximum = val
						r.argmaxI[row][col] = i
						r.argmaxJ[row][col] = j
					}
				}
			}
			r.y.Set(row, col, maximum)
		}
	}
	return r.y
}
// makeIntMatrix returns a rows x cols 2-dimensional slice of int, with
// every cell initialized to zero.
func makeIntMatrix(rows, cols int) [][]int {
	m := make([][]int, rows)
	for r := range m {
		m[r] = make([]int, cols)
	}
	return m
}
// Backward computes the backward pass: the output gradient of each
// pooled cell is routed entirely to the input position recorded as
// that window's argmax during Forward; all other positions get zero.
func (r *MaxPooling[T, O]) Backward(gy mat.Matrix[T]) {
	if r.x.RequiresGrad() {
		gx := r.x.Value().ZerosLike()
		defer mat.ReleaseMatrix(gx)
		for row := 0; row < r.y.Rows(); row++ {
			rowi := r.argmaxI[row]
			rowj := r.argmaxJ[row]
			for col := 0; col < r.y.Columns(); col++ {
				gx.Set(rowi[col], rowj[col], gy.At(row, col))
			}
		}
		r.x.AccGrad(gx)
	}
} | ag/fn/maxpooling.go | 0.775137 | 0.446555 | maxpooling.go | starcoder
package main
import "fmt"
// DoSomething accepts a value of any type; the argument is converted to
// an interface{} value at the call site, so inside the function v's
// static type is interface{}.
func DoSomething(v interface{}) {
	// ...
}
// will accept any parameter whatsoever.
// Here’s where it gets confusing: inside of the DoSomething function, what is v’s type? Beginner gophers are led to believe that “v is of any type”, but that is wrong. v is not of any type; it is of interface{} type. Wait, what? When passing a value into the DoSomething function, the Go runtime will perform a type conversion (if necessary), and convert the value to an interface{} value. All values have exactly one type at runtime, and v’s one static type is interface{}.
// This should leave you wondering: ok, so if a conversion is taking place, what is actually being passed into a function that takes an interface{} value (or, what is actually stored in an []Animal slice)? An interface value is constructed of two words of data; one word is used to point to a method table for the value’s underlying type, and the other word is used to point to the actual data being held by that value. I don’t want to bleat on about this endlessly. If you understand that an interface value is two words wide and it contains a pointer to the underlying data, that’s typically enough to avoid common pitfalls. If you are curious to learn more about the implementation of interfaces, I think Russ Cox’s description of interfaces is very, very helpful.
// In our previous example, when we constructed a slice of Animal values, we did not have to say something onerous like Animal(Dog{}) to put a value of type Dog into the slice of Animal values, because the conversion was handled for us automatically. Within the animals slice, each element is of Animal type, but our different values have different underlying types.
// So… why does this matter? Well, understanding how interfaces are represented in memory makes some potentially confusing things very obvious. For example, the question “can I convert a []T to an []interface{}” is easy to answer once you understand how interfaces are represented in memory. Here’s an example of some broken code that is representative of a common misunderstanding of the interface{} type:
// PrintAll writes each value in vals to stdout on its own line.
func PrintAll(vals []interface{}) {
	for i := range vals {
		fmt.Println(vals[i])
	}
}
// main demonstrates that a []string cannot be passed where a
// []interface{} is expected: the slices have different memory layouts,
// so each element must be boxed individually.
func main() {
	names := []string{"stanley", "david", "oscar"}
	// The direct call does not compile:
	//PrintAll(names) // cannot use names (type []string) as type []interface {} in argument to PrintAll
	// Convert element by element; each assignment boxes one string into
	// an interface{} value.
	vals := make([]interface{}, len(names))
	for i, v := range names {
		vals[i] = v
	}
	PrintAll(vals)
	// Run it here:http://play.golang.org/p/Dhg1YS6BJS
	// The per-element copy is verbose but unavoidable; in practice
	// []interface{} turns out to be needed less often than expected.
} | 01 | Go by Example/internal/20_Interfaces/ref/How to use interfaces in Go/interfaces2.go | 0.583441 | 0.456591 | interfaces2.go | starcoder
package layer
import tf "github.com/galeone/tensorflow/tensorflow/go"
// LPreprocessingLayer is the builder for a Keras PreprocessingLayer;
// its setters configure the layer before serialization via
// GetKerasLayerConfig.
type LPreprocessingLayer struct {
	dtype        DataType
	inputs       []Layer
	name         string
	shape        tf.Shape
	trainable    bool
	layerWeights []*tf.Tensor
}

// PreprocessingLayer returns a new LPreprocessingLayer with defaults:
// Float32 dtype, a unique generated name, and trainable enabled.
func PreprocessingLayer() *LPreprocessingLayer {
	return &LPreprocessingLayer{
		dtype:     Float32,
		name:      UniqueName("preprocessing_layer"),
		trainable: true,
	}
}
// SetDtype sets the layer's data type and returns the receiver for
// chaining.
func (l *LPreprocessingLayer) SetDtype(dtype DataType) *LPreprocessingLayer {
	l.dtype = dtype
	return l
}

// SetName overrides the generated layer name.
func (l *LPreprocessingLayer) SetName(name string) *LPreprocessingLayer {
	l.name = name
	return l
}

// SetShape sets the layer's shape.
func (l *LPreprocessingLayer) SetShape(shape tf.Shape) *LPreprocessingLayer {
	l.shape = shape
	return l
}

// SetTrainable toggles whether the layer's weights are trainable.
func (l *LPreprocessingLayer) SetTrainable(trainable bool) *LPreprocessingLayer {
	l.trainable = trainable
	return l
}

// SetLayerWeights attaches pre-computed weight tensors to the layer.
func (l *LPreprocessingLayer) SetLayerWeights(layerWeights []*tf.Tensor) *LPreprocessingLayer {
	l.layerWeights = layerWeights
	return l
}
// GetShape returns the layer's shape.
func (l *LPreprocessingLayer) GetShape() tf.Shape {
	return l.shape
}

// GetDtype returns the layer's data type.
func (l *LPreprocessingLayer) GetDtype() DataType {
	return l.dtype
}

// SetInputs records the upstream layers feeding this one; it returns
// the Layer interface (not the concrete type) to satisfy Layer.
func (l *LPreprocessingLayer) SetInputs(inputs ...Layer) Layer {
	l.inputs = inputs
	return l
}

// GetInputs returns the upstream layers feeding this one.
func (l *LPreprocessingLayer) GetInputs() []Layer {
	return l.inputs
}

// GetName returns the layer's name.
func (l *LPreprocessingLayer) GetName() string {
	return l.name
}

// GetLayerWeights returns the layer's attached weight tensors.
func (l *LPreprocessingLayer) GetLayerWeights() []*tf.Tensor {
	return l.layerWeights
}
// jsonConfigLPreprocessingLayer mirrors the JSON shape Keras expects
// for one layer entry in a model config.
type jsonConfigLPreprocessingLayer struct {
	ClassName    string                 `json:"class_name"`
	Name         string                 `json:"name"`
	Config       map[string]interface{} `json:"config"`
	InboundNodes [][][]interface{}      `json:"inbound_nodes"`
}

// GetKerasLayerConfig builds the serializable Keras config for this
// layer; each input layer contributes one [name, 0, 0, {}] entry to the
// inbound-nodes list.
func (l *LPreprocessingLayer) GetKerasLayerConfig() interface{} {
	inboundNodes := [][][]interface{}{
		{},
	}
	for _, input := range l.inputs {
		inboundNodes[0] = append(inboundNodes[0], []interface{}{
			input.GetName(),
			0,
			0,
			map[string]bool{},
		})
	}
	return jsonConfigLPreprocessingLayer{
		ClassName: "PreprocessingLayer",
		Name:      l.name,
		Config: map[string]interface{}{
			"dtype":     l.dtype.String(),
			"name":      l.name,
			"trainable": l.trainable,
		},
		InboundNodes: inboundNodes,
	}
}

// GetCustomLayerDefinition returns the Python source for a custom layer
// implementation; empty because PreprocessingLayer is a built-in.
func (l *LPreprocessingLayer) GetCustomLayerDefinition() string {
	return ``
} | layer/PreprocessingLayer.go | 0.657318 | 0.444022 | PreprocessingLayer.go | starcoder
package gripql
import (
"errors"
"fmt"
//"sort"
"strings"
"google.golang.org/protobuf/types/known/structpb"
)
// GetDataMap obtains data attached to the vertex in the form of a map.
func (vertex *Vertex) GetDataMap() map[string]interface{} {
	return vertex.Data.AsMap()
}

// SetDataMap replaces the vertex's data with the given map. Conversion
// errors from structpb are silently ignored, leaving Data nil.
func (vertex *Vertex) SetDataMap(i map[string]interface{}) {
	v, _ := structpb.NewStruct(i)
	vertex.Data = v
}

// SetProperty sets the named field in the vertex's data, lazily
// initializing the underlying struct. structpb conversion errors are
// ignored, storing a nil value for the key.
func (vertex *Vertex) SetProperty(key string, value interface{}) {
	if vertex.Data == nil {
		vertex.Data = &structpb.Struct{Fields: map[string]*structpb.Value{}}
	}
	v, _ := structpb.NewValue(value)
	vertex.Data.Fields[key] = v
}

// GetProperty gets the named field from the vertex's data, or nil when
// the field (or the data itself) is absent.
func (vertex *Vertex) GetProperty(key string) interface{} {
	if vertex.Data == nil {
		return nil
	}
	if v, ok := vertex.Data.Fields[key]; ok {
		return v.AsInterface()
	}
	return nil
}

// HasProperty returns true if the field is defined.
func (vertex *Vertex) HasProperty(key string) bool {
	if vertex.Data == nil {
		return false
	}
	_, ok := vertex.Data.Fields[key]
	return ok
}
// Validate returns an error if the vertex is invalid: gid and label
// must be non-empty and every data field name must be legal.
func (vertex *Vertex) Validate() error {
	switch {
	case vertex.Gid == "":
		return errors.New("'gid' cannot be blank")
	case vertex.Label == "":
		return errors.New("'label' cannot be blank")
	}
	for key := range vertex.GetDataMap() {
		if err := ValidateFieldName(key); err != nil {
			return err
		}
	}
	return nil
}
// GetDataMap obtains data attached to the edge in the form of a map.
func (edge *Edge) GetDataMap() map[string]interface{} {
	return edge.Data.AsMap()
}

// SetDataMap replaces the edge's data with the given map. Conversion
// errors from structpb are silently ignored, leaving Data nil.
func (edge *Edge) SetDataMap(i map[string]interface{}) {
	s, _ := structpb.NewStruct(i)
	edge.Data = s
}

// SetProperty sets the named field in the edge's data, lazily
// initializing the underlying struct. structpb conversion errors are
// ignored, storing a nil value for the key.
func (edge *Edge) SetProperty(key string, value interface{}) {
	if edge.Data == nil {
		edge.Data = &structpb.Struct{Fields: map[string]*structpb.Value{}}
	}
	v, _ := structpb.NewValue(value)
	edge.Data.Fields[key] = v
}

// GetProperty gets the named field from the edge's data, or nil when
// the field (or the data itself) is absent.
func (edge *Edge) GetProperty(key string) interface{} {
	if edge.Data == nil {
		return nil
	}
	if e, ok := edge.Data.Fields[key]; ok {
		return e.AsInterface()
	}
	return nil
}

// HasProperty returns true if the field is defined.
func (edge *Edge) HasProperty(key string) bool {
	if edge.Data == nil {
		return false
	}
	_, ok := edge.Data.Fields[key]
	return ok
}
// Validate returns an error if the edge is invalid: gid, label, from
// and to must all be non-empty and every data field name must be
// legal.
func (edge *Edge) Validate() error {
	switch {
	case edge.Gid == "":
		return errors.New("'gid' cannot be blank")
	case edge.Label == "":
		return errors.New("'label' cannot be blank")
	case edge.From == "":
		return errors.New("'from' cannot be blank")
	case edge.To == "":
		return errors.New("'to' cannot be blank")
	}
	for key := range edge.GetDataMap() {
		if err := ValidateFieldName(key); err != nil {
			return err
		}
	}
	return nil
}
// ValidateGraphName returns an error if the graph name is invalid
// (contains forbidden characters or starts with '_' or '-').
func ValidateGraphName(graph string) error {
	err := validate(graph)
	if err != nil {
		return fmt.Errorf(`invalid graph name %s; %v`, graph, err)
	}
	return nil
}

// ReservedFields are the fields that cannot be used as keys within the
// data of a vertex or edge (they collide with the built-in element
// fields).
var ReservedFields = []string{"_gid", "_label", "_to", "_from", "_data"}

// ValidateFieldName returns an error if the data field name is invalid:
// it must not be a reserved name and must pass the shared character
// rules in validate.
func ValidateFieldName(k string) error {
	for _, v := range ReservedFields {
		if k == v {
			return fmt.Errorf("data field '%s' uses a reserved name", k)
		}
	}
	err := validate(k)
	if err != nil {
		return fmt.Errorf(`invalid data field '%s'; %v`, k, err)
	}
	return nil
}
// validate applies the shared naming rules: the name must not contain
// punctuation or whitespace from the forbidden set, and must not begin
// with '_' or '-'.
func validate(k string) error {
	const forbidden = `!@#$%^&*()+={}[] :;"',.<>?/\|~`
	if strings.ContainsAny(k, forbidden) {
		return errors.New(`cannot contain: !@#$%^&*()+={}[] :;"',.<>?/\|~`)
	}
	if strings.HasPrefix(k, "_") || strings.HasPrefix(k, "-") {
		return errors.New(`cannot start with _-`)
	}
	return nil
}
package maybe
import "go/types"
// nillable constrains the value types Maybe can hold. NOTE(review): a
// type-set union containing `any` admits every type, so the types.Nil
// term — and the go/types import it requires — appears to have no
// effect; confirm whether the constraint was meant to be narrower.
type nillable interface {
	any | types.Nil
}

/*
Maybe is a monadic pattern allowing for data manipulation while abstracting whether the value actually exists or is nil.
For example, if we fetch data from an external API that could be nil, we can still perform manipulation on it while disregarding its actual state.
The Maybe struct will take care of managing the value itself. This is similar to the Maybe interface in Elm or Haskell or Optional in Java.
This is helpful for CRUD operations by simplifying the code and allowing for seamless manipulation of nullable data.
*/
type Maybe[T any] interface {
	Unwrap() T
	Map(func(T) T) Maybe[T]
	Apply(func(T)) Maybe[T]
	Bind(func(T) Maybe[T]) Maybe[T]
	IsSome() bool
	IsNil() bool
	OrElse(func() T) T
	Or(T) T
	OrNil() *T
}

// maybe is the sole implementation of Maybe; a nil val marks the empty
// state. NOTE(review): embedding the Maybe[T] interface is redundant —
// every method is implemented below — and leaves a nil embedded field;
// confirm it can be removed.
type maybe[T any] struct {
	Maybe[T]
	val *T
}
// Of returns a new Maybe based on a pointer that may or may not be
// nil: a nil pointer produces an empty Maybe, anything else a
// populated one. (Both cases reduce to storing the pointer directly,
// since a nil val already encodes the empty state.)
func Of[T nillable](val *T) Maybe[T] {
	return maybe[T]{val: val}
}
// Just returns a new Maybe wrapping a value that is known to be
// present; the value is copied and its address stored.
func Just[T nillable](val T) Maybe[T] {
	return maybe[T]{val: &val}
}

// None returns a new empty Maybe whose value is known to be absent.
func None[T nillable]() Maybe[T] {
	return maybe[T]{val: nil}
}
// Unwrap returns the contained value; it panics when the Maybe is
// empty. Check IsSome first, or use Or/OrElse for a safe variant.
func (m maybe[T]) Unwrap() T {
	if m.IsSome() {
		return *m.val
	}
	panic(any("unwrap of empty Maybe"))
}

// Map returns a new Maybe holding f applied to the value; an empty
// Maybe is returned unchanged and f is never called.
func (m maybe[T]) Map(f func(T) T) Maybe[T] {
	if m.IsSome() {
		return Just(f(*m.val))
	}
	return m
}

// Apply invokes f for its side effects when a value is present, then
// returns the receiver unchanged.
func (m maybe[T]) Apply(f func(x T)) Maybe[T] {
	if m.IsSome() {
		f(m.Unwrap())
	}
	return m
}
// IsSome reports whether a value is present.
func (m maybe[T]) IsSome() bool {
	return m.val != nil
}

// IsNil reports whether the Maybe is empty.
func (m maybe[T]) IsNil() bool {
	return m.val == nil
}

// OrElse returns the contained value, or the result of f when empty
// (f is only invoked in the empty case).
func (m maybe[T]) OrElse(f func() T) T {
	if m.IsNil() {
		return f()
	}
	return m.Unwrap()
}

// OrNil returns a pointer to the contained value, or nil when empty.
func (m maybe[T]) OrNil() *T {
	if m.IsNil() {
		return nil
	}
	return m.val
}

// Or returns the contained value, or the given fallback when empty.
func (m maybe[T]) Or(val T) T {
	if m.IsNil() {
		return val
	}
	return m.Unwrap()
}

// Bind chains a computation that itself returns a Maybe; an empty
// receiver short-circuits to None without calling f.
func (m maybe[T]) Bind(f func(T) Maybe[T]) Maybe[T] {
	if m.IsSome() {
		return f(m.Unwrap())
	}
	return None[T]()
} | maybe/maybe.go | 0.665954 | 0.487612 | maybe.go | starcoder
package series
import (
"fmt"
"math"
"time"
)
// timeElement is the Element implementation backing Time-typed series;
// a nil pointer encodes NaN / missing.
type timeElement struct {
	e *time.Time
}

// Addr returns a string form of the underlying pointer. NOTE(review):
// fmt.Sprint on a *time.Time invokes time.Time's String method, so this
// prints the time value rather than the pointer address — confirm the
// intent against the other element types, where Addr prints the
// address.
func (e timeElement) Addr() string {
	return fmt.Sprint(e.e)
}
// Set assigns a new value to the element, accepting strings (parsed via
// ParseDateTime, "NaN" meaning missing), numeric Unix-nanosecond
// values, time.Time, or another Element. Unparseable or unsupported
// inputs mark the element as missing.
func (e timeElement) Set(value interface{}) Element {
	switch v := value.(type) {
	case string:
		if v == "NaN" {
			e.e = nil
			return e
		}
		parsed, err := ParseDateTime(v)
		if err != nil {
			e.e = nil
			return e
		}
		e.e = &parsed
	case float64:
		val := time.Unix(0, int64(v))
		e.e = &val
	case int:
		val := time.Unix(0, int64(v))
		e.e = &val
	case int64:
		val := time.Unix(0, v)
		e.e = &val
	case time.Time:
		val := v
		e.e = &val
	case Element:
		val, err := v.Time()
		if err != nil {
			e.e = nil
			return e
		}
		e.e = &val
	default:
		e.e = nil
	}
	return e
}
// Type returns the series type of this element (always Time).
func (e timeElement) Type() Type {
	return Time
}

// IsNA reports whether the element is missing (backed by a nil
// pointer).
func (e timeElement) IsNA() bool {
	if e.e == nil {
		return true
	}
	return false
}

// Val returns the element's value as an ElementValue, or nil when
// missing.
func (e timeElement) Val() ElementValue {
	if e.IsNA() {
		return nil
	}
	return time.Time(*e.e)
}
func (e timeElement) Copy() Element {
if e.e == nil {
return timeElement{nil}
}
copy := time.Time(*e.e)
return timeElement{©}
}
// Bool reports presence: true unless the element is missing. It never
// returns an error.
func (e timeElement) Bool() (bool, error) {
	return !e.IsNA(), nil
}

// Int returns the time as Unix nanoseconds, or an error when missing.
func (e timeElement) Int() (int, error) {
	if e.IsNA() {
		return 0, fmt.Errorf("can't convert NaN to int")
	}
	return int(e.e.UnixNano()), nil
}

// Float returns the time as Unix nanoseconds, or NaN when missing.
func (e timeElement) Float() float64 {
	if e.e == nil {
		return math.NaN()
	}
	return float64(e.e.UnixNano())
}

// String renders the time with time.Time's default String format, or
// "NaN" when missing.
func (e timeElement) String() string {
	if e.e == nil {
		return "NaN"
	}
	return time.Time(*e.e).String()
}

// Time returns the underlying time.Time, or an error when missing.
func (e timeElement) Time() (time.Time, error) {
	if e.IsNA() {
		return time.Time{}, fmt.Errorf("value is NaN")
	}
	return *e.e, nil
}
// Comparison methods. All of them return false when the receiver is
// missing or the other element cannot be converted to a time — note
// the asymmetry this creates: Eq and Neq can both be false for the
// same pair.

// Eq reports whether both elements hold the same instant.
func (e timeElement) Eq(elem Element) bool {
	t, err := elem.Time()
	if e.IsNA() || err != nil {
		return false
	}
	return e.e.Equal(t)
}

// Neq reports whether the elements hold different instants.
func (e timeElement) Neq(elem Element) bool {
	t, err := elem.Time()
	if e.IsNA() || err != nil {
		return false
	}
	return !e.e.Equal(t)
}

// Less reports whether the receiver is strictly before elem.
func (e timeElement) Less(elem Element) bool {
	t, err := elem.Time()
	if e.IsNA() || err != nil {
		return false
	}
	return (*e.e).Before(t)
}

// LessEq reports whether the receiver is before or equal to elem.
func (e timeElement) LessEq(elem Element) bool {
	t, err := elem.Time()
	if e.IsNA() || err != nil {
		return false
	}
	return (*e.e).Before(t) || e.Eq(elem)
}

// Greater reports whether the receiver is strictly after elem.
func (e timeElement) Greater(elem Element) bool {
	t, err := elem.Time()
	if e.IsNA() || err != nil {
		return false
	}
	return (*e.e).After(t)
}

// GreaterEq reports whether the receiver is after or equal to elem.
func (e timeElement) GreaterEq(elem Element) bool {
	t, err := elem.Time()
	if e.IsNA() || err != nil {
		return false
	}
	return (*e.e).After(t) || e.Eq(elem)
} | series/type-time.go | 0.655887 | 0.401306 | type-time.go | starcoder
package gift
import (
"image"
"image/draw"
"math"
"runtime"
"sync"
)
// parallelize splits the range [start, stop) across GOMAXPROCS
// goroutines and runs fn on each sub-range, blocking until all are
// done. When enabled is false everything runs in a single goroutine.
func parallelize(enabled bool, start, stop int, fn func(start, stop int)) {
	procs := 1
	if enabled {
		procs = runtime.GOMAXPROCS(0)
	}

	var wg sync.WaitGroup
	splitRange(start, stop, procs, func(pstart, pstop int) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fn(pstart, pstop)
		}()
	})
	wg.Wait()
}
// splitRange splits [start, stop) into n contiguous parts and calls fn
// with each part's bounds. The remainder (count % n) is spread one
// extra element over the first parts; n is clamped to [1, count]. An
// empty range produces no calls.
func splitRange(start, stop, n int, fn func(pstart, pstop int)) {
	count := stop - start
	if count < 1 {
		return
	}
	if n < 1 {
		n = 1
	}
	if n > count {
		n = count
	}
	div := count / n
	mod := count % n
	for i := 0; i < n; i++ {
		fn(
			start+i*div+minint(i, mod),
			start+(i+1)*div+minint(i+1, mod),
		)
	}
}
// absf32 returns the absolute value of v.
func absf32(v float32) float32 {
	if v < 0 {
		return -v
	}
	return v
}

// minf32 returns the smaller of a and b. The comparison order matches
// the float < semantics of the original (NaN behavior preserved).
func minf32(a, b float32) float32 {
	if a < b {
		return a
	}
	return b
}

// maxf32 returns the larger of a and b.
func maxf32(a, b float32) float32 {
	if a > b {
		return a
	}
	return b
}

// powf32 is float32 exponentiation via math.Pow.
func powf32(base, exp float32) float32 {
	return float32(math.Pow(float64(base), float64(exp)))
}

// logf32 is the float32 natural logarithm.
func logf32(v float32) float32 {
	return float32(math.Log(float64(v)))
}

// expf32 is the float32 natural exponential.
func expf32(v float32) float32 {
	return float32(math.Exp(float64(v)))
}

// sincosf32 returns the sine and cosine of an angle given in degrees.
func sincosf32(deg float32) (float32, float32) {
	sin, cos := math.Sincos(math.Pi * float64(deg) / 180)
	return float32(sin), float32(cos)
}

// floorf32 rounds v down to the nearest integer value.
func floorf32(v float32) float32 {
	return float32(math.Floor(float64(v)))
}

// sqrtf32 is the float32 square root.
func sqrtf32(v float32) float32 {
	return float32(math.Sqrt(float64(v)))
}

// minint returns the smaller of a and b.
func minint(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// maxint returns the larger of a and b.
func maxint(a, b int) int {
	if a > b {
		return a
	}
	return b
}
// sort orders data ascending in place. Slices of up to 20 elements use
// insertion sort; larger slices use recursive quicksort with a
// middle-element pivot.
func sort(data []float32) {
	n := len(data)
	if n < 2 {
		return
	}
	if n <= 20 {
		// Insertion sort: shift larger elements right, drop v in place.
		for i := 1; i < n; i++ {
			v := data[i]
			j := i
			for j > 0 && data[j-1] > v {
				data[j] = data[j-1]
				j--
			}
			data[j] = v
		}
		return
	}
	// Hoare-style partition around the middle element.
	pivot := data[n/2]
	lo, hi := 0, n-1
	for lo <= hi {
		for data[lo] < pivot {
			lo++
		}
		for data[hi] > pivot {
			hi--
		}
		if lo <= hi {
			data[lo], data[hi] = data[hi], data[lo]
			lo++
			hi--
		}
	}
	if hi > 0 {
		sort(data[:hi+1])
	}
	if lo < n-1 {
		sort(data[lo:])
	}
}
// createTempImage creates a temporary image.
func createTempImage(r image.Rectangle) draw.Image {
return image.NewNRGBA64(r)
}
// isOpaque checks if the given image is opaque.
func isOpaque(img image.Image) bool {
type opaquer interface {
Opaque() bool
}
if o, ok := img.(opaquer); ok {
return o.Opaque()
}
return false
}
// genDisk generates a disk-shaped kernel of side ksize: cells whose
// center lies within ksize/2 of the kernel center are 1, the rest 0.
// Even sizes are reduced by one; sizes below 1 yield an empty kernel.
func genDisk(ksize int) []float32 {
	if ksize%2 == 0 {
		ksize--
	}
	if ksize < 1 {
		return []float32{}
	}
	disk := make([]float32, ksize*ksize)
	center := ksize / 2
	radius := float64(ksize / 2)
	for row := 0; row < ksize; row++ {
		for col := 0; col < ksize; col++ {
			dx := float64(center - col)
			dy := float64(center - row)
			if math.Sqrt(dx*dx+dy*dy) <= radius {
				disk[row*ksize+col] = 1
			}
		}
	}
	return disk
}
// copyimage copies an image from src to dst pixel by pixel, mapping
// src's bounds onto dst's bounds and parallelizing over rows according
// to options (the package defaults are used when options is nil).
func copyimage(dst draw.Image, src image.Image, options *Options) {
	if options == nil {
		options = &defaultOptions
	}

	srcb := src.Bounds()
	dstb := dst.Bounds()

	pixGetter := newPixelGetter(src)
	pixSetter := newPixelSetter(dst)

	parallelize(options.Parallelization, srcb.Min.Y, srcb.Max.Y, func(start, stop int) {
		for srcy := start; srcy < stop; srcy++ {
			for srcx := srcb.Min.X; srcx < srcb.Max.X; srcx++ {
				dstx := dstb.Min.X + srcx - srcb.Min.X
				dsty := dstb.Min.Y + srcy - srcb.Min.Y
				pixSetter.setPixel(dstx, dsty, pixGetter.getPixel(srcx, srcy))
			}
		}
	})
}

// copyimageFilter is the identity filter: it simply copies the source
// image into the destination.
type copyimageFilter struct{}

// Bounds returns the destination bounds: the source size anchored at
// the origin.
func (p *copyimageFilter) Bounds(srcBounds image.Rectangle) (dstBounds image.Rectangle) {
	dstBounds = image.Rect(0, 0, srcBounds.Dx(), srcBounds.Dy())
	return
}

// Draw copies src into dst using the shared copyimage helper.
func (p *copyimageFilter) Draw(dst draw.Image, src image.Image, options *Options) {
	copyimage(dst, src, options)
} | utils.go | 0.687945 | 0.45042 | utils.go | starcoder
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// AssignedTrainingInfo holds per-training assignment and completion counts
// for an attack simulation and training campaign. Field access goes through
// the nil-safe getter/setter methods defined on *AssignedTrainingInfo.
type AssignedTrainingInfo struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // Number of users who were assigned the training in an attack simulation and training campaign.
    assignedUserCount *int32
    // Number of users who completed the training in an attack simulation and training campaign.
    completedUserCount *int32
    // Display name of the training in an attack simulation and training campaign.
    displayName *string
}
// NewAssignedTrainingInfo instantiates a new assignedTrainingInfo with an
// empty additional-data map as its default state.
func NewAssignedTrainingInfo()(*AssignedTrainingInfo) {
    info := &AssignedTrainingInfo{}
    info.SetAdditionalData(map[string]interface{}{})
    return info
}
// CreateAssignedTrainingInfoFromDiscriminatorValue creates a new instance of
// the appropriate class based on discriminator value. The parse node is not
// inspected: this model has no subtypes, so a plain AssignedTrainingInfo is
// always returned.
func CreateAssignedTrainingInfoFromDiscriminatorValue(_ i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewAssignedTrainingInfo(), nil
}
// GetAdditionalData gets the additionalData property value: data found while
// deserializing that is not described in the OpenAPI description. Safe to
// call on a nil receiver, in which case it returns nil.
func (m *AssignedTrainingInfo) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    }
    return m.additionalData
}
// GetAssignedUserCount gets the assignedUserCount property value: the number
// of users who were assigned the training in an attack simulation and
// training campaign. Returns nil on a nil receiver.
func (m *AssignedTrainingInfo) GetAssignedUserCount()(*int32) {
    if m == nil {
        return nil
    }
    return m.assignedUserCount
}
// GetCompletedUserCount gets the completedUserCount property value: the
// number of users who completed the training in an attack simulation and
// training campaign. Returns nil on a nil receiver.
func (m *AssignedTrainingInfo) GetCompletedUserCount()(*int32) {
    if m == nil {
        return nil
    }
    return m.completedUserCount
}
// GetDisplayName gets the displayName property value: the display name of
// the training in an attack simulation and training campaign. Returns nil
// on a nil receiver.
func (m *AssignedTrainingInfo) GetDisplayName()(*string) {
    if m == nil {
        return nil
    }
    return m.displayName
}
// GetFieldDeserializers returns the deserialization callbacks for this
// model, keyed by property name. Each callback reads one property from the
// parse node and stores it through the matching setter; nil values from the
// node are skipped.
func (m *AssignedTrainingInfo) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    deserializers := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error), 3)
    deserializers["assignedUserCount"] = func (node i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        v, err := node.GetInt32Value()
        if err != nil {
            return err
        }
        if v != nil {
            m.SetAssignedUserCount(v)
        }
        return nil
    }
    deserializers["completedUserCount"] = func (node i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        v, err := node.GetInt32Value()
        if err != nil {
            return err
        }
        if v != nil {
            m.SetCompletedUserCount(v)
        }
        return nil
    }
    deserializers["displayName"] = func (node i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        v, err := node.GetStringValue()
        if err != nil {
            return err
        }
        if v != nil {
            m.SetDisplayName(v)
        }
        return nil
    }
    return deserializers
}
// Serialize writes the current object's properties to the given writer:
// the two user counts, the display name, and finally any additional data,
// stopping at the first write error.
func (m *AssignedTrainingInfo) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    if err := writer.WriteInt32Value("assignedUserCount", m.GetAssignedUserCount()); err != nil {
        return err
    }
    if err := writer.WriteInt32Value("completedUserCount", m.GetCompletedUserCount()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("displayName", m.GetDisplayName()); err != nil {
        return err
    }
    if err := writer.WriteAdditionalData(m.GetAdditionalData()); err != nil {
        return err
    }
    return nil
}
// SetAdditionalData sets the additionalData property value: data not
// described in the OpenAPI description. A nil receiver is a no-op.
func (m *AssignedTrainingInfo) SetAdditionalData(value map[string]interface{})() {
    if m == nil {
        return
    }
    m.additionalData = value
}
// SetAssignedUserCount sets the assignedUserCount property value: the number
// of users assigned the training in an attack simulation and training
// campaign. A nil receiver is a no-op.
func (m *AssignedTrainingInfo) SetAssignedUserCount(value *int32)() {
    if m == nil {
        return
    }
    m.assignedUserCount = value
}
// SetCompletedUserCount sets the completedUserCount property value: the
// number of users who completed the training in an attack simulation and
// training campaign. A nil receiver is a no-op.
func (m *AssignedTrainingInfo) SetCompletedUserCount(value *int32)() {
    if m == nil {
        return
    }
    m.completedUserCount = value
}
// SetDisplayName sets the displayName property value. Display name of the training in an attack simulation and training campaign.
func (m *AssignedTrainingInfo) SetDisplayName(value *string)() {
if m != nil {
m.displayName = value
}
} | models/assigned_training_info.go | 0.57093 | 0.490724 | assigned_training_info.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.