// NOTE: dataset/table header residue removed — not part of any source file.
package models
import (
"../../common"
"fmt"
"strconv"
)
type Company struct {
id int `json:id`
name string `json:name`
street string `json:street`
city string `json:city`
state string `json:state`
country string `json:country`
description string `json:description`
industry string `json:industry`
employees int `json:employees`
rating int `json:rating`
isPublic bool `json:isPublic`
lastUpdatedDate string `json:lastUpdatedDate`
applicantEndpoint string `json:applicationEndpoint`
}
// ReloadFromJson repopulates every field of the Company from the supplied
// JSON string. Numeric fields arrive as quoted strings and are parsed with
// strconv.Atoi; parse failures are silently ignored (the field keeps its
// zero value). isPublic is a numeric string where any value greater than
// zero means true.
// NOTE(review): each lookup type-asserts to string and panics if a key is
// missing or has another type — confirm callers always supply full JSON.
func (x *Company) ReloadFromJson(json string) {
	fields := common.StrToDictionary([]byte(json))
	x.id, _ = strconv.Atoi(fields["id"].(string))
	x.name = fields["name"].(string)
	x.street = fields["street"].(string)
	x.city = fields["city"].(string)
	x.state = fields["state"].(string)
	x.country = fields["country"].(string)
	x.description = fields["description"].(string)
	x.industry = fields["industry"].(string)
	x.employees, _ = strconv.Atoi(fields["employees"].(string))
	x.rating, _ = strconv.Atoi(fields["rating"].(string))
	isPublicFlag, _ := strconv.Atoi(fields["isPublic"].(string))
	x.isPublic = isPublicFlag > 0
	x.lastUpdatedDate = fields["lastUpdatedDate"].(string)
	x.applicantEndpoint = fields["applicantEndpoint"].(string)
}
// ToJsonString serializes the Company to a JSON object string. Every value
// (including numbers and the bool) is emitted as a quoted string so that
// ReloadFromJson, which parses numeric fields with strconv.Atoi, can read
// the output back.
//
// BUG FIX: isPublic was formatted with "%d" applied to a bool, which
// produced `"%!d(bool=true)"` — invalid output that ReloadFromJson could
// not parse. It is now encoded as "1"/"0", matching ReloadFromJson's
// "greater than zero means true" rule.
func (x *Company) ToJsonString() string {
	// Encode the bool as 0/1 so the round-trip via strconv.Atoi works.
	isPublic := 0
	if x.isPublic {
		isPublic = 1
	}
	result := "{"
	result += fmt.Sprintf("\"id\":\"%d\"", x.id)
	result += fmt.Sprintf(",\"name\":\"%s\"", x.name)
	result += fmt.Sprintf(",\"rating\":\"%v\"", x.rating)
	result += fmt.Sprintf(",\"employees\":\"%v\"", x.employees)
	result += fmt.Sprintf(",\"city\":\"%s\"", x.city)
	result += fmt.Sprintf(",\"state\":\"%s\"", x.state)
	result += fmt.Sprintf(",\"country\":\"%s\"", x.country)
	result += fmt.Sprintf(",\"description\":\"%s\"", x.description)
	result += fmt.Sprintf(",\"industry\":\"%s\"", x.industry)
	result += fmt.Sprintf(",\"street\":\"%s\"", x.street)
	result += fmt.Sprintf(",\"isPublic\":\"%d\"", isPublic)
	result += fmt.Sprintf(",\"lastUpdatedDate\":\"%s\"", x.lastUpdatedDate)
	result += fmt.Sprintf(",\"applicantEndpoint\":\"%s\"", x.applicantEndpoint)
	result += "}"
	return result
}
// Setters for Company's unexported fields; each simply overwrites the
// corresponding field with the supplied value.

func (x *Company) SetID (a int) { x.id = a; }
func (x *Company) SetName (a string) { x.name = a; }
func (x *Company) SetStreet (a string) { x.street = a; }
func (x *Company) SetCity (a string) { x.city = a; }
func (x *Company) SetState (a string) { x.state = a; }
func (x *Company) SetCountry (a string) { x.country = a; }
func (x *Company) SetDescription (a string) { x.description = a; }
func (x *Company) SetIndustry (a string) { x.industry = a; }
func (x *Company) SetEmployees (a int) { x.employees = a; }
func (x *Company) SetRating (a int) { x.rating = a; }
func (x *Company) SetIsPublic (a bool) { x.isPublic = a; }
func (x *Company) SetLastUpdatedDate (a string) { x.lastUpdatedDate = a; }
func (x *Company) SetApplicantEndpoint (a string) { x.applicantEndpoint = a; }
func (x Company) ID() (int) { return x.id; }
func (x Company) Name() (string) { return x.name; }
func (x Company) Street() (string) { return x.street; }
func (x Company) City() (string) { return x.city; }
func (x Company) State() (string) { return x.state; }
func (x Company) Country() (string) { return x.country; }
func (x Company) Description() (string) { return x.description; }
func (x Company) Industry() (string) { return x.industry; }
func (x Company) Employees() (int) { return x.employees; }
func (x Company) Rating() (int) { return x.rating; }
func (x Company) IsPublic() (bool) { return x.isPublic; }
func (x Company) LastUpdatedDate() (string) { return x.lastUpdatedDate; }
func (x Company) ApplicantEndpoint() (string) { return x.applicantEndpoint; } | jmserver/src/classes/jmserver/models/Company.go | 0.54359 | 0.540196 | Company.go | starcoder |
package x32
import (
"math"
"github.com/golang/glog"
)
// x32EqFreqLogToHz returns the frequency in Hz which corresponds to the 0..1
// float value sent by the X32 when the Eq Frequency knob is turned. The input
// is clamped to [0, 1]; the output range is therefore 20 Hz .. ~20 kHz
// (exp(2.9957...) = 20, exp(2.9957... + 6.9078...) ≈ 20000).
// (Comment previously named a non-existent "x32LogToHz" and had typos.)
func x32EqFreqLogToHz(f float32) float32 {
	if f < 0 {
		f = 0
	} else if f > 1 {
		f = 1
	}
	// Spreadsheet origin: =EXP(2.9957322738+A1*6.9077552785)
	Hz := float32(math.Exp(2.9957322738 + float64(f)*6.9077552785))
	return Hz
}
// hzToX32EqFreq converts a frequency in Hz to the 0..1 log float value used
// by the X32 Eq Frequency OSC parameter. Frequencies outside 20..20000 Hz
// are clamped to the nearest limit before conversion.
func hzToX32EqFreq(hz float32) float32 {
	clamped := math.Min(math.Max(float64(hz), 20), 20000)
	return float32((math.Log10(clamped) - 1.30102999622) / 3.0)
}
// hzToNeutronEqLog converts a frequency in Hz to the corresponding log value
// used by the Neutron eq OSC params. Input is clamped to 20..20000 Hz.
func hzToNeutronEqLog(hz float32) float32 {
	if hz < 20 {
		hz = 20
	} else if hz > 20000 {
		hz = 20000
	}
	f := float32((math.Atanh((float64(hz)-18.99300151)/200000000-1) - -9.9) / 4.947806)
	return f
}
// neutronEqLogToHz converts the Neutron eq log frequency OSC value to the
// corresponding frequency in Hz. Input is clamped to [0, 1].
// NOTE(review): this uses the constant 4.9478063 while hzToNeutronEqLog above
// uses 4.947806 — the constants differ in the last digit, so the round-trip
// is not exactly symmetric. Confirm which precision is intended.
func neutronEqLogToHz(f float32) float32 {
	if f < 0 {
		f = 0
	} else if f > 1 {
		f = 1
	}
	Hz := float32(18.99300151 + (1.0+math.Tanh(-9.9+float64(f)*4.9478063))*200000000)
	return Hz
}
// x32QLogToOct converts an X32 Eq Q OSC log value (clamped to 0..1) to the
// corresponding Q octave value, which runs from 10 (at input 0) down to
// roughly 0.3 (at input 1).
func x32QLogToOct(f float32) float32 {
	clamped := math.Min(math.Max(float64(f), 0), 1)
	return float32(math.Exp(2.302585093 - clamped*3.5))
}
// octToX32Q converts a Q octave value to the log float representation used by
// the X32 Eq Q OSC param. Input is clamped to 0.3..10; this is the inverse of
// x32QLogToOct (1.520030687 ≈ 3.5 / ln 10).
func octToX32Q(o float32) float32 {
	if o > 10 {
		o = 10
	} else if o < 0.3 {
		o = 0.3
	}
	f := float32(-(math.Log10(float64(o)) - 1.0) / 1.520030687)
	return f
}
// neutronQLogToOct converts from the Neutron Q float OSC param value (clamped
// to 0..1) to the corresponding Q octave value.
func neutronQLogToOct(f float32) float32 {
	if f < 0 {
		f = 0
	} else if f > 1 {
		f = 1
	}
	q := float32(-0.9022102697 + (1.0+math.Tanh(-4.1+float64(f)*1.86))*1825)
	return q
}
// octToNeutronQLog converts from Q octave width (clamped to 0.1..40) to the
// log float representation used by the Neutron OSC param; inverse of
// neutronQLogToOct.
func octToNeutronQLog(o float32) float32 {
	if o < 0.1 {
		o = 0.1
	} else if o > 40 {
		o = 40
	}
	f := float32((math.Atanh(((float64(o)+0.9022102697)/1825.0)-1.0) + 4.1) / 1.86)
	return f
}
// normToNeutronGain shifts a normalized gain value into the Neutron's gain
// OSC range by adding a fixed 0.167 offset.
func normToNeutronGain(g float32) float32 {
	return g + 0.167
}
// neutronToNormGain removes the fixed 0.167 Neutron gain offset; inverse of
// normToNeutronGain.
func neutronToNormGain(g float32) float32 {
	return g - 0.167
}
// Neutron eq band types as carried over OSC. The raw values are consecutive
// integers (iota); OSC transmits them normalized to 0..1 by dividing by
// NeutronEqMax (see x32EqTypeToNeutron / neutronEqTypeToX32).
const (
	NeutronEqProp = iota
	NeutronEqBell
	NeutronEqBandShelf
	NeutronEqAnalogLowLowShelf
	NeutronEqBaxLowShelf
	NeutronEqVintageLowShelf
	NeutronEqAnalogHighLowShelf
	NeutronEqBaxHighShelf
	NeutronEqVintageHighShelf
	NeutronEqFlatLowPass
	NeutronEqResonantLowPass
	NeutronEqFlatHighPass
	NeutronEqResonantHighPass
	// NeutronEqMax is the divisor used to normalize the integer type
	// constants above into the 0..1 OSC float range.
	NeutronEqMax float32 = 12.0
)
// x32EqTypeToNeutron maps an X32 eq band type value to the Neutron band
// type, normalized to 0..1 for OSC transmission. Unknown types are logged
// and map to 0.
// NOTE(review): t is compared with == against small integer literals; this
// assumes the X32 sends exact integral floats — confirm against the OSC
// spec.
func x32EqTypeToNeutron(t float32) float32 {
	// TODO: make these mappings be configurable
	switch t {
	case 0:
		return NeutronEqFlatHighPass / NeutronEqMax
	case 1:
		return NeutronEqVintageLowShelf / NeutronEqMax
	case 2:
		return NeutronEqProp / NeutronEqMax
	case 3:
		return NeutronEqBandShelf / NeutronEqMax
	case 4:
		return NeutronEqVintageHighShelf / NeutronEqMax
	case 5:
		return NeutronEqFlatLowPass / NeutronEqMax
	}
	glog.Errorf("Got unknown x32EqType %f", t)
	return 0
}
func neutronEqTypeToX32(t float32) float32 {
// TODO: make these mappings be configurable
i := int(t * NeutronEqMax)
switch i {
case NeutronEqFlatHighPass:
return 0
case NeutronEqVintageLowShelf:
return 1
case NeutronEqProp:
return 2
case NeutronEqBandShelf:
return 3
case NeutronEqVintageHighShelf:
return 4
case NeutronEqFlatLowPass:
return 5
case NeutronEqResonantHighPass, NeutronEqResonantLowPass, NeutronEqAnalogLowLowShelf, NeutronEqBaxLowShelf, NeutronEqBell, NeutronEqBaxHighShelf:
return 0
}
glog.Errorf("Got unknown x32EqType %d", i)
return 0
} | maths.go | 0.785514 | 0.606935 | maths.go | starcoder |
package heap
import (
"fmt"
"strings"
)
// Heap is a binary min-heap of ints: the smallest element is always at the
// root (index 0).
type Heap struct {
	data []int
}

// Init initializes the heap in place with the given data (bottom-up
// heapify). The heap takes ownership of the slice.
func (h *Heap) Init(data []int) {
	h.data = data
	for i := len(data) / 2; i >= 0; i-- {
		h.down(i)
	}
}

// New returns a new heap built from the given data. The heap takes
// ownership of the slice.
func New(data []int) *Heap {
	h := new(Heap)
	h.Init(data)
	return h
}

// Push inserts value into the heap.
func (h *Heap) Push(value int) {
	h.data = append(h.data, value)
	h.up(len(h.data) - 1)
}

// Pop removes and returns the smallest value. It returns 0 when the heap is
// empty (indistinguishable from a stored 0 — check Empty() first).
func (h *Heap) Pop() int {
	if len(h.data) == 0 {
		return 0
	}
	value := h.data[0]
	last := len(h.data) - 1
	h.data[0] = h.data[last]
	h.data = h.data[:last]
	h.down(0)
	return value
}

// down sifts the value at index toward the leaves until both children are
// >= it, restoring the min-heap property.
//
// BUG FIX: the previous version compared the two children with strict "<"
// in both directions, so when the children were equal (and smaller than the
// parent) neither branch fired and the sift stopped early, leaving an
// invalid heap (e.g. Init([5,3,3]) stayed [5,3,3]).
func (h *Heap) down(index int) {
	for {
		left := 2*index + 1
		if left >= len(h.data) {
			return
		}
		// Pick the smaller child (left wins ties).
		child := left
		if right := left + 1; right < len(h.data) && h.data[right] < h.data[left] {
			child = right
		}
		if h.data[child] >= h.data[index] {
			return
		}
		h.swap(index, child)
		index = child
	}
}

// up sifts the value at index toward the root while it is smaller than its
// parent.
//
// BUG FIX: the previous version swapped when the PARENT was smaller than
// the child (h.data[parent] < h.data[index]), which is backwards for a
// min-heap and broke the invariant whenever a new minimum was pushed.
func (h *Heap) up(index int) {
	for index > 0 {
		parent := (index - 1) / 2
		if h.data[index] >= h.data[parent] {
			return
		}
		h.swap(parent, index)
		index = parent
	}
}

// swap exchanges the values at indices i and j.
func (h *Heap) swap(i, j int) {
	h.data[i], h.data[j] = h.data[j], h.data[i]
}

// String returns a string representation of the heap's backing array, e.g.
// "[1, 3, 2]".
//
// BUG FIX: the previous version wrapped fmt.Sprint's output (which already
// has brackets) in another pair of brackets, yielding "[[1, 3, 2]]".
func (h *Heap) String() string {
	inner := strings.Trim(fmt.Sprint(h.data), "[]")
	return "[" + strings.Join(strings.Fields(inner), ", ") + "]"
}

// Valid reports whether the min-heap property holds for every node.
func (h *Heap) Valid() bool {
	for i := range h.data {
		left, right := 2*i+1, 2*i+2
		if left < len(h.data) && h.data[left] < h.data[i] {
			return false
		}
		if right < len(h.data) && h.data[right] < h.data[i] {
			return false
		}
	}
	return true
}

// Empty reports whether the heap contains no elements.
func (h *Heap) Empty() bool {
	return len(h.data) == 0
}

// Size returns the number of elements in the heap.
func (h *Heap) Size() int {
	return len(h.data)
}

// Top returns the smallest value without removing it, or 0 if the heap is
// empty.
func (h *Heap) Top() int {
	if len(h.data) == 0 {
		return 0
	}
	return h.data[0]
}

// Copy returns an independent copy of the heap.
//
// BUG FIX: the previous version passed h.data straight to New, so the copy
// shared the backing array with the original and mutations on either side
// corrupted the other.
func (h *Heap) Copy() *Heap {
	data := make([]int, len(h.data))
	copy(data, h.data)
	return New(data)
}
package services
import (
"os"
)
// GENERICS TOOD: When Go has generics, parameterize this to be <N, S extends N> where S is the
// specific service interface and N represents the interface that every node on the network has
/*
Tells Kurtosis how to create a Docker container representing a user-defined service in the test network.
*/
type ServiceInitializerCore interface {
// Gets the "set" of ports that the Docker container running the service will listen on
GetUsedPorts() map[int]bool
// GENERICS TOOD: When Go has generics, make this return type be parameterized
/*
Uses the IP address of the Docker container running the service to create an implementation of the interface the developer
has created to represent their service.
NOTE: Because Go doesn't have generics, we can't properly parameterize the return type to be the actual service interface
that the developer has created; nonetheless, the developer should return an implementation of their interface (which itself
should extend Service).
Args:
ipAddr: The IP address of the Docker container running the service
*/
GetServiceFromIp(ipAddr string) Service
/*
This method is used to declare that the service will need a set of files in order to run. To do this, the developer
declares a set of string keys that are meaningful to the developer, and Kurtosis will create one file per key. These newly-createed
file objects will then be passed in to the `InitializeMountedFiles` and `GetStartCommand` functions below keyed on the
strings that the developer passed in, so that the developer can initialize the contents of the files as they please.
Kurtosis then guarantees that these files will be made available to the service at startup time.
NOTE: The keys that the developer returns here are ONLY used for developer identification purposes; the actual
filenames and filepaths of the file are implementation details handled by Kurtosis!
Returns:
A "set" of user-defined key strings identifying the files that the service will need, which is how files will be
identified in `InitializeMountedFiles` and `GetStartCommand`
*/
GetFilesToMount() map[string]bool
/*
Initializes the contents of the files that the developer requested in `GetFilesToMount` with whatever contents the developer desires
Args:
mountedFiles: A mapping of developer_key -> file_pointer, with developer_key corresponding to the keys declares in
`GetFilesToMount`
dependencies: The services that this service depends on (which, depending on the service, might be necessary
for filling in config file values)
*/
InitializeMountedFiles(mountedFiles map[string]*os.File, dependencies []Service) error
/*
Kurtosis mounts the files that the developer requested in `GetFilesToMount` via a Docker volume, but Kurtosis doesn't
know anything about the Docker image backing the service so therefore doesn't know what filepath it can safely mount
the volume on. This function uses the developer's knowledge of the Docker image running the service to inform
Kurtosis of a filepath where the Docker volume can be safely mounted.
Returns:
A filepath on the Docker image backing this service that's safe to mount the test volume on
*/
GetTestVolumeMountpoint() string
// GENERICS TOOD: when Go gets generics, make the type of 'dependencies' to be []N
// If Go had generics, dependencies should be of type []T
/*
Uses the given arguments to build the command that the Docker container running this service will be launched with.
NOTE: Because the IP address of the container is an implementation detail, any references to the IP address of the
container should use the placeholder "SERVICEIP" instead. This will get replaced at launch time with the service's
actual IP.
Args:
mountedFileFilepaths: Mapping of developer_key -> initialized_file_filepath where developer_key corresponds to the keys returned
in the `GetFilesToMount` function, and initialized_file_filepath is the path *on the Docker container* of where the
file has been mounted. The files will have already been initialized via the `InitializeMountedFiles` function.
ipPlaceholder: Because the IP address of the container is an implementation detail, any references to the IP
address of the container should use this placeholder string when they want the IP. This will get replaced at
service launch time with the actual IP.
dependencies: The services that this service depends on (for use in case the command line to the service changes based on dependencies)
Returns:
The command fragments which will be used to construct the run command which will be used to launch the Docker container
running the service. If this is nil, then no explicit command will be specified and whatever command the Dockerfile
specifies will be run instead.
*/
GetStartCommand(mountedFileFilepaths map[string]string, ipPlaceholder string, dependencies []Service) ([]string, error)
} | lib/services/service_initializer_core.go | 0.732783 | 0.507873 | service_initializer_core.go | starcoder |
package iso20022
// TotalPortfolioValuation1 carries valuation information of the portfolio
// (ISO 20022 message component). Fields whose XML tag carries ",omitempty"
// are optional and may be nil; use the Add* helper methods to allocate them
// before populating.
type TotalPortfolioValuation1 struct {
	// Total value of the portfolio (sum of the assets, liabilities and unrealised gain/loss) calculated according to the accounting rules.
	TotalPortfolioValue *AmountAndDirection30 `xml:"TtlPrtflVal"`
	// Previous total value of the portfolio.
	PreviousTotalPortfolioValue *AmountAndDirection30 `xml:"PrvsTtlPrtflVal,omitempty"`
	// Difference or change between the previous total portfolio value and the current total portfolio value.
	TotalPortfolioValueChange *AmountAndRate2 `xml:"TtlPrtflValChng,omitempty"`
	// Net asset on balance sheet - total portfolio value minus or plus the unrealised gain or loss.
	TotalBookValue *AmountAndDirection30 `xml:"TtlBookVal"`
	// Previous net asset on balance sheet.
	PreviousTotalBookValue *AmountAndDirection30 `xml:"PrvsTtlBookVal,omitempty"`
	// Difference or change between the previous net asset on balance sheet and the current net asset on balance sheet.
	TotalBookValueChange *AmountAndRate2 `xml:"TtlBookValChng,omitempty"`
	// Total receipts attributable to the portfolio.
	TotalReceipts *AmountAndDirection30 `xml:"TtlRcts,omitempty"`
	// Total disbursements attributable to the portfolio.
	TotalDisbursements *AmountAndDirection30 `xml:"TtlDsbrsmnts,omitempty"`
	// Income attributable to the portfolio.
	IncomeReceived *AmountAndDirection30 `xml:"IncmRcvd,omitempty"`
	// Expenses attributable to the portfolio.
	ExpensesPaid *AmountAndDirection30 `xml:"ExpnssPd,omitempty"`
	// Difference between the holding value and the book value of the portfolio.
	UnrealisedGainOrLoss *AmountAndDirection31 `xml:"UrlsdGnOrLoss,omitempty"`
	// Difference between the realised value caused by the actual trade/re-evaluation and the book value of the portfolio.
	RealisedGainOrLoss *AmountAndDirection31 `xml:"RealsdGnOrLoss,omitempty"`
	// Accrued income.
	AccruedIncome *AmountAndDirection30 `xml:"AcrdIncm,omitempty"`
	// Valuation information of the investment fund or investment fund share class.
	InvestmentFundDetails []*InvestmentFund1 `xml:"InvstmtFndDtls,omitempty"`
}
func (t *TotalPortfolioValuation1) AddTotalPortfolioValue() *AmountAndDirection30 {
t.TotalPortfolioValue = new(AmountAndDirection30)
return t.TotalPortfolioValue
}
func (t *TotalPortfolioValuation1) AddPreviousTotalPortfolioValue() *AmountAndDirection30 {
t.PreviousTotalPortfolioValue = new(AmountAndDirection30)
return t.PreviousTotalPortfolioValue
}
func (t *TotalPortfolioValuation1) AddTotalPortfolioValueChange() *AmountAndRate2 {
t.TotalPortfolioValueChange = new(AmountAndRate2)
return t.TotalPortfolioValueChange
}
func (t *TotalPortfolioValuation1) AddTotalBookValue() *AmountAndDirection30 {
t.TotalBookValue = new(AmountAndDirection30)
return t.TotalBookValue
}
func (t *TotalPortfolioValuation1) AddPreviousTotalBookValue() *AmountAndDirection30 {
t.PreviousTotalBookValue = new(AmountAndDirection30)
return t.PreviousTotalBookValue
}
func (t *TotalPortfolioValuation1) AddTotalBookValueChange() *AmountAndRate2 {
t.TotalBookValueChange = new(AmountAndRate2)
return t.TotalBookValueChange
}
func (t *TotalPortfolioValuation1) AddTotalReceipts() *AmountAndDirection30 {
t.TotalReceipts = new(AmountAndDirection30)
return t.TotalReceipts
}
func (t *TotalPortfolioValuation1) AddTotalDisbursements() *AmountAndDirection30 {
t.TotalDisbursements = new(AmountAndDirection30)
return t.TotalDisbursements
}
func (t *TotalPortfolioValuation1) AddIncomeReceived() *AmountAndDirection30 {
t.IncomeReceived = new(AmountAndDirection30)
return t.IncomeReceived
}
func (t *TotalPortfolioValuation1) AddExpensesPaid() *AmountAndDirection30 {
t.ExpensesPaid = new(AmountAndDirection30)
return t.ExpensesPaid
}
func (t *TotalPortfolioValuation1) AddUnrealisedGainOrLoss() *AmountAndDirection31 {
t.UnrealisedGainOrLoss = new(AmountAndDirection31)
return t.UnrealisedGainOrLoss
}
func (t *TotalPortfolioValuation1) AddRealisedGainOrLoss() *AmountAndDirection31 {
t.RealisedGainOrLoss = new(AmountAndDirection31)
return t.RealisedGainOrLoss
}
func (t *TotalPortfolioValuation1) AddAccruedIncome() *AmountAndDirection30 {
t.AccruedIncome = new(AmountAndDirection30)
return t.AccruedIncome
}
func (t *TotalPortfolioValuation1) AddInvestmentFundDetails() *InvestmentFund1 {
newValue := new (InvestmentFund1)
t.InvestmentFundDetails = append(t.InvestmentFundDetails, newValue)
return newValue
} | TotalPortfolioValuation1.go | 0.754192 | 0.600364 | TotalPortfolioValuation1.go | starcoder |
package circle
import (
"github.com/gravestench/pho/geom/point"
"github.com/gravestench/pho/geom/rectangle"
)
// CircleNamespace enumerates the circle operations exposed via the Namespace
// helper type below; each method mirrors the package-level function of the
// same name.
type CircleNamespace interface {
	New(x, y, radius float64) *Circle
	Clone(c *Circle) *Circle
	Contains(c *Circle, x, y float64) bool
	ContainsPoint(c *Circle, p *point.Point) bool
	ContainsRectangle(c *Circle, r *rectangle.Rectangle) bool
	GetPoint(c *Circle, position float64, point *point.Point) *point.Point
	GetPoints(c *Circle, quantity int, stepRate float64, points []*point.Point) []*point.Point
	Circumference(c *Circle) float64
	GetRandomPoint(c *Circle, p *point.Point) *point.Point
	Equals(c *Circle, other *Circle) bool
	GetBounds(c *Circle, assignTo *rectangle.Rectangle) *rectangle.Rectangle
	Offset(c *Circle, x, y float64) *Circle
	OffsetPoint(c *Circle, p *point.Point) *Circle
}

// Namespace is a stateless implementation of CircleNamespace that delegates
// every call to the package-level functions.
type Namespace struct{}
func (n *Namespace) New(x, y, radius float64) *Circle {
return New(x, y, radius)
}
// Clone returns a clone of this circle
func (n *Namespace) Clone(c *Circle) *Circle {
return Clone(c)
}
// Contains checks to see if the Circle contains the given x / y coordinates.
func (n *Namespace) Contains(c *Circle, x, y float64) bool {
return Contains(c, x, y)
}
// ContainsPoint checks to see if the Circle contains the given point.
func (n *Namespace) ContainsPoint(c *Circle, p *point.Point) bool {
return ContainsPoint(c, p)
}
// ContainsRectangle checks to see if the Circle contains the given rectangle.
func (n *Namespace) ContainsRectangle(c *Circle, r *rectangle.Rectangle) bool {
return ContainsRectangle(c, r)
}
// GetPoint returns a Point object containing the coordinates of a point on the circumference of
// the Circle based on the given angle normalized to the range 0 to 1. I.e. a value of 0.5 will give
// the point at 180 degrees around the circle.
func (n *Namespace) GetPoint(c *Circle, position float64, point *point.Point) *point.Point {
return GetPoint(c, position, point)
}
// GetPoints Returns an array of Point objects containing the coordinates of the points around the
// circumference of the Circle, based on the given quantity or stepRate values.
func (n *Namespace) GetPoints(c *Circle, quantity int, stepRate float64,
points []*point.Point) []*point.Point {
return GetPoints(c, quantity, stepRate, points)
}
// Circumference returns the circumference of the given Circle.
func (n *Namespace) Circumference(c *Circle) float64 {
return Circumference(c)
}
// GetRandomPoint returns a uniformly distributed random point from anywhere within the given Circle
func (n *Namespace) GetRandomPoint(c *Circle, p *point.Point) *point.Point {
return GetRandomPoint(c, p)
}
// Equals compares the `x`, `y` and `radius` properties of this circle with the other circle.
// Returns `true` if they all match, otherwise returns `false`.
func (n *Namespace) Equals(c *Circle, other *Circle) bool {
return Equals(c, other)
}
// GetBounds returns the bounds (a rectangle) of the Circle object.
func (n *Namespace) GetBounds(c *Circle, assignTo *rectangle.Rectangle) *rectangle.Rectangle {
return GetBounds(c, assignTo)
}
// Offset the circle position by the given x, y
func (n *Namespace) Offset(c *Circle, x, y float64) *Circle {
return Offset(c, x, y)
}
// OffsetPoint offsets the circle with the x,y of the given point
func (n *Namespace) OffsetPoint(c *Circle, p *point.Point) *Circle {
return Offset(c, p.X, p.Y)
} | geom/circle/namespace.go | 0.92853 | 0.612049 | namespace.go | starcoder |
package main
import (
"fmt"
"math"
)
// leftmostDigit returns the most significant decimal digit of n.
func leftmostDigit(n int) int {
	for ; n >= 10; n /= 10 {
	}
	return n
}

// numDigits returns how many decimal digits n has (0 for n == 0).
func numDigits(n int) int {
	digits := 0
	for ; n > 0; n /= 10 {
		digits++
	}
	return digits
}

// getSequentialNumber builds the sequential-digit number whose first digit
// is leftDigit and which has E+1 digits, e.g. (1, 2) -> 123 and (7, 2) ->
// 789. Callers are expected to have validated the pair with
// validSequentialNumberConstraints first.
func getSequentialNumber(leftDigit int, E int) int {
	total := 0
	for d, e := leftDigit, E; d <= 9 && e >= 0; d, e = d+1, e-1 {
		total += d * int(math.Pow(10, float64(e)))
	}
	return total
}

// validSequentialNumberConstraints reports whether a sequential number can
// start at leftDigit and span E+1 digits without its last digit exceeding 9.
func validSequentialNumberConstraints(leftDigit int, E int) bool {
	return E <= 9-leftDigit
}

// sequentialDigits returns a sorted list of all the integers in the range
// [low, high] inclusive that have sequential digits (each digit one more
// than the previous). https://leetcode.com/problems/sequential-digits/
func sequentialDigits(low int, high int) []int {
	result := make([]int, 0)
	// Generate candidates by width (E+1 digits), then by leading digit; that
	// order is already ascending. For the first width it is safe to start at
	// low's own leading digit: a same-width sequential number with a smaller
	// leading digit is necessarily smaller than low.
	leadingDigit := leftmostDigit(low)
	maxE := numDigits(high) - 1
	for E := numDigits(low) - 1; E <= maxE; E++ {
		for d := leadingDigit; d < 10; d++ {
			if !validSequentialNumberConstraints(d, E) {
				continue
			}
			candidate := getSequentialNumber(d, E)
			if low <= candidate && candidate <= high {
				result = append(result, candidate)
			}
		}
		leadingDigit = 1
	}
	return result
}
func main() {
fmt.Println("hello")
fmt.Println(leftmostDigit(123))
fmt.Println(leftmostDigit(456))
fmt.Println(numDigits(12345))
fmt.Println(numDigits(10))
fmt.Println(numDigits(11))
fmt.Println(getSequentialNumber(1, 2)) // expects 123
fmt.Println(getSequentialNumber(2, 4)) // expects 23456
fmt.Println(getSequentialNumber(7, 2)) // expects 789
fmt.Println(validSequentialNumberConstraints(7, 2)) // expects true
fmt.Println(validSequentialNumberConstraints(8, 2)) // expects false
fmt.Println(sequentialDigits(100, 300)) // expects [123, 234]
fmt.Println(sequentialDigits(1000, 13000)) // expects [1234,2345,3456,4567,5678,6789,12345]
} | puzzles/sequential-digits/sequential-digits.go | 0.819785 | 0.463748 | sequential-digits.go | starcoder |
package godis
import (
"fmt"
"math"
"strconv"
)
//BoolToByteArr convert bool to byte array.
//NOTE(review): returns the shared bytesTrue/bytesFalse values declared
//elsewhere in this package — callers should treat the result as read-only.
func BoolToByteArr(a bool) []byte {
	if a {
		return bytesTrue
	}
	return bytesFalse
}
//IntToByteArr convert int to byte array
func IntToByteArr(a int) []byte {
	return []byte(strconv.Itoa(a))
}

//Int64ToByteArr convert int64 to byte array
func Int64ToByteArr(a int64) []byte {
	return []byte(strconv.FormatInt(a, 10))
}

//Float64ToStr convert float64 to string; positive/negative infinity are
//encoded as "+inf"/"-inf"
func Float64ToStr(a float64) string {
	switch {
	case math.IsInf(a, 1):
		return "+inf"
	case math.IsInf(a, -1):
		return "-inf"
	default:
		return strconv.FormatFloat(a, 'f', -1, 64)
	}
}

//Float64ToByteArr convert float64 to byte array, using the same
//"+inf"/"-inf" encoding as Float64ToStr
func Float64ToByteArr(a float64) []byte {
	switch {
	case math.IsInf(a, 1):
		return []byte("+inf")
	case math.IsInf(a, -1):
		return []byte("-inf")
	default:
		return []byte(strconv.FormatFloat(a, 'f', -1, 64))
	}
}

//ByteArrToFloat64 convert byte array to float64; returns 0 when the bytes
//do not parse as a float (the parse error is deliberately discarded)
func ByteArrToFloat64(bytes []byte) float64 {
	f, _ := strconv.ParseFloat(string(bytes), 64)
	return f
}
//StrStrArrToByteArrArr convert string and string array to byte array list
//(str first, then each element of arr)
func StrStrArrToByteArrArr(str string, arr []string) [][]byte {
	params := make([][]byte, 0, len(arr)+1)
	params = append(params, []byte(str))
	for _, s := range arr {
		params = append(params, []byte(s))
	}
	return params
}

//StrStrArrToStrArr convert string and string array to string array
//(str first, then each element of arr)
func StrStrArrToStrArr(str string, arr []string) []string {
	params := make([]string, 0, len(arr)+1)
	params = append(params, str)
	return append(params, arr...)
}

//StrArrToByteArrArr convert string array to byte array list
func StrArrToByteArrArr(arr []string) [][]byte {
	newArr := make([][]byte, 0, len(arr))
	for _, s := range arr {
		newArr = append(newArr, []byte(s))
	}
	return newArr
}
//StrToFloat64Reply convert string reply to float64 reply
func StrToFloat64Reply(reply string, err error) (float64, error) {
	if err != nil {
		return 0, err
	}
	f, parseErr := strconv.ParseFloat(reply, 64)
	if parseErr != nil {
		return 0, parseErr
	}
	return f, nil
}

//StrArrToMapReply convert string array reply to map reply; consecutive
//elements are paired as key/value.
//FIX: the loop previously read reply[i+1] whenever i < len(reply), which
//panicked with "index out of range" on an odd-length reply; a dangling
//trailing key is now ignored.
func StrArrToMapReply(reply []string, err error) (map[string]string, error) {
	if err != nil {
		return nil, err
	}
	newMap := make(map[string]string, len(reply)/2)
	for i := 0; i+1 < len(reply); i += 2 {
		newMap[reply[i]] = reply[i+1]
	}
	return newMap, nil
}

//Int64ToBoolReply convert int64 reply to bool reply (1 means true)
func Int64ToBoolReply(reply int64, err error) (bool, error) {
	if err != nil {
		return false, err
	}
	return reply == 1, nil
}

//ByteArrToStrReply convert byte array reply to string reply
func ByteArrToStrReply(reply []byte, err error) (string, error) {
	if err != nil {
		return "", err
	}
	return string(reply), nil
}
//StrArrToTupleReply convert string array reply to tuple array reply;
//elements are consumed in (member, score) pairs.
//FIX: the err parameter was previously ignored until the final return, so a
//transport error with an empty reply was silently swallowed and a non-empty
//reply was parsed despite the error; err is now checked first. The pair
//loop also no longer reads past the end of an odd-length reply.
func StrArrToTupleReply(reply []string, err error) ([]Tuple, error) {
	if err != nil {
		return nil, err
	}
	if len(reply) == 0 {
		return []Tuple{}, nil
	}
	newArr := make([]Tuple, 0, len(reply)/2)
	for i := 0; i+1 < len(reply); i += 2 {
		score, parseErr := strconv.ParseFloat(reply[i+1], 64)
		if parseErr != nil {
			return nil, parseErr
		}
		newArr = append(newArr, Tuple{element: reply[i], score: score})
	}
	return newArr, nil
}
//ObjArrToScanResultReply convert object array reply to scanresult reply.
//reply[0] is the next cursor (as []byte) and reply[1] the page of results.
//NOTE(review): the type assertions panic if the server reply has an
//unexpected shape — confirm upstream guarantees []byte / []interface{}.
func ObjArrToScanResultReply(reply []interface{}, err error) (*ScanResult, error) {
	if err != nil || len(reply) == 0 {
		return nil, err
	}
	nexCursor := string(reply[0].([]byte))
	result := make([]string, 0)
	for _, r := range reply[1].([]interface{}) {
		result = append(result, string(r.([]byte)))
	}
	return &ScanResult{Cursor: nexCursor, Results: result}, err
}
//ObjArrToGeoCoordinateReply convert object array reply to GeoCoordinate reply.
//Each entry is either nil (preserved) or a [longitude, latitude] pair of
//numeric strings.
func ObjArrToGeoCoordinateReply(reply []interface{}, err error) ([]*GeoCoordinate, error) {
	if err != nil || len(reply) == 0 {
		return nil, err
	}
	arr := make([]*GeoCoordinate, 0)
	for _, r := range reply {
		if r == nil {
			arr = append(arr, nil)
		} else {
			rArr := r.([]interface{})
			// Index 0 is longitude, index 1 latitude.
			lng, err := strconv.ParseFloat(string(rArr[0].([]byte)), 64)
			if err != nil {
				return nil, err
			}
			lat, err := strconv.ParseFloat(string(rArr[1].([]byte)), 64)
			if err != nil {
				return nil, err
			}
			arr = append(arr, &GeoCoordinate{
				longitude: lng,
				latitude: lat,
			})
		}
	}
	return arr, err
}
//ObjArrToGeoRadiusResponseReply convert object array reply to GeoRadiusResponse reply.
//When each entry is itself an array, informations[0] is the member name and
//each subsequent entry is either a coordinate pair ([]interface{}) or a
//distance ([]byte); otherwise each entry is just a member name.
func ObjArrToGeoRadiusResponseReply(reply []interface{}, err error) ([]GeoRadiusResponse, error) {
	if err != nil || len(reply) == 0 {
		return nil, err
	}
	arr := make([]GeoRadiusResponse, 0)
	switch reply[0].(type) {
	case []interface{}:
		var resp GeoRadiusResponse
		for _, r := range reply {
			informations := r.([]interface{})
			resp = *newGeoRadiusResponse(string(informations[0].([]byte)))
			size := len(informations)
			for idx := 1; idx < size; idx++ {
				info := informations[idx]
				switch info.(type) {
				case []interface{}:
					// Nested array: a [longitude, latitude] coordinate pair.
					coord := info.([]interface{})
					resp.coordinate = GeoCoordinate{
						longitude: ByteArrToFloat64(coord[0].([]byte)),
						latitude: ByteArrToFloat64(coord[1].([]byte)),
					}
				default:
					// Scalar: the distance field.
					resp.distance = ByteArrToFloat64(info.([]byte))
				}
			}
			arr = append(arr, resp)
		}
	default:
		// Flat reply: member names only.
		for _, r := range reply {
			arr = append(arr, *newGeoRadiusResponse(string(r.([]byte))))
		}
	}
	return arr, err
}
//ObjArrToMapArrayReply convert object array reply to map array reply;
//each entry is a flat key/value list folded into a map.
//NOTE(review): this asserts each entry to [][]byte while the sibling
//converters above assert []interface{} — confirm the reply shape for the
//commands that use this path.
func ObjArrToMapArrayReply(reply []interface{}, err error) ([]map[string]string, error) {
	if err != nil || len(reply) == 0 {
		return nil, err
	}
	masters := make([]map[string]string, 0)
	for _, re := range reply {
		m := make(map[string]string)
		arr := re.([][]byte)
		for i := 0; i < len(arr); i += 2 {
			m[string(arr[i])] = string(arr[i+1])
		}
		masters = append(masters, m)
	}
	return masters, nil
}
//ObjToEvalResult resolve response data when use script command: byte slices
//become strings, nested arrays are converted recursively, and all other
//values pass through unchanged.
func ObjToEvalResult(reply interface{}, err error) (interface{}, error) {
	if err != nil {
		return nil, err
	}
	switch v := reply.(type) {
	case []byte:
		return string(v), nil
	case []interface{}:
		converted := make([]interface{}, 0, len(v))
		for _, item := range v {
			inner, innerErr := ObjToEvalResult(item, nil)
			if innerErr != nil {
				return nil, innerErr
			}
			converted = append(converted, inner)
		}
		return converted, nil
	default:
		return reply, nil
	}
}
//<editor-fold desc="cluster reply convert">
//ToStrReply convert object reply to string reply; []byte replies are
//converted, anything else is asserted to string (panics on other types,
//matching the rest of this group)
func ToStrReply(reply interface{}, err error) (string, error) {
	if err != nil {
		return "", err
	}
	if raw, ok := reply.([]byte); ok {
		return string(raw), nil
	}
	return reply.(string), nil
}

//ToInt64Reply convert object reply to int64 reply
func ToInt64Reply(reply interface{}, err error) (int64, error) {
	if err != nil {
		return 0, err
	}
	value := reply.(int64)
	return value, nil
}

//ToInt64ArrReply convert object reply to int64 array reply
func ToInt64ArrReply(reply interface{}, err error) ([]int64, error) {
	if err != nil {
		return nil, err
	}
	values := reply.([]int64)
	return values, nil
}

//ToBoolReply convert object reply to bool reply
func ToBoolReply(reply interface{}, err error) (bool, error) {
	if err != nil {
		return false, err
	}
	value := reply.(bool)
	return value, nil
}

//ToFloat64Reply convert object reply to float64 reply
func ToFloat64Reply(reply interface{}, err error) (float64, error) {
	if err != nil {
		return 0, err
	}
	value := reply.(float64)
	return value, nil
}

//ToBoolArrReply convert object reply to bool array reply
func ToBoolArrReply(reply interface{}, err error) ([]bool, error) {
	if err != nil {
		return nil, err
	}
	values := reply.([]bool)
	return values, nil
}

//ToStrArrReply convert object reply to string array reply
func ToStrArrReply(reply interface{}, err error) ([]string, error) {
	if err != nil {
		return nil, err
	}
	values := reply.([]string)
	return values, nil
}
//ToScanResultReply convert object reply to scanresult reply
func ToScanResultReply(reply interface{}, err error) (*ScanResult, error) {
	if err == nil {
		return reply.(*ScanResult), nil
	}
	return nil, err
}
//ToMapReply convert object reply to map reply
func ToMapReply(reply interface{}, err error) (map[string]string, error) {
	if err == nil {
		return reply.(map[string]string), nil
	}
	return nil, err
}
//ToTupleArrReply convert object reply to tuple array reply
func ToTupleArrReply(reply interface{}, err error) ([]Tuple, error) {
	if err == nil {
		return reply.([]Tuple), nil
	}
	return nil, err
}
//ToGeoCoordArrReply convert object reply to geocoordinate array reply
func ToGeoCoordArrReply(reply interface{}, err error) ([]*GeoCoordinate, error) {
	if err == nil {
		return reply.([]*GeoCoordinate), nil
	}
	return nil, err
}
//ToGeoRespArrReply convert object reply to GeoRadiusResponse array reply
func ToGeoRespArrReply(reply interface{}, err error) ([]GeoRadiusResponse, error) {
	if err == nil {
		return reply.([]GeoRadiusResponse), nil
	}
	return nil, err
}
//</editor-fold>
//Builder convert pipeline|transaction response data
type Builder interface {
	// build converts one raw response value into its typed form,
	// returning an error when the payload has an unexpected type.
	build(data interface{}) (interface{}, error)
}
// Shared, stateless builder instances used when decoding queued
// pipeline/transaction responses.
var (
	//StrBuilder convert interface to string
	StrBuilder = newStrBuilder()
	//Int64Builder convert interface to int64
	Int64Builder = newInt64Builder()
	//StrArrBuilder convert interface to string array
	StrArrBuilder = newStringArrayBuilder()
)
//strBuilder converts a raw pipeline/transaction result into a string.
type strBuilder struct{}
func newStrBuilder() *strBuilder {
	return &strBuilder{}
}
//build maps nil to "", []byte to its string form, an error value to a
//failure, and rejects every other payload type.
func (sb *strBuilder) build(data interface{}) (interface{}, error) {
	if data == nil {
		return "", nil
	}
	switch v := data.(type) {
	case []byte:
		return string(v), nil
	case error:
		return "", v
	}
	return "", fmt.Errorf("unexpected type:%T", data)
}
//int64Builder converts a raw pipeline/transaction result into an int64.
type int64Builder struct{}
func newInt64Builder() *int64Builder {
	return &int64Builder{}
}
//build maps nil to 0, passes int64 values through, and rejects every
//other payload type.
func (ib *int64Builder) build(data interface{}) (interface{}, error) {
	if data == nil {
		return 0, nil
	}
	if n, ok := data.(int64); ok {
		return n, nil
	}
	return 0, fmt.Errorf("unexpected type:%T", data)
}
//strArrBuilder converts a raw pipeline/transaction result into []string.
type strArrBuilder struct{}
func newStringArrayBuilder() *strArrBuilder {
	return &strArrBuilder{}
}
//build maps nil to an empty slice and an []interface{} of byte slices
//to []string; nil entries (missing values) become empty strings.
func (sb *strArrBuilder) build(data interface{}) (interface{}, error) {
	if data == nil {
		return []string{}, nil
	}
	items, ok := data.([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected type:%T", data)
	}
	out := make([]string, 0, len(items))
	for _, item := range items {
		if item == nil {
			out = append(out, "")
		} else {
			out = append(out, string(item.([]byte)))
		}
	}
	return out, nil
}
package geodist
import (
"fmt"
"math"
)
// WGS-84 ellipsoid
const (
	a float64 = 6378137           // semi-major axis, meters
	f float64 = 1 / 298.257223563 // flattening
	b float64 = 6356752.314245    // semi-minor axis, meters
)
// Iteration controls for Vincenty's inverse formula.
const (
	epsilon       = 1e-12 // convergence threshold for successive lambda values
	maxIterations = 200   // hard cap on iterations before giving up
)
// VincentyDistance returns the geographical distance in km between the points p1 and p2 using Vincenty's inverse formula.
// The surface of the Earth is approximated by the WGS-84 ellipsoid.
// This method may fail to converge for nearly antipodal points.
func VincentyDistance(p1 Point, p2 Point) (float64, error) {
	// Identical coordinates: answer is trivially zero (and avoids the
	// sigma == 0 division below).
	if p1.Lat == p2.Lat && p1.Long == p2.Long {
		return 0, nil
	}
	// Reduced latitudes (latitudes on the auxiliary sphere).
	U1 := math.Atan((1 - f) * math.Tan(toRadians(p1.Lat)))
	U2 := math.Atan((1 - f) * math.Tan(toRadians(p2.Lat)))
	// Difference in longitude, in radians.
	L := toRadians(p2.Long - p1.Long)
	sinU1 := math.Sin(U1)
	cosU1 := math.Cos(U1)
	sinU2 := math.Sin(U2)
	cosU2 := math.Cos(U2)
	// lambda is the difference in longitude on the auxiliary sphere,
	// iterated until it stabilizes.
	lambda := L
	// result stays NaN unless the iteration below converges.
	result := math.NaN()
	for i := 0; i < maxIterations; i++ {
		curLambda := lambda
		sinSigma := math.Sqrt(math.Pow(cosU2*math.Sin(lambda), 2) +
			math.Pow(cosU1*sinU2-sinU1*cosU2*math.Cos(lambda), 2))
		cosSigma := sinU1*sinU2 + cosU1*cosU2*math.Cos(lambda)
		// sigma is the angular separation between the points.
		sigma := math.Atan2(sinSigma, cosSigma)
		// NOTE(review): divides by sin(sigma); coincident points that are
		// not bitwise-equal (e.g. longitudes 360 apart) would hit 0/0 here
		// despite the early-return guard above — confirm inputs are
		// normalized.
		sinAlpha := (cosU1 * cosU2 * math.Sin(lambda)) / math.Sin(sigma)
		cosSqrAlpha := 1 - math.Pow(sinAlpha, 2)
		// cos(2*sigma_m); left at 0 for equatorial lines (cosSqrAlpha == 0).
		cos2sigmam := 0.0
		if cosSqrAlpha != 0 {
			cos2sigmam = math.Cos(sigma) - ((2 * sinU1 * sinU2) / cosSqrAlpha)
		}
		C := (f / 16) * cosSqrAlpha * (4 + f*(4-3*cosSqrAlpha))
		lambda = L + (1-C)*f*sinAlpha*(sigma+C*sinSigma*(cos2sigmam+C*cosSigma*(-1+2*math.Pow(cos2sigmam, 2))))
		if math.Abs(lambda-curLambda) < epsilon {
			// Converged: evaluate the closed-form distance correction.
			uSqr := cosSqrAlpha * ((math.Pow(a, 2) - math.Pow(b, 2)) / math.Pow(b, 2))
			k1 := (math.Sqrt(1+uSqr) - 1) / (math.Sqrt(1+uSqr) + 1)
			A := (1 + (math.Pow(k1, 2) / 4)) / (1 - k1)
			B := k1 * (1 - (3*math.Pow(k1, 2))/8)
			deltaSigma := B * sinSigma * (cos2sigmam + (B/4)*(cosSigma*(-1+2*math.Pow(cos2sigmam, 2))-
				(B/6)*cos2sigmam*(-3+4*math.Pow(sinSigma, 2))*(-3+4*math.Pow(cos2sigmam, 2))))
			// s is the geodesic length in meters; convert to km.
			s := b * A * (sigma - deltaSigma)
			result = s / 1000
			break
		}
	}
	if math.IsNaN(result) {
		return result, fmt.Errorf("failed to converge for %v and %v", p1, p2)
	}
	return result, nil
}
package lcd
// Value is a uint that can be converted into a string representation
type Value uint

// Dimension defines the width and height of each digit as it's printed
type Dimension interface {
	Width() uint
	Height() uint
}

// dimension is the concrete Dimension implementation used by this package.
type dimension struct {
	w, h uint
}

// Width reports the digit width in characters.
func (d dimension) Width() uint {
	return d.w
}

// Height reports the digit height in characters.
func (d dimension) Height() uint {
	return d.h
}

// DefaultDimension is the default output size for the lcd module
var DefaultDimension Dimension = dimension{w: 3, h: 3}

// Square returns an object defining a size-by-size square output format
func Square(size uint) Dimension {
	return Custom(size, size)
}

// Custom returns an object defining a width-by-height output format
func Custom(width, height uint) Dimension {
	return dimension{w: width, h: height}
}
// Format renders the Value as a block of LCD-style digits sized by
// dimension, one newline-terminated line per output row.
func (l Value) Format(dimension Dimension) string {
	digits := digitize(uint(l))
	var out []byte
	for row := 0; row < int(dimension.Height()); row++ {
		out = append(out, printRow(digits, dimension, row)...)
	}
	return string(out)
}
// digitize splits number into its decimal digits, most significant first.
func digitize(number uint) []uint {
	if number == 0 {
		return []uint{0}
	}
	var digits []uint
	// Peel digits off least-significant-first, then flip.
	for n := number; n > 0; n /= 10 {
		digits = append(digits, n%10)
	}
	return reverse(digits)
}
// reverse flips the slice in place and returns it for convenience.
func reverse(digits []uint) []uint {
	for i, j := 0, len(digits)-1; i < j; i, j = i+1, j-1 {
		digits[i], digits[j] = digits[j], digits[i]
	}
	return digits
}
// printRow renders one output row for every digit, terminated by a newline.
func printRow(digits []uint, dimension Dimension, row int) string {
	srcRow := dstToSrc(row, dimension.Height(), digitHeight)
	width := int(dimension.Width())
	line := make([]byte, 0, len(digits)*width+1)
	for _, digit := range digits {
		glyph := digitStrings[digit]
		for col := 0; col < width; col++ {
			// Map the output cell back onto the fixed-size source glyph.
			srcCol := srcRow*digitWidth + dstToSrc(col, dimension.Width(), digitWidth)
			line = append(line, glyph[srcCol])
		}
	}
	line = append(line, '\n')
	return string(line)
}
// dstToSrc maps an output coordinate onto the fixed source glyph grid:
// edges map to edges, the midpoint to the midpoint, and everything else
// to the cell adjacent to the source midpoint. The checks run in order,
// so edges win over the midpoint for tiny output sizes.
func dstToSrc(dstValue int, dstSize, srcSize uint) int {
	dstMid := int(dstSize / 2)
	srcMid := int(srcSize / 2)
	if dstValue == 0 {
		return 0
	}
	if dstValue == int(dstSize)-1 {
		return int(srcSize) - 1
	}
	if dstValue == dstMid {
		return srcMid
	}
	if dstValue < dstMid {
		return srcMid - 1
	}
	if dstValue > dstMid {
		return srcMid + 1
	}
	return 0
}
// Source glyph grid size: every digit is drawn on a digitWidth x
// digitHeight character cell.
const digitWidth = 5
const digitHeight = 5
// digitStrings holds each digit's glyph as one row-major string of
// digitWidth*digitHeight characters, indexed by the digit value.
var digitStrings = [...]string{
	" ___ | || || ||___|", // 0
	" | | | |", // 1
	" ___ | ___|| |___ ", // 2
	" ___ | ___| | ___|", // 3
	" | ||___| | |", // 4
	" ___ | |___ | ___|", // 5
	" ___ | |___ | ||___|", // 6
	" ___ | | | |", // 7
	" ___ | ||___|| ||___|", // 8
	" ___ | ||___| | ___|", // 9
}
package elf_code
import (
"errors"
"strconv"
"strings"
)
// OpCode identifies one of the sixteen Elf-machine instructions.
type OpCode int
const (
	AddR OpCode = iota // `addr` (add register) stores into register `C` the result of adding register `A` and register `B`.
	AddI // `addi` (add immediate) stores into register `C` the result of adding register `A` and value `B`.
	MulR // `mulr` (multiply register) stores into register `C` the result of multiplying register `A` and register `B`.
	MulI // `muli` (multiply immediate) stores into register `C` the result of multiplying register `A` and value `B`.
	BanR // `banr` (bitwise AND register) stores into register `C` the result of the bitwise AND of register `A` and register `B`.
	BanI // `bani` (bitwise AND immediate) stores into register `C` the result of the bitwise AND of register `A` and value `B`.
	BorR // `borr` (bitwise OR register) stores into register `C` the result of the bitwise OR of register `A` and register `B`.
	BorI // `bori` (bitwise OR immediate) stores into register `C` the result of the bitwise OR of register `A` and value `B`.
	SetR // `setr` (set register) copies the contents of register `A` into register `C`. (Input `B` is ignored.)
	SetI // `seti` (set immediate) stores value `A` into register `C`. (Input `B` is ignored.)
	GtIR // `gtir` (greater-than immediate/register) sets register `C` to 1 if value `A` is greater than register `B`. Otherwise, register `C` is set to 0.
	GtRI // `gtri` (greater-than register/immediate) sets register `C` to 1 if register `A` is greater than value `B`. Otherwise, register `C` is set to 0.
	GtRR // `gtrr` (greater-than register/register) sets register `C` to 1 if register `A` is greater than register `B`. Otherwise, register `C` is set to 0.
	EqIR // `eqir` (equal immediate/register) sets register `C` to 1 if value `A` is equal to register `B`. Otherwise, register `C` is set to 0.
	EqRI // `eqri` (equal register/immediate) sets register `C` to 1 if register `A` is equal to value `B`. Otherwise, register `C` is set to 0.
	EqRR // `eqrr` (equal register/register) sets register `C` to 1 if register `A` is equal to register `B`. Otherwise, register `C` is set to 0.
)
// NumOpCodes is the total number of defined op codes (one past EqRR).
const NumOpCodes = OpCode(int(EqRR) + 1)
// String returns the lowercase mnemonic for the op code, or a
// diagnostic string for values outside the defined range.
func (o OpCode) String() string {
	// Indexed by op code value; order matches the iota declaration.
	names := [...]string{
		"addr", "addi", "mulr", "muli",
		"banr", "bani", "borr", "bori",
		"setr", "seti", "gtir", "gtri",
		"gtrr", "eqir", "eqri", "eqrr",
	}
	if o >= 0 && int(o) < len(names) {
		return names[o]
	}
	return "Unknown OpCode: " + strconv.Itoa(int(o))
}
// isComparator reports whether the op code writes a 0/1 comparison result.
func (o OpCode) isComparator() bool {
	return o >= GtIR && o <= EqRR
}
// ParseOpCode parses a case-insensitive mnemonic into its OpCode,
// returning an error for unknown mnemonics.
func ParseOpCode(str string) (opCode OpCode, err error) {
	codes := map[string]OpCode{
		"addr": AddR, "addi": AddI,
		"mulr": MulR, "muli": MulI,
		"banr": BanR, "bani": BanI,
		"borr": BorR, "bori": BorI,
		"setr": SetR, "seti": SetI,
		"gtir": GtIR, "gtri": GtRI,
		"gtrr": GtRR, "eqir": EqIR,
		"eqri": EqRI, "eqrr": EqRR,
	}
	if code, ok := codes[strings.ToLower(str)]; ok {
		return code, nil
	}
	return 0, errors.New("unknown op code: " + str)
}
// OpCodesWithout returns every op code, in ascending order, except
// those present in the given `filter` list.
func OpCodesWithout(filter []OpCode) []OpCode {
	// Build a set once instead of rescanning filter for every op code
	// (the previous linear scan also never broke out on a match).
	excluded := make(map[OpCode]bool, len(filter))
	for _, opCode := range filter {
		excluded[opCode] = true
	}
	result := make([]OpCode, 0, int(NumOpCodes))
	for opCode := OpCode(0); opCode < NumOpCodes; opCode++ {
		if !excluded[opCode] {
			result = append(result, opCode)
		}
	}
	return result
}
// InputIsImmediate records, per op code, whether inputs A and B are
// immediate values (true) or register numbers (false).
type InputIsImmediate struct {
	A, B bool
}
// Lookup table for if op codes are immediate values or not
var OpCodeInputType = map[OpCode]InputIsImmediate{
	AddR: { false, false },
	AddI: { false, true },
	MulR: { false, false },
	MulI: { false, true },
	BanR: { false, false },
	BanI: { false, true },
	BorR: { false, false },
	BorI: { false, true },
	SetR: { false, true },
	SetI: { true, true },
	GtIR: { true, false },
	GtRI: { false, true },
	GtRR: { false, false },
	EqIR: { true, false },
	EqRI: { false, true },
	EqRR: { false, false },
}
// Lookup table for if input B register is constant, what op we can swap it out for
// NOTE(review): name has a typo ("Immedate"), kept as-is because it is
// exported and renaming would break callers.
var OpCodeImmedateVersion = map[OpCode]OpCode {
	AddR: AddI,
	MulR: MulI,
	BanR: BanI,
	BorR: BorI,
	GtRR: GtRI,
	EqRR: EqRI,
}
// The function which executes the given op code, with the A, B inputs and the current Registers. Returns the value to
// put in output C and any error with the op code
type OpFunc = func(a int, b int, registers Registers) (value int, err error)
// OpCodeFunc maps every op code to its evaluation function. Register
// reads go through Registers.Get/GetTwo; any error from those lookups
// is propagated to the caller with a zero value.
var OpCodeFunc = map[OpCode]OpFunc{
	// addr (add register) stores into register C the result of adding register A and register B.
	AddR: func(a int, b int, registers Registers) (value int, err error) {
		aV, bV, err := registers.GetTwo(a, b)
		if err != nil {
			return 0, err
		}
		return aV + bV, nil
	},
	// addi (add immediate) stores into register C the result of adding register A and value B.
	AddI: func(a int, b int, registers Registers) (value int, err error) {
		aV, err := registers.Get(a)
		if err != nil {
			return 0, err
		}
		return aV + b, nil
	},
	// mulr (multiply register) stores into register C the result of multiplying register A and register B.
	MulR: func(a int, b int, registers Registers) (value int, err error) {
		aV, bV, err := registers.GetTwo(a, b)
		if err != nil {
			return 0, err
		}
		return aV * bV, nil
	},
	// muli (multiply immediate) stores into register C the result of multiplying register A and value B.
	MulI: func(a int, b int, registers Registers) (value int, err error) {
		aV, err := registers.Get(a)
		if err != nil {
			return 0, err
		}
		return aV * b, nil
	},
	// banr (bitwise AND register) stores into register C the result of the bitwise AND of register A and register B.
	BanR: func(a int, b int, registers Registers) (value int, err error) {
		aV, bV, err := registers.GetTwo(a, b)
		if err != nil {
			return 0, err
		}
		return aV & bV, nil
	},
	// bani (bitwise AND immediate) stores into register C the result of the bitwise AND of register A and value B.
	BanI: func(a int, b int, registers Registers) (value int, err error) {
		aV, err := registers.Get(a)
		if err != nil {
			return 0, err
		}
		return aV & b, nil
	},
	// borr (bitwise OR register) stores into register C the result of the bitwise OR of register A and register B.
	BorR: func(a int, b int, registers Registers) (value int, err error) {
		aV, bV, err := registers.GetTwo(a, b)
		if err != nil {
			return 0, err
		}
		return aV | bV, nil
	},
	// bori (bitwise OR immediate) stores into register C the result of the bitwise OR of register A and value B.
	BorI: func(a int, b int, registers Registers) (value int, err error) {
		aV, err := registers.Get(a)
		if err != nil {
			return 0, err
		}
		return aV | b, nil
	},
	// setr (set register) copies the contents of register A into register C. (Input B is ignored.)
	SetR: func(a int, b int, registers Registers) (value int, err error) {
		aV, err := registers.Get(a)
		if err != nil {
			return 0, err
		}
		return aV, nil
	},
	// seti (set immediate) stores value A into register C. (Input B is ignored.)
	SetI: func(a int, b int, registers Registers) (value int, err error) {
		return a, nil
	},
	// gtir (greater-than immediate/register) sets register C to 1 if value A is greater than register B. Otherwise, register C is set to 0.
	GtIR: func(a int, b int, registers Registers) (value int, err error) {
		bV, err := registers.Get(b)
		if err != nil {
			return 0, err
		}
		if a > bV {
			return 1, nil
		} else {
			return 0, nil
		}
	},
	// gtri (greater-than register/immediate) sets register C to 1 if register A is greater than value B. Otherwise, register C is set to 0.
	GtRI: func(a int, b int, registers Registers) (value int, err error) {
		aV, err := registers.Get(a)
		if err != nil {
			return 0, err
		}
		if aV > b {
			return 1, nil
		} else {
			return 0, nil
		}
	},
	// gtrr (greater-than register/register) sets register C to 1 if register A is greater than register B. Otherwise, register C is set to 0.
	GtRR: func(a int, b int, registers Registers) (value int, err error) {
		aV, bV, err := registers.GetTwo(a, b)
		if err != nil {
			return 0, err
		}
		if aV > bV {
			return 1, nil
		} else {
			return 0, nil
		}
	},
	// eqir (equal immediate/register) sets register C to 1 if value A is equal to register B. Otherwise, register C is set to 0.
	EqIR: func(a int, b int, registers Registers) (value int, err error) {
		bV, err := registers.Get(b)
		if err != nil {
			return 0, err
		}
		if a == bV {
			return 1, nil
		} else {
			return 0, nil
		}
	},
	// eqri (equal register/immediate) sets register C to 1 if register A is equal to value B. Otherwise, register C is set to 0.
	EqRI: func(a int, b int, registers Registers) (value int, err error) {
		aV, err := registers.Get(a)
		if err != nil {
			return 0, err
		}
		if aV == b {
			return 1, nil
		} else {
			return 0, nil
		}
	},
	// eqrr (equal register/register) sets register C to 1 if register A is equal to register B. Otherwise, register C is set to 0.
	EqRR: func(a int, b int, registers Registers) (value int, err error) {
		aV, bV, err := registers.GetTwo(a, b)
		if err != nil {
			return 0, err
		}
		if aV == bV {
			return 1, nil
		} else {
			return 0, nil
		}
	},
}
package testdata
// GetPaymentLinkResponse is a sample JSON body for the Get Payment Link
// endpoint, used as a test fixture.
const GetPaymentLinkResponse = `{
  "resource": "payment-link",
  "id": "pl_4Y0eZitmBnQ6IDoMqZQKh",
  "mode": "test",
  "profileId": "pfl_QkEhN94Ba",
  "createdAt": "2021-03-20T09:13:37+00:00",
  "paidAt": "2021-03-21T09:13:37+00:00",
  "updatedAt": "2021-03-21T09:13:37+00:00",
  "expiresAt": null,
  "amount": {
    "value": "24.95",
    "currency": "EUR"
  },
  "description": "Bicycle tires",
  "redirectUrl": "https://webshop.example.org/thanks",
  "webhookUrl": "https://webshop.example.org/payment-links/webhook/",
  "_links": {
    "self": {
      "href": "https://api.mollie.com/v2/payment-links/pl_4Y0eZitmBnQ6IDoMqZQKh",
      "type": "application/json"
    },
    "paymentLink": {
      "href": "https://paymentlink.mollie.com/payment/4Y0eZitmBnQ6IDoMqZQKh/",
      "type": "text/html"
    },
    "documentation": {
      "href": "https://docs.mollie.com/reference/v2/payment-links-api/get-payment-link",
      "type": "text/html"
    }
  }
}
`
// CreatePaymentLinkResponse is a sample JSON body for the Create Payment
// Link endpoint, used as a test fixture.
const CreatePaymentLinkResponse = `{
  "resource": "payment-link",
  "id": "pl_4Y0eZitmBnQ6IDoMqZQKh",
  "mode": "test",
  "profileId": "pfl_QkEhN94Ba",
  "createdAt": "2021-03-20T09:13:37+00:00",
  "paidAt": null,
  "updatedAt": null,
  "expiresAt": "2021-06-06T11:00:00+00:00",
  "amount": {
    "value": "24.95",
    "currency": "EUR"
  },
  "description": "Bicycle tires",
  "redirectUrl": "https://webshop.example.org/thanks",
  "webhookUrl": "https://webshop.example.org/payment-links/webhook/",
  "_links": {
    "self": {
      "href": "https://api.mollie.com/v2/payment-links/pl_4Y0eZitmBnQ6IDoMqZQKh",
      "type": "application/json"
    },
    "paymentLink": {
      "href": "https://paymentlink.mollie.com/payment/4Y0eZitmBnQ6IDoMqZQKh/",
      "type": "text/html"
    },
    "documentation": {
      "href": "https://docs.mollie.com/reference/v2/payment-links-api/create-payment-link",
      "type": "text/html"
    }
  }
}`
// ListPaymentLinksResponse is a sample JSON body for the List Payment
// Links endpoint, used as a test fixture.
const ListPaymentLinksResponse = `{
  "count": 1,
  "_embedded": {
    "payment_links": [
      {
        "resource": "payment-link",
        "id": "pl_4Y0eZitmBnQ6IDoMqZQKh",
        "mode": "test",
        "profileId": "pfl_QkEhN94Ba",
        "createdAt": "2021-03-20T09:13:37+00:00",
        "paidAt": "2021-03-21T09:13:37+00:00",
        "updatedAt": "2021-03-21T09:13:37+00:00",
        "expiresAt": null,
        "amount": {
          "value": "24.95",
          "currency": "EUR"
        },
        "description": "Bicycle tires",
        "redirectUrl": "https://webshop.example.org/thanks",
        "webhookUrl": "https://webshop.example.org/payment-links/webhook/",
        "_links": {
          "self": {
            "href": "https://api.mollie.com/v2/payment-links/pl_4Y0eZitmBnQ6IDoMqZQKh",
            "type": "application/json"
          },
          "paymentLink": {
            "href": "https://paymentlink.mollie.com/payment/4Y0eZitmBnQ6IDoMqZQKh/",
            "type": "text/html"
          },
          "documentation": {
            "href": "https://docs.mollie.com/reference/v2/payment-links-api/get-payment-link",
            "type": "text/html"
          }
        }
      }
    ]
  },
  "_links": {
    "self": {
      "href": "https://api.mollie.com/v2/payment-links?limit=5",
      "type": "application/hal+json"
    },
    "previous": null,
    "next": {
      "href": "https://api.mollie.com/v2/payment-links?from=pl_ER6aqfpXg6nZrJvcsxNsm&limit=5",
      "type": "application/hal+json"
    },
    "documentation": {
      "href": "https://docs.mollie.com/reference/v2/payment-links-api/list-payment-links",
      "type": "text/html"
    }
  }
}`
package core
import (
"fmt"
"time"
"github.com/Sirupsen/logrus"
"github.com/demizer/go-humanize"
)
// Maximum number of points used to calculate the average
var windowSize = 10
// ProgressPoint is a progress point containing bytes written in the last second added to the bps tracker.
type ProgressPoint struct {
	Time time.Time // The time the point was added
	TotalBytesWritten uint64 // Total bytes written since the last update
}
// BytesPerSecond is used to calculate bytes per second transfer speeds using the average of the last ten points. A point
// should be added every second for accurate calculation.
type BytesPerSecond struct {
	TimeStart time.Time // The time the bps tracker was initialized
	SizeTotal uint64 // The total number of bytes that will be tracked
	Points []*ProgressPoint // Used to do the calculation
	counter uint64 // Used to track bytes that are added in between seconds
}
// NewBytesPerSecond returns a new bytes per second object that can be used to track bytes per second transfer speeds.
// sizeTotal is the total number of bytes expected to be transferred.
func NewBytesPerSecond(sizeTotal uint64) *BytesPerSecond {
	return &BytesPerSecond{TimeStart: time.Now(), Points: make([]*ProgressPoint, 0), SizeTotal: sizeTotal}
}
// TimeSince returns the time since the first point. If no points exist, then the duration will be from object creation.
func (b *BytesPerSecond) TimeSince() time.Duration {
	if len(b.Points) > 0 {
		return time.Since(b.Points[0].Time)
	}
	return time.Since(b.TimeStart)
}
// AddPoint records totalBytesWritten toward the tracker. Bytes accumulate
// in an internal counter; a new progress point is only emitted when no
// point exists yet or more than one second has passed since the last
// point. This should be called once a second for accurate results.
func (b *BytesPerSecond) AddPoint(totalBytesWritten uint64) {
	var addPoint bool
	if len(b.Points) == 0 {
		// First sample always creates a point.
		addPoint = true
	} else if (time.Since(b.LastPoint().Time).Seconds()) > 1 {
		addPoint = true
	}
	if addPoint {
		// Flush everything accumulated since the previous point into a
		// new point, then reset the sub-second accumulator.
		b.counter += totalBytesWritten
		b.Points = append(b.Points, &ProgressPoint{Time: time.Now(), TotalBytesWritten: b.counter})
		b.counter = 0
	} else {
		// Less than a second since the last point: just accumulate.
		b.counter += totalBytesWritten
	}
}
// LastPoint returns the last progress point.
// Precondition: Points must be non-empty; indexing panics otherwise.
func (b *BytesPerSecond) LastPoint() *ProgressPoint {
	return (b.Points)[len(b.Points)-1]
}
// Calc returns the average bps calculation using the last 10 points
// (windowSize), or 0 when no points have been recorded yet.
func (b *BytesPerSecond) Calc() uint64 {
	if len(b.Points) == 0 {
		return 0
	}
	points := b.Points
	if end := len(b.Points); end > windowSize {
		// Only the most recent windowSize points contribute to the average.
		points = b.Points[end-windowSize : end]
	}
	var tBytes uint64
	for _, p := range points {
		tBytes += p.TotalBytesWritten
	}
	// Plain integer division: the previous uint64(float64(...)) round-trip
	// was a no-op for small totals and could lose precision above 2^53.
	return tBytes / uint64(len(points))
}
// CalcFull returns the average bps since time start including all points.
// The computed rate is also emitted to the debug log together with the
// elapsed time and total byte count.
func (b *BytesPerSecond) CalcFull() uint64 {
	printStats := func(calc uint64) {
		Log.WithFields(logrus.Fields{
			"timeSinceStart": fmt.Sprintf("%4.6f", b.TimeSince().Seconds()),
			"totalBytes":     b.SizeTotal,
			"bps":            humanize.IBytes(calc),
		}).Debugln("CalcFull: stats")
	}
	// Overall rate: configured total size divided by elapsed seconds.
	calc := uint64(float64(b.SizeTotal) / b.TimeSince().Seconds())
	printStats(calc)
	return calc
}
package itype
import "sort"
// Recti is a 2D rectangle with int coordinates.
// Left/Top is the origin corner; Width/Height extend from it.
type Recti struct {
	Left, Top     int
	Width, Height int
}
// Rectf is a 2D rectangle with float32 coordinates.
type Rectf struct {
	Left, Top     float32
	Width, Height float32
}
// MinPoint returns the rectangle's origin corner (Left, Top).
func (r Rectf) MinPoint() Vec2f {
	return Vec2f{r.Left, r.Top}
}
// MaxPoint returns the corner opposite the origin.
func (r Rectf) MaxPoint() Vec2f {
	return Vec2f{r.Left + r.Width, r.Top + r.Height}
}
// Size returns the rectangle's extent as (Width, Height).
func (r Rectf) Size() Vec2f {
	return Vec2f{r.Width, r.Height}
}
// Rectd is a 2D rectangle with float64 coordinates.
type Rectd struct {
	Left, Top     float64
	Width, Height float64
}
// MinPoint returns the rectangle's origin corner (Left, Top).
func (r Rectd) MinPoint() Vec2d {
	return Vec2d{r.Left, r.Top}
}
// MaxPoint returns the corner opposite the origin.
func (r Rectd) MaxPoint() Vec2d {
	return Vec2d{r.Left + r.Width, r.Top + r.Height}
}
// Size returns the rectangle's extent as (Width, Height).
func (r Rectd) Size() Vec2d {
	return Vec2d{r.Width, r.Height}
}
// Boxi is a 3D box with int coordinates.
// Off* is the origin corner; Size* extends from it along each axis.
type Boxi struct {
	OffX, OffY, OffZ    int
	SizeX, SizeY, SizeZ int
}
// Boxf is a 3D box with float32 coordinates.
type Boxf struct {
	OffX, OffY, OffZ    float32
	SizeX, SizeY, SizeZ float32
}
// Offset returns a copy of the box translated by offset; size is unchanged.
func (b Boxf) Offset(offset Vec3f) Boxf {
	return Boxf{
		OffX:  b.OffX + offset[0],
		OffY:  b.OffY + offset[1],
		OffZ:  b.OffZ + offset[2],
		SizeX: b.SizeX,
		SizeY: b.SizeY,
		SizeZ: b.SizeZ,
	}
}
// MinPoint returns the box's origin corner.
func (b Boxf) MinPoint() Vec3f {
	return Vec3f{b.OffX, b.OffY, b.OffZ}
}
// MaxPoint returns the corner opposite the origin.
func (b Boxf) MaxPoint() Vec3f {
	return Vec3f{b.OffX + b.SizeX, b.OffY + b.SizeY, b.OffZ + b.SizeZ}
}
// Boxd is a 3D box with float64 coordinates.
type Boxd struct {
	OffX, OffY, OffZ    float64
	SizeX, SizeY, SizeZ float64
}
// ToFloat32 returns the box converted to float32 precision.
func (b Boxd) ToFloat32() Boxf {
	return Boxf{
		OffX:  float32(b.OffX),
		OffY:  float32(b.OffY),
		OffZ:  float32(b.OffZ),
		SizeX: float32(b.SizeX),
		SizeY: float32(b.SizeY),
		SizeZ: float32(b.SizeZ),
	}
}
// Offset returns a copy of the box translated by offset; size is unchanged.
func (b Boxd) Offset(offset Vec3d) Boxd {
	return Boxd{
		OffX:  b.OffX + offset[0],
		OffY:  b.OffY + offset[1],
		OffZ:  b.OffZ + offset[2],
		SizeX: b.SizeX,
		SizeY: b.SizeY,
		SizeZ: b.SizeZ,
	}
}
// Offsetv returns a copy of the box translated by the component values.
func (b Boxd) Offsetv(x, y, z float64) Boxd {
	return Boxd{
		OffX:  b.OffX + x,
		OffY:  b.OffY + y,
		OffZ:  b.OffZ + z,
		SizeX: b.SizeX,
		SizeY: b.SizeY,
		SizeZ: b.SizeZ,
	}
}
// MinPoint returns the box's origin corner.
func (b Boxd) MinPoint() Vec3d {
	return Vec3d{b.OffX, b.OffY, b.OffZ}
}
// MaxPoint returns the corner opposite the origin.
func (b Boxd) MaxPoint() Vec3d {
	return Vec3d{b.OffX + b.SizeX, b.OffY + b.SizeY, b.OffZ + b.SizeZ}
}
// Contains reports whether point lies inside the box, boundary included.
func (b Boxd) Contains(point Vec3d) bool {
	return point[0] >= b.OffX && point[0] <= b.OffX+b.SizeX &&
		point[1] >= b.OffY && point[1] <= b.OffY+b.SizeY &&
		point[2] >= b.OffZ && point[2] <= b.OffZ+b.SizeZ
}
// pointIntersect returns the start and length of the overlap between the
// 1-D intervals [n, m] and [p, q] (each assumed ordered); it returns
// (0, 0) when the intervals are disjoint.
// (Renamed the result parameters: the previous "min, len" shadowed the
// builtins.)
func pointIntersect(n, m, p, q float64) (start, length float64) {
	if m < p || q < n { // no intersection
		return 0, 0
	}
	ends := []float64{n, m, p, q}
	sort.Float64s(ends)
	// With overlapping intervals, the two middle endpoints bound the overlap.
	return ends[1], ends[2] - ends[1]
}
// Intersect computes the axis-aligned overlap of the two boxes. ok is
// false (with a zero Boxd) when the boxes do not overlap; boxes that
// merely touch on a face yield a zero-length axis and also count as no
// intersection.
func (box1 Boxd) Intersect(box2 Boxd) (ok bool, intersect Boxd) {
	// Overlap per axis: (offset, size) pairs.
	a, b := pointIntersect(box1.OffX, box1.OffX+box1.SizeX, box2.OffX, box2.OffX+box2.SizeX)
	c, d := pointIntersect(box1.OffY, box1.OffY+box1.SizeY, box2.OffY, box2.OffY+box2.SizeY)
	e, f := pointIntersect(box1.OffZ, box1.OffZ+box1.SizeZ, box2.OffZ, box2.OffZ+box2.SizeZ)
	if b == 0 || d == 0 || f == 0 {
		return false, Boxd{}
	} else {
		return true, Boxd{
			OffX: a,
			SizeX: b,
			OffY: c,
			SizeY: d,
			OffZ: e,
			SizeZ: f,
		}
	}
}
package rect
import (
"math"
"sort"
"strings"
disc "github.com/briannoyama/bvh/discreet"
)
// A Bounding Volume for orthotopes. Wraps the orthotope and contains descendents.
type BVol struct {
	vol   *Orthotope // bounds of this node (the wrapped orthotope for leaves)
	desc  [2]*BVol   // the two children; only meaningful when depth > 0
	depth int32      // 0 for leaves, 1 + max child depth otherwise
}
// minBound recomputes this node's bounds from its two children.
// No-op for leaves, whose bounds are the stored orthotope itself.
func (bvol *BVol) minBound() {
	if bvol.depth > 0 {
		bvol.vol.MinBounds(bvol.desc[0].vol, bvol.desc[1].vol)
	}
}
// redepth recomputes this node's depth from its children's depths.
func (bvol *BVol) redepth() {
	bvol.depth = disc.Max(bvol.desc[0].depth, bvol.desc[1].depth) + 1
}
// byDimension sorts orthotopes by their midpoint along one dimension,
// for use with sort.Sort when choosing a split axis.
type byDimension struct {
	orths     []*Orthotope
	dimension int
}
func (d byDimension) Len() int {
	return len(d.orths)
}
func (d byDimension) Swap(i, j int) {
	d.orths[i], d.orths[j] = d.orths[j], d.orths[i]
}
// Compare the midpoints along a dimension.
// (Point + Delta is twice the midpoint; the factor cancels in comparison.)
func (d byDimension) Less(i, j int) bool {
	return (d.orths[i].Point[d.dimension] +
		d.orths[i].Delta[d.dimension]) <
		(d.orths[j].Point[d.dimension] +
			d.orths[j].Delta[d.dimension])
}
// Creates a balanced BVH by recursively halving, sorting and comparing vols.
// Precondition: orths must be non-empty (single-element slices become leaves).
func TopDownBVH(orths []*Orthotope) *BVol {
	// A single orthotope is a leaf node (depth 0).
	if len(orths) == 1 {
		return &BVol{vol: orths[0]}
	}
	comp1 := &Orthotope{}
	comp2 := &Orthotope{}
	mid := len(orths) / 2
	lowDim := 0
	lowScore := int32(math.MaxInt32)
	// Try splitting at the median along each dimension and keep the
	// dimension whose two halves score lowest.
	for d := 0; d < DIMENSIONS; d++ {
		sort.Sort(byDimension{orths: orths, dimension: d})
		comp1.MinBounds(orths[:mid]...)
		comp2.MinBounds(orths[mid:]...)
		score := comp1.Score() + comp2.Score()
		if score < lowScore {
			lowScore = score
			lowDim = d
		}
	}
	// Re-sort by the winning dimension unless the slice is already in
	// that order (the last dimension tried above).
	if lowDim < DIMENSIONS-1 {
		sort.Sort(byDimension{orths: orths, dimension: lowDim})
	}
	bvol := &BVol{vol: comp1,
		desc: [2]*BVol{TopDownBVH(orths[:mid]), TopDownBVH(orths[mid:])}}
	bvol.redepth()
	bvol.minBound()
	return bvol
}
// GetDepth returns this node's depth (0 for leaves).
func (bvol *BVol) GetDepth() int32 {
	return bvol.depth
}
// Get an iterator for each volume in a Bounding Volume Hierarhcy.
func (bvol *BVol) Iterator() *orthStack {
	stack := &orthStack{bvh: bvol, bvStack: []*BVol{bvol}, intStack: []int32{0}}
	return stack
}
// Add an orthotope to a Bounding Volume Hierarchy. Only add to root volume.
func (bvol *BVol) Add(orth *Orthotope) bool {
	s := bvol.Iterator()
	return s.Add(orth)
}
// Remove deletes an orthotope from the hierarchy; call on the root volume.
func (bvol *BVol) Remove(orth *Orthotope) bool {
	s := bvol.Iterator()
	return s.Remove(orth)
}
// Score returns the hierarchy's total score via a full traversal.
func (bvol *BVol) Score() int32 {
	s := bvol.Iterator()
	return s.Score()
}
// Rebalances the children of a given volume.
// The deeper child lends a grandchild to the shallower side; when both
// children are equally deep internal nodes, their grandchildren are
// considered for exchange instead.
func (bvol *BVol) redistribute() {
	if bvol.desc[1].depth > bvol.desc[0].depth {
		swapCheck(bvol.desc[1], bvol, 0)
	} else if bvol.desc[1].depth < bvol.desc[0].depth {
		swapCheck(bvol.desc[0], bvol, 1)
	} else if bvol.desc[1].depth > 0 {
		swapCheck(bvol.desc[0], bvol.desc[1], 1)
	}
	bvol.redepth()
}
// swapCheck trial-swaps each child of first with second.desc[secIndex],
// keeping whichever configuration (including no swap) yields the lowest
// combined score without unbalancing second.
// NOTE(review): the loop leaves the index-1 trial swap applied; the final
// `minIndex < 1` swap either undoes it (minIndex == -1 restores the
// original arrangement) or rolls back to the index-0 configuration
// (minIndex == 0). minIndex == 1 means the current state is already best.
func swapCheck(first *BVol, second *BVol, secIndex int) {
	first.minBound()
	second.minBound()
	minScore := first.vol.Score() + second.vol.Score()
	minIndex := -1
	for index := 0; index < 2; index++ {
		first.desc[index], second.desc[secIndex] =
			second.desc[secIndex], first.desc[index]
		// Ensure that swap did not unbalance second.
		if disc.Abs(second.desc[0].depth-second.desc[1].depth) < 2 {
			// Score first then second, since first may be a child of second.
			first.minBound()
			second.minBound()
			score := first.vol.Score() + second.vol.Score()
			if score < minScore {
				// Update the children with the best split
				minScore = score
				minIndex = index
			}
		}
	}
	if minIndex < 1 {
		first.desc[minIndex+1], second.desc[secIndex] =
			second.desc[secIndex], first.desc[minIndex+1]
		// Recalculate bounding volume
		first.minBound()
		second.minBound()
	}
	// Recalculate depth
	first.redepth()
	second.redepth()
}
//Recursive algorithm for comparing BVHs.
//Leaves are compared by orthotope pointer identity; internal nodes by
//bound equality plus child equality in either order (children are
//unordered).
func (bvh *BVol) Equals(other *BVol) bool {
	return (bvh.depth == 0 && other.depth == 0 && bvh.vol == other.vol) ||
		(bvh.depth > 0 && other.depth > 0 && bvh.vol.Equals(other.vol) &&
			((bvh.desc[0].Equals(other.desc[0]) && bvh.desc[1].Equals(other.desc[1])) ||
				(bvh.desc[1].Equals(other.desc[0]) && bvh.desc[0].Equals(other.desc[1]))))
}
// An indented string representation of the BVH (helps for debugging).
// Each volume is printed on its own line, indented by how far below the
// root it sits.
func (bvh *BVol) String() string {
	iter := bvh.Iterator()
	maxDepth := bvh.depth
	toPrint := []string{}
	for iter.HasNext() {
		next := iter.Next()
		toPrint = append(toPrint, strings.Repeat(" ", int(maxDepth-next.depth)))
		toPrint = append(toPrint, next.vol.String()+"\n")
	}
	return strings.Join(toPrint, "")
}
package v2
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"github.com/akamai/AkamaiOPEN-edgegrid-golang/client-v1"
"github.com/akamai/AkamaiOPEN-edgegrid-golang/edgegrid"
)
// NetworkList Encapsulates information about each network list.
type NetworkList struct {
	// Name of this network list's access control group (ACG).
	AccessControlGroup string `json:"accessControlGroup,omitempty"`
	// Encapsulates the set of [API hypermedia](#apihypermedia) to access a set of resources related to this network list. The object is arranged as a hash of keys, each of which represents a link relation.
	Links Links `json:"links,omitempty"`
	// ISO 8601 timestamp indicating when the network list was first created. Available only when using the `extended` query parameter to retrieve network list data.
	CreateDate string `json:"createDate,omitempty"`
	// Username of this list's creator. Available only when using the `extended` query parameter to retrieve network list data.
	CreatedBy string `json:"createdBy,omitempty"`
	// Detailed description of the list.
	Description string `json:"description,omitempty"`
	// Reflects the number of elements in the `list` array, which may not necessarily appear in the object when retrieving the list with the `includeElements` query parameter set to `false`.
	ElementCount int `json:"elementCount,omitempty"`
	// For clients with access to _expedited_ activations on select servers, provides the most recent activation status in the `PRODUCTION` environment. See [Activation States](#activationvalues) for details on each activation state. Available only when using the `extended` query parameter to retrieve network list data.
	ExpeditedProductionActivationStatus string `json:"expeditedProductionActivationStatus,omitempty"`
	// For clients with access to _expedited_ activations on select servers, provides the most recent activation status in the `STAGING` environment. See [Activation States](#activationvalues) for details on each activation state. Available only when using the `extended` query parameter to retrieve network list data.
	ExpeditedStagingActivationStatus string `json:"expeditedStagingActivationStatus,omitempty"`
	// List of IPs or Countries
	List []string `json:"list,omitempty"`
	// Display name of the network list.
	Name string `json:"name"`
	// If set to `extendedNetworkListResponse`, indicates that the current data features members enabled with the `extended` query parameter. Otherwise a plain `networkListResponse` value indicates this additional data is absent.
	NetworkListType string `json:"networkListType,omitempty"`
	// The most recent activation status of the current list in the `PRODUCTION` environment. See [Activation States](#activationvalues) for details on each activation state. Available only when using the `extended` query parameter to retrieve network list data.
	ProductionActivationStatus string `json:"productionActivationStatus,omitempty"`
	// If `true`, indicates that you do not have permission to modify the network list. This may indicate either a network list that Akamai manages, or insufficient permission for your API client's identity to modify a customer-managed list. The default value is `false`.
	ReadOnly bool `json:"readOnly,omitempty"`
	// If `true`, indicates that this list has been shared with you by Akamai or some other account. The default value is `false`. Shared lists are always read only
	Shared bool `json:"shared,omitempty"`
	// The most recent activation status of the current list in the `STAGING` environment. See [Activation States](#activationvalues) for details on each activation state. Available only when using the `extended` query parameter to retrieve network list data.
	StagingActivationStatus string `json:"stagingActivationStatus,omitempty"`
	// Identifies each version of the network list, which increments each time it's modified. You need to include this value in any requests to modify the list. See [Concurrency control](#concurrency) for details.
	SyncPoint uint `json:"syncPoint"`
	// The network list type, either `IP` for IP addresses and CIDR blocks, or `GEO` for two-letter country codes.
	Type string `json:"type"`
	// A unique identifier for each network list, corresponding to the `networkListId` URL parameter.
	UniqueId string `json:"uniqueId"`
	// ISO 8601 timestamp indicating when the network list was last modified. Available only when using the `extended` query parameter to retrieve network list data.
	UpdateDate string `json:"updateDate,omitempty"`
	// Username of the user who last modified this list. Available only when using the `extended` query parameter to retrieve network list data.
	UpdatedBy string `json:"updatedBy,omitempty"`
}
// ListNetworkLists Get all network list according to search criteria.
// [API](https://techdocs.akamai.com/network-lists/reference/get-network-lists)
func ListNetworkLists(config edgegrid.Config, includeElements bool, search *string, listType *string, extended bool) ([]NetworkList, error) {
type listResponse struct {
Links struct {
Create struct {
Href string `json:"href"`
Method string `json:"method"`
} `json:"create"`
} `json:"links"`
NetworkLists []NetworkList `json:"networkLists"`
}
var (
err error
req *http.Request
resp *http.Response
data []byte
query string
wrapper listResponse
)
query = fmt.Sprintf("/network-list/v2/network-lists?extended=%v&includeElements=%v", extended, includeElements)
if search != nil {
query = query + "&search=" + url.QueryEscape(*search)
}
if listType != nil {
query = query + "&listType=" + *listType
}
req, err = client.NewRequest(config, "GET", query, nil)
if err != nil {
return nil, CreateRequestFailed
}
resp, err = client.Do(config, req)
if err != nil {
return nil, ExecRequestFailed
}
defer resp.Body.Close()
data, err = ioutil.ReadAll(resp.Body)
if err != nil {
return nil, ReadBodyFailed
}
if client.IsError(resp) {
return nil, fmt.Errorf("%s", string(data))
}
if err := json.Unmarshal(data, &wrapper); err != nil {
return nil, JsonError
}
return wrapper.NetworkLists, nil
} | list.go | 0.783326 | 0.50769 | list.go | starcoder |
package go_
// IsDefaultValue reports whether a holds the zero value for its dynamic type.
// Numeric types are default at 0, strings when empty, bools when false, and
// slices when nil or zero length. Values of unlisted types are default only
// when the interface itself is nil. (bool and []uint were previously missing
// and always reported as non-default.)
func IsDefaultValue(a interface{}) bool {
	switch v := a.(type) {
	case bool:
		return !v
	case int:
		return v == 0
	case int8:
		return v == 0
	case int16:
		return v == 0
	case int32:
		return v == 0
	case int64:
		return v == 0
	case []int:
		return len(v) == 0
	case []int8:
		return len(v) == 0
	case []int16:
		return len(v) == 0
	case []int32:
		return len(v) == 0
	case []int64:
		return len(v) == 0
	case uint:
		return v == 0
	case uint8:
		return v == 0
	case uint16:
		return v == 0
	case uint32:
		return v == 0
	case uint64:
		return v == 0
	case []uint:
		return len(v) == 0
	case []uint8:
		return len(v) == 0
	case []uint16:
		return len(v) == 0
	case []uint32:
		return len(v) == 0
	case []uint64:
		return len(v) == 0
	case float32:
		return v == 0
	case float64:
		return v == 0
	case []float32:
		return len(v) == 0
	case []float64:
		return len(v) == 0
	case string:
		return v == ""
	case []string:
		return len(v) == 0
	default:
		return a == nil
	}
}
// NotEmpty reports whether s is a non-empty string.
func NotEmpty(s string) bool {
	return s != ""
}

// NotNilNorEmpty reports whether s points to a non-empty string.
func NotNilNorEmpty(s *string) bool {
	if s == nil {
		return false
	}
	return *s != ""
}
// OrString returns the first non-empty string among s1, s2 and ss, or the
// empty string when all are empty. Short cut for golang if else.
// (Previously this delegated to Or, which received ss boxed as a single
// []string element; the res.(string) assertion then panicked whenever the
// variadic fallback was consulted.)
func OrString(s1, s2 string, ss ...string) string {
	if s1 != "" {
		return s1
	}
	if s2 != "" {
		return s2
	}
	for _, s := range ss {
		if s != "" {
			return s
		}
	}
	return ""
}
// OrStringArray returns the first non-empty string slice among arr1, arr2 and
// arrs, or an empty slice when all are empty. Short cut for golang if else.
// (Previously this delegated to Or, which boxed arrs as one [][]string value;
// the res.([]string) assertion then panicked whenever the fallback was
// consulted — including the no-extra-arguments case.)
func OrStringArray(arr1, arr2 []string, arrs ...[]string) []string {
	if len(arr1) > 0 {
		return arr1
	}
	if len(arr2) > 0 {
		return arr2
	}
	for _, a := range arrs {
		if len(a) > 0 {
			return a
		}
	}
	return []string{}
}
// OrBool returns true when any of its arguments is true, false otherwise.
func OrBool(a, b bool, bs ...bool) bool {
	if a || b {
		return true
	}
	for _, v := range bs {
		if v {
			return true
		}
	}
	return false
}
// The numeric Or* helpers return the first non-zero argument, or 0 when every
// candidate is zero. They previously delegated to Or, which received the
// variadic tail boxed as a single slice value; the typed assertion on the
// result then panicked whenever the fallback was consulted. Each helper now
// checks its arguments directly.

// OrInt64 returns the first non-zero int64 among its arguments.
func OrInt64(a, b int64, is ...int64) int64 {
	if a != 0 {
		return a
	}
	if b != 0 {
		return b
	}
	for _, v := range is {
		if v != 0 {
			return v
		}
	}
	return 0
}

// OrInt32 returns the first non-zero int32 among its arguments.
func OrInt32(a, b int32, is ...int32) int32 {
	if a != 0 {
		return a
	}
	if b != 0 {
		return b
	}
	for _, v := range is {
		if v != 0 {
			return v
		}
	}
	return 0
}

// OrInt16 returns the first non-zero int16 among its arguments.
func OrInt16(a, b int16, is ...int16) int16 {
	if a != 0 {
		return a
	}
	if b != 0 {
		return b
	}
	for _, v := range is {
		if v != 0 {
			return v
		}
	}
	return 0
}

// OrInt8 returns the first non-zero int8 among its arguments.
func OrInt8(a, b int8, is ...int8) int8 {
	if a != 0 {
		return a
	}
	if b != 0 {
		return b
	}
	for _, v := range is {
		if v != 0 {
			return v
		}
	}
	return 0
}

// OrInt returns the first non-zero int among its arguments.
func OrInt(a, b int, is ...int) int {
	if a != 0 {
		return a
	}
	if b != 0 {
		return b
	}
	for _, v := range is {
		if v != 0 {
			return v
		}
	}
	return 0
}

// OrFloat64 returns the first non-zero float64 among its arguments.
func OrFloat64(a, b float64, is ...float64) float64 {
	if a != 0 {
		return a
	}
	if b != 0 {
		return b
	}
	for _, v := range is {
		if v != 0 {
			return v
		}
	}
	return 0
}

// OrFloat32 returns the first non-zero float32 among its arguments.
func OrFloat32(a, b float32, is ...float32) float32 {
	if a != 0 {
		return a
	}
	if b != 0 {
		return b
	}
	for _, v := range is {
		if v != 0 {
			return v
		}
	}
	return 0
}
// The Or*Array helpers return the first non-empty slice among their
// arguments, or an empty slice when every candidate is empty. They previously
// funneled through Or, which received the variadic tail boxed as a single
// slice-of-slices: the typed assertion on the result panicked whenever the
// fallback was consulted, and []uint (absent from IsDefaultValue) was never
// recognized as empty at all. Each helper now checks its arguments directly.

// OrIntArray returns the first non-empty []int among its arguments.
func OrIntArray(arr1, arr2 []int, arrs ...[]int) []int {
	if len(arr1) > 0 {
		return arr1
	}
	if len(arr2) > 0 {
		return arr2
	}
	for _, a := range arrs {
		if len(a) > 0 {
			return a
		}
	}
	return []int{}
}

// OrInt8Array returns the first non-empty []int8 among its arguments.
func OrInt8Array(arr1, arr2 []int8, arrs ...[]int8) []int8 {
	if len(arr1) > 0 {
		return arr1
	}
	if len(arr2) > 0 {
		return arr2
	}
	for _, a := range arrs {
		if len(a) > 0 {
			return a
		}
	}
	return []int8{}
}

// OrInt16Array returns the first non-empty []int16 among its arguments.
func OrInt16Array(arr1, arr2 []int16, arrs ...[]int16) []int16 {
	if len(arr1) > 0 {
		return arr1
	}
	if len(arr2) > 0 {
		return arr2
	}
	for _, a := range arrs {
		if len(a) > 0 {
			return a
		}
	}
	return []int16{}
}

// OrInt32Array returns the first non-empty []int32 among its arguments.
func OrInt32Array(arr1, arr2 []int32, arrs ...[]int32) []int32 {
	if len(arr1) > 0 {
		return arr1
	}
	if len(arr2) > 0 {
		return arr2
	}
	for _, a := range arrs {
		if len(a) > 0 {
			return a
		}
	}
	return []int32{}
}

// OrInt64Array returns the first non-empty []int64 among its arguments.
func OrInt64Array(arr1, arr2 []int64, arrs ...[]int64) []int64 {
	if len(arr1) > 0 {
		return arr1
	}
	if len(arr2) > 0 {
		return arr2
	}
	for _, a := range arrs {
		if len(a) > 0 {
			return a
		}
	}
	return []int64{}
}

// OrUIntArray returns the first non-empty []uint among its arguments.
func OrUIntArray(arr1, arr2 []uint, arrs ...[]uint) []uint {
	if len(arr1) > 0 {
		return arr1
	}
	if len(arr2) > 0 {
		return arr2
	}
	for _, a := range arrs {
		if len(a) > 0 {
			return a
		}
	}
	return []uint{}
}

// OrUInt8Array returns the first non-empty []uint8 among its arguments.
func OrUInt8Array(arr1, arr2 []uint8, arrs ...[]uint8) []uint8 {
	if len(arr1) > 0 {
		return arr1
	}
	if len(arr2) > 0 {
		return arr2
	}
	for _, a := range arrs {
		if len(a) > 0 {
			return a
		}
	}
	return []uint8{}
}

// OrUInt16Array returns the first non-empty []uint16 among its arguments.
func OrUInt16Array(arr1, arr2 []uint16, arrs ...[]uint16) []uint16 {
	if len(arr1) > 0 {
		return arr1
	}
	if len(arr2) > 0 {
		return arr2
	}
	for _, a := range arrs {
		if len(a) > 0 {
			return a
		}
	}
	return []uint16{}
}

// OrUInt32Array returns the first non-empty []uint32 among its arguments.
func OrUInt32Array(arr1, arr2 []uint32, arrs ...[]uint32) []uint32 {
	if len(arr1) > 0 {
		return arr1
	}
	if len(arr2) > 0 {
		return arr2
	}
	for _, a := range arrs {
		if len(a) > 0 {
			return a
		}
	}
	return []uint32{}
}

// OrUInt64Array returns the first non-empty []uint64 among its arguments.
func OrUInt64Array(arr1, arr2 []uint64, arrs ...[]uint64) []uint64 {
	if len(arr1) > 0 {
		return arr1
	}
	if len(arr2) > 0 {
		return arr2
	}
	for _, a := range arrs {
		if len(a) > 0 {
			return a
		}
	}
	return []uint64{}
}

// OrFloat32Array returns the first non-empty []float32 among its arguments.
func OrFloat32Array(arr1, arr2 []float32, arrs ...[]float32) []float32 {
	if len(arr1) > 0 {
		return arr1
	}
	if len(arr2) > 0 {
		return arr2
	}
	for _, a := range arrs {
		if len(a) > 0 {
			return a
		}
	}
	return []float32{}
}

// OrFloat64Array returns the first non-empty []float64 among its arguments.
func OrFloat64Array(arr1, arr2 []float64, arrs ...[]float64) []float64 {
	if len(arr1) > 0 {
		return arr1
	}
	if len(arr2) > 0 {
		return arr2
	}
	for _, a := range arrs {
		if len(a) > 0 {
			return a
		}
	}
	return []float64{}
}
func Or(a, b interface{}, vals ...interface{}) interface{} {
if !IsDefaultValue(a) {
return a
}
if !IsDefaultValue(b) {
return b
}
for _, val := range vals {
if !IsDefaultValue(val) {
return val
}
}
return nil
} | funcs.go | 0.509032 | 0.522811 | funcs.go | starcoder |
package memio
import (
"errors"
"io"
)
// ReadWriteMem is a combination of both the ReadMem and WriteMem types,
// allowing both all reads and writes to the same underlying byte slice.
// It embeds WriteMem, so reads and writes share a single position cursor.
type ReadWriteMem struct {
	WriteMem
}
// OpenMem uses a byte slice for reading and writing. Implements io.Reader,
// io.Writer, io.Seeker, io.ReaderAt, io.ByteReader, io.WriterTo, io.WriterAt,
// io.ByteWriter and io.ReaderFrom. The returned value starts at position 0.
func OpenMem(data *[]byte) *ReadWriteMem {
	rw := new(ReadWriteMem)
	rw.data = data
	rw.pos = 0
	return rw
}
// Peek reads the next n bytes without advancing the position. When fewer
// than n bytes remain it returns whatever is left along with io.EOF.
func (b *ReadWriteMem) Peek(n int) ([]byte, error) {
	switch {
	case b.data == nil:
		return nil, ErrClosed
	case b.pos >= len(*b.data):
		return nil, io.EOF
	case b.pos+n > len(*b.data):
		return (*b.data)[b.pos:], io.EOF
	}
	return (*b.data)[b.pos : b.pos+n], nil
}
// Read is an implementation of the io.Reader interface. It copies as many
// bytes as fit into p and advances the position by that amount.
func (b *ReadWriteMem) Read(p []byte) (int, error) {
	switch {
	case b.data == nil:
		return 0, ErrClosed
	case b.pos >= len(*b.data):
		return 0, io.EOF
	}
	copied := copy(p, (*b.data)[b.pos:])
	b.pos += copied
	return copied, nil
}
// ReadByte is an implementation of the io.ByteReader interface. It returns
// the byte at the current position and advances the position by one.
func (b *ReadWriteMem) ReadByte() (byte, error) {
	switch {
	case b.data == nil:
		return 0, ErrClosed
	case b.pos >= len(*b.data):
		return 0, io.EOF
	}
	next := (*b.data)[b.pos]
	b.pos++
	return next, nil
}
// UnreadByte implements the io.ByteScanner interface by stepping the
// position back one byte. It fails when already at the start of the buffer.
func (b *ReadWriteMem) UnreadByte() error {
	switch {
	case b.data == nil:
		return ErrClosed
	case b.pos <= 0:
		return ErrInvalidUnreadByte
	}
	b.pos--
	return nil
}
// ReadAt is an implementation of the io.ReaderAt interface. It reads from
// the absolute offset off without using or moving the current position.
// Per the io.ReaderAt contract it returns io.EOF when fewer than len(p)
// bytes are available at off (the previous version returned a nil error on
// short reads, and panicked on negative offsets).
func (b *ReadWriteMem) ReadAt(p []byte, off int64) (int, error) {
	if b.data == nil {
		return 0, ErrClosed
	}
	if off < 0 {
		return 0, errors.New("memio: invalid negative offset")
	}
	if off >= int64(len(*b.data)) {
		return 0, io.EOF
	}
	n := copy(p, (*b.data)[off:])
	if n < len(p) {
		// Short read: io.ReaderAt requires a non-nil error explaining why
		// fewer than len(p) bytes were returned.
		return n, io.EOF
	}
	return n, nil
}
// WriteTo is an implementation of the io.WriterTo interface. It writes all
// unread bytes to f and advances the position to the end of the buffer,
// regardless of whether the write succeeded completely.
func (b *ReadWriteMem) WriteTo(f io.Writer) (int64, error) {
	switch {
	case b.data == nil:
		return 0, ErrClosed
	case b.pos >= len(*b.data):
		return 0, io.EOF
	}
	written, err := f.Write((*b.data)[b.pos:])
	b.pos = len(*b.data)
	return int64(written), err
}
// ErrInvalidUnreadByte is returned by UnreadByte when the position is
// already at the start of the buffer, so there is no byte to unread.
var ErrInvalidUnreadByte = errors.New("invalid UnreadByte, no bytes read")
package core
import (
"git.maze.io/go/math32"
)
// Camera generates primary rays for an image of Width x Height pixels from
// an orthonormal frame (Forward, Right, Up) anchored at Origin.
type Camera struct {
	Width       uint32  // output image width in pixels
	Height      uint32  // output image height in pixels
	Aspect      float32 // Width / Height (see the constructors)
	DX          float32 // tan of the half field-of-view in x (set by the constructors)
	DY          float32 // tan of the half field-of-view in y (set by the constructors)
	Origin      Vector3 // eye position
	Forward     Vector3 // view direction (unit length after LookAt)
	Right       Vector3 // right axis of the camera frame
	Up          Vector3 // up axis of the camera frame
	LensRadius  float32 // half the aperture diameter; 0 disables depth of field
}
// LookAt places the camera at eye, aims it toward at, and rebuilds an
// orthonormal frame using up as the approximate up direction.
func (camera *Camera) LookAt(eye, at, up Vector3) {
	forward := NormalizeVector3(SubVector3(at, eye))
	// When up is (nearly) parallel to the view direction the cross products
	// below would degenerate, so substitute a permutation of forward.
	if cosAngle := DotVector3(forward, up); math32.Abs(cosAngle) > 0.999 {
		up = Vector3{forward.Z, forward.X, forward.Y}
	}
	right := NormalizeVector3(CrossVector3(forward, up))
	camera.Origin = eye
	camera.Forward = forward
	camera.Right = right
	camera.Up = NormalizeVector3(CrossVector3(right, forward))
}
// screenToNDC maps a pixel index plus an in-pixel jitter to Normalized
// Device Coordinates in [-1, 1), sampling through the pixel center.
func screenToNDC(x, resolution uint32, jitter float32) float32 {
	sample := (float32(x) + 0.5 + jitter) / float32(resolution)
	return 2.0*sample - 1.0
}
// GenerateRay builds a primary ray through pixel (x, y). screenSample
// jitters the sample position within the pixel (the -0.499 recenters a
// [0,1) sample around the pixel center); lensSample picks a point on the
// lens disk for depth of field — with LensRadius 0 the ray starts at the
// camera origin. NOTE(review): lensSample.X scales Up and .Y scales Right;
// presumably intentional since the disk sample is symmetric — confirm.
func (camera *Camera) GenerateRay(x, y uint32, screenSample, lensSample Sample2) Ray {
	// Sample a point on the lens disk, scaled to the aperture radius.
	lensSample = RandomOnDisk(lensSample.X, lensSample.Y).Mul(camera.LensRadius)
	originUp := MulVector3(lensSample.X, camera.Up)
	originRight := MulVector3(lensSample.Y, camera.Right)
	origin := AddVector3(camera.Origin, AddVector3(originUp, originRight));
	// Map the jittered pixel position to NDC, scaled by the half-FOV tangents.
	dx := camera.DX * screenToNDC(x, camera.Width, screenSample.X-0.499)
	dy := camera.DY * screenToNDC(y, camera.Height, screenSample.Y-0.499)
	right := MulVector3(dx, camera.Right)
	up := MulVector3(dy, camera.Up)
	direction := NormalizeVector3(AddVector3(AddVector3(right, up), camera.Forward))
	return Ray{origin, direction}
}
// NewCameraPerspectiveFov creates a perspective camera for the given image
// resolution and vertical field of view fovy (in radians, per math32.Tan).
// It is exactly NewCameraPerspectiveLens with a zero aperture (no depth of
// field); delegating removes the previously duplicated construction logic.
func NewCameraPerspectiveFov(width uint32, height uint32, fovy float32) Camera {
	return NewCameraPerspectiveLens(width, height, fovy, 0.0)
}
func NewCameraPerspectiveLens(width, height uint32, fovy, aperture float32) Camera {
aspect := float32(width) / float32(height)
fovy = math32.Tan(0.5 * fovy)
fovx := fovy * aspect
return Camera{width, height, aspect, fovx, fovy,
Vector3{}, Vector3{0.0, 0.0, -1.0}, Vector3{1.0, 0.0, 0.0}, Vector3{0.0, 1.0, 0.0},
aperture*0.5}
} | core/camera.go | 0.815857 | 0.641296 | camera.go | starcoder |
package manifold
import (
"encoding/json"
"strings"
"github.com/manifoldco/go-manifold/number"
"github.com/pkg/errors"
)
// FeatureMap stores the selected feature values for a Manifold resource
type FeatureMap map[string]interface{}
// Equals checks the equality of another FeatureMap against this one
func (f FeatureMap) Equals(fm FeatureMap) bool {
if len(f) != len(fm) {
return false
}
for k, v := range f {
if val, ok := fm[k]; !ok || val != v {
return false
}
}
return true
}
// MetadataValue stores a single typed metadata entry for a Manifold resource.
// Type names which Go type Value is expected to hold after coercion by
// tryCastFields (string, bool, int64, float64, or Metadata).
type MetadataValue struct {
	Type  MetadataValueType `json:"type"`
	Value interface{}       `json:"value"`
}

// MetadataValueType defines metadata type identifiers
type MetadataValueType string

const (
	// MetadataValueTypeString identifies the string type
	MetadataValueTypeString MetadataValueType = "string"
	// MetadataValueTypeBool identifies the bool type
	MetadataValueTypeBool MetadataValueType = "bool"
	// MetadataValueTypeInt identifies the int type
	MetadataValueTypeInt MetadataValueType = "int"
	// MetadataValueTypeFloat identifies the float type
	MetadataValueTypeFloat MetadataValueType = "float"
	// MetadataValueTypeObject identifies the object type
	MetadataValueTypeObject MetadataValueType = "object"
)
// Equals reports whether md has the same type and value as m.
func (m *MetadataValue) Equals(md MetadataValue) bool {
	if m.Type != md.Type {
		return false
	}
	return m.Value == md.Value
}
// tryCastFields validates that Value matches Type and normalizes it in
// place: ints become int64, floats become float64, and objects become
// Metadata (recursively built via FromMap when decoded from raw JSON maps).
// It returns a descriptive error when Value cannot be coerced.
func (m *MetadataValue) tryCastFields() error {
	switch m.Type {
	case MetadataValueTypeString:
		_, ok := m.Value.(string)
		if !ok {
			return errors.New("Expected value to be a string but it was not")
		}
	case MetadataValueTypeBool:
		_, ok := m.Value.(bool)
		if !ok {
			return errors.New("Expected value to be a boolean but it was not")
		}
	case MetadataValueTypeInt:
		// Normalize any numeric representation (e.g. JSON float64) to int64.
		val, err := number.ToInt64(m.Value)
		if err != nil {
			return errors.Errorf(
				"Expected value to be castable to int64 but it was not: %s", err.Error())
		}
		m.Value = val
	case MetadataValueTypeFloat:
		val, err := number.ToFloat64(m.Value)
		if err != nil {
			return errors.Errorf(
				"Expected value to be castable to float64 but it was not: %s", err.Error())
		}
		m.Value = val
	case MetadataValueTypeObject:
		val, ok := m.Value.(Metadata)
		if !ok {
			// Raw JSON decodes objects as map[string]interface{}; rebuild a
			// Metadata by converting each entry through FromMap.
			valMap, ok := m.Value.(map[string]interface{})
			if !ok {
				return errors.Errorf("Expected value to be a valid metadata object but it was not")
			}
			val = Metadata{}
			for k, v := range valMap {
				vMap, ok := v.(map[string]interface{})
				if !ok {
					return errors.Errorf(
						"Expected value to be a valid metadata object but it's values aren't compatible")
				}
				kv := MetadataValue{}
				if err := kv.FromMap(vMap); err != nil {
					return errors.Errorf(
						"Expected value to be a valid metadata object but it was not: %s", err.Error())
				}
				val[k] = kv
			}
		}
		m.Value = val
	default:
		return errors.Errorf(
			"%s is not a valid type, expected 'string', 'int', 'float', 'bool', or 'object'", m.Type)
	}
	return nil
}
// FromMap tries to make a MetadataValue from a supplied map containing
// 'type' and 'value' keys, coercing the value via tryCastFields.
func (m *MetadataValue) FromMap(md map[string]interface{}) error {
	rawType, ok := md["type"]
	if !ok {
		return errors.New("Could not make MetadataValue from map, it did not contain a 'type' key")
	}
	typeName, ok := rawType.(string)
	if !ok {
		return errors.New("Could not make MetadataValue from map, the type key was not a string as expected")
	}
	value, ok := md["value"]
	if !ok {
		return errors.New("Could not make MetadataValue from map, it did not contain a 'value' key")
	}
	m.Type = MetadataValueType(typeName)
	m.Value = value
	if err := m.tryCastFields(); err != nil {
		return errors.Errorf("Could not make MetadataValue from map: %s", err.Error())
	}
	return nil
}
// UnmarshalJSON controls how a MetadataValue is parsed from JSON. The raw
// type/value pair is decoded into an anonymous struct first (avoiding an
// infinite recursion back into this method) and then coerced via
// tryCastFields.
func (m *MetadataValue) UnmarshalJSON(data []byte) error {
	mv := &struct {
		Type  string      `json:"type"`
		Value interface{} `json:"value"`
	}{}
	// Propagate decode failures instead of silently accepting a zero value
	// (the error was previously ignored).
	if err := json.Unmarshal(data, mv); err != nil {
		return err
	}
	m.Type = MetadataValueType(mv.Type)
	m.Value = mv.Value
	return m.tryCastFields()
}
// Validate validates this MetadataValue: Value must hold the already-coerced
// Go type named by Type (see tryCastFields), and object values must validate
// recursively.
func (m *MetadataValue) Validate(_ interface{}) error {
	switch m.Type {
	case MetadataValueTypeString:
		if _, ok := m.Value.(string); !ok {
			return errors.New("Expected value to be a string but it was not")
		}
	case MetadataValueTypeInt:
		if _, ok := m.Value.(int64); !ok {
			return errors.New("Expected value to be a int64 but it was not")
		}
	case MetadataValueTypeFloat:
		if _, ok := m.Value.(float64); !ok {
			return errors.New("Expected value to be a float64 but it was not")
		}
	case MetadataValueTypeBool:
		if _, ok := m.Value.(bool); !ok {
			return errors.New("Expected value to be a bool but it was not")
		}
	case MetadataValueTypeObject:
		obj, ok := m.Value.(Metadata)
		if !ok {
			return errors.New("Expected value to be a Metadata but it was not")
		}
		if err := obj.Validate(nil); err != nil {
			return errors.Errorf("Metadata value was not valid: %s", err.Error())
		}
	default:
		return errors.Errorf(
			"%s is not a valid type, expected 'string', 'int', 'float', 'bool', or 'object'", m.Type)
	}
	return nil
}
// Metadata stores Metadata for a Manifold resource, keyed by label.
type Metadata map[string]MetadataValue

// MetadataMaxSize defines the max size of the metadata JSON in bytes
const MetadataMaxSize = 10 * 1024

// ErrMetadataNonexistantKey is returned by the Get* accessors when the
// requested key is not present.
var ErrMetadataNonexistantKey = errors.New("Key does not exist")

// ErrMetadataUnexpectedValueType is returned by the Get* accessors when the
// stored value's declared type isn't the one requested.
var ErrMetadataUnexpectedValueType = errors.New("Found value but it was not the expected type")
// Equals reports whether md holds exactly the same keys and values as m.
func (m Metadata) Equals(md Metadata) bool {
	if len(m) != len(md) {
		return false
	}
	for key, want := range m {
		got, ok := md[key]
		if !ok || !got.Equals(want) {
			return false
		}
	}
	return true
}
// Validate validates this Metadata: every key must be a valid Manifold
// label, every value must itself validate, and the JSON encoding of the
// whole map must not exceed MetadataMaxSize bytes.
func (m Metadata) Validate(_ interface{}) error {
	for k, v := range m {
		// Make sure key is a label
		if err := Label(k).Validate(nil); err != nil {
			return errors.Errorf("Key '%s' is not a valid Manifold Label", k)
		}
		// Make sure value is valid
		if err := v.Validate(nil); err != nil {
			return errors.Errorf("Value of key '%s' is not valid: %s", k, err.Error())
		}
	}
	// Make sure total serialized length isn't too long. The marshal error is
	// no longer ignored: an unserializable map cannot satisfy the size check.
	b, err := json.Marshal(m)
	if err != nil {
		return errors.Errorf("Metadata could not be serialized: %s", err.Error())
	}
	if bLen := len(b); bLen > MetadataMaxSize {
		// Report how far over the limit the payload is (the previous
		// message printed a negative number here).
		return errors.Errorf("%d is %d bytes larger than the Metadata size limit of %d",
			bLen, bLen-MetadataMaxSize, MetadataMaxSize)
	}
	return nil
}
// GetString returns the value of the specified key as a string, or returns an error
func (m Metadata) GetString(key string) (*string, error) {
	val, ok := m[key]
	switch {
	case !ok:
		return nil, ErrMetadataNonexistantKey
	case val.Type != MetadataValueTypeString:
		return nil, ErrMetadataUnexpectedValueType
	}
	out, _ := val.Value.(string)
	return &out, nil
}

// GetBool returns the value of the specified key as a bool, or returns an error
func (m Metadata) GetBool(key string) (*bool, error) {
	val, ok := m[key]
	switch {
	case !ok:
		return nil, ErrMetadataNonexistantKey
	case val.Type != MetadataValueTypeBool:
		return nil, ErrMetadataUnexpectedValueType
	}
	out, _ := val.Value.(bool)
	return &out, nil
}

// GetInt returns the value of the specified key as a int64, or returns an error
func (m Metadata) GetInt(key string) (*int64, error) {
	val, ok := m[key]
	switch {
	case !ok:
		return nil, ErrMetadataNonexistantKey
	case val.Type != MetadataValueTypeInt:
		return nil, ErrMetadataUnexpectedValueType
	}
	out, _ := val.Value.(int64)
	return &out, nil
}

// GetFloat returns the value of the specified key as a float64, or returns an error
func (m Metadata) GetFloat(key string) (*float64, error) {
	val, ok := m[key]
	switch {
	case !ok:
		return nil, ErrMetadataNonexistantKey
	case val.Type != MetadataValueTypeFloat:
		return nil, ErrMetadataUnexpectedValueType
	}
	out, _ := val.Value.(float64)
	return &out, nil
}

// GetObject returns the value of the specified key as a Metadata, or returns an error
func (m Metadata) GetObject(key string) (Metadata, error) {
	val, ok := m[key]
	switch {
	case !ok:
		return nil, ErrMetadataNonexistantKey
	case val.Type != MetadataValueTypeObject:
		return nil, ErrMetadataUnexpectedValueType
	}
	out, _ := val.Value.(Metadata)
	return out, nil
}
// AnnotationsMap defines a map of string arrays that contain the annotations data
type AnnotationsMap map[string][]string
// AnnotationMaxReservedKeys defines the max number of reserved keys (Keys prefixed with manifold.co)
const AnnotationMaxReservedKeys = 20
// AnnotationReservedKeyPrefix is the prefix a key must start with to be considered reserved
const AnnotationReservedKeyPrefix = "manifold.co"
// AnnotationKnownReservedKeys is an array of all the known reserved keys, any other key prefixed with the reserved
// key prefix will cause an error.
var AnnotationKnownReservedKeys = []string{
"manifold.co/tool",
"manifold.co/package",
"manifold.co/environment",
"manifold.co/projects",
"manifold.co/resource-template",
}
// Equals checks the equality of another AnnotationsMap against this one
func (a AnnotationsMap) Equals(fm AnnotationsMap) bool {
if len(a) != len(fm) {
return false
}
for key, value := range a {
val, ok := fm[key]
if !ok || len(value) != len(val) {
return false
}
for subkey, subvalue := range value {
if subval := fm[key][subkey]; subvalue != subval {
return false
}
}
}
return true
}
// Validate validates this AnnotationsMap
func (a AnnotationsMap) Validate(_ interface{}) error {
countReserved := 0
for key, value := range a {
// Make sure the key is a valid key
if err := AnnotationKey(key).Validate(nil); err != nil {
return errors.Errorf("Key '%s' is not a valid annotation key", key)
}
// Make sure that, if the key is reserved, it is validated
if strings.HasPrefix(key, AnnotationReservedKeyPrefix) {
found := false
for _, reservedKey := range AnnotationKnownReservedKeys {
if reservedKey == key {
found = true
}
}
if !found {
return errors.Errorf("Key '%s' is not an accepted reserved key", key)
}
countReserved++
}
// Make sure every value is a valid value
for _, subvalue := range value {
if err := AnnotationValue(subvalue).Validate(nil); err != nil {
return errors.Errorf("Value '%s' is not a valid annotation value", subvalue)
}
}
}
// Finally, make sure we didn't overflow the mx number of reserved keys
if countReserved > AnnotationMaxReservedKeys {
return errors.New("Annotation has more than 20 annotation keys")
}
return nil
} | types.go | 0.793106 | 0.406509 | types.go | starcoder |
Package xdr implements the data representation portion of the External Data
Representation (XDR) standard protocol as specified in RFC 4506 (obsoletes
RFC 1832 and RFC 1014).
The XDR RFC defines both a data specification language and a data
representation standard. This package implements methods to encode and decode
XDR data per the data representation standard with the exception of 128-bit
quadruple-precision floating points. It does not currently implement parsing of
the data specification language. In other words, the ability to automatically
generate Go code by parsing an XDR data specification file (typically .x
extension) is not supported. In practice, this limitation of the package is
fairly minor since it is largely unnecessary due to the reflection capabilities
of Go as described below.
This package provides two approaches for encoding and decoding XDR data:
1) Marshal/Unmarshal functions which automatically map between XDR and Go types
2) Individual Encoder/Decoder objects to manually work with XDR primitives
For the Marshal/Unmarshal functions, Go reflection capabilities are used to
choose the type of the underlying XDR data based upon the Go type to encode or
the target Go type to decode into. A description of how each type is mapped is
provided below, however one important type worth reviewing is Go structs. In
the case of structs, each exported field (first letter capitalized) is reflected
and mapped in order. As a result, this means a Go struct with exported fields
of the appropriate types listed in the expected order can be used to
automatically encode / decode the XDR data thereby eliminating the need to write
a lot of boilerplate code to encode/decode and error check each piece of XDR
data as is typically required with C based XDR libraries.
Go Type to XDR Type Mappings
The following chart shows an overview of how Go types are mapped to XDR types
for automatic marshalling and unmarshalling. The documentation for the Marshal
and Unmarshal functions has specific details of how the mapping proceeds.
Go Type <-> XDR Type
--------------------
int8, int16, int32, int <-> XDR Integer
uint8, uint16, uint32, uint <-> XDR Unsigned Integer
int64 <-> XDR Hyper Integer
uint64 <-> XDR Unsigned Hyper Integer
bool <-> XDR Boolean
float32 <-> XDR Floating-Point
float64 <-> XDR Double-Precision Floating-Point
string <-> XDR String
byte <-> XDR Integer
[]byte <-> XDR Variable-Length Opaque Data
[#]byte <-> XDR Fixed-Length Opaque Data
[]<type> <-> XDR Variable-Length Array
[#]<type> <-> XDR Fixed-Length Array
struct <-> XDR Structure
map <-> XDR Variable-Length Array of two-element XDR Structures
time.Time <-> XDR String encoded with RFC3339 nanosecond precision
Notes and Limitations:
* Automatic marshalling and unmarshalling of variable and fixed-length
arrays of uint8s require a special struct tag `xdropaque:"false"`
since byte slices and byte arrays are assumed to be opaque data and
byte is a Go alias for uint8 thus indistinguishable under reflection
* Channel, complex, and function types cannot be encoded
* Interfaces without a concrete value cannot be encoded
* Cyclic data structures are not supported and will result in infinite
loops
* Strings are marshalled and unmarshalled with UTF-8 character encoding
which differs from the XDR specification of ASCII, however UTF-8 is
backwards compatible with ASCII so this should rarely cause issues
Encoding
To encode XDR data, use the Marshal function.
func Marshal(w io.Writer, v interface{}) (int, error)
For example, given the following code snippet:
type ImageHeader struct {
Signature [3]byte
Version uint32
IsGrayscale bool
NumSections uint32
}
h := ImageHeader{[3]byte{0xAB, 0xCD, 0xEF}, 2, true, 10}
var w bytes.Buffer
bytesWritten, err := xdr.Marshal(&w, &h)
// Error check elided
The result, encodedData, will then contain the following XDR encoded byte
sequence:
0xAB, 0xCD, 0xEF, 0x00,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x0A
In addition, while the automatic marshalling discussed above will work for the
vast majority of cases, an Encoder object is provided that can be used to
manually encode XDR primitives for complex scenarios where automatic
reflection-based encoding won't work. The included examples provide a sample of
manual usage via an Encoder.
Decoding
To decode XDR data, use the Unmarshal function.
func Unmarshal(r io.Reader, v interface{}) (int, error)
For example, given the following code snippet:
type ImageHeader struct {
Signature [3]byte
Version uint32
IsGrayscale bool
NumSections uint32
}
// Using output from the Encoding section above.
encodedData := []byte{
0xAB, 0xCD, 0xEF, 0x00,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x0A,
}
var h ImageHeader
bytesRead, err := xdr.Unmarshal(bytes.NewReader(encodedData), &h)
// Error check elided
The struct instance, h, will then contain the following values:
h.Signature = [3]byte{0xAB, 0xCD, 0xEF}
h.Version = 2
h.IsGrayscale = true
h.NumSections = 10
In addition, while the automatic unmarshalling discussed above will work for the
vast majority of cases, a Decoder object is provided that can be used to
manually decode XDR primitives for complex scenarios where automatic
reflection-based decoding won't work. The included examples provide a sample of
manual usage via a Decoder.
Errors
All errors are either of type UnmarshalError or MarshalError. Both provide
human-readable output as well as an ErrorCode field which can be inspected by
sophisticated callers if necessary.
See the documentation of UnmarshalError, MarshalError, and ErrorCode for further
details.
*/
package xdr | vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/davecgh/go-xdr/xdr2/doc.go | 0.842248 | 0.847968 | doc.go | starcoder |
package ElementaryCyclesSearch
/**
* Searches all elementary cycles in a given directed graph. The implementation
* is independent from the concrete objects that represent the graphnodes, it
* just needs an array of the objects representing the nodes of the graph
* and an adjacency-matrix of type boolean, representing the edges of the
* graph. It then calculates based on the adjacency-matrix the elementary
* cycles and returns a list, which contains lists itself with the objects of the
* concrete graphnodes-implementation. Each of these lists represents an
* elementary cycle.
*
* The implementation uses the algorithm of <NAME> for the search of
* the elementary cycles. For a description of the algorithm see:
* <NAME>: Finding All the Elementary Circuits of a Directed Graph.
* SIAM Journal on Computing. Volumne 4, Nr. 1 (1975), pp. 77-84.
*
* The algorithm of Johnson is based on the search for strong connected
* components in a graph. For a description of this part see:
* <NAME>: Depth-first search and linear graph algorithms. In: SIAM
* Journal on Computing. Volume 1, Nr. 2 (1972), pp. 146-160.
*
* This is based on the Java implementation of :
* @author <NAME>, web_at_normalisiert_dot_de
* @version 1.2, 22.03.2009
*
*/
type ElementaryCyclesSearch struct {
	/** Accumulated elementary cycles; each inner slice holds graph node values taken from graphNodes */
	cycles [][]int
	/** Adjacency-list of graph, derived from the boolean adjacency matrix */
	adjList [][]int
	/** Graphnodes: the caller-supplied node objects reported in result cycles */
	graphNodes []int
	/** Blocked nodes, used by the algorithm of Johnson */
	blocked []bool
	/** B-Lists, used by the algorithm of Johnson (nodes to unblock transitively) */
	B [][]int
	/** Stack for nodes on the current path, used by the algorithm of Johnson */
	stack []int
}
// NewElementaryCyclesSearch builds a searcher for the directed graph described
// by the boolean adjacency matrix. graphNodes supplies the node objects used
// to build the sets of elementary cycles returned to the caller; the remaining
// working state is initialised lazily by GetElementaryCycles.
func NewElementaryCyclesSearch(matrix [][]bool, graphNodes []int) *ElementaryCyclesSearch {
	return &ElementaryCyclesSearch{
		graphNodes: graphNodes,
		adjList:    GetAdjacencyList(matrix),
	}
}
/**
* Returns List::List::Object with the Lists of nodes of all elementary
* cycles in the graph.
*
* @return List::List::Object with the Lists of the elementary cycles.
*/
func (this *ElementaryCyclesSearch) GetElementaryCycles() [][]int {
	// Reset all working state so the method can be called repeatedly.
	this.cycles = make([][]int, 0)
	this.blocked = make([]bool, len(this.adjList))
	this.B = make([][]int, len(this.adjList))
	this.stack = make([]int, 0)
	var sccs *StrongConnectedComponents
	sccs = NewStrongConnectedComponents(this.adjList)
	s := 0
	// Johnson's outer loop: process one strongly connected component at a
	// time, starting from its lowest node id, until no component remains.
	for true {
		var sccResult *SCCResult
		sccResult = sccs.getAdjacencyList(s)
		if sccResult != nil && sccResult.getAdjList() != nil {
			var scc [][]int
			scc = sccResult.getAdjList()
			s = sccResult.getLowestNodeId()
			// Clear the blocked flags and B-lists for every node that
			// actually appears in this component's sub-adjacency-list.
			for j := 0; j < len(scc); j++ {
				if (scc[j] != nil) && (len(scc[j]) > 0) {
					this.blocked[j] = false
					this.B[j] = make([]int, 0)
				}
			}
			// Find all cycles through the start node s within this component,
			// then advance past s for the next component search.
			this.findCycles(s, s, scc)
			s++
		} else {
			break
		}
	}
	return this.cycles
}
/**
* Calculates the cycles containing a given node in a strongly connected
 * component. The method calls itself recursively.
*
* @param v
* @param s
* @param adjList adjacency-list with the subgraph of the strongly
* connected component s is part of.
* @return true, if cycle found; false otherwise
*/
func (this *ElementaryCyclesSearch) findCycles(v int, s int, adjList [][]int) bool {
	f := false
	//this.stack[len(this.stack)-1] = v
	// Push v onto the current path and block it so it is not revisited.
	this.stack = append(this.stack, v)
	this.blocked[v] = true
	for i := 0; i < len(adjList[v]); i++ {
		w := adjList[v][i]
		// found cycle: the successor is the start node, so the current stack
		// contents form one elementary cycle.
		if w == s {
			var cycle []int
			cycle = make([]int, 0)
			// Record the cycle using the caller-supplied node objects, not
			// the internal indices.
			for j := 0; j < len(this.stack); j++ {
				index := this.stack[j]
				cycle = append(cycle, this.graphNodes[index])
			}
			this.cycles = append(this.cycles, cycle)
			f = true
		} else if !this.blocked[w] {
			// Recurse into unblocked successors; propagate whether any
			// cycle was found below.
			if this.findCycles(w, s, adjList) {
				f = true
			}
		}
	}
	if f {
		// A cycle through v was found: unblock v (and, transitively, the
		// nodes recorded in its B-lists).
		this.unblock(v)
	} else {
		// No cycle through v: register v in the B-list of every successor so
		// that unblocking a successor later also unblocks v.
		for i := 0; i < len(adjList[v]); i++ {
			w := adjList[v][i]
			if !contains(this.B[w], v) {
				this.B[w] = append(this.B[w], v)
			}
		}
	}
	// Pop v from the current path (remove deletes by value, not by index).
	this.stack = remove(this.stack, v) //this.stack.remove(v) : v is the object to remove
	return f
}
// remove deletes the first occurrence of the value r (not an index) from s and
// returns the shortened slice. The backing array of s is modified in place; if
// r is not present, s is returned unchanged.
func remove(s []int, r int) []int {
	for i := range s {
		if s[i] == r {
			return append(s[:i], s[i+1:]...)
		}
	}
	return s
}
// contains reports whether the value e occurs anywhere in s.
func contains(s []int, e int) bool {
	for i := range s {
		if s[i] == e {
			return true
		}
	}
	return false
}
/**
* Unblocks recursively all blocked nodes, starting with a given node.
*
* @param node node to unblock
*/
func (this *ElementaryCyclesSearch) unblock(node int) {
this.blocked[node] = false
Bnode := this.B[node]
for len(Bnode) > 0 {
w := Bnode[0]
Bnode = append(Bnode[:0], Bnode[0+1:]...) // remove first element - TODO : check if element are shifted correctly (i.e index 1 becomes 0 etc.)
if this.blocked[w] {
this.unblock(w)
}
}
} | ElementaryCyclesSearch.go | 0.802207 | 0.725989 | ElementaryCyclesSearch.go | starcoder |
package bytesutil
import (
"bytes"
"fmt"
"sort"
)
// Sort sorts a slice of byte slices in ascending lexicographic order.
func Sort(a [][]byte) {
	sort.Slice(a, func(i, j int) bool { return bytes.Compare(a[i], a[j]) < 0 })
}
// IsSorted reports whether a is in ascending lexicographic order.
func IsSorted(a [][]byte) bool {
	return sort.SliceIsSorted(a, func(i, j int) bool { return bytes.Compare(a[i], a[j]) < 0 })
}
// SearchBytes binary-searches the sorted slice a and returns the smallest
// index whose element is >= x (len(a) if no such element exists).
func SearchBytes(a [][]byte, x []byte) int {
	atLeastX := func(i int) bool {
		return bytes.Compare(a[i], x) >= 0
	}
	return sort.Search(len(a), atLeastX)
}
// SearchBytesFixed binary-searches the flat byte slice a, treated as a
// sequence of fixed-size records of sz bytes each. len(a) must be a multiple
// of sz or the function panics. fn must report whether a record is >= the
// target and must be monotonic across the sorted records. The returned value
// is the byte offset within a where the target should exist; the caller
// should ensure that the target actually exists at this offset. Note the
// result is capped at len(a)-sz, so a target greater than every record still
// returns the offset of the last record.
func SearchBytesFixed(a []byte, sz int, fn func(x []byte) bool) int {
	if len(a)%sz != 0 {
		// The previous message read "x is not a multiple of a", which did not
		// describe the values actually printed (len(a) and sz).
		panic(fmt.Sprintf("len(a) is not a multiple of sz: %d %d", len(a), sz))
	}
	i, j := 0, len(a)-sz
	for i < j {
		h := int(uint(i+j) >> 1)
		h -= h % sz // snap the probe offset to a record boundary
		if !fn(a[h : h+sz]) {
			i = h + sz
		} else {
			j = h
		}
	}
	return i
}
// Union returns the union of the sorted slices a and b, in sorted order with
// duplicates (elements present in both inputs) emitted once.
func Union(a, b [][]byte) [][]byte {
	n := len(a)
	if len(b) > n {
		n = len(b)
	}
	out := make([][]byte, 0, n)
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch cmp := bytes.Compare(a[i], b[j]); {
		case cmp == 0:
			out = append(out, a[i])
			i++
			j++
		case cmp < 0:
			out = append(out, a[i])
			i++
		default:
			out = append(out, b[j])
			j++
		}
	}
	// At most one of these appends anything: the leftover tail of the
	// longer input.
	out = append(out, a[i:]...)
	out = append(out, b[j:]...)
	return out
}
// Intersect returns the intersection of the sorted slices a and b, in sorted
// order. Elements of the result alias elements of a.
func Intersect(a, b [][]byte) [][]byte {
	n := len(a)
	if len(b) > n {
		n = len(b)
	}
	out := make([][]byte, 0, n)
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		cmp := bytes.Compare(a[i], b[j])
		if cmp == 0 {
			out = append(out, a[i])
			i++
			j++
		} else if cmp < 0 {
			i++
		} else {
			j++
		}
	}
	return out
}
// byteSlices implements sort.Interface over a [][]byte, ordering elements
// lexicographically by bytes.Compare.
type byteSlices [][]byte

// Len returns the number of byte slices.
func (a byteSlices) Len() int { return len(a) }

// Less reports whether the element at i sorts strictly before the element at j.
func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 }

// Swap exchanges the elements at indexes i and j.
func (a byteSlices) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
package _841_Keys_and_Rooms
/*https://leetcode.com/problems/keys-and-rooms/
There are N rooms and you start in room 0. Each room has a distinct number in 0, 1, 2, ..., N-1, and each room may have some keys to access the next room.
Formally, each room i has a list of keys rooms[i], and each key rooms[i][j] is an integer in [0, 1, ..., N-1] where N = rooms.length. A key rooms[i][j] = v opens the room with number v.
Initially, all the rooms start locked (except for room 0).
You can walk back and forth between rooms freely.
Return true if and only if you can enter every room.
Example 1:
Input: [[1],[2],[3],[]]
Output: true
Explanation:
We start in room 0, and pick up key 1.
We then go to room 1, and pick up key 2.
We then go to room 2, and pick up key 3.
We then go to room 3. Since we were able to go to every room, we return true.
Example 2:
Input: [[1,3],[3,0,1],[2],[0]]
Output: false
Explanation: We can't enter the room with number 2.
Note:
1 <= rooms.length <= 1000
0 <= rooms[i].length <= 1000
The number of keys in all rooms combined is at most 3000.
*/
/* Description
* track by map[]bool visited - if enter, set true
* enter first room (assume that we have key from 0)
* add key 0 to stack
* for stack not empty
* pop key, check visited[key], if not -> enter the room
* track -> set visited[room] = true
* take keys -> add to the stack
* scan the visited list if all true -> return true
*/
// canVisitAllRooms reports whether every room is reachable starting from room
// 0, picking up keys along the way. It runs an iterative depth-first search
// over the key graph and succeeds when every room has been visited.
func canVisitAllRooms(rooms [][]int) bool {
	visited := make(map[int]bool, len(rooms))
	stack := []int{0}
	for len(stack) > 0 {
		room := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if visited[room] {
			continue
		}
		visited[room] = true
		for _, key := range rooms[room] {
			if !visited[key] {
				stack = append(stack, key)
			}
		}
	}
	return len(visited) == len(rooms)
}
package discrete
// lnFunc is the signature of a scalar log-style function taking and returning
// a float64. NOTE(review): presumably a logarithm variant (natural, base 2,
// ...) chosen by the caller — confirm with call sites.
type lnFunc func(float64) float64

// Create2D creates an xDim-by-yDim slice of float64 zero values.
func Create2D(xDim, yDim int) [][]float64 {
	r := make([][]float64, xDim)
	for x := 0; x < xDim; x++ {
		r[x] = make([]float64, yDim)
	}
	return r
}

// Create2DInt creates an xDim-by-yDim slice of int zero values.
func Create2DInt(xDim, yDim int) [][]int {
	r := make([][]int, xDim)
	for x := 0; x < xDim; x++ {
		r[x] = make([]int, yDim)
	}
	return r
}

// Create3D creates an xDim-by-yDim-by-zDim slice of float64 zero values.
func Create3D(xDim, yDim, zDim int) [][][]float64 {
	r := make([][][]float64, xDim)
	for x := 0; x < xDim; x++ {
		r[x] = make([][]float64, yDim)
		for y := 0; y < yDim; y++ {
			r[x][y] = make([]float64, zDim)
		}
	}
	return r
}

// Create4D creates an xDim-by-yDim-by-zDim-by-wDim slice of float64 zero
// values.
func Create4D(xDim, yDim, zDim, wDim int) [][][][]float64 {
	r := make([][][][]float64, xDim)
	for x := 0; x < xDim; x++ {
		r[x] = make([][][]float64, yDim)
		for y := 0; y < yDim; y++ {
			// BUGFIX: the z-level was previously allocated with yDim, which
			// panicked when zDim > yDim and left dangling nil slices when
			// zDim < yDim.
			r[x][y] = make([][]float64, zDim)
			for z := 0; z < zDim; z++ {
				r[x][y][z] = make([]float64, wDim)
			}
		}
	}
	return r
}

// Create3DInt creates an xDim-by-yDim-by-zDim slice of int zero values.
func Create3DInt(xDim, yDim, zDim int) [][][]int {
	r := make([][][]int, xDim)
	for x := 0; x < xDim; x++ {
		r[x] = make([][]int, yDim)
		for y := 0; y < yDim; y++ {
			r[x][y] = make([]int, zDim)
		}
	}
	return r
}

// Normalise1D returns a / sum(a): a new slice where each element is divided
// by the total of all elements.
func Normalise1D(a []float64) []float64 {
	r := make([]float64, len(a))
	sum := 0.0
	for i := 0; i < len(a); i++ {
		sum += a[i]
	}
	for i := 0; i < len(a); i++ {
		r[i] = a[i] / sum
	}
	return r
}

// Normalise2D returns a / sum(a): a new matrix where each element is divided
// by the total of all elements. The result has the dimensions of a (rows may
// be ragged; each row keeps its own length up to len(a[0]) columns allocated).
func Normalise2D(a [][]float64) [][]float64 {
	r := Create2D(len(a), len(a[0]))
	sum := 0.0
	for i := 0; i < len(a); i++ {
		for j := 0; j < len(a[i]); j++ {
			sum += a[i][j]
		}
	}
	for i := 0; i < len(a); i++ {
		for j := 0; j < len(a[i]); j++ {
			r[i][j] = a[i][j] / sum
		}
	}
	return r
}

// Normalise3D returns a / sum(a) for a 3-dimensional matrix.
func Normalise3D(a [][][]float64) [][][]float64 {
	r := Create3D(len(a), len(a[0]), len(a[0][0]))
	sum := 0.0
	// BUGFIX: the innermost bound was previously len(a[i]) instead of
	// len(a[i][j]), which mis-summed (and mis-filled) any non-cubic input.
	for i := 0; i < len(a); i++ {
		for j := 0; j < len(a[i]); j++ {
			for k := 0; k < len(a[i][j]); k++ {
				sum += a[i][j][k]
			}
		}
	}
	for i := 0; i < len(a); i++ {
		for j := 0; j < len(a[i]); j++ {
			for k := 0; k < len(a[i][j]); k++ {
				r[i][j][k] = a[i][j][k] / sum
			}
		}
	}
	return r
}

// Normalise4D returns a / sum(a) for a 4-dimensional matrix.
func Normalise4D(a [][][][]float64) [][][][]float64 {
	r := Create4D(len(a), len(a[0]), len(a[0][0]), len(a[0][0][0]))
	sum := 0.0
	// BUGFIX: the k and l bounds were previously len(a[i]), which indexed out
	// of range (or skipped elements) for any non-hypercubic input.
	for i := 0; i < len(a); i++ {
		for j := 0; j < len(a[i]); j++ {
			for k := 0; k < len(a[i][j]); k++ {
				for l := 0; l < len(a[i][j][k]); l++ {
					sum += a[i][j][k][l]
				}
			}
		}
	}
	for i := 0; i < len(a); i++ {
		for j := 0; j < len(a[i]); j++ {
			for k := 0; k < len(a[i][j]); k++ {
				for l := 0; l < len(a[i][j][k]); l++ {
					r[i][j][k][l] = a[i][j][k][l] / sum
				}
			}
		}
	}
	return r
}
package ln
import "math"
// Cone is a right circular cone with its base circle of radius Radius in the
// z=0 plane (centered at the origin) and its apex at z=Height.
type Cone struct {
	Radius float64
	Height float64
}

// NewCone returns a Cone with the given base radius and height.
func NewCone(radius, height float64) *Cone {
	return &Cone{radius, height}
}

// Compile is a no-op for cones; nothing needs precomputation.
// NOTE(review): presumably required to satisfy a Shape interface — confirm.
func (c *Cone) Compile() {
}

// BoundingBox returns the axis-aligned box enclosing the cone: the square of
// side 2*Radius at the base, extruded from z=0 to z=Height.
func (c *Cone) BoundingBox() Box {
	r := c.Radius
	return Box{Vector{-r, -r, 0}, Vector{r, r, c.Height}}
}

// Contains always reports false: the cone is treated as a surface only, not a
// solid, for containment queries.
func (c *Cone) Contains(v Vector, f float64) bool {
	return false
}
// Intersect returns the nearest hit of ray with the cone's lateral surface,
// or NoHit. The surface satisfies x^2 + y^2 = (r/h)^2 * (z-h)^2 (radius r at
// z=0 tapering to the apex at z=h); substituting the ray o + t*d yields a
// quadratic in t.
func (shape *Cone) Intersect(ray Ray) Hit {
	o := ray.Origin
	d := ray.Direction
	r := shape.Radius
	h := shape.Height
	// k = (r/h)^2, the squared taper slope of the cone.
	k := r / h
	k = k * k
	// Quadratic coefficients a*t^2 + b*t + c = 0 for the infinite double cone.
	a := d.X*d.X + d.Y*d.Y - k*d.Z*d.Z
	b := 2 * (d.X*o.X + d.Y*o.Y - k*d.Z*(o.Z-h))
	c := o.X*o.X + o.Y*o.Y - k*(o.Z-h)*(o.Z-h)
	q := b*b - 4*a*c
	// Negative discriminant: the ray misses; zero: tangent, treated as a miss.
	if q <= 0 {
		return NoHit
	}
	s := math.Sqrt(q)
	t0 := (-b + s) / (2 * a)
	t1 := (-b - s) / (2 * a)
	if t0 > t1 {
		t0, t1 = t1, t0
	}
	// Try the nearer root first; accept a root only if it lies in front of
	// the ray origin (epsilon guards self-intersection) and within the
	// finite cone's height range, which also rejects the mirror half of the
	// double cone.
	if t0 > 1e-6 {
		p := ray.Position(t0)
		if p.Z > 0 && p.Z < h {
			return Hit{shape, t0}
		}
	}
	if t1 > 1e-6 {
		p := ray.Position(t1)
		if p.Z > 0 && p.Z < h {
			return Hit{shape, t1}
		}
	}
	return NoHit
}
// Paths returns a simple wireframe for the cone: one straight segment from
// each of twelve evenly spaced points on the base circle up to the apex.
func (c *Cone) Paths() Paths {
	apex := Vector{0, 0, c.Height}
	var paths Paths
	for deg := 0; deg < 360; deg += 30 {
		theta := Radians(float64(deg))
		base := Vector{c.Radius * math.Cos(theta), c.Radius * math.Sin(theta), 0}
		paths = append(paths, Path{base, apex})
	}
	return paths
}
// OutlineCone is a Cone that renders only its silhouette as seen from a fixed
// eye position, instead of a full wireframe.
type OutlineCone struct {
	Cone
	Eye Vector // viewpoint the outline is computed for
	Up  Vector // view up vector used to orient the silhouette edges
}

// NewOutlineCone returns an OutlineCone for the given viewpoint, up vector,
// base radius and height.
func NewOutlineCone(eye, up Vector, radius, height float64) *OutlineCone {
	cone := NewCone(radius, height)
	return &OutlineCone{*cone, eye, up}
}
// Paths returns the silhouette of the cone from c.Eye: the full base circle
// plus the two side edges from the visually outermost base points up to the
// apex.
func (c *OutlineCone) Paths() Paths {
	center := Vector{0, 0, 0}
	// Right-triangle construction: hyp is the eye-to-center distance, opp the
	// base radius; theta is the half-angle the base circle subtends at the eye.
	hyp := center.Sub(c.Eye).Length()
	opp := c.Radius
	theta := math.Asin(opp / hyp)
	adj := opp / math.Tan(theta)
	d := math.Cos(theta) * adj
	// r := math.Sin(theta) * adj
	// Build a view basis: w looks from the eye toward the cone's center, u is
	// the horizontal direction perpendicular to w and the up vector.
	w := center.Sub(c.Eye).Normalize()
	u := w.Cross(c.Up).Normalize()
	// a0/b0 are the left/right extreme silhouette points of the base circle
	// as seen from the eye (1.01 pads them slightly outward).
	c0 := c.Eye.Add(w.MulScalar(d))
	a0 := c0.Add(u.MulScalar(c.Radius * 1.01))
	b0 := c0.Add(u.MulScalar(-c.Radius * 1.01))
	// The base circle itself, sampled at one-degree steps.
	var p0 Path
	for a := 0; a < 360; a++ {
		x := c.Radius * math.Cos(Radians(float64(a)))
		y := c.Radius * math.Sin(Radians(float64(a)))
		p0 = append(p0, Vector{x, y, 0})
	}
	return Paths{
		p0,
		{{a0.X, a0.Y, 0}, {0, 0, c.Height}},
		{{b0.X, b0.Y, 0}, {0, 0, c.Height}},
	}
}
// NewTransformedOutlineCone builds an outline cone whose axis runs from v0 to
// v1 with the given base radius, by constructing a canonical OutlineCone at
// the origin and wrapping it in the transform that maps it onto that axis.
// The eye position is mapped into the cone's local space so the silhouette is
// computed for the correct viewpoint.
func NewTransformedOutlineCone(eye, up, v0, v1 Vector, radius float64) Shape {
	d := v1.Sub(v0)
	z := d.Length() // cone height is the axis length
	// Angle between the desired axis and the up vector; if nonzero, rotate
	// about their common perpendicular before translating to v0.
	a := math.Acos(d.Normalize().Dot(up))
	m := Translate(v0)
	if a != 0 {
		u := d.Cross(up).Normalize()
		m = Rotate(u, a).Translate(v0)
	}
	c := NewOutlineCone(m.Inverse().MulPosition(eye), up, radius, z)
	return NewTransformedShape(c, m)
}
package pipeline
// ReadableMap is an interface that provides read-only access to map properties.
type ReadableMap interface {
	// Get retrieves an element from the map, and a bool which says if there
	// was a property that exists with that name at all.
	Get(propName string) (interface{}, bool)
}
// NoProps is an empty ImmutableProperties struct, usable wherever an item
// carries no properties.
var NoProps = ImmutableProperties{}

// ImmutableProperties is a map of properties which can't be edited after
// creation.
type ImmutableProperties struct {
	props map[string]interface{} // nil for NoProps; reads on a nil map are safe
}

// NewImmutableProps wraps props in an ImmutableProperties value.
// NOTE(review): the map is stored without copying, so callers must not mutate
// it after handing it over — confirm call sites honor this.
func NewImmutableProps(props map[string]interface{}) ImmutableProperties {
	return ImmutableProperties{props}
}

// Len returns the number of stored properties.
func (ip ImmutableProperties) Len() int {
	return len(ip.props)
}

// Get retrieves an element from the map, and a bool which says if there was a
// property that exists with that name at all.
func (ip ImmutableProperties) Get(propName string) (interface{}, bool) {
	// Indexing a nil map is legal in Go and yields (zero value, false), so
	// the previous explicit nil guard was redundant.
	val, ok := ip.props[propName]
	return val, ok
}

// Set will create a new ImmutableProperties struct whose values are the
// original properties combined with the provided updates (updates win on key
// conflicts). The receiver is left unchanged.
func (ip ImmutableProperties) Set(updates map[string]interface{}) ImmutableProperties {
	numProps := len(updates) + len(ip.props)
	allProps := make(map[string]interface{}, numProps)
	for k, v := range ip.props {
		allProps[k] = v
	}
	for k, v := range updates {
		allProps[k] = v
	}
	return ImmutableProperties{allProps}
}
// ItemWithProps is an interface for an item which is passed through a pipeline.
type ItemWithProps interface {
	// GetItem retrieves the item
	GetItem() interface{}
	// GetProperties retrieves properties attached to the item
	GetProperties() ImmutableProperties
}

// itemWithProps is the default ItemWithProps implementation: a payload plus
// its immutable property set.
type itemWithProps struct {
	item interface{}
	props ImmutableProperties
}

// GetItem returns the wrapped payload.
func (iwp itemWithProps) GetItem() interface{} {
	return iwp.item
}

// GetProperties returns the properties attached to the item.
func (iwp itemWithProps) GetProperties() ImmutableProperties {
	return iwp.props
}

// NewItemWithNoProps creates an item with no properties.
func NewItemWithNoProps(item interface{}) ItemWithProps {
	return itemWithProps{item, NoProps}
}

// NewItemWithProps creates an item from a payload and a set of properties.
func NewItemWithProps(item interface{}, props ImmutableProperties) ItemWithProps {
	return itemWithProps{item, props}
}
package main
// CorporateFiller1 is the filler text shown in the first column of the
// "Synergy Ipsum" demo. It is a raw string literal, so the line breaks are
// part of the value and are rendered as-is.
const CorporateFiller1 = `
CORPORATE SYNERGY: WIN-WIN FOR ALL STAKEHOLDERS
Dramatically mesh low-risk high-yield alignments
before transparent e-tailers. Completely pursue
scalable customer service through sustainable
potentialities. Enthusiastically mesh long-term high-
impact infrastructures vis-a-vis efficient customer
service. Distinctively re-engineer revolutionary meta-
services and premium architectures. Continually
reintermediate integrated processes through
technically sound intellectual capital. Credibly
reintermediate backend ideas for cross-platform
models. Efficiently unleash cross-media information
without cross-media value. Credibly pontificate highly
efficient manufactured products and enabled data.
Holisticly predominate extensible testing procedures
for reliable supply chains. Quickly maximize timely
deliverables for real-time schemas. Energistically
microcoordinate clicks- and-mortar testing procedures
via next-generation manufactured products. Uniquely
matrix economically sound value through cooperative
technology. Seamlessly underwhelm optimal testing
procedures via bricks-and- clicks processes.
Collaboratively unleash market-driven "outside the
box" thinking for long-term high-impact solutions.
Dynamically target high-payoff intellectual capital
for customized technologies. Collaboratively
administrate empowered markets via plug-and- play
networks. Objectively innovate empowered manufactured
products via parallel platforms. Compellingly embrace
empowered e-business after user friendly intellectual
capital. Dramatically engage top- line web services
vis-a-vis cutting-edge deliverables.`
// CorporateFiller2 is the filler text shown in the second column of the
// "Synergy Ipsum" demo. It is a raw string literal, so the line breaks are
// part of the value and are rendered as-is.
const CorporateFiller2 = `
Seamlessly empower fully researched growth strategies and
interoperable internal or "organic" sources. Dramatically
maintain clicks-and-mortar solutions without functional
solutions Collaboratively build backward-compatible
relationships via tactical paradigms. Assertively iterate
resource maximizing products after leading-edge intellectual
capital. Efficiently enable enabled sources and cost effective
products. Continually whiteboard superior opportunities via
covalent scenarios. Dramatically synthesize integrated schemas
with optimal networks. Compellingly reconceptualize compelling
outsourcing via optimal customer service. Progressively maintain
extensive infomediaries via extensible niches. Dramatically
engage high-payoff infomediaries rather than client-centric
imperatives. Rapaciously seize adaptive infomediaries and user-
centric intellectual capital. Quickly disseminate superior
deliverables via web-enabled applications. Objectively pursue
diverse catalysts for change for interoperable meta-services.
Interactively actualize front-end processes with effective
convergence. Compellingly supply just in time catalysts for
change through top-line potentialities. Completely iterate
covalent strategic theme areas via accurate e-markets.
Proactively fabricate one-to-one materials via effective e-
business. Uniquely deploy cross-unit benefits with wireless
testing procedures. Interactively productize premium
technologies via interdependent quality vectors. Professionally
cultivate one-to-one customer service with robust ideas. Quickly
cultivate optimal processes and tactical architectures.
Dramatically visualize customer directed convergence without
revolutionary ROI. Efficiently innovate open-source
infrastructures via inexpensive materials. Globally incubate
standards compliant channels before scalable benefits.
Energistically scale future-proof core competencies vis-a-vis
impactful experiences. Utilize bleeding-edge technologies rather
than just in time initiatives. Proactively envisioned multimedia
based expertise and cross-media growth strategies. Completely
synergize scalable e-commerce rather than high standards in e-
services. Monotonically engage market-driven intellectual
capital through wireless opportunities. Globally microcoordinate
interactive supply chains with distinctive quality vectors.
Phosfluorescently expedite impactful supply chains via focused
results. Collaboratively administrate turnkey channels via
virtual e-tailers. Competently parallel task fully researched
data and enterprise process improvements. Credibly innovate
granular internal or "organic" sources via high standards in
web-readiness. Quickly aggregate B2B users and worldwide
potentialities. Interactively coordinate proactive e-commerce
via process-centric "outside the box" thinking.`
// end | demo/filler_text.go | 0.547948 | 0.535038 | filler_text.go | starcoder |
package main
import (
"errors"
"math/rand"
"sort"
)
// Parameters is parameters used in the genetic algorithm solver.
type Parameters struct {
	// CrossoverProbability is the probability of one chromosome crossover with
	// another chromosome to produce two offspring chromosomes in one evolving
	// process
	CrossoverProbability float64
	// MutationProbability is the probability that a chromosome produced by
	// crossover should mutate one byte in its gene
	MutationProbability float64
	// EliteismRatio is the ratio of chromosomes (that rank in front) in the
	// population that should absolutely be surviving.
	EliteismRatio float64
	// PopulationSize is the size of population
	PopulationSize int
	// ChromosomeLength is the length of chromosomes
	ChromosomeLength int
	// MaximumGeneration is the maximum number of generations that the solver can
	// envolve.
	MaximumGeneration int
	// Evaluator is the evaluation function, or fitness function, that the solver
	// should use. The function should take a byte slice, which is gene, and
	// returns a fitness value. The better the chromosome is, the lower value it
	// should return. 0 is assumed to be a perfect chromosome.
	Evaluator func([]byte) float64
}
// GA is a genetic algorithm solver.
type GA struct {
	param *Parameters // solver configuration, validated by checkParam
	pop population // current population; temporarily grows past PopulationSize during evolve
}
// checkParam validates param and returns a descriptive error for the first
// constraint that is violated, or nil if every parameter is acceptable.
func checkParam(param *Parameters) error {
	switch {
	case param.CrossoverProbability < 0 || param.CrossoverProbability > 1:
		return errors.New("CrossoverProbability should be in [0, 1]")
	case param.MutationProbability < 0 || param.MutationProbability > 1:
		return errors.New("MutationProbability should be in [0, 1]")
	case param.EliteismRatio < 0 || param.EliteismRatio > 1:
		return errors.New("EliteismRatio should be in [0, 1]")
	case param.PopulationSize <= 2:
		return errors.New("PopulationSize should > 2")
	case param.ChromosomeLength <= 0:
		return errors.New("ChromosomeLength should > 0")
	case param.MaximumGeneration <= 0:
		return errors.New("MaximumGeneration should > 0")
	case param.Evaluator == nil:
		return errors.New("Evaluator cannot be nil")
	}
	return nil
}
// NewGA creates a new genetic algorithm solver configured by param. It
// returns an error (and no solver) when the parameters fail validation.
func NewGA(param *Parameters) (*GA, error) {
	if err := checkParam(param); err != nil {
		return nil, err
	}
	return &GA{
		param: param,
		pop:   make([]chromosome, param.PopulationSize),
	}, nil
}
// init fills the population with random chromosomes and evaluates the
// fitness of each one.
func (g *GA) init() {
	for i := 0; i < g.param.PopulationSize; i++ {
		g.pop[i] = randomChromosome(g.param.ChromosomeLength)
		g.pop[i].fitness = g.param.Evaluator(g.pop[i].gene)
	}
}
// evolve advances the population by one generation: crossover (with optional
// mutation) appends offspring to the population, which is then sorted by
// fitness and truncated back to PopulationSize.
func (g *GA) evolve() {
	// generate offspring
	for i := 0; i < g.param.PopulationSize; i++ {
		if rand.Float64() < g.param.CrossoverProbability {
			// Pick a random mate distinct from i (shift by one on collision).
			spouseIndex := rand.Int() % (g.param.PopulationSize - 1)
			if spouseIndex == i {
				spouseIndex += 1
			}
			offspring := crossover(g.pop[i], g.pop[spouseIndex])
			for _, chrom := range offspring {
				if rand.Float64() < g.param.MutationProbability {
					chrom.mutate()
				}
				chrom.fitness = g.param.Evaluator(chrom.gene)
				g.pop = append(g.pop, chrom)
			}
		}
	}
	// selection
	sort.Sort(g.pop) // presumably ascending fitness (best first) — Less is defined elsewhere
	for i := int(g.param.EliteismRatio * float64(g.param.PopulationSize)); i < g.param.PopulationSize; i++ {
		// randomly select those who are not "eliteism"
		// NOTE(review): luckyIndex is drawn from [0, PopulationSize-i), which
		// can land inside the elite prefix (swapping an elite out) and never
		// reaches the offspring appended beyond PopulationSize. The intent
		// looks like i + rand.Int()%(len(g.pop)-i) — confirm against the
		// reference algorithm before changing.
		luckyIndex := rand.Int() % (g.param.PopulationSize - i)
		g.pop.Swap(i, luckyIndex)
	}
	// Drop the surplus offspring, restoring the configured population size.
	g.pop = g.pop[:g.param.PopulationSize]
}
// Run runs genetic algorithm on g. It returns result, which is the evolved
// chromosome if it succeeds, and generation, which is the generation at which
// the algorithm gets the result. Evolution is considered to be successful if
// fitness reaches 0; otherwise result is nil and generation is
// MaximumGeneration.
func (g *GA) Run() (result []byte, generation int) {
	g.init()
	for i := 0; i < g.param.MaximumGeneration; i++ {
		g.evolve()
		// evolve leaves the population sorted best-first, so index 0 holds
		// the current champion; fitness 0 means a perfect chromosome.
		if g.pop[0].fitness == 0 {
			return g.pop[0].gene, i
		}
	}
	return nil, g.param.MaximumGeneration
}
package model
import (
"github.com/jung-kurt/gofpdf"
"github.com/tcd/md2pdf/internal/lib"
)
// TableContent represents the contents of a table element.
type TableContent struct {
Rows [][]Contents `json:"rows"`
Alignments []string `json:"alignments"` // "L", "C", or "R"
}
// AddRows to TableContent.
func (tc *TableContent) AddRows(cols ...[]Contents) {
tc.Rows = append(tc.Rows, cols...)
}
// Headers returns the first slice of string in Rows.
func (tc TableContent) Headers() []Contents {
if len(tc.Rows) != 0 {
return tc.Rows[0]
}
return []Contents{}
}
// Body returns all rows after the first.
func (tc TableContent) Body() [][]Contents {
if len(tc.Rows) > 0 {
return tc.Rows[1:]
}
return [][]Contents{}
}
// ColCount returns the number of header columns.
func (tc TableContent) ColCount() int {
return len(tc.Headers())
}
// GetColumn returns one string for every cell in a column at the given index.
// First column is 0, not 1.
func (tc TableContent) GetColumn(index int) []Contents {
columns := make([]Contents, len(tc.Rows))
for i, row := range tc.Rows {
if len(row) > index {
columns[i] = row[index]
} else {
columns[i] = Contents{}
}
}
return columns
}
// longestWidths returns the width of the longest cell in each column.
func (tc TableContent) longestWidths(pdf *gofpdf.Fpdf) []float64 {
widths := make([]float64, tc.ColCount())
for i := 0; i < tc.ColCount(); i++ {
col := tc.GetColumn(i)
longest := ""
for _, cell := range col {
if len(cell.JoinContent()) > len(longest) {
longest = cell.JoinContent()
}
}
widths[i] = pdf.GetStringWidth(longest) * 1.5
}
return widths
}
// headerWidths returns the width needed to hold the header cell in each column.
func (tc TableContent) headerWidths(pdf *gofpdf.Fpdf) []float64 {
widths := make([]float64, tc.ColCount())
for i, header := range tc.Headers() {
widths[i] = pdf.GetStringWidth(header.JoinContent()) * 1.5
}
return widths
}
// Widths returns the width needed to hold the longest cell in each column.
func (tc TableContent) Widths(pdf *gofpdf.Fpdf, cellMargin float64) []float64 {
colCount := tc.ColCount()
cellPadding := (cellMargin * 2)
tableWidth := (lib.ContentBoxWidth(pdf) - (cellPadding * float64(colCount)))
headerWidths := tc.headerWidths(pdf)
widths := tc.longestWidths(pdf)
if sum(widths...) <= tableWidth {
return widths
}
finalWidths := make([]float64, colCount)
for i := range finalWidths {
remainingWidth := tableWidth - sum(headerWidths...)
portions := percentages(widths...)
finalWidths[i] = (remainingWidth * portions[i]) + headerWidths[i] + cellMargin
}
return finalWidths
}
// WidthsAlt is an older version of Widths.
// Still trying to figure out the best way to implement this.
func (tc TableContent) WidthsAlt(pdf *gofpdf.Fpdf, cellMargin float64) []float64 {
colCount := tc.ColCount()
cellPadding := (cellMargin * 2)
tableWidth := (lib.ContentBoxWidth(pdf) - (cellPadding * float64(colCount)))
headerWidths := tc.headerWidths(pdf)
widths := tc.longestWidths(pdf)
if sum(widths...) <= tableWidth {
return widths
}
finalWidths := make([]float64, colCount)
if len(widths) == 2 {
remainingWidth := tableWidth - sum(headerWidths...)
portions := percentages(widths...)
finalWidths[0] = (remainingWidth * portions[0]) + headerWidths[0] + cellMargin
finalWidths[1] = (remainingWidth * portions[1]) + headerWidths[1] + cellMargin
return finalWidths
}
for i := range finalWidths {
var finalWidth float64
portions := percentages(widths...)
if portions[i] == 1 {
finalWidths[i] = tableWidth + cellPadding
continue
}
if widths[i] < tableWidth && tableWidth*portions[i] < headerWidths[i] {
finalWidth = headerWidths[i] + cellPadding
tableWidth = tableWidth - (headerWidths[i] + cellPadding)
widths[i] = 0
} else {
finalWidth = (tableWidth * (portions[i] + cellPadding))
}
finalWidths[i] = finalWidth
}
return finalWidths
}
// Return the sum of any number of `float64`s.
func sum(items ...float64) (sum float64) {
for i := range items {
sum += items[i]
}
return
}
// Returns the percentage of all items out of their sum.
func percentages(items ...float64) []float64 {
total := sum(items...)
percentages := make([]float64, len(items))
for i := range items {
percentages[i] = items[i] / total
}
return percentages
}
// Return a new slice with one item removed.
func remove(slice []float64, s int) []float64 {
return append(slice[:s], slice[s+1:]...)
} | internal/model/table_content.go | 0.809201 | 0.470311 | table_content.go | starcoder |
package shapes
import (
"github.com/juan-medina/goecs"
"github.com/juan-medina/gosge/components/geometry"
)
//Box is a rectangular outline that we could draw in a geometry.Point with a color.Solid
type Box struct {
	Size geometry.Size // The box size
	Scale float32 // The box scale
	Thickness int32 // Thickness of the line
}

// Type returns the goecs.ComponentType registered for shapes.Box
func (b Box) Type() goecs.ComponentType {
	return TYPE.Box
}
//SolidBox is a rectangular shape that we could draw in a geometry.Point with a color.Solid or color.Gradient
type SolidBox struct {
	Size geometry.Size // The box size
	Scale float32 // The box scale
}

// Type returns the goecs.ComponentType registered for shapes.SolidBox
func (b SolidBox) Type() goecs.ComponentType {
	return TYPE.SolidBox
}

// Contains reports whether point lies inside this box when the box is placed
// at position at; the box size is multiplied by its Scale.
func (b SolidBox) Contains(at geometry.Point, point geometry.Point) bool {
	return geometry.Rect{
		From: at,
		Size: geometry.Size{
			Width:  b.Size.Width * b.Scale,
			Height: b.Size.Height * b.Scale,
		},
	}.IsPointInRect(point)
}
// GetReactAt returns the geometry.Rect covered by this box when placed at the
// given point, with the size multiplied by the box Scale.
// NOTE(review): "React" looks like a typo for "Rect", but the name is
// exported so it must stay for API compatibility.
func (b Box) GetReactAt(at geometry.Point) geometry.Rect {
	return geometry.Rect{
		From: at,
		Size: geometry.Size{
			Width:  b.Size.Width * b.Scale,
			Height: b.Size.Height * b.Scale,
		},
	}
}

// Contains reports whether point lies inside this box when the box is placed
// at position at.
func (b Box) Contains(at geometry.Point, point geometry.Point) bool {
	return b.GetReactAt(at).IsPointInRect(point)
}

// Line is a component that represents a line
type Line struct {
	To geometry.Point // To where the line goes
	Thickness float32 // Thickness of the line
}

// Type returns the goecs.ComponentType registered for shapes.Line
func (l Line) Type() goecs.ComponentType {
	return TYPE.Line
}
// types groups the component type ids registered by this package.
type types struct {
	// Box is the goecs.ComponentType for shapes.Box
	Box goecs.ComponentType
	// SolidBox is the goecs.ComponentType for shapes.SolidBox
	SolidBox goecs.ComponentType
	// Line is the goecs.ComponentType for shapes.Line
	Line goecs.ComponentType
}

// TYPE holds the goecs.ComponentType for our shapes components; each id is
// allocated once at package load.
var TYPE = types{
	Box:      goecs.NewComponentType(),
	SolidBox: goecs.NewComponentType(),
	Line:     goecs.NewComponentType(),
}
// gets groups typed accessors that extract shapes components from an entity.
type gets struct {
	// Box gets a shapes.Box from a goecs.Entity
	Box func(e *goecs.Entity) Box
	// SolidBox gets a shapes.SolidBox from a goecs.Entity
	SolidBox func(e *goecs.Entity) SolidBox
	// Line gets a shapes.Line from a goecs.Entity
	Line func(e *goecs.Entity) Line
}

// Get provides typed accessors for the shapes components; each accessor
// type-asserts the stored component, so the entity must actually carry it.
var Get = gets{
	// Box gets a shapes.Box from a goecs.Entity
	Box: func(e *goecs.Entity) Box {
		return e.Get(TYPE.Box).(Box)
	},
	// SolidBox gets a shapes.SolidBox from a goecs.Entity
	SolidBox: func(e *goecs.Entity) SolidBox {
		return e.Get(TYPE.SolidBox).(SolidBox)
	},
	// Line gets a shapes.Line from a goecs.Entity
	Line: func(e *goecs.Entity) Line {
		return e.Get(TYPE.Line).(Line)
	},
}
package main
// MapToString transforms a maybe of type string into a maybe of type string
// by applying f to the wrapped value. f is invoked only when m holds a value;
// an empty maybe maps to an empty maybe.
func (m *StringMaybe) MapToString(f func(string) string) *StringMaybe {
	if m.empty {
		return &StringMaybe{empty: true}
	}
	return &StringMaybe{value: f(m.value)}
}

// FlatMapToString transforms a maybe of type string into a maybe of type
// string by applying f, which itself returns a maybe, to the wrapped value.
// f is invoked only when m holds a value.
func (m *StringMaybe) FlatMapToString(f func(string) *StringMaybe) *StringMaybe {
	if m.empty {
		return &StringMaybe{empty: true}
	}
	return f(m.value)
}
// MapToInt transforms a maybe of type string into a maybe of type int by
// applying f to the wrapped value. f is invoked only when m holds a value;
// an empty maybe maps to an empty maybe.
func (m *StringMaybe) MapToInt(f func(string) int) *IntMaybe {
	if m.empty {
		return &IntMaybe{empty: true}
	}
	return &IntMaybe{value: f(m.value)}
}

// FlatMapToInt transforms a maybe of type string into a maybe of type int by
// applying f, which itself returns a maybe, to the wrapped value. f is
// invoked only when m holds a value.
func (m *StringMaybe) FlatMapToInt(f func(string) *IntMaybe) *IntMaybe {
	if m.empty {
		return &IntMaybe{empty: true}
	}
	return f(m.value)
}
// MapToString transforms the wrapped int into a string via f, yielding a
// fresh *StringMaybe. When the receiver is empty, f is never invoked and
// an empty maybe is returned instead.
func (m *IntMaybe) MapToString(f func(int) string) *StringMaybe {
	if !m.empty {
		return &StringMaybe{value: f(m.value)}
	}
	return &StringMaybe{empty: true}
}

// FlatMapToString chains a computation that itself yields a *StringMaybe.
// f runs only when the receiver holds a value; an empty receiver
// short-circuits to an empty result.
func (m *IntMaybe) FlatMapToString(f func(int) *StringMaybe) *StringMaybe {
	if !m.empty {
		return f(m.value)
	}
	return &StringMaybe{empty: true}
}
// MapToInt allows for a safe transformation of a maybe of type int to a maybe
// of type int. Note that the function f is only called if the IntMaybe is a
// some-type.
func (m *IntMaybe) MapToInt(f func(int) int) *IntMaybe {
if m.empty {
return &IntMaybe{
empty: true,
}
}
return &IntMaybe{
value: f(m.value),
}
}
// FlatMapToInt allows for a safe transformation of a maybe of type int to a maybe
// of type int. Note that the function f is only called if the IntMaybe is a
// some-type.
func (m *IntMaybe) FlatMapToInt(f func(int) *IntMaybe) *IntMaybe {
if m.empty {
return &IntMaybe{
empty: true,
}
}
return f(m.value)
} | examples/gen_maybe_compose.go | 0.756627 | 0.449695 | gen_maybe_compose.go | starcoder |
package onshape
import (
"encoding/json"
)
// BTPStatementReturn281 struct for BTPStatementReturn281
type BTPStatementReturn281 struct {
	BTPStatement269
	BtType *string `json:"btType,omitempty"`
	// SpaceAfterReturn — presumably the whitespace following the `return`
	// keyword; confirm against the BTPSpace10 definition.
	SpaceAfterReturn *BTPSpace10 `json:"spaceAfterReturn,omitempty"`
	// Value is the optional expression whose result is returned.
	Value *BTPExpression9 `json:"value,omitempty"`
}
// NewBTPStatementReturn281 instantiates a new BTPStatementReturn281 object.
// Properties with defined defaults are assigned and required properties are
// set, but the argument list may change as the required set changes.
func NewBTPStatementReturn281() *BTPStatementReturn281 {
	return &BTPStatementReturn281{}
}

// NewBTPStatementReturn281WithDefaults instantiates a new
// BTPStatementReturn281 object, assigning defaults only to properties that
// define them; required properties are not guaranteed to be set.
func NewBTPStatementReturn281WithDefaults() *BTPStatementReturn281 {
	return &BTPStatementReturn281{}
}
// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTPStatementReturn281) GetBtType() string {
	if v, ok := o.GetBtTypeOk(); ok {
		return *v
	}
	return ""
}

// GetBtTypeOk returns a tuple with the BtType field value if set, nil
// otherwise, and a boolean reporting whether the value has been set.
func (o *BTPStatementReturn281) GetBtTypeOk() (*string, bool) {
	if o == nil || o.BtType == nil {
		return nil, false
	}
	return o.BtType, true
}

// HasBtType reports whether the BtType field has been set.
func (o *BTPStatementReturn281) HasBtType() bool {
	return o != nil && o.BtType != nil
}

// SetBtType stores a copy of the given string in the BtType field.
func (o *BTPStatementReturn281) SetBtType(v string) {
	o.BtType = &v
}
// GetSpaceAfterReturn returns the SpaceAfterReturn field value if set,
// zero value otherwise.
func (o *BTPStatementReturn281) GetSpaceAfterReturn() BTPSpace10 {
	if v, ok := o.GetSpaceAfterReturnOk(); ok {
		return *v
	}
	var zero BTPSpace10
	return zero
}

// GetSpaceAfterReturnOk returns a tuple with the SpaceAfterReturn field
// value if set, nil otherwise, and a boolean reporting whether the value
// has been set.
func (o *BTPStatementReturn281) GetSpaceAfterReturnOk() (*BTPSpace10, bool) {
	if o == nil || o.SpaceAfterReturn == nil {
		return nil, false
	}
	return o.SpaceAfterReturn, true
}

// HasSpaceAfterReturn reports whether the SpaceAfterReturn field has been set.
func (o *BTPStatementReturn281) HasSpaceAfterReturn() bool {
	return o != nil && o.SpaceAfterReturn != nil
}

// SetSpaceAfterReturn stores a copy of the given BTPSpace10 in the
// SpaceAfterReturn field.
func (o *BTPStatementReturn281) SetSpaceAfterReturn(v BTPSpace10) {
	o.SpaceAfterReturn = &v
}
// GetValue returns the Value field value if set, zero value otherwise.
func (o *BTPStatementReturn281) GetValue() BTPExpression9 {
	if v, ok := o.GetValueOk(); ok {
		return *v
	}
	var zero BTPExpression9
	return zero
}

// GetValueOk returns a tuple with the Value field value if set, nil
// otherwise, and a boolean reporting whether the value has been set.
func (o *BTPStatementReturn281) GetValueOk() (*BTPExpression9, bool) {
	if o == nil || o.Value == nil {
		return nil, false
	}
	return o.Value, true
}

// HasValue reports whether the Value field has been set.
func (o *BTPStatementReturn281) HasValue() bool {
	return o != nil && o.Value != nil
}

// SetValue stores a copy of the given BTPExpression9 in the Value field.
func (o *BTPStatementReturn281) SetValue(v BTPExpression9) {
	o.Value = &v
}
// MarshalJSON flattens the embedded BTPStatement269 into a generic map
// first, then overlays this type's own optional fields before encoding,
// so the output is a single flat JSON object.
func (o BTPStatementReturn281) MarshalJSON() ([]byte, error) {
	fields := map[string]interface{}{}
	base, err := json.Marshal(o.BTPStatement269)
	if err != nil {
		return []byte{}, err
	}
	if err = json.Unmarshal(base, &fields); err != nil {
		return []byte{}, err
	}
	if o.BtType != nil {
		fields["btType"] = o.BtType
	}
	if o.SpaceAfterReturn != nil {
		fields["spaceAfterReturn"] = o.SpaceAfterReturn
	}
	if o.Value != nil {
		fields["value"] = o.Value
	}
	return json.Marshal(fields)
}
// NullableBTPStatementReturn281 distinguishes an explicitly-null
// BTPStatementReturn281 from one that was never set.
type NullableBTPStatementReturn281 struct {
	value *BTPStatementReturn281
	isSet bool
}

// Get returns the wrapped value, which may be nil.
func (v NullableBTPStatementReturn281) Get() *BTPStatementReturn281 {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableBTPStatementReturn281) Set(val *BTPStatementReturn281) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set or UnmarshalJSON has been called.
func (v NullableBTPStatementReturn281) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableBTPStatementReturn281) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableBTPStatementReturn281 wraps val in an already-set wrapper.
func NewNullableBTPStatementReturn281(val *BTPStatementReturn281) *NullableBTPStatementReturn281 {
	return &NullableBTPStatementReturn281{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (JSON null when nil).
func (v NullableBTPStatementReturn281) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks the wrapper set.
func (v *NullableBTPStatementReturn281) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package tree
import (
"strings"
"github.com/Allenxuxu/dsa/queue"
)
// Element is the payload stored in the tree. Less supplies the ordering
// used for insertion; String supplies human-readable output.
type Element interface {
	Less(e Element) bool
	String() string
}

// Node is a single tree node with parent/child links.
type Node struct {
	data   Element
	parent *Node
	left   *Node
	right  *Node
}

// BinaryTree is a binary search tree ordered by Element.Less.
type BinaryTree struct {
	root *Node
}
// NewBinaryTree returns an empty binary search tree.
func NewBinaryTree() *BinaryTree {
	return new(BinaryTree)
}
// Insert adds data to the tree, descending left when data is Less than the
// current node and right otherwise. It returns false without inserting when
// a node whose Element compares equal with == is met on the descent path,
// true otherwise.
func (t *BinaryTree) Insert(data Element) bool {
	n := &Node{data: data}
	if t.root == nil {
		t.root = n
		return true
	}
	cur := t.root
	for {
		if cur.data == n.data {
			return false
		}
		if n.data.Less(cur.data) {
			if cur.left == nil {
				cur.left = n
				n.parent = cur
				return true
			}
			cur = cur.left
		} else {
			if cur.right == nil {
				cur.right = n
				n.parent = cur
				return true
			}
			cur = cur.right
		}
	}
}
// Delete removes the node carrying data (located via find) from the tree.
// It is a no-op when no such node exists.
//
// Fixes over the previous version:
//   - deleting the root no longer dereferences a nil parent;
//   - a node with two children is handled correctly even when it is its
//     parent's right child (the old code always rewired parent.left);
//   - single-child cases rewire child.parent instead of incorrectly
//     assigning toDelete.left.right = toDelete.parent.
func (t *BinaryTree) Delete(data Element) {
	toDelete := t.find(data)
	if toDelete == nil {
		return
	}
	if toDelete.left != nil && toDelete.right != nil {
		// Two children: copy the in-order successor's payload into this
		// node, then remove the successor instead. The successor is the
		// minimum of the right subtree, so it has no left child.
		next := min(toDelete.right)
		toDelete.data = next.data
		toDelete = next
	}
	// toDelete now has at most one child.
	child := toDelete.left
	if child == nil {
		child = toDelete.right
	}
	if child != nil {
		child.parent = toDelete.parent
	}
	switch {
	case toDelete.parent == nil:
		t.root = child // deleting the root
	case toDelete.parent.left == toDelete:
		toDelete.parent.left = child
	default:
		toDelete.parent.right = child
	}
	// Fully detach the removed node.
	toDelete.parent, toDelete.left, toDelete.right = nil, nil, nil
}
// min returns the leftmost (smallest) node of the subtree rooted at root,
// or nil when the subtree is empty.
func min(root *Node) *Node {
	cur := root
	for cur != nil && cur.left != nil {
		cur = cur.left
	}
	return cur
}

// Min returns the smallest Element in the tree, or nil when the tree is empty.
func (t *BinaryTree) Min() Element {
	if n := min(t.root); n != nil {
		return n.data
	}
	return nil
}
// max returns the rightmost (largest) node of the subtree rooted at root,
// or nil when the subtree is empty.
func max(root *Node) *Node {
	cur := root
	for cur != nil && cur.right != nil {
		cur = cur.right
	}
	return cur
}

// Max returns the largest Element in the tree, or nil when the tree is empty.
func (t *BinaryTree) Max() Element {
	if n := max(t.root); n != nil {
		return n.data
	}
	return nil
}
// find locates the node whose Element compares equal to data with == and
// returns it, or nil when absent. It scans the whole tree breadth-first
// (O(n)) rather than descending by Less — presumably because == identity
// need not agree with the Less ordering; confirm before optimizing.
func (t *BinaryTree) find(data Element) *Node {
	q := queue.LinkQueue{}
	// level-order traversal (breadth-first)
	for current := t.root; current != nil; {
		if current.data == data {
			return current
		} else {
			if current.left != nil {
				q.Push(current.left)
			}
			if current.right != nil {
				q.Push(current.right)
			}
		}
		e, ok := q.Pop()
		if ok {
			current = e.(*Node)
		} else {
			current = nil
		}
	}
	return nil
}
// String renders the tree as a comma-separated list of element strings in
// level (breadth-first) order.
func (t *BinaryTree) String() string {
	var parts []string
	pending := queue.LinkQueue{}
	cur := t.root
	for cur != nil {
		parts = append(parts, cur.data.String())
		if cur.left != nil {
			pending.Push(cur.left)
		}
		if cur.right != nil {
			pending.Push(cur.right)
		}
		if e, ok := pending.Pop(); ok {
			cur = e.(*Node)
		} else {
			cur = nil
		}
	}
	return strings.Join(parts, ",")
}
// LevelTraversal pushes the data of every node in the subtree rooted at
// node onto q in level (breadth-first) order, driving the walk with a
// temporary queue of nodes. Nothing is pushed when node is nil.
func (t *BinaryTree) LevelTraversal(node *Node, q *queue.LinkQueue) {
	if node != nil {
		tmp := queue.NewLinkQueue()
		tmp.Push(node)
		q.Push(node.data) // the root's data is emitted up front
		for tmp.Length() != 0 {
			p, ok := tmp.Pop()
			if !ok {
				break
			}
			n := p.(*Node)
			// Children are enqueued and emitted as they are discovered.
			if n.left != nil {
				tmp.Push(n.left)
				q.Push(n.left.data)
			}
			if n.right != nil {
				tmp.Push(n.right)
				q.Push(n.right.data)
			}
		}
	}
}
func (t *BinaryTree) PreOrderTraversal(node *Node, q *queue.LinkQueue) {
if node != nil {
q.Push(node.data)
t.PreOrderTraversal(node.left, q)
t.PreOrderTraversal(node.right, q)
}
}
func (t *BinaryTree) InOrderTraversal(node *Node, q *queue.LinkQueue) {
if node != nil {
t.InOrderTraversal(node.left, q)
q.Push(node.data)
t.InOrderTraversal(node.right, q)
}
}
func (t *BinaryTree) PostOrderTraversal(node *Node, q *queue.LinkQueue) {
if node != nil {
t.PostOrderTraversal(node.left, q)
t.PostOrderTraversal(node.right, q)
q.Push(node.data)
}
} | tree/binary_tree.go | 0.624523 | 0.422743 | binary_tree.go | starcoder |
package consistenthash
import (
"github.com/zjbztianya/go-misc/hashkit"
"errors"
"math/big"
"sort"
)
// Maglev consistent hashing algorithm
// paper:https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/44824.pdf
type Maglev struct {
	// permutation caches each node's preference order over the table slots.
	permutation map[string][]uint32
	// entry is the lookup table: slot index -> index into nodes.
	entry []int
	// nodes is the sorted list of backend node names.
	nodes []string
	// numBuckets is the (prime) size of the lookup table.
	numBuckets uint64
	// h1 derives a node's starting offset; h2 derives its skip stride.
	h1, h2 hashkit.HashFunc64
}
// NewMaglev builds a Maglev lookup table over the given backend nodes.
// numBuckets must be prime (required by the permutation construction) and
// nodes must be non-empty. h1 and h2 are the two hash functions used to
// derive each node's permutation.
func NewMaglev(nodes []string, numBuckets uint64, h1, h2 hashkit.HashFunc64) (*Maglev, error) {
	if !big.NewInt(int64(numBuckets)).ProbablyPrime(0) {
		return nil, errors.New("lookup table size must be prime")
	}
	if len(nodes) == 0 {
		return nil, errors.New("node nums must be greater than zero")
	}
	mag := &Maglev{
		permutation: make(map[string][]uint32),
		numBuckets:  numBuckets,
		nodes:       append([]string(nil), nodes...), // private sorted copy
		h1:          h1,
		h2:          h2,
	}
	sort.Strings(mag.nodes)
	for _, node := range mag.nodes {
		mag.permutation[node] = mag.generatePermutation(node)
	}
	mag.populate()
	return mag, nil
}
// generatePermutation guarantees the permutation array to be a full permutation, proof as follows:
// Suppose that permutation[] is not a full permutation of 0,1... ,m-1,
// then there exists permutation[i] which is equal to permutation[j].
// Then the following equations hold.
// 1.(offset + i * skip) % m == (offset + j * skip) % m
// 2.(i * skip) % m == (j * skip) % m
// 3.(i - j) * skip == x * m , assuming i > j and x >= 1 (congruence modulo)
// 4.(i - j ) * skip / x == m
// Since 1 <= skip < m, 1 <= (i - j) < m, and m is a prime number, Equation 4 cannot hold.
func (m *Maglev) generatePermutation(node string) []uint32 {
	// offset in [0, m) and skip in [1, m), both derived from the node name.
	offset := m.h1([]byte(node)) % m.numBuckets
	skip := m.h2([]byte(node))%(m.numBuckets-1) + 1
	permutation := make([]uint32, m.numBuckets)
	for j := uint64(0); j < m.numBuckets; j++ {
		permutation[j] = uint32((offset + j*skip) % m.numBuckets)
	}
	return permutation
}
// populate (re)builds the lookup table m.entry from each node's permutation
// using the table-population loop from the Maglev paper: nodes take turns
// claiming their next preferred, still-unclaimed slot until every one of
// the numBuckets entries is owned by some node.
func (m *Maglev) populate() {
	// next[i] is the position in node i's permutation to try next.
	next := make([]uint32, len(m.nodes))
	m.entry = make([]int, m.numBuckets)
	for i := uint64(0); i < m.numBuckets; i++ {
		m.entry[i] = -1 // -1 marks an unclaimed slot
	}
	var n uint64 // number of slots claimed so far
	for {
		for i, node := range m.nodes {
			permutation := m.permutation[node]
			c := permutation[next[i]]
			// Skip preferences already claimed by other nodes.
			for m.entry[c] >= 0 {
				next[i]++
				c = permutation[next[i]]
			}
			m.entry[c] = i
			next[i]++
			n++
			if n == m.numBuckets {
				return
			}
		}
	}
}
// AddNode inserts node into the sorted node list, derives its permutation,
// and rebuilds the lookup table. It errors when the node already exists.
func (m *Maglev) AddNode(node string) error {
	i := sort.SearchStrings(m.nodes, node)
	if i < len(m.nodes) && m.nodes[i] == node {
		return errors.New("node already exist")
	}
	// Insert at the sorted position i.
	m.nodes = append(m.nodes, "")
	copy(m.nodes[i+1:], m.nodes[i:])
	m.nodes[i] = node
	m.permutation[node] = m.generatePermutation(node)
	m.populate()
	return nil
}

// RemoveNode deletes node from the node list and its cached permutation,
// then rebuilds the lookup table. It errors when the node is unknown.
func (m *Maglev) RemoveNode(node string) error {
	i := sort.SearchStrings(m.nodes, node)
	if i >= len(m.nodes) || m.nodes[i] != node {
		return errors.New("node not find")
	}
	m.nodes = append(m.nodes[:i], m.nodes[i+1:]...)
	delete(m.permutation, node)
	m.populate()
	return nil
}
func (m *Maglev) Lookup(key uint64) string {
if len(m.nodes) == 0 {
return ""
}
return m.nodes[m.entry[key%m.numBuckets]]
} | consistenthash/maglev.go | 0.626238 | 0.418935 | maglev.go | starcoder |
package main
/*
A Function which parses a line of code and returns the new position of the pointer and the new memory value
- It also executes the actions of the line of code like printing, moving the pointer, conditionals, loops, etc.
- The Function gets a slice of tokens from the lexer by passing the line to it. It works with the tokens to execute the actions of the line.
Function Parameters:
- line: string - the line of code that is being parsed
- lineNumber: int - the line number of the line of code that is being parsed
- p: int - the current position of the pointer from the main function
- m: int - the current memory value from the main function
Return Values:
- p: int - the new position of the pointer
- m: int - the new memory value
*/
func parse(line string, lineNumber int, p int, m int) (int, int) {
tokens := lex(line, lineNumber)
p, m = parser(tokens, line, lineNumber, p, m)
return p, m
}
/*
Function that gets the tokens of the line that is currently being parsed
For Conditionals and Looping, the function will call itself recursively until it
reaches the end of the line finding the statement in the current condition
Parameters:
- tokens: []token - the tokens of the line currently being parsed
- line: string - the line currently being parsed
- lineNumber: int - the line number of the line currently being parsed
- p: int - the current position of the pointer
- m: int - the current memory value
Return Values:
- p: int - the new position of the pointer
- m: int - the new memory value
*/
func parser(tokens []token, line string, lineNumber int, p int, m int) (int, int) {
if len(tokens) == 0 {
return p, m
}
if tokens[0].tokenType == "ACTION" {
actionTokens := checkAction(tokens, line, lineNumber)
if actionTokens != nil {
p, m = pointerMovements(actionTokens, line, lineNumber, p, m) // Movement of the pointer
}
} else if tokens[0].tokenType == "PRINT" {
printTokens := checkPrint(tokens, line, lineNumber)
if printTokens != nil {
p, m = printStuff(printTokens, p, m)
}
} else if tokens[0].tokenType == "MEMORY" {
if len(tokens) == 3 {
assignmentTokens := checkAssignment(tokens, line, lineNumber)
if assignmentTokens != nil {
p, m = assignMemory(assignmentTokens, p)
}
} else if len(tokens) == 5 {
arithmeticTokens := checkArithmetic(tokens, line, lineNumber)
if arithmeticTokens != nil {
p, m = doArithmetic(arithmeticTokens, line, lineNumber, p, m)
}
} else {
printExpectedTokenError(line, lineNumber, "pointer or memory or number")
}
} else if tokens[0].tokenType == "CONDITION" {
conditionTokens := checkCondition(tokens, line, lineNumber)
if conditionTokens != nil {
p, m = doConditionalCheck(tokens, conditionTokens, line, lineNumber, p, m)
}
} else if tokens[0].tokenType == "LOOP" {
loopTokens := checkLoop(tokens, line, lineNumber)
if loopTokens != nil {
p, m = doLoops(tokens, loopTokens, line, lineNumber, p, m)
}
} else {
printParseError(line, lineNumber, "")
}
return p, m
} | parser.go | 0.673729 | 0.723712 | parser.go | starcoder |
package gown
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
/*
From wndb(5WN):
For each syntactic category, two files are needed to represent the contents of
the WordNet database - index. pos and data. pos, where pos is noun, verb,
adj and adv . The other auxiliary files are used by the WordNet library's
searching functions and are needed to run the various WordNet browsers.
Each index file is an alphabetized list of all the words found in WordNet in
the corresponding part of speech. On each line, following the word, is a list
of byte offsets (synset_offset s) in the corresponding data file, one for each
synset containing the word. Words in the index file are in lower case only,
regardless of how they were entered in the lexicographer files. This folds
various orthographic representations of the word into one line enabling
database searches to be case insensitive. See wninput(5WN) for a detailed
description of the lexicographer files
A data file for a syntactic category contains information corresponding to the
synsets that were specified in the lexicographer files, with relational
pointers resolved to synset_offset s. Each line corresponds to a synset.
Pointers are followed and hierarchies traversed by moving from one synset to
another via the synset_offset s.
*/
// dataIndex maps a lowercase lemma to its parsed index-file entry.
type dataIndex map[string]DataIndexEntry

// DataIndexEntry holds the fields parsed from one line of an index.POS file.
type DataIndexEntry struct {
	PartOfSpeech  int   // numeric part-of-speech id (from the one-char pos tag)
	SynsetCount   int   // number of senses of the <lemma, pos> pair
	Relationships []int // pointer types the lemma has in all synsets containing it
	TagSenseCount int   // the tagsense_cnt field of the index line
	SynsetOffsets []int // byte offsets into data.POS, one per containing synset
}

// dataFile maps a synset byte offset to the Synset parsed at that offset.
type dataFile map[int]Synset

// Synset holds the fields parsed from one line of a data.POS file.
type Synset struct {
	SynsetOffset       int                // byte offset of this synset in its data file
	LexographerFilenum int                // lexicographer file number
	PartOfSpeech       int                // numeric part-of-speech id
	Words              []string           // lemmas belonging to this synset
	LexIds             []int              // one lex id per word
	Relationships      []RelationshipEdge // resolved pointers to other synsets
	Gloss              string             // text after the '|' separator
}

// RelationshipEdge is one resolved pointer from a source synset to a target.
type RelationshipEdge struct {
	RelationshipType int // ANTONYM_RELATIONSHIP, etc.
	SynsetOffset     int // synset offset of the target
	PartOfSpeech     int // part-of-speech of target
	SourceWordNumber int // word number of the source
	TargetWordNumber int // word number of the target
}

// DataIndexPair couples a lemma with its index entry, as emitted by
// DataIndexIterator.
type DataIndexPair struct {
	Lexeme     string
	IndexEntry DataIndexEntry
}
// DataIndexIterator streams every (lemma, entry) pair of di over a channel,
// closing it once the map is exhausted so range loops terminate. Iteration
// order is map order, i.e. unspecified.
func DataIndexIterator(di *dataIndex) <-chan DataIndexPair {
	out := make(chan DataIndexPair)
	go func() {
		defer close(out) // Remember to close or the loop never ends!
		for lexeme, entry := range *di {
			out <- DataIndexPair{
				Lexeme:     lexeme,
				IndexEntry: entry,
			}
		}
	}()
	return out
}
// readPosIndex reads an index.POS (e.g. index.noun, index.verb, etc.) file
// and populates a dataIndex. The index line format is:
//
//	lemma pos synset_cnt p_cnt [ptr_symbol...] sense_cnt tagsense_cnt synset_offset [synset_offset...]
//
// Fixes over the previous version: defer Close only after a successful
// Open; report read errors instead of panicking; the broken
// fmt.Errorf("..." + filename, err) format-string concatenation is gone;
// the shadowed readerr no longer leaves the loop condition dead; and the
// comment-line check uses HasPrefix (the old line[0:2] == " " compared two
// bytes to one and could panic on short lines).
func readPosIndex(posIndexFilename string) (*dataIndex, error) {
	index := dataIndex{}
	infile, err := os.Open(posIndexFilename)
	if err != nil {
		return nil, fmt.Errorf("can't open %s: %v", posIndexFilename, err)
	}
	defer infile.Close()
	r := bufio.NewReader(infile)
	for {
		bytebuf, readerr := r.ReadBytes('\n')
		if readerr != nil && readerr != io.EOF {
			return nil, fmt.Errorf("can't read %s: %v", posIndexFilename, readerr)
		}
		if len(bytebuf) == 0 {
			break
		}
		line := string(bytebuf)
		// License/header lines in WordNet index files start with two spaces.
		if strings.HasPrefix(line, "  ") {
			if readerr == io.EOF {
				break
			}
			continue
		}
		fields := strings.SplitN(strings.TrimSpace(line), " ", -1)
		lemma := readStoredLemma(fields[0])
		posTag := oneCharPosTagToPosId(fields[1])
		synsetCnt, _ := strconv.Atoi(fields[2]) // number of senses of the <lemma, pos> pair
		pCnt, _ := strconv.Atoi(fields[3])      // number of different pointers lemma has in all containing synsets
		fieldIndex := 4
		relationships := make([]int, pCnt)
		// Consume pCnt pointer symbols.
		for i := 0; i < pCnt; i++ {
			relationships[i] = RELATIONSHIP_POINTER_SYMBOLS[fields[fieldIndex]]
			fieldIndex++
		}
		fieldIndex++ // sense_cnt is redundant with synset_cnt, so skip it
		tagsenseCnt, _ := strconv.Atoi(fields[fieldIndex])
		fieldIndex++
		synsetOffsets := make([]int, synsetCnt)
		for i := 0; i < synsetCnt; i++ {
			synsetOffsets[i], _ = strconv.Atoi(fields[fieldIndex])
			fieldIndex++
		}
		if _, exists := index[lemma]; exists {
			fmt.Printf("WARNING: %s already exists. Overwriting.\n", lemma)
		}
		index[lemma] = DataIndexEntry{
			PartOfSpeech:  posTag,
			SynsetCount:   synsetCnt,
			Relationships: relationships,
			TagSenseCount: tagsenseCnt,
			SynsetOffsets: synsetOffsets,
		}
		if readerr == io.EOF {
			break
		}
	}
	return &index, nil
}
// Reads a data.POS (e.g. data.noun, data.verb, etc.) file and populates
// a map of ints to dataIndexEntries. The data format is:
// synset_offset lex_filenum ss_type w_cnt word lex_id [word lex_id...] p_cnt [ptr...] [frames...] | gloss
func readPosData(posDataFilename string) (*dataFile, error) {
data := dataFile{}
infile, err := os.Open(posDataFilename)
defer infile.Close()
if err != nil {
return nil, fmt.Errorf("can't open %s: %v", posDataFilename, err)
}
r := bufio.NewReader(infile)
if (r == nil) {
return nil, fmt.Errorf("can't read %s: %v" + posDataFilename, err)
}
var readerr error = nil
for ; readerr == nil ; {
bytebuf, readerr := r.ReadBytes('\n')
if readerr != nil && readerr != io.EOF {
panic(readerr)
}
if len(bytebuf) == 0 {
break;
}
line := string(bytebuf)
if line[0:2] == " " {
// comment line
continue
}
fields := strings.SplitN(strings.TrimSpace(line), " ", -1)
synset_offset, _ := strconv.Atoi(fields[0])
lex_filenum, _ := strconv.Atoi(fields[1])
ss_type := oneCharPosTagToPosId(fields[2])
w_cnt64, _ := strconv.ParseInt(fields[3], 16, 0)
w_cnt := int(w_cnt64)
words := make([]string, w_cnt)
lex_ids := make([]int, w_cnt)
fieldIndex := 4
for i := 0; i < w_cnt; i++ {
words[i] = readStoredLemma(fields[fieldIndex])
fieldIndex++
lex_id64, _ := strconv.ParseInt(fields[fieldIndex], 16, 0)
lex_ids[i] = int(lex_id64)
fieldIndex++
}
p_cnt, _ := strconv.Atoi(fields[fieldIndex])
fieldIndex++
pointers := make([]RelationshipEdge, p_cnt)
for i := 0; i < p_cnt; i++ {
pointer_type, symbolFound := RELATIONSHIP_POINTER_SYMBOLS[fields[fieldIndex]]
if !symbolFound {
panic(fmt.Sprintf("could not handle relationship symbol %s in line <<%v>>, file %s", fields[fieldIndex], line, posDataFilename))
}
fieldIndex++
synset_offset, _ := strconv.Atoi(fields[fieldIndex])
fieldIndex++
pos := oneCharPosTagToPosId(fields[fieldIndex])
fieldIndex++
src_wordnum64, _ := strconv.ParseInt(fields[fieldIndex][0:2], 16, 0)
dest_wordnum64, _ := strconv.ParseInt(fields[fieldIndex][2:4], 16, 0)
fieldIndex++
src_word_num := int(src_wordnum64)
dest_word_num := int(dest_wordnum64)
pointers[i] = RelationshipEdge {
RelationshipType: pointer_type,
SynsetOffset: synset_offset,
PartOfSpeech: pos,
SourceWordNumber: src_word_num,
TargetWordNumber: dest_word_num,
}
}
// skip data.verb frames
pipeIndex := strings.LastIndex(line, "|")
var gloss string
if pipeIndex >= 0 {
gloss = strings.TrimSpace(line[pipeIndex + 2:])
} else {
gloss = ""
}
data[synset_offset] = Synset {
SynsetOffset: synset_offset,
LexographerFilenum: lex_filenum,
PartOfSpeech: ss_type,
Words: words,
LexIds: lex_ids,
Relationships: pointers,
Gloss: gloss,
}
}
return &data, nil
} | data_file.go | 0.582491 | 0.541591 | data_file.go | starcoder |
package signature
import (
"crypto/rand"
"crypto/sha512"
"errors"
"math/big"
"strconv"
"github.com/alecthomas/binary"
"github.com/qantik/ratcheted/primitives"
)
const (
	bellareSecurity  = 512  // security parameter in bits.
	bellareNumPoints = 10   // number of points in the keys.
	bellareMaxPeriod = 1000 // maximum value of allowed key evolutions.
)

// Bellare implements the forward-secure digital signature schemes proposed
// by <NAME> and <NAME> in their 1999 paper A Forward-Secure Digital
// Signature Scheme.
type Bellare struct{}

// bellarePublicKey bundles the public key material.
type bellarePublicKey struct {
	// N is the public modulus produced by Generate.
	N []byte
	//U [bellareNumPoints][]byte
	// U holds the public points, one per secret point in the private key.
	U [][]byte
}

// bellarePrivateKey bundles the secret key material.
type bellarePrivateKey struct {
	// N is the public modulus; kept here so Update/Sign need no public key.
	N []byte
	//S [bellareNumPoints][]byte
	// S holds the secret points, squared modulo N on every Update.
	S [][]byte
	J int // J specifies the current period of this private key.
}

// bellareSignature bundles signature material.
type bellareSignature struct {
	// Y and Z are the commitment and response values produced by Sign.
	Y, Z []byte
	// J is the key period the signature was created in.
	J int
}
// NewBellare creates a fresh Bellare protocol instance.
func NewBellare() *Bellare {
	return new(Bellare)
}
// Generate creates a Bellare public/private key pair.
//
// The modulus N = p*q is built from two primes that are both congruent to
// 3 mod 4. Each secret point s_i is drawn from [0, N) (rejecting zero) and
// published as u_i = s_i^(2^(T+1)) mod N, where T = bellareMaxPeriod.
func (b Bellare) Generate() (pk, sk []byte, err error) {
	var p *big.Int
	var q *big.Int
	// Resample until both primes are congruent to 3 mod 4.
	for {
		p, _ = rand.Prime(rand.Reader, bellareSecurity/2)
		q, _ = rand.Prime(rand.Reader, bellareSecurity/2)
		rp := new(big.Int).Mod(p, big.NewInt(4))
		rq := new(big.Int).Mod(q, big.NewInt(4))
		if rp.Uint64() == 3 && rq.Uint64() == 3 {
			break
		}
	}
	N := new(big.Int).Mul(p, q)
	S := make([][]byte, bellareNumPoints)
	U := make([][]byte, bellareNumPoints)
	for i := 0; i < bellareNumPoints; i++ {
		var s *big.Int
		// Reject s == 0 (the check inspects the low 64 bits).
		for {
			s, _ = rand.Int(rand.Reader, N)
			if s.Uint64() != 0 {
				break
			}
		}
		// u = s^(2^(maxPeriod+1)) mod N.
		e := new(big.Int).Exp(big.NewInt(2), big.NewInt(bellareMaxPeriod+1), nil)
		u := new(big.Int).Exp(s, e, N)
		S[i], U[i] = s.Bytes(), u.Bytes()
	}
	pk, err = binary.Marshal(&bellarePublicKey{N: N.Bytes(), U: U})
	if err != nil {
		return
	}
	sk, err = binary.Marshal(&bellarePrivateKey{N: N.Bytes(), S: S, J: 0})
	return
}
// Update evolves a private key into the next period by squaring every
// secret point modulo N and incrementing the period counter. It fails once
// the key has exceeded the maximum number of allowed evolutions.
func (b Bellare) Update(sk []byte) ([]byte, error) {
	var private bellarePrivateKey
	if err := binary.Unmarshal(sk, &private); err != nil {
		return nil, err
	}
	if private.J > bellareMaxPeriod {
		return nil, errors.New("private key has surpassed max period")
	}
	mod := new(big.Int).SetBytes(private.N)
	two := big.NewInt(2)
	evolved := make([][]byte, bellareNumPoints)
	for i := 0; i < bellareNumPoints; i++ {
		s := new(big.Int).SetBytes(private.S[i])
		evolved[i] = s.Exp(s, two, mod).Bytes()
	}
	return binary.Marshal(&bellarePrivateKey{N: private.N, S: evolved, J: private.J + 1})
}
// Sign creates a Bellare signature of a given message under the key's
// current period j: pick a random R, commit Y = R^(2^(T+1-j)) mod N,
// derive challenge bits c = H(j, Y, msg), and answer
// Z = R * prod(s_i^c_i) mod N.
func (b Bellare) Sign(sk, msg []byte) ([]byte, error) {
	var private bellarePrivateKey
	if err := binary.Unmarshal(sk, &private); err != nil {
		return nil, err
	}
	n := new(big.Int).SetBytes(private.N)
	// Random non-zero R in [0, N) (zero check inspects the low 64 bits).
	var R *big.Int
	for {
		R, _ = rand.Int(rand.Reader, n)
		if R.Uint64() != 0 {
			break
		}
	}
	// Commitment Y = R^(2^(maxPeriod+1-J)) mod N.
	e := new(big.Int).Exp(big.NewInt(2), big.NewInt(bellareMaxPeriod+1-int64(private.J)), nil)
	Y := new(big.Int).Exp(R, e, n)
	// Challenge derived from the period, the commitment and the message.
	digest := primitives.Digest(sha512.New(), []byte(strconv.Itoa(private.J)), Y.Bytes(), msg)
	c := new(big.Int).SetBytes(digest)
	// Multiply in s_i for every set challenge bit i.
	P := big.NewInt(1)
	for i := 0; i < bellareNumPoints; i++ {
		e := new(big.Int).And(new(big.Int).Rsh(c, uint(i)), big.NewInt(1))
		s := new(big.Int).SetBytes(private.S[i])
		P = new(big.Int).Mul(P, new(big.Int).Exp(s, e, nil))
	}
	P = new(big.Int).Mul(R, P)
	Z := new(big.Int).Mod(P, n)
	return binary.Marshal(&bellareSignature{Y: Y.Bytes(), Z: Z.Bytes(), J: private.J})
}
// Verify checks the validity of a given signature by recomputing the
// challenge c = H(j, Y, msg) and testing
// Z^(2^(T+1-j)) == Y * prod(u_i^c_i) (mod N).
func (b Bellare) Verify(pk, msg, sig []byte) error {
	var public bellarePublicKey
	if err := binary.Unmarshal(pk, &public); err != nil {
		return err
	}
	var signature bellareSignature
	if err := binary.Unmarshal(sig, &signature); err != nil {
		return err
	}
	y, z := new(big.Int).SetBytes(signature.Y), new(big.Int).SetBytes(signature.Z)
	n := new(big.Int).SetBytes(public.N)
	// Recompute the challenge from the period, the commitment and msg.
	digest := primitives.Digest(sha512.New(), []byte(strconv.Itoa(signature.J)), y.Bytes(), msg)
	c := new(big.Int).SetBytes(digest)
	// Left side: Z^(2^(maxPeriod+1-J)) mod N.
	e := new(big.Int).Exp(big.NewInt(2), big.NewInt(bellareMaxPeriod+1-int64(signature.J)), nil)
	L := new(big.Int).Exp(z, e, n)
	// Right side: Y * prod(u_i^c_i) mod N.
	P := big.NewInt(1)
	for i := 0; i < bellareNumPoints; i++ {
		e := new(big.Int).And(new(big.Int).Rsh(c, uint(i)), big.NewInt(1))
		u := new(big.Int).SetBytes(public.U[i])
		P = new(big.Int).Mul(P, new(big.Int).Exp(u, e, nil))
	}
	P = new(big.Int).Mul(y, P)
	R := new(big.Int).Mod(P, n)
	if L.Cmp(R) != 0 {
		return errors.New("unable to verify signature")
	}
	return nil
}
// verify is a simple example that shows how a verifiable map can be used to
// demonstrate inclusion.
package main
import (
"bytes"
"crypto"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"path/filepath"
"github.com/golang/glog"
"github.com/google/trillian/experimental/batchmap"
coniks "github.com/google/trillian/merkle/coniks/hasher"
"github.com/google/trillian/merkle/hashers"
"github.com/google/trillian/merkle/smt"
"github.com/google/trillian/storage/tree"
)
// hash is the algorithm used to derive a key seed's path in the map.
const hash = crypto.SHA512_256

// Command-line configuration; must match the parameters the map was built with.
var (
	mapDir       = flag.String("map_dir", "", "Directory containing map tiles.")
	treeID       = flag.Int64("tree_id", 12345, "The ID of the tree. Used as a salt in hashing.")
	valueSalt    = flag.String("value_salt", "v1", "Some string that will be smooshed in with the generated value before hashing. Allows generated values to be deterministic but variable.")
	key          = flag.Int64("key", 0, "This is the seed for the key that will be looked up.")
	prefixStrata = flag.Int("prefix_strata", 1, "The number of strata of 8-bit strata before the final strata.")
)
// main checks that the configured key/value pair is committed to by the
// map tiles on disk, walking from the leaf tile up to the root tile.
func main() {
	flag.Parse()
	mapDir := filepath.Clean(*mapDir)
	if mapDir == "" {
		glog.Fatal("No output provided")
	}
	// Determine the key/value we expect to find.
	// Note that the map tiles do not contain raw values, but commitments to the values.
	// If the map needs to return the values to clients then it is recommended that the
	// map operator uses a Content Addressable Store to store these values.
	h := hash.New()
	h.Write([]byte(fmt.Sprintf("%d", *key)))
	keyPath := h.Sum(nil)
	expectedString := fmt.Sprintf("[%s]%d", *valueSalt, *key)
	expectedValueHash := coniks.Default.HashLeaf(*treeID, keyPath, []byte(expectedString))
	// Read the tiles required for this check from disk.
	tiles, err := getTilesForKey(mapDir, keyPath)
	if err != nil {
		glog.Exitf("couldn't load tiles: %v", err)
	}
	// Perform the verification.
	// 1) Start at the leaf tile and check the key/value.
	// 2) Compute the merkle root of the leaf tile
	// 3) Check the computed root matches that reported in the tile
	// 4) Check this root value is the key/value of the tile above.
	// 5) Rinse and repeat until we reach the tree root.
	et := emptyTree{treeID: *treeID, hasher: coniks.Default}
	needPath, needValue := keyPath, expectedValueHash
	for i := *prefixStrata; i >= 0; i-- {
		tile := tiles[i]
		// Check the prefix of what we are looking for matches the tile's path.
		if got, want := tile.Path, needPath[:len(tile.Path)]; !bytes.Equal(got, want) {
			glog.Fatalf("wrong tile found at index %d: got %x, want %x", i, got, want)
		}
		// Leaf paths within a tile are within the scope of the tile, so we can
		// drop the prefix from the expected path now we have verified it.
		needLeafPath := needPath[len(tile.Path):]
		// Identify the leaf we need, and convert all leaves to the format needed for hashing.
		var leaf *batchmap.TileLeaf
		nodes := make([]smt.Node, len(tile.Leaves))
		for j, l := range tile.Leaves {
			if bytes.Equal(l.Path, needLeafPath) {
				leaf = l
			}
			nodes[j] = toNode(tile.Path, l)
		}
		// Confirm we found the leaf we needed, and that it had the value we expected.
		if leaf == nil {
			glog.Fatalf("couldn't find expected leaf %x in tile %x", needLeafPath, tile.Path)
		}
		// TODO(pavelkalinnikov): Remove nolint after fixing
		// https://github.com/dominikh/go-tools/issues/921.
		if !bytes.Equal(leaf.Hash, needValue) { // nolint: staticcheck
			glog.Fatalf("wrong leaf value in tile %x, leaf %x: got %x, want %x", tile.Path, leaf.Path, leaf.Hash, needValue)
		}
		// Hash this tile given its leaf values, and confirm that the value we compute
		// matches the value reported in the tile.
		hs, err := smt.NewHStar3(nodes, et.hasher.HashChildren,
			uint(len(tile.Path)+len(leaf.Path))*8, uint(len(tile.Path))*8)
		if err != nil {
			glog.Fatalf("failed to create HStar3 for tile %x: %v", tile.Path, err)
		}
		res, err := hs.Update(et)
		if err != nil {
			glog.Fatalf("failed to hash tile %x: %v", tile.Path, err)
		} else if got, want := len(res), 1; got != want {
			glog.Fatalf("wrong number of roots for tile %x: got %v, want %v", tile.Path, got, want)
		}
		if got, want := res[0].Hash, tile.RootHash; !bytes.Equal(got, want) {
			glog.Fatalf("wrong root hash for tile %x: got %x, calculated %x", tile.Path, want, got)
		}
		// Make the next iteration of the loop check that the tile above this has the
		// root value of this tile stored as the value at the expected leaf index.
		needPath, needValue = tile.Path, res[0].Hash
	}
	// If we get here then we have proved that the value was correct and that the map
	// root commits to this value. Any other user with the same map root must see the
	// same value under the same key we have checked.
	glog.Infof("key %d found at path %x, with value '%s' (%x) committed to by map root %x", *key, keyPath, expectedString, expectedValueHash, needValue)
}
// getTilesForKey loads the tiles on the path from the root to the given leaf.
// Tile i covers the first i bytes of the key, so prefixStrata+1 tiles are read
// in total. Errors now wrap the underlying cause with %w so callers can use
// errors.Is/errors.As on I/O and parse failures.
func getTilesForKey(mapDir string, key []byte) ([]*batchmap.Tile, error) {
	tiles := make([]*batchmap.Tile, *prefixStrata+1)
	for i := 0; i <= *prefixStrata; i++ {
		tilePath := key[0:i]
		tileFile := fmt.Sprintf("%s/path_%x", mapDir, tilePath)
		in, err := ioutil.ReadFile(tileFile)
		if err != nil {
			return nil, fmt.Errorf("failed to read file %s: %w", tileFile, err)
		}
		tile := &batchmap.Tile{}
		if err := json.Unmarshal(in, tile); err != nil {
			return nil, fmt.Errorf("failed to parse tile in %s: %w", tileFile, err)
		}
		tiles[i] = tile
	}
	return tiles, nil
}
// toNode converts a TileLeaf into the equivalent Node for HStar3. The node's
// full path is the tile prefix concatenated with the leaf's in-tile path.
func toNode(prefix []byte, l *batchmap.TileLeaf) smt.Node {
	full := make([]byte, 0, len(prefix)+len(l.Path))
	full = append(full, prefix...)
	full = append(full, l.Path...)
	return smt.Node{
		ID:   tree.NewNodeID2(string(full), uint(len(full))*8),
		Hash: l.Hash,
	}
}
// emptyTree is a NodeAccessor for an empty tree with the given ID.
// It serves HStar3 reads by synthesizing the hash of an all-empty subtree.
type emptyTree struct {
	treeID int64             // tree ID mixed into the empty-subtree hashes
	hasher hashers.MapHasher // hasher used to derive empty-node values
}
// Get returns the hash of the empty subtree rooted at id. Because the tree is
// empty, every node's value can be computed from the hasher alone.
func (e emptyTree) Get(id tree.NodeID2) ([]byte, error) {
	oldID := tree.NewNodeIDFromID2(id)
	// Height of the empty subtree below this node.
	height := e.hasher.BitLen() - oldID.PrefixLenBits
	// TODO(pavelkalinnikov): Make HashEmpty method take the NodeID2 directly,
	// batchmap is the only remaining user of the map helpers.
	return e.hasher.HashEmpty(e.treeID, oldID.Path, height), nil
}
func (e emptyTree) Set(id tree.NodeID2, hash []byte) {} | experimental/batchmap/cmd/verify/verify.go | 0.574634 | 0.475118 | verify.go | starcoder |
package box2d
import (
"fmt"
"math"
)
/// Rope joint definition. This requires two body anchor points and
/// a maximum lengths.
/// Note: by default the connected objects will not collide.
/// see collideConnected in b2JointDef.
type B2RopeJointDef struct {
	B2JointDef

	/// The local anchor point relative to bodyA's origin.
	LocalAnchorA B2Vec2

	/// The local anchor point relative to bodyB's origin.
	LocalAnchorB B2Vec2

	/// The maximum length of the rope.
	/// Warning: this must be larger than b2_linearSlop or
	/// the joint will have no effect.
	MaxLength float64
}
// MakeB2RopeJointDef returns a rope joint definition with the library
// defaults: anchors at (-1,0) and (1,0) and a zero maximum length.
func MakeB2RopeJointDef() B2RopeJointDef {
	def := B2RopeJointDef{B2JointDef: MakeB2JointDef()}
	def.Type = B2JointType.E_ropeJoint
	def.LocalAnchorA.Set(-1.0, 0.0)
	def.LocalAnchorB.Set(1.0, 0.0)
	def.MaxLength = 0.0
	return def
}
/// A rope joint enforces a maximum distance between two points
/// on two bodies. It has no other effect.
/// Warning: if you attempt to change the maximum length during
/// the simulation you will get some non-physical behavior.
/// A model that would allow you to dynamically modify the length
/// would have some sponginess, so I chose not to implement it
/// that way. See b2DistanceJoint if you want to dynamically
/// control length.
type B2RopeJoint struct {
	*B2Joint

	// Solver shared (persist across steps)
	M_localAnchorA B2Vec2
	M_localAnchorB B2Vec2
	M_maxLength    float64
	M_length       float64
	M_impulse      float64 // accumulated impulse, used for warm starting

	// Solver temp (recomputed each step in InitVelocityConstraints)
	M_indexA       int
	M_indexB       int
	M_u            B2Vec2 // unit vector from anchor A towards anchor B
	M_rA           B2Vec2
	M_rB           B2Vec2
	M_localCenterA B2Vec2
	M_localCenterB B2Vec2
	M_invMassA     float64
	M_invMassB     float64
	M_invIA        float64
	M_invIB        float64
	M_mass         float64 // effective constraint mass
	M_state        uint8   // limit state; E_atUpperLimit while the rope is taut
}
// GetLocalAnchorA returns the local anchor point relative to bodyA's origin.
func (joint B2RopeJoint) GetLocalAnchorA() B2Vec2 {
	return joint.M_localAnchorA
}

// GetLocalAnchorB returns the local anchor point relative to bodyB's origin.
func (joint B2RopeJoint) GetLocalAnchorB() B2Vec2 {
	return joint.M_localAnchorB
}

// SetMaxLength sets the maximum length of the rope.
// Warning (see type comment): changing this mid-simulation can produce
// non-physical behavior.
func (joint *B2RopeJoint) SetMaxLength(length float64) {
	joint.M_maxLength = length
}
// // Limit:
// // C = norm(pB - pA) - L
// // u = (pB - pA) / norm(pB - pA)
// // Cdot = dot(u, vB + cross(wB, rB) - vA - cross(wA, rA))
// // J = [-u -cross(rA, u) u cross(rB, u)]
// // K = J * invM * JT
// // = invMassA + invIA * cross(rA, u)^2 + invMassB + invIB * cross(rB, u)^2

// MakeB2RopeJoint builds a rope joint from its definition with the solver
// state zeroed and the limit inactive.
func MakeB2RopeJoint(def *B2RopeJointDef) *B2RopeJoint {
	joint := &B2RopeJoint{B2Joint: MakeB2Joint(def)}
	joint.M_localAnchorA = def.LocalAnchorA
	joint.M_localAnchorB = def.LocalAnchorB
	joint.M_maxLength = def.MaxLength
	joint.M_mass = 0.0
	joint.M_impulse = 0.0
	joint.M_state = B2LimitState.E_inactiveLimit
	joint.M_length = 0.0
	return joint
}
// InitVelocityConstraints caches body data for the step, computes the rope
// axis u, its current length, the effective constraint mass, and (when warm
// starting) re-applies last step's scaled impulse.
func (joint *B2RopeJoint) InitVelocityConstraints(data B2SolverData) {
	joint.M_indexA = joint.M_bodyA.M_islandIndex
	joint.M_indexB = joint.M_bodyB.M_islandIndex
	joint.M_localCenterA = joint.M_bodyA.M_sweep.LocalCenter
	joint.M_localCenterB = joint.M_bodyB.M_sweep.LocalCenter
	joint.M_invMassA = joint.M_bodyA.M_invMass
	joint.M_invMassB = joint.M_bodyB.M_invMass
	joint.M_invIA = joint.M_bodyA.M_invI
	joint.M_invIB = joint.M_bodyB.M_invI

	cA := data.Positions[joint.M_indexA].C
	aA := data.Positions[joint.M_indexA].A
	vA := data.Velocities[joint.M_indexA].V
	wA := data.Velocities[joint.M_indexA].W

	cB := data.Positions[joint.M_indexB].C
	aB := data.Positions[joint.M_indexB].A
	vB := data.Velocities[joint.M_indexB].V
	wB := data.Velocities[joint.M_indexB].W

	qA := MakeB2RotFromAngle(aA)
	qB := MakeB2RotFromAngle(aB)

	// Anchor offsets from each body's center of mass, in world orientation.
	joint.M_rA = B2RotVec2Mul(qA, B2Vec2Sub(joint.M_localAnchorA, joint.M_localCenterA))
	joint.M_rB = B2RotVec2Mul(qB, B2Vec2Sub(joint.M_localAnchorB, joint.M_localCenterB))
	joint.M_u = B2Vec2Sub(B2Vec2Sub(B2Vec2Add(cB, joint.M_rB), cA), joint.M_rA)

	joint.M_length = joint.M_u.Length()

	// C > 0 means the rope is stretched beyond its maximum length.
	C := joint.M_length - joint.M_maxLength
	if C > 0.0 {
		joint.M_state = B2LimitState.E_atUpperLimit
	} else {
		joint.M_state = B2LimitState.E_inactiveLimit
	}

	if joint.M_length > B2_linearSlop {
		joint.M_u.OperatorScalarMulInplace(1.0 / joint.M_length)
	} else {
		// Degenerate rope (anchors coincide): disable the constraint.
		joint.M_u.SetZero()
		joint.M_mass = 0.0
		joint.M_impulse = 0.0
		return
	}

	// Compute effective mass.
	crA := B2Vec2Cross(joint.M_rA, joint.M_u)
	crB := B2Vec2Cross(joint.M_rB, joint.M_u)
	invMass := joint.M_invMassA + joint.M_invIA*crA*crA + joint.M_invMassB + joint.M_invIB*crB*crB

	if invMass != 0.0 {
		joint.M_mass = 1.0 / invMass
	} else {
		joint.M_mass = 0.0
	}

	if data.Step.WarmStarting {
		// Scale the impulse to support a variable time step.
		joint.M_impulse *= data.Step.DtRatio

		P := B2Vec2MulScalar(joint.M_impulse, joint.M_u)
		vA.OperatorMinusInplace(B2Vec2MulScalar(joint.M_invMassA, P))
		wA -= joint.M_invIA * B2Vec2Cross(joint.M_rA, P)
		vB.OperatorPlusInplace(B2Vec2MulScalar(joint.M_invMassB, P))
		wB += joint.M_invIB * B2Vec2Cross(joint.M_rB, P)
	} else {
		joint.M_impulse = 0.0
	}

	data.Velocities[joint.M_indexA].V = vA
	data.Velocities[joint.M_indexA].W = wA
	data.Velocities[joint.M_indexB].V = vB
	data.Velocities[joint.M_indexB].W = wB
}
// SolveVelocityConstraints applies an impulse along the rope axis that
// removes separating velocity while the rope is taut. The accumulated
// impulse is clamped to be non-positive so the rope can only pull.
func (joint *B2RopeJoint) SolveVelocityConstraints(data B2SolverData) {
	vA := data.Velocities[joint.M_indexA].V
	wA := data.Velocities[joint.M_indexA].W
	vB := data.Velocities[joint.M_indexB].V
	wB := data.Velocities[joint.M_indexB].W

	// Cdot = dot(u, v + cross(w, r))
	vpA := B2Vec2Add(vA, B2Vec2CrossScalarVector(wA, joint.M_rA))
	vpB := B2Vec2Add(vB, B2Vec2CrossScalarVector(wB, joint.M_rB))
	C := joint.M_length - joint.M_maxLength
	Cdot := B2Vec2Dot(joint.M_u, B2Vec2Sub(vpB, vpA))

	// Predictive constraint.
	if C < 0.0 {
		Cdot += data.Step.Inv_dt * C
	}

	impulse := -joint.M_mass * Cdot
	oldImpulse := joint.M_impulse
	// Clamp the total impulse (not the increment) to <= 0: rope only pulls.
	joint.M_impulse = math.Min(0.0, joint.M_impulse+impulse)
	impulse = joint.M_impulse - oldImpulse

	P := B2Vec2MulScalar(impulse, joint.M_u)
	vA.OperatorMinusInplace(B2Vec2MulScalar(joint.M_invMassA, P))
	wA -= joint.M_invIA * B2Vec2Cross(joint.M_rA, P)
	vB.OperatorPlusInplace(B2Vec2MulScalar(joint.M_invMassB, P))
	wB += joint.M_invIB * B2Vec2Cross(joint.M_rB, P)

	data.Velocities[joint.M_indexA].V = vA
	data.Velocities[joint.M_indexA].W = wA
	data.Velocities[joint.M_indexB].V = vB
	data.Velocities[joint.M_indexB].W = wB
}
// SolvePositionConstraints removes residual overstretch positionally,
// clamping the correction to B2_maxLinearCorrection per iteration.
// It reports true once the remaining stretch is within B2_linearSlop.
func (joint *B2RopeJoint) SolvePositionConstraints(data B2SolverData) bool {
	cA := data.Positions[joint.M_indexA].C
	aA := data.Positions[joint.M_indexA].A
	cB := data.Positions[joint.M_indexB].C
	aB := data.Positions[joint.M_indexB].A

	qA := MakeB2RotFromAngle(aA)
	qB := MakeB2RotFromAngle(aB)

	rA := B2RotVec2Mul(qA, B2Vec2Sub(joint.M_localAnchorA, joint.M_localCenterA))
	rB := B2RotVec2Mul(qB, B2Vec2Sub(joint.M_localAnchorB, joint.M_localCenterB))
	u := B2Vec2Sub(B2Vec2Sub(B2Vec2Add(cB, rB), cA), rA)

	length := u.Normalize()
	C := length - joint.M_maxLength

	C = B2FloatClamp(C, 0.0, B2_maxLinearCorrection)

	impulse := -joint.M_mass * C
	P := B2Vec2MulScalar(impulse, u)

	cA.OperatorMinusInplace(B2Vec2MulScalar(joint.M_invMassA, P))
	aA -= joint.M_invIA * B2Vec2Cross(rA, P)
	cB.OperatorPlusInplace(B2Vec2MulScalar(joint.M_invMassB, P))
	aB += joint.M_invIB * B2Vec2Cross(rB, P)

	data.Positions[joint.M_indexA].C = cA
	data.Positions[joint.M_indexA].A = aA
	data.Positions[joint.M_indexB].C = cB
	data.Positions[joint.M_indexB].A = aB

	return length-joint.M_maxLength < B2_linearSlop
}
// GetAnchorA returns bodyA's anchor point in world coordinates.
func (joint B2RopeJoint) GetAnchorA() B2Vec2 {
	return joint.M_bodyA.GetWorldPoint(joint.M_localAnchorA)
}

// GetAnchorB returns bodyB's anchor point in world coordinates.
func (joint B2RopeJoint) GetAnchorB() B2Vec2 {
	return joint.M_bodyB.GetWorldPoint(joint.M_localAnchorB)
}

// GetReactionForce returns the constraint force, derived from the
// accumulated impulse: F = impulse * u / dt.
func (joint B2RopeJoint) GetReactionForce(inv_dt float64) B2Vec2 {
	F := B2Vec2MulScalar((inv_dt * joint.M_impulse), joint.M_u)
	return F
}

// GetReactionTorque always returns zero: a rope applies no torque at
// the anchors.
func (joint B2RopeJoint) GetReactionTorque(inv_dt float64) float64 {
	return 0.0
}

// GetMaxLength returns the maximum length of the rope.
func (joint B2RopeJoint) GetMaxLength() float64 {
	return joint.M_maxLength
}

// GetLimitState returns the current limit state (E_atUpperLimit while
// the rope is taut, E_inactiveLimit otherwise).
func (joint B2RopeJoint) GetLimitState() uint8 {
	return joint.M_state
}
func (joint *B2RopeJoint) Dump() {
indexA := joint.M_bodyA.M_islandIndex
indexB := joint.M_bodyB.M_islandIndex
fmt.Printf(" b2RopeJointDef jd;\n")
fmt.Printf(" jd.bodyA = bodies[%d];\n", indexA)
fmt.Printf(" jd.bodyB = bodies[%d];\n", indexB)
fmt.Printf(" jd.collideConnected = bool(%d);\n", joint.M_collideConnected)
fmt.Printf(" jd.localAnchorA.Set(%.15lef, %.15lef);\n", joint.M_localAnchorA.X, joint.M_localAnchorA.Y)
fmt.Printf(" jd.localAnchorB.Set(%.15lef, %.15lef);\n", joint.M_localAnchorB.X, joint.M_localAnchorB.Y)
fmt.Printf(" jd.maxLength = %.15lef;\n", joint.M_maxLength)
fmt.Printf(" joints[%d] = m_world.CreateJoint(&jd);\n", joint.M_index)
} | DynamicsB2JointRope.go | 0.848282 | 0.625795 | DynamicsB2JointRope.go | starcoder |
package main
import "fmt"
// Slices are a key data type in Go, giving a more powerful interface to sequences than arrays.
// Unlike arrays, slices are typed only by the elements they contain (not the number of elements).

// main runs each slice demonstration in turn, reusing the slice produced by
// createSlice so later demos operate on the same backing data.
func main() {
	s := createSlice()
	inlineDeclaration()
	getSet(s)
	lengthCapacity(s)
	appendTo(s)
	copyFrom(s)
	slicing(s)
	multiDimensional()
}
// createSlice demonstrates the builtin make: it allocates a slice of three
// strings (initially zero-valued), fills each position, and returns it.
func createSlice() []string {
	out := make([]string, 3)
	for i, v := range []string{"a", "b", "c"} {
		out[i] = v
	}
	return out
}
// inlineDeclaration shows a slice literal: declaration and initialization
// in a single expression.
func inlineDeclaration() {
	fmt.Println("inline declaration:", []string{"g", "h", "i"})
}
// getSet demonstrates indexing a slice like an array: it reads an element,
// overwrites it, then reads it again to show the change.
// Bug fix: the demo previously set s[0] but printed s[2] both times, so the
// "after set" output never showed any change. Read and write the same index.
func getSet(s []string) {
	fmt.Println("get before set:", s[0])
	s[0] = "a1"
	fmt.Println("get after set:", s[0])
}
// lengthCapacity prints the slice's length and capacity.
// len reports the declared element count regardless of content; here cap
// happens to match because the slice was made without extra capacity.
func lengthCapacity(s []string) {
	fmt.Println("len:", len(s))
	fmt.Println("cap:", cap(s))
}
// In addition to these basic operations, slices support several more that
// make them richer than arrays. One is the builtin append, which returns a
// slice containing one or more new values. Note that we need to accept a return
// value from append as we may get a new slice value.

// appendTo grows a local copy of the slice header with append, first by a
// single element and then by two at once, printing before and after.
func appendTo(s []string) {
	fmt.Println("slice before append:", s)
	s = append(append(s, "d"), "e", "f")
	fmt.Println("slice after append:", s)
}
// copyFrom demonstrates the builtin copy: it allocates a destination slice
// of equal length and copies every element of s into it.
func copyFrom(s []string) {
	dup := make([]string, len(s))
	copy(dup, s)
	fmt.Println("slice:", s)
	fmt.Println("copy:", dup)
}
// slicing demonstrates the slice operator s[low:high]: an explicit range,
// an open lower bound, and an open upper bound.
func slicing(s []string) {
	// Elements s[2], s[3], s[4] (high bound is exclusive).
	part := s[2:5]
	fmt.Println("slice 1[2:5]:", part)
	// From the start up to (but excluding) s[5].
	part = s[:5]
	fmt.Println("slice 2[:5]:", part)
	// From (and including) s[2] to the end.
	part = s[2:]
	fmt.Println("slice 3[2:]:", part)
}
// Slices can be composed into multi-dimensional data structures.
// The length of the inner slices can vary, unlike with multi-dimensional arrays.

// multiDimensional builds a jagged [][]int where row i has i+1 elements
// and cell (i,j) holds i+j, then prints it.
// Fix: removed a stray dataset-metadata row that was appended to the closing
// brace and broke compilation.
func multiDimensional() {
	twoD := make([][]int, 3)
	for i := 0; i < 3; i++ {
		innerLen := i + 1
		twoD[i] = make([]int, innerLen)
		for j := 0; j < innerLen; j++ {
			twoD[i][j] = i + j
		}
	}
	fmt.Println("multi dimensional:", twoD)
}
package main
import (
"fmt"
"math"
"math/rand"
)
// BinTree is a threaded binary search tree: child pointers that would
// otherwise be nil are reused as "threads" to the in-order predecessor
// (left) or successor (right), so the tree doubles as a linked list.
type BinTree struct {
	root *Node
}

// pointerDescription is a bit mask recording which of a node's child
// pointers are threads (predecessor/successor links) rather than real
// child subtrees.
type pointerDescription int

const (
	noLinks pointerDescription = 0
	leftIsLinked = 1
	rightIsLinked = 2
	bothAreLinked = leftIsLinked | rightIsLinked
)

// Node is a single tree node. When the corresponding bit is set in links,
// left points at the in-order predecessor and right at the in-order
// successor instead of at a child subtree.
type Node struct {
	value int
	links pointerDescription
	left *Node
	right *Node
}
// New returns an empty threaded binary tree.
func New() *BinTree {
	return new(BinTree)
}
// add inserts val into the tree. The first insertion creates the root with
// both pointer bits marked as (nil) threads; later insertions delegate to
// Node.add.
func (b *BinTree) add(val int) {
	if b.root == nil {
		b.root = &Node{val, bothAreLinked, nil, nil}
		return
	}
	b.root.add(val)
}
// add walks down real child pointers until it reaches a thread on the side
// val belongs to, then replaces that thread with a new leaf. The new leaf
// inherits the old thread as its own predecessor/successor link and threads
// back to the node it hangs off, keeping the in-order list intact.
// Duplicates go to the left (val <= n.value).
func (n *Node) add(val int) {
	// println("adding ", val)
	for {
		if val <= n.value {
			if n.links&leftIsLinked == leftIsLinked {
				// Left pointer is a thread: claim it for the new leaf.
				n.links ^= leftIsLinked
				n.left = &Node{val, leftIsLinked | rightIsLinked, n.left, n}
				return
			}
			n = n.left
		} else {
			if n.links&rightIsLinked == rightIsLinked {
				// Right pointer is a thread: claim it for the new leaf.
				n.links ^= rightIsLinked
				n.right = &Node{val, leftIsLinked | rightIsLinked, n, n.right}
				return
			}
			n = n.right
		}
	}
}
// del removes val from the tree if present. A single-node tree matching val
// is cleared directly; otherwise deletion is delegated to Node.del, which
// returns the (possibly new) root.
// Bug fix: after clearing a matching leaf root the old code fell through and
// called del on the now-nil root, dereferencing a nil receiver. Return early.
func (b *BinTree) del(val int) {
	if b.root == nil {
		return
	}
	if b.root.isLeafNode() && b.root.value == val {
		b.root = nil
		return
	}
	b.root = b.root.del(nil, val)
}
// isLeafNode reports whether n has no real children: in a threaded tree a
// leaf is a node whose left AND right pointers are both threads.
func (n *Node) isLeafNode() bool {
	return n.links == bothAreLinked
}
// del removes val from the subtree rooted at n and returns the subtree's
// (possibly new) root. Before unlinking a matching node it splices the
// node out of the in-order thread list (pred.right -> succ, succ.left ->
// pred), then handles four shape cases: leaf, single real child on either
// side, and two real children (where the right subtree is re-attached under
// the in-order predecessor).
func (n *Node) del(parent *Node, val int) *Node {
	if n.value == val {
		p := n.pred()
		s := n.succ()
		// Splice n out of the threaded in-order list.
		if p != nil && p.links&rightIsLinked == rightIsLinked {
			p.right = s
		}
		if s != nil && s.links&leftIsLinked == leftIsLinked {
			s.left = p
		}
		// 1) leaf node
		if n.links&bothAreLinked == bothAreLinked {
			// fmt.Println("both are linked: ", val)
			// we're about to free up a node for our parent
			// -> turn it into a linked-list pointer
			// these cases could probably be combined with the above
			var child *Node
			switch {
			case n == parent.left:
				child = n.left
			case n == parent.right:
				child = n.right
			}
			switch {
			case n == parent.left:
				parent.left = child
				parent.links |= leftIsLinked
			case n == parent.right:
				parent.right = child
				parent.links |= rightIsLinked
			}
			return child
		}
		// 2) left linked list, right is child(-tree)
		// 3) right is linked list, left is child(-tree)
		if n.links&leftIsLinked == leftIsLinked || n.links&rightIsLinked == rightIsLinked {
			// fmt.Println("one is linked: ", val)
			var child *Node
			if n.links&leftIsLinked == leftIsLinked {
				child = n.right
			} else {
				child = n.left
			}
			// update the correct pointer from our parent
			if parent != nil {
				switch {
				case n == parent.left:
					parent.left = child
				case n == parent.right:
					parent.right = child
				}
			}
			return child
		}
		// 4) both left and right are children
		// fmt.Println("none are linked: ", val)
		// add the right subtree to the 'end' of the left
		// the largest value on the left is n.pred(), so we already have it
		// we just need to set the child pointers
		if p != nil {
			p.right = n.right
			// The predecessor of a two-child node must end in a right
			// thread; anything else indicates a corrupted tree.
			if p.links&rightIsLinked == 0 {
				panic("bad mojo")
			}
			p.links &= ^rightIsLinked
		} else {
			// I have no predecessor
			fmt.Println("no pred found for val=", val)
		}
		// update the correct pointer from our parent
		if parent != nil {
			switch {
			case n == parent.left:
				parent.left = n.left
			case n == parent.right:
				parent.right = n.left
			}
		}
		return n.left
	}
	// Not this node: recurse down the matching real-child side only
	// (threads terminate the search).
	if val < n.value && n.links&leftIsLinked == 0 {
		n.left.del(n, val)
	} else if val > n.value && n.links&rightIsLinked == 0 {
		n.right.del(n, val)
	}
	return n
}
// walkIn visits every node in order via recursive tree traversal,
// calling f on each. No-op on an empty tree.
func (b *BinTree) walkIn(f func(*Node)) {
	if b.root == nil {
		return
	}
	b.root.walkIn(f)
}

// walkIn is the recursive in-order traversal: it only descends through
// real child pointers, never follows threads.
func (n *Node) walkIn(f func(*Node)) {
	if n.links&leftIsLinked == 0 {
		n.left.walkIn(f)
	}
	f(n)
	if n.links&rightIsLinked == 0 {
		n.right.walkIn(f)
	}
}

// walkLink visits every node in ascending order by following successor
// threads, i.e. treating the tree as a linked list.
func (b *BinTree) walkLink(f func(*Node)) {
	if b.root == nil {
		return
	}
	b.root.walkLink(f)
}

// walkLinkReverse visits every node in descending order by following
// predecessor threads.
func (b *BinTree) walkLinkReverse(f func(*Node)) {
	if b.root == nil {
		return
	}
	b.root.walkLinkReverse(f)
}
// pred returns n's in-order predecessor: either directly via the left
// thread, or the rightmost node of the real left subtree.
func (n *Node) pred() *Node {
	if n.links&leftIsLinked == leftIsLinked {
		return n.left
	}
	node := n.left
	for node.links&rightIsLinked == 0 {
		node = node.right
	}
	return node
}

// succ returns n's in-order successor: either directly via the right
// thread, or the leftmost node of the real right subtree.
func (n *Node) succ() *Node {
	if n.links&rightIsLinked == rightIsLinked {
		return n.right
	}
	node := n.right
	for node.links&leftIsLinked == 0 {
		node = node.left
	}
	return node
}
// walkLink descends to the smallest node (the head of the implicit
// linked list), then follows successor threads forward, calling f on
// each node in ascending order.
func (n *Node) walkLink(f func(*Node)) {
	cur := n
	for cur.links&leftIsLinked == 0 {
		cur = cur.left
	}
	for cur != nil {
		f(cur)
		cur = cur.succ()
	}
}
// walkLinkReverse descends to the largest node (the tail of the implicit
// linked list), then follows predecessor threads backward, calling f on
// each node in descending order.
func (n *Node) walkLinkReverse(f func(*Node)) {
	cur := n
	for cur.links&rightIsLinked == 0 {
		cur = cur.right
	}
	for cur != nil {
		f(cur)
		cur = cur.pred()
	}
}
// shuffle permutes array in place. It delegates to math/rand's built-in
// Fisher-Yates implementation (rand.Shuffle) instead of hand-rolling the
// swap loop.
func shuffle(array []int) {
	rand.Shuffle(len(array), func(i, j int) {
		array[i], array[j] = array[j], array[i]
	})
}
// check_tree validates the tree's invariants by running all three
// traversals and asserting the visited values are monotonic: ascending
// for the in-order and forward linked-list walks, descending for the
// reverse walk. Violations are printed, not fatal.
// NOTE(review): the middle check's message says "reverse linked-list"
// but it validates the forward walkLink — the label looks copy-pasted.
func check_tree(b *BinTree) {
	var i int
	i = 0
	b.walkIn(func(n *Node) {
		// fmt.Printf("%p = %d, left = %x right = %x\n", n, n.value, n.left, n.right)
		if n.value < i {
			fmt.Println("in-order tree walk failed: i=", i, " value=", n.value)
		}
		i = n.value
	})
	//fmt.Println()
	i = 0
	b.walkLink(func(n *Node) {
		// fmt.Printf("%p = %d, list=%d left = %x right = %x\n", n, n.value, n.links, n.left, n.right)
		if n.value < i {
			fmt.Println("reverse linked-list tree walk failed: i=", i, " value=", n.value)
		}
		i = n.value
	})
	//fmt.Println()
	i = math.MaxInt32
	b.walkLinkReverse(func(n *Node) {
		// fmt.Printf("%p = %d, list=%d left = %x right = %x\n", n, n.value, n.links, n.left, n.right)
		if i < n.value {
			fmt.Println("reverse linked-list tree walk failed: i=", i, " value=", n.value)
		}
		i = n.value
	})
}
func main() {
b := New()
t := []int{2, 1, 7, 5, 4, 9, 3, 6, 8}
for _, v := range t {
b.add(v)
}
b.del(3)
b.del(5)
b.del(4)
b.walkIn(func(n *Node) { fmt.Printf("%p = %d, left = %x right = %x\n", n, n.value, n.left, n.right) })
fmt.Println()
b.walkLink(func(n *Node) {
fmt.Printf("%p = %d, list=%d left = %x right = %x\n", n, n.value, n.links, n.left, n.right)
})
fmt.Println()
b.walkLinkReverse(func(n *Node) {
fmt.Printf("%p = %d, list=%d left = %x right = %x\n", n, n.value, n.links, n.left, n.right)
})
t = []int{9, 8, 6, 1, 7}
for _, v := range t {
b.walkIn(func(n *Node) { fmt.Printf("%p = %d, left = %x right = %x\n", n, n.value, n.left, n.right) })
b.del(v)
fmt.Println()
}
b.walkIn(func(n *Node) { fmt.Printf("%p = %d, left = %x right = %x\n", n, n.value, n.left, n.right) })
b.walkLink(func(n *Node) {
fmt.Printf("%p = %d, list=%d left = %x right = %x\n", n, n.value, n.links, n.left, n.right)
})
b.walkLinkReverse(func(n *Node) {
fmt.Printf("%p = %d, list=%d left = %x right = %x\n", n, n.value, n.links, n.left, n.right)
})
t = make([]int, 10000)
for i := 0; i < 10000; i++ {
t[i] = i
}
shuffle(t)
for _, v := range t {
b.add(v)
check_tree(b)
}
check_tree(b)
shuffle(t)
for _, v := range t {
b.del(v)
check_tree(b)
}
fmt.Println("done adding")
b.walkLinkReverse(func(n *Node) {
fmt.Printf("%p = %d, list=%d left = %x right = %x\n", n, n.value, n.links, n.left, n.right)
})
} | threadtree/threadthree.go | 0.601477 | 0.403802 | threadthree.go | starcoder |
package utils
import (
"fmt"
)
// Node of a binary search tree: a value plus left/right child links.
type Node struct {
	left *Node
	right *Node
	val interface{}
}

// Tree is an unbalanced binary search tree whose ordering is defined by
// a user-supplied Comparator. Duplicates are stored in the left subtree.
type Tree struct {
	comparator Comparator // defines the sort order of values
	node *Node            // root node; nil when the tree is empty
	length int            // number of values currently stored
}
// NewIntTree creates a new tree ordered ascending over int values.
func NewIntTree() *Tree {
	return &Tree{comparator: intAscComparator}
}

// NewStringTree creates a new tree ordered ascending over string values.
func NewStringTree() *Tree {
	return &Tree{comparator: stringAscComparator}
}

// NewTree creates a new tree ordered by a user defined comparator.
func NewTree(comp Comparator) *Tree {
	return &Tree{comparator: comp}
}
// Add inserts one or more values into the tree and returns the tree for
// chaining. Each insertion is O(h) where h is the tree height.
func (t *Tree) Add(values ...interface{}) *Tree {
	for _, val := range values {
		t.node = t.node.add(t.comparator, val)
		t.length++
	}
	return t
}

// ContainsOne reports whether a specific value is present in the tree.
func (t *Tree) ContainsOne(val interface{}) bool {
	return t.node.contains(t.comparator, val)
}
// Contains reports, for each supplied value, whether it is present in
// the tree; result i corresponds to values[i].
func (t *Tree) Contains(values ...interface{}) []bool {
	results := make([]bool, len(values))
	for i := range values {
		results[i] = t.node.contains(t.comparator, values[i])
	}
	return results
}
// RemoveMin deletes the smallest value in the tree. Returns an error if
// the tree is empty.
func (t *Tree) RemoveMin() error {
	if t.length == 0 {
		return fmt.Errorf("tree is empty")
	}
	if t.node.left == nil {
		// The root is the minimum; promote its right subtree.
		t.node = t.node.right
	} else {
		t.node.removeMin(t.comparator, t.node)
	}
	t.length--
	return nil
}

// RemoveMax deletes the largest value in the tree. Returns an error if
// the tree is empty.
func (t *Tree) RemoveMax() error {
	if t.length == 0 {
		return fmt.Errorf("tree is empty")
	}
	if t.node.right == nil {
		// The root is the maximum; promote its left subtree.
		t.node = t.node.left
	} else {
		t.node.removeMax(t.comparator, t.node)
	}
	t.length--
	return nil
}
// Remove deletes one node per given value from the tree. It returns an
// error as soon as a value is missing or the tree is empty; values
// processed before the error remain removed.
// Bug fixes: (1) a single-node tree was cleared regardless of whether the
// value matched; (2) removing a root with two children overwrote the new
// root's left subtree with the old one, silently losing nodes. The root's
// old left subtree is now re-attached under the minimum of the right
// subtree, preserving the BST invariant.
func (t *Tree) Remove(values ...interface{}) error {
	for i := 0; i < len(values); i++ {
		if t.length == 0 {
			return fmt.Errorf("tree is empty")
		}
		if t.node.val == values[i] {
			switch {
			case t.node.left == nil:
				t.node = t.node.right
			case t.node.right == nil:
				t.node = t.node.left
			default:
				// Both subtrees exist: hang the left subtree beneath the
				// minimum of the right subtree, then promote the right.
				min, _ := t.node.right.findMin(t.node)
				min.left = t.node.left
				t.node = t.node.right
			}
		} else {
			err := t.node.remove(t.comparator, values[i], t.node)
			if err != nil {
				return err
			}
		}
		t.length--
	}
	return nil
}
// Reverse mirrors the tree in place (swapping left/right throughout) and
// returns it for chaining. Note this inverts the BST ordering.
func (t *Tree) Reverse() *Tree {
	t.node.reverse()
	return t
}

// Height returns the height of the tree (number of nodes on the longest
// root-to-leaf path).
func (t *Tree) Height() int {
	return t.node.height()
}

// Clone makes a deep copy of the tree sharing only the comparator and
// the stored values.
func (t *Tree) Clone() *Tree {
	toClone := &Tree{comparator: t.comparator, length: t.length}
	toClone.node = toClone.node.clone(t.node)
	return toClone
}
// IsEmpty reports whether the tree holds no values.
func (t *Tree) IsEmpty() bool {
	return t.length == 0
}

// Length returns the number of values stored in the tree.
func (t *Tree) Length() int {
	return t.length
}

// InOrder returns the values visited in order (left, node, right),
// i.e. sorted per the comparator.
func (t *Tree) InOrder() []interface{} {
	var arr []interface{}
	t.node.inOrder(&arr)
	return arr
}

// PreOrder returns the values visited in pre-order (node, left, right).
func (t *Tree) PreOrder() []interface{} {
	var arr []interface{}
	t.node.preOrder(&arr)
	return arr
}

// PosOrder returns the values visited in post-order (left, right, node).
func (t *Tree) PosOrder() []interface{} {
	var arr []interface{}
	t.node.posOrder(&arr)
	return arr
}
// String renders the tree's values in sorted order as "[a, b, c]".
func (t *Tree) String() string {
	if t.length == 0 {
		return "[]"
	}
	out := ""
	for i, v := range t.InOrder() {
		if i > 0 {
			out += ", "
		}
		out += fmt.Sprintf("%v", v)
	}
	return "[" + out + "]"
}
// String renders a single node's value using the default %v formatting.
func (n *Node) String() string {
	return fmt.Sprintf("%v", n.val)
}
/* Private Aux Methods */

// add inserts val into the subtree rooted at n and returns the subtree's
// root (a new leaf when n is nil). Values comparing greater go right;
// equal or smaller values go left, so duplicates live in the left subtree.
func (n *Node) add(comp Comparator, val interface{}) *Node {
	if n == nil {
		return &Node{val: val}
	}
	if comp(val, n.val) > 0 {
		n.right = n.right.add(comp, val)
	} else {
		n.left = n.left.add(comp, val)
	}
	return n
}
// reverse mirrors the subtree rooted at n in place by swapping the left
// and right children of every node.
func (n *Node) reverse() {
	if n == nil {
		return
	}
	// Idiomatic tuple swap replaces the temp-variable dance.
	n.left, n.right = n.right, n.left
	n.left.reverse()
	n.right.reverse()
}
// contains reports whether val occurs in the subtree rooted at n,
// descending right for greater values and left otherwise.
func (n *Node) contains(comp Comparator, val interface{}) bool {
	if n == nil {
		return false
	}
	if n.val == val {
		return true
	}
	if comp(val, n.val) > 0 {
		return n.right.contains(comp, val)
	}
	return n.left.contains(comp, val)
}
// height returns the number of nodes on the longest root-to-leaf path of
// the subtree rooted at n (0 for a nil subtree).
// Bug fix: the previous version computed the max of the two child heights
// but then returned lheight+1 unconditionally, undercounting right-heavy
// trees.
func (n *Node) height() int {
	if n == nil {
		return 0
	}
	lheight := n.left.height()
	rheight := n.right.height()
	if lheight > rheight {
		return lheight + 1
	}
	return rheight + 1
}
// findMax returns the rightmost node of the subtree rooted at n together
// with its parent. parent is the caller-supplied parent of n, threaded
// down so the returned node can be spliced out.
func (n *Node) findMax(parent *Node) (*Node, *Node) {
	if n == nil {
		return nil, parent
	}
	if n.right == nil {
		return n, parent
	}
	return n.right.findMax(n)
}

// findMin returns the leftmost node of the subtree rooted at n together
// with its parent (see findMax).
func (n *Node) findMin(parent *Node) (*Node, *Node) {
	if n == nil {
		return nil, parent
	}
	if n.left == nil {
		return n, parent
	}
	return n.left.findMin(n)
}
// replaceNode rewires parent so that whichever child slot held n now
// holds replacement (which may be nil). n must be a child of parent.
func (n *Node) replaceNode(parent, replacement *Node) {
	if n == parent.left {
		parent.left = replacement
	} else {
		parent.right = replacement
	}
}
// remove deletes one node holding val from the subtree rooted at n, whose
// parent is parent. Returns an error when val is not found.
// Bug fix: when the matching node had two children the old code replaced
// it with its left subtree only, silently discarding the entire right
// subtree. The two-child case now swaps in the in-order predecessor's
// value and splices the predecessor out, preserving all nodes.
func (n *Node) remove(comp Comparator, val interface{}, parent *Node) error {
	if n == nil {
		return fmt.Errorf("Value %v doesn't exist in the tree", val)
	}
	if comp(val, n.val) < 0 {
		return n.left.remove(comp, val, n)
	} else if comp(val, n.val) > 0 {
		return n.right.remove(comp, val, n)
	}
	switch {
	case n.left == nil && n.right == nil:
		n.replaceNode(parent, nil)
	case n.left == nil:
		n.replaceNode(parent, n.right)
	case n.right == nil:
		n.replaceNode(parent, n.left)
	default:
		// Two children: copy the predecessor's value into this node, then
		// unlink the predecessor (which has no right child by definition).
		pred, predParent := n.left.findMax(n)
		n.val = pred.val
		pred.replaceNode(predParent, pred.left)
	}
	return nil
}
// removeMin locates the subtree's minimum (via findMin) and splices it
// out of its parent. The minimum never has a left child, so it is
// replaced by its right child (possibly nil).
func (n *Node) removeMin(comp Comparator, parent *Node) {
	n, parent = n.findMin(parent)
	if n.left == nil && n.right == nil {
		n.replaceNode(parent, nil)
	} else if n.left == nil {
		n.replaceNode(parent, n.right)
	} else {
		n.replaceNode(parent, n.left)
	}
}

// removeMax locates the subtree's maximum (via findMax) and splices it
// out of its parent. The maximum never has a right child, so it is
// replaced by its left child (possibly nil).
func (n *Node) removeMax(comp Comparator, parent *Node) {
	n, parent = n.findMax(parent)
	if n.left == nil && n.right == nil {
		n.replaceNode(parent, nil)
	} else if n.left == nil {
		n.replaceNode(parent, n.right)
	} else {
		n.replaceNode(parent, n.left)
	}
}
// clone returns a deep copy of the subtree rooted at root (nil for a nil
// subtree). Values are shared; node structure is duplicated.
func (n *Node) clone(root *Node) *Node {
	if root == nil {
		return nil
	}
	copied := &Node{val: root.val}
	copied.left = copied.clone(root.left)
	copied.right = copied.clone(root.right)
	return copied
}
// inOrder appends the subtree's values to arr in order (left, node, right).
func (n *Node) inOrder(arr *[]interface{}) {
	if n == nil {
		return
	}
	n.left.inOrder(arr)
	*arr = append(*arr, n.val)
	n.right.inOrder(arr)
}

// preOrder appends the subtree's values to arr in pre-order
// (node, left, right).
func (n *Node) preOrder(arr *[]interface{}) {
	if n == nil {
		return
	}
	*arr = append(*arr, n.val)
	n.left.preOrder(arr)
	n.right.preOrder(arr)
}
func (n *Node) posOrder(arr *[]interface{}) {
if n == nil {
return
}
n.left.posOrder(arr)
n.right.posOrder(arr)
*arr = append(*arr, n.val)
} | utils/tree.go | 0.760651 | 0.452717 | tree.go | starcoder |
package statsd
import "time"
// NoopClient is a Statter implementation that discards every metric.
// It is useful for disabling stats emission without changing call sites.
type NoopClient struct {
	// prefix for statsd name
	prefix string
}
// Close closes the connection and cleans up. For the noop client this is
// a no-op and always returns nil.
func (s *NoopClient) Close() error {
	return nil
}

// Inc increments a statsd count type. stat is a string name for the
// metric, value is the integer value, rate is the sample rate
// (0.0 to 1.0). Always a no-op returning nil.
func (s *NoopClient) Inc(stat string, value int64, rate float32) error {
	return nil
}

// Dec decrements a statsd count type. stat is a string name for the
// metric, value is the integer value, rate is the sample rate
// (0.0 to 1.0). Always a no-op returning nil.
func (s *NoopClient) Dec(stat string, value int64, rate float32) error {
	return nil
}

// Gauge submits/updates a statsd gauge type. stat is a string name for
// the metric, value is the integer value, rate is the sample rate
// (0.0 to 1.0). Always a no-op returning nil.
func (s *NoopClient) Gauge(stat string, value int64, rate float32) error {
	return nil
}

// GaugeDelta submits a delta to a statsd gauge. stat is the string name
// for the metric, value is the (positive or negative) change, rate is
// the sample rate (0.0 to 1.0). Always a no-op returning nil.
func (s *NoopClient) GaugeDelta(stat string, value int64, rate float32) error {
	return nil
}

// Timing submits a statsd timing type. stat is a string name for the
// metric, delta is the time duration value in milliseconds, rate is the
// sample rate (0.0 to 1.0). Always a no-op returning nil.
func (s *NoopClient) Timing(stat string, delta int64, rate float32) error {
	return nil
}

// TimingDuration submits a statsd timing type. stat is a string name
// for the metric, delta is the timing value as time.Duration, rate is
// the sample rate (0.0 to 1.0). Always a no-op returning nil.
func (s *NoopClient) TimingDuration(stat string, delta time.Duration, rate float32) error {
	return nil
}

// Set submits a stats set type. stat is a string name for the metric,
// value is the string value, rate is the sample rate (0.0 to 1.0).
// Always a no-op returning nil.
func (s *NoopClient) Set(stat string, value string, rate float32) error {
	return nil
}

// SetInt submits a number as a stats set type; convenience method for
// Set with a number. stat is a string name for the metric, value is the
// integer value, rate is the sample rate (0.0 to 1.0). Always a no-op
// returning nil.
func (s *NoopClient) SetInt(stat string, value int64, rate float32) error {
	return nil
}

// Raw formats the statsd event data, handles sampling, prepares it, and
// sends it to the server. stat is the string name for the metric, value
// is the preformatted "raw" value string, rate is the sample rate
// (0.0 to 1.0). Always a no-op returning nil.
func (s *NoopClient) Raw(stat string, value string, rate float32) error {
	return nil
}

// SetPrefix sets/updates the statsd client prefix. This is the only
// method with an observable effect on the noop client.
func (s *NoopClient) SetPrefix(prefix string) {
	s.prefix = prefix
}
// NewNoopClient returns a pointer to a new NoopClient and an error
// (always nil, supplied only to match the NewClient api convention).
// The variadic arguments are ignored; they allow calling it with the
// same signature as NewClient or with no arguments at all.
func NewNoopClient(a ...interface{}) (Statter, error) {
	noopClient := &NoopClient{}
	return noopClient, nil
}
// Compatibility alias
var NewNoop = NewNoopClient | src/code.cloudfoundry.org/vendor/github.com/cactus/go-statsd-client/statsd/client_noop.go | 0.878692 | 0.522994 | client_noop.go | starcoder |
This is a b-link tree in progress from the following paper:
http://www.csd.uoc.gr/~hy460/pdf/p650-lehman.pdf
This is still a work in progress and the CRUD methods on the tree
need to be parallelized. Until this is complete, there is no
constructor method for this package.
Time complexities:
Space: O(n)
Search: O(log n)
Insert: O(log n)
Delete: O(log n)
Current benchmarks with 16 ary:
BenchmarkSimpleAdd-8 1000000 1455 ns/op
BenchmarkGet-8 2000000 704 ns/op
B-link was chosen after examining this paper:
http://www.vldb.org/journal/VLDBJ2/P361.pdf
*/
package link
import (
"log"
"sync"
"sync/atomic"
)
// numberOfItemsBeforeMultithread defines the number of items that have
// to be called with a method before we multithread.
const numberOfItemsBeforeMultithread = 10

// blink is the b-link tree itself. The root pointer is guarded by lock;
// number tracks the item count and is updated atomically.
type blink struct {
	root *node
	lock sync.RWMutex
	number, ary, numRoutines uint64
}
// insert adds key to the tree, lazily creating the root under the write
// lock on first use. stack is caller-provided scratch space for the
// descent path. It returns the key that was overwritten, or nil if the
// key was new (in which case the item count is incremented atomically).
func (blink *blink) insert(key Key, stack *nodes) Key {
	var parent *node
	blink.lock.Lock()
	if blink.root == nil {
		blink.root = newNode(
			true, make(Keys, 0, blink.ary), make(nodes, 0, blink.ary+1),
		)
		blink.root.keys = make(Keys, 0, blink.ary)
		blink.root.isLeaf = true
	}
	parent = blink.root
	blink.lock.Unlock()

	result := insert(blink, parent, stack, key)
	if result == nil {
		atomic.AddUint64(&blink.number, 1)
		return nil
	}

	return result
}
// multithreadedInsert splits keys into numRoutines chunks and inserts
// each chunk on its own goroutine. The returned slice holds, for each
// input key (in input order), the key it overwrote or nil. Each
// goroutine writes a disjoint region of overwritten, so no extra
// locking is required for the result slice.
func (blink *blink) multithreadedInsert(keys Keys) Keys {
	chunks := chunkKeys(keys, int64(blink.numRoutines))
	overwritten := make(Keys, len(keys))
	var offset uint64
	var wg sync.WaitGroup
	wg.Add(len(chunks))
	for _, chunk := range chunks {
		go func(chunk Keys, offset uint64) {
			defer wg.Done()
			// Per-goroutine scratch stack, reset between inserts.
			stack := make(nodes, 0, blink.ary)
			for i := 0; i < len(chunk); i++ {
				result := blink.insert(chunk[i], &stack)
				stack.reset()
				overwritten[offset+uint64(i)] = result
			}
		}(chunk, offset)
		offset += uint64(len(chunk))
	}
	wg.Wait()
	return overwritten
}
// Insert adds the given keys to the b-tree and returns, per key, the
// key that was overwritten (nil when none). Batches larger than
// numberOfItemsBeforeMultithread are fanned out across goroutines;
// smaller ones run serially. Each insert is an O(log n) operation.
func (blink *blink) Insert(keys ...Key) Keys {
	if len(keys) > numberOfItemsBeforeMultithread {
		return blink.multithreadedInsert(keys)
	}
	replaced := make(Keys, len(keys))
	scratch := make(nodes, 0, blink.ary)
	for i, key := range keys {
		replaced[i] = blink.insert(key, &scratch)
		scratch.reset()
	}
	return replaced
}
// Len returns the number of items in this b-link tree.
func (blink *blink) Len() uint64 {
	return atomic.LoadUint64(&blink.number)
}

// get returns the stored key equal to key, or nil when it is absent.
// The read lock guards only the root-pointer read; the traversal itself
// is delegated to search.
func (blink *blink) get(key Key) Key {
	var parent *node
	blink.lock.RLock()
	parent = blink.root
	blink.lock.RUnlock()
	k := search(parent, key)
	if k == nil {
		return nil
	}
	// search can return a key that is not equal to the probe; only
	// report exact matches.
	if k.Compare(key) == 0 {
		return k
	}
	return nil
}
// Get looks up every key and returns the stored keys in matching
// positions; a nil is placed where a key was not found. Each lookup is
// an O(log n) operation.
func (blink *blink) Get(keys ...Key) Keys {
	results := make(Keys, len(keys))
	for i, key := range keys {
		results[i] = blink.get(key)
	}
	return results
}
// print dumps the tree to the given logger, for debugging.
func (blink *blink) print(output *log.Logger) {
	output.Println(`PRINTING B-LINK`)
	if blink.root == nil {
		return
	}
	blink.root.print(output)
}

// newTree builds an empty b-link tree with the given branching factor
// and worker-goroutine count for bulk operations.
func newTree(ary, numRoutines uint64) *blink {
	return &blink{ary: ary, numRoutines: numRoutines}
} | vendor/src/github.com/Workiva/go-datastructures/btree/_link/tree.go | 0.626238 | 0.485844 | tree.go | starcoder |
// More information about Google Distance Matrix API is available on
// https://developers.google.com/maps/documentation/distancematrix/
package maps
import (
"encoding/json"
"net/url"
"github.com/gronka/google-maps-services-go/internal"
//"googlemaps.github.io/maps/internal"
)
// safeLeg is a raw version of Leg that does not have custom encoding or
// decoding methods applied.
type safeLeg Leg

// encodedLeg is the actual encoded version of Leg as per the Maps APIs.
type encodedLeg struct {
	safeLeg
	EncDuration          *internal.Duration `json:"duration"`
	EncDurationInTraffic *internal.Duration `json:"duration_in_traffic"`
	EncArrivalTime       *internal.DateTime `json:"arrival_time"`
	EncDepartureTime     *internal.DateTime `json:"departure_time"`
}

// UnmarshalJSON implements json.Unmarshaler for Leg. This decodes the API
// representation into types useful for Go developers.
func (leg *Leg) UnmarshalJSON(data []byte) error {
	x := encodedLeg{}
	err := json.Unmarshal(data, &x)
	if err != nil {
		return err
	}
	*leg = Leg(x.safeLeg)
	// Convert the wire representations into native Go time types.
	leg.Duration = x.EncDuration.Duration()
	leg.DurationInTraffic = x.EncDurationInTraffic.Duration()
	leg.ArrivalTime = x.EncArrivalTime.Time()
	leg.DepartureTime = x.EncDepartureTime.Time()
	return nil
}

// MarshalJSON implements json.Marshaler for Leg. This encodes Go types back to
// the API representation.
func (leg *Leg) MarshalJSON() ([]byte, error) {
	x := encodedLeg{}
	x.safeLeg = safeLeg(*leg)
	x.EncDuration = internal.NewDuration(leg.Duration)
	x.EncDurationInTraffic = internal.NewDuration(leg.DurationInTraffic)
	x.EncArrivalTime = internal.NewDateTime(leg.ArrivalTime)
	x.EncDepartureTime = internal.NewDateTime(leg.DepartureTime)
	return json.Marshal(x)
}
// safeStep is a raw version of Step that does not have custom encoding or
// decoding methods applied.
type safeStep Step

// encodedStep is the actual encoded version of Step as per the Maps APIs.
type encodedStep struct {
	safeStep
	EncDuration *internal.Duration `json:"duration"`
}

// UnmarshalJSON implements json.Unmarshaler for Step. This decodes the API
// representation into types useful for Go developers.
func (step *Step) UnmarshalJSON(data []byte) error {
	x := encodedStep{}
	err := json.Unmarshal(data, &x)
	if err != nil {
		return err
	}
	*step = Step(x.safeStep)
	// Convert the wire duration into the native representation.
	step.Duration = x.EncDuration.Duration()
	return nil
}

// MarshalJSON implements json.Marshaler for Step. This encodes Go types back to
// the API representation.
func (step *Step) MarshalJSON() ([]byte, error) {
	x := encodedStep{}
	x.safeStep = safeStep(*step)
	x.EncDuration = internal.NewDuration(step.Duration)
	return json.Marshal(x)
}
// safeTransitDetails is a raw version of TransitDetails that does not have
// custom encoding or decoding methods applied.
type safeTransitDetails TransitDetails

// encodedTransitDetails is the actual encoded version of TransitDetails as per
// the Maps APIs
type encodedTransitDetails struct {
	safeTransitDetails
	EncArrivalTime   *internal.DateTime `json:"arrival_time"`
	EncDepartureTime *internal.DateTime `json:"departure_time"`
}

// UnmarshalJSON implements json.Unmarshaler for TransitDetails. This decodes
// the API representation into types useful for Go developers.
func (transitDetails *TransitDetails) UnmarshalJSON(data []byte) error {
	x := encodedTransitDetails{}
	err := json.Unmarshal(data, &x)
	if err != nil {
		return err
	}
	*transitDetails = TransitDetails(x.safeTransitDetails)
	// Convert the wire timestamps into native time values.
	transitDetails.ArrivalTime = x.EncArrivalTime.Time()
	transitDetails.DepartureTime = x.EncDepartureTime.Time()
	return nil
}

// MarshalJSON implements json.Marshaler for TransitDetails. This encodes Go
// types back to the API representation.
func (transitDetails *TransitDetails) MarshalJSON() ([]byte, error) {
	x := encodedTransitDetails{}
	x.safeTransitDetails = safeTransitDetails(*transitDetails)
	x.EncArrivalTime = internal.NewDateTime(transitDetails.ArrivalTime)
	x.EncDepartureTime = internal.NewDateTime(transitDetails.DepartureTime)
	return json.Marshal(x)
}
// safeTransitLine is the raw version of TransitLine that does not have custom
// encoding or decoding methods applied.
type safeTransitLine TransitLine

// encodedTransitLine is the actual encoded version of TransitLine as per the
// Maps APIs
type encodedTransitLine struct {
	safeTransitLine
	EncURL  string `json:"url"`
	EncIcon string `json:"icon"`
}

// UnmarshalJSON implements json.Unmarshaler for TransitLine. This decodes the
// API representation into types useful for Go developers.
func (transitLine *TransitLine) UnmarshalJSON(data []byte) error {
	x := encodedTransitLine{}
	err := json.Unmarshal(data, &x)
	if err != nil {
		return err
	}
	*transitLine = TransitLine(x.safeTransitLine)
	// Parse the URL-valued fields; a malformed value aborts the decode.
	transitLine.URL, err = url.Parse(x.EncURL)
	if err != nil {
		return err
	}
	transitLine.Icon, err = url.Parse(x.EncIcon)
	if err != nil {
		return err
	}
	return nil
}

// MarshalJSON implements json.Marshaler for TransitLine. This encodes Go
// types back to the API representation.
func (transitLine *TransitLine) MarshalJSON() ([]byte, error) {
	x := encodedTransitLine{}
	x.safeTransitLine = safeTransitLine(*transitLine)
	x.EncURL = transitLine.URL.String()
	x.EncIcon = transitLine.Icon.String()
	return json.Marshal(x)
}
// safeTransitAgency is the raw version of TransitAgency that does not have
// custom encoding or decoding methods applied.
type safeTransitAgency TransitAgency

// encodedTransitAgency is the actual encoded version of TransitAgency as per the
// Maps APIs
type encodedTransitAgency struct {
	safeTransitAgency
	EncURL string `json:"url"`
}

// UnmarshalJSON implements json.Unmarshaler for TransitAgency. This decodes the
// API representation into types useful for Go developers.
func (transitAgency *TransitAgency) UnmarshalJSON(data []byte) error {
	x := encodedTransitAgency{}
	err := json.Unmarshal(data, &x)
	if err != nil {
		return err
	}
	*transitAgency = TransitAgency(x.safeTransitAgency)
	// Parse the URL field; a malformed value aborts the decode.
	transitAgency.URL, err = url.Parse(x.EncURL)
	if err != nil {
		return err
	}
	return nil
}

// MarshalJSON implements json.Marshaler for TransitAgency. This encodes Go
// types back to the API representation.
func (transitAgency *TransitAgency) MarshalJSON() ([]byte, error) {
	x := encodedTransitAgency{}
	x.safeTransitAgency = safeTransitAgency(*transitAgency)
	x.EncURL = transitAgency.URL.String()
	return json.Marshal(x)
}
// safeTransitLineVehicle is the raw version of TransitLineVehicle that does not
// have custom encoding or decoding methods applied.
type safeTransitLineVehicle TransitLineVehicle

// encodedTransitLineVehicle is the actual encoded version of TransitLineVehicle
// as per the Maps APIs
type encodedTransitLineVehicle struct {
	safeTransitLineVehicle
	EncIcon string `json:"icon"`
}

// UnmarshalJSON implements json.Unmarshaler for TransitLineVehicle. This
// decodes the API representation into types useful for Go developers.
func (transitLineVehicle *TransitLineVehicle) UnmarshalJSON(data []byte) error {
	x := encodedTransitLineVehicle{}
	err := json.Unmarshal(data, &x)
	if err != nil {
		return err
	}
	*transitLineVehicle = TransitLineVehicle(x.safeTransitLineVehicle)
	// Parse the icon URL; a malformed value aborts the decode.
	transitLineVehicle.Icon, err = url.Parse(x.EncIcon)
	if err != nil {
		return err
	}
	return nil
}

// MarshalJSON implements json.Marshaler for TransitLineVehicle. This encodes
// Go types back to the API representation.
func (transitLineVehicle *TransitLineVehicle) MarshalJSON() ([]byte, error) {
	x := encodedTransitLineVehicle{}
	x.safeTransitLineVehicle = safeTransitLineVehicle(*transitLineVehicle)
	x.EncIcon = transitLineVehicle.Icon.String()
	return json.Marshal(x)
}
// safeDistanceMatrixElement is a raw version of DistanceMatrixElement that
// does not have custom encoding or decoding methods applied.
type safeDistanceMatrixElement DistanceMatrixElement

// encodedDistanceMatrixElement is the actual encoded version of
// DistanceMatrixElement as per the Maps APIs.
type encodedDistanceMatrixElement struct {
	safeDistanceMatrixElement
	EncDuration          *internal.Duration `json:"duration"`
	EncDurationInTraffic *internal.Duration `json:"duration_in_traffic"`
}

// UnmarshalJSON implements json.Unmarshaler for DistanceMatrixElement. This
// decodes the API representation into types useful for Go developers.
func (dme *DistanceMatrixElement) UnmarshalJSON(data []byte) error {
	x := encodedDistanceMatrixElement{}
	err := json.Unmarshal(data, &x)
	if err != nil {
		return err
	}
	*dme = DistanceMatrixElement(x.safeDistanceMatrixElement)
	dme.Duration = x.EncDuration.Duration()
	dme.DurationInTraffic = x.EncDurationInTraffic.Duration()
	return nil
}

// MarshalJSON implements json.Marshaler for DistanceMatrixElement. This encodes
// Go types back to the API representation.
func (dme *DistanceMatrixElement) MarshalJSON() ([]byte, error) {
	x := encodedDistanceMatrixElement{}
	x.safeDistanceMatrixElement = safeDistanceMatrixElement(*dme)
	x.EncDuration = internal.NewDuration(dme.Duration)
	// Fix: DurationInTraffic was previously dropped on encode, making the
	// codec asymmetric — UnmarshalJSON reads duration_in_traffic but
	// MarshalJSON never wrote it (every other codec in this file sets
	// all of its wire fields).
	x.EncDurationInTraffic = internal.NewDuration(dme.DurationInTraffic)
	return json.Marshal(x)
}
// safeSnappedPoint is a raw version of SnappedPoint that does not have custom
// encoding or decoding methods applied.
type safeSnappedPoint SnappedPoint

// encodedSnappedPoint is the actual encoded version of SnappedPoint as per the
// Roads API.
type encodedSnappedPoint struct {
	safeSnappedPoint
	EncLocation internal.Location `json:"location"`
}

// UnmarshalJSON implements json.Unmarshaler for SnappedPoint. This decodes the
// API representation into types useful for Go developers.
func (sp *SnappedPoint) UnmarshalJSON(data []byte) error {
	x := encodedSnappedPoint{}
	err := json.Unmarshal(data, &x)
	if err != nil {
		return err
	}
	*sp = SnappedPoint(x.safeSnappedPoint)
	// Map the wire Latitude/Longitude names onto the native Lat/Lng fields.
	sp.Location.Lat = x.EncLocation.Latitude
	sp.Location.Lng = x.EncLocation.Longitude
	return nil
}

// MarshalJSON implements json.Marshaler for SnappedPoint. This encodes Go
// types back to the API representation.
func (sp *SnappedPoint) MarshalJSON() ([]byte, error) {
	x := encodedSnappedPoint{}
	x.safeSnappedPoint = safeSnappedPoint(*sp)
	x.EncLocation.Latitude = sp.Location.Lat
	x.EncLocation.Longitude = sp.Location.Lng
	return json.Marshal(x)
} | encoding.go | 0.856468 | 0.402157 | encoding.go | starcoder |
package loom
import (
"container/list"
"fmt"
"github.com/kpmy/lomo/ir"
"github.com/kpmy/lomo/ir/types"
"github.com/kpmy/trigo"
"github.com/kpmy/ypk/assert"
"github.com/kpmy/ypk/halt"
"math/big"
"reflect"
)
type value struct {
typ types.Type
val interface{}
}
func (v *value) String() string {
return fmt.Sprint(v.val)
}
func (v *value) toAtom() (ret Atom) {
assert.For(v.typ == types.ATOM, 20)
switch x := v.val.(type) {
case Atom:
ret = x
case nil: //do nothing
default:
halt.As(100, "wrong atom ", reflect.TypeOf(x))
}
return
}
func (v *value) toStr() (ret string) {
assert.For(v.typ == types.STRING, 20)
switch x := v.val.(type) {
case string:
ret = x
default:
halt.As(100, "wrong string ", reflect.TypeOf(x))
}
return
}
func (v *value) toInt() (ret *big.Int) {
assert.For(v.typ == types.INTEGER, 20)
switch x := v.val.(type) {
case int:
ret = big.NewInt(int64(x))
case *Int:
ret = big.NewInt(0)
ret.Add(ret, &x.Int)
default:
halt.As(100, "wrong integer ", reflect.TypeOf(x))
}
return
}
func (v *value) toBool() (ret bool) {
assert.For(v.typ == types.BOOLEAN, 20)
switch x := v.val.(type) {
case bool:
ret = x
default:
halt.As(100, "wrong boolean ", reflect.TypeOf(x))
}
return
}
func (v *value) toTril() (ret tri.Trit) {
assert.For(v.typ == types.TRILEAN || v.typ == types.BOOLEAN, 20, v.typ)
switch x := v.val.(type) {
case tri.Trit:
ret = x
case bool:
ret = tri.This(x)
default:
halt.As(100, "wrong trilean ", reflect.TypeOf(x))
}
return
}
func (v *value) toReal() (ret *big.Rat) {
assert.For(v.typ == types.REAL, 20)
switch x := v.val.(type) {
case *Rat:
ret = big.NewRat(0, 1)
ret.Add(ret, &x.Rat)
default:
halt.As(100, "wrong real ", reflect.TypeOf(x))
}
return
}
func (v *value) toCmp() (ret *Cmp) {
assert.For(v.typ == types.COMPLEX, 20)
switch x := v.val.(type) {
case *Cmp:
ret = ThisCmp(x)
default:
halt.As(100, "wrong complex ", reflect.TypeOf(x))
}
return
}
func (v *value) toRune() (ret rune) {
assert.For(v.typ == types.CHAR, 20, v.typ)
switch x := v.val.(type) {
case rune:
ret = x
default:
halt.As(100, "wrong rune ", reflect.TypeOf(x))
}
return
}
func (v *value) toAny() (ret *Any) {
assert.For(v.typ == types.ANY, 20)
switch x := v.val.(type) {
case *Any:
ret = ThisAny(&value{typ: x.typ, val: x.x})
default:
halt.As(100, "wrong any ", reflect.TypeOf(x))
}
return
}
func (v *value) toSet() (ret *Set) {
assert.For(v.typ == types.SET, 20)
switch x := v.val.(type) {
case *Set:
ret = ThisSet(x)
default:
halt.As(100, "wrong list ", reflect.TypeOf(x))
}
return
}
func (v *value) toList() (ret *List) {
assert.For(v.typ == types.LIST, 20)
switch x := v.val.(type) {
case *List:
ret = ThisList(x)
default:
halt.As(100, "wrong list ", reflect.TypeOf(x))
}
return
}
func (v *value) toMap() (ret *Map) {
assert.For(v.typ == types.MAP, 20)
switch x := v.val.(type) {
case *Map:
ret = ThisMap(x)
default:
halt.As(100, "wrong list ", reflect.TypeOf(x))
}
return
}
func (v *value) asList() (ret *List) {
assert.For(v.typ == types.LIST, 20)
switch x := v.val.(type) {
case *List:
ret = x
default:
halt.As(100, "wrong list ", reflect.TypeOf(x))
}
return
}
func (v *value) asMap() (ret *Map) {
assert.For(v.typ == types.MAP, 20)
switch x := v.val.(type) {
case *Map:
ret = x
default:
halt.As(100, "wrong list ", reflect.TypeOf(x))
}
return
}
func (v *value) toRef() (ret *Ref) {
assert.For(v.typ == types.UNIT, 20)
switch x := v.val.(type) {
case *Ref:
ret = ThisRef(x)
default:
halt.As(100, "wrong list ", reflect.TypeOf(x))
}
return
}
func cval(e *ir.ConstExpr) (ret *value) {
t := e.Type
switch t {
case types.INTEGER:
b := big.NewInt(0)
if err := b.UnmarshalText([]byte(e.Value.(string))); err == nil {
v := ThisInt(b)
ret = &value{typ: t, val: v}
} else {
halt.As(100, "wrong integer")
}
case types.REAL:
r := big.NewRat(0, 1)
if err := r.UnmarshalText([]byte(e.Value.(string))); err == nil {
v := ThisRat(r)
ret = &value{typ: t, val: v}
} else {
halt.As(100, "wrong real")
}
case types.BOOLEAN:
ret = &value{typ: t, val: e.Value.(bool)}
case types.TRILEAN:
ret = &value{typ: t, val: tri.NIL}
case types.CHAR:
var v rune
switch x := e.Value.(type) {
case int32:
v = rune(x)
case int:
v = rune(x)
default:
halt.As(100, "unsupported rune coding")
}
ret = &value{typ: t, val: v}
case types.STRING:
v := e.Value.(string)
ret = &value{typ: t, val: v}
case types.ANY:
ret = &value{typ: t, val: &Any{}}
default:
halt.As(100, "unknown type ", t, " for ", e)
}
return
}
type exprStack struct {
vl *list.List
}
func (s *exprStack) init() {
s.vl = list.New()
}
func (s *exprStack) push(v *value) {
assert.For(v != nil, 20)
_, fake := v.val.(*value)
assert.For(!fake, 21)
s.vl.PushFront(v)
}
func (s *exprStack) pop() (ret *value) {
if s.vl.Len() > 0 {
el := s.vl.Front()
ret = s.vl.Remove(el).(*value)
} else {
halt.As(100, "pop on empty stack")
}
return
} | loom/val.go | 0.514888 | 0.454291 | val.go | starcoder |
package collector
import (
"go.etcd.io/etcd/raft/raftpb"
)
// BriefSegment summarizes a contiguous run of raft log entries that all
// share a single term.
type BriefSegment struct {
	// Term is the term shared by every entry in this segment.
	Term uint64
	// PrevLogTerm is the term of the entry immediately before FirstIndex.
	PrevLogTerm uint64
	// FirstIndex and LastIndex bound the segment's index range, inclusive.
	FirstIndex uint64
	LastIndex  uint64
}

// Hit reports whether the entry <term, index> lies inside this segment.
func (b *BriefSegment) Hit(term, index uint64) bool {
	if term != b.Term {
		return false
	}
	return b.FirstIndex <= index && index <= b.LastIndex
}

// HitPrev reports whether <term, index> is exactly the entry that
// precedes this segment.
func (b *BriefSegment) HitPrev(term, index uint64) bool {
	return index == b.FirstIndex-1 && term == b.PrevLogTerm
}
// ExtractBriefFromEntries run-length compresses ent into BriefSegments,
// one per maximal run of entries sharing a term. prevLogTerm is recorded
// as the predecessor term of the first segment; each later segment's
// predecessor is the term of the segment before it.
func ExtractBriefFromEntries(prevLogTerm uint64, ent []raftpb.Entry) []*BriefSegment {
	var segments []*BriefSegment
	var current *BriefSegment
	prev := prevLogTerm
	for _, e := range ent {
		switch {
		case current == nil:
			current = &BriefSegment{Term: e.Term, PrevLogTerm: prev, FirstIndex: e.Index, LastIndex: e.Index}
		case current.Term == e.Term:
			// Same term: just extend the current run.
			current.LastIndex = e.Index
		default:
			// Term changed: seal the current run and start a new one.
			segments = append(segments, current)
			prev = current.Term
			current = &BriefSegment{Term: e.Term, PrevLogTerm: prev, FirstIndex: e.Index, LastIndex: e.Index}
		}
	}
	if current != nil {
		segments = append(segments, current)
	}
	return segments
}
// BriefSegmentCollector collects log-entry briefs and resolves
// conflicts between batches the way a raft kernel would.
type BriefSegmentCollector interface {
	//AddEntriesToBrief collects entries and resolve potential conflict. This might cause
	// the modification of internal structure. Return false if failed to attach new entries.
	AddEntriesToBrief(entries []raftpb.Entry, logTerm uint64, logIndex uint64) bool
	//ResizeBriefToIndex truncates the briefs so index becomes the last retained entry.
	ResizeBriefToIndex(index uint64) (bool, Location)
	//Briefing exposes the collected segments.
	Briefing() []*BriefSegment
	Locator
	Refresher
}

//MimicRaftKernelBriefCollector is an implementation of BriefSegmentCollector.
type MimicRaftKernelBriefCollector struct {
	// b holds the collected segments; next caches the tail of b.
	b    []*BriefSegment
	next *BriefSegment
	// logTerm/logIndex remember the position immediately before the
	// first collected entry.
	logTerm     uint64
	logIndex    uint64
	initialized bool
}

// NewMimicRaftKernelBriefCollector returns a collector that will be
// seeded by its first AddEntriesToBrief call.
func NewMimicRaftKernelBriefCollector() *MimicRaftKernelBriefCollector {
	return &MimicRaftKernelBriefCollector{initialized: false}
}

// NewInitializedMimicRaftKernelBriefCollector returns an empty collector
// anchored at the predecessor position <logTerm, logIndex>.
func NewInitializedMimicRaftKernelBriefCollector(logTerm, logIndex uint64) *MimicRaftKernelBriefCollector {
	return &MimicRaftKernelBriefCollector{
		logTerm:     logTerm,
		logIndex:    logIndex,
		initialized: true,
	}
}
//CloneMimicRaftKernelBriefCollector offers a deep copy of MimicRaftKernelBriefCollector.
// It panics when rkb is nil or not initialized.
func CloneMimicRaftKernelBriefCollector(rkb *MimicRaftKernelBriefCollector) *MimicRaftKernelBriefCollector {
	if rkb == nil || !rkb.initialized {
		panic("illegal argument")
	}
	res := NewInitializedMimicRaftKernelBriefCollector(rkb.logTerm, rkb.logIndex)
	// Fix: copy the segment values themselves. The previous code only
	// copied the slice of pointers, so clone and original shared the same
	// BriefSegments; in-place mutations elsewhere (ResizeBriefToIndex,
	// mimic set LastIndex through these pointers) would corrupt both —
	// contradicting the documented "deep copy".
	res.b = make([]*BriefSegment, len(rkb.b))
	for i, seg := range rkb.b {
		dup := *seg
		res.b[i] = &dup
	}
	if len(res.b) != 0 {
		res.next = res.b[len(res.b)-1]
	}
	return res
}
// AddEntriesToBrief folds entries, whose predecessor position is
// <logIndex, logTerm>, into the collected briefs. It returns false when
// the predecessor cannot be matched against the current state.
func (c *MimicRaftKernelBriefCollector) AddEntriesToBrief(entries []raftpb.Entry, logTerm uint64, logIndex uint64) bool {
	// A refreshed (uninitialized) collector accepts any batch as its
	// initial state.
	if c.IsRefreshed() {
		c.init(entries, logTerm, logIndex)
		return true
	}
	if c.IsEmpty() {
		// Empty but initialized: the batch must start exactly at the
		// remembered predecessor position.
		if c.logTerm == logTerm && c.logIndex == logIndex {
			c.init(entries, logTerm, logIndex)
			return true
		}
		return false
	}
	// tightly behind the last brief, short-cut
	if c.next.LastIndex == logIndex && c.next.Term == logTerm {
		nbs := ExtractBriefFromEntries(logTerm, entries)
		if len(nbs) != 0 {
			if nbs[0].Term == c.next.Term {
				// First new segment continues the current tail: extend it.
				c.next.LastIndex = nbs[0].LastIndex
				if len(nbs) > 1 {
					c.b = append(c.b, nbs[1:]...)
					c.next = c.b[len(c.b)-1]
				}
			} else {
				c.b = append(c.b, nbs...)
				c.next = c.b[len(c.b)-1]
			}
		}
		return true
	}
	// Overlapping or conflicting batch: resolve like a raft kernel.
	ok, _ := c.mimic(entries, logTerm, logIndex)
	return ok
}
// ResizeBriefToIndex truncates the collected briefs so that index
// becomes the last retained entry. The Location reports where index
// fell relative to the collected range; truncation only happens for
// PREV (collector emptied) and WITHIN.
func (c *MimicRaftKernelBriefCollector) ResizeBriefToIndex(index uint64) (bool, Location) {
	if c.IsRefreshed() {
		panic("cannot resize a non-initialized collector")
	}
	if c.IsEmpty() {
		switch {
		case index < c.logIndex:
			return false, UNDERFLOW
		case index == c.logIndex:
			return true, PREV
		default:
			return false, OVERFLOW
		}
	}
	first, last := c.b[0].FirstIndex, c.b[len(c.b)-1].LastIndex
	switch {
	case index < first-1:
		return false, UNDERFLOW
	case index == first-1:
		// Truncating to just before the first brief empties the collector.
		c.b = nil
		c.next = nil
		return true, PREV
	case index > last:
		return false, OVERFLOW
	default:
		// Drop every brief after the one containing index, and shorten
		// that brief in place.
		idx := c.locateIndex(index, 0, len(c.b))
		c.b = c.b[:idx+1]
		c.next = c.b[idx]
		c.next.LastIndex = index
		return true, WITHIN
	}
}
// Briefing returns the collected brief segments (not a copy).
func (c *MimicRaftKernelBriefCollector) Briefing() []*BriefSegment {
	return c.b
}

// MatchIndex reports how the entry <index, term> relates to the
// collected briefs: within them, exactly the predecessor, out of range,
// or in conflict with the recorded term at that index.
func (c *MimicRaftKernelBriefCollector) MatchIndex(index, term uint64) Location {
	if c.IsRefreshed() {
		panic("not initialized")
	}
	if c.IsEmpty() {
		switch {
		case index < c.logIndex:
			return UNDERFLOW
		case index == c.logIndex:
			if term == c.logTerm {
				return PREV
			} else {
				return CONFLICT
			}
		default:
			return OVERFLOW
		}
	}
	l, _ := c.matchIndex(index, term)
	return l
}
// LocateIndex reports where index falls relative to the collected range
// and, for PREV/WITHIN, the term recorded at that index (0 otherwise).
func (c *MimicRaftKernelBriefCollector) LocateIndex(index uint64) (Location, uint64) {
	if c.IsRefreshed() {
		panic("not initialized")
	}
	if c.IsEmpty() {
		switch {
		case index < c.logIndex:
			return UNDERFLOW, 0
		case index == c.logIndex:
			return PREV, c.logTerm
		default:
			return OVERFLOW, 0
		}
	}
	first, last := c.b[0].FirstIndex, c.b[len(c.b)-1].LastIndex
	switch {
	case index < first-1:
		return UNDERFLOW, 0
	case index == first-1:
		return PREV, c.b[0].PrevLogTerm
	case index > last:
		return OVERFLOW, 0
	default:
		idx := c.locateIndex(index, 0, len(c.b))
		return WITHIN, c.b[idx].Term
	}
}
// PrevLogTerm returns the term of the entry preceding the collected range.
func (c *MimicRaftKernelBriefCollector) PrevLogTerm() uint64 {
	if c.IsRefreshed() {
		panic("not initialized")
	}
	return c.logTerm
}

// FirstIndex returns the index of the first collected entry (one past
// the remembered predecessor index).
func (c *MimicRaftKernelBriefCollector) FirstIndex() uint64 {
	if c.IsRefreshed() || c.IsEmpty() {
		panic("illegal operation")
	}
	return c.logIndex + 1
}

// LastIndex returns the index of the last collected entry.
func (c *MimicRaftKernelBriefCollector) LastIndex() uint64 {
	if c.IsRefreshed() || c.IsEmpty() {
		panic("illegal operation")
	}
	return c.next.LastIndex
}

// IsEmpty reports whether no segments have been collected.
func (c *MimicRaftKernelBriefCollector) IsEmpty() bool {
	return c.next == nil || len(c.b) == 0
}

// IsRefreshed reports whether the collector awaits (re)initialization.
func (c *MimicRaftKernelBriefCollector) IsRefreshed() bool {
	return !c.initialized
}

// Refresh discards all state, returning the collector to the
// uninitialized condition.
func (c *MimicRaftKernelBriefCollector) Refresh() {
	c.b = nil
	c.next = nil
	c.initialized = false
}
// init seeds the collector from a fresh batch of entries preceded by
// <logIndex, logTerm>, replacing any previous state.
func (c *MimicRaftKernelBriefCollector) init(ent []raftpb.Entry, logTerm, logIndex uint64) {
	c.initialized = true
	c.logTerm, c.logIndex = logTerm, logIndex
	c.b = ExtractBriefFromEntries(logTerm, ent)
	c.next = nil
	if n := len(c.b); n > 0 {
		c.next = c.b[n-1]
	}
}
//mimic compresses entry fragment into BriefSegment, then add the brief like a raft kernel.
// To be mentioned, it is NOT safe to call this function when the collector is empty!
// It returns whether the predecessor <logIndex, logTerm> matched, plus its Location.
func (c *MimicRaftKernelBriefCollector) mimic(entries []raftpb.Entry, logTerm uint64, logIndex uint64) (bool, Location) {
	// omit checking committed
	// check the existence of <logIndex, logTerm>
	loc, _ := c.matchIndex(logIndex, logTerm)
	if loc != PREV && loc != WITHIN {
		return false, loc
	}
	// find conflicts in entries
	eLen := len(entries)
	cLen := len(c.b)
	if eLen != 0 {
		if l, cIdx := c.matchIndex(entries[0].Index, entries[0].Term); l == WITHIN {
			brief := c.b[cIdx]
			eIdx := 0
			// Walk entries and briefs in lock-step until the first entry
			// whose term disagrees with the recorded term at its index.
			for eIdx < eLen && cIdx < cLen {
				ent := entries[eIdx]
				if !brief.Hit(ent.Term, ent.Index) {
					cIdx++
					if cIdx == cLen {
						break
					}
					brief = c.b[cIdx]
					if !brief.Hit(ent.Term, ent.Index) {
						break
					}
				}
				eIdx++
			}
			// append conflict entries
			if eIdx != eLen {
				// Fix: determine the predecessor term BEFORE re-slicing
				// entries. The old code re-sliced first and then read
				// entries[eIdx-1] from the shortened slice, indexing the
				// wrong entry and potentially panicking out of range.
				var prevLogTerm uint64
				if eIdx == 0 {
					prevLogTerm = logTerm
				} else {
					prevLogTerm = entries[eIdx-1].Term
				}
				entries = entries[eIdx:]
				after := entries[0].Index
				if c.next.LastIndex+1 == after {
					// direct append
					c.absorbBriefs(ExtractBriefFromEntries(prevLogTerm, entries))
				} else {
					// truncate then append
					c.b = c.b[:cIdx+1]
					c.next = brief
					brief.LastIndex = after - 1
					c.absorbBriefs(ExtractBriefFromEntries(prevLogTerm, entries))
				}
			}
		}
		// NOTE(review): when entries[0] itself CONFLICTs with the recorded
		// term at its index the batch is silently dropped yet true is
		// returned — confirm whether a truncate-and-replace is intended there.
	}
	return true, loc
}
// locateTerm binary-searches c.b[from:to] for the brief holding the
// given term, returning its position (or the insertion point when no
// brief has that term). It relies on briefs being ordered by strictly
// increasing term, as raft log terms are monotonic.
func (c *MimicRaftKernelBriefCollector) locateTerm(term uint64, from, to int) int {
	start := from
	end := to
	for start < end {
		mid := (start + end) / 2
		t := c.b[mid].Term
		if t < term {
			start = mid + 1
		} else if term < t {
			end = mid
		} else {
			return mid
		}
	}
	return start
}

// locateIndex binary-searches c.b[from:to] for the brief whose index
// range contains index, returning its position (or the insertion point
// when no brief covers it). Brief index ranges are ascending and
// disjoint.
func (c *MimicRaftKernelBriefCollector) locateIndex(index uint64, from, to int) int {
	start := from
	end := to
	for start < end {
		mid := (start + end) / 2
		first, last := c.b[mid].FirstIndex, c.b[mid].LastIndex
		if last < index {
			start = mid + 1
		} else if index < first {
			end = mid
		} else {
			return mid
		}
	}
	return start
}
// absorbBriefs appends freshly extracted briefs after the current tail,
// merging the first one into the tail when the terms match. Briefs whose
// predecessor term does not line up with the tail are silently dropped.
func (c *MimicRaftKernelBriefCollector) absorbBriefs(b []*BriefSegment) {
	// do not accept nil input
	if len(b) == 0 {
		return
	}
	succ := b[0]
	switch {
	case succ.Term == c.next.Term:
		// The first new segment continues the tail: extend it in place
		// and append only the remaining segments.
		c.next.LastIndex = succ.LastIndex
		if len(b) > 1 {
			c.b = append(c.b, b[1:]...)
			c.next = b[len(b)-1]
		}
		// Fix: when len(b) == 1 the tail pointer must keep referring to
		// the segment stored in c.b. The old code set c.next = b[0],
		// which is never appended to c.b, so subsequent in-place updates
		// (Resize/absorb) mutated an orphan segment instead of the
		// stored tail.
	case succ.PrevLogTerm == c.next.Term:
		c.b = append(c.b, b...)
		c.next = b[len(b)-1]
	}
}
// matchIndex locates <index, term> within the collected briefs. It
// returns WITHIN plus the brief's position on an exact hit, PREV when
// the pair is exactly the predecessor of the first brief, CONFLICT when
// the index is in range but the term disagrees, and UNDERFLOW/OVERFLOW
// for out-of-range indexes. Must not be called while the collector is
// empty (c.b must be non-empty).
func (c *MimicRaftKernelBriefCollector) matchIndex(index, term uint64) (Location, int) {
	first, last := c.b[0].FirstIndex, c.b[len(c.b)-1].LastIndex
	switch {
	case index < first-1:
		return UNDERFLOW, -1
	case index == first-1:
		if c.b[0].HitPrev(term, index) {
			return PREV, -1
		} else {
			return CONFLICT, -1
		}
	case index > last:
		return OVERFLOW, -1
	default:
		// Search by term first (terms rise monotonically across briefs),
		// then verify the hit against the brief's index range.
		idx := c.locateTerm(term, 0, len(c.b))
		if 0 <= idx && idx < len(c.b) && c.b[idx].Hit(term, index) {
			return WITHIN, idx
		} else {
			return CONFLICT, -1
		}
	}
} | draft/collector/brief.go | 0.628977 | 0.446072 | brief.go | starcoder |
package isc
// Associate builds a map from the key/value Pairs produced by applying
// transform to every element of list. When two pairs share a key, the
// later one wins.
func Associate[T any, K comparable, V any](list []T, transform func(T) Pair[K, V]) map[K]V {
	result := make(map[K]V, len(list))
	for _, element := range list {
		pair := transform(element)
		result[pair.First] = pair.Second
	}
	return result
}
// AssociateTo applies transform to every element of list and stores the
// resulting pairs into *destination, which is also returned. Later
// pairs overwrite earlier ones that share a key.
func AssociateTo[T any, K comparable, V any](list []T, destination *map[K]V, transform func(T) Pair[K, V]) map[K]V {
	dst := *destination
	for _, element := range list {
		pair := transform(element)
		dst[pair.First] = pair.Second
	}
	return dst
}
// AssociateBy returns a map indexing the elements of list by the key
// produced by keySelector. When two elements map to the same key, the
// later element wins.
func AssociateBy[T any, K comparable](list []T, keySelector func(T) K) map[K]T {
	result := make(map[K]T, len(list))
	for _, element := range list {
		result[keySelector(element)] = element
	}
	return result
}
// AssociateByAndValue returns a map whose keys come from keySelector and
// whose values come from valueTransform, applied to each element of
// list. When two elements map to the same key, the later element wins.
func AssociateByAndValue[T, V any, K comparable](list []T, keySelector func(T) K, valueTransform func(T) V) map[K]V {
	result := make(map[K]V, len(list))
	for _, element := range list {
		result[keySelector(element)] = valueTransform(element)
	}
	return result
}
// AssociateByTo stores each element of list into *destination under the
// key produced by keySelector, then returns the destination map. When
// two elements map to the same key, the later element wins.
func AssociateByTo[T, K comparable](list []T, destination *map[K]T, keySelector func(T) K) map[K]T {
	dst := *destination
	for _, element := range list {
		dst[keySelector(element)] = element
	}
	return dst
}
// AssociateByAndValueTo stores valueTransform(e) into *destination under
// keySelector(e) for each element e of list, then returns the
// destination map. Later elements overwrite earlier ones sharing a key.
func AssociateByAndValueTo[T, V any, K comparable](list []T, destination *map[K]V, keySelector func(T) K, valueTransform func(T) V) map[K]V {
	dst := *destination
	for _, element := range list {
		dst[keySelector(element)] = valueTransform(element)
	}
	return dst
}
// AssociateWith returns a map whose keys are the elements of list and
// whose values are produced by valueSelector. Duplicate elements keep
// the value computed for the last occurrence.
func AssociateWith[T comparable, V any](list []T, valueSelector func(T) V) map[T]V {
	result := make(map[T]V, len(list))
	for _, element := range list {
		result[element] = valueSelector(element)
	}
	return result
}
//AssociateWithTo Populates and returns the destination mutable map with key-value pairs for each element of the given collection, where key is the element itself and value is provided by the valueSelector function applied to that key.
//If any two elements are equal, the last one overwrites the former value in the map.
func AssociateWithTo[T comparable, V any](list []T, destination *map[T]V, valueSelector func(T) V) map[T]V {
	for _, e := range list {
		(*destination)[e] = valueSelector(e)
	}
	return *destination
} | isc/associate.go | 0.82251 | 0.487124 | associate.go | starcoder |
package grouped
// BoolFuncs helps starting and waiting for a group of bool-returning functions.
type BoolFuncs struct {
	// Recover sets a recovery callback for any functions added after this is
	// set. If a function has a recovery set, and the function panics, the
	// recovery will be called with the panic value. In the aggregation the
	// function is assumed to have returned false.
	Recover func(interface{})

	// funcs holds the callbacks added since the last launch, each paired
	// with the Recover callback that was in effect when it was added.
	funcs []boolFuncParams
	// last is the most recently launched batch; later launches chain to it.
	last *spawnedBoolFuncs
}

// Add some callbacks to be executed as part of the group.
// The callbacks will not be started until one of FirstDone/FirstOK/FirstNot/AllDone are called.
// Each callback captures the Recover value at the time of this call.
func (s *BoolFuncs) Add(fn ...func() bool) {
	for _, f := range fn {
		s.funcs = append(s.funcs, boolFuncParams{fn: f, rec: s.Recover})
	}
}
// FirstDone launches all previously added functions and returns a channel that
// signals when any function completes.
// The returned pointer should only be used after the signal has been received.
// It points to the result of the function that completed first.
func (s *BoolFuncs) FirstDone() (<-chan struct{}, *bool) {
	spawned := s.start()
	return spawned.firstDone, &spawned.firstResult
}

// FirstOK launches all previously added functions and returns a channel that
// signals when any function completes ok.
// Note that the channel will never signal if none of the functions complete ok.
func (s *BoolFuncs) FirstOK() <-chan struct{} {
	spawned := s.start()
	return spawned.anyOK
}

// FirstNot launches all previously added functions and returns a channel that
// signals when any function completes not ok.
// Note that the channel will never signal if all of the functions complete ok.
func (s *BoolFuncs) FirstNot() <-chan struct{} {
	spawned := s.start()
	return spawned.anyNot
}

// AllDone launches all previously added functions and returns a channel that
// signals when they all complete.
// The returned pointer should only be used after the signal has been received.
// It points to the aggregated results of all the functions.
func (s *BoolFuncs) AllDone() (<-chan struct{}, *struct{ anyOK, anyNot bool }) {
	spawned := s.start()
	return spawned.allDone, &spawned.aggResult
}
// start launches every pending callback in its own goroutine, chaining
// the new batch to any previously launched one, and returns the batch.
// With no pending callbacks it returns the previous batch, or an
// already-completed placeholder when nothing was ever launched.
func (s *BoolFuncs) start() *spawnedBoolFuncs {
	if len(s.funcs) == 0 {
		if s.last != nil {
			return s.last
		}
		// Nothing was ever added: report completion immediately.
		// NOTE(review): firstDone/anyOK/anyNot are nil here, so those
		// waiters block forever on an empty group — confirm intended.
		done := make(chan struct{})
		close(done)
		return &spawnedBoolFuncs{
			allDone: done,
		}
	}
	spawned := &spawnedBoolFuncs{
		prev:        s.last,
		funcs:       s.funcs,
		firstDone:   make(chan struct{}),
		anyOK:       make(chan struct{}),
		anyNot:      make(chan struct{}),
		allDone:     make(chan struct{}),
		funcResults: make(chan bool, len(s.funcs)), // buffered: senders never block
	}
	s.funcs = nil
	s.last = spawned
	go spawned.run()
	for i := range spawned.funcs {
		go func(params boolFuncParams) {
			returned := false
			defer func() {
				if returned {
					return
				}
				// params.fn panicked. With a recovery callback set, stop
				// the panic, report it, and count the function as having
				// returned false so aggregation can complete. (Fix: the
				// old code never sent a result on the panic path, so
				// run() waited forever and allDone never closed.)
				// Without a callback the panic propagates, as before.
				if params.rec != nil {
					params.rec(recover())
					spawned.funcResults <- false
				}
			}()
			ok := params.fn()
			returned = true
			spawned.funcResults <- ok
		}(spawned.funcs[i])
	}
	return spawned
}
// boolFuncParams bundles one queued function with its optional panic
// recovery callback.
type boolFuncParams struct {
	fn  func() bool       // the work to run; its result feeds the ok/not channels
	rec func(interface{}) // if non-nil, receives the recovered panic value
}
// spawnedBoolFuncs tracks one launched batch of functions plus, via prev,
// the chain of earlier batches whose results run folds in.
type spawnedBoolFuncs struct {
	prev        *spawnedBoolFuncs // previously started batch, if any
	funcs       []boolFuncParams  // the functions belonging to this batch
	funcResults chan bool         // buffered; one value per completed function
	firstDone, anyOK, anyNot, allDone chan struct{} // closed by run as milestones occur
	firstResult bool // valid only after firstDone is closed
	aggResult   struct{ anyOK, anyNot bool } // valid only after allDone is closed
}
func (s *spawnedBoolFuncs) run() {
var prevFirstDone, prevAnyOK, prevAnyNot, prevAllDone chan struct{}
if s.prev != nil {
prevFirstDone, prevAnyOK, prevAnyNot, prevAllDone = s.prev.firstDone, s.prev.anyOK, s.prev.anyNot, s.prev.allDone
}
var seen struct{ first, anyOK, anyNot bool }
onFirst := func(val bool) {
seen.first = true
prevFirstDone = nil
s.firstResult = val
close(s.firstDone)
}
onAnyOK := func() {
if seen.anyOK {
return
}
prevAnyOK = nil
seen.anyOK = true
close(s.anyOK)
}
onAnyNot := func() {
if seen.anyNot {
return
}
prevAnyNot = nil
seen.anyNot = true
close(s.anyNot)
}
received := 0
for received < len(s.funcs) || prevAllDone != nil {
select {
case res := <-s.funcResults:
received++
if !seen.first {
onFirst(res)
}
if res {
onAnyOK()
}
if !res {
onAnyNot()
}
case <-prevFirstDone:
onFirst(s.prev.firstResult)
if s.firstResult {
onAnyOK()
} else {
onAnyNot()
}
case <-prevAnyOK:
onAnyOK()
case <-prevAnyNot:
onAnyNot()
case <-prevAllDone:
prevAllDone = nil
if !seen.first {
onFirst(s.prev.firstResult)
}
if s.prev.aggResult.anyOK {
onAnyOK()
}
if s.prev.aggResult.anyNot {
onAnyNot()
}
}
}
close(s.allDone)
} | boolfuncs.go | 0.614278 | 0.557123 | boolfuncs.go | starcoder |
package datatypes
// ModelInfo enumerates all of the different options for customizing
// the character model. Values are raw indices/flags taken directly from
// the wire format; no range validation happens here.
type ModelInfo struct {
	Race byte // 2 Elezen, 3 Lalafell, 4 Miqo'te, 5 Roe, 6 Au Ra, else Hyur
	Gender byte // 0 is male, 1 is female
	BodyType byte // CHANGE AT OWN RISK
	Height byte // Scale from 0-100
	Tribe byte
	Face byte
	Hairstyle byte
	HairHighlight byte // 8th bit toggles highlight
	SkinTone byte
	OddEyeColor byte
	HairColor byte
	HairHighlightColor byte
	FacialFeatures byte // Race specific toggles. i.e. 6th bit toggles right limbal ring. 7th bit toggles left limbal ring.
	FacialFeaturesColor byte
	Eyebrows byte
	EyeColor byte
	EyeShape byte
	Nose byte
	Jaw byte
	Mouth byte // Bitfield toggles light/dark/none lip color
	LipColor byte
	TailLength byte // Scale from 1-100
	TailType byte
	BustSize byte // Scale from 1-100
	FacePaintType byte
	FacePaintColor byte
}
// ChocoboInfo enumerates the fields for chocobo (companion) appearance.
type ChocoboInfo struct {
	Head byte
	Body byte
	Feet byte
	Color byte
}
// PlayerSpawn defines the data array for a new entity block.
// Fields prefixed with U are unknown/unidentified wire fields kept for
// layout compatibility — do not repurpose them.
type PlayerSpawn struct {
	Title uint16
	U1b uint16
	CurrentWorld uint16
	HomeWorld uint16
	GMRank byte
	U3c, U4 byte
	OnlineStatus byte
	Pose byte
	U5a, U5b, U5c byte
	TargetID uint64
	U6, U7 uint32
	WeaponMain WeaponGear
	WeaponSub WeaponGear
	CraftSub WeaponGear
	U14, U15 uint32
	BNPCBase, BNPCName uint32
	U18, U19, DirectorID uint32
	OwnerID uint32
	UnkID3 uint32
	MaxHP, CurrentHP uint32
	DisplayFlags uint32
	FateID uint16
	CurrentMP uint16
	MaxMP uint16
	U21a uint16
	ModelChara uint16
	Direction uint16 // Quantized direction 0x0000 ~ 0xFFFF, NWSE <=> 0,0x4000,0x8000,0xC000
	MountID uint16
	Minion uint16
	Index byte
	State byte // 0-1 for alive, 2 for dead, 3 for persistent emote
	Emote byte // Applies for when State is 3
	Type byte // 1 for player, 2 for NPC, else furniture
	Subtype byte // 4 for players, 2 pet, 3 companion, 5 mob, 7 minion
	Voice byte
	U25c uint16
	EnemyType byte // 0 for friendly, anything else is an enemy
	Level byte
	ClassJob byte
	U26d byte
	U27a uint16
	ChocoboInfo ChocoboInfo
	StatusLoopVFX byte
	U28c uint16
	U29 uint32
	U29b uint16
	U29c byte
	Statuses [30]StatusEffect
	X float32
	Y float32
	Z float32
	Head Gear
	Body Gear
	Hand Gear
	Leg Gear
	Foot Gear
	Ear Gear
	Neck Gear
	Wrist Gear
	Ring1 Gear
	Ring2 Gear
	Name EntityName
	Model ModelInfo
	FCTag FCTag
	U30 uint64
}
// IsBlockData is a marker method identifying PlayerSpawn as packet block data.
func (PlayerSpawn) IsBlockData() {}
// NPCSpawn defines the data array for a new entity block.
// Notes:
// GMRank from PlayerSpawn corresponds to MobAggression in
// this struct
type NPCSpawn struct {
	PlayerSpawn
	U31 uint64 // unknown trailing field; widens the block beyond PlayerSpawn
}
// IsBlockData is a marker method identifying NPCSpawn as packet block data.
func (NPCSpawn) IsBlockData() {}
// NPCSpawn2 defines the data array for a new entity block.
// Notes:
// GMRank from PlayerSpawn corresponds to MobAggression in
// this struct
// This packet type is encountered in the wild when spawning Alliance Raid
// bosses
type NPCSpawn2 struct {
	PlayerSpawn
	U31 [47]uint64 // unknown trailing payload distinguishing this larger variant
}
// IsBlockData is a marker method identifying NPCSpawn2 as packet block data.
func (NPCSpawn2) IsBlockData() {}
package money
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
)
// Amount represents a quantity of money with an optional currency symbol.
type Amount struct {
	// Quantity is the numeric quantity of the amount.
	Quantity float64
	// Currency is the canonical currency symbol (empty when unidentified).
	Currency string
}
// NewAmount creates a new amount with the given quantity and currency.
func NewAmount(q float64, currency string) *Amount {
	return &Amount{Quantity: q, Currency: currency}
}
// String formats the money as "[QUANTITY](.DECIMAL)(M|K)( CURRENCY)",
// scaling millions to "M" and thousands to "K" and trimming trailing
// zeros from the quantity.
func (a Amount) String() string {
	q, suffix := a.Quantity, ""
	switch {
	case a.Quantity >= 1000000:
		q, suffix = a.Quantity/1000000, "M"
	case a.Quantity >= 1000:
		q, suffix = a.Quantity/1000, "K"
	}
	num := trimDecimal(q)
	if a.Currency == "" {
		return fmt.Sprint(num, suffix)
	}
	return fmt.Sprintf("%s%s %s", num, suffix, a.Currency)
}
// trimDecimal renders n with two decimals, then strips trailing zeros
// and a dangling decimal point (e.g. 2.50 -> "2.5", 3.00 -> "3").
func trimDecimal(n float64) string {
	s := fmt.Sprintf("%.2f", n)
	s = strings.TrimRight(s, "0")
	return strings.TrimRight(s, ".")
}
// StringComma is String with "," used as the decimal separator instead of ".".
func (a Amount) StringComma() string {
	return strings.Replace(a.String(), ".", ",", -1)
}
// StringBefore formats the money as "[CURRENCY][QUANTITY](.DECIMAL)(M|K)",
// i.e. String with the currency moved in front of the number.
func (a Amount) StringBefore() string {
	parts := strings.Split(a.String(), " ")
	if len(parts) == 1 {
		return parts[0]
	}
	return parts[1] + parts[0]
}
var (
	// moneyRegex matches "[currency] number [k|m|mm] [currency]" after the
	// input has been lowercased and stripped of thousands separators.
	moneyRegex = regexp.MustCompile(`^([^0-9 ]*) *([0-9][0-9,\.]*) *(k|m{1,2})? *([^0-9]*)$`)
	errInvalid = errors.New("invalid money format given")
	errInvalidNumber = errors.New("invalid number format given")
)
// Parse returns an Amount of money with quantity and currency (if
// identified) and an error. "." is treated as the decimal separator;
// "," characters are stripped as thousands separators.
func Parse(text string) (*Amount, error) {
	return parse(text, ".")
}
// ParseComma parses the amount of money in the text with "," as the
// decimal separator; "." characters are stripped as thousands separators.
func ParseComma(text string) (*Amount, error) {
	return parse(text, ",")
}
// parse extracts an Amount from text using decimalSep ("." or ",") as
// the decimal separator. The opposite separator is stripped (thousands
// grouping), the text is lowercased and trimmed, and an optional
// currency token before or after the number is resolved via
// identifyCurrency. A "k" suffix scales by 1e3, "m"/"mm" by 1e6.
func parse(text string, decimalSep string) (*Amount, error) {
	text = strings.TrimSpace(strings.ToLower(strings.Replace(text, sepInverse(decimalSep), "", -1)))
	// Single regex evaluation; the previous MatchString + FindStringSubmatch
	// pair ran the same pattern twice.
	parts := moneyRegex.FindStringSubmatch(text)
	if parts == nil {
		return nil, errInvalid
	}
	// A currency token may appear before or after the number, but not both.
	if parts[1] != "" && parts[4] != "" {
		return nil, errInvalid
	}
	if strings.Count(parts[2], decimalSep) > 1 {
		return nil, errInvalidNumber
	}
	quantity := parts[2]
	if decimalSep == "," {
		quantity = strings.Replace(quantity, ",", ".", -1)
	}
	q, err := strconv.ParseFloat(quantity, 64)
	if err != nil {
		return nil, err
	}
	switch parts[3] {
	case "m", "mm":
		q *= 1000000.0
	case "k":
		q *= 1000.0
	}
	var currency string
	switch {
	case parts[1] != "":
		currency = identifyCurrency(parts[1])
	case parts[4] != "":
		currency = identifyCurrency(parts[4])
	}
	return &Amount{
		Quantity: q,
		Currency: currency,
	}, nil
}
// sepInverse returns the thousands separator paired with the given
// decimal separator: "." for ",", and "," for anything else.
func sepInverse(sep string) string {
	switch sep {
	case ",":
		return "."
	default:
		return ","
	}
}
// currencies maps each canonical currency symbol to the lowercase
// aliases (symbols and ISO-4217 codes) it is recognized by.
var currencies = map[string][]string{
	"Lek": {"all", "lek"},
	"؋": {"؋", "afn"},
	"$": {"$", "usd", "svc", "ars", "aud", "bsd", "bbd", "bmd", "bnd", "cad", "kyd", "clp", "cop", "xcd", "svc", "fjd", "gyd", "hkd", "lrd", "mxn", "nad", "nzd", "sgd", "sbd", "srd", "tvd"},
	"ƒ": {"ƒ", "awg", "ang"},
	"₼": {"₼", "azn"},
	"p.": {"p.", "byr"},
	"BZ$": {"bz$", "bzd"},
	"$b": {"$b", "bob"},
	"KM": {"km", "bam"},
	"P": {"p", "bwp"},
	"лв": {"лв", "bgn", "kzt", "uzs"},
	"R$": {"r$", "brl"},
	"៛": {"៛", "khr"},
	"¥": {"¥", "dny", "jpy"},
	"₡": {"₡", "crc"},
	"kn": {"kn", "hrk"},
	"₩": {"₩", "kpw", "krw"},
	"₱": {"₱", "cup", "php"},
	"Kč": {"Kč", "czk"},
	"kr": {"kr", "dkk", "eek", "isk", "nok", "sek"},
	"RD$": {"rd$", "dop"},
	"£": {"£", "gbp", "egp", "fkp", "gip", "ggp", "imp", "jep", "lbp", "shp", "syp"},
	"GEL": {"gel"},
	"€": {"€", "eur"},
	"¢": {"¢", "ghc"},
	"Q": {"q", "gtq"},
	"L": {"l", "hnl"},
	"Ft": {"ft", "huf"},
	"₹": {"₹", "inr"},
	"Rp": {"rp", "idr"},
	"﷼": {"﷼", "irr", "omr", "qar", "sar", "yer"},
	"₪": {"₪", "ils"},
	"J$": {"j$", "jmd"},
	"₭": {"₭", "lak"},
	"Ls": {"ls", "lvl"},
	"Bs": {"bs", "vef"},
	"Lt": {"lt", "ltl"},
	"ден": {"ден", "mkd"},
	"RM": {"rm", "myr"},
	"Rs": {"rs", "mur", "npr", "pkr", "scr", "lkr"},
	"₮": {"₮", "mnt"},
	"MT": {"mt", "mzn"},
	"C$": {"c$", "nio"},
	"₦": {"₦", "ngn"},
	"B/.": {"b/.", "pab"},
	"S/.": {"s/.", "pen"},
	"Gs": {"gs", "pyg"},
	"zł": {"zł", "pln"},
	"lei": {"lei", "ron"},
	"Дин.": {"Дин.", "rsd"},
	"S": {"s", "sos", "zar"},
	"CHF": {"chf"},
	"NT$": {"nt$", "twd"},
	"Z$": {"z$", "zwd"},
	"TT$": {"tt$", "ttd"},
	"฿": {"฿", "tbh"},
	"₺": {"₺", "tlr"},
	"₴": {"₴", "uah"},
	"$U": {"$u", "uyu"},
	"₫": {"₫", "vnd"},
}
// currenciesByAlias is the inverted index: lowercase alias -> canonical
// symbol. It is populated once at package init from currencies.
var currenciesByAlias = make(map[string]string)
func init() {
	for c, aliases := range currencies {
		for _, a := range aliases {
			currenciesByAlias[a] = c
		}
	}
}
func identifyCurrency(curr string) string {
c, ok := currenciesByAlias[curr]
if !ok {
return ""
}
return c
} | money.go | 0.756447 | 0.470372 | money.go | starcoder |
package dao
import (
"database/sql"
"fmt"
"github.com/squat/and/dab/simple-temple-test-group/util"
"github.com/google/uuid"
// pq acts as the driver for SQL requests
_ "github.com/lib/pq"
)
// BaseDatastore provides the basic datastore methods; DAO is the
// concrete implementation backed by PostgreSQL.
type BaseDatastore interface {
	CreateSimpleTempleTestGroup(input CreateSimpleTempleTestGroupInput) (*SimpleTempleTestGroup, error)
	ReadSimpleTempleTestGroup(input ReadSimpleTempleTestGroupInput) (*SimpleTempleTestGroup, error)
	DeleteSimpleTempleTestGroup(input DeleteSimpleTempleTestGroupInput) error
}
// DAO encapsulates access to the datastore
type DAO struct {
	DB *sql.DB
}
// SimpleTempleTestGroup encapsulates the object stored in the datastore
type SimpleTempleTestGroup struct {
	ID uuid.UUID
	CreatedBy uuid.UUID
}
// CreateSimpleTempleTestGroupInput encapsulates the information required to create a single simpleTempleTestGroup in the datastore
type CreateSimpleTempleTestGroupInput struct {
	ID uuid.UUID
	AuthID uuid.UUID // recorded as the row's created_by column
}
// ReadSimpleTempleTestGroupInput encapsulates the information required to read a single simpleTempleTestGroup in the datastore
type ReadSimpleTempleTestGroupInput struct {
	ID uuid.UUID
}
// DeleteSimpleTempleTestGroupInput encapsulates the information required to delete a single simpleTempleTestGroup in the datastore
type DeleteSimpleTempleTestGroupInput struct {
	ID uuid.UUID
}
// Init opens the datastore connection, returning a DAO.
// Note: sql.Open validates its arguments without creating a connection,
// so connectivity problems surface on first use rather than here.
func Init(config *util.Config) (*DAO, error) {
	connStr := fmt.Sprintf("user=%s dbname=%s host=%s sslmode=%s", config.User, config.DBName, config.Host, config.SSLMode)
	db, err := sql.Open("postgres", connStr)
	if err != nil {
		return nil, err
	}
	return &DAO{DB: db}, nil
}
// Executes a query that is expected to yield a single row; any query
// error is deferred to the returned row's Scan call.
func executeQueryWithRowResponse(db *sql.DB, query string, args ...interface{}) *sql.Row {
	return db.QueryRow(query, args...)
}
// Executes a query, returning the number of rows affected
func executeQuery(db *sql.DB, query string, args ...interface{}) (int64, error) {
	res, err := db.Exec(query, args...)
	if err != nil {
		return 0, err
	}
	return res.RowsAffected()
}
// CreateSimpleTempleTestGroup creates a new simpleTempleTestGroup in the datastore, returning the newly created simpleTempleTestGroup
func (dao *DAO) CreateSimpleTempleTestGroup(input CreateSimpleTempleTestGroupInput) (*SimpleTempleTestGroup, error) {
	row := executeQueryWithRowResponse(dao.DB, "INSERT INTO simple_temple_test_group (id, created_by) VALUES ($1, $2) RETURNING id, created_by;", input.ID, input.AuthID)
	var group SimpleTempleTestGroup
	if err := row.Scan(&group.ID, &group.CreatedBy); err != nil {
		return nil, err
	}
	return &group, nil
}
// ReadSimpleTempleTestGroup returns the simpleTempleTestGroup in the datastore for a given ID
func (dao *DAO) ReadSimpleTempleTestGroup(input ReadSimpleTempleTestGroupInput) (*SimpleTempleTestGroup, error) {
	row := executeQueryWithRowResponse(dao.DB, "SELECT id, created_by FROM simple_temple_test_group WHERE id = $1;", input.ID)
	var group SimpleTempleTestGroup
	if err := row.Scan(&group.ID, &group.CreatedBy); err != nil {
		// Translate the sentinel "no rows" into the service's not-found error.
		if err == sql.ErrNoRows {
			return nil, ErrSimpleTempleTestGroupNotFound(input.ID.String())
		}
		return nil, err
	}
	return &group, nil
}
// DeleteSimpleTempleTestGroup deletes the simpleTempleTestGroup in the datastore for a given ID
func (dao *DAO) DeleteSimpleTempleTestGroup(input DeleteSimpleTempleTestGroupInput) error {
rowsAffected, err := executeQuery(dao.DB, "DELETE FROM simple_temple_test_group WHERE id = $1;", input.ID)
if err != nil {
return err
} else if rowsAffected == 0 {
return ErrSimpleTempleTestGroupNotFound(input.ID.String())
}
return nil
} | src/e2e/resources/simple-temple-expected/simple-temple-test-group/dao/dao.go | 0.537284 | 0.438605 | dao.go | starcoder |
package common
import "fmt"
// FileData summarizes coverage for one file: its package, base name,
// total statement count, and how many of those statements executed.
type FileData struct {
	Package string // Name of the package
	Name string // Name of the file (basename)
	Count int64 // Number of statements in the file
	Exec int64 // Number of statements in the file that were executed
}
// Coverage reports the fraction of statements executed, in [0, 1].
// A record with no statements counts as fully covered.
func (fd FileData) Coverage() float64 {
	if fd.Count <= 0 { // avoid divide-by-zero
		return 1.0
	}
	return float64(fd.Exec) / float64(fd.Count)
}
// Handle reports an identifier for the record — "package/name" — or the
// empty string when both parts are empty.
func (fd FileData) Handle() string {
	if fd.Package == "" && fd.Name == "" {
		return ""
	}
	return fmt.Sprintf("%s/%s", fd.Package, fd.Name)
}
// String renders a human-readable coverage summary for the entity
// described by the FileData object.
func (fd FileData) String() string {
	if handle := fd.Handle(); handle != "" {
		return fmt.Sprintf("%s: %d statements out of %d covered; coverage: %.1f%%", handle, fd.Exec, fd.Count, fd.Coverage()*100.0)
	}
	return fmt.Sprintf("%d statements out of %d covered; overall coverage: %.1f%%", fd.Exec, fd.Count, fd.Coverage()*100.0)
}
// DataSet is a list of FileData instances. It implements
// sort.Interface (ascending coverage, ties broken by handle), and thus
// is capable of being sorted.
type DataSet []FileData
// Merge is a utility function that merges a list of FileData
// instances with another FileData list. For entries present in both
// lists it keeps ds's Exec unless that is zero, in which case other's
// Exec is adopted; entries whose Count disagrees are reported in the
// second return value (these can only come from other). The result
// preserves the order of ds, followed by elements of other not in ds.
func (ds DataSet) Merge(other DataSet) (DataSet, DataSet) {
	// Set up our index (package -> name -> result position) and result sets
	idx := map[string]map[string]int{}
	var result DataSet
	var conflict DataSet
	// Copy ds into the result set
	i := 0
	for _, fd := range ds {
		// Mark it seen
		if _, ok := idx[fd.Package]; !ok {
			idx[fd.Package] = map[string]int{}
		}
		idx[fd.Package][fd.Name] = i
		// Save it to the result set
		result = append(result, fd)
		i++
	}
	// Now walk through other
	for _, fd := range other {
		// Have we seen it?
		if _, ok := idx[fd.Package]; !ok {
			idx[fd.Package] = map[string]int{}
		}
		if j, ok := idx[fd.Package][fd.Name]; ok {
			// Are the counts consistent?
			if result[j].Count != fd.Count {
				conflict = append(conflict, fd)
			} else if result[j].Exec == 0 {
				// Adopt other's execution count only when ds had none.
				result[j].Exec = fd.Exec
			}
			continue
		}
		// Add the element to the result set
		idx[fd.Package][fd.Name] = i
		result = append(result, fd)
		i++
	}
	return result, conflict
}
// Reduce collapses the data set to one FileData per package (file names
// dropped), summing Count and Exec, and preserving the order in which
// packages first appear.
func (ds DataSet) Reduce() DataSet {
	seen := map[string]int{}
	var out DataSet
	for _, fd := range ds {
		if j, ok := seen[fd.Package]; ok {
			out[j].Count += fd.Count
			out[j].Exec += fd.Exec
			continue
		}
		seen[fd.Package] = len(out)
		out = append(out, FileData{
			Package: fd.Package,
			Count: fd.Count,
			Exec: fd.Exec,
		})
	}
	return out
}
// Sum reduces the entire data set to a single summarizing FileData
// (package and name left empty).
func (ds DataSet) Sum() (total FileData) {
	for _, fd := range ds {
		total.Count += fd.Count
		total.Exec += fd.Exec
	}
	return total
}
// Len returns the number of elements in the data set.
func (ds DataSet) Len() int {
return len(ds)
}
// Less reports whether the element with index i should sort before
// the element with index j.
func (ds DataSet) Less(i, j int) bool {
iCov := ds[i].Coverage()
jCov := ds[j].Coverage()
switch {
case iCov < jCov:
return true
case jCov < iCov:
return false
default:
return ds[i].Handle() < ds[j].Handle()
}
}
// Swap swaps the two elements with the specified indexes.
func (ds DataSet) Swap(i, j int) {
ds[i], ds[j] = ds[j], ds[i]
} | common/types.go | 0.659295 | 0.501831 | types.go | starcoder |
package tegola
import (
"fmt"
"math"
"github.com/go-spatial/geom"
"github.com/go-spatial/tegola/maths/webmercator"
)
const (
	DefaultEpislon = 10.0 // default simplification tolerance (see ZEpislon)
	DefaultExtent = 4096 // default tile extent per side, used for pixel scaling
	DefaultTileBuffer = 64.0 // default buffer added around the tile's pixel bounds
	MaxZ = 22 // maximum supported zoom level
)
var UnknownConversionError = fmt.Errorf("do not know how to convert value to requested value")
// Tile represents a slippy map tile.
// http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
type Tile struct {
	Z uint
	X uint
	Y uint
	Lat float64
	Long float64
	Tolerance float64
	Extent float64
	Buffer float64
	// These values are cached by Init and must not be read before it runs.
	cached bool
	// The width and height of the region in web mercator units.
	xspan float64
	yspan float64
	// This is the computed bounding box in web mercator coordinates.
	extent *geom.Extent
	// Pixel bounds expanded by Buffer on every side.
	bufpext *geom.Extent
}
// NewTile returns a non-nil, fully initialized tile for the given z/x/y
// indices, with default buffer, extent and tolerance; Lat/Long are
// derived from the indices and the cached projection data is computed.
func NewTile(z, x, y uint) *Tile {
	t := &Tile{
		Z: z,
		X: x,
		Y: y,
		Buffer: DefaultTileBuffer,
		Extent: DefaultExtent,
		Tolerance: DefaultEpislon,
	}
	t.Lat, t.Long = t.Num2Deg()
	t.Init()
	return t
}
// NewTileLatLong returns a non-nil, fully initialized tile containing
// the given latitude/longitude at zoom z, with default buffer, extent
// and tolerance; X/Y are derived from the coordinates.
func NewTileLatLong(z uint, lat, lon float64) *Tile {
	t := &Tile{
		Z: z,
		Lat: lat,
		Long: lon,
		Buffer: DefaultTileBuffer,
		Extent: DefaultExtent,
		Tolerance: DefaultEpislon,
	}
	x, y := t.Deg2Num()
	t.X, t.Y = uint(x), uint(y)
	t.Init()
	return t
}
// Init precomputes the tile's web mercator extent, its x/y spans, and
// the buffered pixel bounds. It must run before ToPixel, FromPixel or
// PixelBufferedBounds are used (NewTile/NewTileLatLong call it).
func (t *Tile) Init() {
	// Half the web mercator world width in meters.
	max := 20037508.34
	// resolution (world units per tile side at this zoom)
	res := (max * 2) / math.Exp2(float64(t.Z))
	t.cached = true
	t.extent = &geom.Extent{
		-max + (float64(t.X) * res), // MinX
		max - (float64(t.Y) * res), // Miny
		-max + (float64(t.X) * res) + res, // MaxX
		max - (float64(t.Y) * res) - res, // MaxY
	}
	// NOTE(review): the value labeled MaxY above is numerically smaller
	// than MinY (tile rows grow southward), so yspan can come out negative
	// and ToPixel/FromPixel rely on that sign to flip the Y axis — confirm
	// how geom.Extent's MinY/MaxY accessors order the values.
	t.xspan = t.extent.MaxX() - t.extent.MinX()
	t.yspan = t.extent.MaxY() - t.extent.MinY()
	/*
		// This is how we can calculate it. But, it will always be a constant.
		// So, we just return that constant.
		// Where PixelBounds is : [4]float64{0.0, 0.0, t.Extent, t.Extent}
		bounds, err = t.PixelBounds()
		if err != nil {
			return bounds, err
		}
		bounds[0][0] -= t.Buffer
		bounds[0][1] -= t.Buffer
		bounds[1][0] += t.Buffer
		bounds[1][1] += t.Buffer
	*/
	t.bufpext = &geom.Extent{
		0 - t.Buffer, 0 - t.Buffer,
		t.Extent + t.Buffer, t.Extent + t.Buffer,
	}
}
// Deg2Num converts the tile's Lat/Long to slippy-map tile indices at
// zoom t.Z (standard OSM slippy map tilenames formula).
func (t *Tile) Deg2Num() (x, y int) {
	n := math.Exp2(float64(t.Z))
	x = int(math.Floor((t.Long + 180.0) / 360.0 * n))
	latRad := t.Lat * math.Pi / 180.0
	y = int(math.Floor((1.0 - math.Log(math.Tan(latRad)+1.0/math.Cos(latRad))/math.Pi) / 2.0 * n))
	return x, y
}
// Num2Deg converts the tile's X/Y/Z indices back to the latitude and
// longitude of the tile's origin corner.
func (t *Tile) Num2Deg() (lat, lng float64) {
	return Tile2Lat(uint64(t.Y), uint64(t.Z)), Tile2Lon(uint64(t.X), uint64(t.Z))
}
func Tile2Lon(x, z uint64) float64 { return float64(x)/math.Exp2(float64(z))*360.0 - 180.0 }
func Tile2Lat(y, z uint64) float64 {
var n float64 = math.Pi
if y != 0 {
n = math.Pi - 2.0*math.Pi*float64(y)/math.Exp2(float64(z))
}
return 180.0 / math.Pi * math.Atan(0.5*(math.Exp(n)-math.Exp(-n)))
}
// Bounds returns the tile's geographic edges in the order: west
// longitude, north latitude, east longitude, south latitude.
// (The previous local names labeled a longitude as "north" and a
// latitude as "east"; the values and their order are unchanged.)
func (t *Tile) Bounds() [4]float64 {
	west := Tile2Lon(uint64(t.X), uint64(t.Z))
	north := Tile2Lat(uint64(t.Y), uint64(t.Z))
	east := Tile2Lon(uint64(t.X+1), uint64(t.Z))
	south := Tile2Lat(uint64(t.Y+1), uint64(t.Z))
	return [4]float64{west, north, east, south}
}
func toWebMercator(srid int, pt [2]float64) (npt [2]float64, err error) {
switch srid {
default:
return npt, UnknownConversionError
case WebMercator:
return pt, nil
case WGS84:
tnpt, err := webmercator.PToXY(pt[0], pt[1])
if err != nil {
return npt, err
}
return [2]float64{tnpt[0], tnpt[1]}, nil
}
}
func fromWebMercator(srid int, pt [2]float64) (npt [2]float64, err error) {
switch srid {
default:
return npt, UnknownConversionError
case WebMercator:
return pt, nil
case WGS84:
tnpt, err := webmercator.PToLonLat(pt[0], pt[1])
if err != nil {
return npt, err
}
return [2]float64{tnpt[0], tnpt[1]}, nil
}
}
// ToPixel converts pt (in the given SRID) to the tile's pixel
// coordinates, truncating to whole pixels. Requires Init to have run.
func (t *Tile) ToPixel(srid int, pt [2]float64) (npt [2]float64, err error) {
	wm, err := toWebMercator(srid, pt)
	if err != nil {
		return npt, err
	}
	px := int64((wm[0] - t.extent.MinX()) * t.Extent / t.xspan)
	py := int64((wm[1] - t.extent.MinY()) * t.Extent / t.yspan)
	return [2]float64{float64(px), float64(py)}, nil
}
// FromPixel converts pixel coordinates on this tile back to a point in
// the given SRID, truncating the inputs to whole pixels first.
// Requires Init to have run.
func (t *Tile) FromPixel(srid int, pt [2]float64) ([2]float64, error) {
	px := float64(int64(pt[0]))
	py := float64(int64(pt[1]))
	wmx := (px * t.xspan / t.Extent) + t.extent.MinX()
	wmy := (py * t.yspan / t.Extent) + t.extent.MinY()
	return fromWebMercator(srid, [2]float64{wmx, wmy})
}
// PixelBufferedBounds returns the tile's pixel bounds expanded by
// t.Buffer on every side (precomputed by Init; the error is always nil).
func (t *Tile) PixelBufferedBounds() (bounds [4]float64, err error) {
	return t.bufpext.Extent(), nil
}
// ZLevel returns the tile's web mercator zoom level.
func (t *Tile) ZLevel() uint {
	return t.Z
}
// ZRes takes a web mercator zoom level and returns the pixel resolution
// for that scale, assuming t.Extent x t.Extent pixel tiles.
// Non-integer zoom levels are accepted.
// ported from: https://raw.githubusercontent.com/mapbox/postgis-vt-util/master/postgis-vt-util.sql
// 40075016.6855785 is the equator in meters for WGS84 at z=0
func (t *Tile) ZRes() float64 {
	return 40075016.6855785 / (t.Extent * math.Exp2(float64(t.Z)))
}
// This is from Leafty
func (t *Tile) ZEpislon() float64 {
if t.Z == MaxZ {
return 0
}
epi := t.Tolerance
if epi <= 0 {
return 0
}
ext := t.Extent
denom := (math.Exp2(float64(t.Z)) * ext)
e := epi / denom
return e
} | tile.go | 0.749271 | 0.435121 | tile.go | starcoder |
package lnchat
import "github.com/lightningnetwork/lnd/lntypes"
// LightningNode represents a Lightning network node.
type LightningNode struct {
	// Alias is the Lightning network alias of the node.
	Alias string
	// Address is the Lightning address of the node.
	Address string
}
// Chain represents a blockchain and network of a Lightning node.
type Chain struct {
	// The blockchain of a node.
	Chain string
	// The network a node is operating on.
	Network string
}
// SelfInfo contains information about the underlying Lightning node.
type SelfInfo struct {
	// Node holds the general node information.
	Node LightningNode
	// Chains are the chains of the current node.
	Chains []Chain
}
// BalanceAllocation represents the distribution of
// balance in local and remote endpoints.
type BalanceAllocation struct {
	// The part of the balance available on the local end (in millisatoshi).
	LocalMsat uint64
	// The part of the balance available on the remote end (in millisatoshi).
	RemoteMsat uint64
}
// SelfBalance contains information about the underlying Lightning node balance.
type SelfBalance struct {
	// The confirmed balance of the node's wallet (in satoshi).
	WalletConfirmedBalanceSat int64
	// The unconfirmed balance of the node's wallet (in satoshi).
	WalletUnconfirmedBalanceSat int64
	// The balance available across all open channels.
	ChannelBalance BalanceAllocation
	// The balance in pending open channels.
	PendingOpenBalance BalanceAllocation
	// The unsettled balance across all open channels.
	UnsettledBalance BalanceAllocation
}
// TxFeeOptions represents ways to control the fee
// of on-chain transactions.
type TxFeeOptions struct {
	// A manual fee rate, in satoshis per virtual byte of the funding transaction.
	SatPerVByte uint64
	// A number of blocks from the current that the transaction
	// should confirm by, which is used for fee estimation.
	TargetConfBlock uint32
}
// ChannelPoint represents a channel, as identified by its funding transaction.
type ChannelPoint struct {
	// The funding transaction ID of the channel opening
	// transaction (hex-encoded and byte-reversed).
	FundingTxid string
	// The output index of the funding transaction.
	OutputIndex uint32
}
// LightningChannel represents a Lightning network channel between two nodes.
type LightningChannel struct {
	// The ID of the Lightning channel.
	ChannelID uint64
	// The Lightning address of the first endpoint (node) of the channel.
	Node1Address string
	// The Lightning address of the second endpoint of the channel.
	Node2Address string
	// Capacity is the channel capacity (in millisatoshi).
	CapacityMsat int64
}
// PaymentOptions contains the payment details for sending a message.
type PaymentOptions struct {
	// FeeLimitMsat is the maximum amount of fees (in millisatoshi)
	// the sender is willing to pay in order to send a message.
	FeeLimitMsat int64
	// FinalCltvDelta is the difference in blocks from the current height
	// that should be used for the timelock of the final hop.
	FinalCltvDelta int32
	// TimeoutSecs is the upper limit (in seconds) afforded for
	// attempting to send a message.
	TimeoutSecs int32
}
// PreImageHash is the preimage hash of a payment (alias of lntypes.Hash).
type PreImageHash = lntypes.Hash
// PreImage is the type of a payment preimage (alias of lntypes.Preimage).
type PreImage = lntypes.Preimage
package horserace
import (
"fmt"
"strings"
)
// GetClassAndDistance splits a betfair market name into the race
// distance (the first whitespace-separated field) and the race
// classification (the remaining fields rejoined with single spaces).
// Both results are empty when the name has fewer than two fields.
func GetClassAndDistance(marketName string) (name string, distance string) {
	fields := strings.Fields(marketName)
	if len(fields) < 2 {
		return "", ""
	}
	return strings.Join(fields[1:], " "), fields[0]
}
// GetTrackNameFromAbbrev returns the track name, given the the betfair track abbreviation and country.
func GetTrackNameFromAbbrev(country Country, abbrev string) (track string, err error) {
	var ok bool
	switch country {
	case Country_UK:
		track, ok = ukAbbrevToTrack[abbrev]
	case Country_IRE:
		track, ok = ireAbbrevToTrack[abbrev]
	default:
		return "", fmt.Errorf("country [%s] not supported", country)
	}
	if !ok {
		return "", fmt.Errorf("couldn't find track with abbreviation [%s] in country [%s]", abbrev, country)
	}
	return track, nil
}
// GetAbbrevFromTrackName returns the betfair track abbreviation, given the country and track name.
// Fix: the lookup result was previously assigned into the `track`
// parameter instead of `abbrev`, and the not-found error interpolated
// the never-assigned (empty) `abbrev` instead of the requested track.
func GetAbbrevFromTrackName(country Country, track string) (abbrev string, err error) {
	var ok bool
	switch country {
	case Country_UK:
		abbrev, ok = ukTrackToAbbrev[track]
	case Country_IRE:
		abbrev, ok = ireTrackToAbbrev[track]
	default:
		return "", fmt.Errorf("country [%s] not supported", country)
	}
	if ok {
		return abbrev, nil
	}
	return "", fmt.Errorf("couldn't find track [%s] in country [%s]", track, country)
}
// GetClassificationFromAbbrev returns the race classification, given the betfair classification abbreviation.
// When an abbreviation maps to several classifications, the first is returned.
func GetClassificationFromAbbrev(abbrev string) (class string, err error) {
	classes, ok := abbrevToClass[abbrev]
	if !ok {
		return "", fmt.Errorf("couldn't find match")
	}
	return classes[0], nil
}
// GetAbbrevFromClassification returns the betfair classification abbreviation, given the race classification.
func GetAbbrevFromClassification(class string) (abbrev string, err error) {
	a, ok := classToAbbrev[class]
	if !ok {
		return "", fmt.Errorf("couldn't find match")
	}
	return a, nil
}
// Country represents a country.
type Country uint
const (
	// Country_UK represents UK country
	Country_UK = iota + 1
	// Country_IRE represents IRE country
	Country_IRE
)
// String returns the string representation of Country.
// Values outside the declared countries index past the name table and panic.
func (c Country) String() string {
	names := [...]string{"", "UK", "IRE"}
	return names[c]
}
package stats
import "runtime"
// BasicMemStats includes a few of the fields from runtime.MemStats suitable for
// general logging.
type BasicMemStats struct {
// General statistics.
Alloc uint64 // bytes allocated and still in use
TotalAlloc uint64 // bytes allocated (even if freed)
Sys uint64 // bytes obtained from system (sum of XxxSys in runtime)
Lookups uint64 // number of pointer lookups
Mallocs uint64 // number of mallocs
Frees uint64 // number of frees
// Main allocation heap statistics.
HeapAlloc uint64 // bytes allocated and still in use
HeapSys uint64 // bytes obtained from system
HeapIdle uint64 // bytes in idle spans
HeapInuse uint64 // bytes in non-idle span
HeapReleased uint64 // bytes released to the OS
HeapObjects uint64 // total number of allocated objects
// Garbage collector statistics.
PauseTotalNs uint64
LatestPauseNs uint64
}
// memStatsPlaceholder holds whichever stats view (basic or full) should
// be exposed when the wrapper is serialized.
type memStatsPlaceholder interface{}
// MemStatsWrapper wraps runtime.MemStats with an optionally less verbose JSON
// representation. The JSON field names correspond exactly to the runtime field
// names to avoid reimplementing the entire struct. The embedded
// placeholder points at either basic or cache, selected at construction.
type MemStatsWrapper struct {
	memStatsPlaceholder `json:"Memory"`
	basic *BasicMemStats // non-nil only in non-verbose mode
	cache *runtime.MemStats // always populated by Update
}
// NewMemStatsWrapper builds a wrapper that serializes the full
// runtime.MemStats when verbose is true, or the condensed
// BasicMemStats view otherwise.
func NewMemStatsWrapper(verbose bool) *MemStatsWrapper {
	w := &MemStatsWrapper{cache: &runtime.MemStats{}}
	if verbose {
		w.memStatsPlaceholder = w.cache
	} else {
		w.basic = &BasicMemStats{}
		w.memStatsPlaceholder = w.basic
	}
	return w
}
// Update fetches the current memstats from runtime into the cache and,
// in non-verbose mode, copies the selected fields into the basic view.
func (s *MemStatsWrapper) Update() {
	runtime.ReadMemStats(s.cache)
	if s.basic != nil {
		// Gross, but any decent editor can generate this in a couple commands.
		s.basic.Alloc = s.cache.Alloc
		s.basic.TotalAlloc = s.cache.TotalAlloc
		s.basic.Sys = s.cache.Sys
		s.basic.Lookups = s.cache.Lookups
		s.basic.Mallocs = s.cache.Mallocs
		s.basic.Frees = s.cache.Frees
		s.basic.HeapAlloc = s.cache.HeapAlloc
		s.basic.HeapSys = s.cache.HeapSys
		s.basic.HeapIdle = s.cache.HeapIdle
		s.basic.HeapInuse = s.cache.HeapInuse
		s.basic.HeapReleased = s.cache.HeapReleased
		s.basic.HeapObjects = s.cache.HeapObjects
		s.basic.PauseTotalNs = s.cache.PauseTotalNs
		// PauseNs is a circular buffer of recent pause times; the most
		// recent entry lives at (NumGC+255)%256 per the runtime docs.
		s.basic.LatestPauseNs = s.cache.PauseNs[(s.cache.NumGC+255)%256]
	}
}
package indicators
import "math"
// mean returns the arithmetic mean of values. An empty (or nil) slice
// yields 0; previously the 0/0 division produced NaN.
func mean(values []float64) float64 {
	if len(values) == 0 {
		return 0
	}
	var total float64
	for _, v := range values {
		total += v
	}
	return total / float64(len(values))
}
// evenSlice trims the front of inA so that, when inA is the longer
// input, both returned slices have equal length; inB is returned
// unchanged (it is never trimmed when it is the longer one).
func evenSlice(inA, inB []float64) (outA, outB []float64) {
	offset := len(inA) - len(inB)
	if offset < 0 {
		offset = 0
	}
	return inA[offset:], inB
}
// trueRange returns the true range series for high/low/close data:
// for each bar i >= 1, the maximum of (high-low), |prevClose-high| and
// |prevClose-low|. Element 0 is left at zero.
func trueRange(inHigh, inLow, inClose []float64) []float64 {
	out := make([]float64, len(inClose))
	for today := 1; today < len(inClose); today++ {
		hl := inHigh[today] - inLow[today]
		hc := math.Abs(inClose[today-1] - inHigh[today])
		lc := math.Abs(inClose[today-1] - inLow[today])
		tr := hl
		if hc > tr {
			tr = hc
		}
		if lc > tr {
			tr = lc
		}
		out[today] = tr
	}
	return out
}
// variance returns the rolling population variance of inReal over a
// window of inTimePeriod samples, computed incrementally from running
// sums of x and x² (E[x²] − E[x]²). Entries before the first complete
// window are left at zero.
// NOTE(review): assumes 1 <= inTimePeriod <= len(inReal); the trailing
// do-while executes at least once, so shorter inputs index out of
// range — confirm callers guarantee this.
func variance(inReal []float64, inTimePeriod int) []float64 {
	outReal := make([]float64, len(inReal))
	nbInitialElementNeeded := inTimePeriod - 1
	startIdx := nbInitialElementNeeded
	periodTotal1 := 0.0 // running sum of x over the window
	periodTotal2 := 0.0 // running sum of x² over the window
	trailingIdx := startIdx - nbInitialElementNeeded
	i := trailingIdx
	// Prime the sums with the first inTimePeriod-1 samples.
	if inTimePeriod > 1 {
		for i < startIdx {
			tempReal := inReal[i]
			periodTotal1 += tempReal
			tempReal *= tempReal
			periodTotal2 += tempReal
			i++
		}
	}
	outIdx := startIdx
	// Slide the window: add the incoming sample, emit, drop the trailing one.
	for ok := true; ok; {
		tempReal := inReal[i]
		periodTotal1 += tempReal
		tempReal *= tempReal
		periodTotal2 += tempReal
		meanValue1 := periodTotal1 / float64(inTimePeriod)
		meanValue2 := periodTotal2 / float64(inTimePeriod)
		tempReal = inReal[trailingIdx]
		periodTotal1 -= tempReal
		tempReal *= tempReal
		periodTotal2 -= tempReal
		outReal[outIdx] = meanValue2 - meanValue1*meanValue1
		i++
		trailingIdx++
		outIdx++
		ok = i < len(inReal)
	}
	return outReal
}
// stdDev - Standard Deviation. Returns the rolling standard deviation of
// inReal over inTimePeriod, scaled by inNbDev (number of deviations).
func stdDev(inReal []float64, inTimePeriod int, inNbDev float64) []float64 {
	// Start from the rolling variance and take square roots in place.
	outReal := variance(inReal, inTimePeriod)
	if inNbDev != 1.0 {
		for i := 0; i < len(inReal); i++ {
			tempReal := outReal[i]
			// Variances below ~1e-14 (including negatives from
			// floating-point cancellation) are clamped to zero.
			if !(tempReal < 0.00000000000001) {
				outReal[i] = math.Sqrt(tempReal) * inNbDev
			} else {
				outReal[i] = 0.0
			}
		}
	} else {
		// Same loop without the multiplier for the common inNbDev == 1 case.
		for i := 0; i < len(inReal); i++ {
			tempReal := outReal[i]
			if !(tempReal < 0.00000000000001) {
				outReal[i] = math.Sqrt(tempReal)
			} else {
				outReal[i] = 0.0
			}
		}
	}
	return outReal
} | indicators/indicators.go | 0.62498 | 0.513546 | indicators.go | starcoder |
package main
// https://projecteuler.net/problem=12
import (
"flag"
"fmt"
"math"
)
// Maps caches the prime-factorization (prime -> exponent) of previously
// factored numbers, keyed by the number itself.
type Maps map[int]map[int]int

// getTriangleNumber returns the n-th triangular number via the
// closed-form arithmetic-progression sum n*(n+1)/2.
func getTriangleNumber(n int) int {
	return n * (n + 1) / 2
}

// countMultiples returns the prime factorization of n as a map from
// prime to exponent, using trial division up to sqrt(n). Whenever a
// partially reduced value of n is already present in divisorMap, its
// cached factors are merged in and the division stops early.
//
// Fixes: removed the leftover debug fmt.Println that polluted stdout on
// every cache hit, and tidied the increment logic.
func countMultiples(n int, divisorMap Maps) map[int]int {
	factorMap := map[int]int{}
	for i := 2; i <= int(math.Sqrt(float64(n))); i++ {
		for n%i == 0 && n > 1 {
			n /= i
			factorMap[i]++
			if cached, ok := divisorMap[n]; ok {
				// Merge the cached factorization of the reduced value.
				for prime, exp := range cached {
					factorMap[prime] += exp
				}
				return factorMap
			}
		}
	}
	// Whatever remains above 1 is itself prime.
	if n > 1 {
		factorMap[n] = 1
	}
	return factorMap
}
// highestDivisibleTriangularNumber returns the first triangular number
// whose divisor count is at least maxDivisor, memoizing factorizations
// in divisorMap as it goes.
func highestDivisibleTriangularNumber(maxDivisor int) int {
	divisorMap := Maps{}
	for n := 2; n > 0; n++ {
		tri := getTriangleNumber(n)
		divisorMap[tri] = countMultiples(tri, divisorMap)
		// The divisor count is the product of (exponent+1) over the
		// prime factorization.
		divisors := 1
		for _, exp := range divisorMap[tri] {
			divisors *= exp + 1
		}
		if divisors >= maxDivisor {
			return tri
		}
	}
	return 1
}
// main parses the -divisor flag and prints the first triangular number
// having at least that many divisors (Project Euler problem 12).
func main(){
	//divisorMap := Maps{15: {3: 1, 5: 1}}
	divisor := flag.Int("divisor", 2, "Flag to indicate the number of divisors to look out for")
	flag.Parse()
	//fmt.Println("Number of multiples to ", *divisor, " is ", countMultiples(*divisor, divisorMap))
	fmt.Println("Number of multiples to ", *divisor, " is ", highestDivisibleTriangularNumber(*divisor))
} | Problem_12_Euler/main.go | 0.766556 | 0.400368 | main.go | starcoder |
package ion
import "fmt"
// A Type represents the type of an Ion Value.
type Type uint8

const (
	// NoType is returned by a Reader that is not currently pointing at a value.
	NoType Type = iota
	// NullType is the type of the (unqualified) Ion null value.
	NullType
	// BoolType is the type of an Ion boolean, true or false.
	BoolType
	// IntType is the type of a signed Ion integer of arbitrary size.
	IntType
	// FloatType is the type of a fixed-precision Ion floating-point value.
	FloatType
	// DecimalType is the type of an arbitrary-precision Ion decimal value.
	DecimalType
	// TimestampType is the type of an arbitrary-precision Ion timestamp.
	TimestampType
	// SymbolType is the type of an Ion symbol, mapped to an integer ID by a SymbolTable
	// to (potentially) save space.
	SymbolType
	// StringType is the type of a non-symbol Unicode string, represented directly.
	StringType
	// ClobType is the type of a character large object. Like a BlobType, it stores an
	// arbitrary sequence of bytes, but it represents them in text form as an escaped-ASCII
	// string rather than a base64-encoded string.
	ClobType
	// BlobType is the type of a binary large object; a sequence of arbitrary bytes.
	BlobType
	// ListType is the type of a list, recursively containing zero or more Ion values.
	ListType
	// SexpType is the type of an s-expression. Like a ListType, it contains a sequence
	// of zero or more Ion values, but with a lisp-like syntax when encoded as text.
	SexpType
	// StructType is the type of a structure, recursively containing a sequence of named
	// (by an Ion symbol) Ion values.
	StructType
)

// String implements fmt.Stringer for Type.
func (t Type) String() string {
	// Indexed by the Type constants above; keeps the mapping in declared order.
	names := [...]string{
		NoType:        "<no type>",
		NullType:      "null",
		BoolType:      "bool",
		IntType:       "int",
		FloatType:     "float",
		DecimalType:   "decimal",
		TimestampType: "timestamp",
		SymbolType:    "symbol",
		StringType:    "string",
		ClobType:      "clob",
		BlobType:      "blob",
		ListType:      "list",
		SexpType:      "sexp",
		StructType:    "struct",
	}
	if int(t) < len(names) {
		return names[t]
	}
	return fmt.Sprintf("<unknown type %v>", uint8(t))
}

// IsScalar determines if the type is a scalar type
func IsScalar(t Type) bool {
	return t >= NullType && t <= BlobType
}

// IsContainer determines if the type is a container type
func IsContainer(t Type) bool {
	return t >= ListType && t <= StructType
}
// IntSize represents the smallest Go representation that can losslessly
// hold an Ion integer.
type IntSize uint8

const (
	// NullInt is the size of null.int and other things that aren't actually ints.
	NullInt IntSize = iota
	// Int32 is the size of an Ion integer that can be losslessly stored in an int32.
	Int32
	// Int64 is the size of an Ion integer that can be losslessly stored in an int64.
	Int64
	// BigInt is the size of an Ion integer that can only be losslessly stored in a big.Int.
	BigInt
)

// String implements fmt.Stringer for IntSize.
func (i IntSize) String() string {
	switch i {
	case NullInt:
		return "null.int"
	case Int32:
		return "int32"
	case Int64:
		return "int64"
	case BigInt:
		return "big.Int"
	default:
		// Out-of-range values are reported rather than panicking.
		return fmt.Sprintf("<unknown size %v>", uint8(i))
	}
} | ion/type.go | 0.7413 | 0.597021 | type.go | starcoder |
package loader
import (
"context"
"fmt"
"regexp"
"strings"
"github.com/xo/xo/models"
)
// init registers the Oracle loader: driver name, kind names, bind-parameter
// syntax, schema/type introspection queries, and the Go-type mapper.
func init() {
	Register(&Loader{
		Driver: "oracle",
		Kind: map[Kind]string{
			KindTable: "TABLE",
			KindView:  "VIEW",
		},
		ParamN: func(i int) string {
			// Oracle bind parameters are 1-based: :1, :2, ...
			return fmt.Sprintf(":%d", i+1)
		},
		MaskFunc: func() string {
			return ":%d"
		},
		Schema:           models.OracleSchema,
		GoType:           OracleGoType,
		Tables:           models.OracleTables,
		TableColumns:     models.OracleTableColumns,
		TableForeignKeys: models.OracleTableForeignKeys,
		TableIndexes:     models.OracleTableIndexes,
		IndexColumns:     models.OracleIndexColumns,
		QueryColumns:     OracleQueryColumns,
	})
}
// orLenRE matches a parenthesized numeric length suffix in a type
// declaration, e.g. the "(255)" in "varchar2(255)".
var orLenRE = regexp.MustCompile(`\([0-9]+\)`)
// OracleGoType parse a oracle type into a Go type based on the column
// definition, returning (goType, zeroValue, precision, error).
func OracleGoType(ctx context.Context, typ string, nullable bool) (string, string, int, error) {
	// Extract precision/scale from declarations like "number(10,2)".
	typ, prec, scale, err := parsePrec(typ)
	if err != nil {
		return "", "", 0, err
	}
	var goType, zero string
	// pick switches to the database/sql wrapper type when the column is nullable.
	pick := func(plain, plainZero, wrapped, wrappedZero string) (string, string) {
		if nullable {
			return wrapped, wrappedZero
		}
		return plain, plainZero
	}
	// Strip any remaining "(n)" length suffix (e.g. on timestamps).
	switch orLenRE.ReplaceAllString(typ, "") {
	case "char", "nchar", "varchar", "varchar2", "nvarchar2", "clob", "nclob", "rowid":
		goType, zero = pick("string", `""`, "sql.NullString", "sql.NullString{}")
	case "number":
		switch {
		case prec == 0 && scale == 0 && !nullable:
			goType, zero = "int", "0"
		case scale != 0 && !nullable:
			goType, zero = "float64", "0.0"
		case scale != 0 && nullable:
			goType, zero = "sql.NullFloat64", "sql.NullFloat64{}"
		case !nullable:
			goType, zero = "int64", "0"
		default:
			goType, zero = "sql.NullInt64", "sql.NullInt64{}"
		}
	case "float":
		goType, zero = pick("float64", "0.0", "sql.NullFloat64", "sql.NullFloat64{}")
	case "date", "timestamp", "timestamp with time zone":
		goType, zero = pick("time.Time", "time.Time{}", "sql.NullTime", "sql.NullTime{}")
	case "blob", "long raw", "raw":
		goType, zero = "[]byte", "nil"
	default:
		goType, zero = schemaGoType(ctx, typ)
	}
	// A precision-1 integer column is treated as a boolean flag.
	switch {
	case goType == "int" && prec == 1 && !nullable:
		goType, zero = "bool", "false"
	case goType == "int" && prec == 1 && nullable:
		goType, zero = "sql.NullBool", "sql.NullBool{}"
	}
	return goType, zero, prec, nil
}
// OracleQueryColumns parses the query and generates a type for it by
// materializing the query as a temporary table, introspecting that
// table's columns, and dropping it again.
func OracleQueryColumns(ctx context.Context, db models.DB, schema string, inspect []string) ([]*models.Column, error) {
	// create temporary view xoid (randomized name to avoid collisions)
	xoid := "XO$" + randomID()
	viewq := `CREATE GLOBAL TEMPORARY TABLE ` + xoid + ` ` +
		`ON COMMIT PRESERVE ROWS ` +
		`AS ` + strings.Join(inspect, "\n")
	models.Logf(viewq)
	if _, err := db.ExecContext(ctx, viewq); err != nil {
		return nil, err
	}
	// load columns
	cols, err := models.OracleTableColumns(ctx, db, schema, xoid)
	// drop inspect view; the drop is best-effort cleanup, so its error is
	// deliberately ignored and the column-load error is returned instead.
	dropq := `DROP TABLE ` + xoid
	models.Logf(dropq)
	_, _ = db.ExecContext(ctx, dropq)
	// load column information
	return cols, err
} | loader/oracle.go | 0.517571 | 0.408011 | oracle.go | starcoder |
package notebook
import (
"gioui.org/f32"
"gioui.org/layout"
"gioui.org/op"
"gioui.org/op/clip"
"gioui.org/op/paint"
"image"
"image/color"
)
// rrect appends two concentric rounded-rectangle outlines to ops: the
// full-size outline and a copy scaled to 85% and centered inside it.
// se, sw, nw, ne are the corner radii. The previously duplicated
// path-construction code is factored into addRoundedOutline, and the
// redundant float32() conversions of the already-float32 width/height
// are gone.
func rrect(ops *op.Ops, width, height, se, sw, nw, ne float32) {
	var b clip.Path
	b.Begin(ops)
	b.Move(f32.Point{X: width, Y: height - se})
	addRoundedOutline(&b, width, height, se, sw, nw, ne)
	// addRoundedOutline leaves the pen at (width, ne); return to origin.
	b.Move(f32.Point{X: -width, Y: -ne})
	const scale = 0.85
	// Inset so the scaled copy stays centered inside the outer outline.
	b.Move(f32.Point{X: width * (1 - scale) * .5, Y: height * (1 - scale) * .5})
	b.Move(f32.Point{X: width * scale, Y: (height - se) * scale})
	addRoundedOutline(&b, width*scale, height*scale, se*scale, sw*scale, nw*scale, ne*scale)
	b.End().Add(ops)
}

// addRoundedOutline traces one rounded-rectangle outline of size w x h,
// using cubic Béziers for the corners, starting from the pen position
// at the rectangle's (w, h-se) point. Relative to that start, the pen
// ends at the rectangle's (w, ne) point.
func addRoundedOutline(b *clip.Path, w, h, se, sw, nw, ne float32) {
	const c = 0.55228475 // 4*(sqrt(2)-1)/3, cubic circle-approximation constant
	b.Cube(f32.Point{X: 0, Y: se * c}, f32.Point{X: -se + se*c, Y: se}, f32.Point{X: -se, Y: se}) // SE
	b.Line(f32.Point{X: sw - w + se, Y: 0})
	b.Cube(f32.Point{X: -sw * c, Y: 0}, f32.Point{X: -sw, Y: -sw + sw*c}, f32.Point{X: -sw, Y: -sw}) // SW
	b.Line(f32.Point{X: 0, Y: nw - h + sw})
	b.Cube(f32.Point{X: 0, Y: -nw * c}, f32.Point{X: nw - nw*c, Y: -nw}, f32.Point{X: nw, Y: -nw}) // NW
	b.Line(f32.Point{X: w - ne - nw, Y: 0})
	b.Cube(f32.Point{X: ne * c, Y: 0}, f32.Point{X: ne, Y: ne - ne*c}, f32.Point{X: ne, Y: ne}) // NE
}
// fill paints the context's minimum-constraint rectangle with the solid
// color col and records that size as the widget's dimensions.
func fill(gtx *layout.Context, col color.RGBA) {
	cs := gtx.Constraints
	d := image.Point{X: cs.Width.Min, Y: cs.Height.Min}
	dr := f32.Rectangle{
		Max: f32.Point{X: float32(d.X), Y: float32(d.Y)},
	}
	paint.ColorOp{Color: col}.Add(gtx.Ops)
	paint.PaintOp{Rect: dr}.Add(gtx.Ops)
	gtx.Dimensions = layout.Dimensions{Size: d}
} | notebook/paint.go | 0.763836 | 0.400192 | paint.go | starcoder |
package cluster
import (
"encoding/gob"
"errors"
"fmt"
"math"
"math/rand"
"os"
"time"
"gonum.org/v1/gonum/mat"
)
// KPrototypes is a basic class for the k-prototypes algorithm, it contains all
// necessary information as alg. parameters, labels, centroids, ...
type KPrototypes struct {
	DistanceFunc DistanceFunction // dissimilarity measure for categorical attributes
	InitializationFunc InitializationFunction // strategy for picking initial categorical centroids
	CategoricalInd []int // column indices of categorical attributes in the input matrix
	ClustersNumber int
	RunsNumber int
	MaxIterationNumber int
	WeightVectors [][]float64 // WeightVectors[0] is passed to SetWeights before fitting/prediction
	FrequencyTable [][]map[float64]float64 // frequency table - list of lists with dictionaries containing frequencies of values per cluster and attribute
	MembershipNumTable [][]float64 // membership table for numeric attributes - list of labels for each cluster
	LabelsCounter []int // number of points currently assigned to each cluster
	Labels *DenseVector
	ClusterCentroids *DenseMatrix // NOTE(review): not referenced by the methods visible here — confirm it is still needed
	ClusterCentroidsCat *DenseMatrix
	ClusterCentroidsNum *DenseMatrix
	Gamma float64 // weight of the categorical distance in Gamma*distCat + distNum (see near)
	IsFitted bool
	ModelPath string // file path used by SaveModel/LoadModel
}
// NewKPrototypes implements constructor for the KPrototypes struct.
func NewKPrototypes(dist DistanceFunction, init InitializationFunction, categorical []int, clusters int, runs int, iters int, weights [][]float64, g float64, modelPath string) *KPrototypes {
	// NOTE(review): seeding the global math/rand source inside a constructor
	// is a process-wide side effect; consider moving this to the caller.
	rand.Seed(time.Now().UnixNano())
	return &KPrototypes{DistanceFunc: dist,
		InitializationFunc: init,
		ClustersNumber:     clusters,
		CategoricalInd:     categorical,
		RunsNumber:         runs,
		MaxIterationNumber: iters,
		Gamma:              g,
		WeightVectors:      weights,
		ModelPath:          modelPath,
		Labels:             &DenseVector{VecDense: new(mat.VecDense)},
		ClusterCentroidsCat: &DenseMatrix{Dense: new(mat.Dense)},
		ClusterCentroidsNum: &DenseMatrix{Dense: new(mat.Dense)},
		ClusterCentroids:   &DenseMatrix{Dense: new(mat.Dense)},
	}
}
// FitModel main algorithm function which finds the best clusters centers for
// the given dataset X. It partitions X into categorical/numerical parts,
// initializes centroids and bookkeeping tables, performs one initial
// assignment pass, then iterates until no label changes or
// MaxIterationNumber is reached.
func (km *KPrototypes) FitModel(X *DenseMatrix) error {
	err := km.validateParameters()
	if err != nil {
		return fmt.Errorf("kmodes: failed to fit the model: %v", err)
	}
	xRows, xCols := X.Dims()
	// Partition data on two sets - one with categorical, other with numerical
	// data.
	xCat, xNum := km.partitionData(xRows, xCols, X)
	_, xCatCols := xCat.Dims()
	_, xNumCols := xNum.Dims()
	// Normalize numerical values.
	xNum = normalizeNum(xNum)
	// Initialize weightVector.
	SetWeights(km.WeightVectors[0])
	// Initialize clusters for categorical data.
	km.ClusterCentroidsCat, err = km.InitializationFunc(xCat, km.ClustersNumber, km.DistanceFunc)
	if err != nil {
		return fmt.Errorf("kmodes: failed to fit the model: %v", err)
	}
	// Initialize clusters for numerical data.
	km.ClusterCentroidsNum, err = InitNum(xNum, km.ClustersNumber, km.DistanceFunc)
	if err != nil {
		return fmt.Errorf("kmodes: failed to initialiaze cluster centers for numerical data: %v", err)
	}
	// Initialize labels vector
	km.Labels = NewDenseVector(xRows, nil)
	km.LabelsCounter = make([]int, km.ClustersNumber)
	// Create frequency table for categorical data.
	km.FrequencyTable = make([][]map[float64]float64, km.ClustersNumber)
	for i := range km.FrequencyTable {
		km.FrequencyTable[i] = make([]map[float64]float64, xCatCols)
		for j := range km.FrequencyTable[i] {
			km.FrequencyTable[i][j] = make(map[float64]float64)
		}
	}
	// Create membership table.
	km.MembershipNumTable = make([][]float64, km.ClustersNumber)
	for i := range km.MembershipNumTable {
		km.MembershipNumTable[i] = make([]float64, 0, 100)
	}
	// Perform initial assignements to clusters - in order to fill in frequency
	// table.
	for i := 0; i < xRows; i++ {
		rowCat := &DenseVector{xCat.RowView(i).(*mat.VecDense)}
		rowNum := &DenseVector{xNum.RowView(i).(*mat.VecDense)}
		newLabel, _, err := km.near(i, rowCat, rowNum)
		// NOTE(review): newLabel is used on the next two lines before the
		// error from near is checked — confirm the intended ordering.
		km.Labels.SetVec(i, newLabel)
		km.LabelsCounter[int(newLabel)]++
		if err != nil {
			return fmt.Errorf("kmodes: initial labels assignement failure: %v", err)
		}
		for j := 0; j < xCatCols; j++ {
			km.FrequencyTable[int(newLabel)][j][rowCat.At(j, 0)]++
		}
		km.MembershipNumTable[int(newLabel)] = append(km.MembershipNumTable[int(newLabel)], float64(i))
	}
	// Perform initial centers update - because iteration() starts with label
	// assignements.
	for i := 0; i < km.ClustersNumber; i++ {
		// Find new values for clusters centers.
		km.findNewCenters(xCatCols, xNumCols, i, xNum)
	}
	for i := 0; i < km.MaxIterationNumber; i++ {
		_, change, err := km.iteration(xNum, xCat)
		if err != nil {
			return fmt.Errorf("KMeans error at iteration %d: %v", i, err)
		}
		if change == false {
			// NOTE(review): IsFitted is only set on early convergence; a run
			// that exhausts MaxIterationNumber returns nil with IsFitted
			// still false, which makes Predict fail — confirm intent.
			km.IsFitted = true
			return nil
		}
	}
	return nil
}
// partitionData splits X (xRows x xCols) into a categorical matrix made
// of the columns listed in km.CategoricalInd and a numerical matrix made
// of the remaining columns. CategoricalInd is assumed to be sorted
// ascending.
// NOTE(review): indexes km.CategoricalInd[lastCat] unconditionally, so an
// empty CategoricalInd slice panics — verify callers guarantee at least
// one categorical column.
func (km *KPrototypes) partitionData(xRows, xCols int, X *DenseMatrix) (*DenseMatrix, *DenseMatrix) {
	xCat := NewDenseMatrix(xRows, len(km.CategoricalInd), nil)
	xNum := NewDenseMatrix(xRows, xCols-len(km.CategoricalInd), nil)
	var lastCat, lastNum int
	for i := 0; i < xCols; i++ {
		vec := make([]float64, xRows)
		vec = mat.Col(vec, i, X)
		if km.CategoricalInd[lastCat] == i {
			xCat.SetCol(lastCat, vec)
			lastCat++
			// Clamp so the index stays valid after the last categorical
			// column has been consumed.
			if lastCat >= len(km.CategoricalInd) {
				lastCat--
			}
		} else {
			xNum.SetCol(lastNum, vec)
			lastNum++
		}
	}
	return xCat, xNum
}
// iteration runs one pass of the k-prototypes loop: every row is
// re-assigned to its nearest cluster, the frequency and membership
// tables are updated incrementally, and centroids of clusters whose
// membership changed are recomputed. It returns the summed assignment
// cost and whether any label changed.
func (km *KPrototypes) iteration(xNum, xCat *DenseMatrix) (float64, bool, error) {
	changed := make([]bool, km.ClustersNumber)
	var change bool
	var numOfChanges float64
	var totalCost float64
	// Membership lists are rebuilt from scratch each pass.
	for i := 0; i < km.ClustersNumber; i++ {
		km.MembershipNumTable[i] = nil
	}
	// Find closest cluster for all data vectors - assign new labels.
	xRowsNum, xNumCols := xNum.Dims()
	_, xColsCat := xCat.Dims()
	for i := 0; i < xRowsNum; i++ {
		rowCat := &DenseVector{xCat.RowView(i).(*mat.VecDense)}
		rowNum := &DenseVector{xNum.RowView(i).(*mat.VecDense)}
		newLabel, cost, err := km.near(i, rowCat, rowNum)
		if err != nil {
			return totalCost, change, fmt.Errorf("iteration error: %v", err)
		}
		totalCost += cost
		km.MembershipNumTable[int(newLabel)] = append(km.MembershipNumTable[int(newLabel)], float64(i))
		if newLabel != km.Labels.At(i, 0) {
			km.LabelsCounter[int(newLabel)]++
			km.LabelsCounter[int(km.Labels.At(i, 0))]--
			// Make changes in frequency table: move this row's categorical
			// values from the old cluster's counts to the new cluster's.
			for j := 0; j < xColsCat; j++ {
				km.FrequencyTable[int(km.Labels.At(i, 0))][j][rowCat.At(j, 0)]--
				km.FrequencyTable[int(newLabel)][j][rowCat.At(j, 0)]++
			}
			change = true
			numOfChanges++
			changed[int(newLabel)] = true
			changed[int(km.Labels.At(i, 0))] = true
			km.Labels.SetVec(i, newLabel)
		}
	}
	// Recompute cluster centers for all clusters with changes.
	for i, elem := range changed {
		if elem == true {
			// Find new values for clusters centers.
			km.findNewCenters(xColsCat, xNumCols, i, xNum)
		}
	}
	return totalCost, change, nil
}
// findNewCenters recomputes centroids after membership changes. The
// categorical centroid of cluster i is set to the per-attribute mode
// taken from the frequency table (falling back to the previous value
// when an attribute has no counts).
// NOTE(review): although the function receives a single cluster index i,
// the numerical section below recomputes the mean centroid for ALL
// clusters — confirm whether that full recomputation is intentional.
func (km *KPrototypes) findNewCenters(xColsCat, xNumCols, i int, xNum *DenseMatrix) {
	newCentroid := make([]float64, xColsCat)
	for j := 0; j < xColsCat; j++ {
		val, empty := findHighestMapValue(km.FrequencyTable[i][j])
		if !empty {
			newCentroid[j] = val
		} else {
			newCentroid[j] = km.ClusterCentroidsCat.At(i, j)
		}
	}
	km.ClusterCentroidsCat.SetRow(i, newCentroid)
	vecSum := make([]*mat.VecDense, km.ClustersNumber)
	for a := 0; a < km.ClustersNumber; a++ {
		vecSum[a] = mat.NewVecDense(xNumCols, nil)
	}
	for a := 0; a < km.ClustersNumber; a++ {
		newCenter := make([]float64, xNumCols)
		// Sum the numerical rows belonging to cluster a ...
		for j := 0; j < km.LabelsCounter[a]; j++ {
			for k := 0; k < xNumCols; k++ {
				vecSum[a].SetVec(k, vecSum[a].At(k, 0)+xNum.At(int(km.MembershipNumTable[a][j]), k))
			}
		}
		// ... and divide by the member count to get the mean centroid.
		// NOTE(review): an empty cluster (LabelsCounter[a] == 0) divides by
		// zero here, yielding NaN components — verify upstream prevents it.
		for l := 0; l < xNumCols; l++ {
			newCenter[l] = vecSum[a].At(l, 0) / float64(km.LabelsCounter[a])
		}
		km.ClusterCentroidsNum.SetRow(a, newCenter)
	}
}
// near returns the index (as float64) of the cluster closest to the
// record split into vectorCat/vectorNum, together with the combined
// distance Gamma*catDist + numDist to that cluster. index is only used
// for error reporting.
func (km *KPrototypes) near(index int, vectorCat, vectorNum *DenseVector) (float64, float64, error) {
	bestLabel := 0.0
	bestDist := math.MaxFloat64
	for c := 0; c < km.ClustersNumber; c++ {
		catDist, err := km.DistanceFunc(vectorCat, &DenseVector{km.ClusterCentroidsCat.RowView(c).(*mat.VecDense)})
		if err != nil {
			return -1, -1, fmt.Errorf("Cannot compute nearest cluster for vector %q: %v", index, err)
		}
		numDist, err := EuclideanDistance(vectorNum, &DenseVector{km.ClusterCentroidsNum.RowView(c).(*mat.VecDense)})
		if err != nil {
			return -1, -1, fmt.Errorf("Cannot compute nearest cluster for vector %q: %v", index, err)
		}
		// Gamma balances the categorical term against the numerical one.
		if total := km.Gamma*catDist + numDist; total < bestDist {
			bestDist = total
			bestLabel = float64(c)
		}
	}
	return bestLabel, bestDist, nil
}
// Predict assign labels for the set of new vectors. The model must have
// been fitted first (IsFitted).
// NOTE(review): the split below duplicates partitionData; consider
// calling km.partitionData instead of keeping two copies in sync.
func (km *KPrototypes) Predict(X *DenseMatrix) (*DenseVector, error) {
	if km.IsFitted != true {
		return NewDenseVector(0, nil), errors.New("kmodes: cannot predict labels, model is not fitted yet")
	}
	xRows, xCols := X.Dims()
	labelsVec := NewDenseVector(xRows, nil)
	// Split data on categorical and numerical.
	xCat := NewDenseMatrix(xRows, len(km.CategoricalInd), nil)
	xNum := NewDenseMatrix(xRows, xCols-len(km.CategoricalInd), nil)
	var lastCat, lastNum int
	for i := 0; i < xCols; i++ {
		vec := make([]float64, xRows)
		vec = mat.Col(vec, i, X)
		if km.CategoricalInd[lastCat] == i {
			xCat.SetCol(lastCat, vec)
			lastCat++
			// Clamp once all categorical columns have been consumed.
			if lastCat >= len(km.CategoricalInd) {
				lastCat--
			}
		} else {
			xNum.SetCol(lastNum, vec)
			lastNum++
		}
	}
	// Normalize numerical values.
	xNum = normalizeNum(xNum)
	// Assign each row to its nearest fitted centroid.
	for i := 0; i < xRows; i++ {
		catVector := &DenseVector{xCat.RowView(i).(*mat.VecDense)}
		numVector := &DenseVector{xNum.RowView(i).(*mat.VecDense)}
		label, _, err := km.near(i, catVector, numVector)
		if err != nil {
			return NewDenseVector(0, nil), fmt.Errorf("kmodes Predict: %v", err)
		}
		labelsVec.SetVec(i, label)
	}
	return labelsVec, nil
}
// SaveModel saves the computed ml model (KPrototypes struct) to
// km.ModelPath using gob encoding. Unlike the previous version, the
// Encode error is no longer silently discarded and the file handle is
// only closed when it was actually opened.
func (km *KPrototypes) SaveModel() error {
	file, err := os.Create(km.ModelPath)
	if err != nil {
		return err
	}
	defer file.Close()
	return gob.NewEncoder(file).Encode(km)
}
// LoadModel loads a model (KPrototypes struct) previously written by
// SaveModel from km.ModelPath and re-applies the stored weight vector.
// Fixes: the decode error was checked but the weight vector was applied
// (and the file closed) even when opening or decoding failed.
func (km *KPrototypes) LoadModel() error {
	file, err := os.Open(km.ModelPath)
	if err != nil {
		return err
	}
	defer file.Close()
	if err := gob.NewDecoder(file).Decode(km); err != nil {
		return err
	}
	// Only touch global weights after a successful decode.
	SetWeights(km.WeightVectors[0])
	return nil
}
// normalizeNum scales every column of X in place by dividing it by the
// column maximum (as reported by maxVal) and returns X. Columns whose
// maximum is zero are now left untouched instead of producing NaN/Inf
// from the division by zero.
// NOTE(review): this assumes non-negative data for a true [0,1]
// normalization — confirm against the callers.
func normalizeNum(X *DenseMatrix) *DenseMatrix {
	xRows, xCols := X.Dims()
	for j := 0; j < xCols; j++ {
		column := X.ColView(j).(*mat.VecDense).RawVector().Data
		max := maxVal(column)
		if max == 0 {
			continue
		}
		for i := 0; i < xRows; i++ {
			X.Set(i, j, X.At(i, j)/max)
		}
	}
	return X
}
// validateParameters checks that the algorithm's required function hooks
// are set and that the count parameters are positive before fitting.
func (km *KPrototypes) validateParameters() error {
	if km.InitializationFunc == nil {
		return errors.New("initializationFunction is nil")
	}
	if km.DistanceFunc == nil {
		return errors.New("distanceFunction is nil")
	}
	// NOTE(review): the message says ">1" but the check rejects values < 1,
	// i.e. it actually requires at least 1 — align message and condition.
	if km.ClustersNumber < 1 || km.MaxIterationNumber < 1 || km.RunsNumber < 1 {
		return errors.New("wrong initialization parameters (should be >1)")
	}
	return nil
} | cluster/kprototypes.go | 0.70028 | 0.547343 | kprototypes.go | starcoder |
package input
import (
"github.com/benthosdev/benthos/v4/internal/component/input"
"github.com/benthosdev/benthos/v4/internal/component/metrics"
"github.com/benthosdev/benthos/v4/internal/docs"
"github.com/benthosdev/benthos/v4/internal/impl/amqp1/shared"
"github.com/benthosdev/benthos/v4/internal/interop"
"github.com/benthosdev/benthos/v4/internal/log"
"github.com/benthosdev/benthos/v4/internal/old/input/reader"
"github.com/benthosdev/benthos/v4/internal/tls"
)
//------------------------------------------------------------------------------
// init registers the amqp_1 input constructor and its documentation /
// configuration field spec in the global Constructors table.
func init() {
	Constructors[TypeAMQP1] = TypeSpec{
		constructor: fromSimpleConstructor(NewAMQP1),
		Status:      docs.StatusBeta,
		Summary: `
Reads messages from an AMQP (1.0) server.`,
		Description: `
### Metadata

This input adds the following metadata fields to each message:

` + "``` text" + `
- amqp_content_type
- amqp_content_encoding
- amqp_creation_time
- All string typed message annotations
` + "```" + `

You can access these metadata fields using
[function interpolation](/docs/configuration/interpolation#metadata).`,
		Categories: []Category{
			CategoryServices,
		},
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon("url",
				"A URL to connect to.",
				"amqp://localhost:5672/",
				"amqps://guest:guest@localhost:5672/",
			),
			docs.FieldCommon("source_address", "The source address to consume from.", "/foo", "queue:/bar", "topic:/baz"),
			docs.FieldAdvanced("azure_renew_lock", "Experimental: Azure service bus specific option to renew lock if processing takes more then configured lock time").AtVersion("3.45.0"),
			tls.FieldSpec(),
			shared.SASLFieldSpec(),
		},
	}
}
//------------------------------------------------------------------------------
// NewAMQP1 creates a new AMQP1 input type by wiring the AMQP 1.0 reader
// into an async reader wrapper.
func NewAMQP1(conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type) (input.Streamed, error) {
	r, err := reader.NewAMQP1(conf.AMQP1, log, stats)
	if err != nil {
		return nil, err
	}
	return NewAsyncReader(TypeAMQP1, true, r, log, stats)
}
//------------------------------------------------------------------------------ | internal/old/input/amqp_1.go | 0.689619 | 0.530723 | amqp_1.go | starcoder |
package gfilter
import (
"image"
"image/color/palette"
"image/png"
"os"
// but is imported for its initialization side-effect, which allows
// image.Decode to understand JPEG formatted images. Uncomment these
// two lines to also understand GIF and PNG images:
_ "image/gif"
_ "image/jpeg"
_ "image/png"
)
// ImageType discriminates which concrete image flavour a handler wraps,
// adding support for different image kinds behind one interface.
type ImageType int

// Enumeration of the supported image types, mirroring the handler
// implementations below.
const (
	ImageTypeNRGBA = ImageType(iota)
	ImageTypeNRGBA64
	ImageTypeRGBA
	ImageTypeRGBA64
	ImageTypeGray
	ImageTypeGray16
	ImageTypePaletted
)
// Pixel struct to contain RGBA components for images.
// The fields are uint32 — presumably the alpha-premultiplied 0..0xffff
// values produced by color.Color.RGBA(); confirm in ConvertColor.
type Pixel struct {
	r, g, b, a uint32
}
// ImageHandler abstracts pixel access, metadata queries and PNG export
// over the concrete image kinds, enabling polymorphic filtering code.
type ImageHandler interface {
	At(row, column int) (Pixel, error)      // read the pixel at (row, column)
	Set(row, column int, px Pixel) error    // write the pixel at (row, column)
	Mode() ImageType                        // the wrapped image's type tag
	GetDimensions() image.Rectangle         // the image bounds
	SaveImage(filePath string) error        // encode the image to filePath
}
// NRGBAImageHandler struct to handle an NRGBA image.
type NRGBAImageHandler struct {
	Image     *image.NRGBA
	ImageType ImageType
}

// At returns the Pixel at row and column of the image, or
// ErrRowColumnOutOfBounds when the coordinate lies outside the bounds.
func (handler *NRGBAImageHandler) At(row, column int) (Pixel, error) {
	rect := handler.Image.Bounds()
	// Bug fix: the original combined each pair of comparisons with &&
	// (row < Min && row > Max), which can never be true, so out-of-bounds
	// coordinates were silently accepted. Rectangle.Max is exclusive.
	if row < rect.Min.X || row >= rect.Max.X || column < rect.Min.Y || column >= rect.Max.Y {
		return Pixel{}, ErrRowColumnOutOfBounds
	}
	return ConvertColor(handler.Image.At(row, column)), nil
}

// Set writes px at row and column of the image.
func (handler *NRGBAImageHandler) Set(row, column int, px Pixel) error {
	setColor, err := GetColorFromPixel(px, handler.ImageType)
	if err != nil {
		return err
	}
	handler.Image.Set(row, column, setColor)
	return nil
}

// Mode returns the ImageType for the image.
func (handler *NRGBAImageHandler) Mode() ImageType {
	return handler.ImageType
}

// GetDimensions returns the image bounds.
func (handler *NRGBAImageHandler) GetDimensions() image.Rectangle {
	return handler.Image.Bounds()
}

// SaveImage encodes the image as PNG to filePath.
// Bug fix: the png.Encode error is now returned instead of ignored.
func (handler *NRGBAImageHandler) SaveImage(filePath string) error {
	newfile, err := os.Create(filePath)
	if err != nil {
		return err
	}
	defer newfile.Close()
	return png.Encode(newfile, handler.Image)
}
// NRGBA64ImageHandler struct to handle an NRGBA64 image.
type NRGBA64ImageHandler struct {
	Image     *image.NRGBA64
	ImageType ImageType
}

// At returns the Pixel at row and column of the image, or
// ErrRowColumnOutOfBounds when the coordinate lies outside the bounds.
// (Also fixes the "retuns" typo in the original comment.)
func (handler *NRGBA64ImageHandler) At(row, column int) (Pixel, error) {
	rect := handler.Image.Bounds()
	// Bug fix: the original && comparisons could never be true, so the
	// bounds error was unreachable. Rectangle.Max is exclusive.
	if row < rect.Min.X || row >= rect.Max.X || column < rect.Min.Y || column >= rect.Max.Y {
		return Pixel{}, ErrRowColumnOutOfBounds
	}
	return ConvertColor(handler.Image.At(row, column)), nil
}

// Set writes px at row and column of the image.
func (handler *NRGBA64ImageHandler) Set(row, column int, px Pixel) error {
	setColor, err := GetColorFromPixel(px, handler.ImageType)
	if err != nil {
		return err
	}
	handler.Image.Set(row, column, setColor)
	return nil
}

// Mode returns the ImageType of the image.
func (handler *NRGBA64ImageHandler) Mode() ImageType {
	return handler.ImageType
}

// GetDimensions returns the image bounds.
func (handler *NRGBA64ImageHandler) GetDimensions() image.Rectangle {
	return handler.Image.Bounds()
}

// SaveImage encodes the image as PNG to filePath.
// Bug fix: the png.Encode error is now returned instead of ignored.
func (handler *NRGBA64ImageHandler) SaveImage(filePath string) error {
	newfile, err := os.Create(filePath)
	if err != nil {
		return err
	}
	defer newfile.Close()
	return png.Encode(newfile, handler.Image)
}
// RGBAImageHandler struct to handle an RGBA image.
type RGBAImageHandler struct {
	Image     *image.RGBA
	ImageType ImageType
}

// At returns the Pixel at row and column of the image, or
// ErrRowColumnOutOfBounds when the coordinate lies outside the bounds.
func (handler *RGBAImageHandler) At(row, column int) (Pixel, error) {
	rect := handler.Image.Bounds()
	// Bug fix: the original && comparisons could never be true, so the
	// bounds error was unreachable. Rectangle.Max is exclusive.
	if row < rect.Min.X || row >= rect.Max.X || column < rect.Min.Y || column >= rect.Max.Y {
		return Pixel{}, ErrRowColumnOutOfBounds
	}
	return ConvertColor(handler.Image.At(row, column)), nil
}

// Set writes px at row and column of the image.
func (handler *RGBAImageHandler) Set(row, column int, px Pixel) error {
	setColor, err := GetColorFromPixel(px, handler.ImageType)
	if err != nil {
		return err
	}
	handler.Image.Set(row, column, setColor)
	return nil
}

// Mode returns the ImageType of the image.
func (handler *RGBAImageHandler) Mode() ImageType {
	return handler.ImageType
}

// GetDimensions returns the image bounds.
func (handler *RGBAImageHandler) GetDimensions() image.Rectangle {
	return handler.Image.Bounds()
}

// SaveImage encodes the image as PNG to filePath.
// Bug fix: the png.Encode error is now returned instead of ignored.
func (handler *RGBAImageHandler) SaveImage(filePath string) error {
	newfile, err := os.Create(filePath)
	if err != nil {
		return err
	}
	defer newfile.Close()
	return png.Encode(newfile, handler.Image)
}
// RGBA64ImageHandler struct to handle an RGBA64 image.
type RGBA64ImageHandler struct {
	Image     *image.RGBA64
	ImageType ImageType
}

// At returns the Pixel at row and column of the image, or
// ErrRowColumnOutOfBounds when the coordinate lies outside the bounds.
func (handler *RGBA64ImageHandler) At(row, column int) (Pixel, error) {
	rect := handler.Image.Bounds()
	// Bug fix: the original && comparisons could never be true, so the
	// bounds error was unreachable. Rectangle.Max is exclusive.
	if row < rect.Min.X || row >= rect.Max.X || column < rect.Min.Y || column >= rect.Max.Y {
		return Pixel{}, ErrRowColumnOutOfBounds
	}
	return ConvertColor(handler.Image.At(row, column)), nil
}

// Set writes px at row and column of the image.
func (handler *RGBA64ImageHandler) Set(row, column int, px Pixel) error {
	setColor, err := GetColorFromPixel(px, handler.ImageType)
	if err != nil {
		return err
	}
	handler.Image.Set(row, column, setColor)
	return nil
}

// Mode returns the ImageType of the image.
func (handler *RGBA64ImageHandler) Mode() ImageType {
	return handler.ImageType
}

// GetDimensions returns the image bounds.
func (handler *RGBA64ImageHandler) GetDimensions() image.Rectangle {
	return handler.Image.Bounds()
}

// SaveImage encodes the image as PNG to filePath.
// Bug fix: the png.Encode error is now returned instead of ignored.
func (handler *RGBA64ImageHandler) SaveImage(filePath string) error {
	newfile, err := os.Create(filePath)
	if err != nil {
		return err
	}
	defer newfile.Close()
	return png.Encode(newfile, handler.Image)
}
// GrayImageHandler struct to handle a Gray image.
type GrayImageHandler struct {
	Image     *image.Gray
	ImageType ImageType
}

// At returns the Pixel at row and column of the image, or
// ErrRowColumnOutOfBounds when the coordinate lies outside the bounds.
func (handler *GrayImageHandler) At(row, column int) (Pixel, error) {
	rect := handler.Image.Bounds()
	// Bug fix: the original && comparisons could never be true, so the
	// bounds error was unreachable. Rectangle.Max is exclusive.
	if row < rect.Min.X || row >= rect.Max.X || column < rect.Min.Y || column >= rect.Max.Y {
		return Pixel{}, ErrRowColumnOutOfBounds
	}
	return ConvertColor(handler.Image.At(row, column)), nil
}

// Set writes px at row and column of the image.
func (handler *GrayImageHandler) Set(row, column int, px Pixel) error {
	setColor, err := GetColorFromPixel(px, handler.ImageType)
	if err != nil {
		return err
	}
	handler.Image.Set(row, column, setColor)
	return nil
}

// Mode returns the ImageType of the image.
func (handler *GrayImageHandler) Mode() ImageType {
	return handler.ImageType
}

// GetDimensions returns the image bounds.
func (handler *GrayImageHandler) GetDimensions() image.Rectangle {
	return handler.Image.Bounds()
}

// SaveImage encodes the image as PNG to filePath.
// Bug fix: the png.Encode error is now returned instead of ignored.
func (handler *GrayImageHandler) SaveImage(filePath string) error {
	newfile, err := os.Create(filePath)
	if err != nil {
		return err
	}
	defer newfile.Close()
	return png.Encode(newfile, handler.Image)
}
// Gray16ImageHandler struct to handle a Gray16 image.
type Gray16ImageHandler struct {
	Image     *image.Gray16
	ImageType ImageType
}

// At returns the Pixel at row and column of the image, or
// ErrRowColumnOutOfBounds when the coordinate lies outside the bounds.
func (handler *Gray16ImageHandler) At(row, column int) (Pixel, error) {
	rect := handler.Image.Bounds()
	// Bug fix: the original && comparisons could never be true, so the
	// bounds error was unreachable. Rectangle.Max is exclusive.
	if row < rect.Min.X || row >= rect.Max.X || column < rect.Min.Y || column >= rect.Max.Y {
		return Pixel{}, ErrRowColumnOutOfBounds
	}
	return ConvertColor(handler.Image.At(row, column)), nil
}

// Set writes px at row and column of the image.
func (handler *Gray16ImageHandler) Set(row, column int, px Pixel) error {
	setColor, err := GetColorFromPixel(px, handler.ImageType)
	if err != nil {
		return err
	}
	handler.Image.Set(row, column, setColor)
	return nil
}

// Mode returns the ImageType of the image.
func (handler *Gray16ImageHandler) Mode() ImageType {
	return handler.ImageType
}

// GetDimensions returns the image bounds.
func (handler *Gray16ImageHandler) GetDimensions() image.Rectangle {
	return handler.Image.Bounds()
}

// SaveImage encodes the image as PNG to filePath.
// Bug fix: the png.Encode error is now returned instead of ignored.
func (handler *Gray16ImageHandler) SaveImage(filePath string) error {
	newfile, err := os.Create(filePath)
	if err != nil {
		return err
	}
	defer newfile.Close()
	return png.Encode(newfile, handler.Image)
}
// Gray16ImageHandler struct to handle the Gray16 Image
type PalettedImageHandler struct {
Image *image.Paletted
ImageType ImageType
}
// At returns the Pixel at row and column of the image
func (handler *PalettedImageHandler) At(row, column int) (Pixel, error) {
rect := handler.Image.Bounds()
if (row < rect.Min.X && row > rect.Max.X) || (column < rect.Min.Y && column > rect.Max.Y) {
return Pixel{}, ErrRowColumnOutOfBounds
}
return ConvertColor(handler.Image.At(row, column)), nil
}
// Set sets the Pixel at row and column of the image
func (handler *PalettedImageHandler) Set(row, column int, px Pixel) error {
setColor, err := GetColorFromPixel(px, handler.ImageType)
if err != nil {
return err
}
handler.Image.Set(row, column, setColor)
return nil
}
// Mode returns the ImageType of the images
func (handler *PalettedImageHandler) Mode() ImageType {
return handler.ImageType
}
// GetDimensions returns the dimensions as a Rectangle for the image
func (handler *PalettedImageHandler) GetDimensions() image.Rectangle {
return handler.Image.Bounds()
}
// SaveImage saves the image to the filepath specified
func (handler *PalettedImageHandler) SaveImage(filePath string) error {
newfile, err := os.Create(filePath)
if err != nil {
return err
}
defer newfile.Close()
png.Encode(newfile, handler.Image)
return nil
}
// ReadImage reads the image from the specified path
// it will return the interface ImageHandler of the
// correct type specified in the image
func ReadImage(path string) (ImageHandler, error) {
reader, err := os.Open(path)
if err != nil {
return nil, err
}
img, _, err := image.Decode(reader)
if err != nil {
return nil, err
}
switch img.(type) {
case *image.NRGBA:
return &NRGBAImageHandler{
Image: img.(*image.NRGBA),
ImageType: ImageTypeNRGBA,
}, nil
case *image.NRGBA64:
return &NRGBA64ImageHandler{
Image: img.(*image.NRGBA64),
ImageType: ImageTypeNRGBA64,
}, nil
case *image.RGBA:
return &RGBAImageHandler{
Image: img.(*image.RGBA),
ImageType: ImageTypeRGBA,
}, nil
case *image.RGBA64:
return &RGBA64ImageHandler{
Image: img.(*image.RGBA64),
ImageType: ImageTypeRGBA64,
}, nil
case *image.Gray:
return &GrayImageHandler{
Image: img.(*image.Gray),
ImageType: ImageTypeGray,
}, nil
case *image.Gray16:
return &Gray16ImageHandler{
Image: img.(*image.Gray16),
ImageType: ImageTypeGray16,
}, nil
case *image.Paletted:
return &PalettedImageHandler{
Image: img.(*image.Paletted),
ImageType: ImageTypePaletted,
}, nil
default:
return nil, ErrUnsupportedImageFormat
}
}
// New returns an ImageHandler with a new Image
// This handler will be used for the result
func New(imgT ImageType, dimensions image.Rectangle) (ImageHandler, error) {
switch imgT {
case ImageTypeNRGBA:
return &NRGBAImageHandler{
Image: image.NewNRGBA(dimensions),
ImageType: ImageTypeNRGBA,
}, nil
case ImageTypeNRGBA64:
return &NRGBA64ImageHandler{
Image: image.NewNRGBA64(dimensions),
ImageType: ImageTypeNRGBA64,
}, nil
case ImageTypeRGBA:
return &RGBAImageHandler{
Image: image.NewRGBA(dimensions),
ImageType: ImageTypeRGBA,
}, nil
case ImageTypeRGBA64:
return &RGBA64ImageHandler{
Image: image.NewRGBA64(dimensions),
ImageType: ImageTypeRGBA64,
}, nil
case ImageTypeGray:
return &GrayImageHandler{
Image: image.NewGray(dimensions),
ImageType: ImageTypeGray,
}, nil
case ImageTypeGray16:
return &Gray16ImageHandler{
Image: image.NewGray16(dimensions),
ImageType: ImageTypeGray16,
}, nil
case ImageTypePaletted:
return &PalettedImageHandler{
Image: image.NewPaletted(dimensions, palette.Plan9),
ImageType: ImageTypePaletted,
}, nil
default:
return nil, ErrUnsupportedImageFormat
}
} | reader.go | 0.782538 | 0.49823 | reader.go | starcoder |
package jigo
import (
"fmt"
"reflect"
)
// vartype is a simplified version of the notion of Kind in reflect, modified
// to reflect the slightly different semantics in jigo.
type vartype int
const (
intType vartype = iota
floatType
stringType
boolType
sliceType
mapType
unknownType
)
func (v vartype) String() string {
switch v {
case intType:
return "int"
case floatType:
return "float"
case stringType:
return "string"
case boolType:
return "bool"
case sliceType:
return "slice"
case mapType:
return "map"
default:
return "<unknown>"
}
}
func isNumericVar(v vartype) bool {
return v < stringType
}
func typeOf(i interface{}) vartype {
switch i.(type) {
case uint, uint8, uint16, uint32, uint64, int, int8, int16, int32, int64:
return intType
case float32, float64:
return floatType
case string:
return stringType
case bool:
return boolType
}
kind := reflect.ValueOf(i).Kind()
switch kind {
case reflect.Slice, reflect.Array:
return sliceType
case reflect.Map:
return mapType
}
return unknownType
}
func asBool(i interface{}) (bool, error) {
if typeOf(i) != boolType {
return false, fmt.Errorf("%s is not boolean", i)
}
return i.(bool), nil
}
func asInteger(i interface{}) (int64, bool) {
switch t := i.(type) {
case uint:
return int64(t), true
case uint8:
return int64(t), true
case uint16:
return int64(t), true
case uint32:
return int64(t), true
case uint64:
return int64(t), true
case int:
return int64(t), true
case int8:
return int64(t), true
case int16:
return int64(t), true
case int32:
return int64(t), true
case int64:
return t, true
case float32:
return int64(t), true
case float64:
return int64(t), true
}
return 0, false
}
func asFloat(i interface{}) (float64, bool) {
switch t := i.(type) {
case uint:
return float64(t), true
case uint8:
return float64(t), true
case uint16:
return float64(t), true
case uint32:
return float64(t), true
case uint64:
return float64(t), true
case int:
return float64(t), true
case int8:
return float64(t), true
case int16:
return float64(t), true
case int32:
return float64(t), true
case int64:
return float64(t), true
case float32:
return float64(t), true
case float64:
return t, true
}
return 0, false
}
func asString(i interface{}) string {
return fmt.Sprint(i)
} | types.go | 0.692746 | 0.441131 | types.go | starcoder |
package header
import (
"fmt"
"strings"
"unsafe"
)
const (
// HeaderSize the allocated size of the header
HeaderSize = 48
)
// Header data header stores
// info about a given data value
type Header struct {
xmin uint64 // transaction id that created the node's data
xmax uint64 // transaction id that updated/deleted the node's data
psize int64 // size of the previous version of this data, including header
poffset int64 // offset of the previous version of this data
size int64 // size of current data
ksize int64 // size of the current key
}
// Xmin returns the transaction if of the node that created the data
func (h *Header) Xmin() uint64 {
return h.xmin
}
// Xmax returns the transaction if of the node that updated or deleted the data
func (h *Header) Xmax() uint64 {
return h.xmax
}
// DataSize returns the size of the values data
func (h *Header) DataSize() int64 {
return h.size
}
// KeySize returns the size of the tuples key
func (h *Header) KeySize() int64 {
return h.ksize
}
// TotalSize returns the total size of header + data
func (h *Header) TotalSize() int64 {
return HeaderSize + h.ksize + h.size
}
// DataOffset returs the offset that the data starts at
func (h *Header) DataOffset() int64 {
return HeaderSize + h.ksize
}
// Previous returns the size and offset of the previous version's data
func (h *Header) Previous() (int64, int64) {
return h.psize, h.poffset
}
// HasPrevious returns true if there is a previous version of the data
func (h *Header) HasPrevious() bool {
return h.psize != 0
}
// SetXmin sets the transaction if of the node that created the data
func (h *Header) SetXmin(txid uint64) {
h.xmin = txid
}
// SetXmax sets the transaction if of the node that updated or deleted the data
func (h *Header) SetXmax(txid uint64) {
h.xmax = txid
}
// SetPrevious sets the offset of the previous version's data
func (h *Header) SetPrevious(size, offset int64) {
h.psize = size
h.poffset = offset
}
// SetDataSize sets the size of the keys data
func (h *Header) SetDataSize(size int64) {
h.size = size
}
// SetKeySize sets the size of the tuples key
func (h *Header) SetKeySize(size int64) {
h.ksize = size
}
// Serialize serialize a node to a byteslice
func Serialize(h *Header) []byte {
data := make([]byte, 48)
xmin := *(*[8]byte)(unsafe.Pointer(&h.xmin))
copy(data[0:], xmin[:])
xmax := *(*[8]byte)(unsafe.Pointer(&h.xmax))
copy(data[8:], xmax[:])
psize := *(*[8]byte)(unsafe.Pointer(&h.psize))
copy(data[16:], psize[:])
poffset := *(*[8]byte)(unsafe.Pointer(&h.poffset))
copy(data[24:], poffset[:])
size := *(*[8]byte)(unsafe.Pointer(&h.size))
copy(data[32:], size[:])
ksize := *(*[8]byte)(unsafe.Pointer(&h.ksize))
copy(data[40:], ksize[:])
return data
}
// Deserialize deserialize from a byteslice to a Node
func Deserialize(data []byte) *Header {
return &Header{
xmin: *(*uint64)(unsafe.Pointer(&data[0])),
xmax: *(*uint64)(unsafe.Pointer(&data[8])),
psize: *(*int64)(unsafe.Pointer(&data[16])),
poffset: *(*int64)(unsafe.Pointer(&data[24])),
size: *(*int64)(unsafe.Pointer(&data[32])),
ksize: *(*int64)(unsafe.Pointer(&data[40])),
}
}
// Prepend prepends header information to data
func Prepend(h *Header, data []byte) []byte {
hdr := Serialize(h)
// may be more performant to write the header seperately
// as this append creates a copy of the data
hdr = append(hdr, data...)
return hdr
}
// Print prints header information to stdout
func Print(h *Header) {
output := []string{"{"}
output = append(output, fmt.Sprintf(" Xmin: %d", h.xmin))
output = append(output, fmt.Sprintf(" Xmax: %d", h.xmax))
output = append(output, fmt.Sprintf(" Previous Version Size: %d", h.psize))
output = append(output, fmt.Sprintf(" Previous Version Offset: %d", h.poffset))
output = append(output, "}")
fmt.Println(strings.Join(output, "\n"))
} | header/header.go | 0.706798 | 0.489015 | header.go | starcoder |
package stats
import (
"strings"
"time"
"github.com/hashicorp/go-memdb"
)
const (
// TimeUnitDays is an identifier that means a metrics values should be grouped by day
TimeUnitDays = "days"
// TimeUnitMonths is an identifier that means a metrics values should be grouped by month
TimeUnitMonths = "months"
// TimeUnitYears is an identifier that means a metrics values should be grouped by year
TimeUnitYears = "years"
)
// TimeUnits contains the supported time units
var TimeUnits = []string{
TimeUnitDays,
TimeUnitMonths,
TimeUnitYears,
}
// getDateLayoutForTimeUnit returns the layout used for stringifying a time.Time for given timeUnit
func getDateLayoutForTimeUnit(timeUnit string) string {
var layout string
switch strings.ToLower(timeUnit) {
case TimeUnitDays:
layout = "2006-01-02"
case TimeUnitMonths:
layout = "Jan 2006"
case TimeUnitYears:
layout = "2006"
default:
panic("unsupported timeUnit: " + timeUnit)
}
return layout
}
// DateIterator is a type used for iterating over a range of dates and querying
// a DB for records for a given date
type DateIterator struct {
TimeUnit string
TimeLength int
CurrentOffset int
db *DB
currentDate *Date
}
// Date is a type containing a time.Time Value and boolean Valid fields,
// Valid is true the Value is inside the allowed range of dates
type Date struct {
Value time.Time
Valid bool
}
// NewDateIterator returns a new date iterator
func NewDateIterator(db *DB, timeUnit string, timeLength int) DateIterator {
it := DateIterator{
TimeUnit: timeUnit,
TimeLength: timeLength,
db: db,
}
it.CurrentOffset = -1
it.currentDate = &Date{Value: time.Now(), Valid: true}
it.decrementDate(timeLength + 1)
return it
}
// Next returns the next date after currentDate of DateIterator it. Also updates
// Valid field accordingly
func (it *DateIterator) Next() *Date {
it.CurrentOffset++
if it.CurrentOffset > it.TimeLength {
it.currentDate.Valid = false
}
it.incrementDate(1)
return it.currentDate
}
// GetRecordsForDate queries the DB of DateIterator it for values in
// provided table matching date and metric metadata and returns matching records
func (it *DateIterator) GetRecordsForDate(table string, metric Metric, date time.Time) memdb.ResultIterator {
layout := getDateLayoutForTimeUnit(it.TimeUnit)
txn := it.db.memdb.Txn(false)
defer txn.Abort()
var args []interface{}
args = append(args, metric.GetMetadata("repo"))
args = append(args, metric.GetMetadata("group"))
if metric.SupportsDomains() {
args = append(args, metric.GetMetadata("domain"))
}
args = append(args, date.Format(layout))
records, err := txn.Get(
table,
it.db.GetIndexFromTimeUnit(it.TimeUnit, metric.SupportsDomains()),
args...,
)
if err != nil {
panic("failed to fetch data from db for metric. " + err.Error())
}
return records
}
// incrementDate increments currentDate of DateIterator it by given amount
func (it *DateIterator) incrementDate(amount int) {
switch strings.ToLower(it.TimeUnit) {
case TimeUnitDays:
it.currentDate.Value = it.currentDate.Value.AddDate(0, 0, amount)
case TimeUnitMonths:
it.currentDate.Value = it.currentDate.Value.AddDate(0, amount, 0)
case TimeUnitYears:
it.currentDate.Value = it.currentDate.Value.AddDate(amount, 0, 0)
default:
panic("unsupported time unit: " + it.TimeUnit)
}
}
// decrementDate decrements currentDate of DateIterator it by given amount
func (it *DateIterator) decrementDate(amount int) {
amount *= -1
switch strings.ToLower(it.TimeUnit) {
case TimeUnitDays:
it.currentDate.Value = it.currentDate.Value.AddDate(0, 0, amount)
case TimeUnitMonths:
it.currentDate.Value = it.currentDate.Value.AddDate(0, amount, 0)
case TimeUnitYears:
it.currentDate.Value = it.currentDate.Value.AddDate(amount, 0, 0)
default:
panic("unsupported time unit: " + it.TimeUnit)
}
} | internal/stats/date.go | 0.823257 | 0.424412 | date.go | starcoder |
package network
import (
"errors"
"fmt"
neatmath "github.com/yaricom/goNEAT/v2/neat/math"
"math"
)
// FastNetworkLink The connection descriptor for fast network
type FastNetworkLink struct {
// The index of source neuron
SourceIndex int
// The index of target neuron
TargetIndex int
// The weight of this link
Weight float64
// The signal relayed by this link
Signal float64
}
// FastControlNode The module relay (control node) descriptor for fast network
type FastControlNode struct {
// The activation function for control node
ActivationType neatmath.NodeActivationType
// The indexes of the input nodes
InputIndexes []int
// The indexes of the output nodes
OutputIndexes []int
}
// FastModularNetworkSolver is the network solver implementation to be used for large neural networks simulation.
type FastModularNetworkSolver struct {
// A network id
Id int
// Is a name of this network */
Name string
// The current activation values per each neuron
neuronSignals []float64
// This array is a parallel of neuronSignals and used to test network relaxation
neuronSignalsBeingProcessed []float64
// The activation functions per neuron, must be in the same order as neuronSignals. Has nil entries for
// neurons that are inputs or outputs of a module.
activationFunctions []neatmath.NodeActivationType
// The bias values associated with neurons
biasList []float64
// The control nodes relaying between network modules
modules []*FastControlNode
// The connections
connections []*FastNetworkLink
// The number of input neurons
inputNeuronCount int
// The total number of sensors in the network (input + bias). This is also the index of the first output neuron in the neuron signals.
sensorNeuronCount int
// The number of output neurons
outputNeuronCount int
// The bias neuron count (usually one). This is also the index of the first input neuron in the neuron signals.
biasNeuronCount int
// The total number of neurons in network
totalNeuronCount int
// For recursive activation, marks whether we have finished this node yet
activated []bool
// For recursive activation, makes whether a node is currently being calculated (recurrent connections processing)
inActivation []bool
// For recursive activation, the previous activation values of recurrent connections (recurrent connections processing)
lastActivation []float64
// The adjacent list to hold IDs of outgoing nodes for each network node
adjacentList [][]int
// The adjacent list to hold IDs of incoming nodes for each network node
reverseAdjacentList [][]int
// The adjacent matrix to hold connection weights between all connected nodes
adjacentMatrix [][]float64
}
// NewFastModularNetworkSolver Creates new fast modular network solver
func NewFastModularNetworkSolver(biasNeuronCount, inputNeuronCount, outputNeuronCount, totalNeuronCount int,
activationFunctions []neatmath.NodeActivationType, connections []*FastNetworkLink,
biasList []float64, modules []*FastControlNode) *FastModularNetworkSolver {
fmm := FastModularNetworkSolver{
biasNeuronCount: biasNeuronCount,
inputNeuronCount: inputNeuronCount,
sensorNeuronCount: biasNeuronCount + inputNeuronCount,
outputNeuronCount: outputNeuronCount,
totalNeuronCount: totalNeuronCount,
activationFunctions: activationFunctions,
biasList: biasList,
modules: modules,
connections: connections,
}
// Allocate the arrays that store the states at different points in the neural network.
// The neuron signals are initialised to 0 by default. Only bias nodes need setting to 1.
fmm.neuronSignals = make([]float64, totalNeuronCount)
fmm.neuronSignalsBeingProcessed = make([]float64, totalNeuronCount)
for i := 0; i < biasNeuronCount; i++ {
fmm.neuronSignals[i] = 1.0 // BIAS neuron signal
}
// Allocate activation arrays
fmm.activated = make([]bool, totalNeuronCount)
fmm.inActivation = make([]bool, totalNeuronCount)
fmm.lastActivation = make([]float64, totalNeuronCount)
// Build adjacent lists and matrix for fast access of incoming/outgoing nodes and connection weights
fmm.adjacentList = make([][]int, totalNeuronCount)
fmm.reverseAdjacentList = make([][]int, totalNeuronCount)
fmm.adjacentMatrix = make([][]float64, totalNeuronCount)
for i := 0; i < totalNeuronCount; i++ {
fmm.adjacentList[i] = make([]int, 0)
fmm.reverseAdjacentList[i] = make([]int, 0)
fmm.adjacentMatrix[i] = make([]float64, totalNeuronCount)
}
for i := 0; i < len(connections); i++ {
crs := connections[i].SourceIndex
crt := connections[i].TargetIndex
// Holds outgoing nodes
fmm.adjacentList[crs] = append(fmm.adjacentList[crs], crt)
// Holds incoming nodes
fmm.reverseAdjacentList[crt] = append(fmm.reverseAdjacentList[crt], crs)
// Holds link weight
fmm.adjacentMatrix[crs][crt] = connections[i].Weight
}
return &fmm
}
// ForwardSteps Propagates activation wave through all network nodes provided number of steps in forward direction.
// Returns true if activation wave passed from all inputs to the outputs.
func (s *FastModularNetworkSolver) ForwardSteps(steps int) (res bool, err error) {
for i := 0; i < steps; i++ {
if res, err = s.forwardStep(0); err != nil {
return false, err
}
}
return res, nil
}
// RecursiveSteps Propagates activation wave through all network nodes provided number of steps by recursion from output nodes
// Returns true if activation wave passed from all inputs to the outputs. This method is preferred method
// of network activation when number of forward steps can not be easy calculated and no network modules are set.
func (s *FastModularNetworkSolver) RecursiveSteps() (res bool, err error) {
if len(s.modules) > 0 {
return false, errors.New("recursive activation can not be used for network with defined modules")
}
// Initialize boolean arrays and set the last activation signal for output/hidden neurons
for i := 0; i < s.totalNeuronCount; i++ {
// Set as activated if i is an input node, otherwise ensure it is unactivated (false)
s.activated[i] = i < s.sensorNeuronCount
s.inActivation[i] = false
// set last activation for output/hidden neurons
if i >= s.sensorNeuronCount {
s.lastActivation[i] = s.neuronSignals[i]
}
}
// Get each output node activation recursively
for i := 0; i < s.outputNeuronCount; i++ {
index := s.sensorNeuronCount + i
if res, err = s.recursiveActivateNode(index); err != nil {
return false, err
} else if !res {
return false, fmt.Errorf("failed to recursively activate the output neuron at %d", index)
}
}
return res, nil
}
// Propagate activation wave by recursively looking for input signals graph for a given output neuron
func (s *FastModularNetworkSolver) recursiveActivateNode(currentNode int) (res bool, err error) {
// If we've reached an input node then return since the signal is already set
if s.activated[currentNode] {
s.inActivation[currentNode] = false
return true, nil
}
// Mark that the node is currently being calculated
s.inActivation[currentNode] = true
// Set the pre-signal to 0
s.neuronSignalsBeingProcessed[currentNode] = 0
// Adjacency list in reverse holds incoming connections, go through each one and activate it
for i := 0; i < len(s.reverseAdjacentList[currentNode]); i++ {
currentAdjNode := s.reverseAdjacentList[currentNode][i]
// If this node is currently being activated then we have reached a cycle, or recurrent connection.
// Use the previous activation in this case
if s.inActivation[currentAdjNode] {
s.neuronSignalsBeingProcessed[currentNode] += s.lastActivation[currentAdjNode] * s.adjacentMatrix[currentAdjNode][currentNode]
} else {
// Otherwise, proceed as normal
// Recurse if this neuron has not been activated yet
if !s.activated[currentAdjNode] {
res, err = s.recursiveActivateNode(currentAdjNode)
if err != nil {
// recursive activation failed
return false, err
} else if !res {
return false, fmt.Errorf("failed to recursively activate neuron at %d", currentAdjNode)
}
}
// Add it to the new activation
s.neuronSignalsBeingProcessed[currentNode] += s.neuronSignals[currentAdjNode] * s.adjacentMatrix[currentAdjNode][currentNode]
}
}
// Mark this neuron as completed
s.activated[currentNode] = true
// This is no longer being calculated (for cycle detection)
s.inActivation[currentNode] = false
// Set this signal after running it through the activation function
if s.neuronSignals[currentNode], err = neatmath.NodeActivators.ActivateByType(
s.neuronSignalsBeingProcessed[currentNode], nil,
s.activationFunctions[currentNode]); err != nil {
// failed to activate
res = false
} else {
res = true
}
return res, err
}
// Relax Attempts to relax network given amount of steps until giving up. The network considered relaxed when absolute
// value of the change at any given point is less than maxAllowedSignalDelta during activation waves propagation.
// If maxAllowedSignalDelta value is less than or equal to 0, the method will return true without checking for relaxation.
func (s *FastModularNetworkSolver) Relax(maxSteps int, maxAllowedSignalDelta float64) (relaxed bool, err error) {
for i := 0; i < maxSteps; i++ {
if relaxed, err = s.forwardStep(maxAllowedSignalDelta); err != nil {
return false, err
} else if relaxed {
break // no need to iterate any further, already reached desired accuracy
}
}
return relaxed, nil
}
// Performs single forward step through the network and tests if network become relaxed. The network considered relaxed
// when absolute value of the change at any given point is less than maxAllowedSignalDelta during activation waves propagation.
func (s *FastModularNetworkSolver) forwardStep(maxAllowedSignalDelta float64) (isRelaxed bool, err error) {
isRelaxed = true
// Calculate output signal per each connection and add the signals to the target neurons
for _, conn := range s.connections {
s.neuronSignalsBeingProcessed[conn.TargetIndex] += s.neuronSignals[conn.SourceIndex] * conn.Weight
}
// Pass the signals through the single-valued activation functions
for i := s.sensorNeuronCount; i < s.totalNeuronCount; i++ {
signal := s.neuronSignalsBeingProcessed[i]
if s.biasNeuronCount > 0 {
// append BIAS value to the signal if appropriate
signal += s.biasList[i]
}
if s.neuronSignalsBeingProcessed[i], err = neatmath.NodeActivators.ActivateByType(
signal, nil, s.activationFunctions[i]); err != nil {
return false, err
}
}
// Pass the signals through each module (activation function with more than one input or output)
for _, module := range s.modules {
inputs := make([]float64, len(module.InputIndexes))
for i, inIndex := range module.InputIndexes {
inputs[i] = s.neuronSignalsBeingProcessed[inIndex]
}
if outputs, err := neatmath.NodeActivators.ActivateModuleByType(inputs, nil, module.ActivationType); err == nil {
// save outputs
for i, outIndex := range module.OutputIndexes {
s.neuronSignalsBeingProcessed[outIndex] = outputs[i]
}
} else {
return false, err
}
}
// Move all the neuron signals we changed while processing this network activation into storage.
if maxAllowedSignalDelta <= 0 {
// iterate through output and hidden neurons and collect activations
for i := s.sensorNeuronCount; i < s.totalNeuronCount; i++ {
s.neuronSignals[i] = s.neuronSignalsBeingProcessed[i]
s.neuronSignalsBeingProcessed[i] = 0
}
} else {
for i := s.sensorNeuronCount; i < s.totalNeuronCount; i++ {
// First check whether any location in the network has changed by more than a small amount.
isRelaxed = isRelaxed && !(math.Abs(s.neuronSignals[i]-s.neuronSignalsBeingProcessed[i]) > maxAllowedSignalDelta)
s.neuronSignals[i] = s.neuronSignalsBeingProcessed[i]
s.neuronSignalsBeingProcessed[i] = 0
}
}
return isRelaxed, err
}
// Flush Flushes network state by removing all current activations. Returns true if network flushed successfully or
// false in case of error.
func (s *FastModularNetworkSolver) Flush() (bool, error) {
for i := s.biasNeuronCount; i < s.totalNeuronCount; i++ {
s.neuronSignals[i] = 0.0
}
return true, nil
}
// LoadSensors Set sensors values to the input nodes of the network
func (s *FastModularNetworkSolver) LoadSensors(inputs []float64) error {
if len(inputs) == s.inputNeuronCount {
// only inputs should be provided
for i := 0; i < s.inputNeuronCount; i++ {
s.neuronSignals[s.biasNeuronCount+i] = inputs[i]
}
} else {
return NetErrUnsupportedSensorsArraySize
}
return nil
}
// ReadOutputs Read output values from the output nodes of the network
func (s *FastModularNetworkSolver) ReadOutputs() []float64 {
return s.neuronSignals[s.sensorNeuronCount : s.sensorNeuronCount+s.outputNeuronCount]
}
// NodeCount Returns the total number of neural units in the network
func (s *FastModularNetworkSolver) NodeCount() int {
return s.totalNeuronCount + len(s.modules)
}
// LinkCount Returns the total number of links between nodes in the network
func (s *FastModularNetworkSolver) LinkCount() int {
// count all connections
numLinks := len(s.connections)
// count all bias links if any
if s.biasNeuronCount > 0 {
for _, b := range s.biasList {
if b != 0 {
numLinks++
}
}
}
// count all modules links
if len(s.modules) != 0 {
for _, module := range s.modules {
numLinks += len(module.InputIndexes) + len(module.OutputIndexes)
}
}
return numLinks
}
// Stringer
func (s *FastModularNetworkSolver) String() string {
str := fmt.Sprintf("FastModularNetwork, id: %d, name: [%s], neurons: %d,\n\tinputs: %d,\tbias: %d,\toutputs:%d,\t hidden: %d",
s.Id, s.Name, s.totalNeuronCount, s.inputNeuronCount, s.biasNeuronCount, s.outputNeuronCount,
s.totalNeuronCount-s.sensorNeuronCount-s.outputNeuronCount)
return str
} | neat/network/fast_network.go | 0.714827 | 0.533215 | fast_network.go | starcoder |
package cloudexport
import (
"encoding/json"
)
// V202101beta1BgpProperties Optional BGP related settings.
type V202101beta1BgpProperties struct {
// If true, apply BGP data discovered via another device to the flow from this export.
ApplyBgp *bool `json:"applyBgp,omitempty"`
UseBgpDeviceId *string `json:"useBgpDeviceId,omitempty"`
DeviceBgpType *string `json:"deviceBgpType,omitempty"`
}
// NewV202101beta1BgpProperties instantiates a new V202101beta1BgpProperties object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewV202101beta1BgpProperties() *V202101beta1BgpProperties {
this := V202101beta1BgpProperties{}
return &this
}
// NewV202101beta1BgpPropertiesWithDefaults instantiates a new V202101beta1BgpProperties object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewV202101beta1BgpPropertiesWithDefaults() *V202101beta1BgpProperties {
this := V202101beta1BgpProperties{}
return &this
}
// GetApplyBgp returns the ApplyBgp field value if set, zero value otherwise.
func (o *V202101beta1BgpProperties) GetApplyBgp() bool {
if o == nil || o.ApplyBgp == nil {
var ret bool
return ret
}
return *o.ApplyBgp
}
// GetApplyBgpOk returns a tuple with the ApplyBgp field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *V202101beta1BgpProperties) GetApplyBgpOk() (*bool, bool) {
if o == nil || o.ApplyBgp == nil {
return nil, false
}
return o.ApplyBgp, true
}
// HasApplyBgp returns a boolean if a field has been set.
func (o *V202101beta1BgpProperties) HasApplyBgp() bool {
if o != nil && o.ApplyBgp != nil {
return true
}
return false
}
// SetApplyBgp gets a reference to the given bool and assigns it to the ApplyBgp field.
func (o *V202101beta1BgpProperties) SetApplyBgp(v bool) {
	o.ApplyBgp = &v
}

// GetUseBgpDeviceId returns the UseBgpDeviceId field value if set, zero value otherwise.
func (o *V202101beta1BgpProperties) GetUseBgpDeviceId() string {
	if o == nil || o.UseBgpDeviceId == nil {
		var ret string
		return ret
	}
	return *o.UseBgpDeviceId
}

// GetUseBgpDeviceIdOk returns a tuple with the UseBgpDeviceId field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *V202101beta1BgpProperties) GetUseBgpDeviceIdOk() (*string, bool) {
	if o == nil || o.UseBgpDeviceId == nil {
		return nil, false
	}
	return o.UseBgpDeviceId, true
}

// HasUseBgpDeviceId returns a boolean if a field has been set.
func (o *V202101beta1BgpProperties) HasUseBgpDeviceId() bool {
	// Simplified from an if/return-true/return-false chain: the condition
	// itself is the answer.
	return o != nil && o.UseBgpDeviceId != nil
}

// SetUseBgpDeviceId gets a reference to the given string and assigns it to the UseBgpDeviceId field.
func (o *V202101beta1BgpProperties) SetUseBgpDeviceId(v string) {
	o.UseBgpDeviceId = &v
}

// GetDeviceBgpType returns the DeviceBgpType field value if set, zero value otherwise.
func (o *V202101beta1BgpProperties) GetDeviceBgpType() string {
	if o == nil || o.DeviceBgpType == nil {
		var ret string
		return ret
	}
	return *o.DeviceBgpType
}

// GetDeviceBgpTypeOk returns a tuple with the DeviceBgpType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *V202101beta1BgpProperties) GetDeviceBgpTypeOk() (*string, bool) {
	if o == nil || o.DeviceBgpType == nil {
		return nil, false
	}
	return o.DeviceBgpType, true
}

// HasDeviceBgpType returns a boolean if a field has been set.
func (o *V202101beta1BgpProperties) HasDeviceBgpType() bool {
	return o != nil && o.DeviceBgpType != nil
}

// SetDeviceBgpType gets a reference to the given string and assigns it to the DeviceBgpType field.
func (o *V202101beta1BgpProperties) SetDeviceBgpType(v string) {
	o.DeviceBgpType = &v
}
// MarshalJSON implements json.Marshaler. Only fields that have been
// explicitly set (non-nil pointers) are serialized, giving OpenAPI
// optional-field semantics.
func (o V202101beta1BgpProperties) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.ApplyBgp != nil {
		toSerialize["applyBgp"] = o.ApplyBgp
	}
	if o.UseBgpDeviceId != nil {
		toSerialize["useBgpDeviceId"] = o.UseBgpDeviceId
	}
	if o.DeviceBgpType != nil {
		toSerialize["deviceBgpType"] = o.DeviceBgpType
	}
	return json.Marshal(toSerialize)
}
// NullableV202101beta1BgpProperties wraps V202101beta1BgpProperties and
// tracks whether a value was explicitly provided, distinguishing "unset"
// from "explicitly null".
type NullableV202101beta1BgpProperties struct {
	value *V202101beta1BgpProperties
	isSet bool
}
// Get returns the wrapped value (nil when unset or explicitly null).
func (v NullableV202101beta1BgpProperties) Get() *V202101beta1BgpProperties {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableV202101beta1BgpProperties) Set(val *V202101beta1BgpProperties) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether a value (possibly nil) has been assigned.
func (v NullableV202101beta1BgpProperties) IsSet() bool {
	return v.isSet
}
// Unset clears the value and the set flag.
func (v *NullableV202101beta1BgpProperties) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableV202101beta1BgpProperties returns a wrapper already marked as set.
func NewNullableV202101beta1BgpProperties(val *V202101beta1BgpProperties) *NullableV202101beta1BgpProperties {
	return &NullableV202101beta1BgpProperties{value: val, isSet: true}
}
// MarshalJSON serializes the wrapped value (or JSON null when it is nil).
func (v NullableV202101beta1BgpProperties) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
func (v *NullableV202101beta1BgpProperties) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | apiv6/kentikapi/cloudexport/model_v202101beta1_bgp_properties.go | 0.782538 | 0.433742 | model_v202101beta1_bgp_properties.go | starcoder |
package blowfish
// getNextWord reads four bytes from b starting at *pos, wrapping around to
// the beginning of the slice when the end is reached, and returns them as a
// big-endian uint32. *pos is advanced to the index just past the last byte
// consumed.
func getNextWord(b []byte, pos *int) uint32 {
	i := *pos
	var word uint32
	for consumed := 0; consumed < 4; consumed++ {
		word = word<<8 | uint32(b[i])
		if i++; i >= len(b) {
			i = 0
		}
	}
	*pos = i
	return word
}
// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
// pi and substitution tables for calls to Encrypt. This is used, primarily,
// by the bcrypt package to reuse the Blowfish key schedule during its
// set up. It's unlikely that you need to use this directly.
func ExpandKey(key []byte, c *Cipher) {
	j := 0
	// XOR successive big-endian words of the key (cycled as needed) into
	// the 18 P-array subkeys.
	for i := 0; i < 18; i++ {
		// Using inlined getNextWord for performance.
		var d uint32
		for k := 0; k < 4; k++ {
			d = d<<8 | uint32(key[j])
			j++
			if j >= len(key) {
				j = 0
			}
		}
		c.p[i] ^= d
	}
	// Repeatedly encrypt the (initially all-zero) block, writing each
	// ciphertext back into the P-array and then into the four S-boxes.
	var l, r uint32
	for i := 0; i < 18; i += 2 {
		l, r = encryptBlock(l, r, c)
		c.p[i], c.p[i+1] = l, r
	}
	for i := 0; i < 256; i += 2 {
		l, r = encryptBlock(l, r, c)
		c.s0[i], c.s0[i+1] = l, r
	}
	for i := 0; i < 256; i += 2 {
		l, r = encryptBlock(l, r, c)
		c.s1[i], c.s1[i+1] = l, r
	}
	for i := 0; i < 256; i += 2 {
		l, r = encryptBlock(l, r, c)
		c.s2[i], c.s2[i+1] = l, r
	}
	for i := 0; i < 256; i += 2 {
		l, r = encryptBlock(l, r, c)
		c.s3[i], c.s3[i+1] = l, r
	}
}
// This is similar to ExpandKey, but folds the salt during the key
// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
// salt passed in, reusing ExpandKey turns out to be a place of inefficiency
// and specializing it here is useful.
func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
	j := 0
	// Mix the key into the P-array exactly as in ExpandKey.
	for i := 0; i < 18; i++ {
		c.p[i] ^= getNextWord(key, &j)
	}
	j = 0
	var l, r uint32
	// Unlike ExpandKey, XOR the next two salt words (cycled) into the block
	// before each encryption. This is the bcrypt variant of the schedule.
	for i := 0; i < 18; i += 2 {
		l ^= getNextWord(salt, &j)
		r ^= getNextWord(salt, &j)
		l, r = encryptBlock(l, r, c)
		c.p[i], c.p[i+1] = l, r
	}
	for i := 0; i < 256; i += 2 {
		l ^= getNextWord(salt, &j)
		r ^= getNextWord(salt, &j)
		l, r = encryptBlock(l, r, c)
		c.s0[i], c.s0[i+1] = l, r
	}
	for i := 0; i < 256; i += 2 {
		l ^= getNextWord(salt, &j)
		r ^= getNextWord(salt, &j)
		l, r = encryptBlock(l, r, c)
		c.s1[i], c.s1[i+1] = l, r
	}
	for i := 0; i < 256; i += 2 {
		l ^= getNextWord(salt, &j)
		r ^= getNextWord(salt, &j)
		l, r = encryptBlock(l, r, c)
		c.s2[i], c.s2[i+1] = l, r
	}
	for i := 0; i < 256; i += 2 {
		l ^= getNextWord(salt, &j)
		r ^= getNextWord(salt, &j)
		l, r = encryptBlock(l, r, c)
		c.s3[i], c.s3[i+1] = l, r
	}
}
// encryptBlock runs the two 32-bit halves of a block through the 16-round
// Blowfish Feistel network, fully unrolled, and returns the swapped halves.
// Each round computes F(x) = ((s0[x>>24] + s1[x>>16]) ^ s2[x>>8]) + s3[x]
// on the opposite half; note that ^ binds looser than + in Go, so each line
// below is F(half) ^ p[i].
func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
	xl, xr := l, r
	xl ^= c.p[0]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
	xr ^= c.p[17]
	return xr, xl
}
func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xl, xr := l, r
xl ^= c.p[17]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
xr ^= c.p[0]
return xr, xl
} | vendor/golang.org/x/crypto/blowfish/block.go | 0.640636 | 0.443841 | block.go | starcoder |
package xxhash
import (
	"encoding/binary"
	"errors"
	"hash"
	"math/bits"
)
const (
	// Multiplicative primes for the 32-bit variant of xxHash.
	prime32x1 uint32 = 2654435761
	prime32x2 uint32 = 2246822519
	prime32x3 uint32 = 3266489917
	prime32x4 uint32 = 668265263
	prime32x5 uint32 = 374761393

	// Multiplicative primes for the 64-bit variant of xxHash.
	prime64x1 uint64 = 11400714785074694791
	prime64x2 uint64 = 14029467366897019727
	prime64x3 uint64 = 1609587929392839161
	prime64x4 uint64 = 9650029242287828579
	prime64x5 uint64 = 2870177450012600261

	maxInt32 int32 = (1<<31 - 1)

	// precomputed zero Vs for seed 0
	zero64x1 = 0x60ea27eeadc0b5d6
	zero64x2 = 0xc2b2ae3d27d4eb4f
	zero64x3 = 0x0
	zero64x4 = 0x61c8864e7a143579
)
const (
	// Version-tagged magic prefixes and total encoded sizes for the
	// MarshalBinary/UnmarshalBinary hash-state representations.
	magic32 = "xxh\x07"
	magic64 = "xxh\x08"
	marshaled32Size = len(magic32) + 4*7 + 16
	marshaled64Size = len(magic64) + 8*6 + 32 + 1
)
// NewHash32 returns the 32-bit hasher as a generic hash.Hash.
func NewHash32() hash.Hash { return New32() }
// NewHash64 returns the 64-bit hasher as a generic hash.Hash.
func NewHash64() hash.Hash { return New64() }
// Checksum32 returns the checksum of the input data with the seed set to 0.
func Checksum32(in []byte) uint32 {
	return Checksum32S(in, 0)
}
// ChecksumString32 returns the checksum of the input data, without creating a copy, with the seed set to 0.
func ChecksumString32(s string) uint32 {
	return ChecksumString32S(s, 0)
}
// XXHash32 is a streaming implementation of the 32-bit xxHash algorithm.
// Input is buffered in mem (memIdx bytes used) until a full 16-byte block
// is available; ln tracks the total number of bytes written so far.
type XXHash32 struct {
	mem [16]byte
	ln, memIdx int32
	v1, v2, v3, v4 uint32
	seed uint32
}
// Size returns the number of bytes Sum will return.
func (xx *XXHash32) Size() int {
	return 4
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (xx *XXHash32) BlockSize() int {
	return 16
}
// NewS32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the specific seed.
func NewS32(seed uint32) (xx *XXHash32) {
	xx = &XXHash32{
		seed: seed,
	}
	xx.Reset()
	return
}
// New32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the seed set to 0.
func New32() *XXHash32 {
	return NewS32(0)
}
// Reset restores the four accumulator lanes to their seed-derived initial
// values and discards any buffered input.
func (xx *XXHash32) Reset() {
	xx.v1 = xx.seed + prime32x1 + prime32x2
	xx.v2 = xx.seed + prime32x2
	xx.v3 = xx.seed
	xx.v4 = xx.seed - prime32x1
	xx.ln, xx.memIdx = 0, 0
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (xx *XXHash32) Sum(in []byte) []byte {
	s := xx.Sum32()
	return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
// Layout: magic32, v1..v4, seed, ln, memIdx (little-endian), then the
// 16-byte input buffer.
func (xx *XXHash32) MarshalBinary() ([]byte, error) {
	b := make([]byte, 0, marshaled32Size)
	b = append(b, magic32...)
	b = appendUint32(b, xx.v1)
	b = appendUint32(b, xx.v2)
	b = appendUint32(b, xx.v3)
	b = appendUint32(b, xx.v4)
	b = appendUint32(b, xx.seed)
	b = appendInt32(b, xx.ln)
	b = appendInt32(b, xx.memIdx)
	b = append(b, xx.mem[:]...)
	return b, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
// It validates the magic prefix and total size before restoring the state
// written by MarshalBinary.
func (xx *XXHash32) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic32) || string(b[:len(magic32)]) != magic32 {
		return errors.New("xxhash: invalid hash state identifier")
	}
	if len(b) != marshaled32Size {
		return errors.New("xxhash: invalid hash state size")
	}
	b = b[len(magic32):]
	b, xx.v1 = consumeUint32(b)
	b, xx.v2 = consumeUint32(b)
	b, xx.v3 = consumeUint32(b)
	b, xx.v4 = consumeUint32(b)
	b, xx.seed = consumeUint32(b)
	b, xx.ln = consumeInt32(b)
	b, xx.memIdx = consumeInt32(b)
	copy(xx.mem[:], b)
	return nil
}
// Checksum64 an alias for Checksum64S(in, 0)
func Checksum64(in []byte) uint64 {
	return Checksum64S(in, 0)
}
// ChecksumString64 returns the checksum of the input data, without creating a copy, with the seed set to 0.
func ChecksumString64(s string) uint64 {
	return ChecksumString64S(s, 0)
}
// XXHash64 is a streaming implementation of the 64-bit xxHash algorithm.
// Input is buffered in mem (memIdx bytes used) until a full 32-byte block
// is available; ln tracks the total number of bytes written so far.
type XXHash64 struct {
	v1, v2, v3, v4 uint64
	seed uint64
	ln uint64
	mem [32]byte
	memIdx int8
}
// Size returns the number of bytes Sum will return.
func (xx *XXHash64) Size() int {
	return 8
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (xx *XXHash64) BlockSize() int {
	return 32
}
// NewS64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the specific seed.
func NewS64(seed uint64) (xx *XXHash64) {
	xx = &XXHash64{
		seed: seed,
	}
	xx.Reset()
	return
}
// New64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the seed set to 0x0.
func New64() *XXHash64 {
	return NewS64(0)
}
// Reset restores the four accumulator lanes to their seed-derived initial
// values and discards any buffered input.
func (xx *XXHash64) Reset() {
	xx.ln, xx.memIdx = 0, 0
	xx.v1, xx.v2, xx.v3, xx.v4 = resetVs64(xx.seed)
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (xx *XXHash64) Sum(in []byte) []byte {
	s := xx.Sum64()
	return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
// Layout: magic64, v1..v4, seed, ln (little-endian), memIdx as a single
// byte, then the 32-byte input buffer.
func (xx *XXHash64) MarshalBinary() ([]byte, error) {
	b := make([]byte, 0, marshaled64Size)
	b = append(b, magic64...)
	b = appendUint64(b, xx.v1)
	b = appendUint64(b, xx.v2)
	b = appendUint64(b, xx.v3)
	b = appendUint64(b, xx.v4)
	b = appendUint64(b, xx.seed)
	b = appendUint64(b, xx.ln)
	b = append(b, byte(xx.memIdx))
	b = append(b, xx.mem[:]...)
	return b, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
// It validates the magic prefix and total size before restoring the state
// written by MarshalBinary.
func (xx *XXHash64) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic64) || string(b[:len(magic64)]) != magic64 {
		return errors.New("xxhash: invalid hash state identifier")
	}
	if len(b) != marshaled64Size {
		return errors.New("xxhash: invalid hash state size")
	}
	b = b[len(magic64):]
	b, xx.v1 = consumeUint64(b)
	b, xx.v2 = consumeUint64(b)
	b, xx.v3 = consumeUint64(b)
	b, xx.v4 = consumeUint64(b)
	b, xx.seed = consumeUint64(b)
	b, xx.ln = consumeUint64(b)
	xx.memIdx = int8(b[0])
	b = b[1:]
	copy(xx.mem[:], b)
	return nil
}
// appendInt32 appends x to b as its little-endian unsigned bit pattern.
func appendInt32(b []byte, x int32) []byte { return appendUint32(b, uint32(x)) }
// appendUint32 appends the four little-endian bytes of x to b and returns
// the extended slice.
func appendUint32(b []byte, x uint32) []byte {
	return append(b, byte(x), byte(x>>8), byte(x>>16), byte(x>>24))
}
// appendUint64 appends the eight little-endian bytes of x to b and returns
// the extended slice.
func appendUint64(b []byte, x uint64) []byte {
	return append(b,
		byte(x), byte(x>>8), byte(x>>16), byte(x>>24),
		byte(x>>32), byte(x>>40), byte(x>>48), byte(x>>56))
}
// consumeInt32 reads a little-endian int32 from the front of b, returning the rest of the slice and the value.
func consumeInt32(b []byte) ([]byte, int32) { bn, x := consumeUint32(b); return bn, int32(x) }
// consumeUint32 reads a little-endian uint32 from the front of b (u32 is the package's load helper, defined elsewhere in this package).
func consumeUint32(b []byte) ([]byte, uint32) { x := u32(b); return b[4:], x }
// consumeUint64 reads a little-endian uint64 from the front of b.
func consumeUint64(b []byte) ([]byte, uint64) { x := u64(b); return b[8:], x }
// force the compiler to use ROTL instructions
//
// math/bits.RotateLeft32/64 are recognized compiler intrinsics and lower to
// a single rotate instruction on architectures that have one — exactly what
// the previous hand-written shift/or pairs were trying to guarantee, but
// clearer and gofmt-stable.
func rotl32_1(x uint32) uint32  { return bits.RotateLeft32(x, 1) }
func rotl32_7(x uint32) uint32  { return bits.RotateLeft32(x, 7) }
func rotl32_11(x uint32) uint32 { return bits.RotateLeft32(x, 11) }
func rotl32_12(x uint32) uint32 { return bits.RotateLeft32(x, 12) }
func rotl32_13(x uint32) uint32 { return bits.RotateLeft32(x, 13) }
func rotl32_17(x uint32) uint32 { return bits.RotateLeft32(x, 17) }
func rotl32_18(x uint32) uint32 { return bits.RotateLeft32(x, 18) }
func rotl64_1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
func rotl64_7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
func rotl64_11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rotl64_12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rotl64_18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rotl64_23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rotl64_27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rotl64_31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
// mix64 is the xxHash64 avalanche finalizer: a xor-shift/multiply chain
// that spreads the influence of every input bit across the final hash.
func mix64(h uint64) uint64 {
	h ^= h >> 33
	h *= prime64x2
	h ^= h >> 29
	h *= prime64x3
	h ^= h >> 32
	return h
}
// resetVs64 returns the four initial accumulator lanes for the given seed.
// For seed 0 the precomputed zero64x* constants are returned directly,
// avoiding the additions on the common path.
func resetVs64(seed uint64) (v1, v2, v3, v4 uint64) {
	if seed == 0 {
		return zero64x1, zero64x2, zero64x3, zero64x4
	}
	return (seed + prime64x1 + prime64x2), (seed + prime64x2), (seed), (seed - prime64x1)
}
// borrowed from cespare
// round64 absorbs one 8-byte input word v into accumulator lane h.
func round64(h, v uint64) uint64 {
	h += v * prime64x2
	h = rotl64_31(h)
	h *= prime64x1
	return h
}
func mergeRound64(h, v uint64) uint64 {
v = round64(0, v)
h ^= v
h = h*prime64x1 + prime64x4
return h
} | vendor/github.com/OneOfOne/xxhash/xxhash.go | 0.688678 | 0.442215 | xxhash.go | starcoder |
package entities
import (
"github.com/rpaloschi/dxf-go/core"
)
// Polyline Entity representation. Fields are populated from DXF group codes
// by NewPolyline; the boolean flags are decoded from the bit field in group
// code 70.
type Polyline struct {
	BaseEntity
	Elevation float64 // group code 30
	Thickness float64 // group code 39
	Closed bool
	CurveFitVerticesAdded bool
	SplineFitVerticesAdded bool
	Is3dPolyline bool
	Is3dPolygonMesh bool
	PolygonMeshClosedNDir bool
	IsPolyfaceMesh bool
	LineTypeParentAround bool
	DefaultStartWidth float64 // group code 40
	DefaultEndWidth float64 // group code 41
	VertexCountM int64 // group code 71
	VertexCountN int64 // group code 72
	SmoothDensityM int64 // group code 73
	SmoothDensityN int64 // group code 74
	SmoothSurface SmoothSurfaceType // group code 75
	ExtrusionDirection core.Point // group codes 210/220/230; defaults to +Z
	Vertices VertexSlice // nested VERTEX entities, filled by AddNestedEntities
}
// SmoothSurfaceType representation (DXF group code 75 values).
type SmoothSurfaceType int
const (
	NO_SMOOTH_SURFACE_FITTED SmoothSurfaceType = 0
	QUADRATIC_BSPLINE SmoothSurfaceType = 5
	CUBIC_BSPLINE SmoothSurfaceType = 6
	BEZIER SmoothSurfaceType = 8
)
// Equals tests equality against another Polyline. It returns false when
// other is not a *Polyline; float fields are compared with
// core.FloatEquals (tolerant comparison) rather than ==.
func (p Polyline) Equals(other core.DxfElement) bool {
	if otherPolyline, ok := other.(*Polyline); ok {
		return p.BaseEntity.Equals(otherPolyline.BaseEntity) &&
			core.FloatEquals(p.Elevation, otherPolyline.Elevation) &&
			core.FloatEquals(p.Thickness, otherPolyline.Thickness) &&
			p.Closed == otherPolyline.Closed &&
			p.CurveFitVerticesAdded == otherPolyline.CurveFitVerticesAdded &&
			p.SplineFitVerticesAdded == otherPolyline.SplineFitVerticesAdded &&
			p.Is3dPolyline == otherPolyline.Is3dPolyline &&
			p.Is3dPolygonMesh == otherPolyline.Is3dPolygonMesh &&
			p.PolygonMeshClosedNDir == otherPolyline.PolygonMeshClosedNDir &&
			p.IsPolyfaceMesh == otherPolyline.IsPolyfaceMesh &&
			p.LineTypeParentAround == otherPolyline.LineTypeParentAround &&
			core.FloatEquals(p.DefaultStartWidth, otherPolyline.DefaultStartWidth) &&
			core.FloatEquals(p.DefaultEndWidth, otherPolyline.DefaultEndWidth) &&
			p.VertexCountM == otherPolyline.VertexCountM &&
			p.VertexCountN == otherPolyline.VertexCountN &&
			p.SmoothDensityM == otherPolyline.SmoothDensityM &&
			p.SmoothDensityN == otherPolyline.SmoothDensityN &&
			p.SmoothSurface == otherPolyline.SmoothSurface &&
			p.ExtrusionDirection.Equals(otherPolyline.ExtrusionDirection) &&
			p.Vertices.Equals(otherPolyline.Vertices)
	}
	return false
}
// HasNestedEntities a Polyline will have nested entities.
func (p Polyline) HasNestedEntities() bool {
	return true
}
// AddNestedEntities a Polyline will contain only Vertex as nested entities,
// other types are simply ignored (a message is logged for each skipped
// entity).
func (p *Polyline) AddNestedEntities(entities EntitySlice) {
	for _, entity := range entities {
		if vertex, ok := entity.(*Vertex); ok {
			p.Vertices = append(p.Vertices, vertex)
		} else {
			core.Log.Printf(
				"Skipping entity %v. Polylines can only contain Vertex entities.",
				entity)
		}
	}
}
// Bit masks for the DXF polyline flags bit field (group code 70), decoded
// into the individual Polyline booleans in NewPolyline.
const closedPolylineBit = 0x1
const curveFitVerticesAddedBit = 0x2
const splineFitVerticesAddedBit = 0x4
const is3dPolylineBit = 0x8
const is3dPolygonMeshBit = 0x10
const closedNDirectionBit = 0x20
const polyfaceMeshBit = 0x40
const lineTypePatternBit = 0x80
// NewPolyline builds a new Polyline from a slice of Tags (without vertices)
func NewPolyline(tags core.TagSlice) (*Polyline, error) {
polyline := new(Polyline)
// set defaults
polyline.Vertices = make(VertexSlice, 0)
polyline.ExtrusionDirection = core.Point{X: 0.0, Y: 0.0, Z: 1.0}
polyline.InitBaseEntityParser()
polyline.Update(map[int]core.TypeParser{
30: core.NewFloatTypeParserToVar(&polyline.Elevation),
39: core.NewFloatTypeParserToVar(&polyline.Thickness),
40: core.NewFloatTypeParserToVar(&polyline.DefaultStartWidth),
41: core.NewFloatTypeParserToVar(&polyline.DefaultEndWidth),
70: core.NewIntTypeParser(func(flags int64) {
polyline.Closed = flags&closedPolylineBit != 0
polyline.CurveFitVerticesAdded = flags&curveFitVerticesAddedBit != 0
polyline.SplineFitVerticesAdded = flags&splineFitVerticesAddedBit != 0
polyline.Is3dPolyline = flags&is3dPolylineBit != 0
polyline.Is3dPolygonMesh = flags&is3dPolygonMeshBit != 0
polyline.PolygonMeshClosedNDir = flags&closedNDirectionBit != 0
polyline.IsPolyfaceMesh = flags&polyfaceMeshBit != 0
polyline.LineTypeParentAround = flags&lineTypePatternBit != 0
}),
71: core.NewIntTypeParserToVar(&polyline.VertexCountM),
72: core.NewIntTypeParserToVar(&polyline.VertexCountN),
73: core.NewIntTypeParserToVar(&polyline.SmoothDensityM),
74: core.NewIntTypeParserToVar(&polyline.SmoothDensityN),
75: core.NewIntTypeParser(func(value int64) {
polyline.SmoothSurface = SmoothSurfaceType(value)
}),
210: core.NewFloatTypeParserToVar(&polyline.ExtrusionDirection.X),
220: core.NewFloatTypeParserToVar(&polyline.ExtrusionDirection.Y),
230: core.NewFloatTypeParserToVar(&polyline.ExtrusionDirection.Z),
})
err := polyline.Parse(tags)
return polyline, err
} | vendor/github.com/rpaloschi/dxf-go/entities/polyline.go | 0.663124 | 0.59072 | polyline.go | starcoder |
package geometry
import (
"encoding/binary"
"math"
)
// rDims is the dimensionality of the tree (2-D rectangles).
const rDims = 2
// rMaxEntries is the maximum number of live entries per node; nodes split
// when they would exceed it.
const rMaxEntries = 16
// rRect is a bounding box. data holds either a child *rNode (internal
// entry) or the user value (leaf entry).
type rRect struct {
	data interface{}
	min, max [rDims]float64
}
// rNode is a fixed-capacity node; rects[:count] are the live entries. The
// extra slot lets an overflowing entry exist briefly before a split.
type rNode struct {
	count int
	rects [rMaxEntries + 1]rRect
}
// rTree is a simple insert-and-search R-tree over 2-D rectangles.
type rTree struct {
	height int
	root rRect
	count int
	reinsert []rRect
}
// expand grows r in place so that it also covers b, dimension by dimension.
func (r *rRect) expand(b *rRect) {
	for d := 0; d < rDims; d++ {
		if r.min[d] > b.min[d] {
			r.min[d] = b.min[d]
		}
		if r.max[d] < b.max[d] {
			r.max[d] = b.max[d]
		}
	}
}
// Insert inserts an item into the RTree
func (tr *rTree) Insert(min, max []float64, value interface{}) {
	var item rRect
	fit(min, max, value, &item)
	tr.insert(&item)
}
// insert adds item to the tree, lazily creating the root on first use and
// splitting the root (growing the tree by one level) when it overflows.
func (tr *rTree) insert(item *rRect) {
	if tr.root.data == nil {
		fit(item.min[:], item.max[:], new(rNode), &tr.root)
	}
	grown := tr.root.insert(item, tr.height)
	if grown {
		tr.root.expand(item)
	}
	if tr.root.data.(*rNode).count == rMaxEntries+1 {
		// Root overflow: split the old root and hang both halves under a
		// brand-new root node, increasing the tree height by one.
		newRoot := new(rNode)
		tr.root.splitLargestAxisEdgeSnap(&newRoot.rects[1])
		newRoot.rects[0] = tr.root
		newRoot.count = 2
		tr.root.data = newRoot
		tr.root.recalc()
		tr.height++
	}
	tr.count++
}
// chooseLeastEnlargement returns the index of the child entry whose
// bounding box needs the least enlargement to contain b. Ties are broken
// by picking the entry with the smaller current area.
func (r *rRect) chooseLeastEnlargement(b *rRect) int {
	node := r.data.(*rNode)
	best, bestEnlargement, bestArea := -1, 0.0, 0.0
	for i := 0; i < node.count; i++ {
		child := &node.rects[i]
		// Current area of the child's box (product of its extents).
		area := 1.0
		for d := 0; d < rDims; d++ {
			area *= child.max[d] - child.min[d]
		}
		// Area of the child's box after expanding it to also cover b.
		union := 1.0
		for d := 0; d < rDims; d++ {
			hi := child.max[d]
			if b.max[d] > hi {
				hi = b.max[d]
			}
			lo := child.min[d]
			if b.min[d] < lo {
				lo = b.min[d]
			}
			union *= hi - lo
		}
		enlargement := union - area
		switch {
		case best == -1 || enlargement < bestEnlargement:
			best, bestEnlargement, bestArea = i, enlargement, area
		case enlargement == bestEnlargement && area < bestArea:
			best, bestEnlargement, bestArea = i, enlargement, area
		}
	}
	return best
}
// recalc recomputes r's bounding box as the union of its child entries'
// boxes. r must currently hold an *rNode with at least one entry.
func (r *rRect) recalc() {
	n := r.data.(*rNode)
	r.min = n.rects[0].min
	r.max = n.rects[0].max
	for i := 1; i < n.count; i++ {
		r.expand(&n.rects[i])
	}
}
// contains reports whether b is fully contained inside of r.
func (r *rRect) contains(b *rRect) bool {
	for i := 0; i < rDims; i++ {
		if b.min[i] < r.min[i] || b.max[i] > r.max[i] {
			return false
		}
	}
	return true
}
// largestAxis returns the dimension along which r is widest, together with
// that width.
func (r *rRect) largestAxis() (axis int, size float64) {
	j, jsz := 0, 0.0
	for i := 0; i < rDims; i++ {
		sz := r.max[i] - r.min[i]
		if i == 0 || sz > jsz {
			j, jsz = i, sz
		}
	}
	return j, jsz
}
// splitLargestAxisEdgeSnap splits r's overflowing node in two along r's
// largest axis. Each entry goes to whichever side (left = r, right) whose
// edge it is nearer to; entries equidistant from both edges are collected
// and then dealt out to balance the two halves. Both boxes are recalculated
// at the end.
func (r *rRect) splitLargestAxisEdgeSnap(right *rRect) {
	axis, _ := r.largestAxis()
	left := r
	leftNode := left.data.(*rNode)
	rightNode := new(rNode)
	right.data = rightNode
	var equals []rRect
	for i := 0; i < leftNode.count; i++ {
		minDist := leftNode.rects[i].min[axis] - left.min[axis]
		maxDist := left.max[axis] - leftNode.rects[i].max[axis]
		if minDist < maxDist {
			// stay left
		} else {
			if minDist > maxDist {
				// move to right
				rightNode.rects[rightNode.count] = leftNode.rects[i]
				rightNode.count++
			} else {
				// move to equals, at the end of the left array
				equals = append(equals, leftNode.rects[i])
			}
			// Remove entry i by swapping in the last live entry (order is
			// not significant within a node), then revisit index i.
			leftNode.rects[i] = leftNode.rects[leftNode.count-1]
			leftNode.rects[leftNode.count-1].data = nil
			leftNode.count--
			i--
		}
	}
	for _, b := range equals {
		if leftNode.count < rightNode.count {
			leftNode.rects[leftNode.count] = b
			leftNode.count++
		} else {
			rightNode.rects[rightNode.count] = b
			rightNode.count++
		}
	}
	left.recalc()
	right.recalc()
}
// insert recursively places item under r, descending height levels to a
// leaf. It reports whether r's bounding box had to grow to accommodate the
// item (the caller expands its own box accordingly). Children that overflow
// after the insert are split in place.
func (r *rRect) insert(item *rRect, height int) (grown bool) {
	n := r.data.(*rNode)
	if height == 0 {
		n.rects[n.count] = *item
		n.count++
		grown = !r.contains(item)
		return grown
	}
	// choose subtree
	index := r.chooseLeastEnlargement(item)
	child := &n.rects[index]
	grown = child.insert(item, height-1)
	if grown {
		child.expand(item)
		grown = !r.contains(item)
	}
	if child.data.(*rNode).count == rMaxEntries+1 {
		child.splitLargestAxisEdgeSnap(&n.rects[n.count])
		n.count++
	}
	return grown
}
// fit an external item into a rect type. A nil max means a point (max is
// taken to equal min); mismatched or wrong-sized dimensions panic.
func fit(min, max []float64, value interface{}, target *rRect) {
	if max == nil {
		max = min
	}
	if len(min) != len(max) {
		panic("min/max dimension mismatch")
	}
	if len(min) != rDims {
		panic("invalid number of dimensions")
	}
	for i := 0; i < rDims; i++ {
		target.min[i] = min[i]
		target.max[i] = max[i]
	}
	target.data = value
}
// intersects reports whether r and b overlap in every dimension (touching
// edges count as overlapping).
func (r *rRect) intersects(b *rRect) bool {
	for d := range r.min {
		if r.min[d] > b.max[d] || r.max[d] < b.min[d] {
			return false
		}
	}
	return true
}
// search walks the subtree rooted at r, invoking iter for every leaf entry
// whose box intersects target. height 0 means r's node holds leaf entries.
// Returning false from iter stops the traversal early; search propagates
// that false upward.
func (r *rRect) search(
	target *rRect, height int,
	iter func(min, max []float64, value interface{}) bool,
) bool {
	n := r.data.(*rNode)
	if height == 0 {
		for i := 0; i < n.count; i++ {
			if target.intersects(&n.rects[i]) {
				if !iter(n.rects[i].min[:], n.rects[i].max[:],
					n.rects[i].data) {
					return false
				}
			}
		}
	} else {
		for i := 0; i < n.count; i++ {
			if target.intersects(&n.rects[i]) {
				if !n.rects[i].search(target, height-1, iter) {
					return false
				}
			}
		}
	}
	return true
}
// search starts a traversal from the root; an empty tree yields nothing.
func (tr *rTree) search(
	target *rRect,
	iter func(min, max []float64, value interface{}) bool,
) {
	if tr.root.data == nil {
		return
	}
	if target.intersects(&tr.root) {
		tr.root.search(target, tr.height, iter)
	}
}
// Search invokes iter for every stored item whose box intersects [min, max].
func (tr *rTree) Search(
	min, max []float64,
	iter func(min, max []float64, value interface{}) bool,
) {
	var target rRect
	fit(min, max, nil, &target)
	tr.search(&target, iter)
}
// appendFloat appends num to dst as its IEEE-754 bit pattern in
// little-endian byte order and returns the extended slice.
func appendFloat(dst []byte, num float64) []byte {
	bits := math.Float64bits(num)
	var encoded [8]byte
	binary.LittleEndian.PutUint64(encoded[:], bits)
	return append(dst, encoded[:]...)
}
// compress serializes the tree into dst: a single height byte followed by
// the recursive node encoding produced by rRect.compress. An empty tree
// appends nothing.
func (tr *rTree) compress(dst []byte) []byte {
	if tr.root.data == nil {
		return dst
	}
	dst = append(dst, byte(tr.height))
	return tr.root.compress(dst, tr.height)
}
// compress encodes one node: min/max as four little-endian float64s, a
// count byte, then either (leaf) a width byte followed by count item ids
// packed at that width, or (internal) count 4-byte child offsets that are
// back-patched as each child is written.
func (r *rRect) compress(dst []byte, height int) []byte {
	n := r.data.(*rNode)
	dst = appendFloat(dst, r.min[0])
	dst = appendFloat(dst, r.min[1])
	dst = appendFloat(dst, r.max[0])
	dst = appendFloat(dst, r.max[1])
	dst = append(dst, byte(n.count))
	if height == 0 {
		// Leaf: find the widest item id so all ids can share one width.
		var ibytes byte = 1
		for i := 0; i < n.count; i++ {
			ibytes2 := numBytes(uint32(n.rects[i].data.(int)))
			if ibytes2 > ibytes {
				ibytes = ibytes2
			}
		}
		dst = append(dst, ibytes)
		for i := 0; i < n.count; i++ {
			dst = appendNum(dst, uint32(n.rects[i].data.(int)), ibytes)
		}
		return dst
	}
	// Internal: reserve a 4-byte offset slot per child, then write each
	// child and patch its absolute offset into the reserved slot.
	mark := make([]int, n.count)
	for i := 0; i < n.count; i++ {
		mark[i] = len(dst)
		dst = append(dst, 0, 0, 0, 0)
	}
	for i := 0; i < n.count; i++ {
		binary.LittleEndian.PutUint32(dst[mark[i]:], uint32(len(dst)))
		dst = n.rects[i].compress(dst, height-1)
	}
	return dst
}
// rCompressSearch searches a tree previously serialized by rTree.compress
// directly from its byte encoding, without rebuilding the tree. addr points
// at the height byte; an address at the end of data means an empty tree.
// It returns false if the iterator stopped the traversal early.
func rCompressSearch(
	data []byte,
	addr int,
	series *baseSeries,
	rect Rect,
	iter func(seg Segment, item int) bool,
) bool {
	if int(addr) == len(data) {
		return true
	}
	height := int(data[addr])
	addr++
	return rnCompressSearch(data, addr, series, rect, height, iter)
}
func rnCompressSearch(
data []byte,
addr int,
series *baseSeries,
rect Rect,
height int,
iter func(seg Segment, item int) bool,
) bool {
var nrect Rect
nrect.Min.X = math.Float64frombits(binary.LittleEndian.Uint64(data[addr:]))
addr += 8
nrect.Min.Y = math.Float64frombits(binary.LittleEndian.Uint64(data[addr:]))
addr += 8
nrect.Max.X = math.Float64frombits(binary.LittleEndian.Uint64(data[addr:]))
addr += 8
nrect.Max.Y = math.Float64frombits(binary.LittleEndian.Uint64(data[addr:]))
addr += 8
if !rect.IntersectsRect(nrect) {
return true
}
count := int(data[addr])
addr++
if height == 0 {
ibytes := data[addr]
addr++
for i := 0; i < count; i++ {
item := int(readNum(data[addr:], ibytes))
addr += int(ibytes)
seg := series.SegmentAt(int(item))
irect := seg.Rect()
if irect.IntersectsRect(rect) {
if !iter(seg, int(item)) {
return false
}
}
}
return true
}
for i := 0; i < count; i++ {
naddr := int(binary.LittleEndian.Uint32(data[addr:]))
addr += 4
if !rnCompressSearch(data, naddr, series, rect, height-1, iter) {
return false
}
}
return true
} | geometry/rtree.go | 0.566618 | 0.476032 | rtree.go | starcoder |
package nist_sp800_22
import (
"math"
)
func RandomExcursions(n uint64) ([]float64, []bool, error) {
var State_X []int64 = []int64{-4, -3, -2, -1, 1, 2, 3, 4}
var X []int64 = make([]int64, n)
// (1) Form a normalized (-1, +1) sequence X
for i := range epsilon {
X[i] = 2*int64(epsilon[i]) - 1
}
// (2) Compute the partial sums S[i] of successively larger subsequences.
var S []int64 = make([]int64, n)
var index_S uint64
S[0] = X[0]
for index_S = 1; index_S < n; index_S++ {
S[index_S] = S[index_S-1] + X[index_S]
}
// (3) Form a new sequence S' by attaching zeros before and after the set S.
var S_Prime []int64 = []int64{0}
S_Prime = append(S_Prime, S...)
S_Prime = append(S_Prime, 0)
S = nil // nil will release the underlying memory to the garbage collector.
// TyeolRik Note.
// (4) ~ (7) Could be more effective code.
// But, to showing like schoolbook, I coded as document steps.
// (4) Calculate J = the total number of zero crossings in S', where a zero crossing is a value of zero in S ' that occurs after the starting zero.
var J uint64 = 0 // the Number of Cycles
for _, value := range S_Prime {
if value == 0 {
J++
}
}
J = J - 1 // Not consisting 1st zero.
// (5) Drawing Tables
var Cycles [][]uint64 = make([][]uint64, 8)
var CycleIndex int64 = -1 // Due to omit 1st zero.
for i := range Cycles {
Cycles[i] = make([]uint64, J)
}
for _, stateX := range S_Prime {
switch stateX {
case -4:
Cycles[0][CycleIndex]++
case -3:
Cycles[1][CycleIndex]++
case -2:
Cycles[2][CycleIndex]++
case -1:
Cycles[3][CycleIndex]++
case 0:
CycleIndex++
case 1:
Cycles[4][CycleIndex]++
case 2:
Cycles[5][CycleIndex]++
case 3:
Cycles[6][CycleIndex]++
case 4:
Cycles[7][CycleIndex]++
}
}
// (6) Count v_k(x) = the total number of cycles in which state x occurs exactly k times among all cycles.
// What I understand is sum of count of State x (Cycle) row. (= Cycle Row)
var v [8][6]uint64
for rowIndex, CyclesRow := range Cycles {
for _, occur := range CyclesRow {
if occur < 5 {
v[rowIndex][occur]++
} else {
v[rowIndex][5]++
}
}
}
/*
// Print Log
fmt.Println("J", J)
for row, value := range v {
fmt.Println(State_X[row], value, sumArray(value))
}
*/
// (7) For each of the eight states of x, compute the test statistic χ^2
var chi_square []float64 = make([]float64, len(State_X))
for chi_square_Index, x := range State_X {
// (7) - 1. Calculate theoretical probabilities π_0 ... π_5 (Page 85. Section 3.14)
var _x float64 = float64(x)
var pi [6]float64
var tempArg_1_divided_by_2_abs_x float64 = 1.0 / (2.0 * math.Abs(_x))
pi[0] = 1.0 - tempArg_1_divided_by_2_abs_x
for k := 1; k <= 4; k++ {
pi[k] = tempArg_1_divided_by_2_abs_x * tempArg_1_divided_by_2_abs_x * math.Pow((1.0-tempArg_1_divided_by_2_abs_x), float64(k-1))
}
pi[5] = tempArg_1_divided_by_2_abs_x * (1.0 - tempArg_1_divided_by_2_abs_x) * (1.0 - tempArg_1_divided_by_2_abs_x) * (1.0 - tempArg_1_divided_by_2_abs_x) * (1.0 - tempArg_1_divided_by_2_abs_x)
// (7) - 2. compute the test statistic χ^2
var sum float64 = 0.0
var J_pi float64
for k := 0; k <= 5; k++ {
J_pi = float64(J) * pi[k]
sum += (float64(v[chi_square_Index][k]) - J_pi) * (float64(v[chi_square_Index][k]) - J_pi) / J_pi
}
chi_square[chi_square_Index] = sum
}
var P_value []float64 = make([]float64, 8)
var randomness []bool = make([]bool, 8)
// fmt.Println("State=x", "\tCHI_SQUARE", "\t P-value", "\t\t Conclusion")
for i := range P_value {
P_value[i] = igamc(5.0/2.0, chi_square[i]/2.0)
randomness[i] = DecisionRule(P_value[i], LEVEL)
// fmt.Println(State_X[i], "\t", chi_square[i], "\t", P_value[i], "\t", DecisionRule(P_value[i], LEVEL))
}
return P_value, randomness, nil
} | nist_sp800_22/randomExcursions.go | 0.523664 | 0.455865 | randomExcursions.go | starcoder |
// Color delta / comparison math.
// Functions to calculate color differences. All of them refers to the CIE-L*ab color space.
// http://www.easyrgb.com/index.php?X=DELT
package delta
import "math"
// cieLab2Hue returns the CIE-L*ab hue angle H° in degrees, in [0, 360),
// for the given a*/b* chroma components (EasyRGB "CIE-L*ab to Hue").
//
// Bug fix: the original initialized a `notAssigned` sentinel but never
// assigned it to h, so the `h == notAssigned` guard was always false and
// the atan branch was unreachable — every non-axis input returned 0.
// Axis cases are now returned directly and all other inputs get the
// quadrant-biased arctangent, as the reference algorithm specifies.
func cieLab2Hue(a, b float64) (h float64) {
	rad2deg := 180 / math.Pi
	// Points exactly on the a*/b* axes have fixed hues.
	switch {
	case a >= 0 && b == 0:
		return 0
	case a < 0 && b == 0:
		return 180
	case a == 0 && b > 0:
		return 90
	case a == 0 && b < 0:
		return 270
	}
	// Quadrant bias so the arctangent result lands in [0, 360).
	var bias float64
	switch {
	case a > 0 && b > 0:
		bias = 0
	case a < 0:
		bias = 180
	default: // a > 0 && b < 0
		bias = 360
	}
	return rad2deg*math.Atan(b/a) + bias
}
// DeltaC computes the CIE-L*ab chroma difference ΔC*: the chroma of the
// second color minus the chroma of the first.
func DeltaC(a1, b1, a2, b2 float64) float64 {
	chroma1 := math.Sqrt(a1*a1 + b1*b1)
	chroma2 := math.Sqrt(a2*a2 + b2*b2)
	return chroma2 - chroma1
}
// DeltaH computes the CIE-L*ab hue difference ΔH*: the part of the a/b
// difference that is not accounted for by the chroma difference.
func DeltaH(a1, b1, a2, b2 float64) float64 {
	chromaDiff := math.Sqrt(a2*a2+b2*b2) - math.Sqrt(a1*a1+b1*b1)
	da := a2 - a1
	db := b2 - b1
	return math.Sqrt(da*da + db*db - chromaDiff*chromaDiff)
}
// DeltaE computes the classic CIE76 color difference ΔE*: the Euclidean
// distance between two colors in L*ab space.
func DeltaE(l1, a1, b1, l2, a2, b2 float64) float64 {
	dl := l1 - l2
	da := a1 - a2
	db := b1 - b2
	return math.Sqrt(dl*dl + da*da + db*db)
}
// DeltaE94 computes the CIE 1994 color difference between two L*ab
// colors. wL, wC and wH are application-dependent weighting factors;
// 1 is the default (graphic-arts) weighting.
func DeltaE94(l1, a1, b1, l2, a2, b2, wL, wC, wH float64) float64 {
	chroma1 := math.Sqrt((a1 * a1) + (b1 * b1))
	chroma2 := math.Sqrt((a2 * a2) + (b2 * b2))
	dL := l2 - l1
	dC := chroma2 - chroma1
	dE := math.Sqrt(((l1 - l2) * (l1 - l2)) + ((a1 - a2) * (a1 - a2)) + ((b1 - b2) * (b1 - b2)))
	// Hue difference is whatever remains of the total difference after
	// removing the lightness and chroma parts; the guard (taken from the
	// EasyRGB reference) keeps the subtraction from going negative.
	dH := 0.0
	if math.Sqrt(dE) > math.Sqrt(math.Abs(dL))+math.Sqrt(math.Abs(dC)) {
		dH = math.Sqrt((dE * dE) - (dL * dL) - (dC * dC))
	}
	scaleC := 1 + (0.045 * chroma1)
	scaleH := 1 + (0.015 * chroma1)
	dL /= wL
	dC /= wC * scaleC
	dH /= wH * scaleH
	return math.Sqrt(dL*dL + dC*dC + dH*dH)
}
/* bug
// Delta E 2000
func DeltaE00(l1, a1, b1, l2, a2, b2, wL, wC, wH float64) (delta float64) {
// l1, a1, b1 //Color #1 lab values
// l2, a2, b2 //Color #2 lab values
// wL, wC, wH //Wheight factor
var xDH, xHX float64
const deg2rad = math.Pi / 180
xC1 := math.Sqrt(a1*a1 + b1*b1)
xC2 := math.Sqrt(a2*a2 + b2*b2)
xCX := (xC1 + xC2) / 2
xCX7 := xCX * xCX * xCX * xCX * xCX * xCX * xCX
xGX := 0.5 * (1 - math.Sqrt((xCX7)/((xCX7)+(6103515625))))
xNN := (1 + xGX) * a1
xC1 = math.Sqrt(xNN*xNN + b1*b1)
xH1 := cieLab2Hue(xNN, b1)
xNN = (1 + xGX) * a2
xC2 = math.Sqrt(xNN*xNN + b2*b2)
xH2 := cieLab2Hue(xNN, b2)
xDL := l2 - l1
xDC := xC2 - xC1
if xC1*xC2 == 0 {
xDH = 0
} else {
// xNN = round(xH2-xH1, 12)
xNN = xH2 - xH1
if math.Abs(xNN) <= 180 {
xDH = xH2 - xH1
} else {
if xNN > 180 {
xDH = xH2 - xH1 - 360
} else {
xDH = xH2 - xH1 + 360
}
}
}
xDH = 2 * math.Sqrt(xC1*xC2) * math.Sin(deg2rad*(xDH/2))
xLX := (l1 + l2) / 2
xCY := (xC1 + xC2) / 2
if (xC1 * xC2) == 0 {
xHX = xH1 + xH2
} else {
// xNN = math.Abs(round(xH1-xH2, 12))
xNN = math.Abs(xH1 - xH2)
if xNN > 180 {
if xH2+xH1 < 360 {
xHX = xH1 + xH2 + 360
} else {
xHX = xH1 + xH2 - 360
}
} else {
xHX = xH1 + xH2
}
xHX /= 2
}
xTX := 1 - 0.17*math.Cos(deg2rad*(xHX-30)) + 0.24*math.Cos(deg2rad*(2*xHX)) + 0.32*math.Cos(deg2rad*(3*xHX+6)) - 0.20*math.Cos(deg2rad*(4*xHX-63))
xPH := 30 * math.Exp(-((xHX-275)/25)*((xHX-275)/25))
xCY7 := xCY * xCY * xCY * xCY * xCY * xCY * xCY
xRC := 2 * math.Sqrt((xCY7)/((xCY7)+(6103515625)))
xSL := 1 + ((0.015 * ((xLX - 50) * (xLX - 50))) / math.Sqrt(20+((xLX-50)*(xLX-50))))
xSC := 1 + 0.045*xCY
xSH := 1 + 0.015*xCY*xTX
xRT := -math.Sin(deg2rad*(2*xPH)) * xRC
xDL /= wL * xSL
xDC /= wC * xSC
xDH /= wH * xSH
delta = math.Sqrt(xDL*xDL + xDC*xDC + xDH*xDH + xRT*xDC*xDH)
return
}
func DeltaCMC(l1, a1, b1, l2, a2, b2, wL, wC, wH float64) (delta float64) {
//l1, a1, b1 Color #1 lab values
//l2, a2, b2 Color #2 lab values
//wL, wC Weight factors
const deg2rad = math.Pi / 180
var xTT, xSL float64
xC1 := math.Sqrt((a1 * a1) + (b1 * b1))
xC2 := math.Sqrt((a2 * a2) + (b2 * b2))
xC14 := xC1 * xC1 * xC1 * xC1
xff := math.Sqrt((xC14) / ((xC14) + 1900))
xH1 := cieLab2Hue(a1, b1)
if xH1 < 164 || xH1 > 345 {
xTT = 0.36 + math.Abs(0.4*math.Cos(deg2rad*(35+xH1)))
} else {
xTT = 0.56 + math.Abs(0.2*math.Cos(deg2rad*(168+xH1)))
}
if l1 < 16 {
xSL = 0.511
} else {
xSL = (0.040975 * l1) / (1 + (0.01765 * l1))
}
xSC := ((0.0638 * xC1) / (1 + (0.0131 * xC1))) + 0.638
xSH := ((xff * xTT) + 1 - xff) * xSC
aa2 := (a2 - a1) * (a2 - a1)
bb2 := (b2 - b1) * (b2 - b1)
cc2 := (xC2 - xC1) * (xC2 - xC1)
xDH := math.Sqrt(aa2 + bb2 - cc2)
xSL = (l2 - l1) / wL * xSL
xSC = (xC2 - xC1) / wC * xSC
xSH = xDH / xSH
delta = math.Sqrt(xSL*xSL + xSC*xSC + xSH*xSH)
return
}
*/ | f64/delta/cie_lab_delta.go | 0.713731 | 0.547827 | cie_lab_delta.go | starcoder |
package gander
import (
"errors"
"fmt"
"sort"
"strconv"
"sync"
)
// A DataFrame is a slice of *Series. As a Series
// contains a slice of float64, a DataFrame can be thought of
// as a two dimensional table of data, somewhat like a spreadsheet.
// Each *Series is one named column; all columns are expected to hold
// the same number of values (rows).
type DataFrame []*Series
// NewDataFrame creates a DataFrame from a 2 dimensional string slice,
// converting all data values to float64. If any value in the first row
// cannot be converted to a float64, the first row is treated as headers
// and used to name each Series; otherwise columns are named
// "Column 1", "Column 2", …. A column containing (non-header) values
// that cannot be converted to float64 is marked as categorical and is
// excluded from numeric calculations.
func NewDataFrame(data [][]string) (*DataFrame, error) {
	if !columnCountsMatch(data) {
		return nil, errors.New("not all rows have the same number of columns")
	}
	var headers []string
	if hasHeaderRow(data) {
		headers, data = data[0], data[1:]
	} else {
		for i := range data[0] {
			headers = append(headers, fmt.Sprintf("Column %v", i+1))
		}
	}
	df := DataFrame{}
	for col := range data[0] {
		df = append(df, createSeries(headers[col], data, col))
	}
	return &df, nil
}
// DropRows removes the rows specified by the provided row numbers.
// Row numbers are zero based. Returns an error (without modifying the
// DataFrame) if any row number exceeds the last row index.
//
// Fixes: the original sorted ascending and then immediately re-sorted
// descending (dead work), and both sorts mutated the caller's slice
// when invoked as d.DropRows(rows...) — we now sort a copy.
func (d *DataFrame) DropRows(r ...int) error {
	if maxInt(r) > d.Rows()-1 {
		return errors.New("a specified row is out of range")
	}
	// Drop from the highest index down so earlier removals do not shift
	// the positions of rows still to be removed.
	sorted := append([]int(nil), r...)
	sort.Sort(sort.Reverse(sort.IntSlice(sorted)))
	for _, v := range sorted {
		d.dropRow(v)
	}
	return nil
}
// DropRowsWhere removes every row for which the provided function
// returns true. The function receives the row's values in column order.
func (d *DataFrame) DropRowsWhere(fn func([]float64) bool) error {
	// Walk backwards so removals never shift the index of a row that
	// has not been visited yet.
	for i := d.Rows() - 1; i >= 0; i-- {
		if fn(d.toRow(i)) {
			d.dropRow(i)
		}
	}
	return nil
}
// DropColumns removes the columns specified by the provided column
// numbers. Column numbers are zero based. Returns an error (without
// modifying the DataFrame) if any column number is out of range.
func (d *DataFrame) DropColumns(r ...int) error {
	if maxInt(r) > d.Columns()-1 {
		return errors.New("a specified column is out of range")
	}
	kept := DataFrame{}
	for i, s := range *d {
		if containsInt(i, r) {
			continue
		}
		kept = append(kept, s)
	}
	*d = kept
	return nil
}
// DropColumnsByName removes the columns with the provided names.
// Returns an error (without modifying the DataFrame) if any name does
// not match an existing column.
func (d *DataFrame) DropColumnsByName(n ...string) error {
	// Validate every requested name up front so the operation is
	// all-or-nothing.
	for _, name := range n {
		if !containsString(name, d.ColumnNames()) {
			return fmt.Errorf("column '%s' does not exist in the DataFrame", name)
		}
	}
	kept := DataFrame{}
	for _, s := range *d {
		if containsString(s.Name, n) {
			continue
		}
		kept = append(kept, s)
	}
	*d = kept
	return nil
}
// DropColumnsWhere removes every column for which the provided
// function returns true.
func (d *DataFrame) DropColumnsWhere(fn func(*Series) bool) error {
	kept := DataFrame{}
	for _, s := range *d {
		if fn(s) {
			continue
		}
		kept = append(kept, s)
	}
	*d = kept
	return nil
}
// ColumnNames returns the name of every column, in column order.
func (d *DataFrame) ColumnNames() []string {
	names := make([]string, 0, len(*d))
	for _, s := range *d {
		names = append(names, s.Name)
	}
	return names
}
// Columns returns the number of columns in the DataFrame.
// Each *Series in the slice is one column.
func (d *DataFrame) Columns() int {
	return len(*d)
}
// Rows returns the number of rows in the DataFrame.
// An empty DataFrame (no columns) has zero rows; the original
// implementation panicked with an index-out-of-range in that case.
// Row count is taken from the first column; all columns are expected
// to have the same length.
func (d *DataFrame) Rows() int {
	if len(*d) == 0 {
		return 0
	}
	return len((*d)[0].Values)
}
// String returns a tabular representation of the DataFrame: a header
// row of column names followed by at most the first 10 data rows.
// Categorical columns render their labels; numeric columns render with
// two decimal places.
//
// Fixes: fmt.Sprintf("\n") (a no-op format call flagged by vet) is
// replaced with the literal, and `== true` is dropped.
func (d *DataFrame) String() string {
	df := *d
	columns := len(df)
	colWidths := []int{}
	output := ""
	// Header row: each column is at least 12 characters wide, or the
	// name length plus padding if the name is longer.
	for c := 0; c < columns; c++ {
		cl := len(df[c].Name) + 2 // add 2 for padding
		if cl < 12 {
			cl = 12
		}
		colWidths = append(colWidths, cl)
		output += fmt.Sprintf("%"+strconv.Itoa(cl)+"v", fmt.Sprintf("%v ", df[c].Name))
	}
	output += "\n"
	rows := len(df[0].Values)
	if rows > 10 {
		rows = 10 // only print 1st 10 rows
	}
	for r := 0; r < rows; r++ {
		for c := 0; c < columns; c++ {
			if df[c].IsCategorical() {
				output += fmt.Sprintf(" %"+strconv.Itoa(colWidths[c]-3)+"s ", df[c].categoricalLabels[df[c].Values[r]])
			} else {
				output += fmt.Sprintf(" %"+strconv.Itoa(colWidths[c]-3)+".2f ", df[c].Values[r])
			}
		}
		output += "\n"
	}
	return output
}
// Standardize scales the values of every non-categorical Series to
// standard form; categorical columns are left untouched.
func (d *DataFrame) Standardize() {
	for _, s := range *d {
		if s.IsCategorical() {
			continue
		}
		s.Standardize()
	}
}
// Describe returns a statistical summary for every non-categorical
// Series in the DataFrame; categorical columns are skipped.
func (d *DataFrame) Describe() []Summary {
	summaries := []Summary{}
	for _, s := range *d {
		if s.IsCategorical() {
			continue
		}
		summaries = append(summaries, s.Describe())
	}
	return summaries
}
// toRow collects row i's value from every column, in column order.
func (d *DataFrame) toRow(i int) []float64 {
	row := make([]float64, 0, len(*d))
	for _, s := range *d {
		row = append(row, s.Values[i])
	}
	return row
}
// dropRow removes row r from every column, processing the columns
// concurrently and waiting for all of them to finish.
func (d *DataFrame) dropRow(r int) {
	var wg sync.WaitGroup
	for _, s := range *d {
		wg.Add(1)
		go func(col *Series) {
			defer wg.Done()
			col.dropRow(r)
		}(s)
	}
	wg.Wait()
}
// maxInt returns the largest value in l, or -1 when l is empty (or
// contains only values below -1) — callers pass non-negative indexes.
func maxInt(l []int) int {
	best := -1
	for i := range l {
		if l[i] > best {
			best = l[i]
		}
	}
	return best
}
// containsInt reports whether i occurs in l (linear scan).
// TODO: make more efficient
func containsInt(i int, l []int) bool {
	for idx := range l {
		if l[idx] == i {
			return true
		}
	}
	return false
}
// containsString reports whether i occurs in l (linear scan).
// TODO: make more efficient
func containsString(i string, l []string) bool {
	for idx := range l {
		if l[idx] == i {
			return true
		}
	}
	return false
}
package day24
import (
"bytes"
"fmt"
"io"
"math"
"aoc"
)
// Eris is one plane of the bug cellular automaton (AoC 2019 day 24).
// a holds the current generation; b is the scratch grid the next
// generation is prepared into before the two are swapped. In recursive
// mode each plane links to an outer (parent) and inner (child) plane,
// depth records the plane's level, and the middle tile acts as the
// portal to the child rather than as a normal tile.
type Eris struct {
	a, b *aoc.Grid
	parent, child *Eris
	recursive bool
	depth int
}
// NewEris builds an empty (all '.') 5x5 plane. With recursive set, the
// middle tile is marked '?' as the portal to an inner plane.
func NewEris(recursive bool) *Eris {
	g := aoc.NewGrid(5, 5)
	for i := range g.Data {
		g.Data[i] = '.'
	}
	if recursive {
		g.Set(aoc.NewPos(2, 2), '?')
	}
	return &Eris{
		a:         g,
		b:         g.Copy(),
		recursive: recursive,
	}
}
// ReadEris parses the initial grid from r, panicking if it cannot be
// read. With recursive set, the middle tile is marked '?' as the
// portal to an inner plane.
func ReadEris(r io.Reader, recursive bool) *Eris {
	grid, err := aoc.ReadGrid(r)
	if err != nil {
		panic(err)
	}
	if recursive {
		grid.Set(aoc.NewPos(2, 2), '?')
	}
	return &Eris{
		a:         grid,
		b:         grid.Copy(),
		recursive: recursive,
	}
}
// SHA256 returns a hash of the current grid state, suitable for
// detecting repeated board configurations.
func (c *Eris) SHA256() string { return c.a.SHA256() }
// TopLevel follows parent links up to the outermost plane.
func (c *Eris) TopLevel() *Eris {
	top := c
	for top.parent != nil {
		top = top.parent
	}
	return top
}
// isBug reports whether pos holds a bug ('#') on this plane.
// A nil plane (missing parent/child) never contains bugs.
func (c *Eris) isBug(pos aoc.Pos) bool {
	if c == nil {
		return false
	}
	return c.a.GetWithDefault(pos, '.') == '#'
}
// countBugs returns how many of the given positions hold bugs. In
// recursive mode the middle tile is the portal to the child plane and
// is never counted.
func (c *Eris) countBugs(positions ...aoc.Pos) int {
	total := 0
	center := aoc.NewPos(2, 2)
	for _, pos := range positions {
		if c.recursive && pos == center {
			// middle cells don't count in recursive mode
			continue
		}
		if c.isBug(pos) {
			total++
		}
	}
	return total
}
// countAdjacentBugs returns the number of bugs adjacent to pos. On a
// flat board these are just the four orthogonal neighbors. In recursive
// mode, tiles on the outer edge additionally neighbor one of the four
// tiles surrounding the parent plane's center, and the four tiles
// ringing this plane's center each neighbor an entire edge (5 tiles)
// of the child plane.
func (c *Eris) countAdjacentBugs(pos aoc.Pos) (n int) {
	// start with neighbors on this plane
	npos := pos.URDL()
	n = c.countBugs(npos[:]...)
	if !c.recursive {
		return
	}
	// outer edges, adjacent to parent plane:
	// each edge of this plane touches the parent tile next to its center
	if pos.Y == 0 && c.parent.isBug(aoc.NewPos(2, 1)) {
		n++
	}
	if pos.X == 0 && c.parent.isBug(aoc.NewPos(1, 2)) {
		n++
	}
	if pos.Y == c.a.Height-1 && c.parent.isBug(aoc.NewPos(2, 3)) {
		n++
	}
	if pos.X == c.a.Width-1 && c.parent.isBug(aoc.NewPos(3, 2)) {
		n++
	}
	// inner edges, adjacent to child plane
	// tile above the center neighbors the child's entire top row
	if pos == aoc.NewPos(2, 1) {
		for x := 0; x < c.a.Width; x++ {
			y := 0
			if c.child.isBug(aoc.NewPos(x, y)) {
				n++
			}
		}
	}
	// tile right of the center neighbors the child's entire right column
	if pos == aoc.NewPos(3, 2) {
		x := c.a.Width - 1
		for y := 0; y < c.a.Height; y++ {
			if c.child.isBug(aoc.NewPos(x, y)) {
				n++
			}
		}
	}
	// tile below the center neighbors the child's entire bottom row
	if pos == aoc.NewPos(2, 3) {
		y := c.a.Height - 1
		for x := 0; x < c.a.Width; x++ {
			if c.child.isBug(aoc.NewPos(x, y)) {
				n++
			}
		}
	}
	// tile left of the center neighbors the child's entire left column
	if pos == aoc.NewPos(1, 2) {
		x := 0
		for y := 0; y < c.a.Height; y++ {
			if c.child.isBug(aoc.NewPos(x, y)) {
				n++
			}
		}
	}
	return
}
// Step advances the automaton by one generation. On a flat board the
// single plane updates in place. In recursive mode all planes update
// simultaneously: a new outer or inner plane is spawned whenever the
// current extreme plane contains bugs, every plane's next state is
// prepared against the consistent current generation, and only then
// are all planes switched over.
func (c *Eris) Step() {
	if !c.recursive {
		c.prepareState()
		c.switchState()
		return
	}
	// move to top plane
	c = c.TopLevel()
	// do we need a higher plane? (bugs here can spread outward)
	if c.a.Count('#') > 0 {
		c.parent = NewEris(true)
		c.parent.child = c
		c.parent.depth = c.depth - 1
		c = c.parent
	}
	// prepare next states on every plane BEFORE switching any of them,
	// so each plane sees its neighbors' current (not next) generation
	for plane := c; plane != nil; plane = plane.child {
		plane.prepareState()
		// do we need a lower plane? (bugs here can spread inward)
		if plane.child == nil && plane.a.Count('#') > 0 {
			plane.child = NewEris(true)
			plane.child.parent = plane
			plane.child.depth = plane.depth + 1
		}
	}
	// switch states
	for plane := c; plane != nil; plane = plane.child {
		plane.switchState()
	}
}
// prepareState writes the next generation into the scratch grid b,
// leaving the current grid a untouched. A bug survives only with
// exactly one adjacent bug; an empty tile spawns a bug when it has one
// or two adjacent bugs. Call switchState afterwards to make b current.
func (c *Eris) prepareState() {
	for y := 0; y < c.a.Height; y++ {
		for x := 0; x < c.a.Width; x++ {
			if c.recursive && x == 2 && y == 2 {
				// never change middle tile in recursive mode
				// (it is the portal to the child plane, not a tile)
				continue
			}
			pos := aoc.NewPos(x, y)
			isBug := c.isBug(pos)
			adjacentBugs := c.countAdjacentBugs(pos)
			if isBug {
				// bugs die unless there is exactly one adjacent bug
				if adjacentBugs != 1 {
					isBug = false
				}
			} else {
				// bugs appear if 1 or 2 adjacent bugs
				if adjacentBugs == 1 || adjacentBugs == 2 {
					isBug = true
				}
			}
			if isBug {
				c.b.Set(pos, '#')
			} else {
				c.b.Set(pos, '.')
			}
		}
	}
}
// switchState promotes the prepared next-generation grid (b) to the
// current grid (a), reusing the old current grid as the next scratch.
func (c *Eris) switchState() {
	c.a, c.b = c.b, c.a
}
// BiodiversityRating scores the flat board: each tile, numbered from 0
// left-to-right then top-to-bottom, contributes 2^index when it holds
// a bug (AoC 2019 day 24 part 1).
// NOTE(review): 1 << i would compute 2^i exactly in integer math, but
// math.Pow appears to be this file's only use of the math import —
// confirm before changing, or the import becomes unused.
func (c *Eris) BiodiversityRating() (n int) {
	for i := 0; i < len(c.a.Data); i++ {
		if c.a.Data[i] != '#' {
			continue
		}
		n += int(math.Pow(2, float64(i)))
	}
	return
}
// RecursiveBugCount returns the total number of bugs summed over every
// plane, from the outermost down to the innermost.
func (c *Eris) RecursiveBugCount() int {
	total := 0
	for plane := c.TopLevel(); plane != nil; plane = plane.child {
		total += plane.a.Count('#')
	}
	return total
}
func (c *Eris) String() string {
b := &bytes.Buffer{}
fmt.Fprintf(b, "Depth %d:\n", c.depth)
c.a.Print(b)
return b.String()
} | go/2019/day24/day24.go | 0.558809 | 0.436922 | day24.go | starcoder |
package planar
import (
"math/big"
"github.com/go-spatial/geom"
)
const (
// Experimental testing produced this result.
	// For finding the intersection we need higher precision
	// than geom.PrecisionLevelBigFloat.
PrecisionLevelBigFloat = 110
)
// AreLinesColinear reports whether l1 and l2 lie on the same infinite
// line and their segments overlap (share at least one point).
//
// Fixes two defects in the original:
//   - denom == 0 only proves the lines are PARALLEL; an offset parallel
//     line whose endpoint happened to fall inside l1's bounding box was
//     wrongly reported colinear. We now also require the cross product
//     of the direction vectors to be zero (same infinite line).
//   - only l2's endpoints were tested against l1's bounding box, so a
//     shorter l1 fully contained in l2 was wrongly reported false. The
//     overlap test is now symmetric.
func AreLinesColinear(l1, l2 geom.Line) bool {
	x1, y1 := l1[0][0], l1[0][1]
	x2, y2 := l1[1][0], l1[1][1]
	x3, y3 := l2[0][0], l2[0][1]
	x4, y4 := l2[1][0], l2[1][1]

	// A non-zero denominator means the lines cross at a single point,
	// so they cannot be colinear.
	denom := ((x1 - x2) * (y3 - y4)) - ((y1 - y2) * (x3 - x4))
	if denom != 0 {
		return false
	}

	// Parallel is not enough: require a point of l2 to be on the line
	// through l1 (zero cross product with l1's direction vector).
	if cross := (x2-x1)*(y3-y1) - (y2-y1)*(x3-x1); cross != 0 {
		return false
	}

	// The segments overlap when an endpoint of one lies within the
	// bounding box of the other.
	inBox := func(px, py, ax, ay, bx, by float64) bool {
		xmin, xmax := ax, bx
		if xmin > xmax {
			xmin, xmax = xmax, xmin
		}
		ymin, ymax := ay, by
		if ymin > ymax {
			ymin, ymax = ymax, ymin
		}
		return xmin <= px && px <= xmax && ymin <= py && py <= ymax
	}
	return inBox(x3, y3, x1, y1, x2, y2) ||
		inBox(x4, y4, x1, y1, x2, y2) ||
		inBox(x1, y1, x3, y3, x4, y4) ||
		inBox(x2, y2, x3, y3, x4, y4)
}
// LineIntersect finds the single intersection point (x,y) between the
// infinite lines through l1 and l2. ok is false when there is not
// exactly one intersection point (the lines are parallel or overlap).
// ref: https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection#Given_two_points_on_each_line
func LineIntersect(l1, l2 geom.Line) ([2]float64, bool) {
	x1, y1 := l1.Point1().X(), l1.Point1().Y()
	x2, y2 := l1.Point2().X(), l1.Point2().Y()
	x3, y3 := l2.Point1().X(), l2.Point1().Y()
	x4, y4 := l2.Point2().X(), l2.Point2().Y()

	denom := ((x1 - x2) * (y3 - y4)) - ((y1 - y2) * (x3 - x4))
	if denom == 0 {
		// Parallel or overlapping: no single intersection point.
		return [2]float64{}, false
	}

	det12 := (x1 * y2) - (y1 * x2)
	det34 := (x3 * y4) - (y3 * x4)
	x := (det12*(x3-x4) - (x1-x2)*det34) / denom
	y := (det12*(y3-y4) - (y1-y2)*det34) / denom
	// Normalize negative zero so callers always see canonical +0.
	if x == 0 {
		x = 0
	}
	if y == 0 {
		y = 0
	}
	return [2]float64{x, y}, true
}
// bigFloat wraps f in a *big.Float at the extra precision
// (PrecisionLevelBigFloat bits) this package uses for intersections.
func bigFloat(f float64) *big.Float { return big.NewFloat(f).SetPrec(PrecisionLevelBigFloat) }
// LineIntersectBigFloat finds the intersection point (x,y) between two
// lines if there is one; ok is true when a single intersection point
// was found. ok being false means there isn't just one intersection
// point: there could be zero, or more than one. Internally uses
// math/big at PrecisionLevelBigFloat bits to reduce the rounding error
// of the plain float64 LineIntersect.
// ref: https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection#Given_two_points_on_each_line
func LineIntersectBigFloat(l1, l2 geom.Line) (pt [2]*big.Float, ok bool) {
	x1, y1 := bigFloat(l1.Point1().X()), bigFloat(l1.Point1().Y())
	x2, y2 := bigFloat(l1.Point2().X()), bigFloat(l1.Point2().Y())
	x3, y3 := bigFloat(l2.Point1().X()), bigFloat(l2.Point1().Y())
	x4, y4 := bigFloat(l2.Point2().X()), bigFloat(l2.Point2().Y())
	deltaX12 := bigFloat(0).Sub(x1, x2)
	deltaX34 := bigFloat(0).Sub(x3, x4)
	deltaY12 := bigFloat(0).Sub(y1, y2)
	deltaY34 := bigFloat(0).Sub(y3, y4)
	// denom = (x1-x2)(y3-y4) - (y1-y2)(x3-x4)
	denom := bigFloat(0).Sub(
		bigFloat(0).Mul(deltaX12, deltaY34),
		bigFloat(0).Mul(deltaY12, deltaX34),
	)
	// The lines are parallel or they overlap. No single point.
	if d, _ := denom.Float64(); d == 0 {
		return pt, false
	}
	// xnom = (x1*y2 - y1*x2)(x3-x4) - (x1-x2)(x3*y4 - y3*x4)
	xnom := bigFloat(0).Sub(
		bigFloat(0).Mul(
			bigFloat(0).Sub(
				bigFloat(0).Mul(x1, y2),
				bigFloat(0).Mul(y1, x2),
			),
			deltaX34,
		),
		bigFloat(0).Mul(
			deltaX12,
			bigFloat(0).Sub(
				bigFloat(0).Mul(x3, y4),
				bigFloat(0).Mul(y3, x4),
			),
		),
	)
	// ynom = (x1*y2 - y1*x2)(y3-y4) - (y1-y2)(x3*y4 - y3*x4)
	ynom := bigFloat(0).Sub(
		bigFloat(0).Mul(
			bigFloat(0).Sub(
				bigFloat(0).Mul(x1, y2),
				bigFloat(0).Mul(y1, x2),
			),
			deltaY34,
		),
		bigFloat(0).Mul(
			deltaY12,
			bigFloat(0).Sub(
				bigFloat(0).Mul(x3, y4),
				bigFloat(0).Mul(y3, x4),
			),
		),
	)
	bx := bigFloat(0).Quo(xnom, denom)
	by := bigFloat(0).Quo(ynom, denom)
	return [2]*big.Float{bx, by}, true
}
// SegmentIntersect will find the intersection point (x,y) between two lines if
// there is one. Ok will be true if it found an intersection point and if the
// point is on both lines (i.e. within both segments, not just on the
// infinite lines through them).
// ref: https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection#Given_two_points_on_each_line
func SegmentIntersect(l1, l2 geom.Line) (pt [2]float64, ok bool) {
	x1, y1 := l1.Point1().X(), l1.Point1().Y()
	x2, y2 := l1.Point2().X(), l1.Point2().Y()
	x3, y3 := l2.Point1().X(), l2.Point1().Y()
	x4, y4 := l2.Point2().X(), l2.Point2().Y()
	deltaX12 := x1 - x2
	deltaX13 := x1 - x3
	deltaX34 := x3 - x4
	deltaY12 := y1 - y2
	deltaY13 := y1 - y3
	deltaY34 := y3 - y4
	// Zero denominator: parallel or overlapping lines — no single point.
	denom := (deltaX12 * deltaY34) - (deltaY12 * deltaX34)
	if denom == 0 {
		return pt, false
	}
	xnom := (((x1 * y2) - (y1 * x2)) * deltaX34) - (deltaX12 * ((x3 * y4) - (y3 * x4)))
	ynom := (((x1 * y2) - (y1 * x2)) * deltaY34) - (deltaY12 * ((x3 * y4) - (y3 * x4)))
	bx := (xnom / denom)
	by := (ynom / denom)
	// Normalize negative zero so callers always see canonical +0.
	if bx == -0 {
		bx = 0
	}
	if by == -0 {
		by = 0
	}
	// t and u are the parametric positions of the intersection along l1
	// and l2 respectively; the point lies on both segments only when
	// both parameters fall within [0, 1].
	t := ((deltaX13 * deltaY34) - (deltaY13 * deltaX34)) / denom
	u := -((deltaX12 * deltaY13) - (deltaY12 * deltaX13)) / denom
	intersects := u >= 0.0 && u <= 1.0 && t >= 0.0 && t <= 1.0
	return [2]float64{bx, by}, intersects
}
package plane
import (
_ "embed"
"image"
_ "image/png"
"math"
"github.com/moniquelive/demoscenetuts/internal/utils"
)
//go:embed texture.png
var textureBytes []byte
type Plane struct {
screenWidth int
screenHeight int
frameCount int
texture *image.Paletted
A, B, C Vector
}
func (p *Plane) Draw(buffer *image.RGBA) {
currentTime := float64(p.frameCount) * 2e4
p.A = NewVector(currentTime/34984.0, -16, currentTime/43512.0)
p.B = rotY(0.32).MulVec(NewVector(256, 0, 0))
p.C = rotY(0.32).MulVec(NewVector(0, 0, 256))
p.drawPlane(buffer)
p.frameCount++
}
func (p *Plane) drawPlane(buffer *image.RGBA) {
Cx := p.B[1]*p.C[2] - p.C[1]*p.B[2]
Cy := p.C[0]*p.B[2] - p.B[0]*p.C[2]
// the 240 represents the distance of the projection plane
// change it to modify the field of view
Cz := (p.B[0]*p.C[1] - p.C[0]*p.B[1]) * 240
Ax := p.C[1]*p.A[2] - p.A[1]*p.C[2]
Ay := p.A[0]*p.C[2] - p.C[0]*p.A[2]
Az := (p.C[0]*p.A[1] - p.A[0]*p.C[1]) * 240
Bx := p.A[1]*p.B[2] - p.B[1]*p.A[2]
By := p.B[0]*p.A[2] - p.A[0]*p.B[2]
Bz := (p.A[0]*p.B[1] - p.B[0]*p.A[1]) * 240
// only render the lower part of the plane, looks ugly above
offs := 105 * 320
for j := float64(105); j < 200; j++ {
// compute the (U,V) coordinates for the start of the line
a := Az + Ay*(j-100) + Ax*-161
b := Bz + By*(j-100) + Bx*-161
c := Cz + Cy*(j-100) + Cx*-161
// quick distance check, if it's too far reduce it
var ic float64
if math.Abs(c) > 65536 {
ic = 1 / c
} else {
ic = 1 / 65536
}
// compute original (U,V)
u := int(a * 16777216 * ic)
v := int(b * 16777216 * ic)
// and the deltas we need to interpolate along this line
du := int(16777216 * Ax * ic)
dv := int(16777216 * Bx * ic)
// start the loop
for i := 0; i < 320; i++ {
palIndex := p.texture.Pix[((v>>8)&0xff00)+((u>>16)&0xff)]
pr, pg, pb, _ := p.texture.Palette[palIndex].RGBA()
buffer.Pix[offs*4+0] = byte(pr)
buffer.Pix[offs*4+1] = byte(pg)
buffer.Pix[offs*4+2] = byte(pb)
offs++
// interpolate
u += du
v += dv
}
}
}
func (p *Plane) Setup() (int, int, int) {
p.texture = utils.LoadBufferPaletted(textureBytes)
p.screenWidth = 320
p.screenHeight = 200
return p.screenWidth, p.screenHeight, 2
} | internal/plane/plane.go | 0.530966 | 0.457924 | plane.go | starcoder |
package linear
import (
"math"
)
// Implementation of a diagonal matrix.
type DiagonalMatrix struct {
data []float64
}
func NewDiagonalMatrixWithDimension(dimension int) (*DiagonalMatrix, error) {
if dimension < 1 {
return nil, notStrictlyPositiveErrorf(float64(dimension))
}
ans := new(DiagonalMatrix)
ans.data = make([]float64, dimension)
return ans, nil
}
func NewDiagonalMatrixFromSlice(data []float64) (*DiagonalMatrix, error) {
return NewDiagonalMatrix(data, true)
}
func NewDiagonalMatrix(data []float64, copyArray bool) (*DiagonalMatrix, error) {
if data == nil {
return nil, invalidArgumentSimpleErrorf()
}
ans := new(DiagonalMatrix)
if copyArray {
ans.data = append([]float64{}, data...)
} else {
ans.data = data
}
return ans, nil
}
func (dm *DiagonalMatrix) Copy() RealMatrix {
ans := new(DiagonalMatrix)
ans.data = append([]float64{}, dm.data...)
return ans
}
func (dm *DiagonalMatrix) Add(mat RealMatrix) RealMatrix {
// Safety check.
if err := checkAdditionCompatible(dm, mat); err != nil {
panic(err)
}
if m, ok := mat.(*DiagonalMatrix); ok {
dim := dm.RowDimension()
outData := make([]float64, dim)
for i := 0; i < dim; i++ {
outData[i] = dm.data[i] + m.data[i]
}
mat := new(DiagonalMatrix)
mat.data = outData
return mat
} else {
rowCount := mat.RowDimension()
columnCount := mat.ColumnDimension()
if rowCount != columnCount {
panic(dimensionsMismatchSimpleErrorf(rowCount, columnCount))
}
out, err := NewDiagonalMatrixWithDimension(rowCount)
if err != nil {
panic(err)
}
for row := 0; row < rowCount; row++ {
for col := 0; col < columnCount; col++ {
out.SetEntry(row, col, dm.At(row, col)+mat.At(row, col))
}
}
return out
}
}
func (dm *DiagonalMatrix) Subtract(mat RealMatrix) RealMatrix {
// Safety check.
if err := checkAdditionCompatible(dm, mat); err != nil {
panic(err)
}
if m, ok := mat.(*DiagonalMatrix); ok {
dim := dm.RowDimension()
outData := make([]float64, dim)
for i := 0; i < dim; i++ {
outData[i] = dm.data[i] - m.data[i]
}
mat := new(DiagonalMatrix)
mat.data = outData
return mat
} else {
rowCount := mat.RowDimension()
columnCount := mat.ColumnDimension()
if rowCount != columnCount {
panic(dimensionsMismatchSimpleErrorf(rowCount, columnCount))
}
out, err := NewDiagonalMatrixWithDimension(rowCount)
if err != nil {
panic(err)
}
for row := 0; row < rowCount; row++ {
for col := 0; col < columnCount; col++ {
out.SetEntry(row, col, dm.At(row, col)-mat.At(row, col))
}
}
return out
}
}
func (dm *DiagonalMatrix) Multiply(mat RealMatrix) RealMatrix {
if err := checkMultiplicationCompatible(dm, mat); err != nil {
panic(err)
}
if m, ok := mat.(*DiagonalMatrix); ok {
dim := dm.RowDimension()
outData := make([]float64, dim)
for i := 0; i < dim; i++ {
outData[i] = dm.data[i] * m.data[i]
}
mat := new(DiagonalMatrix)
mat.data = outData
return mat
} else {
nRows := mat.RowDimension()
nCols := mat.ColumnDimension()
product := make([][]float64, nRows)
for r := 0; r < nRows; r++ {
product[r] = make([]float64, nCols)
for c := 0; c < nCols; c++ {
product[r][c] = dm.data[r] * mat.At(r, c)
}
}
rm := new(Array2DRowRealMatrix)
rm.copyIn(product)
return rm
}
}
func (dm *DiagonalMatrix) Data() [][]float64 {
dim := dm.RowDimension()
out := make([][]float64, dim)
for i := 0; i < dim; i++ {
if out[i] == nil {
out[i] = make([]float64, dim)
}
out[i][i] = dm.data[i]
}
return out
}
func (dm *DiagonalMatrix) DataRef() []float64 {
return dm.data
}
func (dm *DiagonalMatrix) At(row, column int) float64 {
if err := checkMatrixIndex(dm, row, column); err != nil {
panic(err)
}
if row == column {
return dm.data[row]
}
return 0
}
func (dm *DiagonalMatrix) SetEntry(row, column int, value float64) {
if row == column {
if err := checkRowIndex(dm, row); err != nil {
panic(err)
}
dm.data[row] = value
} else {
if err := dm.ensureZero(value); err != nil {
panic(err)
}
}
}
func (dm *DiagonalMatrix) AddToEntry(row, column int, increment float64) {
if row == column {
if err := checkRowIndex(dm, row); err != nil {
panic(err)
}
dm.data[row] += increment
} else {
if err := dm.ensureZero(increment); err != nil {
panic(err)
}
}
}
func (dm *DiagonalMatrix) ensureZero(value float64) error {
if !equalsWithULP(0.0, value, 1) {
return numberIsTooLargeBoundedErrorf(math.Abs(value), 0, true)
}
return nil
}
func (dm *DiagonalMatrix) MultiplyEntry(row, column int, factor float64) {
// we don't care about non-diagonal elements for multiplication
if row == column {
if err := checkRowIndex(dm, row); err != nil {
panic(err)
}
dm.data[row] *= factor
}
}
func (dm *DiagonalMatrix) RowDimension() int {
return len(dm.data)
}
func (dm *DiagonalMatrix) ColumnDimension() int {
return len(dm.data)
}
func (dm *DiagonalMatrix) Operate(v []float64) []float64 {
diag := new(DiagonalMatrix)
diag.data = v
return dm.Multiply(diag).(*DiagonalMatrix).DataRef()
}
func (dm *DiagonalMatrix) OperateVector(vec RealVector) RealVector {
if v, ok := vec.(*ArrayRealVector); ok {
vec := new(ArrayRealVector)
vec.data = append([]float64{}, dm.Operate(v.DataRef())...)
return vec
} else {
nRows := dm.RowDimension()
nCols := dm.ColumnDimension()
if vec.Dimension() != nCols {
panic(dimensionsMismatchSimpleErrorf(vec.Dimension(), nCols))
}
out := make([]float64, nRows)
for row := 0; row < nRows; row++ {
var sum float64
for i := 0; i < nCols; i++ {
sum += dm.At(row, i) * vec.At(i)
}
out[row] = sum
}
vec := new(ArrayRealVector)
vec.data = append([]float64{}, out...)
return vec
}
}
func (dm *DiagonalMatrix) PreMultiply(v []float64) []float64 {
return dm.Operate(v)
}
func (dm *DiagonalMatrix) PreMultiplyVector(vec RealVector) RealVector {
var vectorData []float64
if v, ok := vec.(*ArrayRealVector); ok {
vectorData = v.DataRef()
} else {
vectorData = vec.ToArray()
}
rv, err := NewRealVector(dm.PreMultiply(vectorData))
if err != nil {
panic(err)
}
return rv
}
func (dm *DiagonalMatrix) PreMultiplyMatrix(m RealMatrix) RealMatrix {
return m.Multiply(dm)
}
func (dm *DiagonalMatrix) Inverse() *DiagonalMatrix {
return dm.InverseWithThreshold(0)
}
func (dm *DiagonalMatrix) InverseWithThreshold(threshold float64) *DiagonalMatrix {
if dm.IsSingular(threshold) {
panic(singularMatrixSimpleErrorf())
}
result := make([]float64, len(dm.data))
for i := 0; i < len(dm.data); i++ {
result[i] = 1.0 / dm.data[i]
}
mat := new(DiagonalMatrix)
mat.data = result
return mat
}
func (dm *DiagonalMatrix) IsSingular(threshold float64) bool {
for i := 0; i < len(dm.data); i++ {
if equalsWithError(dm.data[i], 0.0, threshold) {
return true
}
}
return false
}
func (dm *DiagonalMatrix) ColumnAt(column int) []float64 {
if err := checkColumnIndex(dm, column); err != nil {
panic(err)
}
nRows := dm.RowDimension()
out := make([]float64, nRows)
for i := 0; i < nRows; i++ {
out[i] = dm.At(i, column)
}
return out
}
func (dm *DiagonalMatrix) ColumnMatrixAt(column int) RealMatrix {
if err := checkColumnIndex(dm, column); err != nil {
panic(err)
}
nRows := dm.RowDimension()
if 1 != nRows {
panic(dimensionsMismatchSimpleErrorf(1, nRows))
}
out, err := NewDiagonalMatrixWithDimension(1)
if err != nil {
panic(err)
}
for i := 0; i < nRows; i++ {
out.SetEntry(i, 0, dm.At(i, column))
}
return out
}
func (dm *DiagonalMatrix) SetColumnMatrix(column int, matrix RealMatrix) {
if err := checkColumnIndex(dm, column); err != nil {
panic(err)
}
nRows := dm.RowDimension()
if (matrix.RowDimension() != nRows) || (matrix.ColumnDimension() != 1) {
panic(matrixDimensionMismatchErrorf(matrix.RowDimension(), matrix.ColumnDimension(), nRows, 1))
}
for i := 0; i < nRows; i++ {
dm.SetEntry(i, column, matrix.At(i, 0))
}
}
func (dm *DiagonalMatrix) ColumnVectorAt(column int) RealVector {
mat := new(ArrayRealVector)
mat.data = dm.ColumnAt(column)
return mat
}
func (dm *DiagonalMatrix) SetColumnVector(column int, vec RealVector) {
if err := checkColumnIndex(dm, column); err != nil {
panic(err)
}
nRows := dm.RowDimension()
if vec.Dimension() != nRows {
panic(matrixDimensionMismatchErrorf(vec.Dimension(), 1, nRows, 1))
}
for i := 0; i < nRows; i++ {
dm.SetEntry(i, column, vec.At(i))
}
}
func (dm *DiagonalMatrix) Equals(object interface{}) bool {
if object == dm {
return true
}
if _, ok := object.(RealMatrix); !ok {
return false
}
m := object.(RealMatrix)
nRows := dm.RowDimension()
nCols := dm.ColumnDimension()
if m.ColumnDimension() != nCols || m.RowDimension() != nRows {
return false
}
for row := 0; row < nRows; row++ {
for col := 0; col < nCols; col++ {
if dm.At(row, col) != m.At(row, col) {
return false
}
}
}
return true
}
func (dm *DiagonalMatrix) RowAt(row int) []float64 {
if err := checkRowIndex(dm, row); err != nil {
panic(err)
}
nCols := dm.ColumnDimension()
out := make([]float64, nCols)
for i := 0; i < nCols; i++ {
out[i] = dm.At(row, i)
}
return out
}
func (dm *DiagonalMatrix) RowMatrixAt(row int) RealMatrix {
if err := checkRowIndex(dm, row); err != nil {
panic(err)
}
nCols := dm.ColumnDimension()
if 1 != nCols {
panic(dimensionsMismatchSimpleErrorf(1, nCols))
}
out, err := NewDiagonalMatrixWithDimension(1)
if err != nil {
panic(err)
}
for i := 0; i < nCols; i++ {
out.SetEntry(0, i, dm.At(row, i))
}
return out
}
func (dm *DiagonalMatrix) RowVectorAt(row int) RealVector {
mat := new(ArrayRealVector)
mat.data = append([]float64{}, dm.RowAt(row)...)
return mat
}
func (dm *DiagonalMatrix) ScalarAdd(d float64) RealMatrix {
rowCount := dm.RowDimension()
columnCount := dm.ColumnDimension()
if rowCount != columnCount {
panic(dimensionsMismatchSimpleErrorf(rowCount, columnCount))
}
out, err := NewDiagonalMatrixWithDimension(rowCount)
if err != nil {
panic(err)
}
for row := 0; row < rowCount; row++ {
for col := 0; col < columnCount; col++ {
out.SetEntry(row, col, dm.At(row, col)+d)
}
}
return out
}
func (dm *DiagonalMatrix) ScalarMultiply(d float64) RealMatrix {
rowCount := dm.RowDimension()
columnCount := dm.ColumnDimension()
if rowCount != columnCount {
panic(dimensionsMismatchSimpleErrorf(rowCount, columnCount))
}
out, err := NewDiagonalMatrixWithDimension(rowCount)
if err != nil {
panic(err)
}
for row := 0; row < rowCount; row++ {
for col := 0; col < columnCount; col++ {
out.SetEntry(row, col, dm.At(row, col)*d)
}
}
return out
}
func (dm *DiagonalMatrix) SetColumn(column int, array []float64) {
if err := checkColumnIndex(dm, column); err != nil {
panic(err)
}
nRows := dm.RowDimension()
if len(array) != nRows {
panic(matrixDimensionMismatchErrorf(len(array), 1, nRows, 1))
}
for i := 0; i < nRows; i++ {
dm.SetEntry(i, column, array[i])
}
}
func (dm *DiagonalMatrix) SetRow(row int, array []float64) {
if err := checkRowIndex(dm, row); err != nil {
panic(err)
}
nCols := dm.ColumnDimension()
if len(array) != nCols {
panic(matrixDimensionMismatchErrorf(1, len(array), 1, nCols))
}
for i := 0; i < nCols; i++ {
dm.SetEntry(row, i, array[i])
}
}
func (dm *DiagonalMatrix) SetRowMatrix(row int, matrix RealMatrix) {
if err := checkRowIndex(dm, row); err != nil {
panic(err)
}
nCols := dm.ColumnDimension()
if (matrix.RowDimension() != 1) || (matrix.ColumnDimension() != nCols) {
panic(matrixDimensionMismatchErrorf(matrix.RowDimension(), matrix.ColumnDimension(), 1, nCols))
}
for i := 0; i < nCols; i++ {
dm.SetEntry(row, i, matrix.At(0, i))
}
}
func (dm *DiagonalMatrix) SetRowVector(row int, vec RealVector) {
if err := checkRowIndex(dm, row); err != nil {
panic(err)
}
nCols := dm.ColumnDimension()
if vec.Dimension() != nCols {
panic(matrixDimensionMismatchErrorf(1, vec.Dimension(), 1, nCols))
}
for i := 0; i < nCols; i++ {
dm.SetEntry(row, i, vec.At(i))
}
}
// SetSubMatrix copies subMatrix into dm with its top-left corner at
// (row, column). Panics if subMatrix is nil or empty, if its rows are
// ragged, or if it does not fit inside dm at the given position.
func (dm *DiagonalMatrix) SetSubMatrix(subMatrix [][]float64, row, column int) {
	if subMatrix == nil {
		panic(invalidArgumentSimpleErrorf())
	}
	nRows := len(subMatrix)
	if nRows == 0 {
		panic(noDataErrorf(at_least_one_row))
	}
	nCols := len(subMatrix[0])
	if nCols == 0 {
		panic(noDataErrorf(at_least_one_column))
	}
	// All rows must have the same number of columns.
	for r := 1; r < nRows; r++ {
		if len(subMatrix[r]) != nCols {
			panic(dimensionsMismatchSimpleErrorf(nCols, len(subMatrix[r])))
		}
	}
	// Bug fix: these bounds checks previously discarded their error
	// results, so out-of-range targets were never rejected here.
	if err := checkRowIndex(dm, row); err != nil {
		panic(err)
	}
	if err := checkColumnIndex(dm, column); err != nil {
		panic(err)
	}
	if err := checkRowIndex(dm, nRows+row-1); err != nil {
		panic(err)
	}
	if err := checkColumnIndex(dm, nCols+column-1); err != nil {
		panic(err)
	}
	for i := 0; i < nRows; i++ {
		for j := 0; j < nCols; j++ {
			dm.SetEntry(row+i, column+j, subMatrix[i][j])
		}
	}
}
// SubMatrix returns a copy of the region [startRow..endRow] x
// [startColumn..endColumn] (inclusive) as a new diagonal matrix.
// Panics if the indices are invalid or the region is not square.
func (dm *DiagonalMatrix) SubMatrix(startRow, endRow, startColumn, endColumn int) RealMatrix {
	checkSubMatrixIndex(dm, startRow, endRow, startColumn, endColumn)
	height := endRow - startRow + 1
	width := endColumn - startColumn + 1
	if height != width {
		panic(dimensionsMismatchSimpleErrorf(height, width))
	}
	result, err := NewDiagonalMatrixWithDimension(height)
	if err != nil {
		panic(err)
	}
	for r := startRow; r <= endRow; r++ {
		for c := startColumn; c <= endColumn; c++ {
			result.SetEntry(r-startRow, c-startColumn, dm.At(r, c))
		}
	}
	return result
}
// Trace returns the sum of the diagonal entries. Panics if dm is not
// square.
func (dm *DiagonalMatrix) Trace() float64 {
	rows, cols := dm.RowDimension(), dm.ColumnDimension()
	if rows != cols {
		panic(nonSquareMatrixSimpleErrorf(rows, cols))
	}
	sum := 0.0
	for i := 0; i < rows; i++ {
		sum += dm.At(i, i)
	}
	return sum
}
// RealMatrixPreservingVisitorImpl is a function-backed implementation
// of RealMatrixPreservingVisitor: each callback is supplied as a
// closure, so ad-hoc visitors can be built without a new type.
type RealMatrixPreservingVisitorImpl struct {
	s func(int, int, int, int, int, int) // start callback
	v func(int, int, float64)            // per-entry visit callback
	e func() float64                     // end callback; produces the walk's result
}
// Start forwards to the configured start callback with the matrix
// dimensions and the bounds of the region being walked.
func (drmpv *RealMatrixPreservingVisitorImpl) Start(rows, columns, startRow, endRow, startColumn, endColumn int) {
	drmpv.s(rows, columns, startRow, endRow, startColumn, endColumn)
}
// Visit forwards one matrix entry to the configured visit callback.
func (drmpv *RealMatrixPreservingVisitorImpl) Visit(row, column int, value float64) {
	drmpv.v(row, column, value)
}
// End forwards to the configured end callback and returns its result.
func (drmpv *RealMatrixPreservingVisitorImpl) End() float64 {
	return drmpv.e()
}
// Transpose returns the transpose of dm as a new DiagonalMatrix.
// Panics if dm is not square. The previous version named its result
// variable `copy`, shadowing the builtin copy function; it is renamed
// here. Entries are produced by walking dm and writing each (r, c)
// value to (c, r) in the result.
func (dm *DiagonalMatrix) Transpose() RealMatrix {
	nRows := dm.RowDimension()
	nCols := dm.ColumnDimension()
	if nRows != nCols {
		panic(nonSquareMatrixSimpleErrorf(nRows, nCols))
	}
	transposed := new(DiagonalMatrix)
	transposed.data = make([]float64, nCols)
	visitor := new(RealMatrixPreservingVisitorImpl)
	visitor.s = func(int, int, int, int, int, int) {}
	visitor.v = func(row, column int, value float64) {
		transposed.SetEntry(column, row, value)
	}
	visitor.e = func() float64 {
		return 0
	}
	dm.WalkInOptimizedOrder(visitor)
	return transposed
}
// WalkInRowOrder visits every entry of dm row by row, notifying the
// visitor, and returns the visitor's final result.
func (dm *DiagonalMatrix) WalkInRowOrder(visitor RealMatrixPreservingVisitor) float64 {
	nRows, nCols := dm.RowDimension(), dm.ColumnDimension()
	visitor.Start(nRows, nCols, 0, nRows-1, 0, nCols-1)
	for r := 0; r < nRows; r++ {
		for c := 0; c < nCols; c++ {
			visitor.Visit(r, c, dm.At(r, c))
		}
	}
	return visitor.End()
}
// WalkInRowOrderBounded visits, row by row, the entries of dm inside
// the inclusive region bounded by the given indices, and returns the
// visitor's final result. Panics on invalid bounds.
func (dm *DiagonalMatrix) WalkInRowOrderBounded(visitor RealMatrixPreservingVisitor, startRow, endRow, startColumn, endColumn int) float64 {
	checkSubMatrixIndex(dm, startRow, endRow, startColumn, endColumn)
	visitor.Start(dm.RowDimension(), dm.ColumnDimension(), startRow, endRow, startColumn, endColumn)
	for r := startRow; r <= endRow; r++ {
		for c := startColumn; c <= endColumn; c++ {
			visitor.Visit(r, c, dm.At(r, c))
		}
	}
	return visitor.End()
}
// WalkInUpdateRowOrder visits every entry of dm row by row, replacing
// each entry with the visitor's returned value, and returns the
// visitor's final result.
func (dm *DiagonalMatrix) WalkInUpdateRowOrder(visitor RealMatrixChangingVisitor) float64 {
	nRows, nCols := dm.RowDimension(), dm.ColumnDimension()
	visitor.Start(nRows, nCols, 0, nRows-1, 0, nCols-1)
	for r := 0; r < nRows; r++ {
		for c := 0; c < nCols; c++ {
			dm.SetEntry(r, c, visitor.Visit(r, c, dm.At(r, c)))
		}
	}
	return visitor.End()
}
// WalkInUpdateRowOrderBounded visits, row by row, the entries of dm
// inside the inclusive region bounded by the given indices, replacing
// each entry with the visitor's returned value, and returns the
// visitor's final result. Panics on invalid bounds.
func (dm *DiagonalMatrix) WalkInUpdateRowOrderBounded(visitor RealMatrixChangingVisitor, startRow, endRow, startColumn, endColumn int) float64 {
	checkSubMatrixIndex(dm, startRow, endRow, startColumn, endColumn)
	visitor.Start(dm.RowDimension(), dm.ColumnDimension(), startRow, endRow, startColumn, endColumn)
	for r := startRow; r <= endRow; r++ {
		for c := startColumn; c <= endColumn; c++ {
			dm.SetEntry(r, c, visitor.Visit(r, c, dm.At(r, c)))
		}
	}
	return visitor.End()
}
// WalkInUpdateColumnOrder visits every entry of dm column by column,
// replacing each entry with the visitor's returned value, and returns
// the visitor's final result.
func (dm *DiagonalMatrix) WalkInUpdateColumnOrder(visitor RealMatrixChangingVisitor) float64 {
	nRows, nCols := dm.RowDimension(), dm.ColumnDimension()
	visitor.Start(nRows, nCols, 0, nRows-1, 0, nCols-1)
	for c := 0; c < nCols; c++ {
		for r := 0; r < nRows; r++ {
			dm.SetEntry(r, c, visitor.Visit(r, c, dm.At(r, c)))
		}
	}
	return visitor.End()
}
// WalkInColumnOrder visits every entry of dm column by column,
// notifying the visitor, and returns the visitor's final result.
func (dm *DiagonalMatrix) WalkInColumnOrder(visitor RealMatrixPreservingVisitor) float64 {
	nRows, nCols := dm.RowDimension(), dm.ColumnDimension()
	visitor.Start(nRows, nCols, 0, nRows-1, 0, nCols-1)
	for c := 0; c < nCols; c++ {
		for r := 0; r < nRows; r++ {
			visitor.Visit(r, c, dm.At(r, c))
		}
	}
	return visitor.End()
}
// WalkInUpdateColumnOrderBounded visits, column by column, the entries
// of dm inside the inclusive region bounded by the given indices,
// replacing each entry with the visitor's returned value, and returns
// the visitor's final result. Panics on invalid bounds.
func (dm *DiagonalMatrix) WalkInUpdateColumnOrderBounded(visitor RealMatrixChangingVisitor, startRow, endRow, startColumn, endColumn int) float64 {
	checkSubMatrixIndex(dm, startRow, endRow, startColumn, endColumn)
	visitor.Start(dm.RowDimension(), dm.ColumnDimension(), startRow, endRow, startColumn, endColumn)
	for c := startColumn; c <= endColumn; c++ {
		for r := startRow; r <= endRow; r++ {
			dm.SetEntry(r, c, visitor.Visit(r, c, dm.At(r, c)))
		}
	}
	return visitor.End()
}
// WalkInColumnOrderBounded visits, column by column, the entries of dm
// inside the inclusive region bounded by the given indices, and
// returns the visitor's final result. Panics on invalid bounds.
func (dm *DiagonalMatrix) WalkInColumnOrderBounded(visitor RealMatrixPreservingVisitor, startRow, endRow, startColumn, endColumn int) float64 {
	checkSubMatrixIndex(dm, startRow, endRow, startColumn, endColumn)
	visitor.Start(dm.RowDimension(), dm.ColumnDimension(), startRow, endRow, startColumn, endColumn)
	for c := startColumn; c <= endColumn; c++ {
		for r := startRow; r <= endRow; r++ {
			visitor.Visit(r, c, dm.At(r, c))
		}
	}
	return visitor.End()
}
// WalkInUpdateOptimizedOrder delegates to row order, which is the
// natural layout for this matrix type.
func (dm *DiagonalMatrix) WalkInUpdateOptimizedOrder(visitor RealMatrixChangingVisitor) float64 {
	return dm.WalkInUpdateRowOrder(visitor)
}
// WalkInOptimizedOrder delegates to row order.
func (dm *DiagonalMatrix) WalkInOptimizedOrder(visitor RealMatrixPreservingVisitor) float64 {
	return dm.WalkInRowOrder(visitor)
}
// WalkInUpdateOptimizedOrderBounded delegates to bounded row order.
func (dm *DiagonalMatrix) WalkInUpdateOptimizedOrderBounded(visitor RealMatrixChangingVisitor, startRow, endRow, startColumn, endColumn int) float64 {
	return dm.WalkInUpdateRowOrderBounded(visitor, startRow, endRow, startColumn, endColumn)
}
// WalkInOptimizedOrderBounded delegates to bounded row order.
func (dm *DiagonalMatrix) WalkInOptimizedOrderBounded(visitor RealMatrixPreservingVisitor, startRow, endRow, startColumn, endColumn int) float64 {
	return dm.WalkInRowOrderBounded(visitor, startRow, endRow, startColumn, endColumn)
} | diagonal_matrix.go | 0.818701 | 0.744285 | diagonal_matrix.go | starcoder |
package opc
// Spatial Stripes
// Creates spatial sine wave stripes: x in the red channel, y--green, z--blue
// Also makes a white dot which moves down the strip non-spatially in the order
// that the LEDs are indexed.
import (
"github.com/longears/pixelslinger/colorutils"
"github.com/longears/pixelslinger/midi"
"math"
"time"
)
// MakePatternAqua returns a ByteThread that fills each incoming RGB
// byte buffer with an animated aqua palette (light cyan, cyan,
// aquamarine, teal) plus blue/white "bubble" highlights. locations
// holds (x, y, z) triples, one per pixel, indexed in step with the
// 3-byte pixel slots of the buffer.
func MakePatternAqua(locations []float64) ByteThread {
	return func(bytesIn chan []byte, bytesOut chan []byte, midiState *midi.MidiState) {
		for bytes := range bytesIn {
			n_pixels := len(bytes) / 3
			// Seconds since an arbitrary epoch offset; drives the animation.
			t := float64(time.Now().UnixNano())/1.0e9 - 9.4e8
			// fill in bytes slice
			for ii := 0; ii < n_pixels; ii++ {
				//--------------------------------------------------------------------------------
				// make moving stripes for x, y, and z
				x := locations[ii*3+0]
				y := locations[ii*3+1]
				z := locations[ii*3+2]
				//r := colorutils.Cos(x, t/4, 1, 0, 0.7) // offset, period, min, max
				// Four slow sine layers, one per palette color.
				s1 := colorutils.Cos((z*0.3)*(y*0.3)*(x*0.3) , t/8, 1, 0.05, 0.7)
				s3 := colorutils.Cos(x*0.2 + y*0.8, t/16, 1, 0.05, 0.5)
				s2 := colorutils.Cos(z*0.2, t/20, 1, 0.05, 0.8)
				s4 := colorutils.Cos(y+0.1, t/40, 1, 0.05, 0.4)
				r:= 0.0
				g:= 0.0
				b:= 0.0
				// number of colors
				nc := 4.0
				// bubble strength (inverse, greater is less bubbles)
				bs := 2.0
				//lightcyan
				r += (0.875*s1) / nc
				g += (1.000*s1) / nc
				b += (1.000*s1) / nc
				//cyan
				r += (0.000*s2) / nc
				g += (1.000*s2) / nc
				b += (1.000*s2) / nc
				// aquamarine
				r += (0.495 *s3) / nc
				g += (1.000 *s3) / nc
				b += (0.831 *s3) / nc
				// teal
				r += (0.000 *s4) / nc
				g += (0.600 *s4) / nc
				b += (0.700 *s4) / nc
				// bluebubbles
				// NOTE(review): math.Pow with a negative base (z*t can be
				// negative) and a fractional exponent yields NaN, which then
				// feeds Cos as the offset — confirm FloatToByte/Cos tolerate
				// NaN or that z*t is always non-negative here.
				pow1 := math.Pow((z*t), 0.99)
				pow2 := math.Pow((z*t), 0.985)
				b += colorutils.Cos(z, pow1 , 10, 0.0, 0.5) / bs
				// whitebubbles
				sb := colorutils.Cos(z, pow2, 50, 0.0, 0.5) / bs
				r += sb
				g += sb
				b += sb
				if z<0.0 {
					// Below z = 0 the pixel is overwritten (not blended) with a
					// brightened aquamarine.
					r = ((0.495 *s3) *2) +0.3
					g = ((1.000 *s3) *2) +0.3
					b = ((0.831 *s3) *2) +0.3
				}
				bytes[ii*3+0] = colorutils.FloatToByte(r)
				bytes[ii*3+1] = colorutils.FloatToByte(g)
				bytes[ii*3+2] = colorutils.FloatToByte(b)
				//--------------------------------------------------------------------------------
			}
			bytesOut <- bytes
		}
	}
} | opc/pattern-aqua.go | 0.570571 | 0.533884 | pattern-aqua.go | starcoder |
package binarytree
import "fmt"
// BTree is a binary search tree; it holds only a pointer to the root
// Node (nil for an empty tree).
type BTree struct {
	Root *Node
}
// calculateDepth is the recursive helper behind BTree.Depth: it
// returns depth plus the height of the subtree rooted at n.
func calculateDepth(n *Node, depth int) int {
	if n == nil {
		return depth
	}
	left := calculateDepth(n.left, depth+1)
	right := calculateDepth(n.right, depth+1)
	return Max(left, right)
}
// Insert adds val to the BST rooted at root and returns the (possibly
// new) root. Duplicates go to the right subtree.
func Insert(root *Node, val int) *Node {
	if root == nil {
		return NewNode(val)
	}
	if val >= root.val {
		root.right = Insert(root.right, val)
	} else {
		root.left = Insert(root.left, val)
	}
	return root
}
// Depth returns the height of the tree: the number of nodes on the
// longest root-to-leaf path (0 for an empty tree).
func (t *BTree) Depth() int {
	return calculateDepth(t.Root, 0)
}
// InOrder prints the values of the subtree rooted at n in in-order
// (left, node, right) sequence, each followed by a space.
func InOrder(n *Node) {
	if n == nil {
		return
	}
	InOrder(n.left)
	fmt.Print(n.val, " ")
	InOrder(n.right)
}
// InOrderSuccessor returns the leftmost (minimum-valued) node of the
// subtree rooted at root; callers pass a node's right child to obtain
// that node's in-order successor.
func InOrderSuccessor(root *Node) *Node {
	node := root
	for node.left != nil {
		node = node.left
	}
	return node
}
// BstDelete removes the node holding val from the BST rooted at root
// and returns the (possibly new) root. If val is absent the tree is
// unchanged.
func BstDelete(root *Node, val int) *Node {
	if root == nil {
		return nil
	}
	if val < root.val {
		root.left = BstDelete(root.left, val)
	} else if val > root.val {
		root.right = BstDelete(root.right, val)
	} else {
		// this is the node to delete
		// node with one child
		if root.left == nil {
			return root.right
		} else if root.right == nil {
			return root.left
		} else {
			// Two children: instead of the classic value-swap with the
			// in-order successor, the whole left subtree is spliced in as
			// the left child of the right subtree's minimum node (whose
			// left pointer is nil by definition). Ordering is preserved
			// because every key in root.left is smaller than that minimum.
			n := root.right
			d := InOrderSuccessor(n)
			d.left = root.left
			return root.right
		}
	}
	return root
}
// PreOrder prints the values of the subtree rooted at n in pre-order
// (node, left, right) sequence, each followed by a space.
func PreOrder(n *Node) {
	if n != nil {
		fmt.Print(n.val, " ")
		PreOrder(n.left)
		PreOrder(n.right)
	}
}
// PostOrder prints the values of the subtree rooted at n in post-order
// (left, right, node) sequence, each followed by a space.
func PostOrder(n *Node) {
	if n != nil {
		PostOrder(n.left)
		PostOrder(n.right)
		fmt.Print(n.val, " ")
	}
}
// LevelOrder prints the tree's values in breadth-first (level) order,
// each followed by a space, using an explicit queue. Like the
// original, it assumes root is non-nil.
func LevelOrder(root *Node) {
	queue := []*Node{root}
	for len(queue) > 0 {
		node := queue[0]
		queue = queue[1:]
		fmt.Print(node.val, " ")
		if node.left != nil {
			queue = append(queue, node.left)
		}
		if node.right != nil {
			queue = append(queue, node.right)
		}
	}
}
// Max returns the larger of two ints.
func Max(a, b int) int {
	if a > b {
		return a
	}
	return b
} | data_structures/binary_tree/btree.go | 0.840259 | 0.567937 | btree.go | starcoder |
package mui
import (
"bytes"
"image"
"path"
"time"
"github.com/blitzprog/imageoutput"
)
const (
	// BookImageLargeWidth is the minimum width in pixels of a large book image.
	BookImageLargeWidth = 512
	// BookImageLargeHeight is the minimum height in pixels of a large book image.
	BookImageLargeHeight = 512
	// BookImageMediumWidth is the minimum width in pixels of a medium book image.
	BookImageMediumWidth = 256
	// BookImageMediumHeight is the minimum height in pixels of a medium book image.
	BookImageMediumHeight = 256
	// BookImageWebPQuality is the WebP quality of book images.
	BookImageWebPQuality = 70
	// BookImageJPEGQuality is the JPEG quality of book images.
	BookImageJPEGQuality = 70
	// BookImageQualityBonusLowDPI is added to the base quality for all
	// outputs below (presumably because they serve low-DPI displays —
	// TODO confirm).
	BookImageQualityBonusLowDPI = 12
	// BookImageQualityBonusLarge is the extra quality granted to large outputs.
	BookImageQualityBonusLarge = 10
	// BookImageQualityBonusMedium is the extra quality granted to medium outputs.
	BookImageQualityBonusMedium = 15
	// BookImageQualityBonusSmall is the extra quality for small outputs;
	// NOTE(review): no small output is defined in bookImageOutputs, so
	// this constant appears unused here.
	BookImageQualityBonusSmall = 15
)
// bookImageOutputs lists every file that is written when a book image
// is set: the untouched original plus JPEG and WebP renditions at
// large and medium sizes. Width/Height of 0 means "keep original size".
var bookImageOutputs = []imageoutput.Output{
	// Original at full size
	&imageoutput.OriginalFile{
		Directory: path.Join(Root, "images/books/original/"),
		Width:     0,
		Height:    0,
	},
	// JPEG - Large
	&imageoutput.JPEGFile{
		Directory: path.Join(Root, "images/books/large/"),
		Width:     BookImageLargeWidth,
		Height:    BookImageLargeHeight,
		Quality:   BookImageJPEGQuality + BookImageQualityBonusLowDPI + BookImageQualityBonusLarge,
	},
	// JPEG - Medium
	&imageoutput.JPEGFile{
		Directory: path.Join(Root, "images/books/medium/"),
		Width:     BookImageMediumWidth,
		Height:    BookImageMediumHeight,
		Quality:   BookImageJPEGQuality + BookImageQualityBonusLowDPI + BookImageQualityBonusMedium,
	},
	// WebP - Large
	&imageoutput.WebPFile{
		Directory: path.Join(Root, "images/books/large/"),
		Width:     BookImageLargeWidth,
		Height:    BookImageLargeHeight,
		Quality:   BookImageWebPQuality + BookImageQualityBonusLowDPI + BookImageQualityBonusLarge,
	},
	// WebP - Medium
	&imageoutput.WebPFile{
		Directory: path.Join(Root, "images/books/medium/"),
		Width:     BookImageMediumWidth,
		Height:    BookImageMediumHeight,
		Quality:   BookImageWebPQuality + BookImageQualityBonusLowDPI + BookImageQualityBonusMedium,
	},
}
// SetImageBytes decodes the given raw image file bytes and updates the
// book image via SetImage. Returns a decode error if the data is not a
// recognized image format.
func (book *Book) SetImageBytes(data []byte) error {
	img, format, err := image.Decode(bytes.NewReader(data))
	if err != nil {
		return err
	}
	meta := &imageoutput.MetaImage{
		Image:  img,
		Format: format,
		Data:   data,
	}
	return book.SetImage(meta)
}
// SetImage writes the image to every configured output and records the
// image's extension, dimensions, average color and modification time
// on the book.
//
// NOTE(review): if several outputs fail, only the LAST error is
// returned and earlier ones are silently dropped; the metadata fields
// are updated even when saving failed.
func (book *Book) SetImage(metaImage *imageoutput.MetaImage) error {
	var lastError error
	// Save the different image formats
	for _, output := range bookImageOutputs {
		err := output.Save(metaImage, book.ID)
		if err != nil {
			lastError = err
		}
	}
	book.Image.Extension = metaImage.Extension()
	book.Image.Width = metaImage.Image.Bounds().Dx()
	book.Image.Height = metaImage.Image.Bounds().Dy()
	book.Image.AverageColor = GetAverageColor(metaImage.Image)
	book.Image.LastModified = time.Now().Unix()
	return lastError
} | mui/BookImage.go | 0.660391 | 0.455017 | BookImage.go | starcoder |
package bits
// BitwiseLSBCount returns the number of set bits in x. It tests the
// least-significant bit of a working copy and shifts right until no
// bits remain, so it always runs once per remaining bit position.
func BitwiseLSBCount(x uint32) uint32 {
	var count uint32
	for v := x; v != 0; v >>= 1 {
		count += v & 1
	}
	return count
}
// KerninghamBitCount returns the number of set bits in x using
// Kernighan's trick: x &= x - 1 clears the lowest set bit, so the loop
// body executes exactly once per set bit.
func KerninghamBitCount(x uint32) uint32 {
	var count uint32
	for x != 0 {
		x &= x - 1
		count++
	}
	return count
}
// bitCountTable[b] holds the number of set bits in the byte b. It is
// computed once at package initialization; the previous implementation
// rebuilt the whole 256-entry table on every call.
var bitCountTable = func() [256]uint8 {
	var tbl [256]uint8
	for i := 1; i < 256; i++ {
		tbl[i] = uint8(i&1) + tbl[i/2]
	}
	return tbl
}()

// LookupTblBitCount returns the number of set bits in x using the
// precomputed 256-entry lookup table, consuming one byte of x per
// iteration (at most four iterations).
func LookupTblBitCount(x uint32) uint32 {
	var cnt uint32
	for x != 0 {
		cnt += uint32(bitCountTable[x&0xff])
		x >>= 8
	}
	return cnt
}
/*
PopulationCount returns the number of bits set in a given unsigned integer
Description:
i = i - ((i >> 1) & 0x55555555);
First of all, the significance of the constant 0x55555555 is that, written
using the Java / GCC style binary literal notation),
0x55555555 = 0b01010101010101010101010101010101
That is, all its odd-numbered bits (counting the lowest bit as bit 1 = odd)
are 1, and all the even-numbered bits are 0.
The expression ((i >> 1) & 0x55555555) thus shifts the bits of i right by
one, and then sets all the even-numbered bits to zero. (Equivalently, we
could've first set all the odd-numbered bits of i to zero with & 0xAAAAAAAA
and then shifted the result right by one bit.) For convenience, let's call
this intermediate value j.
What happens when we subtract this j from the original i? Well, let's see
what would happen if i had only two bits:
i j i - j
----------------------------------
0 = 0b00 0 = 0b00 0 = 0b00
1 = 0b01 0 = 0b00 1 = 0b01
2 = 0b10 1 = 0b01 1 = 0b01
3 = 0b11 1 = 0b01 2 = 0b10
Hey! We've managed to count the bits of our two-bit number!
OK, but what if i has more than two bits set? In fact, it's pretty easy to
check that the lowest two bits of i - j will still be given by the table
above, and so will the third and fourth bits, and the fifth and sixth bits,
and so and. In particular:
despite the >> 1, the lowest two bits of i - j are not affected by the
third or higher bits of i, since they'll be masked out of
j by the & 0x55555555; and since the lowest two bits of j can never have a
greater numerical value than those of i, the subtraction will never borrow
from the third bit of i: thus, the lowest two bits of i also cannot affect
the third or higher bits of i - j.
In fact, by repeating the same argument, we can see that the calculation on
this line, in effect, applies the table above to each of the 16 two-bit
blocks in i in parallel. That is, after executing this line, the lowest two
bits of the new value of i will now contain the number of bits set among
the corresponding bits in the original value of i, and so will the next two
bits, and so on.
Line 2:
i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
Compared to the first line, this one's quite simple. First, note that
0x33333333 = 0b00110011001100110011001100110011
Thus, i & 0x33333333 takes the two-bit counts calculated above and throws
away every second one of them, while (i >> 2) & 0x33333333 does the same
after shifting i right by two bits. Then we add the results together.
Thus, in effect, what this line does is take the bitcounts of the lowest
two and the second-lowest two bits of the original input, computed on the
previous line, and add them together to give the bitcount of the lowest
four bits of the input. And, again, it does this in parallel for all the 8
four-bit blocks (= hex digits) of the input.
Line 3:
return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
OK, what's going on here?
Well, first of all, (i + (i >> 4)) & 0x0F0F0F0F does exactly the same as
the previous line, except it adds the adjacent four-bit bitcounts together
to give the bitcounts of each eight-bit block (i.e. byte) of the input.
(Here, unlike on the previous line, we can get away with moving the
& outside the addition, since we know that the eight-bit bitcount can never
exceed 8, and therefore will fit inside four bits without overflowing.)
Now we have a 32-bit number consisting of four 8-bit bytes, each byte
holding the number of 1-bit in that byte of the original input. (Let's call
these bytes A, B, C and D.) So what happens when we multiply this value
(let's call it k) by 0x01010101?
Well, since 0x01010101 = (1 << 24) + (1 << 16) + (1 << 8) + 1, we have:
k * 0x01010101 = (k << 24) + (k << 16) + (k << 8) + k
Thus, the highest byte of the result ends up being the sum of:
its original value, due to the k term, plus
the value of the next lower byte, due to the k << 8 term, plus
the value of the second lower byte, due to the k << 16 term, plus
the value of the fourth and lowest byte, due to the k << 24 term.
(In general, there could also be carries from lower bytes, but since we
know the value of each byte is at most 8, we know the addition will never
overflow and create a carry.)
That is, the highest byte of k * 0x01010101 ends up being the sum of the
bitcounts of all the bytes of the input, i.e. the total bitcount of the
32-bit input number. The final >> 24 then simply shifts this value down
from the highest byte to the lowest.
Ps. This code could easily be extended to 64-bit integers, simply by
changing the 0x01010101 to 0x0101010101010101 and the >> 24 to >> 56.
Indeed, the same method would even work for 128-bit integers; 256 bits
would require adding one extra shift / add / mask step, however, since the
number 256 no longer quite fits into an 8-bit byte.
Example:
Consider only 8 bits in which case this algorithm would need only
3 bit masks.
Let x = 11111111
mask1 = 0x01010101 (0x55) pair of one 0 and 1
mask2 = 0x00110011 (0x33) pair of two 0 and 1
mask3 = 0x00001111 (0x0f) pair of four 0 and 1
Step 1:
x = (x & mask1) + ((x >> 1) & mask1)
11111111 & 01010101 = 01010101
+
01111111 & 01010101 = 01010101
x = 10101010
Step 2:
x = (x & mask2) + ((x >> 2) & mask2)
10101010 & 00110011 = 00100010
00101010 & 00110011 = 00100010
x = 01000100
Step 3:
x = (x & mask3) + ((x >> 4) & mask3)
01000100 & 00001111 = 00000100
00000100 & 00001111 = 00000100
x = 00001000 = 8
To calculate 8 bits we took 3 masks therefore worst case complexity is
O(logn)
*/
// PopulationCount returns the number of set bits in x using the
// classic parallel (divide-and-conquer) bit count; the derivation is
// in the comment block above. Each step sums adjacent groups of bits
// twice the previous width, in O(log 32) masked operations.
func PopulationCount(x uint32) uint32 {
	var mask1 uint32
	mask1 = 0x55555555 // alternating 1-bit groups: 0101...
	var mask2 uint32
	mask2 = 0x33333333 // alternating 2-bit groups: 0011...
	var mask3 uint32
	mask3 = 0x0f0f0f0f // alternating 4-bit groups
	var mask4 uint32
	mask4 = 0x00ff00ff // alternating 8-bit groups
	var mask5 uint32
	mask5 = 0x0000ffff // low 16 bits
	x = (x & mask1) + ((x >> 1) & mask1)   // 2-bit sums
	x = (x & mask2) + ((x >> 2) & mask2)   // 4-bit sums
	x = (x & mask3) + ((x >> 4) & mask3)   // 8-bit sums
	x = (x & mask4) + ((x >> 8) & mask4)   // 16-bit sums
	x = (x & mask5) + ((x >> 16) & mask5)  // final 32-bit sum
	return x
} | bits/numBitsSet.go | 0.669637 | 0.777384 | numBitsSet.go | starcoder |
package talibcdl
import (
"math"
)
// Series provides indexed access to OHLC (open/high/low/close) price
// data for a sequence of candles.
type Series interface {
	Len() int              // number of candles in the series
	High(i int) float64    // high price of candle i
	Open(i int) float64    // open price of candle i
	Close(i int) float64   // close price of candle i
	Low(i int) float64     // low price of candle i
}
// SimpleSeries is a plain slice-backed implementation of Series. All
// slices are assumed to share the same length (Len reports only
// len(Highs) — TODO confirm callers keep them in sync). Volumes and
// Rands are carried along but not exposed via the Series interface.
type SimpleSeries struct {
	Highs []float64
	Opens []float64
	Closes []float64
	Lows []float64
	Volumes []float64
	Rands []float64
}
// Len returns the number of candles, taken from the Highs slice.
func (s SimpleSeries) Len() int {
	return len(s.Highs)
}
// High returns the high price of candle i.
func (s SimpleSeries) High(i int) float64 {
	return s.Highs[i]
}
// Open returns the open price of candle i.
func (s SimpleSeries) Open(i int) float64 {
	return s.Opens[i]
}
// Close returns the close price of candle i.
func (s SimpleSeries) Close(i int) float64 {
	return s.Closes[i]
}
// Low returns the low price of candle i.
func (s SimpleSeries) Low(i int) float64 {
	return s.Lows[i]
}
// enhancedSeries wraps a Series with candlestick-geometry helpers used
// by the pattern recognizers.
type enhancedSeries struct {
	Series
}
// average returns the setting's scaled reference value for candle i:
// the candle's own range (per st.rangeType), or sum/avgPeriod when an
// averaging period is configured; shadows-type ranges are halved.
func (s enhancedSeries) average(st candleSetting, sum float64, i int) float64 {
	a := s.rangeOf(st, i)
	if st.avgPeriod != 0.0 {
		a = sum / float64(st.avgPeriod)
	}
	b := 1.0
	if st.rangeType == rangeTypeShadows {
		b = 2.0
	}
	return st.factor * a / b
}
// candleColor classifies candle i: white when close >= open, black
// otherwise.
func (s enhancedSeries) candleColor(i int) candleColor {
	if s.Close(i) >= s.Open(i) {
		return candleColorWhite
	} else {
		return candleColorBlack
	}
}
// highLowRange returns the full high-to-low extent of candle i.
func (s enhancedSeries) highLowRange(i int) float64 {
	return s.High(i) - s.Low(i)
}
// isCandleGapDown reports whether candle i1's high lies entirely below
// candle i2's low.
func (s enhancedSeries) isCandleGapDown(i1, i2 int) bool {
	return s.High(i1) < s.Low(i2)
}
// isCandleGapUp reports whether candle i1's low lies entirely above
// candle i2's high.
func (s enhancedSeries) isCandleGapUp(i1, i2 int) bool {
	return s.Low(i1) > s.High(i2)
}
// lowerShadow returns the length of candle i's lower wick.
func (s enhancedSeries) lowerShadow(i int) float64 {
	return math.Min(s.Close(i), s.Open(i)) - s.Low(i)
}
// rangeOf delegates to the setting's range type (real body, high/low,
// or shadows) for candle i.
func (s enhancedSeries) rangeOf(st candleSetting, i int) float64 {
	return st.rangeType.rangeOf(s, i)
}
// realBody returns the absolute open-to-close extent of candle i.
func (s enhancedSeries) realBody(i int) float64 {
	return math.Abs(s.Close(i) - s.Open(i))
}
// realBodyGapDown reports whether candle i2's real body lies entirely
// below candle i1's real body (note the (i2, i1) parameter order).
func (s enhancedSeries) realBodyGapDown(i2, i1 int) bool {
	return math.Max(s.Open(i2), s.Close(i2)) < math.Min(s.Open(i1), s.Close(i1))
}
// realBodyGapUp reports whether candle i2's real body lies entirely
// above candle i1's real body (note the (i2, i1) parameter order).
func (s enhancedSeries) realBodyGapUp(i2, i1 int) bool {
	return math.Min(s.Open(i2), s.Close(i2)) > math.Max(s.Open(i1), s.Close(i1))
}
// upperShadow returns the length of candle i's upper wick.
func (s enhancedSeries) upperShadow(i int) float64 {
	return s.High(i) - (math.Max(s.Close(i), s.Open(i)))
}
// candleColor encodes candle direction: positive for white (bullish),
// negative for black (bearish).
type candleColor int
// Candle color constants. Both are now explicitly typed as
// candleColor; candleColorBlack previously omitted the type and so
// defaulted to an untyped integer constant.
const (
	candleColorWhite candleColor = 1
	candleColorBlack candleColor = -1
)
// isBlack reports whether c marks a bearish (close < open) candle.
func (c candleColor) isBlack() bool {
	return c == candleColorBlack
}
// isWhite reports whether c marks a bullish (close >= open) candle.
func (c candleColor) isWhite() bool {
	return c == candleColorWhite
} | series.go | 0.751375 | 0.402304 | series.go | starcoder |
package prng
// https://en.wikipedia.org/wiki/Lehmer_random_number_generator
import (
"math"
"math/bits"
)
// A MCG implements a 64-bit multiplicative congruential pseudorandom
// number generator (MCG) modulo 2^64 with 64-bit state and maximum
// period of 2^62.
type MCG struct {
	state uint64
}
// Steele and Vigna https://arxiv.org/pdf/2001.05304.pdf:
// For a MCG with modulus of power of two, the state must be odd for
// maximun period 2^64 / 4 = 2^62.
// Seed initializes the generator state from seed: the seed is mixed
// through Splitmix and the result is forced odd (| 1), the requirement
// noted above for the maximum period.
func (x *MCG) Seed(seed uint64) {
	x.state = Splitmix(&seed) | 1
}
// NewMCG returns an MCG seeded from the given value.
func NewMCG(seed uint64) MCG {
	x := MCG{}
	x.Seed(seed)
	return x
}
// Uint64 returns a pseudo-random uint64 by MCG mod 2^64.
// The multiplier is picked from Table 6 in Steele & Vigna. Without the
// xor-rotate scrambler, the last bits are not uniformly distributed.
// This is a very fast generator, but not properly tested or proved
// to give anything good.
//
// Note the ordering: the scrambled CURRENT state is returned, and the
// state is advanced afterwards.
func (x *MCG) Uint64() (next uint64) {
	next = x.state ^ bits.RotateLeft64(x.state, 27)
	x.state *= 0x83b5b142866da9d5
	return
}
// Alternative scrambler
// next = x.state ^ (x.state >> 17)
// Uint64 compiles to 7 instructions + in and out.
// 00000 MOVQ "".x+8(SP), AX
// 00005 MOVQ (AX), CX
// 00008 MOVQ $-8956057384675071531, DX
// 00018 IMULQ CX, DX
// 00022 MOVQ DX, (AX)
// 00025 MOVQ CX, AX
// 00028 ROLQ $27, CX
// 00032 XORQ CX, AX
// 00035 MOVQ AX, "".next+16(SP)
// Uint64Mul returns a pseudo-random uint64 using a full 128-bit
// multiply: the next state is the low 64 bits of the product, and the
// output mixes the high and low halves.
//
func (x *MCG) Uint64Mul() (next uint64) {
	hi, lo := bits.Mul64(x.state, 0x83b5b142866da9d5)
	next = hi ^ lo
	x.state = lo
	return
}
// Uint64Mul compiles to 5 instructions + in and out, but is not faster.
// 00000 MOVQ "".x+8(SP), CX
// 00005 MOVQ (CX), AX
// 00008 MOVQ $-8956057384675071531, DX
// 00018 MULQ DX
// 00021 MOVQ AX, (CX)
// 00024 XORQ AX, DX
// 00027 MOVQ DX, "".next+16(SP)
// Lehmer64 is the pure Lehmer generator: it advances the state by the
// multiplier and returns it with no output scrambling (the low bits
// are therefore weak).
func (x *MCG) Lehmer64() uint64 {
	x.state *= 0x83b5b142866da9d5
	return x.state
}
// Float64 returns a uniformly distributed pseudo-random float64 from [0, 1).
// The distribution is 2^53 evenly spaced floats with spacing 2^-53.
// Float64 uses multiplicative congruential pseudorandom number generator (MCG)
// mod 2^64. 53 high bits of the MCG are considered good enough for a fast float64,
// but they don't pass random tests for the last ~3 bits.
//
// Note: the value is built from the CURRENT state before advancing it.
func (x *MCG) Float64() (next float64) {
	next = float64(x.state >> 11) * 0x1p-53
	x.state *= 0x83b5b142866da9d5
	return
}
// Float64_64 returns a uniformly distributed pseudo-random float64 from [0, 1).
// The distribution includes all floats in [2^-12, 1) and 2^52 evenly spaced
// floats in [0, 2^-12) with spacing 2^-64.
// The leading-zero count of the raw 64-bit draw selects the exponent,
// and the remaining bits fill the mantissa directly via Float64frombits.
// This function inlines ok.
//
func (x *MCG) Float64_64() float64 {
	u := x.Uint64()
	if u == 0 { return 0 } // without this the smallest returned is 2^-65
	z := uint64(bits.LeadingZeros64(u)) + 1
	return math.Float64frombits((1023 - z) << 52 | u << z >> 12)
} | mcg.go | 0.70477 | 0.464416 | mcg.go | starcoder |
package trie
/*
* ASCII tries are a special case because the maximum size of the
* children is already known.
* Mulitple implementations are provided, depending on the need:
* - ASCIITrie: simple naive approach. Use for small alphabets.
* - ASCIIReduxTrie: uses alphabet reduction. Use for larger alphabets.
*/
import "fmt"
// invalid is returned by next4 when the input could not be parsed.
const invalid = -1
// NewASCIITrie creates a new, empty trie holding ASCII strings.
func NewASCIITrie() *ASCIITrie {
	return &ASCIITrie{isRoot: true}
}
// newASCIITrieNode creates a non-root internal node.
func newASCIITrieNode() *ASCIITrie {
	return &ASCIITrie{isRoot: false}
}
// Add inserts the word s into the trie, creating child nodes as
// needed. An exhausted (empty) suffix marks the current node as the
// end of a complete word.
func (t *ASCIITrie) Add(s string) error {
	if len(s) == 0 {
		t.isFullWord = true
		return nil
	}
	c, err := next(s)
	if err != nil {
		return err
	}
	child := t.children[c]
	if child == nil {
		child = newASCIITrieNode()
		t.children[c] = child
	}
	child.data = byte(c)
	return child.Add(s[1:])
}
// Contains reports whether the trie holds s as a complete word.
// Bug fix: the previous version returned true as soon as a path for s
// existed, so every prefix of a stored word was reported as contained,
// and the isFullWord marker that Add maintains was never consulted. It
// now recurses to the terminal node and returns its isFullWord flag;
// the empty string is contained only if Add("") was called.
func (t *ASCIITrie) Contains(s string) bool {
	if s == "" {
		return t.isFullWord
	}
	c, err := next(s)
	if err != nil {
		return false
	}
	child := t.children[c]
	if child == nil {
		return false
	}
	return child.Contains(s[1:])
}
// next returns the first byte of s. The previous version guarded
// against s[0] > 255, but a byte can never exceed 255, so that error
// branch was dead code; the error result is kept only so existing
// callers' signatures remain valid (it is always nil). Callers must
// pass a non-empty string.
func next(s string) (byte, error) {
	return s[0], nil
}
// NewASCIIReduxTrie creates a new, empty alphabet-reduced trie holding
// strings: each character is stored as two 4-bit levels.
func NewASCIIReduxTrie() *ASCIIReduxTrie {
	return &ASCIIReduxTrie{isRoot: true, suffixByteMask: true}
}
// newASCIIReduxTrieNode creates a non-root node; byteMask selects
// which nibble of the current character this node's children consume.
func newASCIIReduxTrieNode(byteMask bool) *ASCIIReduxTrie {
	return &ASCIIReduxTrie{isRoot: false, suffixByteMask: byteMask}
}
// Add inserts the word s into the reduced trie. Nodes alternate
// between low-nibble levels (which advance to the next character) and
// high-nibble levels (which stay on the same character); an exhausted
// suffix marks the current node as a complete word.
func (t *ASCIIReduxTrie) Add(s string) error {
	if len(s) == 0 {
		t.isFullWord = true
		return nil
	}
	key, err := next4(s, t.suffixByteMask)
	if err != nil {
		return err
	}
	child := t.children[key]
	if child == nil {
		child = newASCIIReduxTrieNode(!t.suffixByteMask)
		t.children[key] = child
	}
	child.data = byte(key)
	if t.suffixByteMask {
		return child.Add(s[1:])
	}
	return child.Add(s)
}
// Contains reports whether the reduced trie holds s as a complete
// word. Two fixes over the previous version: (1) it no longer returns
// true as soon as len(s) == 1 — which both accepted prefixes of stored
// words and, on high-nibble levels, skipped the final low-nibble level
// entirely — it now recurses until the whole word is consumed; (2) the
// terminal node's isFullWord marker (set by Add) is the authoritative
// answer. The empty string is contained only if Add("") was called.
func (t *ASCIIReduxTrie) Contains(s string) bool {
	if s == "" {
		return t.isFullWord
	}
	c, err := next4(s, t.suffixByteMask)
	if err != nil {
		return false
	}
	child := t.children[c]
	if child == nil {
		return false
	}
	if t.suffixByteMask {
		// Low nibble consumed: advance to the next character.
		return child.Contains(s[1:])
	}
	// High nibble consumed: the same character continues at the child.
	return child.Contains(s)
}
// next4 returns the next 4-bit word of s: the low nibble of the first
// byte when suffixByteMask is true, otherwise the high nibble. Callers
// must pass a non-empty string.
//
// NOTE(review): c is a byte, so the c > 255 guard can never fire and
// the invalid/error branch is dead code.
func next4(s string, suffixByteMask bool) (int, error) {
	c := s[0]
	if c > 255 {
		return invalid, fmt.Errorf("Unexpected character in %v", s)
	}
	data := int(c)
	if suffixByteMask {
		data = data & 0x0f
	} else {
		data = data >> 4 // shift to obtain a 4 bit integer
	}
	return data, nil
} | stringtrie.go | 0.835316 | 0.488954 | stringtrie.go | starcoder |
package target
import (
"errors"
"fmt"
"github.com/spf13/pflag"
)
// TargetFlags represents the target cobra flags (--garden, --project,
// --seed, --shoot) and conversions from them to a Target.
//nolint
type TargetFlags interface {
	// GardenName returns the value that is tied to the corresponding cobra flag.
	GardenName() string
	// ProjectName returns the value that is tied to the corresponding cobra flag.
	ProjectName() string
	// SeedName returns the value that is tied to the corresponding cobra flag.
	SeedName() string
	// ShootName returns the value that is tied to the corresponding cobra flag.
	ShootName() string
	// AddFlags binds target configuration flags to a given flagset
	AddFlags(flags *pflag.FlagSet)
	// ToTarget converts the flags to a target
	ToTarget() Target
	// IsTargetValid returns true if the set of given CLI flags is enough
	// to create a meaningful target. For example, if only the SeedName is
	// given, false is returned because for targeting a seed, the GardenName
	// must also be given. If ShootName and GardenName are set, false is
	// returned because either project or seed have to be given as well.
	IsTargetValid() bool
	// OverrideTarget overrides the given target with the values of the target flags
	OverrideTarget(current Target) (Target, error)
}
// NewTargetFlags returns a TargetFlags pre-populated with the given
// garden, project, seed and shoot names.
func NewTargetFlags(garden, project, seed, shoot string) TargetFlags {
	return &targetFlagsImpl{
		gardenName: garden,
		projectName: project,
		seedName: seed,
		shootName: shoot,
	}
}
// targetFlagsImpl is the pflag-backed implementation of TargetFlags.
type targetFlagsImpl struct {
	gardenName string
	projectName string
	seedName string
	shootName string
}
// GardenName returns the current --garden flag value.
func (tf *targetFlagsImpl) GardenName() string {
	return tf.gardenName
}
// ProjectName returns the current --project flag value.
func (tf *targetFlagsImpl) ProjectName() string {
	return tf.projectName
}
// SeedName returns the current --seed flag value.
func (tf *targetFlagsImpl) SeedName() string {
	return tf.seedName
}
// ShootName returns the current --shoot flag value.
func (tf *targetFlagsImpl) ShootName() string {
	return tf.shootName
}
// AddFlags registers the four targeting flags on the given flag set,
// binding them to this instance's fields.
func (tf *targetFlagsImpl) AddFlags(flags *pflag.FlagSet) {
	flags.StringVar(&tf.gardenName, "garden", "", "target the given garden cluster")
	flags.StringVar(&tf.projectName, "project", "", "target the given project")
	flags.StringVar(&tf.seedName, "seed", "", "target the given seed cluster")
	flags.StringVar(&tf.shootName, "shoot", "", "target the given shoot cluster")
}
// ToTarget converts the flag values into a Target without validation.
func (tf *targetFlagsImpl) ToTarget() Target {
	return NewTarget(tf.gardenName, tf.projectName, tf.seedName, tf.shootName)
}
// isEmpty reports whether the user supplied no targeting flags at all.
func (tf *targetFlagsImpl) isEmpty() bool {
	return tf.gardenName == "" && tf.projectName == "" && tf.seedName == "" && tf.shootName == ""
}
// OverrideTarget applies any set flags on top of the given target and
// returns the validated result. Setting a higher-level flag resets all
// deeper levels; --project and --seed are mutually exclusive. If no
// flags are set, current is returned unchanged and unvalidated.
func (tf *targetFlagsImpl) OverrideTarget(current Target) (Target, error) {
	// user gave _some_ flags; we use those to override the current target
	// (e.g. to quickly change a shoot while keeping garden/project names)
	if !tf.isEmpty() {
		// note that "deeper" levels of targets are reset, as to allow the
		// user to "move up", e.g. when they have targeted a shoot, just
		// specifying "--garden mygarden" should target the garden, not the same
		// shoot on the garden mygarden.
		if tf.gardenName != "" {
			current = current.WithGardenName(tf.gardenName).WithProjectName("").WithSeedName("").WithShootName("")
		}
		if tf.projectName != "" && tf.seedName != "" {
			return nil, errors.New("cannot specify --project and --seed at the same time")
		}
		if tf.projectName != "" {
			current = current.WithProjectName(tf.projectName).WithSeedName("").WithShootName("")
		}
		if tf.seedName != "" {
			current = current.WithSeedName(tf.seedName).WithProjectName("").WithShootName("")
		}
		if tf.shootName != "" {
			current = current.WithShootName(tf.shootName)
		}
		if err := current.Validate(); err != nil {
			return nil, fmt.Errorf("invalid target flags: %w", err)
		}
	}
	return current, nil
}
func (tf *targetFlagsImpl) IsTargetValid() bool {
// garden name is always required for a complete set of flags
if tf.gardenName == "" {
return false
}
return tf.ToTarget().Validate() == nil
} | pkg/target/target_flags.go | 0.598899 | 0.436202 | target_flags.go | starcoder |
package systems
import (
"fmt"
"sync"
"time"
"github.com/Ariemeth/quantum-pulse/components"
"github.com/Ariemeth/quantum-pulse/entity"
)
const (
	// TypeMovement is the identifier returned by the movement system's
	// Type method.
	TypeMovement = "mover"
)
// Movement represents a system that knows how to alter an Entity's position based on its velocities.
type Movement interface {
	System
	// Process updates entities position based on their velocities.
	// elapsed is the time step in seconds.
	Process(elapsed float32)
}
// movement is the concrete Movement implementation. runningLock guards
// entities and isRunning; the add/remove/quit channels are serviced by the
// goroutine started in NewMovement, and quitProcessing stops the tick loop
// started by Start.
type movement struct {
	entities map[string]movable
	remove chan entity.Entity
	add chan entity.Entity
	quit chan interface{}
	quitProcessing chan interface{}
	runningLock sync.Mutex
	requirements []string
	interval time.Duration
	isRunning bool
}
// NewMovement creates a new Movement system and starts the goroutine that
// services entity add/remove requests. Entities are not processed until
// Start is called.
func NewMovement() Movement {
	m := movement{
		entities: make(map[string]movable),
		remove:   make(chan entity.Entity),
		add:      make(chan entity.Entity),
		quit:     make(chan interface{}),
		quitProcessing: make(chan interface{}),
		requirements: []string{components.TypeTransform,
			components.TypeAcceleration,
			components.TypeVelocity},
		// Target roughly 144 updates per second. Dividing time.Second keeps
		// sub-millisecond precision; the original (1000/144)*time.Millisecond
		// truncated via integer division to 6ms (~166 updates/s).
		interval:  time.Second / 144,
		isRunning: false,
	}
	// Service add/remove requests until Terminate signals quit.
	go func() {
		for {
			select {
			case ent := <-m.add:
				fmt.Printf("Adding %s to the movement system.\n", ent.ID())
				m.addEntity(ent)
			case ent := <-m.remove:
				fmt.Printf("Removing %s from the movement system.\n", ent.ID())
				m.removeEntity(ent)
			case <-m.quit:
				return
			}
		}
	}()
	return &m
}
// Type retrieves the type of system such as renderer, mover, etc.
// For the movement system this is always TypeMovement ("mover").
func (m *movement) Type() string {
	return TypeMovement
}
// AddEntity adds an Entity to the system. Each system will have a component requirement that must be met before the Entity can be added.
// The request is handed to the service goroutine via the add channel, so
// this call blocks until the goroutine receives it.
func (m *movement) AddEntity(e entity.Entity) {
	m.add <- e
}
// RemoveEntity removes an Entity from the system.
// The request is handed to the service goroutine via the remove channel, so
// this call blocks until the goroutine receives it.
func (m *movement) RemoveEntity(e entity.Entity) {
	m.remove <- e
}
// IsRunning reports whether the movement system is currently processing
// entities.
func (m *movement) IsRunning() bool {
	m.runningLock.Lock()
	defer m.runningLock.Unlock()
	return m.isRunning
}
// Start begins periodically updating entity transforms based on their
// velocity and acceleration. Calling Start while already running is a no-op.
func (m *movement) Start() {
	m.runningLock.Lock()
	defer m.runningLock.Unlock()
	if m.isRunning {
		return
	}
	m.isRunning = true
	go func() {
		tr := time.NewTimer(m.interval)
		previousTime := time.Now().UnixNano()
		for {
			select {
			case <-m.quitProcessing:
				// The original cleared isRunning without holding
				// runningLock, racing with IsRunning/Start; take the lock
				// before mutating shared state.
				m.runningLock.Lock()
				m.isRunning = false
				m.runningLock.Unlock()
				return
			case <-tr.C:
				current := time.Now().UnixNano()
				// Get the elapsed time in seconds
				elapsed := float32((current - previousTime)) / 1000000000.0
				previousTime = current
				m.Process(elapsed)
				// Compensate the next tick for how long Process took so the
				// update rate stays close to the configured interval.
				processingTime := time.Now().UnixNano() - current
				if processingTime > 0 {
					tr.Reset(m.interval - time.Duration(processingTime))
				} else {
					tr.Reset(time.Nanosecond)
				}
			}
		}
	}()
}
// Stop stops the movement system from moving any of its Entities. The
// system can be started again with Start.
func (m *movement) Stop() {
	// Deliberately send without holding runningLock: the processing
	// goroutine may be inside Process waiting for that same lock, and a
	// blocking send performed while holding it would deadlock both
	// goroutines (the original implementation had this hazard).
	m.quitProcessing <- true
}
// Terminate stops the movement system and shuts down the add/remove service
// goroutine, releasing all resources. Once Terminate has been called, the
// system cannot be reused.
func (m *movement) Terminate() {
	// As in Stop, sending while holding runningLock can deadlock with
	// Process (which acquires the same lock), so the sends are performed
	// without the lock.
	m.quitProcessing <- true
	m.quit <- true
}
// Process advances every registered entity by one step: velocities are
// integrated from accelerations, then transforms from velocities. elapsed
// is the time step in seconds. runningLock is held for the entire sweep.
func (m *movement) Process(elapsed float32) {
	defer m.runningLock.Unlock()
	m.runningLock.Lock()
	for _, ent := range m.entities {
		// adjust the velocity based on the acceleration.
		updateVelocityUsingAcceleration(elapsed, ent.Acceleration, ent.Velocity)
		// Apply velocity matrix to the transform
		ent.Transform.Update(ent.Velocity.Translational().Mul(elapsed), ent.Velocity.Rotational().Mul(elapsed))
	}
}
// addEntity caches the movement-related components of e. Entities missing
// any of the velocity, acceleration, or transform components are ignored.
func (m *movement) addEntity(e entity.Entity) {
	vel, hasVel := e.Component(components.TypeVelocity).(components.Velocity)
	acc, hasAcc := e.Component(components.TypeAcceleration).(components.Acceleration)
	tra, hasTra := e.Component(components.TypeTransform).(components.Transform)
	if !hasVel || !hasAcc || !hasTra {
		return
	}
	m.runningLock.Lock()
	defer m.runningLock.Unlock()
	m.entities[e.ID()] = movable{
		Velocity:     vel,
		Acceleration: acc,
		Transform:    tra,
	}
}
// removeEntity drops the entity with e's ID from the processing set.
func (m *movement) removeEntity(e entity.Entity) {
	m.runningLock.Lock()
	defer m.runningLock.Unlock()
	delete(m.entities, e.ID())
}
// updateVelocityUsingAcceleration integrates the acceleration a over the
// elapsed time (in seconds) and stores the resulting velocity back into v.
func updateVelocityUsingAcceleration(elapsed float32, a components.Acceleration, v components.Velocity) {
	newRotational := v.Rotational().Add(a.Rotational().Mul(elapsed))
	newTranslational := v.Translational().Add(a.Translational().Mul(elapsed))
	v.Set(newRotational, newTranslational)
}
// movable caches the three components the movement system needs in order to
// move a single entity.
type movable struct {
	Acceleration components.Acceleration
	Velocity components.Velocity
	Transform components.Transform
}
package onshape
import (
"encoding/json"
)
// BTPOperatorDeclaration264AllOf struct for BTPOperatorDeclaration264AllOf.
// All fields are optional pointers: nil means "not set", and unset fields
// are omitted from the JSON produced by MarshalJSON.
type BTPOperatorDeclaration264AllOf struct {
	BtType *string `json:"btType,omitempty"`
	Operator *string `json:"operator,omitempty"`
	SpaceAfterOperator *BTPSpace10 `json:"spaceAfterOperator,omitempty"`
	SpaceBeforeOperator *BTPSpace10 `json:"spaceBeforeOperator,omitempty"`
}
// NewBTPOperatorDeclaration264AllOf instantiates a new
// BTPOperatorDeclaration264AllOf object. Defaulted properties would be
// assigned here, and required properties ensured, if the API defined any.
func NewBTPOperatorDeclaration264AllOf() *BTPOperatorDeclaration264AllOf {
	return &BTPOperatorDeclaration264AllOf{}
}
// NewBTPOperatorDeclaration264AllOfWithDefaults instantiates a new
// BTPOperatorDeclaration264AllOf object with defaulted properties only; it
// does not guarantee that properties required by the API are set.
func NewBTPOperatorDeclaration264AllOfWithDefaults() *BTPOperatorDeclaration264AllOf {
	return &BTPOperatorDeclaration264AllOf{}
}
// GetBtType returns the BtType field value if set, or the zero string
// otherwise.
func (o *BTPOperatorDeclaration264AllOf) GetBtType() string {
	if o != nil && o.BtType != nil {
		return *o.BtType
	}
	return ""
}
// GetBtTypeOk returns the BtType pointer together with a flag reporting
// whether the field has been set.
func (o *BTPOperatorDeclaration264AllOf) GetBtTypeOk() (*string, bool) {
	if o != nil && o.BtType != nil {
		return o.BtType, true
	}
	return nil, false
}
// HasBtType reports whether the BtType field has been set.
func (o *BTPOperatorDeclaration264AllOf) HasBtType() bool {
	return o != nil && o.BtType != nil
}
// SetBtType stores the address of a copy of v in the BtType field.
func (o *BTPOperatorDeclaration264AllOf) SetBtType(v string) {
	o.BtType = &v
}
// GetOperator returns the Operator field value if set, or the zero string
// otherwise.
func (o *BTPOperatorDeclaration264AllOf) GetOperator() string {
	if o != nil && o.Operator != nil {
		return *o.Operator
	}
	return ""
}
// GetOperatorOk returns the Operator pointer together with a flag reporting
// whether the field has been set.
func (o *BTPOperatorDeclaration264AllOf) GetOperatorOk() (*string, bool) {
	if o != nil && o.Operator != nil {
		return o.Operator, true
	}
	return nil, false
}
// HasOperator reports whether the Operator field has been set.
func (o *BTPOperatorDeclaration264AllOf) HasOperator() bool {
	return o != nil && o.Operator != nil
}
// SetOperator stores the address of a copy of v in the Operator field.
func (o *BTPOperatorDeclaration264AllOf) SetOperator(v string) {
	o.Operator = &v
}
// GetSpaceAfterOperator returns the SpaceAfterOperator field value if set,
// or the zero BTPSpace10 otherwise.
func (o *BTPOperatorDeclaration264AllOf) GetSpaceAfterOperator() BTPSpace10 {
	if o != nil && o.SpaceAfterOperator != nil {
		return *o.SpaceAfterOperator
	}
	var zero BTPSpace10
	return zero
}
// GetSpaceAfterOperatorOk returns the SpaceAfterOperator pointer together
// with a flag reporting whether the field has been set.
func (o *BTPOperatorDeclaration264AllOf) GetSpaceAfterOperatorOk() (*BTPSpace10, bool) {
	if o != nil && o.SpaceAfterOperator != nil {
		return o.SpaceAfterOperator, true
	}
	return nil, false
}
// HasSpaceAfterOperator reports whether the SpaceAfterOperator field has
// been set.
func (o *BTPOperatorDeclaration264AllOf) HasSpaceAfterOperator() bool {
	return o != nil && o.SpaceAfterOperator != nil
}
// SetSpaceAfterOperator stores the address of a copy of v in the
// SpaceAfterOperator field.
func (o *BTPOperatorDeclaration264AllOf) SetSpaceAfterOperator(v BTPSpace10) {
	o.SpaceAfterOperator = &v
}
// GetSpaceBeforeOperator returns the SpaceBeforeOperator field value if
// set, or the zero BTPSpace10 otherwise.
func (o *BTPOperatorDeclaration264AllOf) GetSpaceBeforeOperator() BTPSpace10 {
	if o != nil && o.SpaceBeforeOperator != nil {
		return *o.SpaceBeforeOperator
	}
	var zero BTPSpace10
	return zero
}
// GetSpaceBeforeOperatorOk returns the SpaceBeforeOperator pointer together
// with a flag reporting whether the field has been set.
func (o *BTPOperatorDeclaration264AllOf) GetSpaceBeforeOperatorOk() (*BTPSpace10, bool) {
	if o != nil && o.SpaceBeforeOperator != nil {
		return o.SpaceBeforeOperator, true
	}
	return nil, false
}
// HasSpaceBeforeOperator reports whether the SpaceBeforeOperator field has
// been set.
func (o *BTPOperatorDeclaration264AllOf) HasSpaceBeforeOperator() bool {
	return o != nil && o.SpaceBeforeOperator != nil
}
// SetSpaceBeforeOperator stores the address of a copy of v in the
// SpaceBeforeOperator field.
func (o *BTPOperatorDeclaration264AllOf) SetSpaceBeforeOperator(v BTPSpace10) {
	o.SpaceBeforeOperator = &v
}
// MarshalJSON serializes only the fields that have been set; unset (nil)
// fields are omitted entirely.
func (o BTPOperatorDeclaration264AllOf) MarshalJSON() ([]byte, error) {
	out := make(map[string]interface{})
	if o.BtType != nil {
		out["btType"] = o.BtType
	}
	if o.Operator != nil {
		out["operator"] = o.Operator
	}
	if o.SpaceAfterOperator != nil {
		out["spaceAfterOperator"] = o.SpaceAfterOperator
	}
	if o.SpaceBeforeOperator != nil {
		out["spaceBeforeOperator"] = o.SpaceBeforeOperator
	}
	return json.Marshal(out)
}
type NullableBTPOperatorDeclaration264AllOf struct {
value *BTPOperatorDeclaration264AllOf
isSet bool
}
func (v NullableBTPOperatorDeclaration264AllOf) Get() *BTPOperatorDeclaration264AllOf {
return v.value
}
func (v *NullableBTPOperatorDeclaration264AllOf) Set(val *BTPOperatorDeclaration264AllOf) {
v.value = val
v.isSet = true
}
func (v NullableBTPOperatorDeclaration264AllOf) IsSet() bool {
return v.isSet
}
func (v *NullableBTPOperatorDeclaration264AllOf) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableBTPOperatorDeclaration264AllOf(val *BTPOperatorDeclaration264AllOf) *NullableBTPOperatorDeclaration264AllOf {
return &NullableBTPOperatorDeclaration264AllOf{value: val, isSet: true}
}
func (v NullableBTPOperatorDeclaration264AllOf) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableBTPOperatorDeclaration264AllOf) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | onshape/model_btp_operator_declaration_264_all_of.go | 0.730963 | 0.422207 | model_btp_operator_declaration_264_all_of.go | starcoder |
package physics
import (
"encoding/json"
"fmt"
"github.com/stnma7e/betuol/common"
"github.com/stnma7e/betuol/component"
"github.com/stnma7e/betuol/math"
)
// PhysicsManager implements a basic physics manager that handles collision detection and resolution.
// The structure also satisifies the component.SceneManager interface.
type PhysicsManager struct {
	// sm is the scene manager used to query and update object transforms.
	sm component.SceneManager
	// radii[id] is the bounding-sphere radius for GOiD id; 0 means the
	// object has no physics component.
	radii []float32
}
// MakePhysicsManager returns a pointer to a PhysicsManager backed by the
// given scene manager, with no physics components registered yet.
func MakePhysicsManager(sm component.SceneManager) *PhysicsManager {
	return &PhysicsManager{
		sm:    sm,
		radii: []float32{},
	}
}
// Tick checks every pair of physics-enabled objects for bounding-sphere
// intersection and pushes the first object of an intersecting pair apart.
// NOTE(review): the delta parameter is currently unused, and each pair is
// examined twice, once as (i,j) and once as (j,i) — O(n^2) over all bodies.
func (pm *PhysicsManager) Tick(delta float64) {
	matList := pm.sm.GetMatrixList()
	for i := range pm.radii {
		// index 0, zero radii, and empty matrices mark unused slots.
		if i == 0 {
			continue
		}
		if pm.radii[i] == 0 {
			continue
		}
		if matList[i].IsEmpty() {
			continue
		}
		loc1, err := pm.sm.GetObjectLocation(component.GOiD(i))
		if err != nil {
			common.LogErr.Println(err)
		}
		sp1 := math.Sphere{loc1, pm.radii[i]}
		for j := range pm.radii {
			if i == j || j == 0 {
				continue
			}
			if pm.radii[j] == 0 {
				continue
			}
			if matList[j].IsEmpty() {
				continue
			}
			loc2, err := pm.sm.GetObjectLocation(component.GOiD(j))
			if err != nil {
				common.LogErr.Println(err)
			}
			sp2 := math.Sphere{loc2, pm.radii[j]}
			if sp1.Intersects(sp2) {
				//common.LogWarn.Printf("collision between %d and %d\n", i, j)
				// NOTE(review): "penetration" here is the full
				// center-to-center vector, so normalize-then-scale-by-
				// magnitude below reproduces it unchanged; confirm the
				// intended separation distance (overlap depth vs. full
				// center distance).
				penetration := math.Sub3v3v(sp1.Center, sp2.Center)
				pSqrd := math.MagSqrd3v(penetration)
				if pSqrd-(sp1.Radius+sp2.Radius)*(sp1.Radius+sp2.Radius) > 0 {
					common.LogErr.Println("math fucked up")
				}
				split := math.Normalize3v(penetration)
				smallestDistanceToRemoveIntersection := math.Mult3vf(split, math.Mag3v(penetration))
				// Translate object i's transform by the separation vector.
				trans := matList[i]
				trans[3] += smallestDistanceToRemoveIntersection[0]
				trans[7] += smallestDistanceToRemoveIntersection[1]
				trans[11] += smallestDistanceToRemoveIntersection[2]
				transVec := math.Vec3{trans[3], trans[7], trans[11]}
				// Only commit the move if it is non-trivial.
				if dist := math.DistSqrd3v3v(loc1, transVec); dist > 1 {
					common.LogInfo.Println("physics movement", dist)
					pm.sm.SetTransform(component.GOiD(i), trans)
				}
			}
		}
	}
}
// JsonCreate extracts creation data from a byte array of json text to pass
// to CreateComponent. The payload must carry a numeric "BoundingRadius".
func (pm *PhysicsManager) JsonCreate(id component.GOiD, data []byte) error {
	var payload struct {
		BoundingRadius float32
	}
	err := json.Unmarshal(data, &payload)
	if err != nil {
		return fmt.Errorf("failed to unmarshal physics component, error: %s", err.Error())
	}
	return pm.CreateComponent(id, payload.BoundingRadius)
}
// CreateComponent initializes a physics component for id by recording its
// bounding-sphere radius, growing internal storage as needed. It is called
// with data extracted by higher-level creation functions such as
// JsonCreate, and always returns nil.
func (pm *PhysicsManager) CreateComponent(id component.GOiD, radius float32) error {
	pm.resizeArrays(id)
	pm.radii[id] = radius
	return nil
}
// resizeArrays grows the radii slice so that index is a valid position,
// preserving existing entries. New entries are zero, which the manager
// treats as "no physics component".
func (pm *PhysicsManager) resizeArrays(index component.GOiD) {
	const RESIZESTEP = 1
	// Compare against len, not cap: elements are accessed by index, so the
	// slice length is what must cover index. The original cap-based test
	// could leave len(pm.radii) <= index and panic on the subsequent write
	// whenever cap exceeded len.
	if len(pm.radii) <= int(index) {
		grown := make([]float32, int(index)+RESIZESTEP)
		copy(grown, pm.radii)
		pm.radii = grown
	}
}
// DeleteComponent implements the component.ComponentManager interface and deletes character component data from the manager.
func (pm *PhysicsManager) DeleteComponent(id component.GOiD) {
if len(pm.radii) <= int(id) {
return
}
pm.radii[id] = 0
} | component/physics/manager.go | 0.688573 | 0.41739 | manager.go | starcoder |
package collection
import (
"github.com/rkbodenner/parallel_universe/game"
)
// NewTicTacToe builds the two-player Tic-Tac-Toe game and its setup rules.
func NewTicTacToe() *game.Game {
	setup := []*game.SetupRule{
		game.NewSetupRule("Draw 3x3 grid", "Once"),
		game.NewSetupRule("Choose X or O", "Each player"),
	}
	return game.NewGame("Tic-Tac-Toe", setup, 2, 2)
}
// NewForbiddenIsland builds the 2-4 player Forbidden Island game. The
// numeric comments on the rule slice are the indices referenced below when
// attaching Details and Dependencies to individual rules.
func NewForbiddenIsland() *game.Game {
	var setup = []*game.SetupRule{
		game.NewSetupRule("Create Forbidden Island", "Once"), //0
		game.NewSetupRule("Place the treasures", "Once"),
		game.NewSetupRule("Divide the cards", "Once"), //2
		game.NewSetupRule("The island starts to sink", "Once"), //3
		game.NewSetupRule("The Adventurers appear", "Once"), //4
		game.NewSetupRule("Place Adventurer pawn", "Each player"),//5
		game.NewSetupRule("Hand out Treasure deck cards", "Once"),//6
		game.NewSetupRule("Set the water level", "Once"),
	}
	setup[0].Details = "Shuffle the 24 Island tiles and randomly place them colorful-side-up into a 4x4 grid, then place 2 tiles next to each of the two middle tiles on every side of the square. Leave a small gap between the tiles."
	setup[1].Details = "Place the 4 treasure figurines--The Earth Stone, The Statue of the Wind, The Crystal of Fire, and The Ocean's Chalice--anywhere off to the side of the island"
	setup[2].Details = "Separate the cards into three decks according to the card backs: Flood deck (blue back), Treasure deck (red), and Adventurer cards (6 cards)"
	setup[3].Details = "Shuffle the Flood deck and place it face down on one side of the island, forming the Flood draw pile. Draw the top 6 cards (1 at a time) and place them face up next to the draw pile, forming the Flood discard pile. For each card drawn, flip the corresponding Island tile over to its flooded (blue & white) side."
	setup[4].Details = "Shuffle the Adventurer cards and randomly deal 1 to each player. Put undealt cards and their matching pawns back in the box."
	setup[5].Details = "Take the pawn matching the color of your Adventurer card and place it on the corresponding Island tile. Look for the matching pawn icon in the lower right corner of the Gates and Fools' Landing tiles. It's OK to start on a flooded tile."
	setup[6].Details = "Shuffle the Treasure deck and deal 2 cards to each player. Place your cards face up in front of you. If anyone gets a Waters Rise! card, give them a replacement and shuffle Waters Rise! back into the deck. Place the Treasure deck face down by one side of the island."
	setup[7].Details = "Place the Water Level slider on the left side of the Water Meter board and set it to the difficulty level of your choice"
	// Dependencies encode which steps must be completed before another step
	// can start (e.g. flooding requires the island and the sorted decks).
	setup[3].Dependencies = []*game.SetupRule{setup[0], setup[2]}
	setup[4].Dependencies = []*game.SetupRule{setup[2]}
	setup[5].Dependencies = []*game.SetupRule{setup[4]}
	setup[6].Dependencies = []*game.SetupRule{setup[2]}
	return game.NewGame("Forbidden Island", setup, 2, 4)
}
// NewOraEtLaboraShortMultiplayer builds the short 3-4 player variant of
// Ora et Labora. Details and Dependencies are attached to the setup rules
// by slice index after the slice is constructed.
func NewOraEtLaboraShortMultiplayer() *game.Game {
	var setup = []*game.SetupRule{
		game.NewSetupRule("Choose game board for short 3-4 player game", "Once"),
		game.NewSetupRule("Attach production wheel to game board", "Once"),
		game.NewSetupRule("Place 7 wooden goods indicators on game board", "Once"),
		game.NewSetupRule("Sort the building cards", "Once"),
		game.NewSetupRule("Place the start buildings", "Once"),
		game.NewSetupRule("Place the A, B, C, D buildings", "Once"),
		game.NewSetupRule("Place the black stone goods indicator", "Once"),
		game.NewSetupRule("Place the purple grapes goods indicator", "Once"),
		game.NewSetupRule("Take a heartland landscape board", "Each player"),
		game.NewSetupRule("Place moor and forest cards on landscape board", "Each player"),
		game.NewSetupRule("Choose a color", "Each player"),
		game.NewSetupRule("Take 1 prior and 1 lay brother of your color", "Each player"),
		game.NewSetupRule("Take 8 settlement cards of your color", "Each player"),
		game.NewSetupRule("Take 1 of each of the 6 starting goods", "Each player"),
		game.NewSetupRule("Remove unused tiles", "Once"),
		game.NewSetupRule("Sort districts and plots by cost", "Once"),
	}
	setup[0].Details = "The correct board will have an icon with two players, in the center on the reverse side. Place the board in the middle of the table."
	setup[1].Details = "Side showing 0/2/3/4/... should face up. Orient the wheel so that the beam points to the bible symbol. You can unscrew the wheel from the board with a fingernail."
	setup[1].Dependencies = []*game.SetupRule{setup[0]}
	setup[2].Details = "Place onto the board where the production wheel indicates 0 (clay, coins, grain, livestock, wood, peat, joker)"
	setup[2].Dependencies = []*game.SetupRule{setup[1]}
	// TODO: Player number variation
	setup[3].Details = "3-player game: Remove the cards with a 4 or a 3+ in the lower right corner. 4-player game: Remove the cards with a 4 in the lower right corner. Turn each card so that the chosen country variant (France or Ireland) faces up. Sort the buildings into stacks by the letter or bible symbol in the middle left of the card."
	setup[4].Details = "Start buildings have a bible symbol in the middle left of the card. Place the stack anywhere all players can see them."
	setup[4].Dependencies = []*game.SetupRule{setup[3]}
	setup[5].Details = "Place each stack next to the matching blue A, B, C, D symbol on the edge of the game board."
	setup[5].Dependencies = []*game.SetupRule{setup[1], setup[3]}
	setup[6].Details = "Place it at the position indicated by the matching symbol on the edge of the game board."
	setup[6].Dependencies = []*game.SetupRule{setup[1]}
	// TODO: Variant
	setup[7].Details = "Only if playing the France variant. Place it at the position indicated by the matching symbol on the edge of the game board."
	setup[7].Dependencies = []*game.SetupRule{setup[1]}
	setup[9].Details = "Place 1 moor and 2 forest. Leave the left-most two spaces empty on the upper row of the landscape board."
	setup[9].Dependencies = []*game.SetupRule{setup[8]}
	setup[11].Dependencies = []*game.SetupRule{setup[10]}
	setup[12].Details = "Stack buildings marked A, B, C, D under the respective piles of building cards next to the board."
	setup[12].Dependencies = []*game.SetupRule{setup[10]}
	setup[13].Details = "Clay, coin, grain, livestock, wood, peat. Place them right-side up."
	// TODO: Variant
	setup[14].Details = "France variant: Remove malt/beer. Ireland variant: Remove flour/bread and grapes/wine."
	setup[15].Details = "Lowest cost on top."
	return game.NewGame("Ora et Labora", setup, 3, 4)
}
// Collection is the list of games known to the application.
type Collection struct {
	Games []*game.Game
}
func NewCollection() *Collection {
return &Collection{
[]*game.Game{
NewTicTacToe(),
NewForbiddenIsland(),
NewOraEtLaboraShortMultiplayer(),
},
}
} | collection/collection.go | 0.549157 | 0.446676 | collection.go | starcoder |
package nlp
import (
"strconv"
"github.com/gnames/bayes"
"github.com/gnames/gnfinder/ent/token"
"github.com/gnames/gnfinder/io/dict"
)
// BayesF implements bayes.Featurer: a single named feature with a string
// value.
type BayesF struct {
	name string
	value string
}
// FeatureSet splits features into Uninomial, Species, and Infraspecies
// groups.
type FeatureSet struct {
	Uninomial []BayesF
	Species []BayesF
	InfraSp []BayesF
}
// Flatten combines the uninomial, species, and infraspecies feature groups
// into a single slice of bayes.Featurer values.
func (fs *FeatureSet) Flatten() []bayes.Featurer {
	total := len(fs.Uninomial) + len(fs.Species) + len(fs.InfraSp)
	flat := make([]bayes.Featurer, 0, total)
	for _, group := range [][]BayesF{fs.Uninomial, fs.Species, fs.InfraSp} {
		flat = append(flat, features(group)...)
	}
	return flat
}
// Name is required by bayes.Featurer; it returns the feature's name.
func (b BayesF) Name() bayes.FeatureName { return bayes.FeatureName(b.name) }
// Value is required by bayes.Featurer; it returns the feature's value.
func (b BayesF) Value() bayes.FeatureValue {
	return bayes.FeatureValue(b.value)
}
// NewFeatureSet creates the Bayes feature groups for a candidate name.
// ts[0] is the token that might represent a genus or other uninomial; its
// Indices point at the species, infraspecies, and rank tokens within ts,
// when present. A non-capitalized first token yields an empty FeatureSet.
func NewFeatureSet(ts []token.TokenSN) FeatureSet {
	var fs FeatureSet
	var u, sp, isp, rank token.TokenSN
	u = ts[0]
	if !u.Features().IsCapitalized {
		return fs
	}
	if i := u.Indices().Species; i > 0 {
		sp = ts[i]
	}
	if i := u.Indices().Infraspecies; i > 0 {
		isp = ts[i]
	}
	if i := u.Indices().Rank; i > 0 {
		rank = ts[i]
	}
	fs.convertFeatures(u, sp, isp, rank)
	return fs
}
// convertFeatures populates the three feature groups from the uninomial,
// species, and infraspecies tokens: word lengths, 3-rune endings, dash
// presence, and dictionary memberships (possibly overridden by the grey
// genus/species dictionary).
// NOTE(review): the rank parameter is unused; only the presence of a rank
// index on uni is checked below.
func (fs *FeatureSet) convertFeatures(
	uni token.TokenSN,
	sp token.TokenSN,
	isp token.TokenSN,
	rank token.TokenSN,
) {
	var uniDict, spDict, ispDict string
	// Abbreviated uninomials contribute only the abbr flag; full ones also
	// contribute their length and dictionary.
	if !uni.Features().Abbr {
		uniDict = uni.Features().UninomialDict.String()
		fs.Uninomial = append(fs.Uninomial,
			BayesF{"uniLen", strconv.Itoa(len(uni.Cleaned()))},
			BayesF{"abbr", "false"},
		)
	} else {
		fs.Uninomial = append(fs.Uninomial, BayesF{"abbr", "true"})
	}
	if w3 := wordEnd(uni); !uni.Features().Abbr && w3 != "" {
		fs.Uninomial = append(fs.Uninomial, BayesF{"uniEnd3", w3})
	}
	if uni.Indices().Species > 0 {
		spDict = sp.Features().SpeciesDict.String()
		fs.Species = append(fs.Species,
			BayesF{"spLen", strconv.Itoa(len(sp.Cleaned()))},
		)
		// A grey genus/species hit overrides both dictionaries.
		if uni.Features().GenSpGreyDict > 0 {
			uniDict = dict.GreyGenusSp.String()
			spDict = dict.GreyGenusSp.String()
		}
		if sp.Features().HasDash {
			fs.Species = append(fs.Species, BayesF{"hasDash", "true"})
		}
		if w3 := wordEnd(sp); w3 != "" {
			fs.Species = append(fs.Species, BayesF{"spEnd3", w3})
		}
	}
	if uni.Indices().Rank > 0 {
		fs.InfraSp = []BayesF{
			{"ispRank", "true"},
		}
	}
	if uni.Indices().Infraspecies > 0 {
		ispDict = isp.Features().SpeciesDict.String()
		fs.InfraSp = append(fs.InfraSp,
			BayesF{"ispLen", strconv.Itoa(len(isp.Cleaned()))},
		)
		if uni.Features().GenSpGreyDict > 1 {
			ispDict = dict.GreyGenusSp.String()
		}
		if isp.Features().HasDash {
			fs.InfraSp = append(fs.InfraSp, BayesF{"hasDash", "true"})
		}
		if w3 := wordEnd(isp); w3 != "" {
			fs.InfraSp = append(fs.InfraSp, BayesF{"ispEnd3", w3})
		}
	}
	// Dictionary features are added last, only when a dictionary applied.
	if uniDict != "" {
		fs.Uninomial = append(fs.Uninomial, BayesF{"uniDict", uniDict})
	}
	if spDict != "" {
		fs.Species = append(fs.Species, BayesF{"spDict", spDict})
	}
	if ispDict != "" {
		fs.InfraSp = append(fs.InfraSp, BayesF{"ispDict", ispDict})
	}
}
func wordEnd(t token.TokenSN) string {
name := []rune(t.Cleaned())
l := len(name)
if l < 4 {
return ""
}
w3 := string(name[l-3 : l])
return w3
} | ent/nlp/features.go | 0.566258 | 0.474022 | features.go | starcoder |
package main
import (
"errors"
"fmt"
"io"
"os"
"time"
)
// In this video, we'll implement a non-concurrent, non-linear version of the barycenter finder.
// It won't be particularly fast, but it will get the job done.
// ST
// First, though, we need some data to operate on, so we'll write a command line utility
// to generate random bodies. After that's done, we can write the actual barycenter program.
// ST
// Pull up your editor and let's get coding!
// ST (editor - genBodies)
// Once back here:
// Now it's time for the actual barycenter finder.
// The first thing to do is to create the struct we'll use to represent a body.
// We'll call it a MassPoint. It's a point in 3-space plus mass information.
// The MassPoint is the primary datastructure we'll pass around through the program.
// MassPoint is a point in 3-space together with a mass. It is the primary
// data structure passed through the program.
type MassPoint struct {
	x, y, z, mass float64
}

// addMassPoints returns the component-wise sum of a and b, adding each
// coordinate and the masses.
func addMassPoints(a MassPoint, b MassPoint) MassPoint {
	return MassPoint{
		x:    a.x + b.x,
		y:    a.y + b.y,
		z:    a.z + b.z,
		mass: a.mass + b.mass,
	}
}
// avgMassPoints returns the spatial midpoint of a and b. The mass is the
// sum of both masses and is deliberately not halved.
func avgMassPoints(a MassPoint, b MassPoint) MassPoint {
	sum := addMassPoints(a, b)
	sum.x /= 2
	sum.y /= 2
	sum.z /= 2
	return sum
}
// toWeightedSubspace maps a into mass-weighted space by scaling each
// coordinate by the point's mass.
func toWeightedSubspace(a MassPoint) MassPoint {
	a.x *= a.mass
	a.y *= a.mass
	a.z *= a.mass
	return a
}
// fromWeightedSubspace maps a back out of mass-weighted space by dividing
// each coordinate by the point's mass.
func fromWeightedSubspace(a MassPoint) MassPoint {
	a.x /= a.mass
	a.y /= a.mass
	a.z /= a.mass
	return a
}
// avgMassPointsWeighted returns the mass-weighted average (barycenter) of
// the pair a, b: both points are mapped into weighted space, averaged, and
// mapped back.
func avgMassPointsWeighted(a MassPoint, b MassPoint) MassPoint {
	weightedAvg := avgMassPoints(toWeightedSubspace(a), toWeightedSubspace(b))
	return fromWeightedSubspace(weightedAvg)
}
// Now, on to the actual application code. First, we'll define two useful helper functions.
// handle aborts the program via panic when err is non-nil; a nil error is
// a no-op.
func handle(err error) {
	if err == nil {
		return
	}
	panic(err)
}
// closeFile closes fi and aborts on failure; intended for use with defer so
// the file is closed even if the program panics elsewhere.
func closeFile(fi *os.File) {
	err := fi.Close()
	handle(err)
}
// Now comes the actual bulk of our program, in the main function.
func main() {
// Check arguments. We need exactly two (the executable name and one user-provided argument).
if len(os.Args) != 2 {
// If there are too many or not enough, abort.
fmt.Println("Incorrect number of arguments!")
os.Exit(1)
}
// Then, we'll open the input file with os.Open
file, err := os.Open(os.Args[1])
// Handle a possible error using our error handler
handle(err)
// And finally defer the closing of the file,
// so even if the program aborts the file will still get closed.
defer closeFile(file)
// Now we need initial buffer for the MassPoints, which we'll load from the file.
var masspoints []MassPoint
// We'll time how long it takes to load them, just for comparison.
startLoading := time.Now()
// Let's make an infinite loop for loading, and then break out when there are no more points
// to read.
for {
// We'll create a variable to hold the new point
var newMassPoint MassPoint
// Then we'll use fmt.Fscanf to load a single line from the file
_, err = fmt.Fscanf(file, "%f:%f:%f:%f", &newMassPoint.x, &newMassPoint.y, &newMassPoint.z, &newMassPoint.mass)
// If we got an EOF error, there are no more points to load
if err == io.EOF {
break
// On other errors, we can just skip the line.
} else if err != nil {
continue
}
// Finally, we'll use append() to add the point into the list of points.
masspoints = append(masspoints, newMassPoint)
}
// Now we'll report how many points we loaded
fmt.Printf("Loaded %d values from file in %s.\n", len(masspoints), time.Since(startLoading))
// And we should check that there are actually enough values.
if len(masspoints) <= 1 {
// If there aren't enough, we'll create an error and pass it to our error handler.
handle(errors.New("Insufficient number of values; there must be at least one "))
}
// We also want to time the calculation itself, so we'll start a timer.
startCalculation := time.Now()
// Now, we'll make a loop. It'll run until there's exactly one point left.
for len(masspoints) != 1 {
// Each loop will need a new array of MassPoints
var newMasspoints []MassPoint
// We loop over the current list of bodies by twos
for i := 0; i < len(masspoints)-1; i += 2 {
// Adding the results of the averaging to the new array of mass points
newMasspoints = append(newMasspoints, avgMassPointsWeighted(masspoints[i], masspoints[i+1]))
}
// Then we check to make sure we didn't leave off one
if len(masspoints)%2 != 0 {
newMasspoints = append(newMasspoints, masspoints[len(masspoints)-1])
}
// Finally, we need to switch out the old array with the new one
masspoints = newMasspoints
}
// Once the loop is done we need the one remaining virtual body
systemAverage := masspoints[0]
// And then we'll print out the result in a pretty way.
fmt.Printf("System barycenter is at (%f, %f, %f) and the system's mass is %f.\n",
systemAverage.x,
systemAverage.y,
systemAverage.z,
systemAverage.mass)
// Finally, we just want to print out the time the calculation has taken.
fmt.Printf("Calculation took %s.\n", time.Since(startCalculation))
} | linearBarycenter/main.go | 0.675122 | 0.577793 | main.go | starcoder |
package yamlpath
import (
"fmt"
"regexp"
"strconv"
"strings"
"gopkg.in/yaml.v3"
)
// filter reports whether a YAML node, evaluated in the context of the
// document root, satisfies a compiled filter expression.
type filter func(node, root *yaml.Node) bool
// newFilter compiles a filter parse tree into an executable filter.
// A nil tree yields a filter that never matches.
func newFilter(n *filterNode) filter {
	if n == nil {
		return never
	}
	switch n.lexeme.typ {
	case lexemeFilterAt, lexemeRoot:
		// Existence filter: matches when the subpath finds at least one node.
		path := pathFilterScanner(n)
		return func(node, root *yaml.Node) bool {
			return len(path(node, root)) > 0
		}
	case lexemeFilterEquality, lexemeFilterInequality,
		lexemeFilterGreaterThan, lexemeFilterGreaterThanOrEqual,
		lexemeFilterLessThan, lexemeFilterLessThanOrEqual:
		return comparisonFilter(n)
	case lexemeFilterMatchesRegularExpression:
		return matchRegularExpression(n)
	case lexemeFilterNot:
		// Logical negation of the single child filter.
		f := newFilter(n.children[0])
		return func(node, root *yaml.Node) bool {
			return !f(node, root)
		}
	case lexemeFilterOr:
		// Logical disjunction of the two child filters (short-circuiting).
		f1 := newFilter(n.children[0])
		f2 := newFilter(n.children[1])
		return func(node, root *yaml.Node) bool {
			return f1(node, root) || f2(node, root)
		}
	case lexemeFilterAnd:
		// Logical conjunction of the two child filters (short-circuiting).
		f1 := newFilter(n.children[0])
		f2 := newFilter(n.children[1])
		return func(node, root *yaml.Node) bool {
			return f1(node, root) && f2(node, root)
		}
	case lexemeFilterBooleanLiteral:
		// A bare true/false literal; the lexer only emits valid literals.
		b, err := strconv.ParseBool(n.lexeme.val)
		if err != nil {
			panic(err) // should not happen
		}
		return func(node, root *yaml.Node) bool {
			return b
		}
	default:
		return never
	}
}
// never is the filter that matches no node; it is the compilation result of
// nil or unrecognised parse trees.
func never(node, root *yaml.Node) bool {
	return false
}
// comparisonFilter builds a filter for the comparison operators
// (==, !=, <, <=, >, >=) carried by the node's lexeme.
func comparisonFilter(n *filterNode) filter {
	// compare maps a boolean equality result onto this node's comparator:
	// true is treated as compareEqual and false as incomparable. This is how
	// booleans and nulls (which have no ordering) are funnelled through the
	// ordered comparators.
	compare := func(b bool) bool {
		var c comparison
		if b {
			c = compareEqual
		} else {
			c = compareIncomparable
		}
		return n.lexeme.comparator()(c)
	}
	return nodeToFilter(n, func(l, r typedValue) bool {
		// Values of incompatible types are never comparable.
		if !l.typ.compatibleWith(r.typ) {
			return compare(false)
		}
		switch l.typ {
		case booleanValueType:
			return compare(equalBooleans(l.val, r.val))
		case nullValueType:
			return compare(equalNulls(l.val, r.val))
		default:
			// Strings and numerics have a full ordering.
			return n.lexeme.comparator()(compareNodeValues(l, r))
		}
	})
}
// x and y are canned string typed values. Initializing them at declaration
// replaces the previous init() function, which existed only for this
// side-effect-free assignment (Go best practice: avoid init() where a plain
// initializer suffices).
// NOTE(review): neither variable is referenced in this file — confirm they
// are used elsewhere in the package before removing them.
var (
	x = typedValue{stringValueType, "x"}
	y = typedValue{stringValueType, "y"}
)
// nodeToFilter builds a filter that compares, pairwise, every value produced
// by the left-hand side of n against every value produced by the right-hand
// side, using accept as the comparison. The filter returns true only when at
// least one pair was compared and every pair was accepted.
//
// The named result parameter of the original was never used (all returns are
// explicit), so it has been dropped; the signature is otherwise unchanged.
func nodeToFilter(n *filterNode, accept func(typedValue, typedValue) bool) filter {
	lhsPath := newFilterScanner(n.children[0])
	rhsPath := newFilterScanner(n.children[1])
	return func(node, root *yaml.Node) bool {
		// perform a set-wise comparison of the values in each path
		match := false
		for _, l := range lhsPath(node, root) {
			for _, r := range rhsPath(node, root) {
				if !accept(l, r) {
					// One failing pair fails the whole comparison.
					return false
				}
				match = true
			}
		}
		return match
	}
}
// equalBooleans reports whether two boolean literals denote the same value,
// ignoring case (e.g. "true" equals "True").
func equalBooleans(l, r string) bool {
	// Note: the YAML parser and our JSONPath lexer both rule out invalid boolean literals such as tRue.
	return strings.EqualFold(l, r)
}
// equalNulls reports whether two null literals are equal. All null literals
// compare equal, so the parameter values are deliberately ignored; blank
// identifiers make that explicit (the original named but never used them).
// Note: the YAML parser and our JSONPath lexer both rule out invalid null literals such as nUll.
func equalNulls(_, _ string) bool {
	return true
}
// filterScanner is a function that returns a slice of typed values from either a filter literal or a path expression
// which refers to either the current node or the root node. It is used in filter comparisons.
type filterScanner func(node, root *yaml.Node) []typedValue

// emptyScanner is the filterScanner that yields no values; it stands in for
// missing or invalid filter terms.
func emptyScanner(*yaml.Node, *yaml.Node) []typedValue {
	return []typedValue{}
}
// newFilterScanner converts one side of a filter comparison into a
// filterScanner: path expressions scan the document, literals yield their
// constant value, and anything else scans to nothing.
func newFilterScanner(n *filterNode) filterScanner {
	if n == nil {
		return emptyScanner
	}
	if n.isItemFilter() {
		return pathFilterScanner(n)
	}
	if n.isLiteral() {
		return literalFilterScanner(n)
	}
	return emptyScanner
}
// pathFilterScanner returns a scanner that evaluates the node's subpath
// against either the current node (filters rooted at "@") or the document
// root (filters rooted at "$") and yields the typed values of the matches.
func pathFilterScanner(n *filterNode) filterScanner {
	var at bool
	switch n.lexeme.typ {
	case lexemeFilterAt:
		at = true
	case lexemeRoot:
		at = false
	default:
		panic("false precondition")
	}
	// Reassemble the subpath text from its lexemes. A strings.Builder avoids
	// the quadratic cost of repeated string concatenation in a loop.
	var sb strings.Builder
	for _, lexeme := range n.subpath {
		sb.WriteString(lexeme.val)
	}
	path, err := NewPath(sb.String())
	if err != nil {
		// An invalid subpath scans to nothing rather than failing the query.
		return emptyScanner
	}
	return func(node, root *yaml.Node) []typedValue {
		if at {
			return values(path.Find(node))
		}
		return values(path.Find(root))
	}
}
// valueType classifies a scalar value for the purposes of filter comparison.
type valueType int

const (
	unknownValueType valueType = iota
	stringValueType
	intValueType
	floatValueType
	booleanValueType
	nullValueType
	regularExpressionValueType
)
// isNumeric reports whether the value type is one of the two numeric types
// (integer or float).
func (vt valueType) isNumeric() bool {
	switch vt {
	case intValueType, floatValueType:
		return true
	default:
		return false
	}
}
// compatibleWith reports whether a value of type vt may be compared with a
// value of type vt2: both numeric, identical types, or a string matched
// against a regular expression.
func (vt valueType) compatibleWith(vt2 valueType) bool {
	switch {
	case vt.isNumeric() && vt2.isNumeric():
		return true
	case vt == vt2:
		return true
	case vt == stringValueType && vt2 == regularExpressionValueType:
		return true
	default:
		return false
	}
}
// typedValue is a scalar value paired with its classified type. The value is
// always kept in its string form, whatever the type.
type typedValue struct {
	typ valueType
	val string
}
// YAML short tags identifying the scalar types this package recognises.
const (
	nullTag  = "!!null"
	boolTag  = "!!bool"
	strTag   = "!!str"
	intTag   = "!!int"
	floatTag = "!!float"
)
// typedValueOfNode converts a YAML node into a typedValue, classifying
// scalar nodes by their short tag. Non-scalar nodes and unrecognised tags
// are classified as unknownValueType.
func typedValueOfNode(node *yaml.Node) typedValue {
	typ := unknownValueType
	if node.Kind == yaml.ScalarNode {
		switch node.ShortTag() {
		case nullTag:
			typ = nullValueType
		case boolTag:
			typ = booleanValueType
		case strTag:
			typ = stringValueType
		case intTag:
			typ = intValueType
		case floatTag:
			typ = floatValueType
		}
	}
	return typedValue{typ: typ, val: node.Value}
}
// newTypedValue pairs a value type with its string representation.
func newTypedValue(t valueType, v string) typedValue {
	return typedValue{typ: t, val: v}
}
// typedValueOfString wraps s as a string-typed value.
func typedValueOfString(s string) typedValue {
	return newTypedValue(stringValueType, s)
}

// typedValueOfInt wraps the integer literal i as an int-typed value.
func typedValueOfInt(i string) typedValue {
	return newTypedValue(intValueType, i)
}

// typedValueOfFloat wraps the float literal f as a float-typed value.
func typedValueOfFloat(f string) typedValue {
	return newTypedValue(floatValueType, f)
}
// values converts the result of a Path.Find call into typed values. The
// error parameter lets callers pass Find's two results straight through;
// Find is not documented to fail here, so a non-nil error is a bug.
func values(nodes []*yaml.Node, err error) []typedValue {
	if err != nil {
		panic(fmt.Errorf("unexpected error: %v", err)) // should never happen
	}
	// Pre-size the slice: the output has exactly one entry per input node,
	// so this avoids repeated growth copies in append.
	v := make([]typedValue, 0, len(nodes))
	for _, n := range nodes {
		v = append(v, typedValueOfNode(n))
	}
	return v
}
// literalFilterScanner returns a scanner that always yields the single
// literal value carried by the filter node, independent of the document.
func literalFilterScanner(n *filterNode) filterScanner {
	// Evaluate the literal once, outside the returned closure.
	v := n.lexeme.literalValue()
	return func(node, root *yaml.Node) []typedValue {
		return []typedValue{v}
	}
}
// matchRegularExpression builds the filter for the regular-expression match
// operator: each left-hand string is matched against the right-hand pattern.
func matchRegularExpression(parseTree *filterNode) filter {
	return nodeToFilter(parseTree, stringMatchesRegularExpression)
}
// stringMatchesRegularExpression reports whether the string value s matches
// the regular expression expr. Both operands must already carry the expected
// types; anything else indicates a bug upstream and panics.
func stringMatchesRegularExpression(s, expr typedValue) bool {
	if s.typ != stringValueType || expr.typ != regularExpressionValueType {
		panic("unexpected types") // should never happen
	}
	re, _ := regexp.Compile(expr.val) // regex already compiled during lexing
	return re.Match([]byte(s.val))
} | pkg/yamlpath/filter.go | 0.68437 | 0.470858 | filter.go | starcoder
package unit
import (
"github.com/brettbuddin/shaden/dsp"
)
// newReverb constructs the stereo "reverb" unit. Inputs cover the two audio
// channels, wet/dry mix, diffusion amount ("defuse"), pre/post filter
// cutoffs, decay, room size, and a semitone pitch shift applied inside the
// feedback path. The two travel frequencies (0.5 and 0.3, normalized against
// the sample rate via dsp.Frequency) drive slow LFOs that modulate each
// channel's pre-delay length in ProcessSample.
func newReverb(io *IO, c Config) (*Unit, error) {
	var (
		aTravelFreq = dsp.Frequency(0.5, c.SampleRate).Float64()
		bTravelFreq = dsp.Frequency(0.3, c.SampleRate).Float64()
		r = &reverb{
			a: io.NewIn("a", dsp.Float64(0)),
			b: io.NewIn("b", dsp.Float64(0)),
			defuse: io.NewIn("defuse", dsp.Float64(0.625)),
			mix: io.NewIn("mix", dsp.Float64(0)),
			precutoff: io.NewIn("cutoff-pre", dsp.Frequency(300, c.SampleRate)),
			postcutoff: io.NewIn("cutoff-post", dsp.Frequency(500, c.SampleRate)),
			decay: io.NewIn("decay", dsp.Float64(0.7)),
			size: io.NewIn("size", dsp.Float64(0.1)),
			shiftSemitones: io.NewIn("shift-semitones", dsp.Float64(0)),
			aOut: io.NewOut("a"),
			bOut: io.NewOut("b"),
			ap: make([]*dsp.AllPass, 4),
			aAP: make([]*dsp.AllPass, 2),
			bAP: make([]*dsp.AllPass, 2),
			aFilter: &dsp.SVFilter{Poles: 1},
			aPostFilter: &dsp.SVFilter{Poles: 2},
			bFilter: &dsp.SVFilter{Poles: 1},
			bPostFilter: &dsp.SVFilter{Poles: 2},
			blockA: &dsp.DCBlock{},
			blockB: &dsp.DCBlock{},
			shiftA: dsp.NewPitchShift(),
			shiftB: dsp.NewPitchShift(),
			aTravelFreq: aTravelFreq,
			bTravelFreq: bTravelFreq,
		}
	)
	// Shared input diffusion chain: four serial all-pass filters.
	r.ap[0] = dsp.NewAllPass(1170)
	r.ap[1] = dsp.NewAllPass(1510)
	r.ap[2] = dsp.NewAllPass(2370)
	r.ap[3] = dsp.NewAllPass(3510)
	// Channel A tank: pre-delay, two all-passes, post-delay.
	r.aPreDL = dsp.NewDelayLine(3541)
	r.aAP[0] = dsp.NewAllPass(21820)
	r.aAP[1] = dsp.NewAllPass(26900)
	r.aPostDL = dsp.NewDelayLine(4453)
	// Channel B mirrors A except the post delay (4353 vs 4453 samples).
	// NOTE(review): lengths look hand-tuned — confirm the asymmetry is
	// intentional rather than a typo.
	r.bPreDL = dsp.NewDelayLine(3541)
	r.bAP[0] = dsp.NewAllPass(21820)
	r.bAP[1] = dsp.NewAllPass(26900)
	r.bPostDL = dsp.NewDelayLine(4353)
	return NewUnit(io, r), nil
}
// reverb is a stereo, cross-coupled feedback reverb: each channel's feedback
// is taken from the other channel's last output.
type reverb struct {
	// Control and audio inputs.
	a, b, defuse, mix, precutoff, postcutoff, decay, size, shiftSemitones *In
	aOut, bOut *Out
	// LFO phases that modulate each channel's pre-delay length.
	aPhase, bPhase float64
	aFilter, aPostFilter *dsp.SVFilter
	bFilter, bPostFilter *dsp.SVFilter
	aPreDL, bPreDL *dsp.DelayLine
	aPostDL, bPostDL *dsp.DelayLine
	// ap is the shared input diffusion chain; aAP/bAP are per-channel.
	ap, aAP, bAP []*dsp.AllPass
	// Last output of each channel, fed back into the opposite channel.
	aLast, bLast float64
	blockA, blockB *dsp.DCBlock
	shiftA, shiftB *dsp.PitchShift
	// LFO rates set once at construction time.
	aTravelFreq, bTravelFreq float64
}
// Clamp helpers keeping the slow-rate control inputs within stable ranges.
func decayClamp(v float64) float64 { return dsp.Clamp(v, 0, 0.99) } // keep feedback gain below unity
func defuseClamp(v float64) float64 { return dsp.Clamp(v, 0.4, 0.625) }
func pitchShiftClamp(v float64) float64 { return dsp.Clamp(v, -12, 12) } // +/- one octave in semitones
func sizeClamp(v float64) float64 { return dsp.Clamp(v, 0.01, 1) }
// ProcessSample renders one stereo sample at frame index i: the inputs are
// summed and diffused, then each channel runs through a modulated pre-delay,
// filters, all-passes and a decay-scaled post-delay, with cross-coupled,
// pitch-shifted feedback between the channels.
func (r *reverb) ProcessSample(i int) {
	// Read control-rate parameters, clamped to stable ranges.
	var (
		mix = r.mix.ReadSlow(i, ident)
		decay = r.decay.ReadSlow(i, decayClamp)
		defuse = r.defuse.ReadSlow(i, defuseClamp)
		precutoff = r.precutoff.ReadSlow(i, ident)
		postcutoff = r.postcutoff.ReadSlow(i, ident)
		size = r.size.ReadSlow(i, sizeClamp)
		shiftSemitones = r.shiftSemitones.ReadSlow(i, pitchShiftClamp)
	)
	a, b := r.a.Read(i), r.b.Read(i)
	// Diffuse the mono sum of both inputs through four serial all-passes.
	d := r.ap[0].TickRelative(a+b, defuse, size)
	d = r.ap[1].TickRelative(d, defuse, size)
	d = r.ap[2].TickRelative(d, defuse, size)
	d = r.ap[3].TickRelative(d, defuse, size)
	r.aFilter.Cutoff = precutoff
	r.bFilter.Cutoff = precutoff
	r.aPostFilter.Cutoff = postcutoff
	r.bPostFilter.Cutoff = postcutoff
	// Channel A: diffused input plus pitch-shifted feedback from channel B.
	aSig := d + (r.shiftA.TickSemitones(r.bLast, shiftSemitones) * decay)
	// Slowly modulate the pre-delay read position around 0.9 of full length.
	aTravel := dsp.Sin(r.aPhase)*0.01 + 0.9
	advanceLFO(&r.aPhase, r.aTravelFreq)
	aSig = r.aPreDL.TickRelative(aSig, aTravel*size)
	_, aSig, _ = r.aFilter.Tick(aSig)
	aSig = r.aAP[0].TickRelative(aSig, defuse, size)
	aSig = r.aAP[1].TickRelative(aSig, defuse, size)
	_, aSig, _ = r.aPostFilter.Tick(aSig)
	aOut := r.aPostDL.TickRelative(aSig, decay)
	// NOTE(review): aLast is updated before channel B reads it below, so B's
	// feedback sees A's current-sample output while A saw B's previous
	// sample — confirm this asymmetry is intentional.
	r.aLast = aOut
	// Channel B mirrors channel A with its own LFO and delay lengths.
	bSig := d + (r.shiftB.TickSemitones(r.aLast, shiftSemitones) * decay)
	bTravel := dsp.Sin(r.bPhase)*0.01 + 0.9
	advanceLFO(&r.bPhase, r.bTravelFreq)
	bSig = r.bPreDL.TickRelative(bSig, bTravel*size)
	_, bSig, _ = r.bFilter.Tick(bSig)
	bSig = r.bAP[0].TickRelative(bSig, defuse, size)
	bSig = r.bAP[1].TickRelative(bSig, defuse, size)
	_, bSig, _ = r.bPostFilter.Tick(bSig)
	bOut := r.bPostDL.TickRelative(bSig, decay)
	r.bLast = bOut
	// Blend dry and wet signals and remove any DC offset before output.
	r.aOut.Write(i, r.blockA.Tick(dsp.Mix(mix, a, aOut)))
	r.bOut.Write(i, r.blockB.Tick(dsp.Mix(mix, b, bOut)))
}
// advanceLFO advances an oscillator phase by freq (normalized per-sample
// rate) and wraps it back into [0, 2π). Only one subtraction is performed,
// which assumes the per-sample increment is well below a full cycle —
// true for the slow travel LFOs used here.
func advanceLFO(phase *float64, freq float64) {
	*phase += (freq * twoPi)
	if *phase >= twoPi {
		*phase -= twoPi
	}
} | unit/reverb.go | 0.617051 | 0.536374 | reverb.go | starcoder
package bls12381
import (
"crypto/subtle"
"fmt"
"github.com/cloudflare/circl/ecc/bls12381/ff"
)
// G2Size is the length in bytes of an element in G2 in uncompressed form.
const G2Size = 2 * ff.Fp2Size

// G2SizeCompressed is the length in bytes of an element in G2 in compressed form.
const G2SizeCompressed = ff.Fp2Size

// G2 is a point in the twist of the BLS12 curve over Fp2, stored in
// projective coordinates (x, y, z).
type G2 struct{ x, y, z ff.Fp2 }
// String renders the point's projective coordinates for debugging.
func (g G2) String() string { return fmt.Sprintf("x: %v\ny: %v\nz: %v", g.x, g.y, g.z) }

// Bytes serializes a G2 element in uncompressed form.
func (g G2) Bytes() []byte { return g.encodeBytes(false) }

// BytesCompressed serializes a G2 element in compressed form.
func (g G2) BytesCompressed() []byte { return g.encodeBytes(true) }
// SetBytes sets g to the value in bytes, and returns a non-nil error if not in G2.
// The first byte carries a header in its top three bits: bit 7 marks a
// compressed encoding, bit 6 the point at infinity, and bit 5 (compressed
// points only) selects the "bigger" of the two candidate y coordinates.
func (g *G2) SetBytes(b []byte) error {
	if len(b) < G2SizeCompressed {
		return errInputLength
	}
	isCompressed := int((b[0] >> 7) & 0x1)
	isInfinity := int((b[0] >> 6) & 0x1)
	isBigYCoord := int((b[0] >> 5) & 0x1)
	if isInfinity == 1 {
		// Infinity must be exactly the header bits followed by zeros.
		l := G2Size
		if isCompressed == 1 {
			l = G2SizeCompressed
		}
		zeros := make([]byte, l-1)
		if (b[0]&0x1F) != 0 || subtle.ConstantTimeCompare(b[1:], zeros) != 1 {
			return errEncoding
		}
		g.SetIdentity()
		return nil
	}
	// Mask off the three header bits to recover the x coordinate.
	x := (&[ff.Fp2Size]byte{})[:]
	copy(x, b)
	x[0] &= 0x1F
	if err := g.x.UnmarshalBinary(x); err != nil {
		return err
	}
	if isCompressed == 1 {
		// Recover y from the curve equation y^2 = x^3 + b, then pick the
		// square root whose "sign" matches the header bit.
		x3b := &ff.Fp2{}
		x3b.Sqr(&g.x)
		x3b.Mul(x3b, &g.x)
		x3b.Add(x3b, &g2Params.b)
		if g.y.Sqrt(x3b) == 0 {
			return errEncoding
		}
		if g.y.IsNegative() != isBigYCoord {
			g.y.Neg()
		}
	} else {
		// Uncompressed: y is stored explicitly after x.
		if len(b) < G2Size {
			return errInputLength
		}
		if err := g.y.UnmarshalBinary(b[ff.Fp2Size:G2Size]); err != nil {
			return err
		}
	}
	g.z.SetOne()
	// Reject points that are off the curve or outside the r-torsion subgroup.
	if !g.IsOnG2() {
		return errEncoding
	}
	return nil
}
// encodeBytes serializes the point, optionally compressed. The top three
// bits of the first byte form the header: bit 7 = compressed, bit 6 =
// infinity, bit 5 = bigger y coordinate (compressed, finite points only).
func (g G2) encodeBytes(compressed bool) []byte {
	g.toAffine() // value receiver: normalizing the copy leaves the caller's point intact
	var isCompressed, isInfinity, isBigYCoord byte
	if compressed {
		isCompressed = 1
	}
	if g.z.IsZero() == 1 {
		isInfinity = 1
	}
	if isCompressed == 1 && isInfinity == 0 {
		isBigYCoord = byte(g.y.IsNegative())
	}
	bytes, _ := g.x.MarshalBinary()
	if isCompressed == 0 {
		// The uncompressed form carries y explicitly after x.
		yBytes, _ := g.y.MarshalBinary()
		bytes = append(bytes, yBytes...)
	}
	if isInfinity == 1 {
		// The point at infinity is encoded as all zeros plus the header bits.
		l := len(bytes)
		for i := 0; i < l; i++ {
			bytes[i] = 0
		}
	}
	// Fold the header bits into the top of the first byte.
	bytes[0] = bytes[0]&0x1F | headerEncoding(isCompressed, isInfinity, isBigYCoord)
	return bytes
}
// Neg inverts g (negates the y coordinate).
func (g *G2) Neg() { g.y.Neg() }

// SetIdentity assigns g to the identity element.
func (g *G2) SetIdentity() { g.x = ff.Fp2{}; g.y.SetOne(); g.z = ff.Fp2{} }

// isValidProjective returns true if the point is not a projective point.
// It rejects only the invalid all-zero coordinate triple.
func (g *G2) isValidProjective() bool { return (g.x.IsZero() & g.y.IsZero() & g.z.IsZero()) != 1 }

// IsOnG2 returns true if the point is in the group G2.
func (g *G2) IsOnG2() bool { return g.isValidProjective() && g.isOnCurve() && g.isRTorsion() }

// IsIdentity returns true if the point is the identity of G2.
func (g *G2) IsIdentity() bool { return g.isValidProjective() && (g.z.IsZero() == 1) }
// cmov sets g to P if b == 1, and leaves g unchanged if b == 0, by
// delegating to the coordinate-wise conditional move of ff.Fp2 (used to keep
// scalar multiplication's memory access pattern independent of secrets).
func (g *G2) cmov(P *G2, b int) {
	(&g.x).CMov(&g.x, &P.x, b)
	(&g.y).CMov(&g.y, &P.y, b)
	(&g.z).CMov(&g.z, &P.z, b)
}
// isRTorsion returns true if point is in the r-torsion subgroup.
func (g *G2) isRTorsion() bool {
	// Bowe, "Faster Subgroup Checks for BLS12-381" (https://eprint.iacr.org/2019/814)
	Q := *g
	Q.psi() // Q = \psi(g)
	Q.scalarMult(g2PsiCoeff.minusZ[:], &Q) // Q = -[z]\psi(g)
	Q.Add(&Q, g) // Q = -[z]\psi(g)+g
	Q.psi() // Q = -[z]\psi^2(g)+\psi(g)
	Q.psi() // Q = -[z]\psi^3(g)+\psi^2(g)
	return Q.IsEqual(g) // Equivalent to verification equation in paper
}
// psi applies the ψ endomorphism used by the subgroup check: a
// coordinate-wise Frobenius followed by scaling x and y with the
// precomputed constants in g2PsiCoeff.
func (g *G2) psi() {
	g.x.Frob(&g.x)
	g.y.Frob(&g.y)
	g.z.Frob(&g.z)
	g.x.Mul(&g2PsiCoeff.alpha, &g.x)
	g.y.Mul(&g2PsiCoeff.beta, &g.y)
}
// Double updates g = 2g.
func (g *G2) Double() { doubleAndLine(g, nil) }

// Add updates g = P+Q.
func (g *G2) Add(P, Q *G2) { addAndLine(g, P, Q, nil) }

// ScalarMult calculates g = kP. The scalar's marshal error is discarded;
// NOTE(review): confirm Scalar.MarshalBinary cannot fail here.
func (g *G2) ScalarMult(k *Scalar, P *G2) { b, _ := k.MarshalBinary(); g.scalarMult(b, P) }
// scalarMult calculates g = kP, where k is the scalar in big-endian order.
// It uses a fixed 4-bit window: a 16-entry table of small multiples of P is
// built once, and each window's entry is selected by scanning the whole
// table with cmov + ConstantTimeByteEq, so the memory access pattern does
// not depend on the bits of k.
func (g *G2) scalarMult(k []byte, P *G2) {
	var Q G2
	Q.SetIdentity()
	T := &G2{}
	// mults[j] = [j]P for j = 0..15.
	var mults [16]G2
	mults[0].SetIdentity()
	mults[1] = *P
	for i := 1; i < 8; i++ {
		mults[2*i] = mults[i]
		mults[2*i].Double()
		mults[2*i+1].Add(&mults[2*i], P)
	}
	N := 8 * len(k) // total number of scalar bits
	for i := 0; i < N; i += 4 {
		// Shift the accumulator four bits left: Q = 16Q.
		Q.Double()
		Q.Double()
		Q.Double()
		Q.Double()
		// Extract the next 4-bit window (high nibble of each byte first).
		idx := 0xf & (k[i/8] >> uint(4-i%8))
		// Constant-time table lookup: visit every entry, keep the match.
		for j := 0; j < 16; j++ {
			T.cmov(&mults[j], subtle.ConstantTimeByteEq(idx, uint8(j)))
		}
		Q.Add(&Q, T)
	}
	*g = Q
}
// IsEqual returns true if g and p are equivalent projective points.
// Coordinates are cross-multiplied by the other point's z, which avoids a
// field inversion.
func (g *G2) IsEqual(p *G2) bool {
	var lx, rx, ly, ry ff.Fp2
	lx.Mul(&g.x, &p.z) // lx = x1*z2
	rx.Mul(&p.x, &g.z) // rx = x2*z1
	lx.Sub(&lx, &rx) // lx = lx-rx
	ly.Mul(&g.y, &p.z) // ly = y1*z2
	ry.Mul(&p.y, &g.z) // ry = y2*z1
	ly.Sub(&ly, &ry) // ly = ly-ry
	return lx.IsZero() == 1 && ly.IsZero() == 1
}
// isOnCurve returns true if g is a valid point on the curve, checking the
// projective equation y^2*z = x^3 + (4+4i)*z^3.
func (g *G2) isOnCurve() bool {
	var x3, z3, y2 ff.Fp2
	y2.Sqr(&g.y) // y2 = y^2
	y2.Mul(&y2, &g.z) // y2 = y^2*z
	x3.Sqr(&g.x) // x3 = x^2
	x3.Mul(&x3, &g.x) // x3 = x^3
	z3.Sqr(&g.z) // z3 = z^2
	z3.Mul(&z3, &g.z) // z3 = z^3
	z3.Mul(&z3, &g2Params.b) // z3 = (4+4i)*z^3
	x3.Add(&x3, &z3) // x3 = x^3 + (4+4i)*z^3
	y2.Sub(&y2, &x3) // y2 = y^2*z - (x^3 + (4+4i)*z^3)
	return y2.IsZero() == 1
}
// toAffine updates g with its affine representation (z = 1). The identity
// (z = 0) has no affine form and is left unchanged.
func (g *G2) toAffine() {
	if g.z.IsZero() != 1 {
		var invZ ff.Fp2
		invZ.Inv(&g.z)
		g.x.Mul(&g.x, &invZ)
		g.y.Mul(&g.y, &invZ)
		g.z.SetOne()
	}
}
// G2Generator returns a fresh copy of the generator point of G2 in affine
// form (z = 1); callers may mutate the result freely.
func G2Generator() *G2 {
	var G G2
	G.x = g2Params.genX
	G.y = g2Params.genY
	G.z.SetOne()
	return &G
} | ecc/bls12381/g2.go | 0.683736 | 0.435301 | g2.go | starcoder
package domain
// Block is a single coloured cell at board coordinates (X, Y).
type Block struct {
	X, Y int
	Colour BlockColour
}

// Board holds the playfield as a grid of blocks, indexed cells[x][y].
type Board struct {
	cells [][]*Block
}
// NewBoard creates a BoardWidth x BoardHeight board with every cell set to
// an Empty block carrying its own coordinates.
func NewBoard() Board {
	cells := make([][]*Block, BoardWidth)
	for x := range cells {
		column := make([]*Block, BoardHeight)
		for y := range column {
			column[y] = &Block{X: x, Y: y, Colour: Empty}
		}
		cells[x] = column
	}
	return Board{cells: cells}
}
// reset paints the border of the board grey: the top and bottom rows plus
// the left and right columns (corner cells are covered by both passes).
func (b *Board) reset() {
	top, bottom := 0, BoardHeight-1
	left, right := 0, BoardWidth-1
	for x := 0; x < BoardWidth; x++ {
		b.cells[x][top].Colour = Grey
		b.cells[x][bottom].Colour = Grey
	}
	for y := 0; y < BoardHeight; y++ {
		b.cells[left][y].Colour = Grey
		b.cells[right][y].Colour = Grey
	}
}
// canPlayerFitAt reports whether the player's shape can occupy anchor
// position (x, y) without overlapping any non-empty board cell or leaving
// the board. Returns false when the player has no shape or the anchor is
// out of bounds.
func (b *Board) canPlayerFitAt(player *Player, x, y int) bool {
	if player.shape == nil {
		return false
	}
	if x < 0 || x > BoardWidth-1 {
		return false // out of bounds
	}
	if y < 0 || y > BoardHeight-1 {
		return false // out of bounds
	}
	blocks := player.shape.GetBlocks()
	for _, block := range blocks {
		// check board is empty for all player blocks
		blockX := x + block.X
		blockY := y + block.Y
		// Bounds-check each shape cell, not just the anchor: previously a
		// shape extending past the edge would index out of range and panic.
		if blockX < 0 || blockX >= BoardWidth || blockY < 0 || blockY >= BoardHeight {
			return false
		}
		if b.cells[blockX][blockY].Colour != Empty {
			return false
		}
	}
	return true
}
// addShapeToBoard stamps the player's shape onto the permanent board after a
// collision, translating each shape-relative block into board coordinates.
// Nil players or shapes without blocks are ignored.
func (b *Board) addShapeToBoard(player *Player) {
	/*
		Shape has collided so add to the permanent board
	*/
	if player == nil {
		return
	}
	blocks := player.shape.GetBlocks()
	if blocks == nil {
		return
	}
	for _, copiedBlock := range blocks {
		// Translate the shape-relative block into board coordinates.
		blockX := player.X + copiedBlock.X
		blockY := player.Y + copiedBlock.Y
		// NOTE(review): the blocks returned by GetBlocks are mutated here —
		// presumably they are copies; confirm, otherwise this corrupts the
		// shape's own block data.
		copiedBlock.X = blockX
		copiedBlock.Y = blockY
		b.cells[blockX][blockY].Colour = copiedBlock.Colour
	}
}
// checkCompleteRows finds every playable row with no empty cells, destroys
// them all, and returns how many were destroyed. The grey border rows and
// columns at the edges are excluded from the scan.
func (b *Board) checkCompleteRows() int {
	fullRows := make(map[int]bool)
	boardWidth := len(b.cells)
	boardHeight := len(b.cells[0])
	for y := 1; y < (boardHeight - 1); y++ {
		rowFull := true
		for x := 1; x < (boardWidth - 1); x++ {
			if b.cells[x][y].Colour == Empty {
				rowFull = false
				// One empty cell decides the row; stop scanning it. (The
				// original used `continue` here, which as the last statement
				// of the loop body was a no-op.)
				break
			}
		}
		if rowFull {
			// add full row to list
			fullRows[y] = true
		}
	}
	if len(fullRows) > 0 {
		b.destroyRows(fullRows)
	}
	return len(fullRows)
}
// destroyRows removes every row listed in rows: each destroyed row is
// overwritten by shifting all rows above it down one, and a fresh blank row
// appears at the top (see moveRowDown).
func (b *Board) destroyRows(rows map[int]bool) {
	// cells is indexed [x][y], so the board height is the length of a
	// column, not the number of columns. The previous code used
	// len(b.cells) — the board WIDTH — which is only correct for a square
	// board. (Sibling moveRowDown already computes height correctly.)
	boardHeight := len(b.cells[0])
	// Walk from the highest row index down so that shifting rows for one
	// destroyed row does not invalidate the indices of rows still pending.
	for y := boardHeight - 1; y >= 1; y-- {
		if rows[y] {
			// This is a row to destroy: move all rows above it down one.
			b.moveRowDown(y)
		}
	}
}
// moveRowDown overwrites row lastRow by shifting every row above it down one
// and blanking the topmost playable row (boardHeight-2; the border rows are
// untouched). Only Colour is copied, so each Block's X/Y stay consistent.
func (b *Board) moveRowDown(lastRow int) {
	// we don't "really" move the row down, we just copy the colour of the blocks to the row below
	boardWidth := len(b.cells)
	boardHeight := len(b.cells[0])
	firstRow := boardHeight - 2
	// new blank row at bottom
	// NOTE(review): when lastRow < firstRow the copy loop below immediately
	// overwrites this row, and when lastRow == firstRow the final blanking
	// does — this pass looks redundant; confirm before removing.
	for x := 1; x < boardWidth-1; x++ {
		b.cells[x][lastRow].Colour = Empty
	}
	for y := lastRow; y < firstRow; y++ {
		for x := 1; x < boardWidth-1; x++ {
			// move cell from row above
			b.cells[x][y].Colour = b.cells[x][y+1].Colour
		}
	}
	// new blank row at top
	for x := 1; x < boardWidth-1; x++ {
		b.cells[x][firstRow].Colour = Empty
	}
} | domain/board.go | 0.643441 | 0.488466 | board.go | starcoder
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.