code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package cc
import (
"errors"
"fmt"
"strings"
)
/*
Errs combines multiple errors. Returned by `cc.Conc.All`.

While `cc.Errs` does implement the `error` interface, you should never cast it
to `error`. Instead, call the method `cc.Errs.Err`, which will correctly return
a nil interface when all errors are nil.
*/
type Errs []error
// Error implements the `error` interface by joining the messages of all
// non-nil errors: empty when there are none, the single message when there is
// exactly one, and a combined message otherwise.
func (self Errs) Error() string {
	count := self.CountNonNil()
	if count == 0 {
		return ``
	}
	if count == 1 {
		return self.First().Error()
	}
	return self.format()
}
// Unwrap implements a hidden interface in the "errors" package.
// Alias for `cc.Errs.First`.
func (self Errs) Unwrap() error { return self.First() }

/*
Is implements a hidden interface in the "errors" package. Runs `errors.Is`
against each non-nil error, reporting success as soon as one matches.
*/
func (self Errs) Is(err error) bool {
	for _, cur := range self {
		if cur == nil {
			continue
		}
		if errors.Is(cur, err) {
			return true
		}
	}
	return false
}

/*
As implements a hidden interface in the "errors" package. Runs `errors.As`
against each non-nil error, reporting success as soon as one matches.
*/
func (self Errs) As(out interface{}) bool {
	for _, cur := range self {
		if cur == nil {
			continue
		}
		if errors.As(cur, out) {
			return true
		}
	}
	return false
}
/*
Err converts the collection to an `error`: nil when every entry is nil, the
single entry as-is when exactly one is non-nil, and the collection itself
otherwise. This is the only correct way to convert `cc.Errs` to `error`.
*/
func (self Errs) Err() error {
	count := self.CountNonNil()
	if count == 0 {
		return nil
	}
	if count == 1 {
		return self.First()
	}
	return self
}

// HasSome reports whether at least one entry is a non-nil error.
func (self Errs) HasSome() bool { return self.CountNonNil() > 0 }
// Count returns the number of entries for which the given predicate returns
// true. A nil predicate matches nothing.
func (self Errs) Count(fun func(error) bool) int {
	if fun == nil {
		return 0
	}
	count := 0
	for _, val := range self {
		if fun(val) {
			count++
		}
	}
	return count
}

// CountNil returns the number of nil entries.
func (self Errs) CountNil() int { return self.Count(isErrNil) }

// CountNonNil returns the number of non-nil entries.
func (self Errs) CountNonNil() int { return self.Count(isErrNonNil) }
// Find returns the first non-nil error that satisfies the given test, or nil
// when the test is nil or nothing matches.
func (self Errs) Find(fun func(error) bool) error {
	if fun == nil {
		return nil
	}
	for _, val := range self {
		if val == nil {
			continue
		}
		if fun(val) {
			return val
		}
	}
	return nil
}

// First returns the first non-nil error, or nil when there is none.
func (self Errs) First() error { return self.Find(isErrNonNil) }
// format renders a combined message for multiple non-nil errors, prefixed
// with a marker identifying this package.
func (self Errs) format() string {
	var buf strings.Builder
	buf.WriteString(`[cc] multiple errors`)
	for _, val := range self {
		if val != nil {
			buf.WriteString(`; `)
			buf.WriteString(val.Error())
		}
	}
	return buf.String()
}
// rec recovers from a panic, converting the recovered value to an error and
// storing it through the given pointer. Must be deferred. When nothing was
// recovered, the existing error is left untouched.
func rec(ptr *error) {
	if err := toErr(recover()); err != nil {
		*ptr = err
	}
}
// toErr converts an arbitrary value (typically from `recover`) to an error:
// nil input yields nil, an error input is returned as-is, and anything else
// is wrapped in `nonErr`.
func toErr(val interface{}) error {
	if val == nil {
		return nil
	}
	if err, ok := val.(error); ok && err != nil {
		return err
	}
	return nonErr{val}
}

// nonErr wraps a non-error panic value so it can travel as an `error`.
type nonErr [1]interface{}

// Error renders the wrapped value via `fmt.Sprint`; empty when the value is
// nil.
func (self nonErr) Error() string {
	if self[0] == nil {
		return ``
	}
	return fmt.Sprint(self[0])
}
// isErrNil reports whether the given error is nil.
func isErrNil(err error) bool { return err == nil }

// isErrNonNil reports whether the given error is non-nil.
func isErrNonNil(err error) bool { return err != nil }
package iso20022
// PaymentInstructionInformation3 is the set of characteristics that apply to the debit side of the payment transactions included in the credit transfer initiation.
type PaymentInstructionInformation3 struct {
	// Unique identification, as assigned by a sending party, to unambiguously identify the payment information group within the message.
	PaymentInformationIdentification *Max35Text `xml:"PmtInfId"`
	// Specifies the means of payment that will be used to move the amount of money.
	PaymentMethod *PaymentMethod3Code `xml:"PmtMtd"`
	// Identifies whether a single entry per individual transaction or a batch entry for the sum of the amounts of all transactions within the group of a message is requested.
	// Usage: Batch booking is used to request and not order a possible batch booking.
	BatchBooking *BatchBookingIndicator `xml:"BtchBookg,omitempty"`
	// Number of individual transactions contained in the payment information group.
	NumberOfTransactions *Max15NumericText `xml:"NbOfTxs,omitempty"`
	// Total of all individual amounts included in the group, irrespective of currencies.
	ControlSum *DecimalNumber `xml:"CtrlSum,omitempty"`
	// Set of elements used to further specify the type of transaction.
	PaymentTypeInformation *PaymentTypeInformation19 `xml:"PmtTpInf,omitempty"`
	// Date at which the initiating party requests the clearing agent to process the payment.
	// Usage: This is the date on which the debtor's account is to be debited. If payment by cheque, the date when the cheque must be generated by the bank.
	RequestedExecutionDate *ISODate `xml:"ReqdExctnDt"`
	// Date used for the correction of the value date of a cash pool movement that has been posted with a different value date.
	PoolingAdjustmentDate *ISODate `xml:"PoolgAdjstmntDt,omitempty"`
	// Party that owes an amount of money to the (ultimate) creditor.
	Debtor *PartyIdentification32 `xml:"Dbtr"`
	// Unambiguous identification of the account of the debtor to which a debit entry will be made as a result of the transaction.
	DebtorAccount *CashAccount16 `xml:"DbtrAcct"`
	// Financial institution servicing an account for the debtor.
	DebtorAgent *BranchAndFinancialInstitutionIdentification4 `xml:"DbtrAgt"`
	// Unambiguous identification of the account of the debtor agent at its servicing agent in the payment chain.
	DebtorAgentAccount *CashAccount16 `xml:"DbtrAgtAcct,omitempty"`
	// Ultimate party that owes an amount of money to the (ultimate) creditor.
	UltimateDebtor *PartyIdentification32 `xml:"UltmtDbtr,omitempty"`
	// Specifies which party/parties will bear the charges associated with the processing of the payment transaction.
	ChargeBearer *ChargeBearerType1Code `xml:"ChrgBr,omitempty"`
	// Account used to process charges associated with a transaction.
	//
	// Usage: Charges account should be used when charges have to be booked to an account different from the account identified in the debtor's account.
	ChargesAccount *CashAccount16 `xml:"ChrgsAcct,omitempty"`
	// Agent that services a charges account.
	//
	// Usage: Charges account agent should only be used when the charges account agent is different from the debtor agent.
	ChargesAccountAgent *BranchAndFinancialInstitutionIdentification4 `xml:"ChrgsAcctAgt,omitempty"`
	// Set of elements used to provide information on the individual transaction(s) included in the message.
	CreditTransferTransactionInformation []*CreditTransferTransactionInformation10 `xml:"CdtTrfTxInf"`
}
// SetPaymentInformationIdentification records the payment information group identification (PmtInfId).
func (p *PaymentInstructionInformation3) SetPaymentInformationIdentification(value string) {
	v := Max35Text(value)
	p.PaymentInformationIdentification = &v
}

// SetPaymentMethod records the means of payment (PmtMtd).
func (p *PaymentInstructionInformation3) SetPaymentMethod(value string) {
	v := PaymentMethod3Code(value)
	p.PaymentMethod = &v
}

// SetBatchBooking records the batch booking request indicator (BtchBookg).
func (p *PaymentInstructionInformation3) SetBatchBooking(value string) {
	v := BatchBookingIndicator(value)
	p.BatchBooking = &v
}

// SetNumberOfTransactions records the number of transactions in the group (NbOfTxs).
func (p *PaymentInstructionInformation3) SetNumberOfTransactions(value string) {
	v := Max15NumericText(value)
	p.NumberOfTransactions = &v
}

// SetControlSum records the control sum of all individual amounts (CtrlSum).
func (p *PaymentInstructionInformation3) SetControlSum(value string) {
	v := DecimalNumber(value)
	p.ControlSum = &v
}

// AddPaymentTypeInformation allocates the payment type information element (PmtTpInf) and returns it for population.
func (p *PaymentInstructionInformation3) AddPaymentTypeInformation() *PaymentTypeInformation19 {
	p.PaymentTypeInformation = new(PaymentTypeInformation19)
	return p.PaymentTypeInformation
}

// SetRequestedExecutionDate records the requested execution date (ReqdExctnDt).
func (p *PaymentInstructionInformation3) SetRequestedExecutionDate(value string) {
	v := ISODate(value)
	p.RequestedExecutionDate = &v
}

// SetPoolingAdjustmentDate records the pooling adjustment date (PoolgAdjstmntDt).
func (p *PaymentInstructionInformation3) SetPoolingAdjustmentDate(value string) {
	v := ISODate(value)
	p.PoolingAdjustmentDate = &v
}
// AddDebtor allocates the debtor element (Dbtr) and returns it for population.
func (p *PaymentInstructionInformation3) AddDebtor() *PartyIdentification32 {
	p.Debtor = new(PartyIdentification32)
	return p.Debtor
}

// AddDebtorAccount allocates the debtor account element (DbtrAcct) and returns it for population.
func (p *PaymentInstructionInformation3) AddDebtorAccount() *CashAccount16 {
	p.DebtorAccount = new(CashAccount16)
	return p.DebtorAccount
}

// AddDebtorAgent allocates the debtor agent element (DbtrAgt) and returns it for population.
func (p *PaymentInstructionInformation3) AddDebtorAgent() *BranchAndFinancialInstitutionIdentification4 {
	p.DebtorAgent = new(BranchAndFinancialInstitutionIdentification4)
	return p.DebtorAgent
}

// AddDebtorAgentAccount allocates the debtor agent account element (DbtrAgtAcct) and returns it for population.
func (p *PaymentInstructionInformation3) AddDebtorAgentAccount() *CashAccount16 {
	p.DebtorAgentAccount = new(CashAccount16)
	return p.DebtorAgentAccount
}

// AddUltimateDebtor allocates the ultimate debtor element (UltmtDbtr) and returns it for population.
func (p *PaymentInstructionInformation3) AddUltimateDebtor() *PartyIdentification32 {
	p.UltimateDebtor = new(PartyIdentification32)
	return p.UltimateDebtor
}

// SetChargeBearer records which party bears the transaction charges (ChrgBr).
func (p *PaymentInstructionInformation3) SetChargeBearer(value string) {
	v := ChargeBearerType1Code(value)
	p.ChargeBearer = &v
}

// AddChargesAccount allocates the charges account element (ChrgsAcct) and returns it for population.
func (p *PaymentInstructionInformation3) AddChargesAccount() *CashAccount16 {
	p.ChargesAccount = new(CashAccount16)
	return p.ChargesAccount
}

// AddChargesAccountAgent allocates the charges account agent element (ChrgsAcctAgt) and returns it for population.
func (p *PaymentInstructionInformation3) AddChargesAccountAgent() *BranchAndFinancialInstitutionIdentification4 {
	p.ChargesAccountAgent = new(BranchAndFinancialInstitutionIdentification4)
	return p.ChargesAccountAgent
}
func (p *PaymentInstructionInformation3) AddCreditTransferTransactionInformation() *CreditTransferTransactionInformation10 {
newValue := new (CreditTransferTransactionInformation10)
p.CreditTransferTransactionInformation = append(p.CreditTransferTransactionInformation, newValue)
return newValue
} | PaymentInstructionInformation3.go | 0.812533 | 0.486454 | PaymentInstructionInformation3.go | starcoder |
package rel
import "github.com/arr-ai/frozen"
// CombineOp specifies which pairings to include in Combine().
type CombineOp int

// The following masks control which elements to include in Combine().
const (
	OnlyOnLHS CombineOp = 1 << iota // Include elements only found on lhs.
	InBoth // Include elements found on both sides.
	OnlyOnRHS // Include elements only found on rhs.
	// AllPairs includes every element from both sides.
	AllPairs = OnlyOnLHS | InBoth | OnlyOnRHS
)

// Pair represents a pair of values; either side may be nil when the
// corresponding tuple lacks the attribute.
type Pair struct {
	a, b Value
}
// Combine returns a map from attribute name to the pair of corresponding
// Values from a and b. The op mask determines which names are included:
// names only in a (OnlyOnLHS), names in both (InBoth), and names only in b
// (OnlyOnRHS). For one-sided names, the missing side of the Pair is nil.
func Combine(a, b Tuple, op CombineOp) map[string]Pair {
	result := make(map[string]Pair, a.Count()+b.Count())
	for e := a.Enumerator(); e.MoveNext(); {
		name, aValue := e.Current()
		bValue, inBoth := b.Get(name)
		include := (inBoth && op&InBoth != 0) || (!inBoth && op&OnlyOnLHS != 0)
		if include {
			result[name] = Pair{aValue, bValue}
		}
	}
	for e := b.Enumerator(); e.MoveNext(); {
		name, bValue := e.Current()
		_, inA := a.Get(name)
		if !inA && op&OnlyOnRHS != 0 {
			result[name] = Pair{nil, bValue}
		}
	}
	return result
}
// CombineNames returns the set of attribute names selected from a and b by
// the given mask.
func CombineNames(a, b Tuple, op CombineOp) Names {
	var builder frozen.SetBuilder
	for name := range Combine(a, b, op) {
		builder.Add(name)
	}
	return Names(builder.Finish())
}
// Merge returns the merger of a and b, or nil when they conflict. Merging
// succeeds only when every name common to both tuples maps to equal values.
func Merge(a, b Tuple) Tuple {
	attrs := []Attr{}
	for name, pair := range Combine(a, b, AllPairs) {
		switch {
		case pair.a == nil:
			attrs = append(attrs, NewAttr(name, pair.b))
		case pair.b == nil || pair.a.Equal(pair.b):
			attrs = append(attrs, NewAttr(name, pair.a))
		default:
			// Conflicting values for a shared name: no merger exists.
			return nil
		}
	}
	return NewTuple(attrs...)
}
// MergeLeftToRight returns the merger of a and b. Key from tuples to the right
// override tuples to the left.
func MergeLeftToRight(t Tuple, ts ...Tuple) Tuple {
for _, u := range ts {
for e := u.Enumerator(); e.MoveNext(); {
name, value := e.Current()
t = t.With(name, value)
}
}
return t
} | rel/ops_tuple.go | 0.747339 | 0.471649 | ops_tuple.go | starcoder |
package asset
import (
gl "github.com/go-gl/gl/v4.1-core/gl"
"github.com/go-gl/mathgl/mgl32"
)
// Material describes surface shading properties. Each channel (ambient,
// diffuse, specular) is either a flat color or, when the corresponding map
// texture is non-nil, sampled from that texture (see Bind).
type Material struct {
	Ambient mgl32.Vec4
	Diffuse mgl32.Vec4
	Specular mgl32.Vec4
	AmbientMap *Texture
	DiffuseMap *Texture
	SpecularMap *Texture
}

// MaterialData is the serializable description used to construct a Material;
// the *Map fields are file paths (empty means no texture for that channel).
type MaterialData struct {
	Ambient mgl32.Vec4
	Diffuse mgl32.Vec4
	Specular mgl32.Vec4
	AmbientMap string
	DiffuseMap string
	SpecularMap string
}

const (
	// PositionAttrID is the attribute ID of _Position in GLSL
	PositionAttrID uint32 = 0
	// NormalAttrID is the attribute ID of _Normal in GLSL
	NormalAttrID uint32 = 1
	// TexCoordAttrID is the attribute ID of _TexCoord in GLSL
	TexCoordAttrID uint32 = 2
)
// NewMaterial builds a Material from the given description, loading any
// texture maps referenced by a non-empty file path. On a load failure, any
// textures already loaded are released before the error is returned (the
// original code leaked them).
func NewMaterial(data *MaterialData) (*Material, error) {
	m := &Material{
		Ambient:  data.Ambient,
		Diffuse:  data.Diffuse,
		Specular: data.Specular,
	}
	// load fetches one optional texture map into its destination field.
	load := func(path string, dst **Texture) error {
		if path == "" {
			return nil
		}
		tex, err := NewTextureFromFile(path)
		if err != nil {
			return err
		}
		*dst = tex
		return nil
	}
	maps := []struct {
		path string
		dst  **Texture
	}{
		{data.AmbientMap, &m.AmbientMap},
		{data.DiffuseMap, &m.DiffuseMap},
		{data.SpecularMap, &m.SpecularMap},
	}
	for _, tm := range maps {
		if err := load(tm.path, tm.dst); err != nil {
			// Free any textures loaded before the failure so GL resources
			// aren't leaked.
			m.Delete()
			return nil, err
		}
	}
	return m, nil
}
// Delete frees all GL texture resources owned by the Material and clears the
// corresponding fields, making repeated calls safe.
func (m *Material) Delete() {
	for _, tex := range []**Texture{&m.AmbientMap, &m.DiffuseMap, &m.SpecularMap} {
		if *tex != nil {
			(*tex).Delete()
			*tex = nil
		}
	}
}
// Bind uploads the material's shading parameters to the given shader. For
// each channel: the sampler uniform always points at its texture unit; when
// a map texture is present it is bound to that unit and the flat color
// uniform is zeroed, otherwise the flat color is uploaded.
func (m *Material) Bind(s *Shader) {
	zero := []float32{0, 0, 0, 0}
	channel := func(unit uint32, slot int32, tex *Texture, mapName, colorName string, color *mgl32.Vec4) {
		gl.Uniform1i(s.GetUniformLocation(mapName), slot)
		if tex != nil {
			gl.ActiveTexture(unit)
			tex.Bind()
			gl.Uniform4fv(s.GetUniformLocation(colorName), 1, &zero[0])
			return
		}
		gl.Uniform4fv(s.GetUniformLocation(colorName), 1, &color[0])
	}
	channel(gl.TEXTURE0, 0, m.AmbientMap, "uAmbientMap", "uAmbient", &m.Ambient)
	channel(gl.TEXTURE1, 1, m.DiffuseMap, "uDiffuseMap", "uDiffuse", &m.Diffuse)
	channel(gl.TEXTURE2, 2, m.SpecularMap, "uSpecularMap", "uSpecular", &m.Specular)
}
func (m *Material) UnBind() {
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, 0)
gl.ActiveTexture(gl.TEXTURE1)
gl.BindTexture(gl.TEXTURE_2D, 0)
gl.ActiveTexture(gl.TEXTURE2)
gl.BindTexture(gl.TEXTURE_2D, 0)
} | asset/Material.go | 0.588416 | 0.403244 | Material.go | starcoder |
package tiff
import (
"image"
"image/color"
)
// Gray32 is an in-memory image whose At method returns Gray32Color values.
type Gray32 struct {
	// Pix holds the image's pixels as 32-bit gray values. The pixel at
	// (x, y) is Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)].
	Pix []uint32
	// Stride is the Pix stride between vertically adjacent pixels, measured
	// in uint32 elements (i.e. pixels), not bytes — see PixOffset and
	// NewGray32, which sets it to the image width.
	Stride int
	// Rect is the image's bounds.
	Rect image.Rectangle
}
// ColorModel returns the 32-bit grayscale color model.
func (p *Gray32) ColorModel() color.Model { return Gray32Model }

// Bounds returns the image's bounding rectangle.
func (p *Gray32) Bounds() image.Rectangle { return p.Rect }

// At returns the color of the pixel at (x, y).
func (p *Gray32) At(x, y int) color.Color {
	return p.Gray32At(x, y)
}

// Gray32At returns the Gray32Color of the pixel at (x, y); coordinates
// outside the bounds yield the zero color.
func (p *Gray32) Gray32At(x, y int) Gray32Color {
	if !image.Pt(x, y).In(p.Rect) {
		return Gray32Color{}
	}
	return Gray32Color{p.Pix[p.PixOffset(x, y)]}
}
// PixOffset returns the index into Pix of the pixel at (x, y).
func (p *Gray32) PixOffset(x, y int) int {
	return (y-p.Rect.Min.Y)*p.Stride + (x - p.Rect.Min.X)
}

// SetGray32 stores c at (x, y); coordinates outside the bounds are ignored.
func (p *Gray32) SetGray32(x, y int, c Gray32Color) {
	if !image.Pt(x, y).In(p.Rect) {
		return
	}
	p.Pix[p.PixOffset(x, y)] = c.Y
}
// SubImage returns an image representing the portion of p visible through r.
// The returned image shares pixel storage with p.
func (p *Gray32) SubImage(r image.Rectangle) image.Image {
	r = r.Intersect(p.Rect)
	// An empty intersection is not guaranteed to lie inside either source
	// rectangle, so bail out before computing a pixel offset from it (the
	// Pix[i:] expression below could otherwise panic).
	if r.Empty() {
		return &Gray32{}
	}
	offset := p.PixOffset(r.Min.X, r.Min.Y)
	return &Gray32{
		Pix:    p.Pix[offset:],
		Stride: p.Stride,
		Rect:   r,
	}
}
// Opaque reports whether the image is fully opaque; grayscale images always
// are.
func (p *Gray32) Opaque() bool { return true }

// NewGray32 returns a new, zero-filled Gray32 image with the given bounds.
func NewGray32(r image.Rectangle) *Gray32 {
	w, h := r.Dx(), r.Dy()
	return &Gray32{
		Pix:    make([]uint32, w*h),
		Stride: w,
		Rect:   r,
	}
}
// GrayFloat32 is an in-memory grayscale image. Pixel values are stored as
// uint32 words (presumably IEEE-754 float32 bit patterns, given the name —
// TODO confirm against the TIFF decoder that fills this type).
type GrayFloat32 struct {
	// Pix holds the image's pixels. The pixel at (x, y) is
	// Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)].
	Pix []uint32
	// Stride is the Pix stride between vertically adjacent pixels, measured
	// in uint32 elements (i.e. pixels), not bytes — see PixOffset.
	Stride int
	// Rect is the image's bounds.
	Rect image.Rectangle
}
// ColorModel returns the float-gray color model.
func (p *GrayFloat32) ColorModel() color.Model { return Gray32FloatModel }

// Bounds returns the image's bounding rectangle.
func (p *GrayFloat32) Bounds() image.Rectangle { return p.Rect }

// At returns the color of the pixel at (x, y).
func (p *GrayFloat32) At(x, y int) color.Color {
	return p.Gray32At(x, y)
}

// Gray32At returns the pixel at (x, y) as a Gray32Color; coordinates outside
// the bounds yield the zero color. NOTE(review): this mirrors Gray32 and
// returns Gray32Color rather than GrayFloat32Color, which looks inconsistent
// with SetGray32 below — confirm this is intentional.
func (p *GrayFloat32) Gray32At(x, y int) Gray32Color {
	if !image.Pt(x, y).In(p.Rect) {
		return Gray32Color{}
	}
	return Gray32Color{p.Pix[p.PixOffset(x, y)]}
}
// PixOffset returns the index into Pix of the pixel at (x, y).
func (p *GrayFloat32) PixOffset(x, y int) int {
	return (y-p.Rect.Min.Y)*p.Stride + (x - p.Rect.Min.X)
}

// SetGray32 stores c at (x, y); coordinates outside the bounds are ignored.
func (p *GrayFloat32) SetGray32(x, y int, c GrayFloat32Color) {
	if !image.Pt(x, y).In(p.Rect) {
		return
	}
	p.Pix[p.PixOffset(x, y)] = c.Y
}
// SubImage returns an image representing the portion of p visible through r.
// The returned image shares pixel storage with p.
func (p *GrayFloat32) SubImage(r image.Rectangle) image.Image {
	r = r.Intersect(p.Rect)
	// An empty intersection is not guaranteed to lie inside either source
	// rectangle, so bail out before computing a pixel offset from it (the
	// Pix[i:] expression below could otherwise panic).
	if r.Empty() {
		return &GrayFloat32{}
	}
	offset := p.PixOffset(r.Min.X, r.Min.Y)
	return &GrayFloat32{
		Pix:    p.Pix[offset:],
		Stride: p.Stride,
		Rect:   r,
	}
}
// Opaque scans the entire image and reports whether it is fully opaque.
func (p *GrayFloat32) Opaque() bool {
return true
}
// NewGrayFloat32 returns a new Gray16 image with the given bounds.
func NewGrayFloat32(r image.Rectangle) *GrayFloat32 {
w, h := r.Dx(), r.Dy()
pix := make([]uint32, w*h)
return &GrayFloat32{pix, w, r}
} | image.go | 0.891363 | 0.682232 | image.go | starcoder |
package event
import (
"time"
"github.com/stnma7e/betuol/common"
)
// EventListener is a callback invoked synchronously for each dispatched event.
type EventListener func(evt Event)

// Event is an interface to allow many different structs to be used as events
// as long as they report a type name via GetType.
type Event interface {
	GetType() string
}

// EventMessage pairs an event with the time it was submitted to the queue.
// NOTE(review): the timestamp is currently recorded by Send but never used
// for ordering — see the TODO in Sort.
type EventMessage struct {
	time time.Time
	evt Event
}
// EventManager receives incoming events and stores them until they are
// dispatched to registered listener functions and channels.
//
// Events arrive on eventlink and are buffered by the Sort goroutine into one
// of the two eventList queues; Tick drains the other queue, with changeQueue
// selecting which queue is currently being filled.
//
// NOTE(review): changeQueue and the eventList vectors are accessed by both
// the Sort goroutine and the caller of Tick without synchronization —
// confirm this is externally serialized, otherwise it is a data race.
type EventManager struct {
	eventlink chan EventMessage
	listenerMap map[string]*common.Vector
	listeningChannels map[string]*common.Vector
	eventList [2]*common.Vector
	changeQueue bool
}
// MakeEventManager constructs an EventManager and starts its background Sort
// goroutine, which runs for the lifetime of the process (there is currently
// no way to stop it).
func MakeEventManager() *EventManager {
	em := &EventManager{
		eventlink:         make(chan EventMessage),
		listenerMap:       make(map[string]*common.Vector),
		listeningChannels: make(map[string]*common.Vector),
		eventList:         [2]*common.Vector{common.MakeVector(), common.MakeVector()},
	}
	go em.Sort()
	return em
}
// Sort receives events from the eventlink channel and appends each non-nil
// event to the queue currently being filled (selected by changeQueue).
//
// TODO: despite the name, no time-based ordering is implemented yet — the
// EventMessage timestamp is discarded and events are kept in arrival order.
// NOTE(review): changeQueue and eventList are also touched by Tick on a
// different goroutine without locking; confirm external serialization.
func (em *EventManager) Sort() {
	// need to implement some sorting scheme to order events by the time that they were created
	for {
		evtMsg := <-em.eventlink
		if evtMsg.evt == nil {
			continue
		}
		// changeQueue selects which of the two queues Tick is NOT draining.
		if em.changeQueue {
			em.eventList[0].Insert(evtMsg.evt)
		} else {
			em.eventList[1].Insert(evtMsg.evt)
		}
	}
}
// Tick dispatches all queued events to the listeners registered for each
// event's type. Function listeners and channel listeners both receive the
// event during the same tick; channel sends are non-blocking, so a full or
// unready channel silently misses the event. The delta parameter is
// currently unused.
func (em *EventManager) Tick(delta float64) {
	var events []interface{}
	// Swap queues: drain the queue Sort was filling and direct new arrivals
	// to the other one.
	if em.changeQueue {
		em.changeQueue = false
		events = em.eventList[0].Array()
		em.eventList[0].Empty()
	} else {
		em.changeQueue = true
		events = em.eventList[1].Array()
		em.eventList[1].Empty()
	}
	for i := range events {
		evt := events[i].(Event)
		common.LogInfo.Printf("new %s event: %v\n", evt.GetType(), evt)
		// NOTE(review): apparent debug leftover — logs a bare "here" for one
		// specific event type; consider removing.
		if evt.GetType() == "requestCharacterCreationEvent" {
			common.LogInfo.Println("here")
		}
		// Invoke every listener function registered for this event type.
		listeners, ok := em.listenerMap[evt.GetType()]
		if !ok {
			//common.LogWarn.Printf("no listener registered for %s", evt.GetType())
		} else {
			listenersArray := listeners.Array()
			for j := range listenersArray {
				listenersArray[j].(EventListener)(evt)
			}
		}
		// Offer the event to every registered channel; drop it for channels
		// that are not ready to receive.
		channels, ok := em.listeningChannels[evt.GetType()]
		if !ok {
			//common.LogWarn.Printf("no channel registered for %s", evt.GetType())
		} else {
			channelsArray := channels.Array()
			for j := range channelsArray {
				select {
				case channelsArray[j].(chan Event) <- evt:
				default:
					//common.LogWarn.Printf("channel %d missed %v event %v", j, evt.GetType(), evt)
				}
			}
		}
	}
}
// RegisterListeningFunction registers a listener function to be called for
// every processed event whose type matches one of eventType. (The original
// doc comment named the wrong function, and the nil-map-value check
// duplicated the missing-key check; a missing key yields a nil value, so one
// test covers both.)
func (em *EventManager) RegisterListeningFunction(listener EventListener, eventType ...string) {
	for _, evtType := range eventType {
		if em.listenerMap[evtType] == nil {
			em.listenerMap[evtType] = common.MakeVector()
		}
		em.listenerMap[evtType].Insert(listener)
	}
}
// RegisterListeningChannel registers a channel to receive every processed
// event whose type matches one of eventType. Delivery during Tick is
// non-blocking: if the channel is not ready, the event is dropped for that
// channel. (The redundant nil-map-value check is collapsed into the single
// nil test, which also covers a missing key.)
func (em *EventManager) RegisterListeningChannel(eventlink chan Event, eventType ...string) {
	for _, evtType := range eventType {
		if em.listeningChannels[evtType] == nil {
			em.listeningChannels[evtType] = common.MakeVector()
		}
		em.listeningChannels[evtType].Insert(eventlink)
	}
}
// Send will add an event passed as an argument to the event queue to be processed.
func (em *EventManager) Send(evt Event) {
em.eventlink <- EventMessage{time.Now(), evt}
} | event/manager.go | 0.543833 | 0.412589 | manager.go | starcoder |
package operator
import (
"github.com/matrixorigin/matrixone/pkg/container/nulls"
"github.com/matrixorigin/matrixone/pkg/container/types"
"github.com/matrixorigin/matrixone/pkg/container/vector"
"github.com/matrixorigin/matrixone/pkg/vectorize/ge"
"github.com/matrixorigin/matrixone/pkg/vectorize/le"
"github.com/matrixorigin/matrixone/pkg/vm/process"
)
// gequal reports whether d1 >= d2 for any ordered scalar type. The scale
// arguments are unused for non-decimal types.
func gequal[T OrderedValue](d1, d2 interface{}, aScale, bScale int32) bool {
	return d1.(T) >= d2.(T)
}

// gequal_B reports whether d1 >= d2 for booleans under the ordering
// false < true: it holds unless d1 is false and d2 is true.
func gequal_B(d1, d2 interface{}, aScale, bScale int32) bool {
	lhs, rhs := d1.(bool), d2.(bool)
	return lhs || !rhs
}

// gequal_D reports whether d1 >= d2 for Decimal128 values at the given
// scales.
func gequal_D(d1, d2 interface{}, aScale, bScale int32) bool {
	lhs, rhs := d1.(types.Decimal128), d2.(types.Decimal128)
	return types.CompareDecimal128Decimal128(lhs, rhs, aScale, bScale) >= 0
}
// GeOpFuncMap maps a data-type ID to its scalar ">=" comparator; populated
// by InitGeOpFuncMap from GeOpFuncVec.
var GeOpFuncMap = map[int]CompOpFunc{}

// GeOpFuncVec lists the scalar ">=" comparators in data-type-ID order (see
// GetDatatypeID / GeDataValue for how the index is derived).
var GeOpFuncVec = []CompOpFunc{
	gequal[int8], gequal[int16], gequal[int32], gequal[int64], gequal[uint8], gequal[uint16], gequal[uint32],
	gequal[uint64], gequal[float32], gequal[float64], gequal[string], gequal_B, gequal[types.Date],
	gequal[types.Datetime], gequal[types.Decimal64], gequal_D,
}

// InitGeOpFuncMap populates GeOpFuncMap from GeOpFuncVec, keyed by index.
func InitGeOpFuncMap() {
	for i := 0; i < len(GeOpFuncVec); i++ {
		GeOpFuncMap[i] = GeOpFuncVec[i]
	}
}

// StrGeOpFuncMap maps an operand-shape index to the string ">="
// implementation; populated by InitStrGeOpFuncMap.
var StrGeOpFuncMap = map[int]StrCompOpFunc{}

// StrGeOpFuncVec lists the string comparators in operand-shape order:
// col-col, col-const, const-col, const-const.
var StrGeOpFuncVec = []StrCompOpFunc{
	gequalCol_Col, gequalCol_Const, gequalConst_Col, gequalConst_Const,
}
// gequalCol_Col evaluates ">=" between two string columns. The vectorized
// kernel returns the row indices where the comparison holds; the loop below
// expands that (ascending) selection into a dense boolean column.
func gequalCol_Col(d1, d2 interface{}) []bool {
	left, right := d1.(*types.Bytes), d2.(*types.Bytes)
	sels := make([]int64, len(left.Lengths))
	sels = ge.StrGe(left, right, sels)
	result := make([]bool, len(left.Lengths))
	next := 0
	for row := 0; row < len(result); row++ {
		if next >= len(sels) {
			break
		}
		if int64(row) == sels[next] {
			result[row] = true
			next++
		}
	}
	return result
}

// gequalCol_Const evaluates "column >= constant" for strings by computing
// the equivalent "constant <= column" with the vectorized Le kernel, then
// expanding the selected row indices into a dense boolean column.
func gequalCol_Const(d1, d2 interface{}) []bool {
	left, right := d1.(*types.Bytes), d2.(*types.Bytes)
	sels := make([]int64, len(left.Lengths))
	sels = le.StrLeScalar(right.Data, left, sels)
	result := make([]bool, len(left.Lengths))
	next := 0
	for row := 0; row < len(result); row++ {
		if next >= len(sels) {
			break
		}
		if int64(row) == sels[next] {
			result[row] = true
			next++
		}
	}
	return result
}
// gequalConst_Col evaluates "constant >= column" for strings, expanding the
// vectorized kernel's selected row indices into a dense boolean column.
func gequalConst_Col(d1, d2 interface{}) []bool {
	left, right := d1.(*types.Bytes), d2.(*types.Bytes)
	sels := make([]int64, len(right.Lengths))
	sels = ge.StrGeScalar(left.Data, right, sels)
	result := make([]bool, len(right.Lengths))
	next := 0
	for row := 0; row < len(result); row++ {
		if next >= len(sels) {
			break
		}
		if int64(row) == sels[next] {
			result[row] = true
			next++
		}
	}
	return result
}

// gequalConst_Const compares two scalar strings, yielding a single-element
// column.
func gequalConst_Const(d1, d2 interface{}) []bool {
	left, right := d1.(*types.Bytes), d2.(*types.Bytes)
	return []bool{string(left.Data) >= string(right.Data)}
}
// InitStrGeOpFuncMap populates StrGeOpFuncMap from StrGeOpFuncVec, keyed by
// vector index.
func InitStrGeOpFuncMap() {
	for i, fn := range StrGeOpFuncVec {
		StrGeOpFuncMap[i] = fn
	}
}
// ColGeCol evaluates ">=" between two column vectors, producing a boolean
// vector whose null bitmap is the union of both inputs' nulls.
func ColGeCol[T DataValue](lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	length := GetRetColLen[T](lv)
	vec, err := proc.AllocVector(proc.GetBoolTyp(lv.Typ), int64(length))
	if err != nil {
		return nil, err
	}
	nulls.Or(lv.Nsp, rv.Nsp, vec.Nsp)
	vector.SetCol(vec, GetRetCol[T](lv, rv, col_col, GeOpFuncMap, StrGeOpFuncMap))
	return vec, nil
}

// ColGeConst evaluates "column >= constant", producing a boolean vector
// whose null bitmap is the union of both inputs' nulls.
func ColGeConst[T DataValue](lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	length := GetRetColLen[T](lv)
	vec, err := proc.AllocVector(proc.GetBoolTyp(lv.Typ), int64(length))
	if err != nil {
		return nil, err
	}
	nulls.Or(lv.Nsp, rv.Nsp, vec.Nsp)
	vector.SetCol(vec, GetRetCol[T](lv, rv, col_const, GeOpFuncMap, StrGeOpFuncMap))
	return vec, nil
}
// ColGeNull: any comparison with a NULL operand yields a scalar null vector.
func ColGeNull[T DataValue](lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	return proc.AllocScalarNullVector(proc.GetBoolTyp(lv.Typ)), nil
}

// ConstGeCol evaluates "constant >= column", producing a boolean vector
// whose null bitmap is the union of both inputs' nulls.
func ConstGeCol[T DataValue](lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	length := GetRetColLen[T](lv)
	vec, err := proc.AllocVector(proc.GetBoolTyp(lv.Typ), int64(length))
	if err != nil {
		return nil, err
	}
	nulls.Or(lv.Nsp, rv.Nsp, vec.Nsp)
	vector.SetCol(vec, GetRetCol[T](lv, rv, const_col, GeOpFuncMap, StrGeOpFuncMap))
	return vec, nil
}

// ConstGeConst compares two scalar operands, producing a scalar boolean
// vector.
func ConstGeConst[T DataValue](lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	vec := proc.AllocScalarVector(proc.GetBoolTyp(lv.Typ))
	vector.SetCol(vec, GetRetCol[T](lv, rv, const_const, GeOpFuncMap, StrGeOpFuncMap))
	return vec, nil
}

// ConstGeNull, NullGeCol, NullGeConst and NullGeNull: any comparison
// involving a NULL operand yields a scalar null vector.
func ConstGeNull[T DataValue](lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	return proc.AllocScalarNullVector(proc.GetBoolTyp(lv.Typ)), nil
}

func NullGeCol[T DataValue](lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	return proc.AllocScalarNullVector(proc.GetBoolTyp(lv.Typ)), nil
}

func NullGeConst[T DataValue](lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	return proc.AllocScalarNullVector(proc.GetBoolTyp(lv.Typ)), nil
}

func NullGeNull[T DataValue](lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	return proc.AllocScalarNullVector(proc.GetBoolTyp(lv.Typ)), nil
}
// GeFunc is the signature of a vector-level ">=" implementation.
type GeFunc = func(lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error)

// GeFuncMap maps a combined (operand-shape, element-type) index to its
// implementation; populated by InitGeFuncMap. The index layout is
// (lt*3+rt)*dataTypeNum+dataID — see GeDataValue.
var GeFuncMap = map[int]GeFunc{}

// GeFuncVec enumerates every combination of operand shape (col/const/null on
// each side) and element type, in the order expected by the index formula
// above.
var GeFuncVec = []GeFunc{
	ColGeCol[int8], ColGeCol[int16], ColGeCol[int32], ColGeCol[int64], ColGeCol[uint8], ColGeCol[uint16],
	ColGeCol[uint32], ColGeCol[uint64], ColGeCol[float32], ColGeCol[float64], ColGeCol[string], ColGeCol[bool],
	ColGeCol[types.Date], ColGeCol[types.Datetime], ColGeCol[types.Decimal64], ColGeCol[types.Decimal128],
	ColGeConst[int8], ColGeConst[int16], ColGeConst[int32], ColGeConst[int64], ColGeConst[uint8], ColGeConst[uint16],
	ColGeConst[uint32], ColGeConst[uint64], ColGeConst[float32], ColGeConst[float64], ColGeConst[string], ColGeConst[bool],
	ColGeConst[types.Date], ColGeConst[types.Datetime], ColGeConst[types.Decimal64], ColGeConst[types.Decimal128],
	ColGeNull[int8], ColGeNull[int16], ColGeNull[int32], ColGeNull[int64], ColGeNull[uint8], ColGeNull[uint16],
	ColGeNull[uint32], ColGeNull[uint64], ColGeNull[float32], ColGeNull[float64], ColGeNull[string], ColGeNull[bool],
	ColGeNull[types.Date], ColGeNull[types.Datetime], ColGeNull[types.Decimal64], ColGeNull[types.Decimal128],
	ConstGeCol[int8], ConstGeCol[int16], ConstGeCol[int32], ConstGeCol[int64], ConstGeCol[uint8], ConstGeCol[uint16],
	ConstGeCol[uint32], ConstGeCol[uint64], ConstGeCol[float32], ConstGeCol[float64], ConstGeCol[string], ConstGeCol[bool],
	ConstGeCol[types.Date], ConstGeCol[types.Datetime], ConstGeCol[types.Decimal64], ConstGeCol[types.Decimal128],
	ConstGeConst[int8], ConstGeConst[int16], ConstGeConst[int32], ConstGeConst[int64], ConstGeConst[uint8], ConstGeConst[uint16],
	ConstGeConst[uint32], ConstGeConst[uint64], ConstGeConst[float32], ConstGeConst[float64], ConstGeConst[string], ConstGeConst[bool],
	ConstGeConst[types.Date], ConstGeConst[types.Datetime], ConstGeConst[types.Decimal64], ConstGeConst[types.Decimal128],
	ConstGeNull[int8], ConstGeNull[int16], ConstGeNull[int32], ConstGeNull[int64], ConstGeNull[uint8], ConstGeNull[uint16],
	ConstGeNull[uint32], ConstGeNull[uint64], ConstGeNull[float32], ConstGeNull[float64], ConstGeNull[string], ConstGeNull[bool],
	ConstGeNull[types.Date], ConstGeNull[types.Datetime], ConstGeNull[types.Decimal64], ConstGeNull[types.Decimal128],
	NullGeCol[int8], NullGeCol[int16], NullGeCol[int32], NullGeCol[int64], NullGeCol[uint8], NullGeCol[uint16],
	NullGeCol[uint32], NullGeCol[uint64], NullGeCol[float32], NullGeCol[float64], NullGeCol[string], NullGeCol[bool],
	NullGeCol[types.Date], NullGeCol[types.Datetime], NullGeCol[types.Decimal64], NullGeCol[types.Decimal128],
	NullGeConst[int8], NullGeConst[int16], NullGeConst[int32], NullGeConst[int64], NullGeConst[uint8], NullGeConst[uint16],
	NullGeConst[uint32], NullGeConst[uint64], NullGeConst[float32], NullGeConst[float64], NullGeConst[string], NullGeConst[bool],
	NullGeConst[types.Date], NullGeConst[types.Datetime], NullGeConst[types.Decimal64], NullGeConst[types.Decimal128],
	NullGeNull[int8], NullGeNull[int16], NullGeNull[int32], NullGeNull[int64], NullGeNull[uint8], NullGeNull[uint16],
	NullGeNull[uint32], NullGeNull[uint64], NullGeNull[float32], NullGeNull[float64], NullGeNull[string], NullGeNull[bool],
	NullGeNull[types.Date], NullGeNull[types.Datetime], NullGeNull[types.Decimal64], NullGeNull[types.Decimal128],
}
// InitGeFuncMap initializes the scalar comparator tables and then populates
// GeFuncMap from GeFuncVec, keyed by vector index.
func InitGeFuncMap() {
	InitGeOpFuncMap()
	InitStrGeOpFuncMap()
	for i, fn := range GeFuncVec {
		GeFuncMap[i] = fn
	}
}
func GeDataValue[T DataValue](vectors []*vector.Vector, proc *process.Process) (*vector.Vector, error) {
lv := vectors[0]
rv := vectors[1]
lt, rt := GetTypeID(lv), GetTypeID(rv)
dataID := GetDatatypeID[T]()
vec, err := GeFuncMap[(lt*3+rt)*dataTypeNum+dataID](lv, rv, proc)
if err != nil {
return nil, err
}
return vec, nil
} | pkg/sql/plan2/function/operator/ge.go | 0.606848 | 0.450601 | ge.go | starcoder |
package tracker
import (
"encoding/json"
"math"
"strings"
"time"
"github.com/racingmars/flighttrack/decoder"
"github.com/rs/zerolog/log"
)
// sweepInterval is the period between sweeps of the flight table (see
// sweepIfNeeded; the sweep logic is outside this chunk).
const sweepInterval = 30 * time.Second
// decayTime is presumably how long a flight may go unseen before it is
// closed — TODO confirm against the sweep implementation (not shown here).
const decayTime = 5 * time.Minute
// reportMinInterval presumably throttles per-flight track reports — TODO
// confirm against the report implementation (not shown here).
const reportMinInterval = 5 * time.Second
// The *Epsilon constants are presumably the minimum change in heading,
// speed, vertical speed, altitude and distance (NM) before a new track point
// is worth reporting — TODO confirm; the reporting logic is outside this
// chunk.
const headingEpsilon = 10
const speedEpsilon = 10
const vsEpsilon = 150
const altitudeEpsilon = 200
const distanceEpsilonNM = 10
// FlightHandler receives lifecycle and track callbacks from the Tracker:
// flight creation, flight closure (with total message count), identity
// (callsign/category) updates, and new track points.
type FlightHandler interface {
	NewFlight(icaoID string, firstSeen time.Time)
	CloseFlight(icaoID string, lastSeen time.Time, messages int)
	SetIdentity(icaoID, callsign string, category decoder.AircraftType, change bool)
	AddTrackPoint(icaoID string, trackPoint TrackLog)
}
// Tracker maintains the set of in-progress flights keyed by ICAO address,
// feeding lifecycle and track events to the registered FlightHandler.
type Tracker struct {
	// ForceReporting, when set, presumably bypasses the reporting epsilons —
	// TODO confirm against the report logic (not in this chunk).
	ForceReporting bool
	flights map[string]*flight
	handlers FlightHandler
	nextSweep time.Time
}
// flight accumulates per-aircraft state while the aircraft is being tracked.
type flight struct {
	IcaoID string
	FirstSeen time.Time
	LastSeen time.Time
	MessageCount int
	Callsign *string
	Category decoder.AircraftType
	// Last appears to be the most recently reported track log, with Current
	// accumulating not-yet-reported updates — TODO confirm against the
	// report logic (outside this chunk).
	Last TrackLog
	Current TrackLog
	// EvenFrame/OddFrame presumably hold the latest even/odd ADS-B position
	// frames needed for global position decoding — TODO confirm.
	EvenFrame *decoder.AdsbPosition
	OddFrame *decoder.AdsbPosition
	// PendingChange marks an unreported change; CloseAllFlights flushes a
	// final report when it is set.
	PendingChange bool
}

// TrackLog is a snapshot of an aircraft's reported state at a point in
// time. Each XxxValid flag indicates whether the associated field(s) carry
// meaningful data.
type TrackLog struct {
	Time time.Time
	Heading int
	HeadingValid bool
	VS int
	VSValid bool
	PositionValid bool
	Latitude float64
	Longitude float64
	AltitudeValid bool
	Altitude int
	AltitudeType int
	SpeedValid bool
	Speed int
	SpeedType decoder.SpeedType
	SquawkValid bool
	Squawk string
	IdentityValid bool
	Callsign string
	Category decoder.AircraftType
}
// New creates a Tracker that reports through handler. When forceReporting
// is set, the per-flight report throttle is disabled.
func New(handler FlightHandler, forceReporting bool) *Tracker {
	return &Tracker{
		ForceReporting: forceReporting,
		flights:        make(map[string]*flight),
		handlers:       handler,
	}
}
// NewWithState creates a Tracker whose flight table is restored from a
// JSON snapshot previously produced by GetState.
func NewWithState(handler FlightHandler, forceReporting bool, trackerstate []byte) (*Tracker, error) {
	var restored map[string]*flight
	if err := json.Unmarshal(trackerstate, &restored); err != nil {
		return nil, err
	}
	return &Tracker{
		ForceReporting: forceReporting,
		flights:        restored,
		handlers:       handler,
	}, nil
}
// Message feeds one decoded message into the tracker. It opens a flight
// record on first contact with icaoID, updates bookkeeping counters,
// dispatches known ADS-B payload types to their handlers, and finally
// runs the periodic stale-flight sweep.
func (t *Tracker) Message(icaoID string, tm time.Time, msg interface{}) {
	flt, ok := t.flights[icaoID]
	if !ok {
		// First message from this aircraft: open a new flight.
		flt = &flight{IcaoID: icaoID, FirstSeen: tm}
		t.flights[icaoID] = flt
		t.handlers.NewFlight(icaoID, tm)
	}
	flt.LastSeen = tm
	flt.MessageCount++
	if msg != nil {
		// Unrecognized payload types still count as activity above but are
		// otherwise ignored.
		switch v := msg.(type) {
		case *decoder.AdsbIdentification:
			t.handleAdsbIdentification(icaoID, flt, tm, v)
		case *decoder.AdsbVelocity:
			t.handleAdsbVelocity(icaoID, flt, tm, v)
		case *decoder.AdsbPosition:
			t.handleAdsbPosition(icaoID, flt, tm, v)
		}
	}
	t.sweepIfNeeded(tm)
}
// CloseAllFlights flushes any pending track changes, notifies the handler
// that every flight is closed, and empties the flight table. Used at
// shutdown. (Deleting during range is safe in Go.)
func (t *Tracker) CloseAllFlights() {
	for id, flt := range t.flights {
		if flt.PendingChange {
			t.report(id, flt, flt.LastSeen, true)
		}
		t.handlers.CloseFlight(id, flt.LastSeen, flt.MessageCount)
		delete(t.flights, id)
	}
}
// handleAdsbIdentification processes a callsign/category broadcast. After
// validating the callsign it distinguishes three cases — first identity
// for the flight, first usable category, or a changed identity — each of
// which notifies the handler and forces an immediate report.
func (t *Tracker) handleAdsbIdentification(icaoID string, flt *flight, tm time.Time, msg *decoder.AdsbIdentification) {
	// If there are bad characters, ignore.
	if strings.Contains(msg.Callsign, "#") {
		log.Warn().Msgf("For %s, callsign %s/%d is invalid", icaoID, msg.Callsign, msg.Type)
		return
	}
	// If we already have an identification, and the new type is unknown (probably because it's a BDS2,0 message), use
	// the existing type.
	if flt.Current.IdentityValid && flt.Current.Category != decoder.ACTypeUnknown && msg.Type == decoder.ACTypeUnknown {
		msg.Type = flt.Current.Category
	}
	flt.Current.Time = tm
	flt.Current.IdentityValid = true
	flt.Current.Callsign = msg.Callsign
	flt.Current.Category = msg.Type
	// First time we have a callsign for this flight
	if flt.Callsign == nil {
		flt.Callsign = &msg.Callsign
		flt.Category = msg.Type
		t.handlers.SetIdentity(icaoID, *flt.Callsign, flt.Category, false)
		t.report(icaoID, flt, tm, true)
		return
	}
	// First time we have a good category for this flight
	if *flt.Callsign == msg.Callsign && flt.Category == decoder.ACTypeUnknown && msg.Type != decoder.ACTypeUnknown {
		flt.Category = msg.Type
		t.handlers.SetIdentity(icaoID, *flt.Callsign, flt.Category, false)
		t.report(icaoID, flt, tm, true)
		return
	}
	// This is a change in callsign or category; SetIdentity is told via its
	// final argument that this replaces an earlier identity.
	if *flt.Callsign != msg.Callsign || flt.Category != msg.Type {
		//log.Warn().Msgf("Callsign change for %s. Was %s/%d now %s/%d", icaoID, *flt.Callsign, flt.Category, msg.Callsign, msg.Type)
		flt.Callsign = &msg.Callsign
		flt.Category = msg.Type
		t.handlers.SetIdentity(icaoID, *flt.Callsign, flt.Category, true)
		t.report(icaoID, flt, tm, true)
		return
	}
}
// handleAdsbVelocity folds an ADS-B velocity message (heading, speed,
// vertical rate) into the flight's current state. Each quantity triggers
// an immediate report when first seen or when it moves by more than its
// epsilon; smaller changes only mark the flight dirty so they are flushed
// when the flight closes.
func (t *Tracker) handleAdsbVelocity(icaoID string, flt *flight, tm time.Time, msg *decoder.AdsbVelocity) {
	reportable := false
	flt.Current.Time = tm
	if !flt.Current.HeadingValid && msg.HeadingAvailable {
		// This is the first time we've received a heading.
		flt.Current.HeadingValid = true
		flt.Current.Heading = msg.Heading
		reportable = true
		flt.PendingChange = true
	} else if msg.HeadingAvailable {
		flt.Current.Heading = msg.Heading
		// Shortest angular distance between the old and new heading. A
		// plain abs(new-old) is wrong across the 0/360 wrap (359° -> 1° is
		// a 2° change, not 358°) — the previous normalize-to-±180 approach
		// had the same flaw near ±180 — so fold differences above 180°
		// back into [0, 180].
		difference := int(math.Abs(float64(flt.Current.Heading - flt.Last.Heading)))
		if difference > 180 {
			difference = 360 - difference
		}
		if difference > headingEpsilon {
			reportable = true
		}
		if difference > 0 {
			flt.PendingChange = true
		}
	}
	if !flt.Current.SpeedValid {
		// This is the first time we've received a speed.
		flt.Current.SpeedValid = true
		flt.Current.Speed = msg.Speed
		flt.Current.SpeedType = msg.SpeedType
		reportable = true
		flt.PendingChange = true
	} else {
		flt.Current.Speed = msg.Speed
		flt.Current.SpeedType = msg.SpeedType
		difference := int(math.Abs(float64(flt.Current.Speed - flt.Last.Speed)))
		if difference > speedEpsilon {
			reportable = true
		}
		if difference > 0 {
			flt.PendingChange = true
		}
	}
	// Let's consider +/-64 fpm to be noise around 0.
	vs := msg.VerticalRate
	if vs <= 64 && vs >= -64 {
		vs = 0
	}
	if !flt.Current.VSValid && msg.VerticalRateAvailable {
		// First vertical rate for this flight.
		flt.Current.VSValid = true
		flt.Current.VS = vs
		reportable = true
		flt.PendingChange = true
	} else if msg.VerticalRateAvailable {
		flt.Current.VS = vs
		difference := int(math.Abs(float64(flt.Current.VS - flt.Last.VS)))
		if difference > vsEpsilon {
			reportable = true
		}
		if difference > 0 {
			flt.PendingChange = true
		}
	}
	if reportable {
		t.report(icaoID, flt, tm, false)
	}
}
// handleAdsbPosition folds an airborne position message into the flight
// state: altitude immediately, and latitude/longitude once both a recent
// even and odd CPR frame are available for a global position decode.
func (t *Tracker) handleAdsbPosition(icaoID string, flt *flight, tm time.Time, msg *decoder.AdsbPosition) {
	reportable := false
	flt.Current.Time = tm
	if !flt.Current.AltitudeValid {
		// First altitude for this flight is always worth reporting.
		flt.Current.AltitudeValid = true
		reportable = true
		flt.PendingChange = true
	}
	flt.Current.Altitude = msg.Altitude
	difference := int(math.Abs((float64(flt.Current.Altitude - flt.Last.Altitude))))
	if difference > altitudeEpsilon {
		reportable = true
	}
	if difference > 0 {
		flt.PendingChange = true
	}
	// CPR position decoding needs one even (Frame == 0) and one odd frame.
	if msg.Frame == 0 {
		flt.EvenFrame = msg
	} else {
		flt.OddFrame = msg
	}
	if flt.EvenFrame != nil && flt.OddFrame != nil {
		timediff := flt.EvenFrame.Timestamp.Sub(flt.OddFrame.Timestamp)
		if timediff < 0 {
			timediff = -timediff
		}
		// Only combine frames received within 5s of each other.
		if timediff < 5*time.Second {
			if lat, lon, good := decoder.CalcPosition(*flt.OddFrame, *flt.EvenFrame); good {
				flt.Current.PositionValid = true
				flt.Current.Longitude = lon
				flt.Current.Latitude = lat
				if flt.Current.Longitude != flt.Last.Longitude || flt.Current.Latitude != flt.Last.Latitude {
					flt.PendingChange = true
				}
				if !flt.Last.PositionValid {
					// First decoded position is always reported.
					reportable = true
					flt.PendingChange = true
				} else {
					// Arguments: current lat, previous lat, current lon, previous lon.
					if math.Abs(distanceNM(lat, flt.Last.Latitude, lon, flt.Last.Longitude)) >= distanceEpsilonNM {
						reportable = true
					}
				}
			}
		}
	}
	if reportable {
		t.report(icaoID, flt, tm, false)
	}
}
// report emits the flight's current state as a track point unless the
// previous report was too recent. force (or the tracker-wide
// ForceReporting flag) bypasses the throttle.
func (t *Tracker) report(icaoID string, flt *flight, tm time.Time, force bool) {
	throttled := flt.Last.Time.Add(reportMinInterval).After(flt.Current.Time)
	if throttled && !force && !t.ForceReporting {
		return
	}
	flt.Last = flt.Current
	flt.PendingChange = false
	t.handlers.AddTrackPoint(icaoID, flt.Last)
}
// sweepIfNeeded runs the stale-flight sweep once the scheduled time has passed.
func (t *Tracker) sweepIfNeeded(tm time.Time) {
	if !tm.After(t.nextSweep) {
		return
	}
	t.sweep(tm)
}
// sweep closes and removes every flight that has been silent for longer
// than decayTime, then schedules the next sweep.
func (t *Tracker) sweep(tm time.Time) {
	cutoff := tm.Add(-decayTime)
	for id, flt := range t.flights {
		if !flt.LastSeen.Before(cutoff) {
			continue
		}
		// It's been too long since we've seen this flight.
		t.handlers.CloseFlight(id, flt.LastSeen, flt.MessageCount)
		delete(t.flights, id)
	}
	t.nextSweep = tm.Add(sweepInterval)
}
// distanceNM returns the haversine distance in nautical miles between two
// GPS coordinates.
//
// NOTE: the parameter order is (lat1, lat2, lon1, lon2) — both latitudes
// first — matching the existing call site in handleAdsbPosition, which
// passes (lat, flt.Last.Latitude, lon, flt.Last.Longitude). The previous
// declaration (lat1, lon1, lat2, lon2) silently treated the caller's
// second latitude as a longitude and vice versa, producing wrong distances.
// https://janakiev.com/blog/gps-points-distance-python/
func distanceNM(lat1, lat2, lon1, lon2 float64) float64 {
	const r float64 = 6372800 // Earth radius in meters
	phi1 := lat1 * (math.Pi / 180)
	phi2 := lat2 * (math.Pi / 180)
	dphi := (lat2 - lat1) * (math.Pi / 180)
	dlambda := (lon2 - lon1) * (math.Pi / 180)
	a := math.Pow(math.Sin(dphi/2.0), 2) + math.Cos(phi1)*math.Cos(phi2)*math.Pow(math.Sin(dlambda/2), 2)
	meters := 2 * r * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))
	return meters / 1852 // meters per nautical mile
}
func (t *Tracker) GetState() []byte {
data, err := json.Marshal(t.flights)
if err != nil {
log.Error().Err(err).Msgf("Couldn't marshal flights array")
return nil
}
return data
} | tracker/tracker.go | 0.557845 | 0.404331 | tracker.go | starcoder |
package array
import (
"gotomate/fiber/variable"
"gotomate/log"
"math/rand"
"time"
)
// GetArrayLength Get the current length of an array
//
// Reads the array stored in the fiber variable "ArrayVarName" and writes
// its length to "Output". Unsupported element types leave "Output" untouched.
func GetArrayLength(instructionData interface{}, finished chan bool) int {
	log.FiberInfo("Getting an array length")
	arr, getErr := variable.Keys{VarName: "ArrayVarName"}.GetValue(instructionData)
	if getErr != nil {
		finished <- true
		return -1
	}
	length := -1
	switch variable.GetVariableType(arr) {
	case "[]bool":
		length = len(arr.([]bool))
	case "[]float64":
		length = len(arr.([]float64))
	case "[]int":
		length = len(arr.([]int))
	case "[]string":
		length = len(arr.([]string))
	}
	if length >= 0 {
		variable.SetVariable(instructionData, "Output", length)
	}
	finished <- true
	return -1
}
// GetValue Get a value from an array by index
//
// Looks up element "Index" of the array in "ArrayVarName" and stores it
// in "Output". Unsupported element types leave "Output" untouched.
func GetValue(instructionData interface{}, finished chan bool) int {
	log.FiberInfo("Getting a value from an array")
	arr, arrErr := variable.Keys{VarName: "ArrayVarName"}.GetValue(instructionData)
	if arrErr != nil {
		finished <- true
		return -1
	}
	idx, idxErr := variable.Keys{VarName: "IndexVarName", IsVarName: "IndexIsVar", Name: "Index"}.GetValue(instructionData)
	if idxErr != nil {
		finished <- true
		return -1
	}
	var picked interface{}
	switch variable.GetVariableType(arr) {
	case "[]bool":
		picked = arr.([]bool)[idx.(int)]
	case "[]float64":
		picked = arr.([]float64)[idx.(int)]
	case "[]int":
		picked = arr.([]int)[idx.(int)]
	case "[]string":
		picked = arr.([]string)[idx.(int)]
	}
	if picked != nil {
		variable.SetVariable(instructionData, "Output", picked)
	}
	finished <- true
	return -1
}
// PopAt Pop the wanted index of an array
//
// Removes the element at "Index" from the array in "ArrayVarName", stores
// the removed element in "Output" and writes the shortened array back.
// Signals completion on finished; the -1 return presumably means "no jump
// to another instruction" — confirm with the fiber scheduler.
func PopAt(instructionData interface{}, finished chan bool) int {
	log.FiberInfo("Poping value of array at index")
	array, err := variable.Keys{VarName: "ArrayVarName"}.GetValue(instructionData)
	if err != nil {
		finished <- true
		return -1
	}
	index, err := variable.Keys{VarName: "IndexVarName", IsVarName: "IndexIsVar", Name: "Index"}.GetValue(instructionData)
	if err != nil {
		finished <- true
		return -1
	}
	switch variable.GetVariableType(array) {
	case "[]bool":
		// Save the element, shift the tail left over it, drop the last slot.
		popped := array.([]bool)[index.(int)]
		copy(array.([]bool)[index.(int):], array.([]bool)[index.(int)+1:])
		array = array.([]bool)[:len(array.([]bool))-1]
		variable.SetVariable(instructionData, "Output", popped)
		variable.SetVariable(instructionData, "ArrayVarName", array.([]bool))
	case "[]float64":
		popped := array.([]float64)[index.(int)]
		copy(array.([]float64)[index.(int):], array.([]float64)[index.(int)+1:])
		array = array.([]float64)[:len(array.([]float64))-1]
		variable.SetVariable(instructionData, "Output", popped)
		variable.SetVariable(instructionData, "ArrayVarName", array.([]float64))
	case "[]int":
		popped := array.([]int)[index.(int)]
		copy(array.([]int)[index.(int):], array.([]int)[index.(int)+1:])
		array = array.([]int)[:len(array.([]int))-1]
		variable.SetVariable(instructionData, "Output", popped)
		variable.SetVariable(instructionData, "ArrayVarName", array.([]int))
	case "[]string":
		popped := array.([]string)[index.(int)]
		copy(array.([]string)[index.(int):], array.([]string)[index.(int)+1:])
		array = array.([]string)[:len(array.([]string))-1]
		variable.SetVariable(instructionData, "Output", popped)
		variable.SetVariable(instructionData, "ArrayVarName", array.([]string))
	}
	finished <- true
	return -1
}
// PopLast Pop the last index of an array
//
// Removes the final element of the array in "ArrayVarName", stores it in
// "Output" and writes the shortened array back. Note: an empty array
// would index at -1 and panic — presumably callers guarantee a non-empty
// array; confirm.
func PopLast(instructionData interface{}, finished chan bool) int {
	log.FiberInfo("Poping value of array at end")
	array, err := variable.Keys{VarName: "ArrayVarName"}.GetValue(instructionData)
	if err != nil {
		finished <- true
		return -1
	}
	switch variable.GetVariableType(array) {
	case "[]bool":
		// Grab the last element, then truncate by one.
		popped := array.([]bool)[len(array.([]bool))-1]
		array = array.([]bool)[:len(array.([]bool))-1]
		variable.SetVariable(instructionData, "Output", popped)
		variable.SetVariable(instructionData, "ArrayVarName", array.([]bool))
	case "[]float64":
		popped := array.([]float64)[len(array.([]float64))-1]
		array = array.([]float64)[:len(array.([]float64))-1]
		variable.SetVariable(instructionData, "Output", popped)
		variable.SetVariable(instructionData, "ArrayVarName", array.([]float64))
	case "[]int":
		popped := array.([]int)[len(array.([]int))-1]
		array = array.([]int)[:len(array.([]int))-1]
		variable.SetVariable(instructionData, "Output", popped)
		variable.SetVariable(instructionData, "ArrayVarName", array.([]int))
	case "[]string":
		popped := array.([]string)[len(array.([]string))-1]
		array = array.([]string)[:len(array.([]string))-1]
		variable.SetVariable(instructionData, "Output", popped)
		variable.SetVariable(instructionData, "ArrayVarName", array.([]string))
	}
	finished <- true
	return -1
}
// PushAt Push a value at the wanted index of an array
//
// Inserts "ValueVarName"'s value into the array held by "ArrayVarName" at
// position "Index", shifting later elements right, then writes the grown
// array back.
//
// Fix: the []float64 branch previously did append(array.([]bool), false),
// which panics with a failed type assertion for any float64 array.
func PushAt(instructionData interface{}, finished chan bool) int {
	log.FiberInfo("Pushing value in array at index")
	array, err := variable.Keys{VarName: "ArrayVarName"}.GetValue(instructionData)
	if err != nil {
		finished <- true
		return -1
	}
	index, err := variable.Keys{VarName: "IndexVarName", IsVarName: "IndexIsVar", Name: "Index"}.GetValue(instructionData)
	if err != nil {
		finished <- true
		return -1
	}
	value, err := variable.Keys{VarName: "ValueVarName"}.GetValue(instructionData)
	if err != nil {
		finished <- true
		return -1
	}
	switch variable.GetVariableType(array) {
	case "[]bool":
		// Grow by one, shift the tail right, write the new element.
		array = append(array.([]bool), false)
		copy(array.([]bool)[index.(int)+1:], array.([]bool)[index.(int):])
		array.([]bool)[index.(int)] = value.(bool)
		variable.SetVariable(instructionData, "ArrayVarName", array.([]bool))
	case "[]float64":
		array = append(array.([]float64), 0)
		copy(array.([]float64)[index.(int)+1:], array.([]float64)[index.(int):])
		array.([]float64)[index.(int)] = value.(float64)
		variable.SetVariable(instructionData, "ArrayVarName", array.([]float64))
	case "[]int":
		array = append(array.([]int), 0)
		copy(array.([]int)[index.(int)+1:], array.([]int)[index.(int):])
		array.([]int)[index.(int)] = value.(int)
		variable.SetVariable(instructionData, "ArrayVarName", array.([]int))
	case "[]string":
		array = append(array.([]string), "")
		copy(array.([]string)[index.(int)+1:], array.([]string)[index.(int):])
		array.([]string)[index.(int)] = value.(string)
		variable.SetVariable(instructionData, "ArrayVarName", array.([]string))
	}
	finished <- true
	return -1
}
// PushLast Push a value at the end of an array
//
// Appends "ValueVarName"'s value to the array held by "ArrayVarName" and
// writes the grown array back.
func PushLast(instructionData interface{}, finished chan bool) int {
	log.FiberInfo("Pushing value in array at end")
	arr, arrErr := variable.Keys{VarName: "ArrayVarName"}.GetValue(instructionData)
	if arrErr == nil {
		value, valueErr := variable.Keys{VarName: "ValueVarName"}.GetValue(instructionData)
		if valueErr == nil {
			switch variable.GetVariableType(arr) {
			case "[]bool":
				variable.SetVariable(instructionData, "ArrayVarName", append(arr.([]bool), value.(bool)))
			case "[]float64":
				variable.SetVariable(instructionData, "ArrayVarName", append(arr.([]float64), value.(float64)))
			case "[]int":
				variable.SetVariable(instructionData, "ArrayVarName", append(arr.([]int), value.(int)))
			case "[]string":
				variable.SetVariable(instructionData, "ArrayVarName", append(arr.([]string), value.(string)))
			}
		}
	}
	finished <- true
	return -1
}
// RemoveAt Remove a value by the index, of an array
//
// Deletes the element at "Index" from the array in "ArrayVarName" by
// shifting the tail left and truncating, then writes the result back.
// Unlike PopAt, the removed value is discarded (no "Output").
func RemoveAt(instructionData interface{}, finished chan bool) int {
	log.FiberInfo("Removing value from array at index")
	array, err := variable.Keys{VarName: "ArrayVarName"}.GetValue(instructionData)
	if err != nil {
		finished <- true
		return -1
	}
	index, err := variable.Keys{VarName: "IndexVarName", IsVarName: "IndexIsVar", Name: "Index"}.GetValue(instructionData)
	if err != nil {
		finished <- true
		return -1
	}
	switch variable.GetVariableType(array) {
	case "[]bool":
		copy(array.([]bool)[index.(int):], array.([]bool)[index.(int)+1:])
		array = array.([]bool)[:len(array.([]bool))-1]
		variable.SetVariable(instructionData, "ArrayVarName", array.([]bool))
	case "[]float64":
		copy(array.([]float64)[index.(int):], array.([]float64)[index.(int)+1:])
		array = array.([]float64)[:len(array.([]float64))-1]
		variable.SetVariable(instructionData, "ArrayVarName", array.([]float64))
	case "[]int":
		copy(array.([]int)[index.(int):], array.([]int)[index.(int)+1:])
		array = array.([]int)[:len(array.([]int))-1]
		variable.SetVariable(instructionData, "ArrayVarName", array.([]int))
	case "[]string":
		copy(array.([]string)[index.(int):], array.([]string)[index.(int)+1:])
		array = array.([]string)[:len(array.([]string))-1]
		variable.SetVariable(instructionData, "ArrayVarName", array.([]string))
	}
	finished <- true
	return -1
}
// RemoveLast Remove the last value of an array
//
// Truncates the array in "ArrayVarName" by one element and writes it back.
// The removed value is discarded.
func RemoveLast(instructionData interface{}, finished chan bool) int {
	log.FiberInfo("Removing value from array at end")
	arr, getErr := variable.Keys{VarName: "ArrayVarName"}.GetValue(instructionData)
	if getErr != nil {
		finished <- true
		return -1
	}
	switch variable.GetVariableType(arr) {
	case "[]bool":
		items := arr.([]bool)
		variable.SetVariable(instructionData, "ArrayVarName", items[:len(items)-1])
	case "[]float64":
		items := arr.([]float64)
		variable.SetVariable(instructionData, "ArrayVarName", items[:len(items)-1])
	case "[]int":
		items := arr.([]int)
		variable.SetVariable(instructionData, "ArrayVarName", items[:len(items)-1])
	case "[]string":
		items := arr.([]string)
		variable.SetVariable(instructionData, "ArrayVarName", items[:len(items)-1])
	}
	finished <- true
	return -1
}
// Shuffle an array
//
// Randomly permutes the array held in "ArrayVarName" in place using
// rand.Shuffle and writes it back.
func Shuffle(instructionData interface{}, finished chan bool) int {
	log.FiberInfo("Shuffling an array")
	array, err := variable.Keys{VarName: "ArrayVarName"}.GetValue(instructionData)
	if err != nil {
		finished <- true
		return -1
	}
	// NOTE(review): reseeding the global source on every call is
	// discouraged; seeding once at startup would suffice — confirm no
	// other code depends on this behavior.
	rand.Seed(time.Now().UnixNano())
	switch variable.GetVariableType(array) {
	case "[]bool":
		rand.Shuffle(len(array.([]bool)), func(i, j int) { array.([]bool)[i], array.([]bool)[j] = array.([]bool)[j], array.([]bool)[i] })
		variable.SetVariable(instructionData, "ArrayVarName", array.([]bool))
	case "[]float64":
		rand.Shuffle(len(array.([]float64)), func(i, j int) {
			array.([]float64)[i], array.([]float64)[j] = array.([]float64)[j], array.([]float64)[i]
		})
		variable.SetVariable(instructionData, "ArrayVarName", array.([]float64))
	case "[]int":
		rand.Shuffle(len(array.([]int)), func(i, j int) { array.([]int)[i], array.([]int)[j] = array.([]int)[j], array.([]int)[i] })
		variable.SetVariable(instructionData, "ArrayVarName", array.([]int))
	case "[]string":
		rand.Shuffle(len(array.([]string)), func(i, j int) { array.([]string)[i], array.([]string)[j] = array.([]string)[j], array.([]string)[i] })
		variable.SetVariable(instructionData, "ArrayVarName", array.([]string))
	}
	finished <- true
	return -1
}
// UpdateValue Update a value of an array by index
func UpdateValue(instructionData interface{}, finished chan bool) int {
log.FiberInfo("Updating a value in an array by index")
array, err := variable.Keys{VarName: "ArrayVarName"}.GetValue(instructionData)
if err != nil {
finished <- true
return -1
}
index, err := variable.Keys{VarName: "IndexVarName", IsVarName: "IndexIsVar", Name: "Index"}.GetValue(instructionData)
if err != nil {
finished <- true
return -1
}
value, err := variable.Keys{VarName: "ValueVarName"}.GetValue(instructionData)
if err != nil {
finished <- true
return -1
}
switch variable.GetVariableType(array) {
case "[]bool":
array.([]bool)[index.(int)] = value.(bool)
variable.SetVariable(instructionData, "ArrayVarName", array.([]bool))
case "[]float64":
array.([]float64)[index.(int)] = value.(float64)
variable.SetVariable(instructionData, "ArrayVarName", array.([]float64))
case "[]int":
array.([]int)[index.(int)] = value.(int)
variable.SetVariable(instructionData, "ArrayVarName", array.([]int))
case "[]string":
array.([]string)[index.(int)] = value.(string)
variable.SetVariable(instructionData, "ArrayVarName", array.([]string))
}
finished <- true
return -1
} | fiber/packages/Array/functions.go | 0.53048 | 0.609321 | functions.go | starcoder |
package examples
// subsrc2anysub maps a subsrc onto an anysub (Field1 -> Test).
func subsrc2anysub(src subsrc) anysub {
	return anysub{
		Test: src.Field1,
	}
}

// anysub2subsrc is the inverse mapping (Test -> Field1).
func anysub2subsrc(src anysub) subsrc {
	return subsrc{
		Field1: src.Test,
	}
}

// subsrcPtr2anysubPtr converts through a pointer; nil maps to nil.
func subsrcPtr2anysubPtr(src *subsrc) *anysub {
	if src == nil {
		return nil
	}
	m := subsrc2anysub(*src)
	return &m
}

// anysubPtr2subsrcPtr converts through a pointer; nil maps to nil.
func anysubPtr2subsrcPtr(src *anysub) *subsrc {
	if src == nil {
		return nil
	}
	m := anysub2subsrc(*src)
	return &m
}

// subsrcList2anysubList converts a slice element-wise; a nil input maps to nil.
func subsrcList2anysubList(src []subsrc) []anysub {
	if src == nil {
		return nil
	}
	res := make([]anysub, len(src))
	for k, s := range src {
		p := subsrc2anysub(s)
		res[k] = p
	}
	return res
}

// anysubList2subsrcList converts a slice element-wise; a nil input maps to nil.
func anysubList2subsrcList(src []anysub) []subsrc {
	if src == nil {
		return nil
	}
	res := make([]subsrc, len(src))
	for k, s := range src {
		p := anysub2subsrc(s)
		res[k] = p
	}
	return res
}
// Slice <-> pointer-slice converters. A nil input slice always maps to nil.
//
// NOTE(review): the variants taking []*T dereference each element without
// a nil check, so a slice containing a nil pointer panics — confirm
// callers never produce nil elements (this file appears machine-generated;
// a fix would belong in the generator).
func subsrcList2anysubPtrList(src []subsrc) []*anysub {
	if src == nil {
		return nil
	}
	res := make([]*anysub, len(src))
	for k, s := range src {
		p := subsrc2anysub(s)
		res[k] = &p
	}
	return res
}

func anysubPtrList2subsrcList(src []*anysub) []subsrc {
	if src == nil {
		return nil
	}
	res := make([]subsrc, len(src))
	for k, s := range src {
		p := anysub2subsrc(*s)
		res[k] = p
	}
	return res
}

func subsrcPtrList2anysubList(src []*subsrc) []anysub {
	if src == nil {
		return nil
	}
	res := make([]anysub, len(src))
	for k, s := range src {
		p := subsrc2anysub(*s)
		res[k] = p
	}
	return res
}

func anysubList2subsrcPtrList(src []anysub) []*subsrc {
	if src == nil {
		return nil
	}
	res := make([]*subsrc, len(src))
	for k, s := range src {
		p := anysub2subsrc(s)
		res[k] = &p
	}
	return res
}

func subsrcPtrList2anysubPtrList(src []*subsrc) []*anysub {
	if src == nil {
		return nil
	}
	res := make([]*anysub, len(src))
	for k, s := range src {
		p := subsrc2anysub(*s)
		res[k] = &p
	}
	return res
}

func anysubPtrList2subsrcPtrList(src []*anysub) []*subsrc {
	if src == nil {
		return nil
	}
	res := make([]*subsrc, len(src))
	for k, s := range src {
		p := anysub2subsrc(*s)
		res[k] = &p
	}
	return res
}
package tuple
import (
"fmt"
"golang.org/x/exp/constraints"
)
// T1 is a tuple type holding 1 generic value.
type T1[Ty1 any] struct {
	V1 Ty1
}

// Len returns the number of values held by the tuple (always 1).
func (t T1[Ty1]) Len() int {
	return 1
}

// Values returns the values held by the tuple.
func (t T1[Ty1]) Values() Ty1 {
	return t.V1
}

// Array returns an array of the tuple values.
func (t T1[Ty1]) Array() [1]any {
	return [1]any{
		t.V1,
	}
}

// Slice returns a slice of the tuple values.
func (t T1[Ty1]) Slice() []any {
	a := t.Array()
	return a[:]
}

// String returns the string representation of the tuple.
func (t T1[Ty1]) String() string {
	return tupString(t.Slice())
}

// GoString returns a Go-syntax representation of the tuple.
func (t T1[Ty1]) GoString() string {
	return tupGoString(t.Slice())
}

// New1 creates a new tuple holding 1 generic value.
func New1[Ty1 any](v1 Ty1) T1[Ty1] {
	return T1[Ty1]{
		V1: v1,
	}
}

// FromArray1 returns a tuple from an array of length 1.
// If any of the values can not be converted to the generic type, an error is returned.
func FromArray1[Ty1 any](arr [1]any) (T1[Ty1], error) {
	v1, ok := arr[0].(Ty1)
	if !ok {
		return T1[Ty1]{}, fmt.Errorf("value at array index 0 expected to have type %s but has type %T", typeName[Ty1](), arr[0])
	}
	return New1(v1), nil
}

// FromArray1X returns a tuple from an array of length 1.
// If any of the values can not be converted to the generic type, the function panics.
func FromArray1X[Ty1 any](arr [1]any) T1[Ty1] {
	return FromSlice1X[Ty1](arr[:])
}

// FromSlice1 returns a tuple from a slice of length 1.
// If the length of the slice doesn't match, or any of the values can not be converted to the generic type, an error is returned.
func FromSlice1[Ty1 any](values []any) (T1[Ty1], error) {
	if len(values) != 1 {
		return T1[Ty1]{}, fmt.Errorf("slice length %d must match number of tuple values 1", len(values))
	}
	v1, ok := values[0].(Ty1)
	if !ok {
		return T1[Ty1]{}, fmt.Errorf("value at slice index 0 expected to have type %s but has type %T", typeName[Ty1](), values[0])
	}
	return New1(v1), nil
}

// FromSlice1X returns a tuple from a slice of length 1.
// If the length of the slice doesn't match, or any of the values can not be converted to the generic type, the function panics.
func FromSlice1X[Ty1 any](values []any) T1[Ty1] {
	if len(values) != 1 {
		panic(fmt.Errorf("slice length %d must match number of tuple values 1", len(values)))
	}
	v1 := values[0].(Ty1)
	return New1(v1)
}
// Equal1 returns whether the host tuple is equal to the other tuple.
// All tuple elements of the host and guest parameters must match the "comparable" built-in constraint.
// To test equality of tuples that hold custom Equalable values, use the Equal1E function.
// To test equality of tuples that hold custom Comparable values, use the Equal1C function.
// Otherwise, use Equal or reflect.DeepEqual to test tuples of any types.
func Equal1[Ty1 comparable](host, guest T1[Ty1]) bool {
	return host.V1 == guest.V1
}

// Equal1E returns whether the host tuple is semantically equal to the guest tuple.
// All tuple elements of the host and guest parameters must match the Equalable constraint.
// To test equality of tuples that hold built-in "comparable" values, use the Equal1 function.
// To test equality of tuples that hold custom Comparable values, use the Equal1C function.
// Otherwise, use Equal or reflect.DeepEqual to test tuples of any types.
func Equal1E[Ty1 Equalable[Ty1]](host, guest T1[Ty1]) bool {
	return host.V1.Equal(guest.V1)
}

// Equal1C returns whether the host tuple is semantically equal to the guest tuple.
// All tuple elements of the host and guest parameters must match the Comparable constraint.
// To test equality of tuples that hold built-in "comparable" values, use the Equal1 function.
// To test equality of tuples that hold custom Equalable values, use the Equal1E function.
// Otherwise, use Equal or reflect.DeepEqual to test tuples of any types.
func Equal1C[Ty1 Comparable[Ty1]](host, guest T1[Ty1]) bool {
	return host.V1.CompareTo(guest.V1).EQ()
}

// Compare1 returns whether the host tuple is semantically less than, equal to, or greater than the guest tuple.
// All tuple elements of the host and guest parameters must match the "Ordered" constraint.
// To compare tuples that hold custom comparable values, use the Compare1C function.
func Compare1[Ty1 constraints.Ordered](host, guest T1[Ty1]) OrderedComparisonResult {
	return multiCompare(
		func() OrderedComparisonResult { return compareOrdered(host.V1, guest.V1) },
	)
}

// Compare1C returns whether the host tuple is semantically less than, equal to, or greater than the guest tuple.
// All tuple elements of the host and guest parameters must match the Comparable constraint.
// To compare tuples that hold built-in "Ordered" values, use the Compare1 function.
func Compare1C[Ty1 Comparable[Ty1]](host, guest T1[Ty1]) OrderedComparisonResult {
	return multiCompare(
		func() OrderedComparisonResult { return host.V1.CompareTo(guest.V1) },
	)
}

// LessThan1 returns whether the host tuple is semantically less than the guest tuple.
// All tuple elements of the host and guest parameters must match the "Ordered" constraint.
// To compare tuples that hold custom comparable values, use the LessThan1C function.
func LessThan1[Ty1 constraints.Ordered](host, guest T1[Ty1]) bool {
	return Compare1(host, guest).LT()
}

// LessThan1C returns whether the host tuple is semantically less than the guest tuple.
// All tuple elements of the host and guest parameters must match the Comparable constraint.
// To compare tuples that hold built-in "Ordered" values, use the LessThan1 function.
func LessThan1C[Ty1 Comparable[Ty1]](host, guest T1[Ty1]) bool {
	return Compare1C(host, guest).LT()
}

// LessOrEqual1 returns whether the host tuple is semantically less than or equal to the guest tuple.
// All tuple elements of the host and guest parameters must match the "Ordered" constraint.
// To compare tuples that hold custom comparable values, use the LessOrEqual1C function.
func LessOrEqual1[Ty1 constraints.Ordered](host, guest T1[Ty1]) bool {
	return Compare1(host, guest).LE()
}

// LessOrEqual1C returns whether the host tuple is semantically less than or equal to the guest tuple.
// All tuple elements of the host and guest parameters must match the Comparable constraint.
// To compare tuples that hold built-in "Ordered" values, use the LessOrEqual1 function.
func LessOrEqual1C[Ty1 Comparable[Ty1]](host, guest T1[Ty1]) bool {
	return Compare1C(host, guest).LE()
}

// GreaterThan1 returns whether the host tuple is semantically greater than the guest tuple.
// All tuple elements of the host and guest parameters must match the "Ordered" constraint.
// To compare tuples that hold custom comparable values, use the GreaterThan1C function.
func GreaterThan1[Ty1 constraints.Ordered](host, guest T1[Ty1]) bool {
	return Compare1(host, guest).GT()
}

// GreaterThan1C returns whether the host tuple is semantically greater than the guest tuple.
// All tuple elements of the host and guest parameters must match the Comparable constraint.
// To compare tuples that hold built-in "Ordered" values, use the GreaterThan1 function.
func GreaterThan1C[Ty1 Comparable[Ty1]](host, guest T1[Ty1]) bool {
	return Compare1C(host, guest).GT()
}

// GreaterOrEqual1 returns whether the host tuple is semantically greater than or equal to the guest tuple.
// All tuple elements of the host and guest parameters must match the "Ordered" constraint.
// To compare tuples that hold custom comparable values, use the GreaterOrEqual1C function.
func GreaterOrEqual1[Ty1 constraints.Ordered](host, guest T1[Ty1]) bool {
	return Compare1(host, guest).GE()
}

// GreaterOrEqual1C returns whether the host tuple is semantically greater than or equal to the guest tuple.
// All tuple elements of the host and guest parameters must match the Comparable constraint.
// To compare tuples that hold built-in "Ordered" values, use the GreaterOrEqual1 function.
func GreaterOrEqual1C[Ty1 Comparable[Ty1]](host, guest T1[Ty1]) bool {
	return Compare1C(host, guest).GE()
}
package store
import (
"fmt"
"math"
"reflect"
"strconv"
"github.com/juju/errors"
)
// Num64 normalizes any Go numeric value to one of int64, uint64 or
// float64: signed integers widen to int64, unsigned to uint64 and floats
// to float64. Non-numeric values are returned unchanged. (Values already
// at the widest type are returned as-is; the previous same-type
// conversions were redundant.)
func Num64(i interface{}) interface{} {
	switch x := i.(type) {
	case int:
		return int64(x)
	case int8:
		return int64(x)
	case int16:
		return int64(x)
	case int32:
		return int64(x)
	case int64:
		return x
	case uint:
		return uint64(x)
	case uint8:
		return uint64(x)
	case uint16:
		return uint64(x)
	case uint32:
		return uint64(x)
	case uint64:
		return x
	case float32:
		return float64(x)
	case float64:
		return x
	default:
		return x
	}
}
// ParseFloat coerces i to a float64. Numeric values convert directly
// (NaN is rejected; infinities are allowed); anything else is rendered to
// a string and parsed with strconv.ParseFloat.
func ParseFloat(i interface{}) (float64, error) {
	var s string
	switch x := Num64(i).(type) {
	case int64:
		return float64(x), nil
	case uint64:
		return float64(x), nil
	case float64:
		if math.IsNaN(x) {
			return 0, errors.New("float is NaN")
		}
		return x, nil
	case string:
		s = x
	case []byte:
		s = string(x)
	default:
		s = fmt.Sprint(x)
	}
	f, err := strconv.ParseFloat(s, 64)
	return f, errors.Trace(err)
}
// ParseUint coerces i to a uint64. Integers must be non-negative; floats
// must be finite, non-negative, below 2^64 and integral (within 1e-9);
// everything else is rendered to a string and parsed in base 10.
func ParseUint(i interface{}) (uint64, error) {
	var s string
	switch x := Num64(i).(type) {
	case int64:
		if x < 0 {
			return 0, errors.New("integer overflow")
		}
		return uint64(x), nil
	case uint64:
		return x, nil
	case float64:
		switch {
		case math.IsNaN(x):
			return 0, errors.New("float is NaN")
		case math.IsInf(x, 0):
			return 0, errors.New("float is Inf")
		case x < 0:
			// Converting a negative float to uint64 is not well-defined in
			// Go; reject explicitly instead of relying on the round-trip
			// check below.
			return 0, errors.New("integer overflow")
		case x >= float64(math.MaxUint64):
			// Out-of-range float-to-uint64 conversion is likewise
			// undefined; 2^64 and above can never fit.
			return 0, errors.New("integer overflow")
		case math.Abs(x-float64(uint64(x))) > 1e-9:
			return 0, errors.New("float to uint64")
		}
		return uint64(x), nil
	case string:
		s = x
	case []byte:
		s = string(x)
	default:
		s = fmt.Sprint(x)
	}
	u, err := strconv.ParseUint(s, 10, 64)
	return u, errors.Trace(err)
}
// ParseInt coerces i to an int64. Integers must fit in int64; floats must
// be finite, within the int64 range and integral (within 1e-9);
// everything else is rendered to a string and parsed in base 10.
func ParseInt(i interface{}) (int64, error) {
	var s string
	switch x := Num64(i).(type) {
	case int64:
		return x, nil
	case uint64:
		if x > math.MaxInt64 {
			return 0, errors.New("integer overflow")
		}
		return int64(x), nil
	case float64:
		switch {
		case math.IsNaN(x):
			return 0, errors.New("float is NaN")
		case math.IsInf(x, 0):
			return 0, errors.New("float is Inf")
		case x < float64(math.MinInt64) || x >= float64(math.MaxInt64):
			// int64(x) is not well-defined outside the int64 range in Go;
			// reject before the round-trip check below.
			return 0, errors.New("integer overflow")
		case math.Abs(x-float64(int64(x))) > 1e-9:
			return 0, errors.New("float to int64")
		}
		return int64(x), nil
	case string:
		s = x
	case []byte:
		s = string(x)
	default:
		s = fmt.Sprint(x)
	}
	v, err := strconv.ParseInt(s, 10, 64)
	return v, errors.Trace(err)
}
// FormatString renders an arbitrary value as a string: []byte is converted,
// a string is returned as-is, and everything else goes through fmt's %v verb.
func FormatString(i interface{}) string {
	if b, ok := i.([]byte); ok {
		return string(b)
	}
	if s, ok := i.(string); ok {
		return s
	}
	return fmt.Sprintf("%v", i)
}
// FormatFloat renders v as a Redis-style bulk float in byte form: fixed
// notation with 17 decimal places, infinities spelled "inf" / "-inf".
func FormatFloat(v float64) []byte {
	s := strconv.FormatFloat(v, 'f', 17, 64)
	if s == "+Inf" {
		s = "inf"
	} else if s == "-Inf" {
		s = "-inf"
	}
	return []byte(s)
}
// FormatFloatString renders v in fixed notation with 17 decimal places.
// Redis uses the lowercase forms "inf" and "-inf" for infinite float bulk
// string replies, so those are substituted for strconv's "+Inf"/"-Inf".
func FormatFloatString(v float64) string {
	formatted := strconv.FormatFloat(v, 'f', 17, 64)
	if formatted == "+Inf" {
		return "inf"
	}
	if formatted == "-Inf" {
		return "-inf"
	}
	return formatted
}
// FormatUint renders u as its base-10 byte representation.
func FormatUint(u uint64) []byte {
	return strconv.AppendUint(nil, u, 10)
}
// FormatInt renders v as its base-10 byte representation.
func FormatInt(v int64) []byte {
	return strconv.AppendInt(nil, v, 10)
}
// parseArgument converts arg into the Go value pointed to by ref.
// ref selects the target type and receives the result: supported targets are
// *int64, *uint32, *uint64, *float64, *[]byte and *string. Numeric targets
// reuse the Parse* helpers above; byte/string targets accept only []byte or
// string arguments. Any other ref type yields an "unsupported type" error.
func parseArgument(arg interface{}, ref interface{}) error {
	switch x := ref.(type) {
	default:
		return errors.Errorf("unsupported type, %v", reflect.TypeOf(x))
	case *int64:
		v, err := ParseInt(arg)
		if err != nil {
			return errors.Errorf("expect %v, %s", reflect.TypeOf(*x), err.Error())
		}
		*x = v
	case *uint32:
		v, err := ParseUint(arg)
		if err != nil {
			return errors.Errorf("expect %v, %s", reflect.TypeOf(*x), err.Error())
		} else if v > math.MaxUint32 {
			// parsed as uint64 first, so range-check before narrowing
			return errors.Errorf("expect %v, but got %d", reflect.TypeOf(*x), v)
		}
		*x = uint32(v)
	case *uint64:
		v, err := ParseUint(arg)
		if err != nil {
			return errors.Errorf("expect %v, %s", reflect.TypeOf(*x), err.Error())
		}
		*x = v
	case *float64:
		v, err := ParseFloat(arg)
		if err != nil {
			return errors.Errorf("expect %v, %s", reflect.TypeOf(*x), err.Error())
		}
		*x = v
	case *[]byte:
		switch y := arg.(type) {
		case []byte:
			*x = y
		case string:
			*x = []byte(y)
		default:
			return errors.Errorf("expect %v, but got %v", reflect.TypeOf(*x), reflect.TypeOf(y))
		}
	case *string:
		switch y := arg.(type) {
		case []byte:
			*x = string(y)
		case string:
			*x = y
		default:
			return errors.Errorf("expect %v, but got %v", reflect.TypeOf(*x), reflect.TypeOf(y))
		}
	}
	return nil
}
// FormatByte renders a single scalar as its byte-slice form. Integers are
// written in base 10, floats in fixed notation with 17 decimal places
// ("inf"/"-inf" for infinities), []byte is passed through, and string is
// converted; unsupported types yield an empty slice.
func FormatByte(arg interface{}) []byte {
	switch v := arg.(type) {
	case int:
		return strconv.AppendInt(nil, int64(v), 10)
	case int32:
		return strconv.AppendInt(nil, int64(v), 10)
	case int64:
		return strconv.AppendInt(nil, v, 10)
	case uint:
		return strconv.AppendUint(nil, uint64(v), 10)
	case uint32:
		return strconv.AppendUint(nil, uint64(v), 10)
	case uint64:
		return strconv.AppendUint(nil, v, 10)
	case float32:
		return appendRedisFloat(float64(v))
	case float64:
		return appendRedisFloat(v)
	case []byte:
		return v
	case string:
		return []byte(v)
	}
	return []byte{}
}

// appendRedisFloat mirrors FormatFloat: fixed notation with 17 decimal
// places, with the lowercase "inf"/"-inf" spellings for infinities.
func appendRedisFloat(f float64) []byte {
	s := strconv.FormatFloat(f, 'f', 17, 64)
	switch s {
	case "+Inf":
		s = "inf"
	case "-Inf":
		s = "-inf"
	}
	return []byte(s)
}
// FormatBytes converts each argument to its []byte form via FormatByte.
// Arguments of unsupported types come back as empty slices.
func FormatBytes(args ...interface{}) [][]byte {
	values := make([][]byte, len(args))
	for i, arg := range args {
		values[i] = FormatByte(arg)
	}
	return values
} | Godeps/_workspace/src/github.com/reborndb/qdb/pkg/store/format.go | 0.554712 | 0.433502 | format.go | starcoder |
package ffnet
import (
"context"
"errors"
"math/rand"
"time"
"github.com/spy16/snowman/pkg/mat"
)
// SGDTrainer implements Stochastic Gradient Descent trainer for FFNet
// using BackPropagation algorithm.
type SGDTrainer struct {
	// Embed the network that needs to be trained.
	*FFNet
	// Eta is the learning rate to be used for training.
	// A zero value is treated as "unset" and defaulted to 0.5 by init.
	Eta float64
	// Loss is the loss function to use for computing loss during training.
	// Defaults to SquaredError() when its F field is nil.
	Loss LossFunc
	// log function to be used for logging progress. Nil means no logs.
	LogFunc func(msg string, args ...interface{})
}
// Example represents a single training sample.
type Example struct {
	// Inputs is the feature vector; Train shapes it as an inputSz x 1 column.
	Inputs []float64
	// Outputs is the expected output; Train shapes it as an outputSz x 1 column.
	Outputs []float64
}
// Train runs training iterations for given number of epochs. Training loop
// can be stopped by cancelling the context, in which case the context's
// error is returned. Samples are shuffled before every epoch and the model
// is updated after each sample (stochastic, batch size 1).
func (t SGDTrainer) Train(ctx context.Context, epochs int, samples []Example) error {
	if err := t.init(); err != nil {
		return err
	}
	trainingStart := time.Now()
	for i := 0; i < epochs; i++ {
		startedAt := time.Now()
		shuffle(samples)
		for _, sample := range samples {
			// Honor cancellation between samples. Previously ctx was
			// accepted but never checked, so Train could not be stopped
			// despite the documented contract.
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
			}
			x := mat.From(t.inputSz, 1, sample.Inputs...)
			y := mat.From(t.outputSz, 1, sample.Outputs...)
			zs, as, err := t.forwardPass(sample.Inputs)
			if err != nil {
				return err
			}
			// NOTE(review): this uses the last pre-activation (zs) as the
			// prediction; verify it should not be as[len(as)-1] instead.
			yHat := zs[len(zs)-1]
			costGrad := t.Loss.FPrime(y, yHat)
			deltaB, deltaW := t.backPropagate(zs, as, x, costGrad)
			// Gradient-descent step on every layer's weights and biases.
			// (Loop variable renamed from i to avoid shadowing the epoch.)
			for l := 0; l < len(t.layers); l++ {
				t.layers[l].weights = mat.Sub(t.layers[l].weights, deltaW[l].Scale(t.Eta))
				t.layers[l].biases = mat.Sub(t.layers[l].biases, deltaB[l].Scale(t.Eta))
			}
		}
		t.LogFunc("epoch %d finished in %s", i, time.Since(startedAt))
	}
	t.LogFunc("training run of %d epochs finished in %s", epochs, time.Since(trainingStart))
	return nil
}
// init validates the trainer and fills in defaults for any fields the
// caller left unset: Loss (squared error), Eta (0.5) and LogFunc (no-op).
// An error is returned when there is no network to train.
func (t *SGDTrainer) init() error {
	if t.FFNet == nil {
		return errors.New("field Net is not set, nothing to train")
	}
	if t.LogFunc == nil {
		t.LogFunc = func(string, ...interface{}) {}
	}
	if t.Eta == 0 {
		t.Eta = 0.5
	}
	if t.Loss.F == nil {
		t.Loss = SquaredError()
	}
	return nil
}
// shuffle permutes samples in place with a forward Fisher-Yates pass:
// element i is swapped with a uniformly chosen j in [0, i].
func shuffle(samples []Example) {
	for i := range samples {
		j := rand.Intn(i + 1)
		samples[i], samples[j] = samples[j], samples[i]
	}
} | pkg/ffnet/trainer.go | 0.795857 | 0.403156 | trainer.go | starcoder |
package main
import (
"strconv"
"time"
"periph.io/x/periph/conn/gpio"
"periph.io/x/periph/conn/gpio/gpioreg"
"periph.io/x/periph/host"
)
// gpioProvider generates pins for the platform (used for testing)
var gpioProvider = xGpioProvider // For testing on non-test setups
// GpioState represents the current binary value of the pin. Is it High or Low Voltage
type GpioState bool
const (
	// Low voltage registered on the pin (~0-1v)
	Low GpioState = false
	// High voltage registered on the pin (~1-3.3v)
	High GpioState = true
)
// State returns whether the pin is in a High or Low voltage state
func (s GpioState) State() gpio.Level {
	if s == Low {
		return gpio.Low
	}
	return gpio.High
}
// String formats the state using its periph gpio.Level representation.
func (s GpioState) String() string {
	return s.State().String()
}
// Edge refers to the rising or falling of a voltage value on the pin.
type Edge int
const (
	// NoEdge means no change
	NoEdge Edge = 0
	// RisingEdge means that the voltage is moving from a low to a high voltage state.
	RisingEdge Edge = 1
	// FallingEdge means that the voltage is moving from a high to a low voltage state.
	FallingEdge Edge = 2
	// BothEdges means that a change is occurring in either direction.
	BothEdges Edge = 3
)
// Edge returns the current edge value.
func (e Edge) Edge() gpio.Edge {
	switch e {
	case NoEdge:
		return gpio.NoEdge
	case RisingEdge:
		return gpio.RisingEdge
	case FallingEdge:
		return gpio.FallingEdge
	case BothEdges:
		return gpio.BothEdges
	}
	// Unknown values are treated as NoEdge.
	return gpio.NoEdge
}
// String formats the edge using its periph gpio.Edge representation.
func (e Edge) String() string {
	return e.Edge().String()
}
// Pull refers to the configuration of the pin circuitry.
type Pull int
const (
	// Float lets the input flow directly, resistance is handled elsewhere.
	Float Pull = 0
	// PullDown applies pull-down resistance to the pin
	PullDown Pull = 1
	// PullUp applies pull-up resistance to the pin
	PullUp Pull = 2
	// PullNoChange does not change the previous pull resistor setting
	PullNoChange Pull = 3
)
// Pull returns the current state of the pin's pull configuration
func (p Pull) Pull() gpio.Pull {
	switch p {
	case Float:
		return gpio.Float
	case PullDown:
		return gpio.PullDown
	case PullUp:
		return gpio.PullUp
	case PullNoChange:
		return gpio.PullNoChange
	}
	// Unknown values fall back to PullNoChange.
	return gpio.PullNoChange
}
// String formats the pull configuration using its periph representation.
func (p Pull) String() string { return p.Pull().String() }
// PiPin represents a GPIO pin on the Raspberry Pi
type PiPin interface {
	Input()
	InputEdge(Pull, Edge)
	Output(GpioState)
	Read() GpioState
	WaitForEdge(time.Duration) bool
	Pin() uint8
}
// Gpio implements a PiPin interface for a Raspberry Pi system.
type Gpio struct {
	gpio uint8 // BCM GPIO number
	pin gpio.PinIO // underlying periph pin handle
}
// SetGpioProvider allows you to change the type of GPIO for the system (useful for testing)
func SetGpioProvider(p func(uint8) PiPin) {
	gpioProvider = p
}
// xGpioProvider is the real-hardware provider: it looks the pin up in the
// periph registry by its GPIO number.
func xGpioProvider(gpio uint8) PiPin {
	g := Gpio{
		gpio: gpio,
		pin: gpioreg.ByName(strconv.Itoa(int(gpio))),
	}
	// NOTE(review): gpioreg.Register's error is discarded, and ByName may
	// return nil for an unknown pin — confirm both are acceptable here.
	gpioreg.Register(g.pin)
	return (PiPin)(&g)
}
// NewGpio creates a new PiPin for a given gpio value.
func NewGpio(gpio uint8) PiPin {
	return gpioProvider(gpio)
}
// GpioInit initializes the system
func GpioInit() error {
	_, err := host.Init()
	return err
}
// Input sets the pin to be read from.
func (g *Gpio) Input() {
	Debug("Setting gpio(%d) to Input(%s, %s)", g.gpio, Float, NoEdge)
	// NOTE(review): the error returned by pin.In is silently dropped.
	g.pin.In(gpio.Float, gpio.NoEdge)
}
// InputEdge sets the pin to be read from and to alert WaitForEdge when the given Edge is found.
func (g *Gpio) InputEdge(p Pull, e Edge) {
	Debug("Setting gpio(%d) to Input(%s, %s)", g.gpio, p, e)
	// NOTE(review): the error returned by pin.In is silently dropped.
	g.pin.In(p.Pull(), e.Edge())
}
// Output sets the pin to be written to.
func (g *Gpio) Output(s GpioState) {
	Debug("Output setting gpio(%d) to %s", g.gpio, s)
	// NOTE(review): the error returned by pin.Out is silently dropped.
	g.pin.Out(s.State())
}
// Read returns the current state of the pin
func (g *Gpio) Read() GpioState {
	if g.pin.Read() == gpio.High {
		return High
	}
	return Low
}
// WaitForEdge blocks while waiting for a voltage change on the pin.
func (g *Gpio) WaitForEdge(timeout time.Duration) bool {
	return g.pin.WaitForEdge(timeout)
}
// Pin returns the GPIO number of the pin.
func (g *Gpio) Pin() uint8 {
	return g.gpio
}
// Direction refers to the usage of the pin. Is it being used for input or output?
type Direction bool
const (
	// Input means that the value of the pin will be read and is controlled externally.
	Input Direction = false
	// Output means that the value of the pin will be written to and is controlled internally.
	Output Direction = true
) | gpio.go | 0.759939 | 0.462473 | gpio.go | starcoder |
package syntax
// compact specifies whether we allow spaces between expressions.
// This is true for let
func (p *Parser) arithmExpr(compact bool) ArithmExpr {
	return p.arithmExprComma(compact)
}
// These function names are inspired by Bash's expr.c
func (p *Parser) arithmExprComma(compact bool) ArithmExpr {
	return p.arithmExprBinary(compact, p.arithmExprAssign, Comma)
}
func (p *Parser) arithmExprAssign(compact bool) ArithmExpr {
	// Assign is different from the other binary operators because it's
	// right-associative and needs to check that it's placed after a name
	value := p.arithmExprTernary(compact)
	switch BinAritOperator(p.tok) {
	case AddAssgn, SubAssgn, MulAssgn, QuoAssgn, RemAssgn, AndAssgn,
		OrAssgn, XorAssgn, ShlAssgn, ShrAssgn, Assgn:
		if compact && p.spaced {
			// In compact mode a spaced operator ends the expression.
			return value
		}
		if !isArithName(value) {
			p.posErr(p.pos, "%s must follow a name", p.tok.String())
		}
		pos := p.pos
		tok := p.tok
		p.nextArithOp(compact)
		// Right associativity: the RHS recurses into assignment itself.
		y := p.arithmExprAssign(compact)
		if y == nil {
			p.followErrExp(pos, tok.String())
		}
		return &BinaryArithm{
			OpPos: pos,
			Op: BinAritOperator(tok),
			X: value,
			Y: y,
		}
	}
	return value
}
// arithmExprTernary parses `cond ? a : b`. The false branch recurses into
// this function, making the ternary right-associative.
func (p *Parser) arithmExprTernary(compact bool) ArithmExpr {
	value := p.arithmExprLor(compact)
	if BinAritOperator(p.tok) != TernQuest || (compact && p.spaced) {
		return value
	}
	if value == nil {
		p.curErr("%s must follow an expression", p.tok.String())
	}
	questPos := p.pos
	p.nextArithOp(compact)
	if BinAritOperator(p.tok) == TernColon {
		// `? :` with no true branch is an error.
		p.followErrExp(questPos, TernQuest.String())
	}
	trueExpr := p.arithmExpr(compact)
	if trueExpr == nil {
		p.followErrExp(questPos, TernQuest.String())
	}
	if BinAritOperator(p.tok) != TernColon {
		p.posErr(questPos, "ternary operator missing : after ?")
	}
	colonPos := p.pos
	p.nextArithOp(compact)
	falseExpr := p.arithmExprTernary(compact)
	if falseExpr == nil {
		p.followErrExp(colonPos, TernColon.String())
	}
	// The ternary is encoded as nested binary nodes: `? (a : b)`.
	return &BinaryArithm{
		OpPos: questPos,
		Op: TernQuest,
		X: value,
		Y: &BinaryArithm{
			OpPos: colonPos,
			Op: TernColon,
			X: trueExpr,
			Y: falseExpr,
		},
	}
}
// The chain below encodes C-style operator precedence; each level delegates
// to the next tighter-binding one.
func (p *Parser) arithmExprLor(compact bool) ArithmExpr {
	return p.arithmExprBinary(compact, p.arithmExprLand, OrArit)
}
func (p *Parser) arithmExprLand(compact bool) ArithmExpr {
	return p.arithmExprBinary(compact, p.arithmExprBor, AndArit)
}
func (p *Parser) arithmExprBor(compact bool) ArithmExpr {
	return p.arithmExprBinary(compact, p.arithmExprBxor, Or)
}
func (p *Parser) arithmExprBxor(compact bool) ArithmExpr {
	return p.arithmExprBinary(compact, p.arithmExprBand, Xor)
}
func (p *Parser) arithmExprBand(compact bool) ArithmExpr {
	return p.arithmExprBinary(compact, p.arithmExprEquality, And)
}
func (p *Parser) arithmExprEquality(compact bool) ArithmExpr {
	return p.arithmExprBinary(compact, p.arithmExprComparison, Eql, Neq)
}
func (p *Parser) arithmExprComparison(compact bool) ArithmExpr {
	return p.arithmExprBinary(compact, p.arithmExprShift, Lss, Gtr, Leq, Geq)
}
func (p *Parser) arithmExprShift(compact bool) ArithmExpr {
	return p.arithmExprBinary(compact, p.arithmExprAddition, Shl, Shr)
}
func (p *Parser) arithmExprAddition(compact bool) ArithmExpr {
	return p.arithmExprBinary(compact, p.arithmExprMultiplication, Add, Sub)
}
func (p *Parser) arithmExprMultiplication(compact bool) ArithmExpr {
	return p.arithmExprBinary(compact, p.arithmExprPower, Mul, Quo, Rem)
}
func (p *Parser) arithmExprPower(compact bool) ArithmExpr {
	// Power is different from the other binary operators because it's right-associative
	value := p.arithmExprUnary(compact)
	if BinAritOperator(p.tok) != Pow || (compact && p.spaced) {
		return value
	}
	if value == nil {
		p.curErr("%s must follow an expression", p.tok.String())
	}
	op := p.tok
	pos := p.pos
	p.nextArithOp(compact)
	// Right associativity: the RHS recurses into power itself.
	y := p.arithmExprPower(compact)
	if y == nil {
		p.followErrExp(pos, op.String())
	}
	return &BinaryArithm{
		OpPos: pos,
		Op: BinAritOperator(op),
		X: value,
		Y: y,
	}
}
// arithmExprUnary parses prefix unary operators (!, ~, +, -), which bind
// tighter than any binary operator and nest recursively.
func (p *Parser) arithmExprUnary(compact bool) ArithmExpr {
	if !compact {
		p.got(_Newl)
	}
	switch UnAritOperator(p.tok) {
	case Not, BitNegation, Plus, Minus:
		ue := &UnaryArithm{OpPos: p.pos, Op: UnAritOperator(p.tok)}
		p.nextArithOp(compact)
		if ue.X = p.arithmExprUnary(compact); ue.X == nil {
			p.followErrExp(ue.OpPos, ue.Op.String())
		}
		return ue
	}
	return p.arithmExprValue(compact)
}
// arithmExprValue parses a primary expression: pre-increment/decrement,
// parenthesized expressions, literals and indexed parameter expansions,
// words, and a trailing post-increment/decrement on a name.
func (p *Parser) arithmExprValue(compact bool) ArithmExpr {
	var x ArithmExpr
	switch p.tok {
	case addAdd, subSub:
		ue := &UnaryArithm{OpPos: p.pos, Op: UnAritOperator(p.tok)}
		p.nextArith(compact)
		if p.tok != _LitWord {
			p.followErr(ue.OpPos, token(ue.Op).String(), "a literal")
		}
		ue.X = p.arithmExprValue(compact)
		return ue
	case leftParen:
		pe := &ParenArithm{Lparen: p.pos}
		p.nextArithOp(compact)
		pe.X = p.followArithm(leftParen, pe.Lparen)
		pe.Rparen = p.matched(pe.Lparen, leftParen, rightParen)
		x = pe
	case leftBrack:
		p.curErr("[ must follow a name")
	case colon:
		p.curErr("ternary operator missing ? before :")
	case _LitWord:
		l := p.getLit()
		if p.tok != leftBrack {
			x = p.word(p.wps(l))
			break
		}
		// name[...]: an indexed parameter expansion without the dollar.
		pe := &ParamExp{Dollar: l.ValuePos, Short: true, Param: l}
		pe.Index = p.eitherIndex()
		x = p.word(p.wps(pe))
	case bckQuote:
		if p.quote == arithmExprLet && p.openBquotes > 0 {
			return nil
		}
		fallthrough
	default:
		if w := p.getWord(); w != nil {
			x = w
		} else {
			return nil
		}
	}
	if compact && p.spaced {
		return x
	}
	if !compact {
		p.got(_Newl)
	}
	// we want real nil, not (*Word)(nil) as that
	// sets the type to non-nil and then x != nil
	if p.tok == addAdd || p.tok == subSub {
		if !isArithName(x) {
			p.curErr("%s must follow a name", p.tok.String())
		}
		u := &UnaryArithm{
			Post: true,
			OpPos: p.pos,
			Op: UnAritOperator(p.tok),
			X: x,
		}
		p.nextArith(compact)
		return u
	}
	return x
}
// nextArith consumes a token.
// It returns true if compact and the token was followed by spaces
func (p *Parser) nextArith(compact bool) bool {
	p.next()
	if compact && p.spaced {
		return true
	}
	if !compact {
		p.got(_Newl)
	}
	return false
}
// nextArithOp consumes an operator token, erroring out if compact spacing
// terminated the expression right after the operator.
func (p *Parser) nextArithOp(compact bool) {
	pos := p.pos
	tok := p.tok
	if p.nextArith(compact) {
		p.followErrExp(pos, tok.String())
	}
}
// arithmExprBinary is used for all left-associative binary operators
func (p *Parser) arithmExprBinary(compact bool, nextOp func(bool) ArithmExpr, operators ...BinAritOperator) ArithmExpr {
	value := nextOp(compact)
	for {
		var foundOp BinAritOperator
		for _, op := range operators {
			if p.tok == token(op) {
				foundOp = op
				break
			}
		}
		// foundOp's zero value is illegalTok: no operator matched.
		if token(foundOp) == illegalTok || (compact && p.spaced) {
			return value
		}
		if value == nil {
			p.curErr("%s must follow an expression", p.tok.String())
		}
		pos := p.pos
		p.nextArithOp(compact)
		y := nextOp(compact)
		if y == nil {
			p.followErrExp(pos, foundOp.String())
		}
		// Left associativity: the accumulated value becomes the new LHS.
		value = &BinaryArithm{
			OpPos: pos,
			Op: foundOp,
			X: value,
			Y: y,
		}
	}
}
// isArithName reports whether left is a plain variable name (or a naked
// indexed parameter), i.e. something that may precede ++/-- or assignment.
func isArithName(left ArithmExpr) bool {
	w, ok := left.(*Word)
	if !ok || len(w.Parts) != 1 {
		return false
	}
	switch x := w.Parts[0].(type) {
	case *Lit:
		return ValidName(x.Value)
	case *ParamExp:
		return x.nakedIndex()
	default:
		return false
	}
}
// followArithm parses an expression that must follow ftok, erroring out
// when it is missing.
func (p *Parser) followArithm(ftok token, fpos Pos) ArithmExpr {
	x := p.arithmExpr(false)
	if x == nil {
		p.followErrExp(fpos, ftok.String())
	}
	return x
}
// peekArithmEnd reports whether the next two characters form the "))" that
// closes an arithmetic expression.
func (p *Parser) peekArithmEnd() bool {
	return p.tok == rightParen && p.r == ')'
}
// arithmMatchingErr emits the most specific error available for a missing
// closing token inside an arithmetic expression.
func (p *Parser) arithmMatchingErr(pos Pos, left, right token) {
	switch p.tok {
	case _Lit, _LitWord:
		p.curErr("not a valid arithmetic operator: %s", p.val)
	case leftBrack:
		p.curErr("[ must follow a name")
	case colon:
		p.curErr("ternary operator missing ? before :")
	case rightParen, _EOF:
		p.matchingErr(pos, left, right)
	default:
		if p.quote == arithmExpr {
			p.curErr("not a valid arithmetic operator: %v", p.tok)
		}
		p.matchingErr(pos, left, right)
	}
}
// matchedArithm consumes the expected closing token or reports an error.
func (p *Parser) matchedArithm(lpos Pos, left, right token) {
	if !p.got(right) {
		p.arithmMatchingErr(lpos, left, right)
	}
}
// arithmEnd consumes the closing "))", restores the saved lexer state, and
// returns the position just after the closer.
func (p *Parser) arithmEnd(ltok token, lpos Pos, old saveState) Pos {
	if !p.peekArithmEnd() {
		p.arithmMatchingErr(lpos, ltok, dblRightParen)
	}
	p.rune()
	p.postNested(old)
	pos := p.pos
	p.next()
	return pos
} | syntax/parser_arithm.go | 0.657648 | 0.618896 | parser_arithm.go | starcoder |
package main
import ("fmt";"math/rand"; "math"; "time"; "flag")
// flip simulates one fair coin toss, returning 0 or 1.
func flip() int {
	coin := rand.Intn(2)
	return coin
}
// numflips returns the least number of coin flips whose bitstring covers
// every value in [0, set_size), i.e. ceil(log2(set_size)).
// E.g. set_size 7 needs 3 flips; set_size 30 needs 5.
func numflips(set_size int) int {
	return int(math.Ceil(math.Log2(float64(set_size))))
}
// calculateUniform analytically estimates the probability that one specific
// item out of set_size is chosen by the rejection-sampling coin-flip scheme.
// With m = 2^ceil(log2(set_size)) outcomes per round, the chance of hitting
// a given item on retry k is ((m-set_size)/m)^k * 1/m; num_terms bounds how
// many terms of that geometric series are summed. The result is printed and
// returned.
func calculateUniform(set_size int, num_terms int) float64 {
	flips := int(math.Ceil(math.Log2(float64(set_size))))
	totalItems := int(math.Pow(2, float64(flips)))
	multiplier := float64(totalItems-set_size) / float64(totalItems)
	sum := 1.0 / float64(totalItems)
	term := sum
	for i := 0; i < num_terms; i++ {
		term *= multiplier
		sum += term
	}
	fmt.Printf("Probability of choosing 1 item out of %d: %f\n", set_size, sum)
	return sum
}
// trialUniform empirically validates the coin-flip selection scheme: it
// builds numbers bit-by-bit from fair coin flips, rejects any draw that is
// >= set_size, and tallies how often each item is hit over num_trials
// accepted draws. The per-item counts are printed and returned.
func trialUniform(set_size int, num_trials int) []int {
	bits := int(math.Ceil(math.Log2(float64(set_size))))
	hits := make([]int, set_size)
	accepted := 0
	for accepted < num_trials {
		value := 0
		weight := 1
		for b := 0; b < bits; b++ {
			value += rand.Intn(2) * weight
			weight *= 2
		}
		if value >= set_size {
			continue // rejection sampling: discard and redraw
		}
		hits[value]++
		accepted++
	}
	for item, count := range hits {
		fmt.Printf("Number of hits for %d : %d\n", item, count)
	}
	return hits
}
// getOpts registers and parses the command-line flags, returning the set
// size (-n), the number of simulation trials (-t) and the number of series
// terms to sum (-m).
func getOpts() (int, int, int) {
	var (
		setsize   = flag.Int("n", 3, "number of items in a set")
		numtrials = flag.Int("t", 1000, "number of trials to simulate")
		numterms  = flag.Int("m", 100, "number of terms to sum")
	)
	flag.Parse()
	return *setsize, *numtrials, *numterms
}
// main seeds the global PRNG, reads the command-line options, then runs both
// the analytical probability calculation and the empirical simulation.
func main() {
	rand.Seed(time.Now().UTC().UnixNano())
	setsize, numtrials, numterms := getOpts()
	calculateUniform(setsize, numterms)
	trialUniform(setsize, numtrials)
} | flip.go | 0.696371 | 0.416203 | flip.go | starcoder |
package collections
import (
"fmt"
"time"
)
// An Ord represents a result of comparing two items.
type Ord int
const (
	// Less represents the idea of x < y.
	Less Ord = 1 << iota
	// Equal represents the idea of x == y.
	Equal
	// Greater represents the idea of x > y.
	Greater
)
// checkOrd panics if the ord value is not Less, Equal, or Greater.
// The constants are distinct bits, so clearing all three must leave zero.
func checkOrd(prefix string, o Ord) {
	if v := o &^ (Less | Equal | Greater); v != 0 {
		panic(fmt.Sprintf("%s: bad compare value %d", prefix, o))
	}
}
// String converts an Ord to a string for pretty printing.
// Unknown values render as "?".
func (o Ord) String() string {
	switch o {
	case Less:
		return "Less"
	case Equal:
		return "Equal"
	case Greater:
		return "Greater"
	default:
		return "?"
	}
}
// Bounded represents any collection with a known length, such as a list.
type Bounded interface {
	Len() int
}
// Comparable represents any collection where two elements can be accessed and
// compared.
type Comparable interface {
	Compare(i, j int) Ord
}
// Swappable represents any collection where two elements can be swapped.
type Swappable interface {
	Swap(i, j int)
}
// The slice aliases below each satisfy Bounded, Comparable and Swappable
// for their element type, delegating comparison to the Compare* helpers.
// IntSlice is an alias for a slice of ints.
type IntSlice []int
// Len calculates the length of the underlying int slice.
func (s IntSlice) Len() int { return len(s) }
// Compare compares two ints in a slice, returning their ordering.
func (s IntSlice) Compare(i, j int) Ord {
	x, y := s[i], s[j]
	return CompareInts(x, y)
}
// Swap swaps two ints in a slice.
func (s IntSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// FloatSlice is an alias for a slice of floats.
type FloatSlice []float64
// Len calculates the length of the underlying float slice.
func (s FloatSlice) Len() int { return len(s) }
// Compare compares two floats in a slice, returning their ordering.
func (s FloatSlice) Compare(i, j int) Ord {
	x, y := s[i], s[j]
	return CompareFloats(x, y)
}
// Swap swaps two floats in a slice.
func (s FloatSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// StringSlice is an alias for a slice of strings.
type StringSlice []string
// Len calculates the length of the underlying string slice.
func (s StringSlice) Len() int { return len(s) }
// Compare compares two strings in a slice, returning their ordering.
func (s StringSlice) Compare(i, j int) Ord {
	x, y := s[i], s[j]
	return CompareStrings(x, y)
}
// Swap swaps two strings in a slice.
func (s StringSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// TimeSlice is an alias for a slice of times.
type TimeSlice []time.Time
// Len calculates the length of the underlying time slice.
func (s TimeSlice) Len() int { return len(s) }
// Compare compares two times in a slice, returning their ordering.
func (s TimeSlice) Compare(i, j int) Ord {
	x, y := s[i], s[j]
	return CompareTimes(x, y)
}
// Swap swaps two times in a slice.
func (s TimeSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } | base.go | 0.862728 | 0.608391 | base.go | starcoder |
package chapter09
import "constraints"
// Number constrains type parameters to the built-in numeric types.
// constraints.Signed is a subset of constraints.Integer, so listing it in
// the union was redundant; the type set is unchanged without it.
type Number interface {
	constraints.Integer | constraints.Float
}
// GenericItem defines a take-able item which has three properties. A weight, a value and a name for displaying.
// The weight and value share the numeric type parameter T.
type GenericItem[T Number] interface {
	Weight() T
	Name() string
	Value() T
}
// KnapItem is a concrete GenericItem[int] backed by plain fields.
// The underscore-prefixed field names presumably exist to avoid clashing
// with the accessor method names.
type KnapItem struct {
	_Name string
	_Value int
	_Weight int
}
// Weight returns the item's weight.
func (k KnapItem) Weight() int {
	return k._Weight
}
// Name returns the item's display name.
func (k KnapItem) Name() string {
	return k._Name
}
// Value returns the item's value.
func (k KnapItem) Value() int {
	return k._Value
}
// Item is a candidate object for the knapsack: a display name, a value to
// maximize, and a weight that counts against the capacity limit.
type Item struct {
	Name string
	Value int
	Weight int
}
// Knapsack solves the classic 0/1 knapsack problem with bottom-up dynamic
// programming: it returns the maximum total value achievable from items
// whose combined weight does not exceed limit.
// table[row][w] holds the best value using only the first `row` items with
// capacity w; row 0 is all zeros so the first item needs no special case.
func Knapsack(items []Item, limit int) int {
	table := make([][]int, len(items)+1)
	for row := range table {
		table[row] = make([]int, limit+1)
	}
	for row := 1; row <= len(items); row++ {
		it := items[row-1]
		for w := 1; w <= limit; w++ {
			without := table[row-1][w]
			if it.Weight > w {
				// Item cannot fit: carry the best value forward.
				table[row][w] = without
				continue
			}
			with := table[row-1][w-it.Weight] + it.Value
			if with > without {
				table[row][w] = with
			} else {
				table[row][w] = without
			}
		}
	}
	return table[len(items)][limit]
}
// KnapsackGeneric since you can't use limit as above because j-item.Weight needs to be an integer,
// we have to already have the row markers from somewhere.
// Once we have them, we can then run the same thing but instead of using it as an index,
// we always do an `IndexOf` on the item that we get.
// NOTE(review): the IndexOf(limit - weight + 1) trick below only lands on
// the correct column when limits are the consecutive integers 1..len(limits)
// (column j holds capacity limits[j-1]) — confirm callers guarantee that.
func KnapsackGeneric[I GenericItem[int], T Number](items []I, limits []T) T {
	// why +1? Because we add a row with 0-s that can akt as a minimum value for
	// the first items.
	cell := make([][]T, len(items)+1)
	for i := 0; i < len(items)+1; i++ {
		cell[i] = make([]T, len(limits)+1)
	}
	for i := 1; i <= len(items); i++ {
		for j := 1; j <= len(limits); j++ {
			item := items[i-1]
			limit := limits[j-1]
			if T(item.Weight()) > limit {
				cell[i][j] = cell[i-1][j]
			} else {
				next := IndexOf(limit-T(item.Weight()-1), limits)
				cell[i][j] = max(cell[i-1][j], cell[i-1][next]+T(item.Value()))
			}
		}
	}
	return cell[len(items)][len(limits)]
}
// IndexOf returns the position of num within nums, or -1 when absent.
func IndexOf[T Number](num T, nums []T) int {
	for i := 0; i < len(nums); i++ {
		if nums[i] == num {
			return i
		}
	}
	return -1
}
// LongestCommonSubstring returns the longest contiguous run of bytes shared
// by a and b (the empty string when they share nothing).
// table[i][j] holds the length of the common suffix of a[:i] and b[:j];
// the extra zero row/column makes the first characters need no special case.
func LongestCommonSubstring(a, b string) string {
	table := make([][]int, len(a)+1)
	for i := range table {
		table[i] = make([]int, len(b)+1)
	}
	best, end := 0, 0
	for i := 1; i <= len(a); i++ {
		for j := 1; j <= len(b); j++ {
			if a[i-1] != b[j-1] {
				continue // mismatch: the cell stays zero
			}
			length := table[i-1][j-1] + 1
			table[i][j] = length
			// Track where the best run ends; the maximum cell is not
			// necessarily in the last row or column.
			if length > best {
				best, end = length, i
			}
		}
	}
	return a[end-best : end]
}
// LongestCommonSubsequence returns the length of the longest (not
// necessarily contiguous) subsequence shared by a and b.
// table[i][j] holds the answer for the prefixes a[:i] and b[:j]; the extra
// zero row/column removes any special case for the first characters.
func LongestCommonSubsequence(a, b string) int {
	table := make([][]int, len(a)+1)
	for i := range table {
		table[i] = make([]int, len(b)+1)
	}
	for i := 1; i <= len(a); i++ {
		for j := 1; j <= len(b); j++ {
			switch {
			case a[i-1] == b[j-1]:
				// Matching characters extend the best prefix answer.
				table[i][j] = table[i-1][j-1] + 1
			case table[i-1][j] >= table[i][j-1]:
				table[i][j] = table[i-1][j]
			default:
				table[i][j] = table[i][j-1]
			}
		}
	}
	return table[len(a)][len(b)]
}
// max returns the larger of a and b.
// (Since Go 1.21 the built-in max covers this for ordered types.)
func max[T Number](a, b T) T {
	if a > b {
		return a
	}
	return b
} | chapter09/dynamic_programming.go | 0.777046 | 0.492188 | dynamic_programming.go | starcoder |
package graphics
import (
"fmt"
"github.com/stnma7e/betuol/common"
"github.com/stnma7e/betuol/component"
)
// TextGraphicsHandler implements the GraphicsHandler interface, but instead of rendering to a graphics context, TextGraphicsHandler instead outputs textual descriptions of each model.
type TextGraphicsHandler struct {
	compList []string // per-GOiD text description, indexed by id
	lastIdList common.Vector // ids rendered on the previous RenderDiff call
}
// MakeTextGraphicsHandler returns a pointer to a TextGraphicsHandler.
func MakeTextGraphicsHandler() *TextGraphicsHandler {
	tgh := &TextGraphicsHandler{
		make([]string, 0),
		*common.MakeVector(),
	}
	return tgh
}
// Tick returns the status of the text window; the text handler has no
// per-frame work, so it always reports true.
func (tgh *TextGraphicsHandler) Tick() bool {
	return true
}
// RenderDiff outputs text based on the GOiD's in the list passed as an argument.
// The function will only output a new text description if the model has newly come into the scene.
func (tgh *TextGraphicsHandler) RenderDiff(ids *common.Vector, sm component.SceneManager) {
	// Only render the ids that were not present last call.
	diff := tgh.lastIdList.Difference(ids)
	//common.LogInfo.Println(ids, tgh.lastIdList, diff)
	tgh.lastIdList = *ids
	tgh.Render(diff, sm)
}
// Render implements the Renderer interface and outputs text based on the GOiD's in the list passed as an argument.
// Each line shows the id, its location (or "no location") and its description.
func (tgh *TextGraphicsHandler) Render(ids *common.Vector, sm component.SceneManager) {
	comps := ids.Array()
	for i := range comps {
		id := comps[i].(component.GOiD)
		locStr := "no location"
		loc, err := sm.GetObjectLocation(id)
		if err == nil {
			locStr = fmt.Sprint(loc)
		}
		fmt.Printf("%d %s, \"%s\"\n", id, locStr, tgh.compList[id])
	}
}
// LoadModel implements the GraphicsHandler interface and adds data used to render the components later.
// The description storage is grown as needed to make room for the id.
func (tgh *TextGraphicsHandler) LoadModel(id component.GOiD, gc GraphicsComponent) error {
	tgh.resizeArrays(id)
	tgh.compList[id] = gc.TextDescription
	return nil
}
// DeleteModel implements the GraphicsHandler interface and deletes the data used for rendering.
// The slot is overwritten rather than removed so other ids keep their indices.
func (tgh *TextGraphicsHandler) DeleteModel(id component.GOiD) {
	// Guard against ids that were never loaded: indexing past the slice
	// would panic, since only LoadModel grows the storage.
	if int(id) >= len(tgh.compList) {
		return
	}
	tgh.compList[id] = "dead."
}
// resizeArrays grows compList so that index id is valid, copying over any
// existing descriptions.
func (tgh *TextGraphicsHandler) resizeArrays(id component.GOiD) {
	const RESIZESTEP = 1
	// Compare against len, not cap: indexing is bounded by len, so the old
	// cap-based check could skip the grow and make compList[id] panic
	// whenever cap exceeded len.
	if int(id) >= len(tgh.compList) {
		newCompList := make([]string, int(id)+RESIZESTEP)
		copy(newCompList, tgh.compList)
		tgh.compList = newCompList
	}
}
// HandleInputs implements the GraphicsHandler interface and returns the inputs received since the last query.
// The text handler accepts no input, so the result is always empty.
func (tgh *TextGraphicsHandler) HandleInputs() Inputs {
	return Inputs{}
}
// DrawString implements the GraphicsHandler interface. The text handler
// draws nothing, so the call is a no-op and all arguments are ignored.
func (tgh *TextGraphicsHandler) DrawString(x, y float32, text string) {
}
// GetSize implements the GraphicsHandler interface, but returns 0, 0 always because the text window has no size.
func (tgh *TextGraphicsHandler) GetSize() (int, int) {
	return 0, 0
} | graphics/text.go | 0.796886 | 0.414069 | text.go | starcoder |
package value
import (
"math/big"
)
// asinh computes the inverse hyperbolic sine of v. Complex values with a
// nonzero imaginary part take the complex path; pure-real complexes are
// unwrapped and evaluated on the real path, which is total over the reals.
func asinh(c Context, v Value) Value {
	if u, ok := v.(Complex); ok {
		if !isZero(u.imag) {
			return complexAsinh(c, u)
		}
		v = u.real
	}
	return evalFloatFunc(c, v, floatAsinh)
}
// acosh computes the inverse hyperbolic cosine of v. Real arguments below 1
// lie outside the real domain and are promoted to the complex formula.
func acosh(c Context, v Value) Value {
	if u, ok := v.(Complex); ok {
		if !isZero(u.imag) {
			return complexAcosh(c, u)
		}
		v = u.real
	}
	if compare(v, 1) < 0 {
		return complexAcosh(c, newComplex(v, zero))
	}
	return evalFloatFunc(c, v, floatAcosh)
}
// atanh computes the inverse hyperbolic tangent of v. Real arguments
// outside the open interval (-1, 1) are promoted to the complex formula.
func atanh(c Context, v Value) Value {
	if u, ok := v.(Complex); ok {
		if !isZero(u.imag) {
			return complexAtanh(c, u)
		}
		v = u.real
	}
	if compare(v, -1) <= 0 || 0 <= compare(v, 1) {
		return complexAtanh(c, newComplex(v, zero))
	}
	return evalFloatFunc(c, v, floatAtanh)
}
// floatAsinh computes asinh(x) using the formula asinh(x) = log(x + sqrt(x²+1)).
// The domain is the real line.
func floatAsinh(c Context, x *big.Float) *big.Float {
	z := newFloat(c).Set(x)
	z.Mul(z, x)
	z.Add(z, floatOne)
	z = floatSqrt(c, z)
	z.Add(z, x)
	return floatLog(c, z)
}
// floatAcosh computes acosh(x) using the formula asinh(x) = log(x + sqrt(x²-1)).
// The domain is the real line >= 1; smaller arguments cause an error.
func floatAcosh(c Context, x *big.Float) *big.Float {
	if x.Cmp(floatOne) < 0 {
		Errorf("real acosh out of range [1, +∞ )")
	}
	z := newFloat(c).Set(x)
	z.Mul(z, x)
	z.Sub(z, floatOne)
	z = floatSqrt(c, z)
	z.Add(z, x)
	return floatLog(c, z)
}
// floatAtanh computes atanh(x) using the formula asinh(x) = ½log((1+x)/(1-x))
// The domain is the open interval (-1, 1); other arguments cause an error.
func floatAtanh(c Context, x *big.Float) *big.Float {
	if x.Cmp(floatMinusOne) <= 0 || 0 <= x.Cmp(floatOne) {
		Errorf("real atanh out of range (-1, 1)")
	}
	num := newFloat(c).Add(floatOne, x)
	den := newFloat(c).Sub(floatOne, x)
	z := floatLog(c, newFloat(c).Quo(num, den))
	return z.Quo(z, floatTwo)
}
// complexAsinh computes asinh(x) using the formula asinh(x) = log(x + sqrt(x²+1)).
func complexAsinh(c Context, x Complex) Complex {
	z := x.mul(c, x)
	z = z.add(c, newComplex(one, zero))
	z = complexSqrt(c, z)
	z = z.add(c, x)
	return complexLog(c, z)
}
// complexAcosh computes asinh(x) using the formula asinh(x) = log(x + sqrt(x²-1)).
func complexAcosh(c Context, x Complex) Complex {
	z := x.mul(c, x)
	z = z.sub(c, newComplex(one, zero))
	z = complexSqrt(c, z)
	z = z.add(c, x)
	return complexLog(c, z)
}
// complexAtanh computes asinh(x) using the formula asinh(x) = ½log((1+x)/(1-x))
// The poles at ±1 are reported as an error rather than producing infinities.
func complexAtanh(c Context, x Complex) Complex {
	num := complexOne.add(c, x)
	den := complexOne.sub(c, x)
	if isZero(num) || isZero(den) {
		Errorf("atanh is infinite")
	}
	z := num.div(c, den)
	return complexLog(c, z).mul(c, complexHalf)
} | value/asinh.go | 0.765155 | 0.448487 | asinh.go | starcoder |
package wtf
import (
"github.com/gdamore/tcell"
)
// Colors maps lowercase color names (W3C/HTML-style) to their tcell color
// values. Both the "gray" and "grey" spellings resolve to tcell.ColorGray.
// Look up entries via ColorFor to get a safe fallback for unknown names.
var Colors = map[string]tcell.Color{
	"aliceblue":            tcell.ColorAliceBlue,
	"antiquewhite":         tcell.ColorAntiqueWhite,
	"aqua":                 tcell.ColorAqua,
	"aquamarine":           tcell.ColorAquaMarine,
	"azure":                tcell.ColorAzure,
	"beige":                tcell.ColorBeige,
	"bisque":               tcell.ColorBisque,
	"black":                tcell.ColorBlack,
	"blanchedalmond":       tcell.ColorBlanchedAlmond,
	"blue":                 tcell.ColorBlue,
	"blueviolet":           tcell.ColorBlueViolet,
	"brown":                tcell.ColorBrown,
	"burlywood":            tcell.ColorBurlyWood,
	"cadetblue":            tcell.ColorCadetBlue,
	"chartreuse":           tcell.ColorChartreuse,
	"chocolate":            tcell.ColorChocolate,
	"coral":                tcell.ColorCoral,
	"cornflowerblue":       tcell.ColorCornflowerBlue,
	"cornsilk":             tcell.ColorCornsilk,
	"crimson":              tcell.ColorCrimson,
	"darkblue":             tcell.ColorDarkBlue,
	"darkcyan":             tcell.ColorDarkCyan,
	"darkgoldenrod":        tcell.ColorDarkGoldenrod,
	"darkgray":             tcell.ColorDarkGray,
	"darkgreen":            tcell.ColorDarkGreen,
	"darkkhaki":            tcell.ColorDarkKhaki,
	"darkmagenta":          tcell.ColorDarkMagenta,
	"darkolivegreen":       tcell.ColorDarkOliveGreen,
	"darkorange":           tcell.ColorDarkOrange,
	"darkorchid":           tcell.ColorDarkOrchid,
	"darkred":              tcell.ColorDarkRed,
	"darksalmon":           tcell.ColorDarkSalmon,
	"darkseagreen":         tcell.ColorDarkSeaGreen,
	"darkslateblue":        tcell.ColorDarkSlateBlue,
	"darkslategray":        tcell.ColorDarkSlateGray,
	"darkturquoise":        tcell.ColorDarkTurquoise,
	"darkviolet":           tcell.ColorDarkViolet,
	"deeppink":             tcell.ColorDeepPink,
	"deepskyblue":          tcell.ColorDeepSkyBlue,
	"dimgray":              tcell.ColorDimGray,
	"dodgerblue":           tcell.ColorDodgerBlue,
	"firebrick":            tcell.ColorFireBrick,
	"floralwhite":          tcell.ColorFloralWhite,
	"forestgreen":          tcell.ColorForestGreen,
	"fuchsia":              tcell.ColorFuchsia,
	"gainsboro":            tcell.ColorGainsboro,
	"ghostwhite":           tcell.ColorGhostWhite,
	"gold":                 tcell.ColorGold,
	"goldenrod":            tcell.ColorGoldenrod,
	"gray":                 tcell.ColorGray,
	"green":                tcell.ColorGreen,
	"greenyellow":          tcell.ColorGreenYellow,
	"grey":                 tcell.ColorGray,
	"honeydew":             tcell.ColorHoneydew,
	"hotpink":              tcell.ColorHotPink,
	"indianred":            tcell.ColorIndianRed,
	"indigo":               tcell.ColorIndigo,
	"ivory":                tcell.ColorIvory,
	"khaki":                tcell.ColorKhaki,
	"lavender":             tcell.ColorLavender,
	"lavenderblush":        tcell.ColorLavenderBlush,
	"lawngreen":            tcell.ColorLawnGreen,
	"lemonchiffon":         tcell.ColorLemonChiffon,
	"lightblue":            tcell.ColorLightBlue,
	"lightcoral":           tcell.ColorLightCoral,
	"lightcyan":            tcell.ColorLightCyan,
	"lightgoldenrodyellow": tcell.ColorLightGoldenrodYellow,
	"lightgray":            tcell.ColorLightGray,
	"lightgreen":           tcell.ColorLightGreen,
	"lightpink":            tcell.ColorLightPink,
	"lightsalmon":          tcell.ColorLightSalmon,
	"lightseagreen":        tcell.ColorLightSeaGreen,
	"lightskyblue":         tcell.ColorLightSkyBlue,
	"lightslategray":       tcell.ColorLightSlateGray,
	"lightsteelblue":       tcell.ColorLightSteelBlue,
	"lightyellow":          tcell.ColorLightYellow,
	"lime":                 tcell.ColorLime,
	"limegreen":            tcell.ColorLimeGreen,
	"linen":                tcell.ColorLinen,
	"maroon":               tcell.ColorMaroon,
	"mediumaquamarine":     tcell.ColorMediumAquamarine,
	"mediumblue":           tcell.ColorMediumBlue,
	"mediumorchid":         tcell.ColorMediumOrchid,
	"mediumpurple":         tcell.ColorMediumPurple,
	"mediumseagreen":       tcell.ColorMediumSeaGreen,
	"mediumslateblue":      tcell.ColorMediumSlateBlue,
	"mediumspringgreen":    tcell.ColorMediumSpringGreen,
	"mediumturquoise":      tcell.ColorMediumTurquoise,
	"mediumvioletred":      tcell.ColorMediumVioletRed,
	"midnightblue":         tcell.ColorMidnightBlue,
	"mintcream":            tcell.ColorMintCream,
	"mistyrose":            tcell.ColorMistyRose,
	"moccasin":             tcell.ColorMoccasin,
	"navajowhite":          tcell.ColorNavajoWhite,
	"navy":                 tcell.ColorNavy,
	"oldlace":              tcell.ColorOldLace,
	"olive":                tcell.ColorOlive,
	"olivedrab":            tcell.ColorOliveDrab,
	"orange":               tcell.ColorOrange,
	"orangered":            tcell.ColorOrangeRed,
	"orchid":               tcell.ColorOrchid,
	"palegoldenrod":        tcell.ColorPaleGoldenrod,
	"palegreen":            tcell.ColorPaleGreen,
	"paleturquoise":        tcell.ColorPaleTurquoise,
	"palevioletred":        tcell.ColorPaleVioletRed,
	"papayawhip":           tcell.ColorPapayaWhip,
	"peachpuff":            tcell.ColorPeachPuff,
	"peru":                 tcell.ColorPeru,
	"pink":                 tcell.ColorPink,
	"plum":                 tcell.ColorPlum,
	"powderblue":           tcell.ColorPowderBlue,
	"purple":               tcell.ColorPurple,
	"rebeccapurple":        tcell.ColorRebeccaPurple,
	"red":                  tcell.ColorRed,
	"rosybrown":            tcell.ColorRosyBrown,
	"royalblue":            tcell.ColorRoyalBlue,
	"saddlebrown":          tcell.ColorSaddleBrown,
	"salmon":               tcell.ColorSalmon,
	"sandybrown":           tcell.ColorSandyBrown,
	"seagreen":             tcell.ColorSeaGreen,
	"seashell":             tcell.ColorSeashell,
	"sienna":               tcell.ColorSienna,
	"silver":               tcell.ColorSilver,
	"skyblue":              tcell.ColorSkyblue,
	"slateblue":            tcell.ColorSlateBlue,
	"slategray":            tcell.ColorSlateGray,
	"snow":                 tcell.ColorSnow,
	"springgreen":          tcell.ColorSpringGreen,
	"steelblue":            tcell.ColorSteelBlue,
	"tan":                  tcell.ColorTan,
	"teal":                 tcell.ColorTeal,
	"thistle":              tcell.ColorThistle,
	"tomato":               tcell.ColorTomato,
	"turquoise":            tcell.ColorTurquoise,
	"violet":               tcell.ColorViolet,
	"wheat":                tcell.ColorWheat,
	"white":                tcell.ColorWhite,
	"whitesmoke":           tcell.ColorWhiteSmoke,
	"yellow":               tcell.ColorYellow,
	"yellowgreen":          tcell.ColorYellowGreen,
}
func ColorFor(label string) tcell.Color {
if _, ok := Colors[label]; ok {
return Colors[label]
} else {
return tcell.ColorGreen
}
} | wtf/colors.go | 0.504639 | 0.503967 | colors.go | starcoder |
// Renders a textured spinning cube using GLFW 3 and OpenGL 4.1 core forward-compatible profile.
// VS Code, left hand column: Green lines are new lines (since last commit), blue lines are changed from last commit,
// and red arrows mean deletion since last commit.
package main
import (
_ "image/png"
"github.com/go-gl/gl/v4.6-core/gl"
"github.com/go-gl/glfw/v3.3/glfw"
"github.com/go-gl/mathgl/mgl32"
"github.com/purelazy/modlib/internal/utils"
)
// main opens a GLFW window, builds a Box2D world, loads textures, a cubemap
// and two shader programs, then runs the render loop: skybox first (no depth
// test), then two textured models per "box" body, stepping physics at 60 Hz.
func main() {
	// Create the OpenGL context, window and camera
	//window, cam := utils.GetWindowAndCamera(1680-1, 1050-1)
	window, cam := utils.GetWindowAndCamera(840, 525)
	defer window.Destroy()
	// Set up Box2D world
	world := setupPhysics()
	// Load Textures and Cubemap (aka Skybox)
	modelTexture := utils.NewTexture("square.png")
	gl.BindTexture(gl.TEXTURE_2D, modelTexture.ID)
	cubemapTexture := utils.Cubemap(utils.Faces)
	gl.BindTexture(gl.TEXTURE_CUBE_MAP, cubemapTexture)
	// Compile model and cubemap shaders
	lighting := utils.NewProgram(utils.ReadShader("Lighting.vs.glsl"), utils.ReadShader("Lighting.fs.glsl"))
	cubemapShader := utils.NewProgram(utils.ReadShader("cubemap.vs.glsl"), utils.ReadShader("cubemap.fs.glsl"))
	defer gl.DeleteProgram(lighting)
	defer gl.DeleteProgram(cubemapShader)
	// ------------------------- Compute and set static uniforms
	// The projection matrix never changes, so it is uploaded once per program.
	projection := mgl32.Perspective(cam.Fovy, cam.Aspect, cam.Near, cam.Far)
	//cubeVAO, indices, uniLocs := utils.SetupModel("cubewithhole.obj", lighting, &projection[0])
	nutVAO := utils.SetupModel("cubewithhole.obj", lighting, &projection[0])
	sphereVAO := utils.SetupModel("sphere.obj", lighting, &projection[0])
	skyboxVAO, uViewCubemapLocation := setupSkybox(cubemapShader, &projection[0])
	for !window.ShouldClose() {
		// View is used in multiple programs
		view := mgl32.LookAtV(cam.Position, cam.Position.Add(cam.Forward), cam.Up)
		{ // ----------------Draw the skybox (36 verts)
			gl.UseProgram(cubemapShader)
			// Drawing the skybox first will draw every pixel, so the screen does not
			// need to be cleared and not depth testing
			gl.Disable(gl.DEPTH_TEST)
			// The skybox does not move, relative to the view. So all translation is set to zero
			viewWithoutTranslation := view.Mat3().Mat4()
			gl.UniformMatrix4fv(uViewCubemapLocation, 1, false, &viewWithoutTranslation[0])
			// Arm GPU with VAO and Render
			gl.BindVertexArray(skyboxVAO)
			gl.DrawArrays(gl.TRIANGLES, 0, 36)
		}
		// Step through time
		world.Step(1.0/60.0, 8, 3)
		bodies := world.GetBodyList()
		// ----------------Draw the bodies
		gl.Enable(gl.DEPTH_TEST)
		gl.Clear(gl.DEPTH_BUFFER_BIT)
		gl.UseProgram(lighting)
		gl.Enable(gl.CULL_FACE) // Only front-facing triangles will be drawn
		// Arm GPU with VAO and Render
		//gl.BindVertexArray(nutVAO.CubeVAO)
		gl.UniformMatrix4fv(nutVAO.UniLocs["uView"], 1, false, &view[0])
		gl.Uniform3fv(nutVAO.UniLocs["uViewPos"], 1, &cam.Position[0])
		for b := bodies; b != nil; b = b.GetNext() {
			if b.GetUserData() == "box" {
				// Layer 1 of this box
				// Send Box2D info
				gl.BindVertexArray(nutVAO.Vao)
				gl.Uniform4f(nutVAO.UniLocs["uPosAngle"], float32(b.GetPosition().X), float32(b.GetPosition().Y), 0, float32(b.GetAngle()))
				gl.DrawElements(gl.TRIANGLES, int32(len(*nutVAO.Indices)), gl.UNSIGNED_INT, gl.PtrOffset(0))
				// Layer 2 of this box
				// NOTE(review): X and Y are swapped here relative to layer 1 and the
				// sphere is drawn at depth 20 — confirm this mirroring is intentional.
				gl.BindVertexArray(sphereVAO.Vao)
				gl.Uniform4f(nutVAO.UniLocs["uPosAngle"], float32(b.GetPosition().Y), float32(b.GetPosition().X), 20, float32(b.GetAngle()))
				gl.DrawElements(gl.TRIANGLES, int32(len(*sphereVAO.Indices)), gl.UNSIGNED_INT, gl.PtrOffset(0))
			}
		}
		// Swap and Poll
		window.SwapBuffers()
		glfw.PollEvents()
	}
}
/*
Package reversetunnel provides interfaces for accessing remote clusters
via reverse tunnels and directly.
Reverse Tunnels
Proxy server Proxy agent
Reverse tunnel
+----------+ +---------+
| <----------------------+ |
| | | |
+-----+----------+ +---------+-----+
| | | |
| | | |
+----------------+ +---------------+
Proxy Cluster "A" Proxy Cluster "B"
Reverse tunnel is established from a cluster "B" Proxy
to the cluster "A" proxy, and clients of the cluster "A"
can access servers of the cluster "B" via reverse tunnel connection,
even if the cluster "B" is behind the firewall.
Multiple Proxies and Reverse Tunnels
With multiple proxies behind the load balancer,
proxy agents will eventually discover and establish connections to all
proxies in cluster.
* Initially Proxy Agent connects to Proxy 1.
* Proxy 1 starts sending information about all available proxies
to the Proxy Agent. This process is called "sending discovery request".
+----------+
| <--------+
| | |
+----------+ | +-----------+ +----------+
Proxy 1 +-------------------------------+ |
| | | |
+-----------+ +----------+
Load Balancer Proxy Agent
+----------+
| |
| |
+----------+
Proxy 2
* Agent will use the discovery request to establish new connections
and check if it has connected and "discovered" all the proxies specified
in the discovery request.
* Assuming that load balancer uses fair load balancing algorithm,
agent will eventually discover and connect back to all the proxies.
+----------+
| <--------+
| | |
+----------+ | +-----------+ +----------+
Proxy 1 +-------------------------------+ |
| | | | |
| +-----------+ +----------+
| Load Balancer Proxy Agent
+----------+ |
| <--------+
| |
+----------+
Proxy 2
*/
package reversetunnel | lib/reversetunnel/doc.go | 0.703855 | 0.619327 | doc.go | starcoder |
package parser
import (
"fmt"
"github.com/eliquious/aechbar/calculator/ast"
"github.com/eliquious/lexer"
"math/big"
"time"
)
// parseLiteralExpression parses the literal held in tok/lit and then looks
// ahead one token: a unary operator wraps the literal in a UnaryExpression,
// a binary operator starts a binary expression with the literal as its left
// side, and any other token is pushed back for the caller.
func (p *Parser) parseLiteralExpression(tok lexer.Token, pos lexer.Pos, lit string) (ast.Expression, error) {
	expr, err := p.parseLiteral(tok, pos, lit)
	if err != nil {
		return nil, err
	}
	// Scan for operators
	tok, pos, lit = p.scanIgnoreWhitespace()
	if ast.IsUnaryOperator(tok) {
		return &ast.UnaryExpression{tok, expr}, nil
	} else if ast.IsBinaryOperator(tok) {
		return p.parseBinaryExpression(expr, tok, pos, lit)
	} else {
		// Not an operator: leave the token for the next parse step.
		p.unscan()
	}
	return expr, nil
}
// parseBinaryExpression builds a binary expression whose left-hand side (lh)
// and operator (op) have already been consumed. It parses the right-hand
// expression and, when that is itself binary, delegates to
// handleBinaryPrecedence to keep operator precedence correct.
func (p *Parser) parseBinaryExpression(lh ast.Expression, op lexer.Token, pos lexer.Pos, lit string) (ast.Expression, error) {
	if !ast.IsBinaryOperator(op) {
		return nil, tokenError("Invalid binary operator", op, pos, lit)
	}
	// Process right hand expression
	expr, err := p.ParseExpression()
	if err != nil {
		return nil, err
	}
	// If literal or unary expression, set to right hand side.
	if ast.IsLiteral(expr) || expr.Type() == ast.UnaryExpressionType {
		return &ast.BinaryExpression{Op: op, LExpr: lh, RExpr: expr}, nil
	} else if expr.Type() == ast.BinaryExpressionType {
		return handleBinaryPrecedence(lh, op, expr.(*ast.BinaryExpression))
	} else {
		return nil, fmt.Errorf("Unsupported binary expression: %s", expr.String())
	}
}
// handleBinaryPrecedence rebalances a binary expression whose right-hand side
// is itself binary, so higher-precedence operators bind more tightly: when op
// binds tighter than the right-hand operator the tree is rotated left,
// otherwise the right-leaning tree is kept as-is.
//
// NOTE(review): operators of equal precedence stay right-associative here
// (a-b+c parses as a-(b+c)); confirm that is the intended associativity for
// this calculator.
func handleBinaryPrecedence(lh ast.Expression, op lexer.Token, rh *ast.BinaryExpression) (ast.Expression, error) {
	if op.Precedence() > rh.Precedence() {
		return &ast.BinaryExpression{rh.Op, &ast.BinaryExpression{op, lh, rh.LExpr}, rh.RExpr}, nil
	}
	// The original < and == branches produced identical trees; fold them.
	return &ast.BinaryExpression{op, lh, rh}, nil
}
// parseLiteral dispatches on the token type to the matching typed literal
// parser. Unrecognized tokens produce a tokenError.
func (p *Parser) parseLiteral(tok lexer.Token, pos lexer.Pos, lit string) (ast.Expression, error) {
	switch tok {
	case lexer.INTEGER:
		return p.parseLiteralInteger(tok, pos, lit)
	case lexer.DECIMAL:
		return p.parseLiteralDecimal(tok, pos, lit)
	case lexer.STRING:
		return p.parseLiteralString(tok, pos, lit)
	case lexer.FALSE, lexer.TRUE:
		return p.parseLiteralBoolean(tok, pos, lit)
	case lexer.DURATION:
		return p.parseLiteralDuration(tok, pos, lit)
	default:
		return nil, tokenError("Unrecognized literal token", tok, pos, lit)
	}
}

// parseLiteralInteger parses lit into an arbitrary-precision big.Int.
// The underlying fmt.Sscan error is replaced by a positioned tokenError.
func (p *Parser) parseLiteralInteger(tok lexer.Token, pos lexer.Pos, lit string) (ast.Expression, error) {
	i := new(big.Int)
	_, err := fmt.Sscan(lit, i)
	if err != nil {
		return nil, tokenError("Integer literal parse error", tok, pos, lit)
	}
	return &ast.IntegerLiteral{i}, nil
}

// parseLiteralDecimal parses lit into an arbitrary-precision big.Float.
// The underlying fmt.Sscan error is replaced by a positioned tokenError.
func (p *Parser) parseLiteralDecimal(tok lexer.Token, pos lexer.Pos, lit string) (ast.Expression, error) {
	f := new(big.Float)
	_, err := fmt.Sscan(lit, f)
	if err != nil {
		return nil, tokenError("Decimal literal parse error", tok, pos, lit)
	}
	return &ast.DecimalLiteral{f}, nil
}

// parseLiteralBoolean maps the TRUE/FALSE tokens onto a BooleanLiteral.
func (p *Parser) parseLiteralBoolean(tok lexer.Token, pos lexer.Pos, lit string) (ast.Expression, error) {
	switch tok {
	case lexer.TRUE:
		return &ast.BooleanLiteral{true}, nil
	case lexer.FALSE:
		return &ast.BooleanLiteral{false}, nil
	default:
		return nil, tokenError("Invalid boolean literal", tok, pos, lit)
	}
}

// parseLiteralString wraps the literal text as-is; the lexer has already
// stripped the quotes.
func (p *Parser) parseLiteralString(tok lexer.Token, pos lexer.Pos, lit string) (ast.Expression, error) {
	return &ast.StringLiteral{lit}, nil
}

// parseLiteralDuration parses lit with time.ParseDuration (e.g. "1h30m").
func (p *Parser) parseLiteralDuration(tok lexer.Token, pos lexer.Pos, lit string) (ast.Expression, error) {
	duration, err := time.ParseDuration(lit)
	if err != nil {
		return nil, tokenError("Invalid duration literal", tok, pos, lit)
	}
	return &ast.DurationLiteral{duration}, nil
}
package fixpoint
// Useful link:
// https://spin.atomicobject.com/2012/03/15/simple-fixed-point-math/
// Q24 is a Q7.24 fixed point integer type that has 24 bits of precision to the
// right of the fixed point. It is designed to be used as a more efficient
// replacement for unit vectors with some extra room to avoid overflow.
type Q24 struct {
	N int32
}

// Q24FromFloat converts a float32 to the same number in fixed point format.
// Inverse of .Float().
func Q24FromFloat(x float32) Q24 {
	return Q24{int32(x * (1 << 24))}
}

// Q24FromInt32 returns a fixed point integer with all decimals set to zero.
func Q24FromInt32(x int32) Q24 {
	return Q24{x << 24}
}

// Float returns the floating point version of this fixed point number. Inverse
// of Q24FromFloat.
func (q Q24) Float() float32 {
	return float32(q.N) / (1 << 24)
}

// Int32Scaled returns the underlying fixed point number multiplied by scale,
// i.e. (q.N * scale) / 2^24, computed in 64-bit arithmetic.
//
// The previous form q.N / (1<<24/scale) lost precision whenever scale did not
// divide 1<<24 evenly (e.g. the value 3 scaled by 10 yielded 29 instead of
// 30) and divided by zero for any scale above 1<<24. Truncation toward zero
// is preserved, matching Go integer division.
func (q Q24) Int32Scaled(scale int32) int32 {
	return int32(int64(q.N) * int64(scale) / (1 << 24))
}

// Add returns the argument plus this number.
func (q1 Q24) Add(q2 Q24) Q24 {
	return Q24{q1.N + q2.N}
}

// Sub returns the argument minus this number.
func (q1 Q24) Sub(q2 Q24) Q24 {
	return Q24{q1.N - q2.N}
}

// Neg returns the inverse of this number.
func (q1 Q24) Neg() Q24 {
	return Q24{-q1.N}
}

// Mul returns this number multiplied by the argument; the widened product is
// shifted back down by the 24 fractional bits.
func (q1 Q24) Mul(q2 Q24) Q24 {
	return Q24{int32((int64(q1.N) * int64(q2.N)) >> 24)}
}

// Div returns this number divided by the argument; the dividend is pre-shifted
// up by 24 bits so the quotient keeps the fractional precision.
func (q1 Q24) Div(q2 Q24) Q24 {
	return Q24{int32((int64(q1.N) << 24) / int64(q2.N))}
}

// Vec3Q24 is a 3-dimensional vector with Q24 fixed point elements.
type Vec3Q24 struct {
	X Q24
	Y Q24
	Z Q24
}

// Vec3Q24FromFloat returns the fixed-point vector of the given 3 floats.
func Vec3Q24FromFloat(x, y, z float32) Vec3Q24 {
	return Vec3Q24{Q24FromFloat(x), Q24FromFloat(y), Q24FromFloat(z)}
}

// Add returns this vector added to the argument.
func (v1 Vec3Q24) Add(v2 Vec3Q24) Vec3Q24 {
	// Copied from go-gl/mathgl and modified.
	return Vec3Q24{v1.X.Add(v2.X), v1.Y.Add(v2.Y), v1.Z.Add(v2.Z)}
}

// Mul returns this vector multiplied by the scalar argument.
func (v1 Vec3Q24) Mul(c Q24) Vec3Q24 {
	// Copied from go-gl/mathgl and modified.
	return Vec3Q24{v1.X.Mul(c), v1.Y.Mul(c), v1.Z.Mul(c)}
}

// Dot returns the dot product between this vector and the argument.
func (v1 Vec3Q24) Dot(v2 Vec3Q24) Q24 {
	// Copied from go-gl/mathgl and modified.
	return v1.X.Mul(v2.X).Add(v1.Y.Mul(v2.Y)).Add(v1.Z.Mul(v2.Z))
}

// Cross returns the cross product between this vector and the argument.
func (v1 Vec3Q24) Cross(v2 Vec3Q24) Vec3Q24 {
	// Copied from go-gl/mathgl and modified.
	return Vec3Q24{v1.Y.Mul(v2.Z).Sub(v1.Z.Mul(v2.Y)), v1.Z.Mul(v2.X).Sub(v1.X.Mul(v2.Z)), v1.X.Mul(v2.Y).Sub(v1.Y.Mul(v2.X))}
}

// QuatQ24 is a quaternion with Q24 fixed point elements.
type QuatQ24 struct {
	W Q24
	V Vec3Q24
}

// QuatIdent returns the identity quaternion.
func QuatIdent() QuatQ24 {
	return QuatQ24{Q24FromInt32(1), Vec3Q24{}}
}

// X returns the X part of this quaternion.
func (q QuatQ24) X() Q24 {
	return q.V.X
}

// Y returns the Y part of this quaternion.
func (q QuatQ24) Y() Q24 {
	return q.V.Y
}

// Z returns the Z part of this quaternion.
func (q QuatQ24) Z() Q24 {
	return q.V.Z
}

// Mul returns this quaternion multiplied by the argument (Hamilton product).
func (q1 QuatQ24) Mul(q2 QuatQ24) QuatQ24 {
	// Copied from go-gl/mathgl and modified.
	return QuatQ24{q1.W.Mul(q2.W).Sub(q1.V.Dot(q2.V)), q1.V.Cross(q2.V).Add(q2.V.Mul(q1.W)).Add(q1.V.Mul(q2.W))}
}

// Rotate returns the vector from the argument rotated by the rotation this
// quaternion represents.
func (q1 QuatQ24) Rotate(v Vec3Q24) Vec3Q24 {
	// Copied from go-gl/mathgl and modified.
	cross := q1.V.Cross(v)
	// v + 2q_w * (q_v x v) + 2q_v x (q_v x v)
	return v.Add(cross.Mul(Q24FromInt32(2).Mul(q1.W))).Add(q1.V.Mul(Q24FromInt32(2)).Cross(cross))
}
package ma
import (
"time"
)
// trimaCalculator holds the incremental state for a TRIMA (triangular moving
// average) computation over a stream of prices.
type trimaCalculator struct {
	*Ticker
	Period       int32     // configured averaging period
	Count        int32     // index into Temp of the sample completing the current window
	Factor       float64   // reciprocal of the triangular weight sum; scales the numerator
	Idx          float64   // half the period (Period >> 1), as float for the weight math
	MiddleIdx    int32     // index of the peak-weight element within Temp
	TrailingIdx  int32     // index of the oldest sample still in the window
	Temp         []float64 // buffered price samples
	NumeratorSub float64   // running sum for the rising half of the weights
	NumeratorAdd float64   // running sum for the falling half of the weights
	Numerator    float64   // full triangular-weighted sum
	SubDone      bool      // rising-half sum has been seeded
	AddDone      bool      // falling-half sum has been seeded
	LastValue    float64   // sample most recently shifted during window sliding
	Result       float64   // latest TRIMA value
}

// TRIMA exposes the latest triangular moving average value together with its
// underlying incremental calculator.
type TRIMA struct {
	Value      float64          // most recent TRIMA; refreshed by Update
	Calculator *trimaCalculator // incremental state machine
}
// NewTRIMA creates a TRIMA accumulator over the given period, attached to
// this ticker. Idx is preset to half the period, which drives the triangular
// weighting inside calcTRIMA.
func (t *Ticker) NewTRIMA(inTimePeriod int32) *TRIMA {
	calculator := &trimaCalculator{
		Ticker: t,
		Period: inTimePeriod,
		Idx:    float64(inTimePeriod >> 1),
	}
	return &TRIMA{
		Calculator: calculator,
	}
}
// calcTRIMA incrementally updates the triangular moving average for the
// latest price appended to the window. The weights rise linearly to the
// middle of the period and fall off again; the two running sums
// (NumeratorSub for the rising half, NumeratorAdd for the falling half) let
// each new sample update the weighted numerator in O(1) instead of
// rescanning the window.
// NOTE(review): the update algebra appears to mirror TA-Lib's TRIMA;
// confirm against that reference before modifying.
func (t *trimaCalculator) calcTRIMA() {
	// Buffer the newest price sample.
	t.Temp = append(t.Temp, t.Price)
	if t.Period&1 == 1 && !t.SubDone && !t.AddDone {
		// Odd period: weights peak at a single middle element.
		t.Factor = (t.Idx + 1.0) * (t.Idx + 1.0)
		t.Factor = 1.0 / t.Factor
		t.MiddleIdx = t.TrailingIdx + int32(t.Idx)
		t.Count = t.MiddleIdx + int32(t.Idx)
	} else if t.Period&1 != 1 {
		// Even period: weights peak across the two middle elements.
		t.Factor = (t.Idx) * (t.Idx + 1)
		t.Factor = 1.0 / t.Factor
		t.MiddleIdx = t.TrailingIdx + int32(t.Idx) - 1
		t.Count = t.MiddleIdx + int32(t.Idx)
	}
	if int32(len(t.Temp)) == t.MiddleIdx+1 && !t.SubDone {
		// First half of the initial window is full: seed the rising sum.
		for idx := t.MiddleIdx; idx >= t.TrailingIdx; idx-- {
			t.NumeratorSub += t.Temp[idx]
			t.Numerator += t.NumeratorSub
		}
		t.SubDone = true
	}
	if int32(len(t.Temp)) == t.Count+1 && !t.AddDone {
		// Full initial window: seed the falling sum and emit the first value.
		t.MiddleIdx++
		for idx := t.MiddleIdx; idx <= t.Count; idx++ {
			t.NumeratorAdd += t.Temp[idx]
			t.Numerator += t.NumeratorAdd
		}
		t.AddDone = true
		t.LastValue = t.Temp[t.TrailingIdx]
		t.Result = t.Numerator * t.Factor
		t.Count++
		t.TrailingIdx++
	}
	if t.AddDone && t.SubDone && int32(len(t.Temp)) > t.Count {
		// Steady state: slide the window one sample, re-basing Temp so the
		// consumed prefix can be garbage collected, then adjust both sums.
		t.Temp = t.Temp[t.TrailingIdx:]
		t.MiddleIdx -= t.TrailingIdx
		t.Count -= t.TrailingIdx
		t.TrailingIdx = 0
		t.Numerator -= t.NumeratorSub
		t.NumeratorSub -= t.LastValue
		t.LastValue = t.Temp[t.MiddleIdx]
		t.MiddleIdx++
		t.NumeratorSub += t.LastValue
		if t.Period&1 == 1 {
			t.Numerator += t.NumeratorAdd
			t.NumeratorAdd -= t.LastValue
		} else {
			t.NumeratorAdd -= t.LastValue
			t.Numerator += t.NumeratorAdd
		}
		t.LastValue = t.Temp[t.Count]
		t.Count++
		t.NumeratorAdd += t.LastValue
		t.Numerator += t.LastValue
		t.LastValue = t.Temp[t.TrailingIdx]
		t.TrailingIdx++
		t.Result = t.Numerator * t.Factor
	}
}
// Update feeds the given price/date sample into the calculator and refreshes
// Value with the recomputed TRIMA.
func (d *TRIMA) Update(price float64, date time.Time) {
	d.Calculator.setPrice(price, date)
	d.Calculator.calcTRIMA()
	d.Value = d.Calculator.Result
}

// Sum returns the most recently computed TRIMA value.
// NOTE(review): despite the name, this is the moving-average value, not a sum.
func (d *TRIMA) Sum() float64 {
	return d.Value
}
package exec
import (
"context"
"fmt"
"github.com/ebay/beam/blog"
"github.com/ebay/beam/msg/facts"
"github.com/ebay/beam/query/planner/plandef"
"github.com/ebay/beam/rpc"
"github.com/ebay/beam/util/parallel"
"github.com/ebay/beam/viewclient/lookups"
)
// newProjection returns a projection operator (aka SELECT). When executed it
// evaluates each row of input using the expressions defined in the projection
// Operator, and sends the results as output.
func newProjection(op *plandef.Projection, inputs []queryOperator) operator {
if len(inputs) != 1 {
panic(fmt.Sprintf("projection operation with unexpected number of inputs: %v", len(inputs)))
}
cols := make(Columns, len(op.Select))
for i := range op.Select {
cols[i] = op.Select[i].Out
}
return &projection{
cols: cols,
def: op,
input: inputs[0],
}
}
type projection struct {
cols Columns
def *plandef.Projection
input queryOperator
}
func (p *projection) operator() plandef.Operator {
return p.def
}
func (p *projection) columns() Columns {
return p.cols
}
func (p *projection) execute(ctx context.Context, binder valueBinder, res results) error {
if binder.len() != 1 {
panic(fmt.Sprintf("projection operator %v unexpectedly bulk bound to %d rows",
p.def, binder.len()))
}
inputResCh := make(chan ResultChunk, 4)
wait := parallel.Go(func() {
evals := buildExprEvaluators(p.def, p.input.columns())
for chunk := range inputResCh {
res.setFinalStatistics(chunk.FinalStatistics)
for i := range chunk.offsets {
rowIn := chunk.Row(i)
rowOut := make([]Value, len(p.def.Select))
hasValue := false
for colIdx := range p.def.Select {
out := evals[colIdx].consume(rowIn)
if out != nil {
rowOut[colIdx] = *out
hasValue = true
}
}
if hasValue {
res.add(ctx, 0, FactSet{}, rowOut)
}
}
}
rowOut := make([]Value, len(p.def.Select))
hasValue := false
for colIdx := range p.def.Select {
out := evals[colIdx].completed()
if out != nil {
rowOut[colIdx] = *out
hasValue = true
}
}
if hasValue {
res.add(ctx, 0, FactSet{}, rowOut)
}
})
err := p.input.run(ctx, binder, inputResCh)
wait()
return err
}
// newAsk returns a new Ask operator, when executed it generates a single result
// of true if the input returned any rows, false otherwise.
func newAsk(op *plandef.Ask, inputs []queryOperator) operator {
if len(inputs) != 1 {
panic(fmt.Sprintf("ask operation with unexpected number of inputs: %d", len(inputs)))
}
return &ask{
def: op,
input: inputs[0],
}
}
type ask struct {
def *plandef.Ask
input queryOperator
}
func (a *ask) columns() Columns {
return Columns{a.def.Out}
}
func (a *ask) operator() plandef.Operator {
return a.def
}
func (a *ask) execute(ctx context.Context, binder valueBinder, res results) error {
if binder.len() != 1 {
panic(fmt.Sprintf("ask operator %v unexpectedly bulk bound to %d rows",
a.def, binder.len()))
}
inputResCh := make(chan ResultChunk)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
weCancelled := false
wait := parallel.Go(func() {
for chunk := range inputResCh {
if len(chunk.offsets) > 0 {
result := Value{KGObject: rpc.ABool(true, 0)}
res.add(ctx, 0, FactSet{}, []Value{result})
weCancelled = true
cancel()
return
}
}
result := Value{KGObject: rpc.ABool(false, 0)}
res.add(ctx, 0, FactSet{}, []Value{result})
})
err := a.input.run(ctx, binder, inputResCh)
wait()
if weCancelled && err == context.Canceled {
return nil
}
return err
}
// newExternalIDs returns an operator that updates the input Values with
// ExternalID values.
func newExternalIDs(index blog.Index, lookup lookups.SP, op *plandef.ExternalIDs, inputs []queryOperator) operator {
if len(inputs) != 1 {
panic(fmt.Sprintf("externalIDs operation with unexpected inputs: %v", len(inputs)))
}
return &externalIDs{
index: index,
lookup: lookup,
def: op,
input: inputs[0],
}
}
type externalIDs struct {
index blog.Index
lookup lookups.SP
def *plandef.ExternalIDs
input queryOperator
}
func (e *externalIDs) columns() Columns {
return e.input.columns()
}
func (e *externalIDs) operator() plandef.Operator {
return e.def
}
func (e *externalIDs) execute(ctx context.Context, binder valueBinder, res results) error {
inputResCh := make(chan ResultChunk, 4)
// processInput reads chunks from the input operator, collects up the
// set of externalIDs that need fetching, fetches them, and updates the
// chunk, and then publishes it as this operators output. Each chunk
// processed results in at most one LookupSP call. ExternalIDs are
// cached for the duration of the operation.
processInput := func(ctx context.Context) error {
resolver := newExternalIDResolver(e.index)
for c := range inputResCh {
res.setFinalStatistics(c.FinalStatistics)
for i := range c.Values {
v := &c.Values[i]
if v.KGObject.IsType(rpc.KtKID) {
resolver.resolveID(v.KGObject.ValKID(), v.SetExtID)
} else if v.KGObject.UnitID() != 0 {
resolver.resolveID(v.KGObject.UnitID(), v.SetUnitExtID)
} else if v.KGObject.LangID() != 0 {
resolver.resolveID(v.KGObject.LangID(), v.SetLangExtID)
}
}
// resolve any newly found KIDs and call the callback functions.
if err := resolver.fetchPending(ctx, e.lookup); err != nil {
return err
}
// we're done with this chunk now, send it as output
for i := range c.offsets {
res.add(ctx, c.offsets[i], c.Facts[i], c.Row(i))
}
}
return nil
}
err := parallel.Invoke(ctx,
processInput,
func(ctx context.Context) error {
return e.input.run(ctx, binder, inputResCh)
})
return err
}
// newExternalIDResolver returns a new externalIDResolver, which resolves KIDs
// to ExternalIDs as of the supplied log index. The returned resolver is not
// concurrent safe.
func newExternalIDResolver(index blog.Index) *externalIDResolver {
return &externalIDResolver{
ids: make(map[uint64]extIDlookupItem),
lookup: rpc.LookupSPRequest{
Index: index,
},
}
}
// externalIDResolver is used to bulk resolve internal KIDs to their
// externalIDs.
type externalIDResolver struct {
ids map[uint64]extIDlookupItem
lookup rpc.LookupSPRequest
}
// resolveID arranges for 'callback' to be called with the externalID value of
// the supplied 'kid'. It might be called straight away if we already have it or
// it might be later after a call to fetchPending(). The Lookup item will be
// added to lookupReq if needed.
func (r *externalIDResolver) resolveID(kid uint64, callback func(extID string)) {
item, exists := r.ids[kid]
if exists && item.extID != "" {
callback(item.extID)
return
}
item.callbacks = append(item.callbacks, callback)
r.ids[kid] = item
if !exists {
// this is the first request for this ID, add the lookup rpc.
lk := rpc.LookupSPRequest_Item{
Subject: kid,
Predicate: facts.HasExternalID,
}
r.lookup.Lookups = append(r.lookup.Lookups, lk)
}
}
// fetchPending executes a LookupSP RPC to fetch the ExternalIDs that still need
// resolving, and executes the callbacks once the results are received. There
// can be multiple cycles of resolveID & fetchPending on a single
// externalIDResolver. An error is returned if the LookupSP request fails, as
// that is a streaming response its possible to get partial results in error
// conditions.
func (r *externalIDResolver) fetchPending(ctx context.Context, lookup lookups.SP) error {
if len(r.lookup.Lookups) == 0 {
return nil
}
resCh := make(chan *rpc.LookupChunk, 4)
wait := parallel.Go(func() {
for lookupChunk := range resCh {
for _, f := range lookupChunk.Facts {
extID := f.Fact.Object.ValString()
for _, cb := range r.ids[f.Fact.Subject].callbacks {
cb(extID)
}
r.ids[f.Fact.Subject] = extIDlookupItem{extID: extID}
}
}
})
err := lookup.LookupSP(ctx, &r.lookup, resCh)
wait()
if err != nil {
return err
}
r.lookup.Lookups = r.lookup.Lookups[:0]
return nil
}
type extIDlookupItem struct {
extID string
callbacks []func(string)
}
// emptyResultOp is a queryOperator that generates a ResultChunk with the output
// columns in the event the input operator generates no results.
type emptyResultOp struct {
input queryOperator
}
func (e *emptyResultOp) columns() Columns {
return e.input.columns()
}
func (e *emptyResultOp) run(ctx context.Context, binder valueBinder, resCh chan<- ResultChunk) error {
if binder.len() != 1 {
panic(fmt.Sprintf("emptyResultOp unexpectedly bulk bound to %d rows", binder.len()))
}
childCh := make(chan ResultChunk, 4)
wait := parallel.Go(func() {
hadResult := false
defer close(resCh)
for chunk := range childCh {
hadResult = true
select {
case resCh <- chunk:
case <-ctx.Done():
return
}
}
if !hadResult {
empty := ResultChunk{
Columns: e.input.columns(),
}
select {
case resCh <- empty:
case <-ctx.Done():
}
}
})
err := e.input.run(ctx, binder, childCh)
wait()
return err
} | src/github.com/ebay/beam/query/exec/projection.go | 0.612426 | 0.409693 | projection.go | starcoder |
package dynamodb
import (
"fmt"
"github.com/aws/aws-sdk-go/service/dynamodb/expression"
"github.com/pipe-cd/pipe/pkg/datastore"
)
// buildDynamoDBCondition translates a single datastore list filter into a
// DynamoDB condition builder. Supported operators are ==, !=, >, >=, in, <
// and <=; anything else yields an error naming the offending filter.
func buildDynamoDBCondition(f datastore.ListFilter) (expression.ConditionBuilder, error) {
	switch f.Operator {
	case "==":
		return expression.Name(f.Field).Equal(expression.Value(f.Value)), nil
	case "!=":
		return expression.Name(f.Field).NotEqual(expression.Value(f.Value)), nil
	case ">":
		return expression.Name(f.Field).GreaterThan(expression.Value(f.Value)), nil
	case ">=":
		return expression.Name(f.Field).GreaterThanEqual(expression.Value(f.Value)), nil
	case "in":
		return expression.Name(f.Field).In(expression.Value(f.Value)), nil
	case "<":
		return expression.Name(f.Field).LessThan(expression.Value(f.Value)), nil
	case "<=":
		return expression.Name(f.Field).LessThanEqual(expression.Value(f.Value)), nil
	default:
		return expression.ConditionBuilder{}, fmt.Errorf("unacceptable expression for dynamodb: %s %s %v", f.Field, f.Operator, f.Value)
	}
}
// buildDynamoDBExpression converts the datastore list options into a DynamoDB
// filter expression, AND-ing all filters together. An error is returned when
// no filters are given or when any filter uses an unsupported operator.
func buildDynamoDBExpression(opts datastore.ListOptions) (expression.Expression, error) {
	var expr expression.Expression
	ops := make([]expression.ConditionBuilder, len(opts.Filters))
	for i, f := range opts.Filters {
		op, err := buildDynamoDBCondition(f)
		if err != nil {
			return expr, err
		}
		ops[i] = op
	}
	if len(ops) == 0 {
		return expr, fmt.Errorf("missing expression for dynamodb")
	}
	cond := ops[0]
	if len(ops) > 1 {
		// expression.And takes two fixed conditions plus a variadic tail, so
		// this single call covers the former separate two-element case too.
		cond = expression.And(ops[0], ops[1], ops[2:]...)
	}
	return expression.NewBuilder().WithFilter(cond).Build()
}
// buildDynamoDBKeyExistedExpression builds a condition expression requiring
// that attribute `key` equals `value`.
// NOTE(review): the name suggests an attribute_exists check, but the
// implementation compares for equality — confirm callers expect equality.
func buildDynamoDBKeyExistedExpression(key string, value interface{}) (expression.Expression, error) {
	return expression.NewBuilder().WithCondition(expression.Name(key).Equal(expression.Value(value))).Build()
}

// buildDynamoDBKeyNotExistedExpression is the negated counterpart: the
// condition holds only when attribute `key` does not equal `value`.
func buildDynamoDBKeyNotExistedExpression(key string, value interface{}) (expression.Expression, error) {
	return expression.NewBuilder().WithCondition(expression.Name(key).NotEqual(expression.Value(value))).Build()
}
package pizzabot
import (
"errors"
"strconv"
"strings"
"github.com/andrewcopp/geometry"
)
// Parser identifies objects that can parse the Pizzabot args.
type Parser interface {
	// Parse consumes the raw command-line args and reports any parse failure.
	Parse(args []string) error
}
// Config is a strongly-typed representation of the command line args.
type Config struct {
	// Size is the grid dimensions parsed from the leading "WxH" token.
	Size *geometry.Size
	// Points are the delivery coordinates parsed from the "(x, y)" groups.
	Points []*geometry.Point
}
// Parse takes the args and loads them into the Config struct.
// args[1] is expected to look like "WxH (x1, y1) (x2, y2) ...": the text is
// split on " (" so the first piece is the size and the rest are coordinates,
// which are then validated against the grid bounds.
func (c *Config) Parse(args []string) error {
	if len(args) != 2 {
		return errors.New("Expected one arg")
	}
	pieces := strings.Split(args[1], " (")
	var err error
	if c.Size, err = c.dimensions(pieces[0]); err != nil {
		return err
	}
	if c.Points, err = c.coordinates(pieces[1:]); err != nil {
		return err
	}
	return c.check()
}
// dimensions parses a "WxH" token into a Size. It errors when the token does
// not split into exactly two integer fields around 'x'.
func (c *Config) dimensions(arg string) (*geometry.Size, error) {
	parts := strings.Split(arg, "x")
	if len(parts) != 2 {
		return nil, errors.New("Expected size to be two-dimensional")
	}
	width, err := strconv.Atoi(parts[0])
	if err != nil {
		return nil, err
	}
	height, err := strconv.Atoi(parts[1])
	if err != nil {
		return nil, err
	}
	return geometry.NewSize(width, height), nil
}
// coordinates parses each "x, y)" fragment (the leading "(" was consumed by
// the caller's split) into a Point. Any fragment that does not contain
// exactly two integer fields aborts the whole parse.
func (c *Config) coordinates(coords []string) ([]*geometry.Point, error) {
	points := []*geometry.Point{}
	for _, coord := range coords {
		fields := strings.Split(strings.TrimSpace(coord), ", ")
		if len(fields) != 2 {
			return nil, errors.New("Expected coordinates to be in two-dimensional space")
		}
		point := geometry.NewPoint(0, 0)
		var err error
		if point.X, err = strconv.Atoi(fields[0]); err != nil {
			return nil, err
		}
		// The trailing ")" of the coordinate group is stripped before parsing.
		if point.Y, err = strconv.Atoi(strings.Trim(fields[1], ")")); err != nil {
			return nil, err
		}
		points = append(points, point)
	}
	return points, nil
}
// check validates that every parsed point lies inside the grid:
// 0 <= X < Width and 0 <= Y < Height. It returns an error for the first
// out-of-bounds house, nil when all points fit.
func (c *Config) check() error {
	for _, point := range c.Points {
		if point.X >= c.Size.Width || point.X <= -1 {
			return errors.New("House exists outside of map")
		}
		if point.Y >= c.Size.Height || point.Y <= -1 {
			return errors.New("House exists outside of map")
		}
	}
	return nil
} | config.go | 0.706393 | 0.458712 | config.go | starcoder
package fn
//The Beta function and relatives.
import (
"math"
)
// betaContinuedFraction evaluates the continued-fraction expansion used by the
// regularized incomplete Beta function (modified Lentz's method). Each loop
// iteration applies one even and one odd term of the expansion and stops once
// the running product changes by less than acc; it panics if the expansion
// fails to converge within maxIter terms.
//
// Fix: the `return -1.00` that followed the panic was unreachable dead code
// (flagged by go vet) and has been removed. The numerics are untouched.
func betaContinuedFraction(α, β, x float64) float64 {
	var aa, del, res, qab, qap, qam, c, d, m2, m, acc float64
	var i int64
	const eps = 2.2204460492503131e-16
	const maxIter = 1000000000
	acc = 1e-16
	qab = α + β
	qap = α + 1.0
	qam = α - 1.0
	c = 1.0
	d = 1.0 - qab*x/qap
	// Lentz's method replaces near-zero denominators with a tiny epsilon.
	if math.Abs(d) < eps {
		d = eps
	}
	d = 1.0 / d
	res = d
	for i = 1; i <= maxIter; i++ {
		m = (float64)(i)
		m2 = 2 * m
		// Even term of the recurrence.
		aa = m * (β - m) * x / ((qam + m2) * (α + m2))
		d = 1.0 + aa*d
		if math.Abs(d) < eps {
			d = eps
		}
		c = 1.0 + aa/c
		if math.Abs(c) < eps {
			c = eps
		}
		d = 1.0 / d
		res *= d * c
		// Odd term of the recurrence.
		aa = -(α + m) * (qab + m) * x / ((α + m2) * (qap + m2))
		d = 1.0 + aa*d
		if math.Abs(d) < eps {
			d = eps
		}
		c = 1.0 + aa/c
		if math.Abs(c) < eps {
			c = eps
		}
		d = 1.0 / d
		del = d * c
		res *= del
		if math.Abs(del-1.0) < acc {
			return res
		}
	}
	panic("betaContinuedFraction(): α or β too big, or maxIter too small")
}
// B returns the Beta function, B(x, y) = Γ(x)·Γ(y)/Γ(x+y).
func B(x float64, y float64) float64 {
	numerator := Γ(x) * Γ(y)
	return numerator / Γ(x+y)
}
// LnB returns the natural logarithm of the Beta function,
// ln B(x, y) = ln Γ(x) + ln Γ(y) − ln Γ(x+y).
func LnB(x float64, y float64) float64 {
	return LnΓ(x) + LnΓ(y) - LnΓ(x+y)
}
// IB returns the non-regularized incomplete Beta function, computed as the
// regularized value scaled by the complete Beta factor exp(lnΓa+lnΓb−lnΓ(a+b)).
// (The previous comment misnamed this function as BetaIncReg.)
func IB(a, b, x float64) float64 {
	return BetaIncReg(a, b, x) * math.Exp(LnΓ(a)+LnΓ(b)-LnΓ(a+b))
}
// BetaIncReg returns the regularized incomplete Beta function I_x(α, β).
// The endpoints x = 0 and x = 1 are answered exactly; elsewhere the continued
// fraction is used directly where it converges fastest, otherwise via the
// symmetry relation I_x(α, β) = 1 − I_{1−x}(β, α).
func BetaIncReg(α, β, x float64) float64 {
	if x == 0 {
		return 0.0
	}
	if x == 1.0 {
		return 1.0
	}
	// Prefactor x^α (1−x)^β / B(α, β), computed in log space for stability.
	y := math.Exp(LnΓ(α+β) - LnΓ(α) - LnΓ(β) + α*math.Log(x) + β*math.Log(1.0-x))
	if x < (α+1.0)/(α+β+2.0) {
		return y * betaContinuedFraction(α, β, x) / α
	}
	return 1.0 - y*betaContinuedFraction(β, α, 1.0-x)/β
}
// LnBeta returns the value of the log beta function. Translation of the Fortran code by <NAME> of Los Alamos Scientific Laboratory.
//
// The computation switches between three regimes for accuracy: both arguments
// large (>= 10), only the larger one large, or both small; the asymptotic
// branches use the log-gamma correction term lgammacor.
func LnBeta(a, b float64) float64 {
	var corr float64
	if isNaN(a) || isNaN(b) {
		return a + b
	}
	// p = min(a, b), q = max(a, b).
	q := a
	p := q
	if b < p {
		p = b
	}
	if b > q {
		q = b
	}
	/* both arguments must be >= 0 */
	if p < 0 {
		return nan
	} else if p == 0 {
		return posInf
	} else if isInf(q, 0) { /* q == +Inf */
		return negInf
	}
	if p >= 10 {
		/* p and q are big. */
		corr = lgammacor(p) + lgammacor(q) - lgammacor(p+q)
		return log(q)*-0.5 + lnSqrt2π + corr + (p-0.5)*log(p/(p+q)) + q*log1p(-p/(p+q))
	} else if q >= 10 {
		/* p is small, but q is big. */
		corr = lgammacor(q) - lgammacor(p+q)
		return lgammafn(p) + corr + p - p*log(p+q) + (q-0.5)*log1p(-p/(p+q))
	}
	/* p and q are small: p <= q < 10. */
	// For extremely small p, Γ(p) would overflow; stay in log space instead.
	if p < 1e-306 {
		return LnΓ(p) + (LnΓ(q) - LnΓ(p+q))
	}
	return log(math.Gamma(p) * (math.Gamma(q) / math.Gamma(p+q)))
} | go/src/code.google.com/p/go-fn/fn/beta.go | 0.602997 | 0.474509 | beta.go | starcoder
package models
import (
i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e "time"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// UserSimulationEventInfo records a single event performed by a user during an
// attack simulation and training campaign, together with the client metadata
// (browser, IP address, OS/platform/device) captured at the time of the event.
//
// All getters and setters are safe to call on a nil receiver, matching the
// generated-code style of the surrounding models. This revision replaces the
// non-idiomatic `if m == nil { ... } else { ... }` bodies with early returns
// and factors the repeated string-field deserializer boilerplate into one
// helper; behavior and every signature are unchanged.
type UserSimulationEventInfo struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // Browser information from where the simulation event was initiated by a user in an attack simulation and training campaign.
    browser *string
    // Date and time of the simulation event by a user in an attack simulation and training campaign.
    eventDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
    // Name of the simulation event by a user in an attack simulation and training campaign.
    eventName *string
    // IP address from where the simulation event was initiated by a user in an attack simulation and training campaign.
    ipAddress *string
    // The operating system, platform, and device details from where the simulation event was initiated by a user in an attack simulation and training campaign.
    osPlatformDeviceDetails *string
}
// NewUserSimulationEventInfo instantiates a new userSimulationEventInfo and sets the default values.
func NewUserSimulationEventInfo()(*UserSimulationEventInfo) {
    m := &UserSimulationEventInfo{}
    m.SetAdditionalData(make(map[string]interface{}))
    return m
}
// CreateUserSimulationEventInfoFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateUserSimulationEventInfoFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewUserSimulationEventInfo(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *UserSimulationEventInfo) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    }
    return m.additionalData
}
// GetBrowser gets the browser property value. Browser information from where the simulation event was initiated by a user in an attack simulation and training campaign.
func (m *UserSimulationEventInfo) GetBrowser()(*string) {
    if m == nil {
        return nil
    }
    return m.browser
}
// GetEventDateTime gets the eventDateTime property value. Date and time of the simulation event by a user in an attack simulation and training campaign.
func (m *UserSimulationEventInfo) GetEventDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
    if m == nil {
        return nil
    }
    return m.eventDateTime
}
// GetEventName gets the eventName property value. Name of the simulation event by a user in an attack simulation and training campaign.
func (m *UserSimulationEventInfo) GetEventName()(*string) {
    if m == nil {
        return nil
    }
    return m.eventName
}
// GetFieldDeserializers the deserialization information for the current model
func (m *UserSimulationEventInfo) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    // stringField adapts a string setter into a field deserializer; every
    // string-typed property shares this exact read-check-set sequence.
    stringField := func(set func(*string)) func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error) {
        return func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
            val, err := n.GetStringValue()
            if err != nil {
                return err
            }
            if val != nil {
                set(val)
            }
            return nil
        }
    }
    res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
    res["browser"] = stringField(m.SetBrowser)
    res["eventDateTime"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetTimeValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetEventDateTime(val)
        }
        return nil
    }
    res["eventName"] = stringField(m.SetEventName)
    res["ipAddress"] = stringField(m.SetIpAddress)
    res["osPlatformDeviceDetails"] = stringField(m.SetOsPlatformDeviceDetails)
    return res
}
// GetIpAddress gets the ipAddress property value. IP address from where the simulation event was initiated by a user in an attack simulation and training campaign.
func (m *UserSimulationEventInfo) GetIpAddress()(*string) {
    if m == nil {
        return nil
    }
    return m.ipAddress
}
// GetOsPlatformDeviceDetails gets the osPlatformDeviceDetails property value. The operating system, platform, and device details from where the simulation event was initiated by a user in an attack simulation and training campaign.
func (m *UserSimulationEventInfo) GetOsPlatformDeviceDetails()(*string) {
    if m == nil {
        return nil
    }
    return m.osPlatformDeviceDetails
}
// Serialize serializes information the current object
func (m *UserSimulationEventInfo) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    {
        err := writer.WriteStringValue("browser", m.GetBrowser())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteTimeValue("eventDateTime", m.GetEventDateTime())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("eventName", m.GetEventName())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("ipAddress", m.GetIpAddress())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("osPlatformDeviceDetails", m.GetOsPlatformDeviceDetails())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteAdditionalData(m.GetAdditionalData())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *UserSimulationEventInfo) SetAdditionalData(value map[string]interface{})() {
    if m == nil {
        return
    }
    m.additionalData = value
}
// SetBrowser sets the browser property value. Browser information from where the simulation event was initiated by a user in an attack simulation and training campaign.
func (m *UserSimulationEventInfo) SetBrowser(value *string)() {
    if m == nil {
        return
    }
    m.browser = value
}
// SetEventDateTime sets the eventDateTime property value. Date and time of the simulation event by a user in an attack simulation and training campaign.
func (m *UserSimulationEventInfo) SetEventDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
    if m == nil {
        return
    }
    m.eventDateTime = value
}
// SetEventName sets the eventName property value. Name of the simulation event by a user in an attack simulation and training campaign.
func (m *UserSimulationEventInfo) SetEventName(value *string)() {
    if m == nil {
        return
    }
    m.eventName = value
}
// SetIpAddress sets the ipAddress property value. IP address from where the simulation event was initiated by a user in an attack simulation and training campaign.
func (m *UserSimulationEventInfo) SetIpAddress(value *string)() {
    if m == nil {
        return
    }
    m.ipAddress = value
}
// SetOsPlatformDeviceDetails sets the osPlatformDeviceDetails property value. The operating system, platform, and device details from where the simulation event was initiated by a user in an attack simulation and training campaign.
// The call is a no-op on a nil receiver.
func (m *UserSimulationEventInfo) SetOsPlatformDeviceDetails(value *string)() {
    if m != nil {
        m.osPlatformDeviceDetails = value
    }
} | models/user_simulation_event_info.go | 0.563618 | 0.494141 | user_simulation_event_info.go | starcoder
package statistics
import (
"fmt"
"math"
"sort"
"github.com/rpinheiroalmeida/collections"
"github.com/rpinheiroalmeida/linalg"
)
// Sum returns the sum of all values in the sample (0 for an empty sample).
func Sum(sample collections.Vector) float64 {
	var total float64
	for _, v := range sample {
		total += v
	}
	return total
}
// Mean returns the arithmetic mean; panics (via check) on an empty sample.
func Mean(sample collections.Vector) float64 {
	check(sample)
	size := float64(sample.Len())
	return Sum(sample) / size
}
// Median returns the middle value of the sample (the mean of the two middle
// values for even-sized samples). Note: the caller's slice is sorted in place.
func Median(sample collections.Vector) float64 {
	check(sample)
	sort.Float64s(sample)
	mid := sample.Len() / 2
	if !oddSize(sample) {
		return Mean(collections.Vector{sample[mid-1], sample[mid]})
	}
	return sample[mid]
}
// Quantile returns the element at the given percentile (fraction in [0, 1]).
// Note: the caller's slice is sorted in place.
//
// Bug fix: for percentile >= 1 the previous code indexed sample[len(sample)]
// and panicked with an out-of-range error; the index is now clamped to the
// valid range, so 1.0 yields the maximum element. An empty-sample check was
// also added for consistency with the other statistics in this package.
func Quantile(sample collections.Vector, percentile float64) float64 {
	check(sample)
	sort.Float64s(sample)
	idx := int(percentile * float64(sample.Len()))
	if idx >= sample.Len() {
		idx = sample.Len() - 1
	}
	if idx < 0 {
		idx = 0
	}
	return sample[idx]
}
// Mode returns all values sharing the highest occurrence count, in
// descending order; panics (via check) on an empty sample.
func Mode(sample collections.Vector) collections.Vector {
	check(sample)
	counter := collections.NewCounter(sample)
	top := counter.MaxValue()
	modes := make(collections.Vector, 0, len(sample))
	for value, count := range counter.Items {
		if count == top {
			modes = append(modes, value)
		}
	}
	sort.Sort(sort.Reverse(modes))
	return modes
}
// DataRange returns the spread of the sample: maximum minus minimum.
func DataRange(sample collections.Vector) float64 {
	largest := sample.Max()
	smallest := sample.Min()
	return largest - smallest
}
// DispersionMean returns each value's deviation from the sample mean.
func DispersionMean(sample collections.Vector) collections.Vector {
	mean := Mean(sample)
	deviations := make(collections.Vector, 0, cap(sample))
	for _, v := range sample {
		deviations = append(deviations, v-mean)
	}
	return deviations
}
// Variance returns the unbiased sample variance (divisor n-1); it panics
// (via checkMinimumSize) unless the sample has at least two elements.
func Variance(sample collections.Vector) float64 {
	size := sample.Len()
	checkMinimumSize(size, 1)
	deviations := DispersionMean(sample)
	return linalg.SumOfSquares(deviations) / float64(size-1)
}
// StandardDeviation returns the square root of the sample variance.
func StandardDeviation(sample collections.Vector) float64 {
	variance := Variance(sample)
	return math.Sqrt(variance)
}
// InterQuantileRange returns the distance between the 75th and 25th
// percentiles. Note: the underlying Quantile calls sort the sample in place.
func InterQuantileRange(sample collections.Vector) float64 {
	upper := Quantile(sample, 0.75)
	lower := Quantile(sample, 0.25)
	return upper - lower
}
// Covariance returns the unbiased sample covariance of the two vectors
// (divisor n-1, where n is x's length); panics unless n is at least two.
func Covariance(x, y collections.Vector) float64 {
	size := x.Len()
	checkMinimumSize(size, 1)
	return linalg.Dot(DispersionMean(x), DispersionMean(y)) / float64(size-1)
}
// Correlation returns the Pearson correlation coefficient, or 0 when either
// vector has no positive standard deviation (constant data).
func Correlation(x, y collections.Vector) float64 {
	sdX := StandardDeviation(x)
	sdY := StandardDeviation(y)
	if sdX > 0 && sdY > 0 {
		return Covariance(x, y) / sdX / sdY
	}
	return float64(0)
}
// checkMinimumSize panics unless value is strictly greater than minimum
// (i.e. the sample must contain more than `minimum` observations).
func checkMinimumSize(value, minimum int) {
	if value > minimum {
		return
	}
	panic(fmt.Errorf("The minimum size was not obeyed - %d", minimum))
}
// oddSize reports whether the sample has an odd number of elements.
func oddSize(sample collections.Vector) bool {
	return sample.Len()%2 != 0
}
// check panics when the sample is empty; every statistic in this package
// requires at least one observation.
func check(sample collections.Vector) {
	if sample.Empty() {
		panic("Operation Not allowed with empty sample")
	}
} | statistics.go | 0.832849 | 0.599309 | statistics.go | starcoder
package statistic
import (
"fmt"
"time"
"github.com/jansorg/marketplace-stats/marketplace"
)
// NewYear returns a pointer to a Year for the given calendar year, with all
// statistics left at their zero values until Update is called.
func NewYear(year int) *Year {
	y := Year{Year: year}
	return &y
}
// Year contains the statistics for a given year
type Year struct {
	Year int
	// Months holds the per-month breakdown built by Update, in order.
	Months []*Month
	// TotalCustomers counts distinct customers for the year; the Annual and
	// Monthly variants split them by subscription type.
	TotalCustomers int
	TotalCustomersAnnual int
	TotalCustomersMonthly int
	// TotalSalesUSD aggregates the year's sales total and fees in USD.
	TotalSalesUSD AmountAndFee
	DownloadsTotal int
	DownloadsUnique int
}
// Name returns the year formatted as a decimal string, e.g. "2021".
func (y *Year) Name() string {
	return fmt.Sprint(y.Year)
}
// LastMonth returns the most recent month recorded for this year, or nil
// when no months have been computed yet.
func (y *Year) LastMonth() *Month {
	count := len(y.Months)
	if count > 0 {
		return y.Months[count-1]
	}
	return nil
}
// Update recomputes this year's aggregates from the full sales list and the
// per-month download counts, then builds the per-month breakdown.
// previousYear may be nil; when present its last month seeds the first
// month-over-month comparison. graceDays is forwarded to each Month.Update.
// All date arithmetic is done in marketplace server time.
func (y *Year) Update(previousYear *Year, sales marketplace.Sales, downloadsTotal, downloadsUnique []marketplace.DownloadMonthly, graceDays int) {
	yearlySales := sales.ByYear(y.Year)
	y.TotalCustomers = len(yearlySales.CustomersMap())
	y.TotalCustomersAnnual = len(yearlySales.ByAnnualSubscription().CustomersMap())
	y.TotalCustomersMonthly = len(yearlySales.ByMonthlySubscription().CustomersMap())
	y.TotalSalesUSD.Total = yearlySales.TotalSumUSD()
	y.TotalSalesUSD.Fee = yearlySales.FeeSumUSD()
	// iterate months
	if len(sales) > 0 {
		currentMonth := time.Date(y.Year, time.January, 1, 0, 0, 0, 0, marketplace.ServerTimeZone)
		lastMonth := time.Date(y.Year, time.December, 30, 23, 59, 59, 999, marketplace.ServerTimeZone)
		// Start at the month of the first sale when it falls after January.
		if len(sales) > 0 && sales[0].Date.AsDate().After(currentMonth) {
			currentMonth = time.Date(sales[0].Date.Year(), sales[0].Date.Month(), 1, 0, 0, 0, 0, marketplace.ServerTimeZone)
		}
		now := time.Now().In(marketplace.ServerTimeZone)
		// Never iterate into the future: cap at the end of the current month
		// (now plus one month, minus the current day-of-month in days).
		if lastMonth.After(now) {
			lastMonth = now.AddDate(0, 1, -now.Day())
		}
		var prevMonthData *Month
		if previousYear != nil {
			prevMonthData = previousYear.LastMonth()
		}
		for !currentMonth.After(lastMonth) {
			month := NewMonthForDate(currentMonth)
			month.Update(sales, prevMonthData, downloadsTotal, downloadsUnique, graceDays)
			y.Months = append(y.Months, month)
			currentMonth = currentMonth.AddDate(0, 1, 0)
			prevMonthData = month
		}
		// calculate total downloads
		for _, d := range downloadsTotal {
			if d.Year == y.Year {
				y.DownloadsTotal += d.Downloads
			}
		}
		for _, d := range downloadsUnique {
			if d.Year == y.Year {
				y.DownloadsUnique += d.Downloads
			}
		}
	}
} | statistic/year.go | 0.651577 | 0.445469 | year.go | starcoder
package continuous
import (
"github.com/jtejido/ggsl/specfunc"
"github.com/jtejido/stats"
"github.com/jtejido/stats/err"
"math"
"math/rand"
)
// Beta Prime distribution (Beta Distribution of the second kind)
// https://en.wikipedia.org/wiki/Beta_prime_distribution
type BetaPrime struct {
	baseContinuousWithSource
	// alpha and beta are the two shape parameters; both must be positive
	// (enforced by NewBetaPrimeWithSource).
	alpha, beta float64
}
// NewBetaPrime constructs a Beta Prime distribution with the given shape
// parameters and the default random source.
func NewBetaPrime(alpha, beta float64) (*BetaPrime, error) {
	return NewBetaPrimeWithSource(alpha, beta, nil)
}
// NewBetaPrimeWithSource constructs a Beta Prime distribution using the given
// random source (nil for the package default). Both shape parameters must be
// strictly positive; otherwise an invalid-parameter error is returned.
func NewBetaPrimeWithSource(alpha, beta float64, src rand.Source) (*BetaPrime, error) {
	if alpha <= 0 || beta <= 0 {
		return nil, err.Invalid()
	}
	bp := &BetaPrime{alpha: alpha, beta: beta}
	bp.src = src
	return bp, nil
}
// String describes the distribution's parameter limits and support.
func (bp *BetaPrime) String() string {
	params := bp.Parameters().String()
	support := bp.Support().String()
	return "BetaPrime: Parameters - " + params + ", Support(x) - " + support
}
// Parameters reports the admissible parameter ranges:
// α ∈ (0,∞)
// β ∈ (0,∞)
func (bp *BetaPrime) Parameters() stats.Limits {
	limits := stats.Limits{}
	limits["α"] = stats.Interval{0, math.Inf(1), true, true}
	limits["β"] = stats.Interval{0, math.Inf(1), true, true}
	return limits
}
// Support reports the distribution's support: x ∈ (0,∞).
func (bp *BetaPrime) Support() stats.Interval {
	support := stats.Interval{0, math.Inf(1), true, true}
	return support
}
// Probability returns the PDF at x: x^(α-1) (1+x)^(-α-β) / B(α, β),
// and 0 outside the support.
func (bp *BetaPrime) Probability(x float64) float64 {
	if !bp.Support().IsWithinInterval(x) {
		return 0
	}
	numerator := math.Pow(x, bp.alpha-1) * math.Pow(1+x, -bp.alpha-bp.beta)
	return numerator / specfunc.Beta(bp.alpha, bp.beta)
}
// Distribution returns the CDF at x, computed as the regularized incomplete
// beta function evaluated at x/(1+x); 0 outside the support.
func (bp *BetaPrime) Distribution(x float64) float64 {
	if !bp.Support().IsWithinInterval(x) {
		return 0
	}
	return specfunc.Beta_inc(bp.alpha, bp.beta, x/(1+x))
}
// Inverse returns the quantile function at probability p, clamping p <= 0 to
// 0 and p >= 1 to +Inf. It maps the Beta quantile x through x/(1-x).
func (bp *BetaPrime) Inverse(p float64) float64 {
	switch {
	case p <= 0:
		return 0
	case p >= 1:
		return math.Inf(1)
	}
	// Error ignored: alpha and beta were validated at construction time
	// (presumably NewBeta applies the same positivity checks — verify).
	beta, _ := NewBeta(bp.alpha, bp.beta)
	x := beta.Inverse(p)
	return x / (1. - x)
}
// Skewness returns the distribution's skewness; it exists only for β > 3,
// otherwise NaN is returned.
func (bp *BetaPrime) Skewness() float64 {
	if bp.beta > 3 {
		factor := math.Sqrt((bp.beta - 2) / (bp.alpha * (bp.alpha + bp.beta - 1)))
		return (2 * (2*bp.alpha + bp.beta - 1)) / (bp.beta - 3) * factor
	}
	return math.NaN()
}
// Mean returns α/(β-1) for β > 1; the mean does not exist otherwise and NaN
// is returned.
func (bp *BetaPrime) Mean() float64 {
	if bp.beta > 1 {
		return bp.alpha / (bp.beta - 1)
	}
	return math.NaN()
}
// Mode returns the distribution's mode: (α-1)/(β+1) for α >= 1, and 0 for
// α < 1 (the density's supremum at the origin).
//
// Bug fix: the previous expression `bp.alpha - 1/bp.beta + 1` parsed as
// α - (1/β) + 1 due to operator precedence, which is not the Beta Prime
// mode; the intended formula is (α-1)/(β+1).
func (bp *BetaPrime) Mode() float64 {
	if bp.alpha >= 1 {
		return (bp.alpha - 1) / (bp.beta + 1)
	}
	return 0
}
// Median returns the 0.5 quantile via the inverse CDF.
func (bp *BetaPrime) Median() float64 {
	const half = 0.5
	return bp.Inverse(half)
}
// Variance returns α(α+β-1) / ((β-2)(β-1)²) for β > 2; the variance does not
// exist otherwise and NaN is returned.
func (bp *BetaPrime) Variance() float64 {
	if bp.beta > 2 {
		denom := (bp.beta - 2) * math.Pow(bp.beta-1, 2.)
		return (bp.alpha * (bp.alpha + bp.beta - 1)) / denom
	}
	return math.NaN()
}
// ExKurtosis returns the excess kurtosis. The fourth moment of a Beta Prime
// distribution exists only for β > 4; for smaller β the function now returns
// NaN, consistent with Mean/Variance/Skewness, instead of evaluating the
// formula unconditionally (which divided by (β-3)(β-4) and produced
// meaningless values or infinities for β <= 4).
//
// NOTE(review): the polynomial below is kept exactly as in the original
// source; verify it against a published reference before relying on it.
func (bp *BetaPrime) ExKurtosis() float64 {
	if bp.beta <= 4 {
		return math.NaN()
	}
	a, b := bp.alpha, bp.beta
	num := 3*(a*a*a)*(b*b) + 69*(a*a*a)*b - 30*(a*a*a) + 6*(a*a)*(b*b*b) + 12*(a*a)*(b*b) - 78*(a*a)*b + 60*(a*a) + 3*a*(b*b*b*b) + 9*a*(b*b*b) - 69*a*(b*b) + 99*a*b - 42*a + 6*(b*b*b*b) - 30*(b*b*b) + 54*(b*b) - 42*b + 12
	denom := (a + b - 1) * (b - 3) * (b - 4)
	return num / denom
}
// Rand draws one variate by inverse-transform sampling: a uniform number in
// [0,1) is passed through Inverse. Uses bp.src when set, otherwise the
// package-global generator.
func (bp *BetaPrime) Rand() float64 {
	var rnd float64
	if bp.src != nil {
		rnd = rand.New(bp.src).Float64()
	} else {
		rnd = rand.Float64()
	}
	return bp.Inverse(rnd)
} | dist/continuous/beta_prime.go | 0.818011 | 0.433502 | beta_prime.go | starcoder
package main
var help = `
Demlo is a music library organizer. It can encode, fix case, change path
according to tags or file properties, tag from an online database, copy covers
while ignoring duplicates or those below a quality threshold, and much more. It
makes it possible to manage your libraries uniformly and dynamically. You can
write your own rules in Lua scripts to best fit your needs.
PROCESS
First Demlo creates a list of all input files. When a folder is specified, all
files matching the extensions from the 'extensions' variable will be appended to
the list. Identical files are appended only once.
Next all files get analyzed:
- The audio file details (tags, stream properties, format properties, etc.) are
stored into the 'input' variable. The 'output' variable gets its default values
from 'input', or from an index file if specified from command-line (see INDEX
section). If no index has been specified and if an attached cuesheet is found,
all cuesheet details are appended accordingly. Cuesheet tags override stream
tags, which override format tags. Finally, still without index, tags can be
retrieved from Internet if the command-line option is set.
- If a 'prescript' has been specified, it gets executed. It makes it possible to
adjust the input values and global variables before running the other scripts.
- The scripts, if any, get executed in the lexicographic order of their
basename. The 'output' variable is transformed accordingly (see VARIABLES
section). Scripts may contain rules such as defining a new file name, new tags,
new encoding properties, etc. You can use conditions on input values to set the
output properties, which makes it virtually possible to process a full music
library in one single run.
- If a 'postscript' has been specified, it gets executed. It allows to adjust
the output of the scripts from the commandline.
- Demlo makes some last-minute tweaking if need be: it adjusts the bitrate, the
path, the encoding parameters, and so on.
- A preview of changes is displayed.
- When applying changes, the covers get copied if required and the audio file
gets processed: tags are modified as specified, the file is re-encoded if
required, and the output is written to the appropriate folder. When destination
already exists, the 'exist' action is executed (see EXISTING DESTINATION
section).
CONFIGURATION
The program's default behaviour can be changed from the a configuration file.
The user configuration is found in
$XDG_CONFIG_HOME/demlo/config.lua (Default: $HOME/.config/demlo/config.lua)
If not found, then the system configuration is used:
$XDG_DATA_DIRS/demlo/config.lua (Default: /usr/local/share/demlo/config.lua or
/usr/share/demlo/config.lua)
The system configuration can provide a good starting point for the user
configuration. Most commandline flags default value can be changed. The
configuration file is loaded on startup, before parsing the commandline
options. You can review the default value of the commandline flags with by
starting 'demlo' without argument.
If you wish to use no configuration file, set the environment variable
DEMLO_CONFIG to ".".
SCRIPTS
Scripts can contain any sandboxed Lua code. They have no requirements at
all. To be useful however, they should set values of the 'output' table detailed
in the VARIABLES section. You can use idiomatic Lua to set the variables
dynamically. For instance:
output.path = library .. '/' ..
o.artist .. '/' ..
(o.album ~= nil and o.album .. '/' or '') ..
track_padded .. '. ' ..
o.title
'input' and 'output' are both accessible from any script.
All default functions and variables except 'output' are reset on every script
call to enforce consistency. Local variables are lost from one script call to
another. Global variables are preserved. You can use this feature to pass data
between scripts, for instance options or new functions.
The 'output' structure consistency is guaranteed at the start of every
script. Demlo will only extract the fields with the right type as described in
the 'Variables' section.
Warning: Do not abuse of global variables, especially when processing non-fixed
size data (e.g. tables). Data could grow big and slow down the program.
Some functions like 'os.execute' are not available to prevent scripts from
altering the system. It is not possible to print to the standard output/error
unless running in debug mode and using the 'debug' function.
See the 'sandbox.go' source file for a list of allowed functions and variables.
Lua patterns are replaced by Go regexes. See
https://github.com/google/re2/wiki/Syntax.
The official scripts are stored in
$XDG_DATA_DIRS/demlo/scripts (Default: /usr/local/share/demlo/scripts or
/usr/share/demlo/scripts)
The user script folder is located at
$XDG_CONFIG_HOME/demlo/scripts (Default: $HOME/.config/demlo/scripts)
The user script folder might have to be created before you can add your own
scripts inside. The user folder takes precedence over the system folder, thus
scripts with the same basename will be found in the user folder.
RUNTIME CODE (PRESCRIPT & POSTSCRIPT)
The user scripts are most useful when they are generic enough to be applied on
any file. Therefore they may not properly handle some uncommon input
values. You can tweak the input with temporary overrides from commandline thanks
to the 'prescript' and the 'postscript'. They will let you run sandboxed Lua
code before and after all other scripts, respectively.
Use global variables to transfer data and parameters along.
If the prescript and postscript end up being too long, consider writing a Demlo
script. You can also define shell aliases or use wrapper scripts as convenience.
EXISTING DESTINATION
By default, when the destination exists, Demlo will append a suffix to the
output destination. This behaviour can be changed from the 'exist' action
specified by the user. Demlo comes with a few default actions.
The 'exist' action works just like scripts with the following differences:
- Any change to 'output.path' will be skipped.
- An additional variable is accessible from the action: 'existinfo' holds the file
details of the existing file in the same fashion as 'input'. This allows for
comparing the input file and the existing destination.
The writing rules can be tweaked the following way:
output.write = 'skip' Skip current file.
output.write = 'overwrite' Overwrite existing destination.
output.write = '' Anything else: append random suffix (default)
Word of caution: overwriting breaks Demlo's rule of not altering existing files.
It can lead to undesired results if the overwritten file is also part of the
(yet to be processed) input. The overwrite capability comes in handy when
syncing music libraries.
VARIABLES (INPUT & OUTPUT)
The 'input' table describes the file:
input = {
path = '/real/path',
bitrate = 0,
tags = {},
time = {
sec = 0,
nsec = 0,
}
audioindex = 0,
streams = {},
format = {},
embeddedcovers = {},
externalcovers = {},
onlinecover = {},
}
Bitrate is in bits-per-seconds (bps). That is, for 320 kbps you would specify
output.bitrate = 320000
The 'time' is the modification time of the file. It holds the sec seconds and
nsec nanoseconds since January 1, 1970 UTC.
The entry 'streams' and 'format' are as returned by
$ ffprobe -v quiet -print_format json -show_streams -show_format FILE
They give access to most metadata that FFmpeg can return. For instance the
duration of the track in seconds can be found in 'input.format.duration'.
Since there may be more than one stream (covers, other data), the first audio
stream is assumed to be the music stream. For convenience, the index of the
music stream is stored in 'audioindex'.
The tags returned by FFmpeg are found in streams, format and in the cuesheet.
To make tag queries easier, all tags are stored in the 'tags' table, with the
following precedence:
format tags < stream tags < cuesheet header tags < cuesheet track tags
You can remove a tag by setting it to 'nil' or the empty string.
The 'output' table describes the transformation to apply to the file:
output = {
path = 'full/path/with.ext',
format = 'format',
parameters = {},
tags = {},
embeddedcovers = {},
externalcovers = {},
onlinecover = {},
write = '',
removesource = false,
}
The 'parameters' array holds the commandline parameters passed to FFmpeg. It can
be anything supported by FFmpeg, although this variable is supposed to hold
encoding information. See the EXAMPLES section.
The 'embeddedcovers', 'externalcovers' and 'onlinecover' variables are detailed
in the 'Covers' section.
The 'write' variable is explained in the EXISTING DESTINATION section.
The 'removesource' variable is a boolean: when true, Demlo removes the source file
after processing. This can speed up the process when not re-encoding. This
option is ignored for multi-track files.
For convenience, the following shortcuts are provided:
i = input.tags
o = output.tags
LUA FUNCTIONS
Demlo provides some non-standard Lua functions to ease scripting.
debug(string...)
Display a message on stderr if debug mode is on.
stringnorm(string)
Return lowercase string without non-alphanumeric characters nor leading zeros.
stringrel(string, string)
Return the relation coefficient of the two input strings. The result is a float
in the [0.0, 1.0] range. 0.0 means no relation at all, 1.0 means identical
strings.
PREVIEW
The official scripts are usually very smart at guessing the right values. They
might make mistakes however. If you are unsure, you can (and you are advised to)
preview the before-after changes before proceeding. A JSON preview of the
changes is printed to stdout with the '-o' commandline flag or if stdout is
redirected.
INTERNET TAGGING AND COVER FETCHING
The initial values of the 'output' table can be completed with tags fetched from
the MusicBrainz database. Audio files are fingerprinted for the queries, so even
with initially wrong file names and tags, the right values should still be
retrieved. The front album cover can also be retrieved.
Proxy parameters will be fetched automatically from the 'http_proxy'
and 'https_proxy' environment variables.
As this process requires network access it can be quite slow. Nevertheless,
Demlo is specifically optimized for albums, such that network queries are
used for only one track per album, when possible.
INDEX
Demlo can preset the 'output' variables according to the values set in a text file
before calling the scripts.
This 'index' is a JSON file and can be generated with the '-o' commandline flag
or with shell redirection if you shell supports that. It is valid JSON except
for the missing beginning and the missing end: This makes it possible to
concatenate and to append to existing index files. Demlo will automatically
complete the missing parts so that it becomes valid JSON.
Online tagging is automatically disabled when an index is used.
The index file is useful when you want to edit tags manually: You can redirect
the output to a file, edit the content manually with your favorite text editor,
then run Demlo again with the index as argument. See the EXAMPLES section.
This feature can also be used to interface Demlo with other programs.
EXAMPLES
The following examples will not proceed unless the '-p' command-line option is
true.
Important: on most shells, you _must_ use single quotes for the runtime Lua
command to prevent shell expansion. Inside the Lua code, use double quotes for
strings and escape single quotes.
demlo -s alternate audio.file
Add 'alternate' script to the script chain and preview the changes. The
specified name does not contain any folder separator, thus it is found in the
user or system script folder.
demlo -s path/to/local/script.lua audio.file
Add the local Lua file to the script chain. This feature is convenient if you
want to write scripts that are too complex to fit on the command-line, but not
generic enough to fit the user or system script folders.
demlo -r '' -s path -s case audio.file
Remove all script from the list, then add '30-tag-case' and '60-path'
scripts. Note that '30-case' is run before '60-path'.
demlo -post 'o.artist=i.artist' audio.file
Use the default scripts but keep original value for the 'artist' tag:
demlo *.wv >> index
# Oops! Forgot some files:
demlo *.flac >> index
# Edit index as needed...
demlo -p -i index -r '' *.wv
1) Preview default scripts transformation and save it to an index. 2) Edit file
to fix any potential mistake. 3) Run Demlo over the same files using the index
information only.
demlo -i index -s rename *.wv
Same as above but generate output filename according to the custom '61-rename'
script. The numeric prefix is important: it ensures that '61-rename' will be run
after all the default tag related scripts and after '60-path'. Otherwise, if a
change in tags would occur later on, it would not affect the renaming script.
demlo -t album/*.ogg > album-index.json
Retrieve all tags from the Internet and save the result to an index.
demlo -t -r path -s remove_source album/*
Change tags in-place with entries from MusicBrainz.
demlo -ext webm -pre 'output.format="webm"' audio.webm
Add support for non-default formats from commandline.
demlo -exist writenewer audio.file
Overwrite existing destination if input is newer:
SEE ALSO
- The ffmpeg(1) and ffprobe(1) man pages.
- The official Lua manual: http://www.lua.org/pil/contents.html.
` | doc.go | 0.518059 | 0.436322 | doc.go | starcoder |
package common
import (
"log"
"engo.io/engo"
"engo.io/gl"
"github.com/ilackarms/sprite-locator/models"
"encoding/json"
"image"
"github.com/engoengine/math"
)
// sortSprites orders sprites in reading order: rows from top to bottom,
// then left to right within a row.
//
// Fixed: the previous version shadowed the center helper with a local
// variable and then tried to call it (a compile error), and always
// returned the empty `sorted` slice it never appended to.
//
// NOTE(review): sprites are assumed to share an identical center Y within
// a row; near-aligned rows could interleave — confirm against the metadata
// produced by sprite-locator.
//
// The input slice is not modified.
func sortSprites(sprites []models.Sprite) []models.Sprite {
	sorted := make([]models.Sprite, len(sprites))
	copy(sorted, sprites)
	// Insertion sort on (center Y, center X); sprite counts on a sheet
	// are small, so the O(n²) cost is irrelevant and no extra import is
	// needed.
	for i := 1; i < len(sorted); i++ {
		for j := i; j > 0 && spriteLess(sorted[j], sorted[j-1]); j-- {
			sorted[j], sorted[j-1] = sorted[j-1], sorted[j]
		}
	}
	return sorted
}

// spriteLess reports whether a comes before b in reading order:
// top-to-bottom by center Y, then left-to-right by center X.
func spriteLess(a, b models.Sprite) bool {
	ca, cb := center(a), center(b)
	if ca.Y != cb.Y {
		return ca.Y < cb.Y
	}
	return ca.X < cb.X
}
// center returns the midpoint of the sprite's bounding box. Integer
// division means coordinates round toward zero.
func center(sprite models.Sprite) image.Point {
	x := (sprite.Min.X + sprite.Max.X) / 2
	y := (sprite.Min.Y + sprite.Max.Y) / 2
	return image.Pt(x, y)
}
// distance returns the Euclidean distance between two points as a float32.
//
// Fixed: the previous version passed typed-int coordinate differences
// straight to math.Pow; squaring via multiplication avoids the conversion
// problem and the Pow calls entirely. The engoengine math.Sqrt is kept,
// matching this function's float32 return type.
func distance(p1, p2 image.Point) float32 {
	dx := float32(p2.X - p1.X)
	dy := float32(p2.Y - p1.Y)
	return math.Sqrt(dx*dx + dy*dy)
}
// Spritesheet is a class that stores a set of tiles from a file, used by tilemaps and animations
type Spritesheet struct {
	texture *gl.Texture // The original texture
	width, height float32 // The dimensions of the total texture
	// Sprites holds the per-cell bounding boxes loaded from the metadata file.
	Sprites []models.Sprite
	cache map[int]Texture // The cell cache cells
}
// NewSpritesheetFromTexture builds a Spritesheet from an already-loaded
// texture and its JSON sprite metadata. Returns nil (after logging a
// warning) when the metadata cannot be unmarshalled.
func NewSpritesheetFromTexture(tr *TextureResource, metadata *TextResource) *Spritesheet {
	var meta models.Spritesheet
	err := json.Unmarshal([]byte(metadata.Text), &meta)
	if err != nil {
		log.Println("[WARNING] [NewSpritesheetFromFile]: Unmarshalling json from ", metadata.URL(), ": ", err)
		return nil
	}
	sheet := &Spritesheet{
		texture: tr.Texture,
		width:   tr.Width,
		height:  tr.Height,
		Sprites: meta.Sprites,
		cache:   map[int]Texture{},
	}
	return sheet
}
// NewSpritesheetFromFile is a simple handler for creating a new spritesheet from a file
// textureName is the name of a texture already preloaded with engo.Files.Add,
// textName the name of the preloaded JSON metadata resource.
// Returns nil (after logging a warning) if either resource is missing or of
// the wrong type.
//
// Fixed: the TextResource type-assertion failure previously logged
// textureName instead of the textName that was actually looked up.
func NewSpritesheetFromFile(textureName, textName string) *Spritesheet {
	res, err := engo.Files.Resource(textureName)
	if err != nil {
		log.Println("[WARNING] [NewSpritesheetFromFile]: Received error:", err)
		return nil
	}
	img, ok := res.(TextureResource)
	if !ok {
		log.Println("[WARNING] [NewSpritesheetFromFile]: Resource not of type `TextureResource`:", textureName)
		return nil
	}
	res, err = engo.Files.Resource(textName)
	if err != nil {
		log.Println("[WARNING] [NewSpritesheetFromFile]: Received error:", err)
		return nil
	}
	txt, ok := res.(TextResource)
	if !ok {
		log.Println("[WARNING] [NewSpritesheetFromFile]: Resource not of type `TextResource`:", textName)
		return nil
	}
	return NewSpritesheetFromTexture(&img, &txt)
}
// Cell gets the region at the index i, updates and pulls from cache if need be
func (s *Spritesheet) Cell(index int) Texture {
	if cached, ok := s.cache[index]; ok {
		return cached
	}
	// Build the texture region from the sprite's bounding box and cache it.
	sp := s.Sprites[index]
	minX, minY := float32(sp.Min.X), float32(sp.Min.Y)
	maxX, maxY := float32(sp.Max.X), float32(sp.Max.Y)
	tex := Texture{
		id:     s.texture,
		width:  maxX - minX,
		height: maxY - minY,
		viewport: engo.AABB{
			engo.Point{
				X: minX,
				Y: minY,
			},
			engo.Point{
				X: maxX,
				Y: maxY,
			},
		}}
	s.cache[index] = tex
	return tex
}
// Drawable returns the cell at the given index as a Drawable.
func (s *Spritesheet) Drawable(index int) Drawable {
	cell := s.Cell(index)
	return cell
}
// Drawables returns every cell of the sheet as a Drawable, in index order.
func (s *Spritesheet) Drawables() []Drawable {
	out := make([]Drawable, s.CellCount())
	for i := range out {
		out[i] = s.Drawable(i)
	}
	return out
}
// CellCount returns the number of sprite cells described by the metadata.
func (s *Spritesheet) CellCount() int {
	return len(s.Sprites)
}
// Cells returns every cell texture of the sheet, in index order.
func (s *Spritesheet) Cells() []Texture {
	out := make([]Texture, 0, s.CellCount())
	for i := 0; i < s.CellCount(); i++ {
		out = append(out, s.Cell(i))
	}
	return out
}
/*
type Sprite struct {
Position *Point
Scale *Point
Anchor *Point
Rotation float32
Color color.Color
Alpha float32
Region *Region
}
func NewSprite(region *Region, x, y float32) *Sprite {
return &Sprite{
Position: &Point{x, y},
Scale: &Point{1, 1},
Anchor: &Point{0, 0},
Rotation: 0,
Color: color.White,
Alpha: 1,
Region: region,
}
}
*/ | common/spritesheet.go | 0.525125 | 0.406155 | spritesheet.go | starcoder |
package basically
// A Document represents a given text, and is responsible for
// handling the summarization and keyword extraction process.
type Document interface {
	// Summarize extracts key sentences from the document.
	// NOTE(review): the exact semantics of length, threshold and focus
	// live in the implementation, which is not visible here.
	Summarize(length int, threshold float64, focus string) ([]*Sentence, error)
	// Highlight extracts keywords from the document.
	Highlight(length int, merge bool) ([]*Keyword, error)
	// Characters returns two character counts for the document.
	// NOTE(review): the meaning of each count is not visible here — confirm.
	Characters() (int, int)
}

// A Parser is responsible for parsing and tokenizing a document
// into strings and words. A Parser also performs additional tasks
// such as POS-tagging and sentiment analysis.
type Parser interface {
	// ParseDocument splits doc into sentences and tokens.
	// NOTE(review): confirm the effect of the quote flag in the implementation.
	ParseDocument(doc string, quote bool) ([]*Sentence, []*Token, error)
}

// A Summarizer is responsible for extracting key sentences from a
// document.
type Summarizer interface {
	// Initialize prepares the summarizer with the sentences to rank, a
	// similarity measure, a token filter, an optional focus sentence, and
	// a similarity threshold.
	Initialize(sents []*Sentence, similar Similarity, filter TokenFilter,
		focusString *Sentence, threshold float64)
	// Rank runs the ranking algorithm for the given number of iterations.
	Rank(iters int)
}

// A Highlighter is responsible for extracting key words from a document.
type Highlighter interface {
	// Initialize prepares the highlighter with the document tokens, a
	// token filter, and a co-occurrence window size.
	Initialize(tokens []*Token, filter TokenFilter, window int)
	// Rank runs the ranking algorithm for the given number of iterations.
	Rank(iters int)
	// Highlight returns the top keywords after ranking.
	Highlight(length int, merge bool) ([]*Keyword, error)
}
// A TokenFilter represents a (black/white) filter applied to tokens before similarity calculations.
type TokenFilter func(*Token) bool

// A Similarity computes the similarity of two sentences after applying the token filter.
// n1 and n2 are the token lists of the two sentences being compared.
type Similarity func(n1, n2 []*Token, filter TokenFilter) float64
// A Token represents an individual token of text such as a word or punctuation
// symbol.
type Token struct {
	Tag string // The token's part-of-speech tag.
	Text string // The token's actual content.
	Order int // The token's order in the text.
}

// A Keyword is the keyword belonging to a highlighted document.
// A Keyword contains the raw word, and its associated weight.
type Keyword struct {
	Word string // Raw keyword.
	Weight float64 // Weight of the keyword.
}

// A Sentence represents an individual sentence within the text.
type Sentence struct {
	Raw string // Raw sentence string.
	Tokens []*Token // Tokenized sentence.
	Sentiment float64 // Sentiment score (the Parser performs sentiment analysis).
	Score float64 // Score (weight) of the sentence.
	Bias float64 // Bias assigned to the sentence for ranking.
	Order int // The sentence's order in the text.
}
package compast
// Set of functions to create ast Nodes.
import (
"github.com/omakoto/compromise/src/compromise"
"github.com/omakoto/compromise/src/compromise/compfunc"
"sync/atomic"
)
// assertType panics with a spec error unless t is non-nil and of the given
// token type; on success it returns t unchanged. argName is used in the
// error message.
func assertType(t *Token, tokenType int, argName string) *Token {
	switch {
	case t == nil:
		panic(compromise.NewSpecErrorf(nil, "%s must not be nil", argName))
	case t.TokenType != tokenType:
		panic(compromise.NewSpecErrorf(t, "%s must be of type %s", argName, tokenTypeNames[tokenType]))
	}
	return t
}
// assertTypeOrNil is like assertType, except that a nil token is allowed
// and passed through unchanged.
func assertTypeOrNil(t *Token, tokenType int, argName string) *Token {
	if t == nil {
		return nil
	}
	return assertType(t, tokenType, argName)
}
// lastID holds the id of the most recently created node; newNode increments
// it atomically, so ids start at 0.
var lastID int32 = -1
// newNode allocates a Node of the given type with a fresh, atomically
// assigned id and the token it was parsed from.
func newNode(nodeType int, selfToken *Token) *Node {
	id := int(atomic.AddInt32(&lastID, 1))
	n := &Node{}
	n.depth = 0
	n.id = id
	n.nodeType = nodeType
	n.selfToken = selfToken
	return n
}
// NewRoot builds the root node of an AST. The root points at itself and
// owns the label and command-jump lookup tables.
func NewRoot() *Node {
	root := newNode(NodeRoot, nil)
	root.labels = map[string]*Node{}
	root.commandJumpTo = map[string]*Node{}
	root.root = root
	return root
}
// NewCommand builds a NodeCommand node; command is required, label optional.
func NewCommand(this, command, label *Token) *Node {
	n := newNode(NodeCommand, assertType(this, TokenCommand, "this"))
	n.command = assertType(command, TokenLiteral, "command")
	n.label = assertTypeOrNil(label, TokenLabel, "label")
	return n
}

// NewLabel builds a NodeLabel node; label is required.
func NewLabel(this, label *Token) *Node {
	n := newNode(NodeLabel, assertType(this, TokenCommand, "this"))
	n.label = assertType(label, TokenLabel, "label")
	return n
}

// NewCall builds a NodeCall node; label is required.
func NewCall(this, label *Token) *Node {
	n := newNode(NodeCall, assertType(this, TokenCommand, "this"))
	n.label = assertType(label, TokenLabel, "label")
	return n
}

// NewBreak builds a NodeBreak node; label is optional.
func NewBreak(this, label *Token) *Node {
	n := newNode(NodeBreak, assertType(this, TokenCommand, "this"))
	n.label = assertTypeOrNil(label, TokenLabel, "label")
	return n
}

// NewContinue builds a NodeContinue node; label is optional.
func NewContinue(this, label *Token) *Node {
	n := newNode(NodeContinue, assertType(this, TokenCommand, "this"))
	n.label = assertTypeOrNil(label, TokenLabel, "label")
	return n
}

// NewFinish builds a NodeFinish node; it takes no extra arguments.
func NewFinish(this *Token) *Node {
	n := newNode(NodeFinish, assertType(this, TokenCommand, "this"))
	return n
}
// NewSwitch builds a NodeSwitch node; pattern and label are both optional.
func NewSwitch(this *Token, pattern, label *Token) *Node {
	n := newNode(NodeSwitch, assertType(this, TokenCommand, "this"))
	n.pattern = assertTypeOrNil(pattern, TokenLiteral, "pattern")
	n.label = assertTypeOrNil(label, TokenLabel, "label")
	return n
}

// NewAny builds a NodeAny node; help is optional.
func NewAny(this *Token, help *Token) *Node {
	n := newNode(NodeAny, assertType(this, TokenCommand, "this"))
	n.help = assertTypeOrNil(help, TokenHelp, "help")
	return n
}

// NewLoop builds a NodeLoop node; pattern and label are both optional.
func NewLoop(this, pattern, label *Token) *Node {
	n := newNode(NodeLoop, assertType(this, TokenCommand, "this"))
	n.pattern = assertTypeOrNil(pattern, TokenLiteral, "pattern")
	n.label = assertTypeOrNil(label, TokenLabel, "label")
	return n
}

// NewSwitchLoop builds a NodeSwitchLoop node; pattern and label are both optional.
func NewSwitchLoop(this, pattern, label *Token) *Node {
	n := newNode(NodeSwitchLoop, assertType(this, TokenCommand, "this"))
	n.pattern = assertTypeOrNil(pattern, TokenLiteral, "pattern")
	n.label = assertTypeOrNil(label, TokenLabel, "label")
	return n
}
// NewGoCall builds a NodeGoCall node backed by a registered Go function.
func NewGoCall(this, funcName *Token, args []*Token) *Node {
	return newGolangCallNode(NodeGoCall, this, funcName, args)
}

// NewCandidate builds a NodeCandidate node backed by a registered Go function.
func NewCandidate(this, funcName *Token, args []*Token) *Node {
	return newGolangCallNode(NodeCandidate, this, funcName, args)
}
// newGolangCallNode builds a node that invokes a Go function registered with
// compfunc. It validates the function-name and argument tokens, and panics
// with a spec error if the function is not actually registered.
func newGolangCallNode(nodeType int, this, funcName *Token, args []*Token) *Node {
	n := newNode(nodeType, assertType(this, TokenCommand, "this"))
	n.funcName = assertType(funcName, TokenLiteral, "funcName")
	for _, arg := range args {
		assertType(arg, TokenLiteral, "args")
	}
	n.args = args
	err := compfunc.Defined(n.funcName.Word)
	if err != nil {
		panic(compromise.NewSpecError(funcName, err.Error()))
	}
	return n
}
// NewLiteral builds a NodeLiteral node from a literal token; help is optional.
func NewLiteral(this, help *Token) *Node {
	n := newNode(NodeLiteral, assertType(this, TokenLiteral, "this"))
	n.literal = this
	n.help = assertTypeOrNil(help, TokenHelp, "help")
	return n
}
package server
import (
"time"
)
// Measurement is a single sensor reading: the sensor it came from, the kind
// of measurement, its value, and when it was recorded.
type Measurement interface {
	SensorID() string
	Measurement() string
	Value() interface{}
	Time() time.Time
}
// BatteryVoltageMeasurement is a battery voltage reading from a sensor.
type BatteryVoltageMeasurement struct {
	SensorID_ string `json:"sensorID"`
	// Voltage given in volts
	Voltage_ float64 `json:"voltage"`
	// Time when measurement was recorded
	Time_ time.Time `json:"time"`
}

// Measurement returns the measurement type identifier "batteryvoltage".
func (m *BatteryVoltageMeasurement) Measurement() string {
	return "batteryvoltage"
}

// SensorID returns the id of the sensor that produced the reading.
func (m *BatteryVoltageMeasurement) SensorID() string {
	return m.SensorID_
}

// Value returns the voltage in volts.
func (m *BatteryVoltageMeasurement) Value() interface{} {
	return m.Voltage_
}

// Time returns when the measurement was recorded.
func (m *BatteryVoltageMeasurement) Time() time.Time {
	return m.Time_
}

// TemperatureMeasurement is a temperature reading from a sensor.
type TemperatureMeasurement struct {
	SensorID_ string `json:"sensorID"`
	// Temperature given in Celsius
	Temperature_ float64 `json:"temperature"`
	// Time when measurement was recorded
	Time_ time.Time `json:"time"`
}

// Measurement returns the measurement type identifier "temperature".
func (m *TemperatureMeasurement) Measurement() string {
	return "temperature"
}

// SensorID returns the id of the sensor that produced the reading.
func (m *TemperatureMeasurement) SensorID() string {
	return m.SensorID_
}

// Value returns the temperature in Celsius.
func (m *TemperatureMeasurement) Value() interface{} {
	return m.Temperature_
}

// Time returns when the measurement was recorded.
func (m *TemperatureMeasurement) Time() time.Time {
	return m.Time_
}
// HumidityMeasurement is a relative humidity reading from a sensor.
type HumidityMeasurement struct {
	SensorID_ string `json:"sensorID"`
	// Humidity given in percent
	Humidity_ float64 `json:"humidity"`
	// Time when measurement was recorded
	Time_ time.Time `json:"time"`
}

// Measurement returns the measurement type identifier "humidity".
func (m *HumidityMeasurement) Measurement() string {
	return "humidity"
}

// SensorID returns the id of the sensor that produced the reading.
func (m *HumidityMeasurement) SensorID() string {
	return m.SensorID_
}

// Value returns the humidity in percent.
func (m *HumidityMeasurement) Value() interface{} {
	return m.Humidity_
}

// Time returns when the measurement was recorded.
func (m *HumidityMeasurement) Time() time.Time {
	return m.Time_
}

// PressureMeasurement is an air pressure reading from a sensor.
type PressureMeasurement struct {
	SensorID_ string `json:"sensorID"`
	// Pressure given in Pascal
	Pressure_ int `json:"pressure"`
	// Time when measurement was recorded
	Time_ time.Time `json:"time"`
}

// Measurement returns the measurement type identifier "pressure".
func (m *PressureMeasurement) Measurement() string {
	return "pressure"
}

// SensorID returns the id of the sensor that produced the reading.
func (m *PressureMeasurement) SensorID() string {
	return m.SensorID_
}

// Value returns the pressure in Pascal.
func (m *PressureMeasurement) Value() interface{} {
	return m.Pressure_
}

// Time returns when the measurement was recorded.
func (m *PressureMeasurement) Time() time.Time {
	return m.Time_
}
package sat
import (
"fmt"
"strings"
)
// Constraint implementations limit the circumstances under which a
// particular Installable can appear in a solution.
type Constraint interface {
	// String returns a human-readable description of the constraint as it
	// applies to the Installable identified by subject.
	String(subject Identifier) string
	// apply encodes the constraint into the given constrainer on behalf of
	// subject.
	apply(x constrainer, subject Identifier)
}

// AppliedConstraint values compose a single Constraint with the
// Installable it applies to.
type AppliedConstraint struct {
	Installable Installable
	Constraint Constraint
}

// String implements fmt.Stringer and returns a human-readable message
// representing the receiver.
func (a AppliedConstraint) String() string {
	return a.Constraint.String(a.Installable.Identifier())
}
// constrainer is the set of operations available to Constraint
// implementations.
type constrainer interface {
	// Add appends the Installable identified by the given
	// Identifier to the clause representing a Constraint.
	Add(Identifier)
	// AddNot appends the negation of the Installable identified by
	// the given Identifier to the clause representing a
	// Constraint.
	AddNot(Identifier)
	// Weight sets an additional weight to add to the constrained
	// Installable. Calls with negative arguments are ignored.
	Weight(int)
}
type mandatory struct{}

func (c mandatory) String(subject Identifier) string {
	return fmt.Sprintf("%s is mandatory", subject)
}

// apply emits the unit clause requiring subject to be present.
func (c mandatory) apply(x constrainer, subject Identifier) {
	x.Add(subject)
}

// Mandatory returns a Constraint that will permit only solutions that
// contain a particular Installable.
func Mandatory() Constraint {
	return mandatory{}
}

type prohibited struct{}

func (c prohibited) String(subject Identifier) string {
	return fmt.Sprintf("%s is prohibited", subject)
}

// apply emits the unit clause forbidding subject from being present.
func (c prohibited) apply(x constrainer, subject Identifier) {
	x.AddNot(subject)
}

// Prohibited returns a Constraint that will reject any solution that
// contains a particular Installable. Callers may also decide to omit
// an Installable from input to Solve rather than apply such a
// Constraint.
func Prohibited() Constraint {
	return prohibited{}
}
// dependency lists the Identifiers that can satisfy a subject's requirement.
type dependency []Identifier

// String describes the dependency in human-readable form.
func (c dependency) String(subject Identifier) string {
	names := make([]string, 0, len(c))
	for _, id := range c {
		names = append(names, string(id))
	}
	return fmt.Sprintf("%s requires at least one of %s", subject, strings.Join(names, ", "))
}

// apply emits the clause: subject implies at least one of the listed
// Identifiers. An empty dependency contributes nothing.
func (c dependency) apply(x constrainer, subject Identifier) {
	if len(c) == 0 {
		return
	}
	x.AddNot(subject)
	for _, id := range c {
		x.Add(id)
	}
}

// Dependency returns a Constraint that will only permit solutions
// containing a given Installable on the condition that at least one
// of the Installables identified by the given Identifiers also
// appears in the solution.
func Dependency(ids ...Identifier) Constraint {
	return dependency(ids)
}
type conflict Identifier

func (c conflict) String(subject Identifier) string {
	return fmt.Sprintf("%s conflicts with %s", subject, c)
}

// apply emits the clause forbidding subject and c from appearing together.
func (c conflict) apply(x constrainer, subject Identifier) {
	x.AddNot(subject)
	x.AddNot(Identifier(c))
}

// Conflict returns a Constraint that will permit solutions containing
// either the constrained Installable, the Installable identified by
// the given Identifier, or neither, but not both.
func Conflict(id Identifier) Constraint {
	return conflict(id)
}

type weight int

func (c weight) String(subject Identifier) string {
	return fmt.Sprintf("%s has weight %d", subject, c)
}

func (c weight) apply(x constrainer, subject Identifier) {
	x.Weight(int(c))
}

// Weight returns a Constraint that increases the weight of the
// constrained Installable by the given (non-negative) amount.
func Weight(w int) Constraint {
	return weight(w)
}
package main
import (
"fmt"
"sort"
"strings"
"github.com/scottyw/adventofcode2020/pkg/aoc"
)
// main reads the puzzle input, parses the adapter joltages, and prints the
// number of valid adapter arrangements.
func main() {
	lines := aoc.FileLinesToStringSlice("input/10.txt")
	fmt.Println(countCombinations(parseInput(lines)))
}
/****************************************************************
From part A we know that the input is a chain with gaps of 1 or 3 but not 0, 2 or more than 3
Any adapter with a gap of 3 before or after cannot be optional or the gap would be too large if it were removed.
These ones will appear in every valid combination.
Any adaptor with a gap of 1 both before and after it is potentially optional.
We search for sequences of consective gaps of size 1.
In practice we don't need to check gaps on both sides of adapter.
We look at the previous gap only which is enough to work out how many valid combinations there are of each sequence
- [0 3 4 5 8] contains sequence [4 5] of length 2
- [0 3 4 5 6 9] contains sequence [4 5 6] of length 3
- [0 3 4 5 6 7 10] contains sequence [4 5 6 7] of length 4
- etc.
With this input we luckily only need to worry about N = 4 at most:
- length = 1, valid combinations = 1 e.g. [1 4 5 7]
- length = 2, valid combinations = 2 e.g. [1 4 5 6 9] [1 4 6 9]
- length = 3, valid combinations = 4 e.g. [1 4 5 6 7 10] [1 4 5 7 10] [1 4 6 7 10] [1 4 7 10]
- length = 4, valid combinations = 7
Once we have a count of the valid combinations for each sequence, we multiply them together to find the total count of possible combinations across the whole chain.
****************************************************************/
func countCombinations(adapters []int) int {
// Add the socket on the plane to make gap calculation simpler
adapters = append(adapters, 0)
// Sort the adapters by joltage
sort.Ints(adapters)
// Add the adapter on the device too
adapters = append(adapters, adapters[len(adapters)-1]+3)
// Implement the algorithm above
var gap, consecutive int
total := 1
for i := 1; i < len(adapters); i++ {
// Each adapter connects to the previous one
gap = adapters[i] - adapters[i-1]
switch gap {
case 1:
consecutive++
case 3:
if consecutive > 0 {
switch consecutive {
case 1:
// Ignore
case 2:
total *= 2
case 3:
total *= 4
case 4:
total *= 7
default:
panic(fmt.Sprintf("consecutive: %d", consecutive))
}
consecutive = 0
}
default:
panic(fmt.Sprintf("gap size: %d", i))
}
}
return total
}
func parseInput(lines []string) []int {
is := []int{}
for _, line := range lines {
line = strings.TrimSpace(line)
if line == "" {
continue
}
is = append(is, aoc.Atoi(line))
}
return is
} | cmd/day10b/main.go | 0.561936 | 0.493531 | main.go | starcoder |
package templates
// MigrateReadme is the README template for the generated migration tool.
// The {{.Backtick}} placeholder stands in for a literal backtick, which a
// Go raw-string literal cannot contain.
const MigrateReadme = `# Migration Tools
Build and run:
{{.Backtick}}{{.Backtick}}{{.Backtick}}
cd migrate && go build -o "migrate"
migrate user:pass@tcp(localhost:3306)/dbname?parseTime=true&multiStatements=true /path/migrations/dir/
{{.Backtick}}{{.Backtick}}{{.Backtick}}
**Optional keys:**
- {{.Backtick}}-u{{.Backtick}} migration up. Default one step.
- {{.Backtick}}-d{{.Backtick}} rollback migration. Default one step.
- {{.Backtick}}-sn{{.Backtick}} - {{.Backtick}}n{{.Backtick}} migration steps.
- {{.Backtick}}-f{{.Backtick}} force the current version of the migration to be installed.
Running without optional keys will execute all required versions of the migration.
You cannot specify the keys {{.Backtick}}-u{{.Backtick}} and {{.Backtick}}-d{{.Backtick}} at the same time. Only {{.Backtick}}-u{{.Backtick}} will be executed.
## Naming Migration
Migration up:
{{.Backtick}}{{.Backtick}}{{.Backtick}}
[version]_[name].up.sql
{{.Backtick}}{{.Backtick}}{{.Backtick}}
Rollback migration:
{{.Backtick}}{{.Backtick}}{{.Backtick}}
[version]_[name].down.sql
{{.Backtick}}{{.Backtick}}{{.Backtick}}
{{.Backtick}}version{{.Backtick}} - unsigned integer. Usually as a date and time in the format {{.Backtick}}YYYYmmddHHii{{.Backtick}}.
{{.Backtick}}name{{.Backtick}} - the name of the migration for convenience.`
// MigrateMain is the Go source template for the generated migration tool.
//
// Fixed: the generated program calls fmt.Sprintf and filepath.Abs, but the
// template's import block was missing "fmt" and "path/filepath", so the
// generated source did not compile.
const MigrateMain = `package main
import (
	"database/sql"
	"fmt"
	"path/filepath"
	_ "github.com/go-sql-driver/mysql"
	"github.com/golang-migrate/migrate/v4"
	"github.com/golang-migrate/migrate/v4/database"
	"github.com/golang-migrate/migrate/v4/database/mysql"
	_ "github.com/golang-migrate/migrate/v4/source/file"
	log "github.com/sirupsen/logrus"
	"gopkg.in/alecthomas/kingpin.v2"
)
var (
	// Подключение к БД
	dbConn = kingpin.Arg("db", "Подключение к БД.").String()
	dir = kingpin.Arg("dir", "Путь к директории миграции.").String()
	up = kingpin.Flag("up", "Миграция вперед.").Short('u').Bool()
	down = kingpin.Flag("down", "Миграция назад.").Short('d').Bool()
	step = kingpin.Flag("step", "Количество шагов.").Short('s').Default("1").Int()
	force = kingpin.Flag("force", "Принудительно выполнить текущую version.").Bool()
)
func main() {
	var (
		dirFullPath string
		err error
		db *sql.DB
		driver database.Driver
		m *migrate.Migrate
		version uint
	)
	kingpin.HelpFlag.Short('h')
	kingpin.Parse()
	if dirFullPath, err = filepath.Abs(*dir); err != nil {
		log.Fatalln(err)
	}
	if *dbConn == "" {
		log.Fatalln("db variable not set.")
	}
	if db, err = sql.Open("mysql", *dbConn); err != nil {
		log.Fatalln(err)
	}
	defer func() {
		if derr := db.Close(); derr != nil {
			log.Fatal(derr)
		}
	}()
	driver, err = mysql.WithInstance(db, &mysql.Config{
		MigrationsTable: "schema_migrations",
		DatabaseName: "migrations",
	})
	if err != nil {
		log.Fatalln(err)
	}
	m, err = migrate.NewWithDatabaseInstance(
		fmt.Sprintf("file://%s", dirFullPath), "migrations", driver)
	if err != nil {
		log.Fatalln(err)
	}
	if *force {
		version, _, err = m.Version()
		if err != nil {
			log.Fatalln(err)
		}
		if err = m.Force(int(version)); err != nil {
			log.Fatalln(err)
		}
	}
	if *up {
		err = m.Steps(*step)
	} else if *down {
		err = m.Steps(-*step)
	} else {
		err = m.Up()
	}
	if err != nil {
		log.Fatalln(err)
	}
}
`
// MigrateUpSql is the sample "up" migration: it creates an example table.
const MigrateUpSql = `CREATE TABLE IF NOT EXISTS {{.Backtick}}table{{.Backtick}}
(
    {{.Backtick}}id{{.Backtick}} bigint(20) NOT NULL AUTO_INCREMENT PRIMARY KEY,
    {{.Backtick}}host{{.Backtick}} varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL UNIQUE,
    {{.Backtick}}success{{.Backtick}} boolean NOT NULL,
    {{.Backtick}}created_at{{.Backtick}} datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
    {{.Backtick}}updated_at{{.Backtick}} datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
    {{.Backtick}}deleted_at{{.Backtick}} datetime DEFAULT NULL
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8mb4
  COLLATE = utf8mb4_unicode_ci;`

// MigrateDownSql is the sample "down" migration: it drops the example table.
const MigrateDownSql = `DROP TABLE IF EXISTS {{.Backtick}}table{{.Backtick}};`
package utils
// ToInt32Slice converts a slice of any supported numeric Go type into a
// []int32 via plain conversions, so values may truncate or overflow
// silently. A []int32 input is returned unchanged (no copy); unsupported
// input types produce nil.
func ToInt32Slice(input interface{}) []int32 {
	switch src := input.(type) {
	case []int32:
		return src
	case []float32:
		out := make([]int32, len(src))
		for i, v := range src {
			out[i] = int32(v)
		}
		return out
	case []int8:
		out := make([]int32, len(src))
		for i, v := range src {
			out[i] = int32(v)
		}
		return out
	case []int64:
		out := make([]int32, len(src))
		for i, v := range src {
			out[i] = int32(v)
		}
		return out
	case []uint32:
		out := make([]int32, len(src))
		for i, v := range src {
			out[i] = int32(v)
		}
		return out
	case []uint64:
		out := make([]int32, len(src))
		for i, v := range src {
			out[i] = int32(v)
		}
		return out
	case []int:
		out := make([]int32, len(src))
		for i, v := range src {
			out[i] = int32(v)
		}
		return out
	case []uint:
		out := make([]int32, len(src))
		for i, v := range src {
			out[i] = int32(v)
		}
		return out
	case []int16:
		out := make([]int32, len(src))
		for i, v := range src {
			out[i] = int32(v)
		}
		return out
	case []uint16:
		out := make([]int32, len(src))
		for i, v := range src {
			out[i] = int32(v)
		}
		return out
	case []float64:
		out := make([]int32, len(src))
		for i, v := range src {
			out[i] = int32(v)
		}
		return out
	case []uint8:
		out := make([]int32, len(src))
		for i, v := range src {
			out[i] = int32(v)
		}
		return out
	}
	return nil
}
// ToUint8Slice converts a slice of any supported numeric Go type into a
// []uint8 via plain conversions, so values may truncate or overflow
// silently. A []uint8 input is returned unchanged (no copy); unsupported
// input types produce nil.
func ToUint8Slice(input interface{}) []uint8 {
	switch src := input.(type) {
	case []uint8:
		return src
	case []float32:
		out := make([]uint8, len(src))
		for i, v := range src {
			out[i] = uint8(v)
		}
		return out
	case []int32:
		out := make([]uint8, len(src))
		for i, v := range src {
			out[i] = uint8(v)
		}
		return out
	case []int64:
		out := make([]uint8, len(src))
		for i, v := range src {
			out[i] = uint8(v)
		}
		return out
	case []uint32:
		out := make([]uint8, len(src))
		for i, v := range src {
			out[i] = uint8(v)
		}
		return out
	case []uint64:
		out := make([]uint8, len(src))
		for i, v := range src {
			out[i] = uint8(v)
		}
		return out
	case []int:
		out := make([]uint8, len(src))
		for i, v := range src {
			out[i] = uint8(v)
		}
		return out
	case []uint:
		out := make([]uint8, len(src))
		for i, v := range src {
			out[i] = uint8(v)
		}
		return out
	case []int16:
		out := make([]uint8, len(src))
		for i, v := range src {
			out[i] = uint8(v)
		}
		return out
	case []uint16:
		out := make([]uint8, len(src))
		for i, v := range src {
			out[i] = uint8(v)
		}
		return out
	case []float64:
		out := make([]uint8, len(src))
		for i, v := range src {
			out[i] = uint8(v)
		}
		return out
	case []int8:
		out := make([]uint8, len(src))
		for i, v := range src {
			out[i] = uint8(v)
		}
		return out
	}
	return nil
}
// ToInt8Slice converts a slice of any supported numeric Go type into a
// []int8 via plain conversions, so values may truncate or overflow
// silently. An []int8 input is returned unchanged (no copy); unsupported
// input types produce nil.
func ToInt8Slice(input interface{}) []int8 {
	switch src := input.(type) {
	case []int8:
		return src
	case []float32:
		out := make([]int8, len(src))
		for i, v := range src {
			out[i] = int8(v)
		}
		return out
	case []int32:
		out := make([]int8, len(src))
		for i, v := range src {
			out[i] = int8(v)
		}
		return out
	case []int64:
		out := make([]int8, len(src))
		for i, v := range src {
			out[i] = int8(v)
		}
		return out
	case []uint32:
		out := make([]int8, len(src))
		for i, v := range src {
			out[i] = int8(v)
		}
		return out
	case []uint64:
		out := make([]int8, len(src))
		for i, v := range src {
			out[i] = int8(v)
		}
		return out
	case []int:
		out := make([]int8, len(src))
		for i, v := range src {
			out[i] = int8(v)
		}
		return out
	case []uint:
		out := make([]int8, len(src))
		for i, v := range src {
			out[i] = int8(v)
		}
		return out
	case []int16:
		out := make([]int8, len(src))
		for i, v := range src {
			out[i] = int8(v)
		}
		return out
	case []uint16:
		out := make([]int8, len(src))
		for i, v := range src {
			out[i] = int8(v)
		}
		return out
	case []float64:
		out := make([]int8, len(src))
		for i, v := range src {
			out[i] = int8(v)
		}
		return out
	case []uint8:
		out := make([]int8, len(src))
		for i, v := range src {
			out[i] = int8(v)
		}
		return out
	}
	return nil
}
// ToFLoat64Slice converts a slice of any supported numeric Go type into a
// []float64 via plain conversions. A []float64 input is returned unchanged
// (no copy); unsupported input types produce nil.
// NOTE(review): "FLoat" is a typo in the exported name, kept for backward
// compatibility with existing callers.
func ToFLoat64Slice(input interface{}) []float64 {
	switch src := input.(type) {
	case []float64:
		return src
	case []float32:
		out := make([]float64, len(src))
		for i, v := range src {
			out[i] = float64(v)
		}
		return out
	case []int32:
		out := make([]float64, len(src))
		for i, v := range src {
			out[i] = float64(v)
		}
		return out
	case []int64:
		out := make([]float64, len(src))
		for i, v := range src {
			out[i] = float64(v)
		}
		return out
	case []uint32:
		out := make([]float64, len(src))
		for i, v := range src {
			out[i] = float64(v)
		}
		return out
	case []uint64:
		out := make([]float64, len(src))
		for i, v := range src {
			out[i] = float64(v)
		}
		return out
	case []int:
		out := make([]float64, len(src))
		for i, v := range src {
			out[i] = float64(v)
		}
		return out
	case []uint:
		out := make([]float64, len(src))
		for i, v := range src {
			out[i] = float64(v)
		}
		return out
	case []int16:
		out := make([]float64, len(src))
		for i, v := range src {
			out[i] = float64(v)
		}
		return out
	case []uint16:
		out := make([]float64, len(src))
		for i, v := range src {
			out[i] = float64(v)
		}
		return out
	case []int8:
		out := make([]float64, len(src))
		for i, v := range src {
			out[i] = float64(v)
		}
		return out
	case []uint8:
		out := make([]float64, len(src))
		for i, v := range src {
			out[i] = float64(v)
		}
		return out
	}
	return nil
}
// ToFloat32Slice converts a slice of any supported numeric Go type into a
// []float32 via plain conversions, so precision may be lost. A []float32
// input is returned unchanged (no copy); unsupported input types produce
// nil.
func ToFloat32Slice(input interface{}) []float32 {
	switch src := input.(type) {
	case []float32:
		return src
	case []float64:
		out := make([]float32, len(src))
		for i, v := range src {
			out[i] = float32(v)
		}
		return out
	case []int32:
		out := make([]float32, len(src))
		for i, v := range src {
			out[i] = float32(v)
		}
		return out
	case []int64:
		out := make([]float32, len(src))
		for i, v := range src {
			out[i] = float32(v)
		}
		return out
	case []uint32:
		out := make([]float32, len(src))
		for i, v := range src {
			out[i] = float32(v)
		}
		return out
	case []uint64:
		out := make([]float32, len(src))
		for i, v := range src {
			out[i] = float32(v)
		}
		return out
	case []int:
		out := make([]float32, len(src))
		for i, v := range src {
			out[i] = float32(v)
		}
		return out
	case []uint:
		out := make([]float32, len(src))
		for i, v := range src {
			out[i] = float32(v)
		}
		return out
	case []int16:
		out := make([]float32, len(src))
		for i, v := range src {
			out[i] = float32(v)
		}
		return out
	case []uint16:
		out := make([]float32, len(src))
		for i, v := range src {
			out[i] = float32(v)
		}
		return out
	case []int8:
		out := make([]float32, len(src))
		for i, v := range src {
			out[i] = float32(v)
		}
		return out
	case []uint8:
		out := make([]float32, len(src))
		for i, v := range src {
			out[i] = float32(v)
		}
		return out
	}
	return nil
}
package redblack
import (
"math"
"sync"
)
// NewRedBlackTree returns a new red-black tree. All operations on the tree
// are safe to be accessed concurrently.
func NewRedBlackTree() *Tree {
	nilNode := &node{color: black, payload: "sentinel"}
	t := &Tree{
		lock:     sync.RWMutex{},
		root:     nilNode,
		sentinel: nilNode,
	}
	return t
}
// Root returns the payload of the root node of the tree, or nil when the
// tree is empty (root is the sentinel).
func (t *Tree) Root() interface{} {
	t.lock.RLock()
	defer t.lock.RUnlock()
	root := t.root
	if root != t.sentinel {
		return root.payload
	}
	return nil
}
// Height returns the height (max depth) of the tree. Returns -1 if the tree
// has no nodes. A (rooted) tree with only a single node has a height of zero.
func (t *Tree) Height() int {
	t.lock.RLock()
	defer t.lock.RUnlock()
	return int(t.height(t.root))
}

// Min returns the payload of the lowest key, or nil.
func (t *Tree) Min() interface{} {
	t.lock.RLock()
	defer t.lock.RUnlock()
	// min yields the sentinel for an empty tree.
	n := t.min(t.root)
	if n == t.sentinel {
		return nil
	}
	return n.payload
}

// Max returns the payload of the highest key, or nil.
func (t *Tree) Max() interface{} {
	t.lock.RLock()
	defer t.lock.RUnlock()
	// max yields the sentinel for an empty tree.
	n := t.max(t.root)
	if n == t.sentinel {
		return nil
	}
	return n.payload
}
// Search returns the payload for a given key, or nil.
func (t *Tree) Search(key Key) interface{} {
	t.lock.RLock()
	defer t.lock.RUnlock()
	// Defensive check; after NewRedBlackTree the root is the sentinel,
	// never nil.
	if t.root == nil {
		return nil
	}
	n := t.search(t.root, key)
	// The sentinel signals "key not found".
	if n == t.sentinel {
		return nil
	}
	return n.payload
}
// Successor returns the payload of the next highest neighbour (key-wise) of
// the passed key, or nil if there is none.
//
// Fixed: t.successor signals "no successor" either as nil (the key resolves
// to the sentinel) or as the sentinel node itself (the key is the maximum —
// the parent-climbing loop terminates at the sentinel). The previous code
// only checked for nil, so asking for the successor of the largest key
// returned the sentinel's placeholder payload ("sentinel") instead of nil.
func (t *Tree) Successor(key Key) interface{} {
	t.lock.RLock()
	defer t.lock.RUnlock()
	n := t.successor(t.search(t.root, key))
	if n == nil || n == t.sentinel {
		return nil
	}
	return n.payload
}
// height recursively computes the height of the subtree rooted at node;
// the empty subtree (the sentinel) has height -1.
func (t *Tree) height(node *node) float64 {
	if node == t.sentinel {
		return -1
	}
	return 1 + math.Max(t.height(node.left), t.height(node.right))
}
// successor returns the in-order successor of z: nil if z is the sentinel,
// the sentinel if z holds the maximum key, otherwise the next node in key
// order.
func (t *Tree) successor(z *node) *node {
	if z == t.sentinel {
		return nil
	}
	// If z has a right subtree, the successor is its smallest node.
	if z.right != t.sentinel {
		return t.min(z.right)
	}
	// Otherwise climb until we arrive from a left child; that parent is
	// the first ancestor greater than z.
	parent := z.parent
	for parent != t.sentinel && z == parent.right {
		z = parent
		parent = z.parent
	}
	return parent
}

// min returns the leftmost (smallest-key) node of the subtree rooted at z,
// or the sentinel if the subtree is empty.
func (t *Tree) min(z *node) *node {
	for z != t.sentinel && z.left != t.sentinel {
		z = z.left
	}
	return z
}

// max returns the rightmost (largest-key) node of the subtree rooted at z,
// or the sentinel if the subtree is empty.
func (t *Tree) max(z *node) *node {
	for z != t.sentinel && z.right != t.sentinel {
		z = z.right
	}
	return z
}
// search walks the subtree rooted at z looking for key, descending right
// when z's key is less than the target and left otherwise. It returns the
// matching node, or the sentinel when the key is not present.
//
// Simplified: the previous leading base-case check duplicated the loop
// condition exactly, so it has been removed; behavior is unchanged.
func (t *Tree) search(z *node, key Key) *node {
	for z != t.sentinel && z.key != key {
		if z.key.Less(key) {
			z = z.right
		} else {
			z = z.left
		}
	}
	return z
}
// rotateLeft performs a left rotation around x: x's right child y becomes
// the new root of the subtree, x becomes y's left child, and y's former
// left subtree is re-attached as x's right subtree. Parent links are
// restored so the rest of the tree stays consistent.
func (t *Tree) rotateLeft(x *node) {
	// y's left subtree will be x's right subtree.
	y := x.right
	x.right = y.left
	if y.left != t.sentinel {
		y.left.parent = x
	}
	// Restore parent relationships.
	y.parent = x.parent
	switch {
	case x.parent == t.sentinel:
		// x was the root, so y takes its place.
		t.root = y
	case x.parent.left == x:
		x.parent.left = y
	default:
		x.parent.right = y
	}
	// x will be y's new left-child.
	y.left = x
	x.parent = y
}
// rotateRight is the mirror image of rotateLeft: x's left child y becomes
// the new root of the subtree, x becomes y's right child, and y's former
// right subtree is re-attached as x's left subtree.
func (t *Tree) rotateRight(x *node) {
	// y's right subtree will be x's left subtree.
	y := x.left
	x.left = y.right
	if y.right != t.sentinel {
		y.right.parent = x
	}
	// Restore parent relationships.
	y.parent = x.parent
	switch {
	case x.parent == t.sentinel:
		// x was the root, so y takes its place.
		t.root = y
	case x.parent.left == x:
		x.parent.left = y
	default:
		x.parent.right = y
	}
	// x will be y's new right-child.
	y.right = x
	x.parent = y
}
func (t *Tree) newLeaf(key Key, p interface{}) *node {
return &node{
key: key,
payload: p,
left: t.sentinel,
right: t.sentinel,
}
}
func (t *Tree) isLeaf(z *node) bool {
return z.left == t.sentinel && z.right == t.sentinel
} | redblack/tree.go | 0.871803 | 0.489564 | tree.go | starcoder |
package sketchy
import (
"math/rand"
"github.com/ojrac/opensimplex-go"
)
const (
defaultScale = 0.001
defaultOctaves = 1
defaultPersistence = 0.9
defaultLacunarity = 2.0
)
// Pseudo-random number generator data
type Rng struct {
seed int64
noise opensimplex.Noise
octaves int
persistence float64
lacunarity float64
xscale float64
yscale float64
zscale float64
xoffset float64
yoffset float64
zoffset float64
}
// Returns a PRNG with a system and noise generator
func NewRng(i int64) Rng {
rand.Seed(i)
return Rng{
seed: i,
noise: opensimplex.New(i),
octaves: defaultOctaves,
persistence: defaultPersistence,
lacunarity: defaultLacunarity,
xscale: defaultScale,
yscale: defaultScale,
zscale: defaultScale,
xoffset: 0,
yoffset: 0,
zoffset: 0,
}
}
// Sets the seed for both the system and opensimplex PRNG
func (r *Rng) SetSeed(i int64) {
rand.Seed(i)
r.seed = i
r.noise = opensimplex.New(i)
}
func (r *Rng) Gaussian(mean float64, stdev float64) float64 {
return rand.NormFloat64()*stdev + mean
}
// The noise scale functions scale the position values passed into the
// noise PRNG. Typically for screen coordinates scale values in the
// range of 0.001 to 0.01 produce visually appealing noise
// Scales the x position in noise calculations
func (r *Rng) SetNoiseScaleX(scale float64) {
r.xscale = scale
}
// Scales the y position in noise calculations
func (r *Rng) SetNoiseScaleY(scale float64) {
r.yscale = scale
}
// Scales the z position in noise calculations
func (r *Rng) SetNoiseScaleZ(scale float64) {
r.zscale = scale
}
// The noise offset functions simple increment/decrement the
// position values before scaling
// Offsets the x position in noise calculations
func (r *Rng) SetNoiseOffsetX(offset float64) {
r.xoffset = offset
}
// Offsets the y position in noise calculations
func (r *Rng) SetNoiseOffsetY(offset float64) {
r.yoffset = offset
}
// Offsets the z position in noise calculations
func (r *Rng) SetNoiseOffsetZ(offset float64) {
r.zoffset = offset
}
// Number of steps when calculating fractal noise
func (r *Rng) SetNoiseOctaves(i int) {
r.octaves = i
}
// How amplitude scales with octaves
func (r *Rng) SetNoisePersistence(p float64) {
r.persistence = p
}
// How frequency scales with octaves
func (r *Rng) SetNoiseLacunarity(l float64) {
r.lacunarity = l
}
// SignedNoise1D generates 1D noise values in the range of [-1, 1]
func (r *Rng) SignedNoise1D(x float64) float64 {
return r.calcNoise(x, 0, 0)
}
// SignedNoise2D generates 2D noise values in the range of [-1, 1]
func (r *Rng) SignedNoise2D(x float64, y float64) float64 {
return r.calcNoise(x, y, 0)
}
// SignedNoise3D generates 3D noise values in the range of [-1, 1]
func (r *Rng) SignedNoise3D(x float64, y float64, z float64) float64 {
return r.calcNoise(x, y, z)
}
// Noise1D 1D noise values in the range of [0, 1]
func (r *Rng) Noise1D(x float64) float64 {
return Map(-1, 1, 0, 1, r.calcNoise(x, 0, 0))
}
// Noise2D generates 2D noise values in the range of [0, 1]
func (r *Rng) Noise2D(x float64, y float64) float64 {
return Map(-1, 1, 0, 1, r.calcNoise(x, y, 0))
}
// Noise3D generates 3D noise values in the range of [0, 1]
func (r *Rng) Noise3D(x float64, y float64, z float64) float64 {
return Map(-1, 1, 0, 1, r.calcNoise(x, y, z))
}
// UniformRandomPoints generates a list of points whose coordinates
// follow a uniform random distribution within a rectangle
func (r *Rng) UniformRandomPoints(num int, rect Rect) []Point {
points := make([]Point, num)
for i := 0; i < num; i++ {
x := rect.X + rand.Float64()*rect.W
y := rect.Y + rand.Float64()*rect.H
points[i] = Point{X: x, Y: y}
}
return points
}
// NoisyRandomPoints generates up to num uniformly random points inside rect,
// keeping only those whose 2D noise value is at least threshold. To bound
// the rejection sampling, it gives up after 10*num attempts, so fewer than
// num points may be returned when the threshold is high.
func (r *Rng) NoisyRandomPoints(num int, threshold float64, rect Rect) []Point {
	var points []Point
	maxtries := 10 * num
	i := 0
	for len(points) < num && i < maxtries {
		x := rect.X + rand.Float64()*rect.W
		y := rect.Y + rand.Float64()*rect.H
		noise := r.Noise2D(x, y)
		if noise >= threshold {
			points = append(points, Point{X: x, Y: y})
		}
		i++
	}
	return points
}
func (r *Rng) calcNoise(x, y, z float64) float64 {
totalNoise := 0.0
totalAmp := 0.0
amp := 1.0
freq := 1.0
for i := 0; i < r.octaves; i++ {
totalNoise += r.noise.Eval3(
(x+r.xoffset)*r.xscale*freq,
(y+r.yoffset)*r.yscale*freq,
(z+r.zoffset)*r.zscale*freq,
)
totalAmp += amp
amp *= r.persistence
freq *= r.lacunarity
}
return totalNoise / totalAmp
} | random.go | 0.785843 | 0.54256 | random.go | starcoder |
package processor
import (
"errors"
"fmt"
"time"
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/opentracing/opentracing-go"
"github.com/trivago/grok"
)
//------------------------------------------------------------------------------
func init() {
Constructors[TypeGrok] = TypeSpec{
constructor: NewGrok,
Categories: []Category{
CategoryParsing,
},
Summary: `
Parses messages into a structured format by attempting to apply a list of Grok
patterns, if a pattern returns at least one value a resulting structured object
is created according to the chosen output format.`,
Description: `
Currently only json is a supported output format.
Type hints within patterns are respected, therefore with the pattern
` + "`%{WORD:first},%{INT:second:int}`" + ` and a payload of ` + "`foo,1`" + `
the resulting payload would be ` + "`{\"first\":\"foo\",\"second\":1}`" + `.
### Performance
This processor currently uses the [Go RE2](https://golang.org/s/re2syntax)
regular expression engine, which is guaranteed to run in time linear to the size
of the input. However, this property often makes it less performant than pcre
based implementations of grok. For more information see
[https://swtch.com/~rsc/regexp/regexp1.html](https://swtch.com/~rsc/regexp/regexp1.html).`,
FieldSpecs: docs.FieldSpecs{
docs.FieldCommon("patterns", "A list of patterns to attempt against the incoming messages."),
docs.FieldCommon("pattern_definitions", "A map of pattern definitions that can be referenced within `patterns`."),
docs.FieldCommon("output_format", "The structured output format.").HasOptions("json"),
docs.FieldAdvanced("named_captures_only", "Whether to only capture values from named patterns."),
docs.FieldAdvanced("use_default_patterns", "Whether to use a [default set of patterns](#default-patterns)."),
docs.FieldAdvanced("remove_empty_values", "Whether to remove values that are empty from the resulting structure."),
partsFieldSpec,
},
Examples: []docs.AnnotatedExample{
{
Title: "VPC Flow Logs",
Summary: `
Grok can be used to parse unstructured logs such as VPC flow logs that look like this:
` + "```text" + `
2 123456789010 eni-1235b8ca123456789 172.31.16.139 172.31.16.21 20641 22 6 20 4249 1418530010 1418530070 ACCEPT OK
` + "```" + `
Into structured objects that look like this:
` + "```json" + `
{"accountid":"123456789010","action":"ACCEPT","bytes":4249,"dstaddr":"172.31.16.21","dstport":22,"end":1418530070,"interfaceid":"eni-1235b8ca123456789","logstatus":"OK","packets":20,"protocol":6,"srcaddr":"172.31.16.139","srcport":20641,"start":1418530010,"version":2}
` + "```" + `
With the following config:`,
Config: `
pipeline:
processors:
- grok:
output_format: json
patterns:
- '%{VPCFLOWLOG}'
pattern_definitions:
VPCFLOWLOG: '%{NUMBER:version:int} %{NUMBER:accountid} %{NOTSPACE:interfaceid} %{NOTSPACE:srcaddr} %{NOTSPACE:dstaddr} %{NOTSPACE:srcport:int} %{NOTSPACE:dstport:int} %{NOTSPACE:protocol:int} %{NOTSPACE:packets:int} %{NOTSPACE:bytes:int} %{NUMBER:start:int} %{NUMBER:end:int} %{NOTSPACE:action} %{NOTSPACE:logstatus}'
`,
},
},
Footnotes: `
## Default Patterns
A summary of the default patterns on offer can be [found here](https://github.com/trivago/grok/blob/master/patterns.go#L5).`,
}
}
//------------------------------------------------------------------------------
// GrokConfig contains configuration fields for the Grok processor.
type GrokConfig struct {
Parts []int `json:"parts" yaml:"parts"`
Patterns []string `json:"patterns" yaml:"patterns"`
RemoveEmpty bool `json:"remove_empty_values" yaml:"remove_empty_values"`
NamedOnly bool `json:"named_captures_only" yaml:"named_captures_only"`
UseDefaults bool `json:"use_default_patterns" yaml:"use_default_patterns"`
To string `json:"output_format" yaml:"output_format"`
PatternDefinitions map[string]string `json:"pattern_definitions" yaml:"pattern_definitions"`
}
// NewGrokConfig returns a GrokConfig populated with the default settings:
// empty parts/patterns, JSON output, and empty values / unnamed captures
// removed, with the default pattern set enabled.
func NewGrokConfig() GrokConfig {
	var conf GrokConfig
	conf.Parts = []int{}
	conf.Patterns = []string{}
	conf.RemoveEmpty = true
	conf.NamedOnly = true
	conf.UseDefaults = true
	conf.To = "json"
	conf.PatternDefinitions = make(map[string]string)
	return conf
}
//------------------------------------------------------------------------------
// Grok is a processor that executes Grok queries on a message part and replaces
// the contents with the result.
type Grok struct {
parts []int
gparsers []*grok.CompiledGrok
conf Config
log log.Modular
stats metrics.Type
mCount metrics.StatCounter
mErrGrok metrics.StatCounter
mErrJSONS metrics.StatCounter
mErr metrics.StatCounter
mSent metrics.StatCounter
mBatchSent metrics.StatCounter
}
// NewGrok returns a Grok processor.
func NewGrok(
conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
gcompiler, err := grok.New(grok.Config{
RemoveEmptyValues: conf.Grok.RemoveEmpty,
NamedCapturesOnly: conf.Grok.NamedOnly,
SkipDefaultPatterns: !conf.Grok.UseDefaults,
Patterns: conf.Grok.PatternDefinitions,
})
if err != nil {
return nil, fmt.Errorf("failed to create grok compiler: %v", err)
}
var compiled []*grok.CompiledGrok
for _, pattern := range conf.Grok.Patterns {
var gcompiled *grok.CompiledGrok
if gcompiled, err = gcompiler.Compile(pattern); err != nil {
return nil, fmt.Errorf("failed to compile Grok pattern '%v': %v", pattern, err)
}
compiled = append(compiled, gcompiled)
}
g := &Grok{
parts: conf.Grok.Parts,
gparsers: compiled,
conf: conf,
log: log,
stats: stats,
mCount: stats.GetCounter("count"),
mErrGrok: stats.GetCounter("error.grok_no_matches"),
mErrJSONS: stats.GetCounter("error.json_set"),
mErr: stats.GetCounter("error"),
mSent: stats.GetCounter("sent"),
mBatchSent: stats.GetCounter("batch.sent"),
}
return g, nil
}
//------------------------------------------------------------------------------
// ProcessMessage applies the processor to a message, either creating >0
// resulting messages or a response to be sent back to the message source.
func (g *Grok) ProcessMessage(msg types.Message) ([]types.Message, types.Response) {
g.mCount.Incr(1)
newMsg := msg.Copy()
proc := func(index int, span opentracing.Span, part types.Part) error {
body := part.Get()
var values map[string]interface{}
for _, compiler := range g.gparsers {
var err error
if values, err = compiler.ParseTyped(body); err != nil {
g.log.Debugf("Failed to parse body: %v\n", err)
continue
}
if len(values) > 0 {
break
}
}
if len(values) == 0 {
g.mErrGrok.Incr(1)
g.mErr.Incr(1)
g.log.Debugf("No matches found for payload: %s\n", body)
return errors.New("no pattern matches found")
}
if err := newMsg.Get(index).SetJSON(values); err != nil {
g.mErrJSONS.Incr(1)
g.mErr.Incr(1)
g.log.Debugf("Failed to convert grok result into json: %v\n", err)
return err
}
return nil
}
IteratePartsWithSpan(TypeGrok, g.parts, newMsg, proc)
msgs := [1]types.Message{newMsg}
g.mBatchSent.Incr(1)
g.mSent.Incr(int64(newMsg.Len()))
return msgs[:], nil
}
// CloseAsync shuts down the processor and stops processing requests.
func (g *Grok) CloseAsync() {
}
// WaitForClose blocks until the processor has closed down.
func (g *Grok) WaitForClose(timeout time.Duration) error {
return nil
}
//------------------------------------------------------------------------------ | lib/processor/grok.go | 0.792504 | 0.662943 | grok.go | starcoder |
package reconcile
import (
"k8s.io/apimachinery/pkg/types"
)
// Result contains the result of a Reconcile invocation.
type Result struct {
// Requeue tells the Controller to requeue the reconcile key. Defaults to false.
Requeue bool
}
// Request contains the information necessary to reconcile a Kubernetes object. This includes the
// information to uniquely identify the object - its Name and Namespace. It does NOT contain information about
// any specific Event or the object contents itself.
type Request struct {
// NamespacedName is the name and namespace of the object to reconcile.
types.NamespacedName
}
/*
Reconcile implements a Kubernetes API for a specific Resource by Creating, Updating or Deleting Kubernetes
objects, or by making changes to systems external to the cluster (e.g. cloudproviders, github, etc).

Reconcile implementations compare the state specified in an object by a user against the actual cluster state,
and then perform operations to make the actual cluster state reflect the state specified by the user.

Typically, Reconcile is triggered by a Controller in response to cluster Events (e.g. Creating, Updating,
Deleting Kubernetes objects) or external Events (GitHub Webhooks, polling external sources, etc).

Example Reconcile logic:

* Read an object and all the Pods it owns.
* Observe that the object spec specifies 5 replicas but the actual cluster contains only 1 Pod replica.
* Create 4 Pods and set their OwnerReferences to the object.

Reconcile may be implemented as either a type:

	type reconciler struct {}

	func (reconciler) Reconcile(reconcile.Request) (reconcile.Result, error) {
		// Implement business logic of reading and writing objects here
		return reconcile.Result{}, nil
	}

Or as a function:

	reconcile.Func(func(o reconcile.Request) (reconcile.Result, error) {
		// Implement business logic of reading and writing objects here
		return reconcile.Result{}, nil
	})

Reconciliation is level-based, meaning action isn't driven off changes in individual Events, but instead is
driven by actual cluster state read from the apiserver or a local cache.
For example if responding to a Pod Delete Event, the Request won't contain that a Pod was deleted,
instead the Reconcile implementation observes this when reading the cluster state and seeing the Pod as missing.
*/
type Reconcile interface {
	// Reconcile performs a full reconciliation for the object referred to by the Request.
	Reconcile(Request) (Result, error)
}
// Func is a function that implements the reconcile interface.
type Func func(Request) (Result, error)
var _ Reconcile = Func(nil)
// Reconcile implements Reconcile.
func (r Func) Reconcile(o Request) (Result, error) { return r(o) } | examples/godocbot/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go | 0.780453 | 0.575469 | reconcile.go | starcoder |
package xyzm
// Cell is a 128-bit integer that interleaves four coordinates.
type Cell struct {
	Hi uint64
	Lo uint64
}

// String renders the cell as 32 lowercase hexadecimal digits: the high
// word first, most significant nibble first.
func (c Cell) String() string {
	const digits = "0123456789abcdef"
	var out [32]byte
	for i := 0; i < 16; i++ {
		shift := uint(60 - 4*i)
		out[i] = digits[(c.Hi>>shift)&0xf]
		out[16+i] = digits[(c.Lo>>shift)&0xf]
	}
	return string(out[:])
}
// The maximum coord that is less than 1.0. Equal to math.Nextafter(1, 0).
const maxCoord = 0.99999999999999988897769753748434595763683319091796875

// clip clamps x into the half-open unit interval [0, 1): values below zero
// snap to 0 and values above maxCoord snap down to maxCoord.
func clip(x float64) float64 {
	switch {
	case x < 0:
		return 0
	case x > maxCoord:
		return maxCoord
	default:
		return x
	}
}
// Encode returns an encoded Cell from X/Y/Z/M floating points.
// The input floating points must be within the range [0.0,1.0).
// Values outside that range are clipped.
func Encode(x, y, z, m float64) Cell {
	// Produce 32-bit integers for X/Y/Z/M -> A/B/C/D
	// (each coordinate is quantized to 32 bits of precision).
	a := uint32(clip(x) * (1 << 32))
	b := uint32(clip(y) * (1 << 32))
	c := uint32(clip(z) * (1 << 32))
	d := uint32(clip(m) * (1 << 32))
	// Interleave A/C and B/D into 64-bit integers AC and BD
	ac := interleave(a)<<1 | interleave(c)
	bd := interleave(b)<<1 | interleave(d)
	// Interleave AC/BD into a single 128-bit ABCD (hi/lo) integer,
	// yielding a Morton-style bit ordering over all four axes.
	hi := interleave(uint32(ac>>32))<<1 | interleave(uint32(bd>>32))
	lo := interleave(uint32(ac))<<1 | interleave(uint32(bd))
	return Cell{Hi: hi, Lo: lo}
}
// Decode returns the decoded values from a cell.
func Decode(cell Cell) (x, y, z, m float64) {
// Decoding is the inverse of the Encode logic.
ac := (uint64(deinterleave(cell.Hi>>1)) << 32) |
uint64(deinterleave(cell.Lo>>1))
bd := (uint64(deinterleave(cell.Hi)) << 32) |
uint64(deinterleave(cell.Lo))
a := deinterleave(ac >> 1)
b := deinterleave(bd >> 1)
c := deinterleave(ac)
d := deinterleave(bd)
x = float64(a) / (1 << 32)
y = float64(b) / (1 << 32)
z = float64(c) / (1 << 32)
m = float64(d) / (1 << 32)
return x, y, z, m
}
// CellFromString returns the decoded values from a cell string.
func CellFromString(s string) Cell {
const tbl = "" +
"------------------------------------------------" +
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09" +
"-------" +
"\x0A\x0B\x0C\x0D\x0E\x0F" +
"--------------------------" +
"\x0A\x0B\x0C\x0D\x0E\x0F" +
"---------------------------------------------------------" +
"---------------------------------------------------------" +
"---------------------------------------"
var cell Cell
for i := 0; i < len(s) && i < 16; i++ {
cell.Hi = (cell.Hi << 4) | uint64(tbl[s[i]])
}
for i := 16; i < len(s) && i < 32; i++ {
cell.Lo = (cell.Lo << 4) | uint64(tbl[s[i]])
}
return cell
}
// interleave spreads the 32 bits of input across the even bit positions of
// a 64-bit word: bit i of the input lands at bit 2*i of the result, and
// every odd bit position is left zero. Shift-and-mask spreading technique
// thanks to Daniel Lemire's blog entry:
// https://lemire.me/blog/2018/01/08/how-fast-can-you-bit-interleave-32-bit-integers/
func interleave(input uint32) uint64 {
	masks := [5]uint64{
		0x0000ffff0000ffff,
		0x00ff00ff00ff00ff,
		0x0f0f0f0f0f0f0f0f,
		0x3333333333333333,
		0x5555555555555555,
	}
	w := uint64(input)
	shift := uint(16)
	for _, m := range masks {
		w = (w ^ (w << shift)) & m
		shift >>= 1
	}
	return w
}
// deinterleave is the inverse of interleave: it gathers the even bit
// positions of word (bit 2*i) back into a packed 32-bit value (bit i).
// Any bits in odd positions are discarded.
func deinterleave(word uint64) uint32 {
	masks := [5]uint64{
		0x3333333333333333,
		0x0f0f0f0f0f0f0f0f,
		0x00ff00ff00ff00ff,
		0x0000ffff0000ffff,
		0x00000000ffffffff,
	}
	w := word & 0x5555555555555555
	shift := uint(1)
	for _, m := range masks {
		w = (w ^ (w >> shift)) & m
		shift <<= 1
	}
	return uint32(w)
}
package geojson
import (
"github.com/tidwall/geojson/geometry"
)
// Clip clips the contents of a geojson object and return
func Clip(
obj Object, clipper Object, opts *geometry.IndexOptions,
) (clipped Object) {
switch obj := obj.(type) {
case *Point:
return clipPoint(obj, clipper, opts)
case *Rect:
return clipRect(obj, clipper, opts)
case *LineString:
return clipLineString(obj, clipper, opts)
case *Polygon:
return clipPolygon(obj, clipper, opts)
case *Circle:
return NewClippedCircle(obj, clipper, opts)
case *Feature:
return clipFeature(obj, clipper, opts)
case Collection:
return clipCollection(obj, clipper, opts)
}
return obj
}
// clipSegment is Cohen-Sutherland Line Clipping
// https://www.cs.helsinki.fi/group/goa/viewing/leikkaus/lineClip.html
//
// It clips seg against rect, returning the clipped segment, or
// rejected=true when the segment lies entirely outside the rectangle.
// An endpoint outside the rectangle is moved to the intersection with the
// violated boundary edge, and the function recurses until the segment is
// either trivially accepted or trivially rejected.
func clipSegment(seg geometry.Segment, rect geometry.Rect) (
	res geometry.Segment, rejected bool,
) {
	startCode := getCode(rect, seg.A)
	endCode := getCode(rect, seg.B)
	if (startCode | endCode) == 0 {
		// trivially accept: both endpoints are inside the rectangle
		res = seg
	} else if (startCode & endCode) != 0 {
		// trivially reject: both endpoints share an outside half-plane
		rejected = true
	} else if startCode != 0 {
		// start is outside. get new start.
		newStart := intersect(rect, startCode, seg.A, seg.B)
		res, rejected =
			clipSegment(geometry.Segment{A: newStart, B: seg.B}, rect)
	} else {
		// end is outside. get new end.
		newEnd := intersect(rect, endCode, seg.A, seg.B)
		res, rejected = clipSegment(geometry.Segment{A: seg.A, B: newEnd}, rect)
	}
	return
}
// clipRing is Sutherland-Hodgman Polygon Clipping
// https://www.cs.helsinki.fi/group/goa/viewing/leikkaus/intro2.html
func clipRing(ring []geometry.Point, bbox geometry.Rect) (
resRing []geometry.Point,
) {
if len(ring) < 4 {
// under 4 elements this is not a polygon ring!
return
}
var edge uint8
var inside, prevInside bool
var prev geometry.Point
for edge = 1; edge <= 8; edge *= 2 {
prev = ring[len(ring)-2]
prevInside = (getCode(bbox, prev) & edge) == 0
for _, p := range ring {
inside = (getCode(bbox, p) & edge) == 0
if prevInside && inside {
// Staying inside
resRing = append(resRing, p)
} else if prevInside && !inside {
// Leaving
resRing = append(resRing, intersect(bbox, edge, prev, p))
} else if !prevInside && inside {
// Entering
resRing = append(resRing, intersect(bbox, edge, prev, p))
resRing = append(resRing, p)
} else {
// Staying outside
}
prev, prevInside = p, inside
}
if len(resRing) > 0 && resRing[0] != resRing[len(resRing)-1] {
resRing = append(resRing, resRing[0])
}
ring, resRing = resRing, []geometry.Point{}
if len(ring) == 0 {
break
}
}
resRing = ring
return
}
// getCode computes the Cohen-Sutherland outcode for point relative to bbox.
// The returned bitmask has bit 1 set when the point is left of the box,
// 2 for right, 4 for below (bottom) and 8 for above (top); 0 means the
// point lies inside or on the box.
func getCode(bbox geometry.Rect, point geometry.Point) (code uint8) {
	// The named result already starts at zero, so only violated
	// half-planes need to set bits (the redundant `code = 0` was removed).
	if point.X < bbox.Min.X {
		code |= 1 // left
	} else if point.X > bbox.Max.X {
		code |= 2 // right
	}
	if point.Y < bbox.Min.Y {
		code |= 4 // bottom
	} else if point.Y > bbox.Max.Y {
		code |= 8 // top
	}
	return
}
// intersect returns the point where the segment start->end crosses the bbox
// edge identified by code (a Cohen-Sutherland outcode bit: 8=top, 4=bottom,
// 2=right, 1=left; highest set bit wins). Callers must pass a non-zero
// code; a zero code yields the zero point.
func intersect(bbox geometry.Rect, code uint8, start, end geometry.Point) (
	pt geometry.Point,
) {
	// The result variable was renamed from `new`, which shadowed the
	// builtin of the same name.
	if (code & 8) != 0 { // top
		pt = geometry.Point{
			X: start.X + (end.X-start.X)*(bbox.Max.Y-start.Y)/(end.Y-start.Y),
			Y: bbox.Max.Y,
		}
	} else if (code & 4) != 0 { // bottom
		pt = geometry.Point{
			X: start.X + (end.X-start.X)*(bbox.Min.Y-start.Y)/(end.Y-start.Y),
			Y: bbox.Min.Y,
		}
	} else if (code & 2) != 0 { // right
		pt = geometry.Point{
			X: bbox.Max.X,
			Y: start.Y + (end.Y-start.Y)*(bbox.Max.X-start.X)/(end.X-start.X),
		}
	} else if (code & 1) != 0 { // left
		pt = geometry.Point{
			X: bbox.Min.X,
			Y: start.Y + (end.Y-start.Y)*(bbox.Min.X-start.X)/(end.X-start.X),
		}
	}
	return
}
func clipPoint(
point *Point, clipper Object, opts *geometry.IndexOptions,
) Object {
if point.IntersectsRect(clipper.Rect()) {
return point
}
return NewMultiPoint(nil)
}
func clipRect(
rect *Rect, clipper Object, opts *geometry.IndexOptions,
) Object {
base := rect.Base()
points := make([]geometry.Point, base.NumPoints())
for i := 0; i < len(points); i++ {
points[i] = base.PointAt(i)
}
poly := geometry.NewPoly(points, nil, opts)
gPoly := NewPolygon(poly)
return Clip(gPoly, clipper, opts)
}
func clipLineString(
lineString *LineString, clipper Object,
opts *geometry.IndexOptions,
) Object {
bbox := clipper.Rect()
var newPoints [][]geometry.Point
var clipped geometry.Segment
var rejected bool
var line []geometry.Point
base := lineString.Base()
nSegments := base.NumSegments()
for i := 0; i < nSegments; i++ {
clipped, rejected = clipSegment(base.SegmentAt(i), bbox)
if rejected {
continue
}
if len(line) > 0 && line[len(line)-1] != clipped.A {
newPoints = append(newPoints, line)
line = []geometry.Point{clipped.A}
} else if len(line) == 0 {
line = append(line, clipped.A)
}
line = append(line, clipped.B)
}
if len(line) > 0 {
newPoints = append(newPoints, line)
}
var children []*geometry.Line
for _, points := range newPoints {
children = append(children,
geometry.NewLine(points, opts))
}
if len(children) == 1 {
return NewLineString(children[0])
}
return NewMultiLineString(children)
}
func clipPolygon(
polygon *Polygon, clipper Object,
opts *geometry.IndexOptions,
) Object {
rect := clipper.Rect()
var newPoints [][]geometry.Point
base := polygon.Base()
rings := []geometry.Ring{base.Exterior}
rings = append(rings, base.Holes...)
for _, ring := range rings {
ringPoints := make([]geometry.Point, ring.NumPoints())
for i := 0; i < len(ringPoints); i++ {
ringPoints[i] = ring.PointAt(i)
}
if clippedRing := clipRing(ringPoints, rect); len(clippedRing) > 0 {
newPoints = append(newPoints, clippedRing)
}
}
var exterior []geometry.Point
var holes [][]geometry.Point
if len(newPoints) > 0 {
exterior = newPoints[0]
}
if len(newPoints) > 1 {
holes = newPoints[1:]
}
newPoly := NewPolygon(
geometry.NewPoly(exterior, holes, opts),
)
if newPoly.Empty() {
return NewMultiPolygon(nil)
}
return newPoly
}
func clipFeature(
feature *Feature, clipper Object,
opts *geometry.IndexOptions,
) Object {
newFeature := Clip(feature.Base(), clipper, opts)
if _, ok := newFeature.(*Feature); !ok {
newFeature = NewFeature(newFeature, feature.Members())
}
return newFeature
}
func clipCollection(
collection Collection, clipper Object,
opts *geometry.IndexOptions,
) Object {
var features []Object
for _, feature := range collection.Children() {
feature = Clip(feature, clipper, opts)
if feature.Empty() {
continue
}
if _, ok := feature.(*Feature); !ok {
feature = NewFeature(feature, "")
}
features = append(features, feature)
}
return NewFeatureCollection(features)
} | clip.go | 0.738386 | 0.449513 | clip.go | starcoder |
package symbol
import (
"fmt"
"github.com/bazo-blockchain/lazo/parser/node"
)
// SymbolTable maps symbols to nodes, designators to declarations, expressions to types and contains the global scope
type SymbolTable struct {
GlobalScope *GlobalScope
symbolToNode map[Symbol]node.Node
designatorDeclarations map[node.DesignatorNode]Symbol
expressionTypes map[node.ExpressionNode]TypeSymbol
}
// NewSymbolTable creates a new symbol table and initializes mappings
func NewSymbolTable() *SymbolTable {
return &SymbolTable{
GlobalScope: newGlobalScope(),
symbolToNode: make(map[Symbol]node.Node),
designatorDeclarations: make(map[node.DesignatorNode]Symbol),
expressionTypes: make(map[node.ExpressionNode]TypeSymbol),
}
}
// FindTypeByNode resolves the type symbol for a type AST node.
// Named types are looked up in the global scope; array and map types that
// are not yet registered are created and added to the global types on
// demand. Returns nil when the type cannot be resolved.
func (t *SymbolTable) FindTypeByNode(typeNode node.TypeNode) TypeSymbol {
	if typeSymbol := t.FindTypeByIdentifier(typeNode.Type()); typeSymbol != nil {
		return typeSymbol
	}
	// A binding type switch replaces the previous bare switch with a
	// duplicated type assertion in each case.
	switch typeNode := typeNode.(type) {
	case *node.ArrayTypeNode:
		return t.AddArrayType(typeNode.ElementType)
	case *node.MapTypeNode:
		return t.AddMapType(typeNode)
	default:
		return nil
	}
}
// AddArrayType creates a new array type symbol and adds it to the global types
func (t *SymbolTable) AddArrayType(elementTypeNode node.TypeNode) TypeSymbol {
elementType := t.FindTypeByNode(elementTypeNode)
if elementType == nil {
return nil
}
arrayType := NewArrayTypeSymbol(t.GlobalScope, elementType)
t.GlobalScope.Types[arrayType.Identifier()] = arrayType
return arrayType
}
// AddMapType creates a new map type and adds it to the global scope types
func (t *SymbolTable) AddMapType(mapTypeNode *node.MapTypeNode) TypeSymbol {
keyType := t.FindTypeByNode(mapTypeNode.KeyType)
valueType := t.FindTypeByNode(mapTypeNode.ValueType)
// To create a map type, both types should be valid
if keyType == nil || valueType == nil {
return nil
}
mapType := NewMapTypeSymbol(t.GlobalScope, keyType, valueType)
t.GlobalScope.Types[mapType.Identifier()] = mapType
return mapType
}
// FindTypeByIdentifier searches for a type
// Returns the type or nil
func (t *SymbolTable) FindTypeByIdentifier(identifier string) TypeSymbol {
if compilationType, ok := t.GlobalScope.Types[identifier]; ok {
return compilationType
}
return nil
}
// Find recursively searches for a symbol within a specific scope
// Returns the symbol or nil
func (t *SymbolTable) Find(scope Symbol, identifier string) Symbol {
if scope == nil {
return nil
}
if identifier == This && scope == t.GlobalScope {
return t.GlobalScope.Contract
}
for _, declaration := range scope.AllDeclarations() {
if declaration.Identifier() == identifier {
return declaration
}
}
return t.Find(scope.Scope(), identifier)
}
// MapSymbolToNode maps a symbol to its node
func (t *SymbolTable) MapSymbolToNode(symbol Symbol, node node.Node) {
t.symbolToNode[symbol] = node
}
// GetNodeBySymbol returns the node linked to the symbol
func (t *SymbolTable) GetNodeBySymbol(symbol Symbol) node.Node {
return t.symbolToNode[symbol]
}
// MapDesignatorToDecl maps a designator to a declaration
func (t *SymbolTable) MapDesignatorToDecl(designatorNode node.DesignatorNode, symbol Symbol) {
t.designatorDeclarations[designatorNode] = symbol
}
// GetDeclByDesignator returns the declaration for a designator
func (t *SymbolTable) GetDeclByDesignator(designatorNode node.DesignatorNode) Symbol {
return t.designatorDeclarations[designatorNode]
}
// MapExpressionToType maps an expression to its type
func (t *SymbolTable) MapExpressionToType(expressionNode node.ExpressionNode, symbol TypeSymbol) {
t.expressionTypes[expressionNode] = symbol
}
// GetTypeByExpression returns the type of the expression
func (t *SymbolTable) GetTypeByExpression(expressionNode node.ExpressionNode) TypeSymbol {
return t.expressionTypes[expressionNode]
}
// String creates a string representation for the symbol table
func (t *SymbolTable) String() string {
return fmt.Sprintf("Global Scope: %s", t.GlobalScope)
} | checker/symbol/symbol_table.go | 0.830353 | 0.532547 | symbol_table.go | starcoder |
package rest
import (
"reflect"
)
type ReflectConsumer interface {
Consume(v reflect.Value)
ForIndex(index int) ReflectConsumer
ForField(field reflect.StructField) ReflectConsumer
ForKey(key string) ReflectConsumer
}
type ReflectShouldConsume func(t reflect.Type) bool
type ReflectIterator func(value reflect.Value, consumer ReflectConsumer)
type Reflector struct {
Iterators map[reflect.Type][]ReflectIterator
ShouldConsume ReflectShouldConsume
}
func NewReflector(should ReflectShouldConsume) *Reflector {
return &Reflector{
Iterators: make(map[reflect.Type][]ReflectIterator),
ShouldConsume: should,
}
}
func (r *Reflector) Consume(value any, consumer ReflectConsumer) {
reflectValue := reflect.ValueOf(value)
reflectType := reflectValue.Type()
iterators := r.getIterators(reflectType)
r.iterate(iterators, reflectValue, consumer)
}
func (r *Reflector) getIterators(t reflect.Type) []ReflectIterator {
iterators, exists := r.Iterators[t]
if exists {
return iterators
}
r.Iterators[t] = make([]ReflectIterator, 0)
pointerCanConsume := t.Kind() != reflect.Pointer && r.ShouldConsume(reflect.PointerTo(t))
if !pointerCanConsume && r.ShouldConsume(t) {
r.addIterator(t, r.consumeValue())
}
switch t.Kind() {
case reflect.Pointer:
r.addIterator(t, r.consumePointer(t.Elem()))
case reflect.Interface:
r.addIterator(t, r.consumeInterface(t.Elem()))
case reflect.Struct:
for i := 0; i < t.NumField(); i++ {
r.addIterator(t, r.consumeField(t, i))
}
case reflect.Array, reflect.Slice:
r.addIterator(t, r.consumeSlice(t.Elem()))
case reflect.Map:
r.addIterator(t, r.consumeMap(t.Elem()))
}
return r.Iterators[t]
}
func (r *Reflector) addIterator(t reflect.Type, iter ReflectIterator) {
if iter != nil {
r.Iterators[t] = append(r.Iterators[t], iter)
}
}
func (r *Reflector) iterate(iters []ReflectIterator, value reflect.Value, consumer ReflectConsumer) {
for _, i := range iters {
i(value, consumer)
}
}
func (r *Reflector) consumeValue() ReflectIterator {
return func(value reflect.Value, consumer ReflectConsumer) {
consumer.Consume(value)
}
}
func (r *Reflector) consumePointer(t reflect.Type) ReflectIterator {
iters := r.getIterators(t)
if len(iters) == 0 {
return nil
}
return func(value reflect.Value, consumer ReflectConsumer) {
if value.IsNil() {
return
}
r.iterate(iters, ref(value.Elem()), consumer)
}
}
func (r *Reflector) consumeInterface(t reflect.Type) ReflectIterator {
iters := r.getIterators(t)
if len(iters) == 0 {
return nil
}
return func(value reflect.Value, consumer ReflectConsumer) {
if value.IsNil() {
return
}
r.iterate(iters, ref(value.Elem()), consumer)
}
}
// consumeSlice returns an iterator over array/slice elements. Each element
// is re-addressed via ref and visited only when the consumer opts in by
// returning a non-nil consumer from ForIndex. Returns nil when the element
// type has nothing to consume.
// NOTE(review): value.IsNil panics for array kinds unless value is always a
// pointer by the time it arrives here (via ref) — confirm.
func (r *Reflector) consumeSlice(t reflect.Type) ReflectIterator {
	iters := r.getIterators(t)
	if len(iters) == 0 {
		return nil
	}
	return func(value reflect.Value, consumer ReflectConsumer) {
		if value.IsNil() {
			return
		}
		// val unwraps a pointer-to-slice produced by an earlier ref.
		slice := val(value)
		for i := 0; i < slice.Len(); i++ {
			item := ref(slice.Index(i))
			if indexValidator := consumer.ForIndex(i); indexValidator != nil {
				r.iterate(iters, item, indexValidator)
			}
		}
	}
}

// consumeMap returns an iterator over map values, gated per entry through
// consumer.ForKey. Map values are not addressable, so ref typically returns
// them as-is.
// NOTE(review): key.String() is only meaningful for string-kinded keys;
// other key kinds would render as reflect's placeholder text — confirm maps
// here always have string keys.
func (r *Reflector) consumeMap(t reflect.Type) ReflectIterator {
	iters := r.getIterators(t)
	if len(iters) == 0 {
		return nil
	}
	return func(value reflect.Value, consumer ReflectConsumer) {
		if value.IsNil() {
			return
		}
		iter := value.MapRange()
		for iter.Next() {
			key := iter.Key()
			value := ref(iter.Value())
			if keyValidator := consumer.ForKey(key.String()); keyValidator != nil {
				r.iterate(iters, value, keyValidator)
			}
		}
	}
}
// consumeField returns an iterator for struct field fieldIndex of parent.
// Iterators for *FieldType are preferred over FieldType, so consumers can
// act through the pointer when possible. Embedded (anonymous) fields are
// flattened: their contents are visited with the parent's consumer, while
// named fields are gated through consumer.ForField.
func (r *Reflector) consumeField(parent reflect.Type, fieldIndex int) ReflectIterator {
	field := parent.Field(fieldIndex)
	iters := r.getIterators(reflect.PointerTo(field.Type))
	if len(iters) == 0 {
		iters = r.getIterators(field.Type)
		if len(iters) == 0 {
			return nil
		}
	}
	return func(value reflect.Value, consumer ReflectConsumer) {
		// value may be a pointer to the struct (from ref); val unwraps it.
		fieldValue := ref(val(value).Field(fieldIndex))
		if field.Anonymous {
			r.iterate(iters, fieldValue, consumer)
		} else {
			if fieldValidator := consumer.ForField(field); fieldValidator != nil {
				r.iterate(iters, fieldValue, fieldValidator)
			}
		}
	}
}
func ref(value reflect.Value) reflect.Value {
if value.CanAddr() {
return value.Addr()
}
return value
}
// val dereferences pointer values; non-pointers are returned as-is.
// It is the inverse convenience of ref.
func val(value reflect.Value) reflect.Value {
	if value.Kind() == reflect.Pointer {
		return value.Elem()
	}
	return value
} | pkg/rest/reflect.go | 0.611846 | 0.583559 | reflect.go | starcoder |
package event
import "fmt"
// Timing keeps min/max/avg information about a timer over a certain interval
type Timing struct {
	Name string // metric key as sent on the wire
	Min int64   // smallest delta seen in the interval
	Max int64   // largest delta seen in the interval
	Value int64 // sum of all deltas; divide by Count for the average
	Count int64 // number of samples folded in
}

// NewTiming is a factory for a Timing event, setting the Count to 1 to prevent div_by_0 errors
func NewTiming(k string, delta int64) *Timing {
	return &Timing{Name: k, Min: delta, Max: delta, Value: delta, Count: 1}
}
// Update the event with metrics coming from a new one of the same type and with the same key.
// The counterpart's aggregates are read back through its Payload map; the
// payload cast will panic if e2's Payload is not map[string]int64, but the
// Type() guard above rejects non-Timing events first.
func (e *Timing) Update(e2 Event) error {
	if e.Type() != e2.Type() {
		return fmt.Errorf("statsd event type conflict: %s vs %s ", e.String(), e2.String())
	}
	p := e2.Payload().(map[string]int64)
	e.Count += p["cnt"]
	e.Value += p["val"]
	e.Min = minInt64(e.Min, p["min"])
	e.Max = maxInt64(e.Max, p["max"])
	return nil
}
// Payload returns the aggregated min/max/value/count for this event as a map
// keyed by the short names used by Update ("min", "max", "val", "cnt").
func (e Timing) Payload() interface{} {
	p := make(map[string]int64, 4)
	p["min"] = e.Min
	p["max"] = e.Max
	p["val"] = e.Value
	p["cnt"] = e.Count
	return p
}
// Stats returns an array of StatsD events as they travel over UDP: one line
// each for count, average, min and max.
//
// Fixed: the average was computed as e.Value/e.Count without a guard, which
// panics with a division by zero for a zero-value Timing (NewTiming always
// starts Count at 1, but nothing forces callers through the factory). The
// average now reports 0 when no samples have been recorded.
func (e Timing) Stats() []string {
	var avg int64
	if e.Count > 0 {
		avg = e.Value / e.Count
	}
	return []string{
		fmt.Sprintf("%s.count:%d|a", e.Name, e.Count),
		fmt.Sprintf("%s.avg:%d|ms", e.Name, avg),
		fmt.Sprintf("%s.min:%d|ms", e.Name, e.Min),
		fmt.Sprintf("%s.max:%d|ms", e.Name, e.Max),
	}
}
// Key returns the name of this metric
func (e Timing) Key() string {
	return e.Name
}

// SetKey sets the name of this metric
func (e *Timing) SetKey(key string) {
	e.Name = key
}

// Type returns an integer identifier for this type of metric
// (EventTiming, a constant declared elsewhere in this package).
func (e Timing) Type() int {
	return EventTiming
}

// TypeString returns a name for this type of metric
func (e Timing) TypeString() string {
	return "Timing"
}

// String returns a debug-friendly representation of this metric
func (e Timing) String() string {
	return fmt.Sprintf("{Type: %s, Key: %s, Value: %+v}", e.TypeString(), e.Name, e.Payload())
}
// minInt64 returns the smaller of v1 and v2.
func minInt64(v1, v2 int64) int64 {
	if v2 < v1 {
		return v2
	}
	return v1
}
// maxInt64 returns the larger of v1 and v2.
func maxInt64(v1, v2 int64) int64 {
	if v1 >= v2 {
		return v1
	}
	return v2
} | event/timing.go | 0.741393 | 0.41401 | timing.go | starcoder |
package algorithm
import (
"bufio"
"fmt"
"models"
"os"
"path/filepath"
)
// Select returns the two extreme points of S used to seed QuickHull:
// p, the point with the maximum Y (ties broken by maximum X), and
// q, the point with the minimum Y (ties broken by minimum X).
//
// Fixed: the original seeded p from index 0 and q from index 1 and skipped
// comparing those two seeds against each other, so with exactly two points
// (or when either seed was the true extreme of the other kind) the wrong
// extremes could be returned. Both extremes are now seeded from the first
// point, and every remaining point is compared against both.
func Select(S *models.Graph2D) (*models.Point2D, *models.Point2D) {
	var p, q models.Point2D
	for index, point := range S.Points {
		if index == 0 {
			p = point
			q = point
			continue
		}
		// maximum y-value, then maximum x-value if y values are same
		if p.YValue < point.YValue {
			p = point
		} else if p.YValue == point.YValue && p.XValue < point.XValue {
			p = point
		}
		// minimum y-value, then minimum x-value if y values are same
		if q.YValue > point.YValue {
			q = point
		} else if q.YValue == point.YValue && q.XValue > point.XValue {
			q = point
		}
	}
	return &p, &q
}
// Split partitions the points of S by which side of the directed line p->q
// they fall on, as reported by models.RightSide: result 0 goes into R,
// result 1 into L. Points with any other result (presumably collinear with
// the line — confirm against models.RightSide) are dropped entirely.
// R and L are freshly built; S is not modified.
func Split(p *models.Point2D, q *models.Point2D, S *models.Graph2D) (*models.Graph2D, *models.Graph2D) {
	var R, L models.Graph2D
	for _, point := range S.Points {
		d := models.RightSide(p, q, &point)
		if d == 0 {
			R.Points = append(R.Points, point)
		} else if d == 1 {
			L.Points = append(L.Points, point)
		}
	}
	return &R, &L
}
// QuickHull recursively grows the hull between anchor points p and q using
// the candidate set S: the farthest point r from line p-q is a hull vertex,
// and recursion continues on the two outer subsets. Hull vertices are
// appended to convex between the two recursive calls, preserving boundary
// order.
//
// NOTE(review): the live fmt.Println of r looks like leftover debug output
// (the neighbouring Printlns are commented out). It cannot simply be deleted
// without also dealing with the then-unused "fmt" import in this file.
func QuickHull(p *models.Point2D, q *models.Point2D, S *models.Graph2D, convex *models.Graph2D) {
	if !S.IsEmpty() {
		//fmt.Println("S: ", *S)
		r := FarthestPoint(p, q, S)
		U, L := PruneAndSplit(p, q, r, S)
		//fmt.Println("p: ", *p)
		//fmt.Println("q: ", *q)
		QuickHull(p, r, U, convex)
		convex.Points = append(convex.Points, *r)
		fmt.Println("r: ", *r)
		QuickHull(r, q, L, convex)
	}
}
// FarthestPoint returns the point of S with the greatest perpendicular
// distance from the line through p and q. If S is empty, the zero-value
// point is returned.
func FarthestPoint(p *models.Point2D, q *models.Point2D, S *models.Graph2D) *models.Point2D {
	var line models.Line
	line.Create(p, q)
	var farPoint models.Point2D
	var farDistance float64
	for _, point := range S.Points {
		distance := line.PerpendicularDistance(&point)
		if farDistance < distance {
			farPoint = point
			farDistance = distance
		}
	}
	return &farPoint
}
// PruneAndSplit splits S around the new hull vertex r: U holds the points
// on the outer side of line p->r and L the points on the outer side of line
// r->q. Points inside the triangle p-r-q end up only in the discarded halves
// of the two Splits and are therefore pruned from further recursion.
func PruneAndSplit(p *models.Point2D, q *models.Point2D, r *models.Point2D, S *models.Graph2D) (*models.Graph2D, *models.Graph2D) {
	U, _ := Split(p, r, S)
	L, _ := Split(r, q, S)
	return U, L
}
// RunQuickHull reads a 2-D point set from ./tmp/<input>, computes its convex
// hull with QuickHull, writes the hull to ./tmp/<output>, and returns both
// the original graph and the hull.
//
// NOTE(review): every error (Abs, Open, Import, Create, Flush) is silently
// discarded with `_`; a missing or malformed input file will surface later
// as a nil dereference or an empty result rather than a clear error.
func RunQuickHull(input string, output string) (*models.Graph2D, *models.Graph2D) {
	inputAbs, _ := filepath.Abs("./tmp/" + input)
	inputFile, _ := os.Open(inputAbs)
	g, _ := models.Import(inputFile)
	inputFile.Close()
	var convex *models.Graph2D
	convex = new(models.Graph2D)
	p, q := Select(g)
	convex.Points = append(convex.Points, *p)
	convex.Points = append(convex.Points, *q)
	R, L := Split(p, q, g)
	QuickHull(p, q, R, convex)
	QuickHull(q, p, L, convex)
	outputAbs, _ := filepath.Abs("./tmp/" + output)
	outputFile, _ := os.Create(outputAbs)
	writer := bufio.NewWriter(outputFile)
	convex.Export(writer)
	writer.Flush()
	outputFile.Close()
	return g, convex
} | src/algorithm/algorithm2d.go | 0.534612 | 0.496521 | algorithm2d.go | starcoder |
package transformer
import (
"image"
"github.com/cs3238-tsuzu/prasoba/driver"
"github.com/hajimehoshi/ebiten/v2"
)
// Rect is a image.Rectangle with the capability to translate
type Rect struct {
	rect image.Rectangle
}

// NewRect returns a new Rect
func NewRect(r image.Rectangle) *Rect {
	return &Rect{
		rect: r,
	}
}

// Clone returns a cloned Rect
func (r *Rect) Clone() *Rect {
	return &Rect{
		rect: r.rect,
	}
}

// Center move the center of the rect to the position.
// The receiver is mutated and returned for chaining.
func (r *Rect) Center(x, y int) *Rect {
	return r.From(x, y, Center)
}

// From move the rect to the position in the mode.
// The receiver is mutated and returned for chaining.
func (r *Rect) From(x, y int, mode PositionMode) *Rect {
	x, y = CalcRenderedPosFromRectangle(
		x, y, r.rect, mode,
	)
	r.rect = RectangleFrom(x, y, r.rect)
	return r
}

// Size returns Rectangle size
func (r *Rect) Size() image.Point {
	return r.rect.Size()
}

// Min returns the minimum position of the rectangle
func (r *Rect) Min() image.Point {
	return r.rect.Min
}

// Max returns the maximum position of the rectangle
func (r *Rect) Max() image.Point {
	return r.rect.Max
}
// Hover reports whether the cursor or any currently pressed touch lies
// inside the rectangle.
func (r *Rect) Hover() bool {
	pos := driver.DefaultDriver.CursorPosition()
	if pos.In(r.rect) {
		return true
	}
	for _, pos := range driver.DefaultDriver.PressedTouchPositions() {
		if pos.In(r.rect) {
			return true
		}
	}
	return false
}

// ClickedBy reports whether the given mouse button was just pressed inside
// the rectangle, or a touch just started inside it.
// NOTE(review): the touch path fires regardless of key — presumably because
// touches have no buttons; confirm that is intended for non-primary buttons.
func (r *Rect) ClickedBy(key ebiten.MouseButton) bool {
	pos := driver.DefaultDriver.CursorPosition()
	if pos.In(r.rect) && driver.DefaultDriver.IsMouseButtonJustPressed(key) {
		return true
	}
	for _, pos := range driver.DefaultDriver.JustPressedTouchPositions() {
		if pos.In(r.rect) {
			return true
		}
	}
	return false
}

// Clicked is ClickedBy with the driver's primary mouse button.
func (r *Rect) Clicked() bool {
	return r.ClickedBy(driver.DefaultDriver.MousePrimaryButton())
}

// PressedBy reports whether the given mouse button is held down inside the
// rectangle, or any touch is currently pressed inside it.
func (r *Rect) PressedBy(key ebiten.MouseButton) bool {
	pos := driver.DefaultDriver.CursorPosition()
	if pos.In(r.rect) && driver.DefaultDriver.IsMouseButtonPressed(key) {
		return true
	}
	for _, pos := range driver.DefaultDriver.PressedTouchPositions() {
		if pos.In(r.rect) {
			return true
		}
	}
	return false
}

// Pressed is PressedBy with the driver's primary mouse button.
func (r *Rect) Pressed() bool {
	return r.PressedBy(driver.DefaultDriver.MousePrimaryButton())
} | transformer/rect.go | 0.873566 | 0.546617 | rect.go | starcoder |
package gorp
import (
"fmt"
"reflect"
)
// SqliteDialect implements the gorp SQL dialect for SQLite.
// suffix is appended to CREATE TABLE statements (see CreateTableSuffix).
type SqliteDialect struct {
	suffix string
}

// QuerySuffix returns the statement terminator appended to queries.
func (d SqliteDialect) QuerySuffix() string { return ";" }

// ToSqlType maps a Go type to an SQLite column type: pointers map by their
// element type, bools and all integer kinds become "integer", floats "real",
// and []byte "blob". The Name() switch picks up database/sql null wrappers
// and time.Time by type name. Everything else falls back to
// varchar(maxsize), with maxsize defaulting to 255. isAutoIncr is unused for
// SQLite column typing.
func (d SqliteDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
	switch val.Kind() {
	case reflect.Ptr:
		return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
	case reflect.Bool:
		return "integer"
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return "integer"
	case reflect.Float64, reflect.Float32:
		return "real"
	case reflect.Slice:
		if val.Elem().Kind() == reflect.Uint8 {
			return "blob"
		}
	}
	switch val.Name() {
	case "NullInt64":
		return "integer"
	case "NullFloat64":
		return "real"
	case "NullBool":
		return "integer"
	case "Time":
		return "datetime"
	}
	if maxsize < 1 {
		maxsize = 255
	}
	return fmt.Sprintf("varchar(%d)", maxsize)
}
// Returns autoincrement
func (d SqliteDialect) AutoIncrStr() string {
	return "autoincrement"
}

// AutoIncrBindValue returns the value bound for an autoincrement column on
// insert; SQLite assigns the rowid when null is supplied.
func (d SqliteDialect) AutoIncrBindValue() string {
	return "null"
}

// AutoIncrInsertSuffix returns nothing: SQLite has no RETURNING-style suffix
// for fetching the generated key inline.
func (d SqliteDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
	return ""
}

// Returns suffix
func (d SqliteDialect) CreateTableSuffix() string {
	return d.suffix
}

// CreateIndexSuffix returns nothing; SQLite needs no CREATE INDEX suffix.
func (d SqliteDialect) CreateIndexSuffix() string {
	return ""
}

// DropIndexSuffix returns nothing; SQLite needs no DROP INDEX suffix.
func (d SqliteDialect) DropIndexSuffix() string {
	return ""
}

// With sqlite, there technically isn't a TRUNCATE statement,
// but a DELETE FROM uses a truncate optimization:
// http://www.sqlite.org/lang_delete.html
func (d SqliteDialect) TruncateClause() string {
	return "delete from"
}

// Returns "?"
func (d SqliteDialect) BindVar(i int) string {
	return "?"
}

// InsertAutoIncr runs the insert and returns the generated key via the
// shared standard implementation (LastInsertId).
func (d SqliteDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
	return standardInsertAutoIncr(exec, insertSql, params...)
}

// QuoteField wraps an identifier in double quotes.
func (d SqliteDialect) QuoteField(f string) string {
	return `"` + f + `"`
}

// sqlite does not have schemas like PostgreSQL does, so just escape it like normal
func (d SqliteDialect) QuotedTableForQuery(schema string, table string) string {
	return d.QuoteField(table)
}

// IfSchemaNotExists appends SQLite's "if not exists" guard to command.
func (d SqliteDialect) IfSchemaNotExists(command, schema string) string {
	return fmt.Sprintf("%s if not exists", command)
}

// IfTableExists appends SQLite's "if exists" guard to command.
func (d SqliteDialect) IfTableExists(command, schema, table string) string {
	return fmt.Sprintf("%s if exists", command)
}

// IfTableNotExists appends SQLite's "if not exists" guard to command.
func (d SqliteDialect) IfTableNotExists(command, schema, table string) string {
	return fmt.Sprintf("%s if not exists", command)
} | dialect_sqlite.go | 0.687105 | 0.407216 | dialect_sqlite.go | starcoder |
package day17
import (
"bufio"
"fmt"
"io"
"os"
)
// Cell states as stored in the sparse grids; absent cells (byte 0) are
// treated as inactive by the simulation.
const (
	Inactive = '.'
	Active = '#'
)

// Sparse coordinate grids: missing keys read as the zero value.
type Grid1D map[int]byte
type Grid2D map[int]Grid1D
type Grid3D map[int]Grid2D
type Grid4D map[int]Grid3D

// Day17 reads the puzzle input and prints the answers for both parts.
// It panics if the input file cannot be read.
func Day17() {
	input, err := parseInput()
	if err != nil {
		panic(err)
	}
	fmt.Printf("Day 17 part 1 answer is %d\n", Part1(input))
	fmt.Printf("Day 17 part 2 answer is %d\n", Part2(input))
}
// Cycles is the number of generations both parts simulate.
const Cycles = 6

// Part1 runs the 3-D Conway-cubes simulation for Cycles generations and
// returns the number of active cubes after the final generation.
//
// Each generation is computed into a fresh grid so that all cells update
// simultaneously, and the scanned bounds grow by one in every direction per
// cycle, since activity can spread at most one cell outward per generation.
// Cells absent from the sparse maps read as byte(0) and are treated as
// inactive.
func Part1(input [][]byte) int {
	activeCubes := 0
	grid3D := make3DGrid(input)
	yRange := [2]int{-1, len(input)}
	xRange := [2]int{-1, len(input[0])}
	zRange := [2]int{-1, 1}
	for cycle := 1; cycle <= Cycles; cycle++ {
		activeCubes = 0
		newGrid3D := Grid3D{}
		for z := zRange[0]; z <= zRange[1]; z++ {
			newGrid3D[z] = Grid2D{}
			for y := yRange[0]; y <= yRange[1]; y++ {
				newGrid3D[z][y] = Grid1D{}
				for x := xRange[0]; x <= xRange[1]; x++ {
					nearbyActive := countActiveNeighbours3D(x, y, z, grid3D, true)
					cube := grid3D[z][y][x]
					var newCube byte
					switch cube {
					case Active:
						if nearbyActive == 2 || nearbyActive == 3 {
							newCube = Active
						} else {
							newCube = Inactive
						}
					case Inactive, byte(0):
						// byte(0) covers cells never materialized in the map.
						if nearbyActive == 3 {
							newCube = Active
						} else {
							newCube = Inactive
						}
					}
					newGrid3D[z][y][x] = newCube
					if newCube == Active {
						activeCubes++
					}
				}
			}
		}
		grid3D = newGrid3D
		xRange[0]--
		xRange[1]++
		yRange[0]--
		yRange[1]++
		zRange[0]--
		zRange[1]++
	}
	return activeCubes
}
// Part2 is the 4-D variant of Part1: the same cellular automaton with an
// additional w axis, bounds expanding by one in every direction each cycle.
// It returns the number of active hypercubes after the final generation.
func Part2(input [][]byte) int {
	activeCubes := 0
	grid4D := make4DGrid(input)
	yRange := [2]int{-1, len(input)}
	xRange := [2]int{-1, len(input[0])}
	zRange := [2]int{-1, 1}
	wRange := [2]int{-1, 1}
	for cycle := 1; cycle <= Cycles; cycle++ {
		activeCubes = 0
		newGrid4D := Grid4D{}
		for w := wRange[0]; w <= wRange[1]; w++ {
			newGrid4D[w] = Grid3D{}
			for z := zRange[0]; z <= zRange[1]; z++ {
				newGrid4D[w][z] = Grid2D{}
				for y := yRange[0]; y <= yRange[1]; y++ {
					newGrid4D[w][z][y] = Grid1D{}
					for x := xRange[0]; x <= xRange[1]; x++ {
						nearbyActive := countActiveNeighbours4D(x, y, z, w, grid4D)
						cube := grid4D[w][z][y][x]
						var newCube byte
						switch cube {
						case Active:
							if nearbyActive == 2 || nearbyActive == 3 {
								newCube = Active
							} else {
								newCube = Inactive
							}
						case Inactive, byte(0):
							// byte(0) covers cells never materialized in the map.
							if nearbyActive == 3 {
								newCube = Active
							} else {
								newCube = Inactive
							}
						}
						newGrid4D[w][z][y][x] = newCube
						if newCube == Active {
							activeCubes++
						}
					}
				}
			}
		}
		grid4D = newGrid4D
		xRange[0]--
		xRange[1]++
		yRange[0]--
		yRange[1]++
		zRange[0]--
		zRange[1]++
		wRange[0]--
		wRange[1]++
	}
	return activeCubes
}
// make3DGrid lifts a 2-D plane of cells into a 3-D grid occupying z == 0.
func make3DGrid(grid [][]byte) Grid3D {
	plane := Grid2D{}
	for y, row := range grid {
		line := Grid1D{}
		for x, cell := range row {
			line[x] = cell
		}
		plane[y] = line
	}
	return Grid3D{0: plane}
}

// make4DGrid lifts a 2-D plane of cells into a 4-D grid at w == 0, z == 0.
func make4DGrid(grid [][]byte) Grid4D {
	return Grid4D{0: make3DGrid(grid)}
}
// countActiveNeighbours3D counts Active cells in the 3x3x3 neighbourhood of
// (x, y, z). When skipSelf is true the centre cell is excluded; the 4-D
// variant passes skipSelf only for its own dw == 0 plane. Cells missing from
// the sparse maps simply fail the lookups and count as inactive.
func countActiveNeighbours3D(x, y, z int, grid Grid3D, skipSelf bool) int {
	active := 0
	for dz := -1; dz <= 1; dz++ {
		for dy := -1; dy <= 1; dy++ {
			for dx := -1; dx <= 1; dx++ {
				if skipSelf && dx == 0 && dy == 0 && dz == 0 {
					continue
				}
				if grid2D, ok := grid[z+dz]; ok {
					if grid1D, ok := grid2D[y+dy]; ok {
						if value, ok := grid1D[x+dx]; ok && value == Active {
							active++
						}
					}
				}
			}
		}
	}
	return active
}

// countActiveNeighbours4D counts Active cells in the 3x3x3x3 neighbourhood
// of (x, y, z, w), excluding the centre cell itself (dw == 0 delegates with
// skipSelf true).
func countActiveNeighbours4D(x, y, z, w int, grid Grid4D) int {
	active := 0
	for dw := -1; dw <= 1; dw++ {
		if grid3D, ok := grid[w+dw]; ok {
			active += countActiveNeighbours3D(x, y, z, grid3D, dw == 0)
		}
	}
	return active
}
// parseInput opens the hard-coded ./input.txt (relative to the working
// directory) and reads it into a grid of rows.
func parseInput() ([][]byte, error) {
	f, err := os.Open("./input.txt")
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return readInput(f)
}

// readInput reads r line by line, returning one byte slice per line along
// with any scanner error.
func readInput(r io.Reader) ([][]byte, error) {
	scanner := bufio.NewScanner(r)
	scanner.Split(bufio.ScanLines)
	var result [][]byte
	for scanner.Scan() {
		result = append(result, []byte(scanner.Text()))
	}
	return result, scanner.Err()
} | day17/day17.go | 0.511473 | 0.433981 | day17.go | starcoder |
package gollection
// HashSetOf builds a HashSet using hasher, pre-sized for and seeded with the
// given elements.
func HashSetOf[T comparable](hasher func(data T) int, elements ...T) HashSet[T] {
	var size = len(elements)
	var set = MakeHashSet(hasher, size)
	for _, v := range elements {
		set.Put(v)
	}
	return set
}

// NumberSetOf is HashSetOf with the package's numeric hasher.
func NumberSetOf[T Number](elements ...T) HashSet[T] {
	return HashSetOf(NumberHasher[T], elements...)
}

// StringSetOf is HashSetOf with the package's string hasher.
func StringSetOf[T ~string](elements ...T) HashSet[T] {
	return HashSetOf(StringHasher[T], elements...)
}

// MakeHashSet builds an empty HashSet with the given hasher and capacity,
// backed by a HashMap with Void values.
func MakeHashSet[T comparable](hasher func(data T) int, capacity int) HashSet[T] {
	return HashSet[T]{MakeHashMap[T, Void](hasher, capacity)}
}

// MakeNumberSet is MakeHashSet with the package's numeric hasher.
func MakeNumberSet[T Number](capacity int) HashSet[T] {
	return MakeHashSet(NumberHasher[T], capacity)
}

// MakeStringSet is MakeHashSet with the package's string hasher.
func MakeStringSet[T ~string](capacity int) HashSet[T] {
	return MakeHashSet(StringHasher[T], capacity)
}

// HashSetFrom copies all elements of collection into a new HashSet sized by
// collection.Size().
func HashSetFrom[T comparable, I Collection[T]](hasher func(data T) int, collection I) HashSet[T] {
	var size = collection.Size()
	var set = MakeHashSet(hasher, size)
	ForEach(func(t T) {
		set.Put(t)
	}, collection.Iter())
	return set
}

// NumberSetFrom is HashSetFrom with the package's numeric hasher.
func NumberSetFrom[T Number, I Collection[T]](collection I) HashSet[T] {
	return HashSetFrom(NumberHasher[T], collection)
}

// StringSetFrom is HashSetFrom with the package's string hasher.
func StringSetFrom[T ~string, I Collection[T]](collection I) HashSet[T] {
	return HashSetFrom(StringHasher[T], collection)
}
// HashSet is a set of comparable elements implemented as a HashMap whose
// values are the empty Void type.
type HashSet[T comparable] struct {
	inner HashMap[T, Void]
}

// Size returns the number of elements in the set.
func (a HashSet[T]) Size() int {
	return a.inner.Size()
}

// IsEmpty reports whether the set has no elements.
func (a HashSet[T]) IsEmpty() bool {
	return a.inner.IsEmpty()
}

// Put inserts element. The returned bool mirrors inner.Put's previous-value
// Option — presumably true when the element was already present; confirm
// against HashMap.Put's contract.
func (a HashSet[T]) Put(element T) bool {
	return a.inner.Put(element, Void{}).IsSome()
}

// PutAll inserts every element of the given collection.
func (a HashSet[T]) PutAll(elements Collection[T]) {
	var iter = elements.Iter()
	for item, ok := iter.Next().Get(); ok; item, ok = iter.Next().Get() {
		a.Put(item)
	}
}

// Remove deletes element, reporting whether it was present.
func (a HashSet[T]) Remove(element T) bool {
	return a.inner.Remove(element).IsSome()
}

// Contains reports whether element is in the set.
func (a HashSet[T]) Contains(element T) bool {
	return a.inner.Contains(element)
}

// ContainsAll reports whether every element of the collection is in the set.
func (a HashSet[T]) ContainsAll(elements Collection[T]) bool {
	var iter = elements.Iter()
	for item, ok := iter.Next().Get(); ok; item, ok = iter.Next().Get() {
		if !a.Contains(item) {
			return false
		}
	}
	return true
}

// Clear removes all elements.
func (a HashSet[T]) Clear() {
	a.inner.Clear()
}

// Iter returns an iterator over the set's elements.
func (a HashSet[T]) Iter() Iterator[T] {
	return &hashSetIterator[T]{a.inner.Iter()}
}
// ToSlice returns the set's elements as a freshly allocated slice in
// iteration order.
//
// Fixed: the slice was previously created with make([]T, a.Size()) and then
// appended to, which produced a slice of double length whose first half was
// zero values. It is now created with zero length and pre-sized capacity.
func (a HashSet[T]) ToSlice() []T {
	var arr = make([]T, 0, a.Size())
	ForEach(func(t T) {
		arr = append(arr, t)
	}, a.Iter())
	return arr
}
// Clone returns a copy of the set backed by a clone of the inner map.
func (a HashSet[T]) Clone() HashSet[T] {
	return HashSet[T]{a.inner.Clone()}
}

// hashSetIterator adapts the inner map's pair iterator to yield keys only.
type hashSetIterator[T comparable] struct {
	source Iterator[Pair[T, Void]]
}

// Next returns the next element of the set, or None when the underlying
// iterator is exhausted.
func (a *hashSetIterator[T]) Next() Option[T] {
	var item = a.source.Next()
	if v, ok := item.Get(); ok {
		return Some(v.First)
	}
	return None[T]()
} | hash_set.go | 0.695855 | 0.534916 | hash_set.go | starcoder |
package memory
import (
"fmt"
"reflect"
)
// Read reads the value pointed at p from the decoder d using C alignment rules.
// If v is an array or slice, then each of the elements will be read,
// sequentially. Panics when p is not a pointer.
func Read(d *Decoder, p interface{}) {
	v := reflect.ValueOf(p)
	if v.Kind() != reflect.Ptr {
		panic(fmt.Errorf("p must be pointer, got %T", p))
	}
	decode(d, v)
}
// decode recursively reads a value of v's type from d. Types implementing
// Decodable decode themselves; otherwise the kind switch maps Go kinds onto
// the decoder's fixed-width readers. The ty* interface checks (tyIntTy,
// tyCharTy, tySizeTy, tyUintTy, tyPointer — declared elsewhere) appear to be
// marker interfaces selecting the target's C-width readers (Int, Char, Size,
// Uint, Pointer) over the fixed-width ones; confirm against their
// declarations. Panics on unsupported types.
func decode(d *Decoder, v reflect.Value) {
	t := v.Type()
	if t.Implements(tyDecodable) {
		v.Interface().(Decodable).Decode(d)
		return
	}
	switch t.Kind() {
	case reflect.Float32:
		v.SetFloat(float64(d.F32()))
	case reflect.Float64:
		v.SetFloat(d.F64())
	case reflect.Int8:
		v.SetInt(int64(d.I8()))
	case reflect.Int16:
		v.SetInt(int64(d.I16()))
	case reflect.Int32:
		v.SetInt(int64(d.I32()))
	case reflect.Int64:
		if t.Implements(tyIntTy) {
			v.SetInt(int64(d.Int()))
		} else {
			v.SetInt(d.I64())
		}
	case reflect.Uint8:
		if t.Implements(tyCharTy) {
			v.SetUint(uint64(d.Char()))
		} else {
			v.SetUint(uint64(d.U8()))
		}
	case reflect.Uint16:
		v.SetUint(uint64(d.U16()))
	case reflect.Uint32:
		v.SetUint(uint64(d.U32()))
	case reflect.Uint64:
		switch {
		case t.Implements(tyPointer):
			v.SetUint(uint64(d.Pointer()))
		case t.Implements(tySizeTy):
			v.SetUint(uint64(d.Size()))
		case t.Implements(tyUintTy):
			v.SetUint(uint64(d.Uint()))
		default:
			v.SetUint(d.U64())
		}
	case reflect.Int:
		v.SetInt(int64(d.Int()))
	case reflect.Uint:
		v.SetUint(uint64(d.Uint()))
	case reflect.Array:
		for i, c := 0, v.Len(); i < c; i++ {
			decode(d, v.Index(i))
		}
	case reflect.Slice:
		// Plain byte slices are read in one bulk Data call; char-typed
		// slices still go element-wise through the Char reader.
		if t.Elem().Kind() == reflect.Uint8 && !t.Elem().Implements(tyCharTy) {
			d.Data(v.Interface().([]uint8))
		} else {
			for i, c := 0, v.Len(); i < c; i++ {
				decode(d, v.Index(i))
			}
		}
	case reflect.Struct:
		// Align to the struct's C alignment, decode each field, then skip
		// the trailing padding implied by the struct's C size.
		d.Align(AlignOf(v.Type(), d.m))
		base := d.o
		for i, c := 0, v.NumField(); i < c; i++ {
			decode(d, v.Field(i))
		}
		read := d.o - base
		padding := SizeOf(v.Type(), d.m) - read
		d.Skip(padding)
	case reflect.String:
		v.SetString(d.String())
	case reflect.Bool:
		v.SetBool(d.Bool())
	case reflect.Interface, reflect.Ptr:
		decode(d, v.Elem())
	default:
		panic(fmt.Errorf("Cannot write type: %v", t))
	}
} | gapis/memory/read.go | 0.522689 | 0.415492 | read.go | starcoder |
package main
/*
Given a sorted array nums, remove the duplicates in-place such that each element appear only once and return the new length.
Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
Example 1:
Given nums = [1,1,2],
Your function should return length = 2, with the first two elements of nums being 1 and 2 respectively.
It doesn't matter what you leave beyond the returned length.
Example 2:
Given nums = [0,0,1,1,1,2,2,3,3,4],
Your function should return length = 5, with the first five elements of nums being modified to 0, 1, 2, 3, and 4 respectively.
It doesn't matter what values are set beyond the returned length.
Clarification:
Confused why the returned value is an integer but your answer is an array?
Note that the input array is passed in by reference, which means modification to the input array will be known to the caller as well.
Internally you can think of this:
// nums is passed in by reference. (i.e., without making a copy)
int len = removeDuplicates(nums);
// any modification to nums in your function would be known by the caller.
// using the length returned by your function, it prints the first len elements.
for (int i = 0; i < len; i++) {
print(nums[i]);
}
给定一个排序数组,你需要在原地删除重复出现的元素,使得每个元素只出现一次,返回移除后数组的新长度。
不要使用额外的数组空间,你必须在原地修改输入数组并在使用 O(1) 额外空间的条件下完成。
示例 1:
给定数组 nums = [1,1,2],
函数应该返回新的长度 2, 并且原数组 nums 的前两个元素被修改为 1, 2。
你不需要考虑数组中超出新长度后面的元素。
示例 2:
给定 nums = [0,0,1,1,1,2,2,3,3,4],
函数应该返回新的长度 5, 并且原数组 nums 的前五个元素被修改为 0, 1, 2, 3, 4。
你不需要考虑数组中超出新长度后面的元素。
说明:
为什么返回数值是整数,但输出的答案是数组呢?
请注意,输入数组是以“引用”方式传递的,这意味着在函数里修改输入数组对于调用者是可见的。
你可以想象内部操作如下:
// nums 是以“引用”方式传递的。也就是说,不对实参做任何拷贝
int len = removeDuplicates(nums);
// 在函数里修改输入数组对于调用者是可见的。
// 根据你的函数返回的长度, 它会打印出数组中该长度范围内的所有元素。
for (int i = 0; i < len; i++) {
print(nums[i]);
}
*/
// 解法一 根据题目描述,不能用辅助数组,因为数组是排好序的,所以我们可以直接原数组上进行操作,操作和用辅助数组时一样
// 如果用一个新数组,我们会把不一样的数据不断放进去,那么在原数组的操作上也可以这样。用一个flag变量记录位置
// 不断拿更大下标的位置来比,知道碰到不一样的才替换掉flag
// 时间复杂度 O(n)空间复杂度O(1)
/*
Runtime: 44 ms, faster than 100.00% of Go online submissions for Remove Duplicates from Sorted Array.
Memory Usage: 7.7 MB, less than 28.89% of Go online submissions for Remove Duplicates from Sorted Array.
*/
// removeDuplicates compacts a sorted slice in place so that each value
// appears exactly once, returning the new logical length. Elements beyond
// the returned length are unspecified. O(n) time, O(1) extra space.
func removeDuplicates(nums []int) int {
	if len(nums) < 2 {
		return len(nums)
	}
	write := 1 // next position to fill; nums[:write] holds the uniques so far
	for _, v := range nums[1:] {
		if v != nums[write-1] {
			nums[write] = v
			write++
		}
	}
	return write
}
// main is intentionally empty: this file exists to define removeDuplicates.
func main() {
} | Programs/026Remove Duplicates from Sorted Array/026Remove Duplicates from Sorted Array.go | 0.516108 | 0.733571 | 026Remove Duplicates from Sorted Array.go | starcoder |
package geodbtools
import "fmt"
// RecordBelongsRightFunc tests if a given record, given the byte-slice representation of
// its IP address, belongs into the right sub-tree or not.
// This function is used during build of a RecordTree.
type RecordBelongsRightFunc func(b []byte, depth uint) bool

// RecordTree represents the rooted binary tree of records
// Each node inside the tree is either a leaf, or has two children (left and right)
type RecordTree struct {
	records []Record   // all records covered by this node and its children
	left *RecordTree   // nil when no record falls on the left side
	right *RecordTree  // nil when no record falls on the right side
}

// Leaf returns the leaf value of the tree; nil when this node holds more or
// fewer than exactly one record.
func (t *RecordTree) Leaf() Record {
	if len(t.records) == 1 {
		return t.records[0]
	}
	return nil
}

// Left returns the left sub-tree
func (t *RecordTree) Left() *RecordTree {
	return t.left
}

// Right returns the right sub-tree
func (t *RecordTree) Right() *RecordTree {
	return t.right
}

// Records returns all records the tree node and its children represent
func (t *RecordTree) Records() []Record {
	return t.records
}
// Build builds the sub-tree starting at the given depth, a slice of records and a RecordBelongsRightFunc.
// Records are partitioned by belongsRightFn at the current depth; each side
// recurses with depth-1 while it holds more than one record, and becomes a
// leaf otherwise. Running out of depth with multiple records still grouped
// together yields the depth<0 error.
// NOTE(review): the deferred recover converts any panic (e.g. from
// belongsRightFn indexing past a short address) into an error; it is
// installed after the depth check, so that error path returns plainly.
func (t *RecordTree) Build(depth int, records []Record, belongsRightFn RecordBelongsRightFunc) (err error) {
	t.records = records
	if depth < 0 {
		err = fmt.Errorf("depth<0! #records=%d", len(records))
		return
	}
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("recovered from panic: %v", r)
		}
	}()
	recordsLeft := make([]Record, 0, len(records))
	recordsRight := make([]Record, 0, len(records))
	for _, r := range records {
		if belongsRightFn(r.GetNetwork().IP, uint(depth)) {
			recordsRight = append(recordsRight, r)
		} else {
			recordsLeft = append(recordsLeft, r)
		}
	}
	if len(recordsLeft) > 0 {
		t.left = &RecordTree{}
		if len(recordsLeft) > 1 {
			if err = t.left.Build(depth-1, recordsLeft, belongsRightFn); err != nil {
				return
			}
		} else {
			t.left.records = recordsLeft
		}
	}
	if len(recordsRight) > 0 {
		t.right = &RecordTree{}
		if len(recordsRight) > 1 {
			if err = t.right.Build(depth-1, recordsRight, belongsRightFn); err != nil {
				return
			}
		} else {
			t.right.records = recordsRight
		}
	}
	return
}
// NewRecordTree initializes and builds a new RecordTree, given a slice of records.
// On a build error the tree is discarded and nil is returned alongside err.
func NewRecordTree(maxDepth uint, records []Record, belongsRightFunc RecordBelongsRightFunc) (t *RecordTree, err error) {
	t = &RecordTree{}
	if err = t.Build(int(maxDepth), records, belongsRightFunc); err != nil {
		t = nil
	}
	return
} | record_tree.go | 0.822795 | 0.661766 | record_tree.go | starcoder |
package numbercompression
//Ref https://stackoverflow.com/questions/5901153/compress-large-integers-into-smallest-possible-string
//base64::urlsafe()

// defaultEncodeDic maps each digit value 0..63 to its character in the
// URL-safe base64 alphabet (A-Z, a-z, 0-9, '-', '_'). It is built
// programmatically from the alphabet string so the table cannot drift out of
// sync.
var defaultEncodeDic = func() map[int]string {
	const alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
	m := make(map[int]string, len(alphabet))
	for i := 0; i < len(alphabet); i++ {
		m[i] = string(alphabet[i])
	}
	return m
}()
// defaultDecodeDic is the inverse of defaultEncodeDic: it maps each
// character of the URL-safe base64 alphabet back to its digit value 0..63.
// It is built programmatically from the alphabet string so the table cannot
// drift out of sync.
var defaultDecodeDic = func() map[string]int {
	const alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
	m := make(map[string]int, len(alphabet))
	for i := 0; i < len(alphabet); i++ {
		m[string(alphabet[i])] = i
	}
	return m
}()
// CompresNumber encodes input in base len(dic), least-significant digit
// first, mapping each digit value to its character through dic.
// Note: negative inputs fall through the loop and yield an empty string.
func CompresNumber(input int64, dic map[int]string) string {
	if input == 0 {
		return dic[0]
	}
	base := int64(len(dic))
	encoded := ""
	for n := input; n > 0; n /= base {
		encoded += dic[int(n%base)]
	}
	return encoded
}
// UncompresNumber decodes a string produced by CompresNumber: each character
// is a base-len(dic) digit, least significant first, looked up through dic.
func UncompresNumber(encoded string, dic map[string]int) int64 {
	base := int64(len(dic))
	runes := []rune(encoded)
	var res int64
	for i := len(runes) - 1; i >= 0; i-- {
		res = res*base + int64(dic[string(runes[i])])
	}
	return res
}
// CompresNumberDefault encodes input with the package's URL-safe base64
// alphabet.
func CompresNumberDefault(input int64) string {
	return CompresNumber(input, defaultEncodeDic)
}

// UncompresNumberDefault decodes a string produced by CompresNumberDefault.
func UncompresNumberDefault(input string) int64 {
	return UncompresNumber(input, defaultDecodeDic)
} | numbercompression/numbercompression.go | 0.513181 | 0.422147 | numbercompression.go | starcoder |
package iso20022
// Provides further details specific to the individual direct debit transaction(s) included in the message.
// Generated-style ISO 20022 message component: each optional sub-element has
// a corresponding Add*/Set* accessor below that lazily allocates it.
type DirectDebitTransactionInformation11 struct {

	// Set of elements used to reference a payment instruction.
	PaymentIdentification *PaymentIdentification1 `xml:"PmtId"`

	// Set of elements used to further specify the type of transaction.
	PaymentTypeInformation *PaymentTypeInformation24 `xml:"PmtTpInf,omitempty"`

	// Amount of money to be moved between the debtor and creditor, before deduction of charges, expressed in the currency as ordered by the initiating party.
	InstructedAmount *ActiveOrHistoricCurrencyAndAmount `xml:"InstdAmt"`

	// Specifies which party/parties will bear the charges associated with the processing of the payment transaction.
	ChargeBearer *ChargeBearerType1Code `xml:"ChrgBr,omitempty"`

	// Provides information specific to the direct debit mandate.
	DirectDebitTransaction *DirectDebitTransaction7 `xml:"DrctDbtTx,omitempty"`

	// Ultimate party to which an amount of money is due.
	UltimateCreditor *PartyIdentification43 `xml:"UltmtCdtr,omitempty"`

	// Financial institution servicing an account for the debtor.
	DebtorAgent *BranchAndFinancialInstitutionIdentification5 `xml:"DbtrAgt"`

	// Unambiguous identification of the account of the debtor agent at its servicing agent in the payment chain.
	DebtorAgentAccount *CashAccount24 `xml:"DbtrAgtAcct,omitempty"`

	// Party that owes an amount of money to the (ultimate) creditor.
	Debtor *PartyIdentification43 `xml:"Dbtr"`

	// Unambiguous identification of the account of the debtor to which a debit entry will be made as a result of the transaction.
	DebtorAccount *CashAccount24 `xml:"DbtrAcct"`

	// Ultimate party that owes an amount of money to the (ultimate) creditor.
	UltimateDebtor *PartyIdentification43 `xml:"UltmtDbtr,omitempty"`

	// Further information, related to the processing of the payment instruction, that may need to be acted upon by the creditor agent, depending on agreement between creditor and the creditor agent.
	InstructionForCreditorAgent *Max140Text `xml:"InstrForCdtrAgt,omitempty"`

	// Underlying reason for the payment transaction.
	// Usage: Purpose is used by the end-customers, that is initiating party, (ultimate) debtor, (ultimate) creditor to provide information concerning the nature of the payment. Purpose is a content element, which is not used for processing by any of the agents involved in the payment chain.
	Purpose *Purpose2Choice `xml:"Purp,omitempty"`

	// Information needed due to regulatory and statutory requirements.
	RegulatoryReporting []*RegulatoryReporting3 `xml:"RgltryRptg,omitempty"`

	// Provides details on the tax.
	Tax *TaxInformation3 `xml:"Tax,omitempty"`

	// Provides information related to the handling of the remittance information by any of the agents in the transaction processing chain.
	RelatedRemittanceInformation []*RemittanceLocation2 `xml:"RltdRmtInf,omitempty"`

	// Information supplied to enable the matching of an entry with the items that the transfer is intended to settle, such as commercial invoices in an accounts' receivable system.
	RemittanceInformation *RemittanceInformation7 `xml:"RmtInf,omitempty"`
}

// Add* accessors allocate the corresponding child element and return it for
// the caller to populate; Set* accessors assign scalar values directly.
func (d *DirectDebitTransactionInformation11) AddPaymentIdentification() *PaymentIdentification1 {
	d.PaymentIdentification = new(PaymentIdentification1)
	return d.PaymentIdentification
}

func (d *DirectDebitTransactionInformation11) AddPaymentTypeInformation() *PaymentTypeInformation24 {
	d.PaymentTypeInformation = new(PaymentTypeInformation24)
	return d.PaymentTypeInformation
}

// SetInstructedAmount sets the amount together with its currency code.
func (d *DirectDebitTransactionInformation11) SetInstructedAmount(value, currency string) {
	d.InstructedAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}

func (d *DirectDebitTransactionInformation11) SetChargeBearer(value string) {
	d.ChargeBearer = (*ChargeBearerType1Code)(&value)
}

func (d *DirectDebitTransactionInformation11) AddDirectDebitTransaction() *DirectDebitTransaction7 {
	d.DirectDebitTransaction = new(DirectDebitTransaction7)
	return d.DirectDebitTransaction
}

func (d *DirectDebitTransactionInformation11) AddUltimateCreditor() *PartyIdentification43 {
	d.UltimateCreditor = new(PartyIdentification43)
	return d.UltimateCreditor
}

func (d *DirectDebitTransactionInformation11) AddDebtorAgent() *BranchAndFinancialInstitutionIdentification5 {
	d.DebtorAgent = new(BranchAndFinancialInstitutionIdentification5)
	return d.DebtorAgent
}

func (d *DirectDebitTransactionInformation11) AddDebtorAgentAccount() *CashAccount24 {
	d.DebtorAgentAccount = new(CashAccount24)
	return d.DebtorAgentAccount
}

func (d *DirectDebitTransactionInformation11) AddDebtor() *PartyIdentification43 {
	d.Debtor = new(PartyIdentification43)
	return d.Debtor
}

func (d *DirectDebitTransactionInformation11) AddDebtorAccount() *CashAccount24 {
	d.DebtorAccount = new(CashAccount24)
	return d.DebtorAccount
}

func (d *DirectDebitTransactionInformation11) AddUltimateDebtor() *PartyIdentification43 {
	d.UltimateDebtor = new(PartyIdentification43)
	return d.UltimateDebtor
}

func (d *DirectDebitTransactionInformation11) SetInstructionForCreditorAgent(value string) {
	d.InstructionForCreditorAgent = (*Max140Text)(&value)
}

func (d *DirectDebitTransactionInformation11) AddPurpose() *Purpose2Choice {
	d.Purpose = new(Purpose2Choice)
	return d.Purpose
}

// AddRegulatoryReporting appends and returns a new repeatable element.
func (d *DirectDebitTransactionInformation11) AddRegulatoryReporting() *RegulatoryReporting3 {
	newValue := new (RegulatoryReporting3)
	d.RegulatoryReporting = append(d.RegulatoryReporting, newValue)
	return newValue
}

func (d *DirectDebitTransactionInformation11) AddTax() *TaxInformation3 {
	d.Tax = new(TaxInformation3)
	return d.Tax
}

// AddRelatedRemittanceInformation appends and returns a new repeatable element.
func (d *DirectDebitTransactionInformation11) AddRelatedRemittanceInformation() *RemittanceLocation2 {
	newValue := new (RemittanceLocation2)
	d.RelatedRemittanceInformation = append(d.RelatedRemittanceInformation, newValue)
	return newValue
}

func (d *DirectDebitTransactionInformation11) AddRemittanceInformation() *RemittanceInformation7 {
	d.RemittanceInformation = new(RemittanceInformation7)
	return d.RemittanceInformation
} | DirectDebitTransactionInformation11.go | 0.793946 | 0.482185 | DirectDebitTransactionInformation11.go | starcoder |
package audio
// Format describes an audio stream: channel count, sample rate, buffer
// layout and sample type. A zero field (Unspecified) means that aspect
// has not been constrained yet.
type Format struct {
	NumChans int // number of channels (0 if unset)
	Rate int // samples per second (0 if unset)
	Layout int // one of the layout constants below (0 if unset)
	Type int // one of the type constants below (0 if unset)
}
// Formatted is implemented by values that can report the Format of a
// named endpoint.
type Formatted interface {
	GetFormat(name string) Format
}
// FormatSetter is implemented by values whose Format can be assigned.
type FormatSetter interface {
	SetFormat(f Format)
}
// Unspecified is the zero value used for any Format field that has not
// been constrained.
const Unspecified = 0
// layouts (earlier are considered better)
const (
	Interleaved = iota + 1
	NonInterleaved
	Mono
)
// types (earlier are considered better)
const (
	Float32Type = iota + 1
	Int16Type
)
// Eq reports whether f0 and f1 are identical in every field.
func (f0 Format) Eq(f1 Format) bool {
	// Format holds only comparable int fields, so direct struct
	// equality is exactly the field-by-field comparison.
	return f0 == f1
}
// GetFormat implements Formatted; the endpoint name is ignored and the
// Format itself is returned.
func (f Format) GetFormat(_ string) Format {
	return f
}
// Match reports whether f0 and f1 are compatible: each field must be
// equal, or unspecified on at least one side (see match).
//
// (Fixed gofmt violation: the original declaration was `func(f0 ...)`
// with no space after the func keyword.)
func (f0 Format) Match(f1 Format) bool {
	return match(f0.NumChans, f1.NumChans) &&
		match(f0.Rate, f1.Rate) &&
		match(f0.Layout, f1.Layout) &&
		match(f0.Type, f1.Type)
}
// FullySpecified reports whether every field of f has been set to a
// non-zero (specified) value.
func (f Format) FullySpecified() bool {
	for _, field := range [4]int{f.NumChans, f.Rate, f.Layout, f.Type} {
		if field == Unspecified {
			return false
		}
	}
	return true
}
// AllocBuffer allocates a Buffer of n frames matching f's sample type
// and layout. It panics if the (Type, Layout) combination is not one it
// knows how to allocate.
func (f Format) AllocBuffer(n int) Buffer {
	switch f.Type {
	case Float32Type:
		switch f.Layout {
		case Interleaved:
			return AllocNFloat32Buf(f.NumChans, n)
		case NonInterleaved:
			return AllocFloat32NBuf(f.NumChans, n)
		case Mono:
			return make(Float32Buf, n)
		}
	case Int16Type:
		// Int16 buffers are only supported as single-channel mono.
		if f.NumChans == 1 && f.Layout == Mono {
			return make(Int16Buf, n)
		}
	}
	panic("AllocBuffer on invalid format")
}
// Combine returns a copy of f0 with every unspecified field filled in
// from the corresponding field of f1. Fields already set in f0 are
// left untouched.
func (f0 Format) Combine(f1 Format) Format {
	fill := func(dst *int, src int) {
		if *dst == Unspecified {
			*dst = src
		}
	}
	fill(&f0.NumChans, f1.NumChans)
	fill(&f0.Rate, f1.Rate)
	fill(&f0.Layout, f1.Layout)
	fill(&f0.Type, f1.Type)
	return f0
}
// CombineBest merges f0 and f1 field by field, keeping the larger
// numeric value of each pair. Because Unspecified is 0, a specified
// value always wins over an unspecified one.
//
// NOTE(review): the layout/type const comments say earlier (smaller)
// values are "considered better", yet this keeps the numerically
// larger (later) value — confirm whether "best" here is intentionally
// the later constant or only meant to beat Unspecified.
func (f0 Format) CombineBest(f1 Format) Format {
	if f0.NumChans < f1.NumChans {
		f0.NumChans = f1.NumChans
	}
	if f0.Rate < f1.Rate {
		f0.Rate = f1.Rate
	}
	if f0.Layout < f1.Layout {
		f0.Layout = f1.Layout
	}
	if f0.Type < f1.Type {
		f0.Type = f1.Type
	}
	return f0
}
// TimeToSamples converts t to a sample count at f's rate. When t is a
// real (wall-clock) duration, t.t is presumably in nanoseconds (it is
// scaled by Rate/1e9 — TODO confirm) and panics if no rate is set;
// otherwise t.t is already a sample count and is returned unchanged.
func (f Format) TimeToSamples(t Time) int64 {
	if t.real {
		if f.Rate == 0 {
			panic("unspecified rate")
		}
		return t.t * int64(f.Rate) / 1e9
	}
	return t.t
}
func match(a, b int) bool {
return a == b || a == Unspecified || b == Unspecified
} | _vendor/src/github.com/rogpeppe/rog-go/exp/abc/audio/format.go | 0.649356 | 0.407687 | format.go | starcoder |
package main
import (
"fmt"
)
// Trimesh is a triangle-mesh shape built from parsed OBJ data: shared
// vertex/normal/texture-coordinate tables plus triangles that index
// into them.
type Trimesh struct {
	Namer
	Grouper
	V []Tuple // Vertices
	VN []Tuple // Vertex normals
	VT []Tuple // Vertex texture coordinates
	T []MeshTriangle
	material *Material // TODO! Handling of material needs to be refactored
}
// MeshTriangle is one triangle of a Trimesh. The V/VN/VT index triples
// point into the parent mesh's tables; edges, normal, tangent and
// bitangent are precomputed by NewTrimesh.
type MeshTriangle struct {
	mesh *Trimesh
	V [3]int // Vertex indices
	VN [3]int // Vertex normals
	VT [3]int // Texture vertices
	E1 Tuple // Edges
	E2 Tuple
	N Tuple // Normal
	T Tuple // Tangent vector
	B Tuple // Bitangent vector
	Mat *Material // per-triangle override; nil means use the mesh material
}
// NewTrimesh builds a Trimesh from parsed OBJ data, keeping only faces
// whose group equals `group` (pass -1 to keep all faces). For each
// kept triangle it precomputes the two edge vectors, the face normal,
// and — when texture coordinates exist — the tangent and bitangent
// vectors used later for normal mapping.
func NewTrimesh(info *ObjInfo, group int) *Trimesh {
	mesh := Trimesh{}
	mesh.material = NewMaterial()
	mesh.SetNameForKind("mesh")
	mesh.SetTransform()
	mesh.V = info.V
	mesh.VN = info.VN
	mesh.VT = info.VT
	for _, f := range info.F {
		if group == -1 || f.G == group {
			var p [3]Tuple
			mt := MeshTriangle{}
			mt.mesh = &mesh
			mt.V = f.V
			mt.VN = f.VN
			mt.VT = f.VT
			for i := 0; i < 3; i++ {
				p[i] = mesh.V[mt.V[i]] // Vertex
			}
			// Compute normal from the two edges out of p[0].
			mt.E1 = p[1].Sub(p[0])
			mt.E2 = p[2].Sub(p[0])
			mt.N = mt.E2.CrossProduct(mt.E1).Normalize()
			// Compute tangent and bitangent vectors from the UV deltas.
			if len(mesh.VT) > 0 {
				vt0 := mesh.VT[mt.VT[0]]
				vt1 := mesh.VT[mt.VT[1]]
				vt2 := mesh.VT[mt.VT[2]]
				u1 := vt1.X - vt0.X
				v1 := vt1.Y - vt0.Y
				u2 := vt2.X - vt0.X
				v2 := vt2.Y - vt0.Y
				// NOTE(review): d is unguarded — a degenerate UV triangle
				// (u1*v2 == v1*u2) divides by zero; confirm inputs exclude this.
				d := 1 / (u1*v2 - v1*u2)
				q1 := mt.E1
				q2 := mt.E2
				T := Vector(q1.X*v2-q2.X*v1, q1.Y*v2-q2.Y*v1, q1.Z*v2-q2.Z*v1).Mul(d)
				B := Vector(q2.X*u1-q1.X*u2, q2.Y*u1-q1.Y*u2, q2.Z*u1-q1.Z*u2).Mul(d)
				mt.T = T.Normalize()
				mt.B = B.Normalize()
			} else {
				// No texture coordinates: fill T/B with a recognizable sentinel.
				mt.T = Vector(0xBAD, 0, 0)
				mt.B = Vector(0xBAD, 0, 0)
			}
			mt.Mat = f.M
			mesh.T = append(mesh.T, mt)
		}
	}
	return &mesh
}
// Material returns the mesh-level material.
func (s *Trimesh) Material() *Material {
	return s.material
}
// SetMaterial sets the mesh-level material and clears every
// per-triangle override so all triangles fall back to it.
func (s *Trimesh) SetMaterial(m *Material) {
	s.material = m
	for i := range s.T {
		s.T[i].SetMaterial(nil)
	}
}
// AddToGroup adds each triangle of the mesh to the given group.
func (s *Trimesh) AddToGroup(group *Group) {
	for i := range s.T {
		group.Add(&(s.T[i]))
	}
}
// Transform returns the parent mesh's transform; triangles have no
// transform of their own.
func (t *MeshTriangle) Transform() Matrix {
	return t.mesh.Transform()
}
// InverseTransform returns the parent mesh's inverse transform.
func (t *MeshTriangle) InverseTransform() Matrix {
	return t.mesh.InverseTransform()
}
// SetTransform should never be called on a single mesh triangle
// (it forwards to the whole mesh).
func (t *MeshTriangle) SetTransform(transforms ...Matrix) {
	t.mesh.SetTransform(transforms...)
}
// Clone returns a shallow copy of the triangle; the mesh pointer is
// shared with the original.
func (t *MeshTriangle) Clone() Groupable {
	o := *t
	return &o
}
// Bounds returns the axis-aligned bounding box enclosing the
// triangle's three vertices.
func (t *MeshTriangle) Bounds() Box {
	a := t.mesh.V[t.V[0]]
	b := t.mesh.V[t.V[1]]
	c := t.mesh.V[t.V[2]]
	lo := Point(Min3(a.X, b.X, c.X), Min3(a.Y, b.Y, c.Y), Min3(a.Z, b.Z, c.Z))
	hi := Point(Max3(a.X, b.X, c.X), Max3(a.Y, b.Y, c.Y), Max3(a.Z, b.Z, c.Z))
	return Box{lo, hi}
}
// Material returns the triangle's own material if set, falling back to
// the mesh-level material.
func (t *MeshTriangle) Material() *Material {
	if t.Mat != nil {
		return t.Mat
	}
	return t.mesh.Material()
}
// SetMaterial sets (or with nil clears) the per-triangle override.
func (t *MeshTriangle) SetMaterial(m *Material) {
	t.Mat = m
}
// Name builds a debugging name from the mesh name and vertex indices.
func (t *MeshTriangle) Name() string {
	// Let's make a name for debugging
	return fmt.Sprintf("%s_t_%d_%d_%d", t.mesh.Name(), t.V[0], t.V[1], t.V[2])
}
// SetName should never be called on a single mesh triangle
func (t *MeshTriangle) SetName(name string) {
}
// Parent returns the parent container of the owning mesh.
func (t *MeshTriangle) Parent() Container {
	return t.mesh.Parent()
}
// SetParent forwards to the owning mesh; all triangles share its parent.
func (t *MeshTriangle) SetParent(p Container) {
	t.mesh.SetParent(p)
}
// AddIntersections performs Möller–Trumbore ray/triangle intersection
// (edge vectors + determinant + barycentric u,v) and, on a hit,
// records the intersection together with u,v for later interpolation
// in NormalAtHit.
func (t *MeshTriangle) AddIntersections(ray Ray, xs *Intersections) {
	dirCrossE2 := ray.Direction.CrossProduct(t.E2)
	det := t.E1.DotProduct(dirCrossE2)
	const E = 1e-9 // We need higher precision than usual here for models with many small triangles
	// Check if ray is parallel to triangle plane
	if det > -E && det < +E {
		return
	}
	f := 1.0 / det
	p1 := t.mesh.V[t.V[0]]
	p1ToOrigin := ray.Origin.Sub(p1)
	u := f * p1ToOrigin.DotProduct(dirCrossE2)
	// Ray misses by the p1-p3 edge
	if u < 0 || u > 1 {
		return
	}
	originCrossE1 := p1ToOrigin.CrossProduct(t.E1)
	v := f * ray.Direction.DotProduct(originCrossE1)
	// Ray misses by the p1-p2 or p2-p3 edge
	if v < 0 || (u+v) > 1 {
		return
	}
	// Ray hits: h is the distance along the ray; stash u,v with the hit.
	h := f * t.E2.DotProduct(originCrossE1)
	id := xs.AddWithData(t, h)
	id.tU = u
	id.tV = v
}
// NormalAtHit computes the surface normal at an intersection. Using the
// barycentric u,v stored by AddIntersections it interpolates texture
// coordinates (into ii.U/ii.V) and vertex normals when present, applies
// an optional normal map through the tangent/bitangent/normal basis,
// and finally transforms the normal into world space.
func (t *MeshTriangle) NormalAtHit(ii *IntersectionInfo, xs *Intersections) Tuple {
	id := xs.Data(&ii.Intersection)
	// Texture: barycentric blend of the three texture coordinates.
	if len(t.mesh.VT) > 0 {
		VT1 := t.mesh.VT[t.VT[0]]
		VT2 := t.mesh.VT[t.VT[1]]
		VT3 := t.mesh.VT[t.VT[2]]
		vt := VT2.Mul(id.tU).Add(VT3.Mul(id.tV)).Add(VT1.Mul(1 - id.tU - id.tV))
		ii.U = vt.X
		ii.V = vt.Y
	}
	// Normal: default to the flat face normal.
	N := t.N
	// This is a trick to mark the triangle edges (wireframe),
	// but the line width depends on the triangle area and therefore is not constant,
	// which should be fixed
	/*
		const EdgeWidth = 0.02
		if id.tU < EdgeWidth || id.tV < EdgeWidth || (1-id.tU-id.tV) < EdgeWidth {
			return Vector(0,0,0)
		}
	*/
	if len(t.mesh.VN) > 0 {
		N1 := t.mesh.VN[t.VN[0]]
		N2 := t.mesh.VN[t.VN[1]]
		N3 := t.mesh.VN[t.VN[2]]
		N = N2.Mul(id.tU).Add(N3.Mul(id.tV)).Add(N1.Mul(1 - id.tU - id.tV))
	}
	// Apply normal map if present.
	if nmap := ii.GetNormalMap(); nmap != nil {
		n := nmap.NormalAtHit(ii)
		// TODO: I think T and B should be interpolated like the normal!
		T := t.T
		B := t.B
		ii.SurfNormalv = (T.Mul(n.X).Add(B.Mul(n.Y)).Add(N.Mul(n.Z))).Normalize()
		ii.HasSurfNormalv = true
	}
	return t.mesh.NormalToWorld(N)
}
// WorldToObject forwards to the owning mesh's coordinate conversion.
func (t *MeshTriangle) WorldToObject(point Tuple) Tuple {
	return t.mesh.WorldToObject(point)
}
package timelearn
// Counts contains statistics about the current state of problems.
type Counts struct {
	// The number of "Active" problems. Something is considered
	// active if it is due for learning.
	Active int
	// The number of problems that are being learned, but aren't
	// ready to be asked again.
	Later int
	// The number of problems the user has never been shown
	Unlearned int
	// Counts of all of the problems, groups into histogram
	// buckets based on the learning interval of the problem. The
	// bucket names are a short description of the interval
	Buckets []Bucket
}
// A Bucket is a single histogram bucket describing the number of
// problems of a given category.
type Bucket struct {
	Name string // Short description of this bucket.
	Count int // The number of problems in this bucket.
}
// A BucketBin represents a single bucket that will be returned to the
// caller. Limit is the number of this unit before moving to the next
// bucket.
type bucketBin struct {
	name string
	limit float64
}
// countBuckets defines the histogram units; the limits multiply
// cumulatively in GetCounts (60s, 60min, 24hr, 30day), and the huge
// final limit makes "mon" a catch-all bucket.
var countBuckets = []bucketBin{
	bucketBin{"sec", 60.0},
	bucketBin{"min", 60.0},
	bucketBin{"hr", 24.0},
	bucketBin{"day", 30.0},
	bucketBin{"mon", 1.0e30},
}
// GetCounts retrieves statistics about the problems available: how many
// are unlearned (never shown), active (due now), or due later, plus a
// histogram of learning intervals bucketed by countBuckets.
func (t *T) GetCounts() (*Counts, error) {
	// Problems with no learning record have never been shown.
	var unlearned int
	err := t.conn.QueryRow(`
		SELECT COUNT(*)
		FROM probs
		WHERE id NOT IN (SELECT probid FROM learning)`).Scan(&unlearned)
	if err != nil {
		return nil, err
	}
	now := t.now()
	// Active: learning problems whose next-ask time has passed.
	var active int
	err = t.conn.QueryRow(`
		SELECT COUNT (*)
		FROM probs JOIN learning
		WHERE probs.id = learning.probid
		AND next <= ?`,
		timeToDb(now)).Scan(&active)
	if err != nil {
		return nil, err
	}
	// Later: learning problems not yet due.
	var later int
	err = t.conn.QueryRow(`
		SELECT COUNT (*)
		FROM probs JOIN learning
		WHERE probs.id = learning.probid
		AND next > ?`,
		timeToDb(now)).Scan(&later)
	if err != nil {
		return nil, err
	}
	// Place each problem into the various buckets. The bucket limits
	// multiply cumulatively, so each query selects a half-open range
	// (prior, interval] of the learning interval.
	// NOTE(review): interval appears to be stored in seconds — confirm
	// against the schema.
	interval := 1.0
	prior := 0.0
	var bucks = make([]Bucket, 0, len(countBuckets))
	for _, cbuck := range countBuckets {
		interval *= cbuck.limit
		var count int
		err = t.conn.QueryRow(`
			SELECT COUNT(*)
			FROM probs JOIN learning
			WHERE probs.id = learning.probid
			AND interval <= ? AND interval > ?`,
			interval, prior).Scan(&count)
		if err != nil {
			return nil, err
		}
		prior = interval
		bucks = append(bucks, Bucket{cbuck.name, count})
	}
	return &Counts{
		Active: active,
		Later: later,
		Unlearned: unlearned,
		Buckets: bucks,
	}, nil
}
package tls
import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
)
//------------------------------------------------------------------------------
// Documentation is a markdown description of how and why to use TLS settings.
// (Presumably embedded verbatim in generated user-facing docs, so keep it
// valid markdown.)
const Documentation = `### TLS
Custom TLS settings can be used to override system defaults. This includes
providing a collection of root certificate authorities, providing a list of
client certificates to use for client verification and skipping certificate
verification.
Client certificates can either be added by file or by raw contents:
` + "``` yaml" + `
enabled: true
client_certs:
  - cert_file: ./example.pem
    key_file: ./example.key
  - cert: foo
    key: bar
` + "```" + ``
//------------------------------------------------------------------------------
// ClientCertConfig contains config fields for a client certificate.
// Either the file-path pair (CertFile/KeyFile) or the raw-content pair
// (Cert/Key) is used; Load prefers the file pair when both file fields
// are set.
type ClientCertConfig struct {
	CertFile string `json:"cert_file" yaml:"cert_file"`
	KeyFile string `json:"key_file" yaml:"key_file"`
	Cert string `json:"cert" yaml:"cert"`
	Key string `json:"key" yaml:"key"`
}
// Config contains configuration params for TLS.
type Config struct {
	Enabled bool `json:"enabled" yaml:"enabled"`
	RootCAsFile string `json:"root_cas_file" yaml:"root_cas_file"`
	InsecureSkipVerify bool `json:"skip_cert_verify" yaml:"skip_cert_verify"`
	ClientCertificates []ClientCertConfig `json:"client_certs" yaml:"client_certs"`
}
// NewConfig creates a new Config with default values: disabled, no
// root CA file, verification enabled, and an empty (non-nil) client
// certificate list.
func NewConfig() Config {
	var conf Config
	conf.ClientCertificates = []ClientCertConfig{}
	return conf
}
//------------------------------------------------------------------------------
// Get returns a valid *tls.Config based on the configuration values of
// Config: an optional root CA pool read from RootCAsFile, client
// certificates loaded via ClientCertConfig.Load, and the skip-verify
// flag.
func (c *Config) Get() (*tls.Config, error) {
	var rootCAs *x509.CertPool
	if len(c.RootCAsFile) > 0 {
		caCert, err := ioutil.ReadFile(c.RootCAsFile)
		if err != nil {
			return nil, err
		}
		rootCAs = x509.NewCertPool()
		// AppendCertsFromPEM reports whether any certificate was parsed.
		// Previously its result was ignored, so a malformed file left an
		// empty pool and caused cryptic verification failures later.
		if !rootCAs.AppendCertsFromPEM(caCert) {
			return nil, fmt.Errorf("no certificates parsed from root CAs file %q", c.RootCAsFile)
		}
	}
	clientCerts := make([]tls.Certificate, 0, len(c.ClientCertificates))
	for _, conf := range c.ClientCertificates {
		cert, err := conf.Load()
		if err != nil {
			return nil, err
		}
		clientCerts = append(clientCerts, cert)
	}
	return &tls.Config{
		InsecureSkipVerify: c.InsecureSkipVerify,
		RootCAs:            rootCAs,
		Certificates:       clientCerts,
	}, nil
}
// Load returns a TLS certificate. When both file paths are configured
// the pair is loaded from disk; otherwise the raw cert/key strings are
// parsed directly.
func (c *ClientCertConfig) Load() (tls.Certificate, error) {
	if c.CertFile == "" || c.KeyFile == "" {
		return tls.X509KeyPair([]byte(c.Cert), []byte(c.Key))
	}
	return tls.LoadX509KeyPair(c.CertFile, c.KeyFile)
}
//------------------------------------------------------------------------------ | lib/util/tls/type.go | 0.731346 | 0.522202 | type.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// SensitivityLabelAssignment
// SensitivityLabelAssignment is a generated Microsoft Graph model type;
// unknown JSON properties are preserved in additionalData.
type SensitivityLabelAssignment struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // Indicates whether the label assignment is done automatically, as a standard, or a privileged operation. The possible values are: standard, privileged, auto, unknownFutureValue.
    assignmentMethod *SensitivityLabelAssignmentMethod
    // The unique identifier for the sensitivity label assigned to the file.
    sensitivityLabelId *string
    // The unique identifier for the tenant that hosts the file when this label is applied.
    tenantId *string
}
// NewSensitivityLabelAssignment instantiates a new sensitivityLabelAssignment and sets the default values.
func NewSensitivityLabelAssignment()(*SensitivityLabelAssignment) {
    res := new(SensitivityLabelAssignment)
    res.SetAdditionalData(map[string]interface{}{})
    return res
}
// CreateSensitivityLabelAssignmentFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// This type has no subtypes, so the parse node is ignored and the base type is always returned.
func CreateSensitivityLabelAssignmentFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewSensitivityLabelAssignment(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
// Nil receivers are tolerated (generated accessor convention) and yield nil.
func (m *SensitivityLabelAssignment) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    } else {
        return m.additionalData
    }
}
// GetAssignmentMethod gets the assignmentMethod property value. Indicates whether the label assignment is done automatically, as a standard, or a privileged operation. The possible values are: standard, privileged, auto, unknownFutureValue.
func (m *SensitivityLabelAssignment) GetAssignmentMethod()(*SensitivityLabelAssignmentMethod) {
    if m == nil {
        return nil
    } else {
        return m.assignmentMethod
    }
}
// GetFieldDeserializers the deserialization information for the current model.
// Returns one deserializer closure per known JSON property; each reads its
// value from the parse node and stores it via the matching setter.
func (m *SensitivityLabelAssignment) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
    res["assignmentMethod"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetEnumValue(ParseSensitivityLabelAssignmentMethod)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetAssignmentMethod(val.(*SensitivityLabelAssignmentMethod))
        }
        return nil
    }
    res["sensitivityLabelId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetSensitivityLabelId(val)
        }
        return nil
    }
    res["tenantId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetTenantId(val)
        }
        return nil
    }
    return res
}
// GetSensitivityLabelId gets the sensitivityLabelId property value. The unique identifier for the sensitivity label assigned to the file.
// Nil receivers are tolerated (generated accessor convention) and yield nil.
func (m *SensitivityLabelAssignment) GetSensitivityLabelId()(*string) {
    if m == nil {
        return nil
    } else {
        return m.sensitivityLabelId
    }
}
// GetTenantId gets the tenantId property value. The unique identifier for the tenant that hosts the file when this label is applied.
func (m *SensitivityLabelAssignment) GetTenantId()(*string) {
    if m == nil {
        return nil
    } else {
        return m.tenantId
    }
}
// Serialize serializes information the current object: each known
// property is written through the serialization writer, then any
// preserved additional data is appended.
func (m *SensitivityLabelAssignment) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    if m.GetAssignmentMethod() != nil {
        // Enums are serialized by their string representation.
        cast := (*m.GetAssignmentMethod()).String()
        err := writer.WriteStringValue("assignmentMethod", &cast)
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("sensitivityLabelId", m.GetSensitivityLabelId())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("tenantId", m.GetTenantId())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteAdditionalData(m.GetAdditionalData())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
// All setters are no-ops on a nil receiver (generated accessor convention).
func (m *SensitivityLabelAssignment) SetAdditionalData(value map[string]interface{})() {
    if m != nil {
        m.additionalData = value
    }
}
// SetAssignmentMethod sets the assignmentMethod property value. Indicates whether the label assignment is done automatically, as a standard, or a privileged operation. The possible values are: standard, privileged, auto, unknownFutureValue.
func (m *SensitivityLabelAssignment) SetAssignmentMethod(value *SensitivityLabelAssignmentMethod)() {
    if m != nil {
        m.assignmentMethod = value
    }
}
// SetSensitivityLabelId sets the sensitivityLabelId property value. The unique identifier for the sensitivity label assigned to the file.
func (m *SensitivityLabelAssignment) SetSensitivityLabelId(value *string)() {
    if m != nil {
        m.sensitivityLabelId = value
    }
}
// SetTenantId sets the tenantId property value. The unique identifier for the tenant that hosts the file when this label is applied.
func (m *SensitivityLabelAssignment) SetTenantId(value *string)() {
    if m != nil {
        m.tenantId = value
    }
}
package aho_corasick
import (
"unsafe"
)
// iDFA wraps a concrete automaton implementation (standard, byte-class
// and/or premultiplied variants) and exposes the search entry points.
type iDFA struct {
	atom automaton
}
// MatchKind returns the match semantics of the underlying automaton.
func (d iDFA) MatchKind() *matchKind {
	return d.atom.MatchKind()
}
// StartState returns the automaton's start state id.
func (d iDFA) StartState() stateID {
	return d.atom.StartState()
}
// MaxPatternLen returns the length of the longest pattern.
func (d iDFA) MaxPatternLen() int {
	return d.atom.Repr().max_pattern_len
}
// PatternCount returns the number of patterns compiled in.
func (d iDFA) PatternCount() int {
	return d.atom.Repr().pattern_count
}
// Prefilter returns the automaton's prefilter, if any.
func (d iDFA) Prefilter() prefilter {
	return d.atom.Prefilter()
}
// UsePrefilter reports whether the prefilter is both present and safe
// to use (i.e. it only looks for start-of-match positions).
func (d iDFA) UsePrefilter() bool {
	p := d.Prefilter()
	if p == nil {
		return false
	}
	return !p.LooksForNonStartOfMatch()
}
// OverlappingFindAt resumes an overlapping search at `at`.
func (d iDFA) OverlappingFindAt(prestate *prefilterState, haystack []byte, at int, state_id *stateID, match_index *int) *Match {
	return overlappingFindAt(d.atom, prestate, haystack, at, state_id, match_index)
}
// EarliestFindAt finds the earliest-ending match starting at `at`.
func (d iDFA) EarliestFindAt(prestate *prefilterState, haystack []byte, at int, state_id *stateID) *Match {
	return earliestFindAt(d.atom, prestate, haystack, at, state_id)
}
// FindAtNoState searches from `at` without exposing automaton state.
func (d iDFA) FindAtNoState(prestate *prefilterState, haystack []byte, at int) *Match {
	return findAtNoState(d.atom, prestate, haystack, at)
}
// LeftmostFindAtNoState runs a leftmost search starting at `at` without
// exposing automaton state to the caller.
//
// (Fixed receiver name: every other iDFA method uses `d`; this one used
// `n`, violating Go's per-type receiver-name consistency convention.)
func (d iDFA) LeftmostFindAtNoState(prestate *prefilterState, haystack []byte, at int) *Match {
	return leftmostFindAtNoState(d.atom, prestate, haystack, at)
}
// iDFABuilder configures DFA construction: whether state ids are
// premultiplied by the alphabet size and whether byte classes are used
// to compress the alphabet.
type iDFABuilder struct {
	premultiply bool
	byte_classes bool
}
// build compiles an NFA into a dense-transition-table DFA. It resolves
// every failure transition eagerly (via nfaNextStateMemoized), copies
// per-state match lists, then wraps the table in one of four concrete
// variants depending on the premultiply/byte-class options.
func (d *iDFABuilder) build(nfa *iNFA) iDFA {
	var byteClasses byteClasses
	if d.byte_classes {
		byteClasses = nfa.byteClasses
	} else {
		// singletons maps each byte to its own class (no compression).
		byteClasses = singletons()
	}
	alphabet_len := byteClasses.alphabetLen()
	// Dense table: one row of alphabet_len transitions per state,
	// initialized to the failure sentinel.
	trans := make([]stateID, alphabet_len*len(nfa.states))
	for i := range trans {
		trans[i] = failedStateID
	}
	matches := make([][]pattern, len(nfa.states))
	var p prefilter
	if nfa.prefil != nil {
		p = nfa.prefil.clone()
	}
	rep := iRepr{
		match_kind: nfa.matchKind,
		anchored: nfa.anchored,
		premultiplied: false,
		start_id: nfa.startID,
		max_pattern_len: nfa.maxPatternLen,
		pattern_count: nfa.patternCount,
		state_count: len(nfa.states),
		max_match: failedStateID,
		heap_bytes: 0,
		prefilter: p,
		byte_classes: byteClasses,
		trans: trans,
		matches: matches,
	}
	for id := 0; id < len(nfa.states); id += 1 {
		rep.matches[id] = append(rep.matches[id], nfa.states[id].matches...)
		fail := nfa.states[id].fail
		nfa.iterAllTransitions(&byteClasses, stateID(id), func(tr *next) {
			// A failed transition in the NFA is resolved by walking the
			// failure chain; states already compiled can be read back
			// from the partially-built DFA (memoization).
			if tr.id == failedStateID {
				tr.id = nfaNextStateMemoized(nfa, &rep, stateID(id), fail, tr.key)
			}
			rep.setNextState(stateID(id), tr.key, tr.id)
		})
	}
	// Reorder states so match states are contiguous, enabling the cheap
	// id <= max_match test, then record the heap footprint.
	rep.shuffleMatchStates()
	rep.calculateSize()
	if d.premultiply {
		rep.premultiply()
		if byteClasses.isSingleton() {
			return iDFA{&iPremultiplied{rep}}
		} else {
			return iDFA{&iPremultipliedByteClass{&rep}}
		}
	}
	if byteClasses.isSingleton() {
		return iDFA{&iStandard{rep}}
	}
	return iDFA{&iByteClass{&rep}}
}
// iByteClass is the DFA variant that compresses the alphabet through
// byte classes but does not premultiply state ids: a transition lookup
// is trans[id*alphabet_len + class(b)].
//
// NOTE(review): receiver names alternate between `p` and `b` across
// these methods; they should be unified for consistency.
type iByteClass struct {
	repr *iRepr
}
func (p iByteClass) FindAtNoState(prefilterState *prefilterState, bytes []byte, i int) *Match {
	return findAtNoState(p, prefilterState, bytes, i)
}
func (p iByteClass) Repr() *iRepr {
	return p.repr
}
func (p iByteClass) MatchKind() *matchKind {
	return &p.repr.match_kind
}
func (p iByteClass) Anchored() bool {
	return p.repr.anchored
}
func (p iByteClass) Prefilter() prefilter {
	return p.repr.prefilter
}
func (p iByteClass) StartState() stateID {
	return p.repr.start_id
}
func (b iByteClass) IsValid(id stateID) bool {
	return int(id) < b.repr.state_count
}
func (b iByteClass) IsMatchState(id stateID) bool {
	return b.repr.isMatchState(id)
}
func (b iByteClass) IsMatchOrDeadState(id stateID) bool {
	return b.repr.isMatchStateOrDeadState(id)
}
func (b iByteClass) GetMatch(id stateID, i int, i2 int) *Match {
	return b.repr.GetMatch(id, i, i2)
}
func (b iByteClass) MatchCount(id stateID) int {
	return b.repr.MatchCount(id)
}
// NextState maps the input byte to its equivalence class, then indexes
// the dense table row for `id`.
func (b iByteClass) NextState(id stateID, b2 byte) stateID {
	alphabet_len := b.repr.byte_classes.alphabetLen()
	input := b.repr.byte_classes.bytes[b2]
	o := int(id)*alphabet_len + int(input)
	return b.repr.trans[o]
}
// NextStateNoFail is NextState plus an invariant check: a fully built
// DFA must never yield the failure sentinel.
func (p iByteClass) NextStateNoFail(id stateID, b byte) stateID {
	next := p.NextState(id, b)
	if next == failedStateID {
		panic("automaton should never return fail_id for next state")
	}
	return next
}
// The remaining methods delegate to the shared search driver functions.
func (p iByteClass) StandardFindAt(prefilterState *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return standardFindAt(&p, prefilterState, bytes, i, id)
}
func (p iByteClass) StandardFindAtImp(prefilterState *prefilterState, prefilter prefilter, bytes []byte, i int, id *stateID) *Match {
	return standardFindAtImp(&p, prefilterState, prefilter, bytes, i, id)
}
func (p iByteClass) LeftmostFindAt(prefilterState *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return leftmostFindAt(&p, prefilterState, bytes, i, id)
}
func (p iByteClass) LeftmostFindAtImp(prefilterState *prefilterState, prefilter prefilter, bytes []byte, i int, id *stateID) *Match {
	return leftmostFindAtImp(&p, prefilterState, prefilter, bytes, i, id)
}
func (p iByteClass) LeftmostFindAtNoState(prefilterState *prefilterState, bytes []byte, i int) *Match {
	return leftmostFindAtNoState(&p, prefilterState, bytes, i)
}
func (p iByteClass) LeftmostFindAtNoStateImp(prefilterState *prefilterState, prefilter prefilter, bytes []byte, i int) *Match {
	return leftmostFindAtNoStateImp(&p, prefilterState, prefilter, bytes, i)
}
func (p iByteClass) OverlappingFindAt(prefilterState *prefilterState, bytes []byte, i int, id *stateID, i2 *int) *Match {
	return overlappingFindAt(&p, prefilterState, bytes, i, id, i2)
}
func (p iByteClass) EarliestFindAt(prefilterState *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return earliestFindAt(&p, prefilterState, bytes, i, id)
}
func (p iByteClass) FindAt(prefilterState *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return findAt(&p, prefilterState, bytes, i, id)
}
// iPremultipliedByteClass is the DFA variant with both optimizations:
// state ids are premultiplied by the alphabet length (so a transition
// is trans[id + class(b)]) and bytes are compressed into classes. The
// raw state index is recovered as id / alphabetLen.
type iPremultipliedByteClass struct {
	repr *iRepr
}
func (p iPremultipliedByteClass) FindAtNoState(prefilterState *prefilterState, bytes []byte, i int) *Match {
	return findAtNoState(p, prefilterState, bytes, i)
}
func (p iPremultipliedByteClass) Repr() *iRepr {
	return p.repr
}
func (p iPremultipliedByteClass) MatchKind() *matchKind {
	return &p.repr.match_kind
}
func (p iPremultipliedByteClass) Anchored() bool {
	return p.repr.anchored
}
func (p iPremultipliedByteClass) Prefilter() prefilter {
	return p.repr.prefilter
}
func (p iPremultipliedByteClass) StartState() stateID {
	return p.repr.start_id
}
// IsValid divides out the premultiplication factor before comparing
// against the state count.
func (p iPremultipliedByteClass) IsValid(id stateID) bool {
	return (int(id) / p.repr.alphabetLen()) < p.repr.state_count
}
func (p iPremultipliedByteClass) IsMatchState(id stateID) bool {
	return p.repr.isMatchState(id)
}
func (p iPremultipliedByteClass) IsMatchOrDeadState(id stateID) bool {
	return p.repr.isMatchStateOrDeadState(id)
}
// GetMatch returns the match_index'th pattern ending at `end` for a
// match state, or nil for ids beyond max_match (non-match states).
func (p iPremultipliedByteClass) GetMatch(id stateID, match_index int, end int) *Match {
	if id > p.repr.max_match {
		return nil
	}
	m := p.repr.matches[int(id)/p.repr.alphabetLen()][match_index]
	return &Match{
		pattern: m.PatternID,
		len: m.PatternLength,
		end: end,
	}
}
func (p iPremultipliedByteClass) MatchCount(id stateID) int {
	o := int(id) / p.repr.alphabetLen()
	return len(p.repr.matches[o])
}
// NextState: premultiplied id plus byte class — no multiplication at
// lookup time.
func (p iPremultipliedByteClass) NextState(id stateID, b byte) stateID {
	input := p.repr.byte_classes.bytes[b]
	o := int(id) + int(input)
	return p.repr.trans[o]
}
// TODO (original note: "this leaks garbage") — unclear what is meant;
// possibly an allocation on the panic path. Verify before relying on
// this in hot loops.
func (p iPremultipliedByteClass) NextStateNoFail(id stateID, b byte) stateID {
	next := p.NextState(id, b)
	if next == failedStateID {
		panic("automaton should never return fail_id for next state")
	}
	return next
}
// The remaining methods delegate to the shared search driver functions.
func (p iPremultipliedByteClass) StandardFindAt(prefilterState *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return standardFindAt(&p, prefilterState, bytes, i, id)
}
func (p iPremultipliedByteClass) StandardFindAtImp(prefilterState *prefilterState, prefilter prefilter, bytes []byte, i int, id *stateID) *Match {
	return standardFindAtImp(&p, prefilterState, prefilter, bytes, i, id)
}
func (p iPremultipliedByteClass) LeftmostFindAt(prefilterState *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return leftmostFindAt(&p, prefilterState, bytes, i, id)
}
func (p iPremultipliedByteClass) LeftmostFindAtImp(prefilterState *prefilterState, prefilter prefilter, bytes []byte, i int, id *stateID) *Match {
	return leftmostFindAtImp(&p, prefilterState, prefilter, bytes, i, id)
}
func (p iPremultipliedByteClass) LeftmostFindAtNoState(prefilterState *prefilterState, bytes []byte, i int) *Match {
	return leftmostFindAtNoState(&p, prefilterState, bytes, i)
}
func (p iPremultipliedByteClass) LeftmostFindAtNoStateImp(prefilterState *prefilterState, prefilter prefilter, bytes []byte, i int) *Match {
	return leftmostFindAtNoStateImp(&p, prefilterState, prefilter, bytes, i)
}
func (p iPremultipliedByteClass) OverlappingFindAt(prefilterState *prefilterState, bytes []byte, i int, id *stateID, i2 *int) *Match {
	return overlappingFindAt(&p, prefilterState, bytes, i, id, i2)
}
func (p iPremultipliedByteClass) EarliestFindAt(prefilterState *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return earliestFindAt(&p, prefilterState, bytes, i, id)
}
func (p iPremultipliedByteClass) FindAt(prefilterState *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return findAt(&p, prefilterState, bytes, i, id)
}
// iPremultiplied is the DFA variant with premultiplied state ids over
// the full 256-byte alphabet (no byte classes): a transition is
// trans[id + b] and the raw state index is id / 256.
type iPremultiplied struct {
	repr iRepr
}
func (p iPremultiplied) FindAtNoState(prefilterState *prefilterState, bytes []byte, i int) *Match {
	return findAtNoState(p, prefilterState, bytes, i)
}
func (p iPremultiplied) Repr() *iRepr {
	return &p.repr
}
func (p iPremultiplied) MatchKind() *matchKind {
	return &p.repr.match_kind
}
func (p iPremultiplied) Anchored() bool {
	return p.repr.anchored
}
func (p iPremultiplied) Prefilter() prefilter {
	return p.repr.prefilter
}
func (p iPremultiplied) StartState() stateID {
	return p.repr.start_id
}
// IsValid divides out the fixed 256 premultiplication factor.
func (p iPremultiplied) IsValid(id stateID) bool {
	return int(id)/256 < p.repr.state_count
}
func (p iPremultiplied) IsMatchState(id stateID) bool {
	return p.repr.isMatchState(id)
}
func (p iPremultiplied) IsMatchOrDeadState(id stateID) bool {
	return p.repr.isMatchStateOrDeadState(id)
}
// GetMatch returns the match_index'th pattern ending at `end` for a
// match state, or nil for ids beyond max_match (non-match states).
func (p iPremultiplied) GetMatch(id stateID, match_index int, end int) *Match {
	if id > p.repr.max_match {
		return nil
	}
	m := p.repr.matches[int(id)/256][match_index]
	return &Match{
		pattern: m.PatternID,
		len: m.PatternLength,
		end: end,
	}
}
func (p iPremultiplied) MatchCount(id stateID) int {
	return len(p.repr.matches[int(id)/256])
}
// NextState: premultiplied id plus raw byte.
func (p iPremultiplied) NextState(id stateID, b byte) stateID {
	o := int(id) + int(b)
	return p.repr.trans[o]
}
// NextStateNoFail is NextState plus an invariant check: a fully built
// DFA must never yield the failure sentinel.
func (p iPremultiplied) NextStateNoFail(id stateID, b byte) stateID {
	next := p.NextState(id, b)
	if next == failedStateID {
		panic("automaton should never return fail_id for next state")
	}
	return next
}
// The remaining methods delegate to the shared search driver functions.
func (p iPremultiplied) StandardFindAt(prefilterState *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return standardFindAt(&p, prefilterState, bytes, i, id)
}
func (p iPremultiplied) StandardFindAtImp(prefilterState *prefilterState, prefilter prefilter, bytes []byte, i int, id *stateID) *Match {
	return standardFindAtImp(&p, prefilterState, prefilter, bytes, i, id)
}
func (p iPremultiplied) LeftmostFindAt(prefilterState *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return leftmostFindAt(&p, prefilterState, bytes, i, id)
}
func (p iPremultiplied) LeftmostFindAtImp(prefilterState *prefilterState, prefilter prefilter, bytes []byte, i int, id *stateID) *Match {
	return leftmostFindAtImp(&p, prefilterState, prefilter, bytes, i, id)
}
func (p iPremultiplied) LeftmostFindAtNoState(prefilterState *prefilterState, bytes []byte, i int) *Match {
	return leftmostFindAtNoState(&p, prefilterState, bytes, i)
}
func (p iPremultiplied) LeftmostFindAtNoStateImp(prefilterState *prefilterState, prefilter prefilter, bytes []byte, i int) *Match {
	return leftmostFindAtNoStateImp(&p, prefilterState, prefilter, bytes, i)
}
func (p iPremultiplied) OverlappingFindAt(prefilterState *prefilterState, bytes []byte, i int, id *stateID, i2 *int) *Match {
	return overlappingFindAt(&p, prefilterState, bytes, i, id, i2)
}
func (p iPremultiplied) EarliestFindAt(prefilterState *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return earliestFindAt(&p, prefilterState, bytes, i, id)
}
func (p iPremultiplied) FindAt(prefilterState *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return findAt(&p, prefilterState, bytes, i, id)
}
// nfaNextStateMemoized computes the next state for `current` on `input`
// while the DFA is being built. States with IDs below `populating` have
// already been determinized, so their transitions are read straight from
// the DFA table; otherwise the NFA is consulted, following failure links
// until a non-fail transition is found.
func nfaNextStateMemoized(nfa *iNFA, dfa *iRepr, populating stateID, current stateID, input byte) stateID {
	for {
		if current < populating {
			return dfa.nextState(current, input)
		}
		next := nfa.states[current].nextState(input)
		if next != failedStateID {
			return next
		}
		// No direct transition: fall back along the failure link and retry.
		current = nfa.states[current].fail
	}
}
// newDFABuilder returns a DFA builder with the default options enabled:
// premultiplied state IDs and byte-class compression.
func newDFABuilder() *iDFABuilder {
	return &iDFABuilder{
		premultiply: true,
		byte_classes: true,
	}
}
// iStandard is the non-premultiplied DFA variant: state IDs are plain
// indices and each state owns a fixed 256-entry transition row (see
// NextState below).
type iStandard struct {
	repr iRepr
}
// FindAtNoState runs a search that does not track caller-visible state.
// (Receivers renamed from `p` to `s` for consistency with the other
// iStandard methods; the value receiver is kept so the method set is
// unchanged.)
func (s iStandard) FindAtNoState(prefilterState *prefilterState, bytes []byte, i int) *Match {
	return findAtNoState(&s, prefilterState, bytes, i)
}
// Repr exposes the underlying table representation.
func (s iStandard) Repr() *iRepr {
	return &s.repr
}
func (s *iStandard) MatchKind() *matchKind {
	return &s.repr.match_kind
}
func (s *iStandard) Anchored() bool {
	return s.repr.anchored
}
func (s *iStandard) Prefilter() prefilter {
	return s.repr.prefilter
}
func (s *iStandard) StartState() stateID {
	return s.repr.start_id
}
// IsValid reports whether id is within the automaton's state range.
func (s *iStandard) IsValid(id stateID) bool {
	return int(id) < s.repr.state_count
}
func (s *iStandard) IsMatchState(id stateID) bool {
	return s.repr.isMatchState(id)
}
func (s *iStandard) IsMatchOrDeadState(id stateID) bool {
	return s.repr.isMatchStateOrDeadState(id)
}
func (s *iStandard) GetMatch(id stateID, match_index int, end int) *Match {
	return s.repr.GetMatch(id, match_index, end)
}
func (s *iStandard) MatchCount(id stateID) int {
	return s.repr.MatchCount(id)
}
// NextState indexes the flat transition table: each state owns a
// 256-entry row, so the offset is current*256 + input.
func (s *iStandard) NextState(current stateID, input byte) stateID {
	o := int(current)*256 + int(input)
	return s.repr.trans[o]
}
// NextStateNoFail is NextState plus an invariant check: a fully built
// DFA must never transition to the fail state.
func (s *iStandard) NextStateNoFail(id stateID, b byte) stateID {
	next := s.NextState(id, b)
	if next == failedStateID {
		panic("automaton should never return fail_id for next state")
	}
	return next
}
// The methods below delegate to the generic search implementations.
func (s *iStandard) StandardFindAt(state *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return standardFindAt(s, state, bytes, i, id)
}
func (s *iStandard) StandardFindAtImp(state *prefilterState, prefilter prefilter, bytes []byte, i int, id *stateID) *Match {
	return standardFindAtImp(s, state, prefilter, bytes, i, id)
}
func (s *iStandard) LeftmostFindAt(state *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return leftmostFindAt(s, state, bytes, i, id)
}
func (s *iStandard) LeftmostFindAtImp(state *prefilterState, prefilter prefilter, bytes []byte, i int, id *stateID) *Match {
	return leftmostFindAtImp(s, state, prefilter, bytes, i, id)
}
func (s *iStandard) LeftmostFindAtNoState(state *prefilterState, bytes []byte, i int) *Match {
	return leftmostFindAtNoState(s, state, bytes, i)
}
func (s *iStandard) LeftmostFindAtNoStateImp(state *prefilterState, prefilter prefilter, bytes []byte, i int) *Match {
	return leftmostFindAtNoStateImp(s, state, prefilter, bytes, i)
}
func (s *iStandard) OverlappingFindAt(state *prefilterState, bytes []byte, i int, id *stateID, i2 *int) *Match {
	return overlappingFindAt(s, state, bytes, i, id, i2)
}
func (s *iStandard) EarliestFindAt(state *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return earliestFindAt(s, state, bytes, i, id)
}
func (s *iStandard) FindAt(state *prefilterState, bytes []byte, i int, id *stateID) *Match {
	return findAt(s, state, bytes, i, id)
}
// iRepr is the core table-based DFA representation shared by the
// Standard and Premultiplied variants.
type iRepr struct {
	match_kind matchKind // match semantics (see matchKind)
	anchored bool // whether searches are anchored at the start position
	premultiplied bool // set once premultiply() has scaled all state IDs
	start_id stateID // initial state (rescaled by premultiply)
	max_pattern_len int
	pattern_count int
	state_count int
	max_match stateID // highest match-state ID; see isMatchState
	heap_bytes int // estimated heap usage, filled in by calculateSize
	prefilter prefilter
	byte_classes byteClasses // input byte -> equivalence class mapping
	trans []stateID // flattened state_count x alphabetLen transition table
	matches [][]pattern // per-state list of matching patterns
}
// premultiply rewrites every stored state ID as id*alphabetLen so that
// searches can add the input byte directly to the state instead of
// multiplying on each lookup. States 0 and 1 are skipped — presumably the
// fail/dead sentinel states; confirm against the stateID constants.
func (r *iRepr) premultiply() {
	if r.premultiplied || r.state_count <= 1 {
		return
	}
	alpha_len := r.alphabetLen()
	for id := 2; id < r.state_count; id++ {
		offset := id * alpha_len
		slice := r.trans[offset : offset+alpha_len]
		for i := range slice {
			// The dead state keeps its value.
			if slice[i] == deadStateID {
				continue
			}
			slice[i] = stateID(int(slice[i]) * alpha_len)
		}
	}
	r.premultiplied = true
	// The start and max-match markers must be rescaled as well so
	// comparisons against them remain valid.
	r.start_id = stateID(int(r.start_id) * alpha_len)
	r.max_match = stateID(int(r.max_match) * alpha_len)
}
// setNextState records the transition from `from` on raw input byte b,
// after mapping b to its byte-class representative.
func (r *iRepr) setNextState(from stateID, b byte, to stateID) {
	alphabet_len := r.alphabetLen()
	b = r.byte_classes.bytes[b]
	r.trans[int(from)*alphabet_len+int(b)] = to
}
// alphabetLen is the number of byte equivalence classes (row width of
// the transition table).
func (r *iRepr) alphabetLen() int {
	return r.byte_classes.alphabetLen()
}
// nextState looks up the transition from `from` on raw input byte b.
// Only valid before premultiplication (IDs are multiplied here).
func (r *iRepr) nextState(from stateID, b byte) stateID {
	alphabet_len := r.alphabetLen()
	b = r.byte_classes.bytes[b]
	return r.trans[int(from)*alphabet_len+int(b)]
}
// isMatchState relies on shuffleMatchStates having packed all match
// states into IDs (deadStateID, max_match].
func (r *iRepr) isMatchState(id stateID) bool {
	return id <= r.max_match && id > deadStateID
}
// isMatchStateOrDeadState is the same check without excluding the dead
// state.
func (r *iRepr) isMatchStateOrDeadState(id stateID) bool {
	return id <= r.max_match
}
// GetMatch returns the match_index'th pattern matching in the given
// state, with its end offset set to `end`, or nil when the state is not
// a match state or the index is out of range.
func (r *iRepr) GetMatch(id stateID, match_index int, end int) *Match {
	if id > r.max_match {
		return nil
	}
	i := int(id)
	// Bug fix: both bounds checks previously used `>`, which let an
	// index equal to the length slip through to a panic below.
	if i >= len(r.matches) {
		return nil
	}
	matches := r.matches[i]
	if match_index >= len(matches) {
		return nil
	}
	p := matches[match_index]
	return &Match{
		pattern: p.PatternID,
		len:     p.PatternLength,
		end:     end,
	}
}
// MatchCount returns the number of patterns matching in the given state.
// Note: unlike GetMatch, no bounds check is performed here.
func (r *iRepr) MatchCount(id stateID) int {
	return len(r.matches[id])
}
// swapStates exchanges the transition rows and match lists of two states.
// Only valid before premultiplication, since afterwards IDs encode row
// offsets and other states' transitions would be left stale.
func (r *iRepr) swapStates(id1 stateID, id2 stateID) {
	if r.premultiplied {
		panic("cannot shuffle match states of premultiplied iDFA")
	}
	// Hoist the loop-invariant alphabet length; it was previously
	// re-evaluated on every loop-condition check (and twice more above).
	alphaLen := r.alphabetLen()
	o1 := int(id1) * alphaLen
	o2 := int(id2) * alphaLen
	for b := 0; b < alphaLen; b++ {
		r.trans[o1+b], r.trans[o2+b] = r.trans[o2+b], r.trans[o1+b]
	}
	r.matches[int(id1)], r.matches[int(id2)] = r.matches[int(id2)], r.matches[int(id1)]
}
// calculateSize estimates the heap usage of the representation and
// stores it in heap_bytes. The multipliers approximate Go's memory
// layout (3 words per []pattern slice header, 2 ints per pattern entry);
// this is an estimate, not an exact accounting.
func (r *iRepr) calculateSize() {
	intSize := int(unsafe.Sizeof(stateID(1)))
	size := (len(r.trans) * intSize) + (len(r.matches) * (intSize * 3))
	for _, state_matches := range r.matches {
		size += len(state_matches) * (intSize * 2)
	}
	var hb int
	if r.prefilter != nil {
		hb = r.prefilter.HeapBytes()
	}
	size += hb
	r.heap_bytes = size
}
// shuffleMatchStates reorders states so that all match states occupy the
// lowest IDs, which lets isMatchState be a single comparison against
// max_match. Must run before premultiplication.
func (r *iRepr) shuffleMatchStates() {
	if r.premultiplied {
		panic("cannot shuffle match states of premultiplied iDFA")
	}
	if r.state_count <= 1 {
		return
	}
	// Scan upward from the start state for the first non-match state.
	first_non_match := int(r.start_id)
	for first_non_match < r.state_count && len(r.matches[first_non_match]) > 0 {
		first_non_match += 1
	}
	// swaps[old] holds the new ID of every state that moved;
	// failedStateID marks "unchanged".
	swaps := make([]stateID, r.state_count)
	for i := range swaps {
		swaps[i] = failedStateID
	}
	// Walk down from the top, swapping each match state found into the
	// first_non_match slot, then advance first_non_match past any match
	// states already in place.
	cur := r.state_count - 1
	for cur > first_non_match {
		if len(r.matches[cur]) > 0 {
			r.swapStates(stateID(cur), stateID(first_non_match))
			swaps[cur] = stateID(first_non_match)
			swaps[first_non_match] = stateID(cur)
			first_non_match += 1
			for first_non_match < cur && len(r.matches[first_non_match]) > 0 {
				first_non_match += 1
			}
		}
		cur -= 1
	}
	// Rewrite every transition (and the start state) to the new IDs.
	for id := 0; id < r.state_count; id++ {
		alphabet_len := r.alphabetLen()
		offset := id * alphabet_len
		slice := r.trans[offset : offset+alphabet_len]
		for i := range slice {
			if swaps[slice[i]] != failedStateID {
				slice[i] = swaps[slice[i]]
			}
		}
	}
	if swaps[r.start_id] != failedStateID {
		r.start_id = swaps[r.start_id]
	}
	// Everything below first_non_match is now a match state.
	r.max_match = stateID(first_non_match - 1)
}
// pattern identifies one pattern matching in a state: its index in the
// original pattern set and its length in bytes.
type pattern struct {
	PatternID int
	PatternLength int
}
package extinfo
// TeamScore contains the name of the team and the score, i.e. flags scored in flag modes / points gained for holding bases in capture modes / frags achieved in DM modes / skulls collected
type TeamScore struct {
	Name string `json:"name"` // name of the team, e.g. "good"
	Score int `json:"score"` // flags in ctf modes, frags in deathmatch modes, points in capture, skulls in collect
	Bases []int `json:"bases"` // the numbers/IDs of the bases the team possesses (only used in capture modes)
}
// TeamScoresRaw contains the game mode as raw int, the seconds left in the game, and the team scores keyed by team name
type TeamScoresRaw struct {
	GameMode int `json:"gameMode"` // current game mode
	SecsLeft int `json:"secsLeft"` // the time left until intermission in seconds
	Scores map[string]TeamScore `json:"scores"` // a team score for each team, mapped to the team's name
}
// TeamScores contains the game mode as human readable string, the seconds left in the game, and the team scores.
// Note: the string GameMode field shadows the embedded TeamScoresRaw.GameMode int.
type TeamScores struct {
	TeamScoresRaw
	GameMode string `json:"gameMode"` // current game mode
}
// GetTeamScoresRaw queries a Sauerbraten server at addr on port for the teams' names and scores and returns the raw response and/or an error in case something went wrong or the server is not running a team mode.
func (s *Server) GetTeamScoresRaw() (teamScoresRaw TeamScoresRaw, err error) {
	request := buildRequest(InfoTypeExtended, ExtInfoTypeTeamScores, 0)
	response, err := s.queryServer(request)
	if err != nil {
		return
	}
	teamScoresRaw.GameMode, err = response.ReadInt()
	if err != nil {
		return
	}
	teamScoresRaw.SecsLeft, err = response.ReadInt()
	if err != nil {
		return
	}
	teamScoresRaw.Scores = map[string]TeamScore{}
	// Each team entry is: name, score, base count, then that many base IDs.
	for response.HasRemaining() {
		var name string
		name, err = response.ReadString()
		if err != nil {
			return
		}
		var score int
		score, err = response.ReadInt()
		if err != nil {
			return
		}
		var numBases int
		numBases, err = response.ReadInt()
		if err != nil {
			return
		}
		// A negative count is clamped to zero (no bases reported).
		if numBases < 0 {
			numBases = 0
		}
		// Bug fix: the slice was previously created with *length* numBases
		// and then appended to, yielding numBases leading zeros before the
		// real base IDs. Allocate capacity only.
		bases := make([]int, 0, numBases)
		for i := 0; i < numBases; i++ {
			var base int
			base, err = response.ReadInt()
			if err != nil {
				return
			}
			bases = append(bases, base)
		}
		teamScoresRaw.Scores[name] = TeamScore{name, score, bases}
	}
	return
}
// GetTeamScores queries a Sauerbraten server at addr on port for the teams' names and scores and returns the parsed response and/or an error in case something went wrong or the server is not running a team mode. Parsed response means that the int value sent as game mode is translated into the human readable name, e.g. '12' -> "insta ctf".
func (s *Server) GetTeamScores() (TeamScores, error) {
teamScores := TeamScores{}
teamScoresRaw, err := s.GetTeamScoresRaw()
if err != nil {
return teamScores, err
}
teamScores.TeamScoresRaw = teamScoresRaw
teamScores.GameMode = getGameModeName(teamScoresRaw.GameMode)
return teamScores, nil
} | team_scores.go | 0.665302 | 0.407157 | team_scores.go | starcoder |
package autoscaler
import (
"context"
"errors"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"go.uber.org/zap"
)
// Measurement represents the type of the autoscaler metric to be reported.
// The constant values double as indices into the measurements slice below.
type Measurement int
const (
	// DesiredPodCountM is used for the pod count that autoscaler wants
	DesiredPodCountM Measurement = iota
	// RequestedPodCountM is used for the requested pod count from kubernetes
	RequestedPodCountM
	// ActualPodCountM is used for the actual number of pods we have
	ActualPodCountM
	// ObservedPodCountM is used for the observed number of pods we have
	ObservedPodCountM
	// ObservedStableConcurrencyM is the average of requests count in each 60 second stable window
	ObservedStableConcurrencyM
	// ObservedPanicConcurrencyM is the average of requests count in each 6 second panic window
	ObservedPanicConcurrencyM
	// TargetConcurrencyM is the desired number of concurrent requests for each pod
	TargetConcurrencyM
	// PanicM is used as a flag to indicate if autoscaler is in panic mode or not
	PanicM
)
var (
	// measurements is indexed by the Measurement constants above, so
	// measurements[m] is the opencensus measure for metric m.
	measurements = []*stats.Float64Measure{
		DesiredPodCountM: stats.Float64(
			"desired_pod_count",
			"Number of pods autoscaler wants to allocate",
			stats.UnitNone),
		RequestedPodCountM: stats.Float64(
			"requested_pod_count",
			"Number of pods autoscaler requested from Kubernetes",
			stats.UnitNone),
		ActualPodCountM: stats.Float64(
			"actual_pod_count",
			"Number of pods that are allocated currently",
			stats.UnitNone),
		ObservedPodCountM: stats.Float64(
			"observed_pod_count",
			"Number of pods that are observed currently",
			stats.UnitNone),
		ObservedStableConcurrencyM: stats.Float64(
			"observed_stable_concurrency",
			"Average of requests count in each 60 second stable window",
			stats.UnitNone),
		ObservedPanicConcurrencyM: stats.Float64(
			"observed_panic_concurrency",
			"Average of requests count in each 6 second panic window",
			stats.UnitNone),
		TargetConcurrencyM: stats.Float64(
			"target_concurrency_per_pod",
			"The desired number of concurrent requests for each pod",
			stats.UnitNone),
		PanicM: stats.Float64(
			"panic_mode",
			"1 if autoscaler is in panic mode, 0 otherwise",
			stats.UnitNone),
	}
	// Tag keys attached to every reported measurement; created in init.
	namespaceTagKey tag.Key
	configTagKey tag.Key
	revisionTagKey tag.Key
)
// init creates the tag keys and registers one LastValue view per
// measurement so the autoscaler metrics can be exported. It panics on
// failure, which indicates a programmer error (invalid key names or a
// conflicting duplicate view registration).
func init() {
	var err error
	// Tag keys must conform to the restrictions described in
	// go.opencensus.io/tag/validate.go. Currently those restrictions are:
	// - length between 1 and 255 inclusive
	// - characters are printable US-ASCII
	namespaceTagKey, err = tag.NewKey("configuration_namespace")
	if err != nil {
		panic(err)
	}
	configTagKey, err = tag.NewKey("configuration")
	if err != nil {
		panic(err)
	}
	revisionTagKey, err = tag.NewKey("revision")
	if err != nil {
		panic(err)
	}
	// Every view is identical apart from its measure: LastValue
	// aggregation, the same three tag keys, and a description equal to
	// the measure's own. Build them in a loop instead of repeating the
	// literal once per metric. Register can return an error if a
	// previously-registered view has the same name with a different
	// value; view names default to the measure names here.
	tagKeys := []tag.Key{namespaceTagKey, configTagKey, revisionTagKey}
	views := make([]*view.View, 0, len(measurements))
	for _, m := range measurements {
		views = append(views, &view.View{
			Description: m.Description(),
			Measure:     m,
			Aggregation: view.LastValue(),
			TagKeys:     tagKeys,
		})
	}
	if err = view.Register(views...); err != nil {
		panic(err)
	}
}
// StatsReporter defines the interface for sending autoscaler metrics
type StatsReporter interface {
	// Report records value v for measurement m.
	Report(m Measurement, v float64) error
}
// Reporter holds cached metric objects to report autoscaler metrics
type Reporter struct {
	ctx context.Context // pre-tagged context reused for every Record call
	logger *zap.SugaredLogger
	initialized bool // set by NewStatsReporter; guards Report
}
// NewStatsReporter creates a reporter that collects and reports autoscaler
// metrics, tagged with the given namespace, configuration and revision.
// NOTE(review): the Reporter.logger field is never set here — confirm
// whether any caller relies on it.
func NewStatsReporter(podNamespace string, config string, revision string) (*Reporter, error) {
	r := &Reporter{}
	// Our tags are static. So, we can get away with creating a single context
	// and reuse it for reporting all of our metrics.
	ctx, err := tag.New(
		context.Background(),
		tag.Insert(namespaceTagKey, podNamespace),
		tag.Insert(configTagKey, config),
		tag.Insert(revisionTagKey, revision))
	if err != nil {
		return nil, err
	}
	r.ctx = ctx
	r.initialized = true
	return r, nil
}
// Report captures value v for measurement m, recording it against the
// reporter's pre-tagged context. Returns an error if the reporter was
// not built via NewStatsReporter.
func (r *Reporter) Report(m Measurement, v float64) error {
	if !r.initialized {
		return errors.New("StatsReporter is not initialized yet")
	}
	stats.Record(r.ctx, measurements[m].M(v))
	return nil
}
package aep
import (
"github.com/rioam2/rifx"
)
// LayerQualityLevel denotes the quality level of a layer (eg: Best, Draft, Wireframe)
type LayerQualityLevel uint16
const (
	// LayerQualityBest enumerates the value of a layer with Best Quality
	LayerQualityBest LayerQualityLevel = 0x0002
	// LayerQualityDraft enumerates the value of a layer with Draft Quality
	LayerQualityDraft LayerQualityLevel = 0x0001
	// LayerQualityWireframe enumerates the value of a layer with Wireframe Quality
	LayerQualityWireframe LayerQualityLevel = 0x0000
)
// LayerSamplingMode denotes the sampling mode of a layer (eg: Bilinear, Bicubic)
type LayerSamplingMode byte
const (
	// LayerSamplingModeBilinear enumerates the value of a layer with Bilinear Sampling
	LayerSamplingModeBilinear LayerSamplingMode = 0x00
	// LayerSamplingModeBicubic enumerates the value of a layer with Bicubic Sampling
	LayerSamplingModeBicubic LayerSamplingMode = 0x01
)
// LayerFrameBlendMode denotes the frame blending mode of a layer (eg: Frame mix, Pixel motion)
type LayerFrameBlendMode byte
const (
	// LayerFrameBlendModeFrameMix enumerates the value of a layer with Frame Mix Frame Blending
	LayerFrameBlendModeFrameMix LayerFrameBlendMode = 0x00
	// LayerFrameBlendModePixelMotion enumerates the value of a layer with Pixel Motion Frame Blending
	LayerFrameBlendModePixelMotion LayerFrameBlendMode = 0x01
)
// Layer describes a single layer in a composition. The boolean flags
// mirror the bit fields of the on-disk "ldta" chunk (see parseLayer).
type Layer struct {
	Index uint32
	Name string
	SourceID uint32
	Quality LayerQualityLevel
	SamplingMode LayerSamplingMode
	FrameBlendMode LayerFrameBlendMode
	GuideEnabled bool
	SoloEnabled bool
	ThreeDEnabled bool
	AdjustmentLayerEnabled bool
	CollapseTransformEnabled bool
	ShyEnabled bool
	LockEnabled bool
	FrameBlendEnabled bool
	MotionBlurEnabled bool
	EffectsEnabled bool
	AudioEnabled bool
	VideoEnabled bool
	Effects []*Property // effect stack; empty (non-nil) when absent
	Text *Property // text properties; nil for non-text layers
}
func parseLayer(layerHead *rifx.List, project *Project) (*Layer, error) {
layer := &Layer{}
type LDTA struct {
Unknown00 [4]byte // Offset 0B
Quality LayerQualityLevel // Offset 4B
Unknown01 [31]byte // Offset 6B
LayerAttrBits [3]byte // Offset 37B
SourceID uint32 // Offset 40B
}
ldtaBlock, err := layerHead.FindByType("ldta")
if err != nil {
return nil, err
}
ldta := &LDTA{}
ldtaBlock.ToStruct(ldta)
layer.SourceID = ldta.SourceID
layer.Quality = ldta.Quality
layer.SamplingMode = LayerSamplingMode((ldta.LayerAttrBits[0] & (1 << 6)) >> 6)
layer.FrameBlendMode = LayerFrameBlendMode((ldta.LayerAttrBits[0] & (1 << 2)) >> 2)
layer.GuideEnabled = ((ldta.LayerAttrBits[0] & (1 << 1)) >> 1) == 1
layer.SoloEnabled = ((ldta.LayerAttrBits[1] & (1 << 3)) >> 3) == 1
layer.ThreeDEnabled = ((ldta.LayerAttrBits[1] & (1 << 2)) >> 2) == 1
layer.AdjustmentLayerEnabled = ((ldta.LayerAttrBits[1] & (1 << 1)) >> 1) == 1
layer.CollapseTransformEnabled = ((ldta.LayerAttrBits[2] & (1 << 7)) >> 7) == 1
layer.ShyEnabled = ((ldta.LayerAttrBits[2] & (1 << 6)) >> 6) == 1
layer.LockEnabled = ((ldta.LayerAttrBits[2] & (1 << 5)) >> 5) == 1
layer.FrameBlendEnabled = ((ldta.LayerAttrBits[2] & (1 << 4)) >> 4) == 1
layer.MotionBlurEnabled = ((ldta.LayerAttrBits[2] & (1 << 3)) >> 3) == 1
layer.EffectsEnabled = ((ldta.LayerAttrBits[2] & (1 << 2)) >> 2) == 1
layer.AudioEnabled = ((ldta.LayerAttrBits[2] & (1 << 1)) >> 1) == 1
layer.VideoEnabled = ((ldta.LayerAttrBits[2] & (1 << 0)) >> 0) == 1
nameBlock, err := layerHead.FindByType("Utf8")
if err != nil {
return nil, err
}
layer.Name = nameBlock.ToString()
rootTDGP, _ := indexedGroupToMap(layerHead.SublistMerge("tdgp"))
// Parse effects stack
if effectsTDGP, ok := rootTDGP["ADBE Effect Parade"]; ok {
effectsProp, err := parseProperty(effectsTDGP, "ADBE Effect Parade")
if err != nil {
return nil, err
}
layer.Effects = effectsProp.Properties
} else {
layer.Effects = make([]*Property, 0)
}
// Parse text layer properties
if textTDGP, ok := rootTDGP["ADBE Text Properties"]; ok {
textProp, err := parseProperty(textTDGP, "ADBE Text Properties")
if err != nil {
return nil, err
}
layer.Text = textProp
}
return layer, nil
} | layer.go | 0.639286 | 0.616503 | layer.go | starcoder |
package models
import (
"time"
"gonum.org/v1/gonum/mat"
)
// BrownianModelConfig holds the variances that parameterize a Brownian
// motion model: initial state uncertainty, per-second process noise, and
// observation noise.
type BrownianModelConfig struct {
	InitialVariance float64
	ProcessVariance float64
	ObservationVariance float64
}
// BrownianModel models state evolution as Brownian motion: the identity
// transition with process noise growing linearly in elapsed time.
type BrownianModel struct {
	initialState State
	dims int // dimensionality, taken from the initial state vector
	transition *mat.Dense // identity; state expectation does not drift
	observationModel *mat.Dense // identity; the state is observed directly
	observationCovariance *mat.Dense
	cfg BrownianModelConfig
}
// scaledIdentity returns a dims x dims matrix with v on the diagonal and
// zeros elsewhere.
func scaledIdentity(dims int, v float64) *mat.Dense {
	d := mat.NewDense(dims, dims, nil)
	for i := 0; i < dims; i++ {
		d.Set(i, i, v)
	}
	return d
}
// NewBrownianModel builds a BrownianModel starting from the given state
// at the given time. The transition and observation models are identity
// matrices; the initial and observation covariances are diagonal with
// the configured variances. (The four hand-rolled diagonal-fill loops
// were consolidated into the scaledIdentity helper.)
func NewBrownianModel(initialTime time.Time, initialState mat.Vector, cfg BrownianModelConfig) *BrownianModel {
	dims := initialState.Len()
	return &BrownianModel{
		dims: dims,
		initialState: State{
			Time:       initialTime,
			State:      initialState,
			Covariance: scaledIdentity(dims, cfg.InitialVariance),
		},
		transition:            scaledIdentity(dims, 1.0),
		observationModel:      scaledIdentity(dims, 1.0),
		observationCovariance: scaledIdentity(dims, cfg.ObservationVariance),
		cfg:                   cfg,
	}
}
// InitialState returns the state the model was constructed with.
func (m *BrownianModel) InitialState() State {
	return m.initialState
}
// Transition returns the state transition matrix. For Brownian motion the
// expected state does not change, so this is the identity regardless of dt.
func (m *BrownianModel) Transition(dt time.Duration) mat.Matrix {
	return m.transition
}
// CovarianceTransition returns the process noise accumulated over dt:
// a diagonal matrix with ProcessVariance scaled linearly by the elapsed
// seconds.
func (m *BrownianModel) CovarianceTransition(dt time.Duration) mat.Matrix {
	result := mat.NewDense(m.dims, m.dims, nil)
	v := m.cfg.ProcessVariance * dt.Seconds()
	for i := 0; i < m.dims; i++ {
		result.Set(i, i, v)
	}
	return result
}
func (s *BrownianModel) NewMeasurement(value mat.Vector) *Measurement {
return &Measurement{
Value: value,
Covariance: s.observationCovariance,
ObservationModel: s.observationModel,
}
}
func (s *BrownianModel) Value(state mat.Vector) mat.Vector {
return state
} | models/brownian.go | 0.784567 | 0.460835 | brownian.go | starcoder |
package assert
import (
"time"
)
// abs returns the absolute value of x.
func abs(x float64) float64 {
	if x >= 0 {
		return x
	}
	return -x
}
// EqualErrors - asserts that got carries the same error message as want.
// Two nil errors compare equal; a nil/non-nil mismatch or differing
// messages is reported as a failure. (Previously want==nil && got==nil
// was reported as a failure.)
func EqualErrors(reporter interface{}, want, got error) {
	if want == nil && got == nil {
		return
	}
	if want == nil || got == nil || want.Error() != got.Error() {
		reportError(reporter, &failedErrorCompMsg{want, got})
	}
}
// EqualInt - asserts that two integers are the same
func EqualInt(reporter interface{}, want, got int) {
	if want != got {
		reportError(reporter, &failedIntCompMsg{want, got})
	}
}
// EqualFloat32 - asserts that two floats are exactly the same (no
// tolerance; see EqualFloat32Tol for approximate comparison)
func EqualFloat32(reporter interface{}, want, got float32) {
	if want != got {
		reportError(reporter, &failedFloatCompMsg{float64(want), float64(got)})
	}
}
// EqualFloat32Tol - asserts that two floats are the same,
// allowing for (relative) tolerance given as a parameter.
// A zero want matches only a zero got (relative tolerance is undefined
// at zero). Previously want==0 && got==0 computed 0/0 and passed only
// because NaN comparisons are always false; the zero case is now explicit.
func EqualFloat32Tol(reporter interface{}, want, got, relTol float32) {
	if want == 0.0 {
		if got != 0.0 {
			reportError(reporter, &failedFloatCompMsg{float64(want), float64(got)})
		}
		return
	}
	if abs(float64((want-got)/want)) > float64(relTol) {
		reportError(reporter, &failedFloatCompMsg{float64(want), float64(got)})
	}
}
// EqualFloat64 - asserts that two floats are exactly the same (no
// tolerance; see EqualFloat64Tol for approximate comparison)
func EqualFloat64(reporter interface{}, want, got float64) {
	if want != got {
		reportError(reporter, &failedFloatCompMsg{want, got})
	}
}
// EqualFloat64Tol - asserts that two floats are the same,
// allowing for (relative) tolerance given as a parameter.
// A zero want matches only a zero got (relative tolerance is undefined
// at zero). Previously want==0 && got==0 computed 0/0 and passed only
// because NaN comparisons are always false; the zero case is now explicit.
func EqualFloat64Tol(reporter interface{}, want, got, relTol float64) {
	if want == 0.0 {
		if got != 0.0 {
			reportError(reporter, &failedFloatCompMsg{want, got})
		}
		return
	}
	if abs((want-got)/want) > relTol {
		reportError(reporter, &failedFloatCompMsg{want, got})
	}
}
// EqualStrings - asserts that two strings are equal
func EqualStrings(reporter interface{}, want, got string) {
	if want != got {
		reportError(reporter, &failedStrCompMsg{want, got})
	}
}
// EqualTime - asserts that two time.Time are the same instant; uses
// time.Time.Equal rather than == so differing locations/monotonic clock
// readings do not cause false failures
func EqualTime(reporter interface{}, want, got time.Time) {
	if !got.Equal(want) {
		reportError(reporter, &failedTimeCompMsg{want, got})
	}
}
package transform
import (
"errors"
"fmt"
)
// Sort by Rank Transform is a family of transforms typically used after
// a BWT to reduce the variance of the data prior to entropy coding.
// SBR(alpha) is defined by sbr(x, alpha) = (1-alpha)*(t-w1(x,t)) + alpha*(t-w2(x,t))
// where x is an item in the data list, t is the current access time and wk(x,t) is
// the k-th access time to x at time t (with 0 <= alpha <= 1).
// See [Two new families of list update algorithms] by <NAME> for details.
// SBR(0)= Move to Front Transform
// SBR(1)= Time Stamp Transform
// This code implements SBR(0), SBR(1/2) and SBR(1). Code derived from openBWT
const (
	// SBRT_MODE_MTF mode MoveToFront, i.e. SBR(0)
	SBRT_MODE_MTF = 1
	// SBRT_MODE_RANK mode Rank, i.e. SBR(1/2)
	SBRT_MODE_RANK = 2
	// SBRT_MODE_TIMESTAMP mode TimeStamp, i.e. SBR(1)
	SBRT_MODE_TIMESTAMP = 3
)
// SBRT Sort By Rank Transform. mask1/mask2/shift encode the selected
// variant so Forward/Inverse share a single code path:
// key = ((t & mask1) + (prev & mask2)) >> shift.
type SBRT struct {
	mode int
	mask1 int
	mask2 int
	shift uint
}
// NewSBRT creates a new instance of SBRT for one of the three supported
// modes (SBRT_MODE_MTF, SBRT_MODE_RANK or SBRT_MODE_TIMESTAMP).
func NewSBRT(mode int) (*SBRT, error) {
	switch mode {
	case SBRT_MODE_MTF, SBRT_MODE_RANK, SBRT_MODE_TIMESTAMP:
		// supported
	default:
		return nil, errors.New("Invalid mode parameter")
	}
	res := &SBRT{mode: mode, mask1: -1, mask2: -1, shift: 0}
	switch mode {
	case SBRT_MODE_TIMESTAMP:
		res.mask1 = 0
	case SBRT_MODE_MTF:
		res.mask2 = 0
	case SBRT_MODE_RANK:
		res.shift = 1
	}
	return res, nil
}
// NewSBRTWithCtx creates a new instance of SBRT using a
// configuration map as parameter. The mode is read from the "sbrt" key
// and defaults to SBRT_MODE_MTF when absent.
func NewSBRTWithCtx(ctx *map[string]interface{}) (*SBRT, error) {
	mode := SBRT_MODE_MTF
	if v, hasMode := (*ctx)["sbrt"]; hasMode {
		mode = v.(int)
	}
	switch mode {
	case SBRT_MODE_MTF, SBRT_MODE_RANK, SBRT_MODE_TIMESTAMP:
		// supported
	default:
		return nil, errors.New("Invalid mode parameter")
	}
	res := &SBRT{mode: mode, mask1: -1, mask2: -1, shift: 0}
	switch mode {
	case SBRT_MODE_TIMESTAMP:
		res.mask1 = 0
	case SBRT_MODE_MTF:
		res.mask2 = 0
	case SBRT_MODE_RANK:
		res.shift = 1
	}
	return res, nil
}
// Forward applies the function to the src and writes the result
// to the destination. Returns number of bytes read, number of bytes
// written and possibly an error.
func (this *SBRT) Forward(src, dst []byte) (uint, uint, error) {
	if len(src) == 0 {
		return 0, 0, nil
	}
	if &src[0] == &dst[0] {
		return 0, 0, errors.New("Input and output buffers cannot be equal")
	}
	count := len(src)
	if count > len(dst) {
		errMsg := fmt.Sprintf("Block size is %v, output buffer length is %v", count, len(dst))
		return 0, 0, errors.New(errMsg)
	}
	// s2r maps symbol -> current rank; r2s is its inverse (rank -> symbol).
	// Both start as the identity permutation.
	s2r := [256]uint8{}
	r2s := [256]uint8{}
	for i := range s2r {
		s2r[i] = uint8(i)
		r2s[i] = uint8(i)
	}
	m1 := this.mask1
	m2 := this.mask2
	s := this.shift
	// p[c] is the last access time of symbol c; q[c] is its ranking key:
	// MTF uses the current time, TimeStamp the previous access time, and
	// Rank the average of the two (via the mask/shift combination).
	p := [256]int{}
	q := [256]int{}
	for i := 0; i < count; i++ {
		c := uint8(src[i])
		r := s2r[c]
		// Emit the symbol's current rank.
		dst[i] = byte(r)
		qc := ((i & m1) + (p[c] & m2)) >> s
		p[c] = i
		q[c] = qc
		// Move up symbol to correct rank
		for r > 0 && q[r2s[r-1]] <= qc {
			t := r2s[r-1]
			r2s[r], s2r[t] = t, r
			r--
		}
		r2s[r] = c
		s2r[c] = r
	}
	return uint(count), uint(count), nil
}
// Inverse applies the reverse function to the src and writes the result
// to the destination. Returns number of bytes read, number of bytes
// written and possibly an error.
func (this *SBRT) Inverse(src, dst []byte) (uint, uint, error) {
	if len(src) == 0 {
		return 0, 0, nil
	}
	if &src[0] == &dst[0] {
		return 0, 0, errors.New("Input and output buffers cannot be equal")
	}
	count := len(src)
	if count > len(dst) {
		errMsg := fmt.Sprintf("Block size is %v, output buffer length is %v", count, len(dst))
		return 0, 0, errors.New(errMsg)
	}
	// Only the rank -> symbol table is needed to invert; it starts as
	// the identity permutation.
	r2s := [256]uint8{}
	for i := range r2s {
		r2s[i] = uint8(i)
	}
	m1 := this.mask1
	m2 := this.mask2
	s := this.shift
	// p[c]/q[c]: last access time and ranking key of symbol c, mirroring
	// Forward so the rank list evolves identically.
	p := [256]int{}
	q := [256]int{}
	for i := 0; i < count; i++ {
		r := src[i]
		c := r2s[r]
		// The input is a rank; emit the symbol currently at that rank.
		dst[i] = byte(c)
		qc := ((i & m1) + (p[c] & m2)) >> s
		p[c] = i
		q[c] = qc
		// Move up symbol to correct rank
		for r > 0 && q[r2s[r-1]] <= qc {
			r2s[r] = r2s[r-1]
			r--
		}
		r2s[r] = c
	}
	return uint(count), uint(count), nil
}
package parse
import (
"errors"
"fmt"
"strconv"
"strings"
"time"
)
// ifEpoch tries to interpret s as a Unix epoch timestamp. Strings of more
// than 10 digits are treated as seconds followed by milliseconds; shorter
// numeric strings are plain seconds. A non-numeric s yields an error.
func ifEpoch(s string) (time.Time, error) {
	if _, err := strconv.ParseInt(s, 10, 64); err != nil {
		return time.Time{}, err
	}
	if len(s) <= 10 {
		sec, _ := strconv.ParseInt(s, 10, 64)
		return time.Unix(sec, 0), nil
	}
	sec, _ := strconv.ParseInt(s[0:10], 10, 64)
	msec, err := strconv.ParseInt(s[10:], 10, 64)
	return time.Unix(sec, msec*1000000), err
}
// DateTimeParse is a string holding a date/time in one of the many
// expected formats listed in layout, or a Unix epoch integer.
type DateTimeParse string
// layout lists the candidate time.Parse reference layouts, tried in
// order by GetTime/GetTimeInLocation until one succeeds. The list
// contains some duplicates; only the first match matters.
var layout = []string{
	"January 2, 2006, 3:04 pm",
	"January 2, 2006, 3:04pm",
	"January 2, 2006, 03:04 pm",
	"January 2 2006, 03:04 pm",
	"January 2 2006 03:04 pm",
	"January 2, 2006, 3:04:05 pm",
	"January 2, 2006, 3:04:05pm",
	"January 2, 2006, 03:04:05 pm",
	"January 2 2006, 03:04:05 pm",
	"January 2 2006 03:04:05 pm",
	"Mon Jan _2 15:04 UTC 2006",
	"Mon Jan _2 15:04:05 UTC 2006",
	"January 2, 2006, 3:04 pm",
	"January 2 2006, 3:04 pm",
	"January 2 2006 3:04 pm",
	"Jan 2, 2006, 03:04 pm",
	"Jan 2 2006, 03:04 pm",
	"Jan 2, 2006, 3:04 pm",
	"Jan 2, 06, 3:04 pm",
	"January 2, 2006, 3:04:05 pm",
	"January 2 2006, 3:04:05 pm",
	"January 2 2006 3:04:05 pm",
	"Jan 2, 2006, 03:04:05 pm",
	"Jan 2 2006, 03:04:05 pm",
	"Jan 2, 2006, 3:04:05 pm",
	"Jan 2, 06, 3:04:05 pm",
	"Jan 2 15:04:05",
	"2 Jan 15:04:05",
	"15:04:05 2 Jan",
	"2006-01-02 3:04 pm",
	"2006-01-02 3:04pm",
	"2006-01-02 3:04 PM",
	"2006-01-02 3:04PM",
	"2006-01-02 15:04",
	"2006-01-02 3:04:05 pm",
	"2006-01-02 3:04:05pm",
	"2006-01-02 3:04:05 PM",
	"2006-01-02 3:04:05PM",
	"2006-01-02 15:04:05",
	"2006/01/02 3:04 pm",
	"2006/01/02 3:04pm",
	"2006/01/02 3:04 PM",
	"2006/01/02 3:04PM",
	"2006/01/02 15:04",
	"2006/01/02 3:04:05 pm",
	"2006/01/02 3:04:05pm",
	"2006/01/02 3:04:05 PM",
	"2006/01/02 3:04:05PM",
	"2006/01/02 15:04:05",
	"01/02/2006 3:04 pm",
	"01/02/2006 3:04pm",
	"01/02/2006 3:04 PM",
	"01/02/2006 3:04PM",
	"01/02/2006 15:04",
	"01/02/2006 3:04:05 pm",
	"01/02/2006 3:04:05pm",
	"01/02/2006 3:04:05 PM",
	"01/02/2006 3:04:05PM",
	"01/02/2006 15:04:05",
	"01.02.2006 3:04 pm",
	"01.02.2006 3:04pm",
	"01.02.2006 3:04 PM",
	"01.02.2006 3:04PM",
	"01.02.2006 15:04",
	"01.02.2006 3:04:05 pm",
	"01.02.2006 3:04:05pm",
	"01.02.2006 3:04:05 PM",
	"01.02.2006 3:04:05PM",
	"01.02.2006 15:04:05",
	"01/02/2006",
	"1/2/2006",
	"01_02_2006",
	"1_2_2006",
	"01-02-2006",
	"1-2-2006",
	"01.02.2006",
	"1.2.2006",
	"1.2.2006 15:04",
	"1/2/2006 15:04",
	"1_2_2006 15:04",
	"1 2 2006 15:04",
	"1.2.2006 15:04:05",
	"1/2/2006 15:04:05",
	"1_2_2006 15:04:05",
	"1 2 2006 15:04:05",
	"15:04 1.2.2006",
	"15:04 1/2/2006",
	"15:04 1_2_2006",
	"15:04 1 2 2006",
	"15:04:05 1.2.2006",
	"15:04:05 1/2/2006",
	"15:04:05 1_2_2006",
	"15:04:05 1 2 2006",
	"2006-01-02 15:04:05 +0000 UTC",
	"2006-01-02T15:04:05+07:00",
	"2006-01-02 15:04:05 +07:00",
	"2006-01-02T15:04:05-0700",
	"2006-01-02 15:04:05 -0700 MST",
	"2006-01-02 15:04:05 -0700 (MST)",
	"15:04:05 2006-01-02 -0700 MST",
	"15:04:05 2006-01-02 -0700 (MST)",
	"Mon Jan _2 15:04:05 MST 2006",
	"Mon Jan _2 15:04:05 -0700 2006",
	"02 Jan 06 15:04 MST",
	"02 Jan 06 15:04 -0700",
	"Monday, 02-Jan-06 15:04:05 MST",
	"Monday, 02-Jan-06 15:04:05 (MST)",
	"Mon, _2 Jan 2006 15:04:05 MST",
	"Mon, _2 Jan 2006 15:04:05 (MST)",
	"Mon, _2 Jan 2006 15:04:05 -0700",
	"Mon, _2 Jan 2006 15:04:05 -0700 MST",
	"Mon, _2 Jan 2006 15:04:05 -0700 (MST)",
	"2006-01-02T15:04:05Z07:00",
	// Leave this last
	//"2006-01-02T15:04:05.999999999Z07:00",
}
// GetTime parses s, first as a Unix epoch integer, then against each
// layout in order, returning the first successful parse.
func (s DateTimeParse) GetTime() (time.Time, error) {
	// Epoch integers short-circuit the layout scan.
	t, err := ifEpoch(string(s))
	if err == nil {
		return t, err
	}
	// Collapse runs of whitespace to single spaces before matching.
	st := strings.Join(strings.Fields(string(s)), " ")
	//fmt.Printf("-->%s\n", st)
	for _, l := range layout {
		t, err := time.Parse(l, st)
		if err == nil {
			return t, err
		}
	}
	return time.Time{}, errors.New("Time format is not in layout.")
}
// GetTimeInLocation parses s like GetTime but interprets/converts the
// result using the named IANA time zone (e.g. "America/New_York").
func (s DateTimeParse) GetTimeInLocation(zone string) (time.Time, error) {
	loc, err := time.LoadLocation(zone)
	if err != nil {
		return time.Time{}, err
	}
	// Epoch integers are absolute instants; just re-express them in loc.
	t, err := ifEpoch(string(s))
	if err == nil {
		zone_time := t.In(loc)
		return zone_time, err
	}
	// Collapse runs of whitespace to single spaces before matching.
	st := strings.Join(strings.Fields(string(s)), " ")
	//fmt.Printf("-->%s\n", st)
	for _, l := range layout {
		t, err := time.ParseInLocation(l, st, loc)
		if err == nil {
			return t, err
		}
	}
	return time.Time{}, errors.New("Time format is not in layout.")
}
/* NewYork() - Input localtime New_York and convert to UTC
Add year, if missing
Input: "Sep 8 13:24:18 "
Expected output: "2018-09-08 13:24:18 -0400 EDT"
*/
func (s DateTimeParse) NewYork() (time.Time, error) {
	tt, err := DateTimeParse(s).GetTime()
	if err != nil {
		return tt, err
	}
	// Layouts lacking a year parse to year 0; substitute the current year.
	if tt.Year() == 0 {
		tt = tt.AddDate(time.Now().Year(), 0, 0)
	}
	loc, err := time.LoadLocation("America/New_York")
	if err != nil {
		// BUG FIX: previously this error was ignored and a nil *Location was
		// passed to Time.In below, which panics at runtime.
		return time.Time{}, err
	}
	// Shift by the New York UTC offset so the parsed wall-clock reading is
	// treated as local time, then present the instant in that zone.
	_, offset := tt.In(loc).Zone()
	tt = tt.Add(time.Duration(-offset) * time.Second)
	return tt.In(loc), nil
}
// GetTimeLoc parses the string with GetTime and returns the result
// expressed in the America/New_York time zone.
func (s DateTimeParse) GetTimeLoc() (time.Time, error) {
	tt, err := DateTimeParse(s).GetTime()
	if err != nil {
		return tt, err
	}
	loc, err := time.LoadLocation("America/New_York")
	if err != nil {
		// BUG FIX: previously tt.In(loc) was evaluated even when loc was nil
		// on error, which panics.
		return time.Time{}, err
	}
	return tt.In(loc), nil
}
// TimeIn parses the string and returns the resulting instant expressed
// in the given IANA time zone.
func (s DateTimeParse) TimeIn(zone string) (time.Time, error) {
	tt, err := DateTimeParse(s).GetTime()
	if err != nil {
		return tt, err
	}
	loc, err := time.LoadLocation(zone)
	if err != nil {
		return tt, err
	}
	return tt.In(loc), nil
}
// GetTimeLocSquish formats the parsed time as "HH:MM" with the minutes
// truncated down to the enclosing 10-minute interval (e.g. 13:47 -> "13:40").
func (s DateTimeParse) GetTimeLocSquish() (string, error) {
	tt, err := DateTimeParse(s).GetTime()
	if err != nil {
		return "", err
	}
	// tt.Minute() is already an int; integer division truncates to the
	// 10-minute bucket. (Removed a redundant int() conversion.)
	squishMin := (tt.Minute() / 10) * 10
	return fmt.Sprintf("%02d:%02d", tt.Hour(), squishMin), nil
}
// GetTimeLocHRminS formats the parsed time as zero-padded "HH:MM".
func (s DateTimeParse) GetTimeLocHRminS() (string, error) {
	tt, err := DateTimeParse(s).GetTime()
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%02d:%02d", tt.Hour(), tt.Minute()), nil
}
// DaysFrom returns the whole number of 24-hour periods from the
// receiver's time to day2 (negative when day2 is earlier).
func (s DateTimeParse) DaysFrom(day2 string) (int, error) {
	start, err := DateTimeParse(s).GetTime()
	if err != nil {
		return 0, err
	}
	end, err := DateTimeParse(day2).GetTime()
	if err != nil {
		return 0, err
	}
	// Truncate fractional days toward zero.
	return int(end.Sub(start).Hours() / 24), nil
}
// DaysBetween always positive
func (s DateTimeParse) DaysBetween(day2 string) (int, error) {
tt, err := DateTimeParse(s).GetTime()
if err != nil {
return 0, err
}
t2, err := DateTimeParse(day2).GetTime()
if err != nil {
return 0, err
}
days := int(t2.Sub(tt).Hours() / 24)
if days < 0 {
days = -days
}
return days, err
} | parse/dateparse.go | 0.602296 | 0.545286 | dateparse.go | starcoder |
package sql
import (
"context"
"strconv"
"github.com/google/uuid"
"vitess.io/vitess/go/vt/sqlparser"
"github.com/liquidata-inc/dolt/go/libraries/doltcore/row"
"github.com/liquidata-inc/dolt/go/libraries/doltcore/schema"
"github.com/liquidata-inc/dolt/go/store/chunks"
"github.com/liquidata-inc/dolt/go/store/types"
)
// doubleQuot is the double-quote character, used when quoting strings.
const doubleQuot = "\""
// binaryNomsOperation knows how to combine two noms values into a single one, e.g. addition
type binaryNomsOperation func(left, right types.Value) types.Value
// binaryNomsPredicate is a predicate function for two noms values, e.g. <.
// The NomsBinFormat is required by value ordering comparisons.
type binaryNomsPredicate func(nbf *types.NomsBinFormat, left, right types.Value) bool
// unaryNomsOperation knows how to turn a single noms value into another one, e.g. negation
type unaryNomsOperation func(val types.Value) types.Value
// TagResolver knows how to find a tag number for a qualified table in a result set.
type TagResolver interface {
	// ResolveTag returns the tag number for the named column of the named
	// table, or an error when the pair cannot be resolved.
	ResolveTag(tableName string, columnName string) (uint64, error)
}
// InitValue is a value type that knows how to initialize itself before the value is retrieved.
type InitValue interface {
	// Init() resolves late-bound information for this getter, like the tag number of a column in a result set. Returns
	// any error in initialization. Init() must be called before other methods on an object.
	Init(TagResolver) error
}
// ComposeInits composes zero or more InitValue into a single Init()
// function: the returned function calls Init on each value in order and
// stops at (and returns) the first error encountered.
func ComposeInits(ivs ...InitValue) func(TagResolver) error {
	return func(resolver TagResolver) error {
		var err error
		for i := 0; err == nil && i < len(ivs); i++ {
			err = ivs[i].Init(resolver)
		}
		return err
	}
}
// GetValue is a value type that knows how to retrieve a value from a row.
type GetValue interface {
	// Get returns a value from the row given (which needn't actually be a
	// value stored in that row, e.g. for literals).
	Get(row.Row) types.Value
}
// RowValGetter knows how to retrieve a Value from a Row.
type RowValGetter struct {
	// NomsKind is the value type returned by this getter. Types are approximate and may need to be coerced,
	// e.g. Float -> Int.
	NomsKind types.NomsKind
	// initFn performs whatever logic necessary to initialize the getter, and returns any errors in the initialization
	// process. Leave unset to perform no initialization logic. Clients should call the interface method Init() rather
	// than calling this function directly.
	initFn func(TagResolver) error
	// getFn returns the value for this getter for the row given. Clients should call the interface method Get() rather
	// than calling this function directly.
	getFn func(r row.Row) types.Value
	// inited records whether Init() has been called; Get() panics when it hasn't.
	inited bool
	// Clients should use these interface methods, rather than getFn and initFn directly.
	InitValue
	GetValue
}
// Init marks the getter initialized and runs its initFn, if any,
// returning whatever error the initialization logic produces.
func (rvg *RowValGetter) Init(resolver TagResolver) error {
	rvg.inited = true
	if rvg.initFn == nil {
		return nil
	}
	return rvg.initFn(resolver)
}
// Get returns the value this getter extracts for the given row.
// Panics if Init() was never called, since getFn may depend on state
// that is only resolved during initialization.
func (rvg *RowValGetter) Get(r row.Row) types.Value {
	// TODO: find a way to not impede performance with this check
	if !rvg.inited {
		panic("Get() called without Init(). This is a bug.")
	}
	return rvg.getFn(r)
}
// RowValGetterForKind returns a new RowValGetter producing values of the
// given kind, with every other field left at its zero value.
func RowValGetterForKind(kind types.NomsKind) *RowValGetter {
	rvg := &RowValGetter{}
	rvg.NomsKind = kind
	return rvg
}
// ConversionValueGetter wraps the given getter in one that converts its
// values to destKind. When the kinds already match, the original getter is
// returned untouched. Returns an error if no conversion between the two
// kinds is possible.
func ConversionValueGetter(getter *RowValGetter, destKind types.NomsKind) (*RowValGetter, error) {
	if getter.NomsKind == destKind {
		return getter, nil
	}
	convert := GetTypeConversionFn(getter.NomsKind, destKind)
	if convert == nil {
		return nil, errFmt("Type mismatch: cannot convert from %v to %v",
			DoltToSQLType[getter.NomsKind], DoltToSQLType[destKind])
	}
	wrapped := &RowValGetter{NomsKind: destKind}
	wrapped.getFn = func(r row.Row) types.Value {
		return convert(getter.Get(r))
	}
	// Initialization simply delegates to the wrapped getter.
	wrapped.initFn = getter.Init
	return wrapped, nil
}
// LiteralValueGetter returns a getter that always yields the given literal
// value, regardless of the row passed to it.
func LiteralValueGetter(value types.Value) *RowValGetter {
	getter := RowValGetterForKind(value.Kind())
	getter.getFn = func(row.Row) types.Value {
		return value
	}
	return getter
}
// getterForColumn returns a RowValGetter for the qualified column given, or an
// error when the table or column cannot be found in the input schemas.
func getterForColumn(qc QualifiedColumn, inputSchemas map[string]schema.Schema) (*RowValGetter, error) {
	tableSch, ok := inputSchemas[qc.TableName]
	if !ok {
		return nil, errFmt("Unresolved table %v", qc.TableName)
	}
	column, ok := tableSch.GetAllCols().GetByName(qc.ColumnName)
	if !ok {
		return nil, errFmt(UnknownColumnErrFmt, qc.ColumnName)
	}
	getter := RowValGetterForKind(column.Kind)
	// The result-set tag is late-bound: it is filled in by initFn and
	// captured by getFn, so Init() must run before Get() is first called.
	var tag uint64
	getter.initFn = func(resolver TagResolver) error {
		var err error
		tag, err = resolver.ResolveTag(qc.TableName, qc.ColumnName)
		return err
	}
	getter.getFn = func(r row.Row) types.Value {
		// Missing column values read as nil (SQL NULL).
		value, _ := r.GetColVal(tag)
		return value
	}
	return getter, nil
}
// nullSafeBoolOp applies SQL null semantics to a binary boolean expression:
// if either operand evaluates to null the result is null; otherwise the
// supplied predicate is applied to the two non-null values.
func nullSafeBoolOp(left, right *RowValGetter, fn binaryNomsPredicate) func(r row.Row) types.Value {
	return func(r row.Row) types.Value {
		lv, rv := left.Get(r), right.Get(r)
		if types.IsNull(lv) || types.IsNull(rv) {
			return nil
		}
		return types.Bool(fn(r.Format(), lv, rv))
	}
}
// getterFor returns a RowValGetter for the expression given, or an error.
// Handles null/boolean/value literals, column references, tuples,
// comparisons, AND/OR, IS expressions, and unary/binary arithmetic.
func getterFor(expr sqlparser.Expr, inputSchemas map[string]schema.Schema, aliases *Aliases) (*RowValGetter, error) {
	switch e := expr.(type) {
	case *sqlparser.NullVal:
		// NULL literal: always yields nil.
		getter := RowValGetterForKind(types.NullKind)
		getter.getFn = func(r row.Row) types.Value { return nil }
		return getter, nil
	case *sqlparser.ColName:
		// Column reference: try a column alias first, then resolve the
		// qualified column against the input schemas.
		colNameStr := getColumnNameString(e)
		if getter, err := resolveColumnAlias(colNameStr, aliases); err != nil {
			return nil, err
		} else if getter != nil {
			return getter, nil
		}
		qc, err := resolveColumn(colNameStr, inputSchemas, aliases)
		if err != nil {
			return nil, err
		}
		return getterForColumn(qc, inputSchemas)
	case *sqlparser.SQLVal:
		// Literal value (number, string, etc.): evaluate once.
		val, err := divineNomsValueFromSQLVal(e)
		if err != nil {
			return nil, err
		}
		return LiteralValueGetter(val), nil
	case sqlparser.BoolVal:
		val := types.Bool(bool(e))
		return LiteralValueGetter(val), nil
	case sqlparser.ValTuple:
		// Tuple literal (e.g. right-hand side of IN): evaluate every element
		// eagerly and collect them into a noms Set. All elements must share
		// a single noms kind.
		vals := make([]types.Value, len(e))
		var kind types.NomsKind
		for i, item := range e {
			g, err := getterFor(item, inputSchemas, aliases)
			if err != nil {
				return nil, err
			}
			// Elements are literals, so a fake resolver suffices for Init.
			if err := g.Init(fakeResolver{}); err != nil {
				return nil, err
			}
			val := g.Get(nil)
			if i > 0 && kind != val.Kind() {
				return nil, errFmt("Type mismatch: mixed types in list literal '%v'", nodeToString(e))
			}
			vals[i] = val
			kind = val.Kind()
		}
		// TODO: surely there is a better way to do this without resorting to interface{}
		ts := &chunks.TestStorage{}
		vs := types.NewValueStore(ts.NewView())
		set, err := types.NewSet(context.Background(), vs, vals...)
		if err != nil {
			return nil, err
		}
		// TODO: better type checking (set type is not literally the underlying type)
		getter := LiteralValueGetter(set)
		getter.NomsKind = kind
		return getter, nil
	case *sqlparser.ComparisonExpr:
		// Binary comparison: build getters for both operands, coerce the
		// right side to the left's kind, then wrap with null-safe semantics.
		leftGetter, err := getterFor(e.Left, inputSchemas, aliases)
		if err != nil {
			return nil, err
		}
		rightGetter, err := getterFor(e.Right, inputSchemas, aliases)
		if err != nil {
			return nil, err
		}
		// TODO: better type checking. This always converts the right type to the left. Probably not appropriate in all
		//  cases.
		if leftGetter.NomsKind != rightGetter.NomsKind {
			if rightGetter, err = ConversionValueGetter(rightGetter, leftGetter.NomsKind); err != nil {
				return nil, err
			}
		}
		getter := RowValGetterForKind(types.BoolKind)
		var predicate binaryNomsPredicate
		switch e.Operator {
		case sqlparser.EqualStr:
			predicate = func(nbf *types.NomsBinFormat, left, right types.Value) bool {
				return left.Equals(right)
			}
		case sqlparser.LessThanStr:
			predicate = func(nbf *types.NomsBinFormat, left, right types.Value) bool {
				isLess, err := left.Less(nbf, right)
				if err != nil {
					panic(err)
				}
				return isLess
			}
		case sqlparser.GreaterThanStr:
			// a > b is expressed as b < a.
			predicate = func(nbf *types.NomsBinFormat, left, right types.Value) bool {
				isLess, err := right.Less(nbf, left)
				if err != nil {
					panic(err)
				}
				return isLess
			}
		case sqlparser.LessEqualStr:
			// a <= b is expressed as !(b < a).
			predicate = func(nbf *types.NomsBinFormat, left, right types.Value) bool {
				isLess, err := right.Less(nbf, left)
				if err != nil {
					panic(err)
				}
				return !isLess
			}
		case sqlparser.GreaterEqualStr:
			// a >= b is expressed as !(a < b).
			predicate = func(nbf *types.NomsBinFormat, left, right types.Value) bool {
				isLess, err := left.Less(nbf, right)
				if err != nil {
					panic(err)
				}
				return !isLess
			}
		case sqlparser.NotEqualStr:
			predicate = func(nbf *types.NomsBinFormat, left, right types.Value) bool {
				return !left.Equals(right)
			}
		case sqlparser.InStr:
			// Right operand is the Set built from a ValTuple above.
			predicate = func(nbf *types.NomsBinFormat, left, right types.Value) bool {
				set := right.(types.Set)
				has, err := set.Has(context.Background(), left)
				if err != nil {
					panic(err)
				}
				return has
			}
		case sqlparser.NotInStr:
			predicate = func(nbf *types.NomsBinFormat, left, right types.Value) bool {
				set := right.(types.Set)
				has, err := set.Has(context.Background(), left)
				if err != nil {
					panic(err)
				}
				return !has
			}
		case sqlparser.NullSafeEqualStr:
			return nil, errFmt("null safe equal operation not supported")
		case sqlparser.LikeStr:
			return nil, errFmt("like keyword not supported")
		case sqlparser.NotLikeStr:
			return nil, errFmt("like keyword not supported")
		case sqlparser.RegexpStr:
			return nil, errFmt("regular expressions not supported")
		case sqlparser.NotRegexpStr:
			return nil, errFmt("regular expressions not supported")
		case sqlparser.JSONExtractOp:
			return nil, errFmt("json not supported")
		case sqlparser.JSONUnquoteExtractOp:
			return nil, errFmt("json not supported")
		}
		getter.getFn = nullSafeBoolOp(leftGetter, rightGetter, predicate)
		getter.initFn = ComposeInits(leftGetter, rightGetter)
		return getter, nil
	case *sqlparser.AndExpr:
		// Logical AND with null-safe semantics (null operand -> null result).
		leftGetter, err := getterFor(e.Left, inputSchemas, aliases)
		if err != nil {
			return nil, err
		}
		rightGetter, err := getterFor(e.Right, inputSchemas, aliases)
		if err != nil {
			return nil, err
		}
		getter := RowValGetterForKind(types.BoolKind)
		getter.getFn = nullSafeBoolOp(leftGetter, rightGetter, func(nbf *types.NomsBinFormat, left, right types.Value) bool {
			return bool(left.(types.Bool) && right.(types.Bool))
		})
		getter.initFn = ComposeInits(leftGetter, rightGetter)
		return getter, nil
	case *sqlparser.OrExpr:
		// Logical OR with null-safe semantics (null operand -> null result).
		leftGetter, err := getterFor(e.Left, inputSchemas, aliases)
		if err != nil {
			return nil, err
		}
		rightGetter, err := getterFor(e.Right, inputSchemas, aliases)
		if err != nil {
			return nil, err
		}
		getter := RowValGetterForKind(types.BoolKind)
		getter.getFn = nullSafeBoolOp(leftGetter, rightGetter, func(nbf *types.NomsBinFormat, left, right types.Value) bool {
			return bool(left.(types.Bool) || right.(types.Bool))
		})
		getter.initFn = ComposeInits(leftGetter, rightGetter)
		return getter, nil
	case *sqlparser.IsExpr:
		// IS [NOT] NULL / TRUE / FALSE.
		exprGetter, err := getterFor(e.Expr, inputSchemas, aliases)
		if err != nil {
			return nil, err
		}
		getter := RowValGetterForKind(types.BoolKind)
		getter.initFn = ComposeInits(exprGetter)
		op := e.Operator
		switch op {
		case sqlparser.IsNullStr, sqlparser.IsNotNullStr:
			getter.getFn = func(r row.Row) types.Value {
				val := exprGetter.Get(r)
				if (types.IsNull(val) && op == sqlparser.IsNullStr) || (!types.IsNull(val) && op == sqlparser.IsNotNullStr) {
					return types.Bool(true)
				}
				return types.Bool(false)
			}
		case sqlparser.IsTrueStr, sqlparser.IsNotTrueStr, sqlparser.IsFalseStr, sqlparser.IsNotFalseStr:
			if exprGetter.NomsKind != types.BoolKind {
				return nil, errFmt("Type mismatch: cannot use expression %v as boolean", nodeToString(expr))
			}
			getter.getFn = func(r row.Row) types.Value {
				val := exprGetter.Get(r)
				if types.IsNull(val) {
					return types.Bool(false)
				}
				// TODO: this may not be the correct nullness semantics for "is not" comparisons
				if val.Equals(types.Bool(true)) {
					return types.Bool(op == sqlparser.IsTrueStr || op == sqlparser.IsNotFalseStr)
				} else {
					return types.Bool(op == sqlparser.IsFalseStr || op == sqlparser.IsNotTrueStr)
				}
			}
		default:
			return nil, errFmt("Unrecognized is comparison: %v", e.Operator)
		}
		return getter, nil
	case *sqlparser.BinaryExpr:
		return getterForBinaryExpr(e, inputSchemas, aliases)
	case *sqlparser.UnaryExpr:
		return getterForUnaryExpr(e, inputSchemas, aliases)
	default:
		return nil, errFmt("Unsupported expression: '%v'", nodeToString(e))
	}
}
// getterForUnaryExpr returns a getter for the given unary expression (+, -, !),
// where calls to Get() evaluate the full expression for the row given.
// Null operands propagate as null results.
func getterForUnaryExpr(e *sqlparser.UnaryExpr, inputSchemas map[string]schema.Schema, aliases *Aliases) (*RowValGetter, error) {
	getter, err := getterFor(e.Expr, inputSchemas, aliases)
	if err != nil {
		return nil, err
	}
	var opFn unaryNomsOperation
	switch e.Operator {
	case sqlparser.UPlusStr:
		// Unary plus is the identity for numeric types (null passes through).
		switch getter.NomsKind {
		case types.IntKind, types.FloatKind:
			// fine, nothing to do
		default:
			return nil, errFmt("Unsupported type for unary + operation: %v", DoltToSQLType[getter.NomsKind])
		}
		opFn = func(val types.Value) types.Value {
			return val
		}
	case sqlparser.UMinusStr:
		switch getter.NomsKind {
		case types.IntKind:
			opFn = func(val types.Value) types.Value {
				if types.IsNull(val) {
					return nil
				}
				return types.Int(-1 * val.(types.Int))
			}
		case types.FloatKind:
			opFn = func(val types.Value) types.Value {
				if types.IsNull(val) {
					return nil
				}
				return types.Float(-1 * val.(types.Float))
			}
		case types.UintKind:
			// TODO: this alters the type of the expression returned relative to the column's.
			//  This probably causes some problems.
			opFn = func(val types.Value) types.Value {
				if types.IsNull(val) {
					return nil
				}
				return types.Int(-1 * int64(val.(types.Uint)))
			}
		default:
			return nil, errFmt("Unsupported type for unary - operation: %v", DoltToSQLType[getter.NomsKind])
		}
	case sqlparser.BangStr:
		switch getter.NomsKind {
		case types.BoolKind:
			opFn = func(val types.Value) types.Value {
				// BUG FIX: a null value previously panicked on the type
				// assertion; propagate null like the other unary operators do.
				if types.IsNull(val) {
					return nil
				}
				return types.Bool(!val.(types.Bool))
			}
		default:
			return nil, errFmt("Unsupported type for unary ! operation: %v", DoltToSQLType[getter.NomsKind])
		}
	default:
		return nil, errFmt("Unsupported unary operation: %v", e.Operator)
	}
	unaryGetter := RowValGetterForKind(getter.NomsKind)
	unaryGetter.getFn = func(r row.Row) types.Value {
		return opFn(getter.Get(r))
	}
	// Consistent with the other expression getters: delegate initialization.
	unaryGetter.initFn = ComposeInits(getter)
	return unaryGetter, nil
}
// getterForBinaryExpr returns a getter for the given binary arithmetic
// expression (+, -, *, /, %), where calls to Get() evaluate the full
// expression for the row given. A null operand yields a null result.
func getterForBinaryExpr(e *sqlparser.BinaryExpr, inputSchemas map[string]schema.Schema, aliases *Aliases) (*RowValGetter, error) {
	leftGetter, err := getterFor(e.Left, inputSchemas, aliases)
	if err != nil {
		return nil, err
	}
	rightGetter, err := getterFor(e.Right, inputSchemas, aliases)
	if err != nil {
		return nil, err
	}
	// TODO: support type conversion
	if rightGetter.NomsKind != leftGetter.NomsKind {
		return nil, errFmt("Type mismatch evaluating expression '%v': cannot compare %v, %v",
			nodeToString(e), DoltToSQLType[leftGetter.NomsKind], DoltToSQLType[rightGetter.NomsKind])
	}
	// All the operations differ only in their combining logic.
	// BUG FIX throughout: operands of UintKind are types.Uint values, but
	// were previously asserted to types.Int, which panics at runtime.
	var opFn binaryNomsOperation
	switch e.Operator {
	case sqlparser.PlusStr:
		switch leftGetter.NomsKind {
		case types.UintKind:
			opFn = func(left, right types.Value) types.Value {
				return types.Uint(uint64(left.(types.Uint)) + uint64(right.(types.Uint)))
			}
		case types.IntKind:
			opFn = func(left, right types.Value) types.Value {
				return types.Int(int64(left.(types.Int)) + int64(right.(types.Int)))
			}
		case types.FloatKind:
			opFn = func(left, right types.Value) types.Value {
				return types.Float(float64(left.(types.Float)) + float64(right.(types.Float)))
			}
		default:
			return nil, errFmt("Unsupported type for + operation: %v", DoltToSQLType[leftGetter.NomsKind])
		}
	case sqlparser.MinusStr:
		switch leftGetter.NomsKind {
		case types.UintKind:
			opFn = func(left, right types.Value) types.Value {
				return types.Uint(uint64(left.(types.Uint)) - uint64(right.(types.Uint)))
			}
		case types.IntKind:
			opFn = func(left, right types.Value) types.Value {
				return types.Int(int64(left.(types.Int)) - int64(right.(types.Int)))
			}
		case types.FloatKind:
			opFn = func(left, right types.Value) types.Value {
				return types.Float(float64(left.(types.Float)) - float64(right.(types.Float)))
			}
		default:
			return nil, errFmt("Unsupported type for - operation: %v", DoltToSQLType[leftGetter.NomsKind])
		}
	case sqlparser.MultStr:
		switch leftGetter.NomsKind {
		case types.UintKind:
			opFn = func(left, right types.Value) types.Value {
				return types.Uint(uint64(left.(types.Uint)) * uint64(right.(types.Uint)))
			}
		case types.IntKind:
			opFn = func(left, right types.Value) types.Value {
				return types.Int(int64(left.(types.Int)) * int64(right.(types.Int)))
			}
		case types.FloatKind:
			opFn = func(left, right types.Value) types.Value {
				return types.Float(float64(left.(types.Float)) * float64(right.(types.Float)))
			}
		default:
			return nil, errFmt("Unsupported type for * operation: %v", DoltToSQLType[leftGetter.NomsKind])
		}
	case sqlparser.DivStr:
		switch leftGetter.NomsKind {
		case types.UintKind:
			opFn = func(left, right types.Value) types.Value {
				return types.Uint(uint64(left.(types.Uint)) / uint64(right.(types.Uint)))
			}
		case types.IntKind:
			opFn = func(left, right types.Value) types.Value {
				return types.Int(int64(left.(types.Int)) / int64(right.(types.Int)))
			}
		case types.FloatKind:
			opFn = func(left, right types.Value) types.Value {
				return types.Float(float64(left.(types.Float)) / float64(right.(types.Float)))
			}
		default:
			return nil, errFmt("Unsupported type for / operation: %v", DoltToSQLType[leftGetter.NomsKind])
		}
	case sqlparser.ModStr:
		switch leftGetter.NomsKind {
		case types.UintKind:
			opFn = func(left, right types.Value) types.Value {
				return types.Uint(uint64(left.(types.Uint)) % uint64(right.(types.Uint)))
			}
		case types.IntKind:
			opFn = func(left, right types.Value) types.Value {
				return types.Int(int64(left.(types.Int)) % int64(right.(types.Int)))
			}
		default:
			return nil, errFmt("Unsupported type for %% operation: %v", DoltToSQLType[leftGetter.NomsKind])
		}
	default:
		return nil, errFmt("Unsupported binary operation: %v", e.Operator)
	}
	getter := RowValGetterForKind(leftGetter.NomsKind)
	getter.getFn = func(r row.Row) types.Value {
		leftVal := leftGetter.Get(r)
		rightVal := rightGetter.Get(r)
		// Null propagation: any null operand makes the whole result null.
		if types.IsNull(leftVal) || types.IsNull(rightVal) {
			return nil
		}
		return opFn(leftVal, rightVal)
	}
	// Consistent with the other expression getters: initialize both children.
	getter.initFn = ComposeInits(leftGetter, rightGetter)
	return getter, nil
}
// divineNomsValueFromSQLVal attempts to divine a noms value and type from
// the given SQLVal expression without any column-type information. The most
// specific possible type is chosen, e.g. Float over Int; unsigned values are
// never produced. String literals that parse as UUIDs become UUID values.
func divineNomsValueFromSQLVal(val *sqlparser.SQLVal) (types.Value, error) {
	switch val.Type {
	case sqlparser.HexVal, sqlparser.HexNum, sqlparser.IntVal, sqlparser.BitVal:
		// Integer-like literals; base 0 lets ParseInt handle prefixes.
		n, err := strconv.ParseInt(string(val.Val), 0, 64)
		if err != nil {
			return nil, err
		}
		return types.Int(n), nil
	case sqlparser.FloatVal:
		f, err := strconv.ParseFloat(string(val.Val), 64)
		if err != nil {
			return nil, err
		}
		return types.Float(f), nil
	case sqlparser.StrVal:
		// Strings that parse as UUIDs are coerced to UUID values.
		str := string(val.Val)
		id, err := uuid.Parse(str)
		if err != nil {
			return types.String(str), nil
		}
		return types.UUID(id), nil
	case sqlparser.ValArg:
		return nil, errFmt("Value args not supported")
	default:
		return nil, errFmt("Unrecognized SQLVal type %v", val.Type)
	}
}
// extractNomsValueFromSQLVal extracts a noms value from the given SQLVal, using the kind of the destination
// dolt column as a hint and for type-checking. Unlike divineNomsValueFromSQLVal, the result is guaranteed to
// match the requested kind or an error is returned.
func extractNomsValueFromSQLVal(val *sqlparser.SQLVal, kind types.NomsKind) (types.Value, error) {
	switch val.Type {
	// Integer-like values: acceptable for Int, Float and Uint columns.
	case sqlparser.HexVal, sqlparser.HexNum, sqlparser.IntVal, sqlparser.BitVal:
		intVal, err := strconv.ParseInt(string(val.Val), 0, 64)
		if err != nil {
			return nil, err
		}
		switch kind {
		case types.IntKind:
			return types.Int(intVal), nil
		case types.FloatKind:
			return types.Float(intVal), nil
		case types.UintKind:
			return types.Uint(intVal), nil
		default:
			return nil, errFmt("Type mismatch: numeric value but non-numeric column: %v", nodeToString(val))
		}
	// Float values: only acceptable for Float columns.
	case sqlparser.FloatVal:
		floatVal, err := strconv.ParseFloat(string(val.Val), 64)
		if err != nil {
			return nil, err
		}
		switch kind {
		case types.FloatKind:
			return types.Float(floatVal), nil
		default:
			return nil, errFmt("Type mismatch: float value but non-float column: %v", nodeToString(val))
		}
	// Strings, which can be coerced into UUIDs for UUID columns.
	case sqlparser.StrVal:
		strVal := string(val.Val)
		switch kind {
		case types.StringKind:
			return types.String(strVal), nil
		case types.UUIDKind:
			id, err := uuid.Parse(strVal)
			if err != nil {
				return nil, errFmt("Type mismatch: string value but non-string column: %v", nodeToString(val))
			}
			return types.UUID(id), nil
		default:
			return nil, errFmt("Type mismatch: string value but non-string column: %v", nodeToString(val))
		}
	case sqlparser.ValArg:
		return nil, errFmt("Value args not supported")
	default:
		return nil, errFmt("Unrecognized SQLVal type %v", val.Type)
	}
}
// extractNomsValueFromUnaryExpr extracts a noms value from the given expression, using the type info given as
// a hint and for type-checking. The underlying expression must be a SQLVal
func extractNomsValueFromUnaryExpr(expr *sqlparser.UnaryExpr, kind types.NomsKind) (types.Value, error) {
sqlVal, ok := expr.Expr.(*sqlparser.SQLVal)
if !ok {
return nil, errFmt("Only SQL values are supported in unary expressions: %v", nodeToString(expr))
}
val, err := extractNomsValueFromSQLVal(sqlVal, kind)
if err != nil {
return nil, err
}
switch expr.Operator {
case sqlparser.UPlusStr:
switch kind {
case types.UintKind, types.IntKind, types.FloatKind:
return val, nil
default:
return nil, errFmt("Unsupported type for unary + operator: %v", nodeToString(expr))
}
case sqlparser.UMinusStr:
switch kind {
case types.UintKind:
return nil, errFmt("Cannot use unary - with for an unsigned value: %v", nodeToString(expr))
case types.IntKind:
return types.Int(-1 * val.(types.Int)), nil
case types.FloatKind:
return types.Float(-1 * val.(types.Float)), nil
default:
return nil, errFmt("Unsupported type for unary - operator: %v", nodeToString(expr))
}
case sqlparser.BangStr:
switch kind {
case types.BoolKind:
return types.Bool(!val.(types.Bool)), nil
default:
return nil, errFmt("Unsupported type for unary ! operator: '%v'", nodeToString(expr))
}
default:
return nil, errFmt("Unsupported unary operator %v in expression: '%v'", expr.Operator, nodeToString(expr))
}
} | go/libraries/doltcore/sql/values.go | 0.652684 | 0.481759 | values.go | starcoder |
package prjn
import (
"github.com/emer/emergent/evec"
"github.com/emer/etable/etensor"
"github.com/goki/ki/ints"
"github.com/goki/mat32"
)
// Rect implements a rectangular pattern of connectivity between two layers
// where the lower-left corner moves in proportion to receiver position with offset
// and multiplier factors (with wrap-around optionally).
// 4D layers are automatically flattened to 2D for this projection.
// See Connect for how the fields combine to produce the connectivity.
type Rect struct {
	Size evec.Vec2i `desc:"size of rectangle in sending layer that each receiving unit receives from"`
	Start evec.Vec2i `desc:"starting offset in sending layer, for computing the corresponding sending lower-left corner relative to given recv unit position"`
	Scale mat32.Vec2 `desc:"scaling to apply to receiving unit position to compute corresponding position in sending layer of the lower-left corner of rectangle"`
	AutoScale bool `desc:"auto-set the Scale as function of the relative sizes of send and recv layers (e.g., if sending layer is 2x larger than receiving, Scale = 2)"`
	RoundScale bool `desc:"if true, use Round when applying scaling factor -- otherwise uses Floor which makes Scale work like a grouping factor -- e.g., .25 will effectively group 4 recv units with same send position"`
	Wrap bool `desc:"if true, connectivity wraps around all edges if it would otherwise go off the edge -- if false, then edges are clipped"`
	SelfCon bool `desc:"if true, and connecting layer to itself (self projection), then make a self-connection from unit to itself"`
	RecvStart evec.Vec2i `desc:"starting position in receiving layer -- if > 0 then units below this starting point remain unconnected"`
	RecvN evec.Vec2i `desc:"number of units in receiving layer to connect -- if 0 then all (remaining after RecvStart) are connected -- otherwise if < remaining then those beyond this point remain unconnected"`
}
// NewRect returns a new Rect projection pattern with default parameters.
func NewRect() *Rect {
	r := &Rect{}
	r.Defaults()
	return r
}
// Defaults sets default parameters: a 2x2 rectangle, unit scale, and
// wrap-around enabled.
func (cr *Rect) Defaults() {
	cr.Size.Set(2, 2)
	cr.Scale.SetScalar(1)
	cr.Wrap = true
}
// Name returns the name of this projection pattern.
func (cr *Rect) Name() string { return "Rect" }
// Connect computes the rectangular connectivity between send and recv layers,
// returning per-unit connection counts for each layer and the full connection
// bitmap. 4D layers are flattened to 2D. same indicates that send and recv
// are the same layer (self projection), in which case SelfCon controls
// whether unit-to-self connections are made.
func (cr *Rect) Connect(send, recv *etensor.Shape, same bool) (sendn, recvn *etensor.Int32, cons *etensor.Bits) {
	sendn, recvn, cons = NewTensors(send, recv)
	sNy, sNx, _, _ := etensor.Prjn2DShape(send, false)
	rNy, rNx, _, _ := etensor.Prjn2DShape(recv, false)
	rnv := recvn.Values
	snv := sendn.Values
	sNtot := send.Len()
	// Scale is either fixed or derived from the relative layer sizes.
	sc := cr.Scale
	if cr.AutoScale {
		ssz := mat32.Vec2{float32(sNx), float32(sNy)}
		rsz := mat32.Vec2{float32(rNx), float32(rNy)}
		sc = ssz.Div(rsz)
	}
	// Effective receiving extent, clipped by RecvStart / RecvN when set.
	rNyEff := rNy
	if cr.RecvN.Y > 0 {
		rNyEff = ints.MinInt(rNy, cr.RecvStart.Y+cr.RecvN.Y)
	}
	rNxEff := rNx
	if cr.RecvN.X > 0 {
		rNxEff = ints.MinInt(rNx, cr.RecvStart.X+cr.RecvN.X)
	}
	for ry := cr.RecvStart.Y; ry < rNyEff; ry++ {
		for rx := cr.RecvStart.X; rx < rNxEff; rx++ {
			ri := etensor.Prjn2DIdx(recv, false, ry, rx)
			// Lower-left corner of the sending rectangle for this recv unit:
			// Start offset plus the scaled receiver position (rounded or floored).
			sst := cr.Start
			if cr.RoundScale {
				sst.X += int(mat32.Round(float32(rx) * sc.X))
				sst.Y += int(mat32.Round(float32(ry) * sc.Y))
			} else {
				sst.X += int(mat32.Floor(float32(rx) * sc.X))
				sst.Y += int(mat32.Floor(float32(ry) * sc.Y))
			}
			for y := 0; y < cr.Size.Y; y++ {
				// Edge wraps or clips coordinates that fall off the sending layer.
				sy, clipy := Edge(sst.Y+y, sNy, cr.Wrap)
				if clipy {
					continue
				}
				for x := 0; x < cr.Size.X; x++ {
					sx, clipx := Edge(sst.X+x, sNx, cr.Wrap)
					if clipx {
						continue
					}
					si := etensor.Prjn2DIdx(send, false, sy, sx)
					// Offset into the flattened recv x send connection bitmap.
					off := ri*sNtot + si
					// Skip self-connections unless explicitly enabled.
					if !cr.SelfCon && same && ri == si {
						continue
					}
					cons.Values.Set(off, true)
					rnv[ri]++
					snv[si]++
				}
			}
		}
	}
	return
}
package labels
import (
"bufio"
"bytes"
"os"
"sort"
"strconv"
"strings"
"github.com/cespare/xxhash"
"github.com/pkg/errors"
)
// sep separates names from values when serializing a label set for hashing.
const sep = '\xff'

// Label is a single name/value pair of strings.
type Label struct {
	Name, Value string
}

// Labels is a sorted set of labels. Order has to be guaranteed upon
// instantiation.
type Labels []Label

func (ls Labels) Len() int           { return len(ls) }
func (ls Labels) Swap(i, j int)      { ls[i], ls[j] = ls[j], ls[i] }
func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name }

// String renders the label set as {name="value",...} with quoted values.
func (ls Labels) String() string {
	var buf bytes.Buffer
	buf.WriteByte('{')
	for i, l := range ls {
		if i != 0 {
			buf.WriteByte(',')
		}
		buf.WriteString(l.Name)
		buf.WriteByte('=')
		buf.WriteString(strconv.Quote(l.Value))
	}
	buf.WriteByte('}')
	return buf.String()
}
// Hash returns a stable hash value for the label set, computed over the
// sep-delimited concatenation of all names and values.
func (ls Labels) Hash() uint64 {
	buf := make([]byte, 0, 1024)
	for _, l := range ls {
		buf = append(buf, l.Name...)
		buf = append(buf, sep)
		buf = append(buf, l.Value...)
		buf = append(buf, sep)
	}
	return xxhash.Sum64(buf)
}
// Get returns the value for the label with the given name, or the empty
// string when no such label exists.
func (ls Labels) Get(name string) string {
	for i := range ls {
		if ls[i].Name == name {
			return ls[i].Value
		}
	}
	return ""
}
// Equals reports whether the two label sets contain exactly the same
// labels in the same order.
func (ls Labels) Equals(o Labels) bool {
	if len(ls) != len(o) {
		return false
	}
	for i := range ls {
		if ls[i] != o[i] {
			return false
		}
	}
	return true
}
// Map returns the label set as a name -> value string map.
func (ls Labels) Map() map[string]string {
	m := make(map[string]string, len(ls))
	for i := range ls {
		m[ls[i].Name] = ls[i].Value
	}
	return m
}
// WithoutEmpty returns the label set without labels that have an empty
// value. When no label is empty, the receiver is returned unchanged
// (no allocation).
func (ls Labels) WithoutEmpty() Labels {
	for i := range ls {
		if ls[i].Value != "" {
			continue
		}
		// Found at least one empty value: build a filtered copy.
		filtered := make(Labels, 0, len(ls)-1)
		for _, l := range ls {
			if l.Value != "" {
				filtered = append(filtered, l)
			}
		}
		return filtered
	}
	return ls
}
// New returns a sorted Labels from the given labels.
// The caller has to guarantee that all label names are unique.
func New(ls ...Label) Labels {
	set := make(Labels, len(ls))
	copy(set, ls)
	sort.Sort(set)
	return set
}
// FromMap returns new sorted Labels from the given map, dropping entries
// whose value is the empty string.
func FromMap(m map[string]string) Labels {
	ls := make(Labels, 0, len(m))
	for name, value := range m {
		if value == "" {
			continue
		}
		ls = append(ls, Label{Name: name, Value: value})
	}
	sort.Sort(ls)
	return ls
}
// FromStrings creates new sorted labels from alternating name/value string
// pairs, dropping pairs with an empty value. Panics on an odd argument count.
func FromStrings(ss ...string) Labels {
	if len(ss)%2 != 0 {
		panic("invalid number of strings")
	}
	var ls Labels
	for i := 0; i+1 < len(ss); i += 2 {
		if v := ss[i+1]; v != "" {
			ls = append(ls, Label{Name: ss[i], Value: v})
		}
	}
	sort.Sort(ls)
	return ls
}
// Compare compares the two label sets lexicographically, by name then value.
// The result will be 0 if a==b, <0 if a < b, and >0 if a > b.
func Compare(a, b Labels) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		if c := strings.Compare(a[i].Name, b[i].Name); c != 0 {
			return c
		}
		if c := strings.Compare(a[i].Value, b[i].Value); c != 0 {
			return c
		}
	}
	// All shared positions are equal: the set with fewer labels orders first.
	return len(a) - len(b)
}
// Slice is a sortable slice of label sets, ordered by Compare.
type Slice []Labels
func (s Slice) Len() int           { return len(s) }
func (s Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s Slice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 }
// ReadLabels reads up to n label sets in a JSON formatted file fn. It is mostly useful
// to load testing data.
func ReadLabels(fn string, n int) ([]Labels, error) {
f, err := os.Open(fn)
if err != nil {
return nil, err
}
defer f.Close()
scanner := bufio.NewScanner(f)
var mets []Labels
hashes := map[uint64]struct{}{}
i := 0
for scanner.Scan() && i < n {
m := make(Labels, 0, 10)
r := strings.NewReplacer("\"", "", "{", "", "}", "")
s := r.Replace(scanner.Text())
labelChunks := strings.Split(s, ",")
for _, labelChunk := range labelChunks {
split := strings.Split(labelChunk, ":")
m = append(m, Label{Name: split[0], Value: split[1]})
}
// Order of the k/v labels matters, don't assume we'll always receive them already sorted.
sort.Sort(m)
h := m.Hash()
if _, ok := hashes[h]; ok {
continue
}
mets = append(mets, m)
hashes[h] = struct{}{}
i++
}
if i != n {
return mets, errors.Errorf("requested %d metrics but found %d", n, i)
}
return mets, nil
} | vendor/github.com/dnxware/tsdb/labels/labels.go | 0.66628 | 0.404331 | labels.go | starcoder |
package value
import (
"encoding/base64"
"fmt"
"reflect"
)
// NewValueReflect creates a Value backed by an "interface{}" type,
// typically a structured object in the Kubernetes world, that uses
// reflection to expose its contents. The provided value must be a
// pointer so that the value can be modified via reflection (map.Set()
// and map.Delete() need addressability). Types are converted to Values
// via the jsonMarshaler interface where applicable.
func NewValueReflect(value interface{}) (Value, error) {
	if value == nil {
		return NewValueInterface(nil), nil
	}
	rv := reflect.ValueOf(value)
	if rv.Kind() != reflect.Ptr {
		return nil, fmt.Errorf("value provided to NewValueReflect must be a pointer")
	}
	return wrapValueReflect(rv, nil, nil)
}
// wrapValueReflect wraps the provided reflect.Value as a Value. If the parent in the data tree is a map,
// parentMap and parentMapKey must be provided so that the returned value may be set and deleted.
func wrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) (Value, error) {
	// Allocate from the heap allocator and (re)initialize it in place.
	val := HeapAllocator.allocValueReflect()
	return val.reuse(value, nil, parentMap, parentMapKey)
}
// mustWrapValueReflect wraps the provided reflect.Value as a Value, and panics if there is an error.
// If the parent in the data tree is a map, parentMap and parentMapKey must be provided so that the
// returned value may be set and deleted.
func mustWrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) Value {
	v, err := wrapValueReflect(value, parentMap, parentMapKey)
	if err != nil {
		panic(err)
	}
	return v
}
// The value interface doesn't care about the concrete type for value.IsNull,
// so a single constant stand-in type is used for all nil values.
var nilType = reflect.TypeOf(&struct{}{})
// reuse replaces the value of the valueReflect. If parent in the data tree is a map, parentMap and parentMapKey
// must be provided so that the returned value may be set and deleted.
func (r *valueReflect) reuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) (Value, error) {
	if cacheEntry == nil {
		cacheEntry = TypeReflectEntryOf(value.Type())
	}
	if cacheEntry.CanConvertToUnstructured() {
		// Types with custom marshalers are first converted to their
		// unstructured (JSON-like) representation and wrapped instead.
		u, err := cacheEntry.ToUnstructured(value)
		if err != nil {
			return nil, err
		}
		if u == nil {
			// nilType is a shared stand-in; IsNull does not care about the type.
			value = reflect.Zero(nilType)
		} else {
			value = reflect.ValueOf(u)
		}
	}
	// Unwrap one level of pointer/interface indirection, then classify.
	r.Value = dereference(value)
	r.ParentMap = parentMap
	r.ParentMapKey = parentMapKey
	r.kind = kind(r.Value)
	return r, nil
}
// mustReuse replaces the value of the valueReflect and panics if there is an error. If parent in the data tree is a
// map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted.
func (r *valueReflect) mustReuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) Value {
	v, err := r.reuse(value, cacheEntry, parentMap, parentMapKey)
	if err != nil {
		panic(err)
	}
	return v
}
// dereference unwraps one level of pointer or interface indirection.
// Nil pointers/interfaces and every other kind are returned unchanged.
func dereference(val reflect.Value) reflect.Value {
	switch val.Kind() {
	case reflect.Interface, reflect.Ptr:
		if !safeIsNil(val) {
			return val.Elem()
		}
	}
	return val
}
// valueReflect is a Value implementation backed by a reflect.Value.
type valueReflect struct {
	ParentMap    *reflect.Value // enclosing map, when this value is a map entry
	ParentMapKey *reflect.Value // key under which this value is stored in ParentMap
	Value        reflect.Value  // the (dereferenced) underlying value
	kind         reflectType    // cached classification of Value, set by reuse
}
// IsMap reports whether the value is a map or a struct
// (structs are exposed as maps of field name -> value).
func (r valueReflect) IsMap() bool {
	return r.kind == mapType || r.kind == structMapType
}
// IsList reports whether the value is a slice (other than []byte).
func (r valueReflect) IsList() bool {
	return r.kind == listType
}
// IsBool reports whether the value is a bool.
func (r valueReflect) IsBool() bool {
	return r.kind == boolType
}
// IsInt reports whether the value is a signed or unsigned integer.
func (r valueReflect) IsInt() bool {
	return r.kind == intType || r.kind == uintType
}
// IsFloat reports whether the value is a float.
func (r valueReflect) IsFloat() bool {
	return r.kind == floatType
}
// IsString reports whether the value is a string or a []byte
// (byte slices are exposed as base64 strings).
func (r valueReflect) IsString() bool {
	return r.kind == stringType || r.kind == byteStringType
}
// IsNull reports whether the value is nil.
func (r valueReflect) IsNull() bool {
	return r.kind == nullType
}
// reflectType is a coarse classification of a reflect.Value into the
// categories the value interface distinguishes.
type reflectType = int
const (
	mapType = iota // non-nil Go map
	structMapType  // Go struct, exposed as a map of fields
	listType       // slice of anything except bytes
	intType        // signed integers
	uintType       // unsigned integers (uint64 excluded, see kind)
	floatType      // float32/float64
	stringType     // string
	byteStringType // []byte, exposed as a base64 string
	boolType       // bool
	nullType       // nil map/slice/ptr/interface/chan/func
)
// kind classifies v into a reflectType. It panics for types the value
// system does not represent (non-nil pointers/interfaces that survived
// dereference, chans, funcs, uint64, and anything else unlisted).
func kind(v reflect.Value) reflectType {
	typ := v.Type()
	rk := typ.Kind()
	switch rk {
	case reflect.Map:
		if v.IsNil() {
			return nullType
		}
		return mapType
	case reflect.Struct:
		return structMapType
	case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8:
		return intType
	case reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uint8:
		// Uint64 deliberately excluded, see valueUnstructured.Int.
		return uintType
	case reflect.Float64, reflect.Float32:
		return floatType
	case reflect.String:
		return stringType
	case reflect.Bool:
		return boolType
	case reflect.Slice:
		if v.IsNil() {
			return nullType
		}
		// []byte is treated as a (base64) string, not as a list.
		elemKind := typ.Elem().Kind()
		if elemKind == reflect.Uint8 {
			return byteStringType
		}
		return listType
	case reflect.Chan, reflect.Func, reflect.Ptr, reflect.UnsafePointer, reflect.Interface:
		// Only the nil forms of these kinds are representable.
		if v.IsNil() {
			return nullType
		}
		panic(fmt.Sprintf("unsupported type: %v", v.Type()))
	default:
		panic(fmt.Sprintf("unsupported type: %v", v.Type()))
	}
}
// TODO find a cleaner way to avoid panics from reflect.IsNil()
func safeIsNil(v reflect.Value) bool {
k := v.Kind()
switch k {
case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice:
return v.IsNil()
}
return false
}
// AsMap returns the value as a Map, allocating from the heap allocator.
func (r valueReflect) AsMap() Map {
	return r.AsMapUsing(HeapAllocator)
}
// AsMapUsing returns the value as a Map, allocating the wrapper from a.
// Panics if the value is neither a map nor a struct.
func (r valueReflect) AsMapUsing(a Allocator) Map {
	switch r.kind {
	case structMapType:
		// Structs are exposed through a field-name-keyed map view.
		v := a.allocStructReflect()
		v.valueReflect = r
		return v
	case mapType:
		v := a.allocMapReflect()
		v.valueReflect = r
		return v
	default:
		panic("value is not a map or struct")
	}
}
// AsList returns the value as a List, allocating from the heap allocator.
func (r valueReflect) AsList() List {
	return r.AsListUsing(HeapAllocator)
}
// AsListUsing returns the value as a List, allocating the wrapper from a.
// Panics if the value is not a list.
func (r valueReflect) AsListUsing(a Allocator) List {
	if r.IsList() {
		v := a.allocListReflect()
		v.Value = r.Value
		return v
	}
	panic("value is not a list")
}
// AsBool returns the underlying boolean; panics if the value is not a bool.
func (r valueReflect) AsBool() bool {
	if !r.IsBool() {
		panic("value is not a bool")
	}
	return r.Value.Bool()
}

// AsInt returns the underlying integer widened to int64; panics if the
// value is neither a signed nor an unsigned integer.
func (r valueReflect) AsInt() int64 {
	switch r.kind {
	case intType:
		return r.Value.Int()
	case uintType:
		return int64(r.Value.Uint())
	default:
		panic("value is not an int")
	}
}

// AsFloat returns the underlying float as a float64; panics if the value
// is not a float.
func (r valueReflect) AsFloat() float64 {
	if !r.IsFloat() {
		panic("value is not a float")
	}
	return r.Value.Float()
}

// AsString returns the underlying string, or the standard base64 encoding
// of a []byte value; panics for any other kind.
func (r valueReflect) AsString() string {
	if r.kind == stringType {
		return r.Value.String()
	}
	if r.kind == byteStringType {
		return base64.StdEncoding.EncodeToString(r.Value.Bytes())
	}
	panic("value is not a string")
}
// Unstructured converts the value to its plain Go (JSON-like)
// representation: nil, map, slice, string, int64, bool, or float64.
// Panics for unsupported kinds.
func (r valueReflect) Unstructured() interface{} {
	val := r.Value
	switch {
	case r.IsNull():
		return nil
	case val.Kind() == reflect.Struct:
		return structReflect{r}.Unstructured()
	case val.Kind() == reflect.Map:
		return mapReflect{valueReflect: r}.Unstructured()
	case r.IsList():
		return listReflect{r.Value}.Unstructured()
	case r.IsString():
		// Includes []byte, which AsString renders as base64.
		return r.AsString()
	case r.IsInt():
		return r.AsInt()
	case r.IsBool():
		return r.AsBool()
	case r.IsFloat():
		return r.AsFloat()
	default:
		panic(fmt.Sprintf("value of type %s is not a supported by value reflector", val.Type()))
	}
} | vendor/sigs.k8s.io/structured-merge-diff/v4/value/valuereflect.go | 0.673192 | 0.519399 | valuereflect.go | starcoder |
package mlpack
/*
#cgo CFLAGS: -I./capi -Wall
#cgo LDFLAGS: -L. -lmlpack_go_approx_kfn
#include <capi/approx_kfn.h>
#include <stdlib.h>
*/
import "C"
import "gonum.org/v1/gonum/mat"
// ApproxKfnOptionalParam holds the optional parameters accepted by
// ApproxKfn; see ApproxKfnOptions for the defaults and the ApproxKfn
// comment for the meaning of each field.
type ApproxKfnOptionalParam struct {
    Algorithm string        // Search strategy: "ds" (DrusillaSelect) or "qdafn".
    CalculateError bool     // Compute average distance error for the first furthest neighbor.
    ExactDistances *mat.Dense // Exact distances, used when CalculateError is set.
    InputModel *approxkfnModel // Previously built model to reuse.
    K int                   // Number of furthest neighbors to search for.
    NumProjections int      // Projections per hash table.
    NumTables int           // Number of hash tables.
    Query *mat.Dense        // Query points; defaults to the reference set.
    Reference *mat.Dense    // Reference dataset to search in.
    Verbose bool            // Emit informational messages and timers.
}
// ApproxKfnOptions returns an ApproxKfnOptionalParam populated with the
// default values expected by ApproxKfn (which only forwards parameters
// that differ from these defaults).
func ApproxKfnOptions() *ApproxKfnOptionalParam {
  return &ApproxKfnOptionalParam{
    Algorithm: "ds",
    CalculateError: false,
    ExactDistances: nil,
    InputModel: nil,
    K: 0,
    NumProjections: 5,
    NumTables: 5,
    Query: nil,
    Reference: nil,
    Verbose: false,
  }
}
/*
This program implements two strategies for furthest neighbor search. These
strategies are:
- The 'qdafn' algorithm from "Approximate Furthest Neighbor in High
Dimensions" by <NAME>, <NAME>, <NAME>, and <NAME>, in
Similarity Search and Applications 2015 (SISAP).
- The 'DrusillaSelect' algorithm from "Fast approximate furthest neighbors
with data-dependent candidate selection", by <NAME> and <NAME>, in
Similarity Search and Applications 2016 (SISAP).
These two strategies give approximate results for the furthest neighbor search
problem and can be used as fast replacements for other furthest neighbor
techniques such as those found in the mlpack_kfn program. Note that
typically, the 'ds' algorithm requires far fewer tables and projections than
the 'qdafn' algorithm.
Specify a reference set (set to search in) with "Reference", specify a query
set with "Query", and specify algorithm parameters with "NumTables" and
"NumProjections" (or don't and defaults will be used). The algorithm to be
used (either 'ds'---the default---or 'qdafn') may be specified with
"Algorithm". Also specify the number of neighbors to search for with "K".
Note that for 'qdafn' in lower dimensions, "NumProjections" may need to be set
to a high value in order to return results for each query point.
If no query set is specified, the reference set will be used as the query set.
The "OutputModel" output parameter may be used to store the built model, and
an input model may be loaded instead of specifying a reference set with the
"InputModel" option.
Results for each query point can be stored with the "Neighbors" and
"Distances" output parameters. Each row of these output matrices holds the k
distances or neighbor indices for each query point.
For example, to find the 5 approximate furthest neighbors with reference_set
as the reference set and query_set as the query set using DrusillaSelect,
storing the furthest neighbor indices to neighbors and the furthest neighbor
distances to distances, one could call
// Initialize optional parameters for ApproxKfn().
param := mlpack.ApproxKfnOptions()
param.Query = query_set
param.Reference = reference_set
param.K = 5
param.Algorithm = "ds"
distances, neighbors, _ := mlpack.ApproxKfn(param)
and to perform approximate all-furthest-neighbors search with k=1 on the set
data storing only the furthest neighbor distances to distances, one could call
// Initialize optional parameters for ApproxKfn().
param := mlpack.ApproxKfnOptions()
param.Reference = reference_set
param.K = 1
distances, _, _ := mlpack.ApproxKfn(param)
A trained model can be re-used. If a model has been previously saved to
model, then we may find 3 approximate furthest neighbors on a query set
new_query_set using that model and store the furthest neighbor indices into
neighbors by calling
// Initialize optional parameters for ApproxKfn().
param := mlpack.ApproxKfnOptions()
param.InputModel = &model
param.Query = new_query_set
param.K = 3
_, neighbors, _ := mlpack.ApproxKfn(param)
Input parameters:
- Algorithm (string): Algorithm to use: 'ds' or 'qdafn'. Default value
'ds'.
- CalculateError (bool): If set, calculate the average distance error
for the first furthest neighbor only.
- ExactDistances (mat.Dense): Matrix containing exact distances to
furthest neighbors; this can be used to avoid explicit calculation when
--calculate_error is set.
- InputModel (approxkfnModel): File containing input model.
- K (int): Number of furthest neighbors to search for. Default value
0.
- NumProjections (int): Number of projections to use in each hash
table. Default value 5.
- NumTables (int): Number of hash tables to use. Default value 5.
- Query (mat.Dense): Matrix containing query points.
- Reference (mat.Dense): Matrix containing the reference dataset.
- Verbose (bool): Display informational messages and the full list of
parameters and timers at the end of execution.
Output parameters:
- distances (mat.Dense): Matrix to save furthest neighbor distances
to.
- neighbors (mat.Dense): Matrix to save neighbor indices to.
- outputModel (approxkfnModel): File to save output model to.
*/
func ApproxKfn(param *ApproxKfnOptionalParam) (*mat.Dense, *mat.Dense, approxkfnModel) {
  // Reset mlpack's global binding state before marshalling parameters.
  resetTimers()
  enableTimers()
  disableBacktrace()
  disableVerbose()
  restoreSettings("Approximate furthest neighbor search")

  // NOTE(review): a parameter explicitly set to its default value is
  // indistinguishable from "unset" — only differences from the defaults
  // are forwarded to mlpack.

  // Detect if the parameter was passed; set if so.
  if param.Algorithm != "ds" {
    setParamString("algorithm", param.Algorithm)
    setPassed("algorithm")
  }

  // Detect if the parameter was passed; set if so.
  if param.CalculateError != false {
    setParamBool("calculate_error", param.CalculateError)
    setPassed("calculate_error")
  }

  // Detect if the parameter was passed; set if so.
  if param.ExactDistances != nil {
    gonumToArmaMat("exact_distances", param.ExactDistances)
    setPassed("exact_distances")
  }

  // Detect if the parameter was passed; set if so.
  if param.InputModel != nil {
    setApproxKFNModel("input_model", param.InputModel)
    setPassed("input_model")
  }

  // Detect if the parameter was passed; set if so.
  if param.K != 0 {
    setParamInt("k", param.K)
    setPassed("k")
  }

  // Detect if the parameter was passed; set if so.
  if param.NumProjections != 5 {
    setParamInt("num_projections", param.NumProjections)
    setPassed("num_projections")
  }

  // Detect if the parameter was passed; set if so.
  if param.NumTables != 5 {
    setParamInt("num_tables", param.NumTables)
    setPassed("num_tables")
  }

  // Detect if the parameter was passed; set if so.
  if param.Query != nil {
    gonumToArmaMat("query", param.Query)
    setPassed("query")
  }

  // Detect if the parameter was passed; set if so.
  if param.Reference != nil {
    gonumToArmaMat("reference", param.Reference)
    setPassed("reference")
  }

  // Detect if the parameter was passed; set if so.
  if param.Verbose != false {
    setParamBool("verbose", param.Verbose)
    setPassed("verbose")
    enableVerbose()
  }

  // Mark all output options as passed.
  setPassed("distances")
  setPassed("neighbors")
  setPassed("output_model")

  // Call the mlpack program.
  C.mlpackApproxKfn()

  // Initialize result variable and get output.
  var distancesPtr mlpackArma
  distances := distancesPtr.armaToGonumMat("distances")
  var neighborsPtr mlpackArma
  neighbors := neighborsPtr.armaToGonumUmat("neighbors")
  var outputModel approxkfnModel
  outputModel.getApproxKFNModel("output_model")

  // Clear settings.
  clearSettings()

  // Return output(s).
  return distances, neighbors, outputModel
} | approx_kfn.go | 0.668556 | 0.420659 | approx_kfn.go | starcoder |
package model
import (
"math"
"strconv"
native_time "time"
)
// Timestamp counts milliseconds elapsed since the Unix epoch
// (1970-01-01 00:00 UTC), ignoring leap seconds.
type Timestamp int64

const (
	// MinimumTick is the finest time resolution supported. The code in
	// this file assumes it is no coarser than native_time.Second.
	MinimumTick = native_time.Millisecond
	// second is how many timestamp ticks make up one second.
	second = int64(native_time.Second / MinimumTick)
	// nanosPerTick is how many nanoseconds make up one tick.
	nanosPerTick = int64(MinimumTick / native_time.Nanosecond)

	// Earliest is the smallest representable Timestamp; handy as an
	// initial high watermark.
	Earliest = Timestamp(math.MinInt64)
	// Latest is the largest representable Timestamp; handy as an
	// initial low watermark.
	Latest = Timestamp(math.MaxInt64)
)

// Equal reports whether t and o denote the same instant.
func (t Timestamp) Equal(o Timestamp) bool {
	return t == o
}

// Before reports whether t precedes o.
func (t Timestamp) Before(o Timestamp) bool {
	return t < o
}

// After reports whether t follows o.
func (t Timestamp) After(o Timestamp) bool {
	return t > o
}

// Add returns t shifted forward by the duration d.
func (t Timestamp) Add(d native_time.Duration) Timestamp {
	return t + Timestamp(d/MinimumTick)
}

// Sub returns the duration t - o.
func (t Timestamp) Sub(o Timestamp) native_time.Duration {
	return native_time.Duration(t-o) * MinimumTick
}

// Time converts t to the equivalent native time.Time.
func (t Timestamp) Time() native_time.Time {
	secs := int64(t) / second
	ticks := int64(t) % second
	return native_time.Unix(secs, ticks*nanosPerTick)
}

// Unix returns t in whole seconds elapsed since the Unix epoch.
func (t Timestamp) Unix() int64 {
	return int64(t) / second
}

// UnixNano returns t in nanoseconds elapsed since the Unix epoch.
func (t Timestamp) UnixNano() int64 {
	return int64(t) * nanosPerTick
}

// String renders t as a decimal number of seconds, keeping any
// fractional part.
func (t Timestamp) String() string {
	return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
}

// MarshalJSON implements the json.Marshaler interface.
func (t Timestamp) MarshalJSON() ([]byte, error) {
	return []byte(t.String()), nil
}

// Now returns the current wall-clock time as a Timestamp.
func Now() Timestamp {
	return TimestampFromTime(native_time.Now())
}

// TimestampFromTime converts a native time.Time into a Timestamp.
func TimestampFromTime(t native_time.Time) Timestamp {
	return TimestampFromUnixNano(t.UnixNano())
}

// TimestampFromUnix converts a Unix time in seconds into a Timestamp.
func TimestampFromUnix(t int64) Timestamp {
	return Timestamp(t * second)
}

// TimestampFromUnixNano converts a Unix time in nanoseconds into a
// Timestamp.
func TimestampFromUnixNano(t int64) Timestamp {
	return Timestamp(t / nanosPerTick)
}
// | Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp.go | 0.880579 | 0.441854 | timestamp.go | starcoder |
package flightdb
import (
"fmt"
"time"
"github.com/skypies/adsb"
"github.com/skypies/geo"
)
// Trackpoint is a data point that locates an aircraft in space and time, etc
type Trackpoint struct {
	DataSource string // What kind of trackpoint is this; flightaware radar, local ADSB, etc
	//DataSystem // embedded string
	//DataProvider // embedded string
	ReceiverName string // For local ADSB
	TimestampUTC time.Time // Always in UTC, to make life SIMPLE
	geo.Latlong // Embedded type, so we can call all the geo stuff directly on trackpoints
	Altitude float64 // This is pressure altitude (i.e. altitude in a standard atmosphere)
	GroundSpeed float64 // In knots
	Heading float64 // [0.0, 360.0) degrees. Direction plane is pointing in. Mag or real north?
	VerticalRate float64 // In feet per minute (multiples of 64)
	Squawk string // Generally, a string of four digits.
	//// None of the fields below are stored in the database
	// These two are transient fields, populated during analysis, and displayed on the map view
	AnalysisAnnotation string `datastore:"-" json:"-"`
	AnalysisDisplay AnalysisDisplayEnum `datastore:"-" json:"-"`
	// These fields are derived
	IndicatedAltitude float64 `datastore:"-" json:"-"` // Corrected for local air pressure
	DistanceTravelledKM float64 `datastore:"-" json:"-"` // Totted up from point to point
	GroundAccelerationKPS float64 `datastore:"-" json:"-"` // In knots per second
	VerticalSpeedFPM float64 `datastore:"-" json:"-"` // Feet per minute (~== VerticalRate)
	VerticalAccelerationFPMPS float64 `datastore:"-" json:"-"` // In (feet per minute) per second
	AngleOfInclination float64 `datastore:"-" json:"-"` // In degrees. +ve means climbing
	// Populated just in first trackpoint, to hold transient notes for the whole track.
	Notes string `datastore:"-" json:"-"`
}
// InterpolatedTrackpoint is a synthetic trackpoint lying between two real
// ones, plus the metadata that was used to derive it.
type InterpolatedTrackpoint struct {
	Trackpoint // Embedded struct; only the interpolatable bits will be populated
	Pre, Post *Trackpoint // The points we were interpolated from
	Ratio float64 // How far we were inbetween them
	Ref geo.Latlong // The point we were in reference to
	Line geo.LatlongLine // The line that connects the ref point to the line {pre->post}
	Perp geo.LatlongLine
}
// AnalysisDisplayEnum controls how an analyzed trackpoint is rendered.
type AnalysisDisplayEnum int
const(
	AnalysisDisplayDefault AnalysisDisplayEnum = iota // normal rendering
	AnalysisDisplayOmit // point is hidden
	AnalysisDisplayHighlight // "red-large"
)
// {{{ tp.ShortString

// ShortString renders the trackpoint as a one-line summary
// (time, position, altitudes, speed, heading).
func (tp Trackpoint)ShortString() string {
	str := fmt.Sprintf("[%s] %s %.0fft (%.0f), %.0fkts, %.0fdeg", tp.TimestampUTC, tp.Latlong,
		tp.Altitude, tp.IndicatedAltitude, tp.GroundSpeed, tp.Heading)
	// Path distance is only shown once it has been computed.
	if tp.DistanceTravelledKM > 0.0 {
		str += fmt.Sprintf(" [path:%.3fKM]", tp.DistanceTravelledKM)
	}
	return str
}

// }}}
// {{{ tp.String

// String renders the trackpoint as a multi-line summary, including the
// derived rate/acceleration fields when they have been computed.
func (tp Trackpoint)String() string {
	str := fmt.Sprintf("[%s] %s %.0fft, %.0fkts, %.0fdeg", tp.TimestampUTC, tp.Latlong,
		tp.Altitude, tp.GroundSpeed, tp.Heading)
	if tp.DistanceTravelledKM > 0.0 {
		str += fmt.Sprintf("\n* Travelled Dist: %.3f KM\n"+
			"* Vertical rates: computed: %.0f feet/min; received: %.0f feet/min\n"+
			"* Acceleration: horiz: %.2f knots/sec, vert %.0f feetpermin/sec",
			tp.DistanceTravelledKM,
			tp.VerticalSpeedFPM, tp.VerticalRate,
			tp.GroundAccelerationKPS, tp.VerticalAccelerationFPMPS)
	}
	return str
}

// }}}
// {{{ tp.ToJSString

// ToJSString renders the trackpoint as a JavaScript object-literal body
// (no surrounding braces) for embedding in map-view pages.
func (tp Trackpoint)ToJSString() string {
	return fmt.Sprintf("source:%q, receiver:%q, pos:{lat:%.6f,lng:%.6f}, "+
		"alt:%.0f, speed:%.0f, track:%.0f, vert:%.0f, t:\"%s\"",
		tp.DataSource, tp.ReceiverName, tp.Lat, tp.Long,
		tp.Altitude, tp.GroundSpeed, tp.Heading, tp.VerticalRate, tp.TimestampUTC)
}

// }}}
// {{{ tp.LongSource

// LongSource expands the short DataSource code into a human-readable
// description, including the receiver name for locally received data.
func (tp Trackpoint)LongSource() string {
	switch tp.DataSource {
	case "": return "(none specified)"
	case "FA:TZ": return "FlightAware, Radar (TZ)"
	case "FA:TA": return "FlightAware, ADS-B Mode-ES (TA)"
	case "ADSB": return "Private receiver, ADS-B Mode-ES ("+tp.ReceiverName+")"
	case "MLAT": return "MLAT ("+tp.ReceiverName+")"
	}
	// Unknown codes are passed through untranslated.
	return tp.DataSource
}

// }}}
// {{{ TrackpointFromADSB

// TrackpointFromADSB builds a Trackpoint from a composite ADS-B message,
// tagging the data source as "ADSB" or "MLAT" as appropriate.
func TrackpointFromADSB(m *adsb.CompositeMsg) Trackpoint {
	tp := Trackpoint{
		//DataSystem: "ADSB",
		DataSource: "ADSB",
		ReceiverName: m.ReceiverName,
		TimestampUTC: m.GeneratedTimestampUTC,
		Latlong: m.Position,
		Altitude: float64(m.Altitude),
		GroundSpeed: float64(m.GroundSpeed),
		Heading: float64(m.Track),
		VerticalRate: float64(m.VerticalRate),
		Squawk: m.Squawk,
	}
	// Need to really clean all this up
	if m.IsMLAT() {
		tp.DataSource = "MLAT"
	}
	return tp
}

// }}}
// {{{ TrackpointFromAverage

// TrackpointFromAverage returns a trackpoint whose numeric fields are the
// arithmetic mean of the input points. Non-numeric fields (source, receiver,
// position, timestamp, squawk, ...) are copied from the first point.
// Returns the zero value when the input is empty.
//
// NOTE(review): headings are averaged arithmetically, which misbehaves
// across the 0/360 boundary — TODO confirm callers never average such spans.
func TrackpointFromAverage(in []Trackpoint) Trackpoint {
	if len(in) == 0 { return Trackpoint{} }

	out := in[0] // Initialize, to get all the non-numeric stuff (and timestamp of in[0])

	// Zero the fields we are about to accumulate. Previously they kept
	// in[0]'s values, so in[0] was counted twice in every average.
	out.Altitude = 0
	out.GroundSpeed = 0
	out.Heading = 0
	out.VerticalRate = 0
	out.IndicatedAltitude = 0
	out.DistanceTravelledKM = 0
	out.GroundAccelerationKPS = 0
	out.VerticalSpeedFPM = 0
	out.VerticalAccelerationFPMPS = 0

	for _,tp := range in {
		out.Altitude += tp.Altitude
		out.GroundSpeed += tp.GroundSpeed
		out.Heading += tp.Heading
		out.VerticalRate += tp.VerticalRate
		out.IndicatedAltitude += tp.IndicatedAltitude
		out.DistanceTravelledKM += tp.DistanceTravelledKM
		out.GroundAccelerationKPS += tp.GroundAccelerationKPS
		out.VerticalSpeedFPM += tp.VerticalSpeedFPM
		out.VerticalAccelerationFPMPS += tp.VerticalAccelerationFPMPS
	}

	n := float64(len(in))
	out.Altitude /= n
	out.GroundSpeed /= n
	out.Heading /= n
	out.VerticalRate /= n
	out.IndicatedAltitude /= n
	out.DistanceTravelledKM /= n
	out.GroundAccelerationKPS /= n
	out.VerticalSpeedFPM /= n
	out.VerticalAccelerationFPMPS /= n

	out.Notes += fmt.Sprintf("(avg, from %d points)", len(in))

	return out
}

// }}}
// {{{ tp.InterpolateTo
// interpolateFloat64 returns the value a fraction `ratio` of the way from
// `from` to `to` (ratio 0 yields from; ratio 1 yields to).
func interpolateFloat64(from, to, ratio float64) float64 {
	delta := to - from
	return from + delta*ratio
}
func interpolateTime(from, to time.Time, ratio float64) time.Time {
d1 := to.Sub(from)
nanosToAdd := ratio * float64(d1.Nanoseconds())
d2 := time.Nanosecond * time.Duration(nanosToAdd)
d3 := time.Second * time.Duration(d2.Seconds()) // Round down to second precision
return from.Add(d3)
}
// InterpolateTo returns a synthetic point a fraction `ratio` of the way
// from `from` to `to`, interpolating time, position, heading and the
// derived fields, and recording the two source points and the ratio.
func (from Trackpoint)InterpolateTo(to Trackpoint, ratio float64) InterpolatedTrackpoint {
	itp := InterpolatedTrackpoint{
		Pre: &from,
		Post: &to,
		Ratio: ratio,
		Trackpoint: Trackpoint{
			GroundSpeed: interpolateFloat64(from.GroundSpeed, to.GroundSpeed, ratio),
			VerticalRate: interpolateFloat64(from.VerticalRate, to.VerticalRate, ratio),
			Altitude: interpolateFloat64(from.Altitude, to.Altitude, ratio),
			// Headings need circular interpolation (wrap at 0/360), so use the geo helper.
			Heading: geo.InterpolateHeading(from.Heading, to.Heading, ratio),
			Latlong: from.Latlong.InterpolateTo(to.Latlong, ratio),
			TimestampUTC: interpolateTime(from.TimestampUTC, to.TimestampUTC, ratio),
			// Also interpolate the synthetic fields
			DistanceTravelledKM: interpolateFloat64(from.DistanceTravelledKM, to.DistanceTravelledKM, ratio),
			GroundAccelerationKPS: interpolateFloat64(from.GroundAccelerationKPS, to.GroundAccelerationKPS, ratio),
			VerticalSpeedFPM: interpolateFloat64(from.VerticalSpeedFPM, to.VerticalSpeedFPM, ratio),
			VerticalAccelerationFPMPS: interpolateFloat64(from.VerticalAccelerationFPMPS, to.VerticalAccelerationFPMPS, ratio),
		},
	}
	return itp
}
// }}}
// {{{ tp.RepositionByTime
// RepositionByTime returns a trackpoint that has been repositioned, assuming it was
// travelling at constant velocity. The duration passed in determines how far to move in either
// direction.
func (s Trackpoint)RepositionByTime(d time.Duration) Trackpoint {
	e := s
	hDistMeters := geo.NMph2mps(s.GroundSpeed) * d.Seconds() // 1 knot == 1 NM/hour
	e.Latlong = s.Latlong.MoveKM(s.Heading, hDistMeters/1000.0)
	e.Altitude += (float64(s.VerticalRate) / 60.0) * d.Seconds() // vertical rate is in feet per minute
	e.TimestampUTC = s.TimestampUTC.Add(d)
	return e
}
// }}}
// {{{ -------------------------={ E N D }=----------------------------------
// Local variables:
// folded-file: t
// end:
// }}} | trackpoint.go | 0.628407 | 0.465387 | trackpoint.go | starcoder |
// Package skein256 implements the Skein256 hash function
// based on the Threefish256 tweakable block cipher.
package skein256
import (
"hash"
"github.com/aead/skein"
)
// Sum512 computes the 512 bit Skein256 checksum (or MAC if key is set) of msg
// and writes it to out. The key is optional and can be nil.
func Sum512(out *[64]byte, msg, key []byte) {
	var out256 [32]byte
	s := new(hashFunc)
	s.initialize(64, &skein.Config{Key: key})
	s.Write(msg)
	s.finalizeHash()
	// The 512 bit digest is produced as two consecutive 256 bit output blocks.
	s.output(&out256, 0)
	copy(out[:], out256[:])
	s.output(&out256, 1)
	copy(out[32:], out256[:])
}
// Sum384 computes the 384 bit Skein256 checksum (or MAC if key is set) of msg
// and writes it to out. The key is optional and can be nil.
func Sum384(out *[48]byte, msg, key []byte) {
	var out256 [32]byte
	s := new(hashFunc)
	s.initialize(48, &skein.Config{Key: key})
	s.Write(msg)
	s.finalizeHash()
	// 384 bits = one full 256 bit output block plus half of a second one.
	s.output(&out256, 0)
	copy(out[:], out256[:])
	s.output(&out256, 1)
	copy(out[32:], out256[:16])
}
// Sum256 computes the 256 bit Skein256 checksum (or MAC if key is set) of msg
// and writes it to out. The key is optional and can be nil.
func Sum256(out *[32]byte, msg, key []byte) {
	s := new(hashFunc)
	s.initialize(32, &skein.Config{Key: key})
	s.Write(msg)
	s.finalizeHash()
	// A 256 bit digest fits a single output block, written directly to out.
	s.output(out, 0)
}
// Sum160 computes the 160 bit Skein256 checksum (or MAC if key is set) of msg
// and writes it to out. The key is optional and can be nil.
func Sum160(out *[20]byte, msg, key []byte) {
	var out256 [32]byte
	s := new(hashFunc)
	s.initialize(20, &skein.Config{Key: key})
	s.Write(msg)
	s.finalizeHash()
	// The 160 bit digest is the first 20 bytes of one 256 bit output block.
	s.output(&out256, 0)
	copy(out[:], out256[:20])
}
// Sum returns the Skein256 checksum with the given hash size of msg using the (optional)
// conf for configuration. The hashsize must be > 0.
func Sum(msg []byte, hashsize int, conf *skein.Config) []byte {
	s := New(hashsize, conf)
	s.Write(msg)
	return s.Sum(nil)
}
// New512 returns a hash.Hash computing the Skein256 512 bit checksum.
// The key is optional and turns the hash into a MAC.
func New512(key []byte) hash.Hash {
	s := new(hashFunc)
	s.initialize(64, &skein.Config{Key: key})
	return s
}
// New256 returns a hash.Hash computing the Skein256 256 bit checksum.
// The key is optional and turns the hash into a MAC.
func New256(key []byte) hash.Hash {
	s := new(hashFunc)
	s.initialize(32, &skein.Config{Key: key})
	return s
}
// New returns a hash.Hash computing the Skein256 checksum with the given hash size.
// The conf is optional and configures the returned hash.Hash.
func New(hashsize int, conf *skein.Config) hash.Hash {
	s := new(hashFunc)
	s.initialize(hashsize, conf)
	return s
} | vendor/github.com/aead/skein/skein256/skein.go | 0.835416 | 0.488222 | skein.go | starcoder |
package starlark
import (
"fmt"
"reflect"
"github.com/pkg/errors"
"go.starlark.net/starlark"
"go.starlark.net/starlarkstruct"
)
// GoValue represents an inherent Go value which can be
// converted to a Starlark value/type
type GoValue struct {
	val interface{} // the wrapped native Go value
}
// NewGoValue creates a value with inherent Go type
func NewGoValue(val interface{}) *GoValue {
	return &GoValue{val: val}
}
// Value returns the original value as an interface{}
func (v *GoValue) Value() interface{} {
	return v.val
}
// ToStringDict converts map v to a starlark.StringDict value where the key is
// expected to be a string and the value to be a string, bool, numeric, or []T.
func (v *GoValue) ToStringDict() (starlark.StringDict, error) {
	result := make(starlark.StringDict)
	valType := reflect.TypeOf(v.val)
	valValue := reflect.ValueOf(v.val)

	switch valType.Kind() {
	case reflect.Map:
		// StringDict requires string keys; reject any other key type up front.
		if valType.Key().Kind() != reflect.String {
			return nil, fmt.Errorf("ToStringDict failed assertion: %T requires string keys", v.val)
		}
		iter := valValue.MapRange()
		for iter.Next() {
			key := iter.Key()
			val := iter.Value()
			// Each map value is converted to its Starlark equivalent.
			starVal, err := GoToStarlarkValue(val.Interface())
			if err != nil {
				return nil, fmt.Errorf("ToStringDict failed assertion: %s", err)
			}
			result[key.String()] = starVal
		}
	default:
		return nil, fmt.Errorf("ToStringDict does not support %T", v.val)
	}

	return result, nil
}
// ToDict converts map v to a *starlark.Dict value where the key and value can
// be of an arbitrary types of string, bool, numeric, or []T.
func (v *GoValue) ToDict() (*starlark.Dict, error) {
	valType := reflect.TypeOf(v.val)
	valValue := reflect.ValueOf(v.val)

	var dict *starlark.Dict
	switch valType.Kind() {
	case reflect.Map:
		dict = starlark.NewDict(valValue.Len())
		iter := valValue.MapRange()
		for iter.Next() {
			// Unlike ToStringDict, both keys and values are converted.
			key, err := GoToStarlarkValue(iter.Key().Interface())
			if err != nil {
				return nil, fmt.Errorf("ToDict failed key conversion: %s", err)
			}
			val, err := GoToStarlarkValue(iter.Value().Interface())
			if err != nil {
				return nil, fmt.Errorf("ToDict failed value conversion: %s", err)
			}
			if err := dict.SetKey(key, val); err != nil {
				return nil, errors.Wrapf(err, "failed to add key: %s", key)
			}
		}
	default:
		return nil, fmt.Errorf("ToDict does not support %T", v.val)
	}

	return dict, nil
}
// ToList converts v of type []T to a *starlark.List value where the elements can
// be of an arbitrary types of string, bool, numeric, or []T.
func (v *GoValue) ToList() (*starlark.List, error) {
	valType := reflect.TypeOf(v.val)

	switch valType.Kind() {
	case reflect.Slice, reflect.Array:
		// ToStarlarkValue renders slices/arrays as a starlark.Tuple,
		// whose elements back the new list.
		val, err := v.ToStarlarkValue()
		if err != nil {
			return nil, fmt.Errorf("ToList failed: %s", err)
		}
		elems, ok := val.(starlark.Tuple)
		if !ok {
			return nil, fmt.Errorf("ToList failed assertion: unexpected type: %T", val)
		}
		return starlark.NewList(elems), nil
	default:
		return nil, fmt.Errorf("ToList does not support %T", v.val)
	}
}
// ToTuple converts v of type []T to a starlark.Tuple value where the elements can
// be of an arbitrary types of string, bool, numeric, or []T.
func (v *GoValue) ToTuple() (starlark.Tuple, error) {
	valType := reflect.TypeOf(v.val)

	switch valType.Kind() {
	case reflect.Slice, reflect.Array:
		val, err := v.ToStarlarkValue()
		if err != nil {
			// Error messages previously said "ToList" (copy-paste from ToList).
			return nil, fmt.Errorf("ToTuple failed: %s", err)
		}
		// Use a checked assertion (matching ToList) instead of one that
		// panics on an unexpected conversion result.
		tuple, ok := val.(starlark.Tuple)
		if !ok {
			return nil, fmt.Errorf("ToTuple failed assertion: unexpected type: %T", val)
		}
		return tuple, nil
	default:
		return nil, fmt.Errorf("ToTuple does not support %T", v.val)
	}
}
// ToStarlarkStruct converts a v of type struct or map to a *starlarkstruct.Struct value.
// Struct fields become entries keyed by field name; maps are converted via
// ToStringDict.
// NOTE(review): reading unexported struct fields panics in Interface() —
// confirm inputs only carry exported fields.
func (v *GoValue) ToStarlarkStruct(constructorName string) (*starlarkstruct.Struct, error) {
	valType := reflect.TypeOf(v.val)
	valValue := reflect.ValueOf(v.val)
	constructor := starlark.String(constructorName)
	switch valType.Kind() {
	case reflect.Struct:
		stringDict := make(starlark.StringDict)
		for i := 0; i < valType.NumField(); i++ {
			fname := valType.Field(i).Name
			fval, err := GoToStarlarkValue(valValue.Field(i).Interface())
			if err != nil {
				return nil, fmt.Errorf("ToStarlarkStruct failed field value conversion: %s", err)
			}
			stringDict[fname] = fval
		}
		return starlarkstruct.FromStringDict(constructor, stringDict), nil
	case reflect.Map:
		stringDict, err := v.ToStringDict()
		if err != nil {
			return nil, fmt.Errorf("ToStarlarkStruct failed: %s", err)
		}
		return starlarkstruct.FromStringDict(constructor, stringDict), nil
	default:
		// Error previously said "ToDict" (copy-paste); report the right method.
		return nil, fmt.Errorf("ToStarlarkStruct does not support %T", v.val)
	}
}
// ToStarlarkValue converts the wrapped Go value to its Starlark equivalent
// via GoToStarlarkValue.
func (v *GoValue) ToStarlarkValue() (starlark.Value, error) {
	return GoToStarlarkValue(v.val)
}
// GoToStarlarkValue converts Go value val to its Starlark value/type.
// It supports basic numeric types, string, bool, and slice/arrays.
func GoToStarlarkValue(val interface{}) (starlark.Value, error) {
valType := reflect.TypeOf(val)
valValue := reflect.ValueOf(val)
switch valType.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return starlark.MakeInt64(valValue.Int()), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return starlark.MakeUint64(valValue.Uint()), nil
case reflect.Float32, reflect.Float64:
return starlark.MakeInt64(valValue.Int()).Float(), nil
case reflect.String:
return starlark.String(valValue.String()), nil
case reflect.Bool:
return starlark.Bool(valValue.Bool()), nil
case reflect.Slice, reflect.Array:
var starElems []starlark.Value
for i := 0; i < valValue.Len(); i++ {
elemVal := valValue.Index(i)
starElemVal, err := GoToStarlarkValue(elemVal.Interface())
if err != nil {
return starlark.None, err
}
starElems = append(starElems, starElemVal)
}
return starlark.Tuple(starElems), nil
default:
return starlark.None, fmt.Errorf("unable to assert Go type %T as Starlark type", val)
}
} | starlark/govalue.go | 0.729905 | 0.441071 | govalue.go | starcoder |
package filters
import (
"gitee.com/andrewgithub/FireShotGo/firetheme"
"github.com/golang/freetype/truetype"
"github.com/golang/glog"
"golang.org/x/image/font"
"golang.org/x/image/math/fixed"
"image"
"image/color"
"strings"
)
// DPI constant. Ideally it would be read from the various system.
const DPI = 96
// Text is an image filter that renders a (possibly multi-line) string,
// centered on a point, alpha-blended over the underlying image.
type Text struct {
	// Text to render.
	Text string
	// Center (horizontal and vertical) where to draw the text.
	Center image.Point
	// Color of the Text glyphs; Background fills the text's bounding box
	// behind the glyphs.
	Color, Background color.Color
	// Font size.
	Size float64
	// Rectangle enclosing text, centered on Center (computed by SetText).
	rect image.Rectangle
	// Text rendered offscreen by SetText; composited lazily by at().
	renderedText *image.RGBA
}
// NewText creates a new Text filter that draws the given text centered on
// the given point, in textColor over background, at the given font size.
// (The previous doc comment mentioned an ellipsis and a thickness — stale
// copy-paste from another filter; this filter renders text only.)
func NewText(text string, center image.Point, textColor, background color.Color, size float64) *Text {
	// The first color parameter was previously named "color", shadowing the
	// image/color package inside the body.
	c := &Text{
		Text:       text,
		Center:     center,
		Color:      textColor,
		Background: background,
		Size:       size}
	c.SetText(text)
	return c
}
// SetText re-renders the filter's text into an offscreen RGBA image and
// recomputes the bounding rectangle centered on t.Center.
func (t *Text) SetText(text string) {
	t.Text = text
	// Baseline for measuring: x=0, y = font size in 26.6 fixed point.
	point := fixed.Point26_6{X: 0, Y: fixed.Int26_6(t.Size * 64)}
	goBoldFont, err := truetype.Parse(firetheme.ShangShouJianSongXianXiTi)
	if err != nil {
		glog.Fatalf("Failed to generate font for golang.org/x/image/font/gofont/gobold TTF.")
	}
	// Dst is still nil here; it is only needed for drawing and is assigned
	// after the background image is allocated below. BoundString works
	// without a destination.
	d := &font.Drawer{
		Dst: t.renderedText,
		Src: image.NewUniform(t.Color),
		Face: truetype.NewFace(goBoldFont, &truetype.Options{
			Size:       t.Size,
			DPI:        DPI,
			Hinting:    font.HintingFull,
			SubPixelsX: 8,
			SubPixelsY: 8,
		}),
		Dot: point,
	}
	// Handle multi-line content.
	// Margin scales with font size; +0.99 rounds up to a whole pixel.
	margins := int((t.Size*DPI/100.0)/2.0 + 0.99)
	lines := strings.Split(text, "\n")
	var boundingRect image.Rectangle
	// Accumulate total height and maximum width over all lines.
	for _, line := range lines {
		lineRect, _ := d.BoundString(line)
		boundingRect.Max.Y += lineRect.Max.Y.Ceil() + margins
		if lineRect.Max.X.Ceil() > boundingRect.Max.X {
			boundingRect.Max.X = lineRect.Max.X.Ceil()
		}
	}
	boundingRect.Max.X += 2 * margins
	boundingRect.Max.Y += margins
	// Prepare background.
	img := image.NewRGBA(boundingRect)
	t.renderedText = img
	// Fill every pixel with the background color. RGBA() yields 16-bit
	// channels; keep the high byte for the 8-bit Pix buffer.
	to8 := func(v uint32) uint8 { return uint8(v >> 8) }
	for ii := 0; ii < len(img.Pix); ii += 4 {
		r, g, b, a := t.Background.RGBA()
		img.Pix[ii] = to8(r)
		img.Pix[ii+1] = to8(g)
		img.Pix[ii+2] = to8(b)
		img.Pix[ii+3] = to8(a)
	}
	d.Dst = t.renderedText
	// Draw lines.
	for ii, line := range lines {
		d.Dot = fixed.Point26_6{
			X: fixed.Int26_6(margins * 64),
			Y: fixed.Int26_6((float64(ii+1) * (t.Size + float64(margins))) * 64)}
		d.DrawString(line)
	}
	normalizeAlpha(t.renderedText)
	// Center the rendered rectangle on t.Center.
	cx, cy := t.Center.X, t.Center.Y
	dx, dy := t.renderedText.Rect.Dx(), t.renderedText.Rect.Dy()
	t.rect = image.Rect(cx-dx/2, cy-dy/2, cx+dx/2, cy+dy/2)
}
func normalizeAlpha(img *image.RGBA) {
var maxAlpha uint8
for ii := 0; ii < len(img.Pix); ii += 4 {
alpha := img.Pix[ii+3]
if alpha > maxAlpha {
maxAlpha = alpha
}
}
const M = 1<<8 - 1
maxAlpha16 := uint16(maxAlpha)
if maxAlpha == 0 {
maxAlpha = 1
}
for ii := 0; ii < len(img.Pix); ii += 4 {
img.Pix[ii+3] = uint8(uint16(img.Pix[ii+3]) * M / maxAlpha16)
}
}
// at is the function given to the filterImage object. It returns the color
// of pixel (x, y) after compositing the rendered text over the underlying
// color.
func (t *Text) at(x, y int, under color.Color) color.Color {
	if glog.V(3) {
		// Debug: draw cross lines in center.
		if x == t.Center.X || y == t.Center.Y {
			return t.Color
		}
	}
	// Outside the text's bounding rectangle nothing changes.
	if x > t.rect.Max.X || x < t.rect.Min.X || y > t.rect.Max.Y || y < t.rect.Min.Y {
		return under
	}
	c := t.renderedText.At(x-t.rect.Min.X, y-t.rect.Min.Y)
	fontR, fontG, fontB, a := c.RGBA()
	if a == 0 {
		// Fully transparent text pixel: keep the underlying color.
		return under
	}
	// Alpha-blend the 16-bit-per-channel values, then narrow to 8 bits.
	// (The two products cannot overflow uint32 because a + (M-a) == M.)
	const M = 1<<16 - 1
	underR, underG, underB, underA := under.RGBA()
	blend := func(underChan uint32, fontChan uint32) uint8 {
		return uint8((fontChan*a + underChan*(M-a)) / M >> 8)
	}
	return color.RGBA{
		R: blend(underR, fontR),
		G: blend(underG, fontG),
		B: blend(underB, fontB),
		A: uint8(underA >> 8),
	}
}
// Apply implements the ImageFilter interface.
func (t *Text) Apply(image image.Image) image.Image {
return &filterImage{image, t.at}
} | filters/text.go | 0.772531 | 0.428413 | text.go | starcoder |
package quantize
import (
"image"
"image/color"
"sort"
)
// Spread takes in a slice of RGB pixels, and returns the delta across the red,
// green, & blue components of all pixels.
func Spread(pixels []color.RGBA) (uint8, uint8, uint8) {
	// If there are no pixels, then the spread must be zero
	if len(pixels) == 0 {
		return 0, 0, 0
	}
	first := pixels[0]
	minR, maxR := first.R, first.R
	minG, maxG := first.G, first.G
	minB, maxB := first.B, first.B
	// Track the running min and max of each channel.
	for _, pixel := range pixels[1:] {
		minR = min(minR, pixel.R)
		maxR = max(maxR, pixel.R)
		minG = min(minG, pixel.G)
		maxG = max(maxG, pixel.G)
		minB = min(minB, pixel.B)
		maxB = max(maxB, pixel.B)
	}
	return maxR - minR, maxG - minG, maxB - minB
}
// Partition takes in a slice of RGB pixels, and bisects the slice with respect
// to the color component with the largest spread.
// Note that the input slice is sorted (stably) in place; the two returned
// halves alias it.
func Partition(pixels []color.RGBA) ([]color.RGBA, []color.RGBA) {
	if len(pixels) == 0 {
		return []color.RGBA{}, []color.RGBA{}
	}
	deltaR, deltaG, deltaB := Spread(pixels)
	// Pick a comparator for the channel with the widest spread. The three
	// cases are exhaustive, so the default arm covers the blue channel.
	var byChannel func(int, int) bool
	switch {
	case deltaR >= deltaG && deltaR >= deltaB:
		byChannel = func(i, j int) bool { return pixels[i].R < pixels[j].R }
	case deltaG >= deltaR && deltaG >= deltaB:
		byChannel = func(i, j int) bool { return pixels[i].G < pixels[j].G }
	default:
		byChannel = func(i, j int) bool { return pixels[i].B < pixels[j].B }
	}
	sort.SliceStable(pixels, byChannel)
	mid := len(pixels) / 2
	return pixels[:mid], pixels[mid:]
}
// Average takes in a slice of RGB pixels, and returns the average across the
// red, green, & blue components of all pixels.
func Average(pixels []color.RGBA) color.RGBA {
var totalR int
var totalG int
var totalB int
if len(pixels) == 0 {
return color.RGBA{0, 0, 0, 0xFF}
}
for _, pixel := range pixels {
totalR += int(pixel.R)
totalG += int(pixel.G)
totalB += int(pixel.B)
}
return color.RGBA{
uint8(totalR / len(pixels)),
uint8(totalG / len(pixels)),
uint8(totalB / len(pixels)),
0xFF,
}
}
// Pixels takes in a slice of RGB pixels, and performs the MMCQ process to the
// specified number of levels. Returns a slice of RGB colors of length 2^levels.
func Pixels(pixels []color.RGBA, levels int) []color.RGBA {
	buckets := [][]color.RGBA{pixels}
	// Each level bisects every bucket, doubling the bucket count.
	for level := 0; level < levels; level++ {
		split := make([][]color.RGBA, 0, len(buckets)*2)
		for _, bucket := range buckets {
			lo, hi := Partition(bucket)
			split = append(split, lo, hi)
		}
		buckets = split
	}
	// The palette is the per-bucket average color.
	palette := make([]color.RGBA, len(buckets))
	for i, bucket := range buckets {
		palette[i] = Average(bucket)
	}
	return palette
}
// Image is a helper that converts the given image into a slice of RGB pixels
// before performing MMCQ.
func Image(img image.Image, levels int) []color.RGBA {
	rect := img.Bounds()
	// Capacity fix: use the rectangle's width*height (Dx*Dy) rather than
	// Max.X*Max.Y, which mis-sizes the allocation for images whose bounds
	// do not start at the origin.
	pixels := make([]color.RGBA, 0, rect.Dx()*rect.Dy())
	for x := rect.Min.X; x < rect.Max.X; x++ {
		for y := rect.Min.Y; y < rect.Max.Y; y++ {
			// RGBA() yields 16-bit channels; keep the high byte.
			r, g, b, _ := img.At(x, y).RGBA()
			pixel := color.RGBA{
				uint8(r >> 8),
				uint8(g >> 8),
				uint8(b >> 8),
				0xFF,
			}
			pixels = append(pixels, pixel)
		}
	}
	return Pixels(pixels, levels)
}
// min returns the smaller of two uint8 values.
func min(first uint8, second uint8) uint8 {
	if second < first {
		return second
	}
	return first
}
// max returns the larger of two uint8 values.
func max(first uint8, second uint8) uint8 {
	if second > first {
		return second
	}
	return first
}
package data
import (
	"fmt"
	"image"
	"strings"
)
// Layer holds one color channel of an image as rows of float32 samples,
// indexed [y][x].
type Layer [][]float32

// NewLayer allocates a zero-filled layer of the given width and height.
func NewLayer(sizeX, sizeY int) Layer {
	rows := make([][]float32, sizeY)
	for j := range rows {
		rows[j] = make([]float32, sizeX)
	}
	return rows
}
// GetLayers splits img into separate red, green, and blue channel layers and
// also returns the image's width and height.
// Fixes: the parameter was previously named "image", shadowing the image
// package, and pixels were sampled at (i, j) even when the image bounds do
// not start at the origin.
func GetLayers(img image.Image) (Layer, Layer, Layer, uint, uint) {
	bounds := img.Bounds()
	sizeX := bounds.Max.X - bounds.Min.X
	sizeY := bounds.Max.Y - bounds.Min.Y
	r := NewLayer(sizeX, sizeY)
	g := NewLayer(sizeX, sizeY)
	b := NewLayer(sizeX, sizeY)
	for j := 0; j < sizeY; j++ {
		for i := 0; i < sizeX; i++ {
			// Channel values are 16-bit (0-65535) as returned by RGBA().
			rv, gv, bv, _ := img.At(bounds.Min.X+i, bounds.Min.Y+j).RGBA()
			r[j][i] = float32(rv)
			g[j][i] = float32(gv)
			b[j][i] = float32(bv)
		}
	}
	return r, g, b, uint(sizeX), uint(sizeY)
}
// GetDimensions returns the layer's width (sizeX) and height (sizeY).
// It panics if the layer has no rows.
// NOTE(review): only the outer slice is checked; rows are assumed non-nil
// and uniform in length — confirm layers are always built via NewLayer.
func (l Layer) GetDimensions() (int, int) {
	if len(l) == 0 {
		panic("Invalid image data")
	}
	return len(l[0]), len(l)
}
// Copy returns a deep copy of the layer: row slices and their contents are
// duplicated, so mutating the copy never affects the original.
func (l Layer) Copy() Layer {
	sizeX, sizeY := l.GetDimensions()
	// The local was previously named "copy", shadowing the builtin; using
	// the builtin copy per row also replaces the element-by-element loop.
	dup := NewLayer(sizeX, sizeY)
	for j := 0; j < sizeY; j++ {
		copy(dup[j], l[j])
	}
	return dup
}
// Times multiplies every value in the layer by factor, in place.
func (l Layer) Times(factor float32) {
	sizeX, sizeY := l.GetDimensions()
	for j := 0; j < sizeY; j++ {
		row := l[j]
		for i := 0; i < sizeX; i++ {
			row[i] = factor * row[i]
		}
	}
}
// ScaleInteger returns a new layer enlarged by integer factors scaleX and
// scaleY, replicating each source sample into a scaleX-by-scaleY block.
func (l Layer) ScaleInteger(scaleX, scaleY int) Layer {
	srcX, srcY := l.GetDimensions()
	scaled := NewLayer(srcX*scaleX, srcY*scaleY)
	for j := range scaled {
		srcRow := l[j/scaleY]
		for i := range scaled[j] {
			scaled[j][i] = srcRow[i/scaleX]
		}
	}
	return scaled
}
// ToProtobuf converts the layer into its protobuf ImageData representation.
// Row contents are duplicated, so the message does not alias the layer.
func (l Layer) ToProtobuf() ImageData {
	sizeX, sizeY := l.GetDimensions()
	rows := make([]*ImageRow, sizeY)
	for j := 0; j < sizeY; j++ {
		row := ImageRow{Values: make([]float32, sizeX)}
		// Builtin copy replaces the previous element-by-element loop.
		copy(row.Values, l[j])
		rows[j] = &row
	}
	return ImageData{Rows: rows}
}
// FromProtobuf replaces the layer's contents with the values from data.
// NOTE(review): panics if data.Rows is empty or rows are ragged — confirm
// inputs are always well-formed before relying on this.
func (l *Layer) FromProtobuf(data ImageData) {
	sizeX := len(data.Rows[0].Values)
	sizeY := len(data.Rows)
	(*l) = NewLayer(sizeX, sizeY)
	for j := 0; j < sizeY; j++ {
		for i := 0; i < sizeX; i++ {
			(*l)[j][i] = data.Rows[j].Values[i]
		}
	}
}
func (l Layer) String() string {
sizeX, sizeY := l.GetDimensions()
str := ""
for j := 0; j < sizeY; j++ {
for i := 0; i < sizeX; i++ {
str += fmt.Sprintf("%8.2f ", l[j][i])
}
str += "\n"
}
return str
} | labo-2/jpeg2000/data/layer.go | 0.604282 | 0.44077 | layer.go | starcoder |
package fields
import (
"bytes"
"encoding/base64"
"encoding/binary"
"fmt"
"math"
)
const (
	// CurrentVersion is the Forest version that this library writes.
	CurrentVersion Version = 1
	// HashDigestLengthSHA512_256 is the length (in bytes) of the digest
	// produced by the SHA512/256 hash algorithm.
	HashDigestLengthSHA512_256 ContentLength = 32
)
// multiByteSerializationOrder defines the order in which multi-byte
// integers are serialized into binary
var multiByteSerializationOrder binary.ByteOrder = binary.BigEndian

// fundamental types

// genericType is the shared one-byte underlying representation of the
// enumerated field types (node type, hash type, etc.).
type genericType uint8

const sizeofgenericType = 1

// MarshalBinary encodes the value as a single byte.
func (g genericType) MarshalBinary() ([]byte, error) {
	var buf bytes.Buffer
	err := binary.Write(&buf, multiByteSerializationOrder, g)
	return buf.Bytes(), err
}

// UnmarshalBinary decodes the value from the first byte of b.
func (g *genericType) UnmarshalBinary(b []byte) error {
	return binary.Read(bytes.NewBuffer(b), multiByteSerializationOrder, g)
}

// SizeConstraints reports the fixed serialized size (1 byte, not variable).
func (g *genericType) SizeConstraints() (int, bool) {
	return sizeofgenericType, false
}

// BytesConsumed reports how many bytes UnmarshalBinary consumes.
func (g *genericType) BytesConsumed() int {
	return sizeofgenericType
}

// Equals reports whether two values are identical.
func (g *genericType) Equals(g2 *genericType) bool {
	return *g == *g2
}
// ContentLength represents the length of a piece of data in the Forest
type ContentLength uint16

const sizeofContentLength = 2

const (
	// MaxContentLength is the maximum representable content length in this
	// version of the Forest
	MaxContentLength = math.MaxUint16
)

// NewContentLength validates that size is representable and returns it as a
// ContentLength. BUG FIX: negative sizes were previously accepted and
// silently wrapped to a huge unsigned value; they are now rejected.
func NewContentLength(size int) (*ContentLength, error) {
	if size < 0 {
		return nil, fmt.Errorf("Cannot represent negative content size %d", size)
	}
	if size > MaxContentLength {
		return nil, fmt.Errorf("Cannot represent content of size %d, max is %d", size, MaxContentLength)
	}
	c := ContentLength(size)
	return &c, nil
}

// MarshalBinary converts the ContentLength into its binary representation
func (c ContentLength) MarshalBinary() ([]byte, error) {
	b := new(bytes.Buffer)
	err := binary.Write(b, multiByteSerializationOrder, c)
	return b.Bytes(), err
}

// MarshalText renders the length as human-readable text, e.g. "B32".
func (c ContentLength) MarshalText() ([]byte, error) {
	return []byte(fmt.Sprintf("B%d", c)), nil
}

// UnmarshalBinary converts from the binary representation of a ContentLength
// back to its structured form
func (c *ContentLength) UnmarshalBinary(b []byte) error {
	buf := bytes.NewBuffer(b)
	return binary.Read(buf, multiByteSerializationOrder, c)
}

// BytesConsumed reports the fixed serialized size of a ContentLength.
func (c *ContentLength) BytesConsumed() int {
	return sizeofContentLength
}

// Equals reports whether two lengths are identical.
func (c *ContentLength) Equals(c2 *ContentLength) bool {
	return *c == *c2
}
// TreeDepth represents the depth of a node within a tree
type TreeDepth uint32

const sizeofTreeDepth = 4

// MarshalBinary converts the TreeDepth into its binary representation
func (t TreeDepth) MarshalBinary() ([]byte, error) {
	var buf bytes.Buffer
	err := binary.Write(&buf, multiByteSerializationOrder, t)
	return buf.Bytes(), err
}

// MarshalText renders the depth as human-readable text, e.g. "L3".
func (t TreeDepth) MarshalText() ([]byte, error) {
	return []byte(fmt.Sprintf("L%d", t)), nil
}

// UnmarshalBinary converts from the binary representation of a TreeDepth
// back to its structured form
func (t *TreeDepth) UnmarshalBinary(b []byte) error {
	return binary.Read(bytes.NewBuffer(b), multiByteSerializationOrder, t)
}

// BytesConsumed reports the fixed serialized size of a TreeDepth.
func (t *TreeDepth) BytesConsumed() int {
	return sizeofTreeDepth
}

// Equals reports whether two depths are identical.
func (t *TreeDepth) Equals(t2 *TreeDepth) bool {
	return *t == *t2
}
// Blob represents a quantity of arbitrary binary data in the Forest
type Blob []byte

// MarshalBinary converts the Blob into its binary representation.
// NOTE(review): the returned slice aliases the Blob itself — callers must
// not mutate it.
func (v Blob) MarshalBinary() ([]byte, error) {
	return v, nil
}

// MarshalText encodes the blob as unpadded base64url text.
func (v Blob) MarshalText() ([]byte, error) {
	based := base64.RawURLEncoding.EncodeToString([]byte(v))
	return []byte(based), nil
}

// UnmarshalBinary converts from the binary representation of a Blob
// back to its structured form.
// NOTE(review): the Blob aliases b rather than copying it; later mutation
// of b is visible through the Blob — confirm this is intended.
func (v *Blob) UnmarshalBinary(b []byte) error {
	*v = b
	return nil
}

// BytesConsumed reports the blob's length in bytes.
func (v *Blob) BytesConsumed() int {
	return len([]byte(*v))
}

// Equals reports whether two blobs hold identical bytes.
func (v *Blob) Equals(v2 *Blob) bool {
	return bytes.Equal([]byte(*v), []byte(*v2))
}
// Version represents the version of the Arbor Forest Schema used to construct
// a particular node
type Version uint64

const sizeofVersion = 8

// MarshalBinary converts the Version into its binary representation
func (v Version) MarshalBinary() ([]byte, error) {
	var buf bytes.Buffer
	err := binary.Write(&buf, multiByteSerializationOrder, v)
	return buf.Bytes(), err
}

// MarshalText renders the version as human-readable text, e.g. "V1".
func (v Version) MarshalText() ([]byte, error) {
	return []byte(fmt.Sprintf("V%d", v)), nil
}

// UnmarshalBinary converts from the binary representation of a Version
// back to its structured form
func (v *Version) UnmarshalBinary(b []byte) error {
	return binary.Read(bytes.NewBuffer(b), multiByteSerializationOrder, v)
}

// BytesConsumed reports the fixed serialized size of a Version.
func (v *Version) BytesConsumed() int {
	return sizeofVersion
}

// Equals reports whether two versions are identical.
func (v *Version) Equals(v2 *Version) bool {
	return *v == *v2
}
// specialized types
type NodeType genericType
const (
sizeofNodeType = sizeofgenericType
NodeTypeIdentity NodeType = iota
NodeTypeCommunity
NodeTypeReply
)
var ValidNodeTypes = map[NodeType]struct{}{
NodeTypeIdentity: struct{}{},
NodeTypeCommunity: struct{}{},
NodeTypeReply: struct{}{},
}
var nodeTypeNames = map[NodeType]string{
NodeTypeIdentity: "identity",
NodeTypeCommunity: "community",
NodeTypeReply: "reply",
}
func (t NodeType) MarshalBinary() ([]byte, error) {
return genericType(t).MarshalBinary()
}
func (t NodeType) MarshalText() ([]byte, error) {
return []byte(nodeTypeNames[t]), nil
}
func (t *NodeType) UnmarshalBinary(b []byte) error {
if err := (*genericType)(t).UnmarshalBinary(b); err != nil {
return err
}
if _, valid := ValidNodeTypes[*t]; !valid {
return fmt.Errorf("%d is not a valid node type", *t)
}
return nil
}
func (t *NodeType) BytesConsumed() int {
return sizeofNodeType
}
func (t *NodeType) Equals(t2 *NodeType) bool {
return ((*genericType)(t)).Equals((*genericType)(t2))
}
// HashType identifies the hash algorithm used to derive a node's ID.
type HashType genericType

const (
	sizeofHashType = sizeofgenericType
	// NOTE(review): iota counts the sizeofHashType spec above, so
	// HashTypeNullHash is 1 (not 0) — confirm this matches the wire format.
	HashTypeNullHash HashType = iota
	HashTypeSHA512
)

// ValidHashTypes maps each accepted hash type to its valid digest lengths.
// (Redundant element types in the composite literal simplified per gofmt -s.)
var ValidHashTypes = map[HashType][]ContentLength{
	HashTypeNullHash: {0},
	HashTypeSHA512:   {HashDigestLengthSHA512_256},
}

// hashNames maps each hash type to its textual name.
var hashNames = map[HashType]string{
	HashTypeNullHash: "NullHash",
	HashTypeSHA512:   "SHA512",
}

// MarshalBinary encodes the hash type as a single byte.
func (t HashType) MarshalBinary() ([]byte, error) {
	return genericType(t).MarshalBinary()
}

// MarshalText renders the hash type's name; unknown values yield "".
func (t HashType) MarshalText() ([]byte, error) {
	return []byte(hashNames[t]), nil
}

// UnmarshalBinary decodes and validates a hash type from b.
func (t *HashType) UnmarshalBinary(b []byte) error {
	if err := (*genericType)(t).UnmarshalBinary(b); err != nil {
		return err
	}
	if _, valid := ValidHashTypes[*t]; !valid {
		return fmt.Errorf("%d is not a valid hash type", *t)
	}
	return nil
}

// BytesConsumed reports the fixed serialized size of a HashType.
func (t *HashType) BytesConsumed() int {
	return sizeofHashType
}

// Equals reports whether two hash types are identical.
func (t *HashType) Equals(t2 *HashType) bool {
	return ((*genericType)(t)).Equals((*genericType)(t2))
}
// ContentType identifies the encoding of a node's content payload.
type ContentType genericType

const (
	sizeofContentType = sizeofgenericType
	ContentTypeUTF8String ContentType = 1
	ContentTypeJSON ContentType = 2
)

// ValidContentTypes enumerates the content types accepted during unmarshaling.
// (Redundant struct{}{} values simplified per gofmt -s.)
var ValidContentTypes = map[ContentType]struct{}{
	ContentTypeUTF8String: {},
	ContentTypeJSON:       {},
}

// contentNames maps each content type to its textual name.
var contentNames = map[ContentType]string{
	ContentTypeUTF8String: "UTF-8",
	ContentTypeJSON:       "JSON",
}

// MarshalBinary encodes the content type as a single byte.
func (t ContentType) MarshalBinary() ([]byte, error) {
	return genericType(t).MarshalBinary()
}

// MarshalText renders the content type's name; unknown values yield "".
func (t ContentType) MarshalText() ([]byte, error) {
	return []byte(contentNames[t]), nil
}

// UnmarshalBinary decodes and validates a content type from b.
func (t *ContentType) UnmarshalBinary(b []byte) error {
	if err := (*genericType)(t).UnmarshalBinary(b); err != nil {
		return err
	}
	if _, valid := ValidContentTypes[*t]; !valid {
		return fmt.Errorf("%d is not a valid content type", *t)
	}
	return nil
}

// BytesConsumed reports the fixed serialized size of a ContentType.
func (t *ContentType) BytesConsumed() int {
	return sizeofContentType
}

// Equals reports whether two content types are identical.
func (t *ContentType) Equals(t2 *ContentType) bool {
	return ((*genericType)(t)).Equals((*genericType)(t2))
}
// KeyType identifies the kind of public key attached to a node.
type KeyType genericType

const (
	sizeofKeyType = sizeofgenericType
	KeyTypeNoKey KeyType = 0
	KeyTypeOpenPGP KeyType = 1
)

// ValidKeyTypes enumerates the key types accepted during unmarshaling.
// (Redundant struct{}{} values simplified per gofmt -s.)
var ValidKeyTypes = map[KeyType]struct{}{
	KeyTypeNoKey:   {},
	KeyTypeOpenPGP: {},
}

// keyNames maps each key type to its textual name.
var keyNames = map[KeyType]string{
	KeyTypeNoKey:   "None",
	KeyTypeOpenPGP: "OpenPGP",
}

// MarshalBinary encodes the key type as a single byte.
func (t KeyType) MarshalBinary() ([]byte, error) {
	return genericType(t).MarshalBinary()
}

// MarshalText renders the key type's name; unknown values yield "".
func (t KeyType) MarshalText() ([]byte, error) {
	return []byte(keyNames[t]), nil
}

// UnmarshalBinary decodes and validates a key type from b.
func (t *KeyType) UnmarshalBinary(b []byte) error {
	if err := (*genericType)(t).UnmarshalBinary(b); err != nil {
		return err
	}
	if _, valid := ValidKeyTypes[*t]; !valid {
		return fmt.Errorf("%d is not a valid key type", *t)
	}
	return nil
}

// BytesConsumed reports the fixed serialized size of a KeyType.
func (t *KeyType) BytesConsumed() int {
	return sizeofKeyType
}

// Equals reports whether two key types are identical.
func (t *KeyType) Equals(t2 *KeyType) bool {
	return ((*genericType)(t)).Equals((*genericType)(t2))
}
type SignatureType genericType
const (
sizeofSignatureType = sizeofgenericType
SignatureTypeOpenPGP SignatureType = 1
)
var ValidSignatureTypes = map[SignatureType]struct{}{
SignatureTypeOpenPGP: struct{}{},
}
var signatureNames = map[SignatureType]string{
SignatureTypeOpenPGP: "OpenPGP",
}
func (t SignatureType) MarshalBinary() ([]byte, error) {
return genericType(t).MarshalBinary()
}
func (t SignatureType) MarshalText() ([]byte, error) {
return []byte(signatureNames[t]), nil
}
func (t *SignatureType) UnmarshalBinary(b []byte) error {
if err := (*genericType)(t).UnmarshalBinary(b); err != nil {
return err
}
if _, valid := ValidSignatureTypes[*t]; !valid {
return fmt.Errorf("%d is not a valid signature type", *t)
}
return nil
}
func (t *SignatureType) BytesConsumed() int {
return sizeofSignatureType
}
func (t *SignatureType) Equals(t2 *SignatureType) bool {
return ((*genericType)(t)).Equals((*genericType)(t2))
} | fields/primitives.go | 0.747432 | 0.408867 | primitives.go | starcoder |
package main
import "errors"
// isAlpha reports whether r is a letter of the English alphabet.
// Rune literals replace the previous magic ASCII numbers (65/90/97/122).
func isAlpha(r rune) bool {
	return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z')
}
// isNumeral reports whether r is a decimal digit.
// Rune literals replace the previous magic ASCII numbers (48/57).
func isNumeral(r rune) bool {
	return r >= '0' && r <= '9'
}
// isSigil reports whether r is one of the recognized sigil characters.
func isSigil(r rune) bool {
	for _, candidate := range sigils {
		if candidate == r {
			return true
		}
	}
	return false
}
// lex converts source code into a flat token stream, tracking line and
// column positions for error reporting. It rejects non-ASCII characters,
// tabs, and carriage returns not followed by a line feed.
// Bounds guards were added so input ending mid-construct (trailing "/",
// trailing CR, or a file ending inside a comment/number/word) reports an
// error or terminates cleanly instead of panicking on runes[i+1].
func lex(code string) ([]Token, error) {
	tokens := []Token{}
	runes := []rune(code)
	line := 1
	column := 1
	for i := 0; i < len(runes); {
		r := runes[i]
		if r >= 128 {
			return nil, errors.New("File improperly contains a non-ASCII character at line " + itoa(line) + " and column " + itoa(column))
		}
		if r == '\n' {
			tokens = append(tokens, Token{Newline, "\n", line, column})
			line++
			column = 1
			i++
		} else if r == '\r' {
			// A trailing CR (nothing after it) is reported as an error
			// rather than panicking on runes[i+1].
			if i+1 >= len(runes) || runes[i+1] != '\n' {
				return nil, errors.New("File improperly contains a CR not followed by a LF at end of line " + itoa(line))
			}
			tokens = append(tokens, Token{Newline, "\n", line, column})
			line++
			column = 1
			i += 2
		} else if r == '/' && i+1 < len(runes) && runes[i+1] == '/' { // start of a comment
			i += 2
			for i < len(runes) && runes[i] != '\n' {
				i++
			}
			i++
			tokens = append(tokens, Token{Newline, "\n", line, column})
			line++
			column = 1
		} else if r == '(' {
			tokens = append(tokens, Token{OpenParen, "(", line, column})
			column++
			i++
		} else if r == ')' {
			tokens = append(tokens, Token{CloseParen, ")", line, column})
			column++
			i++
		} else if r == '[' {
			tokens = append(tokens, Token{OpenSquare, "[", line, column})
			column++
			i++
		} else if r == ']' {
			tokens = append(tokens, Token{CloseSquare, "]", line, column})
			column++
			i++
		} else if r == '{' {
			tokens = append(tokens, Token{OpenCurly, "{", line, column})
			column++
			i++
		} else if r == '}' {
			tokens = append(tokens, Token{CloseCurly, "}", line, column})
			column++
			i++
		} else if r == '<' {
			tokens = append(tokens, Token{OpenAngle, "<", line, column})
			column++
			i++
		} else if r == '>' {
			tokens = append(tokens, Token{CloseAngle, ">", line, column})
			column++
			i++
		} else if r == ' ' {
			// Coalesce a run of spaces into a single Spaces token.
			firstIdx := i
			for i < len(runes) && runes[i] == ' ' {
				column++
				i++
			}
			content := string(runes[firstIdx:i])
			tokens = append(tokens, Token{Spaces, content, line, column})
		} else if r == '\t' {
			return nil, errors.New("File improperly contains a tab character: line " + itoa(line) + " and column " + itoa(column))
		} else if r == '`' { // start of a string
			prev := r
			endIdx := i + 1
			endColumn := column
			endLine := line
			for {
				if endIdx >= len(runes) {
					return nil, errors.New("String literal not closed by end of file on line " + itoa(line) + " and column " + itoa(column))
				}
				current := runes[endIdx]
				if current == '\n' {
					endLine++
					endColumn = 1
				} else {
					endColumn++
				}
				if current == '`' && prev != '\\' { // end of the string
					endIdx++
					break
				}
				prev = current
				endIdx++
			}
			tokens = append(tokens, Token{StringLiteral, string(runes[i:endIdx]), line, column})
			column = endColumn
			line = endLine
			i = endIdx
		} else if isNumeral(r) { // start of a number
			endIdx := i + 1
			for endIdx < len(runes) && isNumeral(runes[endIdx]) {
				endIdx++
			}
			tokens = append(tokens, Token{NumberLiteral, string(runes[i:endIdx]), line, column})
			column += (endIdx - i)
			i = endIdx
		} else if isAlpha(r) { // start of a word
			endIdx := i + 1
			for endIdx < len(runes) {
				c := runes[endIdx]
				if !isAlpha(c) && c != '_' && !isNumeral(c) {
					break
				}
				endIdx++
			}
			content := string(runes[i:endIdx])
			tokens = append(tokens, Token{Word, content, line, column})
			column += (endIdx - i)
			i = endIdx
		} else if isSigil(r) {
			tokens = append(tokens, Token{Sigil, string(r), line, column})
			column++
			i++
		} else {
			return nil, errors.New("Unexpected character " + string(r) + " at line " + itoa(line) + ", column " + itoa(column))
		}
	}
	return tokens, nil
}
// read parses a token stream into the sequence of top-level atoms.
// The previous loop declared an index i that was compared but never
// advanced (the slice reassignment did all the work); the condition is now
// written directly on the shrinking slice, with a defensive break should
// readAtom ever consume nothing.
func read(tokens []Token) ([]Atom, error) {
	readerData := []Atom{}
	for len(tokens) > 0 {
		atom, numTokens, err := readAtom(tokens, NoClose)
		if err != nil {
			return nil, err
		}
		if numTokens == 0 {
			// readAtom always consumes at least one token of a non-empty
			// stream; guard against an infinite loop regardless.
			break
		}
		tokens = tokens[numTokens:]
		if atom != nil {
			readerData = append(readerData, atom)
		}
	}
	return readerData, nil
}
// readAtom reads a single atom (or a chain of adjacent atoms) from tokens,
// returning the atom, the number of tokens consumed, and any error.
// atom may be nil if tokens consumed contain only whitespace.
// expectedClose is the closing delimiter of the enclosing list; it ends the
// atom without being consumed.
func readAtom(tokens []Token, expectedClose TokenType) (Atom, int, error) {
	i := 0
	// advance through all whitespace tokens
Loop:
	for i < len(tokens) {
		t := tokens[i]
		switch t.Type {
		case Spaces, Newline:
			i++
		default:
			break Loop
		}
	}
	// Collect adjacent (no intervening whitespace) atoms into elements.
	elements := []Atom{}
Loop2:
	for i < len(tokens) {
		t := tokens[i]
		switch t.Type {
		case Word:
			elements = append(elements, Symbol{t.Content, t.Line, t.Column})
			i++
		case Sigil:
			elements = append(elements, SigilAtom{t.Content, t.Line, t.Column})
			i++
		case NumberLiteral:
			elements = append(elements, NumberAtom{t.Content, t.Line, t.Column})
			i++
		case StringLiteral:
			elements = append(elements, StringAtom{t.Content, t.Line, t.Column})
			i++
		case Spaces, Newline:
			// Whitespace terminates the chain and is consumed.
			i++
			break Loop2
		case OpenParen, OpenSquare, OpenCurly, OpenAngle:
			// Nested list: select the matching close delimiter.
			var end TokenType
			switch t.Type {
			case OpenParen:
				end = CloseParen
			case OpenSquare:
				end = CloseSquare
			case OpenCurly:
				end = CloseCurly
			case OpenAngle:
				end = CloseAngle
			}
			list, n, err := readList(tokens[i:], end)
			if err != nil {
				return nil, 0, err
			}
			i += n
			elements = append(elements, list)
		case CloseParen, CloseSquare, CloseCurly, CloseAngle:
			if t.Type == expectedClose {
				// do NOT consume the token
				break Loop2
			} else {
				return nil, 0, errors.New("Unexpected atom token: line " + itoa(t.Line) + " column " + itoa(t.Column))
			}
		default:
			return nil, 0, errors.New("Unexpected atom token: line " + itoa(t.Line) + " column " + itoa(t.Column))
		}
	}
	// A lone atom is returned as-is; adjacent atoms become an AtomChain.
	// NOTE(review): the chain's position comes from tokens[0], which may be
	// leading whitespace rather than the first element — confirm intended.
	if len(elements) == 1 {
		return elements[0], i, nil
	} else if len(elements) > 1 {
		return AtomChain{elements, tokens[0].Line, tokens[0].Column}, i, nil
	}
	return nil, i, nil
}
// readList reads a delimited list whose opening token is tokens[0],
// collecting atoms until expectedClose is found. It returns the list atom
// and the number of tokens consumed, including both delimiters.
// NOTE(review): if the stream ends before expectedClose appears, the loop
// exits silently and the unterminated list is returned without error —
// confirm whether a missing close delimiter should be reported.
func readList(tokens []Token, expectedClose TokenType) (Atom, int, error) {
	i := 1
	elements := []Atom{}
Loop:
	for i < len(tokens) {
		t := tokens[i]
		switch t.Type {
		case expectedClose:
			i++
			break Loop
		default:
			atom, n, err := readAtom(tokens[i:], expectedClose)
			if err != nil {
				return nil, 0, err
			}
			i += n
			if atom != nil {
				elements = append(elements, atom)
			}
		}
	}
	// Wrap the elements in the list type matching the opening delimiter.
	t := tokens[0]
	switch t.Type {
	case OpenParen:
		return ParenList{elements, t.Line, t.Column}, i, nil
	case OpenSquare:
		return SquareList{elements, t.Line, t.Column}, i, nil
	case OpenCurly:
		return CurlyList{elements, t.Line, t.Column}, i, nil
	case OpenAngle:
		return AngleList{elements, t.Line, t.Column}, i, nil
	default:
		return nil, 0, errors.New("Internal error. Expecting an open delimiter: line " + itoa(t.Line) + " column " + itoa(t.Column))
	}
}
package mind
import "github.com/gonum/matrix/mat64"
// Version of this package.
const Version = "0.0.1"

// Mind represents the neural network: a fully-connected network with one
// hidden layer, trained by gradient-descent backpropagation.
type Mind struct {
	LearningRate float64 // speed the network will learn at
	Iterations int // number of training iterations
	HiddenUnits int // number of units in hidden layer
	Activate func(*mat64.Dense) *mat64.Dense // activation function
	ActivatePrime func(*mat64.Dense) *mat64.Dense // derivative of activation function
	Weights // learning weights
	Results // learning results
}

// Weights represents the connections between units.
type Weights struct {
	InputHidden *mat64.Dense // input layer -> hidden layer
	HiddenOutput *mat64.Dense // hidden layer -> output layer
}

// Results represents, at a given unit, the output of multiplying
// the inputs and weights in all previous layers.
type Results struct {
	HiddenSum *mat64.Dense // pre-activation sums at the hidden layer
	HiddenResult *mat64.Dense // activated hidden-layer output
	OutputSum *mat64.Dense // pre-activation sums at the output layer
	OutputResult *mat64.Dense // activated network output
}
// New mind loaded with `rate`, `iterations`, and `units`.
// activator selects the activation function pair: "sigmoid" or "htan".
// Any other value panics, so misconfiguration fails fast at startup.
func New(rate float64, iterations int, units int, activator string) *Mind {
	m := &Mind{
		LearningRate: rate,
		Iterations: iterations,
		HiddenUnits: units,
	}
	switch activator {
	case "sigmoid":
		m.Activate = Activator(Sigmoid)
		m.ActivatePrime = Activator(SigmoidPrime)
	case "htan":
		m.Activate = Activator(Htan)
		m.ActivatePrime = Activator(Htanprime)
	default:
		panic("unknown activator " + activator)
	}
	return m
}
// Learn from examples.
// Each example is an [input, output] pair of float slices; Format flattens
// them into dense matrices. Weights are (re)initialized from a normal
// distribution, then trained by full-batch forward + back propagation for
// m.Iterations passes.
func (m *Mind) Learn(examples [][][]float64) {
	input, output := Format(examples)
	_, inCols := input.Dims()
	_, outCols := output.Dims()
	// Setup the weights
	m.Weights.InputHidden = Normals(inCols, m.HiddenUnits)
	m.Weights.HiddenOutput = Normals(m.HiddenUnits, outCols)
	for i := 0; i < m.Iterations; i++ {
		m.Forward(input)
		m.Back(input, output)
	}
}
// Forward propagate the examples through the network.
// The intermediate sums and activated results are cached on m.Results so
// Back can reuse them for the gradient computation.
func (m *Mind) Forward(input *mat64.Dense) {
	HiddenSum := &mat64.Dense{}
	OutputSum := &mat64.Dense{}
	// hidden = activate(input x W_input_hidden)
	HiddenSum.Mul(input, m.Weights.InputHidden)
	m.Results.HiddenResult = m.Activate(HiddenSum)
	// output = activate(hidden x W_hidden_output)
	OutputSum.Mul(m.Results.HiddenResult, m.Weights.HiddenOutput)
	m.Results.OutputResult = m.Activate(OutputSum)
	m.Results.HiddenSum = HiddenSum
	m.Results.OutputSum = OutputSum
}
// Back propagate the error and update the weights.
// Relies on the sums/results cached by Forward; must be called after it.
func (m *Mind) Back(input *mat64.Dense, output *mat64.Dense) {
	ErrorOutputLayer := &mat64.Dense{}
	DeltaOutputLayer := &mat64.Dense{}
	HiddenOutputChanges := &mat64.Dense{}
	DeltaHiddenLayer := &mat64.Dense{}
	InputHiddenChanges := &mat64.Dense{}
	// Output-layer error and delta.
	ErrorOutputLayer.Sub(output, m.Results.OutputResult)
	DeltaOutputLayer.MulElem(m.ActivatePrime(m.Results.OutputSum), ErrorOutputLayer)
	// Update hidden->output weights, scaled by the learning rate.
	HiddenOutputChanges.Mul(m.Results.HiddenResult.T(), DeltaOutputLayer)
	HiddenOutputChanges.Scale(m.LearningRate, HiddenOutputChanges)
	m.Weights.HiddenOutput.Add(HiddenOutputChanges, m.Weights.HiddenOutput)
	// NOTE(review): the hidden-layer delta is computed against the
	// already-updated hidden->output weights — confirm this ordering is
	// intended rather than using the pre-update weights.
	DeltaHiddenLayer.Mul(DeltaOutputLayer, m.Weights.HiddenOutput.T())
	DeltaHiddenLayer.MulElem(m.ActivatePrime(m.Results.HiddenSum), DeltaHiddenLayer)
	// Update input->hidden weights.
	InputHiddenChanges.Mul(input.T(), DeltaHiddenLayer)
	InputHiddenChanges.Scale(m.LearningRate, InputHiddenChanges)
	m.Weights.InputHidden.Add(InputHiddenChanges, m.Weights.InputHidden)
}
// Predict from input.
func (m *Mind) Predict(input [][]float64) *mat64.Dense {
var in []float64
for _, i := range input {
in = append(in, i...)
}
m.Forward(mat64.NewDense(len(input), len(input[0]), in))
return m.Results.OutputResult
} | mind.go | 0.830181 | 0.539469 | mind.go | starcoder |
package core
// InteractorTypeRef defines the location, name and version of an Interactor type.
type InteractorTypeRef struct {
	TypeLocation string
	TypeName string
	TypeVersion VersionSpec
}

// VersionSpec defines a version using Semantic Versioning (SemVer) with an
// optional label.
type VersionSpec struct {
	MajorVersion string
	MinorVersion string
	BuildNumber string
	Label string
}

// InteractorSpec defines the specification of an Interactor to be included in a
// network of Interactors.
type InteractorSpec struct {
	TypeRef InteractorTypeRef
	// Port mappings allow ports to be renamed. This is optional
	PortMappings map[string]string
}

// CompositeInteractorSpec describes an Interactor made up of one or more other Interactors.
type CompositeInteractorSpec struct {
	TypeRef InteractorTypeRef
	Interactors InteractorSpecs
	InPorts InPortSpecs
	OutPorts OutPortSpecs
	Connections ConnectionSpecs
}

// InPortSpec defines an input port. The data type accepted by the port is given by the implementation.
type InPortSpec struct {
	// The interactor unique name
	TargetInteractor string
	// The target port unique name
	TargetPort string
}

// OutPortSpec defines an output port. The data type produced by the port is given by the implementation.
type OutPortSpec struct {
	// The interactor unique name
	SourceInteractor string
	// The source port unique name
	SourcePort string
}

// ConnectionSpec defines the connection between an InPort and an OutPort.
// This connection can be buffered (BufferSize > 0) or unbuffered.
type ConnectionSpec struct {
	Name string
	Source OutPortSpec
	Target InPortSpec
	BufferSize uint16
}

// InteractorSpecs defines a named map of Interactor specifications.
type InteractorSpecs map[string]InteractorSpec

// InPortSpecs defines a map of input port names to input port specifications
type InPortSpecs map[string]InPortSpec

// OutPortSpecs defines a map of output port names to output port specifications
type OutPortSpecs map[string]OutPortSpec

// ConnectionSpecs defines a list of connection specifications
type ConnectionSpecs []ConnectionSpec | core/specs.go | 0.62395 | 0.441191 | specs.go | starcoder |
package db

// Database allows the persistence and retrieval of key / value data.
type Database interface {
	// View provides a Transaction that can be used to read from the database.
	View(func(Transaction) error) error
	// Update provides a Transaction that can be used to read from or write to the database.
	Update(func(Transaction) error) error
	// BatchWriter returns a writer that buffers writes until flushed.
	BatchWriter() BatchWriter
	// Vacuum takes time to clean up the on-disk representation of data
	// (compaction or vacuuming, depending on storage engine). Returns true if
	// any progress was made (running again after a false will have no effect).
	// This may not result in an optimally compact database, and multiple runs may be
	// necessary.
	Vacuum() bool
	// Close releases the underlying storage resources.
	Close()
}

// BatchWriter buffers write operations; callers must end its lifecycle with
// either Flush (commit) or Cancel (discard).
type BatchWriter interface {
	Writer
	// Flush must be called to ensure that any write operations are completed.
	Flush() error
	// Cancel must be called if the BatchWriter is going to be discarded without
	// being written. Note that if the write buffer is filled, it can be flushed
	// without explicitly calling Flush(), and Cancel() will not undo those
	// writes.
	Cancel()
}

// Writer is the write-side of the key / value store.
type Writer interface {
	// Put sets a value at a specified key.
	Put([]byte, []byte) error
	// PutReserve returns a byte slice of the specified size that can be updated
	// to modify the specified key. For example, if you had a hexadecimal value
	// and needed to store the decoded value, you could use PutReserve to get a
	// byte slice and decode the hex into that byte slice, rather than decoding
	// the hex into a new byte slice then calling Put(); this may be more
	// efficient for some database implementations.
	PutReserve([]byte, int) ([]byte, error)
	// Delete removes a key from the database.
	Delete([]byte) error
}

// Transaction allows for atomic interaction with the database. It can be used
// to retrieve data or update the database, and a transaction should provide a
// consistent view (changes made to the database by other transactions will not
// affect the results returned by a transaction that was already open).
type Transaction interface {
	Writer
	// Get returns the value stored for a given key. Note that modifying the
	// value returned here is unsafe, and transactions may return an error if
	// this value is modified.
	Get([]byte) ([]byte, error)
	// ZeroCopyGet invokes a closure, providing the value stored at the specified
	// key. This value must only be accessed within the closure, and the slice
	// may be modified after the closure finishes executing. The data must be
	// parsed and / or copied within the closure.
	ZeroCopyGet([]byte, func([]byte) error) error
	// Iterator returns an iterator object that returns key / value pairs
	// beginning with the specified prefix. Depending on the underlying database
	// engine, the Iterator may or may not be ordered.
	Iterator([]byte) Iterator
}

// Iterator iterates over key / value pairs beginning with the specified prefix.
// Depending on the underlying database engine, the Iterator may or may not be
// ordered.
type Iterator interface {
	// Next advances the iterator to the next key / value pair, returning True
	// upon successful advancement, False if no key / value pairs remain. Next()
	// should be called before accessing the first pair.
	Next() bool
	// Key returns the key associated with the current Key / Value pair.
	Key() []byte
	// Value returns the value associated with the current Key / Value pair. Note
	// that depending on the database implementation, accessing values may be
	// considerably more expensive than just accessing keys.
	Value() []byte
	// Error reports any failure encountered during iteration.
	Error() error
	// Close releases the iterator's resources.
	Close() error
}

// See for implementing iterators in lmdb:
// https://pkg.go.dev/github.com/bmatsuo/lmdb-go@v1.8.0/lmdbscan#New | db/interface.go | 0.83825 | 0.532425 | interface.go | starcoder
package geometry
import (
"math"
"github.com/tab58/v1/spatial/pkg/numeric"
"gonum.org/v1/gonum/blas/blas64"
)
// Matrix4D is a row-major representation of a 4x4 matrix: element (i, j) is
// stored at index i*4+j.
type Matrix4D struct {
	elements [16]float64
}

// Rows returns the number of rows in the matrix (always 4).
func (m *Matrix4D) Rows() uint { return 4 }

// Cols returns the number of columns in the matrix (always 4).
func (m *Matrix4D) Cols() uint { return 4 }
// Clone returns a deep copy of the matrix.
func (m *Matrix4D) Clone() *Matrix4D {
	// Go arrays are value types, so this assignment copies all 16 elements.
	elems := m.elements
	return &Matrix4D{elements: elems}
}
// Copy copies the elements of the given matrix into this one.
func (m *Matrix4D) Copy(mat *Matrix4D) {
	// Array assignment copies every element; no aliasing with mat remains.
	m.elements = mat.elements
}
// Identity sets the matrix to the 4x4 identity matrix.
func (m *Matrix4D) Identity() {
	// ignoring error since all elements will not overflow
	m.SetElements(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1)
}
// Scale multiplies every element of the matrix by the given scalar in place.
// If any scaled element overflows, the matrix is left unmodified and
// numeric.ErrOverflow is returned.
func (m *Matrix4D) Scale(z float64) error {
	// Build the result in a temporary so a mid-loop overflow leaves m intact.
	out := [16]float64{}
	for i, v := range m.elements {
		val := v * z
		if numeric.IsOverflow(val) {
			return numeric.ErrOverflow
		}
		out[i] = val
	}
	m.elements = out
	return nil
}
// ElementAt returns the value of the element at row i, column j.
// It returns numeric.ErrMatrixOutOfRange when either index is out of bounds.
func (m *Matrix4D) ElementAt(i, j uint) (float64, error) {
	cols := m.Cols()
	// BUG FIX: the bounds check was inverted (`i <= m.Rows() || j <= cols`),
	// which rejected every valid index. Indices are valid when strictly less
	// than the dimension.
	if i >= m.Rows() || j >= cols {
		return 0, numeric.ErrMatrixOutOfRange
	}
	return m.elements[i*cols+j], nil
}
// ToBlas64General returns a blas64.General with the same values as the matrix.
// The element data is copied, so mutating the returned value does not affect m.
func (m *Matrix4D) ToBlas64General() blas64.General {
	data := make([]float64, len(m.elements))
	copy(data, m.elements[:])
	return blas64.General{
		Rows:   int(m.Rows()),
		Cols:   int(m.Cols()),
		Stride: int(m.Cols()),
		Data:   data,
	}
}
// SetElementAt sets the value of the element at row i, column j.
// It returns numeric.ErrMatrixOutOfRange when either index is out of bounds.
func (m *Matrix4D) SetElementAt(i, j uint, value float64) error {
	cols := m.Cols()
	// BUG FIX: the bounds check was inverted (`i <= m.Rows() || j <= cols`),
	// which rejected every valid index. Indices are valid when strictly less
	// than the dimension.
	if i >= m.Rows() || j >= cols {
		return numeric.ErrMatrixOutOfRange
	}
	m.elements[i*cols+j] = value
	return nil
}
// SetElements sets all 16 elements of the matrix in row-major order.
// Returns numeric.ErrOverflow (leaving the matrix untouched) if any value overflows.
func (m *Matrix4D) SetElements(m00, m01, m02, m03, m10, m11, m12, m13, m20, m21, m22, m23, m30, m31, m32, m33 float64) error {
	if numeric.AreAnyOverflow(m00, m01, m02, m03, m10, m11, m12, m13, m20, m21, m22, m23, m30, m31, m32, m33) {
		return numeric.ErrOverflow
	}
	m.elements = [16]float64{
		m00, m01, m02, m03,
		m10, m11, m12, m13,
		m20, m21, m22, m23,
		m30, m31, m32, m33,
	}
	return nil
}
// Elements returns a copy of the matrix elements in row-major order.
func (m *Matrix4D) Elements() [16]float64 {
	// Returning an array returns it by value, which already copies all
	// 16 elements; no manual element-by-element clone is needed.
	return m.elements
}
// Add adds the elements of the given matrix to the elements of this matrix.
// On overflow the matrix is left unmodified and numeric.ErrOverflow is returned.
func (m *Matrix4D) Add(mat *Matrix4D) error {
	var sum [16]float64
	for idx := range m.elements {
		sum[idx] = m.elements[idx] + mat.elements[idx]
	}
	if numeric.AreAnyOverflow(sum[:]...) {
		return numeric.ErrOverflow
	}
	m.elements = sum
	return nil
}
// Sub subtracts the elements of the given matrix from the elements of this matrix.
// On overflow the matrix is left unmodified and numeric.ErrOverflow is returned.
func (m *Matrix4D) Sub(mat *Matrix4D) error {
	var diff [16]float64
	for idx := range m.elements {
		diff[idx] = m.elements[idx] - mat.elements[idx]
	}
	if numeric.AreAnyOverflow(diff[:]...) {
		return numeric.ErrOverflow
	}
	m.elements = diff
	return nil
}
// multiply4DMatrices multiplies two 4x4 row-major matrices.
//
// NOTE(review): with the row-major layout documented on Matrix4D, the index
// arithmetic below computes the product B·A for arguments (a, b) — i.e. the
// SECOND argument is the left operand (out[i][j] = sum_k b[i][k]*a[k][j]).
// This body matches gl-matrix's mat4.multiply, which assumes column-major
// storage; keep the argument-order convention in mind at call sites.
// The returned error is always nil in the current implementation.
func multiply4DMatrices(a, b [16]float64) ([16]float64, error) {
	a00, a01, a02, a03 := a[0], a[1], a[2], a[3]
	a10, a11, a12, a13 := a[4], a[5], a[6], a[7]
	a20, a21, a22, a23 := a[8], a[9], a[10], a[11]
	a30, a31, a32, a33 := a[12], a[13], a[14], a[15]
	// Cache only the current line of the second matrix
	b0, b1, b2, b3 := b[0], b[1], b[2], b[3]
	out := [16]float64{}
	out[0] = b0*a00 + b1*a10 + b2*a20 + b3*a30
	out[1] = b0*a01 + b1*a11 + b2*a21 + b3*a31
	out[2] = b0*a02 + b1*a12 + b2*a22 + b3*a32
	out[3] = b0*a03 + b1*a13 + b2*a23 + b3*a33
	b0, b1, b2, b3 = b[4], b[5], b[6], b[7]
	out[4] = b0*a00 + b1*a10 + b2*a20 + b3*a30
	out[5] = b0*a01 + b1*a11 + b2*a21 + b3*a31
	out[6] = b0*a02 + b1*a12 + b2*a22 + b3*a32
	out[7] = b0*a03 + b1*a13 + b2*a23 + b3*a33
	b0, b1, b2, b3 = b[8], b[9], b[10], b[11]
	out[8] = b0*a00 + b1*a10 + b2*a20 + b3*a30
	out[9] = b0*a01 + b1*a11 + b2*a21 + b3*a31
	out[10] = b0*a02 + b1*a12 + b2*a22 + b3*a32
	out[11] = b0*a03 + b1*a13 + b2*a23 + b3*a33
	b0, b1, b2, b3 = b[12], b[13], b[14], b[15]
	out[12] = b0*a00 + b1*a10 + b2*a20 + b3*a30
	out[13] = b0*a01 + b1*a11 + b2*a21 + b3*a31
	out[14] = b0*a02 + b1*a12 + b2*a22 + b3*a32
	out[15] = b0*a03 + b1*a13 + b2*a23 + b3*a33
	return out, nil
}
// Premultiply left-multiplies the given matrix with this one: m = mat · m.
func (m *Matrix4D) Premultiply(mat *Matrix4D) error {
	// BUG FIX: for the row-major layout, multiply4DMatrices(x, y) computes
	// y·x (second argument on the left). The previous call order
	// multiply4DMatrices(mat, m) therefore produced m·mat — a
	// post-multiplication — contradicting this method's documented contract.
	res, err := multiply4DMatrices(m.elements, mat.elements)
	if err != nil {
		return err
	}
	m.elements = res
	return nil
}
// Postmultiply right-multiplies the given matrix with this one: m = m · mat.
func (m *Matrix4D) Postmultiply(mat *Matrix4D) error {
	// BUG FIX: for the row-major layout, multiply4DMatrices(x, y) computes
	// y·x (second argument on the left). The previous call order
	// multiply4DMatrices(m, mat) therefore produced mat·m — a
	// pre-multiplication — contradicting this method's documented contract.
	res, err := multiply4DMatrices(mat.elements, m.elements)
	if err != nil {
		return err
	}
	m.elements = res
	return nil
}
// Invert inverts this matrix in place using the cofactor/adjugate method.
// Returns numeric.ErrSingularMatrix (leaving the matrix unchanged) when the
// determinant is within 1e-13 of zero.
func (m *Matrix4D) Invert() error {
	a := m.elements
	a00, a01, a02, a03 := a[0], a[1], a[2], a[3]
	a10, a11, a12, a13 := a[4], a[5], a[6], a[7]
	a20, a21, a22, a23 := a[8], a[9], a[10], a[11]
	a30, a31, a32, a33 := a[12], a[13], a[14], a[15]
	// 2x2 minors of the top two and bottom two rows.
	b00 := a00*a11 - a01*a10
	b01 := a00*a12 - a02*a10
	b02 := a00*a13 - a03*a10
	b03 := a01*a12 - a02*a11
	b04 := a01*a13 - a03*a11
	b05 := a02*a13 - a03*a12
	b06 := a20*a31 - a21*a30
	b07 := a20*a32 - a22*a30
	b08 := a20*a33 - a23*a30
	b09 := a21*a32 - a22*a31
	b10 := a21*a33 - a23*a31
	b11 := a22*a33 - a23*a32
	// Calculate the determinant
	det := b00*b11 - b01*b10 + b02*b09 + b03*b08 - b04*b07 + b05*b06
	if math.Abs(det) < 1e-13 {
		return numeric.ErrSingularMatrix
	}
	det = 1.0 / det
	out := [16]float64{}
	out[0] = (a11*b11 - a12*b10 + a13*b09) * det
	out[1] = (a02*b10 - a01*b11 - a03*b09) * det
	out[2] = (a31*b05 - a32*b04 + a33*b03) * det
	out[3] = (a22*b04 - a21*b05 - a23*b03) * det
	out[4] = (a12*b08 - a10*b11 - a13*b07) * det
	out[5] = (a00*b11 - a02*b08 + a03*b07) * det
	out[6] = (a32*b02 - a30*b05 - a33*b01) * det
	out[7] = (a20*b05 - a22*b02 + a23*b01) * det
	out[8] = (a10*b10 - a11*b08 + a13*b06) * det
	out[9] = (a01*b08 - a00*b10 - a03*b06) * det
	out[10] = (a30*b04 - a31*b02 + a33*b00) * det
	out[11] = (a21*b02 - a20*b04 - a23*b00) * det
	out[12] = (a11*b07 - a10*b09 - a12*b06) * det
	out[13] = (a00*b09 - a01*b07 + a02*b06) * det
	out[14] = (a31*b01 - a30*b03 - a32*b00) * det
	out[15] = (a20*b03 - a21*b01 + a22*b00) * det
	// BUG FIX: the computed inverse was discarded — the method previously
	// assigned `m.elements = a` (the original elements), making Invert a no-op.
	m.elements = out
	return nil
}
// Determinant calculates the determinant of the matrix by cofactor expansion
// over precomputed 2x2 minors.
func (m *Matrix4D) Determinant() float64 {
	a := m.elements
	a00, a01, a02, a03 := a[0], a[1], a[2], a[3]
	a10, a11, a12, a13 := a[4], a[5], a[6], a[7]
	a20, a21, a22, a23 := a[8], a[9], a[10], a[11]
	a30, a31, a32, a33 := a[12], a[13], a[14], a[15]
	// 2x2 minors reused across the 3x3 cofactors below.
	b0 := a00*a11 - a01*a10
	b1 := a00*a12 - a02*a10
	b2 := a01*a12 - a02*a11
	b3 := a20*a31 - a21*a30
	b4 := a20*a32 - a22*a30
	b5 := a21*a32 - a22*a31
	b6 := a00*b5 - a01*b4 + a02*b3
	b7 := a10*b5 - a11*b4 + a12*b3
	b8 := a20*b2 - a21*b1 + a22*b0
	b9 := a30*b2 - a31*b1 + a32*b0
	return a13*b6 - a03*b7 + a33*b8 - a23*b9
}
// Adjoint calculates and returns the adjoint/adjugate matrix (the transpose of
// the cofactor matrix). The receiver is not modified. Note this is the same
// cofactor layout used by Invert, without the 1/det scaling.
func (m *Matrix4D) Adjoint() *Matrix4D {
	a := m.elements
	a00, a01, a02, a03 := a[0], a[1], a[2], a[3]
	a10, a11, a12, a13 := a[4], a[5], a[6], a[7]
	a20, a21, a22, a23 := a[8], a[9], a[10], a[11]
	a30, a31, a32, a33 := a[12], a[13], a[14], a[15]
	// 2x2 minors of the top two and bottom two rows.
	b00 := a00*a11 - a01*a10
	b01 := a00*a12 - a02*a10
	b02 := a00*a13 - a03*a10
	b03 := a01*a12 - a02*a11
	b04 := a01*a13 - a03*a11
	b05 := a02*a13 - a03*a12
	b06 := a20*a31 - a21*a30
	b07 := a20*a32 - a22*a30
	b08 := a20*a33 - a23*a30
	b09 := a21*a32 - a22*a31
	b10 := a21*a33 - a23*a31
	b11 := a22*a33 - a23*a32
	out := [16]float64{}
	out[0] = a11*b11 - a12*b10 + a13*b09
	out[1] = a02*b10 - a01*b11 - a03*b09
	out[2] = a31*b05 - a32*b04 + a33*b03
	out[3] = a22*b04 - a21*b05 - a23*b03
	out[4] = a12*b08 - a10*b11 - a13*b07
	out[5] = a00*b11 - a02*b08 + a03*b07
	out[6] = a32*b02 - a30*b05 - a33*b01
	out[7] = a20*b05 - a22*b02 + a23*b01
	out[8] = a10*b10 - a11*b08 + a13*b06
	out[9] = a01*b08 - a00*b10 - a03*b06
	out[10] = a30*b04 - a31*b02 + a33*b00
	out[11] = a21*b02 - a20*b04 - a23*b00
	out[12] = a11*b07 - a10*b09 - a12*b06
	out[13] = a00*b09 - a01*b07 + a02*b06
	out[14] = a31*b01 - a30*b03 - a32*b00
	out[15] = a20*b03 - a21*b01 + a22*b00
	return &Matrix4D{
		elements: out,
	}
}
// Transpose transposes the matrix in place.
func (m *Matrix4D) Transpose() {
	// Swap each strictly-upper-triangular element with its mirror below the
	// diagonal; diagonal entries stay put.
	for row := 0; row < 4; row++ {
		for col := row + 1; col < 4; col++ {
			upper := row*4 + col
			lower := col*4 + row
			m.elements[upper], m.elements[lower] = m.elements[lower], m.elements[upper]
		}
	}
}
// IsSingular returns true if the matrix determinant is exactly zero, false if not.
// Note this is an exact floating-point comparison; see IsNearSingular for a
// tolerance-based check.
func (m *Matrix4D) IsSingular() bool {
	return m.Determinant() == 0
}
// IsNearSingular returns true if the absolute value of the matrix determinant
// is at or below the given tolerance, false if not. Returns
// numeric.ErrInvalidTol when the tolerance itself is invalid.
func (m *Matrix4D) IsNearSingular(tol float64) (bool, error) {
	if numeric.IsInvalidTolerance(tol) {
		return false, numeric.ErrInvalidTol
	}
	return math.Abs(m.Determinant()) <= tol, nil
} | pkg/geometry/matrix4d.go | 0.810516 | 0.771069 | matrix4d.go | starcoder
package docs
import (
"bytes"
"encoding/json"
"strings"
"github.com/alecthomas/template"
"github.com/swaggo/swag"
)
var doc = `{
"schemes": {{ marshal .Schemes }},
"swagger": "2.0",
"info": {
"description": "{{.Description}}",
"title": "{{.Title}}",
"contact": {},
"license": {
"name": "Apache 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0.html"
},
"version": "{{.Version}}"
},
"host": "{{.Host}}",
"basePath": "{{.BasePath}}",
"paths": {
"/tasques": {
"post": {
"description": "Creates a new Task",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"tasks"
],
"summary": "Add a new Task",
"operationId": "create-task",
"parameters": [
{
"description": "The request body",
"name": "newTask",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/task.NewTask"
}
}
],
"responses": {
"201": {
"description": "Created",
"schema": {
"$ref": "#/definitions/task.Task"
}
},
"400": {
"description": "Invalid JSON",
"schema": {
"$ref": "#/definitions/common.Body"
}
}
}
}
},
"/tasques/claims": {
"post": {
"description": "Claims a number of existing Tasks.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"tasks"
],
"summary": "Claims a number of Tasks",
"operationId": "claim-tasks",
"parameters": [
{
"type": "string",
"description": "Worker ID",
"name": "X-TASQUES-WORKER-ID",
"in": "header",
"required": true
},
{
"description": "The request body",
"name": "claim",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/task.Claim"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/task.Task"
}
}
}
}
}
},
"/tasques/claims/{queue}/{id}": {
"delete": {
"description": "Unclaims a claimed Task.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"tasks"
],
"summary": "Unclaims a Task",
"operationId": "unclaim-existing-task",
"parameters": [
{
"type": "string",
"description": "The Queue of the Task",
"name": "queue",
"in": "path",
"required": true
},
{
"type": "string",
"description": "The id of the Task",
"name": "id",
"in": "path",
"required": true
},
{
"type": "string",
"description": "Worker ID",
"name": "X-TASQUES-WORKER-ID",
"in": "header",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/task.Task"
}
},
"400": {
"description": "The Task is not currently claimed",
"schema": {
"$ref": "#/definitions/common.Body"
}
},
"403": {
"description": "Worker currently has not claimed the Task",
"schema": {
"$ref": "#/definitions/common.Body"
}
},
"404": {
"description": "Task does not exist",
"schema": {
"$ref": "#/definitions/common.Body"
}
}
}
}
},
"/tasques/done/{queue}/{id}": {
"put": {
"description": "Marks a claimed Task as done.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"tasks"
],
"summary": "Mark Task as Done",
"operationId": "mark-claimed-task-done",
"parameters": [
{
"description": "The request body",
"name": "success",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/task.Success"
}
},
{
"type": "string",
"description": "The Queue of the Task",
"name": "queue",
"in": "path",
"required": true
},
{
"type": "string",
"description": "The id of the Task",
"name": "id",
"in": "path",
"required": true
},
{
"type": "string",
"description": "Worker ID",
"name": "X-TASQUES-WORKER-ID",
"in": "header",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/task.Task"
}
},
"400": {
"description": "The Task is not currently claimed",
"schema": {
"$ref": "#/definitions/common.Body"
}
},
"403": {
"description": "Worker currently has not claimed the Task",
"schema": {
"$ref": "#/definitions/common.Body"
}
},
"404": {
"description": "Task does not exist",
"schema": {
"$ref": "#/definitions/common.Body"
}
}
}
}
},
"/tasques/failed/{queue}/{id}": {
"put": {
"description": "Marks a claimed Task as failed.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"tasks"
],
"summary": "Mark Task as Failed",
"operationId": "mark-claimed-task-failed",
"parameters": [
{
"description": "The request body",
"name": "failure",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/task.Failure"
}
},
{
"type": "string",
"description": "The Queue of the Task",
"name": "queue",
"in": "path",
"required": true
},
{
"type": "string",
"description": "The id of the Task",
"name": "id",
"in": "path",
"required": true
},
{
"type": "string",
"description": "Worker ID",
"name": "X-TASQUES-WORKER-ID",
"in": "header",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/task.Task"
}
},
"400": {
"description": "The Task is not currently claimed",
"schema": {
"$ref": "#/definitions/common.Body"
}
},
"403": {
"description": "Worker currently has not claimed the Task",
"schema": {
"$ref": "#/definitions/common.Body"
}
},
"404": {
"description": "Task does not exist",
"schema": {
"$ref": "#/definitions/common.Body"
}
}
}
}
},
"/tasques/reports/{queue}/{id}": {
"put": {
"description": "Reports in on a claimed Task.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"tasks"
],
"summary": "Reports on a Task",
"operationId": "report-on-claimed-task",
"parameters": [
{
"description": "The request body",
"name": "newReport",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/task.NewReport"
}
},
{
"type": "string",
"description": "The Queue of the Task",
"name": "queue",
"in": "path",
"required": true
},
{
"type": "string",
"description": "The id of the Task",
"name": "id",
"in": "path",
"required": true
},
{
"type": "string",
"description": "Worker ID",
"name": "X-TASQUES-WORKER-ID",
"in": "header",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/task.Task"
}
},
"400": {
"description": "The Task is not currently claimed",
"schema": {
"$ref": "#/definitions/common.Body"
}
},
"403": {
"description": "Worker currently has not claimed the Task",
"schema": {
"$ref": "#/definitions/common.Body"
}
},
"404": {
"description": "Task does not exist",
"schema": {
"$ref": "#/definitions/common.Body"
}
}
}
}
},
"/tasques/{queue}/{id}": {
"get": {
"description": "Retrieves a persisted Task",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"tasks"
],
"summary": "Get a Task",
"operationId": "get-existing-task",
"parameters": [
{
"type": "string",
"description": "The Queue of the Task",
"name": "queue",
"in": "path",
"required": true
},
{
"type": "string",
"description": "The id of the Task",
"name": "id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/task.Task"
}
},
"404": {
"description": "Task does not exist",
"schema": {
"$ref": "#/definitions/common.Body"
}
}
}
}
}
},
"definitions": {
"common.Body": {
"type": "object",
"required": [
"message"
],
"properties": {
"message": {
"type": "string",
"example": "Something went wrong :("
}
}
},
"common.Metadata": {
"type": "object",
"properties": {
"created_at": {
"type": "string",
"format": "date-time"
},
"modified_at": {
"type": "string",
"format": "date-time"
},
"version": {
"type": "object",
"$ref": "#/definitions/common.Version"
}
}
},
"common.Version": {
"type": "object",
"properties": {
"primary_term": {
"type": "integer"
},
"seq_num": {
"type": "integer"
}
}
},
"task.Args": {
"$ref": "#/definitions/task.JsonObj"
},
"task.Claim": {
"type": "object",
"required": [
"queues"
],
"properties": {
"amount": {
"type": "integer",
"example": 1
},
"block_for": {
"type": "string",
"example": "1s"
},
"queues": {
"type": "array",
"items": {
"type": "string"
},
"example": [
"run-later",
"resize-images"
]
}
}
},
"task.Context": {
"$ref": "#/definitions/task.JsonObj"
},
"task.Failure": {
"type": "object",
"properties": {
"data": {
"type": "object"
}
}
},
"task.JsonObj": {
"type": "object",
"additionalProperties": true
},
"task.LastClaimed": {
"type": "object",
"required": [
"claimed_at",
"times_out_at",
"worker_id"
],
"properties": {
"claimed_at": {
"type": "string",
"format": "date-time"
},
"last_report": {
"type": "object",
"$ref": "#/definitions/task.Report"
},
"result": {
"type": "object",
"$ref": "#/definitions/task.Result"
},
"times_out_at": {
"type": "string",
"format": "date-time"
},
"worker_id": {
"type": "string"
}
}
},
"task.NewReport": {
"type": "object",
"properties": {
"data": {
"type": "object",
"$ref": "#/definitions/task.ReportedData"
}
}
},
"task.NewTask": {
"type": "object",
"required": [
"kind",
"queue"
],
"properties": {
"args": {
"type": "object"
},
"context": {
"type": "object"
},
"kind": {
"type": "string",
"example": "sayHello"
},
"priority": {
"type": "integer"
},
"processing_timeout": {
"type": "string",
"example": "30m"
},
"queue": {
"type": "string",
"example": "run-later"
},
"retry_times": {
"type": "integer",
"example": 10
},
"run_at": {
"type": "string",
"format": "date-time"
}
}
},
"task.Report": {
"type": "object",
"required": [
"at"
],
"properties": {
"at": {
"type": "string",
"format": "date-time"
},
"data": {
"type": "object"
}
}
},
"task.ReportedData": {
"$ref": "#/definitions/task.JsonObj"
},
"task.Result": {
"type": "object",
"required": [
"at"
],
"properties": {
"at": {
"type": "string",
"format": "date-time"
},
"failure": {
"description": "Results. Only one of the following will be filled in at a given time",
"type": "object"
},
"success": {
"type": "object"
}
}
},
"task.Success": {
"type": "object",
"properties": {
"data": {
"type": "object"
}
}
},
"task.Task": {
"type": "object",
"required": [
"attempted",
"id",
"kind",
"last_enqueued_at",
"metadata",
"priority",
"processing_timeout",
"queue",
"retry_times",
"run_at",
"state"
],
"properties": {
"args": {
"type": "object"
},
"attempted": {
"type": "integer"
},
"context": {
"type": "object"
},
"id": {
"type": "string"
},
"kind": {
"type": "string",
"example": "sayHello"
},
"last_claimed": {
"type": "object",
"$ref": "#/definitions/task.LastClaimed"
},
"last_enqueued_at": {
"type": "string",
"format": "date-time"
},
"metadata": {
"type": "object",
"$ref": "#/definitions/common.Metadata"
},
"priority": {
"type": "integer"
},
"processing_timeout": {
"type": "string",
"example": "30m"
},
"queue": {
"type": "string",
"example": "run-later"
},
"retry_times": {
"type": "integer",
"example": 10
},
"run_at": {
"type": "string",
"format": "date-time"
},
"state": {
"type": "string",
"example": "queued"
}
}
}
},
"securityDefinitions": {
"BasicAuth": {
"type": "basic"
}
}
}`
// swaggerInfo holds the template variables substituted into the swagger doc
// template above (Version, Host, etc. map to {{.Version}}, {{.Host}}, ...).
type swaggerInfo struct {
	Version     string
	Host        string
	BasePath    string
	Schemes     []string
	Title       string
	Description string
}
// SwaggerInfo holds exported Swagger Info so clients can modify it before the
// doc template is rendered by ReadDoc.
var SwaggerInfo = swaggerInfo{
	Version:     "0.0.1",
	Host:        "localhost:8080",
	BasePath:    "/",
	Schemes:     []string{},
	Title:       "Tasques API",
	Description: "A Task queue backed by Elasticsearch",
}
// s implements swag.Swagger so the generated spec can be registered below.
type s struct{}

// ReadDoc renders the swagger doc template with the current SwaggerInfo values
// and returns the resulting JSON. On any template error it falls back to
// returning the raw, unrendered template.
// NOTE(review): github.com/alecthomas/template is an archived fork of
// text/template — presumably kept for swag compatibility; confirm before
// swapping it for the standard library.
func (s *s) ReadDoc() string {
	sInfo := SwaggerInfo
	// Escape newlines so the description stays a valid JSON string value.
	sInfo.Description = strings.Replace(sInfo.Description, "\n", "\\n", -1)
	t, err := template.New("swagger_info").Funcs(template.FuncMap{
		// "marshal" lets the template embed values (e.g. Schemes) as JSON.
		"marshal": func(v interface{}) string {
			a, _ := json.Marshal(v)
			return string(a)
		},
	}).Parse(doc)
	if err != nil {
		return doc
	}
	var tpl bytes.Buffer
	if err := t.Execute(&tpl, sInfo); err != nil {
		return doc
	}
	return tpl.String()
}
// init registers the generated spec with swag so swagger UIs can serve it.
func init() {
	swag.Register(swag.Name, &s{})
} | docs/docs.go | 0.515376 | 0.405566 | docs.go | starcoder
package main
import (
"bufio"
"fmt"
"log"
"os"
"reflect"
"sort"
"strconv"
"strings"
)
// Hand represents a five-card poker hand.
type Hand struct {
	// The ranks are sorted in descending order.
	// e.g. the hand [AC 8D 8H 3S 2S] has ranks [14 8 8 3 2]
	//               [KC 9D 9H 3C 2C]           [13 9 9 3 2]
	ranks []int
	// A set of suit strings which the hand contains. Used to determine if
	// the hand is a flush.
	suits map[string]bool
	// A slice of [count rank] pairs. This will be used to break ties between
	// two hands of equal value (e.g. two hands that both have 'one-pair').
	// Will be sorted highest count first, then highest rank first.
	// e.g. the hands [AC 8D 8H 3S 2S] and [KC 9D 9H 3C 2C] have groups
	//                [[2 8], [1 14], [1 3], [1 2]]
	//                [[2 9], [1 13], [1 3], [1 2]]
	// This allows us to correctly judge that the second hand beats the former
	// by sorting Hands lexicographically based on the groups field.
	groups [][]int
	// A sorted slice of counts of the card ranks.
	// e.g. the ranks [14 8 8 3 2] have pattern [2 1 1 1]
	//                [10 9 7 7 7]              [3 1 1]
	// Used to determine whether the hand has 'two-pair' or 'threeofakind' etc.
	pattern []int
}
// newHand constructs a new Hand type from the given cards.
// cards is of the form e.g. [AC 8D 8H 3C 2S]: first byte is the rank
// (2-9, T, J, Q, K, A), second byte is the suit.
func newHand(cards []string) Hand {
	trans := map[string]int{"A": 14, "K": 13, "Q": 12, "J": 11, "T": 10}
	var ranks []int                  // The ranks field.
	suits := make(map[string]bool)   // The suits field.
	rankToCount := make(map[int]int) // Used to create other fields.
	for _, card := range cards {
		s := string(card[1])
		suits[s] = true
		r, ok := trans[string(card[0])]
		if !ok {
			// Numeric rank: parse the digit directly.
			r, _ = strconv.Atoi(string(card[0]))
		}
		ranks = append(ranks, r)
		rankToCount[r]++
	}
	sort.Sort(sort.Reverse(sort.IntSlice(ranks)))
	// An ace should be played 'low' i.e. with rank 1 if it makes a straight.
	if reflect.DeepEqual(ranks, []int{14, 5, 4, 3, 2}) {
		ranks = []int{5, 4, 3, 2, 1}
		// BUG FIX: remap the ace in rankToCount as well. Previously only
		// `ranks` was remapped, so the tie-break `groups` still contained
		// [1 14] and ranked the wheel (the LOWEST straight) above every
		// other straight when comparing two straights.
		delete(rankToCount, 14)
		rankToCount[1] = 1
	}
	var pattern []int                   // The pattern field.
	countToRanks := make(map[int][]int) // Used to create the groups field.
	for k, v := range rankToCount {
		pattern = append(pattern, v)
		countToRanks[v] = append(countToRanks[v], k)
	}
	sort.Sort(sort.Reverse(sort.IntSlice(pattern)))
	var groups [][]int         // The groups field.
	seen := make(map[int]bool) // Used to ensure no duplicates.
	for _, count := range pattern {
		// For every count in the pattern append to groups a [count rank] pair.
		// groups is ordered by highest count first, then highest rank first.
		if !seen[count] {
			rs := countToRanks[count]
			sort.Sort(sort.Reverse(sort.IntSlice(rs)))
			for _, r := range rs {
				groups = append(groups, []int{count, r})
			}
		}
		seen[count] = true
	}
	return Hand{ranks, suits, groups, pattern}
}
// Hand type predicates. Each checks the sorted rank-count pattern (and, for
// flushes, the suit set; for straights, the rank span of five distinct ranks).
func (h Hand) onepair() bool       { return isEqual(h.pattern, []int{2, 1, 1, 1}) }
func (h Hand) twopair() bool       { return isEqual(h.pattern, []int{2, 2, 1}) }
func (h Hand) threeofakind() bool  { return isEqual(h.pattern, []int{3, 1, 1}) }
func (h Hand) fourofakind() bool   { return isEqual(h.pattern, []int{4, 1}) }
func (h Hand) fullhouse() bool     { return isEqual(h.pattern, []int{3, 2}) }
func (h Hand) flush() bool         { return len(h.suits) == 1 }
// straight: five distinct ranks spanning exactly 4 (works for the ace-low
// wheel because newHand remaps its ranks to [5 4 3 2 1]).
func (h Hand) straight() bool      { return (len(h.pattern) == 5) && (h.ranks[0]-h.ranks[4] == 4) }
func (h Hand) straightflush() bool { return h.flush() && h.straight() }
// isEqual reports whether two int slices have identical length and contents.
func isEqual(x, y []int) bool {
	if len(x) != len(y) {
		return false
	}
	for i, v := range x {
		if v != y[i] {
			return false
		}
	}
	return true
}
// evaluate returns the hand's category on a nine-value scale, higher is
// better: 8 straight flush, 7 four of a kind, 6 full house, 5 flush,
// 4 straight, 3 three of a kind, 2 two pair, 1 one pair, 0 high card.
func (h Hand) evaluate() int {
	// Order matters: check the strongest categories first.
	switch {
	case h.straightflush():
		return 8
	case h.fourofakind():
		return 7
	case h.fullhouse():
		return 6
	case h.flush():
		return 5
	case h.straight():
		return 4
	case h.threeofakind():
		return 3
	case h.twopair():
		return 2
	case h.onepair():
		return 1
	default:
		return 0
	}
}
// player1wins reports whether hand h1 (Player 1) beats hand h2 (Player 2).
func player1wins(h1, h2 Hand) bool {
	// First check whether hands have different value on the category scale.
	if v1, v2 := h1.evaluate(), h2.evaluate(); v1 != v2 {
		return v1 > v2
	}
	// If those values are equal, perform lexicographic comparison based on
	// the groups field, a slice of [count rank] pairs ordered by highest
	// count first, then highest rank first.
	for i := range h1.groups {
		// Since v1 == v2, the groups are the same length and the
		// counts are identical. Therefore, compare ranks.
		g1, g2 := h1.groups[i], h2.groups[i]
		rank1, rank2 := g1[1], g2[1]
		switch {
		case rank1 > rank2:
			return true
		case rank1 < rank2:
			return false
		}
	}
	// The problem specifies that ties are not possible, so this
	// should be unreachable. This code path corresponds to
	// two hands being equal in all relevant respects (i.e. they
	// only differ by suit).
	return false
}
// problem54 counts the rounds won by Player 1 in data/poker.txt.
// Each line holds ten space-separated cards; the first five belong to
// Player 1 and the last five to Player 2. Exits fatally on I/O errors.
func problem54() int {
	file, err := os.Open("data/poker.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	var count int // Count of number of times Player 1 wins
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		row := scanner.Text()
		cards := strings.Split(row, " ")
		// First five cards form Player 1's hand, the rest Player 2's.
		h1, h2 := newHand(cards[:5]), newHand(cards[5:])
		if player1wins(h1, h2) {
			count++
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	return count
}
func main() {
fmt.Println(problem54())
} | go/problems/problem54.go | 0.566258 | 0.442034 | problem54.go | starcoder |
package gographviz
//The analysed representation of the Graph parsed from the DOT format.
//Name is the graph identifier from the DOT header; Directed and Strict
//mirror the digraph/strict keywords. Nodes, Edges, SubGraphs and Relations
//index the graph's contents and node-to-subgraph ownership.
type Graph struct {
	Attrs Attrs
	Name string
	Directed bool
	Strict bool
	Nodes *Nodes
	Edges *Edges
	SubGraphs *SubGraphs
	Relations *Relations
}
//Creates a new empty graph, ready to be populated.
//All collection fields are initialized so callers may add nodes, edges,
//subgraphs and attributes immediately without nil checks.
func NewGraph() *Graph {
	return &Graph{
		Attrs: make(Attrs),
		Name: "",
		Directed: false,
		Strict: false,
		Nodes: NewNodes(),
		Edges: NewEdges(),
		SubGraphs: NewSubGraphs(),
		Relations: NewRelations(),
	}
}
//If the graph is strict then multiple edges are not allowed between the same pairs of nodes,
//see dot man page.
func (this *Graph) SetStrict(strict bool) {
	this.Strict = strict
}
//Sets whether the graph is directed (true) or undirected (false).
//A directed graph is rendered with the DOT "digraph" keyword.
func (this *Graph) SetDir(dir bool) {
	this.Directed = dir
}
//Sets the graph name (the identifier that follows graph/digraph in DOT).
func (this *Graph) SetName(name string) {
	this.Name = name
}
//Adds an edge to the graph from node src to node dst.
//srcPort and dstPort are the node ports; leave them as empty strings if not required.
//This does not imply the adding of missing nodes.
func (this *Graph) AddPortEdge(src, srcPort, dst, dstPort string, directed bool, attrs map[string]string) {
	this.Edges.Add(&Edge{src, srcPort, dst, dstPort, directed, attrs})
}
//Adds an edge to the graph from node src to node dst, without ports.
//This does not imply the adding of missing nodes.
//If directed is set to true then SetDir(true) must also be called or there will be a syntax error in the output.
func (this *Graph) AddEdge(src, dst string, directed bool, attrs map[string]string) {
	this.AddPortEdge(src, "", dst, "", directed, attrs)
}
//Adds a node to a graph/subgraph.
//If no subgraph exists, use the name of the main graph as parentGraph.
//This does not imply the adding of a missing subgraph; the node-to-parent
//link is recorded in Relations.
func (this *Graph) AddNode(parentGraph string, name string, attrs map[string]string) {
	this.Nodes.Add(&Node{name, attrs})
	this.Relations.Add(parentGraph, name)
}
//getAttrs returns the attribute set of the named graph or subgraph.
//It panics when graphName matches neither the main graph nor any subgraph,
//so callers must ensure the target exists first.
func (this *Graph) getAttrs(graphName string) Attrs {
	if this.Name == graphName {
		return this.Attrs
	}
	g, ok := this.SubGraphs.SubGraphs[graphName]
	if !ok {
		panic("graph or subgraph " + graphName + " does not exist")
	}
	return g.Attrs
}
//Adds an attribute to a graph/subgraph.
//Panics (via getAttrs) when parentGraph does not exist.
func (this *Graph) AddAttr(parentGraph string, field string, value string) {
	this.getAttrs(parentGraph).Add(field, value)
}
//Adds a subgraph to a graph/subgraph and applies the given attributes to it.
//NOTE(review): the parentGraph argument is not used by this implementation —
//the subgraph-to-parent relation is not recorded here; confirm against the
//upstream gographviz library whether a Relations entry is expected.
func (this *Graph) AddSubGraph(parentGraph string, name string, attrs map[string]string) {
	this.SubGraphs.Add(name)
	for key, value := range attrs {
		this.AddAttr(name, key, value)
	}
}
//IsNode reports whether a node with the given name exists in the graph.
func (this *Graph) IsNode(name string) bool {
	_, ok := this.Nodes.Lookup[name]
	return ok
}
func (this *Graph) IsSubGraph(name string) bool {
_, ok := this.SubGraphs.SubGraphs[name]
return ok
} | vendor/github.com/awalterschulze/gographviz/graph.go | 0.759225 | 0.542257 | graph.go | starcoder |
package seq
import (
"time"
"github.com/leesjensen/go-chart/util"
)
// Time is a utility singleton with helper functions for time seq generation.
var Time timeSequence

// timeSequence is a stateless receiver type; helpers hang off it so that
// callers write seq.Time.Days(...), seq.Time.Hours(...), etc.
type timeSequence struct{}
// Days generates a seq of timestamps by day, from -days ago through today,
// inclusive (days+1 elements, oldest first). A negative days yields nil.
func (ts timeSequence) Days(days int) []time.Time {
	if days < 0 {
		return nil
	}
	// Read the clock once so every element shares the same base instant;
	// calling time.Now() per iteration could straddle a day boundary and
	// produce an inconsistent series.
	now := time.Now()
	values := make([]time.Time, 0, days+1)
	for day := days; day >= 0; day-- {
		values = append(values, now.AddDate(0, 0, -day))
	}
	return values
}
// MarketHours generates hourly timestamps between from and to, keeping only
// hours that fall within regular trading hours (marketOpen..marketClose) on
// non-holiday weekdays. marketOpen/marketClose presumably carry only a
// time of day, applied to each date via util.Date.On — confirm in util.
func (ts timeSequence) MarketHours(from, to time.Time, marketOpen, marketClose time.Time, isHoliday util.HolidayProvider) []time.Time {
	var times []time.Time
	cursor := util.Date.On(marketOpen, from)
	toClose := util.Date.On(marketClose, to)
	for cursor.Before(toClose) || cursor.Equal(toClose) {
		todayOpen := util.Date.On(marketOpen, cursor)
		todayClose := util.Date.On(marketClose, cursor)
		isValidTradingDay := !isHoliday(cursor) && util.Date.IsWeekDay(cursor.Weekday())
		// Keep the cursor only when it lies inside [open, close] on a valid day.
		if (cursor.Equal(todayOpen) || cursor.After(todayOpen)) && (cursor.Equal(todayClose) || cursor.Before(todayClose)) && isValidTradingDay {
			times = append(times, cursor)
		}
		// Past today's close: jump straight to the next market open;
		// otherwise advance hour by hour.
		if cursor.After(todayClose) {
			cursor = util.Date.NextMarketOpen(cursor, marketOpen, isHoliday)
		} else {
			cursor = util.Date.NextHour(cursor)
		}
	}
	return times
}
// MarketHourQuarters generates, for each valid trading day (weekday,
// non-holiday) in [from, to], four timestamps: market open, noon, 2pm and
// market close.
func (ts timeSequence) MarketHourQuarters(from, to time.Time, marketOpen, marketClose time.Time, isHoliday util.HolidayProvider) []time.Time {
	var times []time.Time
	cursor := util.Date.On(marketOpen, from)
	toClose := util.Date.On(marketClose, to)
	for cursor.Before(toClose) || cursor.Equal(toClose) {
		isValidTradingDay := !isHoliday(cursor) && util.Date.IsWeekDay(cursor.Weekday())
		if isValidTradingDay {
			todayOpen := util.Date.On(marketOpen, cursor)
			todayNoon := util.Date.NoonOn(cursor)
			today2pm := util.Date.On(util.Date.Time(14, 0, 0, 0, cursor.Location()), cursor)
			todayClose := util.Date.On(marketClose, cursor)
			times = append(times, todayOpen, todayNoon, today2pm, todayClose)
		}
		cursor = util.Date.NextDay(cursor)
	}
	return times
}
// MarketDayCloses generates the market-close timestamp of every valid
// trading day (weekday, non-holiday) in [from, to].
func (ts timeSequence) MarketDayCloses(from, to time.Time, marketOpen, marketClose time.Time, isHoliday util.HolidayProvider) []time.Time {
	var times []time.Time
	cursor := util.Date.On(marketOpen, from)
	toClose := util.Date.On(marketClose, to)
	for cursor.Before(toClose) || cursor.Equal(toClose) {
		isValidTradingDay := !isHoliday(cursor) && util.Date.IsWeekDay(cursor.Weekday())
		if isValidTradingDay {
			todayClose := util.Date.On(marketClose, cursor)
			times = append(times, todayClose)
		}
		cursor = util.Date.NextDay(cursor)
	}
	return times
}
// MarketDayAlternateCloses is like MarketDayCloses but advances the cursor
// two calendar days at a time, emitting closes for every other day.
func (ts timeSequence) MarketDayAlternateCloses(from, to time.Time, marketOpen, marketClose time.Time, isHoliday util.HolidayProvider) []time.Time {
	var times []time.Time
	cursor := util.Date.On(marketOpen, from)
	toClose := util.Date.On(marketClose, to)
	for cursor.Before(toClose) || cursor.Equal(toClose) {
		isValidTradingDay := !isHoliday(cursor) && util.Date.IsWeekDay(cursor.Weekday())
		if isValidTradingDay {
			todayClose := util.Date.On(marketClose, cursor)
			times = append(times, todayClose)
		}
		cursor = cursor.AddDate(0, 0, 2)
	}
	return times
}
// MarketDayMondayCloses generates the close timestamp for from's day (when
// it is a valid trading day) and then for each subsequent Monday up to to.
// NOTE(review): unlike the sibling methods, the marketOpen parameter is
// unused here — confirm whether that is intentional.
func (ts timeSequence) MarketDayMondayCloses(from, to time.Time, marketOpen, marketClose time.Time, isHoliday util.HolidayProvider) []time.Time {
	var times []time.Time
	cursor := util.Date.On(marketClose, from)
	toClose := util.Date.On(marketClose, to)
	for cursor.Equal(toClose) || cursor.Before(toClose) {
		isValidTradingDay := !isHoliday(cursor) && util.Date.IsWeekDay(cursor.Weekday())
		if isValidTradingDay {
			times = append(times, cursor)
		}
		cursor = util.Date.NextDayOfWeek(cursor, time.Monday)
	}
	return times
}
// Hours returns totalHours consecutive hourly timestamps beginning at start.
func (ts timeSequence) Hours(start time.Time, totalHours int) []time.Time {
	times := make([]time.Time, totalHours)
	cursor := start
	for i := range times {
		times[i] = cursor
		cursor = cursor.Add(time.Hour)
	}
	return times
}
// HoursFilled adds zero values for the data bounded by the start and end of the xdata array.
// It returns a dense hourly series from min(xdata) to max(xdata) inclusive;
// hours with no corresponding xdata entry keep a 0 y-value. Assumes xdata
// and ydata have the same length — TODO confirm callers guarantee this.
func (ts timeSequence) HoursFilled(xdata []time.Time, ydata []float64) ([]time.Time, []float64) {
	start := Time.Start(xdata)
	end := Time.End(xdata)
	totalHours := util.Math.AbsInt(util.Date.DiffHours(start, end))
	finalTimes := ts.Hours(start, totalHours+1)
	finalValues := make([]float64, totalHours+1)
	var hoursFromStart int
	for i, xd := range xdata {
		// Place each sample at its hour offset from the series start.
		hoursFromStart = util.Date.DiffHours(start, xd)
		finalValues[hoursFromStart] = ydata[i]
	}
	return finalTimes, finalValues
}
// Start returns the earliest (min) time in a list of times.
// An empty list yields the zero time.Time.
func (ts timeSequence) Start(times []time.Time) time.Time {
	var earliest time.Time
	for i, t := range times {
		if i == 0 || t.Before(earliest) {
			earliest = t
		}
	}
	return earliest
}
// Start returns the earliest (min) time in a list of times.
func (ts timeSequence) End(times []time.Time) time.Time {
if len(times) == 0 {
return time.Time{}
}
end := times[0]
for _, t := range times[1:] {
if t.After(end) {
end = t
}
}
return end
} | seq/time.go | 0.68458 | 0.499878 | time.go | starcoder |
package dnaThreeBit
import (
"github.com/vertgenlab/gonomics/dna"
"log"
"strings"
)
// RuneToThreeBitBase returns the single base in ThreeBitBase format that
// corresponds to the given rune. Lower and upper case are accepted; any
// other rune terminates the program via log.Fatalf.
func RuneToThreeBitBase(r rune) ThreeBitBase {
	switch r {
	case 'A', 'a':
		return A
	case 'C', 'c':
		return C
	case 'G', 'g':
		return G
	case 'T', 't':
		return T
	case 'N', 'n':
		return N
	default:
		log.Fatalf("Error: unexpected character in dna %c\n", r)
		return N
	}
}
// ThreeBitBaseToRune returns a rune that corresponds to the single base in
// ThreeBitBase format. Any value outside A, C, G, T, N terminates the
// program via log.Fatalf.
func ThreeBitBaseToRune(base ThreeBitBase) rune {
	switch base {
	case A:
		return 'A'
	case C:
		return 'C'
	case G:
		return 'G'
	case T:
		return 'T'
	case N:
		return 'N'
	default:
		log.Fatalf("Error: unexpected value in dna Base when converting to rune\n")
		return 'N'
	}
}
// ThreeBitBaseToString returns a one-character string for the given base,
// delegating the conversion (and its fatal-error handling) to ThreeBitBaseToRune.
func ThreeBitBaseToString(b ThreeBitBase) string {
	return string(ThreeBitBaseToRune(b))
}
// FromString creates a new ThreeBit from a string of DNA characters
// {A,C,G,T,N}, case-insensitive; any other rune is fatal via
// RuneToThreeBitBase. Bases are appended one at a time, so growth of the
// packed representation is handled by Append.
func FromString(s string) *ThreeBit {
	answer := &ThreeBit{Seq: []uint64{}, Len: 0}
	for _, runeValue := range s {
		answer = Append(answer, RuneToThreeBitBase(runeValue))
	}
	return answer
}
// ToString returns a string representation of the ThreeBit passed in.
// The builder is pre-sized to the sequence length to avoid re-allocation
// while writing one rune per base.
func ToString(fragment *ThreeBit) string {
	var buffer strings.Builder
	buffer.Grow(fragment.Len)
	for i := 0; i < fragment.Len; i++ {
		buffer.WriteRune(dna.BaseToRune(GetBase(fragment, i)))
	}
	return buffer.String()
}
// RangeToDnaBases returns a slice of dna.Base that represents the bases from
// start to end (left-closed, right-open) of fragment.
// Exits fatally when end exceeds the fragment length or start >= end.
// NOTE(review): start == end (an empty range) is also fatal, and a negative
// start is not rejected here — confirm whether callers can pass either.
func RangeToDnaBases(fragment *ThreeBit, start int, end int) []dna.Base {
	if end > fragment.Len || start >= end {
		log.Fatalf("Error: unable to extract bases from %d to %d from a sequence of length %d\n", start, end, fragment.Len)
	}
	answer := make([]dna.Base, 0, end-start)
	for i := start; i < end; i++ {
		answer = append(answer, GetBase(fragment, i))
	}
	return answer
}
// ToDnaBases returns a slice of dna.Base that represents the same sequence
// of bases present in fragment
func ToDnaBases(fragment *ThreeBit) []dna.Base {
return RangeToDnaBases(fragment, 0, fragment.Len)
} | dnaThreeBit/convert.go | 0.821796 | 0.434761 | convert.go | starcoder |
package onshape
import (
"encoding/json"
)
// BTPTopLevelEnumDeclaration284AllOf holds the fields specific to the
// BTPTopLevelEnumDeclaration-284 "allOf" composition of the Onshape API model.
// Every field is an optional pointer so "unset" is distinguishable from the
// zero value, matching the ,omitempty JSON tags.
type BTPTopLevelEnumDeclaration284AllOf struct {
	Annotations *[]BTPAnnotation231 `json:"annotations,omitempty"`
	BtType *string `json:"btType,omitempty"`
	SpaceInEmptyList *BTPSpace10 `json:"spaceInEmptyList,omitempty"`
	TrailingComma *bool `json:"trailingComma,omitempty"`
	Values *[]BTPIdentifier8 `json:"values,omitempty"`
}
// NewBTPTopLevelEnumDeclaration284AllOf instantiates a new BTPTopLevelEnumDeclaration284AllOf object.
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by the API are set; the set of arguments
// will change when the set of required properties is changed.
func NewBTPTopLevelEnumDeclaration284AllOf() *BTPTopLevelEnumDeclaration284AllOf {
	this := BTPTopLevelEnumDeclaration284AllOf{}
	return &this
}
// NewBTPTopLevelEnumDeclaration284AllOfWithDefaults instantiates a new BTPTopLevelEnumDeclaration284AllOf object.
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by the API are set.
func NewBTPTopLevelEnumDeclaration284AllOfWithDefaults() *BTPTopLevelEnumDeclaration284AllOf {
	this := BTPTopLevelEnumDeclaration284AllOf{}
	return &this
}
// GetAnnotations returns the Annotations field value if set, zero value otherwise.
func (o *BTPTopLevelEnumDeclaration284AllOf) GetAnnotations() []BTPAnnotation231 {
	if o == nil || o.Annotations == nil {
		return nil // zero value of a slice
	}
	return *o.Annotations
}
// GetAnnotationsOk returns a tuple with the Annotations field value if set,
// nil otherwise, and a boolean reporting whether the value has been set.
func (o *BTPTopLevelEnumDeclaration284AllOf) GetAnnotationsOk() (*[]BTPAnnotation231, bool) {
	if o != nil && o.Annotations != nil {
		return o.Annotations, true
	}
	return nil, false
}
// HasAnnotations reports whether the Annotations field has been set.
func (o *BTPTopLevelEnumDeclaration284AllOf) HasAnnotations() bool {
	return o != nil && o.Annotations != nil
}
// SetAnnotations stores a pointer to (a copy of) v in the Annotations field, marking it as set.
func (o *BTPTopLevelEnumDeclaration284AllOf) SetAnnotations(v []BTPAnnotation231) {
	o.Annotations = &v
}
// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTPTopLevelEnumDeclaration284AllOf) GetBtType() string {
	if o == nil || o.BtType == nil {
		return "" // zero value of a string
	}
	return *o.BtType
}
// GetBtTypeOk returns a tuple with the BtType field value if set, nil
// otherwise, and a boolean reporting whether the value has been set.
func (o *BTPTopLevelEnumDeclaration284AllOf) GetBtTypeOk() (*string, bool) {
	if o != nil && o.BtType != nil {
		return o.BtType, true
	}
	return nil, false
}
// HasBtType reports whether the BtType field has been set.
func (o *BTPTopLevelEnumDeclaration284AllOf) HasBtType() bool {
	return o != nil && o.BtType != nil
}
// SetBtType stores a pointer to (a copy of) v in the BtType field, marking it as set.
func (o *BTPTopLevelEnumDeclaration284AllOf) SetBtType(v string) {
	o.BtType = &v
}
// GetSpaceInEmptyList returns the SpaceInEmptyList field value if set, zero value otherwise.
func (o *BTPTopLevelEnumDeclaration284AllOf) GetSpaceInEmptyList() BTPSpace10 {
	if o == nil || o.SpaceInEmptyList == nil {
		return BTPSpace10{} // zero value
	}
	return *o.SpaceInEmptyList
}
// GetSpaceInEmptyListOk returns a tuple with the SpaceInEmptyList field value
// if set, nil otherwise, and a boolean reporting whether the value has been set.
func (o *BTPTopLevelEnumDeclaration284AllOf) GetSpaceInEmptyListOk() (*BTPSpace10, bool) {
	if o != nil && o.SpaceInEmptyList != nil {
		return o.SpaceInEmptyList, true
	}
	return nil, false
}
// HasSpaceInEmptyList reports whether the SpaceInEmptyList field has been set.
func (o *BTPTopLevelEnumDeclaration284AllOf) HasSpaceInEmptyList() bool {
	return o != nil && o.SpaceInEmptyList != nil
}
// SetSpaceInEmptyList stores a pointer to (a copy of) v in the SpaceInEmptyList field, marking it as set.
func (o *BTPTopLevelEnumDeclaration284AllOf) SetSpaceInEmptyList(v BTPSpace10) {
	o.SpaceInEmptyList = &v
}
// GetTrailingComma returns the TrailingComma field value if set, zero value otherwise.
func (o *BTPTopLevelEnumDeclaration284AllOf) GetTrailingComma() bool {
	if o == nil || o.TrailingComma == nil {
		return false // zero value of a bool
	}
	return *o.TrailingComma
}
// GetTrailingCommaOk returns a tuple with the TrailingComma field value if
// set, nil otherwise, and a boolean reporting whether the value has been set.
func (o *BTPTopLevelEnumDeclaration284AllOf) GetTrailingCommaOk() (*bool, bool) {
	if o != nil && o.TrailingComma != nil {
		return o.TrailingComma, true
	}
	return nil, false
}
// HasTrailingComma reports whether the TrailingComma field has been set.
func (o *BTPTopLevelEnumDeclaration284AllOf) HasTrailingComma() bool {
	return o != nil && o.TrailingComma != nil
}
// SetTrailingComma stores a pointer to (a copy of) v in the TrailingComma field, marking it as set.
func (o *BTPTopLevelEnumDeclaration284AllOf) SetTrailingComma(v bool) {
	o.TrailingComma = &v
}
// GetValues returns the Values field value if set, zero value otherwise.
func (o *BTPTopLevelEnumDeclaration284AllOf) GetValues() []BTPIdentifier8 {
	if o == nil || o.Values == nil {
		return nil // zero value of a slice
	}
	return *o.Values
}
// GetValuesOk returns a tuple with the Values field value if set, nil
// otherwise, and a boolean reporting whether the value has been set.
func (o *BTPTopLevelEnumDeclaration284AllOf) GetValuesOk() (*[]BTPIdentifier8, bool) {
	if o != nil && o.Values != nil {
		return o.Values, true
	}
	return nil, false
}
// HasValues reports whether the Values field has been set.
func (o *BTPTopLevelEnumDeclaration284AllOf) HasValues() bool {
	return o != nil && o.Values != nil
}
// SetValues stores a pointer to (a copy of) v in the Values field, marking it as set.
func (o *BTPTopLevelEnumDeclaration284AllOf) SetValues(v []BTPIdentifier8) {
	o.Values = &v
}
// MarshalJSON serializes only the fields that are set, mirroring the
// ,omitempty JSON tags on the struct.
func (o BTPTopLevelEnumDeclaration284AllOf) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.Annotations != nil {
		toSerialize["annotations"] = o.Annotations
	}
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	if o.SpaceInEmptyList != nil {
		toSerialize["spaceInEmptyList"] = o.SpaceInEmptyList
	}
	if o.TrailingComma != nil {
		toSerialize["trailingComma"] = o.TrailingComma
	}
	if o.Values != nil {
		toSerialize["values"] = o.Values
	}
	return json.Marshal(toSerialize)
}
// NullableBTPTopLevelEnumDeclaration284AllOf wraps the model with an explicit
// "set" flag so that an explicit JSON null can be round-tripped distinctly
// from an absent field.
type NullableBTPTopLevelEnumDeclaration284AllOf struct {
	value *BTPTopLevelEnumDeclaration284AllOf
	isSet bool
}
// Get returns the wrapped value (nil when unset or explicitly null).
func (v NullableBTPTopLevelEnumDeclaration284AllOf) Get() *BTPTopLevelEnumDeclaration284AllOf {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableBTPTopLevelEnumDeclaration284AllOf) Set(val *BTPTopLevelEnumDeclaration284AllOf) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set or UnmarshalJSON has been called on the wrapper.
func (v NullableBTPTopLevelEnumDeclaration284AllOf) IsSet() bool {
	return v.isSet
}
// Unset clears the wrapped value and the set flag.
func (v *NullableBTPTopLevelEnumDeclaration284AllOf) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableBTPTopLevelEnumDeclaration284AllOf returns a wrapper around val pre-marked as set.
func NewNullableBTPTopLevelEnumDeclaration284AllOf(val *BTPTopLevelEnumDeclaration284AllOf) *NullableBTPTopLevelEnumDeclaration284AllOf {
	return &NullableBTPTopLevelEnumDeclaration284AllOf{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableBTPTopLevelEnumDeclaration284AllOf) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
func (v *NullableBTPTopLevelEnumDeclaration284AllOf) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | onshape/model_btp_top_level_enum_declaration_284_all_of.go | 0.748995 | 0.407805 | model_btp_top_level_enum_declaration_284_all_of.go | starcoder |
package graph
import (
"fmt"
"github.com/graphql-go/graphql"
)
// Term is a node in the glossary graph: an identified definition whose value
// text may embed {{term:<id>}} templates referencing other terms.
type Term struct {
	// id is the stable key used in the terms map and in templates.
	id string
	// name is the human-readable title of the term.
	name string
	// value is the definition text, with embedded {{term:...}} templates.
	value string
	// terms lists the ids of the terms referenced by value.
	terms []string
}
func CreateTermSchema() (*graphql.Schema, error) {
terms := map[string]Term{
"entropy": Term{
id: "entropy",
name: "Entropy",
value: "Entropy represents the possible {{term:kinetic_state}} of a {{term:system_of_particles}}. A larger Entropy means there are more possible states the {{term: system_of_particles}} could be in. Entropy is measured from an arbitrary origin value; thus entropies can only be compared if they have the same origin value.",
terms: []string{"kinetic_state", "system_of_particles"},
},
"kinetic_state": Term{
id: "kinetic_state",
name: "Kinetic State",
value: "The Kinetic State, or often just state, of a {{term:particle}} is the current configuration of its position and velocity in space. In 3 dimensional space, there are thus 6 fields in a {{term:particle;display:particles}} state (x, y, z, Vx, Vy, Vz). A particle can be in one of many different states, collectively called its possible kinetic states. The kinetic state of a {{term:system_of_particles}} is the configuration of the kinetic states of each of its {{term:particle;display:particles}}. If {{term:particle;display:particles}} have M kinetic states, and there are N {{term:particle;display:particles}}, then there are almost M^N possible kinetic states (they cannot share the same x,y,z position).",
terms: []string{"particle", "system_of_particles"},
},
"particle": Term{
id: "particle",
name: "Particle",
value: "A particle is a physical entity that occupies space and has a {{term:kinetic_state}}. According to the dominant interpretation of quantum mechanics, it actually has just a probability of being in various states, and only takes on the single state when it is measured.",
terms: []string{"kinetic_state"},
},
"system_of_particles": Term{
id: "system_of_particles",
name: "System of Particles",
value: "A System of Particles is a collection of {{term:particle;display:particles}}. The {{term:particle;display:particles}} have independent {{term:kinetic_state;display:kinetic states}}, but cannot occupy the exact same position in space. The more {{term:kinetic_state;display:kinetic states}} a system of particles can take on, the larger its {{term:entropy}}.",
terms: []string{"particle", "kinetic_state", "entropy"},
},
}
term := graphql.NewObject(graphql.ObjectConfig{
Name: "Term",
Description: "A term in the graph",
Fields: graphql.Fields{
"id": &graphql.Field{
Type: graphql.ID,
Description: "The id of the term",
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
if term, ok := p.Source.(Term); ok {
return term.id, nil
}
return nil, nil
},
},
"name": &graphql.Field{
Type: graphql.String,
Description: "The human recognizable name of this term",
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
if term, ok := p.Source.(Term); ok {
return term.name, nil
}
return nil, nil
},
},
"value": &graphql.Field{
Type: graphql.String,
Description: "The interpolable value of this term, which contains templates for nested terms",
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
if term, ok := p.Source.(Term); ok {
return term.value, nil
}
return nil, nil
},
},
},
})
term.AddFieldConfig("terms", &graphql.Field{
Type: graphql.NewList(term),
Description: "The terms linked to this term",
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
if term, ok := p.Source.(Term); ok {
resultTerms := make([]Term, 0, len(term.terms))
for _, id := range term.terms {
resultTerms = append(resultTerms, terms[id])
}
return resultTerms, nil
}
return []interface{}{}, nil
},
})
query := graphql.NewObject(graphql.ObjectConfig{
Name: "TermQuery",
Fields: graphql.Fields{
"term": &graphql.Field{
Type: term,
Args: graphql.FieldConfigArgument{
"id": &graphql.ArgumentConfig{
Description: "The id of the term you are after",
Type: graphql.String,
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
id, _ := p.Args["id"].(string)
if id == "" {
return nil, fmt.Errorf("id is required")
}
term, _ := terms[id]
return term, nil
},
},
},
})
schemaConfig := graphql.SchemaConfig{Query: query}
schema, err := graphql.NewSchema(schemaConfig)
if err != nil {
return nil, fmt.Errorf("Failed to create graph schema: %v", err)
}
return &schema, nil
} | packages/go/graph/term.go | 0.605099 | 0.7017 | term.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.