code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package lmq
import (
"github.com/fiwippi/go-quantise/internal/quantisers"
"image"
"image/color"
)
const (
	xMax = 255 // maximum greyscale intensity
	xMin = 0   // minimum greyscale intensity
)
// QuantiseGreyscale returns "m" greyscale colours which best recreate the
// colour palette of the original image, using Lloyd-Max style iteration:
// pixels are segmented by the current thresholds, segment means are taken,
// and each interior threshold moves to the midpoint of neighbouring means,
// until the thresholds stop changing.
//
// m must be >= 1; m == 0 would divide by zero when spreading the initial
// thresholds.
func QuantiseGreyscale(img image.Image, m int) color.Palette {
	// Build the greyscale histogram of the source image.
	histogram := quantisers.CreateGreyscaleHistogram(img)

	// Initial thresholds are spread evenly across [xMin, xMax].
	T := make([]uint8, m+1)
	for i := 0; i <= m; i++ {
		T[i] = uint8(xMin + (i*(xMax-xMin))/m)
	}

	segments := make(map[int]quantisers.LinearHistogram)
	averages := make(map[int]int)
	oldT := make([]uint8, len(T))

	for {
		copy(oldT, T)

		// Re-segment from scratch on every iteration. Reusing the maps
		// would leave entries assigned under the previous thresholds in
		// place, skewing the segment averages.
		for i := 1; i <= m; i++ {
			segments[i] = quantisers.LinearHistogram{}
		}

		// Assign every histogram entry to the segment whose threshold
		// interval (T[i-1], T[i]] contains it.
		for k, v := range histogram {
			// k == 0 never satisfies T[i-1] < k, so place it explicitly.
			if k == 0 {
				segments[1][0] = v
			}
			for i := 1; i <= m; i++ {
				if T[i-1] < k && k <= T[i] {
					segments[i][k] = v
				}
			}
		}

		// Segment averages become the candidate palette entries.
		for i := 1; i <= m; i++ {
			averages[i] = mean(segments[i])
		}

		// Each interior threshold moves to the midpoint of its
		// neighbouring segment averages.
		for i := 1; i <= m-1; i++ {
			T[i] = uint8((averages[i] + averages[i+1]) / 2)
		}

		// Converged once the thresholds stop moving.
		if equal(oldT, T) {
			break
		}
	}

	// The converged segment averages are the palette.
	colours := make([]color.Color, m)
	for i := range colours {
		colours[i] = color.Gray{uint8(averages[i+1])}
	}
	return colours
}
// mean returns the frequency-weighted average greyscale value of the
// histogram, or 0 for an empty histogram.
func mean(h quantisers.LinearHistogram) int {
	var weighted, count int
	for value, freq := range h {
		weighted += int(value) * freq
		count += freq
	}
	if count == 0 {
		return 0
	}
	return weighted / count
}
// equal reports whether two uint8 slices have identical length and contents.
func equal(a, b []uint8) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
package parser
import (
"fmt"
)
// TypeNode is an interface for different ways of creating new types or
// referring to existing ones.
type TypeNode interface {
	Node() // must also implement the Node interface

	// Type returns a string representation of the underlying type.
	Type() string
	// String returns a human-readable description of the node.
	String() string
	// Variadic reports whether the type is used as a variadic parameter.
	Variadic() bool
	// SetName records the name the type had in the source.
	// NOTE(review): every current implementation uses a value receiver, so
	// the assignment mutates a copy and is lost — confirm intended usage.
	SetName(string)
	// GetName returns the name recorded by SetName.
	GetName() string
}
// SingleTypeNode refers to an existing type. Such as "string".
type SingleTypeNode struct {
	baseNode

	PackageName string // package in which the type is declared
	SourceName  string // name as written in the source
	TypeName    string // name of the referenced type
	IsVariadic  bool   // true when used as a variadic parameter
}

// Type returns the name of the referenced type.
func (stn SingleTypeNode) Type() string {
	return stn.TypeName
}

// String returns a "type(pkg.name)" description of the node.
func (stn SingleTypeNode) String() string {
	return fmt.Sprintf("type(%s.%s)", stn.PackageName, stn.Type())
}

// Variadic reports whether the type is used as a variadic parameter.
func (stn SingleTypeNode) Variadic() bool {
	return stn.IsVariadic
}

// SetName records the source name of the type.
// NOTE(review): value receiver — the assignment mutates a copy and is lost
// after the call returns. Likely a bug; confirm callers hold pointers.
func (stn SingleTypeNode) SetName(name string) {
	stn.SourceName = name
}

// GetName returns the recorded source name.
func (stn SingleTypeNode) GetName() string {
	return stn.SourceName
}
// StructTypeNode refers to a struct type.
type StructTypeNode struct {
	baseNode

	SourceName string         // name as written in the source
	Types      []TypeNode     // field types, in declaration order
	Names      map[string]int // field name -> index into Types
	IsVariadic bool           // true when used as a variadic parameter
}

// Type returns a formatted dump of the field types.
func (stn StructTypeNode) Type() string {
	return fmt.Sprintf("%+v", stn.Types)
}

// String returns a "StructTypeNode(...)" description of the node.
func (stn StructTypeNode) String() string {
	return fmt.Sprintf("StructTypeNode(%+v)", stn.Types)
}

// Variadic reports whether the type is used as a variadic parameter.
func (stn StructTypeNode) Variadic() bool {
	return stn.IsVariadic
}

// SetName records the source name of the type.
// NOTE(review): value receiver — the assignment is lost; see SingleTypeNode.
func (stn StructTypeNode) SetName(name string) {
	stn.SourceName = name
}

// GetName returns the recorded source name.
func (stn StructTypeNode) GetName() string {
	return stn.SourceName
}
// ArrayTypeNode refers to an array (fixed length).
type ArrayTypeNode struct {
	baseNode

	SourceName string   // name as written in the source
	ItemType   TypeNode // element type
	Len        int64    // fixed array length
	IsVariadic bool     // true when used as a variadic parameter
}

// Type returns the "[len]elem" form of the array type.
func (atn ArrayTypeNode) Type() string {
	return fmt.Sprintf("[%d]%+v", atn.Len, atn.ItemType)
}

// String returns the same representation as Type.
func (atn ArrayTypeNode) String() string {
	return atn.Type()
}

// Variadic reports whether the type is used as a variadic parameter.
func (atn ArrayTypeNode) Variadic() bool {
	return atn.IsVariadic
}

// SetName records the source name of the type.
// NOTE(review): value receiver — the assignment is lost; see SingleTypeNode.
func (atn ArrayTypeNode) SetName(name string) {
	atn.SourceName = name
}

// GetName returns the recorded source name.
func (atn ArrayTypeNode) GetName() string {
	return atn.SourceName
}
// SliceTypeNode refers to a slice (variable length).
type SliceTypeNode struct {
	baseNode

	SourceName string   // name as written in the source
	ItemType   TypeNode // element type
	IsVariadic bool     // true when used as a variadic parameter
}

// Type returns the "[]elem" form of the slice type.
func (stn SliceTypeNode) Type() string {
	return fmt.Sprintf("[]%+v", stn.ItemType)
}

// String returns the same representation as Type.
func (stn SliceTypeNode) String() string {
	return stn.Type()
}

// Variadic reports whether the type is used as a variadic parameter.
func (stn SliceTypeNode) Variadic() bool {
	return stn.IsVariadic
}

// SetName records the source name of the type.
// NOTE(review): value receiver — the assignment is lost; see SingleTypeNode.
func (stn SliceTypeNode) SetName(name string) {
	stn.SourceName = name
}

// GetName returns the recorded source name.
func (stn SliceTypeNode) GetName() string {
	return stn.SourceName
}
// InterfaceTypeNode refers to an interface type.
type InterfaceTypeNode struct {
	baseNode

	SourceName string                     // name as written in the source
	Methods    map[string]InterfaceMethod // method name -> signature
	IsVariadic bool                       // true when used as a variadic parameter
}

// Type returns an "interface{...}" dump of the method set.
func (itn InterfaceTypeNode) Type() string {
	return fmt.Sprintf("interface{%+v}", itn.Methods)
}

// String returns the same representation as Type.
func (itn InterfaceTypeNode) String() string {
	return itn.Type()
}

// Variadic reports whether the type is used as a variadic parameter.
func (itn InterfaceTypeNode) Variadic() bool {
	return itn.IsVariadic
}

// SetName records the source name of the type.
// NOTE(review): value receiver — the assignment is lost; see SingleTypeNode.
func (itn InterfaceTypeNode) SetName(name string) {
	itn.SourceName = name
}

// GetName returns the recorded source name.
func (itn InterfaceTypeNode) GetName() string {
	return itn.SourceName
}

// InterfaceMethod describes one method signature in an interface.
type InterfaceMethod struct {
	ArgumentTypes []TypeNode // parameter types, in order
	ReturnTypes   []TypeNode // return types, in order
}
// PointerTypeNode refers to a pointer to another type.
type PointerTypeNode struct {
	baseNode

	SourceName string   // name as written in the source
	IsVariadic bool     // true when used as a variadic parameter
	ValueType  TypeNode // pointed-to type
}

// Type returns a "pointer(elem)" form of the pointer type.
func (ptn PointerTypeNode) Type() string {
	return fmt.Sprintf("pointer(%+v)", ptn.ValueType.Type())
}

// String returns the same representation as Type.
func (ptn PointerTypeNode) String() string {
	return ptn.Type()
}

// SetName records the source name of the type.
// NOTE(review): value receiver — the assignment is lost; see SingleTypeNode.
func (ptn PointerTypeNode) SetName(name string) {
	ptn.SourceName = name
}

// GetName returns the recorded source name.
func (ptn PointerTypeNode) GetName() string {
	return ptn.SourceName
}

// Variadic reports whether the type is used as a variadic parameter.
func (ptn PointerTypeNode) Variadic() bool {
	return ptn.IsVariadic
}
// FuncTypeNode refers to a function type.
type FuncTypeNode struct {
	baseNode

	ArgTypes   []TypeNode // parameter types, in order
	RetTypes   []TypeNode // return types, in order
	SourceName string     // name as written in the source
	IsVariadic bool       // true when used as a variadic parameter
}

// Type returns a "func(args)(rets)" form of the function type.
func (ftn FuncTypeNode) Type() string {
	return fmt.Sprintf("func(%+v)(%+v)", ftn.ArgTypes, ftn.RetTypes)
}

// String returns the same representation as Type.
func (ftn FuncTypeNode) String() string {
	return ftn.Type()
}

// SetName records the source name of the type.
// NOTE(review): value receiver — the assignment is lost; see SingleTypeNode.
func (ftn FuncTypeNode) SetName(name string) {
	ftn.SourceName = name
}

// GetName returns the recorded source name.
func (ftn FuncTypeNode) GetName() string {
	return ftn.SourceName
}

// Variadic reports whether the type is used as a variadic parameter.
func (ftn FuncTypeNode) Variadic() bool {
	return ftn.IsVariadic
}
package typ
import (
"fmt"
"github.com/mb0/xelf/bfr"
)
// Kind is a bit-set describing a type. It represents all type information except reference names
// and type parameters. It is a handy implementation detail, but not part of the xelf specification.
type Kind uint64

// Bits exposes the named kind constants, keyed by name, e.g. for bit-set formatting.
func (Kind) Bits() map[string]int64 { return kindConsts }
// A Kind describes a type in a slot that uses the 12 least significant bits. The rest of the bits
// are reserved to be used by specific types. Type variables use them to store a unique type id, and
// other types might use them in the future to optimize access to the most important type parameter
// details without chasing pointers.
const (
	SlotSize = 12
	SlotMask = 0xfff
)

// Each bit in a slot has a certain meaning. The first six bits specify a base type, the next two
// bits flag the type as a context or optional variant.
const (
	KindNum  Kind = 1 << iota // 0x001
	KindChar                  // 0x002
	KindIdxr                  // 0x004
	KindKeyr                  // 0x008
	KindExpr                  // 0x010
	KindMeta                  // 0x020
	KindCtx                   // 0x040
	KindOpt                   // 0x080
	KindBit1                  // 0x100
	KindBit2                  // 0x200
	KindBit3                  // 0x400
	KindBit4                  // 0x800
)

const (
	MaskUber = KindExpr | KindMeta // 0000 0011 0000
	MaskBits = 0xf00               // 1111 0000 0000
	MaskBase = KindAny | MaskUber  // 0000 0011 1111
	MaskElem = MaskBase | MaskBits // 1111 0011 1111
	MaskRef  = MaskElem | KindCtx  // 1111 0111 1111
)

const (
	KindVoid = 0x00
	KindPrim = KindNum | KindChar  // 0000 0000 0011
	KindCont = KindIdxr | KindKeyr // 0000 0000 1100
	KindAny  = KindPrim | KindCont // 0000 0000 1111
	KindBool = KindNum | KindBit1  // 0x101
	KindInt  = KindNum | KindBit2  // 0x201
	KindReal = KindNum | KindBit3  // 0x401
	KindSpan = KindNum | KindBit4  // 0x801
	KindStr  = KindChar | KindBit1 // 0x102
	KindRaw  = KindChar | KindBit2 // 0x202
	KindUUID = KindChar | KindBit3 // 0x402
	KindTime = KindChar | KindBit4 // 0x802
	KindList = KindIdxr | KindBit1 // 0x104
	KindDict = KindKeyr | KindBit2 // 0x208
	// NOTE(review): the previous comments claimed 0x30c/0x34c, but
	// KindCont|KindBit3 is 0x40c and KindCtx|KindRec is 0x44c.
	KindRec  = KindCont | KindBit3 // 0x40c
	KindBits = KindCtx | KindInt   // 0x241
	KindEnum = KindCtx | KindStr   // 0x142
	KindObj  = KindCtx | KindRec   // 0x44c
	KindTyp  = KindExpr | KindBit1 // 0x110
	KindFunc = KindExpr | KindBit2 // 0x210
	KindDyn  = KindExpr | KindBit3 // 0x410
	KindTag  = KindExpr | KindBit4 // 0x810
	KindForm = KindCtx | KindFunc  // 0x250
	KindCall = KindCtx | KindDyn   // 0x450
	KindSym  = KindCtx | KindTag   // 0x850
	KindVar  = KindMeta | KindBit1 // 0x120
	KindRef  = KindMeta | KindBit2 // 0x220
	KindSch  = KindMeta | KindBit3 // 0x420
	KindAlt  = KindMeta | KindBit4 // 0x820
)
// ParseKind parses str as a kind name and returns the matching Kind.
// A leading '~' schema prefix is accepted for all names; a trailing '?'
// selects the optional variant, but only for the kinds handled by the
// second switch. Unknown names return KindVoid and ErrInvalid.
func ParseKind(str string) (Kind, error) {
	if len(str) == 0 {
		return KindVoid, ErrInvalid
	}
	// we allow the schema prefix for all types
	// outside an explicit type context non-prominent types must use the prefix
	pref := str[0] == '~'
	if pref {
		str = str[1:]
	}
	// These names never take an optional '?' suffix.
	switch str {
	case "void":
		return KindVoid, nil
	case "any":
		return KindAny, nil
	case "typ":
		return KindTyp, nil
	case "idxr":
		return KindIdxr, nil
	case "keyr":
		return KindKeyr, nil
	case "cont":
		return KindCont, nil
	case "expr":
		return KindExpr, nil
	case "list":
		return KindList, nil
	case "dict":
		return KindDict, nil
	case "sym":
		return KindSym, nil
	case "dyn":
		return KindDyn, nil
	case "call":
		return KindCall, nil
	case "form":
		return KindForm, nil
	case "func":
		return KindFunc, nil
	case "named":
		return KindTag, nil
	case "alt":
		return KindAlt, nil
	}
	// A trailing '?' on the remaining names marks the optional variant.
	var kk Kind
	if str[len(str)-1] == '?' {
		str = str[:len(str)-1]
		kk = KindOpt
	}
	// No remaining kind name is longer than five characters.
	if len(str) > 5 {
		return KindVoid, ErrInvalid
	}
	switch str {
	case "num":
		return kk | KindNum, nil
	case "char":
		return kk | KindChar, nil
	case "prim":
		return kk | KindPrim, nil
	case "bool":
		return kk | KindBool, nil
	case "int":
		return kk | KindInt, nil
	case "real":
		return kk | KindReal, nil
	case "str":
		return kk | KindStr, nil
	case "raw":
		return kk | KindRaw, nil
	case "uuid":
		return kk | KindUUID, nil
	case "time":
		return kk | KindTime, nil
	case "span":
		return kk | KindSpan, nil
	case "rec":
		return kk | KindRec, nil
	case "bits":
		return kk | KindBits, nil
	case "enum":
		return kk | KindEnum, nil
	case "obj":
		return kk | KindObj, nil
	}
	return KindVoid, ErrInvalid
}
// WriteBfr writes the simple string form of the kind to b, appending '?'
// for optional kinds other than any. Kinds with no simple form write
// nothing and return nil.
func (k Kind) WriteBfr(b *bfr.Ctx) error {
	str := simpleStr(k)
	if str == "" {
		return nil
	}
	// Propagate the Fmt error instead of overwriting it with the result
	// of the later WriteByte call, as the previous version did.
	if err := b.Fmt(str); err != nil {
		return err
	}
	if k != KindAny && k&KindOpt != 0 {
		return b.WriteByte('?')
	}
	return nil
}
// String returns the simple string form of the kind, with a '?' suffix for
// optional kinds other than any, or "invalid" when the kind has no simple
// representation.
func (k Kind) String() string {
	s := simpleStr(k)
	if s == "" {
		return "invalid"
	}
	if k&KindOpt != 0 && k != KindAny {
		s += "?"
	}
	return s
}
// MarshalText implements encoding.TextMarshaler using the String form.
func (k Kind) MarshalText() ([]byte, error) {
	return []byte(k.String()), nil
}
// UnmarshalText implements encoding.TextUnmarshaler via ParseKind.
// The receiver is assigned even on error (it becomes KindVoid then).
func (k *Kind) UnmarshalText(txt []byte) error {
	kk, err := ParseKind(string(txt))
	*k = kk
	return err
}
// simpleStr returns the canonical name for kinds that have a simple,
// parameter-free representation, or "" when the kind has none (callers
// then render "invalid" or a longer form).
func simpleStr(k Kind) string {
	// First match on the slot bits alone.
	switch k & SlotMask {
	case KindVoid:
		return "void"
	case KindAny:
		return "any"
	case KindTyp:
		return "typ"
	case KindForm:
		return "form"
	case KindFunc:
		return "func"
	case KindDyn:
		return "dyn"
	case KindCall:
		return "call"
	case KindTag:
		return "named"
	case KindSym:
		return "sym"
	case KindVar:
		// Type variables carry their id in the bits above the slot.
		// Fixed: reuse the computed id instead of shifting k again.
		id := k >> SlotSize
		if id == 0 {
			return "@"
		}
		return fmt.Sprintf("@%d", id)
	case KindAlt:
		return "alt"
	case KindIdxr:
		return "idxr"
	case KindKeyr:
		return "keyr"
	case KindCont:
		return "cont"
	case KindExpr:
		return "expr"
	case KindMeta:
		return "meta"
	case KindList:
		return "list"
	case KindDict:
		return "dict"
	}
	// Then match with the ctx bit included but the opt flag and the high
	// slot bits masked off.
	switch k & MaskRef {
	case KindRef:
		return "@"
	case KindSch:
		return "~"
	case KindNum:
		return "num"
	case KindChar:
		return "char"
	case KindBool:
		return "bool"
	case KindInt:
		return "int"
	case KindReal:
		return "real"
	case KindStr:
		return "str"
	case KindRaw:
		return "raw"
	case KindUUID:
		return "uuid"
	case KindTime:
		return "time"
	case KindSpan:
		return "span"
	case KindRec:
		return "rec"
	case KindBits:
		return "bits"
	case KindEnum:
		return "enum"
	case KindObj:
		return "obj"
	}
	return ""
}
var kindConsts = map[string]int64{
"Num": int64(KindNum),
"Char": int64(KindChar),
"Idxr": int64(KindIdxr),
"Keyr": int64(KindKeyr),
"Expr": int64(KindExpr),
"Meta": int64(KindMeta),
"Ctx": int64(KindCtx),
"Opt": int64(KindOpt),
"Bit1": int64(KindBit1),
"Bit2": int64(KindBit2),
"Bit3": int64(KindBit3),
"Bit4": int64(KindBit4),
"Void": int64(KindVoid),
"Prim": int64(KindPrim),
"Cont": int64(KindCont),
"Any": int64(KindAny),
"Bool": int64(KindBool),
"Int": int64(KindInt),
"Real": int64(KindReal),
"Span": int64(KindSpan),
"Str": int64(KindStr),
"Raw": int64(KindRaw),
"UUID": int64(KindUUID),
"Time": int64(KindTime),
"List": int64(KindList),
"Dict": int64(KindDict),
"Rec": int64(KindRec),
"Bits": int64(KindBits),
"Enum": int64(KindEnum),
"Obj": int64(KindObj),
"Typ": int64(KindTyp),
"Func": int64(KindFunc),
"Form": int64(KindForm),
"Dyn": int64(KindDyn),
"Call": int64(KindDyn),
"Named": int64(KindTag),
"Sym": int64(KindSym),
"Var": int64(KindVar),
"Ref": int64(KindRef),
"Alt": int64(KindAlt),
} | typ/kind.go | 0.581184 | 0.51879 | kind.go | starcoder |
package action
import (
"sync/atomic"
"time"
"github.com/aamcrae/gpio"
)
const stepperQueueSize = 20 // Size of queue for requests

// msg is a request queued to the background stepping goroutine.
type msg struct {
	speed float64   // RPM
	steps int       // half-steps to move; sign selects direction
	sync  chan bool // if non-nil, signalled once the request is handled
}
// Stepper represents a stepper motor.
// All actual stepping is done in a background goroutine, so requests can be queued.
// All step values assume half-steps.
// The current step number is maintained as an absolute number, referenced from
// 0 when the stepper is first initialised. This can be a negative or positive number,
// depending on the movement.
type Stepper struct {
	pin1, pin2, pin3, pin4 io.Setter // Pins for controlling outputs
	// factor converts RPM to a per-step delay: delay_ns = factor / rpm.
	// (The previous comment called it "steps per revolution", which is
	// wrong — see NewStepper, where it is ns*60/rev.)
	factor   float64
	mChan    chan msg  // channel for message requests
	stopChan chan bool // channel for signalling stops; closed by Close
	index    int       // Index into the half-step sequence (0-7)
	on       bool      // true if motor drivers on
	current  int64     // Current step number as an absolute count (accessed atomically)
}
// Half-step drive sequence: one row of four pin levels per sequence index.
var sequence = [][]int{
	{1, 0, 0, 0},
	{1, 1, 0, 0},
	{0, 1, 0, 0},
	{0, 1, 1, 0},
	{0, 0, 1, 0},
	{0, 0, 1, 1},
	{0, 0, 0, 1},
	{1, 0, 0, 1},
}
// NewStepper creates and initialises a Stepper struct, representing a
// stepper motor controlled by 4 GPIO pins.
// rev is the number of steps per revolution, used as a reference value for
// determining the delays between steps.
func NewStepper(rev int, pin1, pin2, pin3, pin4 io.Setter) *Stepper {
	s := &Stepper{
		// Precalculate a timing factor so an RPM value converts
		// directly into the per-step delay in nanoseconds.
		factor:   float64(time.Second.Nanoseconds()*60) / float64(rev),
		pin1:     pin1,
		pin2:     pin2,
		pin3:     pin3,
		pin4:     pin4,
		mChan:    make(chan msg, stepperQueueSize),
		stopChan: make(chan bool),
	}
	go s.handler()
	return s
}
// Close stops the motor and frees any resources.
// Stop flushes all outstanding requests before the channels are closed;
// the closed channels then cause the background handler to exit.
func (s *Stepper) Close() {
	s.Stop()
	s.Off()
	close(s.mChan)
	close(s.stopChan)
}
// State returns the current sequence index, so that the current state
// of the motor can be saved and then restored in a new instance.
// This allows the exact state of the motor to be restored
// across process restarts so that the maximum accuracy can be guaranteed.
func (s *Stepper) State() int {
	return s.index
}
// GetStep returns the current step number, which is an accumulative
// signed value representing the steps moved, with 0 as the starting
// location. Read atomically because the stepping goroutine updates it.
func (s *Stepper) GetStep() int64 {
	return atomic.LoadInt64(&s.current)
}
// Restore initialises the sequence index from a previously saved State
// value. It does not drive the GPIO outputs; the restored index takes
// effect on the next step. (The previous comment claimed the outputs were
// set here, which the code does not do.)
func (s *Stepper) Restore(i int) {
	s.index = i & 7
}
// Off turns off the GPIOs to remove the power from the motor, first
// waiting for any queued requests to complete.
func (s *Stepper) Off() {
	if !s.on {
		return
	}
	s.Wait()
	for _, pin := range []io.Setter{s.pin1, s.pin2, s.pin3, s.pin4} {
		pin.Set(0)
	}
	s.on = false
}
// Stop aborts any current stepping, and flushes all queued requests.
// The send on stopChan interrupts the handler; Wait then ensures the
// flush has been processed before returning.
func (s *Stepper) Stop() {
	s.stopChan <- true
	s.Wait()
}
// Step queues a request to step the motor at the selected RPM for the
// given number of half-steps. Positive halfSteps runs the motor
// clockwise, negative counter-clockwise. Multiple requests may be queued.
func (s *Stepper) Step(rpm float64, halfSteps int) {
	if halfSteps == 0 || rpm <= 0.0 {
		return
	}
	if !s.on {
		// Energise the coils at the current sequence index first.
		s.output()
		s.on = true
	}
	s.mChan <- msg{speed: rpm, steps: halfSteps}
}
// Wait blocks until all queued requests have completed, by queueing a
// sentinel message carrying a sync channel and waiting for the handler
// (or flush) to signal it.
func (s *Stepper) Wait() {
	c := make(chan bool)
	s.mChan <- msg{speed: 0, steps: 0, sync: c}
	<-c
}
// handler is the background goroutine that listens on the message
// channel and runs the motor. It exits either when step reports that the
// stop channel was closed, or when it drains a closed stopChan directly
// (a receive on a closed channel yields the zero value, false).
func (s *Stepper) handler() {
	for {
		select {
		case m := <-s.mChan:
			// Request to step the motor
			if m.steps != 0 {
				if s.step(m.speed, m.steps) {
					// step saw the stop channel closed: terminate.
					return
				}
			}
			if m.sync != nil {
				// If sync channel is present (Wait sentinel), signal it.
				m.sync <- true
				close(m.sync)
			}
		case stop := <-s.stopChan:
			// Request to stop and flush all requests.
			s.flush()
			if !stop {
				// false means stopChan was closed: shut down.
				return
			}
		}
	}
}
// step controls the motor via the GPIOs, to move the motor the
// requested number of half-steps. A negative value moves the motor
// counter-clockwise, positive moves the motor clockwise.
// It returns true when the stop channel has been closed (the handler
// should terminate) and false otherwise, including on a normal Stop.
func (s *Stepper) step(rpm float64, steps int) bool {
	inc := 1
	if steps < 0 {
		// Counter-clockwise
		inc = -1
		steps = -steps
	}
	// Calculate the per-step delay in nanoseconds by using the timing factor
	// and requested RPM, and use a ticker to signal the step sequence.
	delay := time.Duration(s.factor / rpm)
	ticker := time.NewTicker(delay)
	defer ticker.Stop()
	for i := 0; i < steps; i++ {
		s.index = (s.index + inc) & 7
		s.output()
		atomic.AddInt64(&s.current, int64(inc))
		select {
		case stop := <-s.stopChan:
			s.flush()
			if stop {
				// Explicit Stop request: abort this stepping loop only.
				return false
			} else {
				// channel is closed, so kill handler.
				return true
			}
		case <-ticker.C:
			// Per-step delay elapsed; continue with the next half-step.
		}
	}
	return false
}
// flush drains all remaining requests from the message channel without
// executing them, signalling any pending sync channels so callers
// blocked in Wait are released.
func (s *Stepper) flush() {
	for {
		select {
		case m := <-s.mChan:
			if m.sync != nil {
				m.sync <- true
				close(m.sync)
			} else if m.steps == 0 && m.speed == 0.0 {
				// Zero-value msg: the channel has been closed.
				return
			}
		default:
			// Queue is empty.
			return
		}
	}
}
// Set the GPIO outputs according to the current sequence index.
func (s *Stepper) output() {
seq := sequence[s.index]
s.pin1.Set(seq[0])
s.pin2.Set(seq[1])
s.pin3.Set(seq[2])
s.pin4.Set(seq[3])
} | action/stepper.go | 0.67822 | 0.423875 | stepper.go | starcoder |
package iso20022
// SubscriptionOrder4 is an order to invest the investor's principal in an
// investment fund. Pointer-typed elements tagged omitempty model optional
// ISO 20022 message elements and are omitted from the XML when nil.
type SubscriptionOrder4 struct {

	// Unique and unambiguous identifier for an order, as assigned by the instructing party.
	OrderReference *Max35Text `xml:"OrdrRef"`

	// Specifies the category of the investment fund order.
	OrderType []*FundOrderType1 `xml:"OrdrTp,omitempty"`

	// Investment fund class related to an order.
	FinancialInstrumentDetails *FinancialInstrument6 `xml:"FinInstrmDtls"`

	// Quantity of investment fund units to be subscribed.
	UnitsNumber *FinancialInstrumentQuantity1 `xml:"UnitsNb"`

	// Amount of money used to determine the quantity of investment fund units to be subscribed.
	NetAmount *ActiveOrHistoricCurrencyAndAmount `xml:"NetAmt"`

	// Indicates the rounding direction applied to nearest unit.
	Rounding *RoundingDirection2Code `xml:"Rndg,omitempty"`

	// Amount of money used to determine the quantity of investment fund units to be subscribed, including all charges, commissions, and tax.
	GrossAmount *ActiveOrHistoricCurrencyAndAmount `xml:"GrssAmt,omitempty"`

	// Information needed to process a currency exchange or conversion.
	ForeignExchangeDetails *ForeignExchangeTerms5 `xml:"FXDtls,omitempty"`

	// Dividend option chosen by the account owner based on the options offered in the prospectus.
	IncomePreference *IncomePreference1Code `xml:"IncmPref,omitempty"`

	// Reference of a letter of intent program, in which sales commissions are reduced based on the aggregate of a customer's actual purchase and anticipated purchases, over a specific period of time, and as agreed by the customer. A letter of intent program is mainly used in the US market.
	LetterIntentReference *Max35Text `xml:"LttrInttRef,omitempty"`

	// Reference of an accumulation right program, in which sales commissions are based on a customer's present purchases of shares and the aggregate quantity previously purchased by the customer. An accumulation rights program is mainly used in the US market.
	AccumulationRightReference *Max35Text `xml:"AcmltnRghtRef,omitempty"`

	// Charge for the placement of an order.
	ChargeDetails []*Charge8 `xml:"ChrgDtls,omitempty"`

	// Commission linked to the execution of an investment fund order.
	CommissionDetails []*Commission6 `xml:"ComssnDtls,omitempty"`

	// Tax applicable to an investment fund order.
	TaxDetails []*Tax6 `xml:"TaxDtls,omitempty"`

	// Parameters used to execute the settlement of an investment fund order.
	SettlementAndCustodyDetails *FundSettlementParameters4 `xml:"SttlmAndCtdyDtls,omitempty"`

	// Indicates whether the financial instrument is to be physically delivered.
	PhysicalDeliveryIndicator *YesNoIndicator `xml:"PhysDlvryInd"`

	// Information related to physical delivery of the securities.
	PhysicalDeliveryDetails *NameAndAddress4 `xml:"PhysDlvryDtls,omitempty"`

	// Currency requested for settlement of cash proceeds.
	RequestedSettlementCurrency *CurrencyCode `xml:"ReqdSttlmCcy,omitempty"`

	// Currency to be used for pricing the fund. This currency must be among the set of currencies in which the price may be expressed, as stated in the prospectus.
	RequestedNAVCurrency *CurrencyCode `xml:"ReqdNAVCcy,omitempty"`

	// Payment transaction resulting from the investment fund order execution.
	CashSettlementDetails *PaymentTransaction19 `xml:"CshSttlmDtls,omitempty"`
}
// SetOrderReference sets the order reference.
func (s *SubscriptionOrder4) SetOrderReference(value string) {
	s.OrderReference = (*Max35Text)(&value)
}

// AddOrderType appends a new, empty order type and returns it for population.
func (s *SubscriptionOrder4) AddOrderType() *FundOrderType1 {
	newValue := new(FundOrderType1)
	s.OrderType = append(s.OrderType, newValue)
	return newValue
}

// AddFinancialInstrumentDetails allocates and returns the financial instrument details.
func (s *SubscriptionOrder4) AddFinancialInstrumentDetails() *FinancialInstrument6 {
	s.FinancialInstrumentDetails = new(FinancialInstrument6)
	return s.FinancialInstrumentDetails
}

// AddUnitsNumber allocates and returns the units number.
func (s *SubscriptionOrder4) AddUnitsNumber() *FinancialInstrumentQuantity1 {
	s.UnitsNumber = new(FinancialInstrumentQuantity1)
	return s.UnitsNumber
}

// SetNetAmount sets the net amount with its currency.
func (s *SubscriptionOrder4) SetNetAmount(value, currency string) {
	s.NetAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}

// SetRounding sets the rounding direction code.
func (s *SubscriptionOrder4) SetRounding(value string) {
	s.Rounding = (*RoundingDirection2Code)(&value)
}

// SetGrossAmount sets the gross amount with its currency.
func (s *SubscriptionOrder4) SetGrossAmount(value, currency string) {
	s.GrossAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}

// AddForeignExchangeDetails allocates and returns the foreign exchange details.
func (s *SubscriptionOrder4) AddForeignExchangeDetails() *ForeignExchangeTerms5 {
	s.ForeignExchangeDetails = new(ForeignExchangeTerms5)
	return s.ForeignExchangeDetails
}

// SetIncomePreference sets the income preference code.
func (s *SubscriptionOrder4) SetIncomePreference(value string) {
	s.IncomePreference = (*IncomePreference1Code)(&value)
}

// SetLetterIntentReference sets the letter of intent reference.
func (s *SubscriptionOrder4) SetLetterIntentReference(value string) {
	s.LetterIntentReference = (*Max35Text)(&value)
}

// SetAccumulationRightReference sets the accumulation right reference.
func (s *SubscriptionOrder4) SetAccumulationRightReference(value string) {
	s.AccumulationRightReference = (*Max35Text)(&value)
}

// AddChargeDetails appends a new, empty charge entry and returns it.
func (s *SubscriptionOrder4) AddChargeDetails() *Charge8 {
	newValue := new(Charge8)
	s.ChargeDetails = append(s.ChargeDetails, newValue)
	return newValue
}

// AddCommissionDetails appends a new, empty commission entry and returns it.
func (s *SubscriptionOrder4) AddCommissionDetails() *Commission6 {
	newValue := new(Commission6)
	s.CommissionDetails = append(s.CommissionDetails, newValue)
	return newValue
}

// AddTaxDetails appends a new, empty tax entry and returns it.
func (s *SubscriptionOrder4) AddTaxDetails() *Tax6 {
	newValue := new(Tax6)
	s.TaxDetails = append(s.TaxDetails, newValue)
	return newValue
}

// AddSettlementAndCustodyDetails allocates and returns the settlement and custody details.
func (s *SubscriptionOrder4) AddSettlementAndCustodyDetails() *FundSettlementParameters4 {
	s.SettlementAndCustodyDetails = new(FundSettlementParameters4)
	return s.SettlementAndCustodyDetails
}

// SetPhysicalDeliveryIndicator sets the physical delivery indicator.
func (s *SubscriptionOrder4) SetPhysicalDeliveryIndicator(value string) {
	s.PhysicalDeliveryIndicator = (*YesNoIndicator)(&value)
}

// AddPhysicalDeliveryDetails allocates and returns the physical delivery details.
func (s *SubscriptionOrder4) AddPhysicalDeliveryDetails() *NameAndAddress4 {
	s.PhysicalDeliveryDetails = new(NameAndAddress4)
	return s.PhysicalDeliveryDetails
}

// SetRequestedSettlementCurrency sets the requested settlement currency.
func (s *SubscriptionOrder4) SetRequestedSettlementCurrency(value string) {
	s.RequestedSettlementCurrency = (*CurrencyCode)(&value)
}

// SetRequestedNAVCurrency sets the requested NAV currency.
func (s *SubscriptionOrder4) SetRequestedNAVCurrency(value string) {
	s.RequestedNAVCurrency = (*CurrencyCode)(&value)
}

// AddCashSettlementDetails allocates and returns the cash settlement details.
func (s *SubscriptionOrder4) AddCashSettlementDetails() *PaymentTransaction19 {
	s.CashSettlementDetails = new(PaymentTransaction19)
	return s.CashSettlementDetails
}
package string
import (
	"bytes"

	// fmt was only referenced by the debug prints removed from EditDistEx;
	// kept as a blank import so the file still compiles. Safe to delete.
	_ "fmt"
)
// EditDist computes the Levenshtein edit distance between a and b using
// the two-row dynamic-programming formulation.
func EditDist(a, b string) int {
	// Keep the shorter string as the column dimension.
	if len(a) < len(b) {
		return EditDist(b, a)
	}
	prev := make([]int, len(b)+1)
	curr := make([]int, len(b)+1)
	for j := range prev {
		prev[j] = j
	}
	for i := 0; i < len(a); i++ {
		curr[0] = i + 1
		for j := 0; j < len(b); j++ {
			del := prev[j+1] + 1
			ins := curr[j] + 1
			sub := prev[j] + invBool2int(a[i] == b[j])
			curr[j+1] = min(min(del, ins), sub)
		}
		prev, curr = curr, prev
	}
	return prev[len(b)]
}
// EditDistEx computes the distance between strings a and b using a
// Damerau-Levenshtein style recurrence in which a transposition of two
// adjacent characters is treated as a free match in the substitution
// cost.
//
// Fixed: removed the debug fmt.Printf statements that were left inside
// the inner loop (they printed every cell comparison and every row).
func EditDistEx(a, b string) int {
	len1, len2 := len(a), len(b)
	if len1 == 0 {
		return len2
	}
	if len2 == 0 {
		return len1
	}
	// Keep the shorter string as the column dimension.
	if len1 < len2 {
		return EditDistEx(b, a)
	}
	curr, next := 0, 0
	row := make([]int, len2+1)
	for i := range row {
		row[i] = i
	}
	for i := 0; i < len1; i++ {
		curr = i + 1
		for j := 0; j < len2; j++ {
			// Substitution is free on a match or on an adjacent
			// transposition (a[i-1]==b[j] && a[i]==b[j-1]).
			cost := 1
			if a[i] == b[j] || (i > 0 && j > 0 && a[i-1] == b[j] && a[i] == b[j-1]) {
				cost = 0
			}
			next = row[j+1] + 1 // deletion
			if v := row[j] + cost; v < next { // substitution / match
				next = v
			}
			if v := curr + 1; v < next { // insertion
				next = v
			}
			row[j], curr = curr, next
		}
		row[len2] = next
	}
	return next
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
// invBool2int converts a boolean into its inverted cost: 0 for true,
// 1 for false (used as the substitution cost on a character match).
func invBool2int(b bool) int {
	if b {
		return 0
	}
	return 1
}
// primeRK is the prime base used in Rabin-Karp algorithm.
const primeRK = 16777619

// SearchRabinKarp returns the index of the first occurrence of sep in s,
// or -1 if sep is not present, using a Rabin-Karp rolling hash
// (based on src/bytes/bytes.go).
//
// An empty sep matches at index 0. The added length guard prevents the
// out-of-range panic the original suffered when len(s) < len(sep).
func SearchRabinKarp(s, sep []byte) int {
	n := len(sep)
	if n == 0 {
		return 0
	}
	if n > len(s) {
		return -1
	}
	hashsep, pow := hashStr(sep)
	// Hash of the first n-byte window of s.
	var h uint32
	for i := 0; i < n; i++ {
		h = h*primeRK + uint32(s[i])
	}
	if h == hashsep && bytes.Equal(s[:n], sep) {
		return 0
	}
	// Roll the hash across s, comparing bytes only on hash hits.
	for i := n; i < len(s); {
		h *= primeRK
		h += uint32(s[i])
		h -= pow * uint32(s[i-n])
		i++
		if h == hashsep && bytes.Equal(s[i-n:i], sep) {
			return i - n
		}
	}
	return -1
}

// hashStr returns the hash of sep and the multiplicative factor
// primeRK^len(sep) used to drop the leading byte when rolling.
func hashStr(sep []byte) (uint32, uint32) {
	hash := uint32(0)
	for i := 0; i < len(sep); i++ {
		hash = hash*primeRK + uint32(sep[i])
	}
	// Binary exponentiation of primeRK^len(sep).
	var pow, sq uint32 = 1, primeRK
	for i := len(sep); i > 0; i >>= 1 {
		if i&1 != 0 {
			pow *= sq
		}
		sq *= sq
	}
	return hash, pow
}
package nn
import (
"fmt"
"math"
"sync"
)
// relu applies the rectified linear unit max(x, 0) element-wise; the
// layer preserves its input shape.
type relu struct {
	inputShape  Shape
	outputShape Shape
	// mask[i][j] is true where the forward activation of input i,
	// element j, was zero (input <= 0); Backward zeroes the gradient there.
	mask [][]bool
}

// ReLU is an activation function layer.
func ReLU() Layer {
	return &relu{}
}

// Init records the input shape; the output shape is identical.
func (r *relu) Init(inputShape Shape, _ OptimizerFactory) error {
	r.inputShape = inputShape
	r.outputShape = inputShape
	return nil
}

// Call applies max(x, 0) to every element of every input tensor, one
// goroutine per tensor. Unlike Forward it records no mask, so it is
// intended for inference only.
func (r *relu) Call(inputs []*Tensor) []*Tensor {
	outputs := make([]*Tensor, len(inputs))
	wg := new(sync.WaitGroup)
	wg.Add(len(inputs))
	for i, input := range inputs {
		go func(i int, input *Tensor) {
			output := NewTensor(input.shape)
			for j := 0; j < input.shape.Elements(); j++ {
				x := math.Max(input.rawData[j], 0)
				output.rawData[j] = x
			}
			outputs[i] = output
			wg.Done()
		}(i, input)
	}
	wg.Wait()
	return outputs
}

// Forward is Call plus bookkeeping: it records in r.mask which elements
// were clamped (x == 0, i.e. input <= 0) so Backward can zero their
// gradients.
func (r *relu) Forward(inputs []*Tensor) []*Tensor {
	outputs := make([]*Tensor, len(inputs))
	r.mask = make([][]bool, len(inputs))
	wg := new(sync.WaitGroup)
	wg.Add(len(inputs))
	for i, input := range inputs {
		go func(i int, input *Tensor) {
			r.mask[i] = make([]bool, input.shape.Elements())
			output := NewTensor(input.shape)
			for j := 0; j < input.shape.Elements(); j++ {
				x := math.Max(input.rawData[j], 0)
				// x >= 0 always, so x <= 0 means the input was <= 0.
				r.mask[i][j] = x <= 0
				output.rawData[j] = x
			}
			outputs[i] = output
			wg.Done()
		}(i, input)
	}
	wg.Wait()
	return outputs
}

// Backward zeroes the incoming gradients wherever the forward input was
// clamped; elsewhere the gradient passes through unchanged. The douts
// tensors are cloned, not mutated.
func (r *relu) Backward(douts []*Tensor) []*Tensor {
	d := make([]*Tensor, len(douts))
	wg := new(sync.WaitGroup)
	wg.Add(len(douts))
	for i, dout := range douts {
		go func(i int, dout *Tensor) {
			d[i] = dout.Clone()
			for j := 0; j < d[i].shape.Elements(); j++ {
				if r.mask[i][j] {
					d[i].rawData[j] = 0
				}
			}
			wg.Done()
		}(i, dout)
	}
	wg.Wait()
	return d
}

// InputShape returns the shape recorded by Init.
func (r *relu) InputShape() Shape {
	return r.inputShape
}

// OutputShape returns the shape recorded by Init (same as the input).
func (r *relu) OutputShape() Shape {
	return r.outputShape
}

// Params returns nil: ReLU has no trainable parameters.
func (r *relu) Params() []*Tensor {
	return nil
}

// Update is a no-op: there are no parameters to update.
func (r *relu) Update() {}
// sigmoid applies the logistic function 1/(1+e^-x) element-wise; the
// layer preserves its input shape.
type sigmoid struct {
	inputShape  Shape
	outputShape Shape
	// outputs caches the forward activations y so Backward can compute
	// the gradient y*(1-y)*dout without re-running the forward pass.
	outputs []*Tensor
}

// Sigmoid is an activation function layer.
func Sigmoid() Layer {
	return &sigmoid{}
}

// Init records the input shape; the output shape is identical.
func (s *sigmoid) Init(inputShape Shape, _ OptimizerFactory) error {
	s.inputShape = inputShape
	s.outputShape = inputShape
	return nil
}

// Call applies the logistic function to each input tensor, one goroutine
// per tensor, without caching the activations (inference only).
func (s *sigmoid) Call(inputs []*Tensor) []*Tensor {
	outputs := make([]*Tensor, len(inputs))
	wg := new(sync.WaitGroup)
	wg.Add(len(inputs))
	for i, input := range inputs {
		go func(i int, input *Tensor) {
			outputs[i] = input.BroadCast(func(f float64) float64 {
				return 1 / (1 + math.Exp(-f))
			})
			wg.Done()
		}(i, input)
	}
	wg.Wait()
	return outputs
}

// Forward is Call plus caching of the activations in s.outputs for the
// subsequent Backward pass.
func (s *sigmoid) Forward(inputs []*Tensor) []*Tensor {
	s.outputs = make([]*Tensor, len(inputs))
	wg := new(sync.WaitGroup)
	wg.Add(len(inputs))
	for i, input := range inputs {
		go func(i int, input *Tensor) {
			s.outputs[i] = input.BroadCast(func(f float64) float64 {
				return 1 / (1 + math.Exp(-f))
			})
			wg.Done()
		}(i, input)
	}
	wg.Wait()
	return s.outputs
}

// Backward computes (1-y)*y*dout, the derivative of the logistic
// function times the incoming gradient, per tensor.
func (s *sigmoid) Backward(douts []*Tensor) []*Tensor {
	d := make([]*Tensor, len(douts))
	wg := new(sync.WaitGroup)
	wg.Add(len(douts))
	for i, dout := range douts {
		go func(i int, dout *Tensor) {
			d[i] = s.outputs[i].MulBroadCast(-1).AddBroadCast(1).MulTensor(s.outputs[i]).MulTensor(dout)
			wg.Done()
		}(i, dout)
	}
	wg.Wait()
	return d
}

// InputShape returns the shape recorded by Init.
func (s *sigmoid) InputShape() Shape {
	return s.inputShape
}

// OutputShape returns the shape recorded by Init (same as the input).
func (s *sigmoid) OutputShape() Shape {
	return s.outputShape
}

// Params returns nil: sigmoid has no trainable parameters.
func (s *sigmoid) Params() []*Tensor {
	return nil
}

// Update is a no-op: there are no parameters to update.
func (s *sigmoid) Update() {}
// softmax normalises a rank-1 tensor into a probability distribution.
type softmax struct {
	inputShape  Shape
	outputShape Shape
	// outputs caches the forward activations for Backward.
	outputs []*Tensor
}

// Softmax is an activation function layer.
func Softmax() Layer {
	return &softmax{}
}

// Init records the input shape, which must be rank 1; the output shape
// is identical.
func (s *softmax) Init(inputShape Shape, _ OptimizerFactory) error {
	if inputShape.Rank() != 1 {
		return fmt.Errorf("invalid rank %v", inputShape.Rank())
	}
	s.inputShape = inputShape
	s.outputShape = inputShape
	return nil
}

// Call computes exp(x - max(x)) / sum(exp(x - max(x))) per tensor; the
// max subtraction keeps the exponentials numerically stable.
func (s *softmax) Call(inputs []*Tensor) []*Tensor {
	outputs := make([]*Tensor, len(inputs))
	wg := new(sync.WaitGroup)
	wg.Add(len(inputs))
	for i, input := range inputs {
		go func(i int, input *Tensor) {
			max := input.Max()
			exp := input.SubBroadCast(max).Exp()
			sum := exp.Sum()
			outputs[i] = exp.BroadCast(func(f float64) float64 {
				return f / sum
			})
			wg.Done()
		}(i, input)
	}
	wg.Wait()
	return outputs
}

// Forward is Call plus caching of the activations in s.outputs for the
// subsequent Backward pass.
func (s *softmax) Forward(inputs []*Tensor) []*Tensor {
	outputs := make([]*Tensor, len(inputs))
	wg := new(sync.WaitGroup)
	wg.Add(len(inputs))
	for i, input := range inputs {
		go func(i int, input *Tensor) {
			max := input.Max()
			exp := input.SubBroadCast(max).Exp()
			sum := exp.Sum()
			outputs[i] = exp.BroadCast(func(f float64) float64 {
				return f / sum
			})
			wg.Done()
		}(i, input)
	}
	wg.Wait()
	s.outputs = outputs
	return outputs
}

// Backward replaces each incoming gradient with dout*y + y, mutating
// the douts slice in place.
// NOTE(review): dout*y + y matches neither the softmax Jacobian
// (y * (dout - sum(dout*y))) nor the usual softmax+cross-entropy
// shortcut (y - target); confirm the intended pairing with the loss layer.
func (s *softmax) Backward(douts []*Tensor) []*Tensor {
	wg := new(sync.WaitGroup)
	wg.Add(len(s.outputs))
	for i, output := range s.outputs {
		go func(i int, output *Tensor) {
			douts[i] = douts[i].MulTensor(output).AddTensor(output)
			wg.Done()
		}(i, output)
	}
	wg.Wait()
	return douts
}

// InputShape returns the shape recorded by Init.
func (s *softmax) InputShape() Shape {
	return s.inputShape
}

// OutputShape returns the shape recorded by Init (same as the input).
func (s *softmax) OutputShape() Shape {
	return s.outputShape
}

// Params returns nil: softmax has no trainable parameters.
func (s *softmax) Params() []*Tensor {
	return nil
}

// Update is a no-op: there are no parameters to update.
func (s *softmax) Update() {}
package prayertime
import (
"fmt"
m "math"
"time"
)
const (
	// Conversion factors between degrees and radians.
	radToDeg = 180 / m.Pi
	degToRad = m.Pi / 180
)

// coordinate is a geographic location plus its UTC offset (zone) in hours.
type coordinate struct {
	longitude float64
	latitude  float64
	zone      float64
}

// Prayertime computes and holds the daily prayer times for one date and
// location. The exported time fields are fractional hours of the local day
// (e.g. 5.5 == 05:30); use ToHRTime to format them.
type Prayertime struct {
	date              time.Time
	coordinate        *coordinate
	CalculationMethod int  // one of the Calc* constants; selects the twilight angles
	DST               bool // when true, Calculate adds one hour to every time
	Mazhab            int  // MazhabHanafi changes the Asr shadow factor
	Fajr              float64
	Shrouk            float64
	Zuhr              float64
	Asr               float64
	Maghrib           float64
	Isha              float64
	dec               float64 // solar declination in degrees, set by Calculate
}
// removeDuplication normalizes an angle in degrees by removing whole
// multiples of 360. m.Mod preserves the fractional part of the angle
// (the previous int-truncating version rounded every angle down to a whole
// degree, costing up to ~4 minutes of precision per computed time).
// Like the original, the result keeps the sign of the input.
func removeDuplication(val float64) float64 {
	return m.Mod(val, 360)
}
// equation returns, in degrees, the hour angle at which the sun reaches
// altitude alt (degrees) for this latitude, given the solar declination
// computed by Calculate. Dividing the result by 15 converts it to hours
// offset from local solar noon.
func (self *Prayertime) equation(alt float64) float64 {
	return radToDeg * (m.Acos((m.Sin(degToRad*(alt)) - m.Sin(degToRad*(self.dec))*m.Sin(degToRad*(self.coordinate.latitude))) / (m.Cos(degToRad*(self.dec)) * m.Cos(degToRad*(self.coordinate.latitude)))))
}
// Calculate computes the prayer times for the configured date and location
// and stores the results in the Fajr/Shrouk/Zuhr/Asr/Maghrib/Isha fields.
// It derives the sun's declination and local solar noon from the Julian
// day, then offsets from noon using the twilight angles of the selected
// calculation method.
func (self *Prayertime) Calculate() {
	year := self.date.Year()
	month := int(self.date.Month())
	day := float64(self.date.Day())
	longitude := self.coordinate.longitude
	latitude := self.coordinate.latitude
	zone := self.coordinate.zone
	// Days relative to the J2000.0 epoch.
	julianDay := -730531.5 + float64(367*year) - float64((year+(month+9)/12)*7/4) + float64(275*month/9) + day
	sunLength := 280.461 + 0.9856474*julianDay
	sunLength = removeDuplication(sunLength)
	middleSun := 357.528 + 0.9856003*julianDay
	middleSun = removeDuplication(middleSun)
	// Ecliptic longitude of the sun.
	lamda := sunLength + 1.915*m.Sin(degToRad*(middleSun)) + 0.02*m.Sin(degToRad*(2*middleSun))
	lamda = removeDuplication(lamda)
	obliquity := 23.439 - 0.0000004*julianDay
	// Right ascension, corrected into the same quadrant as lamda.
	alpha := radToDeg * (m.Atan(m.Cos(degToRad*(obliquity)) * m.Tan(degToRad*(lamda))))
	if 90 < lamda && lamda < 180 {
		alpha += 180
	} else if 180 < lamda && lamda < 360 {
		// BUGFIX: the condition previously read "100 < lamda", a typo that
		// applied the 360-degree correction to part of the wrong quadrant.
		alpha += 360
	}
	ST := 100.46 + 0.985647352*julianDay
	ST = removeDuplication(ST)
	// Solar declination, consumed by equation().
	self.dec = radToDeg * (m.Asin(m.Sin(degToRad*(obliquity)) * m.Sin(degToRad*(lamda))))
	noon := alpha - ST
	if noon < 0 {
		noon += 360
	}
	UTNoon := noon - longitude
	localNoon := (UTNoon / 15) + zone
	zuhr := localNoon                                // Zuhr Time.
	maghrib := localNoon + self.equation(-0.8333)/15 // Maghrib Time
	shrouk := localNoon - self.equation(-0.8333)/15  // Shrouk Time
	// Twilight angles depend on the selected calculation authority.
	fajrAlt := 0.0
	ishaAlt := 0.0
	switch self.CalculationMethod {
	case CalcUmmAlQuraUniversity:
		fajrAlt = -19
	case CalcEgyptianGeneralAuthorityOfSurvey:
		fajrAlt = -19.5
		ishaAlt = -17.5
	case CalcMuslimWorldLeague:
		fajrAlt = -18
		ishaAlt = -17
	case CalcIslamicSocietyOfNorthAmerica:
		fajrAlt = -15
		ishaAlt = -15
	case CalcUnivOfIslamicSciencesKarachi:
		fajrAlt = -18
		ishaAlt = -18
	}
	fajr := localNoon - self.equation(fajrAlt)/15 // Fajr Time
	isha := localNoon + self.equation(ishaAlt)/15 // Isha Time
	if self.CalculationMethod == CalcUmmAlQuraUniversity {
		// Umm Al-Qura fixes Isha at 90 minutes after Maghrib.
		isha = maghrib + 1.5
	}
	// Asr shadow factor: 2 for the Hanafi mazhab, 1 otherwise.
	shadowFactor := 1.0
	if self.Mazhab == MazhabHanafi {
		shadowFactor = 2
	}
	asrAlt := 90 - radToDeg*(m.Atan(shadowFactor+m.Tan(degToRad*(m.Abs(latitude-self.dec)))))
	asr := localNoon + self.equation(asrAlt)/15 // Asr Time.
	// Add one hour to all times when daylight saving time is active.
	if self.DST {
		fajr++
		shrouk++
		zuhr++
		asr++
		maghrib++
		isha++
	}
	self.Shrouk = shrouk
	self.Fajr = fajr
	self.Zuhr = zuhr
	self.Asr = asr
	self.Maghrib = maghrib
	self.Isha = isha
}
// ToHRTime converts a fractional-hour value (e.g. the Fajr/Shrouk/... fields
// computed by Calculate) to a human readable "H:M:S AM|PM" string.
// isAM selects whether the value may fall in the morning; when false the
// period is always reported as PM.
// Fields are not zero-padded (e.g. "5:3:7 AM"), which is the format
// consumed by ToDateTime.
// The rewrite removes an unreachable "intval%12 == 12" branch (a value mod
// 12 can never equal 12) and a local variable that shadowed the time
// package; the output is unchanged.
func ToHRTime(val float64, isAM bool) string {
	intval := int(val)
	// Period: morning values that are an exact multiple of 12 (0, 12, 24...)
	// are reported as PM, everything else as AM.
	zone := "PM"
	if isAM && intval%12 > 0 {
		zone = "AM"
	}
	// 12-hour clock: only values strictly above 12 wrap around.
	hours := intval
	if intval > 12 {
		hours = intval % 12
	}
	// Fractional hour -> minutes, then fractional minute -> seconds.
	val -= m.Floor(val)
	val *= 60
	minutes := int(val)
	val -= m.Floor(val)
	val *= 60
	seconds := int(val)
	return fmt.Sprintf("%d:%d:%d %s", hours, minutes, seconds, zone)
}
// New builds a Prayertime for the given location (longitude/latitude in
// degrees, zone as UTC offset in hours), date, calculation method, mazhab
// and daylight-saving flag. Call Calculate on the result to fill in the
// time fields.
func New(longitude, latitude, zone float64, year int, month time.Month, day int, calculationmethod int, mazhab int, dst bool) *Prayertime {
	return &Prayertime{
		coordinate: &coordinate{
			latitude:  latitude,
			longitude: longitude,
			zone:      zone,
		},
		date:              time.Date(year, month, day, 0, 0, 0, 0, time.UTC),
		CalculationMethod: calculationmethod,
		// BUGFIX: the mazhab argument was previously accepted but never
		// stored, so Calculate always used the non-Hanafi Asr factor.
		Mazhab: mazhab,
		DST:    dst,
	}
}
// GetQibla returns the bearing of the Qibla from north, in degrees, for
// this location, using the great-circle bearing towards the Kaaba
// (21.423333 N, 39.823333 E).
func (self *Prayertime) GetQibla() float64 {
	kLat := degToRad * (21.423333)
	kLong := degToRad * (39.823333)
	longitude := degToRad * (self.coordinate.longitude)
	latitude := degToRad * (self.coordinate.latitude)
	numerator := m.Sin(kLong - longitude)
	denominator := (m.Cos(latitude) * m.Tan(kLat)) - (m.Sin(latitude) * m.Cos(kLong-longitude))
	q := m.Atan2(numerator, denominator)
	// BUGFIX: Atan2 returns radians; converting to degrees requires
	// radToDeg. The original multiplied by degToRad, shrinking the result
	// by a factor of (180/pi)^2.
	return radToDeg * q
}
// GetQiblaDistance returns the great-circle distance from this location to
// the Kaaba, in kilometers, using the spherical law of cosines.
func (self *Prayertime) GetQiblaDistance() float64 {
	const earthRadiusKm = 6378.7 // kilometers
	kaabaLat := degToRad * (21.423333)
	kaabaLon := degToRad * (39.823333)
	lon := degToRad * (self.coordinate.longitude)
	lat := degToRad * (self.coordinate.latitude)
	cosAngle := m.Sin(kaabaLat)*m.Sin(lat) + m.Cos(kaabaLat)*m.Cos(lat)*m.Cos(lon-kaabaLon)
	return m.Acos(cosAngle) * earthRadiusKm
}
// Show prints the raw (fractional-hour) times for quick access, in the
// order Fajr, Zuhr, Asr, Maghrib, Isha.
func (self *Prayertime) Show() {
	fmt.Println(self.Fajr, self.Zuhr, self.Asr, self.Maghrib, self.Isha)
}

// SimpleReport prints all times to stdout in human readable form, one per
// line, in the order Fajr, Shrouk, Zuhr, Asr, Maghrib, Isha.
func (self *Prayertime) SimpleReport() {
	fmt.Println(ToHRTime(self.Fajr, true))
	fmt.Println(ToHRTime(self.Shrouk, true))
	fmt.Println(ToHRTime(self.Zuhr, true))
	fmt.Println(ToHRTime(self.Asr, false))
	fmt.Println(ToHRTime(self.Maghrib, false))
	fmt.Println(ToHRTime(self.Isha, false))
}
// ToDateTime parses a string produced by ToHRTime (e.g. "5:30:0 AM") into a
// time.Time.
// BUGFIX: Go's time.Parse uses a reference-date layout, not strftime verbs,
// so the layout must be "3:4:5 PM" (unpadded 12-hour clock); the previous
// "%I:%M:%S %p" layout could never match and every call returned an error.
func ToDateTime(hrtime string) (time.Time, error) {
	return time.Parse("3:4:5 PM", hrtime)
}
package translator
import (
"fmt"
"strings"
)
/*
The definitive grammar guide on English to Gopherish translation:
1. If a word starts with a vowel letter, add prefix “g” to the word (ex. apple => gapple)
2. If a word starts with the consonant letters “xr”, add the prefix “ge” to the beginning of the word.
Such words as “xray” actually sound in the beginning with vowel sound as you pronounce them so a true gopher would say “gexray”.
3. If a word starts with a consonant sound, move it to the end of the word and then add “ogo” suffix to the word.
Consonant sounds can be made up of multiple consonants, a.k.a. a consonant cluster (e.g. "chair" -> "airchogo”).
4. If a word starts with a consonant sound followed by "qu", move it to the end of the word, and then add "ogo" suffix to the word (e.g. "square" -> "aresquogo").
*/
// Use maps for easier "contains" checks; the stored values are never read,
// only key membership matters.
// All known vowels ("y" is treated as a vowel here).
var vowels = map[string]interface{}{"a": 0, "e": 0, "i": 0, "o": 0, "u": 0, "y": 0}

// All known consonants.
var consonants = map[string]interface{}{"b": 0, "c": 0, "d": 0, "f": 0, "g": 0, "h": 0, "j": 0, "k": 0, "l": 0, "m": 0,
	"n": 0, "p": 0, "q": 0, "r": 0, "s": 0, "t": 0, "v": 0, "w": 0, "x": 0, "z": 0}

// Capitals of all known letters.
var capitals = map[string]interface{}{"B": 0, "C": 0, "D": 0, "F": 0, "G": 0, "H": 0, "J": 0, "K": 0, "L": 0, "M": 0,
	"N": 0, "P": 0, "Q": 0, "R": 0, "S": 0, "T": 0, "V": 0, "W": 0, "X": 0, "Z": 0, "Y": 0, "A": 0, "E": 0, "I": 0,
	"O": 0, "U": 0}
// TranslateWord translates a single English word to Gopherish, preserving
// leading/trailing punctuation around the translated core.
// Words containing apostrophes — and anything else that cannot be
// translated — yield "(gunintelligible)".
func TranslateWord(word string) string {
	// Empty in English should be empty in Gopherish.
	if word == "" {
		return ""
	}
	// Skip translating words with apostrophes.
	if strings.Contains(word, "'") {
		return "(gunintelligible)"
	}
	// Strip word from punctuation.
	leading, word, trailing := stripPunctuation(word)
	// BUGFIX: a token made up entirely of punctuation (e.g. "...") leaves
	// an empty core, and the word[:1] accesses below used to panic on it.
	// Such a token has nothing to translate; return it as-is.
	if word == "" {
		return fmt.Sprintf("%s%s", leading, trailing)
	}
	// Determine if word is capitalized.
	_, isCapital := capitals[word[:1]]
	// Work with lowercase for consistency.
	word = strings.ToLower(word)
	first := word[:1]
	// Output word in Gopherish.
	translated := ""
	// Handle words starting with a vowel.
	if _, ok := vowels[first]; ok {
		translated = prefixG(word)
	}
	// Handle words starting with "xr"...
	if len(word) >= 2 && word[0:2] == "xr" {
		translated = prefixGe(word)
		// ...and handle all other consonant sounds.
	} else if _, ok := consonants[first]; ok {
		translated = postfixOgo(extractConsonantSound(word))
	}
	// Capitalize if necessary.
	// Assume capitalized words in English are capitalized in Gopherish, too.
	if isCapital {
		translated = strings.Title(translated)
	}
	translated = fmt.Sprintf("%s%s%s", leading, translated, trailing)
	// Make the user aware of words that cannot be translated, such as ones
	// starting with unhandled symbols.
	if translated == "" {
		translated = "(gunintelligible)"
	}
	return translated
}
// extractConsonantSound splits word into its leading consonant cluster and
// the remaining base. Per grammar rule 4, a cluster ending in "q" absorbs a
// following "u" (e.g. "square" -> "squ", "are").
// Assumes word starts with at least one consonant (as in actual English
// words; a consonant sound here is any run of consonant letters).
func extractConsonantSound(word string) (string, string) {
	conSound := ""
	// Move consonants from the front of word into conSound.
	for word != "" {
		first := word[0:1]
		if _, hasCon := consonants[first]; !hasCon {
			break
		}
		conSound = fmt.Sprintf("%s%s", conSound, first)
		word = word[1:]
	}
	// Absorb a "u" directly after a trailing "q".
	// BUGFIX: the word != "" guard prevents the out-of-range slice the
	// original hit on all-consonant words ending in "q" (e.g. "q" itself),
	// and HasSuffix also tolerates an empty cluster.
	if word != "" && strings.HasSuffix(conSound, "q") && word[:1] == "u" {
		conSound = fmt.Sprintf("%su", conSound)
		word = word[1:]
	}
	return conSound, word
}
// prefixG implements rule 1: a vowel-initial word gains a "g" prefix.
func prefixG(word string) string {
	return "g" + word
}

// prefixGe implements rule 2: an "xr"-initial word gains a "ge" prefix.
func prefixGe(word string) string {
	return "ge" + word
}

// postfixOgo implements rules 3 and 4: the leading consonant cluster
// (possibly including a trailing "qu") moves to the end of the word and
// "ogo" is appended.
func postfixOgo(consonants string, base string) string {
	return base + consonants + "ogo"
}
// TranslateSentence translates a sentence from English to Gopherish,
// word by word. The sentence must end in '.', '?' or '!'.
func TranslateSentence(sentence string) (string, error) {
	// BUGFIX: indexing the last byte of an empty sentence used to panic.
	if sentence == "" {
		return "", fmt.Errorf("empty sentence.\nOnly '.', '?' and '!' are supported")
	}
	ending := sentence[len(sentence)-1:]
	if ending != "!" && ending != "." && ending != "?" {
		return "", fmt.Errorf("invalid sentence ending in '%s'.\nOnly '.', '?' and '!' are supported", sentence)
	}
	words := strings.Fields(sentence)
	// Translate each word in place; TranslateWord preserves the
	// punctuation attached to each token.
	for i, word := range words {
		words[i] = TranslateWord(word)
	}
	return strings.Join(words, " "), nil
}
// stripPunctuation separates leading and trailing punctuation from words.
// It returns the leading punctuation, the extracted word, and the trailing
// punctuation, in that order. A token made entirely of punctuation comes
// back with an empty word and everything in the leading part, matching the
// original front-first stripping order.
func stripPunctuation(word string) (string, string, string) {
	// Advance past leading non-letters.
	start := 0
	for start < len(word) && !isALetter(word[start:start+1]) {
		start++
	}
	// Retreat past trailing non-letters, never crossing start.
	end := len(word)
	for end > start && !isALetter(word[end-1:end]) {
		end--
	}
	return word[:start], word[start:end], word[end:]
}
// isALetter sifts out letters from punctuation.
// Assume all non-letters are punctuation.
func isALetter(symbol string) bool {
if _, isVowel := vowels[symbol]; isVowel {
return true
}
if _, isConsonant := consonants[symbol]; isConsonant {
return true
}
if _, isCapital := capitals[symbol]; isCapital {
return true
}
return false
} | pkg/translator/translator.go | 0.661704 | 0.47792 | translator.go | starcoder |
package dag
import (
"fmt"
"sync"
"github.com/goombaio/orderedmap"
)
// DAG type implements a Directed Acyclic Graph data structure.
// Vertices are kept in an ordered map keyed by vertex ID, so iteration
// follows insertion order; the mutex guards that map.
type DAG struct {
	mu       sync.Mutex
	vertices orderedmap.OrderedMap
}

// NewDAG creates a new Directed Acyclic Graph instance.
func NewDAG() *DAG {
	d := &DAG{
		vertices: *orderedmap.NewOrderedMap(),
	}
	return d
}

// AddVertex adds a vertex to the graph, keyed by its ID.
// It always returns nil.
func (d *DAG) AddVertex(v *Vertex) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.vertices.Put(v.ID, v)
	return nil
}
// DeleteVertex deletes a vertex and all the edges referencing it from the
// graph. It returns an error if the vertex is not part of the graph.
func (d *DAG) DeleteVertex(vertex *Vertex) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	// Check the vertex exists, stopping at the first match instead of
	// scanning the whole map.
	existsVertex := false
	for _, v := range d.vertices.Values() {
		if v == vertex {
			existsVertex = true
			break
		}
	}
	if !existsVertex {
		return fmt.Errorf("Vertex with ID %v not found", vertex.ID)
	}
	// BUGFIX: honor the documented contract and drop any edges referencing
	// the removed vertex, so no dangling Children/Parents entries survive
	// on the remaining vertices.
	for _, v := range d.vertices.Values() {
		v.(*Vertex).Children.Remove(vertex)
		v.(*Vertex).Parents.Remove(vertex)
	}
	d.vertices.Remove(vertex.ID)
	return nil
}
// AddEdge adds a directed edge between two existing vertices to the graph.
// It returns an error if either endpoint is missing from the graph or if
// the edge already exists.
func (d *DAG) AddEdge(tailVertex *Vertex, headVertex *Vertex) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	// Check both endpoints are part of the graph, stopping as soon as both
	// have been seen.
	tailExists := false
	headExists := false
	for _, vertex := range d.vertices.Values() {
		if vertex == tailVertex {
			tailExists = true
		}
		if vertex == headVertex {
			headExists = true
		}
		if tailExists && headExists {
			break
		}
	}
	if !tailExists {
		return fmt.Errorf("Vertex with ID %v not found", tailVertex.ID)
	}
	if !headExists {
		return fmt.Errorf("Vertex with ID %v not found", headVertex.ID)
	}
	// Reject duplicate edges.
	for _, childVertex := range tailVertex.Children.Values() {
		if childVertex == headVertex {
			return fmt.Errorf("Edge (%v,%v) already exists", tailVertex.ID, headVertex.ID)
		}
	}
	// Record the edge on both endpoints.
	tailVertex.Children.Add(headVertex)
	headVertex.Parents.Add(tailVertex)
	return nil
}
// DeleteEdge deletes a directed edge between two existing vertices from the
// graph. Deleting a non-existent edge is a no-op; the returned error is
// always nil.
func (d *DAG) DeleteEdge(tailVertex *Vertex, headVertex *Vertex) error {
	for _, childVertex := range tailVertex.Children.Values() {
		if childVertex == headVertex {
			tailVertex.Children.Remove(childVertex)
			// BUGFIX: AddEdge records the edge on both endpoints, so the
			// reverse link must be removed too; the original left a
			// dangling Parents entry on the head vertex.
			headVertex.Parents.Remove(tailVertex)
			break
		}
	}
	return nil
}
// GetVertex returns the vertex stored in the graph under the given ID, or a
// nil vertex plus an error if no such vertex exists.
func (d *DAG) GetVertex(id interface{}) (*Vertex, error) {
	v, found := d.vertices.Get(id)
	if !found {
		return nil, fmt.Errorf("vertex %s not found in the graph", id)
	}
	return v.(*Vertex), nil
}
// Order returns the number of vertices in the graph.
func (d *DAG) Order() int {
	numVertices := d.vertices.Size()
	return numVertices
}

// Size returns the number of edges in the graph, counted as the sum of
// every vertex's outgoing (Children) edges.
func (d *DAG) Size() int {
	numEdges := 0
	for _, vertex := range d.vertices.Values() {
		numEdges = numEdges + vertex.(*Vertex).Children.Size()
	}
	return numEdges
}
// SinkVertices returns the vertices with no children defined by the graph
// edges, i.e. the vertices with no outgoing edge.
func (d *DAG) SinkVertices() []*Vertex {
	var sinkVertices []*Vertex
	for _, vertex := range d.vertices.Values() {
		if vertex.(*Vertex).Children.Size() == 0 {
			sinkVertices = append(sinkVertices, vertex.(*Vertex))
		}
	}
	return sinkVertices
}

// SourceVertices returns the vertices with no parent defined by the graph
// edges, i.e. the vertices with no incoming edge.
func (d *DAG) SourceVertices() []*Vertex {
	var sourceVertices []*Vertex
	for _, vertex := range d.vertices.Values() {
		if vertex.(*Vertex).Parents.Size() == 0 {
			sourceVertices = append(sourceVertices, vertex.(*Vertex))
		}
	}
	return sourceVertices
}
// Successors returns the vertices that are direct children of the given
// vertex, or an error if the vertex is not part of the graph.
func (d *DAG) Successors(vertex *Vertex) ([]*Vertex, error) {
	if _, err := d.GetVertex(vertex.ID); err != nil {
		return nil, fmt.Errorf("vertex %s not found in the graph", vertex.ID)
	}
	var successors []*Vertex
	for _, child := range vertex.Children.Values() {
		successors = append(successors, child.(*Vertex))
	}
	return successors, nil
}

// Predecessors returns the vertices that are direct parents of the given
// vertex, or an error if the vertex is not part of the graph.
func (d *DAG) Predecessors(vertex *Vertex) ([]*Vertex, error) {
	if _, err := d.GetVertex(vertex.ID); err != nil {
		return nil, fmt.Errorf("vertex %s not found in the graph", vertex.ID)
	}
	var predecessors []*Vertex
	for _, parent := range vertex.Parents.Values() {
		predecessors = append(predecessors, parent.(*Vertex))
	}
	return predecessors, nil
}
// String implements stringer interface and prints an string representation
// of this instance.
func (d *DAG) String() string {
result := fmt.Sprintf("DAG Vertices: %d - Edges: %d\n", d.Order(), d.Size())
result += fmt.Sprintf("Vertices:\n")
for _, vertex := range d.vertices.Values() {
vertex = vertex.(*Vertex)
result += fmt.Sprintf("%s", vertex)
}
return result
} | dag.go | 0.845815 | 0.609263 | dag.go | starcoder |
package helper
import (
"regexp"
"time"
"github.com/onsi/gomega"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
)
// DevSession represents a session running `odo dev`
/*
It can be used in different ways:
# Starting a session for a series of tests and stopping the session after the tests:
This format can be used when you want to run several independent tests
when the `odo dev` command is running in the background
```
When("running dev session", func() {
var devSession DevSession
var outContents []byte
var errContents []byte
BeforeEach(func() {
devSession, outContents, errContents = helper.StartDevMode()
})
AfterEach(func() {
devSession.Stop()
})
It("...", func() {
// Test with `dev odo` running in the background
// outContents and errContents are contents of std/err output when dev mode is started
})
It("...", func() {
// Test with `dev odo` running in the background
})
})
# Starting a session and stopping it cleanly
This format can be used to test the behaviour of `odo dev` when it is stopped cleanly
When("running dev session and stopping it with cleanup", func() {
var devSession DevSession
var outContents []byte
var errContents []byte
BeforeEach(func() {
devSession, outContents, errContents = helper.StartDevMode()
defer devSession.Stop()
[...]
})
It("...", func() {
// Test after `odo dev` has been stopped cleanly
// outContents and errContents are contents of std/err output when dev mode is started
})
It("...", func() {
// Test after `odo dev` has been stopped cleanly
})
})
# Starting a session and stopping it immediately without cleanup
This format can be used to test the behaviour of `odo dev` when it is stopped with a KILL signal
When("running dev session and stopping it without cleanup", func() {
var devSession DevSession
var outContents []byte
var errContents []byte
BeforeEach(func() {
devSession, outContents, errContents = helper.StartDevMode()
defer devSession.Kill()
[...]
})
It("...", func() {
// Test after `odo dev` has been killed
// outContents and errContents are contents of std/err output when dev mode is started
})
It("...", func() {
// Test after `odo dev` has been killed
})
})
# Running a dev session and executing some tests inside this session
This format can be used to run a series of related tests in dev mode
All tests will be run in the same session (ideal for e2e tests)
To run independent tests, previous formats should be used instead.
It("should do ... in dev mode", func() {
helper.RunDevMode(func(session *gexec.Session, outContents []byte, errContents []byte, ports map[string]string) {
// test on dev mode
// outContents and errContents are contents of std/err output when dev mode is started
// ports contains a map where keys are container ports and associated values are local IP:port redirecting to these local ports
})
})
# Waiting for file synchronisation to finish
The method session.WaitSync() can be used to wait for the synchronization of files to finish.
The method returns the contents of std/err output since the end of the dev mode started or previous sync, and until the end of the synchronization.
*/
type DevSession struct {
	session *gexec.Session
	// stopped records that Stop has already terminated the session,
	// making Stop idempotent.
	stopped bool
}

// StartDevMode starts a dev session with `odo dev --random-ports`,
// appending any extra opts to the command line.
// It blocks until dev mode is completely started, then returns the session
// structure, the contents of the standard and error outputs accumulated
// during startup, and a map of container port -> local "127.0.0.1:<port>"
// redirections (see getPorts).
// Both output buffers are cleared on success so that later reads (e.g.
// WaitSync) only see output produced after startup.
func StartDevMode(opts ...string) (DevSession, []byte, []byte, map[string]string, error) {
	args := []string{"dev", "--random-ports"}
	args = append(args, opts...)
	session := CmdRunner("odo", args...)
	WaitForOutputToContain("Press Ctrl+c to exit `odo dev` and delete resources from the cluster", 360, 10, session)
	result := DevSession{
		session: session,
	}
	outContents := session.Out.Contents()
	errContents := session.Err.Contents()
	err := session.Out.Clear()
	if err != nil {
		return DevSession{}, nil, nil, nil, err
	}
	err = session.Err.Clear()
	if err != nil {
		return DevSession{}, nil, nil, nil, err
	}
	return result, outContents, errContents, getPorts(string(outContents)), nil
}
// Kill terminates a Dev session abruptly, without handling any cleanup.
func (o DevSession) Kill() {
	o.session.Kill()
}

// Stop terminates a Dev session cleanly (equivalent to hitting Ctrl-c).
// Calling Stop more than once is a no-op.
func (o *DevSession) Stop() {
	if o.stopped {
		return
	}
	err := terminateProc(o.session)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	o.stopped = true
}

// WaitEnd blocks until the session process exits, waiting up to 3 minutes.
func (o DevSession) WaitEnd() {
	o.session.Wait(3 * time.Minute)
}

// WaitSync waits for the synchronization of files to be finished.
// It returns the contents of the standard and error outputs accumulated
// since the end of dev-mode startup (or the previous sync) up to the end of
// the synchronization, clearing both buffers afterwards.
func (o DevSession) WaitSync() ([]byte, []byte, error) {
	WaitForOutputToContain("Pushing files...", 180, 10, o.session)
	WaitForOutputToContain("Watching for changes in the current directory", 240, 10, o.session)
	outContents := o.session.Out.Contents()
	errContents := o.session.Err.Contents()
	err := o.session.Out.Clear()
	if err != nil {
		return nil, nil, err
	}
	err = o.session.Err.Clear()
	if err != nil {
		return nil, nil, err
	}
	return outContents, errContents, nil
}

// CheckNotSynced asserts that no file push is reported on the session's
// standard output for the whole of the given timeout.
func (o DevSession) CheckNotSynced(timeout time.Duration) {
	Consistently(func() string {
		return string(o.session.Out.Contents())
	}, timeout).ShouldNot(ContainSubstring("Pushing files..."))
}

// RunDevMode runs a dev session and executes the `inside` code when the dev
// mode is completely started.
// The inside handler is passed the internal session pointer, the contents
// of the standard and error outputs, and a map of container port -> local
// "127.0.0.1:<port>" endpoints for the ports opened by the component.
// The session is stopped cleanly and waited for once inside returns.
func RunDevMode(inside func(session *gexec.Session, outContents []byte, errContents []byte, ports map[string]string)) error {
	session, outContents, errContents, urls, err := StartDevMode()
	if err != nil {
		return err
	}
	defer func() {
		session.Stop()
		session.WaitEnd()
	}()
	inside(session.session, outContents, errContents, urls)
	return nil
}
// getPorts extracts the port-forwarding pairs reported on s.
// A line such as "- Forwarding from 127.0.0.1:40001 -> 3000" yields the
// map entry { "3000": "127.0.0.1:40001" }, i.e. container port -> local
// endpoint.
func getPorts(s string) map[string]string {
	re := regexp.MustCompile("(127.0.0.1:[0-9]+) -> ([0-9]+)")
	ports := map[string]string{}
	for _, match := range re.FindAllStringSubmatch(s, -1) {
		ports[match[2]] = match[1]
	}
	return ports
}
package schema
import (
"crypto/sha256"
"github.com/codenotary/immudb/embedded/htree"
"github.com/codenotary/immudb/embedded/store"
)
// TxTo converts an embedded-store transaction into its gRPC schema
// representation, copying each entry's key, value hash and value length.
func TxTo(tx *store.Tx) *Tx {
	entries := make([]*TxEntry, len(tx.Entries()))
	for i, e := range tx.Entries() {
		hValue := e.HVal()
		entries[i] = &TxEntry{
			Key:    e.Key(),
			HValue: hValue[:],
			VLen:   int32(e.VLen()),
		}
	}
	return &Tx{
		Metadata: TxMetatadaTo(tx.Metadata()),
		Entries:  entries,
	}
}

// TxFrom rebuilds an embedded-store transaction from its gRPC schema
// representation and recomputes the derived state (hash tree and Alh) so
// the result is self-consistent.
func TxFrom(stx *Tx) *store.Tx {
	entries := make([]*store.TxEntry, len(stx.Entries))
	for i, e := range stx.Entries {
		entries[i] = store.NewTxEntry(e.Key, int(e.VLen), DigestFrom(e.HValue), 0)
	}
	tx := store.NewTxWithEntries(entries)
	tx.ID = stx.Metadata.Id
	tx.PrevAlh = DigestFrom(stx.Metadata.PrevAlh)
	tx.Ts = stx.Metadata.Ts
	tx.BlTxID = stx.Metadata.BlTxId
	tx.BlRoot = DigestFrom(stx.Metadata.BlRoot)
	// Recompute derived values from the restored entries and metadata.
	tx.BuildHashTree()
	tx.CalcAlh()
	return tx
}
// InclusionProofTo converts an embedded htree inclusion proof into its gRPC
// schema representation.
func InclusionProofTo(iproof *htree.InclusionProof) *InclusionProof {
	return &InclusionProof{
		Leaf:  int32(iproof.Leaf),
		Width: int32(iproof.Width),
		Terms: DigestsTo(iproof.Terms),
	}
}

// InclusionProofFrom converts a gRPC schema inclusion proof back into its
// embedded htree representation.
func InclusionProofFrom(iproof *InclusionProof) *htree.InclusionProof {
	return &htree.InclusionProof{
		Leaf:  int(iproof.Leaf),
		Width: int(iproof.Width),
		Terms: DigestsFrom(iproof.Terms),
	}
}

// DualProofTo converts an embedded-store dual proof into its gRPC schema
// representation, field by field.
func DualProofTo(dualProof *store.DualProof) *DualProof {
	return &DualProof{
		SourceTxMetadata:   TxMetatadaTo(dualProof.SourceTxMetadata),
		TargetTxMetadata:   TxMetatadaTo(dualProof.TargetTxMetadata),
		InclusionProof:     DigestsTo(dualProof.InclusionProof),
		ConsistencyProof:   DigestsTo(dualProof.ConsistencyProof),
		TargetBlTxAlh:      dualProof.TargetBlTxAlh[:],
		LastInclusionProof: DigestsTo(dualProof.LastInclusionProof),
		LinearProof:        LinearProofTo(dualProof.LinearProof),
	}
}

// TxMetatadaTo converts embedded-store transaction metadata into its gRPC
// schema representation.
// Note: "Metatada" is a long-standing typo in the exported name; renaming
// it would break existing callers.
func TxMetatadaTo(txMetadata *store.TxMetadata) *TxMetadata {
	return &TxMetadata{
		Id:       txMetadata.ID,
		PrevAlh:  txMetadata.PrevAlh[:],
		Ts:       txMetadata.Ts,
		Nentries: int32(txMetadata.NEntries),
		EH:       txMetadata.Eh[:],
		BlTxId:   txMetadata.BlTxID,
		BlRoot:   txMetadata.BlRoot[:],
	}
}

// LinearProofTo converts an embedded-store linear proof into its gRPC
// schema representation.
func LinearProofTo(linearProof *store.LinearProof) *LinearProof {
	return &LinearProof{
		SourceTxId: linearProof.SourceTxID,
		TargetTxId: linearProof.TargetTxID,
		Terms:      DigestsTo(linearProof.Terms),
	}
}

// DualProofFrom converts a gRPC schema dual proof back into its
// embedded-store representation.
func DualProofFrom(dproof *DualProof) *store.DualProof {
	return &store.DualProof{
		SourceTxMetadata:   TxMetadataFrom(dproof.SourceTxMetadata),
		TargetTxMetadata:   TxMetadataFrom(dproof.TargetTxMetadata),
		InclusionProof:     DigestsFrom(dproof.InclusionProof),
		ConsistencyProof:   DigestsFrom(dproof.ConsistencyProof),
		TargetBlTxAlh:      DigestFrom(dproof.TargetBlTxAlh),
		LastInclusionProof: DigestsFrom(dproof.LastInclusionProof),
		LinearProof:        LinearProofFrom(dproof.LinearProof),
	}
}

// TxMetadataFrom converts gRPC schema transaction metadata back into its
// embedded-store representation.
func TxMetadataFrom(txMetadata *TxMetadata) *store.TxMetadata {
	return &store.TxMetadata{
		ID:       txMetadata.Id,
		PrevAlh:  DigestFrom(txMetadata.PrevAlh),
		Ts:       txMetadata.Ts,
		NEntries: int(txMetadata.Nentries),
		Eh:       DigestFrom(txMetadata.EH),
		BlTxID:   txMetadata.BlTxId,
		BlRoot:   DigestFrom(txMetadata.BlRoot),
	}
}

// LinearProofFrom converts a gRPC schema linear proof back into its
// embedded-store representation.
func LinearProofFrom(lproof *LinearProof) *store.LinearProof {
	return &store.LinearProof{
		SourceTxID: lproof.SourceTxId,
		TargetTxID: lproof.TargetTxId,
		Terms:      DigestsFrom(lproof.Terms),
	}
}
func DigestsTo(terms [][sha256.Size]byte) [][]byte {
slicedTerms := make([][]byte, len(terms))
for i, t := range terms {
slicedTerms[i] = make([]byte, sha256.Size)
copy(slicedTerms[i], t[:])
}
return slicedTerms
}
// DigestFrom converts a byte slice into a fixed-size sha256 digest array.
// Slices shorter than sha256.Size leave the remaining bytes zeroed; longer
// slices are truncated by copy.
func DigestFrom(slicedDigest []byte) [sha256.Size]byte {
	var d [sha256.Size]byte
	copy(d[:], slicedDigest)
	return d
}

// DigestsFrom converts a slice of byte slices into fixed-size digest
// arrays, element by element (see DigestFrom).
func DigestsFrom(slicedTerms [][]byte) [][sha256.Size]byte {
	out := make([][sha256.Size]byte, len(slicedTerms))
	for i, t := range slicedTerms {
		out[i] = DigestFrom(t)
	}
	return out
}
package breeze
import (
	"fmt"
	"math"
	"reflect"

	"github.com/pkg/errors"
)
// WriteFieldsFunc is a func interface of how to write all fields of a breeze message to the buffer.
type WriteFieldsFunc func(buf *Buffer)

// WriteElemFunc writes the elements of a packed map or packed array to the buffer.
type WriteElemFunc func(buf *Buffer)

// WriteBool writes a bool value into the buffer.
// The withType parameter is ignored: a bool is fully encoded by its type
// byte (TrueType / FalseType), so there is no separate value byte.
func WriteBool(buf *Buffer, b bool, withType bool) {
	if b {
		buf.WriteByte(TrueType)
	} else {
		buf.WriteByte(FalseType)
	}
}

// WriteString writes a string value into the buffer.
// When withType is true, strings up to DirectStringMaxLength bytes are
// encoded compactly with the length itself used as the type byte; longer
// strings get the regular string type byte followed by a varint length.
func WriteString(buf *Buffer, s string, withType bool) {
	if withType {
		l := len(s)
		if l <= DirectStringMaxLength { // direct string
			buf.WriteByte(byte(l))
			buf.Write([]byte(s))
			return
		}
		WriteStringType(buf)
	}
	buf.WriteVarInt(uint64(len(s)))
	buf.Write([]byte(s))
}

// WriteByte writes a single byte into the buffer, optionally preceded by
// its type byte.
func WriteByte(buf *Buffer, b byte, withType bool) {
	if withType {
		WriteByteType(buf)
	}
	buf.WriteByte(b)
}

// WriteBytes writes a byte slice into the buffer, optionally preceded by
// its type byte. Note: the length prefix is a fixed uint32 here, unlike the
// varint length used for strings.
func WriteBytes(buf *Buffer, bytes []byte, withType bool) {
	if withType {
		WriteBytesType(buf)
	}
	buf.WriteUint32(uint32(len(bytes)))
	buf.Write(bytes)
}

// WriteInt16 writes an int16 value into the buffer as a fixed 2-byte value,
// optionally preceded by its type byte.
func WriteInt16(buf *Buffer, i int16, withType bool) {
	if withType {
		WriteInt16Type(buf)
	}
	buf.WriteUint16(uint16(i))
}

// WriteInt32 writes an int32 value into the buffer.
// When withType is true, small values in [DirectInt32MinValue,
// DirectInt32MaxValue] are folded directly into the type byte; other values
// get a type byte followed by a zigzag-encoded varint.
func WriteInt32(buf *Buffer, i int32, withType bool) {
	if withType {
		if i >= DirectInt32MinValue && i <= DirectInt32MaxValue {
			buf.WriteByte(byte(i + Int32Zero))
			return
		}
		WriteInt32Type(buf)
	}
	buf.WriteZigzag32(uint32(i))
}

// WriteInt64 writes an int64 value into the buffer.
// When withType is true, small values in [DirectInt64MinValue,
// DirectInt64MaxValue] are folded directly into the type byte; other values
// get a type byte followed by a zigzag-encoded varint.
func WriteInt64(buf *Buffer, i int64, withType bool) {
	if withType {
		if i >= DirectInt64MinValue && i <= DirectInt64MaxValue {
			buf.WriteByte(byte(i + Int64Zero))
			return
		}
		WriteInt64Type(buf)
	}
	buf.WriteZigzag64(uint64(i))
}
// WriteFloat32 writes a float32 value into the buffer as its IEEE-754 bit
// pattern, optionally preceded by its type byte.
func WriteFloat32(buf *Buffer, f float32, withType bool) {
	if withType {
		WriteFloat32Type(buf)
	}
	// f is already a float32; the original wrapped it in a redundant
	// float32(f) conversion.
	buf.WriteUint32(math.Float32bits(f))
}
// WriteFloat64 writes a float64 value into the buffer as its IEEE-754 bit
// pattern, optionally preceded by its type byte.
func WriteFloat64(buf *Buffer, f float64, withType bool) {
	if withType {
		WriteFloat64Type(buf)
	}
	buf.WriteUint64(math.Float64bits(f))
}
// WritePackedMap writes a packed map: an optional type byte, the number of
// entries as a varint, then the entries as produced by f.
func WritePackedMap(buf *Buffer, withType bool, size int, f WriteElemFunc) {
	if withType {
		WritePackedMapType(buf)
	}
	buf.WriteVarInt(uint64(size))
	f(buf)
}

// WritePackedArray writes a packed array: an optional type byte, the number
// of elements as a varint, then the elements as produced by f.
func WritePackedArray(buf *Buffer, withType bool, size int, f WriteElemFunc) {
	if withType {
		WritePackedArrayType(buf)
	}
	buf.WriteVarInt(uint64(size))
	f(buf)
}

// WriteStringStringMapEntries writes the entries of a map[string]string
// directly: the key and value element types once, then the untyped pairs.
// Note: Go map iteration order is not deterministic.
func WriteStringStringMapEntries(buf *Buffer, m map[string]string) {
	WriteStringType(buf)
	WriteStringType(buf)
	for k, v := range m {
		WriteString(buf, k, false)
		WriteString(buf, v, false)
	}
}

// WriteStringInt32MapEntries writes the entries of a map[string]int32
// directly: the key and value element types once, then the untyped pairs.
func WriteStringInt32MapEntries(buf *Buffer, m map[string]int32) {
	WriteStringType(buf)
	WriteInt32Type(buf)
	for k, v := range m {
		WriteString(buf, k, false)
		WriteInt32(buf, v, false)
	}
}

// WriteStringInt64MapEntries writes the entries of a map[string]int64
// directly: the key and value element types once, then the untyped pairs.
func WriteStringInt64MapEntries(buf *Buffer, m map[string]int64) {
	WriteStringType(buf)
	WriteInt64Type(buf)
	for k, v := range m {
		WriteString(buf, k, false)
		WriteInt64(buf, v, false)
	}
}

// WriteStringArrayElems writes the elements of a []string directly: the
// element type once, then the untyped values in order.
func WriteStringArrayElems(buf *Buffer, a []string) {
	WriteStringType(buf)
	for _, v := range a {
		WriteString(buf, v, false)
	}
}

// WriteInt32ArrayElems writes the elements of a []int32 directly: the
// element type once, then the untyped values in order.
func WriteInt32ArrayElems(buf *Buffer, a []int32) {
	WriteInt32Type(buf)
	for _, v := range a {
		WriteInt32(buf, v, false)
	}
}

// WriteInt64ArrayElems writes the elements of a []int64 directly: the
// element type once, then the untyped values in order.
func WriteInt64ArrayElems(buf *Buffer, a []int64) {
	WriteInt64Type(buf)
	for _, v := range a {
		WriteInt64(buf, v, false)
	}
}
// WriteMessageWithoutType writes a breeze message body — a length prefix
// followed by the fields produced by fieldsFunc — without a preceding
// message type. Any panic raised while writing is recovered and returned
// as an error.
func WriteMessageWithoutType(buf *Buffer, fieldsFunc WriteFieldsFunc) (err error) {
	defer func() {
		if inner := recover(); inner != nil {
			// BUGFIX: the original asserted inner.(error) unconditionally,
			// which re-panicked whenever the panic value was not an error
			// (e.g. a string). Convert non-error panic values instead.
			if e, ok := inner.(error); ok {
				err = e
			} else {
				err = fmt.Errorf("%v", inner)
			}
		}
	}()
	pos := skipLength(buf) // reserve space for the length prefix
	fieldsFunc(buf)
	writeLength(buf, pos) // back-fill the actual encoded length
	return err
}
//========== write BreezeType to buffer, only for packed model(packed map and packed array) =====================

// WriteBoolType writes the bool type marker.
// NOTE(review): TrueType doubles as the generic bool type byte here, since
// bool values are encoded entirely in the type byte — confirm against the
// reader side.
func WriteBoolType(buf *Buffer) {
	buf.WriteByte(TrueType)
}

// WriteStringType writes the string type marker.
func WriteStringType(buf *Buffer) {
	buf.WriteByte(StringType)
}

// WriteByteType writes the byte type marker.
func WriteByteType(buf *Buffer) {
	buf.WriteByte(ByteType)
}

// WriteBytesType writes the byte-array type marker.
func WriteBytesType(buf *Buffer) {
	buf.WriteByte(BytesType)
}

// WriteInt16Type writes the int16 type marker.
func WriteInt16Type(buf *Buffer) {
	buf.WriteByte(Int16Type)
}

// WriteInt32Type writes the int32 type marker.
func WriteInt32Type(buf *Buffer) {
	buf.WriteByte(Int32Type)
}

// WriteInt64Type writes the int64 type marker.
func WriteInt64Type(buf *Buffer) {
	buf.WriteByte(Int64Type)
}

// WriteFloat32Type writes the float32 type marker.
func WriteFloat32Type(buf *Buffer) {
	buf.WriteByte(Float32Type)
}

// WriteFloat64Type writes the float64 type marker.
func WriteFloat64Type(buf *Buffer) {
	buf.WriteByte(Float64Type)
}

// WritePackedMapType writes the packed map type marker.
func WritePackedMapType(buf *Buffer) {
	buf.WriteByte(PackedMapType)
}

// WritePackedArrayType writes the packed array type marker.
func WritePackedArrayType(buf *Buffer) {
	buf.WriteByte(PackedArrayType)
}

// WriteMessageType writes a message type marker. The first occurrence of a
// message name is written in full (type byte + name) and remembered in the
// buffer's context; later occurrences are written as a back-reference
// index, folded into a single byte when it does not exceed
// DirectRefMessageMaxValue.
func WriteMessageType(buf *Buffer, name string) {
	index := buf.GetContext().getMessageTypeIndex(name)
	if index < 0 { // first write
		buf.WriteByte(MessageType)
		WriteString(buf, name, false)
		buf.GetContext().putMessageType(name)
	} else {
		if index > DirectRefMessageMaxValue {
			buf.WriteByte(RefMessageType)
			buf.WriteVarInt(uint64(index))
		} else {
			buf.WriteByte(byte(RefMessageType + index))
		}
	}
}
//========== write message field by type. it will not write if the value is default =====================
// WriteBoolField write field with index
func WriteBoolField(buf *Buffer, index int, b bool) {
if b {
buf.WriteVarInt(uint64(index))
WriteBool(buf, b, true)
}
}
// WriteStringField write field with index
func WriteStringField(buf *Buffer, index int, s string) {
if s != "" {
buf.WriteVarInt(uint64(index))
WriteString(buf, s, true)
}
}
// WriteByteField write field with index
func WriteByteField(buf *Buffer, index int, b byte) {
buf.WriteVarInt(uint64(index))
WriteByte(buf, b, true)
}
// WriteBytesField write field with index
func WriteBytesField(buf *Buffer, index int, b []byte) {
if len(b) > 0 {
buf.WriteVarInt(uint64(index))
WriteBytes(buf, b, true)
}
}
// WriteInt16Field write field with index
func WriteInt16Field(buf *Buffer, index int, i int16) {
if i != 0 {
buf.WriteVarInt(uint64(index))
WriteInt16(buf, i, true)
}
}
// WriteInt32Field write field with index
func WriteInt32Field(buf *Buffer, index int, i int32) {
if i != 0 {
buf.WriteVarInt(uint64(index))
WriteInt32(buf, i, true)
}
}
// WriteInt64Field write field with index
func WriteInt64Field(buf *Buffer, index int, i int64) {
if i != 0 {
buf.WriteVarInt(uint64(index))
WriteInt64(buf, i, true)
}
}
// WriteFloat32Field write field with index
func WriteFloat32Field(buf *Buffer, index int, f float32) {
if f != 0 {
buf.WriteVarInt(uint64(index))
WriteFloat32(buf, f, true)
}
}
// WriteFloat64Field write field with index
func WriteFloat64Field(buf *Buffer, index int, f float64) {
if f != 0 {
buf.WriteVarInt(uint64(index))
WriteFloat64(buf, f, true)
}
}
// WriteMapField write field with index
func WriteMapField(buf *Buffer, index int, size int, f WriteElemFunc) {
buf.WriteVarInt(uint64(index))
WritePackedMap(buf, true, size, f)
}
// WriteArrayField write field with index
func WriteArrayField(buf *Buffer, index int, size int, f WriteElemFunc) {
buf.WriteVarInt(uint64(index))
WritePackedArray(buf, true, size, f)
}
// WriteMessageField write field with index
func WriteMessageField(buf *Buffer, index int, m Message) {
buf.WriteVarInt(uint64(index))
WriteMessageType(buf, m.GetName())
m.WriteTo(buf)
}
// WriteField write an any type field into buffer.
func WriteField(buf *Buffer, index int, v interface{}) {
if v != nil {
buf.WriteVarInt(uint64(index))
err := WriteValue(buf, v)
if err != nil {
panic(err)
}
}
}
// WriteValue can write primitive type and ptr of primitive type, and breeze message.
func WriteValue(buf *Buffer, v interface{}) error {
if v == nil {
buf.WriteByte(NullType)
return nil
}
if msg, ok := v.(Message); ok {
return writeMessage(buf, msg, true)
}
var rv reflect.Value
if nrv, ok := v.(reflect.Value); ok {
rv = nrv
} else {
rv = reflect.ValueOf(v)
}
return writeReflectValue(buf, rv, true)
}
func writeReflectValue(buf *Buffer, rv reflect.Value, withType bool) error {
k := rv.Kind()
if k == reflect.Ptr {
if rv.CanInterface() { //message
realV := rv.Interface()
if msg, ok := realV.(Message); ok {
return writeMessage(buf, msg, withType)
}
}
//TODO extension for custom process
rv = rv.Elem()
k = rv.Kind()
}
if k == reflect.Interface {
rv = reflect.ValueOf(rv.Interface())
k = rv.Kind()
}
switch k {
case reflect.String:
WriteString(buf, rv.String(), withType)
case reflect.Bool:
WriteBool(buf, rv.Bool(), withType)
case reflect.Int, reflect.Int32:
WriteInt32(buf, int32(rv.Int()), withType)
case reflect.Int64:
WriteInt64(buf, rv.Int(), withType)
case reflect.Map:
return writeMap(buf, rv, withType)
case reflect.Slice:
t := rv.Type().Elem().Kind()
if t == reflect.Uint8 {
WriteBytes(buf, rv.Bytes(), withType)
} else {
return writeArray(buf, rv, withType)
}
case reflect.Uint, reflect.Uint32:
WriteInt32(buf, int32(rv.Uint()), withType)
case reflect.Uint64:
WriteInt64(buf, int64(rv.Uint()), withType)
case reflect.Uint8:
WriteByte(buf, byte(rv.Uint()), withType)
case reflect.Int16:
WriteInt16(buf, int16(rv.Int()), withType)
case reflect.Uint16:
WriteInt16(buf, int16(rv.Uint()), withType)
case reflect.Float32:
WriteFloat32(buf, float32(rv.Float()), withType)
case reflect.Float64:
WriteFloat64(buf, rv.Float(), withType)
default:
return errors.New("breeze: unsupported type " + k.String())
}
return nil
}
func writeType(buf *Buffer, rv reflect.Value) {
k := rv.Kind()
if k == reflect.Ptr {
if rv.CanInterface() { //message
realV := rv.Interface()
if msg, ok := realV.(Message); ok {
WriteMessageType(buf, msg.GetName())
return
}
}
rv = rv.Elem()
k = rv.Kind()
}
switch k {
case reflect.String:
WriteStringType(buf)
case reflect.Bool:
WriteBoolType(buf)
case reflect.Int, reflect.Int32, reflect.Uint, reflect.Uint32:
WriteInt32Type(buf)
case reflect.Int64, reflect.Uint64:
WriteInt64Type(buf)
case reflect.Map:
if canPackMap(rv.Type()) {
WritePackedMapType(buf)
} else {
buf.WriteByte(MapType)
}
case reflect.Slice:
tp := rv.Type()
if tp.Elem().Kind() == reflect.Uint8 {
WriteBytesType(buf)
} else {
if canPackArray(rv.Type()) {
WritePackedArrayType(buf)
} else {
buf.WriteByte(ArrayType)
}
}
case reflect.Uint8:
WriteByteType(buf)
case reflect.Int16, reflect.Uint16:
WriteInt16Type(buf)
case reflect.Float32:
WriteFloat32Type(buf)
case reflect.Float64:
WriteFloat64Type(buf)
default:
panic(errors.New("breeze: unsupported type " + k.String()))
}
}
func writeArray(buf *Buffer, v reflect.Value, withType bool) (err error) {
if canPackArray(v.Type()) {
defer func() {
if inner := recover(); inner != nil {
err = inner.(error)
}
}()
WritePackedArray(buf, withType, v.Len(), func(buf *Buffer) {
for i := 0; i < v.Len(); i++ {
elem := v.Index(i)
if i == 0 {
writeType(buf, elem)
}
err = writeReflectValue(buf, elem, false)
if err != nil {
panic(err)
}
}
})
} else {
if withType {
buf.WriteByte(ArrayType)
}
buf.WriteVarInt(uint64(v.Len()))
for i := 0; i < v.Len(); i++ {
err = writeReflectValue(buf, v.Index(i), true)
if err != nil {
return err
}
}
}
return err
}
func writeMap(buf *Buffer, v reflect.Value, withType bool) (err error) {
if canPackMap(v.Type()) {
defer func() {
if inner := recover(); inner != nil {
err = inner.(error)
}
}()
WritePackedMap(buf, withType, v.Len(), func(buf *Buffer) {
rangePackedMap(buf, v)
})
} else {
if withType {
buf.WriteByte(MapType)
}
buf.WriteVarInt(uint64(v.Len()))
err = rangeMap(buf, v)
}
return err
}
func canPackArray(t reflect.Type) bool {
return t.Elem().Kind() != reflect.Interface
}
func canPackMap(t reflect.Type) bool {
return (t.Key().Kind() != reflect.Interface) && (t.Elem().Kind() != reflect.Interface)
}
func writeMessage(buf *Buffer, message Message, withType bool) error {
if withType {
WriteMessageType(buf, message.GetName())
}
return message.WriteTo(buf)
}
// keep 4 bytes for write length later
func skipLength(buf *Buffer) int {
pos := buf.GetWPos()
buf.SetWPos(pos + 4)
return pos
}
// write length into keep position
func writeLength(buf *Buffer, keepPos int) {
curPos := buf.GetWPos()
buf.SetWPos(keepPos)
buf.WriteUint32(uint32(curPos - keepPos - 4))
buf.SetWPos(curPos)
} | breezeWriter.go | 0.553023 | 0.45847 | breezeWriter.go | starcoder |
package evaluator
import (
"github.com/manishmeganathan/tunalang/object"
"github.com/manishmeganathan/tunalang/syntaxtree"
)
// A function that evaluates a Syntax tree program into an evaluated object
func evalProgram(program *syntaxtree.Program, env *object.Environment) object.Object {
// Declare an object
var result object.Object
// Iterate over the program statements
for _, statement := range program.Statements {
// Update the result object
result = Evaluate(statement, env)
// Check the type of evaluated object
switch result := result.(type) {
// Return Object
case *object.ReturnValue:
// Return the return value
return result.Value
// Error Object
case *object.Error:
// Return the error object
return result
}
}
// Return the result object
return result
}
// A function that evaluates a Syntax tree block into an evaluated object
func evalBlockStatement(block *syntaxtree.BlockStatement, env *object.Environment) object.Object {
// Declare an object
var result object.Object
// Iterate over the block statements
for _, statement := range block.Statements {
// Update the result object
result = Evaluate(statement, env)
// Check if result has evaluated object
if result != nil {
// Retrieve the object type
rt := result.Type()
// Check if the object type is either a Return or an Error
if rt == object.RETURN_VALUE_OBJ || rt == object.ERROR_OBJ {
// Return the object
return result
}
}
}
// Return the result object
return result
}
// A function that evaluates a prefix expression
// given a prefix operator and an object
func evalPrefixExpression(operator string, right object.Object) object.Object {
// Check the type of operator
switch operator {
// Bang Operator
case "!":
// Evaluate the object for the bang operator
return evalBangOperatorExpression(right)
// Minus Operator
case "-":
// Evaluate the object for the minus operator
return evalMinusPrefixOperatorExpression(right)
// Unsupported Operator
default:
// Return Error
return object.NewError("unsupported operator: %s%s", operator, right.Type())
}
}
// A function that returns the result object for a
// given object with the prefix bang operator applied
func evalBangOperatorExpression(right object.Object) object.Object {
// Check value of object
switch right {
// Flip true to false
case TRUE:
return FALSE
// Flip false to true
case FALSE:
return TRUE
// Flip null to true
case NULL:
return TRUE
// Default to false
default:
return FALSE
}
}
// A function that returns the result object for a
// given object with the prefix minus operator applied
func evalMinusPrefixOperatorExpression(right object.Object) object.Object {
// Check that object is an Integer
if right.Type() != object.INTEGER_OBJ {
// Return Error for non integer objects
return object.NewError("unsupported operator: -%s", right.Type())
}
// Retrieve the value of the Integer object
value := right.(*object.Integer).Value
// Return the modified Integer with the negative of the value
return &object.Integer{Value: -value}
}
// A function that evaluates an infix expression given
// a infix operator and the left and right objects
func evalInfixExpression(operator string, left, right object.Object) object.Object {
// Check Parameters
switch {
// If both objects are Integers
case left.Type() == object.INTEGER_OBJ && right.Type() == object.INTEGER_OBJ:
// Evaluate expression for integer objects
return evalIntegerInfixExpression(operator, left, right)
// If both objects are Strings
case left.Type() == object.STRING_OBJ && right.Type() == object.STRING_OBJ:
// Evaluate expression for integer objects
return evalStringInfixExpression(operator, left, right)
// If both objects are not Integers but the operator is '=='
case operator == "==":
// Evaluate the objects for '=='
return getNativeBoolean(left == right)
// If both objects are not Integers but the operator is '!='
case operator == "!=":
// Evaluate the objects for '!='
return getNativeBoolean(left != right)
// If both objects are not of the same type
case left.Type() != right.Type():
// Return Error
return object.NewError("type mismatch: %s %s %s", left.Type(), operator, right.Type())
// Unsupported combination
default:
// Return Error
return object.NewError("unsupported operator: %s %s %s", left.Type(), operator, right.Type())
}
}
// A function that evaluates an infix expression between two Integers
// given a infix operator and the left and right Integers objects
func evalIntegerInfixExpression(operator string, left, right object.Object) object.Object {
// Retrieve the left and right integer values
leftVal := left.(*object.Integer).Value
rightVal := right.(*object.Integer).Value
// Check the type of operator
switch operator {
// Plus operator (Add)
case "+":
// Evaluate the objects for addition
return &object.Integer{Value: leftVal + rightVal}
// Minus Operator (Subtract)
case "-":
// Evaluate the objects for subtraction
return &object.Integer{Value: leftVal - rightVal}
// Asterisk Operator (Multiply)
case "*":
// Evaluate the objects for multiplication
return &object.Integer{Value: leftVal * rightVal}
// Slash Operator (Divide)
case "/":
// Evaluate the objects for division
return &object.Integer{Value: leftVal / rightVal}
// Less Than Operator
case "<":
// Evaluate the objects for '<'
return getNativeBoolean(leftVal < rightVal)
// Greater Than Operator
case ">":
// Evaluate the objects for '>'
return getNativeBoolean(leftVal > rightVal)
// Equal To Operator
case "==":
// Evaluate the objects for '=='
return getNativeBoolean(leftVal == rightVal)
// Not Equal To Operator
case "!=":
// Evaluate the objects for '!='
return getNativeBoolean(leftVal != rightVal)
// Unsupported Operator
default:
// Return Error
return object.NewError("unsupported operator: %s %s %s", left.Type(), operator, right.Type())
}
}
// A function that evaluates an infix expression between two Strings
// given a infix operator and the left and right String objects
func evalStringInfixExpression(operator string, left, right object.Object) object.Object {
// Only concatenation is supported
if operator != "+" {
// Return an error
return object.NewError("unknown operator: %s %s %s", left.Type(), operator, right.Type())
}
// Retrieve the left and right integer values
leftVal := left.(*object.String).Value
rightVal := right.(*object.String).Value
// Return the String Object
return &object.String{Value: leftVal + rightVal}
}
// A function that evaluates an if expression given an IfExpression syntax tree node
func evalIfExpression(ie *syntaxtree.IfExpression, env *object.Environment) object.Object {
// Evaluate the conditional statement
condition := Evaluate(ie.Condition, env)
// Check if evaluated condition is an error
if isError(condition) {
// Return the error
return condition
}
// Check if the condition is truthy
if isTruthy(condition) {
// Evaluate the consequence block
return Evaluate(ie.Consequence, env)
// Check if alternate exists
} else if ie.Alternative != nil {
// Evaluate the alternate consequence block
return Evaluate(ie.Alternative, env)
} else {
// Return null
return NULL
}
}
// A function that evaluates an identifier literal given an Identifier syntax tree node
func evalIdentifier(node *syntaxtree.Identifier, env *object.Environment) object.Object {
// Check and retrieve the identifier value from the environment
if val, ok := env.Get(node.Value); ok {
// Return the value
return val
}
// Check and retrieve the identifer value from the built-ins
if builtin, ok := builtins[node.Value]; ok {
// Return the builtin
return builtin
}
// Return error when the identifier does not exist in the environment
return object.NewError("identifier not found: " + node.Value)
}
// A function that evaluates a slice of Expression syntax nodes into evaluated objects
func evalExpressions(exps []syntaxtree.Expression, env *object.Environment) []object.Object {
// Declare a result Object slice
var result []object.Object
// Iterate over the expression nodes
for _, e := range exps {
// Evaluate the expression
evaluated := Evaluate(e, env)
// Check for an error
if isError(evaluated) {
// Return the error as the first object of the slice
return []object.Object{evaluated}
}
// Add the evaluated object into the slice
result = append(result, evaluated)
}
// Return the result slice
return result
}
// A function that evaluates an IndexExpression between a List and an Integer index
func evalIndexExpression(left, index object.Object) object.Object {
switch {
// Check if the left object is a List and the index is an Integer
case left.Type() == object.LIST_OBJ && index.Type() == object.INTEGER_OBJ:
// Evaluate the list index expression
return evalListIndexExpression(left, index)
// Check if the left object is a Map
case left.Type() == object.MAP_OBJ:
// Evaluate the map index expression
return evalMapIndexExpression(left, index)
default:
// Return error
return object.NewError("index operator not supported: %s", left.Type())
}
}
// A function that evaluates a List index expression given a List and an Integer index
func evalListIndexExpression(list, index object.Object) object.Object {
// Assert the list object as a List
listObject := list.(*object.List)
// Assert the index object as an Integer
idx := index.(*object.Integer).Value
// Check if the index is out of range
max := int64(len(listObject.Elements) - 1)
if idx < 0 || idx > max {
// Return null
return NULL
}
// Return the list element at the index
return listObject.Elements[idx]
}
func evalMapLiteral(node *syntaxtree.MapLiteral, env *object.Environment) object.Object {
// Init a new mapping for HashKeys to MapPairs
pairs := make(map[object.HashKey]object.MapPair)
// Iterate keys and values in the map literal
for keyNode, valueNode := range node.Pairs {
// Evaluate the key
key := Evaluate(keyNode, env)
// Check for an error
if isError(key) {
// Return the error
return key
}
// Assert that the key is a Hashable
hashKey, ok := key.(object.Hashable)
if !ok {
// Return error
return object.NewError("unusable as hash key: %s", key.Type())
}
// Evaluate the value
value := Evaluate(valueNode, env)
// Check for an error
if isError(value) {
// Return the error
return value
}
// Generate the hash key for the key
hashed := hashKey.HashKey()
// Create a MapPair for the key and value and add it to the pairs map
pairs[hashed] = object.MapPair{Key: key, Value: value}
}
// Create a new HashMap from the pairs map and return it
return &object.Map{Pairs: pairs}
}
// A function that evaluates a Map index expression given a Map and a Hashable index
func evalMapIndexExpression(mapobj, index object.Object) object.Object {
// Assert the map object as a Map
mapObject := mapobj.(*object.Map)
// Assert the index object as a Hashable
key, ok := index.(object.Hashable)
if !ok {
// Return error
return object.NewError("unusable as hash key: %s", index.Type())
}
// Retrieve the MapPair from the map object for the hash key
pair, ok := mapObject.Pairs[key.HashKey()]
if !ok {
// Return null when not found
return NULL
}
// Return the value from the MapPair
return pair.Value
} | evaluator/evaluators.go | 0.772144 | 0.583559 | evaluators.go | starcoder |
package evdb
import (
"time"
)
// TimeRange is a range of time with a specific step
type TimeRange struct {
Start time.Time `json:"start"`
End time.Time `json:"end"`
Step time.Duration `json:"step"`
}
// TimeRel is a relation between two time ranges
type TimeRel int
// TimeRel enum
const (
TimeRelNone TimeRel = iota
TimeRelAround
TimeRelOverlapsBefore
TimeRelBefore
TimeRelEqual
TimeRelAfter
TimeRelOverlapsAfter
TimeRelBetween
)
// Truncate truncates time to the TimeRange step
func (tr *TimeRange) Truncate(tm time.Time) time.Time {
if tr.Step > 0 {
return tm.Truncate(tr.Step).In(tm.Location())
}
if tr.Step == 0 {
return time.Time{}
}
return tm
}
// Each calls a function for each step in a TimeRange
func (tr *TimeRange) Each(fn func(time.Time, int)) {
start := tr.Start.Truncate(tr.Step)
end := tr.End.Truncate(tr.Step)
for i := 0; !end.Before(start); start, i = start.Add(tr.Step), i+1 {
fn(start, i)
}
}
// NumSteps calculates the number of steps in a TimeRange
func (tr *TimeRange) NumSteps() int {
if tr.Step == 0 {
return -1
}
start := tr.Start.Truncate(tr.Step)
end := tr.End.Truncate(tr.Step)
return int(end.Sub(start) / tr.Step)
}
// Rel finds the relation between two time ranges
func (tr *TimeRange) Rel(other *TimeRange) TimeRel {
if tr.Step != other.Step {
return TimeRelNone
}
tminA, tmaxA, tminB, tmaxB := tr.Start, tr.End, other.Start, other.End
if tminB.Equal(tminA) {
if tmaxB.After(tmaxA) {
return TimeRelAround
}
if tmaxB.Equal(tmaxA) {
return TimeRelEqual
}
return TimeRelBetween
}
// tminB != tminA
if tminB.After(tmaxA) {
return TimeRelAfter
}
// tminB <= tmaxA
if tmaxB.Before(tminA) {
return TimeRelBefore
}
// tmaxB >= tminA
if tminB.Before(tminA) {
if tmaxB.After(tmaxA) {
return TimeRelAround
}
return TimeRelOverlapsBefore
}
// tminB >= tminA
if tmaxB.After(tmaxA) {
if tminB.Before(tminA) {
return TimeRelAround
}
return TimeRelOverlapsAfter
}
// tmaxB <= tmaxA
return TimeRelBetween
}
// Offset offsets a TimeRange by a duration
func (tr TimeRange) Offset(d time.Duration) TimeRange {
tr.Start, tr.End = tr.Start.Add(d), tr.End.Add(d)
return tr
} | timerange.go | 0.802826 | 0.445771 | timerange.go | starcoder |
package main
import (
"fmt"
"text/template"
)
type templateData struct {
Datetime string
System systemInfo
Tests []*Test
}
var (
rootTmpl *template.Template
)
func init() {
rootTmpl = template.New("")
template.Must(rootTmpl.New("results").Funcs(template.FuncMap{
"formatTimeUs": formatTimeUs,
"formatBinary": formatBinary,
}).Parse(`## System
| | |
|----|:---|
| Processor | {{.System.Processor}} |
| RAM | {{.System.RAM}} |
| OS | {{.System.OS}} |
| [Bombardier](https://github.com/codesenberg/bombardier) | {{.System.Bombardier}} |
| [Go](https://golang.org) | {{.System.Go}} |
| [.Net Core](https://dotnet.microsoft.com/) | {{.System.Dotnet}} |
| [Node.js](https://nodejs.org/) | {{.System.Node}} |
> Last updated: {{.Datetime}}
## Terminology
**Name** is the name of the framework(or router) used under a particular test.
**Reqs/sec** is the avg number of total requests could be processed per second (the higher the better).
**Latency** is the amount of time it takes from when a request is made by the client to the time it takes for the response to get back to that client (the smaller the better).
**Throughput** is the rate of production or the rate at which data are transferred (the higher the better, it depends from response length (body + headers).
**Time To Complete** is the total time (in seconds) the test completed (the smaller the better).
## Results
{{ range $test := .Tests}}
### Test:{{ $test.Name}}
{{ if $test.Description -}}
📖 {{ $test.ParseDescription $test -}}
{{ end }}
| Name | Language | Reqs/sec | Latency | Throughput | Time To Complete |
|------|:---------|:---------|:--------|:-----------|:-----------------|
{{ range $env := $test.Envs -}}
| [{{ $env.GetName }}](https://github.com/{{$env.Repo}}) | {{ $env.Language }} |
{{- if $env.CanBenchmark }}
{{- printf "%.0f" $env.Result.RequestsPerSecond.Mean }} |
{{- formatTimeUs $env.Result.Latency.Mean }} |
{{- formatBinary $env.Result.Throughput }} |
{{- printf "%.2f" $env.Result.TimeTakenSeconds }}s |
{{- else -}}
- | - | - | - | - | - |
{{- end}}
{{end -}}
{{ end -}}
`))
template.Must(rootTmpl.New("readme").Parse(`# Server Benchmarks
A benchmark suite which, **transparently**, stress-tests web servers and generates a report in markdown. It measures the requests per second, data transferred and time between requests and responses.
## Why YABS (Yet Another Benchmark Suite)
It's true, there already enough of benchmark suites to play around. However, most of them don't even contain real-life test applications to benchmark, therefore the results are not always accurate e.g. a route handler executes SQL queries or reads and sends JSON. This benchmark suite is a fresh start, it can contain any type of tests as the tests are running as self-executables and the measuring is done by a popular and trusted 3rd-party software which acts as a real HTTP Client (one more reason of transparency). [Contributions](CONTRIBUTING.md) and improvements are always welcomed here.
## Use case
Measure the performance of application(s) between different versions or implementations (or web frameworks).
This suite can be further customized, through its [tests.yml](tests.yml) file, in order to test personal or internal web applications before their public releases.
## How to run
1. Install [Go](https://golang.org/dl), [Bombardier](https://github.com/codesenberg/bombardier/releases/tag/v1.2.4), [Node.js](https://nodejs.org/en/download/current/) and [.NET Core](https://dotnet.microsoft.com/download)
2. Clone the repository
3. Stress-tests are described inside [tests.yml](tests.yml) file, it can be customized to fit your needs
4. Execute: ` + "`go build -o server-benchmarks`" + `
5. Run and wait for the executable _server-benchmarks_ (or _server-benchmarks.exe_ for windows) to finish
6. Read the results from the generated _README.md_ file.
### Docker
The only requirement is [Docker](https://docs.docker.com/).
` + "```sh" + `
$ docker run -v ${PWD}:/data kataras/server-benchmarks
` + "```" + `
## Benchmarks
The following generated README contains benchmark results from builtin tests between popular **HTTP/2 web frameworks as of 2020**.
_Note:_ it's possible that the contents of this file will be updated regularly to accept even more tests cases and frameworks.
{{ template "results" .}}
## License
This project is licensed under the [MIT License](LICENSE).
`))
}
// copied from bombardier's source code itself to display identical results.
type units struct {
scale uint64
base string
units []string
}
var (
binaryUnits = &units{
scale: 1024,
base: "",
units: []string{"KB", "MB", "GB", "TB", "PB"},
}
timeUnitsUs = &units{
scale: 1000,
base: "us",
units: []string{"ms", "s"},
}
timeUnitsS = &units{
scale: 60,
base: "s",
units: []string{"m", "h"},
}
)
func formatUnits(n float64, m *units, prec int) string {
amt := n
unit := m.base
scale := float64(m.scale) * 0.85
for i := 0; i < len(m.units) && amt >= scale; i++ {
amt /= float64(m.scale)
unit = m.units[i]
}
return fmt.Sprintf("%.*f%s", prec, amt, unit)
}
func formatBinary(n float64) string {
return formatUnits(n, binaryUnits, 2)
}
func formatTimeUs(n float64) string {
units := timeUnitsUs
if n >= 1000000.0 {
n /= 1000000.0
units = timeUnitsS
}
return formatUnits(n, units, 2)
} | template.go | 0.779154 | 0.778313 | template.go | starcoder |
package doltcore
import (
"errors"
"math"
"strconv"
"strings"
"github.com/google/uuid"
"github.com/liquidata-inc/dolt/go/store/types"
)
// StringToValue takes a string and a NomsKind and tries to convert the string to a noms Value.
func StringToValue(s string, kind types.NomsKind) (types.Value, error) {
if !types.IsPrimitiveKind(kind) || kind == types.BlobKind {
return nil, errors.New("Only primitive type support")
}
switch kind {
case types.StringKind:
return types.String(s), nil
case types.FloatKind:
return stringToFloat(s)
case types.BoolKind:
return stringToBool(s)
case types.IntKind:
return stringToInt(s)
case types.UintKind:
return stringToUint(s)
case types.UUIDKind:
return stringToUUID(s)
case types.NullKind:
return types.NullValue, nil
}
panic("Unsupported type " + kind.String())
}
func stringToFloat(s string) (types.Value, error) {
if len(s) == 0 {
return types.NullValue, nil
}
f, err := strconv.ParseFloat(s, 64)
if err != nil {
return types.Float(math.NaN()), ConversionError{types.StringKind, types.FloatKind, err}
}
return types.Float(f), nil
}
func stringToBool(s string) (types.Value, error) {
if len(s) == 0 {
return types.NullValue, nil
}
b, err := strconv.ParseBool(strings.ToLower(s))
if err != nil {
return types.Bool(false), ConversionError{types.StringKind, types.BoolKind, err}
}
return types.Bool(b), nil
}
func stringToInt(s string) (types.Value, error) {
if len(s) == 0 {
return types.NullValue, nil
}
n, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return types.Int(0), ConversionError{types.StringKind, types.IntKind, err}
}
return types.Int(n), nil
}
func stringToUint(s string) (types.Value, error) {
if len(s) == 0 {
return types.NullValue, nil
}
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return types.Uint(0), ConversionError{types.StringKind, types.UintKind, err}
}
return types.Uint(n), nil
}
func stringToUUID(s string) (types.Value, error) {
if len(s) == 0 {
return types.NullValue, nil
}
u, err := uuid.Parse(s)
if err != nil {
return types.UUID(u), ConversionError{types.StringKind, types.UUIDKind, err}
}
return types.UUID(u), nil
} | go/libraries/doltcore/str_to_noms.go | 0.69451 | 0.450118 | str_to_noms.go | starcoder |
package index
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
"os"
"sync"
"github.com/ipld/go-storethehash/store/primary"
"github.com/ipld/go-storethehash/store/types"
)
/* An append-only log of [`recordlist`]s.
The format of that append only log is:
```text
| Once | Repeated |
| | |
| 4 bytes | Variable size | 4 bytes | Variable size | … |
| Size of the header | [`Header`] | Size of the Recordlist | Recordlist | … |
```
*/
// IndexVersion is the on-disk format version written into the index file header.
const IndexVersion uint8 = 2

// SizePrefixSize is the number of bytes used for the size prefix of a record list.
const SizePrefixSize int = 4
// StripBucketPrefix removes the leading bytes of key that are fully
// covered by the bucket-selection bits.
//
// The first bits of a key determine which in-memory bucket the key goes
// into; only whole bytes covered by those bits are dropped. For example a
// bits value of 19 strips 2 bytes, whereas 24 bits strips 3 bytes.
func StripBucketPrefix(key []byte, bits byte) []byte {
	skip := bits / 8
	return key[skip:]
}
// Header is the file header of the index.
//
// The serialized header is:
// ```text
// | 1 byte                | 1 byte                              |
// | Version of the header | Number of bits used for the buckets |
// ```
type Header struct {
	// Version is the format version number, in case the header changes.
	Version byte
	// BucketsBits is the number of bits used to determine the in-memory buckets.
	BucketsBits byte
}
// NewHeader constructs a Header for the current index format version with
// the given number of bucket bits.
func NewHeader(bucketsBits byte) Header {
	return Header{
		Version:     IndexVersion,
		BucketsBits: bucketsBits,
	}
}
// FromHeader serializes a Header into its on-disk byte form: one version
// byte followed by one bucket-bits byte.
func FromHeader(h Header) []byte {
	out := make([]byte, 2)
	out[0] = h.Version
	out[1] = h.BucketsBits
	return out
}
// FromBytes deserializes a Header from its on-disk byte form (the inverse
// of FromHeader). data must contain at least 2 bytes: the version byte
// followed by the bucket-bits byte.
//
// The parameter is named data (not bytes) to avoid shadowing the imported
// stdlib bytes package.
func FromBytes(data []byte) Header {
	return Header{
		Version:     data[0],
		BucketsBits: data[1],
	}
}
// Index is an append-only on-disk index that maps keys to blocks in the
// primary storage, with in-memory bucket tables for lookup.
type Index struct {
	// sizeBits is the number of leading key bits used to select a bucket.
	sizeBits uint8
	// buckets maps each bucket prefix to the file position of its record list.
	buckets Buckets
	// sizeBuckets maps each bucket prefix to the size of its record list.
	sizeBuckets SizeBuckets
	// file is the append-only index file.
	file *os.File
	// writer buffers appends to file (indexBufferSize bytes).
	writer *bufio.Writer
	// Primary is the primary storage the index entries point into.
	Primary primary.PrimaryStorage
	// bucketLk guards access to the bucket tables (taken in Put).
	bucketLk sync.RWMutex
	// outstandingWork tracks pending work — presumably writes not yet
	// flushed to disk; confirm against the flush path (outside this view).
	outstandingWork types.Work
	// curPool and nextPool hold per-bucket serialized data in memory —
	// presumably record lists awaiting flush; TODO confirm.
	curPool, nextPool bucketPool
	// length is the current size of the index file in bytes.
	length types.Position
}
// indexBufferSize is the size, in bytes, of the bufio.Writer wrapping the index file.
const indexBufferSize = 32 * 4096

// bucketPool holds per-bucket serialized data keyed by bucket index —
// presumably record-list data buffered before being flushed; TODO confirm.
type bucketPool map[BucketIndex][]byte

// BucketPoolSize is the initial capacity hint used when allocating a bucketPool.
const BucketPoolSize = 1024
// OpenIndex opens the index at path, creating it if it does not exist.
//
// When creating, the header (length prefix, version, bucket bits) is
// written and synced to disk. When opening an existing index, the file is
// scanned sequentially to rebuild the in-memory bucket tables; this fails
// with ErrIndexWrongBitSize if the file was created with a different
// number of bucket bits than indexSizeBits.
func OpenIndex(path string, primary primary.PrimaryStorage, indexSizeBits uint8) (*Index, error) {
	var file *os.File
	var buckets Buckets
	var sizeBuckets SizeBuckets
	var length types.Position
	stat, err := os.Stat(path)
	if os.IsNotExist(err) {
		// Fresh index: write the header, prefixed with its length.
		header := FromHeader(NewHeader(indexSizeBits))
		headerSize := make([]byte, SizePrefixSize)
		binary.LittleEndian.PutUint32(headerSize, uint32(len(header)))
		file, err = openFileRandom(path, os.O_RDWR|os.O_APPEND|os.O_EXCL|os.O_CREATE)
		if err != nil {
			return nil, err
		}
		// Close the file on every error path below so the handle is not
		// leaked when initialization fails part-way through.
		if _, err := file.Write(headerSize); err != nil {
			_ = file.Close()
			return nil, err
		}
		if _, err = file.Write(header); err != nil {
			_ = file.Close()
			return nil, err
		}
		if err := file.Sync(); err != nil {
			_ = file.Close()
			return nil, err
		}
		length = types.Position(len(header) + len(headerSize))
		buckets, err = NewBuckets(indexSizeBits)
		if err != nil {
			_ = file.Close()
			return nil, err
		}
		sizeBuckets, err = NewSizeBuckets(indexSizeBits)
		if err != nil {
			_ = file.Close()
			return nil, err
		}
	} else {
		if err != nil {
			return nil, err
		}
		// Existing index: rebuild the bucket tables from the file contents
		// before reopening it for appending.
		buckets, sizeBuckets, err = scanIndex(path, indexSizeBits)
		if err != nil {
			return nil, err
		}
		file, err = openFileRandom(path, os.O_RDWR|os.O_APPEND|os.O_EXCL)
		if err != nil {
			return nil, err
		}
		length = types.Position(stat.Size())
	}
	// Keyed literal: bucketLk and outstandingWork start at their zero values.
	return &Index{
		sizeBits:    indexSizeBits,
		buckets:     buckets,
		sizeBuckets: sizeBuckets,
		file:        file,
		writer:      bufio.NewWriterSize(file, indexBufferSize),
		Primary:     primary,
		curPool:     make(bucketPool, BucketPoolSize),
		nextPool:    make(bucketPool, BucketPoolSize),
		length:      length,
	}, nil
}
// scanIndex sequentially reads an existing index file at path and rebuilds
// the in-memory bucket tables from its record lists.
//
// For every bucket prefix encountered it records the file position
// (Buckets) and size (SizeBuckets) of the record list last seen for that
// prefix. It fails with ErrIndexWrongBitSize when the file header was
// written with a different number of bucket bits than indexSizeBits.
//
// NOTE(review): header.Version is read but never validated here — confirm
// whether a version check is intended.
func scanIndex(path string, indexSizeBits uint8) (Buckets, SizeBuckets, error) {
	// this is a single sequential read across the whole index
	file, err := openFileForScan(path)
	if err != nil {
		return nil, nil, err
	}
	defer func() {
		_ = file.Close()
	}()
	header, bytesRead, err := ReadHeader(file)
	if err != nil {
		return nil, nil, err
	}
	if header.BucketsBits != indexSizeBits {
		return nil, nil, types.ErrIndexWrongBitSize{header.BucketsBits, indexSizeBits}
	}
	buckets, err := NewBuckets(indexSizeBits)
	if err != nil {
		return nil, nil, err
	}
	sizeBuckets, err := NewSizeBuckets(indexSizeBits)
	if err != nil {
		return nil, nil, err
	}
	buffered := bufio.NewReader(file)
	// Iterate over every record list, starting just past the header.
	iter := NewIndexIter(buffered, types.Position(bytesRead))
	for {
		data, pos, done, err := iter.Next()
		if done {
			break
		}
		if err == io.EOF {
			// The file is corrupt. Though it's not a problem, just take the data we
			// are able to use and move on.
			// NOTE(review): this seek to the end (whence 2 = SEEK_END) has no
			// lasting effect, as the deferred Close above closes this handle —
			// confirm it is intentional.
			if _, err := file.Seek(0, 2); err != nil {
				return nil, nil, err
			}
			break
		}
		if err != nil {
			return nil, nil, err
		}
		// The first 4 bytes of each record list encode the bucket it belongs
		// to; record this list's position and size for that bucket.
		bucketPrefix := BucketIndex(binary.LittleEndian.Uint32(data))
		buckets.Put(bucketPrefix, pos)
		sizeBuckets.Put(bucketPrefix, types.Size(len(data)))
	}
	return buckets, sizeBuckets, nil
}
// Put a key together with a file offset into the index.
//
// The key needs to be a cryptographically secure hash and at least 4 bytes long.
func (i *Index) Put(key []byte, location types.Block) error {
	// Get record list and bucket index
	bucket, err := i.getBucketIndex(key)
	if err != nil {
		return err
	}
	i.bucketLk.Lock()
	defer i.bucketLk.Unlock()
	records, err := i.getRecordsFromBucket(bucket)
	if err != nil {
		return err
	}
	// The key doesn't need the prefix that was used to find the right bucket. For simplicity
	// only full bytes are trimmed off.
	indexKey := StripBucketPrefix(key, i.sizeBits)
	// No records stored in that bucket yet
	var newData []byte
	if records == nil {
		// As it's the first key a single byte is enough as it doesn't need to be
		// distinguished from other keys.
		trimmedIndexKey := indexKey[:1]
		newData = EncodeKeyPosition(KeyPositionPair{trimmedIndexKey, location})
	} else {
		// Read the record list from disk and insert the new key
		pos, prevRecord, has := records.FindKeyPosition(indexKey)
		if has && bytes.HasPrefix(indexKey, prevRecord.Key) {
			// The previous key is fully contained in the current key. We need to read the full
			// key from the main data file in order to retrieve a key that is distinguishable
			// from the one that should get inserted.
			fullPrevKey, err := i.Primary.GetIndexKey(prevRecord.Block)
			if err != nil {
				return err
			}
			// The index key has already removed the prefix that is used to determine the
			// bucket. Do the same for the full previous key.
			prevKey := StripBucketPrefix(fullPrevKey, i.sizeBits)
			keyTrimPos := FirstNonCommonByte(indexKey, prevKey)
			// Only store the new key if it doesn't exist yet.
			if keyTrimPos >= len(indexKey) {
				return nil
			}
			// Keep one byte past the first differing position so both keys
			// stay distinguishable.
			trimmedPrevKey := prevKey[:keyTrimPos+1]
			trimmedIndexKey := indexKey[:keyTrimPos+1]
			var keys []KeyPositionPair
			// Replace the existing previous key (which is too short) with a new one and
			// also insert the new key, keeping the pair in sorted order.
			if bytes.Compare(trimmedPrevKey, trimmedIndexKey) == -1 {
				keys = []KeyPositionPair{
					{trimmedPrevKey, prevRecord.Block},
					{trimmedIndexKey, location},
				}
			} else {
				keys = []KeyPositionPair{
					{trimmedIndexKey, location},
					{trimmedPrevKey, prevRecord.Block},
				}
			}
			newData = records.PutKeys(keys, prevRecord.Pos, pos)
			// There is no need to do anything with the next key as the next key is
			// already guaranteed to be distinguishable from the new key as it was already
			// distinguishable from the previous key.
		} else {
			// The previous key is not fully contained in the key that should get inserted.
			// Hence we only need to trim the new key to the smallest one possible that is
			// still distinguishable from the previous (in case there is one) and next key
			// (in case there is one).
			prevRecordNonCommonBytePos := 0
			if has {
				prevRecordNonCommonBytePos = FirstNonCommonByte(indexKey, prevRecord.Key)
			}
			// The new record won't be the last record
			nextRecordNonCommonBytePos := 0
			if pos < records.Len() {
				// In order to determine the minimal key size, we need to get the next key
				// as well.
				nextRecord := records.ReadRecord(pos)
				nextRecordNonCommonBytePos = FirstNonCommonByte(indexKey, nextRecord.Key)
			}
			// Minimum prefix of the key that is different in at least one byte from the
			// previous as well as the next key.
			minPrefix := max(
				prevRecordNonCommonBytePos,
				nextRecordNonCommonBytePos,
			)
			// We cannot trim beyond the key length
			keyTrimPos := min(minPrefix, len(indexKey)-1)
			trimmedIndexKey := indexKey[:keyTrimPos+1]
			newData = records.PutKeys([]KeyPositionPair{{trimmedIndexKey, location}}, pos, pos)
		}
	}
	// Account for the pending write and stage it in the pool; the actual disk
	// write happens later in commit/flushBucket.
	i.outstandingWork += types.Work(len(newData) + BucketPrefixSize + SizePrefixSize)
	i.nextPool[bucket] = newData
	return nil
}
// Update replaces the stored location for a key that already exists in the
// index. It returns an error if the key (or its bucket) is not present.
func (i *Index) Update(key []byte, location types.Block) error {
	// Get record list and bucket index
	bucket, err := i.getBucketIndex(key)
	if err != nil {
		return err
	}
	i.bucketLk.Lock()
	defer i.bucketLk.Unlock()
	records, err := i.getRecordsFromBucket(bucket)
	if err != nil {
		return err
	}
	// The key doesn't need the prefix that was used to find the right bucket.
	// For simplicity only full bytes are trimmed off.
	indexKey := StripBucketPrefix(key, i.sizeBits)
	// An empty bucket means there is no key to be updated.
	if records == nil {
		return fmt.Errorf("no records found in index, unable to update the key")
	}
	// Read the record list to find the key and position.
	r := records.GetRecord(indexKey)
	if r == nil {
		return fmt.Errorf("key to update not found in index")
	}
	// Overwrite the record in place: same key, new location.
	newData := records.PutKeys([]KeyPositionPair{{r.Key, location}}, r.Pos, r.NextPos())
	// Stage the rewritten record list for the next commit.
	i.outstandingWork += types.Work(len(newData) + BucketPrefixSize + SizePrefixSize)
	i.nextPool[bucket] = newData
	return nil
}
// Remove deletes a key from the index. It reports whether the key was
// present (and therefore removed).
func (i *Index) Remove(key []byte) (bool, error) {
	bucket, err := i.getBucketIndex(key)
	if err != nil {
		return false, err
	}
	i.bucketLk.Lock()
	defer i.bucketLk.Unlock()
	records, err := i.getRecordsFromBucket(bucket)
	if err != nil {
		return false, err
	}
	// Strip the bucket-selection prefix; only full bytes are trimmed off.
	indexKey := StripBucketPrefix(key, i.sizeBits)
	if records == nil {
		// Empty bucket, so the key cannot be present.
		return false, nil
	}
	rec := records.GetRecord(indexKey)
	if rec == nil {
		// The record doesn't exist, nothing to remove.
		return false, nil
	}
	// Rewrite the record list without this record.
	// NOTE: The remaining keys are left untouched. To optimize storage,
	// records sharing a prefix with the removed key could be shortened,
	// similar to finding the position for a new key.
	newData := records.PutKeys([]KeyPositionPair{}, rec.Pos, rec.NextPos())
	i.outstandingWork += types.Work(len(newData) + BucketPrefixSize + SizePrefixSize)
	i.nextPool[bucket] = newData
	return true, nil
}
// getBucketIndex maps a key to its bucket by interpreting the first four
// bytes of the key as a little-endian integer and masking it down to
// sizeBits bits. Keys shorter than four bytes are rejected.
func (i *Index) getBucketIndex(key []byte) (BucketIndex, error) {
	if len(key) < 4 {
		return 0, types.ErrKeyTooShort
	}
	mask := (BucketIndex(1) << i.sizeBits) - 1
	return BucketIndex(binary.LittleEndian.Uint32(key)) & mask, nil
}
// getRecordsFromBucket returns the record list stored for the given bucket,
// taking it from the in-memory pools when available and reading it from disk
// otherwise. Callers hold bucketLk.
func (i *Index) getRecordsFromBucket(bucket BucketIndex) (RecordList, error) {
	cached, indexOffset, recordListSize, err := i.readBucketInfo(bucket)
	if err != nil {
		return nil, err
	}
	if cached != nil {
		return NewRecordListRaw(cached), nil
	}
	return i.readDiskBuckets(bucket, indexOffset, recordListSize)
}
// flushBucket writes a single record list to the index file through the
// buffered writer. The record list is prefixed with the bucket it is in;
// this is needed in order to reconstruct the in-memory buckets from the
// index itself (see scanIndex). It returns the block (offset/size) the
// record list occupies and the amount of work written.
func (i *Index) flushBucket(bucket BucketIndex, newData []byte) (types.Block, types.Work, error) {
	// The size prefix covers the bucket prefix plus the record-list payload.
	newDataSize := make([]byte, SizePrefixSize)
	binary.LittleEndian.PutUint32(newDataSize, uint32(len(newData))+uint32(BucketPrefixSize))
	if _, err := i.writer.Write(newDataSize); err != nil {
		return types.Block{}, 0, err
	}
	bucketPrefixBuffer := make([]byte, BucketPrefixSize)
	binary.LittleEndian.PutUint32(bucketPrefixBuffer, uint32(bucket))
	if _, err := i.writer.Write(bucketPrefixBuffer); err != nil {
		return types.Block{}, 0, err
	}
	if _, err := i.writer.Write(newData); err != nil {
		return types.Block{}, 0, err
	}
	length := i.length
	toWrite := types.Position(len(newData) + BucketPrefixSize + SizePrefixSize)
	i.length += toWrite
	// Fsyncs are expensive, so syncing the file is deferred to Sync().
	// The returned block points just past the size prefix (at the bucket prefix).
	return types.Block{Offset: length + types.Position(SizePrefixSize),
			Size: types.Size(len(newData) + BucketPrefixSize)},
		types.Work(toWrite), nil
}
// bucketBlock pairs a bucket with the disk block its record list was flushed
// to; commit uses it to update the bucket tables after flushing.
type bucketBlock struct {
	bucket BucketIndex
	blk    types.Block
}
// commit flushes all buffered record lists to the index file and updates the
// in-memory bucket tables with the new on-disk locations. It returns the
// amount of work written.
func (i *Index) commit() (types.Work, error) {
	// Swap the pools so new writes keep accumulating in nextPool while the
	// previous batch (now in curPool) is being flushed without holding the lock.
	i.bucketLk.Lock()
	nextPool := i.curPool
	i.curPool = i.nextPool
	i.nextPool = nextPool
	i.outstandingWork = 0
	i.bucketLk.Unlock()
	if len(i.curPool) == 0 {
		return 0, nil
	}
	blks := make([]bucketBlock, 0, len(i.curPool))
	var work types.Work
	// Write each buffered record list to disk, remembering where it landed.
	for bucket, data := range i.curPool {
		blk, newWork, err := i.flushBucket(bucket, data)
		if err != nil {
			return 0, err
		}
		blks = append(blks, bucketBlock{bucket, blk})
		work += newWork
	}
	// Point the bucket tables at the freshly written record lists.
	i.bucketLk.Lock()
	defer i.bucketLk.Unlock()
	for _, blk := range blks {
		bucket := blk.bucket
		pos := blk.blk.Offset
		size := blk.blk.Size
		if err := i.buckets.Put(bucket, pos); err != nil {
			return 0, err
		}
		if err := i.sizeBuckets.Put(bucket, size); err != nil {
			return 0, err
		}
	}
	return work, nil
}
// readBucketInfo looks a bucket up first in the in-memory pools and then in
// the bucket tables. It returns either the cached record-list bytes, or the
// file offset and size needed to read them from disk.
func (i *Index) readBucketInfo(bucket BucketIndex) ([]byte, types.Position, types.Size, error) {
	if data, ok := i.nextPool[bucket]; ok {
		return data, 0, 0, nil
	}
	if data, ok := i.curPool[bucket]; ok {
		return data, 0, 0, nil
	}
	indexOffset, err := i.buckets.Get(bucket)
	if err != nil {
		return nil, 0, 0, err
	}
	recordListSize, err := i.sizeBuckets.Get(bucket)
	if err != nil {
		return nil, 0, 0, err
	}
	return nil, indexOffset, recordListSize, nil
}
// readDiskBuckets reads the record list for a bucket from the index file.
// A zero offset means the bucket was never written, in which case nil is
// returned without an error.
func (i *Index) readDiskBuckets(bucket BucketIndex, indexOffset types.Position, recordListSize types.Size) (RecordList, error) {
	if indexOffset == 0 {
		return nil, nil
	}
	data := make([]byte, recordListSize)
	if _, err := i.file.ReadAt(data, int64(indexOffset)); err != nil {
		return nil, err
	}
	return NewRecordList(data), nil
}
// Get the file offset in the primary storage of a key. The second return
// value reports whether the key was found.
func (i *Index) Get(key []byte) (types.Block, bool, error) {
	// Get record list and bucket index
	bucket, err := i.getBucketIndex(key)
	if err != nil {
		return types.Block{}, false, err
	}
	// Here we just need an RLock, there won't be changes over buckets.
	// This is why we don't use getRecordsFromBucket, to wrap only this
	// line of code in the lock.
	i.bucketLk.RLock()
	cached, indexOffset, recordListSize, err := i.readBucketInfo(bucket)
	i.bucketLk.RUnlock()
	if err != nil {
		return types.Block{}, false, err
	}
	var records RecordList
	if cached != nil {
		records = NewRecordListRaw(cached)
	} else {
		records, err = i.readDiskBuckets(bucket, indexOffset, recordListSize)
		if err != nil {
			return types.Block{}, false, err
		}
	}
	if records == nil {
		// Empty bucket: the key cannot be present.
		return types.Block{}, false, nil
	}
	// The key doesn't need the prefix that was used to find the right bucket. For simplicity
	// only full bytes are trimmed off.
	indexKey := StripBucketPrefix(key, i.sizeBits)
	fileOffset, found := records.Get(indexKey)
	return fileOffset, found, nil
}
// Flush writes all outstanding record lists to the index file and returns
// the amount of work flushed.
func (i *Index) Flush() (types.Work, error) {
	return i.commit()
}
// Sync flushes the buffered writer and fsyncs the index file, then drops the
// pool of record lists that are now safely persisted on disk.
func (i *Index) Sync() error {
	if err := i.writer.Flush(); err != nil {
		return err
	}
	if err := i.file.Sync(); err != nil {
		return err
	}
	i.bucketLk.Lock()
	i.curPool = make(bucketPool, BucketPoolSize)
	i.bucketLk.Unlock()
	return nil
}
// Close closes the underlying index file.
func (i *Index) Close() error {
	return i.file.Close()
}

// OutstandingWork returns the amount of work accumulated since the last
// commit, i.e. data buffered in memory but not yet flushed to disk.
func (i *Index) OutstandingWork() types.Work {
	i.bucketLk.RLock()
	defer i.bucketLk.RUnlock()
	return i.outstandingWork
}
// An iterator over index entries.
//
// On each iteration it returns the position of the record within the index
// together with the raw record list data.
type IndexIter struct {
	// The index data we are iterating over
	index io.Reader
	// The current position within the index
	pos types.Position
}

// NewIndexIter creates an iterator over the given index data, starting at
// the given position (typically just past the header, see scanIndex).
func NewIndexIter(index io.Reader, pos types.Position) *IndexIter {
	return &IndexIter{index, pos}
}
// Next returns the raw record-list data of the next entry together with its
// position within the index. The boolean result is true once the underlying
// reader is exhausted.
func (iter *IndexIter) Next() ([]byte, types.Position, bool, error) {
	size, err := ReadSizePrefix(iter.index)
	if err == io.EOF {
		// Clean end of the index data.
		return nil, 0, true, nil
	}
	if err != nil {
		return nil, 0, false, err
	}
	pos := iter.pos + types.Position(SizePrefixSize)
	iter.pos += types.Position(SizePrefixSize) + types.Position(size)
	data := make([]byte, size)
	if _, err := io.ReadFull(iter.index, data); err != nil {
		return nil, 0, false, err
	}
	return data, pos, false, nil
}
// ReadSizePrefix reads only the little-endian size prefix of the data from
// the reader and returns it.
func ReadSizePrefix(reader io.Reader) (uint32, error) {
	buf := make([]byte, SizePrefixSize)
	if _, err := io.ReadFull(reader, buf); err != nil {
		return 0, err
	}
	return binary.LittleEndian.Uint32(buf), nil
}
// Returns the headet together with the bytes read.
//
// The bytes read include all the bytes that were read by this function. Hence it also includes
// the 4-byte size prefix of the header besides the size of the header data itself.
func ReadHeader(file *os.File) (Header, types.Position, error) {
headerSizeBuffer := make([]byte, SizePrefixSize)
_, err := io.ReadFull(file, headerSizeBuffer)
if err != nil {
return Header{}, 0, err
}
headerSize := binary.LittleEndian.Uint32(headerSizeBuffer)
headerBytes := make([]byte, headerSize)
_, err = io.ReadFull(file, headerBytes)
if err != nil {
return Header{}, 0, err
}
return FromBytes(headerBytes), types.Position(SizePrefixSize) + types.Position(headerSize), nil
}
// max returns the larger of two ints.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// min returns the smaller of two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// FirstNonCommonByte returns the position of the first byte at which the two
// slices differ.
//
// If one slice is a full prefix of the other (or both are equal), the
// returned index is the length of the shorter slice — i.e. it may point past
// the end of the inputs.
func FirstNonCommonByte(aa []byte, bb []byte) int {
	i := 0
	for i < len(aa) && i < len(bb) && aa[i] == bb[i] {
		i++
	}
	return i
}
// openFile opens a file with the given flags and permissions. The advice
// parameter is currently unused: the unix.Fadvise call below is disabled,
// presumably to drop the platform-specific dependency — TODO confirm.
func openFile(name string, flag int, perm os.FileMode, advice int) (*os.File, error) {
	f, err := os.OpenFile(name, flag, perm)
	if err != nil {
		return nil, fmt.Errorf("open: %w", err)
	}
	/*
		err = unix.Fadvise(int(f.Fd()), 0, 0, advice)
		if err != nil {
			return nil, fmt.Errorf("fadvise: %w", err)
		}
	*/
	return f, nil
}
// openFileRandom opens a file intended for random access.
// The FADV_RANDOM advice is currently disabled (see openFile).
func openFileRandom(name string, flag int) (*os.File, error) {
	// return openFile(name, flag, 0o644, unix.FADV_RANDOM)
	return openFile(name, flag, 0o644, 0)
}
func openFileForScan(name string) (*os.File, error) {
// return openFile(name, os.O_RDONLY, 0o644, unix.FADV_SEQUENTIAL)
return openFile(name, os.O_RDONLY, 0o644, 0)
} | store/index/index.go | 0.660829 | 0.499634 | index.go | starcoder |
package marketplace
import (
"sort"
"strings"
"time"
)
// Subscription is the billing period of a sale: either an Annual or a
// Monthly subscription (see AnnualSubscription and MonthlySubscription).
type Subscription string
// Abbrev returns a single-letter abbreviation for the subscription period:
// "y" for annual, "m" for monthly, and "?" for anything else.
func (s Subscription) Abbrev() string {
	switch s {
	case AnnualSubscription:
		return "y"
	case MonthlySubscription:
		return "m"
	default:
		return "?"
	}
}
// AccountType is either a Personal or an Organization account
type AccountType string

// Sales is a slice of sales. It offers a wide range of methods to aggregate the data.
type Sales []Sale

// CustomersMap maps a customer ID to customer
type CustomersMap map[CustomerID]Customer

// CustomerDateMap maps a customer ID to a YearMonthDay (e.g. a first or
// last purchase date, see CustomersFirstPurchase / CustomersLastPurchase).
type CustomerDateMap map[CustomerID]YearMonthDay

// Well-known subscription periods and account types as they appear in the
// sales data.
const (
	AnnualSubscription Subscription = "Annual"
	MonthlySubscription Subscription = "Monthly"
	AccountTypePersonal AccountType = "Personal"
	AccountTypeOrganization AccountType = "Organization"
)
// FilterBy returns a new Sales slice containing all items for which the keep
// function returned true. The receiver is left unmodified.
func (s Sales) FilterBy(keep func(Sale) bool) Sales {
	var kept Sales
	for _, sale := range s {
		if keep(sale) {
			kept = append(kept, sale)
		}
	}
	return kept
}
// ByDay returns the sales made on the calendar day of the given time.
func (s Sales) ByDay(date time.Time) Sales {
	year, month, day := date.Date()
	return s.FilterBy(func(sale Sale) bool {
		return sale.Date.Year() == year &&
			sale.Date.Month() == month &&
			sale.Date.Day() == day
	})
}
// ByYearMonthDay returns the sales made on exactly the given day.
func (s Sales) ByYearMonthDay(day YearMonthDay) Sales {
	return s.FilterBy(func(sale Sale) bool {
		return sale.Date.Year() == day.Year() &&
			sale.Date.Month() == day.Month() &&
			sale.Date.Day() == day.Day()
	})
}
// ByYearMonth returns the sales made in the given month of the given year.
func (s Sales) ByYearMonth(month YearMonth) Sales {
	return s.FilterBy(func(sale Sale) bool {
		return sale.Date.Year() == month.Year() && sale.Date.Month() == month.Month()
	})
}
// ByWeek returns the sales made in the given ISO week of the given year.
func (s Sales) ByWeek(year int, isoWeek int) Sales {
	return s.FilterBy(func(sale Sale) bool {
		saleYear, saleWeek := sale.Date.AsDate().ISOWeek()
		return saleYear == year && saleWeek == isoWeek
	})
}
// ByDateRange returns a new Sales slice, which contains all items bought in the given date range (inclusive)
func (s Sales) ByDateRange(begin, end YearMonthDay) Sales {
	// NOTE(review): After() compares strictly, so a sale whose date equals
	// begin.AsDate() appears to be excluded despite the "(inclusive)" claim —
	// verify against YearMonthDay.AsDate semantics.
	return s.After(begin.AsDate()).Before(end.AsDate().AddDate(0, 0, 1))
}
// ByYear returns the sales made in the given calendar year.
func (s Sales) ByYear(year int) Sales {
	return s.FilterBy(func(sale Sale) bool {
		return sale.Date.Year() == year
	})
}

// ByMonth returns the sales made in the given month of the given year.
func (s Sales) ByMonth(year int, month time.Month) Sales {
	return s.FilterBy(func(sale Sale) bool {
		return sale.Date.Year() == year && sale.Date.Month() == month
	})
}
// Before returns the sales dated strictly before the given point in time.
func (s Sales) Before(date time.Time) Sales {
	return s.FilterBy(func(sale Sale) bool {
		return sale.Date.AsDate().Before(date)
	})
}

// After returns the sales dated strictly after the given point in time.
func (s Sales) After(date time.Time) Sales {
	return s.FilterBy(func(sale Sale) bool {
		return sale.Date.AsDate().After(date)
	})
}
// AtOrAfter returns the sales made on the day of the given time or on any
// later day.
func (s Sales) AtOrAfter(date time.Time) Sales {
	ref := NewYearMonthDayByDate(date)
	return s.FilterBy(func(sale Sale) bool {
		return sale.Date.Equals(ref) || sale.Date.IsAfter(ref)
	})
}
// ByMonthlySubscription returns the sales billed with a monthly period.
func (s Sales) ByMonthlySubscription() Sales {
	return s.FilterBy(func(sale Sale) bool {
		return sale.Period == MonthlySubscription
	})
}

// ByAnnualSubscription returns the sales billed with an annual period.
func (s Sales) ByAnnualSubscription() Sales {
	return s.FilterBy(func(sale Sale) bool {
		return sale.Period == AnnualSubscription
	})
}

// ByFreeSubscription returns the sales that are free subscriptions, as
// reported by Sale.IsFreeSubscription.
func (s Sales) ByFreeSubscription() Sales {
	return s.FilterBy(Sale.IsFreeSubscription)
}
// ByCustomer returns the sales made by the given customer, matched by ID.
func (s Sales) ByCustomer(c Customer) Sales {
	id := c.ID
	return s.FilterBy(func(sale Sale) bool {
		return sale.Customer.ID == id
	})
}
// ByAccountType returns the sales made by customers of the given account
// type (personal or organization). The parameter was previously named
// "subscription", which misrepresented its type.
func (s Sales) ByAccountType(accountType AccountType) Sales {
	return s.FilterBy(func(sale Sale) bool {
		return sale.Customer.Type == accountType
	})
}
// ByNewCustomers returns the sales made by customers who had not bought
// anything in allPreviousSales before the reference date.
func (s Sales) ByNewCustomers(allPreviousSales Sales, referenceDate time.Time) Sales {
	known := allPreviousSales.Before(referenceDate).CustomersMap()
	return s.FilterBy(func(sale Sale) bool {
		_, alreadyKnown := known[sale.Customer.ID]
		return !alreadyKnown
	})
}
// ByReturnedCustomers returns the sales made by customers who had previously
// churned.
func (s Sales) ByReturnedCustomers(previouslyChurned ChurnedCustomerList) Sales {
	churned := previouslyChurned.Customers().AsMap()
	return s.FilterBy(func(sale Sale) bool {
		_, wasChurned := churned[sale.Customer.ID]
		return wasChurned
	})
}
// CustomersMap returns the distinct customers appearing in the sales, keyed
// by customer ID.
func (s Sales) CustomersMap() CustomersMap {
	m := make(CustomersMap)
	for _, sale := range s {
		if _, seen := m[sale.Customer.ID]; !seen {
			m[sale.Customer.ID] = sale.Customer
		}
	}
	return m
}
// Customers returns a unique set of customers found in the sales. The order
// of the result is not deterministic (map iteration).
func (s Sales) Customers() Customers {
	var unique Customers
	for _, customer := range s.CustomersMap() {
		unique = append(unique, customer)
	}
	return unique
}
// TotalSumUSD returns the sum of all sale amounts, in USD.
func (s Sales) TotalSumUSD() Amount {
	var total Amount
	for _, sale := range s {
		total += sale.AmountUSD
	}
	return total
}
// FeeSumUSD returns the sum of all marketplace fees, in USD.
func (s Sales) FeeSumUSD() Amount {
	var total Amount
	for _, sale := range s {
		total += sale.FeeAmountUSD()
	}
	return total
}
// PaidOutUSD returns the net revenue in USD: total sales minus fees.
func (s Sales) PaidOutUSD() Amount {
	return s.TotalSumUSD() - s.FeeSumUSD()
}

// CustomerCount returns the number of distinct customers in the sales.
func (s Sales) CustomerCount() int {
	return len(s.CustomerSalesMap())
}
// CustomerSalesMap aggregates the sales per customer, keyed by customer ID.
func (s Sales) CustomerSalesMap() CustomerSalesMap {
	byCustomer := make(CustomerSalesMap)
	for _, sale := range s {
		entry, ok := byCustomer[sale.Customer.ID]
		if !ok {
			entry = &CustomerSales{
				Customer: sale.Customer,
				Sales:    Sales{},
			}
			byCustomer[sale.Customer.ID] = entry
		}
		entry.Sales = append(entry.Sales, sale)
		entry.TotalUSD += sale.AmountUSD
	}
	return byCustomer
}
// CustomerSales returns the per-customer sales, sorted by total USD volume
// (highest first), with the customer name as a tiebreaker.
func (s Sales) CustomerSales() []*CustomerSales {
	var list []*CustomerSales
	for _, entry := range s.CustomerSalesMap() {
		list = append(list, entry)
	}
	sort.SliceStable(list, func(i, j int) bool {
		if list[i].TotalUSD != list[j].TotalUSD {
			return list[i].TotalUSD > list[j].TotalUSD
		}
		return list[i].Customer.Name < list[j].Customer.Name
	})
	return list
}
// CountrySales returns the sales aggregated per customer country, sorted by
// total USD volume (highest first), with the country name as a tiebreaker.
func (s Sales) CountrySales() []*CountrySales {
	byCountry := make(map[string]*CountrySales)
	for _, sale := range s {
		entry, ok := byCountry[sale.Customer.Country]
		if !ok {
			entry = &CountrySales{
				Country: sale.Customer.Country,
				Sales:   Sales{},
			}
			byCountry[sale.Customer.Country] = entry
		}
		entry.Sales = append(entry.Sales, sale)
		entry.TotalUSD += sale.AmountUSD
	}
	var list []*CountrySales
	for _, entry := range byCountry {
		list = append(list, entry)
	}
	sort.SliceStable(list, func(i, j int) bool {
		if list[i].TotalUSD != list[j].TotalUSD {
			return list[i].TotalUSD > list[j].TotalUSD
		}
		return list[i].Country < list[j].Country
	})
	return list
}
// SubscriptionSales returns the sales split into an Annual and a Monthly
// group, ordered by total USD volume (highest first).
func (s Sales) SubscriptionSales() []GroupedSales {
	annual := s.ByAnnualSubscription()
	monthly := s.ByMonthlySubscription()
	result := []GroupedSales{
		{
			Name:     "Annual",
			TotalUSD: annual.TotalSumUSD(),
			Sales:    annual,
		},
		{
			Name:     "Monthly",
			TotalUSD: monthly.TotalSumUSD(),
			Sales:    monthly,
		},
	}
	sort.SliceStable(result, func(i, j int) bool {
		return result[i].TotalUSD > result[j].TotalUSD
	})
	return result
}
// GroupByWeekday returns the sales grouped by the weekday of the purchase.
// The result always has 7 entries, indexed Sunday (0) through Saturday (6).
func (s Sales) GroupByWeekday() []GroupedSales {
	groups := make([]GroupedSales, 7)
	for day := time.Sunday; day <= time.Saturday; day++ {
		groups[int(day)] = GroupedSales{Name: day.String()}
	}
	for _, sale := range s {
		idx := int(sale.Date.AsDate().Weekday())
		groups[idx].Sales = append(groups[idx].Sales, sale)
		groups[idx].TotalUSD += sale.AmountUSD
	}
	return groups
}
// CustomerTypeSales returns the sales split into an Organization and a
// Person group, ordered by total USD volume (highest first).
func (s Sales) CustomerTypeSales() []GroupedSales {
	organizations := s.ByAccountType(AccountTypeOrganization)
	persons := s.ByAccountType(AccountTypePersonal)
	result := []GroupedSales{
		{
			Name:     "Organization",
			TotalUSD: organizations.TotalSumUSD(),
			Sales:    organizations,
		},
		{
			Name:     "Person",
			TotalUSD: persons.TotalSumUSD(),
			Sales:    persons,
		},
	}
	sort.SliceStable(result, func(i, j int) bool {
		return result[i].TotalUSD > result[j].TotalUSD
	})
	return result
}
// GroupByCurrency returns the sales aggregated per currency, sorted by their
// USD total (highest first), with the currency code as a tiebreaker.
func (s Sales) GroupByCurrency() []*CurrencySales {
	byCurrency := make(map[Currency]*CurrencySales)
	for _, sale := range s {
		entry, ok := byCurrency[sale.Currency]
		if !ok {
			entry = &CurrencySales{Currency: sale.Currency}
			byCurrency[sale.Currency] = entry
		}
		entry.TotalSales += sale.Amount
		entry.TotalSalesUSD += sale.AmountUSD
	}
	var list []*CurrencySales
	for _, entry := range byCurrency {
		list = append(list, entry)
	}
	sort.SliceStable(list, func(i, j int) bool {
		if list[i].TotalSalesUSD != list[j].TotalSalesUSD {
			return list[i].TotalSalesUSD > list[j].TotalSalesUSD
		}
		return list[i].Currency < list[j].Currency
	})
	return list
}
// GroupByDate returns the sales grouped by year-month-day. newestDateFirst
// selects descending (true) or ascending (false) date order.
func (s Sales) GroupByDate(newestDateFirst bool) []DateGroupedSales {
	groups := make(map[YearMonthDay]Sales)
	for _, sale := range s {
		groups[sale.Date] = append(groups[sale.Date], sale)
	}
	var groupedSales []DateGroupedSales
	for date, sales := range groups {
		groupedSales = append(
			groupedSales,
			DateGroupedSales{
				Date:     date,
				Name:     date.String(),
				TotalUSD: sales.TotalSumUSD(),
				Sales:    sales,
			},
		)
	}
	// The comparator must be a strict "less": the previous implementation
	// returned true for equal dates in ascending order (!IsAfter), which
	// violates the contract required by sort.SliceStable.
	sort.SliceStable(groupedSales, func(i, j int) bool {
		a := groupedSales[i]
		b := groupedSales[j]
		if newestDateFirst {
			return a.Date.IsAfter(b.Date)
		}
		return b.Date.IsAfter(a.Date)
	})
	return groupedSales
}
// SortedByDate stably sorts the sales by date, oldest first, and returns the
// slice. The sort happens in place on the receiver's backing array.
func (s Sales) SortedByDate() Sales {
	// Use a strict "less" comparator: the previous !IsAfter version returned
	// true for equal dates, violating the sort comparator contract.
	sort.SliceStable(s, func(i, j int) bool {
		return s[i].Date.IsBefore(s[j].Date)
	})
	return s
}
// Reversed returns a new slice containing the sales in reverse order; the
// receiver is left unmodified.
func (s Sales) Reversed() Sales {
	reversed := make(Sales, len(s))
	for idx := range s {
		reversed[len(s)-1-idx] = s[idx]
	}
	return reversed
}
// CustomersFirstPurchase maps every customer ID to the date of that
// customer's earliest purchase found in the sales.
func (s Sales) CustomersFirstPurchase() CustomerDateMap {
	earliest := make(CustomerDateMap)
	for _, sale := range s {
		current, known := earliest[sale.Customer.ID]
		if !known || sale.Date.IsBefore(current) {
			earliest[sale.Customer.ID] = sale.Date
		}
	}
	return earliest
}
// CustomersLastPurchase maps every customer ID to the date of that
// customer's latest purchase found in the sales.
func (s Sales) CustomersLastPurchase() CustomerDateMap {
	latest := make(CustomerDateMap)
	for _, sale := range s {
		current, known := latest[sale.Customer.ID]
		if !known || sale.Date.IsAfter(current) {
			latest[sale.Customer.ID] = sale.Date
		}
	}
	return latest
}
func (m CustomersMap) Without(customersMap CustomersMap) CustomersMap {
result := make(CustomersMap)
for k, v := range m {
_, found := customersMap[k]
if !found {
result[k] = v
}
}
return result
} | marketplace/sales.go | 0.790247 | 0.496338 | sales.go | starcoder |
package cnns
import (
"fmt"
"github.com/LdDl/cnns/tensor"
"gonum.org/v1/gonum/mat"
)
// ReLULayer Rectified Linear Unit layer (activation: max(0, x))
/*
	Oj - Input data
	Ok - Output data
	LocalDelta - Incoming gradients*weights (backpropagation)
*/
type ReLULayer struct {
	Oj         *mat.Dense // raw input of the layer
	Ok         *mat.Dense // activated output: max(0, Oj) element-wise
	LocalDelta *mat.Dense // gradients passed back to the previous layer
	OutputSize *tensor.TDsize // output dimensions (same as the input for ReLU)
	inputSize  *tensor.TDsize // input dimensions
	trainMode  bool // unused in this file; presumably toggles training behavior — TODO confirm
}
// NewReLULayer - Constructor for new ReLU layer. You need to specify input size
/*
	inSize - input layer's size
*/
func NewReLULayer(inSize *tensor.TDsize) Layer {
	// Matrices are flattened to (X*Z) rows by Y columns.
	rows, cols := inSize.X*inSize.Z, inSize.Y
	return &ReLULayer{
		inputSize:  inSize,
		Oj:         mat.NewDense(rows, cols, nil),
		Ok:         mat.NewDense(rows, cols, nil),
		LocalDelta: mat.NewDense(rows, cols, nil),
		OutputSize: &tensor.TDsize{X: inSize.X, Y: inSize.Y, Z: inSize.Z},
		trainMode:  false,
	}
}
// SetCustomWeights Set user's weights for ReLU layer. A ReLU layer has no
// weights, so this is a no-op that only prints a notice.
func (relu *ReLULayer) SetCustomWeights(t []*mat.Dense) {
	fmt.Println("There are no weights for ReLU layer")
}

// GetInputSize Returns dimensions of incoming data for ReLU layer
func (relu *ReLULayer) GetInputSize() *tensor.TDsize {
	return relu.inputSize
}

// GetOutputSize Returns output size (dimensions) of ReLU layer
func (relu *ReLULayer) GetOutputSize() *tensor.TDsize {
	return relu.OutputSize
}

// GetActivatedOutput Returns ReLU layer's output
func (relu *ReLULayer) GetActivatedOutput() *mat.Dense {
	return relu.Ok
}

// GetWeights Returns ReLU layer's weights; always nil since the layer has none.
func (relu *ReLULayer) GetWeights() []*mat.Dense {
	fmt.Println("There are no weights for ReLU layer")
	return nil
}

// GetGradients Returns ReLU layer's gradients
func (relu *ReLULayer) GetGradients() *mat.Dense {
	return relu.LocalDelta
}
// FeedForward - Feed data to ReLU layer. Stores the raw input and computes
// the activated output (max(0, x)) into Ok. Always returns nil.
func (relu *ReLULayer) FeedForward(t *mat.Dense) error {
	relu.Oj = t
	relu.doActivation()
	return nil
}
// doActivation applies max(0, x) element-wise to the raw input data and
// writes the result into the output matrix.
func (relu *ReLULayer) doActivation() {
	in := relu.Oj.RawMatrix().Data
	out := relu.Ok.RawMatrix().Data
	for idx, v := range in {
		// Keep the `< 0` test (rather than `> 0`) so NaN values pass
		// through unchanged, as in the original implementation.
		if v < 0 {
			out[idx] = 0
		} else {
			out[idx] = v
		}
	}
}
// CalculateGradients computes the ReLU backward pass: the incoming error is
// passed through where the input was non-negative and zeroed elsewhere.
// Always returns nil.
func (relu *ReLULayer) CalculateGradients(errorsDense *mat.Dense) error {
	in := relu.Oj.RawMatrix().Data
	deltas := relu.LocalDelta.RawMatrix().Data
	errs := errorsDense.RawMatrix().Data
	for idx, v := range in {
		if v < 0 {
			deltas[idx] = 0
		} else {
			deltas[idx] = errs[idx]
		}
	}
	return nil
}
// UpdateWeights Just to point, that ReLU layer does NOT update weights.
func (relu *ReLULayer) UpdateWeights(lp *LearningParams) {
	// There are no weights to update for ReLU layer
}

// PrintOutput Pretty print ReLU layer's output
func (relu *ReLULayer) PrintOutput() {
	// NOTE(review): only a placeholder message is printed; the actual output
	// matrix is not rendered.
	fmt.Println("Printing ReLU Layer output...")
}

// PrintWeights Just to point, that ReLU layer has no weights.
func (relu *ReLULayer) PrintWeights() {
	fmt.Println("There are no weights for ReLU layer")
}

// SetActivationFunc Set activation function for layer; a no-op because the
// ReLU activation is fixed.
func (relu *ReLULayer) SetActivationFunc(f func(v float64) float64) {
	// Nothing here. Just for interface.
	fmt.Println("You can not set activation function for ReLU layer")
}

// SetActivationDerivativeFunc Set derivative of activation function; a no-op
// because the ReLU derivative is fixed.
func (relu *ReLULayer) SetActivationDerivativeFunc(f func(v float64) float64) {
	// Nothing here. Just for interface.
	fmt.Println("You can not set derivative of activation function for ReLU layer")
}

// GetStride Returns stride of layer; always 0, ReLU has no stride.
func (relu *ReLULayer) GetStride() int {
	return 0
}
// GetType Returns "relu" as layer's type
func (relu *ReLULayer) GetType() string {
return "relu"
} | relu_layer.go | 0.738858 | 0.419588 | relu_layer.go | starcoder |
package drawing
import (
"math"
)
// Matrix represents an affine transformation as the six coefficients
// [a, b, c, d, e, f] such that x' = a*x + c*y + e and y' = b*x + d*y + f
// (see TransformPoint).
type Matrix [6]float64

// epsilon is a small tolerance for floating-point comparisons.
// (Not used in this part of the file.)
const (
	epsilon = 1e-6
)
// Determinant computes the determinant of the matrix (of its 2x2 linear
// part; the translation components do not contribute).
func (tr Matrix) Determinant() float64 {
	return tr[0]*tr[3] - tr[1]*tr[2]
}
// Transform applies the affine transformation to the given flat list of
// (x, y) coordinate pairs, modifying them in place.
func (tr Matrix) Transform(points []float64) {
	for i := 0; i+1 < len(points); i += 2 {
		x, y := points[i], points[i+1]
		points[i] = x*tr[0] + y*tr[2] + tr[4]
		points[i+1] = x*tr[1] + y*tr[3] + tr[5]
	}
}
// TransformPoint applies the transformation matrix to a single point and
// returns the transformed coordinates.
func (tr Matrix) TransformPoint(x, y float64) (xres, yres float64) {
	xres = x*tr[0] + y*tr[2] + tr[4]
	yres = x*tr[1] + y*tr[3] + tr[5]
	return xres, yres
}
// minMax returns the two arguments ordered as (smaller, larger).
func minMax(x, y float64) (min, max float64) {
	if x <= y {
		return x, y
	}
	return y, x
}
// TransformRectangle applies the transformation matrix to the rectangle given
// by its min (x0, y0) and max (x2, y2) corners and returns the axis-aligned
// bounding box of the four transformed corners.
func (tr Matrix) TransformRectangle(x0, y0, x2, y2 float64) (nx0, ny0, nx2, ny2 float64) {
	// Transform all four corners, normalize each opposing pair, then take the
	// overall min/max to form the bounding box.
	points := []float64{x0, y0, x2, y0, x2, y2, x0, y2}
	tr.Transform(points)
	points[0], points[2] = minMax(points[0], points[2])
	points[4], points[6] = minMax(points[4], points[6])
	points[1], points[3] = minMax(points[1], points[3])
	points[5], points[7] = minMax(points[5], points[7])
	nx0 = math.Min(points[0], points[4])
	ny0 = math.Min(points[1], points[5])
	nx2 = math.Max(points[2], points[6])
	ny2 = math.Max(points[3], points[7])
	return nx0, ny0, nx2, ny2
}
// InverseTransform applies the inverse of the affine transformation to the
// given flat list of (x, y) pairs, modifying them in place.
func (tr Matrix) InverseTransform(points []float64) {
	d := tr.Determinant() // matrix determinant
	for i := 0; i+1 < len(points); i += 2 {
		dx := points[i] - tr[4]
		dy := points[i+1] - tr[5]
		points[i] = (dx*tr[3] - dy*tr[2]) / d
		points[i+1] = (dy*tr[0] - dx*tr[1]) / d
	}
}
// InverseTransformPoint applies the inverse transformation to a single point
// and returns the untransformed coordinates.
func (tr Matrix) InverseTransformPoint(x, y float64) (xres, yres float64) {
	d := tr.Determinant() // matrix determinant
	dx := x - tr[4]
	dy := y - tr[5]
	xres = (dx*tr[3] - dy*tr[2]) / d
	yres = (dy*tr[0] - dx*tr[1]) / d
	return xres, yres
}
// VectorTransform applies the transformation matrix to points without using the translation parameter of the affine matrix.
// It modify the points passed in parameter.
func (tr Matrix) VectorTransform(points []float64) {
for i, j := 0, 1; j < len(points); i, j = i+2, j+2 {
x := points[i]
y := points[j]
points[i] = x*tr[0] + y*tr[2]
points[j] = x*tr[1] + y*tr[3]
}
}
// NewIdentityMatrix creates an identity transformation matrix.
func NewIdentityMatrix() Matrix {
return Matrix{1, 0, 0, 1, 0, 0}
}
// NewTranslationMatrix creates a transformation matrix with a translation tx and ty translation parameter
func NewTranslationMatrix(tx, ty float64) Matrix {
return Matrix{1, 0, 0, 1, tx, ty}
}
// NewScaleMatrix creates a transformation matrix with a sx, sy scale factor
func NewScaleMatrix(sx, sy float64) Matrix {
return Matrix{sx, 0, 0, sy, 0, 0}
}
// NewRotationMatrix creates a rotation transformation matrix. angle is in radian
func NewRotationMatrix(angle float64) Matrix {
c := math.Cos(angle)
s := math.Sin(angle)
return Matrix{c, s, -s, c, 0, 0}
}
// NewMatrixFromRects creates a transformation matrix, combining a scale and a translation, that transform rectangle1 into rectangle2.
func NewMatrixFromRects(rectangle1, rectangle2 [4]float64) Matrix {
xScale := (rectangle2[2] - rectangle2[0]) / (rectangle1[2] - rectangle1[0])
yScale := (rectangle2[3] - rectangle2[1]) / (rectangle1[3] - rectangle1[1])
xOffset := rectangle2[0] - (rectangle1[0] * xScale)
yOffset := rectangle2[1] - (rectangle1[1] * yScale)
return Matrix{xScale, 0, 0, yScale, xOffset, yOffset}
}
// Inverse computes the inverse matrix
func (tr *Matrix) Inverse() {
d := tr.Determinant() // matrix determinant
tr0, tr1, tr2, tr3, tr4, tr5 := tr[0], tr[1], tr[2], tr[3], tr[4], tr[5]
tr[0] = tr3 / d
tr[1] = -tr1 / d
tr[2] = -tr2 / d
tr[3] = tr0 / d
tr[4] = (tr2*tr5 - tr3*tr4) / d
tr[5] = (tr1*tr4 - tr0*tr5) / d
}
// Copy copies the matrix.
func (tr Matrix) Copy() Matrix {
var result Matrix
copy(result[:], tr[:])
return result
}
// Compose multiplies trToConcat x tr
func (tr *Matrix) Compose(trToCompose Matrix) {
tr0, tr1, tr2, tr3, tr4, tr5 := tr[0], tr[1], tr[2], tr[3], tr[4], tr[5]
tr[0] = trToCompose[0]*tr0 + trToCompose[1]*tr2
tr[1] = trToCompose[1]*tr3 + trToCompose[0]*tr1
tr[2] = trToCompose[2]*tr0 + trToCompose[3]*tr2
tr[3] = trToCompose[3]*tr3 + trToCompose[2]*tr1
tr[4] = trToCompose[4]*tr0 + trToCompose[5]*tr2 + tr4
tr[5] = trToCompose[5]*tr3 + trToCompose[4]*tr1 + tr5
}
// Scale adds a scale to the matrix
func (tr *Matrix) Scale(sx, sy float64) {
tr[0] = sx * tr[0]
tr[1] = sx * tr[1]
tr[2] = sy * tr[2]
tr[3] = sy * tr[3]
}
// Translate adds a translation to the matrix
func (tr *Matrix) Translate(tx, ty float64) {
tr[4] = tx*tr[0] + ty*tr[2] + tr[4]
tr[5] = ty*tr[3] + tx*tr[1] + tr[5]
}
// Rotate adds a rotation to the matrix.
func (tr *Matrix) Rotate(radians float64) {
c := math.Cos(radians)
s := math.Sin(radians)
t0 := c*tr[0] + s*tr[2]
t1 := s*tr[3] + c*tr[1]
t2 := c*tr[2] - s*tr[0]
t3 := c*tr[3] - s*tr[1]
tr[0] = t0
tr[1] = t1
tr[2] = t2
tr[3] = t3
}
// GetTranslation gets the matrix traslation.
func (tr Matrix) GetTranslation() (x, y float64) {
return tr[4], tr[5]
}
// GetScaling gets the matrix scaling.
func (tr Matrix) GetScaling() (x, y float64) {
return tr[0], tr[3]
}
// GetScale computes a scale for the matrix
func (tr Matrix) GetScale() float64 {
x := 0.707106781*tr[0] + 0.707106781*tr[1]
y := 0.707106781*tr[2] + 0.707106781*tr[3]
return math.Sqrt(x*x + y*y)
}
// ******************** Testing ********************
// Equals tests if a two transformation are equal. A tolerance is applied when comparing matrix elements.
func (tr Matrix) Equals(tr2 Matrix) bool {
for i := 0; i < 6; i = i + 1 {
if !fequals(tr[i], tr2[i]) {
return false
}
}
return true
}
// IsIdentity tests if a transformation is the identity transformation. A tolerance is applied when comparing matrix elements.
func (tr Matrix) IsIdentity() bool {
return fequals(tr[4], 0) && fequals(tr[5], 0) && tr.IsTranslation()
}
// IsTranslation tests if a transformation is is a pure translation. A tolerance is applied when comparing matrix elements.
func (tr Matrix) IsTranslation() bool {
return fequals(tr[0], 1) && fequals(tr[1], 0) && fequals(tr[2], 0) && fequals(tr[3], 1)
}
// fequals compares two floats. return true if the distance between the two floats is less than epsilon, false otherwise
func fequals(float1, float2 float64) bool {
return math.Abs(float1-float2) <= epsilon
} | vendor/github.com/wcharczuk/go-chart/v2/drawing/matrix.go | 0.905795 | 0.838151 | matrix.go | starcoder |
package energy
import (
"fmt"
"os"
"sort"
pb "github.com/openthread/ot-ns/visualize/grpc/pb"
"github.com/simonlingoogle/go-simplelogger"
)
type EnergyAnalyser struct {
nodes map[int]*NodeEnergy
networkHistory []NetworkConsumption
energyHistoryByNodes [][]*pb.NodeEnergy
title string
}
func (e *EnergyAnalyser) AddNode(nodeID int, timestamp uint64) {
if _, ok := e.nodes[nodeID]; ok {
return
}
e.nodes[nodeID] = newNode(nodeID, timestamp)
}
func (e *EnergyAnalyser) DeleteNode(nodeID int) {
delete(e.nodes, nodeID)
if len(e.nodes) == 0 {
e.ClearEnergyData()
}
}
func (e *EnergyAnalyser) GetNode(nodeID int) *NodeEnergy {
return e.nodes[nodeID]
}
func (e *EnergyAnalyser) GetNetworkEnergyHistory() []NetworkConsumption {
return e.networkHistory
}
func (e *EnergyAnalyser) GetEnergyHistoryByNodes() [][]*pb.NodeEnergy {
return e.energyHistoryByNodes
}
func (e *EnergyAnalyser) GetLatestEnergyOfNodes() []*pb.NodeEnergy {
return e.energyHistoryByNodes[len(e.energyHistoryByNodes)-1]
}
func (e *EnergyAnalyser) StoreNetworkEnergy(timestamp uint64) {
nodesEnergySnapshot := make([]*pb.NodeEnergy, 0, len(e.nodes))
networkSnapshot := NetworkConsumption{
Timestamp: timestamp,
}
netSize := float64(len(e.nodes))
for _, node := range e.nodes {
node.ComputeRadioState(timestamp)
e := &pb.NodeEnergy{
NodeId: int32(node.nodeId),
Disabled: float64(node.radio.SpentDisabled) * RadioDisabledConsumption,
Sleep: float64(node.radio.SpentSleep) * RadioSleepConsumption,
Tx: float64(node.radio.SpentTx) * RadioTxConsumption,
Rx: float64(node.radio.SpentRx) * RadioRxConsumption,
}
networkSnapshot.EnergyConsDisabled += e.Disabled / netSize
networkSnapshot.EnergyConsSleep += e.Sleep / netSize
networkSnapshot.EnergyConsTx += e.Tx / netSize
networkSnapshot.EnergyConsRx += e.Rx / netSize
nodesEnergySnapshot = append(nodesEnergySnapshot, e)
}
e.networkHistory = append(e.networkHistory, networkSnapshot)
e.energyHistoryByNodes = append(e.energyHistoryByNodes, nodesEnergySnapshot)
}
func (e *EnergyAnalyser) SaveEnergyDataToFile(name string, timestamp uint64) {
if name == "" {
if e.title == "" {
name = "energy"
} else {
name = e.title
}
}
//Get current directory and add name to the path
dir, _ := os.Getwd()
//create "energy_results" directory if it does not exist
if _, err := os.Stat(dir + "/energy_results"); os.IsNotExist(err) {
err := os.Mkdir(dir+"/energy_results", 0777)
if err != nil {
simplelogger.Error("Failed to create energy_results directory")
return
}
}
path := fmt.Sprintf("%s/energy_results/%s", dir, name)
fileNodes, err := os.Create(path + "_nodes.txt")
if err != nil {
simplelogger.Error("Error creating file: %s", err)
return
}
defer fileNodes.Close()
fileNetwork, err := os.Create(path + ".txt")
if err != nil {
simplelogger.Error("Error creating file: %s", err)
return
}
defer fileNetwork.Close()
//Save all nodes' energy data to file
e.writeEnergyByNodes(fileNodes, timestamp)
//Save network energy data to file (timestamp converted to milliseconds)
e.writeNetworkEnergy(fileNetwork, timestamp)
}
func (e *EnergyAnalyser) writeEnergyByNodes(fileNodes *os.File, timestamp uint64) {
fmt.Fprintf(fileNodes, "Duration of the simulated network (in milliseconds): %d\n", timestamp/1000)
fmt.Fprintf(fileNodes, "ID\tDisabled (mJ)\tIdle (mJ)\tTransmiting (mJ)\tReceiving (mJ)\n")
sortedNodes := make([]int, 0, len(e.nodes))
for id := range e.nodes {
sortedNodes = append(sortedNodes, id)
}
sort.Ints(sortedNodes)
for _, id := range sortedNodes {
node := e.nodes[id]
fmt.Fprintf(fileNodes, "%d\t%f\t%f\t%f\t%f\n",
id,
float64(node.radio.SpentDisabled)*RadioDisabledConsumption,
float64(node.radio.SpentSleep)*RadioSleepConsumption,
float64(node.radio.SpentTx)*RadioTxConsumption,
float64(node.radio.SpentRx)*RadioRxConsumption,
)
}
}
func (e *EnergyAnalyser) writeNetworkEnergy(fileNetwork *os.File, timestamp uint64) {
fmt.Fprintf(fileNetwork, "Duration of the simulated network (in milliseconds): %d\n", timestamp/1000)
fmt.Fprintf(fileNetwork, "Time (ms)\tDisabled (mJ)\tIdle (mJ)\tTransmiting (mJ)\tReceiving (mJ)\n")
for _, snapshot := range e.networkHistory {
fmt.Fprintf(fileNetwork, "%d\t%f\t%f\t%f\t%f\n",
snapshot.Timestamp/1000,
snapshot.EnergyConsDisabled,
snapshot.EnergyConsSleep,
snapshot.EnergyConsTx,
snapshot.EnergyConsRx,
)
}
}
func (e *EnergyAnalyser) ClearEnergyData() {
simplelogger.Debugf("Node's energy data cleared")
e.networkHistory = make([]NetworkConsumption, 0, 3600)
e.energyHistoryByNodes = make([][]*pb.NodeEnergy, 0, 3600)
}
func (e *EnergyAnalyser) SetTitle(title string) {
e.title = title
}
func NewEnergyAnalyser() *EnergyAnalyser {
ea := &EnergyAnalyser{
nodes: make(map[int]*NodeEnergy),
networkHistory: make([]NetworkConsumption, 0, 3600), //Start with space for 1 sample every 30s for 1 hour = 1*60*60/30 = 3600 samples
energyHistoryByNodes: make([][]*pb.NodeEnergy, 0, 3600),
}
return ea
} | energy/core.go | 0.595257 | 0.412412 | core.go | starcoder |
package main
import (
"errors"
"math"
"math/rand"
)
type point struct {
x, y float32
}
type population struct {
nodes []point
current [][]int
currentFitnesses []float32
fittest []int
dnaLength int
crossoverProbability float32
mutationProbability float32
generation int
}
func makePopulation(size int, crossoverProbability, mutationProbability float32, seed []point) *population {
initialPop := make([][]int, size)
for i := 0; i < size; i++ {
p0individual := make([]int, len(seed))
for j := 0; j < len(seed); j++ {
p0individual[j] = j
}
shuffle(p0individual)
initialPop[i] = p0individual
}
p := &population{
nodes: seed,
current: initialPop,
currentFitnesses: make([]float32, size),
fittest: make([]int, len(seed)),
dnaLength: len(seed),
crossoverProbability: crossoverProbability,
mutationProbability: mutationProbability,
generation: 0,
}
updateFitness(p)
return p
}
func nextGen(p *population) {
evolved := make([][]int, 0, len(p.current))
for len(evolved) < len(p.current) {
child1, child2 := twoChildren(p)
evolved = append(evolved, child1, child2)
}
p.current = evolved
updateFitness(p)
p.generation++
}
//updateFitness fills p.currentFitnesses with current values and updates p.fittest
func updateFitness(p *population) {
bestFitness, indexOfFittest := float32(0), -1
for i := 0; i < len(p.current); i++ {
f := 1 / distance(p.nodes, p.current[i])
p.currentFitnesses[i] = f
if i == 0 || bestFitness < f {
indexOfFittest = i
bestFitness = f
}
}
if indexOfFittest >= 0 {
copy(p.fittest, p.current[indexOfFittest])
}
}
//twoChildren with parents from the current generation. The children are new slices.
func twoChildren(p *population) (child1, child2 []int) {
mom, dad := roulette(p), roulette(p)
if rand.Float32() < p.crossoverProbability {
child1, child2 = crossover(p.current[mom], p.current[dad])
} else {
child1, child2 = make([]int, p.dnaLength), make([]int, p.dnaLength)
copy(child1, p.current[mom])
copy(child2, p.current[dad])
}
mutate(child1, p.mutationProbability)
mutate(child2, p.mutationProbability)
return
}
//roulette selection, preferring fit individuals over less fit individuals
func roulette(p *population) int {
//take sum of fitnesses of all individuals
fitnessSum := float32(0)
for i := 0; i < len(p.currentFitnesses); i++ {
fitnessSum += p.currentFitnesses[i]
}
//get random treshold
roll := rand.Float32() * fitnessSum
//select individual
for i := 0; i < len(p.current); i++ {
if roll < p.currentFitnesses[i] {
return i
}
//decrease treshold so there will always be an individual selected
roll -= p.currentFitnesses[i]
}
//should be practically impossible to reach
panic(errors.New("Roulette selection did not select any individual"))
}
//crossover combines two routes by taking one (random) segment from one parent and the rest from the other parent. The children are new slices.
func crossover(mom, dad []int) (child1, child2 []int) {
l := len(mom)
//determine which part is copied from which parent
num1 := rand.Intn(l)
num2 := rand.Intn(l)
segmentStart := num1
segmentEnd := num2
if num1 > num2 {
segmentStart = num2
segmentEnd = num1
}
//create offspring
child1 = make([]int, l)
copy(child1[segmentStart:segmentEnd], mom[segmentStart:segmentEnd])
child2 = make([]int, l)
copy(child2[segmentStart:segmentEnd], dad[segmentStart:segmentEnd])
//copy points which are outside of the segment (while maintaining their order)
j1, j2 := segmentEnd, segmentEnd
for i := segmentEnd; i < l+segmentEnd; i++ {
p := dad[i%l]
if !hasPoint(child1[segmentStart:segmentEnd], p) {
child1[j1%l] = p
j1++
}
p = mom[i%l]
if !hasPoint(child2[segmentStart:segmentEnd], p) {
child2[j2%l] = p
}
}
return
}
//shuffle the order of points using fisher swap
func shuffle(path []int) {
for i := len(path); i != 0; i-- {
j := rand.Intn(i + 1)
path[i], path[j] = path[j], path[i]
}
}
//mutate swaps the points around randomly, a hihger mutationProbability means more swaps
func mutate(path []int, mutationProbability float32) {
for i := 0; i < len(path); i++ {
if rand.Float32() < mutationProbability {
j := rand.Intn(len(path))
path[i], path[j] = path[j], path[i]
}
}
}
//distance along the full path (including from last to first node)
func distance(nodes []point, path []int) float32 {
var d float32
l := len(path)
for i := 1; i < l+1; i++ {
prev := nodes[path[i-1]]
cur := nodes[path[i%l]]
xdist := cur.x - prev.x
ydist := cur.y - prev.y
d += float32(math.Sqrt(float64(xdist*xdist + ydist*ydist)))
}
return d
}
//hasPoint is true if path contains p
func hasPoint(path []int, p int) bool {
for i := 0; i < len(path); i++ {
if p == path[i] {
return true
}
}
return false
}
//randomPoint with coordinates between 0 and max.
func randomPoint(max float32) point {
return point{
x: (rand.Float32() - 0.5) * max,
y: (rand.Float32() - 0.5) * max,
}
}
//randomRoute with n random points
func randomRoute(n int, coordSystemSize float32) []point {
r := make([]point, n)
for i := 0; i < n; i++ {
r[i] = randomPoint(coordSystemSize)
}
return r
}
//samePath is true if both paths are exactly the same
func samePath(path1, path2 []int) bool {
if len(path1) != len(path2) {
return false
}
for i := 0; i < len(path1); i++ {
if path1[i] != path2[i] {
return false
}
}
return true
} | nodepath.go | 0.550849 | 0.447279 | nodepath.go | starcoder |
package rgb8
// Conversion of natively non-D50 RGB colorspaces with D50 illuminator to CIE XYZ and back.
// Bradford adaptation was used to calculate D50 matrices from colorspaces' native illuminators.
// RGB values must be linear and in the nominal range [0, 255].
// XYZ values are usually in [0, 255] but may be greater
// To get quick and dirty XYZ approximations, divide by 255, otherwise use the float64 version of these functions.
// Ref.: [24]
// AdobeToXYZ_D50 converts from Adobe RGB 0 with D50 illuminator to CIE XYZ.
func AdobeToXYZ_D50(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 2391199*rr + 804863*gg + 585192*bb
y = 1220094*rr + 2453552*gg + 247920*bb
z = 76396*rr + 238785*gg + 2920936*bb
return
}
// XYZToAdobe_D50 converts from CIE XYZ to Adobe RGB with D50 illuminator.
func XYZToAdobe_D50(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (5004*x - 1556*y - 870*z) / 1e6
gg := (-2495*x + 4886*y + 85*z) / 1e6
bb := (73*x - 358*y + 3439*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// AppleToXYZ_D50 converts from Apple RGB with D50 illuminator to CIE XYZ.
func AppleToXYZ_D50(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 1864971*rr + 1332047*gg + 584235*bb
y = 1000710*rr + 2637526*gg + 283331*bb
z = 72430*rr + 444616*gg + 2719071*bb
return
}
// XYZToApple_D50 converts from CIE XYZ to Apple RGB with D50 illuminator.
func XYZToApple_D50(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (7270*x - 3469*y - 1200*z) / 1e6
gg := (-2786*x + 5188*y + 58*z) / 1e6
bb := (261*x - 756*y + 3700*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// BruceToXYZ_D50 converts from Bruce RGB with D50 illuminator to CIE XYZ.
func BruceToXYZ_D50(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 1937967*rr + 1256797*gg + 586490*bb
y = 988835*rr + 2684262*gg + 248470*bb
z = 61916*rr + 246785*gg + 2927415*bb
return
}
// XYZToBruce_D50 converts from CIE XYZ to Bruce RGB with D50 illuminator.
func XYZToBruce_D50(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (6758*x - 3063*y - 1093*z) / 1e6
gg := (-2495*x + 4886*y + 85*z) / 1e6
bb := (67*x - 347*y + 3431*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// CieToXYZ_D50 converts from CIE RGB with D50 illuminator to CIE XYZ.
func CieToXYZ_D50(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 1909360*rr + 1201170*gg + 670724*bb
y = 684934*rr + 3234329*gg + 2304*bb
z = -4926*rr + 66600*gg + 3174443*bb
return
}
// XYZToCie_D50 converts from CIE XYZ to CIE RGB with D50 illuminator.
func XYZToCie_D50(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (6027*x - 2212*y - 1271*z) / 1e6
gg := (-1276*x + 3560*y + 267*z) / 1e6
bb := (36*x - 78*y + 3142*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// NTSCToXYZ_D50 converts from NTSC RGB with D50 illuminator to CIE XYZ.
func NTSCToXYZ_D50(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 2487727*rr + 726354*gg + 567172*bb
y = 1219410*rr + 2319993*gg + 382164*bb
z = -4634*rr + 217850*gg + 3022901*bb
return
}
// XYZToNTSC_D50 converts from CIE XYZ to NTSC RGB with D50 illuminator.
func XYZToNTSC_D50(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (4708*x - 1407*y - 705*z) / 1e6
gg := (-2505*x + 5111*y - 176*z) / 1e6
bb := (187*x - 370*y + 3319*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// PALToXYZ_D50 converts from PAL/SECAM RGB with D50 illuminator to CIE XYZ.
func PALToXYZ_D50(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 1785401*rr + 1441372*gg + 554480*bb
y = 910990*rr + 2775669*gg + 234909*bb
z = 57041*rr + 411432*gg + 2767642*bb
return
}
// XYZToPAL_D50 converts from CIE XYZ to PAL/SECAM RGB with D50 illuminator.
func XYZToPAL_D50(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (7549*x - 3743*y - 1194*z) / 1e6
gg := (-2495*x + 4886*y + 85*z) / 1e6
bb := (215*x - 649*y + 3625*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// SMPTE_CToXYZ_D50 converts from SMPTE-C RGB with D50 illuminator to CIE XYZ.
func SMPTE_CToXYZ_D50(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 1632662*rr + 1541750*gg + 606841*bb
y = 869411*rr + 2757862*gg + 294294*bb
z = 53559*rr + 358276*gg + 2824282*bb
return
}
// XYZToSMPTE_C_D50 converts from CIE XYZ to SMPTE-C RGB with D50 illuminator.
func XYZToSMPTE_C_D50(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (8650*x - 4657*y - 1373*z) / 1e6
gg := (-2746*x + 5154*y + 53*z) / 1e6
bb := (184*x - 565*y + 3560*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// SRGBToXYZ_D50 converts from sRGB with D50 illuminator to CIE XYZ.
func SRGBToXYZ_D50(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 1710096*rr + 1510058*gg + 561099*bb
y = 872566*rr + 2811288*gg + 237713*bb
z = 54636*rr + 380801*gg + 2800679*bb
return
}
// XYZToSRGB_D50 converts from CIE XYZ to sRGB with D50 illuminator.
func XYZToSRGB_D50(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (7991*x - 4123*y - 1251*z) / 1e6
gg := (-2495*x + 4886*y + 85*z) / 1e6
bb := (183*x - 583*y + 3583*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
} | i8/rgb8/rgb_d50.go | 0.754373 | 0.425187 | rgb_d50.go | starcoder |
package main
import (
"log"
"regexp"
"strings"
redis "github.com/xuyu/goredis"
)
//Extract set expressions from the query (virtual index builders)
func extractSetExpressions(q string) []string {
re := regexp.MustCompile("\\[([^]]+)\\]")
return re.FindAllString(q, -1)
}
//Is token operator
func isOp(o string) bool {
if o == "AND" || o == "OR" {
return true
}
return false
}
//Used to pad the timestring components (timestring builder helper function)
func leftPad2Len(s string, padStr string, overallLen int) string {
var padCountInt int
padCountInt = 1 + ((overallLen - len(padStr)) / len(padStr))
var retStr = strings.Repeat(padStr, padCountInt) + s
return retStr[(len(retStr) - overallLen):]
}
//Build the timestring (index suffix)
func buildTimestring(t []string) string {
//Get the length of the time arguments
l := len(t)
//The return string
rtn := ""
//The different time granularities
year := ""
month := ""
day := ""
hour := ""
minute := ""
second := ""
switch l {
//Year only
case 1:
year = t[0]
//Check year is properly formatted
if len(year) == 4 {
rtn += year
}
//year month
case 2:
year = t[0]
month = leftPad2Len(t[1], "0", 2)
//Check year is properly formatted
if len(year) == 4 {
rtn += year
}
//Check the month is properly formatted
if len(month) == 2 {
rtn += month
}
//year month day
case 3:
year = t[0]
month = leftPad2Len(t[1], "0", 2)
day = leftPad2Len(t[2], "0", 2)
//Check year is properly formatted
if len(year) == 4 {
rtn += year
}
//Check the month is properly formatted
if len(month) == 2 {
rtn += month
}
//Check the day is properly formatted
if len(day) == 2 {
rtn += day
}
//year month day hour
case 4:
year = t[0]
month = leftPad2Len(t[1], "0", 2)
day = leftPad2Len(t[2], "0", 2)
hour = leftPad2Len(t[3], "0", 2)
//Check year is properly formatted
if len(year) == 4 {
rtn += year
}
//Check the month is properly formatted
if len(month) == 2 {
rtn += month
}
//Check the day is properly formatted
if len(day) == 2 {
rtn += day
}
//Check the hour is properly formatted
if len(hour) == 2 {
rtn += hour
}
//year month day hour minute
case 5:
year = t[0]
month = leftPad2Len(t[1], "0", 2)
day = leftPad2Len(t[2], "0", 2)
hour = leftPad2Len(t[3], "0", 2)
minute = leftPad2Len(t[4], "0", 2)
//Check year is properly formatted
if len(year) == 4 {
rtn += year
}
//Check the month is properly formatted
if len(month) == 2 {
rtn += month
}
//Check the day is properly formatted
if len(day) == 2 {
rtn += day
}
//Check the hour is properly formatted
if len(hour) == 2 {
rtn += hour
}
//Check the minute is properly formatted
if len(minute) == 2 {
rtn += minute
}
//year month day hour minute second
case 6:
year = t[0]
month = leftPad2Len(t[1], "0", 2)
day = leftPad2Len(t[2], "0", 2)
hour = leftPad2Len(t[3], "0", 2)
minute = leftPad2Len(t[4], "0", 2)
second = leftPad2Len(t[5], "0", 2)
//Check year is properly formatted
if len(year) == 4 {
rtn += year
}
//Check the month is properly formatted
if len(month) == 2 {
rtn += month
}
//Check the day is properly formatted
if len(day) == 2 {
rtn += day
}
//Check the hour is properly formatted
if len(hour) == 2 {
rtn += hour
}
//Check the minute is properly formatted
if len(minute) == 2 {
rtn += minute
}
//Check the second is properly formatted
if len(second) == 2 {
rtn += second
}
}
return rtn
}
//Build the Redis index using the operand supplied
//Return the correct Redis index and if it is negated or not
func buildIndex(oprnd string) (string, bool) {
//Get the text from the inside of the parentheses
re := regexp.MustCompile("\\(([^)]+)\\)")
r := re.FindString(oprnd)
//Remove the ( and ) from the string
r = strings.Replace(r, "(", "", -1)
r = strings.Replace(r, ")", "", -1)
//Split the arguments of the operand (time arguments and index)
args := strings.Split(r, ",")
//Timestring (will remain null if the index does not have a time function attached)
ts := ""
//Length of arguments must be greater than 1 (time function) (not true if virtual index)
if len(args) > 1 {
//Pass the time arguments to build the index suffix (exclude the index istelf)
//Time string is in the format YYYYMMDDHHMMSS
ts = buildTimestring(args[0 : len(args)-1])
}
//Check to see if the index is negated or not
index := args[len(args)-1]
log.Println(index)
if string(index[0]) == "~" {
//Remove the tilde
index := strings.Replace(index, "~", "", -1)
//Remove the single quotes
index = strings.Replace(index, "'", "", -1)
//Return the index with the timestring and negation set to true
if ts != "" {
return index + ":" + ts, true
} else {
return index, true
}
} else { //Not negated
//Remove the single quotes
index := strings.Replace(index, "'", "", -1)
//Return the index with the timestring and negation set to false
if ts != "" {
return index + ":" + ts, false
} else {
return index, false
}
}
}
//Perform the binary operation (interfaces with redis and will verify index existence and create new indexes)
// Returns the virtual index created
func performBinOp(optr string, oprnd1 string, oprnd2 string) string {
//Connect to server (TESTING)
client, _ := redis.Dial(&redis.DialConfig{Address: "127.0.0.1:6379"})
log.Println("Evaluating: ", oprnd1, " ", optr, " ", oprnd2)
i1, n1 := buildIndex(oprnd1)
i2, n2 := buildIndex(oprnd2)
//Verify the existence of both indices, log fatal if not exists
i1exists, _ := client.Exists(i1)
i2exists, _ := client.Exists(i2)
if !i1exists {
log.Fatal("KEY DOES NOT EXIST: ", i1)
}
if !i2exists {
log.Fatal("KEY DOES NOT EXIST: ", i2)
}
//If the first operand is negated, create a temporary (virtual) idnex
if n1 {
client.BitOp("NOT:" + i1, i1)
i1 = "NOT:" + i1
}
//If the second operand is negated, create a temporary (virtual) idnex
if n2 {
client.BitOp("NOT:" + i2, i2)
i2 = "NOT:" + i2
}
//Perform the operation
client.BitOp(optr, i1 + optr + i2, i1, i2)
log.Println("('" + i1 + optr + i2 + "')")
return "('" + i1 + optr + i2 + "')"
}
//Evaluate a rpn epxression of indices and bitset operators
// Returns any virtual indexes created
func eval(rpn string) {
//The buffer stack
buf := new(Stack)
//List of virtual indexes created
//virt := []string{}
//Break the expression into fields
e := strings.Fields(rpn)
//Iterate over the stack and evaluate using the buffer stack
for i := range e {
//If the token is an operator, pop two items off the buffer and perform the binary operation
if isOp(e[i]) {
v := performBinOp(e[i], buf.Pop().(string), buf.Pop().(string))
buf.Push(v)
} else {
//If the token is not an operator, push onto the buffer
buf.Push(e[i])
}
}
}
func Test() {
query := "COUNT[( MONTH(2014,1,'users:Active') AND MONTH(2014,1,'users:inactive') ) OR ( HOUR(2014,1,12,10,'users:Testing') AND HOUR(2014,2,15,5,'users:Testing') ) ]"
setExps := extractSetExpressions(query)
//Remove the square brackets
q := strings.Replace(setExps[0], "[", "", -1)
q = strings.Replace(q, "]", "", -1)
//Convert the expression to rpn (reverse polish notation)
rpn := ParseInfix(q)
//fmt.Println(rpn)
//Evaluate the rpn expression, and return the virtual index created
eval(rpn)
} | queryparse.go | 0.536556 | 0.483892 | queryparse.go | starcoder |
package entity
import (
"github.com/lquesada/cavernal/model"
"github.com/lquesada/cavernal/lib/g3n/engine/math32"
)
// IEntity is the full behavioral contract for a game entity: drawable
// (model.IDrawable), tickable (ITickable), with collision geometry,
// kinematic state, and health/damage handling.
type IEntity interface {
	model.IDrawable
	ITickable
	Name() string
	// ShadowNode returns the node used to render the entity's shadow.
	ShadowNode() model.INode
	// Colision returns the entity's collision cylinders (the "Colision"
	// spelling is part of the established API).
	Colision() []*Cylinder
	Radius() float32
	// OuterRadius is a secondary radius; the base implementation never
	// returns less than Radius.
	OuterRadius() float32
	ClimbRadius() float32
	ClimbReach() float32
	Height() float32
	Position() *math32.Vector3
	// FormerPosition is the position snapshot taken before the current tick.
	FormerPosition() *math32.Vector3
	Speed() *math32.Vector3
	MaxSpeed() float32
	SetLookAngle(float32)
	LookAngle() float32
	Destroyed() bool
	Health() float32
	SetHealth(v float32)
	MaxHealth() float32
	SetMaxHealth(v float32)
	// Gravity takes a value despite the getter-style name — presumably sets
	// the vertical acceleration used by Tick (implementation not visible
	// here; confirm).
	Gravity(v float32)
	SetOnGround(bool)
	OnGround() bool
	// BorderDistanceTo is edge-to-edge horizontal distance (center distance
	// minus both radii); CenterDistanceTo is center-to-center distance.
	BorderDistanceTo(IEntity) float32
	CenterDistanceTo(IEntity) float32
	// FallingToVoidPosition is non-nil while the entity is falling into the
	// void — confirm exact semantics with callers.
	FallingToVoidPosition() (*math32.Vector3)
	SetFallingToVoidPosition(*math32.Vector3)
	// Generate returns entities produced by this one — confirm semantics
	// with implementers.
	Generate() []IEntity
	Healed(float32)
	Damaged(float32)
	Destroy()
}
// Entity is the base implementation backing IEntity. Concrete entities are
// expected to embed it, configure it through the Set* accessors, and have
// their physics advanced by PreTick/Tick.
type Entity struct {
	// Config
	name          string  // identifying name, returned by Name()
	maxSpeed      float32 // horizontal speed cap enforced in Tick
	friction      float32 // ground deceleration factor applied in Tick
	acceleration  float32 // exposed via Acceleration(); not applied in this file — presumably used by movement code elsewhere
	minSpeed      float32 // horizontal speeds below this are zeroed in Tick
	rotationSpeed float32 // exposed via RotationHorizontalSpeed(); not used in this file
	colision      []*SimpleCylinder // NOTE(review): never read by Colision(), which derives a cylinder from radius/height — confirm whether this field is still needed
	radius        float32 // collision cylinder radius
	outerRadius   float32 // clamped on read: OuterRadius() returns at least max(radius, 0.001)
	climbRadius   float32 // clamped on read to at least 0.001 (ClimbRadius)
	climbReach    float32 // clamped on read to at least 0.001 (ClimbReach)
	height        float32 // collision cylinder height
	maxHealth     float32
	damageable    bool // whether the entity can take damage — confirm semantics with callers
	shadowOpacity float32 // opacity of the rendered shadow (NewEntity default: 0.2)
	// Runtime
	health                float32         // current health (NewEntity default: 1)
	gravity               float32         // vertical acceleration added to speed.Y each Tick
	speed                 *math32.Vector3 // current velocity
	shadow                model.INode     // shadow node (model.NewShadow() by default)
	onGround              bool            // true while standing on ground; enables friction in Tick
	formerPosition        *math32.Vector3 // snapshot taken by PreTick
	position              *math32.Vector3 // current world position
	lookAngle             float32         // facing angle
	fallingToVoidPosition *math32.Vector3 // non-nil while falling into the void — confirm semantics
	destroyed             bool
}
// NewEntity creates a base Entity with the given name, unit health and
// maximum health, zeroed position/speed vectors, an empty collision list,
// and a default shadow node at 0.2 opacity.
func NewEntity(name string) *Entity {
	e := new(Entity)
	e.name = name
	e.position = &math32.Vector3{}
	e.formerPosition = &math32.Vector3{}
	e.speed = &math32.Vector3{}
	e.maxHealth = 1
	e.health = 1
	e.colision = []*SimpleCylinder{}
	e.shadow = model.NewShadow()
	e.shadowOpacity = 0.2
	return e
}
// Name returns the entity's identifying name.
func (e *Entity) Name() string {
	return e.name
}

// MaxHorizontalSpeed returns the horizontal speed cap. It is an alias of
// MaxSpeed: both read the same field.
func (e *Entity) MaxHorizontalSpeed() float32 {
	return e.maxSpeed
}

// SetMaxSpeed sets the horizontal speed cap enforced in Tick.
func (e *Entity) SetMaxSpeed(v float32) {
	e.maxSpeed = v
}

// MaxSpeed returns the horizontal speed cap.
func (e *Entity) MaxSpeed() float32 {
	return e.maxSpeed
}
// Friction returns the ground deceleration factor applied in Tick.
func (e *Entity) Friction() float32 {
	return e.friction
}

// SetFriction sets the ground deceleration factor.
func (e *Entity) SetFriction(v float32) {
	e.friction = v
}

// Acceleration returns the configured acceleration value. It is not applied
// anywhere in this file — presumably consumed by movement code elsewhere.
func (e *Entity) Acceleration() float32 {
	return e.acceleration
}

// SetAcceleration sets the acceleration value.
func (e *Entity) SetAcceleration(v float32) {
	e.acceleration = v
}
// ShadowOpacity returns the opacity used when rendering the shadow node.
func (e *Entity) ShadowOpacity() float32 {
	return e.shadowOpacity
}

// SetShadowOpacity sets the shadow opacity.
func (e *Entity) SetShadowOpacity(v float32) {
	e.shadowOpacity = v
}

// MinHorizontalSpeed returns the minimum horizontal speed; in Tick, speeds
// below this value are snapped to zero.
func (e *Entity) MinHorizontalSpeed() float32 {
	return e.minSpeed
}

// SetMinSpeed sets the minimum horizontal speed.
func (e *Entity) SetMinSpeed(v float32) {
	e.minSpeed = v
}

// RotationHorizontalSpeed returns the rotation speed. It is not used in this
// file — presumably consumed by steering/turning code elsewhere.
func (e *Entity) RotationHorizontalSpeed() float32 {
	return e.rotationSpeed
}

// SetRotationSpeed sets the rotation speed.
func (e *Entity) SetRotationSpeed(v float32) {
	e.rotationSpeed = v
}
// Colision returns the entity's collision volume: a single cylinder built
// from the entity's radius and height, converted to absolute form.
func (e *Entity) Colision() []*Cylinder {
	body := &SimpleCylinder{Radius: e.radius, Height: e.height}
	return []*Cylinder{SimpleToAbsolute(body)}
}
// Radius returns the collision cylinder radius.
func (e *Entity) Radius() float32 {
	return e.radius
}

// SetRadius sets the collision cylinder radius.
func (e *Entity) SetRadius(v float32) {
	e.radius = v
}

// OuterRadius returns the outer radius, guaranteed to be at least the
// collision radius and never below 0.001.
func (e *Entity) OuterRadius() float32 {
	return math32.Max(0.001, math32.Max(e.outerRadius, e.radius))
}

// SetOuterRadius sets the outer radius (clamped on read by OuterRadius).
func (e *Entity) SetOuterRadius(v float32) {
	e.outerRadius = v
}

// ClimbRadius returns the climb radius, never below 0.001.
func (e *Entity) ClimbRadius() float32 {
	return math32.Max(0.001, e.climbRadius)
}

// SetClimbRadius sets the climb radius (clamped on read by ClimbRadius).
func (e *Entity) SetClimbRadius(v float32) {
	e.climbRadius = v
}

// ClimbReach returns the climb reach, never below 0.001.
func (e *Entity) ClimbReach() float32 {
	return math32.Max(0.001, e.climbReach)
}

// SetClimbReach sets the climb reach (clamped on read by ClimbReach).
func (e *Entity) SetClimbReach(v float32) {
	e.climbReach = v
}

// Height returns the collision cylinder height.
func (e *Entity) Height() float32 {
	return e.height
}

// SetHeight sets the collision cylinder height.
func (e *Entity) SetHeight(v float32) {
	e.height = v
}
// MaxHealth returns the maximum health.
func (e *Entity) MaxHealth() float32 {
	return e.maxHealth
}

// SetMaxHealth sets the maximum health. It does not clamp current health.
func (e *Entity) SetMaxHealth(v float32) {
	e.maxHealth = v
}

// Damageable reports whether the entity can take damage — confirm exact
// semantics with callers.
func (e *Entity) Damageable() bool {
	return e.damageable
}

// SetDamageable sets the damageable flag.
func (e *Entity) SetDamageable(v bool) {
	e.damageable = v
}

// Health returns the current health.
func (e *Entity) Health() float32 {
	return e.health
}

// SetHealth sets the current health without clamping to MaxHealth.
func (e *Entity) SetHealth(v float32) {
	e.health = v
}
func (e *Entity) PreTick() {
e.FormerPosition().Copy(e.Position())
}
func (e *Entity) Tick(delta float32) {
// Friction
if e.OnGround() {
frictionX := e.friction * e.speed.X
if e.speed.X < 0 {
e.speed.X -= frictionX * delta
if e.speed.X > 0 {
e.speed.X = 0
}
} else if e.speed.X > 0 {
e.speed.X -= frictionX * delta
if e.speed.X < 0 {
e.speed.X = 0
}
}
frictionZ := e.friction * e.speed.Z
if e.speed.Z < 0 {
e.speed.Z -= frictionZ * delta
if e.speed.Z > 0 {
e.speed.Z = 0
}
} else if e.speed.Z > 0 {
e.speed.Z -= frictionZ * delta
if e.speed.Z < 0 {
e.speed.Z = 0
}
}
}
// Min-Max speed
if e.HorizontalSpeed() > e.maxSpeed {
e.SetSpeed(e.maxSpeed)
}
if e.HorizontalSpeed() < e.minSpeed {
e.SetSpeed(0)
}
e.speed.Y += e.gravity * delta
// Update position
e.position.X += e.speed.X * delta
e.position.Y += e.speed.Y * delta
e.position.Z += e.speed.Z * delta
}
func (e *Entity) PostTick(delta float32) {
}
func (e *Entity) Position() *math32.Vector3 {
return e.position
}
func (e *Entity) FormerPosition() *math32.Vector3 {
return e.formerPosition
}
func (e *Entity) Speed() *math32.Vector3 {
return e.speed
}
func (e *Entity) SetLookAngle(v float32) {
e.lookAngle = v
}
func (e *Entity) LookAngle() float32 {
return e.lookAngle
}
func (e *Entity) BorderDistanceTo(c IEntity) float32 {
return e.CenterDistanceTo(c)-e.Radius()-c.Radius()
}
func (e *Entity) CenterDistanceTo(c IEntity) float32 {
return Distance2D(e.position, c.Position())
}
func (e *Entity) HorizontalSpeed() float32 {
return math32.Sqrt(e.speed.X * e.speed.X + e.speed.Z * e.speed.Z)
}
func (e *Entity) SetSpeed(v float32) {
speed := e.HorizontalSpeed()
var delta float32 = 1
if speed > 0 {
delta = v / e.HorizontalSpeed()
}
e.speed.X *= delta
e.speed.Z *= delta
}
func (e *Entity) Destroy() {
e.destroyed = true
}
func (e *Entity) Destroyed() bool {
return e.destroyed
}
func (e *Entity) Healed(points float32) {
e.health += points
if e.health > e.maxHealth {
e.health = e.maxHealth
}
}
func (e *Entity) Damaged(damage float32) {
if !e.damageable {
return
}
e.health -= damage
if e.health <= 0 {
e.health = 0
e.Destroy()
}
}
func (e *Entity) Node() model.INode {
return nil
}
func (e *Entity) ShadowNode() model.INode {
if e.radius > 0 && e.shadowOpacity > 0 {
return e.shadow.
RGBA(&math32.Color4{0, 0, 0, e.shadowOpacity}).
Transform(
&model.Transform{
Rotation: &math32.Vector3{-math32.Pi/2, 0, 0},
},
&model.Transform{
Scale: &math32.Vector3{e.radius, 1, e.radius},
Position: &math32.Vector3{e.position.X, 0.01, e.position.Z},
},
)
}
return nil
}
func (e *Entity) Generate() []IEntity {
return nil
}
func (e *Entity) Gravity(v float32) {
e.gravity = v
}
func (e *Entity) SetOnGround(v bool) {
e.onGround = v
}
func (e *Entity) OnGround() bool {
return e.onGround
}
func (e *Entity) FallingToVoidPosition() (v *math32.Vector3) {
return e.fallingToVoidPosition
}
func (e *Entity) SetFallingToVoidPosition(v *math32.Vector3) {
e.fallingToVoidPosition = v
} | entity/entity.go | 0.750553 | 0.481332 | entity.go | starcoder |
package container
import (
"fmt"
"math/rand"
"time"
)
/*
https://cp-algorithms.com/data_structures/treap.html
https://zh.wikipedia.org/wiki/%E6%A0%91%E5%A0%86
简述:
treap = tree + heap
其Node.Value符合tree(二叉查找树)的特性(左≤根≤右)
再给每个Node一个随机的权重(或称优先级),Node.randWeight 来使其姿态符合二叉堆的特性,所以保证了操作(期望)都是O(log(n))的
logN的证明:
不是特别“显然”,以下文献也是一笔带过
priorities allow to uniquely specify the tree that will be constructed, which can be proven using corresponding theorem
Obviously, if you choose the priorities randomly, you will get non-degenerate trees on average, which will ensure O(logN) complexity for the main operations
意思是对于给定的优先级,构造出来二叉堆(tree)的姿态也是唯一确定的(相关理论可以证明),如果优先级是完全随机的,那么就会得到一个均匀的不会退化的二叉树。细品好像是这样。
插入:
给节点随机分配一个优先级,先和二叉搜索树的插入一样,先把要插入的点插入到一个叶子上,然后跟维护堆一样,如果当前节点的优先级比根大就旋转,如果当前节点是根的左儿子就右旋如果当前节点是根的右儿子就左旋。
由于旋转是O(1)的,最多进行h次(h是树的高度),插入的复杂度是O(h)的,在期望情况下O(log(n)),所以它的期望复杂度是O(log(n))。
删除:
因为Treap满足堆性质,所以只需要把要删除的节点旋转到叶节点上,然后直接删除就可以了。具体的方法就是每次找到优先级最大的儿子,向与其相反的方向旋转,直到那个节点被旋转到了叶节点,然后直接删除。
删除最多进行O(h)次旋转,期望复杂度是O(log(n))。
*/
type TreapValue interface {
SortComparator
}
type TreapNode struct {
left *TreapNode
right *TreapNode
randWeight int
size int
Value TreapValue
}
var treapRand = rand.New(rand.NewSource(time.Now().UnixNano()))
func newTreapNode(val TreapValue) *TreapNode {
return &TreapNode{
Value: val,
randWeight: treapRand.Int(),
size: 1,
}
}
func (node *TreapNode) fixSize() {
size := 1
if node.left != nil {
size += node.left.size
}
if node.right != nil {
size += node.right.size
}
node.size = size
}
type Treap struct {
root *TreapNode
}
func NewTreap() *Treap {
return &Treap{}
}
func (t *Treap) Insert(val TreapValue) *TreapNode {
node := newTreapNode(val)
t.root = t.insert(t.root, node)
return node
}
func (t *Treap) Delete(val TreapValue) *TreapNode {
var del *TreapNode
t.root, del = t.remove(t.root, val)
return del
}
func (t *Treap) Size() int {
if t.root == nil {
return 0
}
return t.root.size
}
func (t *Treap) Foreach(f func(val TreapValue) bool) {
t.foreach(t.root, f)
}
func (t *Treap) GetRankByValue(val TreapValue) int {
rank, ok := t.getRankByValue(t.root, val)
if !ok {
return -1
}
return rank
}
func (t *Treap) GetValueByRank(rank int) TreapValue {
return t.getValueByRank(t.root, rank)
}
func (t *Treap) leftRotate(root *TreapNode) (newRoot *TreapNode) {
newRoot = root.right
root.right = newRoot.left
newRoot.left = root
root.fixSize()
newRoot.fixSize()
return
}
func (t *Treap) rightRotate(root *TreapNode) (newRoot *TreapNode) {
newRoot = root.left
root.left = newRoot.right
newRoot.right = root
root.fixSize()
newRoot.fixSize()
return
}
func (t *Treap) insert(root, node *TreapNode) *TreapNode {
if root == nil {
return node
}
if root.Value.Compare(node.Value) <= 0 {
root.right = t.insert(root.right, node)
if root.randWeight < root.right.randWeight {
root = t.leftRotate(root)
}
} else {
root.left = t.insert(root.left, node)
if root.randWeight < root.left.randWeight {
root = t.rightRotate(root)
}
}
root.fixSize()
return root
}
func (t *Treap) remove(root *TreapNode, val TreapValue) (*TreapNode, *TreapNode) {
if root == nil {
return nil, nil
}
var del *TreapNode
k := root.Value.Compare(val)
if k < 0 {
root.right, del = t.remove(root.right, val)
} else if k > 0 {
root.left, del = t.remove(root.left, val)
} else {
del = root
if root.left == nil {
root = root.right
} else if root.right == nil {
root = root.left
} else {
if root.left.randWeight > root.right.randWeight {
root = t.rightRotate(root)
root.right, _ = t.remove(root.right, val)
} else {
root = t.leftRotate(root)
root.left, _ = t.remove(root.left, val)
}
}
}
if root != nil {
root.fixSize()
}
return root, del
}
func (t *Treap) getRankByValue(root *TreapNode, val TreapValue) (int, bool) {
if root == nil {
return 0, false
}
leftSize := 0
if root.left != nil {
leftSize += root.left.size
}
k := root.Value.Compare(val)
if k < 0 {
right, ok := t.getRankByValue(root.right, val)
return leftSize + 1 + right, ok
} else if k > 0 {
return t.getRankByValue(root.left, val)
} else {
return leftSize + 1, true
}
}
func (t *Treap) getValueByRank(root *TreapNode, rank int) TreapValue {
if root == nil {
return nil
}
leftSize := 0
if root.left != nil {
leftSize += root.left.size
}
if rank < leftSize+1 {
return t.getValueByRank(root.left, rank)
} else if rank > leftSize+1 {
return t.getValueByRank(root.right, rank-leftSize-1)
} else {
return root.Value
}
}
func (t *Treap) foreach(node *TreapNode, f func(value TreapValue) bool) {
if node == nil {
return
}
t.foreach(node.left, f)
if !f(node.Value) {
return
}
t.foreach(node.right, f)
}
func (t *Treap) print(node *TreapNode) {
if node == nil {
return
}
t.print(node.left)
fmt.Println(node.Value)
t.print(node.right)
}
func (t *Treap) height(node *TreapNode) int {
if node == nil {
return 0
}
left := t.height(node.left)
right := t.height(node.right)
if left > right {
return left + 1
} else {
return right + 1
}
} | src/gostd/container/treap.go | 0.527803 | 0.588239 | treap.go | starcoder |
package main
import (
"errors"
"fmt"
)
//TreeNode data structure represents a typical binary tree
type TreeNode struct {
val int
left *TreeNode
right *TreeNode
}
func main() {
t := &TreeNode{val: 8}
t.Insert(1)
t.Insert(2)
t.Insert(3)
t.Insert(4)
t.Insert(5)
t.Insert(6)
t.Insert(7)
t.Find(11)
t.Delete(5)
t.Delete(7)
t.PrintInorder()
fmt.Println("")
fmt.Println("min is %d", t.FindMin())
fmt.Println("max is %d", t.FindMax())
}
//PrintInorder prints the elements in order
func (t *TreeNode) PrintInorder() {
if t == nil {
return
}
t.left.PrintInorder()
fmt.Print(t.val)
t.right.PrintInorder()
}
//Insert inserts a new node into the binary tree while adhering to the rules of a perfect BST.
func (t *TreeNode) Insert(value int) error {
if t == nil {
return errors.New("Tree is nil")
}
if t.val == value {
return errors.New("This node value already exists")
}
if t.val > value {
if t.left == nil {
t.left = &TreeNode{val: value}
return nil
}
return t.left.Insert(value)
}
if t.val < value {
if t.right == nil {
t.right = &TreeNode{val: value}
return nil
}
return t.right.Insert(value)
}
return nil
}
//Find finds the treenode for the given node val
func (t *TreeNode) Find(value int) (TreeNode, bool) {
if t == nil {
return TreeNode{}, false
}
switch {
case value == t.val:
return *t, true
case value < t.val:
return t.left.Find(value)
default:
return t.right.Find(value)
}
}
//Delete removes the Item with value from the tree
func (t *TreeNode) Delete(value int) {
t.remove(value)
}
func (t *TreeNode) remove(value int) *TreeNode {
if t == nil {
return nil
}
if value < t.val {
t.left = t.left.remove(value)
return t
}
if value > t.val {
t.right = t.right.remove(value)
return t
}
if t.left == nil && t.right == nil {
t = nil
return nil
}
if t.left == nil {
t = t.right
return t
}
if t.right == nil {
t = t.left
return t
}
smallestValOnRight := t.right
for {
//find smallest value on the right side
if smallestValOnRight != nil && smallestValOnRight.left != nil {
smallestValOnRight = smallestValOnRight.left
} else {
break
}
}
t.val = smallestValOnRight.val
t.right = t.right.remove(t.val)
return t
}
//FindMax finds the max element in the given BST
func (t *TreeNode) FindMax() int {
if t.right == nil {
return t.val
}
return t.right.FindMax()
}
//FindMin finds the min element in the given BST
func (t *TreeNode) FindMin() int {
if t.left == nil {
return t.val
}
return t.left.FindMin()
} | binarysearchtree/binarysearchtree.go | 0.734881 | 0.616186 | binarysearchtree.go | starcoder |
package openapi
import "encoding/json"
// An OrderedMap is a set of key-value pairs that preserves the order in which
// items were added. It marshals to JSON as an object.
type OrderedMap struct {
kvs []KeyValue
}
// KeyValue associates a value with a key.
type KeyValue struct {
Key string
Value interface{}
}
// Pairs returns the KeyValue pairs associated with m.
func (m *OrderedMap) Pairs() []KeyValue {
return m.kvs
}
// Set sets a key value pair. If a pair with the same key already existed, it
// will be replaced with the new value. Otherwise, the new value is added to
// the end.
func (m *OrderedMap) Set(key string, value interface{}) {
for i, v := range m.kvs {
if v.Key == key {
m.kvs[i].Value = value
return
}
}
m.kvs = append(m.kvs, KeyValue{key, value})
}
// SetAll replaces existing key-value pairs with the given ones. The keys must
// be unique.
func (m *OrderedMap) SetAll(kvs []KeyValue) {
m.kvs = kvs
}
// exists reports whether a key-value pair exists for the given key.
func (m *OrderedMap) exists(key string) bool {
for _, v := range m.kvs {
if v.Key == key {
return true
}
}
return false
}
// exists reports whether a key-value pair exists for the given key.
func (m *OrderedMap) getMap(key string) *OrderedMap {
for _, v := range m.kvs {
if v.Key == key {
return v.Value.(*OrderedMap)
}
}
return nil
}
// MarshalJSON implements json.Marshaler.
func (m *OrderedMap) MarshalJSON() (b []byte, err error) {
// This is a pointer receiever to enforce that we only store pointers to
// OrderedMap in the output.
b = append(b, '{')
for i, v := range m.kvs {
if i > 0 {
b = append(b, ",\n"...)
}
key, ferr := json.Marshal(v.Key)
if je, ok := ferr.(*json.MarshalerError); ok {
return nil, je.Err
}
b = append(b, key...)
b = append(b, ": "...)
value, jerr := json.Marshal(v.Value)
if je, ok := jerr.(*json.MarshalerError); ok {
err = jerr
value, _ = json.Marshal(je.Err.Error())
}
b = append(b, value...)
}
b = append(b, '}')
return b, err
} | encoding/openapi/orderedmap.go | 0.679285 | 0.408572 | orderedmap.go | starcoder |
package shimesaba
import (
"fmt"
"log"
"math"
"time"
"github.com/mashiike/shimesaba/internal/timeutils"
)
// Metric handles aggregated Mackerel metrics
type Metric struct {
id string
values map[time.Time][]float64
aggregationInterval time.Duration
aggregationMethod func([]float64) float64
startAt time.Time
endAt time.Time
}
func NewMetric(cfg *MetricConfig) *Metric {
return &Metric{
id: cfg.ID,
values: make(map[time.Time][]float64),
aggregationInterval: time.Duration(cfg.AggregationInterval) * time.Minute,
aggregationMethod: getAggregationMethod(cfg.AggregationMethod),
startAt: time.Date(9999, 12, 31, 59, 59, 59, 999999999, time.UTC),
endAt: time.Unix(0, 0).In(time.UTC),
}
}
func getAggregationMethod(str string) func([]float64) float64 {
totalFunc := func(values []float64) float64 {
t := 0.0
for _, v := range values {
t += v
}
return t
}
switch str {
case "total":
return totalFunc
case "avg":
return func(values []float64) float64 {
if len(values) == 0 {
return math.NaN()
}
t := totalFunc(values)
return t / float64(len(values))
}
case "max":
case "":
log.Println("[warn] aggregation_method is empty. select default method `max`")
default:
log.Printf("[warn] aggregation_method `%s` is not found. select default method `max`\n", str)
}
return func(values []float64) float64 {
maxValue := 0.0
for _, v := range values {
if v > maxValue {
maxValue = v
}
}
return maxValue
}
}
// ID is the identifier of the metric
func (m *Metric) ID() string {
return m.id
}
// AppendValue adds a value to the metric
func (m *Metric) AppendValue(t time.Time, v interface{}) error {
t = t.Truncate(m.aggregationInterval)
var value float64
switch v := v.(type) {
case float64:
value = v
case int64:
value = float64(v)
case int32:
value = float64(v)
case uint64:
value = float64(v)
case uint32:
value = float64(v)
case float32:
value = float64(v)
case int:
value = float64(v)
default:
return fmt.Errorf("Metric.Append() unknown value type = %T", v)
}
values, ok := m.values[t]
if !ok {
values = make([]float64, 0, 1)
}
values = append(values, value)
m.values[t] = values
if m.startAt.After(t) {
m.startAt = t
}
if m.endAt.Before(t) {
m.endAt = t
}
return nil
}
// GetValue gets the value at the specified time
func (m *Metric) GetValue(t time.Time) (float64, bool) {
t = t.Truncate(m.aggregationInterval)
values, ok := m.values[t]
if !ok {
return math.NaN(), false
}
return m.aggregationMethod(values), true
}
// GetValues gets the values for the specified time period
func (m *Metric) GetValues(startAt time.Time, endAt time.Time) map[time.Time]float64 {
iter := timeutils.NewIterator(
startAt,
endAt,
m.aggregationInterval,
)
ret := make(map[time.Time]float64)
for iter.HasNext() {
curAt, _ := iter.Next()
if v, ok := m.GetValue(curAt); ok {
ret[curAt] = v
}
}
return ret
}
// StartAt returns the start time of the metric
func (m *Metric) StartAt() time.Time {
return m.startAt
}
// EndAt returns the end time of the metric
func (m *Metric) EndAt() time.Time {
return m.endAt.Add(m.aggregationInterval - time.Nanosecond)
}
// AggregationInterval returns the aggregation interval for metrics
func (m *Metric) AggregationInterval() time.Duration {
return m.aggregationInterval
}
//String implements fmt.Stringer
func (m *Metric) String() string {
return fmt.Sprintf("[id:%s len(values):%d aggregate_interval:%s, range:%s~%s<%s>]", m.id, len(m.values), m.aggregationInterval, m.startAt, m.endAt, m.endAt.Sub(m.startAt))
}
// Metrics is a collection of metrics
type Metrics map[string]*Metric
// Set adds a metric to the collection
func (ms Metrics) Set(m *Metric) {
ms[m.ID()] = m
}
// Get uses an identifier to get the metric
func (ms Metrics) Get(id string) (*Metric, bool) {
m, ok := ms[id]
return m, ok
}
func (ms Metrics) ToSlice() []*Metric {
ret := make([]*Metric, 0, len(ms))
for _, m := range ms {
ret = append(ret, m)
}
return ret
}
// ToSlice converts the collection to Slice
func (ms Metrics) String() string {
return fmt.Sprintf("%v", ms.ToSlice())
}
// StartAt returns the earliest start time in the metric in the collection
func (ms Metrics) StartAt() time.Time {
startAt := time.Date(9999, 12, 31, 59, 59, 59, 999999999, time.UTC)
for _, m := range ms {
if startAt.After(m.StartAt()) {
startAt = m.StartAt()
}
}
return startAt
}
// EndAt returns the latest end time of the metric in the collection
func (ms Metrics) EndAt() time.Time {
endAt := time.Unix(0, 0).In(time.UTC)
for _, m := range ms {
if endAt.Before(m.EndAt()) {
endAt = m.EndAt()
}
}
return endAt
}
// AggregationInterval returns the longest aggregation period for the metric in the collection
func (ms Metrics) AggregationInterval() time.Duration {
ret := time.Duration(0)
for _, m := range ms {
a := m.AggregationInterval()
if a > ret {
ret = a
}
}
return ret
} | metric.go | 0.754644 | 0.420421 | metric.go | starcoder |
package aep
import (
"bytes"
"encoding/binary"
"fmt"
"strings"
"github.com/rioam2/rifx"
)
// PropertyTypeName enumerates the value/type of a property
type PropertyTypeName uint16
const (
// PropertyTypeBoolean denotes a boolean checkbox property
PropertyTypeBoolean PropertyTypeName = 0x04
// PropertyTypeOneD denotes a one-dimensional slider property
PropertyTypeOneD PropertyTypeName = 0x02
// PropertyTypeTwoD denotes a two-dimensional point property
PropertyTypeTwoD PropertyTypeName = 0x06
// PropertyTypeThreeD denotes a three-dimensional point property
PropertyTypeThreeD PropertyTypeName = 0x12
// PropertyTypeColor denotes a four-dimensional color property
PropertyTypeColor PropertyTypeName = 0x05
// PropertyTypeAngle denotes a one-dimensional angle property
PropertyTypeAngle PropertyTypeName = 0x03
// PropertyTypeLayerSelect denotes a single-valued layer selection property
PropertyTypeLayerSelect PropertyTypeName = 0x00
// PropertyTypeSelect denotes a single-valued selection property
PropertyTypeSelect PropertyTypeName = 0x07
// PropertyTypeGroup denotes a collection/group property
PropertyTypeGroup PropertyTypeName = 0x0d
// PropertyTypeCustom denotes an unknown/custom property type (default)
PropertyTypeCustom PropertyTypeName = 0x0f
)
// String translates a property type enumeration to string
func (p PropertyTypeName) String() string {
switch p {
case PropertyTypeBoolean:
return "Boolean"
case PropertyTypeOneD:
return "OneD"
case PropertyTypeTwoD:
return "TwoD"
case PropertyTypeThreeD:
return "ThreeD"
case PropertyTypeColor:
return "Color"
case PropertyTypeAngle:
return "Angle"
case PropertyTypeLayerSelect:
return "LayerSelect"
case PropertyTypeSelect:
return "Select"
case PropertyTypeGroup:
return "Group"
default:
return "Custom"
}
}
// Property describes a property object of a layer or nested property
type Property struct {
MatchName string
Name string
Index uint32
PropertyType PropertyTypeName
Properties []*Property
SelectOptions []string
}
func parseProperty(propData interface{}, matchName string) (*Property, error) {
prop := &Property{}
// Apply some sensible default values
prop.PropertyType = PropertyTypeCustom
prop.SelectOptions = make([]string, 0)
prop.MatchName = matchName
prop.Name = matchName
switch matchName {
case "ADBE Effect Parade":
prop.Name = "Effects"
}
// Handle different types of property data
switch propData.(type) {
case *rifx.List:
propHead := propData.(*rifx.List)
// Parse sub-properties
prop.Properties = make([]*Property, 0)
tdgpMap, orderedMatchNames := indexedGroupToMap(propHead)
for idx, mn := range orderedMatchNames {
subProp, err := parseProperty(tdgpMap[mn], mn)
if err == nil {
subProp.Index = uint32(idx) + 1
prop.Properties = append(prop.Properties, subProp)
}
}
// Parse effect sub-properties
if propHead.Identifier == "sspc" {
prop.PropertyType = PropertyTypeGroup
fnamBlock, err := propHead.FindByType("fnam")
if err == nil {
prop.Name = fnamBlock.ToString()
}
parTList := propHead.SublistMerge("parT")
subPropMatchNames, subPropPards := pairMatchNames(parTList)
for idx, mn := range subPropMatchNames {
// Skip first pard entry (describes parent)
if idx == 0 {
continue
}
subProp, err := parseProperty(subPropPards[idx], mn)
if err == nil {
subProp.Index = uint32(idx)
prop.Properties = append(prop.Properties, subProp)
}
}
}
case []interface{}:
for _, entry := range propData.([]interface{}) {
if block, ok := entry.(*rifx.Block); ok {
switch block.Type {
case "pdnm":
strContent := block.ToString()
if prop.PropertyType == PropertyTypeSelect {
prop.SelectOptions = strings.Split(strContent, "|")
} else if strContent != "" {
prop.Name = strContent
}
case "pard":
blockData := block.Data.([]byte)
prop.PropertyType = PropertyTypeName(binary.BigEndian.Uint16(blockData[14:16]))
if prop.PropertyType == 0x0a {
prop.PropertyType = PropertyTypeOneD
}
pardName := fmt.Sprintf("%s", bytes.Trim(blockData[16:48], "\x00"))
if pardName != "" {
prop.Name = pardName
}
}
}
}
}
return prop, nil
}
func pairMatchNames(head *rifx.List) ([]string, [][]interface{}) {
matchNames := make([]string, 0)
datum := make([][]interface{}, 0)
if head != nil {
groupIdx := -1
skipToNextTDMNFlag := false
for _, block := range head.Blocks {
if block.Type == "tdmn" {
matchName := fmt.Sprintf("%s", bytes.Trim(block.Data.([]byte), "\x00"))
if matchName == "ADBE Group End" || matchName == "ADBE Effect Built In Params" {
skipToNextTDMNFlag = true
continue
}
matchNames = append(matchNames, matchName)
skipToNextTDMNFlag = false
groupIdx++
} else if groupIdx >= 0 && !skipToNextTDMNFlag {
if groupIdx >= len(datum) {
datum = append(datum, make([]interface{}, 0))
}
switch block.Data.(type) {
case *rifx.List:
datum[groupIdx] = append(datum[groupIdx], block.Data)
default:
datum[groupIdx] = append(datum[groupIdx], block)
}
}
}
}
return matchNames, datum
}
func indexedGroupToMap(tdgpHead *rifx.List) (map[string]*rifx.List, []string) {
tdgpMap := make(map[string]*rifx.List, 0)
matchNames, contents := pairMatchNames(tdgpHead)
for idx, matchName := range matchNames {
tdgpMap[matchName] = contents[idx][0].(*rifx.List)
}
return tdgpMap, matchNames
} | property.go | 0.629547 | 0.432363 | property.go | starcoder |
package config
import (
"encoding/csv"
"fmt"
"io"
"os"
"strconv"
"github.com/pkg/errors"
"github.com/google/simhospital/pkg/ir"
"github.com/google/simhospital/pkg/sample"
)
// nilKey is a keyword to use in CSV files that are loaded with loadCSVWithFrequency.
// Rows for which all items (except the frequency) match this keyword are represented with a nil RecordWithFreq.Value.
const nilKey = "nil"
// RecordWithFreq stores a record as a list of strings (e.g. from CSV) and its associated frequency value.
type RecordWithFreq struct {
Value map[string]string
Weight uint
}
// loadCSVWithFrequency loads a CSV file where each row is a list of strings and the last
// column is a frequency represented as int, and returns a slice where each element corresponds
// to one row in the file. Rows that start with # are ignored.
// The columnKeys parameter are the keys to be set in the RecordWithFreq.Value map. The keys are
// mapped to the items in the rows in order: the first key will be used for the first element in
// each row, and successively.
// All rows are expected to have the same number of columns.
// Rows for which all items (except the frequency) are the keyword "nil" are represented as a
// nil RecordWithFreq.Value. Callers need to check for the presence of a nil Value.
// Example format for the file:
// # Distribution of patient classes.
// OUTPATIENT,EMERGENCY,10
// nil,nil,20
// Output for columnKeys ("class", "type") :
// []RecordWithFreq {
// {
// Value: map[string]string{
// "class": "OUTPATIENT",
// "type": "EMERGENCY",
// },
// Weight: 10,
// },
// {
// Value: nil,
// Weight: 20,
// },
// (etc).
//}
func loadCSVWithFrequency(fName string, columnKeys []string) ([]RecordWithFreq, error) {
f, err := os.Open(fName)
if err != nil {
return nil, errors.Wrapf(err, "cannot open file %s", fName)
}
defer f.Close()
reader := csv.NewReader(f)
reader.Comment = '#'
var records []RecordWithFreq
nColumns := len(columnKeys) + 1
for record, err := reader.Read(); err != io.EOF; record, err = reader.Read() {
if err != nil {
return nil, errors.Wrapf(err, "cannot read file %s", fName)
}
if len(record) != nColumns {
return nil, fmt.Errorf("cannot load frequencies from file %s: got %d elements in one line, want %d", fName, len(record), nColumns)
}
m := map[string]string{}
countNil := 0
for i := 0; i < nColumns-1; i++ {
m[columnKeys[i]] = record[i]
if record[i] == nilKey {
countNil++
}
}
if countNil == nColumns-1 {
m = nil
}
frequencyField := record[nColumns-1]
weight, err := strconv.ParseInt(frequencyField, 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "cannot load frequencies from file %s: got frequency %s, want int", fName, frequencyField)
}
records = append(records, RecordWithFreq{
Value: m,
Weight: uint(weight),
})
}
return records, nil
}
// loadCodedElements loads a CSV file where each row contains one coded element and its frequency.
// The first element of each row is the code of the coded element, the second is its description,
// and the last one is the frequency.
// allowNil specifies whether nil rows are allowed. A nil row is of the form "nil,nil,<frequency>".
func loadCodedElements(fileName string, codingSystem string, allowNil bool) ([]MappableWeightedValue, error) {
idKey := "id"
textKey := "text"
recordsWithFrequency, err := loadCSVWithFrequency(fileName, []string{idKey, textKey})
if err != nil {
return nil, err
}
var values []MappableWeightedValue
for _, record := range recordsWithFrequency {
if !allowNil && record.Value == nil {
return nil, fmt.Errorf("found nil value in file %s; nil values not supported", fileName)
}
id := record.Value[idKey]
key := record.Value[textKey]
values = append(values, MappableWeightedValue{
WeightedVal: sample.WeightedValue{
Value: &ir.CodedElement{
ID: id,
Text: key,
CodingSystem: codingSystem,
},
Frequency: record.Weight,
},
Mapping: Mapping{Key: id, Value: key},
})
}
return values, nil
}
func ethnicities(fileName string) ([]sample.WeightedValue, error) {
idKey := "id"
textKey := "text"
recordsWithFrequency, err := loadCSVWithFrequency(fileName, []string{idKey, textKey})
if err != nil {
return nil, err
}
var distr []sample.WeightedValue
for _, record := range recordsWithFrequency {
var e *ir.Ethnicity
if record.Value != nil {
e = &ir.Ethnicity{
ID: record.Value[idKey],
Text: record.Value[textKey],
}
}
distr = append(distr, sample.WeightedValue{
Value: e,
Frequency: record.Weight,
})
}
return distr, nil
}
// PatientClassAndType represents a class and type pair.
type PatientClassAndType struct {
Class string
Type string
}
func patientClass(fileName string) ([]sample.WeightedValue, error) {
classKey := "class"
typeKey := "type"
recordsWithFrequency, err := loadCSVWithFrequency(fileName, []string{classKey, typeKey})
if err != nil {
return nil, err
}
var distr []sample.WeightedValue
for _, record := range recordsWithFrequency {
if record.Value == nil {
return nil, fmt.Errorf("cannot load patient classes from file %s: nil records are not supported", fileName)
}
patientC := record.Value[classKey]
patientT := record.Value[typeKey]
distr = append(distr, sample.WeightedValue{
Value: &PatientClassAndType{Class: patientC, Type: patientT},
Frequency: record.Weight,
})
}
return distr, nil
} | pkg/config/csv.go | 0.658198 | 0.471041 | csv.go | starcoder |
package dxf
// AcadVersion represents the minimum version of AutoCAD that is expected to be able to read the file.
type AcadVersion int
const (
// Version1_0 corresponds to the value "MC0.0"
Version1_0 AcadVersion = iota
// Version1_2 corresponds to the value "AC1.2"
Version1_2
// Version1_40 corresponds to the value "AC1.40"
Version1_40
// Version2_05 corresponds to the value "AC1.50"
Version2_05
// Version2_10 corresponds to the value "AC2.10"
Version2_10
// Version2_21 corresponds to the value "AC2.21"
Version2_21
// Version2_22 corresponds to the value "AC2.22"
Version2_22
// Version2_5 corresponds to the value "AC1002"
Version2_5
// Version2_6 corresponds to the value "AC1003"
Version2_6
// R9 corresponds to the value "AC1004"
R9
// R10 corresponds to the value "AC1006"
R10
// R11 corresponds to the value "AC1009"
R11
// R12 corresponds to the value "AC1009"
R12
// R13 corresponds to the value "AC1012"
R13
// R14 corresponds to the value "AC1014"
R14
// R2000 corresponds to the value "AC1015"
R2000
// R2004 corresponds to the value "AC1018"
R2004
// R2007 corresponds to the value "AC1021"
R2007
// R2010 corresponds to the value "AC1024"
R2010
// R2013 corresponds to the value "AC1027"
R2013
// R2018 corresponds to the value "AC1032"
R2018
)
func parseAcadVersion(val string) AcadVersion {
switch val {
case "MC0.0":
return Version1_0
case "AC1.2":
return Version1_2
case "AC1.40":
return Version1_40
case "AC1.50":
return Version2_05
case "AC2.10":
return Version2_10
case "AC2.21":
return Version2_21
case "AC2.22":
return Version2_22
case "AC1002":
return Version2_5
case "AC1003":
return Version2_6
case "AC1004":
return R9
case "AC1006":
return R10
case "AC1009":
// also R11
return R12
case "AC1012":
return R13
case "AC1014":
return R14
case "AC1015":
return R2000
case "AC1018":
return R2004
case "AC1021":
return R2007
case "AC1024":
return R2010
case "AC1027":
return R2013
case "AC1032":
return R2018
default:
// TODO: add error handling?
return R12
}
}
func (v AcadVersion) String() string {
switch v {
case Version1_0:
return "MC0.0"
case Version1_2:
return "AC1.2"
case Version1_40:
return "AC1.40"
case Version2_05:
return "AC1.50"
case Version2_10:
return "AC2.10"
case Version2_21:
return "AC2.21"
case Version2_22:
return "AC2.22"
case Version2_5:
return "AC1002"
case Version2_6:
return "AC1003"
case R9:
return "AC1004"
case R10:
return "AC1006"
case R11:
return "AC1009"
case R12:
return "AC1009"
case R13:
return "AC1012"
case R14:
return "AC1014"
case R2000:
return "AC1015"
case R2004:
return "AC1018"
case R2007:
return "AC1021"
case R2010:
return "AC1024"
case R2013:
return "AC1027"
case R2018:
return "AC1032"
default:
return "UNKNOWN"
}
} | acadVersion.go | 0.561816 | 0.521532 | acadVersion.go | starcoder |
Create 2d/3d panels.
*/
//-----------------------------------------------------------------------------
package obj
import "github.com/deadsy/sdfx/sdf"
//-----------------------------------------------------------------------------
/*
2D Panel with rounded corners and edge holes.

Note: The hole pattern string lays out multiple holes along an edge.
Each "x" places a hole and each "." adds spacing between holes.
Examples:
"x" - single hole on edge
"xx" - two holes on edge
"x.x" - two holes on edge with spacing
"xx.x.xx" - five holes on edge with spacing
etc.
*/
// PanelParms defines the parameters for a 2D panel.
type PanelParms struct {
	Size sdf.V2 // size of the panel (x, y)
	CornerRadius float64 // radius of the rounded panel corners
	HoleDiameter float64 // diameter of the edge holes (<= 0 for no holes)
	HoleMargin [4]float64 // distance of holes from the edge: top, right, bottom, left
	HolePattern [4]string // hole layout pattern per edge: top, right, bottom, left
	Thickness float64 // panel thickness (used by Panel3D only)
}
// Panel2D returns a 2d panel with holes on the edges.
func Panel2D(k *PanelParms) (sdf.SDF2, error) {
	// base panel with rounded corners
	panel := sdf.Box2D(k.Size, k.CornerRadius)
	if k.HoleDiameter <= 0.0 {
		// no edge holes requested
		return panel, nil
	}
	// profile for a single hole
	hole, err := sdf.Circle2D(0.5 * k.HoleDiameter)
	if err != nil {
		return nil, err
	}
	// corner positions, inset by the per-edge hole margins
	halfX := 0.5 * k.Size.X
	halfY := 0.5 * k.Size.Y
	topLeft := sdf.V2{-halfX + k.HoleMargin[3], halfY - k.HoleMargin[0]}
	topRight := sdf.V2{halfX - k.HoleMargin[1], halfY - k.HoleMargin[0]}
	bottomRight := sdf.V2{halfX - k.HoleMargin[1], -halfY + k.HoleMargin[2]}
	bottomLeft := sdf.V2{-halfX + k.HoleMargin[3], -halfY + k.HoleMargin[2]}
	// edge endpoints in clockwise order: top, right, bottom, left
	// (matches the ordering of HoleMargin/HolePattern)
	edges := [4][2]sdf.V2{
		{topLeft, topRight},
		{topRight, bottomRight},
		{bottomRight, bottomLeft},
		{bottomLeft, topLeft},
	}
	// lay out the holes along each edge per its pattern
	holes := make([]sdf.SDF2, 0, len(edges))
	for i, edge := range edges {
		holes = append(holes, sdf.LineOf2D(hole, edge[0], edge[1], k.HolePattern[i]))
	}
	// subtract the holes from the panel
	return sdf.Difference2D(panel, sdf.Union2D(holes...)), nil
}
// Panel3D returns a 3d panel with holes on the edges.
// It extrudes the 2d profile from Panel2D to the configured thickness.
func Panel3D(k *PanelParms) (sdf.SDF3, error) {
	if k.Thickness <= 0 {
		return nil, sdf.ErrMsg("k.Thickness <= 0")
	}
	// Build the 2d profile, then give it depth.
	profile, err := Panel2D(k)
	if err != nil {
		return nil, err
	}
	return sdf.Extrude3D(profile, k.Thickness), nil
}
//-----------------------------------------------------------------------------
// EuroRack Module Panels: http://www.doepfer.de/a100_man/a100m_e.htm

// erU is the eurorack vertical unit (1U = 1.75 inch), in mm.
const erU = 1.75 * sdf.MillimetresPerInch

// erHP is the eurorack horizontal pitch unit (0.2 inch), in mm.
const erHP = 0.2 * sdf.MillimetresPerInch

// erHoleDiameter is the default panel mounting hole diameter in mm.
const erHoleDiameter = 3.2

// gaps between adjacent panels (doepfer 3U module spec)
const erUGap = ((3 * erU) - 128.5) * 0.5
const erHPGap = ((3 * erHP) - 15) * 0.5

// EuroRackParms defines the parameters for a eurorack panel.
type EuroRackParms struct {
	U            float64 // U-size (vertical)
	HP           float64 // HP-size (horizontal)
	CornerRadius float64 // radius of panel corners
	HoleDiameter float64 // panel holes (0 for default)
	Thickness    float64 // panel thickness (3d only)
	Ridge        bool    // add side ridges for reinforcing (3d only)
}

// erUSize returns the panel height in mm for a u-unit module, less the
// inter-panel gaps.
func erUSize(u float64) float64 {
	return (u * erU) - (2 * erUGap)
}

// erHPSize returns the panel width in mm for an hp-unit module, less the
// inter-panel gaps.
func erHPSize(hp float64) float64 {
	return (hp * erHP) - (2 * erHPGap)
}
// EuroRackPanel2D returns a 2d eurorack synthesizer module panel (in mm).
// Narrow panels (< 8 HP) get two mounting holes; wider panels get four.
func EuroRackPanel2D(k *EuroRackParms) (sdf.SDF2, error) {
	switch {
	case k.U < 1:
		return nil, sdf.ErrMsg("k.U < 1")
	case k.HP <= 1:
		return nil, sdf.ErrMsg("k.HP <= 1")
	case k.CornerRadius < 0:
		return nil, sdf.ErrMsg("k.CornerRadius < 0")
	}
	if k.HoleDiameter <= 0 {
		// Fall back to the standard mounting hole size. Note: this writes
		// the default back into the caller's parameter struct.
		k.HoleDiameter = erHoleDiameter
	}

	// edge to mount hole margins
	const vMargin = 3.0
	const hMargin = (3 * erHP * 0.5) - erHPGap

	width, height := erHPSize(k.HP), erUSize(k.U)
	pk := PanelParms{
		Size:         sdf.V2{width, height},
		CornerRadius: k.CornerRadius,
		HoleDiameter: k.HoleDiameter,
		HoleMargin:   [4]float64{vMargin, hMargin, vMargin, hMargin},
	}

	// Default: two holes on the left edge corners.
	pk.HolePattern = [4]string{"x", "", "", "x"}
	if k.HP >= 8 {
		// Wide panels are held at all four corners.
		pk.HolePattern = [4]string{"x", "x", "x", "x"}
	}

	return Panel2D(&pk)
}
// EuroRackPanel3D returns a 3d eurorack synthesizer module panel (in mm).
// With Ridge set, a reinforcing ridge is added near each vertical edge on
// the back face of the panel.
func EuroRackPanel3D(k *EuroRackParms) (sdf.SDF3, error) {
	if k.Thickness <= 0 {
		return nil, sdf.ErrMsg("k.Thickness <= 0")
	}

	profile, err := EuroRackPanel2D(k)
	if err != nil {
		return nil, err
	}
	panel := sdf.Extrude3D(profile, k.Thickness)
	if !k.Ridge {
		return panel, nil
	}

	// Reinforcing ridge dimensions: thin, nearly full height, 1.5x panel depth.
	ridgeX := k.Thickness
	ridgeY := erUSize(k.U) - 18.0
	ridgeZ := k.Thickness * 1.5
	ridge, err := sdf.Box3D(sdf.V3{ridgeX, ridgeY, ridgeZ}, 0)
	if err != nil {
		return nil, err
	}

	// Sit the ridge on the panel face, then place one near each side edge.
	zOfs := 0.5 * (k.Thickness + ridgeZ)
	xOfs := 0.5 * (erHPSize(k.HP) - ridgeX)
	ridge = sdf.Transform3D(ridge, sdf.Translate3d(sdf.V3{0, 0, zOfs}))
	right := sdf.Transform3D(ridge, sdf.Translate3d(sdf.V3{xOfs, 0, 0}))
	left := sdf.Transform3D(ridge, sdf.Translate3d(sdf.V3{-xOfs, 0, 0}))

	return sdf.Union3D(panel, right, left), nil
}
//-----------------------------------------------------------------------------
// PanelHoleParms defines the parameters for a panel hole.
type PanelHoleParms struct {
	Diameter    float64 // hole diameter
	Thickness   float64 // panel thickness
	Indent      sdf.V3  // indent size (any zero component disables the indent)
	Orientation float64 // orientation of indent, 0 == x-axis (units presumably radians, per sdf.RotateZ — confirm)
	Offset      float64 // indent offset from main axis
}
// PanelHole3D returns a panel hole and an indent for a retention pin.
// The result is a through-hole cylinder, optionally unioned with a
// box-shaped indent offset from the hole axis and rotated by Orientation.
// The indent is skipped when Offset is 0 or any Indent dimension is 0.
func PanelHole3D(k *PanelHoleParms) (sdf.SDF3, error) {
	if k.Diameter <= 0 {
		return nil, sdf.ErrMsg("k.Diameter <= 0")
	}
	if k.Thickness <= 0 {
		return nil, sdf.ErrMsg("k.Thickness <= 0")
	}
	if k.Indent.LTZero() {
		return nil, sdf.ErrMsg("k.Indent < 0")
	}
	if k.Offset < 0 {
		return nil, sdf.ErrMsg("k.Offset")
	}
	// build the through-hole
	s, err := sdf.Cylinder3D(k.Thickness, k.Diameter*0.5, 0)
	if err != nil {
		return nil, err
	}
	// no indent requested (zero offset or degenerate indent box)
	if k.Offset == 0 || k.Indent.X == 0 || k.Indent.Y == 0 || k.Indent.Z == 0 {
		return s, nil
	}
	// build the indent
	indent, err := sdf.Box3D(k.Indent, 0)
	if err != nil {
		// bug fix: this error was previously ignored
		return nil, err
	}
	// align the indent top with the top surface of the panel
	zOfs := (k.Thickness - k.Indent.Z) * 0.5
	indent = sdf.Transform3D(indent, sdf.Translate3d(sdf.V3{k.Offset, 0, zOfs}))
	s = sdf.Union3D(s, indent)
	if k.Orientation != 0 {
		// rotate hole + indent about the z-axis
		s = sdf.Transform3D(s, sdf.RotateZ(k.Orientation))
	}
	return s, nil
}
//----------------------------------------------------------------------------- | obj/panel.go | 0.776114 | 0.546738 | panel.go | starcoder |
package ent
import (
"context"
"errors"
"fmt"
"github.com/facebookincubator/ent/dialect/sql/sqlgraph"
"github.com/facebookincubator/ent/schema/field"
"github.com/google/uuid"
"github.com/minskylab/asclepius/ent/epidemiologicresults"
"github.com/minskylab/asclepius/ent/test"
)
// EpidemiologicResultsCreate is the builder for creating a EpidemiologicResults entity.
//
// NOTE(review): this file follows the entc code-generation layout; if it is
// generated, prefer editing the schema and regenerating over hand edits.
type EpidemiologicResultsCreate struct {
	config
	id                *uuid.UUID
	visitedPlaces     *[]string
	infectedFamily    *bool
	fromInfectedPlace *int
	toInfectedPlace   *int
	test              map[uuid.UUID]struct{} // ids for the unique "test" edge; Save rejects more than one
}

// SetVisitedPlaces sets the visitedPlaces field.
func (erc *EpidemiologicResultsCreate) SetVisitedPlaces(s []string) *EpidemiologicResultsCreate {
	erc.visitedPlaces = &s
	return erc
}

// SetInfectedFamily sets the infectedFamily field.
func (erc *EpidemiologicResultsCreate) SetInfectedFamily(b bool) *EpidemiologicResultsCreate {
	erc.infectedFamily = &b
	return erc
}

// SetNillableInfectedFamily sets the infectedFamily field if the given value is not nil.
func (erc *EpidemiologicResultsCreate) SetNillableInfectedFamily(b *bool) *EpidemiologicResultsCreate {
	if b != nil {
		erc.SetInfectedFamily(*b)
	}
	return erc
}

// SetFromInfectedPlace sets the fromInfectedPlace field.
func (erc *EpidemiologicResultsCreate) SetFromInfectedPlace(i int) *EpidemiologicResultsCreate {
	erc.fromInfectedPlace = &i
	return erc
}

// SetNillableFromInfectedPlace sets the fromInfectedPlace field if the given value is not nil.
func (erc *EpidemiologicResultsCreate) SetNillableFromInfectedPlace(i *int) *EpidemiologicResultsCreate {
	if i != nil {
		erc.SetFromInfectedPlace(*i)
	}
	return erc
}

// SetToInfectedPlace sets the toInfectedPlace field.
func (erc *EpidemiologicResultsCreate) SetToInfectedPlace(i int) *EpidemiologicResultsCreate {
	erc.toInfectedPlace = &i
	return erc
}

// SetNillableToInfectedPlace sets the toInfectedPlace field if the given value is not nil.
func (erc *EpidemiologicResultsCreate) SetNillableToInfectedPlace(i *int) *EpidemiologicResultsCreate {
	if i != nil {
		erc.SetToInfectedPlace(*i)
	}
	return erc
}

// SetID sets the id field.
func (erc *EpidemiologicResultsCreate) SetID(u uuid.UUID) *EpidemiologicResultsCreate {
	erc.id = &u
	return erc
}

// SetTestID sets the test edge to Test by id.
func (erc *EpidemiologicResultsCreate) SetTestID(id uuid.UUID) *EpidemiologicResultsCreate {
	if erc.test == nil {
		erc.test = make(map[uuid.UUID]struct{})
	}
	erc.test[id] = struct{}{}
	return erc
}

// SetNillableTestID sets the test edge to Test by id if the given value is not nil.
func (erc *EpidemiologicResultsCreate) SetNillableTestID(id *uuid.UUID) *EpidemiologicResultsCreate {
	if id != nil {
		erc = erc.SetTestID(*id)
	}
	return erc
}

// SetTest sets the test edge to Test.
func (erc *EpidemiologicResultsCreate) SetTest(t *Test) *EpidemiologicResultsCreate {
	return erc.SetTestID(t.ID)
}
// Save creates the EpidemiologicResults in the database.
// Field validators run first; if any validator fails, or more than one id
// was assigned to the unique "test" edge, an error is returned without
// touching the database.
func (erc *EpidemiologicResultsCreate) Save(ctx context.Context) (*EpidemiologicResults, error) {
	if erc.fromInfectedPlace != nil {
		if err := epidemiologicresults.FromInfectedPlaceValidator(*erc.fromInfectedPlace); err != nil {
			return nil, fmt.Errorf("ent: validator failed for field \"fromInfectedPlace\": %v", err)
		}
	}
	if erc.toInfectedPlace != nil {
		if err := epidemiologicresults.ToInfectedPlaceValidator(*erc.toInfectedPlace); err != nil {
			return nil, fmt.Errorf("ent: validator failed for field \"toInfectedPlace\": %v", err)
		}
	}
	if len(erc.test) > 1 {
		return nil, errors.New("ent: multiple assignments on a unique edge \"test\"")
	}
	return erc.sqlSave(ctx)
}

// SaveX calls Save and panics if Save returns an error.
func (erc *EpidemiologicResultsCreate) SaveX(ctx context.Context) *EpidemiologicResults {
	v, err := erc.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}
func (erc *EpidemiologicResultsCreate) sqlSave(ctx context.Context) (*EpidemiologicResults, error) {
var (
er = &EpidemiologicResults{config: erc.config}
_spec = &sqlgraph.CreateSpec{
Table: epidemiologicresults.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: epidemiologicresults.FieldID,
},
}
)
if value := erc.id; value != nil {
er.ID = *value
_spec.ID.Value = *value
}
if value := erc.visitedPlaces; value != nil {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeJSON,
Value: *value,
Column: epidemiologicresults.FieldVisitedPlaces,
})
er.VisitedPlaces = *value
}
if value := erc.infectedFamily; value != nil {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeBool,
Value: *value,
Column: epidemiologicresults.FieldInfectedFamily,
})
er.InfectedFamily = *value
}
if value := erc.fromInfectedPlace; value != nil {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeInt,
Value: *value,
Column: epidemiologicresults.FieldFromInfectedPlace,
})
er.FromInfectedPlace = *value
}
if value := erc.toInfectedPlace; value != nil {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeInt,
Value: *value,
Column: epidemiologicresults.FieldToInfectedPlace,
})
er.ToInfectedPlace = *value
}
if nodes := erc.test; len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: epidemiologicresults.TestTable,
Columns: []string{epidemiologicresults.TestColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: test.FieldID,
},
},
}
for k, _ := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_spec.Edges = append(_spec.Edges, edge)
}
if err := sqlgraph.CreateNode(ctx, erc.driver, _spec); err != nil {
if cerr, ok := isSQLConstraintError(err); ok {
err = cerr
}
return nil, err
}
return er, nil
} | ent/epidemiologicresults_create.go | 0.509764 | 0.4016 | epidemiologicresults_create.go | starcoder |
package types
import (
"fmt"
"sort"
"time"
"github.com/dgraph-io/dgraph/protos/pb"
"github.com/dgraph-io/dgraph/x"
)
// sortBase carries the parallel slices that must stay in step while sorting:
// per-uid value lists, the uid list itself, and (optionally) facet lists.
type sortBase struct {
	values [][]Val // Each uid could have multiple values which we need to sort it by.
	desc   []bool  // Sort orders for different values.
	ul     *pb.List // uid list; its Uids are permuted alongside values.
	o      []*pb.Facets // optional facet lists, permuted alongside values when non-nil.
}

// Len returns size of vector.
func (s sortBase) Len() int { return len(s.values) }

// Swap swaps two elements. The uid list and facet lists (if present) are
// swapped together with the value lists so they stay aligned.
func (s sortBase) Swap(i, j int) {
	s.values[i], s.values[j] = s.values[j], s.values[i]
	data := s.ul.Uids
	data[i], data[j] = data[j], data[i]
	if s.o != nil {
		s.o[i], s.o[j] = s.o[j], s.o[i]
	}
}
// byValue sorts by comparing the value lists element-wise.
type byValue struct{ sortBase }

// Less compares two elements by walking their value lists until the first
// pair of values that differ decides the order. Null values sort greatest
// under descending order and least under ascending order. Equal (or empty)
// value lists compare as not-less.
func (s byValue) Less(i, j int) bool {
	first, second := s.values[i], s.values[j]
	if len(first) == 0 || len(second) == 0 {
		return false
	}
	// idiom fix: "for vidx := range" instead of "for vidx, _ := range" (S1005)
	for vidx := range first {
		// Null value is considered greatest hence comes at first place while doing descending sort
		// and at last place while doing ascending sort.
		if first[vidx].Value == nil {
			return s.desc[vidx]
		}

		if second[vidx].Value == nil {
			return !s.desc[vidx]
		}

		// We have to look at next value to decide.
		if eq := equal(first[vidx], second[vidx]); eq {
			continue
		}

		// Its either less or greater.
		less := less(first[vidx], second[vidx])
		if s.desc[vidx] {
			return !less
		}
		return less
	}
	return false
}
// SortWithFacet sorts v in-place according to desc, permuting the uid list
// ul and the facet lists l in step with the values. Only values of sortable
// types (datetime, int, float, string, default) are accepted; the type is
// taken from the first value of the first element.
func SortWithFacet(v [][]Val, ul *pb.List, l []*pb.Facets, desc []bool) error {
	if len(v) == 0 || len(v[0]) == 0 {
		return nil
	}

	typ := v[0][0].Tid
	switch typ {
	case DateTimeID, IntID, FloatID, StringID, DefaultID:
		// Don't do anything, we can sort values of this type.
	default:
		return fmt.Errorf("Value of type: %s isn't sortable.", typ.Name())
	}
	var toBeSorted sort.Interface
	b := sortBase{v, desc, ul, l}
	toBeSorted = byValue{b}
	sort.Sort(toBeSorted)
	return nil
}

// Sort sorts the given array in-place; it is SortWithFacet without facets.
func Sort(v [][]Val, ul *pb.List, desc []bool) error {
	return SortWithFacet(v, ul, nil, desc)
}
// Less returns true if a is strictly less than b.
// It errors out when the two values have different types, or a type that
// cannot be ordered.
func Less(a, b Val) (bool, error) {
	if a.Tid != b.Tid {
		return false, x.Errorf("Arguments of different type can not be compared.")
	}
	typ := a.Tid
	switch typ {
	case DateTimeID, UidID, IntID, FloatID, StringID, DefaultID:
		// Don't do anything, we can sort values of this type.
	default:
		return false, x.Errorf("Compare not supported for type: %v", a.Tid)
	}
	return less(a, b), nil
}

// less compares two values without any error checking; mixed int/float
// comparisons are delegated to mismatchedLess, and unknown types compare
// as not-less.
func less(a, b Val) bool {
	if a.Tid != b.Tid {
		return mismatchedLess(a, b)
	}
	switch a.Tid {
	case DateTimeID:
		return a.Value.(time.Time).Before(b.Value.(time.Time))
	case IntID:
		return (a.Value.(int64)) < (b.Value.(int64))
	case FloatID:
		return (a.Value.(float64)) < (b.Value.(float64))
	case UidID:
		return (a.Value.(uint64) < b.Value.(uint64))
	case StringID, DefaultID:
		return (a.Value.(string)) < (b.Value.(string))
	}
	return false
}
// mismatchedLess orders two values of different types. Ints and floats are
// compared numerically; any other mix is ordered arbitrarily (but stably)
// by type id.
func mismatchedLess(a, b Val) bool {
	x.AssertTrue(a.Tid != b.Tid)
	if (a.Tid != IntID && a.Tid != FloatID) || (b.Tid != IntID && b.Tid != FloatID) {
		// Non-float/int are sorted arbitrarily by type.
		return a.Tid < b.Tid
	}

	// Floats and ints can be sorted together in a sensible way. The approach
	// here isn't 100% correct, and will be wrong when dealing with ints and
	// floats close to each other and greater in magnitude than 1<<53 (the
	// point at which consecutive floats are more than 1 apart).
	// (idiom fix: dropped the "else" after the terminating return above)
	if a.Tid == FloatID {
		return a.Value.(float64) < float64(b.Value.(int64))
	}
	x.AssertTrue(b.Tid == FloatID)
	return float64(a.Value.(int64)) < b.Value.(float64)
}
// Equal returns true if a is equal to b.
// It errors out when the two values have different types, or a type for
// which equality is not supported.
func Equal(a, b Val) (bool, error) {
	if a.Tid != b.Tid {
		return false, x.Errorf("Arguments of different type can not be compared.")
	}
	typ := a.Tid
	switch typ {
	case DateTimeID, IntID, FloatID, StringID, DefaultID, BoolID:
		// Don't do anything, we can sort values of this type.
	default:
		return false, x.Errorf("Equal not supported for type: %v", a.Tid)
	}
	return equal(a, b), nil
}

// equal compares two values of the same type without error checking;
// values of different or unknown types compare as unequal.
func equal(a, b Val) bool {
	if a.Tid != b.Tid {
		return false
	}
	switch a.Tid {
	case DateTimeID:
		return a.Value.(time.Time).Equal((b.Value.(time.Time)))
	case IntID:
		return (a.Value.(int64)) == (b.Value.(int64))
	case FloatID:
		return (a.Value.(float64)) == (b.Value.(float64))
	case StringID, DefaultID:
		return (a.Value.(string)) == (b.Value.(string))
	case BoolID:
		return a.Value.(bool) == (b.Value.(bool))
	}
	return false
}
package tsm1
// bool encoding uses 1 bit per value. Each compressed byte slice contains a 1 byte header
// indicating the compression type, followed by a variable byte encoded length indicating
// how many booleans are packed in the slice. The remaining bytes contains 1 byte for every
// 8 boolean values encoded.
import "encoding/binary"
const (
	// boolUncompressed is an uncompressed boolean format.
	// Not yet implemented.
	boolUncompressed = 0

	// boolCompressedBitPacked is a bit packed format using 1 bit per boolean
	boolCompressedBitPacked = 1
)

// BoolEncoder encodes a series of bools to an in-memory buffer.
type BoolEncoder interface {
	Write(b bool)
	Bytes() ([]byte, error)
}

type boolEncoder struct {
	// Completed bytes of output so far.
	data []byte

	// The byte currently being filled.
	current byte

	// Number of bits packed into current.
	nbits int

	// Total number of booleans written.
	total int
}

// NewBoolEncoder returns a new instance of BoolEncoder.
func NewBoolEncoder() BoolEncoder {
	return &boolEncoder{}
}

// Write packs a single boolean as one bit, most-significant bit first.
func (e *boolEncoder) Write(v bool) {
	// Flush once a full byte has been accumulated.
	if e.nbits >= 8 {
		e.flush()
	}

	// Shift in a 1 bit for true, 0 for false.
	e.current <<= 1
	if v {
		e.current |= 1
	}

	e.nbits++
	e.total++
}

// flush zero-pads the current byte to 8 bits and appends it to the output.
func (e *boolEncoder) flush() {
	// Pad the remaining low bits with zeroes.
	for ; e.nbits < 8; e.nbits++ {
		e.current <<= 1
	}

	if e.nbits > 0 {
		e.data = append(e.data, e.current)
		e.current = 0
		e.nbits = 0
	}
}

// Bytes returns the encoded buffer: a 1 byte type header, a varint count of
// booleans, then the bit-packed payload.
func (e *boolEncoder) Bytes() ([]byte, error) {
	// Ensure any partial byte is flushed.
	e.flush()

	hdr := make([]byte, binary.MaxVarintLen64+1)

	// The encoding type lives in the 4 high bits of the first byte.
	hdr[0] = byte(boolCompressedBitPacked) << 4

	// Variable byte encode the number of booleans written.
	n := 1 + binary.PutUvarint(hdr[1:], uint64(e.total))

	// Append the packed booleans.
	return append(hdr[:n], e.data...), nil
}
// BoolDecoder decodes a series of bools from an in-memory buffer.
type BoolDecoder interface {
	Next() bool
	Read() bool
	Error() error
}

type boolDecoder struct {
	b   []byte // bit-packed payload (type header and count stripped)
	i   int    // index of the current boolean; -1 before the first Next
	n   int    // total number of booleans in the buffer
	err error
}

// NewBoolDecoder returns a new instance of BoolDecoder.
func NewBoolDecoder(b []byte) BoolDecoder {
	// The first byte stores the encoding type; only the bit-packed format
	// exists today, so it is skipped rather than inspected.
	payload := b[1:]

	count, size := binary.Uvarint(payload)
	return &boolDecoder{b: payload[size:], i: -1, n: int(count)}
}

// Next advances the decoder and reports whether another value remains.
func (d *boolDecoder) Next() bool {
	d.i++
	return d.i < d.n
}

// Read returns the boolean at the current position. Bits are packed
// most-significant first within each byte.
func (d *boolDecoder) Read() bool {
	shift := uint(7 - d.i%8)
	return (d.b[d.i/8]>>shift)&1 == 1
}

func (d *boolDecoder) Error() error {
	return d.err
}
package main
import (
"fmt"
"math"
"math/rand"
"time"
)
// Fade is Perlin's smoothing curve 6t^5 - 15t^4 + 10t^3, applied to |t|.
// It maps 0 to 0 and 1 to 1 with zero first and second derivatives at the
// endpoints, smoothing out the interpolation.
func Fade(t float64) float64 {
	a := math.Abs(t)
	return a * a * a * (a*(a*6-15) + 10)
}
// GeneratePermutations returns a randomly shuffled permutation of the
// values 0-255, duplicated to 512 entries so that result[i] == result[i%256]
// and indices up to 511 can be used without wrapping.
func GeneratePermutations() []int {
	base := make([]int, 256)
	for i := range base {
		base[i] = i
	}
	rand.Shuffle(len(base), func(i, j int) {
		base[i], base[j] = base[j], base[i]
	})

	// Double the table.
	doubled := make([]int, 0, 512)
	doubled = append(doubled, base...)
	doubled = append(doubled, base...)
	return doubled
}
// GenerateGradients returns 256 unit-length 2d gradient vectors used for
// composing the final noise pattern.
//
// Directions are drawn by rejection sampling: candidates are taken uniformly
// from the [-1,1]^2 square and accepted only when they fall inside the unit
// disk (and are not the zero vector), then normalized. The previous
// condition (LengthSquared() >= 1.0) accepted only points *outside* the
// disk, which biased gradient directions toward the square's diagonals.
func GenerateGradients() []vec2 {
	grads := make([]vec2, 256)
	for i := 0; i < len(grads); i++ {
		var gradient vec2
		for {
			gradient = vec2{rand.Float64()*2 - 1, rand.Float64()*2 - 1}
			// Accept points inside the unit disk, excluding the origin
			// (which cannot be normalized).
			if ls := gradient.LengthSquared(); ls > 0 && ls <= 1.0 {
				break
			}
		}
		gradient.Normalize()
		grads[i] = gradient
	}
	return grads
}
// Q applies the fade curve to both components of uv and multiplies the
// results, giving the 2d smoothing weight for one corner contribution.
func Q(uv vec2) float64 {
	return Fade(uv.x) * Fade(uv.y)
}

// Noise returns the perlin noise value at pos, clamped to [-1, 1], using
// the supplied permutation table and gradient set.
//
// NOTE(review): the permutation lookups use int(ij.x) % len(perms); Go's %
// yields a negative remainder for negative coordinates, which would panic.
// Callers in this file only pass non-negative positions — confirm before
// using with negative coordinates.
func Noise(pos vec2, perms []int, grads []vec2) float64 {
	cell := vec2{math.Floor(pos.x), math.Floor(pos.y)}
	total := 0.0
	// The four corners of the lattice cell containing pos.
	corners := [4]vec2{{0, 0}, {0, 1}, {1, 0}, {1, 1}}
	for _, corner := range corners {
		ij := cell.Add(&corner)
		uv := pos.Sub(&ij)
		// Hash the corner's integer coordinates into a gradient index.
		index := perms[int(ij.x)%len(perms)]
		index = perms[(index+int(ij.y))%len(perms)]
		grad := grads[index%len(grads)]
		// Faded weight times the gradient/offset dot product.
		total += Q(uv) * grad.Dot(&uv)
	}
	return math.Max(math.Min(total, 1.0), -1.0)
}
// CubicInterpolation interpolates between p[1] and p[2] at parameter x in
// [0,1], using p[0] and p[3] as outer neighbors to shape the curve.
// x=0 yields p[1] and x=1 yields p[2].
func CubicInterpolation(p []float64, x float64) float64 {
	return cubicInterpAux(p[0], p[1], p[2], p[3], x)
}

// cubicInterpAux derives the cubic's coefficients from the four samples and
// evaluates it at x.
func cubicInterpAux(v0, v1, v2, v3, x float64) float64 {
	cubic := (v3 - v2) - (v0 - v1)
	quadratic := (v0 - v1) - cubic
	linear := v2 - v0
	constant := v1
	return cubic*x*x*x + quadratic*x*x + linear*x + constant
}
// StretchedNoise samples Noise on a lattice scaled down by stretch and
// cubically interpolates across the surrounding 4x4 block of lattice
// samples, stretching each noise point over multiple output points and
// smoothing the result.
func StretchedNoise(pos vec2, perms []int, grads []vec2, stretch float64) float64 {
	xf := pos.x / stretch
	yf := pos.y / stretch
	x := int(math.Floor(xf))
	y := int(math.Floor(yf))
	// Fractional position within the lattice cell; used as the
	// interpolation parameter.
	fracX := xf - float64(x)
	fracY := yf - float64(y)
	p := make([]float64, 4)
	for j := 0; j < 4; j++ {
		// Interpolate each row of 4 lattice samples along x...
		p2 := make([]float64, 4)
		for i := 0; i < 4; i++ {
			p2[i] = Noise(
				vec2{float64(x + i), float64(y + j)},
				perms,
				grads)
		}
		p[j] = CubicInterpolation(p2, fracX)
	}
	// ...then interpolate the row results along y.
	return CubicInterpolation(p, fracY)
}
// GenerateNoiseMap generates a width*height noise pattern (row-major) with
// the given frequency multiplier (octave), lattice stretch and amplitude
// multiplier. Each output value lands in [0, 0.5]: it is clamped to [0,1]
// and then halved.
//
// NOTE(review): this reseeds the global math/rand source on every call, so
// results are time-dependent rather than reproducible.
func GenerateNoiseMap(width int, height int, octave float64, stretch float64, multiplier float64) []float64 {
	rand.Seed(time.Now().UnixNano())
	perms := GeneratePermutations()
	grads := GenerateGradients()
	data := make([]float64, width*height)
	index := 0
	for y := 0; y < height; y++ {
		for x := 0; x < width; x++ {
			pos := vec2{float64(x) * octave, float64(y) * octave}
			res := StretchedNoise(pos, perms, grads, stretch) * multiplier
			res = Clamp(res, 0.0, 1.0)
			// Halved — presumably to leave headroom when several octave
			// layers are summed (see MergeNoiseData); confirm intent.
			res *= 0.5
			data[index] = res
			index++
		}
	}
	return data
}
// MergeNoiseData merges multiple noise layers into one height map: values
// are summed per point, normalized by the total of the layer multipliers,
// reshaped with a power-curve redistribution, floored at the water level,
// and finally clamped to [0, 1].
func MergeNoiseData(multipliers []float64, redistribution float64, waterHeight float64, layers ...[]float64) []float64 {
	weightTotal := 0.0
	for _, m := range multipliers {
		weightTotal += m
	}

	merged := make([]float64, len(layers[0]))
	for i := range merged {
		// Sum the layers at this point, then normalize and reshape.
		height := 0.0
		for _, layer := range layers {
			height += layer[i]
		}
		height = math.Pow(height/weightTotal, redistribution)
		if height < waterHeight {
			// Everything below the water line is flattened to sea level.
			height = waterHeight
		}
		merged[i] = Clamp(height, 0.0, 1.0)
	}
	return merged
}
// NoiseDataToColor converts a float64 array into an array of greyscale
// color values, using the same value for each channel.
func NoiseDataToColor(noiseData []float64) []color {
	colors := make([]color, len(noiseData))
	for i, n := range noiseData {
		colors[i] = color{n, n, n}
	}
	return colors
}

// RenderToImage generates a terrain heightmap from three perlin-noise
// octaves and stores the result in "output.ppm".
func RenderToImage() {
	const imageWidth = 600
	const imageHeight = 600
	const redistribution = 0.72 // power-curve exponent applied in MergeNoiseData
	const waterHeight = 0.1     // heights below this are flattened to sea level

	// Per-octave amplitudes: each doubling of frequency halves the weight.
	multipliers := []float64{1.0, 0.5, 0.25}
	noiseData1 := GenerateNoiseMap(imageWidth, imageHeight, 1, 100, multipliers[0])
	noiseData2 := GenerateNoiseMap(imageWidth, imageHeight, 2, 100, multipliers[1])
	noiseData3 := GenerateNoiseMap(imageWidth, imageHeight, 4, 100, multipliers[2])

	mapData := MergeNoiseData(multipliers, redistribution, waterHeight, noiseData1, noiseData2, noiseData3)
	colors := NoiseDataToColor(mapData)

	WriteToPPMFile("output.ppm", imageWidth, imageHeight, colors)
}

// main renders the heightmap and reports the elapsed wall-clock time.
func main() {
	start := time.Now()

	RenderToImage()

	t := time.Now()
	elapsed := t.Sub(start)
	fmt.Println("Elapsed Time", elapsed.Seconds(), "seconds")
}
package binarytrie
import (
"net"
)
// NaiveTrie is a simple one-bit-per-level binary trie over IP prefixes.
type NaiveTrie struct {
	root    *naiveTrieNode
	mutable bool // Insert refuses to modify the trie once this is false
}

type naiveTrieNode struct {
	skipValue       uint8  // bits to skip before branching (never set by Insert, so 0 for tries built here)
	skippedBits     uint32 // values of the skipped bits, verified during Lookup
	branchingFactor uint8  // bits consumed by the branch; 0 marks a leaf, Insert uses 1
	parent          *naiveTrieNode
	children        []*naiveTrieNode
	value           uint32 // stored value; 0 doubles as "no value at this node"
}

// NewNaiveTrie creates an empty NaiveTrie.
func NewNaiveTrie() *NaiveTrie {
	return &NaiveTrie{
		root:    &naiveTrieNode{},
		mutable: true,
	}
}
// Insert implements Trie.
// It descends from the root one bit of the prefix at a time, creating
// nodes as needed, and stores value at the node for the full prefix.
// Returns ErrTrieImmutable once the trie has been frozen.
//
// NOTE(review): a value of 0 is indistinguishable from "absent" in Lookup.
func (t *NaiveTrie) Insert(ipNet *net.IPNet, value uint32) error {
	if !t.mutable {
		return ErrTrieImmutable
	}

	prefix, prefixSize, err := parseIpNet(ipNet)
	if err != nil {
		return err
	}

	currentNode := t.root
	bitPosition := 0
	for {
		// Lazily turn a leaf into a 1-bit branch.
		if currentNode.branchingFactor == 0 {
			currentNode.branchingFactor = 1
			currentNode.children = make([]*naiveTrieNode, 2)
		}

		bit := extractBits(prefix, bitPosition, 1)
		bitPosition++

		if currentNode.children[bit] == nil {
			currentNode.children[bit] = &naiveTrieNode{parent: currentNode}
		}
		currentNode = currentNode.children[bit]

		if bitPosition >= prefixSize {
			break
		}
	}
	currentNode.value = value
	return nil
}
// Lookup implements Trie.
// It walks the trie along the bits of ip, remembering the most recent
// non-zero value seen (the longest-prefix match so far), and returns
// ErrValueNotFound if no node on the path holds a value.
func (t *NaiveTrie) Lookup(ip net.IP) (value uint32, err error) {
	ip = ip.To16()
	if ip == nil {
		return 0, ErrInvalidIPAddress
	}

	currentNode := t.root
	bitPosition := 0
	for {
		// Track the deepest value on the path; it becomes the result if
		// descent stops below it.
		if currentNode.value != 0 {
			value = currentNode.value
		}
		if currentNode.isLeaf() {
			break
		}

		// Consume any skipped (path-compressed) bits, then the branch bits.
		skippedBits := extractBits(ip, bitPosition, int(currentNode.skipValue))
		bitPosition += int(currentNode.skipValue)
		prefix := extractBits(ip, bitPosition, int(currentNode.branchingFactor))
		bitPosition += int(currentNode.branchingFactor)

		nextNode := currentNode.children[prefix]
		// Stop when there is no child, or the skipped bits disagree with
		// the path compressed into this edge.
		if nextNode == nil || nextNode.skippedBits != skippedBits {
			break
		}
		currentNode = nextNode
	}

	if value == 0 {
		return 0, ErrValueNotFound
	}
	return
}
// ToArrayTrie creates an identical ArrayTrie.
func (t *NaiveTrie) ToArrayTrie() *ArrayTrie {
	return NewArrayTrieFromNaiveTrie(t)
}

// allocatedSize returns the total node-slot count of the trie; nil child
// slots count as one slot each.
func (t *NaiveTrie) allocatedSize() int {
	return t.root.allocatedSize()
}

// isLeaf reports whether this node has no branch (and hence no children).
func (n *naiveTrieNode) isLeaf() bool {
	return n.branchingFactor == 0
}

// allocatedSize counts this node plus, recursively, each child; a nil
// child slot still counts as one.
func (n *naiveTrieNode) allocatedSize() int {
	count := 1
	for _, child := range n.children {
		if child != nil {
			count += child.allocatedSize()
		} else {
			count++
		}
	}
	return count
}
package chart
import "fmt"
const (
	// DefaultMACDPeriodPrimary is the long window.
	DefaultMACDPeriodPrimary = 26
	// DefaultMACDPeriodSecondary is the short window.
	DefaultMACDPeriodSecondary = 12
	// DefaultMACDSignalPeriod is the signal period to compute for the MACD.
	DefaultMACDSignalPeriod = 9
)

// MACDSeries computes the difference between the MACD line and the MACD Signal line.
// It is used in technical analysis and gives a lagging indicator of momentum.
type MACDSeries struct {
	Name        string
	Style       Style
	YAxis       YAxisType
	InnerSeries ValuesProvider // source series the MACD is computed over

	PrimaryPeriod   int // long EMA window (0 means DefaultMACDPeriodPrimary)
	SecondaryPeriod int // short EMA window (0 means DefaultMACDPeriodSecondary)
	SignalPeriod    int // EMA window over the MACD line (0 means DefaultMACDSignalPeriod)

	signal *MACDSignalSeries // lazily built by ensureChildSeries
	macdl  *MACDLineSeries   // lazily built by ensureChildSeries
}
// Validate validates the series by validating whichever child series have
// been constructed; an unbuilt (nil) child is considered valid.
func (macd MACDSeries) Validate() error {
	if macd.signal != nil {
		if err := macd.signal.Validate(); err != nil {
			return err
		}
	}
	if macd.macdl != nil {
		if err := macd.macdl.Validate(); err != nil {
			return err
		}
	}
	return nil
}
// GetPeriods returns the primary and secondary periods (and the signal
// period), substituting the package defaults for any zero value.
func (macd MACDSeries) GetPeriods() (w1, w2, sig int) {
	if macd.PrimaryPeriod == 0 {
		w1 = DefaultMACDPeriodPrimary
	} else {
		w1 = macd.PrimaryPeriod
	}
	if macd.SecondaryPeriod == 0 {
		w2 = DefaultMACDPeriodSecondary
	} else {
		w2 = macd.SecondaryPeriod
	}
	if macd.SignalPeriod == 0 {
		sig = DefaultMACDSignalPeriod
	} else {
		sig = macd.SignalPeriod
	}
	return
}

// GetName returns the name of the time series.
func (macd MACDSeries) GetName() string {
	return macd.Name
}

// GetStyle returns the line style.
func (macd MACDSeries) GetStyle() Style {
	return macd.Style
}

// GetYAxis returns which YAxis the series draws on.
func (macd MACDSeries) GetYAxis() YAxisType {
	return macd.YAxis
}

// Len returns the number of elements in the series.
func (macd MACDSeries) Len() int {
	if macd.InnerSeries == nil {
		return 0
	}

	return macd.InnerSeries.Len()
}

// GetValues gets a value at a given index. For MACDSeries it is the
// histogram value: MACD line minus signal line.
func (macd *MACDSeries) GetValues(index int) (x float64, y float64) {
	if macd.InnerSeries == nil {
		return
	}

	if macd.signal == nil || macd.macdl == nil {
		macd.ensureChildSeries()
	}

	_, lv := macd.macdl.GetValues(index)
	_, sv := macd.signal.GetValues(index)

	x, _ = macd.InnerSeries.GetValues(index)
	y = lv - sv

	return
}

// ensureChildSeries lazily constructs the MACD line and signal child series
// from the configured (or default) periods.
func (macd *MACDSeries) ensureChildSeries() {
	w1, w2, sig := macd.GetPeriods()

	macd.signal = &MACDSignalSeries{
		InnerSeries:     macd.InnerSeries,
		PrimaryPeriod:   w1,
		SecondaryPeriod: w2,
		SignalPeriod:    sig,
	}

	macd.macdl = &MACDLineSeries{
		InnerSeries:     macd.InnerSeries,
		PrimaryPeriod:   w1,
		SecondaryPeriod: w2,
	}
}
// MACDSignalSeries computes the EMA of the MACDLineSeries.
type MACDSignalSeries struct {
	Name        string
	Style       Style
	YAxis       YAxisType
	InnerSeries ValuesProvider // source series the MACD line is computed over

	PrimaryPeriod   int // long EMA window (0 means default)
	SecondaryPeriod int // short EMA window (0 means default)
	SignalPeriod    int // EMA window applied to the MACD line (0 means default)

	signal *EMASeries // lazily built by ensureSignal
}

// Validate validates the series (only once the signal child exists).
func (macds MACDSignalSeries) Validate() error {
	if macds.signal != nil {
		return macds.signal.Validate()
	}
	return nil
}

// GetPeriods returns the primary and secondary periods (and signal period),
// substituting defaults for zero values.
func (macds MACDSignalSeries) GetPeriods() (w1, w2, sig int) {
	if macds.PrimaryPeriod == 0 {
		w1 = DefaultMACDPeriodPrimary
	} else {
		w1 = macds.PrimaryPeriod
	}
	if macds.SecondaryPeriod == 0 {
		w2 = DefaultMACDPeriodSecondary
	} else {
		w2 = macds.SecondaryPeriod
	}
	if macds.SignalPeriod == 0 {
		sig = DefaultMACDSignalPeriod
	} else {
		sig = macds.SignalPeriod
	}
	return
}

// GetName returns the name of the time series.
func (macds MACDSignalSeries) GetName() string {
	return macds.Name
}

// GetStyle returns the line style.
func (macds MACDSignalSeries) GetStyle() Style {
	return macds.Style
}

// GetYAxis returns which YAxis the series draws on.
func (macds MACDSignalSeries) GetYAxis() YAxisType {
	return macds.YAxis
}

// Len returns the number of elements in the series.
func (macds *MACDSignalSeries) Len() int {
	if macds.InnerSeries == nil {
		return 0
	}

	return macds.InnerSeries.Len()
}

// GetValues gets a value at a given index. For MACD it is the signal value.
func (macds *MACDSignalSeries) GetValues(index int) (x float64, y float64) {
	if macds.InnerSeries == nil {
		return
	}
	if macds.signal == nil {
		macds.ensureSignal()
	}
	x, _ = macds.InnerSeries.GetValues(index)
	_, y = macds.signal.GetValues(index)
	return
}

// ensureSignal lazily builds the EMA-of-MACD-line child series.
func (macds *MACDSignalSeries) ensureSignal() {
	w1, w2, sig := macds.GetPeriods()

	macds.signal = &EMASeries{
		InnerSeries: &MACDLineSeries{
			InnerSeries:     macds.InnerSeries,
			PrimaryPeriod:   w1,
			SecondaryPeriod: w2,
		},
		Period: sig,
	}
}

// Render renders the series.
func (macds *MACDSignalSeries) Render(r Renderer, canvasBox Box, xrange, yrange Range, defaults Style) {
	style := macds.Style.InheritFrom(defaults)
	Draw.LineSeries(r, canvasBox, xrange, yrange, style, macds)
}
// MACDLineSeries is a series that computes the inner ema1-ema2 value as a series.
// Its value is the short-period EMA minus the long-period EMA of the inner series.
type MACDLineSeries struct {
	Name        string
	Style       Style
	YAxis       YAxisType
	InnerSeries ValuesProvider // source series the EMAs are computed over

	PrimaryPeriod   int // long EMA window (0 means default)
	SecondaryPeriod int // short EMA window (0 means default)

	ema1 *EMASeries // long-period EMA, lazily built
	ema2 *EMASeries // short-period EMA, lazily built

	Sigma float64 // NOTE(review): not referenced anywhere in this file — confirm external use before removing
}

// Validate validates the series: any constructed EMA children must be
// valid, and an inner series must be provided.
func (macdl MACDLineSeries) Validate() error {
	var err error
	if macdl.ema1 != nil {
		err = macdl.ema1.Validate()
	}
	if err != nil {
		return err
	}
	if macdl.ema2 != nil {
		err = macdl.ema2.Validate()
	}
	if err != nil {
		return err
	}
	if macdl.InnerSeries == nil {
		return fmt.Errorf("MACDLineSeries: must provide an inner series")
	}
	return nil
}

// GetName returns the name of the time series.
func (macdl MACDLineSeries) GetName() string {
	return macdl.Name
}

// GetStyle returns the line style.
func (macdl MACDLineSeries) GetStyle() Style {
	return macdl.Style
}

// GetYAxis returns which YAxis the series draws on.
func (macdl MACDLineSeries) GetYAxis() YAxisType {
	return macdl.YAxis
}

// GetPeriods returns the primary and secondary periods, substituting
// defaults for zero values.
func (macdl MACDLineSeries) GetPeriods() (w1, w2 int) {
	if macdl.PrimaryPeriod == 0 {
		w1 = DefaultMACDPeriodPrimary
	} else {
		w1 = macdl.PrimaryPeriod
	}
	if macdl.SecondaryPeriod == 0 {
		w2 = DefaultMACDPeriodSecondary
	} else {
		w2 = macdl.SecondaryPeriod
	}
	return
}

// Len returns the number of elements in the series.
func (macdl *MACDLineSeries) Len() int {
	if macdl.InnerSeries == nil {
		return 0
	}

	return macdl.InnerSeries.Len()
}

// GetValues gets a value at a given index. For the MACD line it is the
// short-period EMA minus the long-period EMA.
func (macdl *MACDLineSeries) GetValues(index int) (x float64, y float64) {
	if macdl.InnerSeries == nil {
		return
	}
	if macdl.ema1 == nil && macdl.ema2 == nil {
		macdl.ensureEMASeries()
	}

	x, _ = macdl.InnerSeries.GetValues(index)

	_, emav1 := macdl.ema1.GetValues(index)
	_, emav2 := macdl.ema2.GetValues(index)

	y = emav2 - emav1

	return
}

// ensureEMASeries lazily builds the long (ema1) and short (ema2) EMA children.
func (macdl *MACDLineSeries) ensureEMASeries() {
	w1, w2 := macdl.GetPeriods()

	macdl.ema1 = &EMASeries{
		InnerSeries: macdl.InnerSeries,
		Period:      w1,
	}
	macdl.ema2 = &EMASeries{
		InnerSeries: macdl.InnerSeries,
		Period:      w2,
	}
}

// Render renders the series.
func (macdl *MACDLineSeries) Render(r Renderer, canvasBox Box, xrange, yrange Range, defaults Style) {
	style := macdl.Style.InheritFrom(defaults)
	Draw.LineSeries(r, canvasBox, xrange, yrange, style, macdl)
}
} | vendor/github.com/wcharczuk/go-chart/v2/macd_series.go | 0.803868 | 0.594904 | macd_series.go | starcoder |
package anns
import (
"math/big"
"sync"
"github.com/ncw/gmp"
"github.com/sachaservan/argsort"
"github.com/sachaservan/vec"
)
// DistanceMetric specifies the distance LSH should be sensitive to.
type DistanceMetric int

const (
	// HammingDistance specifies a hamming weight distance metric.
	HammingDistance DistanceMetric = iota
	// EuclideanDistance specifies a euclidean (l2) distance metric.
	EuclideanDistance
)
// Table stores a set of hash buckets for one LSH table, keyed by the string
// digest of a point; each bucket is a set of data-point indices.
type Table struct {
	Buckets map[string]map[int]bool // hash table for all buckets per LSH table
}

// LSHBasedKNN is a data structure that uses GaussianHash to
// hash a set of points into buckets for nearest neighbor search.
type LSHBasedKNN struct {
	Params *LSHParams     // parameters used in constructing the data structure
	Data   []*vec.Vec     // copy of the original data vectors
	Tables map[int]*Table // array of hash tables storing the data
	Hashes map[int]*LSH   // hash function for each of the numTables tables
}
// RandomizeBucketKeys maps every bucket key of every hash table through the
// provided universal hash and returns, per table, the randomized keys in
// sorted order. Tables are processed concurrently; the tables themselves
// are not modified.
// NOTE(review): newKeys is indexed by the Tables map key, so this assumes
// table indices are exactly 0..len(Tables)-1 — a sparse key set would
// index out of range.
func (knn *LSHBasedKNN) RandomizeBucketKeys(uhash *UniversalHash) [][]*gmp.Int {
	newKeys := make([][]*gmp.Int, len(knn.Tables))
	var wg sync.WaitGroup
	for i, t := range knn.Tables {
		wg.Add(1)
		go func(i int, t *Table) {
			defer wg.Done()
			newKeys[i] = make([]*gmp.Int, len(t.Buckets))
			j := 0
			for k := range t.Buckets {
				// interpret the bucket key bytes as an integer and
				// set its universal-hash digest as the new randomized key
				bigKey := gmp.NewInt(0).SetBytes([]byte(k))
				newKey := uhash.Digest(bigKey)
				newKeys[i][j] = newKey
				j++
			}
			// sort the randomized keys for this table
			sort := argsort.NewIntArgsort(newKeys[i])
			newKeys[i] = argsort.SortIntsByArray(newKeys[i], sort)
		}(i, t)
	}
	wg.Wait()
	return newKeys
}
// BuildWithData hashes every data vector into the buckets of each of the
// Params.NumTables hash tables, one goroutine per table. A bucket stops
// accepting entries once it reaches Params.BucketSize (-1 means no limit).
// The data slice is retained, not copied.
func (knn *LSHBasedKNN) BuildWithData(data []*vec.Vec) {
	knn.Data = data
	var wg sync.WaitGroup
	for i := 0; i < knn.Params.NumTables; i++ {
		// table i is created before its goroutine starts, and each goroutine
		// only touches its own table, so no locking is needed here
		knn.Tables[i] = &Table{
			Buckets: make(map[string]map[int]bool),
		}
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			for j, point := range data {
				digest := knn.Hashes[i].StringDigest(point)
				if knn.Tables[i].Buckets[digest] == nil {
					knn.Tables[i].Buckets[digest] = make(map[int]bool)
				}
				// add the value to the bucket
				if knn.Params.BucketSize == -1 || len(knn.Tables[i].Buckets[digest]) < knn.Params.BucketSize {
					knn.Tables[i].Buckets[digest][j] = true
				}
			}
		}(i)
	}
	wg.Wait()
}
// Query hashes the query vector into each table concurrently and returns
// the points (and point ids) found in all matching buckets. A point that
// appears in buckets of several tables is returned once per table.
func (knn *LSHBasedKNN) Query(query *vec.Vec) ([]*vec.Vec, []int) {
	candidates := make([]*vec.Vec, 0)
	ids := make([]int, 0)
	var mu sync.Mutex
	var wg sync.WaitGroup
	for i := 0; i < knn.Params.NumTables; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			digest := knn.Hashes[i].StringDigest(query)
			if bucket, ok := knn.Tables[i].Buckets[digest]; ok {
				for key := range bucket {
					// mu guards the shared result slices
					mu.Lock()
					candidates = append(candidates, knn.Data[key])
					ids = append(ids, key)
					mu.Unlock()
				}
			}
		}(i)
	}
	wg.Wait()
	return candidates, ids
}
// GetTableKeys returns, for each hash table, the bucket keys currently
// present (in unspecified order); key collection runs concurrently per table.
func (knn *LSHBasedKNN) GetTableKeys() [][]string {
	var wg sync.WaitGroup
	keys := make([][]string, len(knn.Tables))
	for i, t := range knn.Tables {
		keys[i] = make([]string, len(t.Buckets))
		wg.Add(1)
		go func(i int, t *Table) {
			defer wg.Done()
			j := 0
			for k := range t.Buckets {
				keys[i][j] = k
				j++
			}
		}(i, t)
	}
	wg.Wait()
	return keys
}
// GetTableBuckets returns all buckets of the hash table at tableIndex; each
// bucket is the list of its stored point ids rendered as big-endian byte
// strings.
// NOTE(review): id 0 encodes to the empty string because big.Int(0).Bytes()
// is empty — confirm downstream consumers tolerate that.
func (knn *LSHBasedKNN) GetTableBuckets(tableIndex int) [][]string {
	t := knn.Tables[tableIndex]
	// all buckets in table t
	buckets := make([][]string, 0)
	for _, b := range t.Buckets {
		// all values in the bucket
		bucket := make([]string, 0)
		for k := range b {
			bucket = append(bucket, string(big.NewInt(int64(k)).Bytes()))
		}
		// add the bucket to the list of buckets
		buckets = append(buckets, bucket)
	}
	return buckets
}
// GetTableMaxBucketSize returns, for each hash table, the number of entries
// in that table's most heavily populated bucket.
func (knn *LSHBasedKNN) GetTableMaxBucketSize() []int {
	sizes := make([]int, len(knn.Tables))
	for i, table := range knn.Tables {
		largest := 0
		for _, bucket := range table.Buckets {
			if n := len(bucket); n > largest {
				largest = n
			}
		}
		sizes[i] = largest
	}
	return sizes
}
// GetHashForTable returns the locality sensitive hash used by table t.
func (knn *LSHBasedKNN) GetHashForTable(t int) *LSH {
	return knn.Hashes[t]
}

// NumTables returns the number of hash tables in the KNN data structure.
func (knn *LSHBasedKNN) NumTables() int {
	return knn.Params.NumTables
}
package spogoto
import (
"math"
"strconv"
)
// NewFloatStack generates a float DataStack seeded with the given values
// and wired up with the float instruction set. The element parser accepts
// any token strconv.ParseFloat can handle.
func NewFloatStack(floats []float64) *datastack {
	elements := Elements{}
	for _, v := range floats {
		elements = append(elements, float64(v))
	}
	d := NewDataStack(elements, FunctionMap{}, func(str string) (Element, bool) {
		// token parser: ok is false when the token is not a float
		val, err := strconv.ParseFloat(str, 64)
		return Element(val), err == nil
	})
	addFloatFunctions(d)
	return d
}
// FloatStackConstructor returns the stack's registry name ("float")
// together with a fresh, empty float DataStack.
func FloatStackConstructor() (string, DataStack) {
	return "float", NewFloatStack([]float64{})
}
// addFloatFunctions registers the float instruction set on ds.FunctionMap.
// Binary operators pop the top two elements (top-of-stack first); any
// instruction with too few operands — or a zero divisor — is a no-op that
// leaves the stacks untouched.
func addFloatFunctions(ds *datastack) {
	ds.FunctionMap["+"] = func(d DataStack, r RunSet, i Interpreter) {
		if d.Lack(2) {
			return
		}
		d.Push(d.Pop().(float64) + d.Pop().(float64))
	}
	ds.FunctionMap["*"] = func(d DataStack, r RunSet, i Interpreter) {
		if d.Lack(2) {
			return
		}
		d.Push(d.Pop().(float64) * d.Pop().(float64))
	}
	// "-" pushes second - top: negating the first Pop (the top of the
	// stack) and adding the second gives the conventional operand order.
	ds.FunctionMap["-"] = func(d DataStack, r RunSet, i Interpreter) {
		if d.Lack(2) {
			return
		}
		d.Push(-d.Pop().(float64) + d.Pop().(float64))
	}
	// "/" and "%" peek at the divisor before popping, so a zero divisor
	// leaves the stack intact.
	ds.FunctionMap["/"] = func(d DataStack, r RunSet, i Interpreter) {
		if d.Lack(2) || d.Peek().(float64) == 0 {
			return
		}
		f1 := d.Pop().(float64)
		f2 := d.Pop().(float64)
		d.Push(f2 / f1)
	}
	ds.FunctionMap["%"] = func(d DataStack, r RunSet, i Interpreter) {
		if d.Lack(2) || d.Peek().(float64) == 0 {
			return
		}
		f1 := d.Pop().(float64)
		f2 := d.Pop().(float64)
		mod := math.Mod(f2, f1)
		d.Push(mod)
	}
	ds.FunctionMap["min"] = func(d DataStack, r RunSet, i Interpreter) {
		if d.Lack(2) {
			return
		}
		f1 := d.Pop().(float64)
		f2 := d.Pop().(float64)
		if f1 < f2 {
			d.Push(f1)
		} else {
			d.Push(f2)
		}
	}
	ds.FunctionMap["max"] = func(d DataStack, r RunSet, i Interpreter) {
		if d.Lack(2) {
			return
		}
		f1 := d.Pop().(float64)
		f2 := d.Pop().(float64)
		if f1 > f2 {
			d.Push(f1)
		} else {
			d.Push(f2)
		}
	}
	// comparisons pop two floats and push the result on the boolean stack;
	// f2 is the element that was deeper in the stack.
	ds.FunctionMap[">"] = func(d DataStack, r RunSet, i Interpreter) {
		if d.Lack(2) {
			return
		}
		f1 := d.Pop().(float64)
		f2 := d.Pop().(float64)
		r.Stack("boolean").Push(f2 > f1)
	}
	ds.FunctionMap["<"] = func(d DataStack, r RunSet, i Interpreter) {
		if d.Lack(2) {
			return
		}
		f1 := d.Pop().(float64)
		f2 := d.Pop().(float64)
		r.Stack("boolean").Push(f2 < f1)
	}
	ds.FunctionMap["="] = func(d DataStack, r RunSet, i Interpreter) {
		if d.Lack(2) {
			return
		}
		r.Stack("boolean").Push(d.Pop().(float64) == d.Pop().(float64))
	}
	// conversions consume the top of another stack and push a float
	ds.FunctionMap["fromboolean"] = func(d DataStack, r RunSet, i Interpreter) {
		if r.Bad("boolean", 1) {
			return
		}
		b := r.Stack("boolean").Pop().(bool)
		if b {
			d.Push(float64(1))
		} else {
			d.Push(float64(0))
		}
	}
	ds.FunctionMap["frominteger"] = func(d DataStack, r RunSet, i Interpreter) {
		if r.Bad("integer", 1) {
			return
		}
		d.Push(float64(r.Stack("integer").Pop().(int64)))
	}
	// unary trigonometric functions replace the top of the stack
	ds.FunctionMap["sin"] = func(d DataStack, r RunSet, i Interpreter) {
		if d.Lack(1) {
			return
		}
		d.Push(math.Sin(d.Pop().(float64)))
	}
	ds.FunctionMap["cos"] = func(d DataStack, r RunSet, i Interpreter) {
		if d.Lack(1) {
			return
		}
		d.Push(math.Cos(d.Pop().(float64)))
	}
	ds.FunctionMap["tan"] = func(d DataStack, r RunSet, i Interpreter) {
		if d.Lack(1) {
			return
		}
		d.Push(math.Tan(d.Pop().(float64)))
	}
	// "rand" pushes a random float supplied by the interpreter
	ds.FunctionMap["rand"] = func(d DataStack, r RunSet, i Interpreter) {
		d.Push(i.RandFloat())
	}
}
package prism
// DecoratedString is a string with chainable methods for coloring.
type DecoratedString string
// InBlack returns this DecoratedString with black text.
func (ds DecoratedString) InBlack() DecoratedString {
	return InBlack(string(ds))
}

// InRed returns this DecoratedString with red text.
func (ds DecoratedString) InRed() DecoratedString {
	return InRed(string(ds))
}

// InGreen returns this DecoratedString with green text.
func (ds DecoratedString) InGreen() DecoratedString {
	return InGreen(string(ds))
}

// InYellow returns this DecoratedString with yellow text.
func (ds DecoratedString) InYellow() DecoratedString {
	return InYellow(string(ds))
}

// InBlue returns this DecoratedString with blue text.
func (ds DecoratedString) InBlue() DecoratedString {
	return InBlue(string(ds))
}

// InMagenta returns this DecoratedString with magenta text.
func (ds DecoratedString) InMagenta() DecoratedString {
	return InMagenta(string(ds))
}

// InCyan returns this DecoratedString with cyan text.
func (ds DecoratedString) InCyan() DecoratedString {
	return InCyan(string(ds))
}

// InWhite returns this DecoratedString with white text.
func (ds DecoratedString) InWhite() DecoratedString {
	return InWhite(string(ds))
}
// OnBlack returns this DecoratedString with a black background.
func (ds DecoratedString) OnBlack() DecoratedString {
	return OnBlack(string(ds))
}

// OnRed returns this DecoratedString with a red background.
func (ds DecoratedString) OnRed() DecoratedString {
	return OnRed(string(ds))
}

// OnGreen returns this DecoratedString with a green background.
func (ds DecoratedString) OnGreen() DecoratedString {
	return OnGreen(string(ds))
}

// OnYellow returns this DecoratedString with a yellow background.
func (ds DecoratedString) OnYellow() DecoratedString {
	return OnYellow(string(ds))
}

// OnBlue returns this DecoratedString with a blue background.
func (ds DecoratedString) OnBlue() DecoratedString {
	return OnBlue(string(ds))
}

// OnMagenta returns this DecoratedString with a magenta background.
func (ds DecoratedString) OnMagenta() DecoratedString {
	return OnMagenta(string(ds))
}

// OnCyan returns this DecoratedString with a cyan background.
func (ds DecoratedString) OnCyan() DecoratedString {
	return OnCyan(string(ds))
}

// OnWhite returns this DecoratedString with a white background.
func (ds DecoratedString) OnWhite() DecoratedString {
	return OnWhite(string(ds))
}
// Underlined returns this DecoratedString underlined.
func (ds DecoratedString) Underlined() DecoratedString {
	return Underlined(string(ds))
}

// InBold returns this DecoratedString in bold.
func (ds DecoratedString) InBold() DecoratedString {
	return InBold(string(ds))
}

// Inverted returns this DecoratedString passed through the package-level
// Inverted decoration.
func (ds DecoratedString) Inverted() DecoratedString {
	return Inverted(string(ds))
}
package metadata
// https://media.kingston.com/support/downloads/MKP_521.6_SMART-DCP1000_attribute.pdf
// https://www.percona.com/blog/2017/02/09/using-nvme-command-line-tools-to-check-nvme-flash-health/
// https://nvmexpress.org/resources/nvm-express-technology-features/nvme-features-for-error-reporting-smart-log-pages-failures-and-management-capabilities-in-nvme-architectures/
// https://www.micromat.com/product_manuals/drive_scope_manual_01.pdf
// NvmeAttributeMetadata describes one NVMe SMART attribute: how to display
// it, whether it is critical to drive health, and how to transform its value.
type NvmeAttributeMetadata struct {
	ID          string `json:"-"`
	DisplayName string `json:"-"`
	// Ideal indicates the desirable direction of the value ("low", "high" or "").
	Ideal string `json:"ideal"`
	// Critical marks attributes whose degradation indicates drive problems.
	Critical    bool   `json:"critical"`
	Description string `json:"description"`
	// Transform extracts/transforms the normalized or raw data into a chartable format.
	Transform          func(int, int64, string) int64 `json:"-"`
	TransformValueUnit string                         `json:"transform_value_unit,omitempty"`
	// DisplayType is "raw", "normalized" or "transformed".
	DisplayType string `json:"display_type"`
}
// NmveMetadata maps an NVMe SMART log field name to its display metadata.
// NOTE(review): the identifier looks like a typo for "NvmeMetadata", but
// renaming it would break external callers, so it is left as-is.
var NmveMetadata = map[string]NvmeAttributeMetadata{
	"critical_warning": {
		ID:          "critical_warning",
		DisplayName: "Critical Warning",
		DisplayType: "",
		Ideal:       "low",
		Critical:    true,
		Description: "This field indicates critical warnings for the state of the controller. Each bit corresponds to a critical warning type; multiple bits may be set. If a bit is cleared to ‘0’, then that critical warning does not apply. Critical warnings may result in an asynchronous event notification to the host. Bits in this field represent the current associated state and are not persistent.",
	},
	"temperature": {
		ID:          "temperature",
		DisplayName: "Temperature",
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "",
	},
	"available_spare": {
		ID:          "available_spare",
		DisplayName: "Available Spare",
		DisplayType: "",
		Ideal:       "high",
		Critical:    true,
		Description: "Contains a normalized percentage (0 to 100%) of the remaining spare capacity available.",
	},
	"percentage_used": {
		ID:          "percentage_used",
		DisplayName: "Percentage Used",
		DisplayType: "",
		Ideal:       "low",
		Critical:    true,
		Description: "Contains a vendor specific estimate of the percentage of NVM subsystem life used based on the actual usage and the manufacturer’s prediction of NVM life. A value of 100 indicates that the estimated endurance of the NVM in the NVM subsystem has been consumed, but may not indicate an NVM subsystem failure. The value is allowed to exceed 100. Percentages greater than 254 shall be represented as 255. This value shall be updated once per power-on hour (when the controller is not in a sleep state).",
	},
	"data_units_read": {
		ID:          "data_units_read",
		DisplayName: "Data Units Read",
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "Contains the number of 512 byte data units the host has read from the controller; this value does not include metadata. This value is reported in thousands (i.e., a value of 1 corresponds to 1000 units of 512 bytes read) and is rounded up. When the LBA size is a value other than 512 bytes, the controller shall convert the amount of data read to 512 byte units.",
	},
	"data_units_written": {
		ID:          "data_units_written",
		DisplayName: "Data Units Written",
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "Contains the number of 512 byte data units the host has written to the controller; this value does not include metadata. This value is reported in thousands (i.e., a value of 1 corresponds to 1000 units of 512 bytes written) and is rounded up. When the LBA size is a value other than 512 bytes, the controller shall convert the amount of data written to 512 byte units.",
	},
	"host_reads": {
		ID:          "host_reads",
		DisplayName: "Host Reads",
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "Contains the number of read commands completed by the controller",
	},
	"host_writes": {
		ID:          "host_writes",
		DisplayName: "Host Writes",
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "Contains the number of write commands completed by the controller",
	},
	"controller_busy_time": {
		ID:          "controller_busy_time",
		DisplayName: "Controller Busy Time",
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "Contains the amount of time the controller is busy with I/O commands. The controller is busy when there is a command outstanding to an I/O Queue (specifically, a command was issued via an I/O Submission Queue Tail doorbell write and the corresponding completion queue entry has not been posted yet to the associated I/O Completion Queue). This value is reported in minutes.",
	},
	"power_cycles": {
		ID:          "power_cycles",
		DisplayName: "Power Cycles",
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "Contains the number of power cycles.",
	},
	"power_on_hours": {
		ID:          "power_on_hours",
		DisplayName: "Power on Hours",
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "Contains the number of power-on hours. Power on hours is always logging, even when in low power mode.",
	},
	"unsafe_shutdowns": {
		ID:          "unsafe_shutdowns",
		DisplayName: "Unsafe Shutdowns",
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "Contains the number of unsafe shutdowns. This count is incremented when a shutdown notification (CC.SHN) is not received prior to loss of power.",
	},
	"media_errors": {
		ID:          "media_errors",
		DisplayName: "Media Errors",
		DisplayType: "",
		Ideal:       "low",
		Critical:    true,
		Description: "Contains the number of occurrences where the controller detected an unrecovered data integrity error. Errors such as uncorrectable ECC, CRC checksum failure, or LBA tag mismatch are included in this field.",
	},
	"num_err_log_entries": {
		ID:          "num_err_log_entries",
		DisplayName: "Numb Err Log Entries",
		DisplayType: "",
		Ideal:       "low",
		Critical:    true,
		Description: "Contains the number of Error Information log entries over the life of the controller.",
	},
	"warning_temp_time": {
		ID:          "warning_temp_time",
		DisplayName: "Warning Temp Time",
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "Contains the amount of time in minutes that the controller is operational and the Composite Temperature is greater than or equal to the Warning Composite Temperature Threshold (WCTEMP) field and less than the Critical Composite Temperature Threshold (CCTEMP) field in the Identify Controller data structure.",
	},
	"critical_comp_time": {
		ID:          "critical_comp_time",
		DisplayName: "Critical CompTime",
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "Contains the amount of time in minutes that the controller is operational and the Composite Temperature is greater the Critical Composite Temperature Threshold (CCTEMP) field in the Identify Controller data structure.",
	},
}
package spriter
import "fmt"
// Pixel classifies one cell of a sprite mask template.
type Pixel int8

const (
	// PixelBorder is a cell that is always drawn as border.
	PixelBorder = Pixel(-1)
	// PixelEmpty is a cell that is always left blank.
	// NOTE(review): PixelBorder occupies the first spec of this block, so
	// iota is 1 here — PixelEmpty == 1 and the Pixel zero value matches
	// none of the named states. Confirm that is intended.
	PixelEmpty = iota
	// PixelEmptyOrBody may resolve to either empty or body.
	PixelEmptyOrBody = iota
	// PixelBorderOrBody may resolve to either border or body.
	PixelBorderOrBody = iota
	// PixelBody is a cell that is always drawn as body.
	PixelBody = iota

	// shorthand aliases for use in mask templates
	p_ = PixelBorder
	p0 = PixelEmpty
	p1 = PixelEmptyOrBody
	p2 = PixelBorderOrBody
)
// Mask is a sprite template: a MaskWidth x MaskHeight grid of Pixel states,
// optionally mirrored horizontally and/or vertically when rendered.
type Mask struct {
	Bitmap     []Pixel // row-major grid of pixel states
	MaskWidth  int
	MaskHeight int
	MirrorX    bool // mirror the mask left-to-right in the rendered image
	MirrorY    bool // mirror the mask top-to-bottom in the rendered image
}
// NewMask parses a character template into a Mask. Recognized characters:
// '-', '|', '+' -> border; ' ' -> empty; '.' -> empty-or-body;
// '/' -> border-or-body; 'O' -> body. All rows must have equal length or
// NewMask panics. mirrorX/mirrorY record whether the rendered image mirrors
// the mask horizontally/vertically.
// NOTE(review): any other character leaves its cell at the Pixel zero
// value, which matches none of the named states — confirm intended.
func NewMask(charmap []string, mirrorX, mirrorY bool) *Mask {
	m := &Mask{MaskHeight: len(charmap), MirrorX: mirrorX, MirrorY: mirrorY}
	if len(charmap) == 0 || len(charmap[0]) == 0 {
		return m
	}
	m.Bitmap = make([]Pixel, len(charmap)*len(charmap[0]))
	for y := range charmap {
		// learn the width from the first row, then enforce it for the rest
		if m.MaskWidth == 0 {
			m.MaskWidth = len(charmap[y])
		} else if m.MaskWidth != len(charmap[y]) {
			panic(fmt.Sprintf("misaligned mask character map, row[%d] has %d columns, expected %d",
				y, len(charmap[y]), m.MaskWidth))
		}
		for x := range charmap[y] {
			i := y*m.MaskWidth + x
			switch charmap[y][x] {
			case '-', '|', '+':
				m.Bitmap[i] = PixelBorder
			case ' ':
				m.Bitmap[i] = PixelEmpty
			case '.':
				m.Bitmap[i] = PixelEmptyOrBody
			case '/':
				m.Bitmap[i] = PixelBorderOrBody
			case 'O':
				m.Bitmap[i] = PixelBody
			}
		}
	}
	return m
}
// ImageWidth returns the width of the rendered image, doubling the mask
// width when horizontal mirroring is enabled.
func (m *Mask) ImageWidth() int {
	w := m.MaskWidth
	if m.MirrorX {
		w *= 2
	}
	return w
}

// ImageHeight returns the height of the rendered image, doubling the mask
// height when vertical mirroring is enabled.
func (m *Mask) ImageHeight() int {
	h := m.MaskHeight
	if m.MirrorY {
		h *= 2
	}
	return h
}
// get returns the pixel at image coordinates (x, y), folding mirrored
// coordinates back into the mask: an x (or y) past the mask extent is
// reflected to its mirror-image column (or row).
func (m *Mask) get(x, y int) Pixel {
	if x >= m.MaskWidth && m.MirrorX {
		x = m.MaskWidth - (x - m.MaskWidth) - 1
	}
	if y >= m.MaskHeight && m.MirrorY {
		y = m.MaskHeight - (y - m.MaskHeight) - 1
	}
	return m.Bitmap[y*m.MaskWidth+x]
}
// BitLen reports how many cells of the mask are undecided (either-or
// states), i.e. how many random bits resolving the mask will consume.
func (m *Mask) BitLen() int {
	count := 0
	for _, p := range m.Bitmap {
		if p == PixelEmptyOrBody || p == PixelBorderOrBody {
			count++
		}
	}
	return count
}
// chooseBody resolves every undecided cell in place, consuming one bit from
// f per undecided cell in bitmap order (BitLen() bits in total).
func (m *Mask) chooseBody(f Flipper) {
	for i := range m.Bitmap {
		switch m.Bitmap[i] {
		case PixelEmptyOrBody:
			// true -> empty, false -> body
			if f.Next() {
				m.Bitmap[i] = PixelEmpty
			} else {
				m.Bitmap[i] = PixelBody
			}
		case PixelBorderOrBody:
			// true -> border, false -> body
			if f.Next() {
				m.Bitmap[i] = PixelBorder
			} else {
				m.Bitmap[i] = PixelBody
			}
		}
	}
}
// chooseEdges outlines the resolved body in place: every empty cell
// directly above, below, left or right of a non-empty cell becomes border.
// When mirroring is enabled on an axis, the outward side on that axis is
// skipped, since the mirrored half supplies that neighbor instead.
func (m *Mask) chooseEdges() {
	for y := 0; y < m.MaskHeight; y++ {
		for x := 0; x < m.MaskWidth; x++ {
			// "> PixelEmpty" selects the either-or and body states
			if m.Bitmap[y*m.MaskWidth+x] > PixelEmpty {
				above := (y-1)*m.MaskWidth + x
				if y-1 >= 0 && m.Bitmap[above] == PixelEmpty {
					m.Bitmap[above] = PixelBorder
				}
				if !m.MirrorY {
					below := (y+1)*m.MaskWidth + x
					if y+1 < m.MaskHeight && m.Bitmap[below] == PixelEmpty {
						m.Bitmap[below] = PixelBorder
					}
				}
				left := y*m.MaskWidth + x - 1
				if x-1 >= 0 && m.Bitmap[left] == PixelEmpty {
					m.Bitmap[left] = PixelBorder
				}
				if !m.MirrorX {
					right := y*m.MaskWidth + x + 1
					if x+1 < m.MaskWidth && m.Bitmap[right] == PixelEmpty {
						m.Bitmap[right] = PixelBorder
					}
				}
			}
		}
	}
}
package config
// deploymentConfigSchema is the JSON Schema (draft-07) used to validate
// deployment configuration documents: URLs and instructions for how to
// install and run the application.
const deploymentConfigSchema = `{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Deployment configuration",
"description": "URLs and instructions for how to install and run the application.",
"type": "object",
"definitions": {
"TargetPlatformsArray": {
"type": "array",
"items": {
"type": "string",
"pattern": "^(((windows|darwin|linux|\\{\\{\\.OS\\}\\})(-(386|amd64|\\{\\{\\.Arch\\}\\}))?)|(386|amd64|\\{\\{\\.Arch\\}\\}))$"
},
"uniqueItems": true
},
"URL": {
"type": "string",
"pattern": "^(https?|file)://.*$"
}
},
"properties": {
"Timestamp": {
"type": "string",
"pattern": "^([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2})|(<TIMESTAMP>)$"
},
"LauncherUpdate": {
"type": "array",
"items": {
"type": "object",
"properties": {
"BundleInfoURL": {
"$ref": "#/definitions/URL"
},
"BundleURL": {
"$ref": "#/definitions/URL"
},
"TargetPlatforms": {
"$ref": "#/definitions/TargetPlatformsArray"
}
},
"required": [ "BundleInfoURL" ]
}
},
"Bundles": {
"type": "array",
"items": {
"type": "object",
"properties": {
"BundleInfoURL": {
"$ref": "#/definitions/URL"
},
"BundleURL": {
"$ref": "#/definitions/URL"
},
"TargetPlatforms": {
"$ref": "#/definitions/TargetPlatformsArray"
},
"LocalDirectory": {
"type": "string",
"minLength": 1
},
"Tags": {
"type": "array",
"items": {
"type": "string"
}
}
},
"required": [ "BundleInfoURL", "LocalDirectory" ]
},
"minItems": 1,
"uniqueItems": true
},
"Execution": {
"type": "object",
"properties": {
"Commands": {
"type": "array",
"items": {
"type": "object",
"properties": {
"Name": {
"type": "string",
"minLength": 1
},
"Arguments": {
"type": "array",
"items": {
"type": "string"
}
},
"Env": {
"type": "object",
"additionalProperties": {
"oneOf": [ { "type": "string" }, { "type": "null" } ]
}
},
"TargetPlatforms": {
"$ref": "#/definitions/TargetPlatformsArray"
}
},
"required": [ "Name" ]
}
},
"LingerTimeMilliseconds": {
"type": "integer",
"minimum": 0
}
}
}
},
"required": [ "Timestamp", "Bundles", "Execution" ]
}`
package modular32
import (
mgl "github.com/go-gl/mathgl/mgl32"
)
// NewVec2Modulus creates a new 2d Vector Modulus with an independent
// modulus per axis.
func NewVec2Modulus(vec mgl.Vec2) Vec2Modulus {
	return Vec2Modulus{
		x: NewModulus(vec[0]),
		y: NewModulus(vec[1]),
	}
}

// Vec2Modulus defines a per-axis modulus for 2d vectors.
type Vec2Modulus struct {
	x Modulus
	y Modulus
}

// Congruent performs Congruent() on each axis independently.
func (m Vec2Modulus) Congruent(vec mgl.Vec2) mgl.Vec2 {
	return mgl.Vec2{
		m.x.Congruent(vec[0]),
		m.y.Congruent(vec[1]),
	}
}
// Dist returns the distance and direction of v1 to v2, picking the
// shortest congruent distance on each axis.
func (m Vec2Modulus) Dist(v1, v2 mgl.Vec2) mgl.Vec2 {
	return mgl.Vec2{
		m.x.Dist(v1[0], v2[0]),
		m.y.Dist(v1[1], v2[1]),
	}
}

// GetCongruent returns the vector closest to v1 that is congruent to v2,
// computed per axis.
func (m Vec2Modulus) GetCongruent(v1, v2 mgl.Vec2) mgl.Vec2 {
	return mgl.Vec2{
		m.x.GetCongruent(v1[0], v2[0]),
		m.y.GetCongruent(v1[1], v2[1]),
	}
}
// NewVec3Modulus creates a new 3d Vector Modulus with an independent
// modulus per axis.
func NewVec3Modulus(vec mgl.Vec3) Vec3Modulus {
	return Vec3Modulus{
		x: NewModulus(vec[0]),
		y: NewModulus(vec[1]),
		z: NewModulus(vec[2]),
	}
}

// Vec3Modulus defines a per-axis modulus for 3d vectors.
type Vec3Modulus struct {
	x Modulus
	y Modulus
	z Modulus
}

// Congruent performs Congruent() on each axis independently.
func (m Vec3Modulus) Congruent(vec mgl.Vec3) mgl.Vec3 {
	return mgl.Vec3{
		m.x.Congruent(vec[0]),
		m.y.Congruent(vec[1]),
		m.z.Congruent(vec[2]),
	}
}

// Dist returns the distance and direction of v1 to v2, picking the
// shortest congruent distance on each axis.
func (m Vec3Modulus) Dist(v1, v2 mgl.Vec3) mgl.Vec3 {
	return mgl.Vec3{
		m.x.Dist(v1[0], v2[0]),
		m.y.Dist(v1[1], v2[1]),
		m.z.Dist(v1[2], v2[2]),
	}
}

// GetCongruent returns the vector closest to v1 that is congruent to v2,
// computed per axis.
func (m Vec3Modulus) GetCongruent(v1, v2 mgl.Vec3) mgl.Vec3 {
	return mgl.Vec3{
		m.x.GetCongruent(v1[0], v2[0]),
		m.y.GetCongruent(v1[1], v2[1]),
		m.z.GetCongruent(v1[2], v2[2]),
	}
}
// NewVec4Modulus creates a new 4d Vector Modulus with an independent
// modulus per axis.
func NewVec4Modulus(vec mgl.Vec4) Vec4Modulus {
	return Vec4Modulus{
		x: NewModulus(vec[0]),
		y: NewModulus(vec[1]),
		z: NewModulus(vec[2]),
		w: NewModulus(vec[3]),
	}
}

// Vec4Modulus defines a per-axis modulus for 4d vectors.
type Vec4Modulus struct {
	x Modulus
	y Modulus
	z Modulus
	w Modulus
}

// Congruent performs Congruent() on each axis independently.
func (m Vec4Modulus) Congruent(vec mgl.Vec4) mgl.Vec4 {
	return mgl.Vec4{
		m.x.Congruent(vec[0]),
		m.y.Congruent(vec[1]),
		m.z.Congruent(vec[2]),
		m.w.Congruent(vec[3]),
	}
}
// Dist returns the distance and direction of v1 to v2, picking the
// shortest congruent distance on each axis.
func (m Vec4Modulus) Dist(v1, v2 mgl.Vec4) mgl.Vec4 {
	return mgl.Vec4{
		m.x.Dist(v1[0], v2[0]),
		m.y.Dist(v1[1], v2[1]),
		m.z.Dist(v1[2], v2[2]),
		// fixed: the w component previously used m.z's modulus (copy-paste
		// bug); it must use m.w, matching Congruent and GetCongruent.
		m.w.Dist(v1[3], v2[3]),
	}
}
// GetCongruent returns the vector closest to v1 that is congruent to v2,
// computed per axis.
func (m Vec4Modulus) GetCongruent(v1, v2 mgl.Vec4) mgl.Vec4 {
	return mgl.Vec4{
		m.x.GetCongruent(v1[0], v2[0]),
		m.y.GetCongruent(v1[1], v2[1]),
		m.z.GetCongruent(v1[2], v2[2]),
		m.w.GetCongruent(v1[3], v2[3]),
	}
}
package utl
import (
"math"
"math/rand"
"github.com/cpmech/gosl/chk"
)
// ParetoMin compares two vectors using Pareto's optimal criterion: a vector
// dominates when all of its components are less than or equal to the
// other's and at least one is strictly less.
// Note: minimum dominates (is better). Panics (via chk) if lengths differ.
func ParetoMin(u, v []float64) (uDominates, vDominates bool) {
	chk.IntAssert(len(u), len(v))
	uHasAllLeq := true // all u values are less-than or equal-to v values
	uHasOneLe := false // u has at least one value less-than v
	vHasAllLeq := true // all v values are less-than or equal-to u values
	vHasOneLe := false // v has at least one value less-than u
	for i := 0; i < len(u); i++ {
		if u[i] > v[i] {
			uHasAllLeq = false
			vHasOneLe = true
		}
		if u[i] < v[i] {
			uHasOneLe = true
			vHasAllLeq = false
		}
	}
	if uHasAllLeq && uHasOneLe {
		uDominates = true
	}
	if vHasAllLeq && vHasOneLe {
		vDominates = true
	}
	return
}
// ParetoMinProb compares two vectors using a probabilistic version of
// Pareto's criterion: the average per-component win probability of u is
// used as the bias of a coin flip deciding whether u dominates.
// φ ∈ [0,1] is a scaling factor that helps v win even if it's not smaller.
// If φ==0, deterministic analysis is carried out. If φ==1, probabilistic
// analysis is carried out. As φ → 1, v "gets more help".
// Note: (1) minimum dominates (is better)
//       (2) v dominates if !uDominates
func ParetoMinProb(u, v []float64, φ float64) (uDominates bool) {
	chk.IntAssert(len(u), len(v))
	var pu float64
	for i := 0; i < len(u); i++ {
		pu += ProbContestSmall(u[i], v[i], φ)
	}
	pu /= float64(len(u))
	if FlipCoin(pu) {
		uDominates = true
	}
	return
}
// ProbContestSmall computes the probability that u wins a contest against
// v, where the smaller value wins. φ ∈ [0,1] scales how much help v gets:
// with φ==0 the outcome is deterministic, with φ==1 fully probabilistic,
// and as φ → 1 v "gets more help". Both inputs are first squashed
// monotonically into (1, 2) via atan, preserving their ordering.
func ProbContestSmall(u, v, φ float64) float64 {
	su := math.Atan(u)/math.Pi + 1.5
	sv := math.Atan(v)/math.Pi + 1.5
	switch {
	case su < sv:
		return sv / (sv + φ*su)
	case su > sv:
		return φ * sv / (φ*sv + su)
	}
	return 0.5
}
// FlipCoin draws a Bernoulli sample: it returns true with probability p.
// p==1 and p==0 are handled deterministically without consuming randomness.
func FlipCoin(p float64) bool {
	switch p {
	case 1.0:
		return true
	case 0.0:
		return false
	}
	return rand.Float64() <= p
}
// ParetoFront computes the Pareto optimal front
// Input:
// Ovs -- [nsamples][ndim] objective values
// Output:
// front -- indices of pareto front
// Note: this function is slow for large sets
func ParetoFront(Ovs [][]float64) (front []int) {
dominated := map[int]bool{}
nsamples := len(Ovs)
for i := 0; i < nsamples; i++ {
dominated[i] = false
}
for i := 0; i < nsamples; i++ {
for j := i + 1; j < nsamples; j++ {
uDominates, vDominates := ParetoMin(Ovs[i], Ovs[j])
if uDominates {
dominated[j] = true
}
if vDominates {
dominated[i] = true
}
}
}
nondom := 0
for i := 0; i < nsamples; i++ {
if !dominated[i] {
nondom++
}
}
front = make([]int, nondom)
k := 0
for i := 0; i < nsamples; i++ {
if !dominated[i] {
front[k] = i
k++
}
}
return
} | utl/pareto.go | 0.595493 | 0.434941 | pareto.go | starcoder |
package crypto
import (
"crypto/cipher"
"crypto/des"
"github.com/pkg/errors"
)
// Pad80 appends '80'-style padding (a 0x80 byte followed by zero bytes) to
// b until its length is a multiple of blockSize, which must itself be a
// multiple of 8. When force is false the input is returned unchanged if it
// is already block-aligned; when force is true padding is applied anyway
// (a full extra block for aligned input).
func Pad80(b []byte, blockSize int, force bool) ([]byte, error) {
	if blockSize%8 != 0 {
		return nil, errors.New("block size must be a multiple of 8")
	}
	rest := len(b) % blockSize
	if rest == 0 && !force {
		return b, nil
	}
	padded := make([]byte, len(b)+blockSize-rest)
	n := copy(padded, b)
	padded[n] = 0x80 // the remaining bytes are already zero
	return padded, nil
}
// DESFinalTDESMac calculates a MAC over src into dst using single DES in
// CBC mode for all blocks except the last, and TripleDES (CBC) for the
// final block, chained via the intermediate ciphertext as IV.
// key is a double-length DES key K1||K2; the TDES key is K1||K2||K1.
// The length of src must be a multiple of the DES block size (8 bytes).
func DESFinalTDESMac(dst *[8]byte, src []byte, key [16]byte, iv [8]byte) error {
	if len(src)%des.BlockSize != 0 {
		return errors.New("length of src must be a multiple of 8")
	}
	tdesKey := resizeDoubleDESToTDES(key)
	sdesKey := key[:8]
	// get key as single des
	sdes, err := des.NewCipher(sdesKey)
	if err != nil {
		return errors.Wrap(err, "create DES cipher")
	}
	tdes, err := des.NewTripleDESCipher(tdesKey[:])
	if err != nil {
		return errors.Wrap(err, "create TDES cipher")
	}
	tdesCbc := cipher.NewCBCEncrypter(tdes, iv[:])
	if len(src) > 8 {
		// first run single DES over every block except the last
		sdesCbc := cipher.NewCBCEncrypter(sdes, iv[:])
		tmp1 := make([]byte, len(src)-des.BlockSize)
		sdesCbc.CryptBlocks(tmp1, src[:len(src)-des.BlockSize])
		// use the last single-DES ciphertext block as IV for the TDES round
		tdesCbc = cipher.NewCBCEncrypter(tdes, tmp1[len(tmp1)-des.BlockSize:])
	}
	// encrypt the final plaintext block with TDES; its ciphertext is the MAC
	tdesCbc.CryptBlocks(dst[:], src[len(src)-des.BlockSize:])
	return nil
}
// resizeDoubleDESToTDES expands a double-length DES key K1||K2 into the
// triple-length key K1||K2||K1 expected by TripleDES.
func resizeDoubleDESToTDES(key [16]byte) [24]byte {
	var expanded [24]byte
	n := copy(expanded[:], key[:])
	copy(expanded[n:], key[:8])
	return expanded
}
} | internal/crypto/crypto.go | 0.654232 | 0.496277 | crypto.go | starcoder |
package scheme
import (
"fmt"
"strings"
)
// areEqual reports structural equality: pairs are compared recursively by
// car and cdr, everything else via areIdentical. Operands of differing
// type names are never equal.
// NOTE(review): a nil `a` returns true regardless of b — confirm intended.
func areEqual(a Object, b Object) bool {
	if a == nil {
		return true
	}
	if typeName(a) != typeName(b) {
		return false
	} else if areIdentical(a, b) {
		return true
	}
	switch a.(type) {
	case *Pair:
		return areEqual(a.(*Pair).Car, b.(*Pair).Car) && areEqual(a.(*Pair).Cdr, b.(*Pair).Cdr)
	default:
		return false
	}
}
// areIdentical reports identity: numbers and booleans compare by value,
// everything else by interface (pointer) equality. Operands of differing
// type names are never identical.
func areIdentical(a Object, b Object) bool {
	if typeName(a) != typeName(b) {
		return false
	}
	switch a.(type) {
	case *Number:
		return a.(*Number).value == b.(*Number).value
	case *Boolean:
		return a.(*Boolean).value == b.(*Boolean).value
	default:
		return a == b
	}
}
// areSameList compares two objects structurally like areEqual, but leaves
// (non-pairs) are compared with areIdentical and there is no nil special
// case.
func areSameList(a Object, b Object) bool {
	if typeName(a) != typeName(b) {
		return false
	}
	switch a.(type) {
	case *Pair:
		return areSameList(a.(*Pair).Car, b.(*Pair).Car) && areSameList(a.(*Pair).Cdr, b.(*Pair).Cdr)
	default:
		return areIdentical(a, b)
	}
}
// assertListMinimum panics (via compileError) unless arguments is a proper
// list with at least minimum elements.
func assertListMinimum(arguments Object, minimum int) {
	if !arguments.isList() {
		compileError("proper list required for function application or macro use")
	} else if arguments.(*Pair).ListLength() < minimum {
		compileError("procedure requires at least %d argument", minimum)
	}
}

// assertListEqual panics (via compileError) unless arguments is a proper
// list with exactly length elements.
func assertListEqual(arguments Object, length int) {
	if !arguments.isList() {
		compileError("proper list required for function application or macro use")
	} else if arguments.(*Pair).ListLength() != length {
		compileError("wrong number of arguments: requires %d, but got %d",
			length, arguments.(*Pair).ListLength())
	}
}
// assertObjectsType panics (via compileError) unless every object has the
// given type name.
func assertObjectsType(objects []Object, typeName string) {
	for _, object := range objects {
		assertObjectType(object, typeName)
	}
}

// assertObjectType panics (via compileError) unless object's type name
// equals assertType.
func assertObjectType(object Object, assertType string) {
	if assertType != typeName(object) {
		compileError("%s required, but got %s", assertType, object)
	}
}
// compileError reports a compile-time error by delegating to runtimeError
// with a "Compile Error: " prefix. It never actually returns.
func compileError(format string, a ...interface{}) Object {
	return runtimeError("Compile Error: "+format, a...)
}

// defaultBinding returns a fresh binding pre-populated with every builtin
// procedure and builtin syntax.
func defaultBinding() Binding {
	// Pre-size the map for both builtin tables.
	binding := make(Binding, len(builtinProcedures)+len(builtinSyntaxes))
	for key, value := range builtinProcedures {
		binding[key] = value
	}
	for key, value := range builtinSyntaxes {
		binding[key] = value
	}
	return binding
}

// evaledObjects evaluates each object in order and returns the results.
func evaledObjects(objects []Object) []Object {
	// Preallocate: the result has exactly one element per input.
	evaled := make([]Object, 0, len(objects))
	for _, object := range objects {
		evaled = append(evaled, object.Eval())
	}
	return evaled
}

// runtimeError aborts evaluation by panicking with the formatted message.
// The Object return type exists only so callers can use it in expression
// position; the original's trailing "return undef" was unreachable dead code
// and has been removed.
func runtimeError(format string, a ...interface{}) Object {
	panic(fmt.Sprintf(format, a...))
}

// syntaxError reports a syntax error as a compile error.
func syntaxError(format string, a ...interface{}) Object {
	return compileError("syntax-error: "+format, a...)
}
func typeName(object Object) string {
switch object.(type) {
case *Pair:
if object.isNull() {
return "null"
} else {
return "pair"
}
default:
rawTypeName := fmt.Sprintf("%T", object)
typeName := strings.Replace(rawTypeName, "*scheme.", "", 1)
return strings.ToLower(typeName)
}
} | scheme/misc.go | 0.625095 | 0.446796 | misc.go | starcoder |
package main
// schemas is the JSON-encoded API and object-model schema document returned
// verbatim by the readContainerLogisitcsSchemas query. It is embedded as a
// raw string literal, so its contents must remain valid JSON.
// NOTE(review): "Logisitcs" in the function name below looks like a typo for
// "Logistics", but it is part of the public API surface — renaming it would
// break existing callers, so it is left as-is.
var schemas = `
{
"API": {
"createContainerLogistics": {
"description": "Create an asset. One argument, a JSON encoded event. Container No is required with zero or more writable properties. Establishes an initial asset state.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "A set of fields that constitute the writable fields in an asset's state. Container No is mandatory along with at least one writable field. In this contract pattern, a partial state is used as an event.",
"properties": {
"containerno": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"humidity": {
"description": "Humidity in percentage.",
"type": "number"
},
"light": {
"description": "Light in candela.",
"type": "number"
},
"acceleration": {
"description": "acceleration -gforce / shock.",
"type": "number"
},
"airquality": {
"description": "A geographical coordinate",
"properties": {
"oxygen": {
"type": "number"
},
"carbondioxide": {
"type": "number"
},
"ethylene": {
"type": "number"
}
},
"type": "object"
}
},
"required": [
"containerno"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "createContainerLogistics function",
"enum": [
"createContainerLogistics"
],
"type": "string"
},
"method": "invoke"
},
"type": "object"
},
"init": {
"description": "Initializes the contract when started, either by deployment or by peer restart.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "event sent to init on deployment",
"properties": {
"nickname": {
"default": "SIMPLE",
"description": "The nickname of the current contract",
"type": "string"
},
"version": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"required": [
"version"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "init function",
"enum": [
"init"
],
"type": "string"
},
"method": "deploy"
},
"type": "object"
},
"readContainerCurrentStatus": {
"description": "Returns the state an asset. Argument is a JSON encoded string. Container No is the only accepted property.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "An object containing only an Container No for use as an argument to read or delete.",
"properties": {
"containerno": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "readContainerCurrentStatus function",
"enum": [
"readContainerCurrentStatus"
],
"type": "string"
},
"method": "query",
"result": {
"description": "A set of fields that constitute the complete asset state.",
"properties": {
"containerno": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"humidity": {
"description": "Humidity in percentage.",
"type": "number"
},
"light": {
"description": "Light in candela.",
"type": "number"
},
"acceleration": {
"description": "acceleration -gforce / shock.",
"type": "number"
},
"airquality": {
"description": "A geographical coordinate",
"properties": {
"oxygen": {
"type": "number"
},
"carbondioxide": {
"type": "number"
},
"ethylene": {
"type": "number"
}
},
"type": "object"
}
},
"type": "object"
}
},
"type": "object"
},
"readContainerLogisitcsSchemas": {
"description": "Returns a string generated from the schema containing APIs and Objects as specified in generate.json in the scripts folder.",
"properties": {
"args": {
"description": "accepts no arguments",
"items": {},
"maxItems": 0,
"minItems": 0,
"type": "array"
},
"function": {
"description": "readContainerLogisitcsSchemas function",
"enum": [
"readContainerLogisitcsSchemas"
],
"type": "string"
},
"method": "query",
"result": {
"description": "JSON encoded object containing selected schemas",
"type": "string"
}
},
"type": "object"
},
"updateContainerLogistics": {
"description": "Update the state of an asset. The one argument is a JSON encoded event. Container No is required along with one or more writable properties. Establishes the next asset state. ",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "A set of fields that constitute the writable fields in an asset's state. Container No is mandatory along with at least one writable field. In this contract pattern, a partial state is used as an event.",
"properties": {
"containerno": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"humidity": {
"description": "Humidity in percentage.",
"type": "number"
},
"light": {
"description": "Light in candela.",
"type": "number"
},
"acceleration": {
"description": "acceleration -gforce / shock.",
"type": "number"
},
"airquality": {
"description": "A geographical coordinate",
"properties": {
"oxygen": {
"type": "number"
},
"carbondioxide": {
"type": "number"
},
"ethylene": {
"type": "number"
}
},
"type": "object"
}
},
"required": [
"containerno"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "updateContainerLogistics function",
"enum": [
"updateContainerLogistics"
],
"type": "string"
},
"method": "invoke"
},
"type": "object"
}
},
"objectModelSchemas": {
"containernoKey": {
"description": "An object containing only an Container No for use as an argument to read or delete.",
"properties": {
"containerno": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"type": "object"
},
"event": {
"description": "A set of fields that constitute the writable fields in an asset's state. Container no. is mandatory along with at least one writable field. In this contract pattern, a partial state is used as an event.",
"properties": {
"containerno": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"humidity": {
"description": "Humidity in percentage.",
"type": "number"
},
"light": {
"description": "Light in candela.",
"type": "number"
},
"acceleration": {
"description": "acceleration -gforce / shock.",
"type": "number"
},
"airquality": {
"description": "A geographical coordinate",
"properties": {
"oxygen": {
"type": "number"
},
"carbondioxide": {
"type": "number"
},
"ethylene": {
"type": "number"
}
},
"type": "object"
}
},
"required": [
"containerno"
],
"type": "object"
},
"initEvent": {
"description": "event sent to init on deployment",
"properties": {
"nickname": {
"default": "SIMPLE",
"description": "The nickname of the current contract",
"type": "string"
},
"version": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"required": [
"version"
],
"type": "object"
},
"state": {
"description": "A set of fields that constitute the complete asset state.",
"properties": {
"containerno": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"humidity": {
"description": "Humidity in percentage.",
"type": "number"
},
"light": {
"description": "Light in candela.",
"type": "number"
},
"acceleration": {
"description": "acceleration -gforce / shock.",
"type": "number"
},
"airquality": {
"description": "A geographical coordinate",
"properties": {
"oxygen": {
"type": "number"
},
"carbondioxide": {
"type": "number"
},
"ethylene": {
"type": "number"
}
},
"type": "object"
}
},
"type": "object"
}
}
}`
package calc
import (
"math"
"strconv"
"strings"
"unicode"
)
// oprData maps each binary operator to its parsing metadata: prec is the
// shunting-yard precedence (higher binds tighter), rAsoc marks
// right-associativity, and fx implements the operation. Comparison and
// logical operators encode booleans as floats via b2f/f2b (1 = true,
// 0 = false).
var oprData = map[string]struct {
	prec  int
	rAsoc bool // true = right // false = left
	fx    func(x, y float64) float64
}{
	"^": {4, true, func(x, y float64) float64 { return math.Pow(x, y) }},
	"*": {3, false, func(x, y float64) float64 { return x * y }},
	"/": {3, false, func(x, y float64) float64 { return x / y }},
	"+": {2, false, func(x, y float64) float64 { return x + y }},
	"-": {2, false, func(x, y float64) float64 { return x - y }},
	">": {2, false, func(x, y float64) float64 { return b2f(x > y) }},
	"<": {2, false, func(x, y float64) float64 { return b2f(x < y) }},
	"&": {2, false, func(x, y float64) float64 { return b2f(f2b(x) && f2b(y)) }},
	"|": {2, false, func(x, y float64) float64 { return b2f(f2b(x) || f2b(y)) }},
	// "!" was moved to unaryData below; this binary entry is kept only as a
	// historical trace.
	//"!": {2, false, func(x, y float64) float64 { return b2f(!f2b(x)) }},
}

// unaryData maps each unary operator to its implementation; "!" is logical
// negation over the float-encoded booleans.
var unaryData = map[string]struct {
	fx func(x float64) float64
}{
	"!": {func(x float64) float64 { return b2f(!f2b(x)) }},
}
// f2b converts a float-encoded boolean to a bool: zero is false, any other
// value is true.
func f2b(f float64) bool {
	return f != 0
}

// b2f converts a bool to its float encoding: true becomes 1, false becomes 0.
func b2f(b bool) float64 {
	if b {
		return 1
	}
	return 0
}
// funcs maps the uppercase function names recognized in expressions (e.g.
// "SIN(x)") to their math implementations. All take and return a single
// float64.
var funcs = map[string]func(x float64) float64{
	"LN":    math.Log,
	"ABS":   math.Abs,
	"COS":   math.Cos,
	"SIN":   math.Sin,
	"TAN":   math.Tan,
	"ACOS":  math.Acos,
	"ASIN":  math.Asin,
	"ATAN":  math.Atan,
	"SQRT":  math.Sqrt,
	"CBRT":  math.Cbrt,
	"CEIL":  math.Ceil,
	"FLOOR": math.Floor,
}

// consts maps the uppercase constant names recognized in expressions to
// their values from the math package.
var consts = map[string]float64{
	"E":       math.E,
	"PI":      math.Pi,
	"PHI":     math.Phi,
	"SQRT2":   math.Sqrt2,
	"SQRTE":   math.SqrtE,
	"SQRTPI":  math.SqrtPi,
	"SQRTPHI": math.SqrtPhi,
}
// SolvePostfix evaluates and returns the answer of the expression converted
// to postfix. It walks the token stream left to right, pushing numbers (and
// resolved functions/constants) onto a stack and applying operators to the
// top one or two stack values. Parse errors from strconv are ignored here;
// malformed tokens evaluate as 0.
func SolvePostfix(tokens Stack) float64 {
	stack := Stack{}
	for _, v := range tokens.Values {
		switch v.Type {
		case NUMBER:
			stack.Push(v)
		case FUNCTION:
			// Evaluate the function call (e.g. "SIN(1+2)") to a number token.
			stack.Push(Token{NUMBER, SolveFunction(v.Value)})
		case CONSTANT:
			// Unknown constant names are silently dropped.
			if val, ok := consts[v.Value]; ok {
				stack.Push(Token{NUMBER, strconv.FormatFloat(val, 'f', -1, 64)})
			}
		case UNARY:
			//unary invert
			f := unaryData[v.Value].fx
			var x float64
			x, _ = strconv.ParseFloat(stack.Pop().Value, 64)
			result := f(x)
			stack.Push(Token{NUMBER, strconv.FormatFloat(result, 'f', -1, 64)})
		case OPERATOR:
			if v.Value == "!" {
				// NOTE(review): this branch looks dead — the "!" entry in
				// oprData is commented out, and "!" tokens appear to arrive
				// as UNARY (handled above). Confirm before removing.
				//unary invert
				f := oprData[v.Value].fx
				var x float64
				x, _ = strconv.ParseFloat(stack.Pop().Value, 64)
				result := f(x, 0)
				stack.Push(Token{NUMBER, strconv.FormatFloat(result, 'f', -1, 64)})
			} else {
				// Binary operator: pop y then x so that x OP y preserves the
				// original operand order.
				f := oprData[v.Value].fx
				var x, y float64
				y, _ = strconv.ParseFloat(stack.Pop().Value, 64)
				x, _ = strconv.ParseFloat(stack.Pop().Value, 64)
				result := f(x, y)
				stack.Push(Token{NUMBER, strconv.FormatFloat(result, 'f', -1, 64)})
			}
		}
	}
	// The single remaining stack value is the expression's result.
	out, _ := strconv.ParseFloat(stack.Values[0].Value, 64)
	return out
}
// SolveFunction returns the answer of a function found within an expression
func SolveFunction(s string) string {
var fArg float64
fType := s[:strings.Index(s, "(")]
args := s[strings.Index(s, "(")+1 : strings.LastIndex(s, ")")]
if !strings.ContainsAny(args, "+ & * & - & / & ^") && !ContainsLetter(args) {
fArg, _ = strconv.ParseFloat(args, 64)
} else {
stack, _ := NewParser(strings.NewReader(args)).Parse()
stack = ShuntingYard(stack)
fArg = SolvePostfix(stack)
}
return strconv.FormatFloat(funcs[fType](fArg), 'f', -1, 64)
}
// ContainsLetter reports whether s contains at least one Unicode letter.
func ContainsLetter(s string) bool {
	return strings.IndexFunc(s, unicode.IsLetter) >= 0
}
func Solve(s string) float64 {
p := NewParser(strings.NewReader(s))
stack, _ := p.Parse()
stack = ShuntingYard(stack)
answer := SolvePostfix(stack)
return answer
}
func BoolSolve(s string) bool {
return f2b(Solve(s))
} | calc/solver.go | 0.599368 | 0.57684 | solver.go | starcoder |
package main
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
)
/*
--- Day 1: Report Repair ---
After saving Christmas five years in a row, you've decided to take a vacation at a nice resort on a tropical island. Surely, Christmas will go on without you.
The tropical island has its own currency and is entirely cash-only. The gold coins used there have a little picture of a starfish; the locals just call them stars. None of the currency exchanges seem to have heard of them, but somehow, you'll need to find fifty of these coins by the time you arrive so you can pay the deposit on your room.
To save your vacation, you need to get all fifty stars by December 25th.
Collect stars by solving puzzles. Two puzzles will be made available on each day in the Advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants one star. Good luck!
Before you leave, the Elves in accounting just need you to fix your expense report (your puzzle input); apparently, something isn't quite adding up.
Specifically, they need you to find the two entries that sum to 2020 and then multiply those two numbers together.
For example, suppose your expense report contained the following:
1721
979
366
299
675
1456
In this list, the two entries that sum to 2020 are 1721 and 299. Multiplying them together produces 1721 * 299 = 514579, so the correct answer is 514579.
Of course, your expense report is much larger. Find the two entries that sum to 2020; what do you get if you multiply them together?
Your puzzle answer was 471019.
--- Part Two ---
The Elves in accounting are thankful for your help; one of them even offers you a starfish coin they had left over from a past vacation. They offer you a second one if you can find three numbers in your expense report that meet the same criteria.
Using the above example again, the three entries that sum to 2020 are 979, 366, and 675. Multiplying them together produces the answer, 241861950.
In your expense report, what is the product of the three entries that sum to 2020?
Your puzzle answer was 103927824.
*/
// TARGET is the sum that the expense-report entries must add up to.
const TARGET = 2020

// check panics on any non-nil error; fail-fast error handling for this
// script-style program.
func check(e error) {
	if e == nil {
		return
	}
	panic(e)
}
// readInputAsIntArray reads the file at inputFileName and parses it as one
// integer per line, returning the values in file order. Blank lines
// (including the usual trailing newline) are skipped.
//
// Fixes two defects in the original: it ignored its inputFileName parameter
// and always read "./input.txt", and it returned a strconv error on any
// trailing newline because strings.Split yields a final empty element.
func readInputAsIntArray(inputFileName string) ([]int, error) {
	dat, err := ioutil.ReadFile(inputFileName)
	if err != nil {
		return nil, err
	}
	result := []int{}
	for _, line := range strings.Split(string(dat), "\n") {
		line = strings.TrimSpace(line)
		if line == "" {
			// Tolerate blank lines and the trailing newline.
			continue
		}
		value, err := strconv.Atoi(line)
		if err != nil {
			return nil, err
		}
		result = append(result, value)
	}
	return result, nil
}
// naiveSearchForTwoValues scans every unordered pair of distinct entries and
// returns the first pair summing to target, or (-1, -1) when none exists.
func naiveSearchForTwoValues(values []int, target int) (int, int) {
	n := len(values)
	if n < 2 {
		return -1, -1
	}
	for i, first := range values[:n-1] {
		for _, second := range values[i+1:] {
			if first+second == target {
				return first, second
			}
		}
	}
	return -1, -1
}
// naiveSearchForThreeValues scans every combination of three distinct
// entries and returns the first triple summing to target, or (-1, -1, -1)
// when none exists.
func naiveSearchForThreeValues(values []int, target int) (int, int, int) {
	numberOfValues := len(values)
	if numberOfValues < 3 {
		return -1, -1, -1
	}
	for i := 0; i < numberOfValues-2; i++ {
		firstValue := values[i]
		for j := i + 1; j < numberOfValues-1; j++ {
			secondValue := values[j]
			// Bug fix: the original started k at i+2, which allowed k == j
			// (the same entry counted twice) and re-visited earlier pairs;
			// k must start strictly after j.
			for k := j + 1; k < numberOfValues; k++ {
				thirdValue := values[k]
				if firstValue+secondValue+thirdValue == target {
					return firstValue, secondValue, thirdValue
				}
			}
		}
	}
	return -1, -1, -1
}
// partOne prints the product of the two entries that sum to TARGET.
func partOne(values []int) {
	a, b := naiveSearchForTwoValues(values, TARGET)
	fmt.Println(a * b)
	// should be 471019
}

// partTwo prints the product of the three entries that sum to TARGET.
func partTwo(values []int) {
	a, b, c := naiveSearchForThreeValues(values, TARGET)
	fmt.Println(a * b * c)
	// should be 103927824
}
func main() {
values, err := readInputAsIntArray("./input.txt")
check(err)
partTwo(values)
} | 2020/day-01/main.go | 0.532182 | 0.493042 | main.go | starcoder |
package sample
import (
"crypto/rand"
"encoding/binary"
"math/big"
)
// cdtTable is a precomputed cumulative-distribution table used by the
// constant-time half-Gaussian sampler with sigma = sqrt(1/(2 ln 2)). Each
// row holds a 126-bit threshold split into two 63-bit limbs
// {low, high}; Sample counts how many thresholds exceed a fresh 126-bit
// random value.
var cdtTable = [][2]uint64{{2200310400551559144, 3327841033070651387},
	{7912151619254726620, 380075531178589176},
	{5167367257772081627, 11604843442081400},
	{5081592746475748971, 90134450315532},
	{6522074513864805092, 175786317361},
	{2579734681240182346, 85801740},
	{8175784047440310133, 10472},
	{2947787991558061753, 0},
	{22489665999543, 0}}

// cdtLen is the number of rows in cdtTable; since Sample adds at most one
// per row, sampled values fall in [0, cdtLen].
var cdtLen = 9 // upper bound on sample values

// cdtLowMask keeps the low 63 bits of a random 64-bit word, matching the
// 63-bit limbs stored in cdtTable.
var cdtLowMask uint64 = 0x7fffffffffffffff

// SigmaCDT is a constant sqrt(1/(2ln(2)))
var SigmaCDT, _ = new(big.Float).SetString("0.84932180028801904272150283410")
// NormalCDT samples random values from the discrete Normal (Gaussian)
// probability distribution, limited to non-negative values (half-Gaussian).
// In particular each value x from Z^+ is sampled with probability proportional to
// exp(-x^2/sigma^2) where sigma = sqrt(1/2ln(2)).
// The implementation is based on paper:
// "FACCT: FAst, Compact, and Constant-Time Discrete Gaussian
// Sampler over Integers" by <NAME>, <NAME>, and <NAME>
// (https://eprint.iacr.org/2018/1234.pdf). See the above paper where
// it is argued that such a sampling achieves a relative error at most
// 2^{-46} with the chosen parameters.
type NormalCDT struct {
	// Embeds the shared normal-sampler base type defined elsewhere in this
	// package; the CDT sampler itself keeps no state of its own.
	*normal
}
// NewNormalCDT returns a ready-to-use NormalCDT sampler.
func NewNormalCDT() *NormalCDT {
	return &NormalCDT{}
}
// Sample draws one non-negative integer from the discrete half-Gaussian
// distribution with sigma = sqrt(1/(2 ln 2)) in constant time.
//
// It reads 16 bytes of crypto/rand randomness and masks them into two 63-bit
// limbs (r1, r2) forming a 126-bit uniform random value. For each CDT row it
// then performs a branchless 126-bit "random < threshold" comparison — the
// subtractions produce borrow/sign information and the shift by 63 extracts
// a single 0/1 bit — and the sampled value x is the count of rows the random
// value falls below (per the FACCT reference implementation; the exact
// borrow-propagation identity is taken from that paper — see the type's doc
// comment).
func (c *NormalCDT) Sample() (*big.Int, error) {
	randBytes := make([]byte, 16)
	_, err := rand.Read(randBytes)
	if err != nil {
		return nil, err
	}
	// Two independent 63-bit limbs of the 126-bit uniform sample.
	r1 := binary.LittleEndian.Uint64(randBytes[0:8])
	r1 = r1 & cdtLowMask
	r2 := binary.LittleEndian.Uint64(randBytes[8:16])
	r2 = r2 & cdtLowMask
	x := uint64(0)
	// Constant time: every row is visited; each term evaluates to 0 or 1.
	for i := 0; i < cdtLen; i++ {
		x += (((r1 - cdtTable[i][0]) & ((uint64(1) << 63) ^ ((r2 - cdtTable[i][1]) | (cdtTable[i][1] - r2)))) | (r2 - cdtTable[i][1])) >> 63
	}
	return big.NewInt(int64(x)), nil
}
package tree
import (
"github.com/efreitasn/go-datas/graph"
"github.com/efreitasn/go-datas/linkedlist"
)
// Tree is a tree of ints, backed by a directed graph whose edges point from
// parent to child; root records the value of the root node.
type Tree struct {
	g    *graph.Graph
	root int
}
// New creates a tree of ints rooted at the given value.
func New(root int) *Tree {
	t := &Tree{
		g:    graph.New(true),
		root: root,
	}
	t.g.AddVertex(root)
	return t
}
// HasNode reports whether v is a node of the tree.
func (tr *Tree) HasNode(v int) bool {
	return tr.g.HasVertex(v)
}

// AddNode inserts v as a child of parent. It reports false when parent does
// not exist or v is already present in the tree.
func (tr *Tree) AddNode(parent int, v int) (ok bool) {
	if !tr.HasNode(parent) {
		return false
	}
	if tr.g.HasVertex(v) {
		return false
	}
	tr.g.AddVertex(v)
	tr.g.AddEdge(parent, v)
	return true
}
// NodeChildren returns the children of node v, and whether v exists in the
// tree.
func (tr *Tree) NodeChildren(v int) (children *linkedlist.LinkedList, found bool) {
	adj, ok := tr.g.AdjacentVertices(v)
	if !ok {
		return nil, false
	}
	return adj, true
}
// NodeHeight returns the number of edges from node v to its deepest
// descendant (the height of v), computed by depth-first traversal. found is
// false when v is not in the tree.
func (tr *Tree) NodeHeight(v int) (height int, found bool) {
	if !tr.HasNode(v) {
		return 0, false
	}
	return tr.nodeHeightRecursive(v), true
}

// nodeHeightRecursive computes the height of v, which must exist.
func (tr *Tree) nodeHeightRecursive(v int) int {
	children, _ := tr.g.AdjacentVertices(v)
	if children.Size() == 0 {
		return 0
	}
	// Height is one more than the tallest child subtree.
	tallest := 0
	children.Traverse(true, func(child int) {
		if h := tr.nodeHeightRecursive(child); h > tallest {
			tallest = h
		}
	})
	return tallest + 1
}
// NodeDepth returns the number of edges from the root to node v (the depth
// of v), computed by depth-first search. found is false when v is not in
// the tree.
func (tr *Tree) NodeDepth(v int) (depth int, found bool) {
	if !tr.HasNode(v) {
		return 0, false
	}
	// v is known to exist, so the recursive search is guaranteed to find it.
	depth, _ = tr.nodeDepthRecursive(tr.Root(), v)
	return depth, true
}

// nodeDepthItem records one child-subtree search result: the depth at which
// the target was found (relative to that child) and whether it was found.
type nodeDepthItem struct {
	depth  int
	vFound bool
}

// nodeDepthRecursive searches the subtree rooted at v for valueToFind and
// returns its depth relative to v; vFound is false when the value is not in
// this subtree.
func (tr *Tree) nodeDepthRecursive(v, valueToFind int) (depth int, vFound bool) {
	adjVertices, _ := tr.g.AdjacentVertices(v)
	// Base cases: v is the target (depth 0) or a leaf without the target.
	if adjVertices.Size() == 0 || v == valueToFind {
		return 0, v == valueToFind
	}
	var depths []nodeDepthItem
	adjVertices.Traverse(true, func(adjV int) {
		depth, vFound := tr.nodeDepthRecursive(adjV, valueToFind)
		depths = append(
			depths,
			nodeDepthItem{
				depth,
				vFound,
			},
		)
	})
	// The first child subtree containing the target determines the depth:
	// one extra edge from v down to that child.
	for _, d := range depths {
		if d.vFound {
			return d.depth + 1, true
		}
	}
	return 0, false
}
// Root returns the value of the tree's root node.
func (tr *Tree) Root() int {
	return tr.root
}

// Height returns the number of edges between the root and its deepest
// descendant, i.e. the height of the root node.
func (tr *Tree) Height() int {
	height, _ := tr.NodeHeight(tr.Root())
	return height
}

// Size returns the number of nodes in the tree.
func (tr *Tree) Size() int {
	return tr.g.NumVertices()
}
package ent
import (
"context"
"errors"
"fmt"
"time"
"github.com/empiricaly/recruitment/internal/ent/project"
"github.com/empiricaly/recruitment/internal/ent/run"
"github.com/empiricaly/recruitment/internal/ent/steprun"
"github.com/empiricaly/recruitment/internal/ent/template"
"github.com/facebook/ent/dialect/sql/sqlgraph"
"github.com/facebook/ent/schema/field"
)
// RunCreate is the builder for creating a Run entity.
// (ent-generated code: field values accumulate in mutation and nothing is
// persisted until Save is called; hooks wrap the final mutation.)
type RunCreate struct {
	config
	mutation *RunMutation
	hooks    []Hook
}
// The Set* methods below record field values on the underlying RunMutation;
// nothing touches the database until Save. The SetNillable* variants ignore
// nil pointers, which is how optional (nullable) inputs are applied.

// SetCreatedAt sets the created_at field.
func (rc *RunCreate) SetCreatedAt(t time.Time) *RunCreate {
	rc.mutation.SetCreatedAt(t)
	return rc
}

// SetNillableCreatedAt sets the created_at field if the given value is not nil.
func (rc *RunCreate) SetNillableCreatedAt(t *time.Time) *RunCreate {
	if t != nil {
		rc.SetCreatedAt(*t)
	}
	return rc
}

// SetUpdatedAt sets the updated_at field.
func (rc *RunCreate) SetUpdatedAt(t time.Time) *RunCreate {
	rc.mutation.SetUpdatedAt(t)
	return rc
}

// SetNillableUpdatedAt sets the updated_at field if the given value is not nil.
func (rc *RunCreate) SetNillableUpdatedAt(t *time.Time) *RunCreate {
	if t != nil {
		rc.SetUpdatedAt(*t)
	}
	return rc
}

// SetStatus sets the status field.
func (rc *RunCreate) SetStatus(r run.Status) *RunCreate {
	rc.mutation.SetStatus(r)
	return rc
}

// SetStartedAt sets the startedAt field.
func (rc *RunCreate) SetStartedAt(t time.Time) *RunCreate {
	rc.mutation.SetStartedAt(t)
	return rc
}

// SetNillableStartedAt sets the startedAt field if the given value is not nil.
func (rc *RunCreate) SetNillableStartedAt(t *time.Time) *RunCreate {
	if t != nil {
		rc.SetStartedAt(*t)
	}
	return rc
}

// SetEndedAt sets the endedAt field.
func (rc *RunCreate) SetEndedAt(t time.Time) *RunCreate {
	rc.mutation.SetEndedAt(t)
	return rc
}

// SetNillableEndedAt sets the endedAt field if the given value is not nil.
func (rc *RunCreate) SetNillableEndedAt(t *time.Time) *RunCreate {
	if t != nil {
		rc.SetEndedAt(*t)
	}
	return rc
}

// SetName sets the name field.
func (rc *RunCreate) SetName(s string) *RunCreate {
	rc.mutation.SetName(s)
	return rc
}

// SetStartAt sets the startAt field.
func (rc *RunCreate) SetStartAt(t time.Time) *RunCreate {
	rc.mutation.SetStartAt(t)
	return rc
}

// SetNillableStartAt sets the startAt field if the given value is not nil.
func (rc *RunCreate) SetNillableStartAt(t *time.Time) *RunCreate {
	if t != nil {
		rc.SetStartAt(*t)
	}
	return rc
}

// SetError sets the error field.
func (rc *RunCreate) SetError(s string) *RunCreate {
	rc.mutation.SetError(s)
	return rc
}

// SetNillableError sets the error field if the given value is not nil.
func (rc *RunCreate) SetNillableError(s *string) *RunCreate {
	if s != nil {
		rc.SetError(*s)
	}
	return rc
}

// SetID sets the id field.
func (rc *RunCreate) SetID(s string) *RunCreate {
	rc.mutation.SetID(s)
	return rc
}
// The edge setters below associate the new Run with related entities
// (project, template, currentStep, steps) by recording foreign-key IDs on
// the mutation; the convenience overloads accept the entity and forward its
// ID.

// SetProjectID sets the project edge to Project by id.
func (rc *RunCreate) SetProjectID(id string) *RunCreate {
	rc.mutation.SetProjectID(id)
	return rc
}

// SetNillableProjectID sets the project edge to Project by id if the given value is not nil.
func (rc *RunCreate) SetNillableProjectID(id *string) *RunCreate {
	if id != nil {
		rc = rc.SetProjectID(*id)
	}
	return rc
}

// SetProject sets the project edge to Project.
func (rc *RunCreate) SetProject(p *Project) *RunCreate {
	return rc.SetProjectID(p.ID)
}

// SetTemplateID sets the template edge to Template by id.
func (rc *RunCreate) SetTemplateID(id string) *RunCreate {
	rc.mutation.SetTemplateID(id)
	return rc
}

// SetTemplate sets the template edge to Template.
func (rc *RunCreate) SetTemplate(t *Template) *RunCreate {
	return rc.SetTemplateID(t.ID)
}

// SetCurrentStepID sets the currentStep edge to StepRun by id.
func (rc *RunCreate) SetCurrentStepID(id string) *RunCreate {
	rc.mutation.SetCurrentStepID(id)
	return rc
}

// SetNillableCurrentStepID sets the currentStep edge to StepRun by id if the given value is not nil.
func (rc *RunCreate) SetNillableCurrentStepID(id *string) *RunCreate {
	if id != nil {
		rc = rc.SetCurrentStepID(*id)
	}
	return rc
}

// SetCurrentStep sets the currentStep edge to StepRun.
func (rc *RunCreate) SetCurrentStep(s *StepRun) *RunCreate {
	return rc.SetCurrentStepID(s.ID)
}

// AddStepIDs adds the steps edge to StepRun by ids.
func (rc *RunCreate) AddStepIDs(ids ...string) *RunCreate {
	rc.mutation.AddStepIDs(ids...)
	return rc
}

// AddSteps adds the steps edges to StepRun.
func (rc *RunCreate) AddSteps(s ...*StepRun) *RunCreate {
	ids := make([]string, len(s))
	for i := range s {
		ids[i] = s[i].ID
	}
	return rc.AddStepIDs(ids...)
}

// Mutation returns the RunMutation object of the builder.
func (rc *RunCreate) Mutation() *RunMutation {
	return rc.mutation
}
// Save creates the Run in the database. Default field values are applied
// first; when hooks are registered, they are composed around the save step
// (outermost hook first) and executed through the mutation pipeline.
func (rc *RunCreate) Save(ctx context.Context) (*Run, error) {
	var (
		err  error
		node *Run
	)
	rc.defaults()
	if len(rc.hooks) == 0 {
		// Fast path: validate and write directly.
		if err = rc.check(); err != nil {
			return nil, err
		}
		node, err = rc.sqlSave(ctx)
	} else {
		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
			mutation, ok := m.(*RunMutation)
			if !ok {
				return nil, fmt.Errorf("unexpected mutation type %T", m)
			}
			if err = rc.check(); err != nil {
				return nil, err
			}
			rc.mutation = mutation
			node, err = rc.sqlSave(ctx)
			mutation.done = true
			return node, err
		})
		// Wrap hooks in reverse so hooks[0] ends up outermost.
		for i := len(rc.hooks) - 1; i >= 0; i-- {
			mut = rc.hooks[i](mut)
		}
		if _, err := mut.Mutate(ctx, rc.mutation); err != nil {
			return nil, err
		}
	}
	return node, err
}

// SaveX calls Save and panics if Save returns an error.
func (rc *RunCreate) SaveX(ctx context.Context) *Run {
	v, err := rc.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}
// defaults sets the default values of the builder before save: created_at
// and updated_at fall back to the schema-defined defaults when unset.
func (rc *RunCreate) defaults() {
	if _, ok := rc.mutation.CreatedAt(); !ok {
		v := run.DefaultCreatedAt()
		rc.mutation.SetCreatedAt(v)
	}
	if _, ok := rc.mutation.UpdatedAt(); !ok {
		v := run.DefaultUpdatedAt()
		rc.mutation.SetUpdatedAt(v)
	}
}

// check runs all checks and user-defined validators on the builder: required
// fields must be present, enum/ID validators must pass, and the mandatory
// template edge must be set.
func (rc *RunCreate) check() error {
	if _, ok := rc.mutation.CreatedAt(); !ok {
		return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")}
	}
	if _, ok := rc.mutation.UpdatedAt(); !ok {
		return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")}
	}
	if _, ok := rc.mutation.Status(); !ok {
		return &ValidationError{Name: "status", err: errors.New("ent: missing required field \"status\"")}
	}
	if v, ok := rc.mutation.Status(); ok {
		if err := run.StatusValidator(v); err != nil {
			return &ValidationError{Name: "status", err: fmt.Errorf("ent: validator failed for field \"status\": %w", err)}
		}
	}
	if _, ok := rc.mutation.Name(); !ok {
		return &ValidationError{Name: "name", err: errors.New("ent: missing required field \"name\"")}
	}
	if v, ok := rc.mutation.ID(); ok {
		if err := run.IDValidator(v); err != nil {
			return &ValidationError{Name: "id", err: fmt.Errorf("ent: validator failed for field \"id\": %w", err)}
		}
	}
	if _, ok := rc.mutation.TemplateID(); !ok {
		return &ValidationError{Name: "template", err: errors.New("ent: missing required edge \"template\"")}
	}
	return nil
}

// sqlSave builds the create spec and executes the INSERT, translating SQL
// constraint violations into ent constraint errors.
func (rc *RunCreate) sqlSave(ctx context.Context) (*Run, error) {
	_node, _spec := rc.createSpec()
	if err := sqlgraph.CreateNode(ctx, rc.driver, _spec); err != nil {
		if cerr, ok := isSQLConstraintError(err); ok {
			err = cerr
		}
		return nil, err
	}
	return _node, nil
}
// createSpec translates the builder's mutation into the in-memory Run node
// and the sqlgraph.CreateSpec used to persist it. Every field/edge present
// on the mutation is mirrored both into the spec (for the database) and
// into _node (for the value returned to the caller).
func (rc *RunCreate) createSpec() (*Run, *sqlgraph.CreateSpec) {
	var (
		_node = &Run{config: rc.config}
		_spec = &sqlgraph.CreateSpec{
			Table: run.Table,
			ID: &sqlgraph.FieldSpec{
				Type:   field.TypeString,
				Column: run.FieldID,
			},
		}
	)
	// User-supplied ID (IDs are strings for this entity).
	if id, ok := rc.mutation.ID(); ok {
		_node.ID = id
		_spec.ID.Value = id
	}
	// Scalar fields: each set field becomes one FieldSpec.
	if value, ok := rc.mutation.CreatedAt(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeTime,
			Value:  value,
			Column: run.FieldCreatedAt,
		})
		_node.CreatedAt = value
	}
	if value, ok := rc.mutation.UpdatedAt(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeTime,
			Value:  value,
			Column: run.FieldUpdatedAt,
		})
		_node.UpdatedAt = value
	}
	if value, ok := rc.mutation.Status(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeEnum,
			Value:  value,
			Column: run.FieldStatus,
		})
		_node.Status = value
	}
	// Nillable time field: stored on the node as a pointer.
	if value, ok := rc.mutation.StartedAt(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeTime,
			Value:  value,
			Column: run.FieldStartedAt,
		})
		_node.StartedAt = &value
	}
	if value, ok := rc.mutation.EndedAt(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeTime,
			Value:  value,
			Column: run.FieldEndedAt,
		})
		_node.EndedAt = &value
	}
	if value, ok := rc.mutation.Name(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeString,
			Value:  value,
			Column: run.FieldName,
		})
		_node.Name = value
	}
	if value, ok := rc.mutation.StartAt(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeTime,
			Value:  value,
			Column: run.FieldStartAt,
		})
		_node.StartAt = &value
	}
	if value, ok := rc.mutation.Error(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeString,
			Value:  value,
			Column: run.FieldError,
		})
		_node.Error = &value
	}
	// Edges: each populated edge becomes one EdgeSpec carrying the IDs of
	// the target nodes.
	// run -> project (many-to-one, inverse side).
	if nodes := rc.mutation.ProjectIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   run.ProjectTable,
			Columns: []string{run.ProjectColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: &sqlgraph.FieldSpec{
					Type:   field.TypeString,
					Column: project.FieldID,
				},
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges = append(_spec.Edges, edge)
	}
	// run -> template (one-to-one, owning side; required per check()).
	if nodes := rc.mutation.TemplateIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2O,
			Inverse: false,
			Table:   run.TemplateTable,
			Columns: []string{run.TemplateColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: &sqlgraph.FieldSpec{
					Type:   field.TypeString,
					Column: template.FieldID,
				},
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges = append(_spec.Edges, edge)
	}
	// run -> currentStep (many-to-one).
	if nodes := rc.mutation.CurrentStepIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: false,
			Table:   run.CurrentStepTable,
			Columns: []string{run.CurrentStepColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: &sqlgraph.FieldSpec{
					Type:   field.TypeString,
					Column: steprun.FieldID,
				},
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges = append(_spec.Edges, edge)
	}
	// run -> steps (one-to-many).
	if nodes := rc.mutation.StepsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   run.StepsTable,
			Columns: []string{run.StepsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: &sqlgraph.FieldSpec{
					Type:   field.TypeString,
					Column: steprun.FieldID,
				},
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}
// RunCreateBulk is the builder for creating a bulk of Run entities.
type RunCreateBulk struct {
	config
	// builders holds one RunCreate per entity; Save executes them all in
	// a single batch-create operation.
	builders []*RunCreate
}
// Save creates the Run entities in the database.
//
// Each builder's hooks are composed into a chain of Mutators; mutator i
// validates/prepares builder i and then invokes mutator i+1, so that the
// actual BatchCreate only runs once the last mutation in the chain has
// been reached with every spec prepared.
func (rcb *RunCreateBulk) Save(ctx context.Context) ([]*Run, error) {
	specs := make([]*sqlgraph.CreateSpec, len(rcb.builders))
	nodes := make([]*Run, len(rcb.builders))
	mutators := make([]Mutator, len(rcb.builders))
	for i := range rcb.builders {
		// The closure captures i and the root context per iteration.
		func(i int, root context.Context) {
			builder := rcb.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*RunMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				nodes[i], specs[i] = builder.createSpec()
				var err error
				if i < len(mutators)-1 {
					// Chain into the next builder's mutator using the
					// root context (not the possibly hook-wrapped ctx).
					_, err = mutators[i+1].Mutate(root, rcb.builders[i+1].mutation)
				} else {
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, rcb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil {
						if cerr, ok := isSQLConstraintError(err); ok {
							err = cerr
						}
					}
				}
				mutation.done = true
				if err != nil {
					return nil, err
				}
				return nodes[i], nil
			})
			// Wrap with the builder's hooks, outermost hook first.
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	// Kick off the chain from the first mutator.
	if len(mutators) > 0 {
		if _, err := mutators[0].Mutate(ctx, rcb.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
// SaveX calls Save and panics if Save returns an error.
func (rcb *RunCreateBulk) SaveX(ctx context.Context) []*Run {
v, err := rcb.Save(ctx)
if err != nil {
panic(err)
}
return v
} | internal/ent/run_create.go | 0.57678 | 0.434521 | run_create.go | starcoder |
package rotate
import (
"math"
"math/rand"
"github.com/paulwrubel/photolum/config/geometry"
"github.com/paulwrubel/photolum/config/geometry/primitive"
"github.com/paulwrubel/photolum/config/geometry/primitive/aabb"
"github.com/paulwrubel/photolum/config/shading/material"
)
// RotationZ is a primitive with a rotation around the z axis attached
// (the Intersection/BoundingBox math below rotates x and y and leaves z
// unchanged; the previous comment's "y axis" was incorrect).
type RotationZ struct {
	// AngleDegrees is the rotation angle in degrees, as deserialized.
	AngleDegrees float64     `json:"angle"`
	TypeName     string      `json:"type"`
	Data         interface{} `json:"data"`
	// Primitive is the wrapped primitive the rotation applies to.
	Primitive primitive.Primitive
	// theta, sinTheta and cosTheta are precomputed by Setup from
	// AngleDegrees (theta is in radians).
	theta    float64
	sinTheta float64
	cosTheta float64
}
// Setup precomputes the trigonometric terms this rotation needs (theta in
// radians plus its sine and cosine) and returns the receiver.
func (rz *RotationZ) Setup() (*RotationZ, error) {
	radians := (math.Pi / 180.0) * rz.AngleDegrees
	rz.theta = radians
	rz.sinTheta = math.Sin(radians)
	rz.cosTheta = math.Cos(radians)
	return rz, nil
}
// Intersection computes the intersection of this object and a given ray if
// it exists. The ray is rotated by -theta about the z axis into the wrapped
// primitive's local frame; on a hit, the surface normal is rotated back by
// +theta into world space.
func (rz *RotationZ) Intersection(ray geometry.Ray, tMin, tMax float64, rng *rand.Rand) (*material.RayHit, bool) {
	// Rotate the ray by -theta (only x and y change; z is the rotation axis).
	rotatedRay := ray
	rotatedRay.Origin.X = rz.cosTheta*ray.Origin.X + rz.sinTheta*ray.Origin.Y
	rotatedRay.Origin.Y = -rz.sinTheta*ray.Origin.X + rz.cosTheta*ray.Origin.Y
	rotatedRay.Direction.X = rz.cosTheta*ray.Direction.X + rz.sinTheta*ray.Direction.Y
	rotatedRay.Direction.Y = -rz.sinTheta*ray.Direction.X + rz.cosTheta*ray.Direction.Y
	rayHit, wasHit := rz.Primitive.Intersection(rotatedRay, tMin, tMax, rng)
	if wasHit {
		// Rotate the hit normal by +theta back into world space.
		unrotatedNormal := rayHit.NormalAtHit
		unrotatedNormal.X = rz.cosTheta*rayHit.NormalAtHit.X - rz.sinTheta*rayHit.NormalAtHit.Y
		unrotatedNormal.Y = rz.sinTheta*rayHit.NormalAtHit.X + rz.cosTheta*rayHit.NormalAtHit.Y
		// Return the hit against the ORIGINAL (world-space) ray.
		return &material.RayHit{
			Ray:         ray,
			NormalAtHit: unrotatedNormal,
			Time:        rayHit.Time,
			U:           rayHit.U,
			V:           rayHit.V,
			Material:    rayHit.Material,
		}, true
	}
	return nil, false
}
// BoundingBox returns an AABB for this object: all eight corners of the
// wrapped primitive's box are rotated by +theta about the z axis and a new
// axis-aligned box enclosing them is returned.
func (rz *RotationZ) BoundingBox(t0, t1 float64) (*aabb.AABB, bool) {
	box, ok := rz.Primitive.BoundingBox(t0, t1)
	if !ok {
		return nil, false
	}
	// Start with an inverted (max, -max) box and grow it corner by corner.
	minPoint := geometry.PointMax
	maxPoint := geometry.PointMax.Negate()
	// i, j, k ∈ {0, 1} select between the A and B corner on each axis.
	for i := 0.0; i < 2; i++ {
		for j := 0.0; j < 2; j++ {
			for k := 0.0; k < 2; k++ {
				x := i*box.B.X + (1-i)*box.A.X
				y := j*box.B.Y + (1-j)*box.A.Y
				z := k*box.B.Z + (1-k)*box.A.Z
				// Rotate the corner in the x-y plane; z is unchanged.
				newX := rz.cosTheta*x - rz.sinTheta*y
				newY := rz.sinTheta*x + rz.cosTheta*y
				rotatedCorner := geometry.Point{
					X: newX,
					Y: newY,
					Z: z,
				}
				maxPoint = geometry.MaxComponents(maxPoint, rotatedCorner)
				minPoint = geometry.MinComponents(minPoint, rotatedCorner)
			}
		}
	}
	return &aabb.AABB{
		A: minPoint,
		B: maxPoint,
	}, true
}
// SetMaterial sets the material of this object by delegating to the
// wrapped primitive.
func (rz *RotationZ) SetMaterial(m material.Material) {
	rz.Primitive.SetMaterial(m)
}

// IsInfinite returns whether the wrapped primitive is infinite.
func (rz *RotationZ) IsInfinite() bool {
	return rz.Primitive.IsInfinite()
}

// IsClosed returns whether the wrapped primitive is closed.
func (rz *RotationZ) IsClosed() bool {
	return rz.Primitive.IsClosed()
}

// Copy returns a shallow copy of this object; the wrapped Primitive is
// shared, not cloned.
func (rz *RotationZ) Copy() primitive.Primitive {
	newRZ := *rz
	return &newRZ
}
package cnns
import (
"fmt"
"math/rand"
"github.com/LdDl/cnns/tensor"
"github.com/pkg/errors"
"gonum.org/v1/gonum/mat"
)
// FullyConnectedLayer FC is simple layer structure (so this layer can be used for simple neural networks like XOR problem)
/*
	Oj - O{j}, activated output from previous layer for j-th neuron (in other words: previous summation input)
	Ok - O{k}, activated output from current layer for k-th node (in other words: activated summation input)
	SumInput - non-activated output for current layer for k-th node (in other words: summation input)
	LocalDelta - δ{k}, delta for current layer for k-th neuron
	NextDeltaWeightSum - SUM(δ{k}*w{j,k}), summation component for evaluating δ{j} for previous layer for j-th neuron
	Weights - w{j,k}, weight from j-th node of previous layer to k-th node of current layer
*/
type FullyConnectedLayer struct {
	Oj                   *mat.Dense
	Ok                   *mat.Dense
	NextDeltaWeightSum   *mat.Dense
	Weights              *mat.Dense
	PreviousWeightsState *mat.Dense // previous weight delta, used by the momentum term in UpdateWeights
	LocalDelta           *mat.Dense
	SumInput             *mat.Dense
	ActivationFunc       func(v float64) float64 // neuron activation function, applied element-wise
	ActivationDerivative func(v float64) float64 // derivative of ActivationFunc, used during backpropagation
	OutputSize           *tensor.TDsize          // (outSize, 1, 1) — see NewFullyConnectedLayer
	inputSize            *tensor.TDsize          // expected dimensions of incoming data
	trainMode            bool
}
// NewFullyConnectedLayer is the constructor for a fully-connected layer.
// inSize is the shape of incoming data and outSize the number of neurons.
// Activation defaults to TanH; weights start uniform in [-0.5, 0.5).
func NewFullyConnectedLayer(inSize *tensor.TDsize, outSize int) Layer {
	numInputs := inSize.Total()
	layer := &FullyConnectedLayer{
		inputSize:            inSize,
		OutputSize:           &tensor.TDsize{X: outSize, Y: 1, Z: 1},
		Ok:                   &mat.Dense{},
		Oj:                   mat.NewDense(outSize, 1, nil),
		SumInput:             mat.NewDense(outSize, 1, nil),
		Weights:              mat.NewDense(outSize, numInputs, nil),
		PreviousWeightsState: mat.NewDense(outSize, numInputs, nil),
		ActivationFunc:       ActivationTanh,           // default activation is TanH
		ActivationDerivative: ActivationTanhDerivative, // default derivative is 1 - TanH(x)*TanH(x)
		trainMode:            false,
	}
	layer.PreviousWeightsState.Zero()
	for row := 0; row < outSize; row++ {
		for col := 0; col < numInputs; col++ {
			layer.Weights.Set(row, col, rand.Float64()-0.5)
		}
	}
	return layer
}
// SetCustomWeights replaces the layer's weight matrix with the caller's
// (make it carefully: the dimensions are not validated against the layer
// shape). Exactly one matrix must be provided; otherwise the call is a
// no-op with a console warning.
func (fc *FullyConnectedLayer) SetCustomWeights(weights []*mat.Dense) {
	if len(weights) != 1 {
		fmt.Println("You can provide array of length 1 only (for fully-connected layer)")
		return
	}
	// Deep-copy so later mutations of the caller's matrix do not affect
	// the layer. DenseCopyOf allocates once, unlike the previous
	// NewDense + CloneFrom pair whose first allocation was discarded.
	fc.Weights = mat.DenseCopyOf(weights[0])
}
// GetInputSize Returns dimensions of incoming data for fully-connected layer.
func (fc *FullyConnectedLayer) GetInputSize() *tensor.TDsize {
	return fc.inputSize
}

// GetOutputSize Returns output size (dimensions) of fully-connected layer,
// which is (outSize, 1, 1).
func (fc *FullyConnectedLayer) GetOutputSize() *tensor.TDsize {
	return fc.OutputSize
}

// GetActivatedOutput Returns fully-connected layer's output after the
// activation function has been applied (see doActivation).
func (fc *FullyConnectedLayer) GetActivatedOutput() *mat.Dense {
	return fc.Ok // ACTIVATED values
}

// GetWeights Returns fully-connected layer's weights as a single-element
// slice (the layer has exactly one weight matrix).
func (fc *FullyConnectedLayer) GetWeights() []*mat.Dense {
	return []*mat.Dense{fc.Weights}
}

// GetGradients Returns fully-connected layer's gradients dense, i.e. the
// SUM(δ{k}*w{j,k}) term produced by CalculateGradients for the previous layer.
func (fc *FullyConnectedLayer) GetGradients() *mat.Dense {
	return fc.NextDeltaWeightSum
}
// FeedForward feeds input data into the fully-connected layer, reshaping
// the input to a column vector of the layer's expected width when needed,
// then computes and activates the layer output.
func (fc *FullyConnectedLayer) FeedForward(input *mat.Dense) error {
	inputRows, _ := input.Dims()
	_, expectedInputs := fc.Weights.Dims()
	if inputRows == expectedInputs {
		fc.Oj.CloneFrom(input)
	} else {
		// Dimensions do not line up for W*x; reshape into a column vector.
		reshaped, err := Reshape(input, expectedInputs, 1)
		if err != nil {
			return errors.Wrap(err, "Can't call FeedForward() on fully-connected layer [1]")
		}
		fc.Oj = reshaped
	}
	if err := fc.doActivation(); err != nil {
		return errors.Wrap(err, "Can't call FeedForward() on fully-connected layer [2]")
	}
	return nil
}
// doActivation computes the layer's weighted sums Σ = W * Oj, saves the
// raw sums for backpropagation, and applies the activation function to
// Ok in place.
func (fc *FullyConnectedLayer) doActivation() error {
	if fc.Oj == nil {
		return fmt.Errorf("Can't call doActivation() on FC layer")
	}
	// Σ(k) = W * Oj (matrix-vector product).
	fc.Ok.Mul(fc.Weights, fc.Oj)
	// Keep the pre-activation sums; CalculateGradients needs them for the
	// activation derivative.
	fc.SumInput.Copy(fc.Ok)
	// Activate in place: Ok = f(Σ). RawMatrix().Data aliases Ok's storage.
	rawMatrix := fc.Ok.RawMatrix().Data
	for i := range rawMatrix {
		rawMatrix[i] = fc.ActivationFunc(rawMatrix[i])
	}
	return nil
}
// CalculateGradients evaluates the layer's local deltas from the incoming
// error term and prepares the summed error term for the previous layer.
func (fc *FullyConnectedLayer) CalculateGradients(errorsDense *mat.Dense) error {
	// Evaluate ΔO{k}/ΔΣ(k): the activation derivative at the raw sums.
	// Computed in place, so SumInput now holds derivative values.
	rawMatrix := fc.SumInput.RawMatrix().Data
	for i := range rawMatrix {
		rawMatrix[i] = fc.ActivationDerivative(rawMatrix[i])
	}
	// Evaluate δ{k} = ΔE{k}/ΔO{k} * ΔO{k}/ΔΣ(k), element-wise.
	fc.LocalDelta = &mat.Dense{}
	fc.LocalDelta.MulElem(errorsDense, fc.SumInput)
	// Evaluate SUM(δ{k}*w{j,k}) = W^T * δ, the ΔE{k}/ΔO{k} term for the
	// next layer in backpropagation direction.
	fc.NextDeltaWeightSum = &mat.Dense{}
	fc.NextDeltaWeightSum.Mul(fc.Weights.T(), fc.LocalDelta)
	return nil
}
// UpdateWeights applies one gradient-descent step with momentum to the
// layer's weights, using the deltas prepared by CalculateGradients.
func (fc *FullyConnectedLayer) UpdateWeights(lp *LearningParams) {
	// Evaluate Δw = -η * δ * Oj^T (ΔΣ(k)/Δw{j}{k} times the local delta).
	Δw := &mat.Dense{}
	Δw.Mul(fc.LocalDelta, fc.Oj.T())
	Δw.Scale(-1.0*lp.LearningRate, Δw)
	// Inertia (as separated Scale() call): Δw = (1-μ)*Δw + μ*Δw_prev.
	// @todo - this should be optional.
	Δw.Scale(1.0-lp.Momentum, Δw)
	fc.PreviousWeightsState.Scale(lp.Momentum, fc.PreviousWeightsState)
	Δw.Add(Δw, fc.PreviousWeightsState)
	// Remember this step's delta for the next call's momentum term.
	fc.PreviousWeightsState.CloneFrom(Δw)
	// Update weights: w = w + Δw
	fc.Weights.Add(fc.Weights, Δw)
}
// PrintOutput pretty-prints the fully-connected layer's activated output,
// one row per line.
func (fc *FullyConnectedLayer) PrintOutput() {
	fmt.Println("Printing fully-connected Layer output...")
	numRows, _ := fc.Ok.Dims()
	for row := 0; row < numRows; row++ {
		fmt.Printf("\t%v\n", fc.Ok.RawRowView(row))
	}
}

// PrintWeights pretty-prints the fully-connected layer's weight matrix,
// one row per line.
func (fc *FullyConnectedLayer) PrintWeights() {
	fmt.Println("Printing fully-connected Layer weights...")
	numRows, _ := fc.Weights.Dims()
	for row := 0; row < numRows; row++ {
		fmt.Printf("\t%v\n", fc.Weights.RawRowView(row))
	}
}
// SetActivationFunc Set activation function for fully-connected layer. You need to specify function: func(v float64) float64
func (fc *FullyConnectedLayer) SetActivationFunc(f func(v float64) float64) {
	fc.ActivationFunc = f
}

// SetActivationDerivativeFunc Set derivative of activation function for fully-connected layer. You need to specify function: func(v float64) float64
func (fc *FullyConnectedLayer) SetActivationDerivativeFunc(f func(v float64) float64) {
	fc.ActivationDerivative = f
}

// GetStride Returns stride of fully-connected layer; always 0, since
// stride is not meaningful for this layer type.
func (fc *FullyConnectedLayer) GetStride() int {
	return 0
}

// GetType Returns "fc" as layer's type.
func (fc *FullyConnectedLayer) GetType() string {
	return "fc"
}
package packed
import "io"
// wordSize is the size in bytes of one word in the packed encoding;
// Pack requires len(src) to be a multiple of this.
const wordSize = 8

// Special case tags.
const (
	// zeroTag is emitted for a word whose bytes are all zero; it is
	// followed by a count of additional all-zero words.
	zeroTag byte = 0x00
	// unpackedTag is emitted for a word with no zero bytes; it is
	// followed by a count of words stored verbatim (unpacked).
	unpackedTag byte = 0xff
)
// Pack appends the packed version of src to dst and returns the
// resulting slice. len(src) must be a multiple of 8 or Pack panics.
//
// Each word is encoded as a header byte (bit i set iff byte i of the
// word is nonzero) followed by only the nonzero bytes. The two special
// headers get an extra count byte: 0x00 (all-zero word) is followed by
// the number of additional all-zero words, and 0xff (all-nonzero word)
// is followed by the number of subsequent words stored verbatim.
func Pack(dst, src []byte) []byte {
	if len(src)%wordSize != 0 {
		panic("packed.Pack len(src) must be a multiple of 8")
	}
	var buf [wordSize]byte
	for len(src) > 0 {
		// Build the header bitmap and collect nonzero bytes for one word.
		var hdr byte
		n := 0
		for i := uint(0); i < wordSize; i++ {
			if src[i] != 0 {
				hdr |= 1 << i
				buf[n] = src[i]
				n++
			}
		}
		dst = append(dst, hdr)
		dst = append(dst, buf[:n]...)
		src = src[wordSize:]
		switch hdr {
		case zeroTag:
			// Run-length encode up to 255 further all-zero words.
			z := min(numZeroWords(src), 0xff)
			dst = append(dst, byte(z))
			src = src[z*wordSize:]
		case unpackedTag:
			// Scan forward while words are not worth packing (at most
			// one zero byte each), limited to 255 words, and copy them
			// through verbatim.
			i := 0
			end := min(len(src), 0xff*wordSize)
			for i < end {
				zeros := 0
				for _, b := range src[i : i+wordSize] {
					if b == 0 {
						zeros++
					}
				}
				if zeros > 1 {
					break
				}
				i += wordSize
			}
			rawWords := byte(i / wordSize)
			dst = append(dst, rawWords)
			dst = append(dst, src[:i]...)
			src = src[i:]
		}
	}
	return dst
}
// numZeroWords returns the number of leading all-zero words in b; a
// trailing partial word is never counted.
func numZeroWords(b []byte) int {
	words := 0
	for len(b) >= wordSize {
		for _, c := range b[:wordSize] {
			if c != 0 {
				return words
			}
		}
		words++
		b = b[wordSize:]
	}
	return words
}
// decompressor streams the inverse of Pack from an underlying reader,
// carrying enough state to resume mid-word or mid-run between Read calls.
type decompressor struct {
	r     io.Reader
	buf   [wordSize]byte // last partially-consumed unpacked word
	bufsz int            // bytes of buf not yet delivered to the caller
	// track the bytes after a 0xff raw tag
	ffBuf          [wordSize]byte
	ffBufLoadCount int // count of bytes loaded from r into ffBuf (max wordSize)
	ffBufUsedCount int // count of bytes supplied to v during Read().
	zeros          int // zero bytes still owed to the caller from a zeroTag run
	raw            int // number of raw bytes left to copy through
	state          decompressorState
}
// NewReader returns a reader that decompresses a packed stream from r.
// The returned reader starts in normalState with all counters zeroed.
func NewReader(r io.Reader) io.Reader {
	return &decompressor{r: r}
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if a <= b {
		return a
	}
	return b
}
// Read implements io.Reader, decoding the packed stream into v. It is a
// resumable state machine: each call flushes whatever the previous call
// left pending (zero runs, a partial word in buf, raw pass-through bytes)
// before decoding new tags, so callers may supply arbitrarily small v.
func (c *decompressor) Read(v []byte) (n int, err error) {
	var b [1]byte
	var bytesRead int
	for {
		if len(v) == 0 {
			return
		}
		switch c.state {
		case rawState:
			// Copy the remaining raw (verbatim) bytes straight through.
			if c.raw > 0 {
				bytesRead, err = c.r.Read(v[:min(len(v), c.raw)])
				c.raw -= bytesRead
				v = v[bytesRead:]
				n += bytesRead
				if err != nil {
					return
				}
			} else {
				c.state = normalState
			}
		case postFFState:
			// Deliver the 8-byte word that follows a 0xff tag.
			if c.ffBufUsedCount >= wordSize {
				c.state = readnState
				continue
			}
			// invar: c.ffBufUsedCount < wordSize
			// before reading more from r, first empty any residual in buffer. Such
			// bytes were already read from r, are now
			// waiting in c.ffBuf, and have not yet been given to v: so
			// these bytes are first in line to go.
			if c.ffBufUsedCount < c.ffBufLoadCount {
				br := copy(v, c.ffBuf[c.ffBufUsedCount:c.ffBufLoadCount])
				c.ffBufUsedCount += br
				v = v[br:]
				n += br
			}
			if c.ffBufUsedCount >= wordSize {
				c.state = readnState
				continue
			}
			// invar: c.ffBufUsedCount < wordSize
			// io.ReadFull, try to read exactly (wordSize - cc.ffBufLoadCount) bytes
			// io.ReadFull returns EOF only if no bytes were read
			if c.ffBufLoadCount < wordSize {
				bytesRead, err = io.ReadFull(c.r, c.ffBuf[c.ffBufLoadCount:]) // read up to wordSize bytes into c.buf
				if bytesRead > 0 {
					c.ffBufLoadCount += bytesRead
				} else {
					return
				}
				if err != nil {
					return
				}
			}
			// stay in postFFState
		case readnState:
			// Read the count byte after a 0xff word: how many raw words follow.
			if bytesRead, err = c.r.Read(b[:]); err != nil {
				return
			}
			if bytesRead == 0 {
				return
			}
			c.raw = int(b[0]) * wordSize
			c.state = rawState
		case normalState:
			// First flush any zero-run owed from a previous call.
			if c.zeros > 0 {
				num0 := min(len(v), c.zeros)
				x := v[:num0]
				for i := range x {
					x[i] = 0
				}
				c.zeros -= num0
				n += num0
				if c.zeros > 0 {
					return n, nil
				}
				v = v[num0:]
				if len(v) == 0 {
					return n, nil
				}
			}
			// INVAR: c.zeros == 0
			// Then flush any partially-delivered unpacked word.
			if c.bufsz > 0 {
				nc := copy(v, c.buf[wordSize-c.bufsz:])
				c.bufsz -= nc
				n += nc
				v = v[nc:]
				if c.bufsz > 0 {
					return n, nil
				}
			}
			// INVAR: c.bufz == 0
			// Decode tag bytes until v is full or the state changes.
			for c.state == normalState && len(v) > 0 {
				if _, err = c.r.Read(b[:]); err != nil {
					return
				}
				switch b[0] {
				case unpackedTag:
					c.ffBufLoadCount = 0
					c.ffBufUsedCount = 0
					c.state = postFFState
					// NOTE(review): this break only exits the switch,
					// which Go does implicitly; the state change above is
					// what terminates the enclosing loop.
					break
				case zeroTag:
					// The count byte encodes (count+1) zero words in total.
					if _, err = c.r.Read(b[:]); err != nil {
						return
					}
					requestedZeroBytes := (int(b[0]) + 1) * wordSize
					zeros := min(requestedZeroBytes, len(v))
					for i := 0; i < zeros; i++ {
						v[i] = 0
					}
					v = v[zeros:]
					n += zeros
					// remember the leftover zeros to write
					c.zeros = requestedZeroBytes - zeros
				default:
					// Ordinary word: the tag is a bitmap of nonzero bytes.
					ones := 0
					var buf [wordSize]byte
					for i := 0; i < wordSize; i++ {
						if (b[0] & (1 << uint(i))) != 0 {
							ones++
						}
					}
					_, err = io.ReadFull(c.r, buf[:ones])
					if err != nil {
						return
					}
					// Expand the packed bytes back into an 8-byte word.
					for i, j := 0, 0; i < wordSize; i++ {
						if (b[0] & (1 << uint(i))) != 0 {
							c.buf[i] = buf[j]
							j++
						} else {
							c.buf[i] = 0
						}
					}
					use := copy(v, c.buf[:])
					v = v[use:]
					n += use
					c.bufsz = wordSize - use
				}
			}
		}
	}
	// NOTE(review): unreachable — the loop above only exits via return —
	// kept as-is from the original.
	return
}
// decompressorState is the state of a decompressor.
type decompressorState uint8

// Decompressor states
const (
	// normalState decodes tag bytes and their packed payloads.
	normalState decompressorState = iota
	// These states are for dealing with the 0xFF tag and the raw bytes that follow.
	// They tell us where to pick up if we are interrupted in the middle of anything
	// after the 0xFF tag, until we are done with the raw read.
	postFFState // delivering the 8-byte word that follows a 0xff tag
	readnState  // reading the raw-word count byte
	rawState    // copying raw words straight through
)
package accounting
import (
"encoding/json"
)
// AccountingFeatures Outlines the features that are supported by the external accounting system.
type AccountingFeatures struct {
	// CreateInvoice describes support for creating invoices.
	CreateInvoice CreateInvoiceFeature `json:"createInvoice"`
	// ImportInvoice describes support for importing invoices.
	ImportInvoice ImportInvoiceFeature `json:"importInvoice"`
	// Indicates if syncing objects from the external account system into HubSpot is supported for the integration. This is a map, where the key is one of `CONTACT` or `PRODUCT`, to indicate which type of object you do or don't support syncing. For example: ``` \"sync\": { \"CONTACT\": { \"toHubSpot\": true }, \"PRODUCT\": { \"toHubSpot\": true } } ```
	Sync map[string]ObjectSyncFeature `json:"sync"`
}
// NewAccountingFeatures instantiates a new AccountingFeatures object with
// all API-required properties supplied up front; the argument list will
// change if the set of required properties changes.
func NewAccountingFeatures(createInvoice CreateInvoiceFeature, importInvoice ImportInvoiceFeature, sync map[string]ObjectSyncFeature) *AccountingFeatures {
	return &AccountingFeatures{
		CreateInvoice: createInvoice,
		ImportInvoice: importInvoice,
		Sync:          sync,
	}
}
// NewAccountingFeaturesWithDefaults instantiates a new AccountingFeatures
// object, assigning defaults only to properties that declare one (none do
// here); it does not guarantee that API-required properties are set.
func NewAccountingFeaturesWithDefaults() *AccountingFeatures {
	return &AccountingFeatures{}
}
// GetCreateInvoice returns the CreateInvoice field value.
// A nil receiver yields the zero value.
func (o *AccountingFeatures) GetCreateInvoice() CreateInvoiceFeature {
	if o == nil {
		var ret CreateInvoiceFeature
		return ret
	}

	return o.CreateInvoice
}

// GetCreateInvoiceOk returns a tuple with the CreateInvoice field value
// and a boolean to check if the value has been set (false only for a nil
// receiver, since the field is required).
func (o *AccountingFeatures) GetCreateInvoiceOk() (*CreateInvoiceFeature, bool) {
	if o == nil {
		return nil, false
	}
	return &o.CreateInvoice, true
}

// SetCreateInvoice sets field value.
func (o *AccountingFeatures) SetCreateInvoice(v CreateInvoiceFeature) {
	o.CreateInvoice = v
}
// GetImportInvoice returns the ImportInvoice field value.
// A nil receiver yields the zero value.
func (o *AccountingFeatures) GetImportInvoice() ImportInvoiceFeature {
	if o == nil {
		var ret ImportInvoiceFeature
		return ret
	}

	return o.ImportInvoice
}

// GetImportInvoiceOk returns a tuple with the ImportInvoice field value
// and a boolean to check if the value has been set (false only for a nil
// receiver, since the field is required).
func (o *AccountingFeatures) GetImportInvoiceOk() (*ImportInvoiceFeature, bool) {
	if o == nil {
		return nil, false
	}
	return &o.ImportInvoice, true
}

// SetImportInvoice sets field value.
func (o *AccountingFeatures) SetImportInvoice(v ImportInvoiceFeature) {
	o.ImportInvoice = v
}
// GetSync returns the Sync field value.
// A nil receiver yields the zero value (a nil map).
func (o *AccountingFeatures) GetSync() map[string]ObjectSyncFeature {
	if o == nil {
		var ret map[string]ObjectSyncFeature
		return ret
	}

	return o.Sync
}

// GetSyncOk returns a tuple with the Sync field value
// and a boolean to check if the value has been set (false only for a nil
// receiver, since the field is required).
func (o *AccountingFeatures) GetSyncOk() (*map[string]ObjectSyncFeature, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Sync, true
}

// SetSync sets field value.
func (o *AccountingFeatures) SetSync(v map[string]ObjectSyncFeature) {
	o.Sync = v
}
// MarshalJSON serializes the three (all required) fields under their API
// key names. The generated `if true { ... }` guards around each field were
// dead code and have been folded into a single map literal; the emitted
// JSON is unchanged.
func (o AccountingFeatures) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"createInvoice": o.CreateInvoice,
		"importInvoice": o.ImportInvoice,
		"sync":          o.Sync,
	}
	return json.Marshal(toSerialize)
}
// NullableAccountingFeatures wraps an AccountingFeatures pointer together
// with an isSet flag, so JSON (un)marshaling can distinguish an explicit
// null from an absent value.
type NullableAccountingFeatures struct {
	value *AccountingFeatures
	isSet bool
}

// Get returns the wrapped value (nil when unset or explicitly null).
func (v NullableAccountingFeatures) Get() *AccountingFeatures {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableAccountingFeatures) Set(val *AccountingFeatures) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set (or UnmarshalJSON) has been called.
func (v NullableAccountingFeatures) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullableAccountingFeatures) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableAccountingFeatures returns a wrapper already marked as set.
func NewNullableAccountingFeatures(val *AccountingFeatures) *NullableAccountingFeatures {
	return &NullableAccountingFeatures{value: val, isSet: true}
}

// MarshalJSON emits the wrapped value (or null when value is nil).
func (v NullableAccountingFeatures) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks it set; a JSON
// null leaves value nil but still sets the flag.
func (v *NullableAccountingFeatures) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package main
import "github.com/jakecoffman/cp/examples"
import . "github.com/jakecoffman/cp"
// main builds the "springies" demo: a lattice of rigid bars joined by
// pivot joints and custom damped springs, then hands the space to the
// shared example runner at 60 steps per second.
func main() {
	space := NewSpace()
	// Bars of the structure; the final argument is the collision group so
	// connected bars don't collide with each other.
	body1 := addBar(space, Vector{-240, 160}, Vector{-160, 80}, 1)
	body2 := addBar(space, Vector{-160, 80}, Vector{-80, 160}, 1)
	body3 := addBar(space, Vector{0, 160}, Vector{80, 0}, 1)
	body4 := addBar(space, Vector{160, 160}, Vector{240, 160}, 1)
	body5 := addBar(space, Vector{-240, 0}, Vector{-160, -80}, 1)
	body6 := addBar(space, Vector{-160, -80}, Vector{-80, 0}, 1)
	body7 := addBar(space, Vector{-80, 0}, Vector{0, 0}, 1)
	body8 := addBar(space, Vector{0, -80}, Vector{80, -80}, 1)
	body9 := addBar(space, Vector{240, 80}, Vector{160, 0}, 1)
	body10 := addBar(space, Vector{160, 0}, Vector{240, -80}, 1)
	body11 := addBar(space, Vector{-240, -80}, Vector{-160, -160}, 1)
	body12 := addBar(space, Vector{-160, -160}, Vector{-80, -160}, 1)
	body13 := addBar(space, Vector{0, -160}, Vector{80, -160}, 1)
	body14 := addBar(space, Vector{160, -160}, Vector{240, -160}, 1)
	// Rigid pivot joints between bar endpoints (anchors in body-local coordinates).
	space.AddConstraint(NewPivotJoint2(body1, body2, Vector{40, -40}, Vector{-40, -40}))
	space.AddConstraint(NewPivotJoint2(body5, body6, Vector{40, -40}, Vector{-40, -40}))
	space.AddConstraint(NewPivotJoint2(body6, body7, Vector{40, 40}, Vector{-40, 0}))
	space.AddConstraint(NewPivotJoint2(body9, body10, Vector{-40, -40}, Vector{-40, 40}))
	space.AddConstraint(NewPivotJoint2(body11, body12, Vector{40, -40}, Vector{-40, 0}))
	// Shared stiffness/damping for every spring below.
	stiff := 100.0
	damp := 0.5
	// Springs anchoring the structure to the static body (world frame).
	space.AddConstraint(newSpring(space.StaticBody, body1, Vector{-320, 240}, Vector{-40, 40}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body1, Vector{-320, 80}, Vector{-40, 40}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body1, Vector{-160, 240}, Vector{-40, 40}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body2, Vector{-160, 240}, Vector{40, 40}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body2, Vector{0, 240}, Vector{40, 40}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body3, Vector{80, 240}, Vector{-40, 80}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body4, Vector{80, 240}, Vector{-40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body4, Vector{320, 240}, Vector{40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body5, Vector{-320, 80}, Vector{-40, 40}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body9, Vector{320, 80}, Vector{40, 40}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body10, Vector{320, 0}, Vector{40, -40}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body10, Vector{320, -160}, Vector{40, -40}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body11, Vector{-320, -160}, Vector{-40, 40}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body12, Vector{-240, -240}, Vector{-40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body12, Vector{0, -240}, Vector{40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body13, Vector{0, -240}, Vector{-40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body13, Vector{80, -240}, Vector{40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body14, Vector{80, -240}, Vector{-40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body14, Vector{240, -240}, Vector{40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(space.StaticBody, body14, Vector{320, -160}, Vector{40, 0}, 0, stiff, damp))
	// Springs between bars.
	space.AddConstraint(newSpring(body1, body5, Vector{40, -40}, Vector{-40, 40}, 0, stiff, damp))
	space.AddConstraint(newSpring(body1, body6, Vector{40, -40}, Vector{40, 40}, 0, stiff, damp))
	space.AddConstraint(newSpring(body2, body3, Vector{40, 40}, Vector{-40, 80}, 0, stiff, damp))
	space.AddConstraint(newSpring(body3, body4, Vector{-40, 80}, Vector{-40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(body3, body4, Vector{40, -80}, Vector{-40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(body3, body7, Vector{40, -80}, Vector{40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(body3, body7, Vector{-40, 80}, Vector{-40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(body3, body8, Vector{40, -80}, Vector{40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(body3, body9, Vector{40, -80}, Vector{-40, -40}, 0, stiff, damp))
	space.AddConstraint(newSpring(body4, body9, Vector{40, 0}, Vector{40, 40}, 0, stiff, damp))
	space.AddConstraint(newSpring(body5, body11, Vector{-40, 40}, Vector{-40, 40}, 0, stiff, damp))
	space.AddConstraint(newSpring(body5, body11, Vector{40, -40}, Vector{40, -40}, 0, stiff, damp))
	space.AddConstraint(newSpring(body7, body8, Vector{40, 0}, Vector{-40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(body8, body12, Vector{-40, 0}, Vector{40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(body8, body13, Vector{-40, 0}, Vector{-40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(body8, body13, Vector{40, 0}, Vector{40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(body8, body14, Vector{40, 0}, Vector{-40, 0}, 0, stiff, damp))
	// NOTE(review): the next two constraints are byte-identical — the same
	// spring between body10 and body14 is added twice, doubling its
	// effective force. Verify against the upstream demo whether this
	// duplication is intentional.
	space.AddConstraint(newSpring(body10, body14, Vector{40, -40}, Vector{-40, 0}, 0, stiff, damp))
	space.AddConstraint(newSpring(body10, body14, Vector{40, -40}, Vector{-40, 0}, 0, stiff, damp))
	examples.Main(space, 1.0/60.0, update, examples.DefaultDraw)
}
// springForce is a custom spring force: linear in the stretch, but with
// the stretch clamped to +/-20 so distant bodies are not yanked violently.
func springForce(spring *DampedSpring, dist float64) float64 {
	const maxStretch = 20.0
	stretch := Clamp(spring.RestLength-dist, -maxStretch, maxStretch)
	return stretch * spring.Stiffness
}
// newSpring builds a damped spring between a and b and installs the
// clamped springForce function in place of the default linear one.
func newSpring(a, b *Body, anchorA, anchorB Vector, restLength, stiff, damp float64) *Constraint {
	c := NewDampedSpring(a, b, anchorA, anchorB, restLength, stiff, damp)
	ds := c.Class.(*DampedSpring)
	ds.SpringForceFunc = springForce
	return ds.Constraint
}
// addBar creates a rigid bar between points a and b (mass proportional to
// its length), adds it to the space, and returns its body. group is used
// in the shape filter so bars in the same group never collide.
func addBar(space *Space, a, b Vector, group uint) *Body {
	center := a.Add(b).Mult(1.0 / 2.0) // midpoint of the bar
	length := b.Sub(a).Length()
	mass := length / 160.0
	// Moment of inertia of a thin rod about its center: m*L^2/12.
	body := space.AddBody(NewBody(mass, mass*length*length/12.0))
	body.SetPosition(center)
	// Segment endpoints are expressed relative to the body's center.
	shape := space.AddShape(NewSegment(body, a.Sub(center), b.Sub(center), 10))
	shape.SetFilter(NewShapeFilter(group, ALL_CATEGORIES, ALL_CATEGORIES))
	return body
}
// update advances the physics simulation by one fixed time step; it is
// invoked by examples.Main each frame.
func update(space *Space, dt float64) {
	space.Step(dt)
}
package edge_compute
import (
"encoding/json"
)
// V1MatchExpression An expression to match selectors against a set of values
type V1MatchExpression struct {
	// The name of the selector to perform a match against
	Key *string `json:"key,omitempty"`
	// The operation to perform to match a selector Valid values are \"In\", \"NotIn\", \"Exists\", and \"DoesNotExist\".
	Operator *string `json:"operator,omitempty"`
	// The values to match in the selector
	Values *[]string `json:"values,omitempty"`
}
// NewV1MatchExpression instantiates a new V1MatchExpression object with
// every optional field left unset; the argument list will change if the
// set of API-required properties changes (currently there are none).
func NewV1MatchExpression() *V1MatchExpression {
	return &V1MatchExpression{}
}

// NewV1MatchExpressionWithDefaults instantiates a new V1MatchExpression
// object, assigning defaults only to properties that declare one (none
// do here); it does not guarantee API-required properties are set.
func NewV1MatchExpressionWithDefaults() *V1MatchExpression {
	return &V1MatchExpression{}
}
// GetKey returns the Key field value if set, zero value otherwise.
func (o *V1MatchExpression) GetKey() string {
if o == nil || o.Key == nil {
var ret string
return ret
}
return *o.Key
}
// GetKeyOk returns a tuple with the Key field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *V1MatchExpression) GetKeyOk() (*string, bool) {
if o == nil || o.Key == nil {
return nil, false
}
return o.Key, true
}
// HasKey returns a boolean if a field has been set.
func (o *V1MatchExpression) HasKey() bool {
if o != nil && o.Key != nil {
return true
}
return false
}
// SetKey gets a reference to the given string and assigns it to the Key field.
func (o *V1MatchExpression) SetKey(v string) {
o.Key = &v
}
// GetOperator returns the Operator field value if set, zero value otherwise.
func (o *V1MatchExpression) GetOperator() string {
if o == nil || o.Operator == nil {
var ret string
return ret
}
return *o.Operator
}
// GetOperatorOk returns a tuple with the Operator field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *V1MatchExpression) GetOperatorOk() (*string, bool) {
if o == nil || o.Operator == nil {
return nil, false
}
return o.Operator, true
}
// HasOperator returns a boolean if a field has been set.
func (o *V1MatchExpression) HasOperator() bool {
if o != nil && o.Operator != nil {
return true
}
return false
}
// SetOperator gets a reference to the given string and assigns it to the Operator field.
func (o *V1MatchExpression) SetOperator(v string) {
o.Operator = &v
}
// GetValues returns the Values field value if set, zero value otherwise.
func (o *V1MatchExpression) GetValues() []string {
if o == nil || o.Values == nil {
var ret []string
return ret
}
return *o.Values
}
// GetValuesOk returns a tuple with the Values field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *V1MatchExpression) GetValuesOk() (*[]string, bool) {
if o == nil || o.Values == nil {
return nil, false
}
return o.Values, true
}
// HasValues returns a boolean if a field has been set.
func (o *V1MatchExpression) HasValues() bool {
if o != nil && o.Values != nil {
return true
}
return false
}
// SetValues gets a reference to the given []string and assigns it to the Values field.
func (o *V1MatchExpression) SetValues(v []string) {
o.Values = &v
}
func (o V1MatchExpression) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.Key != nil {
toSerialize["key"] = o.Key
}
if o.Operator != nil {
toSerialize["operator"] = o.Operator
}
if o.Values != nil {
toSerialize["values"] = o.Values
}
return json.Marshal(toSerialize)
}
type NullableV1MatchExpression struct {
value *V1MatchExpression
isSet bool
}
func (v NullableV1MatchExpression) Get() *V1MatchExpression {
return v.value
}
func (v *NullableV1MatchExpression) Set(val *V1MatchExpression) {
v.value = val
v.isSet = true
}
func (v NullableV1MatchExpression) IsSet() bool {
return v.isSet
}
func (v *NullableV1MatchExpression) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableV1MatchExpression(val *V1MatchExpression) *NullableV1MatchExpression {
return &NullableV1MatchExpression{value: val, isSet: true}
}
func (v NullableV1MatchExpression) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableV1MatchExpression) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | pkg/edge_compute/model_v1_match_expression.go | 0.809878 | 0.478163 | model_v1_match_expression.go | starcoder |
package fauxgl
import (
"fmt"
"image/color"
"math"
"strings"
)
var (
	// Discard and Transparent are both the zero Color.
	// NOTE(review): they compare equal, so any special "discard this
	// fragment" behaviour must be enforced by convention elsewhere —
	// confirm call sites distinguish them.
	Discard     = Color{}
	Transparent = Color{}
	Black       = Color{0, 0, 0, 1}
	White       = Color{1, 1, 1, 1}
)

// Color is an RGBA color with float64 channels, nominally in [0, 1].
// Values outside that range are permitted; clamping happens only in NRGBA.
type Color struct {
	R, G, B, A float64
}
// Gray returns the opaque grey level x (the same value on R, G and B).
func Gray(x float64) Color {
	c := Color{R: x, G: x, B: x}
	c.A = 1
	return c
}
// MakeColor converts any color.Color to a Color by scaling the 16-bit
// channels returned by RGBA() into [0, 1].
// NOTE(review): RGBA() returns alpha-premultiplied values — confirm the
// rest of the pipeline expects premultiplied channels.
func MakeColor(c color.Color) Color {
	r, g, b, a := c.RGBA()
	const d = 0xffff
	return Color{float64(r) / d, float64(g) / d, float64(b) / d, float64(a) / d}
}
// HexColor parses a CSS-style hex string ("rgb", "rgba", "rrggbb" or
// "rrggbbaa", leading '#' optional) into a Color. Alpha defaults to
// fully opaque when the string carries no alpha digits.
func HexColor(x string) Color {
	x = strings.Trim(x, "#")
	var r, g, b, a int
	a = 255
	switch len(x) {
	case 3:
		fmt.Sscanf(x, "%1x%1x%1x", &r, &g, &b)
		// Expand each 4-bit digit to 8 bits by doubling the nibble
		// (0xF -> 0xFF), matching CSS short-hex semantics.
		r = (r << 4) | r
		g = (g << 4) | g
		b = (b << 4) | b
	case 4:
		fmt.Sscanf(x, "%1x%1x%1x%1x", &r, &g, &b, &a)
		r = (r << 4) | r
		g = (g << 4) | g
		b = (b << 4) | b
		a = (a << 4) | a
	case 6:
		fmt.Sscanf(x, "%02x%02x%02x", &r, &g, &b)
	case 8:
		fmt.Sscanf(x, "%02x%02x%02x%02x", &r, &g, &b, &a)
	}
	// NOTE(review): Sscanf errors and unexpected lengths are silently
	// ignored, leaving affected channels at zero — confirm callers only
	// pass well-formed strings.
	const d = 0xff
	return Color{float64(r) / d, float64(g) / d, float64(b) / d, float64(a) / d}
}
// NRGBA converts to an 8-bit-per-channel non-premultiplied color,
// clamping every channel into [0, 1] first.
func (c Color) NRGBA() color.NRGBA {
	const d = 0xff
	r := Clamp(c.R, 0, 1)
	g := Clamp(c.G, 0, 1)
	b := Clamp(c.B, 0, 1)
	a := Clamp(c.A, 0, 1)
	return color.NRGBA{uint8(r * d), uint8(g * d), uint8(b * d), uint8(a * d)}
}

// Opaque returns the color with alpha forced to 1.
func (a Color) Opaque() Color {
	return Color{a.R, a.G, a.B, 1}
}

// Alpha returns the color with alpha replaced by the given value.
func (a Color) Alpha(alpha float64) Color {
	return Color{a.R, a.G, a.B, alpha}
}

// Lerp linearly interpolates between a (t=0) and b (t=1).
func (a Color) Lerp(b Color, t float64) Color {
	return a.Add(b.Sub(a).MulScalar(t))
}

// Add returns the channel-wise sum a + b.
func (a Color) Add(b Color) Color {
	return Color{a.R + b.R, a.G + b.G, a.B + b.B, a.A + b.A}
}

// Sub returns the channel-wise difference a - b.
func (a Color) Sub(b Color) Color {
	return Color{a.R - b.R, a.G - b.G, a.B - b.B, a.A - b.A}
}

// Mul returns the channel-wise product a * b.
func (a Color) Mul(b Color) Color {
	return Color{a.R * b.R, a.G * b.G, a.B * b.B, a.A * b.A}
}

// Div returns the channel-wise quotient a / b (no zero check).
func (a Color) Div(b Color) Color {
	return Color{a.R / b.R, a.G / b.G, a.B / b.B, a.A / b.A}
}

// AddScalar adds b to every channel, including alpha.
func (a Color) AddScalar(b float64) Color {
	return Color{a.R + b, a.G + b, a.B + b, a.A + b}
}

// SubScalar subtracts b from every channel, including alpha.
func (a Color) SubScalar(b float64) Color {
	return Color{a.R - b, a.G - b, a.B - b, a.A - b}
}

// MulScalar multiplies every channel, including alpha, by b.
func (a Color) MulScalar(b float64) Color {
	return Color{a.R * b, a.G * b, a.B * b, a.A * b}
}

// DivScalar divides every channel, including alpha, by b (no zero check).
func (a Color) DivScalar(b float64) Color {
	return Color{a.R / b, a.G / b, a.B / b, a.A / b}
}

// Pow raises every channel, including alpha, to the power b
// (e.g. for gamma correction).
func (a Color) Pow(b float64) Color {
	return Color{math.Pow(a.R, b), math.Pow(a.G, b), math.Pow(a.B, b), math.Pow(a.A, b)}
}

// Min returns the channel-wise minimum of a and b.
func (a Color) Min(b Color) Color {
	return Color{math.Min(a.R, b.R), math.Min(a.G, b.G), math.Min(a.B, b.B), math.Min(a.A, b.A)}
}

// Max returns the channel-wise maximum of a and b.
func (a Color) Max(b Color) Color {
	return Color{math.Max(a.R, b.R), math.Max(a.G, b.G), math.Max(a.B, b.B), math.Max(a.A, b.A)}
} | color.go | 0.84124 | 0.401981 | color.go | starcoder |
package assert
import (
"encoding/json"
"testing"
"github.com/google/go-cmp/cmp"
)
// JsonObjResponseMatchExpected runs a subtest asserting that the JSON
// object in jsonResponse deep-equals expected (via go-cmp); on mismatch
// the cmp diff is included in the failure message.
// NOTE(review): an unmarshal error is reported with t.Error but the
// comparison still runs against the empty map — consider returning early.
func JsonObjResponseMatchExpected(t *testing.T, expected interface{}, jsonResponse []byte) {
	t.Run("JsonObjResponseMatchExpected", func(t *testing.T) {
		response := make(map[string]interface{})
		if err := json.Unmarshal(jsonResponse, &response); err != nil {
			t.Error(err)
		}
		if !cmp.Equal(response, expected) {
			difference := cmp.Diff(response, expected)
			t.Errorf(
				"response does not match the expected output\nExpected: %v\nActual: %v\nDifference: %v",
				expected, response, difference)
		}
	})
} // JsonObjResponseMatchExpected
// JsonArrResponseMatchExpected runs a subtest asserting that the JSON
// array in jsonResponse deep-equals expected (via go-cmp); on mismatch
// the cmp diff is included in the failure message.
// NOTE(review): an unmarshal error is reported with t.Error but the
// comparison still runs against the empty slice — consider returning early.
func JsonArrResponseMatchExpected(t *testing.T, expected interface{}, jsonResponse []byte) {
	t.Run("JsonArrResponseMatchExpected", func(t *testing.T) {
		response := make([]map[string]interface{}, 0)
		if err := json.Unmarshal(jsonResponse, &response); err != nil {
			t.Error(err)
		}
		if !cmp.Equal(response, expected) {
			difference := cmp.Diff(response, expected)
			t.Errorf(
				"response does not match the expected output\nExpected: %v\nActual: %v\nDifference: %v",
				expected, response, difference)
		}
	})
} // JsonArrResponseMatchExpected
// JsonArrResponseContains asserts that every object in expected appears
// (by go-cmp deep equality) somewhere in the JSON array jsonResponse.
// expected must be a []map[string]interface{}; any other dynamic type now
// fails the subtest explicitly instead of panicking on the type assertion
// (the previous code asserted the type twice, unchecked).
func JsonArrResponseContains(t *testing.T, expected interface{}, jsonResponse []byte) {
	t.Run("JsonArrResponseContains", func(t *testing.T) {
		response := make([]map[string]interface{}, 0)
		if err := json.Unmarshal(jsonResponse, &response); err != nil {
			t.Error(err)
		}
		expectedItems, ok := expected.([]map[string]interface{})
		if !ok {
			t.Fatalf("expected must be []map[string]interface{}, got %T", expected)
		}
		lenDetected := 0
		for _, expectedItem := range expectedItems {
			for _, responseItem := range response {
				if cmp.Equal(expectedItem, responseItem) {
					lenDetected++
					break
				}
			}
		}
		if len(expectedItems) != lenDetected {
			t.Errorf("Expected to find %d items in the response, found %d instead", len(expectedItems), lenDetected)
		}
	})
} // JsonArrResponseContains
// Equal runs a subtest named topic that fails when expected != actual.
// Comparison uses Go's == operator, so both values must be comparable
// and of the same dynamic type.
// NOTE(review): the message mentions "response code" but the helper is
// used for arbitrary values — consider generalizing the wording.
func Equal(t *testing.T, topic string, expected, actual interface{}) {
	t.Run(topic, func(t *testing.T) {
		if expected != actual {
			t.Errorf("[%v] Expected response code <%v>. Got <%v>\n", topic, expected, actual)
		}
	})
} // Equal
// IntGreater runs a subtest named topic that fails unless expected is
// strictly greater than actual (it errors when expected <= actual).
// The error message previously said "Expected response code", a
// copy-paste from Equal; it now states the comparison that failed.
func IntGreater(t *testing.T, topic string, expected, actual int) {
	t.Run(topic, func(t *testing.T) {
		if expected <= actual {
			t.Errorf("[%v] Expected <%v> to be greater than <%v>\n", topic, expected, actual)
		}
	})
} // IntGreater
package nifi
import (
"encoding/json"
)
// VersionedFlowCoordinates struct for VersionedFlowCoordinates
//
// NOTE(review): this is an OpenAPI-generated optional-pointer model; the
// accessor boilerplate below follows the generator's standard pattern, so
// prefer regenerating from the spec over hand-editing.
type VersionedFlowCoordinates struct {
	// The URL of the Flow Registry that contains the flow
	RegistryUrl *string `json:"registryUrl,omitempty"`
	// The UUID of the bucket that the flow resides in
	BucketId *string `json:"bucketId,omitempty"`
	// The UUID of the flow
	FlowId *string `json:"flowId,omitempty"`
	// The version of the flow
	Version *int32 `json:"version,omitempty"`
	// Whether or not these coordinates point to the latest version of the flow
	Latest *bool `json:"latest,omitempty"`
}
// NewVersionedFlowCoordinates instantiates a new VersionedFlowCoordinates object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewVersionedFlowCoordinates() *VersionedFlowCoordinates {
this := VersionedFlowCoordinates{}
return &this
}
// NewVersionedFlowCoordinatesWithDefaults instantiates a new VersionedFlowCoordinates object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewVersionedFlowCoordinatesWithDefaults() *VersionedFlowCoordinates {
this := VersionedFlowCoordinates{}
return &this
}
// GetRegistryUrl returns the RegistryUrl field value if set, zero value otherwise.
func (o *VersionedFlowCoordinates) GetRegistryUrl() string {
if o == nil || o.RegistryUrl == nil {
var ret string
return ret
}
return *o.RegistryUrl
}
// GetRegistryUrlOk returns a tuple with the RegistryUrl field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *VersionedFlowCoordinates) GetRegistryUrlOk() (*string, bool) {
if o == nil || o.RegistryUrl == nil {
return nil, false
}
return o.RegistryUrl, true
}
// HasRegistryUrl returns a boolean if a field has been set.
func (o *VersionedFlowCoordinates) HasRegistryUrl() bool {
if o != nil && o.RegistryUrl != nil {
return true
}
return false
}
// SetRegistryUrl gets a reference to the given string and assigns it to the RegistryUrl field.
func (o *VersionedFlowCoordinates) SetRegistryUrl(v string) {
o.RegistryUrl = &v
}
// GetBucketId returns the BucketId field value if set, zero value otherwise.
func (o *VersionedFlowCoordinates) GetBucketId() string {
if o == nil || o.BucketId == nil {
var ret string
return ret
}
return *o.BucketId
}
// GetBucketIdOk returns a tuple with the BucketId field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *VersionedFlowCoordinates) GetBucketIdOk() (*string, bool) {
if o == nil || o.BucketId == nil {
return nil, false
}
return o.BucketId, true
}
// HasBucketId returns a boolean if a field has been set.
func (o *VersionedFlowCoordinates) HasBucketId() bool {
if o != nil && o.BucketId != nil {
return true
}
return false
}
// SetBucketId gets a reference to the given string and assigns it to the BucketId field.
func (o *VersionedFlowCoordinates) SetBucketId(v string) {
o.BucketId = &v
}
// GetFlowId returns the FlowId field value if set, zero value otherwise.
func (o *VersionedFlowCoordinates) GetFlowId() string {
if o == nil || o.FlowId == nil {
var ret string
return ret
}
return *o.FlowId
}
// GetFlowIdOk returns a tuple with the FlowId field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *VersionedFlowCoordinates) GetFlowIdOk() (*string, bool) {
if o == nil || o.FlowId == nil {
return nil, false
}
return o.FlowId, true
}
// HasFlowId returns a boolean if a field has been set.
func (o *VersionedFlowCoordinates) HasFlowId() bool {
if o != nil && o.FlowId != nil {
return true
}
return false
}
// SetFlowId gets a reference to the given string and assigns it to the FlowId field.
func (o *VersionedFlowCoordinates) SetFlowId(v string) {
o.FlowId = &v
}
// GetVersion returns the Version field value if set, zero value otherwise.
func (o *VersionedFlowCoordinates) GetVersion() int32 {
if o == nil || o.Version == nil {
var ret int32
return ret
}
return *o.Version
}
// GetVersionOk returns a tuple with the Version field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *VersionedFlowCoordinates) GetVersionOk() (*int32, bool) {
if o == nil || o.Version == nil {
return nil, false
}
return o.Version, true
}
// HasVersion returns a boolean if a field has been set.
func (o *VersionedFlowCoordinates) HasVersion() bool {
if o != nil && o.Version != nil {
return true
}
return false
}
// SetVersion gets a reference to the given int32 and assigns it to the Version field.
func (o *VersionedFlowCoordinates) SetVersion(v int32) {
o.Version = &v
}
// GetLatest returns the Latest field value if set, zero value otherwise.
func (o *VersionedFlowCoordinates) GetLatest() bool {
if o == nil || o.Latest == nil {
var ret bool
return ret
}
return *o.Latest
}
// GetLatestOk returns a tuple with the Latest field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *VersionedFlowCoordinates) GetLatestOk() (*bool, bool) {
if o == nil || o.Latest == nil {
return nil, false
}
return o.Latest, true
}
// HasLatest returns a boolean if a field has been set.
func (o *VersionedFlowCoordinates) HasLatest() bool {
if o != nil && o.Latest != nil {
return true
}
return false
}
// SetLatest gets a reference to the given bool and assigns it to the Latest field.
func (o *VersionedFlowCoordinates) SetLatest(v bool) {
o.Latest = &v
}
func (o VersionedFlowCoordinates) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.RegistryUrl != nil {
toSerialize["registryUrl"] = o.RegistryUrl
}
if o.BucketId != nil {
toSerialize["bucketId"] = o.BucketId
}
if o.FlowId != nil {
toSerialize["flowId"] = o.FlowId
}
if o.Version != nil {
toSerialize["version"] = o.Version
}
if o.Latest != nil {
toSerialize["latest"] = o.Latest
}
return json.Marshal(toSerialize)
}
type NullableVersionedFlowCoordinates struct {
value *VersionedFlowCoordinates
isSet bool
}
func (v NullableVersionedFlowCoordinates) Get() *VersionedFlowCoordinates {
return v.value
}
func (v *NullableVersionedFlowCoordinates) Set(val *VersionedFlowCoordinates) {
v.value = val
v.isSet = true
}
func (v NullableVersionedFlowCoordinates) IsSet() bool {
return v.isSet
}
func (v *NullableVersionedFlowCoordinates) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableVersionedFlowCoordinates(val *VersionedFlowCoordinates) *NullableVersionedFlowCoordinates {
return &NullableVersionedFlowCoordinates{value: val, isSet: true}
}
func (v NullableVersionedFlowCoordinates) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableVersionedFlowCoordinates) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | model_versioned_flow_coordinates.go | 0.770292 | 0.427456 | model_versioned_flow_coordinates.go | starcoder |
package finnhub
import (
"encoding/json"
)
// EarningResult struct for EarningResult
//
// NOTE(review): this is an OpenAPI-generated optional-pointer model; the
// accessor boilerplate below follows the generator's standard pattern, so
// prefer regenerating from the spec over hand-editing.
type EarningResult struct {
	// Actual earning result.
	Actual *float32 `json:"actual,omitempty"`
	// Estimated earning.
	Estimate *float32 `json:"estimate,omitempty"`
	// Surprise - The difference between actual and estimate.
	Surprise *float32 `json:"surprise,omitempty"`
	// Surprise percent.
	SurprisePercent *float32 `json:"surprisePercent,omitempty"`
	// Reported period.
	Period *string `json:"period,omitempty"`
	// Company symbol.
	Symbol *string `json:"symbol,omitempty"`
}
// NewEarningResult instantiates a new EarningResult object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewEarningResult() *EarningResult {
this := EarningResult{}
return &this
}
// NewEarningResultWithDefaults instantiates a new EarningResult object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewEarningResultWithDefaults() *EarningResult {
this := EarningResult{}
return &this
}
// GetActual returns the Actual field value if set, zero value otherwise.
func (o *EarningResult) GetActual() float32 {
if o == nil || o.Actual == nil {
var ret float32
return ret
}
return *o.Actual
}
// GetActualOk returns a tuple with the Actual field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *EarningResult) GetActualOk() (*float32, bool) {
if o == nil || o.Actual == nil {
return nil, false
}
return o.Actual, true
}
// HasActual returns a boolean if a field has been set.
func (o *EarningResult) HasActual() bool {
if o != nil && o.Actual != nil {
return true
}
return false
}
// SetActual gets a reference to the given float32 and assigns it to the Actual field.
func (o *EarningResult) SetActual(v float32) {
o.Actual = &v
}
// GetEstimate returns the Estimate field value if set, zero value otherwise.
func (o *EarningResult) GetEstimate() float32 {
if o == nil || o.Estimate == nil {
var ret float32
return ret
}
return *o.Estimate
}
// GetEstimateOk returns a tuple with the Estimate field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *EarningResult) GetEstimateOk() (*float32, bool) {
if o == nil || o.Estimate == nil {
return nil, false
}
return o.Estimate, true
}
// HasEstimate returns a boolean if a field has been set.
func (o *EarningResult) HasEstimate() bool {
if o != nil && o.Estimate != nil {
return true
}
return false
}
// SetEstimate gets a reference to the given float32 and assigns it to the Estimate field.
func (o *EarningResult) SetEstimate(v float32) {
o.Estimate = &v
}
// GetSurprise returns the Surprise field value if set, zero value otherwise.
func (o *EarningResult) GetSurprise() float32 {
if o == nil || o.Surprise == nil {
var ret float32
return ret
}
return *o.Surprise
}
// GetSurpriseOk returns a tuple with the Surprise field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *EarningResult) GetSurpriseOk() (*float32, bool) {
if o == nil || o.Surprise == nil {
return nil, false
}
return o.Surprise, true
}
// HasSurprise returns a boolean if a field has been set.
func (o *EarningResult) HasSurprise() bool {
if o != nil && o.Surprise != nil {
return true
}
return false
}
// SetSurprise gets a reference to the given float32 and assigns it to the Surprise field.
func (o *EarningResult) SetSurprise(v float32) {
o.Surprise = &v
}
// GetSurprisePercent returns the SurprisePercent field value if set, zero value otherwise.
func (o *EarningResult) GetSurprisePercent() float32 {
if o == nil || o.SurprisePercent == nil {
var ret float32
return ret
}
return *o.SurprisePercent
}
// GetSurprisePercentOk returns a tuple with the SurprisePercent field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *EarningResult) GetSurprisePercentOk() (*float32, bool) {
if o == nil || o.SurprisePercent == nil {
return nil, false
}
return o.SurprisePercent, true
}
// HasSurprisePercent returns a boolean if a field has been set.
func (o *EarningResult) HasSurprisePercent() bool {
if o != nil && o.SurprisePercent != nil {
return true
}
return false
}
// SetSurprisePercent gets a reference to the given float32 and assigns it to the SurprisePercent field.
func (o *EarningResult) SetSurprisePercent(v float32) {
o.SurprisePercent = &v
}
// GetPeriod returns the Period field value if set, zero value otherwise.
func (o *EarningResult) GetPeriod() string {
if o == nil || o.Period == nil {
var ret string
return ret
}
return *o.Period
}
// GetPeriodOk returns a tuple with the Period field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *EarningResult) GetPeriodOk() (*string, bool) {
if o == nil || o.Period == nil {
return nil, false
}
return o.Period, true
}
// HasPeriod returns a boolean if a field has been set.
func (o *EarningResult) HasPeriod() bool {
if o != nil && o.Period != nil {
return true
}
return false
}
// SetPeriod gets a reference to the given string and assigns it to the Period field.
func (o *EarningResult) SetPeriod(v string) {
o.Period = &v
}
// GetSymbol returns the Symbol field value if set, zero value otherwise.
func (o *EarningResult) GetSymbol() string {
if o == nil || o.Symbol == nil {
var ret string
return ret
}
return *o.Symbol
}
// GetSymbolOk returns a tuple with the Symbol field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *EarningResult) GetSymbolOk() (*string, bool) {
if o == nil || o.Symbol == nil {
return nil, false
}
return o.Symbol, true
}
// HasSymbol returns a boolean if a field has been set.
func (o *EarningResult) HasSymbol() bool {
if o != nil && o.Symbol != nil {
return true
}
return false
}
// SetSymbol gets a reference to the given string and assigns it to the Symbol field.
func (o *EarningResult) SetSymbol(v string) {
o.Symbol = &v
}
func (o EarningResult) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.Actual != nil {
toSerialize["actual"] = o.Actual
}
if o.Estimate != nil {
toSerialize["estimate"] = o.Estimate
}
if o.Surprise != nil {
toSerialize["surprise"] = o.Surprise
}
if o.SurprisePercent != nil {
toSerialize["surprisePercent"] = o.SurprisePercent
}
if o.Period != nil {
toSerialize["period"] = o.Period
}
if o.Symbol != nil {
toSerialize["symbol"] = o.Symbol
}
return json.Marshal(toSerialize)
}
type NullableEarningResult struct {
value *EarningResult
isSet bool
}
func (v NullableEarningResult) Get() *EarningResult {
return v.value
}
func (v *NullableEarningResult) Set(val *EarningResult) {
v.value = val
v.isSet = true
}
func (v NullableEarningResult) IsSet() bool {
return v.isSet
}
func (v *NullableEarningResult) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableEarningResult(val *EarningResult) *NullableEarningResult {
return &NullableEarningResult{value: val, isSet: true}
}
func (v NullableEarningResult) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableEarningResult) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | model_earning_result.go | 0.785103 | 0.411643 | model_earning_result.go | starcoder |
package main
import (
"fmt"
"io/ioutil"
"math"
"os"
"strconv"
"strings"
)
// Coordinate is an integer point/vector in 3-D space.
type Coordinate struct {
	X, Y, Z int
}

// Position, Velocity and Acceleration are distinct named coordinate
// types so the three roles cannot be mixed up accidentally.
type Position Coordinate
type Velocity Coordinate
type Acceleration Coordinate

// Particle bundles a position, velocity and (constant) acceleration.
// The embedded fields are accessed as p.Position, p.Velocity, etc.
type Particle struct {
	Position
	Velocity
	Acceleration
}
// Step advances the particle one tick: the velocity picks up the
// acceleration, then the position moves by the new velocity.
// The receiver is left untouched; a fresh Particle is returned.
func (p *Particle) Step() *Particle {
	v := Velocity{
		X: p.Velocity.X + p.Acceleration.X,
		Y: p.Velocity.Y + p.Acceleration.Y,
		Z: p.Velocity.Z + p.Acceleration.Z,
	}
	pos := Position{
		X: p.Position.X + v.X,
		Y: p.Position.Y + v.Y,
		Z: p.Position.Z + v.Z,
	}
	return &Particle{Position: pos, Velocity: v, Acceleration: p.Acceleration}
}
// readTuple parses a comma-separated integer triple such as "-3,14,159"
// into its three components.
//
// Fixes two defects in the previous version: the error from an earlier
// strconv.Atoi call was overwritten by later calls (a bad middle
// component returned a nil error), and fewer than three components
// caused an index panic instead of an error.
func readTuple(tuple string) (x, y, z int, err error) {
	values := strings.FieldsFunc(tuple, func(r rune) bool { return r == ',' })
	if len(values) != 3 {
		return 0, 0, 0, fmt.Errorf("tuple %q: expected 3 components, got %d", tuple, len(values))
	}
	if x, err = strconv.Atoi(strings.TrimSpace(values[0])); err != nil {
		return 0, 0, 0, err
	}
	if y, err = strconv.Atoi(strings.TrimSpace(values[1])); err != nil {
		return 0, 0, 0, err
	}
	if z, err = strconv.Atoi(strings.TrimSpace(values[2])); err != nil {
		return 0, 0, 0, err
	}
	return x, y, z, nil
}
// readParticle parses one puzzle-input line of the form
//	p=<x,y,z>, v=<x,y,z>, a=<x,y,z>
// by splitting on '=' and slicing fixed offsets off each field: the
// position and velocity fields end with ">, v" / ">, a" (4 trailing
// characters dropped) while the acceleration field ends with only ">"
// (1 character dropped); the leading "<" is always dropped.
// NOTE(review): this assumes the exact AoC day-20 line format —
// malformed lines will panic on the slice bounds; confirm inputs are
// trusted before reusing elsewhere.
func readParticle(line string) (*Particle, error) {
	fields := strings.FieldsFunc(line, func(r rune) bool { return r == '=' })
	posTuple := fields[1]
	posX, posY, posZ, err := readTuple(posTuple[1 : len(posTuple)-4])
	if err != nil {
		return nil, err
	}
	velTuple := fields[2]
	velX, velY, velZ, err := readTuple(velTuple[1 : len(velTuple)-4])
	if err != nil {
		return nil, err
	}
	accTuple := fields[3]
	accX, accY, accZ, err := readTuple(accTuple[1 : len(accTuple)-1])
	if err != nil {
		return nil, err
	}
	particle := &Particle{
		Position:     Position{posX, posY, posZ},
		Velocity:     Velocity{velX, velY, velZ},
		Acceleration: Acceleration{accX, accY, accZ},
	}
	return particle, nil
}
// readParticles parses every input line into a Particle. On the first
// malformed line it stops and returns the partially filled slice
// together with the parse error.
func readParticles(lines []string) ([]*Particle, error) {
	particles := make([]*Particle, len(lines))
	for i, line := range lines {
		particle, err := readParticle(line)
		if err != nil {
			return particles, err
		}
		particles[i] = particle
	}
	return particles, nil
}
// main solves AoC 2017 day 20: reads particle definitions from stdin,
// prints the index of the particle that stays closest to the origin in
// the long run (part 1), then repeatedly removes colliding particles and
// prints the remaining count each tick (part 2).
func main() {
	input, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		panic(err)
	}
	lines := strings.FieldsFunc(string(input), func(r rune) bool { return r == '\n' })
	particles, err := readParticles(lines)
	if err != nil {
		panic(err)
	}
	// Part 1: asymptotically, the particle with the smallest acceleration
	// (Manhattan norm) stays closest to the origin; ties are broken by
	// the smallest initial velocity norm below.
	acc := math.Inf(1)
	accBuckets := map[float64][]int{}
	for i, particle := range particles {
		manhattan := math.Abs(float64(particle.Acceleration.X)) + math.Abs(float64(particle.Acceleration.Y)) + math.Abs(float64(particle.Acceleration.Z))
		if bucket, ok := accBuckets[manhattan]; ok {
			accBuckets[manhattan] = append(bucket, i)
		} else {
			accBuckets[manhattan] = []int{i}
		}
		if manhattan < acc {
			acc = manhattan
		}
	}
	// Tie-break among the minimal-acceleration particles by velocity norm.
	closest := 0
	vel := math.Inf(1)
	for _, i := range accBuckets[acc] {
		particle := particles[i]
		manhattan := math.Abs(float64(particle.Velocity.X)) + math.Abs(float64(particle.Velocity.Y)) + math.Abs(float64(particle.Velocity.Z))
		if manhattan < vel {
			vel = manhattan
			closest = i
		}
	}
	fmt.Println(closest)
	// In an ideal world, I would check the lines at each step to see if
	// there is any possibility of intersection, but instead I just ran
	// this for a long time until the length of the particle list seemed to
	// stabilize. Oh well.
	// NOTE(review): `done` is never set, so this loop runs forever; the
	// program must be terminated manually once the printed count stops
	// changing.
	done := false
	for !done {
		fmt.Println(len(particles))
		// Bucket particle indices by exact position to detect collisions.
		positionBuckets := map[Position][]int{}
		for i, particle := range particles {
			if bucket, ok := positionBuckets[particle.Position]; ok {
				positionBuckets[particle.Position] = append(bucket, i)
			} else {
				positionBuckets[particle.Position] = []int{i}
			}
		}
		// Any bucket holding more than one particle is a collision;
		// collect every involved index for removal.
		ids := map[int]bool{}
		for _, bucket := range positionBuckets {
			if len(bucket) > 1 {
				for _, id := range bucket {
					ids[id] = true
				}
			}
		}
		// Keep only the survivors and advance each one tick.
		tmp := make([]*Particle, 0, len(particles)-len(ids))
		for i, particle := range particles {
			if _, ok := ids[i]; !ok {
				tmp = append(tmp, particle.Step())
			}
		}
		particles = tmp
	}
} | 20/main.go | 0.577019 | 0.518424 | main.go | starcoder |
package collections
import (
"context"
"github.com/MontFerret/ferret/pkg/runtime/core"
"github.com/MontFerret/ferret/pkg/runtime/values"
)
type (
Iterator interface {
HasNext() bool
Next() (value core.Value, key core.Value, err error)
}
Iterable interface {
Iterate() Iterator
}
IterableExpression interface {
core.Expression
Iterate(ctx context.Context, scope *core.Scope) (Iterator, error)
}
SliceIterator struct {
values []core.Value
pos int
}
MapIterator struct {
values map[string]core.Value
keys []string
pos int
}
ArrayIterator struct {
values *values.Array
pos int
}
ObjectIterator struct {
values *values.Object
keys []string
pos int
}
HTMLNodeIterator struct {
values values.HTMLNode
pos int
}
)
// ToIterator builds an Iterator for the given runtime value.
// Supported types: Array, Object and HTML nodes (document/element);
// any other type yields a TypeError listing the accepted types.
func ToIterator(value core.Value) (Iterator, error) {
	switch value.Type() {
	case core.ArrayType:
		return NewArrayIterator(value.(*values.Array)), nil
	case core.ObjectType:
		return NewObjectIterator(value.(*values.Object)), nil
	case core.HTMLElementType, core.HTMLDocumentType:
		return NewHTMLNodeIterator(value.(values.HTMLNode)), nil
	default:
		return nil, core.TypeError(
			value.Type(),
			core.ArrayType,
			core.ObjectType,
			core.HTMLDocumentType,
			core.HTMLElementType,
		)
	}
}
// ToSlice drains the iterator and collects every value into a slice,
// stopping at (and returning) the first iteration error.
func ToSlice(iterator Iterator) ([]core.Value, error) {
	result := make([]core.Value, 0, 10)
	for {
		if !iterator.HasNext() {
			return result, nil
		}
		value, _, err := iterator.Next()
		if err != nil {
			return nil, err
		}
		result = append(result, value)
	}
}
// ToMap drains the iterator into a map keyed by the string form of each
// key, stopping at (and returning) the first iteration error.
func ToMap(iterator Iterator) (map[string]core.Value, error) {
	out := map[string]core.Value{}
	for iterator.HasNext() {
		value, key, err := iterator.Next()
		if err != nil {
			return nil, err
		}
		out[key.String()] = value
	}
	return out, nil
}
// ToArray drains the iterator into a runtime Array, stopping at (and
// returning) the first iteration error.
func ToArray(iterator Iterator) (*values.Array, error) {
	out := values.NewArray(10)
	for iterator.HasNext() {
		value, _, err := iterator.Next()
		if err != nil {
			return nil, err
		}
		out.Push(value)
	}
	return out, nil
}
// NewSliceIterator returns an iterator over the elements of input.
func NewSliceIterator(input []core.Value) *SliceIterator {
	return &SliceIterator{values: input, pos: 0}
}

// HasNext reports whether at least one element remains.
func (iterator *SliceIterator) HasNext() bool {
	return iterator.pos < len(iterator.values)
}

// Next returns the next element and its integer index, or ErrExhausted
// once the slice is fully consumed.
func (iterator *SliceIterator) Next() (core.Value, core.Value, error) {
	if iterator.pos >= len(iterator.values) {
		return values.None, values.None, ErrExhausted
	}
	idx := iterator.pos
	iterator.pos++
	return iterator.values[idx], values.NewInt(idx), nil
}
// NewMapIterator returns an iterator over the entries of input.
// Key order is non-deterministic, matching Go map iteration semantics.
func NewMapIterator(input map[string]core.Value) *MapIterator {
	return &MapIterator{input, nil, 0}
}

// ensureKeys lazily snapshots the map keys on first use, so that
// construction stays cheap for iterators that are never consumed.
func (iterator *MapIterator) ensureKeys() {
	if iterator.keys == nil {
		keys := make([]string, 0, len(iterator.values))
		for k := range iterator.values {
			keys = append(keys, k)
		}
		iterator.keys = keys
	}
}

// HasNext reports whether at least one entry remains.
func (iterator *MapIterator) HasNext() bool {
	iterator.ensureKeys()
	return len(iterator.keys) > iterator.pos
}

// Next returns the next value and its key, or ErrExhausted when done.
// Keys are also initialized here, fixing the previous behavior where
// calling Next without a prior HasNext returned ErrExhausted even for a
// non-empty map (key initialization only happened in HasNext).
func (iterator *MapIterator) Next() (core.Value, core.Value, error) {
	iterator.ensureKeys()
	if len(iterator.keys) > iterator.pos {
		key := iterator.keys[iterator.pos]
		val := iterator.values[key]
		iterator.pos++
		return val, values.NewString(key), nil
	}
	return values.None, values.None, ErrExhausted
}
// NewArrayIterator returns an iterator over the elements of input.
func NewArrayIterator(input *values.Array) *ArrayIterator {
	return &ArrayIterator{input, 0}
}

// HasNext reports whether at least one element remains.
func (iterator *ArrayIterator) HasNext() bool {
	return int(iterator.values.Length()) > iterator.pos
}

// Next returns the next element and its integer index, or ErrExhausted
// once the array is fully consumed.
func (iterator *ArrayIterator) Next() (core.Value, core.Value, error) {
	if int(iterator.values.Length()) > iterator.pos {
		idx := iterator.pos
		val := iterator.values.Get(values.NewInt(idx))
		iterator.pos++
		return val, values.NewInt(idx), nil
	}
	return values.None, values.None, ErrExhausted
}
// NewObjectIterator returns an iterator over the key/value pairs of input.
func NewObjectIterator(input *values.Object) *ObjectIterator {
	return &ObjectIterator{input, nil, 0}
}

// ensureKeys lazily snapshots the object's keys on first use.
func (iterator *ObjectIterator) ensureKeys() {
	if iterator.keys == nil {
		iterator.keys = iterator.values.Keys()
	}
}

// HasNext reports whether at least one pair remains.
func (iterator *ObjectIterator) HasNext() bool {
	iterator.ensureKeys()
	return len(iterator.keys) > iterator.pos
}

// Next returns the next value and its key, or ErrExhausted when done.
// Keys are also initialized here, fixing the previous behavior where
// calling Next without a prior HasNext returned ErrExhausted even for a
// non-empty object (key initialization only happened in HasNext).
func (iterator *ObjectIterator) Next() (core.Value, core.Value, error) {
	iterator.ensureKeys()
	if len(iterator.keys) > iterator.pos {
		key := iterator.keys[iterator.pos]
		val, _ := iterator.values.Get(values.NewString(key))
		iterator.pos++
		return val, values.NewString(key), nil
	}
	return values.None, values.None, ErrExhausted
}
func NewHTMLNodeIterator(input values.HTMLNode) *HTMLNodeIterator {
return &HTMLNodeIterator{input, 0}
}
func (iterator *HTMLNodeIterator) HasNext() bool {
return iterator.values.Length() > values.NewInt(iterator.pos)
}
func (iterator *HTMLNodeIterator) Next() (core.Value, core.Value, error) {
if iterator.values.Length() > values.NewInt(iterator.pos) {
idx := iterator.pos
val := iterator.values.GetChildNode(values.NewInt(idx))
iterator.pos++
return val, values.NewInt(idx), nil
}
return values.None, values.None, ErrExhausted
} | pkg/runtime/collections/iterator.go | 0.663342 | 0.421314 | iterator.go | starcoder |
package csmt
import (
"fmt"
"github.com/phoreproject/synapse/chainhash"
)
// Compact Sparse Merkle Trees
// Paper: https://eprint.iacr.org/2018/955.pdf
// Key is the key type of a CSMT.
type Key = chainhash.Hash

// Hash is the hash type of a CSMT.
type Hash = Key

// NodeHashFunction computes the hash of an inner node from the hashes of
// its two children.
type NodeHashFunction func(*Hash, *Hash) Hash

// CSMT is a Compact Sparse Merkle Tree.
// It implements interface SMT.
type CSMT struct {
	root             Node // nil for an empty tree
	nodeHashFunction NodeHashFunction
}

// NewCSMT creates an empty CSMT that will hash inner nodes with the
// given function.
func NewCSMT(nodeHashFunction NodeHashFunction) CSMT {
	return CSMT{
		root:             nil,
		nodeHashFunction: nodeHashFunction,
	}
}

// DebugToJSONString returns a JSON representation of the tree, for
// debugging purposes; an empty tree renders as "null".
func (tree *CSMT) DebugToJSONString() string {
	if tree.root == nil {
		return "null"
	}
	return tree.root.DebugToJSONString()
}
// GetRootHash returns the hash of the root node.
// NOTE(review): calling this on an empty tree (nil root) panics with a
// nil-pointer dereference — callers must insert at least one leaf first.
func (tree *CSMT) GetRootHash() *Hash {
return tree.root.GetHash()
}
// Insert adds a new leaf with the given hash and associated value. It
// returns an error when a leaf with the same key already exists.
func (tree *CSMT) Insert(leafHash *Hash, value interface{}) error {
	if tree.root == nil {
		tree.root = tree.createLeafNode(leafHash, value)
		return nil
	}
	node, err := tree.doInsert(tree.root, leafHash, leafHash, value)
	if err != nil {
		return err
	}
	tree.root = node
	return nil
}
func (tree *CSMT) createLeafNode(leafHash *Hash, value interface{}) Node {
return NewLeafNode(leafHash, value)
}
func (tree *CSMT) createInnerNode(left Node, right Node) Node {
return NewInnerNode(tree.nodeHashFunction(left.GetHash(), right.GetHash()), left, right)
}
// doInsert recursively inserts a new leaf below node and returns the
// rebuilt (copy-on-write) subtree. key is the navigation key (identical to
// leafHash for this tree) and value is the payload stored in the new leaf.
func (tree *CSMT) doInsert(node Node, key *Key, leafHash *Hash, value interface{}) (Node, error) {
// Reached a leaf: either the key collides (error), or the existing leaf
// and the new one become siblings ordered by key.
if node.IsLeaf() {
if key.IsEqual(node.GetKey()) {
return nil, fmt.Errorf("key exists")
}
newLeaf := tree.createLeafNode(leafHash, value)
if compareKey(key, node.GetKey()) < 0 {
return tree.createInnerNode(newLeaf, node), nil
}
return tree.createInnerNode(node, newLeaf), nil
}
left := node.(InnerNode).GetLeft()
right := node.(InnerNode).GetRight()
// XOR distance decides which child subtree the key belongs to.
leftDistance := distance(key, left.GetKey())
rightDistance := distance(key, right.GetKey())
// Equal distances mean the key belongs to neither subtree: attach the new
// leaf beside the whole current node, ordered against the subtree minimum.
if leftDistance == rightDistance {
newLeaf := tree.createLeafNode(leafHash, value)
minKey := getMinKey(left.GetKey(), right.GetKey())
if compareKey(key, minKey) < 0 {
return tree.createInnerNode(newLeaf, node), nil
}
return tree.createInnerNode(node, newLeaf), nil
}
// Otherwise recurse into the closer child and rebuild this inner node.
if leftDistance < rightDistance {
newNode, err := tree.doInsert(left, key, leafHash, value)
if err != nil {
return nil, err
}
return tree.createInnerNode(newNode, right), nil
}
newNode, err := tree.doInsert(right, key, leafHash, value)
if err != nil {
return nil, err
}
return tree.createInnerNode(left, newNode), nil
}
// GetValue returns the value stored under leafHash, or an error when the
// tree does not contain that key.
func (tree *CSMT) GetValue(leafHash *Hash) (interface{}, error) {
	switch {
	case tree.root == nil:
		return nil, fmt.Errorf("No such key")
	case tree.root.IsLeaf():
		if leafHash.IsEqual(tree.root.GetHash()) {
			return tree.root.(LeafNode).GetValue(), nil
		}
		return nil, fmt.Errorf("No such key")
	default:
		return tree.doGetValue(tree.root.(InnerNode), leafHash)
	}
}
func (tree *CSMT) doGetValue(node InnerNode, leafHash *Hash) (interface{}, error) {
left := node.GetLeft()
right := node.GetRight()
if left.IsLeaf() && leafHash.IsEqual(left.GetHash()) {
return left.(LeafNode).GetValue(), nil
}
if right.IsLeaf() && leafHash.IsEqual(right.GetHash()) {
return right.(LeafNode).GetValue(), nil
}
leftDistance := distance(leafHash, left.GetKey())
rightDistance := distance(leafHash, right.GetKey())
if leftDistance == rightDistance {
return nil, fmt.Errorf("No such key")
}
if leftDistance < rightDistance {
if left.IsLeaf() {
return nil, fmt.Errorf("No such key")
}
return tree.doGetValue(left.(InnerNode), leafHash)
}
if leftDistance > rightDistance {
if right.IsLeaf() {
return nil, fmt.Errorf("No such key")
}
return tree.doGetValue(right.(InnerNode), leafHash)
}
return nil, fmt.Errorf("Illegal state")
}
// Remove deletes the leaf stored under leafHash. It returns an error when
// the tree does not contain that key.
func (tree *CSMT) Remove(leafHash *Hash) error {
	switch {
	case tree.root == nil:
		return fmt.Errorf("No such key")
	case tree.root.IsLeaf():
		if leafHash.IsEqual(tree.root.GetHash()) {
			tree.root = nil
			return nil
		}
		return fmt.Errorf("No such key")
	default:
		newRoot, err := tree.doRemove(tree.root.(InnerNode), leafHash)
		if err != nil {
			return err
		}
		tree.root = newRoot
		return nil
	}
}
func (tree *CSMT) doRemove(node InnerNode, leafHash *Hash) (Node, error) {
left := node.GetLeft()
right := node.GetRight()
if left.IsLeaf() && leafHash.IsEqual(left.GetHash()) {
return right, nil
}
if right.IsLeaf() && leafHash.IsEqual(right.GetHash()) {
return left, nil
}
leftDistance := distance(leafHash, left.GetKey())
rightDistance := distance(leafHash, right.GetKey())
if leftDistance == rightDistance {
return nil, fmt.Errorf("No such key")
}
if leftDistance < rightDistance {
if left.IsLeaf() {
return nil, fmt.Errorf("No such key")
}
newNode, err := tree.doRemove(left.(InnerNode), leafHash)
if err != nil {
return nil, err
}
return tree.createInnerNode(newNode, right), nil
}
if leftDistance > rightDistance {
if right.IsLeaf() {
return nil, fmt.Errorf("No such key")
}
newNode, err := tree.doRemove(right.(InnerNode), leafHash)
if err != nil {
return nil, err
}
return tree.createInnerNode(left, newNode), nil
}
return nil, fmt.Errorf("Illegal state")
}
// GetProof gets the proof for hash
func (tree *CSMT) GetProof(leafHash *Hash) Proof {
if tree.root == nil {
return NonMembershipProof{
leftBoundProof: nil,
rightBoundProof: nil,
}
}
if tree.root.IsLeaf() {
rootProof := NewMembershipProof([]*MembershipProofEntry{})
if leafHash.IsEqual(tree.root.GetHash()) {
return rootProof
}
if compareKey(leafHash, tree.root.GetKey()) < 0 {
return NonMembershipProof{
leftBoundProof: nil,
rightBoundProof: &rootProof,
}
}
return NonMembershipProof{
leftBoundProof: &rootProof,
rightBoundProof: nil,
}
}
castedRoot := tree.root.(InnerNode)
leftBound, rightBound := tree.findBounds(castedRoot, leafHash)
if leftBound != nil && leftBound.IsEqual(rightBound) {
return *tree.findProof(castedRoot, leftBound)
}
if leftBound != nil && rightBound != nil {
return NonMembershipProof{
leftBoundProof: tree.findProof(castedRoot, leftBound),
rightBoundProof: tree.findProof(castedRoot, rightBound),
}
}
if leftBound == nil {
return NonMembershipProof{
leftBoundProof: nil,
rightBoundProof: tree.findProof(castedRoot, rightBound),
}
}
return NonMembershipProof{
leftBoundProof: tree.findProof(castedRoot, leftBound),
rightBoundProof: nil,
}
}
// findProof builds the membership proof (the sibling hashes along the
// search path) for key, which must be present in the subtree rooted at root.
func (tree *CSMT) findProof(root InnerNode, key *Key) *MembershipProof {
	left := root.GetLeft()
	right := root.GetRight()
	// Descend towards the child whose key is closer by XOR distance. The
	// leaf returned by the helper is unused here, so it is discarded via the
	// blank identifier instead of the previous `_ = resultNode` workaround.
	var entries []*MembershipProofEntry
	if distance(key, left.GetKey()) < distance(key, right.GetKey()) {
		entries, _ = tree.findProofHelper(right, DirLeft, left, key)
	} else {
		entries, _ = tree.findProofHelper(left, DirRight, right, key)
	}
	proof := NewMembershipProof(entries)
	return &proof
}
func (tree *CSMT) findProofHelper(sibling Node, direction int, node Node, key *Key) ([]*MembershipProofEntry, LeafNode) {
if node.IsLeaf() {
return []*MembershipProofEntry{
{
hash: sibling.GetHash(),
direction: reverseDirection(direction),
},
}, node.(LeafNode)
}
left := node.(InnerNode).GetLeft()
right := node.(InnerNode).GetRight()
leftDistance := distance(key, left.GetKey())
rightDistance := distance(key, right.GetKey())
var resultEntries []*MembershipProofEntry
var resultNode LeafNode
if leftDistance < rightDistance {
resultEntries, resultNode = tree.findProofHelper(right, DirLeft, left, key)
} else {
resultEntries, resultNode = tree.findProofHelper(left, DirRight, right, key)
}
resultEntries = append(resultEntries, &MembershipProofEntry{hash: sibling.GetHash(), direction: reverseDirection(direction)})
return resultEntries, resultNode
}
func (tree *CSMT) findBounds(root InnerNode, key *Key) (*Key, *Key) {
left := root.GetLeft()
right := root.GetRight()
leftDistance := distance(key, left.GetKey())
rightDistance := distance(key, right.GetKey())
if leftDistance == rightDistance {
if compareKey(key, root.GetKey()) > 0 {
return right.GetKey(), nil
}
return nil, left.GetKey()
}
if leftDistance < rightDistance {
return tree.findBoundsBySibling(right, DirLeft, left, key)
}
return tree.findBoundsBySibling(left, DirRight, right, key)
}
func (tree *CSMT) findBoundsBySibling(sibling Node, direction int, node Node, key *Key) (*Key, *Key) {
if node.IsLeaf() {
if key.IsEqual(node.GetKey()) {
return key, key
}
return tree.findBoundsHelper(key, node, direction, sibling)
}
left := node.(InnerNode).GetLeft()
right := node.(InnerNode).GetRight()
leftDistance := distance(key, left.GetKey())
rightDistance := distance(key, right.GetKey())
if leftDistance == rightDistance {
return tree.findBoundsHelper(key, node, direction, sibling)
}
var leftBound, rightBound *Key
if leftDistance < rightDistance {
leftBound, rightBound = tree.findBoundsBySibling(right, DirLeft, left, key)
} else {
leftBound, rightBound = tree.findBoundsBySibling(left, DirRight, right, key)
}
if rightBound == nil && direction == DirLeft {
return leftBound, minInSubtree(sibling)
}
if leftBound == nil && direction == DirRight {
return maxInSubtree(sibling), rightBound
}
return leftBound, rightBound
}
func (tree *CSMT) findBoundsHelper(key *Key, node Node, direction int, sibling Node) (*Key, *Key) {
relation := compareKey(key, node.GetKey())
if relation > 0 && direction == DirLeft {
return node.GetKey(), minInSubtree(sibling)
}
if relation > 0 && direction == DirRight {
return node.GetKey(), nil
}
if relation <= 0 && direction == DirLeft {
return nil, minInSubtree(node)
}
return maxInSubtree(sibling), minInSubtree(node)
}
func maxInSubtree(node Node) *Key {
return node.GetKey()
}
func minInSubtree(node Node) *Key {
if node.IsLeaf() {
return node.GetKey()
}
return minInSubtree(node.(InnerNode).GetLeft())
}
// getMaxKey returns the lexicographically larger of the two keys,
// preferring keyA when they are equal.
func getMaxKey(keyA *Key, keyB *Key) *Key {
	if compareKey(keyA, keyB) >= 0 {
		return keyA
	}
	return keyB
}
// getMinKey returns the lexicographically smaller of the two keys,
// preferring keyA when they are equal.
func getMinKey(keyA *Key, keyB *Key) *Key {
	if compareKey(keyA, keyB) <= 0 {
		return keyA
	}
	return keyB
}
// compareKey compares two keys lexicographically, byte by byte. It returns
// a negative value when keyA < keyB, a positive value when keyA > keyB and
// zero when they are equal.
func compareKey(keyA *Key, keyB *Key) int {
	for i := 0; i < chainhash.HashSize; i++ {
		if d := int(keyA[i]) - int(keyB[i]); d != 0 {
			return d
		}
	}
	return 0
}
// distance returns the bit position (counted from the least significant
// bit, 1-based) of the highest set bit of keyA XOR keyB — i.e. a fast
// floor(log2(keyA^keyB))+1 — and 0 when the keys are equal.
// Note the keys must be the key in the node, not the hash in the node.
func distance(keyA *Key, keyB *Key) int {
	for i := 0; i < chainhash.HashSize; i++ {
		if b := keyA[i] ^ keyB[i]; b != 0 {
			// Bytes below index i contribute (HashSize-i-1)*8 bits; the
			// first differing byte contributes 8 - LeadingZeros8(b).
			return (chainhash.HashSize-i)*8 - bits.LeadingZeros8(b)
		}
	}
	return 0
}
// reverseDirection flips DirLeft to DirRight; every other value maps to
// DirLeft, mirroring the original behaviour exactly.
func reverseDirection(direction int) int {
	switch direction {
	case DirLeft:
		return DirRight
	default:
		return DirLeft
	}
} | csmt/csmt.go | 0.799912 | 0.441492 | csmt.go | starcoder |
package common
import (
"fmt"
"math/big"
"math/rand"
"reflect"
"github.com/ethereum/go-ethereum/common/hexutil"
)
const (
HashLength = 32
AddressLength = 20
)
type (
// Hash represents the 32 byte Keccak256 hash of arbitrary data.
Hash [HashLength]byte
// Address represents the 20 byte address of an Ethereum account.
Address [AddressLength]byte
)
func BytesToHash(b []byte) Hash {
var h Hash
h.SetBytes(b)
return h
}
func StringToHash(s string) Hash { return BytesToHash([]byte(s)) }
func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) }
func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) }
// Don't use the default 'String' method in case we want to overwrite
// Get the string representation of the underlying hash
func (h Hash) Str() string { return string(h[:]) }
func (h Hash) Bytes() []byte { return h[:] }
func (h Hash) Big() *big.Int { return Bytes2Big(h[:]) }
func (h Hash) Hex() string { return hexutil.Encode(h[:]) }
// UnmarshalJSON parses a hash in its hex from to a hash.
func (h *Hash) UnmarshalJSON(input []byte) error {
return hexutil.UnmarshalJSON("Hash", input, h[:])
}
// Serialize given hash to JSON
func (h Hash) MarshalJSON() ([]byte, error) {
return hexutil.Bytes(h[:]).MarshalJSON()
}
// SetBytes sets the hash to the value of b. If b is longer than HashLength,
// only the trailing HashLength bytes are kept (the original comment claimed
// a panic, but the code truncates instead). Shorter input is right-aligned;
// the leading bytes of h are left unchanged.
func (h *Hash) SetBytes(b []byte) {
if len(b) > len(h) {
// Keep only the least significant (trailing) HashLength bytes.
b = b[len(b)-HashLength:]
}
copy(h[HashLength-len(b):], b)
}
// Set string `s` to h. If s is larger than len(h) it will panic
func (h *Hash) SetString(s string) { h.SetBytes([]byte(s)) }
// Set copies the bytes of other into h.
func (h *Hash) Set(other Hash) {
	copy(h[:], other[:])
}
// Generate implements testing/quick.Generator.
func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value {
m := rand.Intn(len(h))
for i := len(h) - 1; i > m; i-- {
h[i] = byte(rand.Uint32())
}
return reflect.ValueOf(h)
}
func EmptyHash(h Hash) bool {
return h == Hash{}
}
/////////// Address
func BytesToAddress(b []byte) Address {
var a Address
a.SetBytes(b)
return a
}
func StringToAddress(s string) Address { return BytesToAddress([]byte(s)) }
func BigToAddress(b *big.Int) Address { return BytesToAddress(b.Bytes()) }
func HexToAddress(s string) Address { return BytesToAddress(FromHex(s)) }
// IsHexAddress verifies whether a string can represent a valid hex-encoded
// Ethereum address or not. Both the 0x-prefixed form and the bare
// 40-character form are accepted.
func IsHexAddress(s string) bool {
	if len(s) == 2*AddressLength {
		s = "0x" + s
	}
	return len(s) == 2+2*AddressLength && IsHex(s)
}
// Get the string representation of the underlying address
func (a Address) Str() string { return string(a[:]) }
func (a Address) Bytes() []byte { return a[:] }
func (a Address) Big() *big.Int { return Bytes2Big(a[:]) }
func (a Address) Hash() Hash { return BytesToHash(a[:]) }
func (a Address) Hex() string { return hexutil.Encode(a[:]) }
// SetBytes sets the address to the value of b. If b is longer than
// AddressLength, only the trailing AddressLength bytes are kept (the
// original comment claimed a panic, but the code truncates instead).
// Shorter input is right-aligned; leading bytes of a are left unchanged.
func (a *Address) SetBytes(b []byte) {
if len(b) > len(a) {
// Keep only the least significant (trailing) AddressLength bytes.
b = b[len(b)-AddressLength:]
}
copy(a[AddressLength-len(b):], b)
}
// Set string `s` to a. If s is larger than len(a) it will panic
func (a *Address) SetString(s string) { a.SetBytes([]byte(s)) }
// Set copies the bytes of other into a.
func (a *Address) Set(other Address) {
	copy(a[:], other[:])
}
// Serialize given address to JSON
func (a Address) MarshalJSON() ([]byte, error) {
return hexutil.Bytes(a[:]).MarshalJSON()
}
// Parse address from raw json data
func (a *Address) UnmarshalJSON(input []byte) error {
return hexutil.UnmarshalJSON("Address", input, a[:])
}
// PP pretty-prints a byte slice in the following format:
// hex(value[:4])...hex(value[len(value)-4:])
// Slices of eight bytes or fewer are printed in full.
func PP(value []byte) string {
	if len(value) <= 8 {
		return Bytes2Hex(value)
	}
	// Bug fix: the tail must be the last four bytes (a slice), not the
	// single byte at index len(value)-4 as the original code formatted.
	return fmt.Sprintf("%x...%x", value[:4], value[len(value)-4:])
} | vendor/github.com/ethereum/go-ethereum/common/types.go | 0.802942 | 0.49408 | types.go | starcoder |
package dtruncate
import (
"github.com/lawrencewoodman/ddataset"
"github.com/lawrencewoodman/ddataset/internal"
)
// DTruncate represents a truncated Dataset
type DTruncate struct {
dataset ddataset.Dataset
numRecords int64
isReleased bool
}
// DTruncateConn represents a connection to a DTruncate Dataset
type DTruncateConn struct {
dataset *DTruncate
conn ddataset.Conn
recordNum int64
err error
}
// New creates a new DTruncate Dataset
func New(dataset ddataset.Dataset, numRecords int64) ddataset.Dataset {
return &DTruncate{
dataset: dataset,
numRecords: numRecords,
isReleased: false,
}
}
// Open creates a connection to the Dataset.
func (d *DTruncate) Open() (ddataset.Conn, error) {
// A released dataset can no longer be opened.
if d.isReleased {
return nil, ddataset.ErrReleased
}
conn, err := d.dataset.Open()
if err != nil {
return nil, err
}
// The connection starts before the first record with no pending error.
return &DTruncateConn{
dataset: d,
conn: conn,
recordNum: 0,
err: nil,
}, nil
}
// Fields returns the field names used by the Dataset
func (d *DTruncate) Fields() []string {
return d.dataset.Fields()
}
// NumRecords returns the number of records in the Dataset. If there is
// a problem getting the number of records it returns -1. NOTE: The returned
// value can change if the underlying Dataset changes.
func (d *DTruncate) NumRecords() int64 {
return internal.CountNumRecords(d)
}
// Release releases any resources associated with the Dataset d,
// rendering it unusable in the future. Calling it a second time
// returns ErrReleased.
func (d *DTruncate) Release() error {
	if d.isReleased {
		return ddataset.ErrReleased
	}
	d.isReleased = true
	return nil
}
// Next reports whether another Record can be read, honouring both the
// truncation limit and the error state of the underlying connection.
func (c *DTruncateConn) Next() bool {
	if c.conn.Err() != nil || c.recordNum >= c.dataset.numRecords {
		return false
	}
	c.recordNum++
	return c.conn.Next()
}
// Err returns any errors from the connection
func (c *DTruncateConn) Err() error {
return c.conn.Err()
}
// Read returns the current Record
func (c *DTruncateConn) Read() ddataset.Record {
return c.conn.Read()
}
// Close closes the connection
func (c *DTruncateConn) Close() error {
return c.conn.Close()
} | dtruncate/dtruncate.go | 0.744842 | 0.496216 | dtruncate.go | starcoder |
package rgb8
// Conversion of different RGB colorspaces with their native illuminators (reference whites) to CIE XYZ scaled to 256 and back.
// RGB values must be linear and in the nominal range [0, 255].
// XYZ values are usually in [0, 1e9] but may slightly outside.
// To get quick and dirty XYZ approximations, divide by 1e9, otherwise use the float64 version of these functions.
// Ref.: [24]
// AdobeToXYZ converts from Adobe RGB (1998) with D65 illuminator to CIE XYZ.
// RGB inputs must be linear in [0, 255]; results are on the 1e9 scale.
func AdobeToXYZ(r, g, b uint8) (x, y, z int) {
	ri, gi, bi := int(r), int(g), int(b)
	return 2261689*ri + 727662*gi + 737981*bi,
		1166183*ri + 2460192*gi + 295192*bi,
		106016*ri + 277204*gi + 3886699*bi
}
// XYZToAdobe converts from CIE XYZ to Adobe RGB (1998) with D65 illuminator.
// XYZ inputs are expected on the 1e9 scale; out-of-gamut channels are
// clamped to [0, 255].
func XYZToAdobe(x, y, z int) (r, g, b uint8) {
	clamp := func(v int) uint8 {
		if v < 0 {
			return 0
		}
		if v > 255 {
			return 255
		}
		return uint8(v)
	}
	x, y, z = x/1e4, y/1e4, z/1e4
	r = clamp((5205*x - 1440*y - 878*z) / 1e6)
	g = clamp((-2471*x + 4783*y + 105*z) / 1e6)
	b = clamp((34*x - 301*y + 2589*z) / 1e6)
	return
}
// AppleToXYZ converts from Apple RGB with D65 illuminator to CIE XYZ.
func AppleToXYZ(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 1763642*rr + 1240190*gg + 723500*bb
y = 959421*rr + 2635405*gg + 326741*bb
z = 98763*rr + 553656*gg + 3617501*bb
return
}
// XYZToApple converts from CIE XYZ to Apple RGB with D65 illuminator.
func XYZToApple(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (7526*x - 3287*y - 1208*z) / 1e6
gg := (-2767*x + 5076*y + 94*z) / 1e6
bb := (218*x - 687*y + 2782*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// BestToXYZ converts from Best RGB with D50 illuminator to CIE XYZ.
func BestToXYZ(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 2481057*rr + 802179*gg + 498018*bb
y = 895909*rr + 2891577*gg + 134081*bb
z = 37310*gg + 3198807*bb
return
}
// XYZToBest converts from CIE XYZ to Best RGB with D50 illuminator.
func XYZToBest(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (4475*x - 1233*y - 645*z) / 1e6
gg := (-1387*x + 3842*y + 54*z) / 1e6
bb := (16*x - 44*y + 3125*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// BetaToXYZ converts from Beta RGB with D50 illuminator to CIE XYZ.
func BetaToXYZ(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 2632367*rr + 684640*gg + 464246*bb
y = 1189304*rr + 2603082*gg + 129181*bb
z = 159611*gg + 3076505*bb
return
}
// XYZToBeta converts from CIE XYZ to Beta RGB with D50 illuminator.
func XYZToBeta(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (4292*x - 1092*y - 601*z) / 1e6
gg := (-1966*x + 4351*y + 113*z) / 1e6
bb := (102*x - 225*y + 3244*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// BruceToXYZ converts from Bruce RGB with D65 illuminator to CIE XYZ.
func BruceToXYZ(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 1833004*rr + 1154710*gg + 739618*bb
y = 945143*rr + 2680578*gg + 295847*bb
z = 85921*rr + 288677*gg + 3895321*bb
return
}
// XYZToBruce converts from CIE XYZ to Bruce RGB with D65 illuminator.
func XYZToBruce(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (7000*x - 2896*y - 1109*z) / 1e6
gg := (-2471*x + 4783*y + 105*z) / 1e6
bb := (28*x - 290*y + 2583*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// CIEToXYZ converts from CIE RGB with E illuminator to CIE XYZ.
func CIEToXYZ(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 1916541*rr + 1218354*gg + 786673*bb
y = 690997*rr + 3188175*gg + 42395*bb
z = 40018*gg + 3881549*bb
return
}
// XYZToCIE converts from CIE XYZ to CIE RGB with E illuminator.
func XYZToCIE(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (6045*x - 2295*y - 1200*z) / 1e6
gg := (-1310*x + 3634*y + 225*z) / 1e6
bb := (13*x - 37*y + 2573*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// ColorMatchToXYZ converts from ColorMatch RGB with D50 illuminator to CIE XYZ.
func ColorMatchToXYZ(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 1997427*rr + 1258459*gg + 525369*bb
y = 1077976*rr + 2580907*gg + 262684*bb
z = 95115*rr + 426596*gg + 2714405*bb
return
}
// XYZToColorMatch converts from CIE XYZ to ColorMatch RGB with D50 illuminator.
func XYZToColorMatch(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (6737*x - 3119*y - 1002*z) / 1e6
gg := (-2835*x + 5250*y + 40*z) / 1e6
bb := (209*x - 715*y + 3712*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
//DonToXYZ converts from Don RGB-4 with D50 illuminator to CIE XYZ.
func DonToXYZ(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 2532435*rr + 758239*gg + 490579*bb
y = 1091567*rr + 2697922*gg + 132079*bb
z = 14554*rr + 70533*gg + 3151029*bb
return
}
// XYZToDon converts from CIE XYZ to Don RGB-4 with D50 illuminator.
func XYZToDon(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (4488*x - 1244*y - 646*z) / 1e6
gg := (-1817*x + 4214*y + 106*z) / 1e6
bb := (19*x - 88*y + 3174*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// ECIToXYZ converts from ECI RGB with D50 illuminator to CIE XYZ.
func ECIToXYZ(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 2549820*rr + 698342*gg + 533091*bb
y = 1255881*rr + 2361063*gg + 304623*bb
z = 266035*gg + 2970082*bb
return
}
// XYZToECI converts from CIE XYZ to ECI RGB with D50 illuminator.
func XYZToECI(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (4546*x - 1267*y - 685*z) / 1e6
gg := (-2446*x + 4966*y - 70*z) / 1e6
bb := (219*x - 444*y + 3373*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// EktaSpaceToXYZ converts from Ekta Space PS5 with D50 illuminator to CIE XYZ.
func EktaSpaceToXYZ(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 2328985*rr + 1070510*gg + 381758*bb
y = 1022072*rr + 2882143*gg + 17352*bb
z = 164693*gg + 3071423*bb
return
}
// XYZToEktaSpace converts from CIE XYZ to Ekta Space PS5 with D50 illuminator.
func XYZToEktaSpace(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (5111*x - 1862*y - 624*z) / 1e6
gg := (-1813*x + 4131*y + 202*z) / 1e6
bb := (97*x - 221*y + 3244*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// NTSCToXYZ converts from NTSC RGB with D50 illuminator to CIE XYZ.
func NTSCToXYZ(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 2379964*rr + 680396*gg + 785678*bb
y = 1172221*rr + 2300388*gg + 448958*bb
z = 259198*gg + 4377350*bb
return
}
// XYZToNTSC converts from CIE XYZ to NTSC RGB with D50 illuminator.
func XYZToNTSC(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (4870*x - 1357*y - 734*z) / 1e6
gg := (-2510*x + 5097*y - 72*z) / 1e6
bb := (148*x - 301*y + 2288*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// PALToXYZ converts from PAL/SECAM RGB with D65 illuminator to CIE XYZ.
func PALToXYZ(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 1688701*rr + 1339380*gg + 699251*bb
y = 870736*rr + 2771130*gg + 279700*bb
z = 79158*rr + 508040*gg + 3682723*bb
return
}
// XYZToPAL converts from CIE XYZ to PAL/SECAM RGB with D65 illuminator.
func XYZToPAL(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (7810*x - 3552*y - 1213*z) / 1e6
gg := (-2471*x + 4783*y + 105*z) / 1e6
bb := (173*x - 583*y + 2726*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// ProPhotoToXYZ converts from ProPhoto RGB with D50 illuminator to CIE XYZ.
// RGB inputs must be linear in [0, 255]; results are on the 1e9 scale.
func ProPhotoToXYZ(r, g, b uint8) (x, y, z int) {
	ri, gi, bi := int(r), int(g), int(b)
	return 3128136*ri + 530163*gi + 122954*bi,
		1129569*ri + 2791663*gi + 336*bi,
		3236117 * bi
}
// XYZToProPhoto converts from CIE XYZ to ProPhoto RGB with D50 illuminator.
// XYZ inputs are expected on the 1e9 scale; out-of-gamut channels are
// clamped to [0, 255]. The blue channel depends only on Z in this space,
// so the dead `0*x + 0*y` terms of the original have been removed.
func XYZToProPhoto(x, y, z int) (r, g, b uint8) {
	clamp := func(v int) uint8 {
		if v < 0 {
			return 0
		}
		if v > 255 {
			return 255
		}
		return uint8(v)
	}
	x, y, z = x/1e4, y/1e4, z/1e4
	r = clamp((3432*x - 651*y - 130*z) / 1e6)
	g = clamp((-1388*x + 3845*y + 52*z) / 1e6)
	b = clamp(3090 * z / 1e6)
	return
}
// SMPTE_CToXYZ converts from SMPTE-C RGB with D65 illuminator to CIE XYZ.
func SMPTE_CToXYZ(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 1543486*rr + 1432351*gg + 751495*bb
y = 832992*rr + 2749190*gg + 339385*bb
z = 73499*rr + 438946*gg + 3757475*bb
return
}
// XYZToSMPTE_C converts from CIE XYZ to SMPTE-C RGB with D65 illuminator.
func XYZToSMPTE_C(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (8938*x - 4435*y - 1387*z) / 1e6
gg := (-2726*x + 5043*y + 89*z) / 1e6
bb := (143*x - 502*y + 2678*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
}
// SRGBToXYZ converts from sRGB with D65 illuminator to CIE XYZ.
// RGB inputs must be linear in [0, 255]; results are on the 1e9 scale.
func SRGBToXYZ(r, g, b uint8) (x, y, z int) {
	ri, gi, bi := int(r), int(g), int(b)
	return 1617476*ri + 1402259*gi + 707598*bi,
		834011*ri + 2804518*gi + 283039*bi,
		75819*ri + 467419*gi + 3726682*bi
}
// XYZToSRGB converts from CIE XYZ to sRGB with D65 illuminator.
// XYZ inputs are expected on the 1e9 scale; out-of-gamut channels are
// clamped to [0, 255].
func XYZToSRGB(x, y, z int) (r, g, b uint8) {
	clamp := func(v int) uint8 {
		if v < 0 {
			return 0
		}
		if v > 255 {
			return 255
		}
		return uint8(v)
	}
	x, y, z = x/1e4, y/1e4, z/1e4
	r = clamp((8263*x - 3919*y - 1271*z) / 1e6)
	g = clamp((-2471*x + 4783*y + 105*z) / 1e6)
	b = clamp((141*x - 520*y + 2695*z) / 1e6)
	return
}
// WGamutToXYZ converts from Wide Gamut RGB with D50 illuminator to CIE XYZ.
func WGamutToXYZ(r, g, b uint8) (x, y, z int) {
rr, gg, bb := int(r), int(g), int(b)
x = 2808253*rr + 395802*gg + 577199*bb
y = 1012499*rr + 2842893*gg + 66175*bb
z = 203063*gg + 3033053*bb
return
}
// XYZToWGamut converts from CIE XYZ to Wide Gamut RGB with D50 illuminator.
func XYZToWGamut(x, y, z int) (r, g, b uint8) {
x /= 1e4
y /= 1e4
z /= 1e4
rr := (3730*x - 469*y - 699*z) / 1e6
gg := (-1330*x + 3690*y + 172*z) / 1e6
bb := (89*x - 247*y + 3285*z) / 1e6
if rr < 0 {
rr = 0
} else if rr > 255 {
rr = 255
}
if gg < 0 {
gg = 0
} else if gg > 255 {
gg = 255
}
if bb < 0 {
bb = 0
} else if bb > 255 {
bb = 255
}
r, g, b = uint8(rr), uint8(gg), uint8(bb)
return
} | i8/rgb8/rgb.go | 0.72086 | 0.441011 | rgb.go | starcoder |
package batchnorm
import (
"encoding/gob"
"github.com/nlpodyssey/spago/ag"
"github.com/nlpodyssey/spago/mat"
"github.com/nlpodyssey/spago/nn"
)
var _ nn.Model[float32] = &Model[float32]{}
// Model contains the serializable parameters.
type Model[T mat.DType] struct {
nn.BaseModel[T]
W nn.Param[T] `spago:"type:weights"`
B nn.Param[T] `spago:"type:biases"`
Mean nn.Param[T] `spago:"type:undefined"`
StdDev nn.Param[T] `spago:"type:undefined"`
Momentum nn.Param[T] `spago:"type:undefined"`
}
const epsilon = 1e-5
const defaultMomentum = 0.9
func init() {
gob.Register(&Model[float32]{})
gob.Register(&Model[float64]{})
}
// NewWithMomentum returns a new model with supplied size and momentum.
func NewWithMomentum[T mat.DType](size int, momentum T) *Model[T] {
return &Model[T]{
W: nn.NewParam[T](mat.NewInitVecDense[T](size, epsilon)),
B: nn.NewParam[T](mat.NewEmptyVecDense[T](size)),
Mean: nn.NewParam[T](mat.NewEmptyVecDense[T](size), nn.RequiresGrad[T](false)),
StdDev: nn.NewParam[T](mat.NewEmptyVecDense[T](size), nn.RequiresGrad[T](false)),
Momentum: nn.NewParam[T](mat.NewScalar[T](momentum), nn.RequiresGrad[T](false)),
}
}
// New returns a new model with the supplied size and default momentum
func New[T mat.DType](size int) *Model[T] {
return NewWithMomentum[T](size, defaultMomentum)
}
// Forward performs the forward step for each input node and returns the result.
func (m *Model[T]) Forward(xs ...ag.Node[T]) []ag.Node[T] {
meanVector := ag.StopGrad[T](m.Mean)
devVector := ag.StopGrad[T](m.StdDev)
return m.process(xs, devVector, meanVector)
}
// ForwardT performs the forward step for each input node and returns the result.
func (m *Model[T]) ForwardT(xs ...ag.Node[T]) []ag.Node[T] {
meanVector := m.mean(xs)
devVector := m.stdDev(meanVector, xs)
m.updateBatchNormParameters(meanVector.Value(), devVector.Value())
return m.process(xs, devVector, meanVector)
}
func (m *Model[T]) process(xs []ag.Node[T], devVector ag.Node[T], meanVector ag.Node[T]) []ag.Node[T] {
devVector = ag.Div[T](m.W, ag.AddScalar(devVector, ag.Constant[T](epsilon)))
ys := make([]ag.Node[T], len(xs))
for i, x := range xs {
ys[i] = ag.Add[T](ag.Prod(ag.Sub(x, meanVector), devVector), m.B)
}
return ys
}
func (m *Model[T]) updateBatchNormParameters(meanVector, devVector mat.Matrix[T]) {
momentum := m.Momentum.Value().Scalar()
m.Mean.ReplaceValue(
m.Mean.Value().ProdScalar(momentum).Add(meanVector.ProdScalar(1.0 - momentum)))
m.StdDev.ReplaceValue(
m.StdDev.Value().ProdScalar(momentum).Add(devVector.ProdScalar(1.0 - momentum)))
}
// Mean computes the mean of the input.
// NOTE(review): the divisor is len(xs)+epsilon rather than len(xs), which
// biases the mean slightly low — presumably a numerical-safety choice, but
// worth confirming against the training code.
func (m *Model[T]) mean(xs []ag.Node[T]) ag.Node[T] {
sumVector := xs[0]
for i := 1; i < len(xs); i++ {
sumVector = ag.Add(sumVector, xs[i])
}
return ag.DivScalar(sumVector, ag.NewScalar[T](T(len(xs))+epsilon))
}
// StdDev computes the standard deviation of the input.
func (m *Model[T]) stdDev(meanVector ag.Node[T], xs []ag.Node[T]) ag.Node[T] {
devVector := ag.NewVariable[T](meanVector.Value().ZerosLike(), false)
for _, x := range xs {
diffVector := ag.Square(ag.Sub(meanVector, x))
devVector = ag.Add(devVector, diffVector)
}
devVector = ag.Sqrt(ag.DivScalar(devVector, ag.NewScalar[T](T(len(xs))+epsilon)))
return devVector
} | nn/normalization/batchnorm/batchnorm.go | 0.865295 | 0.602471 | batchnorm.go | starcoder |
package mcs

import "mcs/games/samegame"

// (╯°□°)╯︵ ┻━┻ poor man's generic:
// GameState can be anything that describes accurately the state of a game.
// In samegame it's a board. Every method below is a thin conversion wrapper
// delegating to the concrete samegame.State implementation.
type GameState samegame.State

// Clone returns a memory-independent copy.
func (g GameState) Clone() GameState {
	return GameState(samegame.State(g).Clone())
}

// Moves returns a list of legal moves from the calling state.
func (g GameState) Moves() MoveSet {
	return MoveSet(samegame.State(g).Moves())
}

// Play returns the game state after the given move has been played in the
// calling state. The move must be a samegame.Move; anything else panics on
// the type assertion.
func (g GameState) Play(m Move) GameState {
	return GameState(samegame.State(g).Play(m.(samegame.Move)))
}

// Sample simulates a game to its end by applying a move selection policy. The policy usually
// embeds randomness. done allows the caller to abort the simulation early.
func (g GameState) Sample(done <-chan struct{}, policy GamePolicy) Decision {
	score, moves := samegame.State(g).Sample(done, samegame.ColorPolicy(policy))
	return Decision{moves: MoveSequence(moves), score: score}
}

// Score returns a statically computed score of the calling state.
func (g GameState) Score() float64 {
	return samegame.State(g).Score()
}

// String renders the underlying samegame board.
func (g GameState) String() string {
	return samegame.State(g).String()
}
// Move has a length, is scorable and printable.
type Move interface {
	Len() int
	Score() float64
	String() string
}

// MoveSequence is a FIFO structure of moves, backed by samegame.Sequence.
type MoveSequence samegame.Sequence

// Clone returns an independent copy of the calling sequence.
func (s MoveSequence) Clone() MoveSequence {
	return MoveSequence(samegame.Sequence(s).Clone())
}

// Dequeue is customary for FIFO structures: it removes and returns the front
// move along with the remaining sequence.
func (s MoveSequence) Dequeue() (Move, MoveSequence) {
	move, seq := samegame.Sequence(s).Dequeue()
	return Move(move), MoveSequence(seq)
}

// Enqueue is customary for FIFO structures: it appends m (which must be a
// samegame.Move) and returns the extended sequence.
func (s MoveSequence) Enqueue(m Move) MoveSequence {
	return MoveSequence(samegame.Sequence(s).Enqueue(m.(samegame.Move)))
}

// Join returns an aggregated sequence.
func (s MoveSequence) Join(t MoveSequence) MoveSequence {
	return MoveSequence(samegame.Sequence(s).Join(samegame.Sequence(t)))
}

// Len returns the number of moves in the sequence.
func (s MoveSequence) Len() int {
	return samegame.Sequence(s).Len()
}
// MoveSet is a collection of legal moves, backed by samegame.Hand.
type MoveSet samegame.Hand

// Draw randomly removes a move from the set and returns both the drawn move
// and the remaining set.
func (m MoveSet) Draw() (Move, MoveSet) {
	move, set := samegame.Hand(m).Draw()
	return Move(move), MoveSet(set)
}

// Len returns the number of legal moves.
func (m MoveSet) Len() int {
	return samegame.Hand(m).Len()
}
// List returns all the moves present in the set, converted to the generic
// Move interface.
func (m MoveSet) List() []Move {
	raw := samegame.Hand(m).List()
	moves := make([]Move, len(raw))
	for i, mv := range raw {
		moves[i] = Move(mv)
	}
	return moves
}
// GamePolicy is a game policy used during the simulation step.
// It is a reference passed back to the game sampler.
type GamePolicy samegame.ColorPolicy

// (╯°□°)╯︵ ┻━┻ here is precisely what interfaces aren't for:
// (kept for reference: an earlier, interface-based design of the same API)
/*
type GameState interface {
	Clone() GameState
	Moves() MoveSet
	Play(Edge) GameState
	Sample(<-chan struct{}, float64, GamePolicy) (float64, MoveSequence)
	Score() float64
	String() string
}

type Edge interface {
	Score() float64
}

type MoveSequence interface {
	Enqueue(Edge) MoveSequence
	Join(MoveSequence) MoveSequence
	Len() int
}

type MoveSet interface {
	Draw() (Edge, MoveSet)
	Len() int
	List() []Edge
}

type GamePolicy interface{}
*/ | pkg/mcs/iface.go | 0.830147 | 0.713054 | iface.go | starcoder |
package hindley_milner

import "fmt"

// FunctionType is the function type constructor (a → b). Multi-argument
// functions are represented in curried form: b may itself be a *FunctionType.
type FunctionType struct {
	a, b Type

	// context carries the source-code position associated with this type.
	context CodeContext
}
// NewFnType creates a right-associated (curried) function type from the given
// types: NewFnType(a, b, c) == a → (b → c).
// It panics when fewer than two types are supplied, since a function type
// needs at least an argument and a result.
func NewFnType(ts ...Type) *FunctionType {
	if len(ts) < 2 {
		panic("Expected at least 2 input types")
	}

	// retVal comes from a pool; see borrowFnType.
	retVal := borrowFnType()
	retVal.a = ts[0]

	if len(ts) > 2 {
		retVal.b = NewFnType(ts[1:]...)
	} else {
		retVal.b = ts[1]
	}
	return retVal
}
// Name returns the arrow symbol identifying the function type constructor.
func (t *FunctionType) Name() string { return "→" }

// Apply substitutes type variables in both sides of the arrow, mutating the
// receiver in place.
// NOTE(review): it also cross-copies code context between the two sides via
// CopyContextTo — presumably to retain source positions after substitution;
// confirm against CopyContextTo's semantics.
func (t *FunctionType) Apply(sub Subs) Substitutable {
	a := t.a
	b := t.b
	t.a = t.a.Apply(sub).(Type)
	t.b = t.b.Apply(sub).(Type)
	t.a = CopyContextTo(t.a, a, b)
	t.b = CopyContextTo(t.b, b, a)
	return t
}

// FreeTypeVar returns the union of the free type variables of both sides.
func (t *FunctionType) FreeTypeVar() TypeVarSet { return t.a.FreeTypeVar().Union(t.b.FreeTypeVar()) }

// Format prints the type as "<prefix>a → b".
func (t *FunctionType) Format(s fmt.State, c rune) {
	fmt.Fprintf(s, "%s%v → %v", TypeStringPrefix(t), t.a, t.b)
}
// String renders the function type. Formatting is delegated to Format via %v.
// (The previous implementation additionally prepended TypeStringPrefix(t),
// which Format already emits, so the prefix appeared twice in the output.)
func (t *FunctionType) String() string { return fmt.Sprintf("%v", t) }
// Normalize renames the type variables of both sides (k → v) and rebuilds the
// arrow; the first side that fails to normalise aborts the whole operation.
func (t *FunctionType) Normalize(k, v TypeVarSet) (Type, error) {
	a, err := t.a.Normalize(k, v)
	if err != nil {
		return nil, err
	}
	b, err := t.b.Normalize(k, v)
	if err != nil {
		return nil, err
	}
	return NewFnType(a, b), nil
}
// Types returns both sides of the arrow in a pooled, 2-element Types slice.
func (t *FunctionType) Types() Types {
	retVal := BorrowTypes(2)
	retVal[0] = t.a
	retVal[1] = t.b
	return retVal
}

// Eq reports structural equality: true only for another *FunctionType whose
// two sides are respectively equal.
func (t *FunctionType) Eq(other Type) bool {
	if ot, ok := other.(*FunctionType); ok {
		return TypeEq(ot.a, t.a) && TypeEq(ot.b, t.b)
	}
	return false
}

// Arg returns the argument (left) side of the arrow.
func (t *FunctionType) Arg() Type { return t.a }

// CountArgs returns the number of arguments in the curried chain,
// e.g. a → b → c has 2.
func (t *FunctionType) CountArgs() int {
	if fnt, ok := t.b.(*FunctionType); ok {
		return 1 + fnt.CountArgs()
	}
	return 1
}

// Ret returns the result (right) side. With recursive=true it follows the
// curried chain to the final result type.
func (t *FunctionType) Ret(recursive bool) Type {
	if !recursive {
		return t.b
	}

	if fnt, ok := t.b.(*FunctionType); ok {
		return fnt.Ret(recursive)
	}

	return t.b
}
// FlatTypes flattens the curried chain into a pooled slice of component
// types, recursing into nested function types on either side.
func (t *FunctionType) FlatTypes() Types {
	retVal := BorrowTypes(8) // pooled starting buffer; may still grow via append
	retVal = retVal[:0]

	if a, ok := t.a.(*FunctionType); ok {
		ft := a.FlatTypes()
		retVal = append(retVal, ft...)
		ReturnTypes(ft) // hand the child's buffer back to the pool
	} else {
		retVal = append(retVal, t.a)
	}

	if b, ok := t.b.(*FunctionType); ok {
		ft := b.FlatTypes()
		retVal = append(retVal, ft...)
		ReturnTypes(ft)
	} else {
		retVal = append(retVal, t.b)
	}
	return retVal
}
// Clone returns a copy of the function type, deep-copying each side that
// implements Cloner. The code context is carried over as well — it was
// previously dropped, yielding clones without source position information
// (both WithContext and MapTypes preserve it).
func (t *FunctionType) Clone() interface{} {
	retVal := new(FunctionType)
	retVal.context = t.context

	if ac, ok := t.a.(Cloner); ok {
		retVal.a = ac.Clone().(Type)
	} else {
		retVal.a = t.a
	}

	if bc, ok := t.b.(Cloner); ok {
		retVal.b = bc.Clone().(Type)
	} else {
		retVal.b = t.b
	}

	return retVal
}
// MapTypes applies mapper to both sides, then to the rebuilt arrow itself.
// The context is preserved on the new node.
func (t *FunctionType) MapTypes(mapper TypeMapper) Type {
	return mapper(&FunctionType{
		a:       mapper(t.a),
		b:       mapper(t.b),
		context: t.context,
	})
}

// WithContext returns a shallow copy of the type carrying the given context.
func (t *FunctionType) WithContext(c CodeContext) Type {
	return &FunctionType{
		a:       t.a,
		b:       t.b,
		context: c,
	}
}

// GetContext returns the code context attached to the type.
func (t *FunctionType) GetContext() CodeContext {
	return t.context
} | src/type_checker/hindley_milner/functionType.go | 0.518302 | 0.467818 | functionType.go | starcoder |
package ast

import (
	"fmt"
	"regexp"
	"time"

	"github.com/influxdata/influxql"
)

// NodeTypeOf is used by all Node to identify the node during Marshal and Unmarshal
const NodeTypeOf = "typeOf"

// JSONNode is the intermediate type between Node and JSON serialization.
// It wraps a generic JSON object and offers typed accessors/mutators.
type JSONNode map[string]interface{}

// Type adds the Node type information under the typeOf key and returns the
// node for chaining.
func (j JSONNode) Type(typ string) JSONNode {
	j[NodeTypeOf] = typ
	return j
}

// Set adds the key/value to the JSONNode and returns it for chaining.
func (j JSONNode) Set(key string, value interface{}) JSONNode {
	j[key] = value
	return j
}
// SetDuration adds key to the JSONNode but formats the duration in InfluxQL style.
func (j JSONNode) SetDuration(key string, value time.Duration) JSONNode {
	return j.Set(key, influxql.FormatDuration(value))
}

// SetRegex adds key to the JSONNode but formats the regex as a string.
// A nil regex is stored as JSON null.
func (j JSONNode) SetRegex(key string, value *regexp.Regexp) JSONNode {
	if value == nil {
		return j.Set(key, nil)
	}
	return j.Set(key, value.String())
}

// SetOperator adds key to JSONNode but formats the operator as a string.
func (j JSONNode) SetOperator(key string, op TokenType) JSONNode {
	return j.Set(key, op.String())
}

// SetFunctionType adds key to JSONNode but formats the function type as a string.
func (j JSONNode) SetFunctionType(key string, fn FuncType) JSONNode {
	return j.Set(key, fn.String())
}
// TypeOf returns the type of the node as stored under the typeOf key.
func (j JSONNode) TypeOf() (string, error) {
	return j.String(NodeTypeOf)
}

// CheckTypeOf tests that the typeOf field is correctly set to typ.
func (j JSONNode) CheckTypeOf(typ string) error {
	t, ok := j[NodeTypeOf]
	if !ok {
		return fmt.Errorf("missing typeOf field")
	}

	if t != typ {
		return fmt.Errorf("error unmarshaling node type %s; received %s", typ, t)
	}
	return nil
}

// Has returns true if field exists.
func (j JSONNode) Has(field string) bool {
	_, ok := j[field]
	return ok
}

// Field returns expected field or error if field doesn't exist.
func (j JSONNode) Field(field string) (interface{}, error) {
	fld, ok := j[field]
	if !ok {
		return nil, fmt.Errorf("missing expected field %s", field)
	}
	return fld, nil
}
// String reads the field for a string value.
func (j JSONNode) String(field string) (string, error) {
	v, err := j.Field(field)
	if err != nil {
		return "", err
	}
	if str, ok := v.(string); ok {
		return str, nil
	}
	return "", fmt.Errorf("field %s is not a string value but is %T", field, v)
}
// Int64 reads the field for a int64 value.
// JSON numbers decode to float64, so a float64 is also accepted and is
// truncated toward zero when converted to int64.
func (j JSONNode) Int64(field string) (int64, error) {
	n, err := j.Field(field)
	if err != nil {
		return 0, err
	}

	num, ok := n.(int64)
	if !ok {
		flt, ok := n.(float64)
		if !ok {
			return 0, fmt.Errorf("field %s is not an integer value but is %T", field, n)
		}
		num = int64(flt)
	}
	return num, nil
}

// Float64 reads the field for a float64 value; an int64 value is converted.
func (j JSONNode) Float64(field string) (float64, error) {
	n, err := j.Field(field)
	if err != nil {
		return 0, err
	}

	num, ok := n.(float64)
	if !ok {
		integer, ok := n.(int64)
		if !ok {
			return 0, fmt.Errorf("field %s is not a floating point value but is %T", field, n)
		}
		num = float64(integer)
	}
	return num, nil
}
// Strings reads the field as an array of strings. Because encoding/json
// decodes JSON arrays into []interface{} (never []string), both a native
// []string and a []interface{} whose elements are all strings are accepted —
// mirroring how Int64/Float64 already tolerate JSON's float64 numbers.
func (j JSONNode) Strings(field string) ([]string, error) {
	s, err := j.Field(field)
	if err != nil {
		return nil, err
	}

	switch v := s.(type) {
	case []string:
		return v, nil
	case []interface{}:
		strs := make([]string, len(v))
		for i, e := range v {
			str, ok := e.(string)
			if !ok {
				return nil, fmt.Errorf("field %s is not an array of strings but is %T", field, s)
			}
			strs[i] = str
		}
		return strs, nil
	default:
		return nil, fmt.Errorf("field %s is not an array of strings but is %T", field, s)
	}
}
// Duration reads the field and assumes the string is in InfluxQL Duration format.
func (j JSONNode) Duration(field string) (time.Duration, error) {
	d, err := j.Field(field)
	if err != nil {
		return 0, err
	}

	dur, ok := d.(string)
	if !ok {
		return 0, fmt.Errorf("field %s is not a string duration value but is %T", field, d)
	}
	return influxql.ParseDuration(dur)
}

// Regex reads the field and assumes the string is a regular expression,
// compiling it before returning.
func (j JSONNode) Regex(field string) (*regexp.Regexp, error) {
	r, err := j.Field(field)
	if err != nil {
		return nil, err
	}

	re, ok := r.(string)
	if !ok {
		return nil, fmt.Errorf("field %s is not a string regex value but is %T", field, r)
	}
	return regexp.Compile(re)
}
// Bool reads the field for a boolean value.
func (j JSONNode) Bool(field string) (bool, error) {
	b, err := j.Field(field)
	if err != nil {
		return false, err
	}

	boolean, ok := b.(bool)
	if !ok {
		return false, fmt.Errorf("field %s is not a bool value but is %T", field, b)
	}
	return boolean, nil
}

// Operator reads the field for an TokenType operator value, parsing the
// stored string back into a token.
func (j JSONNode) Operator(field string) (TokenType, error) {
	o, err := j.Field(field)
	if err != nil {
		return TokenError, err
	}

	op, ok := o.(string)
	if !ok {
		return TokenError, fmt.Errorf("field %s is not an operator value but is %T", field, o)
	}
	return NewTokenType(op)
}

// FunctionType reads the field for an FuncType value, parsing the stored
// string back into a function type.
func (j JSONNode) FunctionType(field string) (FuncType, error) {
	f, err := j.Field(field)
	if err != nil {
		return -1, err
	}

	fn, ok := f.(string)
	if !ok {
		return -1, fmt.Errorf("field %s is not a function type value but is %T", field, f)
	}
	return NewFuncType(fn)
}
// NodeList reads the field for a list of nodes, decoding each element via getNode.
func (j JSONNode) NodeList(field string) ([]Node, error) {
	l, err := j.Field(field)
	if err != nil {
		return nil, err
	}

	list, ok := l.([]interface{})
	if !ok {
		return nil, fmt.Errorf("field %s is not a list of values but is %T", field, l)
	}

	nodes := make([]Node, len(list))
	for i, lst := range list {
		nodes[i], err = j.getNode(lst)
		if err != nil {
			return nil, err
		}
	}
	return nodes, nil
}

// Node reads the field for a node.
func (j JSONNode) Node(field string) (Node, error) {
	nn, err := j.Field(field)
	if err != nil {
		return nil, err
	}
	return j.getNode(nn)
}

// IDNode reads an IdentifierNode from the field.
func (j JSONNode) IDNode(field string) (*IdentifierNode, error) {
	n, err := j.Node(field)
	if err != nil {
		return nil, err
	}
	id, ok := n.(*IdentifierNode)
	if !ok {
		return nil, fmt.Errorf("field %s is not an identifier node but is %T", field, n)
	}
	return id, nil
}

// RefNode reads a ReferenceNode from the field.
func (j JSONNode) RefNode(field string) (*ReferenceNode, error) {
	n, err := j.Node(field)
	if err != nil {
		return nil, err
	}
	id, ok := n.(*ReferenceNode)
	if !ok {
		return nil, fmt.Errorf("field %s is not a reference node but is %T", field, n)
	}
	return id, nil
}
func (j JSONNode) getNode(nn interface{}) (Node, error) {
nd, ok := nn.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("expected node type but is %T", nn)
}
node := JSONNode(nd)
typ, err := node.TypeOf()
if err != nil {
return nil, err
}
var n Node
switch typ {
case "number":
n = &NumberNode{}
case "dbrp":
n = &DBRPNode{}
case "duration":
n = &DurationNode{}
case "bool":
n = &BoolNode{}
case "unary":
n = &UnaryNode{}
case "binary":
n = &BinaryNode{}
case "declaration":
n = &DeclarationNode{}
case "typeDeclaration":
n = &TypeDeclarationNode{}
case "identifier":
n = &IdentifierNode{}
case "reference":
n = &ReferenceNode{}
case "string":
n = &StringNode{}
case "list":
n = &ListNode{}
case "regex":
n = &RegexNode{}
case "star":
n = &StarNode{}
case "func":
n = &FunctionNode{}
case "lambda":
n = &LambdaNode{}
case "program":
n = &ProgramNode{}
case "comment":
n = &CommentNode{}
}
err = n.unmarshal(node)
return n, err
} | tick/ast/json.go | 0.740644 | 0.567637 | json.go | starcoder |
package contracts

import (
	"context"
	"reflect"
	"testing"

	"github.com/adamluzsi/frameless"
	"github.com/adamluzsi/frameless/contracts/assert"
	"github.com/adamluzsi/frameless/extid"
	"github.com/adamluzsi/testcase"
	"github.com/stretchr/testify/require"
)

// Publisher is a contract (reusable test suite) that verifies a resource
// publishes creator, updater and deleter events to its subscribers.
type Publisher struct {
	T
	Subject        func(testing.TB) PublisherSubject
	Context        func(testing.TB) context.Context
	FixtureFactory func(testing.TB) frameless.FixtureFactory
}

// PublisherSubject is the resource under test: CRD plus all three publisher roles.
type PublisherSubject interface {
	CRD
	frameless.CreatorPublisher
	frameless.UpdaterPublisher
	frameless.DeleterPublisher
}

// Test runs the contract as a test suite.
func (c Publisher) Test(t *testing.T) {
	c.Spec(testcase.NewSpec(t))
}

// Benchmark runs the contract as a benchmark suite.
func (c Publisher) Benchmark(b *testing.B) {
	c.Spec(testcase.NewSpec(b))
}

func (c Publisher) String() string { return `Publisher` }
// Spec composes the three role-specific publisher contracts (creator,
// updater, deleter) into one suite, sharing T, Context and FixtureFactory.
// The updater contract is skipped at runtime when the subject doesn't
// implement UpdaterPublisherSubject.
func (c Publisher) Spec(s *testcase.Spec) {
	testcase.RunContract(s,
		CreatorPublisher{T: c.T,
			Subject: func(tb testing.TB) CreatorPublisherSubject {
				return c.Subject(tb)
			},
			Context:        c.Context,
			FixtureFactory: c.FixtureFactory,
		},
		UpdaterPublisher{T: c.T,
			Subject: func(tb testing.TB) UpdaterPublisherSubject {
				publisher, ok := c.Subject(tb).(UpdaterPublisherSubject)
				if !ok {
					tb.Skip()
				}
				return publisher
			},
			Context:        c.Context,
			FixtureFactory: c.FixtureFactory,
		},
		DeleterPublisher{T: c.T,
			Subject: func(tb testing.TB) DeleterPublisherSubject {
				return c.Subject(tb)
			},
			Context:        c.Context,
			FixtureFactory: c.FixtureFactory,
		},
	)
}
// CreatorPublisher is a contract verifying that create events are published
// to subscribers of the resource.
type CreatorPublisher struct {
	T
	Subject        func(testing.TB) CreatorPublisherSubject
	Context        func(testing.TB) context.Context
	FixtureFactory func(testing.TB) frameless.FixtureFactory
}

// CreatorPublisherSubject is the resource under test: CRD plus create-event publishing.
type CreatorPublisherSubject interface {
	CRD
	frameless.CreatorPublisher
}

// Test runs the contract as a test suite.
func (c CreatorPublisher) Test(t *testing.T) {
	c.Spec(testcase.NewSpec(t))
}

// Benchmark runs the contract as a benchmark suite.
func (c CreatorPublisher) Benchmark(b *testing.B) {
	c.Spec(testcase.NewSpec(b))
}

func (c CreatorPublisher) String() string {
	return `CreatorPublisher`
}
// Spec declares the behaviour of SubscribeToCreatorEvents: subscribers
// receive create events fired after they subscribed, stop receiving after
// Close, and late subscribers don't see past events.
func (c CreatorPublisher) Spec(s *testcase.Spec) {
	factoryLet(s, c.FixtureFactory)
	s.Describe(`.Subscribe/Create`, func(s *testcase.Spec) {
		resource := s.Let(`resource`, func(t *testcase.T) interface{} {
			return c.Subject(t)
		})
		resourceGet := func(t *testcase.T) CreatorPublisherSubject {
			return resource.Get(t).(CreatorPublisherSubject)
		}
		// subject performs the subscription and registers cleanup on success.
		subject := func(t *testcase.T) (frameless.Subscription, error) {
			subscription, err := resourceGet(t).SubscribeToCreatorEvents(ctxGet(t), subscriberGet(t))
			if err == nil && subscription != nil {
				t.Set(subscriptionKey, subscription)
				t.Defer(subscription.Close)
			}
			return subscription, err
		}
		onSuccess := func(t *testcase.T) frameless.Subscription {
			subscription, err := subject(t)
			require.Nil(t, err)
			return subscription
		}

		ctx.Let(s, func(t *testcase.T) interface{} {
			return c.Context(t)
		})

		s.Let(subscriberKey, func(t *testcase.T) interface{} {
			return newEventSubscriber(t, `Create`, nil)
		})

		s.Before(func(t *testcase.T) {
			t.Log(`given a subscription is made`)
			require.NotNil(t, onSuccess(t))
		})

		s.Test(`and no events made after the subscription time then subscriberGet doesn't receive any event`, func(t *testcase.T) {
			require.Empty(t, subscriberGet(t).Events())
		})

		s.And(`events made`, func(s *testcase.Spec) {
			events := s.Let(`events`, func(t *testcase.T) interface{} {
				entities := genEntities(factoryGet(t), c.T)
				for _, entity := range entities {
					assert.CreateEntity(t, resourceGet(t), ctxGet(t), entity)
				}
				// wait until the subscriberGet received the events
				assert.Waiter.While(func() bool {
					return subscriberGet(t).EventsLen() < len(entities)
				})

				var events []frameless.CreateEvent
				for _, entity := range entities {
					events = append(events, frameless.CreateEvent{Entity: base(entity)})
				}
				return events
			}).EagerLoading(s)
			getEvents := func(t *testcase.T) []frameless.CreateEvent { return events.Get(t).([]frameless.CreateEvent) }

			s.Then(`subscriberGet receive those events`, func(t *testcase.T) {
				require.ElementsMatch(t, getEvents(t), subscriberGet(t).Events())
			})

			s.And(`subscription is cancelled by close`, func(s *testcase.Spec) {
				s.Before(func(t *testcase.T) {
					sub := t.I(subscriptionKey).(frameless.Subscription)
					require.Nil(t, sub.Close())
				})

				s.And(`more events made`, func(s *testcase.Spec) {
					s.Before(func(t *testcase.T) {
						entities := genEntities(factoryGet(t), c.T)
						for _, entity := range entities {
							assert.CreateEntity(t, resourceGet(t), ctxGet(t), entity)
						}
						assert.Waiter.Wait()
					})

					s.Then(`handler don't receive the new events`, func(t *testcase.T) {
						require.ElementsMatch(t, getEvents(t), subscriberGet(t).Events())
					})
				})
			})

			s.And(`then new subscriberGet registered`, func(s *testcase.Spec) {
				const othSubscriberKey = `oth-subscriberGet`
				othSubscriber := func(t *testcase.T) *eventSubscriber {
					return getSubscriber(t, othSubscriberKey)
				}
				s.Before(func(t *testcase.T) {
					othSubscriber := newEventSubscriber(t, `Create`, nil)
					t.Set(othSubscriberKey, othSubscriber)
					newSubscription, err := resourceGet(t).SubscribeToCreatorEvents(ctxGet(t), othSubscriber)
					require.Nil(t, err)
					require.NotNil(t, newSubscription)
					t.Defer(newSubscription.Close)
				})

				s.Then(`original subscriberGet still receive old events`, func(t *testcase.T) {
					require.ElementsMatch(t, subscriberGet(t).Events(), getEvents(t))
				})

				s.Then(`new subscriberGet do not receive old events`, func(t *testcase.T) {
					t.Log(`new subscriberGet don't have the vents since it subscribed after events had been already fired`)
					assert.Waiter.Wait() // Wait a little to receive events if we receive any
					require.Empty(t, othSubscriber(t).Events())
				})

				s.And(`further events made`, func(s *testcase.Spec) {
					furtherEvents := s.Let(`further events`, func(t *testcase.T) interface{} {
						entities := genEntities(factoryGet(t), c.T)
						for _, entity := range entities {
							assert.CreateEntity(t, resourceGet(t), ctxGet(t), entity)
						}
						// both subscribers must observe the new batch before we assert
						assert.Waiter.While(func() bool {
							return subscriberGet(t).EventsLen() < len(getEvents(t))+len(entities)
						})
						assert.Waiter.While(func() bool {
							return othSubscriber(t).EventsLen() < len(entities)
						})

						var events []frameless.CreateEvent
						for _, ent := range entities {
							events = append(events, frameless.CreateEvent{Entity: base(ent)})
						}
						return events
					}).EagerLoading(s)
					getFurtherEvents := func(t *testcase.T) []frameless.CreateEvent { return furtherEvents.Get(t).([]frameless.CreateEvent) }

					s.Then(`original subscriberGet receives all events`, func(t *testcase.T) {
						requireContainsList(t, subscriberGet(t).Events(), events.Get(t), `missing old events`)
						requireContainsList(t, subscriberGet(t).Events(), getFurtherEvents(t), `missing new events`)
					})

					s.Then(`new subscriberGet don't receive back old events`, func(t *testcase.T) {
						requireNotContainsList(t, othSubscriber(t).Events(), getEvents(t))
					})

					s.Then(`new subscriberGet will receive new events`, func(t *testcase.T) {
						requireContainsList(t, othSubscriber(t).Events(), getFurtherEvents(t))
					})
				})
			})
		})
	})
}
// DeleterPublisher is a contract verifying that delete events (by id and
// delete-all) are published to subscribers of the resource.
type DeleterPublisher struct {
	T
	Subject        func(testing.TB) DeleterPublisherSubject
	Context        func(testing.TB) context.Context
	FixtureFactory func(testing.TB) frameless.FixtureFactory
}

// DeleterPublisherSubject is the resource under test: CRD plus delete-event publishing.
type DeleterPublisherSubject interface {
	CRD
	frameless.DeleterPublisher
}

// resource is the testcase variable holding the subject under test.
func (c DeleterPublisher) resource() testcase.Var {
	return testcase.Var{
		Name: "resource",
		Init: func(t *testcase.T) interface{} {
			return c.Subject(t)
		},
	}
}

// resourceGet retrieves the typed subject from the testcase variable.
func (c DeleterPublisher) resourceGet(t *testcase.T) DeleterPublisherSubject {
	return c.resource().Get(t).(DeleterPublisherSubject)
}

func (c DeleterPublisher) String() string { return `DeleterPublisher` }

// Test runs the contract as a test suite.
func (c DeleterPublisher) Test(t *testing.T) {
	c.Spec(testcase.NewSpec(t))
}

// Benchmark runs the contract as a benchmark suite.
func (c DeleterPublisher) Benchmark(b *testing.B) {
	c.Spec(testcase.NewSpec(b))
}

// Spec declares the delete-by-id and delete-all event subscription specs.
func (c DeleterPublisher) Spec(s *testcase.Spec) {
	c.resource().Let(s, nil)
	factoryLet(s, c.FixtureFactory)
	s.Describe(`.Subscribe/DeleteByID`, c.specEventDeleteByID)
	s.Describe(`.Subscribe/DeleteAll`, c.specEventDeleteAll)
}
// specEventDeleteByID declares the behaviour of delete-by-id event
// publishing: a subscriber receives a delete event for an entity deleted
// after the subscription was made, stops receiving after Close, and a late
// subscriber only sees deletions that happen after it subscribed.
func (c DeleterPublisher) specEventDeleteByID(s *testcase.Spec) {
	subject := func(t *testcase.T) (frameless.Subscription, error) {
		subscription, err := c.resourceGet(t).SubscribeToDeleterEvents(ctxGet(t), subscriberGet(t))
		if err == nil && subscription != nil {
			t.Set(subscriptionKey, subscription)
			t.Defer(subscription.Close)
		}
		return subscription, err
	}
	onSuccess := func(t *testcase.T) {
		sub, err := subject(t)
		require.Nil(t, err)
		require.NotNil(t, sub)
	}

	ctx.Let(s, func(t *testcase.T) interface{} {
		return c.Context(t)
	})

	const subName = `DeleteByID`
	s.Let(subscriberKey, func(t *testcase.T) interface{} {
		return newEventSubscriber(t, subName, nil)
	})

	const entityKey = `entity`
	entity := s.Let(entityKey, func(t *testcase.T) interface{} {
		entityPtr := CreatePTR(factoryGet(t), c.T)
		assert.CreateEntity(t, c.resourceGet(t), ctxGet(t), entityPtr)
		return entityPtr
	}).EagerLoading(s)

	s.Before(func(t *testcase.T) {
		t.Log(`given a subscription is made`)
		onSuccess(t)
	})

	s.Test(`and no events made after the subscription time then subscriberGet doesn't receive any event`, func(t *testcase.T) {
		assert.Waiter.Wait()
		require.Empty(t, subscriberGet(t).Events())
	})

	s.And(`delete event made`, func(s *testcase.Spec) {
		s.Before(func(t *testcase.T) {
			assert.DeleteEntity(t, c.resourceGet(t), ctxGet(t), entity.Get(t))

			assert.Waiter.While(func() bool {
				return subscriberGet(t).EventsLen() < 1
			})
		})

		s.Then(`subscriberGet receive the delete event where ID can be located`, func(t *testcase.T) {
			c.hasDeleteEntity(t, subscriberGet(t).Events, entity.Get(t))
		})

		s.And(`subscription is cancelled via Close`, func(s *testcase.Spec) {
			s.Before(func(t *testcase.T) {
				require.Nil(t, t.I(subscriptionKey).(frameless.Subscription).Close())
			})

			s.And(`more events made`, func(s *testcase.Spec) {
				s.Before(func(t *testcase.T) {
					entityPtr := CreatePTR(factoryGet(t), c.T)
					assert.CreateEntity(t, c.resourceGet(t), ctxGet(t), entityPtr)
					assert.DeleteEntity(t, c.resourceGet(t), ctxGet(t), entityPtr)
					assert.Waiter.Wait()
				})

				s.Then(`subscriberGet no longer receive them`, func(t *testcase.T) {
					require.Len(t, subscriberGet(t).Events(), 1)
				})
			})
		})

		s.And(`then new subscriberGet registered`, func(s *testcase.Spec) {
			const othSubscriberKey = `oth-subscriberGet`
			othSubscriber := func(t *testcase.T) *eventSubscriber {
				return getSubscriber(t, othSubscriberKey)
			}
			s.Before(func(t *testcase.T) {
				othSubscriber := newEventSubscriber(t, subName, nil)
				t.Set(othSubscriberKey, othSubscriber)
				sub, err := c.resourceGet(t).SubscribeToDeleterEvents(ctxGet(t), othSubscriber)
				require.Nil(t, err)
				require.NotNil(t, sub)
				t.Defer(sub.Close)
			})

			s.Then(`original subscriberGet still received the old delete event`, func(t *testcase.T) {
				require.Len(t, subscriberGet(t).Events(), 1)

				expectedID, _ := extid.Lookup(entity.Get(t))
				actualID, _ := extid.Lookup(subscriberGet(t).Events()[0])
				require.Equal(t, expectedID, actualID)
			})

			s.Then(`new subscriberGet do not receive any events`, func(t *testcase.T) {
				require.Empty(t, othSubscriber(t).Events())
			})

			s.And(`an additional delete event is made`, func(s *testcase.Spec) {
				// NOTE: the Let key below was corrupted in the source
				// (`f<PASSWORD> event`, a data-scrubbing artifact); restored
				// to the intended `further event`.
				const furtherEventKey = `further event`
				furtherEvent := s.Let(furtherEventKey, func(t *testcase.T) interface{} {
					t.Log(`given an another entity is stored`)
					entityPtr := CreatePTR(factoryGet(t), c.T)
					assert.CreateEntity(t, c.resourceGet(t), ctxGet(t), entityPtr)
					assert.DeleteEntity(t, c.resourceGet(t), ctxGet(t), entityPtr)

					assert.Waiter.While(func() bool {
						return subscriberGet(t).EventsLen() < 2
					})

					assert.Waiter.While(func() bool {
						return getSubscriber(t, othSubscriberKey).EventsLen() < 1
					})

					return base(entityPtr)
				}).EagerLoading(s)

				s.Then(`original subscriberGet receives all events`, func(t *testcase.T) {
					c.hasDeleteEntity(t, subscriberGet(t).Events, entity.Get(t))
					c.hasDeleteEntity(t, subscriberGet(t).Events, furtherEvent.Get(t))
				})

				s.Then(`new subscriberGet don't receive back old events`, func(t *testcase.T) {
					c.doesNotHaveDeleteEntity(t, othSubscriber(t).Events, entity.Get(t))
				})

				s.Then(`new subscriberGet will receive new events`, func(t *testcase.T) {
					c.hasDeleteEntity(t, subscriberGet(t).Events, furtherEvent.Get(t))
				})
			})
		})
	})
}
// specEventDeleteAll declares the behaviour of delete-all event publishing:
// subscribers receive a DeleteAllEvent for each DeleteAll call made after
// they subscribed, and never for earlier calls.
func (c DeleterPublisher) specEventDeleteAll(s *testcase.Spec) {
	subject := func(t *testcase.T) (frameless.Subscription, error) {
		subscription, err := c.resourceGet(t).SubscribeToDeleterEvents(ctxGet(t), subscriberGet(t))
		if err == nil && subscription != nil {
			t.Set(subscriptionKey, subscription)
			t.Defer(subscription.Close)
		}
		return subscription, err
	}
	onSuccess := func(t *testcase.T) {
		sub, err := subject(t)
		require.Nil(t, err)
		require.NotNil(t, sub)
	}

	const subName = `DeleteAll`
	s.Let(subscriberKey, func(t *testcase.T) interface{} {
		return newEventSubscriber(t, subName, nil)
	})

	ctx.Let(s, func(t *testcase.T) interface{} {
		return c.Context(t)
	})

	s.Before(func(t *testcase.T) {
		t.Log(`given a subscription is made`)
		onSuccess(t)
	})

	s.Test(`and no events made after the subscription time then subscriberGet doesn't receive any event`, func(t *testcase.T) {
		require.Empty(t, subscriberGet(t).Events())
	})

	s.And(`delete event made`, func(s *testcase.Spec) {
		s.Before(func(t *testcase.T) {
			require.Nil(t, c.resourceGet(t).DeleteAll(ctxGet(t)))
			assert.Waiter.While(func() bool {
				return subscriberGet(t).EventsLen() < 1
			})
		})

		s.Then(`subscriberGet receive the delete event where ID can be located`, func(t *testcase.T) {
			require.Contains(t, subscriberGet(t).Events(), frameless.DeleteAllEvent{})
		})

		s.And(`then new subscriberGet registered`, func(s *testcase.Spec) {
			const othSubscriberKey = `oth-subscriberGet`
			othSubscriber := func(t *testcase.T) *eventSubscriber {
				return getSubscriber(t, othSubscriberKey)
			}
			s.Before(func(t *testcase.T) {
				othSubscriber := newEventSubscriber(t, subName, nil)
				t.Set(othSubscriberKey, othSubscriber)
				sub, err := c.resourceGet(t).SubscribeToDeleterEvents(ctxGet(t), othSubscriber)
				require.Nil(t, err)
				require.NotNil(t, sub)
				t.Defer(sub.Close)
			})

			s.Then(`original subscriberGet still received the old delete event`, func(t *testcase.T) {
				require.Contains(t, subscriberGet(t).Events(), frameless.DeleteAllEvent{})
			})

			s.Then(`new subscriberGet do not receive any events`, func(t *testcase.T) {
				assert.Waiter.Wait()
				require.Empty(t, othSubscriber(t).Events())
			})

			s.And(`an additional delete event is made`, func(s *testcase.Spec) {
				s.Before(func(t *testcase.T) {
					require.Nil(t, c.resourceGet(t).DeleteAll(ctxGet(t)))

					assert.Waiter.While(func() bool {
						return subscriberGet(t).EventsLen() < 2
					})

					assert.Waiter.While(func() bool {
						return getSubscriber(t, othSubscriberKey).EventsLen() < 1
					})
				})

				s.Then(`original subscriberGet receives all events`, func(t *testcase.T) {
					require.Contains(t, subscriberGet(t).Events(), frameless.DeleteAllEvent{})
					require.Len(t, subscriberGet(t).Events(), 2)
				})

				s.Then(`new subscriberGet only receive events made after the subscription`, func(t *testcase.T) {
					require.Contains(t, othSubscriber(t).Events(), frameless.DeleteAllEvent{})
					require.Len(t, othSubscriber(t).Events(), 1)
				})
			})
		})
	})
}
// deleteEventMatches reports whether any DeleteByIDEvent in events carries
// the external id of entity e. Shared by hasDeleteEntity and
// doesNotHaveDeleteEntity, which previously duplicated this scan; the
// loop-invariant extid.Lookup on e is also hoisted out of the loop.
func (c DeleterPublisher) deleteEventMatches(events []interface{}, e interface{}) bool {
	entityID, _ := extid.Lookup(e)
	for _, event := range events {
		eventDeleteByID, ok := event.(frameless.DeleteByIDEvent)
		if !ok {
			continue
		}
		if eventDeleteByID.ID == entityID {
			return true
		}
	}
	return false
}

// hasDeleteEntity eventually asserts that a delete event for e was observed
// in the list produced by getList.
func (c DeleterPublisher) hasDeleteEntity(tb testing.TB, getList func() []interface{}, e interface{}) {
	assert.Eventually.Assert(tb, func(tb testing.TB) {
		require.True(tb, c.deleteEventMatches(getList(), e), `it was expected to includes the delete event entry`)
	})
}

// doesNotHaveDeleteEntity eventually asserts that no delete event for e was
// observed in the list produced by getList.
func (c DeleterPublisher) doesNotHaveDeleteEntity(tb testing.TB, getList func() []interface{}, e interface{}) {
	assert.Eventually.Assert(tb, func(tb testing.TB) {
		require.False(tb, c.deleteEventMatches(getList(), e), `it was expected to doesn't have the delete event entry`)
	})
}
// UpdaterPublisher is a contract verifying that update events are published
// to subscribers of the resource.
type UpdaterPublisher struct {
	T
	Subject        func(testing.TB) UpdaterPublisherSubject
	Context        func(testing.TB) context.Context
	FixtureFactory func(testing.TB) frameless.FixtureFactory
}

// UpdaterPublisherSubject is the resource under test: CRD plus update support
// and update-event publishing.
type UpdaterPublisherSubject interface {
	CRD
	frameless.Updater
	frameless.UpdaterPublisher
}

// resource is the testcase variable holding the subject under test.
func (c UpdaterPublisher) resource() testcase.Var {
	return testcase.Var{
		Name: "resource",
		Init: func(t *testcase.T) interface{} {
			return c.Subject(t)
		},
	}
}

// resourceGet retrieves the typed subject from the testcase variable.
func (c UpdaterPublisher) resourceGet(t *testcase.T) UpdaterPublisherSubject {
	return c.resource().Get(t).(UpdaterPublisherSubject)
}

func (c UpdaterPublisher) String() string {
	return `UpdaterPublisher`
}

// Test runs the contract as a test suite.
func (c UpdaterPublisher) Test(t *testing.T) {
	c.Spec(testcase.NewSpec(t))
}

// Benchmark runs the contract as a benchmark suite.
func (c UpdaterPublisher) Benchmark(b *testing.B) {
	c.Spec(testcase.NewSpec(b))
}
func (c UpdaterPublisher) Spec(s *testcase.Spec) {
c.resource().Let(s, nil)
factoryLet(s, c.FixtureFactory)
s.Describe(`.Subscribe/Update`, func(s *testcase.Spec) {
subject := func(t *testcase.T) (frameless.Subscription, error) {
subscription, err := c.resourceGet(t).SubscribeToUpdaterEvents(ctxGet(t), subscriberGet(t))
if err == nil && subscription != nil {
t.Set(subscriptionKey, subscription)
t.Defer(subscription.Close)
}
return subscription, err
}
onSuccess := func(t *testcase.T) {
sub, err := subject(t)
require.Nil(t, err)
require.NotNil(t, sub)
}
ctx.Let(s, func(t *testcase.T) interface{} {
return c.Context(t)
})
const subName = `Update`
s.Let(subscriberKey, func(t *testcase.T) interface{} {
return newEventSubscriber(t, subName, nil)
})
const entityKey = `entity`
entity := s.Let(entityKey, func(t *testcase.T) interface{} {
ptr := CreatePTR(factoryGet(t), c.T)
assert.CreateEntity(t, c.resourceGet(t), ctxGet(t), ptr)
return ptr
}).EagerLoading(s)
getID := func(t *testcase.T) interface{} {
id, _ := extid.Lookup(entity.Get(t))
return id
}
s.Before(func(t *testcase.T) {
t.Log(`given a subscription is made`)
onSuccess(t)
})
s.Test(`and no events made after the subscription time then subscriberGet doesn't receive any event`, func(t *testcase.T) {
require.Empty(t, subscriberGet(t).Events())
})
s.And(`update event made`, func(s *testcase.Spec) {
const updatedEntityKey = `updated-entity`
updatedEntity := s.Let(updatedEntityKey, func(t *testcase.T) interface{} {
entityWithNewValuesPtr := CreatePTR(factoryGet(t), c.T)
require.Nil(t, extid.Set(entityWithNewValuesPtr, getID(t)))
assert.UpdateEntity(t, c.resourceGet(t), ctxGet(t), entityWithNewValuesPtr)
assert.Waiter.While(func() bool { return subscriberGet(t).EventsLen() < 1 })
return base(entityWithNewValuesPtr)
}).EagerLoading(s)
s.Then(`subscriberGet receive the event`, func(t *testcase.T) {
require.Contains(t, subscriberGet(t).Events(), frameless.UpdateEvent{Entity: updatedEntity.Get(t)})
})
s.And(`subscription is cancelled via Close`, func(s *testcase.Spec) {
s.Before(func(t *testcase.T) {
require.Nil(t, t.I(subscriptionKey).(frameless.Subscription).Close())
})
s.And(`more events made`, func(s *testcase.Spec) {
s.Before(func(t *testcase.T) {
id, _ := extid.Lookup(t.I(entityKey))
updatedEntityPtr := CreatePTR(factoryGet(t), c.T)
require.Nil(t, extid.Set(updatedEntityPtr, id))
require.Nil(t, c.resourceGet(t).Update(ctxGet(t), updatedEntityPtr))
assert.Waiter.While(func() bool {
return subscriberGet(t).EventsLen() < 1
})
})
s.Then(`subscriberGet no longer receive them`, func(t *testcase.T) {
require.Len(t, subscriberGet(t).Events(), 1)
})
})
})
s.And(`then new subscriberGet registered`, func(s *testcase.Spec) {
const othSubscriberKey = `oth-subscriberGet`
othSubscriber := func(t *testcase.T) *eventSubscriber {
return getSubscriber(t, othSubscriberKey)
}
s.Before(func(t *testcase.T) {
othSubscriber := newEventSubscriber(t, subName, nil)
t.Set(othSubscriberKey, othSubscriber)
sub, err := c.resourceGet(t).SubscribeToUpdaterEvents(ctxGet(t), othSubscriber)
require.Nil(t, err)
require.NotNil(t, sub)
t.Defer(sub.Close)
})
s.Then(`original subscriberGet still receive old events`, func(t *testcase.T) {
require.Contains(t, subscriberGet(t).Events(), frameless.UpdateEvent{Entity: updatedEntity.Get(t)})
})
s.Then(`new subscriberGet do not receive old events`, func(t *testcase.T) {
assert.Waiter.Wait()
require.Empty(t, othSubscriber(t).Events())
})
s.And(`a further event is made`, func(s *testcase.Spec) {
furtherEventUpdate := s.Let(`further event update`, func(t *testcase.T) interface{} {
updatedEntityPtr := CreatePTR(factoryGet(t), c.T)
require.Nil(t, extid.Set(updatedEntityPtr, getID(t)))
assert.UpdateEntity(t, c.resourceGet(t), ctxGet(t), updatedEntityPtr)
assert.Waiter.While(func() bool {
return subscriberGet(t).EventsLen() < 2
})
assert.Waiter.While(func() bool {
return getSubscriber(t, othSubscriberKey).EventsLen() < 1
})
return base(updatedEntityPtr)
}).EagerLoading(s)
s.Then(`original subscriberGet receives all events`, func(t *testcase.T) {
require.Contains(t, subscriberGet(t).Events(), frameless.UpdateEvent{Entity: updatedEntity.Get(t)}, `missing old update events`)
require.Contains(t, subscriberGet(t).Events(), frameless.UpdateEvent{Entity: furtherEventUpdate.Get(t)}, `missing new update events`)
})
s.Then(`new subscriberGet don't receive back old events`, func(t *testcase.T) {
assert.Waiter.Wait()
if reflect.DeepEqual(base(updatedEntity.Get(t)), base(furtherEventUpdate.Get(t))) {
t.Log("skipping test because original entity looks the same as the new variant")
t.Log("this can happen when the entity have only one field: ID")
return
}
require.NotContains(t, othSubscriber(t).Events(), frameless.UpdateEvent{Entity: updatedEntity.Get(t)})
})
s.Then(`new subscriberGet will receive new events`, func(t *testcase.T) {
require.Contains(t, othSubscriber(t).Events(), frameless.UpdateEvent{Entity: furtherEventUpdate.Get(t)})
})
})
})
})
})
} | contracts/Publisher.go | 0.617167 | 0.592961 | Publisher.go | starcoder |
package slice
import "reflect"
// Interface type for an iterator
type Iterator interface {
// Returns whether there is a next element
HasNext() bool
// Goes to the next element, then returns false if the end of the slice
// was reached
Next() bool
// Returns whether there is a previous element
HasPrev() bool
// Goes to the previous element, then returns false if the start of the
// slice was reached
Prev() bool
// Gets the element the iterator is currently pointed to
Elem() interface{}
}
// Interface type for a generic data structure that behaves like a []interface
// type
type Slice interface {
// Copies the given element(s) onto the end of the slice. This function is
// roughly equivalent to `append(slice, elems...)`
Append(...interface{}) Slice
// Copies the elements in the given slice onto the end of this slice
AppendSlice(Slice) Slice
// Copies the given elements onto the start of the slice. This function is
// roughly equivalent to `append(elems, slice...)`
Prepend(...interface{}) Slice
// Copies the elements in the given slice onto the start of this slice
PrependSlice(Slice) Slice
// Gets a subset of the slice. This function is roughly equivalent to
// `slice[i:j]`
Slice(int, int) Slice
// Gets the element at the given index. This function is roughly equivalent
// to `slice[i]`
Index(int) interface{}
// Creates an iterator, pointed to the first element
IterStart() Iterator
// Creates a reverse iterator, pointed to the first element
ReverseIterStart() Iterator
// Creates an iterator, pointed to the last elements
IterEnd() Iterator
// Creates a reverse iterator, pointed to the last element
ReverseIterEnd() Iterator
// Creates a deep copy of the slice
DeepCopy() Slice
// Gets the slice's length. This function is roughly equivalent to
// `len(slice)`
Len() int
// Gets the slice's capacity. This function is roughly equivalent to
// `cap(slice)`
Cap() int
// Converts the slice to an []interface{} type
ToGoSlice() []interface{}
}
type bucket []interface{}
// Essentially math.Max for ints
func atLeast(a, b int) int {
if a > b {return a}
return b
}
// Appends an interface{} to the given slice. intf must be some kind of slice
func appendNativeSliceToSlice(s Slice, slice interface{}) Slice {
switch v := slice.(type) {
case []interface{}:
return s.Append(v...)
default:
val := reflect.ValueOf(slice)
// Make sure the slice is a slice
if val.Kind() != reflect.Slice {
panic("given slice is not slice type")
}
// Iterate over the elements of the slice
for i := 0; i < val.Len(); i++ {
// Add it
s = s.Append(val.Index(i).Interface())
}
return s
}
}
// Converts a slice to an []interface{} type
func ToGoSlice(s Slice) []interface{} {
// Create a slice
slice := make([]interface{}, 0, s.Len())
// Iterate over the elements
iter := s.IterStart()
for iter.Next() {
// Add the element to the slice
slice = append(slice, iter.Elem())
}
return slice
}
// Returns a slice where the element at the given index is erased. Equivalent to
// `append(s[:index], s[index + 1:]...)`. Warning: this function does not copy
// s, so the contents of s can be (and probably will be) modified. The
// Slice.Erase() function should be used instead of this function where
// possible, as it can be faster
func Erase(s Slice, index int) Slice {
return s.Slice(0, index).AppendSlice(s.Slice(index + 1, s.Len()))
}
// Returns a slice where the range of elements are erased. Equivalent to
// `append(s[:i], s[j + 1:]...)`. Warning: this function does not copy s, so the
// contents of s can be modified. The Slice.EraseRange function should be used
// instead of this function where possible, as it can be faster
func EraseRange(s Slice, i, j int) Slice {
return s.Slice(0, i).AppendSlice(s.Slice(j + 1, s.Len()))
}
// A reverse iterator, essentially an inverted iterator
type ReverseIterator struct {
Iterator
}
// Create a reverse iterator
func Reverse(i Iterator) Iterator {
// If the given iterator is a reverse iterator
reverse, ok := i.(*ReverseIterator)
if ok {
// The reverse iterator is just the unwrapped iterator
return reverse.Iterator
}
// Otherwise return a new iterator
return &ReverseIterator{i}
}
func (i *ReverseIterator) HasNext() bool {
return i.Iterator.HasPrev()
}
func (i *ReverseIterator) Next() bool {
return i.Iterator.Prev()
}
func (i *ReverseIterator) HasPrev() bool {
return i.Iterator.HasNext()
}
func (i *ReverseIterator) Prev() bool {
return i.Iterator.Next()
} | slice.go | 0.846229 | 0.448487 | slice.go | starcoder |
package checks
import (
"github.com/pkg/errors"
"time"
)
// Status represents the status of a Consul health check match.
type Status string
const (
// StatusPassing represents a Consul health check status match that is passing.
StatusPassing Status = "passing"
// StatusWarning represents a Consul check status match that is a warning.
StatusWarning = "warning"
// StatusFailing represents a Consul check status match that is a failure.
StatusFailing = "failing"
)
// HealthStatus represents the status of a Consul health check.
type HealthStatus int
const (
// HealthPassing represents Consul health check in the Passing state.
HealthPassing HealthStatus = iota
// HealthWarning represents Consul health check in the Warning state
HealthWarning
// HealthMaintenance represents Consul health check in the Maintenance state.
HealthMaintenance
// HealthCritical represents Consul health check in the Critical state
HealthCritical
)
var healthNames = []string{"passing", "warning", "maintenance", "critical"}
func (s HealthStatus) String() string {
return healthNames[s]
}
// ParseHealthStatus parses a string into a HealthStatus
func ParseHealthStatus(s string) (HealthStatus, bool) {
for i, n := range healthNames {
if n == s {
return HealthStatus(i), true
}
}
return 0, false
}
// ResultStatus represents the status of a Consulate call.
type ResultStatus string
const (
// Ok represents a successful call to Consulate.
Ok ResultStatus = "Ok"
// Warning represents a warning call to Consulate.
Warning ResultStatus = "Warning"
// Failed represents a failed call to Consulate.
Failed ResultStatus = "Failed"
// NoChecks represents verify check call to Consulate which had no checks to verify.
NoChecks ResultStatus = "No Checks"
)
// Result represents the result of a Consulate call.
type Result struct {
Status ResultStatus
Detail string `json:",omitempty"`
Counts map[Status]int `json:",omitempty"`
Checks map[string]*Check `json:",omitempty"`
}
// Check the result of a Consul check.
type Check struct {
Node string
CheckID string
Name string
Status string
Notes string `json:",omitempty"`
Output string `json:",omitempty"`
ServiceID string
ServiceName string
ServiceTags []string `json:",omitempty"`
Definition CheckDefinition `json:"-"`
CreateIndex uint64 `json:",omitempty"`
ModifyIndex uint64 `json:",omitempty"`
}
// MatchStatus returns a Status that indicates how the Status of a Check matches the specified status.
func (c *Check) MatchStatus(s HealthStatus) (Status, error) {
parsedStatus, parsed := ParseHealthStatus(c.Status)
if !parsed {
return StatusFailing, errors.Errorf("Unsupported status: %s", c.Status)
}
if parsedStatus > s {
if parsedStatus == HealthWarning {
return StatusWarning, nil
}
return StatusFailing, nil
}
return StatusPassing, nil
}
// IsServiceId returns True if the Check ServiceId matches the specified serviceId.
func (c *Check) IsServiceId(serviceId string) bool {
return serviceId == c.ServiceID
}
// IsServiceName returns True if the Check ServiceName matches the specified serviceName.
func (c *Check) IsServiceName(serviceName string) bool {
return serviceName == c.ServiceName
}
// IsCheckId returns True if the Check CheckID/Name matches the specified check.
func (c *Check) IsCheckId(checkId string) bool {
return checkId == c.CheckID
}
// IsCheckName returns True if the Check CheckName matches the specified checkName.
func (c *Check) IsCheckName(checkName string) bool {
return checkName == c.Name
}
// CheckDefinition represents the configuration of a Consul check.
type CheckDefinition struct {
HTTP string
Header map[string][]string
Method string
TLSSkipVerify bool
TCP string
Interval time.Duration
Timeout time.Duration
DeregisterCriticalServiceAfter time.Duration
} | checks/check.go | 0.712232 | 0.529628 | check.go | starcoder |
package sort
import "sync"
// Sort the given `slice` in-place (non-decreasing order) using the merge sort algorithm
// Runs in O(NlgN) time with O(N) memory
func MergeSort(slice []int) {
if len(slice) < 2 {
return
}
q := (len(slice) + 1) / 2
left, right := slice[:q], slice[q:]
MergeSort(left)
MergeSort(right)
merge(slice, q)
}
// Sort the given `slice` in-place (non-decreasing order) using the merge sort algorithm
// and taking advantage of goroutines to do recursive calls on partial slices in parallel
// Runs in O(NlgN) time with O(N) memory
func GoMergeSort(slice []int) {
var wg sync.WaitGroup
wg.Add(1)
goMergeSortHelper(slice, &wg)
}
func goMergeSortHelper(slice []int, wg *sync.WaitGroup) {
defer wg.Done()
if len(slice) < 2 {
return
}
q := (len(slice) + 1) / 2
var subWg sync.WaitGroup
subWg.Add(2)
go goMergeSortHelper(slice[:q], &subWg)
go goMergeSortHelper(slice[q:], &subWg)
subWg.Wait()
merge(slice, q)
}
func merge(slice []int, q int) {
tmp := make([]int, len(slice))
copy(tmp, slice)
for i, j, n := 0, q, 0; n < len(slice); n++ {
if tmp[i] < tmp[j] {
slice[n] = tmp[i]
i++
if i == q {
copy(slice[n+1:], tmp[j:])
break
}
} else {
slice[n] = tmp[j]
j++
if j == len(tmp) {
copy(slice[n+1:], tmp[i:q])
break
}
}
}
}
// Sort the given `slice` in-place (non-decreasing order) using the merge sort algorithm and
// take advantage of goroutines with channels for recursive calls on partial slices in parallel
// Runs in O(NlgN) time with O(N) memory
func ChanMergeSort(slice []int) {
c := make(chan int, len(slice))
chanMergeSortHelper(slice, c)
sorted := make([]int, len(slice))
i := 0
for n := range c {
sorted[i] = n
i++
}
copy(slice, sorted)
}
func chanMergeSortHelper(slice []int, up chan int) {
if len(slice) > 1 {
q := (len(slice) + 1) / 2
left, right := make(chan int), make(chan int)
go chanMergeSortHelper(slice[:q], left)
go chanMergeSortHelper(slice[q:], right)
chanMerge(up, left, right)
} else {
for _, n := range slice {
up <- n
}
}
close(up)
}
func chanMerge(up, left, right chan int) {
leftVal, leftOk := <-left
rightVal, rightOk := <-right
for {
switch {
case leftOk && rightOk:
if leftVal < rightVal {
up <- leftVal
leftVal, leftOk = <-left
} else {
up <- rightVal
rightVal, rightOk = <-right
}
case !leftOk && rightOk:
up <- rightVal
for n := range right {
up <- n
}
return
case leftOk && !rightOk:
up <- leftVal
for n := range left {
up <- n
}
return
case !leftOk && !rightOk:
return
}
}
} | Algorithms/Sort/go/mergesort.go | 0.801897 | 0.564639 | mergesort.go | starcoder |
package formatters
import (
"fmt"
"github.com/kdelwat/recipaliser"
"github.com/olekukonko/tablewriter"
"os"
)
type ingredientField struct {
field string
value float64
}
func selectIngredientFields(ingredient recipaliser.Ingredient, selections ...string) []ingredientField {
selectionSets := map[string][]ingredientField{
"macronutrients": {
{field: "Energy (kJ)", value: ingredient.EnergyWithDietaryFibre},
{field: "Available carbohydrates (with sugar alcohols)", value: ingredient.AvailableCarbohydratesWithSugarAlcohols},
{field: "Dietary fibre", value: ingredient.DietaryFibre},
{field: "Protein", value: ingredient.Protein},
{field: "Total fat", value: ingredient.TotalFat},
},
"carbohydrates": {
{field: "Available carbohydrates (with sugar alcohols)", value: ingredient.AvailableCarbohydratesWithSugarAlcohols},
{field: "Available carbohydrates (without sugar alcohols)", value: ingredient.AvailableCarbohydratesWithoutSugarAlcohol},
{field: "Starch", value: ingredient.Starch},
{field: "Total sugars", value: ingredient.TotalSugars},
{field: "Added sugars", value: ingredient.AddedSugars},
{field: "Free sugars", value: ingredient.FreeSugars},
},
"protein": {
{field: "Protein", value: ingredient.Protein},
{field: "Tryptophan", value: ingredient.Tryptophan},
},
"fats": {
{field: "Total fat", value: ingredient.TotalFat},
{field: "Cholesterol", value: ingredient.Cholesterol},
{field: "Total saturated fat", value: ingredient.TotalSaturatedFat},
{field: "Total monounsaturated fat", value: ingredient.TotalMonounsaturatedFat},
{field: "Total polyunsaturated fat", value: ingredient.TotalPolyunsaturatedFat},
{field: "Linoleic acid", value: ingredient.LinoleicAcid},
{field: "Alphalinolenic acid", value: ingredient.AlphalinolenicAcid},
{field: "EPA", value: ingredient.C205w3Eicosapentaenoic},
{field: "DPA", value: ingredient.C225w3Docosapentaenoic},
{field: "DHA", value: ingredient.C226w3Docosahexaenoic},
{field: "Total long-chain omega-3 fatty acids", value: ingredient.TotalLongChainOmega3FattyAcids},
{field: "Total trans-fatty acids", value: ingredient.TotalTransFattyAcids},
},
"vitamins": {
{field: "Vitamin A (retinol equivalents)", value: ingredient.VitaminARetinolEquivalents},
{field: "Thiamin (B1)", value: ingredient.ThiaminB1},
{field: "Riboflavin (B2)", value: ingredient.RiboflavinB2},
{field: "Niacin (B3) (derived equivalents)", value: ingredient.NiacinDerivedEquivalents},
{field: "Dietary folate equivalents", value: ingredient.DietaryFolateEquivalents},
{field: "Vitamin B6", value: ingredient.VitaminB6},
{field: "Vitamin B12", value: ingredient.VitaminB12},
{field: "Vitamin C", value: ingredient.VitaminC},
{field: "Vitamin E", value: ingredient.VitaminE},
},
"minerals": {
{field: "Calcium (Ca)", value: ingredient.CalciumCa},
{field: "Iodine (I)", value: ingredient.IodineI},
{field: "Iron (Fe)", value: ingredient.IronFe},
{field: "Magnesium (Mg)", value: ingredient.MagnesiumMg},
{field: "Phosphorus (P)", value: ingredient.PhosphorusP},
{field: "Potassium (K)", value: ingredient.PotassiumK},
{field: "Selenium (Se)", value: ingredient.SeleniumSe},
{field: "Sodium (Na)", value: ingredient.SodiumNa},
{field: "Zinc (Zn)", value: ingredient.ZincZn},
},
"stimulants": {
{field: "Caffeine", value: ingredient.Caffeine},
},
"depressants": {
{field: "Alcohol", value: ingredient.Alcohol},
},
}
var selectionSet [][]ingredientField
for _, selection := range selections {
selectionSet = append(selectionSet, selectionSets[selection])
}
return flattenSelectionSet(selectionSet)
}
func containsField(fields []ingredientField, target ingredientField) bool {
for i := 0; i < len(fields); i++ {
if fields[i].field == target.field {
return true
}
}
return false
}
func flattenSelectionSet(selectionSet [][]ingredientField) []ingredientField {
var flattenedFields []ingredientField
for i := len(selectionSet) - 1; i >= 0; i-- {
for j := len(selectionSet[i]) - 1; j >= 0; j-- {
if !containsField(flattenedFields, selectionSet[i][j]) {
flattenedFields = append(flattenedFields, selectionSet[i][j])
}
}
}
// Reverse fields
// From https://stackoverflow.com/a/42545484
for i, j := 0, len(flattenedFields)-1; i < j; i, j = i+1, j-1 {
flattenedFields[i], flattenedFields[j] = flattenedFields[j], flattenedFields[i]
}
return flattenedFields
}
func printIngredient(ingredient recipaliser.Ingredient, table *tablewriter.Table, selections ...string) {
tableValues := []string{ingredient.Name}
for _, f := range selectIngredientFields(ingredient, selections...) {
tableValues = append(tableValues, fmt.Sprintf("%f", f.value))
}
table.Append(tableValues)
}
func PrintIngredients(ingredients []recipaliser.Ingredient, selections ...string) {
outputTable := tablewriter.NewWriter(os.Stdout)
tableHeaders := []string{"Name"}
for _, f := range selectIngredientFields(ingredients[0], selections...) {
tableHeaders = append(tableHeaders, f.field)
}
outputTable.SetHeader(tableHeaders)
for _, ingredient := range ingredients {
printIngredient(ingredient, outputTable, selections...)
}
outputTable.Render()
} | formatters/print_ingredients.go | 0.597256 | 0.480296 | print_ingredients.go | starcoder |
package carving
import (
"log"
"math"
"alvin.com/GoCarver/geom"
g "alvin.com/GoCarver/geom"
"alvin.com/GoCarver/hmap"
)
type oneRun interface {
isDone() bool
setEnableCarvingAtFulldepth(enable bool)
doOnePass(delta float64)
}
var maxDepth = 0.0
// carvingRun represent a single rectilinear run of the carving tool. It is used to manage
// the multiple carving passes that are required to carve the material to the maximum depth
// given the maximum step-down size.
type carvingRun struct {
numSteps int // Number of steps along the run.
step g.Vec2 // Increment vector for each step.
startingPoint g.Pt2 // Starting point for this run.
endPoint g.Pt2 // End point for this run.
whiteCarvingDepth float64 // The carving depth for white samples.
blackCarvingDepth float64 // The carving depth for black samples.
currentCarvingDepth float64 // The current carving depth, always starting at 0.
depthStepDown float64 // How much to step down for each new pass.
enableCarveAtFullDepth bool
needMorePasses bool // Whether more passes are need to finish this run.
sampler hmap.ScalarGridSampler
generator codeGenerator
}
var _ oneRun = (*carvingRun)(nil)
// isDone returns whether the maximum carving depth has been achieved and no more carving
// passes are needed.
func (r *carvingRun) isDone() bool {
return !r.needMorePasses
}
// setEnableCarvingAtFulldepth is used to enable at full depth, ignoring the maximum
// step-down size.
func (r *carvingRun) setEnableCarvingAtFulldepth(enable bool) {
r.enableCarveAtFullDepth = enable
}
// doOnePass is called to generate one carving pass along the run. Parameter delta must be
// either +1 or -1. It determines wether the run goes forward or backward along the run.
func (r *carvingRun) doOnePass(delta float64) {
if !r.needMorePasses {
return
}
if math.Abs(delta) != 1.0 {
log.Fatalln("Invalid delta value, should be 1.0 or -1.0")
}
// Check wether the carving depth reaches below the old carving depth. If it doesn't we
// can discard the path. This is mostly useful on the very fisrt pass.
oldCarvingDepth := r.currentCarvingDepth
discardPath := true
// If the carving depth doesn't go as deep as the deepest sampled carving depth,
// we'll need more passes.
r.needMorePasses = false
r.currentCarvingDepth = r.currentCarvingDepth - r.depthStepDown
// fmt.Printf("*** Run y=%f, carving depth = %f, delta = %2.0f\n", r.startingPoint.Y, r.currentCarvingDepth, delta)
// fmt.Printf(" black=%5.2f, white=%5.2f\n", r.blackCarvingDepth, r.whiteCarvingDepth)
var origin geom.Pt2
for s := 0; s < r.numSteps; s++ {
var depth = 0.0
var clipped = false
if s == 0 {
// First step: starting point depends on run direction.
pt := r.startingPoint
if delta < 0 {
pt = r.endPoint
}
origin = pt
depth, clipped = r.getCarvingDepthAt(pt)
r.needMorePasses = r.needMorePasses || clipped
if depth < oldCarvingDepth {
discardPath = false
}
r.generator.startPath(pt.X, pt.Y, depth)
// fmt.Printf(" Start: %4.1f, %4.1f, %4.1f\n", pt.X, pt.Y, depth)
} else if s == r.numSteps-1 {
// Last step: end point depends on direction.
pt := r.startingPoint
if delta > 0 {
pt = r.endPoint
}
depth, clipped = r.getCarvingDepthAt(pt)
r.needMorePasses = r.needMorePasses || clipped
if depth < oldCarvingDepth {
discardPath = false
}
r.generator.moveTo(pt.X, pt.Y, depth)
r.generator.endPath(discardPath)
// fmt.Printf(" End: %4.1f, %4.1f, depth = %4.1f, discard = %v, more = %v\n", pt.X, pt.Y, depth, discardPath, r.needMorePasses)
} else {
stepVec := r.step.Scale(float64(s) * delta)
pt := origin.Add(stepVec)
depth, clipped = r.getCarvingDepthAt(pt)
r.needMorePasses = r.needMorePasses || clipped
if depth < oldCarvingDepth {
discardPath = false
}
r.generator.moveTo(pt.X, pt.Y, depth)
}
}
}
// getCarvingDepthAt samples and returns the carving depth at the given location. This
// function takes into account whether carving-at-full-depth is enabled.
func (r *carvingRun) getCarvingDepthAt(q geom.Pt2) (depth float64, clipped bool) {
s := r.sampler.At(q)
d := (1-s)*r.blackCarvingDepth + s*r.whiteCarvingDepth
if d < maxDepth {
maxDepth = d
}
depth = d
if r.enableCarveAtFullDepth {
clipped = false
} else {
clipped = d < r.currentCarvingDepth-0.05
// fmt.Printf(" target depth = %f5.2f, clip = %v\n", depth, clipped)
if clipped {
depth = r.currentCarvingDepth
}
}
return
}
// Sanitize the carving run, ensuring that it is in a valid configuration.
func (r *carvingRun) sanitize() {
if r.numSteps <= 0 {
r.numSteps = 1
}
if r.whiteCarvingDepth > 0 {
r.whiteCarvingDepth = 0
}
if r.blackCarvingDepth > 0 {
r.blackCarvingDepth = 0
}
if r.depthStepDown < 0 {
r.depthStepDown = -r.depthStepDown
}
} | carving/carving_run.go | 0.686055 | 0.542863 | carving_run.go | starcoder |
package api
func init() {
Swagger.Add("auth_tokens_tokens", `{
"swagger": "2.0",
"info": {
"title": "components/automate-gateway/api/auth/tokens/tokens.proto",
"version": "version not set"
},
"schemes": [
"http",
"https"
],
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"paths": {
"/auth/tokens": {
"get": {
"operationId": "GetTokens",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/responseTokens"
}
}
},
"tags": [
"TokensMgmt"
]
},
"post": {
"operationId": "CreateToken",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/responseToken"
}
}
},
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/requestCreateToken"
}
}
],
"tags": [
"TokensMgmt"
]
}
},
"/auth/tokens/{id}": {
"get": {
"operationId": "GetToken",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/responseToken"
}
}
},
"parameters": [
{
"name": "id",
"in": "path",
"required": true,
"type": "string"
}
],
"tags": [
"TokensMgmt"
]
},
"delete": {
"operationId": "DeleteToken",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/responseDeleteTokenResp"
}
}
},
"parameters": [
{
"name": "id",
"in": "path",
"required": true,
"type": "string"
}
],
"tags": [
"TokensMgmt"
]
},
"put": {
"operationId": "UpdateToken",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/responseToken"
}
}
},
"parameters": [
{
"name": "id",
"in": "path",
"required": true,
"type": "string"
},
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/requestUpdateToken"
}
}
],
"tags": [
"TokensMgmt"
]
}
}
},
"definitions": {
"requestCreateToken": {
"type": "object",
"properties": {
"description": {
"type": "string"
},
"active": {
"type": "boolean",
"format": "boolean"
},
"value": {
"type": "string"
},
"id": {
"type": "string"
}
}
},
"requestUpdateToken": {
"type": "object",
"properties": {
"id": {
"type": "string"
},
"active": {
"type": "boolean",
"format": "boolean"
},
"description": {
"type": "string"
}
}
},
"responseDeleteTokenResp": {
"type": "object"
},
"responseToken": {
"type": "object",
"properties": {
"id": {
"type": "string"
},
"description": {
"type": "string"
},
"value": {
"type": "string"
},
"active": {
"type": "boolean",
"format": "boolean"
},
"created": {
"type": "string"
},
"updated": {
"type": "string"
}
}
},
"responseTokens": {
"type": "object",
"properties": {
"tokens": {
"type": "array",
"items": {
"$ref": "#/definitions/responseToken"
}
}
}
}
}
}
`)
} | components/automate-gateway/api/auth_tokens_tokens.pb.swagger.go | 0.623721 | 0.413181 | auth_tokens_tokens.pb.swagger.go | starcoder |
package copier
import (
"fmt"
"reflect"
)
func Copy(toValue interface{}, fromValue interface{}) (err error) {
var (
isSlice bool
fromType reflect.Type
isFromPtr bool
toType reflect.Type
amount int
)
var accumulatedError error
from := reflect.Indirect(reflect.ValueOf(fromValue))
to := reflect.Indirect(reflect.ValueOf(toValue))
if to.Kind() == reflect.Slice {
isSlice = true
if from.Kind() == reflect.Slice {
fromType = from.Type().Elem()
if fromType.Kind() == reflect.Ptr {
fromType = fromType.Elem()
isFromPtr = true
}
amount = from.Len()
} else {
fromType = from.Type()
amount = 1
}
toType = to.Type().Elem()
if toType.Kind() == reflect.Ptr {
toType = toType.Elem()
}
} else {
fromType = from.Type()
toType = to.Type()
amount = 1
}
if isSlice {
if to.IsNil() {
to.Set(reflect.MakeSlice(to.Type(), 0, amount))
}
if from.Kind() == reflect.Slice {
if from.Type().Elem().Kind() == reflect.Ptr {
newSlice := reflect.MakeSlice(to.Type(), amount, amount)
originalLen := to.Len()
to.Set(reflect.AppendSlice(to, newSlice))
for i := 0; i < amount; i++ {
var newT reflect.Value
if to.Type().Elem().Kind() == reflect.Ptr {
newT = reflect.New(to.Type().Elem().Elem())
} else {
newT = reflect.New(to.Type().Elem())
}
err := Copy(newT.Interface(), from.Index(i).Addr().Interface())
to.Index(originalLen + i).Set(newT)
if nil != err {
if nil == accumulatedError {
accumulatedError = err
continue
}
accumulatedError = fmt.Errorf("error copying %v\n%v", err, accumulatedError)
}
}
} else if from.Type().Elem().Kind() == reflect.Struct {
newSlice := reflect.MakeSlice(to.Type(), amount, amount)
originalLen := to.Len()
to.Set(reflect.AppendSlice(to, newSlice))
for i := 0; i < amount; i++ {
err := Copy(to.Index(originalLen+i).Addr().Interface(), from.Index(i).Addr().Interface())
if nil != err {
if nil == accumulatedError {
accumulatedError = err
continue
}
accumulatedError = fmt.Errorf("error copying %v\n%v", err, accumulatedError)
}
}
} else {
reflect.Copy(to, from)
}
} else if from.Kind() == reflect.Struct {
newSlice := reflect.MakeSlice(to.Type(), 1, 1)
var newT reflect.Value
if to.Type().Elem().Kind() == reflect.Ptr {
newT = reflect.New(to.Type().Elem().Elem())
newSlice.Index(0).Set(newT)
} else {
newT = reflect.New(to.Type().Elem())
newSlice.Index(0).Set(newT.Elem())
}
originalLen := to.Len()
to.Set(reflect.AppendSlice(to, newSlice))
if to.Type().Elem().Kind() == reflect.Ptr {
return Copy(to.Index(originalLen).Addr().Interface(), from.Addr().Interface())
}
return Copy(to.Index(originalLen).Addr().Interface(), from.Addr().Interface())
} else if from.Kind() == reflect.Ptr {
return Copy(toValue, from.Elem().Interface())
}
return fmt.Errorf("source slice type unsupported\n%v", accumulatedError)
}
for e := 0; e < amount; e++ {
var dest, source reflect.Value
if isSlice {
if from.Kind() == reflect.Slice {
source = from.Index(e)
if isFromPtr {
source = source.Elem()
}
} else {
source = from
}
} else {
source = from
}
if isSlice {
dest = reflect.New(toType).Elem()
} else {
dest = to
}
for _, field := range deepFields(reflect.ValueOf(toValue).Type()) {
name := field
var fromField reflect.Value
var fromMethod reflect.Value
var toField reflect.Value
var toMethod reflect.Value
if source.Kind() == reflect.Ptr {
if source.Elem().Kind() == reflect.Struct {
fromField = source.Elem().FieldByName(name)
fromMethod = source.MethodByName(name)
} else {
return fmt.Errorf("error\n%v", accumulatedError)
}
} else if source.Kind() == reflect.Struct {
fromField = source.FieldByName(name)
fromMethod = source.Addr().MethodByName(name)
} else {
return fmt.Errorf("error\n%v", accumulatedError)
}
if dest.Kind() == reflect.Ptr {
if dest.Elem().Kind() == reflect.Struct {
toField = dest.Elem().FieldByName(name)
toMethod = dest.MethodByName(name)
} else {
return fmt.Errorf("error\n%v", accumulatedError)
}
} else if dest.Kind() == reflect.Struct {
toField = dest.FieldByName(name)
toMethod = dest.Addr().MethodByName(name)
} else {
return fmt.Errorf("error\n%v", accumulatedError)
}
canCopy := fromField.IsValid() && toMethod.IsValid() &&
toMethod.Type().NumIn() == 1 && fromField.Type().AssignableTo(toMethod.Type().In(0))
if canCopy {
toMethod.Call([]reflect.Value{fromField})
continue
}
canCopy = fromMethod.IsValid() && toField.IsValid() &&
fromMethod.Type().NumOut() == 1 && fromMethod.Type().Out(0).AssignableTo(toField.Type())
if canCopy {
toField.Set(fromMethod.Call([]reflect.Value{})[0])
continue
}
if fromMethod.IsValid() && toMethod.IsValid() {
}
canCopy = fromMethod.IsValid() && toMethod.IsValid() &&
toMethod.Type().NumIn() == 1 && fromMethod.Type().NumOut() == 1 &&
fromMethod.Type().Out(0).AssignableTo(toMethod.Type().In(0))
if canCopy {
toMethod.Call(fromMethod.Call([]reflect.Value{}))
continue
}
_, accumulatedError = copyValue(toField, fromField, accumulatedError)
}
}
return accumulatedError
}
// copyValue assigns from to to when a direct reflect assignment is
// possible, returning true on success. Otherwise, when both values are
// valid, it attempts the deep-copy strategies (pointer, struct, slice)
// in order, threading accumulatedError through each attempt, and
// returns false together with the possibly-extended error.
func copyValue(to reflect.Value, from reflect.Value, accumulatedError error) (bool, error) {
	valid := to.IsValid() && from.IsValid()
	if valid && to.CanSet() && from.Type().AssignableTo(to.Type()) {
		// Fast path: directly assignable.
		to.Set(from)
		return true, accumulatedError
	}
	if !valid {
		return false, accumulatedError
	}
	// Fall back to recursive copies; each helper is a no-op unless both
	// kinds match, so trying all three preserves the original behavior.
	_, accumulatedError = tryDeepCopyPtr(to, from, accumulatedError)
	_, accumulatedError = tryDeepCopyStruct(to, from, accumulatedError)
	_, accumulatedError = tryDeepCopySlice(to, from, accumulatedError)
	return false, accumulatedError
}
// tryDeepCopyPtr deep-copies fromField into toField when both are
// pointers, fromField is non-nil and toField is settable. A fresh zero
// value is allocated for the target before delegating to Copy. The bool
// reports whether a copy happened; copy errors are folded into
// accumulatedError.
func tryDeepCopyPtr(toField reflect.Value, fromField reflect.Value, accumulatedError error) (bool, error) {
	if toField.Type().Kind() != reflect.Ptr || fromField.Type().Kind() != reflect.Ptr ||
		fromField.IsNil() || !toField.CanSet() {
		return false, accumulatedError
	}
	// Point toField at a freshly allocated zero value, then copy into it.
	toField.Set(reflect.New(toField.Type().Elem()))
	err := Copy(toField.Interface(), fromField.Interface())
	if err == nil {
		return true, accumulatedError
	}
	if accumulatedError == nil {
		return false, err
	}
	return false, fmt.Errorf("error copying %v\n%v", err, accumulatedError)
}
// tryDeepCopyStruct deep-copies fromField into toField when both are
// structs and toField is settable, delegating to Copy on the fields'
// addresses. The bool reports whether a copy happened; copy errors are
// folded into accumulatedError.
func tryDeepCopyStruct(toField reflect.Value, fromField reflect.Value, accumulatedError error) (bool, error) {
	if toField.Type().Kind() != reflect.Struct || fromField.Type().Kind() != reflect.Struct || !toField.CanSet() {
		return false, accumulatedError
	}
	err := Copy(toField.Addr().Interface(), fromField.Addr().Interface())
	if err == nil {
		return true, accumulatedError
	}
	if accumulatedError == nil {
		return false, err
	}
	return false, fmt.Errorf("error copying %v\n%v", err, accumulatedError)
}
// tryDeepCopySlice deep-copies fromField into toField when both are
// slices and toField is settable, delegating to Copy on the fields'
// addresses. The bool reports whether a copy happened; copy errors are
// folded into accumulatedError.
func tryDeepCopySlice(toField reflect.Value, fromField reflect.Value, accumulatedError error) (bool, error) {
	if toField.Type().Kind() != reflect.Slice || fromField.Type().Kind() != reflect.Slice || !toField.CanSet() {
		return false, accumulatedError
	}
	err := Copy(toField.Addr().Interface(), fromField.Addr().Interface())
	if err == nil {
		return true, accumulatedError
	}
	if accumulatedError == nil {
		return false, err
	}
	return false, fmt.Errorf("error copying %v\n%v", err, accumulatedError)
}
// deepFields collects the method and field names reachable from
// ifaceType. For a pointer type it first gathers the names of the
// element type (value-receiver methods plus struct fields) and then
// appends the pointer type's own method set.
func deepFields(ifaceType reflect.Type) []string {
	fields := []string{}
	if ifaceType.Kind() == reflect.Ptr {
		// Include everything reachable through the pointed-to type.
		fields = append(fields, deepFields(ifaceType.Elem())...)
	}
	return append(fields, deepFieldsImpl(ifaceType)...)
}
func deepFieldsImpl(ifaceType reflect.Type) []string {
fields := []string{}
if ifaceType.Kind() != reflect.Ptr && ifaceType.Kind() != reflect.Struct {
return fields
}
methods := ifaceType.NumMethod()
for i := 0; i < methods; i++ {
var v reflect.Method
v = ifaceType.Method(i)
fields = append(fields, v.Name)
}
if ifaceType.Kind() == reflect.Ptr {
return fields
}
elements := ifaceType.NumField()
for i := 0; i < elements; i++ {
var v reflect.StructField
v = ifaceType.Field(i)
fields = append(fields, v.Name)
}
return fields
} | copier.go | 0.531939 | 0.503357 | copier.go | starcoder |
package utils
import (
"fmt"
"time"
)
// TimeSecondAt returns the result of rounding t down to the nearest multiple of a second
func TimeSecondAt(t time.Time) time.Time {
return t.Local().Truncate(time.Second)
}
// TimeMinuteAt returns the result of rounding t down to the nearest multiple of a minute
func TimeMinuteAt(t time.Time) time.Time {
return t.Local().Truncate(time.Minute)
}
// TimeHourAt returns the result of rounding t down to the nearest multiple of a hour
func TimeHourAt(t time.Time) time.Time {
return t.Local().Truncate(time.Hour)
}
// TimeDayAt returns the result of rounding t down to the nearest multiple of a day
func TimeDayAt(t time.Time, offset time.Duration) time.Time {
strDay := t.Local().Add(-offset).Format("2006-01-02")
dayAt, _ := time.Parse("2006-01-02", strDay)
return dayAt.Add(offset)
}
// TimeMonthAt returns the result of rounding t down to the nearest multiple of a month
func TimeMonthAt(t time.Time, offset time.Duration) time.Time {
strMonth := t.Local().Add(-offset).Format("2006-01") + "-01"
monthAt, _ := time.Parse("2006-01-02", strMonth)
return monthAt.Add(offset)
}
// TimeBetween returns true if t is between start and end
// start=nil means -∞, end=nil means ∞
func TimeBetween(t time.Time, start *time.Time, end *time.Time) bool {
if start != nil && start.After(t) {
return false
}
if end != nil && end.Before(t) {
return false
}
return true
}
// TimeParseAny parses string with any common format to time.Time
func TimeParseAny(value string) (*time.Time, error) {
if t, err := time.Parse("2006-01-02 15:04:05", value); err == nil {
return &t, nil
} else if t, err = time.Parse(time.RFC3339, value); err == nil {
return &t, nil
} else if t, err = time.Parse(time.RFC3339Nano, value); err == nil {
return &t, nil
} else if t, err = time.Parse(time.RFC822, value); err == nil {
return &t, nil
} else if t, err = time.Parse(time.RFC822Z, value); err == nil {
return &t, nil
} else if t, err = time.Parse(time.RFC850, value); err == nil {
return &t, nil
} else if t, err = time.Parse(time.RFC1123, value); err == nil {
return &t, nil
} else if t, err = time.Parse(time.RFC1123Z, value); err == nil {
return &t, nil
} else if t, err = time.Parse(time.UnixDate, value); err == nil {
return &t, nil
} else if t, err = time.Parse(time.RubyDate, value); err == nil {
return &t, nil
}
return nil, fmt.Errorf("parse datetime %s error", value)
} | utils/datetime.go | 0.844697 | 0.799403 | datetime.go | starcoder |
package main
// MapToInt is a right-bias mapping function and an alias for MapRightToInt.
// A left value passes through unchanged.
func (e *EitherStringOrString) MapToInt(f func(string) int) *EitherStringOrInt {
	if e.isLeft {
		return &EitherStringOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return &EitherStringOrInt{
		right: f(e.right),
		isLeft: false,
	}
}

// FlatMapToInt is a right-bias mapping function and an alias for FlatMapRightToInt.
func (e *EitherStringOrString) FlatMapToInt(f func(string) *EitherStringOrInt) *EitherStringOrInt {
	if e.isLeft {
		return &EitherStringOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return f(e.right)
}

// MapLeftToInt maps the left'ness of the either to a new either of type EitherIntOrString.
// A right value passes through unchanged.
func (e *EitherStringOrString) MapLeftToInt(f func(string) int) *EitherIntOrString {
	if e.isLeft {
		return &EitherIntOrString{
			left: f(e.left),
			isLeft: true,
		}
	}
	return &EitherIntOrString{
		right: e.right,
		isLeft: false,
	}
}

// MapRightToInt maps the right'ness of the either to a new either of type EitherStringOrInt.
func (e *EitherStringOrString) MapRightToInt(f func(string) int) *EitherStringOrInt {
	if e.isLeft {
		return &EitherStringOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return &EitherStringOrInt{
		right: f(e.right),
		isLeft: false,
	}
}

// FlatMapLeftToInt maps the left'ness of the either to a new Either of type EitherIntOrString.
func (e *EitherStringOrString) FlatMapLeftToInt(f func(string) *EitherIntOrString) *EitherIntOrString {
	if e.isLeft {
		return f(e.left)
	}
	return &EitherIntOrString{
		right: e.right,
		isLeft: false,
	}
}

// FlatMapRightToInt maps the right'ness of the either to a new Either of type EitherStringOrInt.
func (e *EitherStringOrString) FlatMapRightToInt(f func(string) *EitherStringOrInt) *EitherStringOrInt {
	if e.isLeft {
		return &EitherStringOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return f(e.right)
}
// MapToInt is a right-bias mapping function and an alias for MapRightToInt.
// A left value passes through unchanged.
func (e *EitherStringOrInt) MapToInt(f func(int) int) *EitherStringOrInt {
	if e.isLeft {
		return &EitherStringOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return &EitherStringOrInt{
		right: f(e.right),
		isLeft: false,
	}
}

// FlatMapToInt is a right-bias mapping function and an alias for FlatMapRightToInt.
func (e *EitherStringOrInt) FlatMapToInt(f func(int) *EitherStringOrInt) *EitherStringOrInt {
	if e.isLeft {
		return &EitherStringOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return f(e.right)
}

// MapLeftToInt maps the left'ness of the either to a new either of type EitherIntOrInt.
func (e *EitherStringOrInt) MapLeftToInt(f func(string) int) *EitherIntOrInt {
	if e.isLeft {
		return &EitherIntOrInt{
			left: f(e.left),
			isLeft: true,
		}
	}
	return &EitherIntOrInt{
		right: e.right,
		isLeft: false,
	}
}

// MapRightToInt maps the right'ness of the either to a new either of type EitherStringOrInt.
func (e *EitherStringOrInt) MapRightToInt(f func(int) int) *EitherStringOrInt {
	if e.isLeft {
		return &EitherStringOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return &EitherStringOrInt{
		right: f(e.right),
		isLeft: false,
	}
}

// FlatMapLeftToInt maps the left'ness of the either to a new Either of type EitherIntOrInt.
func (e *EitherStringOrInt) FlatMapLeftToInt(f func(string) *EitherIntOrInt) *EitherIntOrInt {
	if e.isLeft {
		return f(e.left)
	}
	return &EitherIntOrInt{
		right: e.right,
		isLeft: false,
	}
}

// FlatMapRightToInt maps the right'ness of the either to a new Either of type EitherStringOrInt.
func (e *EitherStringOrInt) FlatMapRightToInt(f func(int) *EitherStringOrInt) *EitherStringOrInt {
	if e.isLeft {
		return &EitherStringOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return f(e.right)
}
// MapToInt is a right-bias mapping function and an alias for MapRightToInt.
// A left value passes through unchanged.
func (e *EitherIntOrString) MapToInt(f func(string) int) *EitherIntOrInt {
	if e.isLeft {
		return &EitherIntOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return &EitherIntOrInt{
		right: f(e.right),
		isLeft: false,
	}
}

// FlatMapToInt is a right-bias mapping function and an alias for FlatMapRightToInt.
func (e *EitherIntOrString) FlatMapToInt(f func(string) *EitherIntOrInt) *EitherIntOrInt {
	if e.isLeft {
		return &EitherIntOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return f(e.right)
}

// MapLeftToInt maps the left'ness of the either to a new either of type EitherIntOrString.
func (e *EitherIntOrString) MapLeftToInt(f func(int) int) *EitherIntOrString {
	if e.isLeft {
		return &EitherIntOrString{
			left: f(e.left),
			isLeft: true,
		}
	}
	return &EitherIntOrString{
		right: e.right,
		isLeft: false,
	}
}

// MapRightToInt maps the right'ness of the either to a new either of type EitherIntOrInt.
func (e *EitherIntOrString) MapRightToInt(f func(string) int) *EitherIntOrInt {
	if e.isLeft {
		return &EitherIntOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return &EitherIntOrInt{
		right: f(e.right),
		isLeft: false,
	}
}

// FlatMapLeftToInt maps the left'ness of the either to a new Either of type EitherIntOrString.
func (e *EitherIntOrString) FlatMapLeftToInt(f func(int) *EitherIntOrString) *EitherIntOrString {
	if e.isLeft {
		return f(e.left)
	}
	return &EitherIntOrString{
		right: e.right,
		isLeft: false,
	}
}

// FlatMapRightToInt maps the right'ness of the either to a new Either of type EitherIntOrInt.
func (e *EitherIntOrString) FlatMapRightToInt(f func(string) *EitherIntOrInt) *EitherIntOrInt {
	if e.isLeft {
		return &EitherIntOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return f(e.right)
}
// MapToInt is a right-bias mapping function and an alias for MapRightToInt.
// A left value passes through unchanged.
func (e *EitherIntOrInt) MapToInt(f func(int) int) *EitherIntOrInt {
	if e.isLeft {
		return &EitherIntOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return &EitherIntOrInt{
		right: f(e.right),
		isLeft: false,
	}
}

// FlatMapToInt is a right-bias mapping function and an alias for FlatMapRightToInt.
func (e *EitherIntOrInt) FlatMapToInt(f func(int) *EitherIntOrInt) *EitherIntOrInt {
	if e.isLeft {
		return &EitherIntOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return f(e.right)
}

// MapLeftToInt maps the left'ness of the either to a new either of type EitherIntOrInt.
// A right value passes through unchanged.
func (e *EitherIntOrInt) MapLeftToInt(f func(int) int) *EitherIntOrInt {
	if e.isLeft {
		return &EitherIntOrInt{
			left: f(e.left),
			isLeft: true,
		}
	}
	return &EitherIntOrInt{
		right: e.right,
		isLeft: false,
	}
}

// MapRightToInt maps the right'ness of the either to a new either of type EitherIntOrInt.
func (e *EitherIntOrInt) MapRightToInt(f func(int) int) *EitherIntOrInt {
	if e.isLeft {
		return &EitherIntOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return &EitherIntOrInt{
		right: f(e.right),
		isLeft: false,
	}
}

// FlatMapLeftToInt maps the left'ness of the either to a new Either of type EitherIntOrInt.
func (e *EitherIntOrInt) FlatMapLeftToInt(f func(int) *EitherIntOrInt) *EitherIntOrInt {
	if e.isLeft {
		return f(e.left)
	}
	return &EitherIntOrInt{
		right: e.right,
		isLeft: false,
	}
}

// FlatMapRightToInt maps the right'ness of the either to a new Either of type EitherIntOrInt.
func (e *EitherIntOrInt) FlatMapRightToInt(f func(int) *EitherIntOrInt) *EitherIntOrInt {
	if e.isLeft {
		return &EitherIntOrInt{
			left: e.left,
			isLeft: true,
		}
	}
	return f(e.right)
}
package geometry
import (
"fmt"
"math"
)
// Degree2Radian converts an angle in degrees to radians.
func Degree2Radian(degree float64) float64 { return math.Pi * degree / 180 }

// Radian2Degree converts an angle in radians to degrees.
func Radian2Degree(radian float64) float64 { return radian * 180 / math.Pi }
// Point is a 2D point (or vector) stored as a complex number: the real
// part is X, the imaginary part is Y. Point arithmetic therefore
// follows complex arithmetic (addition is vector addition).
type Point complex128

// P builds a Point from x and y coordinates.
func P(x, y float64) Point { return Point(complex(x, y)) }

// Float builds a Point with X=f and Y=0; useful as a scalar factor in
// Point (complex) multiplication.
func Float(f float64) Point { return Point(complex(f, 0)) }

// String formats the point as "(x,y)".
func (p Point) String() string { return fmt.Sprintf("(%g,%g)", p.X(), p.Y()) }

// X returns the x coordinate.
func (p Point) X() float64 { return real(p) }

// Y returns the y coordinate.
func (p Point) Y() float64 { return imag(p) }

// Square returns the squared length of p.
func (p Point) Square() float64 { return p.X()*p.X() + p.Y()*p.Y() }

// SquareTo returns the squared distance between p and p2.
func (p Point) SquareTo(p2 Point) float64 { return (p - p2).Square() }

// Len returns the length (magnitude) of p.
func (p Point) Len() float64 { return math.Sqrt(p.Square()) }

// Dist returns the distance between p and p2.
func (p Point) Dist(p2 Point) float64 { return (p - p2).Len() }

// Radian returns the polar angle of p in [0, 2π); the origin maps to 0.
//
// Fixed: the guard previously read `x == 0 || y == 0`, which returned 0
// for every point on either axis and made the `x == 0` branch below
// unreachable. Only the origin should short-circuit to 0.
func (p Point) Radian() float64 {
	x, y := p.X(), p.Y()
	if x == 0 && y == 0 {
		return 0
	}
	if x == 0 {
		// Straight up or straight down.
		if y > 0 {
			return math.Pi / 2
		}
		return math.Pi + math.Pi/2
	}
	r := math.Atan(y / x)
	if x > 0 {
		if y >= 0 {
			return r
		}
		// Fourth quadrant: wrap the negative atan into [0, 2π).
		return 2*math.Pi + r
	}
	// Second/third quadrant.
	return math.Pi + r
}
// Matrix is a 2x2 row-major matrix of float64 values.
type Matrix [2][2]float64

// NewMatrix builds a Matrix from its four entries in row-major order:
//
//	| v00 v01 |
//	| v10 v11 |
func NewMatrix(v00, v01, v10, v11 float64) Matrix {
	return Matrix{
		{v00, v01},
		{v10, v11},
	}
}

// Rotate returns the counter-clockwise rotation matrix for the given
// angle in radians.
func Rotate(radian float64) Matrix {
	sin, cos := math.Sin(radian), math.Cos(radian)
	return NewMatrix(cos, -sin, sin, cos)
}

// Mul returns the matrix product m * m2.
func (m Matrix) Mul(m2 Matrix) Matrix {
	return NewMatrix(
		m[0][0]*m2[0][0]+m[0][1]*m2[1][0],
		m[0][0]*m2[0][1]+m[0][1]*m2[1][1],
		m[1][0]*m2[0][0]+m[1][1]*m2[1][0],
		m[1][0]*m2[0][1]+m[1][1]*m2[1][1],
	)
}

// String renders the matrix as a small ASCII table.
func (m Matrix) String() string {
	const format = `+---------+---------+
| %7.1f | %7.1f |
+---------+---------+
| %7.1f | %7.1f |
+---------+---------+`
	return fmt.Sprintf(format, m[0][0], m[0][1], m[1][0], m[1][1])
}
// Bezier is a polyline approximation of a Bézier curve: sampled points
// kept at least a minimum distance apart, plus the heading (radian) of
// each segment between consecutive kept points.
type Bezier struct {
	points  []Point
	radians []float64
}

// Points returns the sampled points along the curve.
func (b Bezier) Points() []Point { return b.points }

// Radians returns the heading of each sampled segment.
func (b Bezier) Radians() []float64 { return b.radians }

// NewBezier samples the Bézier curve defined by the control points,
// keeping only samples at least `length` apart. It returns nil when
// fewer than two control points are given; three- and four-point
// curves dispatch to the specialized evaluators.
func NewBezier(points []Point, length float64) *Bezier {
	if len(points) < 2 {
		return nil
	}
	if len(points) == 3 {
		return newBezier3(points, length)
	}
	if len(points) == 4 {
		return newBezier4(points, length)
	}
	bezier := new(Bezier)
	bezier.points = append(bezier.points, points[0])
	count := len(points) - 1
	length2 := length * length
	for i := 0; i < 1000; i++ {
		t := float64(i) / 1000
		// Evaluate the Bernstein form afresh at each t.
		//
		// Fixed two defects: p1 was declared outside this loop and
		// never reset, so each sample accumulated every previous one;
		// and the inner loop stopped at index < count, dropping the
		// final control point's term. Compare newBezier3/newBezier4,
		// which compute the complete degree-n Bernstein sum.
		var p1 Point
		for index := 0; index <= count; index++ {
			k := math.Pow(t, float64(index))
			k *= math.Pow(1-t, float64(count-index))
			k *= float64(combination(int64(count), int64(index)))
			p1 = p1 + (points[index] * Float(k))
		}
		last := bezier.points[len(bezier.points)-1]
		// Keep the sample only once it has moved at least `length`
		// from the last kept point.
		if last.SquareTo(p1) > length2 {
			bezier.points = append(bezier.points, p1)
			bezier.radians = append(bezier.radians, (p1 - last).Radian())
		}
	}
	return bezier
}
// newBezier3 samples a quadratic Bézier curve (3 control points),
// keeping samples at least `length` apart. The Bernstein weights are
// expanded inline: k0 = (1-t)^2, k1 = 2t(1-t), k2 = t^2.
func newBezier3(points []Point, length float64) *Bezier {
	bezier := new(Bezier)
	bezier.points = append(bezier.points, points[0])
	var (
		p1      Point
		length2 = length * length
	)
	for i := 0; i < 1000; i++ {
		t := float64(i) / 1000
		t2 := t * t
		// Degree-2 Bernstein basis.
		k0 := t2 - 2*t + 1
		k1 := 2*t - 2*t2
		k2 := t2
		p1 = points[0]*Float(k0) + points[1]*Float(k1) + points[2]*Float(k2)
		last := bezier.points[len(bezier.points)-1]
		dist2 := last.SquareTo(p1)
		// Keep the sample only once it has moved at least `length`
		// from the last kept point (compared in squared distance).
		if dist2 > length2 {
			bezier.points = append(bezier.points, p1)
			bezier.radians = append(bezier.radians, (p1 - last).Radian())
		}
	}
	return bezier
}
// newBezier4 samples a cubic Bézier curve (4 control points), keeping
// samples at least `length` apart. The Bernstein weights are
// k0 = (1-t)^3, k1 = 3t(1-t)^2, k2 = 3t^2(1-t), k3 = t^3.
func newBezier4(points []Point, length float64) *Bezier {
	bezier := new(Bezier)
	bezier.points = append(bezier.points, points[0])
	var (
		p1      Point
		length2 = length * length
	)
	for i := 0; i < 1000; i++ {
		t := float64(i) / 1000
		t2 := t * t
		t3 := t2 * t
		nt := 1 - t
		nt2 := nt * nt
		nt3 := nt2 * nt
		// Degree-3 Bernstein basis.
		k0 := nt3
		k1 := 3 * t * nt2
		k2 := 3 * t2 * nt
		k3 := t3
		p1 = points[0]*Float(k0) + points[1]*Float(k1) + points[2]*Float(k2) + points[3]*Float(k3)
		last := bezier.points[len(bezier.points)-1]
		dist2 := last.SquareTo(p1)
		// Keep the sample only once it has moved at least `length`
		// from the last kept point (compared in squared distance).
		if dist2 > length2 {
			bezier.points = append(bezier.points, p1)
			bezier.radians = append(bezier.radians, (p1 - last).Radian())
		}
	}
	return bezier
}
// combination returns the binomial coefficient C(c, r) using the
// multiplicative formula; the symmetry C(c, r) == C(c, c-r) keeps the
// intermediate products small.
func combination(c, r int64) int64 {
	if (r << 1) > c {
		r = c - r
	}
	num, den := int64(1), int64(1)
	for i := int64(0); i < r; i++ {
		num *= c - i
		den *= r - i
	}
	return num / den
}
package lz77
import (
"bytes"
"fmt"
"log"
)
// Compress takes a slice of bytes and returns a compressed version of
// it. Compression is done in 4096 byte blocks for historical reasons.
// The error result is always nil and exists for interface symmetry.
func Compress(data []byte) ([]byte, error) {
	// Single-block input: no chunking required.
	if len(data) <= 4096 {
		return compressBlock(data), nil
	}
	out := make([]byte, 0, len(data))
	for lo := 0; lo < len(data); lo += 4096 {
		hi := lo + 4096
		if hi > len(data) {
			hi = len(data)
		}
		out = append(out, compressBlock(data[lo:hi])...)
	}
	return out, nil
}
// byteLiteral encodes a single byte for the lz77 stream: NUL and the
// range 0x09-0x7f pass through unchanged; anything else is escaped as
// a length-1 literal run (leading 0x01).
func byteLiteral(b byte) []byte {
	if b == 0 || (b >= 0x09 && b <= 0x7f) {
		return []byte{b}
	}
	return []byte{1, b}
}
// compressBlock compresses a single up-to-4096 byte block of the input.
//
// At each input position it tries, in order: a two-byte back-reference
// (high bit set; 11-bit distance, 3-bit length-3) to a 3-10 byte run
// seen earlier, the one-byte space+printable fold (>= 0xc0), a plain
// literal byte, and finally a length-prefixed run of up to 8 raw bytes.
func compressBlock(data []byte) []byte {
	// Preallocate the output slice on the optimistic assumption that
	// the output won't be bigger than the input.
	ret := make([]byte, 0, len(data))
	for i := 0; i < len(data); i++ {
		// Last byte in the input? Encode it and be done.
		if i == len(data)-1 {
			ret = append(ret, byteLiteral(data[i])...)
			continue
		}
		// Have we seen a run already? If so then encode it.
		l, offset := findRun(data[i:], data[0:i])
		if l >= 3 {
			// 10 bytes is our maximum run length.
			if l > 10 {
				l = 10
			}
			// Pack the 11-bit distance and 3-bit (length-3), set the top bit.
			word := uint16(offset<<3+(l-3)) | 0x8000
			ret = append(ret, byte(word>>8), byte(word&0xff))
			i += (l - 1)
			continue
		}
		// space + printable? Add in the special byte and be done.
		if data[i] == ' ' && (data[i+1] >= 0x40 && data[i+1] <= 0x7f) {
			ret = append(ret, 0x80^data[i+1])
			i++
			continue
		}
		// A literal character? Then just pass it on to the output stream.
		if (data[i] >= 0x09 && data[i] <= 0x7f) || data[i] == 0 {
			ret = append(ret, data[i])
			continue
		}
		// Not a literal. In that case we need to blob a range of bytes --
		// send out a chunk as big as we can.
		max := len(data) - i
		if max > 8 {
			max = 8
		}
		ret = append(ret, byte(max))
		ret = append(ret, data[i:i+max]...)
		i += (max - 1)
		continue
	}
	return ret
}
// findRun searches the already-compressed bytes for the longest prefix
// of data (3 to 10 bytes) that also occurs in seen, looking back at
// most 1024 bytes so the distance fits the 11-bit encoding. It returns
// the match length and the distance from the end of seen to the match
// start, or (-1, -1) when no run of at least 3 bytes exists.
func findRun(data []byte, seen []byte) (int, int) {
	// Fewer than 3 bytes left: a run is impossible.
	if len(data) < 3 {
		return -1, -1
	}
	// Only the trailing 1024 bytes are addressable by the encoding.
	if len(seen) > 1024 {
		seen = seen[len(seen)-1024:]
	}
	length, dist := -1, -1
	for max := 3; max < 11 && max <= len(data); max++ {
		offset := bytes.Index(seen, data[:max])
		if offset < 0 {
			break
		}
		length = max
		dist = len(seen) - offset
	}
	return length, dist
}
// Decompress decompresses a compressed block of data.
//
// Fixed: corrupt input (a back-reference that reaches before the start
// of the output, a zero distance, or a truncated stream) previously
// called log.Fatalf/log.Printf and could kill the process or panic; a
// library must report corruption to its caller, so those cases now
// return an error.
func Decompress(data []byte) ([]byte, error) {
	// Start off assuming decompression roughly doubles the data; this
	// is mostly but not always true.
	ret := make([]byte, 0, len(data)*2)
	for o := 0; o < len(data); o++ {
		b := data[o]
		switch {
		case b == 0:
			// Literal NUL byte.
			ret = append(ret, b)
		case b >= 1 && b <= 8:
			// Length-prefixed literal run of 1..8 raw bytes.
			if o+int(b)+1 > len(data) {
				return nil, fmt.Errorf("copy from past end of block: %v/%v", len(data), o+int(b)+1)
			}
			ret = append(ret, data[o+1:o+int(b)+1]...)
			o += int(b)
		case b >= 0x09 && b <= 0x7f:
			// Plain literal byte.
			ret = append(ret, b)
		case b >= 0x80 && b <= 0xbf:
			// Back-reference: 11-bit distance, 3-bit (length-3).
			o++
			if o >= len(data) {
				return nil, fmt.Errorf("truncated back-reference at offset %v", o-1)
			}
			m := int(b)<<8 + int(data[o])
			dist := (m & 0x3fff) >> 3
			l := m&0x07 + 3
			if dist < 1 || dist > len(ret) {
				return nil, fmt.Errorf("invalid back-reference distance %v (output length %v)", dist, len(ret))
			}
			// Copy byte-by-byte so overlapping references (dist < l)
			// correctly replicate freshly written output.
			for i := 0; i < l; i++ {
				ret = append(ret, ret[len(ret)-dist])
			}
		case b >= 0xc0:
			// Space-folding escape: emits ' ' followed by b^0x80.
			ret = append(ret, ' ')
			ret = append(ret, b^0x80)
		default:
			// Unreachable: the cases above cover every byte value.
			log.Fatalf("unknown byte %v", b)
		}
	}
	return ret, nil
}
package unit
import (
"math"
"github.com/brettbuddin/shaden/dsp"
)
// newSlope builds the slope unit: a rise/fall ramp started by "trigger"
// or held at its peak by "gate", with adjustable curve "ratio",
// optional self-cycling, and end-of-cycle/end-of-rise outputs.
// Defaults: 100 ms-equivalent rise and fall (via dsp.Duration),
// retriggering enabled, cycling off.
func newSlope(io *IO, c Config) (*Unit, error) {
	return NewUnit(io, &slope{
		state: &slopeState{
			lastTrigger: -1,
		},
		stateFunc: slopeIdle,
		trigger: io.NewIn("trigger", dsp.Float64(-1)),
		gate: io.NewIn("gate", dsp.Float64(-1)),
		rise: io.NewIn("rise", dsp.Duration(100, c.SampleRate)),
		fall: io.NewIn("fall", dsp.Duration(100, c.SampleRate)),
		retrigger: io.NewIn("retrigger", dsp.Float64(1)),
		cycle: io.NewIn("cycle", dsp.Float64(0)),
		ratio: io.NewIn("ratio", dsp.Float64(0.01)),
		out: io.NewOut("out"),
		mirror: io.NewOut("mirror"),
		eoc: io.NewOut("eoc"),
		eor: io.NewOut("eor"),
	}), nil
}
// slope is a ramp generator driven by the state machine in slopeIdle
// and friends; it owns the unit's inputs, outputs and shared state.
type slope struct {
	trigger, retrigger, gate, rise, fall, cycle, ratio *In
	out, mirror, eoc, eor *Out
	state *slopeState
	stateFunc slopeStateFunc
}

// ProcessSample reads every input for sample i, advances the state
// machine one step, records trigger/gate for edge detection on the
// next sample, and writes the four outputs (mirror is 1-out).
func (s *slope) ProcessSample(i int) {
	s.state.trigger = s.trigger.Read(i)
	s.state.retrigger = s.retrigger.ReadSlow(i, ident)
	s.state.gate = s.gate.Read(i)
	// Negative durations are treated as their magnitude.
	s.state.rise = math.Abs(s.rise.Read(i))
	s.state.fall = math.Abs(s.fall.Read(i))
	s.state.cycle = s.cycle.Read(i)
	s.state.ratio = s.ratio.Read(i)
	s.stateFunc = s.stateFunc(s.state)
	s.state.lastTrigger = s.state.trigger
	s.state.lastGate = s.state.gate
	s.out.Write(i, s.state.out)
	s.mirror.Write(i, 1-s.state.out)
	s.eoc.Write(i, s.state.eoc)
	s.eor.Write(i, s.state.eor)
}
// slopeStateFunc is one state of the slope state machine: it consumes
// the current inputs in *slopeState and returns the next state.
type slopeStateFunc func(*slopeState) slopeStateFunc

// slopeState carries the per-sample input values, the one-pole ramp
// coefficients (base, multiplier), the previous trigger/gate values
// used for edge detection, and the current output values.
type slopeState struct {
	trigger, retrigger, gate, rise, fall, cycle, ratio float64
	base, multiplier float64
	lastTrigger, lastGate float64
	out, eoc, eor float64
}
// slopeIdle holds the output at zero until a rising edge on trigger or
// gate starts a rise.
func slopeIdle(s *slopeState) slopeStateFunc {
	s.out = 0
	s.eoc = -1
	s.eor = -1
	if isTrig(s.lastTrigger, s.trigger) || isTrig(s.lastGate, s.gate) {
		return prepSlopeRise(s)
	}
	return slopeIdle
}

// slopeRise advances the ramp toward 1; on reaching it, eor pulses and
// the unit either holds (gate high) or begins the fall.
func slopeRise(s *slopeState) slopeStateFunc {
	s.out = s.base + s.out*s.multiplier
	s.eoc = -1
	s.eor = -1
	if s.out >= 1 {
		s.eor = 1
		if s.gate > 0 {
			return slopeHold
		}
		return prepSlopeFall(s)
	}
	return slopeRise
}

// slopeHold keeps the output at its peak while the gate stays high.
func slopeHold(s *slopeState) slopeStateFunc {
	if s.gate <= 0 {
		return prepSlopeFall(s)
	}
	return slopeHold
}

// slopeFall decays the ramp toward 0. A trigger edge (when retrigger
// is enabled) or a gate edge restarts the rise; on reaching zero, eoc
// pulses and the unit either cycles back into a rise or goes idle.
func slopeFall(s *slopeState) slopeStateFunc {
	s.eoc = -1
	s.eor = -1
	if isHigh(s.retrigger) && isTrig(s.lastTrigger, s.trigger) || isTrig(s.lastGate, s.gate) {
		return prepSlopeRise(s)
	}
	s.out = s.base + s.out*s.multiplier
	if s.out < math.SmallestNonzeroFloat64 {
		s.eoc = 1
		s.out = 0
		if s.cycle > 0 {
			return prepSlopeRise(s)
		}
		return slopeIdle
	}
	return slopeFall
}

// prepSlopeRise computes the rise coefficients and enters slopeRise; a
// non-positive rise time jumps straight to the held peak.
func prepSlopeRise(s *slopeState) slopeStateFunc {
	if s.rise <= 0 {
		s.out = 1
		return slopeHold
	}
	s.base, s.multiplier = slopeCoeffs(s.ratio, s.rise, 1, logCurve)
	return slopeRise
}

// prepSlopeFall computes the fall coefficients and enters slopeFall; a
// non-positive fall time drops straight back to idle.
func prepSlopeFall(s *slopeState) slopeStateFunc {
	if s.fall <= 0 {
		s.out = 0
		return slopeIdle
	}
	s.base, s.multiplier = slopeCoeffs(s.ratio, s.fall, 0, expCurve)
	return slopeFall
}
// Curve selectors for slopeCoeffs.
const (
	expCurve int = iota
	logCurve
)

// slopeCoeffs derives the one-pole ramp coefficients (base, multiplier)
// for a segment approaching target over the given duration (the value
// read from the rise/fall inputs); ratio controls the curvature, and
// curve selects the falling (exp) or rising (log) shape by flipping
// ratio's sign.
func slopeCoeffs(ratio, duration, target float64, curve int) (base, multiplier float64) {
	multiplier = dsp.ExpRatio(ratio, duration)
	if curve == expCurve {
		ratio = -ratio
	}
	base = (target + ratio) * (1.0 - multiplier)
	return
}
package geom
import (
"github.com/ctessum/geom/proj"
)
// Transform shifts the coordinates of p according to t.
// A nil transformer returns p unchanged.
func (p Point) Transform(t proj.Transformer) (Geom, error) {
	if t == nil {
		return p, nil
	}
	x, y, err := t(p.X, p.Y)
	return Point{X: x, Y: y}, err
}
// Transform shifts the coordinates of mp according to t.
// A nil transformer returns mp unchanged.
func (mp MultiPoint) Transform(t proj.Transformer) (Geom, error) {
	if t == nil {
		return mp, nil
	}
	out := make(MultiPoint, len(mp))
	for i, p := range mp {
		g, err := p.Transform(t)
		if err != nil {
			return nil, err
		}
		out[i] = g.(Point)
	}
	return out, nil
}
// Transform shifts the coordinates of l according to t.
// A nil transformer returns l unchanged; the first transform error
// aborts with a nil geometry.
func (l LineString) Transform(t proj.Transformer) (Geom, error) {
	if t == nil {
		return l, nil
	}
	out := make(LineString, len(l))
	for i, p := range l {
		x, y, err := t(p.X, p.Y)
		if err != nil {
			return nil, err
		}
		out[i] = Point{X: x, Y: y}
	}
	return out, nil
}
// Transform shifts the coordinates of ml according to t.
// A nil transformer returns ml unchanged.
//
// Fixed: the error from the per-line Transform is now checked before
// the Geom type assertion. LineString.Transform returns a nil Geom on
// failure, so asserting first panicked instead of reporting the error
// (the other container types already check the error first).
func (ml MultiLineString) Transform(t proj.Transformer) (Geom, error) {
	if t == nil {
		return ml, nil
	}
	ml2 := make(MultiLineString, len(ml))
	for i, l := range ml {
		g, err := l.Transform(t)
		if err != nil {
			return nil, err
		}
		ml2[i] = g.(LineString)
	}
	return ml2, nil
}
// Transform shifts the coordinates of p according to t, ring by ring.
// A nil transformer returns p unchanged; the first transform error
// aborts with a nil geometry.
func (p Polygon) Transform(t proj.Transformer) (Geom, error) {
	if t == nil {
		return p, nil
	}
	out := make(Polygon, len(p))
	for i, ring := range p {
		out[i] = make([]Point, len(ring))
		for j, pt := range ring {
			x, y, err := t(pt.X, pt.Y)
			if err != nil {
				return nil, err
			}
			out[i][j] = Point{X: x, Y: y}
		}
	}
	return out, nil
}
// Transform shifts the coordinates of mp according to t.
// A nil transformer returns mp unchanged.
//
// Fixed: the error from the per-polygon Transform is now checked
// before the Geom type assertion. Polygon.Transform returns a nil Geom
// on failure, so asserting first panicked instead of reporting the
// error.
func (mp MultiPolygon) Transform(t proj.Transformer) (Geom, error) {
	if t == nil {
		return mp, nil
	}
	mp2 := make(MultiPolygon, len(mp))
	for i, p := range mp {
		g, err := p.Transform(t)
		if err != nil {
			return nil, err
		}
		mp2[i] = g.(Polygon)
	}
	return mp2, nil
}
// Transform shifts the coordinates of gc according to t, transforming
// each member geometry in turn. A nil transformer returns gc
// unchanged; the first error aborts with a nil geometry.
func (gc GeometryCollection) Transform(t proj.Transformer) (Geom, error) {
	if t == nil {
		return gc, nil
	}
	out := make(GeometryCollection, len(gc))
	for i, g := range gc {
		g2, err := g.Transform(t)
		if err != nil {
			return nil, err
		}
		out[i] = g2
	}
	return out, nil
}
// Transform shifts the coordinates of b according to t.
// If t is not nil, this function returns a Polygon instead of a *Bounds
// because the transformed polygon may not match the transformed bounding
// rectangle.
func (b *Bounds) Transform(t proj.Transformer) (Geom, error) {
if t == nil {
return b, nil
}
p := Polygon{{b.Min, {X: b.Max.X, Y: b.Min.Y}, b.Max, {X: b.Min.X, Y: b.Max.Y}}}
return p.Transform(t)
} | transform.go | 0.759761 | 0.452354 | transform.go | starcoder |
package tda
import (
"image"
"sort"
)
// Persistence constructs object persistence trajectories for an
// image: it tracks connected components of the image thresholded at an
// increasing sequence of intensity levels.
type Persistence struct {
	// The dimensions of the image
	rows int
	cols int
	// The current step, 1 plus the number of times that next was
	// called.
	step int
	// The persistence trajectories
	traj []Trajectory
	// The original image being processed
	img []int
	// The minimum and maximum of the image pixel intensities
	min, max int
	// The current thresholded image (1 where img >= threshold)
	timg []uint8
	// The current and previous labeled image (lbuf2 is current,
	// lbuf1 is the previous step's labels)
	lbuf1, lbuf2 []int
	// The current distribution of sizes, indexed by label
	size2 []int
	// The current distribution of maximum intensities, indexed by label
	max2 []int
	// The current set of bounding boxes, indexed by label
	bboxes2 []image.Rectangle
	// Link each region in the previous image to its descendent in the
	// current image (indexed by previous-step label)
	pns []Pstate
}
// Trajectories returns the persistence trajectories. Each outer
// element of the returned slice is a sequence of states defining a
// trajectory. The order of the trajectories may be
// non-deterministic; calling Sort before calling Trajectories ensures
// a deterministic order. The returned slice is not a copy: callers
// must not modify it while ps is still being advanced.
func (ps *Persistence) Trajectories() []Trajectory {
	return ps.traj
}
// Pstate defines a state in a persistence trajectory: one object's
// snapshot at one thresholding step.
type Pstate struct {
	// The connected component label for the object (not
	// comparable across points on a trajectory).
	Label int
	// The size in pixels of the object.
	Size int
	// The maximum intensity of the object.
	Max int
	// The step of the algorithm at which the state is defined.
	Step int
	// The threshold used to define the image used at this step of
	// the algorithm.
	Threshold int
	// A bounding box for the object
	Bbox image.Rectangle
}
// BirthDeath returns the object birth and death times as float64
// slices: for each trajectory, the threshold at which the object first
// appeared and the threshold of its final recorded state. Both slices
// are nil when there are no trajectories.
func (ps *Persistence) BirthDeath() ([]float64, []float64) {
	var birth, death []float64
	for _, tr := range ps.traj {
		birth = append(birth, float64(tr[0].Threshold))
		death = append(death, float64(tr[len(tr)-1].Threshold))
	}
	return birth, death
}
// threshold writes a binary mask of img into timg: 1 where the pixel
// is >= thresh, otherwise 0. timg is reused when it already has the
// right length; otherwise a fresh buffer is allocated. The mask is
// returned.
func threshold(img []int, timg []uint8, thresh int) []uint8 {
	if len(timg) != len(img) {
		timg = make([]uint8, len(img))
	}
	for i, v := range img {
		var bit uint8
		if v >= thresh {
			bit = 1
		}
		timg[i] = bit
	}
	return timg
}
// maxes returns, for each of the ncomp labeled components in lab, the
// maximum image intensity over that component's pixels (index 0 is the
// background label). max2 is reused, zeroed, when its capacity allows.
// The rows parameter is unused; it is retained so the existing call
// sites do not have to change.
func maxes(lab, max2, img []int, ncomp, rows int) []int {
	_ = rows // unused; kept for signature compatibility
	if cap(max2) < ncomp {
		max2 = make([]int, ncomp)
	} else {
		max2 = max2[:ncomp]
		for i := range max2 {
			max2[i] = 0
		}
	}
	for i, l := range lab {
		if img[i] > max2[l] {
			max2[l] = img[i]
		}
	}
	return max2
}
// NewPersistence calculates an object persistence diagram for the
// given image, which must be rectangular with the given number of
// rows. The steps argument determines the threshold increments used
// to produce the persistence diagram: thresholds advance linearly
// from the minimum to the maximum pixel intensity.
func NewPersistence(img []int, rows, steps int) *Persistence {
	cols := len(img) / rows
	if rows*cols != len(img) {
		panic("rows is not compatible with img")
	}
	mn, mx := iminmax(img)
	timg := make([]uint8, rows*cols)
	timg = threshold(img, timg, mn)
	lbuf1 := make([]int, rows*cols)
	lbuf2 := make([]int, rows*cols)
	// Label the first image
	lbl := NewLabel(timg, rows, lbuf2)
	lbuf2 = lbl.Labels()
	size2 := lbl.Sizes(nil)
	max2 := maxes(lbuf2, nil, img, len(size2), rows)
	bboxes2 := lbl.Bboxes(nil)
	// Start the persistence trajectories, one per labeled component
	// (label 0 is the background and is skipped).
	var traj []Trajectory
	for k, m := range max2 {
		if k != 0 {
			s := size2[k]
			bb := bboxes2[k]
			v := []Pstate{
				{
					Label: k,
					Max: m,
					Size: s,
					Step: 0,
					Threshold: mn,
					Bbox: bb,
				},
			}
			traj = append(traj, v)
		}
	}
	per := &Persistence{
		rows: rows,
		cols: cols,
		img: img,
		timg: timg,
		lbuf1: lbuf1,
		lbuf2: lbuf2,
		traj: traj,
		size2: size2,
		max2: max2,
		bboxes2: bboxes2,
		min: mn,
		max: mx,
	}
	// Extend the persistence trajectories, one step per threshold.
	d := float64(mx-mn) / float64(steps-1)
	for i := 1; i < steps; i++ {
		t := mn + int(float64(i)*d)
		per.next(t)
	}
	return per
}
// Labels returns the current object labels: the label buffer produced
// by the most recent labeling pass over the thresholded image.
func (ps *Persistence) Labels() []int {
	return ps.lbuf2
}
// getAncestors links each labeled region of the previous image (lbuf1)
// to its descendent region in the current image (lbuf2): for every
// pixel labeled in both images, the brightest overlapping current
// region (ties broken by size) is recorded in ps.pns, indexed by the
// previous-step label.
func (ps *Persistence) getAncestors(thresh int) {
	ps.pns = ps.pns[0:0]
	rc := ps.rows * ps.cols
	for i := 0; i < rc; i++ {
		// Skip pixels that are background in either image.
		if ps.lbuf1[i] == 0 || ps.lbuf2[i] == 0 {
			continue
		}
		l1 := ps.lbuf1[i]
		l2 := ps.lbuf2[i]
		s2 := ps.size2[l2]
		m2 := ps.max2[l2]
		// Grow pns on demand so it can be indexed by l1.
		for len(ps.pns) < l1+1 {
			ps.pns = append(ps.pns, Pstate{})
		}
		mx := ps.pns[l1].Max
		// The favored descendent is the brightest one, which
		// will have the longest lifespan. But if the
		// brightness values are tied, go with the larger
		// region.
		if m2 > mx || (m2 == mx && s2 > ps.pns[l1].Size) {
			bb := ps.bboxes2[l2]
			ps.pns[l1] = Pstate{
				Label: l2,
				Max: m2,
				Size: s2,
				Step: ps.step,
				Threshold: thresh,
				Bbox: bb,
			}
		}
	}
}
// extend appends, to each trajectory that was alive at the previous
// step, its descendent region in the current step (as recorded by
// getAncestors), and starts a new trajectory for every current region
// that is not the descendent of any previous region.
func (ps *Persistence) extend(thresh int) {
	// notnew[l] marks current-step label l as already claimed by an
	// existing trajectory.
	notnew := make([]bool, 0, 1000)
	for i, tr := range ps.traj {
		r := tr[len(tr)-1]
		if r.Step != ps.step-1 {
			// Trajectory already ended at an earlier step.
			continue
		}
		for len(ps.pns) < r.Label+1 {
			ps.pns = append(ps.pns, Pstate{})
		}
		q := ps.pns[r.Label]
		// A zero Size means no descendent was found: the object dies.
		if q.Size > 0 {
			ps.traj[i] = append(ps.traj[i], q)
			for len(notnew) < q.Label+1 {
				notnew = append(notnew, false)
			}
			notnew[q.Label] = true
		}
	}
	// Any unclaimed current-step region is born here (label 0 is
	// background).
	for l2, m2 := range ps.max2 {
		for len(notnew) < l2+1 {
			notnew = append(notnew, false)
		}
		if l2 != 0 && !notnew[l2] {
			s2 := ps.size2[l2]
			bb := ps.bboxes2[l2]
			v := []Pstate{
				{
					Label: l2,
					Max: m2,
					Size: s2,
					Step: ps.step,
					Threshold: thresh,
					Bbox: bb,
				},
			}
			ps.traj = append(ps.traj, v)
		}
	}
}
// next adds another labeled image to the persistence graph. The
// threshold values t should be strictly increasing.
func (ps *Persistence) next(t int) {
	// The current labeling becomes the previous one.
	ps.lbuf1, ps.lbuf2 = ps.lbuf2, ps.lbuf1
	ps.step++
	ps.timg = threshold(ps.img, ps.timg, t)
	lbl := NewLabel(ps.timg, ps.rows, ps.lbuf2)
	ps.lbuf2 = lbl.Labels()
	// Buffers from the previous step are passed in for reuse.
	ps.size2 = lbl.Sizes(ps.size2)
	ps.max2 = maxes(ps.lbuf2, ps.max2, ps.img, len(ps.size2), ps.rows)
	ps.bboxes2 = lbl.Bboxes(ps.bboxes2)
	ps.getAncestors(t)
	ps.extend(t)
}
// Trajectory is a sequence of persistence states defined by labeling
// an image thresholded at an increasing sequence of threshold values.
// Each element records the region's state at one threshold step.
type Trajectory []Pstate
// straj adapts a slice of trajectories to sort.Interface so the
// trajectories can be ordered deterministically.
type straj []Trajectory

func (a straj) Len() int      { return len(a) }
func (a straj) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Less orders trajectories ascending by the birth state's Max, breaking
// ties by Size and then by Label.
func (a straj) Less(i, j int) bool {
	x, y := a[i][0], a[j][0]
	if x.Max != y.Max {
		return x.Max < y.Max
	}
	if x.Size != y.Size {
		return x.Size < y.Size
	}
	return x.Label < y.Label
}
// Sort gives a deterministic order to the persistence trajectories.
func (ps *Persistence) Sort() {
sort.Sort(sort.Reverse(straj(ps.traj)))
} | persistence.go | 0.691706 | 0.563438 | persistence.go | starcoder |
package cartridge
// MBC1 represents memory bank controller for MBC1 type.
type MBC1 struct {
	rom           []byte // full cartridge ROM image
	romBankNumber byte   // selected switchable ROM bank; never 0x00/0x20/0x40/0x60 (see selectRomBank)
	ram           []byte // external cartridge RAM, 0x8000 bytes (4 banks of 0x2000)
	ramBankNumber byte   // selected RAM bank, 0-3
	romBanking    bool   // true = ROM banking mode, false = RAM banking mode
	ramEnabled    bool   // gates reads/writes of external RAM
}
// NewMBC1 is a constructor for MBC1 type memory banking controller.
// The controller starts in ROM banking mode with bank 1 selected and
// external RAM disabled.
func NewMBC1(rom []byte) *MBC1 {
	mbc := &MBC1{
		rom:           rom,
		ram:           make([]byte, 0x8000),
		romBankNumber: 1,
		romBanking:    true,
	}
	return mbc
}
// WriteMemory handles writes to MBC1.
// Writes below 0x8000 never touch memory; they program the controller's
// registers (RAM enable, bank selection, banking mode). Writes in
// 0xa000-0xbfff go to the selected external RAM bank when RAM is enabled.
func (mbc *MBC1) WriteMemory(address uint16, value byte) {
	if address < 0x2000 {
		// Any value with 0x0a in the lower 4 bits enables RAM and other values disable it
		mbc.ramEnabled = value&0x0f == 0x0a
	} else if address < 0x4000 {
		// Set lower bits for ROM bank number
		bank := (mbc.romBankNumber & 0xe0) | (value & 0x1f)
		mbc.selectRomBank(bank)
	} else if address < 0x6000 {
		// Select RAM bank or set higher bits for ROM bank number
		// NOTE(review): hardware MBC1 takes the 2-bit secondary
		// register from the low bits of value ((value&0x03)<<5);
		// this uses value&0xe0 — verify against Pan Docs.
		if mbc.romBanking {
			bank := (mbc.romBankNumber & 0x1f) | (value & 0xe0)
			mbc.selectRomBank(bank)
		} else {
			mbc.selectRamBank(value & 0x03)
		}
	} else if address < 0x8000 {
		// Select ROM/RAM banking mode; bit 0 clear selects ROM banking
		mbc.romBanking = value&1 == 0
		if mbc.romBanking {
			mbc.selectRamBank(0)
		} else {
			mbc.selectRomBank(mbc.romBankNumber & 0x1f)
		}
	} else if address >= 0xa000 && address < 0xc000 {
		// Write to RAM
		if mbc.ramEnabled {
			mbc.ram[mbc.mapAddressToRam(address)] = value
		}
	}
}
// ReadMemory handles reads from memory for MBC1.
// 0x0000-0x3fff reads fixed ROM bank 0, 0x4000-0x7fff the switchable
// ROM bank, and 0xa000-0xbfff the switchable RAM bank. Any other
// address panics.
func (mbc *MBC1) ReadMemory(address uint16) byte {
	switch {
	case address < 0x4000:
		// ROM bank 0
		return mbc.rom[address]
	case address < 0x8000:
		// Switchable ROM bank
		return mbc.rom[mbc.mapAddressToRom(address)]
	case address >= 0xa000 && address < 0xc000:
		// Switchable RAM bank
		return mbc.ram[mbc.mapAddressToRam(address)]
	}
	panic("Tried to read invalid memory address from MBC")
}
// selectRomBank records bank as the active switchable ROM bank.
// Bank numbers 0x00, 0x20, 0x40 and 0x60 are translated to the next
// bank (an MBC1 quirk: those banks cannot be selected directly).
func (mbc *MBC1) selectRomBank(bank byte) {
	switch bank {
	case 0x00, 0x20, 0x40, 0x60:
		bank++
	}
	mbc.romBankNumber = bank
}
// selectRamBank records bank as the active external RAM bank.
func (mbc *MBC1) selectRamBank(bank byte) {
	mbc.ramBankNumber = bank
}
// mapAddressToRom translates a CPU address in the switchable ROM window
// (0x4000-0x7fff) into an offset in the cartridge ROM for the selected
// bank.
func (mbc *MBC1) mapAddressToRom(address uint16) int {
	return int(address-0x4000) + int(mbc.romBankNumber)*0x4000
}
func (mbc *MBC1) mapAddressToRam(address uint16) int {
bank := int(mbc.ramBankNumber)
return int(address-0xa000) + (bank * 0x2000)
} | pkg/cartridge/mbc1.go | 0.669961 | 0.457076 | mbc1.go | starcoder |
package helper
import (
"errors"
"github.com/guregu/null"
"strconv"
)
// ConvertToFloat64 converts val into a float64. It returns the
// converted value, whether the result should be treated as null, and
// any conversion error. When nullable is true, a nil val (or an
// invalid null.String) is reported as null with no error.
func ConvertToFloat64(val interface{}, nullable bool) (float64, bool, error) {
	if nullable && val == nil {
		return 0, true, nil
	}
	switch v := val.(type) {
	case null.String:
		if nullable && !v.Valid {
			return 0, !v.Valid, nil // null
		}
		return parseStringToFloat64(v.String)
	case string:
		return parseStringToFloat64(v)
	case bool:
		// Booleans map to 1.0 / 0.0.
		if v {
			return 1.0, false, nil
		} else {
			return 0.0, false, nil
		}
	case int:
		return float64(v), false, nil
	case int8:
		return float64(v), false, nil
	case int16:
		return float64(v), false, nil
	case int32:
		return float64(v), false, nil
	case int64:
		return float64(v), false, nil
	case uint:
		return float64(v), false, nil
	case uint8:
		return float64(v), false, nil
	case uint16:
		return float64(v), false, nil
	case uint32:
		return float64(v), false, nil
	case uint64:
		return float64(v), false, nil
	case float32:
		return float64(v), false, nil
	case float64:
		return v, false, nil
	case null.Float:
		return v.Float64, !v.Valid, nil
	case null.Int:
		return float64(v.Int64), !v.Valid, nil
	default:
		// Unsupported type: report null along with the error.
		return 0, true, errors.New("Cannot convert value to float64")
	}
}
// parseStringToFloat64 parses v as a 64-bit float. On failure it
// reports the value as null alongside the parse error.
func parseStringToFloat64(v string) (float64, bool, error) {
	f, err := strconv.ParseFloat(v, 64)
	if err == nil {
		return f, false, nil
	}
	return 0, true, err
}
// ConvertToFloat32 converts val into a float32. It returns the
// converted value, whether the result should be treated as null, and
// any conversion error. When nullable is true, a nil val (or an
// invalid null.String) is reported as null with no error.
func ConvertToFloat32(val interface{}, nullable bool) (float32, bool, error) {
	if nullable && val == nil {
		return 0, true, nil
	}
	switch v := val.(type) {
	case null.String:
		if nullable && !v.Valid {
			return 0, !v.Valid, nil // null
		}
		return parseStringToFloat32(v.String)
	case string:
		return parseStringToFloat32(v)
	case bool:
		// Booleans map to 1.0 / 0.0.
		if v {
			return 1.0, false, nil
		} else {
			return 0.0, false, nil
		}
	case int:
		return float32(v), false, nil
	case int8:
		return float32(v), false, nil
	case int16:
		return float32(v), false, nil
	case int32:
		return float32(v), false, nil
	case int64:
		return float32(v), false, nil
	case uint:
		return float32(v), false, nil
	case uint8:
		return float32(v), false, nil
	case uint16:
		return float32(v), false, nil
	case uint32:
		return float32(v), false, nil
	case uint64:
		return float32(v), false, nil
	case float32:
		return v, false, nil
	case float64:
		return float32(v), false, nil
	case null.Float:
		return float32(v.Float64), !v.Valid, nil
	case null.Int:
		return float32(v.Int64), !v.Valid, nil
	default:
		// Unsupported type: report null along with the error.
		return 0, true, errors.New("Cannot convert value to float32")
	}
}
// parseStringToFloat32 parses v as a 32-bit float. On failure it
// reports the value as null alongside the parse error.
func parseStringToFloat32(v string) (float32, bool, error) {
	f, err := strconv.ParseFloat(v, 32)
	if err != nil {
		return 0, true, err
	}
	return float32(f), false, nil
}
package pflag
import (
"fmt"
"strconv"
)
// boolFlag is an optional interface implemented by flag Values that
// represent booleans, indicating that the flag can be supplied on the
// command line without "=value" text.
type boolFlag interface {
	Value
	IsBoolFlag() bool
}
// -- bool Value
type boolValue bool

// newBoolValue stores val in *p and returns p viewed as a flag value.
func newBoolValue(val bool, p *bool) *boolValue {
	*p = val
	return (*boolValue)(p)
}

// Set parses s as a boolean and stores the result (false on parse
// failure), returning any parse error.
func (b *boolValue) Set(s string) error {
	parsed, err := strconv.ParseBool(s)
	*b = boolValue(parsed)
	return err
}

// Type reports the flag's type name.
func (b *boolValue) Type() string { return "bool" }

// String renders the current value.
func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) }

// IsBoolFlag marks this value as a boolean flag so it can be supplied
// without "=value" text.
func (b *boolValue) IsBoolFlag() bool { return true }
// boolConv converts a string flag value to a bool; used as the
// conversion callback for typed flag lookup.
func boolConv(sval string) (interface{}, error) {
	v, err := strconv.ParseBool(sval)
	return v, err
}
// GetBool returns the bool value of a flag with the given name.
func (f *FlagSet) GetBool(name string) (bool, error) {
	val, err := f.getFlagType(name, "bool", boolConv)
	if err == nil {
		return val.(bool), nil
	}
	return false, err
}
// BoolVar defines a bool flag with specified name, default value, and usage string.
// The argument p points to a bool variable in which to store the value of the flag.
// It delegates to BoolVarP with an empty shorthand.
func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
	f.BoolVarP(p, name, "", value, usage)
}
// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
// NoOptDefVal is set to "true" so the flag can be given without "=value".
func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
	flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
	flag.NoOptDefVal = "true"
}
// BoolVar defines a bool flag with specified name, default value, and usage string.
// The argument p points to a bool variable in which to store the value of the flag.
// It operates on the package-level default flag set.
func BoolVar(p *bool, name string, value bool, usage string) {
	BoolVarP(p, name, "", value, usage)
}
// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
// It operates on the package-level CommandLine flag set.
func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
	flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
	flag.NoOptDefVal = "true"
}
// Bool defines a bool flag with specified name, default value, and usage string.
// The return value is the address of a bool variable that stores the value of the flag.
func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
	return f.BoolP(name, "", value, usage)
}
// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
	p := new(bool)
	f.BoolVarP(p, name, shorthand, value, usage)
	return p
}
// Bool defines a bool flag with specified name, default value, and usage string.
// The return value is the address of a bool variable that stores the value of the flag.
// It operates on the package-level default flag set.
func Bool(name string, value bool, usage string) *bool {
	return BoolP(name, "", value, usage)
}
// Like Bool, but accepts a shorthand letter that can be used after a single dash.
func BoolP(name, shorthand string, value bool, usage string) *bool {
b := CommandLine.BoolP(name, shorthand, value, usage)
return b
} | Godeps/_workspace/src/github.com/spf13/pflag/bool.go | 0.755005 | 0.404625 | bool.go | starcoder |
package meta
// ConditionStatus represents a condition's status.
// See the ConditionTrue/ConditionFalse/ConditionUnknown constants for
// the valid values.
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in
// the condition; "ConditionFalse" means a resource is not in the condition;
// "ConditionUnknown" means kubernetes can't decide if a resource is in the
// condition or not. In the future, we could add other intermediate
// conditions, e.g. ConditionDegraded.
const (
	// ConditionTrue represents the fact that a given condition is true
	ConditionTrue ConditionStatus = "True"
	// ConditionFalse represents the fact that a given condition is false
	ConditionFalse ConditionStatus = "False"
	// ConditionUnknown represents the fact that a given condition is unknown
	ConditionUnknown ConditionStatus = "Unknown"
)
// LocalObjectReference is a reference to an object in the same namespace
// as the referent.
// If the referent is a cluster-scoped resource (e.g. a ClusterIssuer),
// the reference instead refers to the resource with the given name in the
// configured 'cluster resource namespace', which is set as a flag on the
// controller component (and defaults to the namespace that cert-manager
// runs in).
type LocalObjectReference struct {
	// Name of the resource being referred to.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	Name string
}
// ObjectReference is a reference to an object with a given name, kind and group.
type ObjectReference struct {
	// Name of the resource being referred to.
	Name string
	// Kind of the resource being referred to.
	Kind string
	// Group of the resource being referred to.
	Group string
}
// SecretKeySelector is a reference to a specific 'key' within a Secret
// resource. In some instances, `key` is a required field.
type SecretKeySelector struct {
	// The name of the Secret resource being referred to.
	LocalObjectReference
	// The key of the entry in the Secret resource's `data` field to be used.
	// Some instances of this field may be defaulted, in others it may be
	// required.
	Key string
}
const (
	// TLSCAKey is used as a data key in Secret resources to store a CA
	// certificate.
	TLSCAKey = "ca.crt"
)
package v1beta1
// NOTE(review): the SwaggerDoc methods below follow a machine-generated
// pattern; if a generator produces this file, apply changes there instead.

// SwaggerDoc returns the swagger documentation strings for DataVolume.
func (DataVolume) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DataVolume is an abstraction on top of PersistentVolumeClaims to allow easy population of those PersistentVolumeClaims with relation to VirtualMachines\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:shortName=dv;dvs,categories=all\n+kubebuilder:printcolumn:name=\"Phase\",type=\"string\",JSONPath=\".status.phase\",description=\"The phase the data volume is in\"\n+kubebuilder:printcolumn:name=\"Progress\",type=\"string\",JSONPath=\".status.progress\",description=\"Transfer progress in percentage if known, N/A otherwise\"\n+kubebuilder:printcolumn:name=\"Restarts\",type=\"integer\",JSONPath=\".status.restartCount\",description=\"The number of times the transfer has been restarted.\"\n+kubebuilder:printcolumn:name=\"Age\",type=\"date\",JSONPath=\".metadata.creationTimestamp\"",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataVolumeSpec.
func (DataVolumeSpec) SwaggerDoc() map[string]string {
	return map[string]string{
		"":                  "DataVolumeSpec defines the DataVolume type specification",
		"source":            "Source is the src of the data for the requested DataVolume\n+optional",
		"sourceRef":         "SourceRef is an indirect reference to the source of data for the requested DataVolume\n+optional",
		"pvc":               "PVC is the PVC specification",
		"storage":           "Storage is the requested storage specification",
		"priorityClassName": "PriorityClassName for Importer, Cloner and Uploader pod",
		"contentType":       "DataVolumeContentType options: \"kubevirt\", \"archive\"\n+kubebuilder:validation:Enum=\"kubevirt\";\"archive\"",
		"checkpoints":       "Checkpoints is a list of DataVolumeCheckpoints, representing stages in a multistage import.",
		"finalCheckpoint":   "FinalCheckpoint indicates whether the current DataVolumeCheckpoint is the final checkpoint.",
		"preallocation":     "Preallocation controls whether storage for DataVolumes should be allocated in advance.",
	}
}

// SwaggerDoc returns the swagger documentation strings for StorageSpec.
func (StorageSpec) SwaggerDoc() map[string]string {
	return map[string]string{
		"":                 "StorageSpec defines the Storage type specification",
		"accessModes":      "AccessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+optional",
		"selector":         "A label query over volumes to consider for binding.\n+optional",
		"resources":        "Resources represents the minimum resources the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources\n+optional",
		"volumeName":       "VolumeName is the binding reference to the PersistentVolume backing this claim.\n+optional",
		"storageClassName": "Name of the StorageClass required by the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1\n+optional",
		"volumeMode":       "volumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec.\n+optional",
		"dataSource":       "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.\n+optional",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataVolumeCheckpoint.
func (DataVolumeCheckpoint) SwaggerDoc() map[string]string {
	return map[string]string{
		"":         "DataVolumeCheckpoint defines a stage in a warm migration.",
		"previous": "Previous is the identifier of the snapshot from the previous checkpoint.",
		"current":  "Current is the identifier of the snapshot created for this checkpoint.",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataVolumeSource.
func (DataVolumeSource) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DataVolumeSource represents the source for our Data Volume, this can be HTTP, Imageio, S3, Registry or an existing PVC",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataVolumeSourcePVC.
func (DataVolumeSourcePVC) SwaggerDoc() map[string]string {
	return map[string]string{
		"":          "DataVolumeSourcePVC provides the parameters to create a Data Volume from an existing PVC",
		"namespace": "The namespace of the source PVC",
		"name":      "The name of the source PVC",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataVolumeBlankImage.
func (DataVolumeBlankImage) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DataVolumeBlankImage provides the parameters to create a new raw blank image for the PVC",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataVolumeSourceUpload.
func (DataVolumeSourceUpload) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DataVolumeSourceUpload provides the parameters to create a Data Volume by uploading the source",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataVolumeSourceS3.
func (DataVolumeSourceS3) SwaggerDoc() map[string]string {
	return map[string]string{
		"":              "DataVolumeSourceS3 provides the parameters to create a Data Volume from an S3 source",
		"url":           "URL is the url of the S3 source",
		"secretRef":     "SecretRef provides the secret reference needed to access the S3 source",
		"certConfigMap": "CertConfigMap is a configmap reference, containing a Certificate Authority(CA) public key, and a base64 encoded pem certificate\n+optional",
	}
}
// SwaggerDoc returns the swagger documentation strings for DataVolumeSourceRegistry.
func (DataVolumeSourceRegistry) SwaggerDoc() map[string]string {
	return map[string]string{
		"":              "DataVolumeSourceRegistry provides the parameters to create a Data Volume from an registry source",
		"url":           "URL is the url of the Docker registry source",
		"secretRef":     "SecretRef provides the secret reference needed to access the Registry source",
		"certConfigMap": "CertConfigMap provides a reference to the Registry certs",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataVolumeSourceHTTP.
func (DataVolumeSourceHTTP) SwaggerDoc() map[string]string {
	return map[string]string{
		"":              "DataVolumeSourceHTTP can be either an http or https endpoint, with an optional basic auth user name and password, and an optional configmap containing additional CAs",
		"url":           "URL is the URL of the http(s) endpoint",
		"secretRef":     "SecretRef A Secret reference, the secret should contain accessKeyId (user name) base64 encoded, and secretKey (password) also base64 encoded\n+optional",
		"certConfigMap": "CertConfigMap is a configmap reference, containing a Certificate Authority(CA) public key, and a base64 encoded pem certificate\n+optional",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataVolumeSourceImageIO.
func (DataVolumeSourceImageIO) SwaggerDoc() map[string]string {
	return map[string]string{
		"":              "DataVolumeSourceImageIO provides the parameters to create a Data Volume from an imageio source",
		"url":           "URL is the URL of the ovirt-engine",
		"diskId":        "DiskID provides id of a disk to be imported",
		"secretRef":     "SecretRef provides the secret reference needed to access the ovirt-engine",
		"certConfigMap": "CertConfigMap provides a reference to the CA cert",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataVolumeSourceVDDK.
func (DataVolumeSourceVDDK) SwaggerDoc() map[string]string {
	return map[string]string{
		"":            "DataVolumeSourceVDDK provides the parameters to create a Data Volume from a Vmware source",
		"url":         "URL is the URL of the vCenter or ESXi host with the VM to migrate",
		"uuid":        "UUID is the UUID of the virtual machine that the backing file is attached to in vCenter/ESXi",
		"backingFile": "BackingFile is the path to the virtual hard disk to migrate from vCenter/ESXi",
		"thumbprint":  "Thumbprint is the certificate thumbprint of the vCenter or ESXi host",
		"secretRef":   "SecretRef provides a reference to a secret containing the username and password needed to access the vCenter or ESXi host",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataVolumeSourceRef.
func (DataVolumeSourceRef) SwaggerDoc() map[string]string {
	return map[string]string{
		"":          "DataVolumeSourceRef defines an indirect reference to the source of data for the DataVolume",
		"kind":      "The kind of the source reference, currently only \"DataSource\" is supported",
		"namespace": "The namespace of the source reference, defaults to the DataVolume namespace\n+optional",
		"name":      "The name of the source reference",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataVolumeStatus.
func (DataVolumeStatus) SwaggerDoc() map[string]string {
	return map[string]string{
		"":             "DataVolumeStatus contains the current status of the DataVolume",
		"phase":        "Phase is the current phase of the data volume",
		"restartCount": "RestartCount is the number of times the pod populating the DataVolume has restarted",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataVolumeList.
func (DataVolumeList) SwaggerDoc() map[string]string {
	return map[string]string{
		"":      "DataVolumeList provides the needed parameters to do request a list of Data Volumes from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
		"items": "Items provides a list of DataVolumes",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataVolumeCondition.
func (DataVolumeCondition) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DataVolumeCondition represents the state of a data volume condition.",
	}
}

// SwaggerDoc returns the swagger documentation strings for StorageProfile.
func (StorageProfile) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "StorageProfile provides a CDI specific recommendation for storage parameters\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:scope=Cluster",
	}
}

// SwaggerDoc returns the swagger documentation strings for StorageProfileSpec.
func (StorageProfileSpec) SwaggerDoc() map[string]string {
	return map[string]string{
		"":                  "StorageProfileSpec defines specification for StorageProfile",
		"claimPropertySets": "ClaimPropertySets is a provided set of properties applicable to PVC",
	}
}

// SwaggerDoc returns the swagger documentation strings for StorageProfileStatus.
func (StorageProfileStatus) SwaggerDoc() map[string]string {
	return map[string]string{
		"":                  "StorageProfileStatus provides the most recently observed status of the StorageProfile",
		"storageClass":      "The StorageClass name for which capabilities are defined",
		"provisioner":       "The Storage class provisioner plugin name",
		"claimPropertySets": "ClaimPropertySets computed from the spec and detected in the system",
	}
}

// SwaggerDoc returns the swagger documentation strings for ClaimPropertySet.
func (ClaimPropertySet) SwaggerDoc() map[string]string {
	return map[string]string{
		"":              "ClaimPropertySet is a set of properties applicable to PVC",
		"accessModes":   "AccessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+optional",
		"volumeMode":    "VolumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec.\n+optional",
		"cloneStrategy": "CloneStrategy defines the preferred method for performing a CDI clone",
	}
}

// SwaggerDoc returns the swagger documentation strings for StorageProfileList.
func (StorageProfileList) SwaggerDoc() map[string]string {
	return map[string]string{
		"":      "StorageProfileList provides the needed parameters to request a list of StorageProfile from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
		"items": "Items provides a list of StorageProfile",
	}
}
// SwaggerDoc returns the swagger documentation strings for DataSource.
func (DataSource) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DataSource references an import/clone source for a DataVolume\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataSourceSpec.
func (DataSourceSpec) SwaggerDoc() map[string]string {
	return map[string]string{
		"":       "DataSourceSpec defines specification for DataSource",
		"source": "Source is the source of the data referenced by the DataSource",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataSourceSource.
func (DataSourceSource) SwaggerDoc() map[string]string {
	return map[string]string{
		"":    "DataSourceSource represents the source for our DataSource",
		"pvc": "+optional",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataSourceStatus.
func (DataSourceStatus) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DataSourceStatus provides the most recently observed status of the DataSource",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataSourceCondition.
func (DataSourceCondition) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DataSourceCondition represents the state of a data source condition",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataSourceList.
func (DataSourceList) SwaggerDoc() map[string]string {
	return map[string]string{
		"":      "DataSourceList provides the needed parameters to do request a list of Data Sources from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
		"items": "Items provides a list of DataSources",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataImportCron.
func (DataImportCron) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DataImportCron defines a cron job for recurring polling/importing disk images as PVCs into a golden image namespace\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataImportCronSpec.
func (DataImportCronSpec) SwaggerDoc() map[string]string {
	return map[string]string{
		"":                  "DataImportCronSpec defines specification for DataImportCron",
		"source":            "Source specifies where to poll disk images from",
		"schedule":          "Schedule specifies in cron format when and how often to look for new imports",
		"garbageCollect":    "GarbageCollect specifies whether old PVCs should be cleaned up after a new PVC is imported.\nOptions are currently \"Never\" and \"Outdated\", defaults to \"Never\".\n+optional",
		"managedDataSource": "ManagedDataSource specifies the name of the corresponding DataSource this cron will manage.\nDataSource has to be in the same namespace.",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataImportCronSource.
func (DataImportCronSource) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DataImportCronSource defines where to poll and import disk images from",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataImportCronStatus.
func (DataImportCronStatus) SwaggerDoc() map[string]string {
	return map[string]string{
		"":                       "DataImportCronStatus provides the most recently observed status of the DataImportCron",
		"lastImportedPVC":        "LastImportedPVC is the last imported PVC",
		"lastExecutionTimestamp": "LastExecutionTimestamp is the time of the last polling",
		"lastImportTimestamp":    "LastImportTimestamp is the time of the last import",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataImportCronCondition.
func (DataImportCronCondition) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "DataImportCronCondition represents the state of a data import cron condition",
	}
}

// SwaggerDoc returns the swagger documentation strings for DataImportCronList.
func (DataImportCronList) SwaggerDoc() map[string]string {
	return map[string]string{
		"":      "DataImportCronList provides the needed parameters to do request a list of DataImportCrons from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
		"items": "Items provides a list of DataImportCrons",
	}
}

// SwaggerDoc returns the swagger documentation strings for CDI.
func (CDI) SwaggerDoc() map[string]string {
	return map[string]string{
		"":       "CDI is the CDI Operator CRD\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:shortName=cdi;cdis,scope=Cluster\n+kubebuilder:printcolumn:name=\"Age\",type=\"date\",JSONPath=\".metadata.creationTimestamp\"\n+kubebuilder:printcolumn:name=\"Phase\",type=\"string\",JSONPath=\".status.phase\"",
		"status": "+optional",
	}
}

// SwaggerDoc returns the swagger documentation strings for CertConfig.
func (CertConfig) SwaggerDoc() map[string]string {
	return map[string]string{
		"":            "CertConfig contains the tunables for TLS certificates",
		"duration":    "The requested 'duration' (i.e. lifetime) of the Certificate.",
		"renewBefore": "The amount of time before the currently issued certificate's `notAfter`\ntime that we will begin to attempt to renew the certificate.",
	}
}
// SwaggerDoc returns the swagger documentation strings for CDICertConfig.
func (CDICertConfig) SwaggerDoc() map[string]string {
	return map[string]string{
		"":       "CDICertConfig has the CertConfigs for CDI",
		"ca":     "CA configuration\nCA certs are kept in the CA bundle as long as they are valid",
		"server": "Server configuration\nCerts are rotated and discarded",
	}
}

// SwaggerDoc returns the swagger documentation strings for CDISpec.
func (CDISpec) SwaggerDoc() map[string]string {
	return map[string]string{
		"":                      "CDISpec defines our specification for the CDI installation",
		"imagePullPolicy":       "+kubebuilder:validation:Enum=Always;IfNotPresent;Never\nPullPolicy describes a policy for if/when to pull a container image",
		"uninstallStrategy":     "+kubebuilder:validation:Enum=RemoveWorkloads;BlockUninstallIfWorkloadsExist\nCDIUninstallStrategy defines the state to leave CDI on uninstall",
		"infra":                 "Rules on which nodes CDI infrastructure pods will be scheduled",
		"workload":              "Restrict on which nodes CDI workload pods will be scheduled",
		"cloneStrategyOverride": "Clone strategy override: should we use a host-assisted copy even if snapshots are available?\n+kubebuilder:validation:Enum=\"copy\";\"snapshot\"",
		"config":                "CDIConfig at CDI level",
		"certConfig":            "certificate configuration",
		"priorityClass":         "PriorityClass of the CDI control plane",
	}
}

// SwaggerDoc returns the swagger documentation strings for CDIStatus.
func (CDIStatus) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "CDIStatus defines the status of the installation",
	}
}

// SwaggerDoc returns the swagger documentation strings for CDIList.
func (CDIList) SwaggerDoc() map[string]string {
	return map[string]string{
		"":      "CDIList provides the needed parameters to do request a list of CDIs from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
		"items": "Items provides a list of CDIs",
	}
}

// SwaggerDoc returns the swagger documentation strings for CDIConfig.
func (CDIConfig) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "CDIConfig provides a user configuration for CDI\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:scope=Cluster",
	}
}

// SwaggerDoc returns the swagger documentation strings for FilesystemOverhead.
func (FilesystemOverhead) SwaggerDoc() map[string]string {
	return map[string]string{
		"":             "FilesystemOverhead defines the reserved size for PVCs with VolumeMode: Filesystem",
		"global":       "Global is how much space of a Filesystem volume should be reserved for overhead. This value is used unless overridden by a more specific value (per storageClass)",
		"storageClass": "StorageClass specifies how much space of a Filesystem volume should be reserved for safety. The keys are the storageClass and the values are the overhead. This value overrides the global value",
	}
}

// SwaggerDoc returns the swagger documentation strings for CDIConfigSpec.
func (CDIConfigSpec) SwaggerDoc() map[string]string {
	return map[string]string{
		"":                         "CDIConfigSpec defines specification for user configuration",
		"uploadProxyURLOverride":   "Override the URL used when uploading to a DataVolume",
		"importProxy":              "ImportProxy contains importer pod proxy configuration.\n+optional",
		"scratchSpaceStorageClass": "Override the storage class to used for scratch space during transfer operations. The scratch space storage class is determined in the following order: 1. value of scratchSpaceStorageClass, if that doesn't exist, use the default storage class, if there is no default storage class, use the storage class of the DataVolume, if no storage class specified, use no storage class for scratch space",
		"podResourceRequirements":  "ResourceRequirements describes the compute resource requirements.",
		"featureGates":             "FeatureGates are a list of specific enabled feature gates",
		"filesystemOverhead":       "FilesystemOverhead describes the space reserved for overhead when using Filesystem volumes. A value is between 0 and 1, if not defined it is 0.055 (5.5% overhead)",
		"preallocation":            "Preallocation controls whether storage for DataVolumes should be allocated in advance.",
		"insecureRegistries":       "InsecureRegistries is a list of TLS disabled registries",
	}
}

// SwaggerDoc returns the swagger documentation strings for CDIConfigStatus.
func (CDIConfigStatus) SwaggerDoc() map[string]string {
	return map[string]string{
		"":                               "CDIConfigStatus provides the most recently observed status of the CDI Config resource",
		"uploadProxyURL":                 "The calculated upload proxy URL",
		"importProxy":                    "ImportProxy contains importer pod proxy configuration.\n+optional",
		"scratchSpaceStorageClass":       "The calculated storage class to be used for scratch space",
		"defaultPodResourceRequirements": "ResourceRequirements describes the compute resource requirements.",
		"filesystemOverhead":             "FilesystemOverhead describes the space reserved for overhead when using Filesystem volumes. A percentage value is between 0 and 1",
		"preallocation":                  "Preallocation controls whether storage for DataVolumes should be allocated in advance.",
	}
}

// SwaggerDoc returns the swagger documentation strings for CDIConfigList.
func (CDIConfigList) SwaggerDoc() map[string]string {
	return map[string]string{
		"":      "CDIConfigList provides the needed parameters to do request a list of CDIConfigs from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
		"items": "Items provides a list of CDIConfigs",
	}
}
// SwaggerDoc returns the swagger documentation strings for ImportProxy,
// keyed by JSON field name (the empty key documents the type itself).
// NOTE: generated code — edit the source annotations, not this map.
func (ImportProxy) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "ImportProxy provides the information on how to configure the importer pod proxy.",
		"HTTPProxy": "HTTPProxy is the URL http://<username>:<pswd>@<ip>:<port> of the import proxy for HTTP requests. Empty means unset and will not result in the import pod env var.\n+optional",
		"HTTPSProxy": "HTTPSProxy is the URL https://<username>:<pswd>@<ip>:<port> of the import proxy for HTTPS requests. Empty means unset and will not result in the import pod env var.\n+optional",
		"noProxy": "NoProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in the import pod env var.\n+optional",
		"trustedCAProxy": "TrustedCAProxy is the name of a ConfigMap in the cdi namespace that contains a user-provided trusted certificate authority (CA) bundle.\nThe TrustedCAProxy field is consumed by the import controller that is resposible for coping it to a config map named trusted-ca-proxy-bundle-cm in the cdi namespace.\nHere is an example of the ConfigMap (in yaml):\n\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: trusted-ca-proxy-bundle-cm\n namespace: cdi\ndata:\n ca.pem: |",
	}
} | pkg/apis/core/v1beta1/types_swagger_generated.go | 0.777596 | 0.422803 | types_swagger_generated.go | starcoder
package ring
import (
"encoding/binary"
"errors"
"math/bits"
"github.com/tuneinsight/lattigo/v3/utils"
)
// Poly is the structure that contains the coefficients of a polynomial.
type Poly struct {
	Coeffs  [][]uint64 // Coefficients in CRT representation: Coeffs[i][j] is coefficient j modulo the i-th modulus
	IsNTT   bool       // Flag carried by copies and (de)serialization; set when the coefficients are in the NTT domain
	IsMForm bool       // Flag carried by copies and (de)serialization; set when the coefficients are in the Montgomery form
}
// NewPoly creates a new polynomial with N coefficients set to zero and
// nbModuli moduli. The IsNTT and IsMForm flags are left at their zero value
// (false).
func NewPoly(N, nbModuli int) (pol *Poly) {
	coeffs := make([][]uint64, nbModuli)
	for i := range coeffs {
		coeffs[i] = make([]uint64, N)
	}
	return &Poly{Coeffs: coeffs}
}
// Degree returns the number of coefficients of the polynomial, which equals the degree of the Ring cyclotomic polynomial.
// NOTE: this reads Coeffs[0], so it panics on a polynomial with no moduli.
func (pol *Poly) Degree() int {
	return len(pol.Coeffs[0])
}
// LenModuli returns the current number of moduli (the number of CRT rows in Coeffs).
func (pol *Poly) LenModuli() int {
	return len(pol.Coeffs)
}
// Level returns the current number of moduli minus 1.
func (pol *Poly) Level() int {
	return len(pol.Coeffs) - 1
}
// Zero sets all coefficients of the target polynomial to 0.
// The flags and the allocated backing storage are left untouched.
func (pol *Poly) Zero() {
	for _, coeffs := range pol.Coeffs {
		// This loop shape is recognized by the compiler and lowered to memclr.
		for j := range coeffs {
			coeffs[j] = 0
		}
	}
}
// CopyNew creates an exact copy of the target polynomial, including a deep
// copy of the coefficients and the IsNTT/IsMForm flags.
func (pol *Poly) CopyNew() (p1 *Poly) {
	coeffs := make([][]uint64, len(pol.Coeffs))
	for i, row := range pol.Coeffs {
		coeffs[i] = make([]uint64, len(row))
		copy(coeffs[i], row)
	}
	return &Poly{Coeffs: coeffs, IsNTT: pol.IsNTT, IsMForm: pol.IsMForm}
}
// CopyValues copies the coefficients of p0 on p1 within the given Ring. It requires p1 to be at least as big as p0.
// Expects the degree of both polynomials to be identical.
// Does not transfer the IsNTT and IsMForm flags.
func CopyValues(p0, p1 *Poly) {
	// Copy only the moduli both polynomials have in common.
	CopyValuesLvl(utils.MinInt(p0.Level(), p1.Level()), p0, p1)
}
// Copy copies the coefficients of p0 on p1 within the given Ring. It requires p1 to be at least as big as p0.
// Expects the degree of both polynomials to be identical.
// Transfers the IsNTT and IsMForm flags.
func Copy(p0, p1 *Poly) {
	// Copy only the moduli both polynomials have in common, then mirror the flags.
	CopyValuesLvl(utils.MinInt(p0.Level(), p1.Level()), p0, p1)
	p1.IsNTT = p0.IsNTT
	p1.IsMForm = p0.IsMForm
}
// CopyValuesLvl copies the coefficients of p0 on p1 within the given Ring for
// the moduli from 0 to level (inclusive).
// Expects the degree of both polynomials to be identical.
// Does not transfer the IsNTT and IsMForm flags.
func CopyValuesLvl(level int, p0, p1 *Poly) {
	if p0 == p1 {
		// Nothing to do when source and destination are the same polynomial.
		return
	}
	for i, src := range p0.Coeffs[:level+1] {
		copy(p1.Coeffs[i], src)
	}
}
// CopyLvl copies the coefficients of p0 on p1 within the given Ring for the moduli from 0 to level.
// Expects the degree of both polynomials to be identical.
// Transfers the IsNTT and IsMForm flags.
func CopyLvl(level int, p0, p1 *Poly) {
	CopyValuesLvl(level, p0, p1)
	p1.IsNTT = p0.IsNTT
	p1.IsMForm = p0.IsMForm
}
// CopyValues copies the coefficients of p1 on the target polynomial.
// Only the common moduli (minLevel(pol, p1) + 1 of them) are copied.
// Expects the degree of both polynomials to be identical.
// Does not transfer the IsNTT and IsMForm flags.
func (pol *Poly) CopyValues(p1 *Poly) {
	if pol == p1 {
		// Copying a polynomial onto itself is a no-op.
		return
	}
	minLevel := utils.MinInt(pol.Level(), p1.Level())
	for i := 0; i <= minLevel; i++ {
		copy(pol.Coeffs[i], p1.Coeffs[i])
	}
}
// Copy copies the coefficients of p1 on the target polynomial.
// Only copies minLevel(pol, p1) levels.
// Transfers the IsNTT and IsMForm flags.
func (pol *Poly) Copy(p1 *Poly) {
	pol.CopyValues(p1)
	pol.IsNTT = p1.IsNTT
	pol.IsMForm = p1.IsMForm
}
// Equals returns true if the receiver Poly is equal to the provided other Poly.
// This function checks for strict equality between the polynomial coefficients
// (i.e., it does not consider congruence as equality within the ring like
// `Ring.Equals` does).
// The IsNTT and IsMForm flags are not compared.
func (pol *Poly) Equals(other *Poly) bool {
	if pol == other {
		// Same pointer (including both nil): trivially equal.
		return true
	}
	if pol == nil || other == nil || len(pol.Coeffs) != len(other.Coeffs) {
		return false
	}
	for i, row := range pol.Coeffs {
		otherRow := other.Coeffs[i]
		if len(otherRow) != len(row) {
			return false
		}
		for j, c := range row {
			if otherRow[j] != c {
				return false
			}
		}
	}
	return true
}
// SetCoefficients sets the coefficients of the polynomial directly from a CRT
// format (double slice). The target's backing storage is reused; rows are
// copied, not aliased.
func (pol *Poly) SetCoefficients(coeffs [][]uint64) {
	for i, row := range coeffs {
		copy(pol.Coeffs[i], row)
	}
}
// GetCoefficients returns a new double slice that contains a deep copy of the
// coefficients of the polynomial.
func (pol *Poly) GetCoefficients() (coeffs [][]uint64) {
	coeffs = make([][]uint64, len(pol.Coeffs))
	for i, row := range pol.Coeffs {
		// append-clone allocates a fresh row of exactly len(row) elements.
		coeffs[i] = append([]uint64(nil), row...)
	}
	return
}
// WriteCoeffsTo serializes a matrix of coefficients into data starting at
// offset pointer, writing each of the N coefficients of each of the
// numberModuli rows as a big-endian uint64 (8 bytes). It returns the offset
// one past the last written byte; the error is always nil.
func WriteCoeffsTo(pointer, N, numberModuli int, coeffs [][]uint64, data []byte) (int, error) {
	for i := 0; i < numberModuli; i++ {
		row := coeffs[i]
		for j := 0; j < N; j++ {
			binary.BigEndian.PutUint64(data[pointer:pointer+8], row[j])
			pointer += 8
		}
	}
	return pointer, nil
}
// WriteTo serializes the polynomial into data: a 4-byte header
// (log2(N), number of moduli, IsNTT flag, IsMForm flag) followed by the
// big-endian 8-byte coefficients.
// It returns the number of written bytes, and the corresponding error, if it occurred.
func (pol *Poly) WriteTo(data []byte) (int, error) {
	N := pol.Degree()
	numberModuli := pol.LenModuli()
	if len(data) < pol.GetDataLen(true) {
		// The data slice is not large enough to hold the header and coefficients.
		return 0, errors.New("data array is too small to write ring.Poly")
	}
	data[0] = uint8(bits.Len64(uint64(N)) - 1)
	data[1] = uint8(numberModuli)
	// Write both flag bytes unconditionally: the previous code only wrote them
	// when the flag was true, so a reused (dirty) buffer could leak stale
	// non-zero flag bytes into the serialization.
	if pol.IsNTT {
		data[2] = 1
	} else {
		data[2] = 0
	}
	if pol.IsMForm {
		data[3] = 1
	} else {
		data[3] = 0
	}
	return WriteCoeffsTo(4, N, numberModuli, pol.Coeffs, data)
}
// WriteTo32 serializes the polynomial into data: a 4-byte header
// (log2(N), number of moduli, IsNTT flag, IsMForm flag) followed by the
// big-endian 4-byte coefficients (each coefficient truncated to its 32
// low-order bits by WriteCoeffsTo32).
// It returns the number of written bytes, and the corresponding error, if it occurred.
func (pol *Poly) WriteTo32(data []byte) (int, error) {
	N := pol.Degree()
	numberModuli := pol.LenModuli()
	if len(data) < pol.GetDataLen32(true) {
		// The data slice is not large enough to hold the header and coefficients.
		return 0, errors.New("data array is too small to write ring.Poly")
	}
	data[0] = uint8(bits.Len64(uint64(N)) - 1)
	data[1] = uint8(numberModuli)
	// Write both flag bytes unconditionally: the previous code only wrote them
	// when the flag was true, so a reused (dirty) buffer could leak stale
	// non-zero flag bytes into the serialization.
	if pol.IsNTT {
		data[2] = 1
	} else {
		data[2] = 0
	}
	if pol.IsMForm {
		data[3] = 1
	} else {
		data[3] = 0
	}
	return WriteCoeffsTo32(4, N, numberModuli, pol.Coeffs, data)
}
// WriteCoeffsTo32 serializes a matrix of coefficients into data starting at
// offset pointer, writing each coefficient as a big-endian uint32 (4 bytes);
// each coefficient is truncated to its 32 low-order bits. It returns the
// offset one past the last written byte; the error is always nil.
func WriteCoeffsTo32(pointer, N, numberModuli int, coeffs [][]uint64, data []byte) (int, error) {
	for i := 0; i < numberModuli; i++ {
		row := coeffs[i]
		for j := 0; j < N; j++ {
			binary.BigEndian.PutUint32(data[pointer:pointer+4], uint32(row[j]))
			pointer += 4
		}
	}
	return pointer, nil
}
// GetDataLen32 returns the number of bytes the polynomial will take when written to data
// with WriteTo32 (4 bytes per coefficient).
// It can take into account meta data if necessary.
func (pol *Poly) GetDataLen32(WithMetadata bool) (cnt int) {
	// 4 bytes per coefficient, over all moduli.
	cnt = (pol.LenModuli() * pol.Degree()) << 2
	if WithMetadata {
		// 4 header bytes: log2(N), number of moduli, IsNTT flag, IsMForm flag.
		cnt += 4
	}
	return
}
// WriteCoeffs writes the coefficients (without any metadata header) to the
// given data array and returns the number of written bytes.
// NOTE(review): unlike WriteTo, no length check is performed here; a data
// slice smaller than GetDataLen(false) causes a slice-bounds panic.
func (pol *Poly) WriteCoeffs(data []byte) (int, error) {
	return WriteCoeffsTo(0, pol.Degree(), pol.LenModuli(), pol.Coeffs, data)
}
// GetDataLen returns the number of bytes the polynomial will take when written to data
// with WriteTo (8 bytes per coefficient).
// It can take into account meta data if necessary.
func (pol *Poly) GetDataLen(WithMetadata bool) (cnt int) {
	// 8 bytes per coefficient, over all moduli.
	cnt = (pol.LenModuli() * pol.Degree()) << 3
	if WithMetadata {
		// 4 header bytes: log2(N), number of moduli, IsNTT flag, IsMForm flag.
		cnt += 4
	}
	return
}
// DecodeCoeffs deserializes big-endian uint64 coefficients from data starting
// at offset pointer into the pre-allocated coeffs matrix (numberModuli rows of
// N coefficients each). It returns the offset one past the last read byte;
// the error is always nil.
func DecodeCoeffs(pointer, N, numberModuli int, coeffs [][]uint64, data []byte) (int, error) {
	for i := 0; i < numberModuli; i++ {
		row := coeffs[i]
		for j := 0; j < N; j++ {
			row[j] = binary.BigEndian.Uint64(data[pointer : pointer+8])
			pointer += 8
		}
	}
	return pointer, nil
}
// DecodeCoeffsNew deserializes big-endian uint64 coefficients from data
// starting at offset pointer, allocating a fresh row of N coefficients for
// each of the numberModuli moduli. It returns the offset one past the last
// read byte; the error is always nil.
func DecodeCoeffsNew(pointer, N, numberModuli int, coeffs [][]uint64, data []byte) (int, error) {
	for i := 0; i < numberModuli; i++ {
		row := make([]uint64, N)
		for j := range row {
			row[j] = binary.BigEndian.Uint64(data[pointer : pointer+8])
			pointer += 8
		}
		coeffs[i] = row
	}
	return pointer, nil
}
// MarshalBinary encodes the target polynomial on a slice of bytes,
// allocating a buffer of exactly GetDataLen(true) bytes and delegating to WriteTo.
func (pol *Poly) MarshalBinary() (data []byte, err error) {
	data = make([]byte, pol.GetDataLen(true))
	_, err = pol.WriteTo(data)
	return
}
// UnmarshalBinary decodes a slice of bytes on the target polynomial.
// The expected layout is the one produced by MarshalBinary: a 4-byte header
// (log2(N), number of moduli, IsNTT flag, IsMForm flag) followed by the
// big-endian 8-byte coefficients.
func (pol *Poly) UnmarshalBinary(data []byte) (err error) {
	N := int(1 << data[0])
	numberModuli := int(data[1])
	// Assign the flags from the header instead of only setting them when the
	// byte is 1: the previous code never cleared a flag, so decoding into a
	// reused Poly could keep stale true values.
	pol.IsNTT = data[2] == 1
	pol.IsMForm = data[3] == 1
	pointer := 4
	// The payload must contain exactly N coefficients for each modulus.
	if ((len(data) - pointer) >> 3) != N*numberModuli {
		return errors.New("invalid polynomial encoding")
	}
	if _, err = pol.DecodePolyNew(data); err != nil {
		return err
	}
	return nil
}
// DecodePolyNew decodes a slice of bytes in the target polynomial and returns
// the number of bytes decoded. The expected layout is the one produced by
// WriteTo (4-byte header, then big-endian 8-byte coefficients).
func (pol *Poly) DecodePolyNew(data []byte) (pointer int, err error) {
	N := int(1 << data[0])
	numberModuli := int(data[1])
	// Assign the flags from the header instead of only setting them when the
	// byte is 1, so a reused receiver cannot keep stale true values.
	pol.IsNTT = data[2] == 1
	pol.IsMForm = data[3] == 1
	pointer = 4
	// (Re)allocate the top-level slice when it does not match the encoded
	// modulus count: avoids an out-of-range panic on an undersized receiver
	// and stale extra rows on an oversized one. (len(nil) == 0 covers the
	// previous nil check.)
	if len(pol.Coeffs) != numberModuli {
		pol.Coeffs = make([][]uint64, numberModuli)
	}
	return DecodeCoeffsNew(pointer, N, numberModuli, pol.Coeffs, data)
}
// DecodePolyNew32 decodes a slice of bytes in the target polynomial and
// returns the number of bytes decoded. The expected layout is the one
// produced by WriteTo32 (4-byte header, then big-endian 4-byte coefficients).
func (pol *Poly) DecodePolyNew32(data []byte) (pointer int, err error) {
	N := int(1 << data[0])
	numberModuli := int(data[1])
	// Assign the flags from the header instead of only setting them when the
	// byte is 1, so a reused receiver cannot keep stale true values.
	pol.IsNTT = data[2] == 1
	pol.IsMForm = data[3] == 1
	pointer = 4
	// (Re)allocate the top-level slice when it does not match the encoded
	// modulus count: avoids an out-of-range panic on an undersized receiver
	// and stale extra rows on an oversized one.
	if len(pol.Coeffs) != numberModuli {
		pol.Coeffs = make([][]uint64, numberModuli)
	}
	return DecodeCoeffsNew32(pointer, N, numberModuli, pol.Coeffs, data)
}
// DecodeCoeffsNew32 converts a byte array to a matrix of coefficients,
// reading each coefficient as a big-endian uint32 (4 bytes) and widening it
// to uint64. A fresh row of N coefficients is allocated per modulus.
// It returns the offset one past the last read byte; the error is always nil.
func DecodeCoeffsNew32(pointer, N, numberModuli int, coeffs [][]uint64, data []byte) (int, error) {
	// Bytes consumed per modulus: N coefficients of 4 bytes each.
	tmp := N << 2
	for i := 0; i < numberModuli; i++ {
		coeffs[i] = make([]uint64, N)
		for j := 0; j < N; j++ {
			coeffs[i][j] = uint64(binary.BigEndian.Uint32(data[pointer+(j<<2) : pointer+((j+1)<<2)]))
		}
		pointer += tmp
	}
	return pointer, nil
} | ring/ring_poly.go | 0.800419 | 0.562477 | ring_poly.go | starcoder
package untrusted_deserialization
import (
"github.com/threagile/threagile/model"
)
// Category returns the static metadata describing the Untrusted
// Deserialization risk rule (identifier, documentation, mitigation advice,
// STRIDE classification and CWE id). The returned value is a fresh literal on
// every call.
func Category() model.RiskCategory {
	return model.RiskCategory{
		Id:    "untrusted-deserialization",
		Title: "Untrusted Deserialization",
		Description: "When a technical asset accepts data in a specific serialized form (like Java or .NET serialization), " +
			"Untrusted Deserialization risks might arise." +
			"<br><br>See <a href=\"https://christian-schneider.net/JavaDeserializationSecurityFAQ.html\">https://christian-schneider.net/JavaDeserializationSecurityFAQ.html</a> " +
			"for more details.",
		Impact:     "If this risk is unmitigated, attackers might be able to execute code on target systems by exploiting untrusted deserialization endpoints.",
		ASVS:       "V5 - Validation, Sanitization and Encoding Verification Requirements",
		CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Deserialization_Cheat_Sheet.html",
		Action:     "Prevention of Deserialization of Untrusted Data",
		Mitigation: "Try to avoid the deserialization of untrusted data (even of data within the same trust-boundary as long as " +
			"it is sent across a remote connection) in order to stay safe from Untrusted Deserialization vulnerabilities. " +
			"Alternatively a strict whitelisting approach of the classes/types/values to deserialize might help as well. " +
			"When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
		Check:          "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
		Function:       model.Architecture,
		STRIDE:         model.Tampering,
		DetectionLogic: "In-scope technical assets accepting serialization data formats (including EJB and RMI protocols).",
		RiskAssessment: "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
		FalsePositives: "Fully trusted (i.e. cryptographically signed or similar) data deserialized can be considered " +
			"as false positives after individual review.",
		ModelFailurePossibleReason: false,
		CWE:                        502,
	}
}
// SupportedTags returns the model tags this rule reacts to; this rule is not
// tag-driven, so the list is empty.
func SupportedTags() []string {
	return []string{}
}
// GenerateRisks scans all in-scope technical assets and creates an Untrusted
// Deserialization risk for every asset that accepts serialized data, is an
// EJB, or receives IIOP/JRMP traffic. Crossing a network trust boundary on
// such a link raises the likelihood of the generated risk.
func GenerateRisks() []model.Risk {
	risks := make([]model.Risk, 0)
	for _, id := range model.SortedTechnicalAssetIDs() {
		asset := model.ParsedModelRoot.TechnicalAssets[id]
		if asset.OutOfScope {
			continue
		}
		relevant, acrossTrustBoundary := false, false
		commLinkTitle := ""
		// Accepting the serialization data format makes the asset relevant.
		for _, format := range asset.DataFormatsAccepted {
			if format == model.Serialization {
				relevant = true
				break
			}
		}
		// EJBs deserialize by nature.
		if asset.Technology == model.EJB {
			relevant = true
		}
		// Incoming IIOP/JRMP links (encrypted or not) also trigger the rule;
		// remember the (last) one that crosses a network trust boundary.
		for _, commLink := range model.IncomingTechnicalCommunicationLinksMappedByTargetId[asset.Id] {
			switch commLink.Protocol {
			case model.IIOP, model.IIOP_encrypted, model.JRMP, model.JRMP_encrypted:
				relevant = true
				if commLink.IsAcrossTrustBoundaryNetworkOnly() {
					acrossTrustBoundary = true
					commLinkTitle = commLink.Title
				}
			}
		}
		if relevant {
			risks = append(risks, createRisk(asset, acrossTrustBoundary, commLinkTitle))
		}
	}
	return risks
}
// createRisk builds the concrete risk instance for one technical asset.
// Crossing a trust boundary raises the likelihood from Likely to VeryLikely;
// sensitive/mission-critical CIA ratings raise the impact from High to VeryHigh.
func createRisk(technicalAsset model.TechnicalAsset, acrossTrustBoundary bool, commLinkTitle string) model.Risk {
	title := "<b>Untrusted Deserialization</b> risk at <b>" + technicalAsset.Title + "</b>"
	impact := model.HighImpact
	likelihood := model.Likely
	if acrossTrustBoundary {
		likelihood = model.VeryLikely
		title += " across a trust boundary (at least via communication link <b>" + commLinkTitle + "</b>)"
	}
	if technicalAsset.HighestConfidentiality() == model.Sensitive ||
		technicalAsset.HighestIntegrity() == model.MissionCritical ||
		technicalAsset.HighestAvailability() == model.MissionCritical {
		impact = model.VeryHighImpact
	}
	risk := model.Risk{
		Category:                     Category(),
		Severity:                     model.CalculateSeverity(likelihood, impact),
		ExploitationLikelihood:       likelihood,
		ExploitationImpact:           impact,
		Title:                        title,
		MostRelevantTechnicalAssetId: technicalAsset.Id,
		DataBreachProbability:        model.Probable,
		DataBreachTechnicalAssetIDs:  []string{technicalAsset.Id},
	}
	// Synthetic id makes the risk stable across runs for diffing/tracking.
	risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
	return risk
} | risks/built-in/untrusted-deserialization/untrusted-deserialization-rule.go | 0.743168 | 0.451387 | untrusted-deserialization-rule.go | starcoder
package value
import (
"errors"
"strings"
"github.com/mithrandie/ternary"
)
// ComparisonResult represents the outcome of comparing two primary values
// with CompareCombinedly.
type ComparisonResult int

const (
	IsEqual           ComparisonResult = iota // values are equal under an ordered comparison
	IsBoolEqual                               // values are equal as booleans (booleans carry no ordering)
	IsNotEqual                                // boolean values differ
	IsLess                                    // left value orders before the right value
	IsGreater                                 // left value orders after the right value
	IsIncommensurable                         // a null was involved or no common comparable type exists
)

// comparisonResultLiterals maps each ComparisonResult to its display name.
var comparisonResultLiterals = map[ComparisonResult]string{
	IsEqual:           "IsEqual",
	IsBoolEqual:       "IsBoolEqual",
	IsNotEqual:        "IsNotEqual",
	IsLess:            "IsLess",
	IsGreater:         "IsGreater",
	IsIncommensurable: "IsIncommensurable",
}

// String returns the human-readable name of the comparison result.
func (cr ComparisonResult) String() string {
	return comparisonResultLiterals[cr]
}
// CompareCombinedly compares two primary values by trying, in this fixed
// order, to coerce BOTH sides to: integer, float, datetime, boolean, and
// finally native string. The first type both sides coerce to decides the
// result. Booleans yield IsBoolEqual/IsNotEqual (no ordering); strings are
// compared case-insensitively with surrounding whitespace trimmed. If either
// side is null, or no common type is found, IsIncommensurable is returned.
func CompareCombinedly(p1 Primary, p2 Primary) ComparisonResult {
	if IsNull(p1) || IsNull(p2) {
		return IsIncommensurable
	}
	if i1 := ToInteger(p1); !IsNull(i1) {
		if i2 := ToInteger(p2); !IsNull(i2) {
			v1 := i1.(Integer).Raw()
			v2 := i2.(Integer).Raw()
			if v1 == v2 {
				return IsEqual
			} else if v1 < v2 {
				return IsLess
			} else {
				return IsGreater
			}
		}
	}
	if f1 := ToFloat(p1); !IsNull(f1) {
		if f2 := ToFloat(p2); !IsNull(f2) {
			v1 := f1.(Float).Raw()
			v2 := f2.(Float).Raw()
			if v1 == v2 {
				return IsEqual
			} else if v1 < v2 {
				return IsLess
			} else {
				return IsGreater
			}
		}
	}
	if d1 := ToDatetime(p1); !IsNull(d1) {
		if d2 := ToDatetime(p2); !IsNull(d2) {
			v1 := d1.(Datetime).Raw()
			v2 := d2.(Datetime).Raw()
			if v1.Equal(v2) {
				return IsEqual
			} else if v1.Before(v2) {
				return IsLess
			} else {
				return IsGreater
			}
		}
	}
	if b1 := ToBoolean(p1); !IsNull(b1) {
		if b2 := ToBoolean(p2); !IsNull(b2) {
			v1 := b1.(Boolean).Raw()
			v2 := b2.(Boolean).Raw()
			// Booleans are only checked for equality; they have no ordering.
			if v1 == v2 {
				return IsBoolEqual
			} else {
				return IsNotEqual
			}
		}
	}
	if s1, ok := p1.(String); ok {
		if s2, ok := p2.(String); ok {
			// Case-insensitive, whitespace-trimmed string comparison.
			v1 := strings.ToUpper(strings.TrimSpace(s1.Raw()))
			v2 := strings.ToUpper(strings.TrimSpace(s2.Raw()))
			if v1 == v2 {
				return IsEqual
			} else if v1 < v2 {
				return IsLess
			} else {
				return IsGreater
			}
		}
	}
	return IsIncommensurable
}
// Identical reports strict type-and-value identity between two primaries.
// Unlike Equal, no cross-type coercion is performed: the operands must share
// the same concrete type and hold the same value. A null operand or an
// UNKNOWN ternary on either side yields UNKNOWN; any type mismatch yields FALSE.
func Identical(p1 Primary, p2 Primary) ternary.Value {
	if t, ok := p1.(Ternary); (ok && t.value == ternary.UNKNOWN) || IsNull(p1) {
		return ternary.UNKNOWN
	}
	if t, ok := p2.(Ternary); (ok && t.value == ternary.UNKNOWN) || IsNull(p2) {
		return ternary.UNKNOWN
	}
	switch v1 := p1.(type) {
	case Integer:
		if v2, ok := p2.(Integer); ok {
			return ternary.ConvertFromBool(v1.value == v2.value)
		}
	case Float:
		if v2, ok := p2.(Float); ok {
			return ternary.ConvertFromBool(v1.value == v2.value)
		}
	case Datetime:
		if v2, ok := p2.(Datetime); ok {
			return ternary.ConvertFromBool(v1.value.Equal(v2.value))
		}
	case Boolean:
		if v2, ok := p2.(Boolean); ok {
			return ternary.ConvertFromBool(v1.value == v2.value)
		}
	case Ternary:
		if v2, ok := p2.(Ternary); ok {
			return ternary.ConvertFromBool(v1.value == v2.value)
		}
	case String:
		// Strings are compared by their original literal, not an evaluated form.
		if v2, ok := p2.(String); ok {
			return ternary.ConvertFromBool(v1.literal == v2.literal)
		}
	}
	return ternary.FALSE
}
// Equal returns TRUE when p1 and p2 compare equal under CompareCombinedly
// (including boolean equality), FALSE when they differ, and UNKNOWN when they
// are incommensurable (e.g. a null is involved).
func Equal(p1 Primary, p2 Primary) ternary.Value {
	if r := CompareCombinedly(p1, p2); r != IsIncommensurable {
		return ternary.ConvertFromBool(r == IsEqual || r == IsBoolEqual)
	}
	return ternary.UNKNOWN
}

// NotEqual returns TRUE when p1 and p2 compare unequal under
// CompareCombinedly, FALSE when they are equal, and UNKNOWN when they are
// incommensurable.
func NotEqual(p1 Primary, p2 Primary) ternary.Value {
	if r := CompareCombinedly(p1, p2); r != IsIncommensurable {
		return ternary.ConvertFromBool(r != IsEqual && r != IsBoolEqual)
	}
	return ternary.UNKNOWN
}

// Less returns TRUE when p1 orders before p2. Boolean comparisons
// (IsBoolEqual/IsNotEqual) carry no ordering and yield UNKNOWN, as do
// incommensurable operands.
func Less(p1 Primary, p2 Primary) ternary.Value {
	if r := CompareCombinedly(p1, p2); r != IsIncommensurable && r != IsNotEqual && r != IsBoolEqual {
		return ternary.ConvertFromBool(r == IsLess)
	}
	return ternary.UNKNOWN
}

// Greater returns TRUE when p1 orders after p2; boolean or incommensurable
// comparisons yield UNKNOWN.
func Greater(p1 Primary, p2 Primary) ternary.Value {
	if r := CompareCombinedly(p1, p2); r != IsIncommensurable && r != IsNotEqual && r != IsBoolEqual {
		return ternary.ConvertFromBool(r == IsGreater)
	}
	return ternary.UNKNOWN
}

// LessOrEqual returns TRUE when p1 does not order after p2; boolean or
// incommensurable comparisons yield UNKNOWN.
func LessOrEqual(p1 Primary, p2 Primary) ternary.Value {
	if r := CompareCombinedly(p1, p2); r != IsIncommensurable && r != IsNotEqual && r != IsBoolEqual {
		return ternary.ConvertFromBool(r != IsGreater)
	}
	return ternary.UNKNOWN
}

// GreaterOrEqual returns TRUE when p1 does not order before p2; boolean or
// incommensurable comparisons yield UNKNOWN.
func GreaterOrEqual(p1 Primary, p2 Primary) ternary.Value {
	if r := CompareCombinedly(p1, p2); r != IsIncommensurable && r != IsNotEqual && r != IsBoolEqual {
		return ternary.ConvertFromBool(r != IsLess)
	}
	return ternary.UNKNOWN
}
// Compare dispatches to the comparison function matching the given operator:
// "=" (coerced equality), "==" (strict identity), "<", ">", "<=", ">=", and
// any other string — in practice "<>" and "!=" — falls through to NotEqual.
func Compare(p1 Primary, p2 Primary, operator string) ternary.Value {
	switch operator {
	case "=":
		return Equal(p1, p2)
	case "==":
		return Identical(p1, p2)
	case ">":
		return Greater(p1, p2)
	case "<":
		return Less(p1, p2)
	case ">=":
		return GreaterOrEqual(p1, p2)
	case "<=":
		return LessOrEqual(p1, p2)
	default: //case "<>", "!=":
		return NotEqual(p1, p2)
	}
}
// CompareRowValues compares two row values element by element under the given
// operator, implementing SQL-style lexicographic row comparison with
// three-valued logic: a nil row yields UNKNOWN, mismatched lengths yield an
// error, ordering operators short-circuit on the first decisive element, and
// unknown element comparisons can make the whole result UNKNOWN.
func CompareRowValues(rowValue1 RowValue, rowValue2 RowValue, operator string) (ternary.Value, error) {
	if rowValue1 == nil || rowValue2 == nil {
		return ternary.UNKNOWN, nil
	}
	if len(rowValue1) != len(rowValue2) {
		return ternary.FALSE, errors.New("row value length does not match")
	}
	// unknown records whether any element comparison was indeterminate but
	// non-decisive; it downgrades a final TRUE/FALSE to UNKNOWN.
	unknown := false
	for i := 0; i < len(rowValue1); i++ {
		if operator == "==" {
			// Strict identity: all elements must be identical; FALSE wins
			// immediately, UNKNOWN is remembered.
			t := Identical(rowValue1[i], rowValue2[i])
			if t == ternary.FALSE {
				return ternary.FALSE, nil
			}
			if t == ternary.UNKNOWN {
				unknown = true
			}
			continue
		}
		r := CompareCombinedly(rowValue1[i], rowValue2[i])
		if r == IsIncommensurable {
			switch operator {
			case "=", "<>", "!=":
				// For (in)equality a null element may still be overridden by a
				// later decisive element, except in the last position.
				if i < len(rowValue1)-1 {
					unknown = true
					continue
				}
			}
			return ternary.UNKNOWN, nil
		}
		switch operator {
		case ">", "<", ">=", "<=":
			// Booleans carry no ordering, so an ordering comparison involving
			// them is indeterminate.
			if r == IsNotEqual || r == IsBoolEqual {
				return ternary.UNKNOWN, nil
			}
		}
		switch operator {
		case "=":
			if r != IsEqual && r != IsBoolEqual {
				return ternary.FALSE, nil
			}
		case ">", ">=":
			// First strictly ordered element decides the row ordering.
			switch r {
			case IsGreater:
				return ternary.TRUE, nil
			case IsLess:
				return ternary.FALSE, nil
			}
		case "<", "<=":
			switch r {
			case IsLess:
				return ternary.TRUE, nil
			case IsGreater:
				return ternary.FALSE, nil
			}
		case "<>", "!=":
			// Any unequal element makes the rows unequal.
			if r != IsEqual && r != IsBoolEqual {
				return ternary.TRUE, nil
			}
		}
	}
	if unknown {
		return ternary.UNKNOWN, nil
	}
	// All elements compared equal: strict operators fail, inclusive ones hold.
	switch operator {
	case ">", "<", "<>", "!=":
		return ternary.FALSE, nil
	}
	return ternary.TRUE, nil
}
// Equivalent behaves like Equal except that two nulls are considered
// equivalent (TRUE) instead of incommensurable.
func Equivalent(p1 Primary, p2 Primary) ternary.Value {
	if IsNull(p1) && IsNull(p2) {
		return ternary.TRUE
	}
	return Equal(p1, p2)
} | lib/value/comparison.go | 0.60778 | 0.517266 | comparison.go | starcoder
package types
import (
"encoding/binary"
"fmt"
"regexp"
"strings"
"sync"
)
// BitArray is a thread-safe implementation of a bit array.
// All exported methods acquire mtx; the lowercase helpers assume it is held.
type BitArray struct {
	mtx   sync.Mutex
	Bits  uint     `json:"bits"`  // number of usable bits; NOTE: persisted via reflect, must be exported
	Elems []uint64 `json:"elems"` // bit i lives in Elems[i/64] at position i%64; NOTE: persisted via reflect, must be exported
}
// NewBitArray returns a new bit array with all bits cleared.
// It returns nil if the number of bits is zero or negative.
func NewBitArray(bits int) *BitArray {
	if bits <= 0 {
		return nil
	}
	// One uint64 word per 64 bits, rounded up.
	numElems := (bits + 63) / 64
	ba := &BitArray{Bits: uint(bits)}
	ba.Elems = make([]uint64, numElems)
	return ba
}
// Size returns the number of bits in the bitarray; a nil receiver has size 0.
func (bA *BitArray) Size() uint {
	if bA == nil {
		return 0
	}
	return bA.Bits
}
// GetIndex returns the bit at index i within the bit array.
// A nil receiver or an out-of-range index returns false.
func (bA *BitArray) GetIndex(i int) bool {
	if bA == nil {
		return false
	}
	bA.mtx.Lock()
	defer bA.mtx.Unlock()
	return bA.getIndex(uint(i))
}
// getIndex reads bit i without locking; the caller must hold bA.mtx.
// Out-of-range indices report false.
func (bA *BitArray) getIndex(i uint) bool {
	if i >= bA.Bits {
		return false
	}
	mask := uint64(1) << (i % 64)
	return bA.Elems[i/64]&mask != 0
}
// SetIndex sets the bit at index i within the bit array to v and reports
// whether the write happened. A nil receiver or an out-of-range index
// returns false without modifying anything.
func (bA *BitArray) SetIndex(i int, v bool) bool {
	if bA == nil {
		return false
	}
	bA.mtx.Lock()
	defer bA.mtx.Unlock()
	return bA.setIndex(uint(i), v)
}
// setIndex writes bit i without locking; the caller must hold bA.mtx.
// Out-of-range indices report false without modifying anything.
func (bA *BitArray) setIndex(i uint, v bool) bool {
	if i >= bA.Bits {
		return false
	}
	word, mask := i/64, uint64(1)<<(i%64)
	if v {
		bA.Elems[word] |= mask
	} else {
		bA.Elems[word] &^= mask
	}
	return true
}
// String returns a string representation of BitArray: BA{<bit-string>},
// where <bit-string> is a sequence of 'x' (1) and '_' (0).
// The <bit-string> includes spaces and newlines to help people.
// For a simple sequence of 'x' and '_' characters with no spaces or newlines,
// see the MarshalJSON() method.
// Example: "BA{_x_}" or "nil-BitArray" for nil.
func (bA *BitArray) String() string {
	return bA.StringIndented("")
}
// StringIndented returns the same thing as String(), but applies the indent
// at every 10th bit, and twice at every 50th bit.
// A nil receiver renders as "nil-BitArray".
func (bA *BitArray) StringIndented(indent string) string {
	if bA == nil {
		return "nil-BitArray"
	}
	bA.mtx.Lock()
	defer bA.mtx.Unlock()
	return bA.stringIndented(indent)
}
// stringIndented renders the bits as 'x'/'_' runs, breaking into a new line
// every 100 bits and inserting indent after every 10th bit (twice after every
// 50th). The caller must hold bA.mtx.
// Uses a strings.Builder for the per-line buffer: the previous += string
// concatenation was quadratic in the number of bits.
func (bA *BitArray) stringIndented(indent string) string {
	lines := []string{}
	var bits strings.Builder
	for i := uint(0); i < bA.Bits; i++ {
		if bA.getIndex(i) {
			bits.WriteByte('x')
		} else {
			bits.WriteByte('_')
		}
		if i%100 == 99 {
			lines = append(lines, bits.String())
			bits.Reset()
		}
		if i%10 == 9 {
			bits.WriteString(indent)
		}
		if i%50 == 49 {
			bits.WriteString(indent)
		}
	}
	if bits.Len() > 0 {
		lines = append(lines, bits.String())
	}
	return fmt.Sprintf("BA{%v:%v}", bA.Bits, strings.Join(lines, indent))
}
// Bytes returns the byte representation of the bits within the bitarray.
// Each element is serialized as a little-endian uint64 word; the result is
// truncated to ceil(Bits/8) bytes (copy stops at the output length).
// NOTE(review): unlike most other methods, there is no nil-receiver guard here.
func (bA *BitArray) Bytes() []byte {
	bA.mtx.Lock()
	defer bA.mtx.Unlock()
	numBytes := (bA.Bits + 7) / 8
	bytes := make([]byte, numBytes)
	for i := 0; i < len(bA.Elems); i++ {
		elemBytes := [8]byte{}
		binary.LittleEndian.PutUint64(elemBytes[:], bA.Elems[i])
		copy(bytes[i*8:], elemBytes[:])
	}
	return bytes
}
// MarshalJSON implements the json.Marshaler interface by marshaling the bit
// array using a custom format: a quoted string of '_' (0) and 'x' (1)
// characters. A nil receiver encodes as JSON null.
func (bA *BitArray) MarshalJSON() ([]byte, error) {
	if bA == nil {
		return []byte("null"), nil
	}
	bA.mtx.Lock()
	defer bA.mtx.Unlock()
	// Build the quoted bit string in a single pre-sized buffer: the previous
	// += string concatenation was quadratic in the number of bits.
	var b strings.Builder
	b.Grow(int(bA.Bits) + 2)
	b.WriteByte('"')
	for i := uint(0); i < bA.Bits; i++ {
		if bA.getIndex(i) {
			b.WriteByte('x')
		} else {
			b.WriteByte('_')
		}
	}
	b.WriteByte('"')
	return []byte(b.String()), nil
}
// bitArrayJSONRegexp matches the custom JSON encoding produced by
// MarshalJSON: a quoted string consisting only of '_' and 'x' characters.
var bitArrayJSONRegexp = regexp.MustCompile(`\A"([_x]*)"\z`)

// UnmarshalJSON implements the json.Unmarshaler interface by unmarshaling the
// custom '_'/'x' string description. JSON null resets the receiver to the
// empty bit array.
func (bA *BitArray) UnmarshalJSON(bz []byte) error {
	b := string(bz)
	if b == "null" {
		// This is required e.g. for encoding/json when decoding
		// into a pointer with pre-allocated BitArray.
		bA.Bits = 0
		bA.Elems = nil
		return nil
	}
	// Validate 'b'.
	match := bitArrayJSONRegexp.FindStringSubmatch(b)
	if match == nil {
		return fmt.Errorf("BitArray in JSON should be a string of format %q but got %s", bitArrayJSONRegexp.String(), b)
	}
	bits := match[1]
	// Construct new BitArray and copy over.
	numBits := len(bits)
	bA2 := NewBitArray(numBits)
	for i := 0; i < numBits; i++ {
		if bits[i] == 'x' {
			bA2.SetIndex(i, true)
		}
	}
	// Copies Bits and Elems; the receiver's own mutex is deliberately kept
	// (the copied zero-value mutex of bA2 is unlocked).
	*bA = *bA2 //nolint:govet
	return nil
} | core/types/bitarray.go | 0.690037 | 0.413714 | bitarray.go | starcoder
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.