code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package multi
import (
"errors"
"github.com/matrixorigin/matrixone/pkg/container/nulls"
"github.com/matrixorigin/matrixone/pkg/container/types"
"github.com/matrixorigin/matrixone/pkg/container/vector"
"github.com/matrixorigin/matrixone/pkg/encoding"
"github.com/matrixorigin/matrixone/pkg/vectorize/ceil"
"github.com/matrixorigin/matrixone/pkg/vm/process"
)
// CeilUint64 implements the SQL ceil() builtin for uint64 input vectors.
// vecs[0] holds the values; the optional vecs[1] must be an int64 constant
// giving the number of digits to round to. Returns a new uint64 vector.
func CeilUint64(vecs []*vector.Vector, proc *process.Process) (*vector.Vector, error) {
	// A scalar NULL input yields a scalar NULL output.
	if vecs[0].IsScalarNull() {
		return proc.AllocScalarNullVector(types.Type{Oid: types.T_uint64, Size: 8}), nil
	}
	digits := int64(0)
	vs := vecs[0].Col.([]uint64)
	if len(vecs) > 1 {
		if vecs[1].IsScalarNull() {
			return proc.AllocScalarNullVector(types.Type{Oid: types.T_uint64, Size: 8}), nil
		}
		// The digits argument must be a constant, not a column.
		if !vecs[1].IsScalar() || vecs[1].Typ.Oid != types.T_int64 {
			return nil, errors.New("the second argument of the ceil function must be an int64 constant")
		}
		digits = vecs[1].Col.([]int64)[0]
	}
	if vecs[0].IsScalar() {
		// Scalar (constant) input: one-element result vector.
		vec := proc.AllocScalarVector(types.Type{Oid: types.T_uint64, Size: 8})
		rs := make([]uint64, 1)
		nulls.Set(vec.Nsp, vecs[0].Nsp)
		vector.SetCol(vec, ceil.CeilUint64(vs, rs, digits))
		return vec, nil
	} else {
		// Column input: allocate 8 bytes per element and reuse the raw
		// buffer as the typed result slice.
		vec, err := proc.AllocVector(types.Type{Oid: types.T_uint64, Size: 8}, 8*int64(len(vs)))
		if err != nil {
			return nil, err
		}
		rs := encoding.DecodeUint64Slice(vec.Data)
		rs = rs[:len(vs)]
		vec.Col = rs
		nulls.Set(vec.Nsp, vecs[0].Nsp)
		vector.SetCol(vec, ceil.CeilUint64(vs, rs, digits))
		return vec, nil
	}
}
// CeilInt64 implements the SQL ceil() builtin for int64 input vectors.
// vecs[0] holds the values; the optional vecs[1] must be an int64 constant
// giving the number of digits to round to. Returns a new int64 vector.
func CeilInt64(vecs []*vector.Vector, proc *process.Process) (*vector.Vector, error) {
	// A scalar NULL input yields a scalar NULL output.
	if vecs[0].IsScalarNull() {
		return proc.AllocScalarNullVector(types.Type{Oid: types.T_int64, Size: 8}), nil
	}
	digits := int64(0)
	vs := vecs[0].Col.([]int64)
	if len(vecs) > 1 {
		if vecs[1].IsScalarNull() {
			return proc.AllocScalarNullVector(types.Type{Oid: types.T_int64, Size: 8}), nil
		}
		// The digits argument must be a constant, not a column.
		if !vecs[1].IsScalar() || vecs[1].Typ.Oid != types.T_int64 {
			return nil, errors.New("the second argument of the ceil function must be an int64 constant")
		}
		digits = vecs[1].Col.([]int64)[0]
	}
	if vecs[0].IsScalar() {
		// Scalar (constant) input: one-element result vector.
		vec := proc.AllocScalarVector(types.Type{Oid: types.T_int64, Size: 8})
		rs := make([]int64, 1)
		nulls.Set(vec.Nsp, vecs[0].Nsp)
		vector.SetCol(vec, ceil.CeilInt64(vs, rs, digits))
		return vec, nil
	} else {
		// Column input: allocate 8 bytes per element and reuse the raw
		// buffer as the typed result slice.
		vec, err := proc.AllocVector(types.Type{Oid: types.T_int64, Size: 8}, 8*int64(len(vs)))
		if err != nil {
			return nil, err
		}
		rs := encoding.DecodeInt64Slice(vec.Data)
		rs = rs[:len(vs)]
		vec.Col = rs
		nulls.Set(vec.Nsp, vecs[0].Nsp)
		vector.SetCol(vec, ceil.CeilInt64(vs, rs, digits))
		return vec, nil
	}
}
func CeilFloat64(vecs []*vector.Vector, proc *process.Process) (*vector.Vector, error) {
if vecs[0].IsScalarNull() {
return proc.AllocScalarNullVector(types.Type{Oid: types.T_float64, Size: 8}), nil
}
digits := int64(0)
vs := vecs[0].Col.([]float64)
if len(vecs) > 1 {
if vecs[1].IsScalarNull() {
return proc.AllocScalarNullVector(types.Type{Oid: types.T_float64, Size: 8}), nil
}
if !vecs[1].IsScalar() || vecs[1].Typ.Oid != types.T_int64 {
return nil, errors.New("the second argument of the ceil function must be an int64 constant")
}
digits = vecs[1].Col.([]int64)[0]
}
if vecs[0].IsScalar() {
vec := proc.AllocScalarVector(types.Type{Oid: types.T_int64, Size: 8})
rs := make([]float64, 1)
nulls.Set(vec.Nsp, vecs[0].Nsp)
vector.SetCol(vec, ceil.CeilFloat64(vs, rs, digits))
return vec, nil
} else {
vec, err := proc.AllocVector(types.Type{Oid: types.T_float64, Size: 8}, 8*int64(len(vs)))
if err != nil {
return nil, err
}
rs := encoding.DecodeFloat64Slice(vec.Data)
rs = rs[:len(vs)]
vec.Col = rs
nulls.Set(vec.Nsp, vecs[0].Nsp)
vector.SetCol(vec, ceil.CeilFloat64(vs, rs, digits))
return vec, nil
}
} | pkg/sql/plan2/function/builtin/multi/ceil.go | 0.547706 | 0.504822 | ceil.go | starcoder |
package missing_build_infrastructure
import (
"github.com/threagile/threagile/model"
)
// Category returns the static metadata describing the
// "Missing Build Infrastructure" risk category. The descriptive texts are
// in Portuguese (as shipped); they are runtime data and left unchanged.
func Category() model.RiskCategory {
	return model.RiskCategory{
		Id:          "missing-build-infrastructure",
		Title:       "Missing Build Infrastructure",
		Description: "A arquitetura modelada não contém uma infraestrutura de construção (devops-client, sourcecode-repo, build-pipeline, etc.), " +
			"o que pode ser o risco de um modelo perder ativos críticos (e, portanto, não ver seus riscos). " +
			"Se a arquitetura contém partes desenvolvidas de forma personalizada, o pipeline onde o código é desenvolvido " +
			"e construído precisa fazer parte do modelo.",
		Impact: "Se este risco não for mitigado, os invasores podem explorar riscos não vistos neste modelo de ameaça devido ao " +
			"componentes críticos de infraestrutura de construção ausentes no modelo",
		ASVS:       "V1 - Architecture, Design and Threat Modeling Requirements",
		CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
		Action:     "Construir enrijecimento de dutos",
		Mitigation: "Inclui a infraestrutura de construção no modelo.",
		Check:      "As recomendações do cheat sheet e do ASVS/CSVS referenciado são aplicadas?",
		Function:   model.Architecture,
		STRIDE:     model.Tampering,
		DetectionLogic: "Modelos com partes desenvolvidas personalizadas no escopo sem desenvolvimento no escopo (criação de código) e infraestrutura de construção " +
			"componentes (devops-client, sourcecode-repo, build-pipeline, etc.).",
		RiskAssessment: "A classificação de risco depende da sensibilidade mais alta dos ativos no escopo que executam peças desenvolvidas de maneira personalizada.",
		FalsePositives: "Modelos sem peças desenvolvidas sob medida " +
			"podem ser considerados falsos positivos após revisão individual.",
		ModelFailurePossibleReason: true,
		CWE:                        1127,
	}
}
// SupportedTags lists the model tags this rule understands; it uses none.
func SupportedTags() []string {
	tags := make([]string, 0)
	return tags
}
// GenerateRisks reports a single risk when the model contains in-scope
// custom-developed assets but lacks a complete build infrastructure
// (build pipeline, sourcecode repository, and devops client all present).
func GenerateRisks() []model.Risk {
	risks := make([]model.Risk, 0)
	hasCustomDevelopedParts, hasBuildPipeline, hasSourcecodeRepo, hasDevOpsClient := false, false, false, false
	impact := model.LowImpact
	var mostRelevantAsset model.TechnicalAsset
	for _, id := range model.SortedTechnicalAssetIDs() { // use the sorted one to always get the same tech asset with highest sensitivity as example asset
		technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
		if technicalAsset.CustomDevelopedParts && !technicalAsset.OutOfScope {
			hasCustomDevelopedParts = true
			// First qualifying asset becomes the example; bump impact once
			// if its (transitive) sensitivity is high enough.
			if impact == model.LowImpact {
				mostRelevantAsset = technicalAsset
				if technicalAsset.HighestConfidentiality() >= model.Confidential ||
					technicalAsset.HighestIntegrity() >= model.Critical ||
					technicalAsset.HighestAvailability() >= model.Critical {
					impact = model.MediumImpact
				}
			}
			// The asset's own (direct) ratings can also raise the impact.
			if technicalAsset.Confidentiality >= model.Confidential ||
				technicalAsset.Integrity >= model.Critical ||
				technicalAsset.Availability >= model.Critical {
				impact = model.MediumImpact
			}
			// just for referencing the most interesting asset
			if technicalAsset.HighestSensitivityScore() > mostRelevantAsset.HighestSensitivityScore() {
				mostRelevantAsset = technicalAsset
			}
		}
		if technicalAsset.Technology == model.BuildPipeline {
			hasBuildPipeline = true
		}
		if technicalAsset.Technology == model.SourcecodeRepository {
			hasSourcecodeRepo = true
		}
		if technicalAsset.Technology == model.DevOpsClient {
			hasDevOpsClient = true
		}
	}
	// All three component kinds must exist to count as build infrastructure.
	hasBuildInfrastructure := hasBuildPipeline && hasSourcecodeRepo && hasDevOpsClient
	if hasCustomDevelopedParts && !hasBuildInfrastructure {
		risks = append(risks, createRisk(mostRelevantAsset, impact))
	}
	return risks
}
func createRisk(technicalAsset model.TechnicalAsset, impact model.RiskExploitationImpact) model.Risk {
title := "<b>Missing Build Infrastructure</b> in the threat model (referencing asset <b>" + technicalAsset.Title + "</b> as an example)"
risk := model.Risk{
Category: Category(),
Severity: model.CalculateSeverity(model.Unlikely, impact),
ExploitationLikelihood: model.Unlikely,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
DataBreachProbability: model.Improbable,
DataBreachTechnicalAssetIDs: []string{},
}
risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
return risk
} | risks/built-in/missing-build-infrastructure/missing-build-infrastructure-rule.go | 0.536556 | 0.421611 | missing-build-infrastructure-rule.go | starcoder |
package gotabulate
import "strconv"
import "fmt"
// createFromString wraps each row of strings in a TabulateRow.
func createFromString(data [][]string) []*TabulateRow {
	rows := make([]*TabulateRow, len(data))
	for i := range data {
		rows[i] = &TabulateRow{Elements: data[i]}
	}
	return rows
}
// createFromMixed converts rows of arbitrary values into string rows.
// Floats are formatted with the given format byte (as in strconv.FormatFloat);
// anything unrecognized falls through to fmt.Sprintf with the %s verb.
func createFromMixed(data [][]interface{}, format byte) []*TabulateRow {
	rows := make([]*TabulateRow, len(data))
	for rowIdx, row := range data {
		cells := make([]string, len(row))
		for colIdx, cell := range row {
			switch v := cell.(type) {
			case int32:
				// Quote the rune to ASCII, then strip the surrounding quotes.
				quoted := strconv.QuoteRuneToASCII(v)
				cells[colIdx] = quoted[1 : len(quoted)-1]
			case int:
				cells[colIdx] = strconv.Itoa(v)
			case int64:
				cells[colIdx] = strconv.FormatInt(v, 10)
			case bool:
				cells[colIdx] = strconv.FormatBool(v)
			case float64:
				cells[colIdx] = strconv.FormatFloat(v, format, -1, 64)
			case uint64:
				cells[colIdx] = strconv.FormatUint(v, 10)
			default:
				cells[colIdx] = fmt.Sprintf("%s", cell)
			}
		}
		rows[rowIdx] = &TabulateRow{Elements: cells}
	}
	return rows
}
// createFromInt converts rows of ints into string rows.
func createFromInt(data [][]int) []*TabulateRow {
	rows := make([]*TabulateRow, len(data))
	for i, src := range data {
		cells := make([]string, len(src))
		for j, n := range src {
			cells[j] = strconv.Itoa(n)
		}
		rows[i] = &TabulateRow{Elements: cells}
	}
	return rows
}
// createFromFloat64 converts rows of float64 into string rows, using the
// given format byte (as in strconv.FormatFloat).
func createFromFloat64(data [][]float64, format byte) []*TabulateRow {
	rows := make([]*TabulateRow, len(data))
	for i, src := range data {
		cells := make([]string, len(src))
		for j, f := range src {
			cells[j] = strconv.FormatFloat(f, format, -1, 64)
		}
		rows[i] = &TabulateRow{Elements: cells}
	}
	return rows
}
// createFromInt32 converts rows of int32 (runes) into string rows using the
// ASCII-quoted rune representation with the surrounding quotes stripped.
func createFromInt32(data [][]int32) []*TabulateRow {
	rows := make([]*TabulateRow, len(data))
	for i, src := range data {
		cells := make([]string, len(src))
		for j, r := range src {
			q := strconv.QuoteRuneToASCII(r)
			cells[j] = q[1 : len(q)-1]
		}
		rows[i] = &TabulateRow{Elements: cells}
	}
	return rows
}
// createFromInt64 converts rows of int64 into string rows (base 10).
func createFromInt64(data [][]int64) []*TabulateRow {
	rows := make([]*TabulateRow, len(data))
	for i, src := range data {
		cells := make([]string, len(src))
		for j, n := range src {
			cells[j] = strconv.FormatInt(n, 10)
		}
		rows[i] = &TabulateRow{Elements: cells}
	}
	return rows
}
// createFromBool converts rows of bools into string rows ("true"/"false").
func createFromBool(data [][]bool) []*TabulateRow {
	rows := make([]*TabulateRow, len(data))
	for i, src := range data {
		cells := make([]string, len(src))
		for j, b := range src {
			cells[j] = strconv.FormatBool(b)
		}
		rows[i] = &TabulateRow{Elements: cells}
	}
	return rows
}
// createFromMapMixed flattens a map into headers (the keys) and rows (the
// values). Map iteration order is unspecified, so column order is random.
func createFromMapMixed(data map[string][]interface{}, format byte) (headers []string, tData []*TabulateRow) {
	rows := make([][]interface{}, 0, len(data))
	for key, value := range data {
		headers = append(headers, key)
		rows = append(rows, value)
	}
	return headers, createFromMixed(rows, format)
}
// createFromMapString flattens a map into headers (the keys) and rows (the
// values). Map iteration order is unspecified, so column order is random.
func createFromMapString(data map[string][]string) (headers []string, tData []*TabulateRow) {
	rows := make([][]string, 0, len(data))
	for key, value := range data {
		headers = append(headers, key)
		rows = append(rows, value)
	}
	return headers, createFromString(rows)
}
// inSlice reports whether a appears in list.
func inSlice(a string, list []string) bool {
	for i := range list {
		if list[i] == a {
			return true
		}
	}
	return false
}
package main
import "math"
// Tuple is a 4-component (X, Y, Z, W) value modeling both points (W == 1)
// and vectors (W == 0).
type Tuple struct {
	X float64
	Y float64
	Z float64
	W float64
}

// Add returns the component-wise sum of the two tuples. Vector + vector
// yields a vector, point + vector yields a point; point + point has no
// geometric meaning.
func (t Tuple) Add(other Tuple) Tuple {
	return Tuple{
		X: t.X + other.X,
		Y: t.Y + other.Y,
		Z: t.Z + other.Z,
		W: t.W + other.W,
	}
}

// Cross returns the cross product of the two tuples, treated as vectors
// (the result always has W == 0).
func (t Tuple) Cross(other Tuple) Tuple {
	return Tuple{
		X: t.Y*other.Z - t.Z*other.Y,
		Y: t.Z*other.X - t.X*other.Z,
		Z: t.X*other.Y - t.Y*other.X,
		W: 0,
	}
}

// Divide scales every component of the tuple down by the given factor.
func (t Tuple) Divide(scale float64) Tuple {
	return Tuple{X: t.X / scale, Y: t.Y / scale, Z: t.Z / scale, W: t.W / scale}
}

// Dot returns the dot product of the two tuples.
func (t Tuple) Dot(other Tuple) float64 {
	return t.X*other.X + t.Y*other.Y + t.Z*other.Z + t.W*other.W
}
// Equals reports whether two tuples are component-wise equal within the
// floating-point tolerance implemented by Float64Equal.
func (t Tuple) Equals(other Tuple) bool {
	return Float64Equal(t.X, other.X) &&
		Float64Equal(t.Y, other.Y) &&
		Float64Equal(t.Z, other.Z) &&
		Float64Equal(t.W, other.W)
}

// IsPoint reports whether the tuple represents a point (W == 1).
func (t Tuple) IsPoint() bool {
	return t.W == 1
}

// IsVector reports whether the tuple represents a vector (W == 0).
func (t Tuple) IsVector() bool {
	return t.W == 0
}

// Magnitude returns the Euclidean length of the vector (W is ignored).
func (t Tuple) Magnitude() float64 {
	return math.Sqrt(t.X*t.X + t.Y*t.Y + t.Z*t.Z)
}

// Multiply scales every component of the tuple by the given factor.
func (t Tuple) Multiply(scale float64) Tuple {
	return Tuple{X: t.X * scale, Y: t.Y * scale, Z: t.Z * scale, W: t.W * scale}
}

// Negate flips the sign of every component; equivalent to subtracting the
// tuple from the zero vector.
func (t Tuple) Negate() Tuple {
	return Tuple{X: -t.X, Y: -t.Y, Z: -t.Z, W: -t.W}
}

// Normalized returns the tuple scaled to magnitude 1, preserving the ratio
// of its components.
func (t Tuple) Normalized() Tuple {
	m := t.Magnitude()
	return Tuple{X: t.X / m, Y: t.Y / m, Z: t.Z / m, W: t.W / m}
}
// Subtract a tuple from the instance and return the result. Note that
// subtracting a point from a point is a vector, subtracting a vector from a
// point is a point, subtracting a vector from a vector is a vector, and
// subtracting a point from a vector is nonsensical.
func (t Tuple) Subtract(other Tuple) Tuple {
return Tuple{
t.X - other.X,
t.Y - other.Y,
t.Z - other.Z,
t.W - other.W,
}
}
func MakePoint(x, y, z float64) Tuple {
return Tuple{x, y, z, 1}
}
func MakeVector(x, y, z float64) Tuple {
return Tuple{x, y, z, 0}
} | tuple.go | 0.928959 | 0.850033 | tuple.go | starcoder |
package cbp
import (
"fmt"
cb "github.com/preichenberger/go-coinbasepro/v2"
"github.com/rs/zerolog/log"
"math"
"strconv"
"strings"
)
// Pattern defines the criteria for matching rates and placing orders.
type Pattern struct {
	// ID is the concatenation of the two currencies, e.g. BTC-USD.
	ID string `yaml:"id" json:"id"`
	// Gain is a percentage used to produce the goal sell price from the entry buy price.
	Gain float64 `yaml:"gain" json:"gain"`
	// Loss is a percentage used to derive a limit sell price from the entry buy price.
	Loss float64 `yaml:"loss" json:"loss"`
	// Size is the amount of the transaction, using the product's native quote increment.
	Size float64 `yaml:"size" json:"size"`
	// Delta is the size of an acceptable difference between tweezer bottom candlesticks.
	Delta float64 `yaml:"delta" json:"delta"`
}
// InitPattern fills any unset (zero-valued) pattern fields with the
// supplied defaults.
func (p *Pattern) InitPattern(size, gain, loss, delta float64) {
	setDefault := func(field *float64, value float64) {
		if *field == 0 {
			*field = value
		}
	}
	setDefault(&p.Size, size)
	setDefault(&p.Gain, gain)
	setDefault(&p.Loss, loss)
	setDefault(&p.Delta, delta)
}
// GoalPrice returns the sell goal derived by marking the given buy price up
// by the configured gain percentage.
func (p *Pattern) GoalPrice(price float64) float64 {
	gain := price * p.Gain
	return price + gain
}

// LossPrice returns the stop-loss price derived by marking the given buy
// price down by the configured loss percentage.
func (p *Pattern) LossPrice(price float64) float64 {
	loss := price * p.Loss
	return price - loss
}
// NewMarketBuyOrder builds a market buy order for the pattern's product.
// The order size is the product's base minimum size, replaced by the
// pattern's configured Size (formatted to the product precision) when that
// is larger. A size parse failure is logged and the raw size used as-is.
func (p *Pattern) NewMarketBuyOrder() *cb.Order {
	size := GetProduct(p.ID).BaseMinSize
	if qty, err := strconv.ParseFloat(size, 64); err != nil {
		log.Debug().Err(err).Str("𝑓", "size").Str("𝑽", size).Send()
	} else if qty < p.Size {
		// Format the configured size with the product's precision.
		size = preciseResult(size, p.Size)
	}
	o := new(cb.Order)
	o.ProductID = p.ID
	o.Side = "buy"
	o.Size = size
	o.Type = "market"
	return o
}
// NewMarketSellOrder builds a market sell order for the given size,
// normalized to the product's base-size precision.
func (p *Pattern) NewMarketSellOrder(size string) *cb.Order {
	o := new(cb.Order)
	o.ProductID = p.ID
	o.Side = "sell"
	o.Size = p.PreciseSize(size)
	o.Type = "market"
	return o
}
// NewLimitSellEntryOrderAtGoalPrice builds a stop-entry limit sell order at
// the goal price derived from the trade's fill price, for the fill's size.
func (p *Pattern) NewLimitSellEntryOrderAtGoalPrice(trade *Trade) *cb.Order {
	return p.NewLimitSellEntryOrder(p.GoalPrice(trade.Price()), trade.Fill.Size)
}

// NewLimitSellEntryOrder builds a stop-entry limit sell order at the given
// price and size, both normalized to the product's precision.
func (p *Pattern) NewLimitSellEntryOrder(price float64, size string) *cb.Order {
	o := new(cb.Order)
	o.Price = p.PrecisePrice(price)
	o.ProductID = p.ID
	o.Side = "sell"
	o.Size = p.PreciseSize(size)
	o.Stop = "entry"
	o.StopPrice = p.PrecisePrice(price)
	o.Type = "limit"
	return o
}

// NewLimitLossOrder builds a stop-loss limit sell order at the given price
// and size, both normalized to the product's precision.
func (p *Pattern) NewLimitLossOrder(price float64, size string) *cb.Order {
	o := new(cb.Order)
	o.Price = p.PrecisePrice(price)
	o.ProductID = p.ID
	o.Side = "sell"
	o.Size = p.PreciseSize(size)
	o.Stop = "loss"
	o.StopPrice = p.PrecisePrice(price)
	o.Type = "limit"
	return o
}
// MatchesTweezerBottomPattern reports whether three consecutive rates form a
// tweezer bottom: two initialized down candles ("then", "that") followed by
// an up candle ("this") whose min(low, open) lies within Delta of the
// previous candle's min(low, close).
func (p *Pattern) MatchesTweezerBottomPattern(then, that, this Rate) bool {
	return then.IsInit() &&
		then.IsDown() &&
		that.IsInit() &&
		that.IsDown() &&
		this.IsUp() &&
		math.Abs(math.Min(that.Low, that.Close)-math.Min(this.Low, this.Open)) <= p.Delta
}
// PreciseSize reformats the size string with the product's base-size
// precision. On parse failure the input is logged and returned unchanged.
func (p *Pattern) PreciseSize(s string) string {
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		log.Debug().Err(err).Str("𝑓", "size").Str("𝑽", s).Send()
		return s
	}
	return preciseResult(GetProduct(p.ID).BaseMinSize, f)
}

// PrecisePrice formats the price with the product's quote-increment
// precision.
func (p *Pattern) PrecisePrice(f float64) string {
	return preciseResult(GetProduct(p.ID).QuoteIncrement, f)
}

// PrecisePriceFromString parses and reformats a price string with the
// product's quote-increment precision. On parse failure the input is logged
// and returned unchanged.
func (p *Pattern) PrecisePriceFromString(s string) string {
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		log.Debug().Err(err).Str("𝑓", "size").Str("𝑽", s).Send()
		return s
	}
	return preciseResult(GetProduct(p.ID).QuoteIncrement, f)
}
// preciseResult formats f with the same number of decimal places as the
// fractional part of the increment string c (e.g. c="0.01" -> two decimals).
func preciseResult(c string, f float64) string {
	if !strings.Contains(c, `.`) {
		// BUG FIX: an increment with no fractional part previously returned
		// the increment string c itself, discarding f entirely. Format f as
		// a whole number instead.
		return fmt.Sprintf("%.0f", f)
	}
	chunks := strings.Split(c, `.`)
	format := fmt.Sprintf("%s.%df", "%", len(chunks[1]))
	return fmt.Sprintf(format, f)
}
package graphicx
import "image/color"
// Pixel is a 32-bit ARGB pixel value (alpha in the high byte).
type Pixel uint32

// RGBA unpacks the pixel into its 8-bit R, G, B, A channels.
func (p Pixel) RGBA() (R, G, B, A uint8) {
	A, R, G, B = Pixel2ARGB(uint32(p))
	return
}

// Pixel64 is a 64-bit ARGB pixel value with 16 bits per channel.
type Pixel64 uint64

// RGBA unpacks the pixel into its 16-bit R, G, B, A channels.
func (p Pixel64) RGBA() (R, G, B, A uint16) {
	A, R, G, B = Pixel2ARGB64(uint64(p))
	return
}
//-----------------------------------------

// ARGB2Pixel packs 8-bit A, R, G, B channels into one 32-bit pixel with the
// alpha channel in the high byte.
func ARGB2Pixel(A, R, G, B uint8) (pixel uint32) {
	return uint32(A)<<24 | uint32(R)<<16 | uint32(G)<<8 | uint32(B)
}

// Pixel2ARGB unpacks a 32-bit ARGB pixel into its 8-bit channels.
func Pixel2ARGB(pixel uint32) (A, R, G, B uint8) {
	A = uint8(pixel >> 24)
	R = uint8(pixel >> 16)
	G = uint8(pixel >> 8)
	B = uint8(pixel)
	return
}
// RGBA2Pixel packs 8-bit R, G, B, A channels into one 32-bit pixel with the
// red channel in the high byte.
func RGBA2Pixel(R, G, B, A uint8) (pixel uint32) {
	return uint32(R)<<24 | uint32(G)<<16 | uint32(B)<<8 | uint32(A)
}

// Pixel2RGBA unpacks a 32-bit RGBA pixel into its 8-bit channels.
func Pixel2RGBA(pixel uint32) (R, G, B, A uint8) {
	R = uint8(pixel >> 24)
	G = uint8(pixel >> 16)
	B = uint8(pixel >> 8)
	A = uint8(pixel)
	return
}
// ARGB2Pixel64 packs 16-bit A, R, G, B channels into one 64-bit pixel with
// the alpha channel in the high word.
func ARGB2Pixel64(A, R, G, B uint16) (pixel uint64) {
	return uint64(A)<<48 | uint64(R)<<32 | uint64(G)<<16 | uint64(B)
}

// Pixel2ARGB64 unpacks a 64-bit ARGB pixel into its 16-bit channels.
func Pixel2ARGB64(pixel uint64) (A, R, G, B uint16) {
	A = uint16(pixel >> 48)
	R = uint16(pixel >> 32)
	G = uint16(pixel >> 16)
	B = uint16(pixel)
	return
}
// RGB2APixel64 packs 16-bit R, G, B, A channels into one 64-bit pixel with
// the red channel in the high word.
func RGB2APixel64(R, G, B, A uint16) (pixel uint64) {
	return uint64(R)<<48 | uint64(G)<<32 | uint64(B)<<16 | uint64(A)
}

// Pixel2RGBA64 unpacks a 64-bit RGBA pixel into its 16-bit channels.
func Pixel2RGBA64(pixel uint64) (R, G, B, A uint16) {
	R = uint16(pixel >> 48)
	G = uint16(pixel >> 32)
	B = uint16(pixel >> 16)
	A = uint16(pixel)
	return
}
func Color2Pixel(c color.Color) uint32 {
r, g, b, a := c.RGBA()
return (a << 16) | (r << 8) | g | (b >> 8)
}
func Color2Pixel64(c color.Color) uint64 {
r, g, b, a := c.RGBA()
return (uint64(a) << 48) | (uint64(r) << 32) | (uint64(g) << 16) | uint64(b)
} | graphicx/pixel.go | 0.74158 | 0.506897 | pixel.go | starcoder |
package writer
import (
"errors"
"io"
)
// Writer is the structure used to write bits, bytes into predefined data.
// It allows to write the bits in two modes. The first and default
// writes bytes with the initial bitIndex 0 as the LSB (Least Significant Bit)
// The second mode writes bits in an opposite manner starting from the MSB (Most Significant Bit).
// The writer is being created by the methods: 'New' and 'NewMSB', where the first
// creates default writer and the second the 'msb' flagged writer.
// Implements io.Writer, io.ByteWriter interfaces.
type Writer struct {
data []byte
bitIndex uint8
byteIndex int
msb bool
}
var (
// ErrInvalidBitValue defines the error when invalid 'bit' value is provided.
ErrInvalidBitValue = errors.New("invalid bit value")
)
var (
_ io.Writer = &Writer{}
_ io.ByteWriter = &Writer{}
)
// New creates new writer for the provided data.
func New(data []byte) *Writer {
return &Writer{data: data}
}
// NewMSB creates new writer with the msb flag.
// While default writer writes single bits into LSB, the msbWriter writes single bits
// starting from the MSB.
// Example:
// InverseWriter contains following data:
// data - 10010100 01001110 00000000
// ^
// The default current bit index is pointed by '^'.
// Writing new '1' bit to the following data would result as:
// data - 10010100 01001110 10000000
func NewMSB(data []byte) *Writer {
return &Writer{data: data, msb: true}
}
// Data gets the writer data.
func (w *Writer) Data() []byte {
return w.data
}
// UseMSB gets the writer flag if it works on the MSB mode.
func (w *Writer) UseMSB() bool {
return w.msb
}
// Write implements io.Writer interface.
func (w *Writer) Write(p []byte) (int, error) {
if len(p) > w.byteCapacity() {
return 0, io.EOF
}
for _, b := range p {
if err := w.writeByte(b); err != nil {
return 0, err
}
}
return len(p), nil
}
// WriteByte implements io.ByteWriter interface.
func (w *Writer) WriteByte(c byte) error {
return w.writeByte(c)
}
// WriteBit writes single bit into provided bit writer data.
func (w *Writer) WriteBit(bit int) error {
switch bit {
case 0, 1:
return w.writeBit(uint8(bit))
}
return ErrInvalidBitValue
}
func (w *Writer) byteCapacity() int {
currentCapacity := len(w.data) - w.byteIndex
if w.bitIndex != 0 {
currentCapacity--
}
return currentCapacity
}
func (w *Writer) writeBit(b uint8) error {
if len(w.data)-1 < w.byteIndex {
return io.EOF
}
bitIndex := w.bitIndex
if w.msb {
bitIndex = 7 - w.bitIndex
}
w.data[w.byteIndex] |= byte(uint16(b<<bitIndex) & 0xff)
w.bitIndex++
if w.bitIndex == 8 {
w.byteIndex++
w.bitIndex = 0
}
return nil
}
func (w *Writer) writeByte(b byte) error {
if w.byteIndex > len(w.data)-1 {
return io.EOF
}
if w.byteIndex == len(w.data)-1 && w.bitIndex != 0 {
return io.EOF
}
if w.bitIndex == 0 {
w.data[w.byteIndex] = b
w.byteIndex++
return nil
}
if w.msb {
w.data[w.byteIndex] |= b >> w.bitIndex
w.byteIndex++
w.data[w.byteIndex] = byte(uint16(b) << (8 - w.bitIndex) & 0xff)
} else {
w.data[w.byteIndex] |= byte(uint16(b) << w.bitIndex & 0xff)
w.byteIndex++
w.data[w.byteIndex] = b >> (8 - w.bitIndex)
}
return nil
} | bot/vendor/github.com/pzduniak/unipdf/internal/jbig2/writer/writer.go | 0.577614 | 0.453262 | writer.go | starcoder |
package glucose
import (
"math"
"github.com/tidepool-org/platform/data"
"github.com/tidepool-org/platform/structure"
)
// Target models a blood-glucose target, expressed as one of: an explicit
// low/high pair, a target with a symmetric range, a target with a high
// bound, or a bare target value (see Validate).
type Target struct {
	High   *float64 `json:"high,omitempty" bson:"high,omitempty"`
	Low    *float64 `json:"low,omitempty" bson:"low,omitempty"`
	Range  *float64 `json:"range,omitempty" bson:"range,omitempty"`
	Target *float64 `json:"target,omitempty" bson:"target,omitempty"`
}

// ParseTarget parses a Target from the object parser, returning nil when
// the object is absent.
func ParseTarget(parser structure.ObjectParser) *Target {
	if !parser.Exists() {
		return nil
	}
	datum := NewTarget()
	parser.Parse(datum)
	return datum
}

// NewTarget returns an empty Target.
func NewTarget() *Target {
	return &Target{}
}

// Parse populates the target's fields from the object parser.
func (t *Target) Parse(parser structure.ObjectParser) {
	t.High = parser.Float64("high")
	t.Low = parser.Float64("low")
	t.Range = parser.Float64("range")
	t.Target = parser.Float64("target")
}
// Validate enforces that exactly one of the allowed field combinations is
// present — target+range, target+high, target alone, or low+high — and
// that each present value lies within the unit-dependent bounds.
func (t *Target) Validate(validator structure.Validator, units *string) {
	if t.Target != nil && t.Range != nil {
		// target + range: high/low must be absent.
		validator.Float64("high", t.High).NotExists()
		validator.Float64("low", t.Low).NotExists()
		validator.Float64("range", t.Range).Exists().InRange(RangeRangeForUnits(*t.Target, units))
		validator.Float64("target", t.Target).Exists().InRange(TargetRangeForUnits(units))
	} else if t.Target != nil && t.High != nil {
		// target + high: low/range must be absent.
		validator.Float64("high", t.High).Exists().InRange(HighRangeForUnits(*t.Target, units))
		validator.Float64("low", t.Low).NotExists()
		validator.Float64("range", t.Range).NotExists()
		validator.Float64("target", t.Target).Exists().InRange(TargetRangeForUnits(units))
	} else if t.Target != nil {
		// target alone: everything else must be absent.
		validator.Float64("high", t.High).NotExists()
		validator.Float64("low", t.Low).NotExists()
		validator.Float64("range", t.Range).NotExists()
		validator.Float64("target", t.Target).Exists().InRange(TargetRangeForUnits(units))
	} else if t.High != nil && t.Low != nil {
		// low + high pair: range/target must be absent.
		validator.Float64("high", t.High).Exists().InRange(HighRangeForUnits(*t.Low, units))
		validator.Float64("low", t.Low).Exists().InRange(LowRangeForUnits(units))
		validator.Float64("range", t.Range).NotExists()
		validator.Float64("target", t.Target).NotExists()
	} else if t.Low != nil {
		// low without high is incomplete.
		validator.Float64("high", t.High).Exists()
	} else {
		// Nothing usable present: at minimum a target is required.
		validator.Float64("target", t.Target).Exists()
	}
}
// Normalize converts all present values via NormalizeValueForUnits, but
// only when the datum originates from an external source.
func (t *Target) Normalize(normalizer data.Normalizer, units *string) {
	if normalizer.Origin() == structure.OriginExternal {
		t.High = NormalizeValueForUnits(t.High, units)
		t.Low = NormalizeValueForUnits(t.Low, units)
		t.Range = NormalizeValueForUnits(t.Range, units)
		t.Target = NormalizeValueForUnits(t.Target, units)
	}
}
// HighRangeForUnits returns the valid [min, max] for a "high" value given
// the low bound and units. Unknown units or an out-of-range low yield the
// full float64 range.
func HighRangeForUnits(low float64, units *string) (float64, float64) {
	if units == nil {
		return -math.MaxFloat64, math.MaxFloat64
	}
	switch *units {
	case MmolL, Mmoll:
		if low >= MmolLMinimum && low <= MmolLMaximum {
			return low, MmolLMaximum
		}
	case MgdL, Mgdl:
		if low >= MgdLMinimum && low <= MgdLMaximum {
			return low, MgdLMaximum
		}
	}
	return -math.MaxFloat64, math.MaxFloat64
}
// LowRangeForUnits returns the valid [min, max] for a "low" value in the
// given units.
func LowRangeForUnits(units *string) (float64, float64) {
	min, max := ValueRangeForUnits(units)
	return min, max
}
// RangeRangeForUnits returns the valid [min, max] for a "range" value given
// the target and units: the range may not extend past either unit bound.
// Unknown units or an out-of-range target yield the full float64 range.
func RangeRangeForUnits(target float64, units *string) (float64, float64) {
	if units == nil {
		return -math.MaxFloat64, math.MaxFloat64
	}
	switch *units {
	case MmolL, Mmoll:
		if target >= MmolLMinimum && target <= MmolLMaximum {
			return 0.0, math.Min(target-MmolLMinimum, MmolLMaximum-target)
		}
	case MgdL, Mgdl:
		if target >= MgdLMinimum && target <= MgdLMaximum {
			return 0.0, math.Min(target-MgdLMinimum, MgdLMaximum-target)
		}
	}
	return -math.MaxFloat64, math.MaxFloat64
}
func TargetRangeForUnits(units *string) (float64, float64) {
return ValueRangeForUnits(units)
} | data/blood/glucose/target.go | 0.694406 | 0.483831 | target.go | starcoder |
package mask
import (
"regexp"
"unicode/utf8"
"github.com/ozontech/file.d/fd"
"github.com/ozontech/file.d/pipeline"
"github.com/ozontech/file.d/stats"
insaneJSON "github.com/vitkovskii/insane-json"
"go.uber.org/zap"
)
/*{ introduction
Mask plugin matches event with regular expression and substitutions successfully matched symbols via asterix symbol.
You could set regular expressions and submatch groups.
**Example:**
```yaml
pipelines:
example_pipeline:
...
actions:
- type: mask
metric_subsystem_name: "some_name"
masks:
- mask:
re: "\b(\d{1,4})\D?(\d{1,4})\D?(\d{1,4})\D?(\d{1,4})\b"
groups: [1,2,3]
...
```
}*/
const (
	// substitution is the character written in place of each masked rune.
	substitution = byte('*')
	// timesActivated is the counter metric name incremented whenever any
	// mask matches an event.
	timesActivated = "times_activated"
)
// Plugin masks sensitive substrings in event values using the configured
// regular expressions.
type Plugin struct {
	config *Config
	// sourceBuf and maskBuf are reusable scratch buffers used while masking
	// a single value.
	sourceBuf []byte
	maskBuf   []byte
	// valueNodes is a reusable list of leaf JSON nodes collected per event.
	valueNodes []*insaneJSON.Node
	logger     *zap.SugaredLogger
	// logMaskAppeared is true when the activation metric should be emitted
	// on each match (set when MetricSubsystemName is configured).
	logMaskAppeared bool
}
//! config-params
//^ config-params
// Config holds the plugin configuration: an optional metric subsystem name
// and the list of masks to apply.
type Config struct {
	//> @3@4@5@6
	//>
	//> If set counterMetric with this name would be sent on metric_subsystem_name.mask_plugin
	MetricSubsystemName *string `json:"metric_subsystem_name" required:"false"` //*
	//> @3@4@5@6
	//>
	//> List of masks.
	Masks []Mask `json:"masks"` //*
}

// Mask describes one masking rule: a regular expression, the capture groups
// to mask, and how matched text is replaced.
type Mask struct {
	//> @3@4@5@6
	//>
	//> Regular expression for masking
	Re string `json:"re" default:"" required:"true"` //*
	// Re_ is the compiled form of Re (filled in by compileMask).
	Re_ *regexp.Regexp
	//> @3@4@5@6
	//>
	//> Numbers of masking groups in expression, zero for mask all expression
	Groups []int `json:"groups" required:"true"` //*
	//> @3@4@5@6
	//>
	//> MaxCount limits the number of masked symbols in the masked output, if zero, no limit is set
	MaxCount int `json:"max_count"` //*
	//> @3@4@5@6
	//>
	//> ReplaceWord, if set, is used instead of asterisks for masking patterns that are of the same length or longer.
	ReplaceWord string `json:"replace_word"` //*
}
// init registers the mask action plugin with the default plugin registry.
func init() {
	fd.DefaultPluginRegistry.RegisterAction(&pipeline.PluginStaticInfo{
		Type:    "mask",
		Factory: factory,
	})
}

// factory returns a fresh plugin instance and an empty config for it.
func factory() (pipeline.AnyPlugin, pipeline.AnyConfig) {
	return &Plugin{}, &Config{}
}
// compileMasks compiles every mask's regexp in place and returns the slice.
func compileMasks(masks []Mask, logger *zap.SugaredLogger) []Mask {
	for i, m := range masks {
		masks[i] = compileMask(m, logger)
	}
	return masks
}
// compileMask compiles the mask's regular expression and validates its
// group numbers. Invalid configuration aborts the process via
// logger.Fatalf.
func compileMask(m Mask, logger *zap.SugaredLogger) Mask {
	logger.Infof("compiling, re=%s, groups=%v", m.Re, m.Groups)
	re, err := regexp.Compile(m.Re)
	if err != nil {
		logger.Fatalf("error on compiling regexp, regexp=%s", m.Re)
	}
	m.Re_ = re
	m.Groups = verifyGroupNumbers(m.Groups, re.NumSubexp(), logger)
	return m
}
// isGroupsUnique reports whether the group numbers contain no duplicates.
func isGroupsUnique(groups []int) bool {
	seen := make(map[int]struct{}, len(groups))
	for _, g := range groups {
		if _, dup := seen[g]; dup {
			return false
		}
		seen[g] = struct{}{}
	}
	return true
}
// verifyGroupNumbers validates the configured group indices against the
// number of capture groups in the compiled expression. Group 0 means "mask
// the whole match" and collapses the list to [0]. Any invalid configuration
// aborts the process via logger.Fatal/Fatalf.
func verifyGroupNumbers(groups []int, totalGroups int, logger *zap.SugaredLogger) []int {
	if len(groups) == 0 {
		logger.Fatal("groups is empty")
	}
	if !isGroupsUnique(groups) {
		logger.Fatalf("groups numbers must be unique, groups numbers=%v", groups)
	}
	if len(groups) > totalGroups {
		logger.Fatalf("there are many groups, groups=%d, totalGroups=%d", len(groups), totalGroups)
	}
	for _, g := range groups {
		if g > totalGroups || g < 0 {
			logger.Fatalf("wrong group number, number=%d", g)
		} else if g == 0 {
			// Group 0 is the entire match; other group numbers are redundant.
			return []int{0}
		}
	}
	return groups
}
// Start implements pipeline.ActionPlugin: it validates the configuration,
// compiles all masks, allocates scratch buffers sized to the average event,
// and registers the activation metric when a subsystem name is configured.
func (p *Plugin) Start(config pipeline.AnyConfig, params *pipeline.ActionPluginParams) {
	p.config = config.(*Config)
	// BUG FIX: assign the logger before the validation loop below; it was
	// previously assigned afterwards, so p.logger.Fatal on an invalid mask
	// configuration was called on a nil logger.
	p.logger = params.Logger
	for _, mask := range p.config.Masks {
		// MaxCount and ReplaceWord are mutually exclusive masking modes.
		if mask.MaxCount > 0 && mask.ReplaceWord != "" {
			p.logger.Fatal("Invalid mask configuration")
		}
	}
	p.maskBuf = make([]byte, 0, params.PipelineSettings.AvgEventSize)
	p.sourceBuf = make([]byte, 0, params.PipelineSettings.AvgEventSize)
	p.valueNodes = make([]*insaneJSON.Node, 0)
	p.config.Masks = compileMasks(p.config.Masks, p.logger)
	if p.config.MetricSubsystemName != nil {
		p.logMaskAppeared = true
		p.registerPluginMetrics()
	}
}
// registerPluginMetrics registers the activation counter under the
// configured metric subsystem name. Only called when MetricSubsystemName
// is set.
func (p *Plugin) registerPluginMetrics() {
	stats.RegisterCounter(&stats.MetricDesc{
		Name:      timesActivated,
		Subsystem: *p.config.MetricSubsystemName,
		Help:      "Number of times mask plugin found the provided pattern",
	})
}
// Stop implements the pipeline plugin interface; the plugin holds no
// resources that need releasing.
func (p *Plugin) Stop() {
}
// appendMask writes the replacement for src[begin:end] to dst and returns
// dst plus the byte-length difference between the replaced source bytes and
// what was written; the caller uses that offset to realign later match
// indices.
func (p *Plugin) appendMask(mask *Mask, dst, src []byte, begin, end int) ([]byte, int) {
	runeCounter := utf8.RuneCount(src[begin:end])
	if mask.ReplaceWord != "" {
		dst = append(dst, []byte(mask.ReplaceWord)...)
		return dst, len(src[begin:end]) - len(mask.ReplaceWord)
	}
	// One substitution character per rune, capped at MaxCount when set.
	for j := 0; j < runeCounter; j++ {
		if mask.MaxCount != 0 && j >= mask.MaxCount {
			break
		}
		dst = append(dst, substitution)
	}
	// NOTE(review): when MaxCount truncates the output, the returned offset
	// still assumes runeCounter substitution bytes were written — confirm
	// this is intended.
	return dst, len(src[begin:end]) - runeCounter
}
// maskSection copies src into dst with src[begin:end] replaced according to
// mask, returning the result and the byte offset the replacement introduced.
func (p *Plugin) maskSection(mask *Mask, dst, src []byte, begin, end int) ([]byte, int) {
	out := append(dst, src[:begin]...)
	out, offset := p.appendMask(mask, out, src, begin, end)
	// Append the tail only when the masked prefix has not already consumed
	// the whole source.
	if len(out)+offset < len(src) {
		out = append(out, src[end:]...)
	}
	return out, offset
}
// maskValue applies a single mask to value, writing the masked result via
// buf; the bool reports whether the mask matched at all.
func (p *Plugin) maskValue(mask *Mask, value, buf []byte) ([]byte, bool) {
	indexes := mask.Re_.FindAllSubmatchIndex(value, -1)
	if len(indexes) == 0 {
		// No match: hand the input back untouched.
		return value, false
	}
	buf = buf[:0]
	// offset is the number of bytes the replacement shrank the value by;
	// match indexes were computed against the original value and are
	// shifted left accordingly.
	// NOTE(review): offset is overwritten, not accumulated, per iteration —
	// confirm this is correct for inputs with multiple matches/groups.
	offset := 0
	for _, index := range indexes {
		for _, grp := range mask.Groups {
			// index[grp*2] / index[grp*2+1] are the byte bounds of capture
			// group grp as reported by FindAllSubmatchIndex.
			value, offset = p.maskSection(
				mask,
				buf,
				value,
				index[grp*2]-offset,
				index[grp*2+1]-offset,
			)
		}
	}
	return value, true
}
// getValueNodeList walks the JSON tree rooted at currentNode depth-first and
// appends every leaf (scalar) node to valueNodes, returning the grown slice.
func getValueNodeList(currentNode *insaneJSON.Node, valueNodes []*insaneJSON.Node) []*insaneJSON.Node {
	if currentNode.IsField() {
		return getValueNodeList(currentNode.AsFieldValue(), valueNodes)
	}
	if currentNode.IsArray() {
		for _, child := range currentNode.AsArray() {
			valueNodes = getValueNodeList(child, valueNodes)
		}
		return valueNodes
	}
	if currentNode.IsObject() {
		for _, child := range currentNode.AsFields() {
			valueNodes = getValueNodeList(child, valueNodes)
		}
		return valueNodes
	}
	// Scalar leaf: this is a maskable value.
	return append(valueNodes, currentNode)
}
func (p *Plugin) Do(event *pipeline.Event) pipeline.ActionResult {
root := event.Root.Node
// apply vars need to check if mask was applied to event data and send metric.
maskApplied := false
locApplied := false
p.valueNodes = p.valueNodes[:0]
p.valueNodes = getValueNodeList(root, p.valueNodes)
for _, v := range p.valueNodes {
value := v.AsBytes()
p.sourceBuf = append(p.sourceBuf[:0], value...)
p.maskBuf = append(p.maskBuf[:0], p.sourceBuf...)
for _, mask := range p.config.Masks {
p.maskBuf, locApplied = p.maskValue(&mask, p.sourceBuf, p.maskBuf)
p.sourceBuf = p.maskBuf
if locApplied {
maskApplied = true
}
}
v.MutateToString(string(p.maskBuf))
}
if p.logMaskAppeared && maskApplied {
stats.GetCounter(*p.config.MetricSubsystemName, timesActivated).Inc()
p.logger.Infof("mask appeared to event, output string: %s", event.Root.EncodeToString())
}
return pipeline.ActionPass
} | plugin/action/mask/mask.go | 0.751283 | 0.610512 | mask.go | starcoder |
package parser
import (
"fmt"
"net/url"
"github.com/kasperisager/pak/pkg/asset/css/ast"
"github.com/kasperisager/pak/pkg/asset/css/token"
)
// SyntaxError describes a parse failure at a given position in the token
// stream.
type SyntaxError struct {
	Offset  int    // index of the offending token in the input stream
	Message string // human-readable description of the failure
}
// Error implements the error interface; it returns the message without the
// offset.
func (err SyntaxError) Error() string {
	return err.Message
}
// Parse turns a token stream into a style-sheet AST. It fails if tokens
// remain after a complete style sheet has been consumed.
func Parse(tokens []token.Token) (*ast.StyleSheet, error) {
	offset, rest, styleSheet, err := parseStyleSheet(0, tokens)
	switch {
	case err != nil:
		return nil, err
	case len(rest) > 0:
		// A well-formed sheet must consume every token.
		return nil, SyntaxError{
			Offset:  offset,
			Message: "unexpected token",
		}
	default:
		return styleSheet, nil
	}
}
// parseStyleSheet parses rules until the stream ends or a closing bracket is
// reached (the bracket case supports nested sheets such as @media bodies).
func parseStyleSheet(offset int, tokens []token.Token) (int, []token.Token, *ast.StyleSheet, error) {
	styleSheet := &ast.StyleSheet{}
	for {
		switch peek(tokens, 1).(type) {
		case token.Whitespace:
			// Whitespace between rules is insignificant.
			offset, tokens = offset+1, tokens[1:]
			continue
		case token.CloseParen, token.CloseCurly, token.CloseSquare, nil:
			return offset, tokens, styleSheet, nil
		}
		var (
			rule ast.Rule
			err  error
		)
		offset, tokens, rule, err = parseRule(offset, tokens)
		if err != nil {
			return offset, tokens, nil, err
		}
		styleSheet.Rules = append(styleSheet.Rules, rule)
	}
}
// parseRule dispatches to the correct rule parser: a style rule by default,
// or the matching at-rule parser for a recognized at-keyword.
func parseRule(offset int, tokens []token.Token) (int, []token.Token, ast.Rule, error) {
	keyword, isAtRule := peek(tokens, 1).(token.AtKeyword)
	if !isAtRule {
		return parseStyleRule(offset, tokens)
	}
	switch keyword.Value {
	case "import":
		return parseImportRule(offset+1, tokens[1:])
	case "media":
		return parseMediaRule(offset+1, tokens[1:])
	case "font-face":
		return parseFontFaceRule(offset+1, tokens[1:])
	case "keyframes":
		return parseKeyframesRule(offset+1, tokens[1:], "")
	case "-webkit-keyframes":
		return parseKeyframesRule(offset+1, tokens[1:], "-webkit-")
	case "supports":
		return parseSupportsRule(offset+1, tokens[1:])
	case "page":
		return parsePageRule(offset+1, tokens[1:])
	}
	// Unknown at-rule: leave the stream untouched and report the keyword.
	return offset, tokens, nil, SyntaxError{
		Offset:  offset,
		Message: "unexpected token",
	}
}
// parseStyleRule parses "selector-list { declaration-list }".
func parseStyleRule(offset int, tokens []token.Token) (int, []token.Token, *ast.StyleRule, error) {
	rule := &ast.StyleRule{}

	offset, tokens = skipWhitespace(offset, tokens)
	offset, tokens, selectors, err := parseSelectorList(offset, tokens)
	if err != nil {
		return offset, tokens, nil, err
	}
	rule.Selectors = selectors

	offset, tokens = skipWhitespace(offset, tokens)
	if _, ok := peek(tokens, 1).(token.OpenCurly); !ok {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "{"`,
		}
	}
	offset, tokens = skipWhitespace(offset+1, tokens[1:])

	offset, tokens, declarations, err := parseDeclarationList(offset, tokens)
	if err != nil {
		return offset, tokens, rule, err
	}
	rule.Declarations = declarations

	offset, tokens = skipWhitespace(offset, tokens)
	if _, ok := peek(tokens, 1).(token.CloseCurly); !ok {
		return offset, tokens, rule, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "}"`,
		}
	}
	return offset + 1, tokens[1:], rule, nil
}
// parseImportRule parses "@import <url> [<media conditions>] ;"; the
// at-keyword has already been consumed. The target may be a raw url token,
// a quoted string, or a url("...") function call.
func parseImportRule(offset int, tokens []token.Token) (int, []token.Token, *ast.ImportRule, error) {
	rule := &ast.ImportRule{}
	offset, tokens = skipWhitespace(offset, tokens)
	// NOTE(review): a first token that is none of Url/String/Function falls
	// through this switch without error, leaving rule.URL nil — confirm
	// this leniency is intended.
	switch t := peek(tokens, 1).(type) {
	case token.Url:
		parsed, err := url.Parse(t.Value)
		if err != nil {
			return offset, tokens, nil, SyntaxError{
				Offset:  offset,
				Message: err.Error(),
			}
		}
		rule.URL = parsed
		offset, tokens = offset+1, tokens[1:]
	case token.String:
		// A bare string is treated exactly like a url token.
		parsed, err := url.Parse(t.Value)
		if err != nil {
			return offset, tokens, nil, SyntaxError{
				Offset:  offset,
				Message: err.Error(),
			}
		}
		rule.URL = parsed
		offset, tokens = offset+1, tokens[1:]
	case token.Function:
		// Only the url(...) function is accepted here.
		if t.Value != "url" {
			return offset, tokens, nil, SyntaxError{
				Offset:  offset,
				Message: `unexpected function, expected "url()"`,
			}
		}
		offset, tokens = skipWhitespace(offset+1, tokens[1:])
		switch t := peek(tokens, 1).(type) {
		case token.String:
			parsed, err := url.Parse(t.Value)
			if err != nil {
				return offset, tokens, nil, SyntaxError{
					Offset:  offset,
					Message: err.Error(),
				}
			}
			rule.URL = parsed
			offset, tokens = offset+1, tokens[1:]
		default:
			return offset, tokens, nil, SyntaxError{
				Offset:  offset,
				Message: "unexpected token, expected string",
			}
		}
		offset, tokens = skipWhitespace(offset, tokens)
		switch peek(tokens, 1).(type) {
		case token.CloseParen:
			offset, tokens = offset+1, tokens[1:]
		default:
			return offset, tokens, nil, SyntaxError{
				Offset:  offset,
				Message: `unexpected token, expected ")"`,
			}
		}
	}
	offset, tokens = skipWhitespace(offset, tokens)
	switch peek(tokens, 1).(type) {
	case token.Semicolon:
		return offset + 1, tokens[1:], rule, nil
	case nil:
		// EOF is accepted in place of the terminating semicolon.
		return offset, tokens, rule, nil
	default:
		// Anything else must be a media query list qualifying the import.
		offset, tokens, conditions, err := parseMediaQueryList(offset, tokens)
		if err != nil {
			return offset, tokens, rule, err
		}
		rule.Conditions = conditions
		offset, tokens = skipWhitespace(offset, tokens)
		switch peek(tokens, 1).(type) {
		case token.Semicolon:
			return offset + 1, tokens[1:], rule, nil
		case nil:
			return offset, tokens, rule, nil
		default:
			return offset, tokens, nil, SyntaxError{
				Offset:  offset,
				Message: `unexpected token, expected ";"`,
			}
		}
	}
}
// parseMediaRule parses "@media <conditions> { <style sheet> }"; the
// at-keyword has already been consumed.
func parseMediaRule(offset int, tokens []token.Token) (int, []token.Token, *ast.MediaRule, error) {
	rule := &ast.MediaRule{}

	offset, tokens = skipWhitespace(offset, tokens)
	offset, tokens, conditions, err := parseMediaQueryList(offset, tokens)
	if err != nil {
		return offset, tokens, nil, err
	}
	rule.Conditions = conditions

	offset, tokens = skipWhitespace(offset, tokens)
	if _, ok := peek(tokens, 1).(token.OpenCurly); !ok {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "{"`,
		}
	}
	offset, tokens = offset+1, tokens[1:]

	offset, tokens, styleSheet, err := parseStyleSheet(offset, tokens)
	if err != nil {
		return offset, tokens, nil, err
	}
	rule.StyleSheet = styleSheet

	offset, tokens = skipWhitespace(offset, tokens)
	if _, ok := peek(tokens, 1).(token.CloseCurly); !ok {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "}"`,
		}
	}
	return offset + 1, tokens[1:], rule, nil
}
// parseFontFaceRule parses "@font-face { declaration-list }"; the at-keyword
// has already been consumed.
func parseFontFaceRule(offset int, tokens []token.Token) (int, []token.Token, *ast.FontFaceRule, error) {
	rule := &ast.FontFaceRule{}

	offset, tokens = skipWhitespace(offset, tokens)
	if _, ok := peek(tokens, 1).(token.OpenCurly); !ok {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "{"`,
		}
	}
	offset, tokens = skipWhitespace(offset+1, tokens[1:])

	offset, tokens, declarations, err := parseDeclarationList(offset, tokens)
	if err != nil {
		return offset, tokens, rule, err
	}
	rule.Declarations = declarations

	offset, tokens = skipWhitespace(offset, tokens)
	if _, ok := peek(tokens, 1).(token.CloseCurly); !ok {
		return offset, tokens, rule, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "}"`,
		}
	}
	return offset + 1, tokens[1:], rule, nil
}
// parseKeyframesRule parses "@keyframes <name> { <blocks> }"; prefix carries
// the vendor prefix of the consumed at-keyword ("-webkit-" or empty).
func parseKeyframesRule(offset int, tokens []token.Token, prefix string) (int, []token.Token, *ast.KeyframesRule, error) {
	rule := &ast.KeyframesRule{Prefix: prefix}
	offset, tokens = skipWhitespace(offset, tokens)
	// The animation name may be an identifier or a quoted string.
	switch t := peek(tokens, 1).(type) {
	case token.Ident:
		rule.Name = t.Value
	case token.String:
		rule.Name = t.Value
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: "unexpected token, expected string or ident",
		}
	}
	offset, tokens = skipWhitespace(offset+1, tokens[1:])
	switch peek(tokens, 1).(type) {
	case token.OpenCurly:
		offset, tokens = offset+1, tokens[1:]
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "{"`,
		}
	}
	// Collect keyframe blocks until the closing brace.
	for {
		switch peek(tokens, 1).(type) {
		case token.Whitespace:
			offset, tokens = offset+1, tokens[1:]
		case token.CloseCurly:
			return offset + 1, tokens[1:], rule, nil
		default:
			var (
				block *ast.KeyframeBlock
				err   error
			)
			offset, tokens, block, err = parseKeyframeBlock(offset, tokens)
			if err != nil {
				return offset, tokens, rule, err
			}
			rule.Blocks = append(rule.Blocks, block)
		}
	}
}
// parseKeyframeBlock parses one keyframe: a "from"/"to"/percentage selector
// followed by "{ declaration-list }".
func parseKeyframeBlock(offset int, tokens []token.Token) (int, []token.Token, *ast.KeyframeBlock, error) {
	block := &ast.KeyframeBlock{}
	switch t := peek(tokens, 1).(type) {
	case token.Ident:
		// NOTE(review): "from"/"to" are stored as the sentinels 0 and 1 —
		// presumably interpreted as 0%/100% downstream; confirm against the
		// ast.KeyframeBlock consumers (a Percentage token stores t.Value
		// directly, which may use a different scale).
		switch t.Value {
		case "from":
			block.Selector = 0
		case "to":
			block.Selector = 1
		default:
			return offset, tokens, nil, SyntaxError{
				Offset:  offset,
				Message: `unexpected token, expected "from" or "to"`,
			}
		}
	case token.Percentage:
		block.Selector = t.Value
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "from", "to", or percentage`,
		}
	}
	offset, tokens = skipWhitespace(offset+1, tokens[1:])
	switch peek(tokens, 1).(type) {
	case token.OpenCurly:
		var (
			declarations []*ast.Declaration
			err          error
		)
		offset, tokens, declarations, err = parseDeclarationList(skipWhitespace(offset+1, tokens[1:]))
		if err != nil {
			return offset, tokens, nil, err
		}
		block.Declarations = declarations
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "{"`,
		}
	}
	offset, tokens = skipWhitespace(offset, tokens)
	switch peek(tokens, 1).(type) {
	case token.CloseCurly:
		return offset + 1, tokens[1:], block, nil
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "}"`,
		}
	}
}
// parseSupportsRule parses "@supports <condition> { <style sheet> }"; the
// at-keyword has already been consumed.
func parseSupportsRule(offset int, tokens []token.Token) (int, []token.Token, *ast.SupportsRule, error) {
	rule := &ast.SupportsRule{}

	offset, tokens = skipWhitespace(offset, tokens)
	offset, tokens, condition, err := parseSupportsCondition(offset, tokens)
	if err != nil {
		return offset, tokens, nil, err
	}
	rule.Condition = condition

	offset, tokens = skipWhitespace(offset, tokens)
	if _, ok := peek(tokens, 1).(token.OpenCurly); !ok {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "{"`,
		}
	}
	offset, tokens = skipWhitespace(offset+1, tokens[1:])

	offset, tokens, styleSheet, err := parseStyleSheet(offset, tokens)
	if err != nil {
		return offset, tokens, nil, err
	}
	rule.StyleSheet = styleSheet

	// parseStyleSheet already consumed trailing whitespace, so the closing
	// brace must be next.
	if _, ok := peek(tokens, 1).(token.CloseCurly); !ok {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "}"`,
		}
	}
	return offset + 1, tokens[1:], rule, nil
}
// parsePageRule parses "@page <selectors> { <components> }"; the at-keyword
// has already been consumed.
func parsePageRule(offset int, tokens []token.Token) (int, []token.Token, *ast.PageRule, error) {
	rule := &ast.PageRule{}

	offset, tokens = skipWhitespace(offset, tokens)
	offset, tokens, selectors, err := parsePageSelectorList(offset, tokens)
	if err != nil {
		return offset, tokens, nil, err
	}
	rule.Selectors = selectors

	offset, tokens = skipWhitespace(offset, tokens)
	if _, ok := peek(tokens, 1).(token.OpenCurly); !ok {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "{"`,
		}
	}
	offset, tokens = skipWhitespace(offset+1, tokens[1:])

	offset, tokens, components, err := parsePageComponentList(offset, tokens)
	if err != nil {
		return offset, tokens, nil, err
	}
	rule.Components = components

	offset, tokens = skipWhitespace(offset, tokens)
	if _, ok := peek(tokens, 1).(token.CloseCurly); !ok {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "}"`,
		}
	}
	return offset + 1, tokens[1:], rule, nil
}
// parseDeclarationList parses semicolon-separated declarations up to (but
// not including) the closing "}" of the enclosing block.
func parseDeclarationList(offset int, tokens []token.Token) (int, []token.Token, []*ast.Declaration, error) {
	var declarations []*ast.Declaration
	for {
		switch peek(tokens, 1).(type) {
		case token.CloseCurly:
			return offset, tokens, declarations, nil
		case token.Whitespace, token.Semicolon:
			// Separators between declarations carry no meaning.
			offset, tokens = offset+1, tokens[1:]
			continue
		}
		var (
			declaration *ast.Declaration
			err         error
		)
		offset, tokens, declaration, err = parseDeclaration(offset, tokens)
		if err != nil {
			return offset, tokens, nil, err
		}
		declarations = append(declarations, declaration)
	}
}
// parseDeclaration parses "name: <component values>", stopping (without
// consuming) at a terminating ";", a closing bracket, or end of input.
func parseDeclaration(offset int, tokens []token.Token) (int, []token.Token, *ast.Declaration, error) {
	declaration := &ast.Declaration{}
	switch t := peek(tokens, 1).(type) {
	case token.Ident:
		declaration.Name = t.Value
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected ident`,
		}
	}
	offset, tokens = skipWhitespace(offset+1, tokens[1:])
	switch peek(tokens, 1).(type) {
	case token.Colon:
		offset, tokens = offset+1, tokens[1:]
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected ":"`,
		}
	}
	offset, tokens = skipWhitespace(offset, tokens)
	// The value is kept as raw component tokens rather than parsed further.
	for {
		switch peek(tokens, 1).(type) {
		case token.CloseParen, token.CloseCurly, token.CloseSquare, token.Semicolon, nil:
			return offset, tokens, declaration, nil
		default:
			var (
				component []token.Token
				err       error
			)
			offset, tokens, component, err = parseComponent(offset, tokens)
			if err != nil {
				return offset, tokens, nil, err
			}
			declaration.Value = append(declaration.Value, component...)
		}
	}
}
// parseComponent consumes one component value: either a lone token, or an
// opening paren/function token together with everything up to its matching
// ")". The consumed tokens are returned verbatim.
func parseComponent(offset int, tokens []token.Token) (int, []token.Token, []token.Token, error) {
	// Guard before slicing: tokens[:1] panics on an exhausted stream whose
	// capacity is also zero, even though the nil case below intends a
	// graceful return.
	if len(tokens) == 0 {
		return offset, tokens, nil, nil
	}
	component := tokens[:1]
	switch peek(tokens, 1).(type) {
	case token.OpenParen, token.Function:
		offset, tokens, block, err := parseParenBlock(offset+1, tokens[1:])
		if err != nil {
			return offset, tokens, nil, err
		}
		return offset, tokens, append(component, block...), nil
	case nil:
		return offset, tokens, nil, nil
	default:
		return offset + 1, tokens[1:], component, nil
	}
}
// parseParenBlock consumes tokens up to and including the matching ")" and
// returns them verbatim (the ")" is the last element of the result).
func parseParenBlock(offset int, tokens []token.Token) (int, []token.Token, []token.Token, error) {
	var block []token.Token
	for {
		switch next := peek(tokens, 1).(type) {
		case token.CloseParen:
			block = append(block, next)
			return offset + 1, tokens[1:], block, nil
		case nil:
			// Ran out of tokens with the paren still open.
			return offset, tokens, block, SyntaxError{
				Offset:  offset,
				Message: `unexpected token, expected ")"`,
			}
		}
		var (
			component []token.Token
			err       error
		)
		offset, tokens, component, err = parseComponent(offset, tokens)
		if err != nil {
			return offset, tokens, block, err
		}
		block = append(block, component...)
	}
}
// parseSelectorList parses comma-separated selectors up to (but not
// including) the opening "{" of the rule body.
func parseSelectorList(offset int, tokens []token.Token) (int, []token.Token, []ast.Selector, error) {
	var selectors []ast.Selector
	for {
		switch peek(tokens, 1).(type) {
		case token.OpenCurly:
			return offset, tokens, selectors, nil
		case token.Whitespace, token.Comma:
			// Separators between selectors carry no meaning at this level.
			offset, tokens = offset+1, tokens[1:]
			continue
		}
		var (
			selector ast.Selector
			err      error
		)
		offset, tokens, selector, err = parseSelector(offset, tokens)
		if err != nil {
			return offset, tokens, selectors, err
		}
		selectors = append(selectors, selector)
	}
}
// parseSelector parses one selector, folding successive simple selectors
// into compound selectors and combinator-joined ones into complex selectors.
// It stops (without error) at the first token that cannot extend the
// selector, returning whatever has been accumulated in left.
func parseSelector(offset int, tokens []token.Token) (int, []token.Token, ast.Selector, error) {
	var (
		left  ast.Selector
		right ast.Selector
		err   error
	)
	for {
		switch t := peek(tokens, 1).(type) {
		case token.Delim:
			switch t.Value {
			case '.':
				// ".name" — class selector.
				offset, tokens, right, err = parseClassSelector(offset+1, tokens[1:])
				if err != nil {
					return offset, tokens, left, err
				}
				left = combineSelectors(left, right)
			case '#':
				// "#name" — id selector.
				offset, tokens, right, err = parseIDSelector(offset+1, tokens[1:])
				if err != nil {
					return offset, tokens, left, err
				}
				left = combineSelectors(left, right)
			case '*':
				// "*" — universal selector, modeled as a type selector.
				offset, tokens, right = offset+1, tokens[1:], &ast.TypeSelector{Name: "*"}
				left = combineSelectors(left, right)
			case '>', '~', '+':
				// Explicit combinator: hand the accumulated left side over.
				offset, tokens, left, err = parseComplexSelector(offset, tokens, left)
				if err != nil {
					return offset, tokens, left, err
				}
			default:
				return offset, tokens, left, nil
			}
		case token.OpenSquare:
			// "[attr…]" — attribute selector.
			offset, tokens, right, err = parseAttributeSelector(offset+1, tokens[1:])
			if err != nil {
				return offset, tokens, left, err
			}
			left = combineSelectors(left, right)
		case token.Ident:
			offset, tokens, right, err = parseTypeSelector(offset, tokens)
			if err != nil {
				return offset, tokens, left, err
			}
			left = combineSelectors(left, right)
		case token.Colon:
			// ":pseudo-class" or "::pseudo-element".
			offset, tokens, right, err = parsePseudoSelector(offset+1, tokens[1:])
			if err != nil {
				return offset, tokens, left, err
			}
			left = combineSelectors(left, right)
		case token.Whitespace:
			// Whitespace is the descendant combinator only when another
			// selector follows; otherwise it merely terminates this one.
			if len(tokens) > 1 && startsSelector(tokens[1]) {
				offset, tokens, left, err = parseComplexSelector(offset, tokens, left)
				if err != nil {
					return offset, tokens, left, err
				}
			} else {
				offset, tokens = offset+1, tokens[1:]
			}
		default:
			return offset, tokens, left, nil
		}
	}
}
// startsSelector reports whether t can begin a selector; it is used to tell
// a descendant combinator apart from trailing whitespace.
func startsSelector(t token.Token) bool {
	if d, ok := t.(token.Delim); ok {
		return d.Value == '.' || d.Value == '#' || d.Value == '*'
	}
	switch t.(type) {
	case token.Ident, token.Colon:
		return true
	}
	return false
}
// combineSelectors folds two simple selectors into a compound selector; a
// nil left operand means right is the first component and is returned as-is.
func combineSelectors(left ast.Selector, right ast.Selector) ast.Selector {
	if left != nil {
		return &ast.CompoundSelector{Left: left, Right: right}
	}
	return right
}
// parseIDSelector parses the identifier following a consumed "#".
func parseIDSelector(offset int, tokens []token.Token) (int, []token.Token, *ast.IdSelector, error) {
	if t, ok := peek(tokens, 1).(token.Ident); ok {
		return offset + 1, tokens[1:], &ast.IdSelector{Name: t.Value}, nil
	}
	return offset, tokens, nil, SyntaxError{
		Offset:  offset,
		Message: "unexpected token, expected id",
	}
}
// parseClassSelector parses the identifier following a consumed ".".
func parseClassSelector(offset int, tokens []token.Token) (int, []token.Token, *ast.ClassSelector, error) {
	if t, ok := peek(tokens, 1).(token.Ident); ok {
		return offset + 1, tokens[1:], &ast.ClassSelector{Name: t.Value}, nil
	}
	return offset, tokens, nil, SyntaxError{
		Offset:  offset,
		Message: "unexpected token, expected class",
	}
}
// parseAttributeSelector parses "[name <matcher> value <modifier>]" after
// the "[" has been consumed. An immediate "]" yields a presence-only
// selector.
func parseAttributeSelector(offset int, tokens []token.Token) (int, []token.Token, *ast.AttributeSelector, error) {
	selector := &ast.AttributeSelector{}
	offset, tokens = skipWhitespace(offset, tokens)
	switch t := peek(tokens, 1).(type) {
	case token.Ident:
		selector.Name = t.Value
		offset, tokens = offset+1, tokens[1:]
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: "unexpected token, expected attribute name",
		}
	}
	offset, tokens = skipWhitespace(offset, tokens)
	switch t := peek(tokens, 1).(type) {
	case token.CloseSquare:
		// "[name]" — presence check, no matcher or value.
		return offset + 1, tokens[1:], selector, nil
	case token.Delim:
		switch t.Value {
		case '=':
			selector.Matcher = "="
			offset, tokens = offset+1, tokens[1:]
		case '~', '|', '^', '$', '*':
			// Two-character matchers: the delim must be followed by "=".
			// NOTE(review): if "=" does not follow, the delim is still
			// consumed and Matcher is left empty — confirm this leniency
			// is intended.
			offset, tokens = offset+1, tokens[1:]
			switch u := peek(tokens, 1).(type) {
			case token.Delim:
				if u.Value == '=' {
					switch t.Value {
					case '~':
						selector.Matcher = "~="
					case '|':
						selector.Matcher = "|="
					case '^':
						selector.Matcher = "^="
					case '$':
						selector.Matcher = "$="
					case '*':
						selector.Matcher = "*="
					}
					offset, tokens = offset+1, tokens[1:]
				}
			}
		}
	}
	offset, tokens = skipWhitespace(offset, tokens)
	switch t := peek(tokens, 1).(type) {
	case token.String:
		// Preserve the original quote mark around the value.
		selector.Value = fmt.Sprintf("%c%s%[1]c", t.Mark, t.Value)
	case token.Ident:
		selector.Value = t.Value
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: "unexpected token, expected attribute value",
		}
	}
	offset, tokens = skipWhitespace(offset+1, tokens[1:])
	// Optional case-sensitivity modifier ("i" or "s").
	switch t := peek(tokens, 1).(type) {
	case token.Ident:
		switch t.Value {
		case "i", "s":
			selector.Modifier = t.Value
			offset, tokens = offset+1, tokens[1:]
		}
	}
	offset, tokens = skipWhitespace(offset, tokens)
	switch peek(tokens, 1).(type) {
	case token.CloseSquare:
		return offset + 1, tokens[1:], selector, nil
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "]"`,
		}
	}
}
// parseTypeSelector parses an element-type selector from a leading ident.
func parseTypeSelector(offset int, tokens []token.Token) (int, []token.Token, *ast.TypeSelector, error) {
	if t, ok := peek(tokens, 1).(token.Ident); ok {
		return offset + 1, tokens[1:], &ast.TypeSelector{Name: t.Value}, nil
	}
	return offset, tokens, nil, SyntaxError{
		Offset:  offset,
		Message: "unexpected token, expected type",
	}
}
// parsePseudoSelector parses a pseudo-class or pseudo-element after the
// first ":" has been consumed; a second ":" marks a pseudo-element.
// Functional forms collect their argument tokens raw, up to ")".
func parsePseudoSelector(offset int, tokens []token.Token) (int, []token.Token, *ast.PseudoSelector, error) {
	selector := &ast.PseudoSelector{Name: ":"}
	switch peek(tokens, 1).(type) {
	case token.Colon:
		// "::" — pseudo-element.
		selector.Name += ":"
		offset, tokens = offset+1, tokens[1:]
	}
	switch t := peek(tokens, 1).(type) {
	case token.Ident:
		selector.Name += t.Value
		return offset + 1, tokens[1:], selector, nil
	case token.Function:
		selector.Name += t.Value
		offset, tokens = offset+1, tokens[1:]
		for {
			switch t := peek(tokens, 1).(type) {
			case token.CloseParen:
				return offset + 1, tokens[1:], selector, nil
			case nil:
				return offset, tokens, nil, SyntaxError{
					Offset:  offset,
					Message: `unexpected token, expected ")"`,
				}
			default:
				// Argument tokens are stored unparsed.
				selector.Value = append(selector.Value, t)
				offset, tokens = offset+1, tokens[1:]
			}
		}
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: "unexpected token, expected ident",
		}
	}
}
// parseComplexSelector parses "<left> <combinator> <right>" where the
// combinator is descendant (whitespace), ">", "~", or "+". left has already
// been parsed by the caller.
func parseComplexSelector(offset int, tokens []token.Token, left ast.Selector) (int, []token.Token, *ast.ComplexSelector, error) {
	selector := &ast.ComplexSelector{Left: left}
	valid := false
	switch t := peek(tokens, 1).(type) {
	case token.Whitespace:
		// Plain whitespace between selectors is the descendant combinator.
		selector.Combinator = ' '
		valid = true
	case token.Delim:
		switch t.Value {
		case '>', '~', '+':
			selector.Combinator = t.Value
			valid = true
		}
	}
	if !valid {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: "unexpected token, expected selector combinator",
		}
	}
	offset, tokens = skipWhitespace(offset+1, tokens[1:])
	offset, tokens, right, err := parseSelector(offset, tokens)
	if err != nil {
		return offset, tokens, nil, err
	}
	selector.Right = right
	return offset, tokens, selector, nil
}
// parseMediaQueryList parses comma-separated media queries, stopping before
// "{", ";", or end of input.
func parseMediaQueryList(offset int, tokens []token.Token) (int, []token.Token, []*ast.MediaQuery, error) {
	var queries []*ast.MediaQuery
	for {
		offset, tokens = skipWhitespace(offset, tokens)
		var (
			query *ast.MediaQuery
			err   error
		)
		offset, tokens, query, err = parseMediaQuery(offset, tokens)
		if err != nil {
			return offset, tokens, nil, err
		}
		queries = append(queries, query)

		offset, tokens = skipWhitespace(offset, tokens)
		switch peek(tokens, 1).(type) {
		case token.Comma:
			// Another query follows the comma.
			offset, tokens = offset+1, tokens[1:]
		case token.OpenCurly, token.Semicolon, nil:
			return offset, tokens, queries, nil
		default:
			return offset, tokens, queries, SyntaxError{
				Offset:  offset,
				Message: "unexpected token",
			}
		}
	}
}
// parseMediaQuery parses a single media query: either "(condition)" or an
// optional "not"/"only" qualifier followed by a media type.
func parseMediaQuery(offset int, tokens []token.Token) (int, []token.Token, *ast.MediaQuery, error) {
	mediaQuery := &ast.MediaQuery{}
	switch t := peek(tokens, 1).(type) {
	case token.Ident:
		switch t.Value {
		case "not", "only":
			mediaQuery.Qualifier = t.Value
			offset, tokens = offset+1, tokens[1:]
		}
	case token.OpenParen:
		// "(condition)" form: no media type at all.
		var (
			condition ast.MediaCondition
			err       error
		)
		offset, tokens, condition, err = parseMediaCondition(offset, tokens)
		if err != nil {
			return offset, tokens, nil, err
		}
		mediaQuery.Condition = condition
		return offset, tokens, mediaQuery, nil
	}
	offset, tokens = skipWhitespace(offset, tokens)
	switch t := peek(tokens, 1).(type) {
	case token.Ident:
		switch t.Value {
		case "not", "only", "and", "or":
			// Reserved words cannot serve as a media type. (The original
			// switch listed "not"/"only"/"and" as empty cases, which in Go
			// are no-ops rather than fall-throughs, so only "or" was
			// rejected.)
			return offset, tokens, nil, SyntaxError{
				Offset:  offset,
				Message: "unexpected token, expected media type",
			}
		default:
			mediaQuery.Type = t.Value
			offset, tokens = offset+1, tokens[1:]
		}
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: "unexpected token",
		}
	}
	return offset, tokens, mediaQuery, nil
}
// parseMediaCondition parses "not <expression>" or a chain of expressions
// joined by a single operator: mixing "and" and "or" in one chain is a
// syntax error.
func parseMediaCondition(offset int, tokens []token.Token) (int, []token.Token, ast.MediaCondition, error) {
	switch t := peek(tokens, 1).(type) {
	case token.Ident:
		if t.Value == "not" {
			var (
				condition ast.MediaCondition
				err       error
			)
			offset, tokens, condition, err = parseMediaExpression(
				skipWhitespace(offset+1, tokens[1:]),
			)
			if err != nil {
				return offset, tokens, nil, err
			}
			condition = &ast.MediaNegation{Condition: condition}
			return offset, tokens, condition, nil
		}
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: "unexpected token",
		}
	default:
		var (
			left ast.MediaCondition
			err  error
		)
		offset, tokens, left, err = parseMediaExpression(offset, tokens)
		if err != nil {
			return offset, tokens, nil, err
		}
		// operator records the first operator seen; every later operator in
		// the chain must match it.
		var operator string
		for {
			offset, tokens = skipWhitespace(offset, tokens)
			switch t := peek(tokens, 1).(type) {
			case token.Ident:
				switch t.Value {
				case "and", "or":
					if operator == "" || operator == t.Value {
						operator = t.Value
					} else {
						return offset, tokens, nil, SyntaxError{
							Offset:  offset,
							Message: `unexpected token, expected "` + operator + `"`,
						}
					}
					offset, tokens = offset+1, tokens[1:]
				default:
					return offset, tokens, nil, SyntaxError{
						Offset:  offset,
						Message: `unexpected token, expected "and" or "or"`,
					}
				}
				offset, tokens = skipWhitespace(offset, tokens)
				var right ast.MediaCondition
				offset, tokens, right, err = parseMediaExpression(offset, tokens)
				if err != nil {
					return offset, tokens, nil, err
				}
				// Left-associative: fold the new operand into the left side.
				left = &ast.MediaOperation{
					Operator: operator,
					Left:     left,
					Right:    right,
				}
			default:
				return offset, tokens, left, nil
			}
		}
	}
}
// parseMediaExpression parses either a single media feature "(name: value)"
// or a parenthesized media condition.
func parseMediaExpression(offset int, tokens []token.Token) (int, []token.Token, ast.MediaCondition, error) {
	// The := deliberately shadows offset/tokens: when the feature parse
	// fails, the outer variables still point at the original position and
	// parsing backtracks into the parenthesized-condition form below.
	if offset, tokens, feature, err := parseMediaFeature(offset, tokens); err == nil {
		return offset, tokens, feature, nil
	}
	switch peek(tokens, 1).(type) {
	case token.OpenParen:
		offset, tokens = offset+1, tokens[1:]
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "("`,
		}
	}
	offset, tokens = skipWhitespace(offset, tokens)
	var (
		condition ast.MediaCondition
		err       error
	)
	offset, tokens, condition, err = parseMediaCondition(offset, tokens)
	if err != nil {
		return offset, tokens, condition, err
	}
	offset, tokens = skipWhitespace(offset, tokens)
	switch peek(tokens, 1).(type) {
	case token.CloseParen:
		return offset + 1, tokens[1:], condition, nil
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected ")"`,
		}
	}
}
// parseMediaFeature parses "(name: value)" where value is a number,
// dimension, or identifier.
func parseMediaFeature(offset int, tokens []token.Token) (int, []token.Token, *ast.MediaFeature, error) {
	feature := &ast.MediaFeature{}

	if _, ok := peek(tokens, 1).(token.OpenParen); !ok {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "("`,
		}
	}
	offset, tokens = skipWhitespace(offset+1, tokens[1:])

	name, ok := peek(tokens, 1).(token.Ident)
	if !ok {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: "unexpected token, expected ident",
		}
	}
	feature.Name = name.Value
	offset, tokens = skipWhitespace(offset+1, tokens[1:])

	if _, ok := peek(tokens, 1).(token.Colon); !ok {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected ":"`,
		}
	}
	offset, tokens = skipWhitespace(offset+1, tokens[1:])

	switch t := peek(tokens, 1).(type) {
	case token.Number, token.Dimension, token.Ident:
		feature.Value = &ast.MediaValuePlain{Value: t}
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected number, dimension, or ident`,
		}
	}
	offset, tokens = skipWhitespace(offset+1, tokens[1:])

	if _, ok := peek(tokens, 1).(token.CloseParen); !ok {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected ")"`,
		}
	}
	return offset + 1, tokens[1:], feature, nil
}
// parseSupportsCondition parses "not <expression>" or a chain of
// expressions joined by a single operator: mixing "and" and "or" in one
// chain is a syntax error. Mirrors parseMediaCondition for @supports.
func parseSupportsCondition(offset int, tokens []token.Token) (int, []token.Token, ast.SupportsCondition, error) {
	switch t := peek(tokens, 1).(type) {
	case token.Ident:
		if t.Value == "not" {
			var (
				condition ast.SupportsCondition
				err       error
			)
			offset, tokens, condition, err = parseSupportsExpression(
				skipWhitespace(offset+1, tokens[1:]),
			)
			if err != nil {
				return offset, tokens, nil, err
			}
			condition = &ast.SupportsNegation{Condition: condition}
			return offset, tokens, condition, nil
		}
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: "unexpected token",
		}
	default:
		var (
			left ast.SupportsCondition
			err  error
		)
		offset, tokens, left, err = parseSupportsExpression(offset, tokens)
		if err != nil {
			return offset, tokens, left, err
		}
		// operator records the first operator seen; every later operator in
		// the chain must match it.
		var operator string
		for {
			offset, tokens = skipWhitespace(offset, tokens)
			switch t := peek(tokens, 1).(type) {
			case token.Ident:
				switch t.Value {
				case "and", "or":
					if operator == "" || operator == t.Value {
						operator = t.Value
					} else {
						return offset, tokens, nil, SyntaxError{
							Offset:  offset,
							Message: `unexpected token, expected "` + operator + `"`,
						}
					}
					offset, tokens = offset+1, tokens[1:]
				default:
					return offset, tokens, nil, SyntaxError{
						Offset:  offset,
						Message: `unexpected token, expected "and" or "or"`,
					}
				}
				offset, tokens = skipWhitespace(offset, tokens)
				var right ast.SupportsCondition
				offset, tokens, right, err = parseSupportsExpression(offset, tokens)
				if err != nil {
					return offset, tokens, nil, err
				}
				// Left-associative: fold the new operand into the left side.
				left = &ast.SupportsOperation{
					Operator: operator,
					Left:     left,
					Right:    right,
				}
			default:
				return offset, tokens, left, nil
			}
		}
	}
}
// parseSupportsExpression parses either a single supports feature
// "(declaration)" or a parenthesized supports condition.
func parseSupportsExpression(offset int, tokens []token.Token) (int, []token.Token, ast.SupportsCondition, error) {
	// The := deliberately shadows offset/tokens: when the feature parse
	// fails, the outer variables still point at the original position and
	// parsing backtracks into the parenthesized-condition form below.
	if offset, tokens, feature, err := parseSupportsFeature(offset, tokens); err == nil {
		return offset, tokens, feature, nil
	}
	switch peek(tokens, 1).(type) {
	case token.OpenParen:
		offset, tokens = offset+1, tokens[1:]
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "("`,
		}
	}
	var (
		condition ast.SupportsCondition
		err       error
	)
	offset, tokens, condition, err = parseSupportsCondition(offset, tokens)
	if err != nil {
		return offset, tokens, condition, err
	}
	offset, tokens = skipWhitespace(offset, tokens)
	switch peek(tokens, 1).(type) {
	case token.CloseParen:
		return offset + 1, tokens[1:], condition, nil
	default:
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected ")"`,
		}
	}
}
// parseSupportsFeature parses "(declaration)", e.g. "(display: flex)".
func parseSupportsFeature(offset int, tokens []token.Token) (int, []token.Token, *ast.SupportsFeature, error) {
	if _, ok := peek(tokens, 1).(token.OpenParen); !ok {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected "("`,
		}
	}
	offset, tokens = skipWhitespace(offset+1, tokens[1:])

	offset, tokens, declaration, err := parseDeclaration(offset, tokens)
	if err != nil {
		return offset, tokens, nil, err
	}

	offset, tokens = skipWhitespace(offset, tokens)
	if _, ok := peek(tokens, 1).(token.CloseParen); !ok {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected ")"`,
		}
	}
	return offset + 1, tokens[1:], &ast.SupportsFeature{Declaration: declaration}, nil
}
// parsePageSelectorList consumes @page selectors (separated by whitespace and
// commas) until the opening curly brace of the page block.
func parsePageSelectorList(offset int, tokens []token.Token) (int, []token.Token, []*ast.PageSelector, error) {
	var selectors []*ast.PageSelector
	for {
		switch peek(tokens, 1).(type) {
		case token.OpenCurly:
			// The block body starts here; the caller handles it.
			return offset, tokens, selectors, nil
		case token.Whitespace, token.Comma:
			offset, tokens = offset+1, tokens[1:]
		default:
			next, rest, selector, err := parsePageSelector(offset, tokens)
			if err != nil {
				return next, rest, nil, err
			}
			offset, tokens = next, rest
			selectors = append(selectors, selector)
		}
	}
}
// parsePageSelector parses an optional page-type ident followed by any number
// of pseudo-page classes (":left", ":right", ":first", ":blank").
func parsePageSelector(offset int, tokens []token.Token) (int, []token.Token, *ast.PageSelector, error) {
	selector := &ast.PageSelector{}
	if t, ok := peek(tokens, 1).(token.Ident); ok {
		selector.Type = t.Value
		offset, tokens = offset+1, tokens[1:]
	}
	for {
		if _, ok := peek(tokens, 1).(token.Colon); !ok {
			// No more pseudo-classes; the selector is complete.
			return offset, tokens, selector, nil
		}
		ident, ok := peek(tokens, 2).(token.Ident)
		if !ok {
			return offset, tokens, nil, SyntaxError{
				Offset:  offset,
				Message: `unexpected token, expected page selector`,
			}
		}
		switch ident.Value {
		case "left", "right", "first", "blank":
			selector.Classes = append(selector.Classes, ":"+ident.Value)
			offset, tokens = offset+2, tokens[2:]
		default:
			return offset, tokens, nil, SyntaxError{
				Offset:  offset,
				Message: `unexpected token, expected page selector`,
			}
		}
	}
}
// parsePageComponentList consumes @page block components until the closing
// curly brace, skipping whitespace and semicolons between them.
func parsePageComponentList(offset int, tokens []token.Token) (int, []token.Token, []ast.PageComponent, error) {
	var pageComponents []ast.PageComponent
	for {
		switch peek(tokens, 1).(type) {
		case token.CloseCurly:
			return offset, tokens, pageComponents, nil
		case token.Whitespace, token.Semicolon:
			offset, tokens = offset+1, tokens[1:]
		default:
			next, rest, component, err := parsePageComponent(offset, tokens)
			if err != nil {
				return next, rest, nil, err
			}
			offset, tokens = next, rest
			pageComponents = append(pageComponents, component)
		}
	}
}
// parsePageComponent parses a single component of an @page block body.
// Only declarations (starting with an ident) are supported here.
func parsePageComponent(offset int, tokens []token.Token) (int, []token.Token, ast.PageComponent, error) {
	if _, ok := peek(tokens, 1).(token.Ident); !ok {
		return offset, tokens, nil, SyntaxError{
			Offset:  offset,
			Message: `unexpected token, expected ident`,
		}
	}
	offset, tokens, declaration, err := parseDeclaration(offset, tokens)
	if err != nil {
		return offset, tokens, nil, err
	}
	return offset, tokens, &ast.PageDeclaration{Declaration: declaration}, nil
}
// peek returns the n-th upcoming token (1-based) without consuming it, or nil
// when fewer than n tokens remain.
func peek(tokens []token.Token, n int) token.Token {
	if n <= len(tokens) {
		return tokens[n-1]
	}
	return nil
}
// skipWhitespace consumes a single leading whitespace token, if present.
func skipWhitespace(offset int, tokens []token.Token) (int, []token.Token) {
	switch peek(tokens, 1).(type) {
	case token.Whitespace:
		return offset + 1, tokens[1:]
	}
	return offset, tokens
} | pkg/asset/css/parser/parse.go | 0.679817 | 0.402568 | parse.go | starcoder |
package plaid
import (
"encoding/json"
)
// Pay An object representing a monetary amount.
type Pay struct {
	// A numerical amount of a specific currency.
	Amount NullableFloat32 `json:"amount,omitempty"`
	// Currency code, e.g. USD
	Currency NullableString `json:"currency,omitempty"`
	// AdditionalProperties collects any top-level JSON fields not declared above.
	AdditionalProperties map[string]interface{}
}
// _Pay aliases Pay so UnmarshalJSON can decode without infinite recursion.
type _Pay Pay
// NewPay instantiates a new Pay object.
// Properties with defined defaults are initialized accordingly; the argument
// list tracks the set of API-required properties.
func NewPay() *Pay {
	return &Pay{}
}
// NewPayWithDefaults instantiates a new Pay object, assigning only default
// values; it does not guarantee that API-required properties are set.
func NewPayWithDefaults() *Pay {
	return &Pay{}
}
// GetAmount returns the Amount field value if set, zero value otherwise
// (both when unset and when set to an explicit null).
func (o *Pay) GetAmount() float32 {
	if o != nil {
		if v := o.Amount.Get(); v != nil {
			return *v
		}
	}
	var zero float32
	return zero
}
// GetAmountOk returns the Amount value pointer (nil when unset) and whether
// the field has been set. An explicit null yields (nil, true).
func (o *Pay) GetAmountOk() (*float32, bool) {
	if o != nil {
		return o.Amount.Get(), o.Amount.IsSet()
	}
	return nil, false
}
// HasAmount reports whether the Amount field has been set.
func (o *Pay) HasAmount() bool {
	return o != nil && o.Amount.IsSet()
}
// SetAmount gets a reference to the given NullableFloat32 and assigns it to the Amount field.
func (o *Pay) SetAmount(v float32) {
	o.Amount.Set(&v)
}
// SetAmountNil sets the value for Amount to be an explicit nil
// (the field will serialize as JSON null — see MarshalJSON).
func (o *Pay) SetAmountNil() {
	o.Amount.Set(nil)
}
// UnsetAmount ensures that no value is present for Amount, not even an explicit nil
// (the field is omitted from the serialized JSON entirely — see MarshalJSON).
func (o *Pay) UnsetAmount() {
	o.Amount.Unset()
}
// GetCurrency returns the Currency field value if set, zero value otherwise
// (both when unset and when set to an explicit null).
func (o *Pay) GetCurrency() string {
	if o != nil {
		if v := o.Currency.Get(); v != nil {
			return *v
		}
	}
	var zero string
	return zero
}
// GetCurrencyOk returns the Currency value pointer (nil when unset) and
// whether the field has been set. An explicit null yields (nil, true).
func (o *Pay) GetCurrencyOk() (*string, bool) {
	if o != nil {
		return o.Currency.Get(), o.Currency.IsSet()
	}
	return nil, false
}
// HasCurrency reports whether the Currency field has been set.
func (o *Pay) HasCurrency() bool {
	return o != nil && o.Currency.IsSet()
}
// SetCurrency gets a reference to the given NullableString and assigns it to the Currency field.
func (o *Pay) SetCurrency(v string) {
	o.Currency.Set(&v)
}
// SetCurrencyNil sets the value for Currency to be an explicit nil
// (the field will serialize as JSON null — see MarshalJSON).
func (o *Pay) SetCurrencyNil() {
	o.Currency.Set(nil)
}
// UnsetCurrency ensures that no value is present for Currency, not even an explicit nil
// (the field is omitted from the serialized JSON entirely — see MarshalJSON).
func (o *Pay) UnsetCurrency() {
	o.Currency.Unset()
}
// MarshalJSON serializes the set fields plus any additional properties.
func (o Pay) MarshalJSON() ([]byte, error) {
	out := make(map[string]interface{})
	if o.Amount.IsSet() {
		out["amount"] = o.Amount.Get()
	}
	if o.Currency.IsSet() {
		out["currency"] = o.Currency.Get()
	}
	for k, v := range o.AdditionalProperties {
		out[k] = v
	}
	return json.Marshal(out)
}
// UnmarshalJSON decodes the declared fields into o and stores any remaining
// top-level fields in o.AdditionalProperties.
func (o *Pay) UnmarshalJSON(bytes []byte) (err error) {
	varPay := _Pay{}
	if err = json.Unmarshal(bytes, &varPay); err != nil {
		// Fail fast. Previously this error could be masked: the later decode
		// into the generic map could succeed and overwrite err with nil,
		// returning success with a zero-valued object.
		return err
	}
	*o = Pay(varPay)
	additionalProperties := make(map[string]interface{})
	if err = json.Unmarshal(bytes, &additionalProperties); err == nil {
		delete(additionalProperties, "amount")
		delete(additionalProperties, "currency")
		o.AdditionalProperties = additionalProperties
	}
	return err
}
// NullablePay wraps a *Pay together with an explicit "set" flag, so an
// explicit JSON null can be distinguished from an absent field.
type NullablePay struct {
	value *Pay
	isSet bool
}
// Get returns the wrapped Pay value (may be nil).
func (v NullablePay) Get() *Pay {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullablePay) Set(val *Pay) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set has been called (even with nil).
func (v NullablePay) IsSet() bool {
	return v.isSet
}
// Unset clears both the value and the set flag.
func (v *NullablePay) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullablePay returns a NullablePay holding val, marked as set.
func NewNullablePay(val *Pay) *NullablePay {
	return &NullablePay{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value (a nil value encodes as JSON null).
func (v NullablePay) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes into the wrapped value and marks the wrapper as set.
func (v *NullablePay) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | plaid/model_pay.go | 0.838878 | 0.411229 | model_pay.go | starcoder |
package forGraphBLASGo
import (
"github.com/intel/forGoParallel/parallel"
"github.com/intel/forGoParallel/pipeline"
"sync/atomic"
)
/*
Matrix is the exported representation of matrices. It uses Matrix.ref as an indirection to a matrixReference
representation.
When a Matrix is side-effected, then assignments are made to the .ref pointer. For example:
	func (m *Matrix[T]) SetElement(value T, row, col int) error {
		...
		m.ref = newMatrixReference(m.ref.setElement(value, row, col))
		return nil
	}
In this example, m.ref.setElement is a side-effect-free function that creates a new matrix, which then gets assigned to
m.ref to perform the actual side effect. This is important because the original m.ref might still be used in other
contexts.
The representation underneath Matrix.ref might silently change (for example from listMatrix to csrMatrix), but
these changes do not affect the semantics. (They are side-effect-free in the functional programming sense.) These
changes are properly synchronized.
Assignments to Matrix.ref directly are not synchronized. As per GraphBLAS specification, it is the task of user programs
to take care of synchronizing actual side effects.
*/
type Matrix[T any] struct {
	// ref points at the current immutable representation; mutations replace it.
	ref *matrixReference[T]
}
// MatrixNew creates an empty nrows-by-ncols matrix.
// It fails with InvalidValue when either dimension is not positive.
func MatrixNew[T any](nrows, ncols int) (result *Matrix[T], err error) {
	if nrows < 1 || ncols < 1 {
		err = InvalidValue
		return
	}
	result = &Matrix[T]{newMatrixReference[T](makeEmptyMatrix[T](nrows, ncols), 0)}
	return
}
// Dup returns a new Matrix handle sharing the current representation of m.
func (m *Matrix[T]) Dup() (result *Matrix[T], err error) {
	if m != nil && m.ref != nil {
		return &Matrix[T]{m.ref}, nil
	}
	err = UninitializedObject
	return
}
// MatrixDiag builds a square matrix whose k-th diagonal holds the entries of v.
func MatrixDiag[T any](v *Vector[T], k int) (result *Matrix[T], err error) {
	if v == nil || v.ref == nil {
		return nil, UninitializedObject
	}
	// The matrix must be large enough to hold the diagonal shifted by k.
	size := v.ref.size() + absInt(k)
	nvals := atomic.LoadInt64(&v.ref.nvalues)
	result = &Matrix[T]{newMatrixReference[T](newDiagonalMatrix[T](size, size, v.ref, k), nvals)}
	return
}
// Resize changes the dimensions of m to nrows x ncols.
func (m *Matrix[T]) Resize(nrows, ncols int) error {
	switch {
	case nrows < 1 || ncols < 1:
		return InvalidValue
	case m == nil || m.ref == nil:
		return UninitializedObject
	}
	m.ref = m.ref.resize(nrows, ncols)
	return nil
}
// Clear removes all stored values from m, keeping its dimensions.
func (m *Matrix[T]) Clear() error {
	if m == nil || m.ref == nil {
		return UninitializedObject
	}
	r, c := m.ref.size()
	m.ref = newMatrixReference[T](makeEmptyMatrix[T](r, c), 0)
	return nil
}
// NRows returns the number of rows of m.
func (m *Matrix[T]) NRows() (int, error) {
	if m == nil || m.ref == nil {
		return 0, UninitializedObject
	}
	nrows, _ := m.ref.size()
	return nrows, nil
}
// NCols returns the number of columns of m.
func (m *Matrix[T]) NCols() (int, error) {
	if m == nil || m.ref == nil {
		return 0, UninitializedObject
	}
	_, ncols := m.ref.size()
	return ncols, nil
}
// Size returns the dimensions (rows, columns) of m.
func (m *Matrix[T]) Size() (int, int, error) {
	if m == nil || m.ref == nil {
		return 0, 0, UninitializedObject
	}
	nrows, ncols := m.ref.size()
	return nrows, ncols, nil
}
// NVals returns the number of stored (explicit) values in m.
func (m *Matrix[T]) NVals() (int, error) {
	if m == nil || m.ref == nil {
		return 0, UninitializedObject
	}
	return m.ref.nvals(), nil
}
// Build initializes m from coordinate-format data: for each i there is an
// element at (rows[i], cols[i]) with value values[i]. If dup is nil,
// duplicate coordinates are an error (InvalidValue); otherwise duplicates
// are combined with dup. The matrix must currently be empty, and all
// indices must lie within its dimensions.
func (m *Matrix[T]) Build(rows, cols []int, values []T, dup BinaryOp[T, T, T]) error {
	if m == nil || m.ref == nil {
		return UninitializedObject
	}
	if len(rows) != len(cols) || len(rows) != len(values) {
		return IndexOutOfBounds
	}
	if m.ref.nvals() > 0 {
		return OutputNotEmpty
	}
	nrows, ncols := m.ref.size()
	// Validate all coordinates in parallel before copying anything.
	// todo: use speculative.RangeOr
	if parallel.RangeOr(0, len(rows), func(low, high int) bool {
		for i := low; i < high; i++ {
			if row := rows[i]; row < 0 || row >= nrows {
				return true
			}
			if col := cols[i]; col < 0 || col >= ncols {
				return true
			}
		}
		return false
	}) {
		return IndexOutOfBounds
	}
	rowCopies, colCopies, valueCopies := fpcopy3(rows, cols, values)
	if dup == nil {
		matrixSort(rowCopies, colCopies, valueCopies)
		// After sorting, any equal adjacent (row, col) pair is a duplicate,
		// which is invalid when no combining operator is supplied.
		// todo: use speculative.RangeOr
		if parallel.RangeOr(0, len(rows), func(low, high int) bool {
			for i := low; i < high-1; i++ {
				if rowCopies[i] == rowCopies[i+1] && colCopies[i] == colCopies[i+1] {
					return true
				}
			}
			return high < len(rows) && rowCopies[high-1] == rowCopies[high] && colCopies[high-1] == colCopies[high]
		}) {
			return InvalidValue
		}
		m.ref = newDelayedMatrixReference[T](func() (functionalMatrix[T], int64) {
			newRows, rowSpans := csrRows(rowCopies)
			return newCSRMatrix[T](nrows, ncols, newRows, rowSpans, colCopies, valueCopies), int64(len(valueCopies))
		})
		return nil
	}
	m.ref = newDelayedMatrixReference[T](func() (functionalMatrix[T], int64) {
		matrixSort(rowCopies, colCopies, valueCopies)
		// dups collects [start, end) index runs of equal (row, col) coordinates.
		var dups [][2]int
		var p pipeline.Pipeline[any]
		p.Source(newIntervalSource(len(valueCopies)))
		p.Add(
			pipeline.Par(pipeline.Receive(func(_ int, data any) any {
				batch := data.(interval)
				low, high := batch.start, batch.end
				var result [][2]int
				// Widen the batch by one element on each side so runs that
				// straddle a batch boundary are seen by at least one worker.
				if low > 0 {
					low--
				}
				if high < len(rowCopies) {
					high++
				}
				for i := low; i < high; {
					row := rowCopies[i]
					col := colCopies[i]
					j := i + 1
					for j < high && rowCopies[j] == row && colCopies[j] == col {
						j++
					}
					if j-i > 1 {
						result = append(result, [2]int{i, j})
						i = j
					} else {
						i++
					}
				}
				return result
			})),
			//todo: we can simplify this: since we are already sequential here, maybe we can copy indices and values
			// already to their right destinations (also in VectorBuild)?
			pipeline.Ord(pipeline.Receive(func(_ int, data any) any {
				// Sequentially merge the per-batch runs, fusing runs that
				// overlap at the seam between adjacent batches.
				ndups := data.([][2]int)
				if len(ndups) == 0 {
					return nil
				}
				lx := len(dups)
				if lx == 0 {
					dups = ndups
					return nil
				}
				lx--
				if i, j := dups[lx][0], ndups[0][0]; rowCopies[i] == rowCopies[j] && colCopies[i] == colCopies[j] {
					ndups[0][0] = i
					if lx == 0 {
						dups = ndups
						return nil
					}
					dups = dups[:lx]
				}
				dups = append(dups, ndups...)
				return nil
			})),
		)
		p.Run()
		if err := p.Err(); err != nil {
			panic(err)
		}
		// Combine each duplicate run into its first slot using dup.
		parallel.Range(0, len(dups), func(low, high int) {
			for i := low; i < high; i++ {
				dp := dups[i]
				start, end := dp[0], dp[1]
				for j := start + 1; j < end; j++ {
					valueCopies[start] = dup(valueCopies[start], valueCopies[j])
				}
			}
		})
		// Compact the arrays, closing the gaps left by the combined runs.
		dups = append(dups, [2]int{len(rowCopies) - 1, len(rowCopies) - 1})
		delta := 0
		for i := 0; i < len(dups)-1; i++ {
			dstStart := dups[i][0] + 1 - delta
			srcStart := dups[i][1]
			srcEnd := dups[i+1][0] + 1
			copy(rowCopies[dstStart:], rowCopies[srcStart:srcEnd])
			copy(colCopies[dstStart:], colCopies[srcStart:srcEnd])
			copy(valueCopies[dstStart:], valueCopies[srcStart:srcEnd])
			delta += dups[i][1] - dups[i][0] - 1
		}
		rowCopies = rowCopies[:len(rowCopies)-delta]
		colCopies = colCopies[:len(colCopies)-delta]
		valueCopies = valueCopies[:len(valueCopies)-delta]
		newRows, rowSpans := csrRows(rowCopies)
		return newCSRMatrix[T](nrows, ncols, newRows, rowSpans, colCopies, valueCopies), int64(len(valueCopies))
	})
	return nil
}
// SetElement stores value at position (row, col) of m.
func (m *Matrix[T]) SetElement(value T, row, col int) error {
	if row < 0 || col < 0 {
		return InvalidIndex
	}
	if m == nil || m.ref == nil {
		return UninitializedObject
	}
	if nrows, ncols := m.ref.size(); row >= nrows || col >= ncols {
		return InvalidIndex
	}
	m.ref = m.ref.setElement(value, row, col)
	return nil
}
// RemoveElement deletes the stored value at position (row, col), if any.
func (m *Matrix[T]) RemoveElement(row, col int) error {
	if row < 0 || col < 0 {
		return InvalidIndex
	}
	if m == nil || m.ref == nil {
		return UninitializedObject
	}
	if nrows, ncols := m.ref.size(); row >= nrows || col >= ncols {
		return InvalidIndex
	}
	m.ref = m.ref.removeElement(row, col)
	return nil
}
// ExtractElement returns the value stored at (row, col), or the NoValue
// error when the position holds no explicit value.
func (m *Matrix[T]) ExtractElement(row, col int) (result T, err error) {
	if row < 0 || col < 0 {
		return result, InvalidIndex
	}
	if m == nil || m.ref == nil {
		return result, UninitializedObject
	}
	nrows, ncols := m.ref.size()
	if row >= nrows || col >= ncols {
		return result, InvalidIndex
	}
	value, ok := m.ref.extractElement(row, col)
	if !ok {
		return result, NoValue
	}
	return value, nil
}
// ExtractTuples returns the coordinates and values of all stored elements.
// As a side effect it refreshes the cached element count of m.
func (m *Matrix[T]) ExtractTuples() (rows, cols []int, values []T, err error) {
	if m == nil || m.ref == nil {
		err = UninitializedObject
		return
	}
	p := m.ref.getPipeline()
	if p == nil {
		// A nil pipeline means there are no stored values.
		atomic.StoreInt64(&m.ref.nvalues, 0)
		return
	}
	var collected matrixSlice[T]
	collected.collect(p)
	rows, cols, values = collected.rows, collected.cols, collected.values
	atomic.StoreInt64(&m.ref.nvalues, int64(len(values)))
	return
}
// ExportHint reports the preferred export format of m (always CSR).
func (m *Matrix[T]) ExportHint() (Format, error) {
	if m != nil && m.ref != nil {
		return CSRFormat, nil
	}
	return 0, UninitializedObject
}
// ExportSize is not yet implemented; it currently panics after the
// initialization check.
func (m *Matrix[T]) ExportSize(_ Format) (int, int, int, error) {
	if m == nil || m.ref == nil {
		return 0, 0, 0, UninitializedObject
	}
	panic("todo") // todo
}
// Export is not yet implemented; it currently panics after the
// initialization check.
func (m *Matrix[T]) Export(_ Format, _, _, _ int) ([]int, []int, []T, error) {
	if m == nil || m.ref == nil {
		return nil, nil, nil, UninitializedObject
	}
	panic("todo") // todo
}
// MatrixImport is not yet implemented; it currently panics.
func MatrixImport[T any](_, _ int, _, _ []int, _ []T, _ Format) (result *Matrix[T], err error) {
	panic("todo") // todo
}
/* todo
SerialSize
Serialize
Deserialize
=> ensure compatibility with Go standard library
*/
// Wait finalizes pending operations on m. Any mode other than Complete
// additionally forces the internal representation to be optimized.
func (m *Matrix[T]) Wait(mode WaitMode) error {
	if m == nil || m.ref == nil {
		return UninitializedObject
	}
	if mode != Complete {
		m.ref.optimize()
	}
	return nil
}
// AsMask reinterprets m as a boolean mask. A *Matrix[bool] is returned
// unchanged; numeric element types are wrapped by newMatrixAsMask (a
// value-based mask — presumably non-zero means true; confirm against its
// implementation), and every other element type falls back to a structural
// mask, where mere presence of a value counts.
func (m *Matrix[T]) AsMask() *Matrix[bool] {
	if m == nil || m.ref == nil {
		return nil
	}
	n := atomic.LoadInt64(&m.ref.nvalues)
	switch m := any(m).(type) {
	case *Matrix[bool]:
		return m
	case *Matrix[int8]:
		return &Matrix[bool]{newMatrixReference[bool](newMatrixAsMask[int8](m.ref), n)}
	case *Matrix[int16]:
		return &Matrix[bool]{newMatrixReference[bool](newMatrixAsMask[int16](m.ref), n)}
	case *Matrix[int32]:
		return &Matrix[bool]{newMatrixReference[bool](newMatrixAsMask[int32](m.ref), n)}
	case *Matrix[int64]:
		return &Matrix[bool]{newMatrixReference[bool](newMatrixAsMask[int64](m.ref), n)}
	case *Matrix[uint8]:
		return &Matrix[bool]{newMatrixReference[bool](newMatrixAsMask[uint8](m.ref), n)}
	case *Matrix[uint16]:
		return &Matrix[bool]{newMatrixReference[bool](newMatrixAsMask[uint16](m.ref), n)}
	case *Matrix[uint32]:
		return &Matrix[bool]{newMatrixReference[bool](newMatrixAsMask[uint32](m.ref), n)}
	case *Matrix[uint64]:
		return &Matrix[bool]{newMatrixReference[bool](newMatrixAsMask[uint64](m.ref), n)}
	case *Matrix[float32]:
		return &Matrix[bool]{newMatrixReference[bool](newMatrixAsMask[float32](m.ref), n)}
	case *Matrix[float64]:
		return &Matrix[bool]{newMatrixReference[bool](newMatrixAsMask[float64](m.ref), n)}
	}
	return &Matrix[bool]{newMatrixReference[bool](newMatrixAsStructuralMask[T](m.ref), n)}
} | api_Matrix.go | 0.583678 | 0.695748 | api_Matrix.go | starcoder |
package polyhedra
// NewPolyhedron creates a Polyhedron from the given vertices, edges and faces.
func NewPolyhedron(vertices []Vertex, edges []Edge, faces []Face) (*Polyhedron, error) {
	p := &Polyhedron{vertices: vertices}
	p.setFaces(faces)
	p.vertexNeighbors = make(map[Vertex][]Vertex)
	p.addEdges(edges)
	return p, nil
}
// Polyhedron represents a Polyhedron consisting of vertices, edges and faces.
type Polyhedron struct {
	faces []Face
	vertices []Vertex
	// vertexNeighbors maps each vertex to the vertices it shares an edge with.
	vertexNeighbors map[Vertex][]Vertex
	// edgeCache is the lazily (re)built list returned by Edges.
	edgeCache []Edge
	// edgeToFace indexes, for each directed edge, the faces bordering it.
	edgeToFace map[Edge][]Face
}
// init initialises the polyhedrons access caches.
// NOTE(review): NewPolyhedron builds these maps itself instead of calling
// init — confirm this method is still needed.
func (p *Polyhedron) init() {
	p.vertexNeighbors = make(map[Vertex][]Vertex)
	p.edgeToFace = make(map[Edge][]Face)
}
// Vertices returns the polyhedrons vertices (the backing slice, not a copy).
func (p *Polyhedron) Vertices() []Vertex {
	return p.vertices
}
// Edges returns the polyhedrons edges.
// Each undirected edge is generated once per endpoint and then deduplicated
// by cullDuplicates; the result is cached until resetEdgeCache is called.
// NOTE(review): an edgeless polyhedron recomputes on every call, since an
// empty cache is indistinguishable from a never-built one.
func (p *Polyhedron) Edges() []Edge {
	if len(p.edgeCache) == 0 {
		edges := make([]Edge, 0)
		for _, v := range p.vertices {
			vns := p.vertexNeighbors[v]
			for _, vn := range vns {
				edges = append(edges, NewEdge(v, vn))
			}
		}
		p.edgeCache = cullDuplicates(edges)
	}
	return p.edgeCache
}
// resetEdgeCache invalidates the cached edge list so Edges rebuilds it on demand.
func (p *Polyhedron) resetEdgeCache() {
	if len(p.edgeCache) == 0 {
		return
	}
	p.edgeCache = make([]Edge, 0)
}
// Faces returns the polyhedrons faces (the backing slice, not a copy).
func (p *Polyhedron) Faces() []Face {
	return p.faces
}
// addVertex appends the given vertex to the Polyhedron.
func (p *Polyhedron) addVertex(v Vertex) {
	p.vertices = append(p.vertices, v)
}
// addSingleEdge records v2 as a neighbor of v1 (one direction only).
//
// Simplified: appending to the nil slice of a missing map entry works in Go,
// so the previous make-then-reload dance was redundant.
func (p *Polyhedron) addSingleEdge(v1 Vertex, v2 Vertex) {
	p.vertexNeighbors[v1] = append(p.vertexNeighbors[v1], v2)
}
// addEdge records an undirected Edge between the two given vertices by
// storing both directions, then invalidates the edge cache.
// It always returns nil; the error exists for call-site symmetry.
func (p *Polyhedron) addEdge(v1 Vertex, v2 Vertex) error {
	p.addSingleEdge(v1, v2)
	p.addSingleEdge(v2, v1)
	p.resetEdgeCache()
	return nil
}
// addEdges adds the given edges to the Polyhedron.
func (p *Polyhedron) addEdges(edges []Edge) {
	for _, edge := range edges {
		vs := edge.Vertices()
		if err := p.addEdge(vs[0], vs[1]); err != nil {
			panic("Added illegal edge.")
		}
	}
}
// setEdges clears all current edges and adds the given edges instead.
// The fresh neighbor map is pre-sized with len(edges) buckets.
func (p *Polyhedron) setEdges(edges []Edge) {
	p.vertexNeighbors = make(map[Vertex][]Vertex, len(edges))
	p.addEdges(edges)
}
// addFace registers f and indexes it under each of its edges, in both
// directions, so lookups work regardless of edge orientation.
func (p *Polyhedron) addFace(f Face) {
	p.faces = append(p.faces, f)
	for _, edge := range f.Edges() {
		reversed := edge.Reversed()
		p.edgeToFace[edge] = append(p.edgeToFace[edge], f)
		p.edgeToFace[reversed] = append(p.edgeToFace[reversed], f)
	}
}
// addFaceFromLoop adds a Face defined by the given vertex loop to the
// Polyhedron.
//
// The explicit edge slice the previous version built here was constructed
// and then never read (dead code), so it has been removed.
func (p *Polyhedron) addFaceFromLoop(vertices []Vertex) {
	p.addFace(NewFace(vertices))
}
// addFaces adds all the given faces to the Polyhedron, one by one.
func (p *Polyhedron) addFaces(faces []Face) {
	for _, face := range faces {
		p.addFace(face)
	}
}
// setFaces clears all current faces (and the edge-to-face index) and adds
// the given faces instead.
func (p *Polyhedron) setFaces(faces []Face) {
	p.faces = make([]Face, 0, len(faces))
	p.edgeToFace = make(map[Edge][]Face, len(faces))
	p.addFaces(faces)
}
// VertexDegree returns the number of neighbours of the given vertex.
func (p *Polyhedron) VertexDegree(vertex Vertex) int {
	return len(p.vertexNeighbors[vertex])
}
// VertexAdjacentFaces returns all faces whose loop contains the given vertex
// (once per matching loop entry).
func (p *Polyhedron) VertexAdjacentFaces(v Vertex) []Face {
	result := make([]Face, 0)
	for i := range p.faces {
		for _, fv := range p.faces[i].Loop() {
			if fv == v {
				result = append(result, p.faces[i])
			}
		}
	}
	return result
}
// EdgeAdjacentFaces returns the two faces that are adjacent to the given Edge.
// NOTE(review): this indexes the lookup result blindly — it panics if e is
// unknown or borders fewer than two faces; confirm callers guarantee this.
func (p *Polyhedron) EdgeAdjacentFaces(e Edge) [2]Face {
	faces := p.edgeToFace[e]
	return [2]Face{faces[0], faces[1]}
}
// FaceEdgeAdjacentFaces returns the faces that share an Edge with the given face.
func (p *Polyhedron) FaceEdgeAdjacentFaces(f Face) []Face {
	result := make([]Face, 0)
	for _, edge := range f.Edges() {
		for _, neighbor := range p.EdgeAdjacentFaces(edge) {
			if f.Equals(neighbor) {
				continue
			}
			result = append(result, neighbor)
		}
	}
	return result
}
// FaceVertexAdjacentFaces returns the faces that share a vertex with the
// given Face.
//
// Fixes: the previous implementation iterated over every face of the
// polyhedron instead of the loop of f, and appended f itself instead of the
// adjacent face — so it returned copies of its own argument rather than the
// neighbors. It now walks the vertices of f and collects the other faces
// touching each one (a face sharing several vertices appears several times,
// matching the multiplicity behavior of the sibling lookups).
func (p *Polyhedron) FaceVertexAdjacentFaces(f Face) []Face {
	resultFaces := make([]Face, 0)
	for _, v := range f.Loop() {
		for _, vf := range p.VertexAdjacentFaces(v) {
			if !f.Equals(vf) {
				resultFaces = append(resultFaces, vf)
			}
		}
	}
	return resultFaces
}
// AdjacentVertices returns all vertices that share an Edge with the given vertex.
func (p *Polyhedron) AdjacentVertices(vertex Vertex) []Vertex {
	return p.vertexNeighbors[vertex]
} | polyhedra/polyhedron.go | 0.89222 | 0.737867 | polyhedron.go | starcoder |
package ahocorasick
// ConstructTrie Function that constructs Trie as an automaton for a set of reversed & trimmed strings.
// It returns the transition table, a per-state terminal flag, and the map f
// from terminal states to the indices of the patterns ending there.
func ConstructTrie(p []string) (trie map[int]map[uint8]int, stateIsTerminal []bool, f map[int][]int) {
	trie = make(map[int]map[uint8]int)
	stateIsTerminal = make([]bool, 1)
	f = make(map[int][]int)
	state := 1 // next unused state id; state 0 is the root
	CreateNewState(0, trie)
	for i := 0; i < len(p); i++ {
		current := 0
		j := 0
		// Follow existing transitions for as long as the pattern matches.
		for j < len(p[i]) && GetTransition(current, p[i][j], trie) != -1 {
			current = GetTransition(current, p[i][j], trie)
			j++
		}
		// Create fresh states for the remaining suffix of the pattern.
		for j < len(p[i]) {
			stateIsTerminal = BoolArrayCapUp(stateIsTerminal)
			CreateNewState(state, trie)
			stateIsTerminal[state] = false
			CreateTransition(current, p[i][j], state, trie)
			current = state
			j++
			state++
		}
		// Mark the final state terminal and record which pattern ends here.
		if stateIsTerminal[current] {
			newArray := IntArrayCapUp(f[current])
			newArray[len(newArray)-1] = i
			f[current] = newArray // F(Current) <- F(Current) union {i}
		} else {
			stateIsTerminal[current] = true
			f[current] = []int{i} // F(Current) <- {i}
		}
	}
	return trie, stateIsTerminal, f
}
// Contains reports whether the int slice s contains the element e.
func Contains(s []int, e int) bool {
	for i := range s {
		if s[i] == e {
			return true
		}
	}
	return false
}
// GetWord returns the substring of t spanning positions begin..end
// (inclusive), or "" when the range does not lie fully inside t.
//
// Fixes: the out-of-range check used a for-loop where an if was intended;
// negative or inverted ranges were not rejected (they panicked); and the
// manual byte-copy loop is replaced by a direct slice of the string.
func GetWord(begin, end int, t string) string {
	if begin < 0 || end < begin || end >= len(t) {
		return ""
	}
	return t[begin : end+1]
}
// ComputeAlphabet returns the concatenation of all pattern strings; the
// characters of the result are exactly the characters used by the patterns.
func ComputeAlphabet(p []string) (s string) {
	s = p[0]
	for _, pattern := range p[1:] {
		s += pattern
	}
	return s
}
// IntArrayCapUp returns a copy of old grown by one zero-valued element.
//
// Fixes: the result was sized from cap(old) rather than len(old), which
// could insert stale zero gaps whenever cap exceeded len; the result
// variable also shadowed the builtin new.
func IntArrayCapUp(old []int) []int {
	grown := make([]int, len(old)+1)
	copy(grown, old)
	return grown
}
// BoolArrayCapUp returns a copy of old grown by one false element.
//
// Fixes: the result was sized from cap(old) rather than len(old), which
// could insert stale false gaps whenever cap exceeded len; the result
// variable also shadowed the builtin new.
func BoolArrayCapUp(old []bool) []bool {
	grown := make([]bool, len(old)+1)
	copy(grown, old)
	return grown
}
// ArrayUnion returns the elements of to followed by those elements of from
// that are not already present, preserving first-seen order.
//
// Fixes: the previous version reallocated and re-copied the entire result
// for every appended element and scanned it linearly per candidate
// (quadratic overall); this version appends into one pre-sized slice and
// tracks membership in a set. It also always returns a fresh slice instead
// of aliasing to when nothing is appended.
func ArrayUnion(to, from []int) []int {
	seen := make(map[int]struct{}, len(to)+len(from))
	union := make([]int, 0, len(to)+len(from))
	union = append(union, to...)
	for _, v := range to {
		seen[v] = struct{}{}
	}
	for _, v := range from {
		if _, ok := seen[v]; !ok {
			seen[v] = struct{}{}
			union = append(union, v)
		}
	}
	return union
}
// GetParent Function that finds the first previous state of a state and returns it.
// Used for trie where there is only one parent.
// NOTE(review): map iteration order is random, so if a state ever had more
// than one parent the result would be nondeterministic — the single-parent
// trie property is what makes this safe.
func GetParent(state int, at map[int]map[uint8]int) (uint8, int) {
	for beginState, transitions := range at {
		for c, endState := range transitions {
			if endState == state {
				return c, beginState
			}
		}
	}
	return 0, 0 // unreachable for states that actually have a parent
}
// CreateNewState Automaton function for creating a new state 'state'
// with an empty transition map.
func CreateNewState(state int, at map[int]map[uint8]int) {
	at[state] = make(map[uint8]int)
}
// CreateTransition Creates a transition for function σ(state,letter) = end.
// The source state must already exist in at.
func CreateTransition(fromState int, overChar uint8, toState int, at map[int]map[uint8]int) {
	at[fromState][overChar] = toState
}
// GetTransition Returns ending state for transition σ(fromState,overChar), '-1' if there is none.
func GetTransition(fromState int, overChar uint8, at map[int]map[uint8]int) (toState int) {
	// Inlined state-validity check: the source state must exist, not be the
	// sentinel -1, and have a non-nil transition map.
	transitions, present := at[fromState]
	if !present || fromState == -1 || transitions == nil {
		return -1
	}
	toState, ok := transitions[overChar]
	if !ok {
		return -1
	}
	return toState
}
// StateExists Checks if state 'state' exists. Returns 'true' if it does, 'false' otherwise.
func StateExists(state int, at map[int]map[uint8]int) bool {
	transitions, ok := at[state]
	return ok && state != -1 && transitions != nil
} | strings/ahocorasick/shared.go | 0.67104 | 0.464112 | shared.go | starcoder |
package iotmaker_platform_textMetrics
// TextMetrics holds the measurements reported for a piece of rendered text
// (canvas 2D text metrics); all distances are in CSS pixels.
type TextMetrics struct {
	// en: Is a double giving the calculated width of a segment of inline text in CSS
	// pixels. It takes into account the current font of the context.
	Width float64
	// en: Is a double giving the distance parallel to the baseline from the alignment
	// point given by the CanvasRenderingContext2D.textAlign property to the left side
	// of the bounding rectangle of the given text, in CSS pixels.
	ActualBoundingBoxLeft float64
	// en: Is a double giving the distance parallel to the baseline from the alignment
	// point given by the CanvasRenderingContext2D.textAlign property to the right side
	// of the bounding rectangle of the given text, in CSS pixels.
	ActualBoundingBoxRight float64
	// en: Is a double giving the distance from the horizontal line indicated by the
	// CanvasRenderingContext2D.textBaseline attribute to the top of the highest
	// bounding rectangle of all the fonts used to render the text, in CSS pixels.
	FontBoundingBoxAscent float64
	// en: Is a double giving the distance from the horizontal line indicated by the
	// CanvasRenderingContext2D.textBaseline attribute to the bottom of the bounding
	// rectangle of all the fonts used to render the text, in CSS pixels.
	FontBoundingBoxDescent float64
	// en: Is a double giving the distance from the horizontal line indicated by the
	// CanvasRenderingContext2D.textBaseline attribute to the top of the bounding
	// rectangle used to render the text, in CSS pixels.
	ActualBoundingBoxAscent float64
	// en: Is a double giving the distance from the horizontal line indicated by the
	// CanvasRenderingContext2D.textBaseline attribute to the bottom of the bounding
	// rectangle used to render the text, in CSS pixels.
	ActualBoundingBoxDescent float64
	// en: Is a double giving the distance from the horizontal line indicated by the
	// CanvasRenderingContext2D.textBaseline property to the top of the em square in
	// the line box, in CSS pixels.
	EmHeightAscent float64
	// en: Is a double giving the distance from the horizontal line indicated by the
	// CanvasRenderingContext2D.textBaseline property to the bottom of the em square in
	// the line box, in CSS pixels.
	EmHeightDescent float64
	// en: Is a double giving the distance from the horizontal line indicated by the
	// CanvasRenderingContext2D.textBaseline property to the hanging baseline of the
	// line box, in CSS pixels.
	HangingBaseline float64
	// en: Is a double giving the distance from the horizontal line indicated by the
	// CanvasRenderingContext2D.textBaseline property to the alphabetic baseline of the
	// line box, in CSS pixels.
	AlphabeticBaseline float64
	// en: Is a double giving the distance from the horizontal line indicated by the
	// CanvasRenderingContext2D.textBaseline property to the ideographic baseline of
	// the line box, in CSS pixels.
	IdeographicBaseline float64
} | typeTextMetrics.go | 0.728169 | 0.714142 | typeTextMetrics.go | starcoder |
package deep
import math "github.com/chewxy/math32"
// Mode denotes inference mode
type Mode int
const (
// ModeDefault is unspecified mode
ModeDefault Mode = 0
// ModeMultiClass is for one-hot encoded classification, applies softmax output layer
ModeMultiClass Mode = 1
// ModeRegression is regression, applies linear output layer
ModeRegression Mode = 2
// ModeBinary is binary classification, applies sigmoid output layer
ModeBinary Mode = 3
// ModeMultiLabel is for multilabel classification, applies sigmoid output layer
ModeMultiLabel Mode = 4
)
// OutputActivation returns the activation corresponding to a prediction mode.
func OutputActivation(c Mode) ActivationType {
	switch c {
	case ModeRegression:
		return ActivationLinear
	case ModeMultiClass:
		return ActivationSoftmax
	case ModeBinary, ModeMultiLabel:
		return ActivationSigmoid
	default:
		return ActivationNone
	}
}
// GetActivation returns the concrete activation given an ActivationType.
// ActivationSoftmax maps to Linear here — NOTE(review): consistent with
// softmax being applied per layer (see the ActivationSoftmax constant)
// rather than per neuron; confirm callers handle it at the layer level.
// Unknown values fall back to Linear.
func GetActivation(act ActivationType) Differentiable {
	switch act {
	case ActivationSigmoid:
		return Sigmoid{}
	case ActivationTanh:
		return Tanh{}
	case ActivationReLU:
		return ReLU{}
	case ActivationELU:
		return eLU{}
	case ActivationSwish:
		return Swish{}
	case ActivationMish:
		return Mish{}
	case ActivationCustom:
		return Custom{}
	case ActivationLinear:
		return Linear{}
	case ActivationSoftmax:
		return Linear{}
	}
	return Linear{}
}
// ActivationType represents a neuron activation function.
type ActivationType int
const (
	// ActivationNone is no activation
	ActivationNone ActivationType = 0
	// ActivationSigmoid is a sigmoid activation
	ActivationSigmoid ActivationType = 1
	// ActivationTanh is hyperbolic activation
	ActivationTanh ActivationType = 2
	// ActivationReLU is rectified linear unit activation
	ActivationReLU ActivationType = 3
	// ActivationLinear is linear activation
	ActivationLinear ActivationType = 4
	// ActivationSoftmax is a softmax activation (per layer)
	ActivationSoftmax ActivationType = 5
	// ActivationELU is a Elu activation
	ActivationELU ActivationType = 6
	// ActivationSwish is a Swish activation
	ActivationSwish ActivationType = 7
	// ActivationMish is a Mish activation
	ActivationMish ActivationType = 8
	// ActivationCustom is a Custom activation
	ActivationCustom ActivationType = 9
)
// Differentiable is an activation function and its first order derivative,
// where the latter is expressed as a function of the former for efficiency
type Differentiable interface {
	// F applies the activation to its first argument; the bool indicates
	// whether the call happens during training (some implementations
	// memoize their inputs then — see Swish).
	F(float32, bool) float32
	// Df computes the derivative given the activation's output.
	Df(float32) float32
}
// Sigmoid is a logistic activator in the special case of a = 1
type Sigmoid struct {
Mem map[float32]float32
}
// F is Sigmoid(x)
func (a Sigmoid) F(x float32, training bool) float32 { return Logistic(x, 1) }
// Df is Sigmoid'(y), where y = Sigmoid(x)
func (a Sigmoid) Df(y float32) float32 { return y * (1 - y) }
// Logistic is the logistic function
func Logistic(x, a float32) float32 {
return 1 / (1 + math.Exp(-a*x))
}
// Tanh is a hyperbolic activator
type Tanh struct {
Mem map[float32]float32
}
// F is Tanh(x)
func (a Tanh) F(x float32, training bool) float32 { return (1 - math.Exp(-2*x)) / (1 + math.Exp(-2*x)) }
// Df is Tanh'(y), where y = Tanh(x)
func (a Tanh) Df(y float32) float32 { return 1 - math.Pow(y, 2) }
// ReLU is a rectified linear unit activator
type ReLU struct {
Mem map[float32]float32
}
// F is ReLU(x)
func (a ReLU) F(x float32, training bool) float32 {
return math.Max(x, 0)
}
// Df is ReLU'(y), where y = ReLU(x)
func (a ReLU) Df(y float32) float32 {
if y > 0 {
return 1
}
return 0
}
type eLU struct {
Mem map[float32]float32
}
// F is ELU(x)
func (a eLU) F(x float32, training bool) float32 {
if x >= 0 {
// elu formula
return x + 0.0000001
} else {
return 1.0*math.Pow(math.E, x)*-1 + float32(math.SmallestNonzeroFloat32)
}
}
// Df is ReLU'(y), where y = ReLU(x)
func (a eLU) Df(y float32) float32 {
if y > 0 {
return 1 - 0.0000001
} else {
return 1.0*math.Exp(y) - float32(math.SmallestNonzeroFloat32)
}
}
type Swish struct {
Mem map[float32]float32
}
// F is Swish(x)
func (a Swish) F(x float32, training bool) float32 {
if a.Mem == nil {
a.Mem = map[float32]float32{}
}
ans := x * Logistic(x, 1)
if training {
a.Mem[ans] = x
}
return ans
}
// Df is swish'(y), where y = Swish(x)
func (a Swish) Df(y float32) float32 {
x := a.Mem[y]
delete(a.Mem, y)
sigX := Logistic(x, 1)
return y * (sigX * (1 + x*(1-sigX)))
}
type Mish struct {
Mem map[float32]float32
}
// F is Mish(x)
func (a Mish) F(x float32, training bool) float32 {
if a.Mem == nil {
a.Mem = map[float32]float32{}
}
ans := x * math.Tanh(math.Log(1+math.Exp(x)))
if training {
a.Mem[ans] = x
}
return ans
}
// Df is Mish'(y), where y = Mish(x)
func (a Mish) Df(y float32) float32 {
x := a.Mem[y]
delete(a.Mem, y)
sigX := Logistic(x, 1)
xTanhSp := math.Tanh(math.Log(1 + math.Exp(x)))
return y * (xTanhSp + x*sigX*(1-xTanhSp*xTanhSp))
}
type Custom struct {
Mem map[float32]float32
}
var customF func(float32) float32
var customDf func(float32, float32) float32
func SetCustomF(F func(float32) float32) {
customF = F
}
func SetCustomDf(Df func(float32,float32) float32) {
customDf = Df
}
// F is Custom(x)
func (a Custom) F(x float32, training bool) float32 {
if a.Mem == nil {
a.Mem = map[float32]float32{}
}
if customF != nil {
ans := customF(x)
if training {
a.Mem[ans] = x
}
return ans
} else {
ans := x
if training {
a.Mem[ans] = x
}
return x
}
}
// Df is Custom'(y), where y = Custom(x)
func (a Custom) Df(y float32) float32 {
x := a.Mem[y]
delete(a.Mem, y)
if customDf != nil {
return customDf(y, x)
} else {
return x
}
}
// Linear is a linear activator
type Linear struct {
Mem map[float32]float32
}
// F is the identity function
func (a Linear) F(x float32, training bool) float32 { return x }
// Df is constant
func (a Linear) Df(x float32) float32 { return 1 } | activation.go | 0.846863 | 0.529203 | activation.go | starcoder |
package score
import (
"math"
cvssv3 "go.zenithar.org/mitre/pkg/protocol/mitre/cvss/v3"
)
func weightAttackVector(av cvssv3.AttackVector) float64 {
switch av {
case cvssv3.AttackVector_ATTACK_VECTOR_NETWORK:
return 0.85
case cvssv3.AttackVector_ATTACK_VECTOR_ADJACENT:
return 0.62
case cvssv3.AttackVector_ATTACK_VECTOR_LOCAL:
return 0.55
}
// Local / Physical
return 0.2
}
func weightAttackComplexity(ac cvssv3.AttackComplexity) float64 {
switch ac {
case cvssv3.AttackComplexity_ATTACK_COMPLEXITY_LOW:
return 0.77
}
// High
return 0.44
}
func weightPrivilegeRequired(pr cvssv3.PrivilegeRequired, scope cvssv3.Scope) float64 {
switch pr {
case cvssv3.PrivilegeRequired_PRIVILEGE_REQUIRED_HIGH:
switch scope {
case cvssv3.Scope_SCOPE_CHANGED:
return 0.50
case cvssv3.Scope_SCOPE_UNCHANGED:
return 0.27
}
case cvssv3.PrivilegeRequired_PRIVILEGE_REQUIRED_LOW:
switch scope {
case cvssv3.Scope_SCOPE_CHANGED:
return 0.68
case cvssv3.Scope_SCOPE_UNCHANGED:
return 0.62
}
}
// None
return 0.85
}
func weightUserInteraction(ui cvssv3.UserInteraction) float64 {
switch ui {
case cvssv3.UserInteraction_USER_INTERACTION_REQUIRED:
return 0.62
}
// Required
return 0.85
}
func weightConfidentiality(i cvssv3.ConfidentialityImpact) float64 {
switch i {
case cvssv3.ConfidentialityImpact_CONFIDENTIALITY_IMPACT_LOW:
return 0.22
case cvssv3.ConfidentialityImpact_CONFIDENTIALITY_IMPACT_HIGH:
return 0.56
}
// None
return 0.0
}
func weightIntegrity(i cvssv3.IntegrityImpact) float64 {
switch i {
case cvssv3.IntegrityImpact_INTEGRITY_IMPACT_LOW:
return 0.22
case cvssv3.IntegrityImpact_INTEGRITY_IMPACT_HIGH:
return 0.56
}
// None
return 0.0
}
func weightAvailability(i cvssv3.AvailabilityImpact) float64 {
switch i {
case cvssv3.AvailabilityImpact_AVAILABILITY_IMPACT_LOW:
return 0.22
case cvssv3.AvailabilityImpact_AVAILABILITY_IMPACT_HIGH:
return 0.56
}
// None
return 0.0
}
func impactScore(bm *cvssv3.BaseMetrics) float64 {
isc := iscBase(bm)
switch bm.Scope {
case cvssv3.Scope_SCOPE_CHANGED:
// 7.52 × [ISCBase−0.029] − 3.25 × [ISCBase−0.02]^15
return 7.52*(isc-0.029) - 3.25*math.Pow((isc-0.02), 15.0)
}
// Unchanged
return 6.42 * isc
}
// ISCBase = 1 - [(1−ImpactConf) × (1−ImpactInteg) × (1−ImpactAvail)]
func iscBase(bm *cvssv3.BaseMetrics) float64 {
return float64(1.0 - (1.0-weightConfidentiality(bm.ConfidentialityImpact))*(1.0-weightIntegrity(bm.IntegrityImpact))*(1.0-weightAvailability(bm.AvailabilityImpact)))
}
// 8.22 × AttackVector × AttackComplexity × PrivilegeRequired × UserInteraction
func exploitabilityScore(bm *cvssv3.BaseMetrics) float64 {
return 8.22 * weightAttackVector(bm.AttackVector) * weightAttackComplexity(bm.AttackComplexity) * weightPrivilegeRequired(bm.PrivilegeRequired, bm.Scope) * weightUserInteraction(bm.UserInteraction)
}
// If (Impact sub score <= 0) 0 else,
// Scope Unchanged[4] Round up (Minimum [(Impact + Exploitability), 10])
// Scope Changed Round up (Minimum [1.08 × (Impact + Exploitability), 10])
func baseScore(v *cvssv3.Vector) float64 {
impact := impactScore(v.BaseMetrics)
if impact <= 0 {
return 0.0
}
exploitability := exploitabilityScore(v.BaseMetrics)
coeff := 1.0
switch v.BaseMetrics.Scope {
case cvssv3.Scope_SCOPE_CHANGED:
coeff = 1.08
default:
coeff = 1.0
}
return roundUp1(math.Min(coeff*(impact+exploitability), 10.0))
}
func roundUp1(val float64) float64 {
return math.Round(val*10) / 10
} | pkg/services/cvss/v3/score/tables.go | 0.685423 | 0.429609 | tables.go | starcoder |
package term
import (
"fmt"
"reflect"
)
// Subst takes a Term and finds all instances of a variable called
// `name` and replaces them with the replacement.
func Subst(name string, replacement, t Term) Term {
return substAtLevel(0, name, replacement, t)
}
func substAtLevel(i int, name string, replacement, t Term) Term {
switch t := t.(type) {
case Universe:
return t
case Builtin:
return t
case Var:
if t.Name == name && t.Index == i {
return replacement
}
return t
case LocalVar:
return t
case Lambda:
j := i
if t.Label == name {
j = i + 1
}
return Lambda{
Label: t.Label,
Type: substAtLevel(i, name, replacement, t.Type),
Body: substAtLevel(j, name, replacement, t.Body),
}
case Pi:
j := i
if t.Label == name {
j = i + 1
}
return Pi{
Label: t.Label,
Type: substAtLevel(i, name, replacement, t.Type),
Body: substAtLevel(j, name, replacement, t.Body),
}
case App:
return App{
Fn: substAtLevel(i, name, replacement, t.Fn),
Arg: substAtLevel(i, name, replacement, t.Arg),
}
case NaturalLit:
return t
case Let:
newLet := Let{}
for _, b := range t.Bindings {
newBinding := Binding{
Variable: b.Variable,
Value: substAtLevel(i, name, replacement, b.Value),
}
if b.Annotation != nil {
newBinding.Annotation = substAtLevel(i, name, replacement, b.Annotation)
}
newLet.Bindings = append(newLet.Bindings, newBinding)
if b.Variable == name {
i = i + 1
}
}
newLet.Body = substAtLevel(i, name, replacement, t.Body)
return newLet
case Annot:
return substAtLevel(i, name, replacement, t.Expr)
case DoubleLit:
return t
case TextLit:
result := TextLit{Suffix: t.Suffix}
if t.Chunks == nil {
return result
}
result.Chunks = Chunks{}
for _, chunk := range t.Chunks {
result.Chunks = append(result.Chunks,
Chunk{
Prefix: chunk.Prefix,
Expr: substAtLevel(i, name, replacement, chunk.Expr),
})
}
return result
case BoolLit:
return t
case If:
return If{
Cond: substAtLevel(i, name, replacement, t.Cond),
T: substAtLevel(i, name, replacement, t.T),
F: substAtLevel(i, name, replacement, t.F),
}
case IntegerLit:
return t
case Op:
return Op{
OpCode: t.OpCode,
L: substAtLevel(i, name, replacement, t.L),
R: substAtLevel(i, name, replacement, t.R),
}
case EmptyList:
return EmptyList{Type: substAtLevel(i, name, replacement, t.Type)}
case NonEmptyList:
result := make(NonEmptyList, len(t))
for j, e := range t {
result[j] = substAtLevel(i, name, replacement, e)
}
return result
case Some:
return Some{substAtLevel(i, name, replacement, t.Val)}
case RecordType:
result := make(RecordType, len(t))
for k, v := range t {
result[k] = substAtLevel(i, name, replacement, v)
}
return result
case RecordLit:
result := make(RecordLit, len(t))
for k, v := range t {
result[k] = substAtLevel(i, name, replacement, v)
}
return result
case ToMap:
result := ToMap{Record: substAtLevel(i, name, replacement, t.Record)}
if t.Type != nil {
result.Type = substAtLevel(i, name, replacement, t.Type)
}
return result
case Field:
return Field{
Record: substAtLevel(i, name, replacement, t.Record),
FieldName: t.FieldName,
}
case Project:
return Project{
Record: substAtLevel(i, name, replacement, t.Record),
FieldNames: t.FieldNames,
}
case ProjectType:
return ProjectType{
Record: substAtLevel(i, name, replacement, t.Record),
Selector: substAtLevel(i, name, replacement, t.Selector),
}
case UnionType:
result := make(UnionType, len(t))
for k, v := range t {
if v == nil {
result[k] = nil
continue
}
result[k] = substAtLevel(i, name, replacement, v)
}
return result
case Merge:
result := Merge{
Handler: substAtLevel(i, name, replacement, t.Handler),
Union: substAtLevel(i, name, replacement, t.Union),
}
if t.Annotation != nil {
result.Annotation = substAtLevel(i, name, replacement, t.Annotation)
}
return result
case Assert:
return Assert{Annotation: substAtLevel(i, name, replacement, t.Annotation)}
case Import:
return t
default:
panic(fmt.Sprintf("unknown term type %+v (%v)", t, reflect.ValueOf(t).Type()))
}
}
// RebindLocal takes a Term and finds all instances of a LocalVar and
// replaces them with the equivalent Var.
func RebindLocal(local LocalVar, t Term) Term {
return rebindAtLevel(0, local, t)
}
func rebindAtLevel(i int, local LocalVar, t Term) Term {
switch t := t.(type) {
case Universe:
return t
case Builtin:
return t
case Var:
return t
case LocalVar:
if t == local {
return Var{
Name: t.Name,
Index: i,
}
}
return t
case Lambda:
j := i
if t.Label == local.Name {
j = i + 1
}
return Lambda{
Label: t.Label,
Type: rebindAtLevel(i, local, t.Type),
Body: rebindAtLevel(j, local, t.Body),
}
case Pi:
j := i
if t.Label == local.Name {
j = i + 1
}
return Pi{
Label: t.Label,
Type: rebindAtLevel(i, local, t.Type),
Body: rebindAtLevel(j, local, t.Body),
}
case App:
return App{
Fn: rebindAtLevel(i, local, t.Fn),
Arg: rebindAtLevel(i, local, t.Arg),
}
case NaturalLit:
return t
case Let:
newLet := Let{}
for _, b := range t.Bindings {
newBinding := Binding{
Variable: b.Variable,
Value: rebindAtLevel(i, local, b.Value),
}
if b.Annotation != nil {
newBinding.Annotation = rebindAtLevel(i, local, b.Annotation)
}
newLet.Bindings = append(newLet.Bindings, newBinding)
if b.Variable == local.Name {
i = i + 1
}
}
newLet.Body = rebindAtLevel(i, local, t.Body)
return newLet
case Annot:
return rebindAtLevel(i, local, t.Expr)
case DoubleLit:
return t
case TextLit:
result := TextLit{Suffix: t.Suffix}
if t.Chunks == nil {
return result
}
result.Chunks = Chunks{}
for _, chunk := range t.Chunks {
result.Chunks = append(result.Chunks,
Chunk{
Prefix: chunk.Prefix,
Expr: rebindAtLevel(i, local, chunk.Expr),
})
}
return result
case BoolLit:
return t
case If:
return If{
Cond: rebindAtLevel(i, local, t.Cond),
T: rebindAtLevel(i, local, t.T),
F: rebindAtLevel(i, local, t.F),
}
case IntegerLit:
return t
case Op:
return Op{
OpCode: t.OpCode,
L: rebindAtLevel(i, local, t.L),
R: rebindAtLevel(i, local, t.R),
}
case EmptyList:
return EmptyList{
Type: rebindAtLevel(i, local, t.Type),
}
case NonEmptyList:
result := make(NonEmptyList, len(t))
for j, e := range t {
result[j] = rebindAtLevel(i, local, e)
}
return result
case Some:
return Some{rebindAtLevel(i, local, t.Val)}
case RecordType:
result := make(RecordType, len(t))
for k, v := range t {
result[k] = rebindAtLevel(i, local, v)
}
return result
case RecordLit:
result := make(RecordLit, len(t))
for k, v := range t {
result[k] = rebindAtLevel(i, local, v)
}
return result
case ToMap:
result := ToMap{Record: rebindAtLevel(i, local, t.Record)}
if t.Type != nil {
result.Type = rebindAtLevel(i, local, t.Type)
}
return result
case Field:
return Field{
Record: rebindAtLevel(i, local, t.Record),
FieldName: t.FieldName,
}
case Project:
return Project{
Record: rebindAtLevel(i, local, t.Record),
FieldNames: t.FieldNames,
}
case ProjectType:
return ProjectType{
Record: rebindAtLevel(i, local, t.Record),
Selector: rebindAtLevel(i, local, t.Selector),
}
case UnionType:
result := make(UnionType, len(t))
for k, v := range t {
if v == nil {
result[k] = nil
continue
}
result[k] = rebindAtLevel(i, local, v)
}
return result
case Merge:
result := Merge{
Handler: rebindAtLevel(i, local, t.Handler),
Union: rebindAtLevel(i, local, t.Union),
}
if t.Annotation != nil {
result.Annotation = rebindAtLevel(i, local, t.Annotation)
}
return result
case Assert:
return Assert{Annotation: rebindAtLevel(i, local, t.Annotation)}
case Import:
return t
default:
panic(fmt.Sprintf("unknown term type %+v (%v)", t, reflect.ValueOf(t).Type()))
}
} | term/subst.go | 0.503906 | 0.440289 | subst.go | starcoder |
package promql
import (
"fmt"
"github.com/m3db/m3/src/query/functions"
"github.com/m3db/m3/src/query/functions/linear"
"github.com/m3db/m3/src/query/functions/logical"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/parser"
"github.com/m3db/m3/src/query/parser/common"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/promql"
)
// NewSelectorFromVector creates a new fetchop
func NewSelectorFromVector(n *promql.VectorSelector) (parser.Params, error) {
matchers, err := labelMatchersToModelMatcher(n.LabelMatchers)
if err != nil {
return nil, err
}
return functions.FetchOp{
Name: n.Name,
Offset: n.Offset,
Matchers: matchers,
}, nil
}
// NewSelectorFromMatrix creates a new fetchop
func NewSelectorFromMatrix(n *promql.MatrixSelector) (parser.Params, error) {
matchers, err := labelMatchersToModelMatcher(n.LabelMatchers)
if err != nil {
return nil, err
}
return functions.FetchOp{Name: n.Name, Offset: n.Offset, Matchers: matchers, Range: n.Range}, nil
}
// NewOperator creates a new operator based on the type
func NewOperator(opType promql.ItemType) (parser.Params, error) {
switch getOpType(opType) {
case functions.CountType:
return functions.CountOp{}, nil
default:
// TODO: handle other types
return nil, fmt.Errorf("operator not supported: %s", opType)
}
}
// NewBinaryOperator creates a new binary operator based on the type
func NewBinaryOperator(expr *promql.BinaryExpr, lhs, rhs parser.NodeID) (parser.Params, error) {
switch getOpType(expr.Op) {
case logical.AndType:
return logical.NewAndOp(lhs, rhs, promMatchingToM3(expr.VectorMatching)), nil
default:
// TODO: handle other types
return nil, fmt.Errorf("operator not supported: %s", expr.Op)
}
}
// NewFunctionExpr creates a new function expr based on the type
func NewFunctionExpr(name string, argValues []interface{}) (parser.Params, error) {
switch name {
case linear.AbsType, linear.CeilType, linear.ExpType, linear.FloorType, linear.LnType,
linear.Log10Type, linear.Log2Type, linear.SqrtType:
return linear.NewMathOp(name)
case linear.AbsentType:
return linear.NewAbsentOp(), nil
case linear.ClampMinType, linear.ClampMaxType:
return linear.NewClampOp(argValues, name)
case linear.RoundType:
return linear.NewRoundOp(argValues)
case linear.DayOfMonthType, linear.DayOfWeekType, linear.DaysInMonthType, linear.HourType,
linear.MinuteType, linear.MonthType, linear.YearType:
return linear.NewDateOp(name)
default:
// TODO: handle other types
return nil, fmt.Errorf("function not supported: %s", name)
}
}
func getOpType(opType promql.ItemType) string {
switch opType {
case promql.ItemType(itemCount):
return functions.CountType
case promql.ItemType(itemLAND):
return logical.AndType
default:
return common.UnknownOpType
}
}
func labelMatchersToModelMatcher(lMatchers []*labels.Matcher) (models.Matchers, error) {
matchers := make(models.Matchers, len(lMatchers))
for i, m := range lMatchers {
modelType, err := promTypeToM3(m.Type)
if err != nil {
return nil, err
}
match, err := models.NewMatcher(modelType, m.Name, m.Value)
if err != nil {
return nil, err
}
matchers[i] = match
}
return matchers, nil
}
// promTypeToM3 converts a prometheus label type to m3 matcher type
//TODO(nikunj): Consider merging with prompb code
func promTypeToM3(labelType labels.MatchType) (models.MatchType, error) {
switch labelType {
case labels.MatchEqual:
return models.MatchEqual, nil
case labels.MatchNotEqual:
return models.MatchNotEqual, nil
case labels.MatchRegexp:
return models.MatchRegexp, nil
case labels.MatchNotRegexp:
return models.MatchNotRegexp, nil
default:
return 0, fmt.Errorf("unknown match type %v", labelType)
}
}
func promVectorCardinalityToM3(card promql.VectorMatchCardinality) logical.VectorMatchCardinality {
switch card {
case promql.CardOneToOne:
return logical.CardOneToOne
case promql.CardManyToMany:
return logical.CardManyToMany
case promql.CardManyToOne:
return logical.CardManyToOne
case promql.CardOneToMany:
return logical.CardOneToMany
}
panic(fmt.Sprintf("unknown prom cardinality %d", card))
}
func promMatchingToM3(vectorMatching *promql.VectorMatching) *logical.VectorMatching {
return &logical.VectorMatching{
Card: promVectorCardinalityToM3(vectorMatching.Card),
MatchingLabels: vectorMatching.MatchingLabels,
On: vectorMatching.On,
Include: vectorMatching.Include,
}
} | src/query/parser/promql/types.go | 0.566258 | 0.406155 | types.go | starcoder |
package passthepoop
import (
"math"
"mondaynightpoker-server/pkg/deck"
)
// PairsEdition is a variant of Pass the Poop where pairs on the board good
// Any pair on the board is better than any single card
// Trips or better on the board and the rest of the board loses all their lives
type PairsEdition struct {
}
// Name returns the name of the Edition
func (p *PairsEdition) Name() string {
return "Pairs"
}
// ParticipantWasPassed is a no-op in pairs edition
func (p *PairsEdition) ParticipantWasPassed(participant *Participant, nextCard *deck.Card) {
// noop
}
// EndRound ends the round
// In pairs edition, and match on the board beats any single high card. For example,
// If two people turn over aces, those "pair" of aces beat someone with a King.
// In the event three or four people all have the same card, the rest of the table loses their entire stack
// of lives
func (p *PairsEdition) EndRound(participants []*Participant) ([]*LoserGroup, error) {
cardStats := make(map[int][]*Participant)
largestGroupSize := 0
largestGroupRank := -1
for _, participant := range participants {
rank := participant.card.AceLowRank()
group, found := cardStats[rank]
if !found {
group = []*Participant{participant}
} else {
group = append(group, participant)
}
nInGroup := len(group)
if nInGroup == largestGroupSize {
if rank > largestGroupRank {
largestGroupRank = rank
}
} else if nInGroup > largestGroupSize {
largestGroupSize = nInGroup
largestGroupRank = rank
}
cardStats[rank] = group
}
// trips or better and the rest lose
if largestGroupSize >= 3 {
roundLosers := make([]*RoundLoser, 0, len(participants)-largestGroupSize)
for _, participant := range participants {
if participant.card.AceLowRank() != largestGroupRank {
roundLosers = append(roundLosers, &RoundLoser{
PlayerID: participant.PlayerID,
Card: participant.card,
LivesLost: participant.subtractLife(0),
})
}
}
return newLoserGroup(roundLosers), nil
}
// otherwise, find the lowest rank in the smallest group
lowestRank := math.MaxInt32
smallestGroupSize := math.MaxInt32
for rank, participants := range cardStats {
nParticipants := len(participants)
if nParticipants > smallestGroupSize {
continue
} else if nParticipants == smallestGroupSize {
if rank < lowestRank {
lowestRank = rank
}
} else {
smallestGroupSize = nParticipants
lowestRank = rank
}
}
if lowestRank == math.MaxInt32 {
// this should never happen
panic("could not find lowest card")
}
losingParticipants := cardStats[lowestRank]
roundLosers := make([]*RoundLoser, len(losingParticipants))
if len(participants) == len(losingParticipants) {
return nil, ErrMutualDestruction
}
for i, participant := range losingParticipants {
roundLosers[i] = &RoundLoser{
PlayerID: participant.PlayerID,
Card: participant.card,
LivesLost: participant.subtractLife(1),
}
}
return newLoserGroup(roundLosers), nil
} | pkg/playable/passthepoop/edition_pairs.go | 0.608827 | 0.400808 | edition_pairs.go | starcoder |
package xy
import "github.com/twpayne/go-geom"
// PointsCentroid computes the centroid of the point arguments
//
// Algorithm: average of all points
func PointsCentroid(point *geom.Point, extra ...*geom.Point) geom.Coord {
calc := NewPointCentroidCalculator()
calc.AddCoord(geom.Coord(point.FlatCoords()))
for _, p := range extra {
calc.AddCoord(geom.Coord(p.FlatCoords()))
}
return calc.GetCentroid()
}
// MultiPointCentroid computes the centroid of the multi point argument
//
// Algorithm: average of all points in MultiPoint
func MultiPointCentroid(point *geom.MultiPoint) geom.Coord {
calc := NewPointCentroidCalculator()
coords := point.FlatCoords()
Strd := point.Layout().Stride()
for i := 0; i < len(coords); i += Strd {
calc.AddCoord(geom.Coord(coords[i : i+Strd]))
}
return calc.GetCentroid()
}
// PointsCentroidFlat computes the centroid of the points in the coordinate array.
// Lay is only used to determine how to find each coordinate. X-Y are assumed
// to be the first two elements in each coordinate.
//
// Algorithm: average of all points
func PointsCentroidFlat(Lay geom.Layout, pointData []float64) geom.Coord {
calc := NewPointCentroidCalculator()
coord := geom.Coord{0, 0}
Strd := Lay.Stride()
arrayLen := len(pointData)
for i := 0; i < arrayLen; i += Strd {
coord[0] = pointData[i]
coord[1] = pointData[i+1]
calc.AddCoord(coord)
}
return calc.GetCentroid()
}
// PointCentroidCalculator is the data structure that contains the centroid calculation
// data. This type cannot be used using its 0 values, it must be created
// using NewPointCentroid
type PointCentroidCalculator struct {
ptCount int
centSum geom.Coord
}
// NewPointCentroidCalculator creates a new calculator.
// Once the coordinates or points can be added to the calculator
// and GetCentroid can be used to get the current centroid at any point
func NewPointCentroidCalculator() PointCentroidCalculator {
return PointCentroidCalculator{centSum: geom.Coord{0, 0}}
}
// AddPoint adds a point to the calculation
func (calc *PointCentroidCalculator) AddPoint(point *geom.Point) {
calc.AddCoord(geom.Coord(point.FlatCoords()))
}
// AddCoord adds a point to the calculation
func (calc *PointCentroidCalculator) AddCoord(point geom.Coord) {
calc.ptCount++
calc.centSum[0] += point[0]
calc.centSum[1] += point[1]
}
// GetCentroid obtains centroid currently calculated. Returns a 0 coord if no coords have been added
func (calc *PointCentroidCalculator) GetCentroid() geom.Coord {
cent := geom.Coord{0, 0}
cent[0] = calc.centSum[0] / float64(calc.ptCount)
cent[1] = calc.centSum[1] / float64(calc.ptCount)
return cent
} | xy/point_centroid.go | 0.812012 | 0.520862 | point_centroid.go | starcoder |
package sql
import (
"io"
"gopkg.in/src-d/go-errors.v1"
)
// IndexBatchSize is the number of rows to save at a time when creating indexes.
const IndexBatchSize = uint64(10000)
// ChecksumKey is the key in an index config to store the checksum.
const ChecksumKey = "checksum"
// IndexDriver manages the coordination between the indexes and their
// representation on disk.
type IndexDriver interface {
// ID returns the unique name of the driver.
ID() string
// Create a new index. If exprs is more than one expression, it means the
// index has multiple columns indexed. If it's just one, it means it may
// be an expression or a column.
Create(db, table, id string, expressions []Expression, config map[string]string) (DriverIndex, error)
// LoadAll loads all indexes for given db and table.
LoadAll(ctx *Context, db, table string) ([]DriverIndex, error)
// Save the given index for all partitions.
Save(*Context, DriverIndex, PartitionIndexKeyValueIter) error
// Delete the given index for all partitions in the iterator.
Delete(DriverIndex, PartitionIter) error
}
// DriverIndexableTable represents a table that supports being indexed and receiving indexes to be able to speed up its
// execution.
type DriverIndexableTable interface {
IndexAddressableTable
// IndexKeyValues returns an iterator over partitions and ultimately the rows of the table to compute the value of an
// index for every row in this table. Used when creating an index for access through an IndexDriver.
IndexKeyValues(*Context, []string) (PartitionIndexKeyValueIter, error)
}
// An indexed managed by a driver, as opposed to natively by a DB table.
type DriverIndex interface {
Index
// Driver ID of the index.
Driver() string
}
// DriverIndexLookup is a subset of an index. More specific interfaces can be
// implemented to grant more capabilities to the index lookup.
type DriverIndexLookup interface {
IndexLookup
// Values returns the values in the subset of the index. These are used to populate the index via the driver.
Values(Partition) (IndexValueIter, error)
// Indexes returns the IDs of all indexes involved in this lookup.
Indexes() []string
}
// Checksumable provides the checksum of some data.
type Checksumable interface {
// Checksum returns a checksum and an error if there was any problem
// computing or obtaining the checksum.
Checksum() (string, error)
}
// PartitionIndexKeyValueIter is an iterator of partitions that will return
// the partition and the IndexKeyValueIter of that partition.
type PartitionIndexKeyValueIter interface {
// Next returns the next partition and the IndexKeyValueIter for that
// partition.
Next() (Partition, IndexKeyValueIter, error)
io.Closer
}
// IndexKeyValueIter is an iterator of index key values, that is, a tuple of
// the values that will be index keys.
type IndexKeyValueIter interface {
// Next returns the next tuple of index key values. The length of the
// returned slice will be the same as the number of columns used to
// create this iterator. The second returned parameter is a repo's location.
Next() ([]interface{}, []byte, error)
io.Closer
}
// IndexValueIter is an iterator of index values.
type IndexValueIter interface {
// Next returns the next value (repo's location) - see IndexKeyValueIter.
Next() ([]byte, error)
io.Closer
}
var (
// ErrIndexIDAlreadyRegistered is the error returned when there is already
// an index with the same ID.
ErrIndexIDAlreadyRegistered = errors.NewKind("an index with id %q has already been registered")
// ErrIndexExpressionAlreadyRegistered is the error returned when there is
// already an index with the same expression.
ErrIndexExpressionAlreadyRegistered = errors.NewKind("there is already an index registered for the expressions: %s")
// ErrIndexNotFound is returned when the index could not be found.
ErrIndexNotFound = errors.NewKind("index %q was not found")
// ErrIndexDeleteInvalidStatus is returned when the index trying to delete
// does not have a ready or outdated state.
ErrIndexDeleteInvalidStatus = errors.NewKind("can't delete index %q because it's not ready for removal")
) | sql/index_driver.go | 0.764452 | 0.407392 | index_driver.go | starcoder |
package benford
import (
"gonum.org/v1/gonum/stat"
)
// Benford = result of the Benford's Test,
// Benford.Dist is a array with float64 which describe the distribution of the numbers 1 to 9,
// Benford.ChiSquared is a float and describes how well Benford's Law was matched. Lower is better.
type Benford struct {
Dist []float64
ChiSquared float64
}
// firstDigit returns only the first digit of each number but no zeroes
func firstDigit(numbers []int) []int {
var new []int
for i := 0; i < len(numbers); i++ {
n := numbers[i]
// only the first digit
for n >= 10 {
n = n / 10
}
// No 0
if n == 0 {
continue
}
new = append(new, n)
}
return new
}
// Count the occurrences of one number
func countOccurrencesOfOne(numbers []int, searched int) int {
var res int
res = 0
for i := 0; i < len(numbers); i++ {
if numbers[i] == searched {
res = res + 1
}
}
return res
}
// Count the occurrences of every number in a array
func countOccurrences(numbers []int) []int {
var res []int
var searched = [9]int{1, 2, 3, 4, 5, 6, 7, 8, 9}
for i := 0; i < len(searched); i++ {
found := countOccurrencesOfOne(numbers, searched[i])
res = append(res, found)
}
return res
}
// Get the distribution of 1 to 9 in the number array
func countDistribution(occurrences []int) []float64 {
sum := float64(0)
res := []float64{}
for _, value := range occurrences {
sum = sum + float64(value)
}
for _, value := range occurrences {
n := float64(value) / sum
res = append(res, n)
}
return res
}
// CalcBenfords takes a array of integers and returns a struct with information about how well benfords law was matched
func CalcBenfords(numbers []int) Benford {
benfordNumbers := []float64{
0.301, // 1
0.176, // 2
0.125, // 3
0.097, // 4
0.079, // 5
0.067, // 6
0.058, // 7
0.051, // 8
0.046, // 9
}
occurrences := countOccurrences(firstDigit(numbers))
dist := countDistribution(occurrences)
chiSquared := stat.ChiSquare(dist, benfordNumbers)
return Benford{Dist: dist, ChiSquared: chiSquared}
} | main.go | 0.669961 | 0.631353 | main.go | starcoder |
package quadedge
import (
"fmt"
"github.com/go-spatial/geom/cmp"
)
/*
QuadEdge represents the edge data structure which implements the quadedge
algebra. The quadedge algebra was described in a well-known paper by Guibas
and Stolfi, "Primitives for the manipulation of general subdivisions and the
computation of Voronoi diagrams", ACM Transactions on Graphics, 4(2), 1985,
75-123.
Each edge object is part of a quartet of 4 edges, linked via their rot
references. Any edge in the group may be accessed using a series of rot()
operations. Quadedges in a subdivision are linked together via their next
references. The linkage between the quadedge quartets determines the topology
of the subdivision.
The edge class does not contain separate information for vertices or faces; a
vertex is implicitly defined as a ring of edges (created using the next field).
Author <NAME>
Author <NAME>
Ported to Go by <NAME>
*/
type QuadEdge struct {
	// rot links to the next edge in this edge's quartet of four edges
	// (see MakeEdge, which wires the four rot pointers into a cycle).
	rot *QuadEdge
	// vertex is the vertex associated with this directed edge
	// (presumably its origin, set via setOrig — confirm against the
	// accessor definitions elsewhere in this file).
	vertex Vertex
	// next links quadedges together within a subdivision; the linkage
	// determines the topology (see the type comment above).
	next *QuadEdge
	// data holds an arbitrary external value attached to this edge
	// (see SetData/GetData).
	data interface{}
}
// MakeEdge creates a new QuadEdge quartet describing the edge from
// origin Vertex o to destination Vertex d, and returns its base
// (primal) edge.
//
// The four edges of the quartet are linked into a cycle through their
// rot pointers, and their next pointers are initialized so the new
// edge forms an isolated orbit.
func MakeEdge(o Vertex, d Vertex) *QuadEdge {
	// Allocate the quartet and wire the rot ring: each edge's rot is
	// the following edge, cyclically.
	quartet := [4]*QuadEdge{new(QuadEdge), new(QuadEdge), new(QuadEdge), new(QuadEdge)}
	for i, e := range quartet {
		e.rot = quartet[(i+1)%4]
	}

	// Initialize the next pointers for an isolated edge: the primal
	// edges are their own orbits, the dual edges point at each other.
	quartet[0].SetNext(quartet[0])
	quartet[1].SetNext(quartet[3])
	quartet[2].SetNext(quartet[2])
	quartet[3].SetNext(quartet[1])

	// Set the endpoints on the base edge and on its rot, exactly as
	// the reference implementation does.
	base := quartet[0]
	base.setOrig(o)
	base.setDest(d)
	base.rot.setOrig(o)
	base.rot.setDest(d)
	return base
}
// Connect creates a new QuadEdge joining the destination of a to the
// origin of b, arranged so that a, b, and the new edge all share the
// same left face once the connection is complete. Additionally, the
// data pointers of the new edge are set.
//
// Returns the connecting edge.
func Connect(a *QuadEdge, b *QuadEdge) *QuadEdge {
	bridge := MakeEdge(a.Dest(), b.Orig())
	// Splice the new edge into the ring following a ...
	Splice(bridge, a.LNext())
	// ... and its sym into the ring at b.
	Splice(bridge.Sym(), b)
	return bridge
}
/*
Splice splices two edges together or apart.
Splice affects the two edge rings around the origins of a and b, and,
independently, the two edge rings around the left faces of a and b.
In each case, (i) if the two rings are distinct, Splice will combine them
into one, or (ii) if the two are the same ring, Splice will break it into
two separate pieces. Thus, Splice can be used both to attach the two edges
together, and to break them apart.
a - an edge to splice
b - an edge to splice
*/
func Splice(a *QuadEdge, b *QuadEdge) {
	// Duals of the origin rings being exchanged.
	aDual := a.ONext().Rot()
	bDual := b.ONext().Rot()
	// Read every successor before any pointer is rewritten.
	aNext := a.ONext()
	bNext := b.ONext()
	aDualNext := aDual.ONext()
	bDualNext := bDual.ONext()
	// Exchange the successors, joining or separating the rings.
	a.SetNext(bNext)
	b.SetNext(aNext)
	aDual.SetNext(bDualNext)
	bDual.SetNext(aDualNext)
}
/*
Swap turns an edge counterclockwise inside its enclosing quadrilateral.
e - the quadedge to turn
*/
func Swap(e *QuadEdge) {
	// Neighbors at each endpoint before detaching e.
	a := e.OPrev()
	b := e.Sym().OPrev()
	// Detach e from both of its origin rings...
	Splice(e, a)
	Splice(e.Sym(), b)
	// ...and re-attach it across the other diagonal of the quadrilateral.
	Splice(e, a.LNext())
	Splice(e.Sym(), b.LNext())
	// Update the endpoints to the new diagonal's vertices.
	e.setOrig(a.Dest())
	e.setDest(b.Dest())
}
/*
Quadedges must be made using {@link makeEdge},
to ensure proper construction.
private QuadEdge()
{
}
*/
/*
GetPrimary gets the primary edge of this quadedge and its sym. The primary
edge is the one for which the origin and destination coordinates are ordered
according to the standard Point ordering.
Returns the primary quadedge.
If qe is nil a panic will occur.
*/
func (qe *QuadEdge) GetPrimary() *QuadEdge {
	o, d := qe.Orig(), qe.Dest()
	if cmp.PointLess(o, d) || cmp.PointEqual(o, d) {
		return qe
	}
	return qe.Sym()
}
// SetData sets the external data value for this edge.
// data is an object containing external data.
// If qe is nil a panic will occur.
func (qe *QuadEdge) SetData(data interface{}) {
	qe.data = data
}

// GetData returns the external data value for this edge.
// If qe is nil a panic will occur.
func (qe *QuadEdge) GetData() interface{} {
	return qe.data
}

// Delete marks this quadedge as being deleted. This does not free the memory
// used by this quadedge quartet, but indicates that this edge no longer
// participates in a subdivision. Deletion is signalled by a nil rot pointer
// (see IsLive).
// If qe is nil a panic will occur.
func (qe *QuadEdge) Delete() {
	qe.rot = nil
}

// IsLive tests whether this edge has been deleted.
// Returns true if this edge has not been deleted.
// If qe is nil a panic will occur.
func (qe *QuadEdge) IsLive() bool {
	return qe.rot != nil
}

// SetNext sets the connected edge (this edge's successor in its origin ring).
// If qe is nil a panic will occur.
func (qe *QuadEdge) SetNext(next *QuadEdge) {
	qe.next = next
}
/**************************************************************************
QuadEdge Algebra
***************************************************************************
*/
// Rot gets the dual of this edge, directed from its right to its left.
// Returns the rotated edge.
// If qe is nil a panic will occur.
func (qe *QuadEdge) Rot() *QuadEdge {
	return qe.rot
}

// InvRot gets the dual of this edge, directed from its left to its right;
// equivalent to three applications of Rot.
// If qe is nil a panic will occur.
func (qe *QuadEdge) InvRot() *QuadEdge {
	return qe.rot.Sym()
}

// Sym gets the edge from the destination to the origin of this edge
// (two rotations of the quartet).
// If qe is nil a panic will occur.
func (qe *QuadEdge) Sym() *QuadEdge {
	return qe.rot.rot
}

// ONext gets the next CCW edge around the origin of this edge.
// If qe is nil a panic will occur.
func (qe *QuadEdge) ONext() *QuadEdge {
	return qe.next
}

// OPrev gets the next CW edge around (from) the origin of this edge.
// If qe is nil a panic will occur.
func (qe *QuadEdge) OPrev() *QuadEdge {
	return qe.rot.next.rot
}

// DNext gets the next CCW edge around (into) the destination of this edge.
// If qe is nil a panic will occur.
func (qe *QuadEdge) DNext() *QuadEdge {
	return qe.Sym().ONext().Sym()
}

// DPrev gets the next CW edge around (into) the destination of this edge.
// If qe is nil a panic will occur.
func (qe *QuadEdge) DPrev() *QuadEdge {
	return qe.InvRot().ONext().InvRot()
}

// LNext gets the CCW edge around the left face following this edge.
// If qe is nil a panic will occur.
func (qe *QuadEdge) LNext() *QuadEdge {
	return qe.InvRot().ONext().Rot()
}

// LPrev gets the CCW edge around the left face before this edge.
// If qe is nil a panic will occur.
func (qe *QuadEdge) LPrev() *QuadEdge {
	return qe.next.Sym()
}

// RNext gets the edge around the right face CCW following this edge.
// If qe is nil a panic will occur.
func (qe *QuadEdge) RNext() *QuadEdge {
	return qe.rot.next.InvRot()
}

// RPrev gets the edge around the right face CCW before this edge.
// If qe is nil a panic will occur.
func (qe *QuadEdge) RPrev() *QuadEdge {
	return qe.Sym().ONext()
}
/**********************************************************************************************
Data Access
**********************************************************************************************/
// setOrig sets the vertex for this edge's origin.
// o - the origin vertex.
// If qe is nil a panic will occur.
func (qe *QuadEdge) setOrig(o Vertex) {
	qe.vertex = o
}

// setDest sets the vertex for this edge's destination, which is stored as
// the origin of the symmetric edge.
// d - the destination vertex.
// If qe is nil a panic will occur.
func (qe *QuadEdge) setDest(d Vertex) {
	qe.Sym().setOrig(d)
}

// Orig gets the vertex for the edge's origin.
// If qe is nil a panic will occur.
func (qe *QuadEdge) Orig() Vertex {
	return qe.vertex
}

// Dest gets the vertex for the edge's destination (the symmetric edge's
// origin).
// If qe is nil a panic will occur.
func (qe *QuadEdge) Dest() Vertex {
	return qe.Sym().Orig()
}
/*
Gets the length of the geometry of this quadedge.
@return the length of the quadedge
public double getLength() {
return orig().getCoordinate().distance(dest().getCoordinate());
}
*/
/*
Tests if this quadedge and another have the same line segment geometry,
regardless of orientation.
@param qe a quadedge
@return true if the quadedges are based on the same line segment regardless of orientation
public boolean equalsNonOriented(QuadEdge qe) {
if (equalsOriented(qe))
return true;
if (equalsOriented(qe.sym()))
return true;
return false;
}
*/
/*
Tests if this quadedge and another have the same line segment geometry
with the same orientation.
@param qe a quadedge
@return true if the quadedges are based on the same line segment
public boolean equalsOriented(QuadEdge qe) {
if (orig().getCoordinate().equals2D(qe.orig().getCoordinate())
&& dest().getCoordinate().equals2D(qe.dest().getCoordinate()))
return true;
return false;
}
*/
/*
Creates a {@link LineSegment} representing the
geometry of this edge.
@return a LineSegment
public LineSegment toLineSegment()
{
return new LineSegment(vertex.getCoordinate(), dest().getCoordinate());
}
*/
/*
String Converts this edge to a WKT two-point LINESTRING indicating
the geometry of this edge.
Unlike JTS, if IsLive() is false, a deleted string is returned.
return a String representing this edge's geometry
If qe is nil a panic will occur.
*/
func (qe *QuadEdge) String() string {
if qe.IsLive() == false {
return fmt.Sprintf("<deleted %v>", qe.Orig())
}
return fmt.Sprintf("LINESTRING (%v %v, %v %v)", qe.Orig().X(), qe.Orig().Y(), qe.Dest().X(), qe.Dest().Y())
} | planar/triangulate/quadedge/quadedge.go | 0.880322 | 0.670676 | quadedge.go | starcoder |
package stdlib
import (
"fmt"
"math"
"math/cmplx"
"github.com/vida-lang/vida/vida"
)
// loadMath loads the stdlib for mathematical computations over Floats.
// It exposes the usual real constants and elementary functions, thinly
// wrapping Go's math package.
func loadMath() vida.Importable {
	gmodule := vida.GModule{Name: "math", Namespace: vida.Namespace{
		"e":       vida.Float(math.E),
		"pi":      vida.Float(math.Pi),
		"tau":     vida.Float(math.Pi * 2.0),
		"phi":     vida.Float(math.Phi),
		"sqrt2":   vida.Float(math.Sqrt2),
		"sqrtE":   vida.Float(math.SqrtE),
		"sqrtPi":  vida.Float(math.SqrtPi),
		"sqrtPhi": vida.Float(math.SqrtPhi),
		"ln2":     vida.Float(math.Ln2),
		// Use the stdlib constant (defined as 1/Ln2) for consistency with
		// the surrounding entries.
		"log2e":     vida.Float(math.Log2E),
		"ln10":      vida.Float(math.Ln10),
		"log10e":    vida.Float(math.Log10E),
		"inf":       vida.Float(math.Inf(0)),
		"nan":       vida.Float(math.NaN()),
		"isnan":     vida.GFunction{Name: "isnan", Value: gIsNan},
		"acos":      GFunctionFromFloatToFloat("acos", math.Acos),
		"acosh":     GFunctionFromFloatToFloat("acosh", math.Acosh),
		"asin":      GFunctionFromFloatToFloat("asin", math.Asin),
		// Fixed typo: the exposed function name was misspelled "ashih".
		"asinh":     GFunctionFromFloatToFloat("asinh", math.Asinh),
		"atan":      GFunctionFromFloatToFloat("atan", math.Atan),
		"atan2":     GFunctionFromFloatFloatToFloat("atan2", math.Atan2),
		"atanh":     GFunctionFromFloatToFloat("atanh", math.Atanh),
		"cbrt":      GFunctionFromFloatToFloat("cbrt", math.Cbrt),
		"ceil":      GFunctionFromFloatToFloat("ceil", math.Ceil),
		"cos":       GFunctionFromFloatToFloat("cos", math.Cos),
		"cosh":      GFunctionFromFloatToFloat("cosh", math.Cosh),
		"exp":       GFunctionFromFloatToFloat("exp", math.Exp),
		"exp2":      GFunctionFromFloatToFloat("exp2", math.Exp2),
		"floor":     GFunctionFromFloatToFloat("floor", math.Floor),
		"gamma":     GFunctionFromFloatToFloat("gamma", math.Gamma),
		"hypot":     GFunctionFromFloatFloatToFloat("hypot", math.Hypot),
		"log":       GFunctionFromFloatToFloat("log", math.Log),
		"log10":     GFunctionFromFloatToFloat("log10", math.Log10),
		"log2":      GFunctionFromFloatToFloat("log2", math.Log2),
		"max":       GFunctionFromFloatFloatToFloat("max", math.Max),
		"min":       GFunctionFromFloatFloatToFloat("min", math.Min),
		"pow":       GFunctionFromFloatFloatToFloat("pow", math.Pow),
		"rem":       GFunctionFromFloatFloatToFloat("rem", math.Remainder),
		"round":     GFunctionFromFloatToFloat("round", math.Round),
		"sin":       GFunctionFromFloatToFloat("sin", math.Sin),
		"sinh":      GFunctionFromFloatToFloat("sinh", math.Sinh),
		"sqrt":      GFunctionFromFloatToFloat("sqrt", math.Sqrt),
		"tan":       GFunctionFromFloatToFloat("tan", math.Tan),
		"tanh":      GFunctionFromFloatToFloat("tanh", math.Tanh),
		"trunc":     GFunctionFromFloatToFloat("trunc", math.Trunc),
		"toDegrees": GFunctionFromFloatToFloat("toDegrees", func(f float64) float64 { return math.Mod(f*(180.0/math.Pi), 360.0) }),
		"toRadians": GFunctionFromFloatToFloat("toRadians", func(f float64) float64 { return math.Mod(f*(math.Pi/180.0), math.Pi*2.0) }),
	}}
	return gmodule
}
func gIsNan(args ...vida.Value) (vida.Value, error) {
if len(args) == 1 {
switch value := args[0].(type) {
case vida.Float:
return vida.Bool(math.IsNaN(float64(value))), nil
case vida.Complex:
return vida.Bool(cmplx.IsNaN(complex128(value))), nil
default:
return vida.False, nil
}
}
return nil, fmt.Errorf("expected %v argument and got %v", 1, len(args))
} | stdlib/math.go | 0.596668 | 0.584419 | math.go | starcoder |
package graph
import (
"fmt"
"github.com/heustis/tsp-solver-go/model"
)
// GraphEdge is a path between two graph vertices, with its total length
// cached in distance.
type GraphEdge struct {
	path     []*GraphVertex // vertices along the edge, start to end inclusive
	distance float64        // cached length of the whole path
}

// Delete releases this edge's path.
func (e *GraphEdge) Delete() {
	// Note: Deleting a graph edge should not delete the graph itself, use Graph.Delete() to delete all GraphVertexes.
	e.path = nil
}
// DistanceIncrease returns how much longer the circuit becomes if vertex is
// inserted into this edge: the two resulting half-edges minus the edge they
// replace.
func (e *GraphEdge) DistanceIncrease(vertex model.CircuitVertex) float64 {
	first, second := e.Split(vertex)
	return first.GetLength() + second.GetLength() - e.GetLength()
}
// Equals reports whether other is a *GraphEdge with the same path, comparing
// vertex pointers element-by-element. A value that is not a *GraphEdge is
// never equal.
func (e *GraphEdge) Equals(other interface{}) bool {
	otherEdge, okay := other.(*GraphEdge)
	if !okay {
		// Previously a failed type assertion fell through to `return true`,
		// so any non-*GraphEdge compared as equal. That was a bug.
		return false
	}
	if len(e.path) != len(otherEdge.path) {
		return false
	}
	for i := range e.path {
		if e.path[i] != otherEdge.path[i] {
			return false
		}
	}
	return true
}
// GetEnd returns the final vertex of the edge's path, or nil when the path
// is empty.
func (e *GraphEdge) GetEnd() model.CircuitVertex {
	n := len(e.path)
	if n == 0 {
		return nil
	}
	return e.path[n-1]
}
// GetLength returns the cached total length of the edge's path.
func (e *GraphEdge) GetLength() float64 {
	return e.distance
}

// GetPath returns the vertices along this edge, start to end inclusive.
func (e *GraphEdge) GetPath() []*GraphVertex {
	return e.path
}

// GetStart returns the first vertex of the path; panics on an empty path.
func (e *GraphEdge) GetStart() model.CircuitVertex {
	return e.path[0]
}
// Intersects reports whether e and other pass through at least one common
// vertex (compared by vertex ID).
// Building a set of e's IDs keeps the check at O(len(e) + len(other)).
func (e *GraphEdge) Intersects(other model.CircuitEdge) bool {
	otherGraphEdge := other.(*GraphEdge)
	seen := make(map[string]bool, len(e.path))
	for _, v := range e.path {
		seen[v.id] = true
	}
	for _, o := range otherGraphEdge.path {
		if seen[o.id] {
			return true
		}
	}
	return false
}
// Merge returns a single edge from this edge's start to other's end.
func (e *GraphEdge) Merge(other model.CircuitEdge) model.CircuitEdge {
	return e.GetStart().(*GraphVertex).EdgeTo(other.GetEnd())
}

// Split returns the two edges formed by routing this edge through vertex.
func (e *GraphEdge) Split(vertex model.CircuitVertex) (model.CircuitEdge, model.CircuitEdge) {
	return e.GetStart().EdgeTo(vertex), vertex.EdgeTo(e.GetEnd())
}
// String renders the edge as a JSON-style array of quoted vertex IDs.
func (e *GraphEdge) String() string {
	out := "["
	for i, v := range e.path {
		if i > 0 {
			out += ","
		}
		out += fmt.Sprintf(`"%s"`, v.id)
	}
	return out + "]"
}
var _ model.CircuitEdge = (*GraphEdge)(nil) | graph/graphedge.go | 0.73782 | 0.486514 | graphedge.go | starcoder |
package lib
import (
"fmt"
"math"
"math/rand"
"runtime"
"gonum.org/v1/gonum/mat"
gtet_rand "github.com/phil-mansfield/gotetra/math/rand"
"github.com/phil-mansfield/gotetra/render/geom"
"github.com/phil-mansfield/gotetra/render/io"
"github.com/phil-mansfield/nbody-utils/thread"
)
// GetDensities writes densities associated with an array of tetrahedra into the
// array out. These are interpolated off the grid using CIC. Requires buffers of
// randompoint sampled from within a tetrahedra. (i.e. the output of
// TetraRngBuf()) This function is multithreaded and will automatically run
// the hot loop on GOMAXPROCS cores.
//
// Layout assumption: tet holds six tetrahedra per output element
// (tet[i*6 .. i*6+5]), and each contributes 1/6 of out[i].
func GetDensities(
	ghd *io.GridHeader, grid[]float64, tet []geom.Tetra,
	rngBuf [][]geom.Vec, out []float64,
) {
	numCPU := runtime.GOMAXPROCS(-1)
	thread.SplitArray(len(out), numCPU, func(worker, start, end, step int) {
		// Per-worker scratch buffer; sized to match the sample buffers.
		vecBuf := make([]geom.Vec, len(rngBuf[0]))
		for i := start; i < end; i += step {
			for j := 0; j < 6; j++ {
				// Pick a random pre-generated sample set (math/rand's
				// global source is safe for concurrent use).
				bufIdx := rand.Intn(len(rngBuf))
				// Map the unit samples into this tetrahedron.
				tet[i*6 + j].DistributeTetra(rngBuf[bufIdx], vecBuf)
				density := cicInterpolate(vecBuf, ghd, grid)
				out[i] += density / 6
			}
		}
	})
}
// ngcInterpolate averages grid values at the cells containing each point,
// i.e. nearest-grid-point (NGP) assignment. Points are shifted to the grid's
// origin and wrapped periodically into [0, L).
func ngcInterpolate(
	vecs []geom.Vec, ghd *io.GridHeader, grid []float64,
) float64 {
	// L is the grid's extent minus one pixel; dx is the pixel width.
	L := float32(ghd.Loc.Span[0] - ghd.Loc.PixelWidth)
	dx := float32(ghd.Loc.PixelWidth)
	N := int(ghd.Loc.PixelSpan[0])
	origin := [3]float32{
		float32(ghd.Loc.Origin[0]), float32(ghd.Loc.Origin[1]),
		float32(ghd.Loc.Origin[2]),
	}
	sum := 0.0
	for _, vec := range vecs {
		// Translate to grid coordinates and wrap once periodically.
		for k := 0; k < 3; k++ {
			vec[k] -= origin[k]
			if vec[k] < 0 {
				vec[k] += L
			} else if vec[k] >= L {
				vec[k] -= L
			}
		}
		// Nearest (containing) cell index along each axis.
		ix, iy, iz := int(vec[0]/dx), int(vec[1]/dx), int(vec[2]/dx)
		sum += grid[ix + iy*N + iz*N*N]
	}
	return sum / float64(len(vecs))
}
// cicInterpolate averages cloud-in-cell (CIC) interpolated grid values over
// the given points: each point's density is a trilinear blend of the eight
// cells whose centers surround it.
func cicInterpolate(
	vecs []geom.Vec, ghd *io.GridHeader, grid []float64,
) float64 {
	L := float32(ghd.Cosmo.BoxWidth)
	dx := float32(ghd.Loc.PixelWidth)
	N := int(ghd.Loc.PixelSpan[0])
	N2 := int(ghd.Loc.PixelSpan[0]*ghd.Loc.PixelSpan[1])
	origin := [3]float32{
		float32(ghd.Loc.Origin[0]), float32(ghd.Loc.Origin[1]),
		float32(ghd.Loc.Origin[2]),
	}
	// Periodic wrapping only applies when the grid covers the whole box.
	periodic := ghd.Loc.Span[0] >= ghd.Cosmo.BoxWidth
	// NOTE(review): nTrue is N-1 and indices are wrapped to [0, nTrue),
	// which presumably means the last pixel duplicates the first in a
	// full-box grid — confirm against the grid writer.
	nTrue := int(ghd.Loc.PixelSpan[0]) - 1
	sum := 0.0
	for _, vec := range vecs {
		// Translate to grid coordinates and wrap once into [0, L).
		for k := 0; k < 3; k++ {
			vec[k] -= origin[k]
			if vec[k] < 0 {
				vec[k] += L
			} else if vec[k] >= L {
				vec[k] -= L
			}
		}
		// Position in pixel units.
		xp, yp, zp := float64(vec[0]/dx), float64(vec[1]/dx), float64(vec[2]/dx)
		// Floor calls neccessary if xp - 0.5 wraps around.
		ix0 := int(math.Floor(xp - 0.5))
		iy0 := int(math.Floor(yp - 0.5))
		iz0 := int(math.Floor(zp - 0.5))
		// Distances from the lower cell center give the CIC weights:
		// d* toward the upper cell, t* = 1 - d* toward the lower one.
		xc, yc, zc := float64(ix0)+0.5, float64(iy0)+0.5, float64(iz0)+0.5
		dx, dy, dz := xp - xc, yp - yc, zp - zc
		tx, ty, tz := 1 - dx, 1 - dy, 1 - dz
		ix1, iy1, iz1 := ix0 + 1, iy0 + 1, iz0 + 1
		if periodic {
			if ix1 == nTrue { ix1 = 0 }
			if iy1 == nTrue { iy1 = 0 }
			if iz1 == nTrue { iz1 = 0 }
			if ix0 == -1 { ix0 = nTrue -1 }
			if iy0 == -1 { iy0 = nTrue -1 }
			if iz0 == -1 { iz0 = nTrue -1 }
		}
		// Accumulate the trilinear blend of the eight surrounding cells.
		sum += grid[(ix0) + (iy0)*N + (iz0)*N2]*tx*ty*tz
		sum += grid[(ix1) + (iy0)*N + (iz0)*N2]*dx*ty*tz
		sum += grid[(ix0) + (iy1)*N + (iz0)*N2]*tx*dy*tz
		sum += grid[(ix1) + (iy1)*N + (iz0)*N2]*dx*dy*tz
		sum += grid[(ix0) + (iy0)*N + (iz1)*N2]*tx*ty*dz
		sum += grid[(ix1) + (iy0)*N + (iz1)*N2]*dx*ty*dz
		sum += grid[(ix0) + (iy1)*N + (iz1)*N2]*tx*dy*dz
		sum += grid[(ix1) + (iy1)*N + (iz1)*N2]*dx*dy*dz
	}
	return sum / float64(len(vecs))
}
// TetraRngBuf returns pts pseudo-random sample points, generated uniformly in
// the unit cube and then remapped by geom.DistributeUnit. The generator is
// seeded from the current time.
func TetraRngBuf(pts int) []geom.Vec {
	gen := gtet_rand.NewTimeSeed(gtet_rand.Tausworthe)
	samples := make([]geom.Vec, pts)
	for i := range samples {
		for dim := 0; dim < 3; dim++ {
			samples[i][dim] = float32(gen.Uniform(0, 1))
		}
	}
	geom.DistributeUnit(samples)
	return samples
}
// DeformationEig returns the eigenvalues of a deformation tensor, ordered
// from largest to smallest. Requires a length 3 buffer of complex128 values.
// Panics if the factorization fails.
func DeformationEig(def *mat.Dense, eig *mat.Eigen, buf []complex128) (
	l1, l2, l3 float64,
) {
	ok := eig.Factorize(def, mat.EigenRight)
	if !ok {
		panic(fmt.Sprintf("decomposition of %v failed", def))
	}
	val := eig.Values(buf)
	// Imaginary parts are discarded; presumably the tensor is symmetric so
	// its eigenvalues are real — confirm with callers.
	return sort3(real(val[0]), real(val[1]), real(val[2]))
}
// sort3 returns its three arguments ordered from largest to smallest.
func sort3(x, y, z float64) (l1, l2, l3 float64) {
	lo, hi := x, x
	if y < lo {
		lo = y
	} else if y > hi {
		hi = y
	}
	if z < lo {
		lo = z
	} else if z > hi {
		hi = z
	}
	// The middle value is what remains after removing the extremes from
	// the total.
	return hi, (x + y + z) - (lo + hi), lo
}
package exif
import (
"errors"
"fmt"
"time"
"github.com/dsoprea/go-logging"
"github.com/golang/geo/s2"
"github.com/dsoprea/go-exif/v3/common"
)
var (
	// ErrGpsCoordinatesNotValid means that some part of the geographic data was
	// unparseable.
	ErrGpsCoordinatesNotValid = errors.New("GPS coordinates not valid")
)

// GpsDegrees is a high-level struct representing geographic data.
type GpsDegrees struct {
	// Orientation describes the N/E/S/W direction that this position is
	// relative to.
	Orientation byte

	// Degrees is a simple float representing the underlying rational degrees
	// amount.
	Degrees float64

	// Minutes is a simple float representing the underlying rational minutes
	// amount.
	Minutes float64

	// Seconds is a simple float representing the underlying rational seconds
	// amount.
	Seconds float64
}
// NewGpsDegreesFromRationals returns a GpsDegrees struct given the EXIF-encoded
// information. The refValue is the N/E/S/W direction that this position is
// relative to. rawCoordinate must contain exactly three rationals
// (degrees, minutes, seconds).
func NewGpsDegreesFromRationals(refValue string, rawCoordinate []exifcommon.Rational) (gd GpsDegrees, err error) {
	// Internal panics (including the arity check below) are converted into
	// the returned error.
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()
	if len(rawCoordinate) != 3 {
		log.Panicf("new GpsDegrees struct requires a raw-coordinate with exactly three rationals")
	}
	// Orientation takes the first byte of the reference ('N'/'E'/'S'/'W');
	// each component is the rational reduced to a float.
	gd = GpsDegrees{
		Orientation: refValue[0],
		Degrees:     float64(rawCoordinate[0].Numerator) / float64(rawCoordinate[0].Denominator),
		Minutes:     float64(rawCoordinate[1].Numerator) / float64(rawCoordinate[1].Denominator),
		Seconds:     float64(rawCoordinate[2].Numerator) / float64(rawCoordinate[2].Denominator),
	}
	return gd, nil
}
// String returns a descriptive string.
func (d GpsDegrees) String() string {
	return fmt.Sprintf("Degrees<O=[%s] D=(%g) M=(%g) S=(%g)>", string([]byte{d.Orientation}), d.Degrees, d.Minutes, d.Seconds)
}
// Decimal calculates and returns the simplified float representation of the
// component degrees. Southern and western orientations yield negative values.
func (d GpsDegrees) Decimal() float64 {
	// Degrees, Minutes, and Seconds are already float64; the previous
	// float64(...) conversions were redundant.
	decimal := d.Degrees + d.Minutes/60.0 + d.Seconds/3600.0
	if d.Orientation == 'S' || d.Orientation == 'W' {
		return -decimal
	}
	return decimal
}
// Raw returns a Rational struct that can be used to *write* coordinates. In
// practice, the denominator are typically (1) in the original EXIF data, and,
// that being the case, this will best preserve precision.
//
// NOTE(review): the uint32 conversions truncate fractional parts and assume
// non-negative components (sign is carried by Orientation) — confirm for
// coordinates with fractional seconds.
func (d GpsDegrees) Raw() []exifcommon.Rational {
	return []exifcommon.Rational{
		{Numerator: uint32(d.Degrees), Denominator: 1},
		{Numerator: uint32(d.Minutes), Denominator: 1},
		{Numerator: uint32(d.Seconds), Denominator: 1},
	}
}
// GpsInfo encapsulates all of the geographic information in one place.
type GpsInfo struct {
	Latitude, Longitude GpsDegrees // position, with hemisphere encoded in Orientation
	Altitude            int        // altitude (units as stored in the EXIF data)
	Timestamp           time.Time  // GPS timestamp
}

// String returns a descriptive string.
func (gi *GpsInfo) String() string {
	return fmt.Sprintf("GpsInfo<LAT=(%.05f) LON=(%.05f) ALT=(%d) TIME=[%s]>",
		gi.Latitude.Decimal(), gi.Longitude.Decimal(), gi.Altitude, gi.Timestamp)
}
// S2CellId returns the cell-ID of the geographic location on the earth.
func (gi *GpsInfo) S2CellId() s2.CellID {
latitude := gi.Latitude.Decimal()
longitude := gi.Longitude.Decimal()
ll := s2.LatLngFromDegrees(latitude, longitude)
cellId := s2.CellIDFromLatLng(ll)
if cellId.IsValid() == false {
panic(ErrGpsCoordinatesNotValid)
}
return cellId
} | v3/gps.go | 0.773388 | 0.555616 | gps.go | starcoder |
package matchers
import "bytes"
// Zip matches a zip archive.
func Zip(in []byte) bool {
return len(in) > 3 &&
in[0] == 0x50 && in[1] == 0x4B &&
(in[2] == 0x3 || in[2] == 0x5 || in[2] == 0x7) &&
(in[3] == 0x4 || in[3] == 0x6 || in[3] == 0x8)
}
// SevenZ matches a 7z archive by its 6-byte signature.
// bytes.HasPrefix handles short inputs, fixing the previous off-by-one
// (len(in) > 6) that rejected an input of exactly six bytes.
func SevenZ(in []byte) bool {
	return bytes.HasPrefix(in, []byte{0x37, 0x7A, 0xBC, 0xAF, 0x27, 0x1C})
}
// Epub matches an EPUB file: a zip whose "mimetype" entry declares
// application/epub+zip at the fixed offset 30.
// Uses >= 58 rather than > 58 so an input of exactly 58 bytes matches.
func Epub(in []byte) bool {
	return len(in) >= 58 && bytes.Equal(in[30:58], []byte("mimetypeapplication/epub+zip"))
}
// Jar matches a Java archive file by the presence of its manifest entry name.
// Note: this scans the whole buffer rather than checking a fixed offset.
func Jar(in []byte) bool {
	return bytes.Contains(in, []byte("META-INF/MANIFEST.MF"))
}
// Gzip matches gzip files based on http://www.zlib.org/rfc-gzip.html#header-trailer.
// bytes.HasPrefix handles short inputs, fixing the previous off-by-one
// (len(in) > 2) that rejected an input of exactly two bytes.
func Gzip(in []byte) bool {
	return bytes.HasPrefix(in, []byte{0x1f, 0x8b})
}
// Crx matches a Chrome extension file: a zip archive prepended by "Cr24".
func Crx(in []byte) bool {
	return bytes.HasPrefix(in, []byte("Cr24"))
}
// Tar matches a (t)ape (ar)chive file by the "ustar" marker at offset 257.
// Uses >= 262 rather than > 262 so a header of exactly 262 bytes matches.
func Tar(in []byte) bool {
	return len(in) >= 262 && bytes.Equal(in[257:262], []byte("ustar"))
}
// Fits matches a Flexible Image Transport System file by its mandatory
// "SIMPLE  =                    T" header card.
func Fits(in []byte) bool {
	return bytes.HasPrefix(in, []byte{0x53, 0x49, 0x4D, 0x50, 0x4C, 0x45, 0x20,
		0x20, 0x3D, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x54})
}
// Xar matches an eXtensible ARchive format file by its "xar!" magic.
func Xar(in []byte) bool {
	return bytes.HasPrefix(in, []byte("xar!"))
}
// Bz2 matches a bzip2 file by its "BZh" magic.
func Bz2(in []byte) bool {
	return bytes.HasPrefix(in, []byte("BZh"))
}
// Ar matches an ar (Unix) archive file by its "!<arch>" magic.
func Ar(in []byte) bool {
	return bytes.HasPrefix(in, []byte("!<arch>"))
}
// Deb matches a Debian package file: an ar archive whose first entry,
// beginning at offset 8, is named "debian-binary".
func Deb(in []byte) bool {
	return len(in) > 8 && bytes.HasPrefix(in[8:], []byte("debian-binary"))
}
// Rar matches a RAR archive file: either the 8-byte v5+ signature
// "Rar!\x1A\x07\x01\x00" or the 7-byte v1.5-v4.x signature "Rar!\x1A\x07\x00".
// The previous length check (len(in) > 8) rejected both exact signatures.
func Rar(in []byte) bool {
	if !bytes.HasPrefix(in, []byte{0x52, 0x61, 0x72, 0x21, 0x1A, 0x07}) {
		return false
	}
	// v5+ signature.
	if len(in) >= 8 && bytes.Equal(in[6:8], []byte{0x01, 0x00}) {
		return true
	}
	// v1.5-v4.x signature.
	return len(in) >= 7 && in[6] == 0x00
}
// Warc matches a Web ARChive file by its "WARC/" version prefix.
func Warc(in []byte) bool {
	return bytes.HasPrefix(in, []byte("WARC/"))
}
package ui
import (
"image"
"image/color"
"strings"
"gioui.org/f32"
"gioui.org/layout"
"gioui.org/op"
)
// grid is a 2D array of textures rendered on a fixed-size cell lattice.
type grid struct {
	Background color.NRGBA // background color passed to each cell's Layout
	cellSize   image.Point // pixel size of one cell (see SetCellSize)
	data       [][]texture // rows of cells; rows share one backing slice
}

// Init initializes the grid with size as the available space.
// It discards any previous contents before allocating.
func (g *grid) Init(width, height int) {
	g.data = nil
	g.Resize(width, height)
}
// Resize changes the grid to width x height cells, allocating a single
// backing slice shared by all rows and reusing the old backing when it is
// large enough.
//
// NOTE(review): when preserving old contents, every destination row is
// filled from old[:w] — the first w cells of the old backing (i.e. row 0) —
// rather than from the corresponding old row, and when the old backing is
// reused the copy source and destination alias. Confirm whether preserving
// all rows across a resize was intended.
func (g *grid) Resize(width, height int) {
	// First allocation: carve rows out of one contiguous texture slice.
	if len(g.data) == 0 {
		g.data = make([][]texture, height)
		textures := make([]texture, width*height)
		for i := range g.data {
			g.data[i] = textures[:width]
			textures = textures[width:]
		}
		return
	}
	// Recover the full backing slice from row 0's capacity.
	textures := g.data[0][:cap(g.data[0])]
	wh := width * height
	// w is the number of columns shared by the old and new widths.
	sz := g.Size()
	w := sz.X
	if width < w {
		w = width
	}
	old := textures
	if len(textures) < wh {
		textures = make([]texture, wh)
	}
	g.data = append(g.data[:0], make([][]texture, height)...)
	for i := range g.data {
		g.data[i] = textures[:width]
		copy(textures, old[:w])
		textures = textures[width:]
	}
}
// Size returns the width and height of the grid as a point.
func (g *grid) Size() image.Point {
	if len(g.data) == 0 {
		return image.Point{}
	}
	return image.Pt(len(g.data[0]), len(g.data))
}

// CellSize returns the width and height of a grid's cell as a point.
func (g *grid) CellSize() image.Point {
	return g.cellSize
}

// SetCellSize updates the grid cell's size.
func (g *grid) SetCellSize(size image.Point) {
	g.cellSize = size
}

// Clear zeroes the grid's cells by wiping row 0's full capacity, which spans
// the shared backing slice from row 0 onward. NOTE(review): on a grid
// produced by Slice this also clears cells outside the visible window —
// confirm that is acceptable to callers.
func (g *grid) Clear() {
	textures := g.data[0][:cap(g.data[0])]
	copy(textures, make([]texture, len(textures)))
}
// Slice returns a subset of g that shares its cells,
// so that any cell change to the returned grid changes g's.
// min is inclusive and max is exclusive, in cell coordinates.
func (g *grid) Slice(min, max image.Point) *grid {
	if len(g.data) == 0 {
		return g
	}
	gg := *g
	var data [][]texture
	if min.X == 0 && max.X == len(g.data[0]) {
		// Slice lines only, avoid an alloc.
		data = g.data[min.Y:max.Y]
	} else {
		data = make([][]texture, max.Y-min.Y)
		for y := min.Y; y < max.Y; y++ {
			// Index relative to the new grid's origin. Using the absolute y
			// panicked (or left leading nil rows) whenever min.Y > 0.
			data[y-min.Y] = g.data[y][min.X:max.X]
		}
	}
	gg.data = data
	return &gg
}
// Set stores texture t at cell (x, y).
func (g *grid) Set(x, y int, t texture) {
	g.data[y][x] = t
}

// Get returns the texture at cell (x, y).
func (g *grid) Get(x, y int) texture {
	return g.data[y][x]
}

// SetLine writes the given textures into row y starting at column x.
func (g *grid) SetLine(x, y int, t ...texture) {
	copy(g.data[y][x:], t)
}
// Layout draws every cell at the grid's cell size, advancing the gio offset
// right after each cell and down after each row, and returns the total
// dimensions occupied.
func (g *grid) Layout(gtx layout.Context) layout.Dimensions {
	if len(g.data) == 0 {
		return layout.Dimensions{Size: gtx.Constraints.Min}
	}
	// Restore the transform stack when done.
	defer op.Save(gtx.Ops).Load()
	// Display textures.
	gtxT := gtx
	gtxT.Constraints = layout.Exact(g.cellSize)
	var size image.Point
	for _, row := range g.data {
		// x accumulates the row's width; y tracks its tallest cell.
		var x, y int
		for _, t := range row {
			dims := t.Layout(gtxT, g.Background)
			x += dims.Size.X
			y = max(y, dims.Size.Y)
			// Advance right by the cell just drawn.
			op.Offset(f32.Point{
				X: float32(dims.Size.X),
			}).Add(gtx.Ops)
		}
		size.X = max(size.X, x)
		size.Y += y
		// Carriage return: back to column 0, down one row.
		op.Offset(f32.Point{
			X: -float32(x),
			Y: float32(y),
		}).Add(gtx.Ops)
	}
	size.Y = max(size.Y, gtx.Constraints.Min.Y)
	return layout.Dimensions{Size: size}
}
// Fill sets every cell of the grid to t.
func (g *grid) Fill(t texture) {
	for y := range g.data {
		for x := range g.data[y] {
			g.data[y][x] = t
		}
	}
}
func (g *grid) String() string {
buf := new(strings.Builder)
for _, row := range g.data {
for _, t := range row {
buf.WriteString(t.String())
}
buf.WriteString("\n")
}
return buf.String()
} | blocks/internal/ui/grid.go | 0.69987 | 0.416381 | grid.go | starcoder |
package main
import (
"math"
"github.com/unixpickle/model3d/model2d"
"github.com/unixpickle/model3d/model3d"
)
const (
	PeelLongSide  = 0.12 // length of the peel cross-section's long side
	PeelSmallSide = 0.05 // length of the cross-section's short side
	PeelStops     = 800  // number of segments along the peel's length
	PeelRounding  = 0.01 // outward inset used to round the mesh's corners
	PeelEdgeInset = 0.01 // fraction trimmed from each end of the x range
)

// NewPeel returns the peel as a solid: the quad mesh of PeelMesh expanded by
// PeelRounding (negative inset) to round off its corners.
func NewPeel() model3d.Solid {
	mesh := PeelMesh(PeelStops)
	return model3d.NewColliderSolidInset(model3d.MeshToCollider(mesh), -PeelRounding)
}
// PeelMesh creates a mesh for the peel: a rectangular cross-section swept
// along the central curve, twisted per PeelTwist, sampled at stops segments
// and capped at both ends.
func PeelMesh(stops int) *model3d.Mesh {
	curve := PeelCentralCurve()
	twist := PeelTwist()
	// Unit tangent of the central curve, approximated by finite differences.
	centralDir := func(x float64) model3d.Coord3D {
		delta := 0.001
		if x > 0 {
			x -= delta
		}
		return curve(x + delta).Sub(curve(x)).Normalize()
	}
	// corners returns the 4 cross-section corners at segment index t.
	corners := func(t int) [4]model3d.Coord3D {
		// Map t into [-(1-inset), 1-inset] along the curve parameter.
		x := float64(t)/float64(stops)*(1-PeelEdgeInset)*2 - (1 - PeelEdgeInset)
		c := curve(x)
		dir := centralDir(x)
		theta := model2d.CurveEvalX(twist, x)
		rotation := model3d.Rotation(dir, theta)
		// Build the rectangle's half-axes perpendicular to the tangent,
		// then rotate them by the twist angle.
		longDir := model3d.Z(1.0)
		shortDir := longDir.Cross(dir).Normalize()
		longDir = rotation.Apply(longDir).Scale(PeelLongSide / 2.0)
		shortDir = rotation.Apply(shortDir).Scale(PeelSmallSide / 2.0)
		// The four (+/-, +/-) combinations of the half-axes.
		scales := [4][2]float64{{1, 1}, {1, -1}, {-1, -1}, {-1, 1}}
		res := [4]model3d.Coord3D{}
		for i, scale := range scales {
			res[i] = c.Add(shortDir.Scale(scale[0])).Add(longDir.Scale(scale[1]))
		}
		return res
	}
	res := model3d.NewMesh()
	// Stitch consecutive cross-sections with one quad per side.
	for t := 0; t < stops; t++ {
		corners1 := corners(t)
		corners2 := corners(t + 1)
		for i := 0; i < 4; i++ {
			res.AddQuad(corners1[(i+1)%4], corners1[i], corners2[i], corners2[(i+1)%4])
		}
	}
	// Cap both ends (winding differs so the normals face outward).
	for _, t := range [2]int{0, stops} {
		corners := corners(t)
		if t == 0 {
			res.AddQuad(corners[0], corners[1], corners[2], corners[3])
		} else {
			res.AddQuad(corners[1], corners[0], corners[3], corners[2])
		}
	}
	return res
}
// PeelCentralCurve gets the curve of the peel's center,
// as a parameter of x: y from the planar S-curve and z from the height curve.
func PeelCentralCurve() func(x float64) model3d.Coord3D {
	planeCurve := PeelCurve()
	zCurve := PeelHeight()
	return func(x float64) model3d.Coord3D {
		return model3d.XYZ(x, model2d.CurveEvalX(planeCurve, x), model2d.CurveEvalX(zCurve, x))
	}
}

// PeelCurve creates a 2D projection of the overall peel on the plane:
// an S-shape made of two mirrored Bezier arcs joined at the origin.
func PeelCurve() model2d.Curve {
	return model2d.JoinedCurve{
		model2d.BezierCurve{
			model2d.XY(-1.0, 0.0),
			model2d.XY(-0.6, 0.6),
			model2d.XY(0.0, 0.0),
		},
		model2d.BezierCurve{
			model2d.XY(0.0, 0.0),
			model2d.XY(0.6, -0.6),
			model2d.XY(1.0, 0.0),
		},
	}
}

// PeelHeight creates the z-component of the peel on the plane, where x maps
// to z: low at the ends with a small bump above the line at the middle.
func PeelHeight() model2d.Curve {
	return model2d.JoinedCurve{
		model2d.BezierCurve{
			model2d.XY(-1.0, -0.3),
			model2d.XY(-0.2, -0.06),
		},
		model2d.BezierCurve{
			model2d.XY(-0.2, -0.06),
			model2d.XY(-0.1, -0.03),
			// Provide a small lift above the line.
			model2d.XY(0.0, 0.05),
			model2d.XY(0.1, -0.03),
			model2d.XY(0.2, -0.06),
		},
		model2d.BezierCurve{
			model2d.XY(0.2, -0.06),
			model2d.XY(1.0, -0.3),
		},
	}
}

// PeelTwist creates a function of theta with respect to x: flat at 0 on the
// left, flat at pi on the right, with a smooth half-turn around the middle.
func PeelTwist() model2d.Curve {
	return model2d.JoinedCurve{
		model2d.BezierCurve{
			model2d.XY(-1.0, 0.0),
			model2d.XY(-0.2, 0.0),
		},
		model2d.BezierCurve{
			model2d.XY(-0.2, 0.0),
			model2d.XY(-0.05, 0.0),
			model2d.XY(0.05, math.Pi),
			model2d.XY(0.2, math.Pi),
		},
		model2d.BezierCurve{
			model2d.XY(0.2, math.Pi),
			model2d.XY(1.0, math.Pi),
		},
	}
}
package fads
import (
"go-hep.org/x/hep/fmom"
"go-hep.org/x/hep/hepmc"
)
// Particle is the minimal interface shared by the particle types in this
// package: a four-momentum and an electric charge.
type Particle interface {
	P4() fmom.P4
	Charge() int32
}

// MissingEt describes an event's missing transverse energy.
type MissingEt struct {
	MET float64 // missing transverse energy
	Phi float64 // missing energy azimuthal angle
}

// ScalarHt is the scalar sum of transverse momenta.
type ScalarHt float64

// Rho is the rho energy density.
type Rho float64
// mcParticle is a Monte-Carlo generator-level particle record.
type mcParticle struct {
	Pid      int32        // pdg id number
	Status   int32        // particle status
	IsPU     byte         // 0 or 1 for particles from pile-up interactions
	M1       int          // particle 1st mother
	M2       int          // particle 2nd mother
	D1       int          // particle 1st daughter
	D2       int          // particle 2nd daughter
	McCharge int          // particle charge
	Mass     float64      // particle mass
	Mom      fmom.PxPyPzE // particle momentum (px,py,pz,e)
	Pt       float64      // particle transverse momentum
	Eta      float64      // particle pseudo-rapidity
	Phi      float64      // particle azimuthal angle
	Rapidity float64      // particle rapidity
	Pos      [4]float64   // particle vertex position (t,x,y,z)
}

// P4 returns the particle's four-momentum.
func (mc *mcParticle) P4() fmom.P4 {
	return &mc.Mom
}

// Charge returns the particle's electric charge.
func (mc *mcParticle) Charge() int32 {
	return int32(mc.McCharge)
}
// Photon is a reconstructed photon.
type Photon struct {
	Mom    fmom.PtEtaPhiM  // photon momentum (mass=0.0)
	EhoEem float64         // ratio of the hadronic over electromagnetic energy deposited in the calorimeter
	McPart *hepmc.Particle // generated particle
}

// P4 returns the photon's four-momentum.
func (pho *Photon) P4() fmom.P4 {
	return &pho.Mom
}

// Charge returns 0: photons are neutral.
func (pho *Photon) Charge() int32 {
	return 0
}

// Electron is a reconstructed electron.
type Electron struct {
	Mom       fmom.PtEtaPhiM  // electron momentum (mass=0.0)
	EleCharge int32           // electron charge
	EhoEem    float64         // ratio of the hadronic versus electromagnetic energy deposited in the calorimeter
	McPart    *hepmc.Particle // generated particle
}

// P4 returns the electron's four-momentum.
func (ele *Electron) P4() fmom.P4 {
	return &ele.Mom
}

// Charge returns the electron's electric charge.
func (ele *Electron) Charge() int32 {
	return ele.EleCharge
}

// Muon is a reconstructed muon.
type Muon struct {
	Mom      fmom.PtEtaPhiM  // muon momentum (mass=0.0)
	MuCharge int32           // muon charge
	McPart   *hepmc.Particle // generated particle
}

// P4 returns the muon's four-momentum.
func (muon *Muon) P4() fmom.P4 {
	return &muon.Mom
}

// Charge returns the muon's electric charge.
func (muon *Muon) Charge() int32 {
	return muon.MuCharge
}
// Jet is a reconstructed jet.
type Jet struct {
	Mom          fmom.PtEtaPhiM    // jet momentum
	JetCharge    int32             // jet charge
	DEta         float64           // jet radius in pseudo-rapidity
	DPhi         float64           // jet radius in azimuthal angle
	BTag         byte              // 0 or 1 for a jet that has been tagged as containing a heavy quark
	TauTag       byte              // 0 or 1 for a jet that has been tagged as a tau
	Constituents []Particle        // pointers to constituents
	McParts      []*hepmc.Particle // pointers to generated particles
}

// P4 returns the jet's four-momentum.
func (jet *Jet) P4() fmom.P4 {
	return &jet.Mom
}

// Charge returns the jet's charge.
func (jet *Jet) Charge() int32 {
	return jet.JetCharge
}

// Track is a reconstructed charged-particle track.
type Track struct {
	Pid       int32           // HEP ID number
	Mom       fmom.PtEtaPhiM  // track momentum (mass=0.0)
	TrkCharge int32           // track charge
	Eta       float64         // track pseudo-rapidity at the tracker edge
	Phi       float64         // track azimuthal angle at the tracker edge
	X         float64         // track vertex position
	Y         float64         // track vertex position
	Z         float64         // track vertex position
	Xout      float64         // track vertex position at the tracker edge
	Yout      float64         // track vertex position at the tracker edge
	Zout      float64         // track vertex position at the tracker edge
	McPart    *hepmc.Particle // pointer to generated particle
}

// P4 returns the track's four-momentum.
func (trk *Track) P4() fmom.P4 {
	return &trk.Mom
}

// Charge returns the track's electric charge.
func (trk *Track) Charge() int32 {
	return trk.TrkCharge
}
type Tower struct {
Mom fmom.EtEtaPhiM // calorimeter tower momentum
Ene float64 // calorimeter tower energy
Eem float64 // calorimeter tower electromagnetic energy
Ehad float64 // calorimter tower hadronic energy
Edges [4]float64 // calorimeter tower edges
McParts []*hepmc.Particle // pointers to generated particles
}
func (tower *Tower) P4() fmom.P4 {
return &tower.Mom
}
// Candidate is a generic reconstruction object carrying generator-level
// bookkeeping, calorimetry information and an optional tree of
// sub-candidates (e.g. the constituents of a jet).
type Candidate struct {
	Pid            int32   // HEP ID number
	Status         int32   // particle status
	M1, M2, D1, D2 int32   // particle mothers and daughters
	CandCharge     int32   // particle charge
	CandMass       float64 // particle mass
	IsPU           byte    // 0 or 1 for particles from pile-up interactions
	IsConstituent  byte    // 0 or 1 for particles being constituents
	BTag           uint32  // b-tag information (bit-mask)
	TauTag         uint32  // tau-tag information (bit-mask)
	Eem            float64 // electromagnetic energy
	Ehad           float64 // hadronic energy
	Edges          [4]float64   // presumably calorimeter cell edges, cf. Tower.Edges — confirm
	DEta           float64      // presumably radius in pseudo-rapidity, cf. Jet.DEta — confirm
	DPhi           float64      // presumably radius in azimuthal angle, cf. Jet.DPhi — confirm
	Mom            fmom.PxPyPzE // four-momentum (px,py,pz,e)
	Pos            fmom.PxPyPzE // position, stored in a 4-vector — confirm component ordering
	Area           fmom.PxPyPzE // NOTE(review): semantics not visible here — confirm
	Candidates     []Candidate  // sub-candidates (tree children)
}

// Clone returns a copy of cand. The direct children are duplicated one
// level deep via Add; note that each copied child's own Candidates slice
// still shares its backing array with the original's grandchildren.
func (cand *Candidate) Clone() *Candidate {
	c := *cand
	c.Candidates = make([]Candidate, 0, len(cand.Candidates))
	for i := range cand.Candidates {
		cc := &cand.Candidates[i]
		c.Add(cc)
	}
	return &c
}

// P4 returns the candidate's four-momentum.
func (cand *Candidate) P4() fmom.P4 {
	return &cand.Mom
}

// Charge returns the candidate's electric charge.
func (cand *Candidate) Charge() int32 {
	return cand.CandCharge
}

// Add appends a copy of c to the candidate's children.
func (cand *Candidate) Add(c *Candidate) {
	cand.Candidates = append(cand.Candidates, *c)
}
// Overlaps reports whether cand and o overlap: it is true when both are
// the same object, when o overlaps any node of cand's subtree, or when
// cand overlaps any node of o's subtree.
// NOTE(review): the base case compares pointers (cand == o); since Add
// stores children by value, nodes of two independently built trees are
// distinct pointers even when equal in value — confirm that pointer
// identity is the intended notion of overlap.
func (cand *Candidate) Overlaps(o *Candidate) bool {
	if cand == o {
		return true
	}
	// Does o overlap any of our children?
	for i := range cand.Candidates {
		cc := &cand.Candidates[i]
		if cc.Overlaps(o) {
			return true
		}
	}
	// Does cand overlap any of o's children?
	for i := range o.Candidates {
		cc := &o.Candidates[i]
		if cc.Overlaps(cand) {
			return true
		}
	}
	return false
}
// Classifier classifies candidates into categories
type Classifier interface {
	// Category returns the category index assigned to cand.
	Category(cand *Candidate) int
} | fads/datamodel.go | 0.742982 | 0.536374 | datamodel.go | starcoder
package nimgobus
// drawCursor draws the cursor glyph for the selected textbox onto the
// overlay, honouring the current flash phase (cursor blinking).
func (n *Nimbus) drawCursor() {
	// Set up cursor: a dedicated image in delete mode, otherwise the
	// configured cursor char taken from charset 0 or 1.
	var charPixels [][]int
	if n.deleteMode {
		charPixels = n.deleteModeCursorImage
	} else {
		switch n.cursorCharset {
		case 0:
			charPixels = n.charImages0[n.cursorChar]
		case 1:
			charPixels = n.charImages1[n.cursorChar]
		}
	}
	// Pick the textbox, get x, y coordinate of cursor and draw the char.
	// The cursor position is stored relative to the textbox; both col/row
	// systems are 1-based, hence the -1 when translating to absolute.
	box := n.textBoxes[n.selectedTextBox]
	relCurPos := n.cursorPosition
	var absCurPos colRow // we need the absolute cursor position
	absCurPos.col = relCurPos.col + box.col1 - 1
	absCurPos.row = relCurPos.row + box.row1 - 1
	curX, curY := n.convertColRow(absCurPos)
	// cleanup cursor if disables and/or skip to next iteration
	// Only draw when the flash phase is "on"; cursorFlash is toggled by
	// another goroutine, so read it under its mutex.
	n.muCursorFlash.Lock()
	if n.cursorFlash {
		n.writeSpriteToOverlay(Sprite{pixels: charPixels, x: curX, y: curY, colour: n.penColour, over: false})
	}
	n.muCursorFlash.Unlock()
}
// AdvanceCursor moves the cursor forward and handles line feeds and carriage returns.
// When the cursor runs past the last row, the textbox contents are scrolled
// up one character row and the freed bottom row is filled with paper colour.
// The pixel offsets used below (+10 vertical, +8 horizontal) suggest an
// 8x10-pixel character cell — TODO confirm against the charset images.
func (n *Nimbus) AdvanceCursor(forceCarriageReturn bool) {
	// Pick the textbox
	box := n.textBoxes[n.selectedTextBox]
	width := box.col2 - box.col1 // width and height in chars
	height := box.row2 - box.row1
	// Get relative cursor position and move cursor forward
	relCurPos := n.cursorPosition
	relCurPos.col++
	// Carriage return?
	if relCurPos.col > width+1 || forceCarriageReturn {
		// over the edge so carriage return
		relCurPos.col = 1
		relCurPos.row++
	}
	// Line feed?
	if relCurPos.row > height+1 {
		// move cursor up and scroll textbox
		relCurPos.row--
		// Scroll up:
		// Define bounding rectangle for the textbox
		x1, y1 := n.convertColRow(colRow{box.col1, box.row1})
		x2, y2 := n.convertColRow(colRow{box.col2, box.row2})
		y1 += 10
		x2 += 8
		// We have to manipulate videoMemory itself next, so force redraw and get the drawQueue lock
		n.ForceRedraw()
		n.muDrawQueue.Lock()
		n.muVideoMemory.Lock()
		// Copy the textbox segment of videoMemory.
		// videoMemory rows appear to be stored top-down while y counts from
		// the bottom (hence the 249-y flip) — confirm with the rest of the package.
		textBoxImg := make2dArray((x2-x1)+1, y1-y2)
		for y := y2; y < y1; y++ {
			textBoxImg[(len(textBoxImg)-1)-(y-y2)] = n.videoMemory[249-y][x1:x2]
		}
		// Empty paper on bottom row of textbox
		paperImg := make2dArray((x2-x1)+9, 10)
		for x := 0; x < (x2-x1)+9; x++ {
			for y := 0; y < 10; y++ {
				paperImg[y][x] = n.paperColour
			}
		}
		n.muVideoMemory.Unlock()
		n.muDrawQueue.Unlock()
		// Redraw the captured image one char row (10 px) higher, then blank the bottom row.
		n.drawSprite(Sprite{pixels: textBoxImg[10:], x: x1, y: y2 + 10, colour: -1, over: true})
		n.drawSprite(Sprite{pixels: paperImg, x: x1, y: y2, colour: -1, over: true})
	}
	// Set new cursor position
	n.cursorPosition = relCurPos
}
// SetWriting selects a textbox if only 1 parameter is passed (index), or
// defines a textbox if 5 parameters are passed (index, col1, row1, col2,
// row2). Indexes run 0..10; index 0 is selectable but cannot be redefined.
// Invalid parameters cause a panic.
func (n *Nimbus) SetWriting(p ...int) {
	// Validate number of parameters
	if len(p) != 1 && len(p) != 5 {
		// invalid
		panic("SetWriting accepts either 1 or 5 parameters")
	}
	if len(p) == 1 {
		// Select textbox - validate choice first then set it
		if p[0] < 0 || p[0] > 10 {
			panic("SetWriting index out of range")
		}
		oldTextBox := n.selectedTextBox
		n.selectedTextBox = p[0]
		// Set cursor position to 1,1 if different textbox selected
		if oldTextBox != n.selectedTextBox {
			n.cursorPosition = colRow{1, 1}
		}
		return
	}
	// Otherwise define textbox if index is not 0
	if p[0] == 0 {
		panic("SetWriting cannot define index zero")
	}
	// Validate column and row values
	for i := 1; i < 5; i++ {
		if p[i] < 0 {
			panic("Negative row or column values are not allowed")
		}
	}
	if p[2] > 25 || p[4] > 25 {
		panic("Row values above 25 are not allowed")
	}
	// The column limit is taken directly from the screen mode value.
	maxColumns := n.mode
	if p[1] > maxColumns || p[3] > maxColumns {
		panic("Column value out of range for this screen mode")
	}
	// Validate passed - set bottomLeft and topRight colrows.
	// Normalise so (left, upper) is the top-left corner and (right, lower)
	// the bottom-right, whatever order the caller used.
	var upper, lower, left, right int
	if p[1] < p[3] {
		left = p[1]
		right = p[3]
	} else {
		left = p[3]
		right = p[1]
	}
	if p[2] < p[4] {
		upper = p[2]
		lower = p[4]
	} else {
		upper = p[4]
		lower = p[2]
	}
	// Set textbox
	n.textBoxes[p[0]] = textBox{left, upper, right, lower}
	return
} | pkg/nimgobus/cursor.go | 0.587115 | 0.432842 | cursor.go | starcoder
package strconvh
import (
"github.com/apaxa-go/helper/mathh"
"strconv"
)
// ParseUint64 interprets a string s in 10-base and returns the corresponding value i (uint64) and error.
func ParseUint64(stringValue string) (uint64, error) {
return strconv.ParseUint(stringValue, defaultIntegerBase, mathh.Uint64Bits)
}
// ParseInt interprets a string s in 10-base and returns the corresponding value i (int) and error.
func ParseInt(stringValue string) (i int, err error) {
value64, err := strconv.ParseInt(stringValue, defaultIntegerBase, mathh.IntBits)
if err == nil {
i = int(value64)
}
return
}
// ParseInt8 interprets a string s in 10-base and returns the corresponding value i (int8) and error.
func ParseInt8(stringValue string) (i int8, err error) {
value64, err := strconv.ParseInt(stringValue, defaultIntegerBase, mathh.Int8Bits)
if err == nil {
i = int8(value64)
}
return
}
// ParseInt16 interprets a string s in 10-base and returns the corresponding value i (int16) and error.
func ParseInt16(stringValue string) (i int16, err error) {
value64, err := strconv.ParseInt(stringValue, defaultIntegerBase, mathh.Int16Bits)
if err == nil {
i = int16(value64)
}
return
}
// ParseUint interprets a string s in 10-base and returns the corresponding value i (uint) and error.
func ParseUint(stringValue string) (i uint, err error) {
value64, err := strconv.ParseUint(stringValue, defaultIntegerBase, mathh.UintBits)
if err == nil {
i = uint(value64)
}
return
}
// ParseUint8 interprets a string s in 10-base and returns the corresponding value i (uint8) and error.
func ParseUint8(stringValue string) (i uint8, err error) {
value64, err := strconv.ParseUint(stringValue, defaultIntegerBase, mathh.Uint8Bits)
if err == nil {
i = uint8(value64)
}
return
}
// ParseUint16 interprets a string s in 10-base and returns the corresponding value i (uint16) and error.
func ParseUint16(stringValue string) (i uint16, err error) {
value64, err := strconv.ParseUint(stringValue, defaultIntegerBase, mathh.Uint16Bits)
if err == nil {
i = uint16(value64)
}
return
}
// ParseUint32 interprets a string s in 10-base and returns the corresponding value i (uint32) and error.
func ParseUint32(stringValue string) (i uint32, err error) {
value64, err := strconv.ParseUint(stringValue, defaultIntegerBase, mathh.Uint32Bits)
if err == nil {
i = uint32(value64)
}
return
} | back/vendor/github.com/apaxa-go/helper/strconvh/parseint-gen.go | 0.740644 | 0.426979 | parseint-gen.go | starcoder |
package com
import (
"fmt"
"strconv"
"strings"
"time"
)
// Date formats the unix timestamp ti (seconds) using format.
// See DateT for the supported format tokens; the timestamp is
// interpreted in the local time zone (time.Unix).
func Date(ti int64, format string) string {
	t := time.Unix(int64(ti), 0)
	return DateT(t, format)
}

// DateS is like Date but takes the unix timestamp as a decimal string.
// A non-numeric ts silently formats the zero timestamp (the parse error
// is discarded).
func DateS(ts string, format string) string {
	i, _ := strconv.ParseInt(ts, 10, 64)
	return Date(i, format)
}
// Format time.Time struct to string
// MM - month - 01
// M - month - 1, single bit
// DD - day - 02
// D - day 2
// YYYY - year - 2006
// YY - year - 06
// HH - 24 hours - 03
// H - 24 hours - 3
// hh - 12 hours - 03
// h - 12 hours - 3
// mm - minute - 04
// m - minute - 4
// ss - second - 05
// s - second = 5
func DateT(t time.Time, format string) string {
res := strings.Replace(format, "MM", t.Format("01"), -1)
res = strings.Replace(res, "M", t.Format("1"), -1)
res = strings.Replace(res, "DD", t.Format("02"), -1)
res = strings.Replace(res, "D", t.Format("2"), -1)
res = strings.Replace(res, "YYYY", t.Format("2006"), -1)
res = strings.Replace(res, "YY", t.Format("06"), -1)
res = strings.Replace(res, "HH", fmt.Sprintf("%02d", t.Hour()), -1)
res = strings.Replace(res, "H", fmt.Sprintf("%d", t.Hour()), -1)
res = strings.Replace(res, "hh", t.Format("03"), -1)
res = strings.Replace(res, "h", t.Format("3"), -1)
res = strings.Replace(res, "mm", t.Format("04"), -1)
res = strings.Replace(res, "m", t.Format("4"), -1)
res = strings.Replace(res, "ss", t.Format("05"), -1)
res = strings.Replace(res, "s", t.Format("5"), -1)
return res
}
// DateFormat pattern rules.
var datePatterns = []string{
// year
"Y", "2006", // A full numeric representation of a year, 4 digits Examples: 1999 or 2003
"y", "06", //A two digit representation of a year Examples: 99 or 03
// month
"m", "01", // Numeric representation of a month, with leading zeros 01 through 12
"n", "1", // Numeric representation of a month, without leading zeros 1 through 12
"M", "Jan", // A short textual representation of a month, three letters Jan through Dec
"F", "January", // A full textual representation of a month, such as January or March January through December
// day
"d", "02", // Day of the month, 2 digits with leading zeros 01 to 31
"j", "2", // Day of the month without leading zeros 1 to 31
// week
"D", "Mon", // A textual representation of a day, three letters Mon through Sun
"l", "Monday", // A full textual representation of the day of the week Sunday through Saturday
// time
"g", "3", // 12-hour format of an hour without leading zeros 1 through 12
"G", "15", // 24-hour format of an hour without leading zeros 0 through 23
"h", "03", // 12-hour format of an hour with leading zeros 01 through 12
"H", "15", // 24-hour format of an hour with leading zeros 00 through 23
"a", "pm", // Lowercase Ante meridiem and Post meridiem am or pm
"A", "PM", // Uppercase Ante meridiem and Post meridiem AM or PM
"i", "04", // Minutes with leading zeros 00 to 59
"s", "05", // Seconds, with leading zeros 00 through 59
// time zone
"T", "MST",
"P", "-07:00",
"O", "-0700",
// RFC 2822
"r", time.RFC1123Z,
}
var DateFormatReplacer = strings.NewReplacer(datePatterns...)
// Parse Date use PHP time format.
func DateParse(dateString, format string) (time.Time, error) {
return time.ParseInLocation(ConvDateFormat(format), dateString, time.Local)
}
// Convert PHP time format.
func ConvDateFormat(format string) string {
format = DateFormatReplacer.Replace(format)
return format
}
//将时间戳格式化为日期字符窜
func DateFormat(format string, timestamp interface{}) (t string) { // timestamp
switch format {
case "Y-m-d H:i:s", "":
format = "2006-01-02 15:04:05"
case "Y-m-d H:i":
format = "2006-01-02 15:04"
case "y-m-d H:i":
format = "06-01-02 15:04"
case "m-d H:i":
format = "01-02 15:04"
case "Y-m-d":
format = "2006-01-02"
case "y-m-d":
format = "06-01-02"
case "m-d":
format = "01-02"
default:
format = ConvDateFormat(format)
}
sd := Int64(timestamp)
t = time.Unix(sd, 0).Format(format)
return
}
// StrToTime parses a date/time string into a unix timestamp (seconds).
// An optional layout may be passed as the first extra argument; it defaults
// to "2006-01-02 15:04:05" (Go reference-time syntax).
// On a parse failure the error is printed to stdout and 0 is returned.
// NOTE(review): parsing uses time.Parse (UTC), while DateParse above uses
// time.ParseInLocation with time.Local — round trips between the two can be
// offset by the local zone; confirm which is intended.
func StrToTime(str string, args ...string) (unixtime int) {
	layout := "2006-01-02 15:04:05"
	if len(args) > 0 {
		layout = args[0]
	}
	t, err := time.Parse(layout, str)
	if err == nil {
		unixtime = int(t.Unix())
	} else {
		fmt.Println(err, str)
	}
	return
}
// FormatByte renders a byte count in a human-readable unit (B, KB, MB, ...
// in 1024 steps). Call as FormatByte(size[, precision]): size may be any
// common numeric type or a decimal string, precision is the number of
// decimal places (default 0). Invalid argument types are reported on stdout
// and treated as zero.
func FormatByte(args ...interface{}) string {
	units := [...]string{"YB", "ZB", "EB", "PB", "TB", "GB", "MB", "KB", "B"}
	var (
		value     float64
		precision int
	)
	// First argument: the size, accepted as any numeric type or a string.
	if len(args) > 0 {
		switch v := args[0].(type) {
		case float64:
			value = v
		case float32:
			value = float64(v)
		case int64:
			value = float64(v)
		case int32:
			value = float64(v)
		case int:
			value = float64(v)
		case uint64:
			value = float64(v)
		case uint32:
			value = float64(v)
		case uint:
			value = float64(v)
		case string:
			n, _ := strconv.Atoi(v)
			value = float64(n)
		default:
			fmt.Printf("FormatByte error: first param (%#v) invalid.\n", args[0])
		}
	}
	// Second argument: the number of decimal places to print.
	if len(args) > 1 {
		switch v := args[1].(type) {
		case int:
			precision = v
		case int64:
			precision = int(v)
		case int32:
			precision = int(v)
		case uint:
			precision = int(v)
		case uint64:
			precision = int(v)
		case uint32:
			precision = int(v)
		default:
			fmt.Printf("FormatByte error: second param (%#v) invalid.\n", args[1])
		}
	}
	// Walk from "B" towards larger units while the value exceeds 1024.
	idx := len(units)
	for idx--; idx > 0 && value > 1024.0; idx-- {
		value /= 1024.0
	}
	return fmt.Sprintf("%.*f%s", precision, value, units[idx])
}
//格式化耗时
func DateFormatShort(timestamp interface{}) string {
now := time.Now()
year := now.Year()
month := now.Month()
day := now.Day()
cTime := StrToTime(fmt.Sprintf(`%d-%.2d-%.2d 00:00:00`, year, month, day)) //月、日始终保持两位
timestamp2 := Int(timestamp)
if cTime < timestamp2 {
return DateFormat("15:04", timestamp)
}
cTime = StrToTime(fmt.Sprintf(`%d-01-01 00:00:00`, year))
if cTime < timestamp2 {
return DateFormat("01-02", timestamp)
}
return DateFormat("06-01-02", timestamp)
}
//格式化耗时
func FormatPastTime(timestamp interface{}, args ...string) string {
duration := time.Now().Sub(time.Unix(Int64(timestamp), 0))
if u := uint64(duration); u >= uint64(time.Hour)*24 {
format := "Y-m-d H:i:s"
if len(args) > 0 {
format = args[0]
}
return DateFormat(format, timestamp)
}
return FriendlyTime(duration)
}
// FriendlyTime renders a duration in a human-friendly form, picking the unit
// (ns/us/ms/s/m/h) by magnitude. Optional args: args[0] is a date layout used
// for durations of a day or more (default `Y-m-d H:i:s`), args[1] is a suffix
// appended after the short unit forms.
// NOTE(review): in the default branch the raw nanosecond count u is handed to
// DateFormat as if it were a unix timestamp in seconds — this looks like a
// unit mismatch; confirm the intended behaviour before relying on it.
func FriendlyTime(d time.Duration, args ...string) (r string) {
	format := `Y-m-d H:i:s`
	shortt := ``
	switch len(args) {
	case 2:
		shortt = args[1]
		fallthrough
	case 1:
		if args[0] != `` {
			format = args[0]
		}
	}
	u := uint64(d)
	if u < uint64(time.Second) {
		switch {
		case u == 0:
			r = `0s`
		case u < uint64(time.Microsecond):
			r = fmt.Sprintf("%.2f%s", float64(u), `ns`) // nanoseconds
		case u < uint64(time.Millisecond):
			r = fmt.Sprintf("%.2f%s", float64(u)/1000, `us`) // microseconds
		default:
			r = fmt.Sprintf("%.2f%s", float64(u)/1000/1000, `ms`) // milliseconds
		}
		r += shortt
	} else {
		switch {
		case u < uint64(time.Minute):
			r = fmt.Sprintf("%.2f%s", float64(u)/1000/1000/1000, `s`) + shortt // seconds
		case u < uint64(time.Hour):
			r = fmt.Sprintf("%.2f%s", float64(u)/1000/1000/1000/60, `m`) + shortt // minutes
		case u < uint64(time.Hour)*24:
			r = fmt.Sprintf("%.2f%s", float64(u)/1000/1000/1000/60/60, `h`) + shortt // hours
		default:
			r = DateFormat(format, u)
		}
	}
	return
}
// StartTime is captured at package initialisation and marks process start.
var StartTime time.Time = time.Now()

// TotalRunTime returns the time elapsed since StartTime, rendered by FriendlyTime.
func TotalRunTime() string {
	return FriendlyTime(time.Now().Sub(StartTime))
} | vendor/github.com/webx-top/com/time.go | 0.591487 | 0.400867 | time.go | starcoder
package day24
import "log"
type BlackHexes map[Hex]bool
// Stores a hex coordinate where x + y + z = 0
type Hex struct {
x, y, z int
}
// Returns a map of all the black hexes after initialising from the instructions.
// Each instruction is an unseparated run of hex directions (e, w, ne, nw, se,
// sw) walked from the origin; the tile reached at the end is toggled, so an
// even number of flips leaves a tile white (absent from the map).
func GetBlackHexes(instructions []string) BlackHexes {
	blackHexes := make(BlackHexes)
	for _, instruction := range instructions {
		hex := Hex{0, 0, 0}
		instructionRunes := []rune(instruction)
		nextRune := 0
		for nextRune < len(instructionRunes) {
			firstRune := instructionRunes[nextRune]
			switch firstRune {
			case 'e', 'w':
				// Single-letter direction.
				hex = moveHex(hex, string([]rune{firstRune}))
			case 'n', 's':
				// Two-letter direction: consume the following rune too.
				nextRune++
				hex = moveHex(hex, string([]rune{firstRune, instructionRunes[nextRune]}))
			default:
				log.Fatalf("Unrecognised instruction: %v", firstRune)
			}
			nextRune++
		}
		// Toggle the destination tile.
		if blackHexes[hex] {
			delete(blackHexes, hex)
		} else {
			blackHexes[hex] = true
		}
	}
	return blackHexes
}
// Returns the number of black hexes after iterating the floor for a number of days.
// Each day the tiles flip simultaneously: a black tile stays black with 1 or 2
// black neighbours; a white tile turns black with exactly 2 black neighbours.
func BlackHexesAfterDays(blackHexes BlackHexes, numDays int) int {
	for i := 1; i <= numDays; i++ {
		newBlackHexes := make(BlackHexes)
		activeNeighbours := make(map[Hex]int)
		// Count black neighbours, but only for tiles adjacent to at least one
		// black tile. Black tiles with zero black neighbours never enter this
		// map and therefore (correctly) drop out of the next generation.
		for hex := range blackHexes {
			for _, adjacent := range adjacentHexes(hex) {
				activeNeighbours[adjacent]++
			}
		}
		for hex, blackNeighbours := range activeNeighbours {
			if (blackHexes[hex] && blackNeighbours <= 2) || (!blackHexes[hex] && blackNeighbours == 2) {
				newBlackHexes[hex] = true
			}
		}
		blackHexes = newBlackHexes
	}
	return len(blackHexes)
}
// moveHex returns the neighbour of hex in the given compass direction,
// using cube coordinates (the x+y+z == 0 invariant is preserved by every
// case). An unknown direction aborts the program.
func moveHex(hex Hex, direction string) Hex {
	next := hex
	switch direction {
	case "e":
		next.x++
		next.y--
	case "w":
		next.x--
		next.y++
	case "ne":
		next.x++
		next.z--
	case "sw":
		next.x--
		next.z++
	case "nw":
		next.y++
		next.z--
	case "se":
		next.y--
		next.z++
	default:
		log.Fatalf("Unrecognised direction: %v", direction)
	}
	return next
}
// adjacentHexes returns the six neighbouring hexes of hex, one per
// compass direction.
func adjacentHexes(hex Hex) []Hex {
	adjacent := make([]Hex, 0)
	for _, direction := range []string{"e", "w", "ne", "sw", "nw", "se"} {
		adjacent = append(adjacent, moveHex(hex, direction))
	}
	return adjacent
} | day24/day24.go | 0.789883 | 0.488588 | day24.go | starcoder
package main
import "fmt"
func first() {
// first make a mask to reset the bits from position i to j
var i uint = 2
var j uint = 6
var m uint = 19 // 10011
m = m << i
var n uint = 1044 // 10000010100
var ib uint = ^uint(0) >> (64-i)
var jb uint = ^uint(0) << j
var k = jb ^ ib
var r uint = n & k
r = r ^ m
fmt.Printf("%b\n", r)
}
func third() {
var n uint = 1775
fmt.Println(flipBits(n))
/*need to test flip any zero bit
need to find zero bits*/
}
// countMostBitsOneInSeries returns the length of the longest run of
// consecutive 1-bits in n. Bit positions 0..64 are scanned, matching the
// original's assumption of (at most) 64-bit uints: shifting a 64-bit uint
// by 64 yields 0 in Go, so the extra position simply terminates any run.
func countMostBitsOneInSeries(n uint) (x uint) {
	var run uint
	for i := uint(0); i <= 64; i++ {
		if (n>>i)&1 == 1 {
			run++
			if run > x {
				x = run
			}
		} else {
			run = 0
		}
	}
	return
}
// flipBits returns the length of the longest run of 1-bits obtainable by
// flipping exactly one 0-bit of n.
// NOTE(review): if n has no zero bit in the scanned range the loop body
// never runs and 0 is returned — confirm that an all-ones input is out of
// scope for callers.
func flipBits(n uint) (x uint) {
	var i uint = 0
	for ; i <= 64; i++ {
		var r uint = n >> i & uint(1)
		if 0 == (r | uint(0)) {
			// Flip bit i and measure the longest 1-run that results.
			z := countMostBitsOneInSeries((1 << i) | n)
			if z > x {
				x = z
			}
		}
	}
	return
}
func fourth() {
var n uint = 7
x := countBits(n)
var i uint = 1
var y, z uint
for ; i <= 2000000; i++ {
if y == 0 && countBits( (n-i) ) == x {
y = n-i
}
if z == 0 && countBits( (n+i) ) == x {
z = n+i
}
if y != 0 && z != 0 {
break
}
}
fmt.Println(n, y, z)
fmt.Printf("%b %b %b\n", n, y, z)
}
// countBits returns the number of set bits in n (the population count),
// using Kernighan's trick: each n &= n-1 clears the lowest set bit, so
// the loop runs once per 1-bit.
func countBits(n uint) (r uint) {
	for n != 0 {
		n &= n - 1
		r++
	}
	return
}
func sixth() {
var m, n uint = 29, 15
var x = m ^ n
fmt.Println(countBits(x))
}
func seventh() {
var n uint = 555
fmt.Printf("%b\n", n)
n = swap(n)
fmt.Printf("%b\n", n)
}
// swap rotates n left by one bit: the former top bit (bit 63) wraps
// around into bit 0. Assumes a 64-bit uint, like the rest of this file.
//
// Bug fixed: the original read the carry as n >> 64, which per the Go
// spec is always 0 for a 64-bit uint, so the high bit was silently
// dropped instead of wrapped around.
func swap(n uint) (x uint) {
	b := n >> 63
	x = n<<1 | b
	return
}
// main runs each bit-manipulation exercise in turn, printing its result.
func main() {
	first()
	third()
	fourth()
	sixth()
	seventh()
} | bits.go | 0.546012 | 0.404155 | bits.go | starcoder
package primitives
import (
"fmt"
"math"
)
var (
AxisX = V(1, 0, 0)
AxisY = V(0, 1, 0)
AxisZ = V(0, 0, 1)
)
type Vector struct {
X, Y, Z float64
}
func ToVector(p1, p2 Point) Vector {
return V(p2.X-p1.X, p2.Y-p1.Y, p2.Z-p1.Z)
}
func (a Vector) String() string {
return fmt.Sprintf("X%s Y%s Z%s", StrF(a.X), StrF(a.Y), StrF(a.Z))
}
func V(x, y, z float64) Vector {
return Vector{X: x, Y: y, Z: z}
}
func (a Vector) Cross(b Vector) Vector {
x := a.Y*b.Z - a.Z*b.Y
y := a.Z*b.X - a.X*b.Z
z := a.X*b.Y - a.Y*b.X
return V(x, y, z)
}
func (a Vector) Add(b Vector) Vector {
return V(a.X+b.X, a.Y+b.Y, a.Z+b.Z)
}
func (a Vector) Sub(b Vector) Vector {
return V(a.X-b.X, a.Y-b.Y, a.Z-b.Z)
}
func (a Vector) Dot(b Vector) float64 {
return a.X*b.X + a.Y*b.Y + a.Z*b.Z
}
func (a Vector) MulScalar(b float64) Vector {
return Vector{a.X * b, a.Y * b, a.Z * b}
}
func (a Vector) Rotate(rm RotationMatrix) Vector {
return V(a.Dot(rm.X), a.Dot(rm.Y), a.Dot(rm.Z))
}
func (a Vector) ToPoint() Point {
return Point{X: a.X, Y: a.Y, Z: a.Z}
}
func (a Vector) CodirectedWith(b Vector) bool {
return a.Dot(b) >= 0
}
func (a Vector) Length() float64 {
return math.Sqrt(a.LengthSquare())
}
func (a Vector) LengthSquare() float64 {
return a.X*a.X + a.Y*a.Y + a.Z*a.Z
}
func (a Vector) Normalize() Vector {
n := a.Length()
if n == 0 {
return a
}
return V(a.X/n, a.Y/n, a.Z/n)
}
func (a Vector) Reverse() Vector {
return V(-a.X, -a.Y, -a.Z)
}
// Angle returns the angle between a and b in degrees.
// Rounding in the dot/length computation can push the cosine slightly
// outside [-1, 1]; the value is clamped into Acos's domain so that
// near-parallel vectors report 0 and near-antiparallel vectors report 180.
// (The previous code returned 0 for both out-of-range cases, which was
// wrong for antiparallel vectors.) Zero-length inputs still propagate NaN,
// as before.
func (a Vector) Angle(b Vector) float64 {
	v := a.Dot(b) / a.Length() / b.Length()
	if v > 1 {
		v = 1
	} else if v < -1 {
		v = -1
	}
	return math.Acos(v) * 180 / math.Pi
}
func (a Vector) ProjectOn(b Vector) Vector {
bl := b.LengthSquare() // because result = b.MulScalar(projectLen/|b|), where projectLen = b.Dot(a)/|b|
if bl == 0 {
return b
}
return b.MulScalar(b.Dot(a) / bl)
}
func (a Vector) ProjectOnPlane(p Plane) Vector {
return a.Add(a.ProjectOn(p.N).Reverse()) // substracting orthogonal to the plane component from a
}
//Rotate vector a about vector b by angle degrees.
func (a Vector) RotateAbout(b Vector, angle float64) Vector {
// Thanks user MNKY at http://math.stackexchange.com/a/1432182/81266
theta := ToRadians(angle)
prj := a.ProjectOn(b)
perp := a.Sub(prj)
w := b.Cross(perp)
return prj.Add(perp.MulScalar(math.Cos(theta))).Add(w.Normalize().MulScalar(perp.Length()).MulScalar(math.Sin(theta)))
} | primitives/vector.go | 0.856377 | 0.697873 | vector.go | starcoder |
package core
// High level, ORM-like bindings. Like Domain, the models here mirror crust objects. Unlike
// the crust, Models do not store their mirror's data-- this interface is much more functional.
// All functions return the model's "contents"
// Note that this implementation assumes that there's a global, single connection. This might be ok.
// since this is such a difference you'll still need to pass the connection to the model
type model struct {
storage Domain // the domain of the storage appliance responsible for our data
}
type Model interface {
All(string, map[string]interface{}) ([]interface{}, error)
Find(string, map[string]interface{}) ([]interface{}, error)
Create(string, map[string]interface{}) ([]interface{}, error)
Save(string, map[string]interface{}) ([]interface{}, error)
Count(string, map[string]interface{}) ([]interface{}, error)
}
// Set a session and return a new model interface. The session must already be joined
func SetSession(appDomain Domain) Model {
// Note the hardcoded storage domain endpoint. Temporary!
s := "Storage"
return &model{storage: appDomain.Subdomain(s)}
}
// Executes the query against the collection
func (m *model) query(endpoint string, collection string, query map[string]interface{}) ([]interface{}, error) {
r, e := m.storage.Call(endpoint, []interface{}{collection, query}, nil)
Debug("Model operation: %s, Name: %s, Query: %s: Result: %s Error: %v", endpoint, collection, query, r, e)
return r, e
}
// Model query functions. Each mantle should copy this interface, crusts should emulate it
// Arguments: the name of the model, contents of the query based on the call
// All functions share signatures for easier mantle access

// Find returns the documents in collection matching query.
func (m *model) Find(collection string, query map[string]interface{}) ([]interface{}, error) {
	return m.query("collection/find", collection, query)
}

// All returns every document in collection.
// NOTE(review): the query argument is ignored (nil is passed on) — confirm
// callers do not expect it to be honoured.
func (m *model) All(collection string, query map[string]interface{}) ([]interface{}, error) {
	return m.Find(collection, nil)
}

// Create inserts a single document described by query into collection.
func (m *model) Create(collection string, query map[string]interface{}) ([]interface{}, error) {
	return m.query("collection/insert_one", collection, query)
}

// Save updates a single existing document in collection.
func (m *model) Save(collection string, query map[string]interface{}) ([]interface{}, error) {
	// Check the count of the incoming models and call update_many?
	return m.query("collection/update_one", collection, query)
}

// Count returns the number of documents in collection.
// NOTE(review): the query argument is ignored (nil is passed on), so this
// always counts the whole collection.
func (m *model) Count(collection string, query map[string]interface{}) ([]interface{}, error) {
	return m.query("collection/count", collection, nil)
} | model.go | 0.85738 | 0.438725 | model.go | starcoder
package engine
import (
"fmt"
"math"
)
const (
// BlockSize is the size of a block when splitting the image plane.
BlockSize = 128
// Overlap is the size of the pixels that the image overlaps.
Overlap = 14
)
// ImagePlane represents an image in which each pixel has a continuous value.
// Pixels are stored row-major in Buffer (index = x + y*Width).
type ImagePlane struct {
	Width  int
	Height int
	Buffer []float32
}

// NewImagePlaneWidthHeight returns a zero-filled image plane of the given
// width and height.
func NewImagePlaneWidthHeight(width, height int) ImagePlane {
	p := ImagePlane{Width: width, Height: height}
	p.Buffer = make([]float32, width*height)
	return p
}
// NewNormalizedImagePlane creates a normalized image plane from a channel
// image, scaling each 8-bit channel value from [0,255] into [0,1].
// It returns an error when the channel's buffer length does not match its
// declared width*height.
func NewNormalizedImagePlane(img ChannelImage) (ImagePlane, error) {
	p := NewImagePlaneWidthHeight(img.Width, img.Height)
	if len(img.Buffer) != len(p.Buffer) {
		// Bug fixed: the original passed img.Buffer (the whole slice) to the
		// %d verb where the message promised the length, and misspelled "height".
		return ImagePlane{}, fmt.Errorf("invalid image channel: width*height=%d <> len(buffer)=%d", img.Width*img.Height, len(img.Buffer))
	}
	for i := range img.Buffer {
		p.Buffer[i] = float32(img.Buffer[i]) / 255.0
	}
	return p, nil
}
// Index returns the buffer position corresponding to the specified width and height of the image.
// The buffer is laid out row-major: index = width + height*Width.
func (p ImagePlane) Index(width, height int) int {
	return width + height*p.Width
}

// Value returns the value corresponding to the specified width and height of the image.
// It panics with a diagnostic error when the coordinate maps outside the buffer.
func (p ImagePlane) Value(width, height int) float32 {
	i := p.Index(width, height)
	if i < 0 || i >= len(p.Buffer) {
		panic(fmt.Errorf("width %d, height %d, Index %d, len(buf) %d", width, height, i, len(p.Buffer)))
	}
	// fmt.Printf("width %d, height %d, Index %d, len(buf) %d\n", width, height, i, len(p.Buffer))
	return p.Buffer[i]
}
// SegmentAt returns the 3x3 pixels at the specified position.
// [a0][a1][a2]
// [b0][b1][b2]
// [c0][c1][c2] where (x, y) is b1.
// The three-index slice expressions (i : i+3 : i+3) cap each row view at
// exactly three elements. Callers must keep (x, y) at least one pixel away
// from every border, otherwise the slicing panics.
func (p ImagePlane) SegmentAt(x, y int) (a0, a1, a2, b0, b1, b2, c0, c1, c2 float32) {
	// i, j, k are the buffer offsets of (x-1) in rows y-1, y and y+1.
	i := (x - 1) + (y-1)*p.Width
	j := i + p.Width
	k := j + p.Width
	a := p.Buffer[i : i+3 : i+3]
	b := p.Buffer[j : j+3 : j+3]
	c := p.Buffer[k : k+3 : k+3]
	return a[0], a[1], a[2], b[0], b[1], b[2], c[0], c[1], c[2]
}
// SetAt sets the value to the buffer corresponding to the specified width and height of the image.
// No bounds check is performed; an out-of-range coordinate panics on the slice access.
func (p *ImagePlane) SetAt(width, height int, v float32) {
	p.Buffer[p.Index(width, height)] = v
}
// Blocking divides a given image into blocks.
// The three input planes (one per channel) are split into tiles of at most
// BlockSize x BlockSize pixels; consecutive tiles overlap by Overlap pixels,
// and the right/bottom edge tiles shrink to fit the image. It returns the
// per-block channel planes plus the block-grid dimensions (blocksW, blocksH).
func Blocking(initialPlanes [3]ImagePlane) ([][]ImagePlane, int, int) {
	widthInput := initialPlanes[0].Width
	heightInput := initialPlanes[0].Height
	// blocks overlap 14px each other.
	blocksW := int(math.Ceil(float64(widthInput-Overlap) / float64(BlockSize-Overlap)))
	blocksH := int(math.Ceil(float64(heightInput-Overlap) / float64(BlockSize-Overlap)))
	blocks := blocksW * blocksH
	// fmt.Println("BlockSize:", BlockSize)
	// fmt.Printf("blocksW:%d, blocksH:%d, blocks:%d\n", blocksW, blocksH, blocks)
	inputBlocks := make([][]ImagePlane, blocks) // [ [ block0_R, block0_G, block0_B ], [ block1_R, ...] ... ]
	for b := 0; b < blocks; b++ {
		// Blocks are numbered row-major across the grid.
		blockIndexW := b % blocksW
		blockIndexH := b / blocksW
		// fmt.Printf("blockIndexW:%d, blockIndexH:%d\n", blockIndexW, blockIndexH)
		blockWidth := BlockSize
		blockHeight := BlockSize
		if blockIndexW == blocksW-1 {
			blockWidth = widthInput - ((BlockSize - Overlap) * blockIndexW) // right end block
		}
		if blockIndexH == blocksH-1 {
			blockHeight = heightInput - ((BlockSize - Overlap) * blockIndexH) // bottom end block
		}
		// fmt.Printf("\t>>blockWidth:%d, blockHeight:%d\n", blockWidth, blockHeight)
		channels := make([]ImagePlane, len(initialPlanes))
		for i := range channels {
			channels[i] = NewImagePlaneWidthHeight(blockWidth, blockHeight)
		}
		// Copy the (overlapping) source window of each channel into the block.
		for w := 0; w < blockWidth; w++ {
			for h := 0; h < blockHeight; h++ {
				for i := 0; i < len(initialPlanes); i++ {
					targetIndexW := blockIndexW*(BlockSize-Overlap) + w
					targetIndexH := blockIndexH*(BlockSize-Overlap) + h
					channel := initialPlanes[i]
					v := channel.Value(targetIndexW, targetIndexH)
					channels[i].SetAt(w, h, v)
				}
			}
		}
		inputBlocks[b] = channels
	}
	return inputBlocks, blocksW, blocksH
}
// Deblocking combines blocks for each of the R, G, and B channels.
// It reassembles the per-block planes produced for the grid (blocksW x
// blocksH) into three full-size planes, pasting each block at
// blockIndex*blockSize.
// NOTE(review): blockSize is taken from the first block's width, and the
// paste offsets do not subtract the Overlap used by Blocking — confirm the
// callers feed blocks whose geometry matches this assumption.
func Deblocking(outputBlocks [][]ImagePlane, blocksW, blocksH int) [3]ImagePlane {
	blockSize := outputBlocks[0][0].Width
	// Total output size: sum of the first row's widths and first column's heights.
	var width int
	for b := 0; b < blocksW; b++ {
		width += outputBlocks[b][0].Width
	}
	var height int
	for b := 0; b < blocksW*blocksH; b += blocksW {
		height += outputBlocks[b][0].Height
	}
	var outputPlanes [3]ImagePlane // R,G,B
	for b := range outputBlocks {
		block := outputBlocks[b]
		blockIndexW := b % blocksW
		blockIndexH := int(math.Floor(float64(b) / float64(blocksW)))
		for i := 0; i < len(block); i++ {
			if len(outputPlanes[i].Buffer) == 0 {
				// Allocate each full-size plane lazily on first touch.
				outputPlanes[i] = NewImagePlaneWidthHeight(width, height)
			}
			channelBlock := block[i]
			for w := 0; w < channelBlock.Width; w++ {
				for h := 0; h < channelBlock.Height; h++ {
					targetIndexW := blockIndexW*blockSize + w
					targetIndexH := blockIndexH*blockSize + h
					targetIndex := targetIndexH*width + targetIndexW
					v := channelBlock.Value(w, h)
					outputPlanes[i].Buffer[targetIndex] = v
				}
			}
		}
	}
	return outputPlanes
} | engine/image_plane.go | 0.757974 | 0.592048 | image_plane.go | starcoder
package prometheus
import (
"sync"
"time"
"github.com/prometheus/common/model"
kialiConfig "github.com/kiali/kiali/config"
"github.com/kiali/kiali/log"
)
type (
timeInResult struct {
queryTime time.Time
inResult model.Vector
}
timeInOutResult struct {
queryTime time.Time
inResult model.Vector
outResult model.Vector
}
PromCache interface {
GetAllRequestRates(namespace string, ratesInterval string, queryTime time.Time) (bool, model.Vector)
GetAppRequestRates(namespace, app, ratesInterval string, queryTime time.Time) (bool, model.Vector, model.Vector)
GetNamespaceServicesRequestRates(namespace string, ratesInterval string, queryTime time.Time) (bool, model.Vector)
GetServiceRequestRates(namespace, service, ratesInterval string, queryTime time.Time) (bool, model.Vector)
GetWorkloadRequestRates(namespace, workload, ratesInterval string, queryTime time.Time) (bool, model.Vector, model.Vector)
SetAllRequestRates(namespace string, ratesInterval string, queryTime time.Time, inResult model.Vector)
SetAppRequestRates(namespace, app, ratesInterval string, queryTime time.Time, inResult model.Vector, outResult model.Vector)
SetNamespaceServicesRequestRates(namespace string, ratesInterval string, queryTime time.Time, inResult model.Vector)
SetServiceRequestRates(namespace, service, ratesInterval string, queryTime time.Time, inResult model.Vector)
SetWorkloadRequestRates(namespace, workload, ratesInterval string, queryTime time.Time, inResult model.Vector, outResult model.Vector)
}
promCacheImpl struct {
cacheDuration time.Duration
cacheExpiration time.Duration
cacheAllRequestRates map[string]map[string]timeInResult
cacheAppRequestRates map[string]map[string]map[string]timeInOutResult
cacheNsSvcRequestRates map[string]map[string]timeInResult
cacheSvcRequestRates map[string]map[string]map[string]timeInResult
cacheWkRequestRates map[string]map[string]map[string]timeInOutResult
allRequestRatesLock sync.RWMutex
appRequestRatesLock sync.RWMutex
nsSvcRequestRatesLock sync.RWMutex
svcRequestRatesLock sync.RWMutex
wkRequestRatesLock sync.RWMutex
}
)
// NewPromCache builds a PromCache configured from the Kiali Prometheus
// settings and starts the background goroutine that periodically flushes
// every cache map.
func NewPromCache() PromCache {
	cfg := kialiConfig.Get()
	pc := &promCacheImpl{
		cacheDuration:          time.Duration(cfg.ExternalServices.Prometheus.CacheDuration) * time.Second,
		cacheExpiration:        time.Duration(cfg.ExternalServices.Prometheus.CacheExpiration) * time.Second,
		cacheAllRequestRates:   make(map[string]map[string]timeInResult),
		cacheAppRequestRates:   make(map[string]map[string]map[string]timeInOutResult),
		cacheNsSvcRequestRates: make(map[string]map[string]timeInResult),
		cacheSvcRequestRates:   make(map[string]map[string]map[string]timeInResult),
		cacheWkRequestRates:    make(map[string]map[string]map[string]timeInOutResult),
	}
	go pc.watchExpiration()
	return pc
}
// GetAllRequestRates returns the cached namespace-wide request rates, or a
// miss when there is no entry, the entry is older than cacheDuration, or the
// requested queryTime predates the cached one.
func (c *promCacheImpl) GetAllRequestRates(namespace string, ratesInterval string, queryTime time.Time) (bool, model.Vector) {
	c.allRequestRatesLock.RLock()
	defer c.allRequestRatesLock.RUnlock()
	nsRates, okNs := c.cacheAllRequestRates[namespace]
	if !okNs {
		return false, nil
	}
	cached, okRt := nsRates[ratesInterval]
	if !okRt {
		return false, nil
	}
	if queryTime.Before(cached.queryTime) || queryTime.Sub(cached.queryTime) >= c.cacheDuration {
		return false, nil
	}
	log.Tracef("[Prom Cache] GetAllRequestRates [namespace: %s] [ratesInterval: %s] [queryTime: %s]", namespace, ratesInterval, queryTime.String())
	return true, cached.inResult
}
// SetAllRequestRates stores the namespace-wide request-rate vector keyed by
// namespace and rates interval, stamped with the query time.
func (c *promCacheImpl) SetAllRequestRates(namespace string, ratesInterval string, queryTime time.Time, inResult model.Vector) {
	c.allRequestRatesLock.Lock()
	defer c.allRequestRatesLock.Unlock()
	nsRates, ok := c.cacheAllRequestRates[namespace]
	if !ok {
		nsRates = make(map[string]timeInResult)
		c.cacheAllRequestRates[namespace] = nsRates
	}
	nsRates[ratesInterval] = timeInResult{queryTime: queryTime, inResult: inResult}
	log.Tracef("[Prom Cache] SetAllRequestRates [namespace: %s] [ratesInterval: %s] [queryTime: %s]", namespace, ratesInterval, queryTime.String())
}
// GetAppRequestRates returns the cached inbound/outbound request rates for an
// app, or a miss when absent or stale (see GetAllRequestRates for the rules).
func (c *promCacheImpl) GetAppRequestRates(namespace string, app string, ratesInterval string, queryTime time.Time) (bool, model.Vector, model.Vector) {
	c.appRequestRatesLock.RLock()
	defer c.appRequestRatesLock.RUnlock()
	nsRates, okNs := c.cacheAppRequestRates[namespace]
	if !okNs {
		return false, nil, nil
	}
	appRates, okApp := nsRates[app]
	if !okApp {
		return false, nil, nil
	}
	cached, okRt := appRates[ratesInterval]
	if !okRt {
		return false, nil, nil
	}
	if queryTime.Before(cached.queryTime) || queryTime.Sub(cached.queryTime) >= c.cacheDuration {
		return false, nil, nil
	}
	log.Tracef("[Prom Cache] GetAppRequestRates [namespace: %s] [app: %s] [ratesInterval: %s] [queryTime: %s]", namespace, app, ratesInterval, queryTime.String())
	return true, cached.inResult, cached.outResult
}
// SetAppRequestRates stores the inbound and outbound request-rate vectors for
// an app, keyed by namespace, app and rates interval.
func (c *promCacheImpl) SetAppRequestRates(namespace string, app string, ratesInterval string, queryTime time.Time, inResult model.Vector, outResult model.Vector) {
	c.appRequestRatesLock.Lock()
	defer c.appRequestRatesLock.Unlock()
	nsRates, okNs := c.cacheAppRequestRates[namespace]
	if !okNs {
		nsRates = make(map[string]map[string]timeInOutResult)
		c.cacheAppRequestRates[namespace] = nsRates
	}
	appRates, okApp := nsRates[app]
	if !okApp {
		appRates = make(map[string]timeInOutResult)
		nsRates[app] = appRates
	}
	appRates[ratesInterval] = timeInOutResult{
		queryTime: queryTime,
		inResult:  inResult,
		outResult: outResult,
	}
	log.Tracef("[Prom Cache] SetAppRequestRates [namespace: %s] [app: %s] [ratesInterval: %s] [queryTime: %s]", namespace, app, ratesInterval, queryTime.String())
}
// GetNamespaceServicesRequestRates returns the cached per-namespace services
// request rates, or a miss when absent or stale.
func (c *promCacheImpl) GetNamespaceServicesRequestRates(namespace string, ratesInterval string, queryTime time.Time) (bool, model.Vector) {
	c.nsSvcRequestRatesLock.RLock()
	defer c.nsSvcRequestRatesLock.RUnlock()
	nsRates, okNs := c.cacheNsSvcRequestRates[namespace]
	if !okNs {
		return false, nil
	}
	cached, okRt := nsRates[ratesInterval]
	if !okRt {
		return false, nil
	}
	if queryTime.Before(cached.queryTime) || queryTime.Sub(cached.queryTime) >= c.cacheDuration {
		return false, nil
	}
	log.Tracef("[Prom Cache] GetNamespaceServicesRequestRates [namespace: %s] [ratesInterval: %s] [queryTime: %s]", namespace, ratesInterval, queryTime.String())
	return true, cached.inResult
}
// SetNamespaceServicesRequestRates stores the services request-rate vector
// for a namespace, keyed by namespace and rates interval.
func (c *promCacheImpl) SetNamespaceServicesRequestRates(namespace string, ratesInterval string, queryTime time.Time, inResult model.Vector) {
	c.nsSvcRequestRatesLock.Lock()
	defer c.nsSvcRequestRatesLock.Unlock()
	nsRates, ok := c.cacheNsSvcRequestRates[namespace]
	if !ok {
		nsRates = make(map[string]timeInResult)
		c.cacheNsSvcRequestRates[namespace] = nsRates
	}
	nsRates[ratesInterval] = timeInResult{queryTime: queryTime, inResult: inResult}
	log.Tracef("[Prom Cache] SetNamespaceServicesRequestRates [namespace: %s] [ratesInterval: %s] [queryTime: %s]", namespace, ratesInterval, queryTime.String())
}
// GetServiceRequestRates returns the cached request rates for a service, or a
// miss when absent or stale.
func (c *promCacheImpl) GetServiceRequestRates(namespace string, service string, ratesInterval string, queryTime time.Time) (bool, model.Vector) {
	c.svcRequestRatesLock.RLock()
	defer c.svcRequestRatesLock.RUnlock()
	nsRates, okNs := c.cacheSvcRequestRates[namespace]
	if !okNs {
		return false, nil
	}
	svcRates, okSvc := nsRates[service]
	if !okSvc {
		return false, nil
	}
	cached, okRt := svcRates[ratesInterval]
	if !okRt {
		return false, nil
	}
	if queryTime.Before(cached.queryTime) || queryTime.Sub(cached.queryTime) >= c.cacheDuration {
		return false, nil
	}
	log.Tracef("[Prom Cache] GetServiceRequestRates [namespace: %s] [service: %s] [ratesInterval: %s] [queryTime: %s]", namespace, service, ratesInterval, queryTime.String())
	return true, cached.inResult
}
// SetServiceRequestRates stores the request-rate vector for a service, keyed
// by namespace, service and rates interval.
func (c *promCacheImpl) SetServiceRequestRates(namespace string, service string, ratesInterval string, queryTime time.Time, inResult model.Vector) {
	c.svcRequestRatesLock.Lock()
	defer c.svcRequestRatesLock.Unlock()
	nsRates, okNs := c.cacheSvcRequestRates[namespace]
	if !okNs {
		nsRates = make(map[string]map[string]timeInResult)
		c.cacheSvcRequestRates[namespace] = nsRates
	}
	svcRates, okSvc := nsRates[service]
	if !okSvc {
		svcRates = make(map[string]timeInResult)
		nsRates[service] = svcRates
	}
	svcRates[ratesInterval] = timeInResult{queryTime: queryTime, inResult: inResult}
	log.Tracef("[Prom Cache] SetServiceRequestRates [namespace: %s] [service: %s] [ratesInterval: %s] [queryTime: %s]", namespace, service, ratesInterval, queryTime.String())
}
// GetWorkloadRequestRates returns the cached inbound/outbound request rates
// for a workload, or a miss when absent or stale.
func (c *promCacheImpl) GetWorkloadRequestRates(namespace string, workload string, ratesInterval string, queryTime time.Time) (bool, model.Vector, model.Vector) {
	c.wkRequestRatesLock.RLock()
	defer c.wkRequestRatesLock.RUnlock()
	nsRates, okNs := c.cacheWkRequestRates[namespace]
	if !okNs {
		return false, nil, nil
	}
	wkRates, okWk := nsRates[workload]
	if !okWk {
		return false, nil, nil
	}
	cached, okRt := wkRates[ratesInterval]
	if !okRt {
		return false, nil, nil
	}
	if queryTime.Before(cached.queryTime) || queryTime.Sub(cached.queryTime) >= c.cacheDuration {
		return false, nil, nil
	}
	log.Tracef("[Prom Cache] GetWorkloadRequestRates [namespace: %s] [workload: %s] [ratesInterval: %s] [queryTime: %s]", namespace, workload, ratesInterval, queryTime.String())
	return true, cached.inResult, cached.outResult
}
// SetWorkloadRequestRates stores the inbound and outbound request-rate
// vectors for a workload, keyed by namespace, workload and rates interval.
func (c *promCacheImpl) SetWorkloadRequestRates(namespace string, workload string, ratesInterval string, queryTime time.Time, inResult model.Vector, outResult model.Vector) {
	c.wkRequestRatesLock.Lock()
	defer c.wkRequestRatesLock.Unlock()
	if _, okNs := c.cacheWkRequestRates[namespace]; !okNs {
		c.cacheWkRequestRates[namespace] = make(map[string]map[string]timeInOutResult)
	}
	// okWk (not okApp): this is the workload cache, not the app cache.
	if _, okWk := c.cacheWkRequestRates[namespace][workload]; !okWk {
		c.cacheWkRequestRates[namespace][workload] = make(map[string]timeInOutResult)
	}
	c.cacheWkRequestRates[namespace][workload][ratesInterval] = timeInOutResult{
		queryTime: queryTime,
		inResult:  inResult,
		outResult: outResult,
	}
	// Fixed copy-paste bug: this trace previously logged "SetAppRequestRates".
	log.Tracef("[Prom Cache] SetWorkloadRequestRates [namespace: %s] [workload: %s] [ratesInterval: %s] [queryTime: %s]", namespace, workload, ratesInterval, queryTime.String())
}
// Expiration is done globally, this cache is designed as short term, so in the worst case it would populated the queries
// Doing an expiration check per item is costly and it's not necessary in this particular context
func (c *promCacheImpl) watchExpiration() {
for {
time.Sleep(c.cacheExpiration)
c.allRequestRatesLock.Lock()
c.cacheAllRequestRates = make(map[string]map[string]timeInResult)
c.allRequestRatesLock.Unlock()
c.appRequestRatesLock.Lock()
c.cacheAppRequestRates = make(map[string]map[string]map[string]timeInOutResult)
c.appRequestRatesLock.Unlock()
c.nsSvcRequestRatesLock.Lock()
c.cacheNsSvcRequestRates = make(map[string]map[string]timeInResult)
c.nsSvcRequestRatesLock.Unlock()
c.svcRequestRatesLock.Lock()
c.cacheSvcRequestRates = make(map[string]map[string]map[string]timeInResult)
c.svcRequestRatesLock.Unlock()
c.wkRequestRatesLock.Lock()
c.cacheWkRequestRates = make(map[string]map[string]map[string]timeInOutResult)
c.wkRequestRatesLock.Unlock()
log.Infof("[Prom Cache] Expired")
}
} | prometheus/cache.go | 0.516108 | 0.443661 | cache.go | starcoder |
package tort
import (
"strings"
"github.com/go-playground/validator/v10"
)
// ValidatorAssertions test validation errors from https://github.com/go-playground/validator.
// It embeds Assertions (for the testing handle and Failed reporting) and
// holds the ValidationErrors being inspected.
type ValidatorAssertions struct {
	Assertions
	err validator.ValidationErrors
}
// Valid assists in validating validation errors. It wraps err, which must be
// a validator.ValidationErrors, into a ValidatorAssertions; if err is any
// other type the assertion fails and a ValidatorAssertions with no errors is
// returned so chained calls still work.
func (assert Assertions) Valid(err error) ValidatorAssertions {
	assert.t.Helper()
	if ve, ok := err.(validator.ValidationErrors); ok {
		return ValidatorAssertions{
			Assertions: assert,
			err:        ve,
		}
	}
	assert.Failed("error is not a validator error")
	// Preserve the embedded Assertions (and its testing handle): returning a
	// zero ValidatorAssertions{} would make any chained call such as
	// IsNil/For panic on a nil testing handle instead of reporting failure.
	return ValidatorAssertions{Assertions: assert}
}
// IsNil generates an error message if the error isn't nil. An optional msg
// prefixes the failure text.
func (assert ValidatorAssertions) IsNil(msg ...string) {
	assert.t.Helper()
	if assert.err == nil {
		return
	}
	if len(msg) == 0 {
		assert.Failed(`unexpected error "%s"`, assert.err)
		return
	}
	assert.Failed(`%s: %s`, strings.Join(msg, " "), assert.err)
}
// IsNotNil generates an error message when the error is nil. An optional msg
// replaces the default failure text.
func (assert ValidatorAssertions) IsNotNil(msg ...string) {
	assert.t.Helper()
	if assert.err != nil {
		return
	}
	if len(msg) == 0 {
		assert.Failed("expected error wasn't present")
		return
	}
	assert.Failed(strings.Join(msg, " "))
}
// For checks for a validation error matching the field and the kind of error,
// e.g. "required" or "min". If kind is blank, confirms there was any error on
// the given field.
func (assert ValidatorAssertions) For(field, kind string) {
	assert.t.Helper()
	for _, fe := range assert.err {
		if fe.Field() != field {
			continue
		}
		if kind == "" || fe.Tag() == kind {
			return
		}
	}
	assert.Failed(`expected a validation error "%s" on field "%s"`, kind, field)
}
// NotFor checks that the validation error either doesn't exist, or isn't for the given field and
// type of error. If kind is blank, just confirms there wasn't an error on the field.
func (assert ValidatorAssertions) NotFor(field, kind string) {
assert.t.Helper()
for _, fe := range assert.err {
if fe.Field() == field {
if kind == "" || fe.Tag() == kind {
assert.Failed(`didn't expect a validation error "%s" on field "%s"`, kind, field)
}
}
}
} | validator.go | 0.726329 | 0.583708 | validator.go | starcoder |
package gocudnn
/*
#include <cudnn.h>
*/
import "C"
import (
"runtime"
"unsafe"
"github.com/dereklstinson/cutil"
)
//RNNDataD is a RNNDataDescriptor. It wraps cuDNN's cudnnRNNDataDescriptor_t
//and caches the data type and batch size (seqlensize) passed to Set so that
//Get can size its output buffers.
type RNNDataD struct {
	d          C.cudnnRNNDataDescriptor_t
	dtype      DataType // recorded by Set; used by Get to interpret the padding symbol
	seqlensize int32    // batch size (len of seqLengthArray); NOTE(review): only useful if Set records it
	gogc       bool     // when true, Destroy is a no-op and Go GC owns the descriptor
}
//CreateRNNDataD creates an RNNDataD through cudnn's cudnnCreateRNNDataDescriptor.
//When finalizers are enabled the descriptor is registered with the Go garbage
//collector for automatic destruction.
func CreateRNNDataD() (*RNNDataD, error) {
	rdd := new(RNNDataD)
	if err := Status(C.cudnnCreateRNNDataDescriptor(&rdd.d)).error("CreateRNNDataD"); err != nil {
		return nil, err
	}
	if setfinalizer {
		runtime.SetFinalizer(rdd, destroyrnndatadescriptor)
	}
	return rdd, nil
}
/*Set sets the RNNDataD
dataType - The datatype of the RNN data tensor. See cudnnDataType_t.
layout - The memory layout of the RNN data tensor.
maxSeqLength - The maximum sequence length within this RNN data tensor. In the unpacked (padded) layout, this should include the padding vectors in each sequence. In the packed (unpadded) layout, this should be equal to the greatest element in seqLengthArray.
vectorSize -The vector length (i.e. embedding size) of the input or output tensor at each timestep.
seqLengthArray - An integer array the size of the mini-batch number number of elements. Describes the length (i.e. number of timesteps) of each sequence. Each element in seqLengthArray must be greater than 0 but less than or equal to maxSeqLength. In the packed layout, the elements should be sorted in descending order, similar to the layout required by the non-extended RNN compute functions.
paddingFill - For gocudnn it will auto typecast the value into the correct datatype. Just put the value you want used as an float64.
From Documentation:
A user-defined symbol for filling the padding position in RNN output.
This is only effective when the descriptor is describing the RNN output, and the unpacked layout is specified.
The symbol should be in the host memory, and is interpreted as the same data type as that of the RNN data tensor.
*/
func (r *RNNDataD) Set(dtype DataType, layout RNNDataLayout,
	maxSeqLength, vectorsize int32, seqLengthArray []int32, paddingsymbol float64) error {
	r.dtype = dtype
	// Record the batch size so (*RNNDataD).Get can size its seqLengthArray
	// buffer. Previously seqlensize was never set, so Get allocated a
	// zero-length slice and panicked on &sla[0].
	r.seqlensize = int32(len(seqLengthArray))
	// The padding symbol must be presented to cuDNN in the tensor's data type.
	symbol := cscalarbydatatypeforsettensor(dtype, paddingsymbol)
	batchsize := len(seqLengthArray)
	seqlenarray := int32Tocint(seqLengthArray)
	return Status(C.cudnnSetRNNDataDescriptor(r.d, dtype.c(), layout.c(), (C.int)(maxSeqLength), (C.int)(batchsize), (C.int)(vectorsize), &seqlenarray[0], symbol.CPtr())).error("(*RNNDataD)Set")
}
//Get gets the parameters used in Set for RNNDataD.
//NOTE(review): it sizes the seqLengthArray buffer from r.seqlensize, so it is
//only safe to call after that field has been populated — a zero value would
//make &sla[0] index an empty slice.
func (r *RNNDataD) Get() (dtype DataType, layout RNNDataLayout, maxSeqLength, vectorsize int32, seqLengthArray []int32, paddingsymbol float64, err error) {
	// Scratch scalar in the descriptor's data type to receive the padding symbol.
	ps := cscalarbydatatypeforsettensor(r.dtype, paddingsymbol)
	// Buffer for the per-sequence lengths, sized by the recorded batch size.
	sla := make([]C.int, r.seqlensize)
	var (
		cdtype C.cudnnDataType_t
		lo     C.cudnnRNNDataLayout_t
		msl    C.int
		bs     C.int
		vs     C.int
	)
	err = Status(C.cudnnGetRNNDataDescriptor(r.d, &cdtype, &lo, &msl, &bs, &vs, C.int(r.seqlensize), &sla[0], ps.CPtr())).error("(*RNNDATAD)Get")
	dtype = DataType(cdtype)
	layout = RNNDataLayout(lo)
	maxSeqLength = int32(msl)
	vectorsize = int32(vs)
	paddingsymbol = cutil.CScalartoFloat64(ps)
	// Trim to the batch size cuDNN reports when our buffer was larger.
	if r.seqlensize > int32(bs) {
		seqLengthArray = cintToint32(sla[:bs])
	} else {
		seqLengthArray = cintToint32(sla)
	}
	return dtype, layout, maxSeqLength, vectorsize, seqLengthArray, paddingsymbol, err
}
//Destroy destroys the descriptor unless Go garbage collection is managing it
//(finalizers enabled or gogc set), in which case it just returns nil.
func (r *RNNDataD) Destroy() error {
	if !setfinalizer && !r.gogc {
		if err := destroyrnndatadescriptor(r); err != nil {
			return err
		}
		r = nil
	}
	return nil
}
// destroyrnndatadescriptor frees the underlying cudnnRNNDataDescriptor_t.
// Also used as the runtime finalizer for RNNDataD.
func destroyrnndatadescriptor(d *RNNDataD) error {
	return Status(C.cudnnDestroyRNNDataDescriptor(d.d)).error("destroyrnndatadescriptor")
}
//RNNDataLayout are used for flags for data layout. It mirrors cuDNN's
//cudnnRNNDataLayout_t; use the SeqMajor*/BatchMajor* methods to obtain values.
type RNNDataLayout C.cudnnRNNDataLayout_t

// c converts to the raw cgo enum value.
func (r RNNDataLayout) c() C.cudnnRNNDataLayout_t {
	return C.cudnnRNNDataLayout_t(r)
}

// cptr returns a pointer usable as a cgo out-parameter.
func (r *RNNDataLayout) cptr() *C.cudnnRNNDataLayout_t {
	return (*C.cudnnRNNDataLayout_t)(r)
}
//SeqMajorUnPacked sets r to and returns the CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED flag.
//Note the flag methods mutate the receiver as well as returning the value.
func (r *RNNDataLayout) SeqMajorUnPacked() RNNDataLayout {
	*r = RNNDataLayout(C.CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED)
	return *r
}

//SeqMajorPacked sets r to and returns the CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_PACKED flag.
func (r *RNNDataLayout) SeqMajorPacked() RNNDataLayout {
	*r = RNNDataLayout(C.CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_PACKED)
	return *r
}

//BatchMajorUnPacked sets r to and returns the CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED flag.
func (r *RNNDataLayout) BatchMajorUnPacked() RNNDataLayout {
	*r = RNNDataLayout(C.CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED)
	return *r
}
// String implements fmt.Stringer for RNNDataLayout. It compares directly
// against the cuDNN enum constants instead of the flag-mutating helpers.
func (r RNNDataLayout) String() string {
	var x string
	switch r {
	case RNNDataLayout(C.CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED):
		x = "BatchMajorUnPacked"
	case RNNDataLayout(C.CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_PACKED):
		x = "SeqMajorPacked"
	case RNNDataLayout(C.CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED):
		x = "SeqMajorUnPacked"
	default:
		x = "Unsupported Flag"
	}
	return "RNNDataLayout: " + x
}
//RNNPaddingMode is the padding mode flag. It mirrors cuDNN's
//cudnnRNNPaddingMode_t; use the Disabled/Enabled methods to obtain values.
type RNNPaddingMode C.cudnnRNNPaddingMode_t

// c converts to the raw cgo enum value.
func (r RNNPaddingMode) c() C.cudnnRNNPaddingMode_t {
	return C.cudnnRNNPaddingMode_t(r)
}

// cptr returns a pointer usable as a cgo out-parameter.
func (r *RNNPaddingMode) cptr() *C.cudnnRNNPaddingMode_t {
	return (*C.cudnnRNNPaddingMode_t)(r)
}
//Disabled sets r to and returns RNNPaddingMode(C.CUDNN_RNN_PADDED_IO_DISABLED).
//Note the flag methods mutate the receiver as well as returning the value.
func (r *RNNPaddingMode) Disabled() RNNPaddingMode {
	*r = RNNPaddingMode(C.CUDNN_RNN_PADDED_IO_DISABLED)
	return *r
}

//Enabled sets r to and returns RNNPaddingMode(C.CUDNN_RNN_PADDED_IO_ENABLED).
func (r *RNNPaddingMode) Enabled() RNNPaddingMode {
	*r = RNNPaddingMode(C.CUDNN_RNN_PADDED_IO_ENABLED)
	return *r
}
// String implements fmt.Stringer for RNNPaddingMode. It compares directly
// against the cuDNN enum constants instead of the flag-mutating helpers.
func (r RNNPaddingMode) String() string {
	var x string
	switch r {
	case RNNPaddingMode(C.CUDNN_RNN_PADDED_IO_DISABLED):
		x = "Disabled"
	case RNNPaddingMode(C.CUDNN_RNN_PADDED_IO_ENABLED):
		x = "Enabled"
	default:
		x = "Unsupported Flag"
	}
	return "RNNPaddingMode: " + x
}
//SetPaddingMode sets the padding mode of the RNN descriptor to the flag passed.
func (r *RNND) SetPaddingMode(mode RNNPaddingMode) error {
	status := C.cudnnSetRNNPaddingMode(r.descriptor, mode.c())
	return Status(status).error("SetRNNPaddingMode")
}
//GetPaddingMode gets the padding mode currently set on the RNN descriptor.
func (r *RNND) GetPaddingMode() (mode RNNPaddingMode, err error) {
	var m C.cudnnRNNPaddingMode_t
	err = Status(C.cudnnGetRNNPaddingMode(r.descriptor, &m)).error("GetRNNPaddingMode")
	mode = RNNPaddingMode(m)
	return mode, err
}
/*ForwardTrainingEx - From cudnn documentation
This routine is the extended version of the cudnnRNNForwardTraining function.
The ForwardTrainingEx allows the user to use unpacked (padded) layout for input x and output y.
In the unpacked layout, each sequence in the mini-batch is considered to be of fixed length, specified by
maxSeqLength in its corresponding RNNDataDescriptor. Each fixed-length sequence, for example,
the nth sequence in the mini-batch, is composed of a valid segment specified by the seqLengthArray[n]
in its corresponding RNNDataDescriptor; and a padding segment to make the combined sequence length equal to maxSeqLength.
With the unpacked layout, both sequence major (i.e. time major) and batch major are supported.
For backward compatibility, the packed sequence major layout is supported.
However, similar to the non-extended function cudnnRNNForwardTraining, the sequences
in the mini-batch need to be sorted in descending order according to length.
Parameters:
handle - Input. Handle to a previously created cuDNN context.
xD - Input. A previously initialized RNN Data descriptor. The dataType, layout, maxSeqLength , batchSize, and seqLengthArray need to match that of yD.
x - Input. Data pointer to the GPU memory associated with the RNN data descriptor xD.
The input vectors are expected to be laid out in memory according to the layout specified by xD.
The elements in the tensor (including elements in the padding vector) must be densely packed, and no strides are supported.
hxD - Input. A fully packed tensor descriptor describing the initial hidden state of the RNN.
The first dimension of the tensor depends on the direction argument passed to the cudnnSetRNNDescriptor call used to initialize rnnDesc. Moreover:
If direction is CUDNN_UNIDIRECTIONAL then the first dimension should match the numLayers argument passed to cudnnSetRNNDescriptor.
If direction is CUDNN_BIDIRECTIONAL then the first dimension should match double the numLayers argument passed to cudnnSetRNNDescriptor.
The second dimension must match the batchSize parameter in xD.
The third dimension depends on whether RNN mode is CUDNN_LSTM and whether LSTM projection is enabled. Moreover:
If RNN mode is CUDNN_LSTM and LSTM projection is enabled, the third dimension must match the
recProjSize argument passed to cudnnSetRNNProjectionLayers call used to set rnnDesc.
Otherwise, the third dimension must match the hiddenSize argument passed to the cudnnSetRNNDescriptor call used to initialize rnnDesc .
hx - Input. Data pointer to GPU memory associated with the tensor descriptor hxD.
If a NULL pointer is passed, the initial hidden state of the network will be initialized to zero.
cxD - Input. A fully packed tensor descriptor describing the initial cell state for LSTM networks.
The first dimension of the tensor depends on the direction argument passed to the cudnnSetRNNDescriptor call used to initialize rnnDesc. Moreover:
If direction is CUDNN_UNIDIRECTIONAL the first dimension should match the numLayers argument passed to cudnnSetRNNDescriptor.
If direction is CUDNN_BIDIRECTIONAL the first dimension should match double the numLayers argument passed to cudnnSetRNNDescriptor.
The second dimension must match the first dimension of the tensors described in xD.
The third dimension must match the hiddenSize argument passed to the cudnnSetRNNDescriptor call used to initialize rnnDesc. The tensor must be fully packed.
cx - Input. Data pointer to GPU memory associated with the tensor descriptor cxD. If a NULL pointer
is passed, the initial cell state of the network will be initialized to zero.
wD - Input. Handle to a previously initialized filter descriptor describing the weights for the RNN.
w- Input. Data pointer to GPU memory associated with the filter descriptor wD.
yD - Input. A previously initialized RNN data descriptor. The dataType, layout, maxSeqLength , batchSize, and seqLengthArray
need to match that of dyD and dxD. The parameter vectorSize depends on whether RNN mode is CUDNN_LSTM and
whether LSTM projection is enabled and whether the network is bidirectional.
In specific: For uni-directional network, if RNN mode is CUDNN_LSTM and LSTM projection is enabled,
the parameter vectorSize must match the recProjSize argument passed to cudnnSetRNNProjectionLayers
call used to set rnnDesc. If the network is bidirectional, then multiply the value by 2.
Otherwise, for uni-directional network, the parameter vectorSize must match the
hiddenSize argument passed to the cudnnSetRNNDescriptor call used
to initialize rnnDesc. If the network is bidirectional, then multiply the value by 2.
y - Output. Data pointer to GPU memory associated with the RNN data descriptor yD.
The input vectors are expected to be laid out in memory according to the layout
specified by yD. The elements in the tensor (including elements in the padding vector)
must be densely packed, and no strides are supported.
hyD - Input. A fully packed tensor descriptor describing the final hidden state of the RNN. The descriptor must be set exactly the same as hxD.
hy - Output. Data pointer to GPU memory associated with the tensor descriptor hyD. If a NULL pointer is passed, the final hidden state of the network will not be saved.
cyD - Input. A fully packed tensor descriptor describing the final cell state for LSTM networks. The descriptor must be set exactly the same as cxD.
cy- Output. Data pointer to GPU memory associated with the tensor descriptor cyD. If a NULL pointer is passed, the final cell state of the network will be not be saved.
wspace - Input. Data pointer to GPU memory to be used as a wspace for this call.
wspacesib - Input. Specifies the size in bytes of the provided wspace.
rspace -Input/Output. Data pointer to GPU memory to be used as a reserve space for this call.
rspacesib - Input. Specifies the size in bytes of the provided rspace
*/
func (r *RNND) ForwardTrainingEx(h *Handle,
	xD *RNNDataD, x cutil.Mem,
	hxD *TensorD, hx cutil.Mem,
	cxD *TensorD, cx cutil.Mem,
	wD *FilterD, w cutil.Mem,
	yD *RNNDataD, y cutil.Mem,
	hyD *TensorD, hy cutil.Mem,
	cyD *TensorD, cy cutil.Mem,
	wspace cutil.Mem, wspacesib uint,
	rspace cutil.Mem, rspacesib uint) error {
	// Both execution paths issue the identical cuDNN call; build it once so
	// the worker-queue path and the direct path cannot drift apart (the
	// previous version duplicated the 20-argument call verbatim).
	fwd := func() error {
		return Status(C.cudnnRNNForwardTrainingEx(h.x,
			r.descriptor,
			xD.d, x.Ptr(),
			hxD.descriptor, hx.Ptr(),
			cxD.descriptor, cx.Ptr(),
			wD.descriptor, w.Ptr(),
			yD.d, y.Ptr(),
			hyD.descriptor, hy.Ptr(),
			cyD.descriptor, cy.Ptr(),
			// The eight nil pairs are the optional keys/cAttn/iAttn/queries
			// descriptors and buffers, unused by this binding.
			nil, nil, nil, nil, nil, nil, nil, nil,
			wspace.Ptr(), C.size_t(wspacesib),
			rspace.Ptr(), C.size_t(rspacesib))).error("(r *RNND) ForwardTrainingEx")
	}
	if h.w != nil {
		return h.w.Work(fwd)
	}
	return fwd()
}
//ForwardTrainingExUS is like ForwardTrainingEx but uses unsafe.Pointer instead of cutil.Mem.
func (r *RNND) ForwardTrainingExUS(h *Handle,
	xD *RNNDataD, x unsafe.Pointer,
	hxD *TensorD, hx unsafe.Pointer,
	cxD *TensorD, cx unsafe.Pointer,
	wD *FilterD, w unsafe.Pointer,
	yD *RNNDataD, y unsafe.Pointer,
	hyD *TensorD, hy unsafe.Pointer,
	cyD *TensorD, cy unsafe.Pointer,
	wspace unsafe.Pointer, wspacesib uint,
	rspace unsafe.Pointer, rspacesib uint) error {
	// Single definition of the cuDNN call shared by the worker-queue and
	// direct paths (previously duplicated verbatim).
	fwd := func() error {
		return Status(C.cudnnRNNForwardTrainingEx(h.x,
			r.descriptor,
			xD.d, x,
			hxD.descriptor, hx,
			cxD.descriptor, cx,
			wD.descriptor, w,
			yD.d, y,
			hyD.descriptor, hy,
			cyD.descriptor, cy,
			// Optional attention/query descriptors and buffers, unused here.
			nil, nil, nil, nil, nil, nil, nil, nil,
			wspace, C.size_t(wspacesib),
			rspace, C.size_t(rspacesib))).error("(r *RNND) ForwardTrainingExUS")
	}
	if h.w != nil {
		return h.w.Work(fwd)
	}
	return fwd()
}
/*ForwardInferenceEx - from cudnn documentation
This routine is the extended version of the cudnnRNNForwardInference function.
The ForwardTrainingEx allows the user to use unpacked (padded) layout for input x and output y.
In the unpacked layout, each sequence in the mini-batch is considered to be of fixed length, specified by maxSeqLength in its corresponding RNNDataDescriptor.
Each fixed-length sequence, for example, the nth sequence in the mini-batch, is composed of a valid segment,
specified by the seqLengthArray[n] in its corresponding RNNDataDescriptor, and a padding segment to make the combined sequence length equal to maxSeqLength.
With unpacked layout, both sequence major (i.e. time major) and batch major are supported.
For backward compatibility, the packed sequence major layout is supported.
However, similar to the non-extended function cudnnRNNForwardInference, the sequences in the mini-batch need to be sorted in descending order according to length.
Parameters
handle - Input. Handle to a previously created cuDNN context.
xD- Input. A previously initialized RNN Data descriptor. The dataType, layout, maxSeqLength , batchSize, and seqLengthArray need to match that of yD.
x -Input. Data pointer to the GPU memory associated with the RNN data descriptor xD. The vectors are expected to be laid out in memory according to the layout specified by xD.
The elements in the tensor (including elements in the padding vector) must be densely packed, and no strides are supported.
hxD - Input. A fully packed tensor descriptor describing the initial hidden state of the RNN. The first dimension of the tensor depends on the direction argument passed to the cudnnSetRNNDescriptor call used to initialize rnnDesc:
If direction is CUDNN_UNIDIRECTIONAL the first dimension should match the numLayers argument passed to cudnnSetRNNDescriptor.
If direction is CUDNN_BIDIRECTIONAL the first dimension should match double the numLayers argument passed to cudnnSetRNNDescriptor.
The second dimension must match the batchSize parameter described in xD.
The third dimension depends on whether RNN mode is CUDNN_LSTM and whether LSTM projection is enabled. In specific:
If RNN mode is CUDNN_LSTM and LSTM projection is enabled, the third dimension must match the recProjSize argument passed to cudnnSetRNNProjectionLayers call used to set rnnDesc.
Otherwise, the third dimension must match the hiddenSize argument passed to the cudnnSetRNNDescriptor call used to initialize rnnDesc.
hx - Input. Data pointer to GPU memory associated with the tensor descriptor hxD. If a NULL pointer is passed, the initial hidden state of the network will be initialized to zero.
cxD -Input. A fully packed tensor descriptor describing the initial cell state for LSTM networks.
The first dimension of the tensor depends on the direction argument passed to the cudnnSetRNNDescriptor call used to initialize rnnDesc:
If direction is CUDNN_UNIDIRECTIONAL the first dimension should match the numLayers argument passed to cudnnSetRNNDescriptor.
If direction is CUDNN_BIDIRECTIONAL the first dimension should match double the numLayers argument passed to cudnnSetRNNDescriptor.
The second dimension must match the batchSize parameter in xD. The third dimension must match the hiddenSize argument passed to the cudnnSetRNNDescriptor call used to initialize rnnDesc.
cx - Input. Data pointer to GPU memory associated with the tensor descriptor cxD.
If a NULL pointer is passed, the initial cell state of the network will be initialized to zero.
wD - Input. Handle to a previously initialized filter descriptor describing the weights for the RNN.
w - Input. Data pointer to GPU memory associated with the filter descriptor wD.
yD - Input. A previously initialized RNN data descriptor. The dataType, layout, maxSeqLength , batchSize, and seqLengthArray must match that of dyD and dxD.
The parameter vectorSize depends on whether RNN mode is CUDNN_LSTM and whether LSTM projection is enabled and whether the network is bidirectional.
In specific: For uni-directional network, if RNN mode is CUDNN_LSTM and LSTM projection is enabled,
the parameter vectorSize must match the recProjSize argument passed to cudnnSetRNNProjectionLayers call used to set rnnDesc.
If the network is bidirectional, then multiply the value by 2.
Otherwise, for uni-directional network, the parameter vectorSize must match the hiddenSize argument passed
to the cudnnSetRNNDescriptor call used to initialize rnnDesc. If the network is bidirectional, then multiply the value by 2.
y - Output. Data pointer to the GPU memory associated with the RNN data descriptor yD.
The vectors are expected to be laid out in memory according to the layout specified by yD.
The elements in the tensor (including elements in the padding vector) must be densely packed, and no strides are supported.
hyD - Input. A fully packed tensor descriptor describing the final hidden state of the RNN. The descriptor must be set exactly the same way as hxD.
hy - Output. Data pointer to GPU memory associated with the tensor descriptor hyD. If a NULL pointer is passed, the final hidden state of the network will not be saved.
cyD - Input. A fully packed tensor descriptor describing the final cell state for LSTM networks. The descriptor must be set exactly the same way as cxD.
cy -Output. Data pointer to GPU memory associated with the tensor descriptor cyD. If a NULL pointer is passed, the final cell state of the network will be not be saved.
wspace - Input. Data pointer to GPU memory to be used as a wspace for this call.
wspacesib - Input. Specifies the size in bytes of the provided wspace.
*/
func (r *RNND) ForwardInferenceEx(
	h *Handle,
	xD *RNNDataD, x cutil.Mem,
	hxD *TensorD, hx cutil.Mem,
	cxD *TensorD, cx cutil.Mem,
	wD *FilterD, w cutil.Mem,
	yD *RNNDataD, y cutil.Mem,
	hyD *TensorD, hy cutil.Mem,
	cyD *TensorD, cy cutil.Mem,
	wspace cutil.Mem, wspacesib uint,
) error {
	// Single definition of the cuDNN call shared by the worker-queue and
	// direct paths (previously duplicated verbatim).
	infer := func() error {
		return Status(C.cudnnRNNForwardInferenceEx(h.x,
			r.descriptor,
			xD.d, x.Ptr(),
			hxD.descriptor, hx.Ptr(),
			cxD.descriptor, cx.Ptr(),
			wD.descriptor, w.Ptr(),
			yD.d, y.Ptr(),
			hyD.descriptor, hy.Ptr(),
			cyD.descriptor, cy.Ptr(),
			// Optional attention/query descriptors and buffers, unused here.
			nil, nil, nil, nil, nil, nil, nil, nil,
			wspace.Ptr(), C.size_t(wspacesib))).error(" (r *RNND) ForwardInferenceEx")
	}
	if h.w != nil {
		return h.w.Work(infer)
	}
	return infer()
}
//ForwardInferenceExUS is like ForwardInferenceEx but uses unsafe.Pointer instead of cutil.Mem
func (r *RNND) ForwardInferenceExUS(
	h *Handle,
	xD *RNNDataD, x unsafe.Pointer,
	hxD *TensorD, hx unsafe.Pointer,
	cxD *TensorD, cx unsafe.Pointer,
	wD *FilterD, w unsafe.Pointer,
	yD *RNNDataD, y unsafe.Pointer,
	hyD *TensorD, hy unsafe.Pointer,
	cyD *TensorD, cy unsafe.Pointer,
	wspace unsafe.Pointer, wspacesib uint,
) error {
	// Single call site for the cgo invocation, shared between the
	// worker path and the direct path. The unused optional arguments
	// are passed as nil.
	call := func() error {
		return Status(C.cudnnRNNForwardInferenceEx(h.x,
			r.descriptor,
			xD.d, x,
			hxD.descriptor, hx,
			cxD.descriptor, cx,
			wD.descriptor, w,
			yD.d, y,
			hyD.descriptor, hy,
			cyD.descriptor, cy,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			nil,
			wspace, C.size_t(wspacesib))).error("(r *RNND) ForwardInferenceExUS")
	}
	if h.w != nil {
		// Route through the handle's worker when one is configured.
		return h.w.Work(call)
	}
	return call()
}
/*BackwardDataEx - Taken from cudnn documentation
This routine is the extended version of the function cudnnRNNBackwardData.
This function cudnnRNNBackwardDataEx allows the user to use unpacked (padded) layout for input y and output dx.
In the unpacked layout, each sequence in the mini-batch is considered to be of fixed length, specified by maxSeqLength in its corresponding RNNDataDescriptor.
Each fixed-length sequence, for example, the nth sequence in the mini-batch, is composed of a valid segment specified
by the seqLengthArray[n] in its corresponding RNNDataDescriptor; and a padding segment to make the combined sequence length equal to maxSeqLength.
With the unpacked layout, both sequence major (i.e. time major) and batch major are supported.
For backward compatibility, the packed sequence major layout is supported.
However, similar to the non-extended function cudnnRNNBackwardData, the sequences in the mini-batch need to be sorted in descending order according to length.
Parameters:
handle is handle passed to all cudnn funcs. needs to be initialized before using.
yD -Input. A previously initialized RNN data descriptor.
Must match or be the exact same descriptor previously passed into ForwardTrainingEx.
y -Input. Data pointer to the GPU memory associated with the RNN data descriptor yD.
The vectors are expected to be laid out in memory according to the layout specified by yD.
The elements in the tensor (including elements in the padding vector) must be densely packed, and no strides are supported.
Must contain the exact same data previously produced by ForwardTrainingEx.
dyD -Input. A previously initialized RNN data descriptor.
The dataType, layout, maxSeqLength , batchSize, vectorSize and seqLengthArray need to match the yD previously passed to ForwardTrainingEx.
dy -Input.Data pointer to the GPU memory associated with the RNN data descriptor dyD.
The vectors are expected to be laid out in memory according to the layout specified by dyD.
The elements in the tensor (including elements in the padding vector) must be densely packed, and no strides are supported.
dhyD -Input. A fully packed tensor descriptor describing the gradients at the final hidden state of the RNN.
The first dimension of the tensor depends on the direction argument passed to the (*RNND)Set(params) call used to initialize rnnDesc.
Moreover:
If direction is CUDNN_UNIDIRECTIONAL the first dimension should match the numLayers argument passed to ((*RNND)Set(params).)
If direction is CUDNN_BIDIRECTIONAL the first dimension should match double the numLayers argument passed to (*RNND)Set(params).
The second dimension must match the batchSize parameter in xD.
The third dimension depends on whether RNN mode is CUDNN_LSTM and whether LSTM projection is enabled. Moreover:
If RNN mode is CUDNN_LSTM and LSTM projection is enabled, the third dimension must match the recProjSize argument passed to (*RNND)SetProjectionLayers(params) call used to set rnnDesc.
Otherwise, the third dimension must match the hiddenSize argument passed to the (*RNND)Set(params) call used to initialize rnnDesc.
dhy
Input. Data pointer to GPU memory associated with the tensor descriptor dhyD. If a NULL pointer is passed, the gradients at the final hidden state of the network will be initialized to zero.
dcyD - Input. A fully packed tensor descriptor describing the gradients at the final cell state of the RNN. The first dimension of the tensor depends on the direction argument passed to the (*RNND)Set(params) call used to initialize rnnDesc. Moreover:
If direction is CUDNN_UNIDIRECTIONAL the first dimension should match the numLayers argument passed to (*RNND)Set(params).
If direction is CUDNN_BIDIRECTIONAL the first dimension should match double the numLayers argument passed to (*RNND)Set(params).
The second dimension must match the first dimension of the tensors described in xD.
The third dimension must match the hiddenSize argument passed to the (*RNND)Set(params) call used to initialize rnnDesc. The tensor must be fully packed.
dcy - Input. Data pointer to GPU memory associated with the tensor descriptor dcyD. If a NULL pointer is passed, the gradients at the final cell state of the network will be initialized to zero.
wD -Input. Handle to a previously initialized filter descriptor describing the weights for the RNN.
w -Input. Data pointer to GPU memory associated with the filter descriptor wD.
hxD -Input. A fully packed tensor descriptor describing the initial hidden state of the RNN. Must match or be the exact same descriptor previously passed into ForwardTrainingEx.
hx -Input. Data pointer to GPU memory associated with the tensor descriptor hxD. If a NULL pointer is passed, the initial hidden state of the network will be initialized to zero. Must contain the exact same data previously passed into ForwardTrainingEx, or be NULL if NULL was previously passed to ForwardTrainingEx.
cxD - Input. A fully packed tensor descriptor describing the initial cell state for LSTM networks. Must match or be the exact same descriptor previously passed into ForwardTrainingEx.
cx -Input. Data pointer to GPU memory associated with the tensor descriptor cxD. If a NULL pointer is passed, the initial cell state of the network will be initialized to zero. Must contain the exact same data previously passed into ForwardTrainingEx, or be NULL if NULL was previously passed to ForwardTrainingEx.
dxD - Input. A previously initialized RNN data descriptor. The dataType, layout, maxSeqLength, batchSize, vectorSize and seqLengthArray need to match that of xD previously passed to ForwardTrainingEx.
dx -Output. Data pointer to the GPU memory associated with the RNN data descriptor dxD. The vectors are expected to be laid out in memory according to the layout specified by dxD. The elements in the tensor (including elements in the padding vector) must be densely packed, and no strides are supported.
dhxD -Input. A fully packed tensor descriptor describing the gradient at the initial hidden state of the RNN. The descriptor must be set exactly the same way as dhyD.
dhx- Output. Data pointer to GPU memory associated with the tensor descriptor dhxD. If a NULL pointer is passed, the gradient at the hidden input of the network will not be set.
dcxD-Input. A fully packed tensor descriptor describing the gradient at the initial cell state of the RNN. The descriptor must be set exactly the same way as dcyD.
dcx -Output. Data pointer to GPU memory associated with the tensor descriptor dcxD. If a NULL pointer is passed, the gradient at the cell input of the network will not be set.
wspace - Input. Data pointer to GPU memory to be used as a wspace for this call.
wspacesib - Input. Specifies the size in bytes of the provided wspace.
rspace - Input/Output. Data pointer to GPU memory to be used as a reserve space for this call.
rspacesib - Input. Specifies the size in bytes of the provided rspace.
*/
func (r *RNND) BackwardDataEx(h *Handle,
	yD *RNNDataD, y cutil.Mem,
	dyD *RNNDataD, dy cutil.Mem,
	dhyD *TensorD, dhy cutil.Mem,
	dcyD *TensorD, dcy cutil.Mem,
	wD *FilterD, w cutil.Mem,
	hxD *TensorD, hx cutil.Mem,
	cxD *TensorD, cx cutil.Mem,
	dxD *RNNDataD, dx cutil.Mem,
	dhxD *TensorD, dhx cutil.Mem,
	dcxD *TensorD, dcx cutil.Mem,
	wspace cutil.Mem, wspacesib uint,
	rspace cutil.Mem, rspacesib uint) error {
	// Single call site for the cgo invocation, shared between the
	// worker path and the direct path. The unused optional arguments
	// (dcDesc/dcAttn and dkeys) are passed as nil.
	call := func() error {
		return Status(C.cudnnRNNBackwardDataEx(h.x,
			r.descriptor,
			yD.d, y.Ptr(),
			dyD.d, dy.Ptr(),
			nil, nil,
			dhyD.descriptor, dhy.Ptr(),
			dcyD.descriptor, dcy.Ptr(),
			wD.descriptor, w.Ptr(),
			hxD.descriptor, hx.Ptr(),
			cxD.descriptor, cx.Ptr(),
			dxD.d, dx.Ptr(),
			dhxD.descriptor, dhx.Ptr(),
			dcxD.descriptor, dcx.Ptr(),
			nil,
			nil,
			wspace.Ptr(), C.size_t(wspacesib),
			rspace.Ptr(), C.size_t(rspacesib))).error("(r *RNND) BackwardDataEx")
	}
	if h.w != nil {
		// Route through the handle's worker when one is configured.
		return h.w.Work(call)
	}
	return call()
}
//BackwardDataExUS is like BackwardDataEx but uses unsafe.Pointer instead of cutil.Mem
func (r *RNND) BackwardDataExUS(h *Handle,
	yD *RNNDataD, y unsafe.Pointer,
	dyD *RNNDataD, dy unsafe.Pointer,
	dhyD *TensorD, dhy unsafe.Pointer,
	dcyD *TensorD, dcy unsafe.Pointer,
	wD *FilterD, w unsafe.Pointer,
	hxD *TensorD, hx unsafe.Pointer,
	cxD *TensorD, cx unsafe.Pointer,
	dxD *RNNDataD, dx unsafe.Pointer,
	dhxD *TensorD, dhx unsafe.Pointer,
	dcxD *TensorD, dcx unsafe.Pointer,
	wspace unsafe.Pointer, wspacesib uint,
	rspace unsafe.Pointer, rspacesib uint) error {
	// Single call site for the cgo invocation, shared between the
	// worker path and the direct path. The unused optional arguments
	// are passed as nil.
	call := func() error {
		return Status(C.cudnnRNNBackwardDataEx(h.x,
			r.descriptor,
			yD.d, y,
			dyD.d, dy,
			nil, nil,
			dhyD.descriptor, dhy,
			dcyD.descriptor, dcy,
			wD.descriptor, w,
			hxD.descriptor, hx,
			cxD.descriptor, cx,
			dxD.d, dx,
			dhxD.descriptor, dhx,
			dcxD.descriptor, dcx,
			nil,
			nil,
			wspace, C.size_t(wspacesib),
			rspace, C.size_t(rspacesib))).error("(r *RNND) BackwardDataExUS")
	}
	if h.w != nil {
		// Route through the handle's worker when one is configured.
		return h.w.Work(call)
	}
	return call()
}
/*BackwardWeightsEx -from cudnn documentation
This routine is the extended version of the function cudnnRNNBackwardWeights.
This function cudnnRNNBackwardWeightsEx allows the user to use unpacked (padded) layout for input x and output dw.
In the unpacked layout, each sequence in the mini-batch is considered to be of fixed length,
specified by maxSeqLength in its corresponding RNNDataDescriptor. Each fixed-length sequence,
for example, the nth sequence in the mini-batch, is composed of a valid segment specified by
the seqLengthArray[n] in its corresponding RNNDataDescriptor; and a padding segment to
make the combined sequence length equal to maxSeqLength.
With the unpacked layout, both sequence major (i.e. time major) and batch major are supported.
For backward compatibility, the packed sequence major layout is supported.
However, similar to the non-extended function cudnnRNNBackwardWeights, the sequences in the
mini-batch need to be sorted in descending order according to length.
Parameters:
handle - Input. Handle to a previously created cuDNN context.
xD - Input. A previously initialized RNN data descriptor. Must match or
be the exact same descriptor previously passed into ForwardTrainingEx.
x - Input. Data pointer to GPU memory associated with the tensor descriptors
in the array xD. Must contain the exact same data previously passed into ForwardTrainingEx.
hxD - Input. A fully packed tensor descriptor describing the initial hidden state of the RNN.
Must match or be the exact same descriptor previously passed into ForwardTrainingEx.
hx - Input. Data pointer to GPU memory associated with the tensor descriptor hxD.
If a NULL pointer is passed, the initial hidden state of the network will be initialized to zero.
Must contain the exact same data previously passed into ForwardTrainingEx, or be NULL if NULL was previously passed to ForwardTrainingEx.
yD - Input. A previously initialized RNN data descriptor.
Must match or be the exact same descriptor previously passed into ForwardTrainingEx.
y -Input. Data pointer to GPU memory associated with the output tensor descriptor yD.
Must contain the exact same data previously produced by ForwardTrainingEx.
wspace - Input. Data pointer to GPU memory to be used as a wspace for this call.
wspacesib - Input. Specifies the size in bytes of the provided wspace.
dwD- Input. Handle to a previously initialized filter descriptor describing the gradients of the weights for the RNN.
dw - Input/Output. Data pointer to GPU memory associated with the filter descriptor dwD.
rspace - Input. Data pointer to GPU memory to be used as a reserve space for this call.
rspacesib - Input. Specifies the size in bytes of the provided rspace
*/
func (r *RNND) BackwardWeightsEx(h *Handle,
	xD *RNNDataD, x cutil.Mem,
	hxD *TensorD, hx cutil.Mem,
	yD *RNNDataD, y cutil.Mem,
	wspace cutil.Mem, wspacesib uint,
	dwD *FilterD, dw cutil.Mem,
	rspace cutil.Mem, rspacesib uint,
) error {
	// Single call site for the cgo invocation, shared between the
	// worker path and the direct path.
	call := func() error {
		return Status(C.cudnnRNNBackwardWeightsEx(
			h.x,
			r.descriptor,
			xD.d, x.Ptr(),
			hxD.descriptor, hx.Ptr(),
			yD.d, y.Ptr(),
			wspace.Ptr(), C.size_t(wspacesib),
			dwD.descriptor, dw.Ptr(),
			rspace.Ptr(), C.size_t(rspacesib),
		)).error("(r *RNND) BackwardWeightsEx")
	}
	if h.w != nil {
		// Route through the handle's worker when one is configured.
		return h.w.Work(call)
	}
	return call()
}
//BackwardWeightsExUS is like BackwardWeightsEx but with unsafe.Pointer instead of cutil.Mem
func (r *RNND) BackwardWeightsExUS(h *Handle,
xD *RNNDataD, x unsafe.Pointer,
hxD *TensorD, hx unsafe.Pointer,
yD *RNNDataD, y unsafe.Pointer,
wspace unsafe.Pointer, wspacesib uint,
dwD *FilterD, dw unsafe.Pointer,
rspace unsafe.Pointer, rspacesib uint,
) error {
if h.w != nil {
return h.w.Work(func() error {
return Status(C.cudnnRNNBackwardWeightsEx(
h.x,
r.descriptor,
xD.d, x,
hxD.descriptor, hx,
yD.d, y,
wspace, C.size_t(wspacesib),
dwD.descriptor, dw,
rspace, C.size_t(rspacesib),
)).error("(r *RNND) BackwardWeightsExUS")
})
}
return Status(C.cudnnRNNBackwardWeightsEx(
h.x,
r.descriptor,
xD.d, x,
hxD.descriptor, hx,
yD.d, y,
wspace, C.size_t(wspacesib),
dwD.descriptor, dw,
rspace, C.size_t(rspacesib),
)).error("(r *RNND) BackwardWeightsExUS")
} | cudnnRNN_data_padding.go | 0.663342 | 0.444203 | cudnnRNN_data_padding.go | starcoder |
package zflag
import "strconv"
// -- float64 Value

// float64Value adapts a *float64 to the flag Value interface.
type float64Value float64

// newFloat64Value stores val in p and returns p as a flag Value.
func newFloat64Value(val float64, p *float64) *float64Value {
	*p = val
	return (*float64Value)(p)
}

// Set parses s as a 64-bit float and stores the result.
// Note: the parsed value is stored even when ParseFloat reports an error
// (matching the original/pflag behavior).
func (f *float64Value) Set(s string) error {
	v, err := strconv.ParseFloat(s, 64)
	*f = float64Value(v)
	return err
}

// Get returns the current value as a float64.
// (Receiver renamed from s to f for consistency with the other methods;
// staticcheck ST1016.)
func (f *float64Value) Get() interface{} {
	return float64(*f)
}

// Type returns the name of the flag value type.
func (f *float64Value) Type() string {
	return "float64"
}

// String formats the value using the shortest representation that
// round-trips ('g' format, precision -1).
func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
// GetFloat64 returns the float64 value of the flag with the given name.
// It returns an error if the flag does not exist or is not of type float64.
func (f *FlagSet) GetFloat64(name string) (float64, error) {
	val, err := f.getFlagType(name, "float64")
	if err != nil {
		return 0, err
	}
	return val.(float64), nil
}
// MustGetFloat64 is like GetFloat64, but panics on error.
func (f *FlagSet) MustGetFloat64(name string) float64 {
	v, err := f.GetFloat64(name)
	if err == nil {
		return v
	}
	panic(err)
}
// Float64Var defines a float64 flag with specified name, default value, and usage string.
// The argument p points to a float64 variable in which to store the value of the flag.
// Any Opt values are applied to the flag on registration.
func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string, opts ...Opt) {
	f.Var(newFloat64Value(value, p), name, usage, opts...)
}
// Float64Var defines a float64 flag with specified name, default value, and usage string
// on the default CommandLine flag set.
// The argument p points to a float64 variable in which to store the value of the flag.
func Float64Var(p *float64, name string, value float64, usage string, opts ...Opt) {
	CommandLine.Float64Var(p, name, value, usage, opts...)
}
// Float64 defines a float64 flag with specified name, default value, and usage string.
// The return value is the address of a float64 variable that stores the value of the flag.
func (f *FlagSet) Float64(name string, value float64, usage string, opts ...Opt) *float64 {
	p := new(float64)
	f.Float64Var(p, name, value, usage, opts...)
	return p
}
// Float64 defines a float64 flag with specified name, default value, and usage string
// on the default CommandLine flag set.
// The return value is the address of a float64 variable that stores the value of the flag.
func Float64(name string, value float64, usage string, opts ...Opt) *float64 {
	return CommandLine.Float64(name, value, usage, opts...)
}
package list
import (
"fmt"
"github.com/flowonyx/functional/errors"
)
// InsertAt inserts newValue into existing at the given index.
// If the index is not in the range of indexes for values, it will return a nil slice and a IndexOutOfRangeErr.
func InsertAt[T any](index int, newValue T, existing []T) ([]T, error) {
if index < 0 || index > len(existing) {
return nil, fmt.Errorf("%w: InsertAt(%d, _, [%d]%T)", errors.IndexOutOfRangeErr, index, len(existing), existing)
}
return InsertManyAt(index, []T{newValue}, existing)
}
// InsertManyAt inserts newValues into existing at the given index and returns
// the resulting slice.
// If the index is not in the range of indexes for existing, it will return a
// nil slice and a IndexOutOfRangeErr.
// The result is built in a freshly allocated slice: the previous append-based
// implementation wrote through the backing arrays of existing (and, when
// capacity allowed, newValues), corrupting data still referenced by the caller.
func InsertManyAt[T any](index int, newValues []T, existing []T) ([]T, error) {
	if index < 0 || index > len(existing) {
		return nil, fmt.Errorf("%w: InsertManyAt(%d, _, [%d]%T)", errors.IndexOutOfRangeErr, index, len(existing), existing)
	}
	result := make([]T, 0, len(existing)+len(newValues))
	result = append(result, existing[:index]...)
	result = append(result, newValues...)
	result = append(result, existing[index:]...)
	return result, nil
}
// RemoveAt removes the item at index from values.
// If index is not in the range of indexes for values, it will return a nil slice and a IndexOutOfRangeErr.
func RemoveAt[T any](index int, values []T) ([]T, error) {
if index < 0 || index >= len(values) {
return nil, fmt.Errorf("%w: RemoveAt(%d, [%d]%T)", errors.IndexOutOfRangeErr, index, len(values), values)
}
return RemoveManyAt(index, 1, values)
}
// RemoveManyAt removes count number of items starting at index from values.
// If index is not in the range of indexes for values, it will return a nil slice and a IndexOutOfRangeErr.
// If count is larger the the number of items in values starting at index, it will only remove as many items as is in the slice.
func RemoveManyAt[T any](index int, count int, values []T) ([]T, error) {
if index < 0 || index >= len(values) {
return nil, fmt.Errorf("%w: RemoveManyAt(%d, [%d]%T)", errors.IndexOutOfRangeErr, index, len(values), values)
}
count, _ = Min(count, len(values)-index)
return append(values[0:index], values[index+count:]...), nil
} | list/insertRemove.go | 0.662469 | 0.553023 | insertRemove.go | starcoder |
package nist_sp800_22
import (
"math"
)
// Input Size Recommendation
// NIST recommends m = 9 or m = 10, n >= 10^6
// m should be chosen so that m ≈ log_2(M)
//
// OverlappingTemplateMatching implements the NIST SP800-22 Overlapping
// Template Matching test (section 2.8). B is the m-bit template searched for
// and eachBlockSize (M) is the length in bits of each block. It returns the
// P-value, the pass/fail decision at significance level 0.01, and a nil error.
// NOTE(review): the bit sequence under test is read from the package-level
// variable epsilon, not from a parameter — confirm epsilon is populated
// before calling.
func OverlappingTemplateMatching(B []uint8, eachBlockSize uint64) (float64, bool, error) {
	// Original Parameter
	var m int = len(B)             // template length in bits
	var n int = len(epsilon)       // total length of the sequence under test
	var M uint64 = eachBlockSize   // The length in bits of the substring of ε to be tested.
	var N uint64 = (uint64(n) / M) // The number of independent blocks. N has been fixed at 8 in the test code.
	// (1) Partition the sequence into N independent blocks of length M.
	var blocks [][]uint8 = make([][]uint8, N)
	var v []float64 = make([]float64, 6) // the number of occurrences of B in each block by incrementing an array v[i]
	var partitionStart uint64 = 0
	var partitionEnd uint64 = M
	for j := range blocks {
		// Blocks are views into epsilon (no copy).
		blocks[j] = epsilon[partitionStart:partitionEnd]
		partitionStart = partitionEnd
		partitionEnd = partitionEnd + M
	}
	// fmt.Println("N", N)
	//var hit uint64 = 0
	// (2) Search for matches of the template B in each block (windows may
	// overlap). Counting stops at 5 because all counts >= 5 fall into bin v[5].
	var numberOfOccurrences uint64
	for _, eachBlock := range blocks {
		numberOfOccurrences = 0
		for bitPosition := 0; bitPosition <= len(eachBlock)-m; bitPosition++ {
			if isEqualBetweenBitsArray(eachBlock[bitPosition:bitPosition+m], B) {
				numberOfOccurrences++
				if numberOfOccurrences >= 5 {
					goto RECORD_V_ARRAY
				}
			}
		}
	RECORD_V_ARRAY:
		v[numberOfOccurrences]++
	}
	/*
		for j := range blocks {
			hit = 0
			for bitPosition := 0; bitPosition <= int(M)-m; bitPosition++ {
				for i := range B {
					if blocks[j][bitPosition+i] != B[i] {
						goto UN_HIT
					}
				}
				// Hit
				hit++
			UN_HIT:
			}
			// Misprint
			// In this part, There is example error. Page 40.
			if hit > 0 {
				if hit > 5 {
					v[5]++
				} else {
					v[hit]++
				}
			}
		}
	*/
	// (3) Compute values for λ and η
	// that will be used to compute the theoretical probabilities π_i corresponding to the classes of v0:
	var _float64_m float64 = float64(m)
	var lambda float64 = (float64(M) - _float64_m + 1) / math.Pow(2, _float64_m)
	var eta float64 = lambda / 2.0
	// fmt.Println("lambda\t", lambda)
	// fmt.Println("eta\t", eta)
	// Page 40.
	// (4) Compute χ^2 as specified in Section 3.8 (Page. 74)
	var pi []float64 = []float64{0.364091, 0.185659, 0.139381, 0.100571, 0.070432, 0.139865} // On page 74
	// var pi []float64 = []float64{0.324652, 0.182617, 0.142670, 0.106645, 0.077147, 0.166269}
	// var p float64 = math.Exp(-1 * eta)
	// fmt.Println("P(U=0)\t", p)
	// Compute Probabilities
	// pi[0..K-1] are recomputed from eta via Pr below, so the literal values
	// above only seed the slice; pi[K] absorbs the remaining probability mass.
	sum := 0.0
	K := 5
	for i := 0; i < K; i++ {
		pi[i] = Pr(i, eta)
		// fmt.Printf("Pr(%d, %.1f) = %.8f\n", i, eta, Pr(i, eta))
		sum += pi[i]
	}
	pi[K] = 1 - sum
	// fmt.Println("N", N)
	// fmt.Println("v", v)
	// fmt.Println("pi", pi)
	// Pearson chi-square statistic over the 6 occurrence-count bins.
	var chi_square float64 = 0
	var _float64_N_ float64 = float64(N)
	for i := range v {
		var temp float64 = _float64_N_ * pi[i]
		chi_square += (v[i] - temp) * (v[i] - temp) / temp
	}
	// fmt.Println("chi_square\t", chi_square)
	// (5) Compute P-value via the incomplete gamma function with K/2 = 5/2
	// degrees-of-freedom parameter.
	var P_value float64 = igamc(2.5, chi_square/2.0)
	// Misprint report : in Page 41. P-value = igamc(5.0/2.0, 3.167729/2.0) = 0.274932
	// But igamc(5.0/2.0, 3.167729/2.0) = 0.6741449650657756 in Cephes.
	return P_value, DecisionRule(P_value, 0.01), nil
}
/*
func Factorial(n uint64) (result uint64) {
if n > 0 {
result = n * Factorial(n-1)
return result
}
return 1
}
func Sum(x uint64) float64 {
if x == 0 {
return 0.0
}
var ret float64 = 0.0
var _x float64 = float64(x)
var i float64
for i = 0.0; i < _x; i = i + 1.0 {
ret = ret + i
}
return ret
}
// https://encyclopediaofmath.org/wiki/Confluent_hypergeometric_function
func KummerFunction(a float64, lambda float64, z float64) float64 {
var result float64 = 1.0
var current float64 = 1.0
var index uint64 = 1
for {
current *= (a + Sum(index-1)) / (lambda + Sum(index-1)) * z / float64(Factorial(index))
result += current
// fmt.Println(index, current)
index++
if current < 0.000000001 {
break
}
}
return result
}
// Reference : Page 74. function P(U = u)
func Pr_ver2(u int, eta float64) float64 {
if u == 0 {
return math.Exp(-1 * eta)
} else {
return eta * math.Exp(-2*eta) / math.Exp2(float64(u)) * KummerFunction(float64(u+1), 2, eta)
}
}
*/
// Reference : https://github.com/terrillmoore/NIST-Statistical-Test-Suite/blob/master/sts/src/overlappingTemplateMatchings.c#L95-L110
//
// Pr returns the theoretical probability P(U = u) of observing exactly u
// overlapping template matches in a block, for the parameter eta.
func Pr(u int, eta float64) float64 {
	// The u == 0 case has the closed form e^-eta.
	if u == 0 {
		return math.Exp(-1 * eta)
	}
	// Otherwise sum the series over l = 1..u, working in log space via
	// Lgamma to avoid overflow in the factorial terms.
	total := 0.0
	for l := 1; l <= u; l++ {
		lgU, _ := math.Lgamma(float64(u))
		lgL, _ := math.Lgamma(float64(l))
		lgL1, _ := math.Lgamma(float64(l + 1))
		lgUL1, _ := math.Lgamma(float64(u - l + 1))
		total += math.Exp(-1*eta - float64(u)*math.Log(2) + float64(l)*math.Log(eta) - lgL1 + lgU - lgL - lgUL1)
	}
	return total
}
package afm
import (
"fmt"
"io"
"star-tex.org/x/tex/font/fixed"
)
// direction holds the metrics that may differ between the two AFM
// writing directions.
type direction struct {
	// underlinePosition is the distance from the baseline for centering
	// underlining strokes.
	underlinePosition fixed.Int16_16
	// underlineThickness is the stroke width for underlining.
	underlineThickness fixed.Int16_16
	// italicAngle is the angle (in degrees counter-clockwise from the vertical)
	// of the dominant vertical stroke of the font.
	italicAngle fixed.Int16_16
	// charWidth is the width vector of this font's program characters.
	charWidth charWidth
	// isFixedPitch indicates whether the program is a fixed pitch (monospace) font.
	isFixedPitch bool
}

// charWidth is a 2D character width vector.
type charWidth struct {
	x fixed.Int16_16 // x component of the width vector of a font's program characters.
	y fixed.Int16_16 // y component of the width vector of a font's program characters.
}
// charMetric holds the metrics of a single character entry.
type charMetric struct {
	// c is the decimal value of default character code.
	// c is -1 if the character is not encoded.
	c int
	// name is the PostScript name of this character.
	name string
	// w0 is the character width vector for writing direction 0.
	w0 charWidth
	// w1 is the character width vector for writing direction 1.
	w1 charWidth
	// vv holds the components of a vector from origin 0 to origin 1.
	// origin 0 is the origin for writing direction 0.
	// origin 1 is the origin for writing direction 1.
	vv [2]fixed.Int16_16
	// bbox is the character bounding box.
	bbox bbox
	// ligs is a ligature sequence.
	ligs []lig
}
// bbox is a bounding box described by its lower-left (llx, lly) and
// upper-right (urx, ury) corners.
type bbox struct {
	llx, lly fixed.Int16_16
	urx, ury fixed.Int16_16
}

// lig is a ligature.
type lig struct {
	// succ is the name of the successor
	succ string
	// name is the name of the composite ligature, consisting
	// of the current character and the successor.
	name string
}
// Font holds the metrics parsed from an Adobe Font Metrics (AFM) file.
type Font struct {
	// metricsSets defines the writing direction.
	// 0: direction 0 only.
	// 1: direction 1 only.
	// 2: both directions.
	metricsSets int

	fontName   string // fontName is the name of the font program as presented to the PostScript language 'findfont' operator.
	fullName   string // fullName is the full text name of the font.
	familyName string // familyName is the name of the typeface family to which the font belongs.
	weight     string // weight is the weight of the font (ex: Regular, Bold, Light).
	bbox       bbox   // bbox is the font bounding box.
	version    string // version is the font program version identifier.
	notice     string // notice contains the font name trademark or copyright notice.

	// encodingScheme specifies the default encoding vector used for this font
	// program (ex: AdobeStandardEncoding, JIS12-88-CFEncoding, ...)
	// Special font program might state FontSpecific.
	encodingScheme string
	mappingScheme  int    // mappingScheme is the AFM 'MappingScheme' value (character mapping scheme; see the AFM spec).
	escChar        int    // escChar is the AFM 'EscChar' value (escape byte used by escape mapping schemes; see the AFM spec).
	characterSet   string // characterSet describes the character set (glyph complement) of this font program.
	characters     int    // characters describes the number of characters defined in this font program.
	isBaseFont     bool   // isBaseFont indicates whether this font is a base font program.

	// vvector holds the components of a vector from origin 0 to origin 1.
	// origin 0 is the origin for writing direction 0.
	// origin 1 is the origin for writing direction 1.
	// vvector is required when metricsSet is 2.
	vvector [2]fixed.Int16_16

	isFixedV  bool // isFixedV indicates whether vvector is the same for every character in this font.
	isCIDFont bool // isCIDFont indicates whether the font is a CID-keyed font.

	capHeight fixed.Int16_16 // capHeight is usually the y-value of the top of the capital 'H'.
	xHeight   fixed.Int16_16 // xHeight is typically the y-value of the top of the lowercase 'x'.
	ascender  fixed.Int16_16 // ascender is usually the y-value of the top of the lowercase 'd'.
	descender fixed.Int16_16 // descender is typically the y-value of the bottom of the lowercase 'p'.
	stdHW     fixed.Int16_16 // stdHW specifies the dominant width of horizontal stems.
	stdVW     fixed.Int16_16 // stdVW specifies the dominant width of vertical stems.

	// blendAxisTypes, blendDesignPositions, blendDesignMap and weightVector
	// describe multiple-master (blended) font designs per the AFM spec;
	// they are stored as parsed and not interpreted here.
	blendAxisTypes       []string
	blendDesignPositions [][]fixed.Int16_16
	blendDesignMap       [][][]fixed.Int16_16
	weightVector         []fixed.Int16_16

	// direction holds per-writing-direction metrics.
	// NOTE(review): three slots for two writing directions — presumably the
	// extra slot relates to the "both directions" case; confirm against the parser.
	direction   [3]direction
	charMetrics []charMetric // charMetrics lists the per-character metric entries.
	composites  []composite  // composites lists the composite-character entries.
	tkerns      []trackKern  // tkerns lists the track kerning entries.
	pkerns      []kernPair   // pkerns lists the pair kerning entries.
}
// newFont returns a Font initialized with the AFM defaults
// (IsBaseFont defaults to true when the keyword is absent).
func newFont() Font {
	var fnt Font
	fnt.isBaseFont = true
	return fnt
}
// Parse parses an AFM file.
func Parse(r io.Reader) (Font, error) {
var (
fnt = newFont()
p = newParser(r)
)
err := p.parse(&fnt)
if err != nil {
return fnt, fmt.Errorf("could not parse AFM file: %w", err)
}
return fnt, nil
} | font/afm/font.go | 0.547706 | 0.487124 | font.go | starcoder |
package g
// Zero2 is the zero-valued vec2 (the origin).
var Zero2 = V2{}

// V2 is a 2D vector with float32 components.
type V2 struct{ X, Y float32 }
// RandomV2 returns a random vec2 inside rect r, with each component
// drawn via RandomBetween over the rect's extent.
func RandomV2(r Rect) V2 {
	return V2{
		RandomBetween(r.Min.X, r.Max.X),
		RandomBetween(r.Min.Y, r.Max.Y),
	}
}
// RandomV2Circle returns a random vec2 built from a point in the
// [-1,1]x[-1,1] square, normalized and scaled by radius.
// NOTE(review): Normalize leaves vectors shorter than 1 unchanged, so
// results can fall inside the circle rather than exactly on it — confirm
// whether "on the circle" or "within the disc" is the intent.
func RandomV2Circle(radius float32) V2 {
	v := V2{RandomBetween(-1, 1), RandomBetween(-1, 1)}
	return v.Normalize().Scale(radius)
}
// XY returns both components.
func (a V2) XY() (x, y float32) { return a.X, a.Y }

// XYZ returns the components extended to 3D as x, y, 0.
func (a V2) XYZ() (x, y, z float32) { return a.X, a.Y, 0 }

// Add adds two vectors and returns the result.
func (a V2) Add(b V2) V2 { return V2{a.X + b.X, a.Y + b.Y} }

// AddScale adds b scaled by s to a and returns the result (a + b*s).
func (a V2) AddScale(b V2, s float32) V2 { return V2{a.X + b.X*s, a.Y + b.Y*s} }

// Sub subtracts b from a and returns the result.
func (a V2) Sub(b V2) V2 { return V2{a.X - b.X, a.Y - b.Y} }

// Dot returns the dot product of a and b.
func (a V2) Dot(b V2) float32 { return a.X*b.X + a.Y*b.Y }

// Scale multiplies each component by s and returns the result.
func (a V2) Scale(s float32) V2 { return V2{a.X * s, a.Y * s} }

// Length returns the Euclidean length of the vector.
func (a V2) Length() float32 { return Sqrt(a.X*a.X + a.Y*a.Y) }

// Length2 returns the squared length (avoids the square root in Length).
func (a V2) Length2() float32 { return a.X*a.X + a.Y*a.Y }

// Distance returns the Euclidean distance from a to b.
func (a V2) Distance(b V2) float32 {
	dx, dy := a.X-b.X, a.Y-b.Y
	return Sqrt(dx*dx + dy*dy)
}

// Distance2 returns the squared distance from a to b.
func (a V2) Distance2(b V2) float32 {
	dx, dy := a.X-b.X, a.Y-b.Y
	return dx*dx + dy*dy
}
// Normalize returns the vector scaled toward unit length.
// The divisor is clamped to at least 1, so vectors with length < 1 are
// returned unchanged (this also avoids division by zero for the zero
// vector); it is therefore not a strict normalization for short vectors.
func (a V2) Normalize() V2 {
	m := a.Length()
	if m < 1 {
		m = 1
	}
	return V2{a.X / m, a.Y / m}
}
// Negate returns the vector with both components negated.
func (a V2) Negate() V2 { return V2{-a.X, -a.Y} }

// Cross returns the 2D cross product (z component) of a and b.
func (a V2) Cross(b V2) float32 { return a.X*b.Y - a.Y*b.X }

// NearZero reports whether the squared length is below 0.0001
// (i.e. length below 0.01).
func (a V2) NearZero() bool { return a.Length2() < 0.0001 }

// Rotate returns the vector rotated by angle (radians), counter-clockwise
// for a y-up axis convention.
func (a V2) Rotate(angle float32) V2 {
	cs, sn := Cos(angle), Sin(angle)
	return V2{a.X*cs - a.Y*sn, a.X*sn + a.Y*cs}
}

// Angle returns the angle of the vector from the +X axis, in radians (Atan2).
func (a V2) Angle() float32 { return Atan2(a.Y, a.X) }

// Rotate90 rotates a vec2 by 90 degrees ccw
func (a V2) Rotate90() V2 { return V2{-a.Y, a.X} }

// Rotate90c rotates a vec2 by 90 degrees cw
func (a V2) Rotate90c() V2 { return V2{a.Y, -a.X} }

// Rotate180 rotates a vec2 by 180 degrees
func (a V2) Rotate180() V2 { return V2{-a.X, -a.Y} }
package packets
import "github.com/orionowy/f1-telemetry-go/pkg/math"
// The motion packet gives physics data for all the cars being driven.
// There is additional data for the car being driven with the goal of being able to drive a motion platform setup.
// Frequency: Rate as specified in menus
// Size: 1464 bytes
// Version: 1
type CarMotionData struct {
WorldPositionX float32 // World space X position
WorldPositionY float32 // World space Y position
WorldPositionZ float32 // World space Z position
WorldVelocityX float32 // Velocity in world space X
WorldVelocityY float32 // Velocity in world space Y
WorldVelocityZ float32 // Velocity in world space Z
WorldForwardDirX int16 // World space forward X direction (normalised)
WorldForwardDirY int16 // World space forward Y direction (normalised)
WorldForwardDirZ int16 // World space forward Z direction (normalised)
WorldRightDirX int16 // World space right X direction (normalised)
WorldRightDirY int16 // World space right Y direction (normalised)
WorldRightDirZ int16 // World space right Z direction (normalised)
GForceLateral float32 // Lateral G-Force component
GForceLongitudinal float32 // Longitudinal G-Force component
GForceVertical float32 // Vertical G-Force component
Yaw float32 // Yaw angle in radians
Pitch float32 // Pitch angle in radians
Roll float32 // Roll angle in radians
}
type PacketMotionData struct {
Header PacketHeader // Header
CarMotionData [22]CarMotionData // Data for all cars on track
// Extra player car ONLY data
SuspensionPosition [4]float32 // Note: All wheel arrays have the following order:
SuspensionVelocity [4]float32 // RL, RR, FL, FR
SuspensionAcceleration [4]float32 // RL, RR, FL, FR
WheelSpeed [4]float32 // Speed of each wheel
WheelSlip [4]float32 // Slip ratio for each wheel
LocalVelocityX float32 // Velocity in local space
LocalVelocityY float32 // Velocity in local space
LocalVelocityZ float32 // Velocity in local space
AngularVelocityX float32 // Angular velocity x-component
AngularVelocityY float32 // Angular velocity y-component
AngularVelocityZ float32 // Angular velocity z-component
AngularAccelerationX float32 // Angular velocity x-component
AngularAccelerationY float32 // Angular velocity y-component
AngularAccelerationZ float32 // Angular velocity z-component
FrontWheelsAngle float32 // Current front wheels angle in radians
}
func (p *PacketMotionData) Self() CarMotionData {
return p.CarMotionData[p.Header.PlayerCarIndex]
}
func (p *PacketMotionData) LocalVelocityAsVector3() *math.Vector3 {
return math.NewVector3(p.LocalVelocityX, p.LocalVelocityY, p.LocalVelocityZ)
}
func (p *PacketMotionData) AngularVelocityAsVector3() *math.Vector3 {
return math.NewVector3(p.AngularVelocityX, p.AngularVelocityY, p.AngularVelocityZ)
}
func (p *PacketMotionData) AngularAccelerationAsVector3() *math.Vector3 {
return math.NewVector3(p.AngularAccelerationX, p.AngularAccelerationY, p.AngularAccelerationZ)
}
func (p *CarMotionData) WorldPositionAsVector3() *math.Vector3 {
return math.NewVector3(p.WorldPositionX, p.WorldPositionY, p.WorldPositionZ)
}
func (p *CarMotionData) WorldVelocityAsVector3() *math.Vector3 {
return math.NewVector3(p.WorldVelocityX, p.WorldVelocityY, p.WorldVelocityZ)
} | pkg/packets/motion.go | 0.82748 | 0.723212 | motion.go | starcoder |
package search
import (
"github.com/christat/search"
"math"
"time"
)
// DepthFirstBranchAndBound performs a depth-first search iteratively. An upper bound is tightened every time a solution is found,
// pruning costlier descendants and stopping once no better solution can be found. Because of its nature (minimization of positive costs)
// it is not expected to work correctly with negative costs. Maximization problems should be redefined accordingly.
// Parameter bound can be left as the default float64 (0); the algorithm will then assume an initial bound of plus infinity.
func DepthFirstBranchAndBound(origin, target search.WeightedState, bound float64) (path map[search.State]search.State, found bool, cost float64) {
	// Initialize the shared predecessor map, the effective upper bound and the
	// running cost (0 at the root).
	path, bound ,cost = initBnBVariables(bound)
	var solutionPath map[search.State]search.State
	// The expansion order in root dictates the order of branch expansions;
	// hence the algorithm follows a left-to-right DFS "scanning" pattern.
	for _, neighbor := range origin.Neighbors() {
		// because of Go's inflexible type system, neighbor must be coerced to allow access to cost function
		solutionFound, currentCost := costBoundSearch(origin, origin, neighbor.(search.HeuristicState), target, cost, bound, path)
		if solutionFound && currentCost < bound {
			// Strictly better solution: tighten the bound and snapshot the
			// shared path map, which later branches will keep mutating.
			found = true
			bound = currentCost
			solutionPath = copyPath(path)
		}
	}
	// The returned cost is the best bound found; solutionPath is nil when no
	// solution was found.
	return solutionPath, found, bound
}
// costBoundSearch recursively explores the edge from->to, pruning any branch
// whose accumulated cost would reach or exceed the current bound. It reports
// whether a solution strictly under the bound was found in this branch and the
// best cost seen. Note that the path map is mutated in place by every branch;
// callers snapshot it (copyPath) when a better solution is recorded.
func costBoundSearch(origin, from, to, target search.WeightedState, branchCost, bound float64, path map[search.State]search.State) (found bool, cost float64) {
	expansionCost := from.Cost(to)
	// Prune: only descend while the accumulated cost stays under the bound.
	if branchCost + expansionCost < bound {
		path[to] = from
		cost = branchCost + expansionCost
		if to.Equals(target) {
			// Goal reached: this branch's cost becomes the new local bound.
			bound = cost
			found = true
		} else {
			oldCost := cost
			for _, neighbor := range to.Neighbors() {
				// because of Go's inflexible type system, neighbor must be coerced to allow access to cost/heuristic functions
				solutionBranch, branchCost := costBoundSearch(origin, to, neighbor.(search.HeuristicState), target, oldCost, bound, path)
				if solutionBranch && branchCost < bound {
					// A descendant improved on the bound; propagate it upward.
					found = true
					bound = branchCost
					cost = bound
				}
			}
		}
	}
	return
}
// Benchmark variant of DepthFirstBranchAndBound.
// It measures execution parameters (time, nodes expanded) them in a search.AlgorithmBenchmark entity.
func BenchmarkDepthFirstBranchAndBound(origin, target search.WeightedState, bound float64) (path map[search.State]search.State, found bool, cost float64, bench search.AlgorithmBenchmark) {
path, bound ,cost = initBnBVariables(bound)
start := time.Now()
var expansions uint = 0
var solutionPath map[search.State]search.State
// The expansion order in root dictates the order of branch expansions;
// hence the algorithm follows a left-to-right DFS "scanning" pattern.
for _, neighbor := range origin.Neighbors() {
// because of Go's inflexible type system, neighbor must be coerced to allow access to cost/heuristic functions
solutionFound, currentCost := benchmarkCostBoundSearch(origin, origin, neighbor.(search.HeuristicState), target, cost, bound, path, &expansions)
if solutionFound && currentCost < bound {
found = true
bound = currentCost
solutionPath = copyPath(path)
}
}
elapsed := time.Since(start)
return solutionPath, found, bound, search.AlgorithmBenchmark{ElapsedTime: elapsed, TotalExpansions: expansions}
}
func benchmarkCostBoundSearch(origin, from, to, target search.WeightedState, branchCost, bound float64, path map[search.State]search.State, expansions *uint) (found bool, cost float64) {
expansionCost := from.Cost(to)
if branchCost + expansionCost < bound {
path[to] = from
cost = branchCost + expansionCost
if to.Equals(target) {
bound = cost
found = true
} else {
oldCost := cost
for _, neighbor := range to.Neighbors() {
*expansions++
// because of Go's inflexible type system, neighbor must be coerced to allow access to cost/heuristic functions
solutionBranch, branchCost := benchmarkCostBoundSearch(origin, to, neighbor.(search.HeuristicState), target, oldCost, bound, path, expansions)
if solutionBranch && branchCost < bound {
found = true
bound = branchCost
cost = bound
}
}
}
}
return
}
func initBnBVariables(initialBound float64) (path map[search.State]search.State, bound, cost float64) {
if bound == 0 {
bound = math.Inf(0)
} else {
bound = initialBound
}
path = make(map[search.State]search.State)
cost = 0
return
} | blind/depth_first_branch_and_bound.go | 0.759671 | 0.600042 | depth_first_branch_and_bound.go | starcoder |
package switchboard
// Board represents a set of supplies and demands for which a universes of choices can be explored.
type Board struct {
supplies []Supply
demands []Demand
choices []Choice
comparator BoardComparator
}
// NewBoard constructs a new board with the given supplies and demands
func NewBoard(supplies []Supply, demands []Demand, comparator BoardComparator) (board Board) {
board.supplies = append(board.supplies, supplies...)
board.demands = append(board.demands, demands...)
board.comparator = comparator
return
}
// ChoicesMade returns a list of the choices made so far, in the order they were made
func (board Board) ChoicesMade() (choicesMade []Choice) {
return append(choicesMade, board.choices...)
}
// Cost returns the sum of the costs of all the choices made so far.
func (board Board) Cost() float64 {
	total := 0.0
	for _, choiceMade := range board.choices {
		total += choiceMade.cost
	}
	return total
}
// Play runs the board through the given Player repeatedly until the Player no longer shows improvement.
//
// NOTE(review): termination relies on the comparator eventually returning
// false. If the comparator treats equal boards as "improved", or the player
// keeps producing boards the comparator prefers, this loop never ends —
// confirm the comparator is strict.
func (board Board) Play(player Player) (bestBoard Board) {
	currentBoard := board
	betterBoard := player(currentBoard)
	// Keep iterating while the comparator judges the new board an improvement
	// over the current one.
	for board.comparator(currentBoard, betterBoard) {
		currentBoard = betterBoard
		betterBoard = player(currentBoard)
	}
	return betterBoard
}
// Explore uses the given explorer to discover the best board (sequence
// of choices) among the universe of all possible boards.
func (board Board) Explore(explorer Explorer) (bestBoard Board) {
finishedBoards := board.explore(explorer)
if len(finishedBoards) == 0 {
return board
}
bestBoard = finishedBoards[0]
for _, candidateBoard := range finishedBoards {
if board.comparator(bestBoard, candidateBoard) {
bestBoard = candidateBoard
}
}
return
}
// Supplies returns the list of supplies associated with the board
func (board Board) Supplies() (supplies []Supply) {
return append(supplies, board.supplies...)
}
// Demands returns the list of demands associated with the board
func (board Board) Demands() (demands []Demand) {
return append(demands, board.demands...)
}
func (board Board) explore(shouldExplore Explorer) (finishedBoards []Board) {
if board.isFinished() {
return append(finishedBoards, board)
}
for _, possibleBoard := range board.possibleBoards() {
if shouldExplore(possibleBoard) {
finishedBoards = append(finishedBoards, possibleBoard.explore(shouldExplore)...)
}
}
return
}
func (board Board) possibleBoards() (possibleBoards []Board) {
for _, choice := range board.availableChoices() {
possibleBoards = append(possibleBoards, board.choose(choice))
}
return
}
func (board Board) isFinished() bool {
return len(board.availableChoices()) == 0
}
func (board Board) pendingDemands() (pendingDemands []Demand) {
demandSet := make(map[Demand]struct{})
for _, demand := range board.demands {
demandSet[demand] = struct{}{}
}
for _, choiceMade := range board.ChoicesMade() {
delete(demandSet, choiceMade.demand)
}
for k := range demandSet {
pendingDemands = append(pendingDemands, k)
}
return
}
func (board Board) choose(choiceMade Choice) (newBoard Board) {
newBoard.supplies = board.supplies
newBoard.demands = board.demands
newBoard.choices = append(newBoard.choices, board.choices...)
newBoard.choices = append(newBoard.choices, choiceMade)
newBoard.comparator = board.comparator
return
}
// availableChoices returns one Choice for every (supply, pending demand)
// pairing that the supply can currently estimate without error.
func (board Board) availableChoices() (availableChoices []Choice) {
	for _, pendingDemand := range board.pendingDemands() {
		for _, supply := range board.supplies {
			// NOTE(review): an empty choice history is passed to Estimate even
			// though choices may already have been made on this board —
			// confirm whether board.ChoicesMade() should be supplied instead.
			choice, err := supply.Estimate(pendingDemand, []Choice{})
			if err == nil {
				availableChoices = append(availableChoices, choice)
			}
		}
	}
	return
} | board.go | 0.805211 | 0.586079 | board.go | starcoder |
package exporter
var (
gaugeMetrics = map[string]string{
"indices_fielddata_memory_size_bytes": "Field data cache memory usage in bytes",
"indices_filter_cache_memory_size_bytes": "Filter cache memory usage in bytes",
"indices_query_cache_memory_size_bytes": "Query cache memory usage in bytes",
"indices_request_cache_memory_size_bytes": "Request cache memory usage in bytes",
"indices_docs": "Count of documents on this node",
"indices_docs_deleted": "Count of deleted documents on this node",
"indices_store_size_bytes": "Current size of stored index data in bytes",
"indices_segments_memory_bytes": "Current memory size of segments in bytes",
"indices_segments_count": "Count of index segments on this node",
"process_cpu_percent": "Percent CPU used by process",
"process_mem_resident_size_bytes": "Resident memory in use by process in bytes",
"process_mem_share_size_bytes": "Shared memory in use by process in bytes",
"process_mem_virtual_size_bytes": "Total virtual memory used in bytes",
"process_open_files_count": "Open file descriptors",
"process_max_files_count": "Max file descriptors for process",
"breakers_estimated_size_bytes": "Estimated size in bytes of breaker",
"breakers_limit_size_bytes": "Limit size in bytes for breaker",
"jvm_memory_committed_bytes": "JVM memory currently committed by area",
"jvm_memory_used_bytes": "JVM memory currently used by area",
"jvm_memory_max_bytes": "JVM memory max",
"thread_pool_active_count": "Thread Pool threads active",
"thread_pool_largest_count": "Thread Pool largest threads count",
"thread_pool_queue_count": "Thread Pool operations queued",
"thread_pool_threads_count": "Thread Pool current threads count",
}
counterMetrics = map[string]string{
"indices_fielddata_evictions": "Evictions from field data",
"indices_filter_cache_evictions": "Evictions from filter cache",
"indices_query_cache_evictions": "Evictions from query cache",
"indices_request_cache_evictions": "Evictions from request cache",
"indices_flush_total": "Total flushes",
"indices_flush_time_ms_total": "Cumulative flush time in milliseconds",
"transport_rx_packets_total": "Count of packets received",
"transport_rx_size_bytes_total": "Total number of bytes received",
"transport_tx_packets_total": "Count of packets sent",
"transport_tx_size_bytes_total": "Total number of bytes sent",
"indices_store_throttle_time_ms_total": "Throttle time for index store in milliseconds",
"indices_indexing_index_total": "Total index calls",
"indices_indexing_index_time_ms_total": "Cumulative index time in milliseconds",
"indices_merges_total": "Total merges",
"indices_merges_total_docs_total": "Cumulative docs merged",
"indices_merges_total_size_bytes_total": "Total merge size in bytes",
"indices_merges_total_time_ms_total": "Total time spent merging in milliseconds",
"indices_refresh_total": "Total refreshes",
"indices_refresh_total_time_ms_total": "Total time spent refreshing",
"jvm_gc_collection_seconds_count": "Count of JVM GC runs",
"jvm_gc_collection_seconds_sum": "GC run time in seconds",
"process_cpu_time_seconds_sum": "Process CPU time in seconds",
"thread_pool_completed_count": "Thread Pool operations completed",
"thread_pool_rejected_count": "Thread Pool operations rejected",
}
) | exporter/metrics.go | 0.61231 | 0.420243 | metrics.go | starcoder |
package gates
import (
"math"
"reflect"
"strings"
)
type Array struct {
values []Value
}
type arrayIter struct {
i int
a *Array
}
func NewArray(values []Value) Array {
return Array{
values: values,
}
}
func NewArrayFromStringSlice(a []string) Array {
values := make([]Value, len(a))
for i := range a {
values[i] = String(a[i])
}
return NewArray(values)
}
func (Array) Type() string { return "array" }
func (Array) IsString() bool { return false }
func (Array) IsInt() bool { return false }
func (Array) IsFloat() bool { return false }
func (Array) IsBool() bool { return false }
func (Array) IsFunction() bool { return false }
// ToString renders the array as its elements' string forms joined by commas.
func (a Array) ToString() string {
	parts := make([]string, len(a.values))
	for i, v := range a.values {
		parts[i] = ToValue(v).ToString()
	}
	return strings.Join(parts, ",")
}
func (Array) ToInt() int64 { return 0 }
func (Array) ToFloat() float64 { return math.NaN() }
func (a Array) ToNumber() Number { return Float(a.ToFloat()) }
func (Array) ToBool() bool { return true }
func (Array) ToFunction() Function { return _EmptyFunction }
func (a Array) ToNative(ops ...ToNativeOption) interface{} {
return toNative(nil, a, convertToNativeOption2BinaryOptions(ops))
}
// toNative converts the array into a plain []interface{}, recursively
// converting each element. Cycles are detected via the seen map, keyed by the
// slice's backing-array pointer plus its length.
func (a Array) toNative(seen map[interface{}]interface{}, ops int) interface{} {
	if a.values == nil {
		// Preserve nil-ness: a nil values slice maps to a typed nil slice.
		return []interface{}(nil)
	}
	v := reflect.ValueOf(a.values)
	// (pointer, len) uniquely identifies this slice view for cycle detection.
	ptr := struct {
		ptr uintptr
		len int
	}{v.Pointer(), v.Len()}
	if v, ok := seen[ptr]; ok && !checkToNativeOption(SkipCircularReference, ops) {
		// This slice is already being converted higher up the call stack:
		// reuse the in-progress result so the cycle is mirrored in the output.
		return v
	} else if ok {
		// SkipCircularReference is set: break the cycle with nil instead.
		return nil
	}
	result := make([]interface{}, len(a.values))
	// Register the result before recursing so nested references can find it.
	seen[ptr] = result
	for i := range a.values {
		result[i] = toNative(seen, a.values[i], ops)
	}
	delete(seen, ptr)
	return result
}
func (a Array) Equals(other Value) bool {
o, ok := other.(Array)
if !ok {
return false
}
return reflect.DeepEqual(a.values, o.values)
}
func (a Array) SameAs(other Value) bool { return false }
func (a Array) Get(r *Runtime, key Value) Value {
i := key.ToNumber()
if i.IsInt() {
ii := i.ToInt()
if ii < 0 || ii >= int64(len(a.values)) {
return Null
}
return a.values[ii]
}
switch key.ToString() {
case "length":
return Int(len(a.values))
}
return Null
}
func (a Array) Set(r *Runtime, key, value Value) {
if !key.IsInt() {
return
}
i := key.ToInt()
if i < 0 || i >= int64(len(a.values)) {
return
}
a.values[i] = value
}
func (a Array) Iterator() Iterator {
return &arrayIter{i: 0, a: &a}
}
func (a *arrayIter) Next() (Value, bool) {
i := a.i
if i >= 0 && i < len(a.a.values) {
a.i++
return a.a.values[i], true
}
return Null, false
}
func (a *Array) push(value Value) {
a.values = append(a.values, value)
} | array.go | 0.649356 | 0.505859 | array.go | starcoder |
// Package chans contains utility constraints, functions, and types regarding
// Go channels, such as the PubSub type for fan-out events.
package chans
import (
"context"
"time"
"gopkg.in/typ.v4"
)
// SendTimeout sends a value to a channel, or cancels after a given duration.
func SendTimeout[C Sender[V], V any](ch C, value V, timeout time.Duration) bool {
if timeout <= 0 {
ch <- value
return true
}
timer := time.NewTimer(timeout)
select {
case ch <- value:
timer.Stop()
return true
case <-timer.C:
return false
}
}
// SendContext receives a value from a channel, or cancels when the given
// context is cancelled.
func SendContext[C Sender[V], V any](ctx context.Context, ch C, value V) bool {
select {
case ch <- value:
return true
case <-ctx.Done():
return false
}
}
// RecvTimeout receives a value from a channel, or cancels after a given timeout.
// If the timeout duration is zero or negative, then no limit is used.
func RecvTimeout[C Receiver[V], V any](ch C, timeout time.Duration) (V, bool) {
if timeout <= 0 {
value, ok := <-ch
return value, ok
}
timer := time.NewTimer(timeout)
select {
case value, ok := <-ch:
timer.Stop()
return value, ok
case <-timer.C:
return typ.Zero[V](), false
}
}
// RecvContext receives a value from a channel, or cancels when the given
// context is cancelled.
func RecvContext[C ~<-chan V, V any](ctx context.Context, ch C) (V, bool) {
select {
case value, ok := <-ch:
return value, ok
case <-ctx.Done():
return typ.Zero[V](), false
}
}
// RecvQueued drains up to maxValues values that are immediately available
// from the channel's buffer, returning as soon as a receive would block.
// It returns a nil slice when nothing was ready (or maxValues <= 0).
func RecvQueued[C Receiver[V], V any](ch C, maxValues int) []V {
	var received []V
	for i := 0; i < maxValues; i++ {
		select {
		case v := <-ch:
			received = append(received, v)
		default:
			// Nothing queued right now: stop without blocking.
			return received
		}
	}
	return received
}
// RecvQueuedFull will receive all values from a channel until either there's no
// more values in the channel's queue buffer, or it has filled buf with
// values, or until the channel is closed, whichever comes first, and then
// returns the number of values that was received.
func RecvQueuedFull[C Receiver[V], B ~[]V, V any](ch C, buf B) int {
var index int
for index < len(buf) {
select {
case v := <-ch:
buf[index] = v
index++
default:
return index
}
}
return index
} | chans/chans.go | 0.673406 | 0.408188 | chans.go | starcoder |
package testdata
// GetSubscriptionResponse example
const GetSubscriptionResponse = `{
"resource": "subscription",
"id": "sub_rVKGtNd6s3",
"mode": "live",
"createdAt": "2016-06-01T12:23:34+00:00",
"status": "active",
"amount": {
"value": "25.00",
"currency": "EUR"
},
"times": 4,
"timesRemaining": 4,
"interval": "3 months",
"startDate": "2016-06-01",
"nextPaymentDate": "2016-09-01",
"description": "Quarterly payment",
"method": null,
"mandateId": "mdt_38HS4fsS",
"webhookUrl": "https://webshop.example.org/payments/webhook",
"metadata": {
"plan": "small"
},
"_links": {
"self": {
"href": "https://api.mollie.com/v2/customers/cst_stTC2WHAuS/subscriptions/sub_rVKGtNd6s3",
"type": "application/hal+json"
},
"customer": {
"href": "https://api.mollie.com/v2/customers/cst_stTC2WHAuS",
"type": "application/hal+json"
},
"profile": {
"href": "https://api.mollie.com/v2/profiles/pfl_URR55HPMGx",
"type": "application/hal+json"
},
"payments": {
"href": "https://api.mollie.com/v2/customers/cst_stTC2WHAuS/subscriptions/sub_rVKGtNd6s3/payments",
"type": "application/hal+json"
},
"documentation": {
"href": "https://docs.mollie.com/reference/v2/subscriptions-api/get-subscription",
"type": "text/html"
}
}
}`
// DeleteSubscriptionResponse example
const DeleteSubscriptionResponse = `{
"resource": "subscription",
"id": "sub_rVKGtNd6s3",
"mode": "live",
"createdAt": "2018-06-01T12:23:34+00:00",
"status": "canceled",
"amount": {
"value": "25.00",
"currency": "EUR"
},
"times": 4,
"interval": "3 months",
"nextPaymentDate": null,
"description": "Quarterly payment",
"method": null,
"startDate": "2016-06-01",
"webhookUrl": "https://webshop.example.org/payments/webhook",
"canceledAt": "2018-08-01T11:04:21+00:00",
"_links": {
"self": {
"href": "https://api.mollie.com/v2/customers/cst_stTC2WHAuS/subscriptions/sub_rVKGtNd6s3",
"type": "application/hal+json"
},
"customer": {
"href": "https://api.mollie.com/v2/customers/cst_stTC2WHAuS",
"type": "application/hal+json"
},
"documentation": {
"href": "https://docs.mollie.com/reference/v2/subscriptions-api/cancel-subscription",
"type": "text/html"
}
}
}`
// ListAllSubscriptionsResponse example
const ListAllSubscriptionsResponse = `{
"count": 3,
"_embedded": {
"subscriptions": [
{
"resource": "subscription",
"id": "sub_rVKGtNd6s3",
"mode": "live",
"createdAt": "2018-06-01T12:23:34+00:00",
"status": "active",
"amount": {
"value": "25.00",
"currency": "EUR"
},
"times": 4,
"timesRemaining": 3,
"interval": "3 months",
"startDate": "2016-06-01",
"nextPaymentDate": "2016-09-01",
"description": "Quarterly payment",
"method": null,
"webhookUrl": "https://webshop.example.org/subscriptions/webhook",
"_links": {
"self": {
"href": "https://api.mollie.com/v2/customers/cst_stTC2WHAuS/subscriptions/sub_rVKGtNd6s3",
"type": "application/hal+json"
},
"profile": {
"href": "https://api.mollie.com/v2/profiles/pfl_URR55HPMGx",
"type": "application/hal+json"
},
"customer": {
"href": "https://api.mollie.com/v2/customers/cst_stTC2WHAuS",
"type": "application/hal+json"
}
}
}
]
},
"_links": {
"self": {
"href": "https://api.mollie.com/v2/subscriptions",
"type": "application/hal+json"
},
"previous": null,
"next": {
"href": "https://api.mollie.com/v2/subscriptions?from=sub_mnfbwhMfvo",
"type": "application/hal+json"
},
"documentation": {
"href": "https://docs.mollie.com/reference/v2/subscriptions-api/list-all-subscriptions",
"type": "text/html"
}
}
}` | testdata/subscriptions.go | 0.741768 | 0.432483 | subscriptions.go | starcoder |
package codexec
import "time"
type (
// ContainerNode node that stores the container id, next and prev and metrics information
ContainerNode struct {
ID string
ExecutionCount int64
AvgResTime time.Duration
MaxResTime time.Duration
MinResTime time.Duration
Prev *ContainerNode
Next *ContainerNode
}
// ContainerPool stores the container nodes in a structured way
// Active container nodes stored in a Circular Doubly Linked List and Nodes map
// Passive, Exited container nodes are removed from CDLL and stored in garbage
ContainerPool struct {
Head *ContainerNode
Tail *ContainerNode
Curr *ContainerNode
Nodes map[string]*ContainerNode
Garbage map[string]*ContainerNode
}
)
func NewContainerPool() *ContainerPool {
return &ContainerPool{
Nodes: make(map[string]*ContainerNode),
Garbage: make(map[string]*ContainerNode),
}
}
// Get returns the container node at the cursor and advances the cursor to the
// next node (round-robin over the circular list). It returns nil when the
// pool is empty.
func (p *ContainerPool) Get() *ContainerNode {
	if p.Head == nil {
		return nil
	}
	node := p.Curr
	p.Curr = p.Curr.Next
	return node
}
// Remove unlinks the node for id from the circular doubly linked list, drops
// it from the active Nodes map and parks it in Garbage so it can be collected
// later. Unknown ids are ignored.
func (p *ContainerPool) Remove(id string) {
	// Bug fix: guard against unknown ids (the old code dereferenced a nil
	// map entry) and actually populate Garbage as the comment promised.
	nodeToRemove, ok := p.Nodes[id]
	if !ok {
		return
	}
	delete(p.Nodes, id)
	p.Garbage[id] = nodeToRemove
	if p.Head == p.Tail {
		// Last remaining node: empty the list entirely.
		p.Head = nil
		p.Tail = nil
		p.Curr = nil
		nodeToRemove.Next = nil
		nodeToRemove.Prev = nil
		return
	}
	if p.Curr == nodeToRemove {
		p.Curr = nodeToRemove.Next
	}
	// Generic circular unlink: fixing both neighbours also repairs the links
	// the old head/tail special cases left stale (Tail.Next still pointed at a
	// removed head, Head.Prev at a removed tail).
	nodeToRemove.Prev.Next = nodeToRemove.Next
	nodeToRemove.Next.Prev = nodeToRemove.Prev
	if p.Head == nodeToRemove {
		p.Head = nodeToRemove.Next
	}
	if p.Tail == nodeToRemove {
		p.Tail = nodeToRemove.Prev
	}
	nodeToRemove.Next = nil
	nodeToRemove.Prev = nil
}
// Add creates a node for the container id, registers it in the lookup map and
// links it into the circular doubly linked list as the new tail.
func (p *ContainerPool) Add(id string) {
	node := &ContainerNode{ID: id}
	p.Nodes[id] = node
	if p.Head == nil {
		// First node: it is head, tail and cursor, linked to itself.
		node.Next = node
		node.Prev = node
		p.Head = node
		p.Tail = node
		p.Curr = node
		return
	}
	if p.Tail == p.Head {
		// Second node: head and the new node point at each other both ways.
		node.Next = p.Head
		node.Prev = p.Head
		p.Head.Next = node
		p.Head.Prev = node
		p.Tail = node
		return
	}
	// General case: splice the node in between the current tail and the head.
	node.Next = p.Tail.Next
	node.Prev = p.Tail
	p.Tail.Next.Prev = node
	p.Tail.Next = node
	p.Tail = node
} | internal/codexec/pool.go | 0.540196 | 0.467271 | pool.go | starcoder |
package object
import (
"github.com/carlosroman/aun-otra-ray-tracer/go/internal/ray"
)
const (
epsilon = 1e-5
)
type Object interface {
LocalIntersect(ray ray.Ray) Intersections
LocalNormalAt(worldPoint ray.Vector, hit Intersection) ray.Vector
Transform() ray.Matrix
TransformInverse() ray.Matrix
SetTransform(t ray.Matrix) error
Material() (m Material)
SetMaterial(m Material)
Parent() Object
SetParent(obj Object)
WorldToObject(worldPoint ray.Vector) (point ray.Vector)
NormalToWorld(normalVector ray.Vector) (vector ray.Vector)
}
// NormalAt computes the world-space surface normal of the intersected object
// at worldPoint: the point is taken into object space, the shape's local
// normal is evaluated there, and the result is transformed back to world
// space (including any parent group transforms).
func NormalAt(xs Intersection, worldPoint ray.Vector) ray.Vector {
	localPoint := xs.Obj.WorldToObject(worldPoint)
	localNormal := xs.Obj.LocalNormalAt(localPoint, xs)
	return xs.Obj.NormalToWorld(localNormal)
}
func Intersect(obj Object, rr ray.Ray) Intersections {
return obj.
LocalIntersect(
rr.Transform(obj.TransformInverse()))
}
type BasicObject struct {
Transform ray.Matrix
Material Material
}
type obj struct {
t ray.Matrix
tInv ray.Matrix
m Material
p Object
}
func (o obj) LocalNormalAt(worldPoint ray.Vector, _ Intersection) ray.Vector {
return worldPoint
}
func (o obj) LocalIntersect(r ray.Ray) Intersections {
return nil
}
func (o obj) Parent() Object {
return o.p
}
func (o *obj) SetParent(obj Object) {
o.p = obj
}
func NewTestShape(opts ...Option) Object {
s := obj{}
_ = s.SetTransform(ray.DefaultIdentityMatrix())
m := DefaultMaterial()
m.Ambient = 1
s.SetMaterial(m)
for i := range opts {
opts[i].Apply(&s)
}
return &s
}
func (o obj) Transform() ray.Matrix {
return o.t
}
func (o *obj) SetTransform(t ray.Matrix) error {
o.t = t
inverse, err := t.Inverse()
o.tInv = inverse
return err
}
func (o obj) TransformInverse() ray.Matrix {
return o.tInv
}
func (o obj) Material() Material {
if o.p != nil {
return o.p.Material()
}
return o.m
}
func (o *obj) SetMaterial(m Material) {
o.m = m
}
func (o obj) WorldToObject(worldPoint ray.Vector) (point ray.Vector) {
if o.p != nil {
worldPoint = o.p.WorldToObject(worldPoint)
}
return o.
tInv.
MultiplyByVector(worldPoint)
}
// NormalToWorld transforms an object-space normal into world space: multiply
// by the inverse-transpose of the object's transform, zero the w component,
// re-normalize, then bubble the result up through the parent chain.
func (o obj) NormalToWorld(normalVector ray.Vector) (resultVector ray.Vector) {
	world := o.tInv.Transpose().MultiplyByVector(normalVector).SetW(0).Normalize()
	if o.p != nil {
		world = o.p.NormalToWorld(world)
	}
	return world
} | go/internal/object/object.go | 0.804021 | 0.655095 | object.go | starcoder |
package fractales
import (
"github.com/Balise42/marzipango/fractales/orbits"
"github.com/Balise42/marzipango/params"
"image"
"image/color"
"math"
"math/rand"
"sync"
)
// CreateFlameComputer precomputes a fractal-flame image as a pixel->color map
// and returns a Computation that copies one column's row range of it into the
// destination image.
//
// NOTE(review): createFlameMap runs the full chaos-game iteration here, so
// all of the heavy work happens once, up front, before any Computation runs.
func CreateFlameComputer(params params.ImageParams) Computation {
	flameFuncs := createFlameFuncs()
	ifsMap := createFlameMap(params, flameFuncs)
	comp := func(x int, ymin int, ymax int, img *image.RGBA64, wg *sync.WaitGroup) {
		for y := ymin; y < ymax; y++ {
			val, ok := ifsMap[orbits.Coords{int64(x), int64(y)}]
			if ok {
				img.Set(x, y, val)
			} else {
				// Pixels the attractor never hit get the divergence color.
				img.Set(x, y, params.Palette.Divergence)
			}
		}
		// Signal the caller that this column strip is complete.
		wg.Done()
	}
	return comp
}
type ifsFunc func(float64, float64) (float64, float64)
func createFlameFuncs() []ifsFunc {
V0 := func(x float64, y float64) (float64, float64) {
sqr := math.Sqrt(x*x + y*y)
return 1/sqr * ((x-y)*(x+y)), 1/sqr * 2*x*y
}
V1 := func(x float64, y float64) (float64, float64) {
return math.Sin(x), y
}
V2 := func(x float64, y float64) (float64, float64) {
sqr := math.Sqrt(x*x + y*y)
theta := math.Atan(x / y)
return 1/sqr * (math.Cos(theta) + math.Sin(sqr)), 1/sqr * (math.Sin(theta) - math.Cos(sqr))
}
V3 := func(x float64, y float64) (float64, float64) {
if x >= 0 && y >= 0 {
return x, y
} else if x < 0 && y >= 0 {
return 2*x, y
} else if x >= 0 && y < 0 {
return x, y/2
} else {
return 2*x, y/2
}
}
V4 := func(x float64, y float64) (float64, float64) {
sqr := math.Sqrt(x*x + y*y)
theta := math.Atan(x / y)
return math.Sin(theta) * math.Cos(sqr), math.Cos(theta) * math.Sin(sqr)
}
return []ifsFunc{V0, V1, V2, V3, V4}
}
type triplet struct {
R float64
G float64
B float64
A float64
}
func createFlameMap(params params.ImageParams, funcs []ifsFunc) map[orbits.Coords]color.NRGBA {
imgRes := make(map[orbits.Coords]color.NRGBA)
x := float64(0)
y := float64(0)
rf := []float64{1.0, 1.0, 1.0, 1.0, 1.0}
gf := []float64{0.0, 0.1, 0.2, 0.3, 0.4}
bf := []float64{0, 0, 0, 0, 0}
histo := make(map[orbits.Coords]int)
cols := make(map[orbits.Coords]triplet)
col := triplet{1.0, 0, 0, 0}
maxValue := 0
for i := 0; i < 500000000; i++ {
rule := rand.Float32()
var a, b, c, d, e, f float64
var funcIndex int
if rule < 0.08 {
a = -0.98
b = -0.12
c = -0.6
d = 0.01
e = -0.028
f = 0.07
funcIndex = 0
} else if rule < 0.8 {
a = -0.5
b = 0.43
c = -0.06
d = -0.44
e = -0.09
f = -0.88
funcIndex = 1
} else if rule < 0.85 {
a = 0.18
b = -0.12
c = -0.18
d = 0.04
e = 0.18
f = 0.40
funcIndex = 2
} else if rule < 0.87 {
a = 1.62
b = 1.03
c = 0.59
d = -0.66
e = 0.25
f = -0.72
funcIndex = 3
} else {
a = 0.02
b = 0.13
c = -1.17
d = -1.44
e = -0.17
f = -0.14
funcIndex = 4
}
x1 := a*x + b*y + e
y1 := c*x + d*y + f
x1, y1 = funcs[funcIndex](x1, y1)
coords := scaleFlame(x1, y1, params)
//col := cols[coords]
col = triplet{ (col.R + rf[funcIndex]) / 2, col.G + gf[funcIndex], col.B + bf[funcIndex], 1.0 }
cols[coords] = col
if i > 20 {
histo[coords] = histo[coords] + 1
cols[coords] = col
if histo[coords] > maxValue {
maxValue = histo[coords]
}
}
x = x1
y = y1
}
for k, col := range cols {
alpha := math.Log(float64(histo[k]) + 1) / math.Log(float64(maxValue) + 1)
if alpha < 0 {
alpha = 0
}
tmpR := col.R//math.Pow(col.R * alpha, 0.5)
tmpG := col.G//math.Pow(col.G * alpha, 0.5)
tmpB := col.B//math.Pow(col.B * alpha, 0.5)
imgRes[k] = color.NRGBA{R: uint8(tmpR * 255), G: uint8(tmpG * 255), B: uint8(tmpB * 255), A: uint8((alpha) * 255)}
}
return imgRes
}
// scaleFlame maps a point from the flame's [-1, 1]-ish coordinate range onto
// integer pixel coordinates of the target image.
func scaleFlame(x1 float64, y1 float64, imageParams params.ImageParams) orbits.Coords {
	px := int64((x1 + 1.0) * float64(imageParams.Width))
	py := int64((y1 + 1.0) * float64(imageParams.Height))
	return orbits.Coords{px, py}
} | fractales/flame.go | 0.534127 | 0.544862 | flame.go | starcoder |
package lru
/**
题目:https://leetcode-cn.com/problems/lru-cache/
LRU 缓存
请你设计并实现一个满足 LRU (最近最少使用) 缓存 约束的数据结构。
实现 LRUCache 类:
LRUCache(int Capacity) 以 正整数 作为容量capacity 初始化 LRU 缓存
int get(int Key) 如果关键字 Key 存在于缓存中,则返回关键字的值,否则返回 -1 。
void put(int Key, int Value)如果关键字key 已经存在,则变更其数据值value ;如果不存在,则向缓存中插入该组key-Value 。如果插入操作导致关键字数量超过capacity ,则应该 逐出 最久未使用的关键字。
函数 get 和 put 必须以 O(1) 的平均时间复杂度运行。
示例:
输入
["LRUCache", "put", "put", "get", "put", "get", "put", "get", "get", "get"]
[[2], [1, 1], [2, 2], [1], [3, 3], [2], [4, 4], [1], [3], [4]]
输出
[null, null, null, 1, null, -1, null, -1, 3, 4]
解释
LRUCache lRUCache = new LRUCache(2);
lRUCache.put(1, 1); // 缓存是 {1=1}
lRUCache.put(2, 2); // 缓存是 {1=1, 2=2}
lRUCache.get(1); // 返回 1
lRUCache.put(3, 3); // 该操作会使得关键字 2 作废,缓存是 {1=1, 3=3}
lRUCache.get(2); // 返回 -1 (未找到)
lRUCache.put(4, 4); // 该操作会使得关键字 1 作废,缓存是 {4=4, 3=3}
lRUCache.get(1); // 返回 -1 (未找到)
lRUCache.get(3); // 返回 3
lRUCache.get(4); // 返回 4
提示:
1 <= Capacity <= 3000
0 <= Key <= 10000
0 <= Value <= 105
最多调用 2 * 105 次 get 和 put
*/
// LRUCache is a Least Recently Used cache implemented with a doubly linked
// list plus a hash map, giving O(1) Get and Put.
type LRUCache struct {
	Size     int // current number of entries
	Capacity int // maximum number of entries before eviction kicks in
	Cache    map[int]*DoubleListNode // key -> list node, for O(1) lookup
	// New elements are always inserted at the head; eviction pops from the
	// tail. A doubly linked list allows O(1) removal from the tail, which a
	// singly linked list cannot do.
	DoubleList *DoubleList
}
type DoubleList struct {
Head, Tail *DoubleListNode
}
type DoubleListNode struct {
Key, Value int
Prev, Next *DoubleListNode
}
func initDoubleListNode(key, value int) *DoubleListNode {
return &DoubleListNode{
Key: key,
Value: value,
}
}
func initDoubleList() *DoubleList {
d := &DoubleList{
Head: initDoubleListNode(0, 0),
Tail: initDoubleListNode(0, 0),
}
// 初始化的时候有2个结点,头结点,尾结点互相指
d.Head.Next = d.Tail
d.Tail.Prev = d.Head
return d
}
func Constructor(capacity int) LRUCache {
l := LRUCache{
Cache: map[int]*DoubleListNode{},
Capacity: capacity,
DoubleList: initDoubleList(),
}
return l
}
func (this *LRUCache) Get(key int) int {
if _, ok := this.Cache[key]; !ok {
return -1
}
node := this.Cache[key]
this.moveToHead(node)
return node.Value
}
func (this *LRUCache) Put(key int, value int) {
if this.Capacity == 0 {
return
}
if _, ok := this.Cache[key]; !ok {
if this.Size+1 > this.Capacity {
removed := this.removeTail()
delete(this.Cache, removed.Key)
} else {
this.Size++
}
node := initDoubleListNode(key, value)
this.Cache[key] = node
this.addToHead(node)
} else {
node := this.Cache[key]
node.Value = value
this.moveToHead(node)
}
}
func (this *LRUCache) addToHead(node *DoubleListNode) {
// 不是替换头结点,而是添加到头结点后边一个元素
node.Prev = this.DoubleList.Head
node.Next = this.DoubleList.Head.Next
this.DoubleList.Head.Next.Prev = node
this.DoubleList.Head.Next = node
}
func (this *LRUCache) removeNode(node *DoubleListNode) {
node.Prev.Next = node.Next
node.Next.Prev = node.Prev
node.Next = nil
node.Prev = nil
}
func (this *LRUCache) moveToHead(node *DoubleListNode) {
this.removeNode(node)
this.addToHead(node)
}
// 超出容量的时候需要移除元素
func (this *LRUCache) removeTail() *DoubleListNode {
node := this.DoubleList.Tail.Prev
this.removeNode(node)
return node
} | datastruct/lru/lru/lru.go | 0.585338 | 0.402187 | lru.go | starcoder |
// Package scanner provides the Scanner type that is able to read from any
// io.Reader and generate tokens.
package scanner
import (
"bufio"
"fmt"
"io"
"unicode"
"github.com/campoy/groto/token"
)
// New creates a new Scanner reading from the given io.Reader.
func New(r io.Reader) *Scanner {
	s := &Scanner{r: bufio.NewReader(r)}
	return s
}

// A Scanner scans tokens from the io.Reader given at construction.
type Scanner struct {
	r *bufio.Reader
}

// A Token is defined by its kind, and sometimes by some text.
type Token struct {
	token.Kind
	Text string
}

// String returns a human readable representation of a Token.
func (t Token) String() string {
	if len(t.Text) == 0 {
		return t.Kind.String()
	}
	return fmt.Sprintf("%s (%s)", t.Kind, t.Text)
}

// emit builds a Token of the given kind; the accumulated runes (possibly
// nil) become the token's text.
func (s *Scanner) emit(kind token.Kind, value []rune) Token {
	return Token{Kind: kind, Text: string(value)}
}
// Scan returns the next token found in the given io.Reader.
// If an error occurs the Token will be of kind Illegal, and
// the text includes information about the error.
// If the io.Reader reaches EOF, the token will be of kind EOF.
func (s *Scanner) Scan() (tok Token) {
	s.readWhile(isSpace)

	// Dispatch on the first significant rune without consuming it, so the
	// specialized scanners see the full lexeme.
	r := s.peek()
	switch {
	case r == eof:
		return s.emit(token.EOF, nil)
	case isLetter(r):
		return s.identifier()
	case isDecimalDigit(r):
		return s.number()
	case r == quote || r == doubleQuote:
		return s.string()
	case r == '/':
		return s.comment()
	}

	// Single-rune punctuation or an unrecognized rune; consume it either
	// way. The token lookup is done once (the original computed it twice).
	s.read()
	if kind := token.Punctuation(string(r)); kind != token.Illegal {
		return s.emit(kind, nil)
	}
	return s.emit(token.Illegal, []rune{r})
}
// identifier scans a run of letters, digits and underscores and classifies
// it as a boolean literal, keyword, predeclared type name, or identifier.
func (s *Scanner) identifier() Token {
	value := s.readWhile(or(isLetter, isDecimalDigit, equals(underscore)))
	text := string(value)

	if text == "true" {
		return s.emit(token.True, nil)
	}
	if text == "false" {
		return s.emit(token.False, nil)
	}
	if kw := token.Keyword(text); kw != token.Illegal {
		return s.emit(kw, nil)
	}
	if typ := token.Type(text); typ != token.Illegal {
		return s.emit(typ, nil)
	}
	return s.emit(token.Identifier, value)
}
// string scans a quoted string literal. The opening quote rune (single or
// double) is consumed first; the literal ends at the next occurrence of the
// same quote rune that is not escaped by a backslash.
func (s *Scanner) string() Token {
	first := s.read()
	value := []rune{first}
	for {
		value = append(value, s.readUntil(equals(first))...)
		value = append(value, s.read())
		// The closing quote is escaped only when preceded by an ODD number
		// of consecutive backslashes; an even count means the backslashes
		// escape each other (e.g. `"ab\\"` is a complete literal). The
		// previous check looked at a single rune only and mis-parsed that
		// case. Index 0 is the opening quote and is excluded from the count.
		backslashes := 0
		for i := len(value) - 2; i > 0 && value[i] == backslash; i-- {
			backslashes++
		}
		if backslashes%2 == 0 {
			return s.emit(token.StringLiteral, value)
		}
	}
}
// comment scans a line comment. Only "//" comments are recognized; the scan
// runs up to (but does not consume) the terminating newline.
// NOTE(review): block comments ("/* ... */") are not handled here — confirm
// they are out of scope for this grammar.
func (s *Scanner) comment() Token {
	value := []rune{s.read(), s.read()}
	if string(value) != "//" {
		// The first rune was '/', but the second did not complete "//".
		return s.emit(token.Illegal, value)
	}
	value = append(value, s.readUntil(equals('\n'))...)
	return s.emit(token.Comment, value)
}
// number scans a numeric literal: decimal, octal (leading 0), hexadecimal
// (0x/0X prefix), or float (fractional part and/or exponent).
func (s *Scanner) number() Token {
	first := s.read()
	second := s.peek()
	if first == '0' && isDecimalDigit(second) {
		return s.octal([]rune{first})
	}
	if first == '0' && (second == 'x' || second == 'X') {
		s.read()
		return s.hex([]rune{first, second})
	}

	tok := token.DecimalLiteral
	value := []rune{first}
	value = append(value, s.readWhile(isDecimalDigit)...)

	next := s.peek()
	if next == dot {
		s.read()
		tok = token.FloatLiteral
		value = append(value, dot)
		value = append(value, s.readWhile(isDecimalDigit)...)
		next = s.peek()
	}

	if next == 'E' || next == 'e' {
		s.read()
		tok = token.FloatLiteral
		value = append(value, next)
		// The exponent sign is OPTIONAL in the proto grammar: "1e5", "1e+5"
		// and "1e-5" are all valid. The previous code consumed the next rune
		// unconditionally and rejected unsigned exponents as Illegal.
		if sign := s.peek(); sign == '+' || sign == '-' {
			s.read()
			value = append(value, sign)
		}
		value = append(value, s.readWhile(isDecimalDigit)...)
	}
	return s.emit(tok, value)
}
// octal consumes the remaining digits of an octal literal (the leading "0"
// is already in value). A trailing 8 or 9 makes the whole literal illegal.
func (s *Scanner) octal(value []rune) Token {
	value = append(value, s.readWhile(isOctalDigit)...)
	if !isDecimalDigit(s.peek()) {
		return s.emit(token.OctalLiteral, value)
	}
	return s.emit(token.Illegal, append(value, s.read()))
}

// hex consumes the digits of a hex literal (the "0x"/"0X" prefix is already
// in value). A bare prefix with no digits is illegal.
func (s *Scanner) hex(value []rune) Token {
	value = append(value, s.readWhile(isHexDigit)...)
	if len(value) > 2 {
		return s.emit(token.HexLiteral, value)
	}
	return s.emit(token.Illegal, value)
}
// read consumes and returns the next rune, or the sentinel eof rune when the
// underlying reader is exhausted.
// NOTE(review): only io.EOF is handled; on any other read error bufio yields
// rune 0, which this code passes through — confirm that is acceptable.
func (s *Scanner) read() rune {
	r, _, err := s.r.ReadRune()
	if err == io.EOF {
		return eof
	}
	return r
}

// unread pushes the last rune back onto the reader. bufio guarantees only a
// single pending unread, so misuse is a programmer error and panics.
func (s *Scanner) unread() {
	if err := s.r.UnreadRune(); err != nil {
		panic(err)
	}
}

// peek returns the next rune without consuming it. eof is not unread because
// nothing was actually consumed in that case.
func (s *Scanner) peek() rune {
	r := s.read()
	if r != eof {
		s.unread()
	}
	return r
}

// readUntil consumes runes until p matches or the input ends, returning
// everything consumed. The matching rune itself is left unconsumed.
func (s *Scanner) readUntil(p runePredicate) []rune {
	var value []rune
	for {
		r := s.read()
		if r == eof {
			return value
		}
		if p(r) {
			s.unread()
			return value
		}
		value = append(value, r)
	}
}

// readWhile consumes runes as long as p matches; it is readUntil with the
// predicate negated.
func (s *Scanner) readWhile(p runePredicate) []rune { return s.readUntil(not(p)) }
// runePredicate reports whether a rune belongs to some character class.
type runePredicate func(rune) bool

var (
	isLetter       = unicode.IsLetter
	isSpace        = unicode.IsSpace
	isDecimalDigit = isBetween('0', '9')
	isOctalDigit   = isBetween('0', '7')
	isHexDigit     = or(isDecimalDigit, isBetween('a', 'f'), isBetween('A', 'F'))
)

// isBetween matches runes in the inclusive range [a, b].
func isBetween(a, b rune) runePredicate {
	return func(r rune) bool {
		return a <= r && r <= b
	}
}

// equals matches exactly the rune r.
func equals(r rune) runePredicate {
	return func(s rune) bool {
		return s == r
	}
}

// not inverts a predicate.
func not(f runePredicate) runePredicate {
	return func(r rune) bool {
		return !f(r)
	}
}

// or matches when any of the given predicates matches; with no predicates it
// never matches.
func or(fs ...runePredicate) runePredicate {
	return func(r rune) bool {
		for _, f := range fs {
			if f(r) {
				return true
			}
		}
		return false
	}
}
// Sentinel and syntactically significant runes used by the scanner.
const (
	underscore rune = '_'
	eof rune = 0 // returned by read once the input is exhausted
	dot rune = '.'
	backslash rune = '\\'
	quote rune = '\''
	doubleQuote rune = '"'
)
package theme
import (
"image/color"
"gioui.org/font/gofont"
"gioui.org/widget/material"
)
// PairFor wraps the provided theme color in a Color type with an automatically
// populated Text color. The Text field value is chosen based on the luminance
// of the provided color: dark backgrounds get white text, light ones black.
func PairFor(bg color.NRGBA) ContrastPair {
	pair := ContrastPair{Bg: bg}
	if grayscaleLuminance(bg) < 150 {
		pair.Fg = white
	} else {
		pair.Fg = black
	}
	return pair
}
func grayscaleLuminance(c color.NRGBA) uint8 {
return uint8(float32(c.R)*.3 + float32(c.G)*.59 + float32(c.B)*.11)
}
// Raw palette values. PairFor derives a readable foreground for each when
// they are assembled into swatches by New and ToDark.
var (
	// Teal family: light-mode secondary swatch.
	teal = color.NRGBA{R: 0x44, G: 0xa8, B: 0xad, A: 255}
	brightTeal = color.NRGBA{R: 0x79, G: 0xda, B: 0xdf, A: 255}
	darkTeal = color.NRGBA{R: 0x00, G: 0x79, B: 0x7e, A: 255}
	// Green family: light-mode primary swatch.
	green = color.NRGBA{R: 0x45, G: 0xae, B: 0x7f, A: 255}
	brightGreen = color.NRGBA{R: 0x79, G: 0xe0, B: 0xae, A: 255}
	darkGreen = color.NRGBA{R: 0x00, G: 0x7e, B: 0x52, A: 255}
	// Gold family — currently unreferenced by New/ToDark in this file.
	gold = color.NRGBA{R: 255, G: 214, B: 79, A: 255}
	lightGold = color.NRGBA{R: 255, G: 255, B: 129, A: 255}
	darkGold = color.NRGBA{R: 200, G: 165, B: 21, A: 255}
	// Neutral grayscale ramp.
	white = color.NRGBA{R: 255, G: 255, B: 255, A: 255}
	lightGray = color.NRGBA{R: 225, G: 225, B: 225, A: 255}
	gray = color.NRGBA{R: 200, G: 200, B: 200, A: 255}
	darkGray = color.NRGBA{R: 100, G: 100, B: 100, A: 255}
	veryDarkGray = color.NRGBA{R: 50, G: 50, B: 50, A: 255}
	black = color.NRGBA{A: 255}
	// Purple families: dark-mode primary (1) and secondary (2) swatches.
	purple1 = color.NRGBA{R: 69, G: 56, B: 127, A: 255}
	lightPurple1 = color.NRGBA{R: 121, G: 121, B: 174, A: 255}
	darkPurple1 = color.NRGBA{R: 99, G: 41, B: 115, A: 255}
	purple2 = color.NRGBA{R: 127, G: 96, B: 183, A: 255}
	lightPurple2 = color.NRGBA{R: 121, G: 150, B: 223, A: 255}
	darkPurple2 = color.NRGBA{R: 101, G: 89, B: 223, A: 255}
	// dm* colors: dark-mode background swatch and text.
	dmBackground = color.NRGBA{R: 12, G: 12, B: 15, A: 255}
	dmDarkBackground = black
	dmLightBackground = color.NRGBA{R: 27, G: 22, B: 33, A: 255}
	dmText = color.NRGBA{R: 194, G: 196, B: 199, A: 255}
)
// New builds the default (light) theme — green primary, teal secondary and a
// gray background swatch — layered on top of gio's material theme.
func New() *Theme {
	gioTheme := material.NewTheme(gofont.Collection())
	var t Theme
	t.Theme = gioTheme
	t.Primary = Swatch{
		Default: PairFor(green),
		Light: PairFor(brightGreen),
		Dark: PairFor(darkGreen),
	}
	t.Secondary = Swatch{
		Default: PairFor(teal),
		Light: PairFor(brightTeal),
		Dark: PairFor(darkTeal),
	}
	t.Background = Swatch{
		Default: PairFor(lightGray),
		Light: PairFor(white),
		Dark: PairFor(gray),
	}
	// The material palette's contrast colors track the primary swatch.
	t.Theme.Palette.ContrastBg = t.Primary.Default.Bg
	t.Theme.Palette.ContrastFg = t.Primary.Default.Fg
	// Node-role colors point into the swatches above, so palette tweaks
	// propagate. NOTE(review): Siblings aliases the same pointer as
	// Unselected — confirm the aliasing is intentional.
	t.Ancestors = &t.Secondary.Default.Bg
	t.Descendants = &t.Secondary.Default.Bg
	t.Selected = &t.Secondary.Light.Bg
	t.Unselected = &t.Background.Light.Bg
	t.Siblings = t.Unselected
	return &t
}
// ToDark switches the theme's swatches to their dark-mode variants in place
// and re-derives the material palette. The original assigned the Background
// swatch twice (darkGray/veryDarkGray/black, then the dm* values); the first
// three assignments were dead stores and have been removed — the dm* colors
// are the ones that took effect.
func (t *Theme) ToDark() {
	t.Primary.Default = PairFor(purple1)
	t.Primary.Light = PairFor(lightPurple1)
	t.Primary.Dark = PairFor(darkPurple1)
	t.Secondary.Default = PairFor(purple2)
	t.Secondary.Light = PairFor(lightPurple2)
	t.Secondary.Dark = PairFor(darkPurple2)
	t.Background.Default = PairFor(dmBackground)
	t.Background.Light = PairFor(dmLightBackground)
	t.Background.Dark = PairFor(dmDarkBackground)
	// apply to theme: swap fg/bg and recompute the contrast pair from the
	// new primary color.
	t.Theme.Palette.Fg, t.Theme.Palette.Bg = t.Theme.Palette.Bg, t.Theme.Palette.Fg
	t.Theme.Palette = ApplyAsContrast(t.Theme.Palette, t.Primary.Default)
}
// ContrastPair is a background color plus a foreground (text) color chosen
// to contrast with it; see PairFor.
type ContrastPair struct {
	Fg, Bg color.NRGBA
}

// ApplyAsContrast returns p with its contrast colors replaced by pair.
func ApplyAsContrast(p material.Palette, pair ContrastPair) material.Palette {
	p.ContrastBg = pair.Bg
	p.ContrastFg = pair.Fg
	return p
}

// ApplyAsNormal returns p with its normal fg/bg colors replaced by pair.
func ApplyAsNormal(p material.Palette, pair ContrastPair) material.Palette {
	p.Bg = pair.Bg
	p.Fg = pair.Fg
	return p
}

// Swatch holds the light, dark and default variants of one theme color.
type Swatch struct {
	Light, Dark, Default ContrastPair
}

// Theme extends gio's material theme with this app's swatches and the colors
// used to paint specific node roles in the UI (see New for how the pointer
// fields alias the swatches).
type Theme struct {
	*material.Theme
	Primary Swatch
	Secondary Swatch
	Background Swatch
	Ancestors, Descendants, Selected, Siblings, Unselected *color.NRGBA
}
package main
import (
"bufio"
"encoding/csv"
"fmt"
"io"
"log"
"math"
"math/rand"
"os"
"runtime"
"sort"
"strconv"
"time"
)
// Data is a struct to store the attributes of a row in the CSV file.
type Data struct {
	X1 float64 // attribute 1
	X2 float64 // attribute 2
	X3 float64 // attribute 3
	X4 float64 // attribute 4
	Y string // class label
}

// StringToData converts a CSV row into a Data value: the first four columns
// are parsed as float64 attributes and the fifth is kept as the class label.
// A parse failure aborts the program via log.Fatal, preserving the original
// fail-fast behavior. The four identical parse blocks were folded into one
// helper.
func StringToData(row []string) Data {
	// parse centralizes the float conversion and its fail-fast error handling.
	parse := func(field string) float64 {
		v, err := strconv.ParseFloat(field, 64)
		if err != nil {
			log.Fatal(err)
		}
		return v
	}
	return Data{
		X1: parse(row[0]),
		X2: parse(row[1]),
		X3: parse(row[2]),
		X4: parse(row[3]),
		Y:  row[4],
	}
}
// DataSplit randomly splits the training data into 25% validation rows and
// 75% test rows. The input slice's backing array is reused for the test
// slice, so the caller must not keep using `data` afterwards.
//
// Fixes over the original: the split was hard-coded to 1000 rows (now a
// quarter of the input, which is 1000 for the expected 4000-row file), and
// Perm(1000) only ever sampled validation rows from the FIRST 1000 rows of
// the file; indices are now drawn uniformly from the whole data set.
func DataSplit(data []Data) ([]Data, []Data) {
	r := rand.New(rand.NewSource(time.Now().Unix()))
	n := len(data) / 4
	// n distinct indices drawn uniformly from the whole data set.
	idx := r.Perm(len(data))[:n]

	val := make([]Data, n)
	selected := make(map[int]bool, n)
	for i, j := range idx {
		val[i] = data[j]
		selected[j] = true
	}

	// Keep the unselected rows, filtering in place over the same backing
	// array.
	test := data[:0]
	for i := range data {
		if !selected[i] {
			test = append(test, data[i])
		}
	}
	return val, test
}
// EuclideanDistance returns the Euclidean distance between the four numeric
// attributes of a and b; the class label is not part of the distance.
func EuclideanDistance(a, b Data) float64 {
	d1 := a.X1 - b.X1
	d2 := a.X2 - b.X2
	d3 := a.X3 - b.X3
	d4 := a.X4 - b.X4
	// Squaring directly is clearer and cheaper than math.Pow(x, 2).
	return math.Sqrt(d1*d1 + d2*d2 + d3*d3 + d4*d4)
}
// Point holds the distance between two datas
type Point struct {
	Label string
	Distance float64
}

// GetNeighbors returns the k training rows closest to a, ordered by
// ascending distance with ties broken by label.
func GetNeighbors(a Data, train []Data, k int) []Point {
	distances := make([]Point, 0, len(train))
	for _, b := range train {
		distances = append(distances, Point{
			Label:    b.Y,
			Distance: EuclideanDistance(a, b),
		})
	}

	sort.Slice(distances, func(x, y int) bool {
		if distances[x].Distance != distances[y].Distance {
			return distances[x].Distance < distances[y].Distance
		}
		return distances[x].Label < distances[y].Label
	})

	return distances[:k]
}
// GetResponse predicts the class label by majority vote among the neighbors.
// Only labels "0" and "1" are counted; a tie goes to "1", matching the
// original behavior.
func GetResponse(neighbors []Point) string {
	zeros, ones := 0, 0
	for _, p := range neighbors {
		switch p.Label {
		case "0":
			zeros++
		case "1":
			ones++
		}
	}
	if zeros > ones {
		return "0"
	}
	return "1"
}
// GetAccuracy returns the fraction of predictions that match the true class
// labels of the validation rows; prediction[i] corresponds to val[i].
func GetAccuracy(val []Data, prediction []string) float64 {
	correct := 0
	for i, v := range val {
		if v.Y == prediction[i] {
			correct++
		}
	}
	return float64(correct) / float64(len(val))
}
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
// READ THE DATA OF GIVEN DATA TRAIN
csvFile, _ := os.Open("./data/DataTrain_Tugas_2_AI.csv.csv")
reader := csv.NewReader(bufio.NewReader(csvFile))
defer csvFile.Close()
// Store the data into array of Data
var dataTrain []Data
reader.Read()
for {
row, err := reader.Read()
if err == io.EOF {
break
} else if err != nil {
log.Fatal(err)
}
dt := StringToData(row)
dataTrain = append(dataTrain, dt)
}
// READ THE DATA OF GIVEN DATA TEST
csvFile, _ = os.Open("./data/DataTest_Tugas_2_AI.csv.csv")
reader = csv.NewReader(bufio.NewReader(csvFile))
defer csvFile.Close()
// Store the data in dataTrain array
var dataTest []Data
reader.Read()
for {
row, err := reader.Read()
if err == io.EOF {
break
} else if err != nil {
log.Fatal(err)
}
dt := StringToData(row)
dataTest = append(dataTest, dt)
}
// Split data train into data validation and data test
// Please note data test used for validation is not same as data test used for prediction
val, test := DataSplit(dataTrain)
// Print the length of data validation and data test
fmt.Println("data validation:", len(val), "data test:", len(test))
// Initialize the best k and its accuracy
bestK := 1
bestAcc := 0.00
// Try to get the best k from 1 to 100
for i := 1; i <= 100; i++ {
prediction := []string{}
// Perform the validation from data test to data validation
for j := 0; j < len(val); j++ {
neighbors := GetNeighbors(val[j], test, i)
result := GetResponse(neighbors)
prediction = append(prediction, result)
}
acc := GetAccuracy(val, prediction)
// Print the current k and its accuracy
fmt.Println("k:", i, "acc:", acc)
if acc > bestAcc {
bestAcc = acc
bestK = i
}
}
// Print the result
fmt.Println("Best k:", bestK, "acc: ", bestAcc)
// Use the best k based on our observation
k := bestK
// Get prediction for given data test
for i, dt := range dataTest {
neighbors := GetNeighbors(dt, dataTrain, k)
dataTest[i].Y = GetResponse(neighbors)
}
// Make file out prediction of the data test
outFile, _ := os.Create("Prediksi_Tugas2AI_1301174099.csv")
defer outFile.Close()
writer := csv.NewWriter(outFile)
defer writer.Flush()
// Write the class value for the data
for _, r := range dataTest {
csvData := []string{
fmt.Sprintf("%s", r.Y),
}
if err := writer.Write(csvData); err != nil {
log.Fatalln("ERROR WRITING RECORD TO CSV:", err)
}
}
} | main.go | 0.517815 | 0.560674 | main.go | starcoder |
package util
import "math"
// getExpectedValue returns the arithmetic mean of values.
func getExpectedValue(values []float64) float64 {
	total := 0.0
	for _, v := range values {
		total += v
	}
	return total / float64(len(values))
}
// getDispersion returns the population variance of values around the given
// expected value.
func getDispersion(values []float64, expectedValue float64) float64 {
	total := 0.0
	for _, v := range values {
		total += math.Pow(v-expectedValue, 2)
	}
	return total / float64(len(values))
}
// getStandardDeviation returns the square root of the given variance.
func getStandardDeviation(dispersion float64) float64 {
	return math.Sqrt(dispersion)
}
// GetStatisticValues computes the mean, variance and standard deviation of
// values in one call.
func GetStatisticValues(values []float64) (float64, float64, float64) {
	mean := getExpectedValue(values)
	variance := getDispersion(values, mean)
	return mean, variance, getStandardDeviation(variance)
}
// max returns the larger of r1 and r2 (r1 on ties). Kept as a helper so the
// file does not depend on the Go 1.21 builtin.
func max(r1, r2 float64) float64 {
	if r1 >= r2 {
		return r1
	}
	return r2
}
// getNewLinearValue linearly maps value from [0, 1] onto the interval [a, b].
func getNewLinearValue(a, b, value float64) float64 {
	return a + (b-a)*value
}
/* 1 - Linear (uniform) random values */

// GetLinear maps each uniform [0, 1] draw in values onto the interval [a, b].
// The result slice is preallocated and the redundant `index, _` range form
// was replaced with a value range.
func GetLinear(values []float64, a, b float64) []float64 {
	newValues := make([]float64, 0, len(values))
	for _, v := range values {
		newValues = append(newValues, getNewLinearValue(a, b, v))
	}
	return newValues
}
// GetLinearValues returns the theoretical mean and variance of the uniform
// distribution on [a, b]: (a+b)/2 and (b-a)^2/12.
func GetLinearValues(a, b float64) (float64, float64) {
	mean := (a + b) / 2
	variance := math.Pow(b-a, 2) / 12
	return mean, variance
}
/* 2 - Get Gauss Random values (EXP and STANDDEV is immutable) */

// GetGauss approximates normally distributed values from uniform draws via
// the central-limit method: each output sums `count` uniforms and rescales
// by sqrt(12/count), so the classic choice count == 12 makes that factor 1.
// NOTE(review): the outer loop condition stops at len(values)-1, so the last
// input draw never starts a new batch — confirm this off-by-one is intended.
func GetGauss(expValue, standardDeviation float64, count int, values []float64) []float64 {
	var result []float64
	index := 0
	for index < len(values) - 1 {
		var sum float64
		for i := 0; i < count; i++ {
			sum = sum + values[index]
			index++
			// Stop early when the input is exhausted mid-batch; the partial
			// sum still yields an output value below.
			if (index > len(values) - 1) {
				break;
			}
		}
		newValue := expValue + standardDeviation * math.Sqrt(12 / float64(count)) * (sum - float64(count) / 2)
		result = append(result, newValue)
	}
	return result
}
/* 3 - Get exponential values */
func GetExponential(lambda float64, values []float64) []float64 {
var result []float64
for index, _ := range values {
element := -(1 / lambda) * math.Log(values[index])
result = append(result, element)
}
return result
}
// GetExponentialValues returns the theoretical mean (1/lambda) and variance
// (1/lambda^2) of the exponential distribution with rate lambda.
func GetExponentialValues(lambda float64) (float64, float64) {
	return 1 / lambda, 1 / math.Pow(lambda, 2)
}
/* 4 - Get Gamma values */

// GetGamma generates gamma (Erlang) distributed values with integer shape nu
// and rate lambda by summing the logs of nu uniform draws, i.e. summing nu
// exponential variates.
// NOTE(review): same batching caveat as GetGauss — the outer loop stops at
// len(values)-1 and the final batch may be cut short; confirm intended.
func GetGamma(lambda float64, nu int, values []float64) []float64 {
	var result []float64
	index := 0
	for index < len(values) - 1 {
		sum := 0.0
		for i := 0; i < nu; i++ {
			sum = sum + math.Log(values[index])
			index++
			if (index > len(values) - 1) {
				break;
			}
		}
		newValue := -(1 / lambda) * (sum)
		result = append(result, newValue)
	}
	return result
}
// GetGammaValues returns the theoretical mean and variance of the gamma
// (Erlang) distribution with shape nu and rate lambda: mean = nu/lambda and
// variance = nu/lambda^2.
// Bug fix: the variance was previously computed as nu/nu^2 (i.e. 1/nu); the
// correct nu/lambda^2 is consistent with GetExponentialValues (the nu == 1
// case).
func GetGammaValues(lambda float64, nu float64) (float64, float64) {
	return nu / lambda, nu / math.Pow(lambda, 2)
}
/* 5 - Get Triangle values */

// GetTriangle generates triangularly distributed values on [a, b] by mapping
// the max of two uniform draws onto the interval.
// NOTE(review): the pairs (i, i+1) overlap, so consecutive outputs share a
// draw and are correlated — confirm whether a step of 2 was intended.
func GetTriangle(a, b float64, values []float64) []float64 {
	var result []float64
	for i := 0; i < len(values) - 2; i++ {
		result = append(result, getNewLinearValue(a, b, max(values[i], values[i+1])))
	}
	return result
}
/* 6 - Get Simpson values */
func GetSimpson(a, b float64, values []float64) []float64 {
firstValues := GetLinear(values, a/2, b/2)
secondValues := GetLinear(values, a/2, b/2)
var result []float64
for i := 0; i < len(values) - 2; i+=2 {
result = append(result, firstValues[i] + secondValues[i+1])
}
return result
} | src/main/util/math.go | 0.661267 | 0.644057 | math.go | starcoder |
package geometries
import (
"github.com/go-gl/mathgl/mgl32"
three "github.com/tobscher/go-three"
)
// Box defines a box geometry consisting of 6 faces
type Box struct {
	three.Geometry
	width float32
	height float32
	depth float32
}

// NewBox creates a new Box with the given width, height and depth.
// This method will generate the required vertices and its uv mappings.
// The box is centered on the origin: each face lists 4 corner vertices,
// every face reuses the same 4 UV corners, and each face is built from two
// triangles over its 4 vertices.
func NewBox(width, height, depth float32) *Box {
	box := Box{
		width: width,
		height: height,
		depth: depth,
	}

	// Half-extents so the box is symmetric about the origin.
	halfWidth := width / 2.0
	halfHeight := height / 2.0
	halfDepth := depth / 2.0

	// 24 vertices: 4 per face (faces do not share vertices so each face can
	// carry its own UV mapping).
	vertices := []mgl32.Vec3{
		// front
		mgl32.Vec3{-halfWidth, -halfHeight, halfDepth},
		mgl32.Vec3{halfWidth, -halfHeight, halfDepth},
		mgl32.Vec3{halfWidth, halfHeight, halfDepth},
		mgl32.Vec3{-halfWidth, halfHeight, halfDepth},

		// top
		mgl32.Vec3{-halfWidth, halfHeight, halfDepth},
		mgl32.Vec3{halfWidth, halfHeight, halfDepth},
		mgl32.Vec3{halfWidth, halfHeight, -halfDepth},
		mgl32.Vec3{-halfWidth, halfHeight, -halfDepth},

		// back
		mgl32.Vec3{halfWidth, -halfHeight, -halfDepth},
		mgl32.Vec3{-halfWidth, -halfHeight, -halfDepth},
		mgl32.Vec3{-halfWidth, halfHeight, -halfDepth},
		mgl32.Vec3{halfWidth, halfHeight, -halfDepth},

		//bottom
		mgl32.Vec3{-halfWidth, -halfHeight, -halfDepth},
		mgl32.Vec3{halfWidth, -halfHeight, -halfDepth},
		mgl32.Vec3{halfWidth, -halfHeight, halfDepth},
		mgl32.Vec3{-halfWidth, -halfHeight, halfDepth},

		// left
		mgl32.Vec3{-halfWidth, -halfHeight, -halfDepth},
		mgl32.Vec3{-halfWidth, -halfHeight, halfDepth},
		mgl32.Vec3{-halfWidth, halfHeight, halfDepth},
		mgl32.Vec3{-halfWidth, halfHeight, -halfDepth},

		// right
		mgl32.Vec3{halfWidth, -halfHeight, halfDepth},
		mgl32.Vec3{halfWidth, -halfHeight, -halfDepth},
		mgl32.Vec3{halfWidth, halfHeight, -halfDepth},
		mgl32.Vec3{halfWidth, halfHeight, halfDepth},
	}

	// Every face maps the full texture: the same 4 UV corners, 6 times.
	var uvs []mgl32.Vec2
	for i := 0; i < 6; i++ {
		uvs = append(uvs,
			mgl32.Vec2{1.0, 1.0},
			mgl32.Vec2{0.0, 1.0},
			mgl32.Vec2{0.0, 0.0},
			mgl32.Vec2{1.0, 0.0},
		)
	}

	// Two triangles per face, indexing into the 24 vertices above.
	faces := []*three.Face{
		// front
		three.NewFace(0, 1, 2),
		three.NewFace(2, 3, 0),

		// top
		three.NewFace(4, 5, 6),
		three.NewFace(6, 7, 4),

		// back
		three.NewFace(8, 9, 10),
		three.NewFace(10, 11, 8),

		// bottom
		three.NewFace(12, 13, 14),
		three.NewFace(14, 15, 12),

		// left
		three.NewFace(16, 17, 18),
		three.NewFace(18, 19, 16),

		// right
		three.NewFace(20, 21, 22),
		three.NewFace(22, 23, 20),
	}

	box.SetVertices(vertices)
	box.SetUVs(uvs)
	box.SetFaces(faces)

	return &box
}

// NewCube generates a new Box for the given side.
// Vertices and VertexUvs will be created accordingly.
func NewCube(size float32) *Box {
	return NewBox(size, size, size)
}
package rand
import (
"github.com/pkg/errors"
"github.com/seehuhn/mt19937"
)
// A Generator uses a goroutine to populate batches of random numbers.
// Currently we use a Mersenne twister implementation instead of the
// default Go implementation (which is fast, but has a much shorter
// period than MT, and we use a LOT of random draws)
type Generator struct {
	ch chan int64 // buffered stream of pre-drawn values, filled by a background goroutine
}
// NewGeneratorSlice starts a new background PRNG based on the given seed
// slice. If the slice has only one entry, then the MT generator is
// initialized with Seed. Otherwise SeedFromSlice is used.
// NOTE(review): the filler goroutine loops forever, so a Generator is never
// garbage collected — create few, long-lived Generators.
func NewGeneratorSlice(seed []uint64) (*Generator, error) {
	if len(seed) < 1 {
		return nil, errors.Errorf("Invalid generator seed array %v", seed)
	}
	// Buffer a batch of draws so consumers rarely block on the generator.
	numChan := make(chan int64, 1024)
	r := mt19937.New()
	if len(seed) == 1 {
		r.Seed(int64(seed[0]))
	} else {
		r.SeedFromSlice(seed)
	}
	// The goroutine owns r exclusively; consumers only touch the channel.
	go func() {
		for {
			numChan <- r.Int63()
		}
	}()
	g := &Generator{
		ch: numChan,
	}
	return g, nil
}

// NewGenerator is a helper wrapper around NewGeneratorSlice for a single
// int64 seed.
func NewGenerator(seed int64) (*Generator, error) {
	return NewGeneratorSlice([]uint64{uint64(seed)})
}
// Int63 provides the same interface as Go's math/rand, but with pre-generation:
// it simply receives the next pre-drawn value from the background goroutine.
func (g *Generator) Int63() int64 {
	return <-g.ch
}

// Int63n is a copy of the current Go code: rejection sampling to avoid
// modulo bias, with a mask fast path for powers of two. Panics on n <= 0,
// matching math/rand.
func (g *Generator) Int63n(n int64) int64 {
	if n <= 0 {
		panic("invalid argument to Int63n")
	}
	if n&(n-1) == 0 { // n is power of two, can mask
		return g.Int63() & (n - 1)
	}
	// Reject draws above the largest multiple of n to keep uniformity.
	max := int64((1 << 63) - 1 - (1<<63)%uint64(n))
	v := g.Int63()
	for v > max {
		v = g.Int63()
	}
	return v % n
}

// Int31 is just a copy of the golang impl: the high 32 bits of an Int63 draw.
func (g *Generator) Int31() int32 {
	return int32(g.Int63() >> 32)
}

// Int31n is just a copy of the golang impl; see Int63n for the rejection
// sampling rationale.
func (g *Generator) Int31n(n int32) int32 {
	if n <= 0 {
		panic("invalid argument to Int31n")
	}
	if n&(n-1) == 0 { // n is power of two, can mask
		return g.Int31() & (n - 1)
	}
	max := int32((1 << 31) - 1 - (1<<31)%uint32(n))
	v := g.Int31()
	for v > max {
		v = g.Int31()
	}
	return v % n
}

// Float64 uses the commented, simpler implementation since we don't have the
// same support requirements for users.
func (g *Generator) Float64() float64 {
	// See the Go lang comments for Rand Float64 implementation for details
	return float64(g.Int63n(1<<53)) / (1 << 53)
}
package main
// problemMapping indexes chapters by their chapter number.
type problemMapping map[int]chapter

// chapter groups the exercises of one book chapter.
type chapter struct {
	Number int
	Name string
	Problems []problem
}

// problem is a single exercise and the folder that holds its solution.
type problem struct {
	Name string
	Folder string
}
// isChapterNumberValid reports whether n is a known chapter number.
func isChapterNumberValid(n int) bool {
	_, exists := chapters[n]
	return exists
}
// getAllChapterNumbers returns every chapter number present in the mapping,
// in map-iteration (i.e. unspecified) order.
func getAllChapterNumbers() []int {
	numbers := make([]int, 0, len(chapters))
	for number := range chapters {
		numbers = append(numbers, number)
	}
	return numbers
}
var chapters = problemMapping{
4: {
Name: "Chapter 04: Primitive Types",
Number: 4,
Problems: []problem{
{
Name: "4.00 Bootcamp: Primitive Types",
Folder: "count_bits",
},
{
Name: "4.01 Computing the parity of a word",
Folder: "parity",
},
{
Name: "4.02 Swap bits",
Folder: "swap_bits",
},
{
Name: "4.03 Reverse bits",
Folder: "reverse_bits",
},
{
Name: "4.04 Find a closest integer with the same weight",
Folder: "closest_int_same_weight",
},
{
Name: "4.05 Compute product without arithmetical operators",
Folder: "primitive_multiply",
},
{
Name: "4.06 Compute quotient without arithmetical operators",
Folder: "primitive_divide",
},
{
Name: "4.07 Compute pow(x,y)",
Folder: "power_xy",
},
{
Name: "4.08 Reverse digits",
Folder: "reverse_digits",
},
{
Name: "4.09 Check if a decimal integer is a palindrome",
Folder: "is_number_palindromic",
},
{
Name: "4.10 Generate uniform random numbers",
Folder: "uniform_random_number",
},
{
Name: "4.11 Rectangle intersection",
Folder: "rectangle_intersection",
},
},
},
5: {
Name: "Chapter 05: Arrays",
Number: 5,
Problems: []problem{
{
Name: "5.00 Bootcamp: Arrays",
Folder: "even_odd_array",
},
{
Name: "5.01 The Dutch national flag problem",
Folder: "dutch_national_flag",
},
{
Name: "5.02 Increment an arbitrary-precision integer",
Folder: "int_as_array_increment",
},
{
Name: "5.03 Multiply two arbitrary-precision integers",
Folder: "int_as_array_multiply",
},
{
Name: "5.04 Advancing through an array",
Folder: "advance_by_offsets",
},
{
Name: "5.05 Delete duplicates from a sorted array",
Folder: "sorted_array_remove_dups",
},
{
Name: "5.06 Buy and sell a stock once",
Folder: "buy_and_sell_stock",
},
{
Name: "5.07 Buy and sell a stock twice",
Folder: "buy_and_sell_stock_twice",
},
{
Name: "5.08 Computing an alternation",
Folder: "alternating_array",
},
{
Name: "5.09 Enumerate all primes to n",
Folder: "prime_sieve",
},
{
Name: "5.10 Permute the elements of an array",
Folder: "apply_permutation",
},
{
Name: "5.11 Compute the next permutation",
Folder: "next_permutation",
},
{
Name: "5.12 Sample offline data",
Folder: "offline_sampling",
},
{
Name: "5.13 Sample online data",
Folder: "online_sampling",
},
{
Name: "5.14 Compute a random permutation",
Folder: "random_permutation",
},
{
Name: "5.15 Compute a random subset",
Folder: "random_subset",
},
{
Name: "5.16 Generate nonuniform random numbers",
Folder: "nonuniform_random_number",
},
{
Name: "5.17 The Sudoku checker problem",
Folder: "is_valid_sudoku",
},
{
Name: "5.18 Compute the spiral ordering of a 2D array",
Folder: "spiral_ordering",
},
{
Name: "5.19 Rotate a 2D array",
Folder: "matrix_rotation",
},
{
Name: "5.20 Compute rows in Pascal's Triangle",
Folder: "pascal_triangle",
},
},
},
6: {
Name: "Chapter 06: Strings",
Number: 6,
Problems: []problem{
{
Name: "6.00 Bootcamp: Strings",
Folder: "is_string_palindromic",
},
{
Name: "6.01 Interconvert strings and integers",
Folder: "string_integer_interconversion",
},
{
Name: "6.02 Base conversion",
Folder: "convert_base",
},
{
Name: "6.03 Compute the spreadsheet column encoding",
Folder: "spreadsheet_encoding",
},
{
Name: "6.04 Replace and remove",
Folder: "replace_and_remove",
},
{
Name: "6.05 Test palindromicity",
Folder: "is_string_palindromic_punctuation",
},
{
Name: "6.06 Reverse all the words in a sentence",
Folder: "reverse_words",
},
{
Name: "6.07 The look-and-say problem",
Folder: "look_and_say",
},
{
Name: "6.08 Convert from Roman to decimal",
Folder: "roman_to_integer",
},
{
Name: "6.09 Compute all valid IP addresses",
Folder: "valid_ip_addresses",
},
{
Name: "6.10 Write a string sinusoidally",
Folder: "snake_string",
},
{
Name: "6.11 Implement run-length encoding",
Folder: "run_length_compression",
},
{
Name: "6.12 Find the first occurrence of a substring",
Folder: "substring_match",
},
},
},
7: {
Name: "Chapter 07: Linked Lists",
Number: 7,
Problems: []problem{
{
Name: "7.00 Bootcamp: Delete From List",
Folder: "delete_from_list",
},
{
Name: "7.00 Bootcamp: Insert In List",
Folder: "insert_in_list",
},
{
Name: "7.00 Bootcamp: Search In List",
Folder: "search_in_list",
},
{
Name: "7.01 Merge two sorted lists",
Folder: "sorted_lists_merge",
},
{
Name: "7.02 Reverse a single sublist",
Folder: "reverse_sublist",
},
{
Name: "7.03 Test for cyclicity",
Folder: "is_list_cyclic",
},
{
Name: "7.04 Test for overlapping lists---lists are cycle-free",
Folder: "do_terminated_lists_overlap",
},
{
Name: "7.05 Test for overlapping lists---lists may have cycles",
Folder: "do_lists_overlap",
},
{
Name: "7.06 Delete a node from a singly linked list",
Folder: "delete_node_from_list",
},
{
Name: "7.07 Remove the kth last element from a list",
Folder: "delete_kth_last_from_list",
},
{
Name: "7.08 Remove duplicates from a sorted list",
Folder: "remove_duplicates_from_sorted_list",
},
{
Name: "7.09 Implement cyclic right shift for singly linked lists",
Folder: "list_cyclic_right_shift",
},
{
Name: "7.10 Implement even-odd merge",
Folder: "even_odd_list_merge",
},
{
Name: "7.11 Test whether a singly linked list is palindromic",
Folder: "is_list_palindromic",
},
{
Name: "7.12 Implement list pivoting",
Folder: "pivot_list",
},
{
Name: "7.13 Add list-based integers",
Folder: "int_as_list_add",
},
},
},
8: {
Name: "Chapter 08: Stacks and Queues",
Number: 8,
Problems: []problem{
{
Name: "8.01 Implement a stack with max API",
Folder: "stack_with_max",
},
{
Name: "8.02 Evaluate RPN expressions",
Folder: "evaluate_rpn",
},
{
Name: "8.03 Is a string well-formed?",
Folder: "is_valid_parenthesization",
},
{
Name: "8.04 Normalize pathnames",
Folder: "directory_path_normalization",
},
{
Name: "8.05 Compute buildings with a sunset view",
Folder: "sunset_view",
},
{
Name: "8.06 Compute binary tree nodes in order of increasing depth",
Folder: "tree_level_order",
},
{
Name: "8.07 Implement a circular queue",
Folder: "circular_queue",
},
{
Name: "8.08 Implement a queue using stacks",
Folder: "queue_from_stacks",
},
{
Name: "8.09 Implement a queue with max API",
Folder: "queue_with_max",
},
},
},
9: {
Name: "Chapter 09: Binary Trees",
Number: 9,
Problems: []problem{
{
Name: "9.01 Test if a binary tree is height-balanced",
Folder: "is_tree_balanced",
},
{
Name: "9.02 Test if a binary tree is symmetric",
Folder: "is_tree_symmetric",
},
{
Name: "9.03 Compute the lowest common ancestor in a binary tree",
Folder: "lowest_common_ancestor",
},
{
Name: "9.04 Compute the LCA when nodes have parent pointers",
Folder: "lowest_common_ancestor_with_parent",
},
{
Name: "9.05 Sum the root-to-leaf paths in a binary tree",
Folder: "sum_root_to_leaf",
},
{
Name: "9.06 Find a root to leaf path with specified sum",
Folder: "path_sum",
},
{
Name: "9.07 Implement an inorder traversal without recursion",
Folder: "tree_inorder",
},
{
Name: "9.08 Compute the kth node in an inorder traversal",
Folder: "kth_node_in_tree",
},
{
Name: "9.09 Compute the successor",
Folder: "successor_in_tree",
},
{
Name: "9.10 Implement an inorder traversal with constant space",
Folder: "tree_with_parent_inorder",
},
{
Name: "9.11 Reconstruct a binary tree from traversal data",
Folder: "tree_from_preorder_inorder",
},
{
Name: "9.12 Reconstruct a binary tree from a preorder traversal with markers",
Folder: "tree_from_preorder_with_null",
},
{
Name: "9.13 Compute the leaves of a binary tree",
Folder: "tree_connect_leaves",
},
{
Name: "9.14 Compute the exterior of a binary tree",
Folder: "tree_exterior",
},
{
Name: "9.15 Compute the right sibling tree",
Folder: "tree_right_sibling",
},
},
},
10: {
Name: "Chapter 10: Heaps",
Number: 10,
Problems: []problem{
{
Name: "10.01 Merge sorted files",
Folder: "sorted_arrays_merge",
},
{
Name: "10.02 Sort an increasing-decreasing array",
Folder: "sort_increasing_decreasing_array",
},
{
Name: "10.03 Sort an almost-sorted array",
Folder: "sort_almost_sorted_array",
},
{
Name: "10.04 Compute the k closest stars",
Folder: "k_closest_stars",
},
{
Name: "10.05 Compute the median of online data",
Folder: "online_median",
},
{
Name: "10.06 Compute the k largest elements in a max-heap",
Folder: "k_largest_in_heap",
},
},
},
11: {
Name: "Chapter 11: Searching",
Number: 11,
Problems: []problem{
{
Name: "11.01 Search a sorted array for first occurrence of k",
Folder: "search_first_key",
},
{
Name: "11.02 Search a sorted array for entry equal to its index",
Folder: "search_entry_equal_to_index",
},
{
Name: "11.03 Search a cyclically sorted array",
Folder: "search_shifted_sorted_array",
},
{
Name: "11.04 Compute the integer square root",
Folder: "int_square_root",
},
{
Name: "11.05 Compute the real square root",
Folder: "real_square_root",
},
{
Name: "11.06 Search in a 2D sorted array",
Folder: "search_row_col_sorted_matrix",
},
{
Name: "11.07 Find the min and max simultaneously",
Folder: "search_for_min_max_in_array",
},
{
Name: "11.08 Find the kth largest element",
Folder: "kth_largest_in_array",
},
{
Name: "11.09 Find the missing IP address",
Folder: "absent_value_array",
},
{
Name: "11.10 Find the duplicate and missing elements",
Folder: "search_for_missing_element",
},
},
},
12: {
Name: "Chapter 12: Hash Tables",
Number: 12,
Problems: []problem{
{
Name: "12.00 Bootcamp: Hash Tables",
Folder: "anagrams",
},
{
Name: "12.01 Test for palindromic permutations",
Folder: "is_string_permutable_to_palindrome",
},
{
Name: "12.02 Is an anonymous letter constructible?",
Folder: "is_anonymous_letter_constructible",
},
{
Name: "12.03 Implement an ISBN cache",
Folder: "lru_cache",
},
{
Name: "12.04 Compute the LCA, optimizing for close ancestors",
Folder: "lowest_common_ancestor_close_ancestor",
},
{
Name: "12.05 Find the nearest repeated entries in an array",
Folder: "nearest_repeated_entries",
},
{
Name: "12.06 Find the smallest subarray covering all values",
Folder: "smallest_subarray_covering_set",
},
{
Name: "12.07 Find smallest subarray sequentially covering all values",
Folder: "smallest_subarray_covering_all_values",
},
{
Name: "12.08 Find the longest subarray with distinct entries",
Folder: "longest_subarray_with_distinct_values",
},
{
Name: "12.09 Find the length of a longest contained interval",
Folder: "longest_contained_interval",
},
{
Name: "12.10 Compute all string decompositions",
Folder: "string_decompositions_into_dictionary_words",
},
{
Name: "12.11 Test the Collatz conjecture",
Folder: "collatz_checker",
},
},
},
13: {
Name: "Chapter 13: Sorting",
Number: 13,
Problems: []problem{
{
Name: "13.01 Compute the intersection of two sorted arrays",
Folder: "intersect_sorted_arrays",
},
{
Name: "13.02 Merge two sorted arrays",
Folder: "two_sorted_arrays_merge",
},
{
Name: "13.03 Computing the h-index",
Folder: "h_index",
},
{
Name: "13.04 Remove first-name duplicates",
Folder: "remove_duplicates",
},
{
Name: "13.05 Smallest nonconstructible value",
Folder: "smallest_nonconstructible_value",
},
{
Name: "13.06 Render a calendar",
Folder: "calendar_rendering",
},
{
Name: "13.07 Merging intervals",
Folder: "interval_add",
},
{
Name: "13.08 Compute the union of intervals",
Folder: "intervals_union",
},
{
Name: "13.09 Partitioning and sorting an array with many repeated entries",
Folder: "group_equal_entries",
},
{
Name: "13.10 Team photo day---1",
Folder: "is_array_dominated",
},
{
Name: "13.11 Implement a fast sorting algorithm for lists",
Folder: "sort_list",
},
{
Name: "13.12 Compute a salary threshold",
Folder: "find_salary_threshold",
},
},
},
14: {
Name: "Chapter 14: Binary Search Trees",
Number: 14,
Problems: []problem{
{
Name: "14.00 Bootcamp: Binary Search Trees",
Folder: "search_in_bst",
},
{
Name: "14.01 Test if a binary tree satisfies the BST property",
Folder: "is_tree_a_bst",
},
{
Name: "14.02 Find the first key greater than a given value in a BST",
Folder: "search_first_greater_value_in_bst",
},
{
Name: "14.03 Find the k largest elements in a BST",
Folder: "k_largest_values_in_bst",
},
{
Name: "14.04 Compute the LCA in a BST",
Folder: "lowest_common_ancestor_in_bst",
},
{
Name: "14.05 Reconstruct a BST from traversal data",
Folder: "bst_from_preorder",
},
{
Name: "14.06 Find the closest entries in three sorted arrays",
Folder: "minimum_distance_3_sorted_arrays",
},
{
Name: "14.07 Enumerate extended integers",
Folder: "ab_sqrt_2",
},
{
Name: "14.08 Build a minimum height BST from a sorted array",
Folder: "bst_from_sorted_array",
},
{
Name: "14.09 Test if three BST nodes are totally ordered",
Folder: "descendant_and_ancestor_in_bst",
},
{
Name: "14.10 The range lookup problem",
Folder: "range_lookup_in_bst",
},
{
Name: "14.11 Add credits",
Folder: "adding_credits",
},
},
},
15: {
Name: "Chapter 15: Recursion",
Number: 15,
Problems: []problem{
{
Name: "15.00 Bootcamp: Recursion",
Folder: "euclidean_gcd",
},
{
Name: "15.01 The Towers of Hanoi problem",
Folder: "hanoi",
},
{
Name: "15.02 Compute all mnemonics for a phone number",
Folder: "phone_number_mnemonic",
},
{
Name: "15.03 Generate all nonattacking placements of n-Queens",
Folder: "n_queens",
},
{
Name: "15.04 Generate permutations",
Folder: "permutations",
},
{
Name: "15.05 Generate the power set",
Folder: "power_set",
},
{
Name: "15.06 Generate all subsets of size k",
Folder: "combinations",
},
{
Name: "15.07 Generate strings of matched parens",
Folder: "enumerate_balanced_parentheses",
},
{
Name: "15.08 Generate palindromic decompositions",
Folder: "enumerate_palindromic_decompositions",
},
{
Name: "15.09 Generate binary trees",
Folder: "enumerate_trees",
},
{
Name: "15.10 Implement a Sudoku solver",
Folder: "sudoku_solve",
},
{
Name: "15.11 Compute a Gray code",
Folder: "gray_code",
},
},
},
16: {
Name: "Chapter 16: Dynamic Programming",
Number: 16,
Problems: []problem{
{
Name: "16.00 Bootcamp: Max Sum Subarray",
Folder: "max_sum_subarray",
},
{
Name: "16.00 Bootcamp: Fibonacci",
Folder: "fibonacci",
},
{
Name: "16.01 Count the number of score combinations",
Folder: "number_of_score_combinations",
},
{
Name: "16.02 Compute the Levenshtein distance",
Folder: "levenshtein_distance",
},
{
Name: "16.03 Count the number of ways to traverse a 2D array",
Folder: "number_of_traversals_matrix",
},
{
Name: "16.04 Compute the binomial coefficients",
Folder: "binomial_coefficients",
},
{
Name: "16.05 Search for a sequence in a 2D array",
Folder: "is_string_in_matrix",
},
{
Name: "16.06 The knapsack problem",
Folder: "knapsack",
},
{
Name: "16.07 Building a search index for domains",
Folder: "is_string_decomposable_into_words",
},
{
Name: "16.08 Find the minimum weight path in a triangle",
Folder: "minimum_weight_path_in_a_triangle",
},
{
Name: "16.09 Pick up coins for maximum gain",
Folder: "picking_up_coins",
},
{
Name: "16.10 Count the number of moves to climb stairs",
Folder: "number_of_traversals_staircase",
},
{
Name: "16.11 The pretty printing problem",
Folder: "pretty_printing",
},
{
Name: "16.12 Find the longest nondecreasing subsequence",
Folder: "longest_nondecreasing_subsequence",
},
},
},
17: {
Name: "Chapter 17: Greedy Algorithms and Invariants",
Number: 17,
Problems: []problem{
{
Name: "17.00 Bootcamp: Greedy Algorithms and Invariants",
Folder: "making_change",
},
{
Name: "17.01 Compute an optimum assignment of tasks",
Folder: "task_pairing",
},
{
Name: "17.02 Schedule to minimize waiting time",
Folder: "minimum_waiting_time",
},
{
Name: "17.03 The interval covering problem",
Folder: "minimum_points_covering_intervals",
},
{
Name: "17.03 Invariant Bootcamp: Two Sum",
Folder: "two_sum",
},
{
Name: "17.04 The 3-sum problem",
Folder: "three_sum",
},
{
Name: "17.05 Find the majority element",
Folder: "majority_element",
},
{
Name: "17.06 The gasup problem",
Folder: "refueling_schedule",
},
{
Name: "17.07 Compute the maximum water trapped by a pair of vertical lines",
Folder: "max_trapped_water",
},
{
Name: "17.08 Compute the largest rectangle under the skyline",
Folder: "largest_rectangle_under_skyline",
},
},
},
18: {
Name: "Chapter 18: Graphs",
Number: 18,
Problems: []problem{
{
Name: "18.01 Search a maze",
Folder: "search_maze",
},
{
Name: "18.02 Paint a Boolean matrix",
Folder: "matrix_connected_regions",
},
{
Name: "18.03 Compute enclosed regions",
Folder: "matrix_enclosed_regions",
},
{
Name: "18.04 Deadlock detection",
Folder: "deadlock_detection",
},
{
Name: "18.05 Clone a graph",
Folder: "graph_clone",
},
{
Name: "18.06 Making wired connections",
Folder: "is_circuit_wirable",
},
{
Name: "18.07 Transform one string to another",
Folder: "string_transformability",
},
{
Name: "18.08 Team photo day---2",
Folder: "max_teams_in_photograph",
},
},
},
24: {
Name: "Chapter 24: Honors Class",
Number: 24,
Problems: []problem{
{
Name: "24.01 Compute the greatest common divisor",
Folder: "gcd",
},
{
Name: "24.02 Find the first missing positive entry",
Folder: "first_missing_positive_entry",
},
{
Name: "24.03 Buy and sell a stock at most k times",
Folder: "buy_and_sell_stock_k_times",
},
{
Name: "24.04 Compute the maximum product of all entries but one",
Folder: "max_product_all_but_one",
},
{
Name: "24.05 Compute the longest contiguous increasing subarray",
Folder: "longest_increasing_subarray",
},
{
Name: "24.06 Rotate an array",
Folder: "rotate_array",
},
{
Name: "24.07 Identify positions attacked by rooks",
Folder: "rook_attack",
},
{
Name: "24.08 Justify text",
Folder: "left_right_justify_text",
},
{
Name: "24.09 Implement list zipping",
Folder: "zip_list",
},
{
Name: "24.10 Copy a postings list",
Folder: "copy_posting_list",
},
{
Name: "24.11 Compute the longest substring with matching parens",
Folder: "longest_substring_with_matching_parentheses",
},
{
Name: "24.12 Compute the maximum of a sliding window",
Folder: "max_of_sliding_window",
},
{
Name: "24.13 Compute fair bonuses",
Folder: "bonus",
},
{
Name: "24.14 Search a sorted array of unknown length",
Folder: "search_unknown_length_array",
},
{
Name: "24.15 Search in two sorted arrays",
Folder: "kth_largest_element_in_two_sorted_arrays",
},
{
Name: "24.16 Find the kth largest element---large n, small k",
Folder: "kth_largest_element_in_long_array",
},
{
Name: "24.17 Find an element that appears only once",
Folder: "element_appearing_once",
},
{
Name: "24.18 Find the line through the most points",
Folder: "line_through_most_points",
},
{
Name: "24.19 Convert a sorted doubly linked list into a BST",
Folder: "sorted_list_to_bst",
},
{
Name: "24.20 Convert a BST to a sorted doubly linked list",
Folder: "bst_to_sorted_list",
},
{
Name: "24.21 Merge two BSTs",
Folder: "bst_merge",
},
{
Name: "24.22 Implement regular expression matching",
Folder: "regular_expression",
},
{
Name: "24.23 Synthesize an expression",
Folder: "insert_operators_in_string",
},
{
Name: "24.24 Count inversions",
Folder: "count_inversions",
},
{
Name: "24.25 Draw the skyline",
Folder: "drawing_skyline",
},
{
Name: "24.26 Measure with defective jugs",
Folder: "defective_jugs",
},
{
Name: "24.27 Compute the maximum subarray sum in a circular array",
Folder: "maximum_subarray_in_circular_array",
},
{
Name: "24.28 Determine the critical height",
Folder: "max_safe_height",
},
{
Name: "24.29 Max Square Submatrix",
Folder: "max_square_submatrix",
},
{
Name: "24.29 Max Submatrix",
Folder: "max_submatrix",
},
{
Name: "24.30 Implement Huffman coding",
Folder: "huffman_coding",
},
{
Name: "24.31 Trapping water",
Folder: "max_water_trappable",
},
{
Name: "24.32 The heavy hitter problem",
Folder: "search_frequent_items",
},
{
Name: "24.33 Find the longest subarray with sum constraint",
Folder: "longest_subarray_with_sum_constraint",
},
{
Name: "24.34 Road network",
Folder: "road_network",
},
{
Name: "24.35 Test if arbitrage is possible",
Folder: "arbitrage",
},
},
},
} | progress/problem_mapping.go | 0.654343 | 0.537648 | problem_mapping.go | starcoder |
package measurement
import (
"bytes"
"fmt"
"sync"
"time"
"github.com/HdrHistogram/hdrhistogram-go"
)
// Histogram wraps an HDR histogram with a mutex, a running sum of raw
// latencies, and a start time so throughput and averages can be derived.
type Histogram struct {
	*hdrhistogram.Histogram
	m         sync.RWMutex // guards the embedded histogram and sum
	sum       int64        // total of all raw (unclamped) latencies, in nanoseconds
	startTime time.Time    // when measurement began; used to compute elapsed time
}
// HistInfo is a point-in-time summary of a Histogram as produced by GetInfo.
// Latency fields (Sum, Avg, P50..Max) are in milliseconds, Elapsed is in
// seconds, and Ops is operations per second.
type HistInfo struct {
	Elapsed float64
	Sum     float64
	Count   int64
	Ops     float64
	Avg     float64
	P50     float64
	P90     float64
	P95     float64
	P99     float64
	P999    float64
	Max     float64
}
// NewHistogram creates a Histogram tracking latencies between minLat and
// maxLat with sf significant figures, stamped with the current time.
func NewHistogram(minLat, maxLat time.Duration, sf int) *Histogram {
	return &Histogram{Histogram: hdrhistogram.New(minLat.Nanoseconds(), maxLat.Nanoseconds(), sf), startTime: time.Now()}
}
// Measure records one latency sample. The value stored in the histogram is
// clamped into its trackable range, but the running sum keeps the raw value.
func (h *Histogram) Measure(rawLatency time.Duration) {
	// Clamp the recorded value into the histogram's trackable range.
	low := time.Duration(h.LowestTrackableValue())
	high := time.Duration(h.HighestTrackableValue())
	clamped := rawLatency
	switch {
	case clamped < low:
		clamped = low
	case clamped > high:
		clamped = high
	}
	h.m.Lock()
	err := h.RecordValue(clamped.Nanoseconds())
	h.sum += rawLatency.Nanoseconds()
	h.m.Unlock()
	if err != nil {
		panic(fmt.Sprintf(`recording value error: %s`, err))
	}
}
// Empty reports whether no latencies have been recorded yet.
// TotalCount is a read-only query, so a read lock is sufficient here
// (consistent with GetInfo); the original took the exclusive write lock.
func (h *Histogram) Empty() bool {
	h.m.RLock()
	defer h.m.RUnlock()
	return h.TotalCount() == 0
}
// Summary renders the collected statistics as one human-readable line.
func (h *Histogram) Summary() string {
	info := h.GetInfo()
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "Takes(s): %.1f, ", info.Elapsed)
	fmt.Fprintf(&buf, "Count: %d, ", info.Count)
	fmt.Fprintf(&buf, "TPM: %.1f, ", info.Ops*60)
	fmt.Fprintf(&buf, "Sum(ms): %.1f, ", info.Sum)
	fmt.Fprintf(&buf, "Avg(ms): %.1f, ", info.Avg)
	fmt.Fprintf(&buf, "50th(ms): %.1f, ", info.P50)
	fmt.Fprintf(&buf, "90th(ms): %.1f, ", info.P90)
	fmt.Fprintf(&buf, "95th(ms): %.1f, ", info.P95)
	fmt.Fprintf(&buf, "99th(ms): %.1f, ", info.P99)
	fmt.Fprintf(&buf, "99.9th(ms): %.1f, ", info.P999)
	fmt.Fprintf(&buf, "Max(ms): %.1f", info.Max)
	return buf.String()
}
// GetInfo computes a snapshot of the collected statistics under a read lock.
// Latencies are reported in milliseconds; Ops is operations per second,
// measured against wall-clock time since the histogram was created.
func (h *Histogram) GetInfo() HistInfo {
	h.m.RLock()
	defer h.m.RUnlock()
	// Convert accumulated nanosecond totals to milliseconds.
	sum := time.Duration(h.sum).Seconds() * 1000
	avg := time.Duration(h.Mean()).Seconds() * 1000
	elapsed := time.Now().Sub(h.startTime).Seconds()
	count := h.TotalCount()
	ops := float64(count) / elapsed
	info := HistInfo{
		Elapsed: elapsed,
		Sum:     sum,
		Count:   count,
		Ops:     ops,
		Avg:     avg,
		P50:     time.Duration(h.ValueAtQuantile(50)).Seconds() * 1000,
		P90:     time.Duration(h.ValueAtQuantile(90)).Seconds() * 1000,
		P95:     time.Duration(h.ValueAtQuantile(95)).Seconds() * 1000,
		P99:     time.Duration(h.ValueAtQuantile(99)).Seconds() * 1000,
		P999:    time.Duration(h.ValueAtQuantile(99.9)).Seconds() * 1000,
		Max:     time.Duration(h.ValueAtQuantile(100)).Seconds() * 1000,
	}
	return info
} | pkg/measurement/hist.go | 0.5794 | 0.415492 | hist.go | starcoder |
package layer
import (
"github.com/varrrro/gonn/internal/functions"
"gonum.org/v1/gonum/mat"
)
// SigmoidalLayer with logistic activation function.
// It stores the weights and biases plus the most recent input, output, and
// deltas so that backward passes and weight updates can reuse them.
type SigmoidalLayer struct {
	inputSize  int        // number of inputs per neuron
	outputSize int        // number of neurons in the layer
	input      mat.Vector // last input fed to FeedForward
	output     mat.Vector // last activation produced by FeedForward
	weights    mat.Matrix // outputSize x inputSize weight matrix
	biases     mat.Vector // one bias per neuron
	deltas     mat.Vector // error terms from the last backward pass
}
// CreateSigmoidalLayer with the given size.
// weights must hold nOutput*nInput values (row-major) and biases nOutput values.
func CreateSigmoidalLayer(nInput, nOutput int, weights, biases []float64) *SigmoidalLayer {
	return &SigmoidalLayer{
		inputSize:  nInput,
		outputSize: nOutput,
		weights:    mat.NewDense(nOutput, nInput, weights),
		biases:     mat.NewVecDense(nOutput, biases),
	}
}
// GetOutput of the layer: the activations produced by the most recent
// FeedForward call (nil before the first call).
func (l *SigmoidalLayer) GetOutput() mat.Vector {
	return l.output
}

// GetWeights of the layer.
func (l *SigmoidalLayer) GetWeights() mat.Matrix {
	return l.weights
}

// GetDeltas of the layer: the error terms computed by the most recent
// backward pass (nil before the first call).
func (l *SigmoidalLayer) GetDeltas() mat.Vector {
	return l.deltas
}
// FeedForward an input through the layer: output = logistic(weights*x + biases).
// The input and resulting activation are retained for the backward pass.
func (l *SigmoidalLayer) FeedForward(x mat.Vector) {
	l.input = x
	// Pre-activation: z = W*x + b.
	pre := mat.NewVecDense(l.outputSize, nil)
	pre.MulVec(l.weights, x)
	pre.AddVec(pre, l.biases)
	// Apply the logistic function element-wise.
	act := mat.NewVecDense(l.outputSize, nil)
	for i := 0; i < l.outputSize; i++ {
		act.SetVec(i, functions.Logistic(pre.AtVec(i)))
	}
	l.output = act
}
// CalculateDeltas for the layer with the given target.
// Output-layer rule: delta_i = out_i * (1 - out_i) * (out_i - t_i),
// i.e. the squared-error gradient through the logistic activation.
func (l *SigmoidalLayer) CalculateDeltas(t mat.Vector) {
	d := mat.NewVecDense(l.outputSize, nil)
	for i := 0; i < l.outputSize; i++ {
		out := l.output.AtVec(i)
		d.SetVec(i, out*(1.0-out)*(out-t.AtVec(i)))
	}
	l.deltas = d
}
// CalculateHiddenDeltas for the layer with the values from the next layer.
// Hidden-layer rule: delta_i = out_i * (1 - out_i) * sum_j(nextDelta_j * w_ji).
func (l *SigmoidalLayer) CalculateHiddenDeltas(nextDeltas mat.Vector, nextWeights mat.Matrix) {
	d := mat.NewVecDense(l.outputSize, nil)
	for i := 0; i < l.outputSize; i++ {
		// Back-propagated error: next layer's deltas weighted by the
		// connections leaving neuron i.
		var acc float64
		for j := 0; j < nextDeltas.Len(); j++ {
			acc += nextDeltas.AtVec(j) * nextWeights.At(j, i)
		}
		out := l.output.AtVec(i)
		d.SetVec(i, out*(1.0-out)*acc)
	}
	l.deltas = d
}
// UpdateWeights and biases of the layer with the given Eta.
// Gradient-descent step using the deltas from the last backward pass:
// w_ij -= eta * delta_i * input_j and b_i -= eta * delta_i.
func (l *SigmoidalLayer) UpdateWeights(eta float64) {
	newWeights := mat.NewDense(l.outputSize, l.inputSize, nil)
	newBiases := mat.NewVecDense(l.outputSize, nil)
	for i := 0; i < l.outputSize; i++ {
		for j := 0; j < l.inputSize; j++ {
			weightIncrement := -1.0 * eta * l.deltas.AtVec(i) * l.input.AtVec(j)
			newWeights.Set(i, j, l.weights.At(i, j)+weightIncrement)
		}
		biasIncrement := -1.0 * eta * l.deltas.AtVec(i)
		newBiases.SetVec(i, l.biases.AtVec(i)+biasIncrement)
	}
	l.weights = newWeights
	l.biases = newBiases
} | internal/layer/sigmoidal.go | 0.812607 | 0.561936 | sigmoidal.go | starcoder |
package p472
/**
Given a list of words (without duplicates), please write a program that returns all concatenated words in the given list of words.
A concatenated word is defined as a string that is comprised entirely of at least two shorter words in the given array.
Example:
Input: ["cat","cats","catsdogcats","dog","dogcatsdog","hippopotamuses","rat","ratcatdogcat"]
Output: ["catsdogcats","dogcatsdog","ratcatdogcat"]
Explanation: "catsdogcats" can be concatenated by "cats", "dog" and "cats";
"dogcatsdog" can be concatenated by "dog", "cats" and "dog";
"ratcatdogcat" can be concatenated by "rat", "cat", "dog" and "cat".
Note:
The number of elements of the given array will not exceed 10,000
The length sum of elements in the given array will not exceed 600,000.
All the input string will only include lower case letters.
The returned elements order does not matter.
*/
// Trie is the root handle for a byte-trie over lowercase ASCII words.
type Trie struct {
	root *TrieNode
}

// TrieNode holds one trie level: word marks the end of an inserted word;
// children has one slot per letter 'a'..'z'.
type TrieNode struct {
	word     bool
	children []*TrieNode
}
/** Initialize your data structure here. */
func Constructor() Trie {
	root := &TrieNode{children: make([]*TrieNode, 26)}
	return Trie{root: root}
}
/** Inserts a word into the trie. Assumes lowercase ASCII letters only. */
func (this *Trie) Insert(word string) {
	if word == "" {
		return
	}
	node := this.root
	for i := 0; i < len(word); i++ {
		idx := word[i] - 'a'
		if node.children[idx] == nil {
			node.children[idx] = &TrieNode{children: make([]*TrieNode, 26)}
		}
		node = node.children[idx]
	}
	node.word = true
}
// isConcatenated reports whether key[ix:] can be split entirely into words
// stored in the trie, requiring at least two words when ix == 0 (so a word
// is never considered a concatenation of just itself).
func (this *Trie) isConcatenated(key []byte, ix int) bool {
	concat := false
	cur := this.root
	// Walk key[ix:]; whenever a stored word ends at position i, try to
	// decompose the remaining suffix recursively.
	for i := ix; i <= len(key); i++ {
		if cur == nil {
			break
		}
		if i == len(key) {
			// Reached the end: the current suffix is itself a word. Count it
			// only if something was consumed before it (ix != 0).
			concat = concat || (cur.word && ix != 0)
			continue
		}
		if cur.word && this.isConcatenated(key, i) {
			concat = true
			break
		}
		cur = cur.children[key[i]-'a']
	}
	return concat
}
// findAllConcatenatedWordsInADict returns every input word that can be formed
// by concatenating at least two words from the input list.
func findAllConcatenatedWordsInADict(words []string) []string {
	// Build a trie over all words, then test each word against it.
	trie := Constructor()
	for _, v := range words {
		trie.Insert(v)
	}
	res := make([]string, 0)
	for _, w := range words {
		if trie.isConcatenated([]byte(w), 0) {
			res = append(res, w)
		}
	}
	return res
} | algorithms/p472/472.go | 0.744471 | 0.627409 | 472.go | starcoder |
package equitytest
// TrivialLock is Equity source for a contract that unlocks its value
// unconditionally.
const TrivialLock = `
contract TrivialLock() locks amount of asset {
clause trivialUnlock() {
unlock amount of asset
}
}
`

// LockWithPublicKey requires a transaction signature matching publicKey.
const LockWithPublicKey = `
contract LockWithPublicKey(publicKey: PublicKey) locks amount of asset {
clause unlockWithSig(sig: Signature) {
verify checkTxSig(publicKey, sig)
unlock amount of asset
}
}
`

// LockWithPKHash is a pay-to-public-key-hash style contract: the spender
// reveals a public key whose sha3 matches pubKeyHash plus a valid signature.
const LockWithPKHash = `
contract LockWithPublicKeyHash(pubKeyHash: Hash) locks amount of asset {
clause spend(pubKey: PublicKey, sig: Signature) {
verify sha3(pubKey) == pubKeyHash
verify checkTxSig(pubKey, sig)
unlock amount of asset
}
}
`

// LockWith2of3Keys is a 2-of-3 multisig contract.
const LockWith2of3Keys = `
contract LockWith3Keys(pubkey1, pubkey2, pubkey3: PublicKey) locks amount of asset {
clause unlockWith2Sigs(sig1, sig2: Signature) {
verify checkTxMultiSig([pubkey1, pubkey2, pubkey3], [sig1, sig2])
unlock amount of asset
}
}
`

// LockToOutput re-locks the value with a fixed destination program.
const LockToOutput = `
contract LockToOutput(address: Program) locks amount of asset {
clause relock() {
lock amount of asset with address
}
}
`

// TradeOffer lets anyone take the offer by paying requestedAmount of
// requestedAsset to the seller, or lets the seller cancel with a signature.
const TradeOffer = `
contract TradeOffer(requestedAsset: Asset, requestedAmount: Amount, sellerProgram: Program, sellerKey: PublicKey) locks amount of asset {
clause trade() {
lock requestedAmount of requestedAsset with sellerProgram
unlock amount of asset
}
clause cancel(sellerSig: Signature) {
verify checkTxSig(sellerKey, sellerSig)
unlock amount of asset
}
}
`
// EscrowedTransfer holds value that an escrow agent can route to either the
// recipient (approve) or back to the sender (reject).
const EscrowedTransfer = `
contract EscrowedTransfer(agent: PublicKey, sender: Program, recipient: Program) locks amount of asset {
clause approve(sig: Signature) {
verify checkTxSig(agent, sig)
lock amount of asset with recipient
}
clause reject(sig: Signature) {
verify checkTxSig(agent, sig)
lock amount of asset with sender
}
}
`

// RevealPreimage unlocks the value to anyone revealing the preimage of hash.
const RevealPreimage = `
contract RevealPreimage(hash: Hash) locks amount of asset {
clause reveal(string: String) {
verify sha3(string) == hash
unlock amount of asset
}
}
`

// PriceChanger is a sale offer whose price the seller can re-sign into a new
// instance of the same contract, or which a buyer can redeem at the ask.
const PriceChanger = `
contract PriceChanger(askAmount: Amount, askAsset: Asset, sellerKey: PublicKey, sellerProg: Program) locks valueAmount of valueAsset {
clause changePrice(newAmount: Amount, newAsset: Asset, sig: Signature) {
verify checkTxSig(sellerKey, sig)
lock valueAmount of valueAsset with PriceChanger(newAmount, newAsset, sellerKey, sellerProg)
}
clause redeem() {
lock askAmount of askAsset with sellerProg
unlock valueAmount of valueAsset
}
}
`

// TestDefineVar exercises the compiler's "define" statement.
const TestDefineVar = `
contract TestDefineVar(result: Integer) locks valueAmount of valueAsset {
clause LockWithMath(left: Integer, right: Integer) {
define calculate: Integer = left + right
verify left != calculate
verify result == calculate
unlock valueAmount of valueAsset
}
}
`

// TestAssignVar exercises the compiler's "assign" statement.
const TestAssignVar = `
contract TestAssignVar(result: Integer) locks valueAmount of valueAsset {
clause LockWithMath(first: Integer, second: Integer) {
define calculate: Integer = first
assign calculate = calculate + second
verify result == calculate
unlock valueAmount of valueAsset
}
}
`
// TestSigIf exercises if/else branching inside a clause.
const TestSigIf = `
contract TestSigIf(a: Integer, count:Integer) locks valueAmount of valueAsset {
clause check(b: Integer, c: Integer) {
verify b != count
if a > b {
verify b > c
} else {
verify a > c
}
unlock valueAmount of valueAsset
}
}
`

// TestIfAndMultiClause exercises an if statement combined with a second,
// signature-gated cancel clause.
const TestIfAndMultiClause = `
contract TestIfAndMultiClause(a: Integer, cancelKey: PublicKey) locks valueAmount of valueAsset {
clause check(b: Integer, c: Integer) {
verify b != c
if a > b {
verify a > c
}
unlock valueAmount of valueAsset
}
clause cancel(sellerSig: Signature) {
verify checkTxSig(cancelKey, sellerSig)
unlock valueAmount of valueAsset
}
}
`

// TestIfNesting exercises nested if statements across two clauses.
const TestIfNesting = `
contract TestIfNesting(a: Integer, count:Integer) locks valueAmount of valueAsset {
clause check(b: Integer, c: Integer, d: Integer) {
verify b != count
if a > b {
if d > c {
verify a > d
}
verify d != b
} else {
verify a > c
}
verify c != count
unlock valueAmount of valueAsset
}
clause cancel(e: Integer, f: Integer) {
verify a != e
if a > f {
verify e > count
}
verify f != count
unlock valueAmount of valueAsset
}
}
`

// TestConstantMath exercises constant folding over integer, byte-string,
// string and boolean expressions.
const TestConstantMath = `
contract TestConstantMath(result: Integer, hashByte: Hash, hashStr: Hash, outcome: Boolean) locks valueAmount of valueAsset {
clause calculation(left: Integer, right: Integer, boolResult: Boolean) {
verify result == left + right + 10
verify hashByte == sha3(0x31323330)
verify hashStr == sha3('string')
verify !outcome
verify boolResult && (result == left + 20)
unlock valueAmount of valueAsset
}
}
` | vendor/github.com/bytom/equity/compiler/equitytest/equitytest.go | 0.677687 | 0.483161 | equitytest.go | starcoder |
package bits
import (
"math"
)
// ShiftDown takes a matrix of bytes and shifts a column of bits down
// by 'count'. Bits pushed off the bottom are rotated back around to
// the top. Each slice in the matrix is expected to be the same length.
func ShiftDown(matrix [][]byte, index float64, count float64) [][]byte {
	// Reading starts 'count' rows from the bottom, so values wrap downward.
	return shiftVertical(matrix, index, count, len(matrix)-int(count))
}
// ShiftUp takes a matrix of bytes and shifts a column of bits up
// by 'count'. Bits pushed off the top are rotated back around to
// the bottom. Each slice in the matrix is expected to be the same length.
func ShiftUp(matrix [][]byte, index float64, count float64) [][]byte {
	// Reading starts 'count' rows from the top, so values wrap upward.
	return shiftVertical(matrix, index, count, int(count))
}
// shiftVertical rotates the bit column at the given bit index: each row's
// target bit is replaced by the bit from the row yCursor positions away
// (wrapping around the matrix).
func shiftVertical(matrix [][]byte, index float64, count float64, yCursor int) [][]byte {
	validateVerticalShift(matrix, index, count)
	byteIdx, mask := getMaskForByteArrayIndex(index)
	sources := copyColumnBytes(matrix, yCursor, byteIdx)
	copyIntoMatrix(sources, matrix, byteIdx, mask)
	return matrix
}
// validateVerticalShift panics when the shift parameters are out of range or
// when any matrix row is too short to contain the bit index. The checks run
// in the same order as the original so the same message fires first.
func validateVerticalShift(matrix [][]byte, index float64, count float64) {
	switch {
	case int(count) >= len(matrix):
		panic("Must shift by a number less than the bits in the array")
	case !matrixIsLongEnough(matrix, index):
		panic("Each slice in the matrix must be as long as the index")
	case count <= 0:
		panic("Must shift by a number greater than 0")
	}
}
func matrixIsLongEnough(matrix [][]byte, index float64) bool {
for i := 0; i < len(matrix); i++ {
if len(matrix[i]) < (int(index)/8 + 1) {
return false
}
}
return true
}
// getMaskForByteArrayIndex translates a bit index into the position of the
// containing byte within a row plus a single-bit mask for that byte.
func getMaskForByteArrayIndex(index float64) (xByte int, mask byte) {
	xByte = int(math.Trunc(index / 8.0))
	// math.Remainder may yield a negative value; normalize into [0, 8).
	offset := math.Remainder(index, 8)
	if offset < 0 {
		offset += 8
	}
	mask = NewSimpleCopyPasteMask(uint8(offset))
	return
}
// copyColumnBytes snapshots one byte column, reading rows starting at
// rowStart and wrapping past the end, so the result is the column rotated
// by rowStart positions.
func copyColumnBytes(matrix [][]byte, rowStart, xIndex int) []byte {
	out := make([]byte, len(matrix))
	row := rowStart
	for i := range out {
		out[i] = matrix[row][xIndex]
		row++
		if row >= len(matrix) {
			row = 0
		}
	}
	return out
}
// copyIntoMatrix pastes the masked bit of each source byte into the byte at
// column xIndex of the corresponding row, leaving the row's other bits as-is.
func copyIntoMatrix(from []byte, into [][]byte, xIndex int, mask byte) {
	for i := 0; i < len(into); i++ {
		into[i][xIndex] = CopyPaste(from[i], into[i][xIndex], mask)
	}
} | shiftvertical.go | 0.828662 | 0.674667 | shiftvertical.go | starcoder |
package jbtracer
import (
"fmt"
"math"
)
// Epsilon is the expected precision for our floating point operations
const Epsilon = 0.00001

// Tuple represents either a 3D point (W == 1) or a 3D vector (W == 0).
type Tuple struct {
	X, Y, Z float64 // 3D coordinates
	W       float64 // 1.0 when a point, 0.0 when a vector
}

// String returns a string representation of the tuple
func (a *Tuple) String() string {
	var types string
	if a.IsPoint() {
		types = "point"
	} else {
		types = "vector"
	}
	return fmt.Sprintf("x=%+2.5f, y=%+2.5f, z=%+2.5f (%s)", a.X, a.Y, a.Z, types)
}

// IsPoint returns true if this Tuple is a point (W within Epsilon of 1)
func (a *Tuple) IsPoint() bool {
	return math.Abs(a.W-1.0) < Epsilon
}

// IsVector returns true if this Tuple is a vector (W within Epsilon of 0).
// Comparing |W| rather than W itself prevents tuples with negative W —
// e.g. a negated point, whose W is -1 — from being misclassified as vectors.
func (a *Tuple) IsVector() bool {
	return math.Abs(a.W) < Epsilon
}

// Equal determines if two Tuples are the same, component-wise within Epsilon
func (a *Tuple) Equal(b *Tuple) bool {
	return EqualFloat64(a.X, b.X) && EqualFloat64(a.Y, b.Y) && EqualFloat64(a.Z, b.Z) && EqualFloat64(a.W, b.W)
}

// Add adds one tuple to another
func (a *Tuple) Add(b *Tuple) *Tuple {
	return &Tuple{
		X: a.X + b.X,
		Y: a.Y + b.Y,
		Z: a.Z + b.Z,
		W: a.W + b.W,
	}
}

// Subtract subtracts one tuple from another
func (a *Tuple) Subtract(b *Tuple) *Tuple {
	return &Tuple{
		X: a.X - b.X,
		Y: a.Y - b.Y,
		Z: a.Z - b.Z,
		W: a.W - b.W,
	}
}

// Negate negates a tuple (including W, so a negated point is neither a
// point nor a vector)
func (a *Tuple) Negate() *Tuple {
	return &Tuple{
		X: a.X * -1.0,
		Y: a.Y * -1.0,
		Z: a.Z * -1.0,
		W: a.W * -1.0,
	}
}

// Multiply multiplies a tuple by a scalar
func (a *Tuple) Multiply(scalar float64) *Tuple {
	return &Tuple{
		X: a.X * scalar,
		Y: a.Y * scalar,
		Z: a.Z * scalar,
		W: a.W * scalar,
	}
}

// Divide divides a tuple by a scalar
func (a *Tuple) Divide(scalar float64) *Tuple {
	return &Tuple{
		X: a.X / scalar,
		Y: a.Y / scalar,
		Z: a.Z / scalar,
		W: a.W / scalar,
	}
}

// Magnitude returns the magnitude (or length) of the tuple
func (a *Tuple) Magnitude() float64 {
	return math.Sqrt(a.X*a.X + a.Y*a.Y + a.Z*a.Z + a.W*a.W)
}

// Normalize returns a normalized unit vector
func (a *Tuple) Normalize() *Tuple {
	m := a.Magnitude()
	return &Tuple{
		X: a.X / m,
		Y: a.Y / m,
		Z: a.Z / m,
		W: a.W / m,
	}
}

// Dot returns the dot product of this vector with the provided vector
func (a *Tuple) Dot(b *Tuple) float64 {
	return a.X*b.X + a.Y*b.Y + a.Z*b.Z + a.W*b.W
}

// Cross returns the cross product of this vector with the provided vector.
// The result is always a vector (W == 0); W components are ignored.
func (a *Tuple) Cross(b *Tuple) *Tuple {
	return NewVector(
		a.Y*b.Z-a.Z*b.Y,
		a.Z*b.X-a.X*b.Z,
		a.X*b.Y-a.Y*b.X,
	)
}

// EqualFloat64 determines if two float64 values are within Epsilon of each other
func EqualFloat64(a, b float64) bool {
	return math.Abs(a-b) < Epsilon
}

// Reflect reflects vector v around the normal n: v - n*(2 * v·n)
func (v *Tuple) Reflect(n *Tuple) *Tuple {
	return v.Subtract(n.Multiply(2 * v.Dot(n)))
}

// NewPoint creates a new Tuple of type point (W == 1)
func NewPoint(X, Y, Z float64) *Tuple {
	point := &Tuple{
		X: X,
		Y: Y,
		Z: Z,
		W: 1.0,
	}
	return point
}

// NewVector creates a new Tuple of type vector (W == 0)
func NewVector(X, Y, Z float64) *Tuple {
	vector := &Tuple{
		X: X,
		Y: Y,
		Z: Z,
		W: 0.0,
	}
	return vector
}
// Color is an RGB color with float64 channels.
type Color struct {
	Red, Green, Blue float64
}

// Predefined common colors.
var (
	Black = &Color{0, 0, 0}
	White = &Color{1, 1, 1}
)

// NewColor builds a Color from its red, green and blue components.
func NewColor(red, green, blue float64) *Color {
	return &Color{Red: red, Green: green, Blue: blue}
}

// Equal determines if two Colors are the same, channel-wise within Epsilon.
func (a *Color) Equal(b *Color) bool {
	return EqualFloat64(a.Red, b.Red) &&
		EqualFloat64(a.Green, b.Green) &&
		EqualFloat64(a.Blue, b.Blue)
}

// Add returns the channel-wise sum of the two colors.
func (a *Color) Add(b *Color) *Color {
	return &Color{a.Red + b.Red, a.Green + b.Green, a.Blue + b.Blue}
}

// Subtract returns the channel-wise difference of the two colors.
func (a *Color) Subtract(b *Color) *Color {
	return &Color{a.Red - b.Red, a.Green - b.Green, a.Blue - b.Blue}
}

// Multiply returns the channel-wise (Hadamard) product of the two colors.
func (a *Color) Multiply(b *Color) *Color {
	return &Color{a.Red * b.Red, a.Green * b.Green, a.Blue * b.Blue}
}
// MultiplyScalar multiplies this Color by a scalar, scaling every channel.
func (a *Color) MultiplyScalar(scalar float64) *Color {
	return &Color{
		Red:   a.Red * scalar,
		Green: a.Green * scalar,
		Blue:  a.Blue * scalar,
	}
} | tuples.go | 0.905216 | 0.625724 | tuples.go | starcoder |
package assert
import (
"fmt"
"path/filepath"
"reflect"
"runtime"
"testing"
)
// ObjectsAreEqual checks two interfaces with reflect.DeepEqual.
// Two nils compare equal; a nil against a non-nil does not.
func ObjectsAreEqual(expected, actual interface{}) bool {
	if expected != nil && actual != nil {
		return reflect.DeepEqual(expected, actual)
	}
	return expected == actual
}
// IsNil checks an interface{} with the reflect package. It reports true for
// an untyped nil and for nil values of the nilable kinds (chan, func,
// interface, map, pointer, slice).
func IsNil(object interface{}) bool {
	if object == nil {
		return true
	}
	v := reflect.ValueOf(object)
	switch v.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		return v.IsNil()
	}
	return false
}
// errorSingle fails and prints the single object
// along with the message. runtime.Caller(2) attributes the failure to the
// caller's caller, so this must be invoked directly from an exported
// assertion method (True, NoError, NotNil).
func errorSingle(t testing.TB, msg string, obj interface{}) {
	_, file, line, _ := runtime.Caller(2)
	fmt.Printf("\033[31m\t%s:%d: %s\n\n\t\t%#v\033[39m\n\n", filepath.Base(file), line, msg, obj)
	t.Fail()
}
// errorCompare fails and prints both the compared objects
// along with the message. Like errorSingle, runtime.Caller(2) requires this
// to be called directly from an exported assertion method (Equal).
func errorCompare(t testing.TB, msg string, expected, actual interface{}) {
	_, file, line, _ := runtime.Caller(2)
	fmt.Printf("\033[31m\t%s:%d: %s\n\n\t\tgot: %#v\n\033[32m\t\texp: %#v\033[39m\n\n", filepath.Base(file), line, msg, actual, expected)
	t.Fail()
}
// Assert wraps a testing.TB for convenient asserting calls.
// The zero value is unusable; construct instances with NewAssert.
type Assert struct {
	t testing.TB
}
// True tests if the cond is true and prints the msg for failure.
func (a *Assert) True(cond bool, msg string) {
	if cond {
		return
	}
	// Must stay one call deep so errorSingle's Caller(2) hits our caller.
	errorSingle(a.t, msg, cond)
}
// Equal tests if the two interfaces provided is equal
// (via ObjectsAreEqual) and prints the msg for failure.
func (a *Assert) Equal(expected, actual interface{}, msg string) {
	if ObjectsAreEqual(expected, actual) {
		return
	}
	errorCompare(a.t, msg, expected, actual)
}
// NoError fails the test and prints the msg if err != nil.
func (a *Assert) NoError(err error, msg string) {
	if err == nil {
		return
	}
	errorSingle(a.t, msg, err)
}
// NotNil fails the test and prints the msg if the obj is nil
// (per IsNil, which also detects typed nils inside interfaces).
func (a *Assert) NotNil(obj interface{}, msg string) {
	if !IsNil(obj) {
		return
	}
	errorSingle(a.t, msg, obj)
}
// NewAssert provides an Assert instance bound to the given testing.TB.
func NewAssert(t testing.TB) *Assert {
	return &Assert{t: t}
}
// New is a func alias for NewAssert. The name
// NewAssert is redundant but kept for compatibility.
var New = NewAssert
package generator
import (
"errors"
"fmt"
"go/ast"
"go/token"
"reflect"
"strconv"
"strings"
"unicode"
"github.com/asdine/genji/value"
)
// recordsTmpl expands the "record" template once per entry in .Records.
const recordsTmpl = `
{{- define "records" }}
{{- range .Records }}
{{- template "record" . }}
{{- end }}
{{- end }}
`

// recordTmpl stitches together the five per-record code generators below.
const recordTmpl = `
{{- define "record" }}
{{- template "record-GetField" . }}
{{- template "record-Iterate" . }}
{{- template "record-ScanRecord" . }}
{{- template "record-Scan" . }}
{{- template "record-Pk" . }}
{{- end }}
`

// recordGetFieldTmpl generates a GetField method resolving a field by
// name ($fl is the receiver letter, $structName the record type).
const recordGetFieldTmpl = `
{{ define "record-GetField" }}
{{- $fl := .FirstLetter -}}
{{- $structName := .Name -}}
// GetField implements the field method of the record.Record interface.
func ({{$fl}} *{{$structName}}) GetField(name string) (record.Field, error) {
switch name {
{{- range .Fields }}
case "{{.Name}}":
return record.New{{.Type}}Field("{{.Name}}", {{$fl}}.{{.Name}}), nil
{{- end}}
}
return record.Field{}, errors.New("unknown field")
}
{{ end }}
`

// recordIterateTmpl generates an Iterate method that passes each field
// to a visitor function, stopping on the first error.
const recordIterateTmpl = `
{{ define "record-Iterate" }}
{{- $fl := .FirstLetter -}}
{{- $structName := .Name -}}
// Iterate through all the fields one by one and pass each of them to the given function.
// It the given function returns an error, the iteration is interrupted.
func ({{$fl}} *{{$structName}}) Iterate(fn func(record.Field) error) error {
var err error
{{range .Fields}}
err = fn(record.New{{.Type}}Field("{{.Name}}", {{$fl}}.{{.Name}}))
if err != nil {
return err
}
{{end}}
return nil
}
{{ end }}
`

// recordScanRecordTmpl generates a ScanRecord method that decodes a
// record.Record's fields back into the struct.
const recordScanRecordTmpl = `
{{ define "record-ScanRecord" }}
{{- $fl := .FirstLetter -}}
{{- $structName := .Name -}}
// ScanRecord extracts fields from record and assigns them to the struct fields.
// It implements the record.Scanner interface.
func ({{$fl}} *{{$structName}}) ScanRecord(rec record.Record) error {
return rec.Iterate(func(f record.Field) error {
var err error
switch f.Name {
{{- range .Fields}}
case "{{.Name}}":
{{$fl}}.{{.Name}}, err = f.DecodeTo{{.Type}}()
{{- end}}
}
return err
})
}
{{ end }}
`

// recordScanTmpl generates a database/sql-style Scan method that
// delegates to ScanRecord.
const recordScanTmpl = `
{{ define "record-Scan" }}
{{- $fl := .FirstLetter -}}
{{- $structName := .Name -}}
// Scan extracts fields from src and assigns them to the struct fields.
// It implements the driver.Scanner interface.
func ({{$fl}} *{{$structName}}) Scan(src interface{}) error {
r, ok := src.(record.Record)
if !ok {
return errors.New("unable to scan record from src")
}
return {{$fl}}.ScanRecord(r)
}
{{ end }}
`

// recordPkTmpl generates a PrimaryKey method, emitted only when the
// context has a pk-tagged field (.Pk.Name non-empty).
const recordPkTmpl = `
{{ define "record-Pk" }}
{{- $fl := .FirstLetter -}}
{{- $structName := .Name -}}
{{- if ne .Pk.Name ""}}
// PrimaryKey returns the primary key. It implements the table.PrimaryKeyer interface.
func ({{$fl}} *{{$structName}}) PrimaryKey() ([]byte, error) {
return value.Encode{{.Pk.Type}}({{$fl}}.{{.Pk.Name}}), nil
}
{{- end}}
{{ end }}
`
// recordContext carries the information extracted from a target struct
// declaration that the code-generation templates above consume.
type recordContext struct {
	// Name is the struct type's name.
	Name string
	// Fields lists each struct field: its name, its genji value type
	// string and its Go type string.
	Fields []struct {
		Name, Type, GoType string
	}
	// Pk describes the field tagged `genji:"pk"`; Name is "" when no
	// field carries the tag.
	Pk struct {
		Name, Type, GoType string
	}
}
// lookupRecord scans the file's top-level declarations for a struct type
// named target and, when found, fills rctx with its name and fields.
// It returns (true, nil) when the struct was found and parsed,
// (false, nil) when no declaration matches, and (false, err) when the
// matching declaration cannot be used for code generation.
func (rctx *recordContext) lookupRecord(f *ast.File, target string) (bool, error) {
	for _, n := range f.Decls {
		// Only type declarations with at least one spec are of interest.
		gn, ok := ast.Node(n).(*ast.GenDecl)
		if !ok || gn.Tok != token.TYPE || len(gn.Specs) == 0 {
			continue
		}
		ts, ok := gn.Specs[0].(*ast.TypeSpec)
		if !ok {
			continue
		}
		if ts.Name.Name != target {
			continue
		}
		s, ok := ts.Type.(*ast.StructType)
		if !ok {
			return false, errors.New("invalid object")
		}
		rctx.Name = target
		for _, fd := range s.Fields.List {
			var typeName string
			// A field is either a plain identifier type (int, string, ...)
			// or the one supported array type, []byte.
			typ, ok := fd.Type.(*ast.Ident)
			if !ok {
				atyp, ok := fd.Type.(*ast.ArrayType)
				if !ok {
					return false, errors.New("struct must only contain supported fields")
				}
				typ, ok = atyp.Elt.(*ast.Ident)
				if !ok || typ.Name != "byte" {
					return false, errors.New("struct must only contain supported fields")
				}
				typeName = "[]byte"
			} else {
				typeName = typ.Name
			}
			if len(fd.Names) == 0 {
				return false, errors.New("embedded fields are not supported")
			}
			// TypeFromGoType presumably yields the zero value for Go types
			// genji cannot encode — TODO confirm against the value package.
			if value.TypeFromGoType(typeName) == 0 {
				return false, fmt.Errorf("unsupported type %s", typeName)
			}
			// A declaration like "a, b int" produces one entry per name.
			for _, name := range fd.Names {
				rctx.Fields = append(rctx.Fields, struct {
					Name, Type, GoType string
				}{
					name.String(), value.TypeFromGoType(typeName).String(), typeName,
				})
			}
			if fd.Tag != nil {
				err := handleGenjiTag(rctx, fd)
				if err != nil {
					return false, err
				}
			}
		}
		return true, nil
	}
	return false, nil
}
// IsExported reports whether the record name starts with an upper-case
// (exported) letter. Only the first byte is inspected.
func (rctx *recordContext) IsExported() bool {
	first := rune(rctx.Name[0])
	return unicode.IsUpper(first)
}
// FirstLetter returns the lower-cased first byte of the record name;
// the templates use it as the generated methods' receiver identifier.
func (rctx *recordContext) FirstLetter() string {
	head := rctx.Name[0:1]
	return strings.ToLower(head)
}
// UnexportedName returns the record name with a lower-case first letter.
func (rctx *recordContext) UnexportedName() string {
	if rctx.IsExported() {
		return rctx.Unexport(rctx.Name)
	}
	return rctx.Name
}
// ExportedName returns the record name with an upper-case first letter.
func (rctx *recordContext) ExportedName() string {
	if !rctx.IsExported() {
		return rctx.Export(rctx.Name)
	}
	return rctx.Name
}
// NameWithPrefix joins prefix with the exported record name, then makes
// the result's visibility (first-letter case) match the original name's.
func (rctx *recordContext) NameWithPrefix(prefix string) string {
	combined := prefix + rctx.ExportedName()
	if !rctx.IsExported() {
		return rctx.Unexport(combined)
	}
	return rctx.Export(combined)
}
// Export returns n with its first byte upper-cased.
// NOTE(review): this operates byte-wise, so it assumes the first
// character is ASCII — a multi-byte first rune would be corrupted.
// Go identifiers handled here are presumably ASCII; confirm.
func (rctx *recordContext) Export(n string) string {
	name := []byte(n)
	name[0] = byte(unicode.ToUpper(rune(n[0])))
	return string(name)
}
// Unexport returns n with its first byte lower-cased.
// NOTE(review): byte-wise like Export — assumes an ASCII first
// character; confirm.
func (rctx *recordContext) Unexport(n string) string {
	name := []byte(n)
	name[0] = byte(unicode.ToLower(rune(n[0])))
	return string(name)
}
func handleGenjiTag(ctx *recordContext, fd *ast.Field) error {
unquoted, err := strconv.Unquote(fd.Tag.Value)
if err != nil {
return err
}
v, ok := reflect.StructTag(unquoted).Lookup("genji")
if !ok {
return nil
}
gtags := strings.Split(v, ",")
for _, gtag := range gtags {
switch gtag {
case "pk":
if ctx.Pk.Name != "" {
return errors.New("only one pk field is allowed")
}
ctx.Pk.Name = fd.Names[0].Name
ctx.Pk.Type = value.TypeFromGoType(fd.Type.(*ast.Ident).Name).String()
ctx.Pk.GoType = fd.Type.(*ast.Ident).Name
default:
return fmt.Errorf("unsupported genji tag '%s'", gtag)
}
}
return nil
} | cmd/genji/generator/record.go | 0.690768 | 0.40439 | record.go | starcoder |
package ioutil
// A ByteOrder specifies how to convert byte sequences into
// 16-, 24-, 32-, 40-, 48-, 56- or 64-bit unsigned integers.
// It is compatible with the standard library ByteOrder but contains
// more integer types.
//
// Implementations panic when the supplied slice is shorter than the
// width being read or written.
type ByteOrder interface {
	// Uint16 reads 2 bytes
	Uint16([]byte) uint16
	// Uint24 reads 3 bytes
	Uint24([]byte) uint32
	// Uint32 reads 4 bytes
	Uint32([]byte) uint32
	// Uint40 reads 5 bytes
	Uint40([]byte) uint64
	// Uint48 reads 6 bytes
	Uint48([]byte) uint64
	// Uint56 reads 7 bytes
	Uint56([]byte) uint64
	// Uint64 reads 8 bytes
	Uint64([]byte) uint64
	// PutUint16 writes 2 bytes
	PutUint16([]byte, uint16)
	// PutUint24 writes 3 bytes
	PutUint24([]byte, uint32)
	// PutUint32 writes 4 bytes
	PutUint32([]byte, uint32)
	// PutUint40 writes 5 bytes
	PutUint40([]byte, uint64)
	// PutUint48 writes 6 bytes
	PutUint48([]byte, uint64)
	// PutUint56 writes 7 bytes
	PutUint56([]byte, uint64)
	// PutUint64 writes 8 bytes
	PutUint64([]byte, uint64)
	// String returns the endianness name
	String() string
}

// LittleEndian instance of ByteOrder.
var LittleEndian littleEndian //nolint:gochecknoglobals

// BigEndian instance of ByteOrder.
var BigEndian bigEndian //nolint:gochecknoglobals
// littleEndian implements ByteOrder for data stored with the
// least-significant byte first.
type littleEndian struct {
}

// leLoad assembles an unsigned integer from the first n bytes of b,
// least-significant byte first. It panics if len(b) < n, matching the
// bounds behavior of the per-width readers it replaces.
func leLoad(b []byte, n int) uint64 {
	var v uint64
	for i := n - 1; i >= 0; i-- {
		v = v<<8 | uint64(b[i])
	}
	return v
}

// leStore writes the n low-order bytes of v into b, least-significant
// byte first. The leading index check makes it panic before any byte is
// written, exactly like the original early bounds checks.
func leStore(b []byte, n int, v uint64) {
	_ = b[n-1] // fail before writing anything if b is too short
	for i := 0; i < n; i++ {
		b[i] = byte(v >> (8 * i))
	}
}

// Uint16 reads 2 bytes.
func (littleEndian) Uint16(b []byte) uint16 { return uint16(leLoad(b, 2)) }

// Uint24 reads 3 bytes.
func (littleEndian) Uint24(b []byte) uint32 { return uint32(leLoad(b, 3)) }

// Uint32 reads 4 bytes.
func (littleEndian) Uint32(b []byte) uint32 { return uint32(leLoad(b, 4)) }

// Uint40 reads 5 bytes.
func (littleEndian) Uint40(b []byte) uint64 { return leLoad(b, 5) }

// Uint48 reads 6 bytes.
func (littleEndian) Uint48(b []byte) uint64 { return leLoad(b, 6) }

// Uint56 reads 7 bytes.
func (littleEndian) Uint56(b []byte) uint64 { return leLoad(b, 7) }

// Uint64 reads 8 bytes.
func (littleEndian) Uint64(b []byte) uint64 { return leLoad(b, 8) }

// PutUint16 writes 2 bytes.
func (littleEndian) PutUint16(b []byte, v uint16) { leStore(b, 2, uint64(v)) }

// PutUint24 writes 3 bytes.
func (littleEndian) PutUint24(b []byte, v uint32) { leStore(b, 3, uint64(v)) }

// PutUint32 writes 4 bytes.
func (littleEndian) PutUint32(b []byte, v uint32) { leStore(b, 4, uint64(v)) }

// PutUint40 writes 5 bytes.
func (littleEndian) PutUint40(b []byte, v uint64) { leStore(b, 5, v) }

// PutUint48 writes 6 bytes.
func (littleEndian) PutUint48(b []byte, v uint64) { leStore(b, 6, v) }

// PutUint56 writes 7 bytes.
func (littleEndian) PutUint56(b []byte, v uint64) { leStore(b, 7, v) }

// PutUint64 writes 8 bytes.
func (littleEndian) PutUint64(b []byte, v uint64) { leStore(b, 8, v) }

// String returns the endianness name.
func (littleEndian) String() string { return "LittleEndian" }
// bigEndian implements ByteOrder for data stored with the
// most-significant byte first.
type bigEndian struct{}

// beLoad assembles an unsigned integer from the first n bytes of b,
// most-significant byte first. It panics if len(b) < n.
func beLoad(b []byte, n int) uint64 {
	var v uint64
	for i := 0; i < n; i++ {
		v = v<<8 | uint64(b[i])
	}
	return v
}

// beStore writes the n low-order bytes of v into b, most-significant
// byte first. The leading index check makes it panic before any byte is
// written, exactly like the original early bounds checks.
func beStore(b []byte, n int, v uint64) {
	_ = b[n-1] // fail before writing anything if b is too short
	for i := 0; i < n; i++ {
		b[i] = byte(v >> (8 * (n - 1 - i)))
	}
}

// Uint16 reads 2 bytes.
func (bigEndian) Uint16(b []byte) uint16 { return uint16(beLoad(b, 2)) }

// Uint24 reads 3 bytes.
func (bigEndian) Uint24(b []byte) uint32 { return uint32(beLoad(b, 3)) }

// Uint32 reads 4 bytes.
func (bigEndian) Uint32(b []byte) uint32 { return uint32(beLoad(b, 4)) }

// Uint40 reads 5 bytes.
func (bigEndian) Uint40(b []byte) uint64 { return beLoad(b, 5) }

// Uint48 reads 6 bytes.
func (bigEndian) Uint48(b []byte) uint64 { return beLoad(b, 6) }

// Uint56 reads 7 bytes.
func (bigEndian) Uint56(b []byte) uint64 { return beLoad(b, 7) }

// Uint64 reads 8 bytes.
func (bigEndian) Uint64(b []byte) uint64 { return beLoad(b, 8) }

// PutUint16 writes 2 bytes.
func (bigEndian) PutUint16(b []byte, v uint16) { beStore(b, 2, uint64(v)) }

// PutUint24 writes 3 bytes.
func (bigEndian) PutUint24(b []byte, v uint32) { beStore(b, 3, uint64(v)) }

// PutUint32 writes 4 bytes.
func (bigEndian) PutUint32(b []byte, v uint32) { beStore(b, 4, uint64(v)) }

// PutUint40 writes 5 bytes.
func (bigEndian) PutUint40(b []byte, v uint64) { beStore(b, 5, v) }

// PutUint48 writes 6 bytes.
func (bigEndian) PutUint48(b []byte, v uint64) { beStore(b, 6, v) }

// PutUint56 writes 7 bytes.
func (bigEndian) PutUint56(b []byte, v uint64) { beStore(b, 7, v) }

// PutUint64 writes 8 bytes.
func (bigEndian) PutUint64(b []byte, v uint64) { beStore(b, 8, v) }

// String returns the endianness name.
func (bigEndian) String() string { return "BigEndian" }
package tai64n
import "time"
// LeapSecond represents the first moment after a leap second occurs.
// Threshold is the UTC instant at which the new offset takes effect,
// and Offset is the cumulative number of leap seconds in effect from
// that instant onward.
type LeapSecond struct {
	Threshold time.Time
	Offset    int
}
// AllLeapSeconds lists every leap second from 1972 through 2017 in
// chronological order, with the cumulative offset in effect from each
// threshold on. (The redundant &LeapSecond literal prefixes are elided;
// the slice element type supplies them.)
var AllLeapSeconds = []*LeapSecond{
	{time.Date(1972, time.January, 1, 0, 0, 0, 0, time.UTC), 10},
	{time.Date(1972, time.July, 1, 0, 0, 0, 0, time.UTC), 11},
	{time.Date(1973, time.January, 1, 0, 0, 0, 0, time.UTC), 12},
	{time.Date(1974, time.January, 1, 0, 0, 0, 0, time.UTC), 13},
	{time.Date(1975, time.January, 1, 0, 0, 0, 0, time.UTC), 14},
	{time.Date(1976, time.January, 1, 0, 0, 0, 0, time.UTC), 15},
	{time.Date(1977, time.January, 1, 0, 0, 0, 0, time.UTC), 16},
	{time.Date(1978, time.January, 1, 0, 0, 0, 0, time.UTC), 17},
	{time.Date(1979, time.January, 1, 0, 0, 0, 0, time.UTC), 18},
	{time.Date(1980, time.January, 1, 0, 0, 0, 0, time.UTC), 19},
	{time.Date(1981, time.July, 1, 0, 0, 0, 0, time.UTC), 20},
	{time.Date(1982, time.July, 1, 0, 0, 0, 0, time.UTC), 21},
	{time.Date(1983, time.July, 1, 0, 0, 0, 0, time.UTC), 22},
	{time.Date(1985, time.July, 1, 0, 0, 0, 0, time.UTC), 23},
	{time.Date(1988, time.January, 1, 0, 0, 0, 0, time.UTC), 24},
	{time.Date(1990, time.January, 1, 0, 0, 0, 0, time.UTC), 25},
	{time.Date(1991, time.January, 1, 0, 0, 0, 0, time.UTC), 26},
	{time.Date(1992, time.July, 1, 0, 0, 0, 0, time.UTC), 27},
	{time.Date(1993, time.July, 1, 0, 0, 0, 0, time.UTC), 28},
	{time.Date(1994, time.July, 1, 0, 0, 0, 0, time.UTC), 29},
	{time.Date(1996, time.January, 1, 0, 0, 0, 0, time.UTC), 30},
	{time.Date(1997, time.July, 1, 0, 0, 0, 0, time.UTC), 31},
	{time.Date(1999, time.January, 1, 0, 0, 0, 0, time.UTC), 32},
	{time.Date(2006, time.January, 1, 0, 0, 0, 0, time.UTC), 33},
	{time.Date(2009, time.January, 1, 0, 0, 0, 0, time.UTC), 34},
	{time.Date(2012, time.July, 1, 0, 0, 0, 0, time.UTC), 35},
	{time.Date(2015, time.July, 1, 0, 0, 0, 0, time.UTC), 36},
	{time.Date(2017, time.January, 1, 0, 0, 0, 0, time.UTC), 37},
}
// LeapMoment pairs a LeapSecond with its precomputed TAI64N timestamp.
type LeapMoment struct {
	LeapSecond *LeapSecond
	Moment     *TAI64N
}
// AllLeapMoments mirrors AllLeapSeconds with TAI64N moments, populated
// once at package init.
var AllLeapMoments []*LeapMoment

func init() {
	for _, ls := range AllLeapSeconds {
		moment := FromTime(ls.Threshold)
		// Step back one second so the stored moment lands just before the
		// threshold — presumably to account for the inserted leap second
		// itself; TODO confirm against FromTime's offset handling.
		moment.Seconds--
		AllLeapMoments = append(AllLeapMoments, &LeapMoment{ls, moment})
	}
}
// LeapSecondsInvolved returns the number of leap seconds that occur
// previous to the given time, or 0 for times before the first entry.
func LeapSecondsInvolved(t time.Time) uint64 {
	// Scan newest-first: timestamps are typically recent, because, well,
	// computers — so the match is usually found immediately.
	unix := t.Unix()
	for i := len(AllLeapSeconds) - 1; i >= 0; i-- {
		entry := AllLeapSeconds[i]
		if unix >= entry.Threshold.Unix() {
			return uint64(entry.Offset)
		}
	}
	return 0
}
func nearestLeapMoment(t *TAI64N) *LeapMoment {
for i := len(AllLeapMoments) - 1; i >= 0; i-- {
lm := AllLeapMoments[i]
if t.Equal(lm.Moment) || t.After(lm.Moment) {
return lm
}
}
return nil
} | leapsecond.go | 0.551332 | 0.522263 | leapsecond.go | starcoder |
package interpreter
import (
"fmt"
"reflect"
)
// List is the interpreter Value holding an ordered sequence of Values.
type List struct {
	Elements []Value
}
// Compare yields Number(0) when other is a deeply-equal List, and
// (nil, nil) — no ordering — in every other case.
func (l List) Compare(other Value) (Value, error) {
	if r, ok := other.(List); ok && reflect.DeepEqual(l, r) {
		return Number(0), nil
	}
	return nil, nil
}
// Add is not defined for lists; it always returns a type-mismatch error.
func (l List) Add(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: list + %s Not supported", reflect.TypeOf(other))
}

// Sub is not defined for lists; it always returns a type-mismatch error.
func (l List) Sub(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: list - %s Not supported", reflect.TypeOf(other))
}

// Mul is not defined for lists; it always returns a type-mismatch error.
func (l List) Mul(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: list * %s Not supported", reflect.TypeOf(other))
}

// Div is not defined for lists; it always returns a type-mismatch error.
func (l List) Div(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: list / %s Not supported", reflect.TypeOf(other))
}

// Mod is not defined for lists; it always returns a type-mismatch error.
// (%% renders as a literal % in the formatted message.)
func (l List) Mod(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: list %% %s Not supported", reflect.TypeOf(other))
}

// In is not defined for lists; it always returns a type-mismatch error.
func (l List) In(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: list In %s Not supported", reflect.TypeOf(other))
}

// Neg (unary minus) is not defined for lists.
func (l List) Neg() (Value, error) {
	return nil, fmt.Errorf("type mismatch: -list Not supported")
}

// Not (logical negation) is not defined for lists.
func (l List) Not() (Value, error) {
	return nil, fmt.Errorf("type mismatch: 'Not list' Not supported")
}

// At (the @ operator) is not defined for lists.
func (l List) At(bitmap BitmapContext) (Value, error) {
	return nil, fmt.Errorf("type mismatch: @list Not supported")
}
// Property resolves the named property: lists expose "count" (the
// element count as a Number) and defer everything else to baseProperty.
func (l List) Property(ident string) (Value, error) {
	if ident == "count" {
		return Number(len(l.Elements)), nil
	}
	return baseProperty(l, ident)
}
// PrintStr renders a short human-readable description of the list.
func (l List) PrintStr() string {
	size := len(l.Elements)
	return fmt.Sprintf("list(count: %d)", size)
}
// Iterate calls visit on each element in order, stopping at and
// returning the first error.
func (l List) Iterate(visit func(Value) error) error {
	for _, element := range l.Elements {
		if err := visit(element); err != nil {
			return err
		}
	}
	return nil
}
// Index returns the element at the given position. The index must be a
// Number; its resolution against the list length goes through indexAt.
func (l List) Index(index Value) (Value, error) {
	num, ok := index.(Number)
	if !ok {
		return nil, fmt.Errorf("type mismatch: expected list[number] but found list[%s]", reflect.TypeOf(index))
	}
	pos := indexAt(num, len(l.Elements))
	return l.Elements[pos], nil
}
// IndexRange returns a new List viewing the inclusive slice
// list[lower..upper]. Both bounds must be Numbers; each is resolved
// against the list length via indexAt. The returned List shares the
// receiver's backing array.
func (l List) IndexRange(lower, upper Value) (Value, error) {
	lowerNum, ok := lower.(Number)
	if !ok {
		// These messages previously said "kernel[...]" — a copy-paste from
		// the kernel type's IndexRange; they now name the list type.
		return nil, fmt.Errorf("type mismatch: list[%s..upper] Not supported", reflect.TypeOf(lower))
	}
	lowerIdx := indexAt(lowerNum, len(l.Elements))
	upperNum, ok := upper.(Number)
	if !ok {
		return nil, fmt.Errorf("type mismatch: list[lower..%s] Not supported", reflect.TypeOf(upper))
	}
	upperIdx := indexAt(upperNum, len(l.Elements))
	return List{
		Elements: l.Elements[int(lowerIdx) : upperIdx+1],
	}, nil
}
// IndexAssign replaces the element at the given position. The index
// must be a Number; it is resolved against the list length via indexAt.
func (l List) IndexAssign(index Value, val Value) error {
	num, ok := index.(Number)
	if !ok {
		return fmt.Errorf("type mismatch: expected list[number] but found list[%s]", reflect.TypeOf(index))
	}
	pos := indexAt(num, len(l.Elements))
	l.Elements[pos] = val
	return nil
}
// RuntimeTypeName returns the interpreter-facing name of this type.
func (l List) RuntimeTypeName() string {
	return "list"
}
func (l List) Concat(val Value) (Value, error) {
if r, ok := val.(List); ok {
return List{
Elements: append(l.Elements, r.Elements...),
}, nil
}
return List{
Elements: append(l.Elements, val),
}, nil
} | internal/interpreter/list.go | 0.663451 | 0.491578 | list.go | starcoder |
// Package set defines a Set type that holds a set of elements.
package set
// A Set is a set of elements of some comparable type.
// Sets are implemented using maps, and have similar performance characteristics.
// Like maps, Sets are reference types.
// That is, for Sets s1 = s2 will leave s1 and s2 pointing to the same set of elements:
// changes to s1 will be reflected in s2 and vice-versa.
// Unlike maps, the zero value of a Set is usable; there is no equivalent to make.
// As with maps, concurrent calls to functions and methods that read values are fine;
// concurrent calls to functions and methods that write values are racy.
type Set[Elem comparable] struct {
m map[Elem]struct{}
}
// Of returns a new set containing the listed elements.
func Of[Elem comparable](v ...Elem) Set[Elem] {
m := map[Elem]struct{}{}
for _, e := range v {
m[e] = struct{}{}
}
return Set[Elem]{m}
}
// Add adds elements to a set.
func (s *Set[Elem]) Add(v ...Elem) {
for _, e := range v {
s.m[e] = struct{}{}
}
}
// AddSet adds the elements of set s2 to s.
func (s *Set[Elem]) AddSet(s2 Set[Elem]) {
for k := range s2.m {
s.m[k] = struct{}{}
}
}
// Remove removes elements from a set.
// Elements that are not present are ignored.
func (s *Set[Elem]) Remove(v ...Elem) {
for k := range s.m {
for i := range v {
if k == v[i] {
delete(s.m, v[i])
}
}
}
}
// RemoveSet removes the elements of set s2 from s.
// Elements present in s2 but not s are ignored.
func (s *Set[Elem]) RemoveSet(s2 Set[Elem]) {
for k1 := range s.m {
for k2 := range s2.m {
if k1 == k2 {
delete(s.m, k1)
}
}
}
}
// Has reports whether v is in the set.
func (s *Set[Elem]) Has(v Elem) bool {
_, ok := s.m[v]
return ok
}
// HasAny reports whether any of the elements in s2 are in s.
func (s *Set[Elem]) HasAny(s2 Set[Elem]) bool {
for k2 := range s2.m {
if _, ok := s.m[k2]; ok {
return true
}
}
return false
}
// HasAll reports whether all of the elements in s2 are in s.
func (s *Set[Elem]) HasAll(s2 Set[Elem]) bool {
for k2 := range s2.m {
if _, ok := s.m[k2]; !ok {
return false
}
}
return true
}
// Values returns the elements in the set s as a slice.
// The values will be in an indeterminate order.
func (s *Set[Elem]) Values() []Elem {
sli := make([]Elem, len(s.m))
for k := range s.m {
sli = append(sli, k)
}
return sli
}
// Equal reports whether s and s2 contain the same elements.
func (s *Set[Elem]) Equal(s2 Set[Elem]) bool {
if len(s.m) != len(s2.m) {
return false
}
for k := range s.m {
if _, ok := s2.m[k]; !ok {
return false
}
}
return true
}
// Clear removes all elements from s, leaving it empty.
func (s *Set[Elem]) Clear() {
s.m = map[Elem]struct{}{}
}
// Clone returns a copy of s.
// The elements are copied using assignment,
// so this is a shallow clone.
func (s *Set[Elem]) Clone() Set[Elem] {
m := map[Elem]struct{}{}
for k := range s.m {
m[k] = struct{}{}
}
return Set[Elem]{m}
}
// Filter deletes any elements from s for which keep returns false.
func (s *Set[Elem]) Filter(keep func(Elem) bool) {
for k := range s.m {
if !keep(k) {
delete(s.m, k)
}
}
}
// Len returns the number of elements in s.
func (s *Set[Elem]) Len() int {
return len(s.m)
}
// Do calls f on every element in the set s,
// stopping if f returns false.
// f should not change s.
// f will be called on values in an indeterminate order.
func (s *Set[Elem]) Do(f func(Elem) bool) {
for k := range s.m {
if !f(k) {
return
}
}
}
// Union constructs a new set containing the union of s1 and s2.
func Union[Elem comparable](s1, s2 Set[Elem]) Set[Elem] {
m := map[Elem]struct{}{}
for k := range s1.m {
m[k] = struct{}{}
}
for k := range s2.m {
m[k] = struct{}{}
}
return Set[Elem]{m}
}
// Intersection constructs a new set containing the intersection of s1 and s2.
func Intersection[Elem comparable](s1, s2 Set[Elem]) Set[Elem] {
m := map[Elem]struct{}{}
for k := range s1.m {
if _, ok := s2.m[k]; ok {
m[k] = struct{}{}
}
}
return Set[Elem]{m}
}
// Difference constructs a new set containing the elements of s1 that
// are not present in s2.
func Difference[Elem comparable](s1, s2 Set[Elem]) Set[Elem] {
m := map[Elem]struct{}{}
for k := range s1.m {
if _, ok := s2.m[k]; !ok {
m[k] = struct{}{}
}
}
return Set[Elem]{m}
} | container/set/set.go | 0.805747 | 0.508483 | set.go | starcoder |
package boxlayout
import (
"github.com/jesseduffield/generics/slices"
"github.com/jesseduffield/lazygit/pkg/utils"
"github.com/samber/lo"
)
type Dimensions struct {
X0 int
X1 int
Y0 int
Y1 int
}
type Direction int
const (
ROW Direction = iota
COLUMN
)
// to give a high-level explanation of what's going on here. We layout our windows by arranging a bunch of boxes in the available space.
// If a box has children, it needs to specify how it wants to arrange those children: ROW or COLUMN.
// If a box represents a window, you can put the window name in the Window field.
// When determining how to divvy-up the available height (for row children) or width (for column children), we first
// give the boxes with a static `size` the space that they want. Then we apportion
// the remaining space based on the weights of the dynamic boxes (you can't define
// both size and weight at the same time: you gotta pick one). If there are two
// boxes, one with weight 1 and the other with weight 2, the first one gets 33%
// of the available space and the second one gets the remaining 66%
type Box struct {
// Direction decides how the children boxes are laid out. ROW means the children will each form a row i.e. that they will be stacked on top of eachother.
Direction Direction
// function which takes the width and height assigned to the box and decides which orientation it will have
ConditionalDirection func(width int, height int) Direction
Children []*Box
// function which takes the width and height assigned to the box and decides the layout of the children.
ConditionalChildren func(width int, height int) []*Box
// Window refers to the name of the window this box represents, if there is one
Window string
// static Size. If parent box's direction is ROW this refers to height, otherwise width
Size int
// dynamic size. Once all statically sized children have been considered, Weight decides how much of the remaining space will be taken up by the box
// TODO: consider making there be one int and a type enum so we can't have size and Weight simultaneously defined
Weight int
}
func ArrangeWindows(root *Box, x0, y0, width, height int) map[string]Dimensions {
children := root.getChildren(width, height)
if len(children) == 0 {
// leaf node
if root.Window != "" {
dimensionsForWindow := Dimensions{X0: x0, Y0: y0, X1: x0 + width - 1, Y1: y0 + height - 1}
return map[string]Dimensions{root.Window: dimensionsForWindow}
}
return map[string]Dimensions{}
}
direction := root.getDirection(width, height)
var availableSize int
if direction == COLUMN {
availableSize = width
} else {
availableSize = height
}
sizes := calcSizes(children, availableSize)
result := map[string]Dimensions{}
offset := 0
for i, child := range children {
boxSize := sizes[i]
var resultForChild map[string]Dimensions
if direction == COLUMN {
resultForChild = ArrangeWindows(child, x0+offset, y0, boxSize, height)
} else {
resultForChild = ArrangeWindows(child, x0, y0+offset, width, boxSize)
}
result = mergeDimensionMaps(result, resultForChild)
offset += boxSize
}
return result
}
func calcSizes(boxes []*Box, availableSpace int) []int {
normalizedWeights := normalizeWeights(slices.Map(boxes, func(box *Box) int { return box.Weight }))
totalWeight := 0
reservedSpace := 0
for i, box := range boxes {
if box.isStatic() {
reservedSpace += box.Size
} else {
totalWeight += normalizedWeights[i]
}
}
dynamicSpace := utils.Max(0, availableSpace-reservedSpace)
unitSize := 0
extraSpace := 0
if totalWeight > 0 {
unitSize = dynamicSpace / totalWeight
extraSpace = dynamicSpace % totalWeight
}
result := make([]int, len(boxes))
for i, box := range boxes {
if box.isStatic() {
// assuming that only one static child can have a size greater than the
// available space. In that case we just crop the size to what's available
result[i] = utils.Min(availableSpace, box.Size)
} else {
result[i] = unitSize * normalizedWeights[i]
}
}
// distribute the remainder across dynamic boxes.
for extraSpace > 0 {
for i, weight := range normalizedWeights {
if weight > 0 {
result[i]++
extraSpace--
normalizedWeights[i]--
if extraSpace == 0 {
break
}
}
}
}
return result
}
// removes common multiple from weights e.g. if we get 2, 4, 4 we return 1, 2, 2.
func normalizeWeights(weights []int) []int {
if len(weights) == 0 {
return []int{}
}
// to spare us some computation we'll exit early if any of our weights is 1
if slices.Some(weights, func(weight int) bool { return weight == 1 }) {
return weights
}
// map weights to factorSlices and find the lowest common factor
positiveWeights := slices.Filter(weights, func(weight int) bool { return weight > 0 })
factorSlices := slices.Map(positiveWeights, func(weight int) []int { return calcFactors(weight) })
commonFactors := factorSlices[0]
for _, factors := range factorSlices {
commonFactors = lo.Intersect(commonFactors, factors)
}
if len(commonFactors) == 0 {
return weights
}
newWeights := slices.Map(weights, func(weight int) int { return weight / commonFactors[0] })
return normalizeWeights(newWeights)
}
func calcFactors(n int) []int {
factors := []int{}
for i := 2; i <= n; i++ {
if n%i == 0 {
factors = append(factors, i)
}
}
return factors
}
func (b *Box) isStatic() bool {
return b.Size > 0
}
func (b *Box) getDirection(width int, height int) Direction {
if b.ConditionalDirection != nil {
return b.ConditionalDirection(width, height)
}
return b.Direction
}
func (b *Box) getChildren(width int, height int) []*Box {
if b.ConditionalChildren != nil {
return b.ConditionalChildren(width, height)
}
return b.Children
}
func mergeDimensionMaps(a map[string]Dimensions, b map[string]Dimensions) map[string]Dimensions {
result := map[string]Dimensions{}
for _, dimensionMap := range []map[string]Dimensions{a, b} {
for k, v := range dimensionMap {
result[k] = v
}
}
return result
} | pkg/gui/boxlayout/boxlayout.go | 0.561455 | 0.558207 | boxlayout.go | starcoder |
package palette
import (
"fmt"
"image/color"
)
// Rainbow creates a palette of colors in rainbow order. It accepts a step
// parameter which determines the distance between each color shift. The smaller
// the step, the more individual colors will result in the palette. The smallest
// possible step is 7, which results in 223 evenly spaced colors. With larger
// steps, there will be less colors, and the colors will be less evenly spaced.
// nolint: funlen
func Rainbow(step int) (color.Palette, error) {
	p := color.Palette{}
	if step < 7 {
		return p, fmt.Errorf(
			"step must be greater than 6, got: %d. Palette cannot hold more than 256 colors",
			step,
		)
	}
	if step > 255 {
		return p, fmt.Errorf(
			"step must be less than 256, got: %d. 255 is the greatest color increment possible",
			step,
		)
	}
	// Start at Red.
	rgba := color.RGBA{255, 0, 0, 255}
	p = append(p, rgba)
	// ramp walks one channel of rgba toward target (0 or 255) in
	// increments of step, clamping at the target, and appends every
	// intermediate color to the palette. This replaces six copies of the
	// same hand-written loop (one per segment of the color wheel).
	// NOTE: an alternative approach would be to keep track of the
	// overflow to make the overall distribution of color more even.
	ramp := func(ch *uint8, target int) {
		for int(*ch) != target {
			next := int(*ch)
			if next < target {
				next += step
				if next > target {
					next = target
				}
			} else {
				next -= step
				if next < target {
					next = target
				}
			}
			*ch = uint8(next)
			p = append(p, rgba)
		}
	}
	ramp(&rgba.G, 255) // Red -> Yellow
	ramp(&rgba.R, 0)   // Yellow -> Green
	ramp(&rgba.B, 255) // Green -> Cyan
	ramp(&rgba.G, 0)   // Cyan -> Blue
	ramp(&rgba.R, 255) // Blue -> Magenta
	ramp(&rgba.B, 0)   // Magenta -> Red
	return p, nil
} | internal/palette/rainbow.go | 0.744563 | 0.458712 | rainbow.go | starcoder |
package nem12
import (
"fmt"
"strings"
)
// Quality flag values for the NEM12 QualityMethod field. QualityUndefined
// is the zero value and is never itself a valid flag.
const (
	// QualityUndefined is for undefined quality flags.
	QualityUndefined Quality = iota
	// QualityActual is the quality flag value for actual data.
	QualityActual
	// QualityEstimated is the quality flag value for forward estimated data.
	QualityEstimated
	// QualityFinal is the quality flag value for final substituted data.
	QualityFinal
	// QualityNull is the quality flag value for null data.
	QualityNull
	// QualitySubstituted is the quality flag value for substituted data.
	QualitySubstituted
	// QualityVariable is the quality flag value for variable data.
	QualityVariable
)
var (
	// qualities lists all qualities.
	// NOTE: qualities, QualityName, QualityValue and qualityDescriptions
	// must be kept in sync whenever a flag is added or removed.
	qualities = []Quality{ //nolint:gochecknoglobals
		QualityActual,
		QualityEstimated,
		QualityFinal,
		QualityNull,
		QualitySubstituted,
		QualityVariable,
	}
	// QualityName maps a Quality to its name.
	QualityName = map[Quality]string{ //nolint:gochecknoglobals
		QualityActual: "A",
		QualityEstimated: "E",
		QualityFinal: "F",
		QualityNull: "N",
		QualitySubstituted: "S",
		QualityVariable: "V",
	}
	// QualityValue maps a name to its value.
	// It is the inverse of QualityName.
	QualityValue = map[string]Quality{ //nolint:gochecknoglobals
		"A": QualityActual,
		"E": QualityEstimated,
		"F": QualityFinal,
		"N": QualityNull,
		"S": QualitySubstituted,
		"V": QualityVariable,
	}
	// qualityDescriptions provides the descriptions for the quality flags.
	// NOTE(review): QualityFinal is described here as "final estimated
	// data" but the const block says "final substituted data" — confirm
	// which wording is correct.
	qualityDescriptions = map[Quality]string{ //nolint:gochecknoglobals
		QualityActual: "actual data",
		QualityEstimated: "forward estimated data",
		QualityFinal: "final estimated data",
		QualityNull: "null data",
		QualitySubstituted: "substituted data",
		QualityVariable: "variable data",
	}
)
// Quality represents the value of the quality flag part of the QualityMethod field
// of an NEM12 interval. The zero value is QualityUndefined, which is invalid.
type Quality int
// Qualities returns a slice of all the valid qualities.
// The package-level backing slice itself is returned, so callers must
// not mutate it.
func Qualities() []Quality {
	return qualities
}
// NewQualityFlag parses s (case-insensitively) into a Quality. It
// returns ErrQualityNil for an empty string and ErrQualityInvalid for
// an unrecognized flag.
func NewQualityFlag(s string) (Quality, error) {
	if s == "" {
		return QualityUndefined, ErrQualityNil
	}
	if q, ok := QualityValue[strings.ToUpper(s)]; ok {
		return q, nil
	}
	return QualityUndefined, ErrQualityInvalid
}
// Validate returns ErrQualityInvalid unless q is one of the defined
// quality flags, in which case it returns nil.
func (q Quality) Validate() error {
	switch q {
	case QualityActual, QualityEstimated, QualityFinal, QualityNull, QualitySubstituted, QualityVariable:
		return nil
	default:
		// The default branch covers QualityUndefined as well as any
		// out-of-range value; the previous explicit QualityUndefined case
		// duplicated it for no behavioral difference.
		return ErrQualityInvalid
	}
}
// Identifier returns the single-letter NEM12 identifier for the quality
// flag (e.g. "A" for actual data), meeting the interface specification
// for a Flag. Unknown values fall back to a "Quality(n)" placeholder.
func (q Quality) Identifier() string {
	id, ok := QualityName[q]
	if !ok {
		// Fix: the fallback previously read "Method(%d)", mislabelling a
		// Quality value as a Method; use "Quality(%d)" for consistency
		// with GoString.
		return fmt.Sprintf("Quality(%d)", q)
	}
	return id
}
// GoString returns a text representation of the Quality to satisfy the GoStringer
// interface (used by fmt's %#v verb).
func (q Quality) GoString() string {
	return fmt.Sprintf("Quality(%d)", q)
}
// String returns a text representation of the Quality in the form
// `"A: actual data"`. Invalid flags render as the quoted identifier
// alone.
func (q Quality) String() string {
	id := q.Identifier()
	desc, err := q.Description()
	if err != nil {
		return fmt.Sprintf("%q", id)
	}
	return fmt.Sprintf("\"%s: %s\"", id, desc)
}
// Description returns the description of a quality flag.
// ErrQualityInvalid is returned if the flag is not defined.
func (q Quality) Description() (string, error) {
	if d, ok := qualityDescriptions[q]; ok {
		return d, nil
	}
	return "", ErrQualityInvalid
}
// MarshalJSON marshals the quality flag as its quoted single-letter
// name; unknown values are marshalled as their quoted numeric value.
func (q *Quality) MarshalJSON() ([]byte, error) {
	if id, ok := QualityName[*q]; ok {
		return []byte(fmt.Sprintf("%q", id)), nil
	}
	return []byte(fmt.Sprintf("\"%d\"", *q)), nil
}
// UnmarshalJSON unmarshals a JSON string (e.g. `"A"`) into a Quality.
// ErrQualityInvalid is returned for unrecognized flags.
func (q *Quality) UnmarshalJSON(data []byte) error {
	// Bug fix: data is the raw JSON token including its surrounding
	// quotes, so `"A"` never matched the map key `A`; strip the quotes
	// before the lookup (strings is already imported by this file,
	// avoiding a strconv.Unquote dependency).
	s := strings.Trim(string(data), `"`)
	v, ok := QualityValue[s]
	if !ok {
		// Bug fix: previously returned ErrSuffixTypeInvalid, a sentinel
		// belonging to a different type; return the Quality sentinel.
		return ErrQualityInvalid
	}
	*q = v
	return nil
}
// MustNotHaveReason indicates if a quality flag must not have a reason.
// Only variable data (V) forbids one.
func (q Quality) MustNotHaveReason() bool {
	return q == QualityVariable
}
// RequiresMethod indicates if a quality flag requires an accompanying
// method. Only estimated (E), final (F) and substituted (S) data do.
func (q Quality) RequiresMethod() bool {
	switch q {
	case QualityEstimated, QualityFinal, QualitySubstituted:
		return true
	default:
		return false
	}
}
// RequiresReason indicates if a quality flag requires a reason.
// Only final (F) and substituted (S) data require one.
func (q Quality) RequiresReason() (b bool) {
	switch q {
	case QualityFinal, QualitySubstituted:
		b = true
	case QualityActual, QualityNull, QualityEstimated, QualityVariable, QualityUndefined:
		// Explicitly enumerated so exhaustiveness linters can verify
		// every defined flag is handled.
		b = false
	default:
		b = false
	}
	return b
} | nem12/quality.go | 0.68721 | 0.455259 | quality.go | starcoder |
package tranquil
import (
"fmt"
"reflect"
"time"
)
// Assertion provides functions for performing assertions when testing.
type Assertion struct {
	// Test receives PrintError calls when an assertion fails.
	Test *Test
	// Value is the actual value under assertion.
	Value interface{}
}
// NewAssertion creates and returns a new Assertion instance wrapping
// value on behalf of test t.
func NewAssertion(t *Test, value interface{}) *Assertion {
	return &Assertion{t, value}
}
// ToEqual asserts that the assertion value is equal to the given value.
func (as *Assertion) ToEqual(value interface{}) {
	if as.areEqual(as.Value, value) {
		return
	}
	as.Test.PrintError(fmt.Sprintf("Expected `%v` to equal `%v`", as.Value, value))
}

// ToBe asserts equality with the given value; it is an alias for ToEqual.
func (as *Assertion) ToBe(value interface{}) {
	as.ToEqual(value)
}

// ToNotEqual asserts that the assertion value is not equal to the given value.
func (as *Assertion) ToNotEqual(value interface{}) {
	if !as.areEqual(as.Value, value) {
		return
	}
	as.Test.PrintError(fmt.Sprintf("Expected `%v` to not equal `%v`", as.Value, value))
}

// ToNotBe asserts inequality with the given value; it is an alias for ToNotEqual.
func (as *Assertion) ToNotBe(value interface{}) {
	as.ToNotEqual(value)
}

// ToBeTrue asserts that the assertion value is the boolean true.
func (as *Assertion) ToBeTrue() {
	as.ToEqual(true)
}

// ToBeFalse asserts that the assertion value is the boolean false.
func (as *Assertion) ToBeFalse() {
	as.ToEqual(false)
}
// ToBeTheSame asserts that the assertion value references the same
// object as the given value (identical dynamic type and equal).
func (as *Assertion) ToBeTheSame(value interface{}) {
	if as.areTheSame(as.Value, value) {
		return
	}
	as.Test.PrintError(fmt.Sprintf("Expected `%v` to be the same instance as `%v`", as.Value, value))
}

// ToNotBeTheSame asserts that the assertion value references a
// different object than the given value.
func (as *Assertion) ToNotBeTheSame(value interface{}) {
	if !as.areTheSame(as.Value, value) {
		return
	}
	as.Test.PrintError(fmt.Sprintf("Expected `%v` to not be the same instance as `%v`", as.Value, value))
}
// ToBeNil asserts that the assertion value is nil (including typed nil
// pointers, slices, maps, channels, funcs and interfaces).
func (as *Assertion) ToBeNil() {
	if as.isNil(as.Value) {
		return
	}
	as.Test.PrintError(fmt.Sprintf("Expected `%v` to not exist", as.Value))
}

// ToNotBeNil asserts that the assertion value is not nil.
func (as *Assertion) ToNotBeNil() {
	if !as.isNil(as.Value) {
		return
	}
	as.Test.PrintError(fmt.Sprintf("Expected `%v` to exist", as.Value))
}
// ToBeEmpty asserts that the assertion value is empty. An 'empty' value
// includes: nil, "", false, 0, a map/slice/chan with length 0, a zero
// time value.
func (as *Assertion) ToBeEmpty() {
	if as.isEmpty(as.Value) {
		return
	}
	as.Test.PrintError(fmt.Sprintf("Expected `%v` to be empty", as.Value))
}

// ToNotBeEmpty asserts that the assertion value is not empty. An
// 'empty' value includes: nil, "", false, 0, a map/slice/chan with
// length 0, a zero time value.
func (as *Assertion) ToNotBeEmpty() {
	if !as.isEmpty(as.Value) {
		return
	}
	as.Test.PrintError(fmt.Sprintf("Expected `%v` to not be empty", as.Value))
}
// ToPanic asserts that the assertion value is a func() that panics when
// executed. (A panic(nil) is not detected, since recover returns nil.)
func (as *Assertion) ToPanic() {
	panicked := false
	func() {
		defer func() {
			if r := recover(); r != nil {
				panicked = true
			}
		}()
		as.Value.(func())()
	}()
	if !panicked {
		as.Test.PrintError("Expected panic")
	}
}

// ToNotPanic asserts that the assertion value is a func() that runs to
// completion without panicking.
func (as *Assertion) ToNotPanic() {
	panicked := false
	func() {
		defer func() {
			if r := recover(); r != nil {
				panicked = true
			}
		}()
		as.Value.(func())()
	}()
	if panicked {
		as.Test.PrintError("Expected not to panic")
	}
}
// isNil reports whether obj is nil, either as an untyped nil interface
// or as a typed nil chan/func/interface/map/pointer/slice.
func (*Assertion) isNil(obj interface{}) bool {
	if obj == nil {
		return true
	}
	v := reflect.ValueOf(obj)
	switch v.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		return v.IsNil()
	}
	return false
}
// areEqual reports whether act and exp are equal, trying in order: nil
// comparison, reflect.DeepEqual, reflect.Value comparison, and a
// conversion of act to exp's type followed by comparison.
func (*Assertion) areEqual(act, exp interface{}) bool {
	if act == nil || exp == nil {
		return act == exp
	}
	if reflect.DeepEqual(act, exp) {
		return true
	}
	actValue := reflect.ValueOf(act)
	expValue := reflect.ValueOf(exp)
	// NOTE(review): comparing reflect.Value structs with == compares their
	// internal representation (type/flag/data words), not the underlying
	// values — confirm this matches the intended semantics.
	if actValue == expValue {
		return true
	}
	t := expValue.Type()
	// ConvertibleTo guards the Convert call from panicking; the ==
	// afterwards carries the same caveat as above.
	if actValue.Type().ConvertibleTo(t) && actValue.Convert(t) == expValue {
		return true
	}
	return false
}
// areTheSame reports whether act and exp have the identical dynamic
// type and compare equal under areEqual.
func (as *Assertion) areTheSame(act, exp interface{}) bool {
	if reflect.TypeOf(act) != reflect.TypeOf(exp) {
		return false
	}
	return as.areEqual(act, exp)
}
// isEmpty reports whether obj counts as "empty": nil, "", false, any
// numeric zero, a map/slice/chan of length 0, or a zero time value
// (time.Time or *time.Time).
func (as *Assertion) isEmpty(obj interface{}) bool {
	if obj == nil {
		return true
	}
	if obj == "" || obj == false {
		return true
	}
	// Any type convertible to float64 (ints, uints, floats, and types
	// based on them) is empty when it equals zero.
	if f, err := as.getFloat(obj); err == nil && f == 0 {
		return true
	}
	v := reflect.ValueOf(obj)
	switch v.Kind() {
	case reflect.Map, reflect.Slice, reflect.Chan:
		return v.Len() == 0
	case reflect.Struct:
		// Fix: ToBeEmpty documents "a zero time value", but previously
		// only the *time.Time form was handled; support time.Time values.
		if t, ok := obj.(time.Time); ok {
			return t.IsZero()
		}
	case reflect.Ptr:
		if t, ok := obj.(*time.Time); ok {
			// Robustness fix: a nil *time.Time previously panicked inside
			// IsZero; treat it as empty instead.
			return t == nil || t.IsZero()
		}
	}
	return false
}
// getFloat converts obj (or the value it points to) to a float64. An
// error is returned when the underlying type is not convertible to
// float64.
func (as *Assertion) getFloat(obj interface{}) (float64, error) {
	v := reflect.ValueOf(obj)
	// Indirect dereferences pointers so *int etc. behave like their
	// element type.
	v = reflect.Indirect(v)
	floatType := reflect.TypeOf(float64(0))
	if !v.Type().ConvertibleTo(floatType) {
		return 0, fmt.Errorf("cannot convert to float64")
	}
	fv := v.Convert(floatType)
	return fv.Float(), nil
} | Godeps/_workspace/src/github.com/davelaursen/tranquil/assertion.go | 0.787319 | 0.662339 | assertion.go | starcoder |
package engine
import (
"errors"
"machassert/config"
"machassert/machine"
"sort"
)
// Executor stores state/configuration for applying assertions to targets.
type Executor struct {
	// machines describes the targets to connect to.
	machines *config.MachineSpec
	// assertions are applied, in order, to every machine.
	assertions []*config.AssertionSpec
	// logger receives machine/assertion progress updates.
	logger Logger
}
// New creates a new executor that logs to the console.
func New(machines *config.MachineSpec, assertions []*config.AssertionSpec) *Executor {
	return &Executor{
		machines: machines,
		assertions: assertions,
		logger: &ConsoleLogger{},
	}
}
// Run applies the assertions in the executor to the machines it knows
// about, stopping at the first connection, assertion, or close error.
func (e *Executor) Run() error {
	for name, cfg := range e.machines.Machine {
		e.logger.LogMachineStatus(name, false, cfg, nil)
		m, err := connect(name, cfg, e.logger)
		e.logger.LogMachineStatus(name, true, cfg, err)
		if err != nil {
			return err
		}
		for _, spec := range e.assertions {
			if err := e.runAssertionOnMachine(m, spec); err != nil {
				// Best-effort close; the assertion error takes precedence.
				m.Close()
				return err
			}
		}
		if err := m.Close(); err != nil {
			return err
		}
	}
	return nil
}
// assertionForSort pairs an assertion with its map key so the pair can
// be ordered by ByOrder.
type assertionForSort struct {
	name string
	assertion *config.Assertion
}
// sortAssertions returns the assertion names ordered by their Order
// field (ties broken by name), giving a deterministic run order.
func sortAssertions(assertions map[string]*config.Assertion) []string {
	pairs := make([]assertionForSort, 0, len(assertions))
	for name, a := range assertions {
		pairs = append(pairs, assertionForSort{name, a})
	}
	ordered := ByOrder(pairs)
	sort.Sort(ordered)
	return ordered.keys()
}
// ByOrder implements sort.Interface for []assertionForSort, ordering by
// the assertion's Order field and breaking ties by name so the result
// is deterministic.
type ByOrder []assertionForSort

func (a ByOrder) Len() int { return len(a) }

func (a ByOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

func (a ByOrder) Less(i, j int) bool {
	ai, aj := a[i], a[j]
	if ai.assertion.Order != aj.assertion.Order {
		return ai.assertion.Order < aj.assertion.Order
	}
	return ai.name < aj.name
}

// keys returns the assertion names in the slice's current order.
func (a ByOrder) keys() []string {
	names := make([]string, len(a))
	for i, entry := range a {
		names[i] = entry.name
	}
	return names
}
// runAssertionOnMachine applies every assertion in spec to the machine
// in deterministic order, stopping at the first failure.
func (e *Executor) runAssertionOnMachine(m Machine, spec *config.AssertionSpec) error {
	for _, name := range sortAssertions(spec.Assertions) {
		assertion := spec.Assertions[name]
		// Log once before (result nil) and once after the attempt.
		e.logger.LogAssertionStatus(spec.Name, name, assertion, nil, nil)
		result, err := applyAssertion(m, assertion, e, spec.Name+"."+name)
		e.logger.LogAssertionStatus(spec.Name, name, assertion, result, err)
		if err != nil {
			return err
		}
	}
	return nil
}
// connect dials the named machine using the transport selected by its
// configured Kind (local or SSH); any other kind is an error.
func connect(name string, m *config.Machine, l Logger) (Machine, error) {
	switch m.Kind {
	case config.KindLocal:
		return machine.ConnectLocal(name, m)
	case config.KindSSH:
		return machine.ConnectRemote(name, m, l)
	}
	return nil, errors.New("Could not interpret machine kind")
} | src/machassert/engine/executor.go | 0.620852 | 0.454956 | executor.go | starcoder |
package heap
// pi returns parent's index.
// It uses an arithmetic right shift, so pi(0) == -1 (not 0 as integer
// division would give); callers only pass i >= 1.
func pi(i int) int {
	return (i - 1) >> 1
}

// li returns left child's index (2i+1).
func li(i int) int {
	return i<<1 + 1
}

// ri returns right child's index (2i+2).
func ri(i int) int {
	return i<<1 + 2
}
// MaxHeap is a binary max-heap stored in a slice.
type MaxHeap struct {
	// Elems is the backing array; the live heap occupies Elems[:HeapSize].
	Elems []int
	// HeapSize is the number of elements currently in the heap.
	HeapSize int
}
// Size returns heap's size (count of elements).
func (mp MaxHeap) Size() int {
	return mp.HeapSize
}

// IsEmpty returns true if current heap is empty, otherwise false.
func (mp MaxHeap) IsEmpty() bool {
	return mp.Size() == 0
}
// IsMaxHeap determines if the first size elements of elems form a
// max-heap (every parent >= both of its children). size is clamped to
// len(elems).
func IsMaxHeap(elems []int, size int) bool {
	if size > len(elems) {
		size = len(elems)
	}
	// 2i+1 < size and 2i+2 < size -> i < (size-1) >> 1
	// Nodes with index < mid are guaranteed to have two children, so the
	// first loop can index both children without bounds checks; mid is
	// the last parent node and may have fewer children.
	mid := (size - 2) >> 1
	var i = 0
	for i = 0; i < mid; i++ {
		if l, r := li(i), ri(i); elems[l] > elems[i] || elems[r] > elems[i] {
			return false
		}
	}
	// Check the (possibly partial) children of node mid; for size <= 1
	// both guards fail and the slice is trivially a heap.
	if l := li(i); l < size && elems[l] > elems[i] {
		return false
	}
	if r := ri(i); r < size && elems[r] > elems[i] {
		return false
	}
	return true
}
// NewMaxHeap returns a pointer to an empty max-heap.
func NewMaxHeap() *MaxHeap {
	// HeapSize defaults to its zero value, 0.
	return &MaxHeap{Elems: []int{}}
}
// BuildMaxHeap heapifies elems in place and returns a MaxHeap that uses
// elems as its backing storage (the caller's slice is adopted, not
// copied).
func BuildMaxHeap(elems []int) MaxHeap {
	m := MaxHeap{
		Elems:    elems,
		HeapSize: len(elems),
	}
	// Sift down every internal node, starting from the last parent.
	// (The previous version started one index later, at Size()/2, which
	// is always a leaf and therefore a wasted no-op call.)
	for i := m.Size()/2 - 1; i >= 0; i-- {
		m.MaxHeapify(i)
	}
	return m
}
// Insert adds elem to the heap and sifts it up to restore the max-heap
// property.
func (mp *MaxHeap) Insert(elem int) {
	// Truncate any stale tail beyond HeapSize, then place elem in the
	// leftmost open slot on the bottom level.
	mp.Elems = append(mp.Elems[:mp.HeapSize], elem)
	mp.HeapSize++
	mp.upHeap(mp.HeapSize - 1)
}
// upHeap restores the heap property along the path from index i to the
// root, repeatedly swapping a node with its parent until the parent is
// strictly greater.
func (mp *MaxHeap) upHeap(i int) {
	// one element, returns directly
	if i < 1 {
		return
	}
	p := pi(i)
	// Compare the added element with its parent; if they are in the correct order, stop.
	// NOTE(review): `p < mp.Size()` always holds here since p < i <
	// HeapSize, and the strict > means equal keys keep swapping all the
	// way to the root (terminates, but wastefully) — consider >=.
	if p < mp.Size() && mp.Elems[p] > mp.Elems[i] {
		return
	}
	// If not, swap the element with its parent and return to the previous step.
	mp.Elems[p], mp.Elems[i] = mp.Elems[i], mp.Elems[p]
	mp.upHeap(p)
}
// Extract removes and returns the maximum element (the root). The last
// element is moved to the root and sifted down. It panics on an empty
// heap (negative index), matching the previous behavior.
func (mp *MaxHeap) Extract() int {
	last := mp.HeapSize - 1
	// Replace the root with the last element on the last level, keeping
	// the old root in the vacated slot so it can be returned.
	mp.Elems[0], mp.Elems[last] = mp.Elems[last], mp.Elems[0]
	top := mp.Elems[last]
	// Shrink before heapifying so MaxHeapify ignores the removed slot.
	mp.HeapSize = last
	// Fix: the previous `append(mp.Elems[:mp.HeapSize])` was an append
	// with no values (flagged by go vet); a plain reslice is intended.
	mp.Elems = mp.Elems[:last]
	mp.MaxHeapify(0)
	return top
}
// MaxHeapify sifts the element at index i down: if either child is
// larger, it swaps with the larger child and recurses until the
// subtree rooted at i satisfies the max-heap property.
func (mp *MaxHeap) MaxHeapify(i int) {
	largest := i
	if l := li(i); l < mp.Size() && mp.Elems[l] > mp.Elems[largest] {
		largest = l
	}
	if r := ri(i); r < mp.Size() && mp.Elems[r] > mp.Elems[largest] {
		largest = r
	}
	if largest == i {
		return
	}
	mp.Elems[i], mp.Elems[largest] = mp.Elems[largest], mp.Elems[i]
	mp.MaxHeapify(largest)
}
// downHeap sifts the element at index i down; it is an alias for
// MaxHeapify, named to mirror upHeap.
func (mp *MaxHeap) downHeap(i int) { // alias of MaxHeapify
	mp.MaxHeapify(i)
} | dsa/heap/max_heap.go | 0.762689 | 0.402392 | max_heap.go | starcoder |
package v1
import (
"github.com/turbinelabs/cli/command"
"github.com/turbinelabs/codec"
tbnflag "github.com/turbinelabs/nonstdlib/flag"
"github.com/turbinelabs/nonstdlib/flag/usage"
"github.com/turbinelabs/rotor"
)
// Help text for the Envoy v1 CDS collector commands; the {{ul}}/{{bold}}
// markup is rendered by the CLI framework.
const (
	envoyV1FileDescription = `Watches the given JSON or YAML Envoy
configuration file and updates Clusters stored in the Turbine Labs API at
startup and whenever the file changes.
Uses the provided file to discover the configuration for the SDS cluster that
will be used to resolve any cluster that is defined as 'sds'.
`
	// Fix: corrected the user-facing typo "thereafer" -> "thereafter".
	envoyV1RestDescription = `Connects to a running Envoy CDS server and
updates clusters stored in the Turbine Labs API at startup and periodically
thereafter.
The {{ul "clusters-nodes"}} argument defines the path(s) used to call the CDS API
(details below). If not provided, the wildcard ` + cdsPathRoot + ` is used.
The {{ul "sds-addr"}} is used to collect hosts for each cluster defined with an
'sds' type. If no sds clusters exist, {{ul "sds-addr"}} is ignored. If no
{{ul "sds-addr"}} is provided, any cluster with 'sds' type will be ignored.
{{bold "clusters-nodes"}}
A comma-delimited, unbounded list of (1) service cluster and service node
pairs, (2) service clusters or (3) any combination of (1) and (2). Format is:
cluster1:node1,cluster2,cluster1:node2,cluster3
These correspond to the local cluster(s) that the Envoys we're collecting for
are running in, as well as their corresponding node names (i.e. the
"--service-cluster" and "--service-node" arguments used at Envoy startup). For
cases where only <service_cluster>s are provided, it's assumed that the CDS
being called responds to "GET ` + cdsPathRoot + `/<service_cluster>" with all
clusters within that <service_cluster>. If no {{ul "clusters-nodes"}} argument is
passed, "GET ` + cdsPathRoot + `" will be called and should return all
clusters in a Zone.
`
)
// RESTCmd configures the parameters needed for running rotor against a
// V1 envoy CDS server.
func RESTCmd(updaterFlags rotor.UpdaterFromFlags) *command.Cmd {
	cmd := &command.Cmd{
		Name:        "exp-envoy-cds-v1",
		Summary:     "envoy CDS v1 collector [EXPERIMENTAL]",
		Usage:       "[OPTIONS]",
		Description: envoyV1RestDescription,
	}

	runner := &restRunner{clustersNodes: tbnflag.NewStrings()}
	runner.updaterFlags = updaterFlags

	fs := tbnflag.Wrap(&cmd.Flags)
	fs.HostPortVar(
		&runner.addr,
		"addr",
		tbnflag.HostPort{},
		usage.Required("The address ('host:port') of a running CDS server."),
	)
	fs.HostPortVar(
		&runner.sdsAddr,
		"sds-addr",
		tbnflag.HostPort{},
		"The address ('host:port') of a running SDS server.",
	)
	fs.Var(
		&runner.clustersNodes,
		"clusters-nodes",
		"A comma-delimited list of cluster/node pairs or clusters with which to call the"+
			" CDS V1 API. Of the form: \"<cluster>[:<node>],...\".",
	)

	cmd.Runner = runner
	return cmd
}
// FileCmd configures the parameters needed for running rotor against a V1
// envoy CDS defined in a JSON or YAML file.
func FileCmd(updaterFlags rotor.UpdaterFromFlags) *command.Cmd {
	cmd := &command.Cmd{
		Name: "exp-envoy-cds-v1-file",
		Summary: "envoy CDS v1 file collector [EXPERIMENTAL]",
		Usage: "[OPTIONS] file",
		Description: envoyV1FileDescription,
	}
	// The runner derives its JSON/YAML codec configuration from the
	// command's flag set.
	cmd.Runner = &fileRunner{
		codecFlags: codec.NewFromFlags(tbnflag.Wrap(&cmd.Flags)),
		updaterFlags: updaterFlags,
	}
	return cmd
} | plugins/envoy/v1/cmd.go | 0.620047 | 0.477981 | cmd.go | starcoder |
package models
import (
i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e "time"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// SynchronizationProgress models synchronization progress as a
// completedUnits/totalUnits ratio observed at a point in time
// (Kiota-generated model type).
type SynchronizationProgress struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // The numerator of a progress ratio; the number of units of changes already processed.
    completedUnits *int64
    // The time of a progress observation as an offset in minutes from UTC.
    progressObservationDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
    // The denominator of a progress ratio; a number of units of changes to be processed to accomplish synchronization.
    totalUnits *int64
    // An optional description of the units.
    units *string
}
// NewSynchronizationProgress instantiates a new synchronizationProgress
// with an empty additional-data map.
func NewSynchronizationProgress()(*SynchronizationProgress) {
    m := &SynchronizationProgress{}
    m.SetAdditionalData(make(map[string]interface{}))
    return m
}
// CreateSynchronizationProgressFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// This model has no derived types, so the parse node is ignored and a
// plain SynchronizationProgress is always returned.
func CreateSynchronizationProgressFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewSynchronizationProgress(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. Nil-receiver safe.
func (m *SynchronizationProgress) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    }
    return m.additionalData
}
// GetCompletedUnits gets the completedUnits property value. The numerator of a progress ratio; the number of units of changes already processed. Nil-receiver safe.
func (m *SynchronizationProgress) GetCompletedUnits()(*int64) {
    if m == nil {
        return nil
    }
    return m.completedUnits
}
// GetFieldDeserializers the deserialization information for the current model.
// Each entry maps a wire property name to a closure that reads the value
// from the parse node and stores it on the receiver; nil values are
// skipped so existing fields are not clobbered.
func (m *SynchronizationProgress) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
    res["completedUnits"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetInt64Value()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetCompletedUnits(val)
        }
        return nil
    }
    res["progressObservationDateTime"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetTimeValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetProgressObservationDateTime(val)
        }
        return nil
    }
    res["totalUnits"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetInt64Value()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetTotalUnits(val)
        }
        return nil
    }
    res["units"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetUnits(val)
        }
        return nil
    }
    return res
}
// GetProgressObservationDateTime gets the progressObservationDateTime property value. The time of a progress observation as an offset in minutes from UTC. Nil-receiver safe.
func (m *SynchronizationProgress) GetProgressObservationDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
    if m == nil {
        return nil
    }
    return m.progressObservationDateTime
}
// GetTotalUnits gets the totalUnits property value. The denominator of a progress ratio; a number of units of changes to be processed to accomplish synchronization. Nil-receiver safe.
func (m *SynchronizationProgress) GetTotalUnits()(*int64) {
    if m == nil {
        return nil
    }
    return m.totalUnits
}
// GetUnits gets the units property value. An optional description of the units. Nil-receiver safe.
func (m *SynchronizationProgress) GetUnits()(*string) {
    if m == nil {
        return nil
    }
    return m.units
}
// Serialize serializes information the current object.
// Properties are written in a fixed order; the first writer error
// aborts serialization immediately.
func (m *SynchronizationProgress) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    {
        err := writer.WriteInt64Value("completedUnits", m.GetCompletedUnits())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteTimeValue("progressObservationDateTime", m.GetProgressObservationDateTime())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteInt64Value("totalUnits", m.GetTotalUnits())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("units", m.GetUnits())
        if err != nil {
            return err
        }
    }
    {
        // Unknown/additional properties are written last.
        err := writer.WriteAdditionalData(m.GetAdditionalData())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. Nil-receiver safe.
func (m *SynchronizationProgress) SetAdditionalData(value map[string]interface{})() {
    if m == nil {
        return
    }
    m.additionalData = value
}
// SetCompletedUnits sets the completedUnits property value. The numerator of a progress ratio; the number of units of changes already processed. Nil-receiver safe.
func (m *SynchronizationProgress) SetCompletedUnits(value *int64)() {
    if m == nil {
        return
    }
    m.completedUnits = value
}
// SetProgressObservationDateTime sets the progressObservationDateTime property value. The time of a progress observation as an offset in minutes from UTC. Nil-receiver safe.
func (m *SynchronizationProgress) SetProgressObservationDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
    if m == nil {
        return
    }
    m.progressObservationDateTime = value
}
// SetTotalUnits sets the totalUnits property value. The denominator of a progress ratio; a number of units of changes to be processed to accomplish synchronization. Nil-receiver safe.
func (m *SynchronizationProgress) SetTotalUnits(value *int64)() {
    if m == nil {
        return
    }
    m.totalUnits = value
}
// SetUnits sets the units property value. An optional description of the units.
func (m *SynchronizationProgress) SetUnits(value *string)() {
    // Nil-receiver safe, matching the other generated setters.
    if m != nil {
        m.units = value
    }
} | models/synchronization_progress.go | 0.872782 | 0.407392 | synchronization_progress.go | starcoder |
package isogram
import (
"regexp"
"strings"
"unicode"
)
var regexPattern, _ = regexp.Compile("[^a-zA-Z]+")
// IsIsogramV1 reports whether word is an isogram by stripping all
// non-ASCII-letter characters, lowercasing, and comparing every pair of
// runes (O(n^2)).
func IsIsogramV1(word string) (isIsogram bool) {
	cleaned := strings.ToLower(regexPattern.ReplaceAllString(word, ""))
	for i, a := range cleaned {
		for j, b := range cleaned {
			if i != j && a == b {
				return false
			}
		}
	}
	return true
}
// IsIsogramV2 reports whether word is an isogram. It lowercases the
// word, removes hyphens and spaces (other punctuation is NOT ignored),
// then checks that no rune occurs more than once.
func IsIsogramV2(word string) (isIsogram bool) {
	cleaned := strings.ToLower(word)
	for _, sep := range []string{"-", " "} {
		cleaned = strings.ReplaceAll(cleaned, sep, "")
	}
	for _, r := range cleaned {
		if strings.Count(cleaned, string(r)) > 1 {
			return false
		}
	}
	return true
}
// IsIsogramV3 reports whether word is an isogram, comparing letters
// case-insensitively and ignoring non-letter characters.
func IsIsogramV3(word string) (isIsogram bool) {
	for _, r := range word {
		if !unicode.IsLetter(r) {
			continue
		}
		lower, upper := unicode.ToLower(r), unicode.ToUpper(r)
		count := strings.Count(word, string(lower))
		// Bug fix: for caseless letters (e.g. CJK), lower == upper, so
		// counting both forms double-counted every occurrence and wrongly
		// rejected words containing such a letter exactly once.
		if upper != lower {
			count += strings.Count(word, string(upper))
		}
		if count > 1 {
			return false
		}
	}
	return true
}
// IsIsogram reports whether word is an isogram: no letter occurs more
// than once, compared case-insensitively; non-letter runes are ignored.
// Runs in a single O(n) pass using a set of seen letters.
func IsIsogram(word string) bool {
	seen := make(map[rune]bool)
	for _, r := range word {
		if !unicode.IsLetter(r) {
			continue
		}
		lower := unicode.ToLower(r)
		if seen[lower] {
			return false
		}
		seen[lower] = true
	}
	return true
}
// IsIsogramFastest is a faster version of the isogram check: lowercase
// once, then for each letter look for the same rune later in the string.
// Reference: https://exercism.io/tracks/go/exercises/isogram/solutions/2e0f7084226541b5af92d9895d1a9fa3
func IsIsogramFastest(s string) bool {
	s = strings.ToLower(s)
	for i, c := range s {
		// s[i+1:] can start mid-rune for multi-byte c, but UTF-8 is
		// self-synchronizing, so ContainsRune still only matches genuine
		// occurrences of c.
		if unicode.IsLetter(c) && strings.ContainsRune(s[i+1:], c) {
			return false
		}
	}
	return true
} | exercism/go/isogram/isogram.go | 0.586404 | 0.413714 | isogram.go | starcoder |
package executetest
import (
"fmt"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/values"
)
// Table is an implementation of execute.Table
// It is designed to make it easy to statically declare the data within the table.
// Not all fields need to be set. See comments on each field.
// Use Normalize to ensure that all fields are set before equality comparisons.
type Table struct {
	// GroupKey of the table. Does not need to be set explicitly;
	// Normalize derives it from KeyCols/KeyValues/Data when nil.
	GroupKey flux.GroupKey
	// KeyCols is a list of column that are part of the group key.
	// The column type is deduced from the ColMeta slice.
	KeyCols []string
	// KeyValues is a list of values for the group key columns.
	// Only needs to be set when no data is present on the table;
	// otherwise the first data row supplies the key values.
	KeyValues []interface{}
	// ColMeta is a list of columns of the table.
	ColMeta []flux.ColMeta
	// Data is a list of rows, i.e. Data[row][col]
	// Each row must be a list with length equal to len(ColMeta)
	Data [][]interface{}
}
// Normalize ensures all fields of the table are set correctly.
// It lazily builds GroupKey from KeyCols: each key column's metadata is
// looked up in ColMeta, and its value is taken from the first data row
// (or from KeyValues when the table has no data). Panics if a key
// column is missing from ColMeta or a key value is invalid.
func (t *Table) Normalize() {
	if t.GroupKey == nil {
		cols := make([]flux.ColMeta, len(t.KeyCols))
		vs := make([]values.Value, len(t.KeyCols))
		if len(t.KeyValues) != len(t.KeyCols) {
			t.KeyValues = make([]interface{}, len(t.KeyCols))
		}
		for j, label := range t.KeyCols {
			idx := execute.ColIdx(label, t.ColMeta)
			if idx < 0 {
				panic(fmt.Errorf("table invalid: missing group column %q", label))
			}
			cols[j] = t.ColMeta[idx]
			// Data, when present, is authoritative for key values.
			if len(t.Data) > 0 {
				t.KeyValues[j] = t.Data[0][idx]
			}
			v := values.New(t.KeyValues[j])
			if v.Type() == semantic.Invalid {
				panic(fmt.Errorf("invalid value: %s", t.KeyValues[j]))
			}
			vs[j] = v
		}
		t.GroupKey = execute.NewGroupKey(cols, vs)
	}
}
// Empty reports whether the table has no data rows.
func (t *Table) Empty() bool {
	return len(t.Data) == 0
}

// RefCount is a no-op; the static test table needs no reference counting.
func (t *Table) RefCount(n int) {}

// Cols returns the table's column metadata.
func (t *Table) Cols() []flux.ColMeta {
	return t.ColMeta
}

// Key returns the table's group key, normalizing the table first so the
// key is always populated.
func (t *Table) Key() flux.GroupKey {
	t.Normalize()
	return t.GroupKey
}
// Do invokes f once per data row, presenting each row as a single-row
// ColReader, and stops at the first error.
func (t *Table) Do(f func(flux.ColReader) error) error {
	for _, row := range t.Data {
		reader := ColReader{
			key:  t.Key(),
			cols: t.ColMeta,
			row:  row,
		}
		if err := f(reader); err != nil {
			return err
		}
	}
	return nil
}
// ColReader is a flux.ColReader over exactly one row of a test Table.
type ColReader struct {
	key flux.GroupKey
	cols []flux.ColMeta
	// row holds the single row's values, indexed by column.
	row []interface{}
}

// Cols returns the column metadata.
func (cr ColReader) Cols() []flux.ColMeta {
	return cr.cols
}

// Key returns the group key of the owning table.
func (cr ColReader) Key() flux.GroupKey {
	return cr.key
}

// Len always reports 1: each ColReader wraps a single row.
func (cr ColReader) Len() int {
	return 1
}

// The typed accessors below return column j as a one-element slice;
// they panic if the stored value's dynamic type does not match.
func (cr ColReader) Bools(j int) []bool {
	return []bool{cr.row[j].(bool)}
}

func (cr ColReader) Ints(j int) []int64 {
	return []int64{cr.row[j].(int64)}
}

func (cr ColReader) UInts(j int) []uint64 {
	return []uint64{cr.row[j].(uint64)}
}

func (cr ColReader) Floats(j int) []float64 {
	return []float64{cr.row[j].(float64)}
}

func (cr ColReader) Strings(j int) []string {
	return []string{cr.row[j].(string)}
}

func (cr ColReader) Times(j int) []execute.Time {
	return []execute.Time{cr.row[j].(execute.Time)}
}
func TablesFromCache(c execute.DataCache) (tables []*Table, err error) {
c.ForEach(func(key flux.GroupKey) {
if err != nil {
return
}
var tbl flux.Table
tbl, err = c.Table(key)
if err != nil {
return
}
var cb *Table
cb, err = ConvertTable(tbl)
if err != nil {
return
}
tables = append(tables, cb)
c.ExpireTable(key)
})
return tables, nil
}
func ConvertTable(tbl flux.Table) (*Table, error) {
key := tbl.Key()
blk := &Table{
GroupKey: key,
ColMeta: tbl.Cols(),
}
keyCols := key.Cols()
if len(keyCols) > 0 {
blk.KeyCols = make([]string, len(keyCols))
blk.KeyValues = make([]interface{}, len(keyCols))
for j, c := range keyCols {
blk.KeyCols[j] = c.Label
var v interface{}
switch c.Type {
case flux.TBool:
v = key.ValueBool(j)
case flux.TUInt:
v = key.ValueUInt(j)
case flux.TInt:
v = key.ValueInt(j)
case flux.TFloat:
v = key.ValueFloat(j)
case flux.TString:
v = key.ValueString(j)
case flux.TTime:
v = key.ValueTime(j)
default:
return nil, fmt.Errorf("unsupported column type %v", c.Type)
}
blk.KeyValues[j] = v
}
}
err := tbl.Do(func(cr flux.ColReader) error {
l := cr.Len()
for i := 0; i < l; i++ {
row := make([]interface{}, len(blk.ColMeta))
for j, c := range blk.ColMeta {
var v interface{}
switch c.Type {
case flux.TBool:
v = cr.Bools(j)[i]
case flux.TInt:
v = cr.Ints(j)[i]
case flux.TUInt:
v = cr.UInts(j)[i]
case flux.TFloat:
v = cr.Floats(j)[i]
case flux.TString:
v = cr.Strings(j)[i]
case flux.TTime:
v = cr.Times(j)[i]
default:
panic(fmt.Errorf("unknown column type %s", c.Type))
}
row[j] = v
}
blk.Data = append(blk.Data, row)
}
return nil
})
if err != nil {
return nil, err
}
return blk, nil
}
type SortedTables []*Table
func (b SortedTables) Len() int {
return len(b)
}
func (b SortedTables) Less(i int, j int) bool {
return b[i].Key().Less(b[j].Key())
}
func (b SortedTables) Swap(i int, j int) {
b[i], b[j] = b[j], b[i]
}
// NormalizeTables ensures that each table is normalized
func NormalizeTables(bs []*Table) {
for _, b := range bs {
b.Key()
}
}
func MustCopyTable(tbl flux.Table) flux.Table {
cpy, _ := execute.CopyTable(tbl, UnlimitedAllocator)
return cpy
} | execute/executetest/table.go | 0.69181 | 0.462898 | table.go | starcoder |
package simplefunctionrules
import "fmt"
// DoSomeThing example to a simple function
func DoSomeThing() {
fmt.Println("Hello World! print in a simple function")
}
// PrintUserName print the user name string that pass in argument
func PrintUserName(userName string) {
fmt.Println("Hello " + userName)
}
// As discussed earlier, a function may take input values for its execution.
// These input values are provided in a function call, called arguments.
// One or multiple arguments can also be passed to a function.
// PrintAdd sum and print two integers
func PrintAdd(a int, b int) {
c := a + b
fmt.Println(c)
}
// A function can also return a value that can be printed or assigned to another variable.
// You can use shorthand parameter notation in case multiple parameters in succession are of the same data type.
// In case a function returns a value, you must specify the data type of a return value just after
// the function parameter parentheses.
// Add return sum of two integers
func Add(a, b int) int {
return a + b
}
// Unlike other programming languages, Go can return multiple values from the function.
// In this case, we must specify return types of the values (just like above) inside parentheses
// just after the function parameter parentheses.
// AddAndMult return sum and mult of two integers
func AddAndMult(a, b int) (int, int) {
return a + b, a * b
}
// Named return values are a great way to explicitly mention return variables in the function definition itself.
// These variables will be created automatically and made available inside the function body.
// You can change the values of these variables inside a function.
// A return statement at the end of the function is necessary to return named values.
// Go will automatically return these variables when the function hits the return statement.
// DivAndMult return sum and mult of two integers
func DivAndMult(a, b int) (div, mul int) {
div = a / b
mul = a * b
return
}
// A function is called recursive when it calls itself from inside the body.
// A simple syntax for the recursive function is
// func r() {
// r()
// }
// GetFactorial return the number factorial
func GetFactorial(num int) int {
// n! = n*(n-1)! where n>0
if num > 1 {
return num * GetFactorial(num-1)
}
return 1 // 1! == 1
}
// SayDone print I am done
func SayDone() {
fmt.Println("I am done and I am deferred")
}
// Subtract return the two numbers substract
func Subtract(a int, b int) int {
return a - b
}
// Calc do calc function that pass in third argument
func Calc(a int, b int, f func(int, int) int) int {
r := f(a, b)
return r
}
type calcFunc func(int, int) int
// CalcPlus do calc function that pass in third argument
func CalcPlus(a int, b int, f calcFunc) int {
r := f(a, b) // calling add(a,b) or substract(a,b)
return r
}
// A function in Go can also be a value.
// This means you can assign a function to a variable.
// SayYeah print yeah yeah yeah
var SayYeah = func() {
fmt.Println("Yeah Yeah Yeah")
} | example-functions/simplefunctionrules/simplefunctionrules.go | 0.625667 | 0.485234 | simplefunctionrules.go | starcoder |
package leveldb
import (
"encoding/binary"
)
const batchHeaderLen = 12
// Batch is a sequence of Sets and/or Deletes that are applied atomically.
type Batch struct {
// Data is the wire format of a batch's log entry:
// - 8 bytes for a sequence number of the first batch element,
// or zeroes if the batch has not yet been applied,
// - 4 bytes for the count: the number of elements in the batch,
// or "\xff\xff\xff\xff" if the batch is invalid,
// - count elements, being:
// - one byte for the kind: delete (0) or set (1),
// - the varint-string user key,
// - the varint-string value (if kind == set).
// The sequence number and count are stored in little-endian order.
data []byte
}
// Set adds an action to the batch that sets the key to map to the value.
func (b *Batch) Set(key, value []byte) {
if len(b.data) == 0 {
b.init(len(key) + len(value) + 2*binary.MaxVarintLen64 + batchHeaderLen)
}
if b.increment() {
b.data = append(b.data, byte(internalKeyKindSet))
b.appendStr(key)
b.appendStr(value)
}
}
// Delete adds an action to the batch that deletes the entry for key.
func (b *Batch) Delete(key []byte) {
if len(b.data) == 0 {
b.init(len(key) + binary.MaxVarintLen64 + batchHeaderLen)
}
if b.increment() {
b.data = append(b.data, byte(internalKeyKindDelete))
b.appendStr(key)
}
}
func (b *Batch) init(cap int) {
n := 256
for n < cap {
n *= 2
}
b.data = make([]byte, batchHeaderLen, n)
}
// seqNumData returns the 8 byte little-endian sequence number. Zero means that
// the batch has not yet been applied.
func (b *Batch) seqNumData() []byte {
return b.data[:8]
}
// countData returns the 4 byte little-endian count data. "\xff\xff\xff\xff"
// means that the batch is invalid.
func (b *Batch) countData() []byte {
return b.data[8:12]
}
func (b *Batch) increment() (ok bool) {
p := b.countData()
for i := range p {
p[i]++
if p[i] != 0x00 {
return true
}
}
// The countData was "\xff\xff\xff\xff". Leave it as it was.
p[0] = 0xff
p[1] = 0xff
p[2] = 0xff
p[3] = 0xff
return false
}
func (b *Batch) appendStr(s []byte) {
var buf [binary.MaxVarintLen64]byte
n := binary.PutUvarint(buf[:], uint64(len(s)))
b.data = append(b.data, buf[:n]...)
b.data = append(b.data, s...)
}
func (b *Batch) seqNum() uint64 {
return binary.LittleEndian.Uint64(b.seqNumData())
}
func (b *Batch) count() uint32 {
return binary.LittleEndian.Uint32(b.countData())
}
func (b *Batch) iter() batchIter {
return b.data[batchHeaderLen:]
}
type batchIter []byte
// next returns the next operation in this batch.
// The final return value is false if the batch is corrupt.
func (t *batchIter) next() (kind internalKeyKind, key []byte, value []byte, ok bool) {
p := *t
if len(p) == 0 {
return 0, nil, nil, false
}
kind, *t = internalKeyKind(p[0]), p[1:]
if kind > internalKeyKindMax {
return 0, nil, nil, false
}
key, ok = t.nextStr()
if !ok {
return 0, nil, nil, false
}
if kind != internalKeyKindDelete {
value, ok = t.nextStr()
if !ok {
return 0, nil, nil, false
}
}
return kind, key, value, true
}
func (t *batchIter) nextStr() (s []byte, ok bool) {
p := *t
u, numBytes := binary.Uvarint(p)
if numBytes <= 0 {
return nil, false
}
p = p[numBytes:]
if u > uint64(len(p)) {
return nil, false
}
s, *t = p[:u], p[u:]
return s, true
} | leveldb/batch.go | 0.649134 | 0.479869 | batch.go | starcoder |
package parser
import (
"fmt"
"github.com/bazo-blockchain/lazo/lexer/token"
"github.com/bazo-blockchain/lazo/parser/node"
)
// Expressions
// -------------------------
func (p *Parser) parseExpression() node.ExpressionNode {
return p.parseTernaryExpression()
}
func (p *Parser) parseTernaryExpression() node.ExpressionNode {
expr := p.parseOr()
if p.isSymbol(token.QuestionMark) {
p.nextToken()
ternary := &node.TernaryExpressionNode{
AbstractNode: p.newAbstractNodeWithPos(expr.Pos()),
Condition: expr,
Then: p.parseOr(),
}
p.check(token.Colon)
ternary.Else = p.parseOr()
return ternary
}
return expr
}
func (p *Parser) parseOr() node.ExpressionNode {
abstractNode := p.newAbstractNode()
leftExpr := p.parseAnd()
for p.isAnySymbol(token.Or) {
binExpr := &node.BinaryExpressionNode{
AbstractNode: abstractNode,
Left: leftExpr,
Operator: p.readSymbol(),
Right: p.parseAnd(),
}
leftExpr = binExpr
}
return leftExpr
}
func (p *Parser) parseAnd() node.ExpressionNode {
abstractNode := p.newAbstractNode()
leftExpr := p.parseBitwiseOr()
for p.isAnySymbol(token.And) {
binExpr := &node.BinaryExpressionNode{
AbstractNode: abstractNode,
Left: leftExpr,
Operator: p.readSymbol(),
Right: p.parseBitwiseOr(),
}
leftExpr = binExpr
}
return leftExpr
}
func (p *Parser) parseBitwiseOr() node.ExpressionNode {
abstractNode := p.newAbstractNode()
leftExpr := p.parseBitwiseXOr()
for p.isAnySymbol(token.BitwiseOr) {
binExpr := &node.BinaryExpressionNode{
AbstractNode: abstractNode,
Left: leftExpr,
Operator: p.readSymbol(),
Right: p.parseBitwiseXOr(),
}
leftExpr = binExpr
}
return leftExpr
}
func (p *Parser) parseBitwiseXOr() node.ExpressionNode {
abstractNode := p.newAbstractNode()
leftExpr := p.parseBitwiseAnd()
for p.isAnySymbol(token.BitwiseXOr) {
binExpr := &node.BinaryExpressionNode{
AbstractNode: abstractNode,
Left: leftExpr,
Operator: p.readSymbol(),
Right: p.parseBitwiseAnd(),
}
leftExpr = binExpr
}
return leftExpr
}
func (p *Parser) parseBitwiseAnd() node.ExpressionNode {
abstractNode := p.newAbstractNode()
leftExpr := p.parseEquality()
for p.isAnySymbol(token.BitwiseAnd) {
binExpr := &node.BinaryExpressionNode{
AbstractNode: abstractNode,
Left: leftExpr,
Operator: p.readSymbol(),
Right: p.parseEquality(),
}
leftExpr = binExpr
}
return leftExpr
}
func (p *Parser) parseEquality() node.ExpressionNode {
abstractNode := p.newAbstractNode()
leftExpr := p.parseRelationalComparison()
for p.isAnySymbol(token.Equal, token.Unequal) {
binExpr := &node.BinaryExpressionNode{
AbstractNode: abstractNode,
Left: leftExpr,
Operator: p.readSymbol(),
Right: p.parseRelationalComparison(),
}
leftExpr = binExpr
}
return leftExpr
}
func (p *Parser) parseRelationalComparison() node.ExpressionNode {
abstractNode := p.newAbstractNode()
leftExpr := p.parseBitwiseShift()
for p.isAnySymbol(token.Less, token.LessEqual, token.GreaterEqual, token.Greater) {
binExpr := &node.BinaryExpressionNode{
AbstractNode: abstractNode,
Left: leftExpr,
Operator: p.readSymbol(),
Right: p.parseBitwiseShift(),
}
leftExpr = binExpr
}
return leftExpr
}
func (p *Parser) parseBitwiseShift() node.ExpressionNode {
abstractNode := p.newAbstractNode()
leftExpr := p.parseTerm()
for p.isAnySymbol(token.ShiftLeft, token.ShiftRight) {
binExpr := &node.BinaryExpressionNode{
AbstractNode: abstractNode,
Left: leftExpr,
Operator: p.readSymbol(),
Right: p.parseTerm(),
}
leftExpr = binExpr
}
return leftExpr
}
func (p *Parser) parseTerm() node.ExpressionNode {
abstractNode := p.newAbstractNode()
leftExpr := p.parseFactor()
for p.isAnySymbol(token.Plus, token.Minus) {
binExpr := &node.BinaryExpressionNode{
AbstractNode: abstractNode,
Left: leftExpr,
Operator: p.readSymbol(),
Right: p.parseFactor(),
}
leftExpr = binExpr
}
return leftExpr
}
func (p *Parser) parseFactor() node.ExpressionNode {
abstractNode := p.newAbstractNode()
leftExpr := p.parseExponent()
for p.isAnySymbol(token.Multiplication, token.Division, token.Modulo) {
binExpr := &node.BinaryExpressionNode{
AbstractNode: abstractNode,
Left: leftExpr,
Operator: p.readSymbol(),
Right: p.parseExponent(),
}
leftExpr = binExpr
}
return leftExpr
}
func (p *Parser) parseExponent() node.ExpressionNode {
abstractNode := p.newAbstractNode()
leftExpr := p.parseExpressionRest()
if p.isSymbol(token.Exponent) {
binExpr := &node.BinaryExpressionNode{
AbstractNode: abstractNode,
Left: leftExpr,
Operator: p.readSymbol(),
Right: p.parseExponent(), // recursive because of right-to-left associativity
}
return binExpr
}
return leftExpr
}
func (p *Parser) parseExpressionRest() node.ExpressionNode {
if p.isAnySymbol(token.Plus, token.Minus, token.Not, token.BitwiseNot) {
return p.parseUnaryExpression()
}
if p.isSymbol(token.OpenParen) {
abstractNode := p.newAbstractNode()
p.nextToken()
expr := p.parseExpression()
p.check(token.CloseParen)
switch p.currentToken.Type() {
case token.IDENTIFER, token.CHARACTER, token.INTEGER:
// (String) x.y.z, (String) 'c', (String) 5
return p.parseTypeCast(abstractNode, expr)
case token.SYMBOL:
// (String) true
if p.isAnySymbol(token.True, token.False) {
return p.parseTypeCast(abstractNode, expr)
}
}
return expr
}
return p.parseOperand()
}
func (p *Parser) parseUnaryExpression() *node.UnaryExpressionNode {
return &node.UnaryExpressionNode{
AbstractNode: p.newAbstractNode(),
Operator: p.readSymbol(),
Expression: p.parseFactor(),
}
}
func (p *Parser) parseTypeCast(abstractNode node.AbstractNode, expr node.ExpressionNode) *node.TypeCastNode {
typeCast := &node.TypeCastNode{
AbstractNode: abstractNode,
Expression: p.parseOperand(),
}
if basicDesignator, ok := expr.(*node.BasicDesignatorNode); ok {
typeCast.Type = &node.BasicTypeNode{
AbstractNode: p.newAbstractNodeWithPos(expr.Pos()),
Identifier: basicDesignator.Value,
}
} else {
p.addError(fmt.Sprintf("Invalid type %s", expr))
}
return typeCast
}
func (p *Parser) parseOperand() node.ExpressionNode {
switch p.currentToken.Type() {
case token.IDENTIFER:
designator := p.parseDesignator()
if p.isSymbol(token.OpenParen) {
return p.parseFuncCall(designator)
}
return designator
case token.INTEGER:
return p.parseInteger()
case token.CHARACTER:
return p.parseCharacter()
case token.STRING:
return p.parseString()
case token.SYMBOL:
return p.parseOperandSymbol()
}
var error string
if tok, ok := p.currentToken.(*token.ErrorToken); ok {
error = tok.Msg
} else {
panic("Unsupported token type: " + p.currentToken.Literal())
}
return p.newErrorNode(error)
}
func (p *Parser) parseOperandSymbol() node.ExpressionNode {
tok, ok := p.currentToken.(*token.FixToken)
if !ok {
panic("Invalid operation")
}
switch tok.Value {
case token.True, token.False:
return p.parseBoolean(tok)
case token.New:
return p.parseCreation()
default:
return p.newErrorNode("Unsupported expression symbol " + p.currentToken.Literal())
}
}
func (p *Parser) parseDesignator() node.DesignatorNode {
return p.parseDesignatorWithIdentifier(p.newAbstractNode(), p.readIdentifier())
}
func (p *Parser) parseDesignatorWithIdentifier(abstractNode node.AbstractNode, identifier string) node.DesignatorNode {
var left node.DesignatorNode = &node.BasicDesignatorNode{
AbstractNode: abstractNode,
Value: identifier,
}
for p.isSymbol(token.Period) || p.isSymbol(token.OpenBracket) {
if p.isSymbol(token.Period) {
p.nextToken()
memberIdentifier := p.readIdentifier()
left = &node.MemberAccessNode{
AbstractNode: abstractNode,
Designator: left,
Identifier: memberIdentifier,
}
} else {
p.check(token.OpenBracket)
exp := p.parseExpression()
p.check(token.CloseBracket)
left = &node.ElementAccessNode{
AbstractNode: abstractNode,
Designator: left,
Expression: exp,
}
}
}
return left
}
func (p *Parser) parseFuncCall(designator node.DesignatorNode) *node.FuncCallNode {
funcCall := &node.FuncCallNode{
AbstractNode: p.newAbstractNodeWithPos(designator.Pos()),
Designator: designator,
}
p.check(token.OpenParen)
isFirstArg := true
for !p.isEnd() && !p.isSymbol(token.CloseParen) {
if !isFirstArg {
p.check(token.Comma)
}
funcCall.Args = append(funcCall.Args, p.parseExpression())
isFirstArg = false
}
p.check(token.CloseParen)
return funcCall
}
func (p *Parser) parseCreation() node.ExpressionNode {
abstractNode := p.newAbstractNode()
p.nextToken() // skip 'new' keyword
identifier := p.readIdentifier()
if p.isSymbol(token.OpenParen) {
return p.parseStructCreation(abstractNode, identifier)
} else if p.isSymbol(token.OpenBracket) {
return p.parseArrayCreation(abstractNode, identifier)
}
return p.newErrorNode(fmt.Sprintf("Unsupported creation type with %s", p.currentToken.Literal()))
}
func (p *Parser) parseArrayCreation(abstractNode node.AbstractNode, identifier string) node.ExpressionNode {
var arrayType node.TypeNode = &node.BasicTypeNode{
AbstractNode: abstractNode,
Identifier: identifier,
}
// Initialization using values: new int[][]{{1, 2}, {3, 4}}
if p.peekIsSymbol(token.CloseBracket) {
arrayType = p.parseArrayType(arrayType)
return &node.ArrayValueCreationNode{
AbstractNode: abstractNode,
Type: arrayType,
Elements: p.parseArrayInitialization(),
}
}
// Initialization using Length: new int[2][3]
p.check(token.OpenBracket)
var expressions []node.ExpressionNode
expression := p.parseExpression() // Read length expression
expressions = append(expressions, expression)
p.check(token.CloseBracket)
for !p.isEnd() && p.isSymbol(token.OpenBracket) {
p.nextToken()
expressions = append(expressions, p.parseExpression())
p.check(token.CloseBracket)
arrayType = &node.ArrayTypeNode{
AbstractNode: p.newAbstractNodeWithPos(arrayType.Pos()),
ElementType: arrayType,
}
}
return &node.ArrayLengthCreationNode{
AbstractNode: abstractNode,
ElementType: arrayType,
Lengths: expressions,
}
}
func (p *Parser) parseArrayInitialization() *node.ArrayInitializationNode {
abstractNode := p.newAbstractNode()
p.checkAndSkipNewLines(token.OpenBrace) // skip '{'
if !p.isEnd() && !p.isSymbol(token.OpenBrace) {
var expressions []node.ExpressionNode
expressions = append(expressions, p.parseExpression())
for !p.isEnd() && !p.isSymbol(token.CloseBrace) {
p.checkAndSkipNewLines(token.Comma)
expressions = append(expressions, p.parseExpression())
}
p.check(token.CloseBrace)
return &node.ArrayInitializationNode{
AbstractNode: abstractNode,
Values: expressions,
}
}
arrayInitialization := &node.ArrayInitializationNode{
AbstractNode: abstractNode,
}
if !p.isEnd() {
var expressions []node.ExpressionNode
expressions = append(expressions, p.parseArrayInitialization())
for !p.isEnd() && !p.isSymbol(token.CloseBrace) {
p.checkAndSkipNewLines(token.Comma)
expressions = append(expressions, p.parseArrayInitialization())
}
p.check(token.CloseBrace)
arrayInitialization.Values = expressions
} else {
p.addError("Invalid array initialization")
}
return arrayInitialization
}
func (p *Parser) parseStructCreation(abstractNode node.AbstractNode, identifier string) node.ExpressionNode {
p.nextTokenWhileNewLine() // skip '('
if ftok, ok := p.peekToken.(*token.FixToken); ok && ftok.Value == token.Assign {
return p.parseStructNamedCreation(abstractNode, identifier)
}
structCreation := &node.StructCreationNode{
AbstractNode: abstractNode,
Name: identifier,
}
isFirstArg := true
for !p.isEnd() && !p.isSymbol(token.CloseParen) {
if !isFirstArg {
p.check(token.Comma)
}
structCreation.FieldValues = append(structCreation.FieldValues, p.parseExpression())
isFirstArg = false
}
p.check(token.CloseParen)
return structCreation
}
func (p *Parser) parseStructNamedCreation(abstractNode node.AbstractNode, identifier string) *node.StructNamedCreationNode {
structCreation := &node.StructNamedCreationNode{
AbstractNode: abstractNode,
Name: identifier,
}
isFirstArg := true
for !p.isEnd() && !p.isSymbol(token.CloseParen) {
if !isFirstArg {
p.checkAndSkipNewLines(token.Comma)
}
field := &node.StructFieldAssignmentNode{
AbstractNode: p.newAbstractNode(),
Name: p.readIdentifier(),
}
p.check(token.Assign)
field.Expression = p.parseExpression()
structCreation.FieldValues = append(structCreation.FieldValues, field)
p.skipNewLines()
isFirstArg = false
}
p.check(token.CloseParen)
return structCreation
}
func (p *Parser) parseInteger() *node.IntegerLiteralNode {
tok, _ := p.currentToken.(*token.IntegerToken)
i := &node.IntegerLiteralNode{
AbstractNode: p.newAbstractNode(),
Value: tok.Value,
}
p.nextToken()
return i
}
func (p *Parser) parseCharacter() *node.CharacterLiteralNode {
tok, _ := p.currentToken.(*token.CharacterToken)
c := &node.CharacterLiteralNode{
AbstractNode: p.newAbstractNode(),
Value: tok.Value,
}
p.nextToken()
return c
}
func (p *Parser) parseString() *node.StringLiteralNode {
tok, _ := p.currentToken.(*token.StringToken)
s := &node.StringLiteralNode{
AbstractNode: p.newAbstractNode(),
Value: tok.Literal(),
}
p.nextToken()
return s
}
func (p *Parser) parseBoolean(tok *token.FixToken) node.ExpressionNode {
if value, ok := token.BooleanConstants[tok.Value]; ok {
b := &node.BoolLiteralNode{
AbstractNode: p.newAbstractNode(),
Value: value,
}
p.nextToken()
return b
}
return p.newErrorNode("Invalid boolean value " + tok.Literal())
} | parser/parser_expression.go | 0.655336 | 0.457016 | parser_expression.go | starcoder |
package classic
import (
"math"
"sort"
"github.com/grafana/grafana/pkg/expr/mathexp"
)
func nilOrNaN(f *float64) bool {
return f == nil || math.IsNaN(*f)
}
func (cr classicReducer) ValidReduceFunc() bool {
switch cr {
case "avg", "sum", "min", "max", "count", "last", "median":
return true
case "diff", "diff_abs", "percent_diff", "percent_diff_abs", "count_not_null":
return true
}
return false
}
//nolint: gocyclo
func (cr classicReducer) Reduce(series mathexp.Series) mathexp.Number {
num := mathexp.NewNumber("", nil)
num.SetValue(nil)
if series.Len() == 0 {
return num
}
value := float64(0)
allNull := true
vF := series.Frame.Fields[series.ValueIdx]
ff := mathexp.Float64Field(*vF)
switch cr {
case "avg":
validPointsCount := 0
for i := 0; i < ff.Len(); i++ {
f := ff.GetValue(i)
if nilOrNaN(f) {
continue
}
value += *f
validPointsCount++
allNull = false
}
if validPointsCount > 0 {
value /= float64(validPointsCount)
}
case "sum":
for i := 0; i < ff.Len(); i++ {
f := ff.GetValue(i)
if nilOrNaN(f) {
continue
}
value += *f
allNull = false
}
case "min":
value = math.MaxFloat64
for i := 0; i < ff.Len(); i++ {
f := ff.GetValue(i)
if nilOrNaN(f) {
continue
}
allNull = false
if value > *f {
value = *f
}
}
if allNull {
value = 0
}
case "max":
value = -math.MaxFloat64
for i := 0; i < ff.Len(); i++ {
f := ff.GetValue(i)
if nilOrNaN(f) {
continue
}
allNull = false
if value < *f {
value = *f
}
}
if allNull {
value = 0
}
case "count":
value = float64(ff.Len())
allNull = false
case "last":
for i := ff.Len() - 1; i >= 0; i-- {
f := ff.GetValue(i)
if !nilOrNaN(f) {
value = *f
allNull = false
break
}
}
case "median":
var values []float64
for i := 0; i < ff.Len(); i++ {
f := ff.GetValue(i)
if nilOrNaN(f) {
continue
}
allNull = false
values = append(values, *f)
}
if len(values) >= 1 {
sort.Float64s(values)
length := len(values)
if length%2 == 1 {
value = values[(length-1)/2]
} else {
value = (values[(length/2)-1] + values[length/2]) / 2
}
}
case "diff":
allNull, value = calculateDiff(ff, allNull, value, diff)
case "diff_abs":
allNull, value = calculateDiff(ff, allNull, value, diffAbs)
case "percent_diff":
allNull, value = calculateDiff(ff, allNull, value, percentDiff)
case "percent_diff_abs":
allNull, value = calculateDiff(ff, allNull, value, percentDiffAbs)
case "count_non_null":
for i := 0; i < ff.Len(); i++ {
f := ff.GetValue(i)
if nilOrNaN(f) {
continue
}
value++
}
if value > 0 {
allNull = false
}
}
if allNull {
return num
}
num.SetValue(&value)
return num
}
func calculateDiff(ff mathexp.Float64Field, allNull bool, value float64, fn func(float64, float64) float64) (bool, float64) {
var (
first float64
i int
)
// get the newest point
for i = ff.Len() - 1; i >= 0; i-- {
f := ff.GetValue(i)
if !nilOrNaN(f) {
first = *f
allNull = false
break
}
}
if i >= 1 {
// get the oldest point
for i := 0; i < ff.Len(); i++ {
f := ff.GetValue(i)
if !nilOrNaN(f) {
value = fn(first, *f)
allNull = false
break
}
}
}
return allNull, value
}
var diff = func(newest, oldest float64) float64 {
return newest - oldest
}
var diffAbs = func(newest, oldest float64) float64 {
return math.Abs(newest - oldest)
}
var percentDiff = func(newest, oldest float64) float64 {
return (newest - oldest) / math.Abs(oldest) * 100
}
var percentDiffAbs = func(newest, oldest float64) float64 {
return math.Abs((newest - oldest) / oldest * 100)
} | pkg/expr/classic/reduce.go | 0.541651 | 0.425367 | reduce.go | starcoder |
package labels
import (
"fmt"
"k8s.io/kubernetes/pkg/api/unversioned"
)
// Clones the given map and returns a new map with the given key and value added.
// Returns the given map, if labelKey is empty.
func CloneAndAddLabel(labels map[string]string, labelKey string, labelValue uint32) map[string]string {
if labelKey == "" {
// Don't need to add a label.
return labels
}
// Clone.
newLabels := map[string]string{}
for key, value := range labels {
newLabels[key] = value
}
newLabels[labelKey] = fmt.Sprintf("%d", labelValue)
return newLabels
}
// CloneAndRemoveLabel clones the given map and returns a new map with the given key removed.
// Returns the given map, if labelKey is empty.
func CloneAndRemoveLabel(labels map[string]string, labelKey string) map[string]string {
if labelKey == "" {
// Don't need to add a label.
return labels
}
// Clone.
newLabels := map[string]string{}
for key, value := range labels {
newLabels[key] = value
}
delete(newLabels, labelKey)
return newLabels
}
// AddLabel returns a map with the given key and value added to the given map.
func AddLabel(labels map[string]string, labelKey string, labelValue string) map[string]string {
if labelKey == "" {
// Don't need to add a label.
return labels
}
if labels == nil {
labels = make(map[string]string)
}
labels[labelKey] = labelValue
return labels
}
// Clones the given selector and returns a new selector with the given key and value added.
// Returns the given selector, if labelKey is empty.
func CloneSelectorAndAddLabel(selector *unversioned.LabelSelector, labelKey string, labelValue uint32) *unversioned.LabelSelector {
if labelKey == "" {
// Don't need to add a label.
return selector
}
// Clone.
newSelector := new(unversioned.LabelSelector)
// TODO(madhusudancs): Check if you can use deepCopy_extensions_LabelSelector here.
newSelector.MatchLabels = make(map[string]string)
if selector.MatchLabels != nil {
for key, val := range selector.MatchLabels {
newSelector.MatchLabels[key] = val
}
}
newSelector.MatchLabels[labelKey] = fmt.Sprintf("%d", labelValue)
if selector.MatchExpressions != nil {
newMExps := make([]unversioned.LabelSelectorRequirement, len(selector.MatchExpressions))
for i, me := range selector.MatchExpressions {
newMExps[i].Key = me.Key
newMExps[i].Operator = me.Operator
if me.Values != nil {
newMExps[i].Values = make([]string, len(me.Values))
copy(newMExps[i].Values, me.Values)
} else {
newMExps[i].Values = nil
}
}
newSelector.MatchExpressions = newMExps
} else {
newSelector.MatchExpressions = nil
}
return newSelector
}
// AddLabelToSelector returns a selector with the given key and value added to the given selector's MatchLabels.
func AddLabelToSelector(selector *unversioned.LabelSelector, labelKey string, labelValue string) *unversioned.LabelSelector {
if labelKey == "" {
// Don't need to add a label.
return selector
}
if selector.MatchLabels == nil {
selector.MatchLabels = make(map[string]string)
}
selector.MatchLabels[labelKey] = labelValue
return selector
}
// SelectorHasLabel checks if the given selector contains the given label key in its MatchLabels
func SelectorHasLabel(selector *unversioned.LabelSelector, labelKey string) bool {
return len(selector.MatchLabels[labelKey]) > 0
} | vendor/k8s.io/kubernetes/pkg/util/labels/labels.go | 0.71123 | 0.499451 | labels.go | starcoder |
package expression
import (
"fmt"
"strconv"
"strings"
"time"
"gopkg.in/src-d/go-errors.v1"
"github.com/dolthub/go-mysql-server/sql"
)
// ErrConvertExpression is returned when a conversion is not possible.
// It wraps the underlying conversion error with the offending expression
// string and the requested target type.
var ErrConvertExpression = errors.NewKind("expression '%v': couldn't convert to %v")

// Canonical (lower-case) names for the target types accepted by
// CAST(x AS T) / CONVERT(x, T). NewConvert lower-cases user input, so
// comparisons against these constants are case-insensitive in effect.
const (
	// ConvertToBinary is a conversion to binary.
	ConvertToBinary = "binary"
	// ConvertToChar is a conversion to char.
	ConvertToChar = "char"
	// ConvertToNChar is a conversion to nchar.
	ConvertToNChar = "nchar"
	// ConvertToDate is a conversion to date.
	ConvertToDate = "date"
	// ConvertToDatetime is a conversion to datetime.
	ConvertToDatetime = "datetime"
	// ConvertToDecimal is a conversion to decimal.
	ConvertToDecimal = "decimal"
	// ConvertToDouble is a conversion to double.
	ConvertToDouble = "double"
	// ConvertToJSON is a conversion to json.
	ConvertToJSON = "json"
	// ConvertToReal is a conversion to double.
	ConvertToReal = "real"
	// ConvertToSigned is a conversion to signed.
	ConvertToSigned = "signed"
	// ConvertToTime is a conversion to time.
	ConvertToTime = "time"
	// ConvertToUnsigned is a conversion to unsigned.
	ConvertToUnsigned = "unsigned"
)
// Convert represents a CAST(x AS T) or CONVERT(x, T) operation that casts
// the x expression to type T.
type Convert struct {
	UnaryExpression
	// castToType is the target type name, stored lower-cased by NewConvert
	// (one of the ConvertTo* constants for recognized types).
	castToType string
}
// NewConvert creates a new Convert expression that casts expr to castToType.
// The target type name is normalized to lower case so later comparisons can
// rely on a canonical form.
func NewConvert(expr sql.Expression, castToType string) *Convert {
	normalized := strings.ToLower(castToType)
	return &Convert{
		UnaryExpression: UnaryExpression{Child: expr},
		castToType:      normalized,
	}
}
// IsNullable implements the Expression interface. Date and datetime casts
// can yield NULL for unconvertible inputs, so they are always nullable; any
// other cast is nullable only when the child expression is.
func (c *Convert) IsNullable() bool {
	if c.castToType == ConvertToDate || c.castToType == ConvertToDatetime {
		return true
	}
	return c.Child.IsNullable()
}
// Type implements the Expression interface, reporting the SQL type this
// cast produces. Unrecognized target names map to sql.Null.
func (c *Convert) Type() sql.Type {
	switch c.castToType {
	case ConvertToSigned:
		return sql.Int64
	case ConvertToUnsigned:
		return sql.Uint64
	case ConvertToDouble, ConvertToReal:
		return sql.Float64
	case ConvertToDecimal:
		// TODO: precision and scale are arbitrary placeholders here; the
		// values declared in the query should be captured and used instead.
		return sql.MustCreateDecimalType(65, 10)
	case ConvertToBinary:
		return sql.LongBlob
	case ConvertToChar, ConvertToNChar:
		return sql.LongText
	case ConvertToDate:
		return sql.Date
	case ConvertToDatetime:
		return sql.Datetime
	case ConvertToTime:
		return sql.Time
	case ConvertToJSON:
		return sql.JSON
	default:
		return sql.Null
	}
}
// String implements the fmt.Stringer interface.
func (c *Convert) String() string {
	return fmt.Sprintf("convert(%v, %v)", c.Child, c.castToType)
}
// WithChildren implements the Expression interface, producing a new Convert
// with the same target type over the single given child.
func (c *Convert) WithChildren(ctx *sql.Context, children ...sql.Expression) (sql.Expression, error) {
	if n := len(children); n != 1 {
		return nil, sql.ErrInvalidChildrenNumber.New(c, n, 1)
	}
	return NewConvert(children[0], c.castToType), nil
}
// Eval implements the Expression interface. It evaluates the child
// expression and casts the result to the target type. A NULL child yields
// NULL; a failed cast surfaces as an ErrConvertExpression error.
func (c *Convert) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	val, err := c.Child.Eval(ctx, row)
	if err != nil {
		return nil, err
	}
	if val == nil {
		return nil, nil
	}
	converted, convErr := convertValue(val, c.castToType)
	if convErr != nil {
		return nil, ErrConvertExpression.Wrap(convErr, c.String(), c.castToType)
	}
	return converted, nil
}
// convertValue casts val to the named target type, emulating MySQL's lenient
// CAST semantics: failures do NOT generally produce errors. Per target type:
//   - binary/char/nchar/date/datetime/time: a failed conversion yields
//     (nil, nil), i.e. SQL NULL;
//   - decimal/double/real/signed: a failed conversion yields the type's zero
//     value with no error;
//   - unsigned: failures are routed through handleUnsignedErrors, which
//     applies wraparound semantics for negative inputs;
//   - json: the only case that returns a non-nil error;
//   - any unrecognized target name: (nil, nil).
// Note: date/datetime only attempt conversion for time.Time or string
// inputs; everything else becomes NULL immediately.
func convertValue(val interface{}, castTo string) (interface{}, error) {
	switch strings.ToLower(castTo) {
	case ConvertToBinary:
		b, err := sql.LongBlob.Convert(val)
		if err != nil {
			return nil, nil
		}
		return b, nil
	case ConvertToChar, ConvertToNChar:
		s, err := sql.LongText.Convert(val)
		if err != nil {
			return nil, nil
		}
		return s, nil
	case ConvertToDate:
		// Only time.Time and string are candidate inputs for a date cast.
		_, isTime := val.(time.Time)
		_, isString := val.(string)
		if !(isTime || isString) {
			return nil, nil
		}
		d, err := sql.Date.Convert(val)
		if err != nil {
			return nil, nil
		}
		return d, nil
	case ConvertToDatetime:
		// Only time.Time and string are candidate inputs for a datetime cast.
		_, isTime := val.(time.Time)
		_, isString := val.(string)
		if !(isTime || isString) {
			return nil, nil
		}
		d, err := sql.Datetime.Convert(val)
		if err != nil {
			return nil, nil
		}
		return d, nil
	case ConvertToDecimal:
		//TODO: these values are completely arbitrary, we need to get the given precision/scale and store it
		typ := sql.MustCreateDecimalType(65, 10)
		d, err := typ.Convert(val)
		if err != nil {
			// Failed numeric casts yield the zero value, not NULL.
			return typ.Zero(), nil
		}
		return d, nil
	case ConvertToDouble, ConvertToReal:
		d, err := sql.Float64.Convert(val)
		if err != nil {
			// Failed numeric casts yield the zero value, not NULL.
			return sql.Float64.Zero(), nil
		}
		return d, nil
	case ConvertToJSON:
		// JSON is the only target whose conversion failure is a hard error.
		js, err := sql.JSON.Convert(val)
		if err != nil {
			return nil, err
		}
		return js, nil
	case ConvertToSigned:
		num, err := sql.Int64.Convert(val)
		if err != nil {
			// Failed numeric casts yield the zero value, not NULL.
			return sql.Int64.Zero(), nil
		}
		return num, nil
	case ConvertToTime:
		t, err := sql.Time.Convert(val)
		if err != nil {
			return nil, nil
		}
		return t, nil
	case ConvertToUnsigned:
		num, err := sql.Uint64.Convert(val)
		if err != nil {
			// Negative or string inputs get MySQL wraparound treatment.
			num = handleUnsignedErrors(err, val)
		}
		return num, nil
	default:
		return nil, nil
	}
}
// handleUnsignedErrors recovers a usable uint64 from a failed unsigned
// conversion. Two failure modes are repaired: a negative-value cast is
// reinterpreted with two's-complement wraparound, and a strconv.ParseUint
// failure is retried as a signed parse and then wrapped. Any other error
// yields 0.
// NOTE(review): matching on error message text is fragile — if the
// underlying cast library changes its wording this silently falls through
// to 0; confirm against that package's error values.
func handleUnsignedErrors(err error, val interface{}) uint64 {
	if err.Error() == "unable to cast negative value" {
		return castSignedToUnsigned(val)
	}
	if strings.Contains(err.Error(), "strconv.ParseUint") {
		signedNum, err := strconv.ParseInt(val.(string), 0, 64)
		if err != nil {
			return uint64(0)
		}
		return castSignedToUnsigned(signedNum)
	}
	return uint64(0)
}
// castSignedToUnsigned reinterprets a signed integer value as a uint64.
// Negative inputs wrap around (two's complement), matching MySQL's CAST
// semantics for signed-to-unsigned conversions. Values of any other type
// yield 0.
func castSignedToUnsigned(val interface{}) uint64 {
	switch num := val.(type) {
	case int:
		return uint64(num)
	case int8:
		return uint64(num)
	case int16:
		return uint64(num)
	case int32:
		return uint64(num)
	case int64:
		return uint64(num)
	}
	return 0
}
package blocks
import (
"fmt"
"retroio/spectrum/tap"
"retroio/spectrum/tzx/blocks/types"
"retroio/storage"
)
// Select
// ID: 28h (40d)
// This block is useful when the tape consists of two or more separately-loadable parts. With this
// block, you are able to select one of the parts and the utility/emulator will start loading from
// that block. For example you can use it when the game has a separate Trainer or when it is a
// multi-load. Of course, to make some use of it the emulator/utility has to show a menu with the
// selections when it encounters such a block. All offsets are relative signed words.
type Select struct {
	BlockID types.BlockType
	Length uint16 // Length of the whole block (without these two bytes)
	Count uint8 // Number of selections
	Selections []Selection // List of selections
}

// Selection is a single selectable entry within a Select block.
type Selection struct {
	RelativeOffset int16 // Relative Offset as `signed` value
	Length uint8 // Length of description text
	Description []uint8 // Description text (please use single line and max. 30 chars)
}
// Read the tape and extract the data.
// It is expected that the tape pointer is at the correct position for reading.
// It returns an error when the block ID on tape does not match the Select ID.
func (s *Select) Read(reader *storage.Reader) error {
	s.BlockID = types.BlockType(reader.ReadByte())
	if s.BlockID != s.Id() {
		return fmt.Errorf("expected block ID 0x%02x, got 0x%02x", s.Id(), s.BlockID)
	}

	s.Length = reader.ReadShort()
	s.Count = reader.ReadByte()

	// The selection count is known up front, so pre-size the slice.
	s.Selections = make([]Selection, 0, s.Count)
	for i := 0; i < int(s.Count); i++ {
		var selection Selection
		selection.RelativeOffset = int16(reader.ReadShort())
		selection.Length = reader.ReadByte()
		// Read the description in one call rather than appending byte by byte.
		selection.Description = reader.ReadBytes(int(selection.Length))
		s.Selections = append(s.Selections, selection)
	}

	return nil
}
// Id of the block as given in the TZX specification, written as a hexadecimal number.
func (s Select) Id() types.BlockType {
	return types.Select
}

// Name of the block as given in the TZX specification.
func (s Select) Name() string {
	return "Select"
}

// BlockData returns the tape data for this block; Select blocks carry no
// tape data, so the result is always nil.
func (s Select) BlockData() tap.Block {
	return nil
}
// String returns a human readable string of the block data
func (s Select) String() string {
str := fmt.Sprintf("%s\n", s.Name())
for _, b := range s.Selections {
str += fmt.Sprintf("- Offset: %d\n", b.RelativeOffset)
str += fmt.Sprintf(" Description: %s\n", b.Description)
}
return str
} | spectrum/tzx/blocks/select.go | 0.607547 | 0.439206 | select.go | starcoder |
package fp
// Min returns the smaller of a and b.
func (a Int) Min(b Int) Int {
	if a <= b {
		return a
	}
	return b
}

// Min returns the smaller of a and b.
func (a Int64) Min(b Int64) Int64 {
	if a <= b {
		return a
	}
	return b
}

// Min returns the smaller of a and b.
func (a Byte) Min(b Byte) Byte {
	if a <= b {
		return a
	}
	return b
}

// Min returns the smaller of a and b.
func (a Rune) Min(b Rune) Rune {
	if a <= b {
		return a
	}
	return b
}

// Min returns the smaller of a and b. If either operand is NaN the
// comparison fails and b is returned.
func (a Float32) Min(b Float32) Float32 {
	if a <= b {
		return a
	}
	return b
}

// Min returns the smaller of a and b. If either operand is NaN the
// comparison fails and b is returned.
func (a Float64) Min(b Float64) Float64 {
	if a <= b {
		return a
	}
	return b
}
// Max returns the larger of a and b.
func (a Int) Max(b Int) Int {
	if a > b {
		return a
	}
	return b
}

// Max returns the larger of a and b.
func (a Int64) Max(b Int64) Int64 {
	if a > b {
		return a
	}
	return b
}

// Max returns the larger of a and b.
func (a Byte) Max(b Byte) Byte {
	if a > b {
		return a
	}
	return b
}

// Max returns the larger of a and b.
func (a Rune) Max(b Rune) Rune {
	if a > b {
		return a
	}
	return b
}

// Max returns the larger of a and b. If either operand is NaN the
// comparison fails and b is returned.
func (a Float32) Max(b Float32) Float32 {
	if a > b {
		return a
	}
	return b
}

// Max returns the larger of a and b. If either operand is NaN the
// comparison fails and b is returned.
func (a Float64) Max(b Float64) Float64 {
	if a > b {
		return a
	}
	return b
}
// To returns the inclusive ascending range [n, t] as an IntList.
// When t < n the result is the empty list.
func (n Int) To(t Int) IntList {
	acc := NilIntList
	for i := n.Underlined(); i <= t.Underlined(); i++ {
		acc = acc.Cons(i)
	}
	// Cons prepends, so the list was built in reverse; flip it back.
	return acc.Reverse()
}

// To returns the inclusive ascending range [n, t] as a ByteList.
// When t < n the result is the empty list.
func (n Byte) To(t Byte) ByteList {
	acc := NilByteList
	for i := n.Underlined(); i <= t.Underlined(); i++ {
		acc = acc.Cons(i)
	}
	// Cons prepends, so the list was built in reverse; flip it back.
	return acc.Reverse()
}

// Until returns the half-open ascending range [n, t) as an IntList.
// When t <= n the result is the empty list.
func (n Int) Until(t Int) IntList {
	acc := NilIntList
	for i := n.Underlined(); i < t.Underlined(); i++ {
		acc = acc.Cons(i)
	}
	// Cons prepends, so the list was built in reverse; flip it back.
	return acc.Reverse()
}

// Until returns the half-open ascending range [n, t) as a ByteList.
// When t <= n the result is the empty list.
func (n Byte) Until(t Byte) ByteList {
	acc := NilByteList
	for i := n.Underlined(); i < t.Underlined(); i++ {
		acc = acc.Cons(i)
	}
	// Cons prepends, so the list was built in reverse; flip it back.
	return acc.Reverse()
}
// IsBetween reports whether the receiver lies strictly between left and
// right (exclusive on both ends). One variant per numeric wrapper type.
func (a Int) IsBetween(left, right int) bool { return int(a) > left && int(a) < right }
func (a Int8) IsBetween(left, right int8) bool { return int8(a) > left && int8(a) < right }
func (a Int16) IsBetween(left, right int16) bool { return int16(a) > left && int16(a) < right }
func (a Int32) IsBetween(left, right int32) bool { return int32(a) > left && int32(a) < right }
func (a Int64) IsBetween(left, right int64) bool { return int64(a) > left && int64(a) < right }
func (a Uint) IsBetween(left, right uint) bool { return uint(a) > left && uint(a) < right }
func (a Uint8) IsBetween(left, right uint8) bool { return uint8(a) > left && uint8(a) < right }
func (a Uint16) IsBetween(left, right uint16) bool { return uint16(a) > left && uint16(a) < right }
func (a Uint32) IsBetween(left, right uint32) bool { return uint32(a) > left && uint32(a) < right }
func (a Uint64) IsBetween(left, right uint64) bool { return uint64(a) > left && uint64(a) < right }
func (a Uintptr) IsBetween(left, right uintptr) bool { return uintptr(a) > left && uintptr(a) < right }
func (a Byte) IsBetween(left, right byte) bool { return byte(a) > left && byte(a) < right }
func (a Rune) IsBetween(left, right rune) bool { return rune(a) > left && rune(a) < right }
func (a Float32) IsBetween(left, right float32) bool { return float32(a) > left && float32(a) < right }
func (a Float64) IsBetween(left, right float64) bool { return float64(a) > left && float64(a) < right }
// IsBetweenInclusive reports whether the receiver lies in the closed
// interval [left, right]. One variant per numeric wrapper type.
func (a Int) IsBetweenInclusive(left, right int) bool { return int(a) >= left && int(a) <= right }
func (a Int8) IsBetweenInclusive(left, right int8) bool { return int8(a) >= left && int8(a) <= right }
func (a Int16) IsBetweenInclusive(left, right int16) bool {
	return int16(a) >= left && int16(a) <= right
}
func (a Int32) IsBetweenInclusive(left, right int32) bool {
	return int32(a) >= left && int32(a) <= right
}
func (a Int64) IsBetweenInclusive(left, right int64) bool {
	return int64(a) >= left && int64(a) <= right
}
func (a Uint) IsBetweenInclusive(left, right uint) bool { return uint(a) >= left && uint(a) <= right }
func (a Uint8) IsBetweenInclusive(left, right uint8) bool {
	return uint8(a) >= left && uint8(a) <= right
}
func (a Uint16) IsBetweenInclusive(left, right uint16) bool {
	return uint16(a) >= left && uint16(a) <= right
}
func (a Uint32) IsBetweenInclusive(left, right uint32) bool {
	return uint32(a) >= left && uint32(a) <= right
}
func (a Uint64) IsBetweenInclusive(left, right uint64) bool {
	return uint64(a) >= left && uint64(a) <= right
}
func (a Uintptr) IsBetweenInclusive(left, right uintptr) bool {
	return uintptr(a) >= left && uintptr(a) <= right
}
func (a Byte) IsBetweenInclusive(left, right byte) bool { return byte(a) >= left && byte(a) <= right }
func (a Rune) IsBetweenInclusive(left, right rune) bool { return rune(a) >= left && rune(a) <= right }
func (a Float32) IsBetweenInclusive(left, right float32) bool {
	return float32(a) >= left && float32(a) <= right
}
func (a Float64) IsBetweenInclusive(left, right float64) bool {
	return float64(a) >= left && float64(a) <= right
}
package trafficpolicy
// EgressTrafficPolicy is the type used to represent the different egress traffic policy configurations
// applicable to a client of Egress destinations.
type EgressTrafficPolicy struct {
	// TrafficMatches defines the list of traffic matches for matching Egress traffic.
	// The matches specified are used to match outbound traffic as Egress traffic, and
	// subject matching traffic to Egress traffic policies.
	TrafficMatches []*TrafficMatch

	// HTTPRouteConfigsPerPort defines the Egress HTTP route configurations per port.
	// Egress HTTP routes are grouped based on their port to avoid route conflicts that
	// can arise when the same host headers are to be routed differently based on the
	// port specified in an egress policy.
	HTTPRouteConfigsPerPort map[int][]*EgressHTTPRouteConfig

	// ClustersConfigs defines the list of Egress cluster configurations.
	// The specified config is used to program external clusters corresponding to
	// the external endpoints defined in an Egress policy.
	ClustersConfigs []*EgressClusterConfig
}

// TrafficMatch is the type used to represent attributes used to match Egress traffic
type TrafficMatch struct {
	// DestinationPort defines the destination port number
	DestinationPort int

	// DestinationProtocol defines the protocol served by DestinationPort
	DestinationProtocol string

	// DestinationIPRanges defines the list of destination IP ranges
	// +optional
	DestinationIPRanges []string

	// ServerNames defines the list of server names to be used as SNI when the
	// DestinationProtocol is TLS based, ex. when the DestinationProtocol is 'https'
	// +optional
	ServerNames []string

	// Cluster defines the cluster associated with this TrafficMatch, if possible.
	// A TrafficMatch corresponding to an HTTP based cluster will not make use of
	// this property since the cluster is determined based on the computed routes.
	// A TrafficMatch corresponding to a TCP based cluster will make use of this
	// property to associate the match with the corresponding cluster.
	// +optional
	Cluster string
}

// EgressClusterConfig is the type used to represent an external cluster corresponding to a
// destination specified in an Egress policy.
type EgressClusterConfig struct {
	// Name defines the name of the external cluster
	Name string

	// Host defines the DNS resolvable hostname for the external cluster.
	// If specified, the cluster's address will be resolved using DNS.
	// HTTP based clusters will set the Host attribute.
	// If unspecified, the cluster's address will be resolved to its original
	// destination in the request prior to being redirected by iptables.
	// TCP based clusters will not set the Host attribute.
	// +optional
	Host string

	// Port defines the port number of the external cluster's endpoint
	Port int
}

// EgressHTTPRouteConfig is the type used to represent an HTTP route configuration along with associated routing rules
type EgressHTTPRouteConfig struct {
	// Name defines the name of the Egress HTTP route configuration
	Name string

	// Hostnames defines the list of hostnames corresponding to the Egress HTTP route configuration.
	// The Hostnames match against the :host header in the HTTP request and subject matching requests
	// to the routing rules defined by `RoutingRules`.
	Hostnames []string

	// RoutingRules defines the list of routes for the Egress HTTP route configuration, and corresponding
	// rules to be applied to those routes.
	RoutingRules []*EgressHTTPRoutingRule
}

// EgressHTTPRoutingRule is the type used to represent an Egress HTTP routing rule with its route and associated permissions
type EgressHTTPRoutingRule struct {
	// Route defines the HTTP route match and its associated cluster.
	Route RouteWeightedClusters

	// AllowedDestinationIPRanges defines the destination IP ranges allowed for the `Route` defined in the routing rule.
	AllowedDestinationIPRanges []string
}
package wchart
import (
"github.com/grokify/gocharts/data/statictimeseries"
"github.com/grokify/gotilla/time/month"
"github.com/grokify/gotilla/time/quarter"
"github.com/grokify/gotilla/time/timeutil"
"github.com/wcharczuk/go-chart"
)
// DataSeriesMapToContinuousSeriesMonths converts the named series in dsm
// to go-chart ContinuousSeries values, emitted in the order given by
// `order`. Names missing from the map are silently skipped.
func DataSeriesMapToContinuousSeriesMonths(dsm map[string]statictimeseries.DataSeries, order []string) []chart.ContinuousSeries {
	out := []chart.ContinuousSeries{}
	for _, name := range order {
		ds, ok := dsm[name]
		if !ok {
			continue
		}
		out = append(out, DataSeriesToContinuousSeries(ds))
	}
	return out
}
// DataSeriesToContinuousSeries converts a DataSeries to a go-chart
// ContinuousSeries. X values are encoded per the series interval:
// month and quarter intervals use continuous month/quarter numbers,
// anything else uses Unix seconds. Y values come from ValueFloat when
// the series holds floats, otherwise from the integer Value.
func DataSeriesToContinuousSeries(ds statictimeseries.DataSeries) chart.ContinuousSeries {
	items := ds.ItemsSorted()
	// Pre-size both value slices: the item count is known up front.
	series := chart.ContinuousSeries{
		Name:    ds.SeriesName,
		XValues: make([]float64, 0, len(items)),
		YValues: make([]float64, 0, len(items))}
	for _, item := range items {
		switch ds.Interval {
		case timeutil.Month:
			series.XValues = append(series.XValues,
				float64(month.TimeToMonthContinuous(item.Time)))
		case timeutil.Quarter:
			series.XValues = append(series.XValues,
				float64(quarter.TimeToQuarterContinuous(item.Time)))
		default:
			series.XValues = append(series.XValues, float64(item.Time.Unix()))
		}
		if ds.IsFloat {
			series.YValues = append(series.YValues, item.ValueFloat)
		} else {
			series.YValues = append(series.YValues, float64(item.Value))
		}
	}
	return series
}
// DataSeriesMapToContinuousSeriesQuarters converts the named series in dsm
// to quarter-based go-chart ContinuousSeries values, emitted in the order
// given by `order`. Names missing from the map are silently skipped.
func DataSeriesMapToContinuousSeriesQuarters(dsm map[string]statictimeseries.DataSeries, order []string) []chart.ContinuousSeries {
	out := []chart.ContinuousSeries{}
	for _, name := range order {
		ds, ok := dsm[name]
		if !ok {
			continue
		}
		out = append(out, DataSeriesToContinuousSeriesQuarter(ds))
	}
	return out
}
func DataSeriesToContinuousSeriesQuarter(ds statictimeseries.DataSeries) chart.ContinuousSeries {
series := chart.ContinuousSeries{
Name: ds.SeriesName,
XValues: []float64{},
YValues: []float64{}}
items := ds.ItemsSorted()
for _, item := range items {
series.XValues = append(
series.XValues,
float64(quarter.TimeToQuarterContinuous(item.Time)))
series.YValues = append(
series.YValues,
float64(item.Value))
}
return series
} | charts/wchart/continuous_series.go | 0.602412 | 0.419707 | continuous_series.go | starcoder |
// Package grammars parses XML GEP grammar representations for a particular output language.
// It can then be used to generate code from Karva expressions for that language.
package grammars
import (
"encoding/xml"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"github.com/gmlewis/gep/v2/functions"
)
const grammarPath = "github.com/gmlewis/gep/grammars"
// Functions is a collection of Functions available in the language grammar.
type Functions struct {
	Count int `xml:"count,attr"`
	Functions []Function `xml:"function"`
	// Lookup table of function symbol name to function definition.
	// Not unmarshaled from XML; built by loadGrammar after parsing.
	FuncMap functions.FuncMap `xml:"-"`
}

// Function represents a single function.
type Function struct {
	// Idx is the index of the function in the XML grammar.
	Idx int `xml:"idx,attr"`
	// SymbolName is the Karva symbol used to represent the function.
	SymbolName string `xml:"symbol,attr"`
	// TerminalCount specifies the number of input terminals to the function.
	TerminalCount int `xml:"terminals,attr"`
	// Uniontype (optional) determines how the diadic function is rendered.
	Uniontype string `xml:"uniontype,attr"`
	// Chardata is the action function rendering for the given language.
	Chardata string `xml:",chardata"`
}
// Symbol returns the symbol name of the function.
func (f *Function) Symbol() string {
	return f.SymbolName
}

// Terminals returns the terminal count of the function.
func (f *Function) Terminals() int {
	return f.TerminalCount
}

// BoolFunction is a stub so that *Function satisfies the interface
// required by functions.FuncMap; grammar functions are never evaluated
// directly, so it always returns false.
func (f *Function) BoolFunction([]bool) bool {
	return false
}

// IntFunction is a stub so that *Function satisfies the interface
// required by functions.FuncMap; it always returns 0.
func (f *Function) IntFunction([]int) int {
	return 0
}

// Float64Function is a stub so that *Function satisfies the interface
// required by functions.FuncMap; it always returns 0.0.
func (f *Function) Float64Function([]float64) float64 {
	return 0.0
}

// VectorIntFunction is a stub so that *Function satisfies the interface
// required by functions.FuncMap; it always returns an empty vector.
func (f *Function) VectorIntFunction([]functions.VectorInt) functions.VectorInt {
	return functions.VectorInt{}
}
// Replacement determines how a function can be replaced.
type Replacement struct {
	Type string `xml:"type,attr"`
	Replace string `xml:"replace,attr"`
	Indexzero bool `xml:"indexzero,attr"`
	Indexone bool `xml:"indexone,attr"`
	Chardata string `xml:",chardata"`
}

// Categories lists the types of grammar available.
type Categories struct {
	Functioncall Functioncall `xml:"functioncall"`
	TransformFunction TransformFunction `xml:"transformfunction"`
	Switch Switch `xml:"switch"`
	Case Format `xml:"case"`
	Equality Format `xml:"equality"`
}

// Functioncall represents a function call.
type Functioncall struct {
	Call string `xml:"call,attr"`
}

// TransformFunction represents the transformation of a function.
type TransformFunction struct {
	Header string `xml:"header,attr"`
	Footer string `xml:"footer,attr"`
	Prototype string `xml:"prototype,attr"`
}

// Switch represents the grammar for a switch call.
type Switch struct {
	Special string `xml:"special,attr"`
	Top string `xml:"top,attr"`
	Bottom string `xml:"bottom,attr"`
	Categoricaldefault string `xml:"categoricaldefault,attr"`
	Numericaldefault string `xml:"numericaldefault,attr"`
}

// Format represents how an item is formatted.
type Format struct {
	Format string `xml:"format,attr"`
}

// Transformation represents a function transformation.
type Transformation struct {
	Name string `xml:"name,attr"`
	Call string `xml:"call,attr"`
	Itemformat string `xml:"itemformat,attr"`
	Prototype string `xml:"prototype,attr"`
	Declarations string `xml:"declarations,attr"`
	Chardata string `xml:",chardata"`
}

// Constant represents a constant.
type Constant struct {
	Type string `xml:"type,attr"`
	Replace string `xml:"replace,attr"`
	Labelindex int `xml:"labelindex,attr"`
	Chardata string `xml:",chardata"`
}

// Tempvar represents a temporary variable.
type Tempvar struct {
	Type string `xml:"type,attr"`
	Typename string `xml:"typename,attr"`
	Varname string `xml:"varname,attr"`
	Chardata string `xml:",chardata"`
}

// Helper represents a helper function.
type Helper struct {
	Replaces string `xml:"replaces,attr"`
	Prototype string `xml:"prototype,attr"`
	Chardata string `xml:",chardata"`
}

// Testing represents a testing function.
type Testing struct {
	Prototype Prototype `xml:"prototype"`
	Method Method `xml:"method"`
}

// Prototype is a function signature.
type Prototype struct {
	Paramsformat string `xml:"paramsformat,attr"`
	Chardata string `xml:",chardata"`
}

// Method represents a method.
type Method struct {
	Callformat string `xml:"callformat,attr"`
	Listformat string `xml:"listformat,attr"`
	Chardata string `xml:",chardata"`
}

// OrderItem is a single element of the code structure.
type OrderItem struct {
	Name string `xml:"name,attr"`
}

// HelperMap maps helper replacement names (Helper.Replaces) to their
// rendered definitions (Helper.Chardata).
type HelperMap map[string]string

// Helpers represents helper functions for the target language.
type Helpers struct {
	Count int `xml:"count,attr"`
	Declaration string `xml:"declaration,attr"`
	Assignment string `xml:"assignment,attr"`
	Helpers []Helper `xml:"helper"`
	// Lookup table of helper symbol name to helper definition.
	// Not unmarshaled from XML; built by loadGrammar after parsing.
	HelperMap HelperMap `xml:"-"`
}

// Keyword is a reserved keyword in the target language.
type Keyword struct {
	Chardata string `xml:",chardata"`
}

// LinkingFunctions are special functions used by GEP for the target language.
type LinkingFunctions struct {
	Count int `xml:"count,attr"`
	LinkingFunctions []Helper `xml:"linkingFunction"`
}

// BasicFunctions are simple functions used by GEP for the target language.
type BasicFunctions struct {
	Count int `xml:"count,attr"`
	BasicFunctions []Helper `xml:"basicFunction"`
}
// Grammar represents the complete XML specification for rendering a Karva string for the given target language.
type Grammar struct {
	XMLName xml.Name `xml:"grammar"`
	Comments string `xml:",comment"`
	Name string `xml:"name,attr"`
	Version string `xml:"version,attr"`
	Ext string `xml:"ext,attr"`
	Type string `xml:"type,attr"`
	// Functions lists all possible functions available in the language grammar.
	Functions Functions `xml:"functions"`
	// Order specifies the order structure of the program.
	Order []OrderItem `xml:"order>item"`
	// Open contains the text for the start of the generated source code file.
	Open string `xml:"open"`
	// Close contains the text for the end of the generated source code file.
	Close string `xml:"close"`
	// Headers/Subheaders contains any required headers such as imports needed near the top of the generated source code file.
	Headers []Replacement `xml:"headers>header"`
	Subheaders []Replacement `xml:"subheaders>subheader"`
	// RandomConstants defines the syntax for a random constant.
	RandomConstants []Replacement `xml:"randomconstants>randomconst"`
	// Categories defines the special features of the target language.
	Categories Categories `xml:"categories"`
	// Transformations/ReverseTransformations lists any special transformations for the target language.
	Transformations []Transformation `xml:"transformations>transformation"`
	ReverseTransformations []Transformation `xml:"reversetransformations>transformation"`
	// Constants defines the syntax for constants for the target language.
	Constants []Constant `xml:"constants>constant"`
	// Tempvars defines the syntax for temporary variables for the target language.
	Tempvars []Tempvar `xml:"tempvars>tempvar"`
	// Endline specifies how the end of a line is rendered for the target language.
	Endline string `xml:"endline"`
	// Indent is the number of tab characters to add to each line in the code block.
	Indent int `xml:"indent"`
	// Parenstype specifies the type of parentheses to use for arrays in the target language.
	// 0=(), 1=[]
	Parenstype int `xml:"parenstype"`
	// Footers defines special code structures for the target language.
	Footers []Replacement `xml:"footers>footer"`
	// Helpers defines the complete set of helper functions used in the Functions above.
	Helpers Helpers `xml:"helpers"`
	// Keywords lists the keywords for the target language.
	Keywords []Keyword `xml:"keywords>keyword"`
	// Commentmark defines how a comment is rendered in the target language.
	Commentmark string `xml:"commentmark"`
	// LinkingFunctions lists special linking functions used by GEP for the target language.
	LinkingFunctions LinkingFunctions `xml:"linkingFunctions"`
	// BasicFunctions lists basic functions used by GEP for the target language.
	BasicFunctions BasicFunctions `xml:"basicFunctions"`
	Ddfcomment string `xml:"ddfcomment"`
	Udfcomment string `xml:"udfcomment"`
	// Testing defines a function used for testing a method for the target language.
	Testing Testing `xml:"testing"`
}
// loadGrammar reads and parses the XML grammar file at path, then builds
// the symbol->function and replaces->helper lookup maps for fast access.
// Errors are both logged and returned.
// NOTE(review): ioutil.ReadFile is deprecated since Go 1.16; os.ReadFile
// is the drop-in replacement (switching would also retire the io/ioutil
// import). Logging and returning the same error means failures may be
// reported twice — consider handling at one layer only.
func loadGrammar(path string) (*Grammar, error) {
	v := &Grammar{}
	data, err := ioutil.ReadFile(path)
	if err != nil {
		log.Printf("unable to read file %q: %q", path, err)
		return nil, err
	}
	err = xml.Unmarshal(data, &v)
	if err != nil {
		log.Printf("error unmarshaling %q: %q", path, err)
		return nil, err
	}
	// Build the function map lookups for fast access
	v.Functions.FuncMap = make(functions.FuncMap, len(v.Functions.Functions))
	for i, f := range v.Functions.Functions {
		v.Functions.FuncMap[f.SymbolName] = &v.Functions.Functions[i]
	}
	// Build the helpers map lookups for fast access
	v.Helpers.HelperMap = make(HelperMap, len(v.Helpers.Helpers))
	for _, h := range v.Helpers.Helpers {
		v.Helpers.HelperMap[h.Replaces] = h.Chardata
	}
	return v, nil
}
// getPath resolves filename to an on-disk grammar file. It searches each
// GOPATH entry for src/<grammarPath>/<filename>, then the relative
// grammarPath location, and finally falls back to the bare filename.
func getPath(filename string) string {
	// Support Travis CI automated builds by searching for files.
	// Use the OS-specific list separator so multi-element GOPATHs work on
	// Windows (";") as well as Unix (":").
	dirs := strings.Split(os.Getenv("GOPATH"), string(os.PathListSeparator))
	for _, dir := range dirs {
		name := filepath.Join(dir, "src", grammarPath, filename)
		if _, err := os.Stat(name); err == nil {
			return name
		}
	}
	name := filepath.Join(grammarPath, filename)
	if _, err := os.Stat(name); err == nil {
		return name
	}
	return filename
}
// LoadGoMathGrammar loads the floating-point math grammar for Go as the target language.
func LoadGoMathGrammar() (*Grammar, error) {
	path := getPath("go.Math.00.default.grm.xml")
	return loadGrammar(path)
}

// LoadGoBooleanAllGatesGrammar loads the general boolean grammar for Go as the target language.
func LoadGoBooleanAllGatesGrammar() (*Grammar, error) {
	path := getPath("go.Boolean.01.AllGates.grm.xml")
	return loadGrammar(path)
}

// LoadGoBooleanNotAndOrGatesGrammar loads the specialized boolean grammar for Go as the target language.
func LoadGoBooleanNotAndOrGatesGrammar() (*Grammar, error) {
	path := getPath("go.Boolean.02.NotAndOrGates.grm.xml")
	return loadGrammar(path)
}

// LoadGoBooleanNandGatesGrammar loads the specialized boolean grammar for Go as the target language.
func LoadGoBooleanNandGatesGrammar() (*Grammar, error) {
	path := getPath("go.Boolean.03.NandGates.grm.xml")
	return loadGrammar(path)
}

// LoadGoBooleanNorGatesGrammar loads the specialized boolean grammar for Go as the target language.
func LoadGoBooleanNorGatesGrammar() (*Grammar, error) {
	path := getPath("go.Boolean.04.NorGates.grm.xml")
	return loadGrammar(path)
}

// LoadGoBooleanMuxSystemGrammar loads the specialized boolean grammar for Go as the target language.
func LoadGoBooleanMuxSystemGrammar() (*Grammar, error) {
	path := getPath("go.Boolean.05.MuxSystem.grm.xml")
	return loadGrammar(path)
}

// LoadGoReedMullerSystemGrammar loads the specialized boolean grammar for Go as the target language.
func LoadGoReedMullerSystemGrammar() (*Grammar, error) {
	path := getPath("go.Boolean.06.ReedMullerSystem.grm.xml")
	return loadGrammar(path)
}
package geometry
// Poly is a polygon consisting of one exterior ring and zero or more
// interior rings (holes).
type Poly struct {
	Exterior Ring
	Holes []Ring
}
// NewPoly constructs a polygon from an exterior point ring and optional
// hole rings, indexing each ring with the supplied options.
func NewPoly(exterior []Point, holes [][]Point, opts *IndexOptions) *Poly {
	p := &Poly{Exterior: newRing(exterior, opts)}
	if len(holes) == 0 {
		return p
	}
	p.Holes = make([]Ring, len(holes))
	for i, hole := range holes {
		p.Holes[i] = newRing(hole, opts)
	}
	return p
}
// Clockwise reports whether the exterior ring winds clockwise. A nil
// polygon or missing exterior reports false.
func (poly *Poly) Clockwise() bool {
	if poly != nil && poly.Exterior != nil {
		return poly.Exterior.Clockwise()
	}
	return false
}

// Empty reports whether the polygon has no usable geometry. A nil
// polygon or missing exterior is considered empty.
func (poly *Poly) Empty() bool {
	if poly != nil && poly.Exterior != nil {
		return poly.Exterior.Empty()
	}
	return true
}

// Valid reports whether the polygon lies within the world polygon bounds.
func (poly *Poly) Valid() bool {
	return WorldPolygon.ContainsPoly(poly)
}
// Rect returns the bounding rectangle of the polygon's exterior ring.
// A nil polygon or missing exterior yields the zero Rect.
func (poly *Poly) Rect() Rect {
	if poly == nil || poly.Exterior == nil {
		return Rect{}
	}
	return poly.Exterior.Rect()
}
// Move the polygon by delta. Returns a new polygon
func (poly *Poly) Move(deltaX, deltaY float64) *Poly {
if poly == nil {
return nil
}
if poly.Exterior == nil {
return new(Poly)
}
npoly := new(Poly)
if series, ok := poly.Exterior.(*baseSeries); ok {
npoly.Exterior = Ring(series.Move(deltaX, deltaY))
} else {
nseries := makeSeries(
seriesCopyPoints(poly.Exterior), false, true, DefaultIndexOptions)
npoly.Exterior = Ring(nseries.Move(deltaX, deltaY))
}
if len(poly.Holes) > 0 {
npoly.Holes = make([]Ring, len(poly.Holes))
for i, hole := range poly.Holes {
if series, ok := hole.(*baseSeries); ok {
npoly.Holes[i] = Ring(series.Move(deltaX, deltaY))
} else {
nseries := makeSeries(
seriesCopyPoints(hole), false, true, DefaultIndexOptions)
npoly.Holes[i] = Ring(nseries.Move(deltaX, deltaY))
}
}
}
return npoly
}
// ContainsPoint reports whether the polygon contains the given point:
// the point must lie within the exterior ring (boundary inclusive) and
// outside every hole.
func (poly *Poly) ContainsPoint(point Point) bool {
	if poly == nil || poly.Exterior == nil {
		return false
	}
	if !ringContainsPoint(poly.Exterior, point, true).hit {
		return false
	}
	for _, hole := range poly.Holes {
		if ringContainsPoint(hole, point, false).hit {
			return false
		}
	}
	return true
}
// IntersectsPoint reports whether the polygon intersects the given point.
// For a filled polygon this is the same test as containment.
func (poly *Poly) IntersectsPoint(point Point) bool {
	if poly == nil {
		return false
	}
	return poly.ContainsPoint(point)
}

// ContainsRect reports whether the polygon fully contains the rectangle.
func (poly *Poly) ContainsRect(rect Rect) bool {
	if poly == nil {
		return false
	}
	// convert rect into a polygon
	return poly.ContainsPoly(&Poly{Exterior: rect})
}

// IntersectsRect reports whether the polygon intersects the rectangle.
func (poly *Poly) IntersectsRect(rect Rect) bool {
	if poly == nil {
		return false
	}
	// convert rect into a polygon
	return poly.IntersectsPoly(&Poly{Exterior: rect})
}
// ContainsLine reports whether the line lies entirely inside the polygon:
// within the exterior ring and not crossing into any hole.
func (poly *Poly) ContainsLine(line *Line) bool {
	if poly == nil || poly.Exterior == nil || line == nil {
		return false
	}
	if !ringContainsLine(poly.Exterior, line, true) {
		return false
	}
	// The line must not touch any hole.
	for _, polyHole := range poly.Holes {
		if ringIntersectsLine(polyHole, line, false) {
			return false
		}
	}
	return true
}

// IntersectsLine reports whether the line intersects the polygon's
// exterior ring (boundary inclusive).
func (poly *Poly) IntersectsLine(line *Line) bool {
	if poly == nil || poly.Exterior == nil || line == nil {
		return false
	}
	return ringIntersectsLine(poly.Exterior, line, true)
}
// ContainsPoly reports whether the polygon fully contains `other`:
// other's exterior must lie inside poly's exterior, and other must not
// overlap any of poly's holes — unless a poly hole is itself nested
// inside one of other's holes.
func (poly *Poly) ContainsPoly(other *Poly) bool {
	if poly == nil || poly.Exterior == nil ||
		other == nil || other.Exterior == nil {
		return false
	}
	// 1) other exterior must be fully contained inside of the poly exterior.
	if !ringContainsRing(poly.Exterior, other.Exterior, true) {
		return false
	}
	// 2) other's exterior cannot intersect poly's holes ...
	contains := true
	for _, polyHole := range poly.Holes {
		if ringIntersectsRing(polyHole, other.Exterior, false) {
			contains = false
			// 3) ... unless the poly hole is contained inside one of
			// other's holes.
			for _, otherHole := range other.Holes {
				if ringContainsRing(otherHole, polyHole, true) {
					contains = true
					break
				}
			}
			if !contains {
				break
			}
		}
	}
	return contains
}
// IntersectsPoly reports whether the two polygons intersect: their
// exterior rings must intersect, and neither polygon may sit entirely
// inside one of the other's holes.
func (poly *Poly) IntersectsPoly(other *Poly) bool {
	if poly == nil || poly.Exterior == nil ||
		other == nil || other.Exterior == nil {
		return false
	}
	if !ringIntersectsRing(other.Exterior, poly.Exterior, true) {
		return false
	}
	// other fully inside one of poly's holes means no intersection.
	for _, hole := range poly.Holes {
		if ringContainsRing(hole, other.Exterior, false) {
			return false
		}
	}
	// poly fully inside one of other's holes means no intersection.
	for _, hole := range other.Holes {
		if ringContainsRing(hole, poly.Exterior, false) {
			return false
		}
	}
	return true
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.