code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package format
import (
"fmt"
"math"
"strings"
)
// Format converts currency to a string with correct symbol and precision.
func Format(currencyCode string, value float64) (string, error) {
strFormat := getDefaultFormat(value < 0)
return FormatAs(currencyCode, value, strFormat)
}
// getDefaultFormat picks the default layout: "%s%v" for non-negative
// amounts, "%s-%v" when a leading minus sign is needed.
func getDefaultFormat(isNegative bool) string {
	layout := "%s%v"
	if isNegative {
		layout = "%s-%v"
	}
	return layout
}
// FormatAs converts currency to a string with correct symbol and precision
// according to the specified format string.
// %v is used for the value, %s for the currency symbol.
// The value is rendered as an absolute number, so any sign must be part of
// strFormat (see getDefaultFormat).
func FormatAs(currencyCode string, value float64, strFormat string) (string, error) {
	precision, ok := currencyPrecision[currencyCode]
	if !ok {
		return "", fmt.Errorf("format: no precision value found for currency code: %v", currencyCode)
	}
	// Rewrite the user-facing verbs into indexed verbs so value, precision
	// and symbol can be passed in a fixed argument order regardless of how
	// they appear in strFormat.
	strFormat = strings.ReplaceAll(strings.ReplaceAll(strFormat, "%v", "%.[2]*[1]f"), "%s", "%[3]s")
	roundedValue := roundToPrecision(math.Abs(value), precision)
	// fmt requires the '*' precision argument to be of type int; passing the
	// raw int8 makes Sprintf emit "%!(BADPREC...)" instead of the number.
	return fmt.Sprintf(strFormat, roundedValue, int(precision), currencySymbols[currencyCode]), nil
}
// Round returns a float64 with the precision for the currency specified.
func Round(currencyCode string, value float64) (float64, error) {
	precision, ok := currencyPrecision[currencyCode]
	if !ok {
		return 0, fmt.Errorf("format: no precision value found for currency code: %v", currencyCode)
	}
	return roundToPrecision(value, precision), nil
}
// roundToPrecision rounds value to the given number of decimal places,
// rounding halves away from zero (math.Round semantics).
func roundToPrecision(value float64, precision int8) float64 {
	shift := math.Pow(10, float64(precision))
	// math.Round already returns a float64; the original's extra
	// float64(...) conversion was a no-op.
	return math.Round(value*shift) / shift
}
var currencyPrecision = map[string]int8{
"AED": 2,
"AFN": 2,
"ALL": 2,
"AMD": 2,
"ANG": 2,
"AOA": 2,
"ARS": 2,
"AUD": 2,
"AWG": 2,
"AZN": 2,
"BAM": 2,
"BBD": 2,
"BDT": 2,
"BGN": 2,
"BHD": 3,
"BIF": 0,
"BMD": 2,
"BND": 2,
"BOB": 2,
"BOV": 2,
"BRL": 2,
"BSD": 2,
"BTN": 2,
"BWP": 2,
"BYR": 0,
"BZD": 2,
"CAD": 2,
"CDF": 2,
"CHE": 2,
"CHF": 2,
"CHW": 2,
"CLF": 4,
"CLP": 0,
"CNY": 2,
"COP": 2,
"COU": 2,
"CRC": 2,
"CUC": 2,
"CUP": 2,
"CVE": 2,
"CZK": 2,
"DJF": 0,
"DKK": 2,
"DOP": 2,
"DZD": 2,
"EGP": 2,
"ERN": 2,
"ETB": 2,
"EUR": 2,
"FJD": 2,
"FKP": 2,
"GBP": 2,
"GEL": 2,
"GHS": 2,
"GIP": 2,
"GMD": 2,
"GNF": 0,
"GTQ": 2,
"GYD": 2,
"HKD": 2,
"HNL": 2,
"HRK": 2,
"HTG": 2,
"HUF": 2,
"IDR": 2,
"ILS": 2,
"INR": 2,
"IQD": 3,
"IRR": 2,
"ISK": 0,
"JMD": 2,
"JOD": 3,
"JPY": 0,
"KES": 2,
"KGS": 2,
"KHR": 2,
"KMF": 0,
"KPW": 2,
"KRW": 0,
"KWD": 3,
"KYD": 2,
"KZT": 2,
"LAK": 2,
"LBP": 2,
"LKR": 2,
"LRD": 2,
"LSL": 2,
"LYD": 3,
"MAD": 2,
"MDL": 2,
"MGA": 2,
"MKD": 2,
"MMK": 2,
"MNT": 2,
"MOP": 2,
"MRO": 2,
"MUR": 2,
"MVR": 2,
"MWK": 2,
"MXN": 2,
"MXV": 2,
"MYR": 2,
"MZN": 2,
"NAD": 2,
"NGN": 2,
"NIO": 2,
"NOK": 2,
"NPR": 2,
"NZD": 2,
"OMR": 3,
"PAB": 2,
"PEN": 2,
"PGK": 2,
"PHP": 2,
"PKR": 2,
"PLN": 2,
"PYG": 0,
"QAR": 2,
"RON": 2,
"RSD": 2,
"RUB": 2,
"RWF": 0,
"SAR": 2,
"SBD": 2,
"SCR": 2,
"SDG": 2,
"SEK": 2,
"SGD": 2,
"SHP": 2,
"SLL": 2,
"SOS": 2,
"SRD": 2,
"SSP": 2,
"STD": 2,
"SVC": 2,
"SYP": 2,
"SZL": 2,
"THB": 2,
"TJS": 2,
"TMT": 2,
"TND": 3,
"TOP": 2,
"TRY": 2,
"TTD": 2,
"TWD": 2,
"TZS": 2,
"UAH": 2,
"UGX": 0,
"USD": 2,
"USN": 2,
"UYI": 0,
"UYU": 2,
"UZS": 2,
"VEF": 2,
"VND": 0,
"VUV": 0,
"WST": 2,
"XAF": 0,
"XCD": 2,
"XOF": 0,
"XPF": 0,
"YER": 2,
"ZAR": 2,
"ZMW": 2,
"ZWL": 2,
}
var currencySymbols = map[string]string{
"ALL": "Lek",
"AFN": "؋",
"ARS": "$",
"AWG": "ƒ",
"AUD": "$",
"AZN": "₼",
"BSD": "$",
"BBD": "$",
"BYN": "Br",
"BZD": "BZ$",
"BMD": "$",
"BOB": "$b",
"BAM": "KM",
"BWP": "P",
"BGN": "лв",
"BRL": "R$",
"BND": "$",
"KHR": "៛",
"CAD": "$",
"KYD": "$",
"CLP": "$",
"CNY": "¥",
"COP": "$",
"CRC": "₡",
"HRK": "kn",
"CUP": "₱",
"CZK": "Kč",
"DKK": "kr",
"DOP": "RD$",
"XCD": "$",
"EGP": "£",
"SVC": "$",
"EUR": "€",
"FKP": "£",
"FJD": "$",
"GHS": "¢",
"GIP": "£",
"GTQ": "Q",
"GGP": "£",
"GYD": "$",
"HNL": "L",
"HKD": "$",
"HUF": "Ft",
"ISK": "kr",
"IDR": "Rp",
"IRR": "﷼",
"IMP": "£",
"ILS": "₪",
"JMD": "J$",
"JPY": "¥",
"JEP": "£",
"KZT": "лв",
"KPW": "₩",
"KRW": "₩",
"KGS": "лв",
"LAK": "₭",
"LBP": "£",
"LRD": "$",
"MKD": "ден",
"MYR": "RM",
"MUR": "₨",
"MXN": "$",
"MNT": "₮",
"MZN": "MT",
"NAD": "$",
"NPR": "₨",
"ANG": "ƒ",
"NZD": "$",
"NIO": "C$",
"NGN": "₦",
"NOK": "kr",
"OMR": "﷼",
"PKR": "₨",
"PAB": "B/.",
"PYG": "Gs",
"PEN": "S/.",
"PHP": "₱",
"PLN": "zł",
"QAR": "﷼",
"RON": "lei",
"RUB": "₽",
"SHP": "£",
"SAR": "﷼",
"RSD": "Дин.",
"SCR": "₨",
"SGD": "$",
"SBD": "$",
"SOS": "S",
"ZAR": "R",
"LKR": "₨",
"SEK": "kr",
"CHF": "CHF",
"SRD": "$",
"SYP": "£",
"TWD": "NT$",
"THB": "฿",
"TTD": "TT$",
"TVD": "$",
"UAH": "₴",
"GBP": "£",
"USD": "$",
"UYU": "$U",
"UZS": "лв",
"VEF": "Bs",
"VND": "₫",
"YER": "﷼",
"ZWD": "Z$",
} | format/format.go | 0.68658 | 0.577853 | format.go | starcoder |
package zstd
import (
"bytes"
"errors"
"io"
)
// HeaderMaxSize is the maximum size of a Frame and Block Header.
// If less is sent to Header.Decode it *may* still contain enough information.
const HeaderMaxSize = 14 + 3
// Header contains information about the first frame and block within that.
type Header struct {
// Window Size the window of data to keep while decoding.
// Will only be set if HasFCS is false.
WindowSize uint64
// Frame content size.
// Expected size of the entire frame.
FrameContentSize uint64
// Dictionary ID.
// If 0, no dictionary.
DictionaryID uint32
// First block information.
FirstBlock struct {
// OK will be set if first block could be decoded.
OK bool
// Is this the last block of a frame?
Last bool
// Is the data compressed?
// If true CompressedSize will be populated.
// Unfortunately DecompressedSize cannot be determined
// without decoding the blocks.
Compressed bool
// DecompressedSize is the expected decompressed size of the block.
// Will be 0 if it cannot be determined.
DecompressedSize int
// CompressedSize of the data in the block.
// Does not include the block header.
// Will be equal to DecompressedSize if not Compressed.
CompressedSize int
}
// Skippable will be true if the frame is meant to be skipped.
// No other information will be populated.
Skippable bool
// If set there is a checksum present for the block content.
HasCheckSum bool
// If this is true FrameContentSize will have a valid value
HasFCS bool
SingleSegment bool
}
// Decode the header from the beginning of the stream.
// This will decode the frame header and the first block header if enough bytes are provided.
// It is recommended to provide at least HeaderMaxSize bytes.
// If the frame header cannot be read an error will be returned.
// If there isn't enough input, io.ErrUnexpectedEOF is returned.
// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
func (h *Header) Decode(in []byte) error {
if len(in) < 4 {
return io.ErrUnexpectedEOF
}
b, in := in[:4], in[4:]
if !bytes.Equal(b, frameMagic) {
if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
return ErrMagicMismatch
}
*h = Header{Skippable: true}
return nil
}
if len(in) < 1 {
return io.ErrUnexpectedEOF
}
// Clear output
*h = Header{}
fhd, in := in[0], in[1:]
h.SingleSegment = fhd&(1<<5) != 0
h.HasCheckSum = fhd&(1<<2) != 0
if fhd&(1<<3) != 0 {
return errors.New("reserved bit set on frame header")
}
// Read Window_Descriptor
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
if !h.SingleSegment {
if len(in) < 1 {
return io.ErrUnexpectedEOF
}
var wd byte
wd, in = in[0], in[1:]
windowLog := 10 + (wd >> 3)
windowBase := uint64(1) << windowLog
windowAdd := (windowBase / 8) * uint64(wd&0x7)
h.WindowSize = windowBase + windowAdd
}
// Read Dictionary_ID
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
if size := fhd & 3; size != 0 {
if size == 3 {
size = 4
}
if len(in) < int(size) {
return io.ErrUnexpectedEOF
}
b, in = in[:size], in[size:]
if b == nil {
return io.ErrUnexpectedEOF
}
switch size {
case 1:
h.DictionaryID = uint32(b[0])
case 2:
h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8)
case 4:
h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
}
}
// Read Frame_Content_Size
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size
var fcsSize int
v := fhd >> 6
switch v {
case 0:
if h.SingleSegment {
fcsSize = 1
}
default:
fcsSize = 1 << v
}
if fcsSize > 0 {
h.HasFCS = true
if len(in) < fcsSize {
return io.ErrUnexpectedEOF
}
b, in = in[:fcsSize], in[fcsSize:]
if b == nil {
return io.ErrUnexpectedEOF
}
switch fcsSize {
case 1:
h.FrameContentSize = uint64(b[0])
case 2:
// When FCS_Field_Size is 2, the offset of 256 is added.
h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256
case 4:
h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
case 8:
d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
h.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
}
}
// Frame Header done, we will not fail from now on.
if len(in) < 3 {
return nil
}
tmp := in[:3]
bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
h.FirstBlock.Last = bh&1 != 0
blockType := blockType((bh >> 1) & 3)
// find size.
cSize := int(bh >> 3)
switch blockType {
case blockTypeReserved:
return nil
case blockTypeRLE:
h.FirstBlock.Compressed = true
h.FirstBlock.DecompressedSize = cSize
h.FirstBlock.CompressedSize = 1
case blockTypeCompressed:
h.FirstBlock.Compressed = true
h.FirstBlock.CompressedSize = cSize
case blockTypeRaw:
h.FirstBlock.DecompressedSize = cSize
h.FirstBlock.CompressedSize = cSize
default:
panic("Invalid block type")
}
h.FirstBlock.OK = true
return nil
} | vendor/github.com/klauspost/compress/zstd/decodeheader.go | 0.639961 | 0.433082 | decodeheader.go | starcoder |
package bo
import (
"math"
)
// Exploration is the strategy to use for exploring the Gaussian process.
type Exploration interface {
	// Estimate returns the acquisition score for candidate point x on the
	// fitted process gp; minimize selects whether lower or higher process
	// estimates are preferred.
	Estimate(gp *GP, minimize bool, x []float64) (float64, error)
}
// UCB implements upper confidence bound exploration.
type UCB struct {
Kappa float64
}
// Estimate implements Exploration using the upper-confidence-bound rule:
// mean plus Kappa times the standard deviation, with the sign of the
// confidence term flipped when minimizing.
func (e UCB) Estimate(gp *GP, minimize bool, x []float64) (float64, error) {
	mean, sd, err := gp.Estimate(x)
	if err != nil {
		return 0, err
	}
	bonus := e.Kappa * sd
	if minimize {
		bonus = -bonus
	}
	return mean + bonus, nil
}
// EI implements an expected-improvement style exploration strategy.
type EI struct {
}
// Estimate implements Exploration using an expected-improvement style score
// built from the process mean and standard deviation at x.
func (e EI) Estimate(gp *GP, minimize bool, x []float64) (float64, error) {
	mean, std, err := gp.Estimate(x)
	if err != nil {
		return 0, err
	}
	// NOTE(review): textbook EI uses (mean - best observed value) here; this
	// code uses the raw mean, as the original author's question below hints.
	// Confirm whether a y_max term should be subtracted before relying on it.
	a := mean // (mean - y_max)? ymax?
	// NOTE(review): std == 0 makes z NaN; presumably callers never evaluate
	// a point with zero predicted variance — verify.
	z := a / std
	if minimize {
		return a*StdNormal.CDF(z) - std*StdNormal.PDF(z), nil
	}
	return a*StdNormal.CDF(z) + std*StdNormal.PDF(z), nil
}
// NormalDist is a normal (Gaussian) distribution with mean Mu and
// standard deviation Sigma.
type NormalDist struct {
Mu, Sigma float64
}
// StdNormal is the standard normal distribution (Mu = 0, Sigma = 1)
var StdNormal = NormalDist{0, 1}
// 1/sqrt(2 * pi)
const invSqrt2Pi = 0.39894228040143267793994605993438186847585863116493465766592583
// PDF returns the probability density of the distribution at x.
func (n NormalDist) PDF(x float64) float64 {
	// Gaussian density: exp(-(x-mu)^2 / (2 sigma^2)) / (sigma * sqrt(2 pi)).
	z := x - n.Mu
	return math.Exp(-z*z/(2*n.Sigma*n.Sigma)) * invSqrt2Pi / n.Sigma
}
// CDF returns the cumulative distribution function of the distribution at x,
// computed via the complementary error function.
func (n NormalDist) CDF(x float64) float64 {
	return math.Erfc(-(x-n.Mu)/(n.Sigma*math.Sqrt2)) / 2
}
// BarrierFunc returns a value that is added to the value to bound the
// optimization.
type BarrierFunc interface {
Val(x []float64, params []Param) float64
Grad(x []float64, params []Param) []float64
}
// BasicBarrier returns -Inf if any x value is outside its parameter's
// [min, max] range, and 0 otherwise.
func BasicBarrier(x []float64, params []Param) float64 {
	for i, p := range params {
		if x[i] < p.GetMin() || x[i] > p.GetMax() {
			return math.Inf(-1)
		}
	}
	return 0
}
// LogBarrier implements a logarithmic barrier function.
type LogBarrier struct{}
// Val returns the value of the barrier function: the sum of log2 distances
// from each coordinate to both of its parameter bounds, or -Inf when any
// coordinate lies outside its range.
func (LogBarrier) Val(x []float64, params []Param) float64 {
	sum := 0.0
	for i, p := range params {
		sum += math.Log2(p.GetMax() - x[i])
		sum += math.Log2(x[i] - p.GetMin())
	}
	// An out-of-range coordinate yields the log of a negative number (NaN).
	if math.IsNaN(sum) {
		return math.Inf(-1)
	}
	return sum
}
// Grad returns the gradient of the barrier function.
func (LogBarrier) Grad(x []float64, params []Param) []float64 {
grad := make([]float64, len(x))
for i, p := range params {
grad[i] = 1/(x[i]-p.GetMin()) - 1/(p.GetMax()-x[i])
// TODO: handle NaN
}
return grad
} | exploration.go | 0.768125 | 0.59408 | exploration.go | starcoder |
package value_render
// used for ES indexing template
import (
"encoding/json"
"errors"
"reflect"
"regexp"
"strings"
"time"
"github.com/golang/glog"
)
// dateFormat renders t using the given layout in the supplied location.
// t may be a time.Time, a json.Number / int / int64 holding milliseconds
// since the Unix epoch, or an RFC 3339 string. On failure the layout
// string itself is returned together with the error.
func dateFormat(t interface{}, format string, location *time.Location) (string, error) {
	if t1, ok := t.(time.Time); ok {
		return t1.In(location).Format(format), nil
	}
	if reflect.TypeOf(t).String() == "json.Number" {
		t1, err := t.(json.Number).Int64()
		if err != nil {
			return format, err
		}
		// Milliseconds -> (seconds, nanoseconds).
		return time.Unix(t1/1000, t1%1000*1000000).In(location).Format(format), nil
	}
	// NOTE(review): Kind()==Int matches named int types too, but the
	// assertion below only accepts plain int — a named int would panic.
	// Presumably only plain ints reach this path; confirm against callers.
	if reflect.TypeOf(t).Kind() == reflect.Int {
		t1 := int64(t.(int))
		return time.Unix(t1/1000, t1%1000*1000000).In(location).Format(format), nil
	}
	if reflect.TypeOf(t).Kind() == reflect.Int64 {
		t1 := t.(int64)
		return time.Unix(t1/1000, t1%1000*1000000).In(location).Format(format), nil
	}
	if reflect.TypeOf(t).Kind() == reflect.String {
		t1, e := time.Parse(time.RFC3339, t.(string))
		if e != nil {
			return format, e
		}
		return t1.In(location).Format(format), nil
	}
	return format, errors.New("could not tell the type timestamp field belongs to")
}
type field struct {
literal bool
date bool
value string
}
type IndexRender struct {
fields []*field
location *time.Location
}
// NewIndexRender compiles pattern t into a sequence of fields. Literal runs
// are kept as-is; each "%{...}" placeholder becomes either a date field
// ("%{+layout}") or an event-field lookup ("%{name}"). The time location
// defaults to UTC until SetTimeLocation overrides it.
func NewIndexRender(t string) *IndexRender {
	// Placeholders look like %{...}; the non-greedy match keeps adjacent
	// placeholders separate.
	r, _ := regexp.Compile(`%{.*?}`)
	fields := make([]*field, 0)
	lastPos := 0
	for _, loc := range r.FindAllStringIndex(t, -1) {
		s, e := loc[0], loc[1]
		// Literal text between the previous placeholder and this one.
		fields = append(fields, &field{
			literal: true,
			value:   t[lastPos:s],
		})
		if t[s+2] == '+' {
			// "%{+layout}": a date field holding the layout string.
			fields = append(fields, &field{
				literal: false,
				date:    true,
				value:   t[s+3 : e-1],
			})
		} else {
			// "%{name}": a lookup of the event field called name.
			fields = append(fields, &field{
				literal: false,
				date:    false,
				value:   t[s+2 : e-1],
			})
		}
		lastPos = e
	}
	// Trailing literal after the last placeholder, if any.
	if lastPos < len(t) {
		fields = append(fields, &field{
			literal: true,
			value:   t[lastPos:],
		})
	}
	return &IndexRender{fields, time.UTC}
}
// SetTimeLocation parses loc into a time.Location and stores it; the
// location is later used when formatting date fields. An invalid name is
// fatal.
func (r *IndexRender) SetTimeLocation(loc string) {
	parsed, err := time.LoadLocation(loc)
	if err != nil {
		glog.Fatalf("invalid location: %s", loc)
	}
	r.location = parsed
}
func (r *IndexRender) Render(event map[string]interface{}) interface{} {
fields := make([]string, len(r.fields))
for i, f := range r.fields {
if f.literal {
fields[i] = f.value
continue
}
if f.date {
if t, ok := event["@timestamp"]; ok {
fields[i], _ = dateFormat(t, f.value, r.location)
} else {
fields[i], _ = dateFormat(time.Now(), f.value, r.location)
}
} else {
if s, ok := event[f.value]; !ok {
fields[i] = "null"
} else {
if fields[i], ok = s.(string); !ok {
fields[i] = "null"
}
}
}
}
return strings.Join(fields, "")
} | value_render/index_render.go | 0.50708 | 0.446434 | index_render.go | starcoder |
package hdrcolour
import (
"encoding/json"
"fmt"
"image/color"
"github.com/DexterLB/traytor/maths"
)
// Colour is a representation of a float32 RGB colour
type Colour struct {
R, G, B float32
}
// String returns the string representation of the colour in the form of {r, g, b}
func (c *Colour) String() string {
return fmt.Sprintf("{%.3g, %.3g, %.3g}", c.R, c.G, c.B)
}
// String returns the string representation of the 32bit colour in the form of [r, g, b]
func (c *Colour32Bit) String() string {
return fmt.Sprintf("[%d, %d, %d]", c.R, c.G, c.B)
}
// Colour32Bit is 32bit colour implementing the color.Color interface
type Colour32Bit struct {
R, G, B uint32
}
// NewColour32Bit return a new 32bit colour
func NewColour32Bit(r, g, b uint32) *Colour32Bit {
return &Colour32Bit{R: r, G: g, B: b}
}
// RGBA implements the color.Color interface converting the 32bit colour to 32bit colour with alpha
func (c *Colour32Bit) RGBA() (r, g, b, a uint32) {
return c.R, c.G, c.B, 65535
}
// New returns a new RGB colour
func New(r, g, b float32) *Colour {
return &Colour{R: r, G: g, B: b}
}
// To32Bit returns each of the components of the given RGB color to uint32
func (c *Colour) To32Bit() *Colour32Bit {
return NewColour32Bit(linearTosRGB(c.R), linearTosRGB(c.G), linearTosRGB(c.B))
}
// linearTosRGB converts a linear-light component in [0, 1] to a 16-bit
// gamma-encoded sRGB component in [0, 65535].
func linearTosRGB(x float32) uint32 {
	if x <= 0 {
		return 0
	}
	if x >= 1 {
		return 65535
	}
	// Standard sRGB transfer function (IEC 61966-2-1): linear segment below
	// the cutoff, power curve above. The original used 12.02/0.00313008,
	// which are typos of the spec constants (12.92/0.0031308) and broke the
	// round trip with sRGBToLinear below.
	if x <= 0.0031308 {
		x = x * 12.92
	} else {
		x = (1.055)*maths.Pow32(x, 1.0/2.4) - 0.055
	}
	return uint32(maths.Round32(x * 65535.0))
}
// sRGBToLinear converts a 16-bit gamma-encoded sRGB component into a
// linear-light float in [0, 1] using the standard sRGB transfer curve.
func sRGBToLinear(i uint32) float32 {
	if i > 65535 {
		return 1
	}
	normalized := float32(i) / 65535.0
	if normalized <= 0.04045 {
		return normalized / 12.92
	}
	return maths.Pow32((normalized+0.055)/1.055, 2.4)
}
// FromColor takes any colour that implements the color.Color interface and turns it into RGB colout(r, g, b are between 0 and 1)
func FromColor(c color.Color) *Colour {
r, g, b, _ := c.RGBA()
return New(sRGBToLinear(r), sRGBToLinear(g), sRGBToLinear(b))
}
// MakeZero returns black RGB colour
func (c *Colour) MakeZero() {
c.SetColour(0, 0, 0)
}
// SetColour sets the colour's components to the given r, g and b
func (c *Colour) SetColour(r, g, b float32) {
c.R, c.G, c.B = r, g, b
}
// Intensity returns the intensity of the given colour
func (c *Colour) Intensity() float32 {
return (c.R + c.G + c.B) / 3.0
}
// Add adds another colour to this one
func (c *Colour) Add(other *Colour) {
c.R += other.R
c.G += other.G
c.B += other.B
}
// Scale multiplies the colour by the given multiplier
func (c *Colour) Scale(multiplier float32) {
c.R *= multiplier
c.G *= multiplier
c.B *= multiplier
}
// Scaled returns a new colour which is the product of the original and multiplier
func (c *Colour) Scaled(multiplier float32) *Colour {
return New(
c.R*multiplier,
c.G*multiplier,
c.B*multiplier,
)
}
// UnmarshalJSON implements the json.Unmarshaler interface, expecting a JSON
// array of exactly three numbers: [r, g, b].
func (c *Colour) UnmarshalJSON(data []byte) error {
	var components []float32
	if err := json.Unmarshal(data, &components); err != nil {
		return err
	}
	// Guard the indexing below: the original panicked with an
	// index-out-of-range on arrays shorter than three elements.
	if len(components) != 3 {
		return fmt.Errorf("colour must have exactly 3 components, got %d", len(components))
	}
	c.R = components[0]
	c.G = components[1]
	c.B = components[2]
	return nil
}
// AddColours adds two colours
func AddColours(first, other *Colour) *Colour {
r := first.R + other.R
g := first.G + other.G
b := first.B + other.B
return New(r, g, b)
}
// MultiplyColours returns the Product of two colours
func MultiplyColours(first, other *Colour) *Colour {
r := first.R * other.R
g := first.G * other.G
b := first.B * other.B
return New(r, g, b)
}
// MultiplyBy other colour sets the given vector to its product with the other colour
func (c *Colour) MultiplyBy(other *Colour) {
c.R *= other.R
c.G *= other.G
c.B *= other.B
} | hdrcolour/colours.go | 0.908148 | 0.544378 | colours.go | starcoder |
package mysql
import (
"fmt"
"strconv"
"strings"
"ariga.io/atlas/sql/internal/sqlx"
"ariga.io/atlas/sql/schema"
)
// FormatType converts schema type to its column form in the database.
// An error is returned if the type cannot be recognized.
func FormatType(t schema.Type) (string, error) {
	var f string
	switch t := t.(type) {
	case *BitType:
		f = strings.ToLower(t.T)
	case *schema.BoolType:
		// Map all flavors to a single form.
		switch f = strings.ToLower(t.T); f {
		case "bool", "boolean", "tinyint", "tinyint(1)":
			f = "bool"
		}
	case *schema.BinaryType:
		f = strings.ToLower(t.T)
		if f == TypeVarBinary {
			// Zero is also a valid length.
			f = fmt.Sprintf("%s(%d)", f, t.Size)
		}
	case *schema.DecimalType:
		if f = strings.ToLower(t.T); f != TypeDecimal && f != TypeNumeric {
			return "", fmt.Errorf("mysql: unexpected decimal type: %q", t.T)
		}
		switch p, s := t.Precision, t.Scale; {
		case p < 0 || s < 0:
			return "", fmt.Errorf("mysql: decimal type must have precision > 0 and scale >= 0: %d, %d", p, s)
		case p < s:
			return "", fmt.Errorf("mysql: decimal type must have precision >= scale: %d < %d", p, s)
		case p == 0 && s == 0:
			// The default value for precision is 10 (i.e. decimal(0,0) = decimal(10)).
			p = 10
			fallthrough
		case s == 0:
			// In standard SQL, the syntax DECIMAL(M) is equivalent to DECIMAL(M,0),
			f = fmt.Sprintf("decimal(%d)", p)
		default:
			f = fmt.Sprintf("decimal(%d,%d)", p, s)
		}
	case *schema.EnumType:
		f = fmt.Sprintf("enum(%s)", formatValues(t.Values))
	case *schema.FloatType:
		f = strings.ToLower(t.T)
		// FLOAT with precision > 24, become DOUBLE.
		// Also, REAL is a synonym for DOUBLE (if REAL_AS_FLOAT was not set).
		if f == TypeFloat && t.Precision > 24 || f == TypeReal {
			f = TypeDouble
		}
	case *schema.IntegerType:
		f = strings.ToLower(t.T)
		if t.Unsigned {
			f += " unsigned"
		}
	case *schema.JSONType:
		f = strings.ToLower(t.T)
	case *SetType:
		// A SET column must be rendered with the "set" keyword; the original
		// emitted "enum(...)" here, silently changing the column type.
		f = fmt.Sprintf("set(%s)", formatValues(t.Values))
	case *schema.StringType:
		f = strings.ToLower(t.T)
		switch f {
		case TypeChar:
			// Not a single char.
			if t.Size > 0 {
				f += fmt.Sprintf("(%d)", t.Size)
			}
		case TypeVarchar:
			// Zero is also a valid length.
			f = fmt.Sprintf("varchar(%d)", t.Size)
		}
	case *schema.SpatialType:
		f = strings.ToLower(t.T)
	case *schema.TimeType:
		f = strings.ToLower(t.T)
	case *schema.UnsupportedType:
		// Do not accept unsupported types as we should cover all cases.
		return "", fmt.Errorf("unsupported type %q", t.T)
	default:
		return "", fmt.Errorf("invalid schema type %T", t)
	}
	return f, nil
}
// ParseType returns the schema.Type value represented by the given raw type.
// The raw value is expected to follow the format in MySQL information schema.
func ParseType(raw string) (schema.Type, error) {
parts, size, unsigned, err := parseColumn(raw)
if err != nil {
return nil, err
}
switch t := parts[0]; t {
case TypeBit:
return &BitType{
T: t,
}, nil
case TypeTinyInt, TypeSmallInt, TypeMediumInt, TypeInt, TypeBigInt:
if size == 1 {
return &schema.BoolType{
T: t,
}, nil
}
// For integer types, the size represents the display width and does not
// constrain the range of values that can be stored in the column.
// The storage byte-size is inferred from the type name (i.e TINYINT takes
// a single byte).
ft := &schema.IntegerType{
T: t,
Unsigned: unsigned,
}
if attr := parts[len(parts)-1]; attr == "zerofill" && size != 0 {
ft.Attrs = []schema.Attr{
&DisplayWidth{
N: int(size),
},
&ZeroFill{
A: attr,
},
}
}
return ft, nil
case TypeNumeric, TypeDecimal:
dt := &schema.DecimalType{
T: t,
}
if len(parts) > 1 {
p, err := strconv.ParseInt(parts[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("parse precision %q", parts[1])
}
dt.Precision = int(p)
}
if len(parts) > 2 {
s, err := strconv.ParseInt(parts[2], 10, 64)
if err != nil {
return nil, fmt.Errorf("parse scale %q", parts[1])
}
dt.Scale = int(s)
}
return dt, nil
case TypeFloat, TypeDouble, TypeReal:
ft := &schema.FloatType{
T: t,
}
if len(parts) > 1 {
p, err := strconv.ParseInt(parts[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("parse precision %q", parts[1])
}
ft.Precision = int(p)
}
return ft, nil
case TypeBinary, TypeVarBinary:
return &schema.BinaryType{
T: t,
Size: int(size),
}, nil
case TypeTinyBlob, TypeMediumBlob, TypeBlob, TypeLongBlob:
return &schema.BinaryType{
T: t,
}, nil
case TypeChar, TypeVarchar:
return &schema.StringType{
T: t,
Size: int(size),
}, nil
case TypeTinyText, TypeMediumText, TypeText, TypeLongText:
return &schema.StringType{
T: t,
}, nil
case TypeEnum, TypeSet:
// Parse the enum values according to the MySQL format.
// github.com/mysql/mysql-server/blob/8.0/sql/field.cc#Field_enum::sql_type
rv := strings.TrimSuffix(strings.TrimPrefix(raw, t+"("), ")")
if rv == "" {
return nil, fmt.Errorf("mysql: unexpected enum type: %q", raw)
}
values := strings.Split(rv, "','")
for i := range values {
values[i] = strings.Trim(values[i], "'")
}
if t == TypeEnum {
return &schema.EnumType{
T: TypeEnum,
Values: values,
}, nil
}
return &SetType{
Values: values,
}, nil
case TypeDate, TypeDateTime, TypeTime, TypeTimestamp, TypeYear:
tt := &schema.TimeType{
T: t,
}
if len(parts) > 1 {
p, err := strconv.ParseInt(parts[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("parse precision %q", parts[1])
}
tt.Precision = int(p)
}
return tt, nil
case TypeJSON:
return &schema.JSONType{
T: t,
}, nil
case TypePoint, TypeMultiPoint, TypeLineString, TypeMultiLineString, TypePolygon, TypeMultiPolygon, TypeGeometry, TypeGeoCollection, TypeGeometryCollection:
return &schema.SpatialType{
T: t,
}, nil
default:
return &schema.UnsupportedType{
T: t,
}, nil
}
}
// mustFormat calls to FormatType and panics in case of error.
func mustFormat(t schema.Type) string {
	formatted, err := FormatType(t)
	if err != nil {
		panic(err)
	}
	return formatted
}
// formatValues formats ENUM and SET values.
func formatValues(vs []string) string {
values := make([]string, len(vs))
for i := range vs {
values[i] = vs[i]
if !sqlx.IsQuoted(values[i], '"', '\'') {
values[i] = "'" + values[i] + "'"
}
}
return strings.Join(values, ",")
} | sql/mysql/convert.go | 0.55929 | 0.474327 | convert.go | starcoder |
package regexwriter
import "regexp"
// RegexAction is a combination of a regular expression and a function
type RegexAction struct {
// Search expression
regex *regexp.Regexp
// Action to take on matched/non-matched bytes
action func([][]byte)
}
// IsMatch returns whether the regular expresion is a match for the
// given bytes
func (r RegexAction) IsMatch(b []byte) bool {
return r.regex.Match(b)
}
// Matches returns all regular expression matches in the given bytes
func (r RegexAction) Matches(b []byte) [][][]byte {
return r.regex.FindAllSubmatch(b, -1)
}
// PerformAction executes the defined function on the given multidimensional
// byte array
func (r RegexAction) PerformAction(bytes [][]byte) {
r.action(bytes)
}
// CreateAction returns a RegexAction object
func CreateAction(r *regexp.Regexp, a func([][]byte)) RegexAction {
return RegexAction{r, a}
}
// RegexWriter allows binding of matching and non-matching actions to
// a writer interface
type RegexWriter struct {
// RawOutput contains the full output of the writer
RawOutput string
matchActions []RegexAction
nonMatchActions []RegexAction
}
// Reset clears this RegexWriter's set members
func (re *RegexWriter) Reset() {
re.ClearMatchActions()
re.ClearNonMatchActions()
re.RawOutput = ""
}
// Write is the implementation of the io.Writer interface. It records the
// written bytes in RawOutput and runs every registered match and non-match
// action against them.
//
// The receiver must be a pointer: with the original value receiver the
// RawOutput accumulation mutated a copy and was silently discarded. All
// sibling methods of RegexWriter already use pointer receivers, so callers
// necessarily hold a *RegexWriter.
func (re *RegexWriter) Write(b []byte) (n int, err error) {
	re.RawOutput += string(b)
	for _, v := range re.matchActions {
		if v.IsMatch(b) {
			for _, match := range v.Matches(b) {
				v.PerformAction(match)
			}
		}
	}
	for _, v := range re.nonMatchActions {
		if !v.IsMatch(b) {
			// No submatches exist here; wrap the raw bytes so the handler
			// signature ([][]byte) can be reused.
			v.PerformAction([][]byte{b})
		}
	}
	return len(b), nil
}
// AddMatchAction registers a handler that is invoked with each submatch of
// pattern found in written bytes. pattern must be a valid regular
// expression (MustCompile panics otherwise).
func (re *RegexWriter) AddMatchAction(pattern string, handler func([][]byte)) {
	compiled := regexp.MustCompile(pattern)
	if re.matchActions == nil {
		re.matchActions = make([]RegexAction, 0)
	}
	re.matchActions = append(re.matchActions, CreateAction(compiled, handler))
}
// ClearMatchActions clears the matchActions array
func (re *RegexWriter) ClearMatchActions() {
re.matchActions = nil
}
// AddNonMatchAction adds a non-matching action to the RegexWriter instance
func (re *RegexWriter) AddNonMatchAction(pattern string, handler func([][]byte)) {
regex := regexp.MustCompile(pattern)
if re.nonMatchActions == nil {
re.nonMatchActions = make([]RegexAction, 0)
}
re.nonMatchActions = append(re.nonMatchActions, CreateAction(regex, handler))
}
// ClearNonMatchActions clears the nonMatchActions array
func (re *RegexWriter) ClearNonMatchActions() {
re.nonMatchActions = nil
}
// ClearRawOutput clears the raw output member
func (re *RegexWriter) ClearRawOutput() {
re.RawOutput = ""
} | regexwriter.go | 0.688992 | 0.448849 | regexwriter.go | starcoder |
package binarytree
type Node struct {
value int
parent *Node
left *Node
right *Node
}
func NewNode(i int) *Node {
return &Node{value: i}
}
// Compare orders n against m by value: -1 if smaller, 1 if larger,
// 0 if equal.
func (n *Node) Compare(m *Node) int {
	switch {
	case n.value < m.value:
		return -1
	case n.value > m.value:
		return 1
	default:
		return 0
	}
}
func (n *Node) Value() int {
return n.value
}
func (n *Node) delete(parent bool) {
if parent && n.parent != nil {
if n.parent.left == n {
n.parent.left = nil
} else {
n.parent.right = nil
}
}
n.left = nil
n.right = nil
n.parent = nil
}
type BSTree struct {
head *Node
size int
}
func NewTree(root *Node) *BSTree {
if root == nil {
return &BSTree{}
}
return &BSTree{head: root, size: 1}
}
// Insert adds value i to the tree, descending from the root and attaching a
// new leaf at the first empty child slot. Values smaller than the current
// node go left; equal or larger values go right (duplicates are kept).
func (bst *BSTree) Insert(i int) {
	n := &Node{value: i}
	if bst.head == nil {
		// Empty tree: the new node becomes the root.
		bst.head = n
		bst.size++
		return
	}
	h := bst.head
	for {
		if n.Compare(h) == -1 {
			if h.left != nil {
				h = h.left
				continue
			}
			h.left = n
			n.parent = h
		} else {
			if h.right != nil {
				h = h.right
				continue
			}
			h.right = n
			n.parent = h
		}
		break
	}
	bst.size++
}
// Find returns the node holding value i, or nil if it is not in the tree.
func (bst *BSTree) Find(i int) *Node {
	h := bst.head
	n := &Node{value: i}
	for h != nil {
		switch h.Compare(n) {
		case -1:
			// Current node is smaller than the target: search right.
			h = h.right
		case 1:
			h = h.left
		case 0:
			return h
		default:
			panic("Node not found") // unreachable: Compare only returns -1, 0 or 1
		}
	}
	return nil
}
// Delete removes the first node found with value i and reports whether a
// node was removed. Four cases are handled:
//  1. the node has no children
//  2. the node has only a left child
//  3. the node has only a right child
//  4. the node has both children (swap values with the leftmost node of the
//     right subtree, then delete that leaf)
func (bst *BSTree) Delete(i int) bool {
	var parent *Node
	h := bst.head
	n := &Node{value: i}
	for h != nil {
		switch n.Compare(h) {
		case -1:
			parent = h
			h = h.left
		case 1:
			parent = h
			h = h.right
		case 0:
			isleftleaf := false
			if h != bst.head && parent.left == h { // current node is not the root
				isleftleaf = true
			}
			if h.left == nil && h.right == nil {
				// Case 1: leaf node.
				if h == bst.head {
					bst.head = nil
					bst.size--
					return true
				}
				h.delete(true)
			}
			if h.left != nil && h.right == nil {
				// Case 2: only a left child, which replaces the node.
				if h == bst.head {
					bst.head = h.left
					bst.size--
					return true
				}
				if isleftleaf {
					parent.left = h.left
				} else {
					parent.right = h.left
				}
				h.delete(false)
			}
			if h.right != nil && h.left == nil {
				// Case 3: only a right child, which replaces the node.
				if h == bst.head {
					bst.head = h.right
					bst.size--
					return true
				}
				if isleftleaf {
					parent.left = h.right
				} else {
					parent.right = h.right
				}
				h.delete(false)
			}
			if h.left != nil && h.right != nil {
				// Case 4: swap with the in-order successor and unlink it.
				leftmost := findLeftmost(h.right)
				h.value, leftmost.value = leftmost.value, h.value
				leftmost.delete(true)
			}
			bst.size--
			return true
		}
	}
	return false
}
func (bst *BSTree) Size() int {
return bst.size
}
// findLeftmost returns the minimum node of the subtree rooted at n (the
// node reached by repeatedly following left children), or nil when n is nil.
func findLeftmost(n *Node) *Node {
	if n == nil {
		return nil
	}
	cur := n
	for cur.left != nil {
		cur = cur.left
	}
	return cur
}
func PreOrder(root *Node, ret *[]int) {
if root == nil {
return
}
*ret = append(*ret, root.value)
PreOrder(root.left, ret)
PreOrder(root.right, ret)
}
func InOrder(root *Node, ret *[]int) {
if root == nil {
return
}
InOrder(root.left, ret)
*ret = append(*ret, root.value)
InOrder(root.right, ret)
}
func PostOrder(root *Node, ret *[]int) {
if root == nil {
return
}
PostOrder(root.left, ret)
PostOrder(root.right, ret)
*ret = append(*ret, root.value)
} | go/binarytree/bst.go | 0.578091 | 0.400222 | bst.go | starcoder |
package iso20022
// Provides further details specific to the individual transaction(s) included in the message.
type CreditTransferTransaction9 struct {
// Unique identification, as assigned by a sending party, to unambiguously identify the credit instruction within the message.
CreditIdentification *Max35Text `xml:"CdtId"`
// Identifies whether a single entry per individual direct debit transaction or a batch entry for the sum of the amounts of all transactions within the group of a message is requested.
// Usage: Batch booking is used to request and not order a possible batch booking.
BatchBooking *BatchBookingIndicator `xml:"BtchBookg,omitempty"`
// Further specifies the type of transaction.
PaymentTypeInformation *PaymentTypeInformation21 `xml:"PmtTpInf,omitempty"`
// Amount of money moved between the instructing agent and the instructed agent.
TotalInterbankSettlementAmount *ActiveCurrencyAndAmount `xml:"TtlIntrBkSttlmAmt,omitempty"`
// Date on which the amount of money ceases to be available to the agent that owes it and when the amount of money becomes available to the agent to which it is due.
InterbankSettlementDate *ISODate `xml:"IntrBkSttlmDt,omitempty"`
// Agent that instructs the next party in the chain to carry out the (set of) instruction(s).
InstructingAgent *BranchAndFinancialInstitutionIdentification5 `xml:"InstgAgt,omitempty"`
// Agent that is instructed by the previous party in the chain to carry out the (set of) instruction(s).
InstructedAgent *BranchAndFinancialInstitutionIdentification5 `xml:"InstdAgt,omitempty"`
// Agent between the debtor's agent and the creditor's agent.
//
// Usage: If more than one intermediary agent is present, then IntermediaryAgent1 identifies the agent between the DebtorAgent and the IntermediaryAgent2.
IntermediaryAgent1 *BranchAndFinancialInstitutionIdentification5 `xml:"IntrmyAgt1,omitempty"`
// Unambiguous identification of the account of the intermediary agent 1 at its servicing agent in the payment chain.
IntermediaryAgent1Account *CashAccount24 `xml:"IntrmyAgt1Acct,omitempty"`
// Agent between the debtor's agent and the creditor's agent.
//
// Usage: If more than two intermediary agents are present, then IntermediaryAgent2 identifies the agent between the IntermediaryAgent1 and the IntermediaryAgent3.
IntermediaryAgent2 *BranchAndFinancialInstitutionIdentification5 `xml:"IntrmyAgt2,omitempty"`
// Unambiguous identification of the account of the intermediary agent 2 at its servicing agent in the payment chain.
IntermediaryAgent2Account *CashAccount24 `xml:"IntrmyAgt2Acct,omitempty"`
// Agent between the debtor's agent and the creditor's agent.
//
// Usage: If IntermediaryAgent3 is present, then it identifies the agent between the IntermediaryAgent 2 and the CreditorAgent.
IntermediaryAgent3 *BranchAndFinancialInstitutionIdentification5 `xml:"IntrmyAgt3,omitempty"`
// Unambiguous identification of the account of the intermediary agent 3 at its servicing agent in the payment chain.
IntermediaryAgent3Account *CashAccount24 `xml:"IntrmyAgt3Acct,omitempty"`
// Financial institution servicing an account for the creditor.
CreditorAgent *BranchAndFinancialInstitutionIdentification5 `xml:"CdtrAgt,omitempty"`
// Unambiguous identification of the account of the creditor agent at its servicing agent to which a credit entry will be made as a result of the payment transaction.
CreditorAgentAccount *CashAccount24 `xml:"CdtrAgtAcct,omitempty"`
// Financial institution that receives an amount of money from the financial institutional debtor.
Creditor *BranchAndFinancialInstitutionIdentification5 `xml:"Cdtr"`
// Unambiguous identification of the account of the creditor to which a credit entry will be posted as a result of the payment transaction.
CreditorAccount *CashAccount24 `xml:"CdtrAcct,omitempty"`
// Ultimate financial institution to which an amount of money is due.
UltimateCreditor *BranchAndFinancialInstitutionIdentification5 `xml:"UltmtCdtr,omitempty"`
// Further information related to the processing of the payment instruction, provided by the initiating party, and intended for the creditor agent.
InstructionForCreditorAgent []*InstructionForCreditorAgent2 `xml:"InstrForCdtrAgt,omitempty"`
// Provides information on the individual debit transaction(s) included in the message.
DirectDebitTransactionInformation []*DirectDebitTransactionInformation15 `xml:"DrctDbtTxInf"`
// Additional information that cannot be captured in the structured elements and/or any other specific block.
SupplementaryData []*SupplementaryData1 `xml:"SplmtryData,omitempty"`
}
func (c *CreditTransferTransaction9) SetCreditIdentification(value string) {
c.CreditIdentification = (*Max35Text)(&value)
}
func (c *CreditTransferTransaction9) SetBatchBooking(value string) {
c.BatchBooking = (*BatchBookingIndicator)(&value)
}
func (c *CreditTransferTransaction9) AddPaymentTypeInformation() *PaymentTypeInformation21 {
c.PaymentTypeInformation = new(PaymentTypeInformation21)
return c.PaymentTypeInformation
}
func (c *CreditTransferTransaction9) SetTotalInterbankSettlementAmount(value, currency string) {
c.TotalInterbankSettlementAmount = NewActiveCurrencyAndAmount(value, currency)
}
func (c *CreditTransferTransaction9) SetInterbankSettlementDate(value string) {
c.InterbankSettlementDate = (*ISODate)(&value)
}
func (c *CreditTransferTransaction9) AddInstructingAgent() *BranchAndFinancialInstitutionIdentification5 {
c.InstructingAgent = new(BranchAndFinancialInstitutionIdentification5)
return c.InstructingAgent
}
func (c *CreditTransferTransaction9) AddInstructedAgent() *BranchAndFinancialInstitutionIdentification5 {
c.InstructedAgent = new(BranchAndFinancialInstitutionIdentification5)
return c.InstructedAgent
}
func (c *CreditTransferTransaction9) AddIntermediaryAgent1() *BranchAndFinancialInstitutionIdentification5 {
c.IntermediaryAgent1 = new(BranchAndFinancialInstitutionIdentification5)
return c.IntermediaryAgent1
}
func (c *CreditTransferTransaction9) AddIntermediaryAgent1Account() *CashAccount24 {
c.IntermediaryAgent1Account = new(CashAccount24)
return c.IntermediaryAgent1Account
}
func (c *CreditTransferTransaction9) AddIntermediaryAgent2() *BranchAndFinancialInstitutionIdentification5 {
c.IntermediaryAgent2 = new(BranchAndFinancialInstitutionIdentification5)
return c.IntermediaryAgent2
}
func (c *CreditTransferTransaction9) AddIntermediaryAgent2Account() *CashAccount24 {
c.IntermediaryAgent2Account = new(CashAccount24)
return c.IntermediaryAgent2Account
}
func (c *CreditTransferTransaction9) AddIntermediaryAgent3() *BranchAndFinancialInstitutionIdentification5 {
c.IntermediaryAgent3 = new(BranchAndFinancialInstitutionIdentification5)
return c.IntermediaryAgent3
}
func (c *CreditTransferTransaction9) AddIntermediaryAgent3Account() *CashAccount24 {
c.IntermediaryAgent3Account = new(CashAccount24)
return c.IntermediaryAgent3Account
}
func (c *CreditTransferTransaction9) AddCreditorAgent() *BranchAndFinancialInstitutionIdentification5 {
c.CreditorAgent = new(BranchAndFinancialInstitutionIdentification5)
return c.CreditorAgent
}
func (c *CreditTransferTransaction9) AddCreditorAgentAccount() *CashAccount24 {
c.CreditorAgentAccount = new(CashAccount24)
return c.CreditorAgentAccount
}
func (c *CreditTransferTransaction9) AddCreditor() *BranchAndFinancialInstitutionIdentification5 {
c.Creditor = new(BranchAndFinancialInstitutionIdentification5)
return c.Creditor
}
func (c *CreditTransferTransaction9) AddCreditorAccount() *CashAccount24 {
c.CreditorAccount = new(CashAccount24)
return c.CreditorAccount
}
func (c *CreditTransferTransaction9) AddUltimateCreditor() *BranchAndFinancialInstitutionIdentification5 {
c.UltimateCreditor = new(BranchAndFinancialInstitutionIdentification5)
return c.UltimateCreditor
}
func (c *CreditTransferTransaction9) AddInstructionForCreditorAgent() *InstructionForCreditorAgent2 {
newValue := new (InstructionForCreditorAgent2)
c.InstructionForCreditorAgent = append(c.InstructionForCreditorAgent, newValue)
return newValue
}
func (c *CreditTransferTransaction9) AddDirectDebitTransactionInformation() *DirectDebitTransactionInformation15 {
newValue := new (DirectDebitTransactionInformation15)
c.DirectDebitTransactionInformation = append(c.DirectDebitTransactionInformation, newValue)
return newValue
}
func (c *CreditTransferTransaction9) AddSupplementaryData() *SupplementaryData1 {
newValue := new (SupplementaryData1)
c.SupplementaryData = append(c.SupplementaryData, newValue)
return newValue
} | CreditTransferTransaction9.go | 0.814607 | 0.473475 | CreditTransferTransaction9.go | starcoder |
package internal
import (
"math"
"math/big"
"reflect"
"github.com/tada/catch"
"github.com/tada/dgo/dgo"
)
type (
	// bf type anonymizes the big.Float to avoid collisions between a Float attribute and the Float() function while
	// the bigFloatVal still inherits all functions from big.Float since the actual field is unnamed.
	_bf = *big.Float
	// bigFloatVal is the dgo value wrapper around an embedded *big.Float.
	bigFloatVal struct {
		_bf
	}
	// defaultBigFloatType is the unconstrained big float type.
	defaultBigFloatType struct {
		defaultFloatType
	}
	// bigFloatType is the range-constrained big float type (see TiBigFloatRange).
	bigFloatType struct {
		floatType
	}
)
// DefaultBigFloatType is the unconstrained BigFloat type
var DefaultBigFloatType = &defaultBigFloatType{}
// reflectBigFloatType caches the reflect.Type of *big.Float for the ReflectType implementations.
var reflectBigFloatType = reflect.TypeOf(&big.Float{})
// New creates a big float value from arg, checked against this type.
func (t *defaultBigFloatType) New(arg dgo.Value) dgo.Value {
	return newBigFloat(t, arg)
}
// ReflectType returns the reflect.Type of *big.Float.
func (t *defaultBigFloatType) ReflectType() reflect.Type {
	return reflectBigFloatType
}
// TypeIdentifier returns dgo.TiBigFloat.
func (t *defaultBigFloatType) TypeIdentifier() dgo.TypeIdentifier {
	return dgo.TiBigFloat
}
// New creates a big float value from arg, checked against this range type.
func (t *bigFloatType) New(arg dgo.Value) dgo.Value {
	return newBigFloat(t, arg)
}
// ReflectType returns the reflect.Type of *big.Float.
func (t *bigFloatType) ReflectType() reflect.Type {
	return reflectBigFloatType
}
// TypeIdentifier returns dgo.TiBigFloatRange.
func (t *bigFloatType) TypeIdentifier() dgo.TypeIdentifier {
	return dgo.TiBigFloatRange
}
// BigFloat returns the dgo.BigFloat for the given *big.Float.
// The value is wrapped, not copied, so the caller must not mutate v afterwards.
func BigFloat(v *big.Float) dgo.BigFloat {
	return &bigFloatVal{v}
}
// Assignable reports whether values of the type other can be assigned to this
// exact-value type: other must equal this value, or CheckAssignableTo must
// accept it (presumably handling indirections such as aliases — confirm there).
func (v *bigFloatVal) Assignable(other dgo.Type) bool {
	return v.Equals(other) || CheckAssignableTo(nil, other, v)
}
// CompareTo compares this value to other, returning -1, 0, or 1 together with
// a flag telling whether the two values were comparable at all. nil compares
// less than any value; all supported numeric kinds are promoted to *big.Float
// before comparison.
func (v *bigFloatVal) CompareTo(other interface{}) (int, bool) {
	r := 0
	ok := true
	// compare64 promotes a float64 to *big.Float and compares against it.
	compare64 := func(ov float64) {
		r = v._bf.Cmp(big.NewFloat(ov))
	}
	switch ov := other.(type) {
	case nil, nilValue:
		r = 1 // anything is greater than nil
	case *bigFloatVal:
		r = v.Cmp(ov._bf)
	case floatVal:
		compare64(float64(ov))
	case *big.Float:
		r = v.Cmp(ov)
	case float64:
		compare64(ov)
	case float32:
		compare64(float64(ov))
	case *big.Int:
		r = v.Cmp(new(big.Float).SetInt(ov))
	case uint:
		r = v.Cmp(new(big.Float).SetUint64(uint64(ov)))
	case uint64:
		r = v.Cmp(new(big.Float).SetUint64(ov))
	case dgo.Number:
		// Delegate via the number's dgo.Float representation.
		r, ok = v.CompareTo(ov.Float())
	default:
		// Last resort: anything convertible to int64; otherwise not comparable.
		var i int64
		if i, ok = ToInt(ov); ok {
			compare64(float64(i))
		}
	}
	return r, ok
}
// Equals reports whether other is a big float or plain float with exactly the
// same numeric value. Any other type compares unequal.
func (v *bigFloatVal) Equals(other interface{}) bool {
	switch ov := other.(type) {
	case *bigFloatVal:
		return v.Cmp(ov._bf) == 0
	case *big.Float:
		return v.Cmp(ov) == 0
	case floatVal:
		return v.Cmp(big.NewFloat(float64(ov))) == 0
	case float64:
		return v.Cmp(big.NewFloat(ov)) == 0
	case float32:
		return v.Cmp(big.NewFloat(float64(ov))) == 0
	}
	return false
}
// Float returns this value as a dgo.Float (it already is one).
func (v *bigFloatVal) Float() dgo.Float {
	return v
}
// Generic returns the unconstrained big float type.
func (v *bigFloatVal) Generic() dgo.Type {
	return DefaultBigFloatType
}
// GoBigFloat returns the underlying *big.Float without copying.
func (v *bigFloatVal) GoBigFloat() *big.Float {
	return v._bf
}
// GoFloat returns the value as a float64, panicking when it does not fit.
func (v *bigFloatVal) GoFloat() float64 {
	if f, ok := v.ToFloat(); ok {
		return f
	}
	panic(catch.Error(`BigFloat.ToFloat(): value %f cannot fit into a float64`, v))
}
// HashCode returns a hash derived from the gob encoding of the big float.
func (v *bigFloatVal) HashCode() dgo.Hash {
	return bigFloatHash(v._bf)
}
// Inclusive is always true for an exact-value type (it "contains" its bound).
func (v *bigFloatVal) Inclusive() bool {
	return true
}
// Instance reports whether value equals this exact value.
func (v *bigFloatVal) Instance(value interface{}) bool {
	return v.Equals(value)
}
// Integer returns the value truncated to a big integer.
func (v *bigFloatVal) Integer() dgo.Integer {
	bi, _ := v.Int(nil)
	return &bigIntVal{bi}
}
// Max returns this value; an exact value is its own upper bound.
func (v *bigFloatVal) Max() dgo.Float {
	return v
}
// Min returns this value; an exact value is its own lower bound.
func (v *bigFloatVal) Min() dgo.Float {
	return v
}
// New creates a big float from arg, checked against this exact-value type.
func (v *bigFloatVal) New(arg dgo.Value) dgo.Value {
	return newBigFloat(v, arg)
}
// ReflectTo stores the value into the given reflect.Value, dereferencing the
// wrapped pointer unless the target is itself a pointer or interface.
func (v *bigFloatVal) ReflectTo(value reflect.Value) {
	rv := reflect.ValueOf(v._bf)
	k := value.Kind()
	if !(k == reflect.Ptr || k == reflect.Interface) {
		rv = rv.Elem()
	}
	value.Set(rv)
}
// ReflectType returns the reflect.Type of *big.Float.
func (v *bigFloatVal) ReflectType() reflect.Type {
	return reflectBigFloatType
}
// String returns the dgo type-string representation of this value.
func (v *bigFloatVal) String() string {
	return TypeString(v)
}
// ToBigFloat returns the underlying *big.Float.
func (v *bigFloatVal) ToBigFloat() *big.Float {
	return v._bf
}
// ToBigInt returns the value truncated to a *big.Int.
func (v *bigFloatVal) ToBigInt() *big.Int {
	bi, _ := v.Int(nil)
	return bi
}
// ToFloat returns the value as a float64 and whether it fit.
func (v *bigFloatVal) ToFloat() (float64, bool) {
	return demoteToFloat64(v._bf)
}
// ToInt returns the value as an int64 and whether it fit.
func (v *bigFloatVal) ToInt() (int64, bool) {
	return demoteToInt64(v._bf)
}
// ToUint returns the value as a uint64 and whether it fit.
func (v *bigFloatVal) ToUint() (uint64, bool) {
	return demoteToUint64(v._bf)
}
// Type returns the value itself; an exact value acts as its own type.
func (v *bigFloatVal) Type() dgo.Type {
	return v
}
// TypeIdentifier returns dgo.TiBigFloatExact.
func (v *bigFloatVal) TypeIdentifier() dgo.TypeIdentifier {
	return dgo.TiBigFloatExact
}
// bigFloatHash hashes the gob encoding of v; the encode error is ignored
// (GobEncode on a valid *big.Float is not expected to fail).
func bigFloatHash(v *big.Float) dgo.Hash {
	ge, _ := v.GobEncode()
	return bytesHash(ge)
}
// bigFloatFromConvertible converts from into a big-float backed dgo.Float:
// numbers are promoted, booleans map to 1 or 0, and strings are parsed with
// the given precision. Any other input (or an unparsable string) panics.
func bigFloatFromConvertible(from dgo.Value, prec uint) dgo.Float {
	switch from := from.(type) {
	case dgo.Number:
		f := from.Float()
		// Already backed by a *big.Float; reuse as-is.
		if _, ok := f.(dgo.BigFloat); ok {
			return f
		}
		return &bigFloatVal{f.ToBigFloat()}
	case dgo.Boolean:
		if from.GoBool() {
			return &bigFloatVal{big.NewFloat(1)}
		}
		return &bigFloatVal{big.NewFloat(0)}
	case dgo.String:
		// Base 0 lets ParseFloat infer prefixes; round to nearest even.
		if f, _, err := big.ParseFloat(from.GoString(), 0, prec, big.ToNearestEven); err == nil {
			return &bigFloatVal{f}
		}
	}
	panic(catch.Error(`the value '%s' cannot be converted to a big float`, from))
}
var precType = Integer64Type(0, math.MaxUint32, true)
func newBigFloat(t dgo.Type, arg dgo.Value) (f dgo.Float) {
prec := uint(0)
if args, ok := arg.(dgo.Arguments); ok {
args.AssertSize(`big`, 1, 2)
arg = args.Get(0)
if args.Len() > 1 {
prec = uint(args.Arg(`int`, 1, precType).(dgo.Integer).GoInt())
}
}
f = bigFloatFromConvertible(arg, prec)
if !t.Instance(f) {
panic(IllegalAssignment(t, f))
}
return f
} | internal/bigfloat.go | 0.721645 | 0.580293 | bigfloat.go | starcoder |
package iso20022
// Set of elements used to provide the total sum of entries per bank transaction code.
type TotalsPerBankTransactionCode4 struct {
// Number of individual entries for the bank transaction code.
NumberOfEntries *Max15NumericText `xml:"NbOfNtries,omitempty"`
// Total of all individual entries included in the report.
Sum *DecimalNumber `xml:"Sum,omitempty"`
// Total debit or credit amount that is the result of the netted amounts for all debit and credit entries per bank transaction code.
TotalNetEntry *AmountAndDirection35 `xml:"TtlNetNtry,omitempty"`
// Indicates whether the bank transaction code is related to booked or forecast items.
ForecastIndicator *TrueFalseIndicator `xml:"FcstInd,omitempty"`
// Set of elements used to fully identify the type of underlying transaction resulting in an entry.
BankTransactionCode *BankTransactionCodeStructure4 `xml:"BkTxCd"`
// Set of elements used to indicate when the booked amount of money will become available, that is can be accessed and starts generating interest.
Availability []*CashAvailability1 `xml:"Avlbty,omitempty"`
}
func (t *TotalsPerBankTransactionCode4) SetNumberOfEntries(value string) {
t.NumberOfEntries = (*Max15NumericText)(&value)
}
func (t *TotalsPerBankTransactionCode4) SetSum(value string) {
t.Sum = (*DecimalNumber)(&value)
}
func (t *TotalsPerBankTransactionCode4) AddTotalNetEntry() *AmountAndDirection35 {
t.TotalNetEntry = new(AmountAndDirection35)
return t.TotalNetEntry
}
func (t *TotalsPerBankTransactionCode4) SetForecastIndicator(value string) {
t.ForecastIndicator = (*TrueFalseIndicator)(&value)
}
func (t *TotalsPerBankTransactionCode4) AddBankTransactionCode() *BankTransactionCodeStructure4 {
t.BankTransactionCode = new(BankTransactionCodeStructure4)
return t.BankTransactionCode
}
func (t *TotalsPerBankTransactionCode4) AddAvailability() *CashAvailability1 {
newValue := new (CashAvailability1)
t.Availability = append(t.Availability, newValue)
return newValue
} | TotalsPerBankTransactionCode4.go | 0.777469 | 0.468365 | TotalsPerBankTransactionCode4.go | starcoder |
package digraph
// DepthFirstWalk performs a depth-first traversal of the nodes reachable from
// node. The callback runs once per visited node and may return false to
// prevent visiting that node's children. Already-visited nodes are skipped,
// so cycles terminate.
func DepthFirstWalk(node Node, cb func(n Node) bool) {
	visited := make(map[Node]struct{})
	stack := []Node{node}
	for len(stack) > 0 {
		last := len(stack) - 1
		current := stack[last]
		stack = stack[:last]
		if _, done := visited[current]; done {
			continue
		}
		visited[current] = struct{}{}
		if !cb(current) {
			continue
		}
		// Push children in reverse so they pop in original edge order.
		edges := current.Edges()
		for i := len(edges) - 1; i >= 0; i-- {
			stack = append(stack, edges[i].Tail())
		}
	}
}
// FilterDegree returns the nodes whose degree equals the requested value.
// It works with the maps produced by either OutDegree or InDegree.
func FilterDegree(degree int, degrees map[Node]int) []Node {
	var out []Node
	for node, d := range degrees {
		if d != degree {
			continue
		}
		out = append(out, node)
	}
	return out
}
// InDegree computes the in-degree (number of incoming edges) for every node
// reachable through the given nodes' edges. Each listed node appears in the
// result even when nothing points at it.
func InDegree(nodes []Node) map[Node]int {
	degree := make(map[Node]int, len(nodes))
	// First make every listed node present with a zero count.
	for _, n := range nodes {
		if _, seen := degree[n]; !seen {
			degree[n] = 0
		}
	}
	// Then count one incoming edge per edge tail.
	for _, n := range nodes {
		for _, e := range n.Edges() {
			degree[e.Tail()]++
		}
	}
	return degree
}
// OutDegree computes the out-degree (number of outgoing edges) of each of the
// given nodes. (The previous comment incorrectly said "in-degree".)
func OutDegree(nodes []Node) map[Node]int {
	degree := make(map[Node]int, len(nodes))
	for _, n := range nodes {
		degree[n] = len(n.Edges())
	}
	return degree
}
// Sinks returns the nodes with an out-degree of 0 (no outgoing edges).
func Sinks(nodes []Node) []Node {
	return FilterDegree(0, OutDegree(nodes))
}
// Sources returns the nodes with an in-degree of 0 (no incoming edges).
func Sources(nodes []Node) []Node {
	return FilterDegree(0, InDegree(nodes))
}
// Unreachable starts at a given start node, performs
// a DFS from there, and returns the set of unreachable nodes.
func Unreachable(start Node, nodes []Node) []Node {
// DFS from the start ndoe
frontier := []Node{start}
seen := make(map[Node]struct{})
for len(frontier) > 0 {
// Pop the current node
n := len(frontier)
current := frontier[n-1]
frontier = frontier[:n-1]
// Check for potential cycle
if _, ok := seen[current]; ok {
continue
}
seen[current] = struct{}{}
// Add any new edges to visit, in reverse order
edges := current.Edges()
for i := len(edges) - 1; i >= 0; i-- {
frontier = append(frontier, edges[i].Tail())
}
}
// Check for any unseen nodes
var unseen []Node
for _, node := range nodes {
if _, ok := seen[node]; !ok {
unseen = append(unseen, node)
}
}
return unseen
} | vendor/github.com/hashicorp/terraform/digraph/util.go | 0.828384 | 0.560012 | util.go | starcoder |
package testsuites
import (
"errors"
"fmt"
"mnimidamonbackend/domain/repository"
"testing"
)
// unimplemented is the message logged for test suites that are not implemented yet.
var unimplemented = "Unimplemented Tests"
// CommonRepositoryTestSuiteInterface is implemented by repository-specific
// test suites; it fixes a common procedure so the suites stay consistent.
type CommonRepositoryTestSuiteInterface interface {
	Setup(t *testing.T) // Setting up for the testing, repository initializations, dependent models insertion.
	FindBeforeSaveTests(t *testing.T) // Find functionalities testing before save.
	SaveSuccessfulTests(t *testing.T) // Successful saving tests.
	FindAfterSaveTests(t *testing.T) // Finding after successful save.
	ConstraintsTest(t *testing.T) // Repository model specific constraint testing.
	UpdateTests(t *testing.T) // Updating tests.
	SpecificTests(t *testing.T) // Repository specific tests.
	DeleteTests(t *testing.T) // Deletion testing.
	TransactionSuiteTestInterface
}
// TransactionSuiteTestInterface is implemented by suites that can exercise
// the common Transaction behavior of a repository.
type TransactionSuiteTestInterface interface {
	BeginTx() TransactionSuiteTestTxInterface // Begin a transaction.
	Find() error // Find something outside of the transaction.
}
// TransactionSuiteTestTxInterface exercises behavior inside a transaction
// that has already begun.
type TransactionSuiteTestTxInterface interface {
	Create() error // Create something inside a transaction.
	Find() error // Find something inside the transaction.
	CorrectCheck(t *testing.T) // Check the correctness of the found thing.
	repository.Transaction // Rollback and Commit functionalities.
}
// runCommonRepositoryTests runs the common repository test procedure as a
// fixed sequence of named sub-tests.
func runCommonRepositoryTests(crtsi CommonRepositoryTestSuiteInterface, t *testing.T) {
	steps := []struct {
		name string
		run  func(*testing.T)
	}{
		{"TestingSuiteSetup", crtsi.Setup},
		{"FindBeforeSaveTests", crtsi.FindBeforeSaveTests},
		{"SaveSuccessfulTests", crtsi.SaveSuccessfulTests},
		{"FindAfterSaveTests", crtsi.FindAfterSaveTests},
		{"ConstraintsTest", crtsi.ConstraintsTest},
		{"UpdateTests", crtsi.UpdateTests},
		{"SpecificTests", crtsi.SpecificTests},
		{"DeleteTests", crtsi.DeleteTests},
		{"TransactionTests", func(t *testing.T) { runTransactionTestSuite(crtsi, t) }},
	}
	for _, step := range steps {
		t.Run(step.name, step.run)
	}
}
// runTransactionTestSuite runs both transaction sub-suites: rollback success
// followed by commit success.
func runTransactionTestSuite(ti TransactionSuiteTestInterface, t *testing.T) {
	runTransactionRollbackSuccessSuite(ti, t)
	runTransactionCommitSuccessSuite(ti, t)
}
// runTransactionRollbackSuccessSuite verifies that data created inside a
// transaction disappears after Rollback and that the rolled-back transaction
// rejects further use.
// Fixes: errors.Is was called with its arguments reversed (the error under
// test must come first, the sentinel second), and "recieved" was misspelled.
func runTransactionRollbackSuccessSuite(ti TransactionSuiteTestInterface, t *testing.T) {
	t.Run("TransactionRollbackSuccess", func(t *testing.T) {
		tix := ti.BeginTx()
		if err := tix.Create(); err != nil {
			t.Errorf("Expected no error, got %v", err)
		}
		tix.CorrectCheck(t)
		if err := tix.Rollback(); err != nil {
			t.Errorf("Expected no error on rollback, got %v", err)
		}
		// A rolled-back transaction must refuse further reads.
		if err := tix.Find(); !errors.Is(err, repository.ErrTxAlreadyRolledBack) {
			t.Errorf("Expected %v, received %v", repository.ErrTxAlreadyRolledBack, err)
		}
		// The created data must not be visible outside the transaction.
		if err := ti.Find(); !errors.Is(err, repository.ErrNotFound) {
			t.Errorf("Expected %v, got %v", repository.ErrNotFound, err)
		}
	})
}
// runTransactionCommitSuccessSuite verifies that data created inside a
// transaction survives Commit and that the finished transaction rejects
// further use.
// Fixes: errors.Is was called with its arguments reversed, "recieved" was
// misspelled, and the Commit failure message wrongly said "on rollback".
func runTransactionCommitSuccessSuite(ti TransactionSuiteTestInterface, t *testing.T) {
	t.Run("TransactionCommitSuccess", func(t *testing.T) {
		tix := ti.BeginTx()
		if err := tix.Create(); err != nil {
			t.Errorf("Expected no error, got %v", err)
		}
		tix.CorrectCheck(t)
		if err := tix.Commit(); err != nil {
			t.Errorf("Expected no error on commit, got %v", err)
		}
		// NOTE(review): a committed tx is expected to report
		// ErrTxAlreadyRolledBack on reuse; confirm the repository does not
		// expose a dedicated "already committed" sentinel instead.
		if err := tix.Find(); !errors.Is(err, repository.ErrTxAlreadyRolledBack) {
			t.Errorf("Expected %v, received %v", repository.ErrTxAlreadyRolledBack, err)
		}
		// The committed data must now be visible outside the transaction.
		if err := ti.Find(); err != nil {
			t.Errorf("Expected no error, got %v", err)
		}
	})
}
// expectedGot formats the standard "Expected X, got Y" assertion message.
func expectedGot(exp interface{}, got interface{}) string {
	return "Expected " + fmt.Sprint(exp) + ", got " + fmt.Sprint(got)
}
// unexpectedErr formats the standard "Expected no error, got X" message.
func unexpectedErr(got interface{}) string {
	return "Expected no error, got " + fmt.Sprint(got)
}
package transaction
import (
"github.com/neophora/neo2go/pkg/interop/attribute"
"github.com/neophora/neo2go/pkg/interop/input"
"github.com/neophora/neo2go/pkg/interop/output"
"github.com/neophora/neo2go/pkg/interop/witness"
)
// Transaction represents a NEO transaction, it's an opaque data structure
// that can be used with functions from this package. It's similar to
// Transaction class in Neo .net framework.
// The struct carries no fields; values exist only to be passed to the
// accessor functions below.
type Transaction struct{}
// GetHash returns the hash (256 bit BE value in a 32 byte slice) of the given
// transaction (which also is its ID). Is uses `Neo.Transaction.GetHash` syscall.
// NOTE(review): the zero-value bodies below are placeholders; presumably the
// interop compiler substitutes the named syscalls — confirm with the toolchain.
func GetHash(t Transaction) []byte {
	return nil
}
// GetType returns the type of the given transaction. Possible values:
// MinerTransaction = 0x00
// IssueTransaction = 0x01
// ClaimTransaction = 0x02
// EnrollmentTransaction = 0x20
// RegisterTransaction = 0x40
// ContractTransaction = 0x80
// StateType = 0x90
// AgencyTransaction = 0xb0
// PublishTransaction = 0xd0
// InvocationTransaction = 0xd1
// It uses `Neo.Transaction.GetType` syscall.
func GetType(t Transaction) byte {
	return 0x00
}
// GetAttributes returns a slice of attributes for a given transaction. Refer to
// attribute package on how to use them. This function uses
// `Neo.Transaction.GetAttributes` syscall.
func GetAttributes(t Transaction) []attribute.Attribute {
	return []attribute.Attribute{}
}
// GetReferences returns a slice of references for a given Transaction. Elements
// of this slice can be casted to any of input.Input or output.Output, depending
// on which information you're interested in (as reference technically contains
// both input and corresponding output), refer to input and output package on
// how to use them. This function uses `Neo.Transaction.GetReferences` syscall.
func GetReferences(t Transaction) []interface{} {
	return []interface{}{}
}
// GetUnspentCoins returns a slice of not yet spent outputs of a given transaction.
// This function uses `Neo.Transaction.GetUnspentCoint` syscall.
func GetUnspentCoins(t Transaction) []output.Output {
	return []output.Output{}
}
// GetInputs returns a slice of inputs of a given Transaction. Refer to input
// package on how to use them. This function uses `Neo.Transaction.GetInputs`
// syscall.
func GetInputs(t Transaction) []input.Input {
	return []input.Input{}
}
// GetOutputs returns a slice of outputs of a given Transaction. Refer to output
// package on how to use them. This function uses `Neo.Transaction.GetOutputs`
// syscall.
func GetOutputs(t Transaction) []output.Output {
	return []output.Output{}
}
// GetScript returns the script stored in a given Invocation transaction.
// Calling it for any other Transaction type would lead to failure. It uses
// `Neo.InvocationTransaction.GetScript` syscall.
func GetScript(t Transaction) []byte {
	return nil
}
// GetWitnesses returns a slice of witnesses of a given Transaction. Refer to
// witness package on how to use them. This function uses
// `Neo.Transaction.GetWitnesses` syscall.
func GetWitnesses(t Transaction) []witness.Witness {
return []witness.Witness{}
} | pkg/interop/transaction/transaction.go | 0.800731 | 0.425009 | transaction.go | starcoder |
package iso20022
// Set of elements used to provide information on the corrective interbank transaction, to which the resolution message refers.
type CorrectiveInterbankTransaction1 struct {
// Set of elements used to provide corrective information for the group header of the message under investigation.
GroupHeader *CorrectiveGroupInformation1 `xml:"GrpHdr,omitempty"`
// Unique identification, as assigned by an instructing party for an instructed party, to unambiguously identify the instruction.
//
// Usage: The instruction identification is a point to point reference that can be used between the instructing party and the instructed party to refer to the individual instruction. It can be included in several messages related to the instruction.
InstructionIdentification *Max35Text `xml:"InstrId,omitempty"`
// Unique identification, as assigned by the initiating party, to unambiguously identify the transaction. This identification is passed on, unchanged, throughout the entire end-to-end chain.
//
// Usage: The end-to-end identification can be used for reconciliation or to link tasks relating to the transaction. It can be included in several messages related to the transaction.
//
// Usage: In case there are technical limitations to pass on multiple references, the end-to-end identification must be passed on throughout the entire end-to-end chain.
EndToEndIdentification *Max35Text `xml:"EndToEndId,omitempty"`
// Unique identification, as assigned by the first instructing agent, to unambiguously identify the transaction that is passed on, unchanged, throughout the entire interbank chain.
// Usage: The transaction identification can be used for reconciliation, tracking or to link tasks relating to the transaction on the interbank level.
// Usage: The instructing agent has to make sure that the transaction identification is unique for a pre-agreed period.
TransactionIdentification *Max35Text `xml:"TxId,omitempty"`
// Amount of money moved between the instructing agent and the instructed agent.
InterbankSettlementAmount *ActiveOrHistoricCurrencyAndAmount `xml:"IntrBkSttlmAmt"`
// Date on which the amount of money ceases to be available to the agent that owes it and when the amount of money becomes available to the agent to which it is due.
InterbankSettlementDate *ISODate `xml:"IntrBkSttlmDt"`
}
func (c *CorrectiveInterbankTransaction1) AddGroupHeader() *CorrectiveGroupInformation1 {
c.GroupHeader = new(CorrectiveGroupInformation1)
return c.GroupHeader
}
func (c *CorrectiveInterbankTransaction1) SetInstructionIdentification(value string) {
c.InstructionIdentification = (*Max35Text)(&value)
}
func (c *CorrectiveInterbankTransaction1) SetEndToEndIdentification(value string) {
c.EndToEndIdentification = (*Max35Text)(&value)
}
func (c *CorrectiveInterbankTransaction1) SetTransactionIdentification(value string) {
c.TransactionIdentification = (*Max35Text)(&value)
}
func (c *CorrectiveInterbankTransaction1) SetInterbankSettlementAmount(value, currency string) {
c.InterbankSettlementAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
func (c *CorrectiveInterbankTransaction1) SetInterbankSettlementDate(value string) {
c.InterbankSettlementDate = (*ISODate)(&value)
} | CorrectiveInterbankTransaction1.go | 0.830732 | 0.608914 | CorrectiveInterbankTransaction1.go | starcoder |
package test_persistence
import (
dataV1 "github.com/expproletariy/pip-timers-service/data/version1"
persist "github.com/expproletariy/pip-timers-service/persistence"
cdata "github.com/pip-services3-go/pip-services3-commons-go/data"
"github.com/stretchr/testify/assert"
"testing"
"time"
)
// TimersPersistenceFixture bundles three fixture time sessions and the
// persistence implementation under test.
type TimersPersistenceFixture struct {
	TIMERS1 *dataV1.TimeSession // fixture session "timer1"
	TIMERS2 *dataV1.TimeSession // fixture session "timer2"
	TIMERS3 *dataV1.TimeSession // fixture session "timer3"
	persistence persist.ITimeSessionPersistence // implementation under test
}
// NewTimersPersistenceFixture creates a fixture around the given persistence
// with three fresh sessions, all owned by user1 and in Created state.
// The three previously copy-pasted literals are built by one helper.
func NewTimersPersistenceFixture(persistence persist.ITimeSessionPersistence) *TimersPersistenceFixture {
	t := TimersPersistenceFixture{
		TIMERS1:     newFixtureSession("timer1"),
		TIMERS2:     newFixtureSession("timer2"),
		TIMERS3:     newFixtureSession("timer3"),
		persistence: persistence,
	}
	return &t
}
// newFixtureSession builds one fixture TimeSession whose Id and Name are both
// id, owned by user1, freshly timestamped, and in Created state.
func newFixtureSession(id string) *dataV1.TimeSession {
	return &dataV1.TimeSession{
		Id:        id,
		Name:      id,
		User:      "user1",
		Tags:      nil,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
		Status:    dataV1.SessionStatusCreated,
		Timers:    nil,
	}
}
// testCreateTimers persists all three fixture sessions and checks that the
// values echoed back by Create match what was sent.
func (c *TimersPersistenceFixture) testCreateTimers(t *testing.T) {
	for _, want := range []*dataV1.TimeSession{c.TIMERS1, c.TIMERS2, c.TIMERS3} {
		got, err := c.persistence.Create("", want)
		assert.Nil(t, err)
		assert.Equal(t, got.Id, want.Id)
		assert.Equal(t, got.Name, want.Name)
		assert.Equal(t, got.User, want.User)
		assert.Equal(t, got.Status, want.Status)
	}
}
// TestCrudOperations exercises the full create/read/update/delete cycle of
// the persistence under test using the three fixture sessions.
func (c *TimersPersistenceFixture) TestCrudOperations(t *testing.T) {
	c.testCreateTimers(t)
	// Read back everything: all three created sessions must be returned.
	page, err := c.persistence.GetPageByFilter("", cdata.NewEmptyFilterParams(), cdata.NewEmptyPagingParams())
	assert.Nil(t, err)
	assert.NotNil(t, page)
	assert.Len(t, page.Data, 3)
	// Fetch a single session by id and verify its identity fields.
	timerSession, err := c.persistence.GetOneById("", c.TIMERS1.Id)
	assert.Nil(t, err)
	assert.NotNil(t, timerSession)
	assert.Equal(t, timerSession.Id, c.TIMERS1.Id)
	assert.Equal(t, timerSession.Name, c.TIMERS1.Name)
	assert.Equal(t, timerSession.User, c.TIMERS1.User)
	// Update: change the status and make sure the change is persisted.
	timerSession.Status = dataV1.SessionStatusInUse
	timerSessionUpdated, err := c.persistence.Update("", timerSession)
	assert.Nil(t, err)
	assert.NotNil(t, timerSessionUpdated)
	assert.Equal(t, timerSessionUpdated.Status, timerSession.Status)
	// Delete the session and verify it can no longer be fetched.
	timerSessionDeleted, err := c.persistence.DeleteById("", timerSession.Id)
	assert.Nil(t, err)
	assert.NotNil(t, timerSessionDeleted)
	assert.Equal(t, timerSessionDeleted.Id, timerSession.Id)
	timerSessionDeleted, err = c.persistence.GetOneById("", timerSession.Id)
	assert.Nil(t, err)
	assert.Nil(t, timerSessionDeleted)
}
func (c *TimersPersistenceFixture) TestGetWithFilters(t *testing.T) {
c.testCreateTimers(t)
const status = dataV1.SessionStatusCreated
page, err := c.persistence.GetPageByFilter(
"",
cdata.NewFilterParamsFromTuples(
"status", status,
),
cdata.NewEmptyPagingParams(),
)
assert.Nil(t, err)
assert.NotNil(t, page)
assert.Len(t, page.Data, 3)
assert.Equal(t, page.Data[0].Status, status)
const user = "user1"
page, err = c.persistence.GetPageByFilter(
"",
cdata.NewFilterParamsFromTuples(
"user", user,
),
cdata.NewEmptyPagingParams(),
)
assert.Nil(t, err)
assert.NotNil(t, page)
assert.Len(t, page.Data, 3)
assert.Equal(t, page.Data[0].User, user)
const id = "timer1"
page, err = c.persistence.GetPageByFilter(
"",
cdata.NewFilterParamsFromTuples(
"id", id,
),
cdata.NewEmptyPagingParams(),
)
assert.Nil(t, err)
assert.NotNil(t, page)
assert.Len(t, page.Data, 1)
assert.Equal(t, page.Data[0].Id, id)
} | test/persistence/TimersMemoryPersistenceFuxture.go | 0.552057 | 0.547646 | TimersMemoryPersistenceFuxture.go | starcoder |
package flagsfiller
import (
"flag"
"fmt"
"os"
"reflect"
"strconv"
"strings"
"time"
)
// Reflected types that need special handling beyond their reflect.Kind
// (time.Duration is an int64 alias; slices and maps get custom flag.Value
// implementations).
var (
	durationType = reflect.TypeOf(time.Duration(0))
	stringSliceType = reflect.TypeOf([]string{})
	stringToStringMapType = reflect.TypeOf(map[string]string{})
)
// FlagSetFiller is used to map the fields of a struct into flags of a flag.FlagSet
type FlagSetFiller struct {
options *fillerOptions
}
// Parse is a convenience function that creates a FlagSetFiller with the given options,
// fills and maps the flags from the given struct reference into flag.CommandLine, and uses
// flag.Parse to parse the os.Args.
// Returns an error if the given struct could not be used for filling flags.
func Parse(from interface{}, options ...FillerOption) error {
	if err := New(options...).Fill(flag.CommandLine, from); err != nil {
		return err
	}
	flag.Parse()
	return nil
}
// New creates a new FlagSetFiller with zero or more of the given FillerOption's
func New(options ...FillerOption) *FlagSetFiller {
return &FlagSetFiller{options: newFillerOptions(options...)}
}
// Fill populates the flagSet with a flag for each field in given struct passed in the 'from'
// argument which must be a struct reference.
// Fill returns an error when a non-struct reference is passed as 'from' or a field has a
// default tag which could not converted to the field's type.
func (f *FlagSetFiller) Fill(flagSet *flag.FlagSet, from interface{}) error {
	v := reflect.ValueOf(from)
	t := v.Type()
	// Guard clause: only a pointer-to-struct can be filled.
	if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
		return fmt.Errorf("can only fill from struct pointer, but it was %s", t.Kind())
	}
	return f.walkFields(flagSet, "", v.Elem(), t.Elem())
}
// walkFields iterates over the fields of structVal, recursing into nested
// and pointed-to structs and registering a flag for every exported leaf
// field via processField. Nested struct fields are prefixed with the
// parent field name joined by "-".
func (f *FlagSetFiller) walkFields(flagSet *flag.FlagSet, prefix string,
	structVal reflect.Value, structType reflect.Type) error {
	if prefix != "" {
		prefix += "-"
	}
	for i := 0; i < structVal.NumField(); i++ {
		field := structType.Field(i)
		fieldValue := structVal.Field(i)
		switch field.Type.Kind() {
		case reflect.Struct:
			err := f.walkFields(flagSet, prefix+field.Name, fieldValue, field.Type)
			if err != nil {
				return fmt.Errorf("failed to process %s of %s: %w", field.Name, structType.String(), err)
			}
		case reflect.Ptr:
			if fieldValue.CanSet() && field.Type.Elem().Kind() == reflect.Struct {
				// fill the pointer with a new struct of their type if it is nil
				if fieldValue.IsNil() {
					fieldValue.Set(reflect.New(field.Type.Elem()))
				}
				// NOTE(review): this passes field.Name without the accumulated
				// prefix (the Struct case above uses prefix+field.Name), so a
				// pointer-struct nested two levels deep loses its outer prefix.
				// Changing it would rename user-visible flags — confirm intent.
				err := f.walkFields(flagSet, field.Name, fieldValue.Elem(), field.Type.Elem())
				if err != nil {
					return fmt.Errorf("failed to process %s of %s: %w", field.Name, structType.String(), err)
				}
			}
		default:
			addr := fieldValue.Addr()
			// make sure it is exported/public
			if addr.CanInterface() {
				err := f.processField(flagSet, addr.Interface(), prefix+field.Name, field.Type, field.Tag)
				if err != nil {
					return fmt.Errorf("failed to process %s of %s: %w", field.Name, structType.String(), err)
				}
			}
		}
	}
	return nil
}
// processField registers a single flag for the struct field described by
// name/t/tag: it resolves the environment variable name, usage text,
// default value and flag name from the struct tags, dispatches on the
// field type to register the flag, and finally applies any environment
// variable override.
func (f *FlagSetFiller) processField(flagSet *flag.FlagSet, fieldRef interface{},
	name string, t reflect.Type, tag reflect.StructTag) (err error) {
	// The `env` tag wins; otherwise derive the env name from the field name
	// through the configured renamer chain (if any).
	var envName string
	if override, exists := tag.Lookup("env"); exists {
		envName = override
	} else if len(f.options.envRenamer) > 0 {
		envName = name
		for _, renamer := range f.options.envRenamer {
			envName = renamer(envName)
		}
	}
	usage := requoteUsage(tag.Get("usage"))
	if envName != "" {
		usage = fmt.Sprintf("%s (env %s)", usage, envName)
	}
	tagDefault, hasDefaultTag := tag.Lookup("default")
	var renamed string
	if override, exists := tag.Lookup("flag"); exists {
		if override == "" {
			// empty flag override signal to skip this field
			return nil
		}
		renamed = override
	} else {
		renamed = f.options.renameLongName(name)
	}
	switch {
	case t.Kind() == reflect.String:
		f.processString(fieldRef, hasDefaultTag, tagDefault, flagSet, renamed, usage)
	case t.Kind() == reflect.Bool:
		err = f.processBool(fieldRef, hasDefaultTag, tagDefault, flagSet, renamed, usage)
	case t.Kind() == reflect.Float64:
		err = f.processFloat64(fieldRef, hasDefaultTag, tagDefault, flagSet, renamed, usage)
	// NOTE check time.Duration before int64 since it is aliased from int64
	case t == durationType:
		err = f.processDuration(fieldRef, hasDefaultTag, tagDefault, flagSet, renamed, usage)
	case t.Kind() == reflect.Int64:
		err = f.processInt64(fieldRef, hasDefaultTag, tagDefault, flagSet, renamed, usage)
	case t.Kind() == reflect.Int:
		err = f.processInt(fieldRef, hasDefaultTag, tagDefault, flagSet, renamed, usage)
	case t.Kind() == reflect.Uint64:
		err = f.processUint64(fieldRef, hasDefaultTag, tagDefault, flagSet, renamed, usage)
	case t.Kind() == reflect.Uint:
		err = f.processUint(fieldRef, hasDefaultTag, tagDefault, flagSet, renamed, usage)
	case t == stringSliceType:
		// The `override-value` tag selects replace (true) vs append (false)
		// semantics for values supplied on the command line.
		var override bool
		if overrideValue, exists := tag.Lookup("override-value"); exists {
			if value, err := strconv.ParseBool(overrideValue); err == nil {
				override = value
			}
		}
		f.processStringSlice(fieldRef, hasDefaultTag, tagDefault, flagSet, renamed, usage, override)
	case t == stringToStringMapType:
		f.processStringToStringMap(fieldRef, hasDefaultTag, tagDefault, flagSet, renamed, usage)
	// ignore any other types
	}
	if err != nil {
		return err
	}
	// An environment variable, when set, replaces the default by assigning
	// through the flag's Value, so later command-line parsing can still
	// override it.
	if envName != "" {
		if val, exists := os.LookupEnv(envName); exists {
			err := flagSet.Lookup(renamed).Value.Set(val)
			if err != nil {
				return fmt.Errorf("failed to set from environment variable %s: %w",
					envName, err)
			}
		}
	}
	return nil
}
// processStringToStringMap registers a map[string]string flag. The default
// tag (or an existing non-nil map) seeds the map, which the flag value then
// mutates in place.
func (f *FlagSetFiller) processStringToStringMap(fieldRef interface{}, hasDefaultTag bool, tagDefault string, flagSet *flag.FlagSet, renamed string, usage string) {
	casted := fieldRef.(*map[string]string)
	var val map[string]string
	if hasDefaultTag {
		val = parseStringToStringMap(tagDefault)
		*casted = val
	} else if *casted == nil {
		// Writing to a nil map would panic, so allocate one up front.
		val = make(map[string]string)
		*casted = val
	} else {
		val = *casted
	}
	flagSet.Var(&strToStrMapVar{val: val}, renamed, usage)
}
// processStringSlice registers a []string flag; the default tag is parsed
// as a comma separated list and override selects replace-vs-append
// behaviour for command-line values (see strSliceVar.Set).
func (f *FlagSetFiller) processStringSlice(fieldRef interface{}, hasDefaultTag bool, tagDefault string, flagSet *flag.FlagSet, renamed string, usage string, override bool) {
	casted := fieldRef.(*[]string)
	if hasDefaultTag {
		*casted = parseStringSlice(tagDefault)
	}
	flagSet.Var(&strSliceVar{ref: casted, override: override}, renamed, usage)
}
// processUint registers a uint flag, using the `default` tag (when
// present) or the field's current value as the default.
func (f *FlagSetFiller) processUint(fieldRef interface{}, hasDefaultTag bool, tagDefault string, flagSet *flag.FlagSet, renamed string, usage string) (err error) {
	casted := fieldRef.(*uint)
	var defaultVal uint
	if hasDefaultTag {
		// Parse as unsigned so that negative defaults are rejected rather
		// than silently wrapping to a huge uint (the previous strconv.Atoi
		// accepted "-1"), and so values above math.MaxInt are accepted on
		// platforms where uint is 64 bits wide.
		var parsed uint64
		parsed, err = strconv.ParseUint(tagDefault, 10, strconv.IntSize)
		if err != nil {
			return fmt.Errorf("failed to parse default into uint: %w", err)
		}
		defaultVal = uint(parsed)
	} else {
		defaultVal = *casted
	}
	flagSet.UintVar(casted, renamed, defaultVal, usage)
	return nil
}
// processUint64 registers a uint64 flag, using the `default` tag (when
// present) or the field's current value as the default.
func (f *FlagSetFiller) processUint64(fieldRef interface{}, hasDefaultTag bool, tagDefault string, flagSet *flag.FlagSet, renamed string, usage string) (err error) {
	casted := fieldRef.(*uint64)
	var defaultVal uint64
	if hasDefaultTag {
		defaultVal, err = strconv.ParseUint(tagDefault, 10, 64)
		if err != nil {
			return fmt.Errorf("failed to parse default into uint64: %w", err)
		}
	} else {
		defaultVal = *casted
	}
	flagSet.Uint64Var(casted, renamed, defaultVal, usage)
	return err
}
// processInt registers an int flag, using the `default` tag (when
// present) or the field's current value as the default.
func (f *FlagSetFiller) processInt(fieldRef interface{}, hasDefaultTag bool, tagDefault string, flagSet *flag.FlagSet, renamed string, usage string) (err error) {
	casted := fieldRef.(*int)
	var defaultVal int
	if hasDefaultTag {
		defaultVal, err = strconv.Atoi(tagDefault)
		if err != nil {
			return fmt.Errorf("failed to parse default into int: %w", err)
		}
	} else {
		defaultVal = *casted
	}
	flagSet.IntVar(casted, renamed, defaultVal, usage)
	return err
}
// processInt64 registers an int64 flag, using the `default` tag (when
// present) or the field's current value as the default.
func (f *FlagSetFiller) processInt64(fieldRef interface{}, hasDefaultTag bool, tagDefault string, flagSet *flag.FlagSet, renamed string, usage string) (err error) {
	casted := fieldRef.(*int64)
	var defaultVal int64
	if hasDefaultTag {
		defaultVal, err = strconv.ParseInt(tagDefault, 10, 64)
		if err != nil {
			return fmt.Errorf("failed to parse default into int64: %w", err)
		}
	} else {
		defaultVal = *casted
	}
	flagSet.Int64Var(casted, renamed, defaultVal, usage)
	// err is always nil here; the parse failure returned above.
	return nil
}
// processDuration registers a time.Duration flag, using the `default`
// tag (parsed via time.ParseDuration, when present) or the field's
// current value as the default.
func (f *FlagSetFiller) processDuration(fieldRef interface{}, hasDefaultTag bool, tagDefault string, flagSet *flag.FlagSet, renamed string, usage string) (err error) {
	casted := fieldRef.(*time.Duration)
	var defaultVal time.Duration
	if hasDefaultTag {
		defaultVal, err = time.ParseDuration(tagDefault)
		if err != nil {
			return fmt.Errorf("failed to parse default into time.Duration: %w", err)
		}
	} else {
		defaultVal = *casted
	}
	flagSet.DurationVar(casted, renamed, defaultVal, usage)
	return nil
}
// processFloat64 registers a float64 flag, using the `default` tag (when
// present) or the field's current value as the default.
func (f *FlagSetFiller) processFloat64(fieldRef interface{}, hasDefaultTag bool, tagDefault string, flagSet *flag.FlagSet, renamed string, usage string) (err error) {
	casted := fieldRef.(*float64)
	var defaultVal float64
	if hasDefaultTag {
		defaultVal, err = strconv.ParseFloat(tagDefault, 64)
		if err != nil {
			return fmt.Errorf("failed to parse default into float64: %w", err)
		}
	} else {
		defaultVal = *casted
	}
	flagSet.Float64Var(casted, renamed, defaultVal, usage)
	return nil
}
// processBool registers a bool flag, using the `default` tag (when
// present) or the field's current value as the default.
func (f *FlagSetFiller) processBool(fieldRef interface{}, hasDefaultTag bool, tagDefault string, flagSet *flag.FlagSet, renamed string, usage string) (err error) {
	casted := fieldRef.(*bool)
	var defaultVal bool
	if hasDefaultTag {
		defaultVal, err = strconv.ParseBool(tagDefault)
		if err != nil {
			return fmt.Errorf("failed to parse default into bool: %w", err)
		}
	} else {
		defaultVal = *casted
	}
	flagSet.BoolVar(casted, renamed, defaultVal, usage)
	return nil
}
// processString registers a string flag, using the `default` tag (when
// present) or the field's current value as the default.
func (f *FlagSetFiller) processString(fieldRef interface{}, hasDefaultTag bool, tagDefault string, flagSet *flag.FlagSet, renamed string, usage string) {
	casted := fieldRef.(*string)
	var defaultVal string
	if hasDefaultTag {
		defaultVal = tagDefault
	} else {
		defaultVal = *casted
	}
	flagSet.StringVar(casted, renamed, defaultVal, usage)
}
type strSliceVar struct {
ref *[]string
override bool
}
func (s *strSliceVar) String() string {
if s.ref == nil {
return ""
}
return strings.Join(*s.ref, ",")
}
// Set implements flag.Value; each occurrence of the flag either replaces
// the whole slice (when override is set) or appends the comma separated
// values to it.
func (s *strSliceVar) Set(val string) error {
	parts := parseStringSlice(val)
	if s.override {
		*s.ref = parts
		return nil
	}
	*s.ref = append(*s.ref, parts...)
	return nil
}
// parseStringSlice splits a comma separated list. Note that an empty
// input yields a single empty element, per strings.Split semantics.
func parseStringSlice(val string) []string {
	return strings.Split(val, ",")
}
type strToStrMapVar struct {
val map[string]string
}
// String implements flag.Value by rendering the map as comma separated
// k=v pairs. Map iteration order is random, so the rendered order is not
// stable between calls.
func (s strToStrMapVar) String() string {
	if s.val == nil {
		return ""
	}
	var sb strings.Builder
	first := true
	for k, v := range s.val {
		if !first {
			sb.WriteString(",")
		} else {
			first = false
		}
		sb.WriteString(k)
		sb.WriteString("=")
		sb.WriteString(v)
	}
	return sb.String()
}
func (s strToStrMapVar) Set(val string) error {
content := parseStringToStringMap(val)
for k, v := range content {
s.val[k] = v
}
return nil
}
// parseStringToStringMap converts a comma separated list of key=value
// pairs into a map. A token without "=" maps the whole token to the
// empty string; only the first "=" in a token is significant.
func parseStringToStringMap(val string) map[string]string {
	result := make(map[string]string)
	for _, pair := range strings.Split(val, ",") {
		key, value, _ := strings.Cut(pair, "=")
		result[key] = value
	}
	return result
}
// requoteUsage converts a [name] quoted usage string into the back quote
// form processed by flag.UnquoteUsage.
func requoteUsage(usage string) string {
	return strings.NewReplacer("[", "`", "]", "`").Replace(usage)
}
package gofakeit
import "strings"
// HackerPhrase will return a random hacker sentence
func HackerPhrase() string {
	phrase := Generate(getRandValue([]string{"hacker", "phrase"}))
	// Capitalise only the leading word of the generated sentence.
	parts := strings.SplitN(phrase, " ", 2)
	parts[0] = strings.Title(parts[0])
	return strings.Join(parts, " ")
}
// HackerAbbreviation will return a random hacker abbreviation
func HackerAbbreviation() string {
return getRandValue([]string{"hacker", "abbreviation"})
}
// HackerAdjective will return a random hacker adjective
func HackerAdjective() string {
return getRandValue([]string{"hacker", "adjective"})
}
// HackerNoun will return a random hacker noun
func HackerNoun() string {
return getRandValue([]string{"hacker", "noun"})
}
// HackerVerb will return a random hacker verb
func HackerVerb() string {
return getRandValue([]string{"hacker", "verb"})
}
// HackeringVerb will return a random hacker ingverb
func HackeringVerb() string {
return getRandValue([]string{"hacker", "ingverb"})
}
func addHackerLookup() {
AddFuncLookup("hackerphrase", Info{
Display: "Hacker Phrase",
Category: "hacker",
Description: "Random hacker phrase",
Example: "If we calculate the program, we can get to the AI pixel through the redundant XSS matrix!",
Output: "string",
Call: func(m *map[string][]string, info *Info) (interface{}, error) {
return HackerPhrase(), nil
},
})
AddFuncLookup("hackerabbreviation", Info{
Display: "Hacker Abbreviation",
Category: "hacker",
Description: "Random hacker abbreviation",
Example: "ADP",
Output: "string",
Call: func(m *map[string][]string, info *Info) (interface{}, error) {
return HackerAbbreviation(), nil
},
})
AddFuncLookup("hackeradjective", Info{
Display: "Hacker Adjective",
Category: "hacker",
Description: "Random hacker adjective",
Example: "wireless",
Output: "string",
Call: func(m *map[string][]string, info *Info) (interface{}, error) {
return HackerAdjective(), nil
},
})
AddFuncLookup("hackernoun", Info{
Display: "Hacker Noun",
Category: "hacker",
Description: "Random hacker noun",
Example: "driver",
Output: "string",
Call: func(m *map[string][]string, info *Info) (interface{}, error) {
return HackerNoun(), nil
},
})
AddFuncLookup("hackerverb", Info{
Display: "Hacker Verb",
Category: "hacker",
Description: "Random hacker verb",
Example: "synthesize",
Output: "string",
Call: func(m *map[string][]string, info *Info) (interface{}, error) {
return HackerVerb(), nil
},
})
AddFuncLookup("hackeringverb", Info{
Display: "Hackering Verb",
Category: "hacker",
Description: "Random hackering verb",
Example: "connecting",
Output: "string",
Call: func(m *map[string][]string, info *Info) (interface{}, error) {
return HackeringVerb(), nil
},
})
} | hacker.go | 0.696887 | 0.417568 | hacker.go | starcoder |
package fuzzy
import (
"unicode"
"unicode/utf8"
)
var noop = func(r rune) rune { return r }
// Match returns true if source matches target using a fuzzy-searching
// algorithm. Note that it doesn't implement Levenshtein distance (see
// RankMatch instead), but rather a simplified version where there's no
// approximation. The method will return true only if each character in the
// source can be found in the target and occurs after the preceding matches.
func Match(source, target string) bool {
return match(source, target, noop)
}
// MatchFold is a case-insensitive version of Match.
func MatchFold(source, target string) bool {
return match(source, target, unicode.ToLower)
}
// match reports whether every rune of source occurs in target, in order,
// after normalising both sides with fn. Identical strings short-circuit.
func match(source, target string, fn func(rune) rune) bool {
	if len(target) < len(source) {
		return false
	}
	if len(target) == len(source) && source == target {
		return true
	}
	rest := target
	for _, want := range source {
		matched := false
		for i, have := range rest {
			if fn(want) == fn(have) {
				// Resume scanning just past this match so ordering is enforced.
				rest = rest[i+utf8.RuneLen(have):]
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	return true
}
// Find will return a list of strings in targets that fuzzy matches source.
func Find(source string, targets []string) []string {
return find(source, targets, noop)
}
// FindFold is a case-insensitive version of Find.
func FindFold(source string, targets []string) []string {
return find(source, targets, unicode.ToLower)
}
// find returns every target that fuzzy-matches source under the rune
// transform fn, preserving the input order.
func find(source string, targets []string, fn func(rune) rune) []string {
	var matched []string
	for _, candidate := range targets {
		if !match(source, candidate, fn) {
			continue
		}
		matched = append(matched, candidate)
	}
	return matched
}
// RankMatch is similar to Match except it will measure the Levenshtein
// distance between the source and the target and return its result. If there
// was no match, it will return -1.
// Given the requirements of match, RankMatch only needs to perform a subset of
// the Levenshtein calculation, only deletions need be considered, required
// additions and substitutions would fail the match test.
func RankMatch(source, target string) int {
return rank(source, target, noop)
}
// RankMatchFold is a case-insensitive version of RankMatch.
func RankMatchFold(source, target string) int {
return rank(source, target, unicode.ToLower)
}
// rank matches source against target using fn to normalise runes and
// returns the number of target runes that had to be skipped (a
// deletions-only Levenshtein distance), or -1 when source does not
// fuzzy-match target.
func rank(source, target string, fn func(rune) rune) int {
	lenDiff := len(target) - len(source)
	if lenDiff < 0 {
		return -1
	}
	if lenDiff == 0 && source == target {
		return 0
	}
	runeDiff := 0
Outer:
	for _, r1 := range source {
		for i, r2 := range target {
			if fn(r1) == fn(r2) {
				target = target[i+utf8.RuneLen(r2):]
				continue Outer
			} else {
				runeDiff++
			}
		}
		return -1
	}
	// Any target runes left after the final match also count as deletions.
	// Counting with RuneCountInString fixes the previous byte-wise walk,
	// which advanced by RuneLen(rune(target[0])) and therefore mis-stepped
	// (and could slice out of range) on multi-byte UTF-8 characters.
	runeDiff += utf8.RuneCountInString(target)
	return runeDiff
}
// RankFind is similar to Find, except it will also rank all matches using
// Levenshtein distance.
func RankFind(source string, targets []string) ranks {
var r ranks
for _, target := range find(source, targets, noop) {
distance := LevenshteinDistance(source, target)
r = append(r, Rank{source, target, distance})
}
return r
}
// RankFindFold is a case-insensitive version of RankFind.
func RankFindFold(source string, targets []string) ranks {
var r ranks
for _, target := range find(source, targets, unicode.ToLower) {
distance := LevenshteinDistance(source, target)
r = append(r, Rank{source, target, distance})
}
return r
}
type Rank struct {
// Source is used as the source for matching.
Source string
// Target is the word matched against.
Target string
// Distance is the Levenshtein distance between Source and Target.
Distance int
}
// ranks implements sort.Interface, ordering matches by ascending
// Levenshtein distance.
type ranks []Rank

// Len implements sort.Interface.
func (r ranks) Len() int {
	return len(r)
}

// Swap implements sort.Interface.
func (r ranks) Swap(i, j int) {
	r[i], r[j] = r[j], r[i]
}
func (r ranks) Less(i, j int) bool {
return r[i].Distance < r[j].Distance
} | vendor/github.com/renstrom/fuzzysearch/fuzzy/fuzzy.go | 0.776284 | 0.449816 | fuzzy.go | starcoder |
package examples
import (
"honnef.co/go/js/xhr"
"myitcv.io/highlightjs"
"github.com/lijianying10/react"
"github.com/lijianying10/react/examples/immtodoapp"
"github.com/lijianying10/react/jsx"
)
// ImmExamplesDef is the definition of the ImmExamples component
type ImmExamplesDef struct {
react.ComponentDef
}
// ImmExamples creates instances of the ImmExamples component
func ImmExamples() *ImmExamplesElem {
return buildImmExamplesElem()
}
// ImmExamplesState is the state type for the ImmExamples component
type ImmExamplesState struct {
examples *exampleSource
selectedTabs *tabS
}
// ComponentWillMount is a React lifecycle method for the ImmExamples
// component. On first mount it starts one async fetch per example source
// and stores the fetched Go source text in the package-level sources map.
func (p ImmExamplesDef) ComponentWillMount() {
	// fetchStarted is a package-level guard so the fetches run only once
	// even if the component mounts again.
	if !fetchStarted {
		for i, e := range sources.Range() {
			go func(i exampleKey, e *source) {
				req := xhr.NewRequest("GET", "https://raw.githubusercontent.com/myitcv/react/master/examples/"+e.file())
				err := req.Send(nil)
				if err != nil {
					panic(err)
				}
				// NOTE(review): sources is read-modify-written here from
				// several goroutines without synchronisation; presumably safe
				// under GopherJS's single-threaded runtime — confirm before
				// reusing this pattern elsewhere.
				sources = sources.Set(i, e.setSrc(req.ResponseText))
				newSt := p.State()
				newSt.examples = sources
				p.SetState(newSt)
			}(i, e)
		}
		fetchStarted = true
	}
}
// GetInitialState returns in the initial state for the ImmExamples component
func (p ImmExamplesDef) GetInitialState() ImmExamplesState {
return ImmExamplesState{
examples: sources,
selectedTabs: newTabS(),
}
}
// Render renders the ImmExamples component
func (p ImmExamplesDef) Render() react.Element {
dc := jsx.HTML(`
<h3>Using immutable data structures</h3>
<p>This page focuses on using <a href="https://myitcv.io/immutable"><code>myitcv.io/immutable</code></a>
(specifically <a href="https://github.com/myitcv/immutable/wiki/immutableGen"><code>immutableGen</code></a>) to
help make building components easier. The pattern of immutable data structures lends itself well to React's style
of composition.</p>
<p>For the source code, raising issues, questions etc, please see
<a href="https://github.com/myitcv/react/tree/master/examples" target="_blank">the Github repo</a>.</p>
<p>Note the examples below show the Go source code from <code>master</code>.</p>
`)
dc = append(dc,
p.renderExample(
exampleImmTodo,
react.Span(nil, react.S("A simple TODO app")),
react.P(nil, react.S("The immtodoapp.TodoApp component is a reimplementation of todoapp.TodoApp using immutable data structures.")),
"n/a",
immtodoapp.TodoApp(),
),
)
return react.Div(&react.DivProps{ClassName: "container"},
dc...,
)
}
func (p ImmExamplesDef) renderExample(key exampleKey, title, msg react.Element, jsxSrc string, elem react.Element) react.Element {
var goSrc string
src, _ := p.State().examples.Get(key)
if src != nil {
goSrc = src.src()
}
var code *react.DangerousInnerHTML
switch v, _ := p.State().selectedTabs.Get(key); v {
case tabGo:
code = react.NewDangerousInnerHTML(highlightjs.Highlight("go", goSrc, true).Value)
case tabJsx:
code = react.NewDangerousInnerHTML(highlightjs.Highlight("javascript", jsxSrc, true).Value)
}
return react.Div(nil,
react.H3(nil, title),
msg,
react.Div(&react.DivProps{ClassName: "row"},
react.Div(&react.DivProps{ClassName: "col-md-8"},
react.Div(&react.DivProps{ClassName: "panel panel-default with-nav-tabs"},
react.Div(&react.DivProps{ClassName: "panel-heading"},
react.Ul(
&react.UlProps{ClassName: "nav nav-tabs"},
p.buildExampleNavTab(key, tabGo, "GopherJS"),
p.buildExampleNavTab(key, tabJsx, "JSX"),
),
),
react.Div(&react.DivProps{ClassName: "panel-body"},
react.Pre(&react.PreProps{
Style: &react.CSS{
MaxHeight: "400px",
},
DangerouslySetInnerHTML: code,
}),
),
),
),
react.Div(&react.DivProps{ClassName: "col-md-4"},
plainPanel(elem),
),
),
)
}
func (p ImmExamplesDef) buildExampleNavTab(key exampleKey, t tab, title string) *react.LiElem {
lip := &react.LiProps{Role: "presentation"}
if v, _ := p.State().selectedTabs.Get(key); v == t {
lip.ClassName = "active"
}
return react.Li(
lip,
react.A(
&react.AProps{Href: "#", OnClick: immTabChange{p, key, t}},
react.S(title),
),
)
}
type immTabChange struct {
e ImmExamplesDef
key exampleKey
t tab
}
// OnClick records the tab selection carried by this handler in the
// component's state, triggering a re-render with the chosen tab active.
func (tc immTabChange) OnClick(e *react.SyntheticMouseEvent) {
	p := tc.e
	key := tc.key
	t := tc.t
	cts := p.State().selectedTabs
	newSt := p.State()
	newSt.selectedTabs = cts.Set(key, t)
	p.SetState(newSt)
	// Stop the browser from following the href="#" anchor.
	e.PreventDefault()
}
func (p *ImmExamplesDef) handleTabChange(key exampleKey, t tab) func(*react.SyntheticMouseEvent) {
return func(e *react.SyntheticMouseEvent) {
cts := p.State().selectedTabs
newSt := p.State()
newSt.selectedTabs = cts.Set(key, t)
p.SetState(newSt)
e.PreventDefault()
}
} | examples/imm_examples.go | 0.616243 | 0.471223 | imm_examples.go | starcoder |
package iso20022
// Limit of amounts for the customer.
type ATMTransactionAmounts2 struct {
// Currency of the limits, if different from the requested amount.
Currency *ActiveCurrencyCode `xml:"Ccy,omitempty"`
// Maximum amount allowed in the authorised currency if the withdrawal was not approved.
MaximumAuthorisableAmount *ImpliedCurrencyAndAmount `xml:"MaxAuthsbAmt,omitempty"`
// Minimum amount allowed for a withdrawal in the authorised currency.
MinimumAllowedAmount *ImpliedCurrencyAndAmount `xml:"MinAllwdAmt,omitempty"`
// Maximum amount allowed for a withdrawal in the authorised currency.
MaximumAllowedAmount *ImpliedCurrencyAndAmount `xml:"MaxAllwdAmt,omitempty"`
// Remaining daily amount of the customer totals after the withdrawal.
DailyBalance *DetailedAmount4 `xml:"DalyBal,omitempty"`
// Remaining weekly amount of the customer totals after the withdrawal.
WeeklyBalance *DetailedAmount4 `xml:"WklyBal,omitempty"`
// Remaining monthly amount of the customer totals after the withdrawal.
MonthlyBalance *DetailedAmount4 `xml:"MnthlyBal,omitempty"`
}
// SetCurrency sets the currency code of the limits.
func (a *ATMTransactionAmounts2) SetCurrency(value string) {
	a.Currency = (*ActiveCurrencyCode)(&value)
}

// SetMaximumAuthorisableAmount sets the maximum amount allowed in the
// authorised currency if the withdrawal was not approved.
func (a *ATMTransactionAmounts2) SetMaximumAuthorisableAmount(value, currency string) {
	a.MaximumAuthorisableAmount = NewImpliedCurrencyAndAmount(value, currency)
}

// SetMinimumAllowedAmount sets the minimum amount allowed for a withdrawal.
func (a *ATMTransactionAmounts2) SetMinimumAllowedAmount(value, currency string) {
	a.MinimumAllowedAmount = NewImpliedCurrencyAndAmount(value, currency)
}

// SetMaximumAllowedAmount sets the maximum amount allowed for a withdrawal.
func (a *ATMTransactionAmounts2) SetMaximumAllowedAmount(value, currency string) {
	a.MaximumAllowedAmount = NewImpliedCurrencyAndAmount(value, currency)
}

// AddDailyBalance allocates and returns the remaining daily balance amount.
func (a *ATMTransactionAmounts2) AddDailyBalance() *DetailedAmount4 {
	a.DailyBalance = new(DetailedAmount4)
	return a.DailyBalance
}

// AddWeeklyBalance allocates and returns the remaining weekly balance amount.
func (a *ATMTransactionAmounts2) AddWeeklyBalance() *DetailedAmount4 {
	a.WeeklyBalance = new(DetailedAmount4)
	return a.WeeklyBalance
}
func (a *ATMTransactionAmounts2) AddMonthlyBalance() *DetailedAmount4 {
a.MonthlyBalance = new(DetailedAmount4)
return a.MonthlyBalance
} | ATMTransactionAmounts2.go | 0.851598 | 0.421314 | ATMTransactionAmounts2.go | starcoder |
package quicktime
import "bytes"
import "encoding/binary"
import "errors"
import "math"
// stscEntry is one row of the sample-to-chunk table: starting at chunk
// FirstChunk, each chunk holds SamplesPerChunk samples. SampleID is
// presumably the sample description index per the QuickTime 'stsc'
// layout — confirm against the spec before relying on it.
type stscEntry struct {
	FirstChunk int32
	SamplesPerChunk int32
	SampleID int32
}
// A STSCAtom stores a table of samples per chunk
type STSCAtom struct {
Atom *Atom
Entries []stscEntry
}
// ParseSTSC converts a generic "stsc" Atom to a STSCAtom.
// It returns an error for a wrong atom type, missing/short data, or a
// truncated entry table.
func ParseSTSC(atom *Atom) (STSCAtom, error) {
	if atom.Type != "stsc" {
		return STSCAtom{}, errors.New("Not an STSC atom")
	}
	if !atom.HasData() {
		return STSCAtom{}, errors.New("STSC atom doesn't have data")
	}
	// Header layout: 1 byte version, 3 bytes flags, 4 bytes entry count.
	// Guard the slice so short data returns an error instead of panicking.
	if len(atom.Data) < 8 {
		return STSCAtom{}, errors.New("STSC atom data too short")
	}
	numEntries := int32Decode(atom.Data[4:8])
	if numEntries < 0 {
		return STSCAtom{}, errors.New("STSC atom has negative entry count")
	}
	stsc := STSCAtom{Atom: atom,
		Entries: make([]stscEntry, numEntries)}
	if numEntries > 0 {
		buf := bytes.NewBuffer(atom.Data[8:])
		// Propagate truncated-table errors instead of silently returning
		// zeroed entries (the error was previously ignored).
		if err := binary.Read(buf, binary.BigEndian, &stsc.Entries); err != nil {
			return STSCAtom{}, err
		}
	}
	return stsc, nil
}
// SampleChunk converts a sample number to a chunk, chunk offset,
// and offset with the chunk
func (stsc STSCAtom) SampleChunk(s int) (chunk int, chunkStart int, offset int, err error) {
// Convert to base zero
s--
// Hmm, what's an ideal way to handle this?
switch len(stsc.Entries) {
case 0:
return -1, -1, -1, nil
case 1:
// If there's one entry, then all chunks are the same size
// (do all the math based zero)
chunk = int(math.Floor(float64(s) / float64(stsc.Entries[0].SamplesPerChunk)))
start := int(chunk * int(stsc.Entries[0].SamplesPerChunk))
offset = s - start
// Convert back to base one
chunk++
offset++
start++
//fmt.Printf("STSC: I believe frame %d is sample %d in chunk %d (which starts at %d)\n", s+1, offset, chunk, start)
// Remember that chunks and samples are 1-based
return chunk, start, offset, nil
}
sample := int32(s)
lastChunk := stsc.Entries[len(stsc.Entries)-1].FirstChunk
samplesPerChunk := stsc.Entries[0].SamplesPerChunk
nextEntry := 0
accum := int32(0)
for i := int32(0); i <= lastChunk; i++ {
if i == stsc.Entries[nextEntry].FirstChunk {
samplesPerChunk = stsc.Entries[nextEntry].SamplesPerChunk
nextEntry++
}
if samplesPerChunk > (sample) {
return int(i + 1), int(accum + 1), int(sample + 1), nil
}
sample -= samplesPerChunk
accum += samplesPerChunk
}
// If you get here, you're on the last chunk
chunk = int(lastChunk) + int(math.Floor(float64(sample)/float64(samplesPerChunk)))
offset = int(math.Remainder(float64(sample), float64(samplesPerChunk)))
// Convert back to base one
chunk++
offset++
accum++
//fmt.Printf("STSC: I believe frame %d is sample %d in chunk %d (which starts at %d)\n", s+1, offset, chunk, accum)
return chunk, int(accum), offset, nil
} | stsc_atom.go | 0.589126 | 0.431524 | stsc_atom.go | starcoder |
package statsd
import (
"context"
"math"
"sort"
"strconv"
"time"
"github.com/hligit/gostatsd"
"github.com/hligit/gostatsd/pkg/stats"
)
// percentStruct is a cache of percentile names to avoid creating them for each timer.
type percentStruct struct {
count string
mean string
sum string
sumSquares string
upper string
lower string
}
// MetricAggregator aggregates metrics.
type MetricAggregator struct {
metricMapsReceived uint64
expiryIntervalCounter time.Duration // How often to expire counters
expiryIntervalGauge time.Duration // How often to expire gauges
expiryIntervalSet time.Duration // How often to expire sets
expiryIntervalTimer time.Duration // How often to expire timers
percentThresholds map[float64]percentStruct
now func() time.Time // Returns current time. Useful for testing.
statser stats.Statser
disabledSubtypes gostatsd.TimerSubtypes
histogramLimit uint32
metricMap *gostatsd.MetricMap
}
// NewMetricAggregator creates a new MetricAggregator object.
func NewMetricAggregator(
percentThresholds []float64,
expiryIntervalCounter time.Duration,
expiryIntervalGauge time.Duration,
expiryIntervalSet time.Duration,
expiryIntervalTimer time.Duration,
disabled gostatsd.TimerSubtypes,
histogramLimit uint32,
) *MetricAggregator {
a := MetricAggregator{
expiryIntervalCounter: expiryIntervalCounter,
expiryIntervalGauge: expiryIntervalGauge,
expiryIntervalSet: expiryIntervalSet,
expiryIntervalTimer: expiryIntervalTimer,
percentThresholds: make(map[float64]percentStruct, len(percentThresholds)),
now: time.Now,
statser: stats.NewNullStatser(), // Will probably be replaced via RunMetrics
metricMap: gostatsd.NewMetricMap(),
disabledSubtypes: disabled,
histogramLimit: histogramLimit,
}
for _, pct := range percentThresholds {
sPct := strconv.Itoa(int(pct))
a.percentThresholds[pct] = percentStruct{
count: "count_" + sPct,
mean: "mean_" + sPct,
sum: "sum_" + sPct,
sumSquares: "sum_squares_" + sPct,
upper: "upper_" + sPct,
lower: "lower_" + sPct,
}
}
return &a
}
// round rounds a number to its nearest integer value.
// poor man's math.Round(x) = math.Floor(x + 0.5).
// NOTE: unlike math.Round, this rounds half-cases toward +Inf, so
// round(-1.5) == -1 rather than -2; callers in this file pass
// math.Abs results and sample counts, which are non-negative.
func round(v float64) float64 {
	return math.Floor(v + 0.5)
}
// Flush prepares the contents of a MetricAggregator for sending via the Sender.
// It converts counters to per-second rates and computes min/max/mean/median/
// stddev plus the configured percentile statistics for each timer.
func (a *MetricAggregator) Flush(flushInterval time.Duration) {
	a.statser.Gauge("aggregator.metricmaps_received", float64(a.metricMapsReceived), nil)
	flushInSeconds := float64(flushInterval) / float64(time.Second)
	// Derive each counter's per-second rate from the flush interval.
	a.metricMap.Counters.Each(func(key, tagsKey string, counter gostatsd.Counter) {
		counter.PerSecond = float64(counter.Value) / flushInSeconds
		a.metricMap.Counters[key][tagsKey] = counter
	})
	a.metricMap.Timers.Each(func(key, tagsKey string, timer gostatsd.Timer) {
		// Timers tagged as histograms are bucketed instead of aggregated
		// into percentile statistics.
		if hasHistogramTag(timer) {
			timer.Histogram = latencyHistogram(timer, a.histogramLimit)
			a.metricMap.Timers[key][tagsKey] = timer
			return
		}
		if count := len(timer.Values); count > 0 {
			sort.Float64s(timer.Values)
			timer.Min = timer.Values[0]
			timer.Max = timer.Values[count-1]
			n := len(timer.Values)
			count := float64(n)
			// Prefix sums of values and squared values let each percentile
			// threshold below be computed in O(1).
			cumulativeValues := make([]float64, n)
			cumulSumSquaresValues := make([]float64, n)
			cumulativeValues[0] = timer.Min
			cumulSumSquaresValues[0] = timer.Min * timer.Min
			for i := 1; i < n; i++ {
				cumulativeValues[i] = timer.Values[i] + cumulativeValues[i-1]
				cumulSumSquaresValues[i] = timer.Values[i]*timer.Values[i] + cumulSumSquaresValues[i-1]
			}
			var sumSquares = timer.Min * timer.Min
			var mean = timer.Min
			var sum = timer.Min
			var thresholdBoundary = timer.Max
			for pct, pctStruct := range a.percentThresholds {
				numInThreshold := n
				if n > 1 {
					numInThreshold = int(round(math.Abs(pct) / 100 * count))
					if numInThreshold == 0 {
						continue
					}
					// A positive percentile keeps the lowest values; a
					// negative one keeps the highest.
					if pct > 0 {
						thresholdBoundary = timer.Values[numInThreshold-1]
						sum = cumulativeValues[numInThreshold-1]
						sumSquares = cumulSumSquaresValues[numInThreshold-1]
					} else {
						// NOTE(review): when numInThreshold == n (e.g. pct of
						// -100) the n-numInThreshold-1 index is -1 — confirm
						// such thresholds cannot be configured.
						thresholdBoundary = timer.Values[n-numInThreshold]
						sum = cumulativeValues[n-1] - cumulativeValues[n-numInThreshold-1]
						sumSquares = cumulSumSquaresValues[n-1] - cumulSumSquaresValues[n-numInThreshold-1]
					}
					mean = sum / float64(numInThreshold)
				}
				if !a.disabledSubtypes.CountPct {
					timer.Percentiles.Set(pctStruct.count, float64(numInThreshold))
				}
				if !a.disabledSubtypes.MeanPct {
					timer.Percentiles.Set(pctStruct.mean, mean)
				}
				if !a.disabledSubtypes.SumPct {
					timer.Percentiles.Set(pctStruct.sum, sum)
				}
				if !a.disabledSubtypes.SumSquaresPct {
					timer.Percentiles.Set(pctStruct.sumSquares, sumSquares)
				}
				if pct > 0 {
					if !a.disabledSubtypes.UpperPct {
						timer.Percentiles.Set(pctStruct.upper, thresholdBoundary)
					}
				} else {
					if !a.disabledSubtypes.LowerPct {
						timer.Percentiles.Set(pctStruct.lower, thresholdBoundary)
					}
				}
			}
			sum = cumulativeValues[n-1]
			sumSquares = cumulSumSquaresValues[n-1]
			mean = sum / count
			var sumOfDiffs float64
			for i := 0; i < n; i++ {
				sumOfDiffs += (timer.Values[i] - mean) * (timer.Values[i] - mean)
			}
			// Median: middle of the sorted values, averaging the two middle
			// values for an even count.
			mid := int(math.Floor(count / 2))
			if math.Mod(count, 2) == 0 {
				timer.Median = (timer.Values[mid-1] + timer.Values[mid]) / 2
			} else {
				timer.Median = timer.Values[mid]
			}
			timer.Mean = mean
			timer.StdDev = math.Sqrt(sumOfDiffs / count)
			timer.Sum = sum
			timer.SumSquares = sumSquares
			// SampledCount accounts for client-side sampling; Count is its
			// rounded integer form.
			timer.Count = int(round(timer.SampledCount))
			timer.PerSecond = timer.SampledCount / flushInSeconds
		} else {
			timer.Count = 0
			timer.SampledCount = 0
			timer.PerSecond = 0
		}
		a.metricMap.Timers[key][tagsKey] = timer
	})
}
// RunMetrics records the statser used to emit internal metrics.
// It does not start any goroutines; it only stores the dependency
// for later use by the aggregator.
func (a *MetricAggregator) RunMetrics(ctx context.Context, statser stats.Statser) {
	a.statser = statser
}
// Process invokes f synchronously with the aggregator's current metric map.
func (a *MetricAggregator) Process(f ProcessFunc) {
	f(a.metricMap)
}
// isExpired reports whether a metric's age (now minus its last-update
// timestamp) exceeds the configured expiry interval. An interval of zero
// disables expiry entirely.
func isExpired(interval time.Duration, now, ts gostatsd.Nanotime) bool {
	if interval == 0 {
		return false
	}
	age := time.Duration(now - ts)
	return age > interval
}
// deleteMetric removes a single (key, tagsKey) child from the given metric
// family and, if the key is left with no children, removes the key itself
// so empty inner maps do not accumulate.
func deleteMetric(key, tagsKey string, metrics gostatsd.AggregatedMetrics) {
	metrics.DeleteChild(key, tagsKey)
	if !metrics.HasChildren(key) {
		metrics.Delete(key)
	}
}
// Reset clears the contents of a MetricAggregator.
//
// For every metric family it either drops an entry (when older than the
// per-type expiry interval) or replaces it with an "empty" value that keeps
// its identity (Timestamp, Source, Tags) so the series keeps being reported
// between flushes:
//   - counters are reset to the zero value,
//   - timers keep their backing Values slice truncated to length 0 and,
//     when histogram-tagged, get a zeroed histogram,
//   - gauges are left untouched (they retain the last observed value),
//   - sets get a fresh empty Values map.
func (a *MetricAggregator) Reset() {
	a.metricMapsReceived = 0
	nowNano := gostatsd.Nanotime(a.now().UnixNano())
	a.metricMap.Counters.Each(func(key, tagsKey string, counter gostatsd.Counter) {
		if isExpired(a.expiryIntervalCounter, nowNano, counter.Timestamp) {
			deleteMetric(key, tagsKey, a.metricMap.Counters)
		} else {
			// Keep identity, zero the accumulated value.
			a.metricMap.Counters[key][tagsKey] = gostatsd.Counter{
				Timestamp: counter.Timestamp,
				Source: counter.Source,
				Tags: counter.Tags,
			}
		}
	})
	a.metricMap.Timers.Each(func(key, tagsKey string, timer gostatsd.Timer) {
		if isExpired(a.expiryIntervalTimer, nowNano, timer.Timestamp) {
			deleteMetric(key, tagsKey, a.metricMap.Timers)
		} else {
			if hasHistogramTag(timer) {
				// Values[:0] reuses the existing backing array to avoid
				// reallocating on the next interval.
				a.metricMap.Timers[key][tagsKey] = gostatsd.Timer{
					Timestamp: timer.Timestamp,
					Source: timer.Source,
					Tags: timer.Tags,
					Values: timer.Values[:0],
					Histogram: emptyHistogram(timer, a.histogramLimit),
				}
			} else {
				a.metricMap.Timers[key][tagsKey] = gostatsd.Timer{
					Timestamp: timer.Timestamp,
					Source: timer.Source,
					Tags: timer.Tags,
					Values: timer.Values[:0],
				}
			}
		}
	})
	a.metricMap.Gauges.Each(func(key, tagsKey string, gauge gostatsd.Gauge) {
		if isExpired(a.expiryIntervalGauge, nowNano, gauge.Timestamp) {
			deleteMetric(key, tagsKey, a.metricMap.Gauges)
		}
		// No reset for gauges, they keep the last value until expiration
	})
	a.metricMap.Sets.Each(func(key, tagsKey string, set gostatsd.Set) {
		if isExpired(a.expiryIntervalSet, nowNano, set.Timestamp) {
			deleteMetric(key, tagsKey, a.metricMap.Sets)
		} else {
			a.metricMap.Sets[key][tagsKey] = gostatsd.Set{
				Values: make(map[string]struct{}),
				Timestamp: set.Timestamp,
				Source: set.Source,
				Tags: set.Tags,
			}
		}
	})
}
// ReceiveMap takes a single metric map and aggregates its values into the
// aggregator's own metric map, counting the map for internal statistics.
func (a *MetricAggregator) ReceiveMap(mm *gostatsd.MetricMap) {
	a.metricMapsReceived++
	a.metricMap.Merge(mm)
}
package basic
import "github.com/airmap/tegola"
// ClonePoint returns a basic.Point holding copies of the given
// tegola.Point's X/Y coordinates.
func ClonePoint(pt tegola.Point) Point {
	return Point{pt.X(), pt.Y()}
}
// ClonePoint3 returns a basic.Point3 holding copies of the given
// tegola.Point3's X/Y/Z coordinates.
func ClonePoint3(pt tegola.Point3) Point3 {
	return Point3{pt.X(), pt.Y(), pt.Z()}
}
// CloneMultiPoint returns a basic.MultiPoint copied from the given
// tegola.MultiPoint. An empty input yields a nil slice.
func CloneMultiPoint(mpt tegola.MultiPoint) MultiPoint {
	var bmpt MultiPoint
	for _, pt := range mpt.Points() {
		bmpt = append(bmpt, ClonePoint(pt))
	}
	return bmpt
}
/*
// CloneMultiPoint3 will return a basic.MultiPoint3 for the given tegol.MultiPoint3
func CloneMultiPoint3(mpt tegola.MultiPoint3) MultiPoint3 {
var bmpt MultiPoint3
for _, pt := range mpt.Points() {
bmpt = append(bmpt, ClonePoint3(pt))
}
return bmpt
}
*/
// CloneLine returns a basic.Line copied point-by-point from the given
// tegola.LineString. An empty input yields a nil slice.
func CloneLine(line tegola.LineString) (l Line) {
	for _, pt := range line.Subpoints() {
		l = append(l, Point{pt.X(), pt.Y()})
	}
	return l
}
// CloneMultiLine returns a basic.MultiLine copied from the given
// tegola.MultiLine, cloning each contained line.
func CloneMultiLine(mline tegola.MultiLine) (ml MultiLine) {
	for _, ln := range mline.Lines() {
		ml = append(ml, CloneLine(ln))
	}
	return ml
}
// ClonePolygon returns a basic.Polygon copied from the given
// tegola.Polygon, cloning each ring (subline).
func ClonePolygon(polygon tegola.Polygon) (ply Polygon) {
	for _, ln := range polygon.Sublines() {
		ply = append(ply, CloneLine(ln))
	}
	return ply
}
// CloneMultiPolygon returns a basic.MultiPolygon copied from the given
// tegola.MultiPolygon, cloning each contained polygon.
func CloneMultiPolygon(mpolygon tegola.MultiPolygon) (mply MultiPolygon) {
	for _, ply := range mpolygon.Polygons() {
		mply = append(mply, ClonePolygon(ply))
	}
	return mply
}
// Clone deep-copies any supported tegola.Geometry into its basic
// counterpart. It returns nil for geometry types it does not recognise.
func Clone(geo tegola.Geometry) Geometry {
	switch g := geo.(type) {
	case tegola.Point:
		return ClonePoint(g)
	case tegola.MultiPoint:
		return CloneMultiPoint(g)
	case tegola.LineString:
		return CloneLine(g)
	case tegola.MultiLine:
		return CloneMultiLine(g)
	case tegola.Polygon:
		return ClonePolygon(g)
	case tegola.MultiPolygon:
		return CloneMultiPolygon(g)
	}
	return nil
}
<tutorial>
Performance benchmark example of using 51Degrees Hash Trie device detection.
The example shows how to:
<ol>
<li>Instantiate the 51Degrees device detection provider.
<p><pre class="prettyprint lang-go">
var provider = FiftyOneDegreesTrieV3.NewProvider(dataFile)
</pre></p>
<li>Open an input file with a list of User-Agents.
<p><pre class="prettyprint lang-go">
fin, err := os.Open("20000 User Agents.csv")
</pre></p>
<li>Read user agents from a file and calculate the ammount of time it takes to
match them all using the provider.
</ol>
This example assumes you have the 51Degrees Go API installed correctly,
see the instructions in the Go readme file in this repository:
(Device-Detection/go/README.md)
<p>The examples also assumes you have access to a Hash Trie data file and
have set the path to "20000 User Agents.csv" correctly. Both of these files
need to be available in the data folder in the root of this repository. Please
see data/TRIE.txt for more details on downloading the Hash Trie data file.</p>
</tutorial>
*/
// Snippet Start
package main
import (
"fmt"
"./src/trie"
"os"
"log"
"strings"
"sync"
"time"
"io/ioutil"
)
// Used to control multi threaded performance.
type performanceState struct {
userAgents []string
userAgentsCount int
count int
max int
progress int
calibration int
numberOfThreads int
}
// threads is the number of concurrent worker goroutines per pass.
const threads = 4
// passes is the number of repetitions averaged per test.
const passes = 5
// progressMarks is the width of the console progress bar in characters.
const progressMarks = 40
// mutex synchronizes threads when updating and reporting progress.
var mutex = &sync.Mutex{}
// reportProgress atomically adds count to the shared progress counter and
// redraws the progress bar in place. When not calibrating it also prints
// the id of the last matched device as evidence that real detections are
// happening.
func reportProgress(state *performanceState, count int, device string) {
	// Lock the state while the counters are updated and the bar redrawn.
	// Deferred so the mutex is released even if a print panics.
	mutex.Lock()
	defer mutex.Unlock()
	// Increase the count.
	state.count += count
	full := state.count / state.progress
	empty := (state.max - state.count) / state.progress
	// Update the UI.
	fmt.Printf("\r\t[")
	for i := 0; i < full; i++ {
		fmt.Print("=")
	}
	for j := 0; j < empty; j++ {
		fmt.Print(" ")
	}
	fmt.Print("]")
	// If in real detection mode then print the id of the device found to
	// prove the test is actually doing something!
	if state.calibration == 0 {
		fmt.Printf(" %s ", device)
	}
}
// runPerformanceTest loops over all User-Agents read from the file in a
// single worker goroutine. When state.calibration is enabled the records
// are only iterated (no matching), measuring the fixed loop overhead.
func runPerformanceTest(provider FiftyOneDegreesTrieV3.Provider,
                       state *performanceState,
                       wg *sync.WaitGroup) {
	defer wg.Done()
	var count = 0
	var device string
	// Loop over all User-Agents.
	for _, record := range state.userAgents{
		// Match only when not calibrating; over-long records are skipped.
		if len(record) < 1024 && state.calibration == 0 {
			match := provider.GetMatch(record)
			device = match.GetDeviceId()
			// Release the native match object.
			FiftyOneDegreesTrieV3.DeleteMatch(match);
		}
		// Increase the local counter.
		count++
		// Print a progress marker once per tick, then reset the local count.
		if count == state.progress {
			reportProgress(state, count, device);
			count = 0
		}
	}
	// Finally report any remainder below a full tick.
	reportProgress(state, count, device);
}
// performanceTest runs one pass of the benchmark: it starts
// state.numberOfThreads worker goroutines that each walk the full
// User-Agent list, and blocks until every worker has finished. When
// state.calibration is set the file is only iterated and no detections
// are performed.
func performanceTest(provider FiftyOneDegreesTrieV3.Provider,
                     state performanceState) {
	var wg sync.WaitGroup
	wg.Add(state.numberOfThreads)
	for worker := 0; worker < state.numberOfThreads; worker++ {
		go runPerformanceTest(provider, &state, &wg)
	}
	// Block until every worker has called Done.
	wg.Wait()
}
// performTest runs the named test `passes` times and returns the average
// wall-clock duration of a single pass.
func performTest(provider FiftyOneDegreesTrieV3.Provider,
	state performanceState, test string) time.Duration {
	start := time.Now()
	// Perform the test for a number of passes.
	for pass := 1; pass <= passes; pass++ {
		fmt.Printf("\r\n%s pass %d of %d: \n\n", test, pass, passes)
		performanceTest(provider, state)
	}
	elapsed := time.Since(start)
	// Return the average time taken to complete a single pass.
	return elapsed / passes
}
// perf_trie benchmarks the provider against the User-Agents in inputFile.
// It first runs a calibration pass (iteration only) so loop overhead can be
// subtracted from the detection pass, then reports detections per second
// and time per detection.
func perf_trie(provider FiftyOneDegreesTrieV3.Provider, inputFile string) {
	// Read the User-Agents into an array.
	content, err := ioutil.ReadFile(inputFile)
	if err != nil {
		log.Fatal(err)
	}
	records := strings.Split(string(content), "\n")
	// Get the number of records, used to print the progress bar.
	numrecords := len(records)
	max := numrecords * threads
	progress := max / progressMarks
	state := performanceState{
		userAgents:      records,
		userAgentsCount: numrecords,
		count:           0,
		max:             max,
		progress:        progress,
		calibration:     1, // first pass measures loop overhead only
		numberOfThreads: threads,
	}
	// Run the process without doing any detections to get a calibration time.
	calibration := performTest(provider, state, "Calibration")
	// Process the User-Agents doing device detection.
	state.calibration = 0
	test := performTest(provider, state, "Detection test")
	totalTime := test - calibration
	fmt.Println("\r\n")
	fmt.Println("Time taken for a single thread: ", totalTime)
	fmt.Printf("Detections per second for %d thread(s): %.2f\n",
		threads,
		((float64(len(records)) * threads) / totalTime.Seconds()))
	fmt.Printf("Time per detection (ms): %v\n",
		(totalTime.Seconds()*1000)/(float64(len(records))*threads))
}
func main() {
args := os.Args[1:]
fmt.Print("\n")
fmt.Print("\t#############################################################\n")
fmt.Print("\t# #\n")
fmt.Print("\t# This program can be used to test the performance of the #\n")
fmt.Print("\t# 51Degrees 'Hash Trie' Go API. #\n")
fmt.Print("\t# #\n")
fmt.Print("\t# The test will read a list of User Agents and calculate #\n")
fmt.Print("\t# the number of detections per second. #\n")
fmt.Print("\t# #\n")
fmt.Print("\t# Command line arguments should be a trie format data #\n")
fmt.Print("\t# file and a csv file containing a list of user agents. #\n")
fmt.Print("\t# A test file of 1 million can be downloaded from #\n")
fmt.Print("\t# http://51degrees.com/million.zip #\n")
fmt.Print("\t# #\n")
fmt.Print("\t#############################################################\n")
fmt.Print("\n")
filename := ""
if len(args) > 0 {
filename = args[0]
} else {
filename = "../data/51Degrees-LiteV3.4.trie"
}
userAgentsFile := ""
if len(args) > 1 {
userAgentsFile = args[1]
} else {
userAgentsFile = "../data/20000 User Agents.csv"
}
requiredPropertiesArg := ""
if len(args) > 2 {
requiredPropertiesArg = args[2]
} else {
requiredPropertiesArg = ""
}
fmt.Println("Data file: ", filename)
fmt.Println("User-Agents file: ", userAgentsFile)
var provider =
FiftyOneDegreesTrieV3.NewProvider(filename, requiredPropertiesArg)
// Run the performance tests.
perf_trie(provider, userAgentsFile)
} | PerfHashTrie.go | 0.547706 | 0.64072 | PerfHashTrie.go | starcoder |
package graph
import (
i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e "time"
i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization"
)
// UploadSession models an in-progress resumable upload: the service hands
// back an uploadUrl plus expiry and missing-range bookkeeping.
type UploadSession struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{};
    // The date and time in UTC that the upload session will expire. The complete file must be uploaded before this expiration time is reached.
    expirationDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time;
    // A collection of byte ranges that the server is missing for the file. These ranges are zero indexed and of the format 'start-end' (e.g. '0-26' to indicate the first 27 bytes of the file). When uploading files as Outlook attachments, instead of a collection of ranges, this property always indicates a single value '{start}', the location in the file where the next upload should begin.
    nextExpectedRanges []string;
    // The URL endpoint that accepts PUT requests for byte ranges of the file.
    uploadUrl *string;
}
// NewUploadSession instantiates a new uploadSession and sets the default values.
func NewUploadSession() *UploadSession {
	session := &UploadSession{}
	session.SetAdditionalData(make(map[string]interface{}))
	return session
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
// Nil receivers are tolerated and yield nil.
func (m *UploadSession) GetAdditionalData() map[string]interface{} {
	if m == nil {
		return nil
	}
	return m.additionalData
}
// GetExpirationDateTime gets the expirationDateTime property value. The date and time in UTC that the upload session will expire. The complete file must be uploaded before this expiration time is reached.
func (m *UploadSession) GetExpirationDateTime() *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time {
	if m == nil {
		return nil
	}
	return m.expirationDateTime
}
// GetNextExpectedRanges gets the nextExpectedRanges property value. A collection of byte ranges that the server is missing for the file. These ranges are zero indexed and of the format 'start-end' (e.g. '0-26' to indicate the first 27 bytes of the file). When uploading files as Outlook attachments, instead of a collection of ranges, this property always indicates a single value '{start}', the location in the file where the next upload should begin.
func (m *UploadSession) GetNextExpectedRanges() []string {
	if m == nil {
		return nil
	}
	return m.nextExpectedRanges
}
// GetUploadUrl gets the uploadUrl property value. The URL endpoint that accepts PUT requests for byte ranges of the file.
func (m *UploadSession) GetUploadUrl() *string {
	if m == nil {
		return nil
	}
	return m.uploadUrl
}
// GetFieldDeserializers returns the deserialization information for the
// current model: a map from JSON field name to a closure that parses that
// field from a ParseNode and stores it on the receiver. Fields absent from
// the payload are simply never invoked.
func (m *UploadSession) GetFieldDeserializers()(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) {
    res := make(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error))
    // expirationDateTime is parsed as a time value.
    res["expirationDateTime"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetTimeValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetExpirationDateTime(val)
        }
        return nil
    }
    // nextExpectedRanges is parsed as a collection of primitive strings and
    // unboxed from []*string into []string.
    res["nextExpectedRanges"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetCollectionOfPrimitiveValues("string")
        if err != nil {
            return err
        }
        if val != nil {
            res := make([]string, len(val))
            for i, v := range val {
                res[i] = *(v.(*string))
            }
            m.SetNextExpectedRanges(res)
        }
        return nil
    }
    // uploadUrl is parsed as a plain string.
    res["uploadUrl"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetUploadUrl(val)
        }
        return nil
    }
    return res
}
// IsNil reports whether the receiver is nil.
func (m *UploadSession) IsNil()(bool) {
    return m == nil
}
// Serialize writes the current object to the given SerializationWriter,
// emitting each known property and then the additional-data bag. It stops
// and returns the first write error encountered.
func (m *UploadSession) Serialize(writer i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.SerializationWriter)(error) {
    {
        err := writer.WriteTimeValue("expirationDateTime", m.GetExpirationDateTime())
        if err != nil {
            return err
        }
    }
    // Only written when the collection is non-nil.
    if m.GetNextExpectedRanges() != nil {
        err := writer.WriteCollectionOfStringValues("nextExpectedRanges", m.GetNextExpectedRanges())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("uploadUrl", m.GetUploadUrl())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteAdditionalData(m.GetAdditionalData())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *UploadSession) SetAdditionalData(value map[string]interface{})() {
if m != nil {
m.additionalData = value
}
}
// SetExpirationDateTime sets the expirationDateTime property value. The date and time in UTC that the upload session will expire. The complete file must be uploaded before this expiration time is reached.
func (m *UploadSession) SetExpirationDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
if m != nil {
m.expirationDateTime = value
}
}
// SetNextExpectedRanges sets the nextExpectedRanges property value. A collection of byte ranges that the server is missing for the file. These ranges are zero indexed and of the format 'start-end' (e.g. '0-26' to indicate the first 27 bytes of the file). When uploading files as Outlook attachments, instead of a collection of ranges, this property always indicates a single value '{start}', the location in the file where the next upload should begin.
func (m *UploadSession) SetNextExpectedRanges(value []string)() {
if m != nil {
m.nextExpectedRanges = value
}
}
// SetUploadUrl sets the uploadUrl property value. The URL endpoint that accepts PUT requests for byte ranges of the file.
func (m *UploadSession) SetUploadUrl(value *string)() {
if m != nil {
m.uploadUrl = value
}
} | models/microsoft/graph/upload_session.go | 0.642769 | 0.411643 | upload_session.go | starcoder |
package amsclient
import (
"encoding/json"
)
// ClusterMetricsNodes struct for ClusterMetricsNodes
//
// Node metrics broken down by role. All fields are *float64 with
// `omitempty` so an absent value can be distinguished from an explicit 0.
// NOTE(review): role semantics (compute/infra/master) inferred from field
// names — confirm against the AMS API documentation.
type ClusterMetricsNodes struct {
	Compute *float64 `json:"compute,omitempty"`
	Infra *float64 `json:"infra,omitempty"`
	Master *float64 `json:"master,omitempty"`
	Total *float64 `json:"total,omitempty"`
}
// NewClusterMetricsNodes instantiates a new ClusterMetricsNodes object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewClusterMetricsNodes() *ClusterMetricsNodes {
	this := ClusterMetricsNodes{}
	return &this
}
// NewClusterMetricsNodesWithDefaults instantiates a new ClusterMetricsNodes object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewClusterMetricsNodesWithDefaults() *ClusterMetricsNodes {
	this := ClusterMetricsNodes{}
	return &this
}
// GetCompute returns the Compute field value if set, zero value otherwise.
func (o *ClusterMetricsNodes) GetCompute() float64 {
	if o == nil || o.Compute == nil {
		var ret float64
		return ret
	}
	return *o.Compute
}

// GetComputeOk returns a tuple with the Compute field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *ClusterMetricsNodes) GetComputeOk() (*float64, bool) {
	if o == nil || o.Compute == nil {
		return nil, false
	}
	return o.Compute, true
}

// HasCompute reports whether the Compute field has been set.
func (o *ClusterMetricsNodes) HasCompute() bool {
	return o != nil && o.Compute != nil
}

// SetCompute gets a reference to the given float64 and assigns it to the Compute field.
func (o *ClusterMetricsNodes) SetCompute(v float64) {
	o.Compute = &v
}
// GetInfra returns the Infra field value if set, zero value otherwise.
func (o *ClusterMetricsNodes) GetInfra() float64 {
	if o == nil || o.Infra == nil {
		var ret float64
		return ret
	}
	return *o.Infra
}

// GetInfraOk returns a tuple with the Infra field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *ClusterMetricsNodes) GetInfraOk() (*float64, bool) {
	if o == nil || o.Infra == nil {
		return nil, false
	}
	return o.Infra, true
}

// HasInfra reports whether the Infra field has been set.
func (o *ClusterMetricsNodes) HasInfra() bool {
	return o != nil && o.Infra != nil
}

// SetInfra gets a reference to the given float64 and assigns it to the Infra field.
func (o *ClusterMetricsNodes) SetInfra(v float64) {
	o.Infra = &v
}
// GetMaster returns the Master field value if set, zero value otherwise.
func (o *ClusterMetricsNodes) GetMaster() float64 {
	if o == nil || o.Master == nil {
		var ret float64
		return ret
	}
	return *o.Master
}

// GetMasterOk returns a tuple with the Master field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *ClusterMetricsNodes) GetMasterOk() (*float64, bool) {
	if o == nil || o.Master == nil {
		return nil, false
	}
	return o.Master, true
}

// HasMaster reports whether the Master field has been set.
func (o *ClusterMetricsNodes) HasMaster() bool {
	return o != nil && o.Master != nil
}

// SetMaster gets a reference to the given float64 and assigns it to the Master field.
func (o *ClusterMetricsNodes) SetMaster(v float64) {
	o.Master = &v
}
// GetTotal returns the Total field value if set, zero value otherwise.
func (o *ClusterMetricsNodes) GetTotal() float64 {
	if o == nil || o.Total == nil {
		var ret float64
		return ret
	}
	return *o.Total
}

// GetTotalOk returns a tuple with the Total field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *ClusterMetricsNodes) GetTotalOk() (*float64, bool) {
	if o == nil || o.Total == nil {
		return nil, false
	}
	return o.Total, true
}

// HasTotal reports whether the Total field has been set.
func (o *ClusterMetricsNodes) HasTotal() bool {
	return o != nil && o.Total != nil
}

// SetTotal gets a reference to the given float64 and assigns it to the Total field.
func (o *ClusterMetricsNodes) SetTotal(v float64) {
	o.Total = &v
}
// MarshalJSON serializes only the fields that have been set (non-nil
// pointers), mirroring the `omitempty` struct tags.
func (o ClusterMetricsNodes) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.Compute != nil {
		toSerialize["compute"] = o.Compute
	}
	if o.Infra != nil {
		toSerialize["infra"] = o.Infra
	}
	if o.Master != nil {
		toSerialize["master"] = o.Master
	}
	if o.Total != nil {
		toSerialize["total"] = o.Total
	}
	return json.Marshal(toSerialize)
}
// NullableClusterMetricsNodes is a three-state wrapper that distinguishes
// "unset" (isSet false), "set to null" (isSet true, value nil) and "set to
// a value" — states a plain pointer cannot express across JSON round-trips.
type NullableClusterMetricsNodes struct {
	value *ClusterMetricsNodes
	isSet bool
}

// Get returns the wrapped value; may be nil even when set.
func (v NullableClusterMetricsNodes) Get() *ClusterMetricsNodes {
	return v.value
}

// Set stores val (possibly nil) and marks the wrapper as set.
func (v *NullableClusterMetricsNodes) Set(val *ClusterMetricsNodes) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set has been called (even with nil).
func (v NullableClusterMetricsNodes) IsSet() bool {
	return v.isSet
}

// Unset returns the wrapper to the "unset" state.
func (v *NullableClusterMetricsNodes) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableClusterMetricsNodes wraps val in an already-set wrapper.
func NewNullableClusterMetricsNodes(val *ClusterMetricsNodes) *NullableClusterMetricsNodes {
	return &NullableClusterMetricsNodes{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (nil encodes as JSON null).
func (v NullableClusterMetricsNodes) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks the wrapper set.
func (v *NullableClusterMetricsNodes) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package metrics
import (
// "fmt"
"math"
"github.com/gcla/sklearn/base"
"gonum.org/v1/gonum/mat"
)
// float is a local shorthand alias for float64.
type float = float64

// constVector aliases base.MatConst, a constant-valued matrix helper.
type constVector = base.MatConst
// R2Score """R^2 (coefficient of determination) regression score function.
// Best possible score is 1.0 and it can be negative (because the
// model can be arbitrarily worse). A constant model that always
// predicts the expected value of y, disregarding the input features,
// would get a R^2 score of 0.0.
// Read more in the :ref:`User Guide <r2Score>`.
// Parameters
// ----------
// yTrue : array-like of shape = (nSamples) or (nSamples, nOutputs)
// Ground truth (correct) target values.
// yPred : array-like of shape = (nSamples) or (nSamples, nOutputs)
// Estimated target values.
// sampleWeight : array-like of shape = (nSamples), optional
// Sample weights.
// multioutput : string in ['rawValues', 'uniformAverage', \
// 'varianceWeighted'] or None or array-like of shape (nOutputs)
// Defines aggregating of multiple output scores.
// Array-like value defines weights used to average scores.
// Default is "uniformAverage".
// 'rawValues' :
// Returns a full set of scores in case of multioutput input.
// 'uniformAverage' :
// Scores of all outputs are averaged with uniform weight.
// 'varianceWeighted' :
// Scores of all outputs are averaged, weighted by the variances
// of each individual output.
// .. versionchanged:: 0.19
// Default value of multioutput is 'uniformAverage'.
// Returns
// -------
// z : float or ndarray of floats
// The R^2 score or ndarray of scores if 'multioutput' is
// 'rawValues'.
// Notes
// -----
// This is not a symmetric function.
// Unlike most other scores, R^2 score may be negative (it need not actually
// be the square of a quantity R).
// References
// ----------
// .. [1] `Wikipedia entry on the Coefficient of determination
// <https://en.wikipedia.org/wiki/CoefficientOfDetermination>`_
// Examples
// --------
// >>> from sklearn.metrics import r2Score
// >>> yTrue = [3, -0.5, 2, 7]
// >>> yPred = [2.5, 0.0, 2, 8]
// >>> r2Score(yTrue, yPred) # doctest: +ELLIPSIS
// 0.948...
// >>> yTrue = [[0.5, 1], [-1, 1], [7, -6]]
// >>> yPred = [[0, 2], [-1, 2], [8, -5]]
// >>> r2Score(yTrue, yPred, multioutput='varianceWeighted')
// ... # doctest: +ELLIPSIS
// 0.938...
// >>> yTrue = [1,2,3]
// >>> yPred = [1,2,3]
// >>> r2Score(yTrue, yPred)
// 1.0
// >>> yTrue = [1,2,3]
// >>> yPred = [2,2,2]
// >>> r2Score(yTrue, yPred)
// 0.0
// >>> yTrue = [1,2,3]
// >>> yPred = [3,2,1]
// >>> r2Score(yTrue, yPred)
// -3.0
// """
// See the contract in the comment block above. sampleWeight, when non-nil,
// is an (nSamples x 1) column vector of per-sample weights; when nil, all
// samples are weighted 1.
func R2Score(yTrue, yPred *mat.Dense, sampleWeight *mat.Dense, multioutput string) *mat.Dense {
	nSamples, nOutputs := yTrue.Dims()
	if sampleWeight == nil {
		// Default: uniform weight of 1 per sample.
		sampleWeight = mat.DenseCopyOf(base.MatConst{Rows: nSamples, Columns: 1, Value: 1.})
	}
	// numerator[j] = weighted sum of squared residuals for output j.
	numerator := mat.NewDense(1, nOutputs, nil)
	diff := mat.NewDense(nSamples, nOutputs, nil)
	diff.Sub(yPred, yTrue)
	diff2 := mat.NewDense(nSamples, nOutputs, nil)
	diff2.MulElem(diff, diff)
	numerator.Mul(sampleWeight.T(), diff2)
	sampleWeightSum := mat.Sum(sampleWeight)
	// yTrueAvg[j] = weighted mean of yTrue column j.
	yTrueAvg := mat.NewDense(1, nOutputs, nil)
	yTrueAvg.Mul(sampleWeight.T(), yTrue)
	yTrueAvg.Scale(1./sampleWeightSum, yTrueAvg)
	// Reuse diff2 to hold squared deviations from the weighted mean.
	diff2.Apply(func(i int, j int, v float64) float64 {
		v = yTrue.At(i, j) - yTrueAvg.At(0, j)
		return v * v
	}, diff2)
	// denominator[j] = weighted total sum of squares for output j.
	denominator := mat.NewDense(1, nOutputs, nil)
	denominator.Mul(sampleWeight.T(), diff2)
	r2score := mat.NewDense(1, nOutputs, nil)
	r2score.Apply(func(i int, j int, v float64) float64 {
		// Clamp the denominator to avoid division by ~zero for constant targets.
		d := math.Max(denominator.At(i, j), 1e-20)
		return 1. - numerator.At(i, j)/d
	}, r2score)
	switch multioutput {
	case "raw_values":
		return r2score
	case "variance_weighted":
		// Average the per-output scores weighted by each output's variance.
		r2 := mat.NewDense(1, 1, nil)
		r2.Mul(denominator, r2score.T())
		sumden := mat.Sum(denominator)
		r2.Scale(1./sumden, r2)
		return r2
	default: // "uniform_average":
		return mat.NewDense(1, 1, []float64{mat.Sum(r2score) / float64(nOutputs)})
	}
}
// MeanSquaredError regression loss
// Read more in the :ref:`User Guide <mean_squared_error>`.
// Parameters
// ----------
// y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
// Ground truth (correct) target values.
// y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
// Estimated target values.
// sample_weight : array-like of shape = (n_samples), optional
// Sample weights.
// multioutput : string in ['raw_values', 'uniform_average']
// or array-like of shape (n_outputs)
// Defines aggregating of multiple output values.
// Array-like value defines weights used to average errors.
// 'raw_values' :
// Returns a full set of errors in case of multioutput input.
// 'uniform_average' :
// Errors of all outputs are averaged with uniform weight.
// Returns
// -------
// loss : float or ndarray of floats
// A non-negative floating point value (the best value is 0.0), or an
// array of floating point values, one for each individual target.
func MeanSquaredError(yTrue, yPred mat.Matrix, sampleWeight *mat.Dense, multioutput string) *mat.Dense {
	nSamples, nOutputs := yTrue.Dims()
	// tmp[j] holds the weighted MSE for output column j.
	tmp := mat.NewDense(1, nOutputs, nil)
	tmp.Apply(func(_ int, j int, v float64) float64 {
		N, D := 0., 0.
		for i := 0; i < nSamples; i++ {
			ydiff := yPred.At(i, j) - yTrue.At(i, j)
			w := 1.
			if sampleWeight != nil {
				// sampleWeight is an (nSamples x 1) column vector of
				// per-sample weights (same convention as R2Score), so it is
				// indexed by the sample i, not the output column j.
				w = sampleWeight.At(i, 0)
			}
			N += w * (ydiff * ydiff)
			D += w
		}
		return N / D
	}, tmp)
	switch multioutput {
	case "raw_values":
		return tmp
	default: // "uniform_average":
		return mat.NewDense(1, 1, []float64{mat.Sum(tmp) / float64(nOutputs)})
	}
}
// MeanAbsoluteError regression loss
// Read more in the :ref:`User Guide <mean_absolute_error>`.
// Parameters
// ----------
// y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
// Ground truth (correct) target values.
// y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
// Estimated target values.
// sample_weight : array-like of shape = (n_samples), optional
// Sample weights.
// multioutput : string in ['raw_values', 'uniform_average']
// or array-like of shape (n_outputs)
// Defines aggregating of multiple output values.
// Array-like value defines weights used to average errors.
// 'raw_values' :
// Returns a full set of errors in case of multioutput input.
// 'uniform_average' :
// Errors of all outputs are averaged with uniform weight.
// Returns
// -------
// loss : float or ndarray of floats
// If multioutput is 'raw_values', then mean absolute error is returned
// for each output separately.
// If multioutput is 'uniform_average' or an ndarray of weights, then the
// weighted average of all output errors is returned.
// MAE output is non-negative floating point. The best value is 0.0.
// Examples
// --------
// >>> from sklearn.metrics import mean_absolute_error
// >>> y_true = [3, -0.5, 2, 7]
// >>> y_pred = [2.5, 0.0, 2, 8]
// >>> mean_absolute_error(y_true, y_pred)
// 0.5
// >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
// >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
// >>> mean_absolute_error(y_true, y_pred)
// 0.75
// >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
// array([ 0.5, 1. ])
// >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
// ... # doctest: +ELLIPSIS
// 0.849...
func MeanAbsoluteError(yTrue, yPred mat.Matrix, sampleWeight *mat.Dense, multioutput string) *mat.Dense {
nSamples, nOutputs := yTrue.Dims()
tmp := mat.NewDense(1, nOutputs, nil)
tmp.Apply(func(_ int, j int, v float64) float64 {
N, D := 0., 0.
for i := 0; i < nSamples; i++ {
ydiff := yPred.At(i, j) - yTrue.At(i, j)
w := 1.
if sampleWeight != nil {
w = sampleWeight.At(0, j)
}
N += w * math.Abs(ydiff)
D += w
}
return N / D
}, tmp)
switch multioutput {
case "raw_values":
return tmp
default: // "uniform_average":
return mat.NewDense(1, 1, []float64{mat.Sum(tmp) / float64(nOutputs)})
}
} | metrics/regression.go | 0.814238 | 0.587499 | regression.go | starcoder |
package raygui
import (
rl "github.com/gen2brain/raylib-go/raylib"
)
// ControlState enumerates the interaction states a GUI control can be in.
type ControlState int

const (
	// Disabled is the inactive state; note GetInteractionState never
	// returns it — callers set it explicitly.
	Disabled ControlState = iota
	// Normal is the default state for rendering GUI elements.
	Normal
	// Focused indicates the mouse is hovering over the GUI element.
	Focused
	// Pressed indicates the mouse is hovering over the GUI element and LMB is pressed down.
	Pressed
	// Clicked indicates the mouse is hovering over the GUI element and LMB has just been released.
	Clicked
)
// IsColliding will return true if 'point' is within any of the given rectangles.
func IsInAny(point rl.Vector2, rectangles ...rl.Rectangle) bool {
for _, rect := range rectangles {
if rl.CheckCollisionPointRec(point, rect) {
return true
}
}
return false
}
// GetInteractionState determines the current state of a control based on mouse position and
// button states.
// Case order matters: a pointer outside every rectangle is always Normal; a
// held LMB wins over a click; a hover with no button activity yields Focused.
// Disabled is never produced here — callers set it explicitly.
func GetInteractionState(rectangles ...rl.Rectangle) ControlState {
	switch {
	case !IsInAny(rl.GetMousePosition(), rectangles...):
		return Normal
	case rl.IsMouseButtonDown(rl.MouseLeftButton):
		return Pressed
	case rl.IsMouseButtonReleased(rl.MouseLeftButton) || rl.IsMouseButtonPressed(rl.MouseLeftButton):
		return Clicked
	default:
		return Focused
	}
}
// ConstrainRectangle bumps each dimension of bounds up to its ideal size
// whenever that dimension falls below the given minimum.
func ConstrainRectangle(bounds *rl.Rectangle, minWidth, idealWidth, minHeight, idealHeight int32) {
	if tooNarrow := int32(bounds.Width) < minWidth; tooNarrow {
		bounds.Width = float32(idealWidth)
	}
	if tooShort := int32(bounds.Height) < minHeight; tooShort {
		bounds.Height = float32(idealHeight)
	}
}
// InsetRectangle returns the rectangle obtained by shrinking outer by a
// uniform margin of inset pixels on every side.
func InsetRectangle(outer rl.RectangleInt32, inset int32) rl.RectangleInt32 {
	shrink := 2 * inset
	return rl.RectangleInt32{
		X:      outer.X + inset,
		Y:      outer.Y + inset,
		Width:  outer.Width - shrink,
		Height: outer.Height - shrink,
	}
}
// DrawInsetRectangle is a helper to draw a box inset by a margin of an outer container.
func DrawInsetRectangle(outer rl.RectangleInt32, inset int32, color rl.Color) {
	inside := InsetRectangle(outer, inset)
	rl.DrawRectangle(inside.X, inside.Y, inside.Width, inside.Height, color)
}
// DrawBorderedRectangle is a helper to draw a box with a border around it.
// The full bounds are filled with borderColor first, then the interior is
// painted over with insideColor, leaving a borderWidth-wide frame visible.
func DrawBorderedRectangle(bounds rl.RectangleInt32, borderWidth int32, borderColor, insideColor rl.Color) {
	inside := InsetRectangle(bounds, borderWidth)
	rl.DrawRectangle(bounds.X, bounds.Y, bounds.Width, bounds.Height, borderColor)
	rl.DrawRectangle(inside.X, inside.Y, inside.Width, inside.Height, insideColor)
} | raygui/raygui.go | 0.665737 | 0.497192 | raygui.go | starcoder
package option
// Optional represents a value of type T that may be absent.
// The zero value is an empty Optional.
type Optional[T any] struct {
	value    T
	hasValue bool
}
// Some returns an Optional holding the given value.
func Some[T any](value T) Optional[T] {
	return Optional[T]{value: value, hasValue: true}
}
// None returns an empty Optional of type T.
func None[T any]() Optional[T] {
	return Optional[T]{hasValue: false}
}
// FromValueOrFalse converts a (value, ok) pair — such as the result of a map
// lookup or type assertion — into an Optional.
func FromValueOrFalse[T any](value T, ok bool) Optional[T] {
	if ok {
		return Some(value)
	}
	return None[T]()
}
// FromValueOrError converts a (value, error) pair into an Optional:
// Some(value) when err is nil, None otherwise.
// Bug fix: the condition was previously inverted — it returned Some on a
// non-nil error and None on success.
func FromValueOrError[T any](value T, err error) Optional[T] {
	if err == nil {
		return Some(value)
	}
	return None[T]()
}
// Value returns the contained value and whether it is present.
// When empty, value is the zero value of T.
func (o Optional[T]) Value() (value T, ok bool) {
	return o.value, o.hasValue
}
// ValueOrDefault returns the contained value, or the zero value of T when empty.
func (o Optional[T]) ValueOrDefault() T {
	if o.hasValue {
		return o.value
	}
	var fallback T
	return fallback
}
// ValueOrElse returns the contained value, or fallback when empty.
func (o Optional[T]) ValueOrElse(fallback T) T {
	if o.hasValue {
		return o.value
	}
	return fallback
}
// ValueOrError returns (value, nil) when present, or (zero value, ifNone) when empty.
func (o Optional[T]) ValueOrError(ifNone error) (T, error) {
	v, ok := o.Value()
	if ok {
		return v, nil
	}
	// v already holds T's zero value here.
	return v, ifNone
}
// ValueOrPanic returns the contained value, panicking with message when empty.
func (o Optional[T]) ValueOrPanic(message string) T {
	v, ok := o.Value()
	if !ok {
		panic(message)
	}
	return v
}
// IsPresent reports whether a value is present.
func (o Optional[_]) IsPresent() bool {
	return o.hasValue
}
// IsEmpty reports whether the Optional is empty.
func (o Optional[_]) IsEmpty() bool {
	return !o.hasValue
}
// IsPresent is the function variant of `o.IsPresent()`.
// Useful as a predicate function.
func IsPresent[T any](o Optional[T]) bool {
	return o.hasValue
}
// IsEmpty is the function variant of `o.IsEmpty()`.
// Useful as a predicate function.
func IsEmpty[T any](o Optional[T]) bool {
	return !o.hasValue
}
// OrElse returns o when it holds a value, and alternative otherwise.
func (o Optional[T]) OrElse(alternative Optional[T]) Optional[T] {
	if o.IsPresent() {
		return o
	}
	return alternative
}
// Map applies mapping to the contained value, if any, producing an Optional[R].
func Map[T any, R any](o Optional[T], mapping func(value T) R) Optional[R] {
	if v, ok := o.Value(); ok {
		return Some[R](mapping(v))
	}
	return None[R]()
}
// FlatMap applies a mapping that itself returns an Optional, without nesting.
func FlatMap[T any, R any](o Optional[T], mapping func(value T) Optional[R]) Optional[R] {
	if v, ok := o.Value(); ok {
		return mapping(v)
	}
	return None[R]()
}
// Flatten collapses a nested Optional by one level.
func Flatten[T any](o Optional[Optional[T]]) Optional[T] {
	if v, ok := o.Value(); ok {
		return v
	}
	return None[T]()
}
// Filter returns o unchanged when it holds a value satisfying predicate,
// and None otherwise. predicate is never called on an empty Optional.
func Filter[T any](o Optional[T], predicate func(value T) bool) Optional[T] {
	if v, ok := o.Value(); ok && predicate(v) {
		return o
	} else {
		return None[T]()
	}
} | monads/option/option.go | 0.838382 | 0.464173 | option.go | starcoder
package is
import "reflect"
// isType reports whether obj's reflect.Kind equals check.
// A nil obj has no dynamic type and never matches any kind; previously this
// panicked because reflect.TypeOf(nil) returns nil.
func isType(obj interface{}, check reflect.Kind) bool {
	t := reflect.TypeOf(obj)
	if t == nil {
		return false
	}
	return t.Kind() == check
}
// isStructType reports whether obj's (possibly pointer) type name equals
// check, e.g. "bytes.Buffer" matches bytes.Buffer and *bytes.Buffer alike.
// A nil obj never matches (previously reflect.ValueOf(nil).Type() panicked).
func isStructType(obj interface{}, check string) bool {
	if obj == nil {
		return false
	}
	value := reflect.ValueOf(obj).Type().String()
	if len(value) == 0 {
		return false
	}
	if string(value[0]) == "*" {
		value = value[1:]
	}
	return value == check
}
// Test if obj is an array
func Array(obj interface{}) bool {
	return isType(obj, reflect.Array)
}
// Test if obj is a bool
func Bool(obj interface{}) bool {
	return isType(obj, reflect.Bool)
}
// Test if obj is of struct bytes.Buffer
func Buffer(obj interface{}) bool {
	return isStructType(obj, "bytes.Buffer")
}
// Test if obj is of byte (alias for uint8)
func Byte(obj interface{}) bool {
	return Uint8(obj)
}
// Test if obj is chan
func Chan(obj interface{}) bool {
	return isType(obj, reflect.Chan)
}
// Test if obj is Complex64
func Complex64(obj interface{}) bool {
	return isType(obj, reflect.Complex64)
}
// Test if obj is Complex128
func Complex128(obj interface{}) bool {
	return isType(obj, reflect.Complex128)
}
// Test if obj is Float32
func Float32(obj interface{}) bool {
	return isType(obj, reflect.Float32)
}
// Test if obj is Float64
func Float64(obj interface{}) bool {
	return isType(obj, reflect.Float64)
}
// Test if obj is Func
func Func(obj interface{}) bool {
	return isType(obj, reflect.Func)
}
// Test if obj is Int
func Int(obj interface{}) bool {
	return isType(obj, reflect.Int)
}
// Test if obj is Int8
func Int8(obj interface{}) bool {
	return isType(obj, reflect.Int8)
}
// Test if obj is Int16
func Int16(obj interface{}) bool {
	return isType(obj, reflect.Int16)
}
// Test if obj is Int32
func Int32(obj interface{}) bool {
	return isType(obj, reflect.Int32)
}
// Test if obj is Int64
func Int64(obj interface{}) bool {
	return isType(obj, reflect.Int64)
}
// Test if obj is Interface
func Interface(obj interface{}) bool {
	return isType(obj, reflect.Interface)
}
// Test if obj is Map
func Map(obj interface{}) bool {
	return isType(obj, reflect.Map)
}
// Test if obj is Nil.
// Note: an interface holding a typed nil pointer is not == nil and returns false here.
func Nil(obj interface{}) bool {
	return obj == nil
}
// Check if struct is of a struct type
func OfStructType(obj interface{}, check string) bool {
	return isStructType(obj, check)
}
// Test if obj is Ptr
func Ptr(obj interface{}) bool {
	return isType(obj, reflect.Ptr)
}
// Test if obj is of struct regexp.Regexp
func Regexp(obj interface{}) bool {
	return isStructType(obj, "regexp.Regexp")
}
// Test if obj is of rune (alias for int32)
func Rune(obj interface{}) bool {
	return Int32(obj)
}
// Test if obj is Slice
func Slice(obj interface{}) bool {
	return isType(obj, reflect.Slice)
}
// Test if obj is String
func String(obj interface{}) bool {
	return isType(obj, reflect.String)
}
// Test if obj is Struct
func Struct(obj interface{}) bool {
	return isType(obj, reflect.Struct)
}
// Test if obj is of struct time.Time
func Time(obj interface{}) bool {
	return isStructType(obj, "time.Time")
}
// Test if obj is Uint
func Uint(obj interface{}) bool {
	return isType(obj, reflect.Uint)
}
// Test if obj is Uint8
func Uint8(obj interface{}) bool {
	return isType(obj, reflect.Uint8)
}
// Test if obj is Uint16
func Uint16(obj interface{}) bool {
	return isType(obj, reflect.Uint16)
}
// Test if obj is Uint32
func Uint32(obj interface{}) bool {
	return isType(obj, reflect.Uint32)
}
// Test if obj is Uint64
func Uint64(obj interface{}) bool {
	return isType(obj, reflect.Uint64)
}
// Test if obj is Uintptr
func Uintptr(obj interface{}) bool {
	return isType(obj, reflect.Uintptr)
}
// Test if obj is UnsafePointer (reflect.UnsafePointer kind)
func UnsafePointer(obj interface{}) bool {
	return isType(obj, reflect.UnsafePointer)
} | is.go | 0.737253 | 0.422088 | is.go | starcoder
package barneshut
import (
"fmt"
"math"
"sort"
)
// MaxRepulsiveForce records the strongest repulsive acceleration found in the
// last ComputeMaxRepulsiveForce pass, plus the position and index of the body
// experiencing it.
type MaxRepulsiveForce struct {
	AccX, AccY, X, Y float64
	Norm float64 // direction and norm
	Idx int // body index where repulsive vector is max
}
// ComputeMaxRepulsiveForce scans every body's acceleration vector and stores
// the one with the largest Euclidean norm in r.maxRepulsiveForce.
// NOTE(review): only Norm is reset up front, so with zero bodies the other
// fields keep whatever values the previous pass left behind.
func (r *Run) ComputeMaxRepulsiveForce() {
	r.maxRepulsiveForce.Norm = 0.0
	// parse bodies
	for idx := range *r.bodies {
		acc := &((*r.bodiesAccel)[idx])
		norm := math.Sqrt(acc.X*acc.X + acc.Y*acc.Y)
		if norm > r.maxRepulsiveForce.Norm {
			r.maxRepulsiveForce.AccX = acc.X
			r.maxRepulsiveForce.AccY = acc.Y
			r.maxRepulsiveForce.Idx = idx
			r.maxRepulsiveForce.Norm = norm
			r.maxRepulsiveForce.X = (*r.bodies)[idx].X
			r.maxRepulsiveForce.Y = (*r.bodies)[idx].Y
		}
	}
}
// ComputeDensityTencilePerTerritoryString returns the per-tencile territory
// densities formatted as "%3.2f" strings.
func (r *Run) ComputeDensityTencilePerTerritoryString() [10]string {
	var formatted [10]string
	for i, d := range r.ComputeDensityTencilePerTerritory() {
		formatted[i] = fmt.Sprintf("%3.2f", d)
	}
	return formatted
}
// ComputeDensityTencilePerTerritory bins bodies into an nbVillagePerAxe x
// nbVillagePerAxe grid, sorts the per-cell body counts, and returns the mean
// density of each tenth ("tencile") relative to the overall average density,
// expressed in percentage points and truncated to 0.01 precision.
// NOTE(review): the cell index math assumes body coordinates lie in [0, 1);
// a coordinate of exactly 1.0 would index one past the grid — confirm
// upstream clamping.
func (r *Run) ComputeDensityTencilePerTerritory() [10]float64 {
	// parse all bodies
	// prepare the village
	villages := make([][]int, nbVillagePerAxe)
	for x := range villages {
		villages[x] = make([]int, nbVillagePerAxe)
	}
	// parse bodies
	for _, b := range *r.bodies {
		// compute village coordinate (from 0 to nbVillagePerAxe-1)
		x := int(math.Floor(float64(nbVillagePerAxe) * b.X))
		y := int(math.Floor(float64(nbVillagePerAxe) * b.Y))
		villages[x][y]++
	}
	// flatten the grid into a single slice of per-village body counts
	nbVillages := nbVillagePerAxe * nbVillagePerAxe
	bodyCountPerVillage := make([]int, nbVillages)
	for x := range villages {
		for y := range villages[x] {
			bodyCountPerVillage[y+x*nbVillagePerAxe] = villages[x][y]
		}
	}
	sort.Ints(bodyCountPerVillage)
	var density [10]float64
	for tencile := range density {
		lowIndex := int(math.Floor(float64(nbVillages) * float64(tencile) / 10.0))
		highIndex := int(math.Floor(float64(nbVillages) * float64(tencile+1) / 10.0))
		// log.Output( 1, fmt.Sprintf( "tencile %d ", tencile))
		// log.Output( 1, fmt.Sprintf( "lowIndex %d ", lowIndex))
		// log.Output( 1, fmt.Sprintf( "highIndex %d ", highIndex))
		nbBodiesInTencile := 0
		for _, nbBodies := range bodyCountPerVillage[lowIndex:highIndex] {
			nbBodiesInTencile += nbBodies
		}
		density[tencile] = float64(nbBodiesInTencile) / float64(len(bodyCountPerVillage[lowIndex:highIndex]))
		// we compare with the average bodies per village
		density[tencile] /= float64(len(*r.bodies)) / float64(nbVillages)
		// we round the density to 0.01 precision, and put it in percentage point
		density[tencile] *= 100.0 * 100.0
		intDensity := math.Floor(density[tencile])
		density[tencile] = float64(intDensity) / 100.0
	}
	return density
} | barnes-hut/barnes-hut-stats.go | 0.725551 | 0.405213 | barnes-hut-stats.go | starcoder
package binary
import "github.com/google/gapid/core/math/u32"
// BitStream provides methods for reading and writing bits to a slice of bytes.
// Bits are packed in a least-significant-bit to most-significant-bit order.
type BitStream struct {
	Data []byte // The byte slice containing the bits
	ReadPos uint32 // The current read offset from the start of the Data slice (in bits)
	WritePos uint32 // The current write offset from the start of the Data slice (in bits)
}
// ReadBit reads a single bit from the BitStream, incrementing ReadPos by one.
// Data is indexed without bounds checks; the caller must ensure a bit remains.
func (s *BitStream) ReadBit() uint64 {
	ReadPos := s.ReadPos
	s.ReadPos = ReadPos + 1
	return (uint64(s.Data[ReadPos/8]) >> (ReadPos % 8)) & 1
}
// WriteBit writes a single bit to the BitStream, incrementing WritePos by one.
// Data grows by one byte when writing at the current end of the buffer; an
// existing bit at WritePos is overwritten (set or cleared) in place.
func (s *BitStream) WriteBit(bit uint64) {
	b := s.WritePos / 8
	if b == uint32(len(s.Data)) {
		s.Data = append(s.Data, 0)
	}
	if bit&1 == 1 {
		s.Data[b] |= byte(1 << (s.WritePos % 8))
	} else {
		s.Data[b] &= ^byte(1 << (s.WritePos % 8))
	}
	s.WritePos++
}
// Read reads the specified number of bits from the BitStream, incrementing the ReadPos by the
// specified number of bits and returning the bits packed into a uint64. The bits are packed into
// the uint64 from LSB to MSB. count must be at most 64; Data is indexed
// without bounds checks, so the caller must ensure count bits remain.
func (s *BitStream) Read(count uint32) uint64 {
	byteIdx := s.ReadPos / 8
	bitIdx := s.ReadPos & 7
	// Start: consume the bits remaining in the current (possibly partial) byte.
	val := uint64(s.Data[byteIdx]) >> bitIdx
	readCount := 8 - bitIdx
	if count <= readCount {
		s.ReadPos += count
		return val & ((1 << count) - 1)
	}
	s.ReadPos += readCount
	byteIdx++
	bitIdx = 0
	// Whole bytes: splice complete bytes into ascending bit positions of val.
	for ; readCount+7 < count; readCount += 8 {
		val |= uint64(s.Data[byteIdx]) << readCount
		byteIdx++
		s.ReadPos += 8
	}
	// Remainder: the final partial byte, masked to the bits still needed.
	rem := count - readCount
	if rem > 0 {
		val |= (uint64(s.Data[byteIdx]) & ((1 << rem) - 1)) << readCount
		s.ReadPos += rem
	}
	return val
}
// Write writes the specified number of bits from the packed uint64, incrementing the WritePos by
// the specified number of bits. The bits are read from the uint64 from LSB to MSB.
// count must be at most 64. Data grows (doubling capacity) as required.
func (s *BitStream) Write(bits uint64, count uint32) {
	// Ensure the buffer is big enough for all them bits.
	if reqBytes := (int(s.WritePos) + int(count) + 7) / 8; reqBytes > len(s.Data) {
		if reqBytes <= cap(s.Data) {
			s.Data = s.Data[:reqBytes]
		} else {
			buf := make([]byte, reqBytes, reqBytes*2)
			copy(buf, s.Data)
			s.Data = buf
		}
	}
	byteIdx := s.WritePos / 8
	bitIdx := s.WritePos & 7
	// Start: fill the remainder of the current partially-written byte.
	if bitIdx != 0 {
		writeCount := u32.Min(8-bitIdx, count)
		mask := byte(((1 << writeCount) - 1) << bitIdx)
		s.Data[byteIdx] = (s.Data[byteIdx] & ^mask) | (byte(bits<<bitIdx) & mask)
		s.WritePos += writeCount
		count, byteIdx, bitIdx, bits = count-writeCount, byteIdx+1, 0, bits>>writeCount
	}
	// Whole bytes
	for count >= 8 {
		s.Data[byteIdx] = uint8(bits)
		s.WritePos += 8
		count, byteIdx, bits = count-8, byteIdx+1, bits>>8
	}
	// Remainder: masked write of the final partial byte.
	if count > 0 {
		mask := byte(((1 << count) - 1) << bitIdx)
		s.Data[byteIdx] = (s.Data[byteIdx] & ^mask) | (byte(bits<<bitIdx) & mask)
		s.WritePos += count
	}
} | core/data/binary/bitstream.go | 0.718693 | 0.648731 | bitstream.go | starcoder
package main
/**
Golang implementation of https://github.com/skipperkongen/jts-algorithm-pack/blob/master/src/org/geodelivery/jap/concavehull/SnapHull.java
which is a Java port of st_concavehull from Postgis 2.0
*/
import (
"github.com/furstenheim/SimpleRTree"
"github.com/furstenheim/go-convex-hull-2d"
"github.com/paulmach/go.geo"
"github.com/paulmach/go.geo/reducers"
"math"
"sort"
"sync"
)
// convexHullFlatPoints is currently unused in this file; kept for compatibility.
type convexHullFlatPoints FlatPoints
// lexSorter sorts interleaved [x0, y0, x1, y1, ...] coordinates
// lexicographically by (x, y).
type lexSorter FlatPoints
// Less reports whether point i sorts strictly before point j.
// Fix: the previous implementation returned true for equal points, which
// violates sort.Interface's requirement that Less define a strict
// (irreflexive, transitive) ordering.
func (s lexSorter) Less(i, j int) bool {
	if s[2*i] != s[2*j] {
		return s[2*i] < s[2*j]
	}
	return s[2*i+1] < s[2*j+1]
}
// Len returns the number of (x, y) points.
func (s lexSorter) Len() int {
	return len(s) / 2
}
// Swap exchanges points i and j.
func (s lexSorter) Swap(i, j int) {
	s[2*i], s[2*i+1], s[2*j], s[2*j+1] = s[2*j], s[2*j+1], s[2*i], s[2*i+1]
}
// DEFAULT_SEGLENGTH is both the edge-subdivision step used when walking the
// convex hull and the Douglas-Peucker simplification tolerance.
const DEFAULT_SEGLENGTH = 0.001
// concaver bundles the spatial index and segment length used while pulling
// the convex hull in towards the point set.
type concaver struct {
	rtree *SimpleRTree.SimpleRTree
	seglength float64
}
// Compute returns the concave hull of points.
// The input slice is sorted lexicographically in place before processing.
func Compute(points FlatPoints) (concaveHull FlatPoints) {
	sort.Sort(lexSorter(points))
	return ComputeFromSorted(points)
}
// Compute concave hull from sorted points. Points are expected to be sorted lexicographically by (x,y).
// The convex hull is computed on a separate goroutine while the R-tree loads
// on this one; pointsCopy exists because the two computations mutate their
// input arrays independently.
func ComputeFromSorted(points FlatPoints) (concaveHull FlatPoints) {
	// Create a copy so that convex hull and index can modify the array in different ways
	pointsCopy := make(FlatPoints, 0, len(points))
	pointsCopy = append(pointsCopy, points...)
	rtree := SimpleRTree.New()
	var wg sync.WaitGroup
	wg.Add(2)
	// Convex hull
	go func() {
		points = go_convex_hull_2d.NewFromSortedArray(points).(FlatPoints)
		wg.Done()
	}()
	func() {
		rtree.LoadSortedArray(SimpleRTree.FlatPoints(pointsCopy))
		wg.Done()
	}()
	wg.Wait()
	var c concaver
	c.seglength = DEFAULT_SEGLENGTH
	c.rtree = rtree
	return c.computeFromSorted(points)
}
// computeFromSorted walks each convex-hull edge, pulls it towards the nearest
// input points (segmentize), then simplifies the resulting path with
// Douglas-Peucker at the seglength tolerance.
func (c *concaver) computeFromSorted(convexHull FlatPoints) (concaveHull FlatPoints) {
	// degenerate case: fewer than 3 points already are their own hull
	if convexHull.Len() < 3 {
		return convexHull
	}
	concaveHull = make([]float64, 0, 2*convexHull.Len())
	x0, y0 := convexHull.Take(0)
	concaveHull = append(concaveHull, x0, y0)
	for i := 0; i < convexHull.Len(); i++ {
		x1, y1 := convexHull.Take(i)
		var x2, y2 float64
		// last edge wraps back to the first point to close the ring
		if i == convexHull.Len()-1 {
			x2, y2 = convexHull.Take(0)
		} else {
			x2, y2 = convexHull.Take(i + 1)
		}
		sideSplit := c.segmentize(x1, y1, x2, y2)
		concaveHull = append(concaveHull, sideSplit...)
	}
	path := reducers.DouglasPeucker(geo.NewPathFromFlatXYData(concaveHull), c.seglength)
	// reused allocated array
	concaveHull = concaveHull[0:0]
	reducedPoints := path.Points()
	for _, p := range reducedPoints {
		concaveHull = append(concaveHull, p.Lng(), p.Lat())
	}
	return concaveHull
}
// Split side in small edges, for each edge find closest point. Remove duplicates.
// Instead of querying every subdivision point, the interval is explored by
// binary subdivision: when the nearest point at the midpoint matches the
// nearest point already known at an endpoint, the sub-interval on that side
// cannot contribute a new point and is pruned.
func (c *concaver) segmentize(x1, y1, x2, y2 float64) (points []float64) {
	dist := math.Sqrt((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2))
	nSegments := math.Ceil(dist / c.seglength)
	factor := 1 / nSegments
	flatPoints := make([]float64, 0, int(2*nSegments))
	vX := factor * (x2 - x1)
	vY := factor * (y2 - y1)
	// closestPoints maps subdivision index -> nearest input point found there
	closestPoints := make(map[int][2]float64)
	closestPoints[0] = [2]float64{x1, y1}
	closestPoints[int(nSegments)] = [2]float64{x2, y2}
	if nSegments > 1 {
		stack := make([]searchItem, 0)
		stack = append(stack, searchItem{left: 0, right: int(nSegments), lastLeft: 0, lastRight: int(nSegments)})
		for len(stack) > 0 {
			var item searchItem
			item, stack = stack[len(stack)-1], stack[:len(stack)-1]
			index := (item.left + item.right) / 2
			currentX := x1 + vX*float64(index)
			currentY := y1 + vY*float64(index)
			x, y, _, _ := c.rtree.FindNearestPoint(currentX, currentY)
			isNewLeft := x != closestPoints[item.lastLeft][0] || y != closestPoints[item.lastLeft][1]
			isNewRight := x != closestPoints[item.lastRight][0] || y != closestPoints[item.lastRight][1]
			// the nearest point differs from both known neighbours: record it
			// and keep searching on both sides
			if isNewLeft && isNewRight {
				closestPoints[index] = [2]float64{x, y}
				if index-item.left > 1 {
					stack = append(stack, searchItem{left: item.left, right: index, lastLeft: item.lastLeft, lastRight: index})
				}
				if item.right-index > 1 {
					stack = append(stack, searchItem{left: index, right: item.right, lastLeft: index, lastRight: item.lastRight})
				}
			} else if isNewLeft {
				if index-item.left > 1 {
					stack = append(stack, searchItem{left: item.left, right: index, lastLeft: item.lastLeft, lastRight: item.lastRight})
				}
			} else if isNewRight {
				// don't add point to closest points, but we need to keep looking on the right side
				if item.right-index > 1 {
					stack = append(stack, searchItem{left: index, right: item.right, lastLeft: item.lastLeft, lastRight: item.lastRight})
				}
			}
		}
	}
	// always add last point of the segment
	for i := 1; i <= int(nSegments); i++ {
		point, ok := closestPoints[i]
		if ok {
			flatPoints = append(flatPoints, point[0], point[1])
		}
	}
	return flatPoints
}
// searchItem is one interval of subdivision indices still to explore, together
// with the indices of the nearest known points at its outer boundaries.
type searchItem struct {
	left, right, lastLeft, lastRight int
}
// FlatPoints stores interleaved [x0, y0, x1, y1, ...] coordinates and adapts
// them to the convex-hull package's Interface.
type FlatPoints []float64
// Len returns the number of (x, y) points.
func (fp FlatPoints) Len() int {
	return len(fp) / 2
}
// Slice returns the sub-collection of points [i, j).
func (fp FlatPoints) Slice(i, j int) go_convex_hull_2d.Interface {
	return fp[2*i : 2*j]
}
// Swap exchanges points i and j.
func (fp FlatPoints) Swap(i, j int) {
	fp[2*i], fp[2*i+1], fp[2*j], fp[2*j+1] = fp[2*j], fp[2*j+1], fp[2*i], fp[2*i+1]
}
// Take returns the coordinates of point i.
func (fp FlatPoints) Take(i int) (x1, y1 float64) {
	return fp[2*i], fp[2*i+1]
} | polygon-map/concave_hull.go | 0.851413 | 0.434221 | concave_hull.go | starcoder
package omnik
import (
"encoding/hex"
"strconv"
"time"
)
// InverterMsg provides an easy way to turn an omnik message into actual values.
type InverterMsg struct {
	Data []byte
}
// Sample is an inverter sample DTO.
type Sample struct {
	Timestamp string
	Date string
	Time string
	Temperature float32
	EnergyTotal float32
	EnergyToday float32
	EnergyHours int
	Power float32
	PvVoltage1 float32
	PvVoltage2 float32
	PvVoltage3 float32
	PvCurrent1 float32
	PvCurrent2 float32
	PvCurrent3 float32
	ACVoltage1 float32
	ACVoltage2 float32
	ACVoltage3 float32
	ACCurrent1 float32
	ACCurrent2 float32
	ACCurrent3 float32
	ACFrequency1 float32
	ACFrequency2 float32
	ACFrequency3 float32
	ACPower1 float32
	ACPower2 float32
	ACPower3 float32
}
// GetSample retrieves a sample ready for use, stamped with currentTime.
func (msg *InverterMsg) GetSample(currentTime time.Time) Sample {
	return Sample{
		Timestamp: currentTime.Format("2006-01-02 15:04:05"),
		Date: currentTime.Format("2006-01-02"),
		Time: currentTime.Format("15:04:05"),
		Temperature: msg.Temperature(),
		EnergyTotal: msg.EnergyTotal(),
		EnergyToday: msg.EnergyToday(),
		EnergyHours: msg.HoursGenerated(),
		Power: msg.PowerOutput(),
		PvVoltage1: msg.PvVoltage(1),
		PvVoltage2: msg.PvVoltage(2),
		PvVoltage3: msg.PvVoltage(3),
		PvCurrent1: msg.PvCurrent(1),
		PvCurrent2: msg.PvCurrent(2),
		PvCurrent3: msg.PvCurrent(3),
		ACVoltage1: msg.ACVoltage(1),
		ACVoltage2: msg.ACVoltage(2),
		ACVoltage3: msg.ACVoltage(3),
		ACCurrent1: msg.ACCurrent(1),
		ACCurrent2: msg.ACCurrent(2),
		ACCurrent3: msg.ACCurrent(3),
		ACFrequency1: msg.ACFrequency(1),
		ACFrequency2: msg.ACFrequency(2),
		ACFrequency3: msg.ACFrequency(3),
		ACPower1: msg.ACPower(1),
		ACPower2: msg.ACPower(2),
		ACPower3: msg.ACPower(3),
	}
}
// TimeObject retrieves a time object parsed from the sample's Timestamp.
func (s *Sample) TimeObject() (time.Time, error) {
	return time.Parse("2006-01-02 15:04:05", s.Timestamp)
}
// getInt decodes numBytes bytes starting at offset begin as a big-endian
// unsigned integer (via a hex round-trip). Reads beyond the message yield 0.
// All offsets used in this file stay well below the int8 limit of 127, so
// begin+numBytes cannot overflow.
func (msg *InverterMsg) getInt(begin int8, numBytes int8) int64 {
	if int(begin+numBytes) > len(msg.Data) {
		return 0
	}
	byteVal := msg.Data[begin : begin+numBytes]
	hexVal := hex.EncodeToString(byteVal)
	intVal, _ := strconv.ParseInt(hexVal, 16, 64)
	return intVal
}
// getShort decodes a 2-byte field at begin and scales it down by divider.
func (msg *InverterMsg) getShort(begin int8, divider int8) float32 {
	return float32(msg.getInt(begin, 2)) / float32(divider)
}
// getLong decodes a 4-byte field at begin and scales it down by divider.
func (msg *InverterMsg) getLong(begin int8, divider int8) float32 {
	return float32(msg.getInt(begin, 4)) / float32(divider)
}
// getString returns the raw bytes [begin, end) as a string.
func (msg *InverterMsg) getString(begin int8, end int8) string {
	return string(msg.Data[begin:end])
}
// clampChannel forces channels outside the valid range 1..3 back to channel 1.
// Fix: the previous per-method guards tested channel < 0, letting channel 0
// slip through and read the field two bytes before channel 1's offset.
func clampChannel(channel int8) int8 {
	if channel < 1 || channel > 3 {
		return 1
	}
	return channel
}
// Temperature recorded by the inverter.
func (msg *InverterMsg) Temperature() float32 {
	return msg.getShort(31, 10)
}
// ID of the inverter.
func (msg *InverterMsg) ID() string {
	return msg.getString(15, 31)
}
// EnergyTotal retrieves the total energy generated by inverter in KWH.
func (msg *InverterMsg) EnergyTotal() float32 {
	return msg.getLong(71, 10)
}
// EnergyToday retrieves the energy generated by inverter today in KWH.
func (msg *InverterMsg) EnergyToday() float32 {
	return msg.getShort(69, 100)
}
// HoursGenerated retrieves the number of hours the inverter generated electricity.
func (msg *InverterMsg) HoursGenerated() int {
	return int(msg.getLong(75, 1))
}
// PowerOutput retrieves the current power output in Watts.
func (msg *InverterMsg) PowerOutput() float32 {
	return msg.getShort(59, 1)
}
// PvVoltage retrieves the voltage a given PV channel is currently generating.
// Invalid channels fall back to channel 1.
func (msg *InverterMsg) PvVoltage(channel int8) float32 {
	channel = clampChannel(channel)
	num := 33 + (channel-1)*2
	return msg.getShort(num, 10)
}
// PvCurrent retrieves the current a given PV channel is currently generating in amps.
// Invalid channels fall back to channel 1.
func (msg *InverterMsg) PvCurrent(channel int8) float32 {
	channel = clampChannel(channel)
	num := 39 + (channel-1)*2
	return msg.getShort(num, 10)
}
// ACCurrent retrieves the current a given AC channel is currently outputting in amps.
// Invalid channels fall back to channel 1.
func (msg *InverterMsg) ACCurrent(channel int8) float32 {
	channel = clampChannel(channel)
	num := 45 + (channel-1)*2
	return msg.getShort(num, 10)
}
// ACVoltage retrieves the voltage a given AC channel is currently outputting.
// Invalid channels fall back to channel 1.
func (msg *InverterMsg) ACVoltage(channel int8) float32 {
	channel = clampChannel(channel)
	num := 51 + (channel-1)*2
	return msg.getShort(num, 10)
}
// ACFrequency retrieves the current frequency of a given channel.
// Invalid channels fall back to channel 1.
func (msg *InverterMsg) ACFrequency(channel int8) float32 {
	channel = clampChannel(channel)
	num := 57 + (channel-1)*4
	return msg.getShort(num, 100)
}
// ACPower retrieves the output of the given AC output channel in Watts.
// NOTE(review): channel 0 passes this guard (channel < 0) and reads offset 55
// instead of channel 1's offset 59 — the guard likely should be channel < 1,
// as only channels 1..3 have defined offsets. Confirm with the protocol spec.
func (msg *InverterMsg) ACPower(channel int8) float32 {
	if channel < 0 || channel > 3 {
		channel = 1
	}
	num := 59 + (channel-1)*4
	return msg.getShort(num, 1)
} | invertermsg.go | 0.679498 | 0.46557 | invertermsg.go | starcoder
// +build reach
package cover
import (
"io"
. "github.com/tsavola/reach/internal"
)
// Location records that this code location was reached.
// Cover comes from the dot-imported internal package.
func Location() {
	Cover()
}
// Cond records which of the given conditions held at this location.
func Cond(conditions ...bool) {
	Cover(conditions...)
}
// Bool records both possible values of x and returns x unchanged.
func Bool(x bool) bool {
	Cover(!x, x)
	return x
}
// Min records whether x is at or above its minimum and returns x unchanged.
func Min(x, min int) int {
	Cover(x == min, x > min)
	return x
}
// MinInt8 is Min for int8.
func MinInt8(x, min int8) int8 {
	Cover(x == min, x > min)
	return x
}
// MinInt16 is Min for int16.
func MinInt16(x, min int16) int16 {
	Cover(x == min, x > min)
	return x
}
// MinInt32 is Min for int32.
func MinInt32(x, min int32) int32 {
	Cover(x == min, x > min)
	return x
}
// MinInt64 is Min for int64.
func MinInt64(x, min int64) int64 {
	Cover(x == min, x > min)
	return x
}
// MinUint is Min for uint.
func MinUint(x, min uint) uint {
	Cover(x == min, x > min)
	return x
}
// MinUint8 is Min for uint8.
func MinUint8(x, min uint8) uint8 {
	Cover(x == min, x > min)
	return x
}
// MinUint16 is Min for uint16.
func MinUint16(x, min uint16) uint16 {
	Cover(x == min, x > min)
	return x
}
// MinUint32 is Min for uint32.
func MinUint32(x, min uint32) uint32 {
	Cover(x == min, x > min)
	return x
}
// MinUint64 is Min for uint64.
func MinUint64(x, min uint64) uint64 {
	Cover(x == min, x > min)
	return x
}
// MinUintptr is Min for uintptr.
func MinUintptr(x, min uintptr) uintptr {
	Cover(x == min, x > min)
	return x
}
// MinMax records whether x is at its minimum, strictly between the bounds, or
// at its maximum, and returns x unchanged.
func MinMax(x, min, max int) int {
	Cover(x == min, x > min && x < max, x == max)
	return x
}
// MinMaxInt8 is MinMax for int8.
func MinMaxInt8(x, min, max int8) int8 {
	Cover(x == min, x > min && x < max, x == max)
	return x
}
// MinMaxInt16 is MinMax for int16.
func MinMaxInt16(x, min, max int16) int16 {
	Cover(x == min, x > min && x < max, x == max)
	return x
}
// MinMaxInt32 is MinMax for int32.
func MinMaxInt32(x, min, max int32) int32 {
	Cover(x == min, x > min && x < max, x == max)
	return x
}
// MinMaxInt64 is MinMax for int64.
func MinMaxInt64(x, min, max int64) int64 {
	Cover(x == min, x > min && x < max, x == max)
	return x
}
// MinMaxUint is MinMax for uint.
func MinMaxUint(x, min, max uint) uint {
	Cover(x == min, x > min && x < max, x == max)
	return x
}
// MinMaxUint8 is MinMax for uint8.
func MinMaxUint8(x, min, max uint8) uint8 {
	Cover(x == min, x > min && x < max, x == max)
	return x
}
// MinMaxUint16 is MinMax for uint16.
func MinMaxUint16(x, min, max uint16) uint16 {
	Cover(x == min, x > min && x < max, x == max)
	return x
}
// MinMaxUint32 is MinMax for uint32.
func MinMaxUint32(x, min, max uint32) uint32 {
	Cover(x == min, x > min && x < max, x == max)
	return x
}
// MinMaxUint64 is MinMax for uint64.
func MinMaxUint64(x, min, max uint64) uint64 {
	Cover(x == min, x > min && x < max, x == max)
	return x
}
// MinMaxUintptr is MinMax for uintptr.
func MinMaxUintptr(x, min, max uintptr) uintptr {
	Cover(x == min, x > min && x < max, x == max)
	return x
}
// Error records whether x is nil or non-nil and returns x unchanged.
func Error(x error) error {
	Cover(x == nil, x != nil)
	return x
}
// EOF records whether x is nil, a non-EOF error, or io.EOF, and returns x unchanged.
func EOF(x error) error {
	Cover(x == nil, x != nil && x != io.EOF, x == io.EOF)
	return x
} | cover/cover.go | 0.560974 | 0.591074 | cover.go | starcoder
package parser
import (
"fmt"
"github.com/di-wu/parser/op"
"reflect"
"strings"
)
// InitError is an error that occurs while instantiating new structures.
type InitError struct {
	// Message describes the failure; it should offer intuitive advice on how
	// to resolve the problem.
	Message string
}
// Error implements the error interface, prefixing the message with "parser: ".
func (e *InitError) Error() string {
	return "parser: " + e.Message
}
// ExpectError is an error raised when an invalid or unsupported value is
// passed to the Parser.Expect function.
type ExpectError struct {
	Message string
}
// Error implements the error interface, prefixing the message with "expect: ".
func (e *ExpectError) Error() string {
	return "expect: " + e.Message
}
// ExpectedParseError creates an ExpectedParseError error based on the given
// start and end cursor, then resets the parser to the start cursor.
// A nil end is treated as a zero-width conflict at start. The deferred Jump
// runs after Slice has captured the conflicting text.
func (p *Parser) ExpectedParseError(expected interface{}, start, end *Cursor) *ExpectedParseError {
	if end == nil {
		end = start
	}
	defer p.Jump(start)
	return &ExpectedParseError{
		Expected: expected,
		String: p.Slice(start, end),
		Conflict: *end,
	}
}
// ExpectedParseError indicates that the parser Expected a different value than
// the Actual value present in the buffer.
type ExpectedParseError struct {
	// The value that was expected.
	Expected interface{}
	// The value it actually got.
	String string
	// The position of the conflicting value.
	Conflict Cursor
}
// Stringer renders a parser operator or literal as a human-readable string
// for error messages: runes as 'x', strings as "xy", and the op combinators
// with conventional symbols (!, ?, and[], or[], xor[], {min:max} with the
// * and + shorthands for unbounded ranges).
func Stringer(i interface{}) string {
	i = ConvertAliases(i)
	// Anonymous parser functions have no useful literal representation.
	if reflect.TypeOf(i).Kind() == reflect.Func {
		return "func"
	}
	switch v := i.(type) {
	case rune:
		return fmt.Sprintf("'%s'", string(v))
	case string:
		return fmt.Sprintf("%q", v)
	case op.Not:
		return fmt.Sprintf("!%s", Stringer(v.Value))
	case op.Ensure:
		return fmt.Sprintf("?%s", Stringer(v.Value))
	case op.And:
		and := make([]string, len(v))
		for i, v := range v {
			and[i] = Stringer(v)
		}
		return fmt.Sprintf("and[%s]", strings.Join(and, " "))
	case op.Or:
		or := make([]string, len(v))
		for i, v := range v {
			or[i] = Stringer(v)
		}
		return fmt.Sprintf("or[%s]", strings.Join(or, " "))
	case op.XOr:
		xor := make([]string, len(v))
		for i, v := range v {
			xor[i] = Stringer(v)
		}
		return fmt.Sprintf("xor[%s]", strings.Join(xor, " "))
	case op.Range:
		// Max == -1 means unbounded; {0:-1} prints as * and {1:-1} as +.
		if v.Max == -1 {
			switch v.Min {
			case 0:
				return fmt.Sprintf("%s*", Stringer(v.Value))
			case 1:
				return fmt.Sprintf("%s+", Stringer(v.Value))
			}
		}
		return fmt.Sprintf("%s{%d:%d}", Stringer(v.Value), v.Min, v.Max)
	default:
		return fmt.Sprintf("%v", v)
	}
}
// Error implements the error interface, reporting the conflict position,
// the expected value (via Stringer), and the text actually found.
// Single-byte values are quoted like runes ('x'), everything else like
// strings ("xy"). The previous version initialized got with e.String and
// then unconditionally overwrote it in both branches (dead store).
func (e *ExpectedParseError) Error() string {
	var got string
	if len(e.String) == 1 {
		got = fmt.Sprintf("'%s'", string([]rune(e.String)[0]))
	} else {
		got = fmt.Sprintf("%q", e.String)
	}
	return fmt.Sprintf(
		"parse conflict [%02d:%03d]: expected %T %s but got %s",
		e.Conflict.row, e.Conflict.column, e.Expected, Stringer(e.Expected), got,
	)
}
// UnsupportedType indicates the dynamic type of the value is unsupported.
type UnsupportedType struct {
	Value interface{}
}
// Error implements the error interface, naming the offending type via %T.
func (e *UnsupportedType) Error() string {
	return fmt.Sprintf("parse: value of type %T are not supported", e.Value)
} | errors.go | 0.677047 | 0.445349 | errors.go | starcoder
package topojson
import (
"math"
"github.com/paulmach/orb"
)
// quantize maps coordinates onto an integer grid by translating by (dx, dy)
// and scaling by (kx, ky). Transform records the inverse mapping (scale 1/k,
// translate -d) so clients can de-quantize.
type quantize struct {
	Transform *Transform
	dx, dy, kx, ky float64
}
// newQuantize builds a quantizer for the given translation and scale factors.
func newQuantize(dx, dy, kx, ky float64) *quantize {
	return &quantize{
		dx: dx,
		dy: dy,
		kx: kx,
		ky: ky,
		Transform: &Transform{
			Scale: [2]float64{1 / kx, 1 / ky},
			Translate: [2]float64{-dx, -dy},
		},
	}
}
// quantizePoint snaps p onto the grid, rounding half away from zero.
func (q *quantize) quantizePoint(p orb.Point) orb.Point {
	x := round((p[0] + q.dx) * q.kx)
	y := round((p[1] + q.dy) * q.ky)
	return orb.Point{x, y}
}
// quantizeMultiPoint quantizes every point; when skipEqual is set, runs of
// consecutive duplicates created by the grid snapping are collapsed.
// NOTE(review): the len(out) < 2 fixup indexes out[0] and would panic on an
// empty input geometry — confirm callers never pass one. The same algorithm
// is duplicated in quantizeLine and quantizeRing below for the other orb types.
func (q *quantize) quantizeMultiPoint(in orb.MultiPoint, skipEqual bool) orb.MultiPoint {
	out := orb.MultiPoint{}
	var last []float64
	for _, p := range in {
		pt := q.quantizePoint(p)
		if !pointEquals([]float64{pt[0], pt[1]}, last) || !skipEqual {
			out = append(out, pt)
			last = []float64{pt[0], pt[1]}
		}
	}
	if len(out) < 2 {
		out = append(out, out[0])
	}
	return out
}
// quantizeLine is quantizeMultiPoint for orb.LineString (same algorithm).
func (q *quantize) quantizeLine(in orb.LineString, skipEqual bool) orb.LineString {
	out := orb.LineString{}
	var last []float64
	for _, p := range in {
		pt := q.quantizePoint(p)
		if !pointEquals([]float64{pt[0], pt[1]}, last) || !skipEqual {
			out = append(out, pt)
			last = []float64{pt[0], pt[1]}
		}
	}
	if len(out) < 2 {
		out = append(out, out[0])
	}
	return out
}
// quantizeRing is quantizeMultiPoint for orb.Ring (same algorithm).
func (q *quantize) quantizeRing(in orb.Ring, skipEqual bool) orb.Ring {
	out := orb.Ring{}
	var last []float64
	for _, p := range in {
		pt := q.quantizePoint(p)
		if !pointEquals([]float64{pt[0], pt[1]}, last) || !skipEqual {
			out = append(out, pt)
			last = []float64{pt[0], pt[1]}
		}
	}
	if len(out) < 2 {
		out = append(out, out[0])
	}
	return out
}
// quantizeMultiLine quantizes each line and pads it to at least 4 points by
// repeating its first point.
func (q *quantize) quantizeMultiLine(in orb.MultiLineString, skipEqual bool) orb.MultiLineString {
	out := make(orb.MultiLineString, len(in))
	for i, line := range in {
		line = q.quantizeLine(line, skipEqual)
		for len(line) < 4 {
			line = append(line, line[0])
		}
		out[i] = line
	}
	return out
}
// quantizePolygon quantizes each ring of the polygon.
func (q *quantize) quantizePolygon(in orb.Polygon, skipEqual bool) orb.Polygon {
	out := make(orb.Polygon, len(in))
	for i, ring := range in {
		out[i] = q.quantizeRing(ring, skipEqual)
	}
	return out
}
// quantizeMultiPolygon quantizes each polygon of the collection.
func (q *quantize) quantizeMultiPolygon(in orb.MultiPolygon, skipEqual bool) orb.MultiPolygon {
	out := make(orb.MultiPolygon, len(in))
	for i, ring := range in {
		out[i] = q.quantizePolygon(ring, skipEqual)
	}
	return out
}
// round rounds half away from zero (the same behavior as math.Round, which
// this code predates).
func round(v float64) float64 {
	if v < 0 {
		return math.Ceil(v - 0.5)
	} else {
		return math.Floor(v + 0.5)
	}
} | encoding/topojson/quantize.go | 0.698535 | 0.595022 | quantize.go | starcoder
package pgtypes
import "github.com/apaxa-go/helper/mathh"
//replacer:ignore
//go:generate go run $GOPATH/src/github.com/apaxa-go/generator/replacer/main.go -- $GOFILE
// SetInt8 sets z to x and returns z.
func (z *Numeric) SetInt8(x int8) *Numeric {
	if x == 0 {
		return z.SetZero()
	}
	z.sign = numericPositive
	if x < 0 {
		z.sign = numericNegative
	}
	z.weight = 0
	// Widen to int16 before taking the absolute value so that int8(-128)
	// cannot overflow during negation.
	z.digits = []int16{mathh.AbsInt16(int16(x))}
	return z
}
// SetUint8 sets z to x and returns z.
func (z *Numeric) SetUint8(x uint8) *Numeric {
	if x == 0 {
		return z.SetZero()
	}
	z.sign = numericPositive
	z.weight = 0
	// A uint8 (0..255) always fits in a single base digit.
	z.digits = []int16{int16(x)}
	return z
}

// Uint8 returns the uint8 representation of x.
// If x is NaN, the result is 0.
// If x cannot be represented in an uint8, the result is undefined.
func (x *Numeric) Uint8() uint8 {
	// NaN, negative, non-zero weight (magnitude too large or fractional)
	// and zero (no digits) all return 0 here.
	if x.sign != numericPositive || x.weight != 0 || len(x.digits) == 0 {
		return 0
	}
	return uint8(x.digits[0])
}

// Int8 returns the int8 representation of x.
// If x is NaN, the result is 0.
// If x cannot be represented in an int8, the result is undefined.
func (x *Numeric) Int8() int8 {
	if x.sign == numericNaN || x.weight != 0 || len(x.digits) == 0 {
		return 0
	}
	if x.sign == numericNegative {
		return int8(-x.digits[0]) // First - negate, only after it type conversion!
	}
	return int8(x.digits[0])
}
//replacer:replace
//replacer:old int64 Int64
//replacer:new int Int
//replacer:new int16 Int16
//replacer:new int32 Int32
// SetInt64 sets z to x and returns z.
func (z *Numeric) SetInt64(x int64) *Numeric {
	if x == 0 {
		return z.SetZero()
	}
	if x < 0 {
		z.sign = numericNegative
	} else {
		z.sign = numericPositive
	}
	z.weight = -1
	z.digits = make([]int16, 0, 1) // as x!=0 there is at least 1 1000-base digit
	for x != 0 {
		// Extract the least significant base digit. abs is taken per digit
		// (rather than negating x up front) so MinInt64, whose negation
		// overflows, is still decomposed correctly.
		d := mathh.AbsInt16(int16(x % numericBase))
		x /= numericBase
		if d != 0 || len(z.digits) > 0 { // avoid trailing zero
			z.digits = append([]int16{d}, z.digits...)
		}
		z.weight++
	}
	return z
}

// SetUint64 sets z to x and returns z.
func (z *Numeric) SetUint64(x uint64) *Numeric {
	if x == 0 {
		return z.SetZero()
	}
	z.sign = numericPositive
	z.weight = -1
	z.digits = make([]int16, 0, 1) // as x!=0 there is at least 1 1000-base digit
	for x != 0 {
		d := int16(x % numericBase)
		x /= numericBase
		if d != 0 || len(z.digits) > 0 { // avoid trailing zero
			z.digits = append([]int16{d}, z.digits...)
		}
		z.weight++
	}
	return z
}

// Uint64 returns the uint64 representation of x.
// If x is NaN, the result is 0.
// If x cannot be represented in an uint64, the result is undefined.
func (x *Numeric) Uint64() uint64 {
	const maxWeight = mathh.Uint64Bytes / 2 // Interesting, this should work at least for 1-8 bytes [unsigned] integers
	if x.sign != numericPositive || len(x.digits) == 0 {
		return 0
	}
	if x.weight > maxWeight {
		return mathh.MaxUint64
	}
	to := mathh.Min2Int(int(x.weight), len(x.digits)-1)
	var r uint64
	// Accumulate the stored digits (most significant first)...
	for i := 0; i <= to; i++ {
		r = r*numericBase + uint64(x.digits[i])
	}
	// ...then scale up for implicit trailing zero digits that were
	// elided during Set (weight beyond the stored digits).
	for i := to + 1; i <= int(x.weight); i++ {
		r *= numericBase
	}
	return r
}

// Int64 returns the int64 representation of x.
// If x is NaN, the result is 0.
// If x cannot be represented in an int64, the result is undefined.
func (x *Numeric) Int64() int64 {
	const maxWeight = mathh.Int64Bytes / 2 // Interesting, this should work at least for 1-8 bytes [unsigned] integers
	if x.sign == numericNaN || len(x.digits) == 0 {
		return 0
	}
	var sign int64
	if x.sign == numericPositive {
		if x.weight > maxWeight {
			return mathh.MaxInt64
		}
		sign = 1
	} else {
		if x.weight > maxWeight {
			return mathh.MinInt64
		}
		sign = -1
	}
	to := mathh.Min2Int(int(x.weight), len(x.digits)-1)
	var r int64
	// Digits are accumulated already signed so that MinInt64 (which has
	// no positive counterpart) is reachable without overflow.
	for i := 0; i <= to; i++ {
		r = r*numericBase + sign*int64(x.digits[i])
	}
	for i := to + 1; i <= int(x.weight); i++ {
		r *= numericBase
	}
	return r
}
//replacer:replace
//replacer:old int64 Int64
//replacer:new int Int
//replacer:new int8 Int8
//replacer:new int16 Int16
//replacer:new int32 Int32
//replacer:new uint Uint
//replacer:new uint8 Uint8
//replacer:new uint16 Uint16
//replacer:new uint32 Uint32
//replacer:new uint64 Uint64
// NewInt64 allocates and returns a new Numeric set to x.
func NewInt64(x int64) *Numeric {
var r Numeric
return r.SetInt64(x)
} | numeric-ints.go | 0.655667 | 0.42471 | numeric-ints.go | starcoder |
package tensor
import (
"reflect"
"github.com/pkg/errors"
"gonum.org/v1/gonum/blas"
"gonum.org/v1/gonum/mat"
)
// Trace returns the trace of a matrix (i.e. the sum of the diagonal elements). If the Tensor provided is not a matrix, it will return an error
func (e StdEng) Trace(t Tensor) (retVal interface{}, err error) {
	if t.Dims() != 2 {
		err = errors.Errorf(dimMismatch, 2, t.Dims())
		return
	}
	// Only numeric dtypes have a meaningful trace.
	if err = typeclassCheck(t.Dtype(), numberTypes); err != nil {
		return nil, errors.Wrap(err, "Trace")
	}
	rstride := t.Strides()[0]
	cstride := t.Strides()[1]
	r := t.Shape()[0]
	c := t.Shape()[1]
	// The diagonal of a (possibly non-square) matrix has min(rows, cols)
	// elements; stepping by rstride+cstride in the flat backing data walks
	// exactly along that diagonal.
	m := MinInt(r, c)
	stride := rstride + cstride
	switch data := t.Data().(type) {
	case []int:
		var trace int
		for i := 0; i < m; i++ {
			trace += data[i*stride]
		}
		retVal = trace
	case []int8:
		var trace int8
		for i := 0; i < m; i++ {
			trace += data[i*stride]
		}
		retVal = trace
	case []int16:
		var trace int16
		for i := 0; i < m; i++ {
			trace += data[i*stride]
		}
		retVal = trace
	case []int32:
		var trace int32
		for i := 0; i < m; i++ {
			trace += data[i*stride]
		}
		retVal = trace
	case []int64:
		var trace int64
		for i := 0; i < m; i++ {
			trace += data[i*stride]
		}
		retVal = trace
	case []uint:
		var trace uint
		for i := 0; i < m; i++ {
			trace += data[i*stride]
		}
		retVal = trace
	case []uint8:
		var trace uint8
		for i := 0; i < m; i++ {
			trace += data[i*stride]
		}
		retVal = trace
	case []uint16:
		var trace uint16
		for i := 0; i < m; i++ {
			trace += data[i*stride]
		}
		retVal = trace
	case []uint32:
		var trace uint32
		for i := 0; i < m; i++ {
			trace += data[i*stride]
		}
		retVal = trace
	case []uint64:
		var trace uint64
		for i := 0; i < m; i++ {
			trace += data[i*stride]
		}
		retVal = trace
	case []float32:
		var trace float32
		for i := 0; i < m; i++ {
			trace += data[i*stride]
		}
		retVal = trace
	case []float64:
		var trace float64
		for i := 0; i < m; i++ {
			trace += data[i*stride]
		}
		retVal = trace
	case []complex64:
		var trace complex64
		for i := 0; i < m; i++ {
			trace += data[i*stride]
		}
		retVal = trace
	case []complex128:
		var trace complex128
		for i := 0; i < m; i++ {
			trace += data[i*stride]
		}
		retVal = trace
	}
	// NOTE(review): if typeclassCheck passes for a numeric dtype that is
	// not listed above, retVal would be returned as nil with a nil error —
	// confirm numberTypes is restricted to the cases handled here.
	return
}
// Dot computes the dot product of x and y, dispatching on operand shapes:
// scalar multiplication, inner product (vector·vector), matrix-vector
// multiplication, matrix-matrix multiplication, or — for higher ranks — a
// general tensor contraction over the last axis of x and the second-to-last
// axis of y. Both operands must be float-backed DenseTensors. The reuse and
// incr function options are honoured where the underlying operation supports
// them.
func (e StdEng) Dot(x, y Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
	if _, ok := x.(DenseTensor); !ok {
		err = errors.Errorf("Engine only supports working on x that is a DenseTensor. Got %T instead", x)
		return
	}
	if _, ok := y.(DenseTensor); !ok {
		err = errors.Errorf("Engine only supports working on y that is a DenseTensor. Got %T instead", y)
		return
	}
	var a, b DenseTensor
	if a, err = getFloatDenseTensor(x); err != nil {
		err = errors.Wrapf(err, opFail, "Dot")
		return
	}
	if b, err = getFloatDenseTensor(y); err != nil {
		err = errors.Wrapf(err, opFail, "Dot")
		return
	}
	fo := ParseFuncOpts(opts...)
	var reuse, incr DenseTensor
	if reuse, err = getFloatDenseTensor(fo.reuse); err != nil {
		err = errors.Wrapf(err, opFail, "Dot - reuse")
		return
	}
	if incr, err = getFloatDenseTensor(fo.incr); err != nil {
		err = errors.Wrapf(err, opFail, "Dot - incr")
		return
	}
	// Scalar operands short-circuit to element-wise multiplication.
	switch {
	case a.IsScalar() && b.IsScalar():
		var res interface{}
		switch a.Dtype().Kind() {
		case reflect.Float64:
			res = a.GetF64(0) * b.GetF64(0)
		case reflect.Float32:
			res = a.GetF32(0) * b.GetF32(0)
		}
		switch {
		case incr != nil:
			if !incr.IsScalar() {
				err = errors.Errorf(shapeMismatch, ScalarShape(), incr.Shape())
				return
			}
			if err = e.E.MulIncr(a.Dtype().Type, a.hdr(), b.hdr(), incr.hdr()); err != nil {
				err = errors.Wrapf(err, opFail, "Dot scalar incr")
				return
			}
			retVal = incr
		case reuse != nil:
			reuse.Set(0, res)
			reuse.reshape()
			retVal = reuse
		default:
			retVal = New(FromScalar(res))
		}
		return
	case a.IsScalar():
		switch {
		case incr != nil:
			return Mul(a.ScalarValue(), b, WithIncr(incr))
		case reuse != nil:
			return Mul(a.ScalarValue(), b, WithReuse(reuse))
		}
		// default moved out
		return Mul(a.ScalarValue(), b)
	case b.IsScalar():
		switch {
		case incr != nil:
			return Mul(a, b.ScalarValue(), WithIncr(incr))
		case reuse != nil:
			return Mul(a, b.ScalarValue(), WithReuse(reuse))
		}
		return Mul(a, b.ScalarValue())
	}
	// Vector/matrix dispatch.
	switch {
	case a.IsVector():
		switch {
		case b.IsVector():
			// vector · vector: sizes must match exactly
			if a.len() != b.len() {
				err = errors.Errorf(shapeMismatch, a.Shape(), b.Shape())
				return
			}
			var ret interface{}
			if ret, err = e.Inner(a, b); err != nil {
				return nil, errors.Wrapf(err, opFail, "Dot")
			}
			return New(FromScalar(ret)), nil
		case b.IsMatrix():
			// vector · matrix is computed as bᵀ · vector
			b.T()
			defer b.UT()
			switch {
			case reuse != nil && incr != nil:
				return b.MatVecMul(a, WithReuse(reuse), WithIncr(incr))
			case reuse != nil:
				return b.MatVecMul(a, WithReuse(reuse))
			case incr != nil:
				return b.MatVecMul(a, WithIncr(incr))
			default:
			}
			return b.MatVecMul(a)
		default:
		}
	case a.IsMatrix():
		switch {
		case b.IsVector():
			switch {
			case reuse != nil && incr != nil:
				return a.MatVecMul(b, WithReuse(reuse), WithIncr(incr))
			case reuse != nil:
				return a.MatVecMul(b, WithReuse(reuse))
			case incr != nil:
				return a.MatVecMul(b, WithIncr(incr))
			default:
			}
			return a.MatVecMul(b)
		case b.IsMatrix():
			switch {
			case reuse != nil && incr != nil:
				return a.MatMul(b, WithReuse(reuse), WithIncr(incr))
			case reuse != nil:
				return a.MatMul(b, WithReuse(reuse))
			case incr != nil:
				return a.MatMul(b, WithIncr(incr))
			default:
			}
			return a.MatMul(b)
		default:
		}
	default:
	}
	// General case: tensor contraction over the last axis of a and the
	// second-to-last axis of b.
	as := a.Shape()
	bs := b.Shape()
	axesA := BorrowInts(1)
	axesB := BorrowInts(1)
	defer ReturnInts(axesA)
	defer ReturnInts(axesB)
	var lastA, secondLastB int
	lastA = len(as) - 1
	axesA[0] = lastA
	if len(bs) >= 2 {
		secondLastB = len(bs) - 2
	} else {
		secondLastB = 0
	}
	axesB[0] = secondLastB
	if as[lastA] != bs[secondLastB] {
		err = errors.Errorf(shapeMismatch, as, bs)
		return
	}
	var rd *Dense
	if rd, err = a.TensorMul(b, axesA, axesB); err != nil {
		// This previously panicked (followed by an unreachable return);
		// propagate the error to the caller instead, as the signature
		// promises.
		return nil, errors.Wrapf(err, opFail, "Dot")
	}
	if reuse != nil {
		copyDense(reuse, rd)
		ap := rd.Info().Clone()
		reuse.setAP(&ap)
		defer ReturnTensor(rd)
		// swap out the underlying data and metadata
		// reuse.data, rd.data = rd.data, reuse.data
		// reuse.AP, rd.AP = rd.AP, reuse.AP
		// defer ReturnTensor(rd)
		retVal = reuse
	} else {
		retVal = rd
	}
	return
}
// TODO: make it take DenseTensor

// SVD computes the singular value decomposition of the matrix a.
// s receives the singular values. When uv is true, u and v receive the
// left and right singular vectors. full selects the full (rather than
// thin) decomposition and requires uv to be true.
func (e StdEng) SVD(a Tensor, uv, full bool) (s, u, v Tensor, err error) {
	var t *Dense
	var ok bool
	if err = e.checkAccessible(a); err != nil {
		// Use the opFail format constant (previously the literal string
		// "opFail" was passed, producing a malformed error message).
		return nil, nil, nil, errors.Wrapf(err, opFail, "SVD")
	}
	if t, ok = a.(*Dense); !ok {
		return nil, nil, nil, errors.Errorf("StdEng only performs SVDs for DenseTensors. Got %T instead", a)
	}
	if !isFloat(t.Dtype()) {
		return nil, nil, nil, errors.Errorf("StdEng can only perform SVDs for float64 and float32 type. Got tensor of %v instead", t.Dtype())
	}
	if !t.IsMatrix() {
		return nil, nil, nil, errors.Errorf(dimMismatch, 2, t.Dims())
	}
	var m *mat.Dense
	var svd mat.SVD
	if m, err = ToMat64(t, UseUnsafe()); err != nil {
		return
	}
	switch {
	case full && uv:
		ok = svd.Factorize(m, mat.SVDFull)
	case !full && uv:
		ok = svd.Factorize(m, mat.SVDThin)
	case full && !uv:
		// illogical state - if you specify "full", you WANT the UV matrices
		// error
		err = errors.Errorf("SVD requires computation of `u` and `v` matrices if `full` was specified.")
		return
	default:
		// by default, we return only the singular values
		ok = svd.Factorize(m, mat.SVDNone)
	}
	if !ok {
		// error
		err = errors.Errorf("Unable to compute SVD")
		return
	}
	// extract values
	var um, vm mat.Dense
	s = recycledDense(Float64, Shape{MinInt(t.Shape()[0], t.Shape()[1])})
	svd.Values(s.Data().([]float64))
	if uv {
		svd.UTo(&um)
		svd.VTo(&vm)
		// vm.VFromSVD(&svd)
		u = FromMat64(&um, UseUnsafe(), As(t.t))
		v = FromMat64(&vm, UseUnsafe(), As(t.t))
	}
	return
}
// Inner is a thin layer over BLAS's D/Sdot.
// It returns a scalar value, wrapped in an interface{}, which is not quite nice.
func (e StdEng) Inner(a, b Tensor) (retVal interface{}, err error) {
	var ad, bd DenseTensor
	if ad, bd, err = e.checkTwoFloatTensors(a, b); err != nil {
		return nil, errors.Wrapf(err, opFail, "StdEng.Inner")
	}
	// Dispatch on the concrete element type; both operands are guaranteed
	// to share a dtype by checkTwoFloatTensors.
	switch A := ad.Data().(type) {
	case []float32:
		B := bd.Float32s()
		retVal = whichblas.Sdot(len(A), A, 1, B, 1)
	case []float64:
		B := bd.Float64s()
		retVal = whichblas.Ddot(len(A), A, 1, B, 1)
	}
	return
}
// MatVecMul is a thin layer over BLAS' DGEMV
// Because DGEMV computes:
//		y = αA * x + βy
// we set beta to 0, so we don't have to manually zero out the reused/retval tensor data
func (e StdEng) MatVecMul(a, b, prealloc Tensor) (err error) {
	// check all are DenseTensors
	var ad, bd, pd DenseTensor
	if ad, bd, pd, err = e.checkThreeFloatTensors(a, b, prealloc); err != nil {
		return errors.Wrapf(err, opFail, "StdEng.MatVecMul")
	}
	m := ad.oshape()[0]
	n := ad.oshape()[1]
	// Transposition and data order determine whether BLAS should treat A
	// as transposed, and what the leading dimension (lda) is. z is true
	// when the tensor has NOT been transposed (its old AP is zero).
	tA := blas.NoTrans
	do := a.DataOrder()
	z := ad.oldAP().IsZero()
	var lda int
	switch {
	case do.IsRowMajor() && z:
		lda = n
	case do.IsRowMajor() && !z:
		tA = blas.Trans
		lda = n
	case do.IsColMajor() && z:
		tA = blas.Trans
		lda = m
		m, n = n, m
	case do.IsColMajor() && !z:
		lda = m
		m, n = n, m
	}
	incX, incY := 1, 1 // step size
	// ASPIRATIONAL TODO: different incX and incY
	// TECHNICAL DEBT. TECHDEBT. TECH DEBT
	// Example use case:
	// log.Printf("a %v %v", ad.Strides(), ad.ostrides())
	// log.Printf("b %v", b.Strides())
	// incX := a.Strides()[0]
	// incY = b.Strides()[0]
	switch A := ad.Data().(type) {
	case []float64:
		x := bd.Float64s()
		y := pd.Float64s()
		alpha, beta := float64(1), float64(0)
		whichblas.Dgemv(tA, m, n, alpha, A, lda, x, incX, beta, y, incY)
	case []float32:
		x := bd.Float32s()
		y := pd.Float32s()
		alpha, beta := float32(1), float32(0)
		whichblas.Sgemv(tA, m, n, alpha, A, lda, x, incX, beta, y, incY)
	default:
		return errors.Errorf(typeNYI, "matVecMul", bd.Data())
	}
	return nil
}
// MatMul is a thin layer over DGEMM.
// DGEMM computes:
//		C = αA * B +  βC
// To prevent needless zeroing out of the slice, we just set β to 0
func (e StdEng) MatMul(a, b, prealloc Tensor) (err error) {
	// check all are DenseTensors
	var ad, bd, pd DenseTensor
	if ad, bd, pd, err = e.checkThreeFloatTensors(a, b, prealloc); err != nil {
		return errors.Wrapf(err, opFail, "StdEng.MatMul")
	}
	ado := a.DataOrder()
	bdo := b.DataOrder()
	cdo := prealloc.DataOrder()
	// get result shapes. k is the shared dimension
	// a is (m, k)
	// b is (k, n)
	// c is (m, n)
	var m, n, k int
	m = ad.Shape()[0]
	k = ad.Shape()[1]
	n = bd.Shape()[1]
	// wrt the strides, we use the original strides, because that's what BLAS needs, instead of calling .Strides()
	// lda in colmajor = number of rows;
	// lda in row major = number of cols
	var lda, ldb, ldc int
	switch {
	case ado.IsColMajor():
		lda = m
	case ado.IsRowMajor():
		lda = k
	}
	switch {
	case bdo.IsColMajor():
		ldb = bd.Shape()[0]
	case bdo.IsRowMajor():
		ldb = n
	}
	switch {
	case cdo.IsColMajor():
		ldc = prealloc.Shape()[0]
	case cdo.IsRowMajor():
		ldc = prealloc.Shape()[1]
	}
	// check for trans: a non-zero old AP means the tensor has been
	// transposed, so BLAS must be told to treat it as such, and its
	// leading dimension flips accordingly.
	tA, tB := blas.NoTrans, blas.NoTrans
	if !ad.oldAP().IsZero() {
		tA = blas.Trans
		if ado.IsRowMajor() {
			lda = m
		} else {
			lda = k
		}
	}
	if !bd.oldAP().IsZero() {
		tB = blas.Trans
		if bdo.IsRowMajor() {
			ldb = bd.Shape()[0]
		} else {
			ldb = bd.Shape()[1]
		}
	}
	switch A := ad.Data().(type) {
	case []float64:
		B := bd.Float64s()
		C := pd.Float64s()
		alpha, beta := float64(1), float64(0)
		// When both operands are column-major, compute Cᵀ = Bᵀ·Aᵀ by
		// swapping the operand order and the m/n extents.
		if ado.IsColMajor() && bdo.IsColMajor() {
			whichblas.Dgemm(tA, tB, n, m, k, alpha, B, ldb, A, lda, beta, C, ldc)
		} else {
			whichblas.Dgemm(tA, tB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc)
		}
	case []float32:
		B := bd.Float32s()
		C := pd.Float32s()
		alpha, beta := float32(1), float32(0)
		if ado.IsColMajor() && bdo.IsColMajor() {
			whichblas.Sgemm(tA, tB, n, m, k, alpha, B, ldb, A, lda, beta, C, ldc)
		} else {
			whichblas.Sgemm(tA, tB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc)
		}
	default:
		return errors.Errorf(typeNYI, "matMul", ad.Data())
	}
	return
}
// Outer is a thin wrapper over S/Dger
func (e StdEng) Outer(a, b, prealloc Tensor) (err error) {
	// check all are DenseTensors
	var ad, bd, pd DenseTensor
	if ad, bd, pd, err = e.checkThreeFloatTensors(a, b, prealloc); err != nil {
		return errors.Wrapf(err, opFail, "StdEng.Outer")
	}
	m := ad.Size()
	n := bd.Size()
	pdo := pd.DataOrder()
	// the stride of a Vector is always going to be [1],
	// incX := t.Strides()[0]
	// incY := other.Strides()[0]
	incX, incY := 1, 1
	// lda := pd.Strides()[0]
	var lda int
	switch {
	case pdo.IsColMajor():
		// For a col-major result, compute the outer product as a matrix
		// multiplication of a as an (m,1) column and b as a (1,n) row,
		// then restore both operands' original shapes.
		aShape := a.Shape().Clone()
		bShape := b.Shape().Clone()
		if err = a.Reshape(aShape[0], 1); err != nil {
			return err
		}
		if err = b.Reshape(1, bShape[0]); err != nil {
			return err
		}
		if err = e.MatMul(a, b, prealloc); err != nil {
			return err
		}
		if err = b.Reshape(bShape...); err != nil {
			return
		}
		if err = a.Reshape(aShape...); err != nil {
			return
		}
		return nil
	case pdo.IsRowMajor():
		lda = pd.Shape()[1]
	}
	switch x := ad.Data().(type) {
	case []float64:
		y := bd.Float64s()
		A := pd.Float64s()
		alpha := float64(1)
		whichblas.Dger(m, n, alpha, x, incX, y, incY, A, lda)
	case []float32:
		y := bd.Float32s()
		A := pd.Float32s()
		alpha := float32(1)
		whichblas.Sger(m, n, alpha, x, incX, y, incY, A, lda)
	default:
		return errors.Errorf(typeNYI, "outer", b.Data())
	}
	return nil
}
/* UNEXPORTED UTILITY FUNCTIONS */
// checkTwoFloatTensors validates that a and b are accessible, share the same
// Dtype, and are float-backed DenseTensors. It returns the DenseTensor views
// of both inputs, or an error describing the first check that failed.
func (e StdEng) checkTwoFloatTensors(a, b Tensor) (ad, bd DenseTensor, err error) {
	if err = e.checkAccessible(a); err != nil {
		return nil, nil, errors.Wrap(err, "checkTwoTensors: a is not accessible")
	}
	if err = e.checkAccessible(b); err != nil {
		// Fixed copy-paste: this previously reported "a is not accessible".
		return nil, nil, errors.Wrap(err, "checkTwoTensors: b is not accessible")
	}
	if a.Dtype() != b.Dtype() {
		return nil, nil, errors.New("Expected a and b to have the same Dtype")
	}
	if ad, err = getFloatDenseTensor(a); err != nil {
		return nil, nil, errors.Wrap(err, "checkTwoTensors expects a to be a DenseTensor")
	}
	if bd, err = getFloatDenseTensor(b); err != nil {
		return nil, nil, errors.Wrap(err, "checkTwoTensors expects b to be a DenseTensor")
	}
	return
}
func (e StdEng) checkThreeFloatTensors(a, b, ret Tensor) (ad, bd, retVal DenseTensor, err error) {
if err = e.checkAccessible(a); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkThreeTensors: a is not accessible")
}
if err = e.checkAccessible(b); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkThreeTensors: a is not accessible")
}
if err = e.checkAccessible(ret); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkThreeTensors: ret is not accessible")
}
if a.Dtype() != b.Dtype() || b.Dtype() != ret.Dtype() {
return nil, nil, nil, errors.New("Expected a and b and retVal all to have the same Dtype")
}
if ad, err = getFloatDenseTensor(a); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkTwoTensors expects a to be be a DenseTensor")
}
if bd, err = getFloatDenseTensor(b); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkTwoTensors expects b to be be a DenseTensor")
}
if retVal, err = getFloatDenseTensor(ret); err != nil {
return nil, nil, nil, errors.Wrap(err, "checkTwoTensors expects retVal to be be a DenseTensor")
}
return
} | edge/vendor/gorgonia.org/tensor/defaultengine_linalg.go | 0.625667 | 0.406214 | defaultengine_linalg.go | starcoder |
package evaluator
import "errors"
var (
	// ErrNotFound means an unknown string within the expression could not be
	// resolved from either the registered functions or the supplied params.
	ErrNotFound = errors.New("neither function nor variable found")
	// ErrInvalidResult means the result type expected by the caller does not
	// match the actual type produced by evaluating the expression.
	ErrInvalidResult = errors.New("invalid result type")
)
// Expression stands for an expression which can be evaluated by passing required params
type Expression struct {
	// exp is the parsed s-expression tree to evaluate.
	exp sexp
}

// New will return a Expression by parsing the given expression string.
// It returns the parse error unchanged if the string is malformed.
func New(expr string) (Expression, error) {
	exp, err := parse(expr)
	if err != nil {
		return Expression{}, err
	}
	return Expression{
		exp: exp,
	}, nil
}
// Eval evaluates the Expression with params and return the real value in the type of interface.
// Evaluation errors from the underlying expression tree are returned as-is.
func (e Expression) Eval(params Params) (interface{}, error) {
	return e.exp.evaluate(params)
}
// EvalBool evaluates the expression and asserts the result to a boolean.
// It returns ErrInvalidResult when the evaluated value is not a bool, and
// propagates any evaluation error unchanged.
func (e Expression) EvalBool(params Params) (bool, error) {
	result, err := e.exp.evaluate(params)
	if err != nil {
		return false, err
	}
	if b, ok := result.(bool); ok {
		return b, nil
	}
	return false, ErrInvalidResult
}
// Properties returns the field names in an Expression.
// e.g. Expression constructed by `(or (and (between age 18 80) (eq gender "male") )`
// returns "age", "gender" by calling Properties.
// The traversal is delegated to the underlying expression tree.
func (e Expression) Properties() []string {
	return e.exp.properties()
}
// MapParams is a simple map implementation of Params interface
type MapParams map[string]interface{}

// Get is the only method required by Params interface.
// It returns ErrNotFound when the named parameter is absent from the map.
func (p MapParams) Get(name string) (interface{}, error) {
	if value, ok := p[name]; ok {
		return value, nil
	}
	return nil, ErrNotFound
}
// Params defines a Get method which gets required param for the expression.
// Implementations should return ErrNotFound for unknown names.
type Params interface {
	Get(name string) (interface{}, error)
}
// Eval is a handy encapsulation to parse the expression and evaluate it
// in a single call. Parse errors and evaluation errors are both returned.
func Eval(expr string, params Params) (interface{}, error) {
	e, err := New(expr)
	if err != nil {
		return nil, err
	}
	return e.Eval(params)
}
// EvalBool is same as Eval but return a boolean result instead of interface type
func EvalBool(expr string, params Params) (bool, error) {
e, err := New(expr)
if err != nil {
return false, err
}
return e.EvalBool(params)
} | evaluator.go | 0.73678 | 0.480966 | evaluator.go | starcoder |
package tree
// node is a single element of the binary search tree, holding a value and
// links to its left and right children.
type node struct {
	value int
	left  *node
	right *node
}

// newNode builds a leaf node (no children) carrying the given value.
func newNode(value int) *node {
	return &node{value: value}
}
// UnbalancedBinarySearchTree is a binary search tree without any
// self-balancing; its shape depends entirely on insertion order.
type UnbalancedBinarySearchTree struct {
	root *node
}
// Add inserts newValue into the tree, creating the root if the tree is
// empty. Duplicates are allowed and go to the left subtree.
func (t *UnbalancedBinarySearchTree) Add(newValue int) {
	if t.root == nil {
		t.root = newNode(newValue)
		return
	}
	addToNode(t.root, newValue)
}
// FromSorted builds a balanced tree from an already-sorted slice by
// recursively choosing the middle element as each subtree's root.
func FromSorted(s []int) UnbalancedBinarySearchTree {
	root := fromSortedSlice(s)
	return UnbalancedBinarySearchTree{root}
}
// fromSortedSlice recursively converts a sorted slice into a balanced
// subtree, returning nil for an empty slice.
func fromSortedSlice(s []int) *node {
	if len(s) == 0 {
		return nil
	}
	mid := len(s) / 2
	return &node{
		value: s[mid],
		left:  fromSortedSlice(s[:mid]),
		right: fromSortedSlice(s[mid+1:]),
	}
}
// addToNode walks down from n and attaches newValue as a new leaf.
// Values less than or equal to the current node go left, larger ones go
// right.
func addToNode(n *node, newValue int) {
	child := &n.right
	if newValue <= n.value {
		child = &n.left
	}
	if *child == nil {
		*child = newNode(newValue)
		return
	}
	addToNode(*child, newValue)
}
// OrderedElements returns all values in the tree in ascending order.
// An empty tree yields an empty (non-nil) slice.
func (t *UnbalancedBinarySearchTree) OrderedElements() []int {
	// addToSlice handles a nil root by returning the slice unchanged, so
	// the previous explicit nil-root branch was redundant.
	return addToSlice(t.root, make([]int, 0))
}
// addToSlice appends the values of the subtree rooted at n to values via
// an in-order (left, node, right) traversal and returns the extended
// slice. A nil n returns values unchanged.
func addToSlice(n *node, values []int) []int {
	if n == nil {
		return values
	}
	values = addToSlice(n.left, values)
	values = append(values, n.value)
	values = addToSlice(n.right, values)
	return values
}
// Contains reports whether value is present anywhere in the tree.
func (t UnbalancedBinarySearchTree) Contains(value int) bool {
	if t.root == nil {
		return false
	}
	return containsInNode(t.root, value)
}
// containsInNode reports whether value appears in the subtree rooted at
// n. A nil subtree never contains anything, which lets the recursion
// descend without per-child nil checks.
func containsInNode(n *node, value int) bool {
	if n == nil {
		return false
	}
	if n.value == value {
		return true
	}
	if value < n.value {
		return containsInNode(n.left, value)
	}
	return containsInNode(n.right, value)
}
// Depth returns the number of edges on the longest path from the root to
// a leaf; an empty tree (and a single-node tree) has depth 0.
func (t *UnbalancedBinarySearchTree) Depth() int {
	if t.root == nil {
		return 0
	}
	return nodeDepth(t.root, 0)
}
func nodeDepth(n *node, curDepth int) int {
var leftDepth = curDepth
var rightDepth = curDepth
if n.left != nil {
leftDepth = nodeDepth(n.left, curDepth+1)
}
if n.right != nil {
rightDepth = nodeDepth(n.right, curDepth+1)
}
if leftDepth > rightDepth {
return leftDepth
} else {
return rightDepth
}
} | tree/UnbalancedBinarySearchTree.go | 0.815453 | 0.463323 | UnbalancedBinarySearchTree.go | starcoder |
package pbparser
import (
"errors"
"fmt"
"strings"
)
// DataTypeCategory is an enumeration which represents the possible kinds
// of field datatypes in message, oneof and extend declaration constructs.
type DataTypeCategory int

// The three datatype categories: scalar (built-in protobuf types),
// map (map<key, value>) and named (message/enum references).
const (
	ScalarDataTypeCategory DataTypeCategory = iota
	MapDataTypeCategory
	NamedDataTypeCategory
)

// DataType is the interface which must be implemented by the field datatypes.
// Name() returns the name of the datatype and Category() returns the category
// of the datatype.
type DataType interface {
	Name() string
	Category() DataTypeCategory
}
// ScalarType is an enumeration which represents all known supported scalar
// field datatypes.
type ScalarType int

// Scalar types start at 1 so that the ScalarType zero value can serve as
// a "not found" sentinel (see NewScalarDataType).
const (
	AnyScalar ScalarType = iota + 1
	BoolScalar
	BytesScalar
	DoubleScalar
	FloatScalar
	Fixed32Scalar
	Fixed64Scalar
	Int32Scalar
	Int64Scalar
	Sfixed32Scalar
	Sfixed64Scalar
	Sint32Scalar
	Sint64Scalar
	StringScalar
	Uint32Scalar
	Uint64Scalar
)

// scalarLookupMap maps the lowercase protobuf type name to its ScalarType.
var scalarLookupMap = map[string]ScalarType{
	"any":      AnyScalar,
	"bool":     BoolScalar,
	"bytes":    BytesScalar,
	"double":   DoubleScalar,
	"float":    FloatScalar,
	"fixed32":  Fixed32Scalar,
	"fixed64":  Fixed64Scalar,
	"int32":    Int32Scalar,
	"int64":    Int64Scalar,
	"sfixed32": Sfixed32Scalar,
	"sfixed64": Sfixed64Scalar,
	"sint32":   Sint32Scalar,
	"sint64":   Sint64Scalar,
	"string":   StringScalar,
	"uint32":   Uint32Scalar,
	"uint64":   Uint64Scalar,
}
// ScalarDataType is a construct which represents
// all supported protobuf scalar datatypes.
type ScalarDataType struct {
	scalarType ScalarType
	name       string
}

// Name function implementation of interface DataType for ScalarDataType.
// It returns the lowercase protobuf type name (e.g. "int32").
func (sdt ScalarDataType) Name() string {
	return sdt.name
}

// Category function implementation of interface DataType for ScalarDataType
func (sdt ScalarDataType) Category() DataTypeCategory {
	return ScalarDataTypeCategory
}
// NewScalarDataType creates and returns a new ScalarDataType for the given string.
// The lookup is case-insensitive. If a scalar data type mapping does not exist
// for the given string, an Error is returned.
func NewScalarDataType(s string) (ScalarDataType, error) {
	key := strings.ToLower(s)
	// Use the comma-ok form instead of relying on the ScalarType zero
	// value as a sentinel; this keeps working even if the enum ever
	// starts at 0.
	st, ok := scalarLookupMap[key]
	if !ok {
		msg := fmt.Sprintf("'%v' is not a valid ScalarDataType", s)
		return ScalarDataType{}, errors.New(msg)
	}
	return ScalarDataType{name: key, scalarType: st}, nil
}
// MapDataType is a construct which represents a protobuf map datatype.
type MapDataType struct {
	keyType   DataType
	valueType DataType
}

// Name function implementation of interface DataType for MapDataType.
// It renders the protobuf syntax, e.g. "map<string, int32>".
func (mdt MapDataType) Name() string {
	return "map<" + mdt.keyType.Name() + ", " + mdt.valueType.Name() + ">"
}

// Category function implementation of interface DataType for MapDataType
func (mdt MapDataType) Category() DataTypeCategory {
	return MapDataTypeCategory
}

// KeyType returns key DataType for MapDataType
func (mdt MapDataType) KeyType() DataType {
	return mdt.keyType
}

// ValueType returns value DataType for MapDataType
func (mdt MapDataType) ValueType() DataType {
	return mdt.valueType
}
// NamedDataType is a construct which represents a message datatype as
// a RPC request or response and a message/enum datatype as a field in
// message, oneof or extend declarations.
type NamedDataType struct {
	// supportsStreaming is true when the rpc request/response used the
	// "stream" modifier; it is irrelevant for field usage.
	supportsStreaming bool
	name              string
}

// Name function implementation of interface DataType for NamedDataType
func (ndt NamedDataType) Name() string {
	return ndt.name
}

// Category function implementation of interface DataType for NamedDataType
func (ndt NamedDataType) Category() DataTypeCategory {
	return NamedDataTypeCategory
}

// IsStream returns true if the NamedDataType is being used in a rpc
// as a request or response and is preceded by a Stream keyword.
func (ndt NamedDataType) IsStream() bool {
	return ndt.supportsStreaming
}
// stream marks a NamedDataType as being preceded by a Stream keyword
// (used by the parser when reading rpc request/response types).
func (ndt *NamedDataType) stream(flag bool) {
	ndt.supportsStreaming = flag
}
package lafzi
import (
"math"
"sort"
)
// Database is the core of Lafzi. Used to store the position of tokens within the submitted documents.
type Database struct {
	// storage is the pluggable persistence backend (Bolt or SQLite).
	storage dataStorage
}

// Document is the Arabic document that will be indexed.
type Document struct {
	ID         int64
	ArabicText string
}

// documentTokens records, for a single document, how many of the query
// tokens it contains and the token positions at which they occur.
type documentTokens struct {
	ID           int64
	TokenCount   int
	TokenIndexes []int
}

// documentScore holds the ranking features computed for one candidate
// document during Search.
type documentScore struct {
	ID                  int64
	TokenCount          int
	NLongestSubSequence int
	SubSequenceDensity  float64
}
// OpenDatabase open and creates database at the specified path.
func OpenDatabase(path string, storageType StorageType) (*Database, error) {
	var err error
	var storage dataStorage
	// Pick the backing store; any unknown storage type falls back to Bolt.
	switch storageType {
	case SQLite:
		storage, err = newSQLiteStorage(path)
	default:
		storage, err = newBoltStorage(path)
	}
	if err != nil {
		return nil, err
	}
	return &Database{storage}, nil
}
// Close closes the database and prevent any read and write.
// It delegates to the underlying storage backend.
func (db *Database) Close() {
	db.storage.close()
}

// AddDocuments adds the documents to database, indexing them for later
// Search calls. It delegates persistence to the storage backend.
func (db *Database) AddDocuments(documents ...Document) error {
	return db.storage.saveDocuments(documents...)
}
// Search looks for documents whose transliterations contain the specified keyword.
// It returns the matching document IDs, best matches first.
func (db *Database) Search(keyword string) ([]int64, error) {
	// Convert keyword into tokens
	query := queryFromLatin(keyword)
	tokens := tokenizeQuery(query)
	if len(tokens) == 0 {
		return nil, nil
	}
	// Find documents that contains the tokens
	documents, err := db.storage.findTokens(tokens...)
	if err != nil {
		return nil, err
	}
	// Calculate score and filter the dictionary documents.
	// Here we want at least 3/4 of tokens found in each document.
	countThreshold := int(math.Ceil(float64(len(tokens)) * 3 / 4))
	if countThreshold <= 1 {
		// For very short queries require every token to be present.
		countThreshold = len(tokens)
	}
	documentScores := []documentScore{}
	for _, doc := range documents {
		// Make sure count of token inside this document pass the threshold
		if doc.TokenCount < countThreshold {
			continue
		}
		// Make sure length of longest sub sequence pass the threshold as well
		longestSubSequence := db.getLongestSubSequence(doc.TokenIndexes)
		nLongestSubSequence := len(longestSubSequence)
		if nLongestSubSequence < countThreshold {
			continue
		}
		// Calculate sequence density
		density := db.getSequenceDensity(longestSubSequence)
		if density < 0.5 {
			continue
		}
		documentScores = append(documentScores, documentScore{
			ID:                  doc.ID,
			TokenCount:          doc.TokenCount,
			NLongestSubSequence: nLongestSubSequence,
			SubSequenceDensity:  density,
		})
	}
	// Sort document scores with following order:
	// - token count, descending
	// - sub sequence density, descending
	// - document id, ascending
	sort.Slice(documentScores, func(a, b int) bool {
		scoreA := documentScores[a]
		scoreB := documentScores[b]
		if scoreA.TokenCount != scoreB.TokenCount {
			return scoreA.TokenCount > scoreB.TokenCount
		}
		if scoreA.SubSequenceDensity != scoreB.SubSequenceDensity {
			return scoreA.SubSequenceDensity > scoreB.SubSequenceDensity
		}
		return scoreA.ID < scoreB.ID
	})
	result := make([]int64, len(documentScores))
	for i, score := range documentScores {
		result[i] = score.ID
	}
	return result, nil
}
// getLongestSubSequence returns the longest run of token indexes in
// sequence in which neighbouring indexes differ by at most five — i.e.
// the query tokens appear close together in the document. It returns nil
// for an empty input.
func (db *Database) getLongestSubSequence(sequence []int) []int {
	// Guard against empty input: the slicing at the bottom would
	// otherwise panic on sequence[0:1].
	if len(sequence) == 0 {
		return nil
	}
	var maxStart, maxLength int
	var currentStart, currentLength int
	for i := 1; i < len(sequence); i++ {
		// If current number difference with the previous is at most five,
		// it's still within one sub sequence.
		if sequence[i]-sequence[i-1] <= 5 {
			currentLength++
			continue
		}
		// If not, then it's a brand new sub sequence.
		// Check if the one just ended is larger than the current biggest.
		if currentLength > maxLength {
			maxStart = currentStart
			maxLength = currentLength
		}
		currentStart = i
		currentLength = 0
	}
	// There are cases where the sequence has exactly one sub sequence
	// (sequence == sub sequence). In this case maxLength is still 0, so
	// the final run must be compared here as well.
	if currentLength > maxLength {
		maxStart = currentStart
		maxLength = currentLength
	}
	// maxLength counts gaps, so the run holds maxLength+1 elements.
	return sequence[maxStart : maxStart+maxLength+1]
}
func (db *Database) getSequenceDensity(sequence []int) float64 {
var sigma float64
for i := 0; i < len(sequence)-1; i++ {
tmp := sequence[i+1] - sequence[i]
sigma += 1 / float64(tmp)
}
switch nSequence := len(sequence); nSequence {
case 1:
return 1
case 0:
return 0
default:
return (1 / float64(nSequence-1)) * sigma
}
} | lafzi.go | 0.635788 | 0.405096 | lafzi.go | starcoder |
package mortgage
import (
"fmt"
"math"
"strconv"
"time"
"github.com/keep94/toolbox/date_util"
)
// Term represents a single term within an amortization schedule.
// All monetary amounts are in cents.
type Term struct {
	Date     time.Time
	Payment  int64
	Interest int64
	Balance  int64
}

// Principal returns the principal paid during this term, i.e. the part
// of the payment that was not interest.
func (t *Term) Principal() int64 {
	return t.Payment - t.Interest
}
// Loan represents a loan. Loan instances are immutable.
// Monetary amounts (amount, payment) are in cents.
type Loan struct {
	amount  int64
	rate    float64
	length  int
	payment int64
}
// NewLoan returns a new loan. Payments on returned loan are monthly.
// amount is the amount borrowed;
// rate is the annual interest rate, 0.03 = 3%;
// durationInMonths is the number of months of the loan.
// amount and durationInMonths must be positive, otherwise NewLoan panics.
func NewLoan(amount int64, rate float64, durationInMonths int) *Loan {
	if amount <= 0 || durationInMonths <= 0 {
		panic("Amount and durationInMonths must be positive.")
	}
	// The monthly payment is derived from the monthly rate (annual / 12).
	payment := solveForPayment(amount, rate/12.0, durationInMonths)
	return &Loan{amount, rate, durationInMonths, payment}
}
// Amount returns the amount borrowed, in the same integer units used
// throughout (presumably cents, per FormatUSD usage — confirm with callers).
func (l *Loan) Amount() int64 {
	return l.amount
}

// Rate returns the annual interest rate, 0.03 = 3%.
func (l *Loan) Rate() float64 {
	return l.rate
}

// DurationInMonths returns the number of months of the loan. Depending on the
// rounding of payment, this may be different than the actual number of months
// needed to pay off the loan (see Terms).
func (l *Loan) DurationInMonths() int {
	return l.length
}

// Payment returns the fixed payment due each term.
func (l *Loan) Payment() int64 {
	return l.payment
}
// Terms returns all the terms needed to pay off this loan. year and
// month are the origination month of the loan.
// maxTerms is the maximum number of terms this method will return.
// The number of terms returned may differ from the duration of the loan
// depending on the rounding of the payment.
func (l *Loan) Terms(year, month, maxTerms int) []*Term {
	var result []*Term
	date := date_util.YMD(year, month, 1)
	balance := l.amount
	monthlyRate := l.rate / 12.0
	for balance > 0 {
		// The first payment falls one month after origination; each
		// subsequent term is one month later.
		date = date.AddDate(0, 1, 0)
		// Interest accrues on the running balance, is rounded to the nearest
		// unit, and is capitalised before the payment is applied.
		interest := toInt64(float64(balance) * monthlyRate)
		balance += interest
		payment := l.payment
		// The final payment is reduced so the balance never goes negative.
		if payment > balance {
			payment = balance
		}
		balance -= payment
		result = append(result, &Term{
			Date:     date,
			Payment:  payment,
			Interest: interest,
			Balance:  balance})
		if len(result) == maxTerms {
			break
		}
	}
	return result
}
// FormatUSD returns the given amount of cents as a dollars-and-cents string.
// 347 -> "3.47"
func FormatUSD(x int64) string {
	dollars := float64(x) / 100.0
	return strconv.FormatFloat(dollars, 'f', 2, 64)
}
// ParseUSD is the inverse of FormatUSD: it parses a dollars-and-cents string
// into cents.
// "3.47" -> 347
func ParseUSD(s string) (v int64, e error) {
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, err
	}
	// Scale to cents and round half-up via floor(x + 0.5).
	return int64(math.Floor(f*100.0 + 0.5)), nil
}
// solveForPayment computes the rounded fixed payment for the given principal,
// per-period interest rate and number of periods using the standard annuity
// formula. The result is clamped to be positive and to exceed the
// interest-only payment, so the loan is guaranteed to amortise.
func solveForPayment(
	amount int64, rate float64, length int) int64 {
	principal := float64(amount)
	periods := float64(length)
	// With no interest the payment is simply the principal split evenly.
	if rate == 0.0 {
		return toInt64(principal / periods)
	}
	annuity := principal * rate * (1.0 + 1.0/(math.Pow(1.0+rate, periods)-1.0))
	payment := toInt64(annuity)
	if payment <= 0 {
		payment = 1
	}
	// A payment at or below interest-only would never reduce the balance.
	if interestOnly := toInt64(principal * rate); payment <= interestOnly {
		payment = interestOnly + 1
	}
	return payment
}
// toInt64 rounds a non-negative float to the nearest int64 by adding 0.5 and
// truncating. NOTE(review): Go's float-to-int conversion truncates toward
// zero, so negative inputs would not round half-up correctly — all callers in
// this file pass non-negative values.
func toInt64(x float64) int64 {
	return int64(x + 0.5)
} | mortgage.go | 0.824179 | 0.527134 | mortgage.go | starcoder |
package main
import (
"github.com/go-gl/glfw/v3.2/glfw"
)
const (
	// The "camera speed" is an arbitrary value that controls how much the
	// camera moves (pitch/yaw per input event) in response to an input event.
	cameraSpeed = 0.05
)
// WASD keys control camera rotation. Only the first pressed key (in W, A, S,
// D order) takes effect per call; when no key is pressed the shader uniforms
// are left untouched.
func (c *camera) handleRotation(window *glfw.Window, program uint32) {
	switch {
	case window.GetKey(glfw.KeyW) == glfw.Press:
		c.adjustPitch(cameraSpeed)
	case window.GetKey(glfw.KeyA) == glfw.Press:
		c.adjustYaw(cameraSpeed)
	case window.GetKey(glfw.KeyS) == glfw.Press:
		c.adjustPitch(-cameraSpeed)
	case window.GetKey(glfw.KeyD) == glfw.Press:
		c.adjustYaw(-cameraSpeed)
	default:
		// Short circuit (and avoid updating the "view" uniform) if the
		// camera wasn't moved.
		return
	}
	setUniformMatrix4fv(program, viewUniform, c.view())
	setUniform3f(program, viewPositionUniform, c.eye[0], c.eye[1], c.eye[2])
}
// Mouse scroll controls camera zoom. The returned callback adjusts the
// camera's distance to the origin by the vertical scroll offset and pushes
// the updated view matrix and eye position to the shader uniforms.
// NOTE(review): the sign flip presumably makes scrolling up zoom in — this
// depends on adjustDistanceToOrigin's convention; confirm.
func (c *camera) zoomCallback(program uint32) glfw.ScrollCallback {
	return func(window *glfw.Window, xOffset, yOffset float64) {
		c.adjustDistanceToOrigin(-yOffset)
		setUniformMatrix4fv(program, viewUniform, c.view())
		setUniform3f(program, viewPositionUniform, c.eye[0], c.eye[1], c.eye[2])
	}
}
// Number keys (1-9) control the Rubik's Cube. Each key rotates some "slice" of
// the cube 90 degrees counter-clockwise along some coordinate axis.
func (r rubiksCube) cubeControlCallback(vao, vbo, ebo uint32) glfw.KeyCallback {
	return func(
		window *glfw.Window,
		key glfw.Key,
		scancode int,
		action glfw.Action,
		mods glfw.ModifierKey,
	) {
		// Only react to the initial key press, not repeats or releases.
		if action != glfw.Press {
			return
		}
		// We rely on the fact that consecutive number keys are specified using
		// consecutive constants. Subtracting the middle key of each triple
		// (2, 5, 8) maps the keys to slice offsets -1, 0 and +1.
		switch key {
		case glfw.Key1, glfw.Key2, glfw.Key3:
			r.rotateX(int(key - glfw.Key2))
		case glfw.Key4, glfw.Key5, glfw.Key6:
			r.rotateY(int(key - glfw.Key5))
		case glfw.Key7, glfw.Key8, glfw.Key9:
			r.rotateZ(int(key - glfw.Key8))
		default:
			return
		}
		// Re-upload the cube's vertex data so the rotation becomes visible.
		r.buffer(vao, vbo, ebo)
	}
} | input.go | 0.685213 | 0.421909 | input.go | starcoder |
package schematic
import (
"fmt"
"github.com/df-mc/dragonfly/dragonfly/block"
"github.com/df-mc/dragonfly/dragonfly/world"
"github.com/df-mc/dragonfly/dragonfly/world/chunk"
"reflect"
)
// schematic implements the structure of a Schematic, providing methods to
// read from it.
type schematic struct {
	Data      map[string]interface{} // raw decoded NBT data
	w, h, l   int                    // dimensions parsed from Data by init
	materials string                 // "Materials" NBT entry
	blocks    []uint8                // legacy block IDs, one per position
	metadata  []uint8                // legacy block metadata, one per position
}

// init initialises the schematic structure, parsing dimensions, materials and
// the block/metadata arrays from the NBT data. It returns an error when the
// arrays do not match the declared volume.
func (s *schematic) init() error {
	s.w = int(s.Data["Width"].(int16))
	s.h = int(s.Data["Height"].(int16))
	s.l = int(s.Data["Length"].(int16))
	s.materials = s.Data["Materials"].(string)

	// Copy the NBT-decoded slices via reflection so the schematic owns its
	// own []byte data regardless of the concrete element type decoded.
	blockVal := reflect.ValueOf(s.Data["Blocks"])
	metaVal := reflect.ValueOf(s.Data["Data"])
	elemType := blockVal.Type().Elem()
	blockCopy := reflect.MakeSlice(reflect.SliceOf(elemType), blockVal.Len(), blockVal.Len())
	metaCopy := reflect.MakeSlice(reflect.SliceOf(elemType), metaVal.Len(), metaVal.Len())
	reflect.Copy(blockCopy, blockVal)
	reflect.Copy(metaCopy, metaVal)
	s.blocks = blockCopy.Interface().([]byte)
	s.metadata = metaCopy.Interface().([]byte)

	volume := s.w * s.h * s.l
	if len(s.blocks) != volume || len(s.metadata) != volume {
		return fmt.Errorf("blocks and metadata were expected to be %v bytes long both (%v*%v*%v), but blocks has length %v and metadata has length %v", volume, s.w, s.h, s.l, len(s.blocks), len(s.metadata))
	}
	return nil
}

// Dimensions returns the dimensions of the schematic as width, height and
// length respectively.
func (s *schematic) Dimensions() [3]int {
	dims := [3]int{s.w, s.h, s.l}
	return dims
}
// At returns the block found at a given position in the schematic. If any of the X, Y or Z coordinates passed
// are out of the bounds of the schematic, At will panic.
func (s *schematic) At(x, y, z int, _ func(int, int, int) world.Block) world.Block {
	// Positions are stored in a flat array indexed Y-major, then Z, then X.
	index := (y*s.l+z)*s.w + x
	id, meta := s.blocks[index], s.metadata[index]
	if id == 0 {
		// Don't write air blocks: We simply return nil so that no block is placed at all.
		return nil
	}
	// Translate the legacy (id, metadata) pair: first through the edition
	// conversion table (if an entry exists), then to a modern block state.
	old := oldBlock{id: id, metadata: meta}
	if converted, ok := editionConversion[old]; ok {
		old = converted
	}
	n, ok := conversion[old]
	if !ok {
		// Unknown legacy blocks fall back to air rather than failing.
		return block.Air{}
	}
	rid, ok := chunk.StateToRuntimeID(n.name, n.properties)
	if !ok {
		return block.Air{}
	}
	ret, ok := world_blockByRuntimeID(rid)
	if !ok {
		return block.Air{}
	}
	return ret
}
// AdditionalLiquidAt always returns nil: schematics carry no layered-liquid
// data, so there is never an extra liquid at any position.
func (*schematic) AdditionalLiquidAt(int, int, int) world.Liquid {
	return nil
} | structure.go | 0.808521 | 0.410077 | structure.go | starcoder |
package base62_go
import (
"fmt"
"math"
"math/big"
"strconv"
"strings"
)
// base is the radix of the encoding: one digit per alphabet character.
const base = 62

// Encoding encodes and decodes base62 values using a caller-supplied
// 62-character alphabet. An optional minimum width pads encoded strings
// with leading '0' characters.
type Encoding struct {
	encode  string
	padding int
}

// NewEncoding returns a new Encoding defined by the given alphabet.
func NewEncoding(encoder string) *Encoding {
	e := &Encoding{}
	e.encode = encoder
	return e
}

// EncodeBytes returns the base62 encoding of b, interpreted as a big-endian
// unsigned integer.
func (e *Encoding) EncodeBytes(b []byte) string {
	value := new(big.Int)
	value.SetBytes(b)
	return e.EncodeBigInt(value)
}

// EncodeInt64 returns the base62 encoding of n.
// Note: n <= 0 produces no digits, so the result is the empty string unless
// a padding width has been configured.
func (e *Encoding) EncodeInt64(n int64) string {
	// Collect digits least-significant first, then reverse.
	var digits []byte
	for n > 0 {
		digits = append(digits, e.encode[n%base])
		n /= base
	}
	for i, j := 0, len(digits)-1; i < j; i, j = i+1, j-1 {
		digits[i], digits[j] = digits[j], digits[i]
	}
	out := string(digits)
	if e.padding > 0 {
		out = e.pad(out, e.padding)
	}
	return out
}

// EncodeBigInt returns the base62 encoding of an arbitrary precision integer.
// Note: n is consumed by the division loop, mirroring the original behaviour.
func (e *Encoding) EncodeBigInt(n *big.Int) string {
	var (
		digits []byte
		rem    = new(big.Int)
		radix  = big.NewInt(base)
		zero   = big.NewInt(0)
	)
	// Repeatedly divide by the radix, collecting remainders (least
	// significant digit first), then reverse into reading order.
	for n.Cmp(zero) > 0 {
		n, rem = n.DivMod(n, radix, rem)
		digits = append(digits, e.encode[rem.Int64()])
	}
	for i, j := 0, len(digits)-1; i < j; i, j = i+1, j-1 {
		digits[i], digits[j] = digits[j], digits[i]
	}
	out := string(digits)
	if e.padding > 0 {
		out = e.pad(out, e.padding)
	}
	return out
}

// DecodeToBytes returns the big-endian bytes of a base62 encoded string. An
// optional padding length left-pads the result with zero bytes.
func (e *Encoding) DecodeToBytes(s string, padding ...int) []byte {
	raw := e.DecodeToBigInt(s).Bytes()
	if len(padding) > 0 && padding[0] > 0 && len(raw) < padding[0] {
		padded := make([]byte, padding[0])
		copy(padded[padding[0]-len(raw):], raw)
		return padded
	}
	return raw
}

// DecodeToInt64 decodes a base62 encoded string into an int64. Characters
// outside the alphabet or values beyond the float64-exact range are not
// detected — callers must pass well-formed input.
func (e *Encoding) DecodeToInt64(s string) int64 {
	var total int64
	for i, r := range s {
		pos := strings.IndexRune(e.encode, r)
		// Digits are most-significant first; the exponent counts down.
		exponent := len(s) - (i + 1)
		total += int64(pos) * int64(math.Pow(float64(base), float64(exponent)))
	}
	return total
}

// DecodeToBigInt returns an arbitrary precision integer decoded from the
// base62 encoded string.
func (e *Encoding) DecodeToBigInt(s string) *big.Int {
	var (
		total = new(big.Int)
		radix = big.NewInt(base)
		pos   = new(big.Int)
		exp   = new(big.Int)
		power = new(big.Int)
	)
	for i, r := range s {
		pos.SetInt64(int64(strings.IndexRune(e.encode, r)))
		exp.SetInt64(int64(len(s) - (i + 1)))
		power.Exp(radix, exp, nil)
		// Accumulate digit * radix^exponent.
		total.Add(total, power.Mul(pos, power))
	}
	return total
}

// pad left-pads s with '0' characters up to a minimum length.
func (e *Encoding) pad(s string, minlen int) string {
	if len(s) >= minlen {
		return s
	}
	return fmt.Sprintf("%0"+strconv.Itoa(minlen)+"s", s)
}
// SetEncodePadding sets the minimum width of encoded strings; shorter results
// are left-padded with '0' characters (see pad). A width of 0 disables
// padding.
func (e *Encoding) SetEncodePadding(n int) {
	e.padding = n
} | encoding.go | 0.832509 | 0.413951 | encoding.go | starcoder |
package ent
import (
"fmt"
"strings"
"entgo.io/ent/dialect/sql"
"github.com/masseelch/elk/internal/integration/pets/ent/pet"
"github.com/masseelch/elk/internal/integration/pets/ent/toy"
)
// Toy is the model entity for the Toy schema.
// NOTE(review): this file appears to be generated by entc (the ent code
// generator); edits other than comments will be overwritten on regeneration.
type Toy struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// Color holds the value of the "color" field.
	Color toy.Color `json:"color,omitempty"`
	// Material holds the value of the "material" field.
	Material toy.Material `json:"material,omitempty"`
	// Title holds the value of the "title" field.
	Title string `json:"title,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the ToyQuery when eager-loading is set.
	Edges ToyEdges `json:"edges"`
	// pet_toys holds the foreign key to the owning Pet; nil when unset.
	pet_toys *int
}

// ToyEdges holds the relations/edges for other nodes in the graph.
type ToyEdges struct {
	// Owner holds the value of the owner edge.
	Owner *Pet `json:"owner,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	loadedTypes [1]bool
}

// OwnerOrErr returns the Owner value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e ToyEdges) OwnerOrErr() (*Pet, error) {
	// loadedTypes[0] tracks whether the owner edge was requested.
	if e.loadedTypes[0] {
		if e.Owner == nil {
			// The edge owner was loaded in eager-loading,
			// but was not found.
			return nil, &NotFoundError{label: pet.Label}
		}
		return e.Owner, nil
	}
	return nil, &NotLoadedError{edge: "owner"}
}
// scanValues returns the types for scanning values from sql.Rows.
// Each requested column gets a nullable scan target matching its SQL type.
func (*Toy) scanValues(columns []string) ([]interface{}, error) {
	values := make([]interface{}, len(columns))
	for i := range columns {
		switch columns[i] {
		case toy.FieldID:
			values[i] = new(sql.NullInt64)
		case toy.FieldColor, toy.FieldMaterial, toy.FieldTitle:
			values[i] = new(sql.NullString)
		case toy.ForeignKeys[0]: // pet_toys
			values[i] = new(sql.NullInt64)
		default:
			return nil, fmt.Errorf("unexpected column %q for type Toy", columns[i])
		}
	}
	return values, nil
}

// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Toy fields. columns and values must be parallel slices produced by
// scanValues.
func (t *Toy) assignValues(columns []string, values []interface{}) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case toy.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			t.ID = int(value.Int64)
		case toy.FieldColor:
			// Invalid (NULL) values leave the field at its zero value.
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field color", values[i])
			} else if value.Valid {
				t.Color = toy.Color(value.String)
			}
		case toy.FieldMaterial:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field material", values[i])
			} else if value.Valid {
				t.Material = toy.Material(value.String)
			}
		case toy.FieldTitle:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field title", values[i])
			} else if value.Valid {
				t.Title = value.String
			}
		case toy.ForeignKeys[0]:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for edge-field pet_toys", value)
			} else if value.Valid {
				t.pet_toys = new(int)
				*t.pet_toys = int(value.Int64)
			}
		}
	}
	return nil
}
// QueryOwner queries the "owner" edge of the Toy entity.
func (t *Toy) QueryOwner() *PetQuery {
	return (&ToyClient{config: t.config}).QueryOwner(t)
}

// Update returns a builder for updating this Toy.
// Note that you need to call Toy.Unwrap() before calling this method if this Toy
// was returned from a transaction, and the transaction was committed or rolled back.
func (t *Toy) Update() *ToyUpdateOne {
	return (&ToyClient{config: t.config}).UpdateOne(t)
}

// Unwrap unwraps the Toy entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// Panics when the entity is not transactional.
func (t *Toy) Unwrap() *Toy {
	tx, ok := t.config.driver.(*txDriver)
	if !ok {
		panic("ent: Toy is not a transactional entity")
	}
	t.config.driver = tx.drv
	return t
}

// String implements the fmt.Stringer.
func (t *Toy) String() string {
	var builder strings.Builder
	builder.WriteString("Toy(")
	builder.WriteString(fmt.Sprintf("id=%v", t.ID))
	builder.WriteString(", color=")
	builder.WriteString(fmt.Sprintf("%v", t.Color))
	builder.WriteString(", material=")
	builder.WriteString(fmt.Sprintf("%v", t.Material))
	builder.WriteString(", title=")
	builder.WriteString(t.Title)
	builder.WriteByte(')')
	return builder.String()
}

// Toys is a parsable slice of Toy.
type Toys []*Toy

// config applies the given configuration to every Toy in the slice.
func (t Toys) config(cfg config) {
	for _i := range t {
		t[_i].config = cfg
	}
}
} | internal/integration/pets/ent/toy.go | 0.656218 | 0.415788 | toy.go | starcoder |
package blur
import (
"image"
"image/color"
"math"
"os"
"github.com/Nyarum/img/utils"
)
// Style selects how Convolve treats kernel samples that fall outside the
// image bounds.
// NOTE(review): the underlying type is float64 even though the values come
// from iota; an int would be conventional, but changing it would break the
// exported API.
type Style float64

const (
	// Ignore edges, may leave them semi-transparent
	IGNORE Style = iota
	// Clamp edges, may leave them looking unblurred
	CLAMP
	// Wrap edges, may change colour of edges
	WRAP
)
// abs returns the absolute value of num.
func abs(num int) int {
	if num >= 0 {
		return num
	}
	return -num
}

// correct reports whether num is a valid kernel dimension: positive and odd
// (so the kernel has a well-defined centre).
func correct(num int) bool {
	return num > 0 && num%2 == 1
}
// A Kernel is a 2-dimensional array of ratios. A 1-dimensional, horizontal or
// vertical, kernel can easily be defined by a 2-dimensional array in the
// obvious manner. The weights are taken as row by column, so kernel[0]
// references the first row and kernel[i][0] (for all i) is the first column.
// All rows are expected to have equal length (see Width).
type Kernel [][]float64
// NewHorizontalKernel builds a 1xwidth kernel populated by f, which receives
// the signed x offset from the mid point. An invalid (non-positive or even)
// width warns and exits the process.
// NOTE(review): the three constructors share the size-validation/exit logic;
// they could delegate to NewKernel to remove the duplication.
func NewHorizontalKernel(width int, f func(x int) float64) Kernel {
	if !correct(width) {
		utils.Warn("Error: kernel size wrong!")
		os.Exit(2)
	}
	mx := (width - 1) / 2
	k := [][]float64{make([]float64, width)}
	for x := 0; x < width; x++ {
		k[0][x] = f(mx - x)
	}
	return k
}

// NewVerticalKernel builds a heightx1 kernel populated by f, which receives
// the signed y offset from the mid point. An invalid height warns and exits.
func NewVerticalKernel(height int, f func(y int) float64) Kernel {
	if !correct(height) {
		utils.Warn("Error: kernel size wrong!")
		os.Exit(2)
	}
	my := (height - 1) / 2
	k := make([][]float64, height)
	for y := 0; y < height; y++ {
		k[y] = []float64{f(my - y)}
	}
	return k
}

// NewKernel creates a new Kernel of the dimensions given, it is populated by
// the given function which itself is passed the signed x and y offsets from the
// mid point.
func NewKernel(height, width int, f func(x, y int) float64) Kernel {
	if !correct(width) || !correct(height) {
		utils.Warn("Error: kernel size wrong!")
		os.Exit(2)
		// should return error really!
	}
	mx := (width - 1) / 2
	my := (height - 1) / 2
	k := make([][]float64, height)
	for y := 0; y < height; y++ {
		k[y] = make([]float64, width)
		for x := 0; x < width; x++ {
			k[y][x] = f(mx-x, my-y)
		}
	}
	return k
}
// Normalised returns a copy of the Kernel where the sum of all entries is 1.
func (k Kernel) Normalised() Kernel {
	h, w := k.Height(), k.Width()
	var sum float64
	for y := 0; y < h; y++ {
		for x := 0; x < w; x++ {
			sum += k[y][x]
		}
	}
	scaled := make([][]float64, h)
	for y := 0; y < h; y++ {
		scaled[y] = make([]float64, w)
		for x := 0; x < w; x++ {
			scaled[y][x] = k[y][x] / sum
		}
	}
	return scaled
}

// Height returns the height of the Kernel.
func (k Kernel) Height() int {
	return len(k)
}

// Width returns the width of the Kernel (the length of the first row).
func (k Kernel) Width() int {
	if len(k) == 0 {
		return 0
	}
	return len(k[0])
}

// Mid returns the centre Point of the Kernel.
func (k Kernel) Mid() image.Point {
	mx := (k.Width() - 1) / 2
	my := (k.Height() - 1) / 2
	return image.Pt(mx, my)
}
// Convolve applies the weighted kernel to every pixel of in, producing a new
// RGBA image of the same bounds. Out-of-bounds kernel samples are handled
// according to style (IGNORE drops them, CLAMP re-weights the centre pixel,
// WRAP reads from the opposite edge).
func Convolve(in image.Image, weights Kernel, style Style) image.Image {
	bnds := in.Bounds()
	mid := weights.Mid()
	o := image.NewRGBA(bnds)
	for y := bnds.Min.Y; y < bnds.Max.Y; y++ {
		for x := bnds.Min.X; x < bnds.Max.X; x++ {
			var r, g, b, a, offset float64
			for oy := 0; oy < weights.Height(); oy++ {
				for ox := 0; ox < weights.Width(); ox++ {
					factor := weights[oy][ox]
					// pt is the source pixel, in image coordinates.
					pt := image.Pt(x+ox-mid.X, y+oy-mid.Y)
					if pt == weights.Mid() {
						// Ignore!
						// NOTE(review): this compares an image-space point
						// against the kernel-space mid point, so pixels whose
						// absolute coordinates happen to equal the kernel
						// centre are silently skipped. This looks like a bug
						// (perhaps `ox == mid.X && oy == mid.Y` was meant) —
						// confirm intent before changing.
					} else if pt.In(bnds) {
						or, og, ob, oa := in.At(pt.X, pt.Y).RGBA()
						r += float64(or) * factor
						g += float64(og) * factor
						b += float64(ob) * factor
						a += float64(oa) * factor
					} else {
						switch style {
						case CLAMP:
							// Accumulate the weight of out-of-bounds samples;
							// it is applied to the centre pixel below.
							offset += factor
						case WRAP:
							// Wrap the coordinate to the opposite edge.
							if pt.X >= bnds.Max.X {
								pt.X = pt.X - bnds.Max.X
							} else if pt.X < bnds.Min.X {
								pt.X = bnds.Dx() + pt.X
							}
							if pt.Y >= bnds.Max.Y {
								pt.Y = pt.Y - bnds.Max.Y
							} else if pt.Y < bnds.Min.Y {
								pt.Y = bnds.Dy() + pt.Y
							}
							or, og, ob, oa := in.At(pt.X, pt.Y).RGBA()
							r += float64(or) * factor
							g += float64(og) * factor
							b += float64(ob) * factor
							a += float64(oa) * factor
						}
					}
				}
			}
			if offset != 0 && style == CLAMP {
				// Give the clamped (out-of-bounds) weight to the centre pixel.
				or, og, ob, oa := in.At(x, y).RGBA()
				r += float64(or) * offset
				g += float64(og) * offset
				b += float64(ob) * offset
				a += float64(oa) * offset
			}
			// RGBA() returns 16-bit channels; dividing by 255 scales back to
			// 8-bit, with Truncatef presumably clamping to [0, 255] — confirm.
			o.Set(x, y, color.RGBA{
				uint8(utils.Truncatef(r / 255)),
				uint8(utils.Truncatef(g / 255)),
				uint8(utils.Truncatef(b / 255)),
				uint8(utils.Truncatef(a / 255)),
			})
		}
	}
	return o
}
// Convolve2 performs a convolution with two Kernels in succession; used to
// apply a separable 2D blur as two cheaper 1D passes.
func Convolve2(in image.Image, a, b Kernel, style Style) image.Image {
	return Convolve(Convolve(in, a, style), b, style)
}

// Box performs a box blur on the Image given, using separable uniform
// kernels of size 2*radius+1.
func Box(in image.Image, radius int, style Style) image.Image {
	// Every tap has equal weight; Normalised makes them sum to 1.
	f := func(n int) float64 { return 1.0 }
	tall := NewVerticalKernel(radius*2+1, f).Normalised()
	wide := NewHorizontalKernel(radius*2+1, f).Normalised()
	return Convolve2(in, tall, wide, style)
}

// Gaussian performs a gaussian blur on the Image given, with the given
// radius and standard deviation sigma.
func Gaussian(in image.Image, radius int, sigma float64, style Style) image.Image {
	// Unnormalised Gaussian weight for a tap n pixels from the centre.
	f := func(n int) float64 {
		return math.Exp(-float64(n*n) / (2 * sigma * sigma))
	}
	tall := NewVerticalKernel(radius*2+1, f).Normalised()
	wide := NewHorizontalKernel(radius*2+1, f).Normalised()
	return Convolve2(in, tall, wide, style)
} | blur/blur.go | 0.839405 | 0.443902 | blur.go | starcoder |
package conf
// StringVar defines a string flag and environment variable with specified name, default value, and usage string.
// The argument p points to a string variable in which to store the value of the flag and/or environment variable.
// The value is registered with both the environment set and the flag set;
// which source wins is determined by c.env()/c.flag() — confirm precedence there.
func (c *Configurator) StringVar(p *string, name string, value string, usage string) {
	c.env().StringVar(p, name, value, usage)
	c.flag().StringVar(p, name, value, usage)
}

// String defines a string flag and environment variable with specified name, default value, and usage string.
// The return value is the address of a string variable that stores the value of the flag and/or environment variable.
func (c *Configurator) String(name string, value string, usage string) *string {
	p := new(string)
	c.StringVar(p, name, value, usage)
	return p
}

// StringVarE defines a string environment variable with specified name, default value, and usage string.
// The argument p points to a string variable in which to store the value of the environment variable.
func (c *Configurator) StringVarE(p *string, name string, value string, usage string) {
	c.env().StringVar(p, name, value, usage)
}

// StringE defines a string environment variable with specified name, default value, and usage string.
// The return value is the address of a string variable that stores the value of the environment variable.
func (c *Configurator) StringE(name string, value string, usage string) *string {
	p := new(string)
	c.StringVarE(p, name, value, usage)
	return p
}

// StringVarF defines a string flag with specified name, default value, and usage string.
// The argument p points to a string variable in which to store the value of the flag.
func (c *Configurator) StringVarF(p *string, name string, value string, usage string) {
	c.flag().StringVar(p, name, value, usage)
}

// StringF defines a string flag with specified name, default value, and usage string.
// The return value is the address of a string variable that stores the value of the flag.
func (c *Configurator) StringF(name string, value string, usage string) *string {
	p := new(string)
	c.StringVarF(p, name, value, usage)
	return p
}
// StringVar defines a string flag and environment variable with specified name, default value, and usage string.
// The argument p points to a string variable in which to store the value of the flag and/or environment variable.
// These package-level helpers delegate to the Global configurator.
func StringVar(p *string, name string, value string, usage string) {
	Global.StringVar(p, name, value, usage)
}

// String defines a string flag and environment variable with specified name, default value, and usage string.
// The return value is the address of a string variable that stores the value of the flag and/or environment variable.
func String(name string, value string, usage string) *string {
	return Global.String(name, value, usage)
}

// StringVarE defines a string environment variable with specified name, default value, and usage string.
// The argument p points to a string variable in which to store the value of the environment variable.
func StringVarE(p *string, name string, value string, usage string) {
	Global.StringVarE(p, name, value, usage)
}

// StringE defines a string environment variable with specified name, default value, and usage string.
// The return value is the address of a string variable that stores the value of the environment variable.
func StringE(name string, value string, usage string) *string {
	return Global.StringE(name, value, usage)
}

// StringVarF defines a string flag with specified name, default value, and usage string.
// The argument p points to a string variable in which to store the value of the flag.
func StringVarF(p *string, name string, value string, usage string) {
	Global.StringVarF(p, name, value, usage)
}

// StringF defines a string flag with specified name, default value, and usage string.
// The return value is the address of a string variable that stores the value of the flag.
func StringF(name string, value string, usage string) *string {
	return Global.StringF(name, value, usage)
} | value_string.go | 0.837852 | 0.658568 | value_string.go | starcoder |
package keeper
import (
"fmt"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/kava-labs/kava/x/incentive/types"
)
// AccumulateSwapRewards calculates new rewards to distribute this block and updates the global indexes to reflect this.
// The provided rewardPeriod must be valid to avoid panics in calculating time durations.
func (k Keeper) AccumulateSwapRewards(ctx sdk.Context, rewardPeriod types.MultiRewardPeriod) {
	previousAccrualTime, found := k.GetSwapRewardAccrualTime(ctx, rewardPeriod.CollateralType)
	if !found {
		// First accumulation for this pool: start accruing from the current block.
		previousAccrualTime = ctx.BlockTime()
	}

	indexes, found := k.GetSwapRewardIndexes(ctx, rewardPeriod.CollateralType)
	if !found {
		indexes = types.RewardIndexes{}
	}

	acc := types.NewAccumulator(previousAccrualTime, indexes)
	totalSource := k.getSwapTotalSourceShares(ctx, rewardPeriod.CollateralType)
	acc.Accumulate(rewardPeriod, totalSource, ctx.BlockTime())

	k.SetSwapRewardAccrualTime(ctx, rewardPeriod.CollateralType, acc.PreviousAccumulationTime)
	if len(acc.Indexes) > 0 {
		// the store panics when setting empty or nil indexes
		k.SetSwapRewardIndexes(ctx, rewardPeriod.CollateralType, acc.Indexes)
	}
}

// getSwapTotalSourceShares fetches the sum of all source shares for a swap reward.
// In the case of swap, these are the total (swap module) shares in a particular pool.
// A missing pool contributes zero shares, so no rewards accumulate for it.
func (k Keeper) getSwapTotalSourceShares(ctx sdk.Context, poolID string) sdk.Dec {
	totalShares, found := k.swapKeeper.GetPoolShares(ctx, poolID)
	if !found {
		totalShares = sdk.ZeroInt()
	}
	return totalShares.ToDec()
}
// InitializeSwapReward creates a new claim with zero rewards and indexes matching the global indexes.
// If the claim already exists it just updates the indexes.
func (k Keeper) InitializeSwapReward(ctx sdk.Context, poolID string, owner sdk.AccAddress) {
	claim, found := k.GetSwapClaim(ctx, owner)
	if !found {
		claim = types.NewSwapClaim(owner, sdk.Coins{}, nil)
	}

	globalRewardIndexes, found := k.GetSwapRewardIndexes(ctx, poolID)
	if !found {
		globalRewardIndexes = types.RewardIndexes{}
	}
	claim.RewardIndexes = claim.RewardIndexes.With(poolID, globalRewardIndexes)

	k.SetSwapClaim(ctx, claim)
}

// SynchronizeSwapReward updates the claim object by adding any accumulated rewards
// and updating the reward index value. shares is the owner's current share
// amount in the pool. No-op when the owner has no claim.
func (k Keeper) SynchronizeSwapReward(ctx sdk.Context, poolID string, owner sdk.AccAddress, shares sdk.Int) {
	claim, found := k.GetSwapClaim(ctx, owner)
	if !found {
		return
	}
	claim = k.synchronizeSwapReward(ctx, claim, poolID, owner, shares)

	k.SetSwapClaim(ctx, claim)
}

// synchronizeSwapReward updates the reward and indexes in a swap claim for one pool.
// The updated claim is returned; it is not persisted here.
func (k *Keeper) synchronizeSwapReward(ctx sdk.Context, claim types.SwapClaim, poolID string, owner sdk.AccAddress, shares sdk.Int) types.SwapClaim {
	globalRewardIndexes, found := k.GetSwapRewardIndexes(ctx, poolID)
	if !found {
		// The global factor is only not found if
		// - the pool has not started accumulating rewards yet (either there is no reward specified in params, or the reward start time hasn't been hit)
		// - OR it was wrongly deleted from state (factors should never be removed while unsynced claims exist)
		// If not found we could either skip this sync, or assume the global factor is zero.
		// Skipping will avoid storing unnecessary factors in the claim for non rewarded pools.
		// And in the event a global factor is wrongly deleted, it will avoid this function panicking when calculating rewards.
		return claim
	}

	userRewardIndexes, found := claim.RewardIndexes.Get(poolID)
	if !found {
		// Normally the reward indexes should always be found.
		// But if a pool was not rewarded then becomes rewarded (ie a reward period is added to params), then the indexes will be missing from claims for that pool.
		// So given the reward period was just added, assume the starting value for any global reward indexes, which is an empty slice.
		userRewardIndexes = types.RewardIndexes{}
	}

	newRewards, err := k.CalculateRewards(userRewardIndexes, globalRewardIndexes, shares.ToDec())
	if err != nil {
		// Global reward factors should never decrease, as it would lead to a negative update to claim.Rewards.
		// This panics if a global reward factor decreases or disappears between the old and new indexes.
		panic(fmt.Sprintf("corrupted global reward indexes found: %v", err))
	}

	claim.Reward = claim.Reward.Add(newRewards...)
	claim.RewardIndexes = claim.RewardIndexes.With(poolID, globalRewardIndexes)

	return claim
}
// GetSynchronizedSwapClaim fetches a swap claim from the store and syncs rewards for all rewarded pools.
// The second return value is false when the owner has no claim. The synced
// claim is returned without being persisted.
func (k Keeper) GetSynchronizedSwapClaim(ctx sdk.Context, owner sdk.AccAddress) (types.SwapClaim, bool) {
	claim, found := k.GetSwapClaim(ctx, owner)
	if !found {
		return types.SwapClaim{}, false
	}

	// Sync against every pool that has global reward indexes; owners with no
	// deposit in a pool are synced with zero shares.
	k.IterateSwapRewardIndexes(ctx, func(poolID string, _ types.RewardIndexes) bool {
		shares, found := k.swapKeeper.GetDepositorSharesAmount(ctx, owner, poolID)
		if !found {
			shares = sdk.ZeroInt()
		}

		claim = k.synchronizeSwapReward(ctx, claim, poolID, owner, shares)

		return false
	})

	return claim, true
} | x/incentive/keeper/rewards_swap.go | 0.791096 | 0.467453 | rewards_swap.go | starcoder |
package steps
import (
"bytes"
"github.com/bitflow-stream/go-bitflow/bitflow"
"github.com/bitflow-stream/go-bitflow/script/reg"
)
// ExpressionProcessor evaluates a list of compiled expressions against every
// sample passing through the pipeline. With Filter set the expressions act as
// a sample filter (String joins them with "&&", suggesting a conjunction —
// confirm in evaluate); otherwise they are run for their effects on the sample.
type ExpressionProcessor struct {
	bitflow.NoopProcessor
	// Filter switches between filtering and plain-execution behaviour.
	Filter bool
	// checker detects header changes so expressions can re-bind field names.
	checker bitflow.HeaderChecker
	// expressions holds the compiled expressions, evaluated in order.
	expressions []*Expression
}
func RegisterExpression(b reg.ProcessorRegistry) {
b.RegisterStep("do",
func(p *bitflow.SamplePipeline, params map[string]interface{}) error {
return addExpression(p, params, false)
},
"Execute the given expression on every sample").
Required("expr", reg.String(), "Allows arithmetic and boolean operations. Can also perform them on sample fields "+
"(e.g. field1 [+,-,*,/] field2).",
"Following additional functions are implemented: ",
"tag(string) string: Access tag by tag key (string). Returns tag string or empty string if key does not exist.",
"",
"has_tag(string) bool: Check existence of tag key.",
"",
"set_tag(string, string) bitflow.SampleAndHeader: Adds or replaces a tag at the current sample and returns the result.",
"First argument is key, second is the tag value.",
"",
"timestamp() float64: Returns the timestamp of the current sample.",
"",
"now() float64: Returns the current local system time.",
"",
"num() float64: Returns the current number of processed samples.",
"",
"str(_) string: Converts an arbitrary argument to string.",
"",
"strToFloat(string) float64: Converts a string argument to float64.",
"",
"date_str(float64) string: Converts the timestamp argument to a string representation.",
"",
"set_timestamp(float64) bitflow.SampleAndHeader: Sets the timestamp of the current sample and returns the result.",
"",
"floor(float64) float64: Applies the floor operation on the argument.",
"",
"set(string, float64, optional: bitflow.SampleAndHeader) bitflow.SampleAndHeader:",
" Sets or replaces the value (2nd argument) of the sample field (1st argument).",
"Third argument is optional. If set, the setting is applied on the passed sample.",
"Otherwise it is applied on the current sample.",
"",
"get_sample_and_header(): bitflow.SampleAndHeader: Returns the current sample.",
"",
"Note that arithmetic, boolean and function expressions can be combines as long as the arguments and return types match.",
"Some examples: ",
"expr='set_tag(\"my_system_time\", str(now()))'",
"expr='set_timestamp(now())'",
"expr='set(\"field3\", field1 + field2)'",
"expr='set(\"field1\", field1 * 10)'",
"expr='set(\"field3\", field1 * 10, set(\"field2\", now(), set_tag(\"new_tag\", \"awesome\")))'",
"",
"Currently the field to value mapping is done once before each sample is processed.",
"Therefore, interdependent arithmetic operations produce possibly unexpected results.",
"Example: expr='set(\"field1\", field1 + 10, set(\"field1\", 10))'",
"The expected value for field1 would be 20.",
"However, the actual result would be the original value of field1 + 10 or an error if field1 does not exist in the sample.")
}
// RegisterFilterExpression registers the "filter" pipeline step, which drops
// samples for which the configured boolean expression evaluates to false.
// The step requires a single string parameter named "expr".
func RegisterFilterExpression(b reg.ProcessorRegistry) {
	b.RegisterStep("filter",
		func(p *bitflow.SamplePipeline, params map[string]interface{}) error {
			return addExpression(p, params, true) // true: treat the expression as a filter
		},
		"Filter the samples based on a boolean expression").
		Required("expr", reg.String())
}
// addExpression builds an ExpressionProcessor from the "expr" parameter and,
// on success, appends it to the pipeline. The filter flag selects between
// filtering (boolean) and transforming semantics.
// NOTE(review): params["expr"] is type-asserted to string without a check;
// this relies on the registry declaring "expr" as a required string parameter.
func addExpression(p *bitflow.SamplePipeline, params map[string]interface{}, filter bool) error {
	proc := &ExpressionProcessor{Filter: filter}
	err := proc.AddExpression(params["expr"].(string))
	if err == nil {
		p.Add(proc)
	}
	return err
}
// AddExpression parses the given expression string and appends the resulting
// expression to the processor. Parsing errors are returned unchanged and the
// processor state is not modified in that case.
func (p *ExpressionProcessor) AddExpression(expressionString string) error {
	expr, err := NewExpression(expressionString)
	if err != nil {
		return err
	}
	p.expressions = append(p.expressions, expr)
	return nil
}
// Sample evaluates all registered expressions against the incoming sample.
// Evaluation errors abort processing; if evaluate returns nil outputs (a
// filter expression rejected the sample) the sample is silently dropped,
// otherwise the possibly-modified sample is forwarded downstream.
func (p *ExpressionProcessor) Sample(sample *bitflow.Sample, header *bitflow.Header) error {
	if outSample, outHeader, err := p.evaluate(sample, header); err != nil {
		return err
	} else if outSample != nil && outHeader != nil {
		return p.NoopProcessor.Sample(outSample, outHeader)
	}
	return nil
}
// MergeProcessor absorbs another ExpressionProcessor into this one by
// appending its expressions. Merging only succeeds when the other processor
// is also an ExpressionProcessor with the same Filter mode; otherwise false
// is returned and nothing changes.
func (p *ExpressionProcessor) MergeProcessor(otherProcessor bitflow.SampleProcessor) bool {
	other, ok := otherProcessor.(*ExpressionProcessor)
	if !ok {
		return false
	}
	if other.Filter != p.Filter {
		return false
	}
	p.expressions = append(p.expressions, other.expressions...)
	return true
}
// String returns a human-readable description listing all configured
// expressions. Filter expressions are joined with " && ", transforming
// expressions with "; ".
func (p *ExpressionProcessor) String() string {
	title, separator := "Expression", "; "
	if p.Filter {
		title, separator = "Expression filter", " && "
	}
	var buf bytes.Buffer
	for i, expr := range p.expressions {
		if i > 0 {
			buf.WriteString(separator)
		}
		buf.WriteString(expr.expr.String())
	}
	return title + ": " + buf.String()
}
// evaluate runs every expression against the sample in registration order.
// In filter mode the first expression that yields false short-circuits and
// returns (nil, nil, nil), signalling the caller to drop the sample. In
// transform mode each expression receives the output of the previous one,
// so expressions are chained.
func (p *ExpressionProcessor) evaluate(sample *bitflow.Sample, header *bitflow.Header) (*bitflow.Sample, *bitflow.Header, error) {
	// Re-resolve field references in all expressions when the header changes.
	if p.checker.HeaderChanged(header) {
		for _, expr := range p.expressions {
			if err := expr.UpdateHeader(header); err != nil {
				return nil, nil, err
			}
		}
	}
	outSample := sample
	outHeader := header
	var err error
	var res bool
	for _, expr := range p.expressions {
		if p.Filter {
			res, err = expr.EvaluateBool(outSample, outHeader)
			if err != nil {
				return nil, nil, err
			}
			if !res {
				// Sample rejected by the filter: nil outputs mean "drop".
				return nil, nil, nil
			}
		} else {
			// NOTE(review): an error here is only surfaced after the loop;
			// subsequent expressions still run on the previous outputs.
			outSample, outHeader, err = expr.Evaluate(outSample, outHeader)
		}
	}
	return outSample, outHeader, err
}
package kafkazk
import (
"math"
"sort"
)
// DegreeDistribution counts broker to broker relationships.
type DegreeDistribution struct {
	// Relationships is an adjacency list
	// where an edge between brokers is defined as
	// a common occupancy in at least one replica set.
	// For instance, given the replica set [1001,1002,1003],
	// ID 1002 has a relationship with 1001 and 1003.
	Relationships map[int]map[int]struct{}
}
// NewDegreeDistribution returns an empty, ready-to-use DegreeDistribution.
func NewDegreeDistribution() DegreeDistribution {
	relationships := make(map[int]map[int]struct{})
	return DegreeDistribution{Relationships: relationships}
}
// Add records a replica set: every broker in nodes becomes a neighbor of
// every other broker in nodes. Self-edges are never added.
func (dd DegreeDistribution) Add(nodes []int) {
	for _, n := range nodes {
		adjacent, ok := dd.Relationships[n]
		if !ok {
			adjacent = make(map[int]struct{})
			dd.Relationships[n] = adjacent
		}
		for _, m := range nodes {
			if m == n {
				continue
			}
			adjacent[m] = struct{}{}
		}
	}
}
// Count returns the number of distinct brokers that share at least one
// replica set with broker n. Unknown IDs yield 0 (len of a nil map is 0).
func (dd DegreeDistribution) Count(n int) int {
	return len(dd.Relationships[n])
}
// DegreeDistributionStats holds general statistical
// information describing the DegreeDistribution counts.
type DegreeDistributionStats struct {
	Min float64 // smallest degree over all brokers
	Max float64 // largest degree over all brokers
	Avg float64 // arithmetic mean degree
}
// Stats summarizes the degree counts of all brokers as min, max, and
// arithmetic mean. An empty distribution yields the zero-value stats.
func (dd DegreeDistribution) Stats() DegreeDistributionStats {
	total := len(dd.Relationships)
	if total == 0 {
		return DegreeDistributionStats{}
	}
	var sum, lo, hi int
	first := true
	for node := range dd.Relationships {
		c := dd.Count(node)
		sum += c
		if first || c < lo {
			lo = c
		}
		if first || c > hi {
			hi = c
		}
		first = false
	}
	return DegreeDistributionStats{
		Min: float64(lo),
		Max: float64(hi),
		Avg: float64(sum) / float64(total),
	}
}
// DegreeDistribution returns the DegreeDistribution for the PartitionMap,
// built by adding every partition's replica set.
func (pm *PartitionMap) DegreeDistribution() DegreeDistribution {
	d := NewDegreeDistribution()
	for _, partn := range pm.Partitions {
		d.Add(partn.Replicas)
	}
	return d
}
// StorageDiff takes two BrokerMaps and returns a per broker ID
// diff in storage as a [2]float64: [absolute, percentage] diff.
// Stub brokers and brokers absent from b2 are skipped.
// NOTE(review): the percentage divides by b[bid].StorageFree; a broker with
// zero free storage would yield +/-Inf or NaN — confirm callers tolerate this.
func (b BrokerMap) StorageDiff(b2 BrokerMap) map[int][2]float64 {
	d := map[int][2]float64{}
	for bid := range b {
		if bid == StubBrokerID {
			continue
		}
		if _, exist := b2[bid]; !exist {
			continue
		}
		diff := b2[bid].StorageFree - b[bid].StorageFree
		p := diff / b[bid].StorageFree * 100
		d[bid] = [2]float64{diff, p}
	}
	return d
}
// StorageRangeSpread returns the range spread
// of free storage for all brokers in the BrokerMap:
// (max - min) / min * 100.
func (b BrokerMap) StorageRangeSpread() float64 {
	l, h := b.MinMax()
	// Return range spread.
	return (h - l) / l * 100
}
// StorageRange returns the range (max - min) of free
// storage for all brokers in the BrokerMap.
func (b BrokerMap) StorageRange() float64 {
	l, h := b.MinMax()
	// Return range.
	return h - l
}
// MinMax returns the lowest and highest StorageFree values over all
// non-stub brokers, in that order.
// NOTE(review): with no non-stub brokers this returns (math.MaxFloat64, 0)
// — callers such as StorageRangeSpread should not hit that case.
func (b BrokerMap) MinMax() (float64, float64) {
	// Get the high/low StorageFree values.
	h, l := 0.00, math.MaxFloat64
	for id := range b {
		if id == StubBrokerID {
			continue
		}
		v := b[id].StorageFree
		// Update the high/low.
		if v > h {
			h = v
		}
		if v < l {
			l = v
		}
	}
	return l, h
}
// StorageStdDev returns the (population) standard deviation
// of free storage for all non-stub brokers in the BrokerMap.
// NOTE(review): with zero non-stub brokers this divides by 0 and
// returns NaN — confirm callers guard against empty maps.
func (b BrokerMap) StorageStdDev() float64 {
	var m float64 // mean
	var t float64 // sum of StorageFree
	var s float64 // sum of squared deviations
	var l float64 // count of non-stub brokers
	for id := range b {
		if id == StubBrokerID {
			continue
		}
		l++
		t += b[id].StorageFree
	}
	m = t / l
	for id := range b {
		if id == StubBrokerID {
			continue
		}
		s += math.Pow(m-b[id].StorageFree, 2)
	}
	msq := s / l
	return math.Sqrt(msq)
}
// HMean returns the harmonic mean of broker storage free.
// Stub brokers and brokers with non-positive free storage are excluded.
func (b BrokerMap) HMean() float64 {
	var t float64 // sum of reciprocals
	var c float64 // count of included brokers
	for _, br := range b {
		if br.ID != StubBrokerID && br.StorageFree > 0 {
			c++
			t += (1.00 / br.StorageFree)
		}
	}
	return c / t
}
// Mean returns the arithmetic mean of broker storage free.
// Stub brokers and brokers with non-positive free storage are excluded.
func (b BrokerMap) Mean() float64 {
	var t float64 // sum of StorageFree
	var c float64 // count of included brokers
	for _, br := range b {
		if br.ID != StubBrokerID && br.StorageFree > 0 {
			c++
			t += br.StorageFree
		}
	}
	return t / c
}
// AboveMean returns a sorted []int of broker IDs that are above the mean
// by d percent (0.00 < d). The mean type is provided as a function f
// (e.g. Mean or HMean), which allows the caller to choose the mean flavor.
// A non-positive d returns nil.
func (b BrokerMap) AboveMean(d float64, f func() float64) []int {
	m := f()
	var ids []int
	if d <= 0.00 {
		return ids
	}
	for _, br := range b {
		if br.ID == StubBrokerID {
			continue
		}
		// Relative distance above the mean must exceed d.
		if (br.StorageFree-m)/m > d {
			ids = append(ids, br.ID)
		}
	}
	sort.Ints(ids)
	return ids
}
// BelowMean returns a sorted []int of broker IDs that are below the mean
// by d percent (0.00 < d). The mean type is provided as a function f
// (e.g. Mean or HMean), which allows the caller to choose the mean flavor.
// A non-positive d returns nil.
func (b BrokerMap) BelowMean(d float64, f func() float64) []int {
	m := f()
	var ids []int
	if d <= 0.00 {
		return ids
	}
	for _, br := range b {
		if br.ID == StubBrokerID {
			continue
		}
		// Relative distance below the mean must exceed d.
		if (m-br.StorageFree)/m > d {
			ids = append(ids, br.ID)
		}
	}
	sort.Ints(ids)
	return ids
}
package main
import (
"fmt"
"sort"
)
// https://gist.github.com/inky/3188870
// Notes lists the twelve chromatic pitch classes starting at A. Index
// arithmetic modulo len(Notes) walks the chromatic scale.
var Notes = []string{"A", "A#", "B", "C", "C#", "D", "D#", "E", "F", "F#", "G", "G#"}
// Scales as steps from the previous note (in semitones). Each entry lists
// the interval to the next scale degree; GetScale walks these cumulatively
// starting from a root note.
var Scales = map[string][]int{
	"Major (Ionian)":           {2, 2, 1, 2, 2, 2, 1},
	"Dorian Mode":              {2, 1, 2, 2, 2, 1, 2},
	"Phrygian Mode":            {1, 2, 2, 2, 1, 2, 2},
	"Lydian Mode":              {2, 2, 2, 1, 2, 2, 1},
	"Mixolydian Mode":          {2, 2, 1, 2, 2, 1, 2},
	"Natural Minor (Aeolian)":  {2, 1, 2, 2, 1, 2, 2},
	"Locrian Mode":             {1, 2, 2, 1, 2, 2, 2},
	"Harmonic Minor":           {2, 1, 2, 2, 1, 3, 1},
	"Locrian nat6":             {1, 2, 2, 1, 3, 1, 2},
	"Ionian #5":                {2, 2, 1, 3, 1, 2, 1},
	"Ukranian minor":           {2, 1, 3, 1, 2, 1, 2},
	"Phrygian dominant":        {1, 3, 1, 2, 1, 2, 2},
	"Lydian #2":                {3, 1, 2, 1, 2, 2, 1},
	"Super Locrian diminished": {1, 2, 1, 2, 2, 1, 3},
	"Diminished":               {2, 1, 2, 1, 2, 1, 2, 1},
	"Dominant Diminished":      {1, 2, 1, 2, 1, 2, 1, 2},
	"Pentatonic Major":         {2, 2, 3, 2, 3},
	"Pentatonic Minor":         {3, 2, 2, 3, 2},
	"Metallica":                {1, 1, 1, 2, 1, 1, 1, 2, 2},
}
// Chords as the distance from the root note
var Chords = map[string][]int{
"Major": {0, 4, 7},
"Minor": {0, 3, 7},
"Augmented": {0, 4, 8},
"Diminished": {0, 4, 6},
"sus2": {0, 2, 7},
"sus4": {0, 5, 7},
"Power": {0, 7},
"7": {0, 4, 7, 10},
"m7": {0, 3, 7, 10},
"maj7": {0, 4, 7, 11},
"dom7": {0, 4, 7, 10},
"dim7": {0, 3, 6, 9},
"dom7f5": {0, 4, 6, 10},
"halfdim7": {0, 3, 6, 10},
"majdim7": {0, 3, 6, 11},
"minmaj7": {0, 3, 7, 11},
"augmaj7": {0, 4, 8, 11},
"aug7": {0, 4, 8, 10},
"7sus2": {0, 5, 7, 10},
"9": {0, 4, 7, 10, 14},
"m9": {0, 3, 7, 10, 14},
"maj9": {0, 4, 7, 11, 14},
"11": {0, 4, 7, 10, 14, 17},
"m11": {0, 3, 7, 10, 14, 17},
}
// NotePosition returns the index of note within Notes, or -1 and an error
// when the note name is unknown.
func NotePosition(note string) (int, error) {
	for i, n := range Notes {
		if n == note {
			return i, nil
		}
	}
	return -1, fmt.Errorf("note '%s' doesn't exist", note)
}
// GetNote returns the note that lies the given number of semitone steps away
// from the named note, wrapping around the chromatic scale. Negative step
// counts walk downwards. An error is returned for unknown note names.
func GetNote(note string, steps int) (string, error) {
	pos, err := NotePosition(note)
	if err != nil {
		return "", err
	}
	// Normalize into [0, len(Notes)): Go's % operator can yield a negative
	// result for negative operands, which previously caused an index panic
	// whenever steps was negative enough to make pos+steps < 0.
	idx := ((pos+steps)%len(Notes) + len(Notes)) % len(Notes)
	return Notes[idx], nil
}
// GetScale returns the note names of the named scale built on the given root.
// The result has one entry per interval in Scales[scale], starting with the
// root itself. Unknown notes or scale names yield an error.
func GetScale(note, scale string) ([]string, error) {
	pos, err := NotePosition(note)
	if err != nil {
		return nil, err
	}
	if _, ok := Scales[scale]; !ok {
		return nil, fmt.Errorf("scale '%s' doesn't exist", scale)
	}
	ret := make([]string, 0, len(Scales[scale]))
	for _, steps := range Scales[scale] {
		// Emit the current degree, then advance by the interval (wrapping).
		ret = append(ret, Notes[pos])
		pos = (pos + steps) % len(Notes)
	}
	return ret, nil
}
// GetChord returns the note names of the named chord built on the given root.
// Each chord tone is the root index plus the offset from Chords[chord],
// wrapped around the chromatic scale. Unknown notes or chord names yield an
// error.
func GetChord(note, chord string) ([]string, error) {
	pos, err := NotePosition(note)
	if err != nil {
		return nil, err
	}
	if _, ok := Chords[chord]; !ok {
		return nil, fmt.Errorf("chord '%s' doesn't exist", chord)
	}
	ret := make([]string, 0, len(Chords[chord]))
	for _, distance := range Chords[chord] {
		ret = append(ret, Notes[(pos+distance)%len(Notes)])
	}
	return ret, nil
}
// IsChordInScale reports whether every chord note also appears in the scale.
// An empty chord is trivially contained in any scale.
func IsChordInScale(chordNotes, scaleNotes []string) bool {
	inScale := func(note string) bool {
		for _, sn := range scaleNotes {
			if sn == note {
				return true
			}
		}
		return false
	}
	for _, cn := range chordNotes {
		if !inScale(cn) {
			return false
		}
	}
	return true
}
// ChordMap maps a root note name to the list of chord names playable on that
// root (sorted alphabetically by GetChordsInScale).
type ChordMap map[string][]string
// GetChordsInScale returns, for every chromatic root note, the chords whose
// tones are all contained in the given scale built on root. The chord lists
// are sorted per root so the result is deterministic despite map iteration
// order. Unknown root or scale names yield an error.
func GetChordsInScale(root, scale string) (ChordMap, error) {
	scalenotes, err := GetScale(root, scale)
	if err != nil {
		return nil, err
	}
	scalechords := ChordMap{}
	// Get the chords in a map with the Note name as the key
	for i := range Notes {
		for j := range Chords {
			notes, err := GetChord(Notes[i], j)
			if err != nil {
				return nil, err
			}
			if IsChordInScale(notes, scalenotes) {
				scalechords[Notes[i]] = append(scalechords[Notes[i]], j)
			}
		}
		sort.Strings(scalechords[Notes[i]])
	}
	return scalechords, err
}
package main
import (
"fmt"
"image/color"
"github.com/hajimehoshi/ebiten/v2"
)
var (
	// Zoom controls the zoom amount
	Zoom = 8
	// WindowWidth is the width of the window
	WindowWidth = 500
	// WindowHeight is the height of the window
	WindowHeight = 500
	// BoxWidth is the width of the underlying array of particles
	BoxWidth = WindowWidth / Zoom
	// BoxHeight is the height of the underlying array of particles
	BoxHeight = WindowHeight / Zoom
)

// ParticleSize is the size of the sand particles
const ParticleSize = 1

// Particle holds the data each particle contains like its image, x position, and y position
type Particle struct {
	Img  *ebiten.Image
	X, Y float64 // grid coordinates; stored as floats, updated in 1-cell steps
}
// Game is the main game struct holding the game state
type Game struct {
	particleImg *ebiten.Image            // shared image all sand particles draw with
	col         uint8                    // current color channel value for the pulsing tint
	bCol        bool                     // direction flag for the color pulse (true = decreasing)
	op          *ebiten.DrawImageOptions // reused draw options to avoid per-frame allocation
	particles   []*Particle              // flat BoxWidth*BoxHeight grid, indexed y*BoxWidth+x
}
// Init is the initialization function of Game: it allocates the shared
// particle image (yellow) and the flat particle grid.
func (g *Game) Init() {
	g.op = &ebiten.DrawImageOptions{}
	g.particleImg = ebiten.NewImage(ParticleSize, ParticleSize)
	g.particleImg.Fill(color.RGBA{255, 255, 0, 255})
	g.particles = make([]*Particle, BoxWidth*BoxHeight)
}
// Update does the update logic of the game: input handling, window-resize
// bookkeeping, color pulsing, and one step of the falling-sand simulation.
func (g *Game) Update() error {
	// Space clears all sand.
	if ebiten.IsKeyPressed(ebiten.KeySpace) {
		g.particles = make([]*Particle, len(g.particles))
	}
	// Track window size; a changed size reallocates (and thereby clears)
	// the particle grid.
	WindowWidth, WindowHeight = ebiten.WindowSize()
	BoxWidth, BoxHeight = (WindowWidth/Zoom)+1, (WindowHeight/Zoom)+1
	diff := BoxWidth*BoxHeight - len(g.particles)
	if diff != 0 {
		g.particles = make([]*Particle, len(g.particles)+diff)
	}
	// Pulse the sand color between 150 and 255.
	if g.bCol {
		g.col--
		if g.col == 150 {
			g.bCol = false
		}
	} else {
		g.col++
		if g.col == 255 {
			g.bCol = true
		}
	}
	g.particleImg.Fill(color.RGBA{g.col, g.col, 0, 255})
	// Left click places sand at the cursor cell, right click removes it.
	// NOTE(review): the cursor position is in screen pixels but is compared
	// against BoxWidth/BoxHeight (grid cells) — confirm this is intended at
	// Zoom > 1.
	x, y := ebiten.CursorPosition()
	if ebiten.IsMouseButtonPressed(ebiten.MouseButtonLeft) {
		if x > 0-ParticleSize && y > 0-ParticleSize && x < BoxWidth && y < BoxHeight {
			g.particles[y*BoxWidth+x] = &Particle{g.particleImg, float64(x), float64(y)}
		}
	} else if ebiten.IsMouseButtonPressed(ebiten.MouseButtonRight) {
		if x > 0-ParticleSize && y > 0-ParticleSize && x < BoxWidth && y < BoxHeight {
			g.particles[y*BoxWidth+x] = nil
		}
	}
	// One simulation step. doNotChange marks cells already moved this frame
	// so a particle is not moved twice.
	doNotChange := map[int]bool{}
	for i := range g.particles {
		if g.particles[i] != nil {
			p := g.particles[i]
			_, ok := doNotChange[i]
			if ok {
				continue
			}
			if p.Y < float64(BoxHeight-ParticleSize) {
				// Try straight down, then down-right, then down-left.
				if int(p.Y+1)*BoxWidth+int(p.X) < len(g.particles) && g.particles[int(p.Y+1)*BoxWidth+int(p.X)] == nil {
					p.Y++
					j := int(p.Y)*BoxWidth + int(p.X)
					g.particles[j] = p
					doNotChange[j] = true
					g.particles[i] = nil
				} else if int(p.X+1) < BoxWidth-ParticleSize && int(p.Y+1)*BoxWidth+int(p.X+1) < len(g.particles) && g.particles[int(p.Y+1)*BoxWidth+int(p.X+1)] == nil {
					p.X++
					p.Y++
					j := int(p.Y)*BoxWidth + int(p.X)
					g.particles[j] = p
					doNotChange[j] = true
					g.particles[i] = nil
				} else if p.X-1 >= 0 && int(p.Y+1)*BoxWidth+int(p.X-1) < len(g.particles) && g.particles[int(p.Y+1)*BoxWidth+int(p.X-1)] == nil {
					p.X--
					p.Y++
					j := int(p.Y)*BoxWidth + int(p.X)
					g.particles[j] = p
					doNotChange[j] = true
					g.particles[i] = nil
				} else {
					// Blocked on all three sides: rests in place this frame.
					doNotChange[i] = true
				}
			} else {
				// At (or past) the floor: clamp to the bottom row.
				p.Y = float64(BoxHeight - ParticleSize)
				j := int(p.Y)*BoxWidth + int(p.X)
				if j > 0 && j < len(g.particles) {
					g.particles[j] = p
				}
			}
		}
	}
	return nil
}
// Draw has code that draws the particles and other items onto the window:
// it clears the screen to a sky-blue background and blits every live
// particle at its grid position, reusing a single DrawImageOptions.
func (g *Game) Draw(screen *ebiten.Image) {
	screen.Fill(color.RGBA{97, 202, 255, 255})
	for _, p := range g.particles {
		if p != nil {
			g.op.GeoM.Reset()
			g.op.GeoM.Translate(p.X, p.Y)
			screen.DrawImage(p.Img, g.op)
		}
	}
}
// Layout takes the outside size and divides by Zoom, giving the zoom effect
// (the logical screen is Zoom times smaller than the window).
func (g *Game) Layout(outsideWidth, outsideHeight int) (screenWidth, screenHeight int) {
	return outsideWidth / Zoom, outsideHeight / Zoom
}
// main configures the window, reads the zoom factor from stdin (falling back
// to 8 on invalid input), prints the controls, and runs the game loop.
func main() {
	ebiten.SetWindowSize(WindowWidth, WindowHeight)
	ebiten.SetWindowTitle("Sandbox")
	ebiten.SetWindowResizable(true)
	fmt.Print("Enter zoom amount (Integers 1 and above; default: 8): ")
	fmt.Scanf("%d", &Zoom)
	if Zoom < 1 {
		fmt.Println("Invalid number! Defaulting to 8...")
		Zoom = 8
	}
	fmt.Println()
	fmt.Println(" --- Instructions --- ")
	fmt.Println("[Space] Or [Resizing Window] - Clears all sand")
	fmt.Println("[Left-Click] - Places sand")
	fmt.Println("[Right-Click] - Removes sand")
	fmt.Println(" --- Instructions --- ")
	fmt.Println()
	fmt.Println("Enjoy!")
	fmt.Println()
	game := &Game{}
	game.Init()
	fmt.Println("Starting game...")
	if err := ebiten.RunGame(game); err != nil {
		panic(err)
	}
}
package test_persistence
import (
"testing"
cdata "github.com/pip-services3-go/pip-services3-commons-go/data"
data1 "github.com/pip-templates/pip-templates-microservice-go/data/version1"
persist "github.com/pip-templates/pip-templates-microservice-go/persistence"
"github.com/stretchr/testify/assert"
)
// BeaconsPersistenceFixture bundles three test beacons and a persistence
// implementation, so the same CRUD/filter test suites can run against any
// IBeaconsPersistence backend.
type BeaconsPersistenceFixture struct {
	Beacon1 data1.BeaconV1
	Beacon2 data1.BeaconV1
	Beacon3 data1.BeaconV1

	persistence persist.IBeaconsPersistence
}
// NewBeaconsPersistenceFixture creates a fixture around the given persistence
// with three fixed test beacons: two on site "1" and one on site "2".
func NewBeaconsPersistenceFixture(persistence persist.IBeaconsPersistence) *BeaconsPersistenceFixture {
	c := BeaconsPersistenceFixture{}
	c.Beacon1 = data1.BeaconV1{
		Id:     "1",
		Udi:    "00001",
		Type:   data1.AltBeacon,
		SiteId: "1",
		Label:  "TestBeacon1",
		Center: data1.GeoPointV1{Type: "Point", Coordinates: [][]float32{{0.0, 0.0}}},
		Radius: 50,
	}
	c.Beacon2 = data1.BeaconV1{
		Id:     "2",
		Udi:    "00002",
		Type:   data1.IBeacon,
		SiteId: "1",
		Label:  "TestBeacon2",
		Center: data1.GeoPointV1{Type: "Point", Coordinates: [][]float32{{2.0, 2.0}}},
		Radius: 70,
	}
	c.Beacon3 = data1.BeaconV1{
		Id:     "3",
		Udi:    "00003",
		Type:   data1.AltBeacon,
		SiteId: "2",
		Label:  "TestBeacon3",
		Center: data1.GeoPointV1{Type: "Point", Coordinates: [][]float32{{10.0, 10.0}}},
		Radius: 50,
	}
	c.persistence = persistence
	return &c
}
// testCreateBeacons creates the three fixture beacons through the persistence
// and asserts that each stored beacon mirrors its source fields. It is a
// shared setup step for the exported test methods.
func (c *BeaconsPersistenceFixture) testCreateBeacons(t *testing.T) {
	// Create the first beacon
	beacon, err := c.persistence.Create("", &c.Beacon1)
	assert.Nil(t, err)
	assert.NotNil(t, beacon)
	assert.Equal(t, c.Beacon1.Udi, beacon.Udi)
	assert.Equal(t, c.Beacon1.SiteId, beacon.SiteId)
	assert.Equal(t, c.Beacon1.Type, beacon.Type)
	assert.Equal(t, c.Beacon1.Label, beacon.Label)
	assert.NotNil(t, beacon.Center)

	// Create the second beacon
	beacon, err = c.persistence.Create("", &c.Beacon2)
	assert.Nil(t, err)
	assert.NotNil(t, beacon)
	assert.Equal(t, c.Beacon2.Udi, beacon.Udi)
	assert.Equal(t, c.Beacon2.SiteId, beacon.SiteId)
	assert.Equal(t, c.Beacon2.Type, beacon.Type)
	assert.Equal(t, c.Beacon2.Label, beacon.Label)
	assert.NotNil(t, beacon.Center)

	// Create the third beacon
	beacon, err = c.persistence.Create("", &c.Beacon3)
	assert.Nil(t, err)
	assert.NotNil(t, beacon)
	assert.Equal(t, c.Beacon3.Udi, beacon.Udi)
	assert.Equal(t, c.Beacon3.SiteId, beacon.SiteId)
	assert.Equal(t, c.Beacon3.Type, beacon.Type)
	assert.Equal(t, c.Beacon3.Label, beacon.Label)
	assert.NotNil(t, beacon.Center)
}
// TestCrudOperations exercises the full lifecycle against the persistence:
// create (via testCreateBeacons), list, update, lookup by UDI, delete, and
// a final lookup that must come back empty.
func (c *BeaconsPersistenceFixture) TestCrudOperations(t *testing.T) {
	var beacon1 data1.BeaconV1

	// Create items
	c.testCreateBeacons(t)

	// Get all beacons
	page, err := c.persistence.GetPageByFilter("", cdata.NewEmptyFilterParams(), cdata.NewEmptyPagingParams())
	assert.Nil(t, err)
	assert.NotNil(t, page)
	assert.Len(t, page.Data, 3)
	beacon1 = *page.Data[0]

	// Update the beacon
	beacon1.Label = "ABC"
	beacon, err := c.persistence.Update("", &beacon1)
	assert.Nil(t, err)
	assert.NotNil(t, beacon)
	assert.Equal(t, beacon1.Id, beacon.Id)
	assert.Equal(t, "ABC", beacon.Label)

	// Get beacon by udi
	beacon, err = c.persistence.GetOneByUdi("", beacon1.Udi)
	assert.Nil(t, err)
	assert.NotNil(t, beacon)
	assert.Equal(t, beacon1.Id, beacon.Id)

	// Delete the beacon
	beacon, err = c.persistence.DeleteById("", beacon1.Id)
	assert.Nil(t, err)
	assert.NotNil(t, beacon)
	assert.Equal(t, beacon1.Id, beacon.Id)

	// Try to get deleted beacon
	beacon, err = c.persistence.GetOneById("", beacon1.Id)
	assert.Nil(t, err)
	assert.Nil(t, beacon)
}
// TestGetWithFilters verifies GetPageByFilter for the supported filter keys:
// id, udi, comma-separated udis, and site_id, checking the expected match
// counts against the three fixture beacons.
func (c *BeaconsPersistenceFixture) TestGetWithFilters(t *testing.T) {
	// Create items
	c.testCreateBeacons(t)

	// Filter by id
	page, err := c.persistence.GetPageByFilter("",
		cdata.NewFilterParamsFromTuples(
			"id", "1",
		),
		cdata.NewEmptyPagingParams())
	assert.Nil(t, err)
	assert.Len(t, page.Data, 1)

	// Filter by udi
	page, err = c.persistence.GetPageByFilter(
		"",
		cdata.NewFilterParamsFromTuples(
			"udi", "00002",
		),
		cdata.NewEmptyPagingParams())
	assert.Nil(t, err)
	assert.Len(t, page.Data, 1)

	// Filter by udis
	page, err = c.persistence.GetPageByFilter(
		"",
		cdata.NewFilterParamsFromTuples(
			"udis", "00001,00003",
		),
		cdata.NewEmptyPagingParams())
	assert.Nil(t, err)
	assert.Len(t, page.Data, 2)

	// Filter by site_id
	page, err = c.persistence.GetPageByFilter(
		"",
		cdata.NewFilterParamsFromTuples(
			"site_id", "1",
		),
		cdata.NewEmptyPagingParams())
	assert.Nil(t, err)
	assert.Len(t, page.Data, 2)
}
// uniquePathsWithObstaclesMemo counts the distinct monotone (right/down)
// paths from the top-left to the bottom-right cell, where grid cells equal
// to 1 are impassable obstacles. Top-down DFS with memoization.
//
// Fix: the memo previously used 0 as the "not computed" sentinel, so cells
// whose true path count is 0 (e.g. everything behind a wall of obstacles)
// were recomputed on every visit, degrading to exponential time. -1 now
// marks "not computed", making the memoization effective (O(m*n)).
func uniquePathsWithObstaclesMemo(obstacleGrid [][]int) int {
	numR, numC := len(obstacleGrid), len(obstacleGrid[0])
	if obstacleGrid[0][0] == 1 {
		return 0
	}
	memo := make([][]int, numR)
	for row := range memo {
		memo[row] = make([]int, numC)
		for col := range memo[row] {
			memo[row][col] = -1 // -1 = not yet computed
		}
	}
	var dfs func(int, int) int
	dfs = func(cellR, cellC int) int {
		// Out of bounds or an obstacle contributes no paths.
		if cellR < 0 || cellC < 0 || obstacleGrid[cellR][cellC] == 1 {
			return 0
		}
		// Reached the start: exactly one (empty) path.
		if cellR == 0 && cellC == 0 {
			return 1
		}
		if numPaths := memo[cellR][cellC]; numPaths != -1 {
			return numPaths
		}
		// Paths into a cell come from above and from the left.
		memo[cellR][cellC] = dfs(cellR-1, cellC) + dfs(cellR, cellC-1)
		return memo[cellR][cellC]
	}
	return dfs(numR-1, numC-1)
}
// uniquePathsWithObstacles2D counts the distinct monotone (right/down) paths
// from the top-left to the bottom-right cell, treating cells equal to 1 as
// obstacles. Bottom-up 2D dynamic programming.
// time: O(m*n), space: O(m*n)
func uniquePathsWithObstacles2D(obstacleGrid [][]int) int {
	numR, numC := len(obstacleGrid), len(obstacleGrid[0])
	if obstacleGrid[0][0] == 1 {
		return 0
	}
	dp := make([][]int, numR)
	for r := range dp {
		dp[r] = make([]int, numC)
	}
	// Base cases: first column and first row carry 1 path until the first
	// obstacle, after which everything downstream stays 0.
	dp[0][0] = 1
	for r := 1; r < numR; r++ {
		if obstacleGrid[r][0] == 0 {
			dp[r][0] = dp[r-1][0]
		}
	}
	for c := 1; c < numC; c++ {
		if obstacleGrid[0][c] == 0 {
			dp[0][c] = dp[0][c-1]
		}
	}
	// Each free cell is reachable from above and from the left.
	for r := 1; r < numR; r++ {
		for c := 1; c < numC; c++ {
			if obstacleGrid[r][c] == 0 {
				dp[r][c] = dp[r-1][c] + dp[r][c-1]
			}
		}
	}
	return dp[numR-1][numC-1]
}
// uniquePathsWithObstacles counts the distinct monotone (right/down) paths
// from the top-left to the bottom-right cell, treating cells equal to 1 as
// obstacles. Bottom-up DP using a single row that is updated in place.
// time: O(m*n), space: O(n)
func uniquePathsWithObstacles(obstacleGrid [][]int) int {
	numR, numC := len(obstacleGrid), len(obstacleGrid[0])
	if obstacleGrid[0][0] == 1 {
		return 0
	}
	dp := make([]int, numC)
	// First row: 1 path per cell until the first obstacle, 0 afterwards.
	dp[0] = 1
	for c := 1; c < numC; c++ {
		if obstacleGrid[0][c] == 0 {
			dp[c] = dp[c-1]
		}
	}
	for r := 1; r < numR; r++ {
		// dp[c] currently holds the count for the cell above; adding the
		// already-updated dp[c-1] folds in the path from the left.
		if obstacleGrid[r][0] == 1 {
			dp[0] = 0
		}
		for c := 1; c < numC; c++ {
			if obstacleGrid[r][c] == 1 {
				dp[c] = 0
			} else {
				dp[c] += dp[c-1]
			}
		}
	}
	return dp[numC-1]
}
package coltypes
import (
"strings"
"github.com/lib/pq/oid"
"github.com/znbasedb/znbase/pkg/sql/pgwire/pgcode"
"github.com/znbasedb/znbase/pkg/sql/pgwire/pgerror"
"github.com/znbasedb/znbase/pkg/sql/sem/types"
)
// Predefined immutable column type instances. Several SQL spellings alias
// the same underlying type and differ only in their VisibleType, which
// preserves how the user originally wrote the type name.
var (
	// Bool is an immutable T instance.
	Bool = &TBool{VisBool}
	// Boolean is same as Bool
	Boolean = &TBool{VisBoolean}
	// Bit is an immutable T instance.
	Bit = &TBitArray{Width: 1, VisibleType: VisBIT}
	// VarBit is an immutable T instance.
	VarBit = &TBitArray{Width: 0, Variable: true, VisibleType: VisVARBIT}
	// BitV is same as VarBit
	BitV = &TBitArray{Width: 0, Variable: true, VisibleType: VisBITV}
	// Int2 is an immutable T instance.
	Int2 = &TInt{Width: 16, VisibleType: VisINT2}
	// SmallInt is same as Int2
	SmallInt = &TInt{Width: 16, VisibleType: VisSMALLINT}
	// Int4 is an immutable T instance.
	Int4 = &TInt{Width: 32, VisibleType: VisINT4}
	// Int8 is an immutable T instance.
	Int8 = &TInt{Width: 64, VisibleType: VisINT8}
	// Int64 is same as Int8
	Int64 = &TInt{Width: 64, VisibleType: VisINT64}
	// BigInt is same as Int8
	BigInt = &TInt{Width: 64, VisibleType: VisBIGINT}
	// Serial2 is an immutable T instance.
	Serial2 = &TSerial{&TInt{Width: 16, VisibleType: VisSERIAL2}}
	// Serial4 is an immutable T instance.
	Serial4 = &TSerial{&TInt{Width: 32, VisibleType: VisSERIAL4}}
	// Serial8 is an immutable T instance.
	Serial8 = &TSerial{&TInt{Width: 64, VisibleType: VisSERIAL8}}
	// BigSerial is same as Serial
	BigSerial = &TSerial{&TInt{Width: 64, VisibleType: VisBIGSERIAL}}
	// SmallSerial is same as Serial
	SmallSerial = &TSerial{&TInt{Width: 16, VisibleType: VisSMALLSERIAL}}
	// Real is same as Float4
	Real = &TFloat{Short: true, VisibleType: VisREAL}
	// Float4 is an immutable T instance.
	Float4 = &TFloat{Short: true, VisibleType: VisFLOAT4}
	// Float8 is an immutable T instance.
	Float8 = &TFloat{VisibleType: VisFLOAT8}
	// Double is same as Float8
	Double = &TFloat{VisibleType: VisDouble}
	// Float is same as Float8
	Float = &TFloat{VisibleType: VisFLOAT}
	// Decimal is an immutable T instance.
	Decimal = &TDecimal{VisibleType: VisDECIMAL}
	// Dec is same as Decimal
	Dec = &TDecimal{VisibleType: VisDEC}
	// Numeric is same as Decimal
	Numeric = &TDecimal{VisibleType: VisNUMERIC}
	// Date is an immutable T instance.
	Date = &TDate{VisibleType: VisDATE}
	// Time is an immutable T instance.
	Time = &TTime{VisibleType: VisTIME}
	// Timestamp is an immutable T instance.
	Timestamp = &TTimestamp{VisibleType: VisTIMESTAMP}
	// TimestampWithTZ is an immutable T instance.
	TimestampWithTZ = &TTimestampTZ{VisibleType: VisTIMESTAMPWTZ}
	// TimestampWithoutTZ is same as TimestampWithTZ
	TimestampWithoutTZ = &TTimestampTZ{VisibleType: VisTIMESTAMPWOTZ}
	// TimestampTZ is same as TimestampWithTZ
	TimestampTZ = &TTimestampTZ{VisibleType: VisTIMESTAMPTZ}
	// Interval is an immutable T instance.
	Interval = &TInterval{VisibleType: VisINTERVAL}
	// Char is an immutable T instance. See strings.go for details.
	Char = &TString{Variant: TStringVariantCHAR, N: 1, VisibleType: VisCHAR}
	// VarChar is an immutable T instance. See strings.go for details.
	VarChar = &TString{Variant: TStringVariantVARCHAR, VisibleType: VisVARCHAR}
	// String is an immutable T instance. See strings.go for details.
	String = &TString{Variant: TStringVariantSTRING, VisibleType: VisSTRING}
	// Void is an immutable T instance. See strings.go for details. now only use for udr function return type
	Void = &TVoid{Variant: TStringVariantVOID, VisibleType: VisVOID}
	// QChar is an immutable T instance. See strings.go for details.
	QChar = &TString{Variant: TStringVariantQCHAR, VisibleType: VisCHAR}
	// Character is same as Char
	Character = &TString{Variant: TStringVariantCHAR, N: 1, VisibleType: VisCHARACTER}
	// Text is same as String
	Text = &TString{Variant: TStringVariantSTRING, VisibleType: VisTEXT}
	// CharV is same as String
	CharV = &TString{Variant: TStringVariantSTRING, VisibleType: VisCHARV}
	// CharacterV is same as String
	CharacterV = &TString{Variant: TStringVariantSTRING, VisibleType: VisCHARACTERV}
	// Name is an immutable T instance.
	Name = &TName{}
	// Bytes is an immutable T instance.
	Bytes = &TBytes{VisibleType: VisBYTES}
	// Bytea is same as Bytes
	Bytea = &TBytes{VisibleType: VisBYTEA}
	// Blob is same as Bytes
	Blob = &TBytes{VisibleType: VisBLOB}
	// Clob is same as Text
	Clob = &TString{Variant: TStringVariantSTRING, VisibleType: VisCLOB}
	// Int2vector is an immutable T instance.
	Int2vector = &TVector{Name: "INT2VECTOR", ParamType: Int8}
	// UUID is an immutable T instance.
	UUID = &TUUID{VisibleType: VisUUID}
	// INet is an immutable T instance.
	INet = &TIPAddr{VisibleType: VisINET}
	// JSON is an immutable T instance.
	JSON = &TJSON{VisibleType: VisJSON}
	// JSONB is same as JSON
	JSONB = &TJSON{VisibleType: VisJSONB}
	// Oid is an immutable T instance.
	Oid = &TOid{Name: "OID"}
	// RegClass is an immutable T instance.
	RegClass = &TOid{Name: "REGCLASS"}
	// RegNamespace is an immutable T instance.
	RegNamespace = &TOid{Name: "REGNAMESPACE"}
	// RegProc is an immutable T instance.
	RegProc = &TOid{Name: "REGPROC"}
	// RegProcedure is an immutable T instance.
	RegProcedure = &TOid{Name: "REGPROCEDURE"}
	// RegType is an immutable T instance.
	RegType = &TOid{Name: "REGTYPE"}
	// OidVector is an immutable T instance.
	OidVector = &TVector{Name: "OIDVECTOR", ParamType: Oid}
)
// Errors returned by NewBitArrayType for invalid widths and type names.
var errBitLengthNotPositive = pgerror.NewError(pgcode.InvalidParameterValue,
	"length for type bit must be at least 1")

var errBitTypeNotPositive = pgerror.NewError(pgcode.InvalidParameterValue,
	"BitType only support BIT, VARBIT, BIT VARYING")
// NewBitArrayType creates a new BIT type with the given bit width.
// visibleType selects the SQL spelling: "varbit" yields VARBIT, "bit" yields
// BIT (or BIT VARYING when varying is true); any other name is rejected.
func NewBitArrayType(width int, varying bool, visibleType string) (*TBitArray, error) {
	if width < 1 {
		return nil, errBitLengthNotPositive
	}
	switch visibleType {
	case "varbit":
		return &TBitArray{Width: uint(width), Variable: varying, VisibleType: VisVARBIT}, nil
	case "bit":
		if varying {
			return &TBitArray{Width: uint(width), Variable: varying, VisibleType: VisBITV}, nil
		}
		return &TBitArray{Width: uint(width), Variable: varying, VisibleType: VisBIT}, nil
	}
	return nil, errBitTypeNotPositive
}
// Errors returned by NewFloat for out-of-range precisions.
var errFloatPrecAtLeast1 = pgerror.NewError(pgcode.InvalidParameterValue,
	"precision for type float must be at least 1 bit")

var errFloatPrecMax54 = pgerror.NewError(pgcode.InvalidParameterValue,
	"precision for type float must be less than 54 bits")
// NewFloat creates a type alias for FLOAT with the given precision.
// Precisions of 1..24 bits map to Float4, 25..54 to Float8; anything else
// is rejected with an error.
func NewFloat(prec int64) (*TFloat, error) {
	if prec < 1 {
		return nil, errFloatPrecAtLeast1
	}
	if prec <= 24 {
		return Float4, nil
	}
	if prec <= 54 {
		return Float8, nil
	}
	return nil, errFloatPrecMax54
}
// ArrayOf creates a type alias for an array of the given element type and
// fixed bounds. Element types that cannot appear in array columns yield an
// unimplemented error referencing the tracking issue.
func ArrayOf(colType T, bounds []int32) (T, error) {
	if ok, issueNum := canBeInArrayColType(colType); !ok {
		return nil, pgerror.UnimplementedWithIssueDetailErrorf(issueNum,
			colType.String(), "arrays of %s not allowed", colType)
	}
	return &TArray{ParamType: colType, Bounds: bounds}, nil
}
// EnumArrayOf builds a TEnum over the given element type with the provided
// string bounds (the allowed enum values). The element type is subject to
// the same restriction as array columns.
func EnumArrayOf(colType T, bounds []string) (T, error) {
	if ok, issueNum := canBeInArrayColType(colType); !ok {
		return nil, pgerror.UnimplementedWithIssueDetailErrorf(issueNum,
			colType.String(), "arrays of %s not allowed", colType)
	}
	return &TEnum{ParamType: colType, Bounds: bounds}, nil
}
// SetArrayOf builds a TSet with the provided string bounds (the allowed SET
// members). Values containing commas are rejected because commas separate
// SET members in serialized form.
func SetArrayOf(colType T, bounds []string) (T, error) {
	for _, bound := range bounds {
		if strings.Contains(bound, ",") {
			return nil, pgerror.NewErrorf(pgcode.InvalidColumnReference, "Illegal SET '%s' value found during parsing", bound)
		}
	}
	return &TSet{Variant: TStringVariantSET, VisibleType: VisSET, Bounds: bounds}, nil
}
// typNameLiterals maps lowercase postgres type names to their column types.
// It is populated once in init from types.OidToType.
var typNameLiterals map[string]T

func init() {
	typNameLiterals = make(map[string]T)
	for o, t := range types.OidToType {
		name := strings.ToLower(oid.TypeName[o])
		// First mapping wins; types without a column-type equivalent are
		// skipped silently.
		if _, ok := typNameLiterals[name]; !ok {
			colTyp, err := DatumTypeToColumnType(t)
			if err != nil {
				continue
			}
			typNameLiterals[name] = colTyp
		}
	}
}
// TypeForNonKeywordTypeName returns the column type for the string name of a
// type, if one exists. The third return value indicates:
// 0 if no error or the type is not known in postgres.
// -1 if the type is known in postgres.
// >0 for a github issue number.
func TypeForNonKeywordTypeName(name string) (T, bool, int) {
	if t, ok := typNameLiterals[name]; ok {
		return t, true, 0
	}
	// Missing map entries return the zero value 0 ("not known in postgres").
	return nil, false, postgresPredefinedTypeIssues[name]
}
// The following map must include all types predefined in PostgreSQL
// that are also not yet defined in ZNBaseDB and link them to
// github issues. It is also possible, but not necessary, to include
// PostgreSQL types that are already implemented in ZNBaseDB.
// A value of -1 means "known in postgres, no tracking issue".
var postgresPredefinedTypeIssues = map[string]int{
	"box":           21286,
	"cidr":          18846,
	"circle":        21286,
	"line":          21286,
	"lseg":          21286,
	"macaddr":       -1,
	"macaddr8":      -1,
	"money":         -1,
	"path":          21286,
	"pg_lsn":        -1,
	"point":         21286,
	"polygon":       21286,
	"tsquery":       7821,
	"tsvector":      7821,
	"txid_snapshot": -1,
	"xml":           -1,
}
package main
import (
"bufio"
"fmt"
"log"
"os"
"strconv"
"strings"
)
// Point is an integer 2D grid coordinate.
type Point struct {
	x, y int
}
// newPoint builds a Point from a two-element {x, y} slice; any other length
// is rejected.
func newPoint(arr []int) (Point, error) {
	if len(arr) != 2 {
		return Point{}, fmt.Errorf("invalid input: %v", arr)
	}
	return Point{x: arr[0], y: arr[1]}, nil
}
// Equal reports whether both coordinates match. Point has only comparable
// fields, so struct equality covers it.
func (p1 Point) Equal(p2 Point) bool {
	return p1 == p2
}
// Vector is a directed line segment between two grid points.
type Vector struct {
	from, to Point
}
// newVector builds a Vector from a two-element {from, to} slice; any other
// length is rejected.
func newVector(arr []Point) (Vector, error) {
	if len(arr) != 2 {
		return Vector{}, fmt.Errorf("invalid input: %v", arr)
	}
	return Vector{arr[0], arr[1]}, nil
}
// Counter tallies how many times each Point has been seen.
type Counter struct {
	counter map[Point]int
}

// newCounter returns an empty, ready-to-use Counter.
func newCounter() *Counter {
	counts := make(map[Point]int)
	return &Counter{counts}
}
// add increments the occurrence count for p; unseen points start at 1
// (missing map keys read as the zero value).
func (c *Counter) add(p Point) {
	c.counter[p]++
}
// interpolate expands the vector into the grid points it covers, endpoints
// included. Horizontal and vertical vectors are always expanded; diagonal
// vectors are expanded only when diag is true, otherwise they yield nil.
// NOTE(review): in diag mode the loop steps one unit per axis until the end
// point is hit exactly — a vector that is neither axis-aligned nor at 45°
// would never terminate. The puzzle input presumably guarantees this; confirm.
func (v Vector) interpolate(diag bool) []Point {
	var (
		dx, dy int
		point  Point
		points []Point
	)
	// Unit step sign per axis (-1, 0, or +1).
	switch {
	case (v.to.x - v.from.x) > 0:
		dx = 1
	case (v.to.x - v.from.x) < 0:
		dx = -1
	default:
		dx = 0
	}
	switch {
	case (v.to.y - v.from.y) > 0:
		dy = 1
	case (v.to.y - v.from.y) < 0:
		dy = -1
	default:
		dy = 0
	}
	point = v.from
	if diag {
		// Walk both axes simultaneously until the end point is reached.
		points = append(points, point)
		for point != v.to {
			point.x += dx
			point.y += dy
			points = append(points, point)
		}
	} else {
		// Only expand axis-aligned vectors; diagonals produce nil.
		if v.from.x == v.to.x {
			points = append(points, point)
			for point.y != v.to.y {
				point.y += dy
				points = append(points, point)
			}
		} else if v.from.y == v.to.y {
			points = append(points, point)
			for point.x != v.to.x {
				point.x += dx
				points = append(points, point)
			}
		}
	}
	return points
}
// parseLine parses one input line of the form "x1,y1 -> x2,y2" into a Vector.
func parseLine(str string) (Vector, error) {
	halves := strings.Split(str, "->")
	if len(halves) != 2 {
		return Vector{}, fmt.Errorf("invalid input: %v", str)
	}
	endpoints := make([]Point, 0, 2)
	for _, half := range halves {
		var coords []int
		for _, token := range strings.Split(half, ",") {
			n, err := strconv.Atoi(strings.TrimSpace(token))
			if err != nil {
				return Vector{}, err
			}
			coords = append(coords, n)
		}
		point, err := newPoint(coords)
		if err != nil {
			return Vector{}, err
		}
		endpoints = append(endpoints, point)
	}
	return newVector(endpoints)
}
// readFile reads the named file and parses each line into a Vector.
// On a parse error the vectors read so far are returned with the error.
func readFile(filename string) ([]Vector, error) {
	var arr []Vector
	file, err := os.Open(filename)
	if err != nil {
		return arr, err
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		// The original code re-checked the (stale) Open error here on every
		// iteration; that check was dead code and has been removed.
		vec, err := parseLine(scanner.Text())
		if err != nil {
			return arr, err
		}
		arr = append(arr, vec)
	}
	return arr, scanner.Err()
}
// overlaps counts the grid points covered by more than one segment.
// diag controls whether diagonal segments contribute points.
func overlaps(vectors []Vector, diag bool) int {
	counter := newCounter()
	for _, vec := range vectors {
		for _, pt := range vec.interpolate(diag) {
			counter.add(pt)
		}
	}

	total := 0
	for _, hits := range counter.counter {
		if hits > 1 {
			total++
		}
	}
	return total
}
func main() {
if len(os.Args) < 2 {
log.Fatal("No arguments provided")
}
filename := os.Args[1]
arr, err := readFile(filename)
if err != nil {
log.Fatal(err)
}
result1 := overlaps(arr, false)
fmt.Printf("Puzzle 1: %v\n", result1)
result2 := overlaps(arr, true)
fmt.Printf("Puzzle 2: %v\n", result2)
} | 2021/day05/solution.go | 0.506591 | 0.415254 | solution.go | starcoder |
package mcpiapi
import (
"fmt"
"strconv"
)
// World has methods for manipulating the Minecraft world.
// It wraps the underlying connection object; sub-objects obtained from it
// (e.g. Checkpoint) share that same connection.
type World object
// Checkpoint provides access to the checkpoint object for this world.
// It is a zero-cost conversion over the same underlying connection.
func (obj World) Checkpoint() Checkpoint {
	return Checkpoint(obj)
}
// GetBlock returns the block type at the given coordinates.
// Block types can be 0-108. On any transport or parse error, a zero
// block type is returned alongside the error.
func (obj World) GetBlock(x, y, z int) (blockTypeId int, err error) {
	cmd := fmt.Sprintf("world.getBlock(%d,%d,%d)", x, y, z)
	reply, err := object(obj).sendReceive(cmd)
	if err != nil {
		return 0, err
	}
	parsed, err := strconv.ParseInt(reply, 10, 32)
	if err != nil {
		return 0, err
	}
	return int(parsed), nil
}
// SetBlock sets the block type and block data at the given coordinate.
// Block types are 0-255 and block data can be 0-15. Block data represents
// extra attributes like the wool color. A negative blockData omits the
// data argument from the command entirely.
func (obj World) SetBlock(x, y, z, blockTypeId, blockData int) error {
	cmd := fmt.Sprintf("world.setBlock(%d,%d,%d,%d)", x, y, z, blockTypeId)
	if blockData >= 0 {
		cmd = fmt.Sprintf("world.setBlock(%d,%d,%d,%d,%d)", x, y, z, blockTypeId, blockData)
	}
	return object(obj).send(cmd)
}
// SetBlocks sets a range of blocks to the block type and block data provided.
// Block types are 0-255 and block data can be 0-15. Block data represents
// extra attributes like the wool color. A negative blockData omits the
// data argument from the command entirely.
// Note: Setting a huge number of blocks can cause lag in the Minecraft game.
func (obj World) SetBlocks(x1, y1, z1, x2, y2, z2, blockTypeId, blockData int) error {
	cmd := fmt.Sprintf("world.setBlocks(%d,%d,%d,%d,%d,%d,%d)", x1, y1, z1, x2, y2, z2, blockTypeId)
	if blockData >= 0 {
		cmd = fmt.Sprintf("world.setBlocks(%d,%d,%d,%d,%d,%d,%d,%d)", x1, y1, z1, x2, y2, z2, blockTypeId, blockData)
	}
	return object(obj).send(cmd)
}
// GetHeight returns the height of the ground at the given coordinate.
// On any transport or parse error, -1 is returned alongside the error.
func (obj World) GetHeight(x, z int) (y int, err error) {
	cmd := fmt.Sprintf("world.getHeight(%d,%d)", x, z)
	reply, err := object(obj).sendReceive(cmd)
	if err != nil {
		return -1, err
	}
	parsed, err := strconv.ParseInt(reply, 10, 32)
	if err != nil {
		return -1, err
	}
	return int(parsed), nil
}
// Setting is used to enable or disable various Minecraft world settings.
// The boolean is transmitted as 1 (enabled) or 0 (disabled).
func (obj World) Setting(key string, enable bool) error {
	val := 0
	if enable {
		val = 1
	}
	return object(obj).send(fmt.Sprintf("world.setting(\"%s\",%d)", key, val))
}
// Checkpoint has methods for managing saving and restoring the world's state.
// It shares the same underlying connection as the World it came from.
type Checkpoint object
// Save saves the world's current state so that it can be restored later.
func (obj Checkpoint) Save() error {
	return object(obj).send("world.checkpoint.save()")
}
// Restore restores the world's state to that of the last checkpoint.
func (obj Checkpoint) Restore() error {
s := "world.checkpoint.restore()"
return object(obj).send(s)
} | world.go | 0.807157 | 0.49048 | world.go | starcoder |
package graphblas
import (
"context"
"log"
"github.com/rossmerr/graphblas/constraints"
)
// DenseMatrix a dense matrix, stored row-major as data[row][column].
type DenseMatrix[T constraints.Number] struct {
	c    int // number of columns in the matrix (original comment had rows/columns swapped)
	r    int // number of rows in the matrix
	data [][]T
}
// NewDenseMatrix returns a zero-initialised DenseMatrix with r rows and c columns.
func NewDenseMatrix[T constraints.Number](r, c int) *DenseMatrix[T] {
	return newMatrix[T](r, c, nil)
}
// NewDenseMatrixFromArray returns a DenseMatrix backed directly by data
// (the slice is not copied). The column count is taken from the first row;
// rows are assumed to be of equal length. An empty input now yields a 0x0
// matrix instead of panicking on data[0].
func NewDenseMatrixFromArray[T constraints.Number](data [][]T) *DenseMatrix[T] {
	r := len(data)
	c := 0
	if r > 0 {
		c = len(data[0])
	}
	return &DenseMatrix[T]{data: data, r: r, c: c}
}
// newMatrix allocates an r-by-c matrix, invoking initialise (when non-nil)
// once per row with the freshly allocated row slice and its row index.
func newMatrix[T constraints.Number](r, c int, initialise func([]T, int)) *DenseMatrix[T] {
	m := &DenseMatrix[T]{data: make([][]T, r), r: r, c: c}
	for row := range m.data {
		m.data[row] = make([]T, c)
		if initialise != nil {
			initialise(m.data[row], row)
		}
	}
	return m
}
// Columns returns the number of columns of the matrix.
func (s *DenseMatrix[T]) Columns() int {
	return s.c
}

// Rows returns the number of rows of the matrix.
func (s *DenseMatrix[T]) Rows() int {
	return s.r
}
// Update applies f to the element at (r, c) and stores the result back in
// place (equivalent to an At followed by a Set). Panics via log.Panicf if
// either index is out of range. The trailing bare return was removed.
func (s *DenseMatrix[T]) Update(r, c int, f func(T) T) {
	if r < 0 || r >= s.Rows() {
		log.Panicf("Row '%+v' is invalid", r)
	}
	if c < 0 || c >= s.Columns() {
		log.Panicf("Column '%+v' is invalid", c)
	}
	s.data[r][c] = f(s.data[r][c])
}
// At returns the value of a matrix element at r-th, c-th.
// Panics via log.Panicf if either index is out of range.
func (s *DenseMatrix[T]) At(r, c int) T {
	if r < 0 || r >= s.Rows() {
		log.Panicf("Row '%+v' is invalid", r)
	}
	if c < 0 || c >= s.Columns() {
		log.Panicf("Column '%+v' is invalid", c)
	}
	return s.data[r][c]
}

// Set sets the value at r-th, c-th of the matrix.
// Panics via log.Panicf if either index is out of range.
func (s *DenseMatrix[T]) Set(r, c int, value T) {
	if r < 0 || r >= s.Rows() {
		log.Panicf("Row '%+v' is invalid", r)
	}
	if c < 0 || c >= s.Columns() {
		log.Panicf("Column '%+v' is invalid", c)
	}
	s.data[r][c] = value
}
// ColumnsAt returns a copy of the c-th column as a vector.
// Panics via log.Panicf if c is out of range.
func (s *DenseMatrix[T]) ColumnsAt(c int) Vector[T] {
	if c < 0 || c >= s.Columns() {
		log.Panicf("Column '%+v' is invalid", c)
	}
	column := NewDenseVector[T](s.r)
	for row := 0; row < s.r; row++ {
		column.SetVec(row, s.data[row][c])
	}
	return column
}

// RowsAt returns a copy of the r-th row as a vector.
// Panics via log.Panicf if r is out of range.
func (s *DenseMatrix[T]) RowsAt(r int) Vector[T] {
	if r < 0 || r >= s.Rows() {
		log.Panicf("Row '%+v' is invalid", r)
	}
	row := NewDenseVector[T](s.c)
	for col := 0; col < s.c; col++ {
		row.SetVec(col, s.data[r][col])
	}
	return row
}
// RowsAtToArray returns a copy of the r-th row as a plain slice.
// Panics via log.Panicf if r is out of range.
func (s *DenseMatrix[T]) RowsAtToArray(r int) []T {
	if r < 0 || r >= s.Rows() {
		log.Panicf("Row '%+v' is invalid", r)
	}
	out := make([]T, s.c)
	copy(out, s.data[r])
	return out
}
// Copy returns a deep copy of the matrix.
// The previous implementation branched on whether each element equalled the
// zero value, but both branches performed the identical assignment — the
// conditional was dead. A straight per-row copy is equivalent and faster.
func (s *DenseMatrix[T]) Copy() Matrix[T] {
	return newMatrix(s.Rows(), s.Columns(), func(row []T, r int) {
		copy(row, s.data[r])
	})
}
// Scalar multiplication of a matrix by alpha; returns a new matrix.
func (s *DenseMatrix[T]) Scalar(alpha T) Matrix[T] {
	return Scalar[T](context.Background(), s, alpha)
}

// Multiply multiplies a matrix by another matrix; the result has
// s.Rows() rows and m.Columns() columns.
func (s *DenseMatrix[T]) Multiply(m Matrix[T]) Matrix[T] {
	matrix := newMatrix[T](s.Rows(), m.Columns(), nil)
	MatrixMatrixMultiply[T](context.Background(), s, m, nil, matrix)
	return matrix
}

// Add addition of a matrix by another matrix; the result is seeded from a
// copy of s.
func (s *DenseMatrix[T]) Add(m Matrix[T]) Matrix[T] {
	matrix := s.Copy()
	Add[T](context.Background(), s, m, nil, matrix)
	return matrix
}

// Subtract subtracts one matrix from another matrix.
// NOTE(review): the result is seeded from a copy of m here, whereas Add
// seeds from s — confirm this asymmetry is intentional.
func (s *DenseMatrix[T]) Subtract(m Matrix[T]) Matrix[T] {
	matrix := m.Copy()
	Subtract[T](context.Background(), s, m, nil, matrix)
	return matrix
}

// Negative the negative of a matrix; returns a new matrix.
func (s *DenseMatrix[T]) Negative() Matrix[T] {
	matrix := s.Copy()
	Negative[T](context.Background(), s, nil, matrix)
	return matrix
}

// Transpose swaps the rows and columns; returns a new c-by-r matrix.
func (s *DenseMatrix[T]) Transpose() Matrix[T] {
	matrix := newMatrix[T](s.Columns(), s.Rows(), nil)
	Transpose[T](context.Background(), s, nil, matrix)
	return matrix
}
// Equal reports whether the two matrices are equal.
func (s *DenseMatrix[T]) Equal(m Matrix[T]) bool {
	return Equal[T](context.Background(), s, m)
}

// NotEqual reports whether the two matrices are not equal.
func (s *DenseMatrix[T]) NotEqual(m Matrix[T]) bool {
	return NotEqual[T](context.Background(), s, m)
}

// Size of the matrix (rows * columns).
func (s *DenseMatrix[T]) Size() int {
	return s.r * s.c
}

// Values the number of elements in the matrix.
// For a dense matrix every cell is stored, so this equals Size.
func (s *DenseMatrix[T]) Values() int {
	return s.r * s.c
}
// Clear removes all elements from a matrix by reallocating the backing rows;
// the dimensions are preserved.
func (s *DenseMatrix[T]) Clear() {
	s.data = make([][]T, s.r)
	for i := 0; i < s.r; i++ {
		s.data[i] = make([]T, s.c)
	}
}

// RawMatrix returns the raw matrix.
// This is the internal storage, not a copy — mutations are visible to the matrix.
func (s *DenseMatrix[T]) RawMatrix() [][]T {
	return s.data
}
// Enumerate iterates through all non-zero elements, order is not guaranteed.
func (s *DenseMatrix[T]) Enumerate() Enumerate[T] {
	return s.iterator()
}

// iterator returns a fresh row-major iterator positioned before the first element.
func (s *DenseMatrix[T]) iterator() *denseMatrixIterator[T] {
	i := &denseMatrixIterator[T]{
		matrix: s,
		size:   s.Values(),
		last:   0,
		c:      0,
		r:      0,
	}
	return i
}
// denseMatrixIterator walks a DenseMatrix in row-major order.
type denseMatrixIterator[T constraints.Number] struct {
	matrix *DenseMatrix[T]
	size   int // total number of elements to visit
	last   int // count of elements already consumed
	c      int // column cursor for the NEXT advance
	r      int // current row
	cOld   int // column of the element most recently yielded
	rOld   int // NOTE(review): never assigned anywhere in this file — appears unused; confirm before removing
}
// HasNext reports whether the iterator has any values left.
// Collapsed the if/return-bool ladder into a single comparison.
func (s *denseMatrixIterator[T]) HasNext() bool {
	return s.last < s.size
}
// next advances the cursor one element in row-major order.
// The wrap check runs BEFORE recording cOld, so after a full row the cursor
// moves to column 0 of the following row; cOld always holds the column of
// the element the subsequent Next call should yield.
func (s *denseMatrixIterator[T]) next() {
	if s.c == s.matrix.Columns() {
		s.c = 0
		s.r++
	}
	s.cOld = s.c
	s.c++
	s.last++
}

// Next moves the iterator and returns the row, column and value.
func (s *denseMatrixIterator[T]) Next() (int, int, T) {
	s.next()
	return s.r, s.cOld, s.matrix.At(s.r, s.cOld)
}
// Map replace each element with the result of applying a function to its value.
func (s *DenseMatrix[T]) Map() Map[T] {
	t := s.iterator()
	i := &denseMatrixMap[T]{t}
	return i
}

// denseMatrixMap wraps the iterator to allow in-place mutation during traversal.
type denseMatrixMap[T constraints.Number] struct {
	*denseMatrixIterator[T]
}

// HasNext checks the iterator has any more values.
func (s *denseMatrixMap[T]) HasNext() bool {
	return s.denseMatrixIterator.HasNext()
}

// Map move the iterator and uses a higher order function to changes the elements current value.
func (s *denseMatrixMap[T]) Map(f func(int, int, T) T) {
	s.next()
	s.matrix.Set(s.r, s.cOld, f(s.r, s.cOld, s.matrix.At(s.r, s.cOld)))
}
// Element of the mask for each tuple that exists in the matrix for which the value of the tuple cast to Boolean is true
func (s *DenseMatrix[T]) Element(r, c int) bool {
return s.element(r, c)
}
func (s *DenseMatrix[T]) element(r, c int) bool {
return s.At(r, c) > Default[T]()
} | denseMatrix.go | 0.845688 | 0.686753 | denseMatrix.go | starcoder |
package field
// Type is a bit set identifying which kind (or kinds) of value a field holds.
type Type uint32

const (
	TypeAny        Type = 1 << iota // any
	TypeArray                       // array
	TypeNil                         // nil
	TypeString                      // string
	TypeBool                        // bool
	TypeInt                         // int
	TypeInt8                        // int8
	TypeInt16                       // int16
	TypeInt32                       // int32
	TypeInt64                       // int64
	TypeUint                        // uint
	TypeUint8                       // uint8
	TypeUint16                      // uint16
	TypeUint32                      // uint32
	TypeUint64                      // uint64
	TypeFloat32                     // float32
	TypeFloat64                     // float64
	TypeComplex64                   // complex64
	TypeComplex128                  // complex128
	TypeUintptr                     // uintptr
	TypeBinary                      // bytes
	TypeDuration                    // duration
	TypeTime                        // time
	TypeError                       // error
)

// has reports whether t and mask share any set bits.
func (t Type) has(mask Type) bool { return t&mask != 0 }

// The Is* predicates below each report whether the corresponding type bit is set.
func (t Type) IsAny() bool        { return t.has(TypeAny) }
func (t Type) IsArray() bool      { return t.has(TypeArray) }
func (t Type) IsNil() bool        { return t.has(TypeNil) }
func (t Type) IsBool() bool       { return t.has(TypeBool) }
func (t Type) IsString() bool     { return t.has(TypeString) }
func (t Type) IsInt() bool        { return t.has(TypeInt) }
func (t Type) IsInt8() bool       { return t.has(TypeInt8) }
func (t Type) IsInt16() bool      { return t.has(TypeInt16) }
func (t Type) IsInt32() bool      { return t.has(TypeInt32) }
func (t Type) IsInt64() bool      { return t.has(TypeInt64) }
func (t Type) IsUint() bool       { return t.has(TypeUint) }
func (t Type) IsUint8() bool      { return t.has(TypeUint8) }
func (t Type) IsUint16() bool     { return t.has(TypeUint16) }
func (t Type) IsUint32() bool     { return t.has(TypeUint32) }
func (t Type) IsUint64() bool     { return t.has(TypeUint64) }
func (t Type) IsFloat32() bool    { return t.has(TypeFloat32) }
func (t Type) IsFloat64() bool    { return t.has(TypeFloat64) }
func (t Type) IsComplex64() bool  { return t.has(TypeComplex64) }
func (t Type) IsComplex128() bool { return t.has(TypeComplex128) }
func (t Type) IsUintptr() bool    { return t.has(TypeUintptr) }
func (t Type) IsBinary() bool     { return t.has(TypeBinary) }
func (t Type) IsDuration() bool   { return t.has(TypeDuration) }
func (t Type) IsTime() bool       { return t.has(TypeTime) }
func (t Type) IsError() bool      { return t.has(TypeError) }
package tetra3d
import (
"strconv"
"strings"
"github.com/kvartborg/vector"
)
// NodeType represents a Node's type. Node types are categorized, and can be said to extend or "be of" more general types.
// For example, a BoundingSphere has a type of NodeTypeBoundingSphere. That type can also be said to be NodeTypeBounding
// (because it is a bounding object). However, it is not of type NodeTypeBoundingTriangles, as that is a different category.
type NodeType string

const (
	NodeTypeNode              NodeType = "Node"
	NodeTypeModel             NodeType = "NodeModel"
	NodeTypeCamera            NodeType = "NodeCamera"
	NodeTypePath              NodeType = "NodePath"
	NodeTypeBounding          NodeType = "NodeBounding"
	NodeTypeBoundingAABB      NodeType = "NodeBoundingAABB"
	NodeTypeBoundingCapsule   NodeType = "NodeBoundingCapsule"
	NodeTypeBoundingTriangles NodeType = "NodeBoundingTriangles"
	NodeTypeBoundingSphere    NodeType = "NodeBoundingSphere"
	NodeTypeLight             NodeType = "NodeLight"
	NodeTypeAmbientLight      NodeType = "NodeLightAmbient"
	NodeTypePointLight        NodeType = "NodeLightPoint"
	NodeTypeDirectionalLight  NodeType = "NodeLightDirectional"
)

// Is returns true if a NodeType satisfies another NodeType category. A specific node type can be said to
// contain a more general one, but not vice-versa. For example, a Model (which has type NodeTypeModel) can be
// said to be a Node (NodeTypeNode), but the reverse is not true (a NodeTypeNode is not a NodeTypeModel).
// Category membership is encoded by substring ("NodeBoundingAABB" contains "NodeBounding", which contains
// "Node"); exact equality is subsumed by the substring test.
func (nt NodeType) Is(other NodeType) bool {
	return strings.Contains(string(nt), string(other))
}
// INode represents an object that exists in 3D space and can be positioned relative to an origin point.
// By default, this origin point is {0, 0, 0} (or world origin), but Nodes can be parented
// to other Nodes to change this origin (making their movements relative and their transforms
// successive). Models and Cameras are two examples of objects that fully implement the INode interface
// by means of embedding Node.
type INode interface {
	// Name returns the node's name; SetName changes it.
	Name() string
	SetName(name string)
	// Clone returns a deep copy of the node.
	Clone() INode
	// SetData / Data store and retrieve an arbitrary user-supplied value attached to the node.
	SetData(data interface{})
	Data() interface{}
	// Type returns the node's NodeType category.
	Type() NodeType
	setLibrary(lib *Library)
	Library() *Library
	setParent(INode)
	Parent() INode
	Unparent()
	// Scene looks for the Node's parents recursively to return what scene it exists in.
	// If the node is not within a tree (i.e. unparented), this will return nil.
	Scene() *Scene
	Root() INode
	Children() NodeFilter
	ChildrenRecursive() NodeFilter
	AddChildren(...INode)
	RemoveChildren(...INode)
	// updateLocalTransform(newParent INode)
	dirtyTransform()
	// Local transform properties (relative to the parent) and their world-space counterparts.
	ResetLocalTransform()
	SetWorldTransform(transform Matrix4)
	LocalRotation() Matrix4
	SetLocalRotation(rotation Matrix4)
	LocalPosition() vector.Vector
	SetLocalPosition(position vector.Vector)
	LocalScale() vector.Vector
	SetLocalScale(scale vector.Vector)
	WorldRotation() Matrix4
	SetWorldRotation(rotation Matrix4)
	WorldPosition() vector.Vector
	SetWorldPosition(position vector.Vector)
	WorldScale() vector.Vector
	SetWorldScale(scale vector.Vector)
	Move(x, y, z float64)
	MoveVec(moveVec vector.Vector)
	Rotate(x, y, z, angle float64)
	Grow(x, y, z float64)
	Transform() Matrix4
	Visible() bool
	SetVisible(visible, recursive bool)
	Get(path string) INode
	HierarchyAsString() string
	Path() string
	// Tags returns the node's tag set; IsBone reports whether the node is an armature bone.
	Tags() *Tags
	IsBone() bool
	// IsRootBone() bool
	AnimationPlayer() *AnimationPlayer
	setOriginalLocalPosition(vector.Vector)
}
// Tags is an unordered set of string tags to values, representing a means of identifying Nodes or carrying data on Nodes.
type Tags struct {
	tags map[string]interface{}
}

// NewTags returns a new, empty Tags object.
func NewTags() *Tags {
	return &Tags{tags: map[string]interface{}{}}
}

// Clone returns a duplicate of this Tags object with its own backing map.
func (tags *Tags) Clone() *Tags {
	clone := NewTags()
	for key, value := range tags.tags {
		clone.Set(key, value)
	}
	return clone
}

// Clear removes every tag from the Tags object.
func (tags *Tags) Clear() {
	tags.tags = map[string]interface{}{}
}

// Set associates the given value with the named tag.
func (tags *Tags) Set(tagName string, value interface{}) {
	tags.tags[tagName] = value
}

// Remove deletes the named tag from the Tags object.
func (tags *Tags) Remove(tag string) {
	delete(tags.tags, tag)
}

// Has returns true only if every one of the named tags is present.
func (tags *Tags) Has(nodeTags ...string) bool {
	for _, name := range nodeTags {
		if _, exists := tags.tags[name]; !exists {
			return false
		}
	}
	return true
}

// Get returns the value associated with the specified tag (key).
// Note that this does not sanity check to ensure the tag exists first.
func (tags *Tags) Get(tagName string) interface{} {
	return tags.tags[tagName]
}

// IsString returns true if the value associated with the specified tag is a
// string. A missing tag returns false (a failed type assertion on the nil
// interface covers both cases in one lookup).
func (tags *Tags) IsString(tagName string) bool {
	_, ok := tags.tags[tagName].(string)
	return ok
}

// GetAsString returns the value associated with the specified tag (key) as a string.
// Note that this does not sanity check to ensure the tag exists first.
func (tags *Tags) GetAsString(tagName string) string {
	return tags.tags[tagName].(string)
}

// IsFloat returns true if the value associated with the specified tag is a
// float64. A missing tag returns false.
func (tags *Tags) IsFloat(tagName string) bool {
	_, ok := tags.tags[tagName].(float64)
	return ok
}

// GetAsFloat returns the value associated with the specified tag (key) as a float.
// Note that this does not sanity check to ensure the tag exists first.
func (tags *Tags) GetAsFloat(tagName string) float64 {
	return tags.tags[tagName].(float64)
}

// IsInt returns true if the value associated with the specified tag is an
// int. A missing tag returns false.
func (tags *Tags) IsInt(tagName string) bool {
	_, ok := tags.tags[tagName].(int)
	return ok
}

// GetAsInt returns the value associated with the specified tag (key) as an int.
// Note that this does not sanity check to ensure the tag exists first.
func (tags *Tags) GetAsInt(tagName string) int {
	return tags.tags[tagName].(int)
}
// Node represents a minimal struct that fully implements the Node interface. Model and Camera embed Node
// into their structs to automatically easily implement Node.
type Node struct {
	name                  string
	position              vector.Vector // local position, relative to the parent
	scale                 vector.Vector // local scale, relative to the parent
	rotation              Matrix4       // local rotation, relative to the parent
	originalLocalPosition vector.Vector
	visible               bool
	data                  interface{} // A place to store a pointer to something if you need it
	children              []INode
	parent                INode
	cachedTransform       Matrix4 // last computed world transform; valid while isTransformDirty is false
	isTransformDirty      bool
	tags                  *Tags // Tags is an unordered set of string tags, representing a means of identifying Nodes.
	animationPlayer       *AnimationPlayer
	inverseBindMatrix     Matrix4 // Specifically for bones in an armature used for animating skinned meshes
	isBone                bool
	library               *Library // The Library this Node was instantiated from (nil if it wasn't instantiated with a library at all)
	scene                 *Scene
}
// NewNode returns a new Node with the given name, identity transform,
// visibility enabled, and a fresh AnimationPlayer rooted on itself.
func NewNode(name string) *Node {
	node := &Node{
		name:                  name,
		position:              vector.Vector{0, 0, 0},
		originalLocalPosition: vector.Vector{0, 0, 0},
		scale:                 vector.Vector{1, 1, 1},
		rotation:              NewMatrix4(),
		children:              []INode{},
		visible:               true,
		isTransformDirty:      true,
		tags:                  NewTags(),
		// Cache a valid transform up front in case a transform getter runs
		// before the first rebuild.
		cachedTransform: NewMatrix4(),
	}
	node.animationPlayer = NewAnimationPlayer(node)
	return node
}
// setOriginalLocalPosition records the node's original local position
// (stored without triggering a transform rebuild).
func (node *Node) setOriginalLocalPosition(position vector.Vector) {
	node.originalLocalPosition = position
}
// Name returns the object's name.
func (node *Node) Name() string {
	return node.name
}

// SetName sets the object's name.
func (node *Node) SetName(name string) {
	node.name = name
}

// Type returns the NodeType for this object.
func (node *Node) Type() NodeType {
	return NodeTypeNode
}

// Library returns the Library from which this Node was instantiated. If it was created through code, this will be nil.
func (node *Node) Library() *Library {
	return node.library
}

// setLibrary records the Library this Node was instantiated from.
func (node *Node) setLibrary(library *Library) {
	node.library = library
}
// Clone returns a new Node that deep-copies this one: transform properties,
// tags, animation player, and the entire child hierarchy. The data field is
// copied shallowly (clone and original share the same value).
func (node *Node) Clone() INode {
	newNode := NewNode(node.name)
	newNode.position = node.position.Clone()
	newNode.scale = node.scale.Clone()
	newNode.rotation = node.rotation.Clone()
	newNode.visible = node.visible
	newNode.data = node.data // shallow copy by design
	newNode.isTransformDirty = true
	newNode.tags = node.tags.Clone()
	newNode.animationPlayer = node.animationPlayer.Clone()
	newNode.library = node.library

	// If the source's animation player was rooted on the source node itself,
	// re-root the cloned player onto the clone.
	if node.animationPlayer.RootNode == node {
		newNode.animationPlayer.SetRoot(newNode)
	}

	// Deep-clone the child hierarchy under the new node.
	for _, child := range node.children {
		childClone := child.Clone()
		childClone.setParent(newNode)
		newNode.children = append(newNode.children, childClone)
	}

	// Models that were skinned to the source node need their bone references
	// re-resolved against the cloned hierarchy.
	for _, child := range newNode.children {
		if model, isModel := child.(*Model); isModel && model.SkinRoot == node {
			model.ReassignBones(newNode)
		}
	}

	newNode.isBone = node.isBone
	if newNode.isBone {
		newNode.inverseBindMatrix = node.inverseBindMatrix.Clone()
	}

	return newNode
}
// SetData sets user-customizeable data that could be usefully stored on this node.
func (node *Node) SetData(data interface{}) {
	node.data = data
}

// Data returns a pointer to user-customizeable data that could be usefully stored on this node.
func (node *Node) Data() interface{} {
	return node.data
}
// Transform returns a Matrix4 indicating the global position, rotation, and scale of the object, transforming it by any parents'.
// If there's no change between the previous Transform() call and this one, Transform() will return a cached version of the
// transform for efficiency.
func (node *Node) Transform() Matrix4 {
	// T * R * S * O
	if !node.isTransformDirty {
		return node.cachedTransform
	}

	// Rebuild: scale, then rotation, then translation, then the parent's
	// transform (applied via successive Mult calls).
	transform := NewMatrix4Scale(node.scale[0], node.scale[1], node.scale[2])
	transform = transform.Mult(node.rotation)
	transform = transform.Mult(NewMatrix4Translate(node.position[0], node.position[1], node.position[2]))

	if node.parent != nil {
		transform = transform.Mult(node.parent.Transform())
	}

	node.cachedTransform = transform
	node.isTransformDirty = false

	// We want to call child.Transform() here to ensure the children also rebuild their transforms as necessary; otherwise,
	// children (i.e. BoundingAABBs) may not be rotating along with their owning Nodes (as they don't get rendered).
	for _, child := range node.children {
		child.Transform()
	}

	return transform
}

// SetWorldTransform sets the Node's global (world) transform to the full 4x4 transformation matrix provided.
// The matrix is decomposed into position, scale, and rotation, and each is applied via its world setter.
func (node *Node) SetWorldTransform(transform Matrix4) {
	position, scale, rotationMatrix := transform.Decompose()
	node.SetWorldPosition(position)
	node.SetWorldScale(scale)
	node.SetWorldRotation(rotationMatrix)
}
// dirtyTransform sets this Node and all recursive children's isTransformDirty flags to be true, indicating that they need to be
// rebuilt. This should be called when modifying the transformation properties (position, scale, rotation) of the Node.
func (node *Node) dirtyTransform() {
	// Children are only walked when this node was previously clean; if it is
	// already dirty, its subtree is assumed to have been marked already.
	if !node.isTransformDirty {
		for _, child := range node.ChildrenRecursive() {
			child.dirtyTransform()
		}
	}
	node.isTransformDirty = true
}
// updateLocalTransform updates the local transform properties for a Node given a change in parenting. This is done so that, for example,
// parenting an object with a given postiion, scale, and rotation keeps those visual properties when parenting (by updating them to take into
// account the parent's transforms as well).
// func (node *Node) updateLocalTransform(newParent INode) {
// if newParent != nil {
// parentTransform := newParent.Transform()
// parentPos, parentScale, parentRot := parentTransform.Decompose()
// diff := node.position.Sub(parentPos)
// diff[0] /= parentScale[0]
// diff[1] /= parentScale[1]
// diff[2] /= parentScale[2]
// node.position = parentRot.Transposed().MultVec(diff)
// node.rotation = node.rotation.Mult(parentRot.Transposed())
// node.scale[0] /= parentScale[0]
// node.scale[1] /= parentScale[1]
// node.scale[2] /= parentScale[2]
// } else {
// // Reverse
// parentTransform := node.Parent().Transform()
// parentPos, parentScale, parentRot := parentTransform.Decompose()
// pr := parentRot.MultVec(node.position)
// pr[0] *= parentScale[0]
// pr[1] *= parentScale[1]
// pr[2] *= parentScale[2]
// node.position = parentPos.Add(pr)
// node.rotation = node.rotation.Mult(parentRot)
// node.scale[0] *= parentScale[0]
// node.scale[1] *= parentScale[1]
// node.scale[2] *= parentScale[2]
// }
// node.dirtyTransform()
// }
// LocalPosition returns a 3D Vector consisting of the object's local position (position relative to its parent). If this object has no parent, the position will be
// relative to world origin (0, 0, 0).
// Note: this returns the internal vector itself, not a copy.
func (node *Node) LocalPosition() vector.Vector {
	return node.position
}

// ResetLocalTransform resets the local transform properties (position, scale, and rotation) for the Node. This can be useful because
// by default, when you parent one Node to another, the local transform properties (position, scale, and rotation) are altered to keep the
// object in the same absolute location, even though the origin changes.
func (node *Node) ResetLocalTransform() {
	// Mutate the existing vectors in place rather than reallocating them.
	node.position[0] = 0
	node.position[1] = 0
	node.position[2] = 0
	node.scale[0] = 1
	node.scale[1] = 1
	node.scale[2] = 1
	node.rotation = NewMatrix4()
	node.dirtyTransform()
}

// WorldPosition returns a 3D Vector consisting of the object's world position (position relative to the world origin point of {0, 0, 0}).
func (node *Node) WorldPosition() vector.Vector {
	position := node.Transform().Row(3)[:3] // We don't want to have to decompose if we don't have to
	return position
}
// SetLocalPosition sets the object's local position (position relative to its parent). If this object has no parent, the position should be
// relative to world origin (0, 0, 0). position should be a 3D vector (i.e. X, Y, and Z components).
func (node *Node) SetLocalPosition(position vector.Vector) {
	node.position[0] = position[0]
	node.position[1] = position[1]
	node.position[2] = position[2]
	node.dirtyTransform()
}

// SetWorldPosition sets the object's world position (position relative to the world origin point of {0, 0, 0}).
// position needs to be a 3D vector (i.e. X, Y, and Z components).
func (node *Node) SetWorldPosition(position vector.Vector) {
	if node.parent != nil {
		// Convert the world position into the parent's local space: undo the
		// parent's translation, rotation, and then scale.
		parentTransform := node.parent.Transform()
		parentPos, parentScale, parentRot := parentTransform.Decompose()

		pr := parentRot.Transposed().MultVec(position.Sub(parentPos))
		pr[0] /= parentScale[0]
		pr[1] /= parentScale[1]
		pr[2] /= parentScale[2]

		node.position = pr

	} else {
		node.position[0] = position[0]
		node.position[1] = position[1]
		node.position[2] = position[2]
	}
	node.dirtyTransform()
}
// LocalScale returns the object's local scale (scale relative to its parent). If this object has no parent, the scale will be absolute.
// Note: this returns the internal vector itself, not a copy.
func (node *Node) LocalScale() vector.Vector {
	return node.scale
}

// SetLocalScale sets the object's local scale (scale relative to its parent). If this object has no parent, the scale would be absolute.
// scale should be a 3D vector (i.e. X, Y, and Z components).
func (node *Node) SetLocalScale(scale vector.Vector) {
	node.scale[0] = scale[0]
	node.scale[1] = scale[1]
	node.scale[2] = scale[2]
	node.dirtyTransform()
}

// WorldScale returns the object's absolute world scale as a 3D vector (i.e. X, Y, and Z components).
func (node *Node) WorldScale() vector.Vector {
	_, scale, _ := node.Transform().Decompose()
	return scale
}

// SetWorldScale sets the object's absolute world scale. scale should be a 3D vector (i.e. X, Y, and Z components).
func (node *Node) SetWorldScale(scale vector.Vector) {
	if node.parent != nil {
		// Divide out the parent's scale so the composed world scale matches.
		parentTransform := node.parent.Transform()
		_, parentScale, _ := parentTransform.Decompose()

		node.scale = vector.Vector{
			scale[0] / parentScale[0],
			scale[1] / parentScale[1],
			scale[2] / parentScale[2],
		}

	} else {
		node.scale[0] = scale[0]
		node.scale[1] = scale[1]
		node.scale[2] = scale[2]
	}
	node.dirtyTransform()
}
// LocalRotation returns the object's local rotation Matrix4.
// Note: this returns the internal matrix, not a copy.
func (node *Node) LocalRotation() Matrix4 {
	return node.rotation
}

// SetLocalRotation sets the object's local rotation Matrix4 (relative to any parent).
// The provided matrix is cloned before being stored.
func (node *Node) SetLocalRotation(rotation Matrix4) {
	node.rotation = rotation.Clone()
	node.dirtyTransform()
}

// WorldRotation returns an absolute rotation Matrix4 representing the object's rotation.
func (node *Node) WorldRotation() Matrix4 {
	_, _, rotation := node.Transform().Decompose()
	return rotation
}

// SetWorldRotation sets an object's rotation to the provided rotation Matrix4.
func (node *Node) SetWorldRotation(rotation Matrix4) {
	if node.parent != nil {
		// Factor out the parent's rotation so the composed world rotation matches.
		parentTransform := node.parent.Transform()
		_, _, parentRot := parentTransform.Decompose()
		node.rotation = parentRot.Transposed().Mult(rotation)
	} else {
		node.rotation = rotation.Clone()
	}
	node.dirtyTransform()
}
// Move translates the Node by the given x, y, and z offsets in its local space.
func (node *Node) Move(x, y, z float64) {
	node.position[0] += x
	node.position[1] += y
	node.position[2] += z
	node.dirtyTransform()
}

// MoveVec translates the Node by the given offset vector (X, Y, and Z components).
func (node *Node) MoveVec(vec vector.Vector) {
	node.Move(vec[0], vec[1], vec[2])
}

// Rotate rotates the Node's local rotation about the axis (x, y, z) by the given angle.
func (node *Node) Rotate(x, y, z, angle float64) {
	localRot := node.LocalRotation()
	localRot = localRot.Rotated(x, y, z, angle)
	node.SetLocalRotation(localRot)
}

// Grow scales the object additively (i.e. calling Node.Grow(1, 0, 0) will scale it +1 on the X-axis).
func (node *Node) Grow(x, y, z float64) {
	scale := node.LocalScale()
	scale[0] += x
	scale[1] += y
	scale[2] += z
	node.SetLocalScale(scale)
}
// Parent returns the Node's parent. If the Node has no parent, this will return nil.
func (node *Node) Parent() INode {
	return node.parent
}

// setParent sets the Node's parent.
func (node *Node) setParent(parent INode) {
	node.parent = parent
}

// Scene looks for the Node's parents recursively to return what scene it exists in.
// If the node is not within a tree (i.e. unparented), this will return nil.
// NOTE(review): this assumes the hierarchy root is a plain *Node; a root of
// another concrete INode type would panic on the type assertion — confirm.
func (node *Node) Scene() *Scene {
	root := node.Root()
	if root != nil {
		return root.(*Node).scene
	}
	return nil
}
// addChildren adds the children to the parent node, but sets their parent to be the parent node passed. This is done so children have the
// correct, specific Node as parent (because I can't really think of a better way to do this rn). Basically, without this approach,
// after parent.AddChildren(child), child.Parent() wouldn't be parent, but rather parent.Node, which is no good.
func (node *Node) addChildren(parent INode, children ...INode) {
	for _, child := range children {
		// child.updateLocalTransform(parent)
		// Detach from any previous parent first so a child is never in two child lists.
		if child.Parent() != nil {
			child.Parent().RemoveChildren(child)
		}
		child.setParent(parent)
		node.children = append(node.children, child)
	}
}

// AddChildren parents the provided children Nodes to the passed parent Node, inheriting its transformations and being under it in the scenegraph
// hierarchy. If the children are already parented to other Nodes, they are unparented before doing so.
func (node *Node) AddChildren(children ...INode) {
	node.addChildren(node, children...)
}
// RemoveChildren removes the provided children from this object, unparenting
// each removed child (its Parent() becomes nil). Children not actually parented
// to this Node are ignored.
func (node *Node) RemoveChildren(children ...INode) {
	for _, child := range children {
		for i, c := range node.children {
			if c == child {
				// child.updateLocalTransform(nil)
				child.setParent(nil)
				// Shift the tail left and nil out the vacated last slot so the
				// backing array does not keep the removed child alive. (The
				// original nil'ed index i, which the subsequent append-shift
				// immediately overwrote, leaving a stale reference at the end.)
				copy(node.children[i:], node.children[i+1:])
				node.children[len(node.children)-1] = nil
				node.children = node.children[:len(node.children)-1]
				break
			}
		}
	}
}
// Unparent unparents the Node from its parent, removing it from the scenegraph. Note that this needs to be overridden for objects that embed Node.
func (node *Node) Unparent() {
	if node.parent != nil {
		node.parent.RemoveChildren(node)
	}
}
// Children() returns the Node's children.
// The returned NodeFilter is a fresh copy, so callers may mutate it freely
// without affecting the Node's internal children list.
func (node *Node) Children() NodeFilter {
	return append(make(NodeFilter, 0, len(node.children)), node.children...)
}
// ChildrenRecursive returns the Node's children and, depth-first, all of their
// recursive descendants as one flat NodeFilter.
func (node *Node) ChildrenRecursive() NodeFilter {
	out := node.Children()
	for _, child := range node.children {
		out = append(out, child.ChildrenRecursive()...)
	}
	return out
}
// Visible returns whether the Object is visible.
func (node *Node) Visible() bool {
	return node.visible
}
// SetVisible sets the object's visibility. If recursive is true, all recursive children of this Node will have their visibility set the same way.
func (node *Node) SetVisible(visible bool, recursive bool) {
	node.visible = visible
	if recursive {
		// ChildrenRecursive already flattens the entire subtree, so each
		// descendant is set non-recursively here. The original passed true,
		// making every descendant re-walk its own subtree — redundant
		// quadratic work with an identical end result.
		for _, child := range node.ChildrenRecursive() {
			child.SetVisible(visible, false)
		}
	}
}
// Tags represents an unordered set of string tags that can be used to identify this object.
// The returned pointer is the Node's own tag set; mutations are visible to the Node.
func (node *Node) Tags() *Tags {
	return node.tags
}
// HierarchyAsString returns a string displaying the hierarchy of this Node, and all recursive children.
// Plain Nodes have a "+" next to their name, Models an "M", Lights an "L", and Cameras "CAM".
// BoundingSpheres will have BS, BoundingAABB AABB, BoundingCapsule CAP, BoundingTriangles TRI, and Paths CURVE.
// This is a useful function to debug the layout of a node tree, for example.
func (node *Node) HierarchyAsString() string {
	var printNode func(node INode, level int) string
	printNode = func(node INode, level int) string {
		// Pick the type tag. BUG FIX: the original assigned "MODEL"/"LIGHT" to
		// the nodeType variable, which never reached the output, so Models and
		// Lights printed as plain "+" despite the doc comment's promise.
		prefix := "+"
		nodeType := node.Type()
		if nodeType.Is(NodeTypeModel) {
			prefix = "M"
		} else if nodeType.Is(NodeTypeLight) {
			prefix = "L"
		} else if nodeType.Is(NodeTypeCamera) {
			prefix = "CAM"
		} else if nodeType.Is(NodeTypeBoundingSphere) {
			prefix = "BS"
		} else if nodeType.Is(NodeTypeBoundingAABB) {
			prefix = "AABB"
		} else if nodeType.Is(NodeTypeBoundingCapsule) {
			prefix = "CAP"
		} else if nodeType.Is(NodeTypeBoundingTriangles) {
			prefix = "TRI"
		} else if nodeType.Is(NodeTypePath) {
			prefix = "CURVE"
		}
		// Indent one step per depth level.
		str := ""
		for i := 0; i < level; i++ {
			str += "\t"
		}
		// Render "\-: [TAG] Name : [x, y, z]" using the node's local position.
		wp := node.LocalPosition()
		wpStr := "[" + strconv.FormatFloat(wp[0], 'f', -1, 64) + ", " + strconv.FormatFloat(wp[1], 'f', -1, 64) + ", " + strconv.FormatFloat(wp[2], 'f', -1, 64) + "]"
		str += "\\-: [" + prefix + "] " + node.Name() + " : " + wpStr + "\n"
		for _, child := range node.Children() {
			str += printNode(child, level+1)
		}
		return str
	}
	return printNode(node, 0)
}
// Get searches a node's hierarchy using a string to find a specified node. The path is in the format of names of nodes, separated by forward
// slashes ('/'), and is relative to the node you use to call Get. As an example of Get, if you had a cup parented to a desk, which was
// parented to a room, that was finally parented to the root of the scene, it would be found at "Room/Desk/Cup". Note also that you can use "../" to
// "go up one" in the hierarchy (so cup.Get("../") would return the Desk node).
// Since Get uses forward slashes as path separation, it would be good to avoid using forward slashes in your Node names. Also note that Get()
// trims the extra spaces from the beginning and end of Node Names, so avoid using spaces at the beginning or end of your Nodes' names.
// Returns nil when nothing matches the path.
func (node *Node) Get(path string) INode {
	var search func(node INode) INode
	// Split the path into segments, dropping blank / whitespace-only parts.
	// NOTE(review): segments are appended UNTRIMMED even though blankness is
	// tested with TrimSpace, so "Room/ Desk" keeps " Desk" as a segment —
	// this contradicts the trimming described above; confirm intent.
	split := []string{}
	for _, s := range strings.Split(path, `/`) {
		if len(strings.TrimSpace(s)) > 0 {
			split = append(split, s)
		}
	}
	// search consumes split segment-by-segment while walking the tree; the
	// closure mutates the shared split slice as it descends.
	search = func(node INode) INode {
		if node == nil {
			return nil
		} else if len(split) == 0 {
			return node
		}
		// ".." climbs to the parent instead of descending.
		if split[0] == ".." {
			split = split[1:]
			return search(node.Parent())
		}
		for _, child := range node.Children() {
			if child.Name() == split[0] {
				if len(split) <= 1 {
					return child
				} else {
					split = split[1:]
					return search(child)
				}
			}
		}
		return nil
	}
	return search(node)
}
// Path returns a string indicating the hierarchical path to get this Node from the root. The path returned will be absolute, such that
// passing it to Get() called on the scene root node will return this node. The path returned will not contain the root node's name ("Root").
func (node *Node) Path() string {
	root := node.Root()
	if root == nil {
		return ""
	}
	// Walk upward from this node, prepending each ancestor's name until the
	// root (exclusive) is reached.
	path := node.Name()
	for ancestor := node.Parent(); ancestor != nil && ancestor != root; ancestor = ancestor.Parent() {
		path = ancestor.Name() + "/" + path
	}
	return path
}
// Root returns the root node in this tree by recursively traversing this node's hierarchy of
// parents upwards. A node with no parent is its own root.
func (node *Node) Root() INode {
	var current INode = node
	// Climb until there is no parent left; that node is the root.
	for current.Parent() != nil {
		current = current.Parent()
	}
	return current
}
// IsBone returns if the Node is a "bone" (a node that was a part of an armature and so can play animations back to influence a skinned mesh).
func (node *Node) IsBone() bool {
	return node.isBone
}
// The helper below is intentionally disabled; kept for reference.
// // IsRootBone returns if the Node SHOULD be the root of an Armature (a Node that was the base of an armature).
// func (node *Node) IsRootBone() bool {
// 	return node.IsBone() && (node.parent == nil || !node.parent.IsBone())
// }
func (node *Node) AnimationPlayer() *AnimationPlayer {
return node.animationPlayer
} | node.go | 0.812049 | 0.663676 | node.go | starcoder |
package terminus
import (
"github.com/gdamore/tcell"
)
// IEntity is the interface through which custom
// implementations of Entity can be created.
type IEntity interface {
	Init()
	Update(delta float64)
	Draw()
	SetScene(scene *Scene)
	GetEntity() *Entity
}
// Entity represents a simple entity to be rendered
// to the game screen.
type Entity struct {
	scene *Scene // owning scene; set via SetScene
	game *Game // owning game; cached from the scene in SetScene
	x int // current horizontal screen-cell position
	y int // current vertical screen-cell position
	sprite rune // visual representation; 0 means "draw nothing"
	colors []tcell.Color // optional [foreground, background] style override
}
// NewEntity takes an x position and a y position and
// creates an Entity (with no sprite, so nothing is drawn for it).
func NewEntity(x, y int) *Entity {
	entity := &Entity{
		x: x,
		y: y,
	}
	return entity
}
// NewSpriteEntity takes an x position, a y position, and a rune
// to be used as a visual representation, and creates an Entity
// colors: optional - foreground, background required if used
// (Draw only applies the override when exactly two colors are present;
// otherwise the scene's default style is used.)
func NewSpriteEntity(x, y int, sprite rune, colors ...tcell.Color) *Entity {
	entity := &Entity{
		x: x,
		y: y,
		sprite: sprite,
		colors: colors,
	}
	return entity
}
// Init fires during game.Init and can be overridden
func (entity *Entity) Init() {}
// Update fires after the scene update on each pass
// through the game loop, and can be overridden
func (entity *Entity) Update(delta float64) {}
// Draw fires during scene.Draw and can be overridden.
// Be careful, overriding this means that you will
// need to handle rendering on your own.
// The entity's own colors are applied only when exactly two were supplied
// (foreground, background); otherwise the current scene's style is used.
// A zero sprite rune draws nothing.
func (entity *Entity) Draw() {
	screen := entity.game.screen
	game := entity.game
	currentScene := game.CurrentScene()
	var style tcell.Style
	if len(entity.colors) == 2 {
		style = tcell.StyleDefault.
			Foreground(entity.colors[0]).
			Background(entity.colors[1])
	} else {
		style = currentScene.style
	}
	if 0 != entity.sprite {
		screen.SetContent(entity.x, entity.y, entity.sprite, nil, style)
	}
}
// GetEntity returns the entity in question
func (entity *Entity) GetEntity() *Entity {
	return entity
}
// SetScene Sets the Entity's Scene and Game
// (the game pointer is cached from the scene for convenience).
func (entity *Entity) SetScene(scene *Scene) {
	entity.game = scene.game
	entity.scene = scene
}
// GetScene gets the Scene that the Entity is associated with
func (entity *Entity) GetScene() *Scene {
	return entity.scene
}
// GetGame gets the Game the the Entity is associated with
func (entity *Entity) GetGame() *Game {
	return entity.game
}
// GetX gets the current x position of the Entity
func (entity *Entity) GetX() int {
	return entity.x
}
// GetY gets the current y position of the Entity
func (entity *Entity) GetY() int {
	return entity.y
}
// SetPosition sets the entity's x and y position
// simultaneously and flags the scene for redraw.
// NOTE(review): assumes SetScene was called first (scene non-nil) — confirm.
func (entity *Entity) SetPosition(x, y int) {
	entity.x, entity.y = x, y
	entity.scene.redraw = true
}
// GetPosition returns the entity's current x and y
// position
func (entity *Entity) GetPosition() (int, int) {
	return entity.x, entity.y
}
// SetSprite sets the Entity's sprite rune and flags the scene for redraw.
func (entity *Entity) SetSprite(sprite rune) {
	entity.sprite = sprite
	entity.scene.redraw = true
}
// GetSprite returns the rune that represents the Entity
func (entity *Entity) GetSprite() rune {
	return entity.sprite
}
// SetColor changes the entity's style foreground and
// background colors and flags the scene for redraw.
func (entity *Entity) SetColor(fg, bg tcell.Color) {
	entity.colors = []tcell.Color{fg, bg}
	entity.scene.redraw = true
}
// Overlaps checks if the entity overlaps the target
// entity (i.e. both occupy the same screen cell).
func (entity *Entity) Overlaps(target IEntity) bool {
	return entity.x == target.GetEntity().x && entity.y == target.GetEntity().y
}
// OverlapsPoint checks if the entity overlaps the
// specified screen point
func (entity *Entity) OverlapsPoint(x, y int) bool {
	return entity.x == x && entity.y == y
}
// CheckDir checks if the entity is the specified
// distance away from the target point along one axis:
// true when entity.<axis> + distance == point. Unknown axes return false.
func (entity *Entity) CheckDir(axis rune, distance, point int) bool {
	if axis == 'x' {
		return (entity.x + distance) == point
	} else if axis == 'y' {
		return (entity.y + distance) == point
	}
	return false
}
// IsLeftOf checks if the entity is directly to the
// left of the target entity (same row, one cell to the left).
func (entity *Entity) IsLeftOf(target IEntity) bool {
	return entity.y == target.GetEntity().y && entity.CheckDir('x', 1, target.GetEntity().x)
}
// IsRightOf checks if the entity is directly to the
// right of the target entity (same row, one cell to the right).
func (entity *Entity) IsRightOf(target IEntity) bool {
	return entity.y == target.GetEntity().y && entity.CheckDir('x', -1, target.GetEntity().x)
}
// IsAbove checks if the entity is directly above
// the target entity (same column, one cell up).
func (entity *Entity) IsAbove(target IEntity) bool {
	return entity.x == target.GetEntity().x && entity.CheckDir('y', 1, target.GetEntity().y)
}
// IsBelow checks if the entity is directly below
// the target entity
func (entity *Entity) IsBelow(target IEntity) bool {
return entity.x == target.GetEntity().x && entity.CheckDir('y', -1, target.GetEntity().y)
} | entity.go | 0.873943 | 0.435661 | entity.go | starcoder |
package main
import "math"
type Point []float64

// Groups bundles a leader-clustering result:
//   - groups: each group is a slice of point ids (keys into points); the
//     first id of each group is that group's leader.
//   - points: the id -> Point map the groups refer to.
type Groups struct {
	groups [][]int
	points map[int]Point
}

// Dist returns the Euclidean distance between p1 and p2.
// Pre-condition: both points have the same number of dimensions.
func (p1 Point) Dist(p2 Point) float64 {
	sum := 0.0
	for i := range p1 {
		d := p1[i] - p2[i]
		sum += d * d
	}
	return math.Sqrt(sum)
}
/**
 * makeGroups builds clusters with the leader-clustering algorithm: each point
 * joins the first existing group whose leader lies within dist; otherwise it
 * becomes the leader of a brand-new group.
 * Parameters: the maximum point-to-leader distance and a pointer to the point map.
 * Returns: a Groups struct holding the formed groups.
 * Pre-condition: all points share the same number of dimensions, and the map
 * keys are assumed to be the contiguous ids 1..len(points) — TODO confirm.
 * Post-condition: input structures are left unmodified.
 */
func makeGroups(dist float64, p *map[int]Point) *Groups {
	g := Groups{points: *p} // initialize g with the point map.
	var lider bool // scratch flag: true when point i founds a new group.
	// Build the groups.
	// Create the first group and install point 1 as its leader.
	g.groups = append(g.groups, make([]int, 1))
	g.groups[0][0] = 1
	// Place the remaining points / create further groups.
	for i := 2; i <= len(g.points); i += 1 { // for every point i in the map.
		for j := 0; j < len(g.groups); j += 1 { // for every group j in g.
			p := g.groups[j][0] // map key of group j's leader.
			lider = true
			// If point i lies within dist of group j's leader, add i to j.
			if g.points[i].Dist(g.points[p]) <= dist {
				g.groups[j] = append(g.groups[j], i)
				lider = false
				break
			}
		}
		// Point i matched no leader: it starts a group of its own.
		if lider {
			g.groups = append(g.groups, make([]int, 1))
			g.groups[len(g.groups)-1][0] = i
		}
	}
	return &g
}
/**
 * centroMassa computes the center of mass (centroid) of one group.
 * Parameter: the group's index in the groups slice.
 * Returns: the centroid as a Point.
 * Post-condition: the receiver's structures are left unmodified.
 */
func (g Groups) centroMassa(pos int) Point {
	// Dimension is taken from point id 1 — assumes that id always exists; TODO confirm.
	c := make([]float64, len(g.points[1]))
	// Zero-initialize c.
	for i := 0; i < len(c); i += 1 {
		c[i] = 0
	}
	// Accumulate the coordinate-wise sum of every point of the group into c.
	for i := 0; i < len(g.groups[pos]); i += 1 {
		p := g.groups[pos][i]
		for j := 0; j < len(c); j += 1 {
			c[j] += g.points[p][j]
		}
	}
	// Divide each coordinate by the number of points in the group.
	for i := 0; i < len(c); i += 1 {
		c[i] /= float64(len(g.groups[pos]))
	}
	return c
}
/**
* Método para calculo da SSE de um agrupamento
* retorno: SSE do agrupamento (float64).
* pós-condição: estruturas inalteradas.
*/
func (g Groups) sse() float64 {
var sse, groupSum float64 // resultado da sse e auxiliar para o somatório de cada grupo.
sse = 0
for i := 0; i < len(g.groups); i += 1 { // para cada grupo i na lista de grupos.
cMassa := g.centroMassa(i)
groupSum = 0
for j := 0; j < len(g.groups[i]); j += 1 { // para cada elemento j do grupo i.
d := g.points[g.groups[i][j]].Dist(cMassa) // d = distância entre o ponto j e o centro de massa do grupo.
groupSum += d * d
}
sse += groupSum // SSE será a soma de todos os somatórios parciais.
}
return sse
} | LP_TRABALHO_1_Rafael_Belmock_Pedruzzi/point.go | 0.510985 | 0.462352 | point.go | starcoder |
package goptimization
import (
"fmt"
"math"
"github.com/pkg/errors"
"gonum.org/v1/gonum/mat"
)
// Simplex Solve a linear problem without strict inequality constraints.
// Input follows standard form:
// Maximize z = Σ(1<=j<=n) c_j*x_j
// Constraints:
// 1<=i<=m, Σ(1<=j<=n) a_i_j*x_j <= b_i
// 1<=j<=n x_j >= 0
// - Define the canonical form of the problem (add slack variables and transform inequality constraints to equality constraints)
// - Check the basic solution is feasible, if not you need to run two phases simplex
// - Run iterations
// - Stop when the optimal solution is found or after maxIter
// Apply
// - First Danzig critera: for entering variable, pick the nonbasic variable with the largest reduced cost.
// - Bland's rule to avoid cycles : Choose the entering basic variable xj such that j is the smallest
// index with c¯j < 0. Also choose the leaving basic variable i with the smallest index (in case of ties in the ratio test)
// Returns the number of iterations performed, the solution vector (decision
// variables followed by slack values), the objective value, and any error.
// NOTE(review): the two-phase method mentioned above is not implemented here;
// the initial basic solution is assumed feasible — confirm inputs satisfy b >= 0.
func Simplex(c, A, b *mat.Dense, maxIter int) (int, *mat.Dense, float64, error) {
	totalIter := 0
	cf := CanonicalForm{}
	err := cf.New(c, A, b)
	if err != nil {
		return 0, nil, 0, err
	}
	for i := 0; i < maxIter; i++ {
		// Each Iter performs one pivot; end reports optimality or unboundedness.
		end, err := cf.Iter(0)
		if err != nil {
			return 0, nil, 0, err
		}
		if end {
			break
		}
		totalIter++
	}
	results, score := cf.GetResults()
	return totalIter, results, score, nil
}
// CanonicalForm Canonical form of a linear optimizattion problem:
// maximize c*x subject to A*x = b, x >= 0, after slack variables are added.
// The B/AN and cB/cN pairs are basis / non-basis views into A and c that the
// pivot steps swap columns between.
type CanonicalForm struct {
	// Positivity constraints
	n int
	// Equality constraints
	m int
	// Matrix (m, n+m)
	A *mat.Dense
	// Column vector (n+m)
	x *mat.Dense
	// Row vector (n+m)
	c *mat.Dense
	// Column vector (m)
	b *mat.Dense
	//Feasible solutions in the current dictionary
	// Column Vector (m)
	xBStar *mat.Dense
	// Column Vector (n)
	xN *mat.Dense
	// Matrix (m, m)
	B *mat.Dense
	// Matrix (m,n)
	AN *mat.Dense
	//Row Vector (m)
	cB *mat.Dense
	//Row Vector (n)
	cN *mat.Dense
	// remap tracks which original variable currently occupies each column
	// position after entering/leaving swaps; used to assemble GetResults.
	remap []int
}
//New Initialize all the parameters in order to run the simplex algorithm:
// validates dimensions, appends the m slack variables (identity columns) to A
// and zero costs to c, then slices out the basis views (B, cB) and non-basis
// views (AN, cN) used by the iterations.
// NOTE(review): A and c are extended via Grow, so the caller's matrices are
// not modified in place but are retained/aliased — confirm this is acceptable.
func (cf *CanonicalForm) New(c, A, b *mat.Dense) error {
	rows, cols := c.Dims()
	if rows > 1 {
		return errors.New("z dims.r > 1")
	}
	cf.n = cols
	rows, cols = A.Dims()
	if cols > cf.n {
		return errors.New("A dims.c > z dims.r")
	}
	cf.m = rows
	cf.A = A
	cf.A = cf.A.Grow(0, cf.m).(*mat.Dense)
	//Add slack variables
	for i := 0; i < cf.m; i++ {
		cf.A.Set(i, cf.n+i, 1.0)
	}
	cf.b = b
	cf.c = c
	cf.c = cf.c.Grow(0, cf.m).(*mat.Dense)
	cf.x = mat.NewDense(cf.n+cf.m, 1, nil)
	// Initial basic solution: slacks take the values of b.
	cf.xBStar = mat.DenseCopyOf(b)
	cf.xN = cf.x.Slice(0, cf.n, 0, 1).(*mat.Dense)
	cf.B = cf.A.Slice(0, cf.m, cf.n, cf.n+cf.m).(*mat.Dense)
	cf.AN = cf.A.Slice(0, cf.m, 0, cf.n).(*mat.Dense)
	cf.cB = cf.c.Slice(0, 1, cf.n, cf.n+cf.m).(*mat.Dense)
	cf.cN = cf.c.Slice(0, 1, 0, cf.n).(*mat.Dense)
	//Store the entring and leaving pairs for each iteration
	cf.remap = make([]int, cf.n+cf.m)
	for i := 0; i < cf.n+cf.m; i++ {
		cf.remap[i] = i
	}
	return nil
}
//FindY Extract and solve a sub problem of the current dictionary
// The current dictionary is:
// (1) xB = xBStar - B^-1*AN*xN
// (2) z = zStar + (cN - cB*B^-1*AN)xN
// Set y=cB*B^-1 and solve it
// NOTE(review): the fmt.Printf calls here and below are debug output left in
// the hot path — consider removing or routing through a logger.
func (cf *CanonicalForm) FindY() (*mat.Dense, error) {
	var y, BInv mat.Dense
	err := BInv.Inverse(cf.B)
	if err != nil {
		return nil, err
	}
	y.Mul(cf.cB, &BInv)
	fmt.Printf("y:\n %v\n\n", mat.Formatted(&y, mat.Prefix(" "), mat.Excerpt(8)))
	return &y, nil
}
//FindEnteringVariable Define the best entering varialbe following Danzig criteria and Bland's rule
// Find one column a^k of A not in B with y*a^k<c^k
// If there is no entering column, the current solution is optimal
// Returns -1 when no entering variable exists (optimum reached).
// NOTE(review): forceEnteringVarIndex uses 0 as the "auto" sentinel, so column
// 0 can never be forced explicitly — confirm that is intended.
func (cf *CanonicalForm) FindEnteringVariable(y *mat.Dense, forceEnteringVarIndex int) (int, error) {
	// m holds the reduced costs cN - y*AN; positive entries are candidates.
	var m mat.Dense
	m.Mul(y, cf.AN)
	m.Sub(cf.cN, &m)
	_, c := m.Dims()
	enteringVarIndex := -1
	if forceEnteringVarIndex != 0 {
		if m.At(0, forceEnteringVarIndex) > 0 {
			enteringVarIndex = forceEnteringVarIndex
		}
	} else {
		max := 0.0
		for j := 0; j < c; j++ {
			//First Danzig criteria and Bland's rule
			if m.At(0, j) > 0 && m.At(0, j) > max {
				max = m.At(0, j)
				enteringVarIndex = j
			}
		}
	}
	fmt.Println("enteringVarIndex", enteringVarIndex)
	return enteringVarIndex, nil
}
//SolveBd FindY describes the current dictionary.
// To find the best leaving variable we start from (1) and set d=B^-1*a^k
// When we solve it, we get the equation to maximize in order to find the leaving variable
func (cf *CanonicalForm) SolveBd(enteringVarIndex int) (*mat.Dense, error) {
	var d mat.Dense
	err := d.Solve(cf.B, cf.AN.ColView(enteringVarIndex))
	if err != nil {
		return nil, err
	}
	fmt.Printf("d:\n %v\n\n", mat.Formatted(&d, mat.Prefix(" "), mat.Excerpt(8)))
	return &d, nil
}
// FindLeavingVariable Define what is the best leaving variable following Bland's rule
// Find the biggest x_kStar with x_BStar - x_kStar*d >= 0
// If d<=0, the algorithm ends and the problem is unbounded.
// Otherwise, the biggest x_kStar force one of the components of x_BStar - x_kStar*d to be equal to zero
// and defines the leaving variable
// Returns (-1.0, -1, nil) when every component of d is non-positive (unbounded).
func (cf *CanonicalForm) FindLeavingVariable(d *mat.Dense) (float64, int, error) {
	r, _ := d.Dims()
	x := math.Inf(1)
	leavingVarIndex := -1
	found := false
	for i := 0; i < r; i++ {
		if d.At(i, 0) <= 0 {
			continue
		}
		found = true
		// Ratio test: how far the entering variable can grow before row i hits zero.
		tmp := cf.xBStar.At(i, 0) / d.At(i, 0)
		fmt.Println("xLeaving:", i, tmp)
		//Bland's rule
		if tmp < x {
			x = tmp
			leavingVarIndex = i
		}
	}
	if !found {
		return -1.0, -1, nil
	}
	fmt.Println("x", x)
	fmt.Println("leavingVarIndex", leavingVarIndex)
	return x, leavingVarIndex, nil
}
// Update Update the dictionary in order to run anotheriteration
// Replace the leaving variable with the entering variable in xBStar
// Replace the leaving column in the base B with the entering column
// (the corresponding cost coefficients in cB/cN are swapped as well).
// NOTE(review): the y parameter is unused; removing it would change the
// signature used by Iter, so it is kept as-is here.
func (cf *CanonicalForm) Update(d, y *mat.Dense, x float64, enteringVarIndex, leavingVarIndex int) error {
	// xBStar <- xBStar - x*d, then the leaving row takes the entering value x.
	var tmp mat.Dense
	tmp.Scale(x, d)
	cf.xBStar.Sub(cf.xBStar, &tmp)
	cf.xBStar.Set(leavingVarIndex, 0, x)
	fmt.Printf("xBStar:\n %v\n\n", mat.Formatted(cf.xBStar, mat.Prefix(" "), mat.Excerpt(8)))
	r, _ := d.Dims()
	// Swap the leaving column of B with the entering column of AN.
	leavingCol := mat.DenseCopyOf(cf.B.ColView(leavingVarIndex))
	leavingC := cf.cB.At(0, leavingVarIndex)
	for i := 0; i < r; i++ {
		cf.B.Set(i, leavingVarIndex, cf.AN.At(i, enteringVarIndex))
		cf.AN.Set(i, enteringVarIndex, leavingCol.At(i, 0))
	}
	cf.cB.Set(0, leavingVarIndex, cf.cN.At(0, enteringVarIndex))
	cf.cN.Set(0, enteringVarIndex, leavingC)
	fmt.Printf("A:\n %v\n\n", mat.Formatted(cf.A, mat.Prefix(" "), mat.Excerpt(8)))
	fmt.Printf("cB:\n %v\n\n", mat.Formatted(cf.cB, mat.Prefix(" "), mat.Excerpt(8)))
	return nil
}
//Iter Run one iteration of the simplex algorithm.
// Returns (true, nil) when no further pivot is possible: either the optimum
// was reached (no entering variable) or the problem is unbounded (no leaving
// variable). forceEnteringVarIndex > 0 overrides the entering-variable choice.
func (cf *CanonicalForm) Iter(forceEnteringVarIndex int) (bool, error) {
	//Solve yB=c_B
	y, err := cf.FindY()
	if err != nil {
		return false, err
	}
	//Find a entering column/variable
	enteringVarIndex, err := cf.FindEnteringVariable(y, forceEnteringVarIndex)
	if err != nil {
		return false, err
	}
	// The algorithm ends when there is no candidates
	if enteringVarIndex == -1 {
		return true, nil
	}
	//Solve Bd=a^k
	d, err := cf.SolveBd(enteringVarIndex)
	if err != nil {
		return false, err
	}
	// Find the leaving column/variable
	x, leavingVarIndex, err := cf.FindLeavingVariable(d)
	if err != nil {
		return false, err
	}
	// The algorithm ends when there is no candidates
	if leavingVarIndex == -1 {
		return true, nil
	}
	//Store the new pair of entering/leaving variables
	tmp := cf.remap[cf.n+leavingVarIndex]
	cf.remap[cf.n+leavingVarIndex] = enteringVarIndex
	cf.remap[enteringVarIndex] = tmp
	// Update the dictionary for the next iteration
	err = cf.Update(d, y, x, enteringVarIndex, leavingVarIndex)
	if err != nil {
		return false, err
	}
	return false, nil
}
// GetResults Build the solution.
// It returns a matrix (n+m,1), the first n components are the best value for the problem and the others are the "leftover" for each constraint.
// Also returns the maximum score.
func (cf *CanonicalForm) GetResults() (*mat.Dense, float64) {
total := float64(0)
rows, cols := cf.x.Dims()
result := mat.NewDense(rows, cols, nil)
result.Zero()
for i := cf.n; i < cf.n+cf.m; i++ {
result.Set(cf.remap[i], 0, cf.xBStar.At(i-cf.n, 0))
if cf.remap[i] < cf.n {
total += cf.xBStar.At(i-cf.n, 0) * cf.c.At(0, i)
}
}
fmt.Printf("result:\n %v\n\n", mat.Formatted(result, mat.Prefix(" "), mat.Excerpt(8)))
fmt.Println("Score:", total)
return result, total
} | simplex.go | 0.643441 | 0.472562 | simplex.go | starcoder |
package iso8583
import (
"fmt"
"reflect"
"github.com/moov-io/iso8583/field"
)
// Message is an ISO 8583 message: a set of fields built from a MessageSpec.
type Message struct {
	fields map[int]field.Field // field index -> field instance created from the spec
	spec *MessageSpec
	data interface{} // optional user struct with F<i> members mirroring the fields
	fieldsMap map[int]struct{} // set of field indexes that currently carry a value
	bitmap *field.Bitmap // cached bitmap field (index 1)
}
// NewMessage creates an empty Message whose fields are instantiated from spec.
func NewMessage(spec *MessageSpec) *Message {
	fields := spec.CreateMessageFields()
	return &Message{
		fields: fields,
		spec: spec,
		fieldsMap: map[int]struct{}{},
	}
}
// Data returns the user data struct previously set via SetData (may be nil).
func (m *Message) Data() interface{} {
	return m.data
}
// SetData links a user struct to the message. The struct's exported fields
// named F0, F1, ... Fn are wired to the corresponding spec fields: each
// non-nil F<i> member receives the spec field's specification and replaces
// the message's field instance, so packing/unpacking reads/writes the struct.
// The struct member types must exactly match the spec field types.
func (m *Message) SetData(data interface{}) error {
	m.data = data
	if m.data == nil {
		return nil
	}
	// get the struct
	str := reflect.ValueOf(data).Elem()
	if reflect.TypeOf(str).Kind() != reflect.Struct {
		return fmt.Errorf("failed to set data as struct is expected, got: %s", reflect.TypeOf(str).Kind())
	}
	for i, fl := range m.fields {
		fieldName := fmt.Sprintf("F%d", i)
		// get the struct field
		dataField := str.FieldByName(fieldName)
		if dataField == (reflect.Value{}) || dataField.IsNil() {
			continue
		}
		if dataField.Type() != reflect.TypeOf(fl) {
			return fmt.Errorf("failed to set data: type of the field %d: %v does not match the type in the spec: %v", i, dataField.Type(), reflect.TypeOf(fl))
		}
		// set data field spec for the message spec field
		specField := m.fields[i]
		df := dataField.Interface().(field.Field)
		df.SetSpec(specField.Spec())
		// use data field as a message field
		m.fields[i] = df
		m.fieldsMap[i] = struct{}{}
	}
	return nil
}
// Bitmap returns the message's bitmap field (index 1), caching it on first
// access and marking it as set.
// NOTE(review): the type assertion assumes spec index 1 is a *field.Bitmap;
// it panics otherwise — confirm all specs guarantee this.
func (m *Message) Bitmap() *field.Bitmap {
	if m.bitmap != nil {
		return m.bitmap
	}
	m.bitmap = m.fields[1].(*field.Bitmap)
	m.fieldsMap[1] = struct{}{}
	return m.bitmap
}
// MTI sets the Message Type Indicator (field index 0).
func (m *Message) MTI(val string) {
	m.fieldsMap[0] = struct{}{}
	m.fields[0].SetBytes([]byte(val))
}
// Field sets the string value of field id and marks it as present.
// NOTE(review): an id with no spec entry yields a nil field and panics — confirm callers validate ids.
func (m *Message) Field(id int, val string) {
	m.fieldsMap[id] = struct{}{}
	m.fields[id].SetBytes([]byte(val))
}
// BinaryField sets the raw byte value of field id and marks it as present.
func (m *Message) BinaryField(id int, val []byte) {
	m.fieldsMap[id] = struct{}{}
	m.fields[id].SetBytes(val)
}
// GetMTI returns the Message Type Indicator (field index 0) as a string.
func (m *Message) GetMTI() string {
	// check index
	return m.fields[0].String()
}
// GetString returns field id's value as a string, or "" when the field is unset.
func (m *Message) GetString(id int) string {
	if _, ok := m.fieldsMap[id]; ok {
		return m.fields[id].String()
	}
	return ""
}
// GetBytes returns field id's raw bytes, or nil when the field is unset.
func (m *Message) GetBytes(id int) []byte {
	if _, ok := m.fieldsMap[id]; ok {
		return m.fields[id].Bytes()
	}
	return nil
}
// Pack serializes the message: it rebuilds the bitmap from the set fields and
// concatenates the packed representation of every set field in index order
// (0 = MTI, 1 = bitmap, 2+ = data fields).
func (m *Message) Pack() ([]byte, error) {
	packed := []byte{}
	m.Bitmap().Reset()
	// build the bitmap
	maxId := 0
	for id := range m.fieldsMap {
		if id > maxId {
			maxId = id
		}
		// indexes 0 and 1 are for mti and bitmap
		// regular field number startd from index 2
		if id < 2 {
			continue
		}
		m.Bitmap().Set(id)
	}
	// pack fields
	for i := 0; i <= maxId; i++ {
		if _, ok := m.fieldsMap[i]; ok {
			field, ok := m.fields[i]
			if !ok {
				return nil, fmt.Errorf("failed to pack field %d: no specification found", i)
			}
			packedField, err := field.Pack()
			if err != nil {
				return nil, fmt.Errorf("failed to pack field %d (%s): %v", i, field.Spec().Description, err)
			}
			packed = append(packed, packedField...)
		}
	}
	return packed, nil
}
// Unpack parses a packed message: MTI first, then the bitmap, then every field
// the bitmap marks as present, consuming src left to right. Previously-set
// fields are cleared, and any linked data struct (SetData) is updated per field.
func (m *Message) Unpack(src []byte) error {
	var off int
	m.fieldsMap = map[int]struct{}{}
	m.Bitmap().Reset()
	// unpack MTI
	read, err := m.fields[0].Unpack(src)
	if err != nil {
		return err
	}
	off = read
	// unpack Bitmap
	read, err = m.fields[1].Unpack(src[off:])
	if err != nil {
		return err
	}
	off += read
	for i := 2; i <= m.Bitmap().Len(); i++ {
		if m.Bitmap().IsSet(i) {
			field, ok := m.fields[i]
			if !ok {
				return fmt.Errorf("failed to unpack field %d: no specification found", i)
			}
			m.fieldsMap[i] = struct{}{}
			read, err = field.Unpack(src[off:])
			if err != nil {
				return fmt.Errorf("failed to unpack field %d (%s): %v", i, field.Spec().Description, err)
			}
			// Mirror the parsed field into the linked data struct, if any.
			err = m.linkDataFieldWithMessageField(i, field)
			if err != nil {
				return fmt.Errorf("failed to unpack field %d: %v", i, err)
			}
			off += read
		}
	}
	return nil
}
func (m *Message) linkDataFieldWithMessageField(i int, fl field.Field) error {
if m.data == nil {
return nil
}
// get the struct
str := reflect.ValueOf(m.data).Elem()
fieldName := fmt.Sprintf("F%d", i)
// get the struct field
dataField := str.FieldByName(fieldName)
if dataField == (reflect.Value{}) {
return nil
}
if dataField.Type() != reflect.TypeOf(fl) {
return fmt.Errorf("field type: %v does not match the type in the spec: %v", dataField.Type(), reflect.TypeOf(fl))
}
dataField.Addr().Elem().Set(reflect.ValueOf(fl))
return nil
} | message.go | 0.650134 | 0.403978 | message.go | starcoder |
package nft
import (
"fmt"
"github.com/iov-one/weave"
"github.com/iov-one/weave/errors"
)
// UnlimitedCount marks an approval that may be used any number of times.
const UnlimitedCount = -1
// ApprovalMeta is the list of approvals granted for a single action.
type ApprovalMeta []Approval
// Approvals maps an action to the approvals granted for it.
type Approvals map[Action]ApprovalMeta
// NOTE(review): the Clone methods below return the receiver value itself — a
// shallow copy. ApprovalMeta.Clone in particular returns the SAME backing
// slice, so callers mutating the "clone" mutate the original — confirm deep
// copies are not expected.
func (m ActionApprovals) Clone() ActionApprovals {
	return m
}
func (m Approval) Clone() Approval {
	return m
}
func (m ApprovalMeta) Clone() ApprovalMeta {
	return m
}
// Validate checks every approval in the list and returns the first error found.
func (m ApprovalMeta) Validate() error {
	for _, v := range m {
		if err := v.Validate(); err != nil {
			return err
		}
	}
	return nil
}
// Validate checks that the approval's options and address are well formed.
// (The original called m.Options.Validate() a second time on the way out even
// though it had already passed; the redundant call is removed — behavior is
// identical since the second call could only return nil at that point.)
func (m Approval) Validate() error {
	if err := m.Options.Validate(); err != nil {
		return err
	}
	// The raw address bytes must form a valid weave address.
	return m.AsAddress().Validate()
}
// AsAddress returns the approval's raw address bytes as a weave.Address.
func (a Approval) AsAddress() weave.Address {
	return weave.Address(a.Address)
}
// Equals reports whether two approvals share the same address and options.
func (a Approval) Equals(o Approval) bool {
	return a.AsAddress().Equals(o.AsAddress()) &&
		a.Options.Equals(o.Options)
}
// Equals reports whether all option fields match exactly.
func (a ApprovalOptions) Equals(o ApprovalOptions) bool {
	return a.Immutable == o.Immutable && a.Count == o.Count && a.UntilBlockHeight == o.UntilBlockHeight
}
// EqualsAfterUse reports whether used is what a would look like after one use:
// identical when a is unlimited or immutable, otherwise equal with Count
// decremented by exactly one.
func (a ApprovalOptions) EqualsAfterUse(used ApprovalOptions) bool {
	if a.Count == UnlimitedCount || a.Immutable {
		return a.Equals(used)
	}
	return a.Count == used.Count+1 &&
		a.Immutable == used.Immutable &&
		a.UntilBlockHeight == used.UntilBlockHeight
}
// Validate ensures Count is either UnlimitedCount (-1) or strictly positive;
// zero and values below -1 are rejected.
func (a ApprovalOptions) Validate() error {
	if a.Count == 0 || a.Count < UnlimitedCount {
		return errors.ErrInternal.New("Approval count should either be unlimited or above zero")
	}
	return nil
}
//This requires all the model-specific actions to be passed here
//TODO: Not sure I'm a fan of array of maps, but it makes sense
//given we validate using protobuf enum value maps
// NOTE(review): an action is rejected when it IS present in one of the passed
// enum maps; given the comment above (maps of valid actions), the inverted
// check — reject when absent — may have been intended. Confirm with callers.
func (m Approvals) Validate(actionMaps ...map[Action]int32) error {
	for action, meta := range m {
		if err := meta.Validate(); err != nil {
			return err
		}
		if !isValidAction(action) {
			return errors.ErrInternal.New(fmt.Sprintf("illegal action: %s", action))
		}
		for _, actionMap := range actionMaps {
			if _, ok := actionMap[action]; ok {
				return errors.ErrInternal.New(fmt.Sprintf("illegal action: %s", action))
			}
		}
	}
	return nil
}
// FilterExpired returns a copy of the approvals without entries that are past
// their UntilBlockHeight or whose remaining use Count has reached zero.
func (m Approvals) FilterExpired(blockHeight int64) Approvals {
	res := make(map[Action]ApprovalMeta, 0)
	for action, approvals := range m {
		for _, approval := range approvals {
			if approval.Options.UntilBlockHeight > 0 && approval.Options.UntilBlockHeight < blockHeight {
				continue
			}
			if approval.Options.Count == 0 {
				continue
			}
			if _, ok := res[action]; !ok {
				res[action] = make([]Approval, 0)
			}
			res[action] = append(res[action], approval)
		}
	}
	return res
}
// AsPersistable converts the map form into the protobuf-friendly slice form.
// Ordering is the map's (random) iteration order.
func (m Approvals) AsPersistable() []ActionApprovals {
	r := make([]ActionApprovals, 0)
	for k, v := range m {
		r = append(r, ActionApprovals{Action: k, Approvals: v})
	}
	return r
}
// IsEmpty reports whether no actions have approvals.
func (m Approvals) IsEmpty() bool {
	return len(m) == 0
}
// MetaByAction returns the approvals for the given action (nil when absent).
func (m Approvals) MetaByAction(action Action) ApprovalMeta {
	return m[action]
}
// ForAction returns a single-entry Approvals containing only the given action.
func (m Approvals) ForAction(action Action) Approvals {
	res := make(map[Action]ApprovalMeta, 0)
	res[action] = m.MetaByAction(action)
	return res
}
// ForAddress returns only the approvals granted to the given address,
// dropping actions that end up with no matching approvals.
func (m Approvals) ForAddress(addr weave.Address) Approvals {
	res := make(map[Action]ApprovalMeta, 0)
	for k, v := range m {
		r := make([]Approval, 0)
		for _, vv := range v {
			if vv.AsAddress().Equals(addr) {
				r = append(r, vv)
			}
		}
		if len(r) > 0 {
			res[k] = r
		}
	}
	return res
}
// Filter returns a copy of m without any approval that has an equal entry in
// obsolete (matched per action on address and options).
func (m Approvals) Filter(obsolete Approvals) Approvals {
	res := make(map[Action]ApprovalMeta, 0)
	for action, approvals := range m {
		obsoleteApprovals := obsolete[action]
	ApprovalLoop:
		for _, approval := range approvals {
			for _, obsoleteApproval := range obsoleteApprovals {
				if approval.Equals(obsoleteApproval) {
					// Skip only this approval. The original labeled the outer
					// action loop, so the first obsolete match silently dropped
					// every remaining approval of that action as well.
					continue ApprovalLoop
				}
			}
			res[action] = append(res[action], approval)
		}
	}
	return res
}
// Add appends an approval for the given action and returns the (mutated) map.
func (m Approvals) Add(action Action, approval Approval) Approvals {
	m[action] = append(m[action], approval)
	return m
}
// UseCount returns a copy of the approvals with one use consumed: exhausted
// entries (Count == 0) are dropped and mutable, limited entries get their
// Count decremented. The receiver itself is not modified (approval is a value
// copy, so the decrement only affects the result).
func (m Approvals) UseCount() Approvals {
	res := make(map[Action]ApprovalMeta, 0)
	for action, approvals := range m {
		for _, approval := range approvals {
			if approval.Options.Count == 0 {
				continue
			}
			if _, ok := res[action]; !ok {
				res[action] = make([]Approval, 0)
			}
			if !approval.Options.Immutable {
				approval.Options.Count--
			}
			res[action] = append(res[action], approval)
		}
	}
	return res
}
// MergeUsed folds the post-use approvals in used back into m: for every used
// approval the matching pre-use entry (same address, options differing by one
// consumed use) is overwritten in place; entries with no pre-use counterpart
// are appended. Returns the mutated map.
//
// Fixes over the original implementation:
//   - `found` is now reset for every used approval instead of once per action,
//     so a match on the first entry no longer suppresses appends for the rest;
//   - the no-op `append(m[action])` now actually appends the unmatched
//     approval `u`.
func (m Approvals) MergeUsed(used Approvals) Approvals {
	for action, aUsed := range used {
		for _, u := range aUsed {
			found := false
			for idx, dest := range m[action] {
				if u.AsAddress().Equals(dest.AsAddress()) &&
					dest.Options.EqualsAfterUse(u.Options) {
					m[action][idx] = u
					found = true
					break
				}
			}
			if !found {
				m[action] = append(m[action], u)
			}
		}
	}
	return m
}
func (m Approvals) Intersect(others Approvals) Approvals {
res := make(map[Action]ApprovalMeta, 0)
for action, approvals := range others {
mApprovals := m[action]
for _, src := range approvals {
for _, dest := range mApprovals {
if dest.Equals(src) {
if _, ok := res[action]; !ok {
res[action] = make([]Approval, 0)
}
res[action] = append(res[action], dest)
}
}
}
}
return res
} | x/nft/approvals.go | 0.519765 | 0.427516 | approvals.go | starcoder |
package pipeline
import (
"errors"
"fmt"
)
type (
	// Pipeline holds and runs intermediate actions, called "steps".
	Pipeline struct {
		steps []Step
		context Context
		beforeHooks []Listener
		finalizer ResultHandler
		options options
	}
	// Result is the object that is returned after each step and after running a pipeline.
	Result struct {
		// Err contains the step's returned error, nil otherwise.
		// In an aborted pipeline with ErrAbort it will still be nil.
		Err error
		// Name is an optional identifier for a result.
		// ActionFunc may set this property before returning to help a ResultHandler with further processing.
		Name string
		// aborted is set when a step requested an early stop via ErrAbort.
		aborted bool
	}
	// Step is an intermediary action and part of a Pipeline.
	Step struct {
		// Name describes the step's human-readable name.
		// It has no other uses other than easily identifying a step for debugging or logging.
		Name string
		// F is the ActionFunc assigned to a pipeline Step.
		// This is required.
		F ActionFunc
		// H is the ResultHandler assigned to a pipeline Step.
		// This is optional; when set it is always called after F has completed, regardless of the Result.
		// Use cases could be logging, updating a GUI or handling errors while continuing the pipeline.
		// The function may return nil even if the Result contains an error, in which case the pipeline will continue.
		// This function is called before the next step's F is invoked.
		H ResultHandler
	}
	// Context contains arbitrary user-defined data relevant for the pipeline execution; it may be nil.
	Context interface{}
	// Listener is a simple func that listens to Pipeline events.
	Listener func(step Step)
	// ActionFunc is the func that contains your business logic.
	// The context is a user-defined arbitrary data of type interface{} that gets provided in every Step, but may be nil if not set.
	ActionFunc func(ctx Context) Result
	// ResultHandler is a func that gets called when a step's ActionFunc has finished with any Result.
	// Context may be nil.
	ResultHandler func(ctx Context, result Result) error
)
// NewPipeline returns an empty Pipeline with no steps, hooks, context or
// finalizer configured.
func NewPipeline() *Pipeline {
	return new(Pipeline)
}
// NewPipelineWithContext returns an empty Pipeline whose steps will be
// invoked with the given context.
func NewPipelineWithContext(ctx Context) *Pipeline {
	p := new(Pipeline)
	p.context = ctx
	return p
}
// WithBeforeHooks replaces the pipeline's hook list with the given listeners
// and returns the pipeline for chaining.
// Each Listener is called once, in the given order, just before every step's
// ActionFunc is invoked.
// The listeners should return as fast as possible, as they are not intended to do actual business logic.
func (p *Pipeline) WithBeforeHooks(listeners []Listener) *Pipeline {
	p.beforeHooks = listeners
	return p
}
// AddBeforeHook appends the given listener to the existing list of hooks
// and returns the pipeline for chaining.
// See WithBeforeHooks.
func (p *Pipeline) AddBeforeHook(listener Listener) *Pipeline {
	p.beforeHooks = append(p.beforeHooks, listener)
	return p
}
// AddStep appends the given step to the end of the Pipeline and returns
// the pipeline for chaining.
func (p *Pipeline) AddStep(step Step) *Pipeline {
	steps := append(p.steps, step)
	p.steps = steps
	return p
}
// WithSteps replaces the Pipeline's steps with the given list and returns itself.
// Note: unlike AddStep, any previously configured steps are discarded.
func (p *Pipeline) WithSteps(steps ...Step) *Pipeline {
	p.steps = steps
	return p
}
// WithNestedSteps wraps the given steps in a child pipeline and exposes
// it as a single Step named name. It is similar to AsNestedStep, but the
// steps are supplied directly as parameters. The child inherits this
// pipeline's hooks, context and options (not its finalizer).
func (p *Pipeline) WithNestedSteps(name string, steps ...Step) Step {
	run := func(_ Context) Result {
		child := &Pipeline{
			beforeHooks: p.beforeHooks,
			steps:       steps,
			context:     p.context,
			options:     p.options,
		}
		return child.Run()
	}
	return NewStep(name, run)
}
// AsNestedStep converts the Pipeline instance into a Step that can be
// used in other pipelines. The hooks, steps, context and options are
// passed to the nested pipeline (the finalizer is not).
func (p *Pipeline) AsNestedStep(name string) Step {
	run := func(_ Context) Result {
		child := &Pipeline{
			beforeHooks: p.beforeHooks,
			steps:       p.steps,
			context:     p.context,
			options:     p.options,
		}
		return child.Run()
	}
	return NewStep(name, run)
}
// WithContext sets the context that is handed to every step's ActionFunc
// and ResultHandler, and returns the pipeline for chaining.
func (p *Pipeline) WithContext(ctx Context) *Pipeline {
	p.context = ctx
	return p
}
// WithFinalizer sets the finalizer for the pipeline and returns the
// pipeline for chaining.
// The finalizer is a handler that is called once after the last step in
// the pipeline has completed; it is also called when the pipeline aborts
// early.
func (p *Pipeline) WithFinalizer(handler ResultHandler) *Pipeline {
	p.finalizer = handler
	return p
}
// Run executes the pipeline and returns the result.
// Steps run sequentially in the order they were added. A step returning a
// Result with a non-nil error aborts the pipeline with that error, unless
// the error wraps ErrAbort, in which case the pipeline stops with a nil
// Err. If a finalizer is configured, its return value replaces Result.Err.
func (p *Pipeline) Run() Result {
	result := p.doRun()
	if p.finalizer == nil {
		return result
	}
	result.Err = p.finalizer(p.context, result)
	return result
}
func (p *Pipeline) doRun() Result {
for _, step := range p.steps {
for _, hooks := range p.beforeHooks {
hooks(step)
}
result := step.F(p.context)
var err error
if step.H != nil {
err = step.H(p.context, result)
} else {
err = result.Err
}
if err != nil {
if errors.Is(err, ErrAbort) {
// Abort pipeline without error
return Result{aborted: true}
}
if p.options.disableErrorWrapping {
return Result{Err: err}
}
return Result{Err: fmt.Errorf("step '%s' failed: %w", step.Name, err)}
}
}
return Result{}
} | pipeline.go | 0.706494 | 0.438605 | pipeline.go | starcoder |
package main
import (
"errors"
"math/bits"
"sort"
"github.com/golang-collections/collections/queue"
"github.com/golang-collections/go-datastructures/bitarray"
"github.com/hillbig/rsdic"
)
// prevPowerOf2 returns the largest power of two that is <= n
// (e.g. 5 -> 4, 8 -> 8, 1 -> 1). For n == 0 it returns 0, which the
// previous version produced only accidentally via an out-of-range shift
// (1 << (0-1) with an underflowed uint count evaluates to 0 in Go); the
// case is now explicit. The hand-rolled bit loop is replaced with
// bits.Len, which is exactly the count the loop computed.
func prevPowerOf2(n uint) int {
	if n == 0 {
		return 0
	}
	return 1 << (bits.Len(n) - 1)
}
// nextPowerOf2 returns the smallest power of two that is >= n
// (e.g. 5 -> 8); a value that already is a power of two is returned
// unchanged. Note that n == 0 yields 1.
func nextPowerOf2(n uint) int {
	if bits.OnesCount(n) == 1 {
		// exactly one bit set: n is already a power of two
		return int(n)
	}
	width := uint(0)
	for v := n; v > 0; v >>= 1 {
		width++
	}
	return 1 << width
}
/*
The work for these structures comes from the work of
Brisaboa et al. Some of the paper titles are listed:
"k2-trees for Compact Web Graph Representation"
We can build a k-squared tree from adjacency lists by
recursive descent using the theoretical structure below.
Quadrant Ordering
_____________
| | |
| 0 | 1 |
| | |
-------------
| | |
| 2 | 3 |
| | |
-------------
The input is a list of adjacency lists that represents a graph.
For each edge in the graph, we build a path in the k2-tree.
Starting from the root, we insert k2-tree nodes based on
the position of the edge in the graph's adjacency matrix.
For example, if the edge in question lies in quadrant 2 of
the adjacency matrix, we insert a k2-tree node into the
children list for the root node if it doesn't exist already.
Continue recursively into the found quadrant until the search
space is one cell of the adjacency matrix.
*/
// K2Tree represents a k-squared tree (k = 2) encoding of an adjacency matrix.
type K2Tree interface {
	// GetChild reports whether child c (0..3, quadrant order) of the node
	// stored at bit position x is set. It returns an error when x is out
	// of range or the bit at x is not set.
	GetChild(x int, c int) (bool, error)
}
// k2Tree is the packed array representation backing K2Tree.
type k2Tree struct {
	tree bitarray.BitArray // bits of all internal levels, in level order
	leaves bitarray.BitArray // bits of the deepest (leaf) level
	lenTree int // number of bits stored in tree
	lenLeaves int // number of bits stored in leaves
	rank *rsdic.RSDic // rank structure over tree's bits, used to locate children
}
// k2TreeNode is the pointer-based node used while building the tree,
// before it is serialized into the packed k2Tree form.
type k2TreeNode struct {
	children []*k2TreeNode // nil, or exactly 4 children in quadrant order
	value bool // true when the subtree contains at least one edge
	level int // depth of the node; the root is level 0
}
// GetChild gets the cth child (0..3) of the node at bit position x in
// the packed tree. The child's position is derived from the rank of set
// bits up to x; positions past the internal levels fall into the leaves
// bit array.
func (kt *k2Tree) GetChild(x int, c int) (bool, error) {
	set, err := kt.tree.GetBit(uint64(x))
	if err != nil {
		return false, err
	}
	if !set {
		return false, errors.New("Bit at pos x is not set")
	}
	childPos := (int(kt.rank.Rank(uint64(x), true))+1)*4 + c
	if childPos >= kt.lenTree {
		return kt.leaves.GetBit(uint64(childPos - kt.lenTree))
	}
	return kt.tree.GetBit(uint64(childPos))
}
// addK2TreeNode marks matrix cell (row, col) of an n x n adjacency
// matrix by materializing the corresponding root-to-leaf path of
// quadrant indices in the pointer tree rooted at root.
func addK2TreeNode(root *k2TreeNode, row int, col int, n int) {
	// Phase 1: derive the quadrant index (0..3) chosen at each level.
	// Quadrant numbering: bit 1 selects the lower half (rows), bit 0 the
	// right half (columns) — matching the 0/1/2/3 layout documented above.
	var path []int
	for k := 2; n/k >= 1; k *= 2 {
		half := n / k
		quadrant := 0
		if row >= half {
			quadrant += 2
			row -= half
		}
		if col >= half {
			quadrant++
			col -= half
		}
		path = append(path, quadrant)
	}
	// Phase 2: walk the path, creating child nodes on demand and marking
	// every visited node as occupied.
	node := root
	for _, quadrant := range path {
		node.value = true
		if node.children == nil {
			node.children = make([]*k2TreeNode, 4)
			for i := range node.children {
				node.children[i] = &k2TreeNode{value: false, level: node.level + 1}
			}
		}
		node.children[quadrant].value = true
		node = node.children[quadrant]
	}
}
// newK2Tree builds the packed k2-tree for the given adjacency lists in
// three phases: (1) insert every edge into a pointer-based tree,
// (2) find the tree depth with a BFS, (3) serialize the nodes in level
// order — internal levels into tree (with a rank structure), deepest
// level into leaves — and pack both into bit arrays.
// NOTE(review): each row of graph is sorted in place as a side effect.
func newK2Tree(graph [][]int) *k2Tree {
	nNodes := len(graph)
	root := k2TreeNode{value: true, level: 0}
	cursors := make([]int, nNodes)
	for _, row := range graph {
		sort.Ints(row)
	}
	// Phase 1: scan every (i, j) cell; cursors[i] tracks the next edge in
	// row i's sorted adjacency list, so each list is consumed in order.
	for i := 0; i < nNodes; i++ {
		nEdges := len(graph[i])
		for j := 0; j < nNodes; j++ {
			if cursors[i] < nEdges {
				if graph[i][cursors[i]] == j {
					// The matrix side is padded up to the next power of two.
					addK2TreeNode(&root, i, j, nextPowerOf2(uint(nNodes)))
					cursors[i]++
				}
			}
		}
	}
	// Phase 2: BFS to find the maximum depth (the leaf level).
	maxLevel := 0
	qu := queue.New()
	qu.Enqueue(&root)
	for qu.Len() > 0 {
		var node *k2TreeNode = qu.Dequeue().(*k2TreeNode)
		if node.level > maxLevel {
			maxLevel = node.level
		}
		// fmt.Print(node.level, " ", node.value, " ")
		for _, child := range node.children {
			qu.Enqueue(child)
		}
	}
	// fmt.Println()
	// Phase 3: second BFS collects the bits in level order. The root
	// (level 0) is skipped; leaf-level bits go to leaves, the rest to
	// tree, and each internal bit is also pushed into the rank structure.
	var tree []bool
	var leaves []bool
	var rank *rsdic.RSDic = rsdic.New()
	qu.Enqueue(&root)
	for qu.Len() > 0 {
		var node *k2TreeNode = qu.Dequeue().(*k2TreeNode)
		if node.level != maxLevel && node.level != 0 {
			tree = append(tree, node.value)
			rank.PushBack(node.value)
		} else if node.level == maxLevel {
			leaves = append(leaves, node.value)
		}
		for _, child := range node.children {
			qu.Enqueue(child)
		}
	}
	// fmt.Println(tree)
	// fmt.Println(leaves)
	// Pack the collected booleans into the final bit arrays.
	ktree := &k2Tree{
		tree: bitarray.NewBitArray(uint64(len(tree))),
		leaves: bitarray.NewBitArray(uint64(len(leaves))),
		lenTree: len(tree),
		lenLeaves: len(leaves),
		rank: rank,
	}
	for i, v := range tree {
		if v {
			ktree.tree.SetBit(uint64(i))
		}
	}
	for i, v := range leaves {
		if v {
			ktree.leaves.SetBit(uint64(i))
		}
	}
	return ktree
}
// NewK2Tree creates a new K2Tree
func NewK2Tree(graph [][]int) K2Tree {
return newK2Tree(graph)
} | k2tree.go | 0.636127 | 0.478529 | k2tree.go | starcoder |
package slice
// NewSliceSliceInt creates an n-by-m matrix of ints (n rows of length m),
// zero-initialized.
// NOTE(review): this file appears machine-generated (gen- prefix, one
// identical block per element type); prefer regenerating over hand-editing.
func NewSliceSliceInt(n, m int) SliceSliceInt {
	newSlice := make([]SliceInt, n)
	for i := range newSlice {
		newSlice[i] = NewSliceInt(m)
	}
	return newSlice
}

// Copy makes a new independent (row-by-row deep) copy of slice.
func (slice SliceSliceInt) Copy() SliceSliceInt {
	newSlice := make([]SliceInt, len(slice))
	for i := range newSlice {
		newSlice[i] = slice[i].Copy()
	}
	return newSlice
}

// String is for print; rows are separated by newlines.
func (slice SliceSliceInt) String() string {
	return slice.Print("\n")
}

// SpiralIterator returns []Coordinate in spiral order (delegates to
// spiralTopRight). NOTE: slice[0] is indexed, so an empty outer slice panics.
func (slice SliceSliceInt) SpiralIterator() []Coordinate {
	data := make([]Coordinate, len(slice)*len(slice[0]))
	return spiralTopRight(data, 0, 0, len(slice)-1, len(slice[0])-1)
}
// NewSliceSliceFloat64 creates an n-by-m matrix of float64s (n rows of
// length m), zero-initialized.
// NOTE(review): this file appears machine-generated (gen- prefix); prefer
// regenerating over hand-editing.
func NewSliceSliceFloat64(n, m int) SliceSliceFloat64 {
	newSlice := make([]SliceFloat64, n)
	for i := range newSlice {
		newSlice[i] = NewSliceFloat64(m)
	}
	return newSlice
}

// Copy makes a new independent (row-by-row deep) copy of slice.
func (slice SliceSliceFloat64) Copy() SliceSliceFloat64 {
	newSlice := make([]SliceFloat64, len(slice))
	for i := range newSlice {
		newSlice[i] = slice[i].Copy()
	}
	return newSlice
}

// String is for print; rows are separated by newlines.
func (slice SliceSliceFloat64) String() string {
	return slice.Print("\n")
}

// SpiralIterator returns []Coordinate in spiral order (delegates to
// spiralTopRight). NOTE: slice[0] is indexed, so an empty outer slice panics.
func (slice SliceSliceFloat64) SpiralIterator() []Coordinate {
	data := make([]Coordinate, len(slice)*len(slice[0]))
	return spiralTopRight(data, 0, 0, len(slice)-1, len(slice[0])-1)
}
// NewSliceSliceString creates an n-by-m matrix of strings (n rows of
// length m), zero-initialized.
// NOTE(review): this file appears machine-generated (gen- prefix); prefer
// regenerating over hand-editing.
func NewSliceSliceString(n, m int) SliceSliceString {
	newSlice := make([]SliceString, n)
	for i := range newSlice {
		newSlice[i] = NewSliceString(m)
	}
	return newSlice
}

// Copy makes a new independent (row-by-row deep) copy of slice.
func (slice SliceSliceString) Copy() SliceSliceString {
	newSlice := make([]SliceString, len(slice))
	for i := range newSlice {
		newSlice[i] = slice[i].Copy()
	}
	return newSlice
}

// String is for print; rows are separated by newlines.
func (slice SliceSliceString) String() string {
	return slice.Print("\n")
}

// SpiralIterator returns []Coordinate in spiral order (delegates to
// spiralTopRight). NOTE: slice[0] is indexed, so an empty outer slice panics.
func (slice SliceSliceString) SpiralIterator() []Coordinate {
	data := make([]Coordinate, len(slice)*len(slice[0]))
	return spiralTopRight(data, 0, 0, len(slice)-1, len(slice[0])-1)
}
// NewSliceSliceByte creates an n-by-m matrix of bytes (n rows of length
// m), zero-initialized.
// NOTE(review): this file appears machine-generated (gen- prefix); prefer
// regenerating over hand-editing.
func NewSliceSliceByte(n, m int) SliceSliceByte {
	newSlice := make([]SliceByte, n)
	for i := range newSlice {
		newSlice[i] = NewSliceByte(m)
	}
	return newSlice
}

// Copy makes a new independent (row-by-row deep) copy of slice.
func (slice SliceSliceByte) Copy() SliceSliceByte {
	newSlice := make([]SliceByte, len(slice))
	for i := range newSlice {
		newSlice[i] = slice[i].Copy()
	}
	return newSlice
}

// String is for print; rows are separated by newlines.
func (slice SliceSliceByte) String() string {
	return slice.Print("\n")
}

// SpiralIterator returns []Coordinate in spiral order (delegates to
// spiralTopRight). NOTE: slice[0] is indexed, so an empty outer slice panics.
func (slice SliceSliceByte) SpiralIterator() []Coordinate {
	data := make([]Coordinate, len(slice)*len(slice[0]))
	return spiralTopRight(data, 0, 0, len(slice)-1, len(slice[0])-1)
}
// NewSliceSliceBool creates an n-by-m matrix of bools (n rows of length
// m), zero-initialized.
// NOTE(review): this file appears machine-generated (gen- prefix); prefer
// regenerating over hand-editing.
func NewSliceSliceBool(n, m int) SliceSliceBool {
	newSlice := make([]SliceBool, n)
	for i := range newSlice {
		newSlice[i] = NewSliceBool(m)
	}
	return newSlice
}

// Copy makes a new independent (row-by-row deep) copy of slice.
func (slice SliceSliceBool) Copy() SliceSliceBool {
	newSlice := make([]SliceBool, len(slice))
	for i := range newSlice {
		newSlice[i] = slice[i].Copy()
	}
	return newSlice
}

// String is for print; rows are separated by newlines.
func (slice SliceSliceBool) String() string {
	return slice.Print("\n")
}

// SpiralIterator returns []Coordinate in spiral order (delegates to
// spiralTopRight). NOTE: slice[0] is indexed, so an empty outer slice panics.
func (slice SliceSliceBool) SpiralIterator() []Coordinate {
	data := make([]Coordinate, len(slice)*len(slice[0]))
	return spiralTopRight(data, 0, 0, len(slice)-1, len(slice[0])-1)
}
package tcp
// Header represent a TCP packet, Due performance impact, don't use this type and its methods.
type Header struct {
// Indicate source port number of TCP packet as stream identifier
SourcePort uint16
// Indicate destination port of TCP packet as protocol identifier
DestinationPort uint16
// Represents the TCP segment’s window index, mark the ordering of a group of messages.
// When handshaking, this contains the Initial Sequence Number (ISN).
SequenceNumber uint32
// Represents the window’s index of the next byte the sender expects to receive.
// After the handshake, the ACK field must always be populated.
AckNumber uint32
// Indicate the length of the header or offset of the data.
// It encode||decode as 4-bit **words** means 40 bytes encode as 10 words.
// The minimum size header is 5 words and the maximum is 15 words thus giving
// the minimum size of 20 bytes and maximum of 60 bytes,
// allowing for up to 40 bytes of options in the header.
DataOffset uint8
Flags Flags
// The Window Size field is used to advertise the window size.
// In other words, this is the number of bytes the receiver is willing to accept.
// Since it is a 16-bit field, the maximum window size is 65,535 bytes.
// Window specifies the number of window size units[c] that the sender of this
// segment is currently willing to receive.
Window uint16
// Used to verify the integrity of the TCP segment.
// Generated by the protocol sender as a mathematical technique
// to help the receiver detect messages that are corrupted or tampered with.
// If TCP carry by IP, the algorithm is the same as for the Internet Protocol(IP),
// but the input segment also contains the TCP data and also a pseudo-header from the IP datagram.
Checksum uint16
// The Urgent Pointer is used when the U-flag is set. The pointer indicates the position of the urgent data in the stream.
// It is often set to zero and ignored, but in conjunction with one of the control flags,
// it can be used as a data offset to mark a subset of a message as requiring priority processing.
UrgentPointer uint16
// After the header, several options (0 to 40 bytes) can be provided. An example of these options is:
// - The Maximum Segment Size (MSS), where the sender informs the other side of the maximum size of the segments.
// - Special acknowledgment
// - Window scaling algorithms
Options []Option
Padding []byte
// After the possible options, the actual data follows. The data, however, is not required.
// For example, the handshake is accomplished with only TCP header
Payload []byte
} | tcp/header.go | 0.689619 | 0.410727 | header.go | starcoder |
package spf
import (
"strings"
"unicode/utf8"
)
// lexer represents lexing structure
type lexer struct {
start int
pos int
prev int
length int
input string
}
// lex reads SPF record and returns list of Tokens along with
// their modifiers and values. Parser should parse the Tokens and execute
// relevant actions
func lex(input string) []*token {
var tokens []*token
l := &lexer{0, 0, 0, len(input), input}
for {
token := l.scan()
if token.mechanism == tEOF {
break
}
tokens = append(tokens, token)
}
return tokens
}
// scan scans input and returns a Token structure
func (l *lexer) scan() *token {
for {
r, eof := l.next()
if eof {
return &token{tEOF, tEOF, ""}
} else if isWhitespace(r) || l.eof() { // we just scanned some meaningful data
token := l.scanIdent()
l.scanWhitespaces()
l.moveon()
return token
}
}
}
// Lexer.eof() return true when scanned record has ended, false otherwise
func (l *lexer) eof() bool { return l.pos >= l.length }
// Lexer.next() returns next read rune and boolean indicator whether scanned
// record has ended. Method also moves `pos` value to size (length of read rune),
// and `prev` to previous `pos` location.
func (l *lexer) next() (rune, bool) {
if l.eof() {
return 0, true
}
r, size := utf8.DecodeRuneInString(l.input[l.pos:])
// TODO(zaccone): check for operation success/failure
l.prev = l.pos
l.pos += size
return r, false
}
// Lexer.moveon() sets Lexer.start to Lexer.pos. This is usually done once the
// ident has been scanned.
func (l *lexer) moveon() { l.start = l.pos }
// Lexer.back() moves back current Lexer.pos to a previous position.
func (l *lexer) back() { l.pos = l.prev }
// scanWhitespaces moves position to a first rune which is not a
// whitespace or tab
func (l *lexer) scanWhitespaces() {
for {
if ch, eof := l.next(); eof {
return
} else if !isWhitespace(ch) {
l.back()
return
}
}
}
// scanIdent is a Lexer method executed after an ident was found.
// It operates on a slice with constraints [l.start:l.pos).
// A cursor tries to find delimiters and set proper `mechanism`, `qualifier`
// and value itself.
// The default token has `mechanism` set to tErr, that is, error state.
func (l *lexer) scanIdent() *token {
t := &token{tErr, qPlus, ""}
start := l.start
cursor := l.start
hasQualifier := false
loop:
for cursor < l.pos {
ch, size := utf8.DecodeRuneInString(l.input[cursor:])
cursor += size
switch ch {
case '+', '-', '~', '?':
if hasQualifier {
t.qualifier = qErr // multiple qualifiers
} else {
t.qualifier, _ = qualifiers[ch]
hasQualifier = true
}
l.start = cursor
continue
case '=', ':', '/':
if t.qualifier != qErr {
t.mechanism = tokenTypeFromString(l.input[l.start : cursor-size])
p := cursor
if ch == '/' { // special case for (mx|a) dual-cidr-length
p = cursor - size
ch = ':' // replace ch with expected delimiter for checkTokenSyntax
}
t.value = strings.TrimSpace(l.input[p:l.pos])
}
if t.value == "" || !checkTokenSyntax(t, ch) {
t.qualifier = qErr
t.mechanism = tErr
}
break loop
}
}
if t.isErr() {
t.mechanism = tokenTypeFromString(strings.TrimSpace(l.input[l.start:cursor]))
if t.isErr() {
t.mechanism = tErr
t.qualifier = qErr
t.value = strings.TrimSpace(l.input[start:l.pos])
}
}
return t
}
// isWhitespace returns true if the rune is a space, tab, or newline.
func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' }
// isDigit returns true if rune is a numer (between '0' and '9'), false otherwise
func isDigit(ch rune) bool { return ch >= '0' && ch <= '9' } | lexer.go | 0.562177 | 0.411347 | lexer.go | starcoder |
package tree
// Tree is a binary search tree keyed by int values; smaller values are
// stored in the left subtree, equal and larger values in the right.
type Tree struct {
	Root *TreeNode // nil when the tree is empty
}

// TreeNode is a single node of a Tree.
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
// NewTreeNode returns a new zero-valued tree node.
func NewTreeNode() *TreeNode {
	return new(TreeNode)
}
// Insert adds val to the tree, creating the root when the tree is empty.
func (t *Tree) Insert(val int) {
	if t.Root != nil {
		t.Root.insertNode(val)
		return
	}
	t.Root = &TreeNode{Val: val}
}
// insertNode recursively places value in the subtree rooted at current;
// values smaller than the node go left, all others (including
// duplicates) go right.
func (current *TreeNode) insertNode(value int) {
	if value < current.Val {
		if current.Left != nil {
			current.Left.insertNode(value)
			return
		}
		current.Left = &TreeNode{Val: value}
		return
	}
	if current.Right != nil {
		current.Right.insertNode(value)
		return
	}
	current.Right = &TreeNode{Val: value}
}
// Contains reports whether val is stored somewhere in the tree.
func (t *Tree) Contains(val int) bool {
	return t.Root != nil && t.Root.searchNode(val)
}
// searchNode reports whether value occurs in the subtree rooted at
// current. It tolerates a nil receiver, which terminates the recursion.
func (current *TreeNode) searchNode(value int) bool {
	switch {
	case current == nil:
		return false
	case value == current.Val:
		return true
	case value < current.Val:
		return current.Left.searchNode(value)
	default:
		return current.Right.searchNode(value)
	}
}
func (t *Tree) Delete(val int) bool {
nodeToRemove := t.Root.FindNode(val)
// Node is not found
if nodeToRemove == nil {
return false
}
parent := t.Root.FindParent(val)
// Node is root
if parent == nil {
t.Root = nil
return true
}
count := t.Root.Size()
// Removing the only node in the tree
if count == 1 {
t.Root = nil
return true
}
// Case 1: Node is a leaf
if nodeToRemove.Left == nil && nodeToRemove.Right == nil {
// Node is a left child
if nodeToRemove.Val < parent.Val {
parent.Left = nil
// Node is a right child
} else {
parent.Right = nil
}
}
// Case 2 and 3: Node has one child
if nodeToRemove.Left == nil || nodeToRemove.Right == nil {
// Case 2: Node is a left child
if nodeToRemove.Val < parent.Val {
if nodeToRemove.Left != nil {
parent.Left = nodeToRemove.Left
} else {
parent.Left = nodeToRemove.Right
}
// Case 3: Node is a right child
} else {
if nodeToRemove.Left != nil {
parent.Right = nodeToRemove.Left
} else {
parent.Right = nodeToRemove.Right
}
}
}
} | tree/tree.go | 0.664976 | 0.478224 | tree.go | starcoder |
package docs
import (
"bytes"
"encoding/json"
"strings"
"github.com/alecthomas/template"
"github.com/swaggo/swag"
)
var doc = `{
"schemes": {{ marshal .Schemes }},
"swagger": "2.0",
"info": {
"description": "{{.Description}}",
"title": "{{.Title}}",
"contact": {},
"license": {},
"version": "{{.Version}}"
},
"host": "{{.Host}}",
"basePath": "{{.BasePath}}",
"paths": {
"/models": {
"get": {
"description": "Query all available room models",
"produces": [
"application/json"
],
"tags": [
"models"
],
"summary": "Query models",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/model.RoomModel"
}
}
},
"400": {
"description": "bad request",
"schema": {
"type": "string"
}
},
"500": {
"description": "internal server error",
"schema": {
"type": "string"
}
}
}
}
},
"/models/{id}": {
"get": {
"description": "Query a single room model by id with containing sensors",
"produces": [
"application/json"
],
"tags": [
"models"
],
"summary": "Query room model",
"parameters": [
{
"type": "integer",
"description": "RoomModel ID",
"name": "id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/model.RoomModel"
}
},
"400": {
"description": "bad request",
"schema": {
"type": "string"
}
},
"404": {
"description": "not found",
"schema": {
"type": "string"
}
},
"500": {
"description": "internal server error",
"schema": {
"type": "string"
}
}
}
}
},
"/sensors": {
"get": {
"description": "Query all available sensors.",
"produces": [
"application/json"
],
"tags": [
"sensors"
],
"summary": "Query sensors",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/model.Sensor"
}
}
},
"400": {
"description": "bad request",
"schema": {
"type": "string"
}
},
"500": {
"description": "internal server error",
"schema": {
"type": "string"
}
}
}
}
},
"/sensors/{id}": {
"get": {
"description": "Query a single sensor by id",
"produces": [
"application/json"
],
"tags": [
"sensors"
],
"summary": "Query sensor",
"parameters": [
{
"type": "integer",
"description": "Sensor ID",
"name": "id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/model.Sensor"
}
},
"400": {
"description": "bad request",
"schema": {
"type": "string"
}
},
"404": {
"description": "not found",
"schema": {
"type": "string"
}
},
"500": {
"description": "internal server error",
"schema": {
"type": "string"
}
}
}
},
"patch": {
"description": "Updates the mesh id and anomaly preferences of a single sensor.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"sensors"
],
"summary": "Update sensor preferences",
"parameters": [
{
"type": "integer",
"description": "SensorId",
"name": "id",
"in": "path",
"required": true
},
{
"description": "UpdateSensor",
"name": "update_sensor",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/api.UpdateSensor"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/model.Sensor"
}
},
"400": {
"description": "bad request",
"schema": {
"type": "string"
}
},
"500": {
"description": "internal server error",
"schema": {
"type": "string"
}
}
}
}
},
"/sensors/{id}/anomalies": {
"get": {
"description": "Query anomalies for a specific sensor",
"produces": [
"application/json"
],
"tags": [
"sensors"
],
"summary": "Query anomalies",
"parameters": [
{
"type": "integer",
"description": "Sensor ID",
"name": "id",
"in": "path",
"required": true
},
{
"type": "string",
"description": "Start Date",
"name": "start_date",
"in": "query"
},
{
"type": "string",
"description": "End Date",
"name": "end_date",
"in": "query"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/api.Anomaly"
}
}
},
"400": {
"description": "bad request",
"schema": {
"type": "string"
}
},
"404": {
"description": "not found",
"schema": {
"type": "string"
}
},
"500": {
"description": "internal server error",
"schema": {
"type": "string"
}
}
}
}
},
"/sensors/{id}/data": {
"get": {
"description": "Query data for a specific sensor",
"produces": [
"application/json"
],
"tags": [
"sensors"
],
"summary": "Query sensor data",
"parameters": [
{
"type": "integer",
"description": "Sensor ID",
"name": "id",
"in": "path",
"required": true
},
{
"type": "integer",
"description": "Data Limit",
"name": "limit",
"in": "query"
},
{
"type": "integer",
"description": "Include only every nth element [1-16]",
"name": "density",
"in": "query"
},
{
"type": "string",
"description": "Start Date",
"name": "start_date",
"in": "query"
},
{
"type": "string",
"description": "End Date",
"name": "end_date",
"in": "query"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/model.Data"
}
}
},
"400": {
"description": "bad request",
"schema": {
"type": "string"
}
},
"404": {
"description": "not found",
"schema": {
"type": "string"
}
},
"500": {
"description": "internal server error",
"schema": {
"type": "string"
}
}
}
}
}
},
"definitions": {
"api.Anomaly": {
"type": "object",
"properties": {
"end_data": {
"type": "Data"
},
"peak_data": {
"type": "Data"
},
"start_data": {
"type": "Data"
},
"type": {
"type": "string"
}
}
},
"api.UpdateSensor": {
"type": "object",
"properties": {
"gradient_bound": {
"type": "number"
},
"lower_bound": {
"type": "number"
},
"mesh_id": {
"type": "string"
},
"upper_bound": {
"type": "number"
}
}
},
"model.Data": {
"type": "object",
"properties": {
"date": {
"type": "object",
"$ref": "#/definitions/model.Date"
},
"gradient": {
"type": "number"
},
"id": {
"type": "integer"
},
"sensor_id": {
"type": "integer"
},
"value": {
"type": "number"
}
}
},
"model.Date": {
"type": "object"
},
"model.RoomModel": {
"type": "object",
"properties": {
"floors": {
"type": "integer"
},
"id": {
"type": "integer"
},
"image_url": {
"type": "string"
},
"location": {
"type": "string"
},
"name": {
"type": "string"
},
"sensors": {
"type": "array",
"items": {
"$ref": "#/definitions/model.Sensor"
}
},
"type": {
"type": "string"
},
"url": {
"type": "string"
}
}
},
"model.Sensor": {
"type": "object",
"properties": {
"description": {
"type": "string"
},
"gradient_bound": {
"type": "number"
},
"id": {
"type": "integer"
},
"import_name": {
"type": "string"
},
"latest_data": {
"type": "object",
"$ref": "#/definitions/model.Data"
},
"lower_bound": {
"type": "number"
},
"measurement_unit": {
"type": "string"
},
"mesh_id": {
"type": "integer"
},
"name": {
"type": "string"
},
"range": {
"type": "string"
},
"room_model_id": {
"type": "integer"
},
"upper_bound": {
"type": "number"
}
}
}
}
}`
// swaggerInfo holds the values injected into the doc template by ReadDoc.
type swaggerInfo struct {
	Version string
	Host string
	BasePath string
	Schemes []string
	Title string
	Description string
}
// SwaggerInfo holds exported Swagger Info so clients can modify it
// before the documentation is rendered by ReadDoc.
var SwaggerInfo = swaggerInfo{
	Version: "0.1.9",
	Host: "",
	BasePath: "/",
	Schemes: []string{},
	Title: "vi-sense BIM API",
	Description: "This API provides information about 3D room models with associated sensors and their data.",
}
// s is registered with swag (see init) and serves the rendered swagger
// document through its ReadDoc method.
type s struct{}
// ReadDoc renders the doc template with the current SwaggerInfo values
// and returns the resulting swagger JSON. On any template parse or
// execution error it falls back to returning the raw template text.
func (s *s) ReadDoc() string {
	sInfo := SwaggerInfo
	// Escape newlines so the description stays a valid JSON string value.
	sInfo.Description = strings.Replace(sInfo.Description, "\n", "\\n", -1)
	t, err := template.New("swagger_info").Funcs(template.FuncMap{
		// "marshal" lets the template embed values as JSON (e.g. Schemes).
		"marshal": func(v interface{}) string {
			a, _ := json.Marshal(v)
			return string(a)
		},
	}).Parse(doc)
	if err != nil {
		return doc
	}
	var tpl bytes.Buffer
	if err := t.Execute(&tpl, sInfo); err != nil {
		return doc
	}
	return tpl.String()
}
// init registers this documentation provider with the swag runtime so it
// can be served by the swagger handler.
func init() {
	swag.Register(swag.Name, &s{})
}
package logx
import (
"fmt"
"os"
)
// Multi is a Log implementation that fans every logging call out to each
// Log instance it contains.
type Multi []Log
// Add appends the specified Log 'l' to the Multi; nil values are ignored.
func (m *Multi) Add(l Log) {
	if l != nil {
		*m = append(*m, l)
	}
}
// Multiple returns a Multi that fans out to the Log instances given in
// the 'l' vardict.
func Multiple(l ...Log) *Multi {
	var m Multi = l
	return &m
}
// SetLevel changes the current logging level of every contained Log.
func (m Multi) SetLevel(l Level) {
	for _, entry := range m {
		entry.SetLevel(l)
	}
}
// SetPrefix changes the current logging prefix of every contained Log.
func (m Multi) SetPrefix(p string) {
	for _, entry := range m {
		entry.SetPrefix(p)
	}
}
// SetPrintLevel sets, for every contained Log, the logging level used
// when 'Print*' statements are called.
func (m Multi) SetPrintLevel(n Level) {
	for _, entry := range m {
		entry.SetPrintLevel(n)
	}
}
// Print writes a message to every contained logger. The arguments are
// handled like fmt.Sprint/fmt.Print. Loggers that implement LogWriter
// receive the call through their Log method at the Print level; the rest
// get a plain Print. This function is affected by 'SetPrintLevel'.
func (m Multi) Print(v ...interface{}) {
	for _, entry := range m {
		if w, ok := entry.(LogWriter); ok {
			w.Log(Print, 1, "", v...)
			continue
		}
		entry.Print(v...)
	}
}
// Panic writes a panic message to every contained logger, then panics
// with the fmt.Sprint-formatted message.
func (m Multi) Panic(v ...interface{}) {
	for _, entry := range m {
		if w, ok := entry.(LogWriter); ok {
			w.Log(Panic, 1, "", v...)
			continue
		}
		// Log at the Error level so a non-flexible logger cannot terminate
		// the process before the remaining loggers have written.
		entry.Error("", v...)
	}
	panic(fmt.Sprint(v...))
}
// Println writes a message to every contained logger. The arguments are
// handled like fmt.Sprintln/fmt.Println. Loggers that implement
// LogWriter receive the call through their Log method at the Print
// level; the rest get a plain Println. This function is affected by
// 'SetPrintLevel'.
func (m Multi) Println(v ...interface{}) {
	for _, entry := range m {
		if w, ok := entry.(LogWriter); ok {
			w.Log(Print, 1, "", v...)
			continue
		}
		entry.Println(v...)
	}
}
// Panicln writes a panic message to the logger.
// This function will result in the program exiting with a Go 'panic()' after being called. The function arguments
// are similar to fmt.Sprintln and fmt.Println. The only argument is a vardict of interfaces that can be used to
// output a string value.
func (m Multi) Panicln(v ...interface{}) {
for i := range m {
if x, ok := m[i].(LogWriter); ok {
x.Log(Panic, 1, "", v...)
} else {
// Write as Error here to prevent the non-flexable logger from exiting the program
// before all logs can be written.
m[i].Error("", v...)
}
}
panic(fmt.Sprint(v...))
}
// Info writes a informational message to the logger.
// The function arguments are similar to fmt.Sprintf and fmt.Printf. The first argument is
// a string that can contain formatting characters. The second argument is a vardict of
// interfaces that can be omitted or used in the supplied format string.
func (m Multi) Info(s string, v ...interface{}) {
for i := range m {
if x, ok := m[i].(LogWriter); ok {
x.Log(Info, 1, s, v...)
} else {
m[i].Info(s, v...)
}
}
}
// Error writes a error message to the logger.
// The function arguments are similar to fmt.Sprintf and fmt.Printf. The first argument is
// a string that can contain formatting characters. The second argument is a vardict of
// interfaces that can be omitted or used in the supplied format string.
func (m Multi) Error(s string, v ...interface{}) {
for i := range m {
if x, ok := m[i].(LogWriter); ok {
x.Log(Error, 1, s, v...)
} else {
m[i].Error(s, v...)
}
}
}
// Fatal writes a fatal message to the logger.
// This function will result in the program
// exiting with a non-zero error code after being called, unless the logx.FatalExits' setting is 'false'.
// The function arguments are similar to fmt.Sprintf and fmt.Printf. The first argument is
// a string that can contain formatting characters. The second argument is a vardict of
// interfaces that can be omitted or used in the supplied format string.
func (m Multi) Fatal(s string, v ...interface{}) {
for i := range m {
if x, ok := m[i].(LogWriter); ok {
x.Log(Fatal, 1, s, v...)
} else {
// Write as Error here to prevent the non-flexable logger from exiting the program
// before all logs can be written.
m[i].Error(s, v...)
}
}
if FatalExits {
os.Exit(1)
}
}
// Trace writes a tracing message to the logger.
// The function arguments are similar to fmt.Sprintf and fmt.Printf. The first argument is
// a string that can contain formatting characters. The second argument is a vardict of
// interfaces that can be omitted or used in the supplied format string.
func (m Multi) Trace(s string, v ...interface{}) {
for i := range m {
if x, ok := m[i].(LogWriter); ok {
x.Log(Trace, 1, s, v...)
} else {
m[i].Trace(s, v...)
}
}
}
// Debug writes a debugging message to the logger.
// The function arguments are similar to fmt.Sprintf and fmt.Printf. The first argument is
// a string that can contain formatting characters. The second argument is a vardict of
// interfaces that can be omitted or used in the supplied format string.
func (m Multi) Debug(s string, v ...interface{}) {
for i := range m {
if x, ok := m[i].(LogWriter); ok {
x.Log(Debug, 1, s, v...)
} else {
m[i].Debug(s, v...)
}
}
}
// Printf writes a message to the logger.
// The function arguments are similar to fmt.Sprintf and fmt.Printf. The first argument is
// a string that can contain formatting characters. The second argument is a vardict of
// interfaces that can be omitted or used in the supplied format string.
// This function is affected by the setting of 'SetPrintLevel'. By default, this will print as an 'Info'
// logging message.
func (m Multi) Printf(s string, v ...interface{}) {
for i := range m {
if x, ok := m[i].(LogWriter); ok {
x.Log(Print, 1, s, v...)
} else {
m[i].Printf(s, v...)
}
}
}
// Panicf writes a panic message to the logger.
// This function will result in the program exiting with a Go 'panic()' after being called. The function arguments
// are similar to fmt.Sprintf and fmt.Printf. The first argument is a string that can contain formatting characters.
// The second argument is a vardict of interfaces that can be omitted or used in the supplied format string.
func (m Multi) Panicf(s string, v ...interface{}) {
for i := range m {
if x, ok := m[i].(LogWriter); ok {
x.Log(Panic, 1, s, v...)
} else {
// Write as Error here to prevent the non-flexable logger from exiting the program
// before all logs can be written.
m[i].Error(s, v...)
}
}
panic(fmt.Sprintf(s, v...))
}
// Warning writes a warning message to the logger.
// The function arguments are similar to fmt.Sprintf and fmt.Printf. The first argument is
// a string that can contain formatting characters. The second argument is a vardict of
// interfaces that can be omitted or used in the supplied format string.
func (m Multi) Warning(s string, v ...interface{}) {
for i := range m {
if x, ok := m[i].(LogWriter); ok {
x.Log(Warning, 1, s, v...)
} else {
m[i].Warning(s, v...)
}
}
} | v2/multiple.go | 0.584153 | 0.433022 | multiple.go | starcoder |
package timeutil
import (
"fmt"
"strings"
"time"
)
type periodicStart struct {
dayOfWeek int
hourOfDay int
minuteOfHour int
secondOfMinute int
}
// Periodic keeps track of a repeating period of time
type Periodic struct {
start *periodicStart
duration time.Duration
}
// Period is a span of time from Start to End
type Period struct {
Start time.Time
End time.Time
}
// ParsePeriodic returns a Periodic specified as a start and duration.
func ParsePeriodic(start, duration string) (*Periodic, error) {
var err error
pc := &Periodic{}
if pc.start, err = parseStart(start); err != nil {
return nil, fmt.Errorf("unable to parse start: %v", err)
}
if pc.duration, err = time.ParseDuration(duration); err != nil {
return nil, fmt.Errorf("unable to parse duration: %v", err)
}
if pc.duration < time.Duration(0) {
return nil, fmt.Errorf("duration cannot be negative")
}
// check that the duration of the window does not exceed the period.
if (pc.start.dayOfWeek == -1 && pc.duration >= 24*time.Hour) || pc.duration >= 7*24*time.Hour {
return nil, fmt.Errorf("duration cannot exceed period")
}
return pc, nil
}
var weekdays = map[string]int{
"sun": int(time.Sunday),
"mon": int(time.Monday),
"tue": int(time.Tuesday),
"wed": int(time.Wednesday),
"thu": int(time.Thursday),
"fri": int(time.Friday),
"sat": int(time.Saturday),
}
// parseStart parses a string into a periodicStart.
func parseStart(start string) (*periodicStart, error) {
ps := &periodicStart{}
ps.dayOfWeek = -1
f := strings.Fields(start)
switch len(f) {
case 1: // no day provided
case 2:
if dow, ok := weekdays[strings.ToLower(f[0])]; ok {
ps.dayOfWeek = dow
} else {
return nil, fmt.Errorf("invalid day of week %q", f[0])
}
// shift
f = f[1:]
default:
return nil, fmt.Errorf("wrong number of fields")
}
n, err := fmt.Sscanf(f[0], "%d:%d", &ps.hourOfDay, &ps.minuteOfHour)
// check Sscanf failure
if n != 2 || err != nil {
return nil, fmt.Errorf("invalid time of day %q: %v", f[0], err)
}
// check hour range
if ps.hourOfDay < 0 || ps.hourOfDay > 23 {
return nil, fmt.Errorf("invalid time of day %q: hour must be >= 0 and <= 23", f[0])
}
// check minute range
if ps.minuteOfHour < 0 || ps.minuteOfHour > 59 {
return nil, fmt.Errorf("invalid time of day %q: minute must be >= 0 and <= 59", f[0])
}
return ps, nil
}
// DurationToStart returns the duration between the supplied time and the start
// of Periodic's relevant period.
// If we're in a period, a value <= 0 is returned, indicating how
// deep into period we are.
// If we're outside a period, a value > 0 is returned, indicating how long
// before the next period starts.
func (pc *Periodic) DurationToStart(ref time.Time) time.Duration {
prev := pc.Previous(ref)
if prev.End.After(ref) || prev.End.Equal(ref) {
return prev.Start.Sub(ref)
}
return pc.Next(ref).Start.Sub(ref)
}
func (pc *Periodic) shiftTimeByDays(ref time.Time, daydiff int) time.Time {
rt := time.Date(ref.Year(),
ref.Month(),
ref.Day()+daydiff,
pc.start.hourOfDay,
pc.start.minuteOfHour,
pc.start.secondOfMinute,
0,
ref.Location())
return rt
}
// Previous returns Periodic's previous Period occurrence relative to ref.
func (pc *Periodic) Previous(ref time.Time) (p *Period) {
p = &Period{}
if pc.start.dayOfWeek != -1 { // Weekly
if pc.cmpDayOfWeek(ref) >= 0 {
// this week
p.Start = pc.shiftTimeByDays(ref, -(int(ref.Weekday()) - pc.start.dayOfWeek))
} else {
// last week
p.Start = pc.shiftTimeByDays(ref, -(int(ref.Weekday()) + (7 - pc.start.dayOfWeek)))
}
} else if pc.start.hourOfDay != -1 { // Daily
if pc.cmpHourOfDay(ref) >= 0 {
// today
p.Start = pc.shiftTimeByDays(ref, 0)
} else {
// yesterday
p.Start = pc.shiftTimeByDays(ref, -1)
}
} // XXX(mischief): other intervals unsupported atm.
p.End = p.Start.Add(pc.duration)
return
}
// Next returns Periodic's next Period occurrence relative to ref.
func (pc *Periodic) Next(ref time.Time) (p *Period) {
p = &Period{}
if pc.start.dayOfWeek != -1 { // Weekly
if pc.cmpDayOfWeek(ref) < 0 {
// This week
p.Start = pc.shiftTimeByDays(ref, pc.start.dayOfWeek-int(ref.Weekday()))
} else {
// Next week
p.Start = pc.shiftTimeByDays(ref, (7-int(ref.Weekday()))+pc.start.dayOfWeek)
}
} else if pc.start.hourOfDay != -1 { // Daily
if pc.cmpHourOfDay(ref) < 0 {
// Today
p.Start = pc.shiftTimeByDays(ref, 0)
} else {
// Tomorrow
p.Start = pc.shiftTimeByDays(ref, 1)
}
} // XXX(mischief): other intervals unsupported atm.
p.End = p.Start.Add(pc.duration)
return
}
// cmpDayOfWeek compares ref to Periodic occurring in the same week as ref.
// The return value is less than, equal to, or greater than zero if ref occurs
// before, equal to, or after the start of Periodic within the same week.
func (pc *Periodic) cmpDayOfWeek(ref time.Time) time.Duration {
pStart := pc.shiftTimeByDays(ref, -int(ref.Weekday())+pc.start.dayOfWeek)
return ref.Sub(pStart)
}
// cmpHourOfDay compares ref to Periodic occurring in the same day as ref.
// The return value is less than, equal to, or greater than zero if ref occurs
// before, equal to, or after the start of Periodic in the same day.
func (pc *Periodic) cmpHourOfDay(ref time.Time) time.Duration {
pStart := pc.shiftTimeByDays(ref, 0)
return ref.Sub(pStart)
} | vendor/github.com/coreos/locksmith/pkg/timeutil/periodic.go | 0.772831 | 0.447098 | periodic.go | starcoder |
package meta
import (
"github.com/puppetlabs/wash/cmd/internal/find/parser/predicate"
)
/*
If metadata predicates are constructed on metadata values, then metadata
schema predicates are constructed on metadata schemas. Thus, one would expect
that metadata schema predicate parsing is symmetric with metadata predicate
parsing, where instead of walking the metadata values, we walk the metadata
schema. Unfortunately, metadata schemas are JSON schemas. Walking a JSON schema
is more complicated than walking a JSON object because there's a lot more rules
associated with a JSON schema than a JSON object. Thus, it is easier to delegate
to a JSON schema validator.
Consider the expression ".key1 .key2 5 -o 6". This reads "return
true if m['key1']['key2'] == 5 OR m['key1'] == 6". The schema predicate
would return true if "m['key1']['key2'] == number OR m['key1'] == number".
Since we don't care about primitive types, this reduces to
"m['key1']['key2'] == primitive_type OR m['key1'] == primitive_type".
If we normalize all primitive types to the "null" type, then our final schema
predicate is "m['key1']['key2'] == null OR m['key1'] == null". Now if we let
LHS = {"KEY1":{"KEY2":null}} represent the LHS' JSON serialization, and
RHS = {"KEY1":null} represent the RHS' JSON serialization, then our schema
predicate would return true iff the JSON schema validator returned true for
the LHS OR if the validator returned true for the RHS.
Generating the JSON object for a key sequence is tricky because unlike metadata
predicates, child nodes need to know the current key sequence. For example,
in the expression ".key1 .key2 5", we want our generated JSON object to be
{"KEY1": {"KEY2": null}}, and we want this object to be generated by the "5"
node since that is where the key sequence ends. Since our schema predicate
consists of validating JSON objects against the metadata schema, and since those
JSON objects are generated from key sequences, this implies that schema predicates
are generated from key sequences. That is why every schemaP is associated with a
key sequence.
NOTE: We'll need to munge the JSON schema to ensure that we get the right validation.
That munging is done by the schema type.
NOTE: A fundamental invariant of schema predicates is that they always return true
iff their predicate returns true.
*/
type schemaPredicate interface {
predicate.Predicate
updateKS(func(keySequence) keySequence)
}
// valueSchemaP is a base class for schema predicates on values
type valueSchemaP struct {
ks keySequence
}
func newObjectValueSchemaP() *valueSchemaP {
return &valueSchemaP{
ks: (keySequence{}).EndsWithObject(),
}
}
func newArrayValueSchemaP() *valueSchemaP {
return &valueSchemaP{
ks: (keySequence{}).EndsWithArray(),
}
}
func newPrimitiveValueSchemaP() *valueSchemaP {
return &valueSchemaP{
ks: (keySequence{}).EndsWithPrimitiveValue(),
}
}
func (p1 *valueSchemaP) IsSatisfiedBy(v interface{}) bool {
s, ok := v.(schema)
if !ok {
return false
}
return s.IsValidKeySequence(p1.ks)
}
/*
"Not(valueSchemaP) == valueSchemaP". To see why, consider the negation of
the predicate counterpart (like "! 5"). Since the predicate's negation still
returns false for a mis-typed value, and since schemaPs operate at the
type-level, both these conditions imply that the type-level predicate, and
hence the schemaP, does not change when the predicate counterpart is negated.
In our example, "! 5" only returns true for numeric values. Thus, its
corresponding schemaP should still expect a numeric value (specifically a
"primitive value" since primitive types are normalized to "null").
*/
func (p1 *valueSchemaP) Negate() predicate.Predicate {
return p1
}
func (p1 *valueSchemaP) updateKS(updateFunc func(keySequence) keySequence) {
p1.ks = updateFunc(p1.ks)
}
// schemaPBinaryOp is a base class for schemaP binary operators
type schemaPBinaryOp struct {
p1 schemaPredicate
p2 schemaPredicate
}
func (op schemaPBinaryOp) updateKS(updateFunc func(keySequence) keySequence) {
op.p1.updateKS(updateFunc)
op.p2.updateKS(updateFunc)
}
// Note that we need the schemaPAnd/schemaPOr classes to ensure that
// De'Morgan's law is enforced. Also, Combine for these classes is not
// implemented b/c it is not needed -- predicateAnd/predicateOr's combine
// handles schema predicates.
type schemaPAnd struct {
schemaPBinaryOp
}
func newSchemaPAnd(p1 schemaPredicate, p2 schemaPredicate) *schemaPAnd {
return &schemaPAnd{
schemaPBinaryOp: schemaPBinaryOp{
p1: p1,
p2: p2,
},
}
}
func (op *schemaPAnd) IsSatisfiedBy(v interface{}) bool {
return op.p1.IsSatisfiedBy(v) && op.p2.IsSatisfiedBy(v)
}
func (op *schemaPAnd) Negate() predicate.Predicate {
return newSchemaPOr(op.p1.Negate().(schemaPredicate), op.p2.Negate().(schemaPredicate))
}
type schemaPOr struct {
schemaPBinaryOp
}
func newSchemaPOr(p1 schemaPredicate, p2 schemaPredicate) *schemaPOr {
return &schemaPOr{
schemaPBinaryOp: schemaPBinaryOp{
p1: p1,
p2: p2,
},
}
}
func (op *schemaPOr) IsSatisfiedBy(v interface{}) bool {
return op.p1.IsSatisfiedBy(v) || op.p2.IsSatisfiedBy(v)
}
func (op *schemaPOr) Negate() predicate.Predicate {
return newSchemaPAnd(op.p1.Negate().(schemaPredicate), op.p2.Negate().(schemaPredicate))
} | cmd/internal/find/primary/meta/schemaPredicate.go | 0.77081 | 0.508666 | schemaPredicate.go | starcoder |
package container
/**
* @Date: 2020/6/26 16:56
* @Description: 红黑树实现
红黑树的特性:
(1)每个节点或者是黑色,或者是红色。
(2)根节点是黑色。
(3)每个叶子节点(NIL)是黑色。 [注意:这里叶子节点,是指为空(NIL)的叶子节点!]
(4)如果一个节点是红色的,则它的子节点必须是黑色的。
(5)从一个节点到该节点的子孙节点的所有路径上包含相同数目的黑节点。
参考:
https://zh.wikipedia.org/wiki/%E7%BA%A2%E9%BB%91%E6%A0%91
https://www.cnblogs.com/skywang12345/p/3245399.html [这个图是错的,伪代码是对的]
**/
//>> 不要在内部私自更改会影响排序的Key
type RbtValue interface {
FindComparator
}
type Color bool
const (
RED = false
BLACK = true
)
type RBTNode struct {
parent *RBTNode
left *RBTNode
right *RBTNode
color Color
Value RbtValue
}
func (n *RBTNode) grandparent() *RBTNode {
if n.parent == nil {
return nil
}
return n.parent.parent
}
func (n *RBTNode) uncle() *RBTNode {
if n.grandparent() == nil {
return nil
}
if n.grandparent().left == n.parent {
return n.grandparent().right
}
return n.grandparent().left
}
// 子树n的最小值
func minimum(n *RBTNode) *RBTNode {
for n.left != nil {
n = n.left
}
return n
}
// 子树n的最大值
func maximum(n *RBTNode) *RBTNode {
for n.right != nil {
n = n.right
}
return n
}
//>> 前驱
func (n *RBTNode) preSuccessor() *RBTNode {
if n.left != nil {
return maximum(n.left)
}
if n.parent != nil {
if n.parent.right == n {
return n.parent
}
for n.parent != nil && n.parent.left == n {
n = n.parent
}
return n.parent
}
return nil
}
//>> 后继
func (n *RBTNode) successor() *RBTNode {
if n.right != nil {
return minimum(n.right)
}
y := n.parent
for y != nil && n == y.right {
n = y
y = n.parent
}
return y
}
func (n *RBTNode) getValue() RbtValue {
if n == nil {
return nil
}
return n.Value
}
func (n *RBTNode) getColor() Color {
if n == nil {
return BLACK
}
return n.color
}
func (n *RBTNode) compare(key interface{}) int {
return n.Value.Compare(key)
}
type RBTree struct {
root *RBTNode
size int
}
func NewRBTree() *RBTree {
return &RBTree{}
}
//>> 查找, 如果有相同key的Value可能返回任意一个
func (rb *RBTree) Find(key interface{}) RBTIterator {
return NewRBTIterator(rb.findNode(key))
}
//>> 返回第一个大于或等于key的Value
func (rb *RBTree) LowerBound(key interface{}) RBTIterator {
return NewRBTIterator(rb.lowerBound(key))
}
//>> 返回第一个大于key的Value
func (rb *RBTree) UpperBound(key interface{}) RBTIterator {
return NewRBTIterator(rb.upperBound(key))
}
//>> 返回插入节点迭代器
func (rb *RBTree) Insert(val RbtValue) RBTIterator {
node := rb.insert(val)
return NewRBTIterator(node)
}
//>> 返回删除节点个数和下一个节点的迭代器
func (rb *RBTree) Erase(key interface{}) (RBTIterator, int) {
node, cnt := rb.erase(key)
return NewRBTIterator(node), cnt
}
//>> 返回删除节点的下一个节点的迭代器
func (rb *RBTree) EraseAt(where Iterator) RBTIterator {
rbIter, ok := where.(RBTIterator)
if !ok {
return NewRBTIterator(nil)
}
if !rbIter.IsValid() {
return rbIter
}
successor := where.Next().(RBTIterator)
rb.eraseNode2(successor.node)
return successor
}
func (rb *RBTree) Size() int {
return rb.size
}
func (rb *RBTree) Empty() bool {
return rb.size == 0
}
func (rb *RBTree) Begin() RBTIterator {
if rb.root == nil {
return NewRBTIterator(nil)
}
return NewRBTIterator(minimum(rb.root))
}
func (rb *RBTree) End() RBTIterator {
if rb.root == nil {
return NewRBTIterator(nil)
}
return NewRBTIterator(maximum(rb.root))
}
func (rb *RBTree) Clear() {
rb.root = nil
rb.size = 0
}
func (rb *RBTree) Foreach(fun func(val interface{}) bool) {
if rb.root == nil {
return
}
for i := minimum(rb.root); i != nil; i = i.successor() {
if !fun(i.getValue()) {
return
}
}
}
//>> 返回一个等于key的Node
func (rb *RBTree) findNode(key interface{}) *RBTNode {
cur := rb.root
for cur != nil {
if cur.compare(key) < 0 {
cur = cur.right
} else if cur.compare(key) == 0 {
return cur
} else {
cur = cur.left
}
}
return nil
}
//>> 返回第一个大于或等于key的Node
func (rb *RBTree) lowerBound(key interface{}) *RBTNode {
cur := rb.root
var target *RBTNode
for cur != nil {
if cur.compare(key) >= 0 {
target = cur
cur = cur.left
} else {
cur = cur.right
}
}
return target
}
//>> 返回第一个大于key的Node
func (rb *RBTree) upperBound(key interface{}) *RBTNode {
cur := rb.root
var target *RBTNode
for cur != nil {
if cur.compare(key) > 0 {
target = cur
cur = cur.left
} else {
cur = cur.right
}
}
return target
}
//>> 旋转前:x是"根", y是x的右孩子
//>> 旋转后:y是"根",x是y的左孩子
//>> 看起来把x向左下放了
func (rb *RBTree) leftRotate(x *RBTNode) {
y := x.right
x.right = y.left
if y.left != nil {
y.left.parent = x
}
y.parent = x.parent
if x.parent == nil {
rb.root = y
} else if x == x.parent.left {
x.parent.left = y
} else {
x.parent.right = y
}
y.left = x
x.parent = y
}
//>> 旋转前:x是"根", y是x的左孩子
//>> 旋转后:y是"根",x是y的右孩子
//>> 看起来把x向右下放了
func (rb *RBTree) rightRotate(x *RBTNode) {
y := x.left
x.left = y.right
if y.right != nil {
y.right.parent = x
}
y.parent = x.parent
if x.parent == nil {
rb.root = y
} else if x == x.parent.right {
x.parent.right = y
} else {
x.parent.left = y
}
y.right = x
x.parent = y
}
//>> 插入
func (rb *RBTree) insert(value RbtValue) *RBTNode {
cur := rb.root
var p *RBTNode
for cur != nil {
p = cur
if cur.compare(value.Key()) <= 0 {
cur = cur.right
} else {
cur = cur.left
}
}
node := &RBTNode{
parent: p,
Value: value,
color: RED,
}
rb.size++
if p == nil {
rb.root = node
rb.root.color = BLACK
} else {
if p.compare(value.Key()) <= 0 {
p.right = node
} else {
p.left = node
}
rb.rbInsertFixup(node)
}
return node
}
//>> 参考《算法导论》 Re-balance
func (rb *RBTree) rbInsertFixup(n *RBTNode) {
var y *RBTNode
for n.parent != nil && n.parent.color == RED {
//>> 父节点是黑色的话没有违反任何性质无需处理
if n.parent == n.parent.parent.left {
//>> 父节点P是其父节点的左子节点
y = n.parent.parent.right
if y != nil && y.color == RED {
//>> 父节点和叔父节点二者都是红色,只先将两者变色, 然后迭代解决祖父节点(祖父是红色可能违反性质4)
n.parent.color = BLACK
y.color = BLACK
n.parent.parent.color = RED
n = n.parent.parent
} else {
//>> 叔父节点是黑色
if n == n.parent.right {
//>> 新节点是右子节点
//>> 这种交叉的情况(新节点是右子节点,父节左子节点)不好处理, 先想办法处理成两个红色在一边(都是左子节点)再继续操作
//>> 即把父节点设为当前节点然后左旋一次
n = n.parent
rb.leftRotate(n)
}
//>> 左旋完后n(原先的父节点)必是左孩子,其叔父也必是黑色
//>> 但n和n的父亲(原先的新节点)仍都是红色,违反性质4,所以还得继续处理
//>> 先把n的父变为黑色,此时左子树多了一个黑色,暂时违反性质5
//>> 把祖父变为红色,此时祖父的父亲也可能是红色,暂时违反性质4
//>> 右旋祖父节点,祖父(红色)变到右边去了,父变成新的“根节点”,并且是黑色(左右子树都会通过所以不会违反性质5),所以上面两个问题都解决了
n.parent.color = BLACK
n.parent.parent.color = RED
rb.rightRotate(n.parent.parent)
}
} else { // 和上面对称的情况
y = n.parent.parent.left
if y != nil && y.color == RED {
n.parent.color = BLACK
y.color = BLACK
n.parent.parent.color = RED
n = n.parent.parent
} else {
if n == n.parent.left {
n = n.parent
rb.rightRotate(n)
}
n.parent.color = BLACK
n.parent.parent.color = RED
rb.leftRotate(n.parent.parent)
}
}
}
rb.root.color = BLACK
}
//>> 自己研究的
func (rb *RBTree) insertCase(node *RBTNode) {
//>> case 1 是根节点
if node.parent == nil {
rb.root = node
rb.root.color = BLACK
return
}
//>> case 2 父节点P是黑色
if node.parent.color == BLACK {
return
}
//>> 余下parent是红色, 所以一定存在grandparent
//>> case 3 如果父节点和叔父节点二者都是红色
if node.uncle() != nil && node.uncle().color == RED {
node.parent.color = BLACK
node.uncle().color = BLACK
node.grandparent().color = RED
rb.insertCase(node.grandparent())
return
}
//>> case 4 父节点是红色而叔父节点黑色或缺少,并且新节点N是其父节点P的右子节点而父节点P又是其父节点的左子节点
if node == node.parent.right && node.parent == node.grandparent().left {
node = node.parent
rb.leftRotate(node)
//>> 按case 5处理以前的父节点P以解决仍然失效的性质
rb.insertCase(node)
return
}
if node == node.parent.left && node.parent == node.grandparent().right {
node = node.parent
rb.rightRotate(node)
rb.insertCase(node)
return
}
//>> case 5 父节点P是红色而叔父节点U是黑色或缺少,并且新节点N是其父节点的左子节点,而父节点P又是其父节点G的左子节点
node.parent.color = BLACK
node.grandparent().color = RED
if node == node.parent.left && node.parent == node.grandparent().left {
rb.rightRotate(node.grandparent())
} else if node == node.parent.right && node.parent == node.grandparent().right {
rb.leftRotate(node.grandparent())
}
}
func (rb *RBTree) erase(key interface{}) (*RBTNode, int) {
node := rb.lowerBound(key)
if node.compare(key) != 0 {
return nil, 0
}
cnt := 0
var successor *RBTNode
for node != nil && node.compare(key) == 0 {
successor = node.successor()
rb.eraseNode2(node)
node = successor
cnt++
}
return successor, cnt
}
//>> 参考《算法导论》 当有两个子节点时把后继节点的值拷贝到n, 然后删除后继节点,实现简单,但是这种做法会导致之前保存的后继节点指针失效
func (rb *RBTree) eraseNode_NotUse(n *RBTNode) {
//>> y是要删除的节点
var y *RBTNode
if n.left != nil && n.right != nil {
//>> 如果左右子节点都不为空,转化为删除后继节点
//>> 左右子节点都不为空节点的后继节点必定最多只有一个非空子节点,即转化为第2种情况
y = n.successor()
n.Value = y.Value
} else {
y = n
}
//>> x是y可能存在的一个非空子节点,用x代替y的位置,弃掉y
var x *RBTNode
if y.left != nil {
x = y.left
} else {
x = y.right
}
nn := y.parent
if x != nil {
x.parent = y.parent
}
if y.parent == nil {
rb.root = x
} else if y.parent.left == y {
y.parent.left = x
} else {
y.parent.right = x
}
//>> 如果删除了一个黑节点,可能会破坏性质2,4,5
if y.color == BLACK {
rb.rbDeleteFixup(x, nn)
}
rb.size--
}
//>> 参考C++ STL, 不会破坏后继节点, 先把后继节点和要删除的节点位置交换(Relink), 再把要删除的节点剥出来
func (rb *RBTree) eraseNode2(n *RBTNode) {
erasedNode := n
var fixNode, fixNodeParent, pNode *RBTNode
pNode = erasedNode
if pNode.left == nil {
fixNode = pNode.right
} else if pNode.right == nil {
fixNode = pNode.left
} else {
pNode = n.successor()
fixNode = pNode.right
}
if pNode == erasedNode {
fixNodeParent = erasedNode.parent
if fixNode != nil {
fixNode.parent = fixNodeParent // link up
}
if rb.root == erasedNode {
rb.root = fixNode // link down from root
} else if fixNodeParent.left == erasedNode {
fixNodeParent.left = fixNode // link down to left
} else {
fixNodeParent.right = fixNode // link down to right
}
} else {
//>> pNode is erasedNode's successor, swap(pNode, erasedNode)
erasedNode.left.parent = pNode // link left up
pNode.left = erasedNode.left // link successor down
if pNode == erasedNode.right {
fixNodeParent = pNode // successor is next to erased
} else { // successor further down, link in place of erased
fixNodeParent = pNode.parent // parent is successor's
if fixNode != nil {
fixNode.parent = fixNodeParent // link fix up
}
fixNodeParent.left = fixNode // link fix down
pNode.right = erasedNode.right // link next down
erasedNode.right.parent = pNode // right up
}
if rb.root == erasedNode {
rb.root = pNode // link down from root
} else if erasedNode.parent.left == erasedNode {
erasedNode.parent.left = pNode // link down to left
} else {
erasedNode.parent.right = pNode // link down to right
}
pNode.parent = erasedNode.parent // link successor up
pNode.color, erasedNode.color = erasedNode.color, pNode.color // recolor it
}
if erasedNode.color == BLACK { // erasing black link, must recolor/rebalance tree
rb.rbDeleteFixup(fixNode, fixNodeParent)
}
rb.size--
}
//>> 参考《算法导论》 Re-balance
func (rb *RBTree) rbDeleteFixup(x, parent *RBTNode) {
if x != nil && x.color == RED {
//>> 如果x是红色, 只要重绘为黑色所有性质都没有破坏(删掉一个黑的但又补回来了)
x.color = BLACK
return
} else if x == rb.root {
return
}
//>> 因为删除了一个黑色节点,所以少了一个黑色节点,性质5遭到破坏
var w *RBTNode
for x != rb.root && x.getColor() == BLACK {
if x != nil {
parent = x.parent
}
if x == parent.left {
w = parent.right
if w.color == RED {
//>> case 1 兄弟为红色
//>> 左旋父节点,原来的兄弟节点变为x的祖父节点,所以要对调兄弟节点和父节点的颜色,保持所有路径上的黑色节点数不变
//>> 但现在x的兄弟变为黑色(原先是红色),父亲变为红色了
w.color = BLACK
parent.color = RED
rb.leftRotate(parent)
w = parent.right //>> 新的兄弟节点(原来兄弟节点的左孩子)必为黑色
}
if w.left.getColor() == BLACK && w.right.getColor() == BLACK {
//>> case 2 兄弟和兄弟的儿子都是黑色
//>> 先把兄弟变为红色(通过兄弟的路径少了一个黑色)
//>> 如果父亲是红色,把父亲重绘为黑色就完事了(回到了最开始好处理的情况)
//>> 如果父亲是黑色,兄弟变为红色后没有破坏性质4,但通过兄弟的路径少了一个黑色,又因为之前x这边已经删除了一个黑色,所以x的父亲的这颗子树反而变得平衡了
//>> 但是x的父亲仍然比它的兄弟子树少一个黑色节点,所以继续处理x的父亲
w.color = RED
x = parent
} else {
if w.right.getColor() == BLACK {
//>> case 3 兄弟和兄弟的右儿子是黑色
//>> 右旋兄弟节点,这样兄弟的左儿子成为兄弟的父亲和x的新兄弟,对调颜色保持所有路径上的黑色节点数不变
//>> 但是现在x有了一个黑色兄弟,并且他的右儿子是红色的,所以进入了最后一个case(终于要看到曙光了)
if w.left != nil {
w.left.color = BLACK
}
w.color = RED
rb.rightRotate(w)
w = parent.right
}
//>> case 4 兄弟是黑色,并且兄弟的右儿子是红色的
//>> 上面的操作都是为了把不是case 4情况的变成case 4
//>> 左旋转x的父亲,x的父亲成为原先兄弟的左儿子,即x的祖父(原先兄弟的右儿子仍是它原来的右儿子)
//>> 交换x的父亲和兄弟的颜色,即:
//>> 1.把x父亲(老的子树根)的颜色复制给兄弟(新的子树根),保持左旋后在根上维持原先颜色, 所以性质4没有违反
//>> 2.把x父亲变为黑色(原先兄弟的颜色),不会违反性质4,因为原先兄弟(黑色)现在变成了x的祖父,所以通过x的路径增加了一个黑色节点(把删掉的补回来了),所以
//>> 左旋后在根上颜色没有变,但原先的兄弟(黑色)没了,所以把兄弟的右儿子变为黑色补回来,保持右边性质5平衡
w.color = parent.color
if w.right != nil {
w.right.color = BLACK
}
parent.color = BLACK
rb.leftRotate(parent)
x = rb.root
}
} else { //>> 与上面对称
w = parent.left
if w.color == RED {
w.color = BLACK
parent.color = RED
rb.rightRotate(parent)
w = parent.left
}
if w.left.getColor() == BLACK && w.right.getColor() == BLACK {
w.color = RED
x = parent
} else {
if w.left.getColor() == BLACK {
if w.right != nil {
w.right.color = BLACK
}
w.color = RED
rb.leftRotate(w)
w = parent.left
}
w.color = parent.color
parent.color = BLACK
if w.left != nil {
w.left.color = BLACK
}
rb.rightRotate(parent)
x = rb.root
}
}
}
if x != nil {
x.color = BLACK
}
}
func (t *RBTree) height(node *RBTNode) int {
if node == nil {
return 0
}
left := t.height(node.left)
right := t.height(node.right)
if left > right {
return left + 1
} else {
return right + 1
}
}
type RBTIterator struct {
node *RBTNode
}
func NewRBTIterator(node *RBTNode) RBTIterator {
return RBTIterator{node: node}
}
func (iter RBTIterator) IsValid() bool {
return iter.node != nil
}
func (iter RBTIterator) Next() Iterator {
if iter.IsValid() {
iter.node = iter.node.successor()
}
return iter
}
func (iter RBTIterator) Prev() Iterator {
if iter.IsValid() {
iter.node = iter.node.preSuccessor()
}
return iter
}
func (iter RBTIterator) Key() interface{} {
if iter.IsValid() {
return iter.node.Value.Key()
}
return nil
}
func (iter RBTIterator) Value() interface{} {
return iter.node.getValue()
} | src/gostd/container/rbtree.go | 0.575111 | 0.544135 | rbtree.go | starcoder |
package network
import (
"encoding/json"
"fmt"
"testing"
"github.com/ingrammicro/cio/api/types"
"github.com/ingrammicro/cio/utils"
"github.com/stretchr/testify/assert"
)
// ListCertificatesMocked test mocked function
func ListCertificatesMocked(
t *testing.T,
loadBalancerID string,
certificatesIn []*types.Certificate,
) []*types.Certificate {
assert := assert.New(t)
// wire up
cs := &utils.MockConcertoService{}
ds, err := NewCertificateService(cs)
assert.Nil(err, "Couldn't load certificate service")
assert.NotNil(ds, "Certificate service not instanced")
// to json
dIn, err := json.Marshal(certificatesIn)
assert.Nil(err, "Certificates test data corrupted")
// call service
cs.On("Get", fmt.Sprintf(APIPathNetworkLoadBalancerCertificates, loadBalancerID)).Return(dIn, 200, nil)
certificatesOut, err := ds.ListCertificates(loadBalancerID)
assert.Nil(err, "Error getting certificates")
assert.Equal(certificatesIn, certificatesOut, "ListCertificates returned different certificates")
return certificatesOut
}
// ListCertificatesFailErrMocked test mocked function
func ListCertificatesFailErrMocked(
t *testing.T,
loadBalancerID string,
certificatesIn []*types.Certificate,
) []*types.Certificate {
assert := assert.New(t)
// wire up
cs := &utils.MockConcertoService{}
ds, err := NewCertificateService(cs)
assert.Nil(err, "Couldn't load certificate service")
assert.NotNil(ds, "Certificate service not instanced")
// to json
dIn, err := json.Marshal(certificatesIn)
assert.Nil(err, "Certificates test data corrupted")
// call service
cs.On("Get", fmt.Sprintf(APIPathNetworkLoadBalancerCertificates, loadBalancerID)).
Return(dIn, 200, fmt.Errorf("mocked error"))
certificatesOut, err := ds.ListCertificates(loadBalancerID)
assert.NotNil(err, "We are expecting an error")
assert.Nil(certificatesOut, "Expecting nil output")
assert.Equal(err.Error(), "mocked error", "Error should be 'mocked error'")
return certificatesOut
}
// ListCertificatesFailStatusMocked test mocked function
func ListCertificatesFailStatusMocked(
t *testing.T,
loadBalancerID string,
certificatesIn []*types.Certificate,
) []*types.Certificate {
assert := assert.New(t)
// wire up
cs := &utils.MockConcertoService{}
ds, err := NewCertificateService(cs)
assert.Nil(err, "Couldn't load certificate service")
assert.NotNil(ds, "Certificate service not instanced")
// to json
dIn, err := json.Marshal(certificatesIn)
assert.Nil(err, "Certificates test data corrupted")
// call service
cs.On("Get", fmt.Sprintf(APIPathNetworkLoadBalancerCertificates, loadBalancerID)).Return(dIn, 499, nil)
certificatesOut, err := ds.ListCertificates(loadBalancerID)
assert.NotNil(err, "We are expecting an status code error")
assert.Nil(certificatesOut, "Expecting nil output")
assert.Contains(err.Error(), "499", "Error should contain http code 499")
return certificatesOut
}
// ListCertificatesFailJSONMocked test mocked function.
//
// Verifies that ListCertificates reports an unmarshalling error when the
// mocked transport replies with a body that is not valid JSON.
func ListCertificatesFailJSONMocked(
	t *testing.T,
	loadBalancerID string,
	certificatesIn []*types.Certificate,
) []*types.Certificate {
	assert := assert.New(t)
	// build the service under test on top of a mocked transport
	mockSvc := &utils.MockConcertoService{}
	certSvc, err := NewCertificateService(mockSvc)
	assert.Nil(err, "Couldn't load certificate service")
	assert.NotNil(certSvc, "Certificate service not instanced")
	// a body that cannot be decoded as JSON
	badPayload := []byte{10, 20, 30}
	// stub the GET endpoint and exercise the service
	mockSvc.On("Get", fmt.Sprintf(APIPathNetworkLoadBalancerCertificates, loadBalancerID)).Return(badPayload, 200, nil)
	result, err := certSvc.ListCertificates(loadBalancerID)
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(result, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
	return result
}
// GetCertificateMocked test mocked function.
//
// Exercises the happy path of GetCertificate against a mocked transport and
// checks the returned certificate matches the fixture.
func GetCertificateMocked(t *testing.T, loadBalancerID string, certificateIn *types.Certificate) *types.Certificate {
	assert := assert.New(t)
	// build the service under test on top of a mocked transport
	mockSvc := &utils.MockConcertoService{}
	certSvc, err := NewCertificateService(mockSvc)
	assert.Nil(err, "Couldn't load certificate service")
	assert.NotNil(certSvc, "Certificate service not instanced")
	// serialize the fixture the mock will hand back
	payload, err := json.Marshal(certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// stub the GET endpoint and exercise the service
	mockSvc.On("Get", fmt.Sprintf(APIPathNetworkLoadBalancerCertificate, loadBalancerID, certificateIn.ID)).
		Return(payload, 200, nil)
	result, err := certSvc.GetCertificate(loadBalancerID, certificateIn.ID)
	assert.Nil(err, "Error getting certificate")
	assert.Equal(*certificateIn, *result, "GetCertificate returned different certificate")
	return result
}
// GetCertificateFailErrMocked test mocked function.
//
// It wires the certificate service on top of a mocked Concerto service whose
// Get call returns a transport error, then checks that GetCertificate
// surfaces that error and returns no certificate.
func GetCertificateFailErrMocked(
	t *testing.T,
	loadBalancerID string,
	certificateIn *types.Certificate,
) *types.Certificate {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewCertificateService(cs)
	assert.Nil(err, "Couldn't load certificate service")
	assert.NotNil(ds, "Certificate service not instanced")
	// to json
	dIn, err := json.Marshal(certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// call service
	cs.On("Get", fmt.Sprintf(APIPathNetworkLoadBalancerCertificate, loadBalancerID, certificateIn.ID)).
		Return(dIn, 200, fmt.Errorf("mocked error"))
	certificateOut, err := ds.GetCertificate(loadBalancerID, certificateIn.ID)
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(certificateOut, "Expecting nil output")
	// testify's convention is Equal(expected, actual); the arguments were
	// previously swapped, which makes failure output misleading.
	assert.Equal("mocked error", err.Error(), "Error should be 'mocked error'")
	return certificateOut
}
// GetCertificateFailStatusMocked test mocked function.
//
// Verifies that GetCertificate converts a non-2xx HTTP status from the
// mocked Concerto service into an error that mentions the status code.
func GetCertificateFailStatusMocked(
	t *testing.T,
	loadBalancerID string,
	certificateIn *types.Certificate,
) *types.Certificate {
	assert := assert.New(t)
	// build the service under test on top of a mocked transport
	mockSvc := &utils.MockConcertoService{}
	certSvc, err := NewCertificateService(mockSvc)
	assert.Nil(err, "Couldn't load certificate service")
	assert.NotNil(certSvc, "Certificate service not instanced")
	// serialize the fixture the mock will hand back
	payload, err := json.Marshal(certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// stub the GET endpoint with an error status code and exercise the service
	mockSvc.On("Get", fmt.Sprintf(APIPathNetworkLoadBalancerCertificate, loadBalancerID, certificateIn.ID)).
		Return(payload, 499, nil)
	result, err := certSvc.GetCertificate(loadBalancerID, certificateIn.ID)
	assert.NotNil(err, "We are expecting an status code error")
	assert.Nil(result, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return result
}
// GetCertificateFailJSONMocked test mocked function.
//
// Verifies that GetCertificate reports an unmarshalling error when the
// mocked transport replies with a body that is not valid JSON.
func GetCertificateFailJSONMocked(
	t *testing.T,
	loadBalancerID string,
	certificateIn *types.Certificate,
) *types.Certificate {
	assert := assert.New(t)
	// build the service under test on top of a mocked transport
	mockSvc := &utils.MockConcertoService{}
	certSvc, err := NewCertificateService(mockSvc)
	assert.Nil(err, "Couldn't load certificate service")
	assert.NotNil(certSvc, "Certificate service not instanced")
	// a body that cannot be decoded as JSON
	badPayload := []byte{10, 20, 30}
	// stub the GET endpoint and exercise the service
	mockSvc.On("Get", fmt.Sprintf(APIPathNetworkLoadBalancerCertificate, loadBalancerID, certificateIn.ID)).
		Return(badPayload, 200, nil)
	result, err := certSvc.GetCertificate(loadBalancerID, certificateIn.ID)
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(result, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
	return result
}
// CreateCertificateMocked test mocked function.
//
// Exercises the happy path of CreateCertificate: the fixture is converted to
// the request parameter map, the mocked POST replies with the same
// certificate, and the result is compared against the fixture.
func CreateCertificateMocked(t *testing.T, loadBalancerID string, certificateIn *types.Certificate) *types.Certificate {
	assert := assert.New(t)
	// build the service under test on top of a mocked transport
	mockSvc := &utils.MockConcertoService{}
	certSvc, err := NewCertificateService(mockSvc)
	assert.Nil(err, "Couldn't load certificate service")
	assert.NotNil(certSvc, "Certificate service not instanced")
	// request parameters derived from the fixture
	params, err := utils.ItemConvertParams(*certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// serialize the fixture the mock will hand back
	payload, err := json.Marshal(certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// stub the POST endpoint and exercise the service
	mockSvc.On("Post", fmt.Sprintf(APIPathNetworkLoadBalancerCertificates, loadBalancerID), params).Return(payload, 200, nil)
	result, err := certSvc.CreateCertificate(loadBalancerID, params)
	assert.Nil(err, "Error creating certificate")
	assert.Equal(certificateIn, result, "CreateCertificate returned different certificate")
	return result
}
// CreateCertificateFailErrMocked test mocked function.
//
// It wires the certificate service on top of a mocked Concerto service whose
// Post call returns a transport error, then checks that CreateCertificate
// surfaces that error and returns no certificate.
func CreateCertificateFailErrMocked(
	t *testing.T,
	loadBalancerID string,
	certificateIn *types.Certificate,
) *types.Certificate {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewCertificateService(cs)
	assert.Nil(err, "Couldn't load certificate service")
	assert.NotNil(ds, "Certificate service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// to json
	dOut, err := json.Marshal(certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// call service
	cs.On("Post", fmt.Sprintf(APIPathNetworkLoadBalancerCertificates, loadBalancerID), mapIn).
		Return(dOut, 200, fmt.Errorf("mocked error"))
	certificateOut, err := ds.CreateCertificate(loadBalancerID, mapIn)
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(certificateOut, "Expecting nil output")
	// testify's convention is Equal(expected, actual); the arguments were
	// previously swapped, which makes failure output misleading.
	assert.Equal("mocked error", err.Error(), "Error should be 'mocked error'")
	return certificateOut
}
// CreateCertificateFailStatusMocked test mocked function.
//
// Verifies that CreateCertificate converts a non-2xx HTTP status from the
// mocked Concerto service into an error that mentions the status code.
func CreateCertificateFailStatusMocked(
	t *testing.T,
	loadBalancerID string,
	certificateIn *types.Certificate,
) *types.Certificate {
	assert := assert.New(t)
	// build the service under test on top of a mocked transport
	mockSvc := &utils.MockConcertoService{}
	certSvc, err := NewCertificateService(mockSvc)
	assert.Nil(err, "Couldn't load certificate service")
	assert.NotNil(certSvc, "Certificate service not instanced")
	// request parameters derived from the fixture
	params, err := utils.ItemConvertParams(*certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// serialize the fixture the mock will hand back
	payload, err := json.Marshal(certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// stub the POST endpoint with an error status code and exercise the service
	mockSvc.On("Post", fmt.Sprintf(APIPathNetworkLoadBalancerCertificates, loadBalancerID), params).Return(payload, 499, nil)
	result, err := certSvc.CreateCertificate(loadBalancerID, params)
	assert.NotNil(err, "We are expecting an status code error")
	assert.Nil(result, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return result
}
// CreateCertificateFailJSONMocked test mocked function.
//
// Verifies that CreateCertificate reports an unmarshalling error when the
// mocked transport replies with a body that is not valid JSON.
func CreateCertificateFailJSONMocked(
	t *testing.T,
	loadBalancerID string,
	certificateIn *types.Certificate,
) *types.Certificate {
	assert := assert.New(t)
	// build the service under test on top of a mocked transport
	mockSvc := &utils.MockConcertoService{}
	certSvc, err := NewCertificateService(mockSvc)
	assert.Nil(err, "Couldn't load certificate service")
	assert.NotNil(certSvc, "Certificate service not instanced")
	// request parameters derived from the fixture
	params, err := utils.ItemConvertParams(*certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// a body that cannot be decoded as JSON
	badPayload := []byte{10, 20, 30}
	// stub the POST endpoint and exercise the service
	mockSvc.On("Post", fmt.Sprintf(APIPathNetworkLoadBalancerCertificates, loadBalancerID), params).Return(badPayload, 200, nil)
	result, err := certSvc.CreateCertificate(loadBalancerID, params)
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(result, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
	return result
}
// UpdateCertificateMocked test mocked function.
//
// Exercises the happy path of UpdateCertificate: the fixture is converted to
// the request parameter map, the mocked PUT replies with the same
// certificate, and the result is compared against the fixture.
func UpdateCertificateMocked(t *testing.T, loadBalancerID string, certificateIn *types.Certificate) *types.Certificate {
	assert := assert.New(t)
	// build the service under test on top of a mocked transport
	mockSvc := &utils.MockConcertoService{}
	certSvc, err := NewCertificateService(mockSvc)
	assert.Nil(err, "Couldn't load certificate service")
	assert.NotNil(certSvc, "Certificate service not instanced")
	// request parameters derived from the fixture
	params, err := utils.ItemConvertParams(*certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// serialize the fixture the mock will hand back
	payload, err := json.Marshal(certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// stub the PUT endpoint and exercise the service
	mockSvc.On("Put", fmt.Sprintf(APIPathNetworkLoadBalancerCertificate, loadBalancerID, certificateIn.ID), params).
		Return(payload, 200, nil)
	result, err := certSvc.UpdateCertificate(loadBalancerID, certificateIn.ID, params)
	assert.Nil(err, "Error updating certificate")
	assert.Equal(certificateIn, result, "UpdateCertificate returned different certificate")
	return result
}
// UpdateCertificateFailErrMocked test mocked function.
//
// It wires the certificate service on top of a mocked Concerto service whose
// Put call returns a transport error, then checks that UpdateCertificate
// surfaces that error and returns no certificate.
func UpdateCertificateFailErrMocked(
	t *testing.T,
	loadBalancerID string,
	certificateIn *types.Certificate,
) *types.Certificate {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewCertificateService(cs)
	assert.Nil(err, "Couldn't load certificate service")
	assert.NotNil(ds, "Certificate service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// to json
	dOut, err := json.Marshal(certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// call service
	cs.On("Put", fmt.Sprintf(APIPathNetworkLoadBalancerCertificate, loadBalancerID, certificateIn.ID), mapIn).
		Return(dOut, 200, fmt.Errorf("mocked error"))
	certificateOut, err := ds.UpdateCertificate(loadBalancerID, certificateIn.ID, mapIn)
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(certificateOut, "Expecting nil output")
	// testify's convention is Equal(expected, actual); the arguments were
	// previously swapped, which makes failure output misleading.
	assert.Equal("mocked error", err.Error(), "Error should be 'mocked error'")
	return certificateOut
}
// UpdateCertificateFailStatusMocked test mocked function.
//
// Verifies that UpdateCertificate converts a non-2xx HTTP status from the
// mocked Concerto service into an error that mentions the status code.
func UpdateCertificateFailStatusMocked(
	t *testing.T,
	loadBalancerID string,
	certificateIn *types.Certificate,
) *types.Certificate {
	assert := assert.New(t)
	// build the service under test on top of a mocked transport
	mockSvc := &utils.MockConcertoService{}
	certSvc, err := NewCertificateService(mockSvc)
	assert.Nil(err, "Couldn't load certificate service")
	assert.NotNil(certSvc, "Certificate service not instanced")
	// request parameters derived from the fixture
	params, err := utils.ItemConvertParams(*certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// serialize the fixture the mock will hand back
	payload, err := json.Marshal(certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// stub the PUT endpoint with an error status code and exercise the service
	mockSvc.On("Put", fmt.Sprintf(APIPathNetworkLoadBalancerCertificate, loadBalancerID, certificateIn.ID), params).
		Return(payload, 499, nil)
	result, err := certSvc.UpdateCertificate(loadBalancerID, certificateIn.ID, params)
	assert.NotNil(err, "We are expecting an status code error")
	assert.Nil(result, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return result
}
// UpdateCertificateFailJSONMocked test mocked function.
//
// Verifies that UpdateCertificate reports an unmarshalling error when the
// mocked transport replies with a body that is not valid JSON.
func UpdateCertificateFailJSONMocked(
	t *testing.T,
	loadBalancerID string,
	certificateIn *types.Certificate,
) *types.Certificate {
	assert := assert.New(t)
	// build the service under test on top of a mocked transport
	mockSvc := &utils.MockConcertoService{}
	certSvc, err := NewCertificateService(mockSvc)
	assert.Nil(err, "Couldn't load certificate service")
	assert.NotNil(certSvc, "Certificate service not instanced")
	// request parameters derived from the fixture
	params, err := utils.ItemConvertParams(*certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// a body that cannot be decoded as JSON
	badPayload := []byte{10, 20, 30}
	// stub the PUT endpoint and exercise the service
	mockSvc.On("Put", fmt.Sprintf(APIPathNetworkLoadBalancerCertificate, loadBalancerID, certificateIn.ID), params).
		Return(badPayload, 200, nil)
	result, err := certSvc.UpdateCertificate(loadBalancerID, certificateIn.ID, params)
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(result, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
	return result
}
// DeleteCertificateMocked test mocked function.
//
// Exercises the happy path of DeleteCertificate against a mocked transport.
func DeleteCertificateMocked(t *testing.T, loadBalancerID string, certificateIn *types.Certificate) {
	assert := assert.New(t)
	// build the service under test on top of a mocked transport
	mockSvc := &utils.MockConcertoService{}
	certSvc, err := NewCertificateService(mockSvc)
	assert.Nil(err, "Couldn't load certificate service")
	assert.NotNil(certSvc, "Certificate service not instanced")
	// serialize the fixture the mock will hand back
	payload, err := json.Marshal(certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// stub the DELETE endpoint and exercise the service
	mockSvc.On("Delete", fmt.Sprintf(APIPathNetworkLoadBalancerCertificate, loadBalancerID, certificateIn.ID)).
		Return(payload, 200, nil)
	err = certSvc.DeleteCertificate(loadBalancerID, certificateIn.ID)
	assert.Nil(err, "Error deleting certificate")
}
// DeleteCertificateFailErrMocked test mocked function.
//
// It wires the certificate service on top of a mocked Concerto service whose
// Delete call returns a transport error, then checks that DeleteCertificate
// surfaces that error.
func DeleteCertificateFailErrMocked(t *testing.T, loadBalancerID string, certificateIn *types.Certificate) {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewCertificateService(cs)
	assert.Nil(err, "Couldn't load certificate service")
	assert.NotNil(ds, "Certificate service not instanced")
	// to json
	dIn, err := json.Marshal(certificateIn)
	assert.Nil(err, "Certificate test data corrupted")
	// call service
	cs.On("Delete", fmt.Sprintf(APIPathNetworkLoadBalancerCertificate, loadBalancerID, certificateIn.ID)).
		Return(dIn, 200, fmt.Errorf("mocked error"))
	err = ds.DeleteCertificate(loadBalancerID, certificateIn.ID)
	assert.NotNil(err, "We are expecting an error")
	// testify's convention is Equal(expected, actual); the arguments were
	// previously swapped, which makes failure output misleading.
	assert.Equal("mocked error", err.Error(), "Error should be 'mocked error'")
}
// DeleteCertificateFailStatusMocked test mocked function
func DeleteCertificateFailStatusMocked(t *testing.T, loadBalancerID string, certificateIn *types.Certificate) {
assert := assert.New(t)
// wire up
cs := &utils.MockConcertoService{}
ds, err := NewCertificateService(cs)
assert.Nil(err, "Couldn't load certificate service")
assert.NotNil(ds, "Certificate service not instanced")
// to json
dIn, err := json.Marshal(certificateIn)
assert.Nil(err, "Certificate test data corrupted")
// call service
cs.On("Delete", fmt.Sprintf(APIPathNetworkLoadBalancerCertificate, loadBalancerID, certificateIn.ID)).
Return(dIn, 499, nil)
err = ds.DeleteCertificate(loadBalancerID, certificateIn.ID)
assert.NotNil(err, "We are expecting an status code error")
assert.Contains(err.Error(), "499", "Error should contain http code 499")
} | api/network/certificates_api_mocked.go | 0.675765 | 0.425963 | certificates_api_mocked.go | starcoder |
package c80machine
import (
"github.com/reiver/go-frame256x288"
"github.com/reiver/go-palette2048"
"github.com/reiver/go-spritesheet8x8x256"
"github.com/reiver/go-spritesheet32x32x256"
"github.com/reiver/go-text32x36"
)
// Type represents a fantasy virtual machine.
//
// All machine state lives in one flat byte array. The accessor methods
// below carve fixed regions out of it (palette, frame, tile map, sprite
// sheets, text matrix) using the PTR_*/LEN_* constants declared elsewhere
// in this package.
type Type struct {
	memory [memoryByteSize]uint8
}
// Palette provides access to the machines palette.
func (m *Type) Palette() palette2048.Slice {
	// The palette lives in the fixed [PTR_PALETTE, PTR_PALETTE+LEN_PALETTE) region.
	return palette2048.Slice(m.memory[PTR_PALETTE : PTR_PALETTE+LEN_PALETTE])
}
// frame provides access the machine's frame.
func (m *Type) frame() frame256x288.Slice {
	// The frame lives in the fixed [PTR_FRAME, PTR_FRAME+LEN_FRAME) region.
	return frame256x288.Slice(m.memory[PTR_FRAME : PTR_FRAME+LEN_FRAME])
}
// tilemap returns the raw bytes of the machine's tile map region.
func (m *Type) tilemap() []uint8 {
	return m.memory[PTR_TILEMAP : PTR_TILEMAP+LEN_TILEMAP]
}
// _tiles returns the raw bytes of the tile sprite-sheet region.
func (m *Type) _tiles() []uint8 {
	return m.memory[PTR_TILES : PTR_TILES+LEN_TILES]
}

// tiles provides access to the machine's sprite sheet for 8x8 pixel sprite (background) tiles.
func (m *Type) tiles() spritesheet8x8x256.Paletted {
	return spritesheet8x8x256.Paletted{
		Pix:      m._tiles(),
		Palette:  m.Palette(),
		Category: "tiles",
	}
}
// _sprites8x8 returns the raw bytes of the 8x8 sprite-sheet region.
func (m *Type) _sprites8x8() []uint8 {
	return m.memory[PTR_SPRITES8x8 : PTR_SPRITES8x8+LEN_SPRITES8x8]
}

// sprites8x8 provides access to the machine's sprite sheet for 8x8 pixel sprites.
func (m *Type) sprites8x8() spritesheet8x8x256.Paletted {
	return spritesheet8x8x256.Paletted{
		Pix:      m._sprites8x8(),
		Palette:  m.Palette(),
		Category: "sprites8x8",
	}
}
// _sprites32x32 returns the raw bytes of the 32x32 sprite-sheet region.
func (m *Type) _sprites32x32() []uint8 {
	return m.memory[PTR_SPRITES32x32 : PTR_SPRITES32x32+LEN_SPRITES32x32]
}

// sprites32x32 provides access to the machine's sprite sheet for 32x32 pixel sprites.
func (m *Type) sprites32x32() spritesheet32x32x256.Paletted {
	return spritesheet32x32x256.Paletted{
		Pix:      m._sprites32x32(),
		Palette:  m.Palette(),
		Category: "sprites32x32",
	}
}
// textMatrix provides access to the machine's text matrix.
func (receiver *Type) textMatrix() text32x36.Slice {
beginning := PTR_TEXTMATRIX
ending := beginning + LEN_TEXTMATRIX
p := receiver.memory[beginning:ending]
return text32x36.Slice(p)
} | machine/type.go | 0.79162 | 0.425068 | type.go | starcoder |
package views
import (
"github.com/pkg/errors"
"github.com/hyperledger-labs/fabric-smart-client/integration/fabric/iou/states"
"github.com/hyperledger-labs/fabric-smart-client/platform/fabric"
"github.com/hyperledger-labs/fabric-smart-client/platform/fabric/services/state"
"github.com/hyperledger-labs/fabric-smart-client/platform/view/services/assert"
"github.com/hyperledger-labs/fabric-smart-client/platform/view/view"
)
// CreateIOUResponderView is the lender-side view that answers a borrower's
// request to create a new IOU: it exchanges identities, validates the
// proposed transaction, endorses it, and waits for finality.
type CreateIOUResponderView struct{}

// Call runs the lender side of the IOU-creation flow.
func (i *CreateIOUResponderView) Call(context view.Context) (interface{}, error) {
	// As a first step, the lender responds to the request to exchange recipient identities.
	lender, borrower, err := state.RespondExchangeRecipientIdentities(context)
	assert.NoError(err, "failed exchanging recipient identities")
	// When the borrower runs the CollectEndorsementsView, at some point, the borrower sends the assembled transaction
	// to the lender. Therefore, the lender waits to receive the transaction.
	tx, err := state.ReceiveTransaction(context)
	assert.NoError(err, "failed receiving transaction")
	// The lender can now inspect the transaction to ensure it is as expected.
	// Here are examples of possible checks
	// Namespaces are properly populated
	assert.Equal(1, len(tx.Namespaces()), "expected only one namespace")
	assert.Equal("iou", tx.Namespaces()[0], "expected the [iou] namespace, got [%s]", tx.Namespaces()[0])
	// Commands are properly populated
	assert.Equal(1, tx.Commands().Count(), "expected only a single command, got [%s]", tx.Commands().Count())
	switch command := tx.Commands().At(0); command.Name {
	case "create":
		// If the create command is attached to the transaction then...
		// No inputs expected. The single output at index 0 should be an IOU state
		assert.Equal(0, tx.NumInputs(), "invalid number of inputs, expected 0, was [%d]", tx.NumInputs())
		// Fixed: the diagnostic previously printed tx.NumInputs() for the outputs check.
		assert.Equal(1, tx.NumOutputs(), "invalid number of outputs, expected 1, was [%d]", tx.NumOutputs())
		iouState := &states.IOU{}
		assert.NoError(tx.GetOutputAt(0, iouState))
		assert.False(iouState.Amount < 5, "invalid amount, expected at least 5, was [%d]", iouState.Amount)
		assert.Equal(2, iouState.Owners().Count(), "invalid state, expected 2 identities, was [%d]", iouState.Owners().Count())
		assert.True(iouState.Owners().Contain(lender), "invalid state, it does not contain lender identity")
		assert.True(command.Ids.Match([]view.Identity{lender, borrower}), "the command does not contain the lender and borrower identities")
		assert.True(iouState.Owners().Match([]view.Identity{lender, borrower}), "the state does not contain the lender and borrower identities")
		assert.NoError(tx.HasBeenEndorsedBy(borrower), "the borrower has not endorsed")
	default:
		return nil, errors.Errorf("invalid command, expected [create], was [%s]", command.Name)
	}
	// The lender is ready to send back the transaction signed
	_, err = context.RunView(state.NewEndorseView(tx))
	assert.NoError(err)
	// Finally, the lender waits that the transaction completes its lifecycle
	return context.RunView(state.NewFinalityView(tx))
}
type UpdateIOUResponderView struct{}
func (i *UpdateIOUResponderView) Call(context view.Context) (interface{}, error) {
// When the borrower runs the CollectEndorsementsView, at some point, the borrower sends the assembled transaction
// to the lender. Therefore, the lender waits to receive the transaction.
tx, err := state.ReceiveTransaction(context)
assert.NoError(err, "failed receiving transaction")
// The lender can now inspect the transaction to ensure it is as expected.
// Here are examples of possible checks
// Namespaces are properly populated
assert.Equal(1, len(tx.Namespaces()), "expected only one namespace")
assert.Equal("iou", tx.Namespaces()[0], "expected the [iou] namespace, got [%s]", tx.Namespaces()[0])
switch command := tx.Commands().At(0); command.Name {
case "update":
// If the update command is attached to the transaction then...
// One input and one output containing IOU states are expected
assert.Equal(1, tx.NumInputs(), "invalid number of inputs, expected 1, was %d", tx.NumInputs())
assert.Equal(1, tx.NumOutputs(), "invalid number of outputs, expected 1, was %d", tx.NumInputs())
inState := &states.IOU{}
assert.NoError(tx.GetInputAt(0, inState))
outState := &states.IOU{}
assert.NoError(tx.GetOutputAt(0, outState))
// Additional checks
// Same IDs
assert.Equal(inState.LinearID, outState.LinearID, "invalid state id, [%s] != [%s]", inState.LinearID, outState.LinearID)
// Valid Amount
assert.False(outState.Amount >= inState.Amount, "invalid amount, [%d] expected to be less or equal [%d]", outState.Amount, inState.Amount)
// Same owners
assert.True(inState.Owners().Match(outState.Owners()), "invalid owners, input and output should have the same owners")
assert.Equal(2, inState.Owners().Count(), "invalid state, expected 2 identities, was [%d]", inState.Owners().Count())
// Is the lender one of the owners?
lenderFound := fabric.GetDefaultLocalMembership(context).IsMe(inState.Owners()[0]) != fabric.GetDefaultLocalMembership(context).IsMe(inState.Owners()[1])
assert.True(lenderFound, "lender identity not found")
// Did the borrower sign?
assert.NoError(tx.HasBeenEndorsedBy(inState.Owners().Filter(
func(identity view.Identity) bool {
return !fabric.GetDefaultLocalMembership(context).IsMe(identity)
})...), "the borrower has not endorsed")
default:
return nil, errors.Errorf("invalid command, expected [create], was [%s]", command.Name)
}
// The lender is ready to send back the transaction signed
_, err = context.RunView(state.NewEndorseView(tx))
assert.NoError(err)
// Finally, the lender waits that the transaction completes its lifecycle
return context.RunView(state.NewFinalityView(tx))
} | integration/fabric/iou/views/lender.go | 0.715225 | 0.482612 | lender.go | starcoder |
package tensor
import (
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/storage"
)
// Transpose performs an in-place transpose of a's data. Only natively
// accessible DenseTensors are supported.
//
// NOTE(review): expStrides is passed through but not used by the dense
// paths below — presumably the iterator derived from a.Info() already
// reflects the transposed layout; confirm against the caller.
func (e StdEng) Transpose(a Tensor, expStrides []int) error {
	if !a.IsNativelyAccessible() {
		return errors.Errorf("Cannot Transpose() on non-natively accessible tensor")
	}
	if dt, ok := a.(DenseTensor); ok {
		e.denseTranspose(dt, expStrides)
		return nil
	}
	// Fixed typo in the error message ("Tranpose" -> "Transpose").
	return errors.Errorf("Transpose for tensor of %T not supported", a)
}
// denseTranspose transposes a's data in place, dispatching on the byte width
// of the element type. Strings take a dedicated path since they are not
// fixed-width scalar data.
func (e StdEng) denseTranspose(a DenseTensor, expStrides []int) {
	if a.rtype() == String.Type {
		e.denseTransposeString(a, expStrides)
		return
	}
	size := a.rtype().Size()
	switch size {
	case 1:
		e.denseTranspose1(a, expStrides)
	case 2:
		e.denseTranspose2(a, expStrides)
	case 4:
		e.denseTranspose4(a, expStrides)
	case 8:
		e.denseTranspose8(a, expStrides)
	default:
		e.denseTransposeArbitrary(a, expStrides)
	}
}
// denseTranspose1 transposes 1-byte-wide elements in place: values are
// gathered into a scratch buffer in iterator order, then copied back over
// the original storage.
func (e StdEng) denseTranspose1(a DenseTensor, expStrides []int) {
	var scratch array
	e.makeArray(&scratch, a.Dtype(), a.Size())
	dst := scratch.Uint8s()
	src := a.hdr().Uint8s()
	it := newFlatIterator(a.Info())
	j := 0
	for i, err := it.Next(); err == nil; i, err = it.Next() {
		dst[j] = src[i]
		j++
	}
	copy(src, dst)
}
// denseTranspose2 transposes 2-byte-wide elements in place: values are
// gathered into a scratch buffer in iterator order, then copied back over
// the original storage.
func (e StdEng) denseTranspose2(a DenseTensor, expStrides []int) {
	var scratch array
	e.makeArray(&scratch, a.Dtype(), a.Size())
	dst := scratch.Uint16s()
	src := a.hdr().Uint16s()
	it := newFlatIterator(a.Info())
	j := 0
	for i, err := it.Next(); err == nil; i, err = it.Next() {
		dst[j] = src[i]
		j++
	}
	copy(src, dst)
}
// denseTranspose4 transposes 4-byte-wide elements in place: values are
// gathered into a scratch buffer in iterator order, then copied back over
// the original storage.
func (e StdEng) denseTranspose4(a DenseTensor, expStrides []int) {
	var scratch array
	e.makeArray(&scratch, a.Dtype(), a.Size())
	dst := scratch.Uint32s()
	src := a.hdr().Uint32s()
	it := newFlatIterator(a.Info())
	j := 0
	for i, err := it.Next(); err == nil; i, err = it.Next() {
		dst[j] = src[i]
		j++
	}
	copy(src, dst)
}
// denseTranspose8 transposes 8-byte-wide elements in place: values are
// gathered into a scratch buffer in iterator order, then copied back over
// the original storage.
func (e StdEng) denseTranspose8(a DenseTensor, expStrides []int) {
	var scratch array
	e.makeArray(&scratch, a.Dtype(), a.Size())
	dst := scratch.Uint64s()
	src := a.hdr().Uint64s()
	it := newFlatIterator(a.Info())
	j := 0
	for i, err := it.Next(); err == nil; i, err = it.Next() {
		dst[j] = src[i]
		j++
	}
	copy(src, dst)
}
// denseTransposeString transposes string elements in place: values are
// gathered into a scratch buffer in iterator order, then copied back over
// the original storage.
func (e StdEng) denseTransposeString(a DenseTensor, expStrides []int) {
	var scratch array
	e.makeArray(&scratch, a.Dtype(), a.Size())
	dst := scratch.Strings()
	src := a.hdr().Strings()
	it := newFlatIterator(a.Info())
	j := 0
	for i, err := it.Next(); err == nil; i, err = it.Next() {
		dst[j] = src[i]
		j++
	}
	copy(src, dst)
}
func (e StdEng) denseTransposeArbitrary(a DenseTensor, expStrides []int) {
rtype := a.rtype()
typeSize := int(rtype.Size())
var tmpArr array
e.makeArray(&tmpArr, a.Dtype(), a.Size())
// arbs := storage.AsByteSlice(tmpArr.hdr(), rtype)
arbs := tmpArr.byteSlice()
orig := storage.AsByteSlice(a.hdr(), rtype)
it := newFlatIterator(a.Info())
var j int
for i, err := it.Next(); err == nil; i, err = it.Next() {
srcStart := i * typeSize
srcEnd := srcStart + typeSize
dstStart := j * typeSize
dstEnd := dstStart + typeSize
copy(arbs[dstStart:dstEnd], orig[srcStart:srcEnd])
j++
}
copy(orig, arbs)
} | vendor/gorgonia.org/tensor/defaultengine_matop_transpose.go | 0.522933 | 0.429429 | defaultengine_matop_transpose.go | starcoder |
package parser
import (
"fmt"
"regexp"
"strconv"
c "opensource.go.fig.lu/oops2core/internal/common"
)
// armRegisters mirrors the common ARMRegisters type.
// NOTE(review): this type is not referenced in the visible code — confirm
// it is used elsewhere in the package before removing.
type armRegisters c.ARMRegisters
// crashRegexp matches an ARM kernel-oops style crash dump: the register
// lines (pc, lr, psr, sp, ip, fp, r10..r0) followed by the "Stack: (...)"
// block. Each register value is a capture group; the numeric comments mark
// every fifth group.
var crashRegexp = regexp.MustCompile(`(?sm)` +
	` pc : \[<([[:xdigit:]]+)>\].*?` + // 1
	` lr : \[<([[:xdigit:]]+)>\].*?` +
	` psr: ([[:xdigit:]]+).*?` +
	` sp : ([[:xdigit:]]+).*?` +
	` ip : ([[:xdigit:]]+).*?` + // 5
	` fp : ([[:xdigit:]]+).*?` +
	` r10: ([[:xdigit:]]+).*?` +
	` r9 : ([[:xdigit:]]+).*?` +
	` r8 : ([[:xdigit:]]+).*?` +
	` r7 : ([[:xdigit:]]+).*?` + // 10
	` r6 : ([[:xdigit:]]+).*?` +
	` r5 : ([[:xdigit:]]+).*?` +
	` r4 : ([[:xdigit:]]+).*?` +
	` r3 : ([[:xdigit:]]+).*?` +
	` r2 : ([[:xdigit:]]+).*?` + // 15
	` r1 : ([[:xdigit:]]+).*?` +
	` r0 : ([[:xdigit:]]+).*?` +
	`Stack: \(.*?\)$\r?\n((^.* [[:xdigit:]]+: [[:xdigit:] ]+$(\r?\n)?)+)`)
// crashRegexpStackSubgroup is the index of the capture group holding the
// whole stack-dump text (17 register groups precede it).
const crashRegexpStackSubgroup = 18
// stackElementRegexp extracts each 8-hex-digit word from a stack dump line.
var stackElementRegexp = regexp.MustCompile(`([[:xdigit:]]{8})( |\n)`)
// parseWord parses a hexadecimal string into a 32-bit word.
func parseWord(text string) (uint32, error) {
	v, err := strconv.ParseUint(text, 16, 32)
	if err != nil {
		return 0, err
	}
	return uint32(v), nil
}
// parseRegisters fills an ARMRegisters value from the submatches of
// crashRegexp. The pointers in orderInText are listed in the same order as
// the capture groups appear in the crash text (pc, lr, psr, sp, ip, fp,
// r10..r0), so capture group i+1 is parsed into the i-th slot.
// Returns a zero ARMRegisters and the parse error if any group is invalid.
func parseRegisters(m []string) (c.ARMRegisters, error) {
	var ret c.ARMRegisters
	var err error
	// Destination slots, ordered to match the capture groups of crashRegexp.
	orderInText := []*uint32{
		&ret.R[c.PC], &ret.R[c.LR], &ret.CPSR,
		&ret.R[c.SP], &ret.R[c.IP], &ret.R[c.FP],
		&ret.R[c.R10], &ret.R[c.R9], &ret.R[c.R8],
		&ret.R[c.R7], &ret.R[c.R6], &ret.R[c.R5],
		&ret.R[c.R4], &ret.R[c.R3], &ret.R[c.R2],
		&ret.R[c.R1], &ret.R[c.R0]}
	for i, reg := range orderInText {
		// m[0] is the whole match, so groups start at index 1.
		*reg, err = parseWord(m[i+1])
		if err != nil {
			return c.ARMRegisters{}, err
		}
	}
	return ret, nil
}
// parseStack extracts the stack words from the stack-dump capture group of
// crashRegexp. Each matched 8-hex-digit token is parsed and appended in
// order of appearance.
func parseStack(m []string) (c.Stack, error) {
	var ret c.Stack
	elems := stackElementRegexp.FindAllStringSubmatch(m[crashRegexpStackSubgroup], -1)
	for _, z := range elems {
		w, err := parseWord(z[1])
		if err != nil {
			// Previously the error was discarded (`return c.Stack{}, nil`),
			// silently yielding an empty stack; propagate it instead.
			return c.Stack{}, err
		}
		ret = append(ret, w)
	}
	return ret, nil
}
// ParseCrash extracts information about registers
// and stack from the provided crash report text.
// It returns an error if the text does not match the expected crash-dump
// layout or if any register/stack value fails to parse.
func ParseCrash(crashText string) (c.CrashInfo, error) {
	m := crashRegexp.FindStringSubmatch(crashText)
	// FindStringSubmatch returns nil on no match (len 0); a successful match
	// has NumSubexp()+1 entries, so this check only triggers on no match.
	if len(m) < crashRegexp.NumSubexp() {
		return c.CrashInfo{}, fmt.Errorf("can't parse crash text")
	}
	regs, err := parseRegisters(m)
	if err != nil {
		return c.CrashInfo{}, err
	}
	stack, err := parseStack(m)
	if err != nil {
		return c.CrashInfo{}, err
	}
	return c.CrashInfo{Regs: regs, Stack: stack}, nil
}
package imagekit
import (
"bytes"
"fmt"
"image"
"image/gif"
"image/jpeg"
"image/png"
"github.com/disintegration/imaging"
)
// GetThumbnail creates an image in the given size, trying to encode it to the given max bytes
func GetThumbnail(imageBytes []byte, width, height int, maxBytes int) ([]byte, string, error) {
return process(imageBytes, maxBytes, func(image image.Image) image.Image {
return imaging.Thumbnail(image, width, height, imaging.MitchellNetravali)
})
}
// Resize resizes the image to the specified size, trying to encode it to the given max bytes
func Resize(imageBytes []byte, width, height int, maxBytes int) ([]byte, string, error) {
return process(imageBytes, maxBytes, func(image image.Image) image.Image {
return imaging.Resize(image, width, height, imaging.MitchellNetravali)
})
}
// Fit scales down the image to fit in the bounding box
func Fit(imageBytes []byte, width, height int, maxBytes int) ([]byte, string, error) {
return process(imageBytes, maxBytes, func(image image.Image) image.Image {
return imaging.Fit(image, width, height, imaging.MitchellNetravali)
})
}
// FitRect scales the given dimensions to fit inside the maxWidth-maxHeight
// box while preserving the source aspect ratio.
func FitRect(width, height int, maxWidth, maxHeight int) (newWidth, newHeight int) {
	srcRatio := float64(width) / float64(height)
	boxRatio := float64(maxWidth) / float64(maxHeight)
	if srcRatio > boxRatio {
		// Wider than the box: width is the limiting dimension.
		return maxWidth, int(float64(maxWidth) / srcRatio)
	}
	// Taller than (or same shape as) the box: height is the limiting dimension.
	return int(float64(maxHeight) * srcRatio), maxHeight
}
// process decodes imageBytes, applies imageProcessor to the decoded image,
// and re-encodes the result (shrinking quality as needed to fit maxBytes).
// It returns the encoded bytes and their mime type.
func process(imageBytes []byte, maxBytes int, imageProcessor func(image image.Image) image.Image) ([]byte, string, error) {
	decoded, format, err := image.Decode(bytes.NewReader(imageBytes))
	if err != nil {
		return nil, "", fmt.Errorf("Unable to decode image from %v bytes: %v", len(imageBytes), err)
	}
	processed := imageProcessor(decoded)
	var out bytes.Buffer
	encodedFormat, err := encodeImage(&out, format, processed, maxBytes, 100)
	if err != nil {
		return nil, "", fmt.Errorf("Unable to encode rescaled image: %v", err)
	}
	mimeType, err := GetMimeType(encodedFormat)
	if err != nil {
		return nil, "", fmt.Errorf("Unable to get mime type for format %v: %v", format, err)
	}
	return out.Bytes(), mimeType, nil
}
func encodeImage(buffer *bytes.Buffer, format string, image image.Image, maxBytes int, quality int) (string, error) {
var err error
switch format {
case "png":
err = png.Encode(buffer, image)
case "gif":
err = gif.Encode(buffer, image, nil)
case "jpeg":
err = jpeg.Encode(buffer, image, &jpeg.Options{Quality: quality})
default:
err = fmt.Errorf("Unknown image format: %v", format)
}
if err != nil {
return "", err
}
if buffer.Len() > maxBytes && quality > 35 {
format = "jpeg"
buffer.Reset()
return encodeImage(buffer, format, image, maxBytes, quality-15)
}
return format, nil
}
func ParseMimeType(imageBytes []byte) (string, error) {
_, imageFormat, err := image.DecodeConfig(bytes.NewReader(imageBytes))
if err != nil {
return "", fmt.Errorf("Clould not get mime type from image: %v", err)
}
mimeType, err := GetMimeType(imageFormat)
if err != nil {
return "", err
}
return mimeType, err
}
// GetMimeType returns the mime type for the given image format cooresponding to registered image types from the image package.
func GetMimeType(imageFormat string) (string, error) {
switch imageFormat {
case "png":
return "image/png", nil
case "gif":
return "image/gif", nil
case "jpg":
case "jpeg":
return "image/jpeg", nil
}
return "", fmt.Errorf("Unknown image format: ", imageFormat)
} | imagekit/images.go | 0.693369 | 0.535098 | images.go | starcoder |
package chess
type Piece rune
const (
BlackRook Piece = '♜'
WhiteRook Piece = '♖'
BlackKnight Piece = '♞'
WhiteKnight Piece = '♘'
BlackKing Piece = '♚'
WhiteKing Piece = '♔'
BlackQueen Piece = '♛'
WhiteQueen Piece = '♕'
BlackBishop Piece = '♝'
WhiteBishop Piece = '♗'
BlackPawn Piece = '♟'
WhitePawn Piece = '♙'
)
// Value returns a piece's value. Returns 0 for king.
func Value(p Piece) int {
switch p {
case BlackPawn, WhitePawn:
return 1
case BlackKnight, WhiteKnight, BlackBishop, WhiteBishop:
return 3
case BlackRook, WhiteRook:
return 5
case BlackQueen, WhiteQueen:
return 9
}
return 0
}
func IsBlack(p Piece) bool {
switch p {
default:
return false
case BlackRook, BlackKnight, BlackKing, BlackQueen, BlackBishop, BlackPawn:
return true
}
}
type Chessboard struct {
Board [8][8]Piece
WhiteCantCastleLeft, WhiteCantCastleRight, BlackCantCastleLeft, BlackCantCastleRight bool
CanBeEnPassant *[2]int8
}
func NewChessboard() *Chessboard {
return &Chessboard{Board: [8][8]Piece{
[8]Piece{'♜', '♞', '♝', '♛', '♚', '♝', '♞', '♜'},
[8]Piece{'♟', '♟', '♟', '♟', '♟', '♟', '♟', '♟'},
[8]Piece{0, 0, 0, 0, 0, 0, 0, 0},
[8]Piece{0, 0, 0, 0, 0, 0, 0, 0},
[8]Piece{0, 0, 0, 0, 0, 0, 0, 0},
[8]Piece{0, 0, 0, 0, 0, 0, 0, 0},
[8]Piece{'♙', '♙', '♙', '♙', '♙', '♙', '♙', '♙'},
[8]Piece{'♖', '♘', '♗', '♕', '♔', '♗', '♘', '♖'},
}}
}
// TotalValue returns the total value worth of pieces the specified colour has
func (cb *Chessboard) TotalValue(black bool) (value int) {
for y, _ := range cb.Board {
for _, p := range cb.Board[y] {
if p == 0 || IsBlack(p) != black {
continue
}
value += Value(p)
}
}
return
}
// IsStalemated returns true if nobody can move.
func (cb *Chessboard) IsStalemated() bool {
return cb.IsCheckmated(true) && cb.IsCheckmated(false)
}
// IsCheckmated returns true if the colour cannot move anywhere.
func (cb *Chessboard) IsCheckmated(black bool) bool {
for y, _ := range cb.Board {
for x, p := range cb.Board[y] {
if p == 0 || IsBlack(p) != black {
continue
}
moves, enpassant, castleleft, castleright := cb.PossibleMoves(int8(x), int8(y))
if len(moves) > 0 || len(enpassant) > 0 || castleleft || castleright {
return false
}
}
}
return true
}
// Threat returns all threatened spaces by a particular colour.
func (cb *Chessboard) Threat(black bool) (threatBoard [8][8]bool) {
var threatMoves [][2]int8
for y, _ := range cb.Board {
for x, p := range cb.Board[y] {
if p == 0 || IsBlack(p) != black {
continue
}
if possibleThreats := cb.PossibleThreats(int8(x), int8(y)); possibleThreats != nil {
threatMoves = append(threatMoves, possibleThreats...)
}
}
}
for _, pos := range threatMoves {
threatBoard[pos[1]][pos[0]] = true
}
return
}
// move returns a possible move (nil if none), and if it hit a piece
func (cb *Chessboard) move(x, y int8, black bool) (*[2]int8, bool) {
if x < 0 || x > 7 || y < 0 || y > 7 {
return nil, false
}
if cb.Board[y][x] != 0 {
if IsBlack(cb.Board[y][x]) != black {
return &[2]int8{x, y}, true
}
return nil, true
}
return &[2]int8{x, y}, false
}
// pawnMoves returns both moves and threatened spaces
func (cb *Chessboard) pawnMoves(x, y int8, black bool) (Moves, Threats [][2]int8, EnPassantKill *[2]int8) {
if black {
// regular movement
move, hit := cb.move(x, y+1, black)
if move != nil && !hit {
Moves = append(Moves, [2]int8{move[0], move[1]})
if y == 1 { // double-move
move, hit = cb.move(x, y+2, black)
if move != nil && !hit {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
}
}
// kill moves
move, hit = cb.move(x+1, y+1, black)
if move != nil {
if hit {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
Threats = append(Threats, [2]int8{move[0], move[1]})
} else if hit {
Threats = append(Threats, [2]int8{x + 1, y + 1})
}
move, hit = cb.move(x-1, y+1, black)
if move != nil && hit {
if hit {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
Threats = append(Threats, [2]int8{move[0], move[1]})
} else if hit {
Threats = append(Threats, [2]int8{x - 1, y + 1})
}
} else {
move, hit := cb.move(x, y-1, black)
if move != nil && !hit {
Moves = append(Moves, [2]int8{move[0], move[1]})
if y == 6 { // double-move
move, hit = cb.move(x, y-2, black)
if move != nil && !hit {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
}
}
// kill moves
move, hit = cb.move(x+1, y-1, black)
if move != nil {
if hit {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
Threats = append(Threats, [2]int8{move[0], move[1]})
} else if hit {
Threats = append(Threats, [2]int8{x + 1, y - 1})
}
move, hit = cb.move(x-1, y-1, black)
if move != nil && hit {
if hit {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
Threats = append(Threats, [2]int8{move[0], move[1]})
} else if hit {
Threats = append(Threats, [2]int8{x - 1, y - 1})
}
}
specialThreats := cb.canEnPassant(x, y)
if specialThreats != nil {
Threats = append(Threats, *specialThreats)
EnPassantKill = specialThreats
}
return
}
func (cb *Chessboard) kingMoves(x, y int8, black bool) (Moves [][2]int8) {
// up
move, _ := cb.move(x, y-1, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
// up-right
move, _ = cb.move(x+1, y-1, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
// right
move, _ = cb.move(x+1, y, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
// down-right
move, _ = cb.move(x+1, y+1, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
// down
move, _ = cb.move(x, y+1, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
// down-left
move, _ = cb.move(x-1, y+1, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
// left
move, _ = cb.move(x-1, y, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
// up-left
move, _ = cb.move(x-1, y-1, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
return
}
func (cb *Chessboard) knightMoves(x, y int8, black bool) (Moves [][2]int8) {
// up-left
move, _ := cb.move(x-1, y-2, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
// up-right
move, _ = cb.move(x+1, y-2, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
// right-up
move, _ = cb.move(x+2, y-1, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
// right-down
move, _ = cb.move(x+2, y+1, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
// down-right
move, _ = cb.move(x+1, y+2, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
// down-left
move, _ = cb.move(x-1, y+2, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
// left-down
move, _ = cb.move(x-2, y+1, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
// left-up
move, _ = cb.move(x-2, y-1, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
return
}
func (cb *Chessboard) bishopMoves(x, y int8, black bool) (Moves [][2]int8) {
// up-left
for ty, tx := y-1, x-1; tx >= 0 && ty >= 0; tx, ty = tx-1, ty-1 {
move, hit := cb.move(tx, ty, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
if hit {
break
}
}
// up-right
for ty, tx := y-1, x+1; tx < 8 && ty >= 0; tx, ty = tx+1, ty-1 {
move, hit := cb.move(tx, ty, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
if hit {
break
}
}
// down-right
for ty, tx := y+1, x-1; tx >= 0 && ty < 8; tx, ty = tx-1, ty+1 {
move, hit := cb.move(tx, ty, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
if hit {
break
}
}
// down-left
for ty, tx := y+1, x+1; tx < 8 && ty <= 8; tx, ty = tx+1, ty+1 {
move, hit := cb.move(tx, ty, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
if hit {
break
}
}
return
}
func (cb *Chessboard) rookMoves(x, y int8, black bool) (Moves [][2]int8) {
// up
for ty := y - 1; ty >= 0; ty-- {
move, hit := cb.move(x, ty, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
if hit {
break
}
}
// down
for ty := y + 1; ty < 8; ty++ {
move, hit := cb.move(x, ty, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
if hit {
break
}
}
// left
for tx := x - 1; tx >= 0; tx-- {
move, hit := cb.move(tx, y, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
if hit {
break
}
}
// right
for tx := x + 1; tx < 8; tx++ {
move, hit := cb.move(tx, y, black)
if move != nil {
Moves = append(Moves, [2]int8{move[0], move[1]})
}
if hit {
break
}
}
return
}
// Returns whether a particular colour is in check.
func (cb *Chessboard) IsCheck(black bool) bool {
var kx, ky int
for y, row := range cb.Board {
for x, _ := range row {
if (black && cb.Board[y][x] == BlackKing) || (!black && cb.Board[y][x] == WhiteKing) {
kx, ky = x, y
break
}
}
}
return cb.Threat(!black)[ky][kx]
}
// Returns false if the move would put the player moving in check
func (cb *Chessboard) TestMove(from, to [2]int8) bool {
board := new(Chessboard)
*board = *cb
board.Board[to[1]][to[0]] = board.Board[from[1]][from[0]]
board.Board[from[1]][from[0]] = 0
if board.IsCheck(IsBlack(board.Board[to[1]][to[0]])) {
return false
}
return true
}
// Performs a move and returns true if a move would result in a check for the opposing player
func (cb *Chessboard) DoMove(from, to [2]int8) bool {
black := IsBlack(cb.Board[from[1]][from[0]])
cb.CanBeEnPassant = nil
if (black && from[1] == 1 && to[1] == 3 && cb.Board[from[1]][from[0]] == BlackPawn) || (!black && from[1] == 6 && to[1] == 4 && cb.Board[from[1]][from[0]] == WhitePawn) {
cb.CanBeEnPassant = &to
}
if black {
if cb.Board[from[1]][from[0]] == BlackKing {
cb.BlackCantCastleLeft = true
cb.BlackCantCastleRight = true
} else if cb.Board[from[1]][from[0]] == BlackRook {
if from[0] == 0 && from[0] == 7 {
cb.BlackCantCastleLeft = true
} else if from[0] == 7 && from[0] == 7 {
cb.BlackCantCastleRight = true
}
}
} else {
if cb.Board[from[1]][from[0]] == WhiteKing {
cb.WhiteCantCastleLeft = true
cb.WhiteCantCastleRight = true
} else if cb.Board[from[1]][from[0]] == WhiteRook {
if from[0] == 0 && from[0] == 0 {
cb.WhiteCantCastleLeft = true
} else if from[0] == 7 && from[0] == 0 {
cb.WhiteCantCastleRight = true
}
}
}
cb.Board[to[1]][to[0]] = cb.Board[from[1]][from[0]]
cb.Board[from[1]][from[0]] = 0
if cb.IsCheck(!IsBlack(cb.Board[to[1]][to[0]])) {
return true
}
return false
}
// Returns false if the move would put the player moving in check
func (cb *Chessboard) TestEnPassant(from, to [2]int8) bool {
board := new(Chessboard)
*board = *cb
var modifier int8 // y modifier
if IsBlack(board.Board[from[1]][from[0]]) {
modifier = 1
} else {
modifier = -1
}
board.Board[to[1]][to[0]] = 0
board.Board[to[1]+modifier][to[0]] = board.Board[from[1]][from[0]]
board.Board[from[1]][from[0]] = 0
if board.IsCheck(IsBlack(board.Board[to[1]+modifier][to[0]])) {
return false
}
return true
}
// Performs a move and returns true if a move would result in a check for the opposing player
func (cb *Chessboard) DoEnPassant(from, to [2]int8) bool {
var modifier int8 // y modifier
if IsBlack(cb.Board[from[1]][from[0]]) {
modifier = 1
} else {
modifier = -1
}
cb.Board[to[1]][to[0]] = 0
cb.Board[to[1]+modifier][to[0]] = cb.Board[from[1]][from[0]]
cb.Board[from[1]][from[0]] = 0
cb.CanBeEnPassant = nil
if cb.IsCheck(!IsBlack(cb.Board[to[1]+modifier][to[0]])) {
return true
}
return false
}
// Returns false if the move would put the player moving in check
func (cb *Chessboard) TestCastle(from [2]int8, left bool) bool {
board := new(Chessboard)
*board = *cb
black := IsBlack(board.Board[from[1]][from[0]])
if left {
board.Board[from[1]][2] = board.Board[from[1]][from[0]]
board.Board[from[1]][from[0]] = 0
board.Board[from[1]][3] = board.Board[from[1]][0]
board.Board[from[1]][0] = 0
} else {
board.Board[from[1]][6] = board.Board[from[1]][from[0]]
board.Board[from[1]][from[0]] = 0
board.Board[from[1]][5] = board.Board[from[1]][7]
board.Board[from[1]][7] = 0
}
if board.IsCheck(black) {
return false
}
return true
}
// Performs a move and returns true if a move would result in a check for the opposing player
func (cb *Chessboard) DoCastle(from [2]int8, left bool) bool {
black := IsBlack(cb.Board[from[1]][from[0]])
if left {
cb.Board[from[1]][2] = cb.Board[from[1]][from[0]]
cb.Board[from[1]][from[0]] = 0
cb.Board[from[1]][3] = cb.Board[from[1]][0]
cb.Board[from[1]][0] = 0
} else {
cb.Board[from[1]][6] = cb.Board[from[1]][from[0]]
cb.Board[from[1]][from[0]] = 0
cb.Board[from[1]][5] = cb.Board[from[1]][7]
cb.Board[from[1]][7] = 0
}
if black {
cb.BlackCantCastleLeft = true
cb.BlackCantCastleRight = true
} else {
cb.WhiteCantCastleLeft = true
cb.WhiteCantCastleRight = true
}
if cb.IsCheck(!black) {
return true
}
return false
}
// PromotePawn promotes a pawn at x, y, returns true on success, and false on failure.
func (cb *Chessboard) PromotePawn(x, y int8, to Piece) bool {
if y != 0 && y != 7 {
return false
}
piece := cb.Board[y][x]
black := IsBlack(piece)
switch to {
case WhiteRook, WhiteKnight, WhiteQueen, WhiteBishop:
if !black {
cb.Board[y][x] = to
return true
}
case BlackRook, BlackKnight, BlackQueen, BlackBishop:
if black {
cb.Board[y][x] = to
return true
}
}
return false
}
type MoveType int
const (
RegularMove MoveType = iota
EnPassant
CastleLeft
CastleRight
)
// Checks if a piece can move from the from position, to the to position.
func (cb *Chessboard) IsLegal(from, to [2]int8, movet MoveType) bool {
moves, enpassantkill, castleleft, castleright := cb.PossibleMoves(from[0], from[1])
switch movet {
case RegularMove:
for _, move := range moves {
if to == move {
return true
}
}
case EnPassant:
for _, move := range enpassantkill {
if to == move {
return true
}
}
case CastleLeft:
return castleleft
case CastleRight:
return castleright
}
return false
}
// canCastle checks if the king at x/y can castle. Returns 2 bools, first is `CanCastleLeft`, second is `CanCastleRight`.
func (cb *Chessboard) canCastle(x, y int8) (CanCastleLeft, CanCastleRight bool) {
if cb.Board[y][x] != BlackKing && cb.Board[y][x] != WhiteKing {
return
}
black := IsBlack(cb.Board[y][x])
if ((black && !cb.BlackCantCastleLeft) || (!black && !cb.WhiteCantCastleLeft)) && cb.Board[y][x-1] == 0 && cb.Board[y][x-2] == 0 && cb.Board[y][x-3] == 0 {
CanCastleLeft = true
}
if ((black && !cb.BlackCantCastleRight) || (!black && !cb.WhiteCantCastleRight)) && cb.Board[y][x+1] == 0 && cb.Board[y][x+2] == 0 {
CanCastleRight = true
}
return
}
// Should return all legal moves by a piece at x, y.
func (cb *Chessboard) PossibleMoves(x, y int8) (OutMoves, OutEnPassantKill [][2]int8, CanCastleLeft, CanCastleRight bool) {
var (
Moves [][2]int8
EnPassantKill *[2]int8
)
switch P := cb.Board[y][x]; P {
case BlackRook:
Moves = append(Moves, cb.rookMoves(x, y, true)...)
case WhiteRook:
Moves = append(Moves, cb.rookMoves(x, y, false)...)
case BlackKnight:
Moves = append(Moves, cb.knightMoves(x, y, true)...)
case WhiteKnight:
Moves = append(Moves, cb.knightMoves(x, y, false)...)
case BlackBishop:
Moves = append(Moves, cb.bishopMoves(x, y, true)...)
case WhiteBishop:
Moves = append(Moves, cb.bishopMoves(x, y, false)...)
case BlackQueen:
Moves = append(Moves, cb.rookMoves(x, y, true)...)
Moves = append(Moves, cb.bishopMoves(x, y, true)...)
case WhiteQueen:
Moves = append(Moves, cb.rookMoves(x, y, false)...)
Moves = append(Moves, cb.bishopMoves(x, y, false)...)
case BlackPawn:
var pawnMoves [][2]int8
pawnMoves, _, EnPassantKill = cb.pawnMoves(x, y, true)
Moves = append(Moves, pawnMoves...)
case WhitePawn:
var pawnMoves [][2]int8
pawnMoves, _, EnPassantKill = cb.pawnMoves(x, y, false)
Moves = append(Moves, pawnMoves...)
case BlackKing:
Moves = append(Moves, cb.kingMoves(x, y, true)...)
CanCastleLeft, CanCastleRight = cb.canCastle(x, y)
case WhiteKing:
Moves = append(Moves, cb.kingMoves(x, y, false)...)
CanCastleLeft, CanCastleRight = cb.canCastle(x, y)
}
// Test if they're *really* possible.
from := [2]int8{x, y}
for _, move := range Moves {
if cb.TestMove(from, move) {
OutMoves = append(OutMoves, move)
}
}
if EnPassantKill != nil {
move := *EnPassantKill
if cb.TestEnPassant(from, move) {
OutEnPassantKill = [][2]int8{move}
}
}
if CanCastleLeft {
CanCastleLeft = cb.TestCastle(from, true)
}
if CanCastleRight {
CanCastleRight = cb.TestCastle(from, false)
}
return
}
// canEnPassant returns a piece the pawn in x, y could eliminate via en passant or nil.
func (cb *Chessboard) canEnPassant(x, y int8) *[2]int8 {
if cb.Board[y][x] != BlackPawn && cb.Board[y][x] != WhitePawn || cb.CanBeEnPassant == nil || y < 2 || y > 5 {
return nil
}
var modifier int8 // y modifier
if IsBlack(cb.Board[y][x]) {
modifier = 1
} else {
modifier = -1
}
var out *[2]int8
black := IsBlack(cb.Board[y][x])
move := *cb.CanBeEnPassant
if y == move[1] && IsBlack(cb.Board[move[1]][move[0]]) != black && cb.Board[move[1]+modifier][move[0]] == 0 {
if x > 0 && x-1 == move[0] {
out = &[2]int8{move[0], move[1]}
}
if x < 7 && x+1 == move[0] {
out = &[2]int8{move[0], move[1]}
}
}
return out
}
// PossibleThreats returns all the possibly threatened spaces, can contain duplicates
func (cb *Chessboard) PossibleThreats(x, y int8) (Moves [][2]int8) {
switch P := cb.Board[y][x]; P {
case BlackRook, WhiteRook:
Moves = append(Moves, cb.rookMoves(x, y, true)...)
Moves = append(Moves, cb.rookMoves(x, y, false)...)
case BlackKnight, WhiteKnight:
Moves = append(Moves, cb.knightMoves(x, y, true)...)
Moves = append(Moves, cb.knightMoves(x, y, false)...)
case BlackBishop, WhiteBishop:
Moves = append(Moves, cb.bishopMoves(x, y, true)...)
Moves = append(Moves, cb.bishopMoves(x, y, false)...)
case BlackQueen, WhiteQueen:
Moves = append(Moves, cb.rookMoves(x, y, true)...)
Moves = append(Moves, cb.bishopMoves(x, y, true)...)
Moves = append(Moves, cb.rookMoves(x, y, false)...)
Moves = append(Moves, cb.bishopMoves(x, y, false)...)
case BlackPawn:
_, pawnThreats, _ := cb.pawnMoves(x, y, true)
Moves = append(Moves, pawnThreats...)
case WhitePawn:
_, pawnThreats, _ := cb.pawnMoves(x, y, false)
Moves = append(Moves, pawnThreats...)
case BlackKing, WhiteKing:
Moves = append(Moves, cb.kingMoves(x, y, true)...)
Moves = append(Moves, cb.kingMoves(x, y, false)...)
}
return
} | chess/chess.go | 0.69987 | 0.428712 | chess.go | starcoder |
package longpalsubseq
import (
"fmt"
)
// I wish to avoid converting our integers to float64, just for the sake of using math.Max.
// Instead, let's create a simple helper function to return the max of two integers.
func max(x, y int) int {
if x > y {
return x
} else {
return y
}
}
func printLongestPalindromeSubseq(s string, T [][]int) string {
// Create result slice of bytes that represent the length of the longest palindromic subsequence.
// In each slice, we will store the bytes representing the character at each position.
res := make([]byte, T[0][len(s)-1])
// The length of the longest palindromic subsequence is stored at T[0][N], where N is the length of the input string. We will start there and work backwards.
i := 0
j := T[0][len(s)-1]
// Let's also setup two pointers that represent the left and right side of our resulting string.
// In most cases, we're going to add strings to both sides simultaneously.
// "l" represents the left side and "r" represents the right side.
l := 0
r := len(res) - 1
for i <= j {
// This handles the case where the original string's slice increased the size of the longest palindromic substring.
// We verify that the characters at the start and end of the slice match.
if s[i] == s[j] && T[i][j] == T[i+1][j-1]+2 {
res[l] = s[i]
// Move l toward the right, since we added an item to the result on the left side.
l++
res[r] = s[j]
// Move r toward the left, since we added an item to the result on the right side.
r--
i++
j--
} else if T[i][j] == T[i+1][j] {
// If T[i][j] equals the value of T[i+1][j], then we can assume that the longest palindromic subsequence was contained in the slice represented by T[i+1][j].
// Add the left side of slice represented by T[i+1][j]
res[l] = s[i+1]
// Move l toward the right, since we added an item to the result on the left side.
l++
// If T[i+1][j] is greater than 1, then we have to add the left and right side of its represented slice.
if T[i+1][j] > 1 {
res[r] = s[j]
// Move r toward the left, since we added an item to the result on the right side.
r--
}
// Backtrack from T[i][j] to where our solution came from (T[i+1][j])
i++
} else if T[i][j] == T[i][j-1] {
// If T[i][j] equals the value of T[i][j-1], then we can assume that the longest palindromic subsequence was contained in the slice represented by T[i][j-1].
// If T[i][j-1] is greater than 1, then we have to add the left and right side of its represented slice.
if T[i][j-1] > 0 {
res[l] = s[i]
// Move l toward the right, since we added an item to the result on the left side.
l++
}
// Add the right side of slice represented by T[i][j-1]
res[r] = s[j-1]
// Move r toward the left, since we added an item to the result on the right side.
r--
// Backtrack from T[i][j] to where our solution came from (T[i][j-1])
j--
} else if i == j {
// We need to handle a case where the longest palindromic subsequence is of odd length.
// For example "abdba": up to this point, we will have added "ab ba" to our resulting string.
// We would need to add the single character represented at T[i][j].
res[len(res)/2] = s[i]
break
}
}
return fmt.Sprintf("%s", res)
}
func longestPalindromeSubseq(s string) (int, [][]int) {
// We are going to build a N*N 2D matrix that represents the longest palindromic subsequence for every slice of the input string.
// N represents the length of the input string.
T := make([][]int, len(s))
for i, _ := range T {
T[i] = make([]int, len(s))
// While we are creating the result matrix, let's also account for slice of length 1.
// For example, if input is "bbbab", longest palindromic substring at s[0], s[1], s[2] and so on are all 1.
T[i][i] = 1
}
// "length" represents the length of the slice of the input string we will review.
// Start at 2, because we already took care of length 1. (Iterate up to length-1 because we've already taken care of length 1.)
for length := 2; length <= len(s); length++ {
// Now let's iterate through slices of input string of equal size to length
for i := 0; i <= len(s)-length; i++ {
// Example: "ab", "i" = s[0] ("a") and "j" = s[1] ("b")
// Subtract 1 since resulting array is indexed starting at 0.
j := i + length - 1
if length == 2 {
// Consider example of "aa"
// A slice of s[0..1] has a palindrome of "aa" or length 2.
if s[i] == s[j] {
T[i][j] = 2
} else {
// Now consider above example of "ab"
// A slice of s[0..1] has a palindrome of "a" or "b", equaling length 1.
T[i][j] = 1
}
} else if s[i] == s[j] {
// Consider a length 3 slice of "abad".
// If we start with s[0..2] or "aba", the resulting palindrome would be length 3.
// We can reach that value by adding 2 to the result of s[1..2] ("ba").
T[i][j] = T[i+1][j-1] + 2
} else {
// Consider a length 3 slice of "adbb".
// Let's move forward to the 2nd of the length 3 slices in this example.
// Evaluating "dbb", the longest palindrome is of length 2 in that slice.
// To reach that value, we take either the max of the palindromes at s[1..2] ("db") or s[2..3] ("bb").
// The first is of length 1 and the latter is of length 2.
T[i][j] = max(T[i+1][j], T[i][j-1])
}
}
}
// Returning T so that we can pass it to our function to print the longest palindromic subsequence.
return T[0][len(s)-1], T
} | longpalsubseq/longpalsubseq.go | 0.756627 | 0.65368 | longpalsubseq.go | starcoder |
package recursion
/*
# https://leetcode.com/explore/learn/card/recursion-ii/507/beyond-recursion/3006/
A city's skyline is the outer contour of the silhouette formed by all the buildings in that city when viewed from a distance. Now suppose you are given the locations and height of all the buildings as shown on a cityscape photo (Figure A), write a program to output the skyline formed by these buildings collectively (Figure B).
Buildings Skyline Contour
The geometric information of each building is represented by a triplet of integers [Li, Ri, Hi], where Li and Ri are the x coordinates of the left and right edge of the ith building, respectively, and Hi is its height. It is guaranteed that 0 ≤ Li, Ri ≤ INT_MAX, 0 < Hi ≤ INT_MAX, and Ri - Li > 0. You may assume all buildings are perfect rectangles grounded on an absolutely flat surface at height 0.
For instance, the dimensions of all buildings in Figure A are recorded as: [ [2 9 10], [3 7 15], [5 12 12], [15 20 10], [19 24 8] ] .
The output is a list of "key points" (red dots in Figure B) in the format of [ [x1,y1], [x2, y2], [x3, y3], ... ] that uniquely defines a skyline. A key point is the left endpoint of a horizontal line segment. Note that the last key point, where the rightmost building ends, is merely used to mark the termination of the skyline, and always has zero height. Also, the ground in between any two adjacent buildings should be considered part of the skyline contour.
For instance, the skyline in Figure B should be represented as:[ [2 10], [3 15], [7 12], [12 0], [15 10], [20 8], [24, 0] ].
Notes:
The number of buildings in any input list is guaranteed to be in the range [0, 10000].
The input list is already sorted in ascending order by the left x position Li.
The output list must be sorted by the x position.
There must be no consecutive horizontal lines of equal height in the output skyline. For instance, [...[2 3], [4 5], [7 5], [11 5], [12 7]...] is not acceptable; the three lines of height 5 should be merged into one in the final output as such: [...[2 3], [4 5], [12 7], ...]
*/
func GetSkyline(buildings [][]int) [][]int {
}
func getSkyline(buildings [][]int) [][]int {
if buildings == nil || len(buildings) == 0 {
return nil
}
// edge:=
return skyline(buildings,nil)
}
// build:建筑,edge:边缘
func skyline(buildings [][]int, edge [][]int)[][]int {
if buildings==nil||len(buildings)==0{
return edge
}
build:=buildings[0]
edge = append(edge, []int{build[0], 0})
edge = append(edge, []int{build[0], build[1]})
edge = append(edge, []int{build[1], build[2]})
edge = append(edge, []int{build[2], 0}))
edge=skyline(buildings[0:],ededge)
return edge
}
func handleEdge(edge [][]int,build []int)[][]int{
if edge==nil{
// 增加2个边缘点
edge = append(edge, []int{build[0], build[1]})
edge = append(edge, []int{build[2], 0}))
}
if build==nil{
return edge
}
// 找出新build在当前edge中的位置
var leftIndex,rightIndex int
// 左边点
leftPoint:=[]int{build[0],build[1]}
// 右边点
rightPoint:=[]int{build[2],build[1]}
for i:=0;i<len(edge)-1;i++{
if edge[i][0]<leftPoint[0]&&leftPoint[0]<edge[i+1][0]{
leftIndex=i
// 在边缘内部(左右边缘高度均高于build高度)
if edge[i][1]>leftPoint[1]&&edge[i+1][1]>leftPoint[1]{
leftIndex=-1
}
}
if edge[i][0]<rightPoint[0]&&rightPoint[0]<edge[i+1][0]{
rightIndex=i
// 在边缘内部(左右边缘高度均高于build高度)
if edge[i][1]>rightPoint[0]&&edge[i+1][1]>rightPoint[0]{
rightIndex=-1
}
}
}
// 重新计算edge
return edge
} | recursion/skyline.go | 0.72487 | 0.800068 | skyline.go | starcoder |
package label
import (
"bytes"
"fmt"
"sort"
"unicode/utf8"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
"github.com/TyeMcQueen/go-lager"
"google.golang.org/api/monitoring/v3"
)
// A label.Set tracks the possible label names and seen label values for a
// metric.
type Set struct {
labelKeys, // Label names for a StackDriver metric.
resourceKeys, // Label names for a monitored resource.
keptKeys []string // The above 2 lists minus any ignored labels.
valueSet Values // The list of seen label values.
SkippedKeys []string // Sorted list of omitted keys.
}
// A label.Values contains all of the seen label values and provides a mapping
// between each unique value and a rune. This allows a list of label values
// to be recorded very compactly which also provides an efficient way to find
// the prior metric having identical label values.
type Values struct {
values []string
valPos map[string]rune
}
// A RuneList is just a string. It is used to store a list of values of type
// `rune`.
type RuneList string
// Converts a RuneList into a string containing the (decimal) rune values
// separated by "."s. Makes a RuneList easy to read instead of a string full
// of control characters. Useful if a RuneList ends up in a log message.
func (rl RuneList) String() string {
b := new(bytes.Buffer)
sep := ""
for _, r := range rl {
fmt.Fprintf(b, "%s%d", sep, r)
sep = "."
}
return b.String()
}
// Returns 1 plus the count of already-seen unique label values.
func (vs *Values) Len() int {
return len(vs.values)
}
// Converts a label value into a rune.
func (vs *Values) Rune(val string) rune {
if p, ok := vs.valPos[val]; ok {
return p
}
p := rune(len(vs.values))
vs.valPos[val] = p
vs.values = append(vs.values, val)
return p
}
// Converts a rune into a label value. Will panic() if the rune value is
// out-of-range.
func (vs *Values) Value(pos rune) string {
if pos < 1 || len(vs.values) <= int(pos) {
lager.Panic().Map(
"Rune out of range", pos,
"Value list len", len(vs.values),
)
}
return vs.values[pos]
}
// Returns the number of kept label names.
func (ls *Set) Len() int { return len(ls.keptKeys) }
// Returns the list of kept label names.
func (ls *Set) KeptKeys() []string { return ls.keptKeys }
// Converts a RuneList into a list of LabelPairs ready to export.
func (ls *Set) LabelPairs(rl RuneList) []*dto.LabelPair {
pairs := make([]*dto.LabelPair, len(ls.keptKeys))
values := ls.ValueList(rl)
for i, k := range ls.keptKeys {
pairs[i] = &dto.LabelPair{
Name: proto.String(k),
Value: proto.String(values[i]),
}
}
return pairs
}
// Initializes a new label.Set to track the passed-in metric labels and
// resource labels, ignoring any labels in `skipKeys`.
func (ls *Set) Init(
skipKeys []string,
labelDescs []*monitoring.LabelDescriptor,
resourceLabels map[string]bool,
) {
skip := make(map[string]int, len(skipKeys))
for _, k := range skipKeys {
skip[k] = 1
}
ls.valueSet = Values{
valPos: make(map[string]rune, 32),
values: make([]string, 1, 32),
};
ls.valueSet.values[0] = "n/a" // Skip \x00 as a rune for future use.
ls.labelKeys = make([]string, len(labelDescs))
skips := 0
o := 0
for _, ld := range labelDescs {
if 0 < skip[ld.Key] {
if 1 == skip[ld.Key] {
skips++
}
skip[ld.Key]++
} else {
ls.labelKeys[o] = ld.Key
o++
}
}
ls.labelKeys = ls.labelKeys[0:o]
sort.Strings(ls.labelKeys)
ls.resourceKeys = make([]string, len(resourceLabels))
o = 0
for k, _ := range resourceLabels {
if 0 < skip[k] {
if 1 == skip[k] {
skips++
}
skip[k]++
} else {
ls.resourceKeys[o] = k
o++
}
}
ls.resourceKeys = ls.resourceKeys[0:o]
sort.Strings(ls.resourceKeys)
if 0 < skips {
ls.SkippedKeys = make([]string, skips)
o = 0
for k, n := range skip {
if 1 < n {
ls.SkippedKeys[o] = k
o++
}
}
}
sort.Strings(ls.SkippedKeys)
ls.keptKeys = append(ls.labelKeys, ls.resourceKeys...)
}
// Converts the label values from a TimeSeries into a RuneList.
func (ls *Set) RuneList(
metricLabels map[string]string,
resourceLabels map[string]string,
) RuneList {
b := make([]byte, func() int {
l := len(ls.labelKeys) + len(ls.resourceKeys)
r := ls.valueSet.Len()
l *= utf8.RuneLen(rune(r-1+l))
return l
}())
o := 0
for _, k := range ls.labelKeys {
val := metricLabels[k]
r := ls.valueSet.Rune(val)
o += utf8.EncodeRune(b[o:], r)
}
for _, k := range ls.resourceKeys {
val := resourceLabels[k]
r := ls.valueSet.Rune(val)
o += utf8.EncodeRune(b[o:], r)
}
return RuneList(string(b[:o]))
}
// ValueList converts a RuneList back into the list of label value strings,
// one per kept key, by looking each rune up in ls.valueSet.
func (ls *Set) ValueList(rl RuneList) []string {
	l := len(ls.keptKeys)
	vals := make([]string, l)
	i := 0
	for _, r := range rl {
		// More runes than kept keys means the RuneList is corrupt.
		if len(vals) <= i {
			lager.Panic().Map(
				"RuneList too long", rl,
				"labelKeys", len(ls.labelKeys),
				"resourceKeys", len(ls.resourceKeys),
			)
		}
		vals[i] = ls.valueSet.Value(r)
		i++
	}
	return vals
} | mon2prom/label/label.go | 0.708515 | 0.472623 | label.go | starcoder |
package latlong
import (
"bytes"
"fmt"
"math"
"strconv"
"github.com/golang/geo/s1"
)
// Angle is an angle paired with its precision; both are stored as s1.Angle
// values (radians).
type Angle struct {
	radian     s1.Angle // The angle itself.
	radianprec s1.Angle // The precision (resolution) of the angle.
}

// NewAngle is a constructor for Angle; both arguments are in degrees.
func NewAngle(degree, degreeprec float64) (a Angle) {
	a.radian = s1.Angle(degree) * s1.Degree
	a.radianprec = s1.Angle(degreeprec) * s1.Degree
	return
}

// NewAngleFromS1Angle is a constructor for Angle from s1.Angle values.
func NewAngleFromS1Angle(angle, angleprec s1.Angle) (a Angle) {
	a.radian = angle
	a.radianprec = angleprec
	return
}

// S1Angle returns the angle as an s1.Angle.
func (a Angle) S1Angle() s1.Angle {
	return a.radian
}

// Degrees returns the angle in degrees.
func (a Angle) Degrees() float64 {
	return a.S1Angle().Degrees()
}

// PrecS1Angle returns the precision as an s1.Angle.
func (a Angle) PrecS1Angle() s1.Angle {
	return a.radianprec
}

// PrecDegrees returns the precision in degrees.
func (a Angle) PrecDegrees() float64 {
	return a.PrecS1Angle().Degrees()
}

// preclog returns the number of decimal places implied by the precision;
// e.g. a precision of 0.01 degrees yields 2. Defaults to 2 when no precision
// is recorded, and is clamped so it is never negative.
func (a Angle) preclog() (lngprec int) {
	if a.radianprec.Degrees() != 0 {
		lngprec = int(math.Ceil(-math.Log10(a.radianprec.Degrees())))
		if lngprec < 0 {
			lngprec = 0
		}
	} else {
		lngprec = 2
	}
	return
}

// String formats the angle as decimal degrees using preclog() decimal places.
func (a Angle) String() (s string) {
	return strconv.FormatFloat(a.radian.Degrees(), 'f', a.preclog(), 64)
}
// MarshalJSON is a marshaler for JSON; it emits decimal degrees (unquoted).
func (a Angle) MarshalJSON() ([]byte, error) {
	return []byte(a.String()), nil
}

// UnmarshalJSON is an unmarshaler for JSON; it accepts the formats parsed by
// AngleFromBytes and returns an error for unparseable input.
func (a *Angle) UnmarshalJSON(data []byte) (err error) {
	data = bytes.TrimSpace(data)
	*a = AngleFromBytes(data)
	// AngleFromBytes signals failure via the sentinel error Angle.
	if isErrorDeg(*a) {
		err = fmt.Errorf("Error Degree on JSON Deg %s", string(data))
	}
	return
}
// AngleFromBytes creates an Angle from []byte to unmarshal. The number of
// characters before the decimal point selects the input format:
//   - fewer than 5: plain decimal degrees (handled by getDeg)
//   - fewer than 7: sign + degrees + minutes (handled by getDegMin)
//   - fewer than 9: sign + degrees + minutes + seconds (handled by getDegMinSec)
//
// Longer inputs yield the sentinel error Angle.
func AngleFromBytes(part []byte) (a Angle) {
	part = bytes.TrimSpace(part)
	pos := bytes.Index(part, []byte(`.`))
	if pos == -1 {
		pos = len(part)
	}
	// NOTE(review): a dead `pos < 3 && false` error branch was removed here;
	// the `&& false` made it unreachable, so behavior is unchanged. If very
	// short inputs should be rejected, restore a plain `pos < 3` check.
	if pos < 5 {
		a = getDeg(part, pos)
	} else if pos < 7 {
		a = getDegMin(part, pos)
	} else if pos < 9 {
		a = getDegMinSec(part, pos)
	} else {
		a = getErrorDeg()
	}
	return
}
// isErrorDeg reports whether a equals the sentinel error Angle produced by
// getErrorDeg.
func isErrorDeg(a Angle) bool {
	sentinel := getErrorDeg()
	return a.radian == sentinel.radian && a.radianprec == sentinel.radianprec
}

// getErrorDeg returns the sentinel Angle used to signal a parse error:
// zero radians with an impossible precision value of 360.
func getErrorDeg() (a Angle) {
	a = Angle{radian: 0, radianprec: 360}
	return
}
// getDeg parses a plain decimal-degrees value (e.g. "35.658"). pos is the
// index of the decimal point, or len(part) when there is none; the precision
// is derived from the number of fractional digits.
func getDeg(part []byte, pos int) Angle {
	var deg, degprec float64
	var err error
	deg, err = strconv.ParseFloat(string(part), 64)
	if err != nil {
		return getErrorDeg()
	}
	if l := len(part); l == pos {
		degprec = 1 // No fractional digits: whole-degree precision.
	} else {
		// pos-l+1 == -(number of fractional digits), so this is 10^-digits.
		degprec = math.Pow10(pos - l + 1)
	}
	return Angle{radian: s1.Angle(deg) * s1.Degree, radianprec: s1.Angle(degprec) * s1.Degree}
}
// getDegMin parses a signed degrees+minutes value such as "+3541.486"
// (35 degrees, 41.486 minutes). part[0] must be '+' or '-'. pos is the index
// of the decimal point, or len(part) when there is none; the caller
// guarantees pos is in a range where the slicing below is valid.
func getDegMin(part []byte, pos int) Angle {
	var err error
	var deg, degprec float64
	// Degrees: everything between the sign and the last two integer digits.
	if deg, err = strconv.ParseFloat(string(part[1:pos-2]), 64); err != nil {
		return getErrorDeg()
	}
	// Minutes: the last two integer digits plus any fraction.
	var min float64
	if min, err = strconv.ParseFloat(string(part[pos-2:]), 64); err != nil {
		return getErrorDeg()
	}
	deg += min / 60
	switch part[0] {
	case '-':
		deg = -deg
	case '+':
		break
	default:
		return getErrorDeg()
	}
	if l := len(part); l == pos {
		degprec = float64(1) / 60 // Whole-minute precision.
	} else {
		degprec = math.Pow10(pos-l+1) / 60
	}
	return Angle{radian: s1.Angle(deg) * s1.Degree, radianprec: s1.Angle(degprec) * s1.Degree}
}
// getDegMinSec parses a signed degrees+minutes+seconds value such as
// "+354129.16" (35 degrees, 41 minutes, 29.16 seconds). part[0] must be
// '+' or '-'. pos is the index of the decimal point, or len(part) when there
// is none; the caller guarantees pos is in a range where the slicing is valid.
func getDegMinSec(part []byte, pos int) Angle {
	var err error
	var deg, degprec float64
	// Degrees: between the sign and the last four integer digits.
	if deg, err = strconv.ParseFloat(string(part[1:pos-4]), 64); err != nil {
		return getErrorDeg()
	}
	// Minutes: two digits before the seconds.
	var min float64
	if min, err = strconv.ParseFloat(string(part[pos-4:pos-2]), 64); err != nil {
		return getErrorDeg()
	}
	deg += min / 60
	// Seconds: the last two integer digits plus any fraction.
	var sec float64
	if sec, err = strconv.ParseFloat(string(part[pos-2:]), 64); err != nil {
		return getErrorDeg()
	}
	deg += sec / 3600
	switch part[0] {
	case '-':
		deg = -deg
	case '+':
		break
	default:
		return getErrorDeg()
	}
	if l := len(part); l == pos {
		degprec = float64(1) / 3600 // Whole-second precision.
	} else {
		degprec = math.Pow10(pos-l+1) / 3600
	}
	return Angle{radian: s1.Angle(deg) * s1.Degree, radianprec: s1.Angle(degprec) * s1.Degree}
} | Angle.go | 0.815122 | 0.403244 | Angle.go | starcoder |
package dataset
import (
"github.com/clambin/simplejson/v3/query"
"time"
)
// Dataset is a convenience data structure to construct a SimpleJSON table response. Use this when you're adding
// data for a range of (possibly out of order) timestamps.
type Dataset struct {
	data       [][]float64         // One row per timestamp, one cell per column.
	timestamps *Indexer[time.Time] // Maps each timestamp to its row index.
	columns    *Indexer[string]    // Maps each column name to its cell index.
}

// New creates a new, empty Dataset.
func New() *Dataset {
	return &Dataset{
		timestamps: MakeIndexer[time.Time](),
		columns:    MakeIndexer[string](),
	}
}
// Add adds a value for a specified timestamp and column to the dataset. If there is already a value for that
// timestamp and column, the specified value is added to the existing value.
func (d *Dataset) Add(timestamp time.Time, column string, value float64) {
	d.ensureColumnExists(column)
	row, added := d.timestamps.Add(timestamp)
	if added {
		// First time this timestamp is seen: allocate its (zeroed) row.
		d.data = append(d.data, make([]float64, d.columns.Count()))
	}
	col, _ := d.columns.GetIndex(column)
	d.data[row][col] += value
}
// ensureColumnExists registers column in the column index and, when it is
// new, grows every existing row with a zero value for it.
func (d *Dataset) ensureColumnExists(column string) {
	if _, added := d.columns.Add(column); !added {
		// Column already present; nothing to do.
		return
	}
	// New column: extend each existing row with a zero cell.
	for i := range d.data {
		d.data[i] = append(d.data[i], 0)
	}
}
// Size returns the number of rows (distinct timestamps) in the dataset.
func (d Dataset) Size() int {
	return d.timestamps.Count()
}
// AddColumn adds a new column to the dataset. For each timestamp, processor is called with the values for the
// existing columns (keyed by column name). Processor's return value is then added for the new column.
func (d *Dataset) AddColumn(column string, processor func(values map[string]float64) float64) {
	columns := d.columns.List()
	for index, row := range d.data {
		d.data[index] = append(row, processor(d.rowValues(row, columns)))
	}
	// Register the column only after the rows are extended, so processor never
	// sees the half-added column.
	d.columns.Add(column)
}
// rowValues maps the given column names to their values in a single row.
func (d Dataset) rowValues(row []float64, columns []string) (values map[string]float64) {
	values = make(map[string]float64)
	for _, column := range columns {
		idx, _ := d.columns.GetIndex(column)
		values[column] = row[idx]
	}
	return
}
// GetTimestamps returns the (sorted) list of timestamps in the dataset.
func (d Dataset) GetTimestamps() (timestamps []time.Time) {
	return d.timestamps.List()
}

// GetColumns returns the (sorted) list of column names.
func (d Dataset) GetColumns() (columns []string) {
	return d.columns.List()
}
// GetValues returns the value for the specified column for each timestamp in the dataset. The values are sorted
// by timestamp. ok is false when the column does not exist.
func (d Dataset) GetValues(column string) (values []float64, ok bool) {
	var index int
	index, ok = d.columns.GetIndex(column)
	if !ok {
		return
	}
	values = make([]float64, len(d.data))
	// List() is sorted by time; rows themselves are stored in insertion
	// order, so each timestamp is mapped back to its row index.
	for i, timestamp := range d.timestamps.List() {
		rowIndex, _ := d.timestamps.GetIndex(timestamp)
		values[i] = d.data[rowIndex][index]
	}
	return
}
// FilterByRange removes any rows in the dataset that are outside the specified from/to time range (rows strictly
// before from or strictly after to are dropped, so the range is inclusive). If from/to is zero, it is ignored.
func (d *Dataset) FilterByRange(from, to time.Time) {
	// make a list of all records to be removed, and the remaining timestamps
	timestamps := make([]time.Time, 0, d.timestamps.Count())
	var remove bool
	for _, timestamp := range d.timestamps.List() {
		if !from.IsZero() && timestamp.Before(from) {
			remove = true
			continue
		} else if !to.IsZero() && timestamp.After(to) {
			remove = true
			continue
		}
		timestamps = append(timestamps, timestamp)
	}
	// nothing to do here?
	if !remove {
		return
	}
	// create a new data list from the timestamps we want to keep
	data := make([][]float64, len(timestamps))
	ts := MakeIndexer[time.Time]()
	for index, timestamp := range timestamps {
		i, _ := d.timestamps.GetIndex(timestamp)
		data[index] = d.data[i]
		ts.Add(timestamp)
	}
	d.data = data
	d.timestamps = ts
}
// Accumulate accumulates the values for each column by time. E.g. if the values were 1, 1, 1, 1, the result would be
// 1, 2, 3, 4.
func (d *Dataset) Accumulate() {
	// Running totals, one per column, carried across rows in time order.
	accumulated := make([]float64, d.columns.Count())
	for _, timestamp := range d.timestamps.List() {
		row, _ := d.timestamps.GetIndex(timestamp)
		for index, value := range d.data[row] {
			accumulated[index] += value
		}
		copy(d.data[row], accumulated)
	}
}
// Copy returns a deep copy of the dataset: indexes and row data are all
// duplicated, so mutating the clone never affects the original.
func (d Dataset) Copy() (clone *Dataset) {
	clone = &Dataset{
		data:       make([][]float64, len(d.data)),
		timestamps: d.timestamps.Copy(),
		columns:    d.columns.Copy(),
	}
	for index, row := range d.data {
		clone.data[index] = make([]float64, len(row))
		copy(clone.data[index], row)
	}
	return
}
// GenerateTableResponse creates a TableResponse for the dataset: a leading
// "timestamp" column followed by one number column per data column.
func (d Dataset) GenerateTableResponse() (response *query.TableResponse) {
	response = &query.TableResponse{
		Columns: []query.Column{{
			Text: "timestamp",
			Data: query.TimeColumn(d.GetTimestamps()),
		}},
	}
	for _, column := range d.GetColumns() {
		values, _ := d.GetValues(column)
		// Grafana needs a non-empty column header.
		if column == "" {
			column = "(unknown)"
		}
		response.Columns = append(response.Columns, query.Column{
			Text: column,
			Data: query.NumberColumn(values),
		})
	}
	return
} | dataset/dataset.go | 0.826747 | 0.760139 | dataset.go | starcoder |
package numberz
import (
"github.com/modfin/henry/compare"
"github.com/modfin/henry/slicez"
"math"
"sort"
)
// Min returns the minimum of the numbers supplied.
func Min[N compare.Number](a ...N) N {
	return slicez.Min(a...)
}

// Max returns the maximum of the numbers supplied.
func Max[N compare.Number](a ...N) N {
	return slicez.Max(a...)
}

// Range returns the range (max - min) of the numbers supplied.
func Range[N compare.Number](a ...N) N {
	return Max(a...) - Min(a...)
}
// Sum returns the sum of the numbers supplied (zero for no arguments).
func Sum[N compare.Number](a ...N) N {
	var zero N
	return slicez.Fold(a, func(acc N, val N) N {
		return acc + val
	}, zero)
}
// VPow returns a vector containing each element of "vector" raised to the
// power "pow" (computed in float64 via math.Pow, then converted back to N).
func VPow[N compare.Number](vector []N, pow N) []N {
	return slicez.Map(vector, func(a N) N {
		return N(math.Pow(float64(a), float64(pow)))
	})
}
// VMul will return a vector containing elements x and y multiplied such that x[i]*y[i] = returned[i].
// Both inputs are truncated to the length of the shorter one.
func VMul[N compare.Number](x []N, y []N) []N {
	l := Min(len(x), len(y))
	x, y = x[:l], y[:l]
	return slicez.Zip(x, y, func(a, b N) N {
		return a * b
	})
}
// VAdd will return a vector containing elements x and y added such that x[i]+y[i] = returned[i].
// Both inputs are truncated to the length of the shorter one.
func VAdd[N compare.Number](x []N, y []N) []N {
	l := Min(len(x), len(y))
	x, y = x[:l], y[:l]
	return slicez.Zip(x, y, func(a, b N) N {
		return a + b
	})
}
// VSub will return a vector containing elements y subtracted from x such that x[i]-y[i] = returned[i].
// Both inputs are truncated to the length of the shorter one, mirroring VMul/VAdd.
func VSub[N compare.Number](x []N, y []N) []N {
	// BUG FIX: the original computed Min(len(y), len(y)) and reassigned only
	// y (`y, y = y[:l], y[:l]`), so x was never clipped when it was the
	// longer slice and the length came out wrong when y was longer.
	l := Min(len(x), len(y))
	x, y = x[:l], y[:l]
	return slicez.Zip(x, y, func(a, b N) N {
		return a - b
	})
}
// VDot will return the dot product of two vectors (sum of pairwise products).
func VDot[N compare.Number](x []N, y []N) N {
	return Sum(VMul(x, y)...)
}
// Mean will return the arithmetic mean of a vector (0 for an empty vector).
func Mean[N compare.Number](vector ...N) float64 {
	if len(vector) == 0 {
		var zero N
		return float64(zero)
	}
	return float64(Sum(vector...)) / float64(len(vector))
}
// MAD will return the Mean Absolute Deviation of a vector:
// mean(|x_i - mean(x)|). NaN for an empty vector (0/0).
func MAD[N compare.Number](vector ...N) float64 {
	mean := Mean(vector...)
	count := float64(len(vector))
	return slicez.Fold(vector, func(accumulator float64, val N) float64 {
		return accumulator + math.Abs(float64(val)-mean)/count
	}, 0.0)
}
// Var will return the variance of a sample (n-1 denominator).
// NOTE(review): with fewer than 2 samples the denominator is <= 0, producing
// NaN or ±Inf — callers should guard against that if it matters.
func Var[N compare.Number](samples ...N) float64 {
	avg := Mean(samples...)
	partial := slicez.Map(samples, func(x N) float64 {
		return math.Pow(float64(x)-avg, 2)
	})
	return Sum(partial...) / float64(len(partial)-1)
}
// StdDev will return the sample standard deviation (square root of Var).
func StdDev[N compare.Number](samples ...N) float64 {
	return math.Sqrt(Var(samples...))
}

// StdErr will return the sample standard error: StdDev / sqrt(n).
func StdErr[N compare.Number](n ...N) float64 {
	return StdDev(n...) / math.Sqrt(float64(len(n)))
}

// SNR will return the signal-to-noise ratio of the vector: mean / stddev.
func SNR[N compare.Number](sample ...N) float64 {
	return Mean(sample...) / StdDev(sample...)
}

// ZScore will return the z-score of x relative to the population pop:
// (x - mean(pop)) / stddev(pop).
func ZScore[N compare.Number](x N, pop []N) float64 {
	return (float64(x) - Mean(pop...)) / StdDev(pop...)
}
// Skew will return the skew of a sample.
// NOTE(review): this computes sum((x-mean)^3) / ((n-1)*sd^3), which is one of
// several common skewness estimators — confirm it matches the intended
// definition before relying on exact values.
func Skew[N compare.Number](sample ...N) float64 {
	count := float64(len(sample))
	mean := Mean(sample...)
	sd := StdDev(sample...)
	d := (count - 1) * math.Pow(sd, 3)
	return slicez.Fold(sample, func(accumulator float64, val N) float64 {
		return accumulator + math.Pow(float64(val)-mean, 3)/d
	}, 0)
}
// Corr returns the Pearson correlation between two vectors:
// sum(dx*dy) / sqrt(sum(dx^2)*sum(dy^2)), where dx/dy are deviations from
// the respective means. Inputs are truncated to the shorter length.
func Corr[N compare.Number](x []N, y []N) float64 {
	l := Min(len(x), len(y))
	x, y = x[:l], y[:l]
	xm := Mean(x...)
	ym := Mean(y...)
	// Deviations from the mean for each vector.
	dx := slicez.Map(x, func(a N) float64 {
		return float64(a) - xm
	})
	dy := slicez.Map(y, func(a N) float64 {
		return float64(a) - ym
	})
	t := Sum(slicez.Zip(dx, dy, func(a float64, b float64) float64 {
		return a * b
	})...)
	n1 := Sum(VPow(dx, 2)...)
	n2 := Sum(VPow(dy, 2)...)
	return t / (math.Sqrt(n1 * n2))
}
// Cov returns the co-variance between two vectors, truncated to the shorter
// length.
// NOTE(review): this divides by l (population covariance), while Var divides
// by n-1 (sample variance) — confirm the asymmetry is intended.
func Cov[N compare.Number](x []N, y []N) float64 {
	l := Min(len(x), len(y))
	x, y = x[:l], y[:l]
	xm := Mean(x...)
	ym := Mean(y...)
	// Deviations from the mean for each vector.
	dx := slicez.Map(x, func(a N) float64 {
		return float64(a) - xm
	})
	dy := slicez.Map(y, func(a N) float64 {
		return float64(a) - ym
	})
	t := Sum(slicez.Zip(dx, dy, func(a float64, b float64) float64 {
		return a * b
	})...)
	return t / float64(l)
}
// R2 returns the r² (coefficient of determination) between two vectors:
// the square of their Pearson correlation.
func R2[N compare.Number](x []N, y []N) float64 {
	return math.Pow(Corr(x, y), 2)
}
// LinReg returns the ordinary-least-squares linear regression of two vectors
// such that y ≈ slope*x + intercept. Inputs are truncated to the shorter
// length.
func LinReg[N compare.Number](x []N, y []N) (intercept, slope float64) {
	l := Min(len(x), len(y))
	x, y = x[:l], y[:l]
	sum_x := float64(Sum(x...))
	sum_x2 := float64(Sum(VPow(x, 2)...))
	sum_y := float64(Sum(y...))
	sum_xy := float64(Sum(VMul(x, y)...))
	n := float64(l)
	// BUG FIX: the two closed-form OLS formulas were swapped in the original
	// (slope held the intercept formula and vice versa). The correct forms:
	//   slope     = (n*Σxy − Σx*Σy)  / (n*Σx² − (Σx)²)
	//   intercept = (Σy*Σx² − Σx*Σxy) / (n*Σx² − (Σx)²)
	slope = (n*sum_xy - sum_x*sum_y) / (n*sum_x2 - math.Pow(sum_x, 2))
	intercept = (sum_y*sum_x2 - sum_x*sum_xy) / (n*sum_x2 - math.Pow(sum_x, 2))
	return intercept, slope
}
// FTest returns the F-test statistic of two vectors: Var(x) / Var(y).
func FTest[N compare.Number](x []N, y []N) float64 {
	return Var(x...) / Var(y...)
}
// Median returns the median of a vector: the middle element after sorting,
// or the mean of the two middle elements for an even count.
func Median[N compare.Number](vector ...N) float64 {
	l := len(vector)
	inter := slicez.SortFunc(vector, func(i, j N) bool {
		return i < j
	})
	// Trim both ends, leaving the middle one (odd l) or two (even l) elements.
	inter = slicez.Drop(inter, l/2-1)
	inter = slicez.DropRight(inter, l/2-1)
	if len(inter) == 2 {
		return Mean(inter...)
	}
	return float64(inter[len(inter)/2])
}
// modecount pairs a value with its number of occurrences.
type modecount[N compare.Number] struct {
	val N
	c int
}

// Mode returns the smallest mode of a vector (the first element of Modes).
func Mode[N compare.Number](vector ...N) N {
	return Modes(vector...)[0]
}

// Modes returns all values that share the highest occurrence count, in
// ascending order. Panics (via counts[0]) for an empty vector.
func Modes[N compare.Number](vector ...N) []N {
	// Tally occurrences per value.
	m := map[N]int{}
	for _, n := range vector {
		m[n] = m[n] + 1
	}
	var counts []modecount[N]
	for n, c := range m {
		counts = append(counts, modecount[N]{n, c})
	}
	// Most frequent first.
	sort.Slice(counts, func(i, j int) bool {
		return counts[i].c > counts[j].c
	})
	max := counts[0].c
	inter := slicez.TakeWhile(counts, func(a modecount[N]) bool {
		return a.c == max
	})
	inter = slicez.SortFunc(inter, func(a, b modecount[N]) bool {
		return a.val < b.val
	})
	return slicez.Map(
		inter,
		func(a modecount[N]) N {
			return a.val
		},
	)
}
// GCD returns the greatest common divisor of a vector, computed with the
// Euclidean algorithm folded over the elements. Returns 0 for no arguments.
func GCD[I compare.Integer](vector ...I) (gcd I) {
	if len(vector) == 0 {
		return gcd
	}
	gcd = vector[0]
	for _, b := range vector[1:] {
		// Euclidean algorithm: gcd(gcd, b).
		for b != 0 {
			t := b
			b = gcd % b
			gcd = t
		}
	}
	return gcd
}
// LCM returns the least common multiple of two or more integers, using
// a*b/gcd(a,b) and folding over any extra arguments.
func LCM[I compare.Integer](a, b I, vector ...I) I {
	result := a * b / GCD(a, b)
	for i := 0; i < len(vector); i++ {
		result = LCM(result, vector[i])
	}
	return result
}
// Percentile returns the fraction of the vector that is strictly less than
// score — a value in [0, 1], not a percent.
func Percentile[N compare.Number](score N, vector ...N) float64 {
	count := len(slicez.Filter(vector, func(n N) bool { return n < score }))
	return float64(count) / float64(len(vector))
}
// BitOR returns the bitwise OR of all elements in a vector (zero for empty).
func BitOR[I compare.Integer](vector []I) (i I) {
	if len(vector) == 0 {
		return i
	}
	if len(vector) == 1 {
		return vector[0]
	}
	return slicez.Fold(vector[1:], func(accumulator I, val I) I {
		return accumulator | val
	}, vector[0])
}

// BitAND returns the bitwise AND of all elements in a vector (zero for empty).
func BitAND[I compare.Integer](a []I) (i I) {
	if len(a) == 0 {
		return i
	}
	if len(a) == 1 {
		return a[0]
	}
	return slicez.Fold(a[1:], func(accumulator I, val I) I {
		return accumulator & val
	}, a[0])
}

// BitXOR returns the bitwise XOR of all elements in a vector (zero for empty).
func BitXOR[I compare.Integer](a []I) (i I) {
	if len(a) == 0 {
		return i
	}
	if len(a) == 1 {
		return a[0]
	}
	return slicez.Fold(a[1:], func(accumulator I, val I) I {
		return accumulator ^ val
	}, a[0])
} | exp/numberz/numbers.go | 0.896523 | 0.72645 | numbers.go | starcoder |
package value
import (
"fmt"
"github.com/chewxy/hm"
"github.com/pkg/errors"
"gorgonia.org/tensor"
)
// DualValue pairs a value with its derivative with respect to each input,
// as used for automatic differentiation.
type DualValue struct {
	Value
	D Value // the derivative wrt to each input
}

// SetDeriv sets the derivative, collapsing scalar tensors down to plain
// scalar values, and re-validates that value and derivative types match.
func (dv *DualValue) SetDeriv(d Value) error {
	if t, ok := d.(tensor.Tensor); ok && t.IsScalar() {
		d, _ = AnyToScalar(t.ScalarValue())
	}
	dv.D = d
	return dv.sanity()
}

// SetValue sets the primal value and re-validates the pair.
func (dv *DualValue) SetValue(v Value) error {
	dv.Value = v
	return dv.sanity()
}
// Clone deep-copies the DualValue: the primal value always, the derivative
// only when present. The clone is drawn from the DV pool via BorrowDV.
func (dv *DualValue) Clone() (retVal interface{}, err error) {
	var v, d Value
	if v, err = CloneValue(dv.Value); err != nil {
		return nil, errors.Wrap(err, cloneFail)
	}
	// D may legitimately be nil (no derivative computed yet).
	if dv.D != nil {
		if d, err = CloneValue(dv.D); err != nil {
			return nil, errors.Wrap(err, cloneFail)
		}
	}
	dv2 := BorrowDV()
	dv2.Value = v
	dv2.D = d
	retVal = dv2
	return
}
// Type returns the type of the primal value.
func (dv *DualValue) Type() hm.Type { return TypeOf(dv.Value) }

// Dtype returns the element dtype of the primal value.
func (dv *DualValue) Dtype() tensor.Dtype { return dv.Value.Dtype() }
// ValueEq reports whether a is a *DualValue equal to dv: either the same
// pointer, or with equal value AND equal derivative. Any other argument
// type compares false.
func (dv *DualValue) ValueEq(a Value) bool {
	switch at := a.(type) {
	case *DualValue:
		if at == dv {
			return true
		}
		veq := Eq(at.Value, dv.Value)
		deq := Eq(at.D, dv.D)
		return veq && deq
	// case Value:
	// return ValueEq(at, dv.Value)
	default:
		return false
	}
}

// String formats only the primal value, not the derivative.
func (dv *DualValue) String() string {
	return fmt.Sprintf("%#+v", dv.Value)
}
// sanity verifies that the value and its derivative have the same type.
// NOTE(review): dvv/dvd are not returned to the pool on the error path.
func (dv *DualValue) sanity() error {
	// check that d and v are the same type
	dvv := typeCheckTypeOf(dv.Value)
	dvd := typeCheckTypeOf(dv.D)
	if !dvv.Eq(dvd) {
		return errors.Errorf("DualValues do not have the same types: %v and %v", dvv, dvd)
	}
	ReturnType(dvv)
	ReturnType(dvd)
	// TODO: check that the shapes are the same
	return nil
}
// clone0 clones the DualValue and zeroes out both the value and the
// derivative, yielding a same-shaped, all-zero pair from the DV pool.
func (dv *DualValue) clone0() (retVal *DualValue, err error) {
	var v, d Value
	if v, err = CloneValue(dv.Value); err != nil {
		return nil, errors.Wrap(err, cloneFail)
	}
	if d, err = CloneValue(dv.D); err != nil {
		return nil, errors.Wrap(err, cloneFail)
	}
	v = ZeroValue(v)
	d = ZeroValue(d)
	dv2 := BorrowDV()
	dv2.Value = v
	dv2.D = d
	retVal = dv2
	return
} | internal/value/dual.go | 0.584627 | 0.403391 | dual.go | starcoder |
package interval
import "time"
// Interval represents the half-open time interval [From, To).
type Interval struct {
	From time.Time
	To   time.Time
}

// Offset shifts both endpoints of the interval by the given number of
// years, months and days, returning the shifted interval.
func (tv Interval) Offset(years, months, days int) Interval {
	return Interval{
		From: tv.From.AddDate(years, months, days),
		To:   tv.To.AddDate(years, months, days),
	}
}

// OffsetDay shifts the interval by the given number of days.
func (tv Interval) OffsetDay(days int) Interval {
	return tv.Offset(0, 0, days)
}

// OffsetMonth shifts the interval by the given number of months.
func (tv Interval) OffsetMonth(months int) Interval {
	return tv.Offset(0, months, 0)
}

// OffsetYear shifts the interval by the given number of years.
func (tv Interval) OffsetYear(years int) Interval {
	return tv.Offset(years, 0, 0)
}
// Today returns the time interval for today (based on time.Now, local time).
func Today() Interval {
	return ReferenceTime(time.Now()).ThisDay()
}

// ThisWeek returns the time interval for this week, where firstDay is the
// weekday the week starts on.
func ThisWeek(firstDay time.Weekday) Interval {
	return ReferenceTime(time.Now()).ThisWeek(firstDay)
}

// ThisMonth returns the time interval for this month.
func ThisMonth() Interval {
	return ReferenceTime(time.Now()).ThisMonth()
}

// ThisQuarter returns the time interval for this calendar quarter.
func ThisQuarter() Interval {
	return ReferenceTime(time.Now()).ThisQuarter()
}

// ThisYear returns the time interval for this year.
func ThisYear() Interval {
	return ReferenceTime(time.Now()).ThisYear()
}
// ReferenceTime represents the reference instant from which calendar
// intervals are derived; all intervals use the instant's own location.
type ReferenceTime time.Time

// ThisDay returns the [midnight, next midnight) interval containing rt.
func (rt ReferenceTime) ThisDay() Interval {
	t := time.Time(rt)
	year, month, day := t.Date()
	from := time.Date(year, month, day, 0, 0, 0, 0, t.Location())
	to := from.AddDate(0, 0, 1)
	return Interval{from, to}
}

// ThisWeek returns the 7-day interval containing rt that starts at midnight
// on the most recent firstDay.
func (rt ReferenceTime) ThisWeek(firstDay time.Weekday) Interval {
	t := time.Time(rt)
	year, month, day := t.Date()
	from := time.Date(year, month, day, 0, 0, 0, 0, t.Location())
	// Walk back 0..6 days to the most recent firstDay.
	intervalDays := int(from.Weekday() - firstDay)
	if intervalDays < 0 {
		intervalDays = 7 + intervalDays
	}
	if intervalDays != 0 {
		from = from.AddDate(0, 0, -intervalDays)
	}
	to := from.AddDate(0, 0, 7)
	return Interval{from, to}
}

// ThisMonth returns the calendar-month interval containing rt.
func (rt ReferenceTime) ThisMonth() Interval {
	t := time.Time(rt)
	year, month, _ := t.Date()
	from := time.Date(year, month, 1, 0, 0, 0, 0, t.Location())
	to := from.AddDate(0, 1, 0)
	return Interval{from, to}
}

// monthsPerQuarter is the number of months in a calendar quarter.
const monthsPerQuarter = 3

// ThisQuarter returns the calendar-quarter interval containing rt.
func (rt ReferenceTime) ThisQuarter() Interval {
	t := time.Time(rt)
	year, month, _ := t.Date()
	// ((month-1)/3)*3+1 maps the month to its quarter's first month (1, 4, 7, 10).
	from := time.Date(year, ((month-1)/monthsPerQuarter)*monthsPerQuarter+1, 1, 0, 0, 0, 0, t.Location())
	to := from.AddDate(0, monthsPerQuarter, 0)
	return Interval{from, to}
}

// ThisYear returns the calendar-year interval containing rt.
func (rt ReferenceTime) ThisYear() Interval {
	t := time.Time(rt)
	year, _, _ := t.Date()
	from := time.Date(year, 1, 1, 0, 0, 0, 0, t.Location())
	to := from.AddDate(1, 0, 0)
	return Interval{from, to}
} | interval.go | 0.913416 | 0.705316 | interval.go | starcoder |
package types
import (
"context"
"sync"
"github.com/dolthub/dolt/go/store/atomicerr"
"github.com/dolthub/dolt/go/store/d"
"github.com/dolthub/dolt/go/store/util/functions"
)
// DiffChangeType classifies a single change between two ordered sequences.
type DiffChangeType uint8

const (
	DiffChangeAdded DiffChangeType = iota
	DiffChangeRemoved
	DiffChangeModified
)

// ValueChanged describes one changed entry: its key plus old and/or new
// value (OldValue is nil for adds; NewValue is nil for removes).
type ValueChanged struct {
	ChangeType DiffChangeType
	Key, OldValue, NewValue Value
}

// sendChange delivers change on changes unless stopChan fires first; it
// returns false when the send was aborted by stopChan.
func sendChange(changes chan<- ValueChanged, stopChan <-chan struct{}, change ValueChanged) bool {
	select {
	case changes <- change:
		return true
	case <-stopChan:
		return false
	}
}
// Streams the diff from |last| to |current| into |changes|, using both left-right and top-down approach in parallel.
// The left-right diff is expected to return results earlier, whereas the top-down approach is faster overall. This "best" algorithm runs both:
// - early results from left-right are sent to |changes|.
// - if/when top-down catches up, left-right is stopped and the rest of the changes are streamed from top-down.
// Returns false when diffing was aborted (stopChan fired, or an error was recorded in |ae|).
func orderedSequenceDiffBest(ctx context.Context, last orderedSequence, current orderedSequence, ae *atomicerr.AtomicError, changes chan<- ValueChanged, stopChan <-chan struct{}) bool {
	lrChanges := make(chan ValueChanged)
	tdChanges := make(chan ValueChanged)
	// Buffer the stop channels so the sends below never block. lrStopChan can
	// be sent to twice — once when top-down overtakes, and once in the
	// deferred cleanup — so it needs a buffer of 2. BUG FIX: with the original
	// buffer of 1, the deferred second send could block forever if the
	// left-right goroutine had already exited without receiving (deadlock).
	lrStopChan := make(chan struct{}, 2)
	tdStopChan := make(chan struct{}, 1)
	// Ensure all diff functions have finished doing work by the time this function returns, otherwise database reads might cause deadlock - e.g. https://github.com/attic-labs/noms/issues/2165.
	wg := &sync.WaitGroup{}
	defer func() {
		// Stop diffing. The left-right or top-down diff might have already finished, but sending to the stop channels won't block due to the buffers.
		lrStopChan <- struct{}{}
		tdStopChan <- struct{}{}
		wg.Wait()
	}()
	wg.Add(2)
	go func() {
		defer wg.Done()
		defer close(lrChanges)
		orderedSequenceDiffLeftRight(ctx, last, current, ae, lrChanges, lrStopChan)
	}()
	go func() {
		defer wg.Done()
		defer close(tdChanges)
		orderedSequenceDiffTopDown(ctx, last, current, ae, tdChanges, tdStopChan)
	}()
	// Stream left-right changes while the top-down diff algorithm catches up.
	// Both algorithms emit the same ordered stream of changes, so the counts
	// tell us when top-down has passed everything left-right already sent.
	var lrChangeCount, tdChangeCount int
	for multiplexing := true; multiplexing; {
		if ae.IsSet() {
			return false
		}
		select {
		case <-stopChan:
			return false
		case c, ok := <-lrChanges:
			if !ok {
				// Left-right diff completed.
				return true
			}
			lrChangeCount++
			if !sendChange(changes, stopChan, c) {
				return false
			}
		case c, ok := <-tdChanges:
			if !ok {
				// Top-down diff completed.
				return true
			}
			tdChangeCount++
			if tdChangeCount > lrChangeCount {
				// Top-down diff has overtaken left-right diff.
				if !sendChange(changes, stopChan, c) {
					return false
				}
				lrStopChan <- struct{}{}
				multiplexing = false
			}
		}
	}
	// Drain the remainder of the diff from top-down alone.
	for c := range tdChanges {
		if !sendChange(changes, stopChan, c) {
			return false
		}
	}
	return true
}
// Streams the diff from |last| to |current| into |changes|, using a top-down approach.
// Top-down is parallel and efficiently returns the complete diff, but compared to left-right it's slow to start streaming changes.
func orderedSequenceDiffTopDown(ctx context.Context, last orderedSequence, current orderedSequence, ae *atomicerr.AtomicError, changes chan<- ValueChanged, stopChan <-chan struct{}) bool {
	return orderedSequenceDiffInternalNodes(ctx, last, current, ae, changes, stopChan)
}
// orderedSequenceDiffInternalNodes recursively diffs internal tree nodes,
// splicing child sequences and descending until it reaches leaves (where it
// falls back to the streaming left-right diff).
// TODO - something other than the literal edit-distance, which is way too much cpu work for this case - https://github.com/attic-labs/noms/issues/2027
func orderedSequenceDiffInternalNodes(ctx context.Context, last orderedSequence, current orderedSequence, ae *atomicerr.AtomicError, changes chan<- ValueChanged, stopChan <-chan struct{}) bool {
	// Level the trees: descend the deeper side until both are at equal height.
	if last.treeLevel() > current.treeLevel() && !ae.IsSet() {
		lastChild, err := last.getCompositeChildSequence(ctx, 0, uint64(last.seqLen()))
		if ae.SetIfError(err) {
			return false
		}
		return orderedSequenceDiffInternalNodes(ctx, lastChild.(orderedSequence), current, ae, changes, stopChan)
	}
	if current.treeLevel() > last.treeLevel() && !ae.IsSet() {
		currentChild, err := current.getCompositeChildSequence(ctx, 0, uint64(current.seqLen()))
		if ae.SetIfError(err) {
			return false
		}
		return orderedSequenceDiffInternalNodes(ctx, last, currentChild.(orderedSequence), ae, changes, stopChan)
	}
	if last.isLeaf() && current.isLeaf() && !ae.IsSet() {
		// Both sides are leaves: stream the entry-level diff.
		return orderedSequenceDiffLeftRight(ctx, last, current, ae, changes, stopChan)
	}
	if ae.IsSet() {
		return false
	}
	// Compute an edit script over the child chunks, then recurse into each
	// differing splice.
	compareFn := last.getCompareFn(current)
	initialSplices, err := calcSplices(uint64(last.seqLen()), uint64(current.seqLen()), DEFAULT_MAX_SPLICE_MATRIX_SIZE,
		func(i uint64, j uint64) (bool, error) { return compareFn(int(i), int(j)) })
	if ae.SetIfError(err) {
		return false
	}
	for _, splice := range initialSplices {
		if ae.IsSet() {
			return false
		}
		var lastChild, currentChild orderedSequence
		// Load both spliced child sequences concurrently.
		functions.All(
			func() {
				seq, err := last.getCompositeChildSequence(ctx, splice.SpAt, splice.SpRemoved)
				if !ae.SetIfError(err) {
					lastChild = seq.(orderedSequence)
				}
			},
			func() {
				seq, err := current.getCompositeChildSequence(ctx, splice.SpFrom, splice.SpAdded)
				if !ae.SetIfError(err) {
					currentChild = seq.(orderedSequence)
				}
			},
		)
		if !orderedSequenceDiffInternalNodes(ctx, lastChild, currentChild, ae, changes, stopChan) {
			return false
		}
	}
	return true
}
// Streams the diff from |last| to |current| into |changes|, using a left-right approach.
// Left-right immediately descends to the first change and starts streaming changes, but compared to top-down it's serial and much slower to calculate the full diff.
// Returns false when diffing was aborted (stopChan fired, or an error was recorded in |ae|).
func orderedSequenceDiffLeftRight(ctx context.Context, last orderedSequence, current orderedSequence, ae *atomicerr.AtomicError, changes chan<- ValueChanged, stopChan <-chan struct{}) bool {
	lastCur, err := newCursorAt(ctx, last, emptyKey, false, false)
	if ae.SetIfError(err) {
		return false
	}
	currentCur, err := newCursorAt(ctx, current, emptyKey, false, false)
	if ae.SetIfError(err) {
		return false
	}
	for lastCur.valid() && currentCur.valid() {
		if ae.IsSet() {
			return false
		}
		// Skip the common prefix of both sequences cheaply.
		err := fastForward(ctx, lastCur, currentCur)
		if ae.SetIfError(err) {
			return false
		}
		// Walk entry by entry until the cursors are equal again, classifying
		// each mismatch by key order: smaller current key => added, smaller
		// last key => removed, equal keys with unequal entries => modified.
		for lastCur.valid() && currentCur.valid() {
			if ae.IsSet() {
				return false
			}
			equals, err := lastCur.seq.getCompareFn(currentCur.seq)(lastCur.idx, currentCur.idx)
			if ae.SetIfError(err) {
				return false
			}
			if equals {
				break
			}
			lastKey, err := getCurrentKey(lastCur)
			if ae.SetIfError(err) {
				return false
			}
			currentKey, err := getCurrentKey(currentCur)
			if ae.SetIfError(err) {
				return false
			}
			if isLess, err := currentKey.Less(last.format(), lastKey); ae.SetIfError(err) {
				return false
			} else if isLess {
				mv, err := getMapValue(currentCur)
				if ae.SetIfError(err) {
					return false
				}
				if !sendChange(changes, stopChan, ValueChanged{DiffChangeAdded, currentKey.v, nil, mv}) {
					return false
				}
				_, err = currentCur.advance(ctx)
				if ae.SetIfError(err) {
					return false
				}
			} else {
				if isLess, err := lastKey.Less(last.format(), currentKey); ae.SetIfError(err) {
					return false
				} else if isLess {
					mv, err := getMapValue(lastCur)
					if ae.SetIfError(err) {
						return false
					}
					if !sendChange(changes, stopChan, ValueChanged{DiffChangeRemoved, lastKey.v, mv, nil}) {
						return false
					}
					_, err = lastCur.advance(ctx)
					if ae.SetIfError(err) {
						return false
					}
				} else {
					lmv, err := getMapValue(lastCur)
					if ae.SetIfError(err) {
						return false
					}
					cmv, err := getMapValue(currentCur)
					if ae.SetIfError(err) {
						return false
					}
					if !sendChange(changes, stopChan, ValueChanged{DiffChangeModified, lastKey.v, lmv, cmv}) {
						return false
					}
					_, err = lastCur.advance(ctx)
					if ae.SetIfError(err) {
						return false
					}
					_, err = currentCur.advance(ctx)
					if ae.SetIfError(err) {
						return false
					}
				}
			}
		}
	}
	// Anything left only in |last| was removed.
	for lastCur.valid() && !ae.IsSet() {
		lastKey, err := getCurrentKey(lastCur)
		if ae.SetIfError(err) {
			return false
		}
		mv, err := getMapValue(lastCur)
		if ae.SetIfError(err) {
			return false
		}
		if !sendChange(changes, stopChan, ValueChanged{DiffChangeRemoved, lastKey.v, mv, nil}) {
			return false
		}
		_, err = lastCur.advance(ctx)
		if ae.SetIfError(err) {
			return false
		}
	}
	// Anything left only in |current| was added.
	for currentCur.valid() && !ae.IsSet() {
		currKey, err := getCurrentKey(currentCur)
		if ae.SetIfError(err) {
			return false
		}
		mv, err := getMapValue(currentCur)
		if ae.SetIfError(err) {
			return false
		}
		if !sendChange(changes, stopChan, ValueChanged{DiffChangeAdded, currKey.v, nil, mv}) {
			return false
		}
		_, err = currentCur.advance(ctx)
		if ae.SetIfError(err) {
			return false
		}
	}
	return true
}
// fastForward advances |a| and |b| past their common prefix of equal values.
// No-op when either cursor is already invalid.
func fastForward(ctx context.Context, a *sequenceCursor, b *sequenceCursor) error {
	if a.valid() && b.valid() {
		_, _, err := doFastForward(ctx, true, a, b)
		if err != nil {
			return err
		}
	}
	return nil
}
// syncWithIdx re-reads cur's chunk after its parent moved, then positions
// its index: at the start when more values remain, otherwise at (or one
// past, when allowPastEnd) the last value.
func syncWithIdx(ctx context.Context, cur *sequenceCursor, hasMore bool, allowPastEnd bool) error {
	err := cur.sync(ctx)
	if err != nil {
		return err
	}
	if hasMore {
		cur.idx = 0
	} else if allowPastEnd {
		cur.idx = cur.length()
	} else {
		cur.idx = cur.length() - 1
	}
	return nil
}
// doFastForward advances |a| and |b| while their current values are equal,
// reporting for each cursor whether it still has more values. When both
// cursors' parents are also equal, whole chunks are skipped at the parent
// level without reading their contents.
func doFastForward(ctx context.Context, allowPastEnd bool, a *sequenceCursor, b *sequenceCursor) (aHasMore bool, bHasMore bool, err error) {
	d.PanicIfFalse(a.valid())
	d.PanicIfFalse(b.valid())
	aHasMore = true
	bHasMore = true
	for aHasMore && bHasMore {
		equals, err := isCurrentEqual(a, b)
		if err != nil {
			return false, false, err
		}
		if !equals {
			break
		}
		parentsEqAndNotNil := nil != a.parent && nil != b.parent
		if parentsEqAndNotNil {
			parentsEqAndNotNil, err = isCurrentEqual(a.parent, b.parent)
			if err != nil {
				return false, false, err
			}
		}
		if parentsEqAndNotNil {
			// Key optimisation: if the sequences have common parents, then entire chunks can be
			// fast-forwarded without reading unnecessary data.
			aHasMore, bHasMore, err = doFastForward(ctx, false, a.parent, b.parent)
			if err != nil {
				return false, false, err
			}
			err := syncWithIdx(ctx, a, aHasMore, allowPastEnd)
			if err != nil {
				return false, false, err
			}
			err = syncWithIdx(ctx, b, bHasMore, allowPastEnd)
			if err != nil {
				return false, false, err
			}
		} else {
			aHasMore, err = a.advanceMaybeAllowPastEnd(ctx, allowPastEnd)
			if err != nil {
				return false, false, err
			}
			bHasMore, err = b.advanceMaybeAllowPastEnd(ctx, allowPastEnd)
			if err != nil {
				return false, false, err
			}
		}
	}
	return aHasMore, bHasMore, nil
}
func isCurrentEqual(a *sequenceCursor, b *sequenceCursor) (bool, error) {
return a.seq.getCompareFn(b.seq)(a.idx, b.idx)
} | go/store/types/ordered_sequences_diff.go | 0.565539 | 0.429848 | ordered_sequences_diff.go | starcoder |
package model
import (
"container/list"
)
// IndexOfEdge returns the index (starting at 0) of the edge in the array.
// If the edge is not in the array, -1 will be returned.
func IndexOfEdge(edges []CircuitEdge, edge CircuitEdge) int {
	for i := 0; i < len(edges); i++ {
		if edges[i].Equals(edge) {
			return i
		}
	}
	return -1
}
// MergeEdgesByIndex combines the edges so that the attached vertex at the specified index is no longer used in the edges.
// The vertexIndex is the index of the edge starting with the vertex to detach in the edges array.
// After merging:
//   - the replacement edge is stored in vertexIndex-1 (or the last entry if vertexIndex is 0),
//   - the edge at vertexIndex is removed,
//   - the length of updatedEdges is one less than edges.
//
// This may update the supplied array, so it should be updated with the returned array.
// In addition to returning the updated array, this also returns the two detached edges.
func MergeEdgesByIndex(edges []CircuitEdge, vertexIndex int) (updatedEdges []CircuitEdge, detachedEdgeA CircuitEdge, detachedEdgeB CircuitEdge) {
	// There must be at least 2 edges to merge edges.
	if lastIndex := len(edges) - 1; lastIndex <= 0 {
		return []CircuitEdge{}, nil, nil
	} else if vertexIndex <= 0 {
		// The detached vertex starts edge 0, so its other edge is the final
		// edge; drop edge 0 and store the merged edge in the (new) last slot.
		detachedEdgeA = edges[lastIndex]
		detachedEdgeB = edges[0]
		edges = edges[1:]
		// Need additional -1 since array has one fewer element in it.
		edges[lastIndex-1] = detachedEdgeA.Merge(detachedEdgeB)
	} else {
		// Clamp out-of-range indices to the final edge.
		if vertexIndex >= lastIndex {
			vertexIndex = lastIndex
		}
		detachedEdgeA = edges[vertexIndex-1]
		detachedEdgeB = edges[vertexIndex]
		// Drop one slot by shifting the tail left over vertexIndex-1, then
		// overwrite that slot with the merged edge.
		edges = append(edges[:vertexIndex-1], edges[vertexIndex:]...)
		edges[vertexIndex-1] = detachedEdgeA.Merge(detachedEdgeB)
	}
	return edges, detachedEdgeA, detachedEdgeB
}
// MergeEdgesByVertex combines the edges so that the supplied vertex is no longer used in the edges.
// The array of edges must be ordered so that the 0th edge starts with the 0th vertex in the GetAttachedVertices array, the 1st edge starts with the 1st vertex, and so on.
// If the supplied vertex is not in the array of edges, or there are too few edges (less than 2), the array will be returned unmodified (with nil for the other variables).
// After successfully merging:
//   - the merged edge replaces the edge ending with the supplied vertex in the array,
//   - the edge starting with the supplied vertex is removed from the array,
//   - the length of updatedEdges is one less than edges.
//
// This may update the supplied array, so it should be updated with the returned array.
// In addition to returning the updated array, this also returns the two detached edges and the merged edge.
func MergeEdgesByVertex(edges []CircuitEdge, vertex CircuitVertex) (updatedEdges []CircuitEdge, detachedEdgeA CircuitEdge, detachedEdgeB CircuitEdge, mergedEdge CircuitEdge) {
	if len(edges) < 2 {
		return edges, nil, nil, nil
	}
	// Locate the edge that starts at the vertex to detach.
	vertexIndex := -1
	for i := range edges {
		if edges[i].GetStart() == vertex {
			vertexIndex = i
			break
		}
	}
	if vertexIndex < 0 {
		return edges, nil, nil, nil
	}
	updated, edgeA, edgeB := MergeEdgesByIndex(edges, vertexIndex)
	// The merged edge lives one slot before vertexIndex, wrapping to the end
	// of the (now shorter) array when vertexIndex was 0.
	n := len(updated)
	return updated, edgeA, edgeB, updated[(vertexIndex-1+n)%n]
}
// MergeEdgesCopy combines the edges so that the supplied vertex is no longer used in the edges.
// After successfully merging:
//   - the merged edge replaces the edge ending with the supplied vertex in the array,
//   - the edge starting with the supplied vertex is removed from the array,
//   - the length of updatedEdges is one less than edges.
//
// This does not modify the supplied array, so it is safe to use with algorithms that clone the edges array into multiple circuits.
// If the vertex is not found, or there are fewer than 2 edges, the original array is returned with nil for the other values.
// In addition to returning the updated array, this also returns the two detached edges and the merged edge.
func MergeEdgesCopy(edges []CircuitEdge, vertex CircuitVertex) (updatedEdges []CircuitEdge, detachedEdgeA CircuitEdge, detachedEdgeB CircuitEdge, mergedEdge CircuitEdge) {
	if len(edges) < 2 {
		return edges, nil, nil, nil
	}
	for i, e := range edges {
		if e.GetStart() == vertex {
			lenEdges := len(edges)
			updatedEdges = make([]CircuitEdge, 0, lenEdges-1)
			if i == 0 {
				// The vertex starts edge 0, so its other edge is the last in
				// the array: keep edges[1:len-1] and append the merged edge
				// at the end (replacing the former last edge).
				detachedEdgeA = edges[lenEdges-1]
				detachedEdgeB = edges[i]
				mergedEdge = detachedEdgeA.Merge(detachedEdgeB)
				updatedEdges = append(updatedEdges, edges[1:lenEdges-1]...)
				updatedEdges = append(updatedEdges, mergedEdge)
				return updatedEdges, detachedEdgeA, detachedEdgeB, mergedEdge
			} else {
				// The merged edge replaces edges[i-1]; edges[i] is dropped.
				detachedEdgeA = edges[i-1]
				detachedEdgeB = edges[i]
				mergedEdge = detachedEdgeA.Merge(detachedEdgeB)
				updatedEdges = append(updatedEdges, edges[:i-1]...)
				updatedEdges = append(updatedEdges, mergedEdge)
				if i < lenEdges-1 {
					updatedEdges = append(updatedEdges, edges[i+1:]...)
				}
				return updatedEdges, detachedEdgeA, detachedEdgeB, mergedEdge
			}
		}
	}
	// The vertex does not start any edge.
	return edges, nil, nil, nil
}
// MergeEdgesList combines the edges so that the supplied vertex is no longer used in the linked list of edges.
// In addition to updating the linked list, this also returns the two detached edges and the linked list element for the merged edge.
// If the vertex is not present in the list of edges, it will be unmodified and nil will be returned.
func MergeEdgesList(edges *list.List, vertex CircuitVertex) (detachedEdgeA CircuitEdge, detachedEdgeB CircuitEdge, mergedLink *list.Element) {
	// Merging requires at least two edges.
	if edges.Len() < 2 {
		return nil, nil, nil
	}
	for link := edges.Front(); link != nil; link = link.Next() {
		second := link.Value.(CircuitEdge)
		if second.GetStart() != vertex {
			continue
		}
		// The predecessor (wrapping to the back for the front element) holds
		// the edge ending at the vertex; it receives the merged edge.
		prev := link.Prev()
		if prev == nil {
			prev = edges.Back()
		}
		first := prev.Value.(CircuitEdge)
		prev.Value = first.Merge(second)
		edges.Remove(link)
		return first, second, prev
	}
	return nil, nil, nil
}
// MoveVertex removes an attached vertex from its current location and moves it so that it splits the supplied edge.
// The vertices adjacent to the vertex's original location will be merged into a new edge.
// This may update the supplied array, so it should be updated with the returned array.
// In addition to returning the updated array, this also returns the merged edge and the two edges at the vertex's new location.
// If the vertex or edge is not in the circuit, this will return the original, unmodified, array and nil for the edges.
// Complexity: MoveVertex is O(N)
func MoveVertex(edges []CircuitEdge, vertex CircuitVertex, edge CircuitEdge) (updatedEdges []CircuitEdge, mergedEdge CircuitEdge, splitEdgeA CircuitEdge, splitEdgeB CircuitEdge) {
	// There must be at least 3 edges to move edges (2 that are initially attached to the vertex, and 1 other edge to attach it to).
	numEdges := len(edges)
	if numEdges < 3 {
		return edges, nil, nil, nil
	}
	// To avoid creating an extra array, this algorithm bubbles the second edge from the moved vertex's original location so that it is adjacent to the destination location.
	// These indices are used to enable that bubbling, and to track where to put the merged and split edges.
	// mergedIndex: the edge ending at the vertex (predecessor of fromIndex, wrapping around).
	// fromIndex:   the edge starting at the vertex. toIndex: the destination edge.
	mergedIndex, fromIndex, toIndex := -1, -1, -1
	prevIndex := len(edges) - 1
	for i, e := range edges {
		if e.GetStart() == vertex {
			mergedIndex = prevIndex
			fromIndex = i
		} else if e.GetStart() == edge.GetStart() && e.GetEnd() == edge.GetEnd() {
			toIndex = i
		}
		prevIndex = i
	}
	// If either index is less than zero, then either the vertex to move or the destination edge do not exist in the array.
	if fromIndex < 0 || toIndex < 0 {
		return edges, nil, nil, nil
	}
	// Merge the source edges, and split the destination edge.
	splitEdgeA, splitEdgeB = edge.Split(vertex)
	mergedEdge = edges[mergedIndex].Merge(edges[fromIndex])
	edges[mergedIndex] = mergedEdge
	// Determine whether the second edge (from the original location) needs to be bubbled up or down the array.
	var delta int
	if fromIndex > toIndex {
		delta = -1
		edges[toIndex] = splitEdgeA
		edges[fromIndex] = splitEdgeB
	} else {
		delta = 1
		edges[toIndex] = splitEdgeB
		edges[fromIndex] = splitEdgeA
	}
	// Bubble the displaced edge one slot at a time until it sits next to the
	// destination, preserving the order of everything in between.
	for next := fromIndex + delta; next != toIndex; fromIndex, next = next, next+delta {
		edges[next], edges[fromIndex] = edges[fromIndex], edges[next]
	}
	return edges, mergedEdge, splitEdgeA, splitEdgeB
}
// SplitEdge replaces the supplied edge with the two edges that are created by adding the supplied vertex to the edge.
// This may update the supplied array, so it should be updated with the returned array.
// If the supplied edge does not exist, the array will be returned unchanged, along with an index of -1.
// If the supplied edge does exist, this will return the updated array and the index of the replaced edge (which becomes the index of the first new edge, the index of the second new edge is always that index+1).
func SplitEdge(edges []CircuitEdge, edgeToSplit CircuitEdge, vertexToAdd CircuitVertex) (updatedEdges []CircuitEdge, edgeIndex int) {
	edgeIndex = IndexOfEdge(edges, edgeToSplit)
	if edgeIndex < 0 {
		return edges, -1
	}
	first, second := edgeToSplit.Split(vertexToAdd)
	// Grow by one slot, shift the tail right, then write both new edges over
	// the split edge's position and the freed slot after it.
	edges = append(edges, nil)
	copy(edges[edgeIndex+1:], edges[edgeIndex:])
	edges[edgeIndex] = first
	edges[edgeIndex+1] = second
	return edges, edgeIndex
}
// SplitEdgeCopy replaces the supplied edge with the two edges that are created by adding the supplied vertex to the edge.
// This does not modify the supplied array, so it is safe to use with algorithms that clone the edges array into multiple circuits.
// If the supplied edge does not exist, the array will be returned unchanged, along with an index of -1.
// If the supplied edge does exist, this will return the updated array and the index of the replaced edge (which becomes the index of the first new edge, the index of the second new edge is always that index+1).
// NOTE(review): the destination slice has a fixed length of len(edges)+1, so a
// second edge equal to edgeToSplit would index past the end — assumes edges are
// unique within the circuit; confirm with callers.
func SplitEdgeCopy(edges []CircuitEdge, edgeToSplit CircuitEdge, vertexToAdd CircuitVertex) (updated []CircuitEdge, edgeIndex int) {
	updated = make([]CircuitEdge, len(edges)+1)
	edgeIndex = -1
	updatedIndex := 0
	for _, e := range edges {
		if e.Equals(edgeToSplit) {
			// Write both halves of the split; the extra updatedIndex++ below
			// accounts for the second half.
			edgeA, edgeB := edgeToSplit.Split(vertexToAdd)
			edgeIndex = updatedIndex
			updated[updatedIndex] = edgeA
			updatedIndex++
			updated[updatedIndex] = edgeB
		} else {
			updated[updatedIndex] = e
		}
		updatedIndex++
	}
	if edgeIndex == -1 {
		// Edge not found: discard the copy and return the original array.
		return edges, edgeIndex
	} else {
		return updated, edgeIndex
	}
}
// SplitEdgeList replaces the supplied edge with the two edges that are created by adding the supplied vertex to the edge.
// This requires the supplied edge to exist in the linked list of circuit edges.
// If it does not exist, the linked list will remain unchanged, and nil will be returned.
// If it does exist, this will update the linked list, and return the newly added element in the linked list.
func SplitEdgeList(edges *list.List, edgeToSplit CircuitEdge, vertexToAdd CircuitVertex) *list.Element {
for i, link := 0, edges.Front(); i < edges.Len(); i, link = i+1, link.Next() {
if edge := link.Value.(CircuitEdge); edge.Equals(edgeToSplit) {
edgeA, edgeB := edge.Split(vertexToAdd)
link.Value = edgeA
return edges.InsertAfter(edgeB, link)
}
}
return nil
} | model/utilsedge.go | 0.818773 | 0.783036 | utilsedge.go | starcoder |
package ring
import (
"fmt"
"math/bits"
"github.com/ldsec/lattigo/v2/utils"
)
// IsPrime applies a Miller-Rabin test on the given uint64 variable, returning true if the input is probably prime, and false otherwise.
func IsPrime(num uint64) bool {
	if num < 2 {
		return false
	}
	// Fast path: num is itself one of the known small primes.
	for _, smallPrime := range smallPrimes {
		if num == smallPrime {
			return true
		}
	}
	// Trial division by the small primes cheaply rejects most composites.
	for _, smallPrime := range smallPrimes {
		if num%smallPrime == 0 {
			return false
		}
	}
	// Write num-1 = s * 2^k with s odd.
	s := num - 1
	k := 0
	for (s & 1) == 0 {
		s >>= 1
		k++
	}
	// Barrett reduction parameters for fast modular multiplication mod num.
	bredParams := BRedParams(num)
	var mask, b uint64
	// mask spans the bit-length of num; used when sampling random bases.
	mask = (1 << uint64(bits.Len64(num))) - 1
	prng, err := utils.NewPRNG()
	if err != nil {
		panic(err)
	}
	// 50 Miller-Rabin rounds, each with a random base b (resampled until b >= 2).
	for trial := 0; trial < 50; trial++ {
		b = RandUniform(prng, num-1, mask)
		for b < 2 {
			b = RandUniform(prng, num-1, mask)
		}
		x := ModExp(b, s, num)
		if x != 1 {
			i := 0
			// Square x repeatedly; if num-1 is never reached within k-1
			// squarings, num is composite.
			for x != num-1 {
				if i == k-1 {
					return false
				}
				i++
				x = BRed(x, x, num, bredParams)
			}
		}
	}
	return true
}
// GenerateNTTPrimes generates n NthRoot NTT friendly primes given logQ = size of the primes.
// It will return all the appropriate primes, up to the number of n, with the
// best available deviation from the base power of 2 for the given n.
func GenerateNTTPrimes(logQ, NthRoot, n uint64) (primes []uint64) {
	switch {
	case logQ > 61:
		panic("logQ must be between 1 and 61")
	case logQ == 61:
		// At the maximum size, only search downward so the 61-bit bound holds.
		return GenerateNTTPrimesP(logQ, NthRoot, n)
	default:
		return GenerateNTTPrimesQ(logQ, NthRoot, n)
	}
}
// NextNTTPrime returns the next NthRoot NTT prime after q.
// The input q must be itself an NTT prime for the given NthRoot.
func NextNTTPrime(q, NthRoot uint64) (qNext uint64, err error) {
	// Stepping by NthRoot preserves q's residue modulo NthRoot, so every
	// candidate remains NTT-friendly for this NthRoot.
	qNext = q + NthRoot
	for !IsPrime(qNext) {
		qNext += NthRoot
		// Abort once a candidate would exceed the 61-bit limit.
		if bits.Len64(qNext) > 61 {
			return 0, fmt.Errorf("Next NTT prime exceeds the maximum bit-size of 61 bits")
		}
	}
	return qNext, nil
}
// PreviousNTTPrime returns the previous NthRoot NTT prime before q.
// The input q must be itself an NTT prime for the given NthRoot.
// It returns an error if no such prime exists above NthRoot.
func PreviousNTTPrime(q, NthRoot uint64) (qPrev uint64, err error) {
	if q < NthRoot {
		return 0, fmt.Errorf("Previous NTT prime is smaller than NthRoot")
	}
	// Stepping by NthRoot preserves q's residue modulo NthRoot, so every
	// candidate remains NTT-friendly for this NthRoot.
	qPrev = q - NthRoot
	for !IsPrime(qPrev) {
		// Guard on qPrev, not q: q never changes in this loop, so testing it
		// would never fire and the subtraction below could wrap past zero.
		if qPrev < NthRoot {
			return 0, fmt.Errorf("Previous NTT prime is smaller than NthRoot")
		}
		qPrev -= NthRoot
	}
	return qPrev, nil
}
// GenerateNTTPrimesQ generates "levels" different NthRoot NTT-friendly
// primes starting from 2**LogQ and alternating between upward and downward.
// It panics if the search space on both sides is exhausted before "levels"
// primes are found.
func GenerateNTTPrimesQ(logQ, NthRoot, levels uint64) (primes []uint64) {
	primes = []uint64{}
	Qpow2 := uint64(1) << logQ
	// Both searches start at 2^logQ + 1 and step by NthRoot (keeping the
	// candidates congruent to 1 mod NthRoot), one upward and one downward.
	nextPrime := Qpow2 + 1
	previousPrime := Qpow2 + 1
	checkfornextprime := true
	checkforpreviousprime := true
	for {
		if !(checkfornextprime || checkforpreviousprime) {
			panic("GenerateNTTPrimesQ error: cannot generate enough primes for the given parameters")
		}
		if checkfornextprime {
			// Stop searching upward once the next step would overflow uint64.
			if nextPrime > 0xffffffffffffffff-NthRoot {
				checkfornextprime = false
			} else {
				nextPrime += NthRoot
				if IsPrime(nextPrime) {
					primes = append(primes, nextPrime)
					if uint64(len(primes)) == levels {
						return
					}
				}
			}
		}
		if checkforpreviousprime {
			// Stop searching downward once the next step would underflow.
			if previousPrime < NthRoot {
				checkforpreviousprime = false
			} else {
				previousPrime -= NthRoot
				if IsPrime(previousPrime) {
					primes = append(primes, previousPrime)
					if uint64(len(primes)) == levels {
						return
					}
				}
			}
		}
	}
}
// GenerateNTTPrimesP generates "levels" different NthRoot NTT-friendly
// primes starting from 2**LogP and downward.
// Special case were primes close to 2^{LogP} but with a smaller bit-size than LogP are sought.
func GenerateNTTPrimesP(logP, NthRoot, n uint64) (primes []uint64) {
var x, Ppow2 uint64
primes = []uint64{}
Ppow2 = 1 << logP
x = Ppow2 + 1
for true {
// We start by subtracting 2N to ensure that the prime bit-length is smaller than LogP
if x > NthRoot {
x -= NthRoot
if IsPrime(x) {
primes = append(primes, x)
if uint64(len(primes)) == n {
return primes
}
}
} else {
panic("GenerateNTTPrimesP error: cannot generate enough primes for the given parameters")
}
}
return
} | ring/primes.go | 0.738103 | 0.42179 | primes.go | starcoder |
package main
import (
"math"
"math/rand"
"time"
)
// Boid is a single simulated agent with a position and velocity on the screen.
type Boid struct {
	position Vector2D // current location in screen coordinates
	velocity Vector2D // displacement applied per step; clamped to [-1, 1] per component in moveOne
	id       int      // index of this boid in the global boids slice and boidMap
}
// createBoid constructs boid bid at a random on-screen position with a random
// velocity in [-1, 1) per component, registers it in the shared boids slice and
// position map, then starts its simulation loop on a new goroutine.
// NOTE(review): boids/boidMap are written without holding rwlock — assumes all
// boids are created before the simulation starts; confirm with the caller.
func createBoid(bid int) {
	b := Boid{
		position: Vector2D{x: rand.Float64() * screenWidth1, y: rand.Float64() * screenHeight1},
		velocity: Vector2D{x: (rand.Float64() * 2) - 1.0, y: (rand.Float64() * 2) - 1.0},
		id: bid,
	}
	boids[bid] = &b
	boidMap[int(b.position.x)][int(b.position.y)] = b.id
	go b.start()
}
// start runs the boid's simulation loop, advancing it one step every 5ms.
// It never returns and is intended to run on its own goroutine.
func (b *Boid) start() {
	for {
		b.moveOne()
		time.Sleep(5 * time.Millisecond)
	}
}
// moveOne performs one simulation step: compute the acceleration, fold it into
// the velocity (clamped to [-1, 1] per component), and move the boid while
// keeping the shared position map consistent under the write lock.
func (b *Boid) moveOne() {
	acceleration := b.calcAcceleration()
	rwlock.Lock()
	defer rwlock.Unlock()
	b.velocity = b.velocity.Add(acceleration).Limit(-1, 1)
	// Clear the old map cell before writing the new one.
	boidMap[int(b.position.x)][int(b.position.y)] = -1
	b.position = b.position.Add(b.velocity)
	boidMap[int(b.position.x)][int(b.position.y)] = b.id
}
// calcAcceleration computes this boid's acceleration from neighbours within
// viewRadius, combining alignment, cohesion, and separation steering terms
// with a repulsion term that pushes the boid away from the screen borders.
func (b *Boid) calcAcceleration() Vector2D {
	// Bounding box of the visible neighbourhood (clamped to the screen below).
	upper, lower := b.position.AddV(viewRadius), b.position.AddV(-viewRadius)
	avgPosition, avgVelocity, seperation := Vector2D{0, 0}, Vector2D{0, 0}, Vector2D{0, 0}
	count := 0.0
	rwlock.RLock()
	// Scan the shared position map inside the bounding box, accumulating each
	// neighbour's velocity, position, and distance-weighted separation offset.
	for i := math.Max(lower.x, 0); i <= math.Min(upper.x, screenWidth1); i++ {
		for j := math.Max(lower.y, 0); j <= math.Min(upper.y, screenHeight1); j++ {
			if otherBoidId := boidMap[int(i)][int(j)]; otherBoidId != -1 && otherBoidId != b.id {
				if dist := boids[otherBoidId].position.Distance(b.position); dist < viewRadius {
					count++
					avgVelocity = avgVelocity.Add(boids[otherBoidId].velocity)
					avgPosition = avgPosition.Add(boids[otherBoidId].position)
					seperation = seperation.Add(b.position.Subtract(boids[otherBoidId].position).DivideV(dist))
				}
			}
		}
	}
	rwlock.RUnlock()
	// Border repulsion applies whether or not any neighbours were found.
	accel := Vector2D{x: b.borderBounce(b.position.x, screenWidth1), y: b.borderBounce(b.position.y, screenHeight1)}
	if count > 0 {
		// Average the accumulators and steer toward the flock's mean velocity
		// and position, and away from close neighbours, each scaled by adjRate.
		avgVelocity = avgVelocity.DivideV(count)
		avgPosition = avgPosition.DivideV(count)
		accelAlignment := avgVelocity.Subtract(b.velocity).MultiplyV(adjRate)
		accelCohesion := avgPosition.Subtract(b.position).MultiplyV(adjRate)
		accelSeperation := seperation.MultiplyV(adjRate)
		accel = accel.Add(accelAlignment).Add(accelCohesion).Add(accelSeperation)
	}
	return accel
}
func (b *Boid) borderBounce(pos, maxBorderPos float64) float64 {
if pos < viewRadius {
return 1 / pos
} else if pos > maxBorderPos - viewRadius {
return 1 / (pos - maxBorderPos)
}
return 0
} | boids/boid.go | 0.702224 | 0.493653 | boid.go | starcoder |
package iterator
import (
. "github.com/Wei-N-Ning/gotypes/pkg/option"
)
// parMapImpl streams f applied to each element of iter over the returned
// channel, terminating the stream with a None marker before closing it.
// NOTE(review): any parallelism here depends on the semantics of Map/ForEach
// over one-shot iterators — confirm against the Iterator implementation.
func parMapImpl[T, R any](iter Iterator[T], f func(x T) R) <-chan Option[R] {
	ch := make(chan Option[R], 1024)
	// Wrap each element in a one-shot iterator that lazily applies f.
	outIter := Map(iter, func(x T) Iterator[R] {
		return OnceWith(func() R { return f(x) })
	})
	go func() {
		defer close(ch)
		outIter.ForEach(func(elem Iterator[R]) {
			ch <- elem.Next()
		})
		// None signals exhaustion; the deferred close runs after this send.
		ch <- None[R]()
	}()
	return ch
}
// ParMap respects the original order, but this causes significant overhead.
// If order is not important, use ParMapUnord instead.
// See parmap_test.go for a rough comparison between these two versions.
func ParMap[T, R any](iter Iterator[T], f func(x T) R) Iterator[R] {
	results := parMapImpl(iter, f)
	return Iterator[R]{ch: results, inner: iter}
}
// parMapUnorderedImpl applies f to every element on its own goroutine and
// streams results in completion order, terminating with a None marker and
// closing the channel once all results have been forwarded.
func parMapUnorderedImpl[T, R any](iter Iterator[T], f func(x T) R) <-chan Option[R] {
	ch := make(chan Option[R], 1024)
	aggregator := make(chan R, 1024)
	go func() {
		defer func() {
			ch <- None[R]()
			close(ch)
		}()
		// Fan out: one goroutine per element, counting how many were launched.
		num := 0
		iter.ForEach(func(x T) {
			go func() {
				aggregator <- f(x)
			}()
			num += 1
		})
		// Fan in: forward exactly num results, in whatever order they finish.
		for i := 0; i < num; i++ {
			ch <- Some[R](<-aggregator)
		}
	}()
	return ch
}
// ParMapUnord disregards the element order but can achieve better performance
// than ParMap; prefer it whenever ordering is irrelevant.
func ParMapUnord[T, R any](iter Iterator[T], f func(x T) R) Iterator[R] {
	results := parMapUnorderedImpl(iter, f)
	return Iterator[R]{ch: results, inner: iter}
}
func ParMapReduce[T, R any](iter Iterator[T], init R, mapper func(x T) R, reducer func(R, R) R) R {
// the buffer size affects the creation time of the channel
// (e.g. if given math.MaxInt32, this statement can take a few hundred ms)
rw := make(chan R, 1024)
// map
numTasks := 0
iter.ForEach(func(x T) {
numTasks += 1
go func() {
rw <- mapper(x)
}()
})
// reduce
for {
// the terminating condition
if numTasks == 0 {
break
}
for i := 0; i < numTasks/2; i++ {
first := <-rw
second := <-rw
go func() {
rw <- reducer(first, second)
}()
}
// handle tail task
if numTasks%2 == 1 {
init = reducer(init, <-rw)
}
numTasks = numTasks / 2
}
return init
} | pkg/iterator/parmap.go | 0.639061 | 0.433022 | parmap.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.