code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package ts
import (
"fmt"
"time"
)
// Resolution is used to enumerate the different resolution values supported by
// ZNBase.
type Resolution int64

// Resolution enumeration values are directly serialized and persisted into
// system keys; these values must never be altered or reordered. If new rollup
// resolutions are added, the IsRollup() method must be modified as well.
const (
	// Resolution10s stores data with a sample resolution of 10 seconds.
	Resolution10s Resolution = 1
	// Resolution30m stores roll-up data from a higher resolution at a sample
	// resolution of 30 minutes.
	Resolution30m Resolution = 2
	// resolution1ns stores data with a sample resolution of 1 nanosecond. Used
	// only for testing.
	resolution1ns Resolution = 998
	// resolution50ns stores roll-up data from the 1ns resolution at a sample
	// resolution of 50 nanoseconds. Used for testing.
	resolution50ns Resolution = 999
	// resolutionInvalid is an invalid resolution used only for testing. It causes
	// an error to be thrown in certain methods. It is invalid because its sample
	// period is not a divisor of its slab period.
	resolutionInvalid Resolution = 1000
)

// resolutionNames maps every known Resolution to its display name.
var resolutionNames = map[Resolution]string{
	Resolution10s:     "10s",
	Resolution30m:     "30m",
	resolution1ns:     "1ns",
	resolution50ns:    "50ns",
	resolutionInvalid: "BAD",
}

// String returns a human-readable name for the resolution, falling back to
// the raw numeric value for unrecognized resolutions.
func (r Resolution) String() string {
	if name, ok := resolutionNames[r]; ok {
		return name
	}
	return fmt.Sprintf("%d", r)
}
// sampleDurationByResolution is a map used to retrieve the sample duration
// corresponding to a Resolution value. Sample durations are expressed in
// nanoseconds.
var sampleDurationByResolution = map[Resolution]int64{
	Resolution10s: int64(time.Second * 10),
	Resolution30m: int64(time.Minute * 30),
	resolution1ns: 1, // 1ns resolution only for tests.
	resolution50ns: 50, // 50ns rollup only for tests.
	// Deliberately NOT a divisor of its slab duration (11) below — that is
	// what makes this resolution "invalid" for tests.
	resolutionInvalid: 10, // Invalid resolution.
}

// slabDurationByResolution is a map used to retrieve the slab duration
// corresponding to a Resolution value; the slab duration determines how many
// samples are stored at a single ZNBase key/value. Slab durations are
// expressed in nanoseconds.
var slabDurationByResolution = map[Resolution]int64{
	Resolution10s: int64(time.Hour),
	Resolution30m: int64(time.Hour * 24),
	resolution1ns: 10, // 1ns resolution only for tests.
	resolution50ns: 1000, // 50ns rollup only for tests.
	// 11 is not a multiple of the 10ns sample duration above, making the
	// resolution invalid by construction.
	resolutionInvalid: 11,
}
// SampleDuration returns the sample duration corresponding to this resolution
// value, expressed in nanoseconds. It panics for resolutions that have no
// registered sample duration.
func (r Resolution) SampleDuration() int64 {
	if d, ok := sampleDurationByResolution[r]; ok {
		return d
	}
	panic(fmt.Sprintf("no sample duration found for resolution value %v", r))
}
// SlabDuration returns the slab duration corresponding to this resolution
// value, expressed in nanoseconds. The slab duration determines how many
// consecutive samples are stored in a single ZNBase key/value. It panics for
// resolutions that have no registered slab duration.
func (r Resolution) SlabDuration() int64 {
	if d, ok := slabDurationByResolution[r]; ok {
		return d
	}
	panic(fmt.Sprintf("no slab duration found for resolution value %v", r))
}
// IsRollup reports whether this resolution contains rollup data: statistical
// values about a large number of samples taken over a long period, such as
// the min, max and sum.
func (r Resolution) IsRollup() bool {
	switch r {
	case Resolution30m, resolution50ns:
		return true
	default:
		return false
	}
}
// TargetRollupResolution returns a target resolution that data from this
// resolution should be rolled up into in lieu of deletion. For example,
// Resolution10s has a target rollup resolution of Resolution30m. The second
// return value is false when the resolution has no rollup target.
func (r Resolution) TargetRollupResolution() (Resolution, bool) {
	if r == Resolution10s {
		return Resolution30m, true
	}
	if r == resolution1ns {
		return resolution50ns, true
	}
	return r, false
}
// normalizeToPeriod rounds timestampNanos down to the nearest multiple of
// period. For negative timestamps this truncates toward zero, matching the
// semantics of Go's % operator.
func normalizeToPeriod(timestampNanos int64, period int64) int64 {
	rem := timestampNanos % period
	return timestampNanos - rem
}
func (r Resolution) normalizeToSlab(timestampNanos int64) int64 {
return normalizeToPeriod(timestampNanos, r.SlabDuration())
} | pkg/ts/resolution.go | 0.841598 | 0.496704 | resolution.go | starcoder |
package carbon
import (
"bytes"
"time"
)
// formats maps PHP-style date formatting symbols to their Go reference-time
// layout equivalents.
var formats = map[byte]string{
	'd': "02", // Day: Day of the month, 2 digits with leading zeros. Eg: 01 to 31.
	'D': "Mon", // Day: A textual representation of a day, three letters. Eg: Mon through Sun.
	'j': "2", // Day: Day of the month without leading zeros. Eg: 1 to 31.
	'l': "Monday", // Day: A full textual representation of the day of the week. Eg: Sunday through Saturday.
	'F': "January", // Month: A full textual representation of a month, such as January or March. Eg: January through December.
	'm': "01", // Month: Numeric representation of a month, with leading zeros. Eg: 01 through 12.
	'M': "Jan", // Month: A short textual representation of a month, three letters. Eg: Jan through Dec.
	'n': "1", // Month: Numeric representation of a month, without leading zeros. Eg: 1 through 12.
	'Y': "2006", // Year: A full numeric representation of a year, 4 digits. Eg: 1999 or 2003.
	'y': "06", // Year: A two digit representation of a year. Eg: 99 or 03.
	'a': "pm", // Time: Lowercase morning or afternoon sign. Eg: am or pm.
	'A': "PM", // Time: Uppercase morning or afternoon sign. Eg: AM or PM.
	'g': "3", // Time: 12-hour format of an hour without leading zeros. Eg: 1 through 12.
	'h': "03", // Time: 12-hour format of an hour with leading zeros. Eg: 01 through 12.
	'H': "15", // Time: 24-hour format of an hour with leading zeros. Eg: 00 through 23.
	'i': "04", // Time: Minutes with leading zeros. Eg: 00 to 59.
	's': "05", // Time: Seconds with leading zeros. Eg: 00 through 59.
	'O': "-0700", // Zone: Difference to Greenwich time (GMT) in hours. Eg: +0200.
	'P': "-07:00", // Zone: Difference to Greenwich time (GMT) with colon between hours and minutes. Eg: +02:00.
	'T': "MST", // Zone: Timezone abbreviation. Eg: UTC, EST, MDT ...
	'c': "2006-01-02T15:04:05-07:00", // Format: ISO 8601 date. Eg: 2004-02-12T15:19:21+00:00.
	'r': "Mon, 02 Jan 06 15:04 MST", // Format: RFC 2822 formatted date. Eg: Thu, 21 Dec 2000 16:01:07 +0200.
}

// format2layout converts a PHP-style format string into a Go layout string.
// A backslash escapes the next rune, emitting it verbatim.
//
// Fixes over the original implementation:
//   - A trailing backslash previously read runes[i+1] out of range and
//     panicked; it is now ignored.
//   - Symbols were looked up via byte(rune), which truncates runes >= U+0080
//     and could falsely match a format symbol (e.g. 'ř' (U+0159) truncated
//     to 'Y' and emitted "2006"). Only ASCII runes are looked up now.
func format2layout(format string) string {
	runes := []rune(format)
	buffer := bytes.NewBuffer(nil)
	for i := 0; i < len(runes); i++ {
		r := runes[i]
		// Only ASCII runes can be format symbols; anything wider would be
		// corrupted by the byte() truncation.
		if r < 0x80 {
			if layout, ok := formats[byte(r)]; ok {
				buffer.WriteString(layout)
				continue
			}
		}
		if r == '\\' {
			// Escaped rune: emit verbatim. A trailing backslash with
			// nothing after it is dropped instead of panicking.
			if i+1 < len(runes) {
				buffer.WriteRune(runes[i+1])
				i++
			}
			continue
		}
		buffer.WriteRune(r)
	}
	return buffer.String()
}
// getLocationByTimezone returns the *time.Location for the given IANA
// timezone name (e.g. "Asia/Shanghai" or "UTC").
// NOTE(review): on failure, the underlying time.LoadLocation error is
// discarded and replaced by invalidTimezoneError(timezone); the nil
// Location is returned alongside that error. Consider wrapping instead of
// replacing so the root cause is preserved — confirm with error API owners.
func getLocationByTimezone(timezone string) (*time.Location, error) {
	loc, err := time.LoadLocation(timezone)
	if err != nil {
		err = invalidTimezoneError(timezone)
	}
	return loc, err
}
// parseByDuration parses a duration string (time.ParseDuration syntax, e.g.
// "1h30m") into a time.Duration.
// NOTE(review): like getLocationByTimezone, the underlying parse error is
// replaced by invalidDurationError(duration), losing the original detail.
func parseByDuration(duration string) (time.Duration, error) {
	td, err := time.ParseDuration(duration)
	if err != nil {
		err = invalidDurationError(duration)
	}
	return td, err
}
// getAbsValue returns the absolute value of an int64 without branching.
//
// mask is all ones when value is negative and zero otherwise, so
// (value ^ mask) - mask flips the bits and adds one exactly when the input
// is negative (two's-complement negation).
//
// Fix: the original shifted by 31, which only produces a correct all-ones/
// all-zeros mask for values fitting in 32 bits; for larger magnitudes (e.g.
// -(1<<40 + 1)) it returned wrong results. An int64 sign mask requires a
// 63-bit arithmetic shift.
//
// Note: math.MinInt64 has no positive counterpart and is returned unchanged.
func getAbsValue(value int64) int64 {
	mask := value >> 63
	return (value ^ mask) - mask
}
package lnk
import (
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"strings"
)
// ExtraDataSection represents section 2.5 of the specification: the optional
// sequence of data blocks at the end of a .lnk file, ending with a terminal
// block.
type ExtraDataSection struct {
	// Blocks holds the parsed data blocks in file order.
	Blocks []ExtraDataBlock
	// Terminal block at the end of the ExtraData section.
	// Value must be smaller than 0x04.
	TerminalBlock uint32
}
/*
ExtraDataBlock represents one of the optional data blocks at the end of the
lnk file.
Each data block starts with a uint32 size and a uint32 signature.
Detection is as follows:
	1. Read the uint32 size. If size < 0x04, it's the terminal block.
	2. Read the datablock (size-4) more bytes from the io.Reader.
	3. Read the uint32 signature. It will designate the datablock.
	4. Parse the data based on the signature.
*/
type ExtraDataBlock struct {
	// Size is the total block size in bytes, including the Size and
	// Signature fields themselves.
	Size uint32
	// Signature identifies the block type (see blockSignature).
	Signature uint32
	// Type is the human-readable block type name derived from Signature.
	Type string
	// Data is the raw block payload (Size minus the 8 header bytes).
	Data []byte
}
// DataBlock reads the ExtraData section from r, parsing data blocks until
// the terminal block (size < 0x04) is reached, and returns the populated
// ExtraDataSection.
//
// Fixes over the original implementation:
//   - Sizes in the range [0x04, 0x08) previously underflowed the uint32
//     subtraction Size-8, requesting a ~4 GiB allocation on malformed
//     input; they are now rejected with an error.
//   - The scratch block is declared inside the loop so no field can leak
//     between iterations.
func DataBlock(r io.Reader) (extra ExtraDataSection, err error) {
	for {
		// Read the block size; a value below 0x04 marks the terminal block.
		var size uint32
		err = binary.Read(r, binary.LittleEndian, &size)
		if err != nil {
			return extra, fmt.Errorf("golnk.readDataBlock: read size - %s", err.Error())
		}
		if size < 0x04 {
			extra.TerminalBlock = size
			break
		}
		// A well-formed block must at least cover its own size and
		// signature fields (8 bytes); anything smaller would underflow
		// the payload length below.
		if size < 8 {
			return extra, fmt.Errorf("golnk.readDataBlock: invalid block size %d", size)
		}
		var db ExtraDataBlock
		db.Size = size
		// Read the block's signature and resolve the block type name.
		err = binary.Read(r, binary.LittleEndian, &db.Signature)
		if err != nil {
			return extra, fmt.Errorf("golnk.readDataBlock: read signature - %s", err.Error())
		}
		db.Type = blockSignature(db.Signature)
		// Read the remaining payload: total size minus the 8 header bytes
		// already consumed.
		data := make([]byte, db.Size-8)
		err = binary.Read(r, binary.LittleEndian, &data)
		if err != nil {
			return extra, fmt.Errorf("golnk.readDataBlock: read data - %s", err.Error())
		}
		db.Data = data
		extra.Blocks = append(extra.Blocks, db)
	}
	return extra, nil
}
// extraDataBlockNames maps ExtraData block signatures to their block type
// names. Hoisted to package level so the map is built once instead of on
// every blockSignature call.
var extraDataBlockNames = map[uint32]string{
	0xA0000002: "ConsoleDataBlock",
	0xA0000004: "ConsoleFEDataBlock",
	0xA0000006: "DarwinDataBlock",
	0xA0000001: "EnvironmentVariableDataBlock",
	0xA0000007: "IconEnvironmentDataBlock",
	0xA0000009: "PropertyStoreDataBlock",
	0xA0000008: "ShimDataBlock",
	0xA0000005: "SpecialFolderDataBlock",
	0xA0000003: "TrackerDataBlock",
	0xA000000C: "VistaAndAboveIDListDataBlock",
	0xA000000B: "KnownFolderDataBlock",
}

// blockSignature returns the block type name for a signature, or a
// "Signature Not Found" marker containing the hex-encoded signature for
// unknown values.
func blockSignature(sig uint32) string {
	if name, exists := extraDataBlockNames[sig]; exists {
		return name
	}
	return "Signature Not Found - " + hex.EncodeToString(uint32Byte(sig))
}
// String renders every ExtraData block's Size, Signature, Type and a hexdump
// of its payload, each block terminated by a dashed separator line.
func (e ExtraDataSection) String() string {
	var sb strings.Builder
	for _, blk := range e.Blocks {
		fmt.Fprintf(&sb, "Size: %s\n", uint32TableStr(blk.Size))
		fmt.Fprintf(&sb, "Signature: %s\n", uint32StrHex(blk.Signature))
		fmt.Fprintf(&sb, "Type: %s\n", blk.Type)
		sb.WriteString("Dump\n")
		sb.WriteString(blk.Dump())
		sb.WriteString("-------------------------\n")
	}
	return sb.String()
}
// Dump returns the hex.Dump of ExtraDataBlock.
func (db ExtraDataBlock) Dump() string {
return hex.Dump(db.Data)
} | extradata.go | 0.606732 | 0.4133 | extradata.go | starcoder |
package code
import "math"
// Unreachable builds an "unreachable" instruction.
func Unreachable() Instruction { return Instruction{Opcode: OpUnreachable} }

// Nop builds a "nop" instruction.
func Nop() Instruction { return Instruction{Opcode: OpNop} }
// blockTypeImm resolves the optional variadic block-type argument shared by
// the structured control instructions: the first element wins, and an absent
// argument defaults to BlockTypeEmpty.
func blockTypeImm(blockType []uint64) uint64 {
	if len(blockType) == 0 {
		return uint64(BlockTypeEmpty)
	}
	return blockType[0]
}

// Block builds a "block" instruction with an optional block type
// (defaults to BlockTypeEmpty).
func Block(blockType ...uint64) Instruction {
	return Instruction{Opcode: OpBlock, Immediate: blockTypeImm(blockType)}
}

// Loop builds a "loop" instruction with an optional block type
// (defaults to BlockTypeEmpty).
func Loop(blockType ...uint64) Instruction {
	return Instruction{Opcode: OpLoop, Immediate: blockTypeImm(blockType)}
}

// If builds an "if" instruction with an optional block type
// (defaults to BlockTypeEmpty).
func If(blockType ...uint64) Instruction {
	return Instruction{Opcode: OpIf, Immediate: blockTypeImm(blockType)}
}
// Else builds an "else" instruction.
func Else() Instruction { return Instruction{Opcode: OpElse} }

// End builds an "end" instruction, closing a structured control construct.
func End() Instruction { return Instruction{Opcode: OpEnd} }

// Br builds an unconditional branch to the given label index.
func Br(labelidx int) Instruction { return Instruction{Opcode: OpBr, Immediate: uint64(labelidx)} }

// BrIf builds a conditional branch to the given label index.
func BrIf(labelidx int) Instruction { return Instruction{Opcode: OpBrIf, Immediate: uint64(labelidx)} }
// BrTable builds a "br_table" instruction. The arguments are the branch
// targets in order, with the FINAL argument acting as the default label:
// it is stored in Immediate, while all preceding arguments become the
// Labels table. Called with a single argument, the table is empty and that
// argument is the default.
func BrTable(labelidx int, labelidxN ...int) Instruction {
	labels := make([]int, len(labelidxN))
	if len(labelidxN) > 0 {
		// Rotate: the first argument joins the front of the table, and
		// the last argument becomes the default (kept in labelidx).
		labels[0], labelidx = labelidx, labelidxN[len(labelidxN)-1]
		copy(labels[1:], labelidxN[:len(labelidxN)-1])
	}
	return Instruction{Opcode: OpBrTable, Immediate: uint64(labelidx), Labels: labels}
}
// Control and parametric instruction constructors.
func Return() Instruction { return Instruction{Opcode: OpReturn} }

// Call builds a direct call to the function at funcidx.
func Call(funcidx uint32) Instruction { return Instruction{Opcode: OpCall, Immediate: uint64(funcidx)} }

// CallIndirect builds an indirect call through the table at tableidx.
func CallIndirect(tableidx uint32) Instruction {
	return Instruction{Opcode: OpCallIndirect, Immediate: uint64(tableidx)}
}

// Drop builds a "drop" instruction, discarding the top stack value.
func Drop() Instruction { return Instruction{Opcode: OpDrop} }

// Select builds a "select" instruction.
func Select() Instruction { return Instruction{Opcode: OpSelect} }
// Variable-access instruction constructors; each carries the variable index
// as its immediate.
func LocalGet(localidx uint32) Instruction { return Instruction{Opcode: OpLocalGet, Immediate: uint64(localidx)} }
func LocalSet(localidx uint32) Instruction { return Instruction{Opcode: OpLocalSet, Immediate: uint64(localidx)} }
func LocalTee(localidx uint32) Instruction { return Instruction{Opcode: OpLocalTee, Immediate: uint64(localidx)} }
func GlobalGet(globalidx uint32) Instruction { return Instruction{Opcode: OpGlobalGet, Immediate: uint64(globalidx)} }
func GlobalSet(globalidx uint32) Instruction { return Instruction{Opcode: OpGlobalSet, Immediate: uint64(globalidx)} }
// Memory load instruction constructors. Each takes the static offset and
// alignment hint that together form the instruction's memarg immediate.
func I32Load(offset, align uint32) Instruction { return Instruction{Opcode: OpI32Load, Immediate: memarg(offset, align)} }
func I64Load(offset, align uint32) Instruction { return Instruction{Opcode: OpI64Load, Immediate: memarg(offset, align)} }
func F32Load(offset, align uint32) Instruction { return Instruction{Opcode: OpF32Load, Immediate: memarg(offset, align)} }
func F64Load(offset, align uint32) Instruction { return Instruction{Opcode: OpF64Load, Immediate: memarg(offset, align)} }
func I32Load8S(offset, align uint32) Instruction { return Instruction{Opcode: OpI32Load8S, Immediate: memarg(offset, align)} }
func I32Load8U(offset, align uint32) Instruction { return Instruction{Opcode: OpI32Load8U, Immediate: memarg(offset, align)} }
func I32Load16S(offset, align uint32) Instruction { return Instruction{Opcode: OpI32Load16S, Immediate: memarg(offset, align)} }
func I32Load16U(offset, align uint32) Instruction { return Instruction{Opcode: OpI32Load16U, Immediate: memarg(offset, align)} }
func I64Load8S(offset, align uint32) Instruction { return Instruction{Opcode: OpI64Load8S, Immediate: memarg(offset, align)} }
func I64Load8U(offset, align uint32) Instruction { return Instruction{Opcode: OpI64Load8U, Immediate: memarg(offset, align)} }
func I64Load16S(offset, align uint32) Instruction { return Instruction{Opcode: OpI64Load16S, Immediate: memarg(offset, align)} }
func I64Load16U(offset, align uint32) Instruction { return Instruction{Opcode: OpI64Load16U, Immediate: memarg(offset, align)} }
func I64Load32S(offset, align uint32) Instruction { return Instruction{Opcode: OpI64Load32S, Immediate: memarg(offset, align)} }
func I64Load32U(offset, align uint32) Instruction { return Instruction{Opcode: OpI64Load32U, Immediate: memarg(offset, align)} }
// Memory store instruction constructors; like the loads, each carries a
// memarg (static offset + alignment hint) immediate.
func I32Store(offset, align uint32) Instruction { return Instruction{Opcode: OpI32Store, Immediate: memarg(offset, align)} }
func I64Store(offset, align uint32) Instruction { return Instruction{Opcode: OpI64Store, Immediate: memarg(offset, align)} }
func F32Store(offset, align uint32) Instruction { return Instruction{Opcode: OpF32Store, Immediate: memarg(offset, align)} }
func F64Store(offset, align uint32) Instruction { return Instruction{Opcode: OpF64Store, Immediate: memarg(offset, align)} }
func I32Store8(offset, align uint32) Instruction { return Instruction{Opcode: OpI32Store8, Immediate: memarg(offset, align)} }
func I32Store16(offset, align uint32) Instruction { return Instruction{Opcode: OpI32Store16, Immediate: memarg(offset, align)} }
func I64Store8(offset, align uint32) Instruction { return Instruction{Opcode: OpI64Store8, Immediate: memarg(offset, align)} }
func I64Store16(offset, align uint32) Instruction { return Instruction{Opcode: OpI64Store16, Immediate: memarg(offset, align)} }
func I64Store32(offset, align uint32) Instruction { return Instruction{Opcode: OpI64Store32, Immediate: memarg(offset, align)} }

// MemorySize builds a "memory.size" instruction.
func MemorySize() Instruction { return Instruction{Opcode: OpMemorySize} }

// MemoryGrow builds a "memory.grow" instruction.
func MemoryGrow() Instruction { return Instruction{Opcode: OpMemoryGrow} }
// I32Const builds an "i32.const" instruction.
// Note: uint64(v) sign-extends, so a negative int32 fills the upper 32 bits
// of the immediate with ones — presumably consumers truncate back to 32
// bits when encoding; TODO confirm against the encoder.
func I32Const(v int32) Instruction {
	return Instruction{Opcode: OpI32Const, Immediate: uint64(v)}
}

// I64Const builds an "i64.const" instruction.
func I64Const(v int64) Instruction {
	return Instruction{Opcode: OpI64Const, Immediate: uint64(v)}
}

// F32Const builds an "f32.const" instruction; the immediate holds the
// IEEE-754 bit pattern of v (math.Float32bits).
func F32Const(v float32) Instruction {
	return Instruction{Opcode: OpF32Const, Immediate: uint64(math.Float32bits(v))}
}

// F64Const builds an "f64.const" instruction; the immediate holds the
// IEEE-754 bit pattern of v (math.Float64bits).
func F64Const(v float64) Instruction {
	return Instruction{Opcode: OpF64Const, Immediate: math.Float64bits(v)}
}
// Comparison instruction constructors. Each returns an Instruction carrying
// only its opcode; the operands come from the value stack.
func I32Eqz() Instruction { return Instruction{Opcode: OpI32Eqz} }
func I32Eq() Instruction { return Instruction{Opcode: OpI32Eq} }
func I32Ne() Instruction { return Instruction{Opcode: OpI32Ne} }
func I32LtS() Instruction { return Instruction{Opcode: OpI32LtS} }
func I32LtU() Instruction { return Instruction{Opcode: OpI32LtU} }
func I32GtS() Instruction { return Instruction{Opcode: OpI32GtS} }
func I32GtU() Instruction { return Instruction{Opcode: OpI32GtU} }
func I32LeS() Instruction { return Instruction{Opcode: OpI32LeS} }
func I32LeU() Instruction { return Instruction{Opcode: OpI32LeU} }
func I32GeS() Instruction { return Instruction{Opcode: OpI32GeS} }
func I32GeU() Instruction { return Instruction{Opcode: OpI32GeU} }
func I64Eqz() Instruction { return Instruction{Opcode: OpI64Eqz} }
func I64Eq() Instruction { return Instruction{Opcode: OpI64Eq} }
func I64Ne() Instruction { return Instruction{Opcode: OpI64Ne} }
func I64LtS() Instruction { return Instruction{Opcode: OpI64LtS} }
func I64LtU() Instruction { return Instruction{Opcode: OpI64LtU} }
func I64GtS() Instruction { return Instruction{Opcode: OpI64GtS} }
func I64GtU() Instruction { return Instruction{Opcode: OpI64GtU} }
func I64LeS() Instruction { return Instruction{Opcode: OpI64LeS} }
func I64LeU() Instruction { return Instruction{Opcode: OpI64LeU} }
func I64GeS() Instruction { return Instruction{Opcode: OpI64GeS} }
func I64GeU() Instruction { return Instruction{Opcode: OpI64GeU} }
func F32Eq() Instruction { return Instruction{Opcode: OpF32Eq} }
func F32Ne() Instruction { return Instruction{Opcode: OpF32Ne} }
func F32Lt() Instruction { return Instruction{Opcode: OpF32Lt} }
func F32Gt() Instruction { return Instruction{Opcode: OpF32Gt} }
func F32Le() Instruction { return Instruction{Opcode: OpF32Le} }
func F32Ge() Instruction { return Instruction{Opcode: OpF32Ge} }
func F64Eq() Instruction { return Instruction{Opcode: OpF64Eq} }
func F64Ne() Instruction { return Instruction{Opcode: OpF64Ne} }
func F64Lt() Instruction { return Instruction{Opcode: OpF64Lt} }
func F64Gt() Instruction { return Instruction{Opcode: OpF64Gt} }
func F64Le() Instruction { return Instruction{Opcode: OpF64Le} }
func F64Ge() Instruction { return Instruction{Opcode: OpF64Ge} }
// Integer arithmetic, bitwise, and shift/rotate instruction constructors.
func I32Clz() Instruction { return Instruction{Opcode: OpI32Clz} }
func I32Ctz() Instruction { return Instruction{Opcode: OpI32Ctz} }
func I32Popcnt() Instruction { return Instruction{Opcode: OpI32Popcnt} }
func I32Add() Instruction { return Instruction{Opcode: OpI32Add} }
func I32Sub() Instruction { return Instruction{Opcode: OpI32Sub} }
func I32Mul() Instruction { return Instruction{Opcode: OpI32Mul} }
func I32DivS() Instruction { return Instruction{Opcode: OpI32DivS} }
func I32DivU() Instruction { return Instruction{Opcode: OpI32DivU} }
func I32RemS() Instruction { return Instruction{Opcode: OpI32RemS} }
func I32RemU() Instruction { return Instruction{Opcode: OpI32RemU} }
func I32And() Instruction { return Instruction{Opcode: OpI32And} }
func I32Or() Instruction { return Instruction{Opcode: OpI32Or} }
func I32Xor() Instruction { return Instruction{Opcode: OpI32Xor} }
func I32Shl() Instruction { return Instruction{Opcode: OpI32Shl} }
func I32ShrS() Instruction { return Instruction{Opcode: OpI32ShrS} }
func I32ShrU() Instruction { return Instruction{Opcode: OpI32ShrU} }
func I32Rotl() Instruction { return Instruction{Opcode: OpI32Rotl} }
func I32Rotr() Instruction { return Instruction{Opcode: OpI32Rotr} }
func I64Clz() Instruction { return Instruction{Opcode: OpI64Clz} }
func I64Ctz() Instruction { return Instruction{Opcode: OpI64Ctz} }
func I64Popcnt() Instruction { return Instruction{Opcode: OpI64Popcnt} }
func I64Add() Instruction { return Instruction{Opcode: OpI64Add} }
func I64Sub() Instruction { return Instruction{Opcode: OpI64Sub} }
func I64Mul() Instruction { return Instruction{Opcode: OpI64Mul} }
func I64DivS() Instruction { return Instruction{Opcode: OpI64DivS} }
func I64DivU() Instruction { return Instruction{Opcode: OpI64DivU} }
func I64RemS() Instruction { return Instruction{Opcode: OpI64RemS} }
func I64RemU() Instruction { return Instruction{Opcode: OpI64RemU} }
func I64And() Instruction { return Instruction{Opcode: OpI64And} }
func I64Or() Instruction { return Instruction{Opcode: OpI64Or} }
func I64Xor() Instruction { return Instruction{Opcode: OpI64Xor} }
func I64Shl() Instruction { return Instruction{Opcode: OpI64Shl} }
func I64ShrS() Instruction { return Instruction{Opcode: OpI64ShrS} }
func I64ShrU() Instruction { return Instruction{Opcode: OpI64ShrU} }
func I64Rotl() Instruction { return Instruction{Opcode: OpI64Rotl} }
func I64Rotr() Instruction { return Instruction{Opcode: OpI64Rotr} }
// Floating-point arithmetic instruction constructors.
func F32Abs() Instruction { return Instruction{Opcode: OpF32Abs} }
func F32Neg() Instruction { return Instruction{Opcode: OpF32Neg} }
func F32Ceil() Instruction { return Instruction{Opcode: OpF32Ceil} }
func F32Floor() Instruction { return Instruction{Opcode: OpF32Floor} }
func F32Trunc() Instruction { return Instruction{Opcode: OpF32Trunc} }
func F32Nearest() Instruction { return Instruction{Opcode: OpF32Nearest} }
func F32Sqrt() Instruction { return Instruction{Opcode: OpF32Sqrt} }
func F32Add() Instruction { return Instruction{Opcode: OpF32Add} }
func F32Sub() Instruction { return Instruction{Opcode: OpF32Sub} }
func F32Mul() Instruction { return Instruction{Opcode: OpF32Mul} }
func F32Div() Instruction { return Instruction{Opcode: OpF32Div} }
func F32Min() Instruction { return Instruction{Opcode: OpF32Min} }
func F32Max() Instruction { return Instruction{Opcode: OpF32Max} }
func F32Copysign() Instruction { return Instruction{Opcode: OpF32Copysign} }
func F64Abs() Instruction { return Instruction{Opcode: OpF64Abs} }
func F64Neg() Instruction { return Instruction{Opcode: OpF64Neg} }
func F64Ceil() Instruction { return Instruction{Opcode: OpF64Ceil} }
func F64Floor() Instruction { return Instruction{Opcode: OpF64Floor} }
func F64Trunc() Instruction { return Instruction{Opcode: OpF64Trunc} }
func F64Nearest() Instruction { return Instruction{Opcode: OpF64Nearest} }
func F64Sqrt() Instruction { return Instruction{Opcode: OpF64Sqrt} }
func F64Add() Instruction { return Instruction{Opcode: OpF64Add} }
func F64Sub() Instruction { return Instruction{Opcode: OpF64Sub} }
func F64Mul() Instruction { return Instruction{Opcode: OpF64Mul} }
func F64Div() Instruction { return Instruction{Opcode: OpF64Div} }
func F64Min() Instruction { return Instruction{Opcode: OpF64Min} }
func F64Max() Instruction { return Instruction{Opcode: OpF64Max} }
func F64Copysign() Instruction { return Instruction{Opcode: OpF64Copysign} }
// Numeric conversion and reinterpretation instruction constructors.
func I32WrapI64() Instruction { return Instruction{Opcode: OpI32WrapI64} }
func I32TruncF32S() Instruction { return Instruction{Opcode: OpI32TruncF32S} }
func I32TruncF32U() Instruction { return Instruction{Opcode: OpI32TruncF32U} }
func I32TruncF64S() Instruction { return Instruction{Opcode: OpI32TruncF64S} }
func I32TruncF64U() Instruction { return Instruction{Opcode: OpI32TruncF64U} }
func I64ExtendI32S() Instruction { return Instruction{Opcode: OpI64ExtendI32S} }
func I64ExtendI32U() Instruction { return Instruction{Opcode: OpI64ExtendI32U} }
func I64TruncF32S() Instruction { return Instruction{Opcode: OpI64TruncF32S} }
func I64TruncF32U() Instruction { return Instruction{Opcode: OpI64TruncF32U} }
func I64TruncF64S() Instruction { return Instruction{Opcode: OpI64TruncF64S} }
func I64TruncF64U() Instruction { return Instruction{Opcode: OpI64TruncF64U} }
func F32ConvertI32S() Instruction { return Instruction{Opcode: OpF32ConvertI32S} }
func F32ConvertI32U() Instruction { return Instruction{Opcode: OpF32ConvertI32U} }
func F32ConvertI64S() Instruction { return Instruction{Opcode: OpF32ConvertI64S} }
func F32ConvertI64U() Instruction { return Instruction{Opcode: OpF32ConvertI64U} }
func F32DemoteF64() Instruction { return Instruction{Opcode: OpF32DemoteF64} }
func F64ConvertI32S() Instruction { return Instruction{Opcode: OpF64ConvertI32S} }
func F64ConvertI32U() Instruction { return Instruction{Opcode: OpF64ConvertI32U} }
func F64ConvertI64S() Instruction { return Instruction{Opcode: OpF64ConvertI64S} }
func F64ConvertI64U() Instruction { return Instruction{Opcode: OpF64ConvertI64U} }
func F64PromoteF32() Instruction { return Instruction{Opcode: OpF64PromoteF32} }
func I32ReinterpretF32() Instruction { return Instruction{Opcode: OpI32ReinterpretF32} }
func I64ReinterpretF64() Instruction { return Instruction{Opcode: OpI64ReinterpretF64} }
func F32ReinterpretI32() Instruction { return Instruction{Opcode: OpF32ReinterpretI32} }
func F64ReinterpretI64() Instruction { return Instruction{Opcode: OpF64ReinterpretI64} }
// Sign-extension instruction constructors (sign-extension operators
// proposal).
func I32Extend8S() Instruction { return Instruction{Opcode: OpI32Extend8S} }
func I32Extend16S() Instruction { return Instruction{Opcode: OpI32Extend16S} }
func I64Extend8S() Instruction { return Instruction{Opcode: OpI64Extend8S} }
func I64Extend16S() Instruction { return Instruction{Opcode: OpI64Extend16S} }
func I64Extend32S() Instruction { return Instruction{Opcode: OpI64Extend32S} }
func I32TruncSatF32S() Instruction {
return Instruction{Opcode: OpPrefix, Immediate: OpI32TruncSatF32S}
}
func I32TruncSatF32U() Instruction {
return Instruction{Opcode: OpPrefix, Immediate: OpI32TruncSatF32U}
}
func I32TruncSatF64S() Instruction {
return Instruction{Opcode: OpPrefix, Immediate: OpI32TruncSatF64S}
}
func I32TruncSatF64U() Instruction {
return Instruction{Opcode: OpPrefix, Immediate: OpI32TruncSatF64U}
}
func I64TruncSatF32S() Instruction {
return Instruction{Opcode: OpPrefix, Immediate: OpI64TruncSatF32S}
}
func I64TruncSatF32U() Instruction {
return Instruction{Opcode: OpPrefix, Immediate: OpI64TruncSatF32U}
}
func I64TruncSatF64S() Instruction {
return Instruction{Opcode: OpPrefix, Immediate: OpI64TruncSatF64S}
}
func I64TruncSatF64U() Instruction {
return Instruction{Opcode: OpPrefix, Immediate: OpI64TruncSatF64U}
} | wasm/code/instructions.go | 0.783409 | 0.539711 | instructions.go | starcoder |
package staticarray
import (
"github.com/influxdata/flux/array"
"github.com/influxdata/flux/memory"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/values"
)
// times is an array.Time implementation backed by a plain Go slice of
// values.Time. The optional allocator tracks the memory accounted for the
// backing slice (nil when the data was supplied externally).
type times struct {
	data  []values.Time
	alloc *memory.Allocator
}
func Time(data []values.Time) array.Time {
return ×{data: data}
}
// Type returns the semantic type of the array (always semantic.Time).
func (a *times) Type() semantic.Type {
	return semantic.Time
}

// IsNull reports whether the value at index i is null. This backing store
// cannot represent nulls, so it always returns false.
func (a *times) IsNull(i int) bool {
	return false
}

// IsValid reports whether i is a valid index into the array.
func (a *times) IsValid(i int) bool {
	return i >= 0 && i < len(a.data)
}

// Len returns the number of elements in the array.
func (a *times) Len() int {
	return len(a.data)
}

// NullN returns the number of null values; always zero for this store.
func (a *times) NullN() int {
	return 0
}

// Value returns the time at index i. Panics if i is out of range.
func (a *times) Value(i int) values.Time {
	return a.data[i]
}

// Copy is not yet implemented and always panics.
func (a *times) Copy() array.Base {
	panic("implement me")
}

// Free releases the array's data, returning the accounted bytes to the
// allocator when one is present.
func (a *times) Free() {
	if a.alloc != nil {
		a.alloc.Free(cap(a.data) * timeSize)
	}
	a.data = nil
}

// Slice returns a reference to the sub-range [start, stop) of the array.
func (a *times) Slice(start, stop int) array.BaseRef {
	return a.TimeSlice(start, stop)
}

// TimeSlice returns a time-typed reference to the sub-range [start, stop).
// The result shares the backing data and carries no allocator, so freeing
// it does not release the parent's accounting.
func (a *times) TimeSlice(start, stop int) array.TimeRef {
	return &times{data: a.data[start:stop]}
}

// TimeValues returns the underlying []values.Time without copying.
func (a *times) TimeValues() []values.Time {
	return a.data
}
// TimeBuilder returns a new array.TimeBuilder whose allocations are
// accounted against the given allocator.
func TimeBuilder(a *memory.Allocator) array.TimeBuilder {
	return &timeBuilder{alloc: a}
}

// timeBuilder accumulates values.Time values, tracking the backing slice's
// memory through the allocator.
type timeBuilder struct {
	data  []values.Time
	alloc *memory.Allocator
}
// Type returns the semantic type produced by this builder (always
// semantic.Time).
func (b *timeBuilder) Type() semantic.Type {
	return semantic.Time
}

// Len returns the number of values appended so far.
func (b *timeBuilder) Len() int {
	return len(b.data)
}

// Cap returns the current capacity of the builder's backing slice.
func (b *timeBuilder) Cap() int {
	return cap(b.data)
}
// Reserve ensures the builder has capacity for at least n additional values
// beyond its current length, growing the backing slice (with allocator
// accounting) when needed.
//
// Fix: the original declared newCap and then immediately shadowed it in the
// if-statement initializer; the redundant shadowing is removed.
func (b *timeBuilder) Reserve(n int) {
	newCap := len(b.data) + n
	if newCap <= cap(b.data) {
		return
	}
	// Account for the new backing array before allocating it.
	if err := b.alloc.Allocate(newCap * timeSize); err != nil {
		panic(err)
	}
	data := make([]values.Time, len(b.data), newCap)
	copy(data, b.data)
	// Release the accounting for the old backing array.
	b.alloc.Free(cap(b.data) * timeSize)
	b.data = data
}
// BuildArray finalizes the builder and returns the result as a generic
// array.Base.
func (b *timeBuilder) BuildArray() array.Base {
	return b.BuildTimeArray()
}

// Free is not yet implemented and always panics.
func (b *timeBuilder) Free() {
	panic("implement me")
}
// Append adds a single value, growing the backing slice (with allocator
// accounting) when it is full.
func (b *timeBuilder) Append(v values.Time) {
	if len(b.data) == cap(b.data) {
		// Grow the slice in the same way as built-in append.
		n := len(b.data)
		if n == 0 {
			n = 2
		}
		b.Reserve(n)
	}
	b.data = append(b.data, v)
}

// AppendNull appends a placeholder for a null value.
func (b *timeBuilder) AppendNull() {
	// The staticarray does not support nulls so it will do the current behavior of just appending
	// the zero value.
	b.Append(0)
}
// AppendValues appends a batch of values. The valid bitmaps are accepted for
// interface compatibility but ignored, since this store cannot represent
// nulls.
//
// Fix: the original called b.Reserve(newCap - cap(b.data)). Reserve(n)
// guarantees capacity for len(b.data)+n elements, so whenever cap > len the
// old call under-reserved and the subsequent append could reallocate outside
// the allocator's accounting. Reserving len(v) guarantees room for exactly
// the values being appended.
func (b *timeBuilder) AppendValues(v []values.Time, valid ...[]bool) {
	if newCap := len(b.data) + len(v); newCap > cap(b.data) {
		b.Reserve(len(v))
	}
	b.data = append(b.data, v...)
}
func (b *timeBuilder) BuildTimeArray() array.Time {
return ×{
data: b.data,
alloc: b.alloc,
}
} | internal/staticarray/time.go | 0.651798 | 0.552057 | time.go | starcoder |
package main
// Rect is a position, width and height, measured in character cells.
type Rect struct {
	X int
	Y int
	W int
	H int
}

// Box has one outer and one innner rectangle.
// This is useful when having margins that surrounds content.
type Box struct {
	frame *Rect // The rectangle around the box, for placement
	inner *Rect // The rectangle inside the box, for content
}

// Create a new Box / container with zero-sized frame and inner rectangles.
func NewBox() *Box {
	return &Box{&Rect{0, 0, 0, 0}, &Rect{0, 0, 0, 0}}
}
// Center positions b's frame at the center of the container's inner
// rectangle (content area).
func (b *Box) Center(container *Box) {
	in := container.inner
	b.frame.X = in.X + (in.W-b.frame.W)/2
	b.frame.Y = in.Y + (in.H-b.frame.H)/2
}
// Fill makes b's frame exactly cover the container's inner rectangle.
func (b *Box) Fill(container *Box) {
	*b.frame = *container.inner
}
// FillWithMargins places b inside the given container, inset on all four
// sides by the given margin, measured in number of characters.
func (b *Box) FillWithMargins(container *Box, margins int) {
	b.Fill(container)
	f := b.frame
	f.X += margins
	f.Y += margins
	f.W -= 2 * margins
	f.H -= 2 * margins
}
// Place a Box inside a given container, using the given percentage wise ratios.
// horizmarginp can for example be 0.1 for a 10% horizontal margin around
// the inner box. vertmarginp works similarly, but for the vertical margins.
// Margins are computed from the container's inner size and truncated to
// whole character cells.
func (b *Box) FillWithPercentageMargins(container *Box, horizmarginp float32, vertmarginp float32) {
	horizmargin := int(float32(container.inner.W) * horizmarginp)
	vertmargin := int(float32(container.inner.H) * vertmarginp)
	b.Fill(container)
	b.frame.X += horizmargin
	b.frame.Y += vertmargin
	b.frame.W -= horizmargin * 2
	b.frame.H -= vertmargin * 2
}
// GetContentPos returns the (x, y) position of the inner (content)
// rectangle.
func (b *Box) GetContentPos() (int, int) {
	in := b.inner
	return in.X, in.Y
}
// SetThirdSize sizes b's frame to one third of the container's inner
// rectangle.
func (b *Box) SetThirdSize(container *Box) {
	in := container.inner
	b.frame.W = in.W / 3
	b.frame.H = in.H / 3
}

// SetThirdPlace positions b's frame one third of the way into the
// container's inner rectangle.
func (b *Box) SetThirdPlace(container *Box) {
	in := container.inner
	b.frame.X = in.X + in.W/3
	b.frame.Y = in.Y + in.H/3
}
// Place a Box so that it either fills the given
// container, or is placed 1/3 from the upper left edge,
// depending on how much space is left.
// Each axis is adjusted independently: the offset is applied only when the
// leftover space on that axis exceeds the frame's own extent.
func (b *Box) SetNicePlacement(container *Box) {
	b.frame.X = container.inner.X
	b.frame.Y = container.inner.Y
	leftoverwidth := container.inner.W - b.frame.W
	leftoverheight := container.inner.H - b.frame.H
	if leftoverwidth > b.frame.W {
		b.frame.X += leftoverwidth / 3
	}
	if leftoverheight > b.frame.H {
		b.frame.Y += leftoverheight / 3
	}
}
// Place positions the Box at the top-left corner of the given container's
// inner rectangle. The Box size is left untouched.
func (b *Box) Place(container *Box) {
	b.frame.X = container.inner.X
	b.frame.Y = container.inner.Y
}

// BottomCenterLeft places the box near the bottom of the container,
// horizontally 1/3 into the leftover width, i.e. left of center.
// NOTE(review): the "- 2" presumably leaves room for a border or status
// line at the bottom — confirm against callers.
func (b *Box) BottomCenterLeft(container *Box) {
	widthleftover := container.inner.W - b.frame.W
	b.frame.X = container.inner.X + widthleftover/3
	b.frame.Y = container.inner.Y + container.inner.H - 2
}

// BottomCenterRight places the box near the bottom of the container, to the
// right of center.
// NOTE(review): unlike BottomCenterLeft, this anchors off the container's
// right edge (inner.W - leftover/2); for boxes wider than a third of the
// container, the frame extends past the right edge — confirm whether that
// is intended.
func (b *Box) BottomCenterRight(container *Box) {
	widthleftover := container.inner.W - b.frame.W
	b.frame.X = container.inner.X + container.inner.W - (widthleftover / 2)
	b.frame.Y = container.inner.Y + container.inner.H - 2
}
// GetInner returns the inner rectangle (content size + position).
func (b *Box) GetInner() *Rect {
	return b.inner
}

// GetFrame returns the outer frame (box size + position).
func (b *Box) GetFrame() *Rect {
	return b.frame
}

// SetInner replaces the inner rectangle (content size + position).
func (b *Box) SetInner(r *Rect) {
	b.inner = r
}

// SetFrame replaces the outer frame (box size + position).
func (b *Box) SetFrame(r *Rect) {
	b.frame = r
}
package pilosa
import (
"fmt"
"github.com/m3dbx/pilosa/roaring"
)
// iterator is an interface for looping over row/column pairs.
type iterator interface {
	// Seek moves the cursor to the first pair at or after (rowID, columnID).
	Seek(rowID, columnID uint64)
	// Next returns the pair under the cursor and advances; eof reports exhaustion.
	Next() (rowID, columnID uint64, eof bool)
}
// bufIterator wraps an iterator to provide the ability to unread values.
type bufIterator struct {
	// buf holds the most recently read pair; full marks it as pending
	// redelivery (set by Unread, cleared by Next and Seek).
	buf struct {
		rowID    uint64
		columnID uint64
		eof      bool
		full     bool
	}

	itr iterator
}

// newBufIterator returns a buffered iterator that wraps itr.
func newBufIterator(itr iterator) *bufIterator {
	return &bufIterator{itr: itr}
}

// Seek moves to the first pair equal to or greater than rowID/columnID.
// Any pending unread value is discarded.
func (itr *bufIterator) Seek(rowID, columnID uint64) {
	itr.buf.full = false
	itr.itr.Seek(rowID, columnID)
}
// Next returns the next pair in the row.
// If a value has been buffered then it is returned and the buffer is cleared.
func (itr *bufIterator) Next() (rowID, columnID uint64, eof bool) {
	if itr.buf.full {
		itr.buf.full = false
	} else {
		// Stash the freshly read pair so a subsequent Unread can restore it.
		itr.buf.rowID, itr.buf.columnID, itr.buf.eof = itr.itr.Next()
	}
	return itr.buf.rowID, itr.buf.columnID, itr.buf.eof
}
// Peek reads the next value but leaves it on the buffer, so the following
// Next call returns the same pair.
func (itr *bufIterator) Peek() (rowID, columnID uint64, eof bool) {
	rowID, columnID, eof = itr.Next()
	itr.Unread()
	return
}

// Unread pushes the previously read pair back onto the buffer so the next
// Next call redelivers it.
// Panics if the buffer is already full (i.e. nothing was read since the
// last Unread).
func (itr *bufIterator) Unread() {
	if itr.buf.full {
		panic("pilosa.BufIterator: buffer full")
	}
	itr.buf.full = true
}
// limitIterator wraps an Iterator and limits it to a max column/row pair.
type limitIterator struct {
	itr         iterator
	maxRowID    uint64
	maxColumnID uint64

	eof bool // latched once the limit (or the underlying iterator) is exhausted
}

// newLimitIterator returns a new LimitIterator.
func newLimitIterator(itr iterator, maxRowID, maxColumnID uint64) *limitIterator { // nolint: unparam
	return &limitIterator{
		itr:         itr,
		maxRowID:    maxRowID,
		maxColumnID: maxColumnID,
	}
}

// Seek moves the underlying iterator to a column/row pair.
// NOTE(review): eof is not reset here, so seeking after EOF keeps returning
// EOF — confirm this is the intended contract.
func (itr *limitIterator) Seek(rowID, columnID uint64) { itr.itr.Seek(rowID, columnID) }
// Next returns the next row/column ID pair.
// Once the configured limit (or the underlying iterator) is exhausted,
// every subsequent call reports EOF.
func (itr *limitIterator) Next() (rowID, columnID uint64, eof bool) {
	if itr.eof {
		return 0, 0, true
	}
	rowID, columnID, eof = itr.itr.Next()
	switch {
	case eof,
		rowID > itr.maxRowID,
		rowID == itr.maxRowID && columnID > itr.maxColumnID:
		// Past the limit (or underlying EOF): latch EOF permanently.
		itr.eof = true
		return 0, 0, true
	}
	return rowID, columnID, false
}
// sliceIterator iterates over a pair of parallel row/column ID slices.
type sliceIterator struct {
	rowIDs    []uint64
	columnIDs []uint64

	i, n int // cursor and total pair count
}

// newSliceIterator returns an iterator over a set of row/column ID pairs.
// Both slices MUST have equal length; otherwise the function panics.
func newSliceIterator(rowIDs, columnIDs []uint64) *sliceIterator {
	if len(rowIDs) != len(columnIDs) {
		panic(fmt.Sprintf("pilosa.SliceIterator: pair length mismatch: %d != %d", len(rowIDs), len(columnIDs)))
	}
	return &sliceIterator{
		rowIDs:    rowIDs,
		columnIDs: columnIDs,
		n:         len(rowIDs),
	}
}

// Seek moves the cursor to the first pair at or after (bseek, pseek).
func (itr *sliceIterator) Seek(bseek, pseek uint64) {
	for idx, rowID := range itr.rowIDs[:itr.n] {
		if rowID > bseek || (rowID == bseek && itr.columnIDs[idx] >= pseek) {
			itr.i = idx
			return
		}
	}
	// Every pair precedes the seek target: park the cursor at the end.
	itr.i = itr.n
}

// Next returns the next row/column ID pair, or eof when exhausted.
func (itr *sliceIterator) Next() (rowID, columnID uint64, eof bool) {
	if itr.i >= itr.n {
		return 0, 0, true
	}
	rowID, columnID = itr.rowIDs[itr.i], itr.columnIDs[itr.i]
	itr.i++
	return rowID, columnID, false
}
// roaringIterator converts a roaring.Iterator to output column/row pairs.
// Positions in the roaring bitmap encode a pair as row*ShardWidth+column.
type roaringIterator struct {
	itr *roaring.Iterator
}

// newRoaringIterator returns a new iterator wrapping itr.
func newRoaringIterator(itr *roaring.Iterator) *roaringIterator {
	return &roaringIterator{itr: itr}
}

// Seek moves the cursor to a pair matching bseek/pseek.
// If the pair is not found then it moves to the next pair.
func (itr *roaringIterator) Seek(bseek, pseek uint64) {
	itr.itr.Seek((bseek * ShardWidth) + pseek)
}

// Next returns the next column/row ID pair by splitting the flat bitmap
// position back into (row, column).
func (itr *roaringIterator) Next() (rowID, columnID uint64, eof bool) {
	v, eof := itr.itr.Next()
	return v / ShardWidth, v % ShardWidth, eof
}
package tetra3d
import "github.com/kvartborg/vector"
// The goal of fastmath.go is to provide vector operations that don't clone the vector to use. This means the main usage is not to use the results
// directly, but rather as intermediary steps (i.e. use fastVectorSub to compare distances, or fastMatrixMult to multiply a vector by that final matrix).
// Be careful with it, me!
// standinVector and standinMatrix are shared scratch storage for the fast*
// helpers below. Results are valid only until the next fast* call, and the
// helpers are NOT goroutine-safe (see the file comment above).
var standinVector = vector.Vector{0, 0, 0}
var standinMatrix = NewEmptyMatrix4()

// fastVectorSub computes a-b into the shared stand-in vector and returns it.
// The returned vector is overwritten by the next fastVectorSub call.
func fastVectorSub(a, b vector.Vector) vector.Vector {
	standinVector[0] = a[0] - b[0]
	standinVector[1] = a[1] - b[1]
	standinVector[2] = a[2] - b[2]
	return standinVector
}
// fastVectorDistanceSquared returns the squared distance between a and b.
// It uses the shared stand-in vector internally, so it is not goroutine-safe.
func fastVectorDistanceSquared(a, b vector.Vector) float64 {
	sub := fastVectorSub(a, b)
	return sub[0]*sub[0] + sub[1]*sub[1] + sub[2]*sub[2]
}
// fastMatrixMult computes the product matrix × other
// (result[i][j] = Σk matrix[i][k]*other[k][j]) into the shared stand-in
// matrix and returns it. The result is overwritten by the next
// fastMatrixMult call; not goroutine-safe.
func fastMatrixMult(matrix, other Matrix4) Matrix4 {
	standinMatrix[0][0] = matrix[0][0]*other[0][0] + matrix[0][1]*other[1][0] + matrix[0][2]*other[2][0] + matrix[0][3]*other[3][0]
	standinMatrix[1][0] = matrix[1][0]*other[0][0] + matrix[1][1]*other[1][0] + matrix[1][2]*other[2][0] + matrix[1][3]*other[3][0]
	standinMatrix[2][0] = matrix[2][0]*other[0][0] + matrix[2][1]*other[1][0] + matrix[2][2]*other[2][0] + matrix[2][3]*other[3][0]
	standinMatrix[3][0] = matrix[3][0]*other[0][0] + matrix[3][1]*other[1][0] + matrix[3][2]*other[2][0] + matrix[3][3]*other[3][0]
	standinMatrix[0][1] = matrix[0][0]*other[0][1] + matrix[0][1]*other[1][1] + matrix[0][2]*other[2][1] + matrix[0][3]*other[3][1]
	standinMatrix[1][1] = matrix[1][0]*other[0][1] + matrix[1][1]*other[1][1] + matrix[1][2]*other[2][1] + matrix[1][3]*other[3][1]
	standinMatrix[2][1] = matrix[2][0]*other[0][1] + matrix[2][1]*other[1][1] + matrix[2][2]*other[2][1] + matrix[2][3]*other[3][1]
	standinMatrix[3][1] = matrix[3][0]*other[0][1] + matrix[3][1]*other[1][1] + matrix[3][2]*other[2][1] + matrix[3][3]*other[3][1]
	standinMatrix[0][2] = matrix[0][0]*other[0][2] + matrix[0][1]*other[1][2] + matrix[0][2]*other[2][2] + matrix[0][3]*other[3][2]
	standinMatrix[1][2] = matrix[1][0]*other[0][2] + matrix[1][1]*other[1][2] + matrix[1][2]*other[2][2] + matrix[1][3]*other[3][2]
	standinMatrix[2][2] = matrix[2][0]*other[0][2] + matrix[2][1]*other[1][2] + matrix[2][2]*other[2][2] + matrix[2][3]*other[3][2]
	standinMatrix[3][2] = matrix[3][0]*other[0][2] + matrix[3][1]*other[1][2] + matrix[3][2]*other[2][2] + matrix[3][3]*other[3][2]
	standinMatrix[0][3] = matrix[0][0]*other[0][3] + matrix[0][1]*other[1][3] + matrix[0][2]*other[2][3] + matrix[0][3]*other[3][3]
	standinMatrix[1][3] = matrix[1][0]*other[0][3] + matrix[1][1]*other[1][3] + matrix[1][2]*other[2][3] + matrix[1][3]*other[3][3]
	standinMatrix[2][3] = matrix[2][0]*other[0][3] + matrix[2][1]*other[1][3] + matrix[2][2]*other[2][3] + matrix[2][3]*other[3][3]
	standinMatrix[3][3] = matrix[3][0]*other[0][3] + matrix[3][1]*other[1][3] + matrix[3][2]*other[2][3] + matrix[3][3]*other[3][3]
	return standinMatrix
}
// vectorCross returns the cross product of vecA and vecB. If the result is
// (near-)degenerate (magnitude < 0.0001, i.e. the inputs are near-parallel),
// it falls back to crossing vecA with failsafeVec; if that is degenerate
// too, nil is returned (caller treats this as "no separating axis").
// NOTE(review): errors from Cross are discarded — confirm Cross can only
// fail in ways that cannot occur for these 3-component inputs.
func vectorCross(vecA, vecB, failsafeVec vector.Vector) vector.Vector {
	cross, _ := vecA.Cross(vecB)
	if cross.Magnitude() < 0.0001 {
		cross, _ = vecA.Cross(failsafeVec)
		// Still (near-)zero magnitude: no usable axis, signal with nil.
		if cross.Magnitude() < 0.0001 {
			return nil
		}
	}
	return cross
}
// VectorPool is a bump allocator for vector scratch values: Get hands out
// pre-allocated 4-component vectors in order, and Reset rewinds the cursor
// so the same storage can be reused for the next pass.
type VectorPool struct {
	Vectors        []vector.Vector
	RetrievalIndex int // index of the next vector handed out by Get
}

// NewVectorPool creates a pool containing vectorCount 4-component vectors.
func NewVectorPool(vectorCount int) *VectorPool {
	pool := &VectorPool{
		Vectors: []vector.Vector{},
	}
	for i := 0; i < vectorCount; i++ {
		pool.Vectors = append(pool.Vectors, vector.Vector{0, 0, 0, 0})
	}
	return pool
}

// Reset rewinds the pool; vectors previously handed out will be reused.
func (pool *VectorPool) Reset() {
	pool.RetrievalIndex = 0
}

// Get returns the next scratch vector. Panics with an index-out-of-range
// error if the pool is exhausted before Reset is called.
func (pool *VectorPool) Get() vector.Vector {
	v := pool.Vectors[pool.RetrievalIndex]
	pool.RetrievalIndex++
	return v
}

// MultVecW treats vect as a row vector with an implicit w of 1, multiplies
// it by matrix (v[j] = Σi vect[i]*matrix[i][j] + matrix[3][j]), and writes
// the homogeneous (x, y, z, w) result into a vector taken from the pool.
func (pool *VectorPool) MultVecW(matrix Matrix4, vect vector.Vector) vector.Vector {
	v := pool.Get()
	v[0] = matrix[0][0]*vect[0] + matrix[1][0]*vect[1] + matrix[2][0]*vect[2] + matrix[3][0]
	v[1] = matrix[0][1]*vect[0] + matrix[1][1]*vect[1] + matrix[2][1]*vect[2] + matrix[3][1]
	v[2] = matrix[0][2]*vect[0] + matrix[1][2]*vect[1] + matrix[2][2]*vect[2] + matrix[3][2]
	v[3] = matrix[0][3]*vect[0] + matrix[1][3]*vect[1] + matrix[2][3]*vect[2] + matrix[3][3]
	return v
}
package util
// LittleEndian is the little-endian implementation of ByteOrder.
var LittleEndian littleEndian

// BigEndian is the big-endian implementation of ByteOrder.
var BigEndian bigEndian
// littleEndian encodes/decodes unsigned integers with the least significant
// byte first.
type littleEndian struct{}

// Uint16 decodes the first two bytes of b as a little-endian uint16.
func (littleEndian) Uint16(b []byte) uint16 { return uint16(b[1])<<8 | uint16(b[0]) }

// PutUint16 writes v into b[0:2], least significant byte first.
func (littleEndian) PutUint16(b []byte, v uint16) {
	b[0] = byte(v)
	b[1] = byte(v >> 8)
}

// ToUint16 returns a freshly allocated 2-byte little-endian encoding of v.
func (e littleEndian) ToUint16(v uint16) []byte {
	b := make([]byte, 2)
	e.PutUint16(b, v)
	return b
}

// Uint24 decodes the first three bytes of b as a little-endian 24-bit value.
func (littleEndian) Uint24(b []byte) uint32 {
	return uint32(b[2])<<16 | uint32(b[1])<<8 | uint32(b[0])
}

// PutUint24 writes the low 24 bits of v into b[0:3], LSB first.
func (littleEndian) PutUint24(b []byte, v uint32) {
	for i := 0; i < 3; i++ {
		b[i] = byte(v >> (8 * uint(i)))
	}
}

// ToUint24 returns a freshly allocated 3-byte little-endian encoding of the
// low 24 bits of v.
func (e littleEndian) ToUint24(v uint32) []byte {
	b := make([]byte, 3)
	e.PutUint24(b, v)
	return b
}

// Uint32 decodes the first four bytes of b as a little-endian uint32.
func (littleEndian) Uint32(b []byte) uint32 {
	var v uint32
	for i := 3; i >= 0; i-- {
		v = v<<8 | uint32(b[i])
	}
	return v
}

// PutUint32 writes v into b[0:4], least significant byte first.
func (littleEndian) PutUint32(b []byte, v uint32) {
	for i := 0; i < 4; i++ {
		b[i] = byte(v >> (8 * uint(i)))
	}
}

// ToUint32 returns a freshly allocated 4-byte little-endian encoding of v.
func (e littleEndian) ToUint32(v uint32) []byte {
	b := make([]byte, 4)
	e.PutUint32(b, v)
	return b
}

// Uint64 decodes the first eight bytes of b as a little-endian uint64.
func (littleEndian) Uint64(b []byte) uint64 {
	var v uint64
	for i := 7; i >= 0; i-- {
		v = v<<8 | uint64(b[i])
	}
	return v
}

// PutUint64 writes v into b[0:8], least significant byte first.
func (littleEndian) PutUint64(b []byte, v uint64) {
	for i := 0; i < 8; i++ {
		b[i] = byte(v >> (8 * uint(i)))
	}
}

// ToUint64 returns a freshly allocated 8-byte little-endian encoding of v.
func (e littleEndian) ToUint64(v uint64) []byte {
	b := make([]byte, 8)
	e.PutUint64(b, v)
	return b
}
// bigEndian encodes/decodes unsigned integers with the most significant
// byte first.
type bigEndian struct{}

// Uint16 decodes the first two bytes of b as a big-endian uint16.
func (bigEndian) Uint16(b []byte) uint16 { return uint16(b[0])<<8 | uint16(b[1]) }

// PutUint16 writes v into b[0:2], most significant byte first.
func (bigEndian) PutUint16(b []byte, v uint16) {
	b[0] = byte(v >> 8)
	b[1] = byte(v)
}

// ToUint16 returns a freshly allocated 2-byte big-endian encoding of v.
func (e bigEndian) ToUint16(v uint16) []byte {
	b := make([]byte, 2)
	e.PutUint16(b, v)
	return b
}

// Uint24 decodes the first three bytes of b as a big-endian 24-bit value.
func (bigEndian) Uint24(b []byte) uint32 {
	var v uint32
	for i := 0; i < 3; i++ {
		v = v<<8 | uint32(b[i])
	}
	return v
}

// PutUint24 writes the low 24 bits of v into b[0:3], MSB first.
func (bigEndian) PutUint24(b []byte, v uint32) {
	for i := 2; i >= 0; i-- {
		b[i] = byte(v)
		v >>= 8
	}
}

// ToUint24 returns a freshly allocated 3-byte big-endian encoding of the
// low 24 bits of v.
func (e bigEndian) ToUint24(v uint32) []byte {
	b := make([]byte, 3)
	e.PutUint24(b, v)
	return b
}

// Uint32 decodes the first four bytes of b as a big-endian uint32.
func (bigEndian) Uint32(b []byte) uint32 {
	var v uint32
	for i := 0; i < 4; i++ {
		v = v<<8 | uint32(b[i])
	}
	return v
}

// PutUint32 writes v into b[0:4], most significant byte first.
func (bigEndian) PutUint32(b []byte, v uint32) {
	for i := 3; i >= 0; i-- {
		b[i] = byte(v)
		v >>= 8
	}
}

// ToUint32 returns a freshly allocated 4-byte big-endian encoding of v.
func (e bigEndian) ToUint32(v uint32) []byte {
	b := make([]byte, 4)
	e.PutUint32(b, v)
	return b
}

// Uint64 decodes the first eight bytes of b as a big-endian uint64.
func (bigEndian) Uint64(b []byte) uint64 {
	var v uint64
	for i := 0; i < 8; i++ {
		v = v<<8 | uint64(b[i])
	}
	return v
}

// PutUint64 writes v into b[0:8], most significant byte first.
func (bigEndian) PutUint64(b []byte, v uint64) {
	for i := 7; i >= 0; i-- {
		b[i] = byte(v)
		v >>= 8
	}
}

// ToUint64 returns a freshly allocated 8-byte big-endian encoding of v.
func (e bigEndian) ToUint64(v uint64) []byte {
	b := make([]byte, 8)
	e.PutUint64(b, v)
	return b
}
// GetUev decodes an unsigned Exp-Golomb ("ue(v)") code — as used in
// H.264/H.265 bitstreams — from buff, starting at bit offset start
// (0 = most significant bit of buff[0]). It returns the decoded value and
// the bit position just past the code.
// NOTE(review): if the code runs past the end of buff, the suffix-bit reads
// below index out of range — callers must ensure the buffer holds a
// complete code.
func GetUev(buff []byte, start int) (value int, pos int) {
	l := len(buff)
	// Count leading zero bits up to (but not including) the first 1 bit.
	var nZeroNum uint = 0
	for start < l*8 {
		if (buff[start/8] & (0x80 >> uint(start%8))) > 0 {
			break
		}
		nZeroNum += 1
		start += 1
	}
	// Skip the terminating 1 bit, then read nZeroNum suffix bits.
	dwRet := 0
	start += 1
	var i uint
	for i = 0; i < nZeroNum; i++ {
		dwRet <<= 1
		if (buff[start/8] & (0x80 >> uint(start%8))) > 0 {
			dwRet += 1
		}
		start += 1
	}
	// value = 2^nZeroNum - 1 + suffix
	return (1 << nZeroNum) - 1 + dwRet, start
}
package permits
import (
"sync"
"time"
"k8s.io/klog/v2"
)
// PermitGiver provides different operations regarding permits for a given
// key. Permits behave like counting semaphores: RegisterPermits fixes the
// capacity for a key, TryPermit acquires one slot (waiting up to a timeout),
// and ReleasePermit frees a slot.
type PermitGiver interface {
	// RegisterPermits registers numPermits permits for the given key.
	RegisterPermits(key string, numPermits int)
	// TryPermit attempts to acquire a permit for key, waiting at most timeout.
	TryPermit(key string, timeout time.Duration) bool
	// ReleasePermit releases one held permit for key.
	ReleasePermit(key string)
	// DeletePermits removes the permits registered for key.
	DeletePermits(key string)
	// Close stops the janitor and invalidates all permits.
	Close()
}
type permit struct {
c chan struct{}
lastAcquiredPermitTime time.Time
}
// NewPermitGiver returns a new PermitGiver
func NewPermitGiver(stalePermitKeyTimeout time.Duration, janitorFrequency time.Duration) PermitGiver {
stopC := make(chan struct{})
pg := permitGiver{
keyPermitsMap: sync.Map{},
stopC: stopC,
}
go func() {
ticker := time.NewTicker(janitorFrequency)
klog.Info("Janitor initialized")
for {
select {
case <-stopC:
return
case <-ticker.C:
pg.cleanupStalePermitEntries(stalePermitKeyTimeout)
}
}
}()
return &pg
}
type permitGiver struct {
keyPermitsMap sync.Map
stopC chan struct{}
}
func (pg *permitGiver) RegisterPermits(key string, numPermits int) {
if pg.isClosed() {
return
}
p := permit{
c: make(chan struct{}, numPermits),
}
_, loaded := pg.keyPermitsMap.LoadOrStore(key, p)
if loaded {
close(p.c)
klog.V(4).Infof("Permits have already registered for key: %s", key)
} else {
klog.V(2).Infof("Permit registered for key: %s", key)
}
}
func (pg *permitGiver) DeletePermits(key string) {
if pg.isClosed() {
return
}
if obj, ok := pg.keyPermitsMap.Load(key); ok {
p := obj.(permit)
close(p.c)
pg.keyPermitsMap.Delete(key)
}
}
func (pg *permitGiver) TryPermit(key string, timeout time.Duration) bool {
if pg.isClosed() {
return false
}
obj, ok := pg.keyPermitsMap.Load(key)
if !ok {
klog.Errorf("There is no permit registered for key: %s", key)
return false
}
p := obj.(permit)
tick := time.NewTicker(timeout)
defer tick.Stop()
for {
select {
case p.c <- struct{}{}:
p.lastAcquiredPermitTime = time.Now()
return true
case <-tick.C:
return false
case <-pg.stopC:
klog.V(2).Infof("PermitGiver has been stopped")
return false
}
}
}
func (pg *permitGiver) ReleasePermit(key string) {
obj, ok := pg.keyPermitsMap.Load(key)
if !ok {
klog.Errorf("There is no permit registered for key: %s", key)
return
}
p := obj.(permit)
if len(p.c) == 0 {
klog.V(4).Infof("There are currently no permits allocated for key: %s. Nothing to release", key)
return
}
select {
case <-p.c:
default:
}
}
func (pg *permitGiver) isClosed() bool {
select {
case <-pg.stopC:
klog.Errorf("PermitGiver has been closed, no operations are now permitted.")
return true
default:
return false
}
}
func (pg *permitGiver) isPermitAllocated(key string) bool {
_, ok := pg.keyPermitsMap.Load(key)
if !ok {
return false
}
return true
}
func (pg *permitGiver) cleanupStalePermitEntries(stalePermitKeyTimeout time.Duration) {
pg.keyPermitsMap.Range(func(key, value interface{}) bool {
p := value.(permit)
timeout := time.Now().Add(-stalePermitKeyTimeout).Sub(p.lastAcquiredPermitTime)
if timeout > 0 && len(p.c) == 0 {
pg.keyPermitsMap.Delete(key)
}
return true
})
}
func (pg *permitGiver) Close() {
close(pg.stopC)
// close all permit channels
pg.keyPermitsMap.Range(func(key, value interface{}) bool {
p := value.(permit)
close(p.c)
return true
})
} | vendor/github.com/gardener/machine-controller-manager/pkg/util/permits/permits.go | 0.578805 | 0.404037 | permits.go | starcoder |
package ent
import (
"fmt"
"time"
"github.com/jmoiron/sqlx"
"github.com/lolopinto/ent/ent/sql"
)
// LoadNodeRawData is the public API to load the raw data for an ent without
// privacy checks. Returns the row as a column-name -> value map.
func LoadNodeRawData(id string, entLoader Loader) (map[string]interface{}, error) {
	l := &loadNodeLoader{
		id:        id,
		entLoader: entLoader,
		rawData:   true,
	}
	err := loadData(l)
	return l.dataRow, err
}

// LoadNodesRawData loads raw data for multiple objects given their ids.
// Each result row is a column-name -> value map.
func LoadNodesRawData(ids []string, entLoader Loader) ([]map[string]interface{}, error) {
	l := &loadNodesLoader{
		ids:       ids,
		entLoader: entLoader,
		rawData:   true,
	}
	err := loadData(l)
	return l.dataRows, err
}

// LoadNodesRawDataViaQueryClause takes a query clause e.g. sql.Eq("id", "{id of foreign key here}")
// and returns the raw data for all nodes that map to that clause.
func LoadNodesRawDataViaQueryClause(entLoader Loader, clause sql.QueryClause) ([]map[string]interface{}, error) {
	l := &loadNodesLoader{
		entLoader: entLoader,
		clause:    clause,
		rawData:   true,
	}
	err := loadData(l)
	return l.dataRows, err
}

// LoadNodeRawDataViaQueryClause takes a query clause e.g. sql.Eq("email_address", "<EMAIL>")
// and returns the raw data for a (the) single node that maps to that clause.
func LoadNodeRawDataViaQueryClause(entLoader Loader, clause sql.QueryClause) (map[string]interface{}, error) {
	l := &loadNodeLoader{
		entLoader: entLoader,
		clause:    clause,
		rawData:   true,
	}
	err := loadData(l)
	return l.dataRow, err
}
// LoadRawNodesByType loads the nodes at the end of an edge, returning raw
// row data rather than typed ents.
func LoadRawNodesByType(id string, edgeType EdgeType, entLoader Loader) ([]map[string]interface{}, error) {
	// options... once we do EntQuery
	l := &loadNodesLoader{
		entLoader: entLoader,
		rawData:   true,
	}
	// First resolve the edge's id2s, then feed them into the node loader.
	err := chainLoaders(
		[]loader{
			&loadEdgesByType{
				id:         id,
				edgeType:   edgeType,
				outputID2s: true,
			},
			l,
		},
	)
	return l.dataRows, err
}

// LoadEdgesByType loads the edges of the given type going out from id.
func LoadEdgesByType(id string, edgeType EdgeType, options ...func(*LoadEdgeConfig)) ([]*AssocEdge, error) {
	l := &loadEdgesByType{
		id:       id,
		edgeType: edgeType,
		options:  options,
	}
	return l.LoadData()
}

// GenLoadEdgesByType handles loading of edges concurrently.
// Because we get strong typing across all edges and for a consistent API with loading Nodes,
// we use the EdgesResult struct here.
func GenLoadEdgesByType(id string, edgeType EdgeType, options ...func(*LoadEdgeConfig)) <-chan *AssocEdgesResult {
	res := make(chan *AssocEdgesResult)
	go func() {
		edges, err := LoadEdgesByType(id, edgeType, options...)
		res <- &AssocEdgesResult{
			Edges: edges,
			Err:   err,
		}
	}()
	return res
}
// LoadUniqueEdgeByType loads the unique edge for a given type.
// Applies a limit 1 to the query. Returns (nil, nil) when no such edge
// exists, mirroring LoadEdgeByType.
func LoadUniqueEdgeByType(id string, edgeType EdgeType) (*AssocEdge, error) {
	edges, err := LoadEdgesByType(id, edgeType, Limit(1))
	if err != nil {
		return nil, err
	}
	// Previously this indexed edges[0] unconditionally and panicked with an
	// index-out-of-range error when the node had no edge of this type.
	if len(edges) == 0 {
		return nil, nil
	}
	return edges[0], nil
}
// GenLoadUniqueEdgeByType is the concurrent version of LoadUniqueEdgeByType.
func GenLoadUniqueEdgeByType(id string, edgeType EdgeType) <-chan *AssocEdgeResult {
	res := make(chan *AssocEdgeResult)
	go func() {
		edge, err := LoadUniqueEdgeByType(id, edgeType)
		res <- &AssocEdgeResult{
			Edge: edge,
			Err:  err,
		}
	}()
	return res
}

// GenLoadEdgeByType is the concurrent version of LoadEdgeByType.
func GenLoadEdgeByType(id1, id2 string, edgeType EdgeType) <-chan *AssocEdgeResult {
	res := make(chan *AssocEdgeResult)
	go func() {
		edge, err := LoadEdgeByType(id1, id2, edgeType)
		res <- &AssocEdgeResult{
			Edge: edge,
			Err:  err,
		}
	}()
	return res
}

// LoadEdgeByType checks if an edge exists between 2 ids.
// Returns (nil, nil) when no such edge exists.
func LoadEdgeByType(id string, id2 string, edgeType EdgeType) (*AssocEdge, error) {
	// TODO 2/25/2020 the logic we eventually want here is if count is less than say 10,000 or whatever we decide the cache limit
	// is, load that, and do a check for id2 in memory
	// otherwise, if no cache or if count is a lot bigger than the limit,
	// need to do a check in the database directly
	// probably not worth having cache for each edge by default.
	// need to provide a way for places to override it as needed
	edges, err := LoadEdgesByType(id, edgeType)
	if err != nil {
		return nil, err
	}
	// Linear scan over all edges from id; see TODO above re. scalability.
	for _, edge := range edges {
		if edge.ID2 == id2 {
			return edge, nil
		}
	}
	// no edge
	return nil, nil
}

// EdgeOptions is a struct that can be used to configure an edge.
// Time refers to the time associated with the edge. If not specified, defaults to current time.
// Data refers to whatever information that needs to be stored/associated with the edge.
// NOTE(review): Data was intended to be capped at 255 characters but that
// limit is not currently enforced.
type EdgeOptions struct {
	Time time.Time
	Data string
}
// LoadEdgeConfig configures the way to load edges.
// This will eventually be used in EntQuery but allows us to start testing and building some things...
type LoadEdgeConfig struct {
	limit *int // optional cap on the number of edges returned
}

// getKey returns a cache-key fragment describing this configuration, or ""
// when the config carries no options.
func (cfg *LoadEdgeConfig) getKey() string {
	if cfg.limit == nil {
		return ""
	}
	// Dereference the pointer: formatting cfg.limit directly with %d would
	// render the pointer's address, not the configured limit.
	return fmt.Sprintf("limit:%d", *cfg.limit)
}

// Limit is an option passed to edge queries to limit the number of edges returned.
func Limit(limit int) func(*LoadEdgeConfig) {
	return func(cfg *LoadEdgeConfig) {
		cfg.limit = &limit
	}
}
// GetEdgeInfo gets the edge information for a given edgeType.
// TODO figure out correct long-term API here
// this is the single get of GenLoadAssocEdges so shouldn't be too hard
func GetEdgeInfo(edgeType EdgeType, tx *sqlx.Tx) (*AssocEdgeData, error) {
	l := &loadNodeLoader{
		id:        string(edgeType),
		entLoader: &AssocEdgeLoader{},
	}
	// Runs inside tx when one is provided.
	err := loadData(l, cfgtx(tx))
	if err != nil {
		return nil, err
	}
	return l.GetEntity().(*AssocEdgeData), nil
}

// GetEdgeInfos gets the edge information for a list of edges, keyed by
// edge type in the returned map.
func GetEdgeInfos(edgeTypes []string) (map[EdgeType]*AssocEdgeData, error) {
	entLoader := &AssocEdgeLoader{}
	l := &loadNodesLoader{
		entLoader: entLoader,
		ids:       edgeTypes,
	}
	err := loadData(l)
	return entLoader.GetMap(), err
}
// GenLoadAssocEdges loads all assoc edges from the db, delivering the
// result on the returned channel.
// TODO correct cache for this. we should load this once per request or have this
// be in a central cache easily available
func GenLoadAssocEdges() <-chan AssocEdgeDatasResult {
	res := make(chan AssocEdgeDatasResult)
	go func() {
		entLoader := &AssocEdgeLoader{}
		// Confirm the assoc_edge_config table exists before querying it.
		err := chainLoaders(
			[]loader{
				&loadAssocEdgeConfigExists{},
				&loadNodesLoader{
					rawQuery:  "SELECT * FROM assoc_edge_config",
					entLoader: entLoader,
				},
			},
		)
		if err != nil {
			res <- AssocEdgeDatasResult{
				Err: err,
			}
		} else {
			res <- AssocEdgeDatasResult{
				Edges: entLoader.results,
			}
		}
	}()
	return res
}
// LoadRawQuery takes a raw query string, runs it, and returns the results
// as raw column-name -> value maps; the loader supplies result-mapping
// context.
func LoadRawQuery(query string, loader Loader) ([]map[string]interface{}, error) {
	l := &loadNodesLoader{
		rawQuery:  query,
		entLoader: loader,
		rawData:   true,
	}
	err := loadData(l)
	return l.dataRows, err
}
package output
import (
"encoding/json"
"errors"
"sync/atomic"
"time"
"github.com/Jeffail/benthos/lib/log"
"github.com/Jeffail/benthos/lib/metrics"
"github.com/Jeffail/benthos/lib/processor/condition"
"github.com/Jeffail/benthos/lib/response"
"github.com/Jeffail/benthos/lib/types"
"github.com/Jeffail/benthos/lib/util/throttle"
)
//------------------------------------------------------------------------------
var (
// ErrSwitchNoConditionMet is returned when a message does not match any
// output conditions.
ErrSwitchNoConditionMet = errors.New("no switch output conditions were met by message")
// ErrSwitchNoOutputs is returned when creating a Switch type with less than
// 2 outputs.
ErrSwitchNoOutputs = errors.New("attempting to create switch with less than 2 outputs")
)
//------------------------------------------------------------------------------
// init registers the switch output constructor, its user-facing docs, and a
// config sanitiser with the global Constructors registry.
func init() {
	Constructors[TypeSwitch] = TypeSpec{
		constructor: NewSwitch,
		description: `
The switch output type allows you to configure multiple conditional output
targets by listing child outputs paired with conditions. Conditional logic is
currently applied per whole message batch. In order to multiplex per message of
a batch use the ` + "[`broker`](#broker)" + ` output with the pattern
` + "`fan_out`" + `.
In the following example, messages containing "foo" will be sent to both the
` + "`foo`" + ` and ` + "`baz`" + ` outputs. Messages containing "bar" will be
sent to both the ` + "`bar`" + ` and ` + "`baz`" + ` outputs. Messages
containing both "foo" and "bar" will be sent to all three outputs. And finally,
messages that do not contain "foo" or "bar" will be sent to the ` + "`baz`" + `
output only.
` + "``` yaml" + `
output:
  type: switch
  switch:
    outputs:
    - output:
        type: foo
        foo:
          foo_field_1: value1
      condition:
        type: text
        text:
          operator: contains
          arg: foo
      fallthrough: true
    - output:
        type: bar
        bar:
          bar_field_1: value2
          bar_field_2: value3
      condition:
        type: text
        text:
          operator: contains
          arg: bar
      fallthrough: true
    - output:
        type: baz
        baz:
          baz_field_1: value4
        processors:
        - type: baz_processor
  processors:
  - type: some_processor
` + "```" + `
The switch output requires a minimum of two outputs. If no condition is defined
for an output, it behaves like a static ` + "`true`" + ` condition. If
` + "`fallthrough`" + ` is set to ` + "`true`" + `, the switch output will
continue evaluating additional outputs after finding a match. If an output
applies back pressure it will block all subsequent messages, and if an output
fails to send a message, it will be retried continuously until completion or
service shut down. Messages that do not match any outputs will be dropped.`,
		// sanitiseConfigFunc flattens each child output + condition config
		// pair for display/serialisation.
		sanitiseConfigFunc: func(conf Config) (interface{}, error) {
			outSlice := []interface{}{}
			for _, out := range conf.Switch.Outputs {
				sanOutput, err := SanitiseConfig(out.Output)
				if err != nil {
					return nil, err
				}
				var sanCond interface{}
				if sanCond, err = condition.SanitiseConfig(out.Condition); err != nil {
					return nil, err
				}
				sanit := map[string]interface{}{
					"output":      sanOutput,
					"fallthrough": out.Fallthrough,
					"condition":   sanCond,
				}
				outSlice = append(outSlice, sanit)
			}
			return map[string]interface{}{
				"outputs": outSlice,
			}, nil
		},
	}
}
//------------------------------------------------------------------------------
// SwitchConfig contains configuration fields for the Switch output type.
type SwitchConfig struct {
	// Outputs is the ordered list of conditional outputs to evaluate.
	Outputs []SwitchConfigOutput `json:"outputs" yaml:"outputs"`
}

// NewSwitchConfig creates a new SwitchConfig with default values.
func NewSwitchConfig() SwitchConfig {
	return SwitchConfig{
		Outputs: []SwitchConfigOutput{},
	}
}

// SwitchConfigOutput contains configuration fields per output of a switch type.
type SwitchConfigOutput struct {
	// Condition gates whether a batch is routed to this output.
	Condition condition.Config `json:"condition" yaml:"condition"`
	// Fallthrough, when true, lets evaluation continue to later outputs even
	// after this one matched.
	Fallthrough bool `json:"fallthrough" yaml:"fallthrough"`
	Output      Config `json:"output" yaml:"output"`
}

// NewSwitchConfigOutput creates a new switch output config with default
// values: a static `true` condition and no fallthrough.
func NewSwitchConfigOutput() SwitchConfigOutput {
	cond := condition.NewConfig()
	cond.Type = condition.TypeStatic
	cond.Static = true

	return SwitchConfigOutput{
		Condition:   cond,
		Fallthrough: false,
		Output:      NewConfig(),
	}
}
//------------------------------------------------------------------------------
// UnmarshalJSON ensures that when parsing configs that are in a map or slice
// the default values are still applied.
func (s *SwitchConfigOutput) UnmarshalJSON(bytes []byte) error {
	// Decode into an alias type seeded with defaults so omitted fields keep
	// them; the alias avoids recursing into this method.
	type confAlias SwitchConfigOutput
	aliased := confAlias(NewSwitchConfigOutput())

	if err := json.Unmarshal(bytes, &aliased); err != nil {
		return err
	}

	*s = SwitchConfigOutput(aliased)
	return nil
}

// UnmarshalYAML ensures that when parsing configs that are in a map or slice
// the default values are still applied.
func (s *SwitchConfigOutput) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Same defaults-seeded alias approach, driven by the YAML callback.
	type confAlias SwitchConfigOutput
	aliased := confAlias(NewSwitchConfigOutput())

	if err := unmarshal(&aliased); err != nil {
		return err
	}

	*s = SwitchConfigOutput(aliased)
	return nil
}
//------------------------------------------------------------------------------
// Switch is a broker that implements types.Consumer and broadcasts each
// message out to an array of outputs according to their conditions.
type Switch struct {
	running int32 // 1 while running; set to 0 atomically by CloseAsync

	logger log.Modular
	stats  metrics.Type

	throt *throttle.Type // backoff between redelivery attempts to failed outputs

	transactions <-chan types.Transaction

	outputTsChans  []chan types.Transaction // per-output transaction feeds
	outputResChans []chan types.Response    // per-output response channels
	outputs        []types.Output
	conditions     []types.Condition // gate per output, parallel to outputs
	fallthroughs   []bool            // continue matching after output i matched

	closedChan chan struct{} // closed when loop has fully shut down
	closeChan  chan struct{} // closed to request shutdown
}
// NewSwitch creates a new Switch type by providing outputs. Messages will be
// sent to a subset of outputs according to condition and fallthrough
// settings. Returns ErrSwitchNoOutputs when fewer than two outputs are
// configured.
func NewSwitch(
	conf Config,
	mgr types.Manager,
	logger log.Modular,
	stats metrics.Type,
) (Type, error) {
	lOutputs := len(conf.Switch.Outputs)
	if lOutputs < 2 {
		return nil, ErrSwitchNoOutputs
	}

	o := &Switch{
		running:      1,
		stats:        stats,
		logger:       logger.NewModule(".broker.switch"),
		transactions: nil,
		outputs:      make([]types.Output, lOutputs),
		conditions:   make([]types.Condition, lOutputs),
		fallthroughs: make([]bool, lOutputs),
		closedChan:   make(chan struct{}),
		closeChan:    make(chan struct{}),
	}

	// Construct each child output with its gating condition.
	var err error
	for i, oConf := range conf.Switch.Outputs {
		if o.outputs[i], err = New(oConf.Output, mgr, logger, stats); err != nil {
			return nil, err
		}
		if o.conditions[i], err = condition.New(oConf.Condition, mgr, logger, stats); err != nil {
			return nil, err
		}
		o.fallthroughs[i] = oConf.Fallthrough
	}

	o.throt = throttle.New(throttle.OptCloseChan(o.closeChan))

	// Wire a dedicated transaction/response channel pair to each output and
	// start it consuming.
	o.outputTsChans = make([]chan types.Transaction, len(o.outputs))
	o.outputResChans = make([]chan types.Response, len(o.outputs))
	for i := range o.outputTsChans {
		o.outputTsChans[i] = make(chan types.Transaction)
		o.outputResChans[i] = make(chan types.Response)
		if err := o.outputs[i].Consume(o.outputTsChans[i]); err != nil {
			return nil, err
		}
	}
	return o, nil
}
//------------------------------------------------------------------------------
// Consume assigns a new transactions channel for the broker to read and
// starts the dispatch loop. It may only be called once; subsequent calls
// return types.ErrAlreadyStarted.
func (o *Switch) Consume(transactions <-chan types.Transaction) error {
	if o.transactions != nil {
		return types.ErrAlreadyStarted
	}
	o.transactions = transactions
	go o.loop()
	return nil
}
//------------------------------------------------------------------------------
// loop is an internal loop that brokers incoming messages to many outputs.
// It runs in its own goroutine (started by Consume) and returns only on
// shutdown or when the transaction channel closes.
func (o *Switch) loop() {
	var (
		mMsgDrop   = o.stats.GetCounter("broker.switch.messages.dropped")
		mMsgRcvd   = o.stats.GetCounter("broker.switch.messages.received")
		mMsgSnt    = o.stats.GetCounter("broker.switch.messages.sent")
		mOutputErr = o.stats.GetCounter("broker.switch.output.error")
	)

	defer func() {
		// Shut down every child output, then wait (indefinitely, in
		// one-second polls) for each to finish before signalling closedChan.
		for i, output := range o.outputs {
			output.CloseAsync()
			close(o.outputTsChans[i])
		}
		for _, output := range o.outputs {
			if err := output.WaitForClose(time.Second); err != nil {
				for err != nil {
					err = output.WaitForClose(time.Second)
				}
			}
		}
		close(o.closedChan)
	}()

	for atomic.LoadInt32(&o.running) == 1 {
		var ts types.Transaction
		var open bool
		select {
		case ts, open = <-o.transactions:
			if !open {
				return
			}
		case <-o.closeChan:
			return
		}
		mMsgRcvd.Incr(1)

		// Evaluate conditions in order; stop at the first match unless that
		// output is flagged fallthrough.
		var outputTargets []int
		for i, oCond := range o.conditions {
			if oCond.Check(ts.Payload) {
				outputTargets = append(outputTargets, i)
				if !o.fallthroughs[i] {
					break
				}
			}
		}

		// No matching output: ack upstream and drop the message.
		if len(outputTargets) == 0 {
			select {
			case ts.ResponseChan <- response.NewAck():
				mMsgDrop.Incr(1)
			case <-o.closeChan:
				return
			}
			continue
		}

		// Dispatch a copy of the payload to every target; targets that fail
		// are collected and retried (with throttling) until they succeed or
		// shutdown is requested.
		for len(outputTargets) > 0 {
			for _, i := range outputTargets {
				msgCopy := ts.Payload.Copy()
				select {
				case o.outputTsChans[i] <- types.NewTransaction(msgCopy, o.outputResChans[i]):
				case <-o.closeChan:
					return
				}
			}

			newTargets := []int{}
			for _, i := range outputTargets {
				select {
				case res := <-o.outputResChans[i]:
					if res.Error() != nil {
						newTargets = append(newTargets, i)
						o.logger.Errorf("Failed to dispatch switch message: %v\n", res.Error())
						mOutputErr.Incr(1)
						if !o.throt.Retry() {
							return
						}
					} else {
						o.throt.Reset()
						mMsgSnt.Incr(1)
					}
				case <-o.closeChan:
					return
				}
			}
			outputTargets = newTargets
		}

		// Every target succeeded: ack upstream.
		select {
		case ts.ResponseChan <- response.NewAck():
		case <-o.closeChan:
			return
		}
	}
}
// CloseAsync shuts down the Switch broker and stops processing requests.
//
// The compare-and-swap guarantees closeChan is closed exactly once, making
// CloseAsync safe to call concurrently or repeatedly.
func (o *Switch) CloseAsync() {
	if atomic.CompareAndSwapInt32(&o.running, 1, 0) {
		close(o.closeChan)
	}
}
// WaitForClose blocks until the Switch broker has closed down.
//
// It returns types.ErrTimeout when the broker has not fully shut down
// within the supplied timeout, and nil once shutdown has completed.
func (o *Switch) WaitForClose(timeout time.Duration) error {
	select {
	case <-time.After(timeout):
		return types.ErrTimeout
	case <-o.closedChan:
		return nil
	}
}
//------------------------------------------------------------------------------ | lib/output/switch.go | 0.714329 | 0.518912 | switch.go | starcoder |
package util
import (
"reflect"
"strconv"
"mongoid/log"
)
// MarshalFromDB casts the given fromValue into the given intoType according to expected DB value conversions, returning an interface to the newly cast value.
// If fromValue is already the type of intoType, it may be returned directly, but it is not guaranteed to do so.
// If a value conversion would result in loss of data or precision, this function will panic.
func MarshalFromDB(intoType reflect.Type, fromValue interface{}) interface{} {
if reflect.TypeOf(fromValue) == intoType {
return fromValue
}
switch intoType.Kind() {
case reflect.Int8:
fallthrough
case reflect.Int16:
fallthrough
case reflect.Int32:
fallthrough
case reflect.Int64:
fallthrough
case reflect.Int:
dstPtr := reflect.New(intoType)
dst := reflect.Indirect(dstPtr)
src := reflect.ValueOf(fromValue)
if dst.OverflowInt(src.Int()) {
log.Panicf("Overflow detected while storing %v within %v", src.Type(), dst.Type())
}
dst.SetInt(src.Int())
return dst.Interface()
case reflect.Uint8:
fallthrough
case reflect.Uint16:
fallthrough
case reflect.Uint32:
fallthrough
case reflect.Uint64:
fallthrough
case reflect.Uint:
dstPtr := reflect.New(intoType)
dst := reflect.Indirect(dstPtr)
var srcStr string
switch fromValue.(type) {
case int64:
srcStr = strconv.FormatInt(fromValue.(int64), 10)
case int32:
srcStr = strconv.FormatInt(int64(fromValue.(int32)), 10)
case string:
srcStr = fromValue.(string)
}
srcUint64, srcUint64Err := strconv.ParseUint(srcStr, 10, 64)
if srcUint64Err != nil {
log.Panicf("Error detected while storing %v within %v: %v", reflect.TypeOf(fromValue), intoType, srcUint64Err)
}
if dst.OverflowUint(srcUint64) {
log.Panicf("Overflow detected while storing %v within %v", reflect.TypeOf(fromValue), intoType)
}
dst.SetUint(srcUint64)
return dst.Interface()
case reflect.Complex64:
fallthrough
case reflect.Complex128:
dstPtr := reflect.New(intoType)
dst := reflect.Indirect(dstPtr)
var srcStr string
switch fromValue.(type) {
case complex64:
srcStr = strconv.FormatComplex(complex128(fromValue.(complex64)), 'f', -1, 64)
case complex128:
srcStr = strconv.FormatComplex(fromValue.(complex128), 'f', -1, 128)
case string:
srcStr = fromValue.(string)
}
var dstBits int
switch intoType.Kind() {
case reflect.Complex64:
dstBits = 64
case reflect.Complex128:
dstBits = 128
}
srcComplex128, srcComplex128Err := strconv.ParseComplex(srcStr, dstBits)
if srcComplex128Err != nil {
log.Panicf("Error detected while storing %v within %v: %v", reflect.TypeOf(fromValue), intoType, srcComplex128Err)
}
if dst.OverflowComplex(srcComplex128) {
log.Panicf("Overflow detected while storing %v within %v", reflect.TypeOf(fromValue), intoType)
}
dst.SetComplex(srcComplex128)
return dst.Interface()
}
log.Panicf("Unhandled kind: %v", intoType.Kind())
return nil
} | util/marshal_from_db.go | 0.59514 | 0.564819 | marshal_from_db.go | starcoder |
package util
import (
"bytes"
"encoding/binary"
"fmt"
"math/big"
"time"
)
const (
// uint256Size is the number of bytes needed to represent an unsigned
// 256-bit integer.
uint256Size = 32
)
// BigToLEUint256 returns the passed big integer as an unsigned 256-bit integer
// encoded as little-endian bytes. Numbers which are larger than the max
// unsigned 256-bit integer are truncated.
func BigToLEUint256(n *big.Int) [uint256Size]byte {
// Pad or truncate the big-endian big int to correct number of bytes.
nBytes := n.Bytes()
nlen := len(nBytes)
pad := 0
start := 0
if nlen <= uint256Size {
pad = uint256Size - nlen
} else {
start = nlen - uint256Size
}
var buf [uint256Size]byte
copy(buf[pad:], nBytes[start:])
// Reverse the bytes to little endian and return them.
for i := 0; i < uint256Size/2; i++ {
buf[i], buf[uint256Size-1-i] = buf[uint256Size-1-i], buf[i]
}
return buf
}
// LEUint256ToBig returns the passed unsigned 256-bit integer
// encoded as little-endian as a big integer.
func LEUint256ToBig(n [uint256Size]byte) *big.Int {
var buf [uint256Size]byte
copy(buf[:], n[:])
// Reverse the bytes to big endian and create a big.Int.
for i := 0; i < uint256Size/2; i++ {
buf[i], buf[uint256Size-1-i] = buf[uint256Size-1-i], buf[i]
}
v := new(big.Int).SetBytes(buf[:])
return v
}
// HeightToBigEndianBytes returns an 4-byte big endian representation of
// the provided block height.
func HeightToBigEndianBytes(height uint32) []byte {
	var buf [4]byte
	binary.BigEndian.PutUint32(buf[:], height)
	return buf[:]
}
// BigEndianBytesToHeight returns the block height of the provided 4-byte big
// endian representation.
func BigEndianBytesToHeight(b []byte) uint32 {
	// Only the first four bytes are significant; shorter input panics,
	// as before.
	return binary.BigEndian.Uint32(b[:4])
}
// ReversePrevBlockWords reverses each 4-byte word in the provided hex encoded
// previous block hash.
//
// Input whose length is not a multiple of 8 hex characters causes a slice
// bounds panic, matching the previous behavior.
func ReversePrevBlockWords(hashE string) string {
	var out bytes.Buffer
	for i := 0; i < len(hashE); i += 8 {
		word := hashE[i : i+8]
		// Emit the word's four byte-pairs in reverse order.
		out.WriteString(word[6:8])
		out.WriteString(word[4:6])
		out.WriteString(word[2:4])
		out.WriteString(word[0:2])
	}
	return out.String()
}
// HexReversed reverses a hex string.
//
// The string is reversed byte-pair by byte-pair so each hex octet keeps its
// digit order; odd-length input yields an error.
func HexReversed(in string) (string, error) {
	if len(in)%2 != 0 {
		return "", fmt.Errorf("incorrect hex input length")
	}
	out := make([]byte, 0, len(in))
	for i := len(in); i > 0; i -= 2 {
		out = append(out, in[i-2], in[i-1])
	}
	return string(out), nil
}
// NanoToBigEndianBytes returns an 8-byte big endian representation of
// the provided nanosecond time.
func NanoToBigEndianBytes(nano int64) []byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(nano))
return b
}
// BigEndianBytesToNano returns nanosecond time of the provided 8-byte big
// endian representation.
func BigEndianBytesToNano(b []byte) int64 {
return int64(binary.BigEndian.Uint64(b[0:8]))
}
// BigEndianBytesToTime returns a time instance of the provided 8-byte big
// endian representation.
func BigEndianBytesToTime(b []byte) *time.Time {
t := time.Unix(0, BigEndianBytesToNano(b))
return &t
} | util/conversion.go | 0.738103 | 0.421433 | conversion.go | starcoder |
package image
import (
"fmt"
"image"
"image/color"
"image/draw"
statepb "github.com/GoogleCloudPlatform/testgrid/pb/state"
tspb "github.com/GoogleCloudPlatform/testgrid/pb/test_status"
"github.com/GoogleCloudPlatform/testgrid/pkg/updater"
)
var Decode = image.Decode
// Tiles converts a tile-set image into an array of images
func Tiles(img image.Gray, size int) []image.Gray {
var set []image.Gray
bounds := img.Bounds()
for y := bounds.Min.Y; y < bounds.Max.Y; y += size {
for x := bounds.Min.X; x < bounds.Max.X; x += size {
r := image.Rect(x, y, x+size, y+size)
set = append(set, *img.SubImage(r).(*image.Gray))
}
}
return set
}
// Gray converts the image into a gray image.
func Gray(img image.Image) image.Gray {
bounds := img.Bounds()
gray := image.NewGray(bounds)
model := color.GrayModel
// increase right and down
for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
for x := bounds.Min.X; x < bounds.Max.X; x++ {
gray.Set(x, y, model.Convert(img.At(x, y)))
}
}
return *gray
}
// Print renders a five-level ASCII-art approximation of the image to
// stdout: the image is Floyd–Steinberg dithered onto a five-gray palette
// and each pixel is printed as a shade rune, one image row per line.
func Print(im image.Image) {
	pi := image.NewPaletted(im.Bounds(), []color.Color{
		color.Gray{Y: 255},
		color.Gray{Y: 160},
		color.Gray{Y: 70},
		color.Gray{Y: 35},
		color.Gray{Y: 0},
	})
	width := im.Bounds().Dx()
	// image.ZP is deprecated; spell out the zero Point instead.
	draw.FloydSteinberg.Draw(pi, im.Bounds(), im, image.Point{})
	shade := []string{" ", "░", "▒", "▓", "█"}
	for i, p := range pi.Pix {
		fmt.Print(shade[p])
		if (i+1)%width == 0 {
			fmt.Print("\n")
		}
	}
}
// Color is a color.Color that additionally carries TestGrid cell metadata:
// an icon, a message and a cell ID.
type Color interface {
	color.Color
	Meta() (icon, message, id string)
}

// testgridColor is the concrete Color implementation: an embedded
// color.Color plus the metadata returned by Meta.
type testgridColor struct {
	color.Color
	icon string
	message string
	id string
}

// Meta returns the cell metadata attached to this color.
func (c testgridColor) Meta() (icon, message, id string) {
	return c.icon, c.message, c.id
}

// MetaColor wraps c with the given icon, message and cell ID, producing a
// Color whose Meta reports them.
func MetaColor(c color.Color, icon, message, id string) Color {
	return testgridColor{
		Color: c,
		icon: icon,
		message: message,
		id: id,
	}
}
// colorMap assigns a display color to every test status; several statuses
// share the same color (e.g. the passing variants are all green).
var colorMap = map[tspb.TestStatus]color.Color{
	tspb.TestStatus_NO_RESULT: color.RGBA{0xff, 0xff, 0xff, 0xff}, // white
	tspb.TestStatus_PASS: color.RGBA{0, 0xcc, 0x33, 0xff}, // green
	tspb.TestStatus_PASS_WITH_ERRORS: color.RGBA{0, 0xcc, 0x33, 0xff},
	tspb.TestStatus_PASS_WITH_SKIPS: color.RGBA{0, 0xcc, 0x33, 0xff},
	tspb.TestStatus_RUNNING: color.RGBA{0xbb, 0xbb, 0xbb, 0xff}, // gray
	tspb.TestStatus_CATEGORIZED_ABORT: color.RGBA{0xbb, 0xbb, 0xbb, 0xff},
	tspb.TestStatus_UNKNOWN: color.RGBA{0xbb, 0xbb, 0xbb, 0xff},
	tspb.TestStatus_CANCEL: color.RGBA{0xbb, 0xbb, 0xbb, 0xff},
	tspb.TestStatus_BLOCKED: color.RGBA{0xbb, 0xbb, 0xbb, 0xff},
	tspb.TestStatus_TIMED_OUT: color.RGBA{0xaa, 0, 0, 0xff}, // red
	tspb.TestStatus_CATEGORIZED_FAIL: color.RGBA{0xaa, 0, 0, 0xff},
	tspb.TestStatus_BUILD_FAIL: color.RGBA{0, 0, 0, 0xff}, // black
	tspb.TestStatus_FAIL: color.RGBA{0xaa, 0, 0, 0xff},
	tspb.TestStatus_FLAKY: color.RGBA{0x66, 0x00, 0x99, 0xff}, // purple
	tspb.TestStatus_TOOL_FAIL: color.RGBA{0xaa, 0, 0, 0xff},
	tspb.TestStatus_BUILD_PASSED: color.RGBA{0xaa, 0xee, 0xbb, 0xff}, // light green
}

// palette lists the status colors in declaration order. Image.Set maps a
// color back to a status via palette.Index, so the slice index is assumed
// to equal the numeric TestStatus value — TODO confirm against the proto
// enum. NOTE(review): duplicate entries make that reverse mapping
// ambiguous, since Index returns the first matching entry.
var palette = color.Palette{
	colorMap[tspb.TestStatus_NO_RESULT],
	colorMap[tspb.TestStatus_PASS],
	colorMap[tspb.TestStatus_PASS_WITH_ERRORS],
	colorMap[tspb.TestStatus_PASS_WITH_SKIPS],
	colorMap[tspb.TestStatus_RUNNING],
	colorMap[tspb.TestStatus_CATEGORIZED_ABORT],
	colorMap[tspb.TestStatus_UNKNOWN],
	colorMap[tspb.TestStatus_CANCEL],
	colorMap[tspb.TestStatus_BLOCKED],
	colorMap[tspb.TestStatus_TIMED_OUT],
	colorMap[tspb.TestStatus_CATEGORIZED_FAIL],
	colorMap[tspb.TestStatus_BUILD_FAIL],
	colorMap[tspb.TestStatus_FAIL],
	colorMap[tspb.TestStatus_FLAKY],
	colorMap[tspb.TestStatus_TOOL_FAIL],
	colorMap[tspb.TestStatus_BUILD_PASSED],
}
// Image is a TestGrid-backed image: a slice of inflated columns whose
// cells are read and written through the standard image interfaces.
type Image struct {
	Cols []updater.InflatedColumn
}

// Max is the exclusive upper bound on an Image's width and height.
const Max = 10000

// rowNames precomputes the zero-padded row keys ("0000" .. "9999") used to
// address cells within a column, avoiding a Sprintf per cell access.
var rowNames = func() []string {
	names := make([]string, 0, Max)
	for y := 0; y < Max; y++ {
		names = append(names, fmt.Sprintf("%04d", y))
	}
	return names
}()
// New returns an Image of the given dimensions with every cell initialized
// to its zero value. It panics when either dimension reaches Max.
func New(r image.Rectangle) *Image {
	width, height := r.Dx(), r.Dy()
	if width >= Max || height >= Max {
		panic(fmt.Sprintf("%dx%d > %dx%d", width, height, Max, Max))
	}
	img := Image{
		Cols: make([]updater.InflatedColumn, 0, width),
	}
	for x := 0; x < width; x++ {
		label := fmt.Sprintf("%04d", x)
		col := updater.InflatedColumn{
			Column: &statepb.Column{
				Build: label,
				Name: label,
			},
			Cells: make(map[string]updater.Cell, height),
		}
		for y := 0; y < height; y++ {
			col.Cells[rowNames[y]] = updater.Cell{}
		}
		img.Cols = append(img.Cols, col)
	}
	return &img
}
// ColorModel returns the fixed TestGrid status palette.
func (i *Image) ColorModel() color.Model {
	return palette
}
// Bounds reports the image rectangle: one pixel per (column, row) cell,
// with the height taken from the first column (zero when there are no
// columns).
func (i *Image) Bounds() image.Rectangle {
	height := 0
	if len(i.Cols) > 0 {
		height = len(i.Cols[0].Cells)
	}
	return image.Rect(0, 0, len(i.Cols), height)
}
// clear is a fully transparent color. NOTE(review): unused in this file.
var clear = color.RGBA{0, 0, 0, 0}
// At returns the status color of the cell at (x, y), wrapped with the
// cell's icon, message and ID metadata. Statuses without a mapped color
// fall back to the status-0 (no result) color.
func (i *Image) At(x, y int) color.Color {
	cell := i.Cols[x].Cells[rowNames[y]]
	c, known := colorMap[cell.Result]
	if !known {
		c = colorMap[0]
	}
	return MetaColor(c, cell.Icon, cell.Message, cell.CellID)
}
// Set writes the color c into the cell at (x, y).
//
// The cell's Result is derived via palette.Index(c); because several
// palette entries carry the same RGBA value, statuses sharing a color
// cannot be distinguished on the way back in (Index returns the first
// match). Metadata (message, icon, cell ID) is only overwritten by
// non-empty values when c is a testgridColor.
func (i *Image) Set(x, y int, c color.Color) {
	name := rowNames[y]
	cells := i.Cols[x].Cells
	cell := cells[name]
	cell.Result = tspb.TestStatus(palette.Index(c))
	tgc, ok := c.(testgridColor)
	if ok {
		if tgc.message != "" {
			cell.Message = tgc.message
		}
		if tgc.icon != "" {
			cell.Icon = tgc.icon
		}
		if tgc.id != "" {
			cell.CellID = tgc.id
		}
	}
	// Write the (value-typed) cell back into the map.
	cells[name] = cell
}
package dataset
import (
"path"
log "github.com/unchartedsoftware/plog"
"github.com/uncharted-distil/distil-compute/model"
"github.com/uncharted-distil/distil-compute/primitive/compute"
"github.com/uncharted-distil/distil/api/env"
"github.com/uncharted-distil/distil/api/serialization"
)
// D3M captures the needed information for a D3M dataset.
type D3M struct {
	// DatasetName is the default name used when none is supplied at creation.
	DatasetName string
	// DatasetPath is the on-disk folder containing the D3M dataset.
	DatasetPath string
}
// NewD3MDataset creates a new d3m dataset from a dataset folder.
func NewD3MDataset(datasetName string, datasetPath string) (*D3M, error) {
	dataset := &D3M{
		DatasetName: datasetName,
		DatasetPath: datasetPath,
	}
	return dataset, nil
}
// CreateDataset processes the D3M dataset and updates it as needed to meet distil needs.
//
// The dataset is read from d.DatasetPath; its name, ID and storage name
// are overwritten from datasetName (falling back to d.DatasetName when
// empty), non-main resource paths are made absolute, and the metadata is
// flagged definitive when it fully describes the data.
// NOTE(review): rootDataPath and config are currently unused.
func (d *D3M) CreateDataset(rootDataPath string, datasetName string, config *env.Config) (*serialization.RawDataset, error) {
	log.Infof("creating dataset from d3m dataset source")
	if datasetName == "" {
		datasetName = d.DatasetName
	}
	// read the dataset
	ds, err := serialization.ReadDataset(path.Join(d.DatasetPath, compute.D3MDataSchema))
	if err != nil {
		return nil, err
	}
	// update the id & name & storage name
	datasetID := model.NormalizeDatasetID(datasetName)
	ds.Metadata.Name = datasetName
	ds.Metadata.ID = datasetName
	ds.Metadata.StorageName = datasetID
	// update the non main data resources to be absolute paths
	mainDR := ds.Metadata.GetMainDataResource()
	for _, dr := range ds.Metadata.DataResources {
		if dr != mainDR {
			dr.ResPath = model.GetResourcePathFromFolder(d.DatasetPath, dr)
		}
	}
	ds.DefinitiveTypes = d.isFullySpecified(ds)
	return ds, nil
}
// isFullySpecified reports whether the dataset metadata completely and
// consistently describes the data: every header column has a metadata
// variable, no variable has an unknown type, at least one variable is
// neither a plain string nor an index, and the header names match the
// variable list positionally.
func (d *D3M) isFullySpecified(ds *serialization.RawDataset) bool {
	// fully specified means all variables are in the metadata, there are no
	// unknown types and there is at least one non string, non index type
	// (to avoid to case where everything was initialized to text)
	mainDR := ds.Metadata.GetMainDataResource()
	// Guard against a dataset with no rows at all; ds.Data[0] below is the
	// header row and would otherwise panic.
	if len(ds.Data) == 0 {
		log.Infof("dataset has no header row to check against the metadata")
		return false
	}
	header := ds.Data[0]
	if len(header) != len(mainDR.Variables) {
		log.Infof("not every variable is specified in the metadata")
		return false
	}
	// find one non string and non index, and make sure no unknowns exist
	foundComplexType := false
	varMapIndex := map[int]*model.Variable{}
	for _, v := range mainDR.Variables {
		if v.Type == model.UnknownSchemaType {
			log.Infof("at least one variable is unknown type")
			return false
		} else if !foundComplexType && d.variableIsTyped(v) {
			foundComplexType = true
		}
		varMapIndex[v.Index] = v
	}
	if !foundComplexType {
		log.Infof("all variables are either an index or a string")
		return false
	}
	// check the variable list against the header in the data
	for i, h := range header {
		v := varMapIndex[i]
		if v == nil {
			// Previously this branch dereferenced the nil map entry while
			// building the log message, panicking instead of returning.
			log.Infof("header in data file has no matching metadata variable (%s at position %d)", h, i)
			return false
		}
		if v.HeaderName != h {
			log.Infof("header in data file does not match metadata variable list (%s differs from %s at position %d)", h, v.HeaderName, i)
			return false
		}
	}
	log.Infof("metadata is fully specified")
	return true
}
// variableIsTyped reports whether the variable carries real type
// information: anything that is neither plain text nor an index role, or a
// variable that references another data resource.
func (d *D3M) variableIsTyped(variable *model.Variable) bool {
	isPlainText := model.IsText(variable.Type)
	isIndex := model.IsIndexRole(variable.SelectedRole)
	if !isPlainText && !isIndex {
		return true
	}
	// Text/index variables still count as typed when they refer to another
	// resource.
	return variable.RefersTo != nil
}
// GetDefinitiveTypes returns an empty list as definitive types.
func (d *D3M) GetDefinitiveTypes() []*model.Variable {
	// Intentionally empty (non-nil) — this source supplies no extra types.
	return []*model.Variable{}
}
// CleanupTempFiles does nothing since this creates no temp files.
func (d *D3M) CleanupTempFiles() {
	// no-op by design
}
package plaid
import (
"encoding/json"
)
// InvestmentTransaction A transaction within an investment account.
type InvestmentTransaction struct {
	// The ID of the Investment transaction, unique across all Plaid transactions. Like all Plaid identifiers, the `investment_transaction_id` is case sensitive.
	InvestmentTransactionId string `json:"investment_transaction_id"`
	// A legacy field formerly used internally by Plaid to identify certain canceled transactions.
	CancelTransactionId NullableString `json:"cancel_transaction_id,omitempty"`
	// The `account_id` of the account against which this transaction posted.
	AccountId string `json:"account_id"`
	// The `security_id` to which this transaction is related.
	SecurityId NullableString `json:"security_id"`
	// The [ISO 8601](https://wikipedia.org/wiki/ISO_8601) posting date for the transaction, or transacted date for pending transactions.
	Date string `json:"date"`
	// The institution’s description of the transaction.
	Name string `json:"name"`
	// The number of units of the security involved in this transaction.
	Quantity float32 `json:"quantity"`
	// The complete value of the transaction. Positive values when cash is debited, e.g. purchases of stock; negative values when cash is credited, e.g. sales of stock. Treatment remains the same for cash-only movements unassociated with securities.
	Amount float32 `json:"amount"`
	// The price of the security at which this transaction occurred.
	Price float32 `json:"price"`
	// The combined value of all fees applied to this transaction
	Fees NullableFloat32 `json:"fees"`
	// Value is one of the following: `buy`: Buying an investment `sell`: Selling an investment `cancel`: A cancellation of a pending transaction `cash`: Activity that modifies a cash position `fee`: A fee on the account `transfer`: Activity which modifies a position, but not through buy/sell activity e.g. options exercise, portfolio transfer For descriptions of possible transaction types and subtypes, see the [Investment transaction types schema](https://plaid.com/docs/api/accounts/#investment-transaction-types-schema).
	Type string `json:"type"`
	// For descriptions of possible transaction types and subtypes, see the [Investment transaction types schema](https://plaid.com/docs/api/accounts/#investment-transaction-types-schema).
	Subtype string `json:"subtype"`
	// The ISO-4217 currency code of the transaction. Always `null` if `unofficial_currency_code` is non-`null`.
	IsoCurrencyCode NullableString `json:"iso_currency_code"`
	// The unofficial currency code associated with the holding. Always `null` if `iso_currency_code` is non-`null`. Unofficial currency codes are used for currencies that do not have official ISO currency codes, such as cryptocurrencies and the currencies of certain countries. See the [currency code schema](https://plaid.com/docs/api/accounts#currency-code-schema) for a full listing of supported `iso_currency_code`s.
	UnofficialCurrencyCode NullableString `json:"unofficial_currency_code"`
	// AdditionalProperties collects JSON keys not mapped to any field above.
	AdditionalProperties map[string]interface{}
}

// _InvestmentTransaction aliases the struct so UnmarshalJSON can decode it
// without recursing into the custom unmarshaler.
type _InvestmentTransaction InvestmentTransaction
// NewInvestmentTransaction instantiates a new InvestmentTransaction object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewInvestmentTransaction(investmentTransactionId string, accountId string, securityId NullableString, date string, name string, quantity float32, amount float32, price float32, fees NullableFloat32, type_ string, subtype string, isoCurrencyCode NullableString, unofficialCurrencyCode NullableString) *InvestmentTransaction {
	this := InvestmentTransaction{}
	this.InvestmentTransactionId = investmentTransactionId
	this.AccountId = accountId
	this.SecurityId = securityId
	this.Date = date
	this.Name = name
	this.Quantity = quantity
	this.Amount = amount
	this.Price = price
	this.Fees = fees
	this.Type = type_
	this.Subtype = subtype
	this.IsoCurrencyCode = isoCurrencyCode
	this.UnofficialCurrencyCode = unofficialCurrencyCode
	return &this
}
// NewInvestmentTransactionWithDefaults instantiates a new InvestmentTransaction object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewInvestmentTransactionWithDefaults() *InvestmentTransaction {
	this := InvestmentTransaction{}
	return &this
}
// --- InvestmentTransactionId / CancelTransactionId accessors ---
// All getters tolerate a nil receiver, returning zero values.

// GetInvestmentTransactionId returns the InvestmentTransactionId field value
func (o *InvestmentTransaction) GetInvestmentTransactionId() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.InvestmentTransactionId
}

// GetInvestmentTransactionIdOk returns a tuple with the InvestmentTransactionId field value
// and a boolean to check if the value has been set.
func (o *InvestmentTransaction) GetInvestmentTransactionIdOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.InvestmentTransactionId, true
}

// SetInvestmentTransactionId sets field value
func (o *InvestmentTransaction) SetInvestmentTransactionId(v string) {
	o.InvestmentTransactionId = v
}

// GetCancelTransactionId returns the CancelTransactionId field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *InvestmentTransaction) GetCancelTransactionId() string {
	if o == nil || o.CancelTransactionId.Get() == nil {
		var ret string
		return ret
	}
	return *o.CancelTransactionId.Get()
}

// GetCancelTransactionIdOk returns a tuple with the CancelTransactionId field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *InvestmentTransaction) GetCancelTransactionIdOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return o.CancelTransactionId.Get(), o.CancelTransactionId.IsSet()
}

// HasCancelTransactionId returns a boolean if a field has been set.
func (o *InvestmentTransaction) HasCancelTransactionId() bool {
	if o != nil && o.CancelTransactionId.IsSet() {
		return true
	}
	return false
}

// SetCancelTransactionId gets a reference to the given NullableString and assigns it to the CancelTransactionId field.
func (o *InvestmentTransaction) SetCancelTransactionId(v string) {
	o.CancelTransactionId.Set(&v)
}

// SetCancelTransactionIdNil sets the value for CancelTransactionId to be an explicit nil
func (o *InvestmentTransaction) SetCancelTransactionIdNil() {
	o.CancelTransactionId.Set(nil)
}

// UnsetCancelTransactionId ensures that no value is present for CancelTransactionId, not even an explicit nil
func (o *InvestmentTransaction) UnsetCancelTransactionId() {
	o.CancelTransactionId.Unset()
}
// --- AccountId / SecurityId accessors ---

// GetAccountId returns the AccountId field value
func (o *InvestmentTransaction) GetAccountId() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.AccountId
}

// GetAccountIdOk returns a tuple with the AccountId field value
// and a boolean to check if the value has been set.
func (o *InvestmentTransaction) GetAccountIdOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.AccountId, true
}

// SetAccountId sets field value
func (o *InvestmentTransaction) SetAccountId(v string) {
	o.AccountId = v
}

// GetSecurityId returns the SecurityId field value
// If the value is explicit nil, the zero value for string will be returned
func (o *InvestmentTransaction) GetSecurityId() string {
	if o == nil || o.SecurityId.Get() == nil {
		var ret string
		return ret
	}
	return *o.SecurityId.Get()
}

// GetSecurityIdOk returns a tuple with the SecurityId field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *InvestmentTransaction) GetSecurityIdOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return o.SecurityId.Get(), o.SecurityId.IsSet()
}

// SetSecurityId sets field value
func (o *InvestmentTransaction) SetSecurityId(v string) {
	o.SecurityId.Set(&v)
}
// --- Date / Name / Quantity / Amount / Price accessors ---

// GetDate returns the Date field value
func (o *InvestmentTransaction) GetDate() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Date
}

// GetDateOk returns a tuple with the Date field value
// and a boolean to check if the value has been set.
func (o *InvestmentTransaction) GetDateOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Date, true
}

// SetDate sets field value
func (o *InvestmentTransaction) SetDate(v string) {
	o.Date = v
}

// GetName returns the Name field value
func (o *InvestmentTransaction) GetName() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Name
}

// GetNameOk returns a tuple with the Name field value
// and a boolean to check if the value has been set.
func (o *InvestmentTransaction) GetNameOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Name, true
}

// SetName sets field value
func (o *InvestmentTransaction) SetName(v string) {
	o.Name = v
}

// GetQuantity returns the Quantity field value
func (o *InvestmentTransaction) GetQuantity() float32 {
	if o == nil {
		var ret float32
		return ret
	}
	return o.Quantity
}

// GetQuantityOk returns a tuple with the Quantity field value
// and a boolean to check if the value has been set.
func (o *InvestmentTransaction) GetQuantityOk() (*float32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Quantity, true
}

// SetQuantity sets field value
func (o *InvestmentTransaction) SetQuantity(v float32) {
	o.Quantity = v
}

// GetAmount returns the Amount field value
func (o *InvestmentTransaction) GetAmount() float32 {
	if o == nil {
		var ret float32
		return ret
	}
	return o.Amount
}

// GetAmountOk returns a tuple with the Amount field value
// and a boolean to check if the value has been set.
func (o *InvestmentTransaction) GetAmountOk() (*float32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Amount, true
}

// SetAmount sets field value
func (o *InvestmentTransaction) SetAmount(v float32) {
	o.Amount = v
}

// GetPrice returns the Price field value
func (o *InvestmentTransaction) GetPrice() float32 {
	if o == nil {
		var ret float32
		return ret
	}
	return o.Price
}

// GetPriceOk returns a tuple with the Price field value
// and a boolean to check if the value has been set.
func (o *InvestmentTransaction) GetPriceOk() (*float32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Price, true
}

// SetPrice sets field value
func (o *InvestmentTransaction) SetPrice(v float32) {
	o.Price = v
}
// --- Fees / Type / Subtype / currency-code accessors ---

// GetFees returns the Fees field value
// If the value is explicit nil, the zero value for float32 will be returned
func (o *InvestmentTransaction) GetFees() float32 {
	if o == nil || o.Fees.Get() == nil {
		var ret float32
		return ret
	}
	return *o.Fees.Get()
}

// GetFeesOk returns a tuple with the Fees field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *InvestmentTransaction) GetFeesOk() (*float32, bool) {
	if o == nil {
		return nil, false
	}
	return o.Fees.Get(), o.Fees.IsSet()
}

// SetFees sets field value
func (o *InvestmentTransaction) SetFees(v float32) {
	o.Fees.Set(&v)
}

// GetType returns the Type field value
func (o *InvestmentTransaction) GetType() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Type
}

// GetTypeOk returns a tuple with the Type field value
// and a boolean to check if the value has been set.
func (o *InvestmentTransaction) GetTypeOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Type, true
}

// SetType sets field value
func (o *InvestmentTransaction) SetType(v string) {
	o.Type = v
}

// GetSubtype returns the Subtype field value
func (o *InvestmentTransaction) GetSubtype() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Subtype
}

// GetSubtypeOk returns a tuple with the Subtype field value
// and a boolean to check if the value has been set.
func (o *InvestmentTransaction) GetSubtypeOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Subtype, true
}

// SetSubtype sets field value
func (o *InvestmentTransaction) SetSubtype(v string) {
	o.Subtype = v
}

// GetIsoCurrencyCode returns the IsoCurrencyCode field value
// If the value is explicit nil, the zero value for string will be returned
func (o *InvestmentTransaction) GetIsoCurrencyCode() string {
	if o == nil || o.IsoCurrencyCode.Get() == nil {
		var ret string
		return ret
	}
	return *o.IsoCurrencyCode.Get()
}

// GetIsoCurrencyCodeOk returns a tuple with the IsoCurrencyCode field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *InvestmentTransaction) GetIsoCurrencyCodeOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return o.IsoCurrencyCode.Get(), o.IsoCurrencyCode.IsSet()
}

// SetIsoCurrencyCode sets field value
func (o *InvestmentTransaction) SetIsoCurrencyCode(v string) {
	o.IsoCurrencyCode.Set(&v)
}

// GetUnofficialCurrencyCode returns the UnofficialCurrencyCode field value
// If the value is explicit nil, the zero value for string will be returned
func (o *InvestmentTransaction) GetUnofficialCurrencyCode() string {
	if o == nil || o.UnofficialCurrencyCode.Get() == nil {
		var ret string
		return ret
	}
	return *o.UnofficialCurrencyCode.Get()
}

// GetUnofficialCurrencyCodeOk returns a tuple with the UnofficialCurrencyCode field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *InvestmentTransaction) GetUnofficialCurrencyCodeOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return o.UnofficialCurrencyCode.Get(), o.UnofficialCurrencyCode.IsSet()
}

// SetUnofficialCurrencyCode sets field value
func (o *InvestmentTransaction) SetUnofficialCurrencyCode(v string) {
	o.UnofficialCurrencyCode.Set(&v)
}
// MarshalJSON serializes the transaction: every required field is emitted
// (nullable ones as explicit nulls), the optional cancel_transaction_id
// only when set, and any captured additional properties last. The dead
// `if true {}` wrappers from the generator have been removed; behavior is
// unchanged.
func (o InvestmentTransaction) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	toSerialize["investment_transaction_id"] = o.InvestmentTransactionId
	if o.CancelTransactionId.IsSet() {
		toSerialize["cancel_transaction_id"] = o.CancelTransactionId.Get()
	}
	toSerialize["account_id"] = o.AccountId
	toSerialize["security_id"] = o.SecurityId.Get()
	toSerialize["date"] = o.Date
	toSerialize["name"] = o.Name
	toSerialize["quantity"] = o.Quantity
	toSerialize["amount"] = o.Amount
	toSerialize["price"] = o.Price
	toSerialize["fees"] = o.Fees.Get()
	toSerialize["type"] = o.Type
	toSerialize["subtype"] = o.Subtype
	toSerialize["iso_currency_code"] = o.IsoCurrencyCode.Get()
	toSerialize["unofficial_currency_code"] = o.UnofficialCurrencyCode.Get()
	for key, value := range o.AdditionalProperties {
		toSerialize[key] = value
	}
	return json.Marshal(toSerialize)
}
// UnmarshalJSON deserializes the transaction via the _InvestmentTransaction
// alias (avoiding recursion) and collects any unknown JSON keys into
// AdditionalProperties.
func (o *InvestmentTransaction) UnmarshalJSON(bytes []byte) (err error) {
	varInvestmentTransaction := _InvestmentTransaction{}
	if err = json.Unmarshal(bytes, &varInvestmentTransaction); err != nil {
		// Previously a failure here was masked: the additional-properties
		// pass below reassigned err and could return nil even though o was
		// never populated.
		return err
	}
	*o = InvestmentTransaction(varInvestmentTransaction)
	additionalProperties := make(map[string]interface{})
	if err = json.Unmarshal(bytes, &additionalProperties); err == nil {
		// Strip the known keys so only genuinely unknown ones remain.
		delete(additionalProperties, "investment_transaction_id")
		delete(additionalProperties, "cancel_transaction_id")
		delete(additionalProperties, "account_id")
		delete(additionalProperties, "security_id")
		delete(additionalProperties, "date")
		delete(additionalProperties, "name")
		delete(additionalProperties, "quantity")
		delete(additionalProperties, "amount")
		delete(additionalProperties, "price")
		delete(additionalProperties, "fees")
		delete(additionalProperties, "type")
		delete(additionalProperties, "subtype")
		delete(additionalProperties, "iso_currency_code")
		delete(additionalProperties, "unofficial_currency_code")
		o.AdditionalProperties = additionalProperties
	}
	return err
}
// NullableInvestmentTransaction wraps an InvestmentTransaction pointer
// together with an explicit "set" flag, so callers can distinguish an
// unset value from an explicit null.
type NullableInvestmentTransaction struct {
	value *InvestmentTransaction // wrapped value; may be nil even when set
	isSet bool                   // true once Set has been called
}
// Get returns the wrapped value, which is nil when the wrapper is unset or
// holds an explicit null.
func (v NullableInvestmentTransaction) Get() *InvestmentTransaction {
	return v.value
}
// Set stores val (which may be nil, meaning explicit null) and marks the
// wrapper as set.
func (v *NullableInvestmentTransaction) Set(val *InvestmentTransaction) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set has been called since construction or the last
// Unset.
func (v NullableInvestmentTransaction) IsSet() bool {
	return v.isSet
}
// Unset clears the wrapper back to its zero state: no value, not set.
func (v *NullableInvestmentTransaction) Unset() {
	v.isSet = false
	v.value = nil
}
// NewNullableInvestmentTransaction wraps val in a
// NullableInvestmentTransaction that is already flagged as set.
func NewNullableInvestmentTransaction(val *InvestmentTransaction) *NullableInvestmentTransaction {
	n := new(NullableInvestmentTransaction)
	n.value = val
	n.isSet = true
	return n
}
// MarshalJSON encodes the wrapped value; a nil value (unset or explicit
// null) serializes as JSON null.
func (v NullableInvestmentTransaction) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
func (v *NullableInvestmentTransaction) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | plaid/model_investment_transaction.go | 0.820937 | 0.572125 | model_investment_transaction.go | starcoder |
// Collection of comparison functions used in testing
package compare
import (
"fmt"
"os"
"regexp"
"testing"
)
func SkipOnDemand(envVar string, t *testing.T) {
if os.Getenv(envVar) != "" {
t.Skip(fmt.Sprintf("Skipped on user request: environment variable '%s' was set ", envVar))
}
}
func OkIsNil(label string, val interface{}, t *testing.T) {
if val == nil {
t.Logf("ok - %s is nil\n", label)
} else {
t.Logf("not ok - %s is NOT nil\n", label)
t.Fail()
}
}
func OkIsNotNil(label string, val interface{}, t *testing.T) {
if val != nil {
t.Logf("ok - %s is not nil\n", label)
} else {
t.Logf("not ok - %s is nil\n", label)
t.Fail()
}
}
// Compares two integers
func OkEqualInt(label string, a, b int, t *testing.T) {
if a == b {
t.Logf("ok - %s: expected: %d\n", label, a)
} else {
t.Logf("not ok - %s: Numbers are not equal - expected %d, but got %d", label, b, a)
t.Fail()
}
}
// Compares two strings
func OkEqualString(label, a, b string, t *testing.T) {
if a == b {
t.Logf("ok - %s: expected: '%s'\n", label, a)
} else {
t.Logf("not ok - %s: Strings are not equal - expected '%s', but got '%s'", label, b, a)
t.Fail()
}
}
// Compares two booleans
func OkEqualBool(label string, a, b bool, t *testing.T) {
if a == b {
t.Logf("ok - %s: expected: %v\n", label, a)
} else {
t.Logf("not ok - %s: Values are not the same - expected %v, but got %v", label, b, a)
t.Fail()
}
}
// Checks that a string matches a given regular expression
func OkMatchesString(label, val, regex string, t *testing.T) {
re := regexp.MustCompile(regex)
if re.MatchString(val) {
t.Logf("ok - %s: '%s' matches '%s'\n", label, val, regex)
} else {
t.Logf("not ok - %s: String '%s' doesn't match '%s'", label, val, regex)
t.Fail()
}
}
// Compares two string slices
func OkEqualStringSlices(t *testing.T, found, expected []string) {
if len(expected) != len(found) {
t.Logf("not ok - slice Found has %d elements, while slice Expected has %d\n", len(found), len(expected))
t.Logf("Found: %v", found)
t.Logf("Expected: %v", expected)
t.Fail()
return
}
for N := 0; N < len(found); N++ {
if found[N] == expected[N] {
t.Logf("ok - element %d of Found and the same in Expected are equal [%v]\n", N, found[N])
} else {
t.Logf("not ok - element %d of Found differs from the corresponding one in Expected. "+
"Expected '%s' - found: '%s'\n", N, expected[N], found[N])
t.Fail()
}
}
}
// Compares two integer slices
func OkEqualIntSlices(t *testing.T, found, expected []int) {
if len(expected) != len(found) {
t.Logf("not ok - slice Found has %d elements, while slice Expected has %d\n", len(found), len(expected))
t.Logf("Found: %v", found)
t.Logf("Expected: %v", expected)
t.Fail()
return
}
for N := 0; N < len(found); N++ {
if found[N] == expected[N] {
t.Logf("ok - element %d of Found and the same in Expected are equal [%v]\n", N, found[N])
} else {
t.Logf("not ok - element %d of Found differs from the corresponding one in Expected. "+
"Expected '%d' - found: '%d'\n", N, expected[N], found[N])
t.Fail()
}
}
}
// Compares two byte slices
func OkEqualByteSlices(t *testing.T, found, expected []byte) {
if len(expected) != len(found) {
t.Logf("not ok - slice Found has %d elements, while slice Expected has %d\n", len(found), len(expected))
t.Logf("Found: %v", found)
t.Logf("Expected: %v", expected)
t.Fail()
return
}
for N := 0; N < len(found); N++ {
if found[N] == expected[N] {
t.Logf("ok - byte %d of Found and the same in Expected are equal [%x]\n", N, found[N])
} else {
t.Logf("not ok - byte %d of Found differs from the corresponding one in Expected. "+
"Expected '%x' - found: '%x'\n", N, expected[N], found[N])
t.Fail()
}
}
} | compare/compare.go | 0.665628 | 0.548432 | compare.go | starcoder |
package pdk
import (
"log"
"time"
)
// Statter is the interface that stats collectors must implement to get stats out of the PDK.
type Statter interface {
Count(name string, value int64, rate float64, tags ...string)
Gauge(name string, value float64, rate float64, tags ...string)
Histogram(name string, value float64, rate float64, tags ...string)
Set(name string, value string, rate float64, tags ...string)
Timing(name string, value time.Duration, rate float64, tags ...string)
}
// NopStatter does nothing.
type NopStatter struct{}
// Count does nothing.
func (NopStatter) Count(name string, value int64, rate float64, tags ...string) {}
// Gauge does nothing.
func (NopStatter) Gauge(name string, value float64, rate float64, tags ...string) {}
// Histogram does nothing.
func (NopStatter) Histogram(name string, value float64, rate float64, tags ...string) {}
// Set does nothing.
func (NopStatter) Set(name string, value string, rate float64, tags ...string) {}
// Timing does nothing.
func (NopStatter) Timing(name string, value time.Duration, rate float64, tags ...string) {}
// Logger is the interface that loggers must implement to get PDK logs.
type Logger interface {
Printf(format string, v ...interface{})
Debugf(format string, v ...interface{})
}
// NopLogger logs nothing.
type NopLogger struct{}
// Printf does nothing.
func (NopLogger) Printf(format string, v ...interface{}) {}
// Debugf does nothing.
func (NopLogger) Debugf(format string, v ...interface{}) {}
// StdLogger only prints on Printf.
type StdLogger struct {
*log.Logger
}
// Printf implements Logger interface.
func (s StdLogger) Printf(format string, v ...interface{}) {
s.Logger.Printf(format, v...)
}
// Debugf implements Logger interface, but prints nothing.
func (StdLogger) Debugf(format string, v ...interface{}) {}
// VerboseLogger prints on both Printf and Debugf.
type VerboseLogger struct {
*log.Logger
}
// Printf implements Logger interface.
func (s VerboseLogger) Printf(format string, v ...interface{}) {
s.Logger.Printf(format, v...)
}
// Debugf implements Logger interface.
func (s VerboseLogger) Debugf(format string, v ...interface{}) {
s.Logger.Printf(format, v...)
} | statlogiface.go | 0.692746 | 0.422981 | statlogiface.go | starcoder |
package ocr
import (
"errors"
"strings"
)
// Recognize returns the numbers in the given 3 x 4 grids of pipes, underscores, and spaces.
// Non recognized digits are returned as '?'.
func Recognize(input string) (numbers []string) {
partitionedInput, err := partitionInput(input)
if err != nil {
return []string{"?"}
}
result := make([]string, len(partitionedInput))
for i, number := range partitionedInput {
for _, digit := range number {
result[i] += recognizeDigit(digit)
}
}
return result
}
func partitionInput(input string) (numbers [][]string, err error) {
rows := strings.Split(input, "\n")
numbers = [][]string{}
for i, row := range rows {
if len(row)%3 != 0 {
return [][]string{}, errors.New("Cannot partition input. Non 3 characters wide digit detected")
}
if i == 0 || (len(rows) > 5 && i >= 4 && (i-4)%4 == 0 && i != len(rows)-1) {
numbers = append(numbers, make([]string, len(rows[1])/3))
for i := range numbers[0] {
numbers[len(numbers)-1][i] = "\n"
}
} else {
for j, char := range row {
numbers[len(numbers)-1][j/3] += string(char)
if (j+1)%3 == 0 {
numbers[len(numbers)-1][j/3] += "\n"
}
}
}
}
return numbers, nil
}
func recognizeDigit(input string) string {
rows := strings.Split(input, "\n")
switch {
case rows[1] == " _ " && rows[2] == "| |" && rows[3] == "|_|":
return "0"
case rows[1] == " " && rows[2] == " |" && rows[3] == " |":
return "1"
case rows[1] == " _ " && rows[2] == " _|" && rows[3] == "|_ ":
return "2"
case rows[1] == " _ " && rows[2] == " _|" && rows[3] == " _|":
return "3"
case rows[1] == " " && rows[2] == "|_|" && rows[3] == " |":
return "4"
case rows[1] == " _ " && rows[2] == "|_ " && rows[3] == " _|":
return "5"
case rows[1] == " _ " && rows[2] == "|_ " && rows[3] == "|_|":
return "6"
case rows[1] == " _ " && rows[2] == " |" && rows[3] == " |":
return "7"
case rows[1] == " _ " && rows[2] == "|_|" && rows[3] == "|_|":
return "8"
case rows[1] == " _ " && rows[2] == "|_|" && rows[3] == " _|":
return "9"
default:
return "?"
}
} | solutions/go/ocr-numbers/ocr_numbers.go | 0.579876 | 0.474814 | ocr_numbers.go | starcoder |
package anansi
import (
"bytes"
"io"
"unicode/utf8"
"github.com/jcorbin/anansi/ansi"
)
// Buffer implements a deferred buffer of ANSI output, providing
// convenience methods for writing various ansi escape sequences, and keeping
// an observant processor up to date.
type Buffer struct {
buf bytes.Buffer
off int
}
// Len returns the number of unwritten bytes in the buffer.
func (b *Buffer) Len() int {
return b.buf.Len()
}
// Grow the internal buffer to have room for at least n bytes.
func (b *Buffer) Grow(n int) {
b.buf.Grow(n)
}
// Bytes returns a byte slice containing all bytes written into the internal
// buffer. Returned slice is only valid until the next call to a buffer method.
func (b *Buffer) Bytes() []byte {
return b.buf.Bytes()
}
// Reset the internal buffer.
func (b *Buffer) Reset() {
b.buf.Reset()
b.off = 0
}
// WriteTo writes all bytes from the internal buffer to the given io.Writer.
func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
n, err = b.buf.WriteTo(w)
if b.off -= int(n); b.off < 0 {
b.off = 0
}
return n, err
}
// WriteESC writes one or more ANSI escapes to the internal buffer, returning
// the number of bytes written.
func (b *Buffer) WriteESC(seqs ...ansi.Escape) int {
need := 0
for i := range seqs {
need += seqs[i].Size()
}
b.buf.Grow(need)
p := b.buf.Bytes()
p = p[len(p):]
for i := range seqs {
p = seqs[i].AppendTo(p)
}
n, _ := b.buf.Write(p)
return n
}
// WriteSeq writes one or more ANSI ansi.escape sequences to the internal
// buffer, returning the number of bytes written. Skips any zero sequences
// provided.
func (b *Buffer) WriteSeq(seqs ...ansi.Seq) int {
need := 0
for i := range seqs {
need += seqs[i].Size()
}
b.buf.Grow(need)
p := b.buf.Bytes()
p = p[len(p):]
for i := range seqs {
p = seqs[i].AppendTo(p)
}
n, _ := b.buf.Write(p)
return n
}
// WriteSGR writes one or more ANSI SGR sequences to the internal buffer,
// returning the number of bytes written; updates Attr cursor state. Skips any
// zero attr values (NOTE 0 attr value is merely implicit clear, not the
// explicit SGRAttrClear).
func (b *Buffer) WriteSGR(attrs ...ansi.SGRAttr) int {
need := 0
for i := range attrs {
if attrs[i] != 0 {
need += attrs[i].Size()
}
}
b.buf.Grow(need)
p := b.buf.Bytes()
p = p[len(p):]
for i := range attrs {
if attrs[i] != 0 {
p = attrs[i].AppendTo(p)
}
}
n, _ := b.buf.Write(p)
return n
}
// Write to the internal buffer.
func (b *Buffer) Write(p []byte) (n int, err error) {
return b.buf.Write(p)
}
// WriteString to the internal buffer.
func (b *Buffer) WriteString(s string) (n int, err error) {
return b.buf.WriteString(s)
}
// WriteRune to the internal buffer.
func (b *Buffer) WriteRune(r rune) (n int, err error) {
return b.buf.WriteRune(r)
}
// WriteByte to the internal buffer.
func (b *Buffer) WriteByte(c byte) error {
return b.buf.WriteByte(c)
}
// Skip Process()ing of n bytes written to the internal buffer. Useful when the
// processor wants to intermediate a buffer write, handling its own semantic
// update and avoiding (re)parsing the written bytes.
func (b *Buffer) Skip(n int) {
b.off += n
}
// Discard processed bytes, re-using internal buffer space during the next Write*.
func (b *Buffer) Discard() {
if b.off > 0 {
b.buf.Next(b.off)
b.off = 0
}
}
// Process bytes written to the internal buffer, decoding runes and escape
// sequences, and passing them to the given processor.
func (b *Buffer) Process(proc Processor) {
b.off += Process(proc, b.buf.Bytes()[b.off:])
}
// Process decodes ansi escapes and utf8 runes from p, passing them to proc.
func Process(proc Processor, p []byte) (n int) {
for n < len(p) {
e, a, m := ansi.DecodeEscape(p[n:])
n += m
if e == 0 {
switch r, m := utf8.DecodeRune(p[n:]); r {
case '\x1b':
return n
default:
n += m
e = ansi.Escape(r)
}
}
proc.ProcessANSI(e, a)
}
return n
}
// Processor receives decoded ANSI escape sequences and Unicode runes from
// Buffer.Process.
type Processor interface {
ProcessANSI(e ansi.Escape, a []byte)
}
var _ ansiWriter = &Buffer{} | buffer.go | 0.709724 | 0.415017 | buffer.go | starcoder |
package main
import (
"fmt"
"path/filepath"
"github.com/derWhity/AdventOfCode/lib/input"
)
// grid represents a 3-dimensional grid of cube states
type grid map[int]map[int]map[int]bool
func (g grid) set(x, y, z int, val bool) {
plane, ok := g[x]
if !ok {
plane = map[int]map[int]bool{}
g[x] = plane
}
row, ok := plane[y]
if !ok {
row = map[int]bool{}
plane[y] = row
}
row[z] = val
}
func (g grid) get(x, y, z int) bool {
plane, ok := g[x]
if !ok {
return false
}
row, ok := plane[y]
if !ok {
return false
}
return row[z]
}
// determineNewState calculates the new state the given cube will have
func (g grid) determineNewStateAt(xPos, yPos, zPos int) bool {
var activeNeighbours uint
for x := xPos - 1; x <= xPos+1; x++ {
for y := yPos - 1; y <= yPos+1; y++ {
for z := zPos - 1; z <= zPos+1; z++ {
if (x != xPos || y != yPos || z != zPos) && g.get(x, y, z) {
activeNeighbours++
}
}
}
}
selfActive := g.get(xPos, yPos, zPos)
if selfActive {
return activeNeighbours == 2 || activeNeighbours == 3
}
return activeNeighbours == 3
}
func failOnError(err error) {
if err != nil {
panic(err)
}
}
func main() {
items, err := input.ReadString(filepath.Join("..", "input.txt"), true)
failOnError(err)
pocketDimension := grid{}
// Three vectors
scanCube := [][]int{
{-1, 1}, // X - from, to
{-1, 1}, // Y - from, to
{-1, 1}, // Z - from, to
}
for x, line := range items {
for y, value := range line {
pocketDimension.set(x, y, 0, value == '#')
scanCube[1][1]++
}
scanCube[0][1]++
}
for i := 0; i < 6; i++ {
newDimentionState := grid{}
// Iterate through the rounds
for x := scanCube[0][0]; x <= scanCube[1][1]; x++ {
for y := scanCube[1][0]; y <= scanCube[1][1]; y++ {
for z := scanCube[2][0]; z <= scanCube[2][1]; z++ {
newDimentionState.set(x, y, z, pocketDimension.determineNewStateAt(x, y, z))
}
}
}
pocketDimension = newDimentionState
// Grow the scan cube by 1 in each direction
scanCube[0][0]--
scanCube[1][0]--
scanCube[2][0]--
scanCube[0][1]++
scanCube[1][1]++
scanCube[2][1]++
// Finally, count the active cubes
var sumCubes uint
for _, yGrid := range pocketDimension {
for _, zGrid := range yGrid {
for _, val := range zGrid {
if val {
sumCubes++
}
}
}
}
fmt.Printf("Iteration #%d: Number of active cubes: %d\n", i, sumCubes)
}
} | 2020/day_17/star_01/main.go | 0.546254 | 0.456107 | main.go | starcoder |
package siastats
import (
"encoding/json"
)
// PlotBand struct for PlotBand
type PlotBand struct {
Color *string `json:"color,omitempty"`
From *int64 `json:"from,omitempty"`
To *int64 `json:"to,omitempty"`
Label *PlotBandLabel `json:"label,omitempty"`
}
// NewPlotBand instantiates a new PlotBand object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewPlotBand() *PlotBand {
this := PlotBand{}
return &this
}
// NewPlotBandWithDefaults instantiates a new PlotBand object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewPlotBandWithDefaults() *PlotBand {
this := PlotBand{}
return &this
}
// GetColor returns the Color field value if set, zero value otherwise.
func (o *PlotBand) GetColor() string {
if o == nil || o.Color == nil {
var ret string
return ret
}
return *o.Color
}
// GetColorOk returns a tuple with the Color field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *PlotBand) GetColorOk() (*string, bool) {
if o == nil || o.Color == nil {
return nil, false
}
return o.Color, true
}
// HasColor returns a boolean if a field has been set.
func (o *PlotBand) HasColor() bool {
if o != nil && o.Color != nil {
return true
}
return false
}
// SetColor gets a reference to the given string and assigns it to the Color field.
func (o *PlotBand) SetColor(v string) {
o.Color = &v
}
// GetFrom returns the From field value if set, zero value otherwise.
func (o *PlotBand) GetFrom() int64 {
if o == nil || o.From == nil {
var ret int64
return ret
}
return *o.From
}
// GetFromOk returns a tuple with the From field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *PlotBand) GetFromOk() (*int64, bool) {
if o == nil || o.From == nil {
return nil, false
}
return o.From, true
}
// HasFrom returns a boolean if a field has been set.
func (o *PlotBand) HasFrom() bool {
if o != nil && o.From != nil {
return true
}
return false
}
// SetFrom gets a reference to the given int64 and assigns it to the From field.
func (o *PlotBand) SetFrom(v int64) {
o.From = &v
}
// GetTo returns the To field value if set, zero value otherwise.
func (o *PlotBand) GetTo() int64 {
if o == nil || o.To == nil {
var ret int64
return ret
}
return *o.To
}
// GetToOk returns a tuple with the To field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *PlotBand) GetToOk() (*int64, bool) {
if o == nil || o.To == nil {
return nil, false
}
return o.To, true
}
// HasTo returns a boolean if a field has been set.
func (o *PlotBand) HasTo() bool {
if o != nil && o.To != nil {
return true
}
return false
}
// SetTo gets a reference to the given int64 and assigns it to the To field.
func (o *PlotBand) SetTo(v int64) {
o.To = &v
}
// GetLabel returns the Label field value if set, zero value otherwise.
func (o *PlotBand) GetLabel() PlotBandLabel {
if o == nil || o.Label == nil {
var ret PlotBandLabel
return ret
}
return *o.Label
}
// GetLabelOk returns a tuple with the Label field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *PlotBand) GetLabelOk() (*PlotBandLabel, bool) {
if o == nil || o.Label == nil {
return nil, false
}
return o.Label, true
}
// HasLabel returns a boolean if a field has been set.
func (o *PlotBand) HasLabel() bool {
if o != nil && o.Label != nil {
return true
}
return false
}
// SetLabel gets a reference to the given PlotBandLabel and assigns it to the Label field.
func (o *PlotBand) SetLabel(v PlotBandLabel) {
o.Label = &v
}
func (o PlotBand) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.Color != nil {
toSerialize["color"] = o.Color
}
if o.From != nil {
toSerialize["from"] = o.From
}
if o.To != nil {
toSerialize["to"] = o.To
}
if o.Label != nil {
toSerialize["label"] = o.Label
}
return json.Marshal(toSerialize)
}
type NullablePlotBand struct {
value *PlotBand
isSet bool
}
func (v NullablePlotBand) Get() *PlotBand {
return v.value
}
func (v *NullablePlotBand) Set(val *PlotBand) {
v.value = val
v.isSet = true
}
func (v NullablePlotBand) IsSet() bool {
return v.isSet
}
func (v *NullablePlotBand) Unset() {
v.value = nil
v.isSet = false
}
func NewNullablePlotBand(val *PlotBand) *NullablePlotBand {
return &NullablePlotBand{value: val, isSet: true}
}
func (v NullablePlotBand) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullablePlotBand) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | model_plot_band.go | 0.852537 | 0.428114 | model_plot_band.go | starcoder |
package vibrant
import (
"image/color"
)
// Constants used for manipulating a QuantizedColor.
const (
quantizeWordWidth = 5
quantizeWordMask = (1 << quantizeWordWidth) - 1
shouldRoundUpMask = 1 << ((8 - quantizeWordWidth) - 1)
roundUpMask = shouldRoundUpMask << 1
)
// QuantizedColorSlice attaches the methods of sort.Interface to []QuantizedColor, sorting in increasing order.
type QuantizedColorSlice []QuantizedColor
func (s QuantizedColorSlice) Len() int { return len(s) }
func (s QuantizedColorSlice) Less(i, j int) bool { return uint16(s[i]) < uint16(s[j]) }
func (s QuantizedColorSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// QuantizedColorGenerator creates a new QuantizedColor from a given red, green, and blue value.
var QuantizedColorGenerator = func(r, g, b uint8) QuantizedColor {
quantizedRed := quantizeColorValue(r)
quantizedGreen := quantizeColorValue(g)
quantizedBlue := quantizeColorValue(b)
return QuantizedColor((quantizedRed << (quantizeWordWidth + quantizeWordWidth)) | (quantizedGreen << quantizeWordWidth) | quantizedBlue)
}
// QuantizedColorModel is the color.Model for the QuantizedColor type.
var QuantizedColorModel = color.ModelFunc(func(c color.Color) color.Color {
if _, ok := c.(QuantizedColor); ok {
return c
}
nrgba := color.NRGBAModel.Convert(c).(color.NRGBA)
return QuantizedColorGenerator(nrgba.R, nrgba.G, nrgba.B)
})
// QuantizedColor represents a reduced RGB color space.
type QuantizedColor uint16
// RGBA implements the color.Color interface.
func (q QuantizedColor) RGBA() (uint32, uint32, uint32, uint32) {
r := uint32(q.ApproximateRed())
r |= r << 8
g := uint32(q.ApproximateGreen())
g |= g << 8
b := uint32(q.ApproximateBlue())
b |= b << 8
a := uint32(0xFFFF)
return r, g, b, a
}
// ApproximateRGBA is the approximate RGBA value of the quantized color.
func (q QuantizedColor) ApproximateRGBA() uint32 {
r := uint32(q.ApproximateRed())
g := uint32(q.ApproximateGreen())
b := uint32(q.ApproximateBlue())
a := uint32(0xFF)
return (a << 24) | (r << 16) | (g << 8) | b
}
// QuantizedRed is the red component of the quantized color.
func (q QuantizedColor) QuantizedRed() uint8 {
return uint8((q >> (quantizeWordWidth + quantizeWordWidth)) & quantizeWordMask)
}
// QuantizedGreen is the green component of a quantized color.
func (q QuantizedColor) QuantizedGreen() uint8 {
return uint8((q >> quantizeWordWidth) & quantizeWordMask)
}
// QuantizedBlue is the blue component of a quantized color.
func (q QuantizedColor) QuantizedBlue() uint8 {
return uint8(q & quantizeWordMask)
}
// ApproximateRed is the approximate red component of the quantized color.
func (q QuantizedColor) ApproximateRed() uint8 {
return modifyWordWidth(q.QuantizedRed(), quantizeWordWidth, 8)
}
// ApproximateGreen is the approximate green component of a quantized color.
func (q QuantizedColor) ApproximateGreen() uint8 {
return modifyWordWidth(q.QuantizedGreen(), quantizeWordWidth, 8)
}
// ApproximateBlue is the approximate blue component of a quantized color.
func (q QuantizedColor) ApproximateBlue() uint8 {
return modifyWordWidth(q.QuantizedBlue(), quantizeWordWidth, 8)
}
// SwapRedGreen returns a new QuantizedColor whose red and green color components have been swapped.
func (q QuantizedColor) SwapRedGreen() QuantizedColor {
return QuantizedColor(uint16(q.QuantizedGreen())<<(quantizeWordWidth+quantizeWordWidth) | uint16(q.QuantizedRed())<<quantizeWordWidth | uint16(q.QuantizedBlue()))
}
// SwapRedBlue returns a new QuantizedColor whose red and blue color components have been swapped.
func (q QuantizedColor) SwapRedBlue() QuantizedColor {
return QuantizedColor(uint16(q.QuantizedBlue())<<(quantizeWordWidth+quantizeWordWidth) | uint16(q.QuantizedGreen())<<quantizeWordWidth | uint16(q.QuantizedRed()))
}
func quantizeColorValue(value uint8) uint16 {
if value&shouldRoundUpMask == shouldRoundUpMask {
value = value | roundUpMask
}
return uint16(modifyWordWidth(value, 8, quantizeWordWidth))
}
func modifyWordWidth(value uint8, currentWidth uint8, targetWidth uint8) uint8 {
var modifiedValue uint8
if targetWidth > currentWidth {
// If we're approximating up in word width, we'll shift up
modifiedValue = value << (targetWidth - currentWidth)
} else {
// Else, we will just shift and keep the MSB
modifiedValue = value >> (currentWidth - targetWidth)
}
return modifiedValue & ((1 << targetWidth) - 1)
} | vendor/github.com/RobCherry/vibrant/quantized_color.go | 0.777764 | 0.461805 | quantized_color.go | starcoder |
package shapes
import (
"image"
"github.com/remogatto/mathgl"
gl "github.com/remogatto/opengles2"
"github.com/remogatto/shaders"
)
var (
// DefaultSegmentVS is a default vertex shader for the segment.
DefaultSegmentVS = (shaders.VertexShader)(
`precision mediump float;
attribute vec4 pos;
attribute vec4 color;
varying vec4 vColor;
uniform mat4 model;
uniform mat4 projection;
uniform mat4 view;
void main() {
gl_Position = projection*model*view*pos;
vColor = color;
}`)
// DefaultSegmentVS is a default fragment shader for the
// segment.
DefaultSegmentFS = (shaders.FragmentShader)(
`precision mediump float;
varying vec4 vColor;
void main() {
gl_FragColor = vColor;
}`)
)
// Segment is a structure representing a segment.
type Segment struct {
Base
// Points of the segment
x1, y1, x2, y2 float32
}
// NewSegment returns a new segment object. It takes a program
// shader and segment coordinates as arguments.
func NewSegment(program shaders.Program, x1, y1, x2, y2 float32) *Segment {
segment := new(Segment)
// Set the geometry
segment.x1, segment.x2 = x1, x2
segment.y1, segment.y2 = y1, y2
segment.vertices = []float32{
segment.x1, segment.y1,
segment.x2, segment.y2,
}
// Set the default color
segment.SetColor(DefaultColor)
// Size of the segment bounding box
// segment.w = float32(math.Abs(float64(x1 - x2)))
// segment.h = float32(math.Abs(float64(y1 - y2)))
segment.bounds = image.Rect(int(x1), int(y1), int(x2), int(y2))
// Center of the segment
segment.x = (segment.x1 + segment.x2) / 2
segment.y = (segment.y1 + segment.y2) / 2
segment.program = program
segment.program.Use()
// Get variables IDs from shaders
segment.posId = segment.program.GetAttribute("pos")
segment.colorId = segment.program.GetAttribute("color")
segment.projMatrixId = segment.program.GetUniform("projection")
segment.modelMatrixId = segment.program.GetUniform("model")
segment.viewMatrixId = segment.program.GetUniform("view")
// Fill the model matrix with the identity.
segment.modelMatrix = mathgl.Ident4f()
return segment
}
// Draw actually renders the segment on the surface.
func (segment *Segment) Draw() {
segment.program.Use()
gl.VertexAttribPointer(segment.posId, 2, gl.FLOAT, false, 0, &segment.vertices[0])
gl.EnableVertexAttribArray(segment.posId)
gl.VertexAttribPointer(segment.colorId, 4, gl.FLOAT, false, 0, &segment.vColor[0])
gl.EnableVertexAttribArray(segment.colorId)
gl.UniformMatrix4fv(int32(segment.modelMatrixId), 1, false, (*float32)(&segment.modelMatrix[0]))
gl.UniformMatrix4fv(int32(segment.projMatrixId), 1, false, (*float32)(&segment.projMatrix[0]))
gl.UniformMatrix4fv(int32(segment.viewMatrixId), 1, false, (*float32)(&segment.viewMatrix[0]))
gl.DrawArrays(gl.LINES, 0, 2)
gl.Flush()
gl.Finish()
} | segment.go | 0.803714 | 0.454593 | segment.go | starcoder |
package onshape
import (
"encoding/json"
)
// BTExportModelEdgeGeometry1125 struct for BTExportModelEdgeGeometry1125
type BTExportModelEdgeGeometry1125 struct {
BtType *string `json:"btType,omitempty"`
EndPoint *BTVector3d389 `json:"endPoint,omitempty"`
EndVector *BTVector3d389 `json:"endVector,omitempty"`
Length *float64 `json:"length,omitempty"`
MidPoint *BTVector3d389 `json:"midPoint,omitempty"`
QuarterPoint *BTVector3d389 `json:"quarterPoint,omitempty"`
StartPoint *BTVector3d389 `json:"startPoint,omitempty"`
StartVector *BTVector3d389 `json:"startVector,omitempty"`
}
// NewBTExportModelEdgeGeometry1125 instantiates a new BTExportModelEdgeGeometry1125 object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBTExportModelEdgeGeometry1125() *BTExportModelEdgeGeometry1125 {
this := BTExportModelEdgeGeometry1125{}
return &this
}
// NewBTExportModelEdgeGeometry1125WithDefaults instantiates a new BTExportModelEdgeGeometry1125 object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewBTExportModelEdgeGeometry1125WithDefaults() *BTExportModelEdgeGeometry1125 {
this := BTExportModelEdgeGeometry1125{}
return &this
}
// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTExportModelEdgeGeometry1125) GetBtType() string {
if o == nil || o.BtType == nil {
var ret string
return ret
}
return *o.BtType
}
// GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTExportModelEdgeGeometry1125) GetBtTypeOk() (*string, bool) {
if o == nil || o.BtType == nil {
return nil, false
}
return o.BtType, true
}
// HasBtType returns a boolean if a field has been set.
func (o *BTExportModelEdgeGeometry1125) HasBtType() bool {
if o != nil && o.BtType != nil {
return true
}
return false
}
// SetBtType gets a reference to the given string and assigns it to the BtType field.
func (o *BTExportModelEdgeGeometry1125) SetBtType(v string) {
o.BtType = &v
}
// GetEndPoint returns the EndPoint field value if set, zero value otherwise.
func (o *BTExportModelEdgeGeometry1125) GetEndPoint() BTVector3d389 {
if o == nil || o.EndPoint == nil {
var ret BTVector3d389
return ret
}
return *o.EndPoint
}
// GetEndPointOk returns a tuple with the EndPoint field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTExportModelEdgeGeometry1125) GetEndPointOk() (*BTVector3d389, bool) {
if o == nil || o.EndPoint == nil {
return nil, false
}
return o.EndPoint, true
}
// HasEndPoint returns a boolean if a field has been set.
func (o *BTExportModelEdgeGeometry1125) HasEndPoint() bool {
if o != nil && o.EndPoint != nil {
return true
}
return false
}
// SetEndPoint gets a reference to the given BTVector3d389 and assigns it to the EndPoint field.
func (o *BTExportModelEdgeGeometry1125) SetEndPoint(v BTVector3d389) {
o.EndPoint = &v
}
// GetEndVector returns the EndVector field value if set, zero value otherwise.
func (o *BTExportModelEdgeGeometry1125) GetEndVector() BTVector3d389 {
if o == nil || o.EndVector == nil {
var ret BTVector3d389
return ret
}
return *o.EndVector
}
// GetEndVectorOk returns a tuple with the EndVector field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTExportModelEdgeGeometry1125) GetEndVectorOk() (*BTVector3d389, bool) {
if o == nil || o.EndVector == nil {
return nil, false
}
return o.EndVector, true
}
// HasEndVector returns a boolean if a field has been set.
func (o *BTExportModelEdgeGeometry1125) HasEndVector() bool {
if o != nil && o.EndVector != nil {
return true
}
return false
}
// SetEndVector gets a reference to the given BTVector3d389 and assigns it to the EndVector field.
func (o *BTExportModelEdgeGeometry1125) SetEndVector(v BTVector3d389) {
o.EndVector = &v
}
// GetLength returns the Length field value if set, zero value otherwise.
func (o *BTExportModelEdgeGeometry1125) GetLength() float64 {
if o == nil || o.Length == nil {
var ret float64
return ret
}
return *o.Length
}
// GetLengthOk returns a tuple with the Length field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTExportModelEdgeGeometry1125) GetLengthOk() (*float64, bool) {
if o == nil || o.Length == nil {
return nil, false
}
return o.Length, true
}
// HasLength reports whether the Length field has been set.
func (o *BTExportModelEdgeGeometry1125) HasLength() bool {
	return o != nil && o.Length != nil
}
// SetLength gets a reference to the given float64 and assigns it to the Length field.
// The argument is copied; the stored pointer refers to the copy, not to the caller's variable.
func (o *BTExportModelEdgeGeometry1125) SetLength(v float64) {
	o.Length = &v
}
// GetMidPoint returns the MidPoint field value when set,
// or the zero BTVector3d389 otherwise.
func (o *BTExportModelEdgeGeometry1125) GetMidPoint() BTVector3d389 {
	if o != nil && o.MidPoint != nil {
		return *o.MidPoint
	}
	var zero BTVector3d389
	return zero
}
// GetMidPointOk returns a pointer to the MidPoint field and true when the
// field has been set; otherwise it returns (nil, false).
func (o *BTExportModelEdgeGeometry1125) GetMidPointOk() (*BTVector3d389, bool) {
	if o != nil && o.MidPoint != nil {
		return o.MidPoint, true
	}
	return nil, false
}
// HasMidPoint reports whether the MidPoint field has been set.
func (o *BTExportModelEdgeGeometry1125) HasMidPoint() bool {
	return o != nil && o.MidPoint != nil
}
// SetMidPoint gets a reference to the given BTVector3d389 and assigns it to the MidPoint field.
// The argument is copied; the stored pointer refers to the copy, not to the caller's variable.
func (o *BTExportModelEdgeGeometry1125) SetMidPoint(v BTVector3d389) {
	o.MidPoint = &v
}
// GetQuarterPoint returns the QuarterPoint field value when set,
// or the zero BTVector3d389 otherwise.
func (o *BTExportModelEdgeGeometry1125) GetQuarterPoint() BTVector3d389 {
	if o != nil && o.QuarterPoint != nil {
		return *o.QuarterPoint
	}
	var zero BTVector3d389
	return zero
}
// GetQuarterPointOk returns a pointer to the QuarterPoint field and true when
// the field has been set; otherwise it returns (nil, false).
func (o *BTExportModelEdgeGeometry1125) GetQuarterPointOk() (*BTVector3d389, bool) {
	if o != nil && o.QuarterPoint != nil {
		return o.QuarterPoint, true
	}
	return nil, false
}
// HasQuarterPoint reports whether the QuarterPoint field has been set.
func (o *BTExportModelEdgeGeometry1125) HasQuarterPoint() bool {
	return o != nil && o.QuarterPoint != nil
}
// SetQuarterPoint gets a reference to the given BTVector3d389 and assigns it to the QuarterPoint field.
// The argument is copied; the stored pointer refers to the copy, not to the caller's variable.
func (o *BTExportModelEdgeGeometry1125) SetQuarterPoint(v BTVector3d389) {
	o.QuarterPoint = &v
}
// GetStartPoint returns the StartPoint field value when set,
// or the zero BTVector3d389 otherwise.
func (o *BTExportModelEdgeGeometry1125) GetStartPoint() BTVector3d389 {
	if o != nil && o.StartPoint != nil {
		return *o.StartPoint
	}
	var zero BTVector3d389
	return zero
}
// GetStartPointOk returns a pointer to the StartPoint field and true when the
// field has been set; otherwise it returns (nil, false).
func (o *BTExportModelEdgeGeometry1125) GetStartPointOk() (*BTVector3d389, bool) {
	if o != nil && o.StartPoint != nil {
		return o.StartPoint, true
	}
	return nil, false
}
// HasStartPoint reports whether the StartPoint field has been set.
func (o *BTExportModelEdgeGeometry1125) HasStartPoint() bool {
	return o != nil && o.StartPoint != nil
}
// SetStartPoint gets a reference to the given BTVector3d389 and assigns it to the StartPoint field.
// The argument is copied; the stored pointer refers to the copy, not to the caller's variable.
func (o *BTExportModelEdgeGeometry1125) SetStartPoint(v BTVector3d389) {
	o.StartPoint = &v
}
// GetStartVector returns the StartVector field value when set,
// or the zero BTVector3d389 otherwise.
func (o *BTExportModelEdgeGeometry1125) GetStartVector() BTVector3d389 {
	if o != nil && o.StartVector != nil {
		return *o.StartVector
	}
	var zero BTVector3d389
	return zero
}
// GetStartVectorOk returns a pointer to the StartVector field and true when
// the field has been set; otherwise it returns (nil, false).
func (o *BTExportModelEdgeGeometry1125) GetStartVectorOk() (*BTVector3d389, bool) {
	if o != nil && o.StartVector != nil {
		return o.StartVector, true
	}
	return nil, false
}
// HasStartVector reports whether the StartVector field has been set.
func (o *BTExportModelEdgeGeometry1125) HasStartVector() bool {
	return o != nil && o.StartVector != nil
}
// SetStartVector gets a reference to the given BTVector3d389 and assigns it to the StartVector field.
// The argument is copied; the stored pointer refers to the copy, not to the caller's variable.
func (o *BTExportModelEdgeGeometry1125) SetStartVector(v BTVector3d389) {
	o.StartVector = &v
}
// MarshalJSON serializes only the fields with non-nil pointers, so unset
// optional fields are omitted from the resulting JSON object entirely.
func (o BTExportModelEdgeGeometry1125) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	if o.EndPoint != nil {
		toSerialize["endPoint"] = o.EndPoint
	}
	if o.EndVector != nil {
		toSerialize["endVector"] = o.EndVector
	}
	if o.Length != nil {
		toSerialize["length"] = o.Length
	}
	if o.MidPoint != nil {
		toSerialize["midPoint"] = o.MidPoint
	}
	if o.QuarterPoint != nil {
		toSerialize["quarterPoint"] = o.QuarterPoint
	}
	if o.StartPoint != nil {
		toSerialize["startPoint"] = o.StartPoint
	}
	if o.StartVector != nil {
		toSerialize["startVector"] = o.StartVector
	}
	return json.Marshal(toSerialize)
}
// NullableBTExportModelEdgeGeometry1125 distinguishes "unset" from
// "explicitly set (possibly to null)" for BTExportModelEdgeGeometry1125.
type NullableBTExportModelEdgeGeometry1125 struct {
	value *BTExportModelEdgeGeometry1125 // wrapped value; nil when unset or set to null
	isSet bool                           // true once Set or UnmarshalJSON has run
}
// Get returns the wrapped value; it is nil when no value has been set.
func (v NullableBTExportModelEdgeGeometry1125) Get() *BTExportModelEdgeGeometry1125 {
	return v.value
}
// Set stores val and marks the wrapper as set (even when val is nil).
func (v *NullableBTExportModelEdgeGeometry1125) Set(val *BTExportModelEdgeGeometry1125) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether a value (including an explicit nil) has been stored.
func (v NullableBTExportModelEdgeGeometry1125) IsSet() bool {
	return v.isSet
}
// Unset clears the wrapped value and returns the wrapper to the unset state.
func (v *NullableBTExportModelEdgeGeometry1125) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableBTExportModelEdgeGeometry1125 wraps val in a nullable container
// that is already marked as set.
func NewNullableBTExportModelEdgeGeometry1125(val *BTExportModelEdgeGeometry1125) *NullableBTExportModelEdgeGeometry1125 {
	return &NullableBTExportModelEdgeGeometry1125{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableBTExportModelEdgeGeometry1125) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes src into the wrapped value and marks the wrapper as
// set -- a JSON null therefore counts as "explicitly set to null".
func (v *NullableBTExportModelEdgeGeometry1125) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package types
import (
"strings"
"time"
)
// Components of Go's reference time "Mon Jan 2 15:04:05 MST 2006", exposed
// under RFC-style names so layouts can be assembled readably.
const (
	//TimeLongMonth long format of month
	TimeLongMonth = "January"
	//TimeMonth format of month
	TimeMonth = "Jan"
	//TimeNumMonth number format of month
	TimeNumMonth = "1"
	//TimeZeroMonth zero format of month
	TimeZeroMonth = "01"
	//TimeLongWeekDay long format of weekday
	TimeLongWeekDay = "Monday"
	//TimeWeekDay format of weekday
	TimeWeekDay = "Mon"
	//TimeDay format of day
	TimeDay = "2"
	//TimeZeroDay zero format of day
	TimeZeroDay = "02"
	//TimeHour24 24 hours format of hour
	TimeHour24 = "15"
	//TimeHour12 12 hours format of hour
	TimeHour12 = "3"
	//TimeZeroHour12 12 hours zero format of hour
	TimeZeroHour12 = "03"
	//TimeMinute format of minute
	TimeMinute = "4"
	//TimeZeroMinute zero format of minute
	TimeZeroMinute = "04"
	//TimeSecond format of second
	TimeSecond = "5"
	//TimeZeroSecond zero format of second
	TimeZeroSecond = "05"
	//TimeLongYear long format of year
	TimeLongYear = "2006"
	//TimeYear format of year
	TimeYear = "06"
	//TimePM format of PM
	TimePM = "PM"
	//Timepm format of pm
	Timepm = "pm"
	//TimeTZ MST
	TimeTZ = "MST"
	//TimeISO8601TZ ISO8601TZ
	TimeISO8601TZ = "Z0700" // prints Z for UTC
	//TimeISO8601SecondsTZ ISO8601SecondsTZ
	TimeISO8601SecondsTZ = "Z070000"
	//TimeISO8601ShortTZ ISO8601ShortTZ
	TimeISO8601ShortTZ = "Z07"
	//TimeISO8601ColonTZ ISO8601ColonTZ
	TimeISO8601ColonTZ = "Z07:00" // prints Z for UTC
	//TimeISO8601ColonSecondsTZ ISO8601ColonSecondsTZ
	TimeISO8601ColonSecondsTZ = "Z07:00:00"
	//TimeNumTZ NumTZ
	TimeNumTZ = "-0700" // always numeric
	//TimeNumSecondsTz NumSecondsTz
	TimeNumSecondsTz = "-070000"
	//TimeNumShortTZ NumShortTZ
	TimeNumShortTZ = "-07" // always numeric
	//TimeNumColonTZ NumColonTZ
	TimeNumColonTZ = "-07:00" // always numeric
	//TimeNumColonSecondsTZ NumColonSecondsTZ
	TimeNumColonSecondsTZ = "-07:00:00"
)
// Pre-assembled composite layouts built from the component constants above.
const (
	//TimeFormatDateTime yyyyMMDDHHmmSS
	TimeFormatDateTime = TimeLongYear + TimeZeroMonth + TimeZeroDay + TimeHour24 + TimeZeroMinute + TimeZeroSecond
	//TimeFormatDateTimeWithDash yyyy-MM-DD HH:mm:ss
	TimeFormatDateTimeWithDash = TimeLongYear + "-" + TimeZeroMonth + "-" + TimeZeroDay + " " + TimeHour24 + ":" + TimeZeroMinute + ":" + TimeZeroSecond
)
// layoutReplacer rewrites RFC-style layout tokens into Go's reference-time
// components in a single left-to-right pass. Longer tokens appear before
// their prefixes (e.g. "yyyy" before "yy", "MM" before "M") so the replacer
// always consumes the longest intended token first. Building the replacer
// once at package level avoids re-scanning the pattern list on every call,
// and the single pass cannot cascade replacements the way thirteen
// consecutive strings.Replace calls could.
var layoutReplacer = strings.NewReplacer(
	"yyyy", TimeLongYear,
	"yy", TimeYear,
	"MM", TimeZeroMonth,
	"M", TimeNumMonth,
	"dd", TimeZeroDay,
	"d", TimeDay,
	"HH", TimeHour24,
	"hh", TimeZeroHour12,
	"h", TimeHour12,
	"mm", TimeZeroMinute,
	"m", TimeMinute,
	"ss", TimeZeroSecond,
	"s", TimeSecond,
)

// FormatLayout converts an RFC-style layout (e.g. "yyyy-MM-dd HH:mm:ss") to
// golang's magic reference-time layout (e.g. "2006-01-02 15:04:05").
func FormatLayout(layout string) string {
	return layoutReplacer.Replace(layout)
}
// FormatTime renders t using an RFC-style format such as "yyyy-MM-dd HH:mm:ss".
// A nil time yields the empty string.
func FormatTime(t *time.Time, format string) string {
	if t == nil {
		return ""
	}
	layout := FormatLayout(format)
	return t.Format(layout)
}
//SwitchTimezone convert timezone
func SwitchTimezone(t time.Time, offset int) time.Time {
return t.In(time.FixedZone("", offset*60*60))
}
//ParseTime try to parse string with specified layout, if not, return d
func ParseTime(v string, layout string, d *time.Time) *time.Time {
if t, err := time.Parse(layout, v); err == nil {
return &t
}
return d
}
// Now returns the current time expressed in a fixed zone offset whole hours
// from UTC.
func Now(offset int) time.Time {
	zone := time.FixedZone("", 60*60*offset)
	return time.Now().In(zone)
}
//Today 获取指定时区的今天零点
func Today(offset int) time.Time {
now := Now(offset)
return time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location())
} | types/time_helper.go | 0.514888 | 0.468 | time_helper.go | starcoder |
package day21
import "fmt"
// Transformations returns the distinct rotations/flips of pattern (the
// original pattern included), selecting the instruction table by grid size.
// It panics for grid sizes other than 2 or 3.
func Transformations(pattern string) []string {
	size := sizeOf(pattern)
	instructions, found := transformations[size]
	if !found {
		panic(fmt.Sprintln("Grid size something other than 2 or 3:", size))
	}
	return transformPattern(pattern, instructions)
}
// transformPattern applies each index-permutation to pattern and collects
// the distinct results, always listing the original pattern first.
func transformPattern(pattern string, transformations [][]int) (transformed []string) {
	transformed = []string{pattern}
	for _, perm := range transformations {
		chars := make([]rune, 0, len(perm))
		for _, idx := range perm {
			chars = append(chars, rune(pattern[idx]))
		}
		if altered := string(chars); !contains(transformed, altered) {
			transformed = append(transformed, altered)
		}
	}
	return transformed
}
// transformations maps grid size (2 or 3) to index permutations of the
// pattern string; applying one permutation produces a rotated/flipped grid.
// Duplicate entries (flips that coincide with rotations) are tolerated
// because transformPattern deduplicates results.
var transformations = map[int][][]int{
	2: {
		{3, 0, 2, 4, 1}, // rotate 90
		{4, 3, 2, 1, 0}, // rotate 180
		{1, 4, 2, 0, 3}, // rotate 270
		{3, 4, 2, 0, 1}, // flip horizontal
		{0, 3, 2, 1, 4}, // flip horizontal, rotate 90
		{1, 0, 2, 4, 3}, // flip horizontal, rotate 180
		{4, 1, 2, 3, 0}, // flip horizontal, rotate 270
		{1, 0, 2, 4, 3}, // flip vertical (is same as flip horizontal, rotate 180)
		{4, 1, 2, 3, 0}, // flip vertical, rotate 90 (is the same as flip horizontal, rotate 270)
		{3, 4, 2, 0, 1}, // flip vertical, rotate 180 (is the same as flip horizontal)
		{0, 3, 2, 1, 4}, // flip vertical, rotate 270 (is the same as flip horizontal, rotate 90)
	},
	3: {
		{8, 4, 0, 3, 9, 5, 1, 7, 10, 6, 2}, // rotate 90
		{10, 9, 8, 3, 6, 5, 4, 7, 2, 1, 0}, // rotate 180
		{2, 6, 10, 3, 1, 5, 9, 7, 0, 4, 8}, // rotate 270
		{8, 9, 10, 3, 4, 5, 6, 7, 0, 1, 2}, // flip horizontal
		{0, 4, 8, 3, 1, 5, 9, 7, 2, 6, 10}, // flip horizontal, rotate 90
		{2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8}, // flip horizontal, rotate 180
		{10, 6, 2, 3, 9, 5, 1, 7, 8, 4, 0}, // flip horizontal, rotate 270
		{2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8}, // flip vertical (same as flip horizontal, rotate 180)
		{10, 6, 2, 3, 9, 5, 1, 7, 8, 4, 0}, // flip vertical, rotate 90 (same as flip horizontal, rotate 270)
		{8, 9, 10, 3, 4, 5, 6, 7, 0, 1, 2}, // flip vertical, rotate 180 (same as flip horizontal)
		{10, 6, 2, 3, 9, 5, 1, 7, 8, 4, 0}, // flip vertical, rotate 270 (same as flip horizontal, rotate 90)
	},
}
/*
3x3 Transformations (pattern-string index maps):
Each digit is an index into the 11-character pattern string "rrr/rrr/rrr";
positions 3 and 7 hold the '/' separators and x stands for index 10.
start:
0123
4567
89x
rotate 90:
8403
9517
x62
rotate 180:
x983
6547
210
rotate 270:
26x3
1597
048
flip horizontal:
89x3
4567
012
flip horizontal, rotate 90:
0483
1597
26x
flip horizontal, rotate 180:
2103
6547
x98
flip horizontal, rotate 270:
x623
9517
840
flip vertical:
2103
6547
x98
flip vertical, rotate 90:
x623
9517
840
flip vertical, rotate 180:
89x3
4567
012
flip vertical, rotate 270:
0483
1597
26x
*/
/*
2x2 Transformations (pattern-string index maps):
Each digit is an index into the 5-character pattern string "rr/rr";
position 2 holds the '/' separator.
start:
012
34
90:
302
41
180:
432
10
270:
142
03
flip horizontal:
342
01
flip horizontal (90):
032
14
flip horizontal (180): (same as flip vertical)
102
43
flip horizontal (270):
412
30
flip vertical:
102
43
flip vertical (90): (same as flip horizontal, 270)
412
30
flip vertical (180): (same as flip horizontal)
342
01
flip vertical (270): (same as flip horizontal, rotate 90)
032
14
*/ | go/2017/day21/transform.go | 0.560253 | 0.5564 | transform.go | starcoder |
package iterator
import (
"sync/atomic"
"github.com/apache/arrow/go/arrow"
"github.com/apache/arrow/go/arrow/array"
"github.com/gomem/gomem/internal/debug"
)
// StepValue holds the value for a given step.
type StepValue struct {
	Values []interface{} // per-iterator values; nil until Values() populates them
	ValuesJSON []interface{} // per-iterator JSON values; nil until ValuesJSON() populates them
	Exists []bool // per-iterator flag: false means the iterator was exhausted at this step
	Dtypes []arrow.DataType // per-iterator data types, parallel to Values
}
// Value returns the value at index i and the data type for that value.
// Call Values() on the iterator first; otherwise Values is nil.
func (sv StepValue) Value(i int) (interface{}, arrow.DataType) {
	return sv.Values[i], sv.Dtypes[i]
}
// StepIterator iterates over multiple iterators in step.
type StepIterator interface {
	Values() *StepValue                // values for the current step
	ValuesJSON() (*StepValue, error)   // JSON-converted values for the current step
	Next() bool                        // advances all iterators; false when all are exhausted
	Retain()                           // increments the reference count
	Release()                          // decrements the reference count, freeing at zero
}
// stepIterator has a max number of elements it
// can iterator over that must fit into uint64
// which I doubt anyone is going to go over.
type stepIterator struct {
	refCount int64 // manipulated atomically via Retain/Release
	iterators []ValueIterator // child iterators advanced in lockstep
	index uint64
	stepValue *StepValue // lazily-populated values for the current step; replaced on Next
	dtypes []arrow.DataType // data types, parallel to iterators
}
// NewStepIteratorForColumns creates a new StepIterator given a slice of columns.
func NewStepIteratorForColumns(cols []array.Column) StepIterator {
	itrs := make([]ValueIterator, len(cols))
	dtypes := make([]arrow.DataType, len(cols))
	for i := range cols {
		itrs[i] = NewValueIterator(&cols[i])
		dtypes[i] = cols[i].DataType()
	}
	si := NewStepIterator(dtypes, itrs...)
	// NewStepIterator retained its own refs on the value iterators,
	// so drop the refs we acquired from NewValueIterator.
	for _, itr := range itrs {
		itr.Release()
	}
	return si
}
// NewStepIterator creates a new StepIterator given a bunch of ValueIterators.
// It retains its own reference on each value iterator.
func NewStepIterator(dtypes []arrow.DataType, iterators ...ValueIterator) StepIterator {
	for _, itr := range iterators {
		itr.Retain()
	}
	return &stepIterator{
		refCount:  1,
		iterators: iterators,
		index:     0,
		dtypes:    dtypes,
	}
}
// Values returns the values in the current step as a StepValue.
// The result is computed once per step and cached on the StepValue.
func (s *stepIterator) Values() *StepValue {
	sv := s.stepValue
	if sv.Values != nil {
		return sv
	}
	values := make([]interface{}, len(s.iterators))
	for i, itr := range s.iterators {
		if sv.Exists[i] {
			values[i] = itr.ValueInterface()
		}
	}
	sv.Values = values
	return sv
}
// ValuesJSON returns the json values in the current step as a StepValue.
// The result is computed once per step and cached on the StepValue.
//
// The conversion result is only cached after every iterator converted
// successfully: the original code assigned the slice before the loop, so an
// error mid-loop left a partially-populated ValuesJSON cached, and a retry
// would return that partial data with a nil error.
func (s *stepIterator) ValuesJSON() (*StepValue, error) {
	if s.stepValue.ValuesJSON != nil {
		return s.stepValue, nil
	}
	valuesJSON := make([]interface{}, len(s.iterators))
	for i, iterator := range s.iterators {
		if !s.stepValue.Exists[i] {
			valuesJSON[i] = nil
			continue
		}
		v, err := iterator.ValueAsJSON()
		if err != nil {
			return nil, err
		}
		valuesJSON[i] = v
	}
	s.stepValue.ValuesJSON = valuesJSON
	return s.stepValue, nil
}
// Next advances every child iterator one step, recording per-iterator
// existence flags. It returns false once all iterators are exhausted.
func (s *stepIterator) Next() bool {
	step := &StepValue{
		Exists: make([]bool, len(s.iterators)),
		Dtypes: s.dtypes,
	}
	advanced := false
	for i, itr := range s.iterators {
		ok := itr.Next()
		step.Exists[i] = ok
		advanced = advanced || ok
	}
	s.stepValue = step
	return advanced
}
// Retain atomically increments the iterator's reference count.
func (s *stepIterator) Retain() {
	atomic.AddInt64(&s.refCount, 1)
}
func (s *stepIterator) Release() {
refs := atomic.AddInt64(&s.refCount, -1)
debug.Assert(refs >= 0, "too many releases")
if refs == 0 {
for i := range s.iterators {
s.iterators[i].Release()
}
s.iterators = nil
}
} | pkg/iterator/stepiterator.go | 0.672332 | 0.407864 | stepiterator.go | starcoder |
package value
import (
"fmt"
"strings"
)
// Row is an ordered collection of column Values.
type Row []Value

// String renders the row as its values joined with tab characters.
func (r *Row) String() string {
	parts := make([]string, 0, len(*r))
	for _, v := range *r {
		parts = append(parts, v.String())
	}
	return strings.Join(parts, "\t")
}
// Value is a comparable, printable scalar. Comparison semantics are
// type-specific; mismatched dynamic types generally compare as false
// (see IsComparable for which pairings are meaningful).
type Value interface {
	Gt(Value) bool // greater than
	Ge(Value) bool // greater than or equal
	Lt(Value) bool // less than
	Le(Value) bool // less than or equal
	Eq(Value) bool // equal
	String() string
}
// Int wraps a signed 64-bit integer Value.
type Int struct {
	Val int64
}

// String renders the integer in base 10.
func (i Int) String() string { return fmt.Sprint(i.Val) }

// Gt reports i > v. Comparisons against Float promote i to float64;
// any other dynamic type yields false.
// (Binds the operand in the type switch instead of re-asserting v.(Int)
// inside each case, as the original did.)
func (i Int) Gt(v Value) bool {
	switch v := v.(type) {
	case Int:
		return i.Val > v.Val
	case Float:
		return float64(i.Val) > v.Val
	default:
		return false
	}
}

// Ge reports i >= v under the same promotion rules as Gt.
func (i Int) Ge(v Value) bool {
	switch v := v.(type) {
	case Int:
		return i.Val >= v.Val
	case Float:
		return float64(i.Val) >= v.Val
	default:
		return false
	}
}

// Lt reports i < v under the same promotion rules as Gt.
func (i Int) Lt(v Value) bool {
	switch v := v.(type) {
	case Int:
		return i.Val < v.Val
	case Float:
		return float64(i.Val) < v.Val
	default:
		return false
	}
}

// Le reports i <= v under the same promotion rules as Gt.
func (i Int) Le(v Value) bool {
	switch v := v.(type) {
	case Int:
		return i.Val <= v.Val
	case Float:
		return float64(i.Val) <= v.Val
	default:
		return false
	}
}

// Eq reports i == v under the same promotion rules as Gt.
func (i Int) Eq(v Value) bool {
	switch v := v.(type) {
	case Int:
		return i.Val == v.Val
	case Float:
		return float64(i.Val) == v.Val
	default:
		return false
	}
}
// Float wraps a 64-bit floating-point Value.
type Float struct {
	Val float64
}

// String renders the float with fmt's default formatting.
func (i Float) String() string { return fmt.Sprint(i.Val) }

// Gt reports i > v. Comparisons against Int promote the Int to float64;
// any other dynamic type yields false.
// (Binds the operand in the type switch instead of re-asserting v.(Float)
// inside each case, as the original did.)
func (i Float) Gt(v Value) bool {
	switch v := v.(type) {
	case Float:
		return i.Val > v.Val
	case Int:
		return i.Val > float64(v.Val)
	default:
		return false
	}
}

// Ge reports i >= v under the same promotion rules as Gt.
func (i Float) Ge(v Value) bool {
	switch v := v.(type) {
	case Float:
		return i.Val >= v.Val
	case Int:
		return i.Val >= float64(v.Val)
	default:
		return false
	}
}

// Lt reports i < v under the same promotion rules as Gt.
func (i Float) Lt(v Value) bool {
	switch v := v.(type) {
	case Float:
		return i.Val < v.Val
	case Int:
		return i.Val < float64(v.Val)
	default:
		return false
	}
}

// Le reports i <= v under the same promotion rules as Gt.
func (i Float) Le(v Value) bool {
	switch v := v.(type) {
	case Float:
		return i.Val <= v.Val
	case Int:
		return i.Val <= float64(v.Val)
	default:
		return false
	}
}

// Eq reports i == v under the same promotion rules as Gt.
func (i Float) Eq(v Value) bool {
	switch v := v.(type) {
	case Float:
		return i.Val == v.Val
	case Int:
		return i.Val == float64(v.Val)
	default:
		return false
	}
}
// Str wraps a string Value. Its comparison methods assert the operand is a
// Str and panic otherwise; callers are expected to gate with IsComparable.
type Str struct {
	Val string
}

func (i Str) String() string { return i.Val }
func (i Str) Gt(v Value) bool { return i.Val > v.(Str).Val }
func (i Str) Ge(v Value) bool { return i.Val >= v.(Str).Val }
func (i Str) Lt(v Value) bool { return i.Val < v.(Str).Val }
func (i Str) Le(v Value) bool { return i.Val <= v.(Str).Val }
func (i Str) Eq(v Value) bool { return i.Val == v.(Str).Val }
// Bool wraps a boolean Value, ordering true above false. Its comparison
// methods assert the operand is a Bool and panic otherwise; callers are
// expected to gate with IsComparable.
type Bool struct {
	Val bool
}

func (i Bool) String() string { return fmt.Sprint(i.Val) }
func (i Bool) Gt(v Value) bool { return i.Val && !v.(Bool).Val }
func (i Bool) Ge(v Value) bool { return i.Val || !v.(Bool).Val }
func (i Bool) Lt(v Value) bool { return !i.Val && v.(Bool).Val }
func (i Bool) Le(v Value) bool { return !i.Val || v.(Bool).Val }
func (i Bool) Eq(v Value) bool { return i.Val == v.(Bool).Val }
// Null represents an absent value. It is never ordered relative to anything,
// and Eq always reports true; IsComparable restricts comparisons to
// Null-vs-Null, so in practice this means "null equals null".
type Null struct{}

func (i Null) String() string { return "null" }
func (i Null) Gt(v Value) bool { return false }
func (i Null) Ge(v Value) bool { return false }
func (i Null) Lt(v Value) bool { return false }
func (i Null) Le(v Value) bool { return false }
func (i Null) Eq(v Value) bool { return true }
// Alien wraps any value of a type this package does not understand. It is
// printable but never comparable: every comparison (including Eq) is false.
type Alien struct {
	Val interface{}
}

func (i Alien) Gt(v Value) bool { return false }
func (i Alien) Ge(v Value) bool { return false }
func (i Alien) Lt(v Value) bool { return false }
func (i Alien) Le(v Value) bool { return false }
func (i Alien) Eq(v Value) bool { return false }
func (i Alien) String() string { return fmt.Sprint(i.Val) }
// Int96 is an empty placeholder for parquet's 96-bit integer type;
// presumably to be fleshed out later -- TODO confirm intended use.
type Int96 struct {
}
// NewFromParquetValue converts a raw value decoded from a parquet column
// into the matching Value implementation. All integer widths (signed and
// unsigned) collapse into Int, both float widths into Float; unrecognized
// types are wrapped in Alien so they stay printable but non-comparable.
// (Binds the value in the type switch instead of re-asserting per case.)
// NOTE(review): uint64 values above math.MaxInt64 wrap negative when cast
// to int64 -- behavior preserved from the original.
func NewFromParquetValue(v interface{}) Value {
	switch v := v.(type) {
	case nil:
		return Null{}
	case int:
		return Int{Val: int64(v)}
	case int8:
		return Int{Val: int64(v)}
	case int16:
		return Int{Val: int64(v)}
	case int32:
		return Int{Val: int64(v)}
	case int64:
		return Int{Val: v}
	case uint:
		return Int{Val: int64(v)}
	case uint8:
		return Int{Val: int64(v)}
	case uint16:
		return Int{Val: int64(v)}
	case uint32:
		return Int{Val: int64(v)}
	case uint64:
		return Int{Val: int64(v)}
	case float32:
		return Float{Val: float64(v)}
	case float64:
		return Float{Val: v}
	case bool:
		return Bool{Val: v}
	case string:
		return Str{Val: v}
	default:
		return Alien{Val: v}
	}
}
// IsComparable reports whether v1 and v2 can be meaningfully compared:
// numeric types (Int/Float) compare with each other, while Bool, Str and
// Null only compare with their own kind. Alien is never comparable.
func IsComparable(v1, v2 Value) bool {
	switch v1.(type) {
	case Int, Float:
		switch v2.(type) {
		case Int, Float:
			return true
		}
	case Bool:
		_, ok := v2.(Bool)
		return ok
	case Str:
		_, ok := v2.(Str)
		return ok
	case Null:
		_, ok := v2.(Null)
		return ok
	}
	return false
}
func Compare(v1, v2 Value, op string) (bool, error) {
if !IsComparable(v1, v2) {
return false, fmt.Errorf("%t and %t are not comparable", v1, v2)
}
switch op {
case "=":
return v1.Eq(v2), nil
case "!=", "<>":
return !v1.Eq(v2), nil
case ">":
return v1.Gt(v2), nil
case ">=":
return v1.Ge(v2), nil
case "<":
return v1.Lt(v2), nil
case "<=":
return v1.Le(v2), nil
}
return false, fmt.Errorf("unknow operation %s", op)
} | value/value.go | 0.585931 | 0.413181 | value.go | starcoder |
package dog
import (
"github.com/emer/etable/etable"
"github.com/emer/etable/etensor"
"github.com/goki/mat32"
)
// dog.Filter specifies a DoG Difference of Gaussians filter function.
// The filter is the difference of a narrow On gaussian and a wider Off
// gaussian, both expressed relative to Size.
type Filter struct {
	On bool `desc:"is this filter active?"`
	Wt float32 `viewif:"On" desc:"how much relative weight does this filter have when combined with other filters"`
	Gain float32 `viewif:"On" def:"8" desc:"overall gain multiplier applied after dog filtering -- only relevant if not using renormalization (otherwize it just gets renormed away)"`
	OnGain float32 `viewif:"On" def:"1" desc:"gain for the on component of filter, only relevant for color-opponent DoG's"`
	Size int `viewif:"On" desc:"size of the overall filter -- number of pixels wide and tall for a square matrix used to encode the filter -- filter is centered within this square -- typically an even number, min effective size ~6"`
	Spacing int `viewif:"On" desc:"how far apart to space the centers of the dog filters -- 1 = every pixel, 2 = every other pixel, etc -- high-res should be 1 or 2, lower res can be increments therefrom"`
	OnSig float32 `viewif:"On" def:"0.125" desc:"gaussian sigma for the narrower On gaussian, in normalized units relative to Size"`
	OffSig float32 `viewif:"On" def:"0.25" desc:"gaussian sigma for the wider Off gaussian, in normalized units relative to Size"`
	CircleEdge bool `viewif:"On" def:"true" desc:"cut off the filter (to zero) outside a circle of diameter = Size -- makes the filter more radially symmetric"`
}
// Defaults resets the filter to its standard configuration: active, 12px
// filter sampled every 2 pixels, sigmas 0.125/0.25, circular edge cutoff.
func (gf *Filter) Defaults() {
	*gf = Filter{
		On:         true,
		Wt:         1,
		Gain:       8,
		OnGain:     1,
		Size:       12,
		Spacing:    2,
		OnSig:      0.125,
		OffSig:     0.25,
		CircleEdge: true,
	}
}
// Update is a no-op: Filter keeps no derived state that needs recomputing.
func (gf *Filter) Update() {
}
// SetSize sets the filter size and center spacing -- these are the main
// params that need to be varied for standard V1 dogs.
func (gf *Filter) SetSize(sz, spc int) {
	gf.Size, gf.Spacing = sz, spc
}
// GaussDenSig returns the gaussian density at x for standard deviation sig.
// The constant 0.398942280 is 1/sqrt(2*pi).
func GaussDenSig(x, sig float32) float32 {
	z := x / sig
	return 0.398942280 * mat32.Exp(-0.5*z*z) / sig
}
// ToTensor renders dog filters into the given etable etensor.Tensor,
// setting dimensions to [3][Y][X] where Y = X = Size, and
// first one is On-filter, second is Off-filter, and third is Net On - Off.
// Each component is normalized: On and Off sum to 1 individually, and the
// positive and negative halves of Net each sum to 1 in magnitude.
func (gf *Filter) ToTensor(tsr *etensor.Float32) {
	tsr.SetShape([]int{int(FiltersN), gf.Size, gf.Size}, nil, []string{"3", "Y", "X"})
	ctr := 0.5 * float32(gf.Size-1) // geometric center of the square
	radius := float32(gf.Size) * 0.5
	// sigmas converted from normalized units to pixels
	gsOn := gf.OnSig * float32(gf.Size)
	gsOff := gf.OffSig * float32(gf.Size)
	var posSum, negSum, onSum, offSum float32
	for y := 0; y < gf.Size; y++ {
		for x := 0; x < gf.Size; x++ {
			xf := float32(x) - ctr
			yf := float32(y) - ctr
			dist := mat32.Hypot(xf, yf)
			var ong, offg float32
			// outside the circular edge (when enabled) both gaussians stay 0
			if !(gf.CircleEdge && (dist > radius)) {
				ong = GaussDenSig(dist, gsOn)
				offg = GaussDenSig(dist, gsOff)
			}
			tsr.Set([]int{int(On), y, x}, ong)
			tsr.Set([]int{int(Off), y, x}, offg)
			onSum += ong
			offSum += offg
			net := ong - offg
			tsr.Set([]int{int(Net), y, x}, net)
			if net > 0 {
				posSum += net
			} else if net < 0 {
				negSum += -net
			}
		}
	}
	// renorm each half, separate components
	for y := 0; y < gf.Size; y++ {
		for x := 0; x < gf.Size; x++ {
			val := tsr.Value([]int{int(Net), y, x})
			if val > 0 {
				val /= posSum
			} else if val < 0 {
				val /= negSum
			}
			tsr.Set([]int{int(Net), y, x}, val)
			on := tsr.Value([]int{int(On), y, x})
			tsr.Set([]int{int(On), y, x}, on/onSum)
			off := tsr.Value([]int{int(Off), y, x})
			tsr.Set([]int{int(Off), y, x}, off/offSum)
		}
	}
}
// ToTable renders filters into the given etable.Table
// setting a column named Version and a column named Filter
// to the filter for that version (on, off, net)
// This is useful for display and validation purposes.
func (gf *Filter) ToTable(tab *etable.Table) {
	tab.SetFromSchema(etable.Schema{
		{"Version", etensor.STRING, nil, nil},
		{"Filter", etensor.FLOAT32, []int{int(FiltersN), gf.Size, gf.Size}, []string{"Version", "Y", "X"}},
	}, 3)
	// render all three filter planes into the Filter column's tensor
	gf.ToTensor(tab.Cols[1].(*etensor.Float32))
	tab.SetCellStringIdx(0, int(On), "On")
	tab.SetCellStringIdx(0, int(Off), "Off")
	tab.SetCellStringIdx(0, int(Net), "Net")
}
// FilterTensor extracts the given filter subspace from set of 3 filters in input tensor
// 0 = On, 1 = Off, 2 = Net
func (gf *Filter) FilterTensor(tsr *etensor.Float32, filt Filters) *etensor.Float32 {
	return tsr.SubSpace([]int{int(filt)}).(*etensor.Float32)
}
// Filters is the type of filter, indexing the three planes produced by
// ToTensor: On, Off and Net (On - Off).
type Filters int

const (
	On Filters = iota
	Off
	Net
	FiltersN // number of filter planes; used as the tensor's leading dimension
)
package asetypes
import (
"bytes"
"encoding/binary"
"fmt"
"strings"
"time"
"unicode/utf16"
"github.com/SAP/go-dblib/asetime"
)
// GoValue returns a Go value decoded from bs according to the ASE data
// type t, using endian for multi-byte integers. For fixed-size types the
// byte slice length must match the type's ByteSize exactly.
func (t DataType) GoValue(endian binary.ByteOrder, bs []byte) (interface{}, error) {
	if expected := t.ByteSize(); expected != -1 && len(bs) != expected {
		return nil, fmt.Errorf("byte slice has invalid length of %d, expected %d bytes", len(bs), expected)
	}
	val, err := t.goValue(endian, bs)
	if err != nil {
		return nil, fmt.Errorf("error converting %v into value of type %s: %w", bs, t, err)
	}
	return val, nil
}
// goValue performs the per-type decoding for GoValue. Integer and float
// types are read with encoding/binary; the *N ("nullable"/variable-width)
// types dispatch on the actual byte length; money, date and time types are
// decoded manually from their ASE wire layouts.
func (t DataType) goValue(endian binary.ByteOrder, bs []byte) (interface{}, error) {
	buffer := bytes.NewBuffer(bs)
	switch t {
	// fixed-width integers
	case INT1:
		var x uint8
		err := binary.Read(buffer, endian, &x)
		return x, err
	case INT2:
		var x int16
		err := binary.Read(buffer, endian, &x)
		return x, err
	case INT4:
		var x int32
		err := binary.Read(buffer, endian, &x)
		return x, err
	case INT8:
		var x int64
		err := binary.Read(buffer, endian, &x)
		return x, err
	case INTN:
		// variable-width signed integer: width determined by byte length
		switch len(bs) {
		case 0:
			return 0, nil
		case 1:
			return INT1.GoValue(endian, bs)
		case 2:
			return INT2.GoValue(endian, bs)
		case 4:
			return INT4.GoValue(endian, bs)
		case 8:
			return INT8.GoValue(endian, bs)
		default:
			return nil, fmt.Errorf("invalid length for INTN: %d", len(bs))
		}
	case UINT2:
		var x uint16
		err := binary.Read(buffer, endian, &x)
		return x, err
	case UINT4:
		var x uint32
		err := binary.Read(buffer, endian, &x)
		return x, err
	case UINT8:
		var x uint64
		err := binary.Read(buffer, endian, &x)
		return x, err
	case UINTN:
		// variable-width unsigned integer: width determined by byte length
		switch len(bs) {
		case 0:
			return 0, nil
		case 1:
			return INT1.GoValue(endian, bs)
		case 2:
			return UINT2.GoValue(endian, bs)
		case 4:
			return UINT4.GoValue(endian, bs)
		case 8:
			return UINT8.GoValue(endian, bs)
		default:
			return nil, fmt.Errorf("invalid length for UINTN: %d", len(bs))
		}
	case FLT4:
		var x float32
		err := binary.Read(buffer, endian, &x)
		return x, err
	case FLT8:
		var x float64
		err := binary.Read(buffer, endian, &x)
		return x, err
	case FLTN:
		// variable-width float: width determined by byte length
		switch len(bs) {
		case 0:
			return 0, nil
		case 4:
			return FLT4.GoValue(endian, bs)
		case 8:
			return FLT8.GoValue(endian, bs)
		default:
			return nil, fmt.Errorf("invalid length for FLTN: %d", len(bs))
		}
	case BIT:
		if bs[0] == 0x1 {
			return true, nil
		}
		return false, nil
	case LONGBINARY, BINARY, VARBINARY, IMAGE:
		// Noop
		return bs, nil
	case CHAR, VARCHAR, TEXT, LONGCHAR:
		return string(bs), nil
	case UNITEXT:
		runes := []rune{}
		for i := 0; i < len(bs); i++ {
			// Determine if byte is a utf16 surrogate - if so two
			// bytes must be consumed to form one utf16 code point
			if utf16.IsSurrogate(rune(bs[i])) {
				r := utf16.DecodeRune(rune(bs[i]), rune(bs[i+1]))
				runes = append(runes, r)
				i++
			} else {
				runes = append(runes, rune(bs[i]))
			}
		}
		s := string(runes)
		// Trim null bytes from the right - ASE always sends the
		// maximum bytes for the TEXT datatype, causing the string
		// to have a couple thousand null bytes. These are also
		// carried over in a string() conversion and cause
		// false-negatives in comparisons.
		s = strings.TrimRight(s, "\x00")
		return s, nil
	case MONEY:
		// 8-byte money: high 32 bits then low 32 bits of a scaled int64
		dec, err := NewDecimal(ASEMoneyPrecision, ASEMoneyScale)
		if err != nil {
			return nil, fmt.Errorf("error creating decimal: %w", err)
		}
		mnyhigh := endian.Uint32(bs[:4])
		mnylow := endian.Uint32(bs[4:])
		mny := int64(int64(mnyhigh)<<32 + int64(mnylow))
		dec.SetInt64(mny)
		return dec, nil
	case SHORTMONEY:
		dec, err := NewDecimal(ASEShortMoneyPrecision, ASEShortMoneyScale)
		if err != nil {
			return nil, fmt.Errorf("error creating decimal: %w", err)
		}
		dec.SetInt64(int64(int32(endian.Uint32(bs))))
		return dec, nil
	case DECN, NUMN:
		// first byte is the sign flag, the rest is the big-endian magnitude
		dec, err := NewDecimal(ASEDecimalDefaultPrecision, ASEDecimalDefaultScale)
		if err != nil {
			return nil, fmt.Errorf("error creating decimal: %w", err)
		}
		dec.SetBytes(bs[1:])
		if bs[0] == 0x1 {
			dec.Negate()
		}
		// User must set precision and scale
		return dec, nil
	case DATE:
		// signed day count relative to the 1900 epoch
		x := int32(endian.Uint32(bs))
		days := asetime.ASEDuration(x) * asetime.Day
		return asetime.Epoch1900().AddDate(0, 0, days.Days()), nil
	case TIME:
		// fractional seconds since midnight, reported on year-1 day-1
		x := int(int32(endian.Uint32(bs)))
		dur := asetime.FractionalSecondToMillisecond(x)
		t := time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)
		return t.Add(time.Duration(dur.Milliseconds()) * time.Millisecond), nil
	case SHORTDATE:
		// two uint16s: days since 1900 epoch, then minutes since midnight
		days := endian.Uint16(bs[:2])
		mins := endian.Uint16(bs[2:])
		t := asetime.Epoch1900()
		t = t.AddDate(0, 0, int(days))
		t = t.Add(time.Duration(int(mins)) * time.Minute)
		return t, nil
	case DATETIME:
		// days since 1900 epoch plus fractional seconds within the day
		days := asetime.ASEDuration(int32(endian.Uint32(bs[:4]))) * asetime.Day
		ms := asetime.FractionalSecondToMillisecond(int(endian.Uint32(bs[4:])))
		t := asetime.Epoch1900()
		t = t.AddDate(0, 0, days.Days())
		t = t.Add(time.Duration(ms.Microseconds()) * time.Microsecond)
		return t, nil
	case DATETIMEN:
		// TODO length-based
		return nil, nil
	case BIGDATETIMEN:
		// microseconds since year 0; split into whole days plus remainder
		dur := asetime.ASEDuration(endian.Uint64(bs))
		t := time.Date(0, time.January, 1, 0, 0, 0, 0, time.UTC)
		t = t.AddDate(0, 0, dur.Days())
		ms := dur.Microseconds() - (dur.Days() * int(asetime.Day))
		t = t.Add(time.Duration(ms) * time.Microsecond)
		return t, nil
	case BIGTIMEN:
		// microseconds since midnight, anchored at the Rata Die epoch
		dur := asetime.ASEDuration(endian.Uint64(bs))
		t := asetime.EpochRataDie()
		t = t.Add(time.Duration(dur) * time.Microsecond)
		return t, nil
	default:
		return nil, fmt.Errorf("unhandled data type %s", t)
	}
}
package transform
import (
"kanzi/util"
)
// Bijective version of the Burrows-Wheeler Transform
// The main advantage over the regular BWT is that there is no need for a primary
// index (hence the bijectivity). BWTS is about 10% slower than BWT.
// Forward transform based on the code at https://code.google.com/p/mk-bwts/
// by <NAME> and DivSufSort (port of libDivSufSort by Yuta Mori)
type BWTS struct {
	size uint // transform length; 0 means "use the full input slice"
	buffer []int // scratch storage for the inverse suffix array, grown lazily
	buckets []int // per-byte bucket counts (256 entries)
	saAlgo *util.DivSufSort // suffix-array sorter, created lazily and reused
}
// NewBWTS returns a BWTS transform configured for inputs of length sz
// (0 means the full input slice length is used at transform time).
func NewBWTS(sz uint) (*BWTS, error) {
	return &BWTS{
		size:    sz,
		buffer:  make([]int, sz),
		buckets: make([]int, 256),
	}, nil
}
// Size returns the configured transform length (0 means full input).
func (this *BWTS) Size() uint {
	return this.size
}
// SetSize sets the transform length and always reports success.
func (this *BWTS) SetSize(sz uint) bool {
	this.size = sz
	return true
}
// Forward computes the bijective BWT of src into dst and returns the
// number of bytes read and written. Unlike the classic BWT, no primary
// index is produced: the input is factored into Lyndon words whose
// rotations are re-ranked in the suffix array, making the transform
// invertible on its own.
func (this *BWTS) Forward(src, dst []byte) (uint, uint, error) {
	count := int(this.size)
	if this.size == 0 {
		count = len(src)
	}
	// Inputs of length 0 or 1 are their own transform.
	if count < 2 {
		if count == 1 {
			dst[0] = src[0]
		}
		return uint(count), uint(count), nil
	}
	// Lazy dynamic memory allocations
	if len(this.buffer) < count {
		this.buffer = make([]int, count)
	}
	if this.saAlgo == nil {
		var err error
		if this.saAlgo, err = util.NewDivSufSort(); err != nil {
			return 0, 0, err
		}
	} else {
		this.saAlgo.Reset()
	}
	// Compute suffix array
	sa := this.saAlgo.ComputeSuffixArray(src[0:count])
	// Aliasing: isa is the inverse suffix array (rank of each position).
	isa := this.buffer
	for i := 0; i < count; i++ {
		isa[sa[i]] = i
	}
	// Scan for successive minima of isa: each new minimum starts a new
	// Lyndon word, whose head rank is adjusted in the suffix array.
	min := isa[0]
	idxMin := 0
	for i := 1; i < count && min > 0; i++ {
		if isa[i] >= min {
			continue
		}
		headRank := this.moveLyndonWordHead(sa, src, count, idxMin, i-idxMin, min)
		refRank := headRank
		for j := i - 1; j > idxMin; j-- {
			// iterate through the new lyndon word from end to start
			testRank := isa[j]
			startRank := testRank
			for testRank < count-1 {
				nextRankStart := sa[testRank+1]
				if j > nextRankStart || src[j] != src[nextRankStart] || refRank < isa[nextRankStart+1] {
					break
				}
				sa[testRank] = nextRankStart
				isa[nextRankStart] = testRank
				testRank++
			}
			sa[testRank] = j
			isa[j] = testRank
			refRank = testRank
			if startRank == testRank {
				break
			}
		}
		min = isa[i]
		idxMin = i
	}
	// Emit output: each position's predecessor byte goes to its rank slot;
	// Lyndon word heads wrap around to the previous word's minimum rank.
	min = count
	for i := 0; i < count; i++ {
		if isa[i] >= min {
			dst[isa[i]] = src[i-1]
			continue
		}
		if min < count {
			dst[min] = src[i-1]
		}
		min = isa[i]
	}
	dst[0] = src[count-1]
	return uint(count), uint(count), nil
}
// moveLyndonWordHead adjusts the suffix-array rank of the Lyndon word
// head at 'start' (word length 'size', current rank 'rank'): suffixes
// that now sort before the truncated word are shifted down one rank,
// and the word head's new rank is returned.
func (this *BWTS) moveLyndonWordHead(sa []int, data []byte, count, start, size, rank int) int {
	isa := this.buffer
	end := start + size
	for rank+1 < count {
		nextStart0 := sa[rank+1]
		// Suffixes inside the word itself stop the shifting.
		if nextStart0 <= end {
			break
		}
		nextStart := nextStart0
		// Compare the word (length size) against the next-ranked suffix.
		k := 0
		for k < size && nextStart < count && data[start+k] == data[nextStart] {
			k++
			nextStart++
		}
		if k == size && rank < isa[nextStart] {
			break
		}
		if k < size && nextStart < count && data[start+k] < data[nextStart] {
			break
		}
		// The next suffix sorts before the word: swap their ranks.
		sa[rank] = nextStart0
		isa[nextStart0] = rank
		rank++
	}
	sa[rank] = start
	isa[start] = rank
	return rank
}
// Inverse reverses the bijective BWT: dst receives the original bytes of
// src. It builds the LF (last-to-first) mapping from a byte histogram and
// then follows each unvisited cycle, writing output from the end backwards
// and marking visited entries with -1.
func (this *BWTS) Inverse(src, dst []byte) (uint, uint, error) {
	count := int(this.size)
	if this.size == 0 {
		count = len(src)
	}
	// Inputs of length 0 or 1 are their own inverse.
	if count < 2 {
		if count == 1 {
			dst[0] = src[0]
		}
		return uint(count), uint(count), nil
	}
	// Lazy dynamic memory allocation
	if len(this.buffer) < count {
		this.buffer = make([]int, count)
	}
	// Aliasing
	buckets_ := this.buckets
	lf := this.buffer
	// Initialize histogram
	for i := range this.buckets {
		buckets_[i] = 0
	}
	for i := 0; i < count; i++ {
		buckets_[src[i]]++
	}
	// Histogram -> cumulative counts (start offset of each byte value).
	for i, j := 0, 0; i < 256; i++ {
		t := buckets_[i]
		buckets_[i] = j
		j += t
	}
	// LF mapping: position i maps to the rank of its byte occurrence.
	for i := 0; i < count; i++ {
		lf[i] = buckets_[src[i]]
		buckets_[src[i]]++
	}
	// Build inverse: walk each cycle of the LF permutation, consuming
	// output slots from the back (j) and marking visited entries with -1.
	for i, j := 0, count-1; j >= 0; i++ {
		if lf[i] < 0 {
			continue
		}
		p := i
		for {
			dst[j] = src[p]
			j--
			t := lf[p]
			lf[p] = -1
			p = t
			if lf[p] < 0 {
				break
			}
		}
	}
	return uint(count), uint(count), nil
} | go/src/kanzi/transform/BWTS.go | 0.714628 | 0.420897 | BWTS.go | starcoder
package bytes
import (
"errors"
)
var (
	// ErrNoEnoughHeader is returned when the fixed-size header region does
	// not have enough free space left for the requested write.
	// (Message fixed: "no enough" -> "not enough".)
	ErrNoEnoughHeader = errors.New("bytes.WriteOnlyBuffer: not enough header space to write")
)
// WriteOnlyBuffer defines a buffer only used for easy-write and full-read.
// It maintains a fixed-size header region that is filled backwards and a
// growable tail that is appended forwards. A header size must be assigned
// when creating a new WriteOnlyBuffer.
type WriteOnlyBuffer interface {
	// WriteHeader writes slice p into the fixed length header. The return value
	// n is the length written into the header. If len(p) > the free space of the
	// header, WriteHeader returns ErrNoEnoughHeader.
	WriteHeader(p []byte) (n int, err error)
	// TakeFreeHeader returns a free slice of length n in the header and marks it
	// as used. For high performance, consider using TakeFreeHeader instead of
	// WriteHeader wherever possible (it avoids an intermediate copy).
	TakeFreeHeader(n int) ([]byte, error)
	// WriteTail appends the contents of p to the tail of the buffer, growing the
	// buffer as needed. The return value n is the length of p; err is always nil.
	// If the buffer becomes too large, WriteTail will panic with ErrTooLarge.
	WriteTail(p []byte) (n int, err error)
	// FreeTail returns the free memory of the tail and its length.
	FreeTail() ([]byte, int)
	// Bytes returns a slice holding the written portion of the buffer
	// (header and tail). The slice is valid for use only until the next buffer
	// modification (that is, only until the next call to a method like WriteTail).
	// The slice aliases the buffer content at least until the next buffer
	// modification, so immediate changes to the slice will affect the result of
	// future reads.
	Bytes() []byte
	// Len returns the number of written bytes (header used + tail).
	Len() int
	// Reset resets the buffer to be empty,
	// but it retains the underlying storage for use by future writes.
	Reset()
}
// writeOnlyBuffer is an implementation of WriteOnlyBuffer.
// Layout: buf[0:start] is free header space (filled backwards),
// buf[start:end] is written data, buf[end:] is free tail space.
type writeOnlyBuffer struct {
	hlen  int    // the initial size of header, restored by Reset.
	start int    // the start index of written data in buf.
	end   int    // the end index of written data in buf.
	buf   []byte // contents are the bytes buf[start:end]
}
// NewWriteOnlyBuffer creates a new WriteOnlyBuffer reserving hl bytes of
// header space and an empty tail.
func NewWriteOnlyBuffer(hl uint) WriteOnlyBuffer {
	h := int(hl)
	return &writeOnlyBuffer{
		hlen:  h,
		start: h,
		end:   h,
		buf:   make([]byte, hl),
	}
}
// NewWriteOnlyBufferWithBytes creates a WriteOnlyBuffer whose tail is
// pre-populated with buf and which reserves head bytes of header space.
// When head > 0 the contents of buf are copied into fresh storage after
// the header region; when head == 0 buf is used directly as backing store.
func NewWriteOnlyBufferWithBytes(head uint, buf []byte) WriteOnlyBuffer {
	// Record the header size so Reset restores the reserved header region.
	// (Bug fix: hlen was previously left at zero, so Reset() dropped the
	// header space requested by the caller.)
	wb := &writeOnlyBuffer{hlen: int(head)}
	if head > 0 {
		newbuf := make([]byte, int(head)+len(buf))
		copy(newbuf[head:], buf)
		wb.start = int(head)
		wb.end = wb.start + len(buf)
		wb.buf = newbuf
	} else {
		wb.start = 0
		wb.end = len(buf)
		wb.buf = buf
	}
	return wb
}
// WriteHeader copies p into the free header region, filling backwards
// toward the front of the buffer. It fails with ErrNoEnoughHeader when p
// does not fit in the remaining header space.
func (wb *writeOnlyBuffer) WriteHeader(p []byte) (n int, err error) {
	if len(p) > wb.start {
		return 0, ErrNoEnoughHeader
	}
	wb.start -= len(p)
	n = copy(wb.buf[wb.start:], p)
	return n, nil
}
// TakeFreeHeader reserves n bytes at the front of the written region and
// returns the corresponding slice, or ErrNoEnoughHeader when the header
// has less than n bytes free.
func (wb *writeOnlyBuffer) TakeFreeHeader(n int) ([]byte, error) {
	if n > wb.start {
		return nil, ErrNoEnoughHeader
	}
	newStart := wb.start - n
	wb.start = newStart
	return wb.buf[newStart : newStart+n], nil
}
// grow replaces the backing slice with a larger one (2*cap + n) obtained
// from makeSlice and copies the live region buf[start:] across in place.
// NOTE(review): makeSlice is defined elsewhere in this package; presumably
// it panics with ErrTooLarge on allocation failure as documented on
// WriteTail — confirm.
func (wb *writeOnlyBuffer) grow(n int) {
	c := cap(wb.buf)
	buf := makeSlice(2*c + n)
	copy(buf[wb.start:], wb.buf[wb.start:])
	wb.buf = buf
}
// tryGrowByReslice attempts to extend the buffer by n bytes without
// allocating, reporting whether it succeeded.
func (wb *writeOnlyBuffer) tryGrowByReslice(n int) bool {
	if free := cap(wb.buf) - wb.end; n <= free {
		wb.buf = wb.buf[:wb.end+n]
		return true
	}
	return false
}
// WriteTail appends p to the tail of the buffer, growing the backing
// storage when needed. n is always len(p) and err is always nil.
func (wb *writeOnlyBuffer) WriteTail(p []byte) (n int, err error) {
	if !wb.tryGrowByReslice(len(p)) {
		wb.grow(len(p))
	}
	n = copy(wb.buf[wb.end:], p)
	wb.end += n
	return n, nil
}
// FreeTail returns the unused memory past the written tail and its length.
func (wb *writeOnlyBuffer) FreeTail() ([]byte, int) {
	free := wb.buf[wb.end:]
	return free, len(free)
}
// Bytes returns the written region buf[start:end]; the slice aliases the
// internal storage and is invalidated by subsequent writes.
func (wb *writeOnlyBuffer) Bytes() []byte {
	return wb.buf[wb.start:wb.end]
}

// Len returns the number of written bytes (header used + tail).
func (wb *writeOnlyBuffer) Len() int {
	return wb.end - wb.start
}
// Reset resets the memory for reuse: the header region is restored to
// hlen bytes and the tail emptied, while the backing array is retained
// (buf[:0] keeps the capacity, so later header/tail writes reuse it).
// Notice: reset won't cause GC, you should care about memory leak.
func (wb *writeOnlyBuffer) Reset() {
	wb.start = wb.hlen
	wb.end = wb.start
	wb.buf = wb.buf[:0]
} | bytes/writeonly_buffer.go | 0.625781 | 0.411052 | writeonly_buffer.go | starcoder
package canvas
import (
"image"
"image/color"
"math"
"github.com/Laughs-In-Flowers/warhola/lib/util/mth"
"github.com/Laughs-In-Flowers/warhola/lib/util/prl"
"github.com/Laughs-In-Flowers/xrr"
)
// An interface for performing (relatively default & easy) operations on an image.
// Operator aggregates the individual operation interfaces implemented by canvas.
type Operator interface {
	Adjuster
	Blender
	Convoluter
	Noiser
	Transformer
	Translater
}
// AdjustmentFunc maps one RGBA pixel value to another.
type AdjustmentFunc func(color.RGBA) color.RGBA

// Adjuster applies a per-pixel color adjustment to an image.
type Adjuster interface {
	Adjust(AdjustmentFunc) error
}
// Adjust applies fn to every pixel of the canvas.
func (c *canvas) Adjust(fn AdjustmentFunc) error {
	apply := func() (*pxl, error) {
		return adjustment(c.pxl, fn)
	}
	return c.mutate(apply)
}
// adjustment applies afn to every pixel of p, processing rows in
// parallel on a clone in the working color model, and returns the
// adjusted pixels.
func adjustment(p *pxl, afn AdjustmentFunc) (*pxl, error) {
	return mutate(p, func() (*pxl, error) {
		srcP := p.clone(WorkingColorModelFn)
		sb := srcP.Bounds()
		w, h := sb.Dx(), sb.Dy()
		dstP := scratch(srcP, WorkingColorModelFn, w, h)
		// Pixels are 4 bytes (RGBA) apart; the same offset is reused for
		// src and dst since both have identical dimensions and stride.
		prl.Run(h, func(start, end int) {
			for y := start; y < end; y++ {
				for x := 0; x < w; x++ {
					srcPos := y*srcP.str + x*4
					c := color.RGBA{}
					c.R = srcP.pix[srcPos+0]
					c.G = srcP.pix[srcPos+1]
					c.B = srcP.pix[srcPos+2]
					c.A = srcP.pix[srcPos+3]
					c = afn(c)
					dstP.pix[srcPos+0] = c.R
					dstP.pix[srcPos+1] = c.G
					dstP.pix[srcPos+2] = c.B
					dstP.pix[srcPos+3] = c.A
				}
			}
		})
		return dstP, nil
	})
}
// BlendPosition selects whether the provided image acts as foreground
// (FG) or background (BG) in a blend.
type BlendPosition int

const (
	NoBlendPosition BlendPosition = iota
	FG
	BG
)

// BlendFunc combines a background pixel (first argument) and a
// foreground pixel (second argument) into a result.
type BlendFunc func(RGBA164, RGBA164) RGBA164

// Blender blends an image with the canvas.
type Blender interface {
	Blend(image.Image, BlendPosition, BlendFunc) error
}
// Blend the provided image with the Canvas, at the provided position
// (foreground or background) using the provided BlendFunc.
func (c *canvas) Blend(i image.Image, pos BlendPosition, fn BlendFunc) error {
	apply := func() (*pxl, error) {
		return blend(c.pxl, i, pos, fn)
	}
	return c.mutate(apply)
}
// NoBlendPositionError is returned when blending without a valid position.
var NoBlendPositionError = xrr.Xrror("no blend position")

// blend combines p with image i using bfn. pos determines whether i is
// treated as the foreground (FG) or the background (BG); the result is
// clipped to the smaller of the two extents in each dimension.
func blend(p *pxl, i image.Image, pos BlendPosition, bfn BlendFunc) (*pxl, error) {
	if pos == NoBlendPosition {
		return p, NoBlendPositionError
	}
	// Copy the incoming image into pxl form.
	np := scratch(p, i.ColorModel(), 0, 0)
	existingTo(i, np)
	var bg, fg *pxl
	switch pos {
	case BG:
		bg = np
		fg = p
	case FG:
		bg = p
		fg = np
	}
	bgBounds := bg.Bounds()
	fgBounds := fg.Bounds()
	// Clip to the overlapping area.
	var w, h int
	if bgBounds.Dx() < fgBounds.Dx() {
		w = bgBounds.Dx()
	} else {
		w = fgBounds.Dx()
	}
	if bgBounds.Dy() < fgBounds.Dy() {
		h = bgBounds.Dy()
	} else {
		h = fgBounds.Dy()
	}
	bgSrc := bg.clone(WorkingColorModelFn)
	fgSrc := fg.clone(WorkingColorModelFn)
	dstP := scratch(bg, WorkingColorModelFn, w, h)
	prl.Run(h, func(start, end int) {
		for y := start; y < end; y++ {
			for x := 0; x < w; x++ {
				bgPos := y*bgSrc.str + x*4
				fgPos := y*fgSrc.str + x*4
				result := bfn(
					newRGBA164(bgSrc.pix[bgPos+0], bgSrc.pix[bgPos+1], bgSrc.pix[bgPos+2], bgSrc.pix[bgPos+3]),
					newRGBA164(fgSrc.pix[fgPos+0], fgSrc.pix[fgPos+1], fgSrc.pix[fgPos+2], fgSrc.pix[fgPos+3]),
				)
				// Clamp to [0,1] before converting back to 8-bit channels.
				result.Clamp()
				dstPos := y*dstP.str + x*4
				dstP.pix[dstPos+0] = uint8(result.R * 255)
				dstP.pix[dstPos+1] = uint8(result.G * 255)
				dstP.pix[dstPos+2] = uint8(result.B * 255)
				dstP.pix[dstPos+3] = uint8(result.A * 255)
			}
		}
	})
	return dstP, nil
}
// Convoluter convolves an image with a kernel matrix.
type Convoluter interface {
	Convolve(mth.Matrix, float64, bool, bool) error
}

// Convolve applies kernel m with the given bias to the canvas. wrap
// selects wrapped (vs extended) edge handling; keepAlpha leaves the
// alpha channel unconvolved.
func (c *canvas) Convolve(m mth.Matrix, bias float64, wrap, keepAlpha bool) error {
	return c.mutate(func() (*pxl, error) {
		return convolve(c.pxl, m, bias, wrap, keepAlpha)
	})
}
// convolve applies kernel m to p. The source is padded by the kernel
// radius (wrapped or edge-extended) so the main loop needs no bounds
// checks, and each channel sum is offset by bias and clamped to [0,255].
// When keepAlpha is set the alpha channel is copied through unconvolved.
func convolve(p *pxl, m mth.Matrix, bias float64, wrap, keepAlpha bool) (*pxl, error) {
	return mutate(p, func() (*pxl, error) {
		srcP := p.clone(color.RGBAModel)
		// Kernel attributes
		lenX := m.MaxX()
		lenY := m.MaxY()
		radiusX := lenX / 2
		radiusY := lenY / 2
		// Pad the source image, basically pre-computing the pixels outside of image bounds
		switch {
		case wrap:
			srcP = p.pad(pmWrap, radiusX, radiusY)
		default:
			srcP = p.pad(pmExtend, radiusX, radiusY)
		}
		// src bounds now includes padded pixels
		srcPBounds := srcP.Bounds()
		srcW, srcH := srcPBounds.Dx(), srcPBounds.Dy()
		dstP := scratch(srcP, color.RGBAModel, srcW, srcH)
		// To keep alpha we simply don't convolve it
		switch {
		case keepAlpha:
			// Notice we can't use lenY since it will be larger than the actual padding pixels
			// as it includes the identity element
			prl.Run(srcH-(radiusY*2), func(start, end int) {
				// Correct range so we don't iterate over the padded pixels on the main loop
				for y := start + radiusY; y < end+radiusY; y++ {
					for x := radiusX; x < srcW-radiusX; x++ {
						var r, g, b float64
						// Kernel has access to the padded pixels
						for ky := 0; ky < lenY; ky++ {
							iy := y - radiusY + ky
							for kx := 0; kx < lenX; kx++ {
								ix := x - radiusX + kx
								kvalue := m.At(kx, ky)
								ipos := iy*srcP.str + ix*4
								r += float64(srcP.pix[ipos+0]) * kvalue
								g += float64(srcP.pix[ipos+1]) * kvalue
								b += float64(srcP.pix[ipos+2]) * kvalue
							}
						}
						// Map x and y indices to non-padded range
						pos := (y-radiusY)*dstP.str + (x-radiusX)*4
						dstP.pix[pos+0] = uint8(math.Max(math.Min(r+bias, 255), 0))
						dstP.pix[pos+1] = uint8(math.Max(math.Min(g+bias, 255), 0))
						dstP.pix[pos+2] = uint8(math.Max(math.Min(b+bias, 255), 0))
						dstP.pix[pos+3] = srcP.pix[y*srcP.str+x*4+3]
					}
				}
			})
		default:
			// Notice we can't use lenY since it will be larger than the actual padding pixels
			// as it includes the identity element
			prl.Run(srcH-(radiusY*2), func(start, end int) {
				// Correct range so we don't iterate over the padded pixels on the main loop
				for y := start + radiusY; y < end+radiusY; y++ {
					for x := radiusX; x < srcW-radiusX; x++ {
						var r, g, b, a float64
						// Kernel has access to the padded pixels
						for ky := 0; ky < lenY; ky++ {
							iy := y - radiusY + ky
							for kx := 0; kx < lenX; kx++ {
								ix := x - radiusX + kx
								kvalue := m.At(kx, ky)
								ipos := iy*srcP.str + ix*4
								r += float64(srcP.pix[ipos+0]) * kvalue
								g += float64(srcP.pix[ipos+1]) * kvalue
								b += float64(srcP.pix[ipos+2]) * kvalue
								a += float64(srcP.pix[ipos+3]) * kvalue
							}
						}
						// Map x and y indices to non-padded range
						pos := (y-radiusY)*dstP.str + (x-radiusX)*4
						dstP.pix[pos+0] = uint8(math.Max(math.Min(r+bias, 255), 0))
						dstP.pix[pos+1] = uint8(math.Max(math.Min(g+bias, 255), 0))
						dstP.pix[pos+2] = uint8(math.Max(math.Min(b+bias, 255), 0))
						dstP.pix[pos+3] = uint8(math.Max(math.Min(a, 255), 0))
					}
				}
			})
		}
		return dstP, nil
	})
}
// NoiseFunc produces one random 8-bit sample.
type NoiseFunc func() uint8

// Noiser fills an image with generated noise.
type Noiser interface {
	Noise(NoiseFunc, bool) error
}

// Noise fills the canvas with noise drawn from fn; monochrome selects
// grayscale (equal RGB) noise over independent per-channel noise.
func (c *canvas) Noise(fn NoiseFunc, monochrome bool) error {
	return c.mutate(func() (*pxl, error) {
		return generateNoise(c.pxl, fn, monochrome)
	})
}
// generateNoise fills p with noise from fn: per-channel color noise, or
// monochrome (equal RGB) noise when monochrome is true.
func generateNoise(p *pxl, fn NoiseFunc, monochrome bool) (*pxl, error) {
	return mutate(p, func() (*pxl, error) {
		if monochrome {
			return generateMonoNoise(p, fn)
		}
		return generateColorNoise(p, fn)
	})
}
// generateColorNoise fills p with independent per-channel noise from nfn,
// leaving every pixel fully opaque, and returns the result converted back
// to p's original color model.
func generateColorNoise(p *pxl, nfn NoiseFunc) (*pxl, error) {
	dstP := p.clone(WorkingColorModelFn)
	width, height := dstP.Bounds().Dx(), dstP.Bounds().Dy()
	prl.Run(height, func(start, end int) {
		for y := start; y < end; y++ {
			for x := 0; x < width; x++ {
				pos := y*dstP.str + x*4
				dstP.pix[pos+0] = nfn()
				dstP.pix[pos+1] = nfn()
				dstP.pix[pos+2] = nfn()
				dstP.pix[pos+3] = 0xFF // fully opaque
			}
		}
	})
	return dstP.clone(p.ColorModel()), nil
}
// generateMonoNoise fills p with grayscale noise: one sample per pixel is
// written to all three color channels, alpha is fully opaque, and the
// result is converted back to p's original color model.
func generateMonoNoise(p *pxl, nfn NoiseFunc) (*pxl, error) {
	dstP := p.clone(WorkingColorModelFn)
	width, height := dstP.Bounds().Dx(), dstP.Bounds().Dy()
	prl.Run(height, func(start, end int) {
		for y := start; y < end; y++ {
			for x := 0; x < width; x++ {
				pos := y*dstP.str + x*4
				v := nfn()
				dstP.pix[pos+0] = v
				dstP.pix[pos+1] = v
				dstP.pix[pos+2] = v
				dstP.pix[pos+3] = 0xFF // fully opaque
			}
		}
	})
	return dstP.clone(p.ColorModel()), nil
}
// Transformer groups the size-changing operations (crop and resize).
type Transformer interface {
	Cropper
	Resizer
}

// An interface for cropping a Canvas.
type Cropper interface {
	Crop(image.Rectangle) error
}
// EmptyIntersectError is returned when the requested crop rectangle does
// not intersect the canvas bounds at all.
var EmptyIntersectError = xrr.Xrror("Unable to crop empty intersect of %v and provided %v").Out

// crops the canvas
func (c *canvas) Crop(r image.Rectangle) error {
	return c.mutate(func() (*pxl, error) {
		return crop(c.pxl, r)
	})
}
// crop returns a view of p restricted to r (intersected with p's bounds).
// The returned pxl shares p's pixel storage and stride, offset to r's
// origin, with its rectangle rebased to (0,0). Fails with
// EmptyIntersectError when r does not overlap p.
func crop(p *pxl, r image.Rectangle) (*pxl, error) {
	exp := r
	r = r.Intersect(p.rect)
	if r.Empty() {
		return p, EmptyIntersectError(p.rect, exp)
	}
	i := p.PixOffset(r.Min.X, r.Min.Y)
	nr := image.Rectangle{image.Point{0, 0}, r.Size()}
	return &pxl{
		m:         p.m,
		pix:       p.pix[i:],
		str:       p.str,
		rect:      nr,
		paletteFn: p.paletteFn,
		measure:   newMeasure(&r, p.measure.pp, p.measure.ppu),
	}, nil
}
// ResampleFilterFunc weights a sample at a normalized distance from the
// target position.
type ResampleFilterFunc func(float64) float64

// ResampleFilter pairs a weighting function with its support radius.
// A Support of 0 (and nil Fn) selects nearest-neighbor sampling.
type ResampleFilter struct {
	Key     string
	Support float64
	Fn      ResampleFilterFunc
}

// String returns the filter's key.
func (r ResampleFilter) String() string {
	return r.Key
}
var (
	// NearestNeighbor picks the single closest source pixel
	// (Support 0 routes resize through the nearest() fast path).
	NearestNeighbor = ResampleFilter{
		"nearestneighbor",
		0,
		nil,
	}
	// Linear is a triangle (tent) filter with support 1.
	Linear = ResampleFilter{
		"linear",
		1.0,
		func(x float64) float64 {
			x = math.Abs(x)
			if x < 1.0 {
				return 1.0 - x
			}
			return 0
		},
	}
)
// Resizer resizes an image to the given dimensions with a resampling filter.
type Resizer interface {
	Resize(w, h int, f ResampleFilter) error
}

// ZeroResizeError is returned when either target dimension is zero or
// the canvas is empty.
var ZeroResizeError = xrr.Xrror("zero value prevents resizing\n\twidth %d\n\theight %d\n\tempty canvas: %t").Out

// resize the canvas
func (c *canvas) Resize(w, h int, filter ResampleFilter) error {
	return c.mutate(func() (*pxl, error) {
		return resize(c.pxl, w, h, filter)
	})
}
// resize scales p to w x h. Filters with Support <= 0 use nearest-neighbor
// sampling; otherwise the image is resampled horizontally then vertically
// with the filter's weighting function.
func resize(p *pxl, w, h int, f ResampleFilter) (*pxl, error) {
	return mutate(p, func() (*pxl, error) {
		b := p.Bounds().Empty()
		if w <= 0 || h <= 0 || b {
			return p, ZeroResizeError(w, h, b)
		}
		switch {
		case f.Support <= 0:
			p = nearest(p, w, h)
		default:
			// Two separable 1-D passes.
			p = resampleH(p, w, f)
			p = resampleV(p, h, f)
		}
		return p, nil
	})
}
// nearest scales p to w x h by nearest-neighbor sampling: each destination
// pixel copies the source pixel at the proportionally mapped position.
func nearest(p *pxl, w, h int) *pxl {
	srcP := p.clone(color.RGBAModel)
	srcW, srcH := srcP.Bounds().Dx(), srcP.Bounds().Dy()
	srcStride := srcP.str
	dstP := scratch(srcP, srcP.ColorModel(), w, h)
	dstStride := dstP.str
	// Scale factors from destination to source coordinates.
	dx := float64(srcW) / float64(w)
	dy := float64(srcH) / float64(h)
	for y := 0; y < h; y++ {
		for x := 0; x < w; x++ {
			pos := y*dstStride + x*4
			// +0.5 samples from the center of each destination pixel.
			ipos := int((float64(y)+0.5)*dy)*srcStride + int((float64(x)+0.5)*dx)*4
			dstP.pix[pos+0] = srcP.pix[ipos+0]
			dstP.pix[pos+1] = srcP.pix[ipos+1]
			dstP.pix[pos+2] = srcP.pix[ipos+2]
			dstP.pix[pos+3] = srcP.pix[ipos+3]
		}
	}
	return dstP.clone(p.ColorModel())
}
// resampleH resamples p horizontally to width w using filter f: each
// destination pixel is a normalized, f-weighted sum of the source pixels
// within the filter radius of its mapped position.
func resampleH(p *pxl, w int, f ResampleFilter) *pxl {
	srcP := p.clone(WorkingColorModelFn)
	srcWidth, srcHeight := srcP.Bounds().Dx(), srcP.Bounds().Dy()
	srcStride := srcP.Stride()
	delta := float64(srcWidth) / float64(w)
	// When downscaling, widen the filter by the scale factor.
	scale := math.Max(delta, 1.0)
	dstP := scratch(srcP, srcP.ColorModel(), w, srcHeight)
	dstStride := dstP.Stride()
	filterRadius := math.Ceil(scale * f.Support)
	prl.Run(srcHeight, func(start, end int) {
		for y := start; y < end; y++ {
			for x := 0; x < w; x++ {
				// Center of the destination pixel mapped into source space.
				ix := (float64(x)+0.5)*delta - 0.5
				istart, iend := int(ix-filterRadius+0.5), int(ix+filterRadius)
				if istart < 0 {
					istart = 0
				}
				if iend >= srcWidth {
					iend = srcWidth - 1
				}
				var r, g, b, a float64
				var sum float64
				for kx := istart; kx <= iend; kx++ {
					srcPos := y*srcStride + kx*4
					normPos := (float64(kx) - ix) / scale
					fValue := f.Fn(normPos)
					r += float64(srcP.pix[srcPos+0]) * fValue
					g += float64(srcP.pix[srcPos+1]) * fValue
					b += float64(srcP.pix[srcPos+2]) * fValue
					a += float64(srcP.pix[srcPos+3]) * fValue
					sum += fValue
				}
				// Normalize by the weight sum and round via +0.5.
				dstPos := y*dstStride + x*4
				dstP.pix[dstPos+0] = uint8(mth.Clamp((r/sum)+0.5, 0, 255))
				dstP.pix[dstPos+1] = uint8(mth.Clamp((g/sum)+0.5, 0, 255))
				dstP.pix[dstPos+2] = uint8(mth.Clamp((b/sum)+0.5, 0, 255))
				dstP.pix[dstPos+3] = uint8(mth.Clamp((a/sum)+0.5, 0, 255))
			}
		}
	})
	return dstP.clone(p.ColorModel())
}
// resampleV resamples p vertically to height h using filter f; the
// vertical counterpart of resampleH.
func resampleV(p *pxl, h int, f ResampleFilter) *pxl {
	srcP := p.clone(WorkingColorModelFn)
	srcWidth, srcHeight := srcP.Bounds().Dx(), srcP.Bounds().Dy()
	srcStride := srcP.Stride()
	delta := float64(srcHeight) / float64(h)
	// When downscaling, widen the filter by the scale factor.
	scale := math.Max(delta, 1.0)
	dstP := scratch(srcP, srcP.ColorModel(), srcWidth, h)
	dstStride := dstP.Stride()
	filterRadius := math.Ceil(scale * f.Support)
	prl.Run(h, func(start, end int) {
		for y := start; y < end; y++ {
			// Center of the destination row mapped into source space.
			iy := (float64(y)+0.5)*delta - 0.5
			istart, iend := int(iy-filterRadius+0.5), int(iy+filterRadius)
			if istart < 0 {
				istart = 0
			}
			if iend >= srcHeight {
				iend = srcHeight - 1
			}
			for x := 0; x < srcWidth; x++ {
				var r, g, b, a float64
				var sum float64
				for ky := istart; ky <= iend; ky++ {
					srcPos := ky*srcStride + x*4
					normPos := (float64(ky) - iy) / scale
					fValue := f.Fn(normPos)
					r += float64(srcP.pix[srcPos+0]) * fValue
					g += float64(srcP.pix[srcPos+1]) * fValue
					b += float64(srcP.pix[srcPos+2]) * fValue
					a += float64(srcP.pix[srcPos+3]) * fValue
					sum += fValue
				}
				// Normalize by the weight sum and round via +0.5.
				dstPos := y*dstStride + x*4
				dstP.pix[dstPos+0] = uint8(mth.Clamp((r/sum)+0.5, 0, 255))
				dstP.pix[dstPos+1] = uint8(mth.Clamp((g/sum)+0.5, 0, 255))
				dstP.pix[dstPos+2] = uint8(mth.Clamp((b/sum)+0.5, 0, 255))
				dstP.pix[dstPos+3] = uint8(mth.Clamp((a/sum)+0.5, 0, 255))
			}
		}
	})
	return dstP.clone(p.ColorModel())
}
// Translater groups the position-changing operations.
type Translater interface {
	Flip(TDir) error
	Rotate(float64, bool, image.Point) error
	Shear(TDir, float64) error
	Translate(int, int) error
}

// TDir selects a translation axis: horizontal or vertical.
type TDir int

const (
	NoTDir TDir = iota
	THorizontal
	TVertical
)

// NoDirectionError is returned when a TDir is neither horizontal nor vertical.
var NoDirectionError = xrr.Xrror("'%s' is not a direction to flip").Out
// Flip mirrors the canvas horizontally or vertically.
func (c *canvas) Flip(dir TDir) error {
	apply := func() (*pxl, error) {
		return flip(c.pxl, dir)
	}
	return c.mutate(apply)
}
// flip mirrors p across the vertical axis (THorizontal) or horizontal
// axis (TVertical), returning NoDirectionError for any other direction.
func flip(p *pxl, dir TDir) (*pxl, error) {
	return mutate(p, func() (*pxl, error) {
		srcP := p.clone(WorkingColorModelFn)
		dstP := srcP.clone(WorkingColorModelFn)
		b := dstP.Bounds()
		w, h := b.Dx(), b.Dy()
		switch dir {
		case THorizontal:
			// Mirror each row: x -> w-x-1.
			prl.Run(h, func(start, end int) {
				for y := start; y < end; y++ {
					for x := 0; x < w; x++ {
						iy := y * dstP.str
						pos := iy + (x * 4)
						flippedX := w - x - 1
						flippedPos := iy + (flippedX * 4)
						dstP.pix[pos+0] = srcP.pix[flippedPos+0]
						dstP.pix[pos+1] = srcP.pix[flippedPos+1]
						dstP.pix[pos+2] = srcP.pix[flippedPos+2]
						dstP.pix[pos+3] = srcP.pix[flippedPos+3]
					}
				}
			})
		case TVertical:
			// Mirror each column: y -> h-y-1.
			prl.Run(h, func(start, end int) {
				for y := start; y < end; y++ {
					for x := 0; x < w; x++ {
						pos := y*dstP.str + (x * 4)
						flippedY := h - y - 1
						flippedPos := flippedY*dstP.str + (x * 4)
						dstP.pix[pos+0] = srcP.pix[flippedPos+0]
						dstP.pix[pos+1] = srcP.pix[flippedPos+1]
						dstP.pix[pos+2] = srcP.pix[flippedPos+2]
						dstP.pix[pos+3] = srcP.pix[flippedPos+3]
					}
				}
			})
		default:
			return p, NoDirectionError(dir)
		}
		return dstP, nil
	})
}
// Rotate turns the canvas by angle degrees about the point at (positive
// angles rotate clockwise). When preserve is true the result is enlarged
// to contain the whole rotated image.
func (c *canvas) Rotate(angle float64, preserve bool, at image.Point) error {
	apply := func() (*pxl, error) {
		return rotate(c.pxl, angle, preserve, at)
	}
	return c.mutate(apply)
}
// rotate turns p by angle degrees about 'at' (image center when at is the
// zero point). Multiples of 360 are a no-op; non-right angles are 2x
// supersampled before rotating and downsampled after to reduce aliasing.
// When preserve is true the destination is enlarged to hold the full
// rotated bounds (and the pivot is forced to the image center).
func rotate(p *pxl, angle float64, preserve bool, at image.Point) (*pxl, error) {
	return mutate(p, func() (*pxl, error) {
		srcP := p.clone(WorkingColorModelFn)
		b := srcP.Bounds()
		srcW, srcH := b.Dx(), b.Dy()
		supersample := false
		absAngle := int(math.Abs(angle) + 0.5)
		if absAngle%360 == 0 {
			return p, nil
		} else if absAngle%90 != 0 {
			// Supersampling is required for non-special angles
			// Special angles = 90, 180, 270...
			supersample = true
		}
		pivotX, pivotY := float64(srcW/2), float64(srcH/2)
		if at != image.ZP {
			pivotX, pivotY = float64(at.X), float64(at.Y)
		}
		var rErr error
		if supersample {
			// Supersample, currently hard set to 2x
			srcW, srcH = srcW*2, srcH*2
			srcP, rErr = resize(srcP, srcW, srcH, NearestNeighbor)
			pivotX, pivotY = pivotX*2, pivotY*2
		}
		if rErr != nil {
			return p, rErr
		}
		// Convert to radians, positive degree maps to clockwise rotation
		angleRadians := -angle * (math.Pi / 180)
		var dstW, dstH int
		if preserve {
			// Reserve larger size in destination image for full image bounds rotation
			// If not preserving size, always take image center as pivot
			pivotX, pivotY = float64(srcW)/2, float64(srcH)/2
			a := math.Abs(float64(srcW) * math.Sin(angleRadians))
			b := math.Abs(float64(srcW) * math.Cos(angleRadians))
			c := math.Abs(float64(srcH) * math.Sin(angleRadians))
			d := math.Abs(float64(srcH) * math.Cos(angleRadians))
			dstW, dstH = int(c+b+0.5), int(a+d+0.5)
		} else {
			dstW, dstH = srcW, srcH
		}
		dstP := scratch(srcP, WorkingColorModelFn, dstW, dstH)
		// Calculate offsets in case entire image is being displayed
		// Otherwise areas clipped by rotation won't be available
		offsetX := (dstW - srcW) / 2
		offsetY := (dstH - srcH) / 2
		prl.Run(srcH, func(start, end int) {
			// Correct range to include the pixels visible in new bounds
			// Note that cannot be done in prl.Runize function input height, otherwise ranges would overlap
			yStart := int((float64(start)/float64(srcH))*float64(dstH)) - offsetY
			yEnd := int((float64(end)/float64(srcH))*float64(dstH)) - offsetY
			xStart := -offsetX
			xEnd := srcW + offsetX
			for y := yStart; y < yEnd; y++ {
				dy := float64(y) - pivotY + 0.5
				for x := xStart; x < xEnd; x++ {
					dx := float64(x) - pivotX + 0.5
					// Inverse mapping: find the source pixel that lands here.
					ix := int((math.Cos(angleRadians)*dx - math.Sin(angleRadians)*dy + pivotX))
					iy := int((math.Sin(angleRadians)*dx + math.Cos(angleRadians)*dy + pivotY))
					if ix < 0 || ix >= srcW || iy < 0 || iy >= srcH {
						continue
					}
					srcPos := iy*srcP.str + ix*4
					dstPos := (y+offsetY)*dstP.str + (x+offsetX)*4
					copy(dstP.pix[dstPos:dstPos+4], srcP.pix[srcPos:srcPos+4])
				}
			}
		})
		if supersample {
			// Downsample to original bounds as part of the Supersampling
			dstP, rErr = resize(dstP, dstW/2, dstH/2, Linear)
		}
		if rErr != nil {
			return p, rErr
		}
		return dstP, nil
	})
}
// Shear skews the canvas by angle degrees along the given axis.
func (c *canvas) Shear(dir TDir, angle float64) error {
	apply := func() (*pxl, error) {
		return shear(c.pxl, dir, angle)
	}
	return c.mutate(apply)
}
// shear skews p by angle degrees along dir, always 2x supersampling
// before the transform and downsampling after. The destination is
// widened (THorizontal) or heightened (TVertical) by the shear factor so
// no content is clipped; any other direction yields NoDirectionError.
func shear(p *pxl, dir TDir, angle float64) (*pxl, error) {
	return mutate(p, func() (*pxl, error) {
		srcP := p.clone(WorkingColorModelFn)
		b := srcP.Bounds()
		srcW, srcH := b.Dx(), b.Dy()
		// Supersample, currently hard set to 2x
		srcW, srcH = srcW*2, srcH*2
		var sErr error
		srcP, sErr = resize(srcP, srcW, srcH, NearestNeighbor)
		if sErr != nil {
			return p, sErr
		}
		// Calculate shear factor
		k := math.Tan(angle * (math.Pi / 180))
		var dstH, dstW int
		var dstP *pxl
		switch dir {
		case THorizontal:
			dstW, dstH = srcW+int(float64(srcH)*math.Abs(k)), srcH
			dstP = scratch(srcP, WorkingColorModelFn, dstW, dstH)
			pivotX := float64(dstW) / 2
			pivotY := float64(dstH) / 2
			dx := (dstW - srcW) / 2
			dy := (dstH - srcH) / 2
			prl.Run(dstH, func(start, end int) {
				for y := start; y < end; y++ {
					for x := 0; x < dstW; x++ {
						// Move positions to revolve around pivot
						ix := x - int(pivotX) - dx
						iy := y - int(pivotY) - dy
						// Apply linear transformation
						ix = ix + int(float64(iy)*k)
						// Move positions back to image coordinates
						ix += int(pivotX)
						iy += int(pivotY)
						if ix < 0 || ix >= srcW || iy < 0 || iy >= srcH {
							continue
						}
						srcPos := iy*srcP.str + ix*4
						dstPos := y*dstP.str + x*4
						dstP.pix[dstPos+0] = srcP.pix[srcPos+0]
						dstP.pix[dstPos+1] = srcP.pix[srcPos+1]
						dstP.pix[dstPos+2] = srcP.pix[srcPos+2]
						dstP.pix[dstPos+3] = srcP.pix[srcPos+3]
					}
				}
			})
		case TVertical:
			dstW, dstH = srcW, srcH+int(float64(srcW)*math.Abs(k))
			dstP = scratch(srcP, WorkingColorModelFn, dstW, dstH)
			pivotX := float64(dstW) / 2
			pivotY := float64(dstH) / 2
			dx := (dstW - srcW) / 2
			dy := (dstH - srcH) / 2
			prl.Run(dstH, func(start, end int) {
				for y := start; y < end; y++ {
					for x := 0; x < dstW; x++ {
						// Move positions to revolve around pivot
						ix := x - int(pivotX) - dx
						iy := y - int(pivotY) - dy
						// Apply linear transformation
						iy = iy + int(float64(ix)*k)
						// Move positions back to image coordinates
						ix += int(pivotX)
						iy += int(pivotY)
						if ix < 0 || ix >= srcW || iy < 0 || iy >= srcH {
							continue
						}
						srcPos := iy*srcP.str + ix*4
						dstPos := y*dstP.str + x*4
						dstP.pix[dstPos+0] = srcP.pix[srcPos+0]
						dstP.pix[dstPos+1] = srcP.pix[srcPos+1]
						dstP.pix[dstPos+2] = srcP.pix[srcPos+2]
						dstP.pix[dstPos+3] = srcP.pix[srcPos+3]
					}
				}
			})
		default:
			return p, NoDirectionError(dir)
		}
		// Downsample to original bounds as part of the Supersampling
		dstP, sErr = resize(dstP, dstW/2, dstH/2, Linear)
		if sErr != nil {
			return p, sErr
		}
		return dstP, nil
	})
}
// Translate shifts the canvas contents by (dx, dy).
func (c *canvas) Translate(dx, dy int) error {
	apply := func() (*pxl, error) {
		return translate(c.pxl, dx, dy)
	}
	return c.mutate(apply)
}
// translate shifts the pixels of p by (dx, dy); vacated areas remain at
// the working color model's zero value. A (0, 0) shift is a no-op.
// NOTE(review): x uses ix = x-dx while y uses iy = y+dy — the axes shift
// in opposite directions for positive arguments; confirm this asymmetry
// is intended.
func translate(p *pxl, dx, dy int) (*pxl, error) {
	return mutate(p, func() (*pxl, error) {
		srcP := p.clone(WorkingColorModelFn)
		if dx == 0 && dy == 0 {
			return p, nil
		}
		b := srcP.Bounds()
		w, h := b.Dx(), b.Dy()
		dstP := scratch(srcP, WorkingColorModelFn, w, h)
		prl.Run(h, func(start, end int) {
			for y := start; y < end; y++ {
				for x := 0; x < w; x++ {
					ix, iy := x-dx, y+dy
					if ix < 0 || ix >= w || iy < 0 || iy >= h {
						continue
					}
					srcPos := iy*srcP.str + ix*4
					// src and dst share dimensions, so srcP.str doubles as
					// the destination stride here.
					dstPos := y*srcP.str + x*4
					copy(dstP.pix[dstPos:dstPos+4], srcP.pix[srcPos:srcPos+4])
				}
			}
		})
		return dstP, nil
	})
} | lib/canvas/operator.go | 0.639286 | 0.519948 | operator.go | starcoder
package main
import (
"fmt"
"math/rand"
"time"
)
import (
"github.com/nickdavies/go-astar/astar"
)
// main demonstrates the astar package: it builds a random walled map,
// routes point-to-point from source 'a' to target 'b', then routes from
// 'me' ('c') to the nearest point on that recorded path, printing the
// grid and per-search timings along the way.
func main() {
	var start_t int64
	var end_t int64
	var seed int64 = 0 // 0 => GenerateRandomMap picks a time-based seed
	// Setup the aStar structs
	ast := astar.NewAStar(50, 50)
	p2p := astar.NewPointToPoint()
	p2l := astar.NewListToPoint(true)
	// Generate a random map
	grid, source, target, me := GenerateRandomMap(ast, seed, 50, 600, 24, 100000)
	PrintGrid(grid)
	// Route from source to target (point to point)
	start_t = time.Now().UnixNano()
	end := ast.FindPath(p2p, source, target)
	end_t = time.Now().UnixNano()
	first_path_t := float64(end_t-start_t) / float64(time.Millisecond)
	DrawPath(grid, end, "*")
	PrintGrid(grid)
	// record path as array so it can be used in the next search
	p := end
	path := make([]astar.Point, 0)
	for p != nil {
		path = append(path, p.Point)
		p = p.Parent
	}
	// Route from the recorded path to me (list to point)
	start_t = time.Now().UnixNano()
	end = ast.FindPath(p2l, path, me)
	end_t = time.Now().UnixNano()
	second_path_t := float64(end_t-start_t) / float64(time.Millisecond)
	DrawPath(grid, end, ".")
	PrintGrid(grid)
	fmt.Println("me", me)
	fmt.Println("end", end)
	fmt.Println("end_grid", grid[end.Row][end.Col])
	fmt.Println(first_path_t)
	fmt.Println(second_path_t)
}
// GenerateRandomMap builds a grid_size x grid_size map containing roughly
// wall_count wall tiles placed as random segments of up to wall_size tiles
// (registered with ast at wall_weight), surrounds it with an impassable
// border (weight -1), and places three distinct markers: source "a",
// target "b" and "c" (me). A map_seed of 0 seeds from the current time.
func GenerateRandomMap(ast astar.AStar, map_seed int64, grid_size, wall_count, wall_size, wall_weight int) ([][]string, []astar.Point, []astar.Point, []astar.Point) {
	if map_seed == 0 {
		map_seed = time.Now().UnixNano()
	}
	fmt.Println("Map Seed", map_seed)
	rand.Seed(map_seed)
	grid := make([][]string, grid_size)
	for i := 0; i < len(grid); i++ {
		grid[i] = make([]string, grid_size)
	}
	// Scatter wall segments (vertical or horizontal) until wall_count
	// tiles have been placed.
	for walls := 0; walls < wall_count; {
		size := GetRandInt(wall_size)
		direction := GetRandInt(2)
		if direction == 0 {
			c := GetRandInt(grid_size)
			r := GetRandInt(grid_size - size)
			for i := 0; i < size; i++ {
				grid[r+i][c] = "#"
				ast.FillTile(astar.Point{r + i, c}, wall_weight)
			}
		} else {
			c := GetRandInt(grid_size - size)
			r := GetRandInt(grid_size)
			for i := 0; i < size; i++ {
				grid[r][c+i] = "#"
				ast.FillTile(astar.Point{r, c + i}, wall_weight)
			}
		}
		walls += size
	}
	// Impassable border around the whole map (weight -1).
	for i := 0; i < grid_size; i++ {
		grid[0][i] = "#"
		ast.FillTile(astar.Point{0, i}, -1)
		grid[i][0] = "#"
		ast.FillTile(astar.Point{i, 0}, -1)
		grid[grid_size-1][i] = "#"
		ast.FillTile(astar.Point{grid_size - 1, i}, -1)
		grid[i][grid_size-1] = "#"
		ast.FillTile(astar.Point{i, grid_size - 1}, -1)
	}
	// Place "a", "b" and "c" on distinct non-wall cells by rejection
	// sampling.
	source := make([]astar.Point, 1)
	for {
		r := GetRandInt(grid_size)
		c := GetRandInt(grid_size)
		if grid[r][c] != "#" {
			grid[r][c] = "a"
			source[0].Row = r
			source[0].Col = c
			break
		}
	}
	target := make([]astar.Point, 1)
	for {
		r := GetRandInt(grid_size)
		c := GetRandInt(grid_size)
		if grid[r][c] != "#" && grid[r][c] != "a" {
			grid[r][c] = "b"
			target[0].Row = r
			target[0].Col = c
			break
		}
	}
	me := make([]astar.Point, 1)
	for {
		r := GetRandInt(grid_size)
		c := GetRandInt(grid_size)
		if grid[r][c] != "#" && grid[r][c] != "a" && grid[r][c] != "b" {
			grid[r][c] = "c"
			me[0].Row = r
			me[0].Col = c
			break
		}
	}
	return grid, source, target, me
}
// DrawPath marks each point of path on grid: wall cells ("#") become "X",
// empty cells become path_char; any other cell is left untouched.
func DrawPath(grid [][]string, path *astar.PathPoint, path_char string) {
	for p := path; p != nil; p = p.Parent {
		switch grid[p.Row][p.Col] {
		case "#":
			grid[p.Row][p.Col] = "X"
		case "":
			grid[p.Row][p.Col] = path_char
		}
	}
}
// PrintGrid writes grid to stdout, one row per line (empty cells render
// as spaces), followed by a trailing blank line. All rows are printed at
// the width of the first row.
func PrintGrid(grid [][]string) {
	cols := len(grid[0])
	for _, row := range grid {
		for j := 0; j < cols; j++ {
			if cell := row[j]; cell == "" {
				fmt.Print(" ")
			} else {
				fmt.Print(cell)
			}
		}
		fmt.Print("\n")
	}
	fmt.Print("\n")
}
// GetRandInt returns a uniform random int in [0, limit) from the global
// math/rand source (seeded in GenerateRandomMap).
func GetRandInt(limit int) int {
	return rand.Intn(limit)
} | example.go | 0.567457 | 0.471771 | example.go | starcoder
package yamlpath
/*
filterNode represents a node of a filter expression parse tree. Each node is labelled with a lexeme.
Terminal nodes have one of the following lexemes: root, lexemeFilterAt, lexemeFilterIntegerLiteral,
lexemeFilterFloatLiteral, lexemeFilterStringLiteral.
root and lexemeFilterAt nodes also have a slice of lexemes representing the subpath of `$`` or `@``,
respectively.
Non-terminal nodes represent either basic filters (simpler predicates of one or two terminal
nodes) or filter expressions (more complex predicates of basic filters). A filter existence expression
is represented as a terminal node with lexemeFilterAt or (less commonly) root.
The following examples illustrate the approach.
The basic filter `@.child > 3` is represented as the following parse tree (where each node is indicated by
its lexeme and `<...>` represents the node's children):
lexemeFilterGreaterThan<lexemeFilterAt,lexemeFilterIntegerLiteral>
or, graphically:
>
/ \
@.child 3
The filter expression `@.child > 3 && @.other` is represented as the parse tree:
lexemeFilterConjunction<lexemeFilterGreaterThan<lexemeFilterAt,lexemeFilterIntegerLiteral>,lexemeFilterAt>
or, graphically:
&&
/ \
> @.other
/ \
@.child 3
The filter expression `(@.child < 5 || @.child > 10) && @.other == 'x'` is represented as the parse tree:
lexemeFilterConjunction<lexemeFilterDisjunction<lexemeFilterLessThan<lexemeFilterAt,lexemeFilterIntegerLiteral>,
lexemeFilterGreaterThan<lexemeFilterAt,lexemeFilterIntegerLiteral>
>,
lexemeFilterEquality<lexemeFilterAt,lexemeFilterStringLiteral>
>
or, graphically:
&&
/ \
|| ==
/ \ / \
< > @.other 'x'
/ \ / \
@.child 5 @.child 10
Note that brackets do not appear in the parse tree.
*/
// filterNode is one node of the filter-expression parse tree described above.
type filterNode struct {
	lexeme   lexeme        // the lexeme labelling this node
	subpath  []lexeme      // empty unless lexeme is root or lexemeFilterAt
	children []*filterNode // operand subtrees; empty for terminal nodes
}

// newFilterNode parses the given lexemes into a filter parse tree.
// It returns nil when the input is empty.
func newFilterNode(lexemes []lexeme) *filterNode {
	return newParser(lexemes).parse()
}

// isItemFilter reports whether this node is an existence filter rooted at
// `@` or `$`.
func (n *filterNode) isItemFilter() bool {
	return n.lexeme.typ == lexemeFilterAt || n.lexeme.typ == lexemeRoot
}

// isLiteral reports whether this node is any kind of literal operand.
func (n *filterNode) isLiteral() bool {
	return n.isStringLiteral() || n.isNumericLiteral() || n.isRegularExpressionLiteral()
}

// isStringLiteral reports whether this node is a string literal.
func (n *filterNode) isStringLiteral() bool {
	return n.lexeme.typ == lexemeFilterStringLiteral
}

// isNumericLiteral reports whether this node is an integer or float literal.
func (n *filterNode) isNumericLiteral() bool {
	return n.lexeme.typ == lexemeFilterFloatLiteral || n.lexeme.typ == lexemeFilterIntegerLiteral
}

// isRegularExpressionLiteral reports whether this node is a regex literal.
func (n *filterNode) isRegularExpressionLiteral() bool {
	return n.lexeme.typ == lexemeFilterRegularExpressionLiteral
}
// parser holds the state of the filter expression parser.
type parser struct {
	input []lexeme      // the lexemes being scanned
	pos   int           // current position in the input
	stack []*filterNode // parser stack of completed subtrees awaiting a binary operator
	tree  *filterNode   // parse tree built so far
}

// newParser creates a new parser for the input slice of lexemes.
func newParser(input []lexeme) *parser {
	l := &parser{
		input: input,
		stack: make([]*filterNode, 0),
	}
	return l
}

// push pushes a parse tree on the stack.
func (p *parser) push(tree *filterNode) {
	p.stack = append(p.stack, tree)
}

// pop pops a parse tree from the stack, which must be non-empty
// (popping an empty stack panics with an index-out-of-range error).
func (p *parser) pop() *filterNode {
	index := len(p.stack) - 1
	element := p.stack[index]
	p.stack = p.stack[:index]
	return element
}

// nextLexeme returns the next item from the input and advances the position.
// The caller must peek to ensure there is more input before calling nextLexeme.
func (p *parser) nextLexeme() lexeme {
	next := p.input[p.pos]
	p.pos++
	return next
}

// peek returns the next item from the input without consuming the item.
// At end of input it returns a synthetic lexemeEOF.
func (p *parser) peek() lexeme {
	if p.pos >= len(p.input) {
		return lexeme{lexemeEOF, ""}
	}
	return p.input[p.pos]
}
// parse parses the whole input and returns the resulting parse tree,
// or nil for empty input.
func (p *parser) parse() *filterNode {
	if p.peek().typ == lexemeEOF {
		return nil
	}
	p.expression()
	return p.tree
}

// expression parses: conjunction ( "||" conjunction )*
// Disjunctions are built left-associatively via the stack.
func (p *parser) expression() {
	p.conjunction()
	for p.peek().typ == lexemeFilterOr {
		p.push(p.tree)
		p.or()
	}
}

// or consumes a "||" operator and its right-hand conjunction, combining
// it with the left-hand tree previously pushed on the stack.
func (p *parser) or() {
	n := p.nextLexeme()
	p.conjunction()
	p.tree = &filterNode{
		lexeme:  n,
		subpath: []lexeme{},
		children: []*filterNode{
			p.pop(),
			p.tree,
		},
	}
}

// conjunction parses: basicFilter ( "&&" basicFilter )*
// Conjunctions bind tighter than disjunctions.
func (p *parser) conjunction() {
	p.basicFilter()
	for p.peek().typ == lexemeFilterAnd {
		p.push(p.tree)
		p.and()
	}
}

// and consumes a "&&" operator and its right-hand basic filter, combining
// it with the left-hand tree previously pushed on the stack.
func (p *parser) and() {
	n := p.nextLexeme()
	p.basicFilter()
	p.tree = &filterNode{
		lexeme:  n,
		subpath: []lexeme{},
		children: []*filterNode{
			p.pop(),
			p.tree,
		},
	}
}
// basicFilter consumes the next basic filter and sets it as the parser's
// tree. If a basic filter is not next, nil is set. A basic filter is a
// negation, a parenthesised expression, or a term optionally followed by a
// comparison/match operator and a second term.
func (p *parser) basicFilter() {
	n := p.peek()
	switch n.typ {
	case lexemeFilterNot:
		// Unary negation: wrap the following basic filter.
		p.nextLexeme()
		p.basicFilter()
		p.tree = &filterNode{
			lexeme:  n,
			subpath: []lexeme{},
			children: []*filterNode{
				p.tree,
			},
		}
		return
	case lexemeFilterOpenBracket:
		// Parenthesised subexpression. Note: a missing close bracket is
		// tolerated silently rather than reported as an error.
		p.nextLexeme()
		p.expression()
		if p.peek().typ == lexemeFilterCloseBracket {
			p.nextLexeme()
		}
		return
	}
	p.filterTerm()
	n = p.peek()
	if n.typ.isComparisonOrMatch() {
		// Binary comparison: left term is already parsed; parse the right
		// term and build the operator node over both.
		p.nextLexeme()
		filterTerm := p.tree
		p.filterTerm()
		p.tree = &filterNode{
			lexeme:  n,
			subpath: []lexeme{},
			children: []*filterNode{
				filterTerm,
				p.tree,
			},
		}
	}
}
// filterTerm consumes the next filter term and sets it as the parser's
// tree. If a filter term is not next, nil is set. A term is either a
// literal or a path rooted at `@`/`$`, whose subpath lexemes are collected
// until the enclosing filter ends.
func (p *parser) filterTerm() {
	n := p.peek()
	switch n.typ {
	case lexemeEOF, lexemeError:
		p.tree = nil
	case lexemeFilterAt, lexemeRoot:
		p.nextLexeme()
		subpath := []lexeme{}
		// Track nesting so that lexemes belonging to a nested filter inside
		// this path are kept as part of the subpath.
		filterNestingLevel := 1
	f:
		for {
			s := p.peek()
			switch s.typ {
			case lexemeIdentity, lexemeDotChild, lexemeBracketChild, lexemeRecursiveDescent, lexemeArraySubscript:
				// plain path components: always part of the subpath
			case lexemeFilterBegin:
				filterNestingLevel++
			case lexemeFilterEnd:
				filterNestingLevel--
				if filterNestingLevel == 0 {
					// end of the filter this term belongs to
					break f
				}
			case lexemeEOF:
				break f
			default:
				// allow any other lexemes only in a nested filter
				if filterNestingLevel == 1 {
					break f
				}
			}
			subpath = append(subpath, s)
			p.nextLexeme()
		}
		p.tree = &filterNode{
			lexeme:   n,
			subpath:  subpath,
			children: []*filterNode{},
		}
	case lexemeFilterIntegerLiteral, lexemeFilterFloatLiteral, lexemeFilterStringLiteral, lexemeFilterRegularExpressionLiteral:
		p.nextLexeme()
		p.tree = &filterNode{
			lexeme:   n,
			subpath:  []lexeme{},
			children: []*filterNode{},
		}
	}
}
package merkletree
import (
"bytes"
"errors"
"fmt"
)
// RootMismatchError is returned when a proof verifies to a root other than
// the expected one.
type RootMismatchError struct {
	ExpectedRoot   []byte
	CalculatedRoot []byte
}

// Error implements the error interface, showing both roots.
func (e RootMismatchError) Error() string {
	return fmt.Sprintf("calculated root:\n%v\n does not match expected root:\n%v", e.CalculatedRoot, e.ExpectedRoot)
}

// MerkleVerifier is a class which knows how to verify merkle inclusion and consistency proofs.
type MerkleVerifier struct {
	treeHasher *TreeHasher
}

// NewMerkleVerifier returns a new MerkleVerifier for a tree based on the passed in hasher.
func NewMerkleVerifier(h HasherFunc) MerkleVerifier {
	return MerkleVerifier{
		treeHasher: NewTreeHasher(h),
	}
}
// VerifyInclusionProof verifies the correctness of the proof given the
// passed in information about the tree and leaf. It recomputes the root
// from the proof and compares it to the expected root, returning a
// RootMismatchError on disagreement.
func (m MerkleVerifier) VerifyInclusionProof(leafIndex, treeSize int64, proof [][]byte, root []byte, leaf []byte) error {
	calcRoot, err := m.RootFromInclusionProof(leafIndex, treeSize, proof, leaf)
	if err != nil {
		return err
	}
	if len(calcRoot) == 0 {
		return errors.New("calculated empty root")
	}
	if !bytes.Equal(calcRoot, root) {
		return RootMismatchError{
			CalculatedRoot: calcRoot,
			ExpectedRoot:   root,
		}
	}
	return nil
}
// RootFromInclusionProof calculates the expected tree root given the proof
// and leaf.
//
// leafIndex is 1-based; treeSize is the number of leaves. The proof is the
// list of sibling hashes from the leaf up to (but excluding) the root. An
// error is returned for an out-of-range or zero leafIndex, or when the
// proof has too few or too many components for the claimed tree size.
func (m MerkleVerifier) RootFromInclusionProof(leafIndex, treeSize int64, proof [][]byte, leaf []byte) ([]byte, error) {
	if leafIndex > treeSize {
		return nil, fmt.Errorf("leafIndex %d > treeSize %d", leafIndex, treeSize)
	}
	if leafIndex == 0 {
		return nil, errors.New("leafIndex is zero")
	}
	// Work with 0-based node positions within the leaf layer.
	node := leafIndex - 1
	lastNode := treeSize - 1
	nodeHash := m.treeHasher.HashLeaf(leaf)
	proofIndex := 0
	for lastNode > 0 {
		if proofIndex == len(proof) {
			// Fix: the original message misspelled "insufficient".
			return nil, fmt.Errorf("insufficient number of proof components (%d) for treeSize %d", len(proof), treeSize)
		}
		if isRightChild(node) {
			nodeHash = m.treeHasher.HashChildren(proof[proofIndex], nodeHash)
			proofIndex++
		} else if node < lastNode {
			nodeHash = m.treeHasher.HashChildren(nodeHash, proof[proofIndex])
			proofIndex++
		}
		// Otherwise the sibling does not exist and the parent is a dummy
		// copy of this node; nothing to hash.
		node = parent(node)
		lastNode = parent(lastNode)
	}
	if proofIndex != len(proof) {
		return nil, fmt.Errorf("invalid proof, expected %d components, but have %d", proofIndex, len(proof))
	}
	return nodeHash, nil
}
// VerifyConsistencyProof checks that the passed in consistency proof is
// valid between the passed in tree snapshots. snapshot1/snapshot2 are tree
// sizes with snapshot1 <= snapshot2; root1/root2 are the corresponding
// roots. The proof allows reconstructing both roots from shared hashes.
func (m MerkleVerifier) VerifyConsistencyProof(snapshot1, snapshot2 int64, root1, root2 []byte, proof [][]byte) error {
	if snapshot1 > snapshot2 {
		return fmt.Errorf("snapshot1 (%d) > snapshot2 (%d)", snapshot1, snapshot2)
	}
	if snapshot1 == snapshot2 {
		// Identical snapshots must have identical roots and an empty proof.
		if !bytes.Equal(root1, root2) {
			return fmt.Errorf("root1:\n%v\ndoes not match root2:\n%v", root1, root2)
		}
		if len(proof) > 0 {
			return fmt.Errorf("root1 and root2 match, but proof is non-empty")
		}
		// proof ok
		return nil
	}
	if snapshot1 == 0 {
		// Any snapshot greater than 0 is consistent with snapshot 0.
		if len(proof) > 0 {
			return fmt.Errorf("expected empty proof, but provided proof has %d components", len(proof))
		}
		return nil
	}
	if len(proof) == 0 {
		return errors.New("empty proof")
	}
	node := snapshot1 - 1
	lastNode := snapshot2 - 1
	proofIndex := 0
	// Climb past complete right-child levels; those subtrees are identical
	// in both snapshots.
	for isRightChild(node) {
		node = parent(node)
		lastNode = parent(lastNode)
	}
	var node1Hash []byte
	var node2Hash []byte
	if node > 0 {
		// The first proof component seeds both reconstructions.
		node1Hash = proof[proofIndex]
		node2Hash = proof[proofIndex]
		proofIndex++
	} else {
		// The tree at snapshot1 was balanced, nothing to verify for root1.
		node1Hash = root1
		node2Hash = root1
	}
	// Climb to the root of the snapshot1 tree, folding proof components
	// into both running hashes as appropriate.
	for node > 0 {
		if proofIndex == len(proof) {
			return errors.New("insufficient number of proof components")
		}
		if isRightChild(node) {
			node1Hash = m.treeHasher.HashChildren(proof[proofIndex], node1Hash)
			node2Hash = m.treeHasher.HashChildren(proof[proofIndex], node2Hash)
			proofIndex++
		} else if node < lastNode {
			// The sibling only exists in the later tree. The parent in the snapshot1 tree is a dummy copy.
			node2Hash = m.treeHasher.HashChildren(node2Hash, proof[proofIndex])
			proofIndex++
		}
		// Else the sibling does not exist in either tree. Do nothing.
		node = parent(node)
		lastNode = parent(lastNode)
	}
	// Verify the first root.
	if !bytes.Equal(node1Hash, root1) {
		return fmt.Errorf("failed to verify root1:\n%v\ncalculated root of:\n%v\nfrom proof", root1, node1Hash)
	}
	// Continue climbing to the root of the snapshot2 tree.
	for lastNode > 0 {
		if proofIndex == len(proof) {
			return errors.New("can't verify newer root; insufficient number of proof components")
		}
		node2Hash = m.treeHasher.HashChildren(node2Hash, proof[proofIndex])
		proofIndex++
		lastNode = parent(lastNode)
	}
	// Verify the second root.
	if !bytes.Equal(node2Hash, root2) {
		return fmt.Errorf("failed to verify root2:\n%v\ncalculated root of:\n%v\nfrom proof", root2, node2Hash)
	}
	if proofIndex != len(proof) {
		return errors.New("proof has too many components")
	}
	// proof ok
	return nil
}
// parent returns the position of the parent of the node at the given
// position within its layer.
func parent(leafIndex int64) int64 {
	half := leafIndex >> 1
	return half
}

// isRightChild reports whether the node at the given position is a right
// child, i.e. whether its least-significant bit is set.
func isRightChild(leafIndex int64) bool {
	return leafIndex&1 != 0
}
package redisai
import (
"fmt"
"github.com/RedisAI/redisai-go/redisai/converters"
"github.com/gomodule/redigo/redis"
"reflect"
)
// TensorInterface is an interface that represents the skeleton of a tensor ( n-dimensional array of numerical data )
// needed to map it to a RedisAI Model with the proper operations
type TensorInterface interface {
	// Shape returns the size - in each dimension - of the tensor.
	Shape() []int64
	SetShape(shape []int64)
	// NumDims returns the number of dimensions of the tensor.
	NumDims() int64
	// Len returns the number of elements in the tensor.
	Len() int64
	Dtype() reflect.Type
	// Data returns the underlying tensor data
	Data() interface{}
	SetData(interface{})
}

// TensorGetTypeStrFromType maps a Go slice type to the RedisAI dtype
// string. Unsupported types ([]uint32, []uint64 and anything else) yield
// an error.
func TensorGetTypeStrFromType(dtype reflect.Type) (typestr string, err error) {
	switch dtype {
	case reflect.TypeOf(([]uint8)(nil)):
		typestr = TypeUint8
	case reflect.TypeOf(([]byte)(nil)):
		// []byte is identical to []uint8; kept for readability.
		typestr = TypeUint8
	case reflect.TypeOf(([]int)(nil)):
		// NOTE(review): []int maps to TypeInt32 even though int is 64-bit
		// on most platforms — confirm this truncation is intended.
		typestr = TypeInt32
	case reflect.TypeOf(([]int8)(nil)):
		typestr = TypeInt8
	case reflect.TypeOf(([]int16)(nil)):
		typestr = TypeInt16
	case reflect.TypeOf(([]int32)(nil)):
		typestr = TypeInt32
	case reflect.TypeOf(([]int64)(nil)):
		typestr = TypeInt64
	case reflect.TypeOf(([]uint)(nil)):
		// NOTE(review): []uint maps to TypeUint8, which looks suspicious
		// (uint is machine-word sized) — verify against the RedisAI spec.
		typestr = TypeUint8
	case reflect.TypeOf(([]uint16)(nil)):
		typestr = TypeUint16
	case reflect.TypeOf(([]float32)(nil)):
		typestr = TypeFloat32
	case reflect.TypeOf(([]float64)(nil)):
		typestr = TypeFloat64
	case reflect.TypeOf(([]uint32)(nil)):
		fallthrough
	// unsupported data type
	case reflect.TypeOf(([]uint64)(nil)):
		fallthrough
	// unsupported data type
	default:
		err = fmt.Errorf("redisai Tensor does not support the following type %v", dtype)
	}
	return
}
// tensorSetFlatArgs builds the AI.TENSORSET argument list for a tensor
// with the given key name, dtype string, dimensions and payload. []byte /
// []uint8 payloads are sent as BLOB; other supported slice types (and a
// plain string) are flattened as VALUES. Unsupported types return an error.
func tensorSetFlatArgs(name string, dt string, dims []int64, data interface{}) (redis.Args, error) {
	args := redis.Args{}
	var err error = nil
	args = args.Add(name, dt).AddFlat(dims)
	if data != nil {
		var dtype = reflect.TypeOf(data)
		switch dtype {
		case reflect.TypeOf(([]uint8)(nil)):
			fallthrough
		case reflect.TypeOf(([]byte)(nil)):
			// Raw bytes travel as a single BLOB argument.
			args = args.Add("BLOB", data)
		case reflect.TypeOf(""):
			fallthrough
		case reflect.TypeOf(([]int)(nil)):
			fallthrough
		case reflect.TypeOf(([]int8)(nil)):
			fallthrough
		case reflect.TypeOf(([]int16)(nil)):
			fallthrough
		case reflect.TypeOf(([]int32)(nil)):
			fallthrough
		case reflect.TypeOf(([]int64)(nil)):
			fallthrough
		case reflect.TypeOf(([]uint)(nil)):
			fallthrough
		case reflect.TypeOf(([]uint16)(nil)):
			fallthrough
		case reflect.TypeOf(([]float32)(nil)):
			fallthrough
		case reflect.TypeOf(([]float64)(nil)):
			// Numeric slices (and strings) are flattened element-by-element.
			args = args.Add("VALUES").AddFlat(data)
		// unsupported data type
		case reflect.TypeOf(([]uint32)(nil)):
			fallthrough
		// unsupported data type
		case reflect.TypeOf(([]uint64)(nil)):
			fallthrough
		// unsupported data type
		default:
			err = fmt.Errorf("redisai.tensorSetFlatArgs: AI.TENSOR does not support the following type %v", reflect.TypeOf(data))
		}
	}
	return args, err
}

// tensorSetInterfaceArgs builds AI.TENSORSET arguments from a
// TensorInterface, deriving the dtype string from the tensor's Go type.
func tensorSetInterfaceArgs(keyName string, tensorInterface TensorInterface) (args redis.Args, err error) {
	typestr, err := TensorGetTypeStrFromType(tensorInterface.Dtype())
	if err != nil {
		return
	}
	return tensorSetFlatArgs(keyName, typestr, tensorInterface.Shape(), tensorInterface.Data())
}

// tensorGetParseToInterface decodes an AI.TENSORGET reply into the given
// tensor, setting its shape and data in place.
// NOTE(review): the error from ProcessTensorGetReply is propagated, but
// shape/data are set even when it is non-nil — confirm intended.
func tensorGetParseToInterface(reply interface{}, tensor TensorInterface) (err error) {
	err, _, shape, data := ProcessTensorGetReply(reply, err)
	tensor.SetShape(shape)
	tensor.SetData(data)
	return
}
// ProcessTensorReplyValues converts the VALUES portion of a tensor reply
// into a typed Go slice according to dtype.
// NOTE(review): there is no default case — an unrecognised dtype silently
// returns (nil, nil); confirm whether that should be an error.
func ProcessTensorReplyValues(dtype string, reply interface{}) (data interface{}, err error) {
	switch dtype {
	case TypeFloat:
		data, err = converters.Float32s(reply, err)
	case TypeDouble:
		data, err = redis.Float64s(reply, err)
	case TypeInt8:
		data, err = converters.Int8s(reply, err)
	case TypeInt16:
		data, err = converters.Int16s(reply, err)
	case TypeInt32:
		data, err = redis.Ints(reply, err)
	case TypeInt64:
		data, err = redis.Int64s(reply, err)
	case TypeUint8:
		data, err = converters.Uint8s(reply, err)
	case TypeUint16:
		data, err = converters.Uint16s(reply, err)
	}
	return data, err
}

// ProcessTensorGetReply parses an AI.TENSORGET reply (a flat list of
// field-name/value pairs) into dtype, shape and data. "blob" replies are
// returned as raw bytes; "values" replies are converted per dtype.
// NOTE(review): decoding "values" relies on the "dtype" pair appearing
// earlier in the reply than the "values" pair — confirm the server
// guarantees that ordering.
func ProcessTensorGetReply(reply interface{}, errIn error) (err error, dtype string, shape []int64, data interface{}) {
	var replySlice []interface{}
	var key string
	err = errIn
	replySlice, err = redis.Values(reply, err)
	if err != nil {
		return
	}
	// The reply alternates field name and field value.
	for pos := 0; pos < len(replySlice); pos += 2 {
		key, err = redis.String(replySlice[pos], err)
		if err != nil {
			return
		}
		switch key {
		case "dtype":
			dtype, err = redis.String(replySlice[pos+1], err)
			if err != nil {
				return
			}
		case "shape":
			shape, err = redis.Int64s(replySlice[pos+1], err)
			if err != nil {
				return
			}
		case "blob":
			data, err = redis.Bytes(replySlice[pos+1], err)
			if err != nil {
				return
			}
		case "values":
			data, err = ProcessTensorReplyValues(dtype, replySlice[pos+1])
		}
	}
	return
}
package rog
// FOVAlgo is a field-of-view function: given a map, an (x, y) vantage
// point, a view radius and whether walls should be lit, it marks which
// cells of the map are viewable.
type FOVAlgo func(*Map, int, int, int, bool)
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}

// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// fovCircularCastRay casts a single ray from (xo, yo) towards (xd, yd),
// marking cells as seen until the ray leaves the radius (r2 is the squared
// radius; <= 0 disables the radius check), leaves the map, or passes a
// blocking cell. When walls is true the first blocking cell is marked too.
func fovCircularCastRay(fov *Map, xo, yo, xd, yd, r2 int, walls bool) {
	curx := xo
	cury := yo
	in := false
	blocked := false
	if fov.In(curx, cury) {
		in = true
		fov.seen[cury][curx] = true
	}
	for _, p := range Line(xo, yo, xd, yd) {
		curx = p.X
		cury = p.Y
		if r2 > 0 {
			// Stop once the ray exits the circular radius.
			curRadius := (curx-xo)*(curx-xo) + (cury-yo)*(cury-yo)
			if curRadius > r2 {
				break
			}
		}
		if fov.In(curx, cury) {
			in = true
			if !blocked && fov.blocked[cury][curx] {
				// First blocker: mark it (if walls) then stop on the next cell.
				blocked = true
			} else if blocked {
				break
			}
			if walls || !blocked {
				fov.seen[cury][curx] = true
			}
		} else if in {
			// The ray was inside the map and has now left it.
			break
		}
	}
}
// fovCircularPostProc fixes wall-lighting artifacts in the quadrant
// (x0,y0)-(x1,y1): for every visible non-blocking cell it also marks the
// adjacent blocking cells in the (dx, dy) direction as seen, so walls at
// the edge of vision are lit consistently.
func fovCircularPostProc(fov *Map, x0, y0, x1, y1, dx, dy int) {
	for cx := x0; cx <= x1; cx++ {
		for cy := y0; cy <= y1; cy++ {
			x2 := cx + dx
			y2 := cy + dy
			if fov.In(cx, cy) && fov.Look(cx, cy) && !fov.blocked[cy][cx] {
				// Horizontal neighbour.
				if x2 >= x0 && x2 <= x1 {
					if fov.In(x2, cy) && fov.blocked[cy][x2] {
						fov.seen[cy][x2] = true
					}
				}
				// Vertical neighbour.
				if y2 >= y0 && y2 <= y1 {
					if fov.In(cx, y2) && fov.blocked[y2][cx] {
						fov.seen[y2][cx] = true
					}
				}
				// Diagonal neighbour.
				if x2 >= x0 && x2 <= x1 && y2 >= y0 && y2 <= y1 {
					if fov.In(x2, y2) && fov.blocked[y2][x2] {
						fov.seen[y2][x2] = true
					}
				}
			}
		}
	}
}
// FOVCircular raycasts out from the vantage (x, y) in a circle of radius r
// (r <= 0 means the whole map), casting one ray to every cell on the
// perimeter of the bounding box, then post-processes each quadrant so that
// walls are lit consistently when walls is true.
func FOVCircular(fov *Map, x, y, r int, walls bool) {
	xo := 0
	yo := 0
	xmin := 0
	ymin := 0
	xmax := fov.Width()
	ymax := fov.Height()
	r2 := r * r
	if r > 0 {
		// Clamp the scan window to the circle's bounding box.
		xmin = max(0, x-r)
		ymin = max(0, y-r)
		xmax = min(fov.Width(), x+r+1)
		ymax = min(fov.Height(), y+r+1)
	}
	// Walk the four edges of the window, casting a ray to each edge cell.
	xo = xmin
	yo = ymin
	for xo < xmax {
		fovCircularCastRay(fov, x, y, xo, yo, r2, walls)
		xo++
	}
	xo = xmax - 1
	yo = ymin + 1
	for yo < ymax {
		fovCircularCastRay(fov, x, y, xo, yo, r2, walls)
		yo++
	}
	xo = xmax - 2
	yo = ymax - 1
	for xo >= 0 {
		fovCircularCastRay(fov, x, y, xo, yo, r2, walls)
		xo--
	}
	xo = xmin
	yo = ymax - 2
	for yo > 0 {
		fovCircularCastRay(fov, x, y, xo, yo, r2, walls)
		yo--
	}
	if walls {
		// One post-processing pass per quadrant around the vantage.
		fovCircularPostProc(fov, xmin, ymin, x, y, -1, -1)
		fovCircularPostProc(fov, x, ymin, xmax-1, y, 1, -1)
		fovCircularPostProc(fov, xmin, y, x, ymax-1, -1, 1)
		fovCircularPostProc(fov, x, y, xmax-1, ymax-1, 1, 1)
	}
}
// Quicksort is a divide and conquer algorithm. Quicksort first divides a large array into two smaller sub-arrays: the
// low elements and the high elements. Quicksort can then recursively sort the sub-arrays.
// The steps are:
// Pick an element, called a pivot, from the array.
// Partitioning: reorder the array so that all elements with values less than the pivot come before the pivot, while
// all elements with values greater than the pivot come after it (equal values can go either way). After this
// partitioning, the pivot is in its final position. This is called the partition operation.
// Recursively apply the above steps to the sub-array of elements with smaller values and separately to the sub-array
// of elements with greater values.
// The base case of the recursion is arrays of size zero or one, which never need to be sorted.
// The pivot selection and partitioning steps can be done in several different ways; the choice of specific
// implementation schemes greatly affects the algorithm's performance.
// source : https://en.wikipedia.org/wiki/Quicksort
package main
import (
"fmt"
"math/rand"
"time"
)
// swap exchanges the elements at positions i and j of arr.
func swap(arr []int, i int, j int) {
	arr[j], arr[i] = arr[i], arr[j]
}

// partition reorders arr[lo..hi] around the pivot arr[hi] (Lomuto scheme):
// elements smaller than the pivot end up to its left, the rest to its
// right. It returns the pivot's final index.
func partition(arr []int, lo, hi int) int {
	boundary := lo
	for cursor := lo; cursor < hi; cursor++ {
		if arr[cursor] < arr[hi] {
			swap(arr, cursor, boundary)
			boundary++
		}
	}
	swap(arr, boundary, hi)
	return boundary
}

// quickSort sorts arr[left..right] in place by recursive partitioning and
// returns the (same) slice.
func quickSort(arr []int, left int, right int) []int {
	if left < right {
		pivotIndex := partition(arr, left, right)
		quickSort(arr, left, pivotIndex-1)
		quickSort(arr, pivotIndex+1, right)
	}
	return arr
}

// QuickSort sorts the whole slice in place using quicksort and returns it.
func QuickSort(arr []int) []int {
	return quickSort(arr, 0, len(arr)-1)
}
// main fills a 100-element array with random values in [0, 10000000),
// sorts it with QuickSort, and prints before/after plus timing figures.
func main() {
	a := time.Now()
	var arr [100]int
	var slice = arr[:]
	// set up random
	source := rand.NewSource(time.Now().UnixNano())
	random := rand.New(source)
	// initialize an array with random numbers from 0 to 10000000
	for index := range arr {
		arr[index] = random.Intn(10000000)
	}
	// output before sort
	fmt.Printf("Before: %v\n", arr)
	b := time.Now()
	// sorting (in place; `slice` shares the array's backing storage)
	result := QuickSort(slice)
	// output after sort
	fmt.Printf("After: %v\n", result)
	fmt.Printf("Time for sort to complete: %v\n", time.Since(b))
	fmt.Printf("Time overall: %v\n", time.Since(a))
}
package iso20022
// Structured information supplied to enable the matching, ie, reconciliation, of a payment with the items that the payment is intended to settle, eg, commercial invoices in an accounts receivable system.
type StructuredRemittanceInformation2 struct {
	// Specifies the nature of the referred document/transaction, eg, invoice or credit note.
	ReferredDocumentType *DocumentType1Code `xml:"RfrdDocTp,omitempty"`
	// Date associated with the referred document, eg, date of issue.
	ReferredDocumentRelatedDate *ISODate `xml:"RfrdDocRltdDt,omitempty"`
	// Amount of money and currency of a document referred to in the remittance section. The amount is typically either the original amount due and payable, or the amount actually remitted for the referred document.
	ReferredDocumentAmount []*ReferredDocumentAmount1Choice `xml:"RfrdDocAmt,omitempty"`
	// Unique and unambiguous identification of a document that distinguishes that document from another document referred to in the remittance information, usually assigned by the originator of the referred document/transaction.
	DocumentReferenceNumber *Max35Text `xml:"DocRefNb,omitempty"`
	// Unique and unambiguous reference assigned by the creditor to refer to the payment transaction.
	//
	// Usage: if available, the initiating party should provide this reference in the structured remittance information, to enable reconciliation by the creditor upon receipt of the cash.
	//
	// If the business context requires the use of a creditor reference or a payment remit identification, and only one identifier can be passed through the end-to-end chain, the creditor's reference or payment remittance identification should be quoted in the end-to-end transaction identification.
	CreditorReference *Max35Text `xml:"CdtrRef,omitempty"`
	// Identification of the organization issuing the invoice when different than the creditor or final party.
	Invoicer *PartyIdentification1 `xml:"Invcr,omitempty"`
	// Identification of the party to whom an invoice is issued, when different than the originator or debtor.
	Invoicee *PartyIdentification1 `xml:"Invcee,omitempty"`
}

// SetReferredDocumentType sets the referred document type from a string.
func (s *StructuredRemittanceInformation2) SetReferredDocumentType(value string) {
	s.ReferredDocumentType = (*DocumentType1Code)(&value)
}

// SetReferredDocumentRelatedDate sets the referred document date from a string.
func (s *StructuredRemittanceInformation2) SetReferredDocumentRelatedDate(value string) {
	s.ReferredDocumentRelatedDate = (*ISODate)(&value)
}

// AddReferredDocumentAmount appends and returns a new, empty amount choice.
func (s *StructuredRemittanceInformation2) AddReferredDocumentAmount() *ReferredDocumentAmount1Choice {
	newValue := new(ReferredDocumentAmount1Choice)
	s.ReferredDocumentAmount = append(s.ReferredDocumentAmount, newValue)
	return newValue
}

// SetDocumentReferenceNumber sets the document reference number from a string.
func (s *StructuredRemittanceInformation2) SetDocumentReferenceNumber(value string) {
	s.DocumentReferenceNumber = (*Max35Text)(&value)
}

// SetCreditorReference sets the creditor reference from a string.
func (s *StructuredRemittanceInformation2) SetCreditorReference(value string) {
	s.CreditorReference = (*Max35Text)(&value)
}

// AddInvoicer allocates and returns the Invoicer party.
func (s *StructuredRemittanceInformation2) AddInvoicer() *PartyIdentification1 {
	s.Invoicer = new(PartyIdentification1)
	return s.Invoicer
}

// AddInvoicee allocates and returns the Invoicee party.
func (s *StructuredRemittanceInformation2) AddInvoicee() *PartyIdentification1 {
	s.Invoicee = new(PartyIdentification1)
	return s.Invoicee
}
package bruteForce
import (
"image/color"
compgeo "github.com/200sc/go-compgeo"
"github.com/200sc/go-compgeo/dcel"
"github.com/200sc/go-compgeo/dcel/pointLoc"
"github.com/200sc/go-compgeo/dcel/pointLoc/visualize"
"github.com/200sc/go-compgeo/geom"
)
// PlumbLine method is a name for a linear PIP check that
// shoots a ray out and checks how many times that ray intersects
// a polygon. The variation on a DCEL will iteratively perform
// plumb line on each face of the DCEL.
func PlumbLine(dc *dcel.DCEL) pointLoc.LocatesPoints {
	return &Iterator{dc}
}

// Iterator is a simple dcel wrapper for the following PointLocate method.
type Iterator struct {
	*dcel.DCEL
}
// PointLocate on an iterator performs plumb line on each
// of a DCEL's faces in order, returning the first face containing the
// point (vs[0], vs[1]). It returns (nil, nil) when no face contains the
// point, and an error when fewer than two coordinates are supplied.
// NOTE(review): the loop starts at index 1 — presumably Faces[0] is the
// unbounded outer face; confirm against the dcel package.
func (i *Iterator) PointLocate(vs ...float64) (*dcel.Face, error) {
	if len(vs) < 2 {
		return nil, compgeo.InsufficientDimensionsError{}
	}
	p := geom.NewPoint(vs[0], vs[1], 0)
	// Use the visualizing variant only when a visualization channel is open.
	containFn := contains
	if visualize.VisualCh != nil {
		containFn = VisualizeContains
	}
	for j := 1; j < len(i.Faces); j++ {
		f := i.Faces[j]
		if containFn(f, p) {
			return f, nil
		}
	}
	return nil, nil
}

// contains is the non-visualizing containment check, delegating to the face.
func contains(f *dcel.Face, p geom.D2) bool {
	return f.Contains(p)
}
// VisualizeContains returns whether a point lies inside f.
// We cannot assume that f is convex, or anything
// besides some polygon. That leaves us with a rather
// complex form of PIP (the even-odd ray-crossing rule) --
// It also sends visualization signals while doing this.
func VisualizeContains(f *dcel.Face, p geom.D2) bool {
	x := p.X()
	y := p.Y()
	contains := false
	// Fast reject against the face's bounding box.
	bounds := f.Bounds()
	min := bounds.At(0).(geom.D2)
	max := bounds.At(1).(geom.D2)
	visualize.HighlightColor = color.RGBA{0, 0, 255, 255}
	visualize.DrawFace(f)
	if x < min.Val(0) || x > max.Val(0) ||
		y < min.Val(1) || y > max.Val(1) {
		return contains
	}
	// Walk consecutive edge pairs around the outer boundary, toggling
	// containment on each edge the horizontal ray from p crosses.
	e1 := f.Outer.Prev
	e2 := f.Outer
	for {
		visualize.HighlightColor = color.RGBA{0, 0, 255, 255}
		visualize.DrawLine(e2.Origin, e1.Origin)
		if (e2.Y() > y) != (e1.Y() > y) {
			if x < (e1.X()-e2.X())*(y-e2.Y())/(e1.Y()-e2.Y())+e2.X() {
				visualize.HighlightColor = color.RGBA{0, 255, 0, 255}
				visualize.DrawLine(e2.Origin, e1.Origin)
				contains = !contains
			}
		}
		e1 = e1.Next
		e2 = e2.Next
		if e1 == f.Outer.Prev {
			break
		}
	}
	return contains
}
package bloom
import (
"fmt"
"math"
"github.com/damnever/bitarray"
"github.com/spaolacci/murmur3"
)
// Bloom interface encapsulates our useful features
type Bloom interface {
	Add(item string) error
	Check(item string) (bool, error)
}

// bloom is the concrete filter: k murmur3 hashers probing an m-bit array.
type bloom struct {
	// Public Vars
	Capacity  int     // expected number of items (n)
	ErrorRate float64 // desired false-positive rate (p)
	// Private vars
	numHashes    int                // k: number of hash functions
	hashFuncs    []murmur3.Hash128  // stateful hashers; must be Reset between uses
	bitArraySize int                // m: number of bits
	bitArray     *bitarray.BitArray // the bit storage itself
}
// New creates an instance of the bloom filter sized for the given capacity
// and error rate: it derives the optimal bit-array size and hash count,
// then builds that many murmur3 hashers seeded with the capacity.
//
// NOTE(review): the bound check allows errorRate up to 100, but the value
// is used as a probability in optimalBitArraySize — values >= 1 produce a
// non-positive bit-array size; confirm whether the bound should be (0, 1).
// NOTE(review): the exported New returns the unexported type bloom
// (golint: exported func returns unexported type), and prints debug output
// with fmt.Printf — consider returning the Bloom interface and dropping
// the print.
func New(capacity int, errorRate float64) (bloom, error) {
	/*
		Initialize the bloom filter here, need to do a few things
		- Initialize the hash functions
		- Create the hash array
		- hopefully not die
	*/
	var b bloom
	// Preconditions:
	if capacity == 0 {
		return b, fmt.Errorf("capacity cannot be zero")
	}
	if !(errorRate > 0) || !(errorRate < 100) {
		return b, fmt.Errorf("probability must be greater than 0 and less than 100")
	}
	// Initialize the hash functions
	baSize := optimalBitArraySize(float64(capacity), errorRate)
	nHashes := optimalHashFunk(baSize, capacity)
	hashers := make([]murmur3.Hash128, 0)
	for i := 0; i < nHashes; i++ {
		hashers = append(hashers, murmur3.New128WithSeed(uint32(capacity)))
	}
	b = bloom{
		Capacity:     capacity,
		ErrorRate:    errorRate,
		hashFuncs:    hashers,
		bitArray:     bitarray.New(baSize),
		bitArraySize: baSize,
		numHashes:    nHashes,
	}
	fmt.Printf("created with array size: %v, using %v hash functions.\n", b.bitArraySize, b.numHashes)
	return b, nil
}
// optimalBitArraySize returns the classic optimal bloom-filter bit count
// m = -n*ln(p) / (ln 2)^2 for n items at false-positive rate p, truncated
// to an int.
func optimalBitArraySize(n, p float64) int {
	numerator := -(n * math.Log(p))
	denominator := math.Pow(math.Log(2), 2)
	return int(numerator / denominator)
}

// optimalHashFunk returns the optimal number of hash functions
// k = (m/n) * ln 2 for an m-bit array holding n items, truncated to an int.
func optimalHashFunk(m, n int) int {
	ratio := float64(m) / float64(n)
	return int(ratio * math.Log(2))
}
// Add inserts item into the filter by setting one bit per hash function.
//
// The murmur3 hashers are stateful and shared across calls, so each one is
// Reset as soon as its sum has been taken — including on error paths,
// which the previous version skipped, leaving a hasher with residual bytes
// that would corrupt every subsequent Add/Check on that hasher.
func (b bloom) Add(item string) error {
	for i := 0; i < b.numHashes; i++ {
		h := b.hashFuncs[i]
		_, err := h.Write([]byte(item))
		if err != nil {
			h.Reset()
			return fmt.Errorf("failed to hash the item, %v", err)
		}
		indexPre, _ := h.Sum128()
		// Reset immediately so the hasher is clean for the next use.
		h.Reset()
		// Map the 128-bit sum onto a bit position.
		index := indexPre % uint64(b.bitArraySize)
		if _, err = b.bitArray.Put(int(index), 1); err != nil {
			return fmt.Errorf("failed to put bitarray item: %v", index)
		}
	}
	return nil
}
// Check answers back whether the item is definitely not in the set (true) or might be in the set (false)
func (b bloom) Check(item string) (bool, error) {
for i := 0; i < b.numHashes; i++ {
// Get the hash value
_, err := b.hashFuncs[i].Write([]byte(item))
if err != nil {
return false, fmt.Errorf("failed to hash the item, %v", err)
}
indexPre, _ := b.hashFuncs[i].Sum128()
// fmt.Printf("indexPre: %v\n", indexPre)
index := indexPre % uint64(b.bitArraySize)
// fmt.Printf("index: %v\n", index)
// Check for existence
res, err := b.bitArray.Get(int(index))
// fmt.Printf("res: %v\n", res)
if err != nil {
return false, fmt.Errorf("failed to check index, %v", err)
}
if res == 1 {
return true, nil
}
b.hashFuncs[i].Reset()
}
return false, nil
} | pkg/bloom/bloom.go | 0.643441 | 0.439687 | bloom.go | starcoder |
package strategy
import (
"github.com/zimmski/tavor/log"
"github.com/zimmski/tavor/rand"
"github.com/zimmski/tavor/token"
"github.com/zimmski/tavor/token/sequences"
)
// RandomStrategy implements a fuzzing strategy that generates a random permutation of a token graph.
// The strategy does exactly one iteration which permutates at random all reachable tokens in the graph. The determinism is dependent on the random generator and is therefore for example deterministic if a seed for the random generator produces always the same outputs.
type RandomStrategy struct {
	root token.Token // root of the token graph to permute
}

// NewRandomStrategy returns a new instance of the random fuzzing strategy
func NewRandomStrategy(tok token.Token) *RandomStrategy {
	return &RandomStrategy{
		root: tok,
	}
}

// init registers this strategy under the name "random".
func init() {
	Register("random", func(tok token.Token) Strategy {
		return NewRandomStrategy(tok)
	})
}
// Fuzz starts the first iteration of the fuzzing strategy returning a channel which controls the iteration flow.
// The channel returns a value if the iteration is complete and waits with calculating the next iteration until a value is put in. The channel is automatically closed when there are no more iterations. The error return argument is not nil if an error occurs during the setup of the fuzzing strategy.
func (s *RandomStrategy) Fuzz(r rand.Rand) (chan struct{}, error) {
	// An endless loop in the graph would make the walk non-terminating.
	if token.LoopExists(s.root) {
		return nil, &Error{
			Message: "found endless loop in graph. Cannot proceed.",
			Type:    ErrorEndlessLoopDetected,
		}
	}
	continueFuzzing := make(chan struct{})
	go func() {
		log.Debug("start random fuzzing routine")
		// Single iteration: permute the whole graph, then fix up sequences.
		s.fuzz(s.root, r, token.NewVariableScope())
		s.fuzzYADDA(s.root, r)
		log.Debug("done with fuzzing step")
		// done with the last fuzzing step
		continueFuzzing <- struct{}{}
		log.Debug("finished fuzzing. Wait till the outside is ready to close.")
		// The caller signals readiness by sending (or closes the channel).
		if _, ok := <-continueFuzzing; ok {
			log.Debug("close fuzzing channel")
			close(continueFuzzing)
		}
	}()
	return continueFuzzing, nil
}
// fuzz picks a random permutation for tok and recurses into its children,
// pushing/popping a variable scope around scoping tokens and skipping
// children of tokens that report Follow() == false.
func (s *RandomStrategy) fuzz(tok token.Token, r rand.Rand, variableScope *token.VariableScope) {
	log.Debugf("Fuzz (%p)%#v with maxPermutations %d", tok, tok, tok.Permutations())
	if t, ok := tok.(token.Scoping); ok && t.Scoping() {
		variableScope = variableScope.Push()
	}
	// Permutation indices are 1-based, hence the +1.
	err := tok.Permutation(uint(r.Int63n(int64(tok.Permutations())) + 1))
	if err != nil {
		log.Panic(err)
	}
	if t, ok := tok.(token.Follow); !ok || t.Follow() {
		switch t := tok.(type) {
		case token.ForwardToken:
			if v := t.Get(); v != nil {
				s.fuzz(v, r, variableScope)
			}
		case token.ListToken:
			l := t.Len()
			for i := 0; i < l; i++ {
				c, _ := t.Get(i)
				s.fuzz(c, r, variableScope)
			}
		}
	}
	if t, ok := tok.(token.Scoping); ok && t.Scoping() {
		variableScope = variableScope.Pop()
	}
}
// fuzzYADDA re-permutes SequenceExistingItem tokens after resetting scopes
// and reset-tokens, so that sequence references created during the main
// fuzz pass (e.g. by dynamic repeats cloning on reset) get fresh values.
func (s *RandomStrategy) fuzzYADDA(root token.Token, r rand.Rand) {
	// TODO FIXME AND FIXME FIXME FIXME this should be done automatically somehow
	// since this doesn't work in other heuristics...
	// especially the fuzz again part is tricky. the whole reason is because of dynamic repeats that clone during a reset. so the "reset" or regenerating of new child tokens has to be done better

	token.ResetCombinedScope(root)
	token.ResetResetTokens(root)
	token.ResetCombinedScope(root)

	err := token.Walk(root, func(tok token.Token) error {
		switch tok.(type) {
		case *sequences.SequenceExistingItem:
			log.Debugf("Fuzz again %p(%#v)", tok, tok)

			// Permutation indices are 1-based, hence the +1.
			err := tok.Permutation(uint(r.Int63n(int64(tok.Permutations())) + 1))
			if err != nil {
				log.Panic(err)
			}
		}

		return nil
	})
	if err != nil {
		panic(err)
	}
}
package srg
import (
"github.com/serulian/compiler/compilergraph"
"github.com/serulian/compiler/sourceshape"
)
// SRGImplementableIterator is an iterator of SRGImplementable's.
type SRGImplementableIterator struct {
	nodeIterator compilergraph.NodeIterator
	srg          *SRG // The parent SRG.
}

// Next advances the iterator, reporting whether another node exists.
func (sii SRGImplementableIterator) Next() bool {
	return sii.nodeIterator.Next()
}

// Implementable returns the implementable at the iterator's current position.
func (sii SRGImplementableIterator) Implementable() SRGImplementable {
	return SRGImplementable{sii.nodeIterator.Node(), sii.srg}
}

// SRGImplementable wraps a node that can have a body.
type SRGImplementable struct {
	compilergraph.GraphNode
	srg *SRG // The parent SRG.
}

// Body returns the statement block forming the implementation body for
// this implementable, if any.
func (m SRGImplementable) Body() (compilergraph.GraphNode, bool) {
	return m.TryGetNode(sourceshape.NodePredicateBody)
}

// Name returns the name of the implementable, if any. Only member-backed
// implementables (not lambdas) have names.
func (m SRGImplementable) Name() (string, bool) {
	if m.IsMember() {
		return m.ContainingMember().Name()
	}

	return "", false
}
// Parameters returns the parameters defined on this implementable, if any.
// Members delegate to their containing member; lambda expressions collect
// both declared and inferred parameters; anything else yields an empty
// slice.
func (m SRGImplementable) Parameters() []SRGParameter {
	// If this is a member, return its parameters.
	if m.IsMember() {
		return m.ContainingMember().Parameters()
	}

	// Otherwise, check for a function lambda (of either kind) and return its
	// parameters.
	switch m.GraphNode.Kind() {
	case sourceshape.NodeTypeLambdaExpression:
		var parameters = make([]SRGParameter, 0)
		// Query both explicit and inferred parameter edges.
		pit := m.GraphNode.StartQuery().
			Out(sourceshape.NodeLambdaExpressionParameter, sourceshape.NodeLambdaExpressionInferredParameter).
			BuildNodeIterator()
		for pit.Next() {
			parameters = append(parameters, SRGParameter{pit.Node(), m.srg})
		}
		return parameters

	default:
		return make([]SRGParameter, 0)
	}
}
// Node returns the underlying node for this implementable in the SRG.
func (m SRGImplementable) Node() compilergraph.GraphNode {
	return m.GraphNode
}

// ContainingMember returns the containing member of this implementable. If the node is,
// itself, a member, itself is returned. Property getter/setter blocks are
// resolved to their parent property via the incoming getter/setter edge.
// Panics if no containing member can be found.
func (m SRGImplementable) ContainingMember() SRGMember {
	if m.IsMember() {
		return SRGMember{m.GraphNode, m.srg}
	}

	if parentProp, found := m.GraphNode.TryGetIncomingNode(sourceshape.NodePropertyGetter); found {
		return SRGMember{parentProp, m.srg}
	}

	if parentProp, found := m.GraphNode.TryGetIncomingNode(sourceshape.NodePropertySetter); found {
		return SRGMember{parentProp, m.srg}
	}

	panic("No containing member found")
}
// IsPropertySetter returns true if this implementable is a property setter.
// The containing member must be a property and this node must be that
// property's setter block (compared by node ID).
func (m SRGImplementable) IsPropertySetter() bool {
	containingMember := m.ContainingMember()
	if containingMember.MemberKind() != PropertyMember {
		return false
	}

	setter, found := containingMember.Setter()
	if !found {
		return false
	}

	return m.GraphNode.NodeId == setter.NodeId
}
// IsMember returns true if this implementable is an SRGMember, i.e. the
// wrapped node is one of the member node kinds (constructor, function,
// property, operator, field or variable).
func (m SRGImplementable) IsMember() bool {
	switch m.GraphNode.Kind() {
	case sourceshape.NodeTypeConstructor,
		sourceshape.NodeTypeFunction,
		sourceshape.NodeTypeProperty,
		sourceshape.NodeTypeOperator,
		sourceshape.NodeTypeField,
		sourceshape.NodeTypeVariable:
		return true

	default:
		return false
	}
}
// AsImplementable returns the given node as an SRGImplementable (if applicable).
// It accepts the same member node kinds as IsMember, plus property blocks;
// any other node kind yields a zero value and false.
func (g *SRG) AsImplementable(node compilergraph.GraphNode) (SRGImplementable, bool) {
	switch node.Kind() {
	case sourceshape.NodeTypeConstructor:
		fallthrough

	case sourceshape.NodeTypeFunction:
		fallthrough

	case sourceshape.NodeTypeProperty:
		fallthrough

	case sourceshape.NodeTypeOperator:
		fallthrough

	case sourceshape.NodeTypeField:
		fallthrough

	case sourceshape.NodeTypeVariable:
		fallthrough

	case sourceshape.NodeTypePropertyBlock:
		return SRGImplementable{node, g}, true

	default:
		return SRGImplementable{}, false
	}
} | graphs/srg/implementable.go | 0.756178 | 0.504455 | implementable.go | starcoder |
package mario
// AllOne maintains string keys with counts. Frequencies are kept in a
// doubly linked list whose nodes each own a list of the keys currently at
// that frequency; GetMinKey reads from the head side of the list and
// GetMaxKey from the tail side, so the list is ordered by frequency.
type AllOne struct {
	list        *frequencyList         // freq list
	freqNodeMap map[int]*frequencyNode // freq - freq node
	keyNodeMap  map[string]*keysNode   // key - key node
}

// Constructor returns an initialized, empty AllOne structure.
func Constructor() AllOne {
	return AllOne{
		list:        newFrequencyList(),
		freqNodeMap: make(map[int]*frequencyNode),
		keyNodeMap:  make(map[string]*keysNode),
	}
}
// Inc increments the count of key, inserting it with count 1 if absent.
// The key node is moved from its current frequency node to the node for
// freq+1, creating that frequency node (spliced in after the previous one)
// and discarding the old one if it becomes empty.
func (a *AllOne) Inc(key string) {
	// prevFreqNode is the node after which a newly created frequency node
	// must be inserted; for a brand-new key that is the list head.
	prevFreqNode := a.list.head

	keyNode, ok := a.keyNodeMap[key]
	if !ok {
		keyNode = &keysNode{
			key:  key,
			freq: 1,
		}
		a.keyNodeMap[key] = keyNode
	} else {
		currFreqNode, _ := a.freqNodeMap[keyNode.freq] // if the keyNode exists, its freqNode is assumed to exist as well
		prevFreqNode = currFreqNode

		// Remove keyNode from currFreqNode; if that empties the frequency
		// node, unlink it and step prevFreqNode back so the new frequency
		// node is inserted at the right position.
		removeKeyNode(keyNode)
		if currFreqNode.keyList.isEmpty() {
			prevFreqNode = prevFreqNode.prev
			removeFreqNode(currFreqNode)
			delete(a.freqNodeMap, keyNode.freq)
		}

		keyNode.freq++
	}

	// Attach keyNode to the frequency node for its new count, creating it
	// on demand directly after prevFreqNode to keep the list ordered.
	nextFreqNode, ok := a.freqNodeMap[keyNode.freq]
	if !ok {
		nextFreqNode = &frequencyNode{
			freq:    keyNode.freq,
			keyList: newKeysList(),
		}
		a.freqNodeMap[keyNode.freq] = nextFreqNode
		addAfterFreqNode(prevFreqNode, nextFreqNode)
	}
	nextFreqNode.keyList.add(keyNode)
}
// Dec decrements the count of key, removing the key entirely when its
// count reaches 0. A no-op for unknown keys. Mirrors Inc: the key node is
// moved to the frequency node for freq-1, which is created (after the
// previous frequency node) if needed.
func (a *AllOne) Dec(key string) {
	keyNode, ok := a.keyNodeMap[key]
	if !ok {
		return
	}

	currFreqNode, _ := a.freqNodeMap[keyNode.freq]
	prevFreqNode := currFreqNode.prev

	// Detach keyNode from its current frequency node, dropping the
	// frequency node if it becomes empty.
	removeKeyNode(keyNode)
	if currFreqNode.keyList.isEmpty() {
		removeFreqNode(currFreqNode)
		delete(a.freqNodeMap, currFreqNode.freq)
	}

	// Count would drop to 0: forget the key completely.
	if keyNode.freq <= 1 {
		delete(a.keyNodeMap, key)
		return
	}

	// Otherwise re-home the key under freq-1, creating that frequency node
	// on demand so the list stays ordered.
	keyNode.freq--

	freqNodeSubOne, ok := a.freqNodeMap[keyNode.freq]
	if !ok {
		freqNodeSubOne = &frequencyNode{
			freq:    keyNode.freq,
			keyList: newKeysList(),
		}
		a.freqNodeMap[keyNode.freq] = freqNodeSubOne
		addAfterFreqNode(prevFreqNode, freqNodeSubOne)
	}
	freqNodeSubOne.keyList.add(keyNode)
}
// GetMaxKey returns one key with the highest count, or "" when empty.
// The frequency node adjacent to the list tail holds the highest count;
// the first key in its key list is returned.
func (a *AllOne) GetMaxKey() string {
	if a.list.isEmpty() {
		return ""
	}
	return a.list.tail.prev.keyList.head.next.key
}

// GetMinKey returns one key with the lowest count, or "" when empty.
// The frequency node adjacent to the list head holds the lowest count.
func (a *AllOne) GetMinKey() string {
	if a.list.isEmpty() {
		return ""
	}
	return a.list.head.next.keyList.head.next.key
} | solutions/1-1000/401-500/431-440/432/main.go | 0.513912 | 0.403743 | main.go | starcoder |
package strdist
import (
"strings"
"unicode/utf8"
)
// DfltHammingFinder is a HammingFinder with some suitable default values
// already set.
var DfltHammingFinder *Finder
// CaseBlindHammingFinder is a HammingFinder with some suitable default
// values already set.
var CaseBlindHammingFinder *Finder
func init() {
var err error
DfltHammingFinder, err =
NewHammingFinder(DfltMinStrLen, DfltHammingThreshold, NoCaseChange)
if err != nil {
panic("Cannot construct the default HammingFinder: " + err.Error())
}
CaseBlindHammingFinder, err =
NewHammingFinder(DfltMinStrLen, DfltHammingThreshold, ForceToLower)
if err != nil {
panic("Cannot construct the case-blind HammingFinder: " +
err.Error())
}
}
// DfltHammingThreshold is a default value for deciding whether a distance
// between two strings is sufficiently small for them to be considered
// similar
const DfltHammingThreshold = 5.0
// HammingAlgo encapsulates the details needed to provide the Hamming distance.
type HammingAlgo struct {
	s string // The prepped target string (lower-cased when case-blind).
}

// NewHammingFinder returns a new Finder having a Hamming algo and an
// error which will be non-nil if the parameters are invalid - see
// NewFinder for details.
func NewHammingFinder(minStrLen int, threshold float64, cm CaseMod) (*Finder, error) {
	return NewFinder(minStrLen, threshold, cm,
		&HammingAlgo{})
}
// Prep for a HammingAlgo will pre-calculate the lower-case equivalent for
// the target string if the caseMod is set to ForceToLower.
func (a *HammingAlgo) Prep(s string, cm CaseMod) {
	target := s
	if cm == ForceToLower {
		target = strings.ToLower(s)
	}
	a.s = target
}
// Dist for a HammingAlgo will calculate the Hamming distance between the two
// strings. The first parameter is ignored: Prep has already stored the
// (possibly lower-cased) target in a.s, so only s needs converting here
// when case-blind comparison is requested.
func (a *HammingAlgo) Dist(_, s string, cm CaseMod) float64 {
	if cm == ForceToLower {
		return HammingDistance(a.s, strings.ToLower(s))
	}
	return HammingDistance(a.s, s)
}
// HammingDistance returns the Hamming distance of the two strings. If the
// strings have different lengths the distance is increased by the
// difference in their rune counts. Comparison is rune-wise (UTF-8 aware),
// not byte-wise.
func HammingDistance(a, b string) float64 {
	diff := utf8.RuneCountInString(b) - utf8.RuneCountInString(a)
	if diff < 0 {
		diff = -diff
		// Ensure a is the string with fewer (or equal) runes.
		a, b = b, a
	}

	offset := 0
	for _, ra := range a {
		rb, width := utf8.DecodeRuneInString(b[offset:])
		offset += width
		if ra != rb {
			diff++
		}
	}
	return float64(diff)
}
package gonpy
//go:generate go run gen.go defs.template
import (
"encoding/binary"
"fmt"
"io"
"os"
"regexp"
"strconv"
"strings"
)
// NpyReader can read data from a Numpy binary array into a Go slice.
type NpyReader struct {
// The numpy data type of the array
Dtype string
// The endianness of the binary data
Endian binary.ByteOrder
// The version number of the file format
Version int
// The shape of the array as specified in the file.
Shape []int
// If true, the data are flattened in column-major order,
// otherwise they are flattened in row-major order.
ColumnMajor bool
// Read the data from this source
r io.Reader
// Number of elements in the array to be read (obtained from
// header).
nElt int
}
// NewFileReader returns a NpyReader that can be used to obtain array
// data from the given named file. Call one of the GetXXX methods to
// obtain the data as a Go slice. On success the returned reader retains
// ownership of the open file.
func NewFileReader(f string) (*NpyReader, error) {
	fid, err := os.Open(f)
	if err != nil {
		return nil, err
	}
	r, err := NewReader(fid)
	if err != nil {
		// Close the file on failure so the descriptor is not leaked.
		fid.Close()
		return nil, err
	}
	return r, nil
}
// parseShape parses the 'shape' entry of an npy file header, returning
// the dimensions and the total element count (product of dimensions).
// A scalar array (empty shape tuple) yields an empty shape and a count
// of 1. An error is returned when the shape entry is missing or when a
// dimension is not an integer.
func parseShape(header []byte) ([]int, int, error) {
	re := regexp.MustCompile(`'shape':\s*\(([^\(]*)\)`)
	ma := re.FindSubmatch(header)
	if ma == nil {
		return nil, 0, fmt.Errorf("Shape not found in header.\n")
	}

	shapes := string(ma[1])
	shape := make([]int, 0)
	nElt := 1
	for _, s := range strings.Split(shapes, ",") {
		s = strings.Trim(s, " ")
		if len(s) == 0 {
			// Trailing comma in a tuple such as "(3,)" or an empty tuple.
			break
		}
		x, err := strconv.Atoi(s)
		if err != nil {
			// Report malformed dimensions to the caller instead of
			// panicking; the function already has an error return.
			return nil, 0, fmt.Errorf("invalid dimension %q in shape: %v", s, err)
		}
		nElt *= x
		shape = append(shape, x)
	}

	return shape, nElt, nil
}
// NewReader returns a NpyReader that can be used to obtain array data
// as a Go slice. The Go slice has a type matching the dtype in the
// Numpy file. Call one of the GetXX methods to obtain the slice.
func NewReader(r io.Reader) (*NpyReader, error) {
// Check the magic number
b := make([]byte, 6)
n, err := r.Read(b)
if err != nil {
return nil, err
} else if n != 6 {
return nil, fmt.Errorf("Input appears to be truncated")
} else if string(b) != "\x93NUMPY" {
return nil, fmt.Errorf("Not npy format data (wrong magic number)")
}
// Get the major version number
var version uint8
err = binary.Read(r, binary.LittleEndian, &version)
if err != nil {
return nil, err
}
if version != 1 && version != 2 {
return nil, fmt.Errorf("Invalid version number %d", version)
}
// Check the minor version number
var minor uint8
err = binary.Read(r, binary.LittleEndian, &minor)
if err != nil {
return nil, err
}
if minor != 0 {
return nil, fmt.Errorf("Invalid minor version number %d", version)
}
// Get the size in bytes of the header
var headerLength int
if version == 1 {
var hl uint16
err = binary.Read(r, binary.LittleEndian, &hl)
headerLength = int(hl)
} else {
var hl uint32
err = binary.Read(r, binary.LittleEndian, &hl)
headerLength = int(hl)
}
if err != nil {
return nil, err
}
// Read the header
header := make([]byte, headerLength)
_, err = r.Read(header)
if err != nil {
return nil, err
}
// Get the dtype
re := regexp.MustCompile(`'descr':\s*'([^']*)'`)
ma := re.FindSubmatch(header)
if ma == nil {
return nil, fmt.Errorf("dtype description not found in header")
}
dtype := string(ma[1])
// Get the order information
re = regexp.MustCompile(`'fortran_order':\s*(False|True)`)
ma = re.FindSubmatch(header)
if ma == nil {
return nil, fmt.Errorf("fortran_order not found in header")
}
fortranOrder := string(ma[1])
// Get the shape information
shape, nElt, err := parseShape(header)
if err != nil {
return nil, err
}
var endian binary.ByteOrder = binary.LittleEndian
if strings.HasPrefix(dtype, ">") {
endian = binary.BigEndian
}
rdr := &NpyReader{
Dtype: dtype[1:],
ColumnMajor: fortranOrder == "True",
Shape: shape,
Endian: endian,
Version: int(version),
nElt: nElt,
r: r,
}
return rdr, nil
} | reader.go | 0.663669 | 0.441131 | reader.go | starcoder |
package cipherio;
// A reader specifically for encrypted data.
// This will wrap another reader that is expected to deliver encrypted content.
// This will buffer any content necessary to get enough data to decrypt chunks.
import (
"crypto/cipher"
"io"
"syscall"
"github.com/pkg/errors"
"github.com/eriq-augustine/elfs/util"
)
// A ReadSeekCloser that will read an encrypted file, decrypt them, and return the cleartext
// all in chunks of size IO_BLOCK_SIZE.
// Note that the cleartext will be in chunks of IO_BLOCK_SIZE,
// but the cipertext read will be slightly larger.
type CipherReader struct {
gcm cipher.AEAD
ciphertextBuffer []byte
// We will always read from disk in chunks of IO_BLOCK_SIZE (+ cipher overhead).
// So, we will need to keep a buffer on hand of what we have read from disk that the reader has not
// yet requested.
cleartextBuffer []byte
// We need to keep the original slice around so we can resize without reallocating.
// We will be reslicing the cleartextBuffer as the cleartext is requested.
originalCleartextBuffer []byte
// Since we may seek, we need to keep around the original IV.
iv []byte
originalIV []byte
reader util.ReadSeekCloser
// These are NOT offsets into the respective buffers,
// there are absolute offsets into the cipher/clear text files.
// Note that one cannot always be translated into the other using the bocksize and cipher overhead.
// This is because the ciphertextOffset is the offset into the file on disk.
// The cleartextOffset, however, is the offset into the bytes we have passed the user.
// So the cipher offset may be further along (after blocksize translation) because we may have
// data in the cleartext buffer.
ciphertextOffset int64
cleartextOffset int64
cleartextSize int64
cipherBlockSize int64
}
// NewCipherReader wraps the given reader (which must deliver AES-GCM
// ciphertext written in IO_BLOCK_SIZE cleartext chunks) and returns a
// reader yielding the decrypted cleartext.
// Caller gives up control of the reader.
func NewCipherReader(reader util.ReadSeekCloser,
		blockCipher cipher.Block, rawIV []byte,
		ciphertextSize int64) (util.ReadSeekCloser, error) {
	gcm, err := cipher.NewGCM(blockCipher);
	if err != nil {
		return nil, errors.WithStack(err);
	}

	var cleartextBuffer []byte = make([]byte, 0, IO_BLOCK_SIZE);
	// Each on-disk block is one cleartext block plus the GCM tag overhead.
	var cipherBlockSize int64 = int64(IO_BLOCK_SIZE + gcm.Overhead());

	// Note that this is exact since we can't write partial blocks.
	var numBlocks int64 = ciphertextSize / cipherBlockSize;
	// Full blocks contribute IO_BLOCK_SIZE each; the trailing partial block
	// contributes its ciphertext length minus the GCM overhead.
	// NOTE(review): when ciphertextSize is an exact multiple of
	// cipherBlockSize (i.e. the final block is full), the remainder term is
	// -Overhead(), undercounting the cleartext size — confirm the writer
	// guarantees a partial final block.
	var cleartextSize int64 = int64(numBlocks * IO_BLOCK_SIZE) + (ciphertextSize - (numBlocks * cipherBlockSize) - int64(gcm.Overhead()));

	var rtn CipherReader = CipherReader{
		gcm: gcm,
		// Allocate enough room for the ciphertext.
		ciphertextBuffer: make([]byte, 0, IO_BLOCK_SIZE + gcm.Overhead()),
		cleartextBuffer: cleartextBuffer,
		originalCleartextBuffer: cleartextBuffer,
		// Make a copy of the IV since we will be incrementing it for each chunk.
		iv: append([]byte(nil), rawIV...),
		originalIV: append([]byte(nil), rawIV...),
		reader: reader,
		ciphertextOffset: 0,
		cleartextOffset: 0,
		cleartextSize: cleartextSize,
		cipherBlockSize: cipherBlockSize,
	};

	return &rtn, nil;
}
// Read fills outBuffer with decrypted cleartext, decrypting additional
// blocks from the underlying reader as needed. It returns the number of
// bytes copied, with io.EOF once the logical end of the cleartext is
// reached (a later Seek backwards allows further reads).
func (this *CipherReader) Read(outBuffer []byte) (int, error) {
	// We are done if we are already at the end of the file.
	// Note that we may seek back and read more.
	if (this.cleartextOffset >= this.cleartextSize) {
		return 0, io.EOF;
	}

	// Keep track of the offset when we started this read so we can calculate the final read size correctly.
	var originalCleartextOffset = this.cleartextOffset;

	// We will keep reading until there is no more to read or the buffer is full.
	// We will return inside the loop with an EOF if there is no more to read.
	for (len(outBuffer) > 0) {
		// First, make sure that we have data in the cleartext buffer.
		// NOTE(review): bytes copied so far are not reported when this
		// errors (returns 0, err) — confirm callers tolerate that.
		err := this.populateCleartextBuffer();
		if (err != nil) {
			return 0, errors.WithStack(err);
		}

		// Figure out how much to read from the buffer (min of room left for output and available in cleartext).
		var copyLength int = util.MinInt(len(this.cleartextBuffer), len(outBuffer));
		copy(outBuffer, this.cleartextBuffer[0:copyLength]);

		// Reslice the cleartext buffer and outBuffer to account for the copy.
		outBuffer = outBuffer[copyLength:];
		this.cleartextBuffer = this.cleartextBuffer[copyLength:];

		// Note the copy in the cleartext offset.
		this.cleartextOffset += int64(copyLength);

		// Reset the cleartext buffer (restoring its full capacity) once drained.
		if (len(this.cleartextBuffer) == 0) {
			this.cleartextBuffer = this.originalCleartextBuffer;
		}

		// If we have reached an EOF then we have read everything possible,
		// and either fell short of the requested amount or got that amount exactly.
		// Note that we are checking the cleartext offset instead of the ciphertext offset because
		// the cleartext offset indicates that there is nothing left in the cleartext buffer.
		if (this.cleartextOffset >= this.cleartextSize) {
			return int(this.cleartextOffset - originalCleartextOffset), io.EOF;
		}
	}

	// The output buffer is filled and we have not reached the end of the file.
	return int(this.cleartextOffset - originalCleartextOffset), nil;
}
// Make sure that there is data in the cleartext buffer.
// If there is, then just return; otherwise decrypt the next chunk from
// the underlying reader into it.
func (this *CipherReader) populateCleartextBuffer() error {
	if (len(this.cleartextBuffer) != 0) {
		return nil;
	}

	return errors.WithStack(this.readChunk());
}
// readChunk reads one ciphertext block from the underlying reader,
// decrypts it into the (empty) cleartext buffer, and advances the cipher
// offset and IV. An io.EOF from the reader is swallowed: a short final
// block is decrypted from however many bytes were read.
func (this *CipherReader) readChunk() error {
	// The cleartext buffer better be totally used (empty).
	if (len(this.cleartextBuffer) != 0) {
		return errors.New("Cleartext buffer is not empty.");
	}

	// Resize the buffer (without allocating) to ensure we only read exactly what we want.
	this.ciphertextBuffer = this.ciphertextBuffer[0:IO_BLOCK_SIZE + this.gcm.Overhead()];

	// Get the ciphertext.
	readSize, err := this.reader.Read(this.ciphertextBuffer);
	if (err != nil) {
		if (err != io.EOF) {
			return errors.Wrap(err, "Failed to read ciphertext chunk");
		}
	}

	// NOTE(review): a zero-byte read leaves the cleartext buffer empty with
	// no error; if the reader returns 0 without EOF repeatedly, Read's loop
	// would spin — confirm the underlying reader cannot do that.
	if (readSize == 0) {
		return nil;
	}

	// Move the cipher offset forward.
	this.ciphertextOffset += int64(readSize);

	// Reset the cleartext buffer, then decrypt-and-authenticate into it
	// (gcm.Open appends to the zero-length slice).
	this.cleartextBuffer = this.originalCleartextBuffer;
	this.cleartextBuffer, err = this.gcm.Open(this.cleartextBuffer, this.iv, this.ciphertextBuffer[0:readSize], nil);
	if (err != nil) {
		return errors.Wrap(err, "Failed to decrypt chunk");
	}

	// Prepare the IV for the next decrypt.
	util.IncrementBytes(this.iv);

	return nil;
}
// Seek repositions the reader within the *cleartext* stream. It resets
// all buffers, re-derives the IV for the target block, seeks the
// underlying reader to that block, decrypts it, and slices the cleartext
// buffer to the requested offset. Returns the new cleartext offset.
func (this *CipherReader) Seek(offset int64, whence int) (int64, error) {
	absoluteOffset, err := this.absoluteSeekOffset(offset, whence);
	if (err != nil) {
		return this.cleartextOffset, errors.WithStack(err);
	}

	// It is not strange to Seek(io.SeekCurrent, 0) just to see where the reader is.
	if (absoluteOffset == this.cleartextOffset) {
		return this.cleartextOffset, nil;
	}

	// Clear all the buffers and set the offsets to 0.
	// It is possible that we only need to seek a bit within the current buffer,
	// but it is easier to just treat all cases the same.
	this.cleartextBuffer = this.originalCleartextBuffer;
	this.ciphertextBuffer = this.ciphertextBuffer[0:IO_BLOCK_SIZE + this.gcm.Overhead()];
	this.iv = append([]byte(nil), this.originalIV...);
	this.ciphertextOffset = 0;
	this.cleartextOffset = 0;

	// Skip the required number of whole blocks: advance the IV once per
	// skipped block and position the underlying reader at the block that
	// contains the target offset. (The previous interim seek to 0 was
	// redundant — this absolute seek supersedes it.)
	var skipBlocks int64 = absoluteOffset / IO_BLOCK_SIZE;
	util.IncrementBytesByCount(this.iv, int(skipBlocks));
	this.ciphertextOffset = skipBlocks * this.cipherBlockSize;

	// Check the seek error instead of silently ignoring it; a failed seek
	// would otherwise cause us to decrypt from the wrong position.
	_, err = this.reader.Seek(this.ciphertextOffset, io.SeekStart);
	if (err != nil) {
		return 0, errors.WithStack(err);
	}

	// Now read the current block and set the cleartext buffer and offset accordingly.
	err = this.readChunk();
	if (err != nil) {
		// If we fail a read at this point, it is pretty un-recoverable.
		return 0, errors.WithStack(err);
	}

	// The cleartext buffer should be filled, so reslice the buffer to the offset.
	var bufferOffset int = int(absoluteOffset - (skipBlocks * IO_BLOCK_SIZE));
	this.cleartextBuffer = this.cleartextBuffer[bufferOffset:];

	// Finally, we can change the cleartext offset.
	this.cleartextOffset = absoluteOffset;

	return this.cleartextOffset, nil;
}
// Deal with all the different whences and give the absolute offset from the
// start of the (cleartext) file.
// If the seek offset is not valid in any way, a corresponding error will be returned.
func (this *CipherReader) absoluteSeekOffset(offset int64, whence int) (int64, error) {
	switch whence {
		case io.SeekStart:
			// Nothing to do.
		case io.SeekCurrent:
			offset = this.cleartextOffset + offset;
		case io.SeekEnd:
			offset = this.cleartextSize + offset;
		default:
			return 0, errors.Wrapf(syscall.EINVAL, "Unknown whence for seek: %d", whence);
	}

	// Seeking before the start or past the end of the cleartext is invalid.
	if (offset < 0 || offset > this.cleartextSize) {
		return 0, errors.WithStack(syscall.EINVAL);
	}

	return offset, nil;
}
// Close releases all buffers and key material held by this reader and
// closes the underlying reader, returning that reader's close error (if
// any). The CipherReader must not be used afterwards.
func (this *CipherReader) Close() error {
	this.gcm = nil;
	this.ciphertextBuffer = nil;
	this.cleartextBuffer = nil;
	this.originalCleartextBuffer = nil;
	this.iv = nil;

	err := this.reader.Close();
	this.reader = nil;

	return errors.WithStack(err);
} | cipherio/reader.go | 0.7413 | 0.400984 | reader.go | starcoder |
package tchart
import (
"fmt"
)
var dots = []rune{' ', '⠂', '▤', '▥'}
// Segment represents a dial segment.
type Segment []int
// Segments represents a collection of segments.
type Segments []Segment
// Matrix represents a number dial.
type Matrix [][]rune
// Orientation tracks char orientations.
type Orientation int
// DotMatrix tracks a char matrix.
type DotMatrix struct {
row, col int
}
// NewDotMatrix returns a new matrix.
func NewDotMatrix(row, col int) DotMatrix {
return DotMatrix{
row: row,
col: col,
}
}
// Print prints the matrix.
func (d DotMatrix) Print(n int) Matrix {
m := make(Matrix, d.row)
segs := asSegments(n)
for row := 0; row < d.row; row++ {
for col := 0; col < d.col; col++ {
m[row] = append(m[row], segs.CharFor(row, col))
}
}
return m
}
func asSegments(n int) Segment {
switch n {
case 0:
return Segment{1, 1, 1, 0, 1, 1, 1}
case 1:
return Segment{0, 0, 1, 0, 0, 1, 0}
case 2:
return Segment{1, 0, 1, 1, 1, 0, 1}
case 3:
return Segment{1, 0, 1, 1, 0, 1, 1}
case 4:
return Segment{0, 1, 0, 1, 0, 1, 0}
case 5:
return Segment{1, 1, 0, 1, 0, 1, 1}
case 6:
return Segment{0, 1, 0, 1, 1, 1, 1}
case 7:
return Segment{1, 0, 1, 0, 0, 1, 0}
case 8:
return Segment{1, 1, 1, 1, 1, 1, 1}
case 9:
return Segment{1, 1, 1, 1, 0, 1, 0}
default:
panic(fmt.Sprintf("NYI %d", n))
}
}
// CharFor return a char based on row/col.
func (s Segment) CharFor(row, col int) rune {
c := ' '
segs := ToSegments(row, col)
if segs == nil {
return c
}
for _, seg := range segs {
if s[seg] == 1 {
c = charForSeg(seg, row, col)
}
}
return c
}
func charForSeg(seg, row, col int) rune {
switch seg {
case 0, 3, 6:
return dots[2]
}
if row == 0 && (col == 0 || col == 2) {
return dots[2]
}
return dots[3]
}
var segs = map[int][][]int{
0: {{1, 0}, {0}, {2, 0}},
1: {{1}, nil, {2}},
2: {{1, 3}, {3}, {2, 3}},
3: {{4}, nil, {5}},
4: {{4, 6}, {6}, {5, 6}},
}
// ToSegments return path segments.
func ToSegments(row, col int) []int {
return segs[row][col]
} | internal/tchart/dot_matrix.go | 0.778986 | 0.508788 | dot_matrix.go | starcoder |
package ghcdieselfuelprice
import (
"fmt"
"time"
"github.com/gobuffalo/pop/v5"
"go.uber.org/zap"
"github.com/transcom/mymove/pkg/models"
"github.com/transcom/mymove/pkg/unit"
)
func priceInMillicents(price float64) unit.Millicents {
priceInMillicents := unit.Millicents(int(price * 100000))
return priceInMillicents
}
func publicationDateInTime(publicationDate string) (time.Time, error) {
publicationDateInTime, err := time.Parse("20060102", publicationDate)
return publicationDateInTime, err
}
// RunStorer stores the final EIA weekly average diesel fuel price data in the ghc_diesel_fuel_price table
func (d *DieselFuelPriceInfo) RunStorer(dbTx *pop.Connection) error {
priceInMillicents := priceInMillicents(d.dieselFuelPriceData.price)
publicationDate, err := publicationDateInTime(d.dieselFuelPriceData.publicationDate)
if err != nil {
return err
}
var newGHCDieselFuelPrice models.GHCDieselFuelPrice
newGHCDieselFuelPrice.PublicationDate = publicationDate
newGHCDieselFuelPrice.FuelPriceInMillicents = priceInMillicents
var lastGHCDieselFuelPrice models.GHCDieselFuelPrice
err = dbTx.Where("publication_date = ?", publicationDate).First(&lastGHCDieselFuelPrice)
if err != nil {
d.logger.Info("no existing GHCDieselFuelPrice record found with", zap.String("publication_date", publicationDate.String()))
verrs, err := dbTx.ValidateAndCreate(&newGHCDieselFuelPrice)
if err != nil {
return fmt.Errorf("failed to create ghcDieselFuelPrice: %w", err)
}
if verrs.HasAny() {
return fmt.Errorf("failed to validate ghcDieselFuelPrice: %w", verrs)
}
} else if priceInMillicents != lastGHCDieselFuelPrice.FuelPriceInMillicents {
lastGHCDieselFuelPrice.FuelPriceInMillicents = priceInMillicents
verrs, err := dbTx.ValidateAndUpdate(&lastGHCDieselFuelPrice)
if err != nil {
return fmt.Errorf("failed to update ghcDieselFuelPrice: %w", err)
}
if verrs.HasAny() {
return fmt.Errorf("failed to validate ghcDieselFuelPrice: %w", verrs)
}
}
return nil
} | pkg/services/ghcdieselfuelprice/ghc_diesel_fuel_price_storer.go | 0.717507 | 0.4184 | ghc_diesel_fuel_price_storer.go | starcoder |
package config
import (
"reflect"
"sort"
"github.com/go-yaml/yaml"
)
// LastCompareField returns last equal compared field of IsEqualTo evaluation.
// After IsEqualTo returns false, this is the name of the differing field.
func (a *Container) LastCompareField() string {
	return a.lastCompareField
}

// IsEqualTo compares the container spec against another one.
// It returns false if at least one property is unequal. The field name is
// recorded in lastCompareField before each comparison so the caller can
// inspect which field differed.
func (a *Container) IsEqualTo(b *Container) bool {
	for _, field := range getComparableFields() {
		a.lastCompareField = field

		// Comparison errors are currently treated as "not equal".
		if equal, _ := compareYaml(field, a, b); !equal {
			// TODO: return err
			return false
		}
	}

	return true
}
// IsEqualTo compares the ContainerName against another one.
// namespace and name should be same for the names to be considered equal.
func (a *ContainerName) IsEqualTo(b *ContainerName) bool {
	return a.IsEqualNs(b) && a.Name == b.Name
}

// IsEqualNs returns true if both containers have same namespace
// (as reported by GetNamespace).
func (a *ContainerName) IsEqualNs(b *ContainerName) bool {
	return a.GetNamespace() == b.GetNamespace()
}
// internals

// TODO: here would be nice to say few words about our approach of container specs comparison.

// compareYaml compares the single named field of two Containers by
// marshaling each side to YAML and comparing the serialized strings.
// Nil pointer fields are replaced with freshly allocated zero values so a
// nil field compares equal to an empty one. Slice fields — except
// Entrypoint and Cmd, where order is significant — are sorted first so
// element order does not register as a difference.
// NOTE(review): assumes non-slice/map fields are nilable (pointer-typed);
// IsNil would panic otherwise — confirm against Container's field types.
func compareYaml(name string, a, b *Container) (bool, error) {
	av := reflect.Indirect(reflect.ValueOf(a)).FieldByName(name)
	bv := reflect.Indirect(reflect.ValueOf(b)).FieldByName(name)

	isSlice := av.Type().Kind() == reflect.Slice
	isMap := av.Type().Kind() == reflect.Map

	// empty values and nil pointer should be considered equal
	if av.IsNil() && !isSlice && !isMap {
		av = reflect.New(av.Type().Elem())
	}
	if bv.IsNil() && !isSlice && !isMap {
		bv = reflect.New(bv.Type().Elem())
	}

	// sort lists which should not consider different order to be a change
	if isSlice && name != "Entrypoint" && name != "Cmd" {
		aSorted := newYamlSortable(av)
		sort.Sort(aSorted)
		av = reflect.ValueOf(aSorted)

		bSorted := newYamlSortable(bv)
		sort.Sort(bSorted)
		bv = reflect.ValueOf(bSorted)
	}

	yml1, err := yaml.Marshal(av.Interface())
	if err != nil {
		return false, err
	}
	yml2, err := yaml.Marshal(bv.Interface())
	if err != nil {
		return false, err
	}

	return string(yml1) == string(yml2), nil
}
// yamlSortable adapts a reflected slice to sort.Interface, ordering
// elements by their YAML serialization so sorting is stable across
// arbitrary element types.
type yamlSortable []interface{}

// newYamlSortable copies the elements of a reflected slice into a
// yamlSortable.
func newYamlSortable(slice reflect.Value) yamlSortable {
	sortable := yamlSortable{}
	for i := 0; i < slice.Len(); i++ {
		sortable = append(sortable, slice.Index(i).Interface())
	}
	return sortable
}

func (items yamlSortable) Len() int {
	return len(items)
}

// Less compares two elements by marshaling both to YAML on every call;
// marshal errors are deliberately ignored (a failed side compares as the
// zero string). NOTE(review): this re-marshals per comparison — fine for
// the small slices compared here, but O(n log n) marshals in general.
func (items yamlSortable) Less(i, j int) bool {
	yml1, _ := yaml.Marshal(items[i])
	yml2, _ := yaml.Marshal(items[j])
	return string(yml1) < string(yml2)
}

func (items yamlSortable) Swap(i, j int) {
	items[i], items[j] = items[j], items[i]
} | src/compose/config/compare.go | 0.631481 | 0.441553 | compare.go | starcoder |
package main
import (
"io/ioutil"
"net/url"
"path"
"strings"
"sync"
"net/http"
)
// SmokeTest contains a URI and expected status code.
type SmokeTest struct {
URI string `json:"uri"`
ExpectedStatusCode int `json:"status_code"`
Content string `json:"content"`
}
// SmokeTestResult is the result of a SmokeTest.
type SmokeTestResult struct {
Test SmokeTest
ActualStatusCode int
ActualContent []byte
}
// Passed determines if the SmokeTest passed successfully
func (result SmokeTestResult) Passed() bool {
if !result.statusCodeMatched() {
return false
}
if result.Test.Content != "" && !result.contentMatched() {
return false
}
return true
}
func (result SmokeTestResult) statusCodeMatched() bool {
return (result.Test.ExpectedStatusCode == result.ActualStatusCode)
}
func (result SmokeTestResult) contentMatched() bool {
return (strings.Contains(string(result.ActualContent), result.Test.Content) == true)
}
// SmokeTestResults is a slice of smoke test results
type SmokeTestResults []SmokeTestResult
// PassedCount is the number of smoke tests that passed
func (results SmokeTestResults) PassedCount() int {
var passedCount int
for _, result := range results {
if result.Passed() {
passedCount++
}
}
return passedCount
}
// SmokeTests is a slice of smoke tests to perform.
type SmokeTests []SmokeTest
// Vape contains dependencies used to run the application.
type Vape struct {
client HTTPClient
baseURL *url.URL
concurrency int
authorization string
}
// NewVape builds a Vape from the given dependencies.
func NewVape(client HTTPClient, baseURL *url.URL, concurrency int, authorization string) Vape {
return Vape{
client: client,
baseURL: baseURL,
concurrency: concurrency,
authorization: authorization,
}
}
func (v Vape) worker(wg *sync.WaitGroup, tests <-chan SmokeTest, resultCh chan<- SmokeTestResult, errorCh chan<- error) {
for test := range tests {
result, err := v.performTest(test)
if err != nil {
errorCh <- err
} else {
resultCh <- result
}
wg.Done()
}
}
// Process takes a list of URIs and concurrently performs a smoke test on each.
func (v Vape) Process(tests SmokeTests) (results SmokeTestResults, errors []error) {
testCount := len(tests)
jobCh := make(chan SmokeTest, testCount)
resultCh := make(chan SmokeTestResult, testCount)
errorCh := make(chan error, testCount)
var wg sync.WaitGroup
for w := 1; w <= v.concurrency; w++ {
go v.worker(&wg, jobCh, resultCh, errorCh)
}
for _, job := range tests {
jobCh <- job
wg.Add(1)
}
close(jobCh)
wg.Wait()
for i := 0; i < testCount; i++ {
select {
case err := <-errorCh:
errors = append(errors, err)
case result := <-resultCh:
results = append(results, result)
}
}
return results, errors
}
// performTest tests the status code of a HTTP request of a given URI.
func (v Vape) performTest(test SmokeTest) (SmokeTestResult, error) {
url := *v.baseURL
u, err := url.Parse(path.Join(url.Path + test.URI))
if err != nil {
return SmokeTestResult{}, err
}
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return SmokeTestResult{}, err
}
if v.authorization != "" {
req.Header.Add("Authorization", v.authorization)
}
resp, err := v.client.Do(req)
if err != nil {
return SmokeTestResult{}, err
}
result := SmokeTestResult{
ActualStatusCode: resp.StatusCode,
Test: test,
}
if test.Content != "" {
defer resp.Body.Close()
actualContent, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, err
}
result.ActualContent = actualContent
}
return result, nil
} | vape.go | 0.661048 | 0.435181 | vape.go | starcoder |
package tengo
import (
"errors"
"fmt"
"strings"
)
// Sentinel runtime errors shared by the VM and the standard library; compare
// against them with errors.Is.
var (
	// ErrStackOverflow is a stack overflow error.
	ErrStackOverflow = errors.New("stack overflow")
	// ErrObjectAllocLimit is an objects allocation limit error.
	ErrObjectAllocLimit = errors.New("object allocation limit exceeded")
	// ErrIndexOutOfBounds is an error where a given index is out of the
	// bounds.
	ErrIndexOutOfBounds = errors.New("index out of bounds")
	// ErrInvalidIndexType represents an invalid index type.
	ErrInvalidIndexType = errors.New("invalid index type")
	// ErrInvalidIndexValueType represents an invalid index value type.
	ErrInvalidIndexValueType = errors.New("invalid index value type")
	// ErrInvalidIndexOnError represents an invalid index on error.
	ErrInvalidIndexOnError = errors.New("invalid index on error")
	// ErrInvalidOperator represents an error for invalid operator usage.
	ErrInvalidOperator = errors.New("invalid operator")
	// ErrBytesLimit represents an error where the size of bytes value exceeds
	// the limit.
	ErrBytesLimit = errors.New("exceeding bytes size limit")
	// ErrStringLimit represents an error where the size of string value
	// exceeds the limit.
	ErrStringLimit = errors.New("exceeding string size limit")
	// ErrNotIndexable is an error where an Object is not indexable.
	ErrNotIndexable = errors.New("not indexable")
	// ErrNotIndexAssignable is an error where an Object is not index
	// assignable.
	ErrNotIndexAssignable = errors.New("not index-assignable")
	// ErrNotImplemented is an error where an Object has not implemented a
	// required method.
	ErrNotImplemented = errors.New("not implemented")
	// ErrInvalidRangeStep is an error where the step parameter is less than
	// or equal to 0 when using the builtin range function.
	ErrInvalidRangeStep = errors.New("range step must be greater than 0")
)
// ErrInvalidReturnValueCount represents an invalid return value count error.
type ErrInvalidReturnValueCount struct {
	Expected int
	Actual   int
}

// Error formats the expected and actual return value counts.
func (e ErrInvalidReturnValueCount) Error() string {
	msg := fmt.Sprintf("invalid return value count, expected %d, actual %d",
		e.Expected, e.Actual)
	return msg
}
// ErrInvalidArgumentCount represents an invalid argument count error.
// Max < 0 denotes a variadic function with no upper bound.
type ErrInvalidArgumentCount struct {
	Min    int
	Max    int
	Actual int
}

// Error formats the allowed argument-count range and the actual count.
func (e ErrInvalidArgumentCount) Error() string {
	switch {
	case e.Max < 0:
		return fmt.Sprintf("invalid variadic argument count, expected at least %d, actual %d",
			e.Min, e.Actual)
	case e.Min == e.Max:
		return fmt.Sprintf("invalid argument count, expected %d, actual %d",
			e.Min, e.Actual)
	default:
		return fmt.Sprintf("invalid argument count, expected between %d and %d, actual %d",
			e.Min, e.Max, e.Actual)
	}
}
// ErrInvalidArgumentType represents an invalid argument type error.
type ErrInvalidArgumentType struct {
	Index    int
	Expected string
	Actual   string
}

// TNs is a shorthand alias for typenames
type TNs = []string

// AnyTN is a typename when any type can be accepted
const AnyTN = "any"

// Error formats the argument index together with its expected and actual
// type names.
func (e ErrInvalidArgumentType) Error() string {
	return fmt.Sprintf(
		"invalid type for argument at index %d: expected %s, actual %s",
		e.Index, e.Expected, e.Actual)
}
// CheckArgs wraps a callable variadic function with flexible argument type
// checking logic when an argument might have several allowable types. Each
// entry of expected lists the type names permitted at that position; past
// the end of expected, variadic arguments reuse the last entry. An empty
// entry (or a single AnyTN) accepts any type.
func CheckArgs(fn CallableFunc, min, max int, expected ...TNs) CallableFunc {
	if min < 0 || (max >= 0 && min > max) {
		panic("invalid min max arg count values")
	}
	if max < 0 && len(expected) < min {
		// variadic func
		panic("invalid expected len, must be at least min when max < 0")
	}
	if max >= 0 && (len(expected) < min || len(expected) > max) {
		panic("invalid expected len, must be between min and max")
	}
	return func(actual ...Object) (Object, error) {
		tooMany := max >= 0 && len(actual) > max
		if tooMany || len(actual) < min {
			return nil, ErrInvalidArgumentCount{
				Min:    min,
				Max:    max,
				Actual: len(actual),
			}
		}
		for i, arg := range actual {
			// Arguments beyond the declared list are variadic and are
			// checked against the last declared entry.
			idx := i
			if idx > len(expected)-1 {
				idx = len(expected) - 1
			}
			allowed := expected[idx]
			if len(allowed) == 0 || (len(allowed) == 1 && allowed[0] == AnyTN) {
				continue // any type accepted at this position
			}
			matched := false
			for _, tn := range allowed {
				if tn == arg.TypeName() {
					matched = true
					break
				}
			}
			if !matched {
				return nil, ErrInvalidArgumentType{
					Index:    i,
					Expected: strings.Join(allowed, "/"),
					Actual:   arg.TypeName(),
				}
			}
		}
		return fn(actual...)
	}
}
// CheckOptArgs wraps a callable function with strict argument type checking
// for optional arguments: each expected entry allows exactly one type name.
func CheckOptArgs(fn CallableFunc, min, max int, expected ...string) CallableFunc {
	strict := make([]TNs, len(expected))
	for i, tn := range expected {
		strict[i] = TNs{tn}
	}
	return CheckArgs(fn, min, max, strict...)
}
// CheckAnyArgs wraps a callable function with argument count checking only;
// every argument type is accepted. This is only useful to functions that use
// tengo.ToSomeType(i int, args ...Object) (type, error) helpers, as they do
// more complex type conversions with their own type checking internally.
// With no bounds given, exactly zero arguments are required; one bound means
// exactly that many; two bounds give a [min, max] range (max < 0 = variadic).
func CheckAnyArgs(fn CallableFunc, minMax ...int) CallableFunc {
	var min, max int
	switch len(minMax) {
	case 0:
		// exactly zero arguments
	case 1:
		min, max = minMax[0], minMax[0]
	default:
		min, max = minMax[0], minMax[1]
	}
	count := max
	if max < 0 {
		count = min + 1
	}
	placeholders := make([]TNs, count)
	return CheckArgs(fn, min, max, placeholders...)
}
// CheckStrictArgs wraps a callable function with strict argument type checking logic
func CheckStrictArgs(fn CallableFunc, expected ...string) CallableFunc {
return CheckOptArgs(fn, len(expected), len(expected), expected...)
} | errors.go | 0.68679 | 0.419262 | errors.go | starcoder |
package cybuf
import (
"reflect"
)
// CyBufType identifies the value category of a CyBuf value.
type CyBufType int

const (
	CyBufType_Invalid CyBufType = iota
	CyBufType_Nil
	CyBufType_Bool
	CyBufType_Integer
	CyBufType_Char
	CyBufType_Float
	CyBufType_String
	CyBufType_Array
	CyBufType_Object
)

// GetInterfaceValueType maps a Go value onto its CyBuf type category using
// reflection. Kinds not covered below yield CyBufType_Invalid.
// NOTE(review): reflect.Int64 is absent from the integer case even though
// reflect.Uint64 is present, so plain int64 values map to CyBufType_Invalid —
// confirm whether that omission is intentional.
func GetInterfaceValueType(v interface{}) CyBufType {
	switch reflect.TypeOf(v).Kind() {
	case reflect.Bool:
		return CyBufType_Bool
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return CyBufType_Integer
	case reflect.Float32, reflect.Float64:
		return CyBufType_Float
	case reflect.String:
		return CyBufType_String
	case reflect.Slice, reflect.Array:
		return CyBufType_Array
	case reflect.Map, reflect.Struct:
		return CyBufType_Object
	default:
		return CyBufType_Invalid
	}
}
// v must be a CyBufType_Integer variable
func IsSignedInteger(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
return true
default:
return false
}
}
// GetReflectValueType maps a reflect.Value onto its CyBuf type category by
// unwrapping it back to an interface{} and delegating to
// GetInterfaceValueType.
func GetReflectValueType(v reflect.Value) CyBufType {
	return GetInterfaceValueType(v.Interface())
}
// GetBytesValueType classifies a raw CyBuf value token. Quoted/bracketed
// forms are tested first, then the scalar forms; anything unrecognized is
// CyBufType_Invalid.
func GetBytesValueType(v []byte) CyBufType {
	switch {
	case IsStringValue(v):
		return CyBufType_String
	case IsArrayValue(v):
		return CyBufType_Array
	case IsObjectValue(v):
		return CyBufType_Object
	case IsNilType(v):
		return CyBufType_Nil
	case IsBoolType(Bytes2string(v)):
		return CyBufType_Bool
	case IsIntegerValue(v):
		return CyBufType_Integer
	case IsFloatValue(v):
		return CyBufType_Float
	default:
		return CyBufType_Invalid
	}
}
// GetBytesValueComplexType classifies a raw token against the composite
// categories only (string, array, object); anything else is invalid.
func GetBytesValueComplexType(v []byte) CyBufType {
	switch {
	case IsStringValue(v):
		return CyBufType_String
	case IsArrayValue(v):
		return CyBufType_Array
	case IsObjectValue(v):
		return CyBufType_Object
	default:
		return CyBufType_Invalid
	}
}
// GetBytesValueSimpleType classifies a raw token against the scalar
// categories only (nil, bool, integer, float); anything else is invalid.
func GetBytesValueSimpleType(v []byte) CyBufType {
	switch {
	case IsNilType(v):
		return CyBufType_Nil
	case IsBoolType(Bytes2string(v)):
		return CyBufType_Bool
	case IsIntegerValue(v):
		return CyBufType_Integer
	case IsFloatValue(v):
		return CyBufType_Float
	default:
		return CyBufType_Invalid
	}
}
// IsNilType reports whether v is exactly the literal token "nil". The
// length check prevents the out-of-range panic the previous index-based
// comparison hit on inputs shorter than three bytes, and requiring exactly
// three bytes stops longer tokens that merely start with "nil" from being
// classified as nil.
func IsNilType(v []byte) bool {
	return len(v) == 3 && v[0] == 'n' && v[1] == 'i' && v[2] == 'l'
}
// IsBoolType reports whether v is one of the boolean literal tokens.
func IsBoolType(v string) bool {
	switch v {
	case "true", "false":
		return true
	}
	return false
}
// IsIntegerValue reports whether v consists of one or more ASCII digits.
// An empty slice is now rejected; the previous loop vacuously accepted it.
// Note: a leading sign is not accepted, matching the original behavior.
func IsIntegerValue(v []byte) bool {
	if len(v) == 0 {
		return false
	}
	for _, c := range v {
		if c < '0' || c > '9' {
			return false
		}
	}
	return true
}
// IsFloatValue reports whether v is a run of ASCII digits containing at most
// one '.'. At least one digit is now required, so "" and "." no longer
// classify as floats (the previous loop vacuously accepted both). A token
// with no dot at all still passes, matching the original behavior; callers
// test IsIntegerValue first.
func IsFloatValue(v []byte) bool {
	sawDot := false
	sawDigit := false
	for _, c := range v {
		switch {
		case c == '.':
			if sawDot {
				return false
			}
			sawDot = true
		case c >= '0' && c <= '9':
			sawDigit = true
		default:
			return false
		}
	}
	return sawDigit
}
// IsStringValue reports whether v is wrapped in matching single or double
// quotes. At least two bytes are required, so empty input cannot panic and
// a lone quote character is not mistaken for a complete string literal.
func IsStringValue(v []byte) bool {
	if len(v) < 2 {
		return false
	}
	return v[0] == v[len(v)-1] && (v[0] == '\'' || v[0] == '"')
}
// IsArrayValue reports whether v is wrapped in '[' ... ']'. The previous
// check compared the LAST byte against '[' (v[0] == v[len(v)-1]), which can
// never match a well-formed array token, and panicked on empty input.
func IsArrayValue(v []byte) bool {
	return len(v) >= 2 && v[0] == '[' && v[len(v)-1] == ']'
}
// IsObjectValue reports whether v is wrapped in '{' ... '}'. The previous
// check compared the LAST byte against '{' (v[0] == v[len(v)-1]), which can
// never match a well-formed object token, and panicked on empty input.
func IsObjectValue(v []byte) bool {
	return len(v) >= 2 && v[0] == '{' && v[len(v)-1] == '}'
}
// IsBoundChar reports whether c is a delimiter that opens or closes a
// nested value: braces, brackets, or quotes.
func IsBoundChar(c byte) bool {
	return c == '{' || c == '}' || c == '[' || c == ']' || c == '"' || c == '\''
}
// BoundMap returns the delimiter that pairs with c: braces and brackets map
// to their partners, while quotes (their own partners) and any other byte
// come back unchanged.
// c must be a bound character.
func BoundMap(c byte) byte {
	switch c {
	case '{':
		return '}'
	case '}':
		return '{'
	case '[':
		return ']'
	case ']':
		return '['
	default:
		return c
	}
}
package interpreter
import (
"fmt"
"github.com/smackem/ylang/internal/lang"
"math"
"reflect"
)
// Line is a value type representing the segment between two points.
type Line struct {
	Point1 Point
	Point2 Point
}
// Compare reports equality with another Value: Number(0) when other is the
// same line, otherwise Boolean(false).
// NOTE(review): non-equal lines never yield a non-zero Number; confirm that
// callers only test the result for equality.
func (l Line) Compare(other Value) (Value, error) {
	if r, ok := other.(Line); ok {
		if l == r {
			return Number(0), nil
		}
	}
	return Boolean(lang.FalseVal), nil
}
// Add is not defined for lines and always returns an error.
func (l Line) Add(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: line + %s Not supported", reflect.TypeOf(other))
}
// Sub is not defined for lines and always returns an error.
func (l Line) Sub(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: line - %s Not supported", reflect.TypeOf(other))
}
// Mul is not defined for lines and always returns an error.
func (l Line) Mul(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: line * %s Not supported", reflect.TypeOf(other))
}
// Div is not defined for lines and always returns an error.
func (l Line) Div(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: line / %s Not supported", reflect.TypeOf(other))
}
// Mod is not defined for lines and always returns an error.
func (l Line) Mod(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: line %% %s Not supported", reflect.TypeOf(other))
}
// In returns true only when both endpoints lie inside the given rect; any
// other right-hand type is an error.
func (l Line) In(other Value) (Value, error) {
	if r, ok := other.(Rect); ok {
		p1, _ := l.Point1.In(r)
		p2, _ := l.Point2.In(r)
		return Boolean(p1.(Boolean) && p2.(Boolean)), nil
	}
	return nil, fmt.Errorf("type mismatch: line In %s Not supported", reflect.TypeOf(other))
}
// Neg reverses the line's direction by swapping its endpoints.
func (l Line) Neg() (Value, error) {
	return Line{Point1: l.Point2, Point2: l.Point1}, nil
}
// Not is not defined for lines and always returns an error.
func (l Line) Not() (Value, error) {
	return nil, fmt.Errorf("type mismatch: 'Not line' Not supported")
}
// At (the @ operator) is not defined for lines and always returns an error.
func (l Line) At(bitmap BitmapContext) (Value, error) {
	return nil, fmt.Errorf("type mismatch: @line Not supported")
}
// Property resolves a named property: the endpoints (p1/point1, p2/point2),
// the axis deltas (dx, dy), and the euclidean length (len). Anything else
// falls back to baseProperty.
func (l Line) Property(ident string) (Value, error) {
	switch ident {
	case "p1", "point1":
		return l.Point1, nil
	case "p2", "point2":
		return l.Point2, nil
	case "dx":
		return Number(l.Point2.X - l.Point1.X), nil
	case "dy":
		return Number(l.Point2.Y - l.Point1.Y), nil
	case "len":
		dx, dy := l.Point2.X-l.Point1.X, l.Point2.Y-l.Point1.Y
		return Number(math.Sqrt(float64(dx*dx + dy*dy))), nil
	}
	return baseProperty(l, ident)
}
// PrintStr renders the line for display.
func (l Line) PrintStr() string {
	return fmt.Sprintf("line(point1:%s, point2:%s)", l.Point1.PrintStr(), l.Point2.PrintStr())
}
// Iterate visits each point of the rasterized segment: it steps one unit
// along the dominant axis per iteration and rounds the interpolated
// coordinates to the nearest integer point.
// NOTE(review): when the endpoints coincide, steps is 0 and dx/stepsN divides
// by zero; the loop body never runs, but confirm lang.Number is a float type
// so the division cannot panic.
func (l Line) Iterate(visit func(Value) error) error {
	dx, dy := lang.Number(l.Point2.X-l.Point1.X), lang.Number(l.Point2.Y-l.Point1.Y)
	dxabs, dyabs := math.Abs(float64(dx)), math.Abs(float64(dy))
	var steps int
	if dxabs > dyabs {
		steps = int(dxabs)
	} else {
		steps = int(dyabs)
	}
	stepsN := lang.Number(steps)
	dx, dy = dx/stepsN, dy/stepsN
	x, y := lang.Number(l.Point1.X), lang.Number(l.Point1.Y)
	for i := 0; i < steps; i++ {
		if err := visit(Point{int(x + 0.5), int(y + 0.5)}); err != nil {
			return err
		}
		x = x + dx
		y = y + dy
	}
	return nil
}
// Index is not defined for lines and always returns an error.
func (l Line) Index(index Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: line[Index] Not supported")
}
// IndexRange is not defined for lines and always returns an error.
func (l Line) IndexRange(lower, upper Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: line[lower..upper] Not supported")
}
// IndexAssign is not defined for lines and always returns an error.
func (l Line) IndexAssign(index Value, val Value) error {
	return fmt.Errorf("type mismatch: line[%s] Not supported", reflect.TypeOf(index))
}
// RuntimeTypeName returns the language-level name of this type.
func (l Line) RuntimeTypeName() string {
	return "line"
}
// Concat (the :: operator) is not defined for lines and always returns an
// error.
func (l Line) Concat(val Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: line :: %s Not supported", reflect.TypeOf(val))
}
package swu
import (
"math/big"
)
// GF represents galois field over prime
type GF struct {
P *big.Int
}
var (
one = big.NewInt(1)
two = big.NewInt(2)
three = big.NewInt(3)
four = big.NewInt(4)
)
//Neg negates number over GFp
func (g *GF) Neg(a *big.Int) *big.Int {
return new(big.Int).Sub(g.P, a)
}
//NegBytes negates number over GFp represented by a byte array
func (g *GF) NegBytes(a []byte) *big.Int {
return new(big.Int).Sub(g.P, new(big.Int).SetBytes(a))
}
//Square does a^2 over GFp
func (g *GF) Square(a *big.Int) *big.Int {
return new(big.Int).Exp(a, two, g.P)
}
//Cube does a^3 over GFp
func (g *GF) Cube(a *big.Int) *big.Int {
return new(big.Int).Exp(a, three, g.P)
}
//Pow does a^b over GFp
func (g *GF) Pow(a, b *big.Int) *big.Int {
return new(big.Int).Exp(a, b, g.P)
}
//Inv does modulo inverse over GFp
func (g *GF) Inv(a *big.Int) *big.Int {
return new(big.Int).ModInverse(a, g.P)
}
//InvBytes does modulo inverse over GFp represented by a byte array
func (g *GF) InvBytes(a []byte) *big.Int {
return new(big.Int).ModInverse(new(big.Int).SetBytes(a), g.P)
}
//Add adds two numbers over GFp
func (g *GF) Add(a, b *big.Int) *big.Int {
add := new(big.Int).Add(a, b)
return add.Mod(add, g.P)
}
//AddBytes adds two numbers one of which is represented as byte array over GFp
func (g *GF) AddBytes(a []byte, b *big.Int) *big.Int {
add := new(big.Int).Add(new(big.Int).SetBytes(a), b)
return add.Mod(add, g.P)
}
//Sub subtracts two numbers over GFp
func (g *GF) Sub(a, b *big.Int) *big.Int {
negB := new(big.Int).Sub(a, b)
return negB.Mod(negB, g.P)
}
//Mul multiplies two numbers over GFp
func (g *GF) Mul(a, b *big.Int) *big.Int {
mul := new(big.Int).Mul(a, b)
return mul.Mod(mul, g.P)
}
//MulBytes multiplies two numbers one of which is represented as a byte array over GFp
func (g *GF) MulBytes(a []byte, b *big.Int) *big.Int {
mul := new(big.Int).Mul(new(big.Int).SetBytes(a), b)
return mul.Mod(mul, g.P)
}
// Div multiplies a number by an inverse of another number over GFp
func (g *GF) Div(a, b *big.Int) *big.Int {
invB := g.Inv(b)
t := g.Mul(a, invB)
return t
} | swu/gf.go | 0.817283 | 0.441854 | gf.go | starcoder |
package store
import (
"github.com/FourthState/plasma-mvp-sidechain/plasma"
ethcmn "github.com/ethereum/go-ethereum/common"
"math/big"
)
// Wallet holds reference to the total balance, unspent, and spent outputs
// at a given address
type Wallet struct {
	Balance *big.Int          // total amount available to be spent
	Unspent []plasma.Position // position of unspent transaction outputs
	Spent   []plasma.Position // position of spent transaction outputs
}
// Deposit wraps a plasma deposit with spend information.
type Deposit struct {
	Deposit   plasma.Deposit
	Spent     bool
	SpenderTx []byte // transaction hash that spends this deposit
}
// Output wraps a plasma output with spend information.
type Output struct {
	Output    plasma.Output
	Spent     bool
	SpenderTx []byte // transaction hash that spent this output
}
// Transaction wraps a plasma transaction with spend information.
// Spent and SpenderTxs are indexed per output of the wrapped transaction.
type Transaction struct {
	Transaction      plasma.Transaction
	ConfirmationHash []byte
	Spent            []bool
	SpenderTxs       [][]byte // transaction hashes that spend the outputs of this transaction
	Position         plasma.Position
}
// TxOutput holds all transactional information related to an output.
type TxOutput struct {
	plasma.Output
	Position         plasma.Position
	ConfirmationHash []byte
	TxHash           []byte
	Spent            bool
	SpenderTx        []byte
}
// NewTxOutput creates a TxOutput object from its component parts.
func NewTxOutput(output plasma.Output, pos plasma.Position, confirmationHash, txHash []byte,
	spent bool, spenderTx []byte) TxOutput {
	var out TxOutput
	out.Output = output
	out.Position = pos
	out.ConfirmationHash = confirmationHash
	out.TxHash = txHash
	out.Spent = spent
	out.SpenderTx = spenderTx
	return out
}
// TxInput holds basic transactional data along with input information:
// the addresses and positions of the outputs this transaction spends.
type TxInput struct {
	plasma.Output
	Position       plasma.Position
	TxHash         []byte
	InputAddresses []ethcmn.Address
	InputPositions []plasma.Position
}
// NewTxInput creates a TxInput object from its component parts.
func NewTxInput(output plasma.Output, pos plasma.Position, txHash []byte,
	inputAddresses []ethcmn.Address, inputPositions []plasma.Position) TxInput {
	var in TxInput
	in.Output = output
	in.Position = pos
	in.TxHash = txHash
	in.InputAddresses = inputAddresses
	in.InputPositions = inputPositions
	return in
}
// Block wraps a plasma block with the tendermint block height at which it
// was committed.
type Block struct {
	plasma.Block
	TMBlockHeight uint64
}
package exp
import (
"fmt"
"math"
"strconv"
"github.com/spf13/cobra"
"github.com/timebertt/grypto/modular"
)
// NewCommand builds the "exp" cobra command, which validates three int32
// arguments (base, exponent, modulus) in PreRunE and computes the modular
// exponentiation in RunE.
func NewCommand() *cobra.Command {
	var base, exp, mod int32
	cmd := &cobra.Command{
		Use:     "exp [base] [exponent] [modulus]",
		Aliases: []string{"mod-exp", "square-and-multiply"},
		Short:   "Use the square-and-multiply method for calculating the modular exponentiation",
		Long: `The exp command implements modular exponentiation using the square-and-multiply method for int32 numbers.
It prints a value x so that x = base ^ exp mod m.
Modular exponentiation is heavily used e.g. for primality tests and public-key cryptography (like RSA).
Even for reasonably small integers, calculating the modular exponentiation directly is on the one hand
quite inefficient and on the other hand very impractical, as the resulting integers will easily outgrow
the usual variable/register sizes.
A fairly efficient method is exponentiation by squaring (also known as square-and-multiply or binary
exponentiation). It calculates the modular squares of base and multiplies all squares for which the exp
has a 1 in its binary notation.
See https://en.wikipedia.org/wiki/Exponentiation_by_squaring.`,
		Args: cobra.ExactArgs(3),
		PreRunE: func(cmd *cobra.Command, args []string) error {
			b, err := strconv.Atoi(args[0])
			if err != nil {
				return fmt.Errorf("first argument is not an int: %w", err)
			}
			if b > math.MaxInt32 {
				return fmt.Errorf("base is greater than MaxInt32 (%d): %d", math.MaxInt32, b)
			}
			base = int32(b)
			e, err := strconv.Atoi(args[1])
			if err != nil {
				return fmt.Errorf("second argument is not an int: %w", err)
			}
			if e > math.MaxInt32 {
				// Fixed copy-paste bug: the message previously printed b
				// (the base) instead of the offending exponent value.
				return fmt.Errorf("exponent is greater than MaxInt32 (%d): %d", math.MaxInt32, e)
			}
			exp = int32(e)
			m, err := strconv.Atoi(args[2])
			if err != nil {
				return fmt.Errorf("third argument is not an int: %w", err)
			}
			if m > math.MaxInt32 {
				// Fixed copy-paste bug: the message previously printed b
				// (the base) instead of the offending modulus value.
				return fmt.Errorf("modulus is greater than MaxInt32 (%d): %d", math.MaxInt32, m)
			}
			mod = int32(m)
			// Arguments validated: suppress cobra's usage/error echo so
			// RunE failures are reported once by the caller.
			cmd.SilenceErrors = true
			cmd.SilenceUsage = true
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			return runPow32(base, exp, mod)
		},
	}
	return cmd
}
// runPow32 computes base^exp mod m via modular.Pow32 and prints the result.
// The deferred recover converts a panic raised inside Pow32 (apparently used
// for invalid inputs — confirm against the modular package) into a regular
// error return.
func runPow32(base, exp, mod int32) (err error) {
	defer func() {
		if p := recover(); p != nil {
			if e, ok := p.(error); ok {
				err = e
			}
			if e, ok := p.(string); ok {
				// Use an explicit "%s" verb: passing the recovered string
				// directly as the format would misinterpret any '%' it
				// contains (go vet: non-constant format string).
				err = fmt.Errorf("%s", e)
			}
		}
	}()
	pow := modular.Pow32(base, exp, mod)
	fmt.Printf("%s ^ %s mod %s = %d\n", parenthesis(base), parenthesis(exp), parenthesis(mod), pow)
	return nil
}
// parenthesis renders i as a string, wrapping negative values in
// parentheses so printed expressions like "(-3) ^ 2" read unambiguously.
func parenthesis(i int32) string {
	if i >= 0 {
		return fmt.Sprintf("%d", i)
	}
	return fmt.Sprintf("(%d)", i)
}
package kubernetes
import "fmt"
// Swagger is a map of schema keys to schema objects loaded in from a swagger file.
type Swagger struct {
	Definitions map[string]*Schema
}
// Schema is a swagger schema. I'm sure there's a real definition somewhere but this gets everything this program needs.
type Schema struct {
	// Description is the description of the object that conforms to this schema.
	Description string
	// Required lists the required fields of this object.
	Required []string
	// Properties maps each field name of the object to its property definition.
	Properties map[string]*Property
	// Initializers are optional. TODO(chuckha) what are these actually?
	Initializers *Initializers
	// Kind is an optional field that describes the kind of object inside a list type
	Kind *Kind
	// Metadata is an optional field that contains a reference to some other schema.
	Metadata *Metadata
	// GVK is the GroupVersionKind from kubernetes, taken from the
	// x-kubernetes-group-version-kind vendor extension.
	GVK []*GroupVersionKind `json:"x-kubernetes-group-version-kind"`
	// Type is when the object is actually a type rename of a builtin (type X string)
	Type string
	// Format is the format of the type when Type is "string"
	Format string
}
// Property is a single property, or field, of a schema.
type Property struct {
	// Description is the description of the property being defined.
	Description string
	// Type is a basic type like string, object, integer or array.
	Type string
	// Format is a sub type, such as string could be `byte` or integer could be `int64`.
	Format string
	// Items is required for array types. This tells us what types are inside the array.
	Items *Items
	// AdditionalProperties is an optional field for object types defining what kind of key/value pairs will be found on the object.
	// TODO(chuckha) make this a pointer type
	AdditionalProperties AdditionalProperties
	// Reference is a reference to another schema in our list of definitions.
	Reference string `json:"$ref"`
}
// String implements the Stringer interface and gives us a nice human readable output.
// A nil Items pointer renders as "<nil>" via the %v verb.
func (p *Property) String() string {
	return fmt.Sprintf(`
	Type: %s
	Items: %v
	Reference: %s
	`, p.Type, p.Items, p.Reference)
}
// Initializers are things that will run before a pod can start running.
// This is only used in two places, the object metadata and the initializersConfiguration object.
// See: https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#how-are-initializers-triggered
type Initializers struct {
	// Description is a description of the initializers.
	Description string
	// Type is the type of initializers, this is always an array.
	Type string
	// Items are the actual list of initializers.
	Items *Items
	// TODO(chuckha) Can probably remove these two
	PatchMergeKey string `json:"x-kubernetes-patch-merge-key"`
	PatchStrategy string `json:"x-kubernetes-patch-strategy"`
}
// Items are the array of items when the type is "array".
type Items struct {
	// Description is a description of the array of items.
	Description string
	// Type is the type of item in the array.
	Type string
	// Reference is the reference to the schema type of items stored in this array.
	Reference string `json:"$ref"`
	// Items can be an array of arrays of arrays of ...
	Items *Items
}
// Kind is the kind we all know and love from ObjectMeta.
type Kind struct {
	// Description describes what kind is, it's an resource in the API.
	Description string
	// Type is always a string in the Kind object.
	Type string
}
// Metadata is a reference to the shared metadata schema.
type Metadata struct {
	// Description describes what metadata is.
	Description string
	// Reference is the key to the metadata schema.
	Reference string `json:"$ref"`
}
// GroupVersionKind is the gvk of kubernetes schema.
type GroupVersionKind struct {
	// Group is the group such as v1, apps, etc.
	Group string
	// Kind is the type of object such as Deployment or Pod.
	Kind string
	// Version is the API version such as v1, v1alpha3, v1beta2.
	Version string
}
// AdditionalProperties define the type found in objects.
// Kubernetes only uses string:string dicts, but this might break on more advanced use cases.
type AdditionalProperties struct {
	// Type will almost always be a string
	Type string
}
package mesh
import (
"errors"
"io"
"github.com/EliCDavis/vector"
)
// Model is built with a collection of polygons (its faces).
type Model struct {
	faces []Polygon
}
// NewModel builds a new model from a non-nil, non-empty set of faces.
func NewModel(faces []Polygon) (Model, error) {
	if faces == nil {
		return Model{}, errors.New("Can not have nil faces")
	}
	if len(faces) == 0 {
		return Model{}, errors.New("Can't have a model with 0 faces")
	}
	// The previous version also summed the face centers here, but the
	// result was never used; the dead computation has been removed.
	return Model{faces}, nil
}
// GetCenterOfBoundingBox returns the center of the axis-aligned bounding
// box enclosing every vertex of the model. A model with no vertices yields
// the origin, matching the previous implementation's result for that case.
func (m Model) GetCenterOfBoundingBox() vector.Vector3 {
	var (
		minX, minY, minZ float64
		maxX, maxY, maxZ float64
		seen             bool
	)
	for _, poly := range m.faces {
		for _, p := range poly.GetVertices() {
			x, y, z := p.X(), p.Y(), p.Z()
			if !seen {
				// Seed the bounds from the first vertex instead of the old
				// +/-10000000 sentinels, which silently produced wrong
				// results for coordinates beyond that range.
				minX, minY, minZ = x, y, z
				maxX, maxY, maxZ = x, y, z
				seen = true
				continue
			}
			if x < minX {
				minX = x
			}
			if y < minY {
				minY = y
			}
			if z < minZ {
				minZ = z
			}
			if x > maxX {
				maxX = x
			}
			if y > maxY {
				maxY = y
			}
			if z > maxZ {
				maxZ = z
			}
		}
	}
	if !seen {
		return vector.NewVector3(0, 0, 0)
	}
	return vector.NewVector3(
		minX+(maxX-minX)/2.0,
		minY+(maxY-minY)/2.0,
		minZ+(maxZ-minZ)/2.0,
	)
}
// GetFaces returns all faces of the model.
// NOTE(review): this returns the internal slice without copying, so callers
// can mutate the model's faces — confirm that is intended.
func (m Model) GetFaces() []Polygon {
	return m.faces
}
// Merge combines the faces of both models into a new model. The combined
// slice is freshly allocated: a plain append(m.faces, other.faces...) could
// write into m's backing array when it has spare capacity, mutating the
// receiver.
func (m Model) Merge(other Model) Model {
	faces := make([]Polygon, 0, len(m.faces)+len(other.faces))
	faces = append(faces, m.faces...)
	faces = append(faces, other.faces...)
	model, _ := NewModel(faces)
	return model
}
// Translate returns a copy of the model with every face moved by movement.
func (m Model) Translate(movement vector.Vector3) Model {
	translated := make([]Polygon, 0, len(m.faces))
	for _, face := range m.faces {
		translated = append(translated, face.Translate(movement))
	}
	model, _ := NewModel(translated)
	return model
}
// Scale returns a copy of the model with each vertex's position scaled by
// amount relative to the given pivot point.
func (m Model) Scale(amount vector.Vector3, pivot vector.Vector3) Model {
	scaled := make([]Polygon, 0, len(m.faces))
	for _, face := range m.faces {
		scaled = append(scaled, face.Scale(amount, pivot))
	}
	model, _ := NewModel(scaled)
	return model
}
// Rotate returns a copy of the model with each face rotated by amount
// around the given pivot point.
func (m Model) Rotate(amount vector.Vector3, pivot vector.Vector3) Model {
	rotated := make([]Polygon, 0, len(m.faces))
	for _, face := range m.faces {
		rotated = append(rotated, face.Rotate(amount, pivot))
	}
	model, _ := NewModel(rotated)
	return model
}
// Save Writes a model to obj format, emitting each face in sequence.
// The offset starts at 1 and is threaded through each face's Save call so
// successive polygons reference the correct running vertex indices.
func (m Model) Save(w io.Writer) error {
	offset := 1
	var err error
	for _, face := range m.faces {
		offset, err = face.Save(w, offset)
		if err != nil {
			return err
		}
	}
	return nil
}
package utils
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
)
type ByteSlice struct {
data []byte
cursor uint32
}
func NewByteSlice(bytes []byte) *ByteSlice {
return &ByteSlice{bytes, 0}
}
func (self *ByteSlice) At() uint32 {
return self.cursor
}
func (self *ByteSlice) Size() uint32 {
return uint32(len(self.data))
}
func (self *ByteSlice) Skip(num uint32) error {
return self.Seek(self.cursor + num)
}
func (self *ByteSlice) Seek(pos uint32) error {
if pos > uint32(len(self.data)) {
return errors.New(fmt.Sprintf("Target position %d is out of range (slice has %d bytes)!", pos, len(self.data)))
}
self.cursor = pos
return nil
}
// Reads a single byte at pos.
func (self *ByteSlice) ReadByte(pos uint32) byte {
return self.data[pos]
}
// Reads num bytes starting (including) from pos.
func (self *ByteSlice) ReadBytes(pos uint32, num uint32) []byte {
return self.data[pos:(pos + num)]
}
// Read an unsigned 8-bit integer at pos.
func (self *ByteSlice) ReadUint8(pos uint32) uint8 {
return uint8(self.ReadByte(pos))
}
// Read an unsigned 16-bit integer at pos.
func (self *ByteSlice) ReadUint16(pos uint32) uint16 {
return binary.LittleEndian.Uint16(self.ReadBytes(pos, 2))
}
// Read an signed 16-bit integer at pos.
func (self *ByteSlice) ReadInt16(pos uint32) int16 {
b := self.ReadBytes(pos, 2)
buf := bytes.NewReader(b)
result := int16(0)
err := binary.Read(buf, binary.LittleEndian, &result)
if err != nil {
fmt.Println("binary.Read failed:", err)
}
return result
}
// Read an unsigned 32-bit integer at pos.
func (self *ByteSlice) ReadUint32(pos uint32) uint32 {
return binary.LittleEndian.Uint32(self.ReadBytes(pos, 4))
}
// Read an signed 32-bit integer at pos.
func (self *ByteSlice) ReadInt32(pos uint32) int32 {
b := self.ReadBytes(pos, 4)
buf := bytes.NewReader(b)
result := int32(0)
err := binary.Read(buf, binary.LittleEndian, &result)
if err != nil {
fmt.Println("binary.Read failed:", err)
}
return result
}
// The following functions are useful when processing a byte slice in a linear fashion.
func (self *ByteSlice) advance(steps uint32) {
self.cursor = self.cursor + steps
}
func (self *ByteSlice) ConsumeByte() byte {
return self.ConsumeBytes(1)[0]
}
func (self *ByteSlice) ConsumeBytes(num uint32) []byte {
defer self.advance(num)
return self.ReadBytes(self.cursor, num)
}
func (self *ByteSlice) ConsumeUint8() uint8 {
defer self.advance(1)
return self.ReadUint8(self.cursor)
}
func (self *ByteSlice) ConsumeUint16() uint16 {
defer self.advance(2)
return self.ReadUint16(self.cursor)
}
func (self *ByteSlice) ConsumeInt16() int16 {
defer self.advance(2)
return self.ReadInt16(self.cursor)
}
func (self *ByteSlice) ConsumeUint32() uint32 {
defer self.advance(4)
return self.ReadUint32(self.cursor)
}
func (self *ByteSlice) ConsumeInt32() int32 {
defer self.advance(4)
return self.ReadInt32(self.cursor)
} | utils/bytes.go | 0.721253 | 0.438485 | bytes.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// Filter
// NOTE(review): this model appears to be auto-generated (kiota-style import
// alias and serialization helpers); prefer regenerating over hand-editing.
type Filter struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // *Experimental* Filter group set used to decide whether given object belongs and should be processed as part of this object mapping. An object is considered in scope if ANY of the groups in the collection is evaluated to true.
    categoryFilterGroups []FilterGroupable
    // Filter group set used to decide whether given object is in scope for provisioning. This is the filter which should be used in most cases. If an object used to satisfy this filter at a given moment, and then the object or the filter was changed so that filter is not satisfied any longer, such object will get de-provisioned'. An object is considered in scope if ANY of the groups in the collection is evaluated to true.
    groups []FilterGroupable
    // *Experimental* Filter group set used to filter out objects at the early stage of reading them from the directory. If an object doesn't satisfy this filter it will not be processed further. Important to understand is that if an object used to satisfy this filter at a given moment, and then the object or the filter was changed so that filter is no longer satisfied, such object will NOT get de-provisioned. An object is considered in scope if ANY of the groups in the collection is evaluated to true.
    inputFilterGroups []FilterGroupable
}
// NewFilter instantiates a new filter and sets the default values.
func NewFilter()(*Filter) {
    filter := &Filter{}
    filter.SetAdditionalData(map[string]interface{}{})
    return filter
}
// CreateFilterFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// Filter has no subtypes, so the parse node is ignored and a plain Filter is
// always returned; the error result exists to satisfy the factory signature.
func CreateFilterFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewFilter(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
// A nil receiver yields nil.
func (m *Filter) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    }
    return m.additionalData
}
// GetCategoryFilterGroups gets the categoryFilterGroups property value. *Experimental* Filter group set used to decide whether given object belongs and should be processed as part of this object mapping. An object is considered in scope if ANY of the groups in the collection is evaluated to true.
// A nil receiver yields nil.
func (m *Filter) GetCategoryFilterGroups()([]FilterGroupable) {
    if m == nil {
        return nil
    }
    return m.categoryFilterGroups
}
// GetFieldDeserializers the deserialization information for the current model.
// All three filter-group fields deserialize identically, so a single helper
// builds the handler for each: parse a collection of FilterGroup objects and
// hand the typed slice to the matching setter.
func (m *Filter) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    handler := func(set func([]FilterGroupable)) func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        return func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
            val, err := n.GetCollectionOfObjectValues(CreateFilterGroupFromDiscriminatorValue)
            if err != nil {
                return err
            }
            if val != nil {
                groups := make([]FilterGroupable, len(val))
                for i, v := range val {
                    groups[i] = v.(FilterGroupable)
                }
                set(groups)
            }
            return nil
        }
    }
    res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
    res["categoryFilterGroups"] = handler(m.SetCategoryFilterGroups)
    res["groups"] = handler(m.SetGroups)
    res["inputFilterGroups"] = handler(m.SetInputFilterGroups)
    return res
}
// GetGroups gets the groups property value. Filter group set used to decide whether given object is in scope for provisioning. This is the filter which should be used in most cases. If an object used to satisfy this filter at a given moment, and then the object or the filter was changed so that filter is not satisfied any longer, such object will get de-provisioned'. An object is considered in scope if ANY of the groups in the collection is evaluated to true.
func (m *Filter) GetGroups()([]FilterGroupable) {
    // Nil receivers are tolerated so accessors can be chained safely.
    if m == nil {
        return nil
    }
    return m.groups
}
// GetInputFilterGroups gets the inputFilterGroups property value. *Experimental* Filter group set used to filter out objects at the early stage of reading them from the directory. If an object doesn't satisfy this filter it will not be processed further. Important to understand is that if an object used to satisfy this filter at a given moment, and then the object or the filter was changed so that filter is no longer satisfied, such object will NOT get de-provisioned. An object is considered in scope if ANY of the groups in the collection is evaluated to true.
func (m *Filter) GetInputFilterGroups()([]FilterGroupable) {
    // Nil receivers are tolerated so accessors can be chained safely.
    if m == nil {
        return nil
    }
    return m.inputFilterGroups
}
// Serialize writes the current object's properties to the provided writer.
func (m *Filter) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    // writeGroups writes one FilterGroupable collection under the given key,
    // skipping nil collections. Replaces three copy-pasted cast-and-write
    // blocks.
    writeGroups := func(key string, groups []FilterGroupable) error {
        if groups == nil {
            return nil
        }
        cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(groups))
        for i, v := range groups {
            cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
        }
        return writer.WriteCollectionOfObjectValues(key, cast)
    }
    if err := writeGroups("categoryFilterGroups", m.GetCategoryFilterGroups()); err != nil {
        return err
    }
    if err := writeGroups("groups", m.GetGroups()); err != nil {
        return err
    }
    if err := writeGroups("inputFilterGroups", m.GetInputFilterGroups()); err != nil {
        return err
    }
    // Round-trip any properties not described in the OpenAPI description.
    if err := writer.WriteAdditionalData(m.GetAdditionalData()); err != nil {
        return err
    }
    return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *Filter) SetAdditionalData(value map[string]interface{})() {
    // Nil receivers are tolerated; the call is then a no-op.
    if m == nil {
        return
    }
    m.additionalData = value
}
// SetCategoryFilterGroups sets the categoryFilterGroups property value. *Experimental* Filter group set used to decide whether given object belongs and should be processed as part of this object mapping. An object is considered in scope if ANY of the groups in the collection is evaluated to true.
func (m *Filter) SetCategoryFilterGroups(value []FilterGroupable)() {
    // Nil receivers are tolerated; the call is then a no-op.
    if m == nil {
        return
    }
    m.categoryFilterGroups = value
}
// SetGroups sets the groups property value. Filter group set used to decide whether given object is in scope for provisioning. This is the filter which should be used in most cases. If an object used to satisfy this filter at a given moment, and then the object or the filter was changed so that filter is not satisfied any longer, such object will get de-provisioned'. An object is considered in scope if ANY of the groups in the collection is evaluated to true.
func (m *Filter) SetGroups(value []FilterGroupable)() {
    // Nil receivers are tolerated; the call is then a no-op.
    if m == nil {
        return
    }
    m.groups = value
}
// SetInputFilterGroups sets the inputFilterGroups property value. *Experimental* Filter group set used to filter out objects at the early stage of reading them from the directory. If an object doesn't satisfy this filter it will not be processed further. Important to understand is that if an object used to satisfy this filter at a given moment, and then the object or the filter was changed so that filter is no longer satisfied, such object will NOT get de-provisioned. An object is considered in scope if ANY of the groups in the collection is evaluated to true.
func (m *Filter) SetInputFilterGroups(value []FilterGroupable)() {
if m != nil {
m.inputFilterGroups = value
}
} | models/filter.go | 0.679179 | 0.504639 | filter.go | starcoder |
package aoc2021
import (
"fmt"
"github.com/simonski/goutils"
)
/*
--- Day 16: Packet Decoder ---
As you leave the cave and reach open waters, you receive a transmission from the Elves back on the ship.
The transmission was sent using the Buoyancy Interchange Transmission System (BITS), a method of packing numeric expressions into a binary sequence. Your submarine's computer has saved the transmission in hexadecimal (your puzzle input).
The first step of decoding the message is to convert the hexadecimal representation into binary. Each character of hexadecimal corresponds to four bits of binary data:
0 = 0000
1 = 0001
2 = 0010
3 = 0011
4 = 0100
5 = 0101
6 = 0110
7 = 0111
8 = 1000
9 = 1001
A = 1010
B = 1011
C = 1100
D = 1101
E = 1110
F = 1111
The BITS transmission contains a single packet at its outermost layer which itself contains many other packets. The hexadecimal representation of this packet might encode a few extra 0 bits at the end; these are not part of the transmission and should be ignored.
Every packet begins with a standard header: the first three bits encode the packet version, and the next three bits encode the packet type ID. These two values are numbers; all numbers encoded in any packet are represented as binary with the most significant bit first. For example, a version encoded as the binary sequence 100 represents the number 4.
Packets with type ID 4 represent a literal value. Literal value packets encode a single binary number. To do this, the binary number is padded with leading zeroes until its length is a multiple of four bits, and then it is broken into groups of four bits. Each group is prefixed by a 1 bit except the last group, which is prefixed by a 0 bit. These groups of five bits immediately follow the packet header. For example, the hexadecimal string D2FE28 becomes:
4xxx[1xxx][1xxx][1xxx][1xxx][0xxx]
D2FE28
110100101111111000101000 24
VVVTTTAAAAABBBBBCCCCC 21
D2DE28
110|100|10111|11110|00101000 24
g1 | g2 | g | a = 0111 b 1110 c 0101 discard final 3,
011111100101 = 2021
[version][typeID]
[110|100]
110 version 6 = VVV
100 type 4 TTT
110100 vesion and type
110100 add padding to multiple of 4
0011, 0100 break to groups
10011, 00100 prefix the groups
AAAAA
Below each bit is a label indicating its purpose:
The three bits labeled V (110) are the packet version, 6.
The three bits labeled T (100) are the packet type ID, 4, which means the packet is a literal value.
The five bits labeled A (10111) start with a 1 (not the last group, keep reading) and contain the first four bits of the number, 0111.
The five bits labeled B (11110) start with a 1 (not the last group, keep reading) and contain four more bits of the number, 1110.
The five bits labeled C (00101) start with a 0 (last group, end of packet) and contain the last four bits of the number, 0101.
The three unlabeled 0 bits at the end are extra due to the hexadecimal representation and should be ignored.
So, this packet represents a literal value with binary representation 011111100101, which is 2021 in decimal.
Every other type of packet (any packet with a type ID other than 4) represent an operator that performs some calculation on one or more sub-packets contained within. Right now, the specific operations aren't important; focus on parsing the hierarchy of sub-packets.
An operator packet contains one or more packets. To indicate which subsequent binary data represents its sub-packets, an operator packet can use one of two modes indicated by the bit immediately after the packet header; this is called the length type ID:
If the length type ID is 0, then the next 15 bits are a number that represents the total length in bits of the sub-packets contained by this packet.
If the length type ID is 1, then the next 11 bits are a number that represents the number of sub-packets immediately contained by this packet.
Finally, after the length type ID bit and the 15-bit or 11-bit field, the sub-packets appear.
For example, here is an operator packet (hexadecimal string 38006F45291200) with length type ID 0 that contains two sub-packets:
001110 0 000000000011011 110100010100101001000100100 0000000
VVVTTT I LLLLLLLLLLLLLLL AAAAAAAAAAABBBBBBBBBBBBBBBB
The three bits labeled V (001) are the packet version, 1.
The three bits labeled T (110) are the packet type ID, 6, which means the packet is an operator.
The bit labeled I (0) is the length type ID, which indicates that the length is a 15-bit number representing the number of bits in the sub-packets.
The 15 bits labeled L (000000000011011) contain the total length of
the sub-packets in bits, 27.
The 11 bits labeled A contain the first sub-packet, a literal value representing the number 10.
The 16 bits labeled B contain the second sub-packet, a literal value representing the number 20.
After reading 11 and 16 bits of sub-packet data, the total length indicated in L (27) is reached, and so parsing of this packet stops.
As another example, here is an operator packet (hexadecimal string EE00D40C823060) with length type ID 1 that contains three sub-packets:
111011100000000011 01010000001 10010000010 00110000011 00000
VVVTTTILLLLLLLLLLL AAAAAAAAAAA BBBBBBBBBBB CCCCCCCCCCC
The three bits labeled V (111) are the packet version, 7.
The three bits labeled T (011) are the packet type ID, 3, which means the packet is an operator.
The bit labeled I (1) is the length type ID, which indicates that the length is a 11-bit number representing the number of sub-packets.
The 11 bits labeled L (00000000011) contain the number of sub-packets, 3.
The 11 bits labeled A contain the first sub-packet, a literal value representing the number 1.
The 11 bits labeled B contain the second sub-packet, a literal value representing the number 2.
The 11 bits labeled C contain the third sub-packet, a literal value representing the number 3.
After reading 3 complete sub-packets, the number of sub-packets indicated in L (3) is reached, and so parsing of this packet stops.
For now, parse the hierarchy of the packets throughout the transmission and add up all of the version numbers.
Here are a few more examples of hexadecimal-encoded transmissions:
8A004A801A8002F478 represents an operator packet (version 4) which contains an operator packet (version 1) which contains an operator packet (version 5) which contains a literal value (version 6); this packet has a version sum of 16.
620080001611562C8802118E34 represents an operator packet (version 3) which contains two sub-packets; each sub-packet is an operator packet that contains two literal values. This packet has a version sum of 12.
C0015000016115A2E0802F182340 has the same structure as the previous example, but the outermost packet uses a different length type ID. This packet has a version sum of 23.
A0016C880162017C3686B18A3D4780 is an operator packet that contains an operator packet that contains an operator packet that contains five literal values; it has a version sum of 31.
Decode the structure of your hexadecimal-encoded BITS transmission; what do you get if you add up the version numbers in all packets?
*/
// Y2021D16P1 solves Advent of Code 2021 day 16 part 1: parse the BITS
// transmission and report the sum of all packet version numbers.
//
// The seven worked samples from the puzzle text were previously guarded by
// seven copy-pasted RUN_N blocks (all set to false, i.e. dead code); they are
// now a single table behind runSamples and can be re-enabled by flipping it.
func (app *Application) Y2021D16P1() {
    DEBUG := false
    runSamples := false
    line := goutils.Repeatstring("*", 120)
    // banner reproduces the separator noise that used to wrap every sample.
    banner := func(opening bool) {
        if !DEBUG {
            return
        }
        if opening {
            fmt.Println(line)
            fmt.Println(line)
        } else {
            fmt.Println(line)
            fmt.Println()
            fmt.Println()
            fmt.Println()
        }
    }
    // runSample parses one hex transmission and reports its version total.
    // expected < 0 means the puzzle text gives no expected total for it.
    runSample := func(label string, hex string, expected int) {
        banner(true)
        c := NewContextD16()
        c.DEBUG = true
        RParse(hex, c)
        total := c.Root.CalculateVersionTotal()
        if expected >= 0 {
            fmt.Printf("%s: Version total should be %d, is %v\n", label, expected, total)
        } else {
            fmt.Printf("%s: Version total is %v\n", label, total)
        }
        c.Root.Debug()
        c.Root.Tree(0)
        banner(false)
    }
    if runSamples {
        runSample("Part1-1", DAY_2021_16_TEST_DATA_1, -1)
        runSample("Part1-2", "38006F45291200", -1)
        runSample("Part1-3", "EE00D40C823060", -1)
        runSample("Part1-4", "8A004A801A8002F478", 16)
        runSample("Part1-5", "620080001611562C8802118E34", 12)
        runSample("Part1-6", "C0015000016115A2E0802F182340", 23)
        runSample("Part1-7", "A0016C880162017C3686B18A3D4780", 31)
    }
    // The real puzzle input.
    c := NewContextD16()
    c.DEBUG = true
    RParse(DAY_2021_16_DATA, c)
    total := c.Root.CalculateVersionTotal()
    c.Root.Debug()
    c.Root.Tree(0)
    fmt.Printf("Version Total should be 920, is %v\n", total)
}
// Y2021D16P2 solves Advent of Code 2021 day 16 part 2: evaluate the BITS
// expression tree and report the resulting value. The eight worked examples
// from the puzzle text run first, each printing its expected value next to
// the computed one. The previous version copy-pasted the same five lines for
// every example, with inconsistent messages and a missing Debug() call on the
// first case.
func (app *Application) Y2021D16P2() {
    // check parses one hex transmission and reports its evaluated value.
    check := func(hex string, expected int) {
        c := NewContextD16()
        c.DEBUG = true
        RParse(hex, c)
        fmt.Printf("Part2: %s value should be %d, is %v\n", hex, expected, c.Root.GetValue())
        c.Root.Debug()
    }
    check("C200B40A82", 3)
    check("04005AC33890", 54)
    check("880086C3E88112", 7)
    check("CE00C43D881120", 9)
    check("D8005AC2A8F0", 1)
    check("F600BC2D8F", 0)
    check("9C005AC2F8F0", 0)
    check("9C0141080250320F1802104A08", 1)
    fmt.Println()
    fmt.Println()
    // The real puzzle input.
    c2 := NewContextD16()
    c2.DEBUG = true
    RParse(DAY_2021_16_DATA, c2)
    c2.Root.Debug()
    fmt.Printf("DATA Value is %v\n", c2.Root.GetValue())
}
// rename and uncomment this to the year and day in question once complete for a gold star!
// func (app *Application) Y20XXDXXP1Render() {
// }
// rename and uncomment this to the year and day in question once complete for a gold star!
// func (app *Application) Y20XXDXXP2Render() {
// }
// this is what we will reflect and call - so both parts with run. It's up to you to make it print nicely etc.
// The app reference has a CLI for logging.
// func (app *Application) Y2021D16() {
// app.Y2021D16P1()
// app.Y2021D16P2()
// } | app/aoc2021/aoc2021_16.go | 0.724091 | 0.734667 | aoc2021_16.go | starcoder |
import (
"github.com/kaitai-io/kaitai_struct_go_runtime/kaitai"
"io"
)
/**
* A structured binary format native to Minecraft for saving game data and transferring
* it over the network (in multiplayer), such as player data
* ([`<player>.dat`](https://minecraft.gamepedia.com/Player.dat_format); contains
* e.g. player's inventory and location), saved worlds
* ([`level.dat`](
* https://minecraft.gamepedia.com/Java_Edition_level_format#level.dat_format
* ) and [Chunk format](https://minecraft.gamepedia.com/Chunk_format#NBT_structure)),
* list of saved multiplayer servers
* ([`servers.dat`](https://minecraft.gamepedia.com/Servers.dat_format)) and so on -
* see <https://minecraft.gamepedia.com/NBT_format#Uses>.
*
* The entire file should be _gzip_-compressed (in accordance with the original
* specification [NBT.txt](
* https://web.archive.org/web/20110723210920/https://www.minecraft.net/docs/NBT.txt
* ) by Notch), but can also be compressed with _zlib_ or uncompressed.
*
* This spec can only handle uncompressed NBT data, so be sure to first detect
* what type of data you are dealing with. You can use the Unix `file` command
* to do this (`file-5.20` or later is required; older versions do not recognize
* _zlib_-compressed data and return `application/octet-stream` instead):
*
* ```shell
* file --brief --mime-type input-unknown.nbt
* ```
*
* If it says:
*
* * `application/x-gzip` or `application/gzip` (since `file-5.37`), you can decompress it by
* * `gunzip -c input-gzip.nbt > output.nbt` or
* * `python3 -c "import sys, gzip; sys.stdout.buffer.write(
* gzip.decompress(sys.stdin.buffer.read()) )" < input-gzip.nbt > output.nbt`
* * `application/zlib`, you can use
* * `openssl zlib -d -in input-zlib.nbt -out output.nbt` (does not work on most systems)
* * `python3 -c "import sys, zlib; sys.stdout.buffer.write(
* zlib.decompress(sys.stdin.buffer.read()) )" < input-zlib.nbt > output.nbt`
* * something else (especially `image/x-pcx` and `application/octet-stream`),
* it is most likely already uncompressed.
*
* The file `output.nbt` generated by one of the above commands can already be
* processed with this Kaitai Struct specification.
*
* This spec **only** implements the Java edition format. There is also
* a [Bedrock edition](https://wiki.vg/NBT#Bedrock_edition) NBT format,
* which uses little-endian encoding and has a few other differences, but it isn't
* as popular as the Java edition format.
*
* **Implementation note:** strings in `TAG_String` are incorrectly decoded with
* standard UTF-8, while they are encoded in [**Modified UTF-8**](
* https://docs.oracle.com/javase/8/docs/api/java/io/DataInput.html#modified-utf-8
* ) (MUTF-8). That's because MUTF-8 is not supported natively by most target
* languages, and thus one must use external libraries to achieve a fully-compliant
* decoder. But decoding in standard UTF-8 is still better than nothing, and
* it usually works fine.
*
* All Unicode code points with incompatible representations in MUTF-8 and UTF-8 are
* U+0000 (_NUL_), U+D800-U+DFFF (_High_ and _Low Surrogates_) and U+10000-U+10FFFF
* (all _Supplementary_ Planes; includes e.g. emoticons, pictograms).
* A _MUTF-8_-encoded string containing these code points cannot be successfully
* decoded as UTF-8. The behavior in this case depends on the target language -
* usually an exception is thrown, or the bytes that are not valid UTF-8
* are replaced or ignored.
*
* **Sample files:**
*
* * <https://wiki.vg/NBT#Download>
* * <https://github.com/twoolie/NBT/blob/f9e892e/tests/world_test/data/scoreboard.dat>
* * <https://github.com/chmod222/cNBT/tree/3f74b69/testdata>
* * <https://github.com/PistonDevelopers/hematite_nbt/tree/0b85f89/tests>
* @see <a href="https://wiki.vg/NBT">Source</a>
* @see <a href="https://web.archive.org/web/20110723210920/https://www.minecraft.net/docs/NBT.txt">Source</a>
* @see <a href="https://minecraft.gamepedia.com/NBT_format">Source</a>
*/
// MinecraftNbt_Tag enumerates the NBT tag type IDs. The numeric values are
// the one-byte type codes read directly from the stream, so they must not be
// renumbered.
type MinecraftNbt_Tag int
const (
    MinecraftNbt_Tag__End MinecraftNbt_Tag = 0
    MinecraftNbt_Tag__Byte MinecraftNbt_Tag = 1
    MinecraftNbt_Tag__Short MinecraftNbt_Tag = 2
    MinecraftNbt_Tag__Int MinecraftNbt_Tag = 3
    MinecraftNbt_Tag__Long MinecraftNbt_Tag = 4
    MinecraftNbt_Tag__Float MinecraftNbt_Tag = 5
    MinecraftNbt_Tag__Double MinecraftNbt_Tag = 6
    MinecraftNbt_Tag__ByteArray MinecraftNbt_Tag = 7
    MinecraftNbt_Tag__String MinecraftNbt_Tag = 8
    MinecraftNbt_Tag__List MinecraftNbt_Tag = 9
    MinecraftNbt_Tag__Compound MinecraftNbt_Tag = 10
    MinecraftNbt_Tag__IntArray MinecraftNbt_Tag = 11
    MinecraftNbt_Tag__LongArray MinecraftNbt_Tag = 12
)
// MinecraftNbt is the root object of a parsed (uncompressed) NBT stream.
type MinecraftNbt struct {
    RootCheck []byte // never populated at runtime; see the unreachable branch in Read
    Root *MinecraftNbt_NamedTag
    _io *kaitai.Stream
    _root *MinecraftNbt
    _parent interface{}
    _f_rootType bool // lazy-instance guard for rootType
    rootType MinecraftNbt_Tag
}
// NewMinecraftNbt instantiates an empty MinecraftNbt; call Read to populate it.
func NewMinecraftNbt() *MinecraftNbt {
    return &MinecraftNbt{
    }
}
// Read parses the whole transmission: it first forces evaluation of RootType
// (which validates that the stream starts with a TAG_Compound) and then reads
// the single root named tag.
func (this *MinecraftNbt) Read(io *kaitai.Stream, parent interface{}, root *MinecraftNbt) (err error) {
    this._io = io
    this._parent = parent
    this._root = root
    tmp1, err := this.RootType()
    if err != nil {
        return err
    }
    // NOTE(review): the `&& (false)` makes this branch unreachable; it appears
    // to exist only so the generated code references the RootType value above.
    if ( ((tmp1 == MinecraftNbt_Tag__End) && (false)) ) {
        tmp2, err := this._io.ReadBytes(int(0))
        if err != nil {
            return err
        }
        tmp2 = tmp2
        this.RootCheck = tmp2
    }
    tmp3 := NewMinecraftNbt_NamedTag()
    err = tmp3.Read(this._io, this, this._root)
    if err != nil {
        return err
    }
    this.Root = tmp3
    return err
}
// RootType lazily peeks at byte 0 of the stream (restoring the read position
// afterwards) and returns the root tag's type. It fails with a validation
// error unless the root is a TAG_Compound.
//
// Fixes a compile error in the generated code: the named result v has type
// MinecraftNbt_Tag (an integer type), so the error paths must return its zero
// value rather than nil. The redundant recursive self-calls used for the
// validation check are also removed; the cached value is validated directly.
func (this *MinecraftNbt) RootType() (v MinecraftNbt_Tag, err error) {
    if this._f_rootType {
        return this.rootType, nil
    }
    _pos, err := this._io.Pos()
    if err != nil {
        return 0, err
    }
    if _, err = this._io.Seek(int64(0), io.SeekStart); err != nil {
        return 0, err
    }
    raw, err := this._io.ReadU1()
    if err != nil {
        return 0, err
    }
    this.rootType = MinecraftNbt_Tag(raw)
    if _, err = this._io.Seek(_pos, io.SeekStart); err != nil {
        return 0, err
    }
    this._f_rootType = true
    if this.rootType != MinecraftNbt_Tag__Compound {
        return 0, kaitai.NewValidationNotEqualError(MinecraftNbt_Tag__Compound, this.rootType, this._io, "/instances/root_type")
    }
    return this.rootType, nil
}
// MinecraftNbt_TagLongArray is a TAG_Long_Array payload: a length-prefixed
// array of signed 64-bit big-endian integers.
type MinecraftNbt_TagLongArray struct {
    NumTags int32
    Tags []int64
    _io *kaitai.Stream
    _root *MinecraftNbt
    _parent interface{}
    _f_tagsType bool // lazy-instance guard for tagsType
    tagsType MinecraftNbt_Tag
}
// NewMinecraftNbt_TagLongArray instantiates an empty TagLongArray; call Read to populate it.
func NewMinecraftNbt_TagLongArray() *MinecraftNbt_TagLongArray {
    return &MinecraftNbt_TagLongArray{
    }
}
// Read parses an s4be element count followed by that many s8be values.
func (this *MinecraftNbt_TagLongArray) Read(io *kaitai.Stream, parent interface{}, root *MinecraftNbt) (err error) {
    this._io = io
    this._parent = parent
    this._root = root
    count, err := this._io.ReadS4be()
    if err != nil {
        return err
    }
    this.NumTags = count
    this.Tags = make([]int64, this.NumTags)
    for i := 0; i < len(this.Tags); i++ {
        value, err := this._io.ReadS8be()
        if err != nil {
            return err
        }
        this.Tags[i] = value
    }
    return nil
}
// TagsType is a constant instance: elements of a long array are always TAG_Long.
func (this *MinecraftNbt_TagLongArray) TagsType() (v MinecraftNbt_Tag, err error) {
    if !this._f_tagsType {
        this.tagsType = MinecraftNbt_Tag__Long
        this._f_tagsType = true
    }
    return this.tagsType, nil
}
// MinecraftNbt_TagByteArray is a TAG_Byte_Array payload: a length-prefixed
// run of raw bytes.
type MinecraftNbt_TagByteArray struct {
    LenData int32
    Data []byte
    _io *kaitai.Stream
    _root *MinecraftNbt
    _parent interface{}
}
// NewMinecraftNbt_TagByteArray instantiates an empty TagByteArray; call Read to populate it.
func NewMinecraftNbt_TagByteArray() *MinecraftNbt_TagByteArray {
    return &MinecraftNbt_TagByteArray{
    }
}
// Read parses an s4be length prefix followed by that many raw payload bytes.
func (this *MinecraftNbt_TagByteArray) Read(io *kaitai.Stream, parent interface{}, root *MinecraftNbt) (err error) {
    this._io = io
    this._parent = parent
    this._root = root
    length, err := this._io.ReadS4be()
    if err != nil {
        return err
    }
    this.LenData = length
    data, err := this._io.ReadBytes(int(this.LenData))
    if err != nil {
        return err
    }
    this.Data = data
    return nil
}
// MinecraftNbt_TagIntArray is a TAG_Int_Array payload: a length-prefixed
// array of signed 32-bit big-endian integers.
type MinecraftNbt_TagIntArray struct {
    NumTags int32
    Tags []int32
    _io *kaitai.Stream
    _root *MinecraftNbt
    _parent interface{}
    _f_tagsType bool // lazy-instance guard for tagsType
    tagsType MinecraftNbt_Tag
}
// NewMinecraftNbt_TagIntArray instantiates an empty TagIntArray; call Read to populate it.
func NewMinecraftNbt_TagIntArray() *MinecraftNbt_TagIntArray {
    return &MinecraftNbt_TagIntArray{
    }
}
// Read parses an s4be element count followed by that many s4be values.
func (this *MinecraftNbt_TagIntArray) Read(io *kaitai.Stream, parent interface{}, root *MinecraftNbt) (err error) {
    this._io = io
    this._parent = parent
    this._root = root
    count, err := this._io.ReadS4be()
    if err != nil {
        return err
    }
    this.NumTags = count
    this.Tags = make([]int32, this.NumTags)
    for i := 0; i < len(this.Tags); i++ {
        value, err := this._io.ReadS4be()
        if err != nil {
            return err
        }
        this.Tags[i] = value
    }
    return nil
}
// TagsType is a constant instance: elements of an int array are always TAG_Int.
func (this *MinecraftNbt_TagIntArray) TagsType() (v MinecraftNbt_Tag, err error) {
    if !this._f_tagsType {
        this.tagsType = MinecraftNbt_Tag__Int
        this._f_tagsType = true
    }
    return this.tagsType, nil
}
// MinecraftNbt_TagList is a TAG_List payload: a homogeneous, length-prefixed
// sequence of unnamed tags, all of type TagsType.
type MinecraftNbt_TagList struct {
    TagsType MinecraftNbt_Tag
    NumTags int32
    Tags []interface{} // element Go type depends on TagsType
    _io *kaitai.Stream
    _root *MinecraftNbt
    _parent interface{}
}
// NewMinecraftNbt_TagList instantiates an empty TagList; call Read to populate it.
func NewMinecraftNbt_TagList() *MinecraftNbt_TagList {
    return &MinecraftNbt_TagList{
    }
}
// Read parses the list header (one element-type byte, then an s4be count)
// followed by NumTags unnamed payloads of that element type.
func (this *MinecraftNbt_TagList) Read(io *kaitai.Stream, parent interface{}, root *MinecraftNbt) (err error) {
    this._io = io
    this._parent = parent
    this._root = root
    elemType, err := this._io.ReadU1()
    if err != nil {
        return err
    }
    this.TagsType = MinecraftNbt_Tag(elemType)
    count, err := this._io.ReadS4be()
    if err != nil {
        return err
    }
    this.NumTags = count
    this.Tags = make([]interface{}, this.NumTags)
    for i := range this.Tags {
        // Parse exactly one payload of the list's element type. Unknown
        // element types leave the slot nil, matching the generated behavior.
        var item interface{}
        switch this.TagsType {
        case MinecraftNbt_Tag__Byte:
            item, err = this._io.ReadS1()
        case MinecraftNbt_Tag__Short:
            item, err = this._io.ReadS2be()
        case MinecraftNbt_Tag__Int:
            item, err = this._io.ReadS4be()
        case MinecraftNbt_Tag__Long:
            item, err = this._io.ReadS8be()
        case MinecraftNbt_Tag__Float:
            item, err = this._io.ReadF4be()
        case MinecraftNbt_Tag__Double:
            item, err = this._io.ReadF8be()
        case MinecraftNbt_Tag__ByteArray:
            sub := NewMinecraftNbt_TagByteArray()
            err = sub.Read(this._io, this, this._root)
            item = sub
        case MinecraftNbt_Tag__String:
            sub := NewMinecraftNbt_TagString()
            err = sub.Read(this._io, this, this._root)
            item = sub
        case MinecraftNbt_Tag__List:
            sub := NewMinecraftNbt_TagList()
            err = sub.Read(this._io, this, this._root)
            item = sub
        case MinecraftNbt_Tag__Compound:
            sub := NewMinecraftNbt_TagCompound()
            err = sub.Read(this._io, this, this._root)
            item = sub
        case MinecraftNbt_Tag__IntArray:
            sub := NewMinecraftNbt_TagIntArray()
            err = sub.Read(this._io, this, this._root)
            item = sub
        case MinecraftNbt_Tag__LongArray:
            sub := NewMinecraftNbt_TagLongArray()
            err = sub.Read(this._io, this, this._root)
            item = sub
        }
        if err != nil {
            return err
        }
        this.Tags[i] = item
    }
    return nil
}
// MinecraftNbt_TagString is a TAG_String payload: a u2be length prefix
// followed by the string bytes (decoded here as UTF-8; the format is
// actually Modified UTF-8, see the file header comment).
type MinecraftNbt_TagString struct {
    LenData uint16
    Data string
    _io *kaitai.Stream
    _root *MinecraftNbt
    _parent interface{}
}
// NewMinecraftNbt_TagString instantiates an empty TagString; call Read to populate it.
func NewMinecraftNbt_TagString() *MinecraftNbt_TagString {
    return &MinecraftNbt_TagString{
    }
}
// Read parses a u2be length prefix, then that many bytes decoded as a string.
func (this *MinecraftNbt_TagString) Read(io *kaitai.Stream, parent interface{}, root *MinecraftNbt) (err error) {
    this._io = io
    this._parent = parent
    this._root = root
    length, err := this._io.ReadU2be()
    if err != nil {
        return err
    }
    this.LenData = length
    raw, err := this._io.ReadBytes(int(this.LenData))
    if err != nil {
        return err
    }
    this.Data = string(raw)
    return nil
}
/**
 * unsigned according to https://wiki.vg/NBT#Specification
 * (NOTE(review): this generated comment appears to refer to the string
 * length field above rather than to the compound type itself.)
 */
// MinecraftNbt_TagCompound is a TAG_Compound payload: a sequence of named
// tags terminated by a TAG_End entry (the terminator is kept in Tags).
type MinecraftNbt_TagCompound struct {
    Tags []*MinecraftNbt_NamedTag
    _io *kaitai.Stream
    _root *MinecraftNbt
    _parent interface{}
    _f_dumpNumTags bool // lazy-instance guard for dumpNumTags
    dumpNumTags int
}
// NewMinecraftNbt_TagCompound instantiates an empty TagCompound; call Read to populate it.
func NewMinecraftNbt_TagCompound() *MinecraftNbt_TagCompound {
    return &MinecraftNbt_TagCompound{
    }
}
// Read keeps parsing named tags until (and including) the TAG_End terminator;
// the terminator itself is appended to Tags, mirroring the generated code.
func (this *MinecraftNbt_TagCompound) Read(io *kaitai.Stream, parent interface{}, root *MinecraftNbt) (err error) {
    this._io = io
    this._parent = parent
    this._root = root
    for {
        tag := NewMinecraftNbt_NamedTag()
        if err = tag.Read(this._io, this, this._root); err != nil {
            return err
        }
        this.Tags = append(this.Tags, tag)
        done, err := tag.IsTagEnd()
        if err != nil {
            return err
        }
        if done {
            return nil
        }
    }
}
// DumpNumTags lazily computes the number of meaningful entries in Tags,
// excluding the trailing TAG_End terminator when present.
//
// Fixes two defects in the generated code: `len(this.Tags) - tmp31` mixed
// `int` and `int8` operands (a compile error in Go), and the last element was
// indexed before the `len(...) >= 1` guard was evaluated, which would panic
// on an empty Tags slice.
func (this *MinecraftNbt_TagCompound) DumpNumTags() (v int, err error) {
    if this._f_dumpNumTags {
        return this.dumpNumTags, nil
    }
    trailer := 0
    if n := len(this.Tags); n >= 1 {
        isEnd, err := this.Tags[n-1].IsTagEnd()
        if err != nil {
            return 0, err
        }
        if isEnd {
            trailer = 1
        }
    }
    this.dumpNumTags = len(this.Tags) - trailer
    this._f_dumpNumTags = true
    return this.dumpNumTags, nil
}
// MinecraftNbt_NamedTag is one tag as stored inside a compound: a one-byte
// type, then (unless the type is TAG_End) a name string and a payload.
type MinecraftNbt_NamedTag struct {
    Type MinecraftNbt_Tag
    Name *MinecraftNbt_TagString // nil for TAG_End
    Payload interface{} // Go type depends on Type; nil for TAG_End
    _io *kaitai.Stream
    _root *MinecraftNbt
    _parent interface{}
    _f_isTagEnd bool // lazy-instance guard for isTagEnd
    isTagEnd bool
}
// NewMinecraftNbt_NamedTag instantiates an empty NamedTag; call Read to populate it.
func NewMinecraftNbt_NamedTag() *MinecraftNbt_NamedTag {
    return &MinecraftNbt_NamedTag{
    }
}
// Read parses one named tag: the type byte, then — unless the tag is
// TAG_End — a name string and a payload whose shape is selected by the type.
func (this *MinecraftNbt_NamedTag) Read(io *kaitai.Stream, parent interface{}, root *MinecraftNbt) (err error) {
    this._io = io
    this._parent = parent
    this._root = root
    tmp34, err := this._io.ReadU1()
    if err != nil {
        return err
    }
    this.Type = MinecraftNbt_Tag(tmp34)
    // TAG_End carries neither a name nor a payload, so both reads below are
    // skipped for it.
    tmp35, err := this.IsTagEnd()
    if err != nil {
        return err
    }
    if (!(tmp35)) {
        tmp36 := NewMinecraftNbt_TagString()
        err = tmp36.Read(this._io, this, this._root)
        if err != nil {
            return err
        }
        this.Name = tmp36
    }
    tmp37, err := this.IsTagEnd()
    if err != nil {
        return err
    }
    if (!(tmp37)) {
        // Payload dispatch on the tag type. Unknown types leave Payload nil.
        switch (this.Type) {
        case MinecraftNbt_Tag__LongArray:
            tmp38 := NewMinecraftNbt_TagLongArray()
            err = tmp38.Read(this._io, this, this._root)
            if err != nil {
                return err
            }
            this.Payload = tmp38
        case MinecraftNbt_Tag__Compound:
            tmp39 := NewMinecraftNbt_TagCompound()
            err = tmp39.Read(this._io, this, this._root)
            if err != nil {
                return err
            }
            this.Payload = tmp39
        case MinecraftNbt_Tag__Double:
            tmp40, err := this._io.ReadF8be()
            if err != nil {
                return err
            }
            this.Payload = tmp40
        case MinecraftNbt_Tag__List:
            tmp41 := NewMinecraftNbt_TagList()
            err = tmp41.Read(this._io, this, this._root)
            if err != nil {
                return err
            }
            this.Payload = tmp41
        case MinecraftNbt_Tag__Float:
            tmp42, err := this._io.ReadF4be()
            if err != nil {
                return err
            }
            this.Payload = tmp42
        case MinecraftNbt_Tag__Short:
            tmp43, err := this._io.ReadS2be()
            if err != nil {
                return err
            }
            this.Payload = tmp43
        case MinecraftNbt_Tag__Int:
            tmp44, err := this._io.ReadS4be()
            if err != nil {
                return err
            }
            this.Payload = tmp44
        case MinecraftNbt_Tag__ByteArray:
            tmp45 := NewMinecraftNbt_TagByteArray()
            err = tmp45.Read(this._io, this, this._root)
            if err != nil {
                return err
            }
            this.Payload = tmp45
        case MinecraftNbt_Tag__Byte:
            tmp46, err := this._io.ReadS1()
            if err != nil {
                return err
            }
            this.Payload = tmp46
        case MinecraftNbt_Tag__IntArray:
            tmp47 := NewMinecraftNbt_TagIntArray()
            err = tmp47.Read(this._io, this, this._root)
            if err != nil {
                return err
            }
            this.Payload = tmp47
        case MinecraftNbt_Tag__String:
            tmp48 := NewMinecraftNbt_TagString()
            err = tmp48.Read(this._io, this, this._root)
            if err != nil {
                return err
            }
            this.Payload = tmp48
        case MinecraftNbt_Tag__Long:
            tmp49, err := this._io.ReadS8be()
            if err != nil {
                return err
            }
            this.Payload = tmp49
        }
    }
    return err
}
func (this *MinecraftNbt_NamedTag) IsTagEnd() (v bool, err error) {
if (this._f_isTagEnd) {
return this.isTagEnd, nil
}
this.isTagEnd = bool(this.Type == MinecraftNbt_Tag__End)
this._f_isTagEnd = true
return this.isTagEnd, nil
} | minecraft_nbt/src/go/minecraft_nbt.go | 0.695338 | 0.409398 | minecraft_nbt.go | starcoder |
package isc
// ISCListToMap adapts an ISCList of T so its elements can be mapped,
// flat-mapped, and reduced into values of a second type R. It exists
// because Go methods cannot introduce new type parameters, so the result
// type R must be fixed when the adapter is constructed.
type ISCListToMap[T any, R any] struct {
	ISCList[T]
}

// ListToMapFrom wraps an existing list in an ISCListToMap adapter.
func ListToMapFrom[T any, R any](list ISCList[T]) ISCListToMap[T, R] {
	return ISCListToMap[T, R]{list}
}

// FlatMap applies f to each element and concatenates the resulting slices.
func (lm ISCListToMap[T, R]) FlatMap(f func(T) []R) ISCList[R] {
	return ListFlatMap(lm.ISCList, f)
}

// FlatMapIndexed is FlatMap with the element index passed to f.
func (lm ISCListToMap[T, R]) FlatMapIndexed(f func(int, T) []R) ISCList[R] {
	return ListFlatMapIndexed(lm.ISCList, f)
}

// FlatMapTo is FlatMap that appends results into dest.
func (lm ISCListToMap[T, R]) FlatMapTo(dest *[]R, f func(T) []R) ISCList[R] {
	return ListFlatMapTo(lm.ISCList, dest, f)
}

// FlatMapIndexedTo is FlatMapIndexed that appends results into dest.
func (lm ISCListToMap[T, R]) FlatMapIndexedTo(dest *[]R, f func(int, T) []R) ISCList[R] {
	return ListFlatMapIndexedTo(lm.ISCList, dest, f)
}

// Map transforms every element with f.
func (lm ISCListToMap[T, R]) Map(f func(T) R) ISCList[R] {
	return ListMap(lm.ISCList, f)
}

// MapIndexed is Map with the element index passed to f.
func (lm ISCListToMap[T, R]) MapIndexed(f func(int, T) R) ISCList[R] {
	return ListMapIndexed(lm.ISCList, f)
}

// MapTo is Map that appends results into dest.
func (lm ISCListToMap[T, R]) MapTo(dest *[]R, f func(T) R) ISCList[R] {
	return ListMapTo(lm.ISCList, dest, f)
}

// MapIndexedTo is MapIndexed that appends results into dest.
func (lm ISCListToMap[T, R]) MapIndexedTo(dest *[]R, f func(int, T) R) ISCList[R] {
	return ListMapIndexedTo(lm.ISCList, dest, f)
}

// Reduce folds the list into a single R, seeding with init applied to the
// first element.
func (lm ISCListToMap[T, R]) Reduce(init func(T) R, f func(R, T) R) R {
	return Reduce(lm.ISCList, init, f)
}

// ReduceIndexed is Reduce with element indices passed to init and f.
func (lm ISCListToMap[T, R]) ReduceIndexed(init func(int, T) R, f func(int, R, T) R) R {
	return ReduceIndexed(lm.ISCList, init, f)
}
// ISCListToSlice adapts an ISCList of T for slice-oriented operations that
// need a comparable key type R.
type ISCListToSlice[T any, R comparable] struct {
	ISCList[T]
}

// ListToSliceFrom wraps an existing list in an ISCListToSlice adapter.
func ListToSliceFrom[T any, R comparable](list ISCList[T]) ISCListToSlice[T, R] {
	return ISCListToSlice[T, R]{list}
}

// SliceContains reports whether any element, projected through predicate,
// equals key.
func (ls ISCListToSlice[T, R]) SliceContains(predicate func(T) R, key R) bool {
	return SliceContains(ls.ISCList, predicate, key)
}

// SliceTo builds a map from each element's projected key to the element.
func (ls ISCListToSlice[T, R]) SliceTo(valueTransform func(T) R) ISCMap[R, T] {
	return SliceTo(ls.ISCList, valueTransform)
}
// ISCListToTriple adapts an ISCList of T for grouping and association
// operations involving a key type K and a value type V.
type ISCListToTriple[T comparable, K comparable, V any] struct {
	ISCList[T]
}

// ListToTripleFrom wraps an existing list in an ISCListToTriple adapter.
func ListToTripleFrom[T comparable, K comparable, V any](list ISCList[T]) ISCListToTriple[T, K, V] {
	return ISCListToTriple[T, K, V]{list}
}

// GroupBy buckets elements by the key f derives from each.
func (lt ISCListToTriple[T, K, V]) GroupBy(f func(T) K) map[K][]T {
	return GroupBy(lt.ISCList, f)
}

// GroupByTransform buckets trans(element) values by the key f derives.
func (lt ISCListToTriple[T, K, V]) GroupByTransform(f func(T) K, trans func(T) V) map[K][]V {
	return GroupByTransform(lt.ISCList, f, trans)
}

// GroupByTo is GroupBy writing its buckets into dest.
func (lt ISCListToTriple[T, K, V]) GroupByTo(dest *map[K][]T, f func(T) K) map[K][]T {
	return GroupByTo(lt.ISCList, dest, f)
}

// GroupByTransformTo is GroupByTransform writing its buckets into dest.
func (lt ISCListToTriple[T, K, V]) GroupByTransformTo(dest *map[K][]V, f func(T) K, trans func(T) V) map[K][]V {
	return GroupByTransformTo(lt.ISCList, dest, f, trans)
}

// Associate builds a map from the key/value pairs produced by transform.
func (lt ISCListToTriple[T, K, V]) Associate(transform func(T) Pair[K, V]) ISCMap[K, V] {
	return Associate(lt.ISCList, transform)
}

// AssociateTo is Associate writing entries into destination.
func (lt ISCListToTriple[T, K, V]) AssociateTo(destination *map[K]V, transform func(T) Pair[K, V]) ISCMap[K, V] {
	return AssociateTo(lt.ISCList, destination, transform)
}

// AssociateBy maps each element under the key keySelector derives from it.
func (lt ISCListToTriple[T, K, V]) AssociateBy(keySelector func(T) K) ISCMap[K, T] {
	return AssociateBy(lt.ISCList, keySelector)
}

// AssociateByAndValue maps keySelector(element) to valueTransform(element).
func (lt ISCListToTriple[T, K, V]) AssociateByAndValue(keySelector func(T) K, valueTransform func(T) V) ISCMap[K, V] {
	return AssociateByAndValue(lt.ISCList, keySelector, valueTransform)
}

// AssociateByTo is AssociateBy writing entries into destination.
func (lt ISCListToTriple[T, K, V]) AssociateByTo(destination *map[K]T, keySelector func(T) K) ISCMap[K, T] {
	return AssociateByTo(lt.ISCList, destination, keySelector)
}

// AssociateByAndValueTo is AssociateByAndValue writing entries into destination.
func (lt ISCListToTriple[T, K, V]) AssociateByAndValueTo(destination *map[K]V, keySelector func(T) K, valueTransform func(T) V) ISCMap[K, V] {
	return AssociateByAndValueTo(lt.ISCList, destination, keySelector, valueTransform)
}

// AssociateWith maps each element itself to valueSelector(element).
func (lt ISCListToTriple[T, K, V]) AssociateWith(valueSelector func(T) V) ISCMap[T, V] {
	return AssociateWith(lt.ISCList, valueSelector)
}

// AssociateWithTo is AssociateWith writing entries into destination.
func (lt ISCListToTriple[T, K, V]) AssociateWithTo(destination *map[T]V, valueSelector func(T) V) ISCMap[T, V] {
	return AssociateWithTo(lt.ISCList, destination, valueSelector)
}
type ISCListToPair[K comparable, V comparable] struct {
ISCList[Pair[K, V]]
}
func ListToPairFrom[K comparable, V comparable](list ISCList[Pair[K, V]]) ISCListToPair[K, V] {
return ISCListToPair[K, V]{
list,
}
}
func ListToPairWithPairs[K comparable, V comparable](list ...Pair[K, V]) ISCListToPair[K, V] {
return ISCListToPair[K, V]{
list,
}
}
func (l ISCListToPair[K, V]) ToMap() ISCMap[K, V] {
m := make(map[K]V)
for _, item := range l.ISCList {
m[item.First] = item.Second
}
return NewMapWithMap(m)
} | isc/listobj_ext.go | 0.517815 | 0.47244 | listobj_ext.go | starcoder |
package query
import (
"bytes"
"encoding/gob"
"fmt"
"github.com/goradd/goradd/pkg/datetime"
"log"
"reflect"
"strings"
"time"
)
// ValueNode represents a value for a built-in type that is to be used in a query.
// The wrapped value is normalized by NewValueNode to one of a small set of
// canonical Go types (see NewValueNode) before being stored.
type ValueNode struct {
	value interface{}
}

// Value is a shortcut for converting a constant value to a ValueNode.
func Value(i interface{}) NodeI {
	return NewValueNode(i)
}
// NewValueNode returns a new ValueNode that wraps the given value.
//
// Natively supported scalars (string, int, uint, uint64, int64, float64,
// float32, time.Time) are stored as-is. []byte is stored as a string and
// datetime.DateTime as its time.Time equivalent. All remaining values are
// normalized via reflection: signed integer kinds become int, unsigned
// kinds become uint, and slices/arrays become []NodeI with each element
// wrapped by a recursive NewValueNode call. Passing nil, or a kind that
// cannot be normalized, panics.
func NewValueNode(i interface{}) NodeI {
	n := &ValueNode{
		value: i,
	}
	switch v := i.(type) {
	case string, int, uint, uint64, int64, float64, float32, time.Time:
		// Already in canonical form; keep the value exactly as given.
	// casts
	case []byte:
		n.value = string(v)
	case datetime.DateTime:
		n.value = v.GoTime()
	case nil:
		panic("You cannot use nil as an operator. If you are testing for a NULL, use the IsNull function.")
	default:
		// Use reflection to normalize the remaining kinds.
		val := reflect.ValueOf(v)
		switch val.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			n.value = int(val.Int())
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			n.value = uint(val.Uint())
		case reflect.Bool:
			n.value = val.Bool()
		case reflect.Float32:
			// converting float32 to float64 might cause problems in the final sql statement, so we leave the type as float32
			n.value = float32(val.Float())
		case reflect.Float64:
			n.value = val.Float()
		case reflect.Slice, reflect.Array:
			var ary []NodeI
			for j := 0; j < val.Len(); j++ {
				// TODO: Handle NodeI's here too? Prevent more than one level deep?
				ary = append(ary, NewValueNode(val.Index(j).Interface()))
			}
			n.value = ary
		case reflect.String:
			n.value = val.String()
		default:
			panic("Can't use this type as a value node.")
		}
	}
	return n
}
// Equals reports whether n2 is a ValueNode wrapping an equal value.
// Slice values (as produced by NewValueNode for slices/arrays) are compared
// element-wise via NodeI.Equals; scalar values are compared with ==.
//
// A slice-valued node is never equal to a scalar-valued node. This case is
// checked explicitly because comparing an interface holding a []NodeI with
// == panics at runtime (slices are not comparable), which the previous
// implementation could trigger when only n held a slice.
func (n *ValueNode) Equals(n2 NodeI) bool {
	cn, ok := n2.(*ValueNode)
	if !ok {
		return false
	}
	an1, isSlice1 := n.value.([]NodeI)
	an2, isSlice2 := cn.value.([]NodeI)
	if isSlice1 != isSlice2 {
		return false
	}
	if isSlice1 {
		if len(an1) != len(an2) {
			return false
		}
		for i, node := range an1 {
			if !node.Equals(an2[i]) {
				return false
			}
		}
		return true
	}
	return cn.value == n.value
}
// tableName returns the table this node belongs to. Value nodes are not
// attached to any table, so this is always the empty string.
func (n *ValueNode) tableName() string {
	return ""
}

// databaseKey returns the database this node belongs to. Value nodes are
// not attached to any database, so this is always the empty string.
func (n *ValueNode) databaseKey() string {
	return ""
}

// log prints a one-line description of the node, indented by level tabs,
// for query-tree debugging.
func (n *ValueNode) log(level int) {
	log.Printf("%sVal: %v", strings.Repeat("\t", level), n.value)
}
// ValueNodeGetValue is used internally by the framework to get the node's internal value.
func ValueNodeGetValue(n *ValueNode) interface{} {
	return n.value
}

// nodeType identifies this node as a value node to the query builder.
func (n *ValueNode) nodeType() NodeType {
	return ValueNodeType
}
// GobEncode implements gob.GobEncoder so that ValueNodes (whose value field
// is unexported) can be serialized. It gob-encodes the wrapped value and
// returns the resulting bytes. An encoding failure is returned as an error,
// as the GobEncoder contract expects, rather than panicking.
func (n *ValueNode) GobEncode() (data []byte, err error) {
	var buf bytes.Buffer
	e := gob.NewEncoder(&buf)
	if err = e.Encode(&n.value); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// GobDecode implements gob.GobDecoder, restoring the wrapped value that
// GobEncode serialized. A decoding failure is returned as an error, as the
// GobDecoder contract expects, rather than panicking.
func (n *ValueNode) GobDecode(data []byte) (err error) {
	dec := gob.NewDecoder(bytes.NewBuffer(data))
	return dec.Decode(&n.value)
}
// Register ValueNode with gob so it can be encoded/decoded through the
// NodeI interface.
func init() {
	gob.Register(&ValueNode{})
}
package datasheet
import (
"fmt"
"io"
)
// ActionStore stores all of the Action data. Note that querying actions
// directly on the map will result in an empty Omen field. You should use
// GetAction in order to have an action with a correctly populated field.
type ActionStore struct {
	Actions      map[uint32]Action      // keyed by Action.Key
	Omens        map[uint16]Omen        // keyed by Omen.Key
	CraftActions map[uint32]CraftAction // keyed by CraftAction.Key
}

// Action stores the data for a game Action. The datasheet tags map struct
// fields to columns of the Action data sheet.
type Action struct {
	Key           uint32 `datasheet:"key"`
	Name          string `datasheet:"Name"`
	Range         int8   `datasheet:"Range"`
	TargetArea    bool   `datasheet:"TargetArea"`
	CastType      byte   `datasheet:"CastType"`
	EffectRange   byte   `datasheet:"EffectRange"`
	XAxisModifier byte   `datasheet:"XAxisModifier"`
	OmenID        uint16 `datasheet:"Omen"` // raw sheet reference; resolved into Omen by GetAction
	Omen          string // omen path name; only populated by ActionStore.GetAction
}

// Omen stores the data for a game action Omen
type Omen struct {
	Key  uint16 `datasheet:"key"`
	Name string `datasheet:"Path"`
}

// CraftAction stores the data for a game crafting Action
type CraftAction struct {
	Key  uint32 `datasheet:"key"`
	Name string `datasheet:"Name"`
}
// PopulateActions fills the ActionStore's Actions map from the Action data
// sheet supplied by dataReader. Any previous contents are discarded.
func (a *ActionStore) PopulateActions(dataReader io.Reader) error {
	a.Actions = map[uint32]Action{}

	var rows []Action
	if err := UnmarshalReader(dataReader, &rows); err != nil {
		return fmt.Errorf("PopulateActions: %s", err)
	}
	for _, row := range rows {
		a.Actions[row.Key] = row
	}
	return nil
}
// PopulateOmens fills the ActionStore's Omens map from the Omen data sheet
// supplied by dataReader. Any previous contents are discarded.
func (a *ActionStore) PopulateOmens(dataReader io.Reader) error {
	a.Omens = map[uint16]Omen{}

	var rows []Omen
	if err := UnmarshalReader(dataReader, &rows); err != nil {
		return fmt.Errorf("PopulateOmens: %s", err)
	}
	for _, row := range rows {
		a.Omens[row.Key] = row
	}
	return nil
}
// PopulateCraftActions fills the ActionStore's CraftActions map from the
// CraftAction data sheet supplied by dataReader. Any previous contents are
// discarded.
func (a *ActionStore) PopulateCraftActions(dataReader io.Reader) error {
	a.CraftActions = map[uint32]CraftAction{}

	var rows []CraftAction
	if err := UnmarshalReader(dataReader, &rows); err != nil {
		return fmt.Errorf("PopulateCraftActions: %s", err)
	}
	for _, row := range rows {
		a.CraftActions[row.Key] = row
	}
	return nil
}
// GetAction returns the Action associated with the action key. It will
// also populate the Omen field on the action.
// If the action is not found in the standard Action store, it will attempt
// to return an action from the CraftAction store.
func (a *ActionStore) GetAction(key uint32) Action {
if action, found := a.Actions[key]; found {
if omen, found := a.Omens[action.OmenID]; found {
action.Omen = omen.Name
}
return action
}
if craftAction, found := a.CraftActions[key]; found {
return Action{
Key: craftAction.Key,
Name: craftAction.Name,
}
}
return Action{}
} | core/datasheet/action.go | 0.607314 | 0.420778 | action.go | starcoder |
package geom
import (
"math/rand"
"github.com/paulmach/orb"
"github.com/paulmach/orb/clip"
"github.com/paulmach/orb/geojson"
"github.com/paulmach/orb/planar"
"github.com/paulmach/orb/quadtree"
)
// CentroidPoint is used to manage generating a quadtree while referencing a GeoJSON Feature
// Based on example https://github.com/paulmach/orb/blob/master/geojson/example_pointer_test.go
type CentroidPoint struct {
	*geojson.Feature
}

// Point returns an orb.Point object from a CentroidPoint struct: the
// centroid of the wrapped feature's geometry. The area value returned
// alongside the centroid by planar.CentroidArea is discarded here.
func (cp CentroidPoint) Point() orb.Point {
	c, _ := planar.CentroidArea(cp.Feature.Geometry)
	return c
}
// FeatureCollectionBound returns the bounds of a FeatureCollection: the
// union of the bounds of every feature's geometry. An empty collection
// returns the zero orb.Bound instead of panicking on the previously
// unguarded fc.Features[0] access.
func FeatureCollectionBound(fc *geojson.FeatureCollection) orb.Bound {
	if len(fc.Features) == 0 {
		return orb.Bound{}
	}
	bound := fc.Features[0].Geometry.Bound()
	for _, feat := range fc.Features[1:] {
		bound = bound.Union(feat.Geometry.Bound())
	}
	return bound
}
// IntersectingFeatures returns all features in the quadtree that intersect
// the given feature. Candidates are narrowed by the feature's bounding box,
// then GeometriesIntersect is checked in both directions since that test is
// not symmetric.
func IntersectingFeatures(qt *quadtree.Quadtree, feat *geojson.Feature) []*geojson.Feature {
	var overlap []*geojson.Feature
	candidates := qt.InBound(nil, feat.Geometry.Bound())
	for _, ptr := range candidates {
		candidate := ptr.(CentroidPoint).Feature
		if GeometriesIntersect(feat.Geometry, candidate.Geometry) ||
			GeometriesIntersect(candidate.Geometry, feat.Geometry) {
			overlap = append(overlap, candidate)
		}
	}
	return overlap
}
// GeometriesIntersect checks whether two geometries intersect each other.
//
// NOTE(review): this is a vertex-containment approximation rather than a
// full intersection test — it reports true only when at least one ring
// vertex of intersectGeom lies inside geom. Overlaps that contain no vertex
// of the other geometry (e.g. crossing edges only) are missed, which is why
// IntersectingFeatures calls it in both directions. intersectGeom must be a
// Polygon or MultiPolygon; any other type yields false.
func GeometriesIntersect(geom orb.Geometry, intersectGeom orb.Geometry) bool {
	// Flatten intersectGeom into a list of polygons to iterate.
	var polyRange []orb.Polygon
	switch g := intersectGeom.(type) {
	case orb.Polygon:
		polyRange = []orb.Polygon{g}
	case orb.MultiPolygon:
		polyRange = g
	}
	for _, polygon := range polyRange {
		for _, ring := range polygon {
			for _, point := range ring {
				switch gt := geom.(type) {
				case orb.Polygon:
					if planar.PolygonContains(gt, point) {
						return true
					}
				case orb.MultiPolygon:
					if planar.MultiPolygonContains(gt, point) {
						return true
					}
				}
			}
		}
	}
	return false
}
// OverlapArea returns the area of overlapGeom after clipping it to the
// bounding box of sourceGeom. A clone is clipped so the input geometry is
// left untouched.
func OverlapArea(sourceGeom orb.Geometry, overlapGeom orb.Geometry) float64 {
	clipped := clip.Geometry(sourceGeom.Bound(), orb.Clone(overlapGeom))
	return planar.Area(clipped)
}
// RandomPointInGeom generates a random point within a given geometry
func RandomPointInGeom(geom orb.Geometry) orb.Point {
for i := 0; i < 1000; i++ {
bounds := geom.Bound()
lon := bounds.Min[0] + rand.Float64()*(bounds.Max[0]-bounds.Min[0])
lat := bounds.Min[1] + rand.Float64()*(bounds.Max[1]-bounds.Min[1])
point := orb.Point{lon, lat}
switch g := geom.(type) {
case orb.Polygon:
if planar.PolygonContains(g, point) {
return point
}
case orb.MultiPolygon:
if planar.MultiPolygonContains(g, point) {
return point
}
}
}
return orb.Point{}
} | pkg/geom/geom.go | 0.869382 | 0.537891 | geom.go | starcoder |
package ink
import (
"fmt"
"strings"
)
// Node represents an abstract syntax tree (AST) node in an Ink program.
type Node interface {
	String() string
	Position() position
	Eval(*StackFrame, bool) (Value, error)
}

// poss returns a string representation of the Position of a given node,
// appropriate for an error message.
func poss(n Node) string {
	return n.Position().String()
}
// UnaryExprNode is a unary operation (e.g. negation) applied to an operand.
type UnaryExprNode struct {
	operator Kind
	operand  Node
	position
}

func (n UnaryExprNode) String() string {
	return fmt.Sprintf("Unary %s (%s)", n.operator, n.operand)
}

func (n UnaryExprNode) Position() position {
	return n.position
}

// BinaryExprNode is a binary operation between two operand expressions.
type BinaryExprNode struct {
	operator     Kind
	leftOperand  Node
	rightOperand Node
	position
}

func (n BinaryExprNode) String() string {
	return fmt.Sprintf("Binary (%s) %s (%s)", n.leftOperand, n.operator, n.rightOperand)
}

func (n BinaryExprNode) Position() position {
	return n.position
}

// FunctionCallNode is an invocation of a function expression with arguments.
type FunctionCallNode struct {
	function  Node
	arguments []Node
}

func (n FunctionCallNode) String() string {
	args := make([]string, len(n.arguments))
	for i, a := range n.arguments {
		args[i] = a.String()
	}
	return fmt.Sprintf("Call (%s) on (%s)",
		n.function,
		strings.Join(args, ", "))
}

// Position reports the function expression's position, since the call
// itself carries none.
func (n FunctionCallNode) Position() position {
	return n.function.Position()
}

// MatchClauseNode is a single "target -> expression" arm of a match.
type MatchClauseNode struct {
	target     Node
	expression Node
}

func (n MatchClauseNode) String() string {
	return fmt.Sprintf("Clause (%s) -> (%s)", n.target, n.expression)
}

// Position reports the clause target's position.
func (n MatchClauseNode) Position() position {
	return n.target.Position()
}

// MatchExprNode is a match expression: a condition dispatched over clauses.
type MatchExprNode struct {
	condition Node
	clauses   []MatchClauseNode
	position
}

func (n MatchExprNode) String() string {
	clauses := make([]string, len(n.clauses))
	for i, c := range n.clauses {
		clauses[i] = c.String()
	}
	// clauses is a []string formatted with %s, so it renders with Go's
	// default slice syntax ("[a b c]") rather than comma-joined.
	return fmt.Sprintf("Match on (%s) to {%s}", n.condition, clauses)
}

func (n MatchExprNode) Position() position {
	return n.position
}
// ExpressionListNode is a parenthesized sequence of expressions evaluated
// in order.
type ExpressionListNode struct {
	expressions []Node
	position
}

func (n ExpressionListNode) String() string {
	exprs := make([]string, len(n.expressions))
	for i, expr := range n.expressions {
		exprs[i] = expr.String()
	}
	return fmt.Sprintf("Expression List (%s)", strings.Join(exprs, ", "))
}

func (n ExpressionListNode) Position() position {
	return n.position
}

// EmptyIdentifierNode is the "_" placeholder identifier.
type EmptyIdentifierNode struct {
	position
}

func (n EmptyIdentifierNode) String() string {
	return "Empty Identifier"
}

func (n EmptyIdentifierNode) Position() position {
	return n.position
}

// IdentifierNode is a named identifier reference.
type IdentifierNode struct {
	val string
	position
}

func (n IdentifierNode) String() string {
	return fmt.Sprintf("Identifier '%s'", n.val)
}

func (n IdentifierNode) Position() position {
	return n.position
}

// NumberLiteralNode is a numeric literal; all Ink numbers are float64.
type NumberLiteralNode struct {
	val float64
	position
}

func (n NumberLiteralNode) String() string {
	return fmt.Sprintf("Number %s", nToS(n.val))
}

func (n NumberLiteralNode) Position() position {
	return n.position
}

// StringLiteralNode is a string literal.
type StringLiteralNode struct {
	val string
	position
}

func (n StringLiteralNode) String() string {
	return fmt.Sprintf("String '%s'", n.val)
}

func (n StringLiteralNode) Position() position {
	return n.position
}

// BooleanLiteralNode is a true/false literal.
type BooleanLiteralNode struct {
	val bool
	position
}

func (n BooleanLiteralNode) String() string {
	return fmt.Sprintf("Boolean %t", n.val)
}

func (n BooleanLiteralNode) Position() position {
	return n.position
}
// ObjectLiteralNode is a composite (object) literal of key/value entries.
type ObjectLiteralNode struct {
	entries []ObjectEntryNode
	position
}

func (n ObjectLiteralNode) String() string {
	entries := make([]string, len(n.entries))
	for i, e := range n.entries {
		entries[i] = e.String()
	}
	return fmt.Sprintf("Object {%s}",
		strings.Join(entries, ", "))
}

func (n ObjectLiteralNode) Position() position {
	return n.position
}

// ObjectEntryNode is a single key/value entry inside an object literal.
// Its Position method is presumably provided by the embedded position —
// no explicit override is defined here.
type ObjectEntryNode struct {
	key Node
	val Node
	position
}

func (n ObjectEntryNode) String() string {
	return fmt.Sprintf("Object Entry (%s): (%s)", n.key, n.val)
}

// ListLiteralNode is a list literal of value expressions.
type ListLiteralNode struct {
	vals []Node
	position
}

func (n ListLiteralNode) String() string {
	vals := make([]string, len(n.vals))
	for i, v := range n.vals {
		vals[i] = v.String()
	}
	return fmt.Sprintf("List [%s]", strings.Join(vals, ", "))
}

func (n ListLiteralNode) Position() position {
	return n.position
}

// FunctionLiteralNode is a function literal: an argument list and a body
// expression.
type FunctionLiteralNode struct {
	arguments []Node
	body      Node
	position
}

func (n FunctionLiteralNode) String() string {
	args := make([]string, len(n.arguments))
	for i, a := range n.arguments {
		args[i] = a.String()
	}
	return fmt.Sprintf("Function (%s) => (%s)", strings.Join(args, ", "), n.body)
}

func (n FunctionLiteralNode) Position() position {
	return n.position
}
// guardUnexpectedInputEnd returns a syntax error if idx is past the end of
// tokens, reporting the last seen token when one exists; otherwise nil.
// Parse functions call this before reading tokens[idx].
func guardUnexpectedInputEnd(tokens []Tok, idx int) error {
	if idx < len(tokens) {
		return nil
	}
	if len(tokens) > 0 {
		return Err{
			ErrSyntax,
			fmt.Sprintf("unexpected end of input at %s", tokens[len(tokens)-1]),
		}
	}
	// Plain string: the previous fmt.Sprintf call had no format verbs
	// (flagged by go vet / staticcheck S1039).
	return Err{
		ErrSyntax,
		"unexpected end of input",
	}
}
// Parse concurrently transforms a stream of Tok (tokens) to Node (AST nodes).
// This implementation uses recursive descent parsing.
//
// The token stream is drained completely before parsing begins, since the
// parser needs lookahead. Each parsed top-level expression is sent on
// nodes, which is closed when parsing finishes or fails. On error the
// failure is logged (fatally when fatalError is set) and parsing stops.
func Parse(
	tokenStream <-chan Tok,
	nodes chan<- Node,
	fatalError bool,
	debugParser bool,
) {
	defer close(nodes)

	// Buffer the entire token stream into a slice for random access.
	tokens := make([]Tok, 0)
	for tok := range tokenStream {
		tokens = append(tokens, tok)
	}

	idx, length := 0, len(tokens)
	for idx < length {
		if tokens[idx].kind == Separator {
			// this sometimes happens when the repl receives comment inputs
			idx++
			continue
		}

		expr, incr, err := parseExpression(tokens[idx:])
		idx += incr
		if err != nil {
			e, isErr := err.(Err)
			if isErr {
				if fatalError {
					LogErr(e.reason, e.message)
				} else {
					LogSafeErr(e.reason, e.message)
				}
			} else {
				// Parse functions only raise Err values; anything else is
				// an internal invariant violation.
				LogErrf(ErrAssert, "err raised that was not of Err type -> %s",
					err.Error())
			}
			return
		}

		if debugParser {
			LogDebug("parse ->", expr.String())
		}
		nodes <- expr
	}
}
// getOpPriority returns the binding priority of a binary operator token;
// a higher value binds more tightly. Non-operator tokens yield -1.
func getOpPriority(t Tok) int {
	// higher == greater priority
	switch t.kind {
	case AccessorOp:
		return 100
	case ModulusOp:
		return 80
	case MultiplyOp, DivideOp:
		return 50
	case AddOp, SubtractOp:
		return 40
	case GreaterThanOp, LessThanOp, EqualOp:
		return 30
	case LogicalAndOp:
		return 20
	case LogicalXorOp:
		return 15
	case LogicalOrOp:
		return 10
	case DefineOp:
		return 0
	default:
		return -1
	}
}
// isBinaryOp reports whether t is one of the binary operator tokens.
func isBinaryOp(t Tok) bool {
	switch t.kind {
	case AddOp, SubtractOp, MultiplyOp, DivideOp, ModulusOp,
		LogicalAndOp, LogicalOrOp, LogicalXorOp,
		GreaterThanOp, LessThanOp, EqualOp, DefineOp, AccessorOp:
		return true
	}
	return false
}
// parseBinaryExpression parses the right-hand side of a binary operator and
// folds any subsequent operators into an expression tree using operator
// priorities (precedence climbing). leftOperand is the already-parsed left
// side, operator is the operator token that triggered this call, and
// previousPriority is the priority of the caller's pending operator —
// parsing returns to the caller when an operator at or below that priority
// is encountered. Returns the subtree, the number of tokens consumed from
// tokens, and any error.
func parseBinaryExpression(
	leftOperand Node,
	operator Tok,
	tokens []Tok,
	previousPriority int,
) (Node, int, error) {
	rightAtom, idx, err := parseAtom(tokens)
	if err != nil {
		return nil, 0, err
	}
	incr := 0

	// ops/nodes form a flat run of same-or-lower-priority operations;
	// higher-priority runs are folded into subtrees as they appear.
	ops := make([]Tok, 1)
	nodes := make([]Node, 2)
	ops[0] = operator
	nodes[0] = leftOperand
	nodes[1] = rightAtom

	// build up a list of binary operations, with tree nodes
	// where there are higher-priority binary ops
	for len(tokens) > idx && isBinaryOp(tokens[idx]) {
		if previousPriority >= getOpPriority(tokens[idx]) {
			// Priority is lower than the calling function's last op,
			// so return control to the parent binary op
			break
		} else if getOpPriority(ops[len(ops)-1]) >= getOpPriority(tokens[idx]) {
			// Priority is lower than the previous op (but higher than parent),
			// so it's ok to be left-heavy in this tree
			ops = append(ops, tokens[idx])
			idx++

			err := guardUnexpectedInputEnd(tokens, idx)
			if err != nil {
				return nil, 0, err
			}

			rightAtom, incr, err = parseAtom(tokens[idx:])
			if err != nil {
				return nil, 0, err
			}
			nodes = append(nodes, rightAtom)
			idx += incr
		} else {
			err := guardUnexpectedInputEnd(tokens, idx+1)
			if err != nil {
				return nil, 0, err
			}

			// Priority is higher than previous ops,
			// so make it a right-heavy tree
			subtree, incr, err := parseBinaryExpression(
				nodes[len(nodes)-1],
				tokens[idx],
				tokens[idx+1:],
				getOpPriority(ops[len(ops)-1]),
			)
			if err != nil {
				return nil, 0, err
			}
			nodes[len(nodes)-1] = subtree
			idx += incr + 1
		}
	}

	// ops, nodes -> left-biased binary expression tree
	tree := nodes[0]
	nodes = nodes[1:]
	for len(ops) > 0 {
		tree = BinaryExprNode{
			operator:     ops[0].kind,
			leftOperand:  tree,
			rightOperand: nodes[0],
			position:     ops[0].position,
		}
		ops = ops[1:]
		nodes = nodes[1:]
	}
	return tree, idx, nil
}
// parseExpression parses one full expression from tokens: an atom possibly
// followed by a binary-operator chain and/or a match body. Returns the AST
// node, the number of tokens consumed, and any error. A trailing Separator
// token is consumed here; closing tokens that belong to an enclosing atom
// (RightParen, KeyValueSeparator, CaseArrow) are left unconsumed.
func parseExpression(tokens []Tok) (Node, int, error) {
	idx := 0

	consumeDanglingSeparator := func() {
		// bounds check in case parseExpress() called at some point
		// consumed end token
		if idx < len(tokens) && tokens[idx].kind == Separator {
			idx++
		}
	}

	atom, incr, err := parseAtom(tokens[idx:])
	if err != nil {
		return nil, 0, err
	}
	idx += incr

	err = guardUnexpectedInputEnd(tokens, idx)
	if err != nil {
		return nil, 0, err
	}
	nextTok := tokens[idx]
	idx++

	switch nextTok.kind {
	case Separator:
		// consuming dangling separator
		return atom, idx, nil
	case RightParen, KeyValueSeparator, CaseArrow:
		// these belong to the parent atom that contains this expression,
		// so return without consuming token (idx - 1)
		return atom, idx - 1, nil
	case AddOp, SubtractOp, MultiplyOp, DivideOp, ModulusOp,
		LogicalAndOp, LogicalOrOp, LogicalXorOp,
		GreaterThanOp, LessThanOp, EqualOp, DefineOp, AccessorOp:
		binExpr, incr, err := parseBinaryExpression(atom, nextTok, tokens[idx:], -1)
		if err != nil {
			return nil, 0, err
		}
		idx += incr

		// Binary expressions are often followed by a match
		if idx < len(tokens) && tokens[idx].kind == MatchColon {
			colonPos := tokens[idx].position
			idx++ // MatchColon

			clauses, incr, err := parseMatchBody(tokens[idx:])
			if err != nil {
				return nil, 0, err
			}
			idx += incr

			consumeDanglingSeparator()
			return MatchExprNode{
				condition: binExpr,
				clauses:   clauses,
				position:  colonPos,
			}, idx, nil
		}

		consumeDanglingSeparator()
		return binExpr, idx, nil
	case MatchColon:
		clauses, incr, err := parseMatchBody(tokens[idx:])
		if err != nil {
			return nil, 0, err
		}
		idx += incr

		consumeDanglingSeparator()
		return MatchExprNode{
			condition: atom,
			clauses:   clauses,
			position:  nextTok.position,
		}, idx, nil
	default:
		return nil, 0, Err{
			ErrSyntax,
			fmt.Sprintf("unexpected token %s following an expression", nextTok),
		}
	}
}
// parseAtom parses a single atomic expression: a literal (number, string,
// boolean, object, list, function), an identifier, a unary negation, or a
// parenthesized expression group — plus any function-call suffixes that
// immediately follow it. Returns the node, tokens consumed, and any error.
// It never consumes a trailing Separator; that is parseExpression's job.
func parseAtom(tokens []Tok) (Node, int, error) {
	err := guardUnexpectedInputEnd(tokens, 0)
	if err != nil {
		return nil, 0, err
	}

	tok, idx := tokens[0], 1

	if tok.kind == NegationOp {
		// Unary negation: recurse for the operand atom.
		atom, idx, err := parseAtom(tokens[idx:])
		if err != nil {
			return nil, 0, err
		}
		return UnaryExprNode{
			operator: tok.kind,
			operand:  atom,
			position: tok.position,
		}, idx + 1, nil
	}

	err = guardUnexpectedInputEnd(tokens, idx)
	if err != nil {
		return nil, 0, err
	}

	var atom Node
	switch tok.kind {
	case NumberLiteral:
		return NumberLiteralNode{tok.num, tok.position}, idx, nil
	case StringLiteral:
		return StringLiteralNode{tok.str, tok.position}, idx, nil
	case TrueLiteral:
		return BooleanLiteralNode{true, tok.position}, idx, nil
	case FalseLiteral:
		return BooleanLiteralNode{false, tok.position}, idx, nil
	case Identifier:
		// An identifier followed by => is actually a single-argument
		// function literal, e.g. "x => x".
		if tokens[idx].kind == FunctionArrow {
			var err error
			atom, idx, err = parseFunctionLiteral(tokens)
			if err != nil {
				return nil, 0, err
			}

			// parseAtom should not consume trailing Separators, but
			// parseFunctionLiteral does because it ends with expressions.
			// so we backtrack one token.
			idx--
		} else {
			atom = IdentifierNode{tok.str, tok.position}
		}
		// may be called as a function, so flows beyond
		// switch block
	case EmptyIdentifier:
		if tokens[idx].kind == FunctionArrow {
			var err error
			atom, idx, err = parseFunctionLiteral(tokens)
			if err != nil {
				return nil, 0, err
			}

			// parseAtom should not consume trailing Separators, but
			// parseFunctionLiteral does because it ends with expressions.
			// so we backtrack one token.
			return atom, idx - 1, nil
		}

		return EmptyIdentifierNode{tok.position}, idx, nil
	case LeftParen:
		// grouped expression or function literal
		exprs := make([]Node, 0)
		for tokens[idx].kind != RightParen {
			expr, incr, err := parseExpression(tokens[idx:])
			if err != nil {
				return nil, 0, err
			}

			idx += incr
			exprs = append(exprs, expr)

			err = guardUnexpectedInputEnd(tokens, idx)
			if err != nil {
				return nil, 0, err
			}
		}
		idx++ // RightParen

		err = guardUnexpectedInputEnd(tokens, idx)
		if err != nil {
			return nil, 0, err
		}

		if tokens[idx].kind == FunctionArrow {
			// "(a, b) => ..." — re-parse from the start as a function
			// literal whose argument list is the parenthesized group.
			var err error
			atom, idx, err = parseFunctionLiteral(tokens)
			if err != nil {
				return nil, 0, err
			}

			// parseAtom should not consume trailing Separators, but
			// parseFunctionLiteral does because it ends with expressions.
			// so we backtrack one token.
			idx--
		} else {
			atom = ExpressionListNode{
				expressions: exprs,
				position:    tok.position,
			}
		}
		// may be called as a function, so flows beyond
		// switch block
	case LeftBrace:
		// Object literal: "key: value" entries until the closing brace.
		entries := make([]ObjectEntryNode, 0)
		for tokens[idx].kind != RightBrace {
			keyExpr, keyIncr, err := parseExpression(tokens[idx:])
			if err != nil {
				return nil, 0, err
			}

			idx += keyIncr
			err = guardUnexpectedInputEnd(tokens, idx)
			if err != nil {
				return nil, 0, err
			}

			if tokens[idx].kind == KeyValueSeparator {
				idx++
			} else {
				return nil, 0, Err{
					ErrSyntax,
					fmt.Sprintf("expected %s after composite key, found %s",
						KeyValueSeparator.String(), tokens[idx]),
				}
			}

			err = guardUnexpectedInputEnd(tokens, idx)
			if err != nil {
				return nil, 0, err
			}

			valExpr, valIncr, err := parseExpression(tokens[idx:])
			if err != nil {
				return nil, 0, err
			}

			// Separator consumed by parseExpression
			idx += valIncr
			entries = append(entries, ObjectEntryNode{
				key:      keyExpr,
				val:      valExpr,
				position: keyExpr.Position(),
			})

			err = guardUnexpectedInputEnd(tokens, idx)
			if err != nil {
				return nil, 0, err
			}
		}
		idx++ // RightBrace

		return ObjectLiteralNode{
			entries:  entries,
			position: tok.position,
		}, idx, nil
	case LeftBracket:
		// List literal: expressions until the closing bracket.
		vals := make([]Node, 0)
		for tokens[idx].kind != RightBracket {
			expr, incr, err := parseExpression(tokens[idx:])
			if err != nil {
				return nil, 0, err
			}

			idx += incr
			vals = append(vals, expr)

			err = guardUnexpectedInputEnd(tokens, idx)
			if err != nil {
				return nil, 0, err
			}
		}
		idx++ // RightBracket

		return ListLiteralNode{
			vals:     vals,
			position: tok.position,
		}, idx, nil
	default:
		return nil, 0, Err{
			ErrSyntax,
			fmt.Sprintf("unexpected start of atom, found %s", tok),
		}
	}

	// Consume any number of call suffixes: "f(x)(y)...".
	// bounds check here because parseExpression may have
	// consumed all tokens before this
	for idx < len(tokens) && tokens[idx].kind == LeftParen {
		var incr int
		var err error
		atom, incr, err = parseFunctionCall(atom, tokens[idx:])
		if err != nil {
			return nil, 0, err
		}
		idx += incr

		err = guardUnexpectedInputEnd(tokens, idx)
		if err != nil {
			return nil, 0, err
		}
	}

	return atom, idx, nil
}
// parseMatchBody parses everything that follows MatchColon: a brace-
// enclosed list of match clauses. It returns the clauses and the number of
// tokens consumed (including both braces). It does not consume a dangling
// separator — that's for parseExpression.
func parseMatchBody(tokens []Tok) ([]MatchClauseNode, int, error) {
	idx := 1 // LeftBrace
	clauses := make([]MatchClauseNode, 0)

	err := guardUnexpectedInputEnd(tokens, idx)
	if err != nil {
		return nil, 0, err
	}

	for tokens[idx].kind != RightBrace {
		clauseNode, incr, err := parseMatchClause(tokens[idx:])
		if err != nil {
			return nil, 0, err
		}
		idx += incr

		clauses = append(clauses, clauseNode)

		err = guardUnexpectedInputEnd(tokens, idx)
		if err != nil {
			return nil, 0, err
		}
	}
	idx++ // RightBrace

	return clauses, idx, nil
}
// parseMatchClause parses one "target -> expression" clause of a match
// body, returning the clause node and the number of tokens consumed.
func parseMatchClause(tokens []Tok) (MatchClauseNode, int, error) {
	atom, idx, err := parseExpression(tokens)
	if err != nil {
		return MatchClauseNode{}, 0, err
	}

	err = guardUnexpectedInputEnd(tokens, idx)
	if err != nil {
		return MatchClauseNode{}, 0, err
	}

	if tokens[idx].kind != CaseArrow {
		return MatchClauseNode{}, 0, Err{
			ErrSyntax,
			fmt.Sprintf("expected %s, but got %s", CaseArrow, tokens[idx]),
		}
	}
	idx++ // CaseArrow

	err = guardUnexpectedInputEnd(tokens, idx)
	if err != nil {
		return MatchClauseNode{}, 0, err
	}

	expr, incr, err := parseExpression(tokens[idx:])
	if err != nil {
		return MatchClauseNode{}, 0, err
	}
	idx += incr

	return MatchClauseNode{
		target:     atom,
		expression: expr,
	}, idx, nil
}
// parseFunctionLiteral parses a function literal starting at tokens[0].
// The argument list is either a single (possibly empty) identifier or a
// parenthesized, separator-delimited list of identifiers, followed by a
// FunctionArrow and a body expression. Returns the literal and the number
// of tokens consumed (including the body's trailing separator, which
// parseExpression consumes).
func parseFunctionLiteral(tokens []Tok) (FunctionLiteralNode, int, error) {
	tok, idx := tokens[0], 1
	arguments := make([]Node, 0)

	err := guardUnexpectedInputEnd(tokens, idx)
	if err != nil {
		return FunctionLiteralNode{}, 0, err
	}

	switch tok.kind {
	case LeftParen:
		// Collect identifier arguments until a non-identifier token.
		for {
			tk := tokens[idx]
			if tk.kind == Identifier {
				idNode := IdentifierNode{tk.str, tk.position}
				arguments = append(arguments, idNode)
			} else if tk.kind == EmptyIdentifier {
				idNode := EmptyIdentifierNode{tk.position}
				arguments = append(arguments, idNode)
			} else {
				break
			}
			idx++

			err := guardUnexpectedInputEnd(tokens, idx)
			if err != nil {
				return FunctionLiteralNode{}, 0, err
			}

			// Every argument, including the last, must be followed by a
			// separator token.
			if tokens[idx].kind != Separator {
				return FunctionLiteralNode{}, 0, Err{
					ErrSyntax,
					fmt.Sprintf("expected arguments in a list separated by %s, found %s",
						Separator, tokens[idx]),
				}
			}
			idx++ // Separator
		}

		err := guardUnexpectedInputEnd(tokens, idx)
		if err != nil {
			return FunctionLiteralNode{}, 0, err
		}

		if tokens[idx].kind != RightParen {
			return FunctionLiteralNode{}, 0, Err{
				ErrSyntax,
				fmt.Sprintf("expected arguments list to terminate with %s, found %s",
					RightParen, tokens[idx]),
			}
		}
		idx++ // RightParen
	case Identifier:
		// Single-argument shorthand: "x => ...".
		idNode := IdentifierNode{tok.str, tok.position}
		arguments = append(arguments, idNode)
	case EmptyIdentifier:
		idNode := EmptyIdentifierNode{tok.position}
		arguments = append(arguments, idNode)
	default:
		return FunctionLiteralNode{}, 0, Err{
			ErrSyntax,
			fmt.Sprintf("malformed arguments list in function at %s", tok),
		}
	}

	err = guardUnexpectedInputEnd(tokens, idx)
	if err != nil {
		return FunctionLiteralNode{}, 0, err
	}

	if tokens[idx].kind != FunctionArrow {
		return FunctionLiteralNode{}, 0, Err{
			ErrSyntax,
			fmt.Sprintf("expected %s but found %s", FunctionArrow, tokens[idx]),
		}
	}
	idx++ // FunctionArrow

	body, incr, err := parseExpression(tokens[idx:])
	if err != nil {
		return FunctionLiteralNode{}, 0, err
	}
	idx += incr

	return FunctionLiteralNode{
		arguments: arguments,
		body:      body,
		position:  tokens[0].position,
	}, idx, nil
}
// parseFunctionCall parses a call suffix "(arg, arg, ...)" starting at the
// LeftParen in tokens[0], applied to the already-parsed function node.
// Returns the call node and the number of tokens consumed (both parens
// included).
func parseFunctionCall(function Node, tokens []Tok) (FunctionCallNode, int, error) {
	idx := 1 // LeftParen
	arguments := make([]Node, 0)

	err := guardUnexpectedInputEnd(tokens, idx)
	if err != nil {
		return FunctionCallNode{}, 0, err
	}

	for tokens[idx].kind != RightParen {
		expr, incr, err := parseExpression(tokens[idx:])
		if err != nil {
			return FunctionCallNode{}, 0, err
		}

		idx += incr
		arguments = append(arguments, expr)

		err = guardUnexpectedInputEnd(tokens, idx)
		if err != nil {
			return FunctionCallNode{}, 0, err
		}
	}

	idx++ // RightParen

	return FunctionCallNode{
		function:  function,
		arguments: arguments,
	}, idx, nil
}
package tuplefunc
import (
"context"
"github.com/rogpeppe/generic/tuple"
)
// WithContextAR adapts a one-argument, one-result function f into a
// context-aware form. The returned function ignores its context entirely
// and simply forwards to f.
func WithContextAR[A, R any](f func(A) R) func(context.Context, A) R {
	return func(_ context.Context, a A) R {
		return f(a)
	}
}
// WithContextA returns a function with a context argument
// that calls f without the context.
//
// (Fix: the doc comment previously began with "WithContext", which does
// not match the declared name as Go doc-comment convention requires.)
func WithContextA[A any](f func(A)) func(context.Context, A) {
	return func(ctx context.Context, a A) {
		f(a)
	}
}
// WithErrorAR returns an error-returning function that
// calls f and returns a nil error.
func WithErrorAR[A, R any](f func(A) R) func(A) (R, error) {
	return func(arg A) (R, error) {
		result := f(arg)
		return result, nil
	}
}
// ToA_0_0 adapts f to a single-argument function taking an empty tuple.
func ToA_0_0(f func()) func(tuple.T0) {
	return func(_ tuple.T0) {
		f()
	}
}

// ToA_2_0 adapts f to a single-argument function taking its arguments as a tuple.
func ToA_2_0[A0, A1 any](f func(a0 A0, a1 A1)) func(tuple.T2[A0, A1]) {
	return func(t tuple.T2[A0, A1]) {
		f(t.T())
	}
}

// ToA_3_0 adapts f to a single-argument function taking its arguments as a tuple.
func ToA_3_0[A0, A1, A2 any](f func(a0 A0, a1 A1, a2 A2)) func(tuple.T3[A0, A1, A2]) {
	return func(t tuple.T3[A0, A1, A2]) {
		f(t.T())
	}
}

// ToA_4_0 adapts f to a single-argument function taking its arguments as a tuple.
func ToA_4_0[A0, A1, A2, A3 any](f func(a0 A0, a1 A1, a2 A2, a3 A3)) func(tuple.T4[A0, A1, A2, A3]) {
	return func(t tuple.T4[A0, A1, A2, A3]) {
		f(t.T())
	}
}

// ToA_5_0 adapts f to a single-argument function taking its arguments as a tuple.
func ToA_5_0[A0, A1, A2, A3, A4 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4)) func(tuple.T5[A0, A1, A2, A3, A4]) {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) {
		f(t.T())
	}
}

// ToA_6_0 adapts f to a single-argument function taking its arguments as a tuple.
func ToA_6_0[A0, A1, A2, A3, A4, A5 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5)) func(tuple.T6[A0, A1, A2, A3, A4, A5]) {
	return func(t tuple.T6[A0, A1, A2, A3, A4, A5]) {
		f(t.T())
	}
}
// ToR_0_0 adapts f to a single-return function yielding an empty tuple.
func ToR_0_0(f func()) func() tuple.T0 {
	return func() tuple.T0 {
		f()
		return tuple.T0{}
	}
}

// ToR_0_2 adapts f to a single-return function yielding its results as a tuple.
func ToR_0_2[R0, R1 any](f func() (R0, R1)) func() tuple.T2[R0, R1] {
	return func() tuple.T2[R0, R1] {
		r0, r1 := f()
		return tuple.MkT2(r0, r1)
	}
}

// ToR_0_3 adapts f to a single-return function yielding its results as a tuple.
func ToR_0_3[R0, R1, R2 any](f func() (R0, R1, R2)) func() tuple.T3[R0, R1, R2] {
	return func() tuple.T3[R0, R1, R2] {
		r0, r1, r2 := f()
		return tuple.MkT3(r0, r1, r2)
	}
}

// ToR_0_4 adapts f to a single-return function yielding its results as a tuple.
func ToR_0_4[R0, R1, R2, R3 any](f func() (R0, R1, R2, R3)) func() tuple.T4[R0, R1, R2, R3] {
	return func() tuple.T4[R0, R1, R2, R3] {
		r0, r1, r2, r3 := f()
		return tuple.MkT4(r0, r1, r2, r3)
	}
}

// ToR_0_5 adapts f to a single-return function yielding its results as a tuple.
func ToR_0_5[R0, R1, R2, R3, R4 any](f func() (R0, R1, R2, R3, R4)) func() tuple.T5[R0, R1, R2, R3, R4] {
	return func() tuple.T5[R0, R1, R2, R3, R4] {
		r0, r1, r2, r3, r4 := f()
		return tuple.MkT5(r0, r1, r2, r3, r4)
	}
}

// ToR_0_6 adapts f to a single-return function yielding its results as a tuple.
func ToR_0_6[R0, R1, R2, R3, R4, R5 any](f func() (R0, R1, R2, R3, R4, R5)) func() tuple.T6[R0, R1, R2, R3, R4, R5] {
	return func() tuple.T6[R0, R1, R2, R3, R4, R5] {
		r0, r1, r2, r3, r4, r5 := f()
		return tuple.MkT6(r0, r1, r2, r3, r4, r5)
	}
}
// ToAR_0_0 adapts f to a single-argument, single-return function on empty tuples.
func ToAR_0_0(f func()) func(tuple.T0) tuple.T0 {
	return func(_ tuple.T0) tuple.T0 {
		f()
		return tuple.T0{}
	}
}

// ToAR_0_1 adapts f to a single-argument, single-return function.
func ToAR_0_1[R any](f func() R) func(tuple.T0) R {
	return func(_ tuple.T0) R {
		return f()
	}
}

// ToAR_0_2 adapts f to a single-argument function returning its results as a tuple.
func ToAR_0_2[R0, R1 any](f func() (R0, R1)) func(tuple.T0) tuple.T2[R0, R1] {
	return func(_ tuple.T0) tuple.T2[R0, R1] {
		r0, r1 := f()
		return tuple.MkT2(r0, r1)
	}
}

// ToAR_0_3 adapts f to a single-argument function returning its results as a tuple.
func ToAR_0_3[R0, R1, R2 any](f func() (R0, R1, R2)) func(tuple.T0) tuple.T3[R0, R1, R2] {
	return func(_ tuple.T0) tuple.T3[R0, R1, R2] {
		r0, r1, r2 := f()
		return tuple.MkT3(r0, r1, r2)
	}
}

// ToAR_0_4 adapts f to a single-argument function returning its results as a tuple.
func ToAR_0_4[R0, R1, R2, R3 any](f func() (R0, R1, R2, R3)) func(tuple.T0) tuple.T4[R0, R1, R2, R3] {
	return func(_ tuple.T0) tuple.T4[R0, R1, R2, R3] {
		r0, r1, r2, r3 := f()
		return tuple.MkT4(r0, r1, r2, r3)
	}
}

// ToAR_0_5 adapts f to a single-argument function returning its results as a tuple.
func ToAR_0_5[R0, R1, R2, R3, R4 any](f func() (R0, R1, R2, R3, R4)) func(tuple.T0) tuple.T5[R0, R1, R2, R3, R4] {
	return func(_ tuple.T0) tuple.T5[R0, R1, R2, R3, R4] {
		r0, r1, r2, r3, r4 := f()
		return tuple.MkT5(r0, r1, r2, r3, r4)
	}
}

// ToAR_0_6 adapts f to a single-argument function returning its results as a tuple.
func ToAR_0_6[R0, R1, R2, R3, R4, R5 any](f func() (R0, R1, R2, R3, R4, R5)) func(tuple.T0) tuple.T6[R0, R1, R2, R3, R4, R5] {
	return func(_ tuple.T0) tuple.T6[R0, R1, R2, R3, R4, R5] {
		r0, r1, r2, r3, r4, r5 := f()
		return tuple.MkT6(r0, r1, r2, r3, r4, r5)
	}
}
// ToAR_1_0 adapts f to a single-argument function returning an empty tuple.
func ToAR_1_0[A any](f func(a A)) func(A) tuple.T0 {
	return func(x A) tuple.T0 {
		f(x)
		return tuple.T0{}
	}
}

// ToAR_1_1 adapts f to a single-argument, single-return function.
func ToAR_1_1[A, R any](f func(a A) R) func(A) R {
	return func(x A) R {
		return f(x)
	}
}

// ToAR_1_2 adapts f to a single-argument function returning its results as a tuple.
func ToAR_1_2[A, R0, R1 any](f func(a A) (R0, R1)) func(A) tuple.T2[R0, R1] {
	return func(x A) tuple.T2[R0, R1] {
		r0, r1 := f(x)
		return tuple.MkT2(r0, r1)
	}
}

// ToAR_1_3 adapts f to a single-argument function returning its results as a tuple.
func ToAR_1_3[A, R0, R1, R2 any](f func(a A) (R0, R1, R2)) func(A) tuple.T3[R0, R1, R2] {
	return func(x A) tuple.T3[R0, R1, R2] {
		r0, r1, r2 := f(x)
		return tuple.MkT3(r0, r1, r2)
	}
}

// ToAR_1_4 adapts f to a single-argument function returning its results as a tuple.
func ToAR_1_4[A, R0, R1, R2, R3 any](f func(a A) (R0, R1, R2, R3)) func(A) tuple.T4[R0, R1, R2, R3] {
	return func(x A) tuple.T4[R0, R1, R2, R3] {
		r0, r1, r2, r3 := f(x)
		return tuple.MkT4(r0, r1, r2, r3)
	}
}

// ToAR_1_5 adapts f to a single-argument function returning its results as a tuple.
func ToAR_1_5[A, R0, R1, R2, R3, R4 any](f func(a A) (R0, R1, R2, R3, R4)) func(A) tuple.T5[R0, R1, R2, R3, R4] {
	return func(x A) tuple.T5[R0, R1, R2, R3, R4] {
		r0, r1, r2, r3, r4 := f(x)
		return tuple.MkT5(r0, r1, r2, r3, r4)
	}
}

// ToAR_1_6 adapts f to a single-argument function returning its results as a tuple.
func ToAR_1_6[A, R0, R1, R2, R3, R4, R5 any](f func(a A) (R0, R1, R2, R3, R4, R5)) func(A) tuple.T6[R0, R1, R2, R3, R4, R5] {
	return func(x A) tuple.T6[R0, R1, R2, R3, R4, R5] {
		r0, r1, r2, r3, r4, r5 := f(x)
		return tuple.MkT6(r0, r1, r2, r3, r4, r5)
	}
}
// ToAR_2_0 adapts f to a single-argument function returning an empty tuple.
func ToAR_2_0[A0, A1 any](f func(a0 A0, a1 A1)) func(tuple.T2[A0, A1]) tuple.T0 {
	return func(t tuple.T2[A0, A1]) tuple.T0 {
		f(t.T())
		return tuple.T0{}
	}
}

// ToAR_2_1 adapts f to a single-argument, single-return function.
func ToAR_2_1[A0, A1, R any](f func(a0 A0, a1 A1) R) func(tuple.T2[A0, A1]) R {
	return func(t tuple.T2[A0, A1]) R {
		return f(t.T())
	}
}

// ToAR_2_2 adapts f to a single-argument, single-tuple-return function.
func ToAR_2_2[A0, A1, R0, R1 any](f func(a0 A0, a1 A1) (R0, R1)) func(tuple.T2[A0, A1]) tuple.T2[R0, R1] {
	return func(t tuple.T2[A0, A1]) tuple.T2[R0, R1] {
		r0, r1 := f(t.T())
		return tuple.MkT2(r0, r1)
	}
}

// ToAR_2_3 adapts f to a single-argument, single-tuple-return function.
func ToAR_2_3[A0, A1, R0, R1, R2 any](f func(a0 A0, a1 A1) (R0, R1, R2)) func(tuple.T2[A0, A1]) tuple.T3[R0, R1, R2] {
	return func(t tuple.T2[A0, A1]) tuple.T3[R0, R1, R2] {
		r0, r1, r2 := f(t.T())
		return tuple.MkT3(r0, r1, r2)
	}
}

// ToAR_2_4 adapts f to a single-argument, single-tuple-return function.
func ToAR_2_4[A0, A1, R0, R1, R2, R3 any](f func(a0 A0, a1 A1) (R0, R1, R2, R3)) func(tuple.T2[A0, A1]) tuple.T4[R0, R1, R2, R3] {
	return func(t tuple.T2[A0, A1]) tuple.T4[R0, R1, R2, R3] {
		r0, r1, r2, r3 := f(t.T())
		return tuple.MkT4(r0, r1, r2, r3)
	}
}

// ToAR_2_5 adapts f to a single-argument, single-tuple-return function.
func ToAR_2_5[A0, A1, R0, R1, R2, R3, R4 any](f func(a0 A0, a1 A1) (R0, R1, R2, R3, R4)) func(tuple.T2[A0, A1]) tuple.T5[R0, R1, R2, R3, R4] {
	return func(t tuple.T2[A0, A1]) tuple.T5[R0, R1, R2, R3, R4] {
		r0, r1, r2, r3, r4 := f(t.T())
		return tuple.MkT5(r0, r1, r2, r3, r4)
	}
}

// ToAR_2_6 adapts f to a single-argument, single-tuple-return function.
func ToAR_2_6[A0, A1, R0, R1, R2, R3, R4, R5 any](f func(a0 A0, a1 A1) (R0, R1, R2, R3, R4, R5)) func(tuple.T2[A0, A1]) tuple.T6[R0, R1, R2, R3, R4, R5] {
	return func(t tuple.T2[A0, A1]) tuple.T6[R0, R1, R2, R3, R4, R5] {
		r0, r1, r2, r3, r4, r5 := f(t.T())
		return tuple.MkT6(r0, r1, r2, r3, r4, r5)
	}
}
// ToAR_3_0 adapts f to a single-argument function returning an empty tuple.
func ToAR_3_0[A0, A1, A2 any](f func(a0 A0, a1 A1, a2 A2)) func(tuple.T3[A0, A1, A2]) tuple.T0 {
	return func(t tuple.T3[A0, A1, A2]) tuple.T0 {
		f(t.T())
		return tuple.T0{}
	}
}

// ToAR_3_1 adapts f to a single-argument, single-return function.
func ToAR_3_1[A0, A1, A2, R any](f func(a0 A0, a1 A1, a2 A2) R) func(tuple.T3[A0, A1, A2]) R {
	return func(t tuple.T3[A0, A1, A2]) R {
		return f(t.T())
	}
}

// ToAR_3_2 adapts f to a single-argument, single-tuple-return function.
func ToAR_3_2[A0, A1, A2, R0, R1 any](f func(a0 A0, a1 A1, a2 A2) (R0, R1)) func(tuple.T3[A0, A1, A2]) tuple.T2[R0, R1] {
	return func(t tuple.T3[A0, A1, A2]) tuple.T2[R0, R1] {
		r0, r1 := f(t.T())
		return tuple.MkT2(r0, r1)
	}
}

// ToAR_3_3 adapts f to a single-argument, single-tuple-return function.
func ToAR_3_3[A0, A1, A2, R0, R1, R2 any](f func(a0 A0, a1 A1, a2 A2) (R0, R1, R2)) func(tuple.T3[A0, A1, A2]) tuple.T3[R0, R1, R2] {
	return func(t tuple.T3[A0, A1, A2]) tuple.T3[R0, R1, R2] {
		r0, r1, r2 := f(t.T())
		return tuple.MkT3(r0, r1, r2)
	}
}

// ToAR_3_4 adapts f to a single-argument, single-tuple-return function.
func ToAR_3_4[A0, A1, A2, R0, R1, R2, R3 any](f func(a0 A0, a1 A1, a2 A2) (R0, R1, R2, R3)) func(tuple.T3[A0, A1, A2]) tuple.T4[R0, R1, R2, R3] {
	return func(t tuple.T3[A0, A1, A2]) tuple.T4[R0, R1, R2, R3] {
		r0, r1, r2, r3 := f(t.T())
		return tuple.MkT4(r0, r1, r2, r3)
	}
}

// ToAR_3_5 adapts f to a single-argument, single-tuple-return function.
func ToAR_3_5[A0, A1, A2, R0, R1, R2, R3, R4 any](f func(a0 A0, a1 A1, a2 A2) (R0, R1, R2, R3, R4)) func(tuple.T3[A0, A1, A2]) tuple.T5[R0, R1, R2, R3, R4] {
	return func(t tuple.T3[A0, A1, A2]) tuple.T5[R0, R1, R2, R3, R4] {
		r0, r1, r2, r3, r4 := f(t.T())
		return tuple.MkT5(r0, r1, r2, r3, r4)
	}
}

// ToAR_3_6 adapts f to a single-argument, single-tuple-return function.
func ToAR_3_6[A0, A1, A2, R0, R1, R2, R3, R4, R5 any](f func(a0 A0, a1 A1, a2 A2) (R0, R1, R2, R3, R4, R5)) func(tuple.T3[A0, A1, A2]) tuple.T6[R0, R1, R2, R3, R4, R5] {
	return func(t tuple.T3[A0, A1, A2]) tuple.T6[R0, R1, R2, R3, R4, R5] {
		r0, r1, r2, r3, r4, r5 := f(t.T())
		return tuple.MkT6(r0, r1, r2, r3, r4, r5)
	}
}
// ToAR_4_0 adapts f to a single-argument function returning an empty tuple.
func ToAR_4_0[A0, A1, A2, A3 any](f func(a0 A0, a1 A1, a2 A2, a3 A3)) func(tuple.T4[A0, A1, A2, A3]) tuple.T0 {
	return func(t tuple.T4[A0, A1, A2, A3]) tuple.T0 {
		f(t.T())
		return tuple.T0{}
	}
}

// ToAR_4_1 adapts f to a single-argument, single-return function.
func ToAR_4_1[A0, A1, A2, A3, R any](f func(a0 A0, a1 A1, a2 A2, a3 A3) R) func(tuple.T4[A0, A1, A2, A3]) R {
	return func(t tuple.T4[A0, A1, A2, A3]) R {
		return f(t.T())
	}
}

// ToAR_4_2 adapts f to a single-argument, single-tuple-return function.
func ToAR_4_2[A0, A1, A2, A3, R0, R1 any](f func(a0 A0, a1 A1, a2 A2, a3 A3) (R0, R1)) func(tuple.T4[A0, A1, A2, A3]) tuple.T2[R0, R1] {
	return func(t tuple.T4[A0, A1, A2, A3]) tuple.T2[R0, R1] {
		r0, r1 := f(t.T())
		return tuple.MkT2(r0, r1)
	}
}

// ToAR_4_3 adapts f to a single-argument, single-tuple-return function.
func ToAR_4_3[A0, A1, A2, A3, R0, R1, R2 any](f func(a0 A0, a1 A1, a2 A2, a3 A3) (R0, R1, R2)) func(tuple.T4[A0, A1, A2, A3]) tuple.T3[R0, R1, R2] {
	return func(t tuple.T4[A0, A1, A2, A3]) tuple.T3[R0, R1, R2] {
		r0, r1, r2 := f(t.T())
		return tuple.MkT3(r0, r1, r2)
	}
}

// ToAR_4_4 adapts f to a single-argument, single-tuple-return function.
func ToAR_4_4[A0, A1, A2, A3, R0, R1, R2, R3 any](f func(a0 A0, a1 A1, a2 A2, a3 A3) (R0, R1, R2, R3)) func(tuple.T4[A0, A1, A2, A3]) tuple.T4[R0, R1, R2, R3] {
	return func(t tuple.T4[A0, A1, A2, A3]) tuple.T4[R0, R1, R2, R3] {
		r0, r1, r2, r3 := f(t.T())
		return tuple.MkT4(r0, r1, r2, r3)
	}
}

// ToAR_4_5 adapts f to a single-argument, single-tuple-return function.
func ToAR_4_5[A0, A1, A2, A3, R0, R1, R2, R3, R4 any](f func(a0 A0, a1 A1, a2 A2, a3 A3) (R0, R1, R2, R3, R4)) func(tuple.T4[A0, A1, A2, A3]) tuple.T5[R0, R1, R2, R3, R4] {
	return func(t tuple.T4[A0, A1, A2, A3]) tuple.T5[R0, R1, R2, R3, R4] {
		r0, r1, r2, r3, r4 := f(t.T())
		return tuple.MkT5(r0, r1, r2, r3, r4)
	}
}

// ToAR_4_6 adapts f to a single-argument, single-tuple-return function.
func ToAR_4_6[A0, A1, A2, A3, R0, R1, R2, R3, R4, R5 any](f func(a0 A0, a1 A1, a2 A2, a3 A3) (R0, R1, R2, R3, R4, R5)) func(tuple.T4[A0, A1, A2, A3]) tuple.T6[R0, R1, R2, R3, R4, R5] {
	return func(t tuple.T4[A0, A1, A2, A3]) tuple.T6[R0, R1, R2, R3, R4, R5] {
		r0, r1, r2, r3, r4, r5 := f(t.T())
		return tuple.MkT6(r0, r1, r2, r3, r4, r5)
	}
}
// ToAR_5_0 adapts f to a single-argument function returning an empty tuple.
func ToAR_5_0[A0, A1, A2, A3, A4 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4)) func(tuple.T5[A0, A1, A2, A3, A4]) tuple.T0 {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) tuple.T0 {
		f(t.T())
		return tuple.T0{}
	}
}

// ToAR_5_1 adapts f to a single-argument, single-return function.
func ToAR_5_1[A0, A1, A2, A3, A4, R any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) R) func(tuple.T5[A0, A1, A2, A3, A4]) R {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) R {
		return f(t.T())
	}
}

// ToAR_5_2 adapts f to a single-argument, single-tuple-return function.
func ToAR_5_2[A0, A1, A2, A3, A4, R0, R1 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R0, R1)) func(tuple.T5[A0, A1, A2, A3, A4]) tuple.T2[R0, R1] {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) tuple.T2[R0, R1] {
		r0, r1 := f(t.T())
		return tuple.MkT2(r0, r1)
	}
}

// ToAR_5_3 adapts f to a single-argument, single-tuple-return function.
func ToAR_5_3[A0, A1, A2, A3, A4, R0, R1, R2 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R0, R1, R2)) func(tuple.T5[A0, A1, A2, A3, A4]) tuple.T3[R0, R1, R2] {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) tuple.T3[R0, R1, R2] {
		r0, r1, r2 := f(t.T())
		return tuple.MkT3(r0, r1, r2)
	}
}

// ToAR_5_4 adapts f to a single-argument, single-tuple-return function.
func ToAR_5_4[A0, A1, A2, A3, A4, R0, R1, R2, R3 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R0, R1, R2, R3)) func(tuple.T5[A0, A1, A2, A3, A4]) tuple.T4[R0, R1, R2, R3] {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) tuple.T4[R0, R1, R2, R3] {
		r0, r1, r2, r3 := f(t.T())
		return tuple.MkT4(r0, r1, r2, r3)
	}
}

// ToAR_5_5 adapts f to a single-argument, single-tuple-return function.
func ToAR_5_5[A0, A1, A2, A3, A4, R0, R1, R2, R3, R4 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R0, R1, R2, R3, R4)) func(tuple.T5[A0, A1, A2, A3, A4]) tuple.T5[R0, R1, R2, R3, R4] {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) tuple.T5[R0, R1, R2, R3, R4] {
		r0, r1, r2, r3, r4 := f(t.T())
		return tuple.MkT5(r0, r1, r2, r3, r4)
	}
}

// ToAR_5_6 adapts f to a single-argument, single-tuple-return function.
func ToAR_5_6[A0, A1, A2, A3, A4, R0, R1, R2, R3, R4, R5 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R0, R1, R2, R3, R4, R5)) func(tuple.T5[A0, A1, A2, A3, A4]) tuple.T6[R0, R1, R2, R3, R4, R5] {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) tuple.T6[R0, R1, R2, R3, R4, R5] {
		r0, r1, r2, r3, r4, r5 := f(t.T())
		return tuple.MkT6(r0, r1, r2, r3, r4, r5)
	}
}
// ToAR_6_0 adapts f to a single-argument function returning an empty tuple.
func ToAR_6_0[A0, A1, A2, A3, A4, A5 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5)) func(tuple.T6[A0, A1, A2, A3, A4, A5]) tuple.T0 {
	return func(t tuple.T6[A0, A1, A2, A3, A4, A5]) tuple.T0 {
		f(t.T())
		return tuple.T0{}
	}
}

// ToAR_6_1 adapts f to a single-argument, single-return function.
func ToAR_6_1[A0, A1, A2, A3, A4, A5, R any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) R) func(tuple.T6[A0, A1, A2, A3, A4, A5]) R {
	return func(t tuple.T6[A0, A1, A2, A3, A4, A5]) R {
		return f(t.T())
	}
}

// ToAR_6_2 adapts f to a single-argument, single-tuple-return function.
func ToAR_6_2[A0, A1, A2, A3, A4, A5, R0, R1 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R0, R1)) func(tuple.T6[A0, A1, A2, A3, A4, A5]) tuple.T2[R0, R1] {
	return func(t tuple.T6[A0, A1, A2, A3, A4, A5]) tuple.T2[R0, R1] {
		r0, r1 := f(t.T())
		return tuple.MkT2(r0, r1)
	}
}

// ToAR_6_3 adapts f to a single-argument, single-tuple-return function.
func ToAR_6_3[A0, A1, A2, A3, A4, A5, R0, R1, R2 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R0, R1, R2)) func(tuple.T6[A0, A1, A2, A3, A4, A5]) tuple.T3[R0, R1, R2] {
	return func(t tuple.T6[A0, A1, A2, A3, A4, A5]) tuple.T3[R0, R1, R2] {
		r0, r1, r2 := f(t.T())
		return tuple.MkT3(r0, r1, r2)
	}
}

// ToAR_6_4 adapts f to a single-argument, single-tuple-return function.
func ToAR_6_4[A0, A1, A2, A3, A4, A5, R0, R1, R2, R3 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R0, R1, R2, R3)) func(tuple.T6[A0, A1, A2, A3, A4, A5]) tuple.T4[R0, R1, R2, R3] {
	return func(t tuple.T6[A0, A1, A2, A3, A4, A5]) tuple.T4[R0, R1, R2, R3] {
		r0, r1, r2, r3 := f(t.T())
		return tuple.MkT4(r0, r1, r2, r3)
	}
}

// ToAR_6_5 adapts f to a single-argument, single-tuple-return function.
func ToAR_6_5[A0, A1, A2, A3, A4, A5, R0, R1, R2, R3, R4 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R0, R1, R2, R3, R4)) func(tuple.T6[A0, A1, A2, A3, A4, A5]) tuple.T5[R0, R1, R2, R3, R4] {
	return func(t tuple.T6[A0, A1, A2, A3, A4, A5]) tuple.T5[R0, R1, R2, R3, R4] {
		r0, r1, r2, r3, r4 := f(t.T())
		return tuple.MkT5(r0, r1, r2, r3, r4)
	}
}

// ToAR_6_6 adapts f to a single-argument, single-tuple-return function.
func ToAR_6_6[A0, A1, A2, A3, A4, A5, R0, R1, R2, R3, R4, R5 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R0, R1, R2, R3, R4, R5)) func(tuple.T6[A0, A1, A2, A3, A4, A5]) tuple.T6[R0, R1, R2, R3, R4, R5] {
	return func(t tuple.T6[A0, A1, A2, A3, A4, A5]) tuple.T6[R0, R1, R2, R3, R4, R5] {
		r0, r1, r2, r3, r4, r5 := f(t.T())
		return tuple.MkT6(r0, r1, r2, r3, r4, r5)
	}
}
// ToAE_0_0 adapts the error-returning f to take a single empty-tuple argument.
func ToAE_0_0(f func() error) func(tuple.T0) error {
	return func(_ tuple.T0) error {
		return f()
	}
}

// ToAE_2_0 adapts the error-returning f to take its arguments as a single tuple.
func ToAE_2_0[A0, A1 any](f func(a0 A0, a1 A1) error) func(tuple.T2[A0, A1]) error {
	return func(t tuple.T2[A0, A1]) error {
		return f(t.T())
	}
}

// ToAE_3_0 adapts the error-returning f to take its arguments as a single tuple.
func ToAE_3_0[A0, A1, A2 any](f func(a0 A0, a1 A1, a2 A2) error) func(tuple.T3[A0, A1, A2]) error {
	return func(t tuple.T3[A0, A1, A2]) error {
		return f(t.T())
	}
}

// ToAE_4_0 adapts the error-returning f to take its arguments as a single tuple.
func ToAE_4_0[A0, A1, A2, A3 any](f func(a0 A0, a1 A1, a2 A2, a3 A3) error) func(tuple.T4[A0, A1, A2, A3]) error {
	return func(t tuple.T4[A0, A1, A2, A3]) error {
		return f(t.T())
	}
}

// ToAE_5_0 adapts the error-returning f to take its arguments as a single tuple.
func ToAE_5_0[A0, A1, A2, A3, A4 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) error) func(tuple.T5[A0, A1, A2, A3, A4]) error {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) error {
		return f(t.T())
	}
}

// ToAE_6_0 adapts the error-returning f to take its arguments as a single tuple.
func ToAE_6_0[A0, A1, A2, A3, A4, A5 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) error) func(tuple.T6[A0, A1, A2, A3, A4, A5]) error {
	return func(t tuple.T6[A0, A1, A2, A3, A4, A5]) error {
		return f(t.T())
	}
}
// ToARE_0_0 adapts f to a single-argument, single-return-with-error function.
func ToARE_0_0(f func() error) func(tuple.T0) (tuple.T0, error) {
	return func(_ tuple.T0) (tuple.T0, error) {
		return tuple.T0{}, f()
	}
}

// ToARE_0_1 adapts f to a single-argument, single-return-with-error function.
func ToARE_0_1[R any](f func() (R, error)) func(tuple.T0) (R, error) {
	return func(_ tuple.T0) (R, error) {
		return f()
	}
}

// ToARE_0_2 adapts f to return its non-error results as a single tuple.
func ToARE_0_2[R0, R1 any](f func() (R0, R1, error)) func(tuple.T0) (tuple.T2[R0, R1], error) {
	return func(_ tuple.T0) (tuple.T2[R0, R1], error) {
		v0, v1, err := f()
		return tuple.MkT2(v0, v1), err
	}
}

// ToARE_0_3 adapts f to return its non-error results as a single tuple.
func ToARE_0_3[R0, R1, R2 any](f func() (R0, R1, R2, error)) func(tuple.T0) (tuple.T3[R0, R1, R2], error) {
	return func(_ tuple.T0) (tuple.T3[R0, R1, R2], error) {
		v0, v1, v2, err := f()
		return tuple.MkT3(v0, v1, v2), err
	}
}

// ToARE_0_4 adapts f to return its non-error results as a single tuple.
func ToARE_0_4[R0, R1, R2, R3 any](f func() (R0, R1, R2, R3, error)) func(tuple.T0) (tuple.T4[R0, R1, R2, R3], error) {
	return func(_ tuple.T0) (tuple.T4[R0, R1, R2, R3], error) {
		v0, v1, v2, v3, err := f()
		return tuple.MkT4(v0, v1, v2, v3), err
	}
}

// ToARE_0_5 adapts f to return its non-error results as a single tuple.
func ToARE_0_5[R0, R1, R2, R3, R4 any](f func() (R0, R1, R2, R3, R4, error)) func(tuple.T0) (tuple.T5[R0, R1, R2, R3, R4], error) {
	return func(_ tuple.T0) (tuple.T5[R0, R1, R2, R3, R4], error) {
		v0, v1, v2, v3, v4, err := f()
		return tuple.MkT5(v0, v1, v2, v3, v4), err
	}
}

// ToARE_0_6 adapts f to return its non-error results as a single tuple.
func ToARE_0_6[R0, R1, R2, R3, R4, R5 any](f func() (R0, R1, R2, R3, R4, R5, error)) func(tuple.T0) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
	return func(_ tuple.T0) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
		v0, v1, v2, v3, v4, v5, err := f()
		return tuple.MkT6(v0, v1, v2, v3, v4, v5), err
	}
}
// ToARE_1_0 adapts f to a single-argument, single-return-with-error function.
func ToARE_1_0[A any](f func(a A) error) func(A) (tuple.T0, error) {
	return func(x A) (tuple.T0, error) {
		return tuple.T0{}, f(x)
	}
}

// ToARE_1_1 adapts f to a single-argument, single-return-with-error function.
func ToARE_1_1[A, R any](f func(a A) (R, error)) func(A) (R, error) {
	return func(x A) (R, error) {
		return f(x)
	}
}

// ToARE_1_2 adapts f to return its non-error results as a single tuple.
func ToARE_1_2[A, R0, R1 any](f func(a A) (R0, R1, error)) func(A) (tuple.T2[R0, R1], error) {
	return func(x A) (tuple.T2[R0, R1], error) {
		v0, v1, err := f(x)
		return tuple.MkT2(v0, v1), err
	}
}

// ToARE_1_3 adapts f to return its non-error results as a single tuple.
func ToARE_1_3[A, R0, R1, R2 any](f func(a A) (R0, R1, R2, error)) func(A) (tuple.T3[R0, R1, R2], error) {
	return func(x A) (tuple.T3[R0, R1, R2], error) {
		v0, v1, v2, err := f(x)
		return tuple.MkT3(v0, v1, v2), err
	}
}

// ToARE_1_4 adapts f to return its non-error results as a single tuple.
func ToARE_1_4[A, R0, R1, R2, R3 any](f func(a A) (R0, R1, R2, R3, error)) func(A) (tuple.T4[R0, R1, R2, R3], error) {
	return func(x A) (tuple.T4[R0, R1, R2, R3], error) {
		v0, v1, v2, v3, err := f(x)
		return tuple.MkT4(v0, v1, v2, v3), err
	}
}

// ToARE_1_5 adapts f to return its non-error results as a single tuple.
func ToARE_1_5[A, R0, R1, R2, R3, R4 any](f func(a A) (R0, R1, R2, R3, R4, error)) func(A) (tuple.T5[R0, R1, R2, R3, R4], error) {
	return func(x A) (tuple.T5[R0, R1, R2, R3, R4], error) {
		v0, v1, v2, v3, v4, err := f(x)
		return tuple.MkT5(v0, v1, v2, v3, v4), err
	}
}

// ToARE_1_6 adapts f to return its non-error results as a single tuple.
func ToARE_1_6[A, R0, R1, R2, R3, R4, R5 any](f func(a A) (R0, R1, R2, R3, R4, R5, error)) func(A) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
	return func(x A) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
		v0, v1, v2, v3, v4, v5, err := f(x)
		return tuple.MkT6(v0, v1, v2, v3, v4, v5), err
	}
}
// ToARE_2_0 adapts f to a single-tuple-argument, single-return-with-error function.
func ToARE_2_0[A0, A1 any](f func(a0 A0, a1 A1) error) func(tuple.T2[A0, A1]) (tuple.T0, error) {
	return func(t tuple.T2[A0, A1]) (tuple.T0, error) {
		return tuple.T0{}, f(t.T())
	}
}

// ToARE_2_1 adapts f to a single-tuple-argument, single-return-with-error function.
func ToARE_2_1[A0, A1, R any](f func(a0 A0, a1 A1) (R, error)) func(tuple.T2[A0, A1]) (R, error) {
	return func(t tuple.T2[A0, A1]) (R, error) {
		return f(t.T())
	}
}

// ToARE_2_2 adapts f to tuple argument and tuple-plus-error result.
func ToARE_2_2[A0, A1, R0, R1 any](f func(a0 A0, a1 A1) (R0, R1, error)) func(tuple.T2[A0, A1]) (tuple.T2[R0, R1], error) {
	return func(t tuple.T2[A0, A1]) (tuple.T2[R0, R1], error) {
		v0, v1, err := f(t.T())
		return tuple.MkT2(v0, v1), err
	}
}

// ToARE_2_3 adapts f to tuple argument and tuple-plus-error result.
func ToARE_2_3[A0, A1, R0, R1, R2 any](f func(a0 A0, a1 A1) (R0, R1, R2, error)) func(tuple.T2[A0, A1]) (tuple.T3[R0, R1, R2], error) {
	return func(t tuple.T2[A0, A1]) (tuple.T3[R0, R1, R2], error) {
		v0, v1, v2, err := f(t.T())
		return tuple.MkT3(v0, v1, v2), err
	}
}

// ToARE_2_4 adapts f to tuple argument and tuple-plus-error result.
func ToARE_2_4[A0, A1, R0, R1, R2, R3 any](f func(a0 A0, a1 A1) (R0, R1, R2, R3, error)) func(tuple.T2[A0, A1]) (tuple.T4[R0, R1, R2, R3], error) {
	return func(t tuple.T2[A0, A1]) (tuple.T4[R0, R1, R2, R3], error) {
		v0, v1, v2, v3, err := f(t.T())
		return tuple.MkT4(v0, v1, v2, v3), err
	}
}

// ToARE_2_5 adapts f to tuple argument and tuple-plus-error result.
func ToARE_2_5[A0, A1, R0, R1, R2, R3, R4 any](f func(a0 A0, a1 A1) (R0, R1, R2, R3, R4, error)) func(tuple.T2[A0, A1]) (tuple.T5[R0, R1, R2, R3, R4], error) {
	return func(t tuple.T2[A0, A1]) (tuple.T5[R0, R1, R2, R3, R4], error) {
		v0, v1, v2, v3, v4, err := f(t.T())
		return tuple.MkT5(v0, v1, v2, v3, v4), err
	}
}

// ToARE_2_6 adapts f to tuple argument and tuple-plus-error result.
func ToARE_2_6[A0, A1, R0, R1, R2, R3, R4, R5 any](f func(a0 A0, a1 A1) (R0, R1, R2, R3, R4, R5, error)) func(tuple.T2[A0, A1]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
	return func(t tuple.T2[A0, A1]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
		v0, v1, v2, v3, v4, v5, err := f(t.T())
		return tuple.MkT6(v0, v1, v2, v3, v4, v5), err
	}
}
// ToARE_3_0 adapts f to a single-tuple-argument, single-return-with-error function.
func ToARE_3_0[A0, A1, A2 any](f func(a0 A0, a1 A1, a2 A2) error) func(tuple.T3[A0, A1, A2]) (tuple.T0, error) {
	return func(t tuple.T3[A0, A1, A2]) (tuple.T0, error) {
		return tuple.T0{}, f(t.T())
	}
}

// ToARE_3_1 adapts f to a single-tuple-argument, single-return-with-error function.
func ToARE_3_1[A0, A1, A2, R any](f func(a0 A0, a1 A1, a2 A2) (R, error)) func(tuple.T3[A0, A1, A2]) (R, error) {
	return func(t tuple.T3[A0, A1, A2]) (R, error) {
		return f(t.T())
	}
}

// ToARE_3_2 adapts f to tuple argument and tuple-plus-error result.
func ToARE_3_2[A0, A1, A2, R0, R1 any](f func(a0 A0, a1 A1, a2 A2) (R0, R1, error)) func(tuple.T3[A0, A1, A2]) (tuple.T2[R0, R1], error) {
	return func(t tuple.T3[A0, A1, A2]) (tuple.T2[R0, R1], error) {
		v0, v1, err := f(t.T())
		return tuple.MkT2(v0, v1), err
	}
}

// ToARE_3_3 adapts f to tuple argument and tuple-plus-error result.
func ToARE_3_3[A0, A1, A2, R0, R1, R2 any](f func(a0 A0, a1 A1, a2 A2) (R0, R1, R2, error)) func(tuple.T3[A0, A1, A2]) (tuple.T3[R0, R1, R2], error) {
	return func(t tuple.T3[A0, A1, A2]) (tuple.T3[R0, R1, R2], error) {
		v0, v1, v2, err := f(t.T())
		return tuple.MkT3(v0, v1, v2), err
	}
}

// ToARE_3_4 adapts f to tuple argument and tuple-plus-error result.
func ToARE_3_4[A0, A1, A2, R0, R1, R2, R3 any](f func(a0 A0, a1 A1, a2 A2) (R0, R1, R2, R3, error)) func(tuple.T3[A0, A1, A2]) (tuple.T4[R0, R1, R2, R3], error) {
	return func(t tuple.T3[A0, A1, A2]) (tuple.T4[R0, R1, R2, R3], error) {
		v0, v1, v2, v3, err := f(t.T())
		return tuple.MkT4(v0, v1, v2, v3), err
	}
}

// ToARE_3_5 adapts f to tuple argument and tuple-plus-error result.
func ToARE_3_5[A0, A1, A2, R0, R1, R2, R3, R4 any](f func(a0 A0, a1 A1, a2 A2) (R0, R1, R2, R3, R4, error)) func(tuple.T3[A0, A1, A2]) (tuple.T5[R0, R1, R2, R3, R4], error) {
	return func(t tuple.T3[A0, A1, A2]) (tuple.T5[R0, R1, R2, R3, R4], error) {
		v0, v1, v2, v3, v4, err := f(t.T())
		return tuple.MkT5(v0, v1, v2, v3, v4), err
	}
}

// ToARE_3_6 adapts f to tuple argument and tuple-plus-error result.
func ToARE_3_6[A0, A1, A2, R0, R1, R2, R3, R4, R5 any](f func(a0 A0, a1 A1, a2 A2) (R0, R1, R2, R3, R4, R5, error)) func(tuple.T3[A0, A1, A2]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
	return func(t tuple.T3[A0, A1, A2]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
		v0, v1, v2, v3, v4, v5, err := f(t.T())
		return tuple.MkT6(v0, v1, v2, v3, v4, v5), err
	}
}
// ToARE_4_0 adapts f to a single-tuple-argument, single-return-with-error function.
func ToARE_4_0[A0, A1, A2, A3 any](f func(a0 A0, a1 A1, a2 A2, a3 A3) error) func(tuple.T4[A0, A1, A2, A3]) (tuple.T0, error) {
	return func(t tuple.T4[A0, A1, A2, A3]) (tuple.T0, error) {
		return tuple.T0{}, f(t.T())
	}
}

// ToARE_4_1 adapts f to a single-tuple-argument, single-return-with-error function.
func ToARE_4_1[A0, A1, A2, A3, R any](f func(a0 A0, a1 A1, a2 A2, a3 A3) (R, error)) func(tuple.T4[A0, A1, A2, A3]) (R, error) {
	return func(t tuple.T4[A0, A1, A2, A3]) (R, error) {
		return f(t.T())
	}
}

// ToARE_4_2 adapts f to tuple argument and tuple-plus-error result.
func ToARE_4_2[A0, A1, A2, A3, R0, R1 any](f func(a0 A0, a1 A1, a2 A2, a3 A3) (R0, R1, error)) func(tuple.T4[A0, A1, A2, A3]) (tuple.T2[R0, R1], error) {
	return func(t tuple.T4[A0, A1, A2, A3]) (tuple.T2[R0, R1], error) {
		v0, v1, err := f(t.T())
		return tuple.MkT2(v0, v1), err
	}
}

// ToARE_4_3 adapts f to tuple argument and tuple-plus-error result.
func ToARE_4_3[A0, A1, A2, A3, R0, R1, R2 any](f func(a0 A0, a1 A1, a2 A2, a3 A3) (R0, R1, R2, error)) func(tuple.T4[A0, A1, A2, A3]) (tuple.T3[R0, R1, R2], error) {
	return func(t tuple.T4[A0, A1, A2, A3]) (tuple.T3[R0, R1, R2], error) {
		v0, v1, v2, err := f(t.T())
		return tuple.MkT3(v0, v1, v2), err
	}
}

// ToARE_4_4 adapts f to tuple argument and tuple-plus-error result.
func ToARE_4_4[A0, A1, A2, A3, R0, R1, R2, R3 any](f func(a0 A0, a1 A1, a2 A2, a3 A3) (R0, R1, R2, R3, error)) func(tuple.T4[A0, A1, A2, A3]) (tuple.T4[R0, R1, R2, R3], error) {
	return func(t tuple.T4[A0, A1, A2, A3]) (tuple.T4[R0, R1, R2, R3], error) {
		v0, v1, v2, v3, err := f(t.T())
		return tuple.MkT4(v0, v1, v2, v3), err
	}
}

// ToARE_4_5 adapts f to tuple argument and tuple-plus-error result.
func ToARE_4_5[A0, A1, A2, A3, R0, R1, R2, R3, R4 any](f func(a0 A0, a1 A1, a2 A2, a3 A3) (R0, R1, R2, R3, R4, error)) func(tuple.T4[A0, A1, A2, A3]) (tuple.T5[R0, R1, R2, R3, R4], error) {
	return func(t tuple.T4[A0, A1, A2, A3]) (tuple.T5[R0, R1, R2, R3, R4], error) {
		v0, v1, v2, v3, v4, err := f(t.T())
		return tuple.MkT5(v0, v1, v2, v3, v4), err
	}
}

// ToARE_4_6 adapts f to tuple argument and tuple-plus-error result.
func ToARE_4_6[A0, A1, A2, A3, R0, R1, R2, R3, R4, R5 any](f func(a0 A0, a1 A1, a2 A2, a3 A3) (R0, R1, R2, R3, R4, R5, error)) func(tuple.T4[A0, A1, A2, A3]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
	return func(t tuple.T4[A0, A1, A2, A3]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
		v0, v1, v2, v3, v4, v5, err := f(t.T())
		return tuple.MkT6(v0, v1, v2, v3, v4, v5), err
	}
}
// ToARE_5_0 adapts f to a single-tuple-argument, single-return-with-error function.
func ToARE_5_0[A0, A1, A2, A3, A4 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) error) func(tuple.T5[A0, A1, A2, A3, A4]) (tuple.T0, error) {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) (tuple.T0, error) {
		return tuple.T0{}, f(t.T())
	}
}

// ToARE_5_1 adapts f to a single-tuple-argument, single-return-with-error function.
func ToARE_5_1[A0, A1, A2, A3, A4, R any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R, error)) func(tuple.T5[A0, A1, A2, A3, A4]) (R, error) {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) (R, error) {
		return f(t.T())
	}
}

// ToARE_5_2 adapts f to tuple argument and tuple-plus-error result.
func ToARE_5_2[A0, A1, A2, A3, A4, R0, R1 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R0, R1, error)) func(tuple.T5[A0, A1, A2, A3, A4]) (tuple.T2[R0, R1], error) {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) (tuple.T2[R0, R1], error) {
		v0, v1, err := f(t.T())
		return tuple.MkT2(v0, v1), err
	}
}

// ToARE_5_3 adapts f to tuple argument and tuple-plus-error result.
func ToARE_5_3[A0, A1, A2, A3, A4, R0, R1, R2 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R0, R1, R2, error)) func(tuple.T5[A0, A1, A2, A3, A4]) (tuple.T3[R0, R1, R2], error) {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) (tuple.T3[R0, R1, R2], error) {
		v0, v1, v2, err := f(t.T())
		return tuple.MkT3(v0, v1, v2), err
	}
}

// ToARE_5_4 adapts f to tuple argument and tuple-plus-error result.
func ToARE_5_4[A0, A1, A2, A3, A4, R0, R1, R2, R3 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R0, R1, R2, R3, error)) func(tuple.T5[A0, A1, A2, A3, A4]) (tuple.T4[R0, R1, R2, R3], error) {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) (tuple.T4[R0, R1, R2, R3], error) {
		v0, v1, v2, v3, err := f(t.T())
		return tuple.MkT4(v0, v1, v2, v3), err
	}
}

// ToARE_5_5 adapts f to tuple argument and tuple-plus-error result.
func ToARE_5_5[A0, A1, A2, A3, A4, R0, R1, R2, R3, R4 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R0, R1, R2, R3, R4, error)) func(tuple.T5[A0, A1, A2, A3, A4]) (tuple.T5[R0, R1, R2, R3, R4], error) {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) (tuple.T5[R0, R1, R2, R3, R4], error) {
		v0, v1, v2, v3, v4, err := f(t.T())
		return tuple.MkT5(v0, v1, v2, v3, v4), err
	}
}

// ToARE_5_6 adapts f to tuple argument and tuple-plus-error result.
func ToARE_5_6[A0, A1, A2, A3, A4, R0, R1, R2, R3, R4, R5 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R0, R1, R2, R3, R4, R5, error)) func(tuple.T5[A0, A1, A2, A3, A4]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
	return func(t tuple.T5[A0, A1, A2, A3, A4]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
		v0, v1, v2, v3, v4, v5, err := f(t.T())
		return tuple.MkT6(v0, v1, v2, v3, v4, v5), err
	}
}
// ToARE_6_0 adapts f to take its six arguments bundled as one tuple.T6, returning an empty tuple.T0 with the error.
func ToARE_6_0[A0, A1, A2, A3, A4, A5 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) error) func(tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T0, error) {
	return func(args tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T0, error) {
		return struct{}{}, f(args.T())
	}
}
// ToARE_6_1 adapts f to take its six arguments bundled as one tuple.T6; the single result passes through unchanged.
func ToARE_6_1[A0, A1, A2, A3, A4, A5, R any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R, error)) func(tuple.T6[A0, A1, A2, A3, A4, A5]) (R, error) {
	return func(args tuple.T6[A0, A1, A2, A3, A4, A5]) (R, error) {
		return f(args.T())
	}
}
// ToARE_6_2 adapts f to take its six arguments bundled as one tuple.T6 and bundle its two results into a tuple.T2.
func ToARE_6_2[A0, A1, A2, A3, A4, A5, R0, R1 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R0, R1, error)) func(tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T2[R0, R1], error) {
	return func(args tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T2[R0, R1], error) {
		v0, v1, err := f(args.T())
		return tuple.MkT2(v0, v1), err
	}
}
// ToARE_6_3 adapts f to take its six arguments bundled as one tuple.T6 and bundle its three results into a tuple.T3.
func ToARE_6_3[A0, A1, A2, A3, A4, A5, R0, R1, R2 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R0, R1, R2, error)) func(tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T3[R0, R1, R2], error) {
	return func(args tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T3[R0, R1, R2], error) {
		v0, v1, v2, err := f(args.T())
		return tuple.MkT3(v0, v1, v2), err
	}
}
// ToARE_6_4 adapts f to take its six arguments bundled as one tuple.T6 and bundle its four results into a tuple.T4.
func ToARE_6_4[A0, A1, A2, A3, A4, A5, R0, R1, R2, R3 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R0, R1, R2, R3, error)) func(tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T4[R0, R1, R2, R3], error) {
	return func(args tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T4[R0, R1, R2, R3], error) {
		v0, v1, v2, v3, err := f(args.T())
		return tuple.MkT4(v0, v1, v2, v3), err
	}
}
// ToARE_6_5 adapts f to take its six arguments bundled as one tuple.T6 and bundle its five results into a tuple.T5.
func ToARE_6_5[A0, A1, A2, A3, A4, A5, R0, R1, R2, R3, R4 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R0, R1, R2, R3, R4, error)) func(tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T5[R0, R1, R2, R3, R4], error) {
	return func(args tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T5[R0, R1, R2, R3, R4], error) {
		v0, v1, v2, v3, v4, err := f(args.T())
		return tuple.MkT5(v0, v1, v2, v3, v4), err
	}
}
// ToARE_6_6 adapts f to take its six arguments bundled as one tuple.T6 and bundle its six results into a tuple.T6.
func ToARE_6_6[A0, A1, A2, A3, A4, A5, R0, R1, R2, R3, R4, R5 any](f func(a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R0, R1, R2, R3, R4, R5, error)) func(tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
	return func(args tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
		v0, v1, v2, v3, v4, v5, err := f(args.T())
		return tuple.MkT6(v0, v1, v2, v3, v4, v5), err
	}
}
// ToRE_0_0 adapts the nullary f so that its lone error is paired with an empty tuple.T0.
func ToRE_0_0(f func() error) func() (tuple.T0, error) {
	return func() (tuple.T0, error) {
		return struct{}{}, f()
	}
}
// ToRE_0_2 adapts the nullary f so that its two results are bundled into a tuple.T2.
// (ToRE_0_1 is omitted: a single-result function already has the target shape.)
func ToRE_0_2[R0, R1 any](f func() (R0, R1, error)) func() (tuple.T2[R0, R1], error) {
	return func() (tuple.T2[R0, R1], error) {
		v0, v1, err := f()
		return tuple.MkT2(v0, v1), err
	}
}
// ToRE_0_3 adapts the nullary f so that its three results are bundled into a tuple.T3.
func ToRE_0_3[R0, R1, R2 any](f func() (R0, R1, R2, error)) func() (tuple.T3[R0, R1, R2], error) {
	return func() (tuple.T3[R0, R1, R2], error) {
		v0, v1, v2, err := f()
		return tuple.MkT3(v0, v1, v2), err
	}
}
// ToRE_0_4 adapts the nullary f so that its four results are bundled into a tuple.T4.
func ToRE_0_4[R0, R1, R2, R3 any](f func() (R0, R1, R2, R3, error)) func() (tuple.T4[R0, R1, R2, R3], error) {
	return func() (tuple.T4[R0, R1, R2, R3], error) {
		v0, v1, v2, v3, err := f()
		return tuple.MkT4(v0, v1, v2, v3), err
	}
}
// ToRE_0_5 adapts the nullary f so that its five results are bundled into a tuple.T5.
func ToRE_0_5[R0, R1, R2, R3, R4 any](f func() (R0, R1, R2, R3, R4, error)) func() (tuple.T5[R0, R1, R2, R3, R4], error) {
	return func() (tuple.T5[R0, R1, R2, R3, R4], error) {
		v0, v1, v2, v3, v4, err := f()
		return tuple.MkT5(v0, v1, v2, v3, v4), err
	}
}
// ToRE_0_6 adapts the nullary f so that its six results are bundled into a tuple.T6.
func ToRE_0_6[R0, R1, R2, R3, R4, R5 any](f func() (R0, R1, R2, R3, R4, R5, error)) func() (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
	return func() (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
		v0, v1, v2, v3, v4, v5, err := f()
		return tuple.MkT6(v0, v1, v2, v3, v4, v5), err
	}
}
// ToCARE_0_0 adapts the context-only f to the (ctx, tuple.T0) -> (tuple.T0, error) shape; the argument tuple is ignored.
func ToCARE_0_0(f func(ctx context.Context) error) func(context.Context, tuple.T0) (tuple.T0, error) {
	return func(ctx context.Context, _ tuple.T0) (tuple.T0, error) {
		return struct{}{}, f(ctx)
	}
}
// ToCARE_0_1 adapts the context-only f to the (ctx, tuple.T0) -> (R, error) shape; the argument tuple is ignored.
func ToCARE_0_1[R any](f func(ctx context.Context) (R, error)) func(context.Context, tuple.T0) (R, error) {
	return func(ctx context.Context, _ tuple.T0) (R, error) {
		return f(ctx)
	}
}
// ToCARE_0_2 adapts the context-only f, bundling its two results into a tuple.T2; the argument tuple is ignored.
func ToCARE_0_2[R0, R1 any](f func(ctx context.Context) (R0, R1, error)) func(context.Context, tuple.T0) (tuple.T2[R0, R1], error) {
	return func(ctx context.Context, _ tuple.T0) (tuple.T2[R0, R1], error) {
		v0, v1, err := f(ctx)
		return tuple.MkT2(v0, v1), err
	}
}
// ToCARE_0_3 adapts the context-only f, bundling its three results into a tuple.T3; the argument tuple is ignored.
func ToCARE_0_3[R0, R1, R2 any](f func(ctx context.Context) (R0, R1, R2, error)) func(context.Context, tuple.T0) (tuple.T3[R0, R1, R2], error) {
	return func(ctx context.Context, _ tuple.T0) (tuple.T3[R0, R1, R2], error) {
		v0, v1, v2, err := f(ctx)
		return tuple.MkT3(v0, v1, v2), err
	}
}
// ToCARE_0_4 adapts the context-only f, bundling its four results into a tuple.T4; the argument tuple is ignored.
func ToCARE_0_4[R0, R1, R2, R3 any](f func(ctx context.Context) (R0, R1, R2, R3, error)) func(context.Context, tuple.T0) (tuple.T4[R0, R1, R2, R3], error) {
	return func(ctx context.Context, _ tuple.T0) (tuple.T4[R0, R1, R2, R3], error) {
		v0, v1, v2, v3, err := f(ctx)
		return tuple.MkT4(v0, v1, v2, v3), err
	}
}
// ToCARE_0_5 adapts the context-only f, bundling its five results into a tuple.T5; the argument tuple is ignored.
func ToCARE_0_5[R0, R1, R2, R3, R4 any](f func(ctx context.Context) (R0, R1, R2, R3, R4, error)) func(context.Context, tuple.T0) (tuple.T5[R0, R1, R2, R3, R4], error) {
	return func(ctx context.Context, _ tuple.T0) (tuple.T5[R0, R1, R2, R3, R4], error) {
		v0, v1, v2, v3, v4, err := f(ctx)
		return tuple.MkT5(v0, v1, v2, v3, v4), err
	}
}
// ToCARE_0_6 adapts the context-only f, bundling its six results into a tuple.T6; the argument tuple is ignored.
func ToCARE_0_6[R0, R1, R2, R3, R4, R5 any](f func(ctx context.Context) (R0, R1, R2, R3, R4, R5, error)) func(context.Context, tuple.T0) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
	return func(ctx context.Context, _ tuple.T0) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
		v0, v1, v2, v3, v4, v5, err := f(ctx)
		return tuple.MkT6(v0, v1, v2, v3, v4, v5), err
	}
}
// ToCARE_1_0 adapts f, which takes a context and one argument, returning an empty tuple.T0 with the error.
func ToCARE_1_0[A any](f func(ctx context.Context, a A) error) func(context.Context, A) (tuple.T0, error) {
	return func(ctx context.Context, arg A) (tuple.T0, error) {
		return struct{}{}, f(ctx, arg)
	}
}
// ToCARE_1_1 adapts f, which takes a context and one argument; the single result passes through unchanged.
func ToCARE_1_1[A, R any](f func(ctx context.Context, a A) (R, error)) func(context.Context, A) (R, error) {
	return func(ctx context.Context, arg A) (R, error) {
		return f(ctx, arg)
	}
}
// ToCARE_1_2 adapts f, which takes a context and one argument, bundling its two results into a tuple.T2.
func ToCARE_1_2[A, R0, R1 any](f func(ctx context.Context, a A) (R0, R1, error)) func(context.Context, A) (tuple.T2[R0, R1], error) {
	return func(ctx context.Context, arg A) (tuple.T2[R0, R1], error) {
		v0, v1, err := f(ctx, arg)
		return tuple.MkT2(v0, v1), err
	}
}
// ToCARE_1_3 adapts f, which takes a context and one argument, bundling its three results into a tuple.T3.
func ToCARE_1_3[A, R0, R1, R2 any](f func(ctx context.Context, a A) (R0, R1, R2, error)) func(context.Context, A) (tuple.T3[R0, R1, R2], error) {
	return func(ctx context.Context, arg A) (tuple.T3[R0, R1, R2], error) {
		v0, v1, v2, err := f(ctx, arg)
		return tuple.MkT3(v0, v1, v2), err
	}
}
// ToCARE_1_4 adapts f, which takes a context and one argument, bundling its four results into a tuple.T4.
func ToCARE_1_4[A, R0, R1, R2, R3 any](f func(ctx context.Context, a A) (R0, R1, R2, R3, error)) func(context.Context, A) (tuple.T4[R0, R1, R2, R3], error) {
	return func(ctx context.Context, arg A) (tuple.T4[R0, R1, R2, R3], error) {
		v0, v1, v2, v3, err := f(ctx, arg)
		return tuple.MkT4(v0, v1, v2, v3), err
	}
}
// ToCARE_1_5 adapts f, which takes a context and one argument, bundling its five results into a tuple.T5.
func ToCARE_1_5[A, R0, R1, R2, R3, R4 any](f func(ctx context.Context, a A) (R0, R1, R2, R3, R4, error)) func(context.Context, A) (tuple.T5[R0, R1, R2, R3, R4], error) {
	return func(ctx context.Context, arg A) (tuple.T5[R0, R1, R2, R3, R4], error) {
		v0, v1, v2, v3, v4, err := f(ctx, arg)
		return tuple.MkT5(v0, v1, v2, v3, v4), err
	}
}
// ToCARE_1_6 adapts f, which takes a context and one argument, bundling its six results into a tuple.T6.
func ToCARE_1_6[A, R0, R1, R2, R3, R4, R5 any](f func(ctx context.Context, a A) (R0, R1, R2, R3, R4, R5, error)) func(context.Context, A) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
	return func(ctx context.Context, arg A) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
		v0, v1, v2, v3, v4, v5, err := f(ctx, arg)
		return tuple.MkT6(v0, v1, v2, v3, v4, v5), err
	}
}
// ToCARE_2_0 adapts f, which takes a context plus two arguments, to accept them as one tuple.T2, returning an empty tuple.T0 with the error.
func ToCARE_2_0[A0, A1 any](f func(ctx context.Context, a0 A0, a1 A1) error) func(context.Context, tuple.T2[A0, A1]) (tuple.T0, error) {
	return func(ctx context.Context, args tuple.T2[A0, A1]) (tuple.T0, error) {
		return struct{}{}, f(ctx, args.A0, args.A1)
	}
}
// ToCARE_2_1 adapts f, which takes a context plus two arguments, to accept them as one tuple.T2; the single result passes through unchanged.
func ToCARE_2_1[A0, A1, R any](f func(ctx context.Context, a0 A0, a1 A1) (R, error)) func(context.Context, tuple.T2[A0, A1]) (R, error) {
	return func(ctx context.Context, args tuple.T2[A0, A1]) (R, error) {
		return f(ctx, args.A0, args.A1)
	}
}
// ToCARE_2_2 adapts f, which takes a context plus two arguments as one tuple.T2, bundling its two results into a tuple.T2.
func ToCARE_2_2[A0, A1, R0, R1 any](f func(ctx context.Context, a0 A0, a1 A1) (R0, R1, error)) func(context.Context, tuple.T2[A0, A1]) (tuple.T2[R0, R1], error) {
	return func(ctx context.Context, args tuple.T2[A0, A1]) (tuple.T2[R0, R1], error) {
		v0, v1, err := f(ctx, args.A0, args.A1)
		return tuple.MkT2(v0, v1), err
	}
}
// ToCARE_2_3 adapts f, which takes a context plus two arguments as one tuple.T2, bundling its three results into a tuple.T3.
func ToCARE_2_3[A0, A1, R0, R1, R2 any](f func(ctx context.Context, a0 A0, a1 A1) (R0, R1, R2, error)) func(context.Context, tuple.T2[A0, A1]) (tuple.T3[R0, R1, R2], error) {
	return func(ctx context.Context, args tuple.T2[A0, A1]) (tuple.T3[R0, R1, R2], error) {
		v0, v1, v2, err := f(ctx, args.A0, args.A1)
		return tuple.MkT3(v0, v1, v2), err
	}
}
// ToCARE_2_4 adapts f, which takes a context plus two arguments as one tuple.T2, bundling its four results into a tuple.T4.
func ToCARE_2_4[A0, A1, R0, R1, R2, R3 any](f func(ctx context.Context, a0 A0, a1 A1) (R0, R1, R2, R3, error)) func(context.Context, tuple.T2[A0, A1]) (tuple.T4[R0, R1, R2, R3], error) {
	return func(ctx context.Context, args tuple.T2[A0, A1]) (tuple.T4[R0, R1, R2, R3], error) {
		v0, v1, v2, v3, err := f(ctx, args.A0, args.A1)
		return tuple.MkT4(v0, v1, v2, v3), err
	}
}
// ToCARE_2_5 adapts f, which takes a context plus two arguments as one tuple.T2, bundling its five results into a tuple.T5.
func ToCARE_2_5[A0, A1, R0, R1, R2, R3, R4 any](f func(ctx context.Context, a0 A0, a1 A1) (R0, R1, R2, R3, R4, error)) func(context.Context, tuple.T2[A0, A1]) (tuple.T5[R0, R1, R2, R3, R4], error) {
	return func(ctx context.Context, args tuple.T2[A0, A1]) (tuple.T5[R0, R1, R2, R3, R4], error) {
		v0, v1, v2, v3, v4, err := f(ctx, args.A0, args.A1)
		return tuple.MkT5(v0, v1, v2, v3, v4), err
	}
}
// ToCARE_2_6 adapts f, which takes a context plus two arguments as one tuple.T2, bundling its six results into a tuple.T6.
func ToCARE_2_6[A0, A1, R0, R1, R2, R3, R4, R5 any](f func(ctx context.Context, a0 A0, a1 A1) (R0, R1, R2, R3, R4, R5, error)) func(context.Context, tuple.T2[A0, A1]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
	return func(ctx context.Context, args tuple.T2[A0, A1]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
		v0, v1, v2, v3, v4, v5, err := f(ctx, args.A0, args.A1)
		return tuple.MkT6(v0, v1, v2, v3, v4, v5), err
	}
}
// ToCARE_3_0 adapts f, which takes a context plus three arguments, to accept them as one tuple.T3, returning an empty tuple.T0 with the error.
func ToCARE_3_0[A0, A1, A2 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2) error) func(context.Context, tuple.T3[A0, A1, A2]) (tuple.T0, error) {
	return func(ctx context.Context, args tuple.T3[A0, A1, A2]) (tuple.T0, error) {
		return struct{}{}, f(ctx, args.A0, args.A1, args.A2)
	}
}
// ToCARE_3_1 adapts f, which takes a context plus three arguments, to accept them as one tuple.T3; the single result passes through unchanged.
func ToCARE_3_1[A0, A1, A2, R any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2) (R, error)) func(context.Context, tuple.T3[A0, A1, A2]) (R, error) {
	return func(ctx context.Context, args tuple.T3[A0, A1, A2]) (R, error) {
		return f(ctx, args.A0, args.A1, args.A2)
	}
}
// ToCARE_3_2 adapts f, which takes a context plus three arguments as one tuple.T3, bundling its two results into a tuple.T2.
func ToCARE_3_2[A0, A1, A2, R0, R1 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2) (R0, R1, error)) func(context.Context, tuple.T3[A0, A1, A2]) (tuple.T2[R0, R1], error) {
	return func(ctx context.Context, args tuple.T3[A0, A1, A2]) (tuple.T2[R0, R1], error) {
		v0, v1, err := f(ctx, args.A0, args.A1, args.A2)
		return tuple.MkT2(v0, v1), err
	}
}
// ToCARE_3_3 adapts f, which takes a context plus three arguments as one tuple.T3, bundling its three results into a tuple.T3.
func ToCARE_3_3[A0, A1, A2, R0, R1, R2 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2) (R0, R1, R2, error)) func(context.Context, tuple.T3[A0, A1, A2]) (tuple.T3[R0, R1, R2], error) {
	return func(ctx context.Context, args tuple.T3[A0, A1, A2]) (tuple.T3[R0, R1, R2], error) {
		v0, v1, v2, err := f(ctx, args.A0, args.A1, args.A2)
		return tuple.MkT3(v0, v1, v2), err
	}
}
// ToCARE_3_4 adapts f, which takes a context plus three arguments as one tuple.T3, bundling its four results into a tuple.T4.
func ToCARE_3_4[A0, A1, A2, R0, R1, R2, R3 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2) (R0, R1, R2, R3, error)) func(context.Context, tuple.T3[A0, A1, A2]) (tuple.T4[R0, R1, R2, R3], error) {
	return func(ctx context.Context, args tuple.T3[A0, A1, A2]) (tuple.T4[R0, R1, R2, R3], error) {
		v0, v1, v2, v3, err := f(ctx, args.A0, args.A1, args.A2)
		return tuple.MkT4(v0, v1, v2, v3), err
	}
}
// ToCARE_3_5 adapts f, which takes a context plus three arguments as one tuple.T3, bundling its five results into a tuple.T5.
func ToCARE_3_5[A0, A1, A2, R0, R1, R2, R3, R4 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2) (R0, R1, R2, R3, R4, error)) func(context.Context, tuple.T3[A0, A1, A2]) (tuple.T5[R0, R1, R2, R3, R4], error) {
	return func(ctx context.Context, args tuple.T3[A0, A1, A2]) (tuple.T5[R0, R1, R2, R3, R4], error) {
		v0, v1, v2, v3, v4, err := f(ctx, args.A0, args.A1, args.A2)
		return tuple.MkT5(v0, v1, v2, v3, v4), err
	}
}
// ToCARE_3_6 adapts f, which takes a context plus three arguments as one tuple.T3, bundling its six results into a tuple.T6.
func ToCARE_3_6[A0, A1, A2, R0, R1, R2, R3, R4, R5 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2) (R0, R1, R2, R3, R4, R5, error)) func(context.Context, tuple.T3[A0, A1, A2]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
	return func(ctx context.Context, args tuple.T3[A0, A1, A2]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
		v0, v1, v2, v3, v4, v5, err := f(ctx, args.A0, args.A1, args.A2)
		return tuple.MkT6(v0, v1, v2, v3, v4, v5), err
	}
}
// ToCARE_4_0 adapts f, which takes a context plus four arguments, to accept them as one tuple.T4, returning an empty tuple.T0 with the error.
func ToCARE_4_0[A0, A1, A2, A3 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3) error) func(context.Context, tuple.T4[A0, A1, A2, A3]) (tuple.T0, error) {
	return func(ctx context.Context, args tuple.T4[A0, A1, A2, A3]) (tuple.T0, error) {
		return struct{}{}, f(ctx, args.A0, args.A1, args.A2, args.A3)
	}
}
// ToCARE_4_1 adapts f, which takes a context plus four arguments, to accept them as one tuple.T4; the single result passes through unchanged.
func ToCARE_4_1[A0, A1, A2, A3, R any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3) (R, error)) func(context.Context, tuple.T4[A0, A1, A2, A3]) (R, error) {
	return func(ctx context.Context, args tuple.T4[A0, A1, A2, A3]) (R, error) {
		return f(ctx, args.A0, args.A1, args.A2, args.A3)
	}
}
// ToCARE_4_2 adapts f, which takes a context plus four arguments as one tuple.T4, bundling its two results into a tuple.T2.
func ToCARE_4_2[A0, A1, A2, A3, R0, R1 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3) (R0, R1, error)) func(context.Context, tuple.T4[A0, A1, A2, A3]) (tuple.T2[R0, R1], error) {
	return func(ctx context.Context, args tuple.T4[A0, A1, A2, A3]) (tuple.T2[R0, R1], error) {
		v0, v1, err := f(ctx, args.A0, args.A1, args.A2, args.A3)
		return tuple.MkT2(v0, v1), err
	}
}
// ToCARE_4_3 adapts f, which takes a context plus four arguments as one tuple.T4, bundling its three results into a tuple.T3.
func ToCARE_4_3[A0, A1, A2, A3, R0, R1, R2 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3) (R0, R1, R2, error)) func(context.Context, tuple.T4[A0, A1, A2, A3]) (tuple.T3[R0, R1, R2], error) {
	return func(ctx context.Context, args tuple.T4[A0, A1, A2, A3]) (tuple.T3[R0, R1, R2], error) {
		v0, v1, v2, err := f(ctx, args.A0, args.A1, args.A2, args.A3)
		return tuple.MkT3(v0, v1, v2), err
	}
}
// ToCARE_4_4 adapts f, which takes a context plus four arguments as one tuple.T4, bundling its four results into a tuple.T4.
func ToCARE_4_4[A0, A1, A2, A3, R0, R1, R2, R3 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3) (R0, R1, R2, R3, error)) func(context.Context, tuple.T4[A0, A1, A2, A3]) (tuple.T4[R0, R1, R2, R3], error) {
	return func(ctx context.Context, args tuple.T4[A0, A1, A2, A3]) (tuple.T4[R0, R1, R2, R3], error) {
		v0, v1, v2, v3, err := f(ctx, args.A0, args.A1, args.A2, args.A3)
		return tuple.MkT4(v0, v1, v2, v3), err
	}
}
// ToCARE_4_5 adapts f, which takes a context plus four arguments as one tuple.T4, bundling its five results into a tuple.T5.
func ToCARE_4_5[A0, A1, A2, A3, R0, R1, R2, R3, R4 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3) (R0, R1, R2, R3, R4, error)) func(context.Context, tuple.T4[A0, A1, A2, A3]) (tuple.T5[R0, R1, R2, R3, R4], error) {
	return func(ctx context.Context, args tuple.T4[A0, A1, A2, A3]) (tuple.T5[R0, R1, R2, R3, R4], error) {
		v0, v1, v2, v3, v4, err := f(ctx, args.A0, args.A1, args.A2, args.A3)
		return tuple.MkT5(v0, v1, v2, v3, v4), err
	}
}
// ToCARE_4_6 adapts f, which takes a context plus four arguments as one tuple.T4, bundling its six results into a tuple.T6.
func ToCARE_4_6[A0, A1, A2, A3, R0, R1, R2, R3, R4, R5 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3) (R0, R1, R2, R3, R4, R5, error)) func(context.Context, tuple.T4[A0, A1, A2, A3]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
	return func(ctx context.Context, args tuple.T4[A0, A1, A2, A3]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
		v0, v1, v2, v3, v4, v5, err := f(ctx, args.A0, args.A1, args.A2, args.A3)
		return tuple.MkT6(v0, v1, v2, v3, v4, v5), err
	}
}
// ToCARE_5_0 adapts f, which takes a context plus five arguments, to accept them as one tuple.T5, returning an empty tuple.T0 with the error.
func ToCARE_5_0[A0, A1, A2, A3, A4 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) error) func(context.Context, tuple.T5[A0, A1, A2, A3, A4]) (tuple.T0, error) {
	return func(ctx context.Context, args tuple.T5[A0, A1, A2, A3, A4]) (tuple.T0, error) {
		return struct{}{}, f(ctx, args.A0, args.A1, args.A2, args.A3, args.A4)
	}
}
// ToCARE_5_1 adapts f, which takes a context plus five arguments, to accept them as one tuple.T5; the single result passes through unchanged.
func ToCARE_5_1[A0, A1, A2, A3, A4, R any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R, error)) func(context.Context, tuple.T5[A0, A1, A2, A3, A4]) (R, error) {
	return func(ctx context.Context, args tuple.T5[A0, A1, A2, A3, A4]) (R, error) {
		return f(ctx, args.A0, args.A1, args.A2, args.A3, args.A4)
	}
}
// ToCARE_5_2 adapts f, which takes a context plus five arguments as one tuple.T5, bundling its two results into a tuple.T2.
func ToCARE_5_2[A0, A1, A2, A3, A4, R0, R1 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R0, R1, error)) func(context.Context, tuple.T5[A0, A1, A2, A3, A4]) (tuple.T2[R0, R1], error) {
	return func(ctx context.Context, args tuple.T5[A0, A1, A2, A3, A4]) (tuple.T2[R0, R1], error) {
		v0, v1, err := f(ctx, args.A0, args.A1, args.A2, args.A3, args.A4)
		return tuple.MkT2(v0, v1), err
	}
}
// ToCARE_5_3 adapts f, which takes a context plus five arguments as one tuple.T5, bundling its three results into a tuple.T3.
func ToCARE_5_3[A0, A1, A2, A3, A4, R0, R1, R2 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R0, R1, R2, error)) func(context.Context, tuple.T5[A0, A1, A2, A3, A4]) (tuple.T3[R0, R1, R2], error) {
	return func(ctx context.Context, args tuple.T5[A0, A1, A2, A3, A4]) (tuple.T3[R0, R1, R2], error) {
		v0, v1, v2, err := f(ctx, args.A0, args.A1, args.A2, args.A3, args.A4)
		return tuple.MkT3(v0, v1, v2), err
	}
}
// ToCARE_5_4 adapts f, which takes a context plus five arguments as one tuple.T5, bundling its four results into a tuple.T4.
func ToCARE_5_4[A0, A1, A2, A3, A4, R0, R1, R2, R3 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R0, R1, R2, R3, error)) func(context.Context, tuple.T5[A0, A1, A2, A3, A4]) (tuple.T4[R0, R1, R2, R3], error) {
	return func(ctx context.Context, args tuple.T5[A0, A1, A2, A3, A4]) (tuple.T4[R0, R1, R2, R3], error) {
		v0, v1, v2, v3, err := f(ctx, args.A0, args.A1, args.A2, args.A3, args.A4)
		return tuple.MkT4(v0, v1, v2, v3), err
	}
}
// ToCARE_5_5 adapts f, which takes a context plus five arguments as one tuple.T5, bundling its five results into a tuple.T5.
func ToCARE_5_5[A0, A1, A2, A3, A4, R0, R1, R2, R3, R4 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R0, R1, R2, R3, R4, error)) func(context.Context, tuple.T5[A0, A1, A2, A3, A4]) (tuple.T5[R0, R1, R2, R3, R4], error) {
	return func(ctx context.Context, args tuple.T5[A0, A1, A2, A3, A4]) (tuple.T5[R0, R1, R2, R3, R4], error) {
		v0, v1, v2, v3, v4, err := f(ctx, args.A0, args.A1, args.A2, args.A3, args.A4)
		return tuple.MkT5(v0, v1, v2, v3, v4), err
	}
}
// ToCARE_5_6 adapts f, which takes a context plus five arguments as one tuple.T5, bundling its six results into a tuple.T6.
func ToCARE_5_6[A0, A1, A2, A3, A4, R0, R1, R2, R3, R4, R5 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3, a4 A4) (R0, R1, R2, R3, R4, R5, error)) func(context.Context, tuple.T5[A0, A1, A2, A3, A4]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
	return func(ctx context.Context, args tuple.T5[A0, A1, A2, A3, A4]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
		v0, v1, v2, v3, v4, v5, err := f(ctx, args.A0, args.A1, args.A2, args.A3, args.A4)
		return tuple.MkT6(v0, v1, v2, v3, v4, v5), err
	}
}
// ToCARE_6_0 returns a context-with-single argument, single-return-with-error function that calls f.
func ToCARE_6_0[A0, A1, A2, A3, A4, A5 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) error) func(context.Context, tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T0, error) {
return func(ctx context.Context, a tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T0, error) {
err := f(ctx, a.A0, a.A1, a.A2, a.A3, a.A4, a.A5)
return struct{}{}, err
}
}
// ToCARE_6_1 returns a context-with-single argument, single-return-with-error function that calls f.
func ToCARE_6_1[A0, A1, A2, A3, A4, A5, R any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R, error)) func(context.Context, tuple.T6[A0, A1, A2, A3, A4, A5]) (R, error) {
return func(ctx context.Context, a tuple.T6[A0, A1, A2, A3, A4, A5]) (R, error) {
return f(ctx, a.A0, a.A1, a.A2, a.A3, a.A4, a.A5)
}
}
// ToCARE_6_2 returns a context-with-single argument, single-return-with-error function that calls f.
func ToCARE_6_2[A0, A1, A2, A3, A4, A5, R0, R1 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R0, R1, error)) func(context.Context, tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T2[R0, R1], error) {
return func(ctx context.Context, a tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T2[R0, R1], error) {
r0, r1, err := f(ctx, a.A0, a.A1, a.A2, a.A3, a.A4, a.A5)
return tuple.MkT2(r0, r1), err
}
}
// ToCARE_6_3 returns a context-with-single argument, single-return-with-error function that calls f.
func ToCARE_6_3[A0, A1, A2, A3, A4, A5, R0, R1, R2 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R0, R1, R2, error)) func(context.Context, tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T3[R0, R1, R2], error) {
return func(ctx context.Context, a tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T3[R0, R1, R2], error) {
r0, r1, r2, err := f(ctx, a.A0, a.A1, a.A2, a.A3, a.A4, a.A5)
return tuple.MkT3(r0, r1, r2), err
}
}
// ToCARE_6_4 returns a context-with-single argument, single-return-with-error function that calls f.
func ToCARE_6_4[A0, A1, A2, A3, A4, A5, R0, R1, R2, R3 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R0, R1, R2, R3, error)) func(context.Context, tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T4[R0, R1, R2, R3], error) {
return func(ctx context.Context, a tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T4[R0, R1, R2, R3], error) {
r0, r1, r2, r3, err := f(ctx, a.A0, a.A1, a.A2, a.A3, a.A4, a.A5)
return tuple.MkT4(r0, r1, r2, r3), err
}
}
// ToCARE_6_5 returns a context-with-single argument, single-return-with-error function that calls f.
func ToCARE_6_5[A0, A1, A2, A3, A4, A5, R0, R1, R2, R3, R4 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R0, R1, R2, R3, R4, error)) func(context.Context, tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T5[R0, R1, R2, R3, R4], error) {
return func(ctx context.Context, a tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T5[R0, R1, R2, R3, R4], error) {
r0, r1, r2, r3, r4, err := f(ctx, a.A0, a.A1, a.A2, a.A3, a.A4, a.A5)
return tuple.MkT5(r0, r1, r2, r3, r4), err
}
}
// ToCARE_6_6 returns a context-with-single argument, single-return-with-error function that calls f.
func ToCARE_6_6[A0, A1, A2, A3, A4, A5, R0, R1, R2, R3, R4, R5 any](f func(ctx context.Context, a0 A0, a1 A1, a2 A2, a3 A3, a4 A4, a5 A5) (R0, R1, R2, R3, R4, R5, error)) func(context.Context, tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
return func(ctx context.Context, a tuple.T6[A0, A1, A2, A3, A4, A5]) (tuple.T6[R0, R1, R2, R3, R4, R5], error) {
r0, r1, r2, r3, r4, r5, err := f(ctx, a.A0, a.A1, a.A2, a.A3, a.A4, a.A5)
return tuple.MkT6(r0, r1, r2, r3, r4, r5), err
}
} | tuple/tuplefunc/tuplefunc-gen.go | 0.836755 | 0.617859 | tuplefunc-gen.go | starcoder |
package main
const (
	// FieldMaxSize contains the maximum size of the field in cells; it applies to
	// both width and height.
	FieldMaxSize = 80
	// HolesEachStep holds after how many steps a hole might occur (if the
	// preconditions are met): a hole is left every HolesEachStep-th step of a
	// player, and only while that player's speed is at least HoleSpeed.
	HolesEachStep = 6
	// HoleSpeed contains the minimum speed needed for a hole to appear in a
	// player's trail.
	HoleSpeed = 3
	// MaxSpeed holds the maximum speed; accelerating beyond it invalidates the
	// player.
	MaxSpeed = 10
)
// Game represents a game of speed. See https://github.com/informatiCup/InformatiCup2021/ for a description of the game.
// This struct is modified from the server version to fit sl_ow.
type Game struct {
	Width int `json:"width"` // field width in cells
	Height int `json:"height"` // field height in cells
	// Cells is indexed as Cells[y][x]: 0 means the cell is free, a positive value
	// is the ID of the occupying player, and -1 marks a cell that was entered
	// while already occupied (a crash cell).
	Cells [][]int8 `json:"cells"`
	Players map[int]*Player `json:"players"` // players keyed by their 1-based player ID
	You int `json:"you"` // only needed for protocol, ignored everywhere else
	Running bool `json:"running"`
	Deadline string `json:"deadline,omitempty"` // RFC3339
	playerAnswer []string // per-round chosen action of each player, indexed by player ID-1
	freeCountingSlice []bool // scratch buffer — presumably reused for free-cell counting; usage not shown here, TODO confirm
	internalCellsFlat []int8 // scratch buffer — presumably a flattened copy of Cells; usage not shown here, TODO confirm
}
// SimulateGame simulates a full run of the game and sends the result to the provided channel.
// It has some early cut-offs for impossible games.
func (g *Game) SimulateGame(next string, result chan<- struct {
action string
win bool
survived int
survivdedOpponent int
round int
}) {
// Check speed
if next == ActionSlower && g.Players[g.You].Speed == 1 {
result <- struct {
action string
win bool
survived int
survivdedOpponent int
round int
}{next, false, 0, 0, 0}
return
}
if next == ActionFaster && g.Players[g.You].Speed == MaxSpeed {
result <- struct {
action string
win bool
survived int
survivdedOpponent int
round int
}{next, false, 0, 0, 0}
return
}
for k := range g.Players {
g.Players[k].ai = &SuperRandomAI{}
}
first := true
round := 0
survived := -1
survivedOpponent := -1
winner := -1
mainGame:
for { // Loop used for rounds
round++
if g.Players[g.You].Active {
survived++
}
for i := range g.Players {
if i == g.You {
continue
}
if g.Players[g.You].Active {
survivedOpponent++
break
}
}
g.playerAnswer = make([]string, len(g.Players))
for i := range g.playerAnswer {
if i+1 == g.You && first {
g.playerAnswer[i] = next
continue
}
if g.Players[i+1].Active {
if first {
// First round take a possible turn even if suicide is possible - that's why RandomAI can't be used here
answer := make(chan string, 1)
ng := g.PublicCopy()
ng.You = i + 1
ai := BadRandomAI{}
ai.GetChannel(answer)
ai.GetState(ng)
g.playerAnswer[i] = <-answer
continue
}
// Use AI
answer := make(chan string, 1)
ng := g.PublicCopy()
ng.You = i + 1
g.Players[i+1].ai.GetChannel(answer)
g.Players[i+1].ai.GetState(ng)
g.playerAnswer[i] = <-answer
}
}
first = false
// Process Actions
for i := range g.Players {
switch g.playerAnswer[i-1] {
case "":
g.invalidatePlayer(i)
case ActionTurnLeft:
switch g.Players[i].Direction {
case DirectionLeft:
g.Players[i].Direction = DirectionDown
case DirectionRight:
g.Players[i].Direction = DirectionUp
case DirectionUp:
g.Players[i].Direction = DirectionLeft
case DirectionDown:
g.Players[i].Direction = DirectionRight
}
case ActionTurnRight:
switch g.Players[i].Direction {
case DirectionLeft:
g.Players[i].Direction = DirectionUp
case DirectionRight:
g.Players[i].Direction = DirectionDown
case DirectionUp:
g.Players[i].Direction = DirectionRight
case DirectionDown:
g.Players[i].Direction = DirectionLeft
}
case ActionFaster:
g.Players[i].Speed++
if g.Players[i].Speed > MaxSpeed {
g.invalidatePlayer(i)
}
case ActionSlower:
g.Players[i].Speed--
if g.Players[i].Speed < 1 {
g.invalidatePlayer(i)
}
case ActionNOOP:
// Do nothing
default:
g.invalidatePlayer(i)
}
}
// Do Movement
for i := range g.Players {
if !g.Players[i].Active {
continue
}
var dostep func(x, y int) (int, int)
switch g.Players[i].Direction {
case DirectionUp:
dostep = func(x, y int) (int, int) { return x, y - 1 }
case DirectionDown:
dostep = func(x, y int) (int, int) { return x, y + 1 }
case DirectionLeft:
dostep = func(x, y int) (int, int) { return x - 1, y }
case DirectionRight:
dostep = func(x, y int) (int, int) { return x + 1, y }
}
g.Players[i].stepCounter++
for s := 0; s < g.Players[i].Speed; s++ {
g.Players[i].X, g.Players[i].Y = dostep(g.Players[i].X, g.Players[i].Y)
if g.Players[i].X < 0 || g.Players[i].X >= g.Width || g.Players[i].Y < 0 || g.Players[i].Y >= g.Height {
g.invalidatePlayer(i)
break
}
if g.Players[i].Speed >= HoleSpeed && g.Players[i].stepCounter%HolesEachStep == 0 && s != 0 && s != g.Players[i].Speed-1 {
continue
}
if g.Cells[g.Players[i].Y][g.Players[i].X] != 0 {
g.Cells[g.Players[i].Y][g.Players[i].X] = -1
} else {
g.Cells[g.Players[i].Y][g.Players[i].X] = int8(i)
}
}
}
// Check crash
for i := range g.Players {
if !g.Players[i].Active {
continue
}
var dostepback func(x, y int) (int, int)
switch g.Players[i].Direction {
case DirectionUp:
dostepback = func(x, y int) (int, int) { return x, y + 1 }
case DirectionDown:
dostepback = func(x, y int) (int, int) { return x, y - 1 }
case DirectionLeft:
dostepback = func(x, y int) (int, int) { return x + 1, y }
case DirectionRight:
dostepback = func(x, y int) (int, int) { return x - 1, y }
}
backX := g.Players[i].X
backY := g.Players[i].Y
for s := 0; s < g.Players[i].Speed; s++ {
if g.Cells[backY][backX] == -1 {
// Crash - check hole
if g.Players[i].Speed >= HoleSpeed && g.Players[i].stepCounter%HolesEachStep == 0 && s != 0 && s != g.Players[i].Speed-1 {
// No crash - is hole
} else {
g.invalidatePlayer(i)
break
}
}
backX, backY = dostepback(backX, backY)
}
}
if winner == -1 {
for i := range g.Players {
if g.Players[i].Active {
if winner == -1 {
winner = i
} else {
// Game hasn't finished - at least two alive
winner = -1
break
}
}
}
}
playerAlive := false
for i := range g.Players {
if g.Players[i].Active {
playerAlive = playerAlive || g.Players[i].Active
}
}
if !playerAlive {
break mainGame
}
}
// Finish game
g.Running = false
if winner == g.You {
survived = round
}
result <- struct {
action string
win bool
survived int
survivdedOpponent int
round int
}{next, winner == g.You, survived, survivedOpponent, round}
}
// checkEndGame reports whether the game is over, i.e. at most one
// player is still active.
func (g *Game) checkEndGame() bool {
	active := 0
	for _, p := range g.Players {
		if p.Active {
			active++
		}
	}
	return active <= 1
}
// invalidatePlayer marks player p as no longer active. Unknown player
// identifiers are silently ignored.
func (g *Game) invalidatePlayer(p int) {
	if player, ok := g.Players[p]; ok {
		player.Active = false
	}
}
// PublicCopy returns a copy of the game with all private fields set to zero.
// As an exception for AIs, Player.stepCounter is also copied.
func (g Game) PublicCopy() *Game {
	newG := Game{
		Width: g.Width,
		Height: g.Height,
		Cells: make([][]int8, len(g.Cells)),
		Players: make(map[int]*Player, len(g.Players)),
		You: g.You,
		Running: g.Running,
		Deadline: g.Deadline,
	}
	if g.internalCellsFlat == nil {
		// Slow path: deep-copy the board row by row.
		for i := range g.Cells {
			newG.Cells[i] = make([]int8, len(g.Cells[i]))
			copy(newG.Cells[i], g.Cells[i])
		}
	} else {
		// Fast path: one bulk copy of the flat buffer, then rebuild the
		// row views as width-sized windows into the new flat slice.
		newG.internalCellsFlat = make([]int8, len(g.internalCellsFlat))
		copy(newG.internalCellsFlat, g.internalCellsFlat)
		for y := range g.Cells {
			newG.Cells[y] = newG.internalCellsFlat[y*newG.Width : (y+1)*newG.Width]
		}
	}
	// Copy the public player fields; the ai reference is deliberately not
	// copied (private), but stepCounter is, as documented above.
	for k := range g.Players {
		newG.Players[k] = &Player{
			X: g.Players[k].X,
			Y: g.Players[k].Y,
			Direction: g.Players[k].Direction,
			Speed: g.Players[k].Speed,
			Active: g.Players[k].Active,
			Name: g.Players[k].Name,
			stepCounter: g.Players[k].stepCounter,
		}
	}
	return &newG
}
// usage returns the fraction of board cells currently occupied by the
// player with the given id.
func (g *Game) usage(id int) float64 {
	occupied := 0
	want := int8(id)
	for row := 0; row < g.Height; row++ {
		for col := 0; col < g.Width; col++ {
			if g.Cells[row][col] == want {
				occupied++
			}
		}
	}
	return float64(occupied) / float64(g.Height*g.Width)
}
// freeSpaceConnected counts the empty cells reachable from (x, y) and its
// four direct neighbours. A cutoff of -1 disables the early-exit limit.
// Not safe for concurrent use: the visited scratch slice is shared.
func (g *Game) freeSpaceConnected(x, y, cutoff int) int {
	if g.freeCountingSlice == nil {
		g.freeCountingSlice = make([]bool, g.Height*g.Width)
	} else {
		for i := range g.freeCountingSlice {
			g.freeCountingSlice[i] = false
		}
	}
	total := g.freeSpaceConnectedInternal(x, y, cutoff, 0)
	total = g.freeSpaceConnectedInternal(x-1, y, cutoff, total)
	total = g.freeSpaceConnectedInternal(x+1, y, cutoff, total)
	total = g.freeSpaceConnectedInternal(x, y-1, cutoff, total)
	total = g.freeSpaceConnectedInternal(x, y+1, cutoff, total)
	return total
}
// freeSpaceConnectedInternal is the recursive worker for freeSpaceConnected.
// It extends the running count `current` of reachable empty cells starting
// from (x, y), using g.freeCountingSlice to mark visited cells. When cutoff
// is not -1, recursion stops as soon as the count exceeds it.
func (g *Game) freeSpaceConnectedInternal(x, y, cutoff, current int) int {
	if cutoff != -1 && current > cutoff {
		return current
	}
	if x < 0 || x >= g.Width || y < 0 || y >= g.Height {
		return current
	}
	cell := y*g.Width + x
	if g.freeCountingSlice[cell] {
		return current
	}
	// Mark visited before the occupancy check so blocked cells are not
	// re-examined when reached from another direction.
	g.freeCountingSlice[cell] = true
	if g.Cells[y][x] != 0 {
		return current
	}
	current++
	// Flood-fill into the four direct neighbours.
	current = g.freeSpaceConnectedInternal(x-1, y, cutoff, current)
	current = g.freeSpaceConnectedInternal(x+1, y, cutoff, current)
	current = g.freeSpaceConnectedInternal(x, y-1, cutoff, current)
	current = g.freeSpaceConnectedInternal(x, y+1, cutoff, current)
	return current
}
// PopulateInternalCellsFlat populates the internal flat cells, thus providing a speed boost to PublicCopy after being called.
// The game object is not safe to use while this function is running.
func (g *Game) PopulateInternalCellsFlat() {
g.internalCellsFlat = make([]int8, g.Height*g.Width)
for y := range g.Cells {
for x := range g.Cells[y] {
g.internalCellsFlat[y*g.Width+x] = g.Cells[y][x]
}
}
g.Cells = make([][]int8, len(g.Cells))
for y := range g.Cells {
g.Cells[y] = g.internalCellsFlat[y*g.Width : (y+1)*g.Width]
}
} | game.go | 0.634656 | 0.407658 | game.go | starcoder |
package fp
func (m BoolArray) Tail() BoolArray { s := len(m); if s > 0 { return m[1:s-1] } else {return []bool{} } }
func (m StringArray) Tail() StringArray { s := len(m); if s > 0 { return m[1:s-1] } else {return []string{} } }
func (m IntArray) Tail() IntArray { s := len(m); if s > 0 { return m[1:s-1] } else {return []int{} } }
func (m Int64Array) Tail() Int64Array { s := len(m); if s > 0 { return m[1:s-1] } else {return []int64{} } }
func (m ByteArray) Tail() ByteArray { s := len(m); if s > 0 { return m[1:s-1] } else {return []byte{} } }
func (m RuneArray) Tail() RuneArray { s := len(m); if s > 0 { return m[1:s-1] } else {return []rune{} } }
func (m Float32Array) Tail() Float32Array { s := len(m); if s > 0 { return m[1:s-1] } else {return []float32{} } }
func (m Float64Array) Tail() Float64Array { s := len(m); if s > 0 { return m[1:s-1] } else {return []float64{} } }
func (m AnyArray) Tail() AnyArray { s := len(m); if s > 0 { return m[1:s-1] } else {return []Any{} } }
func (m Tuple2Array) Tail() Tuple2Array { s := len(m); if s > 0 { return m[1:s-1] } else {return []Tuple2{} } }
func (m BoolArrayArray) Tail() BoolArrayArray { s := len(m); if s > 0 { return m[1:s-1] } else {return [][]bool{} } }
func (m StringArrayArray) Tail() StringArrayArray { s := len(m); if s > 0 { return m[1:s-1] } else {return [][]string{} } }
func (m IntArrayArray) Tail() IntArrayArray { s := len(m); if s > 0 { return m[1:s-1] } else {return [][]int{} } }
func (m Int64ArrayArray) Tail() Int64ArrayArray { s := len(m); if s > 0 { return m[1:s-1] } else {return [][]int64{} } }
func (m ByteArrayArray) Tail() ByteArrayArray { s := len(m); if s > 0 { return m[1:s-1] } else {return [][]byte{} } }
func (m RuneArrayArray) Tail() RuneArrayArray { s := len(m); if s > 0 { return m[1:s-1] } else {return [][]rune{} } }
func (m Float32ArrayArray) Tail() Float32ArrayArray { s := len(m); if s > 0 { return m[1:s-1] } else {return [][]float32{} } }
func (m Float64ArrayArray) Tail() Float64ArrayArray { s := len(m); if s > 0 { return m[1:s-1] } else {return [][]float64{} } }
func (m AnyArrayArray) Tail() AnyArrayArray { s := len(m); if s > 0 { return m[1:s-1] } else {return [][]Any{} } }
func (m Tuple2ArrayArray) Tail() Tuple2ArrayArray { s := len(m); if s > 0 { return m[1:s-1] } else {return [][]Tuple2{} } } | fp/bootstrap_array_tail.go | 0.629433 | 0.434041 | bootstrap_array_tail.go | starcoder |
package main
import (
"errors"
"fmt"
"log"
"sort"
"github.com/theatlasroom/advent-of-code/go/utils"
)
/**
--- Day 1: Report Repair ---
After saving Christmas five years in a row, you've decided to take a vacation at a nice resort on a tropical island. Surely, Christmas will go on without you.
The tropical island has its own currency and is entirely cash-only. The gold coins used there have a little picture of a starfish; the locals just call them stars.
None of the currency exchanges seem to have heard of them, but somehow, you'll need to find fifty of these coins by the time you arrive so you can pay the deposit on your room.
To save your vacation, you need to get all fifty stars by December 25th.
Collect stars by solving puzzles. Two puzzles will be made available on each day in the Advent calendar; the second puzzle is unlocked when you complete the first.
Each puzzle grants one star. Good luck!
Before you leave, the Elves in accounting just need you to fix your expense report (your puzzle input); apparently, something isn't quite adding up.
Specifically, they need you to find the two entries that sum to 2020 and then multiply those two numbers together.
For example, suppose your expense report contained the following:
1721
979
366
299
675
1456
In this list, the two entries that sum to 2020 are 1721 and 299. Multiplying them together produces 1721 * 299 = 514579, so the correct answer is 514579.
Of course, your expense report is much larger. Find the two entries that sum to 2020; what do you get if you multiply them together?
**/
// target is the sum the expense-report entries must reach.
const target = 2020
// pair holds two expense-report entries.
type pair struct {
	a, b int
}
// Product returns the product of the two entries.
func (p pair) Product() int {
	return p.a * p.b
}
// toString renders both entries followed by their product.
func (p pair) toString() string {
	return fmt.Sprintf("%v %v %v", p.a, p.b, p.Product())
}
// triplet extends a pair with a third entry.
type triplet struct {
	pair // embeds the pair struct
	c int
}
// Product returns the product of all three entries.
func (t triplet) Product() int {
	return t.a * t.b * t.c
}
// toString renders the three entries followed by their product.
func (t triplet) toString() string {
	return fmt.Sprintf("%v %v %v %v", t.a, t.b, t.c, t.Product())
}
// equalPairs scans the ascending slice asc for a value b such that a+b equals
// the target sum, returning that value and whether it was found. Because asc
// is sorted, the scan stops as soon as the sum overshoots.
func equalPairs(a int, asc []int) (int, bool) {
	for _, candidate := range asc {
		switch total := a + candidate; {
		case total == target:
			return candidate, true
		case total > target:
			return 0, false
		}
	}
	return 0, false
}
// findPairsEqualToTarget returns one pair (a drawn from desc, b from asc)
// whose sum equals the target, or an error when no such pair exists.
func findPairsEqualToTarget(asc, desc []int) (pair, error) {
	for _, candidate := range desc {
		if match, found := equalPairs(candidate, asc); found {
			return pair{candidate, match}, nil
		}
	}
	return pair{}, errors.New("No matching values")
}
// findPairsLessThanTarget collects every pair (a from desc, b from asc) whose
// sum is strictly below the target. For each a, scanning asc stops at the
// first b that reaches the target, since asc is sorted ascending.
func findPairsLessThanTarget(asc, desc []int) []pair {
	var pairs []pair
	for _, a := range desc {
		for _, b := range asc {
			if a+b >= target {
				break
			}
			pairs = append(pairs, pair{a, b})
		}
	}
	return pairs
}
// part1 returns the pair of entries summing to the target, aborting the
// program when none exists.
func part1(asc, desc []int) pair {
	result, err := findPairsEqualToTarget(asc, desc)
	if err != nil {
		log.Fatal(err)
	}
	return result
}
// part2 returns the triplet of entries summing to the target, aborting the
// program when none exists. It first gathers all pairs below the target,
// then searches asc for a third entry that completes each pair.
func part2(asc, desc []int) triplet {
	candidates := findPairsLessThanTarget(asc, desc)
	for _, p := range candidates {
		partial := p.a + p.b
		if partial >= target {
			continue
		}
		if c, ok := equalPairs(partial, asc); ok {
			return triplet{p, c}
		}
	}
	// Reaching this point means no combination of three entries works.
	log.Fatal(errors.New("No matching values"))
	return triplet{}
}
func main() {
// Read all the numbers
utils.Banner(utils.BannerConfig{Year: 2020, Day: 1})
input := utils.LoadDataAsString("1.txt")
data := utils.StrToIntArr(input)
// gross
sort.Ints(data)
desc := sort.IntSlice(append([]int(nil), data...))
sort.Sort(sort.Reverse(desc))
fmt.Println(part1(data, desc).toString())
fmt.Println(part2(data, desc).toString())
} | go/2020/1.go | 0.58059 | 0.403802 | 1.go | starcoder |
package client
import (
"encoding/json"
)
// YearlyRetentionOptions struct for YearlyRetentionOptions
type YearlyRetentionOptions struct {
	Count int32 `json:"count"`
	Type string `json:"type"`
}

// NewYearlyRetentionOptions instantiates a new YearlyRetentionOptions object
// with both API-required properties populated.
func NewYearlyRetentionOptions(count int32, type_ string) *YearlyRetentionOptions {
	return &YearlyRetentionOptions{Count: count, Type: type_}
}

// NewYearlyRetentionOptionsWithDefaults instantiates a new
// YearlyRetentionOptions object with only defaulted properties set; required
// properties are left at their zero values.
func NewYearlyRetentionOptionsWithDefaults() *YearlyRetentionOptions {
	return &YearlyRetentionOptions{}
}

// GetCount returns the Count field value (zero for a nil receiver).
func (o *YearlyRetentionOptions) GetCount() int32 {
	if o == nil {
		return 0
	}
	return o.Count
}

// GetCountOk returns a pointer to the Count field value and a boolean
// reporting whether the value is available.
func (o *YearlyRetentionOptions) GetCountOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Count, true
}

// SetCount sets the Count field value.
func (o *YearlyRetentionOptions) SetCount(v int32) {
	o.Count = v
}

// GetType returns the Type field value (empty for a nil receiver).
func (o *YearlyRetentionOptions) GetType() string {
	if o == nil {
		return ""
	}
	return o.Type
}

// GetTypeOk returns a pointer to the Type field value and a boolean
// reporting whether the value is available.
func (o *YearlyRetentionOptions) GetTypeOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Type, true
}

// SetType sets the Type field value.
func (o *YearlyRetentionOptions) SetType(v string) {
	o.Type = v
}

// MarshalJSON serializes the options; both fields are always emitted.
func (o YearlyRetentionOptions) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"count": o.Count,
		"type":  o.Type,
	})
}
type NullableYearlyRetentionOptions struct {
value *YearlyRetentionOptions
isSet bool
}
func (v NullableYearlyRetentionOptions) Get() *YearlyRetentionOptions {
return v.value
}
func (v *NullableYearlyRetentionOptions) Set(val *YearlyRetentionOptions) {
v.value = val
v.isSet = true
}
func (v NullableYearlyRetentionOptions) IsSet() bool {
return v.isSet
}
func (v *NullableYearlyRetentionOptions) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableYearlyRetentionOptions(val *YearlyRetentionOptions) *NullableYearlyRetentionOptions {
return &NullableYearlyRetentionOptions{value: val, isSet: true}
}
func (v NullableYearlyRetentionOptions) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableYearlyRetentionOptions) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | client/model_yearly_retention_options.go | 0.78968 | 0.418103 | model_yearly_retention_options.go | starcoder |
package gorough
import (
"math"
"sort"
)
// edgeEntry describes one polygon edge in the scanline fill: its vertical
// extent [ymin, ymax], the x coordinate at ymin, and the inverse slope used
// to advance x per scanline step.
type edgeEntry struct {
	ymin float64
	ymax float64
	x float64
	islope float64
}

// less orders edges for the edge table: primarily by ymin, then by x, and
// finally by ymax.
//
// The final comparison previously computed (e.ymax-ee.ymax)/math.Abs(...)<0,
// which is just a roundabout sign test; it is equivalent to a direct
// comparison (including for equal values, guarded above, and NaN, where both
// forms yield false).
func (e edgeEntry) less(ee edgeEntry) bool {
	if e.ymin < ee.ymin {
		return true
	}
	if e.ymin > ee.ymin {
		return false
	}
	if e.x < ee.x {
		return true
	}
	if e.x > ee.x || e.ymax == ee.ymax {
		return false
	}
	return e.ymax < ee.ymax
}

// edgeEntries implements sort.Interface over a slice of edgeEntry.
type edgeEntries []edgeEntry
func (e edgeEntries) Len() int { return len(e) }
func (e edgeEntries) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e edgeEntries) Less(i, j int) bool { return e[i].less(e[j]) }

// activeEdgeEntry pairs an edge with the scanline y at which it became active.
type activeEdgeEntry struct {
	s float64
	edge edgeEntry
}

// activeEdgeEntries implements sort.Interface, ordering active edges by their
// current x coordinate.
type activeEdgeEntries []activeEdgeEntry
func (e activeEdgeEntries) Len() int { return len(e) }
func (e activeEdgeEntries) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e activeEdgeEntries) Less(i, j int) bool {
	// Direct comparison; the former divide-by-absolute-value sign trick is
	// equivalent (equal or NaN x values compare as not-less either way).
	return e[i].edge.x < e[j].edge.x
}
// Filler produces the hachure fill operation for a polygon. Implementations
// are configured via the angle/gap setters before fillPolygon is invoked.
type Filler interface {
	fillPolygon(points []Point, opt *LineOptions) operation
	SetAngle(float64)
	SetGap(float64)
	setConnectEnds(bool)
}
// polygonHachureLines computes the hachure fill lines for a polygon at an
// arbitrary angle: the polygon is rotated so the hachure direction becomes
// horizontal, filled with horizontal scanlines, and everything is rotated
// back afterwards.
func polygonHachureLines(points []Point, hachureAngle float64, hachureGap float64, opt *LineOptions) []Line {
	pivot := Point{}
	rotation := math.Round(hachureAngle + 90)
	if rotation != 0 {
		RotatePoints(&points, pivot, rotation)
	}
	lines := straightHachureLines(points, hachureGap, opt)
	if rotation != 0 {
		// Undo the rotation on the (shared) input points and rotate the
		// generated lines into their final orientation.
		RotatePoints(&points, pivot, -rotation)
		RotateLines(&lines, pivot, -rotation)
	}
	return lines
}
// straightHachureLines fills a polygon with horizontal scanlines spaced by
// the hachure gap, using a classic scanline/active-edge-table algorithm.
// The polygon is closed implicitly if the last vertex differs from the first.
func straightHachureLines(vertices []Point, hachureGap float64, opt *LineOptions) (lines []Line) {
	if !vertices[0].Eq(vertices[len(vertices)-1]) {
		vertices = append(vertices, vertices[0])
	}
	// Fewer than three distinct vertices cannot enclose any area.
	if len(vertices) <= 2 {
		return
	}
	gap := initHachureGap(hachureGap, opt.Styles.StrokeWidth)
	// Clamp to a minimum gap to avoid a non-terminating scan.
	gap = math.Max(gap, 0.1)
	// Create sorted edges table
	var (
		edges []edgeEntry
		activeEdges []activeEdgeEntry
		filterActiveEdges []activeEdgeEntry
	)
	edges = []edgeEntry{}
	for i := 0; i < len(vertices)-1; i++ {
		p1 := vertices[i]
		p2 := vertices[i+1]
		// Horizontal edges never intersect a horizontal scanline usefully.
		if p1.Y == p2.Y {
			continue
		}
		ymin := math.Min(p1.Y, p2.Y)
		x := p2.X
		if ymin == p1.Y {
			x = p1.X
		}
		edges = append(edges, edgeEntry{
			ymin: ymin,
			ymax: math.Max(p1.Y, p2.Y),
			x: x,
			islope: (p2.X - p1.X) / (p2.Y - p1.Y),
		})
	}
	sort.Sort(edgeEntries(edges))
	if len(edges) == 0 {
		return
	}
	// Start scanning
	activeEdges = []activeEdgeEntry{}
	y := edges[0].ymin
	for len(activeEdges) > 0 || len(edges) > 0 {
		// Move every edge whose ymin has been reached into the active set.
		if len(edges) > 0 {
			ix := -1
			for i := range edges {
				if edges[i].ymin > y {
					break
				}
				ix = i
			}
			for _, e := range edges[0 : ix+1] {
				activeEdges = append(activeEdges, activeEdgeEntry{
					s: y,
					edge: e,
				})
			}
			edges = edges[ix+1:]
		}
		// Drop active edges that the scanline has passed.
		filterActiveEdges = []activeEdgeEntry{}
		for i, a := range activeEdges {
			if activeEdges[i].edge.ymax > y {
				filterActiveEdges = append(filterActiveEdges, a)
			}
		}
		activeEdges = filterActiveEdges
		sort.Sort(activeEdgeEntries(activeEdges))
		// fill between the edges
		if len(activeEdges) > 1 {
			// Interior spans lie between consecutive pairs of crossings.
			for i := 0; i < len(activeEdges); i = i + 2 {
				nexti := i + 1
				if nexti >= len(activeEdges) {
					break
				}
				ce := activeEdges[i].edge
				ne := activeEdges[nexti].edge
				lines = append(lines, Line{
					P1: Point{X: math.Round(ce.x), Y: y},
					P2: Point{X: math.Round(ne.x), Y: y},
				})
			}
		}
		// Advance the scanline and each active edge's x by its inverse slope.
		y += gap
		for i := range activeEdges {
			activeEdges[i].edge.x = activeEdges[i].edge.x + (gap * activeEdges[i].edge.islope)
		}
	}
	return
}
// randOffsetWithRange returns a random offset within [min, max] scaled by the
// pen's roughness, with the amplification factor fixed at 1.
func randOffsetWithRange(min, max float64, opt *PenOptions) float64 {
	return offset(min, max, opt.Roughness, 1)
}
// initHachureGap resolves the effective hachure gap. A negative input selects
// automatic mode: four times the stroke width, falling back to 4 when the
// stroke width is zero.
func initHachureGap(hachureGap float64, strokeWidth float64) (gap float64) {
	gap = hachureGap
	if gap < 0 {
		// Auto mode: derive the gap from the stroke width, defaulting to 4.
		if gap = strokeWidth * 4; gap == 0 {
			gap = 4
		}
	}
	return gap
}
package gocv
import (
"github.com/fwessels/go-cv-simd/sse2"
)
// Binarization performs binarization of 8-bit gray image.
// All images must have 8-bit gray format and must have the same width and height.
// For every point:
// dst[i] = compare(src[i], value) ? positive : negative;
// where compare(a, b) depends from compareType (see ::SimdCompareType).
//
// Thin wrapper: the uint8 parameters are widened to uint64 for the SSE2
// SIMD binding.
func Binarization(src gocvsimd.View, value, positive, negative uint8, dst gocvsimd.View, compareType uint8) {
	gocvsimd.SimdSse2Binarization(src, uint64(value), uint64(positive), uint64(negative), dst, uint64(compareType))
}
// AveragingBinarization performs averaging binarization of 8-bit gray image.
// All images must have 8-bit gray format and must have the same width and height.
// For every point:
// sum = 0; area = 0;
// for(dy = -neighborhood; dy <= neighborhood; ++dy)
// {
// for(dx = -neighborhood; dx <= neighborhood; ++dx)
// {
// if(x + dx >= 0 && x + dx < width && y + dy >= 0 && y + dy < height)
// {
// area++;
// if(compare(src[x + dx, x + dy], value))
// sum++;
// }
// }
// }
// dst[x, y] = sum*255 > area*threshold ? positive : negative;
//where compare(a, b) depends from compareType (see ::SimdCompareType).
//
// Thin wrapper over the SSE2 SIMD binding; all uint8 parameters are widened
// to uint64 for the call.
func AveragingBinarization(src gocvsimd.View, value uint8, neighborhood uint64, threshold, positive, negative uint8, dst gocvsimd.View, compareType uint8) {
	gocvsimd.SimdSse2AveragingBinarization(src, uint64(value), neighborhood, uint64(threshold), uint64(positive), uint64(negative), dst, uint64(compareType))
}
// ConditionalCount8u calculates number of points satisfying certain condition for 8-bit gray image.
// For every point:
// if(compare(src[i], value))
// count++;
// where compare(a, b) depends from compareType (see ::SimdCompareType).
//
// Thin wrapper over the SSE2 SIMD binding.
func ConditionalCount8u(src gocvsimd.View, value uint8, compareType uint8) uint32 {
	return gocvsimd.SimdSse2ConditionalCount8u(src, uint64(value), uint64(compareType))
}
// ConditionalCount16i calculates the number of points satisfying certain condition for 16-bit signed integer image.
// For every point:
// if(compare(src[i], value))
// count++;
// where compare(a, b) depends from compareType (see ::SimdCompareType).
//
// Thin wrapper over the SSE2 SIMD binding; the int16 value is widened to
// uint64 for the call.
func ConditionalCount16i(src gocvsimd.View, value int16, compareType uint8) uint32 {
	return gocvsimd.SimdSse2ConditionalCount16i(src, uint64(value), uint64(compareType))
}
// ConditionalSum calculates sum of image points when mask points satisfying certain condition.
// All images must have 8-bit gray format and must have the same width and height.
// For every point:
// if(compare(mask[i], value))
// sum += src[i];
// where compare(a, b) depends from compareType (see ::SimdCompareType).
//
// Thin wrapper over the SSE2 SIMD binding.
func ConditionalSum(src, mask gocvsimd.View, value, compareType uint8) uint64 {
	return gocvsimd.SimdSse2ConditionalSum(src, mask, uint64(value), uint64(compareType))
}
// ConditionalSquareSum calculates sum of squared image points when mask points satisfying certain condition.
// All images must have 8-bit gray format and must have the same width and height.
// For every point:
// if(compare(mask[i], value))
// sum += src[i]*src[i];
// where compare(a, b) depends from compareType (see ::SimdCompareType).
//
// Thin wrapper over the SSE2 SIMD binding.
func ConditionalSquareSum(src, mask gocvsimd.View, value, compareType uint8) uint64 {
	return gocvsimd.SimdSse2ConditionalSquareSum(src, mask, uint64(value), uint64(compareType))
}
// ConditionalSquareGradientSum calculates sum of squared gradient of image points when mask points satisfying certain condition.
// All images must have 8-bit gray format and must have the same width and height. The image height and width must be equal or greater 3.
// For every point except border:
// if(compare(mask[x, y], value))
// {
// dx = src[x + 1, y] - src[x - 1, y];
// dy = src[x, y + 1] - src[x, y - 1];
// sum += dx*dx + dy*dy;
// }
// where compare(a, b) depends from compareType (see ::SimdCompareType).
//
// Thin wrapper over the SSE2 SIMD binding.
func ConditionalSquareGradientSum(src, mask gocvsimd.View, value, compareType uint8) uint64 {
	return gocvsimd.SimdSse2ConditionalSquareGradientSum(src, mask, uint64(value), uint64(compareType))
}
// ConditionalFill fills pixels of 8-bit gray image by given value if corresponding pixels of input 8-bit gray image satisfy certain condition.
// All images must have the same width and height.
// For every point:
// if(compare(src[i], threshold))
// dst[i] = value;
// where compare(a, b) depends from compareType (see ::SimdCompareType).
//
// Thin wrapper over the SSE2 SIMD binding.
func ConditionalFill(src gocvsimd.View, threshold, compareType, value uint8, dst gocvsimd.View) {
	gocvsimd.SimdSse2ConditionalFill(src, uint64(threshold), uint64(compareType), uint64(value), dst)
}
// OperationBinary8u performs given operation between two images.
// All images must have the same width, height and format (8-bit gray, 16-bit UV (UV plane of NV12 pixel format), 24-bit BGR or 32-bit BGRA).
// The _type argument selects the operation to apply.
func OperationBinary8u(a, b, dst gocvsimd.View, _type uint8) {
	gocvsimd.SimdSse2OperationBinary8u(a, b, dst, uint64(_type))
}

// OperationBinary16i performs given operation between two images.
// All images must have the same width, height and ::SimdPixelFormatInt16 pixel format.
// The _type argument selects the operation to apply.
func OperationBinary16i(a, b, dst gocvsimd.View, _type uint8) {
	// gofmt fix: the original had a stray space before the comma ("b ,").
	gocvsimd.SimdSse2OperationBinary16i(a, b, dst, uint64(_type))
}
// VectorProduct calculates result 8-bit gray image as product of two vectors.
// For all points:
// dst[x, y] = horizontal[x]*vertical[y]/255;
func VectorProduct(vertical, horizontal, dst gocvsimd.View) {
gocvsimd.SimdSse2VectorProduct(vertical, horizontal, dst)
} | operation.go | 0.55447 | 0.559591 | operation.go | starcoder |
package inventory
import (
"context"
"fmt"
"time"
"github.com/Jim3Things/CloudChamber/simulation/internal/clients/inventory"
"github.com/Jim3Things/CloudChamber/simulation/internal/clients/namespace"
"github.com/Jim3Things/CloudChamber/simulation/internal/clients/store"
"github.com/Jim3Things/CloudChamber/simulation/pkg/errors"
pb "github.com/Jim3Things/CloudChamber/simulation/pkg/protos/inventory"
"github.com/Jim3Things/CloudChamber/simulation/test"
)
// testItem carries the state shared by every mocked inventory item: the
// expected-call recorder (test.Mock) plus revision and key bookkeeping that
// the mocked accessors report back to tests.
type testItem struct {
	test.Mock
	// ts gives access to the owning test suite (assertions, table names).
	ts *testSuiteCore
	// revision is the revision most recently returned to the caller.
	revision int64
	// revisionStore mimics the revision held by the backing store.
	revisionStore int64
	// key is the item's fully qualified store key.
	key string
	keyChildIndex string
	KeyIndex string
}
// EnsureAllCallsUsed is a function that verifies that all expected mocked
// calls have been done the required number of times. As the calls may well
// execute asynchronously to the test (such as the case with the calls made
// during a state machine save), this function will test for completion for
// up to 2 seconds before failing.
func (t *testItem) EnsureAllCallsUsed() {
	worked := t.ts.Eventually(func() bool {
		return t.UnusedExpectedCallCount() == 0
	}, 2*time.Second, 10*time.Millisecond)
	if !worked {
		t.ts.Require().Fail(t.Mock.ListUnusedCalls())
	}
}
// viewTestItem extends testItem with the key accessors shared by all mocked
// view objects.
type viewTestItem struct {
	testItem
}
// ChildIndexName returns the mocked child-index key. A test may override the
// stored value by supplying a string as the expected call's first argument.
func (m *viewTestItem) ChildIndexName() string {
	args := m.Called()
	m.keyChildIndex = args.StringIf(0, m.keyChildIndex)
	return m.keyChildIndex
}
// IndexName returns the mocked index key (overridable per expected call).
func (m *viewTestItem) IndexName() string {
	args := m.Called()
	m.KeyIndex = args.StringIf(0, m.KeyIndex)
	return m.KeyIndex
}
// KeyName returns the mocked store key (overridable per expected call).
func (m *viewTestItem) KeyName() string {
	args := m.Called()
	m.key = args.StringIf(0, m.key)
	return m.key
}
// ListChildren is not supported by this mock; the call is recorded and
// ErrFunctionNotAvailable is returned.
func (m *viewTestItem) ListChildren(ctx context.Context) (int64, []string, error) {
	_ = m.Called(ctx)
	return store.RevisionInvalid, nil, errors.ErrFunctionNotAvailable
}
// monikeredTestItem extends testItem for mocks that also expose a moniker.
type monikeredTestItem struct {
	testItem
}

// Moniker returns the mocked moniker string. A test may override the stored
// value by supplying a string as the expected call's first argument.
// Uses args.StringIf for consistency with the other mocked accessors
// (ChildIndexName, IndexName, KeyName); the behavior is identical to the
// previous explicit Count/String check.
func (m *monikeredTestItem) Moniker() string {
	args := m.Called()
	m.key = args.StringIf(0, m.key)
	return m.key
}
// viewActualCommon holds the mocked state shared by "actual" views: the
// observed condition and the machine core record.
type viewActualCommon struct {
	viewTestItem
	condition pb.Condition
	core *pb.MachineCore
}
// GetCondition is not yet implemented by this mock.
func (m *viewActualCommon) GetCondition() pb.Condition {
	panic("implement me")
}
// SetCondition records the call, stores the new condition and invalidates
// the cached revision, forcing a future store round trip.
func (m *viewActualCommon) SetCondition(cond pb.Condition) {
	_ = m.Called(cond)
	m.condition = cond
	m.revision = store.RevisionInvalid
}
// GetCore is not yet implemented by this mock.
func (m *viewActualCommon) GetCore() *pb.MachineCore {
	panic("implement me")
}
// SetCore records the call, stores a clone of the supplied machine core and
// invalidates the cached revision.
func (m *viewActualCommon) SetCore(item *pb.MachineCore) {
	_ = m.Called(item)
	m.core = item.Clone()
	m.revision = store.RevisionInvalid
}
// MockedRack is the mocked inventory rack: it tracks the rack's definition
// and actual views plus the mocked pdus, tors and blades created beneath it.
type MockedRack struct {
	monikeredTestItem
	region string
	zone string
	rack string
	definition *MockedRackDefinition
	actual *MockedRackActual
	// Child mocks, keyed by their numeric identifier.
	pdus map[int64]*MockedPdu
	tors map[int64]*MockedTor
	blades map[int64]*MockedBlade
}
// newMockedRack builds a MockedRack named within the fixed
// "region/test/zone/a" namespace, with empty child maps and definition and
// actual views whose keys are fully qualified against the suite's
// definition and actual tables respectively.
func newMockedRack(ts *testSuiteCore, name string) *MockedRack {
	r := &MockedRack{
		monikeredTestItem: monikeredTestItem{
			testItem: testItem{
				key: fmt.Sprintf("region/test/zone/a/rack/%s", name),
				ts: ts,
			},
		},
		pdus: make(map[int64]*MockedPdu),
		tors: make(map[int64]*MockedTor),
		blades: make(map[int64]*MockedBlade),
	}
	// Definition view: same rack key qualified into the definition table.
	r.definition = &MockedRackDefinition{
		viewTestItem: viewTestItem{
			testItem: testItem{
				ts: r.ts,
				revision: 0,
				revisionStore: 0,
				key: namespace.FullyQualifyName(ts.tables.Definition(), r.key),
				keyChildIndex: "",
				KeyIndex: "",
			},
		},
		parent: r,
	}
	// Actual view: same rack key qualified into the actual table.
	r.actual = &MockedRackActual{
		viewTestItem: viewTestItem{
			testItem: testItem{
				ts: r.ts,
				revision: 0,
				revisionStore: 0,
				key: namespace.FullyQualifyName(ts.tables.Actual(), r.key),
				keyChildIndex: "",
				KeyIndex: "",
			},
		},
		parent: r,
	}
	return r
}
// Definition returns the rack's definition view. A test may substitute a
// replacement *MockedRackDefinition by supplying it as the expected call's
// first argument; the replacement's parent pointer is re-wired to this rack.
func (m *MockedRack) Definition() inventory.RackViewDefinitionOperations {
	args := m.Called()
	if args.Count() > 0 {
		m.definition = args.Get(0).(*MockedRackDefinition)
		m.definition.parent = m
	}
	return m.definition
}
// Actual returns the rack's actual view. A test may substitute a replacement
// *MockedRackActual by supplying it as the expected call's first argument.
// As with Definition, the replacement's parent pointer is re-wired to this
// rack (previously omitted, which left an injected replacement with a nil
// parent and would panic in any error path that names the region/zone/rack).
func (m *MockedRack) Actual() inventory.RackViewActualOperations {
	args := m.Called()
	if args.Count() > 0 {
		m.actual = args.Get(0).(*MockedRackActual)
		m.actual.parent = m
	}
	return m.actual
}
// NewChild is not supported for racks; the call is recorded and
// ErrFunctionNotAvailable is returned.
func (m *MockedRack) NewChild(name string) (interface{}, error) {
	_ = m.Called(name)
	return nil, errors.ErrFunctionNotAvailable
}
// NewPdu creates and registers a mocked pdu beneath this rack, keyed by ID.
// A test may force a failure by supplying an error as the expected call's
// first argument.
func (m *MockedRack) NewPdu(ID int64) (inventory.PduOperations, error) {
	args := m.Called(ID)
	if err := args.ErrorIf(0, nil); err != nil {
		return nil, err
	}
	pdu := &MockedPdu{
		monikeredTestItem: monikeredTestItem{
			testItem: testItem{
				key: fmt.Sprintf("%s/pdus/%d", m.key, ID),
				ts: m.ts,
			},
		},
	}
	m.pdus[ID] = pdu
	return pdu, nil
}
// NewTor creates and registers a mocked tor beneath this rack, keyed by ID.
// A test may force a failure via the expected call's first argument.
func (m *MockedRack) NewTor(ID int64) (inventory.TorOperations, error) {
	args := m.Called(ID)
	if err := args.ErrorIf(0, nil); err != nil {
		return nil, err
	}
	tor := &MockedTor{
		monikeredTestItem: monikeredTestItem{
			testItem: testItem{
				key: fmt.Sprintf("%s/tors/%d", m.key, ID),
				ts: m.ts,
			},
		},
	}
	m.tors[ID] = tor
	return tor, nil
}
// NewBlade creates and registers a mocked blade beneath this rack, keyed by
// ID. A test may force a failure via the expected call's first argument.
func (m *MockedRack) NewBlade(ID int64) (inventory.BladeOperations, error) {
	args := m.Called(ID)
	if err := args.ErrorIf(0, nil); err != nil {
		return nil, err
	}
	blade := &MockedBlade{
		monikeredTestItem: monikeredTestItem{
			testItem: testItem{
				key: fmt.Sprintf("%s/blades/%d", m.key, ID),
				ts: m.ts,
			},
		},
	}
	m.blades[ID] = blade
	return blade, nil
}
// MockedRackDefinition mocks the definition view of a rack, holding the
// simulated stored record and the details most recently read or set.
type MockedRackDefinition struct {
	viewTestItem
	parent *MockedRack
	// storedData simulates the record persisted in the backing store.
	storedData *pb.Definition_Rack
	details *pb.RackDetails
}
// ListPdus is not yet implemented by this mock.
func (m *MockedRackDefinition) ListPdus(ctx context.Context) (int64, []int64, error) {
	panic("implement me")
}
// ListTors is not yet implemented by this mock.
func (m *MockedRackDefinition) ListTors(ctx context.Context) (int64, []int64, error) {
	panic("implement me")
}
// ListBlades is not yet implemented by this mock.
func (m *MockedRackDefinition) ListBlades(ctx context.Context) (int64, []int64, error) {
	panic("implement me")
}
// Create mocks creation of the rack definition record. The expected call may
// supply: arg 0 - a record to inject as the stored data; arg 1 - the
// revision to report; arg 2 - an error override. With a record already
// present the default outcome is ErrRackAlreadyExists.
func (m *MockedRackDefinition) Create(ctx context.Context) (rev int64, err error) {
	args := m.Called(ctx)
	// The injected record (if any) lands before the existence check runs,
	// so a test can stage the already-exists failure path this way.
	m.storedData = args.GetIf(0, m.storedData).(*pb.Definition_Rack)
	err = nil
	if m.storedData != nil {
		err = errors.ErrRackAlreadyExists{
			Region: m.parent.region,
			Zone: m.parent.zone,
			Rack: m.parent.rack,
		}
	}
	if err = args.ErrorIf(2, err); err != nil {
		return store.RevisionInvalid, err
	}
	m.revisionStore = args.Int64If(1, m.revisionStore)
	m.revision = m.revisionStore
	return m.revisionStore, nil
}
// Read mocks reading the rack definition record, defaulting to
// ErrRackNotFound when no record is present. Expected-call arguments mirror
// Create: record, revision, error override.
func (m *MockedRackDefinition) Read(ctx context.Context) (rev int64, err error) {
	args := m.Called(ctx)
	m.storedData = args.GetIf(0, m.storedData).(*pb.Definition_Rack)
	err = nil
	if m.storedData == nil {
		err = errors.ErrRackNotFound{
			Region: m.parent.region,
			Zone: m.parent.zone,
			Rack: m.parent.rack,
		}
	}
	if err = args.ErrorIf(2, err); err != nil {
		return store.RevisionInvalid, err
	}
	// A successful read refreshes the cached details from the record.
	m.details = m.storedData.GetDetails()
	m.revisionStore = args.Int64If(1, m.revisionStore)
	m.revision = m.revisionStore
	return m.revisionStore, nil
}
// Update mocks updating the rack definition record, defaulting to the data
// built from the currently held details (see makeStoredData). It fails with
// ErrRackNotFound when no record exists and with ErrDetailsNotAvailable when
// the data carries no details; a successful update bumps the revision.
func (m *MockedRackDefinition) Update(ctx context.Context, unconditional bool) (rev int64, err error) {
	args := m.Called(ctx, unconditional)
	data := args.GetIf(0, m.makeStoredData()).(*pb.Definition_Rack)
	err = nil
	if m.storedData == nil {
		err = errors.ErrRackNotFound{
			Region: m.parent.region,
			Zone: m.parent.zone,
			Rack: m.parent.rack,
		}
	}
	if err == nil && data.Details == nil {
		err = errors.ErrDetailsNotAvailable("rack")
	}
	if err = args.ErrorIf(2, err); err != nil {
		return store.RevisionInvalid, err
	}
	m.storedData = data
	// Default behavior advances the stored revision by one, as a real
	// store update would.
	m.revisionStore = args.Int64If(1, m.revisionStore+1)
	m.revision = m.revisionStore
	return m.revisionStore, nil
}
// Delete mocks deletion; the revision and error come straight from the
// expected call's arguments 0 and 1.
func (m *MockedRackDefinition) Delete(ctx context.Context, unconditional bool) (int64, error) {
	args := m.Called(ctx, unconditional)
	return args.Int64(0), args.Error(1)
}
// FetchChildren is not supported by this mock; the call is recorded and
// ErrFunctionNotAvailable is returned.
func (m *MockedRackDefinition) FetchChildren(ctx context.Context) (int64, *map[string]interface{}, error) {
	_ = m.Called(ctx)
	return store.RevisionInvalid, nil, errors.ErrFunctionNotAvailable
}
// SetDetails records the call, stores a clone of the supplied details and
// invalidates the cached revision.
func (m *MockedRackDefinition) SetDetails(details *pb.RackDetails) {
	_ = m.Called(details)
	m.details = details.Clone()
	m.revision = store.RevisionInvalid
}
// GetDetails returns the mocked details, overridable by supplying a
// *pb.RackDetails as the expected call's first argument.
func (m *MockedRackDefinition) GetDetails() *pb.RackDetails {
	args := m.Called()
	m.details = args.GetIf(0, m.details).(*pb.RackDetails)
	return m.details
}
// GetStoreData is not yet implemented by this mock.
func (m *MockedRackDefinition) GetStoreData() *pb.Definition_Rack {
	panic("implement me")
}
// Equal records the call and compares the held details against the record's
// details.
func (m *MockedRackDefinition) Equal(d *pb.Definition_Rack) bool {
	_ = m.Called(d)
	return m.details.Equal(d.GetDetails())
}
// NotEqual records the call and returns the inverse comparison. Note it does
// not delegate to Equal, so only one mocked call is registered.
func (m *MockedRackDefinition) NotEqual(d *pb.Definition_Rack) bool {
	_ = m.Called(d)
	return !m.details.Equal(d.GetDetails())
}
// GetStoreDataWithChildren is not yet implemented by this mock.
func (m *MockedRackDefinition) GetStoreDataWithChildren(ctx context.Context) (*pb.Definition_Rack, error) {
	panic("implement me")
}
// FetchPdus mocks enumeration of the rack's PDUs. The recorded call may
// override the revision (arg 0), replace the backing PDU set (arg 1) or force
// an error (arg 2). Every returned PDU refreshes its definition cache from
// its stored data first.
func (m *MockedRackDefinition) FetchPdus(ctx context.Context) (int64, map[int64]inventory.PduOperations, error) {
	args := m.Called(ctx)
	if err := args.ErrorIf(2, nil); err != nil {
		return store.RevisionInvalid, nil, err
	}
	rev := args.Int64If(0, m.revision)
	m.parent.pdus = args.GetIf(1, m.parent.pdus).(map[int64]*MockedPdu)
	pdus := make(map[int64]inventory.PduOperations)
	for i, mockedPdu := range m.parent.pdus {
		pdus[i] = mockedPdu
		mockedPdu.definition.readState()
	}
	return rev, pdus, nil
}

// FetchTors mocks enumeration of the rack's TOR switches; argument
// conventions match FetchPdus above.
func (m *MockedRackDefinition) FetchTors(ctx context.Context) (int64, map[int64]inventory.TorOperations, error) {
	args := m.Called(ctx)
	if err := args.ErrorIf(2, nil); err != nil {
		return store.RevisionInvalid, nil, err
	}
	rev := args.Int64If(0, m.revision)
	m.parent.tors = args.GetIf(1, m.parent.tors).(map[int64]*MockedTor)
	tors := make(map[int64]inventory.TorOperations)
	for i, mockedTor := range m.parent.tors {
		tors[i] = mockedTor
		mockedTor.definition.readState()
	}
	return rev, tors, nil
}

// FetchBlades mocks enumeration of the rack's blades; argument conventions
// match FetchPdus above.
func (m *MockedRackDefinition) FetchBlades(ctx context.Context) (int64, map[int64]inventory.BladeOperations, error) {
	args := m.Called(ctx)
	if err := args.ErrorIf(2, nil); err != nil {
		return store.RevisionInvalid, nil, err
	}
	rev := args.Int64If(0, m.revision)
	m.parent.blades = args.GetIf(1, m.parent.blades).(map[int64]*MockedBlade)
	blades := make(map[int64]inventory.BladeOperations)
	for i, mockedBlade := range m.parent.blades {
		blades[i] = mockedBlade
		mockedBlade.definition.readState()
	}
	return rev, blades, nil
}
// makeStoredData builds a definition payload from the cached details; the
// child collections are intentionally left empty.
func (m *MockedRackDefinition) makeStoredData() *pb.Definition_Rack {
	return &pb.Definition_Rack{
		Details: m.details.Clone(),
		Pdus: nil,
		Tors: nil,
		Blades: nil,
	}
}

// MockedRackActual mocks the "actual" (runtime/observed) view of a rack.
type MockedRackActual struct {
	viewTestItem
	parent *MockedRack // owning rack mock
	storedData *pb.Definition_Rack // last persisted payload; nil if never stored
	state pb.RackState_SM // cached state-machine state
	core *pb.MachineCore // cached machine core data
}
// ListPdus is not implemented by this mock.
func (m *MockedRackActual) ListPdus(ctx context.Context) (int64, []int64, error) {
	panic("implement me")
}

// ListTors is not implemented by this mock.
func (m *MockedRackActual) ListTors(ctx context.Context) (int64, []int64, error) {
	panic("implement me")
}

// ListBlades is not implemented by this mock.
func (m *MockedRackActual) ListBlades(ctx context.Context) (int64, []int64, error) {
	panic("implement me")
}

// Create is not implemented by this mock.
func (m *MockedRackActual) Create(ctx context.Context) (int64, error) {
	panic("implement me")
}

// Read is not implemented by this mock.
func (m *MockedRackActual) Read(ctx context.Context) (int64, error) {
	panic("implement me")
}

// Update is not implemented by this mock.
func (m *MockedRackActual) Update(ctx context.Context, unconditional bool) (int64, error) {
	panic("implement me")
}

// Delete is not implemented by this mock.
func (m *MockedRackActual) Delete(ctx context.Context, unconditional bool) (int64, error) {
	panic("implement me")
}

// FetchChildren is not implemented by this mock.
func (m *MockedRackActual) FetchChildren(ctx context.Context) (int64, *map[string]interface{}, error) {
	panic("implement me")
}

// GetState returns the cached rack state; the recorded call may substitute a
// different value via arg 0.
func (m *MockedRackActual) GetState() pb.RackState_SM {
	args := m.Called()
	m.state = args.GetIf(0, m.state).(pb.RackState_SM)
	return m.state
}

// SetState is not implemented by this mock.
func (m *MockedRackActual) SetState(value pb.RackState_SM) {
	panic("implement me")
}

// GetCore returns the cached machine core; the recorded call may substitute a
// different value via arg 0.
func (m *MockedRackActual) GetCore() *pb.MachineCore {
	args := m.Called()
	m.core = args.GetIf(0, m.core).(*pb.MachineCore)
	return m.core
}

// SetCore is not implemented by this mock.
func (m *MockedRackActual) SetCore(item *pb.MachineCore) {
	panic("implement me")
}

// GetStoreData is not implemented by this mock.
func (m *MockedRackActual) GetStoreData() *pb.Actual_Rack {
	panic("implement me")
}

// Equal is not implemented by this mock.
func (m *MockedRackActual) Equal(d *pb.Actual_Rack) bool {
	panic("implement me")
}

// NotEqual is not implemented by this mock.
func (m *MockedRackActual) NotEqual(d *pb.Actual_Rack) bool {
	panic("implement me")
}

// GetStoreDataWithChildren is not implemented by this mock.
func (m *MockedRackActual) GetStoreDataWithChildren(ctx context.Context) (*pb.Actual_Rack, error) {
	panic("implement me")
}

// FetchPdus is not implemented by this mock.
func (m *MockedRackActual) FetchPdus(ctx context.Context) (int64, map[int64]inventory.PduOperations, error) {
	panic("implement me")
}

// FetchTors is not implemented by this mock.
func (m *MockedRackActual) FetchTors(ctx context.Context) (int64, map[int64]inventory.TorOperations, error) {
	panic("implement me")
}

// FetchBlades is not implemented by this mock.
func (m *MockedRackActual) FetchBlades(ctx context.Context) (int64, map[int64]inventory.BladeOperations, error) {
	panic("implement me")
}

// MockedPdu mocks a single PDU, pairing a definition view with an actual view.
type MockedPdu struct {
	monikeredTestItem
	parent *MockedRack // owning rack mock
	id int64 // PDU index within the rack
	definition *MockedPduDefinition
	actual *MockedPduActual
}
// newMockedPdu builds a MockedPdu (definition and actual views) attached to
// rack r. The definition is pre-populated with torPorts power ports wired to
// TORs followed by bladePorts ports wired to blades, all targeting port 0.
func newMockedPdu(id int64, r *MockedRack, torPorts int64, bladePorts int64) *MockedPdu {
	ts := r.ts
	pdu := &MockedPdu{
		monikeredTestItem: monikeredTestItem{
			testItem: testItem{
				ts: ts,
				revision: 0,
				revisionStore: 0,
				// The original line carried a corrupted format string
				// ("%<KEY>"); reconstructed to follow the sibling
				// constructors' pattern ("<rack-key>/tors/<id>",
				// "<rack-key>/blades/<id>") — confirm the "pdus" segment
				// against the store key layout.
				key: fmt.Sprintf("%s/pdus/%d", r.key, id),
				keyChildIndex: "",
				KeyIndex: "",
			},
		},
		parent: r,
		id: id,
	}
	def := &MockedPduDefinition{
		viewTestItem: viewTestItem{
			testItem: testItem{
				ts: ts,
				revision: 0,
				revisionStore: 0,
				key: namespace.FullyQualifyName(ts.tables.Definition(), pdu.key),
				keyChildIndex: "",
				KeyIndex: "",
			},
		},
		parent: r,
		id: id,
		storedData: &pb.Definition_Pdu{
			Details: &pb.PduDetails{
				Enabled: true,
				Condition: pb.Condition_operational,
			},
			Ports: make(map[int64]*pb.PowerPort),
		},
		storedErr: nil,
		details: nil,
		ports: nil,
	}
	// Ports are numbered contiguously: TOR-facing ports first, then
	// blade-facing ports.
	index := int64(0)
	for i := int64(0); i < torPorts; i++ {
		def.storedData.Ports[index] = &pb.PowerPort{
			Wired: true,
			Item: &pb.Hardware{
				Type: pb.Hardware_tor,
				Id: i,
				Port: 0,
			},
		}
		index++
	}
	for i := int64(0); i < bladePorts; i++ {
		def.storedData.Ports[index] = &pb.PowerPort{
			Wired: true,
			Item: &pb.Hardware{
				Type: pb.Hardware_blade,
				Id: i,
				Port: 0,
			},
		}
		index++
	}
	pdu.definition = def
	act := &MockedPduActual{
		viewActualCommon: viewActualCommon{
			viewTestItem: viewTestItem{
				testItem: testItem{
					ts: ts,
					revision: 0,
					revisionStore: 0,
					key: namespace.FullyQualifyName(ts.tables.Actual(), pdu.key),
					keyChildIndex: "",
					KeyIndex: "",
				},
			},
		},
		parent: r,
		id: id,
	}
	pdu.actual = act
	return pdu
}
// Definition returns the PDU's definition view; the recorded call may
// substitute a different view via arg 0.
func (m *MockedPdu) Definition() inventory.PduViewDefinitionOperations {
	args := m.Called()
	m.definition = args.GetIf(0, m.definition).(*MockedPduDefinition)
	return m.definition
}

// Actual returns the PDU's actual view; the recorded call may substitute a
// different view via arg 0.
func (m *MockedPdu) Actual() inventory.PduViewActualOperations {
	args := m.Called()
	m.actual = args.GetIf(0, m.actual).(*MockedPduActual)
	return m.actual
}

// MockedPduDefinition mocks the definition view of a PDU.
type MockedPduDefinition struct {
	viewTestItem
	parent *MockedRack // owning rack mock
	id int64 // PDU index within the rack
	storedData *pb.Definition_Pdu // last persisted payload; nil if never stored
	storedErr error
	details *pb.PduDetails // cached details
	ports map[int64]*pb.PowerPort // cached power ports
}

// readState refreshes the cached fields from the stored payload.
func (m *MockedPduDefinition) readState() {
	m.details = m.storedData.GetDetails()
	m.ports = m.storedData.GetPorts()
}

// makeStoreData builds a store payload from the cached fields.
// NOTE(review): unlike the rack/tor equivalents this does not Clone the
// details, so the payload aliases the cache — confirm this is intended.
func (m *MockedPduDefinition) makeStoreData() *pb.Definition_Pdu {
	return &pb.Definition_Pdu{
		Details: m.details,
		Ports: m.ports,
	}
}
// Create is not implemented by this mock.
func (m *MockedPduDefinition) Create(ctx context.Context) (int64, error) {
	panic("implement me")
}

// Read is not implemented by this mock.
func (m *MockedPduDefinition) Read(ctx context.Context) (int64, error) {
	panic("implement me")
}

// Update mocks persisting the PDU definition built from the cached fields.
// Arg 1 of the recorded call may force an error (returning arg 0 as the
// revision); otherwise the store revision is bumped on success.
func (m *MockedPduDefinition) Update(ctx context.Context, unconditional bool) (int64, error) {
	args := m.Called(ctx, unconditional)
	if err := args.ErrorIf(1, nil); err != nil {
		return args.Int64(0), err
	}
	if m.storedData == nil {
		// Nothing was ever stored, so there is no PDU to update.
		return store.RevisionInvalid, errors.ErrPduNotFound{
			Region: m.parent.region,
			Zone: m.parent.zone,
			Rack: m.parent.rack,
			Pdu: m.id,
		}
	}
	data := m.makeStoreData()
	if data.Details == nil {
		return store.RevisionInvalid, errors.ErrDetailsNotAvailable("pdu")
	}
	m.storedData = data
	m.revisionStore++
	m.revision = m.revisionStore
	return m.revision, nil
}

// Delete is not implemented by this mock.
func (m *MockedPduDefinition) Delete(ctx context.Context, unconditional bool) (int64, error) {
	panic("implement me")
}

// SetDetails is not implemented by this mock.
func (m *MockedPduDefinition) SetDetails(details *pb.PduDetails) {
	panic("implement me")
}

// GetDetails is not implemented by this mock.
func (m *MockedPduDefinition) GetDetails() *pb.PduDetails {
	panic("implement me")
}

// SetPorts is not implemented by this mock.
func (m *MockedPduDefinition) SetPorts(ports map[int64]*pb.PowerPort) {
	panic("implement me")
}

// GetPorts returns the cached power ports; the recorded call may substitute a
// different map via arg 0.
func (m *MockedPduDefinition) GetPorts() map[int64]*pb.PowerPort {
	args := m.Called()
	m.ports = args.GetIf(0, m.ports).(map[int64]*pb.PowerPort)
	return m.ports
}

// EqualPorts is not implemented by this mock.
func (m *MockedPduDefinition) EqualPorts(ports map[int64]*pb.PowerPort) bool {
	panic("implement me")
}

// GetStoreData returns a payload built from the cached fields, unless the
// recorded call substitutes one via arg 0.
func (m *MockedPduDefinition) GetStoreData() *pb.Definition_Pdu {
	args := m.Called()
	data := args.GetIf(0, &pb.Definition_Pdu{
		Details: m.details,
		Ports: m.ports,
	}).(*pb.Definition_Pdu)
	return data
}

// Equal is not implemented by this mock.
func (m *MockedPduDefinition) Equal(d *pb.Definition_Pdu) bool {
	panic("implement me")
}

// NotEqual is not implemented by this mock.
func (m *MockedPduDefinition) NotEqual(d *pb.Definition_Pdu) bool {
	panic("implement me")
}
// MockedPduActual mocks the actual (runtime) view of a PDU.
type MockedPduActual struct {
	viewActualCommon
	parent *MockedRack // owning rack mock
	id int64 // PDU index within the rack
	storedData *pb.Actual_Pdu // last persisted payload; nil if never stored
	storedErr error
	cables map[int64]*pb.Cable // cached cable map
	state pb.PduState_SM // cached state-machine state
}

// Create mocks initial persistence of the actual view, snapshotting the
// cached fields. Arg 1 may force an error; arg 0 may pin the revision,
// which otherwise is bumped.
func (m *MockedPduActual) Create(ctx context.Context) (int64, error) {
	args := m.Called(ctx)
	if err := args.ErrorIf(1, nil); err != nil {
		return store.RevisionInvalid, err
	}
	m.storedData = &pb.Actual_Pdu{
		Condition: m.condition,
		SmState: m.state,
		Core: m.core.Clone(),
		Cables: m.cables,
	}
	m.revisionStore = args.Int64If(0, m.revisionStore+1)
	m.revision = m.revisionStore
	return m.revision, nil
}
// Read mocks loading the actual view from the store. The recorded call may
// force an error (arg 1), substitute the stored payload (arg 2) or pin the
// revision (arg 0); on success the cached fields are refreshed from the
// payload.
func (m *MockedPduActual) Read(ctx context.Context) (int64, error) {
	args := m.Called(ctx)
	if err := args.ErrorIf(1, nil); err != nil {
		return store.RevisionInvalid, err
	}
	m.storedData = args.GetIf(2, m.storedData).(*pb.Actual_Pdu)
	if m.storedData == nil {
		return store.RevisionInvalid, errors.ErrPduNotFound{
			Region: m.parent.region,
			Zone: m.parent.zone,
			// Bug fix: the original populated Rack with m.parent.region
			// (copy-paste slip); Update below uses m.parent.rack.
			Rack: m.parent.rack,
			Pdu: m.id,
		}
	}
	m.condition = m.storedData.GetCondition()
	m.state = m.storedData.GetSmState()
	m.core = m.storedData.GetCore().Clone()
	m.cables = m.storedData.GetCables()
	m.revisionStore = args.Int64If(0, m.revisionStore)
	m.revision = m.revisionStore
	return m.revision, nil
}
// Update mocks persisting the cached fields back to the store. Arg 1 may
// force an error (returning arg 0 as the revision).
func (m *MockedPduActual) Update(ctx context.Context, unconditional bool) (int64, error) {
	args := m.Called(ctx, unconditional)
	if err := args.ErrorIf(1, nil); err != nil {
		return args.Int64(0), err
	}
	if m.storedData == nil {
		return store.RevisionInvalid, errors.ErrPduNotFound{
			Region: m.parent.region,
			Zone: m.parent.zone,
			Rack: m.parent.rack,
			Pdu: m.id,
		}
	}
	data := &pb.Actual_Pdu{
		Condition: m.condition,
		SmState: m.state,
		Core: m.core.Clone(),
		Cables: m.cables,
	}
	// State-machine data is required before the actual view may be stored.
	if data.Core == nil || data.SmState == pb.PduState_invalid {
		return store.RevisionInvalid, errors.ErrStateMachineDataNotAvailable("pdu")
	}
	m.storedData = data
	m.revisionStore++
	m.revision = m.revisionStore
	return m.revision, nil
}

// Delete is not implemented by this mock.
func (m *MockedPduActual) Delete(ctx context.Context, unconditional bool) (int64, error) {
	panic("implement me")
}

// GetCables is not implemented by this mock.
func (m *MockedPduActual) GetCables() map[int64]*pb.Cable {
	panic("implement me")
}

// SetCables caches the supplied cable map and invalidates the cached revision.
func (m *MockedPduActual) SetCables(cables map[int64]*pb.Cable) {
	_ = m.Called(cables)
	m.cables = cables
	m.revision = store.RevisionInvalid
}

// GetState is not implemented by this mock.
func (m *MockedPduActual) GetState() pb.PduState_SM {
	panic("implement me")
}

// SetState caches the supplied state and invalidates the cached revision.
func (m *MockedPduActual) SetState(state pb.PduState_SM) {
	_ = m.Called(state)
	m.state = state
	m.revision = store.RevisionInvalid
}

// GetStoreData is not implemented by this mock.
func (m *MockedPduActual) GetStoreData() *pb.Actual_Pdu {
	panic("implement me")
}

// Equal is not implemented by this mock.
func (m *MockedPduActual) Equal(x *pb.Actual_Pdu) bool {
	panic("implement me")
}

// NotEqual is not implemented by this mock.
func (m *MockedPduActual) NotEqual(x *pb.Actual_Pdu) bool {
	panic("implement me")
}

// Describe returns a default-formatted dump of the mock, for diagnostics.
func (m *MockedPduActual) Describe() string {
	return fmt.Sprintf("%v", m)
}
// MockedTor mocks a single TOR switch, pairing a definition view with an
// actual view.
type MockedTor struct {
	monikeredTestItem
	parent *MockedRack // owning rack mock
	id int64 // TOR index within the rack
	definition *MockedTorDefinition
	actual *MockedTorActual
}

// newMockedTor builds a MockedTor (definition and actual views) attached to
// rack r. The definition is pre-populated with pduPorts network ports wired
// to PDUs followed by bladePorts ports wired to blades, all on port 0.
func newMockedTor(id int64, r *MockedRack, pduPorts int64, bladePorts int64) *MockedTor {
	ts := r.ts
	tor := &MockedTor{
		monikeredTestItem: monikeredTestItem{
			testItem: testItem{
				key: fmt.Sprintf("%s/tors/%d", r.key, id),
				ts: ts,
			},
		},
		parent: r,
		id: id,
	}
	def := &MockedTorDefinition{
		viewTestItem: viewTestItem{
			testItem: testItem{
				ts: ts,
				revision: 0,
				revisionStore: 0,
				key: namespace.FullyQualifyName(ts.tables.Definition(), tor.key),
				keyChildIndex: "",
				KeyIndex: "",
			},
		},
		storedData: &pb.Definition_Tor{
			Details: &pb.TorDetails{
				Enabled: true,
				Condition: pb.Condition_operational,
			},
			Ports: make(map[int64]*pb.NetworkPort),
		},
		parent: r,
		id: id,
	}
	// Ports are numbered contiguously: PDU-facing ports first, then
	// blade-facing ports.
	index := int64(0)
	for i := int64(0); i < pduPorts; i++ {
		def.storedData.Ports[index] = &pb.NetworkPort{
			Wired: true,
			Item: &pb.Hardware{
				Type: pb.Hardware_pdu,
				Id: i,
				Port: 0,
			},
		}
		index++
	}
	for i := int64(0); i < bladePorts; i++ {
		def.storedData.Ports[index] = &pb.NetworkPort{
			Wired: true,
			Item: &pb.Hardware{
				Type: pb.Hardware_blade,
				Id: i,
				Port: 0,
			},
		}
		index++
	}
	tor.definition = def
	act := &MockedTorActual{
		viewActualCommon: viewActualCommon{
			viewTestItem: viewTestItem{
				testItem: testItem{
					ts: ts,
					key: namespace.FullyQualifyName(ts.tables.Actual(), tor.key),
					keyChildIndex: "",
					KeyIndex: "",
				},
			},
		},
		parent: r,
		id: id,
	}
	tor.actual = act
	return tor
}
// Definition returns the TOR's definition view; the recorded call may
// substitute a different view via arg 0.
func (m *MockedTor) Definition() inventory.TorViewDefinitionOperations {
	args := m.Called()
	m.definition = args.GetIf(0, m.definition).(*MockedTorDefinition)
	return m.definition
}

// Actual returns the TOR's actual view; the recorded call may substitute a
// different view via arg 0.
func (m *MockedTor) Actual() inventory.TorViewActualOperations {
	args := m.Called()
	m.actual = args.GetIf(0, m.actual).(*MockedTorActual)
	return m.actual
}

// MockedTorDefinition mocks the definition view of a TOR switch.
type MockedTorDefinition struct {
	viewTestItem
	parent *MockedRack // owning rack mock
	id int64 // TOR index within the rack
	storedData *pb.Definition_Tor // last persisted payload; nil if never stored
	storedErr error
	details *pb.TorDetails // cached details
	ports map[int64]*pb.NetworkPort // cached network ports
}

// readState refreshes the cached fields from the stored payload.
func (m *MockedTorDefinition) readState() {
	m.details = m.storedData.GetDetails()
	m.ports = m.storedData.GetPorts()
}

// makeStoreData builds a store payload from the cached fields; the details
// are cloned, the port map is shared.
func (m *MockedTorDefinition) makeStoreData() *pb.Definition_Tor {
	return &pb.Definition_Tor{
		Details: m.details.Clone(),
		Ports: m.ports,
	}
}
// Create is not implemented by this mock.
func (m *MockedTorDefinition) Create(ctx context.Context) (int64, error) {
	panic("implement me")
}

// Read is not implemented by this mock.
func (m *MockedTorDefinition) Read(ctx context.Context) (int64, error) {
	panic("implement me")
}

// Update mocks persisting the TOR definition built from the cached fields.
// Arg 1 of the recorded call may force an error (returning arg 0 as the
// revision); otherwise the store revision is bumped on success.
func (m *MockedTorDefinition) Update(ctx context.Context, unconditional bool) (int64, error) {
	args := m.Called(ctx, unconditional)
	if err := args.ErrorIf(1, nil); err != nil {
		return args.Int64(0), err
	}
	if m.storedData == nil {
		// Nothing was ever stored, so there is no TOR to update.
		return store.RevisionInvalid, errors.ErrTorNotFound{
			Region: m.parent.region,
			Zone: m.parent.zone,
			Rack: m.parent.rack,
			Tor: m.id,
		}
	}
	data := m.makeStoreData()
	if data.Details == nil {
		return store.RevisionInvalid, errors.ErrDetailsNotAvailable("tor")
	}
	m.storedData = data
	m.revisionStore++
	m.revision = m.revisionStore
	return m.revision, nil
}

// Delete is not implemented by this mock.
func (m *MockedTorDefinition) Delete(ctx context.Context, unconditional bool) (int64, error) {
	panic("implement me")
}

// SetDetails is not implemented by this mock.
func (m *MockedTorDefinition) SetDetails(details *pb.TorDetails) {
	panic("implement me")
}

// GetDetails returns the cached details; the recorded call may substitute a
// different value via arg 0.
func (m *MockedTorDefinition) GetDetails() *pb.TorDetails {
	args := m.Called()
	m.details = args.GetIf(0, m.details).(*pb.TorDetails)
	return m.details
}

// SetPorts is not implemented by this mock.
func (m *MockedTorDefinition) SetPorts(ports map[int64]*pb.NetworkPort) {
	panic("implement me")
}

// GetPorts returns the cached network ports; the recorded call may
// substitute a different map via arg 0.
func (m *MockedTorDefinition) GetPorts() map[int64]*pb.NetworkPort {
	args := m.Called()
	m.ports = args.GetIf(0, m.ports).(map[int64]*pb.NetworkPort)
	return m.ports
}

// EqualPorts is not implemented by this mock.
func (m *MockedTorDefinition) EqualPorts(ports map[int64]*pb.NetworkPort) bool {
	panic("implement me")
}

// GetStoreData returns a payload built from the cached fields, unless the
// recorded call substitutes one via arg 0.
func (m *MockedTorDefinition) GetStoreData() *pb.Definition_Tor {
	args := m.Called()
	data := args.GetIf(0, &pb.Definition_Tor{
		Details: m.details,
		Ports: m.ports,
	}).(*pb.Definition_Tor)
	return data
}

// Equal is not implemented by this mock.
func (m *MockedTorDefinition) Equal(d *pb.Definition_Tor) bool {
	panic("implement me")
}

// NotEqual is not implemented by this mock.
func (m *MockedTorDefinition) NotEqual(d *pb.Definition_Tor) bool {
	panic("implement me")
}

// MockedTorActual mocks the actual (runtime) view of a TOR switch.
type MockedTorActual struct {
	viewActualCommon
	parent *MockedRack // owning rack mock
	id int64 // TOR index within the rack
	storedData *pb.Actual_Tor // last persisted payload; nil if never stored
	storedErr error
	cables map[int64]*pb.Cable // cached cable map
	state pb.TorState_SM // cached state-machine state
}

// Create mocks initial persistence of the actual view, snapshotting the
// cached fields. Arg 1 may force an error; arg 0 may pin the revision,
// which otherwise is bumped.
func (m *MockedTorActual) Create(ctx context.Context) (int64, error) {
	args := m.Called(ctx)
	if err := args.ErrorIf(1, nil); err != nil {
		return store.RevisionInvalid, err
	}
	m.storedData = &pb.Actual_Tor{
		Condition: m.condition,
		SmState: m.state,
		Core: m.core.Clone(),
		Cables: m.cables,
	}
	m.revisionStore = args.Int64If(0, m.revisionStore+1)
	m.revision = m.revisionStore
	return m.revision, nil
}
// Read mocks loading the actual view from the store. The recorded call may
// force an error (arg 1), substitute the stored payload (arg 2) or pin the
// revision (arg 0); on success the cached fields are refreshed from the
// payload.
func (m *MockedTorActual) Read(ctx context.Context) (int64, error) {
	args := m.Called(ctx)
	if err := args.ErrorIf(1, nil); err != nil {
		return store.RevisionInvalid, err
	}
	m.storedData = args.GetIf(2, m.storedData).(*pb.Actual_Tor)
	if m.storedData == nil {
		return store.RevisionInvalid, errors.ErrTorNotFound{
			Region: m.parent.region,
			Zone: m.parent.zone,
			// Bug fix: the original populated Rack with m.parent.region
			// (copy-paste slip); Update below uses m.parent.rack.
			Rack: m.parent.rack,
			Tor: m.id,
		}
	}
	m.condition = m.storedData.GetCondition()
	m.state = m.storedData.GetSmState()
	m.core = m.storedData.GetCore().Clone()
	m.cables = m.storedData.GetCables()
	m.revisionStore = args.Int64If(0, m.revisionStore)
	m.revision = m.revisionStore
	return m.revision, nil
}
// Update mocks persisting the cached fields back to the store. Arg 1 may
// force an error (returning arg 0 as the revision).
func (m *MockedTorActual) Update(ctx context.Context, unconditional bool) (int64, error) {
	args := m.Called(ctx, unconditional)
	if err := args.ErrorIf(1, nil); err != nil {
		return args.Int64(0), err
	}
	if m.storedData == nil {
		return store.RevisionInvalid, errors.ErrTorNotFound{
			Region: m.parent.region,
			Zone: m.parent.zone,
			Rack: m.parent.rack,
			Tor: m.id,
		}
	}
	data := &pb.Actual_Tor{
		Condition: m.condition,
		SmState: m.state,
		Core: m.core.Clone(),
		Cables: m.cables,
	}
	// State-machine data is required before the actual view may be stored.
	// Bug fix: the original reported this error against "pdu" (copy-paste
	// slip from MockedPduActual.Update); this is the TOR view.
	if data.Core == nil || data.SmState == pb.TorState_invalid {
		return store.RevisionInvalid, errors.ErrStateMachineDataNotAvailable("tor")
	}
	m.storedData = data
	m.revisionStore++
	m.revision = m.revisionStore
	return m.revision, nil
}
// Delete is not implemented by this mock.
func (m *MockedTorActual) Delete(ctx context.Context, unconditional bool) (int64, error) {
	panic("implement me")
}

// GetCables is not implemented by this mock.
func (m *MockedTorActual) GetCables() map[int64]*pb.Cable {
	panic("implement me")
}

// SetCables caches the supplied cable map and invalidates the cached revision.
func (m *MockedTorActual) SetCables(cables map[int64]*pb.Cable) {
	_ = m.Called(cables)
	m.cables = cables
	m.revision = store.RevisionInvalid
}

// GetState is not implemented by this mock.
func (m *MockedTorActual) GetState() pb.TorState_SM {
	panic("implement me")
}

// SetState caches the supplied state and invalidates the cached revision.
func (m *MockedTorActual) SetState(state pb.TorState_SM) {
	_ = m.Called(state)
	m.state = state
	m.revision = store.RevisionInvalid
}

// GetStoreData is not implemented by this mock.
func (m *MockedTorActual) GetStoreData() *pb.Actual_Tor {
	panic("implement me")
}

// Equal is not implemented by this mock.
func (m *MockedTorActual) Equal(x *pb.Actual_Tor) bool {
	panic("implement me")
}

// NotEqual is not implemented by this mock.
func (m *MockedTorActual) NotEqual(x *pb.Actual_Tor) bool {
	panic("implement me")
}

// Describe returns a default-formatted dump of the mock, for diagnostics.
func (m *MockedTorActual) Describe() string {
	return fmt.Sprintf("%v", m)
}

// MockedBlade mocks a single blade, pairing a definition view with an actual
// view.
type MockedBlade struct {
	monikeredTestItem
	parent *MockedRack // owning rack mock
	id int64 // blade index within the rack
	definition *MockedBladeDefinition
	actual *MockedBladeActual
}
// newMockedBlade builds a MockedBlade (definition and actual views) attached
// to rack r, seeding the stored definition with the supplied capacity and
// boot configuration.
func newMockedBlade(
	id int64,
	r *MockedRack,
	capacity *pb.BladeCapacity,
	bootOnPowerOn bool,
	bootInfo *pb.BladeBootInfo) *MockedBlade {
	ts := r.ts
	blade := &MockedBlade{
		monikeredTestItem: monikeredTestItem{
			testItem: testItem{
				ts: ts,
				revision: 0,
				revisionStore: 0,
				key: fmt.Sprintf("%s/blades/%d", r.key, id),
				keyChildIndex: "",
				KeyIndex: "",
			},
		},
		parent: r,
		id: id,
		definition: nil,
		actual: nil,
	}
	def := &MockedBladeDefinition{
		viewTestItem: viewTestItem{
			testItem: testItem{
				ts: ts,
				revision: 0,
				revisionStore: 0,
				key: namespace.FullyQualifyName(ts.tables.Definition(), blade.key),
				keyChildIndex: "",
				KeyIndex: "",
			},
		},
		parent: r,
		id: id,
		storedData: &pb.Definition_Blade{
			Details: &pb.BladeDetails{
				Condition: pb.Condition_operational,
				Enabled: true,
			},
			Capacity: capacity,
			BootOnPowerOn: bootOnPowerOn,
			BootInfo: bootInfo,
		},
		storedErr: nil,
		details: nil,
		capacity: nil,
		bootOnPowerOn: false,
		bootInfo: nil,
	}
	blade.definition = def
	act := &MockedBladeActual{
		mockedBladeRunState: mockedBladeRunState{
			viewActualCommon: viewActualCommon{
				viewTestItem: viewTestItem{
					testItem: testItem{
						ts: ts,
						revision: 0,
						revisionStore: 0,
						key: namespace.FullyQualifyName(ts.tables.Actual(), blade.key),
						keyChildIndex: "",
						KeyIndex: "",
					},
				},
			},
			usages: make(map[string]*pb.Usage),
		},
		parent: r,
		id: id,
	}
	blade.actual = act
	return blade
}

// Definition returns the blade's definition view; the recorded call may
// substitute a different view via arg 0.
func (m *MockedBlade) Definition() inventory.BladeViewDefinitionOperations {
	args := m.Called()
	m.definition = args.GetIf(0, m.definition).(*MockedBladeDefinition)
	return m.definition
}

// Actual returns the blade's actual view; the recorded call may substitute a
// different view via arg 0.
func (m *MockedBlade) Actual() inventory.BladeViewActualOperations {
	args := m.Called()
	m.actual = args.GetIf(0, m.actual).(*MockedBladeActual)
	return m.actual
}

// Observed is not implemented by this mock.
func (m *MockedBlade) Observed() inventory.BladeViewObservedOperations {
	panic("implement me")
}

// MockedBladeDefinition mocks the definition view of a blade.
type MockedBladeDefinition struct {
	viewTestItem
	parent *MockedRack // owning rack mock
	id int64 // blade index within the rack
	storedData *pb.Definition_Blade // last persisted payload; nil if never stored
	storedErr error
	details *pb.BladeDetails // cached details
	capacity *pb.BladeCapacity // cached capacity
	bootOnPowerOn bool // cached boot-on-power-on flag
	bootInfo *pb.BladeBootInfo // cached boot configuration
}

// readState refreshes the cached fields from the stored payload.
func (m *MockedBladeDefinition) readState() {
	m.details = m.storedData.GetDetails()
	m.capacity = m.storedData.GetCapacity()
	m.bootOnPowerOn = m.storedData.GetBootOnPowerOn()
	m.bootInfo = m.storedData.GetBootInfo()
}
// Create is not implemented by this mock.
func (m *MockedBladeDefinition) Create(ctx context.Context) (int64, error) {
	panic("implement me")
}

// Read mocks loading the blade definition. The recorded call may substitute
// the stored payload (arg 0), pin the revision (arg 1) or replace the
// computed error (arg 2); on success the cached fields are refreshed.
func (m *MockedBladeDefinition) Read(ctx context.Context) (rev int64, err error) {
	args := m.Called(ctx)
	m.storedData = args.GetIf(0, m.storedData).(*pb.Definition_Blade)
	err = nil
	if m.storedData == nil {
		err = errors.ErrBladeNotFound{
			Region: m.parent.region,
			Zone: m.parent.zone,
			Rack: m.parent.rack,
			Blade: m.id,
		}
	}
	if err = args.ErrorIf(2, err); err != nil {
		return store.RevisionInvalid, err
	}
	m.readState()
	m.revisionStore = args.Int64If(1, m.revisionStore)
	m.revision = m.revisionStore
	return m.revisionStore, nil
}

// Update is not implemented by this mock.
func (m *MockedBladeDefinition) Update(ctx context.Context, unconditional bool) (int64, error) {
	panic("implement me")
}

// Delete is not implemented by this mock.
func (m *MockedBladeDefinition) Delete(ctx context.Context, unconditional bool) (int64, error) {
	panic("implement me")
}

// SetDetails is not implemented by this mock.
func (m *MockedBladeDefinition) SetDetails(details *pb.BladeDetails) {
	panic("implement me")
}

// GetDetails is not implemented by this mock.
func (m *MockedBladeDefinition) GetDetails() *pb.BladeDetails {
	panic("implement me")
}

// SetCapacity is not implemented by this mock.
func (m *MockedBladeDefinition) SetCapacity(capacity *pb.BladeCapacity) {
	panic("implement me")
}

// GetCapacity returns a clone of the cached capacity; the recorded call may
// substitute a different value via arg 0.
func (m *MockedBladeDefinition) GetCapacity() *pb.BladeCapacity {
	args := m.Called()
	m.capacity = args.GetIf(0, m.capacity).(*pb.BladeCapacity)
	return m.capacity.Clone()
}

// SetBootInfo is not implemented by this mock.
func (m *MockedBladeDefinition) SetBootInfo(bootInfo *pb.BladeBootInfo) {
	panic("implement me")
}

// GetBootInfo returns a clone of the cached boot configuration; the recorded
// call may substitute a different value via arg 0.
func (m *MockedBladeDefinition) GetBootInfo() *pb.BladeBootInfo {
	args := m.Called()
	m.bootInfo = args.GetIf(0, m.bootInfo).(*pb.BladeBootInfo)
	return m.bootInfo.Clone()
}

// SetBootPowerOn is not implemented by this mock.
func (m *MockedBladeDefinition) SetBootPowerOn(bootOnPowerOn bool) {
	panic("implement me")
}

// GetBootOnPowerOn returns the cached flag; the recorded call may substitute
// a different value via arg 0.
func (m *MockedBladeDefinition) GetBootOnPowerOn() bool {
	args := m.Called()
	m.bootOnPowerOn = args.BoolIf(0, m.bootOnPowerOn)
	return m.bootOnPowerOn
}

// GetStoreData returns a payload built from the cached fields, unless the
// recorded call substitutes one via arg 0.
func (m *MockedBladeDefinition) GetStoreData() *pb.Definition_Blade {
	args := m.Called()
	data := args.GetIf(0, &pb.Definition_Blade{
		Details: m.details,
		Capacity: m.capacity,
		BootOnPowerOn: m.bootOnPowerOn,
		BootInfo: m.bootInfo,
	}).(*pb.Definition_Blade)
	return data
}

// Equal is not implemented by this mock.
func (m *MockedBladeDefinition) Equal(d *pb.Definition_Blade) bool {
	panic("implement me")
}

// NotEqual is not implemented by this mock.
func (m *MockedBladeDefinition) NotEqual(d *pb.Definition_Blade) bool {
	panic("implement me")
}

// MockedBladeActual mocks the actual (runtime) view of a blade.
type MockedBladeActual struct {
	mockedBladeRunState
	parent *MockedRack // owning rack mock
	id int64 // blade index within the rack
	storedData *pb.Actual_Blade // last persisted payload; nil if never stored
	storedErr error
}

// Create mocks initial persistence of the actual view, snapshotting the
// cached run state. Arg 1 may force an error; arg 0 may pin the revision,
// which otherwise is bumped.
func (m *MockedBladeActual) Create(ctx context.Context) (int64, error) {
	args := m.Called(ctx)
	if err := args.ErrorIf(1, nil); err != nil {
		return store.RevisionInvalid, err
	}
	m.storedData = &pb.Actual_Blade{
		Condition: m.condition,
		SmState: m.state,
		Core: m.core.Clone(),
		Timer: m.timer.Clone(),
		Usage: m.usages,
	}
	m.revisionStore = args.Int64If(0, m.revisionStore+1)
	m.revision = m.revisionStore
	return m.revision, nil
}
// Read mocks loading the actual view from the store. The recorded call may
// force an error (arg 1), substitute the stored payload (arg 2) or pin the
// revision (arg 0); on success the cached fields are refreshed.
func (m *MockedBladeActual) Read(ctx context.Context) (int64, error) {
	args := m.Called(ctx)
	if err := args.ErrorIf(1, nil); err != nil {
		return store.RevisionInvalid, err
	}
	m.storedData = args.GetIf(2, m.storedData).(*pb.Actual_Blade)
	if m.storedData == nil {
		return store.RevisionInvalid, errors.ErrBladeNotFound{
			Region: m.parent.region,
			Zone: m.parent.zone,
			// Bug fix: the original populated Rack with m.parent.region
			// (copy-paste slip); Update below uses m.parent.rack.
			Rack: m.parent.rack,
			Blade: m.id,
		}
	}
	m.condition = m.storedData.GetCondition()
	m.state = m.storedData.GetSmState()
	m.core = m.storedData.GetCore().Clone()
	m.timer = m.storedData.GetTimer().Clone()
	// NOTE(review): unlike the PDU/TOR Read, the cached usages are not
	// restored from the payload here (Create does persist them) — confirm
	// whether that asymmetry is intentional.
	m.revisionStore = args.Int64If(0, m.revisionStore)
	m.revision = m.revisionStore
	return m.revision, nil
}
// Update mocks persisting the cached run state back to the store. Arg 1 may
// force an error (returning arg 0 as the revision).
// NOTE(review): the payload built here omits the Usage map that Create
// persists — confirm whether that is intentional.
func (m *MockedBladeActual) Update(ctx context.Context, unconditional bool) (int64, error) {
	args := m.Called(ctx, unconditional)
	if err := args.ErrorIf(1, nil); err != nil {
		return args.Int64(0), err
	}
	if m.storedData == nil {
		return store.RevisionInvalid, errors.ErrBladeNotFound{
			Region: m.parent.region,
			Zone: m.parent.zone,
			Rack: m.parent.rack,
			Blade: m.id,
		}
	}
	data := &pb.Actual_Blade{
		Condition: m.condition,
		SmState: m.state,
		Core: m.core.Clone(),
		Timer: m.timer.Clone(),
	}
	// State-machine data is required before the actual view may be stored.
	if data.Core == nil || data.SmState == pb.BladeState_invalid {
		return store.RevisionInvalid, errors.ErrStateMachineDataNotAvailable("blade")
	}
	m.storedData = data
	m.revisionStore++
	m.revision = m.revisionStore
	return m.revision, nil
}

// Delete is not implemented by this mock.
func (m *MockedBladeActual) Delete(ctx context.Context, unconditional bool) (int64, error) {
	panic("implement me")
}

// GetStoreData is not implemented by this mock.
func (m *MockedBladeActual) GetStoreData() *pb.Actual_Blade {
	panic("implement me")
}

// Equal is not implemented by this mock.
func (m *MockedBladeActual) Equal(x *pb.Actual_Blade) bool {
	panic("implement me")
}

// NotEqual is not implemented by this mock.
func (m *MockedBladeActual) NotEqual(x *pb.Actual_Blade) bool {
	panic("implement me")
}

// mockedBladeRunState holds the mutable runtime state shared by the blade
// actual-view mock.
type mockedBladeRunState struct {
	viewActualCommon
	state pb.BladeState_SM // cached state-machine state
	timer *pb.ActiveTimer // cached active timer
	avail *pb.BladeCapacity // cached available capacity
	usages map[string]*pb.Usage // cached per-key usage records
}
// GetUsage returns the cached usage record for id (nil if absent).
func (m *mockedBladeRunState) GetUsage(id string) *pb.Usage {
	_ = m.Called(id)
	return m.usages[id]
}

// SetUsage caches the usage record for id and invalidates the cached revision.
func (m *mockedBladeRunState) SetUsage(id string, usage *pb.Usage) {
	_ = m.Called(id, usage)
	m.usages[id] = usage
	m.revision = store.RevisionInvalid
}

// RemoveUsage deletes the usage record for id, reporting whether it existed;
// the cached revision is invalidated only if something was removed.
func (m *mockedBladeRunState) RemoveUsage(id string) bool {
	_ = m.Called(id)
	_, ok := m.usages[id]
	if ok {
		delete(m.usages, id)
		m.revision = store.RevisionInvalid
	}
	return ok
}

// GetAllUsage returns the (shared, not copied) cached usage map.
func (m *mockedBladeRunState) GetAllUsage() map[string]*pb.Usage {
	_ = m.Called()
	return m.usages
}

// SetAllUsage replaces the cached usage map and invalidates the cached
// revision.
func (m *mockedBladeRunState) SetAllUsage(usages map[string]*pb.Usage) {
	_ = m.Called(usages)
	m.usages = usages
	m.revision = store.RevisionInvalid
}

// GetState is not implemented by this mock.
func (m *mockedBladeRunState) GetState() pb.BladeState_SM {
	panic("implement me")
}

// SetState caches the supplied state and invalidates the cached revision.
func (m *mockedBladeRunState) SetState(state pb.BladeState_SM) {
	_ = m.Called(state)
	m.state = state
	m.revision = store.RevisionInvalid
}

// GetTimer is not implemented by this mock.
func (m *mockedBladeRunState) GetTimer() *pb.ActiveTimer {
	panic("implement me")
}

// SetTimer caches a clone of the supplied timer and invalidates the cached
// revision.
func (m *mockedBladeRunState) SetTimer(t *pb.ActiveTimer) {
	_ = m.Called(t)
	m.timer = t.Clone()
	m.revision = store.RevisionInvalid
}

// GetAvail returns the cached available capacity.
func (m *mockedBladeRunState) GetAvail() *pb.BladeCapacity {
	_ = m.Called()
	return m.avail
}
func (m *mockedBladeRunState) SetAvail(value *pb.BladeCapacity) {
_ = m.Called(value)
m.avail = value.Clone()
m.revision = store.RevisionInvalid
} | simulation/internal/services/inventory/mocks.go | 0.609757 | 0.454472 | mocks.go | starcoder |
package api
import (
"fmt"
"math"
"math/rand"
"time"
)
// NewDistribution maps a distribution name onto a (tick duration, rate
// function) pair. "none" passes the inputs through untouched, "regular"
// spreads each rate evenly over sub-ticks, and "random" spreads it randomly.
// An unknown name returns the inputs unchanged together with an error.
func NewDistribution(distributionTypeArg string, iterationDuration time.Duration, rateFn RateFunction) (time.Duration, RateFunction, error) {
	if distributionTypeArg == "none" {
		return iterationDuration, rateFn, nil
	}
	if distributionTypeArg == "regular" {
		tick, fn := withRegularDistribution(iterationDuration, rateFn)
		return tick, fn, nil
	}
	if distributionTypeArg == "random" {
		tick, fn := withRandomDistribution(iterationDuration, rateFn, rand.Intn)
		return tick, fn, nil
	}
	return iterationDuration, rateFn, fmt.Errorf("unable to parse distribution %s", distributionTypeArg)
}
// withRegularDistribution wraps rateFn so that each rate it produces is
// spread evenly across 100ms sub-ticks of the original iteration duration.
// Durations at or below 100ms are returned unchanged. The returned closure
// is stateful and therefore not safe for concurrent callers.
func withRegularDistribution(iterationDuration time.Duration, rateFn RateFunction) (time.Duration, RateFunction) {
	distributedIterationDuration := 100 * time.Millisecond
	if iterationDuration <= distributedIterationDuration {
		return iterationDuration, rateFn
	}
	rate := 0
	accRate := 0.0
	remainingSteps := 0
	tickSteps := int(iterationDuration.Milliseconds() / distributedIterationDuration.Milliseconds())
	distributedRateFn := func(time time.Time) int {
		if remainingSteps == 0 {
			// Start of a new full iteration: sample a fresh rate.
			rate = rateFn(time)
			accRate = 0.0
			remainingSteps = tickSteps
		}
		// Accumulate the fractional per-step share; round to 7 decimal
		// places to keep repeated float additions from drifting.
		accRate += float64(rate) / float64(tickSteps)
		accRate = math.Round(accRate*10_000_000) / 10_000_000
		remainingSteps--
		if accRate < 1 {
			return 0
		}
		// Emit the whole part and carry the fraction to the next step.
		roundedAccRate := int(accRate)
		accRate -= float64(roundedAccRate)
		return roundedAccRate
	}
	return distributedIterationDuration, distributedRateFn
}
func withRandomDistribution(iterationDuration time.Duration, rateFn RateFunction, randFn func(int) int) (time.Duration, RateFunction) {
distributedIterationDuration := 100 * time.Millisecond
if iterationDuration <= distributedIterationDuration {
return iterationDuration, rateFn
}
remainingSteps := 0
remainingRate := 0
tickSteps := int(iterationDuration.Milliseconds() / distributedIterationDuration.Milliseconds())
distributedRateFn := func(time time.Time) int {
if remainingSteps == 0 {
remainingRate = rateFn(time)
remainingSteps = tickSteps
}
var currentRate int
if remainingSteps == 1 || remainingRate == 0 {
currentRate = remainingRate
} else {
currentRate = randFn(remainingRate)
if currentRate > remainingRate {
currentRate = remainingRate
}
}
remainingRate -= currentRate
remainingSteps--
if currentRate < 1 {
return 0
}
return currentRate
}
return distributedIterationDuration, distributedRateFn
} | internal/trigger/api/iteration_distribution.go | 0.671578 | 0.445891 | iteration_distribution.go | starcoder |
package engine
import (
"image"
"image/color"
"image/draw"
)
// Level is a struct that defines a single level of a game.
type Level struct {
	BackgroundColour color.RGBA // fill colour painted behind all objects
	Gravity float64 // downward acceleration applied to objects with mass
	GameObjects []*GameObject // every object in the level, visible or not
	Game *Game // owning game (provides stage dimensions)
	PaintOffset Vector // camera offset applied when painting
	BeforePaint BeforePaint // hook invoked before painting (see Repaint callers)
}
// Repaint redraws the entire level for a new game frame: it recomputes floor
// assignments and collisions, clears the stage to the background colour, then
// paints every visible object at its camera-adjusted position.
func (level *Level) Repaint(stage *image.RGBA) {
	// Figure out where all the floor objects are
	level.AssignFloors()
	// Figure out which objects are colliding
	level.CalculateCollisions()
	// Paint the background color
	draw.Draw(stage, stage.Bounds(), &image.Uniform{level.BackgroundColour}, image.ZP, draw.Src)
	// Update each game object
	for _, gameObject := range level.GameObjects {
		// Skip hidden objects
		if gameObject.IsHidden == true {
			continue
		}
		gameObject.Level = level
		gameObject.RecalculatePosition(level.Gravity)
		// Sprites face right by default; flip when moving left.
		if gameObject.Direction == DirLeft {
			gameObject.IsFlipped = true
		} else if gameObject.Direction == DirRight {
			gameObject.IsFlipped = false
		}
		// 0 is at the bottom, so flip the Y axis to paint correctly
		invertedY := level.Game.Height - int(gameObject.Position.Y) - gameObject.Height()
		paintY := invertedY + int(level.PaintOffset.Y)
		paintX := int(gameObject.Position.X) - int(level.PaintOffset.X)
		gameObject.CurrentSprite().AddToCanvas(stage, paintX, paintY, gameObject.IsFlipped)
	}
}
// AssignFloors iterates through all objects in the level and defines which
// object beneath them (if any) should be considered their 'floor' object,
// setting its top edge as the lowest point that the object can fall.
func (level *Level) AssignFloors() {
	// Map of X coordinate -> floor objects occupying that column.
	floorXCoords := map[int][]*GameObject{}
	// Make a map of each object's possible X positions
	for _, gameObject := range level.GameObjects {
		// Skip hidden, non-interactive and non-floor objects
		if gameObject.IsHidden == true || gameObject.IsInteractive == false || gameObject.IsFloor == false {
			continue
		}
		for i := 0; i < gameObject.Width(); i++ {
			xPos := i + int(gameObject.Position.X)
			floorXCoords[xPos] = append(floorXCoords[xPos], gameObject)
		}
	}
	// Find the objects that sit beneath every other object
	for _, gameObject := range level.GameObjects {
		// Skip objects that float or are non-interactive
		if gameObject.Mass == 0 || gameObject.IsInteractive == false {
			continue
		}
		// Default allows the object to fall off the bottom of the level
		// if no floor is found beneath it.
		highestFloorObject := float64(0 - gameObject.Height())
		for i := 0; i < gameObject.Width(); i++ {
			xPos := i + int(gameObject.Position.X)
			if floorObjects, ok := floorXCoords[xPos]; ok {
				// Find the one that is highest while still being lower than
				// the object itself
				for _, floorObject := range floorObjects {
					floorObjectTop := (floorObject.Position.Y + float64(floorObject.Height()))
					if floorObjectTop <= gameObject.Position.Y {
						if floorObjectTop > highestFloorObject {
							highestFloorObject = floorObjectTop
						}
					}
				}
			}
		}
		gameObject.FloorY = highestFloorObject
	}
}
// CalculateCollisions iterates via all objects in the level and defines which
// objects (if any) intersect them
func (level *Level) CalculateCollisions() {
xCoords := map[int][]*GameObject{}
// Make a map of each object's possible x positions
for _, gameObject := range level.GameObjects {
// Skip hidden of each object's possible X positions
if gameObject.IsHidden == true || gameObject.IsInteractive == false {
continue
}
for i := 0; i < gameObject.Width(); i++ {
xPos := i + int(gameObject.Position.X)
xCoords[xPos] = append(xCoords[xPos], gameObject)
}
}
// Find objects that also intersect on the Y axis
for _, gameObject := range level.GameObjects {
intersections := map[*GameObject]bool{}
gameObjectYmin := gameObject.Position.Y
gameObjectYmax := gameObjectYmin + float64(gameObject.Height())
for i := 0; i < gameObject.Width(); i++ {
xPos := i + int(gameObject.Position.X)
if intersectingObjects, ok := xCoords[xPos]; ok {
for _, intersectingObject := range intersectingObjects {
// Ignore the object itself
if intersectingObject == gameObject {
continue
}
// Skip the object if it has already been stored
if _, ok := intersections[intersectingObject]; ok {
continue
}
intersectingObjectYMin := intersectingObject.Position.Y
intersectingObjectYMax := intersectingObjectYMin + float64(intersectingObject.Height())
if (gameObjectYmin >= intersectingObjectYMax || gameObjectYmax <= intersectingObjectYMin) == false {
intersections[intersectingObject] = true
}
}
}
}
// Let the game know that there have been collisions
if len(intersections) > 0 {
for collidingObject := range intersections {
gameObject.CollisionHandler(gameObject, Collision{
GameObject: collidingObject,
Edge: gameObject.GetCollisionEdge(collidingObject),
})
}
}
}
} | level.go | 0.708818 | 0.508483 | level.go | starcoder |
package generator
// minDictionarySize is the smallest dictionary size, in bytes, accepted by
// the Zstandard training algorithm; see WithStartSize.
const minDictionarySize = 512
// DefaultConfig is the default configuration for the Mapper.
var DefaultConfig = Config{
	StartSize: minDictionarySize, // 512B
	RatioImprovements: 0.1, // 10% improvement per iteration
	SamplePath: "",
	DictionaryPath: "./codec/zbor/",
}
// Config holds all tunable parameters for dictionary generation.
type Config struct {
	// The dictionary size in bytes to start with when generating dictionaries
	// (cannot be below 512 bytes; see WithStartSize and minDictionarySize).
	// Gets multiplied by 2 at each loop.
	StartSize int
	// The tolerance for the improvement of compression ratio between each loop. Should be between 0 and 1.
	// For example, a value of 0.1 means that as long as a dictionary is at least 10% more performant than the
	// previously generated one, its size increase is tolerated and the generation loop continues. Only when a
	// dictionary is generated which is not at least 10% more performant than the previous one does the loop
	// stop, and the previous dictionary is selected as the most optimized one.
	RatioImprovements float64
	// The path in which to store samples that are generated temporarily to be used for training dictionaries.
	SamplePath string
	// The path in which to store compiled Go dictionaries. Should point to the package in which they should be used.
	DictionaryPath string
}
// Option is an option that can be given to the generator to configure optional
// parameters on initialization. Each Option mutates the Config it is given.
type Option func(*Config)
// WithStartSize sets the dictionary size in Bytes to start with when generating dictionaries.
// This value cannot be below 512B, or it will trigger errors in the Zstandard training algorithm.
// See https://github.com/facebook/zstd/issues/2815
func WithStartSize(size int) Option {
	return func(cfg *Config) {
		// Clamp to the minimum size accepted by the Zstandard trainer.
		if size < minDictionarySize {
			size = minDictionarySize
		}
		cfg.StartSize = size
	}
}
// WithRatioImprovementTolerance sets the tolerance for the improvement of compression ratio between
// generation loops; see Config.RatioImprovements for the exact semantics. The value should be
// between 0 and 1 (e.g. 0.1 requires each new dictionary to be at least 10% more performant).
func WithRatioImprovementTolerance(tolerance float64) Option {
	return func(cfg *Config) {
		cfg.RatioImprovements = tolerance
	}
}
// WithSamplePath sets the path in which to temporarily store generated data samples.
// An empty path is ignored so that the configured default is preserved.
func WithSamplePath(path string) Option {
	return func(cfg *Config) {
		if path == "" {
			return
		}
		cfg.SamplePath = path
	}
}
// WithDictionaryPath sets path in which to store compiled dictionaries.
func WithDictionaryPath(path string) Option {
return func(cfg *Config) {
cfg.DictionaryPath = path
}
} | codec/generator/config.go | 0.803637 | 0.405184 | config.go | starcoder |
package expression
import (
"fmt"
"github.com/linanh/go-mysql-server/sql"
)
// And checks whether two expressions are true, following SQL's three-valued
// (true/false/NULL) logic; see Eval.
type And struct {
	BinaryExpression
}
// NewAnd creates a new And expression over the two given children.
func NewAnd(left, right sql.Expression) sql.Expression {
	return &And{BinaryExpression{Left: left, Right: right}}
}
// JoinAnd combines the given expressions into a single left-nested tree of
// And nodes. Zero expressions yield nil; one expression is returned as-is.
func JoinAnd(exprs ...sql.Expression) sql.Expression {
	if len(exprs) == 0 {
		return nil
	}
	joined := exprs[0]
	for _, e := range exprs[1:] {
		joined = NewAnd(joined, e)
	}
	return joined
}
// String renders the expression as "(left AND right)".
func (a *And) String() string {
	return fmt.Sprintf("(%s AND %s)", a.Left, a.Right)
}
// DebugString renders the expression using the children's debug strings.
func (a *And) DebugString() string {
	return fmt.Sprintf("(%s AND %s)", sql.DebugString(a.Left), sql.DebugString(a.Right))
}
// Type implements the Expression interface; AND always yields a boolean.
func (*And) Type() sql.Type {
	return sql.Boolean
}
// Eval implements the Expression interface using SQL three-valued logic:
// if either side evaluates to false the result is false (short-circuiting
// before the right side is evaluated when the left is false), otherwise if
// either side is NULL (nil) the result is NULL, otherwise true.
func (a *And) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	lval, err := a.Left.Eval(ctx, row)
	if err != nil {
		return nil, err
	}
	if lval != nil {
		// Conversion errors are deliberately ignored here: a value that
		// cannot be converted to bool simply fails to short-circuit.
		lvalBool, err := sql.ConvertToBool(lval)
		if err == nil && lvalBool == false {
			return false, nil
		}
	}
	rval, err := a.Right.Eval(ctx, row)
	if err != nil {
		return nil, err
	}
	if rval != nil {
		rvalBool, err := sql.ConvertToBool(rval)
		if err == nil && rvalBool == false {
			return false, nil
		}
	}
	// Neither side was false; NULL on either side makes the result NULL.
	if lval == nil || rval == nil {
		return nil, nil
	}
	return true, nil
}
// WithChildren implements the Expression interface, rebuilding the And from
// exactly two child expressions.
func (a *And) WithChildren(ctx *sql.Context, children ...sql.Expression) (sql.Expression, error) {
	if len(children) == 2 {
		return NewAnd(children[0], children[1]), nil
	}
	return nil, sql.ErrInvalidChildrenNumber.New(a, len(children), 2)
}
// Or checks whether one of the two given expressions is true, following
// SQL's three-valued (true/false/NULL) logic; see Eval.
type Or struct {
	BinaryExpression
}
// NewOr creates a new Or expression over the two given children.
func NewOr(left, right sql.Expression) sql.Expression {
	return &Or{BinaryExpression{Left: left, Right: right}}
}
// String renders the expression as "(left OR right)".
func (o *Or) String() string {
	return fmt.Sprintf("(%s OR %s)", o.Left, o.Right)
}
// DebugString renders the expression using the children's debug strings.
// Parentheses are included for consistency with Or.String and
// And.DebugString, which both wrap their output.
func (o *Or) DebugString() string {
	return fmt.Sprintf("(%s OR %s)", sql.DebugString(o.Left), sql.DebugString(o.Right))
}
// Type implements the Expression interface; OR always yields a boolean.
func (*Or) Type() sql.Type {
	return sql.Boolean
}
// Eval implements the Expression interface using SQL three-valued logic:
// if either side evaluates to true the result is true (short-circuiting
// before the right side is evaluated when the left is true), otherwise if
// both sides are NULL (nil) the result is NULL, otherwise false.
func (o *Or) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	lval, err := o.Left.Eval(ctx, row)
	if err != nil {
		return nil, err
	}
	if lval != nil {
		// Conversion errors are deliberately ignored here: a value that
		// cannot be converted to bool simply fails to short-circuit.
		lvalBool, err := sql.ConvertToBool(lval)
		if err == nil && lvalBool {
			return true, nil
		}
	}
	// NOTE(review): this check looks redundant — a bool true value should
	// already have been caught by the ConvertToBool branch above. Confirm
	// ConvertToBool's behavior on bool before removing.
	if lval == true {
		return true, nil
	}
	rval, err := o.Right.Eval(ctx, row)
	if err != nil {
		return nil, err
	}
	if rval != nil {
		rvalBool, err := sql.ConvertToBool(rval)
		if err == nil && rvalBool {
			return true, nil
		}
	}
	// NOTE(review): SQL defines "NULL OR false" as NULL, but this returns
	// NULL only when BOTH sides are nil — note the asymmetry with And.Eval,
	// which uses || here. Confirm whether this is intentional.
	if lval == nil && rval == nil {
		return nil, nil
	}
	return rval == true, nil
}
// WithChildren implements the Expression interface.
func (o *Or) WithChildren(ctx *sql.Context, children ...sql.Expression) (sql.Expression, error) {
if len(children) != 2 {
return nil, sql.ErrInvalidChildrenNumber.New(o, len(children), 2)
}
return NewOr(children[0], children[1]), nil
} | sql/expression/logic.go | 0.703244 | 0.44077 | logic.go | starcoder |
package common
import (
"encoding/json"
"github.com/zhangsifeng92/geos/libraries/asio"
"strconv"
"strings"
"time"
)
// format is the layout used for parsing ISO-style time strings (no zone, no
// fractional seconds); time.Parse interprets such strings as UTC.
const format = "2006-01-02T15:04:05"
// Microseconds is a duration expressed as a signed count of microseconds.
type Microseconds int64
// MaxMicroseconds returns the largest representable duration (math.MaxInt64 µs).
func MaxMicroseconds() Microseconds { return Microseconds(0x7fffffffffffffff) }
// MinMicroseconds returns the zero duration.
func MinMicroseconds() Microseconds { return Microseconds(0) }
// ToSeconds converts the duration to whole seconds, truncating toward zero.
func (ms Microseconds) ToSeconds() int64 { return int64(ms / 1e6) }
// Count returns the raw microsecond count.
func (ms Microseconds) Count() int64 { return int64(ms) }
//func (ms Microseconds) String() string { return TimePoint(ms).String() }
// Seconds builds a duration from whole seconds.
func Seconds(s int64) Microseconds { return Microseconds(s * 1e6) }
// Milliseconds builds a duration from whole milliseconds.
func Milliseconds(s int64) Microseconds { return Microseconds(s * 1e3) }
// Minutes builds a duration from whole minutes.
func Minutes(m int64) Microseconds { return Seconds(60 * m) }
// Hours builds a duration from whole hours.
func Hours(h int64) Microseconds { return Minutes(60 * h) }
// Days builds a duration from whole days.
func Days(d int64) Microseconds { return Hours(24 * d) }
// TimePoint is a microsecond-precision instant measured from the Unix epoch.
type TimePoint Microseconds
// Now returns the current time at microsecond precision.
func Now() TimePoint { return TimePoint(time.Now().UTC().UnixNano() / 1e3) }
// MaxTimePoint returns the largest representable instant.
func MaxTimePoint() TimePoint { return TimePoint(MaxMicroseconds()) }
// MinTimePoint returns the epoch instant.
func MinTimePoint() TimePoint { return TimePoint(MinMicroseconds()) }
// TimeSinceEpoch returns the instant as a duration since the epoch.
func (tp TimePoint) TimeSinceEpoch() Microseconds { return Microseconds(tp) }
// SecSinceEpoch returns the number of whole seconds since the Unix epoch.
// The division must happen in the 64-bit domain BEFORE narrowing to uint32:
// the previous `uint32(tp) / 1e6` truncated the microsecond count to its low
// 32 bits first, yielding garbage for any realistic timestamp (compare
// NewTimePointSecTp, which divides first).
func (tp TimePoint) SecSinceEpoch() uint32 { return uint32(tp / 1e6) }
// String formats the instant in UTC with millisecond precision, e.g.
// "2006-01-02T15:04:05.000".
func (tp TimePoint) String() string {
	return time.Unix(int64(tp)/1e6, int64(tp)%1e6*1000).UTC().Format("2006-01-02T15:04:05.000")
}
// MarshalJSON encodes the instant as its ISO-style string form.
func (tp TimePoint) MarshalJSON() ([]byte, error) {
	return json.Marshal(tp.String())
}
// UnmarshalJSON decodes a JSON string produced by MarshalJSON (or any
// "2006-01-02T15:04:05[.fff]" string) back into a TimePoint.
func (tp *TimePoint) UnmarshalJSON(data []byte) error {
	var s string
	err := json.Unmarshal(data, &s)
	if err != nil {
		return err
	}
	timePoint, err := FromIsoString(s)
	if err != nil {
		return err
	}
	*tp = timePoint
	return nil
}
// FromIsoString parses a "2006-01-02T15:04:05" string with an optional
// fractional-second part (milliseconds) into a TimePoint.
func FromIsoString(s string) (TimePoint, error) {
	if strings.IndexByte(s, '.') < 0 {
		// No fractional part: delegate to the seconds-precision parser.
		tps, err := FromIsoStringSec(s)
		if err != nil {
			return 0, err
		}
		return tps.ToTimePoint(), nil
	} else {
		tps, err := FromIsoStringSec(strings.Split(s, ".")[0])
		if err != nil {
			return 0, err
		}
		// Right-pad the fractional digits to milliseconds ("5" -> "500").
		subs := []byte(strings.Split(s, ".")[1])
		for len(subs) < 3 {
			subs = append(subs, '0')
		}
		// Prefix with "1" so leading zeros survive Atoi ("050" -> 1050);
		// the extra 1000 is subtracted again below.
		ms, err2 := strconv.Atoi("1" + string(subs))
		if err2 != nil {
			return 0, err2
		}
		// NOTE(review): fractional parts longer than 3 digits are still
		// interpreted as milliseconds (".123456" yields 1122456ms); confirm
		// inputs never carry more than millisecond precision.
		return tps.ToTimePoint().AddUs(Milliseconds(int64(ms) - 1000)), nil
	}
}
// AddUs returns the instant advanced by m microseconds.
func (tp TimePoint) AddUs(m Microseconds) TimePoint { return TimePoint(Microseconds(tp) + m) }
// SubUs returns the instant moved back by m microseconds.
func (tp TimePoint) SubUs(m Microseconds) TimePoint { return TimePoint(Microseconds(tp) - m) }
// Sub returns the duration tp - t in microseconds.
func (tp TimePoint) Sub(t TimePoint) Microseconds { return Microseconds(tp - t) }
// SubTps returns the duration tp - t, with t given at seconds precision.
func (tp TimePoint) SubTps(t TimePointSec) Microseconds { return tp.Sub(t.ToTimePoint()) }
/**
 * A lower resolution time_point accurate only to seconds from 1970
 */
type TimePointSec uint32
// NewTimePointSecTp truncates a TimePoint to seconds precision.
func NewTimePointSecTp(t TimePoint) TimePointSec { return TimePointSec(t.TimeSinceEpoch() / 1e6) }
// MaxTimePointSec returns the largest representable seconds-precision value.
func MaxTimePointSec() TimePointSec { return TimePointSec(0xffffffff) }
// MinTimePointSec returns the zero (epoch) seconds-precision value.
func MinTimePointSec() TimePointSec { return TimePointSec(0) }
// ToTimePoint widens the value back to microsecond precision.
func (tp TimePointSec) ToTimePoint() TimePoint { return TimePoint(Seconds(int64(tp))) }
// SecSinceEpoch returns the raw seconds-since-epoch count.
func (tp TimePointSec) SecSinceEpoch() uint32 { return uint32(tp) }
// String formats like TimePoint.String (the milliseconds will be zero).
func (tp TimePointSec) String() string { return tp.ToTimePoint().String() }
// MarshalJSON encodes the value as its ISO-style string form.
func (tp TimePointSec) MarshalJSON() ([]byte, error) {
	return json.Marshal(tp.String())
}
// UnmarshalJSON decodes a "2006-01-02T15:04:05" JSON string (seconds
// precision, no fractional part) into the receiver.
func (tp *TimePointSec) UnmarshalJSON(data []byte) error {
	var s string
	err := json.Unmarshal(data, &s)
	if err != nil {
		return err
	}
	timePointSec, err := FromIsoStringSec(s)
	if err != nil {
		return err
	}
	*tp = timePointSec
	return nil
}
// FromIsoStringSec parses a time string in the "2006-01-02T15:04:05" layout
// (interpreted as UTC by time.Parse) into a TimePointSec.
func FromIsoStringSec(s string) (TimePointSec, error) {
	pt, err := time.Parse(format, s)
	if err != nil {
		// Return the zero value rather than leaking a partially-populated
		// result (the zero time's Unix() is negative and would wrap).
		return 0, err
	}
	return TimePointSec(pt.Unix()), nil
}
// AddSec returns the value advanced by offset seconds.
func (tp TimePointSec) AddSec(offset uint32) TimePointSec { return TimePointSec(uint32(tp) + offset) }
// SubSec returns the value moved back by offset seconds. The previous
// implementation ADDED the offset (a copy-paste of AddSec), making SubSec
// behave identically to AddSec.
func (tp TimePointSec) SubSec(offset uint32) TimePointSec { return TimePointSec(uint32(tp) - offset) }
// AddUs returns the corresponding TimePoint advanced by m microseconds.
func (tp TimePointSec) AddUs(m Microseconds) TimePoint { return tp.ToTimePoint().AddUs(m) }
// SubUs returns the corresponding TimePoint moved back by m microseconds.
func (tp TimePointSec) SubUs(m Microseconds) TimePoint { return tp.ToTimePoint().SubUs(m) }
// Sub returns the duration tp - t in microseconds.
func (tp TimePointSec) Sub(t TimePointSec) Microseconds { return tp.ToTimePoint().Sub(t.ToTimePoint()) }
/**
 * using asio.DeadlineTimer
 */
// Timer wraps asio.DeadlineTimer so callers can use this package's
// TimePoint/Microseconds types instead of the time package's types.
type Timer asio.DeadlineTimer
// NewTimer creates a timer bound to the given io context.
func NewTimer(ctx *asio.IoContext) *Timer {
	return (*Timer)(asio.NewDeadlineTimer(ctx))
}
// ExpiresUntil arms the timer to fire at the given absolute instant.
func (t *Timer) ExpiresUntil(time TimePoint) { t.ExpiresFromNow(time.Sub(Now())) }
// ExpiresAt arms the timer to fire at the given epoch offset in microseconds.
func (t *Timer) ExpiresAt(epoch Microseconds) { t.ExpiresUntil(TimePoint(epoch)) }
// ExpiresFromNow arms the timer to fire after the given duration.
func (t *Timer) ExpiresFromNow(m Microseconds) {
	(*asio.DeadlineTimer)(t).ExpiresFromNow(time.Microsecond * time.Duration(m))
}
// Cancel aborts any pending wait.
func (t *Timer) Cancel() { (*asio.DeadlineTimer)(t).Cancel() }
// AsyncWait registers op to run when the timer fires or is cancelled.
func (t *Timer) AsyncWait(op func(err error)) { (*asio.DeadlineTimer)(t).AsyncWait(op) }
package video
import "github.com/32bitkid/huffman"
import "github.com/32bitkid/bitreader"
// motionVectors holds motion vector components indexed as [r][s][t]:
// r selects the first/second vector, t selects horizontal (0) / vertical (1)
// (see motion_vectors, which decodes components t=0 then t=1); s is
// presumably the forward/backward prediction direction — confirm.
type motionVectors [2][2][2]int
// motionVectorPredictions stores the running predictors, same indexing.
type motionVectorPredictions motionVectors
// absInt returns the absolute value of in.
func absInt(in int) int {
	if in >= 0 {
		return in
	}
	return -in
}
// motionVectorsFormed is a bit set recording which prediction directions had
// motion vectors formed for the current macroblock (the two flags are OR-ed
// together when both directions are present; see set).
type motionVectorsFormed uint
const (
	// motionVectorsFormed_None: no motion vectors (intra macroblock).
	motionVectorsFormed_None = motionVectorsFormed(0)
	// motionVectorsFormed_FrameForward: a forward frame vector was formed.
	motionVectorsFormed_FrameForward = motionVectorsFormed(1 << 0)
	// motionVectorsFormed_FrameBackward: a backward frame vector was formed.
	motionVectorsFormed_FrameBackward = motionVectorsFormed(1 << 1)
)
// set records which motion vectors are formed for the given macroblock type
// in picture coding type pct. The switch cases are evaluated in order, so
// the intra check takes precedence, and a P-frame macroblock with no coded
// motion (neither forward nor backward) still forms a forward frame vector
// (presumably the zero-vector prediction for skipped/no-MC macroblocks —
// confirm against the spec).
func (mvf *motionVectorsFormed) set(mb_type *MacroblockType, pct PictureCodingType) {
	switch {
	case mb_type.macroblock_intra:
		*mvf = motionVectorsFormed_None
	case pct == PFrame &&
		mb_type.macroblock_intra == false &&
		mb_type.macroblock_motion_forward == false &&
		mb_type.macroblock_motion_backward == false:
		*mvf = motionVectorsFormed_FrameForward
	case mb_type.macroblock_motion_forward && mb_type.macroblock_motion_backward:
		*mvf = motionVectorsFormed_FrameForward | motionVectorsFormed_FrameBackward
	case mb_type.macroblock_motion_forward:
		*mvf = motionVectorsFormed_FrameForward
	case mb_type.macroblock_motion_backward:
		*mvf = motionVectorsFormed_FrameBackward
	}
}
// motionVectorData carries per-macroblock motion vector state: the decoded
// codes and residuals, the field-select bits, the running predictors, the
// reconstructed ("actual") vectors, and which vectors the previous
// macroblock formed.
type motionVectorData struct {
	info motionVectorInfo
	// code/residual hold the decoded motion_code and motion_residual values.
	code motionVectors
	residual motionVectors
	// vertical_field_select is indexed [r][s] (vector, direction).
	vertical_field_select [2][2]uint32
	predictions motionVectorPredictions
	actual motionVectors
	previous motionVectorsFormed
}
// update_actual reconstructs one motion vector component (vector r,
// direction s, component t) from the decoded motion code and residual,
// following the MPEG-2 vector reconstruction procedure (presumably
// ISO/IEC 13818-2 §7.6.3.1 — confirm). The result is written to both the
// predictor and the actual vector.
func (motionVector *motionVectorData) update_actual(r, s, t int, f_code fCode, motion_vector_format motionVectorFormat, picture_structure PictureStructure) {
	code := motionVector.code
	residual := motionVector.residual
	// f scales the code; vectors are representable in [low, high].
	r_size := f_code[s][t] - 1
	f := 1 << r_size
	high := (16 * f) - 1
	low := -16 * f
	_range := 32 * f
	var delta int
	if f == 1 || code[r][s][t] == 0 {
		delta = code[r][s][t]
	} else {
		// Combine code and residual into the signed delta.
		delta = ((absInt(code[r][s][t]) - 1) * f) + residual[r][s][t] + 1
		if code[r][s][t] < 0 {
			delta = -delta
		}
	}
	prediction := motionVector.predictions[r][s][t]
	// For the vertical component of field vectors in frame pictures the
	// predictor is kept at frame scale: halve before use, double after.
	if motion_vector_format == motionVectorFormat_Field &&
		t == 1 &&
		picture_structure == PictureStructure_FramePicture {
		prediction >>= 1
	}
	vector := prediction + delta
	// Wrap the result back into the representable range.
	if vector < low {
		vector += _range
	}
	if vector > high {
		vector -= _range
	}
	motionVector.predictions[r][s][t] = vector
	motionVector.actual[r][s][t] = vector
	if motion_vector_format == motionVectorFormat_Field &&
		t == 1 &&
		picture_structure == PictureStructure_FramePicture {
		motionVector.predictions[r][s][t] <<= 1
	}
}
// reset zeroes every motion vector predictor and both sets of actual
// vectors.
func (mvd *motionVectorData) reset() {
	for r := 0; r < 2; r++ {
		for s := 0; s < 2; s++ {
			for t := 0; t < 2; t++ {
				mvd.predictions[r][s][t] = 0
			}
		}
	}
	mvd.clear_actual(0)
	mvd.clear_actual(1)
}
// clear_actual zeroes both actual motion vectors (first and second, both
// components) for prediction direction s.
func (mvd *motionVectorData) clear_actual(s int) {
	for r := 0; r < 2; r++ {
		for t := 0; t < 2; t++ {
			mvd.actual[r][s][t] = 0
		}
	}
}
// motion_vectors decodes the motion_vectors(s) syntax element for macroblock
// mb, where s selects the prediction direction. Decoded codes/residuals are
// stored in mvd and the reconstructed vectors are folded into
// mvd.predictions / mvd.actual via update_actual.
func (fp *VideoSequence) motion_vectors(s int, mb *Macroblock, mvd *motionVectorData) error {
	f_code := fp.PictureCodingExtension.f_code
	mvd.info = mv_info(fp, mb)
	// motion_vector_part decodes one component t (0 = horizontal,
	// 1 = vertical) of vector r: the VLC motion_code, then an r_size-bit
	// motion_residual when f_code allows one and the code is non-zero.
	motion_vector_part := func(r, s, t int) error {
		if code, err := decodeMotionCode(fp); err != nil {
			return err
		} else {
			mvd.code[r][s][t] = code
		}
		if f_code[s][t] != 1 && mvd.code[r][s][t] != 0 {
			r_size := uint(f_code[s][t] - 1)
			if code, err := fp.Read32(r_size); err != nil {
				return err
			} else {
				mvd.residual[r][s][t] = int(code)
			}
		}
		// Dual-prime (dmv) differential vectors are not implemented.
		if mvd.info.dmv == 1 {
			panic("unsupported: dmv[]")
		}
		mvd.update_actual(r, s, t, f_code, mvd.info.motion_vector_format, fp.PictureCodingExtension.picture_structure)
		return nil
	}
	// motion_vector decodes both components of vector r.
	motion_vector := func(r, s int) error {
		if err := motion_vector_part(r, s, 0); err != nil {
			return err
		}
		if err := motion_vector_part(r, s, 1); err != nil {
			return err
		}
		return nil
	}
	if mvd.info.motion_vector_count == 1 {
		// A single vector; field-format vectors also carry a 1-bit
		// vertical_field_select unless dual-prime is in use.
		if mvd.info.motion_vector_format == motionVectorFormat_Field && mvd.info.dmv != 1 {
			if val, err := fp.Read32(1); err != nil {
				return err
			} else {
				mvd.vertical_field_select[0][s] = val
			}
		}
		return motion_vector(0, s)
	} else {
		// Two vectors, each preceded by its vertical_field_select bit.
		if val, err := fp.Read32(1); err != nil {
			return err
		} else {
			mvd.vertical_field_select[0][s] = val
		}
		if err := motion_vector(0, s); err != nil {
			return err
		}
		if val, err := fp.Read32(1); err != nil {
			return err
		} else {
			mvd.vertical_field_select[1][s] = val
		}
		if err := motion_vector(1, s); err != nil {
			return err
		}
	}
	return nil
}
// decodeMotionCode reads one variable-length motion_code symbol from the
// stream and returns its signed integer value.
func decodeMotionCode(br bitreader.BitReader) (int, error) {
	val, err := motionCodeDecoder.Decode(br)
	if err != nil {
		return 0, err
	}
	code, ok := val.(int)
	if !ok {
		return 0, huffman.ErrMissingHuffmanValue
	}
	return code, nil
}
// motionCodeDecoder maps the variable-length codes for motion_code to their
// signed values (presumably ISO/IEC 13818-2 Table B-10 — confirm against the
// spec; the spaces within the bit strings appear to be for readability only,
// matching the table's grouping — confirm the HuffmanTable parser ignores
// them).
var motionCodeDecoder = huffman.NewHuffmanDecoder(huffman.HuffmanTable{
	"0000 0011 001 ": -16,
	"0000 0011 011 ": -15,
	"0000 0011 101 ": -14,
	"0000 0011 111 ": -13,
	"0000 0100 001 ": -12,
	"0000 0100 011 ": -11,
	"0000 0100 11 ": -10,
	"0000 0101 01 ": -9,
	"0000 0101 11 ": -8,
	"0000 0111 ": -7,
	"0000 1001 ": -6,
	"0000 1011 ": -5,
	"0000 111 ": -4,
	"0001 1 ": -3,
	"0011 ": -2,
	"011 ": -1,
	"1": 0,
	"010": 1,
	"0010": 2,
	"0001 0": 3,
	"0000 110": 4,
	"0000 1010": 5,
	"0000 1000": 6,
	"0000 0110": 7,
	"0000 0101 10": 8,
	"0000 0101 00": 9,
	"0000 0100 10": 10,
	"0000 0100 010": 11,
	"0000 0100 000": 12,
	"0000 0011 110": 13,
	"0000 0011 100": 14,
	"0000 0011 010": 15,
	"0000 0011 000": 16,
})
package cs
/**
 * Configuration for CS virtual server resource.
 */
type Csvserver struct {
	/**
	* Name for the content switching virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters.
	Cannot be changed after the CS virtual server is created.
	The following requirement applies only to the Citrix ADC CLI:
	If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, my server or my server).
	*/
	Name string `json:"name,omitempty"`
	/**
	* Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.
	*/
	Td int `json:"td,omitempty"`
	/**
	* Protocol used by the virtual server.
	*/
	Servicetype string `json:"servicetype,omitempty"`
	/**
	* IP address of the content switching virtual server.
	*/
	Ipv46 string `json:"ipv46,omitempty"`
	/**
	* Virtual server target type.
	*/
	Targettype string `json:"targettype,omitempty"`
	Dnsrecordtype string `json:"dnsrecordtype,omitempty"`
	Persistenceid int `json:"persistenceid,omitempty"`
	/**
	* IP address pattern, in dotted decimal notation, for identifying packets to be accepted by the virtual server. The IP Mask parameter specifies which part of the destination IP address is matched against the pattern. Mutually exclusive with the IP Address parameter.
	For example, if the IP pattern assigned to the virtual server is 198.51.100.0 and the IP mask is 255.255.240.0 (a forward mask), the first 20 bits in the destination IP addresses are matched with the first 20 bits in the pattern. The virtual server accepts requests with IP addresses that range from 198.51.96.1 to 198.51.111.254 (NOTE(review): the example addresses in the original comment looked garbled; this range is recomputed from the stated pattern and mask). You can also use a pattern such as 0.0.2.2 and a mask such as 0.0.255.255 (a reverse mask).
	If a destination IP address matches more than one IP pattern, the pattern with the longest match is selected, and the associated virtual server processes the request. For example, if the virtual servers, vs1 and vs2, have the same IP pattern, 0.0.100.128, but different IP masks of 0.0.255.255 and 0.0.224.255, a destination IP address of 198.51.100.128 has the longest match with the IP pattern of vs1. If a destination IP address matches two or more virtual servers to the same extent, the request is processed by the virtual server whose port number matches the port number in the request.
	*/
	Ippattern string `json:"ippattern,omitempty"`
	/**
	* IP mask, in dotted decimal notation, for the IP Pattern parameter. Can have leading or trailing non-zero octets (for example, 255.255.240.0 or 0.0.255.255). Accordingly, the mask specifies whether the first n bits or the last n bits of the destination IP address in a client request are to be matched with the corresponding bits in the IP pattern. The former is called a forward mask. The latter is called a reverse mask.
	*/
	Ipmask string `json:"ipmask,omitempty"`
	/**
	* Number of consecutive IP addresses, starting with the address specified by the IP Address parameter, to include in a range of addresses assigned to this virtual server.
	*/
	Range int `json:"range,omitempty"`
	/**
	* Port number for content switching virtual server.
	*/
	Port int `json:"port,omitempty"`
	/**
	* The list of IPv4/IPv6 addresses bound to ipset would form a part of listening service on the current cs vserver
	*/
	Ipset string `json:"ipset,omitempty"`
	/**
	* Initial state of the load balancing virtual server.
	*/
	State string `json:"state,omitempty"`
	/**
	* Enable state updates for a specific content switching virtual server. By default, the Content Switching virtual server is always UP, regardless of the state of the Load Balancing virtual servers bound to it. This parameter interacts with the global setting as follows:
	Global Level | Vserver Level | Result
	ENABLED ENABLED ENABLED
	ENABLED DISABLED ENABLED
	DISABLED ENABLED ENABLED
	DISABLED DISABLED DISABLED
	If you want to enable state updates for only some content switching virtual servers, be sure to disable the state update parameter.
	*/
	Stateupdate string `json:"stateupdate,omitempty"`
	/**
	* Use this option to specify whether a virtual server, used for load balancing or content switching, routes requests to the cache redirection virtual server before sending it to the configured servers.
	*/
	Cacheable string `json:"cacheable,omitempty"`
	/**
	* URL to which traffic is redirected if the virtual server becomes unavailable. The service type of the virtual server should be either HTTP or SSL.
	Caution: Make sure that the domain in the URL does not match the domain specified for a content switching policy. If it does, requests are continuously redirected to the unavailable virtual server.
	*/
	Redirecturl string `json:"redirecturl,omitempty"`
	/**
	* Idle time, in seconds, after which the client connection is terminated. The default values are:
	180 seconds for HTTP/SSL-based services.
	9000 seconds for other TCP-based services.
	120 seconds for DNS-based services.
	120 seconds for other UDP-based services.
	*/
	Clttimeout int `json:"clttimeout,omitempty"`
	/**
	* Type of precedence to use for both RULE-based and URL-based policies on the content switching virtual server. With the default (RULE) setting, incoming requests are evaluated against the rule-based content switching policies. If none of the rules match, the URL in the request is evaluated against the URL-based content switching policies.
	*/
	Precedence string `json:"precedence,omitempty"`
	/**
	* Consider case in URLs (for policies that use URLs instead of RULES). For example, with the ON setting, the URLs /a/1.html and /A/1.HTML are treated differently and can have different targets (set by content switching policies). With the OFF setting, /a/1.html and /A/1.HTML are switched to the same target.
	*/
	Casesensitive string `json:"casesensitive,omitempty"`
	/**
	* Type of spillover used to divert traffic to the backup virtual server when the primary virtual server reaches the spillover threshold. Connection spillover is based on the number of connections. Bandwidth spillover is based on the total Kbps of incoming and outgoing traffic.
	*/
	Somethod string `json:"somethod,omitempty"`
	/**
	* Maintain source-IP based persistence on primary and backup virtual servers.
	*/
	Sopersistence string `json:"sopersistence,omitempty"`
	/**
	* Time-out value, in minutes, for spillover persistence.
	*/
	Sopersistencetimeout int `json:"sopersistencetimeout,omitempty"`
	/**
	* Depending on the spillover method, the maximum number of connections or the maximum total bandwidth (Kbps) that a virtual server can handle before spillover occurs.
	*/
	Sothreshold int `json:"sothreshold,omitempty"`
	/**
	* Action to be performed if spillover is to take effect, but no backup chain to spillover is usable or exists
	*/
	Sobackupaction string `json:"sobackupaction,omitempty"`
	/**
	* State of port rewrite while performing HTTP redirect.
	*/
	Redirectportrewrite string `json:"redirectportrewrite,omitempty"`
	/**
	* Flush all active transactions associated with a virtual server whose state transitions from UP to DOWN. Do not enable this option for applications that must complete their transactions.
	*/
	Downstateflush string `json:"downstateflush,omitempty"`
	/**
	* Name of the backup virtual server that you are configuring. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the backup virtual server is created. You can assign a different backup virtual server or rename the existing virtual server.
	The following requirement applies only to the Citrix ADC CLI:
	If the name includes one or more spaces, enclose the name in double or single quotation marks.
	*/
	Backupvserver string `json:"backupvserver,omitempty"`
	/**
	* Continue forwarding the traffic to backup virtual server even after the primary server comes UP from the DOWN state.
	*/
	Disableprimaryondown string `json:"disableprimaryondown,omitempty"`
	/**
	* Insert the virtual server's VIP address and port number in the request header. Available values function as follows:
	VIPADDR - Header contains the vserver's IP address and port number without any translation.
	OFF - The virtual IP and port header insertion option is disabled.
	V6TOV4MAPPING - Header contains the mapped IPv4 address corresponding to the IPv6 address of the vserver and the port number. An IPv6 address can be mapped to a user-specified IPv4 address using the set ns ip6 command.
	*/
	Insertvserveripport string `json:"insertvserveripport,omitempty"`
	/**
	* Name of virtual server IP and port header, for use with the VServer IP Port Insertion parameter.
	*/
	Vipheader string `json:"vipheader,omitempty"`
	/**
	* Enable network address translation (NAT) for real-time streaming protocol (RTSP) connections.
	*/
	Rtspnat string `json:"rtspnat,omitempty"`
	/**
	* FQDN of the authentication virtual server. The service type of the virtual server should be either HTTP or SSL.
	*/
	Authenticationhost string `json:"authenticationhost,omitempty"`
	/**
	* Authenticate users who request a connection to the content switching virtual server.
	*/
	Authentication string `json:"authentication,omitempty"`
	/**
	* String specifying the listen policy for the content switching virtual server. Can be either the name of an existing expression or an in-line expression.
	*/
	Listenpolicy string `json:"listenpolicy,omitempty"`
	/**
	* Integer specifying the priority of the listen policy. A higher number specifies a lower priority. If a request matches the listen policies of more than one virtual server the virtual server whose listen policy has the highest priority (the lowest priority number) accepts the request.
	*/
	Listenpriority int `json:"listenpriority,omitempty"`
	/**
	* Enable HTTP 401-response based authentication.
	*/
	Authn401 string `json:"authn401,omitempty"`
	/**
	* Name of authentication virtual server that authenticates the incoming user requests to this content switching virtual server.
	*/
	Authnvsname string `json:"authnvsname,omitempty"`
	/**
	* Process traffic with the push virtual server that is bound to this content switching virtual server (specified by the Push VServer parameter). The service type of the push virtual server should be either HTTP or SSL.
	*/
	Push string `json:"push,omitempty"`
	/**
	* Name of the load balancing virtual server, of type PUSH or SSL_PUSH, to which the server pushes updates received on the client-facing load balancing virtual server.
	*/
	Pushvserver string `json:"pushvserver,omitempty"`
	/**
	* Expression for extracting the label from the response received from server. This string can be either an existing rule name or an inline expression. The service type of the virtual server should be either HTTP or SSL.
	*/
	Pushlabel string `json:"pushlabel,omitempty"`
	/**
	* Allow multiple Web 2.0 connections from the same client to connect to the virtual server and expect updates.
	*/
	Pushmulticlients string `json:"pushmulticlients,omitempty"`
	/**
	* Name of the TCP profile containing TCP configuration settings for the virtual server.
	*/
	Tcpprofilename string `json:"tcpprofilename,omitempty"`
	/**
	* Name of the HTTP profile containing HTTP configuration settings for the virtual server. The service type of the virtual server should be either HTTP or SSL.
	*/
	Httpprofilename string `json:"httpprofilename,omitempty"`
	/**
	* Name of the DB profile.
	*/
	Dbprofilename string `json:"dbprofilename,omitempty"`
	/**
	* Oracle server version
	*/
	Oracleserverversion string `json:"oracleserverversion,omitempty"`
	/**
	* Information about this virtual server.
	*/
	Comment string `json:"comment,omitempty"`
	/**
	* The version of the MSSQL server
	*/
	Mssqlserverversion string `json:"mssqlserverversion,omitempty"`
	/**
	* Use L2 Parameters to identify a connection
	*/
	L2conn string `json:"l2conn,omitempty"`
	/**
	* The protocol version returned by the mysql vserver.
	*/
	Mysqlprotocolversion int `json:"mysqlprotocolversion,omitempty"`
	/**
	* The server version string returned by the mysql vserver.
	*/
	Mysqlserverversion string `json:"mysqlserverversion,omitempty"`
	/**
	* The character set returned by the mysql vserver.
	*/
	Mysqlcharacterset int `json:"mysqlcharacterset,omitempty"`
	/**
	* The server capabilities returned by the mysql vserver.
	*/
	Mysqlservercapabilities int `json:"mysqlservercapabilities,omitempty"`
	/**
	* Enable logging appflow flow information
	*/
	Appflowlog string `json:"appflowlog,omitempty"`
	/**
	* The name of the network profile.
	*/
	Netprofile string `json:"netprofile,omitempty"`
	/**
	* Can be active or passive
	*/
	Icmpvsrresponse string `json:"icmpvsrresponse,omitempty"`
	/**
	* A host route is injected according to the setting on the virtual servers
	* If set to PASSIVE on all the virtual servers that share the IP address, the appliance always injects the hostroute.
	* If set to ACTIVE on all the virtual servers that share the IP address, the appliance injects even if one virtual server is UP.
	* If set to ACTIVE on some virtual servers and PASSIVE on the others, the appliance, injects even if one virtual server set to ACTIVE is UP.
	*/
	Rhistate string `json:"rhistate,omitempty"`
	/**
	* Name of the authentication profile to be used when authentication is turned on.
	*/
	Authnprofile string `json:"authnprofile,omitempty"`
	/**
	* Name of the DNS profile to be associated with the VServer. DNS profile properties will applied to the transactions processed by a VServer. This parameter is valid only for DNS and DNS-TCP VServers.
	*/
	Dnsprofilename string `json:"dnsprofilename,omitempty"`
	/**
	* This option starts/stops the dtls service on the vserver
	*/
	Dtls string `json:"dtls,omitempty"`
	/**
	* Type of persistence for the virtual server. Available settings function as follows:
	* SOURCEIP - Connections from the same client IP address belong to the same persistence session.
	* COOKIEINSERT - Connections that have the same HTTP Cookie, inserted by a Set-Cookie directive from a server, belong to the same persistence session.
	* SSLSESSION - Connections that have the same SSL Session ID belong to the same persistence session.
	*/
	Persistencetype string `json:"persistencetype,omitempty"`
	/**
	* Persistence mask for IP based persistence types, for IPv4 virtual servers.
	*/
	Persistmask string `json:"persistmask,omitempty"`
	/**
	* Persistence mask for IP based persistence types, for IPv6 virtual servers.
	*/
	V6persistmasklen int `json:"v6persistmasklen,omitempty"`
	/**
	* Time period for which a persistence session is in effect.
	*/
	Timeout int `json:"timeout,omitempty"`
	/**
	* Use this parameter to specify the cookie name for COOKIE peristence type. It specifies the name of cookie with a maximum of 32 characters. If not specified, cookie name is internally generated.
	*/
	Cookiename string `json:"cookiename,omitempty"`
	/**
	* Backup persistence type for the virtual server. Becomes operational if the primary persistence mechanism fails.
	*/
	Persistencebackup string `json:"persistencebackup,omitempty"`
	/**
	* Time period for which backup persistence is in effect.
	*/
	Backuppersistencetimeout int `json:"backuppersistencetimeout,omitempty"`
	/**
	* Port number for external TCP probe. NetScaler provides support for external TCP health check of the vserver status over the selected port. This option is only supported for vservers assigned with an IPAddress or ipset.
	*/
	Tcpprobeport int `json:"tcpprobeport,omitempty"`
	/**
	* Citrix ADC provides support for external health check of the vserver status. Select HTTP or TCP probes for healthcheck
	*/
	Probeprotocol string `json:"probeprotocol,omitempty"`
	/**
	* HTTP code to return in SUCCESS case.
	*/
	Probesuccessresponsecode string `json:"probesuccessresponsecode,omitempty"`
	/**
	* Citrix ADC provides support for external health check of the vserver status. Select port for HTTP/TCP monitring
	*/
	Probeport int `json:"probeport,omitempty"`
	/**
	* Name of QUIC profile which will be attached to the Content Switching VServer.
	*/
	Quicprofilename string `json:"quicprofilename,omitempty"`
	/**
	* Domain name for which to change the time to live (TTL) and/or backup service IP address.
	*/
	Domainname string `json:"domainname,omitempty"`
	Ttl int `json:"ttl,omitempty"`
	Backupip string `json:"backupip,omitempty"`
	Cookiedomain string `json:"cookiedomain,omitempty"`
	Cookietimeout int `json:"cookietimeout,omitempty"`
	Sitedomainttl int `json:"sitedomainttl,omitempty"`
	/**
	* New name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters.
	The following requirement applies only to the Citrix ADC CLI:
	If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my name" or 'my name').
	*/
	Newname string `json:"newname,omitempty"`
	//------- Read only Parameter ---------;
	Ip string `json:"ip,omitempty"`
	Value string `json:"value,omitempty"`
	Ngname string `json:"ngname,omitempty"`
	Type string `json:"type,omitempty"`
	Curstate string `json:"curstate,omitempty"`
	Sc string `json:"sc,omitempty"`
	Status string `json:"status,omitempty"`
	Cachetype string `json:"cachetype,omitempty"`
	Redirect string `json:"redirect,omitempty"`
	Homepage string `json:"homepage,omitempty"`
	Dnsvservername string `json:"dnsvservername,omitempty"`
	Domain string `json:"domain,omitempty"`
	Servicename string `json:"servicename,omitempty"`
	Weight string `json:"weight,omitempty"`
	Cachevserver string `json:"cachevserver,omitempty"`
	Targetvserver string `json:"targetvserver,omitempty"`
	Url string `json:"url,omitempty"`
	Bindpoint string `json:"bindpoint,omitempty"`
	Gt2gb string `json:"gt2gb,omitempty"`
	Statechangetimesec string `json:"statechangetimesec,omitempty"`
	Statechangetimemsec string `json:"statechangetimemsec,omitempty"`
	Tickssincelaststatechange string `json:"tickssincelaststatechange,omitempty"`
	Ruletype string `json:"ruletype,omitempty"`
	Lbvserver string `json:"lbvserver,omitempty"`
	Targetlbvserver string `json:"targetlbvserver,omitempty"`
	Nodefaultbindings string `json:"nodefaultbindings,omitempty"`
	Version string `json:"version,omitempty"`
}
package models
import (
"../../common"
"fmt"
"strconv"
)
// Result models a single job posting record, including the candidate
// profile and the company it is associated with.
//
// All fields are unexported and (de)serialization is done manually via
// ReloadFromJson/ToJsonString, so the struct tags below are purely
// documentary — encoding/json ignores unexported fields. The tags were
// previously malformed (`json:id` instead of `json:"id"`, flagged by
// `go vet`) and carried a "sourceEndpoing" typo; both are fixed here.
type Result struct {
	id int `json:"id"`
	profile *Profile `json:"profile"`
	code string `json:"code"`
	company *Company `json:"company"`
	lastUpdatedDate string `json:"lastUpdatedDate"`
	postedDate string `json:"postedDate"`
	applied bool `json:"applied"`
	title string `json:"title"`
	description string `json:"description"`
	required string `json:"required"`
	optional string `json:"optional"`
	benefits string `json:"benefits"`
	otherDetails string `json:"otherDetails"`
	minYearsNeeded int `json:"minYearsNeeded"`
	sourceEndpoint string `json:"sourceEndpoint"`
	state int `json:"state"`
}
// ReloadFromJson repopulates the receiver from the JSON document
// produced by ToJsonString. Numeric parse errors are ignored, leaving
// the zero value in place; a missing or non-string field will panic on
// the type assertion.
//
// NOTE(review): the lookup keys previously read "name", "city" and
// "sourceEndpoing", none of which match the keys ToJsonString emits
// ("profile", "company", "sourceEndpoint"); they are corrected here so
// a Result round-trips. Confirm no external producer relied on the old
// keys.
func (x *Result) ReloadFromJson(json string) {
	dict := common.StrToDictionary([]byte(json))
	x.id, _ = strconv.Atoi(dict["id"].(string))
	x.profile = &Profile{}
	x.profile.ReloadFromJson(common.DictionaryToJsonString(dict["profile"].(map[string]interface{})))
	x.code = dict["code"].(string)
	x.company = &Company{}
	x.company.ReloadFromJson(common.DictionaryToJsonString(dict["company"].(map[string]interface{})))
	x.lastUpdatedDate = dict["lastUpdatedDate"].(string)
	x.postedDate = dict["postedDate"].(string)
	// "applied" is serialized as a stringified number ("1"/"0");
	// anything unparsable or non-positive is treated as false.
	val, _ := strconv.Atoi(dict["applied"].(string))
	x.applied = val > 0
	x.title = dict["title"].(string)
	x.description = dict["description"].(string)
	x.required = dict["required"].(string)
	x.optional = dict["optional"].(string)
	x.benefits = dict["benefits"].(string)
	x.otherDetails = dict["otherDetails"].(string)
	x.minYearsNeeded, _ = strconv.Atoi(dict["minYearsNeeded"].(string))
	x.sourceEndpoint = dict["sourceEndpoint"].(string)
	x.state, _ = strconv.Atoi(dict["state"].(string))
}
// ToJsonString serializes the Result as a JSON object. Nested profile
// and company objects are emitted via their own ToJsonString methods;
// every scalar is emitted as a JSON string so that ReloadFromJson can
// parse it back with strconv.
//
// NOTE(review): string fields are not escaped, so embedded quotes in
// e.g. the description would produce invalid JSON — verify inputs are
// sanitized upstream.
func (x *Result) ToJsonString() string {
	// "applied" must round-trip through strconv.Atoi in
	// ReloadFromJson, so encode the bool as 1/0. The previous code
	// applied %d to a bool, which prints "%!d(bool=true)" and broke
	// the round-trip.
	applied := 0
	if x.applied {
		applied = 1
	}
	result := "{"
	result += fmt.Sprintf("\"id\":\"%d\"", x.id)
	result += fmt.Sprintf(",\"profile\":%s", x.profile.ToJsonString())
	result += fmt.Sprintf(",\"code\":\"%s\"", x.code)
	result += fmt.Sprintf(",\"company\":%s", x.company.ToJsonString())
	result += fmt.Sprintf(",\"lastUpdatedDate\":\"%s\"", x.lastUpdatedDate)
	result += fmt.Sprintf(",\"postedDate\":\"%s\"", x.postedDate)
	result += fmt.Sprintf(",\"applied\":\"%d\"", applied)
	result += fmt.Sprintf(",\"title\":\"%s\"", x.title)
	result += fmt.Sprintf(",\"description\":\"%s\"", x.description)
	result += fmt.Sprintf(",\"required\":\"%s\"", x.required)
	result += fmt.Sprintf(",\"optional\":\"%s\"", x.optional)
	result += fmt.Sprintf(",\"benefits\":\"%s\"", x.benefits)
	result += fmt.Sprintf(",\"otherDetails\":\"%s\"", x.otherDetails)
	result += fmt.Sprintf(",\"minYearsNeeded\":\"%d\"", x.minYearsNeeded)
	result += fmt.Sprintf(",\"sourceEndpoint\":\"%s\"", x.sourceEndpoint)
	result += fmt.Sprintf(",\"state\":\"%d\"", x.state)
	result += "}"
	return result
}
// Field mutators for Result. SetProifle keeps its historical
// misspelling because external callers may depend on the name; note
// also that there is no setter for otherDetails.
func (x *Result) SetID(a int)                 { x.id = a }
func (x *Result) SetProifle(a *Profile)       { x.profile = a }
func (x *Result) SetCode(a string)            { x.code = a }
func (x *Result) SetCompany(a *Company)       { x.company = a }
func (x *Result) SetLastUpdatedDate(a string) { x.lastUpdatedDate = a }
func (x *Result) SetPostedDate(a string)      { x.postedDate = a }
func (x *Result) SetApplied(a bool)           { x.applied = a }
func (x *Result) SetTitle(a string)           { x.title = a }
func (x *Result) SetDescription(a string)     { x.description = a }
func (x *Result) SetRequired(a string)        { x.required = a }
func (x *Result) SetOptional(a string)        { x.optional = a }
func (x *Result) SetBenefits(a string)        { x.benefits = a }
func (x *Result) SetMinYearsNeeded(a int)     { x.minYearsNeeded = a }
func (x *Result) SetSourceEndpoint(a string)  { x.sourceEndpoint = a }
func (x *Result) SetState(a int)              { x.state = a }
// Field accessors for Result (value receivers; the struct is small).
func (x Result) ID() int                  { return x.id }
func (x Result) Profile() *Profile        { return x.profile }
func (x Result) Code() string             { return x.code }
func (x Result) Company() *Company        { return x.company }
func (x Result) LastUpdatedDate() string  { return x.lastUpdatedDate }
func (x Result) PostedDate() string       { return x.postedDate }
func (x Result) Applied() bool            { return x.applied }
func (x Result) Title() string            { return x.title }
func (x Result) Description() string      { return x.description }
func (x Result) Required() string         { return x.required }
func (x Result) Optional() string         { return x.optional }
func (x Result) Benefits() string         { return x.benefits }
func (x Result) OtherDetails() string     { return x.otherDetails }
func (x Result) MinYearsNeeded() int      { return x.minYearsNeeded }
func (x Result) SourceEndpoint() string   { return x.sourceEndpoint }
func (x Result) State() (int) { return x.state; } | jmserver/src/classes/jmserver/models/Result.go | 0.654564 | 0.462048 | Result.go | starcoder |
package grid
import (
"sort"
"github.com/google/gapid/test/robot/web/client/dom"
)
// Data holds all the presentable data for the grid.
type Data struct {
	// Columns maps a column key to the header shown for that column.
	Columns map[Key]*HeaderData
	// Rows maps a row key to the header shown for that row.
	Rows map[Key]*HeaderData
	// Cells maps a (column, row) index pair to that cell's contents.
	Cells map[CellIndex]*CellData
}
// HeaderData holds information about a single row or column header.
type HeaderData struct {
	// Label used for displaying the header.
	Name string
	// The list of tasks that belong to this cell.
	Tasks TaskList
}
// Key is a unique identifier for headers and cells.
type Key interface{}
// CellIndex locates a single cell in the grid.
type CellIndex struct {
	Column Key
	Row Key
}
// CellData holds the list of tasks to display for that cell.
type CellData struct {
	// The list of tasks that belong to this cell.
	Tasks TaskList
	// Optional Key used for transition animations when changing data.
	Key Key
}
// Task holds information about a single task in a cell.
type Task struct {
	// Last completed task result.
	Result Result
	// The current task status.
	Status Status
	// User data.
	Data interface{}
}
// Result is an enumerator of task results.
// The zero value is Unknown.
type Result int
const (
	// Unknown represents the unknown result of a task.
	Unknown = Result(iota)
	// Succeeded represents a task that has succeeded.
	Succeeded
	// Failed represents a task that has failed.
	Failed
)
// Status is an enumerator of task statuses.
// The zero value is Current.
type Status int
const (
	// Current represents a task that has a result for the latest data.
	Current = Status(iota)
	// Stale represents a task that has a result for data that is not current.
	Stale
	// InProgress represents a task that is currently being run.
	// The task's result will be for data that is not current.
	InProgress
)
// TaskList is a list of tasks.
type TaskList []Task
// Count returns how many tasks in the list satisfy pred.
func (l *TaskList) Count(pred func(Task) bool) int {
	n := 0
	for _, task := range *l {
		if pred(task) {
			n++
		}
	}
	return n
}
// stats tallies the tasks in the list into per-bucket counts, one per
// (Result, Status) combination, used for rendering cluster summaries.
func (l TaskList) stats() taskStats {
	return taskStats{
		numCurrentSucceeded: l.Count(taskCurrentSucceeded),
		numStaleSucceeded: l.Count(taskStaleSucceeded),
		numInProgressWasSucceeded: l.Count(taskInProgressWasSucceeded),
		numInProgressWasUnknown: l.Count(taskInProgressWasUnknown),
		numInProgressWasFailed: l.Count(taskInProgressWasFailed),
		numStaleFailed: l.Count(taskStaleFailed),
		numCurrentFailed: l.Count(taskCurrentFailed),
		numTasks: len(l),
	}
}
// Predicates classifying a task by its (Result, Status) pair; used as
// arguments to TaskList.Count above.
func taskCurrentSucceeded(t Task) bool { return t.Result == Succeeded && t.Status == Current }
func taskStaleSucceeded(t Task) bool { return t.Result == Succeeded && t.Status == Stale }
func taskInProgressWasSucceeded(t Task) bool { return t.Result == Succeeded && t.Status == InProgress }
func taskCurrentFailed(t Task) bool { return t.Result == Failed && t.Status == Current }
func taskStaleFailed(t Task) bool { return t.Result == Failed && t.Status == Stale }
func taskInProgressWasFailed(t Task) bool { return t.Result == Failed && t.Status == InProgress }
func taskInProgressWasUnknown(t Task) bool { return t.Result == Unknown && t.Status == InProgress }
// cell is the renderable state for a single grid cell: its data plus
// animation state (alphas, click ripples) and layout (rect).
type cell struct {
	index CellIndex
	data *CellData
	clickRipples clickRipples
	alpha float64
	nonClusterAlpha float64
	cluster *cluster
	rect *dom.Rect
}
// newCell builds a fully-opaque cell for the given index and data,
// precomputing the task statistics for its cluster.
func newCell(i CellIndex, d *CellData) *cell {
	return &cell{
		index: i,
		data: d,
		cluster: &cluster{stats: d.Tasks.stats()},
		alpha: 1,
		nonClusterAlpha: 1,
	}
}
// taskStats holds counts of tasks bucketed by (Result, Status);
// produced by TaskList.stats.
type taskStats struct {
	numCurrentSucceeded int
	numStaleSucceeded int
	numInProgressWasSucceeded int
	numInProgressWasUnknown int
	numInProgressWasFailed int
	numStaleFailed int
	numCurrentFailed int
	numTasks int
}
// header is the renderable state for a single row or column header:
// its data plus animation state (alphas, click ripples) and layout
// (rects, text offset).
type header struct {
	key Key
	data *HeaderData
	index int
	clickRipples clickRipples
	alpha float64
	textAlpha float64
	backgroundAlpha float64
	clusterAlpha float64
	tasks TaskList
	cluster *cluster
	rect *dom.Rect
	textOffset *dom.Point
	clusterRect *dom.Rect
}
// newHeader builds a fully-opaque header for the given key and data.
// Unlike newCell, the cluster stats are not computed here; buildData
// fills them in after the header's task list is assigned.
func newHeader(k Key, d *HeaderData) *header {
	return &header{
		key: k,
		data: d,
		cluster: &cluster{},
		alpha: 1,
		textAlpha: 1,
		backgroundAlpha: 1,
		clusterAlpha: 1,
	}
}
// dataset is one complete, laid-out snapshot of grid data, including
// highlight state. Cells are stored column-major (see cellIndex).
type dataset struct {
	columns []*header
	rows []*header
	cells []*cell // columns * (rows * (cell))
	alpha float64
	highlightedCell *cell // The highlighted cell, or nil
	highlightedRow *header // The highlighted row, or nil
	highlightedColumn *header // The highlighted column, or nil
}
// cellIndex converts a (column, row) pair into the flat, column-major
// index used by d.cells.
func (d *dataset) cellIndex(col, row int) (idx int) {
	return col*len(d.rows) + row
}
// cellColumnAndRow is the inverse of cellIndex.
func (d *dataset) cellColumnAndRow(idx int) (col, row int) {
	rows := len(d.rows)
	return idx / rows, idx % rows
}
// rowAt returns the row header whose rectangle contains p, or nil if
// none does.
func (d *dataset) rowAt(p *dom.Point) *header {
	for _, hdr := range d.rows {
		if hdr.rect.Contains(p) {
			return hdr
		}
	}
	return nil
}
// columnAt returns the column header whose rectangle contains p, or
// nil if none does.
func (d *dataset) columnAt(p *dom.Point) *header {
	for _, hdr := range d.columns {
		if hdr.rect.Contains(p) {
			return hdr
		}
	}
	return nil
}
// cellAt returns the cell whose rectangle contains p, or nil if none
// does.
func (d *dataset) cellAt(p *dom.Point) *cell {
	for _, candidate := range d.cells {
		if candidate.rect.Contains(p) {
			return candidate
		}
	}
	return nil
}
// buildData converts the public Data into an internal dataset: headers
// are created and sorted, cells are laid out column-major, and per
// header/cell task statistics are precomputed. Cells that reference an
// unknown row or column key are dropped; grid positions with no cell
// are filled with empty placeholders.
func buildData(in Data, rowSort, columnSort headerLess) *dataset {
	out := &dataset{alpha: 1}
	// Build all the columns.
	keyToCol := map[Key]*header{}
	out.columns = make([]*header, 0, len(in.Columns))
	for k, h := range in.Columns {
		col := newHeader(k, h)
		col.tasks = h.Tasks
		out.columns = append(out.columns, col)
		keyToCol[k] = col
	}
	// Build all the rows.
	keyToRow := map[Key]*header{}
	out.rows = make([]*header, 0, len(in.Rows))
	for k, h := range in.Rows {
		row := newHeader(k, h)
		row.tasks = h.Tasks
		out.rows = append(out.rows, row)
		keyToRow[k] = row
	}
	// Sort all the columns and rows.
	// The index fields must be assigned after sorting, since
	// cellIndex depends on the final ordering.
	sort.Sort(&headerSorter{out.columns, columnSort})
	sort.Sort(&headerSorter{out.rows, rowSort})
	for i, c := range out.columns {
		c.index = i
	}
	for i, r := range out.rows {
		r.index = i
	}
	// Sort all the cells.
	out.cells = make([]*cell, len(out.rows)*len(out.columns))
	for i, c := range in.Cells {
		col, ok := keyToCol[i.Column]
		if !ok {
			continue
		}
		row, ok := keyToRow[i.Row]
		if !ok {
			continue
		}
		cellIdx := out.cellIndex(col.index, row.index)
		out.cells[cellIdx] = newCell(i, c)
	}
	// Cache stats for all tasks in all columns and rows
	for _, h := range out.columns {
		h.cluster.stats = h.tasks.stats()
	}
	for _, h := range out.rows {
		h.cluster.stats = h.tasks.stats()
	}
	// Create an empty cell for any missing cells.
	for i, c := range out.cells {
		if c == nil {
			out.cells[i] = newCell(CellIndex{}, &CellData{})
		}
	}
	return out
}
// SetData assigns the data to the grid, starting an animated
// transition from the currently displayed dataset to the new one.
func (g *Grid) SetData(data Data) {
	new, old := buildData(data, g.rowSort, g.columnSort), g.topDataset()
	g.setTransition(old, new)
	g.tick()
}
// headerLess reports whether header a should sort before header b.
type headerLess func(a, b *header) bool
// sortAlphabetic orders headers by their display name.
func sortAlphabetic(a, b *header) bool { return a.data.Name < b.data.Name }
// headerSorter adapts a header slice plus a headerLess comparison to
// sort.Interface.
type headerSorter struct {
	list []*header
	less headerLess
}
func (s *headerSorter) Len() int { return len(s.list) }
func (s *headerSorter) Less(i, j int) bool { return s.less(s.list[i], s.list[j]) }
func (s *headerSorter) Swap(i, j int) { s.list[i], s.list[j] = s.list[j], s.list[i] } | test/robot/web/client/widgets/grid/data.go | 0.696578 | 0.53358 | data.go | starcoder |
package datastructs
import "fmt"
// Node colors for the red-black tree.
const (
	RED, BLACK = 0, 1
)

// RbTreeNode is a single node of a red-black tree, linked both down
// (left/right) and up (parent).
type RbTreeNode struct {
	parent, left, right *RbTreeNode
	color               int
	key                 interface{}
}

// RbTree is a red-black tree addressed through its root node.
type RbTree struct {
	root *RbTreeNode
}

// RbTreeHeight returns the height (in edges) of the subtree rooted at
// root; an empty tree has height -1 and a single node has height 0.
func RbTreeHeight(root *RbTreeNode) int {
	if root == nil {
		return -1
	}
	l, r := RbTreeHeight(root.left), RbTreeHeight(root.right)
	if l > r {
		return l + 1
	}
	return r + 1
}

// PrintRbBinaryTree prints the keys of the tree in order (left
// subtree, node, right subtree), each followed by a tab.
func PrintRbBinaryTree(root *RbTreeNode) {
	if root == nil {
		return
	}
	PrintRbBinaryTree(root.left)
	fmt.Print(root.key, "\t")
	PrintRbBinaryTree(root.right)
}
// RbTreeSearch returns the node in the subtree rooted at x whose key
// compares equal to k, or nil if no such node exists. It panics if k
// does not implement Comparable.
//
// The previous version evaluated k1.compare(x.key) before checking x
// for nil, so searching an empty tree dereferenced a nil pointer; the
// comparison is now performed only inside the loop, and the typo in
// the panic message ("omparable") is fixed.
func RbTreeSearch(x *RbTreeNode, k interface{}) *RbTreeNode {
	k1, ok := k.(Comparable)
	if !ok {
		panic("key must be comparable")
	}
	for x != nil {
		cv := k1.compare(x.key)
		switch {
		case cv < 0:
			x = x.left
		case cv > 0:
			x = x.right
		default:
			return x
		}
	}
	return nil
}
// RbTreeMinimum returns the leftmost (smallest-key) node of the
// subtree rooted at x, or nil if x is nil.
func RbTreeMinimum(x *RbTreeNode) *RbTreeNode {
	if x == nil {
		return nil
	}
	for x.left != nil {
		x = x.left
	}
	return x
}
// RbTreeMaximum returns the rightmost (largest-key) node of the
// subtree rooted at x, or nil if x is nil.
func RbTreeMaximum(x *RbTreeNode) *RbTreeNode {
	if x == nil {
		return nil
	}
	for x.right != nil {
		x = x.right
	}
	return x
}
// RbTreeSuccessor returns the in-order successor of x, or nil if x is
// nil or is the maximum of the tree.
func RbTreeSuccessor(x *RbTreeNode) *RbTreeNode {
	if x == nil {
		return nil
	}
	// With a right subtree, the successor is its minimum.
	if x.right != nil {
		return RbTreeMinimum(x.right)
	}
	// Otherwise climb until we step up out of a left subtree.
	anc := x.parent
	for anc != nil && anc.right == x {
		x, anc = anc, anc.parent
	}
	return anc
}
// RbTreePredecessor returns the in-order predecessor of x, or nil if
// x is nil or is the minimum of the tree.
func RbTreePredecessor(x *RbTreeNode) *RbTreeNode {
	if x == nil {
		return nil
	}
	// With a left subtree, the predecessor is its maximum.
	if x.left != nil {
		return RbTreeMaximum(x.left)
	}
	// Otherwise climb until we step up out of a right subtree.
	anc := x.parent
	for anc != nil && anc.left == x {
		x, anc = anc, anc.parent
	}
	return anc
}
// leftRotate rotates the subtree rooted at x to the left: x's right
// child y becomes the subtree root and x becomes y's left child.
// Assumes x.right is non-nil.
func (T *RbTree) leftRotate(x *RbTreeNode) {
	y := x.right
	// y's left subtree becomes x's right subtree.
	x.right = y.left
	if y.left != nil {
		y.left.parent = x
	}
	// y takes over x's position under x's parent (or the root).
	y.parent = x.parent
	if x.parent == nil {
		T.root = y
	} else if x == x.parent.left {
		x.parent.left = y
	} else {
		x.parent.right = y
	}
	// x becomes y's left child.
	y.left = x
	x.parent = y
}
// rightRotate rotates the subtree rooted at y to the right: y's left
// child x becomes the subtree root and y becomes x's right child.
// Assumes y.left is non-nil. Mirror image of leftRotate.
func (T *RbTree) rightRotate(y *RbTreeNode) {
	x := y.left
	// x's right subtree becomes y's left subtree.
	y.left = x.right
	if x.right != nil {
		x.right.parent = y
	}
	// x takes over y's position under y's parent (or the root).
	x.parent = y.parent
	if y.parent == nil {
		T.root = x
	} else if y == y.parent.right {
		y.parent.right = x
	} else {
		y.parent.left = x
	}
	// y becomes x's right child.
	x.right = y
	y.parent = x
}
// RbTreeInsert inserts v into the tree as an ordinary BST insertion
// and then rebalances via rbTreeInsertFixup. It panics if v does not
// implement Comparable. Equal keys are inserted to the right.
//
// The leftover debug fmt.Println and dead commented-out assignments
// have been removed; behavior is otherwise unchanged.
func (T *RbTree) RbTreeInsert(v interface{}) {
	v0, ok := v.(Comparable)
	if !ok {
		panic("must be comparable")
	}
	z := &RbTreeNode{key: v, color: RED} // new nodes start red to preserve black height
	x := T.root
	y := T.root // tracks the last non-nil node visited; becomes z's parent
	for x != nil {
		y = x
		if v0.compare(x.key) < 0 {
			x = x.left
		} else {
			x = x.right
		}
	}
	z.parent = y
	switch {
	case y == nil:
		T.root = z
	case v0.compare(y.key) < 0:
		y.left = z
	default:
		y.right = z
	}
	T.rbTreeInsertFixup(z)
}
// rbTreeInsertFixup restores the red-black properties after z (a red
// node) has been linked into the tree, following CLRS RB-INSERT-FIXUP.
//
// BUG FIX: the previous version returned early when z.parent == nil,
// skipping the final T.root.color = BLACK. A freshly inserted root was
// therefore left red, and the next insertion then dereferenced a nil
// grandparent inside the loop. Removing the early return means the
// root is always blackened, after which a red parent implies a non-nil
// grandparent, as the loop assumes.
func (T *RbTree) rbTreeInsertFixup(z *RbTreeNode) {
	for z.parent != nil && z.parent.color == RED {
		if z.parent == z.parent.parent.left {
			y := z.parent.parent.right // z's uncle
			if y != nil && y.color == RED { // case 1: red uncle — recolor and move up
				z.parent.color = BLACK
				y.color = BLACK
				z.parent.parent.color = RED
				z = z.parent.parent
			} else {
				if z == z.parent.right { // case 2: "triangle" — rotate into a line
					z = z.parent
					T.leftRotate(z)
				}
				z.parent.color = BLACK // case 3: "line" — recolor and rotate grandparent
				z.parent.parent.color = RED
				T.rightRotate(z.parent.parent)
			}
		} else { // mirror image: z's parent is a right child
			y := z.parent.parent.left // z's uncle
			if y != nil && y.color == RED { // case 1
				z.parent.color = BLACK
				y.color = BLACK
				z.parent.parent.color = RED
				z = z.parent.parent
			} else {
				if z == z.parent.left { // case 2
					z = z.parent
					T.rightRotate(z)
				}
				z.parent.color = BLACK // case 3
				z.parent.parent.color = RED
				T.leftRotate(z.parent.parent)
			}
		}
	}
	// The root is always black (restores property 2 when the loop
	// pushed red up to the root, and blackens a newly inserted root).
	T.root.color = BLACK
}
// RbTreeDelete removes node z from the tree, following the CLRS
// RB-DELETE scheme adapted to nil leaves instead of a sentinel node.
//
// NOTE(review): with nil leaves, x can be nil after the transplant and
// the fixup is then skipped (see the guard at the bottom). The
// textbook algorithm runs the fixup on the sentinel in that case;
// verify rebalancing is still correct when a childless black node is
// deleted.
func (T *RbTree) RbTreeDelete(z *RbTreeNode) {
	y := z
	var x *RbTreeNode
	// yoColor remembers the original color of the node that is
	// removed or moved; removing a black node may break black-height.
	yoColor := y.color
	if z.left == nil {
		x = z.right
		T.rbTransplant(z, z.right)
	} else if z.right == nil {
		x = z.left
		T.rbTransplant(z, z.left)
	} else {
		// Two children: splice in z's in-order successor y.
		y = RbTreeMinimum(z.right)
		yoColor = y.color
		x = y.right
		if x != nil && y.parent == z {
			x.parent = y
		} else {
			T.rbTransplant(y, y.right)
			y.right = z.right
			if y.right != nil {
				y.right.parent = y
			}
		}
		T.rbTransplant(z, y)
		y.left = z.left
		y.left.parent = y
		y.color = z.color
	}
	if yoColor == BLACK && x != nil {
		T.rbTreeDeleteFixup(x)
	}
}
// rbTransplant replaces the subtree rooted at u with the subtree
// rooted at v, fixing up the parent links (v may be nil).
func (T *RbTree) rbTransplant(u, v *RbTreeNode) {
	if u.parent == nil {
		T.root = v
	} else if u == u.parent.left {
		u.parent.left = v
	} else {
		u.parent.right = v
	}
	if v != nil {
		v.parent = u.parent
	}
}
// rbTreeDeleteFixup restores the red-black properties after a black
// node was removed, following CLRS RB-DELETE-FIXUP. x carries an
// "extra black" that is pushed up or discharged by the four cases.
//
// NOTE(review): the sibling w and its children are dereferenced
// without nil checks; with nil leaves (no sentinel) that assumes they
// are always non-nil here — confirm this holds for all call sites.
func (T *RbTree) rbTreeDeleteFixup(x *RbTreeNode) {
	for x != T.root && x.color == BLACK {
		if x == x.parent.left {
			w := x.parent.right // x's sibling
			if w.color == RED { // case 1: red sibling — rotate to get a black one
				w.color = BLACK
				x.parent.color = RED
				T.leftRotate(x.parent)
				w = x.parent.right
			}
			if w.left.color == BLACK && w.right.color == BLACK { // case 2: both nephews black — push black up
				w.color = RED
				x = x.parent
			} else {
				if w.right.color == BLACK { // case 3: near nephew red — rotate toward far side
					w.left.color = BLACK
					w.color = RED
					T.rightRotate(w)
					w = x.parent.right
				}
				w.color = x.parent.color // case 4: far nephew red — discharge extra black
				x.parent.color = BLACK
				w.right.color = BLACK
				T.leftRotate(x.parent)
				x = T.root
			}
		} else { // mirror image: x is a right child
			w := x.parent.left // x's sibling
			if w.color == RED { // case 1
				w.color = BLACK
				x.parent.color = RED
				T.rightRotate(x.parent)
				w = x.parent.left
			}
			if w.left.color == BLACK && w.right.color == BLACK { // case 2
				w.color = RED
				x = x.parent
			} else {
				if w.left.color == BLACK { // case 3
					w.right.color = BLACK
					w.color = RED
					T.leftRotate(w)
					w = x.parent.left
				}
				w.color = x.parent.color // case 4
				x.parent.color = BLACK
				w.left.color = BLACK
				T.rightRotate(x.parent)
				x = T.root
			}
		}
	}
	x.color = BLACK
} | datastructs/rbtree.go | 0.528533 | 0.511961 | rbtree.go | starcoder |
package profile
import (
"fmt"
"sort"
"strconv"
"strings"
)
// Merge merges all the profiles in profs into a single Profile.
// Returns a new profile independent of the input profiles. The merged
// profile is compacted to eliminate unused samples, locations,
// functions and mappings. Profiles must have identical profile sample
// and period types or the merge will fail. profile.Period of the
// resulting profile will be the maximum of all profiles, and
// profile.TimeNanos will be the earliest nonzero one.
func Merge(srcs []*Profile) (*Profile, error) {
	if len(srcs) == 0 {
		return nil, fmt.Errorf("no profiles to merge")
	}
	p, err := combineHeaders(srcs)
	if err != nil {
		return nil, err
	}
	pm := &profileMerger{
		p: p,
		samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)),
		locations: make(map[locationKey]*Location, len(srcs[0].Location)),
		functions: make(map[functionKey]*Function, len(srcs[0].Function)),
		mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
	}
	for _, src := range srcs {
		// Clear the profile-specific hash tables
		pm.locationsByID = make(map[uint64]*Location, len(src.Location))
		pm.functionsByID = make(map[uint64]*Function, len(src.Function))
		pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
		if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
			// The Mapping list has the property that the first mapping
			// represents the main binary. Take the first Mapping we see,
			// otherwise the operations below will add mappings in an
			// arbitrary order.
			pm.mapMapping(src.Mapping[0])
		}
		for _, s := range src.Sample {
			if !isZeroSample(s) {
				pm.mapSample(s)
			}
		}
	}
	for _, s := range p.Sample {
		if isZeroSample(s) {
			// If there are any zero samples, re-merge the profile to GC
			// them.
			return Merge([]*Profile{p})
		}
	}
	return p, nil
}
// Normalize normalizes the source profile by multiplying each value in profile by the
// ratio of the sum of the base profile's values of that sample type to the sum of the
// source profile's value of that sample type.
func (p *Profile) Normalize(pb *Profile) error {
	if err := p.compatible(pb); err != nil {
		return err
	}
	// Per-sample-type totals for the base profile.
	baseVals := make([]int64, len(p.SampleType))
	for _, s := range pb.Sample {
		for i, v := range s.Value {
			baseVals[i] += v
		}
	}
	// Per-sample-type totals for the source profile.
	srcVals := make([]int64, len(p.SampleType))
	for _, s := range p.Sample {
		for i, v := range s.Value {
			srcVals[i] += v
		}
	}
	// A zero source total would divide by zero; scale such types to 0.
	normScale := make([]float64, len(baseVals))
	for i := range baseVals {
		if srcVals[i] == 0 {
			normScale[i] = 0.0
		} else {
			normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
		}
	}
	p.ScaleN(normScale)
	return nil
}
// isZeroSample reports whether every value of the sample is zero.
func isZeroSample(s *Sample) bool {
	for _, v := range s.Value {
		if v != 0 {
			return false
		}
	}
	return true
}
// profileMerger accumulates the entities of several source profiles
// into a single output profile p, deduplicating as it goes.
type profileMerger struct {
	p *Profile
	// Memoization tables within a profile.
	// These are keyed by the source profile's own IDs and are reset
	// before each source profile is processed (see Merge).
	locationsByID map[uint64]*Location
	functionsByID map[uint64]*Function
	mappingsByID map[uint64]mapInfo
	// Memoization tables for profile entities.
	// These are keyed by content and persist across source profiles,
	// collapsing equivalent entities into one.
	samples map[sampleKey]*Sample
	locations map[locationKey]*Location
	functions map[functionKey]*Function
	mappings map[mappingKey]*Mapping
}
// mapInfo pairs a merged mapping with the address offset to apply
// when translating addresses from a source mapping into it.
type mapInfo struct {
	m *Mapping
	offset int64
}
// mapSample translates src (from a source profile) into the merged
// profile, remapping its locations and copying its labels. If an
// equivalent sample already exists in the merged profile, src's values
// are accumulated onto it instead of appending a duplicate.
func (pm *profileMerger) mapSample(src *Sample) *Sample {
	s := &Sample{
		Location: make([]*Location, len(src.Location)),
		Value: make([]int64, len(src.Value)),
		Label: make(map[string][]string, len(src.Label)),
		NumLabel: make(map[string][]int64, len(src.NumLabel)),
		NumUnit: make(map[string][]string, len(src.NumLabel)),
	}
	for i, l := range src.Location {
		s.Location[i] = pm.mapLocation(l)
	}
	// Deep-copy label slices so the merged sample does not alias the
	// source profile's storage.
	for k, v := range src.Label {
		vv := make([]string, len(v))
		copy(vv, v)
		s.Label[k] = vv
	}
	for k, v := range src.NumLabel {
		u := src.NumUnit[k]
		vv := make([]int64, len(v))
		uu := make([]string, len(u))
		copy(vv, v)
		copy(uu, u)
		s.NumLabel[k] = vv
		s.NumUnit[k] = uu
	}
	// Check memoization table. Must be done on the remapped location to
	// account for the remapped mapping. Add current values to the
	// existing sample.
	k := s.key()
	if ss, ok := pm.samples[k]; ok {
		for i, v := range src.Value {
			ss.Value[i] += v
		}
		return ss
	}
	copy(s.Value, src.Value)
	pm.samples[k] = s
	pm.p.Sample = append(pm.p.Sample, s)
	return s
}
// key generates sampleKey to be used as a key for maps.
// Two samples get the same key iff they have the same (remapped)
// location stack and the same label/numlabel sets; label order is
// made deterministic by sorting.
func (sample *Sample) key() sampleKey {
	ids := make([]string, len(sample.Location))
	for i, l := range sample.Location {
		ids[i] = strconv.FormatUint(l.ID, 16)
	}
	labels := make([]string, 0, len(sample.Label))
	for k, v := range sample.Label {
		labels = append(labels, fmt.Sprintf("%q%q", k, v))
	}
	sort.Strings(labels)
	numlabels := make([]string, 0, len(sample.NumLabel))
	for k, v := range sample.NumLabel {
		numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
	}
	sort.Strings(numlabels)
	return sampleKey{
		strings.Join(ids, "|"),
		strings.Join(labels, ""),
		strings.Join(numlabels, ""),
	}
}
// sampleKey is the content-based identity of a sample, used to
// deduplicate samples across merged profiles.
type sampleKey struct {
	locations string
	labels string
	numlabels string
}
// mapLocation translates src into the merged profile's location table,
// remapping its mapping and lines. Results are memoized both by the
// source profile's location ID and by the location's content key, so
// equivalent locations across profiles collapse into a single entry.
//
// The redundant re-store of the memoized value in the ID-hit branch
// (a no-op self-assignment) has been removed; behavior is unchanged.
func (pm *profileMerger) mapLocation(src *Location) *Location {
	if src == nil {
		return nil
	}
	// Already translated for this source profile?
	if l, ok := pm.locationsByID[src.ID]; ok {
		return l
	}
	mi := pm.mapMapping(src.Mapping)
	l := &Location{
		ID:       uint64(len(pm.p.Location) + 1),
		Mapping:  mi.m,
		Address:  uint64(int64(src.Address) + mi.offset),
		Line:     make([]Line, len(src.Line)),
		IsFolded: src.IsFolded,
	}
	for i, ln := range src.Line {
		l.Line[i] = pm.mapLine(ln)
	}
	// Check memoization table. Must be done on the remapped location to
	// account for the remapped mapping ID.
	k := l.key()
	if ll, ok := pm.locations[k]; ok {
		pm.locationsByID[src.ID] = ll
		return ll
	}
	pm.locationsByID[src.ID] = l
	pm.locations[k] = l
	pm.p.Location = append(pm.p.Location, l)
	return l
}
// key generates locationKey to be used as a key for maps.
// Addresses are normalized relative to the mapping start so that the
// same code location matches across address-space-randomized runs.
func (l *Location) key() locationKey {
	key := locationKey{
		addr: l.Address,
		isFolded: l.IsFolded,
	}
	if l.Mapping != nil {
		// Normalizes address to handle address space randomization.
		key.addr -= l.Mapping.Start
		key.mappingID = l.Mapping.ID
	}
	// Encode the line stack as "funcID|line|funcID|line|...".
	lines := make([]string, len(l.Line)*2)
	for i, line := range l.Line {
		if line.Function != nil {
			lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
		}
		lines[i*2+1] = strconv.FormatInt(line.Line, 16)
	}
	key.lines = strings.Join(lines, "|")
	return key
}
// locationKey is the content-based identity of a location.
type locationKey struct {
	addr, mappingID uint64
	lines string
	isFolded bool
}
// mapMapping translates src into the merged profile's mapping table,
// memoizing by source ID and by content key. The returned mapInfo
// carries the address offset needed to translate addresses from src
// into the merged mapping.
func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
	if src == nil {
		return mapInfo{}
	}
	if mi, ok := pm.mappingsByID[src.ID]; ok {
		return mi
	}
	// Check memoization tables.
	mk := src.key()
	if m, ok := pm.mappings[mk]; ok {
		// Equivalent mapping already merged; record the address delta
		// between the two load addresses.
		mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
		pm.mappingsByID[src.ID] = mi
		return mi
	}
	m := &Mapping{
		ID: uint64(len(pm.p.Mapping) + 1),
		Start: src.Start,
		Limit: src.Limit,
		Offset: src.Offset,
		File: src.File,
		BuildID: src.BuildID,
		HasFunctions: src.HasFunctions,
		HasFilenames: src.HasFilenames,
		HasLineNumbers: src.HasLineNumbers,
		HasInlineFrames: src.HasInlineFrames,
	}
	pm.p.Mapping = append(pm.p.Mapping, m)
	// Update memoization tables.
	pm.mappings[mk] = m
	mi := mapInfo{m, 0}
	pm.mappingsByID[src.ID] = mi
	return mi
}
// key generates encoded strings of Mapping to be used as a key for
// maps.
func (m *Mapping) key() mappingKey {
	// Normalize addresses to handle address space randomization.
	// Round up to next 4K boundary to avoid minor discrepancies.
	const mapsizeRounding = 0x1000
	size := m.Limit - m.Start
	size = size + mapsizeRounding - 1
	size = size - (size % mapsizeRounding)
	key := mappingKey{
		size: size,
		offset: m.Offset,
	}
	switch {
	case m.BuildID != "":
		key.buildIDOrFile = m.BuildID
	case m.File != "":
		key.buildIDOrFile = m.File
	default:
		// A mapping containing neither build ID nor file name is a fake mapping. A
		// key with empty buildIDOrFile is used for fake mappings so that they are
		// treated as the same mapping during merging.
	}
	return key
}
// mappingKey is the content-based identity of a mapping.
type mappingKey struct {
	size, offset uint64
	buildIDOrFile string
}
// mapLine copies a Line, remapping its function into the merged
// profile's function table.
func (pm *profileMerger) mapLine(src Line) Line {
	return Line{
		Function: pm.mapFunction(src.Function),
		Line:     src.Line,
	}
}
// mapFunction translates src into the merged profile's function table,
// memoizing by source ID and by content key so equivalent functions
// across profiles collapse into a single entry.
func (pm *profileMerger) mapFunction(src *Function) *Function {
	if src == nil {
		return nil
	}
	if f, ok := pm.functionsByID[src.ID]; ok {
		return f
	}
	k := src.key()
	if f, ok := pm.functions[k]; ok {
		pm.functionsByID[src.ID] = f
		return f
	}
	f := &Function{
		ID: uint64(len(pm.p.Function) + 1),
		Name: src.Name,
		SystemName: src.SystemName,
		Filename: src.Filename,
		StartLine: src.StartLine,
	}
	pm.functions[k] = f
	pm.functionsByID[src.ID] = f
	pm.p.Function = append(pm.p.Function, f)
	return f
}
// key generates a struct to be used as a key for maps.
func (f *Function) key() functionKey {
	return functionKey{
		f.StartLine,
		f.Name,
		f.SystemName,
		f.Filename,
	}
}
// functionKey is the content-based identity of a function.
type functionKey struct {
	startLine int64
	name, systemName, fileName string
}
// combineHeaders checks that all profiles can be merged and returns
// their combined profile.
// TimeNanos is the earliest nonzero time, DurationNanos the sum,
// Period the maximum, and comments are concatenated without
// duplicates in first-seen order.
func combineHeaders(srcs []*Profile) (*Profile, error) {
	for _, s := range srcs[1:] {
		if err := srcs[0].compatible(s); err != nil {
			return nil, err
		}
	}
	var timeNanos, durationNanos, period int64
	var comments []string
	seenComments := map[string]bool{}
	var defaultSampleType string
	for _, s := range srcs {
		if timeNanos == 0 || s.TimeNanos < timeNanos {
			timeNanos = s.TimeNanos
		}
		durationNanos += s.DurationNanos
		if period == 0 || period < s.Period {
			period = s.Period
		}
		for _, c := range s.Comments {
			if seen := seenComments[c]; !seen {
				comments = append(comments, c)
				seenComments[c] = true
			}
		}
		if defaultSampleType == "" {
			defaultSampleType = s.DefaultSampleType
		}
	}
	p := &Profile{
		SampleType: make([]*ValueType, len(srcs[0].SampleType)),
		DropFrames: srcs[0].DropFrames,
		KeepFrames: srcs[0].KeepFrames,
		TimeNanos: timeNanos,
		DurationNanos: durationNanos,
		PeriodType: srcs[0].PeriodType,
		Period: period,
		Comments: comments,
		DefaultSampleType: defaultSampleType,
	}
	copy(p.SampleType, srcs[0].SampleType)
	return p, nil
}
// compatible determines if two profiles can be compared/merged.
// returns nil if the profiles are compatible; otherwise an error with
// details on the incompatibility.
// Profiles are compatible when their period types match and their
// sample types match pairwise, in order.
func (p *Profile) compatible(pb *Profile) error {
	if !equalValueType(p.PeriodType, pb.PeriodType) {
		return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
	}
	if len(p.SampleType) != len(pb.SampleType) {
		return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
	}
	for i := range p.SampleType {
		if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
			return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
		}
	}
	return nil
}
// equalValueType returns true if the two value types are semantically
// equal. It ignores the internal fields used during encode/decode.
func equalValueType(st1, st2 *ValueType) bool {
	return st1.Type == st2.Type && st1.Unit == st2.Unit
} | src/internal/profile/merge.go | 0.715523 | 0.577436 | merge.go | starcoder |
package storetest
import (
"strings"
"testing"
"github.com/mattermost/mattermost-server/model"
"github.com/mattermost/mattermost-server/store"
"github.com/stretchr/testify/assert"
)
// TestGroupStore runs the full suite of group-store subtests against
// the supplied store implementation.
func TestGroupStore(t *testing.T, ss store.Store) {
	// Table of subtests; execution order matches declaration order.
	subtests := []struct {
		name string
		fn   func(*testing.T, store.Store)
	}{
		{"Create", testGroupStoreCreate},
		{"Get", testGroupStoreGet},
		{"GetByRemoteID", testGroupStoreGetByRemoteID},
		{"GetAllBySource", testGroupStoreGetAllByType},
		{"Update", testGroupStoreUpdate},
		{"Delete", testGroupStoreDelete},
		{"GetMemberUsers", testGroupGetMemberUsers},
		{"GetMemberUsersPage", testGroupGetMemberUsersPage},
		{"CreateOrRestoreMember", testGroupCreateOrRestoreMember},
		{"DeleteMember", testGroupDeleteMember},
		{"CreateGroupSyncable", testCreateGroupSyncable},
		{"GetGroupSyncable", testGetGroupSyncable},
		{"GetAllGroupSyncablesByGroupId", testGetAllGroupSyncablesByGroup},
		{"UpdateGroupSyncable", testUpdateGroupSyncable},
		{"DeleteGroupSyncable", testDeleteGroupSyncable},
		{"PendingAutoAddTeamMembers", testPendingAutoAddTeamMembers},
		{"PendingAutoAddChannelMembers", testPendingAutoAddChannelMembers},
	}
	for _, st := range subtests {
		st := st
		t.Run(st.name, func(t *testing.T) { st.fn(t, ss) })
	}
}
// testGroupStoreCreate exercises Group().Create: the happy path,
// required-field validation, the unique-name constraint, max field
// lengths, and source validation.
func testGroupStoreCreate(t *testing.T, ss store.Store) {
	// Save a new group
	g1 := &model.Group{
		Name: model.NewId(),
		DisplayName: model.NewId(),
		Source: model.GroupSourceLdap,
		Description: model.NewId(),
		RemoteId: model.NewId(),
	}
	// Happy path
	res1 := <-ss.Group().Create(g1)
	assert.Nil(t, res1.Err)
	d1 := res1.Data.(*model.Group)
	assert.Len(t, d1.Id, 26)
	assert.Equal(t, g1.Name, d1.Name)
	assert.Equal(t, g1.DisplayName, d1.DisplayName)
	assert.Equal(t, g1.Description, d1.Description)
	assert.Equal(t, g1.RemoteId, d1.RemoteId)
	assert.NotZero(t, d1.CreateAt)
	assert.NotZero(t, d1.UpdateAt)
	assert.Zero(t, d1.DeleteAt)
	// Requires name and display name
	g2 := &model.Group{
		Name: "",
		DisplayName: model.NewId(),
		Source: model.GroupSourceLdap,
		RemoteId: model.NewId(),
	}
	res2 := <-ss.Group().Create(g2)
	assert.Nil(t, res2.Data)
	assert.NotNil(t, res2.Err)
	assert.Equal(t, res2.Err.Id, "model.group.name.app_error")
	g2.Name = model.NewId()
	g2.DisplayName = ""
	res3 := <-ss.Group().Create(g2)
	assert.Nil(t, res3.Data)
	assert.NotNil(t, res3.Err)
	assert.Equal(t, res3.Err.Id, "model.group.display_name.app_error")
	// Won't accept a duplicate name
	g4 := &model.Group{
		Name: model.NewId(),
		DisplayName: model.NewId(),
		Source: model.GroupSourceLdap,
		RemoteId: model.NewId(),
	}
	res5 := <-ss.Group().Create(g4)
	assert.Nil(t, res5.Err)
	g4b := &model.Group{
		Name: g4.Name,
		DisplayName: model.NewId(),
		Source: model.GroupSourceLdap,
		RemoteId: model.NewId(),
	}
	res5b := <-ss.Group().Create(g4b)
	assert.Nil(t, res5b.Data)
	assert.Equal(t, res5b.Err.Id, "store.sql_group.unique_constraint")
	// Fields cannot be greater than max values
	g5 := &model.Group{
		Name: strings.Repeat("x", model.GroupNameMaxLength),
		DisplayName: strings.Repeat("x", model.GroupDisplayNameMaxLength),
		Description: strings.Repeat("x", model.GroupDescriptionMaxLength),
		Source: model.GroupSourceLdap,
		RemoteId: model.NewId(),
	}
	assert.Nil(t, g5.IsValidForCreate())
	// Exceed each max length by one character in turn; each should
	// fail validation, then be reset to a valid value.
	g5.Name = g5.Name + "x"
	assert.Equal(t, g5.IsValidForCreate().Id, "model.group.name.app_error")
	g5.Name = model.NewId()
	assert.Nil(t, g5.IsValidForCreate())
	g5.DisplayName = g5.DisplayName + "x"
	assert.Equal(t, g5.IsValidForCreate().Id, "model.group.display_name.app_error")
	g5.DisplayName = model.NewId()
	assert.Nil(t, g5.IsValidForCreate())
	g5.Description = g5.Description + "x"
	assert.Equal(t, g5.IsValidForCreate().Id, "model.group.description.app_error")
	g5.Description = model.NewId()
	assert.Nil(t, g5.IsValidForCreate())
	// Must use a valid type
	g6 := &model.Group{
		Name: model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Source: model.GroupSource("fake"),
		RemoteId: model.NewId(),
	}
	assert.Equal(t, g6.IsValidForCreate().Id, "model.group.source.app_error")
}
// testGroupStoreGet exercises GroupStore.Get: fetching a freshly created
// group by ID returns an identical record, and an unknown ID yields the
// no_rows error.
func testGroupStoreGet(t *testing.T, ss store.Store) {
	// Create a group
	g1 := &model.Group{
		Name:        model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Source:      model.GroupSourceLdap,
		RemoteId:    model.NewId(),
	}
	res1 := <-ss.Group().Create(g1)
	assert.Nil(t, res1.Err)
	d1 := res1.Data.(*model.Group)
	assert.Len(t, d1.Id, 26)
	// Get the group and verify every persisted field matches the
	// record returned from Create.
	res2 := <-ss.Group().Get(d1.Id)
	assert.Nil(t, res2.Err)
	d2 := res2.Data.(*model.Group)
	assert.Equal(t, d1.Id, d2.Id)
	assert.Equal(t, d1.Name, d2.Name)
	assert.Equal(t, d1.DisplayName, d2.DisplayName)
	assert.Equal(t, d1.Description, d2.Description)
	assert.Equal(t, d1.RemoteId, d2.RemoteId)
	assert.Equal(t, d1.CreateAt, d2.CreateAt)
	assert.Equal(t, d1.UpdateAt, d2.UpdateAt)
	assert.Equal(t, d1.DeleteAt, d2.DeleteAt)
	// Get an invalid group: a random (never-created) ID must miss
	res3 := <-ss.Group().Get(model.NewId())
	assert.NotNil(t, res3.Err)
	assert.Equal(t, res3.Err.Id, "store.sql_group.no_rows")
}
// testGroupStoreGetByRemoteID exercises GroupStore.GetByRemoteID: a lookup by
// (RemoteId, Source) returns the stored record, and a lookup with an unknown
// remote ID and a non-matching source yields the no_rows error.
func testGroupStoreGetByRemoteID(t *testing.T, ss store.Store) {
	// Create a group
	g1 := &model.Group{
		Name:        model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Source:      model.GroupSourceLdap,
		RemoteId:    model.NewId(),
	}
	res1 := <-ss.Group().Create(g1)
	assert.Nil(t, res1.Err)
	d1 := res1.Data.(*model.Group)
	assert.Len(t, d1.Id, 26)
	// Get the group by its remote ID and LDAP source; all fields must
	// match the record returned from Create.
	res2 := <-ss.Group().GetByRemoteID(d1.RemoteId, model.GroupSourceLdap)
	assert.Nil(t, res2.Err)
	d2 := res2.Data.(*model.Group)
	assert.Equal(t, d1.Id, d2.Id)
	assert.Equal(t, d1.Name, d2.Name)
	assert.Equal(t, d1.DisplayName, d2.DisplayName)
	assert.Equal(t, d1.Description, d2.Description)
	assert.Equal(t, d1.RemoteId, d2.RemoteId)
	assert.Equal(t, d1.CreateAt, d2.CreateAt)
	assert.Equal(t, d1.UpdateAt, d2.UpdateAt)
	assert.Equal(t, d1.DeleteAt, d2.DeleteAt)
	// Get an invalid group: both the remote ID and the source are
	// non-matching here, so the lookup must miss.
	res3 := <-ss.Group().GetByRemoteID(model.NewId(), model.GroupSource("fake"))
	assert.NotNil(t, res3.Err)
	assert.Equal(t, res3.Err.Id, "store.sql_group.no_rows")
}
// testGroupStoreGetAllByType exercises GroupStore.GetAllBySource: after
// creating several LDAP groups, a source-scoped listing must contain every
// one of them. (The function name says "ByType" but the store method under
// test is GetAllBySource.)
func testGroupStoreGetAllByType(t *testing.T, ss store.Store) {
	numGroups := 10
	groups := []*model.Group{}
	// Create groups
	for i := 0; i < numGroups; i++ {
		g := &model.Group{
			Name:        model.NewId(),
			DisplayName: model.NewId(),
			Description: model.NewId(),
			Source:      model.GroupSourceLdap,
			RemoteId:    model.NewId(),
		}
		groups = append(groups, g)
		res := <-ss.Group().Create(g)
		assert.Nil(t, res.Err)
	}
	// Returns all the groups. The store may contain groups created by
	// other tests, so only a lower bound on the count is asserted.
	res1 := <-ss.Group().GetAllBySource(model.GroupSourceLdap)
	d1 := res1.Data.([]*model.Group)
	assert.Condition(t, func() bool { return len(d1) >= numGroups })
	// Every group created above must be present in the listing.
	for _, expectedGroup := range groups {
		present := false
		for _, dbGroup := range d1 {
			if dbGroup.Id == expectedGroup.Id {
				present = true
				break
			}
		}
		assert.True(t, present)
	}
}
// testGroupStoreUpdate exercises GroupStore.Update: the happy path (mutable
// fields change while Id/CreateAt/Source are preserved), required-field
// validation, the unique-name constraint on update, and the immutability of
// CreateAt and DeleteAt.
func testGroupStoreUpdate(t *testing.T, ss store.Store) {
	// Save a new group
	g1 := &model.Group{
		Name:        "g1-test",
		DisplayName: model.NewId(),
		Source:      model.GroupSourceLdap,
		Description: model.NewId(),
		RemoteId:    model.NewId(),
	}
	// Create a group
	res := <-ss.Group().Create(g1)
	assert.Nil(t, res.Err)
	d1 := res.Data.(*model.Group)
	// Update happy path: copy the group and change all mutable fields.
	g1Update := &model.Group{}
	*g1Update = *g1
	g1Update.Name = model.NewId()
	g1Update.DisplayName = model.NewId()
	g1Update.Description = model.NewId()
	g1Update.RemoteId = model.NewId()
	res2 := <-ss.Group().Update(g1Update)
	assert.Nil(t, res2.Err)
	ud1 := res2.Data.(*model.Group)
	// Not changed...
	assert.Equal(t, d1.Id, ud1.Id)
	assert.Equal(t, d1.CreateAt, ud1.CreateAt)
	assert.Equal(t, d1.Source, ud1.Source)
	// Still zero...
	assert.Zero(t, ud1.DeleteAt)
	// Updated...
	assert.Equal(t, g1Update.Name, ud1.Name)
	assert.Equal(t, g1Update.DisplayName, ud1.DisplayName)
	assert.Equal(t, g1Update.Description, ud1.Description)
	assert.Equal(t, g1Update.RemoteId, ud1.RemoteId)
	// Requires name and display name
	res3 := <-ss.Group().Update(&model.Group{
		Id:          d1.Id,
		Name:        "",
		DisplayName: model.NewId(),
		Source:      model.GroupSourceLdap,
		RemoteId:    model.NewId(),
		Description: model.NewId(),
	})
	assert.Nil(t, res3.Data)
	assert.NotNil(t, res3.Err)
	assert.Equal(t, res3.Err.Id, "model.group.name.app_error")
	res4 := <-ss.Group().Update(&model.Group{
		Id:          d1.Id,
		Name:        model.NewId(),
		DisplayName: "",
		Source:      model.GroupSourceLdap,
		RemoteId:    model.NewId(),
	})
	assert.Nil(t, res4.Data)
	assert.NotNil(t, res4.Err)
	assert.Equal(t, res4.Err.Id, "model.group.display_name.app_error")
	// Create another Group
	g2 := &model.Group{
		Name:        model.NewId(),
		DisplayName: model.NewId(),
		Source:      model.GroupSourceLdap,
		Description: model.NewId(),
		RemoteId:    model.NewId(),
	}
	res5 := <-ss.Group().Create(g2)
	assert.Nil(t, res5.Err)
	d2 := res5.Data.(*model.Group)
	// Can't update the name to be a duplicate of an existing group's name
	res6 := <-ss.Group().Update(&model.Group{
		Id:          d2.Id,
		Name:        g1Update.Name, // collides with the first group's current name
		DisplayName: model.NewId(),
		Source:      model.GroupSourceLdap,
		Description: model.NewId(),
		RemoteId:    model.NewId(),
	})
	assert.Equal(t, res6.Err.Id, "store.update_error")
	// Cannot update CreateAt: the store ignores the incoming value.
	someVal := model.GetMillis()
	d1.CreateAt = someVal
	res7 := <-ss.Group().Update(d1)
	d3 := res7.Data.(*model.Group)
	assert.NotEqual(t, someVal, d3.CreateAt)
	// Cannot update DeleteAt to non-zero (deletion goes through Delete).
	d1.DeleteAt = 1
	res9 := <-ss.Group().Update(d1)
	assert.Equal(t, "model.group.delete_at.app_error", res9.Err.Id)
	//...except for 0 for DeleteAt
	d1.DeleteAt = 0
	res8 := <-ss.Group().Update(d1)
	assert.Nil(t, res8.Err)
	d4 := res8.Data.(*model.Group)
	assert.Zero(t, d4.DeleteAt)
}
// testGroupStoreDelete exercises GroupStore.Delete: a soft delete sets
// DeleteAt and removes the group from source-scoped listings, while deleting
// a non-existent or already-deleted group yields the no_rows error.
func testGroupStoreDelete(t *testing.T, ss store.Store) {
	// Save a group
	g1 := &model.Group{
		Name:        model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Source:      model.GroupSourceLdap,
		RemoteId:    model.NewId(),
	}
	res1 := <-ss.Group().Create(g1)
	assert.Nil(t, res1.Err)
	d1 := res1.Data.(*model.Group)
	assert.Len(t, d1.Id, 26)
	// Check the group is retrievable
	res2 := <-ss.Group().Get(d1.Id)
	assert.Nil(t, res2.Err)
	// Get the before count so the listing delta can be checked below.
	res7 := <-ss.Group().GetAllBySource(model.GroupSourceLdap)
	d7 := res7.Data.([]*model.Group)
	beforeCount := len(d7)
	// Delete the group
	res3 := <-ss.Group().Delete(d1.Id)
	assert.Nil(t, res3.Err)
	// Check the group is deleted: Get still returns it, but DeleteAt is
	// now non-zero (soft delete).
	res4 := <-ss.Group().Get(d1.Id)
	d4 := res4.Data.(*model.Group)
	assert.NotZero(t, d4.DeleteAt)
	// Check the after count: the listing shrank by exactly one.
	res5 := <-ss.Group().GetAllBySource(model.GroupSourceLdap)
	d5 := res5.Data.([]*model.Group)
	afterCount := len(d5)
	assert.Condition(t, func() bool { return beforeCount == afterCount+1 })
	// Try and delete a nonexistent group
	res6 := <-ss.Group().Delete(model.NewId())
	assert.NotNil(t, res6.Err)
	assert.Equal(t, res6.Err.Id, "store.sql_group.no_rows")
	// Cannot delete again
	res8 := <-ss.Group().Delete(d1.Id)
	assert.Equal(t, res8.Err.Id, "store.sql_group.no_rows")
}
// testGroupGetMemberUsers exercises GroupStore.GetMemberUsers: it returns all
// active members of a group, an empty slice for an unknown group ID, and
// excludes soft-deleted members.
func testGroupGetMemberUsers(t *testing.T, ss store.Store) {
	// Save a group
	g1 := &model.Group{
		Name:        model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Source:      model.GroupSourceLdap,
		RemoteId:    model.NewId(),
	}
	res := <-ss.Group().Create(g1)
	assert.Nil(t, res.Err)
	group := res.Data.(*model.Group)
	// Create two users and add both as group members.
	u1 := &model.User{
		Email:    MakeEmail(),
		Username: model.NewId(),
	}
	res = <-ss.User().Save(u1)
	assert.Nil(t, res.Err)
	user1 := res.Data.(*model.User)
	res = <-ss.Group().CreateOrRestoreMember(group.Id, user1.Id)
	assert.Nil(t, res.Err)
	u2 := &model.User{
		Email:    MakeEmail(),
		Username: model.NewId(),
	}
	res = <-ss.User().Save(u2)
	assert.Nil(t, res.Err)
	user2 := res.Data.(*model.User)
	res = <-ss.Group().CreateOrRestoreMember(group.Id, user2.Id)
	assert.Nil(t, res.Err)
	// Check returns members
	res = <-ss.Group().GetMemberUsers(group.Id)
	assert.Nil(t, res.Err)
	groupMembers := res.Data.([]*model.User)
	assert.Equal(t, 2, len(groupMembers))
	// Check madeup id: an unknown group yields an empty (not error) result.
	res = <-ss.Group().GetMemberUsers(model.NewId())
	assert.Equal(t, 0, len(res.Data.([]*model.User)))
	// Delete a member
	<-ss.Group().DeleteMember(group.Id, user1.Id)
	// Should not return deleted members
	res = <-ss.Group().GetMemberUsers(group.Id)
	groupMembers = res.Data.([]*model.User)
	assert.Equal(t, 1, len(groupMembers))
}
// testGroupGetMemberUsersPage exercises GroupStore.GetMemberUsersPage:
// paginated member listing, page boundaries, empty results for an unknown
// group, and exclusion of soft-deleted members.
func testGroupGetMemberUsersPage(t *testing.T, ss store.Store) {
	// Save a group
	g1 := &model.Group{
		Name:        model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Source:      model.GroupSourceLdap,
		RemoteId:    model.NewId(),
	}
	res := <-ss.Group().Create(g1)
	assert.Nil(t, res.Err)
	group := res.Data.(*model.Group)
	// Create two users and add both as group members.
	u1 := &model.User{
		Email:    MakeEmail(),
		Username: model.NewId(),
	}
	res = <-ss.User().Save(u1)
	assert.Nil(t, res.Err)
	user1 := res.Data.(*model.User)
	res = <-ss.Group().CreateOrRestoreMember(group.Id, user1.Id)
	assert.Nil(t, res.Err)
	u2 := &model.User{
		Email:    MakeEmail(),
		Username: model.NewId(),
	}
	res = <-ss.User().Save(u2)
	assert.Nil(t, res.Err)
	user2 := res.Data.(*model.User)
	res = <-ss.Group().CreateOrRestoreMember(group.Id, user2.Id)
	assert.Nil(t, res.Err)
	// Check returns members: a large page size returns everyone.
	res = <-ss.Group().GetMemberUsersPage(group.Id, 0, 100)
	assert.Nil(t, res.Err)
	groupMembers := res.Data.([]*model.User)
	assert.Equal(t, 2, len(groupMembers))
	// Check page 1: with perPage=1 the first page holds only user2
	// (the ordering asserted here places the later-added member first —
	// NOTE(review): ordering contract is implied by these asserts only).
	res = <-ss.Group().GetMemberUsersPage(group.Id, 0, 1)
	assert.Nil(t, res.Err)
	groupMembers = res.Data.([]*model.User)
	assert.Equal(t, 1, len(groupMembers))
	assert.Equal(t, user2.Id, groupMembers[0].Id)
	// Check page 2
	res = <-ss.Group().GetMemberUsersPage(group.Id, 1, 1)
	assert.Nil(t, res.Err)
	groupMembers = res.Data.([]*model.User)
	assert.Equal(t, 1, len(groupMembers))
	assert.Equal(t, user1.Id, groupMembers[0].Id)
	// Check madeup id: an unknown group yields an empty (not error) result.
	res = <-ss.Group().GetMemberUsersPage(model.NewId(), 0, 100)
	assert.Equal(t, 0, len(res.Data.([]*model.User)))
	// Delete a member
	<-ss.Group().DeleteMember(group.Id, user1.Id)
	// Should not return deleted members
	res = <-ss.Group().GetMemberUsersPage(group.Id, 0, 100)
	groupMembers = res.Data.([]*model.User)
	assert.Equal(t, 1, len(groupMembers))
}
// testGroupCreateOrRestoreMember exercises GroupStore.CreateOrRestoreMember:
// creating a membership, rejecting a duplicate active membership, rejecting
// an unknown group ID, and restoring a previously soft-deleted membership.
func testGroupCreateOrRestoreMember(t *testing.T, ss store.Store) {
	// Create group
	g1 := &model.Group{
		Name:        model.NewId(),
		DisplayName: model.NewId(),
		Source:      model.GroupSourceLdap,
		RemoteId:    model.NewId(),
	}
	res1 := <-ss.Group().Create(g1)
	assert.Nil(t, res1.Err)
	group := res1.Data.(*model.Group)
	// Create user
	u1 := &model.User{
		Email:    MakeEmail(),
		Username: model.NewId(),
	}
	res2 := <-ss.User().Save(u1)
	assert.Nil(t, res2.Err)
	user := res2.Data.(*model.User)
	// Happy path
	res3 := <-ss.Group().CreateOrRestoreMember(group.Id, user.Id)
	assert.Nil(t, res3.Err)
	d2 := res3.Data.(*model.GroupMember)
	assert.Equal(t, d2.GroupId, group.Id)
	assert.Equal(t, d2.UserId, user.Id)
	assert.NotZero(t, d2.CreateAt)
	assert.Zero(t, d2.DeleteAt)
	// Duplicate composite key (GroupId, UserId)
	res4 := <-ss.Group().CreateOrRestoreMember(group.Id, user.Id)
	assert.Equal(t, res4.Err.Id, "store.sql_group.uniqueness_error")
	// Invalid GroupId
	res6 := <-ss.Group().CreateOrRestoreMember(model.NewId(), user.Id)
	assert.Equal(t, res6.Err.Id, "store.insert_error")
	// Restores a deleted member: while the membership is still active a
	// repeat call errs; after DeleteMember the same call restores it and
	// the member count goes back up by one.
	res := <-ss.Group().CreateOrRestoreMember(group.Id, user.Id)
	assert.NotNil(t, res.Err)
	res = <-ss.Group().DeleteMember(group.Id, user.Id)
	assert.Nil(t, res.Err)
	res = <-ss.Group().GetMemberUsers(group.Id)
	beforeRestoreCount := len(res.Data.([]*model.User))
	res = <-ss.Group().CreateOrRestoreMember(group.Id, user.Id)
	assert.Nil(t, res.Err)
	res = <-ss.Group().GetMemberUsers(group.Id)
	afterRestoreCount := len(res.Data.([]*model.User))
	assert.Equal(t, beforeRestoreCount+1, afterRestoreCount)
}
// testGroupDeleteMember exercises GroupStore.DeleteMember: the happy path
// (soft delete preserves CreateAt and sets DeleteAt), deleting an
// already-deleted member, and deleting with a non-existent user or group.
func testGroupDeleteMember(t *testing.T, ss store.Store) {
	// Create group
	g1 := &model.Group{
		Name:        model.NewId(),
		DisplayName: model.NewId(),
		Source:      model.GroupSourceLdap,
		RemoteId:    model.NewId(),
	}
	res1 := <-ss.Group().Create(g1)
	assert.Nil(t, res1.Err)
	group := res1.Data.(*model.Group)
	// Create user
	u1 := &model.User{
		Email:    MakeEmail(),
		Username: model.NewId(),
	}
	res2 := <-ss.User().Save(u1)
	assert.Nil(t, res2.Err)
	user := res2.Data.(*model.User)
	// Create member
	res3 := <-ss.Group().CreateOrRestoreMember(group.Id, user.Id)
	assert.Nil(t, res3.Err)
	d1 := res3.Data.(*model.GroupMember)
	// Happy path: deletion is a soft delete that preserves CreateAt and
	// stamps a non-zero DeleteAt.
	res4 := <-ss.Group().DeleteMember(group.Id, user.Id)
	assert.Nil(t, res4.Err)
	d2 := res4.Data.(*model.GroupMember)
	assert.Equal(t, d2.GroupId, group.Id)
	assert.Equal(t, d2.UserId, user.Id)
	assert.Equal(t, d2.CreateAt, d1.CreateAt)
	assert.NotZero(t, d2.DeleteAt)
	// Delete an already deleted member
	res5 := <-ss.Group().DeleteMember(group.Id, user.Id)
	assert.Equal(t, res5.Err.Id, "store.sql_group.no_rows")
	// Delete with non-existent User
	res8 := <-ss.Group().DeleteMember(group.Id, model.NewId())
	assert.Equal(t, res8.Err.Id, "store.sql_group.no_rows")
	// Delete non-existent Group. Bug fix: the original passed group.Id as
	// the *user* argument, so this case tested two mismatched IDs rather
	// than a missing group with a real user.
	res9 := <-ss.Group().DeleteMember(model.NewId(), user.Id)
	assert.Equal(t, res9.Err.Id, "store.sql_group.no_rows")
}
// testCreateGroupSyncable exercises GroupStore.CreateGroupSyncable: group-ID
// validation and the happy path of linking a group to a team.
func testCreateGroupSyncable(t *testing.T, ss store.Store) {
	// Invalid GroupID: "x" fails model validation before hitting the DB.
	res2 := <-ss.Group().CreateGroupSyncable(&model.GroupSyncable{
		GroupId:    "x",
		CanLeave:   true,
		SyncableId: string(model.NewId()),
		Type:       model.GroupSyncableTypeTeam,
	})
	assert.Equal(t, res2.Err.Id, "model.group_syncable.group_id.app_error")
	// TODO: Add this validation test in phase 2 of LDAP groups sync.
	// Invalid CanLeave/AutoAdd combo (both false)
	// res3 := <-ss.Group().CreateGroupSyncable(&model.GroupSyncable{
	// 	GroupId:    model.NewId(),
	// 	CanLeave:   false,
	// 	AutoAdd:    false,
	// 	SyncableId: string(model.NewId()),
	// 	Type:       model.GroupSyncableTypeTeam,
	// })
	// assert.Equal(t, res3.Err.Id, "model.group_syncable.invalid_state")
	// Create Group
	g1 := &model.Group{
		Name:        model.NewId(),
		DisplayName: model.NewId(),
		Source:      model.GroupSourceLdap,
		RemoteId:    model.NewId(),
	}
	res4 := <-ss.Group().Create(g1)
	assert.Nil(t, res4.Err)
	group := res4.Data.(*model.Group)
	// Create Team. Bug fix: the email literal was garbled
	// ("@simulator.<EMAIL>s.com"); restored to the address used by the
	// sibling tests in this file.
	t1 := &model.Team{
		DisplayName:     "Name",
		Description:     "Some description",
		CompanyName:     "Some company name",
		AllowOpenInvite: false,
		InviteId:        "inviteid0",
		Name:            "z-z-" + model.NewId() + "a",
		Email:           "success+" + model.NewId() + "@simulator.amazonses.com",
		Type:            model.TEAM_OPEN,
	}
	res5 := <-ss.Team().Save(t1)
	assert.Nil(t, res5.Err)
	team := res5.Data.(*model.Team)
	// New GroupSyncable, happy path: input fields round-trip, CreateAt is
	// stamped, and DeleteAt stays zero.
	gt1 := &model.GroupSyncable{
		GroupId:    group.Id,
		CanLeave:   true,
		AutoAdd:    false,
		SyncableId: string(team.Id),
		Type:       model.GroupSyncableTypeTeam,
	}
	res6 := <-ss.Group().CreateGroupSyncable(gt1)
	assert.Nil(t, res6.Err)
	d1 := res6.Data.(*model.GroupSyncable)
	assert.Equal(t, gt1.SyncableId, d1.SyncableId)
	assert.Equal(t, gt1.GroupId, d1.GroupId)
	assert.Equal(t, gt1.CanLeave, d1.CanLeave)
	assert.Equal(t, gt1.AutoAdd, d1.AutoAdd)
	assert.NotZero(t, d1.CreateAt)
	assert.Zero(t, d1.DeleteAt)
}
// testGetGroupSyncable exercises GroupStore.GetGroupSyncable: fetching a
// group-team link by (GroupId, SyncableId, Type) returns the stored record
// with timestamps populated.
func testGetGroupSyncable(t *testing.T, ss store.Store) {
	// Create a group
	g1 := &model.Group{
		Name:        model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Source:      model.GroupSourceLdap,
		RemoteId:    model.NewId(),
	}
	res1 := <-ss.Group().Create(g1)
	assert.Nil(t, res1.Err)
	group := res1.Data.(*model.Group)
	// Create Team
	t1 := &model.Team{
		DisplayName:     "Name",
		Description:     "Some description",
		CompanyName:     "Some company name",
		AllowOpenInvite: false,
		InviteId:        "inviteid0",
		Name:            "z-z-" + model.NewId() + "a",
		Email:           "success+" + model.NewId() + "@simulator.amazonses.com",
		Type:            model.TEAM_OPEN,
	}
	res2 := <-ss.Team().Save(t1)
	assert.Nil(t, res2.Err)
	team := res2.Data.(*model.Team)
	// Create GroupSyncable linking the group to the team.
	gt1 := &model.GroupSyncable{
		GroupId:    group.Id,
		CanLeave:   true,
		AutoAdd:    false,
		SyncableId: string(team.Id),
		Type:       model.GroupSyncableTypeTeam,
	}
	res3 := <-ss.Group().CreateGroupSyncable(gt1)
	assert.Nil(t, res3.Err)
	groupTeam := res3.Data.(*model.GroupSyncable)
	// Get GroupSyncable and verify the persisted fields.
	res4 := <-ss.Group().GetGroupSyncable(groupTeam.GroupId, groupTeam.SyncableId, model.GroupSyncableTypeTeam)
	assert.Nil(t, res4.Err)
	dgt := res4.Data.(*model.GroupSyncable)
	assert.Equal(t, gt1.GroupId, dgt.GroupId)
	assert.Equal(t, gt1.SyncableId, dgt.SyncableId)
	// assert.Equal(t, gt1.CanLeave, dgt.CanLeave) // TODO: Re-add this test in phase 2 of LDAP groups sync.
	assert.Equal(t, gt1.AutoAdd, dgt.AutoAdd)
	// NOTE(review): these assert on gt1 (mutated in place by Create), not
	// on the freshly fetched dgt.
	assert.NotZero(t, gt1.CreateAt)
	assert.NotZero(t, gt1.UpdateAt)
	assert.Zero(t, gt1.DeleteAt)
}
// testGetAllGroupSyncablesByGroup exercises
// GroupStore.GetAllGroupSyncablesByGroupId: after linking one group to
// several teams, a group-scoped listing must contain every link.
func testGetAllGroupSyncablesByGroup(t *testing.T, ss store.Store) {
	numGroupSyncables := 10
	// Create group
	g := &model.Group{
		Name:        model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Source:      model.GroupSourceLdap,
		RemoteId:    model.NewId(),
	}
	res1 := <-ss.Group().Create(g)
	assert.Nil(t, res1.Err)
	group := res1.Data.(*model.Group)
	groupTeams := []*model.GroupSyncable{}
	// Create groupTeams
	for i := 0; i < numGroupSyncables; i++ {
		// Create Team. Bug fix: the email literal was garbled
		// ("@<EMAIL>"); restored to the address used by the sibling
		// tests in this file.
		t1 := &model.Team{
			DisplayName:     "Name",
			Description:     "Some description",
			CompanyName:     "Some company name",
			AllowOpenInvite: false,
			InviteId:        "inviteid0",
			Name:            "z-z-" + model.NewId() + "a",
			Email:           "success+" + model.NewId() + "@simulator.amazonses.com",
			Type:            model.TEAM_OPEN,
		}
		res2 := <-ss.Team().Save(t1)
		assert.Nil(t, res2.Err)
		team := res2.Data.(*model.Team)
		// create groupteam
		res3 := <-ss.Group().CreateGroupSyncable(&model.GroupSyncable{
			GroupId:    group.Id,
			CanLeave:   true,
			SyncableId: string(team.Id),
			Type:       model.GroupSyncableTypeTeam,
		})
		assert.Nil(t, res3.Err)
		groupTeam := res3.Data.(*model.GroupSyncable)
		groupTeams = append(groupTeams, groupTeam)
	}
	// Returns all the group teams; only a lower bound on the count is
	// asserted in case other tests share the store.
	res4 := <-ss.Group().GetAllGroupSyncablesByGroupId(group.Id, model.GroupSyncableTypeTeam)
	d1 := res4.Data.([]*model.GroupSyncable)
	assert.Condition(t, func() bool { return len(d1) >= numGroupSyncables })
	// Every link created above must be present in the listing.
	for _, expectedGroupTeam := range groupTeams {
		present := false
		for _, dbGroupTeam := range d1 {
			if dbGroupTeam.GroupId == expectedGroupTeam.GroupId && dbGroupTeam.SyncableId == expectedGroupTeam.SyncableId {
				present = true
				break
			}
		}
		assert.True(t, present)
	}
}
// testUpdateGroupSyncable exercises GroupStore.UpdateGroupSyncable: the happy
// path (CanLeave/AutoAdd change), updates against a non-existent group or
// team, and the immutability of CreateAt and DeleteAt.
func testUpdateGroupSyncable(t *testing.T, ss store.Store) {
	// Create Group
	g1 := &model.Group{
		Name:        model.NewId(),
		DisplayName: model.NewId(),
		Source:      model.GroupSourceLdap,
		RemoteId:    model.NewId(),
	}
	res4 := <-ss.Group().Create(g1)
	assert.Nil(t, res4.Err)
	group := res4.Data.(*model.Group)
	// Create Team. Bug fix: the email literal was garbled ("@<EMAIL>");
	// restored to the address used by the sibling tests in this file.
	t1 := &model.Team{
		DisplayName:     "Name",
		Description:     "Some description",
		CompanyName:     "Some company name",
		AllowOpenInvite: false,
		InviteId:        "inviteid0",
		Name:            "z-z-" + model.NewId() + "a",
		Email:           "success+" + model.NewId() + "@simulator.amazonses.com",
		Type:            model.TEAM_OPEN,
	}
	res5 := <-ss.Team().Save(t1)
	assert.Nil(t, res5.Err)
	team := res5.Data.(*model.Team)
	// New GroupSyncable, happy path
	gt1 := &model.GroupSyncable{
		GroupId:    group.Id,
		CanLeave:   true,
		AutoAdd:    false,
		SyncableId: string(team.Id),
		Type:       model.GroupSyncableTypeTeam,
	}
	res6 := <-ss.Group().CreateGroupSyncable(gt1)
	assert.Nil(t, res6.Err)
	d1 := res6.Data.(*model.GroupSyncable)
	// Update existing group team: both mutable flags flip.
	gt1.CanLeave = false
	gt1.AutoAdd = true
	res7 := <-ss.Group().UpdateGroupSyncable(gt1)
	assert.Nil(t, res7.Err)
	d2 := res7.Data.(*model.GroupSyncable)
	assert.False(t, d2.CanLeave)
	assert.True(t, d2.AutoAdd)
	// TODO: Add this validation check test in phase 2 of LDAP groups sync.
	// Update to invalid state
	// gt1.AutoAdd = false
	// gt1.CanLeave = false
	// res8 := <-ss.Group().UpdateGroupSyncable(gt1)
	// assert.Equal(t, res8.Err.Id, "model.group_syncable.invalid_state")
	// Non-existent Group
	gt2 := &model.GroupSyncable{
		GroupId:    model.NewId(),
		CanLeave:   true,
		AutoAdd:    false,
		SyncableId: string(team.Id),
		Type:       model.GroupSyncableTypeTeam,
	}
	res9 := <-ss.Group().UpdateGroupSyncable(gt2)
	assert.Equal(t, res9.Err.Id, "store.sql_group.no_rows")
	// Non-existent Team
	gt3 := &model.GroupSyncable{
		GroupId:    group.Id,
		CanLeave:   true,
		AutoAdd:    false,
		SyncableId: string(model.NewId()),
		Type:       model.GroupSyncableTypeTeam,
	}
	res10 := <-ss.Group().UpdateGroupSyncable(gt3)
	assert.Equal(t, res10.Err.Id, "store.sql_group.no_rows")
	// Cannot update CreateAt or DeleteAt: the store ignores the incoming
	// CreateAt value.
	origCreateAt := d1.CreateAt
	d1.CreateAt = model.GetMillis()
	d1.AutoAdd = true
	d1.CanLeave = true
	res11 := <-ss.Group().UpdateGroupSyncable(d1)
	assert.Nil(t, res11.Err)
	d3 := res11.Data.(*model.GroupSyncable)
	assert.Equal(t, origCreateAt, d3.CreateAt)
	// Cannot update DeleteAt to arbitrary value
	d1.DeleteAt = 1
	res12 := <-ss.Group().UpdateGroupSyncable(d1)
	assert.Equal(t, "model.group.delete_at.app_error", res12.Err.Id)
	// Can update DeleteAt to 0
	d1.DeleteAt = 0
	res13 := <-ss.Group().UpdateGroupSyncable(d1)
	assert.Nil(t, res13.Err)
	d4 := res13.Data.(*model.GroupSyncable)
	assert.Zero(t, d4.DeleteAt)
}
// testDeleteGroupSyncable exercises GroupStore.DeleteGroupSyncable: deletion
// against non-existent groups/teams, the happy path (soft delete sets
// DeleteAt and bumps UpdateAt), and double deletion.
func testDeleteGroupSyncable(t *testing.T, ss store.Store) {
	// Create Group
	g1 := &model.Group{
		Name:        model.NewId(),
		DisplayName: model.NewId(),
		Source:      model.GroupSourceLdap,
		RemoteId:    model.NewId(),
	}
	res1 := <-ss.Group().Create(g1)
	assert.Nil(t, res1.Err)
	group := res1.Data.(*model.Group)
	// Create Team
	t1 := &model.Team{
		DisplayName:     "Name",
		Description:     "Some description",
		CompanyName:     "Some company name",
		AllowOpenInvite: false,
		InviteId:        "inviteid0",
		Name:            "z-z-" + model.NewId() + "a",
		Email:           "success+" + model.NewId() + "@simulator.amazonses.com",
		Type:            model.TEAM_OPEN,
	}
	res2 := <-ss.Team().Save(t1)
	assert.Nil(t, res2.Err)
	team := res2.Data.(*model.Team)
	// Create GroupSyncable linking the group to the team.
	gt1 := &model.GroupSyncable{
		GroupId:    group.Id,
		CanLeave:   true,
		AutoAdd:    false,
		SyncableId: string(team.Id),
		Type:       model.GroupSyncableTypeTeam,
	}
	res7 := <-ss.Group().CreateGroupSyncable(gt1)
	assert.Nil(t, res7.Err)
	groupTeam := res7.Data.(*model.GroupSyncable)
	// Non-existent Group
	res5 := <-ss.Group().DeleteGroupSyncable(model.NewId(), groupTeam.SyncableId, model.GroupSyncableTypeTeam)
	assert.Equal(t, res5.Err.Id, "store.sql_group.no_rows")
	// Non-existent Team
	res6 := <-ss.Group().DeleteGroupSyncable(groupTeam.GroupId, string(model.NewId()), model.GroupSyncableTypeTeam)
	assert.Equal(t, res6.Err.Id, "store.sql_group.no_rows")
	// Happy path: a soft delete that stamps DeleteAt, preserves CreateAt,
	// and advances UpdateAt past the original.
	res8 := <-ss.Group().DeleteGroupSyncable(groupTeam.GroupId, groupTeam.SyncableId, model.GroupSyncableTypeTeam)
	assert.Nil(t, res8.Err)
	d1 := res8.Data.(*model.GroupSyncable)
	assert.NotZero(t, d1.DeleteAt)
	assert.Equal(t, d1.GroupId, groupTeam.GroupId)
	assert.Equal(t, d1.SyncableId, groupTeam.SyncableId)
	// assert.Equal(t, d1.CanLeave, groupTeam.CanLeave) // TODO: Re-add this test in phase 2 of LDAP groups sync.
	assert.Equal(t, d1.AutoAdd, groupTeam.AutoAdd)
	assert.Equal(t, d1.CreateAt, groupTeam.CreateAt)
	assert.Condition(t, func() bool { return d1.UpdateAt > groupTeam.UpdateAt })
	// Record already deleted
	res9 := <-ss.Group().DeleteGroupSyncable(d1.GroupId, d1.SyncableId, d1.Type)
	assert.NotNil(t, res9.Err)
	assert.Equal(t, res9.Err.Id, "store.sql_group.group_syncable_already_deleted")
}
// testPendingAutoAddTeamMembers exercises GroupStore.PendingAutoAddTeamMembers:
// a (user, team) pair is pending when the user is an active member of a group
// whose auto-add GroupTeam link, group, and team are all live, the user is not
// yet on the team, and the supplied timestamp precedes the link's
// creation/update time. Each condition is toggled off and back on in turn.
func testPendingAutoAddTeamMembers(t *testing.T, ss store.Store) {
	// Create Group
	res := <-ss.Group().Create(&model.Group{
		Name:        model.NewId(),
		DisplayName: "PendingAutoAddTeamMembers Test Group",
		RemoteId:    model.NewId(),
		Source:      model.GroupSourceLdap,
	})
	assert.Nil(t, res.Err)
	group := res.Data.(*model.Group)
	// Create User
	user := &model.User{
		Email:    MakeEmail(),
		Username: model.NewId(),
	}
	res = <-ss.User().Save(user)
	assert.Nil(t, res.Err)
	user = res.Data.(*model.User)
	// Create GroupMember
	res = <-ss.Group().CreateOrRestoreMember(group.Id, user.Id)
	assert.Nil(t, res.Err)
	// Create Team. Bug fix: the email literal was garbled
	// ("@simulator.<EMAIL>"); restored to the address used by the sibling
	// tests in this file.
	team := &model.Team{
		DisplayName:     "Name",
		Description:     "Some description",
		CompanyName:     "Some company name",
		AllowOpenInvite: false,
		InviteId:        "inviteid0",
		Name:            "z-z-" + model.NewId() + "a",
		Email:           "success+" + model.NewId() + "@simulator.amazonses.com",
		Type:            model.TEAM_OPEN,
	}
	res = <-ss.Team().Save(team)
	assert.Nil(t, res.Err)
	team = res.Data.(*model.Team)
	// Create GroupTeam
	res = <-ss.Group().CreateGroupSyncable(&model.GroupSyncable{
		AutoAdd:    true,
		CanLeave:   true,
		SyncableId: team.Id,
		Type:       model.GroupSyncableTypeTeam,
		GroupId:    group.Id,
	})
	assert.Nil(t, res.Err)
	syncable := res.Data.(*model.GroupSyncable)
	// Time before syncable was created: the pair is pending.
	res = <-ss.Group().PendingAutoAddTeamMembers(syncable.CreateAt - 1)
	assert.Nil(t, res.Err)
	userTeamIDs := res.Data.([]*model.UserTeamIDPair)
	assert.Len(t, userTeamIDs, 1)
	assert.Equal(t, user.Id, userTeamIDs[0].UserID)
	assert.Equal(t, team.Id, userTeamIDs[0].TeamID)
	// Time after syncable was created: nothing is pending.
	res = <-ss.Group().PendingAutoAddTeamMembers(syncable.CreateAt + 1)
	assert.Nil(t, res.Err)
	assert.Len(t, res.Data, 0)
	// Delete and restore GroupMember should return result
	res = <-ss.Group().DeleteMember(group.Id, user.Id)
	assert.Nil(t, res.Err)
	res = <-ss.Group().CreateOrRestoreMember(group.Id, user.Id)
	assert.Nil(t, res.Err)
	res = <-ss.Group().PendingAutoAddTeamMembers(syncable.CreateAt + 1)
	assert.Nil(t, res.Err)
	assert.Len(t, res.Data, 1)
	pristineSyncable := *syncable // keep a copy to restore the link later
	syncable.CanLeave = false
	res = <-ss.Group().UpdateGroupSyncable(syncable)
	assert.Nil(t, res.Err)
	// Time before syncable was updated
	res = <-ss.Group().PendingAutoAddTeamMembers(syncable.UpdateAt - 1)
	assert.Nil(t, res.Err)
	userTeamIDs = res.Data.([]*model.UserTeamIDPair)
	assert.Len(t, userTeamIDs, 1)
	assert.Equal(t, user.Id, userTeamIDs[0].UserID)
	assert.Equal(t, team.Id, userTeamIDs[0].TeamID)
	// Time after syncable was updated
	res = <-ss.Group().PendingAutoAddTeamMembers(syncable.UpdateAt + 1)
	assert.Nil(t, res.Err)
	assert.Len(t, res.Data, 0)
	// Only includes if auto-add
	syncable.AutoAdd = false
	syncable.CanLeave = true // have to update this or the model isn't valid
	res = <-ss.Group().UpdateGroupSyncable(syncable)
	assert.Nil(t, res.Err)
	res = <-ss.Group().PendingAutoAddTeamMembers(0)
	assert.Nil(t, res.Err)
	assert.Len(t, res.Data, 0)
	// reset state of syncable and verify
	res = <-ss.Group().UpdateGroupSyncable(&pristineSyncable)
	assert.Nil(t, res.Err)
	res = <-ss.Group().PendingAutoAddTeamMembers(0)
	assert.Nil(t, res.Err)
	assert.Len(t, res.Data, 1)
	// No result if Group deleted
	res = <-ss.Group().Delete(group.Id)
	assert.Nil(t, res.Err)
	res = <-ss.Group().PendingAutoAddTeamMembers(0)
	assert.Nil(t, res.Err)
	assert.Len(t, res.Data, 0)
	// reset state of group and verify
	group.DeleteAt = 0
	res = <-ss.Group().Update(group)
	assert.Nil(t, res.Err) // fix: the original dropped this error check
	res = <-ss.Group().PendingAutoAddTeamMembers(0)
	assert.Nil(t, res.Err)
	assert.Len(t, res.Data, 1)
	// No result if Team deleted
	team.DeleteAt = model.GetMillis()
	res = <-ss.Team().Update(team)
	assert.Nil(t, res.Err)
	res = <-ss.Group().PendingAutoAddTeamMembers(0)
	assert.Nil(t, res.Err)
	assert.Len(t, res.Data, 0)
	// reset state of team and verify
	team.DeleteAt = 0
	res = <-ss.Team().Update(team)
	assert.Nil(t, res.Err)
	res = <-ss.Group().PendingAutoAddTeamMembers(0)
	assert.Nil(t, res.Err)
	assert.Len(t, res.Data, 1)
	// No result if GroupTeam deleted
	res = <-ss.Group().DeleteGroupSyncable(group.Id, team.Id, model.GroupSyncableTypeTeam)
	assert.Nil(t, res.Err)
	res = <-ss.Group().PendingAutoAddTeamMembers(0)
	assert.Nil(t, res.Err)
	assert.Len(t, res.Data, 0)
	// reset GroupTeam and verify
	res = <-ss.Group().UpdateGroupSyncable(&pristineSyncable)
	assert.Nil(t, res.Err)
	res = <-ss.Group().PendingAutoAddTeamMembers(0)
	assert.Nil(t, res.Err)
	assert.Len(t, res.Data, 1)
	// No result if GroupMember deleted
	res = <-ss.Group().DeleteMember(group.Id, user.Id)
	assert.Nil(t, res.Err)
	res = <-ss.Group().PendingAutoAddTeamMembers(0)
	assert.Nil(t, res.Err)
	assert.Len(t, res.Data, 0)
	// restore group member and verify
	res = <-ss.Group().CreateOrRestoreMember(group.Id, user.Id)
	assert.Nil(t, res.Err) // fix: the original dropped this error check
	res = <-ss.Group().PendingAutoAddTeamMembers(0)
	assert.Nil(t, res.Err)
	assert.Len(t, res.Data, 1)
	// adding team membership stops returning result
	res = <-ss.Team().SaveMember(&model.TeamMember{
		TeamId: team.Id,
		UserId: user.Id,
	}, 999)
	assert.Nil(t, res.Err)
	res = <-ss.Group().PendingAutoAddTeamMembers(0)
	assert.Nil(t, res.Err)
	assert.Len(t, res.Data, 0)
}
func testPendingAutoAddChannelMembers(t *testing.T, ss store.Store) {
// Create Group
res := <-ss.Group().Create(&model.Group{
Name: model.NewId(),
DisplayName: "PendingAutoAddChannelMembers Test Group",
RemoteId: model.NewId(),
Source: model.GroupSourceLdap,
})
assert.Nil(t, res.Err)
group := res.Data.(*model.Group)
// Create User
user := &model.User{
Email: MakeEmail(),
Username: model.NewId(),
}
res = <-ss.User().Save(user)
assert.Nil(t, res.Err)
user = res.Data.(*model.User)
// Create GroupMember
res = <-ss.Group().CreateOrRestoreMember(group.Id, user.Id)
assert.Nil(t, res.Err)
// Create Channel
channel := &model.Channel{
TeamId: model.NewId(),
DisplayName: "A Name",
Name: model.NewId(),
Type: model.CHANNEL_OPEN, // Query does not look at type so this shouldn't matter.
}
res = <-ss.Channel().Save(channel, 9999)
assert.Nil(t, res.Err)
channel = res.Data.(*model.Channel)
// Create GroupChannel
res = <-ss.Group().CreateGroupSyncable(&model.GroupSyncable{
AutoAdd: true,
CanLeave: true,
SyncableId: channel.Id,
Type: model.GroupSyncableTypeChannel,
GroupId: group.Id,
})
assert.Nil(t, res.Err)
syncable := res.Data.(*model.GroupSyncable)
// Time before syncable was created
res = <-ss.Group().PendingAutoAddChannelMembers(syncable.CreateAt - 1)
assert.Nil(t, res.Err)
userChannelIDs := res.Data.([]*model.UserChannelIDPair)
assert.Len(t, userChannelIDs, 1)
assert.Equal(t, user.Id, userChannelIDs[0].UserID)
assert.Equal(t, channel.Id, userChannelIDs[0].ChannelID)
// Time after syncable was created
res = <-ss.Group().PendingAutoAddChannelMembers(syncable.CreateAt + 1)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 0)
// Delete and restore GroupMember should return result
res = <-ss.Group().DeleteMember(group.Id, user.Id)
assert.Nil(t, res.Err)
res = <-ss.Group().CreateOrRestoreMember(group.Id, user.Id)
assert.Nil(t, res.Err)
res = <-ss.Group().PendingAutoAddChannelMembers(syncable.CreateAt + 1)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 1)
pristineSyncable := *syncable
syncable.CanLeave = false
res = <-ss.Group().UpdateGroupSyncable(syncable)
assert.Nil(t, res.Err)
// Time before syncable was updated
res = <-ss.Group().PendingAutoAddChannelMembers(syncable.UpdateAt - 1)
assert.Nil(t, res.Err)
userChannelIDs = res.Data.([]*model.UserChannelIDPair)
assert.Len(t, userChannelIDs, 1)
assert.Equal(t, user.Id, userChannelIDs[0].UserID)
assert.Equal(t, channel.Id, userChannelIDs[0].ChannelID)
// Time after syncable was updated
res = <-ss.Group().PendingAutoAddChannelMembers(syncable.UpdateAt + 1)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 0)
// Only includes if auto-add
syncable.AutoAdd = false
syncable.CanLeave = true // have to update this or the model isn't valid
res = <-ss.Group().UpdateGroupSyncable(syncable)
assert.Nil(t, res.Err)
res = <-ss.Group().PendingAutoAddChannelMembers(0)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 0)
// reset state of syncable and verify
res = <-ss.Group().UpdateGroupSyncable(&pristineSyncable)
assert.Nil(t, res.Err)
res = <-ss.Group().PendingAutoAddChannelMembers(0)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 1)
// No result if Group deleted
res = <-ss.Group().Delete(group.Id)
assert.Nil(t, res.Err)
res = <-ss.Group().PendingAutoAddChannelMembers(0)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 0)
// reset state of group and verify
group.DeleteAt = 0
res = <-ss.Group().Update(group)
res = <-ss.Group().PendingAutoAddChannelMembers(0)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 1)
// No result if Channel deleted
res = <-ss.Channel().Delete(channel.Id, model.GetMillis())
assert.Nil(t, res.Err)
res = <-ss.Group().PendingAutoAddChannelMembers(0)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 0)
// reset state of channel and verify
channel.DeleteAt = 0
res = <-ss.Channel().Update(channel)
assert.Nil(t, res.Err)
res = <-ss.Group().PendingAutoAddChannelMembers(0)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 1)
// No result if GroupChannel deleted
res = <-ss.Group().DeleteGroupSyncable(group.Id, channel.Id, model.GroupSyncableTypeChannel)
assert.Nil(t, res.Err)
res = <-ss.Group().PendingAutoAddChannelMembers(0)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 0)
// reset GroupChannel and verify
res = <-ss.Group().UpdateGroupSyncable(&pristineSyncable)
assert.Nil(t, res.Err)
res = <-ss.Group().PendingAutoAddChannelMembers(0)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 1)
// No result if GroupMember deleted
res = <-ss.Group().DeleteMember(group.Id, user.Id)
assert.Nil(t, res.Err)
res = <-ss.Group().PendingAutoAddChannelMembers(0)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 0)
// restore group member and verify
res = <-ss.Group().CreateOrRestoreMember(group.Id, user.Id)
assert.Nil(t, res.Err)
res = <-ss.Group().PendingAutoAddChannelMembers(0)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 1)
// Adding Channel (ChannelMemberHistory) should stop returning result
res = <-ss.ChannelMemberHistory().LogJoinEvent(user.Id, channel.Id, model.GetMillis())
assert.Nil(t, res.Err)
res = <-ss.Group().PendingAutoAddChannelMembers(0)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 0)
// Leaving Channel (ChannelMemberHistory) should still not return result
res = <-ss.ChannelMemberHistory().LogLeaveEvent(user.Id, channel.Id, model.GetMillis())
assert.Nil(t, res.Err)
res = <-ss.Group().PendingAutoAddChannelMembers(0)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 0)
// Purging ChannelMemberHistory re-returns the result
res = <-ss.ChannelMemberHistory().PermanentDeleteBatch(model.GetMillis()+1, 100)
assert.Nil(t, res.Err)
res = <-ss.Group().PendingAutoAddChannelMembers(0)
assert.Nil(t, res.Err)
assert.Len(t, res.Data, 1)
} | store/storetest/group_supplier.go | 0.525856 | 0.558207 | group_supplier.go | starcoder |
package output
import (
"bytes"
"fmt"
"sort"
"strings"
"github.com/Jeffail/benthos/v3/lib/message/batch"
"github.com/Jeffail/benthos/v3/lib/util/config"
"gopkg.in/yaml.v3"
)
//------------------------------------------------------------------------------
// sanitiseWithBatch produces a sanitised config map for a batching output by
// marshalling the component config into a generic map and attaching the
// sanitised batch policy under the "batching" key.
func sanitiseWithBatch(
	componentConfig interface{},
	batchConfig batch.PolicyConfig,
) (map[string]interface{}, error) {
	sanitisedPolicy, err := batch.SanitisePolicyConfig(batchConfig)
	if err != nil {
		return nil, err
	}

	rawConf, err := yaml.Marshal(componentConfig)
	if err != nil {
		return nil, err
	}

	// Round-trip through YAML to flatten the component config into a map.
	sanitised := map[string]interface{}{}
	if err := yaml.Unmarshal(rawConf, &sanitised); err != nil {
		return nil, err
	}
	sanitised["batching"] = sanitisedPolicy
	return sanitised, nil
}
//------------------------------------------------------------------------------
// DocsBatches is a shared documentation paragraph appended to the docs of
// outputs that support batching.
var DocsBatches = `
This output benefits from sending messages as a batch for improved performance.
Batches can be formed at both the input and output level. You can find out more
[in this doc](/docs/configuration/batching).`
// DocsAsync is a shared documentation paragraph appended to the docs of
// outputs that support asynchronous (multiple in flight) sends.
var DocsAsync = `
This output benefits from sending multiple messages in flight in parallel for
improved performance. You can tune the max number of in flight messages with the
field ` + "`max_in_flight`" + `.`
var header = "This document was generated with `benthos --list-outputs`" + `
An output is a sink where we wish to send our consumed data after applying an
optional array of [processors](/docs/components/processors/about). Only one output is configured at
the root of a Benthos config. However, the output can be a [broker](/docs/components/outputs/broker)
which combines multiple outputs under a chosen brokering pattern.
An output config section looks like this:
` + "``` yaml" + `
output:
s3:
bucket: TODO
path: "${!meta(\"kafka_topic\")}/${!json(\"message.id\")}.json"
# Optional list of processing steps
processors:
- jmespath:
query: '{ message: @, meta: { link_count: length(links) } }'
` + "```" + `
### Back Pressure
Benthos outputs apply back pressure to components upstream. This means if your
output target starts blocking traffic Benthos will gracefully stop consuming
until the issue is resolved.
### Retries
When a Benthos output fails to send a message the error is propagated back up to
the input, where depending on the protocol it will either be pushed back to the
source as a Noack (e.g. AMQP) or will be reattempted indefinitely with the
commit withheld until success (e.g. Kafka).
It's possible to instead have Benthos indefinitely retry an output until success
with a [` + "`retry`" + `](/docs/components/outputs/retry) output. Some other outputs, such as the
[` + "`broker`" + `](/docs/components/outputs/broker), might also retry indefinitely depending on their
configuration.
### Multiplexing Outputs
It is possible to perform content based multiplexing of messages to specific
outputs either by using the ` + "[`switch`](/docs/components/outputs/switch)" + ` output, or a
` + "[`broker`](/docs/components/outputs/broker)" + ` with the ` + "`fan_out`" + ` pattern and a
[filter processor](/docs/components/processors/filter_parts) on each output, which
is a processor that drops messages if the condition does not pass.
Conditions are content aware logical operators that can be combined using
boolean logic.
For more information regarding conditions, including a full list of available
conditions please [read the docs here](/docs/components/conditions/about).
### Dead Letter Queues
It's possible to create fallback outputs for when an output target fails using
a ` + "[`try`](/docs/components/outputs/try)" + ` output.`
// Descriptions returns a formatted string of collated descriptions of each
// output type, rendered as a markdown document with a table of contents.
func Descriptions() string {
	// Order our output types alphabetically for deterministic output.
	names := []string{}
	for name := range Constructors {
		names = append(names, name)
	}
	sort.Strings(names)

	buf := bytes.Buffer{}
	buf.WriteString("Outputs\n")
	buf.WriteString(strings.Repeat("=", 7))
	buf.WriteString("\n\n")
	buf.WriteString(header)
	buf.WriteString("\n\n")

	buf.WriteString("### Contents\n\n")
	i := 0
	for _, name := range names {
		if Constructors[name].Deprecated {
			continue
		}
		i++
		buf.WriteString(fmt.Sprintf("%v. [`%v`](#%v)\n", i, name, name))
	}
	buf.WriteString("\n")

	// Append each description.
	for i, name := range names {
		def := Constructors[name]
		if def.Deprecated {
			continue
		}

		var confBytes []byte

		conf := NewConfig()
		conf.Type = name
		if confSanit, err := conf.Sanitised(true); err == nil {
			confBytes, _ = config.MarshalYAML(confSanit)
		}

		buf.WriteString("## ")
		buf.WriteString("`" + name + "`")
		buf.WriteString("\n")
		if confBytes != nil {
			buf.WriteString("\n``` yaml\n")
			buf.Write(confBytes)
			buf.WriteString("```\n")
		}

		buf.WriteString(def.Description)

		// Lazily emit the Performance heading only once per output.
		hasPerformanced := false
		performance := func() {
			if !hasPerformanced {
				buf.WriteString("\n\n### Performance\n")
				hasPerformanced = true
			} else {
				buf.WriteString("\n")
			}
		}
		if def.Async {
			performance()
			// Reuse the canonical paragraphs instead of duplicating the text
			// inline; they previously had to be kept in sync by hand.
			buf.WriteString(DocsAsync)
		}
		if def.Batches {
			performance()
			buf.WriteString(DocsBatches)
		}
		buf.WriteString("\n")
		if i != (len(names) - 1) {
			buf.WriteString("\n---\n")
		}
	}

	return buf.String()
}
//------------------------------------------------------------------------------ | lib/output/docs.go | 0.70477 | 0.712339 | docs.go | starcoder |
package interactors
import (
"fmt"
"math"
"github.com/CESARBR/knot-babeltower/pkg/thing/entities"
"github.com/go-playground/validator"
)
// schemaType describes the validation rule for one sensor type: the allowed
// value type(s) and unit(s). Each field holds either an int (exactly one
// allowed value) or an interval (an inclusive range of allowed values); see
// isValidValueType and isValidUnit for how the rules are interpreted.
type schemaType struct {
	valueType interface{}
	unit interface{}
}
// interval is an inclusive [min, max] range of allowed integer values.
type interval struct {
	min int
	max int
}
// rules reference table: https://knot-devel.cesar.org.br/doc/thing/unit-type-value.html
var rules = map[int]schemaType{
0x0000: {valueType: interval{1, 7}, unit: 0}, // NONE
0x0001: {valueType: interval{1, 7}, unit: interval{1, 3}}, // VOLTAGE
0x0002: {valueType: interval{1, 7}, unit: interval{1, 2}}, // CURRENT
0x0003: {valueType: interval{1, 7}, unit: 1}, // RESISTENCE
0x0004: {valueType: interval{1, 7}, unit: interval{1, 3}}, // POWER
0x0005: {valueType: interval{1, 7}, unit: interval{1, 3}}, // TEMPERATURE
0x0006: {valueType: interval{1, 7}, unit: 1}, // RELATIVE_HUMIDITY
0x0007: {valueType: interval{1, 7}, unit: interval{1, 3}}, // LUMINOSITY
0x0008: {valueType: interval{1, 7}, unit: interval{1, 3}}, // TIME
0x0009: {valueType: interval{1, 7}, unit: interval{1, 4}}, // MASS
0x000A: {valueType: interval{1, 7}, unit: interval{1, 3}}, // PRESSURE
0x000B: {valueType: interval{1, 7}, unit: interval{1, 4}}, // DISTANCE
0x000C: {valueType: interval{1, 7}, unit: interval{1, 2}}, // ANGLE
0x000D: {valueType: interval{1, 7}, unit: interval{1, 4}}, // VOLUME
0x000E: {valueType: interval{1, 7}, unit: interval{1, 3}}, // AREA
0x000F: {valueType: interval{1, 7}, unit: 1}, // RAIN
0x0010: {valueType: interval{1, 7}, unit: 1}, // DENSITY
0x0011: {valueType: interval{1, 7}, unit: 1}, // LATITUDE
0x0012: {valueType: interval{1, 7}, unit: 1}, // LONGITUDE
0x0013: {valueType: interval{1, 7}, unit: interval{1, 4}}, // SPEED
0x0014: {valueType: interval{1, 7}, unit: interval{1, 6}}, // VOLUMEFLOW
0x0015: {valueType: interval{1, 7}, unit: interval{1, 6}}, // ENERGY
0xFFF0: {valueType: interval{1, 7}, unit: 0}, // PRESENCE
0xFFF1: {valueType: interval{1, 7}, unit: 0}, // SWITCH
0xFFF2: {valueType: interval{1, 7}, unit: 0}, // COMMAND
0xFF10: {valueType: interval{1, 7}, unit: 0}, // GENERIC
0xFFFF: {valueType: interval{1, 7}, unit: 0}, // INVALID
}
// UpdateConfig executes the use case to update a thing's configuration. It
// rejects requests with missing arguments, validates the provided config list
// against the thing's registered metadata, and forwards the update to the
// thing proxy.
func (i *ThingInteractor) UpdateConfig(authorization, id string, configList []entities.Config) error {
	switch {
	case authorization == "":
		return ErrAuthNotProvided
	case id == "":
		return ErrIDNotProvided
	case configList == nil:
		return ErrConfigNotProvided
	}

	if err := i.validateConfig(authorization, id, configList); err != nil {
		return fmt.Errorf("failed to validate if config is valid: %w", err)
	}

	return i.thingProxy.UpdateConfig(authorization, id, configList)
}
// validateConfig checks that configList is consistent with the thing's stored
// metadata: every entry must reference an existing schema or provide one,
// carry a valid schema definition, and have threshold flags compatible with
// the schema's value type.
func (i *ThingInteractor) validateConfig(authorization, id string, configList []entities.Config) error {
	thing, err := i.thingProxy.Get(authorization, id)
	if err != nil {
		return fmt.Errorf("error getting thing metadata: %w", err)
	}

	if err := validateSchemaExists(configList, thing.Config); err != nil {
		return err
	}
	if !i.isValidSchema(configList) {
		return ErrSchemaInvalid
	}

	// Fill any missing schema/event sections from the stored config before
	// checking the threshold values.
	merged := validateConfigIntegrity(configList, thing.Config)
	return validateFlagValue(merged)
}
// validateFlagValue verifies that every configured lower/upper threshold is
// compatible with the value type declared by the entry's schema.
func validateFlagValue(configList []entities.Config) error {
	for _, conf := range configList {
		lower, upper := conf.Event.LowerThreshold, conf.Event.UpperThreshold
		if lower != nil && !isValidValue(lower, conf.Schema.ValueType) {
			return ErrDataInvalid
		}
		if upper != nil && !isValidValue(upper, conf.Schema.ValueType) {
			return ErrDataInvalid
		}
	}
	return nil
}
// isValidValue reports whether a threshold value is compatible with the
// schema's value type. Only float64 values are accepted (presumably the
// thresholds arrive from JSON decoding — confirm with callers); whole numbers
// are validated against the numeric schema rules, while fractional numbers
// are only valid for value type 2.
func isValidValue(value interface{}, valueType int) bool {
	number, ok := value.(float64)
	if !ok {
		return false
	}
	if number != math.Trunc(number) {
		// Fractional values can only be stored under value type 2.
		return valueType == 2
	}
	return ValidateSchemaNumber(number, valueType)
}
// validateSchemaExists ensures that every new config entry either targets a
// sensor that already has a stored config or provides a schema of its own.
func validateSchemaExists(newConfigList []entities.Config, actualConfigList []entities.Config) error {
	for _, conf := range newConfigList {
		if isAnExistentConfig(actualConfigList, conf) {
			return ErrSchemaNotProvided
		}
	}
	return nil
}
// isAnExistentConfig reports whether config targets a sensor with no stored
// config while also omitting its schema, i.e. an entry whose schema cannot be
// inherited. NOTE(review): despite the name, a true result means the entry is
// NOT backed by an existing config.
func isAnExistentConfig(configList []entities.Config, config entities.Config) bool {
	for _, stored := range configList {
		if stored.SensorID == config.SensorID {
			// A stored config exists for this sensor, so an empty schema can
			// be filled in later from it.
			return false
		}
	}
	return isSchemaEmpty(config.Schema)
}
// validateConfigIntegrity merges each new config entry with the thing's
// currently stored config for the same sensor: empty schema or event sections
// are inherited from the stored entry. When the entry's value type differs
// from the stored one, the stored thresholds are dropped since they no longer
// apply to the new type. The (mutated) newConfigList is returned.
func validateConfigIntegrity(newConfigList []entities.Config, actualConfigList []entities.Config) []entities.Config {
	for index, c := range newConfigList {
		for _, stored := range actualConfigList {
			if c.SensorID != stored.SensorID {
				continue
			}
			if isSchemaEmpty(c.Schema) {
				newConfigList[index].Schema = stored.Schema
			}
			if isEventEmpty(c.Event) {
				newConfigList[index].Event = stored.Event
			}
			// The previous code also checked `event != nil` where event was
			// `&c.Event`; that address can never be nil, so the guard was a
			// no-op and has been removed.
			if c.Schema.ValueType != stored.Schema.ValueType && stored.Event.LowerThreshold != nil && stored.Event.UpperThreshold != nil {
				newConfigList[index].Event.LowerThreshold = nil
				newConfigList[index].Event.UpperThreshold = nil
			}
		}
	}
	return newConfigList
}
// isValidSchema runs the custom struct-level schema validator over every
// non-empty schema in the list, returning false at the first invalid one.
func (i *ThingInteractor) isValidSchema(configList []entities.Config) bool {
	v := validator.New()
	v.RegisterStructValidation(schemaValidation, entities.Schema{})
	for _, conf := range configList {
		if isSchemaEmpty(conf.Schema) {
			// Empty schemas are filled in later from the stored config.
			continue
		}
		if err := v.Struct(conf.Schema); err != nil {
			return false
		}
	}
	return true
}
// schemaValidation is a custom struct-level validator for entities.Schema. It
// verifies the type ID is a known sensor type and that the value type and
// unit fall within the ranges allowed for that type (see the rules table).
func schemaValidation(sl validator.StructLevel) {
	schema := sl.Current().Interface().(entities.Schema)
	typeID := schema.TypeID
	// Accept the type IDs present in the rules table: 0x0000-0x0015 plus the
	// special 0xFF10 and 0xFFF0-0xFFF2 entries. The previous check capped
	// basic types at 15, wrongly rejecting DENSITY..ENERGY (0x0010-0x0015);
	// 0xFFFF remains rejected as it is the invalid test-only type. The debug
	// fmt.Print statements have been removed.
	if (typeID < 0 || 0x0015 < typeID) && (typeID < 0xfff0 || 0xfff2 < typeID) && typeID != 0xff10 {
		sl.ReportError(schema, "schema", "Type ID", "typeID", "false")
		return
	}
	if !isValidValueType(typeID, schema.ValueType) {
		sl.ReportError(schema, "schema", "Value Type", "valueType", "false")
		return
	}
	if !isValidUnit(typeID, schema.Unit) {
		sl.ReportError(schema, "schema", "Unit", "unit", "false")
	}
}
// isValidValueType reports whether valueType is permitted for the given type
// ID according to the rules table. A rule constrains the value type either to
// a single integer or to an inclusive interval; an unknown type ID (nil rule)
// is always invalid.
func isValidValueType(typeID, valueType int) bool {
	rule := rules[typeID].valueType
	if rule == nil {
		return false
	}
	switch constraint := rule.(type) {
	case int:
		if valueType != constraint {
			return false
		}
	case interval:
		if valueType < constraint.min || constraint.max < valueType {
			return false
		}
	}
	return true
}
// isValidUnit reports whether unit is permitted for the given type ID
// according to the rules table. A rule constrains the unit either to a single
// integer or to an inclusive interval; an unknown type ID (nil rule) is
// always invalid.
func isValidUnit(typeID, unit int) bool {
	rule := rules[typeID].unit
	if rule == nil {
		return false
	}
	switch constraint := rule.(type) {
	case int:
		if unit != constraint {
			return false
		}
	case interval:
		if unit < constraint.min || constraint.max < unit {
			return false
		}
	}
	return true
}
// isSchemaEmpty reports whether schema is the zero value, meaning the caller
// did not supply one.
func isSchemaEmpty(schema entities.Schema) bool {
	return schema.Name == "" &&
		schema.TypeID == 0 &&
		schema.Unit == 0 &&
		schema.ValueType == 0
}
func isEventEmpty(event entities.Event) bool {
if !event.Change && event.TimeSec == 0 && event.LowerThreshold == nil && event.UpperThreshold == nil {
return true
}
return false
} | pkg/thing/interactors/update_config.go | 0.703753 | 0.477189 | update_config.go | starcoder |
package ent
import (
"fmt"
"strings"
"time"
"github.com/bionicstork/contrib/entproto/internal/todo/ent/nilexample"
"entgo.io/ent/dialect/sql"
)
// NilExample is the model entity for the NilExample schema.
type NilExample struct {
	// config holds the ent client configuration; excluded from JSON output.
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// StrNil holds the value of the "str_nil" field.
	StrNil *string `json:"str_nil,omitempty"`
	// TimeNil holds the value of the "time_nil" field.
	TimeNil *time.Time `json:"time_nil,omitempty"`
}
// scanValues returns one scan destination per requested column, typed
// according to the column's field, for use with sql.Rows.Scan.
func (*NilExample) scanValues(columns []string) ([]interface{}, error) {
	values := make([]interface{}, len(columns))
	for i, column := range columns {
		switch column {
		case nilexample.FieldID:
			values[i] = new(sql.NullInt64)
		case nilexample.FieldStrNil:
			values[i] = new(sql.NullString)
		case nilexample.FieldTimeNil:
			values[i] = new(sql.NullTime)
		default:
			return nil, fmt.Errorf("unexpected column %q for type NilExample", column)
		}
	}
	return values, nil
}
// assignValues copies the scanned column values (produced by scanValues) into
// the corresponding NilExample fields. Nullable columns are only assigned
// when the scanned value is valid.
func (ne *NilExample) assignValues(columns []string, values []interface{}) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i, column := range columns {
		switch column {
		case nilexample.FieldID:
			v, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", v)
			}
			ne.ID = int(v.Int64)
		case nilexample.FieldStrNil:
			v, ok := values[i].(*sql.NullString)
			if !ok {
				return fmt.Errorf("unexpected type %T for field str_nil", values[i])
			}
			if v.Valid {
				s := v.String
				ne.StrNil = &s
			}
		case nilexample.FieldTimeNil:
			v, ok := values[i].(*sql.NullTime)
			if !ok {
				return fmt.Errorf("unexpected type %T for field time_nil", values[i])
			}
			if v.Valid {
				t := v.Time
				ne.TimeNil = &t
			}
		}
	}
	return nil
}
// Update returns a builder for updating this NilExample.
// Note that you need to call NilExample.Unwrap() before calling this method
// if this NilExample was returned from a transaction, and the transaction was
// committed or rolled back.
func (ne *NilExample) Update() *NilExampleUpdateOne {
	client := &NilExampleClient{config: ne.config}
	return client.UpdateOne(ne)
}
// Unwrap unwraps the NilExample entity that was returned from a transaction
// after it was closed, so that all future queries will be executed through
// the driver which created the transaction. It panics if the entity is not
// transactional.
func (ne *NilExample) Unwrap() *NilExample {
	if tx, ok := ne.config.driver.(*txDriver); ok {
		ne.config.driver = tx.drv
		return ne
	}
	panic("ent: NilExample is not a transactional entity")
}
// String implements fmt.Stringer, rendering the entity's ID and any non-nil
// optional fields.
func (ne *NilExample) String() string {
	var b strings.Builder
	b.WriteString("NilExample(")
	fmt.Fprintf(&b, "id=%v", ne.ID)
	if s := ne.StrNil; s != nil {
		b.WriteString(", str_nil=")
		b.WriteString(*s)
	}
	if t := ne.TimeNil; t != nil {
		b.WriteString(", time_nil=")
		b.WriteString(t.Format(time.ANSIC))
	}
	b.WriteByte(')')
	return b.String()
}
// NilExamples is a parsable slice of NilExample.
type NilExamples []*NilExample
func (ne NilExamples) config(cfg config) {
for _i := range ne {
ne[_i].config = cfg
}
} | entproto/internal/todo/ent/nilexample.go | 0.698432 | 0.4184 | nilexample.go | starcoder |
package pool
import (
"github.com/xichen2020/eventdb/x/refcnt"
)
// RefCountedPooledInt64Array is a refcounted, pooled generic value array. The
// backing storage is leased from a *BucketizedInt64ArrayPool and handed back
// to it when the last reference is released (see tryRelease).
type RefCountedPooledInt64Array struct {
	// closed guards against double-release from Close.
	closed bool
	// cnt tracks how many handles (the owner plus snapshots) share vals.
	cnt *refcnt.RefCounter
	p *BucketizedInt64ArrayPool
	vals []int64
	// valuesResetFn, if non-nil, is called on vals before they are pooled.
	valuesResetFn func(values []int64)
}
// NewRefCountedPooledInt64Array creates a new refcounted, pooled generic
// value array wrapping vals. resetFn, if non-nil, is invoked on the values
// before the backing array is returned to the pool p.
func NewRefCountedPooledInt64Array(
	vals []int64,
	p *BucketizedInt64ArrayPool,
	resetFn func(values []int64),
) *RefCountedPooledInt64Array {
	arr := &RefCountedPooledInt64Array{
		cnt:           refcnt.NewRefCounter(),
		p:             p,
		vals:          vals,
		valuesResetFn: resetFn,
	}
	return arr
}
// Get returns the underlying raw value array. The returned slice must not be
// retained beyond the lifetime of this handle, as the backing array may be
// returned to the pool once all references are released.
func (rv *RefCountedPooledInt64Array) Get() []int64 { return rv.vals }
// Snapshot takes a snapshot of the current values in the refcounted array.
// The returned snapshot shares the backing array with the source array but
// keeps a copy of the array slice as the snapshot. As a result, new values
// appended to the end of the array after the snapshot is taken are invisible
// to the snapshot.
func (rv *RefCountedPooledInt64Array) Snapshot() *RefCountedPooledInt64Array {
	rv.cnt.IncRef()
	return &RefCountedPooledInt64Array{
		cnt:  rv.cnt,
		p:    rv.p,
		vals: rv.vals,
		// Propagate the reset function so that if the snapshot is the last
		// reference to be released, the values are still reset before the
		// backing array is returned to the pool. Previously this field was
		// dropped, silently skipping the reset when a snapshot outlived its
		// source.
		valuesResetFn: rv.valuesResetFn,
	}
}
// Append appends a value to the value array, growing the backing array from
// the pool when capacity is exhausted. Growing drops this handle's reference
// to the old backing array (which may return it to the pool if no snapshots
// still hold it) and starts a fresh reference count for the new one.
func (rv *RefCountedPooledInt64Array) Append(v int64) {
	if len(rv.vals) < cap(rv.vals) {
		rv.vals = append(rv.vals, v)
		return
	}
	// Double the capacity, guarding against a zero-capacity slice for which
	// doubling would request a useless zero-sized array from the pool and
	// force the subsequent append to allocate outside it.
	newCap := cap(rv.vals) * 2
	if newCap == 0 {
		newCap = 1
	}
	newVals := rv.p.Get(newCap)
	n := copy(newVals[:len(rv.vals)], rv.vals)
	newVals = newVals[:n]
	newVals = append(newVals, v)
	rv.tryRelease()
	rv.cnt = refcnt.NewRefCounter()
	rv.vals = newVals
}
// Close releases this handle's reference to the backing array. It is
// idempotent: only the first call decrements the reference count.
func (rv *RefCountedPooledInt64Array) Close() {
	if !rv.closed {
		rv.closed = true
		rv.tryRelease()
	}
}
func (rv *RefCountedPooledInt64Array) tryRelease() {
if rv.cnt.DecRef() > 0 {
return
}
if rv.valuesResetFn != nil {
rv.valuesResetFn(rv.vals)
}
rv.vals = rv.vals[:0]
rv.p.Put(rv.vals, cap(rv.vals))
rv.vals = nil
rv.cnt = nil
} | x/pool/ref_counted_pooled_int64_array.gen.go | 0.612194 | 0.460168 | ref_counted_pooled_int64_array.gen.go | starcoder |
package latticevector
import (
"errors"
"fmt"
"math"
"wallpaper/entities/utility"
)
// PairMarshal can be marshaled and converted to a Pair; it mirrors Pair using
// JSON/YAML-friendly complex number wrappers.
type PairMarshal struct {
	XLatticeVector *utility.ComplexNumberForMarshal `json:"x_lattice_vector" yaml:"x_lattice_vector"`
	YLatticeVector *utility.ComplexNumberForMarshal `json:"y_lattice_vector" yaml:"y_lattice_vector"`
}
// Pair defines the shape of the wallpaper lattice as two basis vectors stored
// as complex numbers. Validate rejects zero or collinear vectors.
type Pair struct {
	XLatticeVector complex128
	YLatticeVector complex128
}
// vectorIsZero reports whether both the real and imaginary components of the
// vector are exactly zero.
func vectorIsZero(vector complex128) bool {
	if real(vector) != 0 {
		return false
	}
	return imag(vector) == 0
}
// vectorsAreCollinear returns true if both vectors are perfectly lined up,
// i.e. |v1 . v2| equals |v1|*|v2| within a small fixed tolerance.
func vectorsAreCollinear(vector1 complex128, vector2 complex128) bool {
	const tolerance = 1e-8
	dot := real(vector1)*real(vector2) + imag(vector1)*imag(vector2)
	length1 := math.Sqrt(real(vector1)*real(vector1) + imag(vector1)*imag(vector1))
	length2 := math.Sqrt(real(vector2)*real(vector2) + imag(vector2)*imag(vector2))
	return math.Abs(math.Abs(dot)-length1*length2) < tolerance
}
// Validate returns an error if this is an invalid lattice: either basis
// vector being zero, or the two vectors being collinear (which would collapse
// the lattice onto a line).
func (lattice *Pair) Validate() error {
	if vectorIsZero(lattice.XLatticeVector) || vectorIsZero(lattice.YLatticeVector) {
		return errors.New(`lattice vectors cannot be (0,0)`)
	}
	if vectorsAreCollinear(lattice.XLatticeVector, lattice.YLatticeVector) {
		// The previous message contained a stray backslash before the second
		// coordinate pair (`and \(%f,%f)`) that leaked into the error text.
		return fmt.Errorf(
			`vectors cannot be collinear: (%f,%f) and (%f,%f)`,
			real(lattice.XLatticeVector),
			imag(lattice.XLatticeVector),
			real(lattice.YLatticeVector),
			imag(lattice.YLatticeVector),
		)
	}
	return nil
}
// ConvertToLatticeCoordinates converts a point from cartesian coordinates to the lattice coordinates
func (lattice *Pair) ConvertToLatticeCoordinates(cartesianPoint complex128) complex128 {
vector1 := lattice.XLatticeVector
vector2 := lattice.YLatticeVector
swapVectorsDuringCalculation := real(vector1) < 1e-6
if swapVectorsDuringCalculation == true {
vector1 = lattice.YLatticeVector
vector2 = lattice.XLatticeVector
}
scalarForVector2Numerator := (real(vector1) * imag(cartesianPoint)) - (imag(vector1) * real(cartesianPoint))
scalarForVector2Denominator := (real(vector1) * imag(vector2)) - (imag(vector1) * real(vector2))
scalarForVector2 := scalarForVector2Numerator / scalarForVector2Denominator
scalarForVector1Numerator := real(cartesianPoint) - (scalarForVector2 * real(vector2))
scalarForVector1Denominator := real(vector1)
scalarForVector1 := scalarForVector1Numerator / scalarForVector1Denominator
if swapVectorsDuringCalculation {
return complex(scalarForVector2, scalarForVector1)
}
return complex(scalarForVector1, scalarForVector2)
} | entities/formula/latticevector/latticeVector.go | 0.842475 | 0.556641 | latticeVector.go | starcoder |
package nlp
import (
"math/rand"
"github.com/james-bowman/sparse"
"gonum.org/v1/gonum/mat"
)
// SimHash implements the SimHash Locality Sensitive Hashing (LSH) algorithm
// using sign random projections (<NAME>, https://www.cs.princeton.edu/courses/archive/spr04/cos598B/bib/CharikarEstim.pdf)
// The distance between the original vectors is preserved through the hashing process such that
// hashed vectors can be compared using Hamming Similarity for a faster, more space
// efficient, approximation of Cosine Similarity for the original vectors.
type SimHash struct {
	// hyperplanes holds one random hyperplane per output bit; the sign of the
	// dot product with each hyperplane determines the corresponding bit.
	hyperplanes []*mat.VecDense
}
// NewSimHash constructs a new SimHash which accepts input vectors of length
// dim and produces hashed binary fingerprints of length bits. It generates
// one random hyperplane per output bit (components drawn from a standard
// normal distribution); each bit of the output encodes the sign of the dot
// product between the input vector and the corresponding hyperplane.
func NewSimHash(bits int, dim int) *SimHash {
	hyperplanes := make([]*mat.VecDense, bits)
	for j := range hyperplanes {
		components := make([]float64, dim)
		for i := range components {
			components[i] = rand.NormFloat64()
		}
		hyperplanes[j] = mat.NewVecDense(dim, components)
	}
	return &SimHash{hyperplanes: hyperplanes}
}
// Hash accepts a Vector and outputs a BinaryVec (which also implements the
// Gonum Vector interface). This method will panic if the input vector is of a
// different length than the dim parameter used when constructing the SimHash.
func (h *SimHash) Hash(v mat.Vector) *sparse.BinaryVec {
bits := len(h.hyperplanes)
dim := h.hyperplanes[0].Len()
if dim != v.Len() {
panic("The supplied vector has a different number of dimensions from the projected hyperplanes")
}
sig := sparse.NewBinaryVec(bits)
for i := 0; i < bits; i++ {
if sparse.Dot(v, h.hyperplanes[i]) >= 0 {
sig.SetBit(i)
}
}
return sig
} | hashing.go | 0.836488 | 0.619284 | hashing.go | starcoder |
package cryptypes
import (
	"database/sql/driver"
	"fmt"
)
// EncryptedString supports encrypting String data.
type EncryptedString struct {
	Field
	Raw string
}

// Scan converts the value from the DB into a usable EncryptedString value. It
// returns a descriptive error (rather than panicking, as the previous
// unchecked type assertion did) when the driver supplies a value that is not
// []byte.
func (s *EncryptedString) Scan(value interface{}) error {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into EncryptedString", value)
	}
	return decrypt(b, &s.Raw)
}

// Value converts an initialized EncryptedString value into a value that can safely be stored in the DB
func (s EncryptedString) Value() (driver.Value, error) {
	return encrypt(s.Raw)
}
// NullEncryptedString supports encrypting nullable String data.
type NullEncryptedString struct {
	Field
	Raw   string
	Empty bool
}

// Scan converts the value from the DB into a usable NullEncryptedString
// value. A nil value marks the field as Empty; non-[]byte values yield an
// error instead of the panic the previous unchecked assertion caused.
func (s *NullEncryptedString) Scan(value interface{}) error {
	if value == nil {
		s.Raw = ""
		s.Empty = true
		return nil
	}
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into NullEncryptedString", value)
	}
	return decrypt(b, &s.Raw)
}

// Value converts an initialized NullEncryptedString value into a value that
// can safely be stored in the DB; Empty values are stored as NULL.
func (s NullEncryptedString) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return encrypt(s.Raw)
}
// SignedString supports signing String data.
type SignedString struct {
	Field
	Raw   string
	Valid bool
}

// Scan converts the value from the DB into a usable SignedString value,
// setting Valid from the signature verification. It returns an error (rather
// than panicking, as the previous unchecked assertion did) when the driver
// supplies a value that is not []byte.
func (s *SignedString) Scan(value interface{}) (err error) {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into SignedString", value)
	}
	s.Valid, err = verify(b, &s.Raw)
	return
}

// Value converts an initialized SignedString value into a value that can safely be stored in the DB
func (s SignedString) Value() (driver.Value, error) {
	return sign(s.Raw)
}
// NullSignedString supports signing nullable String data.
type NullSignedString struct {
	Field
	Raw   string
	Empty bool
	Valid bool
}

// Scan converts the value from the DB into a usable NullSignedString value. A
// nil value marks the field as Empty (and trivially Valid); non-[]byte values
// yield an error instead of the panic the previous unchecked assertion
// caused.
func (s *NullSignedString) Scan(value interface{}) (err error) {
	if value == nil {
		s.Raw = ""
		s.Empty = true
		s.Valid = true
		return nil
	}
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into NullSignedString", value)
	}
	s.Valid, err = verify(b, &s.Raw)
	return
}

// Value converts an initialized NullSignedString value into a value that can
// safely be stored in the DB; Empty values are stored as NULL.
func (s NullSignedString) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return sign(s.Raw)
}
// SignedEncryptedString supports signing and encrypting String data.
type SignedEncryptedString struct {
	Field
	Raw   string
	Valid bool
}

// Scan converts the value from the DB into a usable SignedEncryptedString
// value, setting Valid from the signature verification. It returns an error
// (rather than panicking, as the previous unchecked assertion did) when the
// driver supplies a value that is not []byte.
func (s *SignedEncryptedString) Scan(value interface{}) (err error) {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into SignedEncryptedString", value)
	}
	s.Valid, err = decryptVerify(b, &s.Raw)
	return
}

// Value converts an initialized SignedEncryptedString value into a value that can safely be stored in the DB
func (s SignedEncryptedString) Value() (driver.Value, error) {
	return encryptSign(s.Raw)
}
// NullSignedEncryptedString supports signing and encrypting nullable String data
type NullSignedEncryptedString struct {
Field
Raw string
Empty bool
Valid bool
}
// Scan converts the value from the DB into a usable NullSignedEncryptedString value
func (s *NullSignedEncryptedString) Scan(value interface{}) (err error) {
if value == nil {
s.Raw = ""
s.Empty = true
s.Valid = true
return nil
}
s.Valid, err = decryptVerify(value.([]byte), &s.Raw)
return
}
// Value converts an initialized NullSignedEncryptedString value into a value that can safely be stored in the DB
func (s NullSignedEncryptedString) Value() (driver.Value, error) {
if s.Empty {
return nil, nil
}
return encryptSign(s.Raw)
} | cryptypes/type_string.go | 0.817793 | 0.469399 | type_string.go | starcoder |
package exp
// Predicate represents a single true/false comparison between a named field
// (Field), a comparison operator (Operator), and a target value (Value).
type Predicate struct {
	Field string
	Operator string
	Value interface{}
}
// New returns a fully populated Predicate comparing the named field to value
// using the given operator.
func New(field string, operator string, value interface{}) Predicate {
	return Predicate{Field: field, Operator: operator, Value: value}
}
// Equal creates a new Predicate using an "Equals" comparison.
func Equal(field string, value interface{}) Predicate {
	return Predicate{Field: field, Operator: OperatorEqual, Value: value}
}

// NotEqual creates a new Predicate using a "Not Equals" comparison.
func NotEqual(field string, value interface{}) Predicate {
	return Predicate{Field: field, Operator: OperatorNotEqual, Value: value}
}

// LessThan creates a new Predicate using a "Less Than" comparison.
func LessThan(field string, value interface{}) Predicate {
	return Predicate{Field: field, Operator: OperatorLessThan, Value: value}
}

// LessOrEqual creates a new Predicate using a "Less Or Equal" comparison.
func LessOrEqual(field string, value interface{}) Predicate {
	return Predicate{Field: field, Operator: OperatorLessOrEqual, Value: value}
}

// GreaterThan creates a new Predicate using a "Greater Than" comparison.
func GreaterThan(field string, value interface{}) Predicate {
	return Predicate{Field: field, Operator: OperatorGreaterThan, Value: value}
}

// GreaterOrEqual creates a new Predicate using a "Greater Or Equal" comparison.
func GreaterOrEqual(field string, value interface{}) Predicate {
	return Predicate{Field: field, Operator: OperatorGreaterOrEqual, Value: value}
}

// Contains creates a new Predicate using a "Contains" comparison.
func Contains(field string, value interface{}) Predicate {
	return Predicate{Field: field, Operator: OperatorContains, Value: value}
}

// ContainedBy creates a new Predicate using a "ContainedBy" comparison.
func ContainedBy(field string, value interface{}) Predicate {
	return Predicate{Field: field, Operator: OperatorContainedBy, Value: value}
}

// BeginsWith creates a new Predicate using a "BeginsWith" comparison.
func BeginsWith(field string, value interface{}) Predicate {
	return Predicate{Field: field, Operator: OperatorBeginsWith, Value: value}
}

// EndsWith creates a new Predicate using an "EndsWith" comparison.
func EndsWith(field string, value interface{}) Predicate {
	return Predicate{Field: field, Operator: OperatorEndsWith, Value: value}
}
// And combines this predicate with another pre-existing expression into a new
// AndExpression. EmptyExpressions are ignored, leaving the predicate as-is.
func (predicate Predicate) And(exp Expression) Expression {
	switch exp.(type) {
	case EmptyExpression:
		return predicate
	default:
		return AndExpression{predicate, exp}
	}
}
// AndEqual conjoins this predicate with an "Equals" predicate built from the
// given field name and value.
func (predicate Predicate) AndEqual(name string, value interface{}) Expression {
	p := New(name, OperatorEqual, value)
	return predicate.And(p)
}
// AndNotEqual conjoins this predicate with a "Not Equals" predicate built from
// the given field name and value.
func (predicate Predicate) AndNotEqual(name string, value interface{}) Expression {
	p := New(name, OperatorNotEqual, value)
	return predicate.And(p)
}
// AndLessThan conjoins this predicate with a "Less Than" predicate built from
// the given field name and value.
func (predicate Predicate) AndLessThan(name string, value interface{}) Expression {
	p := New(name, OperatorLessThan, value)
	return predicate.And(p)
}
// AndLessOrEqual conjoins this predicate with a "Less Or Equal" predicate
// built from the given field name and value.
func (predicate Predicate) AndLessOrEqual(name string, value interface{}) Expression {
	p := New(name, OperatorLessOrEqual, value)
	return predicate.And(p)
}
// AndGreaterThan conjoins this predicate with a "Greater Than" predicate built
// from the given field name and value.
func (predicate Predicate) AndGreaterThan(name string, value interface{}) Expression {
	p := New(name, OperatorGreaterThan, value)
	return predicate.And(p)
}
// AndGreaterOrEqual conjoins this predicate with a "Greater Or Equal"
// predicate built from the given field name and value.
func (predicate Predicate) AndGreaterOrEqual(name string, value interface{}) Expression {
	p := New(name, OperatorGreaterOrEqual, value)
	return predicate.And(p)
}
// Or disjoins this predicate with a pre-existing expression, producing an
// OrExpression. An EmptyExpression operand is dropped and the predicate is
// returned unchanged.
func (predicate Predicate) Or(exp Expression) Expression {
	switch exp.(type) {
	case EmptyExpression:
		// An empty expression adds no constraint; keep the predicate alone.
		return predicate
	default:
		return OrExpression{predicate, exp}
	}
}
// Match implements the Expression interface. It uses a MatcherFunc to determine if this predicate matches an arbitrary dataset.
func (predicate Predicate) Match(fn MatcherFunc) bool {
return fn(predicate)
} | predicate.go | 0.917511 | 0.587381 | predicate.go | starcoder |
package agent
import (
"encoding/json"
"fmt"
"math"
"math/rand"
tpo "github.com/stellentus/cartpoles/lib/util/type-opr"
"github.com/stellentus/cartpoles/lib/logger"
"github.com/stellentus/cartpoles/lib/rlglue"
"github.com/stellentus/cartpoles/lib/util"
)
// Tile-coding bounds for the six acrobot observation dimensions. Initialize
// scales each observation component i into [-maxFeatureAcrobotI, +maxFeatureAcrobotI].
// NOTE(review): dims 1-4 look like cos/sin of the two joint angles (range ±1)
// and dims 5-6 like angular velocities (±4π, ±9π) — confirm against the
// acrobot environment's observation layout.
const (
	maxFeatureAcrobot1 = 1.0
	maxFeatureAcrobot2 = 1.0
	maxFeatureAcrobot3 = 1.0
	maxFeatureAcrobot4 = 1.0
	maxFeatureAcrobot5 = 4.0 * math.Pi
	maxFeatureAcrobot6 = 9.0 * math.Pi
)
// esarsaAcrobotSettings holds the JSON-configurable hyperparameters of the
// expected-sarsa acrobot agent. Defaults are applied in Initialize before the
// experiment attributes are unmarshalled over them.
type esarsaAcrobotSettings struct {
	EnableDebug        bool    `json:"enable-debug"`         // emit per-step debug messages
	Seed               int64   `json:"seed"`                 // RNG seed; offset by the run number in Initialize
	NumTilings         int     `json:"tilings"`              // number of tilings in the tile coder
	NumTiles           int     `json:"tiles"`                // tiles per dimension in each tiling
	Gamma              float64 `json:"gamma"`                // discount factor used in the TD target
	Lambda             float64 `json:"lambda"`               // eligibility-trace decay rate
	Epsilon            float64 `json:"epsilon"`              // exploration rate of the epsilon-greedy policy
	Alpha              float64 `json:"alpha"`                // base stepsize (used when not adaptive)
	AdaptiveAlpha      float64 `json:"adaptive-alpha"`       // stepsize used by the adaptive (Adam-style) update
	IsStepsizeAdaptive bool    `json:"is-stepsize-adaptive"` // select adaptive vs. fixed stepsize
}
// ESarsaAcrobot is an expected sarsa(lambda) agent with tile coding for the
// acrobot environment (3 discrete actions, 6-dimensional observations).
type ESarsaAcrobot struct {
	logger.Debug
	rng   *rand.Rand      // seeded RNG for epsilon-greedy action sampling
	tiler util.MultiTiler // maps 6-D observations to active feature indices
	// Agent accessible parameters
	weights [][]float64 // weights is a slice of weights for each action
	traces  [][]float64 // eligibility traces, one slice per action
	delta   float64     // most recent TD error
	oldStateActiveFeatures []int         // active tile indices of the previous state
	oldAction              rlglue.Action // action taken in the previous state
	stepsize float64 // effective stepsize (alpha divided by number of tilings)
	// Adam-style adaptive-stepsize state (used when IsStepsizeAdaptive is set).
	beta1     float64     // first-moment decay rate
	beta2     float64     // second-moment decay rate
	e         float64     // numerical-stability epsilon in the denominator
	m         [][]float64 // first-moment estimates, one slice per action
	v         [][]float64 // second-moment estimates, one slice per action
	timesteps float64     // step counter, used for bias correction
	// NOTE(review): the accumulating beta fields are never read or written in
	// this file — possibly dead state; verify before removing.
	accumulatingbeta1 float64
	accumulatingbeta2 float64
	esarsaAcrobotSettings
}
// init registers this agent's constructor under the name used by experiment
// configuration files so the agent factory can instantiate it.
func init() {
	Add("esarsa_acrobot", NewESarsaAcrobot)
}
// NewESarsaAcrobot returns a freshly constructed expected-sarsa acrobot agent
// that reports its output through dbg. (The parameter is named dbg rather than
// logger to avoid shadowing the imported logger package.)
func NewESarsaAcrobot(dbg logger.Debug) (rlglue.Agent, error) {
	return &ESarsaAcrobot{Debug: dbg}, nil
}
// Initialize configures the agent with the provided parameters and resets any internal state.
func (agent *ESarsaAcrobot) Initialize(run uint, expAttr, envAttr rlglue.Attributes) error {
	agent.esarsaAcrobotSettings = esarsaAcrobotSettings{
		// These default settings will be used if the config doesn't set these values
		NumTilings:         32,
		NumTiles:           4,
		Gamma:              0.99,
		Lambda:             0.8,
		Epsilon:            0.05,
		Alpha:              0.1,
		AdaptiveAlpha:      0.001,
		IsStepsizeAdaptive: false,
	}
	err := json.Unmarshal(expAttr, &agent.esarsaAcrobotSettings)
	if err != nil {
		agent.Message("warning", "agent.ESarsa settings weren't available: "+err.Error())
		agent.esarsaAcrobotSettings.Seed = 0
	}
	// Normalize the stepsize by the number of tilings so the effective update
	// magnitude is independent of the tiling resolution.
	if agent.IsStepsizeAdaptive {
		agent.stepsize = agent.AdaptiveAlpha / float64(agent.esarsaAcrobotSettings.NumTilings)
	} else {
		agent.stepsize = agent.Alpha / float64(agent.esarsaAcrobotSettings.NumTilings)
	}
	// Adam hyperparameters for the adaptive-stepsize update.
	agent.beta1 = 0.9
	agent.beta2 = 0.999
	agent.e = math.Pow(10, -8)
	// Offset the seed by the run index so runs are reproducible but distinct.
	agent.esarsaAcrobotSettings.Seed += int64(run)
	agent.rng = rand.New(rand.NewSource(agent.esarsaAcrobotSettings.Seed)) // Create a new rand source for reproducibility
	// Scalers normalize each of acrobot's 6 observation dimensions for tile-coding.
	bounds := []float64{
		maxFeatureAcrobot1,
		maxFeatureAcrobot2,
		maxFeatureAcrobot3,
		maxFeatureAcrobot4,
		maxFeatureAcrobot5,
		maxFeatureAcrobot6,
	}
	scalers := make([]util.Scaler, len(bounds))
	for i, b := range bounds {
		scalers[i] = util.NewScaler(-b, b, agent.esarsaAcrobotSettings.NumTiles)
	}
	agent.tiler, err = util.NewMultiTiler(6, agent.esarsaAcrobotSettings.NumTilings, scalers)
	if err != nil {
		return err
	}
	// Allocate one weight/trace/Adam-moment vector per action (acrobot has 3 actions).
	const numActions = 3
	numIndices := agent.tiler.NumberOfIndices()
	agent.weights = make([][]float64, numActions)
	agent.traces = make([][]float64, numActions)
	agent.m = make([][]float64, numActions)
	agent.v = make([][]float64, numActions)
	for a := 0; a < numActions; a++ {
		agent.weights[a] = make([]float64, numIndices)
		agent.traces[a] = make([]float64, numIndices)
		agent.m[a] = make([]float64, numIndices)
		agent.v[a] = make([]float64, numIndices)
	}
	agent.timesteps = 0
	agent.Message("esarsa acrobot settings", fmt.Sprintf("%+v", agent.esarsaAcrobotSettings))
	return nil
}
// Start provides an initial observation to the agent and returns the agent's action.
func (agent *ESarsaAcrobot) Start(state rlglue.State) rlglue.Action {
	// Tile-code the initial state into its active feature indices.
	features, err := agent.tiler.Tile(state)
	if err != nil {
		agent.Message("err", "agent.ESarsa is acting on garbage state because it couldn't create tiles: "+err.Error())
	}
	agent.oldStateActiveFeatures = features
	// Sample the first action from the expected-sarsa epsilon-greedy policy.
	chosen, _ := agent.PolicyExpectedSarsaLambda(features)
	agent.oldAction, _ = tpo.GetInt(chosen)
	agent.timesteps++
	if agent.EnableDebug {
		agent.Message("msg", "start")
	}
	return agent.oldAction
}
// Step provides a new observation and a reward to the agent and returns the agent's next action.
//
// It performs one expected-sarsa(lambda) update: computes the TD error against
// the expectation of action values under the epsilon-greedy policy, then
// updates the weights either by plain semi-gradient descent or by an
// Adam-style adaptive-stepsize rule.
func (agent *ESarsaAcrobot) Step(state rlglue.State, reward float64) rlglue.Action {
	newStateActiveFeatures, err := agent.tiler.Tile(state) // Indices of active features of the tile-coded state
	if err != nil {
		agent.Message("err", "agent.ESarsa is acting on garbage state because it couldn't create tiles: "+err.Error())
	}
	agent.delta = reward // TD error calculation begins
	for _, value := range agent.oldStateActiveFeatures {
		oldA, _ := tpo.GetInt(agent.oldAction)
		agent.delta -= agent.weights[oldA][value] // TD error prediction calculation
		agent.traces[oldA][value] = 1             // replacing active traces to 1
	}
	newAction, epsilons := agent.PolicyExpectedSarsaLambda(newStateActiveFeatures) // Exp-Sarsa-L policy
	// Target is the policy-weighted expectation of the next state's action values.
	for j := range agent.weights {
		for _, value := range newStateActiveFeatures {
			agent.delta += agent.Gamma * epsilons[j] * agent.weights[j][value] // TD error target calculation
		}
	}
	var g float64
	var mhat float64
	var vhat float64
	// Update weights and traces for all actions.
	for j := range agent.weights {
		for i := range agent.weights[j] {
			if agent.traces[j][i] != 0 { // update only where traces are non-zero
				if !agent.IsStepsizeAdaptive {
					agent.weights[j][i] += agent.stepsize * agent.delta * agent.traces[j][i] // Semi-gradient descent, update weights
				} else {
					// Adam-style update on the gradient g.
					g = -agent.delta * agent.traces[j][i]
					agent.m[j][i] = agent.beta1*agent.m[j][i] + (1-agent.beta1)*g
					// BUGFIX: the second-moment estimate must decay with beta2
					// (it previously used beta1, which is inconsistent with the
					// beta2 bias correction below).
					agent.v[j][i] = agent.beta2*agent.v[j][i] + (1-agent.beta2)*g*g
					// Bias-corrected moment estimates.
					mhat = agent.m[j][i] / (1 - math.Pow(agent.beta1, agent.timesteps))
					vhat = agent.v[j][i] / (1 - math.Pow(agent.beta2, agent.timesteps))
					agent.weights[j][i] -= agent.stepsize * mhat / (math.Pow(vhat, 0.5) + agent.e)
				}
				agent.traces[j][i] = agent.Gamma * agent.Lambda * agent.traces[j][i] // update traces
			}
		}
	}
	// New information is old for the next time step
	agent.oldStateActiveFeatures = newStateActiveFeatures
	agent.oldAction = newAction
	if agent.EnableDebug {
		agent.Message("msg", "step", "state", state, "reward", reward, "action", agent.oldAction)
	}
	agent.timesteps++
	return agent.oldAction
}
// End informs the agent that a terminal state has been reached, providing the final reward.
func (agent *ESarsaAcrobot) End(state rlglue.State, reward float64) {
	// Perform the final update with the terminal reward.
	agent.Step(state, reward)
	// Reset eligibility traces for the next episode: one fresh vector per action.
	agent.traces = make([][]float64, 3)
	for a := range agent.traces {
		agent.traces[a] = make([]float64, agent.tiler.NumberOfIndices())
	}
	if agent.EnableDebug {
		agent.Message("msg", "end", "state", state, "reward", reward)
	}
}
// PolicyExpectedSarsaLambda returns an action sampled from the epsilon-greedy
// policy over the tile-coded state, along with the probability of each of the
// three actions (used by Step to form the expected-sarsa target).
func (agent *ESarsaAcrobot) PolicyExpectedSarsaLambda(tileCodedStateActiveFeatures []int) (rlglue.Action, []float64) {
	// Action values for the three acrobot actions.
	actionValue0 := agent.ActionValue(tileCodedStateActiveFeatures, 0)
	actionValue1 := agent.ActionValue(tileCodedStateActiveFeatures, 1)
	actionValue2 := agent.ActionValue(tileCodedStateActiveFeatures, 2)
	greedyAction := agent.findArgmax([]float64{actionValue0, actionValue1, actionValue2})
	// Epsilon-greedy probabilities over all three actions: epsilon/3 each,
	// with the remaining 1-epsilon mass added to the greedy action.
	probs := make([]float64, 3)
	for i := range probs {
		probs[i] = agent.Epsilon / 3
	}
	probs[greedyAction] = 1 - agent.Epsilon + agent.Epsilon/3
	// Sample an action by inverse-CDF over the three probabilities.
	var action rlglue.Action
	randomval := agent.rng.Float64()
	switch {
	case randomval <= probs[0]:
		action = 0
	case randomval <= probs[0]+probs[1]:
		action = 1
	default:
		action = 2
	}
	return action, probs
}
// ActionValue returns the linear action value for a tile-coded state and
// action pair: the dot product of the action's weight vector with the binary
// feature vector, i.e. a sum of weights over the active feature indices.
func (agent *ESarsaAcrobot) ActionValue(tileCodedStateActiveFeatures []int, action rlglue.Action) float64 {
	// Hoist the action-to-int conversion out of the loop; it is loop-invariant.
	a, _ := tpo.GetInt(action)
	var actionValue float64
	for _, idx := range tileCodedStateActiveFeatures {
		actionValue += agent.weights[a][idx]
	}
	return actionValue
}
// GetLock reports whether this agent requires locking; it always returns false.
func (agent *ESarsaAcrobot) GetLock() bool {
	return false
}
// findArgmax returns the index of the largest value in array; ties resolve to
// the earliest index. The caller guarantees array is non-empty.
func (agent *ESarsaAcrobot) findArgmax(array []float64) int {
	best := 0
	for i := 1; i < len(array); i++ {
		if array[i] > array[best] {
			best = i
		}
	}
	return best
}
// SaveWeights is a no-op for this agent: it persists nothing and always
// returns nil.
func (agent *ESarsaAcrobot) SaveWeights(basePath string) error {
	return nil
}
// GetLearnProg reports learning progress; this agent does not track progress
// and always returns "0".
func (agent *ESarsaAcrobot) GetLearnProg() string {
	return "0"
}
func (agent *ESarsaAcrobot) PassInfo(info string, value float64) interface{} {
return nil
} | lib/agent/esarsa_acrobot.go | 0.722625 | 0.428771 | esarsa_acrobot.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.