code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package tsm1
import (
"github.com/influxdata/influxdb/tsdb"
)
// NOTE(review): this file appears to be generated (reader.gen.go); prefer
// changing the generator/template rather than hand-editing these wrappers.
// Each ReadXxxBlockAt method takes the reader lock around a single accessor
// call; the accessor performs its own bounds checking under its own lock.

// ReadFloatBlockAt returns the float values corresponding to the given index entry.
func (t *TSMReader) ReadFloatBlockAt(entry *IndexEntry, vals *[]FloatValue) ([]FloatValue, error) {
	t.mu.RLock()
	v, err := t.accessor.readFloatBlock(entry, vals)
	t.mu.RUnlock()
	return v, err
}

// ReadFloatArrayBlockAt fills vals with the float values corresponding to the given index entry.
func (t *TSMReader) ReadFloatArrayBlockAt(entry *IndexEntry, vals *tsdb.FloatArray) error {
	t.mu.RLock()
	err := t.accessor.readFloatArrayBlock(entry, vals)
	t.mu.RUnlock()
	return err
}

// ReadIntegerBlockAt returns the integer values corresponding to the given index entry.
func (t *TSMReader) ReadIntegerBlockAt(entry *IndexEntry, vals *[]IntegerValue) ([]IntegerValue, error) {
	t.mu.RLock()
	v, err := t.accessor.readIntegerBlock(entry, vals)
	t.mu.RUnlock()
	return v, err
}

// ReadIntegerArrayBlockAt fills vals with the integer values corresponding to the given index entry.
func (t *TSMReader) ReadIntegerArrayBlockAt(entry *IndexEntry, vals *tsdb.IntegerArray) error {
	t.mu.RLock()
	err := t.accessor.readIntegerArrayBlock(entry, vals)
	t.mu.RUnlock()
	return err
}

// ReadUnsignedBlockAt returns the unsigned values corresponding to the given index entry.
func (t *TSMReader) ReadUnsignedBlockAt(entry *IndexEntry, vals *[]UnsignedValue) ([]UnsignedValue, error) {
	t.mu.RLock()
	v, err := t.accessor.readUnsignedBlock(entry, vals)
	t.mu.RUnlock()
	return v, err
}

// ReadUnsignedArrayBlockAt fills vals with the unsigned values corresponding to the given index entry.
func (t *TSMReader) ReadUnsignedArrayBlockAt(entry *IndexEntry, vals *tsdb.UnsignedArray) error {
	t.mu.RLock()
	err := t.accessor.readUnsignedArrayBlock(entry, vals)
	t.mu.RUnlock()
	return err
}

// ReadStringBlockAt returns the string values corresponding to the given index entry.
func (t *TSMReader) ReadStringBlockAt(entry *IndexEntry, vals *[]StringValue) ([]StringValue, error) {
	t.mu.RLock()
	v, err := t.accessor.readStringBlock(entry, vals)
	t.mu.RUnlock()
	return v, err
}

// ReadStringArrayBlockAt fills vals with the string values corresponding to the given index entry.
func (t *TSMReader) ReadStringArrayBlockAt(entry *IndexEntry, vals *tsdb.StringArray) error {
	t.mu.RLock()
	err := t.accessor.readStringArrayBlock(entry, vals)
	t.mu.RUnlock()
	return err
}

// ReadBooleanBlockAt returns the boolean values corresponding to the given index entry.
func (t *TSMReader) ReadBooleanBlockAt(entry *IndexEntry, vals *[]BooleanValue) ([]BooleanValue, error) {
	t.mu.RLock()
	v, err := t.accessor.readBooleanBlock(entry, vals)
	t.mu.RUnlock()
	return v, err
}

// ReadBooleanArrayBlockAt fills vals with the boolean values corresponding to the given index entry.
func (t *TSMReader) ReadBooleanArrayBlockAt(entry *IndexEntry, vals *tsdb.BooleanArray) error {
	t.mu.RLock()
	err := t.accessor.readBooleanArrayBlock(entry, vals)
	t.mu.RUnlock()
	return err
}
// readFloatBlock decodes the float block described by entry.
//
// The read starts 4 bytes past entry.Offset — presumably skipping the
// block's 4-byte checksum; confirm against the TSM writer format.
func (m *accessor) readFloatBlock(entry *IndexEntry, values *[]FloatValue) ([]FloatValue, error) {
	m.incAccess()

	m.mu.RLock()
	// Reject entries pointing past the end of the mapped data; this also
	// catches reads after the file has been closed/unmapped.
	if int64(m.b.length()) < entry.Offset+int64(entry.Size) {
		m.mu.RUnlock()
		return nil, ErrTSMClosed
	}

	a, err := DecodeFloatBlock(m.b.read(entry.Offset+4, entry.Offset+int64(entry.Size)), values)
	m.mu.RUnlock()
	if err != nil {
		return nil, err
	}

	return a, nil
}

// readFloatArrayBlock decodes the float block described by entry into values.
func (m *accessor) readFloatArrayBlock(entry *IndexEntry, values *tsdb.FloatArray) error {
	m.incAccess()

	m.mu.RLock()
	if int64(m.b.length()) < entry.Offset+int64(entry.Size) {
		m.mu.RUnlock()
		return ErrTSMClosed
	}

	err := DecodeFloatArrayBlock(m.b.read(entry.Offset+4, entry.Offset+int64(entry.Size)), values)
	m.mu.RUnlock()

	return err
}

// readIntegerBlock decodes the integer block described by entry.
func (m *accessor) readIntegerBlock(entry *IndexEntry, values *[]IntegerValue) ([]IntegerValue, error) {
	m.incAccess()

	m.mu.RLock()
	if int64(m.b.length()) < entry.Offset+int64(entry.Size) {
		m.mu.RUnlock()
		return nil, ErrTSMClosed
	}

	a, err := DecodeIntegerBlock(m.b.read(entry.Offset+4, entry.Offset+int64(entry.Size)), values)
	m.mu.RUnlock()
	if err != nil {
		return nil, err
	}

	return a, nil
}

// readIntegerArrayBlock decodes the integer block described by entry into values.
func (m *accessor) readIntegerArrayBlock(entry *IndexEntry, values *tsdb.IntegerArray) error {
	m.incAccess()

	m.mu.RLock()
	if int64(m.b.length()) < entry.Offset+int64(entry.Size) {
		m.mu.RUnlock()
		return ErrTSMClosed
	}

	err := DecodeIntegerArrayBlock(m.b.read(entry.Offset+4, entry.Offset+int64(entry.Size)), values)
	m.mu.RUnlock()

	return err
}

// readUnsignedBlock decodes the unsigned block described by entry.
func (m *accessor) readUnsignedBlock(entry *IndexEntry, values *[]UnsignedValue) ([]UnsignedValue, error) {
	m.incAccess()

	m.mu.RLock()
	if int64(m.b.length()) < entry.Offset+int64(entry.Size) {
		m.mu.RUnlock()
		return nil, ErrTSMClosed
	}

	a, err := DecodeUnsignedBlock(m.b.read(entry.Offset+4, entry.Offset+int64(entry.Size)), values)
	m.mu.RUnlock()
	if err != nil {
		return nil, err
	}

	return a, nil
}

// readUnsignedArrayBlock decodes the unsigned block described by entry into values.
func (m *accessor) readUnsignedArrayBlock(entry *IndexEntry, values *tsdb.UnsignedArray) error {
	m.incAccess()

	m.mu.RLock()
	if int64(m.b.length()) < entry.Offset+int64(entry.Size) {
		m.mu.RUnlock()
		return ErrTSMClosed
	}

	err := DecodeUnsignedArrayBlock(m.b.read(entry.Offset+4, entry.Offset+int64(entry.Size)), values)
	m.mu.RUnlock()

	return err
}

// readStringBlock decodes the string block described by entry.
func (m *accessor) readStringBlock(entry *IndexEntry, values *[]StringValue) ([]StringValue, error) {
	m.incAccess()

	m.mu.RLock()
	if int64(m.b.length()) < entry.Offset+int64(entry.Size) {
		m.mu.RUnlock()
		return nil, ErrTSMClosed
	}

	a, err := DecodeStringBlock(m.b.read(entry.Offset+4, entry.Offset+int64(entry.Size)), values)
	m.mu.RUnlock()
	if err != nil {
		return nil, err
	}

	return a, nil
}

// readStringArrayBlock decodes the string block described by entry into values.
func (m *accessor) readStringArrayBlock(entry *IndexEntry, values *tsdb.StringArray) error {
	m.incAccess()

	m.mu.RLock()
	if int64(m.b.length()) < entry.Offset+int64(entry.Size) {
		m.mu.RUnlock()
		return ErrTSMClosed
	}

	err := DecodeStringArrayBlock(m.b.read(entry.Offset+4, entry.Offset+int64(entry.Size)), values)
	m.mu.RUnlock()

	return err
}

// readBooleanBlock decodes the boolean block described by entry.
func (m *accessor) readBooleanBlock(entry *IndexEntry, values *[]BooleanValue) ([]BooleanValue, error) {
	m.incAccess()

	m.mu.RLock()
	if int64(m.b.length()) < entry.Offset+int64(entry.Size) {
		m.mu.RUnlock()
		return nil, ErrTSMClosed
	}

	a, err := DecodeBooleanBlock(m.b.read(entry.Offset+4, entry.Offset+int64(entry.Size)), values)
	m.mu.RUnlock()
	if err != nil {
		return nil, err
	}

	return a, nil
}

// readBooleanArrayBlock decodes the boolean block described by entry into values.
func (m *accessor) readBooleanArrayBlock(entry *IndexEntry, values *tsdb.BooleanArray) error {
	m.incAccess()

	m.mu.RLock()
	if int64(m.b.length()) < entry.Offset+int64(entry.Size) {
		m.mu.RUnlock()
		return ErrTSMClosed
	}

	err := DecodeBooleanArrayBlock(m.b.read(entry.Offset+4, entry.Offset+int64(entry.Size)), values)
	m.mu.RUnlock()

	return err
} | tsdb/engine/tsm1/reader.gen.go | 0.754463 | 0.469399 | reader.gen.go | starcoder |
package layers
import (
"encoding/binary"
"errors"
"external/google/gopacket"
)
// PPP is the layer for PPP encapsulation headers.
type PPP struct {
	BaseLayer
	// PPPType is the protocol number of the encapsulated payload.
	PPPType PPPType
	// HasPPTPHeader records whether the frame carried the 0xff 0x03
	// address/control prefix, so it can be restored on serialization.
	HasPPTPHeader bool
}

// PPPEndpoint is a singleton endpoint for PPP. Since there is no actual
// addressing for the two ends of a PPP connection, we use a singleton value
// named 'point' for each endpoint.
var PPPEndpoint = gopacket.NewEndpoint(EndpointPPP, nil)

// PPPFlow is a singleton flow for PPP. Since there is no actual addressing for
// the two ends of a PPP connection, we use a singleton value to represent the
// flow for all PPP connections.
var PPPFlow = gopacket.NewFlow(EndpointPPP, nil, nil)

// LayerType returns LayerTypePPP
func (p *PPP) LayerType() gopacket.LayerType { return LayerTypePPP }

// LinkFlow returns PPPFlow.
func (p *PPP) LinkFlow() gopacket.Flow { return PPPFlow }
// decodePPP decodes a PPP frame, optionally preceded by the 0xff 0x03
// address/control (PPTP/HDLC) prefix, and hands the payload to the decoder
// registered for the detected PPPType.
//
// Unlike the previous implementation, it validates lengths instead of
// panicking with an index-out-of-range on truncated input.
func decodePPP(data []byte, p gopacket.PacketBuilder) error {
	ppp := &PPP{}
	offset := 0
	// Optional address/control prefix.
	if len(data) >= 2 && data[0] == 0xff && data[1] == 0x03 {
		offset = 2
		ppp.HasPPTPHeader = true
	}
	if offset >= len(data) {
		return errors.New("PPP packet too small")
	}
	if data[offset]&0x1 == 0 {
		// Two-byte protocol field: first byte even, second byte odd.
		if offset+1 >= len(data) || data[offset+1]&0x1 == 0 {
			return errors.New("PPP has invalid type")
		}
		ppp.PPPType = PPPType(binary.BigEndian.Uint16(data[offset : offset+2]))
		ppp.BaseLayer = BaseLayer{data[offset : offset+2], data[offset+2:]}
	} else {
		// Compressed single-byte protocol field.
		ppp.PPPType = PPPType(data[offset])
		ppp.BaseLayer = BaseLayer{data[offset : offset+1], data[offset+1:]}
	}
	p.AddLayer(ppp)
	p.SetLinkLayer(ppp)
	return p.NextDecoder(ppp.PPPType)
}
// LCP describes layer for PPP Link Control Protocol
type LCP struct {
	BaseLayer
	Code       LCPType
	Identifier uint8
	// Length is the total LCP packet length in bytes, including the
	// 4-byte code/identifier/length header.
	Length      uint16
	Options     []LCPOption
	MagicNumber []byte // applicable only LCP Echo Request/Reply
}

// LCPType describes PPP LCP layer Type
type LCPType uint8

// LayerType returns gopacket.LayerTypeLCP
func (p *LCP) LayerType() gopacket.LayerType {
	return LayerTypeLCP
}

// set of supported PPP LCP Type
const (
	LCPTypeConfigurationRequest LCPType = 0x01
	LCPTypeConfigurationAck     LCPType = 0x02
	LCPTypeTerminateRequest     LCPType = 0x05
	LCPTypeTerminateAck         LCPType = 0x06
	LCPTypeEchoRequest          LCPType = 0x09
	LCPTypeEchoReply            LCPType = 0x0a
)

// LCPOption describes zero or more optional information organized into PPP LCP layer
type LCPOption struct {
	Type LCPOptionType
	// Length is the option's full on-wire size: type + length + value bytes.
	Length uint8
	Value  []uint8
}

// LCPOptionType is an enumeration of LCPOption type values, and acts as a decoder for any
// type it supports. Refers to rfc1661 for details
type LCPOptionType uint8

// set of supported LCP Option Type
// RFC 1700
const (
	LCPOptionTypeMaximumReceiveUnit     LCPOptionType = 0x01
	LCPOptionTypeAuthenticationProtocol LCPOptionType = 0x03
	LCPOptionQualityProtocol            LCPOptionType = 0x04 // https://www.freesoft.org/CIE/RFC/1661/33.htm
	LCPOptionTypeMagicNumber            LCPOptionType = 0x05
)
// GetLCPSize returns size in byte of LCP layer
func (p *LCP) GetLCPSize() uint16 {
	// Fixed header: code (1) + identifier (1) + length field (2).
	size := uint16(4)
	switch p.Code {
	case LCPTypeEchoRequest, LCPTypeEchoReply:
		// Echo messages carry only the 4-byte magic number.
		size += 4
	default:
		// Each option contributes type (1) + length (1) + value bytes.
		for _, opt := range p.Options {
			size += 2 + uint16(len(opt.Value))
		}
	}
	return size
}
// decodeLCP decodes an LCP packet: a 4-byte header followed by either a magic
// number (echo request/reply) or a list of options.
//
// Fixes over the previous implementation: the option cursor was a uint8 that
// wrapped for packets longer than 255 bytes (Length is uint16); a malformed
// option with length 0 caused an infinite loop; and truncated packets caused
// index-out-of-range panics. All three cases now return an error instead.
func decodeLCP(data []byte, p gopacket.PacketBuilder) error {
	if len(data) < 4 {
		return errors.New("LCP packet too small")
	}
	lcp := &LCP{
		Code:        LCPType(data[0]),
		Identifier:  data[1],
		Length:      binary.BigEndian.Uint16(data[2:4]),
		Options:     []LCPOption{},
		MagicNumber: []byte{},
	}
	switch lcp.Code {
	case LCPTypeEchoRequest, LCPTypeEchoReply:
		lcp.MagicNumber = data[4:] // only magic number
	default:
		// Each option is type (1) + length (1) + value, where the length
		// byte covers the whole option.
		for offset := 4; offset < int(lcp.Length); {
			if offset+1 >= len(data) {
				return errors.New("LCP option truncated")
			}
			optLen := int(data[offset+1])
			if optLen < 2 || offset+optLen > len(data) || offset+optLen > int(lcp.Length) {
				return errors.New("LCP has malformed option")
			}
			lcp.Options = append(lcp.Options, LCPOption{
				Type:   LCPOptionType(data[offset]),
				Length: uint8(optLen),
				Value:  data[offset+2 : offset+optLen],
			})
			offset += optLen
		}
	}
	lcp.BaseLayer = BaseLayer{data[:], []uint8{}}
	p.AddLayer(lcp)
	return p.NextDecoder(lcp.Code)
}
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
func (p *PPP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	// NOTE(review): bit 0x100 clear selects the two-byte protocol
	// encoding, set selects the compressed one-byte form — confirm this
	// matches the decoder's even/odd first-byte rule (RFC 1661 §2).
	if p.PPPType&0x100 == 0 {
		bytes, err := b.PrependBytes(2)
		if err != nil {
			return err
		}
		binary.BigEndian.PutUint16(bytes, uint16(p.PPPType))
	} else {
		bytes, err := b.PrependBytes(1)
		if err != nil {
			return err
		}
		bytes[0] = uint8(p.PPPType)
	}
	if p.HasPPTPHeader {
		// Restore the HDLC-style address/control prefix seen on decode.
		bytes, err := b.PrependBytes(2)
		if err != nil {
			return err
		}
		bytes[0] = 0xff
		bytes[1] = 0x03
	}
	return nil
}
// SerializeTo writes the LCP layer into the serialization buffer.
//
// Fix: the previous implementation tracked the write position in a uint8,
// which wraps around for packets longer than 255 bytes even though Length is
// a uint16; an int cursor is used instead.
func (p *LCP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	bytes, err := b.PrependBytes(int(p.Length))
	if err != nil {
		return err
	}
	bytes[0] = uint8(p.Code)
	bytes[1] = p.Identifier
	binary.BigEndian.PutUint16(bytes[2:], p.Length)
	switch p.Code {
	case LCPTypeEchoRequest, LCPTypeEchoReply:
		copy(bytes[4:], p.MagicNumber) // only magic number
	default:
		// Each option's Length field covers its full type+length+value
		// footprint, so it is also the cursor advance.
		offset := 4
		for _, opt := range p.Options {
			bytes[offset] = uint8(opt.Type)
			bytes[offset+1] = opt.Length
			copy(bytes[offset+2:], opt.Value)
			offset += int(opt.Length)
		}
	}
	return nil
}
// PAP describes layer for Password Authentication Protocol
type PAP struct {
	BaseLayer
	Code       PAPType
	Identifier uint8
	// Length is the total PAP packet length in bytes, including the
	// 4-byte code/identifier/length header.
	Length uint16
	Data   []PAPData
}

// PAPType describes PAP message type
type PAPType uint8

// LayerType returns gopacket.LayerTypePAP
func (p *PAP) LayerType() gopacket.LayerType {
	return LayerTypePAP
}

// set of supported PAP message type
const (
	PAPTypeAuthRequest PAPType = 0x01
	PAPTypeAuthAck     PAPType = 0x02
	PAPTypeAuthNak     PAPType = 0x03
)

// PAPData struct holds all possible data carried by PAP: Peer-ID, Password and Message
type PAPData struct {
	// Length is the number of bytes in Value (the length prefix on the wire).
	Length uint8
	Value  []uint8
}
// AddPeerIDAndPassword fills Data with the given peer ID and password for an
// Auth-Request message. It panics if the message Code is not
// PAPTypeAuthRequest, since only Auth-Request messages carry these fields.
func (p *PAP) AddPeerIDAndPassword(peerID string, password string) {
	if p.Code != PAPTypeAuthRequest {
		panic("Current PAP message is NOT an Auth Request!")
	}
	p.Data = []PAPData{
		{
			Value:  []byte(peerID),
			Length: uint8(len(peerID)),
		},
		{
			Value:  []byte(password),
			Length: uint8(len(password)),
		},
	}
}
// GetPAPSize returns size in byte of PAP layer
func (p *PAP) GetPAPSize() uint16 {
	// Fixed header: code (1) + identifier (1) + length field (2).
	size := uint16(4)
	// Each data field is a 1-byte length prefix followed by its payload.
	for _, d := range p.Data {
		size += 1 + uint16(d.Length)
	}
	return size
}
// decodePAP decodes a PAP packet: a 4-byte header followed by one
// length-prefixed data field (two for Auth-Request: peer ID and password).
//
// Fix: the previous implementation indexed data without bounds checks and
// panicked on truncated or malformed packets; it now returns an error.
func decodePAP(data []byte, p gopacket.PacketBuilder) error {
	if len(data) < 4 {
		return errors.New("PAP packet too small")
	}
	pap := &PAP{
		Code:       PAPType(data[0]),
		Identifier: data[1],
		Length:     binary.BigEndian.Uint16(data[2:4]),
		Data:       []PAPData{},
	}
	// Auth-Request carries two fields (peer ID, password); all other
	// codes carry a single message field.
	nrDatas := 1
	if pap.Code == PAPTypeAuthRequest {
		nrDatas = 2
	}
	offset := 4
	for index := 0; index < nrDatas; index++ {
		if offset >= len(data) {
			return errors.New("PAP data field truncated")
		}
		fieldLen := int(data[offset])
		if offset+1+fieldLen > len(data) {
			return errors.New("PAP has malformed data field")
		}
		pap.Data = append(pap.Data, PAPData{
			Length: uint8(fieldLen),
			Value:  data[offset+1 : offset+1+fieldLen],
		})
		offset += 1 + fieldLen
	}
	pap.BaseLayer = BaseLayer{data[:], []uint8{}}
	p.AddLayer(pap)
	return p.NextDecoder(pap.Code)
}
// SerializeTo writes the PAP layer into the serialization buffer.
//
// Fix: the previous implementation tracked the write position in a uint8,
// which wraps around for packets longer than 255 bytes even though Length is
// a uint16; an int cursor is used instead.
func (p *PAP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	bytes, err := b.PrependBytes(int(p.Length))
	if err != nil {
		return err
	}
	bytes[0] = uint8(p.Code)
	bytes[1] = p.Identifier
	binary.BigEndian.PutUint16(bytes[2:], p.Length)
	offset := 4
	for _, data := range p.Data {
		bytes[offset] = data.Length
		copy(bytes[offset+1:], data.Value)
		offset += 1 + int(data.Length)
	}
	return nil
}
// IPCP describes layer for Internet Protocol Control Protocol
type IPCP struct {
	BaseLayer
	Code       IPCPType
	Identifier uint8
	// Length is the total IPCP packet length in bytes, including the
	// 4-byte code/identifier/length header.
	Length  uint16
	Options []IPCPOption
}

// IPCPType describes IPCP message type
type IPCPType uint8

// LayerType returns gopacket.LayerTypeIPCP
func (p *IPCP) LayerType() gopacket.LayerType {
	return LayerTypeIPCP
}

// set of supported IPCP message type
const (
	IPCPTypeConfigurationRequest IPCPType = 0x01
	IPCPTypeConfigurationAck     IPCPType = 0x02
	IPCPTypeConfigurationNak     IPCPType = 0x03
)

// IPCPOption holds a single IPCP configuration option (type, length, value)
type IPCPOption struct {
	Type IPCPOptionType
	// Length is the option's full on-wire size: type + length + value bytes.
	Length uint8
	Value  []uint8
}

// IPCPOptionType describes IPCP Option type
type IPCPOptionType uint8

// set of supported IPCP Option type
const (
	IPCPOptionTypeIPAddress IPCPOptionType = 0x03
)
// decodeIPCP decodes an IPCP packet: a 4-byte header followed by a list of
// configuration options.
//
// Fixes over the previous implementation (same defect family as decodeLCP):
// the option cursor was a uint8 that wrapped for packets longer than 255
// bytes; a malformed option with length 0 caused an infinite loop; truncated
// packets caused index-out-of-range panics. All now return an error.
func decodeIPCP(data []byte, p gopacket.PacketBuilder) error {
	if len(data) < 4 {
		return errors.New("IPCP packet too small")
	}
	ipcp := &IPCP{
		Code:       IPCPType(data[0]),
		Identifier: data[1],
		Length:     binary.BigEndian.Uint16(data[2:4]),
		Options:    []IPCPOption{},
	}
	// Each option is type (1) + length (1) + value, where the length byte
	// covers the whole option.
	for offset := 4; offset < int(ipcp.Length); {
		if offset+1 >= len(data) {
			return errors.New("IPCP option truncated")
		}
		optLen := int(data[offset+1])
		if optLen < 2 || offset+optLen > len(data) || offset+optLen > int(ipcp.Length) {
			return errors.New("IPCP has malformed option")
		}
		ipcp.Options = append(ipcp.Options, IPCPOption{
			Type:   IPCPOptionType(data[offset]),
			Length: uint8(optLen),
			Value:  data[offset+2 : offset+optLen],
		})
		offset += optLen
	}
	ipcp.BaseLayer = BaseLayer{data[:], []uint8{}}
	p.AddLayer(ipcp)
	return p.NextDecoder(ipcp.Code)
}
// SerializeTo writes the IPCP layer into the serialization buffer.
//
// Fix: the previous implementation tracked the write position in a uint8,
// which wraps around for packets longer than 255 bytes even though Length is
// a uint16; an int cursor is used instead.
func (p *IPCP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	bytes, err := b.PrependBytes(int(p.Length))
	if err != nil {
		return err
	}
	bytes[0] = uint8(p.Code)
	bytes[1] = p.Identifier
	binary.BigEndian.PutUint16(bytes[2:], p.Length)
	// Each option's Length field covers its full type+length+value
	// footprint, so it is also the cursor advance.
	offset := 4
	for _, opt := range p.Options {
		bytes[offset] = uint8(opt.Type)
		bytes[offset+1] = opt.Length
		copy(bytes[offset+2:], opt.Value)
		offset += int(opt.Length)
	}
	return nil
}
// GetIPCPSize returns size in byte of IPCP layer
func (p *IPCP) GetIPCPSize() uint16 {
	// Fixed header: code (1) + identifier (1) + length field (2).
	size := uint16(4)
	// Each option's Length already includes its type and length bytes.
	for _, opt := range p.Options {
		size += uint16(opt.Length)
	}
	return size
}
// GetProposedIPAddress analyzes Options and returns proposed IP Address as []byte
func (p *IPCP) GetProposedIPAddress() []byte {
	if len(p.Options) == 0 {
		panic("Handled IPCP packet does not carry IP Address to be acquired!")
	}
	addr := make([]byte, 4)
	// If several IP-Address options are present, the last one wins,
	// matching the historical behavior of this method.
	for _, opt := range p.Options {
		if opt.Type == IPCPOptionTypeIPAddress {
			copy(addr, opt.Value)
		}
	}
	return addr
} | src/external/google/gopacket/layers/ppp.go | 0.682574 | 0.453443 | ppp.go | starcoder |
package farmer
const guide = `
Automated Testing Tool Guide
We practice "Continuous Integration" (CI), that is, we
automatically run a set of tests on every commit, before
we land it. We do this with an tool called testbot.
Testbot is oriented around pull requests. For any open
pull request, it runs tests on the commit at the head of
the pull request branch, and reports the results to
GitHub as "status" objects on that commit, which GitHub
then displays in its UI as a green dot or red X next to
the commit, as well as in more detail at the bottom of
the pull request page. When a new commit is pushed to
that branch, making the previous head commit obsolete,
testbot cancels any tests still running on obsolete
commits and starts running tests on the new head.
Quick Start
To add a new test, make a file called Testfile in the
directory where you want the test to run. The test will
run when any file changes anywhere in the tree rooted in
this directory. A Testfile looks like this:
# this is a Testfile
npmtest: npm test
gotest: go test
Testfile Format
A Testfile contains one-line entries. Each entry defines
a test. An entry is an alphanumeric test name followed
by a colon followed by a shell command. For example:
rubocop: bundle && bundle exec rubocop
gctrace: GODEBUG=gctrace=1 go test
Lines beginning with # are ignored. Blank lines are
ignored. Lines that don't fit this format are an error.
Finding Tests
Here's how testbot finds tests to run.
It looks for a file called Testfile in every directory
affected by the pull request, and runs all the tests in
all the Testfiles it finds.
What does it mean for a directory to be "affected" by
the pull request? Specifically, it looks at the pull
request diff (from the nearest common ancestor of the
base branch and the pull request branch to the head of
the pull request branch) to find a list of all files
added, removed, renamed, or modified. It then considers
any ancestor directory of any of these files to be
affected. So, in a pull request that deleted a/b/c,
renamed d/e to f/g, created h/i, and edited but did not
rename or move the file j/k, the set of affected
directories would be /, /a, /a/b, /d, /f, /h, and /j.
Note in particular that merely deleting a file from a
directory will run the tests in that directory.
Test Environment
Each test runs on a machine image derived from a stock
Ubuntu AMI, modified by $TESTED_REPO/testbot/Aptfile and
$TESTED_REPO/testbot/setup.sh.
The test runner runs each test in a controlled
environment:
- makes a fresh, clean checkout in a new workspace
- sets some environment variables
- runs the test command in the Testfile's directory
If the process exits with a 0 status, the test passes.
It collects output from the test process (by redirecting
both stdout and stderr to the same file on disk). When
the test finishes (either passing or failing), it saves
the output file in S3 and links to it from the "Details"
link on the pull request page.
Test Environment Nitty-Gritty
There is much to say here. Generally, no one needs to
worry about any of it.
Currently, the test process has minimal isolation. It's
run as an ordinary user (the same user as the test
runner!) in an ordinary directory (no chroot or pivot
root or mount namespace) with ordinary network access.
In the future, we might want to put the test in a more
aggressive sandbox, if only to make writing tests easier
(so test writers don't need to worry so much about
cleaning up after themselves).
The test runner starts each test process in a new Unix
process group. When the test process finishes, it sends
a KILL (9) signal to the process group to kill any child
processes started by the test. (But if the test starts
any of the child processes in a new process group, this
won't kill them, so they may linger even after the next
test has started. If this becomes a problem, we can fix
it by putting the test in a cgroup.)
` | farmer/guide.go | 0.544317 | 0.703753 | guide.go | starcoder |
package xgp
import (
"bytes"
"fmt"
"math/rand"
"strconv"
"strings"
"github.com/MaxHalford/eaopt"
"github.com/MaxHalford/xgp/metrics"
"github.com/MaxHalford/xgp/op"
)
// A GPConfig contains all the information needed to instantiate an GP.
type GPConfig struct {
	// Learning parameters
	LossMetric metrics.Metric
	// EvalMetric defaults to LossMetric when nil (see NewGP).
	EvalMetric metrics.Metric
	// ParsimonyCoeff penalizes program size during fitness evaluation.
	ParsimonyCoeff float64
	PolishBest     bool

	// Function parameters
	// Funcs is a comma-separated list of function names (e.g. "add,sub").
	Funcs    string
	ConstMin float64
	ConstMax float64
	// PConst is the probability of generating a constant leaf.
	PConst float64
	// PFull is the probability of using "full" (vs "grow") initialization.
	PFull float64
	// PLeaf is the probability of generating a leaf during "grow" init.
	PLeaf     float64
	MinHeight uint
	MaxHeight uint

	// Genetic algorithm parameters
	NPopulations      uint
	NIndividuals      uint
	NGenerations      uint
	PHoistMutation    float64
	PSubtreeMutation  float64
	PPointMutation    float64
	PointMutationRate float64
	PSubtreeCrossover float64

	// Other
	RNG *rand.Rand
}
// String representation of a GPConfig. It returns a string containing the
// parameters line by line.
func (c GPConfig) String() string {
	// Local shorthands for the repetitive numeric formatting.
	ff := func(x float64) string { return strconv.FormatFloat(x, 'g', -1, 64) }
	fi := func(x uint) string { return strconv.Itoa(int(x)) }
	rows := [][2]string{
		{"Loss metric", c.LossMetric.String()},
		{"Evaluation metric", c.EvalMetric.String()},
		{"Parsimony coefficient", ff(c.ParsimonyCoeff)},
		{"Polish best program", strconv.FormatBool(c.PolishBest)},
		{"Functions", c.Funcs},
		{"Constant minimum", ff(c.ConstMin)},
		{"Constant maximum", ff(c.ConstMax)},
		{"Constant probability", ff(c.PConst)},
		{"Full initialization probability", ff(c.PFull)},
		{"Terminal probability", ff(c.PLeaf)},
		{"Minimum height", fi(c.MinHeight)},
		{"Maximum height", fi(c.MaxHeight)},
		{"Number of populations", fi(c.NPopulations)},
		{"Number of individuals per population", fi(c.NIndividuals)},
		{"Number of generations", fi(c.NGenerations)},
		{"Hoist mutation probability", ff(c.PHoistMutation)},
		{"Subtree mutation probability", ff(c.PSubtreeMutation)},
		{"Point mutation probability", ff(c.PPointMutation)},
		{"Point mutation rate", ff(c.PointMutationRate)},
		{"Subtree crossover probability", ff(c.PSubtreeCrossover)},
	}
	var b bytes.Buffer
	for _, row := range rows {
		fmt.Fprintf(&b, "%s: %s\n", row[0], row[1])
	}
	// Drop the trailing newline added by the last row.
	return strings.TrimSuffix(b.String(), "\n")
}
// NewGP returns an GP from a GPConfig.
func NewGP(c GPConfig) (*GP, error) { return c.NewGP() } // NOTE(review): kept as a method below.

// NewGP returns an GP from a GPConfig.
func (c GPConfig) NewGP() (*GP, error) {
	// Default the evaluation metric to the fitness metric if it's nil
	if c.EvalMetric == nil {
		c.EvalMetric = c.LossMetric
	}
	// The convention is to use a fitness metric which has to be minimized,
	// so bigger-is-better metrics are wrapped in a Negative adapter.
	if c.LossMetric.BiggerIsBetter() {
		c.LossMetric = metrics.Negative{Metric: c.LossMetric}
	}
	// Determine the functions to use
	functions, err := op.ParseFuncs(c.Funcs, ",")
	if err != nil {
		return nil, err
	}
	// Instantiate an GP
	var estimator = &GP{
		GPConfig:   c,
		Functions:  functions,
		EvalMetric: c.EvalMetric,
		LossMetric: c.LossMetric,
		Initializer: RampedHaldAndHalfInit{
			PFull:    c.PFull,
			FullInit: FullInit{},
			GrowInit: GrowInit{
				PLeaf: c.PLeaf,
			},
		},
	}
	// Set the initial GA
	estimator.GA, err = eaopt.GAConfig{
		NPops:        c.NPopulations,
		PopSize:      c.NIndividuals,
		NGenerations: c.NGenerations,
		HofSize:      1,
		Model: gaModel{
			selector: eaopt.SelTournament{
				NContestants: 3,
			},
			pMutate:    c.PHoistMutation + c.PPointMutation + c.PSubtreeMutation,
			pCrossover: c.PSubtreeCrossover,
		},
		RNG:          c.RNG,
		ParallelEval: true,
	}.NewGA()
	if err != nil {
		return nil, err
	}
	// Build fm which maps arities to functions
	estimator.fm = make(map[uint][]op.Operator)
	for _, f := range estimator.Functions {
		var arity = f.Arity()
		if _, ok := estimator.fm[arity]; ok {
			estimator.fm[arity] = append(estimator.fm[arity], f)
		} else {
			estimator.fm[arity] = []op.Operator{f}
		}
	}
	// Set subtree crossover. The weights below bias node selection towards
	// internal nodes (arity > 0) over leaves.
	estimator.SubtreeCrossover = SubtreeCrossover{
		Weight: func(operator op.Operator, depth uint, rng *rand.Rand) float64 {
			if operator.Arity() == 0 {
				return 0.1 // MAGIC
			}
			return 0.9 // MAGIC
		},
	}
	// Set point mutation
	estimator.PointMutation = PointMutation{
		Rate: c.PointMutationRate,
		Mutate: func(operator op.Operator, rng *rand.Rand) op.Operator {
			return estimator.mutateOperator(operator, rng)
		},
	}
	// Set hoist mutation
	estimator.HoistMutation = HoistMutation{
		Weight1: func(operator op.Operator, depth uint, rng *rand.Rand) float64 {
			if operator.Arity() == 0 {
				return 0.1 // MAGIC
			}
			return 0.9 // MAGIC
		},
		Weight2: func(operator op.Operator, depth uint, rng *rand.Rand) float64 {
			return 1 // MAGIC
		},
	}
	// Set subtree mutation
	estimator.SubtreeMutation = SubtreeMutation{
		Weight: func(operator op.Operator, depth uint, rng *rand.Rand) float64 {
			if operator.Arity() == 0 {
				return 0.1 // MAGIC
			}
			return 0.9 // MAGIC
		},
		NewOperator: func(rng *rand.Rand) op.Operator {
			return estimator.newOperator(rng)
		},
	}
	return estimator, nil
}
// NewDefaultGPConfig returns a GPConfig with default values.
// EvalMetric is deliberately nil so NewGP defaults it to LossMetric.
func NewDefaultGPConfig() GPConfig {
	return GPConfig{
		LossMetric:        metrics.MSE{},
		EvalMetric:        nil,
		ParsimonyCoeff:    0,
		PolishBest:        true,
		Funcs:             "add,sub,mul,div",
		ConstMin:          -5,
		ConstMax:          5,
		MinHeight:         3,
		MaxHeight:         5,
		PConst:            0.5,
		PFull:             0.5,
		PLeaf:             0.3,
		NPopulations:      1,
		NIndividuals:      100,
		NGenerations:      30,
		PHoistMutation:    0.1,
		PPointMutation:    0.1,
		PSubtreeMutation:  0.1,
		PointMutationRate: 0.3,
		PSubtreeCrossover: 0.5,
	}
} | gp_config.go | 0.774413 | 0.433022 | gp_config.go | starcoder |
package maps
import (
"github.com/dairaga/gs"
"github.com/dairaga/gs/funcs"
"github.com/dairaga/gs/slices"
)
// Keys returns a slice of all keys.
func (m M[K, V]) Keys() slices.S[K] {
	return Fold(
		m,
		make(slices.S[K], 0, len(m)),
		func(z slices.S[K], k K, _ V) slices.S[K] {
			return append(z, k)
		},
	)
}

// Values returns a slice of all values.
func (m M[K, V]) Values() slices.S[V] {
	return Fold(
		m,
		make(slices.S[V], 0, len(m)),
		func(z slices.S[V], _ K, v V) slices.S[V] {
			return append(z, v)
		},
	)
}

// Add adds pairs into map.
func (m M[K, V]) Add(pairs ...Pair[K, V]) M[K, V] {
	for _, p := range pairs {
		m[p.Key] = p.Value
	}
	return m
}

// Put adds key and value into map.
func (m M[K, V]) Put(key K, val V) M[K, V] {
	m[key] = val
	return m
}

// Merge merges another map a into this. Values in this may be overwritten by values in a if keys are in both a and m.
func (m M[K, V]) Merge(a M[K, V]) M[K, V] {
	for k, v := range a {
		m[k] = v
	}
	return m
}

// Contain returns true if m has given key x.
func (m M[K, V]) Contain(x K) (ok bool) {
	_, ok = m[x]
	return
}

// Count returns the number of elements in m satisfying given function p.
func (m M[K, V]) Count(p func(K, V) bool) int {
	return Fold(m, 0, func(a int, k K, v V) int {
		return funcs.Cond(p(k, v), a+1, a)
	})
}

// Find returns the first key-value pair of m satisfying given function p.
// Map iteration order is unspecified, so it might return different results
// for different runs.
func (m M[K, V]) Find(p func(K, V) bool) gs.Option[Pair[K, V]] {
	for k, v := range m {
		if p(k, v) {
			return gs.Some(P(k, v))
		}
	}
	return gs.None[Pair[K, V]]()
}
// Exists returns true if at least one element in m satisfies given function p.
func (m M[K, V]) Exists(p func(K, V) bool) bool {
	for k, v := range m {
		if p(k, v) {
			return true
		}
	}
	return false
}

// Filter returns a new map made of elements in m satisfying given function p.
func (m M[K, V]) Filter(p func(K, V) bool) M[K, V] {
	return Fold(
		m,
		make(M[K, V]),
		func(z M[K, V], k K, v V) M[K, V] {
			if p(k, v) {
				z[k] = v
			}
			return z
		},
	)
}

// FilterNot returns a new map made of elements in m not satisfying given function p.
func (m M[K, V]) FilterNot(p func(K, V) bool) M[K, V] {
	return m.Filter(func(k K, v V) bool { return !p(k, v) })
}

// Forall returns true if this is an empty map or all elements satisfy given function p.
func (m M[K, V]) Forall(p func(K, V) bool) bool {
	for k, v := range m {
		if !p(k, v) {
			return false
		}
	}
	return true
}

// Foreach applies given function op to each element in this.
func (m M[K, V]) Foreach(op func(K, V)) {
	for k, v := range m {
		op(k, v)
	}
}

// Partition partitions this into two maps according to given function p.
// The first map is made of elements not satisfying p, and the second map is
// made of elements satisfying p.
func (m M[K, V]) Partition(p func(K, V) bool) (_, _ M[K, V]) {
	t2 := Fold(
		m,
		gs.T2(make(M[K, V]), make(M[K, V])),
		func(
			z gs.Tuple2[M[K, V], M[K, V]],
			k K,
			v V) gs.Tuple2[M[K, V], M[K, V]] {

			if p(k, v) {
				z.V2[k] = v
			} else {
				z.V1[k] = v
			}
			return z
		},
	)
	return t2.V1, t2.V2
}

// Slice returns a slice containing key-value pairs from this.
func (m M[K, V]) Slice() slices.S[Pair[K, V]] {
	return Fold(
		m,
		make(slices.S[Pair[K, V]], 0, len(m)),
		func(z slices.S[Pair[K, V]], k K, v V) slices.S[Pair[K, V]] {
			return append(z, P(k, v))
		},
	)
} | maps/map_method.go | 0.824108 | 0.462534 | map_method.go | starcoder |
package main
// appDescription is the long help text for the root parq command.
const appDescription = `parq is a tool for exploring parquet files.

parq helps with viewing data in a parquet file, viewing a
file's schema, and converting data to/from parquet files.

Read more here: https://github.com/a-poor/parq
Submit issues here: https://github.com/a-poor/parq/issues
`

// cmdSchemaDesc is the long help text for the "schema" subcommand.
const cmdSchemaDesc = `Prints a table showing a parquet file's column names and data types.

Expects FILENAME to be a valid path to a parquet file with at least
one row.

Example:
  $ parq schema path/to/iris.parquet
  Column Name   Data Type
  Sepal_length  float64
  Sepal_width   float64
  Petal_length  float64
  Petal_width   float64
  Species       string

Related Commands: show, head, tail, random
`

// cmdShowDesc is the long help text for the "show" subcommand.
const cmdShowDesc = `Prints the full data contained in the specified parquet file as a formatted table.

Expects FILENAME to be a valid path to a parquet file with at least
one row.

Example:
  $ parq show path/to/iris.parquet
       Sepal_length  Sepal_width  Petal_length  Petal_width  Species
    0  5.1           3.5          1.4           0.2          setosa
    1  4.9           3            1.4           0.2          setosa
    2  4.7           3.2          1.3           0.2          setosa
    3  4.6           3.1          1.5           0.2          setosa
    4  5             3.6          1.4           0.2          setosa
    5  5.4           3.9          1.7           0.4          setosa
  ...
  145  6.7           3            5.2           2.3          virginica
  146  6.3           2.5          5             1.9          virginica
  147  6.5           3            5.2           2            virginica
  148  6.2           3.4          5.4           2.3          virginica
  149  5.9           3            5.1           1.8          virginica

Related Commands: schema, head, tail, random
`

// cmdHeadDesc is the long help text for the "head" subcommand.
const cmdHeadDesc = `Prints the first "n-rows" rows of data contained in the specified parquet file as a formatted table.

Expects FILENAME to be a valid path to a parquet file with at least
one row.

Example:
  $ parq head path/to/iris.parquet
     Sepal_length  Sepal_width  Petal_length  Petal_width  Species
  0  5.1           3.5          1.4           0.2          setosa
  1  4.9           3            1.4           0.2          setosa
  2  4.7           3.2          1.3           0.2          setosa
  3  4.6           3.1          1.5           0.2          setosa
  4  5             3.6          1.4           0.2          setosa
  5  5.4           3.9          1.7           0.4          setosa
  6  4.6           3.4          1.4           0.3          setosa
  7  5             3.4          1.5           0.2          setosa
  8  4.4           2.9          1.4           0.2          setosa
  9  4.9           3.1          1.5           0.1          setosa

Related Commands: schema, show, tail, random
`
const cmdTailDesc = `Prints the last "n-rows" rows of data contained in the specified parquet file as a formatted table.
Expects FILENAME to be a valid path to a parquet file with at least
one row.
Example:
$ parq head path/to/iris.parquet
Sepal_length Sepal_width Petal_length Petal_width Species
140 6.7 3.1 5.6 2.4 virginica
141 6.9 3.1 5.1 2.3 virginica
142 5.8 2.7 5.1 1.9 virginica
143 6.8 3.2 5.9 2.3 virginica
144 6.7 3.3 5.7 2.5 virginica
145 6.7 3 5.2 2.3 virginica
146 6.3 2.5 5 1.9 virginica
147 6.5 3 5.2 2 virginica
148 6.2 3.4 5.4 2.3 virginica
149 5.9 3 5.1 1.8 virginica
Related Commands: schema, show, head, random
`
const cmdRandomDesc = `Prints "n-rows" rows randomly selected from the specified parquet file as a formatted table.
Note: Currently, the rows will be randomly selected WITH replacement.
Expects FILENAME to be a valid path to a parquet file with at least
one row.
The RNG can be seeded using the "--seed" flag. If set to the default value of 0, the current system time will be used.
Example:
$ parq head path/to/iris.parquet
Sepal_length Sepal_width Petal_length Petal_width Species
140 6.7 3.1 5.6 2.4 virginica
141 6.9 3.1 5.1 2.3 virginica
142 5.8 2.7 5.1 1.9 virginica
143 6.8 3.2 5.9 2.3 virginica
144 6.7 3.3 5.7 2.5 virginica
145 6.7 3 5.2 2.3 virginica
146 6.3 2.5 5 1.9 virginica
147 6.5 3 5.2 2 virginica
148 6.2 3.4 5.4 2.3 virginica
149 5.9 3 5.1 1.8 virginica
Related Commands: schema, show, head, tail
` | cliDocs.go | 0.715623 | 0.792304 | cliDocs.go | starcoder |
package nn
import (
"encoding/json"
"fmt"
"io/ioutil"
"github.com/klahssen/go-mat"
"github.com/klahssen/nn/internal/activation"
)
//Perceptron is the simplest neuron, representing a function P. It applies an activation function f to s which is the weighted sum of its inputs + bias: output=f(w*x+b). the multiplication here is a dot product and wx+b is a scalar
type Perceptron struct {
	inSize int // expected length of the input vector x
	w *mat.M64 //size 1*inSize
	ftype string // name of the activation function type (see internal/activation)
	fparams []float64 // parameters used to build the activation function
	f activation.F // activation function and its derivative
	cost activation.F // cost function and its derivative (used by BackProp)
	b float64 // bias
	alpha float64 //learning rate
	s float64 // cached pre-activation w*x+b from the last Compute call
	a float64 // cached output f(s) from the last Compute call
}
//Size returns the size expected for the input vector x passed to Compute.
func (p *Perceptron) Size() int {
	return p.inSize
}
//publicPerceptron is the JSON-serializable mirror of Perceptron: the same
//state exposed as exported fields so it can be marshaled to and from
//configuration files. The function fields F and Cost are excluded from JSON
//(tagged "-"); the activation is rebuilt from Ftype/Fparams on load.
type publicPerceptron struct {
	InSize int `json:"in_size"`
	W []float64 `json:"w"` //size 1*inSize
	F activation.F `json:"-"`
	Cost activation.F `json:"-"`
	Ftype string `json:"ftype"`
	Fparams []float64 `json:"fparams"`
	B float64 `json:"b"`
	Alpha float64 `json:"alpha"` //learning rate
	S float64 `json:"s"`
	A float64 `json:"a"`
}
//export copies the perceptron's private state into its JSON-serializable
//mirror (see publicPerceptron) for persistence.
func (p *Perceptron) export() *publicPerceptron {
	return &publicPerceptron{InSize: p.inSize, W: p.w.GetData(), F: p.f, Cost: p.cost, Ftype: p.ftype, Fparams: p.fparams, B: p.b, Alpha: p.alpha, S: p.s, A: p.a}
}
//inject overwrites p's state with the values from def and rebuilds the
//activation function from def.Ftype/def.Fparams.
//It returns an error if either pointer is nil or the activation cannot be
//built; note that p's fields are already mutated when the GetF error is
//returned.
//NOTE(review): def.F and def.Cost are ignored — in particular p.cost is NOT
//restored here, so a perceptron loaded via FromJSON keeps a nil cost function
//until one is set elsewhere. Confirm this is intended.
func (p *Perceptron) inject(def *publicPerceptron) error {
	if p == nil {
		return fmt.Errorf("perceptron is nil")
	}
	if def == nil {
		return fmt.Errorf("definition is nil")
	}
	p.inSize = def.InSize
	p.w = mat.NewM64(1, p.inSize, def.W)
	p.ftype = def.Ftype
	p.fparams = def.Fparams
	p.b = def.B
	p.alpha = def.Alpha
	p.s = def.S
	p.a = def.A
	// Rebuild the activation function from its serialized description.
	F, err := activation.GetF(p.ftype, p.fparams)
	p.f = F
	return err
}
//FromJSON reads the file at filename, unmarshals the JSON config and sets
//the Perceptron's definition.
//It returns an error if the file cannot be read, the JSON is invalid, or the
//definition cannot be injected.
func (p *Perceptron) FromJSON(filename string) error {
	b, err := ioutil.ReadFile(filename)
	if err != nil {
		// Bug fix: the read error was previously swallowed ("return nil"),
		// silently leaving the perceptron unmodified.
		return err
	}
	pp := &publicPerceptron{}
	if err = json.Unmarshal(b, pp); err != nil {
		return err
	}
	return p.inject(pp)
}
//JSON stores the neuron's definition in a json file at filename (file mode
//0666) and echoes the marshaled configuration to stdout.
//It returns an error if marshaling or writing fails.
func (p *Perceptron) JSON(filename string) error {
	b, err := json.Marshal(p.export())
	if err != nil {
		return err
	}
	if err = ioutil.WriteFile(filename, b, 0666); err != nil {
		return err
	}
	fmt.Printf("Perceptron Configuration:\n%s\n", string(b))
	return nil
}
//Compute evaluates P(x) = f(w*x + b) for input vector x and returns it.
//x must be shaped so that w*x is a 1x1 matrix (inSize rows, 1 column).
//As a side effect the pre-activation sum is cached in p.s and the output
//in p.a (both are read later by BackProp).
func (p *Perceptron) Compute(x *mat.M64) (float64, error) {
	res, err := mat.Mul(p.w, x)
	if err != nil {
		return 0.0, err
	}
	// fn adds the bias before applying the activation, so mapping it over
	// the 1x1 product yields f(w*x + b).
	fn := func(x float64) float64 {
		return p.f.Func(x + p.b)
	}
	//fmt.Printf("W*X= %v\n", res.GetData())
	p.s = res.AtInd(0) + p.b
	if err = res.MapElem(fn); err != nil {
		return 0.0, err
	}
	p.a = res.AtInd(0)
	return p.a, nil
}
//UpdateCoefs replaces the perceptron's bias and weights in one call.
//data must hold exactly 1+len(weights) values: data[0] becomes the bias and
//data[1:] become the weights, in order. It returns an error if the slice has
//the wrong length or the weight matrix rejects the new values.
func (p *Perceptron) UpdateCoefs(data []float64) error {
	expected := p.w.Size() + 1 // one bias value followed by the weights
	if len(data) != expected {
		return fmt.Errorf("data should count 1 bias + %d weights", p.w.Size())
	}
	p.b = data[0]
	if err := p.w.SetData(data[1:]); err != nil {
		return fmt.Errorf("can not update weights: %s", err.Error())
	}
	return nil
}
//Validate reports whether the perceptron is fully initialized: non-nil
//receiver and weight matrix, plus activation and cost functions with their
//derivatives. It returns a descriptive error for the first missing piece,
//or nil when everything is usable.
func (p *Perceptron) Validate() error {
	if p == nil {
		return fmt.Errorf("perceptron is nil")
	}
	switch {
	case p.w == nil:
		return fmt.Errorf("weight matrix is nil")
	case p.f.Func == nil:
		return fmt.Errorf("activation function is nil")
	case p.f.Deriv == nil:
		return fmt.Errorf("activation derivative is nil")
	case p.cost.Func == nil:
		return fmt.Errorf("cost function is nil")
	case p.cost.Deriv == nil:
		return fmt.Errorf("cost derivative is nil")
	}
	return nil
}
//BackProp performs one gradient-descent step on the bias and weights, given
//the input x of the most recent Compute call and the observed error err.
//The step is delta = alpha * cost'(err) * f'(s); the bias moves by -delta and
//weight i by -delta*x_i.
//NOTE(review): f' is evaluated at the cached pre-activation p.s, so callers
//must invoke Compute(x) immediately before BackProp(x, err).
func (p *Perceptron) BackProp(x *mat.M64, err float64) {
	//cost := p.cost.Func(err)
	dcost := p.cost.Deriv(err) //derivative of the cost applied to the err
	dsig := p.f.Deriv(p.s) // activation derivative at the cached pre-activation
	delta := p.alpha * dcost * dsig
	//fmt.Printf("cost: %f, dcost: %v, dsig: %v, delta: %f\n", cost, dcost, dsig, delta)
	p.b -= delta
	r, c := p.w.Dims()
	inputs := x.GetData()
	// Per-weight update: delta scaled by the corresponding input component.
	vals := make([]float64, x.Size())
	for i := range vals {
		vals[i] = delta * inputs[i]
	}
	p.w.Sub(mat.NewM64(r, c, vals))
	// Debug trace of the updated coefficients; NOTE(review): consider removing
	// this unconditional print for library use.
	fmt.Printf("new w: %v, new b: %f\n", p.w.GetData(), p.b)
}
//NewPerceptron is a Perceptron constructor.
//inSize is the input vector length (must be >0); learningRate must lie in
//]0;1]. ftype and fparams describe the activation function: unless ftype is
//activation.FuncTypeCustom, the activation is rebuilt from them and the
//passed f is replaced. cost supplies the cost function/derivative used by
//BackProp. Weights start at the matrix's default values and the bias at 0.
func NewPerceptron(inSize int, learningRate float64, ftype string, fparams []float64, f, cost activation.F) (*Perceptron, error) {
	if inSize <= 0 {
		return nil, fmt.Errorf("input size is <=0")
	}
	if learningRate <= 0 || learningRate > 1 {
		return nil, fmt.Errorf("learning rate must be in ]0;1]")
	}
	p := &Perceptron{inSize: inSize, w: mat.NewM64(1, inSize, nil), ftype: ftype, fparams: fparams, f: f, cost: cost, alpha: learningRate, b: 0.0}
	// For non-custom types the activation is derived from ftype/fparams so it
	// round-trips through JSON serialization.
	if ftype != activation.FuncTypeCustom {
		f2, err := activation.GetF(ftype, fparams)
		if err != nil {
			return nil, err
		}
		p.f = f2
	}
	// Surface an incomplete configuration (e.g. missing cost) immediately.
	err := p.Validate()
	return p, err
}
package shack
import (
"encoding/json"
"errors"
"net/url"
"reflect"
"strconv"
"strings"
)
type (
	// rawFlow holds a raw (URL-unescaped) query string such as "a=1&b=2".
	rawFlow string
	// valueFlow holds a single scalar value kept in its string form.
	valueFlow string
	// bodyFlow holds a raw request body.
	bodyFlow []byte
	// formFlow holds multi-valued form data (key -> values).
	formFlow map[string][]string
)

// newRawFlow URL-unescapes value and wraps it as a rawFlow.
// The QueryUnescape error is deliberately discarded (best effort).
func newRawFlow(value string) rawFlow {
	value, _ = url.QueryUnescape(value)
	return rawFlow(value)
}

// newValueFlow wraps a scalar string value.
func newValueFlow(value string) valueFlow {
	return valueFlow(value)
}

// newBodyFlow wraps a raw request body.
func newBodyFlow(value []byte) bodyFlow {
	return value
}

// newFormFlow wraps parsed form data.
func newFormFlow(value map[string][]string) formFlow {
	return value
}
// Value returns the raw value of the workflow.
func(f rawFlow) Value() string {
	return string(f)
}

// Value returns the raw value of the workflow.
func(f valueFlow) Value() string {
	return string(f)
}

// Int parses the raw value as a base-10 int.
// Invalid input yields 0 (the Atoi error is discarded).
func(f valueFlow) Int() int {
	i, _ := strconv.Atoi(f.Value())
	return i
}
// Int64 parses the raw value as a base-10 64-bit integer.
// Invalid input yields 0; out-of-range input saturates per strconv.ParseInt.
// Parsing directly at 64-bit width fixes truncation through Int() on
// platforms where int is 32 bits.
func (f valueFlow) Int64() int64 {
	i, _ := strconv.ParseInt(f.Value(), 10, 64)
	return i
}
// Int8 parses the raw value as a base-10 8-bit integer.
// Invalid input yields 0; out-of-range input saturates at the int8 limits
// per strconv.ParseInt (previously the value silently wrapped modulo 256).
func (f valueFlow) Int8() int8 {
	i, _ := strconv.ParseInt(f.Value(), 10, 8)
	return int8(i)
}
// Float64 parses the raw value as a 64-bit float; invalid input yields 0.
func(f valueFlow) Float64() float64 {
	f64, _ := strconv.ParseFloat(f.Value(), 64)
	return f64
}

// Bool parses the raw value with strconv.ParseBool ("1", "t", "true", ...);
// invalid input yields false.
func(f valueFlow) Bool() bool {
	b, _ := strconv.ParseBool(f.Value())
	return b
}

// Value returns the raw request body.
func(f bodyFlow) Value() []byte {
	return f
}

// Value returns the raw form data (key -> values).
func(f formFlow) Value() map[string][]string {
	return f
}
// BindJson unmarshals the raw string value as JSON into dst,
// which must be a pointer.
func(f valueFlow) BindJson(dst interface{}) error {
	return json.Unmarshal([]byte(f), dst)
}

// BindJson unmarshals the raw request body as JSON into dst,
// which must be a pointer.
func(f bodyFlow) BindJson(dst interface{}) error {
	return json.Unmarshal(f, dst)
}
// Bind parses the raw query-style value ("k1=v1&k2=v2") into key/value pairs
// and assigns them onto the struct pointed to by dst, matching fields by the
// given struct tag (or by field name when no tag is given).
// It returns an error if dst is not a non-nil pointer.
func (f rawFlow) Bind(dst interface{}, tag ...string) error {
	p := reflect.ValueOf(dst)
	if p.Kind() != reflect.Ptr || p.IsNil() {
		return errors.New("dst must be a pointer")
	}
	m := make(map[string]string)
	for _, segment := range strings.Split(f.Value(), "&") {
		// Bug fix: SplitN keeps '=' characters inside the value intact;
		// the previous unbounded Split dropped everything after the second '='.
		kv := strings.SplitN(segment, "=", 2)
		if len(kv) == 2 {
			m[kv[0]] = kv[1]
		}
	}
	return mapTo(p.Elem(), m, tag...)
}
// Bind assigns the form values onto the struct pointed to by dst, matching
// fields by the given struct tag (or by field name when no tag is given).
// Only the first value of each form key is used.
// It returns an error if dst is not a non-nil pointer.
func (f formFlow) Bind(dst interface{}, tag ...string) error {
	p := reflect.ValueOf(dst)
	if p.Kind() != reflect.Ptr || p.IsNil() {
		return errors.New("dst is not a pointer")
	}
	m := map[string]string{}
	for k, v := range f {
		// Robustness fix: skip keys with an empty value slice; indexing v[0]
		// unconditionally would panic.
		if len(v) > 0 {
			m[k] = v[0]
		}
	}
	return mapTo(reflect.Indirect(p), m, tag...)
}
// reset is a no-op: the flow types carry no state that needs clearing.
// NOTE(review): presumably these exist to satisfy a shared flow interface
// defined elsewhere in the package — confirm.
func(f rawFlow) reset() {}
func(f valueFlow) reset() {}
func(f bodyFlow) reset() {}
func(f formFlow) reset() {}
// mapTo assigns the string key/value pairs of m onto rv, which must be a
// non-nil map or a struct. For structs, keys are matched against the given
// struct tag when provided (a ",omitempty" suffix is stripped and "-" skips
// the field), falling back to the field name. Values are converted to the
// destination kind with toValue. Kinds other than Map and Struct are ignored.
//
// NOTE(review): setting an unexported struct field makes reflect.Value.Set
// panic; callers are expected to bind onto exported fields only.
func mapTo(rv reflect.Value, m map[string]string, tag ...string) error {
	if rv.Kind() != reflect.Struct && rv.IsNil() {
		return errors.New("dst is nil")
	}
	switch rv.Kind() {
	case reflect.Map:
		kType := rv.Type().Key().Kind()
		vType := rv.Type().Elem().Kind()
		for k, v := range m {
			rv.SetMapIndex(toValue(k, kType), toValue(v, vType))
		}
	case reflect.Struct:
		for k, v := range m {
			t := rv.Type()
			size := rv.NumField()
			if size == 0 {
				return errors.New("dst struct doesn't have any fields")
			}
		fieldLoop:
			for i := 0; i < size; i++ {
				field := t.Field(i)
				if field.Type.Kind() == reflect.Struct {
					// Bug fix: recurse into the actual field value. The old
					// code recursed on reflect.ValueOf(field.Type) — the type
					// descriptor — so nested structs were never populated.
					err := mapTo(rv.Field(i), m, tag...)
					if err != nil {
						return err
					}
				}
				var _tag string
				if len(tag) > 0 {
					_tag = tag[0]
				}
				key := field.Tag.Get(_tag)
				if len(key) == 0 {
					// No tag: fall back to matching the field name exactly.
					if field.Name == k {
						rv.Field(i).Set(toValue(v, rv.Field(i).Kind()))
					}
					continue fieldLoop
				}
				if key == "-" {
					continue fieldLoop
				}
				key = strings.TrimSuffix(key, ",omitempty")
				if key == k {
					rv.Field(i).Set(toValue(v, rv.Field(i).Kind()))
				}
			}
		}
	}
	return nil
}
func toValue(src string, dType reflect.Kind) reflect.Value {
switch dType {
case reflect.Bool:
b, _ := strconv.ParseBool(src)
return reflect.ValueOf(b)
case reflect.Int:
i, _ := strconv.Atoi(src)
return reflect.ValueOf(i)
case reflect.Int8:
i, _ := strconv.Atoi(src)
return reflect.ValueOf(int8(i))
case reflect.Int16:
i, _ := strconv.Atoi(src)
return reflect.ValueOf(int16(i))
case reflect.Int32:
i, _ := strconv.Atoi(src)
return reflect.ValueOf(int32(i))
case reflect.Int64:
i, _ := strconv.Atoi(src)
return reflect.ValueOf(int64(i))
case reflect.Uint:
i, _ := strconv.Atoi(src)
return reflect.ValueOf(uint(i))
case reflect.Uint8:
i, _ := strconv.Atoi(src)
return reflect.ValueOf(uint8(i))
case reflect.Uint16:
i, _ := strconv.Atoi(src)
return reflect.ValueOf(uint16(i))
case reflect.Uint32:
i, _ := strconv.Atoi(src)
return reflect.ValueOf(uint32(i))
case reflect.Uint64:
i, _ := strconv.Atoi(src)
return reflect.ValueOf(uint64(i))
case reflect.Float32:
f, _ := strconv.ParseFloat(src, 32)
return reflect.ValueOf(float32(f))
case reflect.Float64:
f, _ := strconv.ParseFloat(src, 64)
return reflect.ValueOf(f)
case reflect.Interface:
var i interface{} = src
return reflect.ValueOf(i)
}
return reflect.ValueOf(src)
} | flow.go | 0.728555 | 0.500671 | flow.go | starcoder |
package triangulate
import (
"sort"
"github.com/go-spatial/geom"
"github.com/go-spatial/geom/cmp"
"github.com/go-spatial/geom/planar/triangulate/quadedge"
)
/*
DelaunayTriangulationBuilder is a utility class which creates Delaunay
Triangulations from collections of points and extract the resulting
triangulation edges or triangles as geometries.
Author <NAME>
Ported to Go by <NAME>
*/
type DelaunayTriangulationBuilder struct {
	siteCoords []quadedge.Vertex // deduplicated input sites (set by SetSites)
	tolerance float64 // snapping tolerance passed to the subdivision
	subdiv *quadedge.QuadEdgeSubdivision // lazily built by create(); nil until then
}
// PointByXY implements sort.Interface over vertices, ordering them with
// cmp.XYLessPoint (by its name, X first with Y as the tie-breaker).
type PointByXY []quadedge.Vertex

func (xy PointByXY) Less(i, j int) bool { return cmp.XYLessPoint(xy[i], xy[j]) }
func (xy PointByXY) Swap(i, j int) { xy[i], xy[j] = xy[j], xy[i] }
func (xy PointByXY) Len() int { return len(xy) }
// NewDelaunayTriangulationBuilder creates an empty builder with the given
// snapping tolerance; call SetSites before extracting edges or triangles.
func NewDelaunayTriangulationBuilder(tolerance float64) *DelaunayTriangulationBuilder {
	return &DelaunayTriangulationBuilder{tolerance: tolerance}
}
/*
extractUniqueCoordinates extracts the unique points from the given Geometry.
g - the geometry to extract from
Returns a list of the unique vertices, sorted by X then Y.
If dtb is nil a panic will occur.
*/
func (dtb *DelaunayTriangulationBuilder) extractUniqueCoordinates(g geom.Geometry) ([]quadedge.Vertex, error) {
	if g == nil {
		return []quadedge.Vertex{}, nil
	}
	coords, err := geom.GetCoordinates(g)
	if err != nil {
		return nil, err
	}
	verts := make([]quadedge.Vertex, 0, len(coords))
	for _, c := range coords {
		verts = append(verts, quadedge.Vertex{c[0], c[1]})
	}
	return dtb.unique(verts), nil
}
/*
unique sorts the vertices by X then Y and removes adjacent duplicates,
reusing the input's backing array (the input slice is mutated).
If dtb is nil a panic will occur.
*/
func (dtb *DelaunayTriangulationBuilder) unique(points []quadedge.Vertex) []quadedge.Vertex {
	sort.Sort(PointByXY(points))
	// In-place filter: duplicates are adjacent after sorting.
	out := points[:0]
	for i := 0; i < len(points); i++ {
		if i > 0 && cmp.PointEqual(points[i], points[i-1]) {
			continue
		}
		out = append(out, points[i])
	}
	return out
}
/**
* Converts all {@link Coordinate}s in a collection to {@link Vertex}es.
* @param coords the coordinates to convert
* @return a List of Vertex objects
public static List toVertices(Collection coords)
{
List verts = new ArrayList();
for (Iterator i = coords.iterator(); i.hasNext(); ) {
Coordinate coord = (Coordinate) i.next();
verts.add(new Vertex(coord));
}
return verts;
}
*/
/**
* Computes the {@link Envelope} of a collection of {@link Coordinate}s.
*
* @param coords a List of Coordinates
* @return the envelope of the set of coordinates
public static Envelope envelope(Collection coords)
{
Envelope env = new Envelope();
for (Iterator i = coords.iterator(); i.hasNext(); ) {
Coordinate coord = (Coordinate) i.next();
env.expandToInclude(coord);
}
return env;
}
*/
/*
SetSites sets the vertices which will be triangulated. All vertices of the
given geometry will be used as sites.
geom - the geometry from which the sites will be extracted.
If dtb is nil a panic will occur.
*/
func (dtb *DelaunayTriangulationBuilder) SetSites(g geom.Geometry) error {
	// remove any duplicate points (they will cause the triangulation to fail)
	c, err := dtb.extractUniqueCoordinates(g)
	// The (possibly nil) site list is stored even when extraction failed.
	dtb.siteCoords = c
	return err
}
/**
* Sets the sites (vertices) which will be triangulated
* from a collection of {@link Coordinate}s.
*
* @param coords a collection of Coordinates.
public void setSites(Collection coords)
{
// remove any duplicate points (they will cause the triangulation to fail)
siteCoords = unique(CoordinateArrays.toCoordinateArray(coords));
}
*/
/**
* Sets the snapping tolerance which will be used
* to improved the robustness of the triangulation computation.
* A tolerance of 0.0 specifies that no snapping will take place.
*
* @param tolerance the tolerance distance to use
public void setTolerance(double tolerance)
{
this.tolerance = tolerance;
}
*/
/*
create will create the triangulation.
return true on success, false on failure.
create is idempotent: an already-built subdivision is reused. It returns
false only when no sites have been set.
If dtb is nil a panic will occur.
*/
func (dtb *DelaunayTriangulationBuilder) create() bool {
	if dtb.subdiv != nil {
		return true // already built
	}
	if len(dtb.siteCoords) == 0 {
		return false
	}
	// Compute the bounding extent of all sites.
	var siteEnv *geom.Extent
	for _, v := range dtb.siteCoords {
		if siteEnv == nil {
			siteEnv = geom.NewExtent(v)
		}
		siteEnv.AddGeometry(v)
	}
	dtb.subdiv = quadedge.NewQuadEdgeSubdivision(*siteEnv, dtb.tolerance)
	triangulator := new(IncrementalDelaunayTriangulator)
	triangulator.subdiv = dtb.subdiv
	// NOTE(review): InsertSites' result (if any) is ignored here — confirm
	// failures are surfaced elsewhere.
	triangulator.InsertSites(dtb.siteCoords)
	return true
}
/*
GetSubdivision gets the QuadEdgeSubdivision which models the computed
triangulation, building it on first use.
Returns the subdivision containing the triangulation or nil if it has
not been created (no sites were set).
If dtb is nil a panic will occur.
*/
func (dtb *DelaunayTriangulationBuilder) GetSubdivision() *quadedge.QuadEdgeSubdivision {
	dtb.create()
	return dtb.subdiv
}
/*
GetEdges gets the edges of the computed triangulation as a MultiLineString,
building the triangulation on first use.
returns the edges of the triangulation, or an empty MultiLineString when no
sites have been set.
If dtb is nil a panic will occur.
*/
func (dtb *DelaunayTriangulationBuilder) GetEdges() geom.MultiLineString {
	if !dtb.create() {
		return geom.MultiLineString{}
	}
	return dtb.subdiv.GetEdgesAsMultiLineString()
}
/*
GetTriangles gets the faces of the computed triangulation as a MultiPolygon,
building the triangulation on first use.
Unlike JTS, this method returns a MultiPolygon. I found not all viewers like
displaying collections. -JRS
Returns an empty MultiPolygon (and nil error) when no sites have been set.
If dtb is nil a panic will occur.
*/
func (dtb *DelaunayTriangulationBuilder) GetTriangles() (geom.MultiPolygon, error) {
	if !dtb.create() {
		return geom.MultiPolygon{}, nil
	}
	return dtb.subdiv.GetTriangles()
}
package texture
import (
"github.com/jphsd/graphics2d/util"
"math"
)
// NonLinear is used to create a field of circles that uses a non-linear function to fill the circles.
type NonLinear struct {
	LambdaX, LambdaY float64 // [1,...) tile size (wavelength) in x and y
	PhaseX, PhaseY float64 // [0,1] fractional phase shift within a tile
	OffsetX, OffsetY float64 // [0,1] per-row / per-column tile stagger
	FFunc func(float64) float64 // optional post-filter applied to the [-1,1] result
	CosTh, SinTh float64 // cached cosine/sine of the tiling's rotation angle
	NLFunc util.NonLinear // radial fill function used inside each circle
	Dist, Inset float64 // usable tile fraction (1-2*Inset) and normalized inset
}
// NewNonLinear creates a new instance of NonLinear. A circle/elipse is rendered using the supplied
// non-linear function and inset within a box of size lambdaX by lambdaY.
// theta rotates the tiling; inset (same units as the lambdas) shrinks the
// circle inside each tile and is capped at half the smaller wavelength.
func NewNonLinear(lambdaX, lambdaY, theta float64, nl util.NonLinear, inset float64) *NonLinear {
	// Clamp both wavelengths to at least 1.
	if lambdaX < 1 {
		lambdaX = 1
	}
	if lambdaY < 1 {
		lambdaY = 1
	}
	// Snap to quad: force cos/sin of the rotation to exact 0/±1 when they are
	// numerically close (closeTo is defined elsewhere in this package).
	ct := math.Cos(theta)
	if closeTo(0, ct) {
		ct = 0
	} else if closeTo(1, ct) {
		ct = 1
	} else if closeTo(-1, ct) {
		ct = -1
	}
	st := math.Sin(theta)
	if closeTo(0, st) {
		st = 0
	} else if closeTo(1, st) {
		st = 1
	} else if closeTo(-1, st) {
		st = -1
	}
	// Normalize the inset by the smaller wavelength and cap at half a tile.
	if inset < 0 {
		inset = 0
	}
	if lambdaX > lambdaY {
		inset /= lambdaY
	} else {
		inset /= lambdaX
	}
	if inset > 0.5 {
		inset = 0.5
	}
	dist := 1 - 2*inset // fraction of the tile available to the circle
	return &NonLinear{lambdaX, lambdaY, 0, 0, 0, 0, nil, ct, st, nl, dist, inset}
}
// Eval2 implements the Field interface: it returns the field value at (x, y)
// in [-1,1]. The point is rotated into the tiling frame, mapped to tile
// coordinates by XYtoUV, and filled radially with NLFunc inside the inset
// circle; points outside the circle map to -1. FFunc, when set, post-filters
// the result.
func (nl *NonLinear) Eval2(x, y float64) float64 {
	// Rotate the point into the tile frame.
	u := x*nl.CosTh + y*nl.SinTh
	v := -x*nl.SinTh + y*nl.CosTh
	u, v = nl.XYtoUV(u, v)
	res := 0.0
	if u > nl.Inset && u < nl.Dist+nl.Inset && v > nl.Inset && v < nl.Dist+nl.Inset {
		// Within inset, rescale to [0,1]
		u, v = (u-nl.Inset)/nl.Dist, (v-nl.Inset)/nl.Dist
		dx, dy := 0.5-u, 0.5-v
		// d is 0 at the tile center and 1 on the inscribed circle's edge.
		d := 2 * math.Sqrt(dx*dx+dy*dy)
		if d <= 1 {
			res = nl.NLFunc.Transform(1 - d)
		}
	}
	// Map [0,1] to [-1,1], optionally through the filter function.
	if nl.FFunc == nil {
		return res*2 - 1
	}
	return nl.FFunc(res*2 - 1)
}
// XYtoUV converts values in (-inf,inf) to [0,1] based on the generator's orientation, lambdas and phase values.
// Rows are additionally staggered by OffsetX (or columns by OffsetY) per tile
// crossed, producing a brick-like pattern. Only one of the two offsets is
// honoured per call: OffsetX when non-zero, otherwise OffsetY.
func (nl *NonLinear) XYtoUV(x, y float64) (float64, float64) {
	// Wrap x into [0, LambdaX], counting tiles crossed in nx.
	nx := 0
	for x < 0 {
		x += nl.LambdaX
		nx--
	}
	for x > nl.LambdaX {
		x -= nl.LambdaX
		nx++
	}
	// Wrap y into [0, LambdaY], counting tiles crossed in ny.
	ny := 0
	for y < 0 {
		y += nl.LambdaY
		ny--
	}
	for y > nl.LambdaY {
		y -= nl.LambdaY
		ny++
	}
	if !util.Equals(0, nl.OffsetX) {
		// Shift this row by the fractional part of ny*OffsetX.
		offs := float64(ny) * nl.OffsetX
		offs -= math.Floor(offs)
		if offs < 0 {
			// NOTE(review): offs is in [0,1) after the Floor subtraction, so
			// this branch looks unreachable — confirm.
			offs = 1 - offs
		}
		u := x/nl.LambdaX + nl.PhaseX + offs
		for u > 1 {
			u -= 1
		}
		v := y/nl.LambdaY + nl.PhaseY
		if v > 1 {
			v -= 1
		}
		return u, v
	}
	u := x/nl.LambdaX + nl.PhaseX
	for u > 1 {
		u -= 1
	}
	// Shift this column by the fractional part of nx*OffsetY.
	offs := float64(nx) * nl.OffsetY
	offs -= math.Floor(offs)
	if offs < 0 {
		offs = 1 - offs
	}
	v := y/nl.LambdaY + nl.PhaseY + offs
	for v > 1 {
		v -= 1
	}
	return u, v
}
package coordtarns
import (
"gonum.org/v1/gonum/mat"
"math"
)
const Re = 6378.137 // Earth equatorial radius in km (value noted 2002.4.7)
const Ra = 6378137.0 // WGS84 ellipsoid semi-major axis, meters
const Rb = 6356752.314245179497 // WGS84 ellipsoid semi-minor axis, meters
const e1 = 0.081819190842621 // first eccentricity, e1 = sqrt(Ra*Ra-Rb*Rb)/Ra
const e22 = 0.082094437949656 // second eccentricity
const e2 = 0.993305458 // approximately 1 - e1*e1 (complement of the squared first eccentricity)
const M_PI = math.Pi // alias matching the C-style source this was presumably ported from
// radar describes a radar site: geodetic longitude and latitude in degrees
// and altitude in meters (converted to radians/km inside each transform).
type radar struct {
	radar_jd float64 // site longitude, degrees
	radar_wd float64 // site latitude, degrees
	radar_h float64 // site altitude, meters
}
// rae_xyz converts radar polar coordinates — range r in meters, azimuth a and
// elevation e in degrees — to the radar's local Cartesian frame in km.
func (this *radar) rae_xyz(r float64, a float64, e float64) (xx, yy, zz float64) {
	rr := r
	if rr < 0.0000000001 {
		// Clamp near-zero ranges to a small positive floor.
		// NOTE(review): the floor (1e-8) differs from the threshold (1e-10) —
		// confirm which value was intended.
		rr = 0.00000001
	}
	ar := a * math.Pi / 180.
	er := e * math.Pi / 180.
	ce := math.Cos(er)
	xx = rr * ce * math.Sin(ar) / 1000.
	yy = rr * ce * math.Cos(ar) / 1000.
	zz = rr * math.Sin(er) / 1000.0
	return
}
// xyz_rae converts local Cartesian coordinates in km back to radar polar
// coordinates: range r in meters, azimuth a in [0,360) degrees (measured from
// the y axis toward x, matching rae_xyz) and elevation e in degrees.
func (this *radar) xyz_rae(xx, yy, zz float64) (r, a, e float64) {
	r = math.Sqrt(xx*xx + yy*yy + zz*zz)
	if a = math.Atan2(xx, yy) * 180. / math.Pi; a < 0 {
		a += 360.0 // normalize azimuth into [0, 360)
	}
	e = math.Asin(zz/r) * 180. / math.Pi
	r *= 1000.0 // km -> meters
	return
}
// xyz_XYZ converts a target from the radar's local Cartesian frame (km) to
// WGS84 Earth-centered Cartesian coordinates (km): X = R2^T * x + T2, where
// R2 encodes the site's orientation and T2 is the site position.
func (this *radar) xyz_XYZ(x, y, z float64) (XX, YY, ZZ float64) {
	R2 := mat.NewDense(3, 3, nil) // site rotation matrix
	T2 := mat.NewDense(3, 1, nil) // site position in the WGS84 frame, km
	mTargetWGS84 := mat.NewDense(3, 1, nil)
	mTargetRadar := mat.NewDense(3, 1, nil)
	var rj, rw, rh float64
	var pj, qj, pw, qw, N float64
	rj = this.radar_jd * M_PI / 180.0 // site longitude, radians
	rw = this.radar_wd * M_PI / 180.0 // site latitude, radians
	rh = this.radar_h / 1000.0 // site altitude, km
	pj = math.Sin(rj)
	qj = math.Cos(rj)
	pw = math.Sin(rw)
	qw = math.Cos(rw)
	// Prime-vertical radius of curvature at the site latitude, km.
	N = Ra / (1000.0 * math.Sqrt(1-e1*e1*pw*pw))
	mTargetRadar.Set(0, 0, x)
	mTargetRadar.Set(1, 0, y)
	mTargetRadar.Set(2, 0, z)
	R2.Set(0, 0, -pj)
	R2.Set(0, 1, qj)
	R2.Set(0, 2, 0)
	R2.Set(1, 0, -pw*qj)
	R2.Set(1, 1, -pw*pj)
	R2.Set(1, 2, qw)
	R2.Set(2, 0, qw*qj)
	R2.Set(2, 1, qw*pj)
	R2.Set(2, 2, pw)
	// Site position in the WGS84 frame (standard geodetic-to-ECEF formula).
	T2.Set(0, 0, (N+rh)*qw*qj)
	T2.Set(1, 0, (N+rh)*qw*pj)
	T2.Set(2, 0, (N*(1-e1*e1)+rh)*pw)
	tmp1 := mat.NewDense(3, 1, nil)
	tmp1.Product(R2.T(), mTargetRadar) // rotate into the WGS84 axes
	mTargetWGS84.Add(tmp1, T2) // translate by the site position
	XX = mTargetWGS84.At(0, 0)
	YY = mTargetWGS84.At(1, 0)
	ZZ = mTargetWGS84.At(2, 0)
	return
}
// XYZ_jwh converts WGS84 Earth-centered Cartesian coordinates (km) to
// geodetic longitude jd and latitude wd in degrees and height h in km,
// using Bowring's closed-form latitude approximation.
func (this *radar) XYZ_jwh(XX, YY, ZZ float64) (jd, wd, h float64) {
	var N, U float64
	// Work in meters.
	XX = XX * 1000.0
	YY = YY * 1000.0
	ZZ = ZZ * 1000.0
	p := math.Sqrt(XX*XX + YY*YY) // distance from the rotation axis
	jd = math.Atan2(YY, XX) * 180.0 / M_PI
	// Parametric (reduced) latitude, then the one-shot geodetic latitude.
	U = math.Atan2(ZZ, p*math.Sqrt((1-e1*e1)))
	wd = math.Atan2(ZZ+Rb*e22*e22*math.Sin(U)*math.Sin(U)*math.Sin(U), p-e1*e1*Ra*math.Cos(U)*math.Cos(U)*math.Cos(U))
	// Prime-vertical radius of curvature at the computed latitude, meters.
	N = Ra / math.Sqrt(1-e1*e1*math.Sin(wd)*math.Sin(wd))
	h = p/math.Cos(wd) - N
	wd = wd * 180.0 / M_PI
	h = h / 1000.0 // meters -> km
	return
}
// XYZ_xyz converts a target from WGS84 Earth-centered Cartesian coordinates
// (km) to the radar's local Cartesian frame (km). It is the inverse of
// xyz_XYZ: x_local = M^T * (X - X_site).
func (this *radar) XYZ_xyz(XX, YY, ZZ float64) (xx, yy, zz float64) {
	var rj, rw, rh float64
	var pj, qj, pw, qw float64
	mm := mat.NewDense(3, 3, nil) // site rotation matrix
	mX := mat.NewDense(3, 1, nil) // target position, WGS84 frame
	mx := mat.NewDense(3, 1, nil) // target position, local frame
	mc := mat.NewDense(3, 1, nil) // site position, WGS84 frame
	rj = this.radar_jd * M_PI / 180.0 // site longitude, radians
	rw = this.radar_wd * M_PI / 180.0 // site latitude, radians
	rh = this.radar_h // site altitude, meters (the /1000 below converts to km)
	pj = math.Sin(rj)
	qj = math.Cos(rj)
	pw = math.Sin(rw)
	qw = math.Cos(rw)
	// Radar site position in WGS84 Cartesian coordinates, km.
	N0 := Ra / math.Sqrt(1-e1*e1*pw*pw)
	X0 := (N0 + rh) * qw * qj / 1000.0
	Y0 := (N0 + rh) * qw * pj / 1000.0
	Z0 := (N0*(1-e1*e1) + rh) * pw / 1000.0
	mm.Set(0, 0, -pj)
	mm.Set(0, 1, -pw*qj)
	mm.Set(0, 2, qw*qj)
	mm.Set(1, 0, qj)
	mm.Set(1, 1, -pw*pj)
	mm.Set(1, 2, qw*pj)
	mm.Set(2, 0, 0)
	mm.Set(2, 1, qw)
	mm.Set(2, 2, pw)
	mc.Set(0, 0, X0)
	mc.Set(1, 0, Y0)
	mc.Set(2, 0, Z0)
	mX.Set(0, 0, XX)
	mX.Set(1, 0, YY)
	mX.Set(2, 0, ZZ)
	D := mat.NewDense(3, 1, nil)
	D.Sub(mX, mc) // offset of the target from the site
	mx.Product(mm.T(), D) // rotate into the local frame
	xx = mx.At(0, 0)
	yy = mx.At(1, 0)
	zz = mx.At(2, 0)
	return
}
// jwh_XYZ converts a geodetic position — longitude jd and latitude wd in
// degrees, height h in km — to WGS84 Earth-centered Cartesian coordinates
// in km (standard geodetic-to-ECEF formula).
// NOTE(review): the original comment labelled the output 东北天 (east-north-up),
// but the formula below is the Earth-centered conversion — confirm.
func (this *radar) jwh_XYZ(jd, wd, h float64) (XX, YY, ZZ float64) {
	L := jd * M_PI / 180.0 // longitude, radians
	B := wd * M_PI / 180.0 // latitude, radians
	H := h * 1000.0 // height, meters
	// Prime-vertical radius of curvature, meters.
	N := Ra / math.Sqrt(1-e1*e1*math.Sin(B)*math.Sin(B))
	XX = (N + H) * math.Cos(B) * math.Cos(L) / 1000.0
	YY = (N + H) * math.Cos(B) * math.Sin(L) / 1000.0
	ZZ = (N*(1-e1*e1) + H) * math.Sin(B) / 1000.0
	return
}
// jwh_rae converts a target's geodetic position — longitude jd and latitude
// wd in degrees, height h in meters — to radar polar coordinates: range r in
// meters, azimuth a and elevation e in degrees.
func (this *radar) jwh_rae(jd, wd, h float64) (r, a, e float64) {
	XX, YY, ZZ := this.jwh_XYZ(jd, wd, h/1000.0) // degrees, km (h converted m->km) ---> km
	x, y, z := this.XYZ_xyz(XX, YY, ZZ) // km ---> km
	r, a, e = this.xyz_rae(x, y, z) // km ---> meters, degrees
	return
}
// rae_jwh converts radar polar coordinates — range r in meters, azimuth a and
// elevation e in degrees — to a geodetic position: longitude jd and latitude
// wd in degrees, height h in meters.
func (this *radar) rae_jwh(r, a, e float64) (jd, wd, h float64) {
	x, y, z := this.rae_xyz(r, a, e) // m, degrees -> local km
	XX, YY, ZZ := this.xyz_XYZ(x, y, z) // local km -> WGS84 km
	jd, wd, h = this.XYZ_jwh(XX, YY, ZZ) // WGS84 km -> degrees, degrees, km
	h = h * 1000.0 // km -> meters
	return
}
package models
// Extension is documented here http://hl7.org/fhir/StructureDefinition/Extension
type Extension struct {
ID *string `bson:"id,omitempty" json:"id,omitempty"`
Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
URL string `bson:"url" json:"url"`
// An address expressed using postal conventions (as opposed to GPS or other location definition formats). This data type may be used to convey addresses for use in delivering mail as well as for visiting locations which might not be valid for mail delivery. There are a variety of postal address formats defined around the world.
ValueAddress *Address `json:"valueAddress,omitempty"`
// A duration of time during which an organism (or a process) has existed.
// ValueAge *Age `json:"valueAge,omitempty"`
// A text note which also contains information about who made the statement and when.
ValueAnnotation *Annotation `json:"valueAnnotation,omitempty"`
// For referring to data content defined in other formats.
ValueAttachment *Attachment `json:"valueAttachment,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueBase64Binary *string `json:"valueBase64Binary,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueBoolean *bool `json:"valueBoolean,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueCanonical *string `json:"valueCanonical,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueCode *string `json:"valueCode,omitempty"`
// A concept that may be defined by a formal reference to a terminology or ontology or may be provided by text.
ValueCodeableConcept *CodeableConcept `json:"valueCodeableConcept,omitempty"`
// A reference to a code defined by a terminology system.
ValueCoding *Coding `json:"valueCoding,omitempty"`
// Specifies contact information for a person or organization.
ValueContactDetail *ContactDetail `json:"valueContactDetail,omitempty"`
// Details for all kinds of technology mediated contact points for a person or organization, including telephone, email, etc.
ValueContactPoint *ContactPoint `json:"valueContactPoint,omitempty"`
// A contributor to the content of a knowledge asset, including authors, editors, reviewers, and endorsers.
// ValueContributor *Contributor `json:"valueContributor,omitempty"`
// A measured amount (or an amount that can potentially be measured). Note that measured amounts include amounts that are not precisely quantified, including amounts involving arbitrary units and floating currencies.
// ValueCount *Count `json:"valueCount,omitempty"`
// Describes a required data item for evaluation in terms of the type of data, and optional code or date-based filters of the data.
ValueDataRequirement *DataRequirement `json:"valueDataRequirement,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueDate *DateTime `json:"valueDate,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueDateTime *DateTime `json:"valueDateTime,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueDecimal *float64 `json:"valueDecimal,omitempty"`
// A length - a value with a unit that is a physical distance.
// ValueDistance *Distance `json:"valueDistance,omitempty"`
// Indicates how the medication is/was taken or should be taken by the patient.
ValueDosage *Dosage `json:"valueDosage,omitempty"`
// A length of time.
ValueDuration *Duration `json:"valueDuration,omitempty"`
// A expression that is evaluated in a specified context and returns a value. The context of use of the expression must specify the context in which the expression is evaluated, and how the result of the expression is used.
ValueExpression *Expression `json:"valueExpression,omitempty"`
// A human's name with the ability to identify parts and usage.
ValueHumanName *HumanName `json:"valueHumanName,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueId *string `json:"valueId,omitempty"`
// An identifier - identifies some entity uniquely and unambiguously. Typically this is used for business identifiers.
ValueIdentifier *Identifier `json:"valueIdentifier,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueInstant *string `json:"valueInstant,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueInteger *int `json:"valueInteger,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueMarkdown *string `json:"valueMarkdown,omitempty"`
// The metadata about a resource. This is content in the resource that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource.
ValueMeta *Meta `json:"valueMeta,omitempty"`
// An amount of economic utility in some recognized currency.
ValueMoney *Money `json:"valueMoney,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueOid *string `json:"valueOid,omitempty"`
// The parameters to the module. This collection specifies both the input and output parameters. Input parameters are provided by the caller as part of the $evaluate operation. Output parameters are included in the GuidanceResponse.
ValueParameterDefinition *ParameterDefinition `json:"valueParameterDefinition,omitempty"`
// A time period defined by a start and end date and optionally time.
ValuePeriod *Period `json:"valuePeriod,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValuePositiveInt *int `json:"valuePositiveInt,omitempty"`
// A measured amount (or an amount that can potentially be measured). Note that measured amounts include amounts that are not precisely quantified, including amounts involving arbitrary units and floating currencies.
ValueQuantity *Quantity `json:"valueQuantity,omitempty"`
// A set of ordered Quantities defined by a low and high limit.
ValueRange *Range `json:"valueRange,omitempty"`
// A relationship of two Quantity values - expressed as a numerator and a denominator.
ValueRatio *Ratio `json:"valueRatio,omitempty"`
// A reference from one resource to another.
ValueReference *Reference `json:"valueReference,omitempty"`
// Related artifacts such as additional documentation, justification, or bibliographic references.
ValueRelatedArtifact *RelatedArtifact `json:"valueRelatedArtifact,omitempty"`
// A series of measurements taken by a device, with upper and lower limits. There may be more than one dimension in the data.
// ValueSampledData *SampledData `json:"valueSampledData,omitempty"`
// A signature along with supporting context. The signature may be a digital signature that is cryptographic in nature, or some other signature acceptable to the domain. This other signature may be as simple as a graphical image representing a hand-written signature, or a signature ceremony Different signature approaches have different utilities.
ValueSignature *Signature `json:"valueSignature,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueString *string `json:"valueString,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueTime *string `json:"valueTime,omitempty"`
// Specifies an event that may occur multiple times. Timing schedules are used to record when things are planned, expected or requested to occur. The most common usage is in dosage instructions for medications. They are also used when planning care of various kinds, and may be used for reporting the schedule to which past regular activities were carried out.
ValueTiming *Timing `json:"valueTiming,omitempty"`
// A description of a triggering event. Triggering events can be named events, data events, or periodic, as determined by the type element.
ValueTriggerDefinition *TriggerDefinition `json:"valueTriggerDefinition,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueUnsignedInt *int `json:"valueUnsignedInt,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueUri *string `json:"valueUri,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueUrl *string `json:"valueUrl,omitempty"`
// Specifies clinical/business/etc. metadata that can be used to retrieve, index and/or categorize an artifact. This metadata can either be specific to the applicable population (e.g., age category, DRG) or the specific context of care (e.g., venue, care setting, provider of care).
ValueUsageContext *UsageContext `json:"valueUsageContext,omitempty"`
// Value of extension - must be one of a constrained set of the data types (see [Extensibility](extensibility.html) for a list).
ValueUuid *string `json:"valueUuid,omitempty"`
}
package exp
import (
"fmt"
"xelf.org/xelf/ast"
"xelf.org/xelf/knd"
"xelf.org/xelf/lit"
"xelf.org/xelf/typ"
)
// ErrDefer is a marker error used to indicate a deferred resolution and not a failure per se.
// The user can errors.Is(err, ErrDefer) and resume program resolution with more context provided.
var ErrDefer = fmt.Errorf("deferred resolution")
// Eval creates and evaluates a new program for str and returns the result or an error.
// A nil reg is allowed; a fresh registry is used in that case.
func Eval(reg *lit.Reg, env Env, str string) (*Lit, error) {
	if reg == nil {
		reg = new(lit.Reg)
	}
	exp, err := Parse(reg, str)
	if err != nil {
		return nil, err
	}
	return EvalExp(reg, env, exp)
}
// EvalExp creates and evaluates a new program for x and returns the result or an error.
func EvalExp(reg *lit.Reg, env Env, x Exp) (*Lit, error) {
	prog := NewProg(reg, env, x)
	res, err := prog.Resl(env, x, typ.Void)
	if err != nil {
		return nil, err
	}
	return prog.Eval(env, res)
}
// Env is a scoped context to resolve symbols. Envs configure most of the program resolution.
type Env interface {
	// Parent returns the parent environment or nil.
	Parent() Env
	// Dyn returns a dyn spec for this environment or nil. The dyn spec handles
	// calls whose first argument does not itself resolve to a spec (see reslSpec).
	Dyn() Spec
	// Resl resolves a part of a symbol and returns the result or an error.
	Resl(p *Prog, s *Sym, k string) (Exp, error)
	// Eval evaluates a part of a symbol and returns a literal or an error.
	Eval(p *Prog, s *Sym, k string) (*Lit, error)
}
// Prog is the entry context to resolve an expression in an environment.
// Programs are bound to their expression and cannot be reused.
type Prog struct {
	Reg *lit.Reg // literal registry used for value and type allocation
	Sys *typ.Sys // type system used for instantiation and unification
	Root Env // root resolution environment
	Exp Exp // the expression this program is bound to
	fnid uint // counter backing NextFnID for anonymous function ids
}
// NewProg returns a new program using the given registry, environment and expression.
// The registry argument can be nil, a new registry will be used by default.
func NewProg(reg *lit.Reg, env Env, exp Exp) *Prog {
	if reg == nil {
		reg = new(lit.Reg)
	}
	p := &Prog{Reg: reg, Sys: typ.NewSys(reg), Root: env, Exp: exp}
	return p
}
// Resl resolves an expression using a type hint and returns the result or an error.
// Resolution walks the expression tree, resolving symbols against env and
// unifying literal and tuple types with the hint h in the program's type system.
func (p *Prog) Resl(env Env, e Exp, h typ.Type) (Exp, error) {
	switch a := e.(type) {
	case *Tag:
		// Resolve the tagged expression, if any, against the hint's element type.
		if a.Exp != nil {
			x, err := p.Resl(env, a.Exp, typ.ResEl(h))
			if err != nil {
				return nil, err
			}
			a.Exp = x
		}
		return a, nil
	case *Sym:
		// A sym hint short-circuits resolution: the symbol name itself is the value.
		if h.Kind == knd.Sym {
			return &Lit{Res: typ.Sym, Val: lit.Str(a.Sym), Src: a.Src}, nil
		}
		k := a.Sym
		// Symbols already bound to an environment resolve only the remaining
		// relative part against that environment.
		if a.Env != nil {
			env = a.Env
			k = a.Rel
		}
		r, err := env.Resl(p, a, k)
		if err != nil {
			return nil, ast.ErrReslSym(a.Src, a.Sym, err)
		}
		// TODO check hint
		return r, nil
	case *Lit:
		// Type literal values must be re-registered with this program's type
		// system before unification.
		if a.Res.Kind&knd.Typ != 0 {
			t, ok := a.Val.(typ.Type)
			if ok {
				a.Val = p.Sys.Update(t)
			}
		}
		rt, err := p.Sys.Unify(a.Res, h)
		if err != nil {
			return nil, ast.ErrUnify(a.Src, err.Error())
		}
		a.Res = rt
		return a, nil
	case *Tupl:
		tt, tn := typ.TuplEl(a.Type)
		for i, arg := range a.Els {
			ah := tt
			// Tuples with multiple parameters cycle through the parameter
			// types (i%tn) as the per-element hint.
			if tn > 1 {
				ah = tt.Body.(*typ.ParamBody).Params[i%tn].Type
			}
			el, err := p.Resl(env, arg, ah)
			if err != nil {
				return nil, err
			}
			a.Els[i] = el
		}
		ut, err := p.Sys.Unify(a.Type, h)
		if err != nil {
			return nil, ast.ErrUnify(a.Src, err.Error())
		}
		a.Type = ut
		return a, nil
	case *Call:
		// First resolution of a call: determine the spec, instantiate its
		// signature and lay out the arguments against it. Subsequent calls
		// skip straight to the spec's own resolution.
		if a.Spec == nil {
			spec, args, err := p.reslSpec(env, a)
			if err != nil {
				return nil, err
			}
			sig, err := p.Sys.Inst(spec.Type())
			if err != nil {
				return nil, ast.ErrLayout(a.Src, sig, err)
			}
			args, err = LayoutSpec(sig, args)
			if err != nil {
				return nil, ast.ErrLayout(a.Src, sig, err)
			}
			a.Sig, a.Spec, a.Args = sig, spec, args
		}
		return a.Spec.Resl(p, env, a, h)
	}
	return nil, ast.ErrUnexpectedExp(e.Source(), e)
}
// Eval evaluates a resolved expression and returns a literal or an error.
// Expressions are expected to have been resolved by Resl beforehand.
func (p *Prog) Eval(env Env, e Exp) (_ *Lit, err error) {
	switch a := e.(type) {
	case *Sym:
		res, err := env.Eval(p, a, a.Sym)
		if err != nil {
			return nil, ast.ErrEval(a.Src, a.Sym, err)
		}
		return res, nil
	case *Call:
		// Calls evaluate through their resolved spec.
		res, err := a.Spec.Eval(p, a)
		if err != nil {
			return nil, ast.ErrEval(a.Src, SigName(a.Sig), err)
		}
		return res, nil
	case *Tupl:
		// Tuples evaluate each element and collect the values into a list.
		vals := make([]lit.Val, len(a.Els))
		for i, arg := range a.Els {
			at, err := p.Eval(env, arg)
			if err != nil {
				return nil, err
			}
			vals[i] = at.Val
		}
		return &Lit{Val: &lit.List{Vals: vals}}, nil
	case *Lit:
		// Type literal values are re-registered with this program's type system.
		if a.Res.Kind&knd.Typ != 0 {
			if t, ok := a.Val.(typ.Type); ok {
				a.Val = p.Sys.Update(t)
			}
		}
		return a, nil
	}
	return nil, ast.ErrUnexpectedExp(e.Source(), e)
}
// EvalArgs evaluates resolved call arguments and returns the result or an error.
// This is a convenience method for the most basic needs of many spec implementations.
// Nil arguments are skipped and remain nil in the result slice.
func (p *Prog) EvalArgs(c *Call) ([]*Lit, error) {
	out := make([]*Lit, len(c.Args))
	for idx, arg := range c.Args {
		if arg == nil {
			continue
		}
		v, err := p.Eval(c.Env, arg)
		if err != nil {
			return nil, err
		}
		out[idx] = v
	}
	return out, nil
}
// NextFnID returns a new number to identify an anonymous function.
// Each call yields the next value of a monotonically increasing counter.
func (p *Prog) NextFnID() uint {
	id := p.fnid + 1
	p.fnid = id
	return id
}
// reslSpec determines the spec for call c and returns it together with the
// arguments the spec should receive. If the first argument resolves to a spec
// literal it is used directly with the remaining arguments, otherwise the
// environment's dyn spec is returned with all arguments.
func (p *Prog) reslSpec(env Env, c *Call) (Spec, []Exp, error) {
	if len(c.Args) == 0 {
		return nil, nil, ast.ErrReslSpec(c.Src, "unexpected empty call", nil)
	}
	fst, err := p.Resl(env, c.Args[0], typ.Void)
	if err != nil {
		return nil, nil, err
	}
	// A literal spec as first argument is called directly.
	if fst.Kind() == knd.Lit && fst.Resl().Kind&knd.Spec != 0 {
		if l, ok := fst.(*Lit); ok {
			if s, ok := l.Val.(Spec); ok {
				return s, c.Args[1:], nil
			}
		}
	}
	// Fall back to the environment's dyn spec, keeping the first argument.
	dyn := env.Dyn()
	if dyn == nil {
		name := fmt.Sprintf("no dyn spec found for %s", fst)
		return nil, nil, ast.ErrReslSpec(c.Src, name, nil)
	}
	return dyn, c.Args, nil
}
package solution
// pos is a board position given by row r and column c.
type pos struct {
	r, c int
}

// Neighbors returns the four orthogonally adjacent positions in the fixed
// order down, up, right, left; no bounds checking is performed.
func (p pos) Neighbors() []pos {
	return []pos{
		{p.r + 1, p.c},
		{p.r - 1, p.c},
		{p.r, p.c + 1},
		{p.r, p.c - 1},
	}
}

// grid represents a rectangular grid of characters.
type grid struct {
	char [][]byte
}

// Height returns the number of rows.
func (g grid) Height() int { return len(g.char) }

// Width returns the number of columns, or 0 for an empty grid.
func (g grid) Width() int {
	if len(g.char) == 0 {
		return 0
	}
	return len(g.char[0])
}

// At returns the character stored at position p.
func (g grid) At(p pos) byte { return g.char[p.r][p.c] }

// Contains reports whether p lies within the grid bounds.
func (g grid) Contains(p pos) bool {
	return p.r >= 0 && p.r < g.Height() && p.c >= 0 && p.c < g.Width()
}

// Neighbors returns the in-bounds orthogonal neighbors of p.
func (g grid) Neighbors(p pos) []pos {
	in := make([]pos, 0, 4)
	for _, q := range p.Neighbors() {
		if g.Contains(q) {
			in = append(in, q)
		}
	}
	return in
}
// findWords returns every word from words that can be traced on board by
// moving between horizontally or vertically adjacent cells, using each cell
// at most once per word. The result order is unspecified.
func findWords(board [][]byte, words []string) []string {
	g := grid{char: board}
	dict := NewTrieDictionary(words)
	unique := map[string]bool{}
	// Start a trie-guided DFS from every cell and union the words found.
	for r := 0; r < g.Height(); r++ {
		for c := 0; c < g.Width(); c++ {
			for w := range findWordsTrie(g, pos{r, c}, map[pos]bool{}, "", dict) {
				unique[w] = true
			}
		}
	}
	out := make([]string, 0, len(unique))
	for w := range unique {
		out = append(out, w)
	}
	return out
}
// findWordsTrie performs a depth-first search from p, extending prefix with
// each character it visits and collecting every dictionary word found along
// the way. seen tracks cells already used on the current path and is restored
// before returning, so the caller's map is unchanged.
func findWordsTrie(g grid, p pos, seen map[pos]bool, prefix string, t *Trie) map[string]bool {
	words := map[string]bool{}
	ch := g.At(p)
	sub := t.Sub(ch)
	if sub == nil {
		// No dictionary word continues with this character: prune the branch.
		return words
	}
	prefix += string([]byte{ch})
	if sub.Exists() {
		// A complete word ends at this cell.
		words[prefix] = true
	}
	// Mark the cell used while exploring unvisited neighbors.
	seen[p] = true
	for _, next := range g.Neighbors(p) {
		if seen[next] {
			continue
		}
		for w := range findWordsTrie(g, next, seen, prefix, sub) {
			words[w] = true
		}
	}
	seen[p] = false
	return words
}
// Trie is a byte-keyed prefix tree marking which strings exist in a dictionary.
type Trie struct {
	exists bool
	sub    map[byte]*Trie
}

// NewTrie returns an empty trie node.
func NewTrie() *Trie {
	return &Trie{sub: map[byte]*Trie{}}
}

// NewTrieDictionary builds a trie containing all the given words.
func NewTrieDictionary(words []string) *Trie {
	root := NewTrie()
	for _, w := range words {
		root.Insert(w)
	}
	return root
}

// Exists reports whether a word ends at this node.
func (t *Trie) Exists() bool { return t.exists }

// Sub returns the child node for ch, or nil if none.
func (t *Trie) Sub(ch byte) *Trie { return t.sub[ch] }

// Insert adds s to the trie.
func (t *Trie) Insert(s string) { t.InsertBytes([]byte(s)) }

// InsertBytes adds the word b to the trie, creating nodes as needed.
func (t *Trie) InsertBytes(b []byte) {
	node := t
	for _, ch := range b {
		child, ok := node.sub[ch]
		if !ok {
			child = NewTrie()
			node.sub[ch] = child
		}
		node = child
	}
	node.exists = true
}
package ast
type (
	// SourceType distinguishes classic scripts from ES modules.
	SourceType string
	// Program is the root node of a parsed JavaScript source tree.
	Program struct {
		SourceType SourceType
		Body []ProgramBody
	}
	// ProgramBody is a top-level item: either a statement or a module
	// declaration. Node variants throughout this package are modelled with
	// the visitor pattern: each XxxVisitor struct holds one callback per
	// concrete variant, and nodes dispatch to it via their VisitXxx method.
	ProgramBody interface {
		VisitProgramBody(ProgramBodyVisitor)
	}
	ProgramBodyVisitor struct {
		Statement func(Statement)
		ModuleDeclaration func(ModuleDeclaration)
	}
	// Expression is any JavaScript expression node.
	Expression interface {
		VisitExpression(ExpressionVisitor)
	}
	ExpressionVisitor struct {
		Identifier func(*Identifier)
		Literal func(Literal)
		ThisExpression func(*ThisExpression)
		ArrayExpression func(*ArrayExpression)
		ObjectExpression func(*ObjectExpression)
		FunctionExpression func(*FunctionExpression)
		UnaryExpression func(*UnaryExpression)
		UpdateExpression func(*UpdateExpression)
		BinaryExpression func(*BinaryExpression)
		AssignmentExpression func(*AssignmentExpression)
		LogicalExpression func(*LogicalExpression)
		ConditionalExpression func(*ConditionalExpression)
		CallExpression func(*CallExpression)
		NewExpression func(*NewExpression)
		SequenceExpression func(*SequenceExpression)
		ArrowFunctionExpression func(*ArrowFunctionExpression)
		YieldExpression func(*YieldExpression)
		AwaitExpression func(*AwaitExpression)
		TemplateLiteral func(*TemplateLiteral)
		TaggedTemplateExpression func(*TaggedTemplateExpression)
		ClassExpression func(*ClassExpression)
	}
	// Statement is any JavaScript statement node.
	Statement interface {
		ProgramBody
		VisitStatement(StatementVisitor)
	}
	StatementVisitor struct {
		Declaration func(Declaration)
		ExpressionStatement func(*ExpressionStatement)
		BlockStatement func(*BlockStatement)
		EmptyStatement func(*EmptyStatement)
		DebuggerStatement func(*DebuggerStatement)
		WithStatement func(*WithStatement)
		ReturnStatement func(*ReturnStatement)
		LabeledStatement func(*LabeledStatement)
		BreakStatement func(*BreakStatement)
		ContinueStatement func(*ContinueStatement)
		IfStatement func(*IfStatement)
		SwitchStatement func(*SwitchStatement)
		ThrowStatement func(*ThrowStatement)
		TryStatement func(*TryStatement)
		WhileStatement func(*WhileStatement)
		DoWhileStatement func(*DoWhileStatement)
		ForStatement func(*ForStatement)
		ForInStatement func(*ForInStatement)
		ForOfStatement func(*ForOfStatement)
	}
	// Declaration is a statement that declares a binding
	// (function, variable or class).
	Declaration interface {
		Statement
		VisitDeclaration(DeclarationVisitor)
	}
	DeclarationVisitor struct {
		FunctionDeclaration func(*FunctionDeclaration)
		VariableDeclaration func(*VariableDeclaration)
		ClassDeclaration func(*ClassDeclaration)
	}
	// Pattern is a binding or assignment target.
	Pattern interface {
		VisitPattern(PatternVisitor)
	}
	PatternVisitor struct {
		Identifier func(*Identifier)
		MemberExpression func(*MemberExpression)
	}
	// Identifier is a name used as an expression or pattern.
	Identifier struct {
		Name string
	}
	// Literal is a primitive literal expression.
	Literal interface {
		Expression
		VisitLiteral(LiteralVisitor)
	}
	LiteralVisitor struct {
		StringLiteral func(*StringLiteral)
		BooleanLiteral func(*BooleanLiteral)
		NullLiteral func(*NullLiteral)
		NumberLiteral func(*NumberLiteral)
		RegExpLiteral func(*RegExpLiteral)
	}
	StringLiteral struct {
		Value string
	}
	BooleanLiteral struct {
		Value bool
	}
	NullLiteral struct{}
	NumberLiteral struct {
		Value float64
	}
	RegExpLiteral struct {
		Regex struct {
			Pattern string
			Flags string
		}
	}
	// Statement nodes.
	ExpressionStatement struct {
		Expression Expression
	}
	BlockStatement struct {
		Body []Statement
	}
	EmptyStatement struct{}
	DebuggerStatement struct{}
	WithStatement struct {
		Object Expression
		Body Statement
	}
	ReturnStatement struct {
		Argument Expression
	}
	LabeledStatement struct {
		Label *Identifier
		Body Statement
	}
	BreakStatement struct {
		Label *Identifier
	}
	ContinueStatement struct {
		Label *Identifier
	}
	IfStatement struct {
		Test Expression
		Consequent Statement
		Alternate Statement
	}
	SwitchStatement struct {
		Discriminant Expression
		Cases []*SwitchCase
	}
	SwitchCase struct {
		Test Expression
		Consequent []Statement
	}
	ThrowStatement struct {
		Argument Expression
	}
	TryStatement struct {
		Block *BlockStatement
		Handler *CatchClause
		Finalizer *BlockStatement
	}
	CatchClause struct {
		Param Pattern
		Body *BlockStatement
	}
	WhileStatement struct {
		Test Expression
		Body Statement
	}
	DoWhileStatement struct {
		Test Expression
		Body Statement
	}
	ForStatement struct {
		Init ForStatementInit
		Test Expression
		Update Expression
		Body Statement
	}
	ForStatementInit interface {
		VisitForStatementInit(ForStatementInitVisitor)
	}
	ForStatementInitVisitor struct {
		VariableDeclaration func(*VariableDeclaration)
		Expression func(Expression)
	}
	ForInStatement struct {
		Left ForInStatementLeft
		Right Expression
		Body Statement
	}
	ForInStatementLeft interface {
		VisitForInStatementLeft(ForInStatementLeftVisitor)
	}
	ForInStatementLeftVisitor struct {
		VariableDeclaration func(*VariableDeclaration)
		Pattern func(Pattern)
	}
	ForOfStatement struct {
		Left ForOfStatementLeft
		Right Expression
		Body Statement
		Await bool
	}
	ForOfStatementLeft interface {
		VisitForOfStatementLeft(ForOfStatementLeftVisitor)
	}
	ForOfStatementLeftVisitor struct {
		VariableDeclaration func(*VariableDeclaration)
		Pattern func(Pattern)
	}
	FunctionDeclaration struct {
		ID *Identifier
		Params []Pattern
		Body *BlockStatement
		Generator bool
		Async bool
	}
	VariableDeclaration struct {
		Kind string
		Declarations []*VariableDeclarator
	}
	VariableDeclarator struct {
		ID Pattern
		Init Expression
	}
	// Expression nodes.
	ThisExpression struct{}
	ArrayExpression struct {
		Elements []ArrayExpressionElement
	}
	ArrayExpressionElement interface {
		VisitArrayExpressionElement(ArrayExpressionElementVisitor)
	}
	ArrayExpressionElementVisitor struct {
		Expression func(Expression)
		SpreadElement func(*SpreadElement)
	}
	ObjectExpression struct {
		Properties []ObjectExpressionProperty
	}
	ObjectExpressionProperty interface {
		VisitObjectExpressionProperty(ObjectExpressionPropertyVisitor)
	}
	ObjectExpressionPropertyVisitor struct {
		Property func(*Property)
		SpreadElement func(*SpreadElement)
	}
	Property struct {
		Key Expression
		Value Expression
		Kind string
		Method bool
		Shorthand bool
		Computed bool
	}
	FunctionExpression struct {
		ID *Identifier
		Params []Pattern
		Body *BlockStatement
		Generator bool
		Async bool
	}
	UnaryExpression struct {
		Operator string
		Prefix bool
		Argument Expression
	}
	UpdateExpression struct {
		Operator string
		Prefix bool
		Argument Expression
	}
	BinaryExpression struct {
		Operator string
		Left Expression
		Right Expression
	}
	AssignmentExpression struct {
		Operator string
		Left Pattern
		Right Expression
	}
	LogicalExpression struct {
		Operator string
		Left Expression
		Right Expression
	}
	MemberExpression struct {
		Object MemberExpressionObject
		Property Expression
		Computed bool
	}
	MemberExpressionObject interface {
		VisitMemberExpressionObject(MemberExpressionObjectVisitor)
	}
	MemberExpressionObjectVisitor struct {
		Expression func(Expression)
		Super func(*Super)
	}
	ConditionalExpression struct {
		Test Expression
		Alternate Expression
		Consequent Expression
	}
	CallExpression struct {
		Callee CallExpressionCallee
		Arguments []CallExpressionArgument
	}
	CallExpressionCallee interface {
		VisitCallExpressionCallee(CallExpressionCalleeVisitor)
	}
	CallExpressionCalleeVisitor struct {
		Expression func(Expression)
		Super func(*Super)
	}
	CallExpressionArgument interface {
		VisitCallExpressionArgument(CallExpressionArgumentVisitor)
	}
	CallExpressionArgumentVisitor struct {
		Expression func(Expression)
		SpreadElement func(*SpreadElement)
	}
	NewExpression struct {
		Callee Expression
		Arguments []NewExpressionArgument
	}
	NewExpressionArgument interface {
		VisitNewExpressionArgument(NewExpressionArgumentVisitor)
	}
	NewExpressionArgumentVisitor struct {
		Expression func(Expression)
		SpreadElement func(*SpreadElement)
	}
	SequenceExpression struct {
		Expression []Expression
	}
	ArrowFunctionExpression struct {
		Body ArrowFunctionExpressionBody
		Expression bool
	}
	ArrowFunctionExpressionBody interface {
		VisitArrowFunctionExpressionBody(ArrowFunctionExpressionBodyVisitor)
	}
	ArrowFunctionExpressionBodyVisitor struct {
		BlockStatement func(*BlockStatement)
		Expression func(Expression)
	}
	YieldExpression struct {
		Argument Expression
		Delegate bool
	}
	AwaitExpression struct {
		Argument Expression
	}
	TemplateLiteral struct {
		Quasis []*TemplateElement
		Expressions []Expression
	}
	TaggedTemplateExpression struct {
		Tag Expression
		Quasi TemplateLiteral
	}
	TemplateElement struct {
		Tail bool
		Value struct {
			Cooked *string
			Raw string
		}
	}
	// Pattern and destructuring nodes.
	ObjectPattern struct {
		Properties []ObjectPatternProperty
	}
	ObjectPatternProperty interface {
		VisitObjectPatternProperty(ObjectPatternPropertyVisitor)
	}
	ObjectPatternPropertyVisitor struct {
		AssignmentProperty func(*AssignmentProperty)
		RestElement func(*RestElement)
	}
	AssignmentProperty struct {
		Key Expression
		Value Pattern
		Shorthand bool
		Computed bool
	}
	ArrayPattern struct {
		Elements []Pattern
	}
	RestElement struct {
		Argument Pattern
	}
	AssignmentPattern struct {
		Left Pattern
		Right Expression
	}
	Super struct{}
	SpreadElement struct {
		Argument Expression
	}
	// Class nodes.
	Class struct {
		ID *Identifier
		SuperClass Expression
		Body ClassBody
	}
	ClassBody struct {
		Body []*MethodDefinition
	}
	MethodDefinition struct {
		Key Expression
		Value *FunctionExpression
		Kind string
		Computed bool
		Static bool
	}
	ClassDeclaration struct {
		ID *Identifier
		SuperClass Expression
		Body *ClassBody
	}
	ClassExpression struct {
		ID *Identifier
		SuperClass Expression
		Body *ClassBody
	}
	MetaProperty struct {
		Meta *Identifier
		Property *Identifier
	}
	// Module (import/export) nodes.
	ModuleDeclaration interface {
		ProgramBody
		VisitModuleDeclaration(ModuleDeclarationVisitor)
	}
	ModuleDeclarationVisitor struct {
		ImportDeclaration func(*ImportDeclaration)
		ExportNamedDeclaration func(*ExportNamedDeclaration)
		ExportDefaultDeclaration func(*ExportDefaultDeclaration)
		ExportAllDeclaration func(*ExportAllDeclaration)
	}
	ImportDeclaration struct {
		Specifiers []ImportDeclarationSpecifier
		Source *StringLiteral
	}
	ImportDeclarationSpecifier interface {
		VisitImportDeclarationSpecifier(ImportDeclarationSpecifierVisitor)
	}
	ImportDeclarationSpecifierVisitor struct {
		ImportSpecifier func(*ImportSpecifier)
		ImportDefaultSpecifier func(*ImportDefaultSpecifier)
		ImportNamespaceSpecifier func(*ImportNamespaceSpecifier)
	}
	ImportSpecifier struct {
		Local *Identifier
		Imported *Identifier
	}
	ImportDefaultSpecifier struct {
		Local *Identifier
	}
	ImportNamespaceSpecifier struct {
		Local *Identifier
	}
	ExportNamedDeclaration struct {
		Declaration Declaration
		Specifiers []*ExportSpecifier
		Source *StringLiteral
	}
	ExportSpecifier struct {
		Local *Identifier
		Exported *Identifier
	}
	ExportDefaultDeclaration struct {
		Declaration ExportDefaultDeclarationDeclaration
	}
	ExportDefaultDeclarationDeclaration interface {
		VisitExportDefaultDeclarationDeclaration(ExportDefaultDeclarationDeclarationVisitor)
	}
	ExportDefaultDeclarationDeclarationVisitor struct {
		AnonymousDefaultExportedFunctionDeclaration func(*AnonymousDefaultExportedFunctionDeclaration)
		FunctionDeclaration func(*FunctionDeclaration)
		AnonymousDefaultExportedClassDeclaration func(*AnonymousDefaultExportedClassDeclaration)
		ClassDeclaration func(*ClassDeclaration)
		Expression func(Expression)
	}
	AnonymousDefaultExportedFunctionDeclaration struct {
		Params []Pattern
		Body *BlockStatement
		Generator bool
	}
	AnonymousDefaultExportedClassDeclaration struct {
		SuperClass Expression
		Body *ClassBody
	}
	// NOTE(review): Source here is the Literal interface, while other export
	// nodes use *StringLiteral — confirm whether this asymmetry is intended.
	ExportAllDeclaration struct {
		Source Literal
	}
)
// Supported source types.
const (
	// Script is a classic top-level script.
	Script SourceType = "script"
	// Module is an ECMAScript module.
	Module SourceType = "module"
)
// The methods below wire concrete node types into the visitor interfaces:
// each VisitXxx simply dispatches to the matching callback on the visitor.
func (p *ExpressionStatement) VisitProgramBody(v ProgramBodyVisitor) { v.Statement(p) }
func (p *BlockStatement) VisitProgramBody(v ProgramBodyVisitor) { v.Statement(p) }
func (p *ImportDeclaration) VisitProgramBody(v ProgramBodyVisitor) { v.ModuleDeclaration(p) }
func (s *ExpressionStatement) VisitStatement(v StatementVisitor) { v.ExpressionStatement(s) }
func (s *BlockStatement) VisitStatement(v StatementVisitor) { v.BlockStatement(s) }
func (e *Identifier) VisitExpression(v ExpressionVisitor) { v.Identifier(e) }
func (e *StringLiteral) VisitExpression(v ExpressionVisitor) { v.Literal(e) }
func (e *BooleanLiteral) VisitExpression(v ExpressionVisitor) { v.Literal(e) }
func (e *NullLiteral) VisitExpression(v ExpressionVisitor) { v.Literal(e) }
func (e *NumberLiteral) VisitExpression(v ExpressionVisitor) { v.Literal(e) }
func (e *RegExpLiteral) VisitExpression(v ExpressionVisitor) { v.Literal(e) }
func (e *SequenceExpression) VisitExpression(v ExpressionVisitor) { v.SequenceExpression(e) }
func (e *AssignmentExpression) VisitExpression(v ExpressionVisitor) { v.AssignmentExpression(e) }
func (e *ConditionalExpression) VisitExpression(v ExpressionVisitor) { v.ConditionalExpression(e) }
func (e *LogicalExpression) VisitExpression(v ExpressionVisitor) { v.LogicalExpression(e) }
func (e *BinaryExpression) VisitExpression(v ExpressionVisitor) { v.BinaryExpression(e) }
func (e *UnaryExpression) VisitExpression(v ExpressionVisitor) { v.UnaryExpression(e) }
func (e *UpdateExpression) VisitExpression(v ExpressionVisitor) { v.UpdateExpression(e) }
func (l *StringLiteral) VisitLiteral(v LiteralVisitor) { v.StringLiteral(l) }
func (l *BooleanLiteral) VisitLiteral(v LiteralVisitor) { v.BooleanLiteral(l) }
func (l *NullLiteral) VisitLiteral(v LiteralVisitor) { v.NullLiteral(l) }
func (l *NumberLiteral) VisitLiteral(v LiteralVisitor) { v.NumberLiteral(l) }
func (l *RegExpLiteral) VisitLiteral(v LiteralVisitor) { v.RegExpLiteral(l) }
func (m *ImportDeclaration) VisitModuleDeclaration(v ModuleDeclarationVisitor) {
	v.ImportDeclaration(m)
}
func (i *ImportSpecifier) VisitImportDeclarationSpecifier(v ImportDeclarationSpecifierVisitor) {
	v.ImportSpecifier(i)
}
func (i *ImportDefaultSpecifier) VisitImportDeclarationSpecifier(v ImportDeclarationSpecifierVisitor) {
	v.ImportDefaultSpecifier(i)
}
func (i *ImportNamespaceSpecifier) VisitImportDeclarationSpecifier(v ImportDeclarationSpecifierVisitor) {
	v.ImportNamespaceSpecifier(i)
}
func (p *Identifier) VisitPattern(v PatternVisitor) { v.Identifier(p) }
func (p *MemberExpression) VisitPattern(v PatternVisitor) { v.MemberExpression(p) }
package wrand
import (
"math"
"math/rand"
"sort"
)
// SelectIndex takes a list of weights and returns an index with a probability corresponding
// to the relative weight of each index. Behavior is undefined if len(weights) == 0. A weight
// of 0 will never be selected unless all are 0, in which case any index may be selected.
// Negative weights are multiplied by -1.
func SelectIndex(weights []float64) int {
	cumWeights := make([]float64, len(weights))
	sum := 0.0
	for i, w := range weights {
		// Accumulate absolute weights so negative entries count by magnitude.
		// (The previous version skipped math.Abs for index 0, so a negative
		// first weight corrupted the cumulative table and could return the
		// out-of-range index len(weights).)
		sum += math.Abs(w)
		cumWeights[i] = sum
	}
	if sum == 0.0 {
		// All weights are zero: fall back to a uniform choice.
		return rand.Intn(len(weights))
	}
	rnd := rand.Float64() * sum
	// SearchFloat64s finds the first cumulative weight >= rnd, i.e. the
	// bucket that rnd falls into.
	return sort.SearchFloat64s(cumWeights, rnd)
}
// The Item type holds the user's value. Implementations carry a selection
// weight plus a cumulative weight; the cumulative weight is maintained by
// Object and should not be modified directly by the user.
type Item interface {
	// Weight returns the item's current selection weight.
	Weight() int
	// WeightIs sets the item's selection weight.
	WeightIs(int)
	// CumWeight returns the cumulative weight maintained by Object.
	CumWeight() int
	// CumWeightIs stores the cumulative weight; called by Object.update.
	CumWeightIs(int)
}
// The Object type is the collection that holds all the items to choose from.
type Object struct {
	pool itemPool // items kept sorted by cumulative weight
	totalWeight int // sum of (possibly inverted) weights; sampling upper bound
	inverse bool // when true, smaller weights are more likely
}

// NewObject creates and returns a new Object to work with. If inverse is true then
// smaller weights are more likely.
func NewObject(inverse bool) *Object {
	return &Object{make(itemPool, 0), 0, inverse}
}
// NewItem adds a new Item to the Object with the given value and weight.
func (o *Object) NewItem(item Item) {
	// O(n log n): update recomputes cumulative weights and re-sorts the pool.
	o.pool = append(o.pool, item)
	o.update()
}
// UpdateItemWeight sets the given Item's weight to the value provided. You should use
// this instead of setting the Item's weight yourself, so the cumulative
// weights stay consistent.
func (o *Object) UpdateItemWeight(item Item, weight int) {
	// O(n log n): update recomputes cumulative weights and re-sorts the pool.
	item.WeightIs(weight)
	o.update()
}
// update recomputes every item's cumulative weight and re-sorts the pool;
// it must be called after any weight change. Runs in O(n log n) due to the sort.
func (o *Object) update() {
	// Find the maximum weight, needed to invert weights when o.inverse is set.
	maxWeight := 0
	for _, item := range o.pool {
		if item.Weight() > maxWeight {
			maxWeight = item.Weight()
		}
	}
	cumWeight := 0
	for _, item := range o.pool {
		w := item.Weight()
		if o.inverse {
			// Invert: the smallest weight gets the largest share (+1 so the
			// maximum-weight item still keeps a nonzero chance).
			w = maxWeight - w + 1
		}
		cumWeight += w
		item.CumWeightIs(cumWeight)
	}
	o.totalWeight = cumWeight
	sort.Sort(o.pool)
}
// RandomItem returns a random Item out of the ones that have been added via NewItem
// taking into account the weights of each item.
func (o *Object) RandomItem() Item {
	// O(log n): binary search over the sorted cumulative weights.
	rnd := int(rand.Float64() * float64(o.totalWeight))
	i := sort.Search(o.pool.Len(), func(i int) bool { return o.pool[i].CumWeight() > rnd })
	return o.pool[i]
}
// itemPool is a sortable list of Items
type itemPool []Item
func (p itemPool) Len() int {
return len(p)
}
func (p itemPool) Less(i, j int) bool {
return p[i].CumWeight() < p[j].CumWeight()
}
func (p itemPool) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
} | wrand.go | 0.753285 | 0.506103 | wrand.go | starcoder |
package edge_compute_networking
import (
"encoding/json"
"time"
)
// NetworkMetadata Metadata associated with an entity
// NOTE(review): this type looks OpenAPI-generated; manual edits may be
// overwritten on regeneration — confirm before changing by hand.
type NetworkMetadata struct {
	// A string to string key/value pair
	Annotations *map[string]string `json:"annotations,omitempty"`
	// A string to string key/value pair
	Labels *map[string]string `json:"labels,omitempty"`
	// The date that a metadata entry was created
	CreatedAt NullableTime `json:"createdAt,omitempty"`
	// The date that a metadata entry was last updated
	UpdatedAt NullableTime `json:"updatedAt,omitempty"`
	// The date that a network policy was requested for deletion
	DeleteRequestedAt NullableTime `json:"deleteRequestedAt,omitempty"`
	// An entity's version number Versions start at 1 when they are created and increment by 1 every time they are updated.
	Version *string `json:"version,omitempty"`
}
// NewNetworkMetadata instantiates a new NetworkMetadata object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewNetworkMetadata() *NetworkMetadata {
	return &NetworkMetadata{}
}

// NewNetworkMetadataWithDefaults instantiates a new NetworkMetadata object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewNetworkMetadataWithDefaults() *NetworkMetadata {
	return &NetworkMetadata{}
}
// GetAnnotations returns the Annotations field value if set, zero value otherwise.
func (o *NetworkMetadata) GetAnnotations() map[string]string {
	if o != nil && o.Annotations != nil {
		return *o.Annotations
	}
	return nil
}

// GetAnnotationsOk returns a tuple with the Annotations field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *NetworkMetadata) GetAnnotationsOk() (*map[string]string, bool) {
	if o != nil && o.Annotations != nil {
		return o.Annotations, true
	}
	return nil, false
}

// HasAnnotations returns a boolean if a field has been set.
func (o *NetworkMetadata) HasAnnotations() bool {
	return o != nil && o.Annotations != nil
}

// SetAnnotations gets a reference to the given map[string]string and assigns it to the Annotations field.
func (o *NetworkMetadata) SetAnnotations(v map[string]string) {
	o.Annotations = &v
}
// GetLabels returns the Labels field value if set, zero value otherwise.
func (o *NetworkMetadata) GetLabels() map[string]string {
	if o != nil && o.Labels != nil {
		return *o.Labels
	}
	return nil
}

// GetLabelsOk returns a tuple with the Labels field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *NetworkMetadata) GetLabelsOk() (*map[string]string, bool) {
	if o != nil && o.Labels != nil {
		return o.Labels, true
	}
	return nil, false
}

// HasLabels returns a boolean if a field has been set.
func (o *NetworkMetadata) HasLabels() bool {
	return o != nil && o.Labels != nil
}

// SetLabels gets a reference to the given map[string]string and assigns it to the Labels field.
func (o *NetworkMetadata) SetLabels(v map[string]string) {
	o.Labels = &v
}
// GetCreatedAt returns the CreatedAt field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *NetworkMetadata) GetCreatedAt() time.Time {
	if o != nil {
		if v := o.CreatedAt.Get(); v != nil {
			return *v
		}
	}
	return time.Time{}
}

// GetCreatedAtOk returns a tuple with the CreatedAt field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *NetworkMetadata) GetCreatedAtOk() (*time.Time, bool) {
	if o == nil {
		return nil, false
	}
	return o.CreatedAt.Get(), o.CreatedAt.IsSet()
}

// HasCreatedAt returns a boolean if a field has been set.
func (o *NetworkMetadata) HasCreatedAt() bool {
	return o != nil && o.CreatedAt.IsSet()
}

// SetCreatedAt gets a reference to the given NullableTime and assigns it to the CreatedAt field.
func (o *NetworkMetadata) SetCreatedAt(v time.Time) {
	o.CreatedAt.Set(&v)
}

// SetCreatedAtNil sets the value for CreatedAt to be an explicit nil
func (o *NetworkMetadata) SetCreatedAtNil() {
	o.CreatedAt.Set(nil)
}

// UnsetCreatedAt ensures that no value is present for CreatedAt, not even an explicit nil
func (o *NetworkMetadata) UnsetCreatedAt() {
	o.CreatedAt.Unset()
}
// GetUpdatedAt returns the UpdatedAt field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *NetworkMetadata) GetUpdatedAt() time.Time {
	if o != nil {
		if v := o.UpdatedAt.Get(); v != nil {
			return *v
		}
	}
	return time.Time{}
}

// GetUpdatedAtOk returns a tuple with the UpdatedAt field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *NetworkMetadata) GetUpdatedAtOk() (*time.Time, bool) {
	if o == nil {
		return nil, false
	}
	return o.UpdatedAt.Get(), o.UpdatedAt.IsSet()
}

// HasUpdatedAt returns a boolean if a field has been set.
func (o *NetworkMetadata) HasUpdatedAt() bool {
	return o != nil && o.UpdatedAt.IsSet()
}

// SetUpdatedAt gets a reference to the given NullableTime and assigns it to the UpdatedAt field.
func (o *NetworkMetadata) SetUpdatedAt(v time.Time) {
	o.UpdatedAt.Set(&v)
}

// SetUpdatedAtNil sets the value for UpdatedAt to be an explicit nil
func (o *NetworkMetadata) SetUpdatedAtNil() {
	o.UpdatedAt.Set(nil)
}

// UnsetUpdatedAt ensures that no value is present for UpdatedAt, not even an explicit nil
func (o *NetworkMetadata) UnsetUpdatedAt() {
	o.UpdatedAt.Unset()
}
// GetDeleteRequestedAt returns the DeleteRequestedAt field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *NetworkMetadata) GetDeleteRequestedAt() time.Time {
	if o != nil {
		if v := o.DeleteRequestedAt.Get(); v != nil {
			return *v
		}
	}
	return time.Time{}
}

// GetDeleteRequestedAtOk returns a tuple with the DeleteRequestedAt field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *NetworkMetadata) GetDeleteRequestedAtOk() (*time.Time, bool) {
	if o == nil {
		return nil, false
	}
	return o.DeleteRequestedAt.Get(), o.DeleteRequestedAt.IsSet()
}

// HasDeleteRequestedAt returns a boolean if a field has been set.
func (o *NetworkMetadata) HasDeleteRequestedAt() bool {
	return o != nil && o.DeleteRequestedAt.IsSet()
}

// SetDeleteRequestedAt gets a reference to the given NullableTime and assigns it to the DeleteRequestedAt field.
func (o *NetworkMetadata) SetDeleteRequestedAt(v time.Time) {
	o.DeleteRequestedAt.Set(&v)
}

// SetDeleteRequestedAtNil sets the value for DeleteRequestedAt to be an explicit nil
func (o *NetworkMetadata) SetDeleteRequestedAtNil() {
	o.DeleteRequestedAt.Set(nil)
}

// UnsetDeleteRequestedAt ensures that no value is present for DeleteRequestedAt, not even an explicit nil
func (o *NetworkMetadata) UnsetDeleteRequestedAt() {
	o.DeleteRequestedAt.Unset()
}
// GetVersion returns the Version field value if set, zero value otherwise.
// Unlike the timestamp fields, Version is a plain optional pointer with no
// explicit-null state: nil simply means "not set".
func (o *NetworkMetadata) GetVersion() string {
	if o == nil || o.Version == nil {
		var ret string
		return ret
	}
	return *o.Version
}
// GetVersionOk returns a tuple with the Version field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *NetworkMetadata) GetVersionOk() (*string, bool) {
	if o == nil || o.Version == nil {
		return nil, false
	}
	return o.Version, true
}
// HasVersion reports whether the Version field has been set.
func (o *NetworkMetadata) HasVersion() bool {
	if o != nil && o.Version != nil {
		return true
	}
	return false
}
// SetVersion gets a reference to the given string and assigns it to the Version field.
// A copy of v is stored, so the caller's value is not aliased.
func (o *NetworkMetadata) SetVersion(v string) {
	o.Version = &v
}
// MarshalJSON serializes NetworkMetadata, emitting only fields that are
// present: plain optional fields (Annotations, Labels, Version) are skipped
// when nil, while the nullable timestamp fields are emitted whenever they
// are set — including when set to an explicit null (Get() returns a nil
// pointer, which encodes as JSON null).
func (o NetworkMetadata) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.Annotations != nil {
		toSerialize["annotations"] = o.Annotations
	}
	if o.Labels != nil {
		toSerialize["labels"] = o.Labels
	}
	if o.CreatedAt.IsSet() {
		toSerialize["createdAt"] = o.CreatedAt.Get()
	}
	if o.UpdatedAt.IsSet() {
		toSerialize["updatedAt"] = o.UpdatedAt.Get()
	}
	if o.DeleteRequestedAt.IsSet() {
		toSerialize["deleteRequestedAt"] = o.DeleteRequestedAt.Get()
	}
	if o.Version != nil {
		toSerialize["version"] = o.Version
	}
	return json.Marshal(toSerialize)
}
// NullableNetworkMetadata wraps a *NetworkMetadata together with a flag that
// distinguishes "unset" from "set to nil", enabling tri-state JSON handling.
type NullableNetworkMetadata struct {
	value *NetworkMetadata
	isSet bool
}
// Get returns the wrapped value; it may be nil even when the wrapper is set.
func (v NullableNetworkMetadata) Get() *NetworkMetadata {
	return v.value
}
// Set stores val (which may be nil) and marks the wrapper as set.
func (v *NullableNetworkMetadata) Set(val *NetworkMetadata) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set has been called since the last Unset.
func (v NullableNetworkMetadata) IsSet() bool {
	return v.isSet
}
// Unset clears both the wrapped value and the set flag.
func (v *NullableNetworkMetadata) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableNetworkMetadata returns a wrapper around val that is already
// marked as set.
func NewNullableNetworkMetadata(val *NetworkMetadata) *NullableNetworkMetadata {
	return &NullableNetworkMetadata{value: val, isSet: true}
}
// MarshalJSON serializes the wrapped value; a nil value encodes as JSON null.
func (v NullableNetworkMetadata) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
func (v *NullableNetworkMetadata) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | pkg/edge_compute_networking/model_network_metadata.go | 0.800224 | 0.42931 | model_network_metadata.go | starcoder |
package shortest_distance_to_target_color
import (
"math"
"sort"
)
/*
1182. 与目标颜色间的最短距离
https://leetcode-cn.com/problems/shortest-distance-to-target-color
给你一个数组 colors,里面有 1、2、 3 三种颜色。
我们需要在 colors 上进行一些查询操作 queries,其中每个待查项都由两个整数 i 和 c 组成。
现在请你帮忙设计一个算法,查找从索引 i 到具有目标颜色 c 的元素之间的最短距离。
如果不存在解决方案,请返回 -1。
示例 1:
输入:colors = [1,1,2,1,3,2,2,3,3], queries = [[1,3],[2,2],[6,1]]
输出:[3,0,3]
解释:
距离索引 1 最近的颜色 3 位于索引 4(距离为 3)。
距离索引 2 最近的颜色 2 就是它自己(距离为 0)。
距离索引 6 最近的颜色 1 位于索引 3(距离为 3)。
示例 2:
输入:colors = [1,2], queries = [[0,3]]
输出:[-1]
解释:colors 中没有颜色 3。
提示:
1 <= colors.length <= 5*10^4
1 <= colors[i] <= 3
1 <= queries.length <= 5*10^4
queries[i].length == 2
0 <= queries[i][0] < colors.length
1 <= queries[i][1] <= 3
*/
/*
朴素实现,超时
*/
// shortestDistanceColor1 answers each query by scanning outward from the
// query index via minDist1 — O(n) per query; kept for reference (the
// comment above notes it times out on the judge).
func shortestDistanceColor1(colors []int, queries [][]int) []int {
	answers := make([]int, 0, len(queries))
	for _, q := range queries {
		answers = append(answers, minDist1(colors, q[0], q[1]))
	}
	return answers
}
// minDist1 returns the distance from index to the nearest occurrence of
// color in colors, scanning outward one step of distance at a time; -1 if
// the color never occurs. Distance 0 covers the element at index itself.
func minDist1(colors []int, index int, color int) int {
	n := len(colors)
	for d := 0; index-d >= 0 || index+d < n; d++ {
		if l := index - d; l >= 0 && colors[l] == color {
			return d
		}
		if r := index + d; r < n && colors[r] == color {
			return d
		}
	}
	return -1
}
/*
使用一个哈希表colorMap,对于颜色c(1<=c<=3), colorMap[c]按升序保存colors里颜色为c的元素索引——这里颜色有限,也可以用三个切片代替哈希表
对于要搜索的索引和颜色,在colorMap[dstColor]里用二分搜索与srcIndex最接近的索引,计算处与srcIndex的距离即可
*/
// shortestDistanceColor answers each query via a per-color position table
// built once from colors: colorMap[c] lists, in ascending order, every
// index where color c occurs, and minDist binary-searches it per query.
func shortestDistanceColor(colors []int, queries [][]int) []int {
	colorMap := make(map[int][]int, 3)
	for pos, c := range colors {
		colorMap[c] = append(colorMap[c], pos)
	}
	answers := make([]int, len(queries))
	for i, q := range queries {
		answers[i] = minDist(q, colorMap)
	}
	return answers
}
// minDist returns the distance from query[0] to the nearest occurrence of
// color query[1], binary-searching the ascending position table in m; -1
// when the color never occurs.
func minDist(query []int, m map[int][]int) int {
	target, color := query[0], query[1]
	positions := m[color]
	if len(positions) == 0 {
		return -1
	}
	i := sort.SearchInts(positions, target)
	if i == len(positions) {
		// target lies to the right of every occurrence
		return target - positions[i-1]
	}
	if i == 0 {
		// target lies on or to the left of the first occurrence
		return positions[0] - target
	}
	// target sits between two occurrences: take the closer side
	left, right := positions[i-1], positions[i]
	return min(abs(right-target), abs(left-target))
}
// minDist2 is minDist with a hand-rolled binary search that additionally
// returns 0 early when an occurrence lands exactly on the target index.
func minDist2(query []int, m map[int][]int) int {
	target, color := query[0], query[1]
	positions := m[color]
	if len(positions) == 0 {
		return -1
	}
	lo, hi := 0, len(positions)
	for lo < hi {
		mid := lo + (hi-lo)/2
		switch {
		case positions[mid] == target:
			return 0
		case positions[mid] < target:
			lo = mid + 1
		default:
			hi = mid
		}
	}
	// lo is now the insertion point of target within positions.
	if lo == len(positions) {
		return target - positions[lo-1]
	}
	if lo == 0 {
		return positions[0] - target
	}
	return min(abs(positions[lo]-target), abs(positions[lo-1]-target))
}
// min returns the smaller of a and b.
// NOTE(review): the round-trip through float64 loses precision for
// magnitudes above 2^53; a plain int comparison would be exact and cheaper.
func min(a, b int) int {
	return int(math.Min(float64(a), float64(b)))
}
// abs returns the absolute value of x.
//
// Fix: the original converted through math.Abs/float64, which loses
// precision for magnitudes above 2^53; a direct comparison is exact and
// avoids two conversions. (abs of the most negative int still overflows,
// as with any two's-complement negation.) The original closing line also
// carried fused non-Go residue, removed here.
func abs(x int) int {
	if x < 0 {
		return -x
	}
	return x
}
package fp
// BoolArray zip helpers.
//
// Each Zip* method below pairs the receiver's elements with the elements of
// the argument collection, position by position, returning a Tuple2Array
// whose length is the minimum of the two input lengths (extra elements on
// the longer side are dropped). The *Array variants index both operands
// directly; the *List variants walk the argument list through its
// head/tail links. The xs.NonEmpty() guard in the list variants is
// defensive: minLen already bounds i by l2.Size().
func (a BoolArray) ZipBoolArray(a2 BoolArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a BoolArray) ZipStringArray(a2 StringArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a BoolArray) ZipIntArray(a2 IntArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a BoolArray) ZipInt64Array(a2 Int64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a BoolArray) ZipByteArray(a2 ByteArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a BoolArray) ZipRuneArray(a2 RuneArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a BoolArray) ZipFloat32Array(a2 Float32Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a BoolArray) ZipFloat64Array(a2 Float64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a BoolArray) ZipAnyArray(a2 AnyArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a BoolArray) ZipTuple2Array(a2 Tuple2Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
// List-argument variants follow: l2 is a cons-style list traversed via its
// head/tail pointers while the receiver is indexed directly.
func (a BoolArray) ZipBoolList(l2 BoolList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a BoolArray) ZipStringList(l2 StringList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a BoolArray) ZipIntList(l2 IntList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a BoolArray) ZipInt64List(l2 Int64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a BoolArray) ZipByteList(l2 ByteList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a BoolArray) ZipRuneList(l2 RuneList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a BoolArray) ZipFloat32List(l2 Float32List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a BoolArray) ZipFloat64List(l2 Float64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a BoolArray) ZipAnyList(l2 AnyList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a BoolArray) ZipTuple2List(l2 Tuple2List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
// StringArray zip helpers.
//
// Same generated pattern as the BoolArray group: each Zip* method pairs the
// receiver's elements with the argument's, position by position, truncating
// to the shorter length. Array variants index both sides; list variants
// walk the argument via head/tail links (the NonEmpty() guard is defensive,
// since minLen already bounds i by l2.Size()).
func (a StringArray) ZipBoolArray(a2 BoolArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a StringArray) ZipStringArray(a2 StringArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a StringArray) ZipIntArray(a2 IntArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a StringArray) ZipInt64Array(a2 Int64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a StringArray) ZipByteArray(a2 ByteArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a StringArray) ZipRuneArray(a2 RuneArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a StringArray) ZipFloat32Array(a2 Float32Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a StringArray) ZipFloat64Array(a2 Float64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a StringArray) ZipAnyArray(a2 AnyArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a StringArray) ZipTuple2Array(a2 Tuple2Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
// List-argument variants follow.
func (a StringArray) ZipBoolList(l2 BoolList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a StringArray) ZipStringList(l2 StringList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a StringArray) ZipIntList(l2 IntList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a StringArray) ZipInt64List(l2 Int64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a StringArray) ZipByteList(l2 ByteList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a StringArray) ZipRuneList(l2 RuneList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a StringArray) ZipFloat32List(l2 Float32List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a StringArray) ZipFloat64List(l2 Float64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a StringArray) ZipAnyList(l2 AnyList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a StringArray) ZipTuple2List(l2 Tuple2List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
// IntArray zip helpers.
//
// Same generated pattern as the BoolArray group: each Zip* method pairs the
// receiver's elements with the argument's, position by position, truncating
// to the shorter length. Array variants index both sides; list variants
// walk the argument via head/tail links (the NonEmpty() guard is defensive,
// since minLen already bounds i by l2.Size()).
func (a IntArray) ZipBoolArray(a2 BoolArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a IntArray) ZipStringArray(a2 StringArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a IntArray) ZipIntArray(a2 IntArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a IntArray) ZipInt64Array(a2 Int64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a IntArray) ZipByteArray(a2 ByteArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a IntArray) ZipRuneArray(a2 RuneArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a IntArray) ZipFloat32Array(a2 Float32Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a IntArray) ZipFloat64Array(a2 Float64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a IntArray) ZipAnyArray(a2 AnyArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a IntArray) ZipTuple2Array(a2 Tuple2Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
// List-argument variants follow.
func (a IntArray) ZipBoolList(l2 BoolList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a IntArray) ZipStringList(l2 StringList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a IntArray) ZipIntList(l2 IntList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a IntArray) ZipInt64List(l2 Int64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a IntArray) ZipByteList(l2 ByteList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a IntArray) ZipRuneList(l2 RuneList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a IntArray) ZipFloat32List(l2 Float32List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a IntArray) ZipFloat64List(l2 Float64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a IntArray) ZipAnyList(l2 AnyList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a IntArray) ZipTuple2List(l2 Tuple2List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
// Int64Array zip helpers.
//
// Same generated pattern as the BoolArray group: each Zip* method pairs the
// receiver's elements with the argument's, position by position, truncating
// to the shorter length. Array variants index both sides; list variants
// walk the argument via head/tail links (the NonEmpty() guard is defensive,
// since minLen already bounds i by l2.Size()).
func (a Int64Array) ZipBoolArray(a2 BoolArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a Int64Array) ZipStringArray(a2 StringArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a Int64Array) ZipIntArray(a2 IntArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a Int64Array) ZipInt64Array(a2 Int64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a Int64Array) ZipByteArray(a2 ByteArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a Int64Array) ZipRuneArray(a2 RuneArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a Int64Array) ZipFloat32Array(a2 Float32Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a Int64Array) ZipFloat64Array(a2 Float64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a Int64Array) ZipAnyArray(a2 AnyArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a Int64Array) ZipTuple2Array(a2 Tuple2Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
// List-argument variants follow.
func (a Int64Array) ZipBoolList(l2 BoolList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a Int64Array) ZipStringList(l2 StringList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a Int64Array) ZipIntList(l2 IntList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a Int64Array) ZipInt64List(l2 Int64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a Int64Array) ZipByteList(l2 ByteList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a Int64Array) ZipRuneList(l2 RuneList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a Int64Array) ZipFloat32List(l2 Float32List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a Int64Array) ZipFloat64List(l2 Float64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a Int64Array) ZipAnyList(l2 AnyList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a Int64Array) ZipTuple2List(l2 Tuple2List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
// ByteArray zip helpers.
//
// Same generated pattern as the BoolArray group: each Zip* method pairs the
// receiver's elements with the argument's, position by position, truncating
// to the shorter length. Array variants index both sides; list variants
// walk the argument via head/tail links (the NonEmpty() guard is defensive,
// since minLen already bounds i by l2.Size()).
func (a ByteArray) ZipBoolArray(a2 BoolArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a ByteArray) ZipStringArray(a2 StringArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a ByteArray) ZipIntArray(a2 IntArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a ByteArray) ZipInt64Array(a2 Int64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a ByteArray) ZipByteArray(a2 ByteArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a ByteArray) ZipRuneArray(a2 RuneArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a ByteArray) ZipFloat32Array(a2 Float32Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a ByteArray) ZipFloat64Array(a2 Float64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a ByteArray) ZipAnyArray(a2 AnyArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a ByteArray) ZipTuple2Array(a2 Tuple2Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
// List-argument variants follow.
func (a ByteArray) ZipBoolList(l2 BoolList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a ByteArray) ZipStringList(l2 StringList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a ByteArray) ZipIntList(l2 IntList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a ByteArray) ZipInt64List(l2 Int64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a ByteArray) ZipByteList(l2 ByteList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a ByteArray) ZipRuneList(l2 RuneList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a ByteArray) ZipFloat32List(l2 Float32List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a ByteArray) ZipFloat64List(l2 Float64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a ByteArray) ZipAnyList(l2 AnyList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a ByteArray) ZipTuple2List(l2 Tuple2List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
// RuneArray zip helpers.
//
// Same generated pattern as the BoolArray group: each Zip* method pairs the
// receiver's elements with the argument's, position by position, truncating
// to the shorter length. Array variants index both sides; list variants
// walk the argument via head/tail links (the NonEmpty() guard is defensive,
// since minLen already bounds i by l2.Size()).
func (a RuneArray) ZipBoolArray(a2 BoolArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a RuneArray) ZipStringArray(a2 StringArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a RuneArray) ZipIntArray(a2 IntArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a RuneArray) ZipInt64Array(a2 Int64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a RuneArray) ZipByteArray(a2 ByteArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a RuneArray) ZipRuneArray(a2 RuneArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a RuneArray) ZipFloat32Array(a2 Float32Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a RuneArray) ZipFloat64Array(a2 Float64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a RuneArray) ZipAnyArray(a2 AnyArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
func (a RuneArray) ZipTuple2Array(a2 Tuple2Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2 { a[i], a2[i] }
	}
	return zipped
}
// List-argument variants follow.
func (a RuneArray) ZipBoolList(l2 BoolList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a RuneArray) ZipStringList(l2 StringList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a RuneArray) ZipIntList(l2 IntList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i ++ {
		zipped[i] = Tuple2 { a[i], *xs.head }
		xs = *xs.tail
	}
	return zipped
}
func (a RuneArray) ZipInt64List(l2 Int64List) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
func (a RuneArray) ZipByteList(l2 ByteList) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
func (a RuneArray) ZipRuneList(l2 RuneList) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
func (a RuneArray) ZipFloat32List(l2 Float32List) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
func (a RuneArray) ZipFloat64List(l2 Float64List) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
func (a RuneArray) ZipAnyList(l2 AnyList) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
func (a RuneArray) ZipTuple2List(l2 Tuple2List) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
// Generated pairwise-zip methods for Float32Array. Each returns a Tuple2Array
// whose length is the shorter of the two operands.

// ZipBoolArray zips a with a2 elementwise, truncated to the shorter length.
func (a Float32Array) ZipBoolArray(a2 BoolArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipStringArray zips a with a2 elementwise, truncated to the shorter length.
func (a Float32Array) ZipStringArray(a2 StringArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipIntArray zips a with a2 elementwise, truncated to the shorter length.
func (a Float32Array) ZipIntArray(a2 IntArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipInt64Array zips a with a2 elementwise, truncated to the shorter length.
func (a Float32Array) ZipInt64Array(a2 Int64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipByteArray zips a with a2 elementwise, truncated to the shorter length.
func (a Float32Array) ZipByteArray(a2 ByteArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipRuneArray zips a with a2 elementwise, truncated to the shorter length.
func (a Float32Array) ZipRuneArray(a2 RuneArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipFloat32Array zips a with a2 elementwise, truncated to the shorter length.
func (a Float32Array) ZipFloat32Array(a2 Float32Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipFloat64Array zips a with a2 elementwise, truncated to the shorter length.
func (a Float32Array) ZipFloat64Array(a2 Float64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipAnyArray zips a with a2 elementwise, truncated to the shorter length.
func (a Float32Array) ZipAnyArray(a2 AnyArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipTuple2Array zips a with a2 elementwise, truncated to the shorter length.
func (a Float32Array) ZipTuple2Array(a2 Tuple2Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipBoolList pairs a's elements with l2's, truncated to the shorter operand.
func (a Float32Array) ZipBoolList(l2 BoolList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipStringList pairs a's elements with l2's, truncated to the shorter operand.
func (a Float32Array) ZipStringList(l2 StringList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipIntList pairs a's elements with l2's, truncated to the shorter operand.
func (a Float32Array) ZipIntList(l2 IntList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipInt64List pairs a's elements with l2's, truncated to the shorter operand.
func (a Float32Array) ZipInt64List(l2 Int64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipByteList pairs a's elements with l2's, truncated to the shorter operand.
func (a Float32Array) ZipByteList(l2 ByteList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipRuneList pairs a's elements with l2's, truncated to the shorter operand.
func (a Float32Array) ZipRuneList(l2 RuneList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipFloat32List pairs a's elements with l2's, truncated to the shorter operand.
func (a Float32Array) ZipFloat32List(l2 Float32List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipFloat64List pairs a's elements with l2's, truncated to the shorter operand.
func (a Float32Array) ZipFloat64List(l2 Float64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipAnyList pairs a's elements with l2's, truncated to the shorter operand.
func (a Float32Array) ZipAnyList(l2 AnyList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipTuple2List pairs a's elements with l2's, truncated to the shorter operand.
func (a Float32Array) ZipTuple2List(l2 Tuple2List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}
// Generated pairwise-zip methods for Float64Array. Each returns a Tuple2Array
// whose length is the shorter of the two operands.

// ZipBoolArray zips a with a2 elementwise, truncated to the shorter length.
func (a Float64Array) ZipBoolArray(a2 BoolArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipStringArray zips a with a2 elementwise, truncated to the shorter length.
func (a Float64Array) ZipStringArray(a2 StringArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipIntArray zips a with a2 elementwise, truncated to the shorter length.
func (a Float64Array) ZipIntArray(a2 IntArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipInt64Array zips a with a2 elementwise, truncated to the shorter length.
func (a Float64Array) ZipInt64Array(a2 Int64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipByteArray zips a with a2 elementwise, truncated to the shorter length.
func (a Float64Array) ZipByteArray(a2 ByteArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipRuneArray zips a with a2 elementwise, truncated to the shorter length.
func (a Float64Array) ZipRuneArray(a2 RuneArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipFloat32Array zips a with a2 elementwise, truncated to the shorter length.
func (a Float64Array) ZipFloat32Array(a2 Float32Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipFloat64Array zips a with a2 elementwise, truncated to the shorter length.
func (a Float64Array) ZipFloat64Array(a2 Float64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipAnyArray zips a with a2 elementwise, truncated to the shorter length.
func (a Float64Array) ZipAnyArray(a2 AnyArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipTuple2Array zips a with a2 elementwise, truncated to the shorter length.
func (a Float64Array) ZipTuple2Array(a2 Tuple2Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipBoolList pairs a's elements with l2's, truncated to the shorter operand.
func (a Float64Array) ZipBoolList(l2 BoolList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipStringList pairs a's elements with l2's, truncated to the shorter operand.
func (a Float64Array) ZipStringList(l2 StringList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipIntList pairs a's elements with l2's, truncated to the shorter operand.
func (a Float64Array) ZipIntList(l2 IntList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipInt64List pairs a's elements with l2's, truncated to the shorter operand.
func (a Float64Array) ZipInt64List(l2 Int64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipByteList pairs a's elements with l2's, truncated to the shorter operand.
func (a Float64Array) ZipByteList(l2 ByteList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipRuneList pairs a's elements with l2's, truncated to the shorter operand.
func (a Float64Array) ZipRuneList(l2 RuneList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipFloat32List pairs a's elements with l2's, truncated to the shorter operand.
func (a Float64Array) ZipFloat32List(l2 Float32List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipFloat64List pairs a's elements with l2's, truncated to the shorter operand.
func (a Float64Array) ZipFloat64List(l2 Float64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipAnyList pairs a's elements with l2's, truncated to the shorter operand.
func (a Float64Array) ZipAnyList(l2 AnyList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipTuple2List pairs a's elements with l2's, truncated to the shorter operand.
func (a Float64Array) ZipTuple2List(l2 Tuple2List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}
// Generated pairwise-zip methods for AnyArray. Each returns a Tuple2Array
// whose length is the shorter of the two operands.

// ZipBoolArray zips a with a2 elementwise, truncated to the shorter length.
func (a AnyArray) ZipBoolArray(a2 BoolArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipStringArray zips a with a2 elementwise, truncated to the shorter length.
func (a AnyArray) ZipStringArray(a2 StringArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipIntArray zips a with a2 elementwise, truncated to the shorter length.
func (a AnyArray) ZipIntArray(a2 IntArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipInt64Array zips a with a2 elementwise, truncated to the shorter length.
func (a AnyArray) ZipInt64Array(a2 Int64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipByteArray zips a with a2 elementwise, truncated to the shorter length.
func (a AnyArray) ZipByteArray(a2 ByteArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipRuneArray zips a with a2 elementwise, truncated to the shorter length.
func (a AnyArray) ZipRuneArray(a2 RuneArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipFloat32Array zips a with a2 elementwise, truncated to the shorter length.
func (a AnyArray) ZipFloat32Array(a2 Float32Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipFloat64Array zips a with a2 elementwise, truncated to the shorter length.
func (a AnyArray) ZipFloat64Array(a2 Float64Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipAnyArray zips a with a2 elementwise, truncated to the shorter length.
func (a AnyArray) ZipAnyArray(a2 AnyArray) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipTuple2Array zips a with a2 elementwise, truncated to the shorter length.
func (a AnyArray) ZipTuple2Array(a2 Tuple2Array) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(len(a2))))
	zipped := make([]Tuple2, minLen)
	for i := 0; i < minLen; i++ {
		zipped[i] = Tuple2{a[i], a2[i]}
	}
	return zipped
}

// ZipBoolList pairs a's elements with l2's, truncated to the shorter operand.
func (a AnyArray) ZipBoolList(l2 BoolList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipStringList pairs a's elements with l2's, truncated to the shorter operand.
func (a AnyArray) ZipStringList(l2 StringList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipIntList pairs a's elements with l2's, truncated to the shorter operand.
func (a AnyArray) ZipIntList(l2 IntList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipInt64List pairs a's elements with l2's, truncated to the shorter operand.
func (a AnyArray) ZipInt64List(l2 Int64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipByteList pairs a's elements with l2's, truncated to the shorter operand.
func (a AnyArray) ZipByteList(l2 ByteList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipRuneList pairs a's elements with l2's, truncated to the shorter operand.
func (a AnyArray) ZipRuneList(l2 RuneList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipFloat32List pairs a's elements with l2's, truncated to the shorter operand.
func (a AnyArray) ZipFloat32List(l2 Float32List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipFloat64List pairs a's elements with l2's, truncated to the shorter operand.
func (a AnyArray) ZipFloat64List(l2 Float64List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipAnyList pairs a's elements with l2's, truncated to the shorter operand.
func (a AnyArray) ZipAnyList(l2 AnyList) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}

// ZipTuple2List pairs a's elements with l2's, truncated to the shorter operand.
func (a AnyArray) ZipTuple2List(l2 Tuple2List) Tuple2Array {
	minLen := int(Int(len(a)).Min(Int(l2.Size())))
	zipped := make([]Tuple2, minLen)
	xs := l2
	for i := 0; xs.NonEmpty() && i < minLen; i++ {
		zipped[i] = Tuple2{a[i], *xs.head}
		xs = *xs.tail
	}
	return zipped
}
func (a Tuple2Array) ZipBoolArray(a2 BoolArray) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(len(a2))))
zipped := make([]Tuple2, minLen)
for i := 0; i < minLen; i++ {
zipped[i] = Tuple2 { a[i], a2[i] }
}
return zipped
}
func (a Tuple2Array) ZipStringArray(a2 StringArray) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(len(a2))))
zipped := make([]Tuple2, minLen)
for i := 0; i < minLen; i++ {
zipped[i] = Tuple2 { a[i], a2[i] }
}
return zipped
}
func (a Tuple2Array) ZipIntArray(a2 IntArray) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(len(a2))))
zipped := make([]Tuple2, minLen)
for i := 0; i < minLen; i++ {
zipped[i] = Tuple2 { a[i], a2[i] }
}
return zipped
}
func (a Tuple2Array) ZipInt64Array(a2 Int64Array) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(len(a2))))
zipped := make([]Tuple2, minLen)
for i := 0; i < minLen; i++ {
zipped[i] = Tuple2 { a[i], a2[i] }
}
return zipped
}
func (a Tuple2Array) ZipByteArray(a2 ByteArray) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(len(a2))))
zipped := make([]Tuple2, minLen)
for i := 0; i < minLen; i++ {
zipped[i] = Tuple2 { a[i], a2[i] }
}
return zipped
}
func (a Tuple2Array) ZipRuneArray(a2 RuneArray) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(len(a2))))
zipped := make([]Tuple2, minLen)
for i := 0; i < minLen; i++ {
zipped[i] = Tuple2 { a[i], a2[i] }
}
return zipped
}
func (a Tuple2Array) ZipFloat32Array(a2 Float32Array) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(len(a2))))
zipped := make([]Tuple2, minLen)
for i := 0; i < minLen; i++ {
zipped[i] = Tuple2 { a[i], a2[i] }
}
return zipped
}
func (a Tuple2Array) ZipFloat64Array(a2 Float64Array) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(len(a2))))
zipped := make([]Tuple2, minLen)
for i := 0; i < minLen; i++ {
zipped[i] = Tuple2 { a[i], a2[i] }
}
return zipped
}
func (a Tuple2Array) ZipAnyArray(a2 AnyArray) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(len(a2))))
zipped := make([]Tuple2, minLen)
for i := 0; i < minLen; i++ {
zipped[i] = Tuple2 { a[i], a2[i] }
}
return zipped
}
func (a Tuple2Array) ZipTuple2Array(a2 Tuple2Array) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(len(a2))))
zipped := make([]Tuple2, minLen)
for i := 0; i < minLen; i++ {
zipped[i] = Tuple2 { a[i], a2[i] }
}
return zipped
}
func (a Tuple2Array) ZipBoolList(l2 BoolList) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
func (a Tuple2Array) ZipStringList(l2 StringList) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
func (a Tuple2Array) ZipIntList(l2 IntList) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
func (a Tuple2Array) ZipInt64List(l2 Int64List) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
func (a Tuple2Array) ZipByteList(l2 ByteList) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
func (a Tuple2Array) ZipRuneList(l2 RuneList) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
func (a Tuple2Array) ZipFloat32List(l2 Float32List) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
func (a Tuple2Array) ZipFloat64List(l2 Float64List) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
func (a Tuple2Array) ZipAnyList(l2 AnyList) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
}
func (a Tuple2Array) ZipTuple2List(l2 Tuple2List) Tuple2Array {
minLen := int(Int(len(a)).Min(Int(l2.Size())))
zipped := make([]Tuple2, minLen)
xs := l2
for i := 0; xs.NonEmpty() && i < minLen; i ++ {
zipped[i] = Tuple2 { a[i], *xs.head }
xs = *xs.tail
}
return zipped
} | fp/bootstrap_array_zip.go | 0.680135 | 0.474022 | bootstrap_array_zip.go | starcoder |
package spread
import "github.com/onwsk8r/goption-pricing/formula"
// BearCall involves selling an ITM call and buying an OTM call.
// This is a credit spread, and is most effective when the stock falls below
// the strike price of the short (ITM) call so both options expire worthless.
// An example would be buying a Mar 130 Call and selling a Mar 100 Call with an
// underlying price of 115. Hopefully those options are pretty far out because
// the stock needs to fall about 13% for you to maximize your profit.
type BearCall struct {
	formula.Calculator
	LongCallPrice  float64 // strike passed to SetStrikePrice for the bought call leg
	ShortCallPrice float64 // strike passed to SetStrikePrice for the sold call leg
}
// GetBearCall builds a BearCall spread around the supplied pricing calculator.
func GetBearCall(c formula.Calculator) BearCall {
	return BearCall{Calculator: c}
}
// Long implements the Spread interface
func (s *BearCall) Long() float64 {
	// Price the bought leg first, then the sold leg, and net them.
	s.SetStrikePrice(s.LongCallPrice)
	bought := s.Call()
	s.SetStrikePrice(s.ShortCallPrice)
	sold := s.Call()
	return bought - sold
}
// Short implements the Spread interface
func (s *BearCall) Short() float64 {
	// Same two legs as Long, netted in the opposite direction.
	s.SetStrikePrice(s.ShortCallPrice)
	sold := s.Call()
	s.SetStrikePrice(s.LongCallPrice)
	bought := s.Call()
	return sold - bought
}
// BullPut involves selling an ITM put and buying an OTM put.
// This is a credit spread, and is most effective when the stock exceeds the
// strike price of the ITM put and both options expire worthless. An example
// would be selling a Mar 130 Put and buying a Mar 100 Put with an underlying
// price of 115. Hopefully those options are pretty far out because the stock
// needs to rise about 13% for you to maximize your profit.
// NOTE(review): the original example had the bought/sold legs reversed
// relative to the description above — confirm against the strategy docs.
type BullPut struct {
	formula.Calculator
	LongPutPrice  float64 // strike passed to SetStrikePrice for the bought put leg
	ShortPutPrice float64 // strike passed to SetStrikePrice for the sold put leg
}
// GetBullPut builds a BullPut spread around the supplied pricing calculator.
func GetBullPut(c formula.Calculator) BullPut {
	return BullPut{Calculator: c}
}
// Long implements the Spread interface
func (s *BullPut) Long() float64 {
	// Price the bought leg first, then the sold leg, and net them.
	s.SetStrikePrice(s.LongPutPrice)
	bought := s.Put()
	s.SetStrikePrice(s.ShortPutPrice)
	sold := s.Put()
	return bought - sold
}
// Short implements the Spread interface
func (s *BullPut) Short() float64 {
// This is not confusing at all...
s.SetStrikePrice(s.ShortPutPrice)
longPut := s.Put()
s.SetStrikePrice(s.LongPutPrice)
return longPut - s.Put()
} | spread/bcbp.go | 0.669205 | 0.449513 | bcbp.go | starcoder |
package cal
import (
"math"
"time"
)
// AddUkraineHolidays adds all Ukraine holidays to the Calendar.
// Both the nominal dates and their weekend-shifted observed variants are
// registered (uaHolidays emits one Holiday of each kind per rule).
func AddUkraineHolidays(c *Calendar) {
	c.AddHoliday(uaHolidays()...)
}
// holidayRule describes one Ukrainian public holiday: either a fixed
// month/day (optionally limited to a year range) or a date derived from
// Orthodox (Julian) Easter by a day offset.
type holidayRule struct {
	name            string     // human-readable holiday name (not used in date math)
	startYear       int        // year bound checked by holidayByRule; 0 means unbounded
	endYear         int        // year bound checked by holidayByRule; 0 means unbounded
	day             int        // day of month for fixed-date holidays
	month           time.Month // month for fixed-date holidays
	isEasterDepend  bool       // true if the date is computed from Orthodox Easter
	easterOffsetDay int        // days added to Orthodox Easter when isEasterDepend
}
// uaHolidays builds the full list of Ukrainian public holidays, emitting two
// Holiday values per rule: the nominal date and the weekend-shifted observed
// date (see holidayByRule's isExtendHoliday parameter).
func uaHolidays() []Holiday {
	var uaHolidays []Holiday
	// Holidays in Ukraine
	// Reference https://en.wikipedia.org/wiki/Public_holidays_in_Ukraine
	var holidayRules = []holidayRule{
		{name: "New Year", month: time.January, day: 1},
		{name: "Labour Day 1", month: time.May, day: 1},
		{name: "Labour Day 2", endYear: 2018, month: time.May, day: 2},
		{name: "Defender Of Ukraine day", startYear: 2015, month: time.October, day: 14},
		{name: "Catholic Christmas day", startYear: 2017, month: time.December, day: 25},
		{name: "Orthodox Christmas day", month: time.January, day: 7},
		{name: "Women Day", month: time.March, day: 8},
		{name: "Victory Day", month: time.May, day: 9},
		{name: "Constitution Day", month: time.June, day: 28},
		{name: "Independence Day", month: time.August, day: 24},
		{name: "Orthodox Easter Day", isEasterDepend: true, easterOffsetDay: 0},
		{name: "Orthodox Pentecost Day", isEasterDepend: true, easterOffsetDay: 49},
	}
	for _, rule := range holidayRules {
		// Copy the loop variable so each closure created by holidayByRule
		// captures its own rule (required before Go 1.22).
		rule := rule
		uaHolidays = append(uaHolidays, holidayByRule(&rule, false), holidayByRule(&rule, true))
	}
	return uaHolidays
}
// holidayByRule turns a holidayRule into a Holiday. When isExtendHoliday is
// true, a holiday that lands on a weekend is shifted to the following Monday
// (the "observed" date); otherwise the nominal date is returned.
func holidayByRule(h *holidayRule, isExtendHoliday bool) Holiday {
	return NewHolidayFunc(func(year int, loc *time.Location) (time.Month, int) {
		var d time.Time
		if h.isEasterDepend {
			easter := calculateJulianEaster(year, loc)
			d = easter.AddDate(0, 0, h.easterOffsetDay)
		} else {
			startYear := h.startYear
			endYear := h.endYear
			month := h.month
			day := h.day
			// NOTE(review): `year > startYear` excludes the start year itself
			// (e.g. "Defender Of Ukraine day" with startYear 2015 first matches
			// in 2016) — confirm whether `>=` was intended.
			// NOTE(review): when the year is out of range, d stays the zero
			// time.Time, so (January, 1) is returned — confirm this fallback
			// is acceptable.
			if ((startYear == 0) || (startYear > 0 && year > startYear)) && ((endYear == 0) || (endYear > 0 && year < endYear)) {
				d = time.Date(year, month, day, 0, 0, 0, 0, loc)
			}
		}
		//Extended holiday on first working day for Saturday and Sunday
		if isExtendHoliday {
			switch int(d.Weekday()) {
			case 6: // Saturday -> following Monday
				d = d.AddDate(0, 0, 2)
			case 0: // Sunday -> following Monday
				d = d.AddDate(0, 0, 1)
			}
		}
		return d.Month(), d.Day()
	})
}
// calculateJulianEaster returns the Gregorian-calendar date of Orthodox
// (Julian-calendar) Easter for the given year in the given location, using
// the Meeus Julian algorithm. The fixed +13 day shift converts the Julian
// date to the Gregorian calendar and is only valid for years 1900-2099.
func calculateJulianEaster(year int, loc *time.Location) time.Time {
	a := year % 4
	b := year % 7
	c := year % 19
	d := (19*c + 15) % 30
	e := (2*a + 4*b - d + 34) % 7
	// Integer division already truncates toward zero for these non-negative
	// operands, so the original math.Floor round-trip was a no-op.
	month := (d + e + 114) / 31
	// +13 shifts the Julian date onto the Gregorian calendar; time.Date
	// normalizes day values that overflow the month (e.g. April 32 -> May 2).
	day := (d+e+114)%31 + 1 + 13
	return time.Date(year, time.Month(month), day, 0, 0, 0, 0, loc)
}
package qhull
import (
"fmt"
"log"
"math"
"github.com/celer/csg/csg"
)
// AUTOMATIC_TOLERANCE makes the hull derive its distance tolerance from the
// input's coordinate magnitudes (see computeMinAndMax).
const AUTOMATIC_TOLERANCE = 0.0

// DOUBLE_PREC is the float64 machine epsilon used to scale the automatic
// tolerance.
const DOUBLE_PREC = 2.2204460492503131e-16

// Hull computes the convex hull of a set of 3D points (quickhull algorithm).
type Hull struct {
	findIndex int
	charLength float64 // characteristic length: the largest axis-aligned extent of the input
	Debug bool // when true, progress is written via log.Printf
	points []*Vertex // one Vertex wrapper per input point
	vertexPointIndices []int // maps compact vertex index -> original point index
	maxVertex [3]*Vertex // per-axis extreme points (maximum)
	minVertex [3]*Vertex // per-axis extreme points (minimum)
	discardedFaces [3]*Face
	faces []*Face // current hull faces; only mark == VISIBLE faces are live
	horizon []*HalfEdge
	claimed *VertexList
	unclaimed *VertexList
	newFaces *FaceList
	numFaces int // count of VISIBLE faces after reindexing
	numPoints int
	numVertices int // count of points that ended up on the hull
	explicitTolerance float64 // user-supplied tolerance, or AUTOMATIC_TOLERANCE
	tolerance float64 // effective distance tolerance used during construction
}
// markFaceVertices walks the face's half-edge cycle and tags the head vertex
// of every edge with the given mark value.
func (q *Hull) markFaceVertices(face *Face, mark int) {
	start := face.edge
	for e := start; ; {
		e.Head().index = mark
		if e = e.next; e == start {
			break
		}
	}
}
// reindexFacesAndVertices drops faces that are no longer VISIBLE and assigns
// compact indices to the vertices that remain on the hull, recording each
// surviving vertex's original point position in vertexPointIndices.
func (q *Hull) reindexFacesAndVertices() {
	// Reset every point's index so only vertices on retained faces get marked.
	for i := 0; i < q.numPoints; i++ {
		q.points[i].index = -1
	}

	// Compact q.faces in place, keeping only VISIBLE faces and marking their
	// vertices with 0. The in-place filter preserves order and replaces the
	// original one-element-at-a-time deletion, which was O(n^2).
	kept := q.faces[:0]
	for _, face := range q.faces {
		if face.mark != VISIBLE {
			continue
		}
		q.markFaceVertices(face, 0)
		kept = append(kept, face)
	}
	q.faces = kept
	q.numFaces = len(kept)

	if q.Debug {
		log.Printf("Reindexing faces/verts - faces left after removing inactive faces %d", len(q.faces))
	}

	// Reindex vertices: every point marked above gets the next compact index,
	// and its original position is recorded for later lookup.
	q.numVertices = 0
	for i := 0; i < q.numPoints; i++ {
		vtx := q.points[i]
		if vtx.index == 0 {
			q.vertexPointIndices[q.numVertices] = i
			vtx.index = q.numVertices
			q.numVertices++
		}
	}
}
// initBuffers resets all working state for a hull build over nump points.
func (q *Hull) initBuffers(nump int) {
	q.numPoints = nump
	q.numFaces = 0
	q.vertexPointIndices = make([]int, nump)
	q.points = make([]*Vertex, nump)
	q.faces = make([]*Face, 0)
	q.horizon = make([]*HalfEdge, 0)
	q.claimed = new(VertexList)
	q.unclaimed = new(VertexList)
	q.newFaces = new(FaceList)
}
// Build computes the convex hull of the first nump entries of points.
// It validates the input, primes the working buffers, and runs quickhull.
func (q *Hull) Build(points []*csg.Vector, nump int) error {
	switch {
	case nump < 4:
		return fmt.Errorf("Less than four input points specified")
	case len(points) < nump:
		return fmt.Errorf("Point array too small for specified number of points")
	}
	q.initBuffers(nump)
	q.setPoints(points, nump)
	q.buildHull()
	return nil
}
// setPoints wraps the first nump input vectors in Vertex records.
func (q *Hull) setPoints(points []*csg.Vector, nump int) {
	for i, p := range points[:nump] {
		q.points[i] = NewVertex(p, i)
	}
}
// computeMinAndMax scans all points to find the per-axis extreme vertices,
// records the characteristic length of the input, and derives the distance
// tolerance (unless an explicit one was supplied).
func (q *Hull) computeMinAndMax() {
	var max *csg.Vector
	var min *csg.Vector
	max = q.points[0].point.Clone()
	min = q.points[0].point.Clone()
	for i := 0; i < 3; i++ {
		q.maxVertex[i] = q.points[0]
		q.minVertex[i] = q.points[0]
	}
	// Track both the extreme coordinate values (max/min) and the vertices
	// that produced them (maxVertex/minVertex), one pair per axis.
	for _, p := range q.points {
		pnt := p.point
		if pnt.X > max.X {
			max.X = pnt.X
			q.maxVertex[0] = p
		} else if pnt.X < min.X {
			min.X = pnt.X
			q.minVertex[0] = p
		}
		if pnt.Y > max.Y {
			max.Y = pnt.Y
			q.maxVertex[1] = p
		} else if pnt.Y < min.Y {
			min.Y = pnt.Y
			q.minVertex[1] = p
		}
		if pnt.Z > max.Z {
			max.Z = pnt.Z
			q.maxVertex[2] = p
		} else if pnt.Z < min.Z {
			min.Z = pnt.Z
			q.minVertex[2] = p
		}
	}
	if q.Debug {
		log.Printf("Max %v", max)
		log.Printf("Min %v", min)
	}
	// Characteristic length: the largest axis-aligned extent of the input.
	// The original computed this but never stored it; record it so the
	// charLength field is actually populated.
	cl := math.Max(max.X-min.X, max.Y-min.Y)
	cl = math.Max(max.Z-min.Z, cl)
	q.charLength = cl
	if q.explicitTolerance == AUTOMATIC_TOLERANCE {
		// Scale the tolerance by the coordinate magnitudes so it stays
		// proportional to float64 round-off error for this data set.
		q.tolerance = 3 * DOUBLE_PREC *
			(math.Max(math.Abs(max.X), math.Abs(min.X)) +
				math.Max(math.Abs(max.Y), math.Abs(min.Y)) +
				math.Max(math.Abs(max.Z), math.Abs(min.Z)))
	} else {
		q.tolerance = q.explicitTolerance
	}
	if q.Debug {
		log.Printf("Tolerance: %f", q.tolerance)
		for i := 0; i < 3; i++ {
			log.Printf("Max %d %v", i, q.maxVertex[i])
			log.Printf("Min %d %v", i, q.minVertex[i])
		}
	}
}
// createInitialSimplex selects four well-separated input points and builds
// the initial tetrahedron: the two extreme vertices along the axis of
// greatest spread, the point furthest from the line through them, and the
// point furthest from the plane through those three. Remaining points are
// then assigned to the tetrahedron face they lie furthest above.
func (q *Hull) createInitialSimplex() error {
	// Pick the axis with the greatest spread between its extreme vertices.
	max := 0.0
	imax := 0
	for i := 0; i < 3; i++ {
		diff := q.maxVertex[i].point.Get(i) - q.minVertex[i].point.Get(i)
		if diff > max {
			max = diff
			imax = i
		}
	}
	if max <= q.tolerance {
		return fmt.Errorf("Input points appear to be coincident")
	}
	vtx := make([]*Vertex, 4)
	vtx[0] = q.maxVertex[imax]
	vtx[1] = q.minVertex[imax]
	// Third vertex: furthest point from the line vtx[0]-vtx[1]; the cross
	// product with the unit line direction gives the (squared) distance.
	var normal *csg.Vector
	n := vtx[1].point.Minus(vtx[0].point).Normalize()
	maxSqr := 0.0
	for _, p := range q.points {
		diff := p.point.Minus(vtx[0].point)
		xprod := n.Cross(diff)
		lenSqr := xprod.LengthSquared()
		if lenSqr > maxSqr && p != vtx[0] && p != vtx[1] {
			maxSqr = lenSqr
			vtx[2] = p
			normal = xprod.Clone()
		}
	}
	if math.Sqrt(maxSqr) < 100*q.tolerance {
		return fmt.Errorf("Input points appear to be colinear")
	}
	normal = normal.Normalize()
	// Fourth vertex: furthest point from the plane of the first three.
	maxDist := 0.0
	d0 := vtx[2].point.Dot(normal)
	for _, p := range q.points {
		dist := math.Abs(p.point.Dot(normal) - d0)
		if dist > maxDist && p != vtx[0] && p != vtx[1] && p != vtx[2] {
			maxDist = dist
			vtx[3] = p
		}
	}
	// BUG FIX: maxDist is already a linear distance (unlike maxSqr above,
	// which is squared), so taking its square root inflated small values
	// and weakened the coplanarity test; compare it directly.
	if maxDist < 100*q.tolerance {
		return fmt.Errorf("Input points appear to be coplanar")
	}
	if q.Debug {
		log.Printf("Initial points")
		for _, v := range vtx {
			log.Printf("\t%v", v)
		}
	}
	// Orient the tetrahedron so every face points outward, then stitch the
	// half-edge opposite pointers between the four triangles.
	tris := make([]*Face, 4)
	if vtx[3].point.Dot(normal)-d0 < 0 {
		tris[0] = NewFaceFromTriangle(vtx[0], vtx[1], vtx[2])
		tris[1] = NewFaceFromTriangle(vtx[3], vtx[1], vtx[0])
		tris[2] = NewFaceFromTriangle(vtx[3], vtx[2], vtx[1])
		tris[3] = NewFaceFromTriangle(vtx[3], vtx[0], vtx[2])
		for i := 0; i < 3; i++ {
			k := (i + 1) % 3
			tris[i+1].GetEdge(1).SetOpposite(tris[k+1].GetEdge(0))
			tris[i+1].GetEdge(2).SetOpposite(tris[0].GetEdge(k))
		}
	} else {
		tris[0] = NewFaceFromTriangle(vtx[0], vtx[2], vtx[1])
		tris[1] = NewFaceFromTriangle(vtx[3], vtx[0], vtx[1])
		tris[2] = NewFaceFromTriangle(vtx[3], vtx[1], vtx[2])
		tris[3] = NewFaceFromTriangle(vtx[3], vtx[2], vtx[0])
		for i := 0; i < 3; i++ {
			k := (i + 1) % 3
			tris[i+1].GetEdge(0).SetOpposite(tris[k+1].GetEdge(1))
			tris[i+1].GetEdge(2).SetOpposite(tris[0].GetEdge((3 - i) % 3))
		}
	}
	q.faces = append(q.faces, tris...)
	// Assign every remaining point to the face it lies furthest above.
	for _, v := range q.points {
		if v == vtx[0] || v == vtx[1] || v == vtx[2] || v == vtx[3] {
			continue
		}
		maxDist = q.tolerance
		var maxFace *Face
		for k := 0; k < 4; k++ {
			dist := tris[k].DistanceToPlane(v.point)
			if dist > maxDist {
				maxFace = tris[k]
				maxDist = dist
			}
		}
		if maxFace != nil {
			q.addPointToFace(v, maxFace)
		}
	}
	return nil
}
// addPointToFace claims vtx for face, pushing it to the front of the
// face's run inside the shared claimed list.
func (q *Hull) addPointToFace(vtx *Vertex, face *Face) {
	vtx.face = face
	if head := face.outside; head != nil {
		q.claimed.InsertBefore(vtx, head)
	} else {
		q.claimed.Add(vtx)
	}
	face.outside = vtx
}
// nextPointToAdd returns the outside vertex furthest above the first
// claimed face, or nil when no claimed points remain (hull is complete).
func (q *Hull) nextPointToAdd() *Vertex {
	// Idiom fix: early return instead of an else branch after return.
	if q.claimed.IsEmpty() {
		return nil
	}
	eyeFace := q.claimed.First().face
	var eyeVtx *Vertex
	maxDist := 0.0
	// Walk the contiguous run of vertices claimed by eyeFace.
	for vtx := eyeFace.outside; vtx != nil && vtx.face == eyeFace; vtx = vtx.next {
		if dist := eyeFace.DistanceToPlane(vtx.point); dist > maxDist {
			maxDist = dist
			eyeVtx = vtx
		}
	}
	return eyeVtx
}
// removePointFromFace detaches vtx from face's outside-point list. When
// vtx is the head of that list, the head advances to the next vertex
// still claimed by face (or nil when none remain).
func (q *Hull) removePointFromFace(vtx *Vertex, face *Face) {
	if vtx == face.outside {
		if vtx.next != nil && vtx.next.face == face {
			face.outside = vtx.next
		} else {
			face.outside = nil
		}
	}
	q.claimed.Delete(vtx)
}
// removeAllPointsFromFace detaches face's entire run of outside points
// from the claimed list and returns the head of that (nil-terminated)
// chain, or nil when the face has no outside points.
func (q *Hull) removeAllPointsFromFace(face *Face) *Vertex {
	head := face.outside
	if head == nil {
		return nil
	}
	end := head
	for end.next != nil && end.next.face == face {
		end = end.next
	}
	q.claimed.DeleteChain(head, end)
	end.next = nil
	return head
}
// deleteFacePoints removes every outside point from face. When
// absorbingFace is non-nil, points still above it (beyond tolerance) are
// re-claimed by it; all other points go to the unclaimed list for later
// redistribution.
func (q *Hull) deleteFacePoints(face, absorbingFace *Face) {
	faceVtxs := q.removeAllPointsFromFace(face)
	if faceVtxs != nil {
		if absorbingFace == nil {
			q.unclaimed.AddAll(faceVtxs)
		} else {
			// Walk the detached chain; vtx.next may be rewritten by
			// addPointToFace, so capture the successor first.
			vtxNext := faceVtxs
			for vtx := vtxNext; vtx != nil; vtx = vtxNext {
				vtxNext = vtx.next
				dist := absorbingFace.DistanceToPlane(vtx.point)
				if dist > q.tolerance {
					q.addPointToFace(vtx, absorbingFace)
				} else {
					q.unclaimed.Add(vtx)
				}
			}
		}
	}
}
// calculateHorizon recursively visits every face visible from eyePnt,
// deleting it (mark DELETED, outside points moved to the unclaimed list)
// and collecting the boundary ("horizon") edges between visible and
// hidden faces.
//
// NOTE(review): results accumulate in q.horizon, not in the horizon
// parameter, which is never appended to. Callers currently pass q.horizon
// so behaviour is correct, but the parameter is effectively unused —
// confirm before refactoring.
func (q *Hull) calculateHorizon(eyePnt *csg.Vector, edge0 *HalfEdge, face *Face, horizon []*HalfEdge) {
	q.deleteFacePoints(face, nil)
	face.mark = DELETED
	if q.Debug {
		log.Printf(" visiting face %v", face)
	}
	var edge *HalfEdge
	if edge0 == nil {
		// Initial call: start the walk at the face's first edge.
		edge0 = face.GetEdge(0)
		edge = edge0
	} else {
		// Recursive call: we arrived across edge0, so continue past it.
		edge = edge0.next
	}
	for {
		oppFace := edge.OppositeFace()
		if oppFace.mark == VISIBLE {
			if oppFace.DistanceToPlane(eyePnt) > q.tolerance {
				// Neighbour is also visible from the eye point: recurse.
				q.calculateHorizon(eyePnt, edge.Opposite(), oppFace, horizon)
			} else {
				// Neighbour is hidden: this edge lies on the horizon.
				q.horizon = append(q.horizon, edge)
				if q.Debug {
					log.Printf(" adding horizon edge %v", edge)
				}
			}
		}
		edge = edge.next
		if edge == edge0 {
			break
		}
	}
}
// addAdjoiningFace creates a triangular face joining eyeVtx to horizon
// edge he, registers it with the hull, links its base edge to the face
// across he, and returns the new face's first edge.
func (q *Hull) addAdjoiningFace(eyeVtx *Vertex, he *HalfEdge) *HalfEdge {
	face := NewFaceFromTriangle(eyeVtx, he.Tail(), he.Head())
	q.faces = append(q.faces, face)
	face.GetEdge(-1).SetOpposite(he.Opposite())
	return face.GetEdge(0)
}
// addNewFaces creates one new triangular face per horizon edge, joining
// eyeVtx to the hull, and stitches the side edges of consecutive new
// faces together so they form a closed fan around eyeVtx.
func (q *Hull) addNewFaces(newFaces *FaceList, eyeVtx *Vertex, horizon []*HalfEdge) {
	newFaces.Clear()
	var hedgeSidePrev *HalfEdge
	var hedgeSideBegin *HalfEdge
	for _, horizonHe := range horizon {
		hedgeSide := q.addAdjoiningFace(eyeVtx, horizonHe)
		if q.Debug {
			log.Printf("new face: %v", hedgeSide.Face)
		}
		if hedgeSidePrev != nil {
			hedgeSide.next.SetOpposite(hedgeSidePrev)
		} else {
			hedgeSideBegin = hedgeSide
		}
		newFaces.Add(hedgeSide.Face)
		hedgeSidePrev = hedgeSide
	}
	// Close the fan by linking the first new face back to the last one.
	// BUG FIX: this statement previously sat inside the loop, redundantly
	// re-linking hedgeSideBegin on every iteration; it belongs after the
	// loop, executed once (nil-guarded for an empty horizon).
	if hedgeSideBegin != nil {
		hedgeSideBegin.next.SetOpposite(hedgeSidePrev)
	}
}
// Merge strategies used by doAdjacentMerge.
const NONCONVEX_WRT_LARGER_FACE = 1 // first pass: convexity judged from the larger face
const NONCONVEX = 2                 // second pass: merge when non-convex w.r.t. either face

// oppFaceDistance returns the signed distance from the centroid of the
// face on the other side of he to the plane of he's own face.
func (q *Hull) oppFaceDistance(he *HalfEdge) float64 {
	return he.Face.DistanceToPlane(he.opposite.Face.centroid)
}
// doAdjacentMerge walks face's edge loop looking for a neighbouring face
// to merge with, according to mergeType:
//   - NONCONVEX: merge when the two faces are definitively non-convex
//     with respect to each other (either centroid above the other plane).
//   - NONCONVEX_WRT_LARGER_FACE: only the larger face's test triggers a
//     merge; a failing test from the smaller side merely flags the face
//     NON_CONVEX for the second pass.
// It returns true when a merge occurred (face geometry changed, so the
// caller should call again until false), false otherwise.
func (q *Hull) doAdjacentMerge(face *Face, mergeType int) bool {
	hedge := face.edge
	convex := true
	for {
		oppFace := hedge.OppositeFace()
		merge := false
		var dist1 float64
		if mergeType == NONCONVEX {
			// then merge faces if they are definitively non-convex
			if q.oppFaceDistance(hedge) > -q.tolerance ||
				q.oppFaceDistance(hedge.opposite) > -q.tolerance {
				merge = true
			}
		} else {
			// merge faces if they are parallel or non-convex
			// wrt to the larger face; otherwise, just mark
			// the face non-convex for the second pass.
			if face.area > oppFace.area {
				dist1 = q.oppFaceDistance(hedge)
				if dist1 > -q.tolerance {
					merge = true
				} else if q.oppFaceDistance(hedge.opposite) > -q.tolerance {
					convex = false
				}
			} else {
				if q.oppFaceDistance(hedge.opposite) > -q.tolerance {
					merge = true
				} else if q.oppFaceDistance(hedge) > -q.tolerance {
					convex = false
				}
			}
		}
		if merge {
			if q.Debug {
				log.Printf(" merging %v and %v", face, oppFace)
			}
			// mergeAdjacentFace absorbs the neighbour; its outside points
			// are re-homed onto the surviving (merged) face.
			numd := face.mergeAdjacentFace(hedge, q.discardedFaces[0:])
			for i := 0; i < numd; i++ {
				q.deleteFacePoints(q.discardedFaces[i], face)
			}
			if q.Debug {
				log.Printf(" result %v", face)
			}
			return true
		}
		hedge = hedge.next
		if hedge == face.edge {
			break
		}
	}
	if !convex {
		face.mark = NON_CONVEX
	}
	return false
}
// addPointToHull incorporates eyeVtx into the hull: compute the horizon
// of faces visible from it, replace those faces with a fan of new faces
// through eyeVtx, run the two convexity merge passes, and re-home any
// points orphaned along the way.
func (q *Hull) addPointToHull(eyeVtx *Vertex) {
	q.horizon = make([]*HalfEdge, 0)
	q.unclaimed.Clear()
	if q.Debug {
		log.Printf("Adding point: %v", eyeVtx)
		log.Printf("which is %f above face %v", eyeVtx.face.DistanceToPlane(eyeVtx.point), eyeVtx.face)
	}
	q.removePointFromFace(eyeVtx, eyeVtx.face)
	q.calculateHorizon(eyeVtx.point, nil, eyeVtx.face, q.horizon)
	q.newFaces.Clear()
	q.addNewFaces(q.newFaces, eyeVtx, q.horizon)
	// first merge pass ... merge faces which are non-convex
	// as determined by the larger face
	if q.Debug {
		log.Printf("First merge")
	}
	for face := q.newFaces.First(); face != nil; face = face.next {
		if face.mark == VISIBLE {
			// Repeat until no merge applies to this face any more.
			for q.doAdjacentMerge(face, NONCONVEX_WRT_LARGER_FACE) {
			}
		}
	}
	// second merge pass ... merge faces which are non-convex
	// wrt either face
	if q.Debug {
		log.Printf("Second merge")
	}
	for face := q.newFaces.First(); face != nil; face = face.next {
		if face.mark == NON_CONVEX {
			face.mark = VISIBLE
			for q.doAdjacentMerge(face, NONCONVEX) {
			}
		}
	}
	q.resolveUnclaimedPoints(q.newFaces)
}
// resolveUnclaimedPoints re-assigns each vertex on the unclaimed list to
// the new face it lies furthest above (beyond tolerance); a vertex above
// no face now lies inside the hull and is discarded.
func (q *Hull) resolveUnclaimedPoints(newFaces *FaceList) {
	// Capture the successor before reassignment, since addPointToFace
	// rewrites the vertex's list links.
	vtxNext := q.unclaimed.First()
	for vtx := vtxNext; vtx != nil; vtx = vtxNext {
		vtxNext = vtx.next
		maxDist := q.tolerance
		var maxFace *Face
		for newFace := newFaces.First(); newFace != nil; newFace = newFace.next {
			if newFace.mark == VISIBLE {
				dist := newFace.DistanceToPlane(vtx.point)
				if dist > maxDist {
					maxDist = dist
					maxFace = newFace
				}
				// Clearly far above some face already: stop searching.
				if maxDist > 1000*q.tolerance {
					break
				}
			}
		}
		if maxFace != nil {
			q.addPointToFace(vtx, maxFace)
			if q.Debug && vtx.index == q.findIndex {
				log.Printf("%d CLAIMED BY %v", q.findIndex, maxFace)
			}
		} else {
			if q.Debug && vtx.index == q.findIndex {
				log.Printf("%d DISCARDED", q.findIndex)
			}
		}
	}
}
//Vertices returns the vertices used to construct this hull
func (q *Hull) Vertices() []*csg.Vector {
	out := make([]*csg.Vector, 0, q.numVertices)
	for _, idx := range q.vertexPointIndices[:q.numVertices] {
		out = append(out, q.points[idx].point)
	}
	return out
}
//BuildFromCSG builds a hull from some number of CSGs, using every vertex
//of every polygon of every CSG as an input point.
func (q *Hull) BuildFromCSG(csgs []*csg.CSG) error {
	var points []*csg.Vector
	for _, c := range csgs {
		for _, poly := range c.ToPolygons() {
			for _, vert := range poly.Vertices {
				points = append(points, vert.Position)
			}
		}
	}
	return q.Build(points, len(points))
}
// ToCSG converts this hull into a CSG object, one polygon per hull face.
func (q *Hull) ToCSG() *csg.CSG {
	polys := make([]*csg.Polygon, 0, len(q.faces))
	for _, face := range q.faces {
		polys = append(polys, face.ToPolygon())
	}
	return csg.NewCSGFromPolygons(polys)
}
//Faces returns the faces which constitute this hull, each as a slice of
//vertex indices (counter-clockwise, zero-based, not point-relative).
func (q *Hull) Faces() [][]int {
	allFaces := make([][]int, len(q.faces))
	// Idiom fix: use the range index instead of a redundant manual counter.
	for i, face := range q.faces {
		allFaces[i] = q.getFaceIndices(face, 0)
	}
	return allFaces
}
// Flags controlling how getFaceIndices emits face vertex indices.
const CLOCKWISE = 0x1 // traverse edges clockwise instead of counter-clockwise
const INDEXED_FROM_ONE = 0x2 // emit 1-based indices
const INDEXED_FROM_ZERO = 0x4 // emit 0-based indices (the default)
const POINT_RELATIVE = 0x8 // map hull vertex indices back to input point indices

// getFaceIndices collects the vertex indices around face, honouring the
// CLOCKWISE / INDEXED_FROM_ONE / POINT_RELATIVE flag bits.
func (q *Hull) getFaceIndices(face *Face, flags int) []int {
	ccw := ((flags & CLOCKWISE) == 0)
	indexedFromOne := ((flags & INDEXED_FROM_ONE) != 0)
	pointRelative := ((flags & POINT_RELATIVE) != 0)
	indices := make([]int, face.numVerts)
	hedge := face.edge
	k := 0
	for {
		idx := hedge.Head().index
		if pointRelative {
			// Translate internal vertex index to original input index.
			idx = q.vertexPointIndices[idx]
		}
		if indexedFromOne {
			idx++
		}
		indices[k] = idx
		k++
		if ccw {
			hedge = hedge.next
		} else {
			hedge = hedge.prev
		}
		if hedge == face.edge {
			break
		}
	}
	return indices
}
func (q *Hull) buildHull() error {
cnt := 0
eyeVtx := &Vertex{}
q.computeMinAndMax()
err := q.createInitialSimplex()
if err != nil {
return err
}
for {
eyeVtx = q.nextPointToAdd()
if eyeVtx == nil {
break
}
q.addPointToHull(eyeVtx)
cnt++
if q.Debug {
log.Printf("iteration %d done", cnt)
}
}
q.reindexFacesAndVertices()
if q.Debug {
log.Printf("hull done")
}
return nil
} | qhull/hull.go | 0.566978 | 0.483892 | hull.go | starcoder |
package backup
import (
"sync"
"github.com/zero-os/0-Disk"
"github.com/zero-os/0-Disk/errors"
)
// unpackRawDedupedMap turns a raw deduped map into a usable dedupedMap.
// A raw map with Count == 0 yields a fresh, empty dedupedMap.
// NOTE: the hash (slice) data is shared between the raw and the unpacked
// map, so make sure that sharing is acceptable.
func unpackRawDedupedMap(raw RawDedupedMap) (*dedupedMap, error) {
	if raw.Count == 0 {
		return newDedupedMap(), nil
	}
	if err := raw.Validate(); err != nil {
		return nil, err
	}
	hashes := make(map[int64]zerodisk.Hash, raw.Count)
	for i := int64(0); i < raw.Count; i++ {
		hashes[raw.Indices[i]] = zerodisk.Hash(raw.Hashes[i])
	}
	return &dedupedMap{hashes: hashes}, nil
}
// newDedupedMap creates a new deduped map,
// which contains all the metadata stored for a(n) (exported) backup.
// See `dedupedMap` for more information.
func newDedupedMap() *dedupedMap {
	return &dedupedMap{
		hashes: make(map[int64]zerodisk.Hash),
	}
}

// dedupedMap contains all hashes for a vdisk's backup,
// where each hash is mapped to its (export) block index.
// All methods lock mux, so a dedupedMap is safe for concurrent use.
type dedupedMap struct {
	// hashes maps an export block index to the hash stored for that block.
	hashes map[int64]zerodisk.Hash
	// mux serialises all access to hashes.
	mux sync.Mutex
}
// SetHash maps hash to the given (export block) index. When the identical
// hash is already stored at that index, the map is left untouched and
// false is returned; otherwise the hash is stored and true is returned.
func (dm *dedupedMap) SetHash(index int64, hash zerodisk.Hash) bool {
	dm.mux.Lock()
	defer dm.mux.Unlock()
	if existing, ok := dm.hashes[index]; ok && existing.Equals(hash) {
		return false
	}
	dm.hashes[index] = hash
	return true
}
// GetHash returns the hash mapped to the given (export block) index; the
// second result is false when no hash is stored at that index.
func (dm *dedupedMap) GetHash(index int64) (zerodisk.Hash, bool) {
	dm.mux.Lock()
	defer dm.mux.Unlock()
	h, ok := dm.hashes[index]
	return h, ok
}
// Raw returns this dedupedMap as a RawDedupedMap.
// NOTE: the hash data is shared with the hashes stored in this DedupedMap,
// so ensure that this functional is called in complete isolation
func (dm *dedupedMap) Raw() (*RawDedupedMap, error) {
dm.mux.Lock()
defer dm.mux.Unlock()
hashCount := len(dm.hashes)
if hashCount == 0 {
return nil, errors.New("deduped map is empty")
}
raw := new(RawDedupedMap)
raw.Count = int64(hashCount)
raw.Indices = make([]int64, hashCount)
raw.Hashes = make([][]byte, hashCount)
var i int
for index, hash := range dm.hashes {
raw.Indices[i] = index
raw.Hashes[i] = hash.Bytes()
i++
}
return raw, nil
} | nbd/ardb/backup/deduped_map.go | 0.757077 | 0.475544 | deduped_map.go | starcoder |
package gmap
import (
"strconv"
)
// interfaceToString converts v to its string representation.
// Unsupported types yield def together with ErrTypeMismatch.
func interfaceToString(v interface{}, def string) (string, error) {
	// Idiom fix: bind the value in the type switch instead of repeating
	// type assertions in every case.
	switch t := v.(type) {
	case string:
		return t, nil
	case bool:
		return strconv.FormatBool(t), nil
	case float32:
		// float32 support added for consistency with the sibling
		// interfaceToInt / interfaceToFloat64 helpers, which accept it.
		return strconv.FormatFloat(float64(t), 'f', -1, 32), nil
	case float64:
		return strconv.FormatFloat(t, 'f', -1, 64), nil
	case int:
		return strconv.Itoa(t), nil
	case int8:
		return strconv.FormatInt(int64(t), 10), nil
	case int16:
		return strconv.FormatInt(int64(t), 10), nil
	case int32:
		return strconv.FormatInt(int64(t), 10), nil
	case int64:
		return strconv.FormatInt(t, 10), nil
	case uint:
		return strconv.FormatUint(uint64(t), 10), nil
	case uint8:
		return strconv.FormatUint(uint64(t), 10), nil
	case uint16:
		return strconv.FormatUint(uint64(t), 10), nil
	case uint32:
		return strconv.FormatUint(uint64(t), 10), nil
	case uint64:
		return strconv.FormatUint(t, 10), nil
	default:
		return def, ErrTypeMismatch
	}
}
// interfaceToInt converts v to an int, truncating floats and mapping
// true/false to 1/0. Strings are parsed with strconv.Atoi; on parse
// failure def is returned alongside the parse error. Unsupported types
// yield def together with ErrTypeMismatch.
func interfaceToInt(v interface{}, def int) (int, error) {
	switch t := v.(type) {
	case int:
		return t, nil
	case int8:
		return int(t), nil
	case int16:
		return int(t), nil
	case int32:
		return int(t), nil
	case int64:
		return int(t), nil
	case uint:
		return int(t), nil
	case uint8:
		return int(t), nil
	case uint16:
		return int(t), nil
	case uint32:
		return int(t), nil
	case uint64:
		return int(t), nil
	case float32:
		return int(t), nil
	case float64:
		return int(t), nil
	case string:
		i, err := strconv.Atoi(t)
		if err != nil {
			// CONSISTENCY FIX: previously Atoi's zero result was returned
			// on failure, ignoring def; return the caller-supplied
			// default like every other failure path in these helpers.
			return def, err
		}
		return i, nil
	case bool:
		if t {
			return 1, nil
		}
		return 0, nil
	default:
		return def, ErrTypeMismatch
	}
}
// Helper function to convert an interface{} to float64
func interfaceToFloat64(v interface{}, def float64) (float64, error) {
switch v.(type) {
case uint:
return float64(v.(uint)), nil
case uint8:
return float64(v.(uint8)), nil
case uint16:
return float64(v.(uint16)), nil
case uint32:
return float64(v.(uint32)), nil
case uint64:
return float64(v.(uint64)), nil
case int:
return float64(v.(int)), nil
case int8:
return float64(v.(int8)), nil
case int16:
return float64(v.(int16)), nil
case int32:
return float64(v.(int32)), nil
case int64:
return float64(v.(int64)), nil
case float32:
return float64(v.(float32)), nil
case float64:
return v.(float64), nil
case bool:
f := 0.0
if v.(bool) {
f = 1.0
}
return f, nil
case string:
return strconv.ParseFloat(v.(string), 64)
default:
return def, ErrTypeMismatch
}
} | helpers.go | 0.518546 | 0.478346 | helpers.go | starcoder |
package judgment
// Some code below is taken directly from colorful's doc examples.
// https://github.com/lucasb-eyer/go-colorful/blob/master/doc/gradientgen/gradientgen.go
// May be useful later:
// c, err := colorful.Hex(s)
import (
"errors"
"fmt"
"github.com/lucasb-eyer/go-colorful"
"image/color"
)
// CreateDefaultPalette returns a Palette of amountOfColors colors.
// 7 colors we use, red to green:
// "#df3222", "#ed6f01", "#fab001", "#c5d300", "#7bbd3e", "#00a249", "#017a36"
// When requiring more than 7, we interpolate in HSV space.
// Negative counts are treated as their absolute value.
// This tries to be fault-tolerant, and returns an empty palette upon trouble.
func CreateDefaultPalette(amountOfColors int) color.Palette {
	const Color0 = 0xdf3222
	const Color1 = 0xed6f01
	const Color2 = 0xfab001
	const Color3 = 0xc5d300
	const Color4 = 0x7bbd3e
	const Color5 = 0x00a249
	const Color6 = 0x017a36
	color0 := hexToRGB(Color0)
	color1 := hexToRGB(Color1)
	color2 := hexToRGB(Color2)
	color3 := hexToRGB(Color3)
	color4 := hexToRGB(Color4)
	color5 := hexToRGB(Color5)
	color6 := hexToRGB(Color6)
	// Idiom fix: negate directly instead of multiplying by -1.
	if amountOfColors < 0 {
		amountOfColors = -amountOfColors
	}
	// For up to 7 colors, pick a hand-tuned subset of the key colors;
	// for more, interpolate between all seven.
	switch amountOfColors {
	case 0:
		return []color.Color{}
	case 1:
		return []color.Color{
			color5,
		}
	case 2:
		return []color.Color{
			color0,
			color5,
		}
	case 3:
		return []color.Color{
			color0,
			color2,
			color5,
		}
	case 4:
		return []color.Color{
			color0,
			color2,
			color4,
			color6,
		}
	case 5:
		return []color.Color{
			color0,
			color1,
			color2,
			color4,
			color5,
		}
	case 6:
		return []color.Color{
			color0,
			color1,
			color2,
			color4,
			color5,
			color6,
		}
	case 7:
		return []color.Color{
			color0,
			color1,
			color2,
			color3,
			color4,
			color5,
			color6,
		}
	default:
		palette, err := bakePalette(amountOfColors, []color.Color{
			color0,
			color1,
			color2,
			color3,
			color4,
			color5,
			color6,
		})
		if err != nil {
			// Fault-tolerant: swallow the error, return an empty palette.
			//panic("CreateDefaultPalette: failed to bake: "+err.Error())
			return []color.Color{}
		}
		return palette
	}
}
// DumpPaletteHexString dumps the provided palette as a single string of
// quoted hex colors joined by separator, e.g.
// "#df3222", "#ed6f01", "#fab001" for separator ", " and a double quote.
func DumpPaletteHexString(palette color.Palette, separator string, quote string) string {
	out := ""
	for i, c := range palette {
		if i > 0 {
			out += separator
		}
		out += quote + DumpColorHexString(c, "#", false) + quote
	}
	return out
}
// DumpColorHexString outputs strings like #ff3399 or #ff3399ff with alpha
// Be mindful that PRECISION IS LOST because hex format has less bits
func DumpColorHexString(c color.Color, prefix string, withAlpha bool) string {
out := prefix
r, g, b, a := c.RGBA()
out += fmt.Sprintf("%02x", r>>8)
out += fmt.Sprintf("%02x", g>>8)
out += fmt.Sprintf("%02x", b>>8)
if withAlpha {
out += fmt.Sprintf("%02x", a>>8)
}
return out
}
// hexToRGB converts a 0xRRGGBB integer into an opaque (alpha 0xff) color.
// there probably is a colorful way to do this
func hexToRGB(hexColor int) color.Color {
	rgba := color.RGBA{
		R: uint8((hexColor & 0xff0000) >> 16),
		G: uint8((hexColor & 0x00ff00) >> 8),
		B: uint8((hexColor & 0x0000ff) >> 0),
		A: 0xff,
	}
	c, success := colorful.MakeColor(rgba)
	if !success {
		// MakeColor fails on zero alpha, which cannot happen here since
		// alpha is hard-coded to 0xff; treat failure as a programmer bug.
		panic("hexToRgb")
	}
	return c
}
// keyColor is a single keypoint of a color gradient.
// This table contains the "key" colors of the color gradient we want to generate.
// The position of each key has to live in the range [0,1]
type keyColor struct {
	Color    colorful.Color
	Position float64 // in [0,1]
}

// gradientTable is an ordered list of gradient keypoints; the code that
// interpolates over it relies on Positions being sorted ascending.
type gradientTable []keyColor
// getInterpolatedColorFor returns the gradient color at position t in [0,1].
// This is the meat of the gradient computation. It returns a HCL-blend between
// the two colors around `t`.
// Note: It relies heavily on the fact that the gradient keypoints are sorted.
func (gt gradientTable) getInterpolatedColorFor(t float64) colorful.Color {
	for i := 0; i < len(gt)-1; i++ {
		c1 := gt[i]
		c2 := gt[i+1]
		if c1.Position <= t && t <= c2.Position {
			// We are in between c1 and c2. Go blend them!
			t := (t - c1.Position) / (c2.Position - c1.Position)
			return c1.Color.BlendHcl(c2.Color, t).Clamped()
		}
	}
	// Nothing found? Means we're at (or past) the last gradient keypoint.
	return gt[len(gt)-1].Color
}
func bakePalette(toLength int, keyColors color.Palette) (color.Palette, error) {
if toLength < 2 {
return nil, errors.New("bakePalette: the length of the palette must be > 1")
}
keyPoints := gradientTable{}
paletteLen := len(keyColors)
for colorIndex, colorObject := range keyColors {
colorfulColor, success := colorful.MakeColor(colorObject)
if !success {
panic("Bad palette color: alpha channel is probably 0.")
}
keyPoints = append(keyPoints, keyColor{
Color: colorfulColor,
Position: float64(colorIndex) / (float64(paletteLen) - 1.0),
})
}
outPalette := make([]color.Color, 0, 7)
for i := 0; i < toLength; i++ {
c := keyPoints.getInterpolatedColorFor(float64(i) / (float64(toLength) - 1))
outPalette = append(outPalette, c)
}
return outPalette, nil
} | judgment/colors.go | 0.8618 | 0.455622 | colors.go | starcoder |
package utils
import (
"encoding/binary"
"math"
)
// BytesToUint64 takes a slice of exactly 8 bytes (big-endian, the inverse
// of Uint64ToBytes below) and returns the corresponding uint64 value.
// NOTE: it panics when the slice is not 8 bytes long; kept for interface
// compatibility, though returning an error would be friendlier.
func BytesToUint64(bytes []uint8) uint64 {
	if len(bytes) != 8 {
		panic("Invalid bytes array length sent to BytesToUint64")
	}
	// BUG FIX: the previous implementation shifted each byte by (7-i)
	// BITS rather than (7-i) BYTES, collapsing almost the whole value.
	return binary.BigEndian.Uint64(bytes)
}
// Int32ToBytes converts an int32 value into its 4 big-endian bytes.
func Int32ToBytes(value int32) []byte {
	// Stdlib over hand-rolled shifting; byte order is unchanged.
	bytes := make([]byte, 4)
	binary.BigEndian.PutUint32(bytes, uint32(value))
	return bytes
}
// Uint32ToBytes converts a uint32 value into its 4 big-endian bytes.
func Uint32ToBytes(value uint32) []byte {
	// Stdlib over hand-rolled shifting; byte order is unchanged.
	bytes := make([]byte, 4)
	binary.BigEndian.PutUint32(bytes, value)
	return bytes
}
// Uint64ToBytes converts a uint64 value into its 8 big-endian bytes.
func Uint64ToBytes(value uint64) []byte {
	// Stdlib over hand-rolled shifting; byte order is unchanged.
	bytes := make([]byte, 8)
	binary.BigEndian.PutUint64(bytes, value)
	return bytes
}
// SplitUint64 splits a uint64 value into its high (p1) and low (p2)
// 32-bit halves.
func SplitUint64(val uint64) (p1, p2 uint32) {
	p1 = uint32(val >> 32)
	p2 = uint32(val & 0xFFFFFFFF)
	return
}
// CombineUint32 packs val1 into the high 32 bits and val2 into the low
// 32 bits of the returned uint64 (inverse of SplitUint64).
func CombineUint32(val1, val2 uint32) uint64 {
	return uint64(val1)<<32 | uint64(val2)
}
// BytesToFloat32 decodes a 4-byte little-endian slice as an IEEE-754
// float32 value. It panics when the slice is not exactly 4 bytes long.
func BytesToFloat32(bytes []uint8) float32 {
	if len(bytes) != 4 {
		panic("Invalid bytes array length sent to BytesToFloat32")
	}
	return math.Float32frombits(binary.LittleEndian.Uint32(bytes))
}
package main
import (
"fmt"
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/mat"
"gonum.org/v1/gonum/stat"
)
// SummaryDoc is the top-level result document of a smeargol analysis:
// per sample, one set of Summaries over the listed ontology Roots.
type SummaryDoc struct {
	// Roots is the set of roots in the Gene Ontology.
	Roots []string
	// Summaries contains the summaries of a smeargol
	// analysis.
	Summaries [][]*Summary
}

// Summary holds the singular-value analysis of one GO level of one sample.
type Summary struct {
	// Name is the name of the sample.
	Name string
	// Root is the root GO term for the summary.
	Root string
	// Depth is the distance from the root.
	Depth int
	// Rows and Cols are the dimensions of the matrix
	// describing the GO level. Rows corresponds to the
	// number of genes and Cols corresponds to the number
	// of GO terms in the level.
	Rows, Cols int
	// OptimalRank and FractionalRank are the calculated
	// ranks of the summary matrix. OptimalRank is
	// calculated according to the method of <NAME>
	// and <NAME> https://arxiv.org/abs/1305.5870.
	// FractionalRank is the rank calculated using the
	// user-provided fraction parameters.
	OptimalRank, FractionalRank int
	// Sigma is the complete set of singular values.
	Sigma []float64
}
// optimalTruncation factorises m by SVD and derives two truncation ranks:
//   - OptimalRank: the Gavish–Donoho optimal hard threshold
//     (https://arxiv.org/abs/1305.5870) applied to the singular values,
//     after discarding values below cut.
//   - FractionalRank: the number of singular values whose normalised
//     cumulative sum first exceeds frac.
// It also plots the spectrum to path. Name/Root/Depth of the returned
// Summary are left zero for the caller to fill in.
// https://arxiv.org/abs/1305.5870
func optimalTruncation(path string, m *mat.Dense, cut, frac float64) (*Summary, error) {
	var svd mat.SVD
	ok := svd.Factorize(m, mat.SVDThin)
	if !ok {
		return nil, fmt.Errorf("could not factorise %q", path)
	}
	sigma := svd.Values(nil) // singular values (descending, per gonum)
	sum := make([]float64, len(sigma))
	floats.CumSum(sum, sigma)
	var rFrac int
	var f float64
	max := sum[len(sum)-1]
	if max != 0 {
		// Normalise the cumulative sum to [0,1], then find where it
		// first exceeds the requested fraction.
		floats.Scale(1/max, sum)
		rFrac = idxAbove(frac, sum)
		// f is the singular value at the fractional cut, used for the plot.
		switch {
		case rFrac < len(sigma):
			f = sigma[rFrac]
		case len(sigma) != 0:
			f = sigma[0]
		}
	}
	// Drop singular values below the user-supplied cut before thresholding.
	sigmaCut := sigma[:idxBelow(cut, sigma)]
	rows, cols := m.Dims()
	t := tau(rows, cols, sigmaCut)
	rOpt := idxBelow(t, sigmaCut)
	err := plotValues(path, sigmaCut, t, f, rOpt, rFrac)
	return &Summary{Rows: rows, Cols: cols, OptimalRank: rOpt, FractionalRank: rFrac, Sigma: sigma}, err
}
// idxAbove returns the index of the first element of s strictly greater
// than thresh, or len(s) when no element qualifies.
func idxAbove(thresh float64, s []float64) int {
	for i := range s {
		if s[i] > thresh {
			return i
		}
	}
	return len(s)
}
// idxBelow returns the index of the first element of s strictly less
// than thresh, or len(s) when no element qualifies.
func idxBelow(thresh float64, s []float64) int {
	for i := range s {
		if s[i] < thresh {
			return i
		}
	}
	return len(s)
}
// tau computes the Gavish–Donoho hard threshold: omega(beta) times the
// median singular value. values is expected in descending order; it is
// reversed in place so stat.Quantile sees ascending data, then reversed
// back, leaving the slice unchanged on return.
// https://arxiv.org/abs/1305.5870 Eq. 4.
func tau(rows, cols int, values []float64) float64 {
	if len(values) == 0 {
		return 0
	}
	reverseFloats(values)
	m := stat.Quantile(0.5, 1, values, nil)
	reverseFloats(values)
	return omega(rows, cols) * m
}
// reverseFloats reverses f in place.
func reverseFloats(f []float64) {
	n := len(f)
	for i := 0; i < n/2; i++ {
		f[i], f[n-1-i] = f[n-1-i], f[i]
	}
}
// https://arxiv.org/abs/1305.5870 Eq. 5.
func omega(rows, cols int) float64 {
beta := float64(rows) / float64(cols)
beta2 := beta * beta
return 0.56*beta2*beta - 0.95*beta2 + 1.82*beta + 1.43
} | cmd/smeargol/optimal_truncation.go | 0.784278 | 0.512632 | optimal_truncation.go | starcoder |
package bulletproof
import (
"github.com/pkg/errors"
"github.com/coinbase/kryptology/pkg/core/curves"
)
// innerProduct computes the dot product of two equal-length, non-empty
// scalar vectors, returning a single scalar.
func innerProduct(a, b []curves.Scalar) (curves.Scalar, error) {
	if len(a) != len(b) {
		return nil, errors.New("length of scalar vectors must be the same")
	}
	if len(a) < 1 {
		return nil, errors.New("length of vectors must be at least one")
	}
	acc := a[0].Zero()
	for i := range a {
		// acc = a[i]*b[i] + acc
		acc = a[i].MulAdd(b[i], acc)
	}
	return acc, nil
}
// splitPointVector halves a non-empty, even-length vector of points and
// returns the two halves (views into the original slice).
func splitPointVector(points []curves.Point) ([]curves.Point, []curves.Point, error) {
	n := len(points)
	if n < 1 {
		return nil, nil, errors.New("length of points must be at least one")
	}
	if n%2 != 0 {
		return nil, nil, errors.New("length of points must be even")
	}
	half := n / 2
	return points[:half], points[half:], nil
}
// splitScalarVector halves a non-empty, even-length vector of scalars and
// returns the two halves (views into the original slice).
func splitScalarVector(scalars []curves.Scalar) ([]curves.Scalar, []curves.Scalar, error) {
	n := len(scalars)
	if n < 1 {
		return nil, nil, errors.New("length of scalars must be at least one")
	}
	if n%2 != 0 {
		return nil, nil, errors.New("length of scalars must be even")
	}
	half := n / 2
	return scalars[:half], scalars[half:], nil
}
// multiplyScalarToPointVector scales every point of g by the scalar x,
// returning a new slice.
func multiplyScalarToPointVector(x curves.Scalar, g []curves.Point) []curves.Point {
	out := make([]curves.Point, len(g))
	for i := range g {
		out[i] = g[i].Mul(x)
	}
	return out
}
// multiplyScalarToScalarVector multiplies every scalar of a by x,
// returning a new slice.
func multiplyScalarToScalarVector(x curves.Scalar, a []curves.Scalar) []curves.Scalar {
	out := make([]curves.Scalar, len(a))
	for i := range a {
		out[i] = a[i].Mul(x)
	}
	return out
}
// multiplyPairwisePointVectors combines g and h element-wise with the
// group operation (point addition, i.e. the "product" in multiplicative
// notation), returning a new slice.
func multiplyPairwisePointVectors(g, h []curves.Point) ([]curves.Point, error) {
	if len(g) != len(h) {
		return nil, errors.New("length of point vectors must be the same")
	}
	out := make([]curves.Point, len(g))
	for i := range g {
		out[i] = g[i].Add(h[i])
	}
	return out, nil
}
// multiplyPairwiseScalarVectors multiplies a and b element-wise,
// returning a new slice of scalars.
func multiplyPairwiseScalarVectors(a, b []curves.Scalar) ([]curves.Scalar, error) {
	if len(a) != len(b) {
		return nil, errors.New("length of point vectors must be the same")
	}
	out := make([]curves.Scalar, len(a))
	for i := range a {
		out[i] = a[i].Mul(b[i])
	}
	return out, nil
}
// addPairwiseScalarVectors adds a and b element-wise, returning a new
// slice of scalars.
func addPairwiseScalarVectors(a, b []curves.Scalar) ([]curves.Scalar, error) {
	if len(a) != len(b) {
		return nil, errors.New("length of scalar vectors must be the same")
	}
	out := make([]curves.Scalar, len(a))
	for i := range a {
		out[i] = a[i].Add(b[i])
	}
	return out, nil
}
// subtractPairwiseScalarVectors subtracts b from a element-wise,
// returning a new slice of scalars.
func subtractPairwiseScalarVectors(a, b []curves.Scalar) ([]curves.Scalar, error) {
	if len(a) != len(b) {
		return nil, errors.New("length of scalar vectors must be the same")
	}
	out := make([]curves.Scalar, len(a))
	for i := range a {
		out[i] = a[i].Sub(b[i])
	}
	return out, nil
}
// invertScalars takes a list of scalars then returns a list with each
// element inverted. It fails when any element has no multiplicative
// inverse (i.e. is zero).
func invertScalars(xs []curves.Scalar) ([]curves.Scalar, error) {
	xinvs := make([]curves.Scalar, len(xs))
	for i, x := range xs {
		xinv, err := x.Invert()
		if err != nil {
			return nil, errors.Wrap(err, "bulletproof helpers invertx")
		}
		xinvs[i] = xinv
	}
	return xinvs, nil
}
// isPowerOfTwo reports whether i is a positive power of two.
func isPowerOfTwo(i int) bool {
	// i&(i-1) clears the lowest set bit; exactly one bit is set in a
	// power of two. BUG FIX: the previous version returned true for 0
	// (0 & -1 == 0); the i > 0 guard excludes zero and negatives.
	return i > 0 && i&(i-1) == 0
}
// get2nVector returns the scalar vector [1, 2, 4, ..., 2^(len-1)].
// See k^n and 2^n definitions on pg 12 of https://eprint.iacr.org/2017/1066.pdf
func get2nVector(len int, curve curves.Curve) []curves.Scalar {
	vector2n := make([]curves.Scalar, len)
	// BUG FIX: the previous version wrote index 1 unconditionally and so
	// panicked for len < 2; fill iteratively from index 0 instead.
	if len > 0 {
		vector2n[0] = curve.Scalar.One()
	}
	for i := 1; i < len; i++ {
		vector2n[i] = vector2n[i-1].Double()
	}
	return vector2n
}
// get1nVector returns a scalar vector of `len` ones.
func get1nVector(len int, curve curves.Curve) []curves.Scalar {
	ones := make([]curves.Scalar, len)
	for i := range ones {
		ones[i] = curve.Scalar.One()
	}
	return ones
}
func getknVector(k curves.Scalar, len int, curve curves.Curve) []curves.Scalar {
vectorkn := make([]curves.Scalar, len)
vectorkn[0] = curve.Scalar.One()
vectorkn[1] = k
for i := 2; i < len; i++ {
vectorkn[i] = vectorkn[i-1].Mul(k)
}
return vectorkn
} | pkg/bulletproof/helpers.go | 0.782122 | 0.527621 | helpers.go | starcoder |
package raytracer
import (
"errors"
)
// Matrix is a square matrix of Dim rows by Dim columns.
type Matrix struct {
	Data [][]float64 // row-major: Data[row][col]
	Dim  int
}

// NewMatrix builds a dim×dim matrix containing a deep copy of vals.
// Rows of vals beyond dim are ignored; missing rows/columns stay zero.
func NewMatrix(dim int, vals [][]float64) *Matrix {
	m := &Matrix{Dim: dim, Data: make([][]float64, dim)}
	for i := range m.Data {
		m.Data[i] = make([]float64, dim)
		// BUG FIX: the previous `copy(m.Data, vals)` copied the row slice
		// HEADERS, discarding the freshly allocated rows and aliasing the
		// caller's data; copy each row's contents instead.
		if i < len(vals) {
			copy(m.Data[i], vals[i])
		}
	}
	return m
}
// ZeroMatrix returns a dim×dim matrix with every entry set to zero.
func ZeroMatrix(dim int) *Matrix {
	data := make([][]float64, dim)
	for i := range data {
		data[i] = make([]float64, dim)
	}
	return &Matrix{Dim: dim, Data: data}
}
// EyeMatrix returns the dim×dim identity matrix.
func EyeMatrix(dim int) *Matrix {
	m := ZeroMatrix(dim)
	for d := 0; d < dim; d++ {
		m.Data[d][d] = 1
	}
	return m
}
// Equal reports whether m and m2 have the same (non-zero) dimension and
// all corresponding entries agree to within Eps (via AlmostEqual).
func (m *Matrix) Equal(m2 *Matrix) bool {
	if m.Dim != m2.Dim || m.Dim == 0 || m2.Dim == 0 {
		return false
	}
	// Defensive guards: Data should always hold Dim rows for matrices
	// built by the constructors, but tolerate malformed ones.
	if len(m.Data) != len(m2.Data) {
		return false
	}
	if (m.Data == nil) || (m2.Data == nil) {
		return false
	}
	for i, v := range m.Data {
		for j, w := range v {
			if !AlmostEqual(w, m2.Data[i][j], Eps) {
				return false
			}
		}
	}
	return true
}
// Multiply returns the matrix product m · m1.
func (m *Matrix) Multiply(m1 *Matrix) *Matrix {
	return Multiply(m, m1)
}
// Multiply returns the matrix product m1 · m2. Both operands must share
// the same dimension; the result has that dimension as well.
func Multiply(m1, m2 *Matrix) *Matrix {
	dim := m1.Dim
	// BUG FIX: the result was previously allocated as ZeroMatrix(4)
	// regardless of the operand dimension, which is wrong (and panics)
	// for any dimension other than 4.
	m := ZeroMatrix(dim)
	for i := 0; i < dim; i++ {
		for j := 0; j < dim; j++ {
			var sum float64
			for d := 0; d < dim; d++ {
				sum += m1.Data[i][d] * m2.Data[d][j]
			}
			m.Data[i][j] = sum
		}
	}
	return m
}
// MultiplyTuple multiplies matrix m by tuple t, treating t as the column
// vector [X Y Z W], and returns the resulting tuple. getTupleVal and
// setTupleVal map component indices 0..3 onto the X/Y/Z/W fields.
func MultiplyTuple(m *Matrix, t *Tuple) *Tuple {
	res := &Tuple{}
	dim := m.Dim
	// getTupleVal reads tuple component `key` (0=X, 1=Y, 2=Z, 3=W).
	getTupleVal := func(key int) float64 {
		var ret float64
		switch key {
		case 0:
			ret = t.X
		case 1:
			ret = t.Y
		case 2:
			ret = t.Z
		case 3:
			ret = t.W
		}
		return ret
	}
	// setTupleVal writes result component `key` (0=X, 1=Y, 2=Z, 3=W).
	setTupleVal := func(key int, val float64) {
		switch key {
		case 0:
			res.X = val
		case 1:
			res.Y = val
		case 2:
			res.Z = val
		case 3:
			res.W = val
		}
	}
	// Standard row-times-column product: res[i] = Σ_j m[i][j] * t[j].
	for i := 0; i < dim; i++ {
		var tmpSum float64
		for j := 0; j < dim; j++ {
			tj := getTupleVal(j)
			tmpSum += m.Data[i][j] * tj
		}
		setTupleVal(i, tmpSum)
	}
	return res
}
// Transpose returns a new matrix that is the transpose of m.
func Transpose(m *Matrix) *Matrix {
	out := ZeroMatrix(m.Dim)
	for row, r := range m.Data {
		for col, v := range r {
			out.Data[col][row] = v
		}
	}
	return out
}
// Determinant computes det(m) by cofactor expansion along the first
// row, with closed forms for the small base cases.
// Bug fix: 1x1 matrices previously fell into the expansion branch and
// always returned 0 instead of the single element.
func Determinant(m *Matrix) float64 {
	switch m.Dim {
	case 0:
		return 0
	case 1:
		return m.Data[0][0]
	case 2:
		return m.Data[0][0]*m.Data[1][1] - m.Data[0][1]*m.Data[1][0]
	}
	var det float64
	for col := 0; col < m.Dim; col++ {
		det += m.Data[0][col] * Cofactor(m, 0, col)
	}
	return det
}
// Submatrix returns a copy of m with the given row and column removed.
func Submatrix(m *Matrix, row, col int) *Matrix {
	sub := ZeroMatrix(m.Dim - 1)
	dstRow := 0
	for srcRow := 0; srcRow < m.Dim; srcRow++ {
		if srcRow == row {
			continue
		}
		dstCol := 0
		for srcCol := 0; srcCol < m.Dim; srcCol++ {
			if srcCol == col {
				continue
			}
			sub.Data[dstRow][dstCol] = m.Data[srcRow][srcCol]
			dstCol++
		}
		dstRow++
	}
	return sub
}
// Minor returns the determinant of the submatrix of m obtained by
// deleting the given row and column.
func Minor(m *Matrix, row, col int) float64 {
	b := Submatrix(m, row, col)
	return Determinant(b)
}
// Cofactor returns the (row, col) cofactor of m: the minor with the
// checkerboard sign (-1)^(row+col) applied.
func Cofactor(m *Matrix, row, col int) float64 {
	minor := Minor(m, row, col)
	if (row+col)%2 == 0 {
		return minor
	}
	return -minor
}
// Inverse returns the inverse of m, built as the transposed cofactor
// matrix (adjugate) divided by the determinant. An error is returned
// for singular matrices (determinant == 0).
func Inverse(m *Matrix) (ret *Matrix, err error) {
	// Compute the determinant once; the original evaluated it twice.
	det := Determinant(m)
	if det == 0 {
		return nil, errors.New("the matrix is not invertible")
	}
	ret = ZeroMatrix(m.Dim)
	for row := 0; row < m.Dim; row++ {
		for col := 0; col < m.Dim; col++ {
			// Transposed placement ([col][row]) yields the adjugate.
			ret.Data[col][row] = Cofactor(m, row, col) / det
		}
	}
	return ret, nil
} | raytracer/matrices.go | 0.588534 | 0.464537 | matrices.go | starcoder |
package _752_Open_the_Lock
/*https://leetcode.com/problems/open-the-lock/
You have a lock in front of you with 4 circular wheels. Each wheel has 10 slots: '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'. The wheels can rotate freely and wrap around: for example we can turn '9' to be '0', or '0' to be '9'. Each move consists of turning one wheel one slot.
The lock initially starts at '0000', a string representing the state of the 4 wheels.
You are given a list of deadends dead ends, meaning if the lock displays any of these codes, the wheels of the lock will stop turning and you will be unable to open it.
Given a target representing the value of the wheels that will unlock the lock, return the minimum total number of turns required to open the lock, or -1 if it is impossible.
Example 1:
Input: deadends = ["0201","0101","0102","1212","2002"], target = "0202"
Output: 6
Explanation:
A sequence of valid moves would be "0000" -> "1000" -> "1100" -> "1200" -> "1201" -> "1202" -> "0202".
Note that a sequence like "0000" -> "0001" -> "0002" -> "0102" -> "0202" would be invalid,
because the wheels of the lock become stuck after the display becomes the dead end "0102".
Example 2:
Input: deadends = ["8888"], target = "0009"
Output: 1
Explanation:
We can turn the last wheel in reverse to move from "0000" -> "0009".
Example 3:
Input: deadends = ["8887","8889","8878","8898","8788","8988","7888","9888"], target = "8888"
Output: -1
Explanation:
We can't reach the target without getting stuck.
Example 4:
Input: deadends = ["0000"], target = "8888"
Output: -1
Note:
The length of deadends will be in the range [1, 500].
target will not be in the list deadends.
Every string in deadends and the string target will be a string of 4 digits from the 10,000 possibilities '0000' to '9999'.
*/
// change returns a copy of the 4-character lock code e with wheel
// `char` (0-3) rotated one step: up means 0->1->...->9->0, down the
// reverse. Any wheel index outside 0-3 yields "".
func change(e string, char int, up bool) string {
	if char < 0 || char > 3 {
		return ""
	}
	b := []byte{e[0], e[1], e[2], e[3]}
	switch {
	case up:
		b[char] = '0' + (b[char]-'0'+1)%10 // '9' wraps to '0'
	case b[char] == '0':
		b[char] = '9' // '0' wraps to '9'
	default:
		b[char]--
	}
	return string(b)
}
// openLock returns the minimum number of single-wheel turns needed to
// go from "0000" to target without ever displaying a code listed in
// deadends, or -1 if target is unreachable (LeetCode 752).
// It runs a breadth-first search over the 10^4 code space; `level` is
// the BFS depth, i.e. the number of turns taken so far.
func openLock(deadends []string, target string) int {
	var queue []string
	enqueue := func(e string) {
		queue = append(queue, e)
	}
	dequeue := func() string {
		if len(queue) == 0 {
			return ""
		}
		e := queue[0]
		queue = queue[1:]
		return e
	}
	// Forbidden codes, as a set for O(1) lookup.
	de := map[string]bool{}
	for _, d := range deadends {
		de[d] = true
	}
	// Codes already expanded; prevents revisiting.
	used := map[string]bool{}
	level := 0
	enqueue("0000")
	for len(queue) > 0 {
		// Process one full BFS level per outer iteration.
		size := len(queue)
		for size > 0 {
			e := dequeue()
			if e == target {
				return level
			}
			// Skip dead ends (including "0000" itself) and repeats.
			if de[e] || used[e] {
				size--
				continue
			}
			used[e] = true
			// Each of the 4 wheels may turn one step up or down.
			for i := 0; i < 4; i++ {
				up := change(e, i, true)
				if !used[up] {
					enqueue(up)
				}
				down := change(e, i, false)
				if !used[down] {
					enqueue(down)
				}
			}
			size--
		}
		//fmt.Printf("queue: %+v \n", queue)
		level++
	}
	return -1
} | 752_Open_the_Lock/solution.go | 0.840717 | 0.528959 | solution.go | starcoder |
package lexer
const (
// White space is used to improve legibility of source text and act as separation between tokens, and any amount of white space may appear before or after any token. White space between tokens is not significant to the semantic meaning of a GraphQL Document, however white space characters may appear within a String or Comment token
runeHorizontalTab = '\u0009'
runeSpace = '\u0020'
// Like white space, line terminators are used to improve the legibility of source text, any amount may appear before or after any other token and have no significance to the semantic meaning of a GraphQL Document. Line terminators are not found within any other token
runeNewLine = '\u000A'
runeCarriageReturn = '\u000D'
// The “Byte Order Mark” is a special Unicode character which may appear at the beginning of a file containing Unicode which programs may use to determine the fact that the text stream is Unicode, what endianness the text stream is in, and which of several Unicode encodings to interpret
runeUnicodeBOM = '\uFEFF'
// Similar to white space and line terminators, commas (,) are used to improve the legibility of source text and separate lexical tokens but are otherwise syntactically and semantically insignificant within GraphQL Documents
runeComma = ','
// Punctations
runeExclamationMark = '!'
runeDollar = '$'
runeLeftParentheses = '('
runeRightParentheses = ')'
runeLeftBrace = '{'
runeRightBrace = '}'
runeLeftBracket = '['
runeRightBracket = ']'
runeColon = ':'
runeEqual = '='
runeAt = '@'
runeVerticalBar = '|'
runeDot = '.'
/* String values are single line "<value>" or multiline
"""
<value line 1>
<value line 2>
"""*/
runeQuotation = '"'
// Hashtag starts a new comment
runeHashtag = '#'
// EOF
runeEOF = 0
// AND rune
runeAND = '&'
// backSlask \ to escape things
runeBackSlash = '\\'
// unicode u
runeU = 'u'
// negative sign for numeric values
runeNegativeSign = '-'
// plus sign
runePlusSign = '+'
)
// isSourceCharacter reports whether r is a valid GraphQL
// SourceCharacter: horizontal tab, newline, carriage return, or any
// code point in [U+0020, U+FFFF].
func isSourceCharacter(r rune) bool {
	switch r {
	case '\u0009', '\u000A', '\u000D':
		return true
	}
	return '\u0020' <= r && r <= '\uFFFF'
}
// isLineTerminator reports whether r ends a line (LF or CR).
func isLineTerminator(r rune) bool {
	return r == runeNewLine || r == runeCarriageReturn
}

// isWhitespace reports whether r is insignificant white space
// (space or horizontal tab).
func isWhitespace(r rune) bool {
	return r == runeSpace || r == runeHorizontalTab
}

// isCommentCharacter reports whether r may appear inside a comment:
// any source character except a line terminator.
func isCommentCharacter(r rune) bool {
	return isSourceCharacter(r) && !isLineTerminator(r)
}
// isPunctuator reports whether r is one of the GraphQL punctuator
// characters: ! $ ( ) { } [ ] : = @ | &
func isPunctuator(r rune) bool {
	switch r {
	case runeExclamationMark, runeDollar,
		runeLeftParentheses, runeRightParentheses,
		runeLeftBrace, runeRightBrace,
		runeLeftBracket, runeRightBracket,
		runeColon, runeEqual, runeAt,
		runeVerticalBar, runeAND:
		return true
	}
	return false
}
// isNameStart reports whether r may begin a GraphQL Name: an ASCII
// letter or underscore.
func isNameStart(r rune) bool {
	if r == '_' {
		return true
	}
	return ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z')
}
// isName reports whether r may appear inside a GraphQL Name: an ASCII
// letter, digit, or underscore.
func isName(r rune) bool {
	switch {
	case 'a' <= r && r <= 'z', 'A' <= r && r <= 'Z', '0' <= r && r <= '9':
		return true
	default:
		return r == '_'
	}
}
// canIgnore reports whether r is insignificant between tokens and may
// be skipped by the lexer: the BOM, white space, line terminators, and
// commas. Comments are ignorable as well, but should be checked
// separately since they span multiple runes.
func canIgnore(r rune) bool {
	return r == runeUnicodeBOM || isWhitespace(r) || isLineTerminator(r) || r == runeComma
}

// isDigit reports whether r is an ASCII decimal digit '0'..'9'.
func isDigit(r rune) bool {
	return '0' <= r && r <= '9'
}

// isNonZeroDigit reports whether r is an ASCII digit '1'..'9'.
func isNonZeroDigit(r rune) bool {
	return '1' <= r && r <= '9'
}

// isExponentIndicator reports whether r marks a float exponent
// ('e' or 'E').
func isExponentIndicator(r rune) bool {
	return r == 'e' || r == 'E'
}
// TODO: check for triple dots | pkg/language/lexer/runes.go | 0.597608 | 0.627181 | runes.go | starcoder |
package cem
import (
"errors"
"fmt"
"math"
"gonum.org/v1/gonum/mat"
)
// This file contains helper functions for matrix calculations.
// choleskySymmetricFromCovariance projects covariance (num x num) to
// the nearest positive-definite matrix, forces exact symmetry by
// averaging with its transpose, and returns the Cholesky
// factorization of the result.
// NOTE(review): Cholesky.Factorize's ok result is discarded here, so a
// failed factorization goes unnoticed — confirm this is intended.
func choleskySymmetricFromCovariance(covariance *mat.Dense, num int) (*mat.Cholesky, error) {
	covariance, err := nearestPD(covariance)
	if err != nil {
		return nil, err
	}
	// Symmetrize: (C + C^T) / 2, stored in a SymDense for Cholesky.
	symCov := mat.NewSymDense(num, nil)
	for i := 0; i < num; i++ {
		for j := 0; j < num; j++ {
			symCov.SetSym(i, j, (covariance.At(i, j)+covariance.At(j, i))/2.0)
		}
	}
	var choleskySymmetricCovariance mat.Cholesky
	choleskySymmetricCovariance.Factorize(symCov)
	return &choleskySymmetricCovariance, nil
}
// nearestPD returns the nearest positive-definite matrix to the square
// matrix A, following Higham's nearest-SPD algorithm (symmetrize, take
// the polar/SVD projection, then nudge the diagonal until positive
// definite), in the style of the common MATLAB/numpy nearestPD ports.
//
// Bug fix: the repair loop previously overwrote A3 with a diagonal
// matrix (A3[i][j] = I[i][j]*shift), discarding the matrix being
// repaired; the shift must be ADDED to A3's diagonal. Also removed a
// dead U*S*V^T reconstruction and several redundant matrix copies.
func nearestPD(A *mat.Dense) (*mat.Dense, error) {
	rows, cols := A.Dims()

	// Step 1: B = (A + A^T)/2, the symmetric part of A.
	B := mat.NewDense(rows, cols, nil)
	tA := transpose(A)
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			B.Set(i, j, (A.At(i, j)+tA.At(i, j))/2.0)
		}
	}

	// Step 2: symmetric polar factor H = V * S * V^T from the SVD
	// B = U * S * V^T.
	_, s, v, err := svd(B)
	if err != nil {
		return nil, err
	}
	S := mat.NewDense(rows, cols, nil)
	for i := 0; i < rows; i++ {
		S.Set(i, i, s[i])
	}
	var sv, H mat.Dense
	sv.Mul(S, &v)
	H.Mul(transpose(&v), &sv)

	// Step 3: candidate A3 = symmetrized (B + H)/2.
	A2 := mat.NewDense(rows, cols, nil)
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			A2.Set(i, j, (B.At(i, j)+H.At(i, j))/2.0)
		}
	}
	A3 := mat.NewDense(rows, cols, nil)
	tA2 := transpose(A2)
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			A3.Set(i, j, (A2.At(i, j)+tA2.At(i, j))/2.0)
		}
	}
	if isPD(A3) {
		return A3, nil
	}

	// Step 4: shift the diagonal by slightly more than the most
	// negative eigenvalue until the matrix is positive definite.
	// `spacing` is the floating-point spacing around ||A||_2.
	normA := mat.Norm(A, 2)
	next := math.Nextafter(normA, normA+1) - normA
	prev := normA - math.Nextafter(normA, normA-1)
	spacing := math.Min(next, prev)
	for k := 1.0; !isPD(A3); k++ {
		var eig mat.Eigen
		if ok := eig.Factorize(A3, mat.EigenNone); !ok {
			return nil, errors.New("Eigen decomposition failed")
		}
		minEig := math.Inf(1)
		for _, ev := range eig.Values(nil) {
			if re := real(ev); re < minEig {
				minEig = re
			}
		}
		shift := -minEig*math.Pow(k, 2) + spacing
		for i := 0; i < rows; i++ {
			A3.Set(i, i, A3.At(i, i)+shift)
		}
	}
	return A3, nil
}
// isPD treats matrix as positive definite when every eigenvalue has a
// strictly positive real part, and reports false when the eigenvalue
// factorization itself fails.
func isPD(matrix mat.Matrix) bool {
	var eig mat.Eigen
	if ok := eig.Factorize(matrix, mat.EigenNone); !ok {
		return false
	}
	for _, ev := range eig.Values(nil) {
		if real(ev) <= 0 {
			return false
		}
	}
	return true
}
// transpose returns a new matrix that is the transpose of matrix.
// Bug fix: the result is allocated as cols x rows and indexed within
// bounds, so rectangular matrices no longer panic (the original
// allocated rows x cols and read matrix.At(j, i) out of range for
// non-square inputs). Behavior for square matrices is unchanged.
func transpose(matrix *mat.Dense) *mat.Dense {
	rows, cols := matrix.Dims()
	t := mat.NewDense(cols, rows, nil)
	for i := 0; i < cols; i++ {
		for j := 0; j < rows; j++ {
			t.Set(i, j, matrix.At(j, i))
		}
	}
	return t
}
// svd computes the full singular value decomposition of Matrix,
// returning U, the singular values s (in gonum's descending order),
// and V such that Matrix = U * diag(s) * V^T. Zero-valued matrices
// and an error are returned if factorization fails.
func svd(Matrix mat.Matrix) (mat.Dense, []float64, mat.Dense, error) {
	var svd mat.SVD
	if ok := svd.Factorize(Matrix, mat.SVDFull); !ok {
		var nilMat mat.Dense
		return nilMat, nil, nilMat, errors.New("SVD factorization failed")
	}
	var v, u mat.Dense
	svd.UTo(&u)
	svd.VTo(&v)
	s := svd.Values(nil)
	return u, s, v, nil
}
// matPrint pretty-prints X to stdout using gonum's matrix formatter.
func matPrint(X mat.Matrix) {
	fa := mat.Formatted(X, mat.Prefix(""), mat.Squeeze())
	fmt.Printf("%v\n", fa)
} | lib/cem/matrix.go | 0.61057 | 0.452294 | matrix.go | starcoder |
package main
import "fmt"
var interations int
// Tree is a binary search tree of ints; node is the root (nil for an
// empty tree).
type Tree struct {
	node *Node
}

// Node is one tree node: values <= value are inserted to the left,
// larger values to the right.
type Node struct {
	value int
	left  *Node
	right *Node
}
// Insert adds value to the tree, creating the root if the tree is
// empty, and returns the tree so calls can be chained.
func (t *Tree) Insert(value int) *Tree {
	if t.node == nil {
		t.node = &Node{value: value}
	} else {
		t.node.Insert(value)
	}
	return t // Returning Tree so we can use chained functions
}
// Insert places value into the subtree rooted at n, descending left
// for values <= n.value and right otherwise, and creates a leaf at
// the first empty slot.
func (n *Node) Insert(value int) {
	if value <= n.value {
		if n.left != nil {
			n.left.Insert(value) // keep descending on the left
			return
		}
		n.left = &Node{value: value}
		return
	}
	if n.right != nil {
		n.right.Insert(value) // keep descending on the right
		return
	}
	n.right = &Node{value: value}
}
// printNode prints the subtree rooted at n to stdout in pre-order
// (node, left, right); nil subtrees print nothing.
func printNode(n *Node) {
	if n == nil {
		return
	}
	fmt.Printf(" %d ", n.value)
	printNode(n.left)  // Recursively call printNode
	printNode(n.right) // Recursively call printNode
}
// Search looks for value in the subtree rooted at n using binary
// search and prints the outcome to stdout. It increments the
// package-level counter `interations` (sic — typo for "iterations")
// on each visited node; callers must reset it to 0 before each search.
func (n *Node) Search(value int) {
	if n == nil {
		// Fell off the tree: the value is not present.
		fmt.Print("\n\n+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n")
		fmt.Printf("Value to find: %d\n", value)
		fmt.Printf("Result: %d doesn't exists in Binary Tree\n", value)
		fmt.Printf("Iterations Took: %d", interations)
		fmt.Print("\n+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n")
		return
	}
	interations++
	if n.value == value {
		fmt.Print("\n\n+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n")
		fmt.Printf("Value to find: %d\n", value)
		fmt.Printf("Result: %d exists in Binary Tree\n", value)
		fmt.Printf("Iterations Took: %d", interations)
		fmt.Print("\n+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n")
		return
	}
	if value <= n.value {
		// Search in left
		n.left.Search(value) // Recursively call Search function
	} else {
		// Search in right
		n.right.Search(value) // Recursively call Search function
	}
}
// main builds a sample tree, prints it, and runs three searches,
// resetting the global iteration counter before each one.
func main() {
	t := &Tree{}
	t.Insert(10).Insert(8).Insert(20).Insert(11).Insert(0).Insert(30)
	fmt.Print("\n\nFull Binary Tree: ")
	printNode(t.node) // Print Binrary Tree
	interations = 0
	t.node.Search(0) // Search for value
	interations = 0
	t.node.Search(10) // Search for value
	interations = 0
	t.node.Search(300) // Search for value
} | 08-Binary-Search/main.go | 0.651798 | 0.442817 | main.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// PositionDetail
type PositionDetail struct {
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// Detail about the company or employer.
company CompanyDetailable
// Description of the position in question.
description *string
// When the position ended.
endMonthYear *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.DateOnly
// The title held when in that position.
jobTitle *string
// The role the position entailed.
role *string
// The start month and year of the position.
startMonthYear *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.DateOnly
// Short summary of the position.
summary *string
}
// NewPositionDetail instantiates a new positionDetail and sets the default values.
func NewPositionDetail()(*PositionDetail) {
m := &PositionDetail{
}
m.SetAdditionalData(make(map[string]interface{}));
return m
}
// CreatePositionDetailFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreatePositionDetailFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewPositionDetail(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *PositionDetail) GetAdditionalData()(map[string]interface{}) {
if m == nil {
return nil
} else {
return m.additionalData
}
}
// GetCompany gets the company property value. Detail about the company or employer.
func (m *PositionDetail) GetCompany()(CompanyDetailable) {
if m == nil {
return nil
} else {
return m.company
}
}
// GetDescription gets the description property value. Description of the position in question.
func (m *PositionDetail) GetDescription()(*string) {
if m == nil {
return nil
} else {
return m.description
}
}
// GetEndMonthYear gets the endMonthYear property value. When the position ended.
func (m *PositionDetail) GetEndMonthYear()(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.DateOnly) {
if m == nil {
return nil
} else {
return m.endMonthYear
}
}
// GetFieldDeserializers the deserialization information for the current model
func (m *PositionDetail) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
res["company"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetObjectValue(CreateCompanyDetailFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
m.SetCompany(val.(CompanyDetailable))
}
return nil
}
res["description"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetDescription(val)
}
return nil
}
res["endMonthYear"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetDateOnlyValue()
if err != nil {
return err
}
if val != nil {
m.SetEndMonthYear(val)
}
return nil
}
res["jobTitle"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetJobTitle(val)
}
return nil
}
res["role"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetRole(val)
}
return nil
}
res["startMonthYear"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetDateOnlyValue()
if err != nil {
return err
}
if val != nil {
m.SetStartMonthYear(val)
}
return nil
}
res["summary"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetSummary(val)
}
return nil
}
return res
}
// GetJobTitle gets the jobTitle property value. The title held when in that position.
func (m *PositionDetail) GetJobTitle()(*string) {
if m == nil {
return nil
} else {
return m.jobTitle
}
}
// GetRole gets the role property value. The role the position entailed.
func (m *PositionDetail) GetRole()(*string) {
if m == nil {
return nil
} else {
return m.role
}
}
// GetStartMonthYear gets the startMonthYear property value. The start month and year of the position.
func (m *PositionDetail) GetStartMonthYear()(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.DateOnly) {
if m == nil {
return nil
} else {
return m.startMonthYear
}
}
// GetSummary gets the summary property value. Short summary of the position.
func (m *PositionDetail) GetSummary()(*string) {
if m == nil {
return nil
} else {
return m.summary
}
}
// Serialize serializes information the current object
func (m *PositionDetail) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
{
err := writer.WriteObjectValue("company", m.GetCompany())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("description", m.GetDescription())
if err != nil {
return err
}
}
{
err := writer.WriteDateOnlyValue("endMonthYear", m.GetEndMonthYear())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("jobTitle", m.GetJobTitle())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("role", m.GetRole())
if err != nil {
return err
}
}
{
err := writer.WriteDateOnlyValue("startMonthYear", m.GetStartMonthYear())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("summary", m.GetSummary())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *PositionDetail) SetAdditionalData(value map[string]interface{})() {
if m != nil {
m.additionalData = value
}
}
// SetCompany sets the company property value. Detail about the company or employer.
func (m *PositionDetail) SetCompany(value CompanyDetailable)() {
if m != nil {
m.company = value
}
}
// SetDescription sets the description property value. Description of the position in question.
func (m *PositionDetail) SetDescription(value *string)() {
if m != nil {
m.description = value
}
}
// SetEndMonthYear sets the endMonthYear property value. When the position ended.
func (m *PositionDetail) SetEndMonthYear(value *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.DateOnly)() {
if m != nil {
m.endMonthYear = value
}
}
// SetJobTitle sets the jobTitle property value. The title held when in that position.
func (m *PositionDetail) SetJobTitle(value *string)() {
if m != nil {
m.jobTitle = value
}
}
// SetRole sets the role property value. The role the position entailed.
func (m *PositionDetail) SetRole(value *string)() {
if m != nil {
m.role = value
}
}
// SetStartMonthYear sets the startMonthYear property value. The start month and year of the position.
func (m *PositionDetail) SetStartMonthYear(value *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.DateOnly)() {
if m != nil {
m.startMonthYear = value
}
}
// SetSummary sets the summary property value. Short summary of the position.
func (m *PositionDetail) SetSummary(value *string)() {
if m != nil {
m.summary = value
}
} | models/position_detail.go | 0.647575 | 0.429609 | position_detail.go | starcoder |
package simulation
import (
"bytes"
"crypto/sha1"
"encoding/binary"
"image"
"log"
)
const MaxCharge = 6
// Circuit is the static structure parsed from an image: the wires and
// the transistors that connect them.
type Circuit struct {
	wires       []*Wire
	transistors []*Transistor
}

// Wires returns all wires in the circuit.
func (c *Circuit) Wires() []*Wire {
	return c.wires
}

// WireState is the per-step dynamic state of one wire.
type WireState struct {
	charge uint8 // current charge, 0..MaxCharge
	wire   *Wire
}

// Charge returns the wire's current charge (0..MaxCharge).
func (w WireState) Charge() uint8 {
	return w.charge
}

// Wire returns the wire this state belongs to.
func (w WireState) Wire() *Wire {
	return w.wire
}

// Simulation pairs a circuit with one immutable snapshot of all wire
// states; Step produces a new Simulation instead of mutating.
type Simulation struct {
	circuit *Circuit
	states  []WireState
}

// Circuit returns the simulated circuit.
func (s *Simulation) Circuit() *Circuit {
	return s.circuit
}

// State returns the current state of the given wire.
func (s *Simulation) State(wire *Wire) WireState {
	return s.states[wire.index]
}
// New parses a paletted image into a Simulation. Wire pixels are those
// whose palette index encodes a charge (index-1 <= MaxCharge; index 0,
// the background, underflows past MaxCharge and is skipped). Pass 1
// groups wire pixels into connected components; pass 2 merges wires
// that cross; pass 3 detects T-shaped gaps as transistors.
func New(img *image.Paletted) *Simulation {
	size := img.Bounds().Size()
	groups := make(map[*group]struct{}, 0)
	matrix := newBucketMatrix(size.X, size.Y)
	// Pass 1: connected-component labeling. Each wire pixel joins the
	// group of its top/left neighbor, merging the two groups when both
	// exist and differ (union-find via shared *group pointers).
	for y := 0; y < size.Y; y++ {
		for x := 0; x < size.X; x++ {
			// Background (index 0) wraps to 255 here and is skipped.
			charge := img.ColorIndexAt(x, y) - 1
			if charge > MaxCharge {
				continue
			}
			topLeftBucket := matrix.get(x-1, y-1)
			topBucket := matrix.get(x, y-1)
			leftBucket := matrix.get(x-1, y)
			var currentBucket *bucket
			switch {
			case nil == topBucket && nil == leftBucket:
				currentBucket = newBucket()
				groups[currentBucket.group] = struct{}{}
			case nil == topBucket && nil != leftBucket:
				currentBucket = leftBucket
			case (nil != topBucket && nil == leftBucket) ||
				topBucket == leftBucket ||
				topBucket.group == leftBucket.group:
				currentBucket = topBucket
			default:
				// Top and left belong to different groups: merge them.
				currentBucket = topBucket
				delete(groups, topBucket.group)
				topBucket.group.moveContentTo(leftBucket.group)
			}
			// A filled 2x2 corner marks the wire as a power source.
			if nil != topLeftBucket && nil != topBucket && nil != leftBucket {
				currentBucket.group.wire.isPowerSource = true
			}
			matrix.set(x, y, currentBucket)
			// The wire's initial charge is the max pixel charge seen.
			if charge > currentBucket.group.wireState.charge {
				currentBucket.group.wireState.charge = charge
			}
			currentBucket.addPixel(image.Point{x, y})
		}
	}
	// Pass 2: a blank pixel with wire on all four sides but none of
	// the four diagonals is a crossing — join top/bottom and
	// left/right into single wires.
	for y := 0; y < size.Y; y++ {
		for x := 0; x < size.X; x++ {
			if nil != matrix.get(x, y) {
				continue
			}
			topBucket := matrix.get(x, y-1)
			topRightBucket := matrix.get(x+1, y-1)
			rightBucket := matrix.get(x+1, y)
			bottomRightBucket := matrix.get(x+1, y+1)
			bottomBucket := matrix.get(x, y+1)
			bottomLeftBucket := matrix.get(x-1, y+1)
			leftBucket := matrix.get(x-1, y)
			topLeftBucket := matrix.get(x-1, y-1)
			if nil == topLeftBucket && nil == topRightBucket && nil == bottomLeftBucket && nil == bottomRightBucket &&
				nil != topBucket && nil != rightBucket && nil != bottomBucket && nil != leftBucket {
				if topBucket.group != bottomBucket.group {
					delete(groups, topBucket.group)
					topBucket.group.moveContentTo(bottomBucket.group)
				}
				if rightBucket.group != leftBucket.group {
					delete(groups, rightBucket.group)
					rightBucket.group.moveContentTo(leftBucket.group)
				}
			}
		}
	}
	// Pass 3: a blank pixel with wire on exactly three sides (and the
	// matching diagonals clear) is a transistor; the wire opposite the
	// gap is the base, the two flanking wires are the inputs.
	transistors := make([]*Transistor, 0)
	for y := 0; y < size.Y; y++ {
		for x := 0; x < size.X; x++ {
			if nil != matrix.get(x, y) {
				continue
			}
			topBucket := matrix.get(x, y-1)
			topRightBucket := matrix.get(x+1, y-1)
			rightBucket := matrix.get(x+1, y)
			bottomRightBucket := matrix.get(x+1, y+1)
			bottomBucket := matrix.get(x, y+1)
			bottomLeftBucket := matrix.get(x-1, y+1)
			leftBucket := matrix.get(x-1, y)
			topLeftBucket := matrix.get(x-1, y-1)
			switch {
			case nil == bottomLeftBucket && nil == bottomRightBucket &&
				nil == topBucket && nil != rightBucket && nil != bottomBucket && nil != leftBucket:
				transistors = append(transistors,
					newTransistor(image.Point{x, y}, bottomBucket.group.wire, rightBucket.group.wire, leftBucket.group.wire))
			case nil == bottomLeftBucket && nil == topLeftBucket &&
				nil != topBucket && nil == rightBucket && nil != bottomBucket && nil != leftBucket:
				transistors = append(transistors,
					newTransistor(image.Point{x, y}, leftBucket.group.wire, topBucket.group.wire, bottomBucket.group.wire))
			case nil == topLeftBucket && nil == topRightBucket &&
				nil != topBucket && nil != rightBucket && nil == bottomBucket && nil != leftBucket:
				transistors = append(transistors,
					newTransistor(image.Point{x, y}, topBucket.group.wire, rightBucket.group.wire, leftBucket.group.wire))
			case nil == bottomRightBucket && nil == topRightBucket &&
				nil != topBucket && nil != rightBucket && nil != bottomBucket && nil == leftBucket:
				transistors = append(transistors,
					newTransistor(image.Point{x, y}, rightBucket.group.wire, topBucket.group.wire, bottomBucket.group.wire))
			}
		}
	}
	// Flatten the surviving groups into indexed wire/state slices.
	wires := make([]*Wire, len(groups))
	wireStates := make([]WireState, len(groups))
	i := 0
	for k := range groups {
		k.wire.index = i
		wires[i] = k.wire
		wireStates[i] = k.wireState
		i++
	}
	return &Simulation{&Circuit{wires: wires, transistors: transistors}, wireStates}
}
// Step advances the simulation one tick and returns the new state; the
// receiver is unchanged. Power-source wires charge toward MaxCharge;
// other wires move one step toward the strongest reachable source's
// charge, decaying when no stronger source is connected.
func (s *Simulation) Step() *Simulation {
	newWireState := make([]WireState, len(s.states))
	for i, state := range s.states {
		charge := state.charge
		if state.wire.isPowerSource {
			if state.charge < MaxCharge {
				charge = state.charge + 1
			}
		} else {
			source := s.tracePowerSource(state)
			// Rise while a stronger source feeds us; decay otherwise.
			if source.charge > state.charge+1 {
				charge = state.charge + 1
			} else if source.charge <= state.charge && state.charge > 0 {
				charge = state.charge - 1
			}
		}
		newWireState[i] = WireState{charge, state.wire}
	}
	return &Simulation{s.circuit, newWireState}
}
// tracePowerSource returns the neighboring wire state with the highest
// charge reachable from origin through its transistors, or origin
// itself when nothing stronger is connected. A transistor conducts
// only while its base wire is uncharged; the search looks exactly one
// transistor deep and short-circuits on a fully charged neighbor.
func (s *Simulation) tracePowerSource(origin WireState) WireState {
	result := origin
	for _, transistor := range origin.wire.transistors {
		// A charged base switches the transistor off.
		if nil != transistor.base && s.states[transistor.base.index].charge > 0 {
			continue
		}
		// origin is one input; inspect the wire on the other side.
		if origin.wire == transistor.inputA {
			inputBState := s.states[transistor.inputB.index]
			if inputBState.charge == MaxCharge {
				return inputBState
			}
			if inputBState.charge > result.charge {
				result = inputBState
				continue
			}
		} else if origin.wire == transistor.inputB {
			inputAState := s.states[transistor.inputA.index]
			if inputAState.charge == MaxCharge {
				return inputAState
			}
			if inputAState.charge > result.charge {
				result = inputAState
				continue
			}
		}
	}
	return result
}
// DiffDraw renders only the wires whose charge differs from
// previousSimulation, producing a minimal delta frame.
func (s *Simulation) DiffDraw(previousSimulation *Simulation, img *image.Paletted) {
	for i, st := range s.states {
		if st.charge != previousSimulation.states[i].charge {
			st.wire.draw(img, st.charge+1)
		}
	}
}
// Draw renders every wire into img. The palette index is charge+1,
// mirroring the charge = colorIndex - 1 mapping used when parsing.
func (s *Simulation) Draw(img *image.Paletted) {
	for _, st := range s.states {
		st.wire.draw(img, st.charge+1)
	}
}
// DrawAll renders frameCount frames starting from initialImage: the
// first frame is drawn in full, later frames are delta frames holding
// only the wires that changed (suitable for incremental GIF frames).
func (s *Simulation) DrawAll(initialImage *image.Paletted, frameCount int) []*image.Paletted {
	bounds := initialImage.Bounds()
	images := make([]*image.Paletted, frameCount)
	s.Draw(initialImage)
	images[0] = initialImage
	for f := 1; f < frameCount; f++ {
		newSimulation := s.Step()
		img := image.NewPaletted(bounds, initialImage.Palette)
		newSimulation.DiffDraw(s, img)
		images[f] = img
		s = newSimulation
	}
	return images
}
// FindLooping steps the simulation until a previously seen state
// recurs (detected via Hash) and returns the simulation at that point
// together with the cycle length in frames.
func (s *Simulation) FindLooping() (*Simulation, int) {
	// Maps state hash -> frame number at which it was first seen.
	hashs := make(map[[sha1.Size]byte]int, 0)
	frame := 0
	for {
		s = s.Step()
		var hash [sha1.Size]byte
		copy(hash[:], s.Hash())
		if f, ok := hashs[hash]; ok {
			return s, frame - f
		}
		hashs[hash] = frame
		frame++
	}
}
// Hash returns a SHA-1 digest over all wire states, hashing each
// wire's index (uint32, little-endian) followed by its charge byte.
// FindLooping relies on equal-state simulations hashing identically.
//
// Cleanup: reuse one buffer instead of allocating a bytes.Buffer per
// wire, and drop the log.Fatal paths — writing fixed-size integers to
// an in-memory buffer or a hash.Hash cannot fail.
func (s *Simulation) Hash() []byte {
	hash := sha1.New()
	var buf bytes.Buffer
	for index, state := range s.states {
		buf.Reset()
		// Errors are impossible for fixed-size writes to a Buffer.
		binary.Write(&buf, binary.LittleEndian, uint32(index))
		buf.WriteByte(state.charge)
		hash.Write(buf.Bytes())
	}
	return hash.Sum(nil)
}
// Transistor joins two input wires gated by a base wire: charge is
// traced between the inputs only while the base is uncharged (see
// tracePowerSource). position is the gap pixel where the shape was
// detected in the source image.
type Transistor struct {
	position image.Point
	base     *Wire
	inputA   *Wire
	inputB   *Wire
}

// Position returns the pixel where the transistor was detected.
func (t *Transistor) Position() image.Point {
	return t.position
}

// Base returns the gate wire.
func (t *Transistor) Base() *Wire {
	return t.base
}

// InputA returns the first input wire.
func (t *Transistor) InputA() *Wire {
	return t.inputA
}

// InputB returns the second input wire.
func (t *Transistor) InputB() *Wire {
	return t.inputB
}
// newTransistor creates a transistor and registers it with both input
// wires so charge tracing can find it; the base wire needs no
// back-reference.
func newTransistor(position image.Point, base, inputA, inputB *Wire) *Transistor {
	t := &Transistor{
		position: position,
		base:     base,
		inputA:   inputA,
		inputB:   inputB,
	}
	inputA.transistors = append(inputA.transistors, t)
	inputB.transistors = append(inputB.transistors, t)
	return t
}
// Wire is a connected region of pixels that carries charge. index is
// its position in the Simulation state slice (-1 until assigned in
// New); isPowerSource marks wires that charge themselves each step.
type Wire struct {
	index         int
	pixels        []image.Point
	bounds        image.Rectangle
	transistors   []*Transistor // transistors attached as an input
	isPowerSource bool
}

// Pixels returns the pixels making up the wire.
func (w *Wire) Pixels() []image.Point {
	return w.pixels
}

// Bounds returns the bounding rectangle of the wire's pixels.
func (w *Wire) Bounds() image.Rectangle {
	return w.bounds
}

// Transistors returns the transistors attached to this wire as input.
func (w *Wire) Transistors() []*Transistor {
	return w.transistors
}

// IsPowerSource reports whether the wire charges itself each step.
func (w *Wire) IsPowerSource() bool {
	return w.isPowerSource
}

// newWire returns an empty, unindexed, non-power-source wire.
func newWire() *Wire {
	return &Wire{
		index:         -1,
		pixels:        make([]image.Point, 0),
		bounds:        image.Rectangle{image.Pt(0, 0), image.Pt(0, 0)},
		transistors:   make([]*Transistor, 0),
		isPowerSource: false,
	}
}

// draw paints every pixel of the wire into img with colorIndex.
func (w *Wire) draw(img *image.Paletted, colorIndex uint8) {
	for _, pixel := range w.pixels {
		img.SetColorIndex(pixel.X, pixel.Y, colorIndex)
	}
}
type bucketMatrix struct {
buckets [][]*bucket
width int
height int
}
func newBucketMatrix(width int, height int) *bucketMatrix {
m := &bucketMatrix{make([][]*bucket, height), width, height}
for y := 0; y < height; y++ {
m.buckets[y] = make([]*bucket, width)
}
return m
}
func (m *bucketMatrix) get(x int, y int) *bucket {
if x < 0 || y < 0 || x >= m.width || y >= m.height {
return nil
}
return m.buckets[y][x]
}
func (m *bucketMatrix) set(x int, y int, bucket *bucket) {
m.buckets[y][x] = bucket
}
type bucket struct {
group *group
}
func newBucket() *bucket {
newBucket := &bucket{nil}
newGroup := &group{
buckets: []*bucket{newBucket},
wire: newWire(),
}
newGroup.wireState = WireState{wire: newGroup.wire, charge: 0}
newBucket.group = newGroup
return newBucket
}
// addPixel appends pixel to the bucket's wire and grows the wire's
// bounding box to cover it (the union with the 1×1 rectangle at pixel).
func (b *bucket) addPixel(pixel image.Point) {
	w := b.group.wire
	w.pixels = append(w.pixels, pixel)
	onePx := image.Rectangle{Min: pixel, Max: pixel.Add(image.Point{1, 1})}
	w.bounds = w.bounds.Union(onePx)
}
type group struct {
buckets []*bucket
wire *Wire
wireState WireState
}
func (g *group) moveContentTo(other *group) {
if g == other {
log.Fatal("A group can not be moved to itself.")
}
for _, bucket := range g.buckets {
bucket.group = other
other.buckets = append(other.buckets, bucket)
}
if g.wire.isPowerSource {
other.wire.isPowerSource = true
}
if g.wireState.charge > other.wireState.charge {
other.wireState.charge = g.wireState.charge
}
other.wire.bounds = other.wire.bounds.Union(g.wire.bounds)
other.wire.pixels = append(other.wire.pixels, g.wire.pixels...)
} | simulation/simulation.go | 0.549641 | 0.438424 | simulation.go | starcoder |
Package driver provides a standard database/sql compatible SQL driver for Hazelcast.
This driver supports Hazelcast 5.0 and up. Check out the Hazelcast SQL documentation here: https://docs.hazelcast.com/hazelcast/latest/sql/sql-overview
The documentation for the database/sql package is here: https://pkg.go.dev/database/sql
Enabling Hazelcast SQL
The SQL support should be enabled in Hazelcast server configuration:
<hazelcast>
<jet enabled="true" />
</hazelcast>
Creating a Driver Instance Using sql.Open
This driver provides two ways to create an instance.
The first one is via the standard sql.Open function.
That function takes two parameters, the driver name and the DSN (Data Source Name).
Here's a sample:
db, err := sql.Open("hazelcast", "hz://@localhost:5701?cluster.name=dev")
Use hazelcast as the driver name.
The DSN may be blank. In that case, the default configuration is used.
Otherwise, the DSN must start with the scheme (hz://) and have the following optional parts:
- Username and password for the cluster, separated by a column: d<PASSWORD>:<PASSWORD>
- Hazelcast member addresses, separated by commas: server1:port1,server2:port2
- Options as key=value pairs, separated by ampersand (&). Both the key and value must be URL encoded: cluster.name=dev&ssl=true
Username/password part is separated from the address by the at sign (@).
There should be a question mark (?) between the address(es) and options.
Here is a full DSN:
hz://dave:<PASSWORD>@<EMAIL>:5000,my-server2.company.com:6000?cluster.name=prod&ssl=true&log=warn
The following are the available options:
- unisocket: A boolean. Enables/disables the unisocket mode. Default: false. Example: unisocket=true
- log: One of the following: off, fatal, error, warn, info, debug, trace. Default: info. Example: log=debug
- cluster.name: A string. Specifies the cluster name. Default: dev. Example: cluster.name=hzc1
- cloud.token: A string. Sets the Hazelcast Cloud token. Example: cloud.token=<PASSWORD>
- stats.period: Duration between sending statistics, which can be parsed by time.Parse.
Use one of the following suffixes: s (seconds), m (minutes), h (hours). Example: stats.period=10s
- ssl: A boolean. Enables/disables SSL connections. Defaults: false. Example: ssl=true
- ssl.ca.path: The path to the PEM file for the certificate authority. Implies ssl=true. Example: ssl.ca.path=/etc/ssl/ca.pem
- ssl.cert.path: The path to the TLS certificate. Implies ssl=true. Example: ssl.cert.path=/etc/ssl/cert.pem
- ssl.key.path: The path to the certificate key. Implies ssl=true. Example: ssl.key.path=/etc/ssl/key.pem
- ssl.key.password: The optional certificate password. Example: ssl.key.password=<PASSWORD>
Some items in the client configuration cannot be set in the DSN, such as serialization factories and SSL configuration.
You can use the following functions to set those configuration items globally:
- SetSerializationConfig(...)
- SetLoggerConfig(...)
- SetSSLConfig(...)
Note that, these functions affect only the subsequent sql.Open calls, not the previous ones.
Here's an example:
sc1 := &serialization.Config{}
sc2 := &serialization.Config{}
// no serialization configuration is used for the call below
db1, err := sql.Open("hazelcast", "")
// the following two sql.Open calls use sc1
err = driver.SetSerializationConfig(sc1)
db2, err := sql.Open("hazelcast", "")
db3, err := sql.Open("hazelcast", "")
// the following sql.Open call uses sc2
err = driver.SetSerializationConfig(sc2)
db4, err := sql.Open("hazelcast", "")
Creating a Driver Instance Using driver.Open
It is possible to create a driver instance using an existing Hazelcast client configuration using the driver.Open function.
All client configuration items, except listeners are supported.
cfg := hazelcast.Config{}
cfg.Cluster.Name = "prod"
cfg.Serialization.SetPortableFactories(&MyPortableFactory{})
db := driver.Open(cfg)
Executing Queries
database/sql package supports two kinds of queries: The ones returning rows (select statements and a few others) and the rest (insert, update, etc.).
The former kinds of queries are executed with QueryXXX methods and the latter ones are executed with ExecXXX methods of the sql.DB instance returned from sql.Open or driver.Open.
Use the question mark (?) for placeholders.
Here is an Exec example:
q := `INSERT INTO person(__key, age, name) VALUES (?, ?, ?)`
result, err := db.Exec(q, 1001, 35, "<NAME>")
// handle the error
cnt, err := result.RowsAffected()
// handle the error
fmt.Printf("Affected rows: %d\n", cnt)
Note that LastInsertId is not supported and at the moment RowsAffected always returns 0.
An example Query call:
q :=`SELECT name, age FROM person WHERE age >= ?`
rows, err := db.Query(q, 30)
// handle the error
defer rows.Close()
var name string
var age int
for rows.Next() {
err := rows.Scan(&name, &age)
// handle the error
fmt.Println(name, age)
}
Context variants of Query and Exec, such as QueryContext and ExecContext are fully supported.
They can be used to pass Hazelcast specific parameters, such as the cursor buffer size.
See the Passing Hazelcast Specific Parameters section below.
Passing Hazelcast-Specific Parameters
This driver supports the following extra query parameters that Hazelcast supports:
- Cursor buffer size: Size of the server-side buffer for rows.
- Timeout: Maximum time a query is allowed to execute.
Check out the documentation below for details.
The extra query parameters are passed in a context augmented using WithCursorBufferSize and WithQueryTimeout functions. Here is an example:
// set the cursor buffer size to 10_000
ctx := driver.WithCursorBufferSize(context.Background(), 10_000)
// set the query timeout to 2 minutes
ctx = driver.WithQueryTimeout(ctx, 2*time.Minute)
// use the parameters above with any methods that uses that context
rows, err := db.QueryContext(ctx, "select * from people")
Creating a Mapping
To connect to a data source and query it as if it is a table, a mapping should be created.
Currently, mappings for Map, Kafka and file data sources are supported.
You can read the details about mappings here: https://docs.hazelcast.com/hazelcast/latest/sql/sql-overview#mappings
Supported Data Types
The following data types are supported when inserting/updating.
The names in parentheses correspond to SQL types:
- string (varchar)
- int8 (tinyint)
- int16 (smallint)
- int32 (integer)
- int64 (bigint)
- bool (boolean)
- float32 (real)
- float64 (double)
- types.Decimal (decimal)
- time.Time (date) Detected by checking: hour == minute == second == nanoseconds = 0
- time.Time (time) Detected by checking: year == 0, month == day == 1
- time.Time (timestamp) Detected by checking: hour == minute == second == nanoseconds = 0, timezone == time.Local
- time.Time (timestamp with time zone) Detected by checking: hour == minute == second == nanoseconds = 0, timezone != time.Local
Using Raw Values
You can directly use one of the supported data types.
Creating a mapping:
CREATE MAPPING person
TYPE IMAP
OPTIONS (
'keyFormat' = 'int',
'valueFormat' = 'varchar'
)
Inserting rows:
INSERT INTO person VALUES(100, '<NAME>')
Querying rows:
SELECT __key, this from person
Using JSON
Non-nested JSON values are supported.
Assuming the following JSON value:
{
"age": 35,
"name": "<NAME>"
}
Some or all fields of the JSON value may be mapped and used.
Creating a mapping:
CREATE MAPPING person (
__key BIGINT,
age BIGINT,
name VARCHAR
)
TYPE IMAP
OPTIONS (
'keyFormat' = 'bigint',
'valueFormat' = 'json-flat'
)
Inserting rows:
INSERT INTO person VALUES(100, 35, '<NAME>')
Querying rows:
SELECT __key, name FROM person WHERE age > 30
Using Portable
Portable example:
Assuming the following portable type:
type Person struct {
Name string
Age int16
}
func (r Person) FactoryID() int32 {
return 100
}
func (r Person) ClassID() int32 {
return 1
}
func (r Person) WritePortable(wr serialization.PortableWriter) {
wr.WriteString("name", r.Name)
wr.WriteInt16("age", r.Age)
}
func (r *Person) ReadPortable(rd serialization.PortableReader) {
r.Name = rd.ReadString("name")
r.Age = rd.ReadInt16("age")
}
Creating a mapping:
CREATE MAPPING person (
__key BIGINT,
age TINYINT,
name VARCHAR
)
TYPE IMAP
OPTIONS (
'keyFormat' = 'bigint',
'valueFormat' = 'portable',
'valuePortableFactoryId' = '100',
'valuePortableClassId' = '1'
)
Querying rows:
SELECT __key, name FROM person WHERE age > 30
*/
package driver
package gates
// --- Given Primitive Gates ---

// Nand is the only primitive gate; every other gate below is composed
// from it, directly or indirectly.
func Nand(x, y bool) bool {
	return !(x && y)
}

// --- End of Given Primitive Gates ---

// Not inverts x by tying both NAND inputs together.
func Not(x bool) bool {
	return Nand(x, x)
}

// And is NAND followed by an inversion.
func And(x, y bool) bool {
	return Not(Nand(x, y))
}

// Or is built via De Morgan: x OR y == NAND(NOT x, NOT y).
func Or(x, y bool) bool {
	return Nand(Not(x), Not(y))
}

// Xor is true when exactly one input is true.
func Xor(x, y bool) bool {
	return Or(And(x, Not(y)), And(Not(x), y))
}

// Eq is true when both inputs agree (an XNOR gate).
func Eq(a, b bool) bool {
	return Or(And(a, b), And(Not(a), Not(b)))
}

// Mux selects between the two data inputs: it returns a when sel is
// false and b when sel is true.
func Mux(a, b, sel bool) bool {
	return Or(And(Not(sel), a), And(sel, b))
}

// DMux routes in to one of two outputs: out[0] carries in when sel is
// false, out[1] carries it when sel is true; the other output is false.
// If sel=0 then {a=in, b=0} else {a=0, b=in}.
func DMux(in, sel bool) [2]bool {
	return [2]bool{Mux(in, false, sel), Mux(false, in, sel)}
}

// _DMux is DMux unpacked into two return values for convenience.
func _DMux(in, sel bool) (a, b bool) {
	out := DMux(in, sel)
	return out[0], out[1]
}

// Not16 applies Not to each line of a 16-bit bus.
func Not16(in [16]bool) [16]bool {
	var out [16]bool
	for i := range out {
		out[i] = Not(in[i])
	}
	return out
}

// And16 ANDs two 16-bit buses line by line.
func And16(a, b [16]bool) [16]bool {
	var out [16]bool
	for i := range out {
		out[i] = And(a[i], b[i])
	}
	return out
}

// Mux16 selects a whole 16-bit bus: a when sel is false, b when true.
func Mux16(a, b [16]bool, sel bool) [16]bool {
	var out [16]bool
	for i := range out {
		out[i] = Mux(a[i], b[i], sel)
	}
	return out
}

// reduce folds fn over the eight inputs, starting from init.
func reduce(fn func(acc bool, cur bool) bool, init bool, f [8]bool) bool {
	acc := init
	for _, bit := range f {
		acc = fn(acc, bit)
	}
	return acc
}

// Or8Way is true when any of the eight inputs is true.
func Or8Way(in [8]bool) bool {
	return reduce(Or, false, in)
}

// Mux4Way16 selects one of four 16-bit buses; sel[0] is the high
// selector bit and sel[1] the low one (00→a, 01→b, 10→c, 11→d).
func Mux4Way16(a, b, c, d [16]bool, sel [2]bool) [16]bool {
	low := sel[1]
	return Mux16(Mux16(a, b, low), Mux16(c, d, low), sel[0])
}

// Mux8Way16 selects one of eight 16-bit buses; sel[0] is the most
// significant selector bit.
func Mux8Way16(a, b, c, d, e, f, g, h [16]bool, sel [3]bool) [16]bool {
	low := [2]bool{sel[1], sel[2]}
	return Mux16(Mux4Way16(a, b, c, d, low), Mux4Way16(e, f, g, h, low), sel[0])
}

// DMux4Way routes in to one of four outputs chosen by sel
// (00→a, 01→b, 10→c, 11→d); the other three outputs are false.
func DMux4Way(in bool, sel [2]bool) (a, b, c, d bool) {
	upper, lower := _DMux(in, sel[0])
	a, b = _DMux(upper, sel[1])
	c, d = _DMux(lower, sel[1])
	return
}

// DMux8Way routes in to one of eight outputs chosen by the three
// selector bits; all other outputs are false.
func DMux8Way(in bool, sel [3]bool) (a, b, c, d, e, f, g, h bool) {
	upper, lower := _DMux(in, sel[0])
	a, b, c, d = DMux4Way(upper, [2]bool{sel[1], sel[2]})
	e, f, g, h = DMux4Way(lower, [2]bool{sel[1], sel[2]})
	return
}
package main
import (
"fmt"
"github.com/aaronjanse/3mux/render"
)
// A Split divides a region of the screen into areas reserved for
// multiple child nodes, stacked either vertically or horizontally.
type Split struct {
	elements          []Node // children, in on-screen order
	selectionIdx      int    // index into elements of the selected child (shown in serialize output)
	verticallyStacked bool   // true: children stacked top-to-bottom; false: side-by-side
	renderRect        Rect   // cached screen rectangle this split occupies (see setRenderRect)
}
// serialize renders the split as a debug string of the form
// "VSplit[sel](child, child, ...)" (or "HSplit" for horizontal splits),
// recursively serializing every child.
func (s *Split) serialize() string {
	kind := "HSplit"
	if s.verticallyStacked {
		kind = "VSplit"
	}
	out := fmt.Sprintf("%s[%d](", kind, s.selectionIdx)
	for i, e := range s.elements {
		if i > 0 {
			out += ", "
		}
		out += e.contents.serialize()
	}
	return out + ")"
}
// setRenderRect updates the Split's renderRect cache and then calls
// refreshRenderRect to push the new geometry down to the children.
// Used when a split is reshaped.
func (s *Split) setRenderRect(x, y, w, h int) {
	s.renderRect = Rect{x, y, w, h}
	// NOTE: should we clear the screen?
	s.refreshRenderRect()
}
// getRenderRect returns the cached rectangle this split was last given.
func (s *Split) getRenderRect() Rect {
	return s.renderRect
}
// kill recursively kills every child of this split.
func (s *Split) kill() {
	for _, n := range s.elements {
		n.contents.kill()
	}
}
// setPause propagates the pause/unpause signal to every child.
func (s *Split) setPause(pause bool) {
	for _, e := range s.elements {
		e.contents.setPause(pause)
	}
}
// removeTheDead recursively searches the tree and removes panes with Dead == true.
// A pane declares itself dead when its shell dies.
func removeTheDead(path Path) {
	s := path.getContainer().(*Split)
	// Walk backwards so that removing element idx does not shift the
	// indices of elements we have yet to visit.
	for idx := len(s.elements) - 1; idx >= 0; idx-- {
		element := s.elements[idx]
		switch c := element.contents.(type) {
		case *Split:
			// Descend into the nested split.
			removeTheDead(append(path, idx))
		case *Pane:
			if c.Dead {
				t := path.popContainer(idx)
				t.(*Pane).kill()
			}
		}
	}
}
// refreshRenderRect recalculates the coordinates of a Split's elements and calls setRenderRect on each of its children.
// Used when one or more of a split's children are reshaped.
func (s *Split) refreshRenderRect() {
	x := s.renderRect.x
	y := s.renderRect.y
	w := s.renderRect.w
	h := s.renderRect.h
	s.redrawLines()
	// area is the axis being divided: rows for a vertical stack,
	// columns for a horizontal one.
	var area int
	if s.verticallyStacked {
		area = h
	} else {
		area = w
	}
	dividers := getDividerPositions(area, s.elements)
	if len(s.elements) == 1 {
		// A single child owns the whole area; no divider is needed.
		dividers = []int{area}
	}
	for idx, pos := range dividers {
		// lastPos is the previous divider position; -1 places the first
		// child flush against the split's edge.
		lastPos := -1
		if idx > 0 {
			lastPos = dividers[idx-1]
		}
		// One cell between children is reserved for the divider line.
		childArea := pos - lastPos - 1
		if idx == len(dividers)-1 && idx != 0 {
			// The last child absorbs any rounding leftover.
			childArea = area - lastPos - 1
		}
		childNode := s.elements[idx]
		if s.verticallyStacked {
			childNode.contents.setRenderRect(x, y+lastPos+1, w, childArea)
		} else {
			childNode.contents.setRenderRect(x+lastPos+1, y, childArea, h)
		}
	}
}
// redrawLines repaints the divider lines between this split's children:
// horizontal bars (─) between vertically stacked children, vertical
// bars (│) between side-by-side ones. The position after the last child
// is skipped — there is no trailing divider.
func (s *Split) redrawLines() {
	x := s.renderRect.x
	y := s.renderRect.y
	w := s.renderRect.w
	h := s.renderRect.h
	// area is the axis being divided, matching refreshRenderRect.
	var area int
	if s.verticallyStacked {
		area = h
	} else {
		area = w
	}
	dividers := getDividerPositions(area, s.elements)
	for idx, pos := range dividers {
		if idx == len(dividers)-1 {
			break
		}
		if s.verticallyStacked {
			// Draw a full-width horizontal line at row pos.
			for i := 0; i < w; i++ {
				renderer.HandleCh(render.PositionedChar{
					Rune:   '─',
					Cursor: render.Cursor{X: x + i, Y: y + pos},
				})
			}
		} else {
			// Draw a full-height vertical line at column pos.
			for j := 0; j < h; j++ {
				renderer.HandleCh(render.PositionedChar{
					Rune:   '│',
					Cursor: render.Cursor{X: x + pos, Y: y + j},
				})
			}
		}
	}
}
func getDividerPositions(area int, contents []Node) []int {
var dividerPositions []int
for idx, node := range contents {
var lastPos int
if idx == 0 {
lastPos = 0
} else {
lastPos = dividerPositions[idx-1]
}
pos := lastPos + int(node.size*float32(area))
dividerPositions = append(dividerPositions, pos)
}
return dividerPositions
} | split.go | 0.507568 | 0.409929 | split.go | starcoder |
package profilescmdline
// HelpMsg returns a detailed help message for the profiles packages.
// The text is user-facing; typos fixed here ("sofware", "orderd",
// "The are", "to be generate", "packages manages") change only the
// wording of the message, not its structure.
func HelpMsg() string {
	return `
Profiles are used to manage external software dependencies and offer a balance
between providing no support at all and a full blown package manager.
Profiles can be built natively as well as being cross compiled.
A profile is a named collection of software required for a given system component or
application. Current example profiles include 'syncbase' which consists
of the leveldb and snappy libraries or 'android' which consists of all of the
android components and downloads needed to build android applications. Profiles
are built for specific targets.
Targets
Profiles generally refer to uncompiled source code that needs to be compiled for
a specific "target". Targets hence represent compiled code and consist of:
1. An 'architecture' that refers to the CPU to generate code for
2. An 'operating system' that refers to the operating system to generate code for
3. A lexicographically ordered set of supported versions, one of which is designated
as the default.
4. An 'environment' which is a set of environment variables to use when compiling the profile
Targets thus provide the basic support needed for cross compilation.
Targets are versioned and multiple versions may be installed and used simultaneously.
Versions are ordered lexicographically and each target specifies a 'default'
version to be used when a specific version is not explicitly requested. A request
to 'upgrade' the profile will result in the installation of the default version
of the targets currently installed if that default version is not already installed.
The Supported Commands
Profiles, or more correctly, targets for specific profiles may be installed or
removed. When doing so, the name of the profile is required, but the other
components of the target are optional and will default to the values of the
system that the commands are run on (so-called native builds) and the default
version for that target. Once a profile is installed it may be referred to by
its tag for subsequent removals.
There are also update and cleanup commands. Update installs the default version
of the requested profile or for all profiles for the already installed targets.
Cleanup will uninstall targets whose version is older than the default.
Finally, there are commands to list the available and installed profiles and
to access the environment variables specified and stored in each profile
installation and a command (recreate) to generate a list of commands that
can be run to recreate the currently installed profiles.
The Profiles Database
The profiles package manages a database that tracks the installed profiles
and their configurations. Other command line tools and packages are expected
to read information about the currently installed profiles from this database
via the profiles package. The profile command line tools support displaying the
database (via the list command) or for specifying an alternate version of the
file (via the -profiles-db flag) which is generally useful for debugging.
Adding Profiles
Profiles are intended to be provided as go packages that register themselves
with the profile command line tools via the *v.io/jiri/profiles* package.
They must implement the interfaces defined by that package and be imported
(e.g. import _ "myprofile") by the command line tools that are to use them.
`
}
package countries
// TypeCurrencyCode is the type-name string for CurrencyCode used by the
// package's Typer interface.
const TypeCurrencyCode string = "countries.CurrencyCode"

// TypeCurrency is the type-name string for Currency used by the
// package's Typer interface.
const TypeCurrency string = "countries.Currency"
// Currencies. Two codes present, for example CurrencyUSDollar == CurrencyUSD == 840.
// Each constant carries the ISO 4217 numeric currency code under a
// descriptive English name; the const block below aliases the same
// values under the three-letter ISO codes.
const (
	CurrencyUnknown                        CurrencyCode = 0
	CurrencyAfghani                        CurrencyCode = 971
	CurrencyLek                            CurrencyCode = 8
	CurrencyAlgerianDinar                  CurrencyCode = 12
	CurrencyUSDollar                       CurrencyCode = 840
	CurrencyEuro                           CurrencyCode = 978
	CurrencyKwanza                         CurrencyCode = 973
	CurrencyEastCaribbeanDollar            CurrencyCode = 951
	CurrencyArgentinePeso                  CurrencyCode = 32
	CurrencyArmenianDram                   CurrencyCode = 51
	CurrencyArubanFlorin                   CurrencyCode = 533
	CurrencyAustralianDollar               CurrencyCode = 36
	CurrencyAzerbaijanianManat             CurrencyCode = 944
	CurrencyBahamianDollar                 CurrencyCode = 44
	CurrencyBahrainiDinar                  CurrencyCode = 48
	CurrencyTaka                           CurrencyCode = 50
	CurrencyBarbadosDollar                 CurrencyCode = 52
	CurrencyBelarussianRuble               CurrencyCode = 974
	CurrencyBelizeDollar                   CurrencyCode = 84
	CurrencyCFAFrancBCEAO                  CurrencyCode = 952
	CurrencyBermudianDollar                CurrencyCode = 60
	CurrencyNgultrum                       CurrencyCode = 64
	CurrencyIndianRupee                    CurrencyCode = 356
	CurrencyBoliviano                      CurrencyCode = 68
	CurrencyConvertibleMark                CurrencyCode = 977
	CurrencyPula                           CurrencyCode = 72
	CurrencyNorwegianKrone                 CurrencyCode = 578
	CurrencyBrazilianReal                  CurrencyCode = 986
	CurrencyBruneiDollar                   CurrencyCode = 96
	CurrencyBulgarianLev                   CurrencyCode = 975
	CurrencyBurundiFranc                   CurrencyCode = 108
	CurrencyCaboVerdeEscudo                CurrencyCode = 132
	CurrencyRiel                           CurrencyCode = 116
	CurrencyCFAFrancBEAC                   CurrencyCode = 950
	CurrencyCanadianDollar                 CurrencyCode = 124
	CurrencyCaymanIslandsDollar            CurrencyCode = 136
	CurrencyUnidaddeFomento                CurrencyCode = 990
	CurrencyChileanPeso                    CurrencyCode = 152
	CurrencyYuanRenminbi                   CurrencyCode = 156
	CurrencyColombianPeso                  CurrencyCode = 170
	CurrencyUnidaddeValorReal              CurrencyCode = 970
	CurrencyComoroFranc                    CurrencyCode = 174
	CurrencyCongoleseFranc                 CurrencyCode = 976
	CurrencyNewZealandDollar               CurrencyCode = 554
	CurrencyCostaRicanColon                CurrencyCode = 188
	CurrencyKuna                           CurrencyCode = 191
	CurrencyPesoConvertible                CurrencyCode = 931
	CurrencyCubanPeso                      CurrencyCode = 192
	CurrencyNetherlandsAntilleanGuilder    CurrencyCode = 532
	CurrencyCzechKoruna                    CurrencyCode = 203
	CurrencyDanishKrone                    CurrencyCode = 208
	CurrencyDjiboutiFranc                  CurrencyCode = 262
	CurrencyDominicanPeso                  CurrencyCode = 214
	CurrencyEgyptianPound                  CurrencyCode = 818
	CurrencyElSalvadorColon                CurrencyCode = 222
	CurrencyNakfa                          CurrencyCode = 232
	CurrencyEthiopianBirr                  CurrencyCode = 230
	CurrencyFalklandIslandsPound           CurrencyCode = 238
	CurrencyFijiDollar                     CurrencyCode = 242
	CurrencyCFPFranc                       CurrencyCode = 953
	CurrencyDalasi                         CurrencyCode = 270
	CurrencyLari                           CurrencyCode = 981
	CurrencyGhanaCedi                      CurrencyCode = 936
	CurrencyGibraltarPound                 CurrencyCode = 292
	CurrencyQuetzal                        CurrencyCode = 320
	CurrencyPoundSterling                  CurrencyCode = 826
	CurrencyGuineaFranc                    CurrencyCode = 324
	CurrencyGuyanaDollar                   CurrencyCode = 328
	CurrencyGourde                         CurrencyCode = 332
	CurrencyLempira                        CurrencyCode = 340
	CurrencyHongKongDollar                 CurrencyCode = 344
	CurrencyForint                         CurrencyCode = 348
	CurrencyIcelandKrona                   CurrencyCode = 352
	CurrencyRupiah                         CurrencyCode = 360
	CurrencySDR                            CurrencyCode = 960
	CurrencyIranianRial                    CurrencyCode = 364
	CurrencyIraqiDinar                     CurrencyCode = 368
	CurrencyNewIsraeliSheqel               CurrencyCode = 376
	CurrencyJamaicanDollar                 CurrencyCode = 388
	CurrencyYen                            CurrencyCode = 392
	CurrencyJordanianDinar                 CurrencyCode = 400
	CurrencyTenge                          CurrencyCode = 398
	CurrencyKenyanShilling                 CurrencyCode = 404
	CurrencyNorthKoreanWon                 CurrencyCode = 408
	CurrencyWon                            CurrencyCode = 410
	CurrencyKuwaitiDinar                   CurrencyCode = 414
	CurrencySom                            CurrencyCode = 417
	CurrencyKip                            CurrencyCode = 418
	CurrencyLebanesePound                  CurrencyCode = 422
	CurrencyLoti                           CurrencyCode = 426
	CurrencyRand                           CurrencyCode = 710
	CurrencyLiberianDollar                 CurrencyCode = 430
	CurrencyLibyanDinar                    CurrencyCode = 434
	CurrencySwissFranc                     CurrencyCode = 756
	CurrencyPataca                         CurrencyCode = 446
	CurrencyDenar                          CurrencyCode = 807
	CurrencyMalagasyAriary                 CurrencyCode = 969
	CurrencyKwacha                         CurrencyCode = 454
	CurrencyMalaysianRinggit               CurrencyCode = 458
	CurrencyRufiyaa                        CurrencyCode = 462
	CurrencyOuguiya                        CurrencyCode = 929
	CurrencyMauritiusRupee                 CurrencyCode = 480
	CurrencyADBUnitofAccount               CurrencyCode = 965
	CurrencyMexicanPeso                    CurrencyCode = 484
	CurrencyMexicanUnidaddeInversion       CurrencyCode = 979
	CurrencyMexicanUDI                     CurrencyCode = 979
	CurrencyMoldovanLeu                    CurrencyCode = 498
	CurrencyTugrik                         CurrencyCode = 496
	CurrencyMoroccanDirham                 CurrencyCode = 504
	CurrencyMozambiqueMetical              CurrencyCode = 943
	CurrencyKyat                           CurrencyCode = 104
	CurrencyNamibiaDollar                  CurrencyCode = 516
	CurrencyNepaleseRupee                  CurrencyCode = 524
	CurrencyCordobaOro                     CurrencyCode = 558
	CurrencyNaira                          CurrencyCode = 566
	CurrencyRialOmani                      CurrencyCode = 512
	CurrencyPakistanRupee                  CurrencyCode = 586
	CurrencyBalboa                         CurrencyCode = 590
	CurrencyKina                           CurrencyCode = 598
	CurrencyGuarani                        CurrencyCode = 600
	CurrencyNuevoSol                       CurrencyCode = 604
	CurrencyPhilippinePeso                 CurrencyCode = 608
	CurrencyZloty                          CurrencyCode = 985
	CurrencyQatariRial                     CurrencyCode = 634
	CurrencyRomanianLeu                    CurrencyCode = 946
	CurrencyRussianRuble                   CurrencyCode = 643
	CurrencyRwandaFranc                    CurrencyCode = 646
	CurrencySaintHelenaPound               CurrencyCode = 654
	CurrencyTala                           CurrencyCode = 882
	CurrencyDobra                          CurrencyCode = 930
	CurrencySaudiRiyal                     CurrencyCode = 682
	CurrencySerbianDinar                   CurrencyCode = 941
	CurrencySeychellesRupee                CurrencyCode = 690
	CurrencyLeone                          CurrencyCode = 694
	CurrencySingaporeDollar                CurrencyCode = 702
	CurrencySucre                          CurrencyCode = 994
	CurrencySolomonIslandsDollar           CurrencyCode = 90
	CurrencySomaliShilling                 CurrencyCode = 706
	CurrencySouthSudanesePound             CurrencyCode = 728
	CurrencySriLankaRupee                  CurrencyCode = 144
	CurrencySudanesePound                  CurrencyCode = 938
	CurrencySurinamDollar                  CurrencyCode = 968
	CurrencyLilangeni                      CurrencyCode = 748
	CurrencySwedishKrona                   CurrencyCode = 752
	CurrencyWIREuro                        CurrencyCode = 947
	CurrencyWIRFranc                       CurrencyCode = 948
	CurrencySyrianPound                    CurrencyCode = 760
	CurrencyNewTaiwanDollar                CurrencyCode = 901
	CurrencySomoni                         CurrencyCode = 972
	CurrencyTanzanianShilling              CurrencyCode = 834
	CurrencyBaht                           CurrencyCode = 764
	CurrencyPaanga                         CurrencyCode = 776
	CurrencyTrinidadandTobagoDollar        CurrencyCode = 780
	CurrencyTunisianDinar                  CurrencyCode = 788
	CurrencyTurkishLira                    CurrencyCode = 949
	CurrencyTurkmenistanNewManat           CurrencyCode = 934
	CurrencyUgandaShilling                 CurrencyCode = 800
	CurrencyHryvnia                        CurrencyCode = 980
	CurrencyUAEDirham                      CurrencyCode = 784
	CurrencyUSDollarNextday                CurrencyCode = 997
	CurrencyUruguayPesoenUnidadesIndexadas CurrencyCode = 940
	CurrencyUruguayPUI                     CurrencyCode = 940
	CurrencyURUIURUI                       CurrencyCode = 940
	CurrencyPesoUruguayo                   CurrencyCode = 858
	CurrencyUzbekistanSum                  CurrencyCode = 860
	CurrencyVatu                           CurrencyCode = 548
	CurrencyBolivar                        CurrencyCode = 937
	CurrencyDong                           CurrencyCode = 704
	CurrencyYemeniRial                     CurrencyCode = 886
	CurrencyZambianKwacha                  CurrencyCode = 967
	CurrencyZimbabweDollar                 CurrencyCode = 932
	CurrencyYugoslavianDinar               CurrencyCode = 891
	CurrencyNone                           CurrencyCode = 998
)
// Currencies by ISO 4217. Two codes present, for example CurrencyUSDollar == CurrencyUSD == 840.
// Each constant aliases the same numeric value as its descriptive twin
// above under the three-letter ISO 4217 alphabetic code.
// NOTE(review): some entries reflect historical ISO 4217 codes
// (e.g. BYR, VEF) — confirm whether current replacements (BYN, VES)
// should also be present.
const (
	CurrencyAFN CurrencyCode = 971
	CurrencyALL CurrencyCode = 8
	CurrencyDZD CurrencyCode = 12
	CurrencyUSD CurrencyCode = 840
	CurrencyEUR CurrencyCode = 978
	CurrencyAOA CurrencyCode = 973
	CurrencyXCD CurrencyCode = 951
	CurrencyARS CurrencyCode = 32
	CurrencyAMD CurrencyCode = 51
	CurrencyAWG CurrencyCode = 533
	CurrencyAUD CurrencyCode = 36
	CurrencyAZN CurrencyCode = 944
	CurrencyBSD CurrencyCode = 44
	CurrencyBHD CurrencyCode = 48
	CurrencyBDT CurrencyCode = 50
	CurrencyBBD CurrencyCode = 52
	CurrencyBYR CurrencyCode = 974
	CurrencyBZD CurrencyCode = 84
	CurrencyXOF CurrencyCode = 952
	CurrencyBMD CurrencyCode = 60
	CurrencyBTN CurrencyCode = 64
	CurrencyINR CurrencyCode = 356
	CurrencyBOB CurrencyCode = 68
	CurrencyBAM CurrencyCode = 977
	CurrencyBWP CurrencyCode = 72
	CurrencyNOK CurrencyCode = 578
	CurrencyBRL CurrencyCode = 986
	CurrencyBND CurrencyCode = 96
	CurrencyBGN CurrencyCode = 975
	CurrencyBIF CurrencyCode = 108
	CurrencyCVE CurrencyCode = 132
	CurrencyKHR CurrencyCode = 116
	CurrencyXAF CurrencyCode = 950
	CurrencyCAD CurrencyCode = 124
	CurrencyKYD CurrencyCode = 136
	CurrencyCLF CurrencyCode = 990
	CurrencyCLP CurrencyCode = 152
	CurrencyCNY CurrencyCode = 156
	CurrencyCOP CurrencyCode = 170
	CurrencyCOU CurrencyCode = 970
	CurrencyKMF CurrencyCode = 174
	CurrencyCDF CurrencyCode = 976
	CurrencyNZD CurrencyCode = 554
	CurrencyCRC CurrencyCode = 188
	CurrencyHRK CurrencyCode = 191
	CurrencyCUC CurrencyCode = 931
	CurrencyCUP CurrencyCode = 192
	CurrencyANG CurrencyCode = 532
	CurrencyCZK CurrencyCode = 203
	CurrencyDKK CurrencyCode = 208
	CurrencyDJF CurrencyCode = 262
	CurrencyDOP CurrencyCode = 214
	CurrencyEGP CurrencyCode = 818
	CurrencySVC CurrencyCode = 222
	CurrencyERN CurrencyCode = 232
	CurrencyETB CurrencyCode = 230
	CurrencyFKP CurrencyCode = 238
	CurrencyFJD CurrencyCode = 242
	CurrencyXPF CurrencyCode = 953
	CurrencyGMD CurrencyCode = 270
	CurrencyGEL CurrencyCode = 981
	CurrencyGHS CurrencyCode = 936
	CurrencyGIP CurrencyCode = 292
	CurrencyGTQ CurrencyCode = 320
	CurrencyGBP CurrencyCode = 826
	CurrencyGNF CurrencyCode = 324
	CurrencyGYD CurrencyCode = 328
	CurrencyHTG CurrencyCode = 332
	CurrencyHNL CurrencyCode = 340
	CurrencyHKD CurrencyCode = 344
	CurrencyHUF CurrencyCode = 348
	CurrencyISK CurrencyCode = 352
	CurrencyIDR CurrencyCode = 360
	CurrencyXDR CurrencyCode = 960
	CurrencyIRR CurrencyCode = 364
	CurrencyIQD CurrencyCode = 368
	CurrencyILS CurrencyCode = 376
	CurrencyJMD CurrencyCode = 388
	CurrencyJPY CurrencyCode = 392
	CurrencyJOD CurrencyCode = 400
	CurrencyKZT CurrencyCode = 398
	CurrencyKES CurrencyCode = 404
	CurrencyKPW CurrencyCode = 408
	CurrencyKRW CurrencyCode = 410
	CurrencyKWD CurrencyCode = 414
	CurrencyKGS CurrencyCode = 417
	CurrencyLAK CurrencyCode = 418
	CurrencyLBP CurrencyCode = 422
	CurrencyLSL CurrencyCode = 426
	CurrencyZAR CurrencyCode = 710
	CurrencyLRD CurrencyCode = 430
	CurrencyLYD CurrencyCode = 434
	CurrencyCHF CurrencyCode = 756
	CurrencyMOP CurrencyCode = 446
	CurrencyMKD CurrencyCode = 807
	CurrencyMGA CurrencyCode = 969
	CurrencyMWK CurrencyCode = 454
	CurrencyMYR CurrencyCode = 458
	CurrencyMVR CurrencyCode = 462
	CurrencyMRU CurrencyCode = 929
	CurrencyMUR CurrencyCode = 480
	CurrencyXUA CurrencyCode = 965
	CurrencyMXN CurrencyCode = 484
	CurrencyMXV CurrencyCode = 979
	CurrencyMDL CurrencyCode = 498
	CurrencyMNT CurrencyCode = 496
	CurrencyMAD CurrencyCode = 504
	CurrencyMZN CurrencyCode = 943
	CurrencyMMK CurrencyCode = 104
	CurrencyNAD CurrencyCode = 516
	CurrencyNPR CurrencyCode = 524
	CurrencyNIO CurrencyCode = 558
	CurrencyNGN CurrencyCode = 566
	CurrencyOMR CurrencyCode = 512
	CurrencyPKR CurrencyCode = 586
	CurrencyPAB CurrencyCode = 590
	CurrencyPGK CurrencyCode = 598
	CurrencyPYG CurrencyCode = 600
	CurrencyPEN CurrencyCode = 604
	CurrencyPHP CurrencyCode = 608
	CurrencyPLN CurrencyCode = 985
	CurrencyQAR CurrencyCode = 634
	CurrencyRON CurrencyCode = 946
	CurrencyRUB CurrencyCode = 643
	CurrencyRWF CurrencyCode = 646
	CurrencySHP CurrencyCode = 654
	CurrencyWST CurrencyCode = 882
	CurrencySTN CurrencyCode = 930
	CurrencySAR CurrencyCode = 682
	CurrencyRSD CurrencyCode = 941
	CurrencySCR CurrencyCode = 690
	CurrencySLL CurrencyCode = 694
	CurrencySGD CurrencyCode = 702
	CurrencyXSU CurrencyCode = 994
	CurrencySBD CurrencyCode = 90
	CurrencySOS CurrencyCode = 706
	CurrencySSP CurrencyCode = 728
	CurrencyLKR CurrencyCode = 144
	CurrencySDG CurrencyCode = 938
	CurrencySRD CurrencyCode = 968
	CurrencySZL CurrencyCode = 748
	CurrencySEK CurrencyCode = 752
	CurrencyCHE CurrencyCode = 947
	CurrencyCHW CurrencyCode = 948
	CurrencySYP CurrencyCode = 760
	CurrencyTWD CurrencyCode = 901
	CurrencyTJS CurrencyCode = 972
	CurrencyTZS CurrencyCode = 834
	CurrencyTHB CurrencyCode = 764
	CurrencyTOP CurrencyCode = 776
	CurrencyTTD CurrencyCode = 780
	CurrencyTND CurrencyCode = 788
	CurrencyTRY CurrencyCode = 949
	CurrencyTMT CurrencyCode = 934
	CurrencyUGX CurrencyCode = 800
	CurrencyUAH CurrencyCode = 980
	CurrencyAED CurrencyCode = 784
	CurrencyUSN CurrencyCode = 997
	CurrencyUYI CurrencyCode = 940
	CurrencyUYU CurrencyCode = 858
	CurrencyUZS CurrencyCode = 860
	CurrencyVUV CurrencyCode = 548
	CurrencyVEF CurrencyCode = 937
	CurrencyVND CurrencyCode = 704
	CurrencyYER CurrencyCode = 886
	CurrencyZMW CurrencyCode = 967
	CurrencyZWL CurrencyCode = 932
	CurrencyYUD CurrencyCode = 891
	CurrencyNON CurrencyCode = 998
)
package dtw
import "math"
// WarpDistance returns the dynamic-time-warping distance between ts1
// and ts2. Unlike DTW it never materializes the full cost matrix: only
// the current and previous DP rows are kept, so memory use is
// O(ts1.Len()) while time remains O(ts1.Len()*ts2.Len()). No warp path
// is produced.
func WarpDistance(ts1, ts2 TimeSeries, distFunc DistanceFunc) float64 {
	n1, n2 := ts1.Len(), ts2.Len()
	scratch := make([]float64, 2*n1)
	prev, cur := scratch[n1:], scratch[:n1]

	// Base row (y == 0): reachable only by stepping right through ts1.
	cur[0] = distFunc(ts1.At(0), ts2.At(0))
	for x := 1; x < n1; x++ {
		cur[x] = cur[x-1] + distFunc(ts1.At(x), ts2.At(0))
	}

	for y := 1; y < n2; y++ {
		// The finished row becomes the "previous" row and is overwritten.
		prev, cur = cur, prev
		cur[0] = prev[0] + distFunc(ts1.At(0), ts2.At(y))
		for x := 1; x < n1; x++ {
			best := min(cur[x-1], min(prev[x], prev[x-1]))
			cur[x] = best + distFunc(ts1.At(x), ts2.At(y))
		}
	}
	return cur[n1-1]
}
// DTW performs dynamic time warping on the given timeseries and returns
// the optimal alignment path (as (x, y) index pairs into ts1/ts2,
// ordered from (0, 0) to the final cell) as well as the minimum cost.
func DTW(ts1, ts2 TimeSeries, distFunc DistanceFunc) ([]Point, float64) {
	n1 := ts1.Len()
	n2 := ts2.Len()
	// grid is the full n1×n2 cost matrix, row-major: cell (x, y) lives
	// at grid[y*n1+x] and holds the cheapest cost of any warp path
	// ending there.
	grid := make([]float64, n1*n2)
	grid[0] = distFunc(ts1.At(0), ts2.At(0))
	// First row: only reachable by moving right.
	for x := 1; x < n1; x++ {
		grid[x] = grid[x-1] + distFunc(ts1.At(x), ts2.At(0))
	}
	off := n1
	for y := 1; y < n2; y++ {
		// First cell of the row: only reachable from below.
		grid[off] = grid[off-n1] + distFunc(ts1.At(0), ts2.At(y))
		off++
		for x := 1; x < n1; x++ {
			// Cheapest predecessor: left, below, or diagonal.
			minCost := min(grid[off-1], min(grid[off-n1], grid[off-n1-1]))
			grid[off] = minCost + distFunc(ts1.At(x), ts2.At(y))
			off++
		}
	}
	// Backtrack from the last cell to (0, 0), following the cheapest
	// predecessor at every step.
	path := make([]Point, 0)
	x, y := n1-1, n2-1
	path = append(path, Point{X: x, Y: y})
	for x > 0 || y > 0 {
		o := y*n1 + x
		// Out-of-grid neighbors get +Inf so they are never chosen.
		diag := math.Inf(1)
		left := math.Inf(1)
		down := math.Inf(1)
		if x > 0 && y > 0 {
			diag = grid[o-n1-1]
		}
		if x > 0 {
			left = grid[o-1]
		}
		if y > 0 {
			down = grid[o-n1]
		}
		switch {
		case diag <= left && diag <= down:
			x--
			y--
		case left < diag && left < down:
			x--
		case down < diag && down < left:
			y--
		// Remaining ties (e.g. left == down < diag) step toward the
		// diagonal: shrink whichever coordinate is ahead.
		case x <= y:
			x--
		default:
			y--
		}
		path = append(path, Point{X: x, Y: y})
	}
	// The path was built end-to-start; reverse it in place.
	for i := 0; i < len(path)/2; i++ {
		j := len(path) - i - 1
		path[i], path[j] = path[j], path[i]
	}
	return path, grid[n1*n2-1]
}
func Constrained(ts1, ts2 TimeSeries, window Window, grid Matrix, distFunc DistanceFunc) ([]Point, float64) {
rect := window.Rect()
if grid == nil {
grid = NewMatrixFromWindow(window)
}
r := window.Range(rect.MinY)
grid.Set(r.Min, rect.MinY, distFunc(ts1.At(r.Min), ts2.At(rect.MinY)))
for x := r.Min + 1; x <= r.Max; x++ {
grid.Set(x, rect.MinY, grid.Get(x-1, rect.MinY)+distFunc(ts1.At(x), ts2.At(rect.MinY)))
}
for y := rect.MinY + 1; y <= rect.MaxY; y++ {
r := window.Range(y)
lastCost := grid.Get(r.Min, y-1)
if lastCost == inf {
// lastCost = 0.0
panic("HMM")
}
grid.Set(r.Min, y, lastCost+distFunc(ts1.At(r.Min), ts2.At(y)))
for x := r.Min + 1; x <= r.Max; x++ {
minCost := min(grid.Get(x-1, y), min(grid.Get(x, y-1), grid.Get(x-1, y-1)))
if minCost == inf {
panic("WTF?")
}
grid.Set(x, y, minCost+distFunc(ts1.At(x), ts2.At(y)))
}
}
path := make([]Point, 0)
x, y := rect.MaxX, rect.MaxY
path = append(path, Point{X: x, Y: y})
for x > 0 || y > 0 {
diag := grid.Get(x-1, y-1)
left := grid.Get(x-1, y)
down := grid.Get(x, y-1)
switch {
case diag <= left && diag <= down:
x--
y--
case left < diag && left < down:
x--
case down < diag && down < left:
y--
// Move towards the diagnal if all equal
case x <= y:
x--
default:
y--
}
path = append(path, Point{X: x, Y: y})
}
// Reverse the path
for i := 0; i < len(path)/2; i++ {
j := len(path) - i - 1
path[i], path[j] = path[j], path[i]
}
return path, grid.Get(rect.MaxX, rect.MaxY)
} | dtw/dtw.go | 0.726911 | 0.621282 | dtw.go | starcoder |
package main
import (
"github.com/gen2brain/raylib-go/physics"
"github.com/gen2brain/raylib-go/raylib"
)
const (
	// velocity is the horizontal speed applied to the player body each
	// frame an arrow key is held; jumps use -velocity*4 vertically.
	velocity = 0.5
)
// main runs a Physac/raylib demo: a player box moved with the arrow keys
// over static platforms and walls, with R resetting the body.
func main() {
	screenWidth := float32(800)
	screenHeight := float32(450)
	raylib.SetConfigFlags(raylib.FlagMsaa4xHint)
	raylib.InitWindow(int32(screenWidth), int32(screenHeight), "Physac [raylib] - physics movement")
	// Physac logo drawing position
	logoX := int32(screenWidth) - raylib.MeasureText("Physac", 30) - 10
	logoY := int32(15)
	// Initialize physics and default physics bodies
	physics.Init()
	// Create floor and walls rectangle physics body
	floor := physics.NewBodyRectangle(raylib.NewVector2(screenWidth/2, screenHeight), screenWidth, 100, 10)
	platformLeft := physics.NewBodyRectangle(raylib.NewVector2(screenWidth*0.25, screenHeight*0.6), screenWidth*0.25, 10, 10)
	platformRight := physics.NewBodyRectangle(raylib.NewVector2(screenWidth*0.75, screenHeight*0.6), screenWidth*0.25, 10, 10)
	wallLeft := physics.NewBodyRectangle(raylib.NewVector2(-5, screenHeight/2), 10, screenHeight, 10)
	wallRight := physics.NewBodyRectangle(raylib.NewVector2(screenWidth+5, screenHeight/2), 10, screenHeight, 10)
	// Disable dynamics to floor and walls physics bodies
	floor.Enabled = false
	platformLeft.Enabled = false
	platformRight.Enabled = false
	wallLeft.Enabled = false
	wallRight.Enabled = false
	// Create movement physics body
	body := physics.NewBodyRectangle(raylib.NewVector2(screenWidth/2, screenHeight/2), 50, 50, 1)
	body.FreezeOrient = true // Constrain body rotation to avoid little collision torque amounts
	raylib.SetTargetFPS(60)
	// Main loop: step physics, handle input, then draw.
	for !raylib.WindowShouldClose() {
		// Update created physics objects
		physics.Update()
		if raylib.IsKeyPressed(raylib.KeyR) { // Reset physics input
			// Reset movement physics body position, velocity and rotation
			body.Position = raylib.NewVector2(screenWidth/2, screenHeight/2)
			body.Velocity = raylib.NewVector2(0, 0)
			body.SetRotation(0)
		}
		// Physics body creation inputs
		if raylib.IsKeyDown(raylib.KeyRight) {
			body.Velocity.X = velocity
		} else if raylib.IsKeyDown(raylib.KeyLeft) {
			body.Velocity.X = -velocity
		}
		// Only allow jumping while the body rests on something.
		if raylib.IsKeyDown(raylib.KeyUp) && body.IsGrounded {
			body.Velocity.Y = -velocity * 4
		}
		raylib.BeginDrawing()
		raylib.ClearBackground(raylib.Black)
		raylib.DrawFPS(int32(screenWidth)-90, int32(screenHeight)-30)
		// Draw created physics bodies
		for i, body := range physics.GetBodies() {
			vertexCount := physics.GetShapeVerticesCount(i)
			for j := 0; j < vertexCount; j++ {
				// Get physics bodies shape vertices to draw lines
				// NOTE: GetShapeVertex() already calculates rotation transformations
				vertexA := body.GetShapeVertex(j)
				jj := 0
				if j+1 < vertexCount { // Get next vertex or first to close the shape
					jj = j + 1
				}
				vertexB := body.GetShapeVertex(jj)
				raylib.DrawLineV(vertexA, vertexB, raylib.Green) // Draw a line between two vertex positions
			}
		}
		raylib.DrawText("Use 'ARROWS' to move player", 10, 10, 10, raylib.White)
		raylib.DrawText("Press 'R' to reset example", 10, 30, 10, raylib.White)
		raylib.DrawText("Physac", logoX, logoY, 30, raylib.White)
		raylib.DrawText("Powered by", logoX+50, logoY-7, 10, raylib.White)
		raylib.EndDrawing()
	}
	physics.Close() // Uninitialize physics
	raylib.CloseWindow()
}
package v1
import (
"fmt"
"strings"
"github.com/prometheus/prometheus/pkg/textparse"
)
// A MetricConverter contains the logic to convert a Metric to a Panel.
type MetricConverter interface {
	// Can indicates that a MetricConverter can handle a Metric.
	Can(metric Metric) bool
	// Do contains the code that turns a Metric into one or more Panels.
	Do(metric Metric, options Options) []Panel
}

// Options are passed to the Do() method of each MetricConverter.
type Options struct {
	// CounterChangeFunc is the PromQL function applied to counters,
	// used as "<func>(metric[range])" in generated queries.
	CounterChangeFunc string
	// Labels are label names rendered into template selectors by
	// labelSelectors.
	Labels []string
	// TimeRange is the range-vector duration used in generated queries.
	TimeRange string
}
// CounterConverter handles metrics of type Counter.
// It constructs a query that applies a function, rate() or increase() to it.
type CounterConverter struct{}

// Can implements MetricConverter.
func (cc *CounterConverter) Can(m Metric) bool {
	return m.Type == textparse.MetricTypeCounter
}

// Do implements MetricConverter.
func (cc *CounterConverter) Do(m Metric, o Options) []Panel {
	// One legend entry per label key; fall back to {{instance}} (and hide
	// the legend) when the metric carries no labels.
	legend := []string{}
	for _, l := range m.LabelKeys {
		legend = append(legend, fmt.Sprintf("{{%s}}", l))
	}
	hasLegend := true
	if len(legend) == 0 {
		legend = append(legend, "{{instance}}")
		hasLegend = false
	}
	g := Graph{}
	g.Description = string(m.Help)
	g.Format = FindRangeFormat(m.Name)
	g.HasLegend = hasLegend
	g.Legend = strings.Join(legend, " ")
	g.Title = fmt.Sprintf("%s %s over %s", string(m.Name), o.CounterChangeFunc, o.TimeRange)
	g.Queries = []GraphQuery{
		{Query: fmt.Sprintf("%s(%s%s[%s])", o.CounterChangeFunc, m.Name, labelSelectors(o.Labels), o.TimeRange)},
	}
	return []Panel{g}
}
// GaugeConverter handles metrics of type Gauge.
// It returns one Singlestat Panel and because of that will only handle metrics without any labels.
type GaugeConverter struct{}

// Can implements MetricConverter.
func (gc *GaugeConverter) Can(m Metric) bool {
	return m.Type == textparse.MetricTypeGauge && len(m.LabelKeys) == 0
}

// Do implements MetricConverter.
func (gc *GaugeConverter) Do(m Metric, o Options) []Panel {
	s := Singlestat{}
	s.Description = string(m.Help)
	s.Format = FindFormat(m.Name)
	s.Query = m.Name + labelSelectors(o.Labels)
	s.Title = m.Name
	s.ValueName = "current"
	return []Panel{s}
}

// GaugeDerivConverter handles Metrics of type Gauge that can be converted into Panels of type Graph.
// The deriv() function is applied to the Metric.
// A conversion is only done if the suffix of the Metric suggests that calculating per-second derivative is of interest.
type GaugeDerivConverter struct{}

// Can implements MetricConverter.
func (gd *GaugeDerivConverter) Can(m Metric) bool {
	return m.Type == textparse.MetricTypeGauge && strings.HasSuffix(m.Name, "_bytes")
}

// Do implements MetricConverter.
func (gd *GaugeDerivConverter) Do(m Metric, o Options) []Panel {
	// One legend entry per label key; fall back to {{instance}} (and hide
	// the legend) when the metric carries no labels.
	legend := []string{}
	for _, lk := range m.LabelKeys {
		legend = append(legend, fmt.Sprintf("{{%s}}", lk))
	}
	hasLegend := true
	if len(legend) == 0 {
		legend = append(legend, "{{instance}}")
		hasLegend = false
	}
	g := Graph{}
	g.Description = string(m.Help)
	g.Format = FindRangeFormat(m.Name)
	g.HasLegend = hasLegend
	g.Legend = strings.Join(legend, " ")
	g.Title = fmt.Sprintf("%s %s over %s", string(m.Name), "deriv", o.TimeRange)
	g.Queries = []GraphQuery{
		{Query: fmt.Sprintf("%s(%s%s[%s])", "deriv", m.Name, labelSelectors(o.Labels), o.TimeRange)},
	}
	return []Panel{g}
}
// GaugeTimestampConverter handles Metrics whose suffix suggests that their value is a unix timestamp.
// It returns a Singlestat Panel and because of that will only handle metrics without any labels.
type GaugeTimestampConverter struct{}

// Can implements MetricConverter.
func (gt *GaugeTimestampConverter) Can(m Metric) bool {
	return m.Type == textparse.MetricTypeGauge &&
		(strings.HasSuffix(m.Name, "_timestamp_seconds") || strings.HasSuffix(m.Name, "_timestamp")) &&
		len(m.LabelKeys) == 0
}

// Do implements MetricConverter.
func (gt *GaugeTimestampConverter) Do(m Metric, o Options) []Panel {
	query := m.Name + labelSelectors(o.Labels)
	// Scale second-resolution timestamps by 1000 — presumably to the
	// milliseconds the dashboard's date formatting expects; confirm.
	if strings.HasSuffix(m.Name, "_timestamp_seconds") {
		query = query + " * 1000"
	}
	s := Singlestat{}
	s.Description = string(m.Help)
	s.Format = FindFormat(m.Name)
	s.Query = query
	s.Title = m.Name
	s.ValueName = "current"
	return []Panel{s}
}

// GaugeInfoConverter handles Metrics whose suffix suggests that information is stored in labels not in the value.
// Examples of such metrics are "go_info" or "prometheus_build_info".
// It returns one Singlestat Panel per label key.
type GaugeInfoConverter struct{}

// Can implements MetricConverter.
func (gi *GaugeInfoConverter) Can(m Metric) bool {
	return m.Type == textparse.MetricTypeGauge && strings.HasSuffix(m.Name, "_info")
}

// Do implements MetricConverter.
func (gi *GaugeInfoConverter) Do(m Metric, o Options) []Panel {
	panels := []Panel{}
	for _, lk := range m.LabelKeys {
		s := Singlestat{}
		s.Description = string(m.Help)
		s.Format = defaultFormat
		s.Legend = fmt.Sprintf("{{%s}}", lk)
		s.Query = m.Name + labelSelectors(o.Labels)
		s.Title = fmt.Sprintf("%s - %s", string(m.Name), lk)
		s.ValueName = "name"
		panels = append(panels, s)
	}
	return panels
}
// HistogramConverter handles metrics of type Histogram.
// It returns four Panels of type Graph: avg, p50, p90 and p99.
// (The comment previously advertised p95 while the code built p90.)
type HistogramConverter struct{}

// Can implements MetricConverter.
func (h *HistogramConverter) Can(m Metric) bool {
	return m.Type == textparse.MetricTypeHistogram
}

// Do implements MetricConverter.
func (h *HistogramConverter) Do(m Metric, o Options) []Panel {
	// Build a legend from the label keys, skipping "le": it is the bucket
	// boundary label consumed by histogram_quantile.
	legend := []string{}
	for _, lk := range m.LabelKeys {
		if lk == "le" {
			continue
		}
		legend = append(legend, fmt.Sprintf("{{%s}}", lk))
	}
	hasLegend := true
	if len(legend) == 0 {
		legend = append(legend, "{{instance}}")
		hasLegend = false
	}
	legendFormatted := strings.Join(legend, " ")
	selectors := labelSelectors(o.Labels)
	// graph builds one Graph panel; the four panels differ only in their
	// title suffix and query.
	graph := func(titleSuffix, query string) Graph {
		g := Graph{}
		g.Description = string(m.Help)
		g.Format = FindFormat(m.Name)
		g.HasLegend = hasLegend
		g.Legend = legendFormatted
		g.Title = fmt.Sprintf("%s %s", string(m.Name), titleSuffix)
		g.Queries = []GraphQuery{{Query: query}}
		return g
	}
	// quantile builds the histogram_quantile panel for quantile q.
	quantile := func(titleSuffix, q string) Graph {
		return graph(titleSuffix, fmt.Sprintf("histogram_quantile(%s, rate(%s_bucket%s[%s]))", q, m.Name, selectors, o.TimeRange))
	}
	avg := graph("avg", fmt.Sprintf("%s_sum%s / %s_count%s", m.Name, selectors, m.Name, selectors))
	return []Panel{avg, quantile("p50", "0.5"), quantile("p90", "0.9"), quantile("p99", "0.99")}
}
// GaugeWithLabelsConverter handles metrics of type Gauge that define labels.
// It returns one Panel of type Graph.
type GaugeWithLabelsConverter struct{}

// Can implements MetricConverter.
func (gl *GaugeWithLabelsConverter) Can(m Metric) bool {
	return m.Type == textparse.MetricTypeGauge && len(m.LabelKeys) > 0
}

// Do implements MetricConverter.
func (gl *GaugeWithLabelsConverter) Do(m Metric, o Options) []Panel {
	legend := []string{}
	for _, lk := range m.LabelKeys {
		legend = append(legend, fmt.Sprintf("{{%s}}", lk))
	}
	query := m.Name + labelSelectors(o.Labels)
	// Scale second-resolution timestamps by 1000 — presumably to the
	// milliseconds the dashboard's date formatting expects; confirm.
	if strings.HasSuffix(m.Name, "_timestamp_seconds") {
		query = query + " * 1000"
	}
	g := Graph{}
	g.Description = string(m.Help)
	g.Format = FindFormat(m.Name)
	g.HasLegend = true
	g.Legend = strings.Join(legend, " ")
	g.Title = m.Name
	g.Queries = []GraphQuery{
		{Query: query},
	}
	return []Panel{g}
}
// labelSelectors renders the given label names as a Grafana-templated
// PromQL selector, e.g. {job=\"$job\",instance=\"$instance\"} (the quotes
// are backslash-escaped for later embedding). An empty label list yields
// the empty string.
func labelSelectors(labels []string) string {
	if len(labels) == 0 {
		return ""
	}
	parts := make([]string, len(labels))
	for i, label := range labels {
		parts[i] = fmt.Sprintf(`%s=\"$%s\"`, label, label)
	}
	return "{" + strings.Join(parts, ",") + "}"
}
package pso
import (
"math"
"math/rand"
"time"
)
type (
	// particle is a single member of the swarm.
	particle struct {
		position, best, velocity []float64 // current position, personal-best position, velocity
		bestCost                 float64   // cost of the personal-best position
	}
	// dimension bounds one coordinate of the search space.
	dimension struct {
		min, max float64
	}
	// ParticleSwarmOptimizer configures and runs a particle swarm search.
	// Size == -1 and NaN coefficients are sentinels meaning "use the tuned
	// defaults chosen by setDefaults".
	ParticleSwarmOptimizer struct {
		Iterations, Size     int
		Ω, Φp, Φg            float64
		Rand                 *rand.Rand
		Dimensions           []dimension
		OptimizationFunction OptimizationFunction
	}
	// OptimizationFunction maps a candidate position to its cost; Solve
	// minimizes it.
	OptimizationFunction func([]float64) float64
)
// New builds a ParticleSwarmOptimizer for the given objective function.
// Size and the Ω/Φp/Φg coefficients start as sentinels (-1 / NaN) and are
// replaced with tuned defaults by setDefaults when Solve runs.
func New(f OptimizationFunction) *ParticleSwarmOptimizer {
	nan := math.NaN()
	return &ParticleSwarmOptimizer{
		Iterations:           500,
		Size:                 -1,
		Ω:                    nan,
		Φp:                   nan,
		Φg:                   nan,
		Rand:                 rand.New(rand.NewSource(time.Now().UnixNano())),
		Dimensions:           make([]dimension, 0),
		OptimizationFunction: f,
	}
}

// AddDimension appends a search-space bound [min, max] for one coordinate.
func (pso *ParticleSwarmOptimizer) AddDimension(min, max float64) {
	dim := dimension{min: min, max: max}
	pso.Dimensions = append(pso.Dimensions, dim)
}
// Solve runs the particle swarm optimization and returns the best position
// found together with its cost.
//
// Fixes over the previous revision:
//   - the swarm was sized with pso.Iterations instead of pso.Size;
//   - each particle's bestCost was left at zero instead of the cost of its
//     starting position, so personal bests were never updated whenever the
//     objective is positive.
func (pso *ParticleSwarmOptimizer) Solve() ([]float64, float64) {
	pso.setDefaults()
	pop := make([]particle, pso.Size)
	globalBest := make([]float64, len(pso.Dimensions))
	globalBestCost := 0.0
	f := pso.OptimizationFunction
	// Scatter the particles uniformly over the search space with random
	// initial velocities in [-(max-min), +(max-min)] per dimension.
	for i := range pop {
		pop[i].position = make([]float64, len(pso.Dimensions))
		pop[i].best = make([]float64, len(pso.Dimensions))
		pop[i].velocity = make([]float64, len(pso.Dimensions))
		for j, dim := range pso.Dimensions {
			pop[i].position[j] = dim.min + (dim.max-dim.min)*pso.Rand.Float64()
			pop[i].best[j] = pop[i].position[j]
			pop[i].velocity[j] = -(dim.max - dim.min) + (dim.max-dim.min)*2*pso.Rand.Float64()
		}
		cost := f(pop[i].position)
		pop[i].bestCost = cost
		if i == 0 || cost < globalBestCost {
			copy(globalBest, pop[i].position)
			globalBestCost = cost
		}
	}
	for it := 0; it < pso.Iterations; it++ {
		for i := range pop {
			rP, rG := pso.Rand.Float64(), pso.Rand.Float64()
			for j, dim := range pso.Dimensions {
				// Standard PSO velocity update: inertia plus random pulls
				// toward the personal and global bests; clamp to bounds.
				pop[i].velocity[j] = pso.Ω*pop[i].velocity[j] +
					pso.Φp*rP*(pop[i].best[j]-pop[i].position[j]) +
					pso.Φg*rG*(globalBest[j]-pop[i].position[j])
				pop[i].position[j] += pop[i].velocity[j]
				if pop[i].position[j] > dim.max {
					pop[i].position[j] = dim.max
				} else if pop[i].position[j] < dim.min {
					pop[i].position[j] = dim.min
				}
			}
			cost := f(pop[i].position)
			if cost < pop[i].bestCost {
				copy(pop[i].best, pop[i].position)
				pop[i].bestCost = cost
			}
			if cost < globalBestCost {
				copy(globalBest, pop[i].position)
				globalBestCost = cost
			}
		}
	}
	return globalBest, globalBestCost
}
// setDefaults fills in Size, Ω, Φp and Φg (when still at their sentinel
// values -1 / NaN) from a lookup table of tuned PSO parameters, choosing
// the row whose dimensionality and evaluation budget are closest to this
// optimizer's configuration.
func (pso *ParticleSwarmOptimizer) setDefaults() {
	type parameter struct {
		dimensions, evaluations, size int
		ω, φp, φg float64
	}
	// this table is from:
	// http://www.cof.orst.edu/cof/teach/fe640/Class_Materials/Particle%20Swarm/PSO%20parameters.pdf
	parameters := []parameter{
		{2, 400, 25, 0.3925, 2.5586, 1.3358},
		{2, 4000, 156, 0.4091, 2.1304, 1.0575},
		{5, 1000, 63, -0.3593, -0.7238, 2.0289},
		{5, 10000, 223, -0.3699, -0.1207, 3.3657},
		{10, 2000, 63, 0.6571, 1.6319, 0.6239},
		{10, 20000, 53, -0.3488, -0.2746, 4.8976},
		{20, 40000, 69, -0.4438, -0.2699, 3.3950},
		{20, 400000, 149, -0.3236, -0.1136, 3.9789},
		{30, 600000, 95, -0.6031, -0.6485, 2.6475},
		{50, 100000, 106, -0.2256, -0.1564, 3.8876},
		{100, 200000, 161, -0.2089, -0.0787, 3.7637},
	}
	// First pass: pick the table dimensionality closest to the configured
	// number of dimensions (remembering that row's evaluation count).
	dim := parameters[0].dimensions
	evs := parameters[0].evaluations
	for _, p := range parameters {
		if distance(len(pso.Dimensions), p.dimensions) < distance(len(pso.Dimensions), dim) {
			dim = p.dimensions
			evs = p.evaluations
		}
	}
	// Second pass: among rows with that dimensionality, pick the one whose
	// evaluation budget is closest to the configured iteration count.
	sz, ω, φp, φg := 0, 0.0, 0.0, 0.0
	for _, p := range parameters {
		if p.dimensions == dim &&
			distance(pso.Iterations, p.evaluations) <= distance(pso.Iterations, evs) {
			evs = p.evaluations
			sz, ω, φp, φg = p.size, p.ω, p.φp, p.φg
		}
	}
	// Only overwrite values the caller has not set explicitly.
	if pso.Size < 0 {
		pso.Size = sz
	}
	if math.IsNaN(pso.Ω) {
		pso.Ω = ω
	}
	if math.IsNaN(pso.Φp) {
		pso.Φp = φp
	}
	if math.IsNaN(pso.Φg) {
		pso.Φg = φg
	}
}
// distance returns the absolute difference between two ints.
func distance(x, y int) int {
	if y >= x {
		return y - x
	}
	return x - y
}
package quantum
import (
"bytes"
"fmt"
"math"
"strconv"
)
// Qubit is a representation of a qubit.
type Qubit struct {
	basis0 complex128 // amplitude of the |0> basis state
	basis1 complex128 // amplitude of the |1> basis state
}

// MakeQubit creates a qubit from the provided basis states.
// It returns a Qubit when the amplitudes form a unit vector and an error
// otherwise.
func MakeQubit(basis0 complex128, basis1 complex128) (*Qubit, error) {
	// Reject amplitudes that do not describe a normalized state.
	if !checkIfUnitVector(basis0, basis1) {
		return nil, fmt.Errorf("a qubit must be a unit vector")
	}
	return &Qubit{basis0: basis0, basis1: basis1}, nil
}

// Basis0 retrieves the value of basis0 for this Qubit.
func (qubit *Qubit) Basis0() complex128 {
	return qubit.basis0
}

// Basis1 retrieves the value of basis1 for this Qubit.
func (qubit *Qubit) Basis1() complex128 {
	return qubit.basis1
}

// Update replaces this Qubit's amplitudes with the provided values.
// It returns an error (leaving the qubit unchanged) when the new values do
// not form a unit vector.
func (qubit *Qubit) Update(basis0 complex128, basis1 complex128) error {
	if !checkIfUnitVector(basis0, basis1) {
		return fmt.Errorf("a qubit must be a unit vector")
	}
	qubit.basis0, qubit.basis1 = basis0, basis1
	return nil
}
// String writes the provided Qubit as a string in Dirac-esque notation,
// e.g. "(1+0i)|0> + (0+0i)|1>". The previous revision left a dangling
// " + " separator after the final term.
func (qubit *Qubit) String() string {
	return fmt.Sprintf("%v|0> + %v|1>", qubit.basis0, qubit.basis1)
}
// checkIfUnitVector reports whether the squared magnitudes of the given
// amplitudes sum to 1, within float64EqualityThreshold.
//
// The magnitude of a complex amplitude a+bi is sqrt(a*a + b*b). The
// previous implementation used real(state*state) = a*a - b*b, which
// rejects valid states with nonzero imaginary parts (e.g. (1+i)/2).
func checkIfUnitVector(states ...complex128) bool {
	var sum float64
	for _, state := range states {
		re, im := real(state), imag(state)
		sum += re*re + im*im
	}
	return math.Abs(sum-1.0) <= float64EqualityThreshold
}
// TensoredQubits is a representation of a tensor product of qubits.
type TensoredQubits struct {
states []complex128 // states are the states of the basis vectors for this qubit.
}
// MakeTensoredQubits creates a tensor product of qubits from the provided states.
// It returns a TensoredQubits struct if the provided states represent a unit vector.
// It returns an error if the provided states do not represent a unit vector.
func MakeTensoredQubits(states ...complex128) (*TensoredQubits, error) {
tensoredQubits := new(TensoredQubits)
// Make sure the states provided represent a unit vector
if !checkIfUnitVector(states...) {
return nil, fmt.Errorf("a tensor product of qubits must be a unit vector")
}
return tensoredQubits, nil
}
// String writes the provided TensoredQubits as a string in Dirac-esque notation.
func (tensoredQubits TensoredQubits) String() string {
var stateBuffer bytes.Buffer
binaryDigits := int(math.Sqrt(float64(len(tensoredQubits.states))))
for i, state := range tensoredQubits.states {
stateBuffer.WriteString(fmt.Sprintf("%v|%0*s> + ", state, binaryDigits, strconv.FormatInt(int64(i), 2)))
}
stateString := stateBuffer.String()
return stateString[:len(stateString)-2]
} | qubit.go | 0.86592 | 0.766162 | qubit.go | starcoder |
package outputs
import (
"time"
"barista.run/bar"
"barista.run/timing"
)
// AtTimeDelta creates a TimedOutput from a function by repeatedly calling it at
// different times, using a fixed point in time as a reference point.
type AtTimeDelta func(time.Duration) bar.Output

// From sets the reference point and creates a timed output that repeats the
// given function. The repeat rate is:
// - delta < 1 minute: every second
// - delta < 1 hour: every minute
// - otherwise: every hour
// This is useful if the output displays a single time unit (e.g. 3m, or 8h).
func (a AtTimeDelta) From(time time.Time) bar.TimedOutput {
	// timeDeltaCoarse selects the refresh granularity for each delta.
	return &repeatOnDelta{time, a, timeDeltaCoarse}
}

// FromFine is From with more rapid updates:
// - delta < 1 hour: every second
// - delta < 24 hours: every minute
// - otherwise: every hour
// This is useful if the output displays two time units (e.g. 5h3m, or 2d7h).
func (a AtTimeDelta) FromFine(time time.Time) bar.TimedOutput {
	// timeDeltaFine selects the refresh granularity for each delta.
	return &repeatOnDelta{time, a, timeDeltaFine}
}
// repeatOnDelta renders outputFunc at the delta from the embedded reference
// time, refreshing whenever the truncated delta next changes.
type repeatOnDelta struct {
	time.Time
	outputFunc  func(time.Duration) bar.Output
	granularity func(time.Duration) time.Duration
}

// Segments renders the output for the current (granularity-truncated) delta.
func (r *repeatOnDelta) Segments() []*bar.Segment {
	delta, truncated, granularity := r.durations()
	// Negative granularity means the reference point is in the future;
	// shift the truncation so the displayed delta is not overstated.
	if truncated > delta && granularity < 0 {
		truncated += granularity
	}
	o := r.outputFunc(truncated)
	if o == nil {
		return nil
	}
	return o.Segments()
}

// NextRefresh reports when the truncated delta next changes, i.e. one
// granularity step past the current truncation point.
func (r *repeatOnDelta) NextRefresh() time.Time {
	delta, truncated, granularity := r.durations()
	if truncated <= delta {
		if granularity > 0 {
			truncated += granularity
		} else {
			truncated -= granularity
		}
	}
	return r.Add(truncated)
}

// durations returns the raw delta from the reference time, the delta
// truncated to the refresh granularity, and the granularity itself
// (negated when the reference point lies in the future).
func (r *repeatOnDelta) durations() (delta, truncated, granularity time.Duration) {
	delta = timing.Now().Sub(r.Time)
	if delta > 0 {
		granularity = r.granularity(delta + 1)
	} else {
		granularity = -r.granularity(-delta - 1)
	}
	truncated = delta / granularity * granularity
	return // delta, truncated, granularity
}
func timeDeltaFine(in time.Duration) time.Duration {
if in <= time.Hour {
return time.Second
}
if in <= 24*time.Hour {
return time.Minute
}
return time.Hour
}
func timeDeltaCoarse(in time.Duration) time.Duration {
if in <= time.Minute {
return time.Second
}
if in <= time.Hour {
return time.Minute
}
return time.Hour
} | outputs/timedelta.go | 0.81457 | 0.639624 | timedelta.go | starcoder |
package cellularautomata
import (
"math"
"github.com/arbori/population.git/population/lattice"
"github.com/arbori/population.git/population/rule"
)
// Cellularautomata couples a lattice of cell states with a transition rule
// and a neighborhood definition.
type Cellularautomata struct {
	env       lattice.Lattice // current generation of cell states
	mirror    lattice.Lattice // scratch lattice the next generation is written into
	states    []float32       // the set of admissible cell states
	motion    [][]int         // neighborhood offsets, one []int per neighbor
	rule      rule.Rule       // transition rule applied to each neighborhood
	dimention int             // number of lattice dimensions
}

// New builds a Cellularautomata with the given admissible states,
// neighborhood offsets, transition rule and lattice sizes (one per
// dimension). Both the environment and its mirror start at state 0.
func New(states []float32, motion [][]int, rule rule.Rule, dim ...int) (Cellularautomata, error) {
	var err error
	var env lattice.Lattice
	var mirror lattice.Lattice
	env, err = lattice.NewWithValue(float32(0), dim...)
	if err != nil {
		return Cellularautomata{}, err
	}
	mirror, err = lattice.NewWithValue(float32(0), dim...)
	if err != nil {
		return Cellularautomata{}, err
	}
	return Cellularautomata{
		env:       env,
		mirror:    mirror,
		states:    states,
		motion:    motion,
		rule:      rule,
		dimention: len(dim),
	}, nil
}
// NeighborhoodValues returns the cell states in the neighborhood of X, in
// the order of the motion offsets. Coordinates wrap around the lattice
// limits (toroidal boundary). An empty slice is returned when the
// dimensionality of X does not match the motion offsets.
func (ca *Cellularautomata) NeighborhoodValues(X ...int) []float32 {
	size := len(ca.motion)
	dimention := len(ca.motion[0])
	if dimention != len(X) {
		return make([]float32, 0)
	}
	neighborhood := make([]float32, size)
	point := make([]int, dimention)
	for n := 0; n < size; n += 1 {
		for c := 0; c < dimention; c += 1 {
			point[c] = ca.motion[n][c] + X[c]
			// Wrap the coordinate around the lattice edges.
			if point[c] < 0 {
				point[c] = ca.env.Limits[c] + point[c]
			} else if point[c] >= ca.env.Limits[c] {
				point[c] = point[c] - ca.env.Limits[c]
			}
		}
		neighborhood[n] = ca.env.At(point...).(float32)
	}
	return neighborhood
}
// Get returns the state at position x; it falls back to 0 when the lattice
// reports no value (env.At returns nil) for that position.
func (ca *Cellularautomata) Get(x ...int) float32 {
	var result float32 = 0
	cell := ca.env.At(x...)
	if cell != nil {
		result = cell.(float32)
	}
	return result
}

// Set stores value at position x.
// The empty loop is a membership check: it panics (index out of range)
// when value is not one of the configured states.
func (ca *Cellularautomata) Set(value float32, x ...int) {
	for i := 0; ca.states[i] != value; i += 1 {
	}
	ca.env.Set(value, x...)
}

// Dimention returns the number of lattice dimensions.
// (The spelling is kept as-is; it is part of the exported API.)
func (ca *Cellularautomata) Dimention() int {
	return ca.env.Dimention
}

// Limits returns the lattice size along each dimension.
func (ca *Cellularautomata) Limits() []int {
	return ca.env.Limits
}
// Evolve advances the automaton one generation: the transition rule is
// applied to every cell's neighborhood into the mirror lattice, then the
// mirror is copied back into the environment.
func (ca *Cellularautomata) Evolve() {
	point := make([]int, ca.env.Dimention)
	position := 0
	overflowed := 1
	// Start at (-1, 0, ..., 0) so the first inc lands on the origin.
	point[0] = -1
	for inc(&point, &ca.env.Limits, position, overflowed) {
		neighborhood := ca.NeighborhoodValues(point...)
		state := ca.rule.Transition(neighborhood)
		ca.mirror.Set(state, point...)
	}
	// Second sweep: commit the computed generation back into env.
	for i := 0; i < len(point); i += 1 {
		point[i] = 0
	}
	position = 0
	overflowed = 1
	point[0] = -1
	for inc(&point, &ca.env.Limits, position, overflowed) {
		ca.env.Set(ca.mirror.At(point...), point...)
	}
}
// inc advances point to the next lattice coordinate in odometer order,
// carrying overflow into higher dimensions. It returns false when the
// iteration is exhausted (or the point/limits lengths disagree).
//
// The previous revision advanced the carry position twice (position += 1
// followed by a recursive call with position+1), skipping every other
// dimension and ending multi-dimensional sweeps after the first row.
func inc(point *[]int, limits *[]int, position int, overflowed int) bool {
	if position >= len(*point) || len(*point) != len(*limits) || (*point)[position] >= (*limits)[position] {
		return false
	}
	(*point)[position] += overflowed
	if (*point)[position] >= (*limits)[position] {
		// Carry: reset this coordinate and bump the next dimension.
		(*point)[position] = 0
		return inc(point, limits, position+1, 1)
	}
	return true
}
// NeighborhoodMotionVonNeumman builds the offsets of a von Neumann
// neighborhood of radius r in d dimensions: all points whose Manhattan
// distance from the origin is at most r, d*r*(r+1)+1 offsets in total.
//
// The previous revision compared the Manhattan distance against 1 instead
// of r, which filled the result with duplicate offsets for any r > 1.
func NeighborhoodMotionVonNeumman(d int, r int) [][]int {
	var size int = d*r*(r+1) + 1
	var sum float64
	result := make([][]int, size)
	point := nextPointer(nil, d, r)
	for i := 0; i < size; {
		sum = 0
		for j := d - 1; j >= 0; j -= 1 {
			sum += math.Abs(float64(point[j]))
		}
		if sum <= float64(r) {
			result[i] = point
			i += 1
		}
		point = nextPointer(point, d, r)
	}
	return result
}

// NeighborhoodMotionMoore builds the offsets of a Moore neighborhood of
// radius r in d dimensions: the full (2r+1)^d hypercube around the origin.
func NeighborhoodMotionMoore(d int, r int) [][]int {
	var size int = int(math.Pow(float64(2*r+1), float64(d)))
	result := make([][]int, size)
	result[0] = nextPointer(nil, d, r)
	for i := 1; i < size; i += 1 {
		result[i] = nextPointer(result[i-1], d, r)
	}
	return result
}

// nextPointer returns the coordinate after current in odometer order over
// [-r, r]^d; a nil current yields the starting corner (-r, ..., -r).
func nextPointer(current []int, d int, r int) []int {
	next := make([]int, d)
	for j := 0; j < d; j += 1 {
		if current == nil {
			next[j] = -r
		} else {
			next[j] = current[j]
		}
	}
	// Increment the least-significant coordinate, carrying leftward.
	for j := d - 1; current != nil && j >= 0; j -= 1 {
		next[j] = current[j] + 1
		if next[j] <= r {
			break
		}
		next[j] = -r
	}
	return next
}
package main
import (
"fmt"
"math"
"os"
"github.com/pointlander/datum/iris"
)
// MaxEntropy is the maximum entropy of a three-class label distribution;
// Reduction.GetEntropy divides by it to normalize into [0, 1].
var MaxEntropy = math.Log2(3)
// Embeddings is a set of embeddings
type Embeddings struct {
	Columns    int         // number of feature columns per embedding
	Network    *Network    // network that produced the embeddings
	Embeddings []Embedding // the embedded rows
}

// Embedding is an embedding with a label and features
type Embedding struct {
	iris.Iris
	Source   int       // index of the source row
	Features []float64 // feature vector of length Columns
}

// Copy makes a shallow copy of the embeddings.
// The Network pointer is now carried over as well: it was previously
// dropped, leaving copies without a network, unlike the children built by
// VarianceReduction which do propagate it.
func (e *Embeddings) Copy() Embeddings {
	embeddings := Embeddings{
		Columns:    e.Columns,
		Network:    e.Network,
		Embeddings: make([]Embedding, len(e.Embeddings)),
	}
	copy(embeddings.Embeddings, e.Embeddings)
	return embeddings
}
// Variance computes the population variance of the feature values in the
// given column across all embeddings.
func (e *Embeddings) Variance(column int) float64 {
	count := float64(len(e.Embeddings))
	total := 0.0
	for _, row := range e.Embeddings {
		total += row.Features[column]
	}
	mean := total / count
	sumSquares := 0.0
	for _, row := range e.Embeddings {
		diff := row.Features[column] - mean
		sumSquares += diff * diff
	}
	return sumSquares / count
}
// PivotVariance computes the variance for the left and right features with column
// (left: values <= pivot, right: values > pivot; each side uses its own mean).
func (e *Embeddings) PivotVariance(column int, pivot float64) (left, right float64) {
	// First pass: count and sum the feature values on each side of the pivot.
	nLeft, nRight, sumLeft, sumRight := 0, 0, 0.0, 0.0
	for _, row := range e.Embeddings {
		if value := row.Features[column]; value > pivot {
			nRight++
			sumRight += value
		} else {
			nLeft++
			sumLeft += value
		}
	}
	averageLeft, averageRight := sumLeft, sumRight
	if nLeft != 0 {
		averageLeft /= float64(nLeft)
	}
	if nRight != 0 {
		averageRight /= float64(nRight)
	}
	// Second pass: accumulate squared deviations from each side's mean.
	for _, row := range e.Embeddings {
		if value := row.Features[column]; value > pivot {
			v := value - averageRight
			right += v * v
		} else {
			v := value - averageLeft
			left += v * v
		}
	}
	if nLeft != 0 {
		left /= float64(nLeft)
	}
	if nRight != 0 {
		right /= float64(nRight)
	}
	return left, right
}
// VarianceReduction implements variance reduction algorithm:
// it greedily picks the column and pivot maximizing the drop in variance,
// splits the rows on it, and recurses up to the given depth. label
// accumulates the path bits (bit `count` is set when going right).
func (e *Embeddings) VarianceReduction(depth int, label, count uint) *Reduction {
	reduction := Reduction{
		Embeddings: e,
		Label:      label,
		Depth:      count,
	}
	length := len(e.Embeddings)
	if depth <= 0 || length == 0 {
		return &reduction
	}
	// Try every (column, pivot) pair and keep the split with the largest
	// variance reduction.
	for k := 0; k < e.Columns; k++ {
		total := e.Variance(k)
		for _, row := range e.Embeddings {
			pivot := row.Features[k]
			a, b := e.PivotVariance(k, pivot)
			if cost := total - (a + b); cost > reduction.Max {
				reduction.Max, reduction.Column, reduction.Pivot = cost, k, pivot
			}
		}
	}
	left := Embeddings{
		Columns:    e.Columns,
		Network:    e.Network,
		Embeddings: make([]Embedding, 0, length),
	}
	right := Embeddings{
		Columns:    e.Columns,
		Network:    e.Network,
		Embeddings: make([]Embedding, 0, length),
	}
	// Partition: values above the pivot go right, the rest go left.
	for _, row := range e.Embeddings {
		if row.Features[reduction.Column] > reduction.Pivot {
			right.Embeddings = append(right.Embeddings, row)
		} else {
			left.Embeddings = append(left.Embeddings, row)
		}
	}
	reduction.Left, reduction.Right =
		left.VarianceReduction(depth-1, label, count+1),
		right.VarianceReduction(depth-1, label|(1<<count), count+1)
	return &reduction
}
// PrintTable prints a table of embeddings as markdown to out: the decision
// tree, a table of leaf rows (label, cluster, features) and the PCA plot
// reference. Leaves are tree nodes without children or with Max < cutoff.
func (r *Reduction) PrintTable(out *os.File, mode Mode, cutoff float64) {
	if out == nil {
		return
	}
	fmt.Fprintf(out, "# Training cost vs epochs\n")
	// NOTE(review): this format string has no verbs but receives two
	// arguments; the markdown image link appears to have been lost —
	// restore the original "![...](...%s...)" form and confirm.
	fmt.Fprintf(out, "]\n\n", mode.String(), mode.String())
	fmt.Fprintf(out, "# Decision tree\n")
	fmt.Fprintf(out, "```go\n")
	fmt.Fprintf(out, "%s\n", r.String())
	fmt.Fprintf(out, "```\n\n")
	headers, rows := make([]string, 0, Width2+2), make([][]string, 0, 256)
	headers = append(headers, "label", "cluster")
	for i := 0; i < r.Embeddings.Columns; i++ {
		headers = append(headers, fmt.Sprintf("%d", i))
	}
	// load collects one table row per embedding in each leaf node.
	var load func(r *Reduction)
	load = func(r *Reduction) {
		if r == nil {
			return
		}
		if (r.Left == nil && r.Right == nil) || r.Max < cutoff {
			for _, item := range r.Embeddings.Embeddings {
				row := make([]string, 0, r.Embeddings.Columns+2)
				label, predicted := item.Label, r.Label
				row = append(row, label, fmt.Sprintf("%d", predicted))
				for _, value := range item.Features {
					row = append(row, fmt.Sprintf("%f", value))
				}
				rows = append(rows, row)
			}
			return
		}
		load(r.Left)
		load(r.Right)
	}
	load(r.Left)
	load(r.Right)
	fmt.Fprintf(out, "# Output of neural network middle layer\n")
	printTable(out, headers, rows)
	fmt.Fprintf(out, "\n")
	plotData(r.Embeddings, fmt.Sprintf("results/embedding_%s.png", mode.String()))
	fmt.Fprintf(out, "# PCA of network middle layer\n")
	// NOTE(review): same issue as above — verbless format string with two
	// extra arguments; the image link text is likely missing.
	fmt.Fprintf(out, "]\n", mode.String(), mode.String())
}
// GetEntropy gets the entropy of the true-label distribution within each
// predicted cluster (leaf of the tree, where a leaf is a node without
// children or with Max < cutoff), normalized by MaxEntropy into [0, 1].
// Lower values mean purer clusters.
func (r *Reduction) GetEntropy(cutoff float64) (entropy float64) {
	// histograms maps cluster label -> counts of the three iris classes.
	histograms := make(map[uint][3]uint)
	var count func(r *Reduction)
	count = func(r *Reduction) {
		if r == nil {
			return
		}
		if (r.Left == nil && r.Right == nil) || r.Max < cutoff {
			predicted := r.Label
			for _, item := range r.Embeddings.Embeddings {
				histogram := histograms[predicted]
				histogram[iris.Labels[item.Label]]++
				histograms[predicted] = histogram
			}
			return
		}
		count(r.Left)
		count(r.Right)
	}
	count(r.Left)
	count(r.Right)
	total := uint(0)
	for _, histogram := range histograms {
		e, s := 0.0, uint(0)
		for _, c := range histogram {
			if c == 0 {
				continue
			}
			s += c
			counts := float64(c)
			e += counts * math.Log2(counts)
		}
		total += s
		sum := float64(s)
		// sum*log2(sum) - Σ c*log2(c) == Σ c*log2(sum/c): the cluster's
		// unnormalized entropy weighted by its size.
		entropy += (sum*math.Log2(sum) - e)
	}
	return entropy / (float64(total) * MaxEntropy)
}
// GetConsistency returns zero if the data is self consistent: for every
// embedding it finds the most cosine-similar other embedding and counts the
// cases where the two carry different iris labels.
func (r *Reduction) GetConsistency() (consistency uint) {
	rows := r.Embeddings.Embeddings
	for i := range rows {
		best, bestIndex := -1.0, 0
		for j := range rows {
			if j == i {
				continue
			}
			// Cosine similarity between feature vectors i and j.
			var dot, normA, normB float64
			for k, a := range rows[i].Features {
				b := rows[j].Features[k]
				dot += a * b
				normA += a * a
				normB += b * b
			}
			cos := dot / (math.Sqrt(normA) * math.Sqrt(normB))
			if cos > best {
				best, bestIndex = cos, j
			}
		}
		should := iris.Labels[rows[i].Label]
		found := iris.Labels[rows[bestIndex].Label]
		if should != found {
			consistency++
		}
	}
	return consistency
}
// Reduction is the result of variance reduction: one node of a binary
// decision tree built over the embedding features.
type Reduction struct {
	// Embeddings is the subset of embeddings that reached this node.
	Embeddings *Embeddings
	// Label is the cluster identifier assigned to this node; one bit is set
	// per right-branch taken (label|(1<<count) during construction).
	Label uint
	// Depth — presumably the tree depth for this node; it is not read in this
	// excerpt — confirm.
	Depth uint
	// Column is the feature column tested at this node.
	Column int
	// Pivot is the split threshold: rows with Features[Column] > Pivot go Right.
	Pivot float64
	// Max is the variance reduction achieved by the split; callers compare it
	// against a cutoff to decide whether to treat the node as a leaf.
	Max float64
	// Left holds rows <= Pivot, Right holds rows > Pivot.
	Left, Right *Reduction
}
// String converts the reduction to a string representation: Go-like source
// for the decision tree where each internal node tests one output column
// against its pivot and each leaf assigns the cluster label.
func (r *Reduction) String() string {
	var serialize func(r *Reduction, depth uint) string
	serialize = func(r *Reduction, depth uint) string {
		// Indentation grows with tree depth.
		spaces := ""
		for i := uint(0); i < depth; i++ {
			spaces += " "
		}
		left, right := "", ""
		var labelLeft, labelRight uint
		// Only recurse into internal children; leaf children are rendered
		// inline below as label assignments.
		if r.Left != nil {
			labelLeft = r.Left.Label
			if r.Left.Left != nil || r.Left.Right != nil {
				left = serialize(r.Left, depth+1)
			}
		}
		if r.Right != nil {
			labelRight = r.Right.Label
			if r.Right.Left != nil || r.Right.Right != nil {
				right = serialize(r.Right, depth+1)
			}
		}
		layer := fmt.Sprintf("%s// variance reduction: %f\n", spaces, r.Max)
		layer += fmt.Sprintf("%sif output[%d] > %f {\n", spaces, r.Column, r.Pivot)
		if right == "" {
			// NOTE(review): the generated `label := %d` would be an unused
			// variable if compiled as real Go — presumably the output is
			// illustrative only; confirm.
			layer += fmt.Sprintf("%s label := %d\n", spaces, labelRight)
		} else {
			layer += fmt.Sprintf("%s\n", right)
		}
		layer += fmt.Sprintf("%s} else {\n", spaces)
		if left == "" {
			layer += fmt.Sprintf("%s label := %d\n", spaces, labelLeft)
		} else {
			layer += fmt.Sprintf("%s\n", left)
		}
		layer += fmt.Sprintf("%s}", spaces)
		return layer
	}
	return serialize(r, 0)
} | embedding.go | 0.806777 | 0.596727 | embedding.go | starcoder |
package measurement
import (
"log"
"time"
"github.com/tarent/gomulocity/measurement"
)
// Example1NewMeasurements is a sample NewMeasurement carrying a single power
// fragment "P" of 71 W, timestamped when the package initialises (time.Now
// round-tripped through RFC3339).
var Example1NewMeasurements = measurement.NewMeasurement{
	Time:            timeToPointer(time.Now().Format(time.RFC3339)),
	MeasurementType: "P",
	Metrics: map[string]interface{}{
		"P": struct {
			P struct {
				Unit  string `json:"unit"`
				Value int    `json:"value"`
			}
		}{
			P: struct {
				Unit  string `json:"unit"`
				Value int    `json:"value"`
			}{Unit: "W", Value: 71},
		},
	},
}
// Example2NewMeasurements is a sample NewMeasurement carrying a three-phase
// power fragment "P" (P1/P2/P3 in watts), timestamped at package
// initialisation.
var Example2NewMeasurements = measurement.NewMeasurement{
	Time:            timeToPointer(time.Now().Format(time.RFC3339)),
	MeasurementType: "P",
	Metrics: map[string]interface{}{
		"P": struct {
			P1 struct {
				Unit  string `json:"unit"`
				Value int    `json:"value"`
			}
			P2 struct {
				Unit  string `json:"unit"`
				Value int    `json:"value"`
			}
			P3 struct {
				Unit  string `json:"unit"`
				Value int    `json:"value"`
			}
		}{
			P1: struct {
				Unit  string `json:"unit"`
				Value int    `json:"value"`
			}{Unit: "W", Value: 77},
			P2: struct {
				Unit  string `json:"unit"`
				Value int    `json:"value"`
			}{Unit: "W", Value: 43},
			P3: struct {
				Unit  string `json:"unit"`
				Value int    `json:"value"`
			}{Unit: "W", Value: 12},
		},
	},
}
// ExampleCollection is a sample batch of NewMeasurement values covering a
// voltage reading, a frequency reading, and a three-phase power reading.
// NOTE(review): the entries tag their fragment structs inconsistently (full
// json tags, no tags, tags on inner fields only) — presumably deliberate to
// exercise different serialisations; confirm.
var ExampleCollection = []measurement.NewMeasurement{
	{
		Time:            timeToPointer(time.Now().Format(time.RFC3339)),
		MeasurementType: "VoltageMeasurement",
		Metrics: map[string]interface{}{
			"VoltageMeasurement": struct {
				Voltage struct {
					Unit  string  `json:"unit"`
					Value float64 `json:"value"`
				} `json:"voltage"`
			}{
				struct {
					Unit  string  `json:"unit"`
					Value float64 `json:"value"`
				}{Unit: "V", Value: 227.32},
			},
		},
	},
	{
		Time:            timeToPointer(time.Now().Format(time.RFC3339)),
		MeasurementType: "c8y_FrequencyMeasurement",
		Metrics: map[string]interface{}{
			"c8y_FrequencyMeasurement": struct {
				Frequency struct {
					Unit  string
					Value float64
				}
			}{
				struct {
					Unit  string
					Value float64
				}{Unit: "Hz", Value: 37.71},
			},
		},
	},
	{
		Time:            timeToPointer(time.Now().Format(time.RFC3339)),
		MeasurementType: "P",
		Metrics: map[string]interface{}{
			"P": struct {
				P1 struct {
					Unit  string `json:"unit"`
					Value int    `json:"value"`
				}
				P2 struct {
					Unit  string `json:"unit"`
					Value int    `json:"value"`
				}
				P3 struct {
					Unit  string `json:"unit"`
					Value int    `json:"value"`
				}
			}{
				P1: struct {
					Unit  string `json:"unit"`
					Value int    `json:"value"`
				}{Unit: "W", Value: 77},
				P2: struct {
					Unit  string `json:"unit"`
					Value int    `json:"value"`
				}{Unit: "W", Value: 43},
				P3: struct {
					Unit  string `json:"unit"`
					Value int    `json:"value"`
				}{Unit: "W", Value: 12},
			},
		},
	},
}
// timeToPointer parses an RFC3339 timestamp string and returns a pointer to
// the resulting time.Time. The process is terminated via log.Fatal if the
// string does not parse — acceptable here only because these are examples.
func timeToPointer(timeString string) *time.Time {
	t, err := time.Parse(time.RFC3339, timeString)
	if err != nil {
		log.Fatal(err)
	}
	return &t
} | examples/measurement/example_newMeasurements.go | 0.636918 | 0.486271 | example_newMeasurements.go | starcoder |
package main
import (
"math"
"math/rand"
)
// Weight is a single learnable parameter together with its optimiser state.
type Weight struct {
	// Weight holds the parameter as a dual number: Val is the value, Der the
	// derivative slot used for forward-mode differentiation (see Train).
	Weight Dual
	// Delta is the momentum term from the previous update; Gradient is the
	// most recently computed partial derivative of the cost.
	Delta, Gradient float32
}
// Network is a fully-connected feed-forward neural network.
type Network struct {
	// Sizes lists the number of neurons in each layer, input layer first.
	Sizes []int
	// Layers[i] holds the Sizes[i]*Sizes[i+1] weights connecting layer i to
	// layer i+1, stored row-major per destination neuron.
	Layers [][]Weight
	// Biases[i] holds one bias per neuron in layer i+1.
	Biases [][]Weight
}
// random32 returns a pseudo-random float32 drawn uniformly from [a, b).
func random32(a, b float32) float32 {
	span := b - a
	return a + span*rand.Float32()
}
// NewNetwork builds a fully-connected network with the given layer sizes
// (input layer first). Every weight and bias is initialised uniformly in
// (-1, 1) scaled down by sqrt of the layer's fan-in.
func NewNetwork(sizes ...int) Network {
	layerCount := len(sizes) - 1
	layers := make([][]Weight, layerCount)
	biases := make([][]Weight, layerCount)
	fanIn := sizes[0]
	for i := 0; i < layerCount; i++ {
		fanOut := sizes[i+1]
		scale := float32(math.Sqrt(float64(fanIn)))
		weights := make([]Weight, fanIn*fanOut)
		for j := range weights {
			weights[j].Weight.Val = random32(-1, 1) / scale
		}
		cells := make([]Weight, fanOut)
		for j := range cells {
			cells[j].Weight.Val = random32(-1, 1) / scale
		}
		layers[i], biases[i] = weights, cells
		fanIn = fanOut
	}
	return Network{
		Sizes:  sizes,
		Layers: layers,
		Biases: biases,
	}
}
// NetState couples a Network with per-layer activation buffers so forward
// passes can run without reallocating.
type NetState struct {
	*Network
	// State[i] holds the activations of layer i; State[0] is the input layer.
	State [][]Dual
}
// NewNetState allocates one activation buffer per layer, sized to match the
// network, and binds it to this network.
func (n *Network) NewNetState() NetState {
	buffers := make([][]Dual, 0, len(n.Sizes))
	for _, size := range n.Sizes {
		buffers = append(buffers, make([]Dual, size))
	}
	return NetState{
		Network: n,
		State:   buffers,
	}
}
// Inference runs a forward pass: for each layer, every destination neuron's
// activation is the sigmoid of the weighted sum of the previous layer's
// activations plus the neuron's bias. Arithmetic is on dual numbers so a
// derivative seeded in any parameter's Der slot propagates to the outputs.
func (n *NetState) Inference() {
	for i, layer := range n.Layers {
		w := 0
		for j := 0; j < n.Sizes[i+1]; j++ {
			var sum Dual
			for _, activation := range n.State[i] {
				sum = Add(sum, Mul(activation, layer[w].Weight))
				w++
			}
			// BUG FIX: the bias was previously added inside the loop above,
			// i.e. once per input activation, which effectively scaled every
			// bias by the layer's fan-in. Each bias must contribute exactly
			// once per neuron.
			sum = Add(sum, n.Biases[i][j].Weight)
			n.State[i+1][j] = Sigmoid(sum)
		}
	}
}
// TrainingData is a single supervised sample: the network inputs and the
// expected outputs.
type TrainingData struct {
	Inputs, Outputs []float32
}
// Train fits the network to data with stochastic gradient descent plus
// momentum (alpha) at learning rate eta, looping over shuffled epochs until
// the summed per-epoch cost drops below target, and returns the number of
// epochs run.
//
// Gradients come from forward-mode automatic differentiation: for each
// parameter in turn its Der slot is seeded to 1, a full forward pass is run,
// and the derivative of the half-squared-error cost is read back. That is one
// forward pass per parameter per sample — simple, but O(params) slower than
// backpropagation.
func (n *Network) Train(data []TrainingData, target float64, alpha, eta float32) int {
	size := len(data)
	iterations, state, randomized := 0, n.NewNetState(), make([]TrainingData, size)
	copy(randomized, data)
	for {
		// Fisher-Yates shuffle so each epoch visits samples in random order.
		for i, sample := range randomized {
			j := i + rand.Intn(size-i)
			randomized[i], randomized[j] = randomized[j], sample
		}
		total := 0.0
		for _, item := range randomized {
			cost := 0.0
			// Load the sample into the input layer.
			for j, input := range item.Inputs {
				state.State[0][j].Val = input
			}
			// Weight gradients: seed one parameter at a time.
			for _, layer := range n.Layers {
				for j := range layer {
					layer[j].Weight.Der = 1.0
					state.Inference()
					// Cost is half the squared error over all outputs.
					var sum Dual
					for k, output := range item.Outputs {
						sub := Sub(state.State[len(state.State)-1][k], Dual{Val: output})
						sum = Add(sum, Mul(sub, sub))
					}
					sum = Mul(Half, sum)
					layer[j].Weight.Der = 0.0
					layer[j].Gradient = sum.Der
					cost = float64(sum.Val)
				}
			}
			// Bias gradients, same scheme.
			for _, bias := range n.Biases {
				for j := range bias {
					bias[j].Weight.Der = 1.0
					state.Inference()
					var sum Dual
					for k, output := range item.Outputs {
						sub := Sub(state.State[len(state.State)-1][k], Dual{Val: output})
						sum = Add(sum, Mul(sub, sub))
					}
					sum = Mul(Half, sum)
					bias[j].Weight.Der = 0.0
					bias[j].Gradient = sum.Der
					cost = float64(sum.Val)
				}
			}
			// cost has the same Val regardless of which parameter was seeded,
			// so the value left by the last pass is the sample's cost.
			total += cost
			// Momentum update: delta = alpha*delta - eta*gradient.
			for _, layer := range n.Layers {
				for j := range layer {
					layer[j].Delta = alpha*layer[j].Delta - eta*layer[j].Gradient
					layer[j].Weight.Val += layer[j].Delta
				}
			}
			for _, bias := range n.Biases {
				for j := range bias {
					bias[j].Delta = alpha*bias[j].Delta - eta*bias[j].Gradient
					bias[j].Weight.Val += bias[j].Delta
				}
			}
		}
		iterations++
		if total < target {
			break
		}
	}
	return iterations
} | network.go | 0.652352 | 0.448004 | network.go | starcoder |
package tileset
import (
"image"
"github.com/mewkiz/pkg/imgutil"
)
// A TileSet is a collection of one or more tile images, all of which have the
// same width and height.
type TileSet struct {
// Tile set sprite sheet.
imgutil.SubImager
// Tile width.
TileWidth int
// Tile height.
TileHeight int
// Tile set width and height.
width, height int
// Mapping from tile identifiers to tile images.
tiles map[TileID]image.Image
}
// New returns a tile set based on the provided sprite sheet img, caching the
// sheet's dimensions for tile-rectangle arithmetic.
func New(img image.Image, tileWidth, tileHeight int) *TileSet {
	ts := &TileSet{
		SubImager:  imgutil.SubFallback(img),
		TileWidth:  tileWidth,
		TileHeight: tileHeight,
		tiles:      map[TileID]image.Image{},
	}
	b := ts.Bounds()
	ts.width, ts.height = b.Dx(), b.Dy()
	return ts
}
// Open opens the sprite sheet specified by imgPath and returns a tile set
// based upon it, propagating any image-decoding error.
func Open(imgPath string, tileWidth, tileHeight int) (*TileSet, error) {
	img, err := imgutil.ReadFile(imgPath)
	if err != nil {
		return nil, err
	}
	return New(img, tileWidth, tileHeight), nil
}
// A TileID uniquely identifies a tile image in a specific tile set. The zero
// value represents no tile image.
type TileID int

// IsValid reports whether the identifier refers to an actual tile image; it
// is false only for the zero value.
func (id TileID) IsValid() bool {
	return id != TileID(0)
}
// tileRect returns the bounding rectangle of the tile image in the sprite
// sheet. Tile identifiers are assigned row-major starting at 1.
func (ts *TileSet) tileRect(id TileID) image.Rectangle {
	cols := ts.width / ts.TileWidth
	idx := int(id) - 1
	x := (idx % cols) * ts.TileWidth
	y := (idx / cols) * ts.TileHeight
	return image.Rect(x, y, x+ts.TileWidth, y+ts.TileHeight)
}
// Tile returns the tile image specified by id from the tile set, creating it
// as a sub-image of the sprite sheet and caching it on first use.
func (ts *TileSet) Tile(id TileID) image.Image {
	if cached, ok := ts.tiles[id]; ok {
		return cached
	}
	tile := ts.SubImage(ts.tileRect(id))
	ts.tiles[id] = tile
	return tile
}
// LastID returns the last tile identifier contained within the tile set. An
// empty tile set always returns the zero value. Because IDs are assigned
// row-major starting at 1, the last ID equals the number of whole tiles that
// fit in the sheet.
func (ts *TileSet) LastID() (id TileID) {
	// TODO(u): ignore trailing empty tiles?
	tsCols := ts.width / ts.TileWidth
	tsRows := ts.height / ts.TileHeight
	id = TileID(tsCols * tsRows)
	return id
} | tileset/tileset.go | 0.661595 | 0.492188 | tileset.go | starcoder |
package dfl
import (
"fmt"
"github.com/pkg/errors"
"github.com/spatialcurrent/go-adaptive-functions/pkg/af"
)
// Add is a BinaryOperator that represents the addition of two nodes. For
// string operands addition behaves as concatenation (see Compile, which
// normalises such cases into Concat nodes).
type Add struct {
	*BinaryOperator
}
// Dfl returns the DFL representation of this node as a string, rendering the
// operator as "+".
func (a Add) Dfl(quotes []string, pretty bool, tabs int) string {
	const operator = "+"
	return a.BinaryOperator.Dfl(operator, quotes, pretty, tabs)
}
// Sql returns the SQL representation of this node, rendering the operator
// as "+".
func (a Add) Sql(pretty bool, tabs int) string {
	const operator = "+"
	return a.BinaryOperator.Sql(operator, pretty, tabs)
}
// Map returns a map representation of this node, keyed on the "+" operator
// with the left and right operands as its children.
func (a Add) Map() map[string]interface{} {
	left, right := a.Left, a.Right
	return a.BinaryOperator.Map("+", left, right)
}
// Compile returns a compiled version of this node.
// If the left and right values are both compiled as literals, then returns the compiled Literal with that value set.
// String-typed operands are additionally normalised into Concat nodes (string
// addition is concatenation), merging into an existing Concat chain on the
// right where possible. Otherwise returns a clone of this node.
func (a Add) Compile() Node {
	left := a.Left.Compile()
	right := a.Right.Compile()
	switch left.(type) {
	case Literal:
		switch right.(type) {
		case Literal:
			// Constant-fold literal + literal; fall back to a plain Add node
			// if the adaptive function rejects the operand types.
			v, err := af.Add.ValidateRun([]interface{}{left.(Literal).Value, right.(Literal).Value})
			if err != nil {
				return &Add{&BinaryOperator{Left: left, Right: right}}
			}
			return Literal{Value: v}
		}
		switch left.(Literal).Value.(type) {
		case string:
			switch right.(type) {
			case Concat:
				// "lit" + Concat(lit, ...): fuse the two leading literals
				// into one, keeping the rest of the chain.
				switch right.(Concat).Arguments[0].(type) {
				case Literal:
					n := Literal{
						Value: left.(Literal).Value.(string) + fmt.Sprint(right.(Concat).Arguments[0].(Literal).Value),
					}
					return Concat{&MultiOperator{Arguments: append([]Node{n}, right.(Concat).Arguments[1:]...)}}
				}
				// Otherwise just prepend the literal to the chain.
				return Concat{&MultiOperator{Arguments: append([]Node{left}, right.(Concat).Arguments...)}}
			}
			// String literal + anything else becomes a two-element Concat.
			return Concat{&MultiOperator{Arguments: []Node{left, right}}}
		}
	case Attribute, *Attribute, Variable, *Variable:
		// Attribute/variable + string literal or Concat is also string
		// concatenation; numeric additions fall through to the default below.
		switch right.(type) {
		case Literal:
			switch right.(Literal).Value.(type) {
			case string:
				return Concat{&MultiOperator{Arguments: []Node{left, right}}}
			}
		case Concat:
			return Concat{&MultiOperator{Arguments: append([]Node{left}, right.(Concat).Arguments...)}}
		}
	}
	return &Add{&BinaryOperator{Left: left, Right: right}}
}
// Evaluate returns the value of this node given Context ctx, and an error if any.
// Both operands are evaluated first (threading vars through), then the sum is
// delegated to the adaptive Add function; alongside any error the returned
// value is 0.
func (a Add) Evaluate(vars map[string]interface{}, ctx interface{}, funcs FunctionMap, quotes []string) (map[string]interface{}, interface{}, error) {
	vars, lv, rv, err := a.EvaluateLeftAndRight(vars, ctx, funcs, quotes)
	if err != nil {
		return vars, 0, err
	}
	v, err := af.Add.ValidateRun(lv, rv)
	if err != nil {
		return vars, 0, errors.Wrap(err, ErrorEvaluate{Node: a, Quotes: quotes}.Error())
	}
	return vars, v, err
} | pkg/dfl/Add.go | 0.87401 | 0.54153 | Add.go | starcoder |
package awk
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"regexp"
"time"
"github.com/Jeffail/gabs/v2"
"github.com/benhoyt/goawk/interp"
"github.com/benhoyt/goawk/parser"
"github.com/benthosdev/benthos/v4/internal/bundle"
"github.com/benthosdev/benthos/v4/internal/component/processor"
"github.com/benthosdev/benthos/v4/internal/docs"
"github.com/benthosdev/benthos/v4/internal/log"
"github.com/benthosdev/benthos/v4/internal/message"
)
var varInvalidRegexp *regexp.Regexp
func init() {
varInvalidRegexp = regexp.MustCompile(`[^a-zA-Z0-9_]`)
err := bundle.AllProcessors.Add(func(conf processor.Config, mgr bundle.NewManagement) (processor.V1, error) {
p, err := newAWKProc(conf.AWK, mgr)
if err != nil {
return nil, err
}
return processor.NewV2ToV1Processor("awk", p, mgr), nil
}, docs.ComponentSpec{
Name: "awk",
Categories: []string{
"Mapping",
},
Summary: `
Executes an AWK program on messages. This processor is very powerful as it
offers a range of [custom functions](#awk-functions) for querying and mutating
message contents and metadata.`,
Description: `
Works by feeding message contents as the program input based on a chosen
[codec](#codecs) and replaces the contents of each message with the result. If
the result is empty (nothing is printed by the program) then the original
message contents remain unchanged.
Comes with a wide range of [custom functions](#awk-functions) for accessing
message metadata, json fields, printing logs, etc. These functions can be
overridden by functions within the program.
Check out the [examples section](#examples) in order to see how this processor
can be used.
This processor uses [GoAWK][goawk], in order to understand the differences
in how the program works you can [read more about it here][goawk.differences].`,
Footnotes: `
## Codecs
The chosen codec determines how the contents of the message are fed into the
program. Codecs only impact the input string and variables initialised for your
program, they do not change the range of custom functions available.
### ` + "`none`" + `
An empty string is fed into the program. Functions can still be used in order to
extract and mutate metadata and message contents.
This is useful for when your program only uses functions and doesn't need the
full text of the message to be parsed by the program, as it is significantly
faster.
### ` + "`text`" + `
The full contents of the message are fed into the program as a string, allowing
you to reference tokenised segments of the message with variables ($0, $1, etc).
Custom functions can still be used with this codec.
This is the default codec as it behaves most similar to typical usage of the awk
command line tool.
### ` + "`json`" + `
An empty string is fed into the program, and variables are automatically
initialised before execution of your program by walking the flattened JSON
structure. Each value is converted into a variable by taking its full path,
e.g. the object:
` + "``` json" + `
{
"foo": {
"bar": {
"value": 10
},
"created_at": "2018-12-18T11:57:32"
}
}
` + "```" + `
Would result in the following variable declarations:
` + "```" + `
foo_bar_value = 10
foo_created_at = "2018-12-18T11:57:32"
` + "```" + `
Custom functions can also still be used with this codec.
## AWK Functions
` + "### `json_get`" + `
Signature: ` + "`json_get(path)`" + `
Attempts to find a JSON value in the input message payload by a
[dot separated path](/docs/configuration/field_paths) and returns it as a string.
` + "### `json_set`" + `
Signature: ` + "`json_set(path, value)`" + `
Attempts to set a JSON value in the input message payload identified by a
[dot separated path](/docs/configuration/field_paths), the value argument will be interpreted
as a string.
In order to set non-string values use one of the following typed varieties:
` + "- `json_set_int(path, value)`" + `
` + "- `json_set_float(path, value)`" + `
` + "- `json_set_bool(path, value)`" + `
` + "### `json_append`" + `
Signature: ` + "`json_append(path, value)`" + `
Attempts to append a value to an array identified by a
[dot separated path](/docs/configuration/field_paths). If the target does not
exist it will be created. If the target exists but is not already an array then
it will be converted into one, with its original contents set to the first
element of the array.
The value argument will be interpreted as a string. In order to append
non-string values use one of the following typed varieties:
` + "- `json_append_int(path, value)`" + `
` + "- `json_append_float(path, value)`" + `
` + "- `json_append_bool(path, value)`" + `
` + "### `json_delete`" + `
Signature: ` + "`json_delete(path)`" + `
Attempts to delete a JSON field from the input message payload identified by a
[dot separated path](/docs/configuration/field_paths).
` + "### `json_length`" + `
Signature: ` + "`json_length(path)`" + `
Returns the size of the string or array value of JSON field from the input
message payload identified by a [dot separated path](/docs/configuration/field_paths).
If the target field does not exist, or is not a string or array type, then zero
is returned. In order to explicitly check the type of a field use ` + "`json_type`" + `.
` + "### `json_type`" + `
Signature: ` + "`json_type(path)`" + `
Returns the type of a JSON field from the input message payload identified by a
[dot separated path](/docs/configuration/field_paths).
Possible values are: "string", "int", "float", "bool", "undefined", "null",
"array", "object".
` + "### `create_json_object`" + `
Signature: ` + "`create_json_object(key1, val1, key2, val2, ...)`" + `
Generates a valid JSON object of key value pair arguments. The arguments are
variadic, meaning any number of pairs can be listed. The value will always
resolve to a string regardless of the value type. E.g. the following call:
` + "`create_json_object(\"a\", \"1\", \"b\", 2, \"c\", \"3\")`" + `
Would result in this string:
` + "`{\"a\":\"1\",\"b\":\"2\",\"c\":\"3\"}`" + `
` + "### `create_json_array`" + `
Signature: ` + "`create_json_array(val1, val2, ...)`" + `
Generates a valid JSON array of value arguments. The arguments are variadic,
meaning any number of values can be listed. The value will always resolve to a
string regardless of the value type. E.g. the following call:
` + "`create_json_array(\"1\", 2, \"3\")`" + `
Would result in this string:
` + "`[\"1\",\"2\",\"3\"]`" + `
` + "### `metadata_set`" + `
Signature: ` + "`metadata_set(key, value)`" + `
Set a metadata key for the message to a value. The value will always resolve to
a string regardless of the value type.
` + "### `metadata_get`" + `
Signature: ` + "`metadata_get(key) string`" + `
Get the value of a metadata key from the message.
` + "### `timestamp_unix`" + `
Signature: ` + "`timestamp_unix() int`" + `
Returns the current unix timestamp (the number of seconds since 01-01-1970).
` + "### `timestamp_unix`" + `
Signature: ` + "`timestamp_unix(date) int`" + `
Attempts to parse a date string by detecting its format and returns the
equivalent unix timestamp (the number of seconds since 01-01-1970).
` + "### `timestamp_unix`" + `
Signature: ` + "`timestamp_unix(date, format) int`" + `
Attempts to parse a date string according to a format and returns the equivalent
unix timestamp (the number of seconds since 01-01-1970).
The format is defined by showing how the reference time, defined to be
` + "`Mon Jan 2 15:04:05 -0700 MST 2006`" + ` would be displayed if it were the value.
` + "### `timestamp_unix_nano`" + `
Signature: ` + "`timestamp_unix_nano() int`" + `
Returns the current unix timestamp in nanoseconds (the number of nanoseconds
since 01-01-1970).
` + "### `timestamp_unix_nano`" + `
Signature: ` + "`timestamp_unix_nano(date) int`" + `
Attempts to parse a date string by detecting its format and returns the
equivalent unix timestamp in nanoseconds (the number of nanoseconds since
01-01-1970).
` + "### `timestamp_unix_nano`" + `
Signature: ` + "`timestamp_unix_nano(date, format) int`" + `
Attempts to parse a date string according to a format and returns the equivalent
unix timestamp in nanoseconds (the number of nanoseconds since 01-01-1970).
The format is defined by showing how the reference time, defined to be
` + "`Mon Jan 2 15:04:05 -0700 MST 2006`" + ` would be displayed if it were the value.
` + "### `timestamp_format`" + `
Signature: ` + "`timestamp_format(unix, format) string`" + `
Formats a unix timestamp. The format is defined by showing how the reference
time, defined to be ` + "`Mon Jan 2 15:04:05 -0700 MST 2006`" + ` would be displayed if it
were the value.
The format is optional, and if omitted RFC3339 (` + "`2006-01-02T15:04:05Z07:00`" + `)
will be used.
` + "### `timestamp_format_nano`" + `
Signature: ` + "`timestamp_format_nano(unixNano, format) string`" + `
Formats a unix timestamp in nanoseconds. The format is defined by showing how
the reference time, defined to be ` + "`Mon Jan 2 15:04:05 -0700 MST 2006`" + ` would be
displayed if it were the value.
The format is optional, and if omitted RFC3339 (` + "`2006-01-02T15:04:05Z07:00`" + `)
will be used.
` + "### `print_log`" + `
Signature: ` + "`print_log(message, level)`" + `
Prints a Benthos log message at a particular log level. The log level is
optional, and if omitted the level ` + "`INFO`" + ` will be used.
[goawk]: https://github.com/benhoyt/goawk
[goawk.differences]: https://github.com/benhoyt/goawk#differences-from-awk`,
Config: docs.FieldComponent().WithChildren(
docs.FieldString("codec", "A [codec](#codecs) defines how messages should be inserted into the AWK program as variables. The codec does not change which [custom Benthos functions](#awk-functions) are available. The `text` codec is the closest to a typical AWK use case.").HasOptions("none", "text", "json"),
docs.FieldString("program", "An AWK program to execute"),
).ChildDefaultAndTypesFromStruct(processor.NewAWKConfig()),
Examples: []docs.AnnotatedExample{
{
Title: "JSON Mapping and Arithmetic",
Summary: `
Because AWK is a full programming language it's much easier to map documents and
perform arithmetic with it than with other Benthos processors. For example, if
we were expecting documents of the form:
` + "```json" + `
{"doc":{"val1":5,"val2":10},"id":"1","type":"add"}
{"doc":{"val1":5,"val2":10},"id":"2","type":"multiply"}
` + "```" + `
And we wished to perform the arithmetic specified in the ` + "`type`" + ` field,
on the values ` + "`val1` and `val2`" + ` and, finally, map the result into the
document, giving us the following resulting documents:
` + "```json" + `
{"doc":{"result":15,"val1":5,"val2":10},"id":"1","type":"add"}
{"doc":{"result":50,"val1":5,"val2":10},"id":"2","type":"multiply"}
` + "```" + `
We can do that with the following:`,
Config: `
pipeline:
processors:
- awk:
program: |
function map_add_vals() {
json_set_int("doc.result", json_get("doc.val1") + json_get("doc.val2"));
}
function map_multiply_vals() {
json_set_int("doc.result", json_get("doc.val1") * json_get("doc.val2"));
}
function map_unknown(type) {
json_set("error","unknown document type");
print_log("Document type not recognised: " type, "ERROR");
}
{
type = json_get("type");
if (type == "add")
map_add_vals();
else if (type == "multiply")
map_multiply_vals();
else
map_unknown(type);
}
`,
},
{
Title: "Stuff With Arrays",
Summary: `
It's possible to iterate JSON arrays by appending an index value to the path,
this can be used to do things like removing duplicates from arrays. For example,
given the following input document:
` + "```json" + `
{"path":{"to":{"foos":["one","two","three","two","four"]}}}
` + "```" + `
We could create a new array ` + "`foos_unique` from `foos`" + ` giving us the result:
` + "```json" + `
{"path":{"to":{"foos":["one","two","three","two","four"],"foos_unique":["one","two","three","four"]}}}
` + "```" + `
With the following config:`,
Config: `
pipeline:
processors:
- awk:
program: |
{
array_path = "path.to.foos"
array_len = json_length(array_path)
for (i = 0; i < array_len; i++) {
ele = json_get(array_path "." i)
if ( ! ( ele in seen ) ) {
json_append(array_path "_unique", ele)
seen[ele] = 1
}
}
}
`,
},
},
})
if err != nil {
panic(err)
}
}
//------------------------------------------------------------------------------
// awkProc executes a compiled GoAWK program against each message. codec
// selects how message content is surfaced to the program; functions is the
// per-instance custom function table (placeholders are rebound to message
// closures during processing).
type awkProc struct {
	codec     string
	program   *parser.Program
	log       log.Modular
	functions map[string]interface{}
}
// newAWKProc compiles the configured AWK program against the custom function
// table, validates the codec, and returns an awkProc whose print_log
// placeholder is bound to the manager's logger.
func newAWKProc(conf processor.AWKConfig, mgr bundle.NewManagement) (processor.V2, error) {
	program, err := parser.ParseProgram([]byte(conf.Program), &parser.ParserConfig{
		Funcs: awkFunctionsMap,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to compile AWK program: %v", err)
	}
	// Validate the codec up front; the empty cases are deliberate no-ops.
	switch conf.Codec {
	case "none":
	case "text":
	case "json":
	default:
		return nil, fmt.Errorf("unrecognised codec: %v", conf.Codec)
	}
	// Copy the shared function table so per-instance overrides never mutate
	// the package-level map.
	functionOverrides := make(map[string]interface{}, len(awkFunctionsMap))
	for k, v := range awkFunctionsMap {
		functionOverrides[k] = v
	}
	// Unknown level strings fall through to INFO.
	functionOverrides["print_log"] = func(value, level string) {
		switch level {
		default:
			fallthrough
		case "", "INFO":
			mgr.Logger().Infoln(value)
		case "TRACE":
			mgr.Logger().Traceln(value)
		case "DEBUG":
			mgr.Logger().Debugln(value)
		case "WARN":
			mgr.Logger().Warnln(value)
		case "ERROR":
			mgr.Logger().Errorln(value)
		case "FATAL":
			mgr.Logger().Fatalln(value)
		}
	}
	a := &awkProc{
		codec:     conf.Codec,
		program:   program,
		log:       mgr.Logger(),
		functions: functionOverrides,
	}
	return a, nil
}
//------------------------------------------------------------------------------
// getTime parses dateStr into a time.Time. An empty dateStr yields the
// current time. When format is given it is used directly; otherwise a list of
// common layouts is probed until one parses, and an error is returned if none
// match.
func getTime(dateStr, format string) (time.Time, error) {
	if dateStr == "" {
		return time.Now(), nil
	}
	if format != "" {
		return time.Parse(format, dateStr)
	}
	// No format supplied: try common layouts in order.
	layouts := []string{
		time.RubyDate,
		time.RFC1123Z,
		time.RFC1123,
		time.RFC3339,
		time.RFC822,
		time.RFC822Z,
		"Mon, 2 Jan 2006 15:04:05 -0700",
		"2006-01-02T15:04:05MST",
		"2006-01-02T15:04:05",
		"2006-01-02 15:04:05",
		"2006-01-02T15:04:05Z0700",
		"2006-01-02",
	}
	for _, layout := range layouts {
		if parsed, err := time.Parse(layout, dateStr); err == nil {
			return parsed, nil
		}
	}
	return time.Time{}, fmt.Errorf("failed to detect datetime format of: %v", dateStr)
}
// awkFunctionsMap declares every custom function exposed to AWK programs.
// The timestamp_* and create_json_* entries are real implementations; the
// metadata_* / json_* / print_log entries are typed placeholders that exist
// only so the parser can type-check calls — they are replaced per message
// with closures over that message (see newAWKProc and Process).
var awkFunctionsMap = map[string]interface{}{
	"timestamp_unix": func(dateStr string, format string) (int64, error) {
		ts, err := getTime(dateStr, format)
		if err != nil {
			return 0, err
		}
		return ts.Unix(), nil
	},
	"timestamp_unix_nano": func(dateStr string, format string) (int64, error) {
		ts, err := getTime(dateStr, format)
		if err != nil {
			return 0, err
		}
		return ts.UnixNano(), nil
	},
	"timestamp_format": func(unix int64, formatArg string) string {
		// An empty format defaults to RFC3339; output is always UTC.
		format := time.RFC3339
		if len(formatArg) > 0 {
			format = formatArg
		}
		t := time.Unix(unix, 0).In(time.UTC)
		return t.Format(format)
	},
	"timestamp_format_nano": func(unixNano int64, formatArg string) string {
		format := time.RFC3339
		if len(formatArg) > 0 {
			format = formatArg
		}
		// Split nanoseconds into whole seconds plus the remainder.
		s := unixNano / 1000000000
		ns := unixNano - (s * 1000000000)
		t := time.Unix(s, ns).In(time.UTC)
		return t.Format(format)
	},
	"metadata_get": func(key string) string {
		// Do nothing, this is a placeholder for compilation.
		return ""
	},
	"metadata_set": func(key, value string) {
		// Do nothing, this is a placeholder for compilation.
	},
	"json_get": func(path string) (string, error) {
		// Do nothing, this is a placeholder for compilation.
		return "", errors.New("not implemented")
	},
	"json_set": func(path, value string) (int, error) {
		// Do nothing, this is a placeholder for compilation.
		return 0, errors.New("not implemented")
	},
	"json_set_int": func(path string, value int) (int, error) {
		// Do nothing, this is a placeholder for compilation.
		return 0, errors.New("not implemented")
	},
	"json_set_float": func(path string, value float64) (int, error) {
		// Do nothing, this is a placeholder for compilation.
		return 0, errors.New("not implemented")
	},
	"json_set_bool": func(path string, value bool) (int, error) {
		// Do nothing, this is a placeholder for compilation.
		return 0, errors.New("not implemented")
	},
	"json_append": func(path, value string) (int, error) {
		// Do nothing, this is a placeholder for compilation.
		return 0, errors.New("not implemented")
	},
	"json_append_int": func(path string, value int) (int, error) {
		// Do nothing, this is a placeholder for compilation.
		return 0, errors.New("not implemented")
	},
	"json_append_float": func(path string, value float64) (int, error) {
		// Do nothing, this is a placeholder for compilation.
		return 0, errors.New("not implemented")
	},
	"json_append_bool": func(path string, value bool) (int, error) {
		// Do nothing, this is a placeholder for compilation.
		return 0, errors.New("not implemented")
	},
	"json_delete": func(path string) (int, error) {
		// Do nothing, this is a placeholder for compilation.
		return 0, errors.New("not implemented")
	},
	"json_length": func(path string) (int, error) {
		// Do nothing, this is a placeholder for compilation.
		return 0, errors.New("not implemented")
	},
	"json_type": func(path string) (string, error) {
		// Do nothing, this is a placeholder for compilation.
		return "", errors.New("not implemented")
	},
	"create_json_object": func(vals ...string) string {
		// Interpret the variadic arguments as alternating key/value pairs;
		// a trailing unpaired argument is ignored.
		pairs := map[string]string{}
		for i := 0; i < len(vals)-1; i += 2 {
			pairs[vals[i]] = vals[i+1]
		}
		bytes, _ := json.Marshal(pairs)
		if len(bytes) == 0 {
			return "{}"
		}
		return string(bytes)
	},
	"create_json_array": func(vals ...string) string {
		bytes, _ := json.Marshal(vals)
		if len(bytes) == 0 {
			return "[]"
		}
		return string(bytes)
	},
	"print_log": func(value, level string) {
		// Do nothing, this is a placeholder for compilation.
	},
}
//------------------------------------------------------------------------------
// flattenForAWK recursively flattens a decoded JSON value into a map from
// dot-separated paths to stringified scalar values, rooted at path. Array
// elements share their parent path, so for arrays of scalars the last element
// wins.
func flattenForAWK(path string, data interface{}) map[string]string {
	flat := map[string]string{}
	switch v := data.(type) {
	case map[string]interface{}:
		for key, child := range v {
			childPath := key
			if len(path) > 0 {
				childPath = path + "." + key
			}
			for fk, fv := range flattenForAWK(childPath, child) {
				flat[fk] = fv
			}
		}
	case []interface{}:
		for _, child := range v {
			for fk, fv := range flattenForAWK(path, child) {
				flat[fk] = fv
			}
		}
	default:
		flat[path] = fmt.Sprintf("%v", v)
	}
	return flat
}
//------------------------------------------------------------------------------
// Process applies the AWK program to a message, either creating >0
// resulting messages or a response to be sent back to the message source.
//
// The part is copied first, so mutations made by the program (metadata,
// JSON edits, stdout replacing the content) never touch the caller's part.
// Non-empty stdout becomes the new part content; anything the program
// writes to stderr is surfaced as an error.
func (a *awkProc) Process(ctx context.Context, msg *message.Part) ([]*message.Part, error) {
	part := msg.Copy()
	// Lazily-created mutable copy of the part's JSON document, shared by
	// all json_* mutator closures below so their edits accumulate.
	var mutableJSONPart interface{}
	// Copy the shared function table so the per-message overrides below
	// do not mutate state shared between invocations.
	customFuncs := make(map[string]interface{}, len(a.functions))
	for k, v := range a.functions {
		customFuncs[k] = v
	}
	var outBuf, errBuf bytes.Buffer
	// Function overrides
	customFuncs["metadata_get"] = func(k string) string {
		return part.MetaGet(k)
	}
	customFuncs["metadata_set"] = func(k, v string) {
		part.MetaSet(k, v)
	}
	// json_get reads from the part's current JSON document; strings are
	// returned bare, nil becomes "null", everything else is serialised.
	customFuncs["json_get"] = func(path string) (string, error) {
		jsonPart, err := part.JSON()
		if err != nil {
			return "", fmt.Errorf("failed to parse message into json: %v", err)
		}
		gPart := gabs.Wrap(jsonPart)
		gTarget := gPart.Path(path)
		if gTarget.Data() == nil {
			return "null", nil
		}
		if str, isString := gTarget.Data().(string); isString {
			return str, nil
		}
		return gTarget.String(), nil
	}
	// getJSON returns a mutable (deep-copied) view of the part's JSON,
	// creating and caching it in mutableJSONPart on first use.
	getJSON := func() (*gabs.Container, error) {
		var err error
		jsonPart := mutableJSONPart
		if jsonPart == nil {
			if jsonPart, err = part.JSON(); err == nil {
				jsonPart, err = message.CopyJSON(jsonPart)
			}
			if err == nil {
				mutableJSONPart = jsonPart
			}
		}
		if err != nil {
			return nil, fmt.Errorf("failed to parse message into json: %v", err)
		}
		gPart := gabs.Wrap(jsonPart)
		return gPart, nil
	}
	// setJSON writes v at path and stores the document back on the part.
	// The int return exists only to satisfy the AWK function signature.
	setJSON := func(path string, v interface{}) (int, error) {
		gPart, err := getJSON()
		if err != nil {
			return 0, err
		}
		_, _ = gPart.SetP(v, path)
		part.SetJSON(gPart.Data())
		return 0, nil
	}
	customFuncs["json_set"] = func(path, v string) (int, error) {
		return setJSON(path, v)
	}
	customFuncs["json_set_int"] = func(path string, v int) (int, error) {
		return setJSON(path, v)
	}
	customFuncs["json_set_float"] = func(path string, v float64) (int, error) {
		return setJSON(path, v)
	}
	customFuncs["json_set_bool"] = func(path string, v bool) (int, error) {
		return setJSON(path, v)
	}
	// arrayAppendJSON appends v to the array at path and stores the
	// document back on the part.
	arrayAppendJSON := func(path string, v interface{}) (int, error) {
		gPart, err := getJSON()
		if err != nil {
			return 0, err
		}
		_ = gPart.ArrayAppendP(v, path)
		part.SetJSON(gPart.Data())
		return 0, nil
	}
	customFuncs["json_append"] = func(path, v string) (int, error) {
		return arrayAppendJSON(path, v)
	}
	customFuncs["json_append_int"] = func(path string, v int) (int, error) {
		return arrayAppendJSON(path, v)
	}
	customFuncs["json_append_float"] = func(path string, v float64) (int, error) {
		return arrayAppendJSON(path, v)
	}
	customFuncs["json_append_bool"] = func(path string, v bool) (int, error) {
		return arrayAppendJSON(path, v)
	}
	customFuncs["json_delete"] = func(path string) (int, error) {
		gObj, err := getJSON()
		if err != nil {
			return 0, err
		}
		_ = gObj.DeleteP(path)
		part.SetJSON(gObj.Data())
		return 0, nil
	}
	// json_length reports the length of a string or array at path; any
	// other type (or a missing path) yields 0.
	customFuncs["json_length"] = func(path string) (int, error) {
		gObj, err := getJSON()
		if err != nil {
			return 0, err
		}
		switch t := gObj.Path(path).Data().(type) {
		case string:
			return len(t), nil
		case []interface{}:
			return len(t), nil
		}
		return 0, nil
	}
	customFuncs["json_type"] = func(path string) (string, error) {
		gObj, err := getJSON()
		if err != nil {
			return "", err
		}
		if !gObj.ExistsP(path) {
			return "undefined", nil
		}
		switch t := gObj.Path(path).Data().(type) {
		case int:
			return "int", nil
		case float64:
			return "float", nil
		case json.Number:
			return "float", nil
		case string:
			return "string", nil
		case bool:
			return "bool", nil
		case []interface{}:
			return "array", nil
		case map[string]interface{}:
			return "object", nil
		case nil:
			return "null", nil
		default:
			return "", fmt.Errorf("type not recognised: %T", t)
		}
	}
	config := &interp.Config{
		Output: &outBuf,
		Error:  &errBuf,
		Funcs:  customFuncs,
	}
	// Choose what the AWK program sees as input, depending on the codec:
	// "json" exposes the flattened document as variables (with a dummy
	// stdin), "text" streams the raw content, anything else gets the
	// dummy stdin only.
	if a.codec == "json" {
		jsonPart, err := part.JSON()
		if err != nil {
			a.log.Errorf("Failed to parse part into json: %v\n", err)
			return nil, err
		}
		for k, v := range flattenForAWK("", jsonPart) {
			config.Vars = append(config.Vars, varInvalidRegexp.ReplaceAllString(k, "_"), v)
		}
		config.Stdin = bytes.NewReader([]byte(" "))
	} else if a.codec == "text" {
		config.Stdin = bytes.NewReader(part.Get())
	} else {
		config.Stdin = bytes.NewReader([]byte(" "))
	}
	// Metadata is exposed as variables for every codec except "none";
	// characters invalid in AWK identifiers are replaced with '_'.
	if a.codec != "none" {
		_ = part.MetaIter(func(k, v string) error {
			config.Vars = append(config.Vars, varInvalidRegexp.ReplaceAllString(k, "_"), v)
			return nil
		})
	}
	if exitStatus, err := interp.ExecProgram(a.program, config); err != nil {
		a.log.Errorf("Non-fatal execution error: %v\n", err)
		return nil, err
	} else if exitStatus != 0 {
		err = fmt.Errorf(
			"non-fatal execution error: awk interpreter returned non-zero exit code: %d", exitStatus,
		)
		a.log.Errorf("AWK: %v\n", err)
		return nil, err
	}
	// Anything the program wrote to stderr is surfaced as an error.
	if errMsg, err := io.ReadAll(&errBuf); err != nil {
		a.log.Errorf("Read err error: %v\n", err)
	} else if len(errMsg) > 0 {
		a.log.Errorf("Execution error: %s\n", errMsg)
		return nil, errors.New(string(errMsg))
	}
	resMsgBytes, err := io.ReadAll(&outBuf)
	if err != nil {
		a.log.Errorf("Read output error: %v\n", err)
		return nil, err
	}
	// Non-empty stdout replaces the part's content.
	if len(resMsgBytes) > 0 {
		// Remove trailing line break
		if resMsgBytes[len(resMsgBytes)-1] == '\n' {
			resMsgBytes = resMsgBytes[:len(resMsgBytes)-1]
		}
		part.Set(resMsgBytes)
	}
	return []*message.Part{part}, nil
}
// Close is a no-op: the processor holds no resources requiring cleanup.
func (a *awkProc) Close(context.Context) error {
	return nil
}
package gtasa
import (
"github.com/jamiemansfield/gtasave/io"
"github.com/jamiemansfield/gtasave/util"
"math"
"reflect"
)
// Parse decodes a GTA: San Andreas save file into v, which must be a
// pointer to a struct whose fields carry `gta` tags mapping each field to
// a block index within the save.
func Parse(data []byte, v interface{}) error {
	// Separate the blocks: the save is a sequence of sections, each
	// introduced by a literal "BLOCK" marker.
	var blocks [34][]byte
	var index = 0
	reader := io.CreateReader(data)
	for reader.Available() {
		if isAtBoundary(reader) {
			// Skip the block boundary
			reader.Skip(5)
			// Read the block
			// NOTE(review): index is unchecked; a file containing more
			// than 34 blocks would panic here — confirm 34 is a hard
			// limit of the save format.
			blocks[index] = readBlock(reader)
			index += 1
		} else {
			reader.Skip(1)
		}
	}
	// Get type information on the save format
	t := reflect.ValueOf(v)
	for i := 0; i < reflect.TypeOf(v).Elem().NumField(); i++ {
		field := t.Type().Elem().Field(i)
		// The field's tag selects which block it is decoded from.
		tag, err := util.GetGtaTag(field.Tag.Get("gta"))
		if err != nil {
			return err
		}
		blockReader := io.CreateReader(blocks[tag.Index])
		// Get type information of the block
		t2 := t.Elem().Field(i)
		// Read the struct
		readStruct(blockReader, t2.Type(), t2)
	}
	return nil
}
// read decodes a single value from the reader into f, using the offset and
// length information carried by tag. The decoding strategy is selected by
// the destination field's reflect.Kind.
func read(reader *io.Reader, tag *util.GtaTag, f reflect.Value) {
	switch f.Type().Kind() {
	case reflect.Bool:
		value := reader.ReadBool(tag.Index)
		f.SetBool(value)
	case reflect.Int:
		// Stored on disk as an unsigned 32-bit value.
		value := reader.ReadUInt32(tag.Index)
		f.SetInt(int64(value))
	case reflect.Uint8:
		value := reader.ReadUInt8(tag.Index)
		f.Set(reflect.ValueOf(value).Convert(f.Type()))
	case reflect.Int8:
		value := reader.ReadInt8(tag.Index)
		f.Set(reflect.ValueOf(value).Convert(f.Type()))
	case reflect.Uint16:
		value := reader.ReadUInt16(tag.Index)
		f.Set(reflect.ValueOf(value))
	case reflect.Uint32:
		value := reader.ReadUInt32(tag.Index)
		f.Set(reflect.ValueOf(value).Convert(f.Type()))
	case reflect.Float32:
		// Floats are stored as raw IEEE-754 bit patterns.
		intBits := reader.ReadUInt32(tag.Index)
		value := math.Float32frombits(intBits)
		// NaN check (NaN never compares equal to itself); NaN values are
		// normalised to -1.
		if value != value {
			value = -1
		}
		f.Set(reflect.ValueOf(value))
	case reflect.String:
		raw := reader.Splice(tag.Index, tag.Length)
		// Encoded in ASCII, padded with /raw/ zeros to make up the length.
		reader := io.CreateReader(raw)
		var length int
		for reader.Available() && reader.Peek(0) != 0 {
			reader.Skip(1)
			length += 1
		}
		// Splice back over the bytes just scanned to extract the string.
		f.SetString(string(reader.Splice(-length, length)))
	case reflect.Struct:
		raw := reader.Splice(tag.Index, tag.Length)
		reader := io.CreateReader(raw)
		readStruct(reader, f.Type(), f)
	case reflect.Slice:
		sliceType := tag.SliceLengthType
		// The element count is stored inline, just before the elements;
		// its width (2 or 4 bytes) depends on the tag's slice length type.
		length := func() int {
			switch sliceType {
			default:
				return int(reader.ReadUInt32(tag.Index))
			case util.SliceUint32:
				return int(reader.ReadUInt32(tag.Index))
			case util.SliceUint16:
				return int(reader.ReadUInt16(tag.Index))
			}
		}()
		// Create the slice
		f.Set(reflect.MakeSlice(f.Type(), length, length))
		// Elements follow the count, tag.Length bytes each.
		raw := reader.Splice(tag.Index+sliceType.GetLength(), tag.Length*length)
		reader := io.CreateReader(raw)
		readArrayOrSlice(reader, tag, f)
	case reflect.Array:
		raw := reader.Splice(tag.Index, f.Len()*tag.Length)
		reader := io.CreateReader(raw)
		readArrayOrSlice(reader, tag, f)
	}
}
// readStruct populates every field of a struct with values from the
// reader, driven by each field's `gta` tag.
//
// NOTE(review): a malformed tag silently aborts the loop, leaving the
// remaining fields at their zero values — confirm this is intentional.
func readStruct(reader *io.Reader, structType reflect.Type, structValue reflect.Value) {
	// Iterate over the structs fields
	for i := 0; i < structType.NumField(); i++ {
		fieldType := structType.Field(i)
		fieldValue := structValue.Field(i)
		tag, err := util.GetGtaTag(fieldType.Tag.Get("gta"))
		if err != nil {
			return
		}
		read(reader, tag, fieldValue)
	}
}
// readArrayOrSlice populates each element of an array or slice value with
// data from the reader. Elements are laid out contiguously, tag.Length
// bytes each (as established by the Slice/Array cases in read), so element
// i starts at byte offset i*tag.Length.
func readArrayOrSlice(reader *io.Reader, tag *util.GtaTag, value reflect.Value) {
	// Iterate over the array
	for i := 0; i < value.Len(); i++ {
		// Fix: the stride and span of each element are its byte size
		// (tag.Length), not the element count (value.Len()).
		subReader := io.CreateReader(reader.Splice(i*tag.Length, tag.Length))
		read(subReader, &util.GtaTag{
			// We're using a new reader, so need to start fresh
			Index:  0,
			Length: tag.Length,
		}, value.Index(i))
	}
}
// isAtBoundary reports whether the reader is currently positioned at the
// start of a literal "BLOCK" section marker.
func isAtBoundary(r *io.Reader) bool {
	if r.Peek(0) != 'B' || r.Peek(1) != 'L' || r.Peek(2) != 'O' {
		return false
	}
	return r.Peek(3) == 'C' && r.Peek(4) == 'K'
}
func readBlock(r *io.Reader) []byte {
var start = r.Index()
for r.Available() && !isAtBoundary(r) {
r.Skip(1)
}
return r.Splice(start - r.Index(), r.Index() - start)
} | gtasa/parser.go | 0.638046 | 0.457621 | parser.go | starcoder |
package enumerable
// MapIntToInt applies f to every element of in, returning the results as a
// new []int of the same length.
func MapIntToInt(in []int, f func(int) int) []int {
	out := make([]int, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapIntToFloat64 applies f to every element of in, returning the results
// as a new []float64 of the same length.
func MapIntToFloat64(in []int, f func(int) float64) []float64 {
	out := make([]float64, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapIntToBool applies f to every element of in, returning the results as
// a new []bool of the same length.
func MapIntToBool(in []int, f func(int) bool) []bool {
	out := make([]bool, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapIntToString applies f to every element of in, returning the results
// as a new []string of the same length.
func MapIntToString(in []int, f func(int) string) []string {
	out := make([]string, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapIntToAnyValue applies f to every element of in, returning the results
// as a new []AnyValue of the same length.
func MapIntToAnyValue(in []int, f func(int) AnyValue) []AnyValue {
	out := make([]AnyValue, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapFloat64ToInt applies f to every element of in, returning the results
// as a new []int of the same length.
func MapFloat64ToInt(in []float64, f func(float64) int) []int {
	out := make([]int, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapFloat64ToFloat64 applies f to every element of in, returning the
// results as a new []float64 of the same length.
func MapFloat64ToFloat64(in []float64, f func(float64) float64) []float64 {
	out := make([]float64, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapFloat64ToBool applies f to every element of in, returning the results
// as a new []bool of the same length.
func MapFloat64ToBool(in []float64, f func(float64) bool) []bool {
	out := make([]bool, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapFloat64ToString applies f to every element of in, returning the
// results as a new []string of the same length.
func MapFloat64ToString(in []float64, f func(float64) string) []string {
	out := make([]string, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapFloat64ToAnyValue applies f to every element of in, returning the
// results as a new []AnyValue of the same length.
func MapFloat64ToAnyValue(in []float64, f func(float64) AnyValue) []AnyValue {
	out := make([]AnyValue, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapBoolToInt applies f to every element of in, returning the results as
// a new []int of the same length.
func MapBoolToInt(in []bool, f func(bool) int) []int {
	out := make([]int, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapBoolToFloat64 applies f to every element of in, returning the results
// as a new []float64 of the same length.
func MapBoolToFloat64(in []bool, f func(bool) float64) []float64 {
	out := make([]float64, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapBoolToBool applies f to every element of in, returning the results as
// a new []bool of the same length.
func MapBoolToBool(in []bool, f func(bool) bool) []bool {
	out := make([]bool, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapBoolToString applies f to every element of in, returning the results
// as a new []string of the same length.
func MapBoolToString(in []bool, f func(bool) string) []string {
	out := make([]string, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapBoolToAnyValue applies f to every element of in, returning the
// results as a new []AnyValue of the same length.
func MapBoolToAnyValue(in []bool, f func(bool) AnyValue) []AnyValue {
	out := make([]AnyValue, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapStringToInt applies f to every element of in, returning the results
// as a new []int of the same length.
func MapStringToInt(in []string, f func(string) int) []int {
	out := make([]int, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapStringToFloat64 applies f to every element of in, returning the
// results as a new []float64 of the same length.
func MapStringToFloat64(in []string, f func(string) float64) []float64 {
	out := make([]float64, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapStringToBool applies f to every element of in, returning the results
// as a new []bool of the same length.
func MapStringToBool(in []string, f func(string) bool) []bool {
	out := make([]bool, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapStringToString applies f to every element of in, returning the
// results as a new []string of the same length.
func MapStringToString(in []string, f func(string) string) []string {
	out := make([]string, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapStringToAnyValue applies f to every element of in, returning the
// results as a new []AnyValue of the same length.
func MapStringToAnyValue(in []string, f func(string) AnyValue) []AnyValue {
	out := make([]AnyValue, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapAnyValueToInt applies f to every element of in, returning the results
// as a new []int of the same length.
func MapAnyValueToInt(in []AnyValue, f func(AnyValue) int) []int {
	out := make([]int, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapAnyValueToFloat64 applies f to every element of in, returning the
// results as a new []float64 of the same length.
func MapAnyValueToFloat64(in []AnyValue, f func(AnyValue) float64) []float64 {
	out := make([]float64, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapAnyValueToBool applies f to every element of in, returning the
// results as a new []bool of the same length.
func MapAnyValueToBool(in []AnyValue, f func(AnyValue) bool) []bool {
	out := make([]bool, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapAnyValueToString applies f to every element of in, returning the
// results as a new []string of the same length.
func MapAnyValueToString(in []AnyValue, f func(AnyValue) string) []string {
	out := make([]string, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// MapAnyValueToAnyValue applies f to every element of in, returning the
// results as a new []AnyValue of the same length.
func MapAnyValueToAnyValue(in []AnyValue, f func(AnyValue) AnyValue) []AnyValue {
	out := make([]AnyValue, len(in))
	for i := range in {
		out[i] = f(in[i])
	}
	return out
}
// mapFuncs registers every generated Map* variant in one slice so callers
// can enumerate the implementations without naming each one.
var mapFuncs = []interface{}{
	MapIntToInt,
	MapIntToFloat64,
	MapIntToBool,
	MapIntToString,
	MapIntToAnyValue,
	MapFloat64ToInt,
	MapFloat64ToFloat64,
	MapFloat64ToBool,
	MapFloat64ToString,
	MapFloat64ToAnyValue,
	MapBoolToInt,
	MapBoolToFloat64,
	MapBoolToBool,
	MapBoolToString,
	MapBoolToAnyValue,
	MapStringToInt,
	MapStringToFloat64,
	MapStringToBool,
	MapStringToString,
	MapStringToAnyValue,
	MapAnyValueToInt,
	MapAnyValueToFloat64,
	MapAnyValueToBool,
	MapAnyValueToString,
	MapAnyValueToAnyValue,
}
package prop
import (
"math"
"time"
)
// DurationConstraint is an interface to represent time.Duration constraint.
// Compare scores a candidate value, returning a normalized distance
// (0 = ideal, 1 = worst) and whether the candidate is acceptable at all.
// Value returns the single desired value when the constraint pins one
// down; the bool reports whether such a value exists.
type DurationConstraint interface {
	Compare(time.Duration) (float64, bool)
	Value() (time.Duration, bool)
}
// Duration specifies ideal duration value.
// Any value may be selected, but closest value takes priority.
type Duration time.Duration

// Compare implements DurationConstraint. It returns a normalized distance
// in [0, 1]: 0 for an exact match, growing as a diverges from the ideal
// value. The boolean result is always true because any value is
// acceptable.
func (d Duration) Compare(a time.Duration) (float64, bool) {
	// Short-circuit exact matches. This also fixes the 0/0 = NaN result
	// that the plain formula produces when both the ideal and the
	// candidate are zero.
	if a == time.Duration(d) {
		return 0.0, true
	}
	return math.Abs(float64(a-time.Duration(d))) / math.Max(math.Abs(float64(a)), math.Abs(float64(d))), true
}

// Value implements DurationConstraint.
func (d Duration) Value() (time.Duration, bool) { return time.Duration(d), true }
// DurationExact specifies exact duration value.
type DurationExact time.Duration

// Compare implements DurationConstraint. The candidate either matches
// exactly (distance 0, accepted) or is rejected outright (distance 1).
func (d DurationExact) Compare(a time.Duration) (float64, bool) {
	if a != time.Duration(d) {
		return 1.0, false
	}
	return 0.0, true
}

// Value implements DurationConstraint.
func (d DurationExact) Value() (time.Duration, bool) { return time.Duration(d), true }
// DurationOneOf specifies list of expected duration values.
type DurationOneOf []time.Duration

// Compare implements DurationConstraint. A candidate contained in the list
// scores 0 and is accepted; anything else scores 1 and is rejected.
func (d DurationOneOf) Compare(a time.Duration) (float64, bool) {
	for i := range d {
		if d[i] == a {
			return 0.0, true
		}
	}
	return 1.0, false
}

// Value implements DurationConstraint. No single value is pinned down.
func (DurationOneOf) Value() (time.Duration, bool) { return 0, false }
// DurationRanged specifies range of expected duration value.
// If Ideal is non-zero, closest value to Ideal takes priority.
type DurationRanged struct {
	Min   time.Duration
	Max   time.Duration
	Ideal time.Duration
}

// Compare implements DurationConstraint. Candidates outside [Min, Max] are
// rejected with distance 1. Inside the range, distance grows linearly from
// 0 at Ideal to 1 at the nearer bound; a zero Min, Max or Ideal relaxes
// the corresponding check.
func (d DurationRanged) Compare(a time.Duration) (float64, bool) {
	if (d.Min != 0 && a < d.Min) || (d.Max != 0 && a > d.Max) {
		// Out of range.
		return 1.0, false
	}
	if d.Ideal == 0 || a == d.Ideal {
		// No preference specified, or a perfect match.
		return 0.0, true
	}
	if a < d.Ideal {
		if d.Min == 0 {
			// Unbounded below: all smaller values are equally fine.
			return 0.0, true
		}
		return float64(d.Ideal-a) / float64(d.Ideal-d.Min), true
	}
	if d.Max == 0 {
		// Unbounded above: all larger values are equally fine.
		return 0.0, true
	}
	return float64(a-d.Ideal) / float64(d.Max-d.Ideal), true
}

// Value implements DurationConstraint. No single value is pinned down.
func (DurationRanged) Value() (time.Duration, bool) { return 0, false }
package coord
import (
"math"
)
// Coordinate systems handled by this package:
//   WGS-84: the Earth coordinate system used internationally (GPS).
//   GCJ-02: "Mars coordinates" — WGS-84 obfuscated by the Chinese state
//           algorithm; used by Google Maps (China) and AMap (Gaode).
//   BD-09:  Baidu's coordinate system, a further obfuscation of GCJ-02.
const (
	// X_PI is the scaled pi constant used by the BD-09 <-> GCJ-02 formulas.
	X_PI = math.Pi * 3000.0 / 180.0
	// OFFSET is the ellipsoid eccentricity-squared term used when
	// computing the GCJ-02 offset (presumably Krasovsky 1940 — confirm).
	OFFSET = 0.00669342162296594323
	// AXIS is the ellipsoid semi-major axis in meters.
	AXIS = 6378245.0
)
// BD09toGCJ02 converts BD-09 (Baidu) coordinates to GCJ-02 (Mars)
// coordinates by undoing Baidu's polar-offset obfuscation.
func BD09toGCJ02(lon, lat float64) (float64, float64) {
	x, y := lon-0.0065, lat-0.006
	z := math.Sqrt(x*x+y*y) - 0.00002*math.Sin(y*X_PI)
	theta := math.Atan2(y, x) - 0.000003*math.Cos(x*X_PI)
	return z * math.Cos(theta), z * math.Sin(theta)
}
// GCJ02toBD09 converts GCJ-02 (Mars) coordinates to BD-09 (Baidu)
// coordinates by applying Baidu's polar-offset obfuscation.
func GCJ02toBD09(lon, lat float64) (float64, float64) {
	z := math.Sqrt(lon*lon+lat*lat) + 0.00002*math.Sin(lat*X_PI)
	theta := math.Atan2(lat, lon) + 0.000003*math.Cos(lon*X_PI)
	return z*math.Cos(theta) + 0.0065, z*math.Sin(theta) + 0.006
}
// WGS84toGCJ02 converts WGS-84 coordinates to GCJ-02 (Mars) coordinates.
// Coordinates outside China are returned unchanged.
func WGS84toGCJ02(lon, lat float64) (float64, float64) {
	if isOutOFChina(lon, lat) {
		return lon, lat
	}
	return delta(lon, lat)
}
// GCJ02toWGS84 converts GCJ-02 (Mars) coordinates back to WGS-84.
// Coordinates outside China are returned unchanged.
func GCJ02toWGS84(lon, lat float64) (float64, float64) {
	if isOutOFChina(lon, lat) {
		return lon, lat
	}
	// The offset is approximately symmetric, so subtracting the forward
	// offset (2*input - shifted) approximately inverts the transform.
	mgLon, mgLat := delta(lon, lat)
	return lon + lon - mgLon, lat + lat - mgLat
}
// BD09toWGS84 converts BD-09 (Baidu) coordinates to WGS-84 by chaining
// the BD-09 -> GCJ-02 and GCJ-02 -> WGS-84 conversions.
func BD09toWGS84(lon, lat float64) (float64, float64) {
	gLon, gLat := BD09toGCJ02(lon, lat)
	return GCJ02toWGS84(gLon, gLat)
}
// WGS84toBD09 converts WGS-84 coordinates to BD-09 (Baidu) by chaining
// the WGS-84 -> GCJ-02 and GCJ-02 -> BD-09 conversions.
func WGS84toBD09(lon, lat float64) (float64, float64) {
	gLon, gLat := WGS84toGCJ02(lon, lat)
	return GCJ02toBD09(gLon, gLat)
}
// delta applies the GCJ-02 obfuscation offset to a WGS-84 coordinate and
// returns the shifted (Mars) coordinate. The offset polynomials operate on
// the coordinate translated by (-105, -35) and are then rescaled onto the
// reference ellipsoid.
func delta(lon, lat float64) (float64, float64) {
	dLat := transformlat(lon-105.0, lat-35.0)
	dLon := transformlng(lon-105.0, lat-35.0)
	radLat := lat / 180.0 * math.Pi
	magic := math.Sin(radLat)
	magic = 1 - OFFSET*magic*magic
	sqrtMagic := math.Sqrt(magic)
	dLat = (dLat * 180.0) / ((AXIS * (1 - OFFSET)) / (magic * sqrtMagic) * math.Pi)
	dLon = (dLon * 180.0) / (AXIS / sqrtMagic * math.Cos(radLat) * math.Pi)
	return lon + dLon, lat + dLat
}
// transformlat evaluates the latitude component of the GCJ-02 offset
// polynomial for a coordinate already translated by (-105, -35).
func transformlat(lon, lat float64) float64 {
	base := -100.0 + 2.0*lon + 3.0*lat + 0.2*lat*lat + 0.1*lon*lat + 0.2*math.Sqrt(math.Abs(lon))
	wave1 := (20.0*math.Sin(6.0*lon*math.Pi) + 20.0*math.Sin(2.0*lon*math.Pi)) * 2.0 / 3.0
	wave2 := (20.0*math.Sin(lat*math.Pi) + 40.0*math.Sin(lat/3.0*math.Pi)) * 2.0 / 3.0
	wave3 := (160.0*math.Sin(lat/12.0*math.Pi) + 320*math.Sin(lat*math.Pi/30.0)) * 2.0 / 3.0
	return base + wave1 + wave2 + wave3
}
// transformlng evaluates the longitude component of the GCJ-02 offset
// polynomial for a coordinate already translated by (-105, -35).
func transformlng(lon, lat float64) float64 {
	base := 300.0 + lon + 2.0*lat + 0.1*lon*lon + 0.1*lon*lat + 0.1*math.Sqrt(math.Abs(lon))
	wave1 := (20.0*math.Sin(6.0*lon*math.Pi) + 20.0*math.Sin(2.0*lon*math.Pi)) * 2.0 / 3.0
	wave2 := (20.0*math.Sin(lon*math.Pi) + 40.0*math.Sin(lon/3.0*math.Pi)) * 2.0 / 3.0
	wave3 := (150.0*math.Sin(lon/12.0*math.Pi) + 300.0*math.Sin(lon/30.0*math.Pi)) * 2.0 / 3.0
	return base + wave1 + wave2 + wave3
}
// isOutOFChina reports whether the coordinate falls outside the rough
// bounding box of China used by the GCJ-02 algorithm.
func isOutOFChina(lon, lat float64) bool {
	inChina := lon > 73.66 && lon < 135.05 && lat > 3.86 && lat < 53.55
	return !inChina
}
package enginetest
import (
"github.com/dolthub/go-mysql-server/enginetest/queries"
)
// DoltDiffPlanTests are tests that check our query plans for various
// operations on the dolt diff system tables. Each entry pairs a query with
// the exact plan-tree string the engine is expected to produce, verifying
// that filters on to_pk columns are pushed down into index range scans.
var DoltDiffPlanTests = []queries.QueryPlanTest{
	{
		// Point lookup on a single-column key.
		Query: `select * from dolt_diff_one_pk where to_pk=1`,
		ExpectedPlan: "Exchange\n" +
			" └─ IndexedTableAccess(dolt_diff_one_pk on [dolt_diff_one_pk.to_pk] with ranges: [{[10, 100]}])\n" +
			"",
	},
	{
		// Closed range on a single-column key.
		Query: `select * from dolt_diff_one_pk where to_pk>=10 and to_pk<=100`,
		ExpectedPlan: "Exchange\n" +
			" └─ IndexedTableAccess(dolt_diff_one_pk on [dolt_diff_one_pk.to_pk] with ranges: [{[10, 100]}])\n" +
			"",
	},
	{
		// Prefix lookup: only the first column of the composite key is
		// constrained, so the second column's range is unbounded.
		Query: `select * from dolt_diff_two_pk where to_pk1=1`,
		ExpectedPlan: "Exchange\n" +
			" └─ IndexedTableAccess(dolt_diff_two_pk on [dolt_diff_two_pk.to_pk1,dolt_diff_two_pk.to_pk2] with ranges: [{[1, 1], (-∞, ∞)}])\n" +
			"",
	},
	{
		// Full composite-key point lookup.
		Query: `select * from dolt_diff_two_pk where to_pk1=1 and to_pk2=2`,
		ExpectedPlan: "Exchange\n" +
			" └─ IndexedTableAccess(dolt_diff_two_pk on [dolt_diff_two_pk.to_pk1,dolt_diff_two_pk.to_pk2] with ranges: [{[1, 1], [2, 2]}])\n" +
			"",
	},
	{
		// Open-ended ranges on both composite-key columns.
		Query: `select * from dolt_diff_two_pk where to_pk1 < 1 and to_pk2 > 10`,
		ExpectedPlan: "Exchange\n" +
			" └─ IndexedTableAccess(dolt_diff_two_pk on [dolt_diff_two_pk.to_pk1,dolt_diff_two_pk.to_pk2] with ranges: [{(-∞, 1), (10, ∞)}])\n" +
			"",
	},
}
// DoltDiffPlanNewFormatTests mirrors DoltDiffPlanTests for the new storage
// format, where the index scan is inexact and so each plan retains an
// explicit Filter node above the IndexedTableAccess.
var DoltDiffPlanNewFormatTests = []queries.QueryPlanTest{
	{
		Query: `select * from dolt_diff_one_pk where to_pk=1`,
		ExpectedPlan: "Exchange\n" +
			" └─ Filter(dolt_diff_one_pk.to_pk = 1)\n" +
			"     └─ IndexedTableAccess(dolt_diff_one_pk on [dolt_diff_one_pk.to_pk] with ranges: [{[1, 1]}])\n" +
			"",
	},
	{
		Query: `select * from dolt_diff_one_pk where to_pk>=10 and to_pk<=100`,
		ExpectedPlan: "Exchange\n" +
			" └─ Filter((dolt_diff_one_pk.to_pk >= 10) AND (dolt_diff_one_pk.to_pk <= 100))\n" +
			"     └─ IndexedTableAccess(dolt_diff_one_pk on [dolt_diff_one_pk.to_pk] with ranges: [{[10, 100]}])\n" +
			"",
	},
	{
		Query: `select * from dolt_diff_two_pk where to_pk1=1`,
		ExpectedPlan: "Exchange\n" +
			" └─ Filter(dolt_diff_two_pk.to_pk1 = 1)\n" +
			"     └─ IndexedTableAccess(dolt_diff_two_pk on [dolt_diff_two_pk.to_pk1,dolt_diff_two_pk.to_pk2] with ranges: [{[1, 1], (-∞, ∞)}])\n" +
			"",
	},
	{
		Query: `select * from dolt_diff_two_pk where to_pk1=1 and to_pk2=2`,
		ExpectedPlan: "Exchange\n" +
			" └─ Filter((dolt_diff_two_pk.to_pk1 = 1) AND (dolt_diff_two_pk.to_pk2 = 2))\n" +
			"     └─ IndexedTableAccess(dolt_diff_two_pk on [dolt_diff_two_pk.to_pk1,dolt_diff_two_pk.to_pk2] with ranges: [{[1, 1], [2, 2]}])\n" +
			"",
	},
	{
		Query: `select * from dolt_diff_two_pk where to_pk1 < 1 and to_pk2 > 10`,
		ExpectedPlan: "Exchange\n" +
			" └─ Filter((dolt_diff_two_pk.to_pk1 < 1) AND (dolt_diff_two_pk.to_pk2 > 10))\n" +
			"     └─ IndexedTableAccess(dolt_diff_two_pk on [dolt_diff_two_pk.to_pk1,dolt_diff_two_pk.to_pk2] with ranges: [{(-∞, 1), (10, ∞)}])\n" +
			"",
	},
}
var NewFormatQueryPlanTests = []queries.QueryPlanTest{
{
Query: `SELECT * FROM one_pk ORDER BY pk`,
ExpectedPlan: "Projected table access on [pk c1 c2 c3 c4 c5]\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk] with ranges: [{(-∞, ∞)}])\n" +
"",
},
{
Query: `SELECT * FROM two_pk ORDER BY pk1, pk2`,
ExpectedPlan: "Projected table access on [pk1 pk2 c1 c2 c3 c4 c5]\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2] with ranges: [{(-∞, ∞), (-∞, ∞)}])\n" +
"",
},
{
Query: `SELECT * FROM two_pk ORDER BY pk1`,
ExpectedPlan: "Projected table access on [pk1 pk2 c1 c2 c3 c4 c5]\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2] with ranges: [{(-∞, ∞), (-∞, ∞)}])\n" +
"",
},
{
Query: `SELECT pk1 AS one, pk2 AS two FROM two_pk ORDER BY pk1, pk2`,
ExpectedPlan: "Project(two_pk.pk1 as one, two_pk.pk2 as two)\n" +
" └─ Projected table access on [pk1 pk2]\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2] with ranges: [{(-∞, ∞), (-∞, ∞)}])\n" +
"",
},
{
Query: `SELECT pk1 AS one, pk2 AS two FROM two_pk ORDER BY one, two`,
ExpectedPlan: "Project(two_pk.pk1 as one, two_pk.pk2 as two)\n" +
" └─ Projected table access on [pk1 pk2]\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2] with ranges: [{(-∞, ∞), (-∞, ∞)}])\n" +
"",
},
{
Query: `SELECT t1.i FROM mytable t1 JOIN mytable t2 on t1.i = t2.i + 1 where t1.i = 2 and t2.i = 1`,
ExpectedPlan: "Project(t1.i)\n" +
" └─ IndexedJoin(t1.i = (t2.i + 1))\n" +
" ├─ Filter(t2.i = 1)\n" +
" │ └─ TableAlias(t2)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{[1, 1]}])\n" +
" └─ Filter(t1.i = 2)\n" +
" └─ TableAlias(t1)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `select row_number() over (order by i desc), mytable.i as i2
from mytable join othertable on i = i2 order by 1`,
ExpectedPlan: "Sort(row_number() over (order by i desc) ASC)\n" +
" └─ Project(row_number() over ( order by [mytable.i, idx=0, type=BIGINT, nullable=false] DESC) as row_number() over (order by i desc), i2)\n" +
" └─ Window(row_number() over ( order by [mytable.i, idx=0, type=BIGINT, nullable=false] DESC), mytable.i as i2)\n" +
" └─ IndexedJoin(mytable.i = othertable.i2)\n" +
" ├─ Table(mytable)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `SELECT * FROM one_pk_two_idx WHERE v1 < 2 AND v2 IS NOT NULL`,
ExpectedPlan: "Filter((one_pk_two_idx.v1 < 2) AND (NOT(one_pk_two_idx.v2 IS NULL)))\n" +
" └─ Projected table access on [pk v1 v2]\n" +
" └─ IndexedTableAccess(one_pk_two_idx on [one_pk_two_idx.v1,one_pk_two_idx.v2] with ranges: [{(-∞, 2), (-∞, ∞)}])\n" +
"",
},
{
Query: `SELECT * FROM one_pk_two_idx WHERE v1 IN (1, 2) AND v2 <= 2`,
ExpectedPlan: "Filter((one_pk_two_idx.v1 HASH IN (1, 2)) AND (one_pk_two_idx.v2 <= 2))\n" +
" └─ Projected table access on [pk v1 v2]\n" +
" └─ IndexedTableAccess(one_pk_two_idx on [one_pk_two_idx.v1,one_pk_two_idx.v2] with ranges: [{[2, 2], (-∞, 2]}, {[1, 1], (-∞, 2]}])\n" +
"",
},
{
Query: `SELECT * FROM one_pk_three_idx WHERE v1 > 2 AND v2 = 3`,
ExpectedPlan: "Filter((one_pk_three_idx.v1 > 2) AND (one_pk_three_idx.v2 = 3))\n" +
" └─ Projected table access on [pk v1 v2 v3]\n" +
" └─ IndexedTableAccess(one_pk_three_idx on [one_pk_three_idx.v1,one_pk_three_idx.v2,one_pk_three_idx.v3] with ranges: [{(2, ∞), [3, 3], (-∞, ∞)}])\n" +
"",
},
{
Query: `SELECT * FROM one_pk_three_idx WHERE v1 > 2 AND v3 = 3`,
ExpectedPlan: "Filter((one_pk_three_idx.v1 > 2) AND (one_pk_three_idx.v3 = 3))\n" +
" └─ Projected table access on [pk v1 v2 v3]\n" +
" └─ IndexedTableAccess(one_pk_three_idx on [one_pk_three_idx.v1,one_pk_three_idx.v2,one_pk_three_idx.v3] with ranges: [{(2, ∞), (-∞, ∞), (-∞, ∞)}])\n" +
"",
},
{
Query: `select row_number() over (order by i desc), mytable.i as i2
from mytable join othertable on i = i2
where mytable.i = 2
order by 1`,
ExpectedPlan: "Sort(row_number() over (order by i desc) ASC)\n" +
" └─ Project(row_number() over ( order by [mytable.i, idx=0, type=BIGINT, nullable=false] DESC) as row_number() over (order by i desc), i2)\n" +
" └─ Window(row_number() over ( order by [mytable.i, idx=0, type=BIGINT, nullable=false] DESC), mytable.i as i2)\n" +
" └─ IndexedJoin(mytable.i = othertable.i2)\n" +
" ├─ Filter(mytable.i = 2)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{[2, 2]}])\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `INSERT INTO mytable(i,s) SELECT t1.i, 'hello' FROM mytable t1 JOIN mytable t2 on t1.i = t2.i + 1 where t1.i = 2 and t2.i = 1`,
ExpectedPlan: "Insert(i, s)\n" +
" ├─ Table(mytable)\n" +
" └─ Project(i, s)\n" +
" └─ Project(t1.i, 'hello')\n" +
" └─ IndexedJoin(t1.i = (t2.i + 1))\n" +
" ├─ Filter(t2.i = 1)\n" +
" │ └─ TableAlias(t2)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{[1, 1]}])\n" +
" └─ Filter(t1.i = 2)\n" +
" └─ TableAlias(t1)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT /*+ JOIN_ORDER(t1, t2) */ t1.i FROM mytable t1 JOIN mytable t2 on t1.i = t2.i + 1 where t1.i = 2 and t2.i = 1`,
ExpectedPlan: "Project(t1.i)\n" +
" └─ InnerJoin(t1.i = (t2.i + 1))\n" +
" ├─ Filter(t1.i = 2)\n" +
" │ └─ Projected table access on [i]\n" +
" │ └─ TableAlias(t1)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{[2, 2]}])\n" +
" └─ Filter(t2.i = 1)\n" +
" └─ Projected table access on [i]\n" +
" └─ TableAlias(t2)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{[1, 1]}])\n" +
"",
},
{
Query: `SELECT /*+ JOIN_ORDER(t1, mytable) */ t1.i FROM mytable t1 JOIN mytable t2 on t1.i = t2.i + 1 where t1.i = 2 and t2.i = 1`,
ExpectedPlan: "Project(t1.i)\n" +
" └─ IndexedJoin(t1.i = (t2.i + 1))\n" +
" ├─ Filter(t2.i = 1)\n" +
" │ └─ TableAlias(t2)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{[1, 1]}])\n" +
" └─ Filter(t1.i = 2)\n" +
" └─ TableAlias(t1)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT /*+ JOIN_ORDER(t1, t2, t3) */ t1.i FROM mytable t1 JOIN mytable t2 on t1.i = t2.i + 1 where t1.i = 2 and t2.i = 1`,
ExpectedPlan: "Project(t1.i)\n" +
" └─ IndexedJoin(t1.i = (t2.i + 1))\n" +
" ├─ Filter(t2.i = 1)\n" +
" │ └─ TableAlias(t2)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{[1, 1]}])\n" +
" └─ Filter(t1.i = 2)\n" +
" └─ TableAlias(t1)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT t1.i FROM mytable t1 JOIN mytable t2 on t1.i = t2.i + 1 where t1.i = 2 and t2.i = 1`,
ExpectedPlan: "Project(t1.i)\n" +
" └─ IndexedJoin(t1.i = (t2.i + 1))\n" +
" ├─ Filter(t2.i = 1)\n" +
" │ └─ TableAlias(t2)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{[1, 1]}])\n" +
" └─ Filter(t1.i = 2)\n" +
" └─ TableAlias(t1)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT i, i2, s2 FROM mytable INNER JOIN othertable ON i = i2`,
ExpectedPlan: "Project(mytable.i, othertable.i2, othertable.s2)\n" +
" └─ IndexedJoin(mytable.i = othertable.i2)\n" +
" ├─ Table(mytable)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `SELECT i, i2, s2 FROM mytable INNER JOIN othertable ON i = i2 OR s = s2`,
ExpectedPlan: "Project(mytable.i, othertable.i2, othertable.s2)\n" +
" └─ IndexedJoin((mytable.i = othertable.i2) OR (mytable.s = othertable.s2))\n" +
" ├─ Table(mytable)\n" +
" └─ Concat\n" +
" ├─ IndexedTableAccess(othertable on [othertable.i2])\n" +
" └─ IndexedTableAccess(othertable on [othertable.s2])\n" +
"",
},
{
Query: `SELECT i, i2, s2 FROM mytable INNER JOIN othertable ot ON i = i2 OR s = s2`,
ExpectedPlan: "Project(mytable.i, ot.i2, ot.s2)\n" +
" └─ IndexedJoin((mytable.i = ot.i2) OR (mytable.s = ot.s2))\n" +
" ├─ Table(mytable)\n" +
" └─ TableAlias(ot)\n" +
" └─ Concat\n" +
" ├─ IndexedTableAccess(othertable on [othertable.i2])\n" +
" └─ IndexedTableAccess(othertable on [othertable.s2])\n" +
"",
},
{
Query: `SELECT i, i2, s2 FROM mytable INNER JOIN othertable ON i = i2 OR SUBSTRING_INDEX(s, ' ', 1) = s2`,
ExpectedPlan: "Project(mytable.i, othertable.i2, othertable.s2)\n" +
" └─ IndexedJoin((mytable.i = othertable.i2) OR (SUBSTRING_INDEX(mytable.s, ' ', 1) = othertable.s2))\n" +
" ├─ Table(mytable)\n" +
" └─ Concat\n" +
" ├─ IndexedTableAccess(othertable on [othertable.i2])\n" +
" └─ IndexedTableAccess(othertable on [othertable.s2])\n" +
"",
},
{
Query: `SELECT i, i2, s2 FROM mytable INNER JOIN othertable ON i = i2 OR SUBSTRING_INDEX(s, ' ', 1) = s2 OR SUBSTRING_INDEX(s, ' ', 2) = s2`,
ExpectedPlan: "Project(mytable.i, othertable.i2, othertable.s2)\n" +
" └─ IndexedJoin(((mytable.i = othertable.i2) OR (SUBSTRING_INDEX(mytable.s, ' ', 1) = othertable.s2)) OR (SUBSTRING_INDEX(mytable.s, ' ', 2) = othertable.s2))\n" +
" ├─ Table(mytable)\n" +
" └─ Concat\n" +
" ├─ Concat\n" +
" │ ├─ IndexedTableAccess(othertable on [othertable.i2])\n" +
" │ └─ IndexedTableAccess(othertable on [othertable.s2])\n" +
" └─ IndexedTableAccess(othertable on [othertable.s2])\n" +
"",
},
{
Query: `SELECT i, i2, s2 FROM mytable INNER JOIN othertable ON i = i2 UNION SELECT i, i2, s2 FROM mytable INNER JOIN othertable ON i = i2`,
ExpectedPlan: "Distinct\n" +
" └─ Union\n" +
" ├─ Project(mytable.i, othertable.i2, othertable.s2)\n" +
" │ └─ IndexedJoin(mytable.i = othertable.i2)\n" +
" │ ├─ Table(mytable)\n" +
" │ └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
" └─ Project(mytable.i, othertable.i2, othertable.s2)\n" +
" └─ IndexedJoin(mytable.i = othertable.i2)\n" +
" ├─ Table(mytable)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `SELECT sub.i, sub.i2, sub.s2, ot.i2, ot.s2 FROM (SELECT i, i2, s2 FROM mytable INNER JOIN othertable ON i = i2) sub INNER JOIN othertable ot ON sub.i = ot.i2`,
ExpectedPlan: "Project(sub.i, sub.i2, sub.s2, ot.i2, ot.s2)\n" +
" └─ IndexedJoin(sub.i = ot.i2)\n" +
" ├─ SubqueryAlias(sub)\n" +
" │ └─ Project(mytable.i, othertable.i2, othertable.s2)\n" +
" │ └─ IndexedJoin(mytable.i = othertable.i2)\n" +
" │ ├─ Table(mytable)\n" +
" │ └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
" └─ TableAlias(ot)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `SELECT sub.i, sub.i2, sub.s2, ot.i2, ot.s2 FROM othertable ot INNER JOIN (SELECT i, i2, s2 FROM mytable INNER JOIN othertable ON i = i2) sub ON sub.i = ot.i2`,
ExpectedPlan: "Project(sub.i, sub.i2, sub.s2, ot.i2, ot.s2)\n" +
" └─ IndexedJoin(sub.i = ot.i2)\n" +
" ├─ SubqueryAlias(sub)\n" +
" │ └─ Project(mytable.i, othertable.i2, othertable.s2)\n" +
" │ └─ IndexedJoin(mytable.i = othertable.i2)\n" +
" │ ├─ Table(mytable)\n" +
" │ └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
" └─ TableAlias(ot)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `SELECT sub.i, sub.i2, sub.s2, ot.i2, ot.s2 FROM othertable ot LEFT JOIN (SELECT i, i2, s2 FROM mytable INNER JOIN othertable ON i = i2 WHERE CONVERT(s2, signed) <> 0) sub ON sub.i = ot.i2 WHERE ot.i2 > 0`,
ExpectedPlan: "Project(sub.i, sub.i2, sub.s2, ot.i2, ot.s2)\n" +
" └─ LeftJoin(sub.i = ot.i2)\n" +
" ├─ Filter(ot.i2 > 0)\n" +
" │ └─ TableAlias(ot)\n" +
" │ └─ IndexedTableAccess(othertable on [othertable.i2] with ranges: [{(0, ∞)}])\n" +
" └─ HashLookup(child: (sub.i), lookup: (ot.i2))\n" +
" └─ CachedResults\n" +
" └─ SubqueryAlias(sub)\n" +
" └─ Project(mytable.i, othertable.i2, othertable.s2)\n" +
" └─ IndexedJoin(mytable.i = othertable.i2)\n" +
" ├─ Table(mytable)\n" +
" └─ Filter(NOT((convert(othertable.s2, signed) = 0)))\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `select /*+ JOIN_ORDER( i, k, j ) */ * from one_pk i join one_pk k on i.pk = k.pk join (select pk, rand() r from one_pk) j on i.pk = j.pk`,
ExpectedPlan: "IndexedJoin(i.pk = j.pk)\n" +
" ├─ IndexedJoin(i.pk = k.pk)\n" +
" │ ├─ TableAlias(i)\n" +
" │ │ └─ Table(one_pk)\n" +
" │ └─ TableAlias(k)\n" +
" │ └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
" └─ HashLookup(child: (j.pk), lookup: (i.pk))\n" +
" └─ CachedResults\n" +
" └─ SubqueryAlias(j)\n" +
" └─ Project(one_pk.pk, RAND() as r)\n" +
" └─ Projected table access on [pk]\n" +
" └─ Table(one_pk)\n" +
"",
},
{
Query: `INSERT INTO mytable SELECT sub.i + 10, ot.s2 FROM othertable ot INNER JOIN (SELECT i, i2, s2 FROM mytable INNER JOIN othertable ON i = i2) sub ON sub.i = ot.i2`,
ExpectedPlan: "Insert()\n" +
" ├─ Table(mytable)\n" +
" └─ Project(i, s)\n" +
" └─ Project((sub.i + 10), ot.s2)\n" +
" └─ IndexedJoin(sub.i = ot.i2)\n" +
" ├─ SubqueryAlias(sub)\n" +
" │ └─ Project(mytable.i)\n" +
" │ └─ IndexedJoin(mytable.i = othertable.i2)\n" +
" │ ├─ Table(mytable)\n" +
" │ └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
" └─ TableAlias(ot)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `SELECT mytable.i, selfjoin.i FROM mytable INNER JOIN mytable selfjoin ON mytable.i = selfjoin.i WHERE selfjoin.i IN (SELECT 1 FROM DUAL)`,
ExpectedPlan: "Project(mytable.i, selfjoin.i)\n" +
" └─ Filter(selfjoin.i IN (Project(1)\n" +
" └─ Table(dual)\n" +
" ))\n" +
" └─ IndexedJoin(mytable.i = selfjoin.i)\n" +
" ├─ Table(mytable)\n" +
" └─ TableAlias(selfjoin)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT s2, i2, i FROM mytable INNER JOIN othertable ON i = i2`,
ExpectedPlan: "Project(othertable.s2, othertable.i2, mytable.i)\n" +
" └─ IndexedJoin(mytable.i = othertable.i2)\n" +
" ├─ Table(mytable)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `SELECT i, i2, s2 FROM othertable JOIN mytable ON i = i2`,
ExpectedPlan: "Project(mytable.i, othertable.i2, othertable.s2)\n" +
" └─ IndexedJoin(mytable.i = othertable.i2)\n" +
" ├─ Table(othertable)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT s2, i2, i FROM othertable JOIN mytable ON i = i2`,
ExpectedPlan: "Project(othertable.s2, othertable.i2, mytable.i)\n" +
" └─ IndexedJoin(mytable.i = othertable.i2)\n" +
" ├─ Table(othertable)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT s2, i2, i FROM othertable JOIN mytable ON i = i2`,
ExpectedPlan: "Project(othertable.s2, othertable.i2, mytable.i)\n" +
" └─ IndexedJoin(mytable.i = othertable.i2)\n" +
" ├─ Table(othertable)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT s2, i2, i FROM othertable JOIN mytable ON i = i2 LIMIT 1`,
ExpectedPlan: "Limit(1)\n" +
" └─ Project(othertable.s2, othertable.i2, mytable.i)\n" +
" └─ IndexedJoin(mytable.i = othertable.i2)\n" +
" ├─ Table(othertable)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT i, i2, s2 FROM mytable INNER JOIN othertable ON i2 = i`,
ExpectedPlan: "Project(mytable.i, othertable.i2, othertable.s2)\n" +
" └─ IndexedJoin(othertable.i2 = mytable.i)\n" +
" ├─ Table(mytable)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `SELECT s2, i2, i FROM mytable INNER JOIN othertable ON i2 = i`,
ExpectedPlan: "Project(othertable.s2, othertable.i2, mytable.i)\n" +
" └─ IndexedJoin(othertable.i2 = mytable.i)\n" +
" ├─ Table(mytable)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `SELECT * FROM MYTABLE JOIN OTHERTABLE ON i = i2 AND NOT (s2 <=> s)`,
ExpectedPlan: "IndexedJoin((mytable.i = othertable.i2) AND (NOT((othertable.s2 <=> mytable.s))))\n" +
" ├─ Table(mytable)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `SELECT * FROM MYTABLE JOIN OTHERTABLE ON i = i2 AND NOT (s2 = s)`,
ExpectedPlan: "IndexedJoin((mytable.i = othertable.i2) AND (NOT((othertable.s2 = mytable.s))))\n" +
" ├─ Table(mytable)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `SELECT * FROM MYTABLE JOIN OTHERTABLE ON i = i2 AND CONCAT(s, s2) IS NOT NULL`,
ExpectedPlan: "IndexedJoin((mytable.i = othertable.i2) AND (NOT(concat(mytable.s, othertable.s2) IS NULL)))\n" +
" ├─ Table(mytable)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `SELECT * FROM MYTABLE JOIN OTHERTABLE ON i = i2 AND s > s2`,
ExpectedPlan: "InnerJoin((mytable.i = othertable.i2) AND (mytable.s > othertable.s2))\n" +
" ├─ Projected table access on [i s]\n" +
" │ └─ Table(mytable)\n" +
" └─ Projected table access on [s2 i2]\n" +
" └─ Table(othertable)\n" +
"",
},
{
Query: `SELECT * FROM MYTABLE JOIN OTHERTABLE ON i = i2 AND NOT(s > s2)`,
ExpectedPlan: "InnerJoin((mytable.i = othertable.i2) AND (NOT((mytable.s > othertable.s2))))\n" +
" ├─ Projected table access on [i s]\n" +
" │ └─ Table(mytable)\n" +
" └─ Projected table access on [s2 i2]\n" +
" └─ Table(othertable)\n" +
"",
},
{
Query: `SELECT /*+ JOIN_ORDER(mytable, othertable) */ s2, i2, i FROM mytable INNER JOIN (SELECT * FROM othertable) othertable ON i2 = i`,
ExpectedPlan: "Project(othertable.s2, othertable.i2, mytable.i)\n" +
" └─ InnerJoin(othertable.i2 = mytable.i)\n" +
" ├─ Table(mytable)\n" +
" └─ HashLookup(child: (othertable.i2), lookup: (mytable.i))\n" +
" └─ CachedResults\n" +
" └─ SubqueryAlias(othertable)\n" +
" └─ Projected table access on [s2 i2]\n" +
" └─ Table(othertable)\n" +
"",
},
{
Query: `SELECT s2, i2, i FROM mytable LEFT JOIN (SELECT * FROM othertable) othertable ON i2 = i`,
ExpectedPlan: "Project(othertable.s2, othertable.i2, mytable.i)\n" +
" └─ LeftJoin(othertable.i2 = mytable.i)\n" +
" ├─ Table(mytable)\n" +
" └─ HashLookup(child: (othertable.i2), lookup: (mytable.i))\n" +
" └─ CachedResults\n" +
" └─ SubqueryAlias(othertable)\n" +
" └─ Projected table access on [s2 i2]\n" +
" └─ Table(othertable)\n" +
"",
},
{
Query: `SELECT s2, i2, i FROM (SELECT * FROM mytable) mytable RIGHT JOIN (SELECT * FROM othertable) othertable ON i2 = i`,
ExpectedPlan: "Project(othertable.s2, othertable.i2, mytable.i)\n" +
" └─ RightJoin(othertable.i2 = mytable.i)\n" +
" ├─ HashLookup(child: (mytable.i), lookup: (othertable.i2))\n" +
" │ └─ CachedResults\n" +
" │ └─ SubqueryAlias(mytable)\n" +
" │ └─ Projected table access on [i s]\n" +
" │ └─ Table(mytable)\n" +
" └─ SubqueryAlias(othertable)\n" +
" └─ Projected table access on [s2 i2]\n" +
" └─ Table(othertable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a WHERE a.s is not null`,
ExpectedPlan: "Filter(NOT(a.s IS NULL))\n" +
" └─ Projected table access on [i s]\n" +
" └─ TableAlias(a)\n" +
" └─ IndexedTableAccess(mytable on [mytable.s] with ranges: [{(<nil>, ∞)}, {(-∞, <nil>)}])\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a inner join mytable b on (a.i = b.s) WHERE a.s is not null`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ IndexedJoin(a.i = b.s)\n" +
" ├─ Filter(NOT(a.s IS NULL))\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.s] with ranges: [{(<nil>, ∞)}, {(-∞, <nil>)}])\n" +
" └─ TableAlias(b)\n" +
" └─ IndexedTableAccess(mytable on [mytable.s])\n" +
"",
},
{
Query: `SELECT /*+ JOIN_ORDER(b, a) */ a.* FROM mytable a inner join mytable b on (a.i = b.s) WHERE a.s is not null`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ IndexedJoin(a.i = b.s)\n" +
" ├─ TableAlias(b)\n" +
" │ └─ Table(mytable)\n" +
" └─ Filter(NOT(a.s IS NULL))\n" +
" └─ TableAlias(a)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a inner join mytable b on (a.i = b.s) WHERE a.s not in ('1', '2', '3', '4')`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ IndexedJoin(a.i = b.s)\n" +
" ├─ Filter(NOT((a.s HASH IN ('1', '2', '3', '4'))))\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.s] with ranges: [{(1, 2)}, {(2, 3)}, {(3, 4)}, {(4, ∞)}, {(-∞, 1)}])\n" +
" └─ TableAlias(b)\n" +
" └─ IndexedTableAccess(mytable on [mytable.s])\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a inner join mytable b on (a.i = b.s) WHERE a.i in (1, 2, 3, 4)`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ IndexedJoin(a.i = b.s)\n" +
" ├─ Filter(a.i HASH IN (1, 2, 3, 4))\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{[2, 2]}, {[3, 3]}, {[4, 4]}, {[1, 1]}])\n" +
" └─ TableAlias(b)\n" +
" └─ IndexedTableAccess(mytable on [mytable.s])\n" +
"",
},
{
Query: `SELECT * FROM mytable WHERE i in (1, 2, 3, 4)`,
ExpectedPlan: "Filter(mytable.i HASH IN (1, 2, 3, 4))\n" +
" └─ Projected table access on [i s]\n" +
" └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{[2, 2]}, {[3, 3]}, {[4, 4]}, {[1, 1]}])\n" +
"",
},
{
Query: `SELECT * FROM mytable WHERE i in (CAST(NULL AS SIGNED), 2, 3, 4)`,
ExpectedPlan: "Filter(mytable.i HASH IN (NULL, 2, 3, 4))\n" +
" └─ Projected table access on [i s]\n" +
" └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{[2, 2]}, {[3, 3]}, {[4, 4]}, {[<nil>, <nil>]}])\n" +
"",
},
{
Query: `SELECT * FROM mytable WHERE i in (1+2)`,
ExpectedPlan: "Filter(mytable.i HASH IN (3))\n" +
" └─ Projected table access on [i s]\n" +
" └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{[3, 3]}])\n" +
"",
},
{
Query: `SELECT * from mytable where upper(s) IN ('FIRST ROW', 'SECOND ROW')`,
ExpectedPlan: "Filter(UPPER(mytable.s) HASH IN ('FIRST ROW', 'SECOND ROW'))\n" +
" └─ Projected table access on [i s]\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT * from mytable where cast(i as CHAR) IN ('a', 'b')`,
ExpectedPlan: "Filter(convert(mytable.i, char) HASH IN ('a', 'b'))\n" +
" └─ Projected table access on [i s]\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT * from mytable where cast(i as CHAR) IN ('1', '2')`,
ExpectedPlan: "Filter(convert(mytable.i, char) HASH IN ('1', '2'))\n" +
" └─ Projected table access on [i s]\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT * from mytable where (i > 2) IN (true)`,
ExpectedPlan: "Filter((mytable.i > 2) HASH IN (true))\n" +
" └─ Projected table access on [i s]\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT * from mytable where (i + 6) IN (7, 8)`,
ExpectedPlan: "Filter((mytable.i + 6) HASH IN (7, 8))\n" +
" └─ Projected table access on [i s]\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT * from mytable where (i + 40) IN (7, 8)`,
ExpectedPlan: "Filter((mytable.i + 40) HASH IN (7, 8))\n" +
" └─ Projected table access on [i s]\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT * from mytable where (i = 1 | false) IN (true)`,
ExpectedPlan: "Filter((mytable.i = 1) HASH IN (true))\n" +
" └─ Projected table access on [i s]\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT * from mytable where (i = 1 & false) IN (true)`,
ExpectedPlan: "Filter((mytable.i = 0) HASH IN (true))\n" +
" └─ Projected table access on [i s]\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT * FROM mytable WHERE i in (2*i)`,
ExpectedPlan: "Filter(mytable.i IN ((2 * mytable.i)))\n" +
" └─ Projected table access on [i s]\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT * FROM mytable WHERE i in (i)`,
ExpectedPlan: "Filter(mytable.i IN (mytable.i))\n" +
" └─ Projected table access on [i s]\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT * from mytable WHERE 4 IN (i + 2)`,
ExpectedPlan: "Filter(4 IN ((mytable.i + 2)))\n" +
" └─ Projected table access on [i s]\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT * from mytable WHERE s IN (cast('first row' AS CHAR))`,
ExpectedPlan: "Filter(mytable.s HASH IN ('first row'))\n" +
" └─ Projected table access on [i s]\n" +
" └─ IndexedTableAccess(mytable on [mytable.s] with ranges: [{[first row, first row]}])\n" +
"",
},
{
Query: `SELECT * from mytable WHERE s IN (lower('SECOND ROW'), 'FIRST ROW')`,
ExpectedPlan: "Filter(mytable.s HASH IN ('second row', 'FIRST ROW'))\n" +
" └─ Projected table access on [i s]\n" +
" └─ IndexedTableAccess(mytable on [mytable.s] with ranges: [{[FIRST ROW, FIRST ROW]}, {[second row, second row]}])\n" +
"",
},
{
Query: `SELECT * from mytable where true IN (i > 3)`,
ExpectedPlan: "Filter(true IN ((mytable.i > 3)))\n" +
" └─ Projected table access on [i s]\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a, mytable b where a.i = b.i`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ IndexedJoin(a.i = b.i)\n" +
" ├─ TableAlias(a)\n" +
" │ └─ Table(mytable)\n" +
" └─ TableAlias(b)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a, mytable b where a.s = b.i OR a.i = 1`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ InnerJoin((a.s = b.i) OR (a.i = 1))\n" +
" ├─ Projected table access on [i s]\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ Table(mytable)\n" +
" └─ Projected table access on [i]\n" +
" └─ TableAlias(b)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a, mytable b where NOT(a.i = b.s OR a.s = b.i)`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ InnerJoin(NOT(((a.i = b.s) OR (a.s = b.i))))\n" +
" ├─ Projected table access on [i s]\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ Table(mytable)\n" +
" └─ Projected table access on [s i]\n" +
" └─ TableAlias(b)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a, mytable b where a.i = b.s OR a.s = b.i IS FALSE`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ InnerJoin((a.i = b.s) OR (a.s = b.i) IS FALSE)\n" +
" ├─ Projected table access on [i s]\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ Table(mytable)\n" +
" └─ Projected table access on [s i]\n" +
" └─ TableAlias(b)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a, mytable b where a.i >= b.i`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ InnerJoin(a.i >= b.i)\n" +
" ├─ Projected table access on [i s]\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ Table(mytable)\n" +
" └─ Projected table access on [i]\n" +
" └─ TableAlias(b)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a, mytable b where a.i = a.s`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ CrossJoin\n" +
" ├─ Filter(a.i = a.s)\n" +
" │ └─ Projected table access on [i s]\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ Table(mytable)\n" +
" └─ TableAlias(b)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a, mytable b where a.i in (2, 432, 7)`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ CrossJoin\n" +
" ├─ Filter(a.i HASH IN (2, 432, 7))\n" +
" │ └─ Projected table access on [i s]\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{[432, 432]}, {[7, 7]}, {[2, 2]}])\n" +
" └─ TableAlias(b)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a, mytable b, mytable c, mytable d where a.i = b.i AND b.i = c.i AND c.i = d.i AND c.i = 2`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ IndexedJoin(a.i = b.i)\n" +
" ├─ TableAlias(a)\n" +
" │ └─ Table(mytable)\n" +
" └─ IndexedJoin(b.i = c.i)\n" +
" ├─ TableAlias(b)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i])\n" +
" └─ IndexedJoin(c.i = d.i)\n" +
" ├─ Filter(c.i = 2)\n" +
" │ └─ TableAlias(c)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i])\n" +
" └─ TableAlias(d)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a, mytable b, mytable c, mytable d where a.i = b.i AND b.i = c.i AND (c.i = d.s OR c.i = 2)`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ InnerJoin((c.i = d.s) OR (c.i = 2))\n" +
" ├─ InnerJoin(b.i = c.i)\n" +
" │ ├─ InnerJoin(a.i = b.i)\n" +
" │ │ ├─ Projected table access on [i s]\n" +
" │ │ │ └─ TableAlias(a)\n" +
" │ │ │ └─ Table(mytable)\n" +
" │ │ └─ Projected table access on [i]\n" +
" │ │ └─ TableAlias(b)\n" +
" │ │ └─ Table(mytable)\n" +
" │ └─ Projected table access on [i]\n" +
" │ └─ TableAlias(c)\n" +
" │ └─ Table(mytable)\n" +
" └─ Projected table access on [s]\n" +
" └─ TableAlias(d)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a, mytable b, mytable c, mytable d where a.i = b.i AND b.i = c.i`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ CrossJoin\n" +
" ├─ InnerJoin(b.i = c.i)\n" +
" │ ├─ InnerJoin(a.i = b.i)\n" +
" │ │ ├─ Projected table access on [i s]\n" +
" │ │ │ └─ TableAlias(a)\n" +
" │ │ │ └─ Table(mytable)\n" +
" │ │ └─ Projected table access on [i]\n" +
" │ │ └─ TableAlias(b)\n" +
" │ │ └─ Table(mytable)\n" +
" │ └─ Projected table access on [i]\n" +
" │ └─ TableAlias(c)\n" +
" │ └─ Table(mytable)\n" +
" └─ TableAlias(d)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a CROSS JOIN mytable b where a.i = b.i`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ IndexedJoin(a.i = b.i)\n" +
" ├─ TableAlias(a)\n" +
" │ └─ Table(mytable)\n" +
" └─ TableAlias(b)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a CROSS JOIN mytable b where a.i = b.i OR a.i = b.s`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ IndexedJoin((a.i = b.i) OR (a.i = b.s))\n" +
" ├─ TableAlias(a)\n" +
" │ └─ Table(mytable)\n" +
" └─ TableAlias(b)\n" +
" └─ Concat\n" +
" ├─ IndexedTableAccess(mytable on [mytable.i])\n" +
" └─ IndexedTableAccess(mytable on [mytable.s])\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a CROSS JOIN mytable b where NOT(a.i = b.s OR a.s = b.i)`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ InnerJoin(NOT(((a.i = b.s) OR (a.s = b.i))))\n" +
" ├─ Projected table access on [i s]\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ Table(mytable)\n" +
" └─ Projected table access on [s i]\n" +
" └─ TableAlias(b)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a CROSS JOIN mytable b where a.i = b.s OR a.s = b.i IS FALSE`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ InnerJoin((a.i = b.s) OR (a.s = b.i) IS FALSE)\n" +
" ├─ Projected table access on [i s]\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ Table(mytable)\n" +
" └─ Projected table access on [s i]\n" +
" └─ TableAlias(b)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a CROSS JOIN mytable b where a.i >= b.i`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ InnerJoin(a.i >= b.i)\n" +
" ├─ Projected table access on [i s]\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ Table(mytable)\n" +
" └─ Projected table access on [i]\n" +
" └─ TableAlias(b)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a CROSS JOIN mytable b where a.i = a.i`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ CrossJoin\n" +
" ├─ Filter(a.i = a.i)\n" +
" │ └─ Projected table access on [i s]\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ Table(mytable)\n" +
" └─ TableAlias(b)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a CROSS JOIN mytable b CROSS JOIN mytable c CROSS JOIN mytable d where a.i = b.i AND b.i = c.i AND c.i = d.i AND c.i = 2`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ IndexedJoin(a.i = b.i)\n" +
" ├─ TableAlias(a)\n" +
" │ └─ Table(mytable)\n" +
" └─ IndexedJoin(b.i = c.i)\n" +
" ├─ TableAlias(b)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i])\n" +
" └─ IndexedJoin(c.i = d.i)\n" +
" ├─ Filter(c.i = 2)\n" +
" │ └─ TableAlias(c)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i])\n" +
" └─ TableAlias(d)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a CROSS JOIN mytable b CROSS JOIN mytable c CROSS JOIN mytable d where a.i = b.i AND b.i = c.i AND (c.i = d.s OR c.i = 2)`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ InnerJoin((c.i = d.s) OR (c.i = 2))\n" +
" ├─ InnerJoin(b.i = c.i)\n" +
" │ ├─ InnerJoin(a.i = b.i)\n" +
" │ │ ├─ Projected table access on [i s]\n" +
" │ │ │ └─ TableAlias(a)\n" +
" │ │ │ └─ Table(mytable)\n" +
" │ │ └─ Projected table access on [i]\n" +
" │ │ └─ TableAlias(b)\n" +
" │ │ └─ Table(mytable)\n" +
" │ └─ Projected table access on [i]\n" +
" │ └─ TableAlias(c)\n" +
" │ └─ Table(mytable)\n" +
" └─ Projected table access on [s]\n" +
" └─ TableAlias(d)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a CROSS JOIN mytable b CROSS JOIN mytable c CROSS JOIN mytable d where a.i = b.i AND b.s = c.s`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ CrossJoin\n" +
" ├─ InnerJoin(b.s = c.s)\n" +
" │ ├─ InnerJoin(a.i = b.i)\n" +
" │ │ ├─ Projected table access on [i s]\n" +
" │ │ │ └─ TableAlias(a)\n" +
" │ │ │ └─ Table(mytable)\n" +
" │ │ └─ Projected table access on [s i]\n" +
" │ │ └─ TableAlias(b)\n" +
" │ │ └─ Table(mytable)\n" +
" │ └─ Projected table access on [s]\n" +
" │ └─ TableAlias(c)\n" +
" │ └─ Table(mytable)\n" +
" └─ TableAlias(d)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT a.* FROM mytable a inner join mytable b on (a.i = b.s) WHERE a.i BETWEEN 10 AND 20`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ IndexedJoin(a.i = b.s)\n" +
" ├─ Filter(a.i BETWEEN 10 AND 20)\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{[10, 20]}])\n" +
" └─ TableAlias(b)\n" +
" └─ IndexedTableAccess(mytable on [mytable.s])\n" +
"",
},
{
Query: `SELECT lefttable.i, righttable.s
FROM (SELECT * FROM mytable) lefttable
JOIN (SELECT * FROM mytable) righttable
ON lefttable.i = righttable.i AND righttable.s = lefttable.s
ORDER BY lefttable.i ASC`,
ExpectedPlan: "Sort(lefttable.i ASC)\n" +
" └─ Project(lefttable.i, righttable.s)\n" +
" └─ InnerJoin((lefttable.i = righttable.i) AND (righttable.s = lefttable.s))\n" +
" ├─ SubqueryAlias(lefttable)\n" +
" │ └─ Projected table access on [i s]\n" +
" │ └─ Table(mytable)\n" +
" └─ HashLookup(child: (righttable.i, righttable.s), lookup: (lefttable.i, lefttable.s))\n" +
" └─ CachedResults\n" +
" └─ SubqueryAlias(righttable)\n" +
" └─ Projected table access on [i s]\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT s2, i2, i FROM mytable RIGHT JOIN (SELECT * FROM othertable) othertable ON i2 = i`,
ExpectedPlan: "Project(othertable.s2, othertable.i2, mytable.i)\n" +
" └─ RightIndexedJoin(othertable.i2 = mytable.i)\n" +
" ├─ SubqueryAlias(othertable)\n" +
" │ └─ Projected table access on [s2 i2]\n" +
" │ └─ Table(othertable)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT s2, i2, i FROM mytable INNER JOIN (SELECT * FROM othertable) othertable ON i2 = i`,
ExpectedPlan: "Project(othertable.s2, othertable.i2, mytable.i)\n" +
" └─ IndexedJoin(othertable.i2 = mytable.i)\n" +
" ├─ SubqueryAlias(othertable)\n" +
" │ └─ Projected table access on [s2 i2]\n" +
" │ └─ Table(othertable)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT * FROM (SELECT * FROM othertable) othertable_alias WHERE s2 = 'a'`,
ExpectedPlan: "SubqueryAlias(othertable_alias)\n" +
" └─ Filter(othertable.s2 = 'a')\n" +
" └─ Projected table access on [s2 i2]\n" +
" └─ IndexedTableAccess(othertable on [othertable.s2] with ranges: [{[a, a]}])\n" +
"",
},
{
Query: `SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM othertable) othertable_one) othertable_two) othertable_three WHERE s2 = 'a'`,
ExpectedPlan: "SubqueryAlias(othertable_three)\n" +
" └─ SubqueryAlias(othertable_two)\n" +
" └─ SubqueryAlias(othertable_one)\n" +
" └─ Filter(othertable.s2 = 'a')\n" +
" └─ Projected table access on [s2 i2]\n" +
" └─ IndexedTableAccess(othertable on [othertable.s2] with ranges: [{[a, a]}])\n" +
"",
},
{
Query: `SELECT othertable.s2, othertable.i2, mytable.i FROM mytable INNER JOIN (SELECT * FROM othertable) othertable ON othertable.i2 = mytable.i WHERE othertable.s2 > 'a'`,
ExpectedPlan: "Project(othertable.s2, othertable.i2, mytable.i)\n" +
" └─ IndexedJoin(othertable.i2 = mytable.i)\n" +
" ├─ SubqueryAlias(othertable)\n" +
" │ └─ Filter(othertable.s2 > 'a')\n" +
" │ └─ Projected table access on [s2 i2]\n" +
" │ └─ IndexedTableAccess(othertable on [othertable.s2] with ranges: [{(a, ∞)}])\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT mytable.i, mytable.s FROM mytable WHERE mytable.i = (SELECT i2 FROM othertable LIMIT 1)`,
ExpectedPlan: "IndexedInSubqueryFilter(mytable.i IN ((Limit(1)\n" +
" └─ Project(othertable.i2)\n" +
" └─ Projected table access on [i2]\n" +
" └─ Table(othertable)\n" +
")))\n" +
" └─ Projected table access on [i s]\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT mytable.i, mytable.s FROM mytable WHERE mytable.i IN (SELECT i2 FROM othertable)`,
ExpectedPlan: "IndexedInSubqueryFilter(mytable.i IN ((Project(othertable.i2)\n" +
" └─ Projected table access on [i2]\n" +
" └─ Table(othertable)\n" +
")))\n" +
" └─ Projected table access on [i s]\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT mytable.i, mytable.s FROM mytable WHERE mytable.i IN (SELECT i2 FROM othertable WHERE mytable.i = othertable.i2)`,
ExpectedPlan: "Filter(mytable.i IN (Project(othertable.i2)\n" +
" └─ Filter(mytable.i = othertable.i2)\n" +
" └─ Projected table access on [i2]\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"))\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT * FROM mytable mt INNER JOIN othertable ot ON mt.i = ot.i2 AND mt.i > 2`,
ExpectedPlan: "IndexedJoin(mt.i = ot.i2)\n" +
" ├─ Filter(mt.i > 2)\n" +
" │ └─ TableAlias(mt)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{(2, ∞)}])\n" +
" └─ TableAlias(ot)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `SELECT /*+ JOIN_ORDER(mt, o) */ * FROM mytable mt INNER JOIN one_pk o ON mt.i = o.pk AND mt.s = o.c2`,
ExpectedPlan: "IndexedJoin((mt.i = o.pk) AND (mt.s = o.c2))\n" +
" ├─ TableAlias(mt)\n" +
" │ └─ Table(mytable)\n" +
" └─ TableAlias(o)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT i, i2, s2 FROM mytable RIGHT JOIN othertable ON i = i2 - 1`,
ExpectedPlan: "Project(mytable.i, othertable.i2, othertable.s2)\n" +
" └─ RightIndexedJoin(mytable.i = (othertable.i2 - 1))\n" +
" ├─ Table(othertable)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
"",
},
{
Query: `SELECT * FROM tabletest, mytable mt INNER JOIN othertable ot ON mt.i = ot.i2`,
ExpectedPlan: "CrossJoin\n" +
" ├─ Table(tabletest)\n" +
" └─ IndexedJoin(mt.i = ot.i2)\n" +
" ├─ TableAlias(mt)\n" +
" │ └─ Table(mytable)\n" +
" └─ TableAlias(ot)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `SELECT t1.timestamp FROM reservedWordsTable t1 JOIN reservedWordsTable t2 ON t1.TIMESTAMP = t2.tImEstamp`,
ExpectedPlan: "Project(t1.Timestamp)\n" +
" └─ IndexedJoin(t1.Timestamp = t2.Timestamp)\n" +
" ├─ TableAlias(t1)\n" +
" │ └─ Table(reservedWordsTable)\n" +
" └─ TableAlias(t2)\n" +
" └─ IndexedTableAccess(reservedWordsTable on [reservedWordsTable.Timestamp])\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk JOIN two_pk ON one_pk.pk=two_pk.pk1 AND one_pk.pk=two_pk.pk2`,
ExpectedPlan: "Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ IndexedJoin((one_pk.pk = two_pk.pk1) AND (one_pk.pk = two_pk.pk2))\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk JOIN two_pk ON one_pk.pk=two_pk.pk1 AND one_pk.pk=two_pk.pk2 OR one_pk.c2 = two_pk.c3`,
ExpectedPlan: "Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ InnerJoin(((one_pk.pk = two_pk.pk1) AND (one_pk.pk = two_pk.pk2)) OR (one_pk.c2 = two_pk.c3))\n" +
" ├─ Projected table access on [pk c2]\n" +
" │ └─ Table(one_pk)\n" +
" └─ Projected table access on [pk1 pk2 c3]\n" +
" └─ Table(two_pk)\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk opk JOIN two_pk tpk ON opk.pk=tpk.pk1 AND opk.pk=tpk.pk2`,
ExpectedPlan: "Project(opk.pk, tpk.pk1, tpk.pk2)\n" +
" └─ IndexedJoin((opk.pk = tpk.pk1) AND (opk.pk = tpk.pk2))\n" +
" ├─ TableAlias(opk)\n" +
" │ └─ Table(one_pk)\n" +
" └─ TableAlias(tpk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk JOIN two_pk ON one_pk.pk=two_pk.pk1 AND one_pk.pk=two_pk.pk2`,
ExpectedPlan: "Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ IndexedJoin((one_pk.pk = two_pk.pk1) AND (one_pk.pk = two_pk.pk2))\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk LEFT JOIN two_pk ON one_pk.pk <=> two_pk.pk1 AND one_pk.pk = two_pk.pk2`,
ExpectedPlan: "Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ LeftIndexedJoin((one_pk.pk <=> two_pk.pk1) AND (one_pk.pk = two_pk.pk2))\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk LEFT JOIN two_pk ON one_pk.pk = two_pk.pk1 AND one_pk.pk <=> two_pk.pk2`,
ExpectedPlan: "Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ LeftIndexedJoin((one_pk.pk = two_pk.pk1) AND (one_pk.pk <=> two_pk.pk2))\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk LEFT JOIN two_pk ON one_pk.pk <=> two_pk.pk1 AND one_pk.pk <=> two_pk.pk2`,
ExpectedPlan: "Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ LeftIndexedJoin((one_pk.pk <=> two_pk.pk1) AND (one_pk.pk <=> two_pk.pk2))\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk RIGHT JOIN two_pk ON one_pk.pk=two_pk.pk1 AND one_pk.pk=two_pk.pk2`,
ExpectedPlan: "Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ RightIndexedJoin((one_pk.pk = two_pk.pk1) AND (one_pk.pk = two_pk.pk2))\n" +
" ├─ Table(two_pk)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT * FROM (SELECT * FROM othertable) othertable_alias WHERE othertable_alias.i2 = 1`,
ExpectedPlan: "SubqueryAlias(othertable_alias)\n" +
" └─ Filter(othertable.i2 = 1)\n" +
" └─ Projected table access on [s2 i2]\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2] with ranges: [{[1, 1]}])\n" +
"",
},
{
Query: `SELECT * FROM (SELECT * FROM othertable WHERE i2 = 1) othertable_alias WHERE othertable_alias.i2 = 1`,
ExpectedPlan: "SubqueryAlias(othertable_alias)\n" +
" └─ Filter(othertable.i2 = 1)\n" +
" └─ Filter(othertable.i2 = 1)\n" +
" └─ Projected table access on [i2 s2]\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2] with ranges: [{[1, 1]}])\n" +
"",
},
{
Query: `SELECT * FROM datetime_table ORDER BY date_col ASC`,
ExpectedPlan: "Sort(datetime_table.date_col ASC)\n" +
" └─ Projected table access on [i date_col datetime_col timestamp_col time_col]\n" +
" └─ Table(datetime_table)\n" +
"",
},
{
Query: `SELECT * FROM datetime_table ORDER BY date_col ASC LIMIT 100`,
ExpectedPlan: "Limit(100)\n" +
" └─ TopN(Limit: [100]; datetime_table.date_col ASC)\n" +
" └─ Projected table access on [i date_col datetime_col timestamp_col time_col]\n" +
" └─ Table(datetime_table)\n" +
"",
},
{
Query: `SELECT * FROM datetime_table ORDER BY date_col ASC LIMIT 100 OFFSET 100`,
ExpectedPlan: "Limit(100)\n" +
" └─ Offset(100)\n" +
" └─ TopN(Limit: [(100 + 100)]; datetime_table.date_col ASC)\n" +
" └─ Projected table access on [i date_col datetime_col timestamp_col time_col]\n" +
" └─ Table(datetime_table)\n" +
"",
},
{
Query: `SELECT * FROM datetime_table where date_col = '2020-01-01'`,
ExpectedPlan: "Filter(datetime_table.date_col = '2020-01-01')\n" +
" └─ Projected table access on [i date_col datetime_col timestamp_col time_col]\n" +
" └─ IndexedTableAccess(datetime_table on [datetime_table.date_col] with ranges: [{[2020-01-01, 2020-01-01]}])\n" +
"",
},
{
Query: `SELECT * FROM datetime_table where date_col > '2020-01-01'`,
ExpectedPlan: "Filter(datetime_table.date_col > '2020-01-01')\n" +
" └─ Projected table access on [i date_col datetime_col timestamp_col time_col]\n" +
" └─ IndexedTableAccess(datetime_table on [datetime_table.date_col] with ranges: [{(2020-01-01, ∞)}])\n" +
"",
},
{
Query: `SELECT * FROM datetime_table where datetime_col = '2020-01-01'`,
ExpectedPlan: "Filter(datetime_table.datetime_col = '2020-01-01')\n" +
" └─ Projected table access on [i date_col datetime_col timestamp_col time_col]\n" +
" └─ IndexedTableAccess(datetime_table on [datetime_table.datetime_col] with ranges: [{[2020-01-01, 2020-01-01]}])\n" +
"",
},
{
Query: `SELECT * FROM datetime_table where datetime_col > '2020-01-01'`,
ExpectedPlan: "Filter(datetime_table.datetime_col > '2020-01-01')\n" +
" └─ Projected table access on [i date_col datetime_col timestamp_col time_col]\n" +
" └─ IndexedTableAccess(datetime_table on [datetime_table.datetime_col] with ranges: [{(2020-01-01, ∞)}])\n" +
"",
},
{
Query: `SELECT * FROM datetime_table where timestamp_col = '2020-01-01'`,
ExpectedPlan: "Filter(datetime_table.timestamp_col = '2020-01-01')\n" +
" └─ Projected table access on [i date_col datetime_col timestamp_col time_col]\n" +
" └─ IndexedTableAccess(datetime_table on [datetime_table.timestamp_col] with ranges: [{[2020-01-01, 2020-01-01]}])\n" +
"",
},
{
Query: `SELECT * FROM datetime_table where timestamp_col > '2020-01-01'`,
ExpectedPlan: "Filter(datetime_table.timestamp_col > '2020-01-01')\n" +
" └─ Projected table access on [i date_col datetime_col timestamp_col time_col]\n" +
" └─ IndexedTableAccess(datetime_table on [datetime_table.timestamp_col] with ranges: [{(2020-01-01, ∞)}])\n" +
"",
},
{
Query: `SELECT * FROM datetime_table dt1 join datetime_table dt2 on dt1.timestamp_col = dt2.timestamp_col`,
ExpectedPlan: "IndexedJoin(dt1.timestamp_col = dt2.timestamp_col)\n" +
" ├─ TableAlias(dt1)\n" +
" │ └─ Table(datetime_table)\n" +
" └─ TableAlias(dt2)\n" +
" └─ IndexedTableAccess(datetime_table on [datetime_table.timestamp_col])\n" +
"",
},
{
Query: `SELECT * FROM datetime_table dt1 join datetime_table dt2 on dt1.date_col = dt2.timestamp_col`,
ExpectedPlan: "IndexedJoin(dt1.date_col = dt2.timestamp_col)\n" +
" ├─ TableAlias(dt1)\n" +
" │ └─ Table(datetime_table)\n" +
" └─ TableAlias(dt2)\n" +
" └─ IndexedTableAccess(datetime_table on [datetime_table.timestamp_col])\n" +
"",
},
{
Query: `SELECT * FROM datetime_table dt1 join datetime_table dt2 on dt1.datetime_col = dt2.timestamp_col`,
ExpectedPlan: "IndexedJoin(dt1.datetime_col = dt2.timestamp_col)\n" +
" ├─ TableAlias(dt1)\n" +
" │ └─ Table(datetime_table)\n" +
" └─ TableAlias(dt2)\n" +
" └─ IndexedTableAccess(datetime_table on [datetime_table.timestamp_col])\n" +
"",
},
{
Query: `SELECT dt1.i FROM datetime_table dt1
join datetime_table dt2 on dt1.date_col = date(date_sub(dt2.timestamp_col, interval 2 day))
order by 1`,
ExpectedPlan: "Sort(dt1.i ASC)\n" +
" └─ Project(dt1.i)\n" +
" └─ IndexedJoin(dt1.date_col = DATE(DATE_SUB(dt2.timestamp_col, INTERVAL 2 DAY)))\n" +
" ├─ TableAlias(dt2)\n" +
" │ └─ Table(datetime_table)\n" +
" └─ TableAlias(dt1)\n" +
" └─ IndexedTableAccess(datetime_table on [datetime_table.date_col])\n" +
"",
},
{
Query: `SELECT dt1.i FROM datetime_table dt1
join datetime_table dt2 on dt1.date_col = date(date_sub(dt2.timestamp_col, interval 2 day))
order by 1 limit 3 offset 0`,
ExpectedPlan: "Limit(3)\n" +
" └─ Offset(0)\n" +
" └─ TopN(Limit: [(3 + 0)]; dt1.i ASC)\n" +
" └─ Project(dt1.i)\n" +
" └─ IndexedJoin(dt1.date_col = DATE(DATE_SUB(dt2.timestamp_col, INTERVAL 2 DAY)))\n" +
" ├─ TableAlias(dt2)\n" +
" │ └─ Table(datetime_table)\n" +
" └─ TableAlias(dt1)\n" +
" └─ IndexedTableAccess(datetime_table on [datetime_table.date_col])\n" +
"",
},
{
Query: `SELECT dt1.i FROM datetime_table dt1
join datetime_table dt2 on dt1.date_col = date(date_sub(dt2.timestamp_col, interval 2 day))
order by 1 limit 3`,
ExpectedPlan: "Limit(3)\n" +
" └─ TopN(Limit: [3]; dt1.i ASC)\n" +
" └─ Project(dt1.i)\n" +
" └─ IndexedJoin(dt1.date_col = DATE(DATE_SUB(dt2.timestamp_col, INTERVAL 2 DAY)))\n" +
" ├─ TableAlias(dt2)\n" +
" │ └─ Table(datetime_table)\n" +
" └─ TableAlias(dt1)\n" +
" └─ IndexedTableAccess(datetime_table on [datetime_table.date_col])\n" +
"",
},
{
Query: `SELECT pk FROM one_pk
JOIN two_pk tpk ON one_pk.pk=tpk.pk1 AND one_pk.pk=tpk.pk2
JOIN two_pk tpk2 ON tpk2.pk1=TPK.pk2 AND TPK2.pk2=tpk.pk1`,
ExpectedPlan: "Project(one_pk.pk)\n" +
" └─ IndexedJoin((one_pk.pk = tpk.pk1) AND (one_pk.pk = tpk.pk2))\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedJoin((tpk2.pk1 = tpk.pk2) AND (tpk2.pk2 = tpk.pk1))\n" +
" ├─ TableAlias(tpk)\n" +
" │ └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
" └─ TableAlias(tpk2)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT /* JOIN_ORDER(tpk, one_pk, tpk2) */
pk FROM one_pk
JOIN two_pk tpk ON one_pk.pk=tpk.pk1 AND one_pk.pk=tpk.pk2
JOIN two_pk tpk2 ON tpk2.pk1=TPK.pk2 AND TPK2.pk2=tpk.pk1`,
ExpectedPlan: "Project(one_pk.pk)\n" +
" └─ IndexedJoin((tpk2.pk1 = tpk.pk2) AND (tpk2.pk2 = tpk.pk1))\n" +
" ├─ IndexedJoin((one_pk.pk = tpk.pk1) AND (one_pk.pk = tpk.pk2))\n" +
" │ ├─ TableAlias(tpk)\n" +
" │ │ └─ Table(two_pk)\n" +
" │ └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
" └─ TableAlias(tpk2)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT /* JOIN_ORDER(tpk, one_pk, tpk2) */
pk FROM one_pk
JOIN two_pk tpk ON one_pk.pk=tpk.pk1 AND one_pk.pk=tpk.pk2
LEFT JOIN two_pk tpk2 ON tpk2.pk1=TPK.pk2 AND TPK2.pk2=tpk.pk1`,
ExpectedPlan: "Project(one_pk.pk)\n" +
" └─ LeftIndexedJoin((tpk2.pk1 = tpk.pk2) AND (tpk2.pk2 = tpk.pk1))\n" +
" ├─ IndexedJoin((one_pk.pk = tpk.pk1) AND (one_pk.pk = tpk.pk2))\n" +
" │ ├─ TableAlias(tpk)\n" +
" │ │ └─ Table(two_pk)\n" +
" │ └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
" └─ TableAlias(tpk2)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk,tpk.pk1,tpk2.pk1,tpk.pk2,tpk2.pk2 FROM one_pk
JOIN two_pk tpk ON pk=tpk.pk1 AND pk-1=tpk.pk2
JOIN two_pk tpk2 ON pk-1=TPK2.pk1 AND pk=tpk2.pk2
ORDER BY 1`,
ExpectedPlan: "Sort(one_pk.pk ASC)\n" +
" └─ Project(one_pk.pk, tpk.pk1, tpk2.pk1, tpk.pk2, tpk2.pk2)\n" +
" └─ IndexedJoin(((one_pk.pk - 1) = tpk2.pk1) AND (one_pk.pk = tpk2.pk2))\n" +
" ├─ IndexedJoin((one_pk.pk = tpk.pk1) AND ((one_pk.pk - 1) = tpk.pk2))\n" +
" │ ├─ Table(one_pk)\n" +
" │ └─ TableAlias(tpk)\n" +
" │ └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
" └─ TableAlias(tpk2)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk FROM one_pk
LEFT JOIN two_pk tpk ON one_pk.pk=tpk.pk1 AND one_pk.pk=tpk.pk2
LEFT JOIN two_pk tpk2 ON tpk2.pk1=TPK.pk2 AND TPK2.pk2=tpk.pk1`,
ExpectedPlan: "Project(one_pk.pk)\n" +
" └─ LeftIndexedJoin((tpk2.pk1 = tpk.pk2) AND (tpk2.pk2 = tpk.pk1))\n" +
" ├─ LeftIndexedJoin((one_pk.pk = tpk.pk1) AND (one_pk.pk = tpk.pk2))\n" +
" │ ├─ Table(one_pk)\n" +
" │ └─ TableAlias(tpk)\n" +
" │ └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
" └─ TableAlias(tpk2)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk FROM one_pk
LEFT JOIN two_pk tpk ON one_pk.pk=tpk.pk1 AND one_pk.pk=tpk.pk2
JOIN two_pk tpk2 ON tpk2.pk1=TPK.pk2 AND TPK2.pk2=tpk.pk1`,
ExpectedPlan: "Project(one_pk.pk)\n" +
" └─ IndexedJoin((tpk2.pk1 = tpk.pk2) AND (tpk2.pk2 = tpk.pk1))\n" +
" ├─ LeftIndexedJoin((one_pk.pk = tpk.pk1) AND (one_pk.pk = tpk.pk2))\n" +
" │ ├─ Table(one_pk)\n" +
" │ └─ TableAlias(tpk)\n" +
" │ └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
" └─ TableAlias(tpk2)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk FROM one_pk
JOIN two_pk tpk ON one_pk.pk=tpk.pk1 AND one_pk.pk=tpk.pk2
LEFT JOIN two_pk tpk2 ON tpk2.pk1=TPK.pk2 AND TPK2.pk2=tpk.pk1`,
ExpectedPlan: "Project(one_pk.pk)\n" +
" └─ LeftIndexedJoin((tpk2.pk1 = tpk.pk2) AND (tpk2.pk2 = tpk.pk1))\n" +
" ├─ IndexedJoin((one_pk.pk = tpk.pk1) AND (one_pk.pk = tpk.pk2))\n" +
" │ ├─ Table(one_pk)\n" +
" │ └─ TableAlias(tpk)\n" +
" │ └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
" └─ TableAlias(tpk2)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk FROM one_pk
RIGHT JOIN two_pk tpk ON one_pk.pk=tpk.pk1 AND one_pk.pk=tpk.pk2
RIGHT JOIN two_pk tpk2 ON tpk.pk1=TPk2.pk2 AND tpk.pk2=TPK2.pk1`,
ExpectedPlan: "Project(one_pk.pk)\n" +
" └─ RightIndexedJoin((tpk.pk1 = tpk2.pk2) AND (tpk.pk2 = tpk2.pk1))\n" +
" ├─ TableAlias(tpk2)\n" +
" │ └─ Table(two_pk)\n" +
" └─ RightIndexedJoin((one_pk.pk = tpk.pk1) AND (one_pk.pk = tpk.pk2))\n" +
" ├─ TableAlias(tpk)\n" +
" │ └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT i,pk1,pk2 FROM mytable JOIN two_pk ON i-1=pk1 AND i-2=pk2`,
ExpectedPlan: "Project(mytable.i, two_pk.pk1, two_pk.pk2)\n" +
" └─ IndexedJoin(((mytable.i - 1) = two_pk.pk1) AND ((mytable.i - 2) = two_pk.pk2))\n" +
" ├─ Table(mytable)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk LEFT JOIN two_pk ON pk=pk1`,
ExpectedPlan: "Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ LeftIndexedJoin(one_pk.pk = two_pk.pk1)\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk LEFT JOIN niltable ON pk=i`,
ExpectedPlan: "Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ LeftIndexedJoin(one_pk.pk = niltable.i)\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(niltable on [niltable.i])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk RIGHT JOIN niltable ON pk=i`,
ExpectedPlan: "Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ RightIndexedJoin(one_pk.pk = niltable.i)\n" +
" ├─ Table(niltable)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT pk,nt.i,nt2.i FROM one_pk
RIGHT JOIN niltable nt ON pk=nt.i
RIGHT JOIN niltable nt2 ON pk=nt2.i + 1`,
ExpectedPlan: "Project(one_pk.pk, nt.i, nt2.i)\n" +
" └─ RightIndexedJoin(one_pk.pk = (nt2.i + 1))\n" +
" ├─ TableAlias(nt2)\n" +
" │ └─ Table(niltable)\n" +
" └─ RightIndexedJoin(one_pk.pk = nt.i)\n" +
" ├─ TableAlias(nt)\n" +
" │ └─ Table(niltable)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk LEFT JOIN niltable ON pk=i AND f IS NOT NULL`,
ExpectedPlan: "Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ LeftIndexedJoin((one_pk.pk = niltable.i) AND (NOT(niltable.f IS NULL)))\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(niltable on [niltable.i])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk RIGHT JOIN niltable ON pk=i and pk > 0`,
ExpectedPlan: "Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ RightJoin((one_pk.pk = niltable.i) AND (one_pk.pk > 0))\n" +
" ├─ Projected table access on [pk]\n" +
" │ └─ Table(one_pk)\n" +
" └─ Projected table access on [i f]\n" +
" └─ Table(niltable)\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk LEFT JOIN niltable ON pk=i WHERE f IS NOT NULL`,
ExpectedPlan: "Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ Filter(NOT(niltable.f IS NULL))\n" +
" └─ LeftIndexedJoin(one_pk.pk = niltable.i)\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(niltable on [niltable.i])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk LEFT JOIN niltable ON pk=i WHERE i2 > 1`,
ExpectedPlan: "Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ Filter(niltable.i2 > 1)\n" +
" └─ LeftIndexedJoin(one_pk.pk = niltable.i)\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(niltable on [niltable.i])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk LEFT JOIN niltable ON pk=i WHERE i > 1`,
ExpectedPlan: "Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ Filter(niltable.i > 1)\n" +
" └─ LeftIndexedJoin(one_pk.pk = niltable.i)\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(niltable on [niltable.i])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk LEFT JOIN niltable ON pk=i WHERE c1 > 10`,
ExpectedPlan: "Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ LeftIndexedJoin(one_pk.pk = niltable.i)\n" +
" ├─ Filter(one_pk.c1 > 10)\n" +
" │ └─ Table(one_pk)\n" +
" └─ IndexedTableAccess(niltable on [niltable.i])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk RIGHT JOIN niltable ON pk=i WHERE f IS NOT NULL`,
ExpectedPlan: "Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ RightIndexedJoin(one_pk.pk = niltable.i)\n" +
" ├─ Filter(NOT(niltable.f IS NULL))\n" +
" │ └─ Table(niltable)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk LEFT JOIN niltable ON pk=i WHERE pk > 1`,
ExpectedPlan: "Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ LeftIndexedJoin(one_pk.pk = niltable.i)\n" +
" ├─ Filter(one_pk.pk > 1)\n" +
" │ └─ IndexedTableAccess(one_pk on [one_pk.pk] with ranges: [{(1, ∞)}])\n" +
" └─ IndexedTableAccess(niltable on [niltable.i])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk RIGHT JOIN niltable ON pk=i WHERE pk > 0`,
ExpectedPlan: "Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ Filter(one_pk.pk > 0)\n" +
" └─ RightIndexedJoin(one_pk.pk = niltable.i)\n" +
" ├─ Table(niltable)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk JOIN two_pk ON pk=pk1`,
ExpectedPlan: "Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ IndexedJoin(one_pk.pk = two_pk.pk1)\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT /*+ JOIN_ORDER(two_pk, one_pk) */ pk,pk1,pk2 FROM one_pk JOIN two_pk ON pk=pk1`,
ExpectedPlan: "Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ IndexedJoin(one_pk.pk = two_pk.pk1)\n" +
" ├─ Table(two_pk)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT a.pk1,a.pk2,b.pk1,b.pk2 FROM two_pk a JOIN two_pk b ON a.pk1=b.pk1 AND a.pk2=b.pk2 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(a.pk1 ASC, a.pk2 ASC, b.pk1 ASC)\n" +
" └─ Project(a.pk1, a.pk2, b.pk1, b.pk2)\n" +
" └─ IndexedJoin((a.pk1 = b.pk1) AND (a.pk2 = b.pk2))\n" +
" ├─ TableAlias(a)\n" +
" │ └─ Table(two_pk)\n" +
" └─ TableAlias(b)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT a.pk1,a.pk2,b.pk1,b.pk2 FROM two_pk a JOIN two_pk b ON a.pk1=b.pk2 AND a.pk2=b.pk1 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(a.pk1 ASC, a.pk2 ASC, b.pk1 ASC)\n" +
" └─ Project(a.pk1, a.pk2, b.pk1, b.pk2)\n" +
" └─ IndexedJoin((a.pk1 = b.pk2) AND (a.pk2 = b.pk1))\n" +
" ├─ TableAlias(a)\n" +
" │ └─ Table(two_pk)\n" +
" └─ TableAlias(b)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT a.pk1,a.pk2,b.pk1,b.pk2 FROM two_pk a JOIN two_pk b ON b.pk1=a.pk1 AND a.pk2=b.pk2 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(a.pk1 ASC, a.pk2 ASC, b.pk1 ASC)\n" +
" └─ Project(a.pk1, a.pk2, b.pk1, b.pk2)\n" +
" └─ IndexedJoin((b.pk1 = a.pk1) AND (a.pk2 = b.pk2))\n" +
" ├─ TableAlias(a)\n" +
" │ └─ Table(two_pk)\n" +
" └─ TableAlias(b)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT a.pk1,a.pk2,b.pk1,b.pk2 FROM two_pk a JOIN two_pk b ON a.pk1+1=b.pk1 AND a.pk2+1=b.pk2 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(a.pk1 ASC, a.pk2 ASC, b.pk1 ASC)\n" +
" └─ Project(a.pk1, a.pk2, b.pk1, b.pk2)\n" +
" └─ IndexedJoin(((a.pk1 + 1) = b.pk1) AND ((a.pk2 + 1) = b.pk2))\n" +
" ├─ TableAlias(a)\n" +
" │ └─ Table(two_pk)\n" +
" └─ TableAlias(b)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT a.pk1,a.pk2,b.pk1,b.pk2 FROM two_pk a, two_pk b WHERE a.pk1=b.pk1 AND a.pk2=b.pk2 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(a.pk1 ASC, a.pk2 ASC, b.pk1 ASC)\n" +
" └─ Project(a.pk1, a.pk2, b.pk1, b.pk2)\n" +
" └─ IndexedJoin((a.pk1 = b.pk1) AND (a.pk2 = b.pk2))\n" +
" ├─ TableAlias(a)\n" +
" │ └─ Table(two_pk)\n" +
" └─ TableAlias(b)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT a.pk1,a.pk2,b.pk1,b.pk2 FROM two_pk a, two_pk b WHERE a.pk1=b.pk2 AND a.pk2=b.pk1 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(a.pk1 ASC, a.pk2 ASC, b.pk1 ASC)\n" +
" └─ Project(a.pk1, a.pk2, b.pk1, b.pk2)\n" +
" └─ IndexedJoin((a.pk1 = b.pk2) AND (a.pk2 = b.pk1))\n" +
" ├─ TableAlias(a)\n" +
" │ └─ Table(two_pk)\n" +
" └─ TableAlias(b)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT one_pk.c5,pk1,pk2 FROM one_pk JOIN two_pk ON pk=pk1 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(one_pk.c5 ASC, two_pk.pk1 ASC, two_pk.pk2 ASC)\n" +
" └─ Project(one_pk.c5, two_pk.pk1, two_pk.pk2)\n" +
" └─ IndexedJoin(one_pk.pk = two_pk.pk1)\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT opk.c5,pk1,pk2 FROM one_pk opk JOIN two_pk tpk ON opk.pk=tpk.pk1 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(opk.c5 ASC, tpk.pk1 ASC, tpk.pk2 ASC)\n" +
" └─ Project(opk.c5, tpk.pk1, tpk.pk2)\n" +
" └─ IndexedJoin(opk.pk = tpk.pk1)\n" +
" ├─ TableAlias(opk)\n" +
" │ └─ Table(one_pk)\n" +
" └─ TableAlias(tpk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT opk.c5,pk1,pk2 FROM one_pk opk JOIN two_pk tpk ON pk=pk1 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(opk.c5 ASC, tpk.pk1 ASC, tpk.pk2 ASC)\n" +
" └─ Project(opk.c5, tpk.pk1, tpk.pk2)\n" +
" └─ IndexedJoin(opk.pk = tpk.pk1)\n" +
" ├─ TableAlias(opk)\n" +
" │ └─ Table(one_pk)\n" +
" └─ TableAlias(tpk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT opk.c5,pk1,pk2 FROM one_pk opk, two_pk tpk WHERE pk=pk1 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(opk.c5 ASC, tpk.pk1 ASC, tpk.pk2 ASC)\n" +
" └─ Project(opk.c5, tpk.pk1, tpk.pk2)\n" +
" └─ IndexedJoin(opk.pk = tpk.pk1)\n" +
" ├─ TableAlias(opk)\n" +
" │ └─ Table(one_pk)\n" +
" └─ TableAlias(tpk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT one_pk.c5,pk1,pk2 FROM one_pk,two_pk WHERE pk=pk1 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(one_pk.c5 ASC, two_pk.pk1 ASC, two_pk.pk2 ASC)\n" +
" └─ Project(one_pk.c5, two_pk.pk1, two_pk.pk2)\n" +
" └─ IndexedJoin(one_pk.pk = two_pk.pk1)\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk LEFT JOIN niltable ON pk=i ORDER BY 1`,
ExpectedPlan: "Sort(one_pk.pk ASC)\n" +
" └─ Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ LeftIndexedJoin(one_pk.pk = niltable.i)\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(niltable on [niltable.i])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk LEFT JOIN niltable ON pk=i WHERE f IS NOT NULL ORDER BY 1`,
ExpectedPlan: "Sort(one_pk.pk ASC)\n" +
" └─ Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ Filter(NOT(niltable.f IS NULL))\n" +
" └─ LeftIndexedJoin(one_pk.pk = niltable.i)\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(niltable on [niltable.i])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk LEFT JOIN niltable ON pk=i WHERE pk > 1 ORDER BY 1`,
ExpectedPlan: "Sort(one_pk.pk ASC)\n" +
" └─ Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ LeftIndexedJoin(one_pk.pk = niltable.i)\n" +
" ├─ Filter(one_pk.pk > 1)\n" +
" │ └─ IndexedTableAccess(one_pk on [one_pk.pk] with ranges: [{(1, ∞)}])\n" +
" └─ IndexedTableAccess(niltable on [niltable.i])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk RIGHT JOIN niltable ON pk=i ORDER BY 2,3`,
ExpectedPlan: "Sort(niltable.i ASC, niltable.f ASC)\n" +
" └─ Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ RightIndexedJoin(one_pk.pk = niltable.i)\n" +
" ├─ Table(niltable)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk RIGHT JOIN niltable ON pk=i WHERE f IS NOT NULL ORDER BY 2,3`,
ExpectedPlan: "Sort(niltable.i ASC, niltable.f ASC)\n" +
" └─ Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ RightIndexedJoin(one_pk.pk = niltable.i)\n" +
" ├─ Filter(NOT(niltable.f IS NULL))\n" +
" │ └─ Table(niltable)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk RIGHT JOIN niltable ON pk=i WHERE pk > 0 ORDER BY 2,3`,
ExpectedPlan: "Sort(niltable.i ASC, niltable.f ASC)\n" +
" └─ Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ Filter(one_pk.pk > 0)\n" +
" └─ RightIndexedJoin(one_pk.pk = niltable.i)\n" +
" ├─ Table(niltable)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT pk,i,f FROM one_pk RIGHT JOIN niltable ON pk=i and pk > 0 ORDER BY 2,3`,
ExpectedPlan: "Sort(niltable.i ASC, niltable.f ASC)\n" +
" └─ Project(one_pk.pk, niltable.i, niltable.f)\n" +
" └─ RightJoin((one_pk.pk = niltable.i) AND (one_pk.pk > 0))\n" +
" ├─ Projected table access on [pk]\n" +
" │ └─ Table(one_pk)\n" +
" └─ Projected table access on [i f]\n" +
" └─ Table(niltable)\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk JOIN two_pk ON one_pk.pk=two_pk.pk1 AND one_pk.pk=two_pk.pk2 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(one_pk.pk ASC, two_pk.pk1 ASC, two_pk.pk2 ASC)\n" +
" └─ Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ IndexedJoin((one_pk.pk = two_pk.pk1) AND (one_pk.pk = two_pk.pk2))\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk JOIN two_pk ON pk1-pk>0 AND pk2<1`,
ExpectedPlan: "Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ InnerJoin((two_pk.pk1 - one_pk.pk) > 0)\n" +
" ├─ Projected table access on [pk]\n" +
" │ └─ Table(one_pk)\n" +
" └─ Filter(two_pk.pk2 < 1)\n" +
" └─ Projected table access on [pk1 pk2]\n" +
" └─ Table(two_pk)\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk JOIN two_pk ORDER BY 1,2,3`,
ExpectedPlan: "Sort(one_pk.pk ASC, two_pk.pk1 ASC, two_pk.pk2 ASC)\n" +
" └─ Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ CrossJoin\n" +
" ├─ Projected table access on [pk]\n" +
" │ └─ Table(one_pk)\n" +
" └─ Projected table access on [pk1 pk2]\n" +
" └─ Table(two_pk)\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk LEFT JOIN two_pk ON one_pk.pk=two_pk.pk1 AND one_pk.pk=two_pk.pk2 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(one_pk.pk ASC, two_pk.pk1 ASC, two_pk.pk2 ASC)\n" +
" └─ Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ LeftIndexedJoin((one_pk.pk = two_pk.pk1) AND (one_pk.pk = two_pk.pk2))\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk LEFT JOIN two_pk ON pk=pk1 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(one_pk.pk ASC, two_pk.pk1 ASC, two_pk.pk2 ASC)\n" +
" └─ Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ LeftIndexedJoin(one_pk.pk = two_pk.pk1)\n" +
" ├─ Table(one_pk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk RIGHT JOIN two_pk ON one_pk.pk=two_pk.pk1 AND one_pk.pk=two_pk.pk2 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(one_pk.pk ASC, two_pk.pk1 ASC, two_pk.pk2 ASC)\n" +
" └─ Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ RightIndexedJoin((one_pk.pk = two_pk.pk1) AND (one_pk.pk = two_pk.pk2))\n" +
" ├─ Table(two_pk)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk opk JOIN two_pk tpk ON opk.pk=tpk.pk1 AND opk.pk=tpk.pk2 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(opk.pk ASC, tpk.pk1 ASC, tpk.pk2 ASC)\n" +
" └─ Project(opk.pk, tpk.pk1, tpk.pk2)\n" +
" └─ IndexedJoin((opk.pk = tpk.pk1) AND (opk.pk = tpk.pk2))\n" +
" ├─ TableAlias(opk)\n" +
" │ └─ Table(one_pk)\n" +
" └─ TableAlias(tpk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk opk JOIN two_pk tpk ON pk=tpk.pk1 AND pk=tpk.pk2 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(opk.pk ASC, tpk.pk1 ASC, tpk.pk2 ASC)\n" +
" └─ Project(opk.pk, tpk.pk1, tpk.pk2)\n" +
" └─ IndexedJoin((opk.pk = tpk.pk1) AND (opk.pk = tpk.pk2))\n" +
" ├─ TableAlias(opk)\n" +
" │ └─ Table(one_pk)\n" +
" └─ TableAlias(tpk)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2])\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk,two_pk WHERE one_pk.c1=two_pk.c1 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(one_pk.pk ASC, two_pk.pk1 ASC, two_pk.pk2 ASC)\n" +
" └─ Project(one_pk.pk, two_pk.pk1, two_pk.pk2)\n" +
" └─ InnerJoin(one_pk.c1 = two_pk.c1)\n" +
" ├─ Projected table access on [pk c1]\n" +
" │ └─ Table(one_pk)\n" +
" └─ Projected table access on [pk1 pk2 c1]\n" +
" └─ Table(two_pk)\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2,one_pk.c1 AS foo, two_pk.c1 AS bar FROM one_pk JOIN two_pk ON one_pk.c1=two_pk.c1 ORDER BY 1,2,3`,
ExpectedPlan: "Sort(one_pk.pk ASC, two_pk.pk1 ASC, two_pk.pk2 ASC)\n" +
" └─ Project(one_pk.pk, two_pk.pk1, two_pk.pk2, one_pk.c1 as foo, two_pk.c1 as bar)\n" +
" └─ InnerJoin(one_pk.c1 = two_pk.c1)\n" +
" ├─ Projected table access on [pk c1]\n" +
" │ └─ Table(one_pk)\n" +
" └─ Projected table access on [pk1 pk2 c1]\n" +
" └─ Table(two_pk)\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2,one_pk.c1 AS foo,two_pk.c1 AS bar FROM one_pk JOIN two_pk ON one_pk.c1=two_pk.c1 WHERE one_pk.c1=10`,
ExpectedPlan: "Project(one_pk.pk, two_pk.pk1, two_pk.pk2, one_pk.c1 as foo, two_pk.c1 as bar)\n" +
" └─ InnerJoin(one_pk.c1 = two_pk.c1)\n" +
" ├─ Filter(one_pk.c1 = 10)\n" +
" │ └─ Projected table access on [pk c1]\n" +
" │ └─ Table(one_pk)\n" +
" └─ Projected table access on [pk1 pk2 c1]\n" +
" └─ Table(two_pk)\n" +
"",
},
{
Query: `SELECT pk,pk2 FROM one_pk t1, two_pk t2 WHERE pk=1 AND pk2=1 ORDER BY 1,2`,
ExpectedPlan: "Sort(t1.pk ASC, t2.pk2 ASC)\n" +
" └─ Project(t1.pk, t2.pk2)\n" +
" └─ CrossJoin\n" +
" ├─ Filter(t1.pk = 1)\n" +
" │ └─ Projected table access on [pk]\n" +
" │ └─ TableAlias(t1)\n" +
" │ └─ IndexedTableAccess(one_pk on [one_pk.pk] with ranges: [{[1, 1]}])\n" +
" └─ Filter(t2.pk2 = 1)\n" +
" └─ Projected table access on [pk2]\n" +
" └─ TableAlias(t2)\n" +
" └─ Table(two_pk)\n" +
"",
},
{
Query: `SELECT pk,pk1,pk2 FROM one_pk t1, two_pk t2 WHERE pk=1 AND pk2=1 AND pk1=1 ORDER BY 1,2`,
ExpectedPlan: "Sort(t1.pk ASC, t2.pk1 ASC)\n" +
" └─ Project(t1.pk, t2.pk1, t2.pk2)\n" +
" └─ CrossJoin\n" +
" ├─ Filter(t1.pk = 1)\n" +
" │ └─ Projected table access on [pk]\n" +
" │ └─ TableAlias(t1)\n" +
" │ └─ IndexedTableAccess(one_pk on [one_pk.pk] with ranges: [{[1, 1]}])\n" +
" └─ Filter((t2.pk2 = 1) AND (t2.pk1 = 1))\n" +
" └─ Projected table access on [pk1 pk2]\n" +
" └─ TableAlias(t2)\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2] with ranges: [{[1, 1], (-∞, ∞)}])\n" +
"",
},
{
Query: `SELECT i FROM mytable mt
WHERE (SELECT i FROM mytable where i = mt.i and i > 2) IS NOT NULL
AND (SELECT i2 FROM othertable where i2 = i) IS NOT NULL`,
ExpectedPlan: "Project(mt.i)\n" +
" └─ Filter((NOT((Project(mytable.i)\n" +
" └─ Filter(mytable.i = mt.i)\n" +
" └─ Projected table access on [i]\n" +
" └─ Filter(mytable.i > 2)\n" +
" └─ IndexedTableAccess(mytable on [mytable.i] with ranges: [{(2, ∞)}])\n" +
" ) IS NULL)) AND (NOT((Project(othertable.i2)\n" +
" └─ Filter(othertable.i2 = mt.i)\n" +
" └─ Projected table access on [i2]\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
" ) IS NULL)))\n" +
" └─ TableAlias(mt)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT i FROM mytable mt
WHERE (SELECT i FROM mytable where i = mt.i) IS NOT NULL
AND (SELECT i2 FROM othertable where i2 = i and i > 2) IS NOT NULL`,
ExpectedPlan: "Project(mt.i)\n" +
" └─ Filter((NOT((Project(mytable.i)\n" +
" └─ Filter(mytable.i = mt.i)\n" +
" └─ Projected table access on [i]\n" +
" └─ IndexedTableAccess(mytable on [mytable.i])\n" +
" ) IS NULL)) AND (NOT((Project(othertable.i2)\n" +
" └─ Filter((othertable.i2 = mt.i) AND (mt.i > 2))\n" +
" └─ Projected table access on [i2]\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
" ) IS NULL)))\n" +
" └─ TableAlias(mt)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `SELECT pk,pk2, (SELECT pk from one_pk where pk = 1 limit 1) FROM one_pk t1, two_pk t2 WHERE pk=1 AND pk2=1 ORDER BY 1,2`,
ExpectedPlan: "Sort(t1.pk ASC, t2.pk2 ASC)\n" +
" └─ Project(t1.pk, t2.pk2, (Limit(1)\n" +
" └─ Project(one_pk.pk)\n" +
" └─ Projected table access on [pk]\n" +
" └─ Filter(one_pk.pk = 1)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk] with ranges: [{[1, 1]}])\n" +
" ) as (SELECT pk from one_pk where pk = 1 limit 1))\n" +
" └─ CrossJoin\n" +
" ├─ Filter(t1.pk = 1)\n" +
" │ └─ TableAlias(t1)\n" +
" │ └─ IndexedTableAccess(one_pk on [one_pk.pk] with ranges: [{[1, 1]}])\n" +
" └─ Filter(t2.pk2 = 1)\n" +
" └─ TableAlias(t2)\n" +
" └─ Table(two_pk)\n" +
"",
},
{
Query: `SELECT ROW_NUMBER() OVER (ORDER BY s2 ASC) idx, i2, s2 FROM othertable WHERE s2 <> 'second' ORDER BY i2 ASC`,
ExpectedPlan: "Sort(othertable.i2 ASC)\n" +
" └─ Project(row_number() over ( order by [othertable.s2, idx=0, type=TEXT, nullable=false] ASC) as idx, othertable.i2, othertable.s2)\n" +
" └─ Window(row_number() over ( order by [othertable.s2, idx=0, type=TEXT, nullable=false] ASC), othertable.i2, othertable.s2)\n" +
" └─ Filter(NOT((othertable.s2 = 'second')))\n" +
" └─ Projected table access on [i2 s2]\n" +
" └─ IndexedTableAccess(othertable on [othertable.s2] with ranges: [{(second, ∞)}, {(-∞, second)}])\n" +
"",
},
{
Query: `SELECT * FROM (SELECT ROW_NUMBER() OVER (ORDER BY s2 ASC) idx, i2, s2 FROM othertable ORDER BY i2 ASC) a WHERE s2 <> 'second'`,
ExpectedPlan: "SubqueryAlias(a)\n" +
" └─ Filter(NOT((othertable.s2 = 'second')))\n" +
" └─ Sort(othertable.i2 ASC)\n" +
" └─ Project(row_number() over ( order by [othertable.s2, idx=0, type=TEXT, nullable=false] ASC) as idx, othertable.i2, othertable.s2)\n" +
" └─ Window(row_number() over ( order by [othertable.s2, idx=0, type=TEXT, nullable=false] ASC), othertable.i2, othertable.s2)\n" +
" └─ Projected table access on [s2 i2]\n" +
" └─ Table(othertable)\n" +
"",
},
{
Query: `SELECT ROW_NUMBER() OVER (ORDER BY s2 ASC) idx, i2, s2 FROM othertable WHERE i2 < 2 OR i2 > 2 ORDER BY i2 ASC`,
ExpectedPlan: "Sort(othertable.i2 ASC)\n" +
" └─ Project(row_number() over ( order by [othertable.s2, idx=0, type=TEXT, nullable=false] ASC) as idx, othertable.i2, othertable.s2)\n" +
" └─ Window(row_number() over ( order by [othertable.s2, idx=0, type=TEXT, nullable=false] ASC), othertable.i2, othertable.s2)\n" +
" └─ Filter((othertable.i2 < 2) OR (othertable.i2 > 2))\n" +
" └─ Projected table access on [i2 s2]\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2] with ranges: [{(-∞, 2)}, {(2, ∞)}])\n" +
"",
},
{
Query: `SELECT * FROM (SELECT ROW_NUMBER() OVER (ORDER BY s2 ASC) idx, i2, s2 FROM othertable ORDER BY i2 ASC) a WHERE i2 < 2 OR i2 > 2`,
ExpectedPlan: "SubqueryAlias(a)\n" +
" └─ Filter((othertable.i2 < 2) OR (othertable.i2 > 2))\n" +
" └─ Sort(othertable.i2 ASC)\n" +
" └─ Project(row_number() over ( order by [othertable.s2, idx=0, type=TEXT, nullable=false] ASC) as idx, othertable.i2, othertable.s2)\n" +
" └─ Window(row_number() over ( order by [othertable.s2, idx=0, type=TEXT, nullable=false] ASC), othertable.i2, othertable.s2)\n" +
" └─ Projected table access on [i2 s2]\n" +
" └─ Table(othertable)\n" +
"",
},
{
Query: `SELECT t, n, lag(t, 1, t+1) over (partition by n) FROM bigtable`,
ExpectedPlan: "Project(bigtable.t, bigtable.n, lag(bigtable.t, 1, (bigtable.t + 1)) over ( partition by bigtable.n) as lag(t, 1, t+1) over (partition by n))\n" +
" └─ Window(bigtable.t, bigtable.n, lag(bigtable.t, 1, (bigtable.t + 1)) over ( partition by bigtable.n))\n" +
" └─ Projected table access on [t n]\n" +
" └─ Table(bigtable)\n" +
"",
},
{
Query: `select i, row_number() over (w3) from mytable window w1 as (w2), w2 as (), w3 as (w1)`,
ExpectedPlan: "Project(mytable.i, row_number() over () as row_number() over (w3))\n" +
" └─ Window(mytable.i, row_number() over ())\n" +
" └─ Projected table access on [i]\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `select i, row_number() over (w1 partition by s) from mytable window w1 as (order by i asc)`,
ExpectedPlan: "Project(mytable.i, row_number() over ( partition by mytable.s order by [mytable.i, idx=0, type=BIGINT, nullable=false] ASC) as row_number() over (w1 partition by s))\n" +
" └─ Window(mytable.i, row_number() over ( partition by mytable.s order by [mytable.i, idx=0, type=BIGINT, nullable=false] ASC))\n" +
" └─ Projected table access on [i s]\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `DELETE FROM two_pk WHERE c1 > 1`,
ExpectedPlan: "Delete\n" +
" └─ Filter(two_pk.c1 > 1)\n" +
" └─ Table(two_pk)\n" +
"",
},
{
Query: `DELETE FROM two_pk WHERE pk1 = 1 AND pk2 = 2`,
ExpectedPlan: "Delete\n" +
" └─ Filter((two_pk.pk1 = 1) AND (two_pk.pk2 = 2))\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2] with ranges: [{[1, 1], [2, 2]}])\n" +
"",
},
{
Query: `UPDATE two_pk SET c1 = 1 WHERE c1 > 1`,
ExpectedPlan: "Update\n" +
" └─ UpdateSource(SET two_pk.c1 = 1)\n" +
" └─ Filter(two_pk.c1 > 1)\n" +
" └─ Table(two_pk)\n" +
"",
},
{
Query: `UPDATE two_pk SET c1 = 1 WHERE pk1 = 1 AND pk2 = 2`,
ExpectedPlan: "Update\n" +
" └─ UpdateSource(SET two_pk.c1 = 1)\n" +
" └─ Filter((two_pk.pk1 = 1) AND (two_pk.pk2 = 2))\n" +
" └─ IndexedTableAccess(two_pk on [two_pk.pk1,two_pk.pk2] with ranges: [{[1, 1], [2, 2]}])\n" +
"",
},
{
Query: `UPDATE /*+ JOIN_ORDER(two_pk, one_pk) */ one_pk JOIN two_pk on one_pk.pk = two_pk.pk1 SET two_pk.c1 = two_pk.c1 + 1`,
ExpectedPlan: "Update\n" +
" └─ Update Join\n" +
" └─ UpdateSource(SET two_pk.c1 = (two_pk.c1 + 1))\n" +
" └─ Project(one_pk.pk, one_pk.c1, one_pk.c2, one_pk.c3, one_pk.c4, one_pk.c5, two_pk.pk1, two_pk.pk2, two_pk.c1, two_pk.c2, two_pk.c3, two_pk.c4, two_pk.c5)\n" +
" └─ IndexedJoin(one_pk.pk = two_pk.pk1)\n" +
" ├─ Table(two_pk)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `UPDATE one_pk INNER JOIN (SELECT * FROM two_pk) as t2 on one_pk.pk = t2.pk1 SET one_pk.c1 = one_pk.c1 + 1, one_pk.c2 = one_pk.c2 + 1`,
ExpectedPlan: "Update\n" +
" └─ Update Join\n" +
" └─ UpdateSource(SET one_pk.c1 = (one_pk.c1 + 1),SET one_pk.c2 = (one_pk.c2 + 1))\n" +
" └─ Project(one_pk.pk, one_pk.c1, one_pk.c2, one_pk.c3, one_pk.c4, one_pk.c5, t2.pk1, t2.pk2, t2.c1, t2.c2, t2.c3, t2.c4, t2.c5)\n" +
" └─ IndexedJoin(one_pk.pk = t2.pk1)\n" +
" ├─ SubqueryAlias(t2)\n" +
" │ └─ Projected table access on [pk1 pk2 c1 c2 c3 c4 c5]\n" +
" │ └─ Table(two_pk)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT a.* FROM invert_pk as a, invert_pk as b WHERE a.y = b.z`,
ExpectedPlan: "Project(a.x, a.y, a.z)\n" +
" └─ IndexedJoin(a.y = b.z)\n" +
" ├─ TableAlias(b)\n" +
" │ └─ Table(invert_pk)\n" +
" └─ TableAlias(a)\n" +
" └─ IndexedTableAccess(invert_pk on [invert_pk.y,invert_pk.z,invert_pk.x])\n" +
"",
},
{
Query: `SELECT a.* FROM invert_pk as a, invert_pk as b WHERE a.y = b.z AND a.z = 2`,
ExpectedPlan: "Project(a.x, a.y, a.z)\n" +
" └─ IndexedJoin(a.y = b.z)\n" +
" ├─ TableAlias(b)\n" +
" │ └─ Table(invert_pk)\n" +
" └─ Filter(a.z = 2)\n" +
" └─ TableAlias(a)\n" +
" └─ IndexedTableAccess(invert_pk on [invert_pk.y,invert_pk.z,invert_pk.x])\n" +
"",
},
{
Query: `SELECT * FROM invert_pk WHERE y = 0`,
ExpectedPlan: "Filter(invert_pk.y = 0)\n" +
" └─ Projected table access on [x y z]\n" +
" └─ IndexedTableAccess(invert_pk on [invert_pk.y,invert_pk.z,invert_pk.x] with ranges: [{[0, 0], (-∞, ∞), (-∞, ∞)}])\n" +
"",
},
{
Query: `SELECT * FROM invert_pk WHERE y >= 0`,
ExpectedPlan: "Filter(invert_pk.y >= 0)\n" +
" └─ Projected table access on [x y z]\n" +
" └─ IndexedTableAccess(invert_pk on [invert_pk.y,invert_pk.z,invert_pk.x] with ranges: [{[0, ∞), (-∞, ∞), (-∞, ∞)}])\n" +
"",
},
{
Query: `SELECT * FROM invert_pk WHERE y >= 0 AND z < 1`,
ExpectedPlan: "Filter((invert_pk.y >= 0) AND (invert_pk.z < 1))\n" +
" └─ Projected table access on [x y z]\n" +
" └─ IndexedTableAccess(invert_pk on [invert_pk.y,invert_pk.z,invert_pk.x] with ranges: [{[0, ∞), (-∞, 1), (-∞, ∞)}])\n" +
"",
},
{
Query: `SELECT * FROM one_pk WHERE pk IN (1)`,
ExpectedPlan: "Filter(one_pk.pk HASH IN (1))\n" +
" └─ Projected table access on [pk c1 c2 c3 c4 c5]\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk] with ranges: [{[1, 1]}])\n" +
"",
},
{
Query: `SELECT a.* FROM one_pk a CROSS JOIN one_pk c LEFT JOIN one_pk b ON b.pk = c.pk and b.pk = a.pk`,
ExpectedPlan: "Project(a.pk, a.c1, a.c2, a.c3, a.c4, a.c5)\n" +
" └─ LeftIndexedJoin((b.pk = c.pk) AND (b.pk = a.pk))\n" +
" ├─ CrossJoin\n" +
" │ ├─ TableAlias(a)\n" +
" │ │ └─ Table(one_pk)\n" +
" │ └─ TableAlias(c)\n" +
" │ └─ Table(one_pk)\n" +
" └─ TableAlias(b)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT a.* FROM one_pk a CROSS JOIN one_pk c RIGHT JOIN one_pk b ON b.pk = c.pk and b.pk = a.pk`,
ExpectedPlan: "Project(a.pk, a.c1, a.c2, a.c3, a.c4, a.c5)\n" +
" └─ RightJoin((b.pk = c.pk) AND (b.pk = a.pk))\n" +
" ├─ CrossJoin\n" +
" │ ├─ Projected table access on [pk c1 c2 c3 c4 c5]\n" +
" │ │ └─ TableAlias(a)\n" +
" │ │ └─ Table(one_pk)\n" +
" │ └─ Projected table access on [pk]\n" +
" │ └─ TableAlias(c)\n" +
" │ └─ Table(one_pk)\n" +
" └─ Projected table access on [pk]\n" +
" └─ TableAlias(b)\n" +
" └─ Table(one_pk)\n" +
"",
},
{
Query: `SELECT a.* FROM one_pk a CROSS JOIN one_pk c INNER JOIN one_pk b ON b.pk = c.pk and b.pk = a.pk`,
ExpectedPlan: "Project(a.pk, a.c1, a.c2, a.c3, a.c4, a.c5)\n" +
" └─ IndexedJoin((b.pk = c.pk) AND (b.pk = a.pk))\n" +
" ├─ CrossJoin\n" +
" │ ├─ TableAlias(a)\n" +
" │ │ └─ Table(one_pk)\n" +
" │ └─ TableAlias(c)\n" +
" │ └─ Table(one_pk)\n" +
" └─ TableAlias(b)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT a.* FROM one_pk a CROSS JOIN one_pk b INNER JOIN one_pk c ON b.pk = c.pk LEFT JOIN one_pk d ON c.pk = d.pk`,
ExpectedPlan: "Project(a.pk, a.c1, a.c2, a.c3, a.c4, a.c5)\n" +
" └─ LeftIndexedJoin(c.pk = d.pk)\n" +
" ├─ IndexedJoin(b.pk = c.pk)\n" +
" │ ├─ CrossJoin\n" +
" │ │ ├─ TableAlias(a)\n" +
" │ │ │ └─ Table(one_pk)\n" +
" │ │ └─ TableAlias(b)\n" +
" │ │ └─ Table(one_pk)\n" +
" │ └─ TableAlias(c)\n" +
" │ └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
" └─ TableAlias(d)\n" +
" └─ IndexedTableAccess(one_pk on [one_pk.pk])\n" +
"",
},
{
Query: `SELECT a.* FROM one_pk a CROSS JOIN one_pk c INNER JOIN (select * from one_pk) b ON b.pk = c.pk`,
ExpectedPlan: "Project(a.pk, a.c1, a.c2, a.c3, a.c4, a.c5)\n" +
" └─ InnerJoin(b.pk = c.pk)\n" +
" ├─ CrossJoin\n" +
" │ ├─ TableAlias(a)\n" +
" │ │ └─ Table(one_pk)\n" +
" │ └─ TableAlias(c)\n" +
" │ └─ Table(one_pk)\n" +
" └─ HashLookup(child: (b.pk), lookup: (c.pk))\n" +
" └─ CachedResults\n" +
" └─ SubqueryAlias(b)\n" +
" └─ Projected table access on [pk c1 c2 c3 c4 c5]\n" +
" └─ Table(one_pk)\n" +
"",
},
{
Query: `SELECT * FROM tabletest join mytable mt INNER JOIN othertable ot ON tabletest.i = ot.i2 order by 1,3,6`,
ExpectedPlan: "Sort(tabletest.i ASC, mt.i ASC, ot.i2 ASC)\n" +
" └─ IndexedJoin(tabletest.i = ot.i2)\n" +
" ├─ CrossJoin\n" +
" │ ├─ Table(tabletest)\n" +
" │ └─ TableAlias(mt)\n" +
" │ └─ Table(mytable)\n" +
" └─ TableAlias(ot)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `select a.pk, c.v2 from one_pk_three_idx a cross join one_pk_three_idx b right join one_pk_three_idx c on b.pk = c.v1 where b.pk = 0 and c.v2 = 0;`,
ExpectedPlan: "Project(a.pk, c.v2)\n" +
" └─ Filter(b.pk = 0)\n" +
" └─ RightJoin(b.pk = c.v1)\n" +
" ├─ CrossJoin\n" +
" │ ├─ Projected table access on [pk]\n" +
" │ │ └─ TableAlias(a)\n" +
" │ │ └─ Table(one_pk_three_idx)\n" +
" │ └─ Projected table access on [pk]\n" +
" │ └─ TableAlias(b)\n" +
" │ └─ Table(one_pk_three_idx)\n" +
" └─ Filter(c.v2 = 0)\n" +
" └─ Projected table access on [v2 v1]\n" +
" └─ TableAlias(c)\n" +
" └─ Table(one_pk_three_idx)\n" +
"",
},
{
Query: `select a.pk, c.v2 from one_pk_three_idx a cross join one_pk_three_idx b left join one_pk_three_idx c on b.pk = c.v1 where b.pk = 0 and a.v2 = 1;`,
ExpectedPlan: "Project(a.pk, c.v2)\n" +
" └─ LeftIndexedJoin(b.pk = c.v1)\n" +
" ├─ CrossJoin\n" +
" │ ├─ Filter(a.v2 = 1)\n" +
" │ │ └─ TableAlias(a)\n" +
" │ │ └─ Table(one_pk_three_idx)\n" +
" │ └─ Filter(b.pk = 0)\n" +
" │ └─ TableAlias(b)\n" +
" │ └─ IndexedTableAccess(one_pk_three_idx on [one_pk_three_idx.pk] with ranges: [{[0, 0]}])\n" +
" └─ TableAlias(c)\n" +
" └─ IndexedTableAccess(one_pk_three_idx on [one_pk_three_idx.v1,one_pk_three_idx.v2,one_pk_three_idx.v3])\n" +
"",
},
{
Query: `with a as (select a.i, a.s from mytable a CROSS JOIN mytable b) select * from a RIGHT JOIN mytable c on a.i+1 = c.i-1;`,
ExpectedPlan: "RightJoin((a.i + 1) = (c.i - 1))\n" +
" ├─ CachedResults\n" +
" │ └─ SubqueryAlias(a)\n" +
" │ └─ Project(a.i, a.s)\n" +
" │ └─ CrossJoin\n" +
" │ ├─ Projected table access on [i s]\n" +
" │ │ └─ TableAlias(a)\n" +
" │ │ └─ Table(mytable)\n" +
" │ └─ TableAlias(b)\n" +
" │ └─ Table(mytable)\n" +
" └─ TableAlias(c)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `select a.* from mytable a RIGHT JOIN mytable b on a.i = b.i+1 LEFT JOIN mytable c on a.i = c.i-1 RIGHT JOIN mytable d on b.i = d.i;`,
ExpectedPlan: "Project(a.i, a.s)\n" +
" └─ RightIndexedJoin(b.i = d.i)\n" +
" ├─ TableAlias(d)\n" +
" │ └─ Table(mytable)\n" +
" └─ LeftIndexedJoin(a.i = (c.i - 1))\n" +
" ├─ RightIndexedJoin(a.i = (b.i + 1))\n" +
" │ ├─ TableAlias(b)\n" +
" │ │ └─ IndexedTableAccess(mytable on [mytable.i])\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i])\n" +
" └─ TableAlias(c)\n" +
" └─ Table(mytable)\n" +
"",
},
{
Query: `select a.*,b.* from mytable a RIGHT JOIN othertable b on a.i = b.i2+1 LEFT JOIN mytable c on a.i = c.i-1 LEFT JOIN othertable d on b.i2 = d.i2;`,
ExpectedPlan: "Project(a.i, a.s, b.s2, b.i2)\n" +
" └─ LeftIndexedJoin(b.i2 = d.i2)\n" +
" ├─ LeftIndexedJoin(a.i = (c.i - 1))\n" +
" │ ├─ RightIndexedJoin(a.i = (b.i2 + 1))\n" +
" │ │ ├─ TableAlias(b)\n" +
" │ │ │ └─ Table(othertable)\n" +
" │ │ └─ TableAlias(a)\n" +
" │ │ └─ IndexedTableAccess(mytable on [mytable.i])\n" +
" │ └─ TableAlias(c)\n" +
" │ └─ Table(mytable)\n" +
" └─ TableAlias(d)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `select a.*,b.* from mytable a RIGHT JOIN othertable b on a.i = b.i2+1 RIGHT JOIN mytable c on a.i = c.i-1 LEFT JOIN othertable d on b.i2 = d.i2;`,
ExpectedPlan: "Project(a.i, a.s, b.s2, b.i2)\n" +
" └─ LeftIndexedJoin(b.i2 = d.i2)\n" +
" ├─ RightIndexedJoin(a.i = (c.i - 1))\n" +
" │ ├─ TableAlias(c)\n" +
" │ │ └─ Table(mytable)\n" +
" │ └─ RightIndexedJoin(a.i = (b.i2 + 1))\n" +
" │ ├─ TableAlias(b)\n" +
" │ │ └─ Table(othertable)\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ IndexedTableAccess(mytable on [mytable.i])\n" +
" └─ TableAlias(d)\n" +
" └─ IndexedTableAccess(othertable on [othertable.i2])\n" +
"",
},
{
Query: `select i.pk, j.v3 from one_pk_two_idx i JOIN one_pk_three_idx j on i.v1 = j.pk;`,
ExpectedPlan: "Project(i.pk, j.v3)\n" +
" └─ IndexedJoin(i.v1 = j.pk)\n" +
" ├─ TableAlias(i)\n" +
" │ └─ Table(one_pk_two_idx)\n" +
" └─ TableAlias(j)\n" +
" └─ IndexedTableAccess(one_pk_three_idx on [one_pk_three_idx.pk])\n" +
"",
},
{
Query: `select i.pk, j.v3, k.c1 from one_pk_two_idx i JOIN one_pk_three_idx j on i.v1 = j.pk JOIN one_pk k on j.v3 = k.pk;`,
ExpectedPlan: "Project(i.pk, j.v3, k.c1)\n" +
" └─ IndexedJoin(j.v3 = k.pk)\n" +
" ├─ TableAlias(k)\n" +
" │ └─ Table(one_pk)\n" +
" └─ IndexedJoin(i.v1 = j.pk)\n" +
" ├─ TableAlias(i)\n" +
" │ └─ Table(one_pk_two_idx)\n" +
" └─ TableAlias(j)\n" +
" └─ IndexedTableAccess(one_pk_three_idx on [one_pk_three_idx.pk])\n" +
"",
},
{
Query: `select i.pk, j.v3 from (one_pk_two_idx i JOIN one_pk_three_idx j on((i.v1 = j.pk)));`,
ExpectedPlan: "Project(i.pk, j.v3)\n" +
" └─ IndexedJoin(i.v1 = j.pk)\n" +
" ├─ TableAlias(i)\n" +
" │ └─ Table(one_pk_two_idx)\n" +
" └─ TableAlias(j)\n" +
" └─ IndexedTableAccess(one_pk_three_idx on [one_pk_three_idx.pk])\n" +
"",
},
{
Query: `select i.pk, j.v3, k.c1 from ((one_pk_two_idx i JOIN one_pk_three_idx j on ((i.v1 = j.pk))) JOIN one_pk k on((j.v3 = k.pk)));`,
ExpectedPlan: "Project(i.pk, j.v3, k.c1)\n" +
" └─ IndexedJoin(j.v3 = k.pk)\n" +
" ├─ TableAlias(k)\n" +
" │ └─ Table(one_pk)\n" +
" └─ IndexedJoin(i.v1 = j.pk)\n" +
" ├─ TableAlias(i)\n" +
" │ └─ Table(one_pk_two_idx)\n" +
" └─ TableAlias(j)\n" +
" └─ IndexedTableAccess(one_pk_three_idx on [one_pk_three_idx.pk])\n" +
"",
},
{
Query: `select i.pk, j.v3, k.c1 from (one_pk_two_idx i JOIN one_pk_three_idx j on ((i.v1 = j.pk)) JOIN one_pk k on((j.v3 = k.pk)))`,
ExpectedPlan: "Project(i.pk, j.v3, k.c1)\n" +
" └─ IndexedJoin(j.v3 = k.pk)\n" +
" ├─ TableAlias(k)\n" +
" │ └─ Table(one_pk)\n" +
" └─ IndexedJoin(i.v1 = j.pk)\n" +
" ├─ TableAlias(i)\n" +
" │ └─ Table(one_pk_two_idx)\n" +
" └─ TableAlias(j)\n" +
" └─ IndexedTableAccess(one_pk_three_idx on [one_pk_three_idx.pk])\n" +
"",
},
{
Query: `select a.* from one_pk_two_idx a RIGHT JOIN (one_pk_two_idx i JOIN one_pk_three_idx j on i.v1 = j.pk) on a.pk = i.v1 LEFT JOIN (one_pk_two_idx k JOIN one_pk_three_idx l on k.v1 = l.pk) on a.pk = l.v2;`,
ExpectedPlan: "Project(a.pk, a.v1, a.v2)\n" +
" └─ LeftIndexedJoin(a.pk = l.v2)\n" +
" ├─ RightIndexedJoin(a.pk = i.v1)\n" +
" │ ├─ IndexedJoin(i.v1 = j.pk)\n" +
" │ │ ├─ TableAlias(i)\n" +
" │ │ │ └─ Table(one_pk_two_idx)\n" +
" │ │ └─ TableAlias(j)\n" +
" │ │ └─ IndexedTableAccess(one_pk_three_idx on [one_pk_three_idx.pk])\n" +
" │ └─ TableAlias(a)\n" +
" │ └─ IndexedTableAccess(one_pk_two_idx on [one_pk_two_idx.pk])\n" +
" └─ IndexedJoin(k.v1 = l.pk)\n" +
" ├─ TableAlias(k)\n" +
" │ └─ Table(one_pk_two_idx)\n" +
" └─ TableAlias(l)\n" +
" └─ IndexedTableAccess(one_pk_three_idx on [one_pk_three_idx.pk])\n" +
"",
},
{
Query: `select a.* from one_pk_two_idx a LEFT JOIN (one_pk_two_idx i JOIN one_pk_three_idx j on i.pk = j.v3) on a.pk = i.pk RIGHT JOIN (one_pk_two_idx k JOIN one_pk_three_idx l on k.v2 = l.v3) on a.v1 = l.v2;`,
ExpectedPlan: "Project(a.pk, a.v1, a.v2)\n" +
" └─ RightIndexedJoin(a.v1 = l.v2)\n" +
" ├─ IndexedJoin(k.v2 = l.v3)\n" +
" │ ├─ TableAlias(k)\n" +
" │ │ └─ Table(one_pk_two_idx)\n" +
" │ └─ TableAlias(l)\n" +
" │ └─ Table(one_pk_three_idx)\n" +
" └─ LeftIndexedJoin(a.pk = i.pk)\n" +
" ├─ TableAlias(a)\n" +
" │ └─ IndexedTableAccess(one_pk_two_idx on [one_pk_two_idx.v1])\n" +
" └─ IndexedJoin(i.pk = j.v3)\n" +
" ├─ TableAlias(j)\n" +
" │ └─ Table(one_pk_three_idx)\n" +
" └─ TableAlias(i)\n" +
" └─ IndexedTableAccess(one_pk_two_idx on [one_pk_two_idx.pk])\n" +
"",
},
} | go/libraries/doltcore/sqle/enginetest/dolt_query_plans.go | 0.613005 | 0.634458 | dolt_query_plans.go | starcoder |
package graph
import (
"fmt"
"io"
"math"
"path/filepath"
"strings"
"github.com/google/pprof/internal/measurement"
)
// DotAttributes contains details about the graph itself, giving
// insight into how its elements should be rendered.
type DotAttributes struct {
	Nodes map[*Node]*DotNodeAttributes // A map allowing each Node to have its own visualization option; nodes absent from the map use default rendering
}
// DotNodeAttributes contains Node specific visualization options.
// A nil *DotNodeAttributes (or zero-value fields) means the default
// rendering is used for that aspect of the node.
type DotNodeAttributes struct {
	Shape       string                 // The optional shape of the node when rendered visually
	Bold        bool                   // If the node should be bold or not
	Peripheries int                    // An optional number of borders to place around a node
	URL         string                 // An optional url link to add to a node
	Formatter   func(*NodeInfo) string // An optional formatter for the node's label; overrides the default multiline name
}
// DotConfig contains attributes about how a graph should be
// constructed and how it should look (title, legend, and the
// formatting callbacks used to render values and tags).
type DotConfig struct {
	Title     string   // The title of the DOT graph
	LegendURL string   // The URL to link to from the legend.
	Labels    []string // The labels for the DOT's legend

	FormatValue func(int64) string         // A formatting function for values
	FormatTag   func(int64, string) string // A formatting function for numeric tags
	Total       int64                      // The total weight of the graph, used to compute percentages
}
const maxNodelets = 4 // Number of nodelets for labels (both numeric and non)
// ComposeDot creates and writes a graph in the DOT format to the
// writer, using the rendering options in a and the graph-level
// configuration in c.
func ComposeDot(w io.Writer, g *Graph, a *DotAttributes, c *DotConfig) {
	bld := &builder{w, a, c}

	// Emit the header and legend first; the closing brace is written on return.
	bld.start()
	defer bld.finish()
	bld.addLegend()

	if len(g.Nodes) == 0 {
		return
	}

	// Assign each node a 1-based DOT identifier and find the largest
	// absolute flat value, which is used to scale node font sizes.
	nodeIDMap := make(map[*Node]int)
	hasNodelets := make(map[*Node]bool)
	maxFlat := float64(abs64(g.Nodes[0].FlatValue()))
	for i, n := range g.Nodes {
		nodeIDMap[n] = i + 1
		if f := float64(abs64(n.FlatValue())); f > maxFlat {
			maxFlat = f
		}
	}

	// Emit each node (and its nodelets), gathering every outgoing edge.
	// Each edge is keyed by a distinct throwaway node so that multiple
	// edges into the same destination can coexist in the map.
	edges := EdgeMap{}
	for _, n := range g.Nodes {
		bld.addNode(n, nodeIDMap[n], maxFlat)
		hasNodelets[n] = bld.addNodelets(n, nodeIDMap[n])
		for _, e := range n.Out {
			edges[&Node{}] = e
		}
	}

	// Emit edges, heaviest first, as a layout hint to the graph engine.
	for _, e := range edges.Sort() {
		bld.addEdge(e, nodeIDMap[e.Src], nodeIDMap[e.Dest], hasNodelets[e.Src])
	}
}
// builder wraps an io.Writer and understands how to compose DOT formatted elements.
type builder struct {
	io.Writer                 // destination for the generated DOT text
	attributes *DotAttributes // per-node rendering overrides
	config     *DotConfig     // graph-wide settings: title, legend, formatters, total
}
// start emits the DOT preamble: the digraph declaration, named after
// the configured title (or "unnamed" when no title is set), plus the
// default node styling.
func (b *builder) start() {
	title := b.config.Title
	if title == "" {
		title = "unnamed"
	}
	fmt.Fprintf(b, "digraph \"%s\" {\n", title)
	fmt.Fprint(b, "node [style=filled fillcolor=\"#f8f8f8\"]\n")
}
// finish closes the digraph opened by start with its trailing brace.
func (b *builder) finish() {
	fmt.Fprint(b, "}\n")
}
// addLegend emits the legend as a boxed node inside subgraph cluster_L,
// titled with the first label and listing every configured label on its
// own line, with an optional click-through URL and a tooltip taken from
// the graph title.
func (b *builder) addLegend() {
	labels := b.config.Labels
	var title string
	if len(labels) != 0 {
		title = labels[0]
	}
	attrs := fmt.Sprintf(`"%s" [shape=box fontsize=16 label="%s\l"`, title, strings.Join(labels, `\l`))
	if url := b.config.LegendURL; url != "" {
		attrs += fmt.Sprintf(` URL="%s" target="_blank"`, url)
	}
	if b.config.Title != "" {
		attrs += fmt.Sprintf(` tooltip="%s"`, b.config.Title)
	}
	fmt.Fprintf(b, "subgraph cluster_L { %s] }\n", attrs)
}
// addNode generates a graph node in DOT format.
// The label shows the flat value with its percentage of the configured
// total and, when the cumulative value differs, an "of <cum> (<pct>)"
// line; the font size is scaled against maxFlat so heavier nodes render
// larger. Per-node overrides come from b.attributes.Nodes[node].
func (b *builder) addNode(node *Node, nodeID int, maxFlat float64) {
	flat, cum := node.FlatValue(), node.CumValue()
	attrs := b.attributes.Nodes[node]

	// Populate label for node, preferring a caller-supplied formatter
	// over the default multiline name.
	var label string
	if attrs != nil && attrs.Formatter != nil {
		label = attrs.Formatter(&node.Info)
	} else {
		label = multilinePrintableName(&node.Info)
	}

	flatValue := b.config.FormatValue(flat)
	if flat != 0 {
		label = label + fmt.Sprintf(`%s (%s)`,
			flatValue,
			strings.TrimSpace(percentage(flat, b.config.Total)))
	} else {
		label = label + "0"
	}
	cumValue := flatValue
	if cum != flat {
		// Separate the cumulative line from the flat line; a bare space
		// suffices when the flat part is just "0".
		if flat != 0 {
			label = label + `\n`
		} else {
			label = label + " "
		}
		cumValue = b.config.FormatValue(cum)
		label = label + fmt.Sprintf(`of %s (%s)`,
			cumValue,
			strings.TrimSpace(percentage(cum, b.config.Total)))
	}

	// Scale font sizes from 8 to 24 based on percentage of flat frequency.
	// Use non linear growth to emphasize the size difference.
	baseFontSize, maxFontGrowth := 8, 16.0
	fontSize := baseFontSize
	if maxFlat != 0 && flat != 0 && float64(abs64(flat)) <= maxFlat {
		fontSize += int(math.Ceil(maxFontGrowth * math.Sqrt(float64(abs64(flat))/maxFlat)))
	}

	// Determine node shape (default box, overridable per node).
	shape := "box"
	if attrs != nil && attrs.Shape != "" {
		shape = attrs.Shape
	}

	// Create DOT attribute for node; colors scale with the node's share
	// of the total (foreground for the border, background for the fill).
	attr := fmt.Sprintf(`label="%s" fontsize=%d shape=%s tooltip="%s (%s)" color="%s" fillcolor="%s"`,
		label, fontSize, shape, node.Info.PrintableName(), cumValue,
		dotColor(float64(node.CumValue())/float64(abs64(b.config.Total)), false),
		dotColor(float64(node.CumValue())/float64(abs64(b.config.Total)), true))

	// Add on extra attributes if provided.
	if attrs != nil {
		// Make bold if specified.
		if attrs.Bold {
			attr += ` style="bold,filled"`
		}

		// Add peripheries if specified.
		if attrs.Peripheries != 0 {
			attr += fmt.Sprintf(` peripheries=%d`, attrs.Peripheries)
		}

		// Add URL if specified. target="_blank" forces the link to open in a new tab.
		if attrs.URL != "" {
			attr += fmt.Sprintf(` URL="%s" target="_blank"`, attrs.URL)
		}
	}

	fmt.Fprintf(b, "N%d [%s]\n", nodeID, attr)
}
// addNodelets generates the DOT boxes for the node tags if they exist.
// Up to maxNodelets label-tag boxes are attached to the node, each
// optionally followed by its associated numeric-tag boxes; unlabeled
// numeric tags attach directly to the node. Reports whether any
// nodelets were written.
func (b *builder) addNodelets(node *Node, nodeID int) bool {
	var nodelets string

	// Populate two Tag slices, one for LabelTags and one for NumericTags.
	var ts []*Tag
	lnts := make(map[string][]*Tag)
	for _, t := range node.LabelTags {
		ts = append(ts, t)
	}
	for l, tm := range node.NumericTags {
		for _, t := range tm {
			lnts[l] = append(lnts[l], t)
		}
	}

	// For leaf nodes, print cumulative tags (includes weight from
	// children that have been deleted).
	// For internal nodes, print only flat tags.
	flatTags := len(node.Out) > 0

	// Select the top maxNodelets alphanumeric labels by weight.
	SortTags(ts, flatTags)
	if len(ts) > maxNodelets {
		ts = ts[:maxNodelets]
	}
	for i, t := range ts {
		w := t.CumValue()
		if flatTags {
			w = t.FlatValue()
		}
		if w == 0 {
			// Zero-weight tags carry no information; skip them.
			continue
		}
		weight := b.config.FormatValue(w)
		nodelets += fmt.Sprintf(`N%d_%d [label = "%s" fontsize=8 shape=box3d tooltip="%s"]`+"\n", nodeID, i, t.Name, weight)
		nodelets += fmt.Sprintf(`N%d -> N%d_%d [label=" %s" weight=100 tooltip="%s" labeltooltip="%s"]`+"\n", nodeID, nodeID, i, weight, weight, weight)
		// Numeric tags grouped under this label hang off the label's box.
		if nts := lnts[t.Name]; nts != nil {
			nodelets += b.numericNodelets(nts, maxNodelets, flatTags, fmt.Sprintf(`N%d_%d`, nodeID, i))
		}
	}

	// Numeric tags with no label attach directly to the node itself.
	if nts := lnts[""]; nts != nil {
		nodelets += b.numericNodelets(nts, maxNodelets, flatTags, fmt.Sprintf(`N%d`, nodeID))
	}

	fmt.Fprint(b, nodelets)
	return nodelets != ""
}
// numericNodelets renders boxes for numeric tags, first collapsing them
// into at most maxNumNodelets value-range buckets. source is the DOT id
// of the element the boxes attach to; edges for tags whose flat and
// cumulative values differ are drawn dotted. Returns the generated DOT
// text (empty if every bucket weighed zero).
func (b *builder) numericNodelets(nts []*Tag, maxNumNodelets int, flatTags bool, source string) string {
	nodelets := ""

	// Collapse numeric labels into maxNumNodelets buckets, of the form:
	// 1MB..2MB, 3MB..5MB, ...
	for j, t := range b.collapsedTags(nts, maxNumNodelets, flatTags) {
		w, attr := t.CumValue(), ` style="dotted"`
		if flatTags || t.FlatValue() == t.CumValue() {
			w, attr = t.FlatValue(), ""
		}
		if w != 0 {
			weight := b.config.FormatValue(w)
			nodelets += fmt.Sprintf(`N%s_%d [label = "%s" fontsize=8 shape=box3d tooltip="%s"]`+"\n", source, j, t.Name, weight)
			nodelets += fmt.Sprintf(`%s -> N%s_%d [label=" %s" weight=100 tooltip="%s" labeltooltip="%s"%s]`+"\n", source, source, j, weight, weight, weight, attr)
		}
	}
	return nodelets
}
// addEdge generates a graph edge in DOT format.
// The edge's weight, pen width, and color scale with its share of the
// configured total; residual edges are drawn dotted, inline call sites
// are annotated in the label, and edges leaving a node with nodelets
// get extra length so the children stay clear of the tag boxes.
func (b *builder) addEdge(edge *Edge, from, to int, hasNodelets bool) {
	var inline string
	if edge.Inline {
		inline = `\n (inline)`
	}
	w := b.config.FormatValue(edge.WeightValue())
	attr := fmt.Sprintf(`label=" %s%s"`, w, inline)
	if b.config.Total != 0 {
		// Note: edge.weight > b.config.Total is possible for profile diffs.
		if weight := 1 + int(min64(abs64(edge.WeightValue()*100/b.config.Total), 100)); weight > 1 {
			attr = fmt.Sprintf(`%s weight=%d`, attr, weight)
		}
		if width := 1 + int(min64(abs64(edge.WeightValue()*5/b.config.Total), 5)); width > 1 {
			attr = fmt.Sprintf(`%s penwidth=%d`, attr, width)
		}
		attr = fmt.Sprintf(`%s color="%s"`, attr,
			dotColor(float64(edge.WeightValue())/float64(abs64(b.config.Total)), false))
	}
	// The tooltip uses "..." instead of "->" for residual edges.
	arrow := "->"
	if edge.Residual {
		arrow = "..."
	}
	tooltip := fmt.Sprintf(`"%s %s %s (%s)"`,
		edge.Src.Info.PrintableName(), arrow, edge.Dest.Info.PrintableName(), w)
	attr = fmt.Sprintf(`%s tooltip=%s labeltooltip=%s`, attr, tooltip, tooltip)

	if edge.Residual {
		attr = attr + ` style="dotted"`
	}

	if hasNodelets {
		// Separate children further if source has tags.
		attr = attr + " minlen=2"
	}

	fmt.Fprintf(b, "N%d -> N%d [%s]\n", from, to, attr)
}
// dotColor maps score (clamped to [-1.0, 1.0]) to a "#rrggbb" color:
// -1.0 is red, 0.0 is grey, and 1.0 is green. When isBackground is
// true a light, low-saturation variant is returned (suitable behind
// text); otherwise a darker foreground color is produced.
func dotColor(score float64, isBackground bool) string {
	const (
		// Pushes scores away from 0.0 so small magnitudes remain
		// visually distinguishable from grey, and more of the color
		// range is used.
		shift = 0.7

		// HSV saturation/value pairs for the two color variants.
		bgSaturation, bgValue = 0.1, 0.93
		fgSaturation, fgValue = 1.0, 0.7
	)

	saturation, value := fgSaturation, fgValue
	if isBackground {
		saturation, value = bgSaturation, bgValue
	}

	// Clamp to the supported range.
	score = math.Max(-1.0, math.Min(1.0, score))

	// Fade toward grey (rather than yellow) as the score nears zero.
	if math.Abs(score) < 0.2 {
		saturation *= math.Abs(score) / 0.2
	}

	// Apply the nonlinear shift away from 0.0 (grey).
	switch {
	case score > 0.0:
		score = math.Pow(score, 1.0-shift)
	case score < 0.0:
		score = -math.Pow(-score, 1.0-shift)
	}

	// Negative scores dim the red channel (leaving green), positive
	// scores dim the green channel (leaving red); blue is fixed by the
	// saturation alone.
	r, g := value, value
	if score < 0.0 {
		r = value * (1 + saturation*score)
	} else {
		g = value * (1 - saturation*score)
	}
	b := value * (1 - saturation)
	return fmt.Sprintf("#%02x%02x%02x", uint8(r*255.0), uint8(g*255.0), uint8(b*255.0))
}
// percentage computes the percentage of total of a value, and encodes
// it as a string. At least two digits of precision are printed.
func percentage(value, total int64) string {
	// ratio is always non-negative; a zero total yields "0%" rather than
	// dividing by zero.
	var ratio float64
	if total != 0 {
		ratio = math.Abs(float64(value)/float64(total)) * 100
	}
	// ratio is already an absolute value, so the switch compares it
	// directly (the original wrapped it in a redundant math.Abs).
	switch {
	case ratio >= 99.95 && ratio <= 100.05:
		// Collapse values that would round to 100.00 into fixed-width " 100%".
		return " 100%"
	case ratio >= 1.0:
		return fmt.Sprintf("%5.2f%%", ratio)
	default:
		// Below 1%, switch to %g so tiny ratios keep two significant digits.
		return fmt.Sprintf("%5.2g%%", ratio)
	}
}
// multilinePrintableName builds a multi-line dot label for a node:
// its name components joined by `\n` escapes (rendered as line breaks
// by dot), with the symbol name split on "::" and "." and the file
// path reduced to its base name.
func multilinePrintableName(info *NodeInfo) string {
	// Work on a copy so the caller's NodeInfo is not mutated.
	infoCopy := *info
	infoCopy.Name = strings.Replace(infoCopy.Name, "::", `\n`, -1)
	// NOTE(review): this splits on every dot, including package
	// separators — presumably intentional to keep labels narrow.
	infoCopy.Name = strings.Replace(infoCopy.Name, ".", `\n`, -1)
	if infoCopy.File != "" {
		infoCopy.File = filepath.Base(infoCopy.File)
	}
	return strings.Join(infoCopy.NameComponents(), `\n`) + `\n`
}
// collapsedTags trims and sorts a slice of tags. When more than count
// tags are present, the first count tags seed one group each and every
// remaining tag is merged into the group whose representative is
// nearest by tagDistance; each group then collapses to one summary tag.
func (b *builder) collapsedTags(ts []*Tag, count int, flatTags bool) []*Tag {
	ts = SortTags(ts, flatTags)
	if len(ts) <= count {
		return ts
	}
	// Seed one group per retained tag; element [0] of each group acts as
	// the group's representative for distance comparisons.
	tagGroups := make([][]*Tag, count)
	for i, t := range (ts)[:count] {
		tagGroups[i] = []*Tag{t}
	}
	// Greedily assign each leftover tag to the closest group.
	for _, t := range (ts)[count:] {
		g, d := 0, tagDistance(t, tagGroups[0][0])
		for i := 1; i < count; i++ {
			if nd := tagDistance(t, tagGroups[i][0]); nd < d {
				g, d = i, nd
			}
		}
		tagGroups[g] = append(tagGroups[g], t)
	}
	// Replace each group with a single tag labelled by its value range.
	var nts []*Tag
	for _, g := range tagGroups {
		l, w, c := b.tagGroupLabel(g)
		nts = append(nts, &Tag{
			Name: l,
			Flat: w,
			Cum: c,
		})
	}
	return SortTags(nts, flatTags)
}
// tagDistance reports the absolute difference between two tag values,
// after scaling u into t's unit.
func tagDistance(t, u *Tag) float64 {
	scaled, _ := measurement.Scale(u.Value, u.Unit, t.Unit)
	diff := scaled - float64(t.Value)
	if diff < 0 {
		diff = -diff
	}
	return diff
}
// tagGroupLabel returns the label plus flat and cumulative weights for
// a group of tags. A single tag keeps its own label; a multi-tag group
// is labelled "min..max" and its weights are summed (and averaged by
// the div counts when those are non-zero).
func (b *builder) tagGroupLabel(g []*Tag) (label string, flat, cum int64) {
	formatTag := b.config.FormatTag
	if formatTag == nil {
		formatTag = measurement.Label
	}
	if len(g) == 1 {
		t := g[0]
		return formatTag(t.Value, t.Unit), t.FlatValue(), t.CumValue()
	}
	min := g[0]
	max := g[0]
	df, f := min.FlatDiv, min.Flat
	dc, c := min.CumDiv, min.Cum
	for _, t := range g[1:] {
		// Track extremes; comparisons scale t into the current extreme's unit.
		if v, _ := measurement.Scale(t.Value, t.Unit, min.Unit); int64(v) < min.Value {
			min = t
		}
		if v, _ := measurement.Scale(t.Value, t.Unit, max.Unit); int64(v) > max.Value {
			max = t
		}
		f += t.Flat
		df += t.FlatDiv
		c += t.Cum
		dc += t.CumDiv
	}
	// Non-zero div counts mean the accumulated weights are averages.
	if df != 0 {
		f = f / df
	}
	if dc != 0 {
		c = c / dc
	}
	return formatTag(min.Value, min.Unit) + ".." + formatTag(max.Value, max.Unit), f, c
}
// min64 returns the smaller of two int64 values.
func min64(a, b int64) int64 {
	if b < a {
		return b
	}
	return a
}
package policy
import "github.com/benthosdev/benthos/v4/internal/docs"
// FieldSpec returns a spec for a common batching field.
// The spec documents count/byte_size/period/check triggers plus an
// optional list of processors applied to each flushed batch.
func FieldSpec() docs.FieldSpec {
	return docs.FieldSpec{
		Name: "batching",
		Type: docs.FieldTypeObject,
		Description: `
Allows you to configure a [batching policy](/docs/configuration/batching).`,
		// Example configurations surfaced in the generated documentation.
		Examples: []interface{}{
			map[string]interface{}{
				"count": 0,
				"byte_size": 5000,
				"period": "1s",
			},
			map[string]interface{}{
				"count": 10,
				"period": "1s",
			},
			map[string]interface{}{
				"count": 0,
				"period": "1m",
				"check": `this.contains("END BATCH")`,
			},
		},
		// Child fields of the batching object.
		Children: docs.FieldSpecs{
			docs.FieldInt(
				"count",
				"A number of messages at which the batch should be flushed. If `0` disables count based batching.",
			),
			docs.FieldInt(
				"byte_size",
				"An amount of bytes at which the batch should be flushed. If `0` disables size based batching.",
			).HasDefault(0),
			docs.FieldString(
				"period",
				"A period in which an incomplete batch should be flushed regardless of its size.",
				"1s", "1m", "500ms",
			).HasDefault(""),
			docs.FieldBloblang(
				"check",
				"A [Bloblang query](/docs/guides/bloblang/about/) that should return a boolean value indicating whether a message should end a batch.",
				`this.type == "end_of_transaction"`,
			).HasDefault(""),
			docs.FieldAdvanced(
				"processors",
				"A list of [processors](/docs/components/processors/about) to apply to a batch as it is flushed. This allows you to aggregate and archive the batch however you see fit. Please note that all resulting messages are flushed as a single batch, therefore splitting the batch into smaller batches using these processors is a no-op.",
				[]map[string]interface{}{
					{
						"archive": map[string]interface{}{
							"format": "lines",
						},
					},
				},
				[]map[string]interface{}{
					{
						"archive": map[string]interface{}{
							"format": "json_array",
						},
					},
				},
				[]map[string]interface{}{
					{
						"merge_json": struct{}{},
					},
				},
			).Array().HasType(docs.FieldTypeProcessor).Optional(),
		},
	}
}
package unitcapturereduce
import (
"bytes"
"container/list"
"encoding/json"
"fmt"
)
/*
Reduction algorithm:
Assume UnitCapture outputs keyframes that are linearly interpolated between by UnitPlay (an approximation of actual behaviour)
Goal: Remove keyframes that do not greatly effect the followed 'path' ('path' here being the path through 12 demension space that the keyframes represent)
1. Keep the first and last keyframe
2. For each keyframe between the first and last consider removing it;
2.1 Linearly interpolate between the one before and the one after using the time of the one under consideration
2.2 Construct a normal distribution with the current keyframe as mean and a standard deviation of 1
2.3 Calculate the probability of the linearly interpolated keyframe using this distribution
2.4 If the probability of this keyframe is above the probability threshold, the current key frame can be removed
3. Continue until all keyframes have been considered
*/
// ReduceUnitCapture reduces BIS_fnc_UnitCapture in a lossy manner using an error threshold.
// It returns the reduced SQF capture string plus the frame counts before
// and after reduction.
func ReduceUnitCapture(rawCaptureData string, probabilityThreshold float64) (string, int, int, error) {
	frames, err := parseCaptureData(rawCaptureData)
	if err != nil {
		return "", 0, 0, err
	}
	originalCount := frames.numberOfFrames()
	// The sliding three-frame window in reduce needs at least 3 frames.
	if originalCount < 3 {
		return "", 0, 0, fmt.Errorf("There must be atleast 3 capture frames")
	}
	frames.reduce(probabilityThreshold)
	return frames.SQFString(), originalCount, frames.numberOfFrames(), nil
}
// parseCaptureData parses the SQF array capture data into a list of
// captureKeyFrame structs. Each raw frame must be a 5-element array of
// [time, position, direction, up, velocity].
func parseCaptureData(rawCaptureData string) (*captureKeyFrames, error) {
	// A SQF array is actually valid JSON, so parse as json.
	// No typesafe way to represent inner slice though.
	var unsafeCaptureData [][]interface{}
	err := json.Unmarshal([]byte(rawCaptureData), &unsafeCaptureData)
	if err != nil {
		return nil, err
	}
	data := list.New()
	// Unpack each parsed keyframe into a captureKeyFrame
	for i, unsafeKeyFrame := range unsafeCaptureData {
		if len(unsafeKeyFrame) != 5 {
			return nil, fmt.Errorf("Invalid UnitCapture Output")
		}
		// Typesafe assertions to the correct type
		time, timeOK := unsafeKeyFrame[0].(float64)
		unsafePosition, unsafePositionOK := unsafeKeyFrame[1].([]interface{})
		unsafeDirection, unsafeDirectionOK := unsafeKeyFrame[2].([]interface{})
		unsafeUp, unsafeUpOK := unsafeKeyFrame[3].([]interface{})
		unsafeVelocity, unsafeVelocityOK := unsafeKeyFrame[4].([]interface{})
		position := unsafeSliceToVec3(unsafePosition)
		direction := unsafeSliceToVec3(unsafeDirection)
		up := unsafeSliceToVec3(unsafeUp)
		velocity := unsafeSliceToVec3(unsafeVelocity)
		// Check everything asserted properly; unsafeSliceToVec3 returns
		// nil for any vector that is not exactly three floats.
		if !timeOK ||
			!unsafePositionOK || !unsafeDirectionOK || !unsafeUpOK || !unsafeVelocityOK ||
			position == nil || direction == nil || up == nil || velocity == nil {
			return nil, fmt.Errorf("Invalid UnitCapture Output")
		}
		// Pack into struct and add to list
		keyframe := &captureKeyFrame{
			OriginalFrameNumber: i,
			Time:                time,
			Position:            *position,
			Direction:           *direction,
			Up:                  *up,
			Velocity:            *velocity,
		}
		data.PushBack(keyframe)
	}
	return (*captureKeyFrames)(data), nil
}
// unsafeSliceToVec3 converts a decoded JSON slice into a vec3.
// It returns nil unless the slice holds exactly three float64 values.
func unsafeSliceToVec3(slice []interface{}) *vec3 {
	if len(slice) != 3 {
		return nil
	}
	first, firstOK := slice[0].(float64)
	second, secondOK := slice[1].(float64)
	third, thirdOK := slice[2].(float64)
	if !(firstOK && secondOK && thirdOK) {
		return nil
	}
	return &vec3{A: first, B: second, C: third}
}
// captureKeyFrames is a linked list of *captureKeyFrame values, kept as
// a container/list so frames can be removed cheaply during reduction.
type captureKeyFrames list.List

// numberOfFrames returns how many keyframes the list currently holds.
func (ckf *captureKeyFrames) numberOfFrames() int {
	return (*list.List)(ckf).Len()
}
// Reduce the capture data (see algorithm above).
// reduce walks a three-frame window (start, consider, end) over the
// list and removes 'consider' whenever the frame a linear interpolation
// between its neighbours would produce is probable enough (per the
// normal-distribution PDF against the threshold) to stand in for it.
func (ckf *captureKeyFrames) reduce(probabilityThreshold float64) {
	startElm := (*list.List)(ckf).Front()
	considerElm := startElm.Next()
	endElm := considerElm.Next()
	start := startElm.Value.(*captureKeyFrame)
	consider := considerElm.Value.(*captureKeyFrame)
	end := endElm.Value.(*captureKeyFrame)
	for {
		// Synthesize the interpolated frame at consider's timestamp and
		// drop consider if it is close enough to the interpolation.
		newFrame := start.lerp(end, consider.Time)
		if vectorisedNormalDistributionPDF(newFrame.toSlice(), consider.toSlice(), 1) > probabilityThreshold {
			(*list.List)(ckf).Remove(considerElm)
		}
		if endElm.Next() == nil {
			break
		}
		// NOTE(review): the window always advances from the old start, so
		// after a removal the next window begins at the former end frame —
		// confirm this frame-skipping behaviour is intended.
		startElm = startElm.Next()
		considerElm = startElm.Next()
		endElm = considerElm.Next()
		if endElm == nil {
			break
		}
		start = startElm.Value.(*captureKeyFrame)
		consider = considerElm.Value.(*captureKeyFrame)
		end = endElm.Value.(*captureKeyFrame)
	}
}
// SQFString renders the keyframe list back into an SQF array literal.
func (ckf *captureKeyFrames) SQFString() string {
	var out bytes.Buffer
	out.WriteString("[")
	for elm := (*list.List)(ckf).Front(); elm != nil; elm = elm.Next() {
		out.WriteString(elm.Value.(*captureKeyFrame).SQFString())
		// Comma-separate all but the final frame.
		if elm.Next() != nil {
			out.WriteString(",")
		}
	}
	out.WriteString("]")
	return out.String()
}
// captureKeyFrame is a single BIS_fnc_UnitCapture sample.
type captureKeyFrame struct {
	// OriginalFrameNumber is the index within the raw capture, or -1 for
	// synthetic frames produced by lerp.
	OriginalFrameNumber int
	// Time is the capture timestamp of this frame.
	Time float64
	Position vec3
	Direction vec3
	Up vec3
	Velocity vec3
}
// lerp linearly interpolates between ckf and end, producing a synthetic
// keyframe at the given time. The interpolation parameter t is the
// fraction of the [ckf.Time, end.Time] interval that time represents.
func (ckf *captureKeyFrame) lerp(end *captureKeyFrame, time float64) *captureKeyFrame {
	// Fix: the fraction must be normalised by the interval length
	// (end.Time - ckf.Time), not by end.Time alone — the previous form
	// was only correct when ckf.Time happened to be 0.
	t := (time - ckf.Time) / (end.Time - ckf.Time)
	return &captureKeyFrame{
		OriginalFrameNumber: -1, // synthetic frame, not present in the raw capture
		Time:                time,
		Position:            ckf.Position.lerp(end.Position, t),
		Direction:           ckf.Direction.lerp(end.Direction, t),
		Up:                  ckf.Up.lerp(end.Up, t),
		Velocity:            ckf.Velocity.lerp(end.Velocity, t),
	}
}
// toSlice flattens the keyframe into a 12-element vector: position,
// direction, up and velocity components in order. Useful for treating
// a keyframe as one big vector.
func (ckf *captureKeyFrame) toSlice() []float64 {
	return []float64{
		ckf.Position.A, ckf.Position.B, ckf.Position.C,
		ckf.Direction.A, ckf.Direction.B, ckf.Direction.C,
		ckf.Up.A, ckf.Up.B, ckf.Up.C,
		ckf.Velocity.A, ckf.Velocity.B, ckf.Velocity.C,
	}
}
// Convert a keyframe to SQF array format
func (ckf *captureKeyFrame) SQFString() string {
return fmt.Sprintf("[%v,%s,%s,%s,%s]", ckf.Time, ckf.Position.SQFString(), ckf.Direction.SQFString(), ckf.Up.SQFString(), ckf.Velocity.SQFString())
} | armatools/unitcapturereduce/unitcapturereduce.go | 0.815416 | 0.48987 | unitcapturereduce.go | starcoder |
package result
import (
"fmt"
)
// container boxes a value so that presence can be signalled by a
// non-nil pointer, independent of the value's own zero value.
type container[T any] struct {
	value T
}

// Result is a helper type for error handling without returning multiple values.
// Any Result will either contain a value or an error.
// For a Result built via Success or Failure exactly one of the two
// pointers is non-nil; the zero Result has both nil.
type Result[S, F any] struct {
	value *container[S]
	failure *container[F]
}
// String renders the failure value when present, otherwise the success
// value. error and fmt.Stringer values use Error()/String(); plain
// strings are returned directly; anything else goes through fmt.Sprint.
func (r *Result[_, _]) String() string {
	if r.IsFailure() {
		var v any = r.FailureValue()
		switch f := v.(type) {
		case error:
			return f.Error()
		case string:
			return f
		case fmt.Stringer:
			return f.String()
		}
		return fmt.Sprint(v)
	}
	var v any = r.SuccessValue()
	switch f := v.(type) {
	case error:
		return f.Error()
	case fmt.Stringer:
		return f.String()
	}
	// fmt.Sprint covers the string case here too, so no separate case is needed.
	return fmt.Sprint(v)
}
// IsSuccess tests if this Result has a value.
// Note: on the zero Result both IsSuccess and IsFailure report false.
func (r Result[_, _]) IsSuccess() bool {
	return r.value != nil
}

// IsFailure tests if this Result has a failure.
func (r Result[_, _]) IsFailure() bool {
	return r.failure != nil
}

// IsSome is an alias for IsSuccess to satisfy the option.Optional interface.
func (r Result[_, _]) IsSome() bool {
	return r.value != nil
}

// IsNone is an alias for IsFailure to satisfy the option.Optional interface.
func (r Result[_, _]) IsNone() bool {
	return r.failure != nil
}
// SuccessValue returns the value if this Result is Success. If the result is an error, it returns the zero value of the value type.
func (r Result[S, _]) SuccessValue() S {
	if r.value == nil {
		v := new(S)
		return *v
	}
	return r.value.value
}

// FailureValue returns the error if this Result is Error. Otherwise, it returns the zero value of the failure type.
func (r Result[_, F]) FailureValue() F {
	if r.failure == nil {
		v := new(F)
		return *v
	}
	return r.failure.value
}

// Value is an alias for SuccessValue to satisfy the option.Optional interface.
func (r Result[S, _]) Value() S {
	return r.SuccessValue()
}
// Success creates a Result with a value.
func Success[S, F any](v S) Result[S, F] {
	return Result[S, F]{
		value: &container[S]{v},
		failure: nil,
	}
}

// Failure creates a Result with an error.
func Failure[S, F any](v F) Result[S, F] {
	return Result[S, F]{
		value: nil,
		failure: &container[F]{v},
	}
}
// HandleResult dispatches r to whenSuccess or whenFailure depending on
// its state and returns the chosen handler's result.
// It panics if either handler is nil.
func HandleResult[S, F, R any](r Result[S, F], whenSuccess func(S) R, whenFailure func(F) R) R {
	switch {
	case whenSuccess == nil:
		panic("whenSuccess function must be supplied to HandleResult")
	case whenFailure == nil:
		panic("whenFailure function must be supplied to HandleResult")
	case r.IsFailure():
		return whenFailure(r.FailureValue())
	default:
		return whenSuccess(r.SuccessValue())
	}
}
// Bind applies binder when result is Success and otherwise returns the Failure.
// Unlike Map, binder itself may produce a Failure.
func Bind[S, F, R any](binder func(S) Result[R, F], r Result[S, F]) Result[R, F] {
	if r.IsFailure() {
		return Failure[R](r.FailureValue())
	}
	return binder(r.SuccessValue())
}

// Map applies mapping when result is Success and otherwise returns the Failure.
func Map[S, F, R any](mapping func(S) R, r Result[S, F]) Result[R, F] {
	if r.IsFailure() {
		return Failure[R](r.FailureValue())
	}
	return Success[R, F](mapping(r.SuccessValue()))
}

// MapError applies mapping to the error when the result is Failure and otherwise returns the Success.
func MapError[S, F any](mapping func(F) F, r Result[S, F]) Result[S, F] {
	if r.IsFailure() {
		return Failure[S](mapping(r.FailureValue()))
	}
	return r
}
// DefaultValue returns the value of r if r is Success. Otherwise, it returns the fallback value success.
func DefaultValue[S, F any](success S, r Result[S, F]) S {
	if r.IsFailure() {
		return success
	}
	return r.SuccessValue()
}

// DefaultWith returns the value of r if r is Success. Otherwise, it returns the output of defThunk.
// defThunk is only invoked on the failure path.
func DefaultWith[S, F any](defThunk func() S, r Result[S, F]) S {
	if r.IsFailure() {
		return defThunk()
	}
	return r.SuccessValue()
}

// Contains tests whether the result is Success and contains value.
func Contains[S comparable, F any](value S, r Result[S, F]) bool {
	return r.IsSuccess() && r.SuccessValue() == value
}
// Count returns 0 if this result is Failure. Otherwise returns 1.
func Count[S, F any](r Result[S, F]) int {
	if r.IsFailure() {
		return 0
	}
	return 1
}

// Exists tests whether the value of r matches the predicate. If the Result is an error, it returns false.
func Exists[S, F any](predicate func(S) bool, r Result[S, F]) bool {
	return r.IsSuccess() && predicate(r.SuccessValue())
}

// Flatten returns the inner Result when Results are nested.
// A Failure in the outer Result is re-wrapped at the inner type.
func Flatten[S, F any](rr Result[Result[S, F], F]) Result[S, F] {
	if rr.IsFailure() {
		return Failure[S](rr.FailureValue())
	}
	return rr.SuccessValue()
}
// Fold applies the folder function to a Result with s being the initial state for the folder.
// If the Result is a Failure, the initial state is returned unchanged.
func Fold[S, F, State any](folder func(State, S) State, s State, r Result[S, F]) State {
	if r.IsFailure() {
		return s
	}
	return folder(s, r.SuccessValue())
}

// FoldBack applies the folder function to a Result with s being the initial state for the folder,
// passing the value before the state. If the Result is a Failure, the initial state is returned.
func FoldBack[S, F, State any](folder func(S, State) State, r Result[S, F], s State) State {
	if r.IsFailure() {
		return s
	}
	return folder(r.SuccessValue(), s)
}

// ForAll tests whether the value contained in the Result matches the predicate.
// It will always return true if the Result is a Failure (vacuous truth).
func ForAll[S, F any](predicate func(S) bool, r Result[S, F]) bool {
	return r.IsFailure() || predicate(r.SuccessValue())
}
// Get returns the value of the Result.
// If Result is a Failure, it panics.
func Get[S, F any](r Result[S, F]) S {
	if r.IsFailure() {
		panic("cannot call Get on a Failure Result")
	}
	return r.SuccessValue()
}

// IsNone returns true if the Result is a Failure (free-function form of the method).
func IsNone[S, F any](r Result[S, F]) bool {
	return r.IsFailure()
}

// IsSome returns true if the Result is Success (free-function form of the method).
func IsSome[S, F any](r Result[S, F]) bool {
	return r.IsSuccess()
}

// Iter applies the action to the result's value; a Failure is a no-op.
func Iter[S, F any](action func(S), r Result[S, F]) {
	if r.IsFailure() {
		return
	}
	action(r.SuccessValue())
}
// Map2 applies function f to two Results and returns the function's return value as a Result.
// If either Result is a Failure, its failure value is returned (r1's failure takes precedence).
func Map2[S1, S2, F, R any](f func(S1, S2) R, r1 Result[S1, F], r2 Result[S2, F]) Result[R, F] {
	if r1.IsFailure() {
		return Failure[R](r1.FailureValue())
	}
	if r2.IsFailure() {
		return Failure[R](r2.FailureValue())
	}
	return Success[R, F](f(r1.SuccessValue(), r2.SuccessValue()))
}

// Map3 applies function f to three Results and returns the function's return value as a Result.
// If any of the Results is a Failure, the first failure (in argument order) is returned.
func Map3[S1, S2, S3, F, R any](f func(S1, S2, S3) R, r1 Result[S1, F], r2 Result[S2, F], r3 Result[S3, F]) Result[R, F] {
	if r1.IsFailure() {
		return Failure[R](r1.FailureValue())
	}
	if r2.IsFailure() {
		return Failure[R](r2.FailureValue())
	}
	if r3.IsFailure() {
		return Failure[R](r3.FailureValue())
	}
	return Success[R, F](f(r1.SuccessValue(), r2.SuccessValue(), r3.SuccessValue()))
}
// OfNullable creates a result from a pointer.
// If the pointer is nil, the result is a Failure holding the zero value of F
// (the previous comment's claim of a "nil" message did not match the code).
// If the pointer is not nil, the result will be Success of the value the pointer points to.
func OfNullable[S, F any](value *S) Result[S, F] {
	if value == nil {
		return Failure[S](*(new(F)))
	}
	return Success[S, F](*value)
}
// OrElse returns r if it is Success or ifNone if r is a Failure.
func OrElse[S, F any](ifNone Result[S, F], r Result[S, F]) Result[S, F] {
	if r.IsFailure() {
		return ifNone
	}
	return r
}

// OrElseWith returns r if it is Success or the Result returned from ifNoneThunk if r is a Failure.
// ifNoneThunk is only invoked on the failure path.
func OrElseWith[S, F any](ifNoneThunk func() Result[S, F], r Result[S, F]) Result[S, F] {
	if r.IsFailure() {
		return ifNoneThunk()
	}
	return r
}
// ToSlice returns the value in Result as a single item slice.
// If the Result is a Failure, it returns an empty (non-nil) slice.
func ToSlice[S, F any](r Result[S, F]) []S {
	if r.IsFailure() {
		return []S{}
	}
	return []S{r.SuccessValue()}
}

// ToNullable returns a pointer to a copy of the value in the Result if it is Success.
// If the Result is a Failure, it returns nil.
func ToNullable[S, F any](r Result[S, F]) *S {
	if r.IsFailure() {
		return nil
	}
	v := r.SuccessValue()
	return &v
}
// Lift adapts a function that returns a value and an error into a function
// that returns a Result that will be Success if there is no error and Failure if there is an error.
// When F is string, the failure carries err.Error(); otherwise the
// failure is built from F's zero value via runtime type assertions.
func Lift[S, F any](f func() (S, error)) func() Result[S, F] {
	return func() Result[S, F] {
		s, err := f()
		if err != nil {
			// Inspect the zero value of F to decide how to represent err.
			var fv any = *(new(F))
			switch e := fv.(type) {
			case error:
				// NOTE(review): this returns the ZERO value of F, discarding
				// err entirely — looks like it should wrap err instead; confirm.
				return Failure[S](e.(F))
			case string:
				var v any = err.Error()
				return Failure[S](v.(F))
			}
			return Failure[S](fv.(F))
		}
		return Success[S, F](s)
	}
}
// Lift1 adapts a function that accepts one input and returns a value and an error into a function
// that returns a Result that will be Success if there is no error and Failure if there is an error.
func Lift1[T, S, F any](f func(T) (S, error)) func(T) Result[S, F] {
	return func(input T) Result[S, F] {
		s, err := f(input)
		// Delegate the error-to-F conversion rules to Lift.
		lifted := Lift[S, F](func() (S, error) { return s, err })
		return lifted()
	}
}

// Lift2 adapts a function that accepts two inputs and returns a value and an error into a function
// that returns a Result that will be Success if there is no error and Failure if there is an error.
func Lift2[T1, T2, S, F any](f func(T1, T2) (S, error)) func(T1, T2) Result[S, F] {
	return func(input1 T1, input2 T2) Result[S, F] {
		s, err := f(input1, input2)
		// Delegate the error-to-F conversion rules to Lift.
		lifted := Lift[S, F](func() (S, error) { return s, err })
		return lifted()
	}
}
package feature
import (
"encoding/json"
"errors"
"fmt"
"github.com/tomchavakis/turf-go/geojson"
"github.com/tomchavakis/turf-go/geojson/geometry"
)
// Feature defines a new feature type.
// A Feature object represents a spatially bounded thing. Every object is a GeoJSON object no matter where it
// occurs in a GeoJSON text.
// https://tools.ietf.org/html/rfc7946#section-3.2
type Feature struct {
	// ID is an optional identifier for the feature.
	ID string `json:"id"`
	// A Feature object has a "Type" member with the value "Feature".
	Type geojson.OBjectType `json:"type"`
	// A Feature object has a member with the name "properties". The
	// value of the properties member is an object (any JSON object or a
	// JSON null value).
	Properties map[string]interface{} `json:"properties"`
	// Bbox is the bounding box of the feature.
	Bbox []float64 `json:"bbox"`
	// A Feature object has a member with the name "Geometry". The value
	// of the geometry member SHALL be either a Geometry object as
	// defined above or, in the case that the Feature is unlocated, a
	// JSON null value.
	Geometry geometry.Geometry `json:"geometry"`
}
// New initializes a new Feature.
// NOTE(review): the error return is always nil today; the signature
// presumably reserves room for future validation.
func New(geometry geometry.Geometry, bbox []float64, properties map[string]interface{}, id string) (*Feature, error) {
	return &Feature{
		ID:         id,
		Geometry:   geometry,
		Properties: properties,
		Type:       geojson.Feature,
		Bbox:       bbox,
	}, nil
}
// FromJSON returns a new Feature by passing in a valid JSON string.
func FromJSON(gjson string) (*Feature, error) {
	if gjson == "" {
		return nil, errors.New("input cannot be empty")
	}
	feature := &Feature{}
	if err := json.Unmarshal([]byte(gjson), feature); err != nil {
		return nil, errors.New("cannot decode the input value")
	}
	return feature, nil
}
// ToPoint converts the Feature to Point.
// Returns an error if the feature's geometry type is not Point.
func (f *Feature) ToPoint() (*geometry.Point, error) {
	if f.Geometry.GeoJSONType != geojson.Point {
		return nil, errors.New("invalid geometry type")
	}
	// Round-trip the untyped coordinates through JSON to obtain []float64.
	var coords []float64
	ccc, err := json.Marshal(f.Geometry.Coordinates)
	if err != nil {
		return nil, errors.New("cannot marshal object")
	}
	err = json.Unmarshal(ccc, &coords)
	if err != nil {
		return nil, errors.New("cannot unmarshal object")
	}
	// GeoJSON positions are [lng, lat].
	// NOTE(review): panics if fewer than two coordinates decode — confirm
	// inputs are always validated upstream.
	var pos = geometry.Point{}
	pos.Lat = coords[1]
	pos.Lng = coords[0]
	return &pos, nil
}
// ToMultiPoint converts the Feature to MultiPoint type.
// Returns an error if the feature's geometry type is not MultiPoint.
func (f *Feature) ToMultiPoint() (*geometry.MultiPoint, error) {
	if f.Geometry.GeoJSONType != geojson.MultiPoint {
		return nil, errors.New("invalid geometry type")
	}
	var m geometry.MultiPoint
	// Round-trip the untyped coordinates through JSON to get [][lng, lat].
	var coords [][]float64
	ccc, err := json.Marshal(f.Geometry.Coordinates)
	if err != nil {
		return nil, errors.New("cannot marshal object")
	}
	err = json.Unmarshal(ccc, &coords)
	if err != nil {
		return nil, errors.New("cannot unmarshal object")
	}
	for i := 0; i < len(coords); i++ {
		// GeoJSON positions are [lng, lat]; NewPoint takes (lat, lng).
		p := geometry.NewPoint(coords[i][1], coords[i][0])
		m.Coordinates = append(m.Coordinates, *p)
	}
	return &m, nil
}
// ToPolygon converts a Polygon Feature to Polygon geometry.
// Returns an error if the feature's geometry type is not Polygon or the
// coordinates cannot be round-tripped through JSON.
func (f *Feature) ToPolygon() (*geometry.Polygon, error) {
	if f.Geometry.GeoJSONType != geojson.Polygon {
		return nil, errors.New("invalid geometry type")
	}
	// Round-trip the untyped coordinates to obtain [ring][point][lng,lat].
	var polygonCoordinates [][][]float64
	ccc, err := json.Marshal(f.Geometry.Coordinates)
	if err != nil {
		return nil, errors.New("cannot marshal object")
	}
	err = json.Unmarshal(ccc, &polygonCoordinates)
	if err != nil {
		// Fix: this path previously reported "cannot marshal object".
		return nil, errors.New("cannot unmarshal object")
	}
	var coords = []geometry.LineString{}
	for i := 0; i < len(polygonCoordinates); i++ {
		var posArray = []geometry.Point{}
		for j := 0; j < len(polygonCoordinates[i]); j++ {
			// GeoJSON positions are [lng, lat].
			pos := geometry.Point{
				Lng: polygonCoordinates[i][j][0],
				Lat: polygonCoordinates[i][j][1],
			}
			posArray = append(posArray, pos)
		}
		coords = append(coords, geometry.LineString{Coordinates: posArray})
	}
	poly, err := geometry.NewPolygon(coords)
	if err != nil {
		return nil, fmt.Errorf("cannot create a new polygon %v", err.Error())
	}
	return poly, nil
}
// ToMultiPolygon converts a MultiPolygon Feature to MultiPolygon geometry.
// Returns an error if the feature's geometry type is not MultiPolygon or
// the coordinates cannot be round-tripped through JSON.
func (f *Feature) ToMultiPolygon() (*geometry.MultiPolygon, error) {
	if f.Geometry.GeoJSONType != geojson.MultiPolygon {
		return nil, errors.New("invalid geometry type")
	}
	// Round-trip to obtain [polygon][ring][point][lng,lat].
	var multiPolygonCoordinates [][][][]float64
	ccc, err := json.Marshal(f.Geometry.Coordinates)
	if err != nil {
		return nil, errors.New("cannot marshal object")
	}
	err = json.Unmarshal(ccc, &multiPolygonCoordinates)
	if err != nil {
		// Fix: this path previously reported "cannot marshal object".
		return nil, errors.New("cannot unmarshal object")
	}
	var polys = []geometry.Polygon{}
	for k := 0; k < len(multiPolygonCoordinates); k++ {
		var coords = []geometry.LineString{}
		for i := 0; i < len(multiPolygonCoordinates[k]); i++ {
			var posArray = []geometry.Point{}
			for j := 0; j < len(multiPolygonCoordinates[k][i]); j++ {
				// GeoJSON positions are [lng, lat].
				pos := geometry.Point{
					Lng: multiPolygonCoordinates[k][i][j][0],
					Lat: multiPolygonCoordinates[k][i][j][1],
				}
				posArray = append(posArray, pos)
			}
			coords = append(coords, geometry.LineString{Coordinates: posArray})
		}
		polys = append(polys, geometry.Polygon{Coordinates: coords})
	}
	poly, err := geometry.NewMultiPolygon(polys)
	if err != nil {
		// Fix: message previously read "cannot creat a new polygon".
		return nil, errors.New("cannot create a new multipolygon")
	}
	return poly, nil
}
// ToLineString converts a LineString Feature to LineString geometry.
// Returns an error if the feature's geometry type is not LineString or
// the coordinates cannot be round-tripped through JSON.
func (f *Feature) ToLineString() (*geometry.LineString, error) {
	if f.Geometry.GeoJSONType != geojson.LineString {
		return nil, errors.New("invalid geometry type")
	}
	// Round-trip to obtain [point][lng,lat].
	var coords [][]float64
	ccc, err := json.Marshal(f.Geometry.Coordinates)
	if err != nil {
		return nil, errors.New("cannot marshal object")
	}
	err = json.Unmarshal(ccc, &coords)
	if err != nil {
		// Fix: this path previously reported "cannot marshal object".
		return nil, errors.New("cannot unmarshal object")
	}
	var coordinates []geometry.Point
	for _, coord := range coords {
		// GeoJSON positions are [lng, lat].
		coordinates = append(coordinates, geometry.Point{
			Lat: coord[1],
			Lng: coord[0],
		})
	}
	lineString, err := geometry.NewLineString(coordinates)
	if err != nil {
		// Fix: message previously read "cannot creat a new polygon".
		return nil, errors.New("cannot create a new linestring")
	}
	return lineString, nil
}
// ToMultiLineString converts a MultiLineString faeture to MultiLineString geometry.
func (f *Feature) ToMultiLineString() (*geometry.MultiLineString, error) {
if f.Geometry.GeoJSONType != geojson.MiltiLineString {
return nil, errors.New("invalid geometry type")
}
var coords [][][]float64
ccc, err := json.Marshal(f.Geometry.Coordinates)
if err != nil {
return nil, errors.New("cannot marshal object")
}
err = json.Unmarshal(ccc, &coords)
if err != nil {
return nil, errors.New("cannot marshal object")
}
var coordinates []geometry.LineString
for i := 0; i < len(coords); i++ {
var ls geometry.LineString
var points []geometry.Point
for j := 0; j < len(coords[i]); j++ {
p := geometry.Point{
Lat: coords[i][j][1],
Lng: coords[i][j][0],
}
points = append(points, p)
}
ls.Coordinates = points
coordinates = append(coordinates, ls)
}
ml, err := geometry.NewMultiLineString(coordinates)
if err != nil {
return nil, errors.New("can't create a new multiLineString")
}
return ml, nil
} | geojson/feature/feature.go | 0.769687 | 0.533397 | feature.go | starcoder |
package gozxing
import (
"math/bits"
errors "golang.org/x/xerrors"
)
// BitArray is a growable array of bits, packed little-endian within
// 32-bit words (bit i lives at bits[i/32], position i%32).
type BitArray struct {
	bits []uint32
	size int
}

// NewEmptyBitArray returns a zero-length BitArray with room for one word.
func NewEmptyBitArray() *BitArray {
	return &BitArray{makeArray(1), 0}
}

// NewBitArray returns a BitArray holding size bits, all initially unset.
func NewBitArray(size int) *BitArray {
	return &BitArray{makeArray(size), size}
}
// GetSize returns the number of bits in the array.
func (b *BitArray) GetSize() int {
	return b.size
}

// GetSizeInBytes returns the number of bytes needed to hold all bits.
func (b *BitArray) GetSizeInBytes() int {
	return (b.size + 7) / 8
}

// ensureCapacity grows the backing word slice so it can hold size bits,
// preserving existing contents. No-op when capacity already suffices.
func (b *BitArray) ensureCapacity(size int) {
	if size > len(b.bits)*32 {
		newBits := makeArray(size)
		copy(newBits, b.bits)
		b.bits = newBits
	}
}

// Get reports whether bit i is set.
func (b *BitArray) Get(i int) bool {
	return (b.bits[i/32] & (1 << uint(i%32))) != 0
}

// Set sets bit i.
func (b *BitArray) Set(i int) {
	b.bits[i/32] |= 1 << uint(i%32)
}

// Flip toggles bit i.
func (b *BitArray) Flip(i int) {
	b.bits[i/32] ^= 1 << uint(i%32)
}
// GetNextSet returns the index of the first set bit at or after from,
// or b.size if no set bit remains.
func (b *BitArray) GetNextSet(from int) int {
	if from >= b.size {
		return b.size
	}
	bitsOffset := from / 32
	currentBits := b.bits[bitsOffset]
	// Mask off bits below 'from' within the first word: -(1<<k) has all
	// bits from position k upward set (two's complement).
	currentBits &= -(1 << uint(from&0x1F))
	for currentBits == 0 {
		bitsOffset++
		if bitsOffset == len(b.bits) {
			return b.size
		}
		currentBits = b.bits[bitsOffset]
	}
	result := (bitsOffset * 32) + bits.TrailingZeros32(currentBits)
	// Clamp: trailing words may contain bits beyond the logical size.
	if result > b.size {
		return b.size
	}
	return result
}

// GetNextUnset returns the index of the first unset bit at or after
// from, or b.size if no unset bit remains. Mirrors GetNextSet on the
// complemented words.
func (b *BitArray) GetNextUnset(from int) int {
	if from >= b.size {
		return b.size
	}
	bitsOffset := from / 32
	currentBits := ^b.bits[bitsOffset]
	currentBits &= -(1 << uint(from&0x1F))
	for currentBits == 0 {
		bitsOffset++
		if bitsOffset == len(b.bits) {
			return b.size
		}
		currentBits = ^b.bits[bitsOffset]
	}
	result := (bitsOffset * 32) + bits.TrailingZeros32(currentBits)
	if result > b.size {
		return b.size
	}
	return result
}
// SetBulk replaces the whole 32-bit word containing bit i with newBits.
func (b *BitArray) SetBulk(i int, newBits uint32) {
	b.bits[i/32] = newBits
}

// SetRange sets all bits in [start, end). Returns an error when the
// range is inverted or out of bounds; an empty range is a no-op.
func (b *BitArray) SetRange(start, end int) error {
	if end < start || start < 0 || end > b.size {
		return errors.New("IllegalArgumentException")
	}
	if end == start {
		return nil
	}
	// Make end inclusive, then OR a mask into each affected word.
	end--
	firstInt := start / 32
	lastInt := end / 32
	for i := firstInt; i <= lastInt; i++ {
		firstBit := 0
		lastBit := 31
		if i == firstInt {
			firstBit = start % 32
		}
		if i == lastInt {
			lastBit = end % 32
		}
		// (2<<lastBit) - (1<<firstBit) = ones in positions [firstBit, lastBit].
		mask := (2 << uint(lastBit)) - (1 << uint(firstBit))
		b.bits[i] |= uint32(mask)
	}
	return nil
}
// Clear unsets every bit, keeping the current capacity.
func (b *BitArray) Clear() {
	for i := range b.bits {
		b.bits[i] = 0
	}
}

// IsRange reports whether every bit in [start, end) equals value.
// Returns an error when the range is inverted or out of bounds; an
// empty range is trivially true.
func (b *BitArray) IsRange(start, end int, value bool) (bool, error) {
	if end < start || start < 0 || end > b.size {
		return false, errors.New("IllegalArgumentException")
	}
	if end == start {
		return true, nil
	}
	// Make end inclusive, then compare each affected word against a mask.
	end--
	firstInt := start / 32
	lastInt := end / 32
	for i := firstInt; i <= lastInt; i++ {
		firstBit := 0
		lastBit := 31
		if i == firstInt {
			firstBit = start % 32
		}
		if i == lastInt {
			lastBit = end % 32
		}
		// Ones in positions [firstBit, lastBit] of this word.
		mask := uint32((2 << uint(lastBit)) - (1 << uint(firstBit)))
		expect := uint32(0)
		if value {
			expect = mask
		}
		if (b.bits[i] & mask) != expect {
			return false, nil
		}
	}
	return true, nil
}
// AppendBit appends a single bit, growing the array if needed.
func (b *BitArray) AppendBit(bit bool) {
	b.ensureCapacity(b.size + 1)
	if bit {
		b.bits[b.size/32] |= 1 << uint(b.size%32)
	}
	b.size++
}

// AppendBits appends the numBits least-significant bits of value,
// most-significant first. numBits must be in [0, 32].
func (b *BitArray) AppendBits(value int, numBits int) error {
	if numBits < 0 || numBits > 32 {
		return errors.New("IllegalArgumentException: Num bits must be between 0 and 32")
	}
	b.ensureCapacity(b.size + numBits)
	for numBitsLeft := numBits; numBitsLeft > 0; numBitsLeft-- {
		b.AppendBit(((value >> uint(numBitsLeft-1)) & 0x01) == 1)
	}
	return nil
}

// AppendBitArray appends all bits of other, in order.
func (b *BitArray) AppendBitArray(other *BitArray) {
	otherSize := other.size
	b.ensureCapacity(b.size + otherSize)
	for i := 0; i < otherSize; i++ {
		b.AppendBit(other.Get(i))
	}
}
// Xor XORs other into b in place. Both arrays must have the same size.
func (b *BitArray) Xor(other *BitArray) error {
	if b.size != other.size {
		return errors.New("IllegalArgumentException: Sizes don't match")
	}
	for i := 0; i < len(b.bits); i++ {
		b.bits[i] ^= other.bits[i]
	}
	return nil
}

// ToBytes packs numBytes bytes into array starting at offset, reading
// bits from bitOffset onward. Within each output byte the first bit
// read becomes the most-significant bit.
func (b *BitArray) ToBytes(bitOffset int, array []byte, offset, numBytes int) {
	for i := 0; i < numBytes; i++ {
		theByte := byte(0)
		for j := 0; j < 8; j++ {
			if b.Get(bitOffset) {
				theByte |= 1 << uint(7-j)
			}
			bitOffset++
		}
		array[offset+i] = theByte
	}
}
// GetBitArray exposes the backing word slice; callers must not mutate it.
func (b *BitArray) GetBitArray() []uint32 {
	return b.bits
}
// Reverse reverses the order of all bits in the array in place.
// It rebuilds the word slice with each word bit-reversed and the word
// order flipped, then shifts left to discard the padding bits when the
// size is not a multiple of 32.
func (b *BitArray) Reverse() {
	// Guard: an empty array has nothing to reverse; without this the
	// original code indexed b.bits[0] on a zero-length slice and panicked.
	if b.size == 0 {
		return
	}
	newBits := make([]uint32, len(b.bits))
	// lastWord: index of the final word in use (renamed from "len", which
	// shadowed the builtin).
	lastWord := (b.size - 1) / 32
	oldBitsLen := lastWord + 1
	for i := 0; i < oldBitsLen; i++ {
		newBits[lastWord-i] = bits.Reverse32(b.bits[i])
	}
	if b.size != oldBitsLen*32 {
		// Shift everything left by the number of unused padding bits so the
		// reversed bit 0 lands at position 0.
		leftOffset := uint(oldBitsLen*32 - b.size)
		currentInt := newBits[0] >> leftOffset
		for i := 1; i < oldBitsLen; i++ {
			nextInt := newBits[i]
			currentInt |= nextInt << uint(32-leftOffset)
			newBits[i-1] = currentInt
			currentInt = nextInt >> leftOffset
		}
		newBits[oldBitsLen-1] = currentInt
	}
	b.bits = newBits
}
// makeArray allocates a word slice large enough to hold size bits,
// rounding up to whole 32-bit words.
func makeArray(size int) []uint32 {
	words := (size + 31) / 32
	return make([]uint32, words)
}
// equals()
// hashCode()
// String renders the bits as '.' (clear) and 'X' (set) characters,
// with a space preceding every group of eight bits.
func (b *BitArray) String() string {
	out := make([]byte, 0, b.size+(b.size/8)+1)
	for i := 0; i < b.size; i++ {
		if i%8 == 0 {
			out = append(out, ' ')
		}
		ch := byte('.')
		if b.Get(i) {
			ch = 'X'
		}
		out = append(out, ch)
	}
	return string(out)
}
// clone() | bit_array.go | 0.604866 | 0.473596 | bit_array.go | starcoder |
package zcl
import (
"errors"
"github.com/shimmeringbee/bytecodec"
"github.com/shimmeringbee/bytecodec/bitbuffer"
)
/*
* Zigbee Cluster List data types, as per 2.6.2 in ZCL Revision 6 (14 January 2016).
* Downloaded From: https://zigbeealliance.org/developer_resources/zigbee-cluster-library/
*/
// ZCL attribute data type identifiers, per section 2.6.2 of ZCL Revision 6.
const (
	TypeNull AttributeDataType = 0x00
	// Generic data blobs, 8 to 64 bits wide.
	TypeData8 AttributeDataType = 0x08
	TypeData16 AttributeDataType = 0x09
	TypeData24 AttributeDataType = 0x0a
	TypeData32 AttributeDataType = 0x0b
	TypeData40 AttributeDataType = 0x0c
	TypeData48 AttributeDataType = 0x0d
	TypeData56 AttributeDataType = 0x0e
	TypeData64 AttributeDataType = 0x0f
	TypeBoolean AttributeDataType = 0x10
	// Bitmaps, 8 to 64 bits wide.
	TypeBitmap8 AttributeDataType = 0x18
	TypeBitmap16 AttributeDataType = 0x19
	TypeBitmap24 AttributeDataType = 0x1a
	TypeBitmap32 AttributeDataType = 0x1b
	TypeBitmap40 AttributeDataType = 0x1c
	TypeBitmap48 AttributeDataType = 0x1d
	TypeBitmap56 AttributeDataType = 0x1e
	TypeBitmap64 AttributeDataType = 0x1f
	// Unsigned integers, 8 to 64 bits wide.
	TypeUnsignedInt8 AttributeDataType = 0x20
	TypeUnsignedInt16 AttributeDataType = 0x21
	TypeUnsignedInt24 AttributeDataType = 0x22
	TypeUnsignedInt32 AttributeDataType = 0x23
	TypeUnsignedInt40 AttributeDataType = 0x24
	TypeUnsignedInt48 AttributeDataType = 0x25
	TypeUnsignedInt56 AttributeDataType = 0x26
	TypeUnsignedInt64 AttributeDataType = 0x27
	// Signed integers, 8 to 64 bits wide.
	TypeSignedInt8 AttributeDataType = 0x28
	TypeSignedInt16 AttributeDataType = 0x29
	TypeSignedInt24 AttributeDataType = 0x2a
	TypeSignedInt32 AttributeDataType = 0x2b
	TypeSignedInt40 AttributeDataType = 0x2c
	TypeSignedInt48 AttributeDataType = 0x2d
	TypeSignedInt56 AttributeDataType = 0x2e
	TypeSignedInt64 AttributeDataType = 0x2f
	// Enumerations.
	TypeEnum8 AttributeDataType = 0x30
	TypeEnum16 AttributeDataType = 0x31
	// Floating point (half, single, double precision).
	TypeFloatSemi AttributeDataType = 0x38
	TypeFloatSingle AttributeDataType = 0x39
	TypeFloatDouble AttributeDataType = 0x3a
	// Strings; octet vs character, with 8- or 16-bit length prefixes.
	TypeStringOctet8 AttributeDataType = 0x41
	TypeStringCharacter8 AttributeDataType = 0x42
	TypeStringOctet16 AttributeDataType = 0x43
	TypeStringCharacter16 AttributeDataType = 0x44
	// Ordered and unordered collections.
	TypeArray AttributeDataType = 0x48
	TypeStructure AttributeDataType = 0x4c
	TypeSet AttributeDataType = 0x50
	TypeBag AttributeDataType = 0x51
	// Time types.
	TypeTimeOfDay AttributeDataType = 0xe0
	TypeDate AttributeDataType = 0xe1
	TypeUTCTime AttributeDataType = 0xe2
	// Identifiers and miscellaneous types.
	TypeClusterID AttributeDataType = 0xe9
	TypeAttributeID AttributeDataType = 0xea
	TypeBACnetOID AttributeDataType = 0xeb
	TypeIEEEAddress AttributeDataType = 0xf0
	TypeSecurityKey128 AttributeDataType = 0xf1
	TypeUnknown AttributeDataType = 0xff
)
// DiscreteTypes records, for each AttributeDataType, whether the type is
// classified as discrete (true) rather than analog (false).
// NOTE(review): TypeNull and TypeUnknown are neither discrete nor analog
// in the spec but map to false here — confirm callers treat them correctly.
var DiscreteTypes = map[AttributeDataType]bool{
	TypeNull: false,
	TypeData8: true,
	TypeData16: true,
	TypeData24: true,
	TypeData32: true,
	TypeData40: true,
	TypeData48: true,
	TypeData56: true,
	TypeData64: true,
	TypeBoolean: true,
	TypeBitmap8: true,
	TypeBitmap16: true,
	TypeBitmap24: true,
	TypeBitmap32: true,
	TypeBitmap40: true,
	TypeBitmap48: true,
	TypeBitmap56: true,
	TypeBitmap64: true,
	TypeUnsignedInt8: false,
	TypeUnsignedInt16: false,
	TypeUnsignedInt24: false,
	TypeUnsignedInt32: false,
	TypeUnsignedInt40: false,
	TypeUnsignedInt48: false,
	TypeUnsignedInt56: false,
	TypeUnsignedInt64: false,
	TypeSignedInt8: false,
	TypeSignedInt16: false,
	TypeSignedInt24: false,
	TypeSignedInt32: false,
	TypeSignedInt40: false,
	TypeSignedInt48: false,
	TypeSignedInt56: false,
	TypeSignedInt64: false,
	TypeEnum8: true,
	TypeEnum16: true,
	TypeFloatSemi: false,
	TypeFloatSingle: false,
	TypeFloatDouble: false,
	TypeStringOctet8: true,
	TypeStringCharacter8: true,
	TypeStringOctet16: true,
	TypeStringCharacter16: true,
	TypeArray: true,
	TypeStructure: true,
	TypeSet: true,
	TypeBag: true,
	TypeTimeOfDay: false,
	TypeDate: false,
	TypeUTCTime: false,
	TypeClusterID: true,
	TypeAttributeID: true,
	TypeBACnetOID: true,
	TypeIEEEAddress: true,
	TypeSecurityKey128: true,
	TypeUnknown: false,
}
// AttributeDataType identifies the wire type of a ZCL attribute value
// (one of the Type* constants above).
type AttributeDataType byte

// AttributeID identifies a ZCL attribute within a cluster.
type AttributeID uint16

// AttributeDataValue carries an attribute value whose wire type is not
// stored alongside it; the type is taken from a preceding
// AttributeDataType field of the enclosing struct during (un)marshalling.
type AttributeDataValue struct {
	Value interface{}
}
// findPreviousDataType walks backwards from the field currently being
// (un)marshalled and returns the value of the nearest earlier field of
// type AttributeDataType. It errors when no such field exists.
func findPreviousDataType(ctx bytecodec.Context) (AttributeDataType, error) {
	rootType := ctx.Root.Type()
	for idx := ctx.CurrentIndex - 1; idx >= 0; idx-- {
		if rootType.Field(idx).Type.Name() == "AttributeDataType" {
			return ctx.Root.Field(idx).Interface().(AttributeDataType), nil
		}
	}
	return TypeUnknown, errors.New("unable to find prior attribute data type to extrapolate type information")
}
// Marshal serialises the attribute value using the data type found in the
// nearest preceding AttributeDataType field of the enclosing struct.
// NOTE(review): values of discrete types are deliberately skipped here —
// confirm this matches the intended wire format for the enclosing message.
func (a *AttributeDataValue) Marshal(bb *bitbuffer.BitBuffer, ctx bytecodec.Context) error {
	dataType, err := findPreviousDataType(ctx)
	if err != nil {
		return err
	}
	if DiscreteTypes[dataType] {
		return nil
	}
	return marshalZCLType(bb, ctx, dataType, a.Value)
}
// Unmarshal reads the attribute value using the data type found in the
// nearest preceding AttributeDataType field of the enclosing struct.
// Discrete types are skipped, mirroring Marshal, and leave Value unset.
func (a *AttributeDataValue) Unmarshal(bb *bitbuffer.BitBuffer, ctx bytecodec.Context) error {
	dataType, err := findPreviousDataType(ctx)
	if err != nil {
		return err
	}
	if DiscreteTypes[dataType] {
		return nil
	}
	value, err := unmarshalZCLType(bb, dataType, ctx)
	if err != nil {
		return err
	}
	a.Value = value
	return nil
}
// AttributeDataTypeValue is a self-describing attribute value: the wire
// format is a one-byte data type tag followed by the encoded value.
type AttributeDataTypeValue struct {
	DataType AttributeDataType
	Value interface{}
}
// Marshal writes the one-byte data type tag, then the encoded value.
func (a *AttributeDataTypeValue) Marshal(bb *bitbuffer.BitBuffer, ctx bytecodec.Context) error {
	err := bb.WriteByte(byte(a.DataType))
	if err != nil {
		return err
	}
	return marshalZCLType(bb, ctx, a.DataType, a.Value)
}
// Unmarshal reads the one-byte data type tag and then decodes the value
// according to that tag.
func (a *AttributeDataTypeValue) Unmarshal(bb *bitbuffer.BitBuffer, ctx bytecodec.Context) error {
	dt, err := bb.ReadByte()
	if err != nil {
		return err
	}
	a.DataType = AttributeDataType(dt)
	val, err := unmarshalZCLType(bb, a.DataType, ctx)
	if err != nil {
		return err
	}
	a.Value = val
	return nil
}
// AttributeSlice holds a homogeneous list of attribute values that share
// a single data type.
type AttributeSlice struct {
	DataType AttributeDataType
	Values []interface{}
}

// BACnetOID is a BACnet object identifier (wire type TypeBACnetOID, 0xeb).
type BACnetOID uint32

// TimeOfDay is the ZCL Time of Day type (TypeTimeOfDay, 0xe0).
type TimeOfDay struct {
	Hours uint8
	Minutes uint8
	Seconds uint8
	Hundredths uint8
}

// Date is the ZCL Date type (TypeDate, 0xe1).
type Date struct {
	Year uint8
	Month uint8
	DayOfMonth uint8
	DayOfWeek uint8
}

// UTCTime is the ZCL UTCTime type (TypeUTCTime, 0xe2).
type UTCTime uint32
package model
// LocaleInfo holds the data to specify a standard locale.
type LocaleInfo struct {
	Ident    string // locale identifier, e.g. "en_US"
	Language string // human-readable language name
	Country  string // human-readable country name
}

// Locales is a mapping of standard locale idents to LocaleInfo.
// The redundant LocaleInfo type on every value has been dropped,
// as gofmt -s would do.
var Locales = map[string]LocaleInfo{
	"sq_AL":       {Ident: "sq_AL", Language: "Albanian", Country: "Albania"},
	"ar_DZ":       {Ident: "ar_DZ", Language: "Arabic", Country: "Algeria"},
	"ar_BH":       {Ident: "ar_BH", Language: "Arabic", Country: "Bahrain"},
	"ar_EG":       {Ident: "ar_EG", Language: "Arabic", Country: "Egypt"},
	"ar_IQ":       {Ident: "ar_IQ", Language: "Arabic", Country: "Iraq"},
	"ar_JO":       {Ident: "ar_JO", Language: "Arabic", Country: "Jordan"},
	"ar_KW":       {Ident: "ar_KW", Language: "Arabic", Country: "Kuwait"},
	"ar_LB":       {Ident: "ar_LB", Language: "Arabic", Country: "Lebanon"},
	"ar_LY":       {Ident: "ar_LY", Language: "Arabic", Country: "Libya"},
	"ar_MA":       {Ident: "ar_MA", Language: "Arabic", Country: "Morocco"},
	"ar_OM":       {Ident: "ar_OM", Language: "Arabic", Country: "Oman"},
	"ar_QA":       {Ident: "ar_QA", Language: "Arabic", Country: "Qatar"},
	"ar_SA":       {Ident: "ar_SA", Language: "Arabic", Country: "Saudi Arabia"},
	"ar_SD":       {Ident: "ar_SD", Language: "Arabic", Country: "Sudan"},
	"ar_SY":       {Ident: "ar_SY", Language: "Arabic", Country: "Syria"},
	"ar_TN":       {Ident: "ar_TN", Language: "Arabic", Country: "Tunisia"},
	"ar_AE":       {Ident: "ar_AE", Language: "Arabic", Country: "United Arab Emirates"},
	"ar_YE":       {Ident: "ar_YE", Language: "Arabic", Country: "Yemen"},
	"be_BY":       {Ident: "be_BY", Language: "Belarusian", Country: "Belarus"},
	"bg_BG":       {Ident: "bg_BG", Language: "Bulgarian", Country: "Bulgaria"},
	"ca_ES":       {Ident: "ca_ES", Language: "Catalan", Country: "Spain"},
	"zh_CN":       {Ident: "zh_CN", Language: "Chinese (Simplified)", Country: "China"},
	"zh_SG":       {Ident: "zh_SG", Language: "Chinese (Simplified)", Country: "Singapore"},
	"zh_HK":       {Ident: "zh_HK", Language: "Chinese (Traditional)", Country: "Hong Kong"},
	"zh_TW":       {Ident: "zh_TW", Language: "Chinese (Traditional)", Country: "Taiwan"},
	"hr_HR":       {Ident: "hr_HR", Language: "Croatian", Country: "Croatia"},
	"cs_CZ":       {Ident: "cs_CZ", Language: "Czech", Country: "Czech Republic"},
	"da_DK":       {Ident: "da_DK", Language: "Danish", Country: "Denmark"},
	"nl_BE":       {Ident: "nl_BE", Language: "Dutch", Country: "Belgium"},
	"nl_NL":       {Ident: "nl_NL", Language: "Dutch", Country: "Netherlands"},
	"en_AU":       {Ident: "en_AU", Language: "English", Country: "Australia"},
	"en_CA":       {Ident: "en_CA", Language: "English", Country: "Canada"},
	"en_IN":       {Ident: "en_IN", Language: "English", Country: "India"},
	"en_IE":       {Ident: "en_IE", Language: "English", Country: "Ireland"},
	"en_MT":       {Ident: "en_MT", Language: "English", Country: "Malta"},
	"en_NZ":       {Ident: "en_NZ", Language: "English", Country: "New Zealand"},
	"en_PH":       {Ident: "en_PH", Language: "English", Country: "Philippines"},
	"en_SG":       {Ident: "en_SG", Language: "English", Country: "Singapore"},
	"en_ZA":       {Ident: "en_ZA", Language: "English", Country: "South Africa"},
	"en_GB":       {Ident: "en_GB", Language: "English", Country: "United Kingdom"},
	"en_US":       {Ident: "en_US", Language: "English", Country: "United States"},
	"et_EE":       {Ident: "et_EE", Language: "Estonian", Country: "Estonia"},
	"fi_FI":       {Ident: "fi_FI", Language: "Finnish", Country: "Finland"},
	"fr_BE":       {Ident: "fr_BE", Language: "French", Country: "Belgium"},
	"fr_CA":       {Ident: "fr_CA", Language: "French", Country: "Canada"},
	"fr_FR":       {Ident: "fr_FR", Language: "French", Country: "France"},
	"fr_LU":       {Ident: "fr_LU", Language: "French", Country: "Luxembourg"},
	"fr_CH":       {Ident: "fr_CH", Language: "French", Country: "Switzerland"},
	"de_AT":       {Ident: "de_AT", Language: "German", Country: "Austria"},
	"de_DE":       {Ident: "de_DE", Language: "German", Country: "Germany"},
	"de_LU":       {Ident: "de_LU", Language: "German", Country: "Luxembourg"},
	"de_CH":       {Ident: "de_CH", Language: "German", Country: "Switzerland"},
	"el_CY":       {Ident: "el_CY", Language: "Greek", Country: "Cyprus"},
	"el_GR":       {Ident: "el_GR", Language: "Greek", Country: "Greece"},
	"iw_IL":       {Ident: "iw_IL", Language: "Hebrew", Country: "Israel"},
	"hi_IN":       {Ident: "hi_IN", Language: "Hindi", Country: "India"},
	"hu_HU":       {Ident: "hu_HU", Language: "Hungarian", Country: "Hungary"},
	"is_IS":       {Ident: "is_IS", Language: "Icelandic", Country: "Iceland"},
	"in_ID":       {Ident: "in_ID", Language: "Indonesian", Country: "Indonesia"},
	"ga_IE":       {Ident: "ga_IE", Language: "Irish", Country: "Ireland"},
	"it_IT":       {Ident: "it_IT", Language: "Italian", Country: "Italy"},
	"it_CH":       {Ident: "it_CH", Language: "Italian", Country: "Switzerland"},
	"ja_JP":       {Ident: "ja_JP", Language: "Japanese", Country: "Japan"},
	"ko_KR":       {Ident: "ko_KR", Language: "Korean", Country: "South Korea"},
	"lv_LV":       {Ident: "lv_LV", Language: "Latvian", Country: "Latvia"},
	"lt_LT":       {Ident: "lt_LT", Language: "Lithuanian", Country: "Lithuania"},
	"mk_MK":       {Ident: "mk_MK", Language: "Macedonian", Country: "Macedonia"},
	"ms_MY":       {Ident: "ms_MY", Language: "Malay", Country: "Malaysia"},
	"mt_MT":       {Ident: "mt_MT", Language: "Maltese", Country: "Malta"},
	"no_NO":       {Ident: "no_NO", Language: "Norwegian (Bokmål)", Country: "Norway"},
	"no_NO_NY":    {Ident: "no_NO_NY", Language: "Norwegian (Nynorsk)", Country: "Norway"},
	"pl_PL":       {Ident: "pl_PL", Language: "Polish", Country: "Poland"},
	"pt_BR":       {Ident: "pt_BR", Language: "Portuguese", Country: "Brazil"},
	"pt_PT":       {Ident: "pt_PT", Language: "Portuguese", Country: "Portugal"},
	"ro_RO":       {Ident: "ro_RO", Language: "Romanian", Country: "Romania"},
	"ru_RU":       {Ident: "ru_RU", Language: "Russian", Country: "Russia"},
	"sr_BA":       {Ident: "sr_BA", Language: "Serbian (Cyrillic)", Country: "Bosnia and Herzegovina"},
	"sr_ME":       {Ident: "sr_ME", Language: "Serbian (Cyrillic)", Country: "Montenegro"},
	"sr_RS":       {Ident: "sr_RS", Language: "Serbian (Cyrillic)", Country: "Serbia"},
	"sr_La_tn_BA": {Ident: "sr_La_tn_BA", Language: "Serbian (Latin)", Country: "Bosnia and Herzegovina"},
	"sr_La_tn_ME": {Ident: "sr_La_tn_ME", Language: "Serbian (Latin)", Country: "Montenegro"},
	"sr_La_tn_RS": {Ident: "sr_La_tn_RS", Language: "Serbian (Latin)", Country: "Serbia"},
	"sk_SK":       {Ident: "sk_SK", Language: "Slovak", Country: "Slovakia"},
	"sl_SI":       {Ident: "sl_SI", Language: "Slovenian", Country: "Slovenia"},
	"es_AR":       {Ident: "es_AR", Language: "Spanish", Country: "Argentina"},
	"es_BO":       {Ident: "es_BO", Language: "Spanish", Country: "Bolivia"},
	"es_CL":       {Ident: "es_CL", Language: "Spanish", Country: "Chile"},
	"es_CO":       {Ident: "es_CO", Language: "Spanish", Country: "Colombia"},
	"es_CR":       {Ident: "es_CR", Language: "Spanish", Country: "Costa Rica"},
	"es_DO":       {Ident: "es_DO", Language: "Spanish", Country: "Dominican Republic"},
	"es_EC":       {Ident: "es_EC", Language: "Spanish", Country: "Ecuador"},
	"es_SV":       {Ident: "es_SV", Language: "Spanish", Country: "El Salvador"},
	"es_GT":       {Ident: "es_GT", Language: "Spanish", Country: "Guatemala"},
	"es_HN":       {Ident: "es_HN", Language: "Spanish", Country: "Honduras"},
	"es_MX":       {Ident: "es_MX", Language: "Spanish", Country: "Mexico"},
	"es_NI":       {Ident: "es_NI", Language: "Spanish", Country: "Nicaragua"},
	"es_PA":       {Ident: "es_PA", Language: "Spanish", Country: "Panama"},
	"es_PY":       {Ident: "es_PY", Language: "Spanish", Country: "Paraguay"},
	"es_PE":       {Ident: "es_PE", Language: "Spanish", Country: "Peru"},
	"es_PR":       {Ident: "es_PR", Language: "Spanish", Country: "Puerto Rico"},
	"es_ES":       {Ident: "es_ES", Language: "Spanish", Country: "Spain"},
	"es_US":       {Ident: "es_US", Language: "Spanish", Country: "United States"},
	"es_UY":       {Ident: "es_UY", Language: "Spanish", Country: "Uruguay"},
	"es_VE":       {Ident: "es_VE", Language: "Spanish", Country: "Venezuela"},
	"sv_SE":       {Ident: "sv_SE", Language: "Swedish", Country: "Sweden"},
	"th_TH":       {Ident: "th_TH", Language: "Thai (Western digits)", Country: "Thailand"},
	"th_TH_TH":    {Ident: "th_TH_TH", Language: "Thai (Thai digits)", Country: "Thailand"},
	"tr_TR":       {Ident: "tr_TR", Language: "Turkish", Country: "Turkey"},
	"uk_UA":       {Ident: "uk_UA", Language: "Ukrainian", Country: "Ukraine"},
	"vi_VN":       {Ident: "vi_VN", Language: "Vietnamese", Country: "Vietnam"},
}
package main
import (
"errors"
"fmt"
"strconv"
"github.com/openblockchain/obc-peer/openchain/chaincode/shim"
)
/*
This is a system chaincode used to update the validity period on the ledger. It needs to be deployed at genesis time to avoid the need of
a TCert during deployment, and it will be invoked by a system component (probably the TCA), which is the source of the validity
This component will increment the validity period locally and dispatch an invocation transaction for this chaincode passing the new validity
period as a parameter.
This chaincode is responsible for the verification of the caller's identity, for that we can use an enrolment certificate id or some other
type of verification. For this to work this id (or certificate, public key, etc) needs to be accessible from the chaincode (probably embedded into
the chaincode).
This is the flow for this to work:
1) Obtain some identity id (enrolment certificate id, certificate, public key, etc).
2) Include the information obtained in 1 into this chaincode.
3) Deploy the chaincode
4) Include the chaincode id into the system component that is going to invoke it (possibly the TCA) so it can dispatch an invoke transaction.
*/
// systemChaincode maintains the system-wide validity period value on the
// ledger. It is deployed at genesis and invoked by a trusted system
// component (see the file comment above).
type systemChaincode struct {
}

// system_validity_period_key is the ledger state key under which the
// current validity period value is stored.
const system_validity_period_key = "system.validity.period"
// init seeds the ledger with a validity period of zero. It must be
// executed exactly once, at deployment time.
func (t *systemChaincode) init(stub *shim.ChaincodeStub, args []string) ([]byte, error) {
	initial := strconv.FormatInt(0, 10)
	if err := stub.PutState(system_validity_period_key, []byte(initial)); err != nil {
		return nil, err
	}
	return nil, nil
}
// invoke stores a new validity period value on the ledger. It expects a
// single argument: the new period value, supplied by the TCA.
func (t *systemChaincode) invoke(stub *shim.ChaincodeStub, args []string) ([]byte, error) {
	// FIXME: this must only run for an authorized caller. Two checks are
	// still missing: (1) verifying the caller's identity against a known
	// id, and (2) ensuring this is a direct invocation rather than a
	// nested chaincode call. The flag below stands in for both, e.g.
	// stub.ValidateCaller(expectedCaller, stub.GetActualCaller()) && stub.StackDepth() == 0
	directly_called_by_TCA := true
	if !directly_called_by_TCA {
		return nil, nil
	}
	if len(args) != 1 {
		return nil, errors.New("Incorrect number of arguments. Expecting 1")
	}
	if err := stub.PutState(system_validity_period_key, []byte(args[0])); err != nil {
		return nil, err
	}
	return nil, nil
}
// Run dispatches a chaincode invocation to init or invoke by function
// name, rejecting anything else.
func (t *systemChaincode) Run(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {
	switch function {
	case "init":
		return t.init(stub, args)
	case "invoke":
		return t.invoke(stub, args)
	default:
		return nil, errors.New("Received unknown function invocation")
	}
}
// Query returns the stored validity period as a small JSON document.
// Only the "query" function with the single key argument
// "system.validity.period" is accepted.
func (t *systemChaincode) Query(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {
	if function != "query" {
		return nil, errors.New("Invalid query function name. Expecting \"query\"")
	}
	if len(args) != 1 {
		return nil, errors.New("Incorrect number of arguments. Expecting 1")
	}
	key := args[0]
	if key != system_validity_period_key {
		return nil, errors.New("Incorrect key. Expecting " + system_validity_period_key)
	}
	// Fetch the current validity period from the ledger state.
	vp, err := stub.GetState(key)
	if err != nil {
		return nil, errors.New("{\"Error\":\"Failed to get state for " + key + "\"}")
	}
	if vp == nil {
		return nil, errors.New("{\"Error\":\"Nil value for " + key + "\"}")
	}
	response := "{\"Name\":\"" + key + "\",\"Value\":\"" + string(vp) + "\"}"
	fmt.Printf("Query Response:%s\n", response)
	return []byte(response), nil
}
func main() {
err := shim.Start(new(systemChaincode))
if err != nil {
fmt.Printf("Error starting System chaincode: %s", err)
}
} | openchain/system_chaincode/validity_period_update/validity_period_update.go | 0.50415 | 0.453201 | validity_period_update.go | starcoder |
package commands
import (
"fmt"
"strings"
"github.com/BurntSushi/gribble"
"github.com/BurntSushi/xgb/xproto"
"github.com/BurntSushi/xgbutil/xrect"
"github.com/xsrc/wingo/logger"
"github.com/xsrc/wingo/prompt"
"github.com/xsrc/wingo/workspace"
"github.com/xsrc/wingo/wm"
"github.com/xsrc/wingo/xclient"
)
// parsePos converts a Gribble position argument to an absolute x or y
// pixel coordinate. An int is returned unchanged; a float64 must lie in
// the range (0, 1] and is interpreted as a fraction of the current head's
// geometry (of its height when y is true, otherwise of its width), offset
// by the head's origin. The boolean result reports success. Any other
// argument type panics (the Gribble spec restricts the types).
func parsePos(geom xrect.Rect, gribblePos gribble.Any, y bool) (int, bool) {
	switch pos := gribblePos.(type) {
	case int:
		return pos, true
	case float64:
		if pos <= 0 || pos > 1 {
			// %v: pos is a float64, so the previous %s verb printed
			// "%!s(float64=...)" garbage.
			logger.Warning.Printf("'%v' not in the valid range (0, 1].", pos)
			return 0, false
		}
		if y {
			return geom.Y() + int(float64(geom.Height())*pos), true
		}
		return geom.X() + int(float64(geom.Width())*pos), true
	}
	panic("unreachable")
}
// parseDim converts a Gribble dimension argument to a width or height in
// pixels. An int is returned unchanged; a float64 must lie in (0, 1] and
// is interpreted as a fraction of the current head's geometry (of its
// height when hght is true, otherwise of its width). The boolean result
// reports success. Any other argument type panics.
func parseDim(geom xrect.Rect, gribbleDim gribble.Any, hght bool) (int, bool) {
	switch dim := gribbleDim.(type) {
	case int:
		return dim, true
	case float64:
		if dim <= 0 || dim > 1 {
			// %v: dim is a float64, so the previous %s verb printed garbage.
			logger.Warning.Printf("'%v' not in the valid range (0, 1].", dim)
			return 0, false
		}
		if hght {
			return int(float64(geom.Height()) * dim), true
		}
		return int(float64(geom.Width()) * dim), true
	}
	panic("unreachable")
}
// stringBool reports whether s spells an affirmative answer: "yes" or
// "y" in any letter case.
func stringBool(s string) bool {
	switch strings.ToLower(s) {
	case "yes", "y":
		return true
	}
	return false
}
// boolToInt converts a boolean to an integer: true -> 1, false -> 0.
func boolToInt(b bool) int {
	result := 0
	if b {
		result = 1
	}
	return result
}
// intToBool converts 0 to false and 1 to true. Any other value is a
// programming error and panics.
func intToBool(n int) bool {
	if n != 0 && n != 1 {
		panic(fmt.Sprintf("Unexpected boolean integer: %d", n))
	}
	return n == 1
}
// stringTabComp maps a tab-completion mode name ("Prefix", "Any" or
// "Multiple") to the corresponding prompt package constant. Unknown names
// log a warning and fall back to prefix completion.
func stringTabComp(s string) int {
	switch s {
	case "Any":
		return prompt.TabCompleteAny
	case "Multiple":
		return prompt.TabCompleteMultiple
	case "Prefix":
		return prompt.TabCompletePrefix
	}
	logger.Warning.Printf("Tab completion mode '%s' not supported.", s)
	return prompt.TabCompletePrefix
}
// withFocused applies f to the currently focused client and returns that
// client's id. When no window has focus it returns the string ":void:"
// and f is not called.
func withFocused(f func(c *xclient.Client)) gribble.Any {
	focused := wm.LastFocused()
	if focused == nil {
		return ":void:"
	}
	client := focused.(*xclient.Client)
	f(client)
	return int(client.Id())
}
// withClient resolves cArg to a managed client and applies f to it,
// returning the client's id on success or the string ":void:" otherwise.
// cArg may be: an int window id (0 is the "no client" sentinel); the
// string ":void:"; the string ":mouse:" (the client last clicked with the
// mouse); or any other string, matched case-insensitively as a substring
// of client names (first match wins).
// Note: only the ":mouse:" branch calls f(nil) on a miss — the other
// branches skip f entirely when no client matches.
func withClient(cArg gribble.Any, f func(c *xclient.Client)) gribble.Any {
	switch c := cArg.(type) {
	case int:
		if c == 0 {
			return ":void:"
		}
		// Linear scan of managed clients for a matching window id.
		for _, client_ := range wm.Clients {
			client := client_.(*xclient.Client)
			if int(client.Id()) == c {
				f(client)
				return int(client.Id())
			}
		}
		return ":void:"
	case string:
		switch c {
		case ":void:":
			return ":void:"
		case ":mouse:":
			wid := xproto.Window(wm.MouseClientClicked)
			if client := wm.FindManagedClient(wid); client != nil {
				c := client.(*xclient.Client)
				f(c)
				return int(c.Id())
			} else {
				// Deliberately invoke f with nil so callers can react to
				// "no client under the mouse".
				f(nil)
				return ":void:"
			}
		default:
			// Case-insensitive substring match against client names.
			for _, client_ := range wm.Clients {
				client := client_.(*xclient.Client)
				name := strings.ToLower(client.Name())
				if strings.Contains(name, strings.ToLower(c)) {
					f(client)
					return int(client.Id())
				}
			}
			return ":void:"
		}
	default:
		panic(fmt.Sprintf("BUG: Unknown Gribble return type: %T", c))
	}
	panic("unreachable")
}
// withWorkspace applies f to the workspace identified by wArg, which may
// be an int index or a string name. Unknown workspaces and other argument
// types are silently ignored.
func withWorkspace(wArg gribble.Any, f func(wrk *workspace.Workspace)) {
	switch w := wArg.(type) {
	case int:
		wrk := wm.Heads.Workspaces.Get(w)
		if wrk != nil {
			f(wrk)
		}
	case string:
		wrk := wm.Heads.Workspaces.Find(w)
		if wrk != nil {
			f(wrk)
		}
	}
}
// cmdError formats an error message for command output, prefixed "ERROR: ".
func cmdError(format string, v ...interface{}) string {
	msg := fmt.Sprintf(format, v...)
	return fmt.Sprintf("ERROR: %s", msg)
}
package aoc
import (
"fmt"
)
// FP2 packs a 2D point of two int32 coordinates into one uint64: x in the
// high 32 bits, y in the low 32 bits. The compact encoding makes points
// cheap to copy, compare with ==, and use as map keys.
type FP2 uint64

// NewFP2 packs x and y into an FP2.
func NewFP2(x, y int32) FP2 {
	return FP2(uint64(uint32(x))<<32 + uint64(uint32(y)))
}

// String renders the point as "x,y".
func (p FP2) String() string {
	x, y := p.XY()
	return fmt.Sprintf("%d,%d", x, y)
}

// XY unpacks the point into its x and y coordinates.
func (p FP2) XY() (int32, int32) {
	y := int32(p & 0xffffffff)
	x := int32((p >> 32) & 0xffffffff)
	return x, y
}

// FP2UKeyMax returns an upper bound on keys produced by UKey(bits).
// NOTE(review): this is 2^(2*bits+1), twice the tight bound of 2^(2*bits);
// presumably a deliberate margin — confirm before tightening.
func FP2UKeyMax(bits int) int {
	return 2 << (bits * 2)
}

// UKey folds the point into a compact integer key with x in the high half
// and y in the low half of 2*bits bits. Coordinates are taken as raw
// 32-bit words, so negative or >= 2^bits coordinates produce colliding or
// oversized keys — assumes small non-negative coordinates (TODO confirm
// with callers).
func (p FP2) UKey(bits int) uint64 {
	y := p & 0xffffffff
	x := (p >> 32) & 0xffffffff
	return uint64(x<<bits + y)
}

// ManhattanDistance returns |dx| + |dy| between p and o.
func (p FP2) ManhattanDistance(o FP2) int {
	x, y := p.XY()
	ox, oy := o.XY()
	var dx int
	if x > ox {
		dx = int(x - ox)
	} else {
		dx = int(ox - x)
	}
	var dy int
	if y > oy {
		dy = int(y - oy)
	} else {
		dy = int(oy - y)
	}
	return dx + dy
}

// Add returns the component-wise sum p + o.
func (p FP2) Add(o FP2) FP2 {
	x, y := p.XY()
	ox, oy := o.XY()
	return NewFP2(x+ox, y+oy)
}

// Sub returns the component-wise difference p - o.
func (p FP2) Sub(o FP2) FP2 {
	x, y := p.XY()
	ox, oy := o.XY()
	return NewFP2(x-ox, y-oy)
}

// Norm returns the sign of each coordinate of p as a unit step
// (-1, 0 or +1 per axis).
// NOTE(review): the second parameter has never been used — possibly this
// was meant to normalise the vector from p to o. Confirm with callers
// before removing or repurposing it.
func (p FP2) Norm(_ FP2) FP2 {
	x, y := p.XY()
	var nx, ny int32
	if x > 0 {
		nx = 1
	} else if x < 0 {
		nx = -1
	}
	if y > 0 {
		ny = 1
	} else if y < 0 {
		ny = -1
	}
	return NewFP2(nx, ny)
}

// Rotate returns p transformed by one of the eight axis-aligned
// rotations/reflections, selected by rotation in [0, 8). Any other value
// panics.
func (p FP2) Rotate(rotation int) FP2 {
	x, y := p.XY()
	switch rotation {
	case 0:
		return NewFP2(x, y)
	case 1:
		return NewFP2(x, -y)
	case 2:
		return NewFP2(y, -x)
	case 3:
		return NewFP2(y, x)
	case 4:
		return NewFP2(-x, -y)
	case 5:
		return NewFP2(-x, y)
	case 6:
		return NewFP2(-y, x)
	case 7:
		return NewFP2(-y, -x)
	default:
		panic("invalid rotation")
	}
}

// CW rotates p by 90 degrees: (x, y) -> (-y, x), which is clockwise when
// y grows downward.
func (p FP2) CW() FP2 {
	x, y := p.XY()
	return NewFP2(-y, x)
}

// CCW rotates p by 90 degrees the other way: (x, y) -> (y, -x).
func (p FP2) CCW() FP2 {
	x, y := p.XY()
	return NewFP2(y, -x)
}
package types
import (
"github.com/juju/errors"
mysql "github.com/pingcap/tidb/mysqldef"
)
// CompareInt64 returns -1, 0 or 1 according to whether the int64 x is
// less than, equal to, or greater than y.
func CompareInt64(x, y int64) int {
	switch {
	case x < y:
		return -1
	case x == y:
		return 0
	default:
		return 1
	}
}
// CompareUint64 returns -1, 0 or 1 according to whether the uint64 x is
// less than, equal to, or greater than y.
func CompareUint64(x, y uint64) int {
	switch {
	case x < y:
		return -1
	case x == y:
		return 0
	default:
		return 1
	}
}
// CompareFloat64 returns an integer comparing the float64 x to y.
func CompareFloat64(x, y float64) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
// CompareInteger compares the signed int64 x with the unsigned uint64 y.
// Any negative x sorts below every unsigned value; otherwise the two are
// compared as uint64.
func CompareInteger(x int64, y uint64) int {
	if x < 0 {
		return -1
	}
	return CompareUint64(uint64(x), y)
}
// CompareString returns -1, 0 or 1 according to whether the string x is
// lexically less than, equal to, or greater than y.
func CompareString(x, y string) int {
	switch {
	case x < y:
		return -1
	case x == y:
		return 0
	default:
		return 1
	}
}
// compareFloatString compares the float a to the float-formatted string s.
// compareFloatString first parses s to a float value; if parsing fails it
// returns an error rather than coercing loosely.
func compareFloatString(a float64, s string) (int, error) {
	// MySQL will convert the string to a floating point value.
	// MySQL uses a very loose conversion, e.g. 123.abc -> 123;
	// we should do a trade off whether supporting this feature or using a strict mode.
	// For now we use a strict mode.
	b, err := StrToFloat(s)
	if err != nil {
		return 0, err
	}
	return CompareFloat64(a, b), nil
}
// compareStringFloat compares the float-formatted string s to the float a.
// It is the argument-swapped (and therefore sign-negated) counterpart of
// compareFloatString.
func compareStringFloat(s string, a float64) (int, error) {
	n, err := compareFloatString(a, s)
	return -n, err
}
// coerceCompare coerces a and b to a common comparable representation and
// additionally replaces []byte inputs with their string form so the later
// comparison can treat byte slices as strings.
func coerceCompare(a, b interface{}) (x interface{}, y interface{}) {
	x, y = Coerce(a, b)
	if bs, ok := a.([]byte); ok {
		x = string(bs)
	}
	if bs, ok := b.([]byte); ok {
		y = string(bs)
	}
	return x, y
}
// Compare returns an integer comparing the interface a to b.
// a > b -> 1
// a = b -> 0
// a < b -> -1
func Compare(a, b interface{}) (int, error) {
a, b = coerceCompare(a, b)
if a == nil || b == nil {
// Check ni first, nil is always less than none nil value.
if a == nil && b != nil {
return -1, nil
} else if a != nil && b == nil {
return 1, nil
} else {
// here a and b are all nil
return 0, nil
}
}
// TODO: support compare time type with other int, float, decimal types.
// TODO: support hexadecimal type
switch x := a.(type) {
case float64:
switch y := b.(type) {
case float64:
return CompareFloat64(x, y), nil
case string:
return compareFloatString(x, y)
}
case int64:
switch y := b.(type) {
case int64:
return CompareInt64(x, y), nil
case uint64:
return CompareInteger(x, y), nil
case string:
return compareFloatString(float64(x), y)
}
case uint64:
switch y := b.(type) {
case uint64:
return CompareUint64(x, y), nil
case int64:
return -CompareInteger(y, x), nil
case string:
return compareFloatString(float64(x), y)
}
case mysql.Decimal:
switch y := b.(type) {
case mysql.Decimal:
return x.Cmp(y), nil
case string:
f, err := mysql.ConvertToDecimal(y)
if err != nil {
return 0, err
}
return x.Cmp(f), nil
}
case string:
switch y := b.(type) {
case string:
return CompareString(x, y), nil
case int64:
return compareStringFloat(x, float64(y))
case uint64:
return compareStringFloat(x, float64(y))
case float64:
return compareStringFloat(x, y)
case mysql.Decimal:
f, err := mysql.ConvertToDecimal(x)
if err != nil {
return 0, err
}
return f.Cmp(y), nil
case mysql.Time:
n, err := y.CompareString(x)
return -n, err
case mysql.Duration:
n, err := y.CompareString(x)
return -n, err
}
case mysql.Time:
switch y := b.(type) {
case mysql.Time:
return x.Compare(y), nil
case string:
return x.CompareString(y)
}
case mysql.Duration:
switch y := b.(type) {
case mysql.Duration:
return x.Compare(y), nil
case string:
return x.CompareString(y)
}
}
return 0, errors.Errorf("invalid comapre type %T cmp %T", a, b)
} | util/types/compare.go | 0.710729 | 0.62701 | compare.go | starcoder |
package sliceutil
import "reflect"
func findFist(s interface{}, a interface{}) (int, bool) {
si := reflect.ValueOf(s)
if si.IsNil() || si.Len() == 0 {
return -1, false
}
for i := 0; i < si.Len(); i++ {
if reflect.DeepEqual(si.Index(i).Interface(), a) {
return i, true
}
}
return -1, false
}
func findLast(s interface{}, a interface{}) (int, bool) {
si := reflect.ValueOf(s)
if si.IsNil() || si.Len() == 0 {
return -1, false
}
var idx int = -1
var ok bool = false
for i := 0; i < si.Len(); i++ {
if reflect.DeepEqual(si.Index(i).Interface(), a) {
idx = i
ok = true
}
}
return idx, ok
}
// Note: "Fist" in these exported names is a long-standing typo for "First";
// the names are kept for backward compatibility. Each helper uses a direct
// typed loop instead of the reflection-based findFist, which gives identical
// results without reflection overhead.

// FindFistInt returns the first specified element index. If can't find anything, return -1 and false
func FindFistInt(s []int, a int) (int, bool) {
	for i, v := range s {
		if v == a {
			return i, true
		}
	}
	return -1, false
}

// FindFistInt32 returns the first index of specified element of type int32. If can't find anything, return -1 and false
func FindFistInt32(s []int32, a int32) (int, bool) {
	for i, v := range s {
		if v == a {
			return i, true
		}
	}
	return -1, false
}

// FindFistInt64 returns the first index of specified element of type int64. If can't find anything, return -1 and false
func FindFistInt64(s []int64, a int64) (int, bool) {
	for i, v := range s {
		if v == a {
			return i, true
		}
	}
	return -1, false
}

// FindFistBool returns the first index of specified element of type bool. If can't find anything, return -1 and false
func FindFistBool(s []bool, a bool) (int, bool) {
	for i, v := range s {
		if v == a {
			return i, true
		}
	}
	return -1, false
}

// FindFistByte returns the first index of specified element of type byte. If can't find anything, return -1 and false
func FindFistByte(s []byte, a byte) (int, bool) {
	for i, v := range s {
		if v == a {
			return i, true
		}
	}
	return -1, false
}

// FindFistString returns the first index of specified element of type string. If can't find anything, return -1 and false
func FindFistString(s []string, a string) (int, bool) {
	for i, v := range s {
		if v == a {
			return i, true
		}
	}
	return -1, false
}
// Note: these helpers use direct typed backward loops instead of the
// reflection-based findLast; the result is identical to scanning forward and
// remembering the last match, but without reflection and with early exit.

// FindLastInt returns the last index of specified element of type int. If can't find anything, return -1 and false
func FindLastInt(s []int, a int) (int, bool) {
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] == a {
			return i, true
		}
	}
	return -1, false
}

// FindLastInt32 returns the last index of specified element of type int32. If can't find anything, return -1 and false
func FindLastInt32(s []int32, a int32) (int, bool) {
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] == a {
			return i, true
		}
	}
	return -1, false
}

// FindLastInt64 returns the last index of specified element of type int64. If can't find anything, return -1 and false
func FindLastInt64(s []int64, a int64) (int, bool) {
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] == a {
			return i, true
		}
	}
	return -1, false
}

// FindLastBool returns the last index of specified element of type bool. If can't find anything, return -1 and false
func FindLastBool(s []bool, a bool) (int, bool) {
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] == a {
			return i, true
		}
	}
	return -1, false
}

// FindLastByte returns the last index of specified element of type byte. If can't find anything, return -1 and false
func FindLastByte(s []byte, a byte) (int, bool) {
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] == a {
			return i, true
		}
	}
	return -1, false
}

// FindLastString returns the last index of specified element of type string. If can't find anything, return -1 and false
func FindLastString(s []string, a string) (int, bool) {
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] == a {
			return i, true
		}
	}
	return -1, false
}
package throttle
import (
"sync/atomic"
"time"
)
//------------------------------------------------------------------------------
// Type is a throttle of retries to avoid endless busy loops when a message
// fails to reach its destination.
type Type struct {
	// unthrottledRetries is the number of consecutive retries we are
	// comfortable attempting before throttling begins.
	unthrottledRetries int64

	// maxExponentialPeriod is the maximum duration (stored as nanoseconds,
	// i.e. int64(time.Duration)) for which our throttle lasts when
	// exponentially increasing.
	maxExponentialPeriod int64

	// baseThrottlePeriod is the static duration (nanoseconds) for which our
	// throttle lasts.
	baseThrottlePeriod int64

	// throttlePeriod is the current throttle period (nanoseconds); by default
	// this is set to the baseThrottlePeriod. Read and written atomically.
	throttlePeriod int64

	// closeChan can interrupt a throttle when closed. A nil channel blocks
	// forever, so leaving it unset disables early interruption.
	closeChan <-chan struct{}

	// consecutiveRetries is the live count of consecutive retries. Read and
	// written atomically.
	consecutiveRetries int64
}
// New creates a new throttle, which permits a static number of consecutive
// retries before throttling subsequent retries. A success will reset the
// count of consecutive retries.
func New(options ...func(*Type)) *Type {
	// Defaults: three free retries, one-second static throttle, one-minute
	// exponential cap. closeChan stays nil (never interrupts).
	throttle := &Type{
		unthrottledRetries:   3,
		baseThrottlePeriod:   int64(time.Second),
		maxExponentialPeriod: int64(time.Minute),
	}
	throttle.throttlePeriod = throttle.baseThrottlePeriod
	// Options are applied last so they can override any default.
	for _, opt := range options {
		opt(throttle)
	}
	return throttle
}
//------------------------------------------------------------------------------
// OptMaxUnthrottledRetries sets the maximum number of consecutive retries
// that will be attempted before throttling will begin.
func OptMaxUnthrottledRetries(n int64) func(*Type) {
	return func(throttle *Type) {
		throttle.unthrottledRetries = n
	}
}

// OptMaxExponentPeriod sets the maximum period of time that throttles will
// last when exponentially increasing.
func OptMaxExponentPeriod(period time.Duration) func(*Type) {
	return func(throttle *Type) {
		throttle.maxExponentialPeriod = int64(period)
	}
}

// OptThrottlePeriod sets the static period of time that throttles will last.
func OptThrottlePeriod(period time.Duration) func(*Type) {
	return func(throttle *Type) {
		// Sets both the base and the live period so the option takes effect
		// immediately as well as after a Reset.
		d := int64(period)
		throttle.baseThrottlePeriod = d
		throttle.throttlePeriod = d
	}
}

// OptCloseChan sets a read-only channel that, if closed, will interrupt a
// retry throttle early.
func OptCloseChan(c <-chan struct{}) func(*Type) {
	return func(throttle *Type) {
		throttle.closeChan = c
	}
}
//------------------------------------------------------------------------------
// Retry indicates that a retry is about to occur and, if appropriate, will
// block until either the throttle period is over and the retry may be
// attempted (returning true) or the close channel has closed (returning
// false).
func (t *Type) Retry() bool {
	// The first unthrottledRetries consecutive attempts pass straight through.
	if atomic.AddInt64(&t.consecutiveRetries, 1) <= t.unthrottledRetries {
		return true
	}
	// A nil closeChan blocks forever, so with no close channel configured the
	// timer always wins.
	delay := time.Duration(atomic.LoadInt64(&t.throttlePeriod))
	select {
	case <-time.After(delay):
		return true
	case <-t.closeChan:
		return false
	}
}
// ExponentialRetry is the same as Retry except also sets the throttle period to
// exponentially increase after each consecutive retry.
func (t *Type) ExponentialRetry() bool {
	// Only start growing the period once the free retries are used up.
	if atomic.LoadInt64(&t.consecutiveRetries) > t.unthrottledRetries {
		if throtPrd := atomic.LoadInt64(&t.throttlePeriod); throtPrd < t.maxExponentialPeriod {
			// Double the period, capped at maxExponentialPeriod.
			throtPrd = throtPrd * 2
			if throtPrd > t.maxExponentialPeriod {
				throtPrd = t.maxExponentialPeriod
			}
			// NOTE(review): this load-modify-store is not a compare-and-swap,
			// so two goroutines retrying concurrently could both double from
			// the same snapshot; presumably a throttle is driven by a single
			// goroutine — confirm before relying on concurrent use.
			atomic.StoreInt64(&t.throttlePeriod, throtPrd)
		}
	}
	// Retry performs the actual counting and (possibly throttled) wait.
	return t.Retry()
}
// Reset clears the count of consecutive retries and resets the exponential
// backoff. Call it after a successful attempt.
func (t *Type) Reset() {
	atomic.StoreInt64(&t.consecutiveRetries, 0)
	// Restore the live period to its configured base value.
	atomic.StoreInt64(&t.throttlePeriod, t.baseThrottlePeriod)
}
//------------------------------------------------------------------------------ | lib/util/throttle/type.go | 0.738669 | 0.438424 | type.go | starcoder |
package clock
import (
"context"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/venus/pkg/specactors/builtin"
)
// DefaultEpochDuration is the default duration of epochs, derived from the
// builtin actors' epoch length in seconds.
const DefaultEpochDuration = builtin.EpochDurationSeconds * time.Second

// DefaultPropagationDelay is the default time to await for blocks to arrive
// before mining.
const DefaultPropagationDelay = 6 * time.Second
// ChainEpochClock is an interface for a clock that represents epochs of the protocol.
type ChainEpochClock interface {
	// EpochDuration returns the fixed length of one epoch window.
	EpochDuration() time.Duration
	// EpochAtTime returns the epoch containing time t.
	EpochAtTime(t time.Time) abi.ChainEpoch
	// EpochRangeAtTimestamp returns the first and last epoch that a
	// unix-second timestamp may validly belong to.
	EpochRangeAtTimestamp(t uint64) (abi.ChainEpoch, abi.ChainEpoch)
	// StartTimeOfEpoch returns the wall-clock start of epoch e.
	StartTimeOfEpoch(e abi.ChainEpoch) time.Time
	// WaitForEpoch blocks until epoch e is due to start, or ctx is done.
	WaitForEpoch(ctx context.Context, e abi.ChainEpoch)
	// WaitForEpochPropDelay blocks until the propagation delay after the start
	// of epoch e has elapsed, or ctx is done.
	WaitForEpochPropDelay(ctx context.Context, e abi.ChainEpoch)
	// WaitNextEpoch blocks until the next epoch begins and returns it.
	WaitNextEpoch(ctx context.Context) abi.ChainEpoch

	Clock
}
// chainClock is a clock that represents epochs of the protocol.
type chainClock struct {
	// genesisTime is the time of the first block. EpochClock counts up from there.
	genesisTime time.Time
	// epochDuration is the fixed time length of the epoch window.
	epochDuration time.Duration
	// propDelay is the time between the start of the epoch and the start
	// of mining for the subsequent epoch. This delay provides time for
	// blocks from the previous epoch to arrive.
	propDelay time.Duration

	// Clock supplies Now/After, allowing a fake clock in tests.
	Clock
}
// NewChainClock returns a ChainEpochClock wrapping a default clock.Clock
// (the system clock). genesisTime is the genesis timestamp in unix seconds.
func NewChainClock(genesisTime uint64, blockTime time.Duration, propDelay time.Duration) ChainEpochClock {
	return NewChainClockFromClock(genesisTime, blockTime, propDelay, NewSystemClock())
}
// NewChainClockFromClock returns a ChainEpochClock wrapping the provided
// clock.Clock. genesisSeconds is the genesis timestamp in unix seconds.
func NewChainClockFromClock(genesisSeconds uint64, blockTime time.Duration, propDelay time.Duration, c Clock) ChainEpochClock {
	return &chainClock{
		genesisTime:   time.Unix(int64(genesisSeconds), 0),
		epochDuration: blockTime,
		propDelay:     propDelay,
		Clock:         c,
	}
}
// EpochDuration returns the fixed length of one epoch window.
func (cc *chainClock) EpochDuration() time.Duration {
	return cc.epochDuration
}
// EpochAtTime returns the ChainEpoch corresponding to t: the number of whole
// epoch durations elapsed between genesis and t.
// NOTE(review): for t before genesis the Duration division truncates toward
// zero rather than flooring — confirm callers never pass pre-genesis times.
func (cc *chainClock) EpochAtTime(t time.Time) abi.ChainEpoch {
	return abi.ChainEpoch(t.Sub(cc.genesisTime) / cc.epochDuration)
}
// EpochRangeAtTimestamp returns the possible epoch number range a given
// unix second timestamp value can validly belong to. This method can go
// away once integration tests work well enough to not require subsecond
// block times.
func (cc *chainClock) EpochRangeAtTimestamp(seconds uint64) (abi.ChainEpoch, abi.ChainEpoch) {
	// The timestamp only has second resolution, so the instant lies somewhere
	// in [seconds, seconds+1): report the epochs of both endpoints.
	start := time.Unix(int64(seconds), 0)
	return cc.EpochAtTime(start), cc.EpochAtTime(start.Add(time.Second))
}
// StartTimeOfEpoch returns the start time of the given epoch:
// genesis plus e epoch durations.
func (cc *chainClock) StartTimeOfEpoch(e abi.ChainEpoch) time.Time {
	return cc.genesisTime.Add(time.Duration(e) * cc.epochDuration)
}
// WaitNextEpoch returns after the next epoch occurs, or ctx is done.
// The returned value is the epoch that was waited for.
func (cc *chainClock) WaitNextEpoch(ctx context.Context) abi.ChainEpoch {
	next := cc.EpochAtTime(cc.Now()) + 1
	cc.WaitForEpoch(ctx, next)
	return next
}
// WaitForEpoch returns when an epoch is due to start, or ctx is done.
func (cc *chainClock) WaitForEpoch(ctx context.Context, e abi.ChainEpoch) {
	cc.waitForEpochOffset(ctx, e, 0)
}

// WaitForEpochPropDelay returns propDelay time after the start of the epoch,
// or when ctx is done.
func (cc *chainClock) WaitForEpochPropDelay(ctx context.Context, e abi.ChainEpoch) {
	cc.waitForEpochOffset(ctx, e, cc.propDelay)
}
// waitNextEpochOffset returns when time is offset past the start of the epoch, or ctx is done.
func (cc *chainClock) waitForEpochOffset(ctx context.Context, e abi.ChainEpoch, offset time.Duration) {
targetTime := cc.StartTimeOfEpoch(e).Add(offset)
nowB4 := cc.Now()
waitDur := targetTime.Sub(nowB4)
if waitDur > 0 {
newEpochCh := cc.After(waitDur)
select {
case <-newEpochCh:
case <-ctx.Done():
}
}
} | pkg/clock/chainclock.go | 0.811527 | 0.479382 | chainclock.go | starcoder |
package tensor
import (
"runtime"
)
// MultIterator is an iterator that iterates over multiple tensors, including masked tensors.
// It utilizes the *AP of a Tensor to determine what the next index is.
// This data structure is similar to Numpy's flatiter, with some standard Go based restrictions of course
// (such as, not allowing negative indices)
type MultIterator struct {
	*AP // Uses AP of the largest tensor in list

	fit0 *FlatIterator //largest fit in fitArr (by AP total size)

	// mask is the combined validity mask over the broadcast shape; empty when
	// at most one input tensor is masked.
	mask []bool
	// numMasked is how many of the input tensors were masked.
	numMasked int
	// lastIndexArr holds, per input tensor, the last flat index produced by
	// its underlying FlatIterator.
	lastIndexArr []int
	// shape is the broadcast shape shared by all inputs (borrowed ints).
	shape Shape
	// whichBlock maps each input tensor to the index of the stride-block
	// (and thus FlatIterator) it shares.
	whichBlock []int
	// fitArr holds one FlatIterator per distinct stride pattern.
	fitArr []*FlatIterator
	// strides is the concatenated broadcast strides of all blocks (borrowed).
	strides []int

	size int
	done bool
	// reverse indicates backward iteration (see SetReverse/SetForward).
	reverse bool
}
// genIterator returns the block index already registered in m for the given
// strides, or registers idx for them. The boolean reports whether the stride
// pattern had been seen before.
func genIterator(m map[int]int, strides []int, idx int) (int, bool) {
	key := hashIntArray(strides)
	if existing, ok := m[key]; ok {
		return existing, true
	}
	m[key] = idx
	return idx, false
}
// NewMultIterator creates a new MultIterator from a list of APs. All APs must
// be broadcastable to the shape of the largest one; tensors that share a
// stride pattern share a single underlying FlatIterator. Returns nil when no
// APs are given; panics on a nil AP or non-broadcastable strides.
func NewMultIterator(aps ...*AP) *MultIterator {
	nit := len(aps)
	if nit < 1 {
		return nil
	}
	for _, ap := range aps {
		if ap == nil {
			panic("ap is nil") //TODO: Probably remove this panic
		}
	}

	// Find the highest-dimensional input and the largest shape among those.
	var maxDims int
	var maxShape = aps[0].shape
	for i := range aps {
		if aps[i].Dims() >= maxDims {
			maxDims = aps[i].Dims()
			if aps[i].Size() > maxShape.TotalSize() {
				maxShape = aps[i].shape
			}
		}
	}

	// Borrow scratch storage from the pools; released via destroyMultIterator.
	it := new(MultIterator)
	it.whichBlock = BorrowInts(nit)
	it.lastIndexArr = BorrowInts(nit)
	it.strides = BorrowInts(nit * maxDims)
	shape := BorrowInts(len(maxShape))
	copy(shape, maxShape)
	it.shape = shape
	// Validate that every AP broadcasts to the common shape (results discarded
	// here; the real strides are filled in below).
	for _, ap := range aps {
		_, err := BroadcastStrides(shape, ap.shape, it.strides[:maxDims], ap.strides)
		if err != nil {
			panic("can not broadcast strides")
		}
	}
	for i := range it.strides {
		it.strides[i] = 0
	}

	it.fitArr = make([]*FlatIterator, nit)
	//TODO: Convert this make to Borrow perhaps?

	// Deduplicate stride patterns: each distinct pattern becomes one "block"
	// with its own broadcast strides and FlatIterator.
	m := make(map[int]int)
	nBlocks := 0
	offset := 0
	for i, ap := range aps {
		f, ok := genIterator(m, ap.strides, nBlocks)
		if !ok {
			offset = nBlocks * maxDims
			apStrides, _ := BroadcastStrides(shape, ap.shape, it.strides[offset:offset+maxDims], ap.strides)
			copy(it.strides[offset:offset+maxDims], apStrides)
			ReturnInts(apStrides) // Borrowed in BroadcastStrides but returned here - dangerous pattern?
			nBlocks++
		}
		ap2 := NewAP(it.shape[:maxDims], it.strides[offset:offset+maxDims])
		ap2.o = ap.o
		ap2.Δ = ap.Δ
		it.whichBlock[i] = f
		// NOTE(review): when several APs map to the same block this overwrites
		// fitArr[nBlocks-1] with a fresh iterator for each — presumably
		// harmless since they share identical strides; confirm o/Δ may differ.
		it.fitArr[nBlocks-1] = NewFlatIterator(ap2)
	}
	// Trim to the number of distinct blocks actually found.
	it.fitArr = it.fitArr[:nBlocks]
	it.strides = it.strides[:nBlocks*maxDims]

	// fit0 is the largest sub-iterator; its AP represents the whole iterator.
	it.fit0 = it.fitArr[0]
	for _, f := range it.fitArr {
		if it.fit0.size < f.size {
			it.fit0 = f
			it.AP = f.AP
		}
	}
	return it
}
// MultIteratorFromDense creates a new MultIterator from a list of dense
// tensors. When more than one input is masked, the per-position masks are
// OR-combined over the broadcast shape into a single mask.
func MultIteratorFromDense(tts ...DenseTensor) *MultIterator {
	aps := BorrowAPList(len(tts))
	hasMask := BorrowBools(len(tts))
	defer ReturnBools(hasMask)

	var masked = false
	numMasked := 0
	for i, tt := range tts {
		aps[i] = tt.Info()
		if mt, ok := tt.(MaskedTensor); ok {
			hasMask[i] = mt.IsMasked()
		}
		masked = masked || hasMask[i]
		if hasMask[i] {
			numMasked++
		}
	}

	it := NewMultIterator(aps...)
	// NOTE(review): the finalizer registers destroyIterator (defined
	// elsewhere), not destroyMultIterator below — confirm which cleanup is
	// intended.
	runtime.SetFinalizer(it, destroyIterator)

	if masked {
		// create new mask slice if more than one tensor is masked
		if numMasked > 1 {
			it.mask = BorrowBools(it.shape.TotalSize())
			memsetBools(it.mask, false)
			// Walk the whole broadcast index space once, OR-ing each masked
			// tensor's mask value at its own (possibly repeated) flat index.
			for i, err := it.Start(); err == nil; i, err = it.Next() {
				for j, k := range it.lastIndexArr {
					if hasMask[j] {
						it.mask[i] = it.mask[i] || tts[j].(MaskedTensor).Mask()[k]
					}
				}
			}
		}
	}
	it.numMasked = numMasked

	ReturnAPList(aps)
	return it
}
// destroyMultIterator returns any borrowed objects back to pool.
// NOTE(review): MultIteratorFromDense sets destroyIterator (not this
// function) as the finalizer — confirm this helper is wired up where intended.
func destroyMultIterator(it *MultIterator) {
	if cap(it.whichBlock) > 0 {
		ReturnInts(it.whichBlock)
		it.whichBlock = nil
	}
	if cap(it.lastIndexArr) > 0 {
		ReturnInts(it.lastIndexArr)
		it.lastIndexArr = nil
	}
	if cap(it.strides) > 0 {
		ReturnInts(it.strides)
		it.strides = nil
	}
	// The mask slice is only borrowed when more than one input was masked
	// (see MultIteratorFromDense).
	if it.numMasked > 1 {
		if cap(it.mask) > 0 {
			ReturnBools(it.mask)
			it.mask = nil
		}
	}
}
// SetReverse initializes iterator to run backward by reversing every
// underlying FlatIterator.
func (it *MultIterator) SetReverse() {
	for i := range it.fitArr {
		it.fitArr[i].SetReverse()
	}
}

// SetForward initializes iterator to run forward by forwarding every
// underlying FlatIterator.
func (it *MultIterator) SetForward() {
	for i := range it.fitArr {
		it.fitArr[i].SetForward()
	}
}
// Start begins iteration: resets all sub-iterators and returns the first
// index (or an error when iteration cannot proceed).
func (it *MultIterator) Start() (int, error) {
	it.Reset()
	return it.Next()
}
// Done reports whether every underlying FlatIterator has finished, caching
// the result in it.done.
func (it *MultIterator) Done() bool {
	allDone := true
	for i := range it.fitArr {
		if !it.fitArr[i].done {
			allDone = false
			break
		}
	}
	it.done = allDone
	return allDone
}
// Next returns the index of the next coordinate. It advances every
// underlying FlatIterator in lockstep; iteration is done as soon as any of
// them finishes.
func (it *MultIterator) Next() (int, error) {
	if it.done {
		return -1, noopError{}
	}
	done := false
	for _, sub := range it.fitArr {
		sub.Next()
		done = done || sub.done
	}
	it.done = done
	// Record each input tensor's flat index from its block's iterator.
	for i, block := range it.whichBlock {
		it.lastIndexArr[i] = it.fitArr[block].lastIndex
	}
	return it.fit0.lastIndex, nil
}
// NextValidity returns the next index together with its validity. With no
// combined mask every position is considered valid.
func (it *MultIterator) NextValidity() (int, bool, error) {
	idx, err := it.Next()
	if err != nil {
		return idx, false, err
	}
	if len(it.mask) == 0 {
		return idx, true, nil
	}
	return idx, it.mask[idx], nil
}
// NextValid returns the index of the next valid coordinate, together with a
// signed step count (negated when iterating in reverse).
// NOTE(review): the loop condition `invalid = !it.mask[...]` here and
// `valid = !it.mask[...]` in NextInvalid negate the mask in BOTH methods, so
// the two currently stop on the same condition — one of them looks inverted;
// compare with the FlatIterator masked variants before relying on either.
func (it *MultIterator) NextValid() (int, int, error) {
	var invalid = true
	var count int
	var mult = 1
	if it.reverse {
		mult = -1
	}
	for invalid {
		if it.done {
			for i, j := range it.whichBlock {
				it.lastIndexArr[i] = it.fitArr[j].lastIndex
			}
			return -1, 0, noopError{}
		}
		// Advance all sub-iterators in lockstep.
		for _, f := range it.fitArr {
			f.Next()
			it.done = it.done || f.done
		}
		count++
		invalid = !it.mask[it.fit0.lastIndex]
	}
	return it.fit0.lastIndex, mult * count, nil
}

// NextInvalid returns the index of the next invalid coordinate, together with
// a signed step count (negated when iterating in reverse).
// NOTE(review): see the inversion concern on NextValid above.
func (it *MultIterator) NextInvalid() (int, int, error) {
	var valid = true
	var count = 0
	var mult = 1
	if it.reverse {
		mult = -1
	}
	for valid {
		if it.done {
			for i, j := range it.whichBlock {
				it.lastIndexArr[i] = it.fitArr[j].lastIndex
			}
			return -1, 0, noopError{}
		}
		for _, f := range it.fitArr {
			f.Next()
			it.done = it.done || f.done
		}
		count++
		valid = !it.mask[it.fit0.lastIndex]
	}
	return it.fit0.lastIndex, mult * count, nil
}
// Coord returns the next coordinate.
// When Next() is called, the coordinates are updated AFTER the Next() returned.
// See example for more details. The returned slice aliases the largest
// sub-iterator's tracking storage; do not modify it.
func (it *MultIterator) Coord() []int {
	return it.fit0.track
}

// Reset resets the iterator state: every sub-iterator is rewound and the
// per-tensor last indices are re-captured.
func (it *MultIterator) Reset() {
	for _, f := range it.fitArr {
		f.Reset()
	}
	for i, j := range it.whichBlock {
		it.lastIndexArr[i] = it.fitArr[j].lastIndex
	}
	it.done = false
}

// LastIndex returns the last flat index produced for the j-th input tensor.
func (it *MultIterator) LastIndex(j int) int {
	return it.lastIndexArr[j]
}
/*
// Chan returns a channel of ints. This is useful for iterating multiple Tensors at the same time.
func (it *FlatIterator) Chan() (retVal chan int) {
retVal = make(chan int)
go func() {
for next, err := it.Next(); err == nil; next, err = it.Next() {
retVal <- next
}
close(retVal)
}()
return
}
*/ | vendor/gorgonia.org/tensor/iterator_mult.go | 0.591251 | 0.429968 | iterator_mult.go | starcoder |
package gender
import (
"github.com/akhripko/gremlin-ent/ent/predicate"
"github.com/facebook/ent/dialect/gremlin/graph/dsl"
"github.com/facebook/ent/dialect/gremlin/graph/dsl/__"
"github.com/facebook/ent/dialect/gremlin/graph/dsl/p"
)
// NOTE(review): these predicate helpers follow the shape of entc-generated
// code; if this file is generated, change the ent schema and regenerate
// rather than hand-editing.

// ID filters vertices based on their identifier.
func ID(id int) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.HasID(id)
	})
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.HasID(p.EQ(id))
	})
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.HasID(p.NEQ(id))
	})
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		v := make([]interface{}, len(ids))
		for i := range v {
			v[i] = ids[i]
		}
		t.HasID(p.Within(v...))
	})
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		v := make([]interface{}, len(ids))
		for i := range v {
			v[i] = ids[i]
		}
		t.HasID(p.Without(v...))
	})
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.HasID(p.GT(id))
	})
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.HasID(p.GTE(id))
	})
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.HasID(p.LT(id))
	})
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.HasID(p.LTE(id))
	})
}

// Value applies equality check predicate on the "value" field. It's identical to ValueEQ.
func Value(v string) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.Has(Label, FieldValue, p.EQ(v))
	})
}

// ValueEQ applies the EQ predicate on the "value" field.
func ValueEQ(v string) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.Has(Label, FieldValue, p.EQ(v))
	})
}

// ValueNEQ applies the NEQ predicate on the "value" field.
func ValueNEQ(v string) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.Has(Label, FieldValue, p.NEQ(v))
	})
}

// ValueIn applies the In predicate on the "value" field.
func ValueIn(vs ...string) predicate.Gender {
	v := make([]interface{}, len(vs))
	for i := range v {
		v[i] = vs[i]
	}
	return predicate.Gender(func(t *dsl.Traversal) {
		t.Has(Label, FieldValue, p.Within(v...))
	})
}

// ValueNotIn applies the NotIn predicate on the "value" field.
func ValueNotIn(vs ...string) predicate.Gender {
	v := make([]interface{}, len(vs))
	for i := range v {
		v[i] = vs[i]
	}
	return predicate.Gender(func(t *dsl.Traversal) {
		t.Has(Label, FieldValue, p.Without(v...))
	})
}

// ValueGT applies the GT predicate on the "value" field.
func ValueGT(v string) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.Has(Label, FieldValue, p.GT(v))
	})
}

// ValueGTE applies the GTE predicate on the "value" field.
func ValueGTE(v string) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.Has(Label, FieldValue, p.GTE(v))
	})
}

// ValueLT applies the LT predicate on the "value" field.
func ValueLT(v string) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.Has(Label, FieldValue, p.LT(v))
	})
}

// ValueLTE applies the LTE predicate on the "value" field.
func ValueLTE(v string) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.Has(Label, FieldValue, p.LTE(v))
	})
}

// ValueContains applies the Contains predicate on the "value" field.
func ValueContains(v string) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.Has(Label, FieldValue, p.Containing(v))
	})
}

// ValueHasPrefix applies the HasPrefix predicate on the "value" field.
func ValueHasPrefix(v string) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.Has(Label, FieldValue, p.StartingWith(v))
	})
}

// ValueHasSuffix applies the HasSuffix predicate on the "value" field.
func ValueHasSuffix(v string) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.Has(Label, FieldValue, p.EndingWith(v))
	})
}

// HasPersonas applies the HasEdge predicate on the "personas" edge.
func HasPersonas() predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		t.InE(PersonasInverseLabel).InV()
	})
}

// HasPersonasWith applies the HasEdge predicate on the "personas" edge with a given conditions (other predicates).
func HasPersonasWith(preds ...predicate.Persona) predicate.Gender {
	return predicate.Gender(func(t *dsl.Traversal) {
		tr := __.OutV()
		for _, p := range preds {
			p(tr)
		}
		t.InE(PersonasInverseLabel).Where(tr).InV()
	})
}

// And groups list of predicates with the AND operator between them.
func And(predicates ...predicate.Gender) predicate.Gender {
	return predicate.Gender(func(tr *dsl.Traversal) {
		trs := make([]interface{}, 0, len(predicates))
		// Note: the loop variable p shadows the imported package p here.
		for _, p := range predicates {
			t := __.New()
			p(t)
			trs = append(trs, t)
		}
		tr.Where(__.And(trs...))
	})
}

// Or groups list of predicates with the OR operator between them.
func Or(predicates ...predicate.Gender) predicate.Gender {
	return predicate.Gender(func(tr *dsl.Traversal) {
		trs := make([]interface{}, 0, len(predicates))
		// Note: the loop variable p shadows the imported package p here.
		for _, p := range predicates {
			t := __.New()
			p(t)
			trs = append(trs, t)
		}
		tr.Where(__.Or(trs...))
	})
}

// Not applies the not operator on the given predicate.
func Not(p predicate.Gender) predicate.Gender {
	return predicate.Gender(func(tr *dsl.Traversal) {
		t := __.New()
		p(t)
		tr.Where(__.Not(t))
	})
} | ent/gender/where.go | 0.713032 | 0.404743 | where.go | starcoder |
package longest_continuous_subarray_with_absolute_diff_less_than_or_equal_to_limit
import (
"container/heap"
"github.com/zrcoder/leetcodeGo/util/intheap"
)
/*
1438. 绝对差不超过限制的最长连续子数组 https://leetcode-cn.com/problems/longest-continuous-subarray-with-absolute-diff-less-than-or-equal-to-limit/
给你一个整数数组 nums ,和一个表示限制的整数 limit,
请你返回最长连续子数组的长度,该子数组中的任意两个元素之间的绝对差必须小于或者等于 limit 。
如果不存在满足条件的子数组,则返回 0 。
示例 1:
输入:nums = [8,2,4,7], limit = 4
输出:2
解释:所有子数组如下:
[8] 最大绝对差 |8-8| = 0 <= 4.
[8,2] 最大绝对差 |8-2| = 6 > 4.
[8,2,4] 最大绝对差 |8-2| = 6 > 4.
[8,2,4,7] 最大绝对差 |8-2| = 6 > 4.
[2] 最大绝对差 |2-2| = 0 <= 4.
[2,4] 最大绝对差 |2-4| = 2 <= 4.
[2,4,7] 最大绝对差 |2-7| = 5 > 4.
[4] 最大绝对差 |4-4| = 0 <= 4.
[4,7] 最大绝对差 |4-7| = 3 <= 4.
[7] 最大绝对差 |7-7| = 0 <= 4.
因此,满足题意的最长子数组的长度为 2 。
示例 2:
输入:nums = [10,1,2,4,7,2], limit = 5
输出:4
解释:满足题意的最长子数组是 [2,4,7,2],其最大绝对差 |2-7| = 5 <= 5 。
示例 3:
输入:nums = [4,2,2,2,4,4,2,2], limit = 0
输出:3
提示:
1 <= nums.length <= 10^5
1 <= nums[i] <= 10^9
0 <= limit <= 10^9
*/
/*
Cf. problem 239, "Sliding Window Maximum".

Naive sliding-window implementation:
Two pointers left and right delimit a window [left, right], both starting at
the far left. If every pair of values in the window differs by at most limit,
advance right; otherwise advance left.
Checking the window requires scanning it for its min and max.
Worst-case time O(n^2) — times out on the final test case; space O(1).
*/
func longestSubarray0(nums []int, limit int) int {
	result := 0
	left, right := 0, 0
	for right < len(nums) {
		if isValid(nums, left, right, limit) {
			// Window is valid: record its length and try to extend it.
			result = max(result, right-left+1)
			right++
		} else {
			// Window violates the limit: shrink it from the left.
			left++
		}
	}
	return result
}
// isValid reports whether max-min <= limit over the window nums[left..right]
// (inclusive). It exits early as soon as the running spread exceeds limit,
// which keeps the answer identical while avoiding a full scan in the common
// failing case.
func isValid(nums []int, left, right, limit int) bool {
	lo, hi := nums[left], nums[left]
	for i := left + 1; i <= right; i++ {
		switch {
		case nums[i] < lo:
			lo = nums[i]
		case nums[i] > hi:
			hi = nums[i]
		}
		if hi-lo > limit {
			return false
		}
	}
	return hi-lo <= limit
}
/*
The isValid check can be sped up.
To quickly obtain the window's min and max, keep two heaps — a min-heap and a
max-heap — holding exactly the window's elements (both heaps always contain
the same multiset). Comparing the two heap tops tells in O(1) whether the
whole window satisfies the limit.
Time: O(n*lg n) on average, O(n^2) worst case; space O(n).
*/
func longestSubarray(nums []int, limit int) int {
	if len(nums) < 2 {
		return len(nums)
	}
	minHeap := &intheap.Heap{}
	minHeap.InitWithCmp(func(i, j int) bool {
		return minHeap.Get(i) < minHeap.Get(j)
	})
	maxHeap := &intheap.Heap{}
	maxHeap.InitWithCmp(func(i, j int) bool {
		return maxHeap.Get(i) > maxHeap.Get(j)
	})
	heap.Push(minHeap, nums[0])
	heap.Push(maxHeap, nums[0])
	result := 1
	left, right := 0, 1
	for right < len(nums) {
		if maxHeap.Peek()-minHeap.Peek() <= limit {
			// Window valid: record its size (== heap length) and grow right.
			result = max(result, minHeap.Len())
			heap.Push(minHeap, nums[right])
			heap.Push(maxHeap, nums[right])
			right++
		} else {
			// Window invalid: evict the leftmost element from both heaps.
			minHeap.Remove(nums[left])
			maxHeap.Remove(nums[left])
			left++
		}
	}
	// The final window was never measured inside the loop; check it here.
	if maxHeap.Peek()-minHeap.Peek() <= limit {
		result = max(result, minHeap.Len())
	}
	return result
}
// max returns the larger of two ints.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
package fitness
import (
"github.com/RH12503/Triangula/geom"
"github.com/RH12503/Triangula/image"
"github.com/RH12503/Triangula/rasterize"
"github.com/RH12503/Triangula/triangulation/incrdelaunay"
)
// polygonsImageFunction scores a set of points by rasterizing the Voronoi
// polygons of their triangulation against a target image.
type polygonsImageFunction struct {
	target pixelData // pixels data of the target image.

	// Variance data stored in blocks of pixels. The variance of a N*N block can easily be found instead of
	// needing to iterate through N*N pixels.
	targetN pixelDataN

	blockSize int // The size of each N*N block.

	maxDifference float64 // The maximum difference of all pixels to the target image.

	// TriangleCache holds per-polygon fitness data from the last Calculate;
	// nextCache is the scratch slice being filled during the current one.
	TriangleCache []CacheData
	nextCache     []CacheData

	// The triangulation used to create the triangles.
	Triangulation *incrdelaunay.IVoronoi

	// The triangulation of the points before being mutated accessed from the
	// fitness function's base.
	Base *incrdelaunay.IVoronoi
}
func (g *polygonsImageFunction) Calculate(data PointsData) float64 {
points := data.Points
w, h := g.target.Size()
if g.Triangulation == nil {
// If there's no base triangulation, the whole triangulation needs to be recalculated
g.Triangulation = incrdelaunay.NewVoronoi(w, h)
for _, p := range points {
g.Triangulation.Insert(createPoint(p.X, p.Y, w, h))
}
} else if g.Base != nil {
// If there is a base triangulation, set this triangulation to the base
g.Triangulation.Set(g.Base)
// And then modify the points that have been mutated
for _, m := range data.Mutations {
g.Triangulation.Remove(createPoint(m.Old.X, m.Old.Y, w, h))
}
for _, m := range data.Mutations {
g.Triangulation.Insert(createPoint(m.New.X, m.New.Y, w, h))
}
}
// Prepare for next generation
g.Base = nil
g.nextCache = g.nextCache[:0]
pixels := g.target.pixels
pixelsN := g.targetN.pixels
// Calcuate the variance between the target image and current triangles
var difference float64
cacheMask := uint64(len(g.TriangleCache)) - 1
tris := g.TriangleCache
var polygon geom.Polygon
var polygonData []int16
g.Triangulation.IterPolygons(func(points []incrdelaunay.FloatPoint) {
polygon.Points = polygon.Points[:0]
polygonData = polygonData[:0]
for _, p := range points {
polygon.Points = append(polygon.Points, geom.Point{
X: fastRound(p.X),
Y: fastRound(p.Y),
})
polygonData = append(polygonData, int16(fastRound(p.X)))
polygonData = append(polygonData, int16(fastRound(p.Y)))
}
polyData := &PolygonCacheData{
coords: polygonData,
}
hash := polyData.Hash()
index0 := uint32(hash & cacheMask)
data := tris[index0]
// Check if the triangle is in the cache
if data == nil || !data.Equals(polyData) {
// The triangle isn't in the hash, so calculate the variance
// Welford's online algorithm is used:
// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
n := 0
var sR0, sG0, sB0 int
var sSq int
rasterize.DDAPolygon(polygon, g.blockSize, func(x0, x1, y int) {
row := pixels[y]
if x0 >= 0 && x1 <= len(row) {
for x := x0; x < x1; x++ {
pixel := row[x]
sR0 += int(pixel.r)
sG0 += int(pixel.g)
sB0 += int(pixel.b)
sSq += int(pixel.sq)
}
}
n += x1 - x0
}, func(x, y int) {
pixel := pixelsN[y][x]
sR0 += int(pixel.r)
sG0 += int(pixel.g)
sB0 += int(pixel.b)
sSq += int(pixel.sq)
n += g.blockSize * g.blockSize
})
var diff float64
if n != 0 {
diff = float64(sSq) - float64(sR0*sR0+sG0*sG0+sB0*sB0)/float64(n)
}
difference += diff
polyData.fitness = diff
polyData.SetCachedHash(index0)
var newPolyData []int16
newPolyData = append(newPolyData, polygonData...)
polyData.coords = newPolyData
g.nextCache = append(g.nextCache, polyData)
} else {
// If the triangle is in the cache, we don't need to recalculate the variance
difference += data.Data()
g.nextCache = append(g.nextCache, data)
}
})
g.TriangleCache = g.nextCache
return 1 - (difference / g.maxDifference)
}
// SetBase makes the next Calculate start from the triangulation held by
// another cache function instead of rebuilding from scratch.
func (g *polygonsImageFunction) SetBase(other CacheFunction) {
	base := other.(*polygonsImageFunction)
	g.Base = base.Triangulation
}
// Cache returns the per-polygon fitness cache built by the last Calculate.
func (g *polygonsImageFunction) Cache() []CacheData {
	return g.TriangleCache
}

// SetCache replaces the per-polygon fitness cache.
func (g *polygonsImageFunction) SetCache(cache []CacheData) {
	g.TriangleCache = cache
}
func PolygonsImageFunctions(target image.Data, blockSize, n int) []CacheFunction {
w, h := target.Size()
functions := make([]CacheFunction, n)
pixels := fromImage(target)
pixelsN := fromImageN(target, blockSize)
maxDiff := float64(maxPixelDifference * w * h)
for i := 0; i < n; i++ {
function := polygonsImageFunction{
target: pixels,
targetN: pixelsN,
blockSize: blockSize,
maxDifference: maxDiff,
TriangleCache: make([]CacheData, 2),
}
functions[i] = &function
}
return functions
} | fitness/polygons.go | 0.794465 | 0.561455 | polygons.go | starcoder |
package types
import (
"fmt"
"regexp"
"strconv"
"github.com/TheThingsNetwork/ttn/utils/errors"
"github.com/brocaar/lorawan/band"
)
// DataRate is a LoRa data rate: a spreading factor (7-12) and a bandwidth in
// kHz (125/250/500), rendered as e.g. "SF7BW125".
type DataRate struct {
	SpreadingFactor uint `json:"spreading_factor,omitempty"`
	Bandwidth       uint `json:"bandwidth,omitempty"`
}
// dataRateRegexp matches exactly a LoRa data rate string such as "SF7BW125".
// Compiled once at package scope instead of on every call; anchored so that
// strings with surrounding garbage (e.g. "xSF7BW125y") are rejected.
var dataRateRegexp = regexp.MustCompile("^SF(7|8|9|10|11|12)BW(125|250|500)$")

// ParseDataRate parses a data rate string such as "SF7BW125" into a DataRate.
func ParseDataRate(input string) (datr *DataRate, err error) {
	matches := dataRateRegexp.FindStringSubmatch(input)
	if len(matches) != 3 {
		return nil, errors.New("ttn/core: Invalid DataRate")
	}
	// Parse errors are impossible: the regexp guarantees digit-only groups.
	sf, _ := strconv.ParseUint(matches[1], 10, 64)
	bw, _ := strconv.ParseUint(matches[2], 10, 64)
	return &DataRate{
		SpreadingFactor: uint(sf),
		Bandwidth:       uint(bw),
	}, nil
}
// ConvertDataRate converts a band.DataRate to a DataRate. It returns a
// non-nil error (and a nil DataRate) when the input does not use LoRa
// modulation; previously a populated DataRate was returned alongside the
// error, which could mask the failure for callers inspecting the value.
func ConvertDataRate(input band.DataRate) (datr *DataRate, err error) {
	if input.Modulation != band.LoRaModulation {
		return nil, errors.New(fmt.Sprintf("ttn/core: %s can not be converted to a LoRa DataRate", input.Modulation))
	}
	return &DataRate{
		SpreadingFactor: uint(input.SpreadFactor),
		Bandwidth:       uint(input.Bandwidth),
	}, nil
}
// Bytes returns the DataRate as a byte slice (the textual "SFxBWy" form).
func (datr DataRate) Bytes() []byte {
	return []byte(datr.String())
}

// String implements the Stringer interface, e.g. "SF7BW125".
func (datr DataRate) String() string {
	return fmt.Sprintf("SF%dBW%d", datr.SpreadingFactor, datr.Bandwidth)
}

// GoString implements the GoStringer interface.
func (datr DataRate) GoString() string {
	return datr.String()
}

// MarshalText implements the TextMarshaler interface.
func (datr DataRate) MarshalText() ([]byte, error) {
	return []byte(datr.String()), nil
}

// UnmarshalText implements the TextUnmarshaler interface.
// On a parse failure the receiver is left unmodified.
func (datr *DataRate) UnmarshalText(data []byte) error {
	parsed, err := ParseDataRate(string(data))
	if err != nil {
		return err
	}
	*datr = *parsed
	return nil
}

// MarshalBinary implements the BinaryMarshaler interface.
// The binary form is identical to the textual form.
func (datr DataRate) MarshalBinary() ([]byte, error) {
	return datr.MarshalText()
}

// UnmarshalBinary implements the BinaryUnmarshaler interface.
func (datr *DataRate) UnmarshalBinary(data []byte) error {
	return datr.UnmarshalText(data)
}
// MarshalTo is used by Protobuf. It writes the textual form into b (which
// the caller must size via Size) and returns that form's full length.
// The byte form is built once instead of twice as before.
func (datr DataRate) MarshalTo(b []byte) (int, error) {
	data := datr.Bytes()
	copy(b, data)
	return len(data), nil
}
// Size is used by Protobuf: the length of the textual form.
func (datr DataRate) Size() int {
	return len(datr.Bytes())
}

// Marshal implements the Marshaler interface.
func (datr DataRate) Marshal() ([]byte, error) {
	return datr.MarshalBinary()
}

// Unmarshal implements the Unmarshaler interface.
func (datr *DataRate) Unmarshal(data []byte) error {
	*datr = DataRate{} // Reset the receiver
	return datr.UnmarshalBinary(data)
} | core/types/data_rate.go | 0.754734 | 0.400368 | data_rate.go | starcoder |
package power
import . "github.com/deinspanjer/units/unit"
// Power represents a SI unit of power (in watts, W)
type Power Unit

// Multiples and submultiples of the watt, plus common non-SI units.
const (
	// SI
	Yoctowatt = Watt * 1e-24
	Zeptowatt = Watt * 1e-21
	Attowatt  = Watt * 1e-18
	Femtowatt = Watt * 1e-15
	Picowatt  = Watt * 1e-12
	Nanowatt  = Watt * 1e-9
	Microwatt = Watt * 1e-6
	Milliwatt = Watt * 1e-3
	Centiwatt = Watt * 1e-2
	Deciwatt  = Watt * 1e-1
	Watt      Power = 1e0
	Decawatt  = Watt * 1e1
	Hectowatt = Watt * 1e2
	Kilowatt  = Watt * 1e3
	Megawatt  = Watt * 1e6
	Gigawatt  = Watt * 1e9
	Terawatt  = Watt * 1e12
	Petawatt  = Watt * 1e15
	Exawatt   = Watt * 1e18
	Zettawatt = Watt * 1e21
	Yottawatt = Watt * 1e24
	// non-SI
	// Metric horsepower, exactly 735.49875 W.
	Pferdestarke = Watt * 735.49875
)
// Yoctowatts returns the power in yW
func (p Power) Yoctowatts() float64 {
	return float64(p / Yoctowatt)
}

// Zeptowatts returns the power in zW
func (p Power) Zeptowatts() float64 {
	return float64(p / Zeptowatt)
}

// Attowatts returns the power in aW
func (p Power) Attowatts() float64 {
	return float64(p / Attowatt)
}

// Femtowatts returns the power in fW
func (p Power) Femtowatts() float64 {
	return float64(p / Femtowatt)
}

// Picowatts returns the power in pW
func (p Power) Picowatts() float64 {
	return float64(p / Picowatt)
}

// Nanowatts returns the power in nW
func (p Power) Nanowatts() float64 {
	return float64(p / Nanowatt)
}

// Microwatts returns the power in µW
func (p Power) Microwatts() float64 {
	return float64(p / Microwatt)
}

// Milliwatts returns the power in mW
func (p Power) Milliwatts() float64 {
	return float64(p / Milliwatt)
}

// Centiwatts returns the power in cW
func (p Power) Centiwatts() float64 {
	return float64(p / Centiwatt)
}

// Deciwatts returns the power in dW
func (p Power) Deciwatts() float64 {
	return float64(p / Deciwatt)
}

// Watts returns the power in W
func (p Power) Watts() float64 {
	return float64(p)
}

// Decawatts returns the power in daW
func (p Power) Decawatts() float64 {
	return float64(p / Decawatt)
}

// Hectowatts returns the power in hW
func (p Power) Hectowatts() float64 {
	return float64(p / Hectowatt)
}

// Kilowatts returns the power in kW
func (p Power) Kilowatts() float64 {
	return float64(p / Kilowatt)
}

// Megawatts returns the power in MW
func (p Power) Megawatts() float64 {
	return float64(p / Megawatt)
}

// Gigawatts returns the power in GW
func (p Power) Gigawatts() float64 {
	return float64(p / Gigawatt)
}

// Petawatts returns the power in PW
func (p Power) Petawatts() float64 {
	return float64(p / Petawatt)
}

// Exawatts returns the power in EW
func (p Power) Exawatts() float64 {
	return float64(p / Exawatt)
}

// Zettawatts returns the power in ZW
func (p Power) Zettawatts() float64 {
	return float64(p / Zettawatt)
}

// Yottawatts returns the power in YW
func (p Power) Yottawatts() float64 {
	return float64(p / Yottawatt)
}

// Terawatts returns the power in TW
func (p Power) Terawatts() float64 {
	return float64(p / Terawatt)
}

// Pferdestarke returns the power in PS (metric horsepower)
func (p Power) Pferdestarke() float64 {
	return float64(p / Pferdestarke)
}
package ckmeans
// fill_dp_matrix fills the dynamic-programming cost matrix S and backtrack
// matrix J for weighted 1-D k-means (ckmeans).
//
// S[q][i] holds the minimal cost of partitioning x[0..i] into q+1 clusters;
// J[q][i] holds the index of the first element of the last cluster in that
// optimal partition. x is assumed sorted; w holds per-point weights.
func fill_dp_matrix(x, w []float64, S [][]float64, J [][]int) {
	K := len(S)
	N := len(S[0])
	// Prefix sums of the (shifted) weighted values, their squares, the
	// weights and the squared weights; these let dissimilarity() evaluate
	// any interval cost in O(1).
	sum_x := make([]float64, N)
	sum_x_sq := make([]float64, N)
	sum_w := make([]float64, len(w))
	sum_w_sq := make([]float64, len(w))
	// Shift all values by the median to improve numerical stability of the
	// squared sums.
	shift := x[N/2]
	sum_x[0] = w[0] * (x[0] - shift)
	sum_x_sq[0] = w[0] * (x[0] - shift) * (x[0] - shift)
	sum_w[0] = w[0]
	sum_w_sq[0] = w[0] * w[0]
	S[0][0] = 0
	J[0][0] = 0
	for i := 1; i < N; i++ {
		sum_x[i] = sum_x[i-1] + w[i]*(x[i]-shift)
		sum_x_sq[i] = sum_x_sq[i-1] + w[i]*(x[i]-shift)*(x[i]-shift)
		sum_w[i] = sum_w[i-1] + w[i]
		sum_w_sq[i] = sum_w_sq[i-1] + w[i]*w[i]
		// NOTE: using same dissimilarity as SMAWK - original algorithm potentially (but not really) allowed for alternative criterion here
		// i.e. not convinced embedding criterion in SMAWK is all that correct
		S[0][i] = dissimilarity(0, i, sum_x, sum_x_sq, sum_w, sum_w_sq)
		J[0][i] = 0
	}
	// Fill the remaining rows with the SMAWK-accelerated recurrence. For
	// all but the last cluster count, row q only needs columns >= q; the
	// final row only needs the last column.
	for q := 1; q < K; q++ {
		var imin int
		if q < K-1 {
			imin = 1
			if q > imin {
				imin = q
			}
		} else {
			imin = N - 1
		}
		fill_row_q_SMAWK(imin, N-1, q, S, J, sum_x, sum_x_sq, sum_w, sum_w_sq)
	}
}
// backtrackWeighted walks the backtrack matrix J from the last cluster to
// the first, filling counts[k] with the number of points assigned to cluster
// k and weights[k] with the sum of the y values of those points.
func backtrackWeighted(x, y []float64, J [][]int, counts []int, weights []float64, K int) {
	right := len(J[0]) - 1
	for k := K - 1; k >= 0; k-- {
		left := J[k][right]
		counts[k] = right - left + 1
		var sum float64
		for _, v := range y[left : right+1] {
			sum += v
		}
		weights[k] = sum
		if k > 0 {
			right = left - 1
		}
	}
}
// backtrackWeightedX converts the backtrack matrix J into a per-point
// cluster assignment: the returned slice maps each point index to the index
// of the cluster it belongs to.
func backtrackWeightedX(x, y []float64, J [][]int) []int {
	K, N := len(J), len(J[0])
	assignment := make([]int, N)
	right := N - 1
	for k := K - 1; k >= 0; k-- {
		left := J[k][right]
		for i := left; i <= right; i++ {
			assignment[i] = k
		}
		if k > 0 {
			right = left - 1
		}
	}
	return assignment
}
package analyzer
import (
"fmt"
"strings"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/expression"
"github.com/dolthub/go-mysql-server/sql/plan"
)
// applyIndexesFromOuterScope attempts to apply an indexed lookup to a subquery using variables from the outer scope.
// It functions similarly to pushdownFilters, in that it applies an index to a table. But unlike that function, it must
// apply, effectively, an indexed join between two tables, one of which is defined in the outer scope. This is similar
// to the process in the join analyzer.
func applyIndexesFromOuterScope(ctx *sql.Context, a *Analyzer, n sql.Node, scope *Scope) (sql.Node, error) {
	// Correlated lookups only make sense when there is an outer scope.
	if scope == nil {
		return n, nil
	}
	// TODO: this isn't good enough: we need to consider aliases defined in the outer scope as well for this analysis
	tableAliases, err := getTableAliases(n, scope)
	if err != nil {
		return nil, err
	}
	// Find (table, index, key expression) candidates joining this scope's
	// tables against outer-scope columns.
	indexLookups, err := getOuterScopeIndexes(ctx, a, n, scope, tableAliases)
	if err != nil {
		return nil, err
	}
	if len(indexLookups) == 0 {
		return n, nil
	}
	childSelector := func(c plan.TransformContext) bool {
		switch c.Parent.(type) {
		// We can't push any indexes down a branch that have already had an index pushed down it
		case *plan.IndexedTableAccess:
			return false
		}
		return true
	}
	// replace the tables with possible index lookups with indexed access
	for _, idxLookup := range indexLookups {
		n, err = plan.TransformUpCtx(n, childSelector, func(c plan.TransformContext) (sql.Node, error) {
			switch n := c.Node.(type) {
			case *plan.IndexedTableAccess:
				// Already indexed: leave untouched.
				return n, nil
			case *plan.TableAlias:
				if strings.ToLower(n.Name()) == idxLookup.table {
					return pushdownIndexToTable(a, n, idxLookup.index, idxLookup.keyExpr)
				}
				return n, nil
			case *plan.ResolvedTable:
				if strings.ToLower(n.Name()) == idxLookup.table {
					return pushdownIndexToTable(a, n, idxLookup.index, idxLookup.keyExpr)
				}
				return n, nil
			default:
				return n, nil
			}
		})
		if err != nil {
			return nil, err
		}
	}
	return n, nil
}
// pushdownIndexToTable attempts to push the index given down to the table given, if it implements
// sql.IndexAddressableTable. Tables that don't support index addressing are returned unchanged.
func pushdownIndexToTable(a *Analyzer, tableNode NameableNode, index sql.Index, keyExpr []sql.Expression) (sql.Node, error) {
	return plan.TransformUp(tableNode, func(n sql.Node) (sql.Node, error) {
		switch n := n.(type) {
		case *plan.ResolvedTable:
			table := getTable(tableNode)
			if table == nil {
				return n, nil
			}
			// Only index-addressable tables can be wrapped in an
			// IndexedTableAccess node.
			if _, ok := table.(sql.IndexAddressableTable); ok {
				a.Log("table %q transformed with pushdown of index", tableNode.Name())
				return plan.NewIndexedTableAccess(n, index, keyExpr), nil
			}
		}
		return n, nil
	})
}
// subqueryIndexLookup describes an index lookup that can replace a table
// scan inside a subquery.
type subqueryIndexLookup struct {
	table string // lowercased name of the table (or alias) to replace
	keyExpr []sql.Expression // expressions producing the lookup key, in index column order
	index sql.Index // the index to use for the lookup
}
// getOuterScopeIndexes searches the node tree for filters whose predicates
// reference outer-scope columns and returns the index lookups those
// predicates make possible on this scope's tables.
func getOuterScopeIndexes(
	ctx *sql.Context,
	a *Analyzer,
	node sql.Node,
	scope *Scope,
	tableAliases TableAliases,
) ([]subqueryIndexLookup, error) {
	indexSpan, _ := ctx.Span("getOuterScopeIndexes")
	defer indexSpan.Finish()
	var indexes map[string]sql.Index
	var exprsByTable joinExpressionsByTable
	var err error
	plan.Inspect(node, func(node sql.Node) bool {
		switch node := node.(type) {
		case *plan.Filter:
			var indexAnalyzer *indexAnalyzer
			indexAnalyzer, err = getIndexesForNode(ctx, a, node)
			if err != nil {
				return false
			}
			// NOTE(review): this defer fires when this callback returns,
			// i.e. before the collected indexes are consumed below —
			// confirm the indexes remain valid after release.
			defer indexAnalyzer.releaseUsedIndexes()
			indexes, exprsByTable, err = getSubqueryIndexes(ctx, a, node.Expression, scope, indexAnalyzer, tableAliases)
			if err != nil {
				return false
			}
		}
		return true
	})
	if err != nil {
		return nil, err
	}
	if len(indexes) == 0 {
		return nil, nil
	}
	// Pair each matched index with the key expressions that drive it.
	var lookups []subqueryIndexLookup
	for table, idx := range indexes {
		if exprsByTable[table] != nil {
			// creating a key expression can fail in some cases, just skip this table
			keyExpr, err := createIndexKeyExpr(ctx, idx, exprsByTable[table], tableAliases)
			if err != nil {
				return nil, err
			}
			if keyExpr == nil {
				continue
			}
			lookups = append(lookups, subqueryIndexLookup{
				table:   table,
				keyExpr: keyExpr,
				index:   idx,
			})
		}
	}
	return lookups, nil
}
// createIndexKeyExpr returns a slice of expressions to be used when creating an index lookup key for the table given.
// It returns (nil, nil) when the join expressions do not form a prefix of the index's expressions, meaning no lookup
// is possible.
func createIndexKeyExpr(ctx *sql.Context, idx sql.Index, joinExprs []*joinColExpr, tableAliases TableAliases) ([]sql.Expression, error) {
	// To allow partial matching, we need to see if the expressions are a prefix of the index
	idxExpressions := idx.Expressions()
	normalizedJoinExprStrs := make([]string, len(joinExprs))
	for i := range joinExprs {
		normalizedJoinExprStrs[i] = normalizeExpression(ctx, tableAliases, joinExprs[i].colExpr).String()
	}
	if ok, prefixCount := exprsAreIndexSubset(normalizedJoinExprStrs, idxExpressions); !ok || prefixCount != len(normalizedJoinExprStrs) {
		return nil, nil
	}
	// Since the expressions are a prefix, we cut the index expressions we are using to just those involved
	idxPrefixExpressions := idxExpressions[:len(normalizedJoinExprStrs)]
	keyExprs := make([]sql.Expression, len(idxPrefixExpressions))
IndexExpressions:
	for i, idxExpr := range idxPrefixExpressions {
		// Find the join expression matching this index column and use its
		// comparand as the key component.
		for j := range joinExprs {
			if idxExpr == normalizedJoinExprStrs[j] {
				keyExprs[i] = joinExprs[j].comparand
				continue IndexExpressions
			}
		}
		// Unreachable if exprsAreIndexSubset reported the prefix correctly.
		return nil, fmt.Errorf("index `%s` reported having prefix of `%v` but has expressions `%v`",
			idx.ID(), normalizedJoinExprStrs, idxExpressions)
	}
	return keyExprs, nil
}
// getSubqueryIndexes examines the filter expression e for conjuncts that
// reference outer-scope columns, and returns, per table in this scope, an
// index usable for a lookup together with the join expressions grouped by
// table.
func getSubqueryIndexes(
	ctx *sql.Context,
	a *Analyzer,
	e sql.Expression,
	scope *Scope,
	ia *indexAnalyzer,
	tableAliases TableAliases,
) (map[string]sql.Index, joinExpressionsByTable, error) {
	scopeLen := len(scope.Schema())
	// build a list of candidate predicate expressions, those that might be used for an index lookup
	var candidatePredicates []sql.Expression
	for _, e := range splitConjunction(e) {
		// We are only interested in expressions that involve an outer scope variable (those whose index is less than the
		// scope length)
		isScopeExpr := false
		sql.Inspect(e, func(e sql.Expression) bool {
			if gf, ok := e.(*expression.GetField); ok {
				if gf.Index() < scopeLen {
					isScopeExpr = true
					return false
				}
			}
			return true
		})
		if isScopeExpr {
			candidatePredicates = append(candidatePredicates, e)
		}
	}
	tablesInScope := tablesInScope(scope)
	// group them by the table they reference
	// TODO: this only works for equality, make it work for other operands
	exprsByTable := joinExprsByTable(candidatePredicates)
	result := make(map[string]sql.Index)
	// For every predicate involving a table in the outer scope, see if there's an index lookup possible on its comparands
	// (the tables in this scope)
	for _, scopeTable := range tablesInScope {
		indexCols := exprsByTable[scopeTable]
		if indexCols != nil {
			table := indexCols[0].comparandCol.Table()
			idx := ia.MatchingIndex(ctx, ctx.GetCurrentDatabase(), table,
				normalizeExpressions(ctx, tableAliases, extractComparands(indexCols)...)...)
			if idx != nil {
				result[table] = idx
			}
		}
	}
	return result, exprsByTable, nil
}
func tablesInScope(scope *Scope) []string {
tables := make(map[string]bool)
for _, node := range scope.InnerToOuter() {
for _, col := range schemas(node.Children()) {
tables[col.Source] = true
}
}
var tableSlice []string
for table := range tables {
tableSlice = append(tableSlice, table)
}
return tableSlice
} | sql/analyzer/apply_indexes_from_outer_scope.go | 0.659953 | 0.427695 | apply_indexes_from_outer_scope.go | starcoder |
package maybe
import (
"testing"
"github.com/calebcase/base/control/monad"
"github.com/calebcase/base/data"
)
// Class is the Maybe type class: a monad over Maybe values that can also
// construct Just and Nothing directly.
type Class[A, B, C any] interface {
	monad.Class[A, B, C, Maybe[func(A) B], Maybe[A], Maybe[B], Maybe[C]]
	NewJust(A) Just[A]
	NewNothing() Nothing[A]
}

// Type is the canonical implementation of Class.
type Type[A, B, C any] struct{}

// Ensure Type implements Class.
var _ Class[int, int, int] = Type[int, int, int]{}

// NewType returns a new Type instance.
func NewType[A, B, C any]() Type[A, B, C] {
	return Type[A, B, C]{}
}

// NewJust wraps x in a Just.
func (t Type[A, B, C]) NewJust(x A) Just[A] {
	return Just[A]{x}
}

// NewNothing returns the empty Maybe.
func (t Type[A, B, C]) NewNothing() Nothing[A] {
	return Nothing[A]{}
}

// FMap applies f inside the Maybe: Just x -> Just (f x); Nothing -> Nothing.
func (t Type[A, B, C]) FMap(f func(A) B, v Maybe[A]) Maybe[B] {
	if j, ok := v.(Just[A]); ok {
		return Just[B]{f(j.Value)}
	}
	return Nothing[B]{}
}

// FReplace replaces the contents of v with a, preserving emptiness.
func (t Type[A, B, C]) FReplace(a A, v Maybe[B]) Maybe[A] {
	if _, ok := v.(Just[B]); ok {
		return Just[A]{a}
	}
	return Nothing[A]{}
}

// Pure lifts x into a Maybe.
func (t Type[A, B, C]) Pure(x A) Maybe[A] {
	return Just[A]{x}
}

// Apply applies a wrapped function to a wrapped value; a Nothing on either
// side yields Nothing.
func (t Type[A, B, C]) Apply(f Maybe[func(A) B], m Maybe[A]) Maybe[B] {
	if jf, ok := f.(Just[func(A) B]); ok {
		return t.FMap(jf.Value, m)
	}
	return Nothing[B]{}
}

// LiftA2 applies the binary function f to the values of x and y when both
// are present; otherwise Nothing.
func (t Type[A, B, C]) LiftA2(f func(A, B) C, x Maybe[A], y Maybe[B]) Maybe[C] {
	jx, ok := x.(Just[A])
	if !ok {
		return Nothing[C]{}
	}
	jy, ok := y.(Just[B])
	if !ok {
		return Nothing[C]{}
	}
	return Just[C]{f(jx.Value, jy.Value)}
}

// ApplyR sequences two Maybes, keeping the right-hand result.
// NOTE(review): unlike Haskell's *>, this does not propagate a Nothing from
// x — confirm this is intended.
func (t Type[A, B, C]) ApplyR(x Maybe[A], y Maybe[B]) Maybe[B] {
	return y
}

// ApplyL sequences two Maybes, keeping the left-hand result.
// NOTE(review): unlike Haskell's <*, this does not propagate a Nothing from
// y — confirm this is intended.
func (t Type[A, B, C]) ApplyL(x Maybe[A], y Maybe[B]) Maybe[A] {
	return x
}

// Bind chains a Maybe through the continuation k; Nothing short-circuits.
func (t Type[A, B, C]) Bind(x Maybe[A], k func(A) Maybe[B]) Maybe[B] {
	if jx, ok := x.(Just[A]); ok {
		return k(jx.Value)
	}
	return Nothing[B]{}
}

// Then sequences two Maybes, discarding the first result.
func (t Type[A, B, C]) Then(x Maybe[A], y Maybe[B]) Maybe[B] {
	return t.ApplyR(x, y)
}

// Return lifts x into a Maybe; alias of Pure.
func (t Type[A, B, C]) Return(x A) Maybe[A] {
	return t.Pure(x)
}
// Maybe is the sum type for maybe: a value is either Just (present) or
// Nothing (absent).
type Maybe[T any] interface {
	isMaybe(T)
	data.Data[T]
}

// Just contains a value.
type Just[T any] struct {
	Value T
}

func (j Just[T]) isMaybe(_ T) {}

// DEmpty reports whether the container is empty; a Just never is.
func (j Just[T]) DEmpty() bool {
	return false
}

// DValue returns the contained value.
func (j Just[T]) DValue() T {
	return j.Value
}

// DRest returns the remainder of the container; a Just holds exactly one
// value, so there is none.
func (j Just[T]) DRest() data.Data[T] {
	return nil
}

// Nothing indicates no value is present.
type Nothing[T any] struct{}

func (n Nothing[T]) isMaybe(_ T) {}

// DEmpty reports whether the container is empty; a Nothing always is.
func (n Nothing[T]) DEmpty() bool {
	return true
}

// DValue panics with data.ErrNoValue: Nothing holds no value.
func (n Nothing[T]) DValue() T {
	panic(data.ErrNoValue)
}

// DRest always returns nil for Nothing.
func (n Nothing[T]) DRest() data.Data[T] {
	return nil
}
// Apply returns dflt when v is Nothing; otherwise it applies f to the
// contained value and returns the result.
func Apply[A, B any](dflt B, f func(a A) B, v Maybe[A]) B {
	switch m := v.(type) {
	case Just[A]:
		return f(m.Value)
	default:
		return dflt
	}
}

// IsJust reports whether v holds a value.
func IsJust[A any](v Maybe[A]) bool {
	switch v.(type) {
	case Just[A]:
		return true
	default:
		return false
	}
}

// IsNothing reports whether v is empty.
func IsNothing[A any](v Maybe[A]) bool {
	switch v.(type) {
	case Nothing[A]:
		return true
	default:
		return false
	}
}

// FromJust extracts the contained value, panicking when v is not a Just.
func FromJust[A any](v Maybe[A]) A {
	j := v.(Just[A])
	return j.Value
}

// FromMaybe extracts the contained value, falling back to dflt for Nothing.
func FromMaybe[A any](dflt A, v Maybe[A]) A {
	switch m := v.(type) {
	case Just[A]:
		return m.Value
	default:
		return dflt
	}
}

// ListToMaybe wraps the first element of vs, or returns Nothing for an empty
// slice.
func ListToMaybe[A any](vs []A) Maybe[A] {
	if len(vs) > 0 {
		return Just[A]{vs[0]}
	}
	return Nothing[A]{}
}

// MaybeToList converts v to a slice of zero or one elements.
func MaybeToList[A any](v Maybe[A]) []A {
	switch m := v.(type) {
	case Just[A]:
		return []A{m.Value}
	default:
		return []A{}
	}
}

// CatMaybes keeps the values of all Justs in vs, discarding Nothings.
func CatMaybes[A any](vs []Maybe[A]) []A {
	out := make([]A, 0, len(vs))
	for _, v := range vs {
		switch m := v.(type) {
		case Just[A]:
			out = append(out, m.Value)
		}
	}
	return out
}

// MapMaybes applies f to every element of vs and keeps only the Just
// results.
func MapMaybes[A, B any](f func(A) Maybe[B], vs []A) (rs []B) {
	for _, v := range vs {
		switch m := f(v).(type) {
		case Just[B]:
			rs = append(rs, m.Value)
		}
	}
	return rs
}
// Conform returns a function testing if the implementation abides by its laws.
// The returned function delegates to the monad conformance suite, exercising
// the Maybe instance of c with the sample value x.
func Conform[
	A any,
	CA Class[A, A, A],
](c CA) func(t *testing.T, x A) {
	return func(t *testing.T, x A) {
		t.Run("monad.Conform", func(t *testing.T) {
			monad.Conform[A, Maybe[func(A) A], Maybe[A]](c)(t, x)
		})
	}
}
package smd
import (
"math"
"github.com/gonum/floats"
"github.com/gonum/matrix/mat64"
)
const (
deg2rad = math.Pi / 180
rad2deg = 1 / deg2rad
)
// Norm returns the Euclidean norm of a 3x1 vector.
func Norm(v []float64) float64 {
	return math.Sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])
}

// Unit returns the unit vector pointing in the direction of a. A
// numerically-zero vector yields the zero 3-vector.
func Unit(a []float64) (b []float64) {
	n := Norm(a)
	// Equivalent to floats.EqualWithinAbs(n, 0, 1e-12).
	if math.Abs(n) <= 1e-12 {
		return []float64{0, 0, 0}
	}
	b = make([]float64, len(a))
	for i := range a {
		b[i] = a[i] / n
	}
	return
}
// unitVec returns the unit vector of a given mat64.Vector. A numerically
// zero vector yields a zero vector of the same length.
func unitVec(a *mat64.Vector) (b *mat64.Vector) {
	b = mat64.NewVector(a.Len(), nil)
	n := mat64.Norm(a, 2)
	if floats.EqualWithinAbs(n, 0, 1e-12) {
		return // Zero-norm input: leave b as the zero vector.
	}
	b.ScaleVec(1/n, a)
	return
}
// Sign returns +1 for values that are positive or numerically zero, and -1
// for negative values.
func Sign(v float64) float64 {
	// Equivalent to floats.EqualWithinAbs(v, 0, 1e-12).
	if math.Abs(v) <= 1e-12 {
		return 1
	}
	if v < 0 {
		return -1
	}
	return 1
}
// Dot performs the inner product of a and b.
func Dot(a, b []float64) float64 {
	var sum float64
	for i := range a {
		sum += a[i] * b[i]
	}
	return sum
}

// Cross performs the cross product a x b of two 3-vectors.
func Cross(a, b []float64) []float64 {
	return []float64{
		a[1]*b[2] - a[2]*b[1],
		a[2]*b[0] - a[0]*b[2],
		a[0]*b[1] - a[1]*b[0],
	}
}
// crossVec performs the cross product of two mat64.Vectors. Only dimension 3
// is supported (the cross product is defined only in dimensions 3 and 7).
func crossVec(a, b *mat64.Vector) *mat64.Vector {
	rslt := mat64.NewVector(3, nil)
	rslt.SetVec(0, a.At(1, 0)*b.At(2, 0)-a.At(2, 0)*b.At(1, 0))
	rslt.SetVec(1, a.At(2, 0)*b.At(0, 0)-a.At(0, 0)*b.At(2, 0))
	rslt.SetVec(2, a.At(0, 0)*b.At(1, 0)-a.At(1, 0)*b.At(0, 0))
	return rslt
}
// Spherical2Cartesian converts a spherical-coordinates vector (r, θ, φ) to
// Cartesian coordinates.
func Spherical2Cartesian(a []float64) (b []float64) {
	sinθ, cosθ := math.Sincos(a[1])
	sinφ, cosφ := math.Sincos(a[2])
	r := a[0]
	return []float64{r * sinθ * cosφ, r * sinθ * sinφ, r * cosθ}
}
// Cartesian2Spherical converts a Cartesian coordinates vector to spherical
// coordinates (r, θ, φ). The zero vector maps to (0, 0, 0).
func Cartesian2Spherical(a []float64) (b []float64) {
	// Compute the norm once: the original called Norm(a) twice and
	// allocated the result slice before the early return.
	r := Norm(a)
	if r == 0 {
		return []float64{0, 0, 0}
	}
	b = make([]float64, 3)
	b[0] = r
	b[1] = math.Acos(a[2] / r)
	b[2] = math.Atan2(a[1], a[0])
	return
}
// Deg2rad converts degrees to radians, wrapping a negative input into the
// positive range first so only positive angles are produced.
func Deg2rad(a float64) float64 {
	deg := a
	if deg < 0 {
		deg += 360
	}
	return math.Mod(deg*deg2rad, 2*math.Pi)
}

// Rad2deg converts radians to degrees, wrapping a negative input into the
// positive range first so only positive angles are produced.
func Rad2deg(a float64) float64 {
	rad := a
	if rad < 0 {
		rad += 2 * math.Pi
	}
	return math.Mod(rad/deg2rad, 360)
}

// Rad2deg180 converts radians to degrees, normalized into +/-180.
func Rad2deg180(a float64) float64 {
	rad := a
	switch {
	case rad < -math.Pi:
		rad += 2 * math.Pi
	case rad > math.Pi:
		rad -= 2 * math.Pi
	}
	return math.Mod(rad/deg2rad, 360)
}
// DenseIdentity returns an identity matrix of type Dense and of the provided size.
func DenseIdentity(n int) *mat64.Dense {
return ScaledDenseIdentity(n, 1)
}
// ScaledDenseIdentity returns an identity matrix time of type Dense a scaling factor of the provided size.
func ScaledDenseIdentity(n int, s float64) *mat64.Dense {
vals := make([]float64, n*n)
for j := 0; j < n*n; j++ {
if j%(n+1) == 0 {
vals[j] = s
} else {
vals[j] = 0
}
}
return mat64.NewDense(n, n, vals)
} | math.go | 0.832849 | 0.735974 | math.go | starcoder |
package graph
import (
"fmt"
"sync"
)
// Identifier has an ID method; graph vertices are identified by this string.
type Identifier interface {
	ID() string
}

// Simple is an implementation of an undirected unweighted graph.
// The embedded RWMutex guards both vertices and edges for concurrent use.
type Simple struct {
	sync.RWMutex
	vertices []Identifier // all vertices, in insertion order
	edges map[string][]Identifier // adjacency lists keyed by vertex ID
}

// NewSimple initialises a new Simple struct.
func NewSimple() *Simple {
	return &Simple{
		vertices: make([]Identifier, 0),
		edges:    make(map[string][]Identifier),
	}
}
// AddVertex adds a new vertex to the Simple and returns an error when a
// vertex with the same ID already exists.
func (g *Simple) AddVertex(v Identifier) error {
	g.Lock()
	defer g.Unlock()
	if !g.hasVertex(v) {
		g.vertices = append(g.vertices, v)
		return nil
	}
	return fmt.Errorf("vertex id '%s' exists", v.ID())
}
// AddEdge adds an undirected edge to the graph (recording it in both
// adjacency lists) and returns an error if either vertex has not already
// been added.
func (g *Simple) AddEdge(v1, v2 Identifier) error {
	g.Lock()
	defer g.Unlock()
	if !g.hasVertex(v1) || !g.hasVertex(v2) {
		return fmt.Errorf("vertex id '%s' or '%s' missing", v1.ID(), v2.ID())
	}
	id1, id2 := v1.ID(), v2.ID()
	if g.edges[id1] == nil {
		g.edges[id1] = make([]Identifier, 0)
	}
	if g.edges[id2] == nil {
		g.edges[id2] = make([]Identifier, 0)
	}
	g.edges[id1] = append(g.edges[id1], v2)
	g.edges[id2] = append(g.edges[id2], v1)
	return nil
}
// IsNeighbor determines if vertices v1 and v2 form an edge. Returns an
// error if one or more of the vertices do not exist in the graph.
func (g *Simple) IsNeighbor(v1, v2 Identifier) (bool, error) {
	g.RLock()
	defer g.RUnlock()
	if g.hasVertex(v1) && g.hasVertex(v2) {
		return g.hasEdge(v1, v2), nil
	}
	return false, fmt.Errorf("vertex id '%s' or '%s' missing", v1.ID(), v2.ID())
}
// Neighbors returns all vertices that share an edge with vertex v. Returns
// an error if the vertex does not exist in the graph.
func (g *Simple) Neighbors(v Identifier) ([]Identifier, error) {
	g.RLock()
	defer g.RUnlock()
	if !g.hasVertex(v) {
		return nil, fmt.Errorf("vertex id '%s' missing", v.ID())
	}
	// The original lazily initialized g.edges[v.ID()] here, mutating the
	// map while holding only the read lock — a data race under concurrent
	// callers. Return an empty slice instead of writing to shared state.
	if edges := g.edges[v.ID()]; edges != nil {
		return edges, nil
	}
	return []Identifier{}, nil
}
func (g *Simple) hasVertex(v Identifier) bool {
for _, vv := range g.vertices {
if vv.ID() == v.ID() {
return true
}
}
return false
}
func (g *Simple) hasEdge(v1, v2 Identifier) bool {
for _, v := range g.edges[v1.ID()] {
if v.ID() == v2.ID() {
return true
}
}
return false
} | graph/simple.go | 0.734501 | 0.427277 | simple.go | starcoder |
// Package stdlib implements functions to standard library.
package stdlib
var Modules = []string{
`module Enum
val size = fn (array: Array) -> Integer
var count = 0
repeat v in array
count += 1
end
count
end
val empty? = fn (array: Array) -> Boolean
size(array) == 0
end
val reverse = fn (array: Array) -> Array
var reversed = []
repeat i in size(array)-1..0
reversed[] = array[i]
end
reversed
end
val first = fn (array: Array)
array[0]
end
val last = fn array: Array
array[size(array) - 1]
end
val insert = fn (array: Array, element) -> Array
array[] = element
end
val delete = fn (array: Array, index) -> Array
var purged = []
repeat i, v in array
if i != index
purged[] = v
end
end
purged
end
val map = fn (array: Array, fun: Function) -> Array
repeat v in array
fun(v)
end
end
val filter = fn (array: Array, fun: Function) -> Array
var filtered = []
repeat v in array
if fun(v)
filtered[] = v
end
end
filtered
end
val reduce = fn (array: Array, start, fun: Function)
var acc = start
repeat v in array
acc = fun(v, acc)
end
return acc
end
val find = fn (array: Array, fun: Function)
repeat v in array
if fun(v)
return v
end
end
nil
end
val contains? = fn (array: Array, search) -> Boolean
repeat v in array
if v == search
return true
end
end
false
end
val unique = fn (array: Array) -> Array
var filtered = []
var hash = [=>]
repeat i, v in array
if hash[v] == nil
hash[v] = i
filtered[] = v
end
end
filtered
end
val random = fn (array: Array)
var rnd = runtime_rand(0, size(array) - 1)
array[rnd]
end
end`,
`module Math
val pi = 3.14159265359
val e = 2.718281828459
val floor = fn (nr: Float) -> Integer
Integer(nr - nr % 1)
end
val ceil = fn (nr: Float) -> Integer
val rem = nr % 1
if rem == 0
return Integer(nr)
end
nr > 0 ? Integer(nr + (1 - rem)) : Integer(nr - (1 + rem))
end
val max = fn (nr1, nr2)
if !Type.isNumber?(nr1) || !Type.isNumber?(nr2)
panic("Math.max() expects a Float or Integer")
end
return nr1 > nr2 ? nr1 : nr2
end
val min = fn (nr1, nr2)
if !Type.isNumber?(nr1) || !Type.isNumber?(nr2)
panic("Math.min() expects a Float or Integer")
end
return nr1 > nr2 ? nr2 : nr1
end
val random = fn (min: Integer, max: Integer) -> Integer
runtime_rand(min, max)
end
val abs = fn (nr)
if !Type.isNumber?(nr)
panic("Math.abs() expects a Float or Integer")
end
if nr < 0
return -nr
end
nr
end
val pow = fn (nr, exp)
if !Type.isNumber?(nr) || !Type.isNumber?(exp)
panic("Math.pow() expects a Float or Integer")
end
nr ** exp
end
end`,
`module Type
val of = fn x
typeof(x)
end
val isNumber? = fn x
if typeof(x) == "Float" || typeof(x) == "Integer"
return true
end
false
end
val toString = fn x
String(x)
end
val toInteger = fn x
Integer(x)
end
val toFloat = fn x
Float(x)
end
val toArray = fn x
Array(x)
end
end`,
`module Dictionary
val size = fn (dict: Dictionary) -> Integer
var count = 0
repeat v in dict
count += 1
end
count
end
val contains? = fn (dict: Dictionary, key) -> Boolean
repeat k, v in dict
if k == key
return true
end
end
false
end
val empty? = fn (dict: Dictionary) -> Boolean
size(dict) == 0
end
val insert = fn (dict: Dictionary, key, value) -> Dictionary
if dict[key] != nil
panic("Dictionary key '" + String(key) + "' already exists")
end
dict[key] = value
end
val update = fn (dict: Dictionary, key, value) -> Dictionary
if dict[key] == nil
panic("Dictionary key '" + String(key) + "' doesn't exist")
end
dict[key] = value
end
val delete = fn (dict: Dictionary, key) -> Dictionary
if dict[key] == nil
panic("Dictionary key '" + String(key) + "' doesn't exist")
end
var purged = [=>]
repeat k, v in dict
if k != key
purged[k] = v
end
end
purged
end
end`,
`module String
val count = fn (str: String) -> Integer
var cnt = 0
repeat v in str
cnt += 1
end
cnt
end
val first = fn (str: String) -> String
str[0]
end
val last = fn (str: String) -> String
str[String.count(str) - 1]
end
val lower = fn (str: String) -> String
runtime_tolower(str)
end
val upper = fn (str: String) -> String
runtime_toupper(str)
end
val capitalize = fn (str: String) -> String
var title = str
repeat i, v in str
if i == 0 || str[i - 1] != nil && str[i - 1] == " "
title[i] = String.upper(v)
end
end
title
end
val reverse = fn (str: String) -> String
var reversed = ""
repeat i in String.count(str)-1..0
reversed += str[i]
end
reversed
end
val slice = fn (str: String, start: Integer, length: Integer) -> String
if start < 0 || length < 0
panic("String.slice() expects positive start and length parameters")
end
var sliced = ""
var chars = 0
repeat i, v in str
if i >= start && chars < length
sliced += v
chars += 1
end
end
sliced
end
val trim = fn (str: String, subset: String) -> String
var trimmed = String.trimRight(String.trimLeft(str, subset), subset)
trimmed
end
val trimLeft = fn (str: String, subset: String) -> String
var trimmed = str
repeat v in subset
if trimmed[0] == v
trimmed = String.slice(trimmed, 1, String.count(trimmed))
continue
end
end
trimmed
end
val trimRight = fn (str: String, subset: String) -> String
var trimmed = str
repeat v in subset
if String.last(trimmed) == v
trimmed = String.slice(trimmed, 0, String.count(trimmed) - 1)
continue
end
end
trimmed
end
val join = fn (array: Array, sep: String) -> String
var separator = ""
repeat v in array
separator += v + sep
end
if String.count(separator) > String.count(sep)
return String.slice(separator, 0, String.count(separator) - String.count(sep))
end
separator
end
val split = fn (str: String, sep: String) -> Array
val count_sep = String.count(sep)
var array = []
var last_index = 0
repeat i, v in str
if String.slice(str, i, count_sep) == sep
var curr = String.slice(str, last_index, i - last_index)
if curr != ""
array[] = curr
end
last_index = i + count_sep
end
end
array[] = String.slice(str, last_index, String.count(str))
array
end
val starts? = fn (str: String, prefix: String) -> Boolean
if String.count(str) < String.count(prefix)
return false
end
if String.slice(str, 0, String.count(prefix)) == prefix
return true
end
false
end
val ends? = fn (str: String, suffix: String) -> Boolean
if String.count(str) < String.count(suffix)
return false
end
if String.slice(str, String.count(str) - String.count(suffix), String.count(str)) == suffix
return true
end
false
end
val contains? = fn (str: String, search: String) -> Boolean
repeat i, v in str
if String.slice(str, i, String.count(search)) == search
return true
end
end
false
end
val replace = fn (str: String, search: String, replace: String) -> String
val count_search = String.count(search)
var rpl = ""
var last_index = 0
repeat i, v in str
if String.slice(str, i, count_search) == search
rpl = rpl + String.slice(str, last_index, i - last_index) + replace
last_index = i + count_search
end
end
rpl + String.slice(str, last_index, String.count(str))
end
val match? = fn (str: String, regex: String) -> Boolean
runtime_regex_match(str, regex)
end
end`,
} | runtime/stdlib/stdlib.go | 0.615897 | 0.565719 | stdlib.go | starcoder |
package web
import (
"github.com/cadmean-ru/amphion/engine"
"github.com/cadmean-ru/amphion/rendering"
)
// drawPoint renders a point primitive at its position using its fill color.
func drawPoint(p5 *p5, primitive rendering.Primitive) {
	pos := primitive.GetTransform().Position
	geo := primitive.(*rendering.GeometryPrimitive)
	p5.fill(geo.Appearance.FillColor)
	p5.point(pos.X, pos.Y)
}

// drawLine renders a line primitive from its position to position+size.
func drawLine(p5 *p5, primitive rendering.Primitive) {
	t := primitive.GetTransform()
	geo := primitive.(*rendering.GeometryPrimitive)
	p5.fill(geo.Appearance.StrokeColor)
	p5.strokeWeight(int(geo.Appearance.StrokeWeight))
	p5.line(t.Position.X, t.Position.Y, t.Position.X+t.Size.X, t.Position.Y+t.Size.Y)
}

// drawRectangle renders a rectangle primitive with fill, stroke and corner
// radius taken from its appearance.
func drawRectangle(p5 *p5, primitive rendering.Primitive) {
	t := primitive.GetTransform()
	geo := primitive.(*rendering.GeometryPrimitive)
	p5.fill(geo.Appearance.FillColor)
	p5.strokeWeight(int(geo.Appearance.StrokeWeight))
	p5.stroke(geo.Appearance.StrokeColor)
	p5.rect(t.Position.X, t.Position.Y, t.Size.X, t.Size.Y, int(geo.Appearance.CornerRadius))
}

// drawEllipse renders an ellipse primitive with fill and stroke taken from
// its appearance.
func drawEllipse(p5 *p5, primitive rendering.Primitive) {
	t := primitive.GetTransform()
	geo := primitive.(*rendering.GeometryPrimitive)
	p5.fill(geo.Appearance.FillColor)
	p5.strokeWeight(int(geo.Appearance.StrokeWeight))
	p5.stroke(geo.Appearance.StrokeColor)
	p5.ellipse(t.Position.X, t.Position.Y, t.Size.X, t.Size.Y)
}

// drawTriangle renders an upward-pointing triangle inscribed in the
// primitive's bounding box: base along the bottom edge, apex centered on the
// top edge.
func drawTriangle(p5 *p5, primitive rendering.Primitive) {
	t := primitive.GetTransform()
	geo := primitive.(*rendering.GeometryPrimitive)
	p5.fill(geo.Appearance.FillColor)
	p5.strokeWeight(int(geo.Appearance.StrokeWeight))
	p5.stroke(geo.Appearance.StrokeColor)
	baseY := t.Position.Y + t.Size.Y
	p5.triangle(
		t.Position.X, baseY,
		t.Position.X+(t.Size.X/2), t.Position.Y,
		t.Position.X+t.Size.X, baseY,
	)
}
// prevFontSize caches the last font size sent to p5 so repeated draws with
// the same size can skip the textSize call.
// NOTE(review): package-level mutable state — safe only if all drawing
// happens on a single goroutine; confirm the renderer guarantees that.
var prevFontSize byte

// drawText renders a text primitive: sets fill, stroke weight, font size
// (only when it changed since the last call) and alignment before drawing
// the text inside the primitive's bounding box.
func drawText(p5 *p5, primitive rendering.Primitive) {
	t := primitive.GetTransform()
	pos := t.Position
	size := t.Size
	tp := primitive.(*rendering.TextPrimitive)
	p5.fill(tp.Appearance.FillColor)
	p5.strokeWeight(int(tp.Appearance.StrokeWeight))
	if tp.TextAppearance.FontSize != prevFontSize {
		prevFontSize = tp.TextAppearance.FontSize
		p5.textSize(int(tp.TextAppearance.FontSize))
	}
	p5.textAlign(tp.HTextAlign, tp.VTextAlign)
	p5.text(tp.Text, pos.X, pos.Y, size.X, size.Y)
}
// images caches loaded p5 images by URL so each image is fetched only once.
// NOTE(review): the cache grows without bound and has no mutex — confirm
// all access happens on the render goroutine.
var images = map[string]*p5image{}

// drawImage renders an image primitive. If the image at ImageUrl has not
// been requested yet, an asynchronous load is started and a re-render is
// requested once it completes; nothing is drawn until the image is ready.
func drawImage(p5 *p5, primitive rendering.Primitive) {
	t := primitive.GetTransform()
	pos := t.Position
	size := t.Size
	ip := primitive.(*rendering.ImagePrimitive)
	if img, ok := images[ip.ImageUrl]; ok {
		if img.ready {
			p5.image(img, pos.X, pos.Y, size.X, size.Y)
		}
	} else {
		images[ip.ImageUrl] = p5.loadImage(ip.ImageUrl, func() {
			engine.GetInstance().RequestRendering()
		})
	}
}
package continuous
import (
"github.com/jtejido/stats"
"github.com/jtejido/stats/err"
"math"
"math/rand"
)
// Johnson SU Distribution (Unbounded)
// https://reference.wolfram.com/language/ref/JohnsonDistribution.html
//
// JohnsonSU is a four-parameter transformation of the standard normal:
// if Z ~ N(0,1), then X = μ + σ·sinh((Z − γ)/δ) follows Johnson SU
// (see Rand and Inverse, which implement exactly this transform).
type JohnsonSU struct {
	gamma, delta, location, scale float64 // shape γ, shape δ, location μ, and scale σ
	// src is the optional random source used by Rand; when nil, Rand
	// falls back to the package-global math/rand source.
	src rand.Source
}
// NewJohnsonSU creates a Johnson SU distribution with shape parameters γ and
// δ, location μ and scale σ, using the default random source for Rand.
func NewJohnsonSU(gamma, delta, location, scale float64) (*JohnsonSU, error) {
	return NewJohnsonSUWithSource(gamma, delta, location, scale, nil)
}

// NewJohnsonSUWithSource is like NewJohnsonSU but draws random variates from
// src. It returns an error when either δ or σ is not strictly positive,
// matching the domains declared by Parameters (δ ∈ (0,∞), σ ∈ (0,∞)).
func NewJohnsonSUWithSource(gamma, delta, location, scale float64, src rand.Source) (*JohnsonSU, error) {
	// Each of δ and σ must be > 0 on its own. The original rejected the
	// parameters only when BOTH were invalid (&&), accepting e.g. δ = -1
	// with σ = 1.
	if delta <= 0 || scale <= 0 {
		return nil, err.Invalid()
	}
	return &JohnsonSU{gamma, delta, location, scale, src}, nil
}
// Parameter domains:
//   γ ∈ (-∞,∞)
//   δ ∈ (0,∞)
//   μ ∈ (-∞,∞)
//   σ ∈ (0,∞)

// Parameters returns the valid (open) ranges of γ, δ, μ and σ.
func (j *JohnsonSU) Parameters() stats.Limits {
	return stats.Limits{
		"γ": stats.Interval{math.Inf(-1), math.Inf(1), true, true},
		"δ": stats.Interval{0, math.Inf(1), true, true},
		"μ": stats.Interval{math.Inf(-1), math.Inf(1), true, true},
		"σ": stats.Interval{0, math.Inf(1), true, true},
	}
}

// x ∈ (-∞,∞)

// Support returns the support of the distribution: the whole real line.
func (j *JohnsonSU) Support() stats.Interval {
	return stats.Interval{math.Inf(-1), math.Inf(1), true, true}
}
// Probability implements the PDF:
//   f(x) = δ · exp(−½·(γ + δ·asinh((x−μ)/σ))²) / (√(2π) · √((x−μ)² + σ²))
// where σ·√(1+z²) has been folded into √((x−μ)²+σ²).
func (j *JohnsonSU) Probability(x float64) float64 {
	d := x - j.location
	u := j.gamma + j.delta*math.Asinh(d/j.scale)
	norm := math.Sqrt(2*math.Pi) * math.Sqrt(d*d+j.scale*j.scale)
	return math.Exp(-.5*math.Pow(u, 2)) * j.delta / norm
}
// Distribution implements the CDF:
//   F(x) = Φ(γ + δ·asinh((x−μ)/σ)) = ½·(1 + erf((γ + δ·asinh((x−μ)/σ))/√2))
func (j *JohnsonSU) Distribution(x float64) float64 {
	u := j.gamma + j.delta*math.Asinh((x-j.location)/j.scale)
	// The WHOLE standard-normal argument must be divided by √2. The
	// original divided only the δ·asinh term, leaving γ unscaled and
	// producing a wrong CDF whenever γ ≠ 0.
	return .5 * (1 + math.Erf(u/math.Sqrt2))
}
// Mean returns E[X] = μ − σ·exp(1/(2δ²))·sinh(γ/δ).
func (j *JohnsonSU) Mean() float64 {
	return j.location - math.Exp(1/(2*(j.delta*j.delta)))*j.scale*math.Sinh(j.gamma/j.delta)
}

// Variance returns Var[X] using a closed-form expression in exp(1/δ²) and
// exp(γ/δ). NOTE(review): transcribed closed form; not re-derived here.
func (j *JohnsonSU) Variance() float64 {
	return 1. / 4 * math.Exp(-((2 * j.gamma) / j.delta)) * (-1 + math.Exp(1/(j.delta*j.delta))) * (math.Exp(1/(j.delta*j.delta)) + 2*math.Exp((2*j.gamma)/j.delta) + math.Exp((1+4*j.gamma*j.delta)/(j.delta*j.delta))) * (j.scale * j.scale)
}

// Median returns μ − σ·sinh(γ/δ), the sinh-transform of the standard
// normal median 0.
func (j *JohnsonSU) Median() float64 {
	return j.location - j.scale*math.Sinh(j.gamma/j.delta)
}

// ExKurtosis returns the excess kurtosis (kurtosis − 3) in closed form.
// NOTE(review): transcribed closed form; not re-derived here.
func (j *JohnsonSU) ExKurtosis() float64 {
	num := (4*math.Exp((2+2*j.gamma*j.delta)/(j.delta*j.delta))*(2+math.Exp(1/(j.delta*j.delta))) + 4*math.Exp((2+6*j.gamma*j.delta)/(j.delta*j.delta))*(2+math.Exp(1/(j.delta*j.delta))) + 6*math.Exp((4*j.gamma)/j.delta)*(1+2*math.Exp(1/(j.delta*j.delta))) + math.Exp(2/(j.delta*j.delta))*(-3+math.Exp(2/(j.delta*j.delta))*(3+math.Exp(1/(j.delta*j.delta))*(2+math.Exp(1/(j.delta*j.delta))))) + math.Exp((2+8*j.gamma*j.delta)/(j.delta*j.delta))*(-3+math.Exp(2/(j.delta*j.delta))*(3+math.Exp(1/(j.delta*j.delta))*(2+math.Exp(1/(j.delta*j.delta))))))
	denom := math.Pow(math.Exp(1/(j.delta*j.delta))+2*math.Exp((2*j.gamma)/j.delta)+math.Exp((1+4*j.gamma*j.delta)/(j.delta*j.delta)), 2)
	return (num / denom) - 3
}

// Entropy is not implemented for Johnson SU; it records the unimplemented
// call via stats.NotImplementedError and returns NaN.
func (j *JohnsonSU) Entropy() float64 {
	stats.NotImplementedError()
	return math.NaN()
}
// Inverse implements the quantile function:
//   Q(q) = μ + σ·sinh((Φ⁻¹(q) − γ)/δ), with Φ⁻¹(q) = √2·erfinv(2q − 1).
// Out-of-range probabilities clamp to ∓∞.
func (j *JohnsonSU) Inverse(q float64) float64 {
	switch {
	case q <= 0:
		return math.Inf(-1)
	case q >= 1:
		return math.Inf(1)
	}
	z := math.Sqrt2 * math.Erfinv(2*q-1)
	return j.location + j.scale*math.Sinh((z-j.gamma)/j.delta)
}
// Rand draws a single variate via inverse-transform sampling:
// μ + σ·sinh((Φ⁻¹(U) − γ)/δ) with U uniform on [0,1).
func (j *JohnsonSU) Rand() float64 {
	var rnd float64
	if j.src != nil {
		// NOTE(review): allocates a fresh *rand.Rand on every call; consider
		// caching one per distribution if Rand is hot.
		rnd = rand.New(j.src).Float64()
	} else {
		rnd = rand.Float64()
	}
	// Standard normal quantile via the Normal distribution's Inverse.
	n := Normal{0, 1, j.src, nil}
	return j.scale*math.Sinh((n.Inverse(rnd)-j.gamma)/j.delta) + j.location
}
package client
import (
"encoding/json"
)
// YearlyScheduleSettings struct for YearlyScheduleSettings.
// TimeLocal, DayNumberInMonth, Month and Retention are required by the API
// (see NewYearlyScheduleSettings); DayOfWeek and DayOfMonth are optional and
// omitted from JSON when unset.
type YearlyScheduleSettings struct {
	// Local time of day; required.
	TimeLocal Time `json:"timeLocal"`
	// Which occurrence within the month; required.
	DayNumberInMonth DayNumbersInMonth `json:"dayNumberInMonth"`
	// Optional day-of-week selector.
	DayOfWeek *DaysOfWeek `json:"dayOfWeek,omitempty"`
	// Optional day-of-month selector.
	DayOfMonth *int32 `json:"dayOfMonth,omitempty"`
	// Month of the year; required.
	Month Months `json:"month"`
	// Retention options for data produced by this schedule; required.
	Retention YearlyRetentionOptions `json:"retention"`
}
// NewYearlyScheduleSettings instantiates a new YearlyScheduleSettings object
// with every API-required property populated. The argument list will change
// if the set of required properties changes.
func NewYearlyScheduleSettings(timeLocal Time, dayNumberInMonth DayNumbersInMonth, month Months, retention YearlyRetentionOptions) *YearlyScheduleSettings {
	return &YearlyScheduleSettings{
		TimeLocal:        timeLocal,
		DayNumberInMonth: dayNumberInMonth,
		Month:            month,
		Retention:        retention,
	}
}

// NewYearlyScheduleSettingsWithDefaults instantiates a new
// YearlyScheduleSettings object with defaults only; properties required by
// the API are NOT guaranteed to be set.
func NewYearlyScheduleSettingsWithDefaults() *YearlyScheduleSettings {
	return &YearlyScheduleSettings{}
}
// GetTimeLocal returns the TimeLocal field value.
// Safe on a nil receiver: returns the zero value.
func (o *YearlyScheduleSettings) GetTimeLocal() Time {
	if o == nil {
		var ret Time
		return ret
	}

	return o.TimeLocal
}

// GetTimeLocalOk returns a tuple with the TimeLocal field value
// and a boolean to check if the value has been set.
// Returns (nil, false) on a nil receiver.
func (o *YearlyScheduleSettings) GetTimeLocalOk() (*Time, bool) {
	if o == nil {
		return nil, false
	}
	return &o.TimeLocal, true
}

// SetTimeLocal sets field value.
func (o *YearlyScheduleSettings) SetTimeLocal(v Time) {
	o.TimeLocal = v
}

// GetDayNumberInMonth returns the DayNumberInMonth field value.
// Safe on a nil receiver: returns the zero value.
func (o *YearlyScheduleSettings) GetDayNumberInMonth() DayNumbersInMonth {
	if o == nil {
		var ret DayNumbersInMonth
		return ret
	}

	return o.DayNumberInMonth
}

// GetDayNumberInMonthOk returns a tuple with the DayNumberInMonth field value
// and a boolean to check if the value has been set.
func (o *YearlyScheduleSettings) GetDayNumberInMonthOk() (*DayNumbersInMonth, bool) {
	if o == nil {
		return nil, false
	}
	return &o.DayNumberInMonth, true
}

// SetDayNumberInMonth sets field value.
func (o *YearlyScheduleSettings) SetDayNumberInMonth(v DayNumbersInMonth) {
	o.DayNumberInMonth = v
}

// GetDayOfWeek returns the DayOfWeek field value if set, zero value otherwise.
func (o *YearlyScheduleSettings) GetDayOfWeek() DaysOfWeek {
	if o == nil || o.DayOfWeek == nil {
		var ret DaysOfWeek
		return ret
	}
	return *o.DayOfWeek
}

// GetDayOfWeekOk returns a tuple with the DayOfWeek field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *YearlyScheduleSettings) GetDayOfWeekOk() (*DaysOfWeek, bool) {
	if o == nil || o.DayOfWeek == nil {
		return nil, false
	}
	return o.DayOfWeek, true
}

// HasDayOfWeek returns a boolean if a field has been set.
func (o *YearlyScheduleSettings) HasDayOfWeek() bool {
	if o != nil && o.DayOfWeek != nil {
		return true
	}

	return false
}

// SetDayOfWeek gets a reference to the given DaysOfWeek and assigns it to the DayOfWeek field.
func (o *YearlyScheduleSettings) SetDayOfWeek(v DaysOfWeek) {
	o.DayOfWeek = &v
}

// GetDayOfMonth returns the DayOfMonth field value if set, zero value otherwise.
func (o *YearlyScheduleSettings) GetDayOfMonth() int32 {
	if o == nil || o.DayOfMonth == nil {
		var ret int32
		return ret
	}
	return *o.DayOfMonth
}

// GetDayOfMonthOk returns a tuple with the DayOfMonth field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *YearlyScheduleSettings) GetDayOfMonthOk() (*int32, bool) {
	if o == nil || o.DayOfMonth == nil {
		return nil, false
	}
	return o.DayOfMonth, true
}

// HasDayOfMonth returns a boolean if a field has been set.
func (o *YearlyScheduleSettings) HasDayOfMonth() bool {
	if o != nil && o.DayOfMonth != nil {
		return true
	}

	return false
}

// SetDayOfMonth gets a reference to the given int32 and assigns it to the DayOfMonth field.
func (o *YearlyScheduleSettings) SetDayOfMonth(v int32) {
	o.DayOfMonth = &v
}

// GetMonth returns the Month field value.
// Safe on a nil receiver: returns the zero value.
func (o *YearlyScheduleSettings) GetMonth() Months {
	if o == nil {
		var ret Months
		return ret
	}

	return o.Month
}

// GetMonthOk returns a tuple with the Month field value
// and a boolean to check if the value has been set.
func (o *YearlyScheduleSettings) GetMonthOk() (*Months, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Month, true
}

// SetMonth sets field value.
func (o *YearlyScheduleSettings) SetMonth(v Months) {
	o.Month = v
}

// GetRetention returns the Retention field value.
// Safe on a nil receiver: returns the zero value.
func (o *YearlyScheduleSettings) GetRetention() YearlyRetentionOptions {
	if o == nil {
		var ret YearlyRetentionOptions
		return ret
	}

	return o.Retention
}

// GetRetentionOk returns a tuple with the Retention field value
// and a boolean to check if the value has been set.
func (o *YearlyScheduleSettings) GetRetentionOk() (*YearlyRetentionOptions, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Retention, true
}

// SetRetention sets field value.
func (o *YearlyScheduleSettings) SetRetention(v YearlyRetentionOptions) {
	o.Retention = v
}
// MarshalJSON serializes the settings: required fields are always emitted,
// while the optional DayOfWeek/DayOfMonth are included only when set.
func (o YearlyScheduleSettings) MarshalJSON() ([]byte, error) {
	// Required fields go straight into the map; the generated `if true {}`
	// wrappers around them were dead branches and have been removed.
	toSerialize := map[string]interface{}{
		"timeLocal":        o.TimeLocal,
		"dayNumberInMonth": o.DayNumberInMonth,
		"month":            o.Month,
		"retention":        o.Retention,
	}
	if o.DayOfWeek != nil {
		toSerialize["dayOfWeek"] = o.DayOfWeek
	}
	if o.DayOfMonth != nil {
		toSerialize["dayOfMonth"] = o.DayOfMonth
	}
	return json.Marshal(toSerialize)
}
type NullableYearlyScheduleSettings struct {
value *YearlyScheduleSettings
isSet bool
}
func (v NullableYearlyScheduleSettings) Get() *YearlyScheduleSettings {
return v.value
}
func (v *NullableYearlyScheduleSettings) Set(val *YearlyScheduleSettings) {
v.value = val
v.isSet = true
}
func (v NullableYearlyScheduleSettings) IsSet() bool {
return v.isSet
}
func (v *NullableYearlyScheduleSettings) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableYearlyScheduleSettings(val *YearlyScheduleSettings) *NullableYearlyScheduleSettings {
return &NullableYearlyScheduleSettings{value: val, isSet: true}
}
func (v NullableYearlyScheduleSettings) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableYearlyScheduleSettings) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | client/model_yearly_schedule_settings.go | 0.815343 | 0.480844 | model_yearly_schedule_settings.go | starcoder |
package ameda
import (
"strconv"
)
// BoolToInterface converts bool to interface.
func BoolToInterface(v bool) interface{} {
	var r interface{} = v
	return r
}

// BoolToInterfacePtr converts bool to *interface.
func BoolToInterfacePtr(v bool) *interface{} {
	p := new(interface{})
	*p = BoolToInterface(v)
	return p
}

// BoolToString converts bool to string ("true" or "false").
func BoolToString(v bool) string {
	return strconv.FormatBool(v)
}

// BoolToStringPtr converts bool to *string.
func BoolToStringPtr(v bool) *string {
	p := new(string)
	*p = BoolToString(v)
	return p
}

// BoolToBoolPtr converts bool to *bool.
func BoolToBoolPtr(v bool) *bool {
	p := new(bool)
	*p = v
	return p
}

// BoolToFloat32 converts bool to float32 (true → 1, false → 0).
func BoolToFloat32(v bool) float32 {
	if !v {
		return 0
	}
	return 1
}

// BoolToFloat32Ptr converts bool to *float32.
func BoolToFloat32Ptr(v bool) *float32 {
	p := new(float32)
	*p = BoolToFloat32(v)
	return p
}

// BoolToFloat64 converts bool to float64 (true → 1, false → 0).
func BoolToFloat64(v bool) float64 {
	if !v {
		return 0
	}
	return 1
}

// BoolToFloat64Ptr converts bool to *float64.
func BoolToFloat64Ptr(v bool) *float64 {
	p := new(float64)
	*p = BoolToFloat64(v)
	return p
}

// BoolToInt converts bool to int (true → 1, false → 0).
func BoolToInt(v bool) int {
	if !v {
		return 0
	}
	return 1
}

// BoolToIntPtr converts bool to *int.
func BoolToIntPtr(v bool) *int {
	p := new(int)
	*p = BoolToInt(v)
	return p
}

// BoolToInt8 converts bool to int8 (true → 1, false → 0).
func BoolToInt8(v bool) int8 {
	if !v {
		return 0
	}
	return 1
}

// BoolToInt8Ptr converts bool to *int8.
func BoolToInt8Ptr(v bool) *int8 {
	p := new(int8)
	*p = BoolToInt8(v)
	return p
}

// BoolToInt16 converts bool to int16 (true → 1, false → 0).
func BoolToInt16(v bool) int16 {
	if !v {
		return 0
	}
	return 1
}

// BoolToInt16Ptr converts bool to *int16.
func BoolToInt16Ptr(v bool) *int16 {
	p := new(int16)
	*p = BoolToInt16(v)
	return p
}

// BoolToInt32 converts bool to int32 (true → 1, false → 0).
func BoolToInt32(v bool) int32 {
	if !v {
		return 0
	}
	return 1
}

// BoolToInt32Ptr converts bool to *int32.
func BoolToInt32Ptr(v bool) *int32 {
	p := new(int32)
	*p = BoolToInt32(v)
	return p
}

// BoolToInt64 converts bool to int64 (true → 1, false → 0).
func BoolToInt64(v bool) int64 {
	if !v {
		return 0
	}
	return 1
}

// BoolToInt64Ptr converts bool to *int64.
func BoolToInt64Ptr(v bool) *int64 {
	p := new(int64)
	*p = BoolToInt64(v)
	return p
}

// BoolToUint converts bool to uint (true → 1, false → 0).
func BoolToUint(v bool) uint {
	if !v {
		return 0
	}
	return 1
}

// BoolToUintPtr converts bool to *uint.
func BoolToUintPtr(v bool) *uint {
	p := new(uint)
	*p = BoolToUint(v)
	return p
}

// BoolToUint8 converts bool to uint8 (true → 1, false → 0).
func BoolToUint8(v bool) uint8 {
	if !v {
		return 0
	}
	return 1
}

// BoolToUint8Ptr converts bool to *uint8.
func BoolToUint8Ptr(v bool) *uint8 {
	p := new(uint8)
	*p = BoolToUint8(v)
	return p
}

// BoolToUint16 converts bool to uint16 (true → 1, false → 0).
func BoolToUint16(v bool) uint16 {
	if !v {
		return 0
	}
	return 1
}

// BoolToUint16Ptr converts bool to *uint16.
func BoolToUint16Ptr(v bool) *uint16 {
	p := new(uint16)
	*p = BoolToUint16(v)
	return p
}

// BoolToUint32 converts bool to uint32 (true → 1, false → 0).
func BoolToUint32(v bool) uint32 {
	if !v {
		return 0
	}
	return 1
}

// BoolToUint32Ptr converts bool to *uint32.
func BoolToUint32Ptr(v bool) *uint32 {
	p := new(uint32)
	*p = BoolToUint32(v)
	return p
}

// BoolToUint64 converts bool to uint64 (true → 1, false → 0).
func BoolToUint64(v bool) uint64 {
	if !v {
		return 0
	}
	return 1
}

// BoolToUint64Ptr converts bool to *uint64.
func BoolToUint64Ptr(v bool) *uint64 {
	p := new(uint64)
	*p = BoolToUint64(v)
	return p
}
package pcapng
import (
"bytes"
"fmt"
"net"
"strings"
"github.com/bearmini/pcapng-go/pcapng/blocktype"
"github.com/bearmini/pcapng-go/pcapng/optioncode"
"github.com/pkg/errors"
)
/*
4.5. Name Resolution Block
The Name Resolution Block (NRB) is used to support the correlation of
numeric addresses (present in the captured packets) and their
corresponding canonical names and it is optional. Having the literal
names saved in the file prevents the need for performing name
resolution at a later time, when the association between names and
addresses may be different from the one in use at capture time.
Moreover, the NRB avoids the need for issuing a lot of DNS requests
every time the trace capture is opened, and also provides name
resolution when reading the capture with a machine not connected to
the network.
A Name Resolution Block is often placed at the beginning of the file,
but no assumptions can be taken about its position. Multiple NRBs
can exist in a pcapng file, either due to memory constraints or
because additional name resolutions were performed by file processing
tools, like network analyzers.
A Name Resolution Block need not contain any Records, except the
nrb_record_end Record which MUST be the last Record. The addresses
and names in NRB Records MAY be repeated multiple times; i.e., the
same IP address may resolve to multiple names, the same name may
resolve to the multiple IP addresses, and even the same address-to-
name pair may appear multiple times, in the same NRB or across NRBs.
The format of the Name Resolution Block is shown in Figure 13.
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+---------------------------------------------------------------+
0 | Block Type = 0x00000004 |
+---------------------------------------------------------------+
4 | Block Total Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
8 | Record Type | Record Value Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
12 / Record Value /
/ variable length, padded to 32 bits /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
. .
. . . . other records . . . .
. .
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Record Type = nrb_record_end | Record Value Length = 0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
/ Options (variable) /
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Block Total Length |
+---------------------------------------------------------------+
Figure 13: Name Resolution Block Format
The Name Resolution Block has the following fields:
o Block Type: The block type of the Name Resolution Block is 4.
o Block Total Length: total size of this block, as described in
Section 3.1.
This is followed by zero or more Name Resolution Records (in the TLV
format), each of which contains an association between a network
address and a name. An nrb_record_end MUST be added after the last
Record, and MUST exist even if there are no other Records in the NRB.
There are currently three possible types of records:
+-----------------+--------+----------+
| Name | Code | Length |
+-----------------+--------+----------+
| nrb_record_end | 0x0000 | 0 |
| nrb_record_ipv4 | 0x0001 | Variable |
| nrb_record_ipv6 | 0x0002 | Variable |
+-----------------+--------+----------+
Table 5: Name Resolution Block Records
nrb_record_end:
The nrb_record_end record delimits the end of name resolution
records. This record is needed to determine when the list of
name resolution records has ended and some options (if any)
begin.
nrb_record_ipv4:
The nrb_record_ipv4 record specifies an IPv4 address
(contained in the first 4 octets), followed by one or more
zero-terminated UTF-8 strings containing the DNS entries for
that address. The minimum valid Record Length for this
Record Type is thus 6: 4 for the IP octets, 1 character, and
a zero-value octet terminator. Note that the IP address is
treated as four octets, one for each octet of the IP address;
it is not a 32-bit word, and thus the endianness of the SHB
does not affect this field's value.
Example: '127 0 0 1'"localhost".
[Open issue: is an empty string (i.e., just a zero-value
octet) valid?]
nrb_record_ipv6:
The nrb_record_ipv6 record specifies an IPv6 address
(contained in the first 16 octets), followed by one or more
zero-terminated strings containing the DNS entries for that
address. The minimum valid Record Length for this Record
Type is thus 18: 16 for the IP octets, 1 character, and a
zero-value octet terminator.
Example: '20 01 0d b8 00 00 00 00 00 00 00 00 12 34 56
78'"somehost".
[Open issue: is an empty string (i.e., just a zero-value
octet) valid?]
Record Types other than those specified earlier MUST be ignored and
skipped past. More Record Types will likely be defined in the
future, and MUST NOT break backwards compatibility.
Each Record Value is aligned to and padded to a 32-bit boundary. The
corresponding Record Value Length reflects the actual length of the
Record Value; it does not include the lengths of the Record Type
field, the Record Value Length field, any padding for the Record
Value, or anything after the Record Value. For Record Types with
name strings, the Record Length does include the zero-value octet
terminating that string. A Record Length of 0 is valid, unless
indicated otherwise.
After the list of Name Resolution Records, optionally, a list of
options (formatted according to the rules defined in Section 3.5) can
be present.
In addition to the options defined in Section 3.5, the following
options are valid within this block:
+---------------+------+----------+-------------------+
| Name | Code | Length | Multiple allowed? |
+---------------+------+----------+-------------------+
| ns_dnsname | 2 | Variable | no |
| ns_dnsIP4addr | 3 | 4 | no |
| ns_dnsIP6addr | 4 | 16 | no |
+---------------+------+----------+-------------------+
Table 6: Name Resolution Block Options
ns_dnsname:
The ns_dnsname option is a UTF-8 string containing the name
of the machine (DNS server) used to perform the name
resolution.
Example: "our_nameserver".
ns_dnsIP4addr:
The ns_dnsIP4addr option specifies the IPv4 address of the
DNS server. Note that the IP address is treated as four
octets, one for each octet of the IP address; it is not a
32-bit word, and thus the endianness of the SHB does not
affect this field's value.
Example: '192 168 0 1'.
ns_dnsIP6addr:
The ns_dnsIP6addr option specifies the IPv6 address of the
DNS server.
Example: '20 01 0d b8 00 00 00 00 00 00 00 00 12 34 56 78'.
*/
// NameResolutionBlock (NRB) associates numeric addresses from the capture
// with their canonical names; see the format description above.
type NameResolutionBlock struct {
	// BlockType is blocktype.NameResolution (set by the parser).
	BlockType blocktype.BlockType
	// BlockTotalLength is the total size of the block as read from the file.
	BlockTotalLength uint32
	// Records holds the address-to-name records (the terminating
	// nrb_record_end is consumed but not stored).
	Records []Record
	// Options holds the optional ns_* / common options that follow the records.
	Options NameResolutionBlockOptions
}
// Record is a single Name Resolution Record (TLV) inside an NRB.
type Record struct {
	RecordType        RecordType
	RecordValueLength uint16
	RecordValue       RecordValue
}

// RecordValue is the decoded payload of an address-to-name record.
type RecordValue struct {
	IP   net.IP
	Name string
}

// parseRecordValue decodes the raw value bytes of an IPv4/IPv6 record into
// an address plus the trailing name bytes. Note the name keeps everything
// after the address verbatim, including the terminating NUL octet(s).
func parseRecordValue(rt RecordType, b []byte) (RecordValue, error) {
	var addrLen int
	switch rt {
	case RecordTypeIPv4:
		addrLen = 4
	case RecordTypeIPv6:
		addrLen = 16
	default:
		return RecordValue{}, errors.New("unknown record type")
	}
	// Minimum valid length: address + one name character + NUL terminator.
	if len(b) < addrLen+2 {
		if rt == RecordTypeIPv4 {
			return RecordValue{}, errors.New("invalid ipv4 record value length")
		}
		return RecordValue{}, errors.New("invalid ipv6 record value length")
	}
	return RecordValue{IP: net.IP(b[:addrLen]), Name: string(b[addrLen:])}, nil
}

// String renders the record's type, declared length and decoded value.
func (r Record) String() string {
	return fmt.Sprintf("type:%s len:%d, value:%s (%s)",
		r.RecordType, r.RecordValueLength, r.RecordValue.IP.String(), r.RecordValue.Name)
}

// RecordType identifies the kind of a Name Resolution Record.
type RecordType uint16

const (
	RecordTypeEnd  RecordType = 0
	RecordTypeIPv4 RecordType = 1
	RecordTypeIPv6 RecordType = 2
)

// String returns a human-readable name for the record type.
func (t RecordType) String() string {
	switch t {
	case RecordTypeEnd:
		return "End"
	case RecordTypeIPv4:
		return "IPv4"
	case RecordTypeIPv6:
		return "IPv6"
	}
	return "(Unknown)"
}
// NameResolutionBlockOptions holds the optional fields of an NRB: the DNS
// server's name and IPv4/IPv6 addresses, plus the common comment and custom
// options.
type NameResolutionBlockOptions struct {
	DNSName       *string
	DNSIPv4Addr   *net.IP
	DNSIPv6Addr   *net.IP
	Comments      []string
	CustomOptions []CustomOption
}

// String renders the options that are present as "name:value" pairs joined
// by commas; unset options are omitted.
func (o NameResolutionBlockOptions) String() string {
	var parts []string
	if o.DNSName != nil {
		parts = append(parts, fmt.Sprintf("dnsname:%s", *o.DNSName))
	}
	if o.DNSIPv4Addr != nil {
		parts = append(parts, fmt.Sprintf("dnsIP4addr:%s", *o.DNSIPv4Addr))
	}
	if o.DNSIPv6Addr != nil {
		parts = append(parts, fmt.Sprintf("dnsIP6addr:%s", *o.DNSIPv6Addr))
	}
	return strings.Join(parts, ",")
}
// GetType returns the block's type code.
func (b *NameResolutionBlock) GetType() blocktype.BlockType {
	return b.BlockType
}

// String renders the block type, total length, all records and options.
func (b *NameResolutionBlock) String() string {
	recs := make([]string, 0, len(b.Records))
	for _, rec := range b.Records {
		recs = append(recs, rec.String())
	}
	return fmt.Sprintf("%s block_len:%d records:[%s] options:{%s}",
		b.BlockType.String(), b.BlockTotalLength, strings.Join(recs, ","), b.Options.String())
}
func (r *Reader) parseNameResolutionBlock(blockTotalLength uint32, bodyBytes []byte) (*NameResolutionBlock, error) {
br := newEndiannessAwareReader(r.endian, bytes.NewReader(bodyBytes))
records := make([]Record, 0)
records_loop:
for {
rtv, err := br.readUint16()
if err != nil {
return nil, errors.Wrap(err, "unable to read record type")
}
rt := RecordType(rtv)
rl, err := br.readUint16()
if err != nil {
return nil, errors.Wrap(err, "unable to read record length")
}
if rt == RecordTypeEnd {
break records_loop
}
rvb, err := br.readBytes(uint(rl))
if err != nil {
return nil, errors.Wrap(err, "unable to read record value")
}
rv, err := parseRecordValue(rt, rvb)
if err != nil {
return nil, errors.Wrap(err, "unable to parse record value")
}
records = append(records, Record{
RecordType: rt,
RecordValueLength: rl,
RecordValue: rv,
})
// read padding
padLen := 4 - (ol & 0x3)
_, err = br.readBytes(uint(padLen))
if err != nil {
return nil, errors.Wrap(err, "unable to read padding in an option value")
}
}
var opts NameResolutionBlockOptions
options_loop:
for {
oc, err := br.readUint16()
if err != nil {
break
}
ol, err := br.readUint16()
if err != nil {
break
}
switch optioncode.OptionCode(oc) {
case optioncode.EndOfOpt:
break options_loop
case optioncode.Comment:
readCommonOptionComment(ol, br, &opts.Comments)
case optioncode.CustomUTF8, optioncode.CustomUTF8WithoutNull, optioncode.CustomBinary, optioncode.CustomBinaryShouldNotCopied:
err := readCustomOption(oc, ol, br, &opts.CustomOptions)
if err != nil {
return nil, err
}
case optioncode.NS_DNSName:
ov, err := br.readBytes(uint(ol))
if err != nil {
return nil, errors.Wrap(err, "unable to read ns_dnsname")
}
name := string(ov)
opts.DNSName = &name
case optioncode.NS_DNSIP4Addr:
if ol != 4 {
return nil, errors.New("invalid option length for ns_dnsIP4addr")
}
ov, err := br.readBytes(4)
if err != nil {
return nil, errors.Wrap(err, "unable to read ns_dnsIP4addr")
}
ipv4 := net.IP(ov)
opts.DNSIPv4Addr = &ipv4
case optioncode.NS_DNSIP6Addr:
if ol != 16 {
return nil, errors.New("invalid option length for ns_dnsIP6addr")
}
ov, err := br.readBytes(16)
if err != nil {
return nil, errors.Wrap(err, "unable to read ns_dnsIP6addr")
}
ipv6 := net.IP(ov)
opts.DNSIPv6Addr = &ipv6
default:
_, err := br.readBytes(uint(ol))
if err != nil {
return nil, errors.Wrapf(err, "unable to read unknown option (%d)", oc)
}
}
// read padding
padLen := ol & 0x3
_, err = br.readBytes(uint(padLen))
if err != nil {
return nil, errors.Wrap(err, "unable to read padding in an option value")
}
}
return &NameResolutionBlock{
BlockType: blocktype.NameResolution,
BlockTotalLength: blockTotalLength,
Records: records,
Options: opts,
}, nil
} | pcapng/name_resolution_block.go | 0.698946 | 0.505676 | name_resolution_block.go | starcoder |
package pgsql
import (
"database/sql"
"database/sql/driver"
"strconv"
)
// VarBitArrayFromBoolSliceSlice returns a driver.Valuer that produces a PostgreSQL varbit[] from the given Go [][]bool.
// A nil outer slice maps to SQL NULL; a nil inner slice maps to a NULL array element.
func VarBitArrayFromBoolSliceSlice(val [][]bool) driver.Valuer {
	return varBitArrayFromBoolSliceSlice{val: val}
}

// VarBitArrayToBoolSliceSlice returns an sql.Scanner that converts a PostgreSQL varbit[] into a Go [][]bool and sets it to val.
// A SQL NULL sets *val to nil; NULL elements become nil inner slices.
func VarBitArrayToBoolSliceSlice(val *[][]bool) sql.Scanner {
	return varBitArrayToBoolSliceSlice{val: val}
}

// VarBitArrayFromUint8SliceSlice returns a driver.Valuer that produces a PostgreSQL varbit[] from the given Go [][]uint8.
// Each inner element is treated as a single bit: 1 → '1', anything else → '0'.
func VarBitArrayFromUint8SliceSlice(val [][]uint8) driver.Valuer {
	return varBitArrayFromUint8SliceSlice{val: val}
}

// VarBitArrayToUint8SliceSlice returns an sql.Scanner that converts a PostgreSQL varbit[] into a Go [][]uint8 and sets it to val.
func VarBitArrayToUint8SliceSlice(val *[][]uint8) sql.Scanner {
	return varBitArrayToUint8SliceSlice{val: val}
}

// VarBitArrayFromStringSlice returns a driver.Valuer that produces a PostgreSQL varbit[] from the given Go []string.
// Each string is expected to be a bit string such as "101"; empty strings become "".
func VarBitArrayFromStringSlice(val []string) driver.Valuer {
	return varBitArrayFromStringSlice{val: val}
}

// VarBitArrayToStringSlice returns an sql.Scanner that converts a PostgreSQL varbit[] into a Go []string and sets it to val.
func VarBitArrayToStringSlice(val *[]string) sql.Scanner {
	return varBitArrayToStringSlice{val: val}
}

// VarBitArrayFromInt64Slice returns a driver.Valuer that produces a PostgreSQL varbit[] from the given Go []int64.
// Each value is rendered in base 2.
func VarBitArrayFromInt64Slice(val []int64) driver.Valuer {
	return varBitArrayFromInt64Slice{val: val}
}

// VarBitArrayToInt64Slice returns an sql.Scanner that converts a PostgreSQL varbit[] into a Go []int64 and sets it to val.
func VarBitArrayToInt64Slice(val *[]int64) sql.Scanner {
	return varBitArrayToInt64Slice{val: val}
}
type varBitArrayFromBoolSliceSlice struct {
	val [][]bool
}

// Value renders the wrapped [][]bool as a PostgreSQL varbit[] literal such
// as {101,NULL,""}. A nil outer slice maps to SQL NULL, a nil inner slice
// to a NULL element, and an empty inner slice to "".
func (v varBitArrayFromBoolSliceSlice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	}
	if len(v.val) == 0 {
		return []byte{'{', '}'}, nil
	}

	// First pass: compute the exact output size (braces + separators +
	// each element's rendered width) so the buffer is allocated once.
	size := 2 + (len(v.val) - 1)
	for _, bits := range v.val {
		switch {
		case bits == nil:
			size += len("NULL")
		case len(bits) == 0:
			size += len(`""`)
		default:
			size += len(bits)
		}
	}

	// Second pass: append each element followed by a comma; the final
	// comma is rewritten into the closing brace.
	out := make([]byte, 0, size)
	out = append(out, '{')
	for _, bits := range v.val {
		switch {
		case bits == nil:
			out = append(out, "NULL"...)
		case len(bits) == 0:
			out = append(out, '"', '"')
		default:
			for _, b := range bits {
				if b {
					out = append(out, '1')
				} else {
					out = append(out, '0')
				}
			}
		}
		out = append(out, ',')
	}
	out[len(out)-1] = '}'
	return out, nil
}
type varBitArrayToBoolSliceSlice struct {
	val *[][]bool
}

// Scan parses a PostgreSQL varbit[] into [][]bool. A SQL NULL yields a nil
// outer slice, a NULL element a nil inner slice, and "" an empty inner slice.
func (v varBitArrayToBoolSliceSlice) Scan(src interface{}) error {
	arr, err := srcbytes(src)
	if err != nil {
		return err
	}
	if arr == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseCommaArray(arr)
	out := make([][]bool, len(elems))
	for i, elem := range elems {
		// NULL element → leave out[i] as nil.
		if len(elem) == 4 && elem[0] == 'N' {
			continue
		}
		// Quoted empty bit string → empty (non-nil) slice.
		if len(elem) == 2 && elem[0] == '"' {
			out[i] = []bool{}
			continue
		}
		bits := make([]bool, len(elem))
		for j, c := range elem {
			bits[j] = c == '1'
		}
		out[i] = bits
	}
	*v.val = out
	return nil
}
type varBitArrayFromUint8SliceSlice struct {
	val [][]uint8
}

// Value renders the wrapped [][]uint8 as a PostgreSQL varbit[] literal.
// Each element is one bit: 1 → '1', any other value → '0'. A nil outer
// slice maps to SQL NULL, a nil inner slice to NULL, an empty one to "".
func (v varBitArrayFromUint8SliceSlice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	}
	if len(v.val) == 0 {
		return []byte{'{', '}'}, nil
	}

	// First pass: compute the exact output size for a single allocation.
	size := 2 + (len(v.val) - 1)
	for _, bits := range v.val {
		switch {
		case bits == nil:
			size += len("NULL")
		case len(bits) == 0:
			size += len(`""`)
		default:
			size += len(bits)
		}
	}

	// Second pass: append each element and a comma; the trailing comma
	// becomes the closing brace.
	out := make([]byte, 0, size)
	out = append(out, '{')
	for _, bits := range v.val {
		switch {
		case bits == nil:
			out = append(out, "NULL"...)
		case len(bits) == 0:
			out = append(out, '"', '"')
		default:
			for _, b := range bits {
				if b == 1 {
					out = append(out, '1')
				} else {
					out = append(out, '0')
				}
			}
		}
		out = append(out, ',')
	}
	out[len(out)-1] = '}'
	return out, nil
}
type varBitArrayToUint8SliceSlice struct {
	val *[][]uint8
}

// Scan parses a PostgreSQL varbit[] into [][]uint8 ('1' → 1, '0' → 0).
// A SQL NULL yields a nil outer slice, a NULL element a nil inner slice,
// and "" an empty inner slice.
func (v varBitArrayToUint8SliceSlice) Scan(src interface{}) error {
	arr, err := srcbytes(src)
	if err != nil {
		return err
	}
	if arr == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseCommaArray(arr)
	out := make([][]uint8, len(elems))
	for i, elem := range elems {
		// NULL element → leave out[i] as nil.
		if len(elem) == 4 && elem[0] == 'N' {
			continue
		}
		// Quoted empty bit string → empty (non-nil) slice.
		if len(elem) == 2 && elem[0] == '"' {
			out[i] = []uint8{}
			continue
		}
		bits := make([]uint8, len(elem))
		for j, c := range elem {
			if c == '1' {
				bits[j] = 1
			}
		}
		out[i] = bits
	}
	*v.val = out
	return nil
}
type varBitArrayFromStringSlice struct {
	val []string
}

// Value renders the wrapped []string (each element a bit string such as
// "101") as a PostgreSQL varbit[] literal. A nil slice maps to SQL NULL;
// empty strings become "".
func (v varBitArrayFromStringSlice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	}
	if len(v.val) == 0 {
		return []byte{'{', '}'}, nil
	}

	// First pass: compute the exact output size for a single allocation.
	size := 2 + (len(v.val) - 1)
	for _, s := range v.val {
		if s == "" {
			size += len(`""`)
		} else {
			size += len(s)
		}
	}

	// Second pass: append each element and a comma; the trailing comma
	// becomes the closing brace.
	out := make([]byte, 0, size)
	out = append(out, '{')
	for _, s := range v.val {
		if s == "" {
			out = append(out, '"', '"')
		} else {
			out = append(out, s...)
		}
		out = append(out, ',')
	}
	out[len(out)-1] = '}'
	return out, nil
}
type varBitArrayToStringSlice struct {
	val *[]string
}

// Scan parses a PostgreSQL varbit[] into []string, one bit string per
// element. A SQL NULL yields a nil slice; "" elements become empty strings.
func (v varBitArrayToStringSlice) Scan(src interface{}) error {
	arr, err := srcbytes(src)
	if err != nil {
		return err
	}
	if arr == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseCommaArray(arr)
	out := make([]string, len(elems))
	for i, elem := range elems {
		// Quoted empty bit string → leave out[i] as "".
		if len(elem) == 2 && elem[0] == '"' {
			continue
		}
		out[i] = string(elem)
	}
	*v.val = out
	return nil
}
type varBitArrayFromInt64Slice struct {
	val []int64
}

// Value renders the wrapped []int64 as a PostgreSQL varbit[] literal, with
// each value written in base 2. A nil slice maps to SQL NULL.
// NOTE(review): negative values render with a leading '-', which is not a
// valid varbit literal — confirm callers only pass non-negative values.
func (v varBitArrayFromInt64Slice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	}
	if len(v.val) == 0 {
		return []byte{'{', '}'}, nil
	}
	out := append([]byte(nil), '{')
	for _, n := range v.val {
		out = strconv.AppendInt(out, n, 2)
		out = append(out, ',')
	}
	// Rewrite the trailing comma into the closing brace.
	out[len(out)-1] = '}'
	return out, nil
}
type varBitArrayToInt64Slice struct {
val *[]int64
}
func (v varBitArrayToInt64Slice) Scan(src interface{}) error {
arr, err := srcbytes(src)
if err != nil {
return err
} else if arr == nil {
*v.val = nil
return nil
}
elems := pgParseCommaArray(arr)
int64s := make([]int64, len(elems))
for i := 0; i < len(elems); i++ {
i64, err := strconv.ParseInt(string(elems[i]), 2, 64)
if err != nil {
return err
}
int64s[i] = i64
}
*v.val = int64s
return nil
} | pgsql/varbitarr.go | 0.603465 | 0.570152 | varbitarr.go | starcoder |
package mmaths
import (
"math/rand"
"sort"
)
// QsortInterface is the contract Qsort needs: sort.Interface plus the
// ability to split the collection around a pivot index.
type QsortInterface interface {
	sort.Interface
	// Partition returns slice[:i] and slice[i+1:]. Both views must
	// alias the original memory, since Qsort works in place.
	Partition(i int) (left QsortInterface, right QsortInterface)
}

// IntSlice adapts a plain []int to QsortInterface.
type IntSlice []int

func (s IntSlice) Len() int { return len(s) }

func (s IntSlice) Less(i, j int) bool { return s[i] < s[j] }

func (s IntSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// Partition splits the slice around the element at index i.
func (s IntSlice) Partition(i int) (left QsortInterface, right QsortInterface) {
	return IntSlice(s[:i]), IntSlice(s[i+1:])
}
// Qsort a (quick) sorting algorithm that is apparently faster than Go's native sort.Sort
// from: https://stackoverflow.com/questions/23276417/golang-custom-sort-is-faster-than-native-sort#23278451
// It sorts a in place using prng to pick pivots and returns a for
// convenience.
func Qsort(a QsortInterface, prng *rand.Rand) QsortInterface {
	if a.Len() < 2 {
		return a
	}
	left, right := 0, a.Len()-1
	// Pick a pivot uniformly at random. Intn is the idiomatic bounded
	// draw; the previous Int()%n was not uniform for n that do not
	// divide the generator's range.
	pivotIndex := prng.Intn(a.Len())
	// Move the pivot to the right
	a.Swap(pivotIndex, right)
	// Pile elements smaller than the pivot on the left
	for i := 0; i < a.Len(); i++ {
		if a.Less(i, right) {
			a.Swap(i, left)
			left++
		}
	}
	// Place the pivot after the last smaller element
	a.Swap(left, right)
	// Recurse into both halves; the sub-sorts mutate a in place.
	leftSide, rightSide := a.Partition(left)
	Qsort(leftSide, prng)
	Qsort(rightSide, prng)
	return a
}
// QsortIndx same as above, but preserves original slice index.
// modified from: https://stackoverflow.com/questions/23276417/golang-custom-sort-is-faster-than-native-sort#23278451
// It sorts a in place and returns the permuted original indices.
func QsortIndx(a QsortIndxInterface, prng *rand.Rand) []int {
	if a.Len() < 2 {
		// Fix: return the collection's own indices. The previous
		// hard-coded []int{0} was wrong for empty input and for a
		// single element whose original index was not 0.
		return a.Indices()
	}
	left, right := 0, a.Len()-1
	// Pick a pivot uniformly at random (Intn avoids modulo bias).
	pivotIndex := prng.Intn(a.Len())
	// Move the pivot to the right
	a.Swap(pivotIndex, right)
	// Pile elements smaller than the pivot on the left
	for i := 0; i < a.Len(); i++ {
		if a.Less(i, right) {
			a.Swap(i, left)
			left++
		}
	}
	// Place the pivot after the last smaller element
	a.Swap(left, right)
	// Recurse; the sub-sorts mutate a in place, so their return
	// values are intentionally discarded.
	leftSide, rightSide := a.Partition(left)
	QsortIndx(leftSide, prng)
	QsortIndx(rightSide, prng)
	return a.Indices()
}
// QsortIndxInterface is the contract QsortIndx needs: sort.Interface
// plus pivot partitioning and access to the tracked original indices.
type QsortIndxInterface interface {
	sort.Interface
	// Partition returns slice[:i] and slice[i+1:]. Both views must
	// alias the original memory, since the sort works in place.
	Partition(i int) (left QsortIndxInterface, right QsortIndxInterface)
	Indices() []int
}

// IndexedSlice pairs float values with their original positions so a
// sort can report the permutation it applied.
type IndexedSlice struct {
	Indx []int
	Val  []float64
}

// New initializes the slice with a copy of v and identity indices.
func (is *IndexedSlice) New(v []float64) {
	n := len(v)
	is.Indx = make([]int, n)
	is.Val = make([]float64, n)
	copy(is.Val, v)
	for i := range is.Indx {
		is.Indx[i] = i
	}
}

// Indices returns the (possibly permuted) original positions.
func (is IndexedSlice) Indices() []int { return is.Indx }

func (is IndexedSlice) Len() int { return len(is.Indx) }

func (is IndexedSlice) Less(i, j int) bool { return is.Val[i] < is.Val[j] }

// Swap exchanges both the values and their tracked indices.
func (is IndexedSlice) Swap(i, j int) {
	is.Indx[i], is.Indx[j] = is.Indx[j], is.Indx[i]
	is.Val[i], is.Val[j] = is.Val[j], is.Val[i]
}

// Partition splits both parallel slices around index i.
func (is IndexedSlice) Partition(i int) (left QsortIndxInterface, right QsortIndxInterface) {
	left = IndexedSlice{Indx: is.Indx[:i], Val: is.Val[:i]}
	right = IndexedSlice{Indx: is.Indx[i+1:], Val: is.Val[i+1:]}
	return
}
// SortMapInt sorts the map's entries by value (via IndexedSlice) and
// returns parallel key and value slices in that order.
func SortMapInt(m map[int]int) ([]int, []int) {
	keys := make([]int, 0, len(m))
	vals := make([]float64, 0, len(m))
	for k, v := range m {
		keys = append(keys, k)
		vals = append(vals, float64(v))
	}
	sort.Sort(IndexedSlice{Indx: keys, Val: vals})
	ints := make([]int, len(vals))
	for i, v := range vals {
		ints[i] = int(v)
	}
	return keys, ints
}
// SortMapFloat returns the key-values sorted by value
func SortMapFloat(m map[int]float64) ([]int, []float64) {
vi, vf, ii := make([]int, len(m)), make([]float64, len(m)), 0
for k, v := range m {
vi[ii] = k
vf[ii] = v
ii++
}
sort.Sort(IndexedSlice{Indx: vi, Val: vf})
vfi := make([]float64, len(m))
for i, v := range vf {
vfi[i] = v
}
return vi, vfi
} | sort.go | 0.782455 | 0.467453 | sort.go | starcoder |
package main
import "fmt"
/*
Here we'll look at different ways of writing components.
We'll use `chan string` as the port, we'll look the connections
in a separate folder.
*/
/*
First of all the usual verison, you have a struct per component,
where fields may define configuration.
The `In` could be hooked up manually or via reflection.
*/
// Printer consumes strings from In and writes each one to stdout.
type Printer struct {
	In <-chan string
}

// Execute drains In, printing every value, and returns once the
// channel is closed.
func (p *Printer) Execute() {
	for {
		value, ok := <-p.In
		if !ok {
			return
		}
		fmt.Println(value)
	}
}
/*
Then we could do the port lookup inside the component constructor:
*/
// Printer2 resolves its input port once, at construction time.
type Printer2 struct {
	in <-chan string
}

// NewPrinter2 builds a Printer2, looking up the "IN" port on the
// owning process during construction.
func NewPrinter2(p *Process) *Printer2 {
	return &Printer2{
		in: p.In("IN"),
	}
}

// Execute drains the previously resolved port, printing each value,
// until the channel is closed. The process argument is unused here
// because the port was already captured by the constructor.
func (printer *Printer2) Execute(p *Process) {
	for value := range printer.in {
		fmt.Println(value)
	}
}
/*
Alternatively, it could be done as part of Execute:
*/
// Printer3 resolves its input port lazily, inside Execute.
type Printer3 struct {
	in <-chan string
}

// Execute looks up the "IN" port on every call, then drains it,
// printing each value until the channel is closed.
func (printer *Printer3) Execute(p *Process) {
	printer.in = p.In("IN")
	for value := range printer.in {
		fmt.Println(value)
	}
}
/*
One common approach is to use closures to define functionality.
This returns the execute function.
*/
// Printer4 captures its port at wiring time and returns the run loop
// as a closure; calling the closure drains the port, printing each
// value until the channel is closed.
func Printer4(p *Process) (execute func()) {
	in := p.In("IN")
	return func() {
		for value := range in {
			fmt.Println(value)
		}
	}
}
/*
Now via reflection it would also be possible to define components as functions
and the ports as arguments.
One of the issues is that with reflection it's not possible to figure out the
argument names.
*/
// Printer5 takes its port directly as a parameter (wiring would be
// done via reflection). It prints each value until in is closed.
func Printer5(in <-chan string) {
	for {
		value, ok := <-in
		if !ok {
			return
		}
		fmt.Println(value)
	}
}
/*
One option to capture the names is to use a struct instead.
Of course, both versions using reflection will have some overhead.
To gain persistence across runs it would need to persist the arguments.
*/
// Printer6 receives its ports bundled in an anonymous struct so the
// field names stay recoverable via reflection. It prints each value
// until the channel is closed.
func Printer6(port *struct {
	In <-chan string
}) {
	for {
		value, ok := <-port.In
		if !ok {
			return
		}
		fmt.Println(value)
	}
}
/*
Although in principle it doesn't differ much from this definition
that uses reflection to fill in the ports.
This version would be preferred over the previous ones, because it's slightly
clearer how it works.
*/
// Printer7 exposes its port as an exported struct field so reflection
// can fill it in before Execute runs.
type Printer7 struct {
	In <-chan string
}

// Execute drains In, printing each value, until the channel closes.
func (p7 *Printer7) Execute() {
	for {
		value, ok := <-p7.In
		if !ok {
			return
		}
		fmt.Println(value)
	}
}
/*
It's also possible to treat components as just functionality and no state at all.
The first example uses a `map[string]string` to hang data to the process.
*/
// Printer8 is a stateless component function: anything it needs to
// remember is hung off the process itself via a string-keyed map.
func Printer8(p *Process) {
	in := p.In("IN")
	data := p.Data()
	for value := range in {
		data[value] = "found"
	}
}
/*
For stateless components we could also use an approach similar to
sync.Pool to persist typed data on the process.
*/
// Printer9 keeps typed per-component state on the process, created on
// demand by a factory and looked up under a string key.
func Printer9(p *Process) {
	in := p.In("IN")
	// Data is the per-component state persisted across runs.
	type Data struct {
		Counter int
	}
	data := p.Data2("default", func() interface{} {
		return &Data{}
	}).(*Data)
	for value := range in {
		fmt.Println(value)
		data.Counter++
	}
}
/*
To prevent name collisions between components a tag type can be used instead
of a string.
*/
// Printer10 is like Printer9 but keys its persisted state with a
// private tag type, so no other component can collide with it.
func Printer10(p *Process) {
	in := p.In("IN")
	// Tag is a unique key type local to this component.
	type Tag struct{}
	// Data is the per-component state persisted across runs.
	type Data struct {
		Counter int
	}
	data := p.Data2(Tag{}, func() interface{} {
		return &Data{}
	}).(*Data)
	for value := range in {
		fmt.Println(value)
		data.Counter++
	}
}
/* stub to make compilation work */
// Process stands in for the runtime that owns a component's ports and
// persisted data; all methods are unimplemented placeholders.
type Process struct{}

// In resolves a named input port. Stubbed to nil.
func (p *Process) In(name string) <-chan string {
	// TODO:
	return nil
}

// Data returns the string-keyed scratch map for this process. Stubbed
// to nil.
func (p *Process) Data() map[string]string {
	// TODO:
	return nil
}

// Data2 returns the value stored under tag, creating it with create on
// first use. Stubbed to nil.
func (p *Process) Data2(tag interface{}, create func() interface{}) interface{} {
	// TODO:
	return nil
}
package continuous
import (
integ "github.com/jtejido/ggsl/integration"
"github.com/jtejido/ggsl/specfunc"
"github.com/jtejido/stats"
"github.com/jtejido/stats/err"
smath "github.com/jtejido/stats/math"
"math"
"math/rand"
)
// Q-Gaussian distribution with location mean (μ), scale (b) and
// shape (q).
// https://en.wikipedia.org/wiki/Q-Gaussian_distribution
type QGaussian struct {
	mean, scale, q float64 // μ, b, q
	src rand.Source
}

// NewQGaussian constructs a q-Gaussian using the default random
// source.
func NewQGaussian(mean, scale, q float64) (*QGaussian, error) {
	return NewQGaussianWithSource(mean, scale, q, nil)
}

// NewQGaussianWithSource constructs a q-Gaussian with the supplied
// random source (nil falls back to the package-global source). It
// rejects scale <= 0 and q >= 3, which lie outside the supported
// parameter space.
func NewQGaussianWithSource(mean, scale, q float64, src rand.Source) (*QGaussian, error) {
	if scale <= 0 || q >= 3 {
		return nil, err.Invalid()
	}
	r := new(QGaussian)
	r.mean = mean
	r.scale = scale
	r.q = q
	r.src = src
	return r, nil
}
// String describes the distribution, its parameter limits and its
// support.
func (q *QGaussian) String() string {
	return "QGaussian: Parameters - " + q.Parameters().String() + ", Support(x) - " + q.Support().String()
}

// Parameters reports the valid parameter ranges:
// μ ∈ (-∞,∞)
// b ∈ (0,∞)
// q ∈ (-∞,3)
func (q *QGaussian) Parameters() stats.Limits {
	return stats.Limits{
		"μ": stats.Interval{math.Inf(-1), math.Inf(1), true, true},
		"b": stats.Interval{0, math.Inf(1), true, true},
		"q": stats.Interval{math.Inf(-1), 3, true, true},
	}
}

// Support reports where the density is non-zero:
// x ∈ (-∞,∞) for 1 <= q < 3
// x ∈ [-1/sqrt(b(1-q)),1/sqrt(b(1-q))] for q < 1 (bounded support)
func (q *QGaussian) Support() stats.Interval {
	if 1 <= q.q && q.q < 3 {
		return stats.Interval{math.Inf(-1), math.Inf(1), true, true}
	}
	return stats.Interval{-(1 / math.Sqrt(q.scale*(1-q.q))), 1 / math.Sqrt(q.scale*(1-q.q)), false, false}
}
// Probability is the probability density at x. Three regimes are
// handled separately: q == 1 reduces to the ordinary Gaussian pdf;
// 1 < q < 3 uses the heavy-tailed q-exponential form with Gamma
// normalization; q < 1 has bounded support, so the density is zero
// outside it (the rge check below tests membership in that support).
func (q *QGaussian) Probability(x float64) float64 {
	if q.q == 1 {
		// Standard normal density with mean μ and deviation b.
		return math.Exp(-(math.Pow(-x+q.mean, 2) / (2 * (q.scale * q.scale)))) / (math.Sqrt(2*math.Pi) * q.scale)
	} else if 1 < q.q && q.q < 3 {
		num := math.Sqrt(-1+q.q) * math.Pow(1+(((-1+q.q)*math.Pow(-x+q.mean, 2))/(2*(q.scale*q.scale))), 1/(1-q.q)) * specfunc.Gamma(1/(-1+q.q))
		denom := math.Sqrt(2*math.Pi) * q.scale * specfunc.Gamma(-((-3 + q.q) / (2 * (-1 + q.q))))
		return num / denom
	} else if q.q < 1 {
		// rge normalizes x onto [-1, 1] over the bounded support.
		rge := (math.Sqrt((1-q.q)/(q.scale*q.scale)) * (x - q.mean)) / math.Sqrt(2)
		if -1 <= rge && rge <= 1 {
			num := math.Sqrt(1-q.q) * math.Pow(1+(((-1+q.q)*math.Pow(-x+q.mean, 2))/(2*(q.scale*q.scale))), 1/(1-q.q)) * specfunc.Gamma((3./2)+(1/(1-q.q)))
			denom := math.Sqrt(2*math.Pi) * q.scale * specfunc.Gamma(1+(1/(1-q.q)))
			return num / denom
		}
	}
	return 0
}
// integrand adapts a pdf to the integration package's function
// interface.
type integrand struct {
	pdf func(float64) float64
}

// Evaluate returns the pdf at x.
func (i *integrand) Evaluate(x float64) float64 {
	return i.pdf(x)
}

// Distribution is the cdf at x, computed by numerically integrating
// the pdf over (-∞, x] with Qagil.
// We don't have closed-form for this. The limit value should be changeable.
//
// NOTE(review): the errors returned by NewWorkspace and Qagil are
// discarded, and abserr is passed (zero-valued) as Qagil's absolute
// tolerance argument — confirm both are intentional.
func (q *QGaussian) Distribution(x float64) float64 {
	f := &integrand{pdf: q.Probability}
	workspace, _ := integ.NewWorkspace(30)
	var cdf, abserr float64
	integ.Qagil(f, x, abserr, 1e-12, 30, workspace, &cdf, &abserr)
	return cdf
}
// Mean is μ; it is only defined for q < 2 (NaN otherwise).
func (q *QGaussian) Mean() float64 {
	if q.q < 2 {
		return q.mean
	}
	return math.NaN()
}

// Median is μ for all valid parameters.
func (q *QGaussian) Median() float64 {
	return q.mean
}

// Variance is 2b²/(5-3q) for q < 5/3, infinite for 5/3 <= q < 2, and
// undefined (NaN) beyond that.
func (q *QGaussian) Variance() float64 {
	if q.q < 5./3 {
		return (2 * (q.scale * q.scale)) / (5 - 3*q.q)
	} else if 5./3 <= q.q && q.q < 2 {
		return math.Inf(1)
	}
	return math.NaN()
}

// Skewness is zero (the distribution is symmetric) for q < 3/2 and
// undefined (NaN) otherwise.
func (q *QGaussian) Skewness() float64 {
	if q.q < 3./2 {
		return 0
	}
	return math.NaN()
}

// ExKurtosis is 6(q-1)/(7-5q) for q < 7/5 and undefined (NaN)
// otherwise.
func (q *QGaussian) ExKurtosis() float64 {
	if q.q < 7./5 {
		return 6 * ((q.q - 1) / (7 - 5*q.q))
	}
	return math.NaN()
}
// Rand draws one variate via the generalized Box–Muller method:
// z = sqrt(-2 ln_q'(u1)) cos(2π u2) with q' = (1+q)/(3-q), then
// shifted by μ and scaled by 1/sqrt(b(3-q)).
//
// see also, <NAME>, <NAME>, <NAME> and <NAME>
// Generalized Box–Muller method for generating q-Gaussian random deviates
// IEEE Transactions on Information Theory 53, 4805 (2007)
//
// NOTE(review): when a custom source is set, rand.New is constructed on
// every call — consider caching the *rand.Rand if this is hot.
func (q *QGaussian) Rand() float64 {
	var rnd func() float64
	if q.src != nil {
		rnd = rand.New(q.src).Float64
	} else {
		rnd = rand.Float64
	}
	qGen := (1 + q.q) / (3 - q.q)
	u1 := rnd()
	u2 := rnd()
	z := math.Sqrt(-2*smath.Logq(u1, qGen)) * math.Cos(2*math.Pi*u2)
	return q.mean + (z / math.Sqrt(q.scale*(3-q.q)))
}
package main
import (
"container/list"
"math"
"github.com/gazed/vu"
)
// end is the screen that shows the end of game animation. This is a model of
// a silicon atom. No one is expected to get here based on the current game
// difficulty settings.
type end struct {
	scene *vu.Ent // 3D scene.
	bg *vu.Ent // Background.
	atom *vu.Ent // Group the animated atom.
	e1 *vu.Ent // Up/down electron group.
	e2 *vu.Ent // Left/right electron group.
	e3 *vu.Ent // Slash electron group.
	e4 *vu.Ent // Backslash electron group.
	eles []*electron // All electrons.
	scale float64 // Used for the fade in animation.
	fov float64 // Field of view.
	evolving bool // Used to disable keys during screen transitions.
}

// Implement the screen interface.
func (e *end) fadeIn() animation { return e.createFadeIn() } // entry animation.
func (e *end) fadeOut() animation { return nil } // no exit animation.
func (e *end) resize(width, height int) {} // no-op.
// activate shows or hides the end screen for the given screen state
// and records whether a transition (evolve) is in progress. Unknown
// states are logged.
func (e *end) activate(state int) {
	switch state {
	case screenActive, screenEvolving:
		e.scene.Cull(false)
		e.evolving = state == screenEvolving
	case screenDeactive:
		e.scene.Cull(true)
		e.evolving = false
	default:
		logf("end state error")
	}
}
// User input to game events. Implements screen interface.
// Pressing Esc (outside a transition) publishes a toggleOptions event.
func (e *end) processInput(in *vu.Input, eventq *list.List) {
	for press, down := range in.Down {
		switch {
		case press == vu.KEsc && down == 1 && !e.evolving:
			publish(eventq, toggleOptions, nil)
		}
	}
}

// Process game events. Implements screen interface.
// Every queued event is consumed; toggleOptions switches to the game
// configuration screen, anything else leaves the game finished.
func (e *end) processEvents(eventq *list.List) (transition int) {
	for ev := eventq.Front(); ev != nil; ev = ev.Next() {
		eventq.Remove(ev)
		event := ev.Value.(*event)
		switch event.id {
		case toggleOptions:
			return configGame
		}
	}
	return finishGame
}
// newEndScreen creates the end game screen.
// Expected to be called once on game startup.
func newEndScreen(mp *bampf, ww, wh int) *end {
	e := &end{}
	e.scale = 0.01 // atom starts tiny; the fade-in grows it to 1.
	e.fov = 75
	e.scene = mp.eng.AddScene()
	e.scene.Cam().SetClip(0.1, 50).SetFov(e.fov).SetAt(0, 0, 10)
	e.scene.Cull(true) // hidden until activated.
	// use a filter effect for the background.
	e.bg = e.scene.AddPart().SetScale(100, 100, 1).SetAt(0, 0, -10)
	m := e.bg.MakeModel("wave", "msh:square", "mat:solid")
	m.SetUniform("screen", 500, 500)
	// create the atom and its electrons.
	e.newAtom()
	return e
}

// createFadeIn returns a new fade-in animation. The initial setup is necessary for
// cases where the user finishes the game and then plays again and finishes again
// all in one application session.
func (e *end) createFadeIn() animation {
	e.scale = 0.01
	e.atom.SetScale(e.scale, e.scale, e.scale)
	return e.newFadeAnimation()
}
// create the silicon atom: a central rotating billboard image plus
// four electron groups (vertical, horizontal, slash and backslash
// orbits) with 14 electrons in total.
func (e *end) newAtom() {
	e.atom = e.scene.AddPart().SetScale(e.scale, e.scale, e.scale).SetAt(0, 0, 0)
	// rotating image.
	cimg := e.atom.AddPart().SetScale(2, 2, 2)
	model := cimg.MakeModel("spinball", "msh:billboard", "tex:ele", "tex:halo")
	model.Clamp("ele").Clamp("halo")
	model.SetAlpha(0.6)
	// create the electrons.
	e.e1 = e.atom.AddPart() // up/down group.
	e.eles = []*electron{}
	e.eles = append(e.eles, newElectron(e.e1, 2, 90))
	e.eles = append(e.eles, newElectron(e.e1, 3, 90))
	e.eles = append(e.eles, newElectron(e.e1, 4, 90))
	e.eles = append(e.eles, newElectron(e.e1, 2, -90))
	e.eles = append(e.eles, newElectron(e.e1, 3, -90))
	e.eles = append(e.eles, newElectron(e.e1, 4, -90))
	e.e2 = e.atom.AddPart() // left/right group.
	e.eles = append(e.eles, newElectron(e.e2, 3, 0))
	e.eles = append(e.eles, newElectron(e.e2, 4, 0))
	e.eles = append(e.eles, newElectron(e.e2, 3, 180))
	e.eles = append(e.eles, newElectron(e.e2, 4, 180))
	e.e3 = e.atom.AddPart() // slash group.
	e.eles = append(e.eles, newElectron(e.e3, 3, 45))
	e.eles = append(e.eles, newElectron(e.e3, 3, -135))
	e.e4 = e.atom.AddPart() // backslash group.
	e.eles = append(e.eles, newElectron(e.e4, 3, -45))
	e.eles = append(e.eles, newElectron(e.e4, 3, 135))
}

// newFadeAnimation creates the fade-in to the end screen animation.
func (e *end) newFadeAnimation() animation { return &fadeEndAnimation{e: e, ticks: 75} }
// end
// ===========================================================================
// fadeEndAnimation fades in the end screen upon game completion by
// growing the atom and raising the background alpha over a fixed
// number of ticks.
type fadeEndAnimation struct {
	e *end // Main state needed by the animation.
	ticks int // Animation run rate - number of animation steps.
	tkcnt int // Current step.
	state int // Track progress 0:start, 1:run, 2:done.
}

// Animate is called each engine update while the animation is running.
// It returns true while more steps remain and false once finished.
func (f *fadeEndAnimation) Animate(dt float64) bool {
	switch f.state {
	case 0:
		// First call: reset progress and start fully transparent/tiny.
		f.tkcnt = 0
		f.e.bg.SetAlpha(0)
		f.e.scale = 0.01
		f.state = 1
		return true
	case 1:
		// Grow scale from 0.01 towards 1 and alpha from 0 towards 1
		// in equal per-tick increments.
		f.e.scale += 0.99 / float64(f.ticks)
		f.e.atom.SetScale(f.e.scale, f.e.scale, f.e.scale)
		alpha := f.e.bg.Alpha() + float64(1)/float64(f.ticks)
		f.e.bg.SetAlpha(alpha)
		if f.tkcnt >= f.ticks {
			f.Wrap()
			return false // animation done.
		}
		f.tkcnt++
		return true
	default:
		return false // animation done.
	}
}

// Wrap is called to immediately finish up the animation: final values
// are pinned and the screen is marked active.
func (f *fadeEndAnimation) Wrap() {
	f.e.bg.SetAlpha(1.0)
	f.e.scale = 1.0
	f.e.atom.SetScale(f.e.scale, f.e.scale, f.e.scale)
	f.e.activate(screenActive)
	f.state = 2
}
// fadeEndAnimation
// ===========================================================================
// electron
// electron is used for the atom electron model instances.
type electron struct {
	core *vu.Ent // 3D model.
	band int // Electron band.
}

// newElectron creates a new electron model attached to root, placed on
// the given band at the given angle (degrees).
func newElectron(root *vu.Ent, band int, angle float64) *electron {
	ele := &electron{}
	ele.band = band
	x, y := ele.initialLocation(angle)
	ele.core = root.AddPart().SetAt(x, y, 0)
	// rotating image.
	cimg := ele.core.AddPart().SetScale(0.25, 0.25, 0.25)
	model := cimg.MakeModel("spinball", "msh:billboard", "tex:ele", "tex:halo")
	model.SetAlpha(0.6)
	return ele
}
// initialLocation positions each electron in the given band and angle.
func (ele *electron) initialLocation(angle float64) (dx, dy float64) {
dx = float64(float64(ele.band) * math.Cos(angle*math.Pi/180))
dy = float64(float64(ele.band) * math.Sin(angle*math.Pi/180))
return
} | end.go | 0.711431 | 0.48987 | end.go | starcoder |
package amcl
import (
fmt "fmt"
math "github.com/IBM/mathlib"
)
// Fp256bn translates between mathlib group elements on the FP256BN
// curve and their protobuf coordinate form. In this variant the G1
// byte form carries a leading 0x04 uncompressed-point marker (stripped
// on serialize, re-added on deserialize) while the G2 byte form has no
// marker.
type Fp256bn struct {
	C *math.Curve
}

// G1ToProto serializes a G1 point into its (X, Y) coordinate pair,
// dropping the leading 0x04 marker byte. Panics on a nil point.
func (a *Fp256bn) G1ToProto(g1 *math.G1) *ECP {
	if g1 == nil {
		panic("nil argument")
	}
	bytes := g1.Bytes()[1:]
	l := len(bytes) / 2
	return &ECP{
		X: bytes[:l],
		Y: bytes[l:],
	}
}

// G1FromRawBytes deserializes a G1 point from a raw X||Y byte string
// (no marker byte).
func (a *Fp256bn) G1FromRawBytes(raw []byte) (*math.G1, error) {
	l := len(raw) / 2
	return a.G1FromProto(&ECP{
		X: raw[:l],
		Y: raw[l:],
	})
}

// G1FromProto deserializes a G1 point from its coordinate pair after
// validating the field-element lengths, re-adding the 0x04 marker.
func (a *Fp256bn) G1FromProto(e *ECP) (*math.G1, error) {
	if e == nil {
		return nil, fmt.Errorf("nil argument")
	}
	if len(e.X) != a.C.FieldBytes || len(e.Y) != a.C.FieldBytes {
		return nil, fmt.Errorf("invalid marshalled length")
	}
	bytes := make([]byte, len(e.X)*2+1)
	l := len(e.X)
	bytes[0] = 0x04
	copy(bytes[1:], e.X)
	copy(bytes[l+1:], e.Y)
	return a.C.NewG1FromBytes(bytes)
}

// G2ToProto serializes a G2 point into its four field-element
// coordinates. Unlike the Miracl variant below, no marker byte is
// stripped. Panics on a nil point.
func (a *Fp256bn) G2ToProto(g2 *math.G2) *ECP2 {
	if g2 == nil {
		panic("nil argument")
	}
	bytes := g2.Bytes()
	l := len(bytes) / 4
	return &ECP2{
		Xa: bytes[0:l],
		Xb: bytes[l : 2*l],
		Ya: bytes[2*l : 3*l],
		Yb: bytes[3*l:],
	}
}

// G2FromProto deserializes a G2 point from its four coordinates after
// validating the field-element lengths.
func (a *Fp256bn) G2FromProto(e *ECP2) (*math.G2, error) {
	if e == nil {
		return nil, fmt.Errorf("nil argument")
	}
	if len(e.Xa) != a.C.FieldBytes || len(e.Xb) != a.C.FieldBytes || len(e.Ya) != a.C.FieldBytes || len(e.Yb) != a.C.FieldBytes {
		return nil, fmt.Errorf("invalid marshalled length")
	}
	bytes := make([]byte, len(e.Xa)*4)
	l := len(e.Xa)
	copy(bytes[0:l], e.Xa)
	copy(bytes[l:2*l], e.Xb)
	copy(bytes[2*l:3*l], e.Ya)
	copy(bytes[3*l:], e.Yb)
	return a.C.NewG2FromBytes(bytes)
}
// Fp256bnMiracl translates between mathlib group elements on the
// FP256BN (Miracl) curve and their protobuf coordinate form. In this
// variant both the G1 and the G2 byte forms carry a leading 0x04
// marker (stripped on serialize, re-added on deserialize).
type Fp256bnMiracl struct {
	C *math.Curve
}

// G1ToProto serializes a G1 point into its (X, Y) coordinate pair,
// dropping the leading 0x04 marker byte. Panics on a nil point.
func (a *Fp256bnMiracl) G1ToProto(g1 *math.G1) *ECP {
	if g1 == nil {
		panic("nil argument")
	}
	bytes := g1.Bytes()[1:]
	l := len(bytes) / 2
	return &ECP{
		X: bytes[:l],
		Y: bytes[l:],
	}
}

// G1FromRawBytes deserializes a G1 point from a raw X||Y byte string
// (no marker byte).
func (a *Fp256bnMiracl) G1FromRawBytes(raw []byte) (*math.G1, error) {
	l := len(raw) / 2
	return a.G1FromProto(&ECP{
		X: raw[:l],
		Y: raw[l:],
	})
}

// G1FromProto deserializes a G1 point from its coordinate pair after
// validating the field-element lengths, re-adding the 0x04 marker.
func (a *Fp256bnMiracl) G1FromProto(e *ECP) (*math.G1, error) {
	if e == nil {
		return nil, fmt.Errorf("nil argument")
	}
	if len(e.X) != a.C.FieldBytes || len(e.Y) != a.C.FieldBytes {
		return nil, fmt.Errorf("invalid marshalled length")
	}
	bytes := make([]byte, len(e.X)*2+1)
	l := len(e.X)
	bytes[0] = 0x04
	copy(bytes[1:], e.X)
	copy(bytes[l+1:], e.Y)
	return a.C.NewG1FromBytes(bytes)
}

// G2ToProto serializes a G2 point into its four field-element
// coordinates, dropping the leading 0x04 marker byte. Panics on a nil
// point.
func (a *Fp256bnMiracl) G2ToProto(g2 *math.G2) *ECP2 {
	if g2 == nil {
		panic("nil argument")
	}
	bytes := g2.Bytes()[1:]
	l := len(bytes) / 4
	return &ECP2{
		Xa: bytes[0:l],
		Xb: bytes[l : 2*l],
		Ya: bytes[2*l : 3*l],
		Yb: bytes[3*l:],
	}
}

// G2FromProto deserializes a G2 point from its four coordinates after
// validating the field-element lengths, re-adding the 0x04 marker.
func (a *Fp256bnMiracl) G2FromProto(e *ECP2) (*math.G2, error) {
	if e == nil {
		return nil, fmt.Errorf("nil argument")
	}
	if len(e.Xa) != a.C.FieldBytes || len(e.Xb) != a.C.FieldBytes || len(e.Ya) != a.C.FieldBytes || len(e.Yb) != a.C.FieldBytes {
		return nil, fmt.Errorf("invalid marshalled length")
	}
	bytes := make([]byte, 1+len(e.Xa)*4)
	bytes[0] = 0x04
	l := len(e.Xa)
	copy(bytes[1:], e.Xa)
	copy(bytes[1+l:], e.Xb)
	copy(bytes[1+2*l:], e.Ya)
	copy(bytes[1+3*l:], e.Yb)
	return a.C.NewG2FromBytes(bytes)
}
package curves
import (
"github.com/wieku/danser-go/framework/math/vector"
)
// NewBSpline creates a spline that goes through all given control points.
// points[1] and points[len(points)-2] are terminal tangents.
// Each solved bezier has its length pre-calculated before assembly.
func NewBSpline(points []vector.Vector2f) *Spline {
	beziers := SolveBSpline(points)
	beziersC := make([]Curve, len(beziers))
	for i, b := range beziers {
		b.CalculateLength()
		beziersC[i] = b
	}
	return NewSpline(beziersC)
}

// NewBSplineW creates a spline that goes through all given control points with forced weights(lengths), useful when control points have to be passed at certain times.
// points[1] and points[len(points)-2] are terminal tangents.
// Unlike NewBSpline, bezier lengths are supplied via weights instead
// of being computed.
func NewBSplineW(points []vector.Vector2f, weights []float32) *Spline {
	beziers := SolveBSpline(points)
	beziersC := make([]Curve, len(beziers))
	for i, b := range beziers {
		beziersC[i] = b
	}
	return NewSplineW(beziersC, weights)
}
// SolveBSpline calculates the spline that goes through all given control points.
// points[1] and points[len(points)-2] are terminal tangents
// Returns an array of bezier curves in NA (non-approximated) version for performance considerations.
func SolveBSpline(points1 []vector.Vector2f) []*Bezier {
	pointsLen := len(points1)
	// Reorder input: interior points first, then the two terminal
	// tangent points (originally at indices 1 and len-2) appended at
	// the end so they can be addressed as points[n] and points[n+1].
	points := make([]vector.Vector2f, 0, pointsLen)
	points = append(points, points1[0])
	points = append(points, points1[2:pointsLen-2]...)
	points = append(points, points1[pointsLen-1], points1[1], points1[pointsLen-2])
	n := len(points) - 2
	// d holds the tangent at each interior point; the two ends come
	// straight from the terminal tangent points.
	d := make([]vector.Vector2f, n)
	d[0] = points[n].Sub(points[0])
	d[n-1] = points[n+1].Sub(points[n-1]).Scl(-1)
	// Forward elimination then back substitution on the tridiagonal
	// system relating neighboring tangents (A: partial right-hand
	// sides, Bi: elimination coefficients).
	A := make([]vector.Vector2f, len(points))
	Bi := make([]float32, len(points))
	Bi[1] = -0.25
	A[1] = points[2].Sub(points[0]).Sub(d[0]).Scl(1.0 / 4)
	for i := 2; i < n-1; i++ {
		Bi[i] = -1 / (4 + Bi[i-1])
		A[i] = points[i+1].Sub(points[i-1]).Sub(A[i-1]).Scl(-1 * Bi[i])
	}
	for i := n - 2; i > 0; i-- {
		d[i] = A[i].Add(d[i+1].Scl(Bi[i]))
	}
	// Emit cubic bezier control points: each segment is
	// (p, p+dp, q-dq, q) for consecutive spline points p, q.
	bezierPoints := []vector.Vector2f{points[0], points[0].Add(d[0])}
	for i := 1; i < n-1; i++ {
		bezierPoints = append(bezierPoints, points[i].Sub(d[i]), points[i], points[i].Add(d[i]))
	}
	bezierPoints = append(bezierPoints, points[n-1].Sub(d[n-1]), points[n-1])
	var beziers []*Bezier
	for i := 0; i < len(bezierPoints)-3; i += 3 {
		beziers = append(beziers, NewBezierNA(bezierPoints[i:i+4]))
	}
	return beziers
}
package gl
// #include "gl.h"
import "C"
//void glFrustum (float64 left, float64 right, float64 bottom, float64 top, float64 zNear, float64 zFar)

// Frustum multiplies the current matrix by a perspective frustum matrix.
func Frustum(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {
	C.glFrustum(C.GLdouble(left), C.GLdouble(right), C.GLdouble(bottom), C.GLdouble(top), C.GLdouble(zNear), C.GLdouble(zFar))
}

//void glLoadIdentity (void)

// LoadIdentity replaces the current matrix with the identity matrix.
func LoadIdentity() {
	C.glLoadIdentity()
}

//void glLoadMatrixd (const float64 *m)

// LoadMatrixd replaces the current matrix with the given column-major double matrix.
func LoadMatrixd(m *[16]float64) {
	C.glLoadMatrixd((*C.GLdouble)(&m[0]))
}

//void glLoadMatrixf (const float32 *m)

// LoadMatrixf replaces the current matrix with the given column-major float matrix.
func LoadMatrixf(m *[16]float32) {
	C.glLoadMatrixf((*C.GLfloat)(&m[0]))
}

//void glMatrixMode (GLenum mode)

// MatrixMode selects which matrix stack subsequent matrix calls target.
func MatrixMode(mode GLenum) {
	C.glMatrixMode(C.GLenum(mode))
}
//void glMultMatrixd (const float64 *m)

// MultMatrixd multiplies the current matrix by the given column-major double matrix.
func MultMatrixd(m *[16]float64) {
	C.glMultMatrixd((*C.GLdouble)(&m[0]))
}

//void glMultMatrixf (const float32 *m)

// MultMatrixf multiplies the current matrix by the given column-major float matrix.
func MultMatrixf(m *[16]float32) {
	C.glMultMatrixf((*C.GLfloat)(&m[0]))
}

//void glOrtho (float64 left, float64 right, float64 bottom, float64 top, float64 zNear, float64 zFar)

// Ortho multiplies the current matrix by an orthographic projection matrix.
func Ortho(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {
	C.glOrtho(C.GLdouble(left), C.GLdouble(right), C.GLdouble(bottom), C.GLdouble(top), C.GLdouble(zNear), C.GLdouble(zFar))
}

//void glPopMatrix (void)

// PopMatrix pops the top entry off the current matrix stack.
func PopMatrix() {
	C.glPopMatrix()
}

//void glPushMatrix (void)

// PushMatrix duplicates the top entry of the current matrix stack.
func PushMatrix() {
	C.glPushMatrix()
}
//void glRotated (float64 angle, float64 x, float64 y, float64 z)

// Rotated multiplies the current matrix by a rotation of angle degrees about (x, y, z).
func Rotated(angle float64, x float64, y float64, z float64) {
	C.glRotated(C.GLdouble(angle), C.GLdouble(x), C.GLdouble(y), C.GLdouble(z))
}

//void glRotatef (float32 angle, float32 x, float32 y, float32 z)

// Rotatef is the float32 variant of Rotated.
func Rotatef(angle float32, x float32, y float32, z float32) {
	C.glRotatef(C.GLfloat(angle), C.GLfloat(x), C.GLfloat(y), C.GLfloat(z))
}

//void glScaled (float64 x, float64 y, float64 z)

// Scaled multiplies the current matrix by a scale of (x, y, z).
func Scaled(x float64, y float64, z float64) {
	C.glScaled(C.GLdouble(x), C.GLdouble(y), C.GLdouble(z))
}

//void glScalef (float32 x, float32 y, float32 z)

// Scalef is the float32 variant of Scaled.
func Scalef(x float32, y float32, z float32) {
	C.glScalef(C.GLfloat(x), C.GLfloat(y), C.GLfloat(z))
}

//void glTranslated (float64 x, float64 y, float64 z)

// Translated multiplies the current matrix by a translation of (x, y, z).
func Translated(x float64, y float64, z float64) {
	C.glTranslated(C.GLdouble(x), C.GLdouble(y), C.GLdouble(z))
}

//void glTranslatef (float32 x, float32 y, float32 z)

// Translatef is the float32 variant of Translated.
func Translatef(x float32, y float32, z float32) {
	C.glTranslatef(C.GLfloat(x), C.GLfloat(y), C.GLfloat(z))
}
package main
import "strconv"
// Device is the representation of the wrist device: four registers.
type Device [4]int

// isEqual reports whether both devices hold identical register values.
// [4]int is a comparable array type, so Go's == performs the
// element-wise comparison directly.
func (d Device) isEqual(other Device) bool {
	return d == other
}
// initDevice builds a Device from the first four decimal strings in
// strslice. Parse failures silently leave the register at zero.
func initDevice(strslice []string) Device {
	var result Device
	for i := range result {
		result[i], _ = strconv.Atoi(strslice[i])
	}
	return result
}
// Below are the different operations that can be done on the device.
// Each returns a Device with only register C updated; the receiver is
// an array value, so the caller's copy is never mutated.

// Addition:

// addr (add register) stores into register C the result of adding register A and register B.
func (d Device) addr(i Instruction) Device {
	d[i.Cout] = d[i.Ain] + d[i.Bin]
	return d
}

// addi (add immediate) stores into register C the result of adding register A and value B.
func (d Device) addi(i Instruction) Device {
	d[i.Cout] = d[i.Ain] + i.Bin
	return d
}

// Multiplication:

// mulr (multiply register) stores into register C the result of multiplying register A and register B.
func (d Device) mulr(i Instruction) Device {
	d[i.Cout] = d[i.Ain] * d[i.Bin]
	return d
}

// muli (multiply immediate) stores into register C the result of multiplying register A and value B.
func (d Device) muli(i Instruction) Device {
	d[i.Cout] = d[i.Ain] * i.Bin
	return d
}

// Bitwise AND:

// banr (bitwise AND register) stores into register C the result of the bitwise AND of register A and register B.
func (d Device) banr(i Instruction) Device {
	d[i.Cout] = d[i.Ain] & d[i.Bin]
	return d
}

// bani (bitwise AND immediate) stores into register C the result of the bitwise AND of register A and value B.
func (d Device) bani(i Instruction) Device {
	d[i.Cout] = d[i.Ain] & i.Bin
	return d
}

// Bitwise OR:

// borr (bitwise OR register) stores into register C the result of the bitwise OR of register A and register B.
func (d Device) borr(i Instruction) Device {
	d[i.Cout] = d[i.Ain] | d[i.Bin]
	return d
}

// bori (bitwise OR immediate) stores into register C the result of the bitwise OR of register A and value B.
func (d Device) bori(i Instruction) Device {
	d[i.Cout] = d[i.Ain] | i.Bin
	return d
}
}
// Assignment:
// setr (set register) copies the contents of register A into register C. (Input B is ignored.)
func (d Device) setr(i Instruction) Device {
d[i.Cout] = d[i.Ain]
return d
}
// seti (set immediate) stores value A into register C. (Input B is ignored.)
func (d Device) seti(i Instruction) Device {
d[i.Cout] = i.Ain
return d
}
// Greater-than testing: each op writes 1 to register C when the
// comparison holds and 0 otherwise.

// gtir (greater-than immediate/register): C = 1 if value A > register B, else 0.
func (d Device) gtir(i Instruction) Device {
	d[i.Cout] = 0
	if i.Ain > d[i.Bin] {
		d[i.Cout] = 1
	}
	return d
}

// gtri (greater-than register/immediate): C = 1 if register A > value B, else 0.
func (d Device) gtri(i Instruction) Device {
	d[i.Cout] = 0
	if d[i.Ain] > i.Bin {
		d[i.Cout] = 1
	}
	return d
}

// gtrr (greater-than register/register): C = 1 if register A > register B, else 0.
func (d Device) gtrr(i Instruction) Device {
	d[i.Cout] = 0
	if d[i.Ain] > d[i.Bin] {
		d[i.Cout] = 1
	}
	return d
}
// Equality testing:
// eqir (equal immediate/register) sets register C to 1 if value A is equal to register B. Otherwise, register C is set to 0.
func (d Device) eqir(i Instruction) Device {
if i.Ain == d[i.Bin] {
d[i.Cout] = 1
} else {
d[i.Cout] = 0
}
return d
}
// eqri (equal register/immediate) sets register C to 1 if register A is equal to value B. Otherwise, register C is set to 0.
func (d Device) eqri(i Instruction) Device {
if d[i.Ain] == i.Bin {
d[i.Cout] = 1
} else {
d[i.Cout] = 0
}
return d
}
// eqrr (equal register/register) sets register C to 1 if register A is equal to register B. Otherwise, register C is set to 0.
func (d Device) eqrr(i Instruction) Device {
if d[i.Ain] == d[i.Bin] {
d[i.Cout] = 1
} else {
d[i.Cout] = 0
}
return d
} | 2018/16_1/device.go | 0.722233 | 0.450541 | device.go | starcoder |
package struct2struct
import (
"errors"
"fmt"
"reflect"
"strconv"
)
// appliers is the ordered chain of conversion strategies tried by
// applyField; the first one that reports "applied" (or errors) wins.
var appliers []applier

// init fixes the applier ordering: direct assignments (interface,
// settability test, matched types, pointers) are tried before the
// structural converters (slice, map, struct) and the scalar coercions.
func init() {
	appliers = []applier{
		interfaceApplier,
		settableTestApplier,
		matchedTypeApplier,
		pointerApplier,
		sliceApplier,
		mapApplier,
		structApplier,
		intApplier,
		uintApplier,
		floatApplier,
		stringApplier,
	}
}

// applier attempts to copy/convert iField into vField, reporting
// whether it handled the pair and any conversion error.
type applier func(reflect.Value, reflect.Value) (bool, error)
// applyField copies iField's value into vField by running the applier
// chain; it fails if no applier recognizes the type pair (with a more
// specific message when both values are valid).
func applyField(iField reflect.Value, vField reflect.Value) error {
	for _, applier := range appliers {
		applied, err := applier(iField, vField)
		if applied || err != nil {
			return err
		}
	}
	if !iField.IsValid() || !vField.IsValid() {
		return fmt.Errorf("could not apply types")
	}
	return fmt.Errorf("could not apply type '%v' to '%v'", iField.Type(), vField.Type())
}
// intApplier coerces numeric and numeric-string sources into signed
// integer destinations. It declines (false, nil) when either value is
// invalid, the destination is not a signed int kind, or the source
// kind is unsupported; string sources that fail strconv.Atoi return
// the parse error.
//
// NOTE(review): floats are truncated toward zero, and values that
// overflow a narrower destination (e.g. int8) appear to be stored via
// SetInt without a range check — confirm both are intended.
func intApplier(iField reflect.Value, vField reflect.Value) (bool, error) {
	if !iField.IsValid() || !vField.IsValid() {
		return false, nil
	}
	switch vField.Type().Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
	default:
		return false, nil
	}
	var value int64
	switch iField.Type().Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		value = iField.Int()
	case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8:
		value = int64(iField.Uint())
	case reflect.Float32, reflect.Float64:
		value = int64(iField.Float())
	case reflect.String:
		valInt, err := strconv.Atoi(iField.String())
		if err != nil {
			return false, err
		}
		value = int64(valInt)
	default:
		return false, nil
	}
	vField.SetInt(value)
	return true, nil
}
// uintApplier coerces numeric and numeric-string sources into unsigned
// integer destinations. It declines (false, nil) when either value is
// invalid, the destination is not an unsigned kind, or the source kind
// is unsupported; string sources that fail strconv.Atoi return the
// parse error.
//
// NOTE(review): negative sources wrap around via the uint64
// conversion (e.g. -1 becomes MaxUint64) — confirm that is intended.
func uintApplier(iField reflect.Value, vField reflect.Value) (bool, error) {
	if !iField.IsValid() || !vField.IsValid() {
		return false, nil
	}
	switch vField.Type().Kind() {
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
	default:
		return false, nil
	}
	var value uint64
	switch iField.Type().Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		value = uint64(iField.Int())
	case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8:
		value = iField.Uint()
	case reflect.Float32, reflect.Float64:
		value = uint64(iField.Float())
	case reflect.String:
		valInt, err := strconv.Atoi(iField.String())
		if err != nil {
			return false, err
		}
		value = uint64(valInt)
	default:
		return false, nil
	}
	vField.SetUint(value)
	return true, nil
}
func floatApplier(iField reflect.Value, vField reflect.Value) (bool, error) {
if !iField.IsValid() || !vField.IsValid() {
return false, nil
}
var bitSize = 32
switch vField.Type().Kind() {
case reflect.Float32:
case reflect.Float64:
bitSize = 64
default:
return false, nil
}
var value float64
var err error
switch iField.Type().Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
value = float64(iField.Int())
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8:
value = float64(iField.Uint())
case reflect.Float32:
value, _ = strconv.ParseFloat(fmt.Sprint(float32(iField.Float())), bitSize)
case reflect.Float64:
value = iField.Float()
case reflect.String:
value, err = strconv.ParseFloat(iField.String(), bitSize)
if err != nil {
return false, err
}
default:
return false, nil
}
vField.SetFloat(value)
return true, nil
}
func stringApplier(iField reflect.Value, vField reflect.Value) (bool, error) {
if !iField.IsValid() || !vField.IsValid() {
return false, nil
}
if vField.Type().Kind() != reflect.String {
return false, nil
}
vField.SetString(fmt.Sprint(iField.Interface()))
return true, nil
}
func interfaceApplier(iField reflect.Value, vField reflect.Value) (bool, error) {
if !iField.IsValid() || !vField.IsValid() {
return false, nil
}
if vField.Type().Kind() != reflect.Interface {
return false, nil
}
vField.Set(iField)
return true, nil
}
// sliceApplier applies a slice-kinded iField to a slice-kinded vField by
// converting each source element through applyField and appending it to
// vField. Mixing a slice with a non-slice on either side is an error;
// pairs where neither side is a slice are declined.
func sliceApplier(iField reflect.Value, vField reflect.Value) (bool, error) {
	if !iField.IsValid() || !vField.IsValid() {
		return false, nil
	}
	iIsSlice := iField.Type().Kind() == reflect.Slice
	vIsSlice := vField.Type().Kind() == reflect.Slice
	if !iIsSlice && !vIsSlice {
		return false, nil
	}
	if iIsSlice != vIsSlice {
		return false, errors.New("cannot apply a non-slice value to a slice")
	}
	elemType := vField.Type().Elem()
	for i, n := 0, iField.Len(); i < n; i++ {
		// Allocate an addressable destination element, convert into it,
		// then append the converted value.
		converted := reflect.New(elemType)
		if err := applyField(iField.Index(i), converted.Elem()); err != nil {
			return false, err
		}
		vField.Set(reflect.Append(vField, converted.Elem()))
	}
	return true, nil
}
// settableTestApplier drops handling for any unsettable fields
func settableTestApplier(iField reflect.Value, vField reflect.Value) (bool, error) {
if !vField.CanSet() {
return true, nil
}
return false, nil
}
func matchedTypeApplier(iField reflect.Value, vField reflect.Value) (bool, error) {
if !iField.IsValid() || !vField.IsValid() {
return false, nil
}
if iField.Type() == vField.Type() {
vField.Set(iField)
return true, nil
}
return false, nil
}
// structApplier applies a struct-kinded iField to a struct-kinded vField by
// marshaling field-by-field via marshalStruct. Mixing a struct with a
// non-struct on either side is an error; pairs where neither side is a
// struct are declined.
func structApplier(iField reflect.Value, vField reflect.Value) (bool, error) {
	if !iField.IsValid() || !vField.IsValid() {
		return false, nil
	}
	iIsStruct := iField.Type().Kind() == reflect.Struct
	vIsStruct := vField.Type().Kind() == reflect.Struct
	if !iIsStruct && !vIsStruct {
		return false, nil
	}
	if iIsStruct != vIsStruct {
		return false, errors.New("cannot apply a struct type to a non-struct")
	}
	// Marshal into a fresh addressable copy of the destination so nested
	// fields are settable, then write the result back.
	scratch := reflect.New(vField.Type())
	scratch.Elem().Set(vField)
	err := marshalStruct(iField.Interface(), scratch.Interface())
	vField.Set(scratch.Elem())
	return err == nil, err
}
// marshalStruct copies i into v by matching field names via mapFields and
// applying each shared field with applyField. Source fields with no
// matching destination field are skipped; the first per-field failure is
// returned wrapped with the field name.
func marshalStruct(i interface{}, v interface{}) error {
	iFields := mapFields(i, v)
	vFields := mapFields(v, i)
	for name, iField := range iFields {
		vField, ok := vFields[name]
		if !ok {
			continue
		}
		if err := applyField(iField, vField); err != nil {
			return fmt.Errorf("%v: %v", name, err)
		}
	}
	return nil
}
// pointerApplier bridges pointer/value mismatches between the source and
// destination:
//   - pointer source: dereference it and retry the full applier chain;
//   - value source into *T where T is the source's own type: box the value
//     in a new pointer and retry;
//   - struct source into a pointer to some other struct type: allocate the
//     destination struct, marshal field-by-field, and store the pointer
//     only on success.
// All other shapes are declined with (false, nil).
func pointerApplier(iField reflect.Value, vField reflect.Value) (bool, error) {
	if !iField.IsValid() || !vField.IsValid() {
		return false, nil
	}
	if iField.Type().Kind() == reflect.Ptr {
		// Dereference the source and recurse; reflect.Indirect on a nil
		// pointer yields an invalid Value, which applyField rejects.
		err := applyField(reflect.Indirect(iField), vField)
		return err == nil, err
	}
	iPtrType := reflect.PtrTo(iField.Type())
	if vField.Type().Kind() == reflect.Ptr {
		if iPtrType == vField.Type() {
			// Destination is exactly *T for source type T: wrap the
			// source in a fresh pointer and apply that.
			newPtr := reflect.New(iField.Type())
			newPtr.Elem().Set(iField)
			err := applyField(newPtr, vField)
			return err == nil, err
		}
		t := reflect.TypeOf(vField.Interface())
		if iField.Kind() == reflect.Struct && t.Elem().Kind() == reflect.Struct {
			// Struct into pointer-to-different-struct: build the target
			// struct first, then publish its address only if the
			// conversion succeeded.
			newPtr := reflect.New(t.Elem())
			err := applyField(iField, newPtr.Elem())
			if err == nil {
				vField.Set(newPtr)
			}
			return err == nil, err
		}
	}
	return false, nil
}
func mapApplier(iField reflect.Value, vField reflect.Value) (bool, error) {
if !iField.IsValid() || !vField.IsValid() {
return false, nil
}
if iField.Type().Kind() != reflect.Map && vField.Type().Kind() != reflect.Map {
return false, nil
}
if iField.Type().Kind() != reflect.Map || vField.Type().Kind() != reflect.Map {
return false, errors.New("cannot apply a map type to a non-map")
}
vKeyType := vField.Type().Key()
vElemType := vField.Type().Elem()
newMap := reflect.MakeMap(vField.Type())
for _, key := range iField.MapKeys() {
newKey := reflect.New(vKeyType)
newElem := reflect.New(vElemType)
err := applyField(key, newKey.Elem())
if err != nil {
return false, err
}
err = applyField(iField.MapIndex(key), newElem.Elem())
if err != nil {
return false, err
}
newMap.SetMapIndex(newKey.Elem(), newElem.Elem())
}
vField.Set(newMap)
return true, nil
} | vendor/github.com/theothertomelliott/struct2struct/appliers.go | 0.533641 | 0.41567 | appliers.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.