code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"math"
	"regexp"
	"strconv"
	"strings"
)

// Particle models a point particle with integer position, velocity and
// acceleration in 3D, plus a flag recording whether it was destroyed in a
// collision during simulation.
type Particle struct {
	x, y, z     int64
	vx, vy, vz  int64
	ax, ay, az  int64
	annihilated bool
}

// Distance returns the squared Euclidean distance of the particle from the
// origin. The square root is omitted because the value is only used for
// comparisons, where squared distance orders identically.
func (p Particle) Distance() float64 {
	dist := math.Pow(float64(p.x), 2)
	dist += math.Pow(float64(p.y), 2)
	dist += math.Pow(float64(p.z), 2)
	return dist
}

// closestParticle returns the index of the particle that ends up closest to
// the origin after a long time. Positions are projected forward to a fixed
// large time (t = 1000) with the kinematic formula x + v*t + 0.5*a*t^2,
// which approximates long-term behavior well enough to rank particles.
func closestParticle(particles []Particle) int {
	const projectionTime float64 = 1000
	tSquared := math.Pow(projectionTime, 2)

	distances := make([]float64, len(particles))
	for i, p := range particles {
		// p is a copy, so mutating it here does not disturb the caller's slice.
		p.x += int64(projectionTime*float64(p.vx)) + int64(0.5*tSquared*float64(p.ax))
		p.y += int64(projectionTime*float64(p.vy)) + int64(0.5*tSquared*float64(p.ay))
		p.z += int64(projectionTime*float64(p.vz)) + int64(0.5*tSquared*float64(p.az))
		distances[i] = p.Distance()
	}

	minParticle := 0
	for i, d := range distances {
		if d < distances[minParticle] {
			minParticle = i
		}
	}
	return minParticle
}

// coordSplitter matches the labels and punctuation surrounding the nine
// numeric fields of an input line such as "p=<1,2,3>, v=<4,5,6>, a=<7,8,9>".
// Compiled once at package scope instead of on every call.
var coordSplitter = regexp.MustCompile(`[pva=<>,\s]+`)

// parseParticle decodes one input line into a Particle. Missing or
// malformed numeric fields are left at zero (best-effort, matching the
// original behavior), and short/blank lines no longer cause an index panic.
func parseParticle(line string) Particle {
	fields := coordSplitter.Split(line, 11)

	// fields[0] is the empty string preceding the leading "p=<" separator.
	coords := make([]int64, 9)
	for i, f := range fields[1:] {
		if i >= len(coords) {
			break
		}
		// ParseInt into int64 directly; Atoi would truncate on 32-bit ints.
		val, _ := strconv.ParseInt(f, 10, 64)
		coords[i] = val
	}

	return Particle{
		x: coords[0], y: coords[1], z: coords[2],
		vx: coords[3], vy: coords[4], vz: coords[5],
		ax: coords[6], ay: coords[7], az: coords[8],
		annihilated: false,
	}
}

// numUncollidedParticles steps the simulation tick by tick and returns how
// many particles survive all collisions. Particles sharing an integer
// position after a tick annihilate each other; 40 ticks is empirically
// enough for every collision in the puzzle input to resolve.
func numUncollidedParticles(particles []Particle) int {
	const rounds = 40
	for round := 0; round < rounds; round++ {
		locations := make(map[string]int, len(particles))
		for idx, p := range particles {
			if p.annihilated {
				continue
			}

			// Per the puzzle's tick ordering: velocity updates from
			// acceleration first, then position from the new velocity.
			p.vx += p.ax
			p.x += p.vx
			p.vy += p.ay
			p.y += p.vy
			p.vz += p.az
			p.z += p.vz

			pLoc := fmt.Sprintf("%d,%d,%d", p.x, p.y, p.z)
			if prev, ok := locations[pLoc]; ok {
				// Mark both the previously-seen occupant and this particle.
				// A third arrival at the same spot re-marks the occupant
				// (harmless) and is annihilated as well.
				particles[prev].annihilated = true
				p.annihilated = true
			} else {
				locations[pLoc] = idx
			}
			particles[idx] = p
		}
	}

	remaining := 0
	for _, p := range particles {
		if !p.annihilated {
			remaining++
		}
	}
	return remaining
}

func main() {
	dat, err := ioutil.ReadFile("20.txt")
	if err != nil {
		log.Fatalf("reading input: %v", err)
	}

	var particles []Particle
	for _, line := range strings.Split(string(dat), "\n") {
		// Skip blank lines (including the trailing newline) which would
		// otherwise produce bogus zero particles.
		if strings.TrimSpace(line) == "" {
			continue
		}
		particles = append(particles, parseParticle(line))
	}

	// Part 2 mutates the slice, so give Part 1 its own copy.
	part1Particles := make([]Particle, len(particles))
	copy(part1Particles, particles)

	fmt.Printf("Part 1: %d\n", closestParticle(part1Particles))
	fmt.Printf("Part 2: %d\n", numUncollidedParticles(particles))
}
Day20-25/20.go
0.514644
0.414069
20.go
starcoder
package netfilter

import (
	"encoding/binary"
	"fmt"
	"github.com/mdlayher/netlink"
	"github.com/pkg/errors"
	"golang.org/x/sys/unix"
)

// An Attribute is a copy of a netlink.Attribute that can be nested.
type Attribute struct {
	// The type of this Attribute, typically matched to a constant.
	Type uint16

	// An arbitrary payload which is specified by Type.
	Data []byte

	// Whether the attribute's data contains nested attributes.
	Nested   bool
	Children []Attribute

	// Whether the attribute's data is in network (true) or native (false) byte order.
	// Nested and NetByteOrder are mutually exclusive; see unmarshalAttributes.
	NetByteOrder bool
}

// String returns a human-readable one-line description of the Attribute,
// listing children when nested and the raw payload otherwise.
func (a Attribute) String() string {
	if a.Nested {
		return fmt.Sprintf("<Length %d, Type %d, Nested %t, %d Children (%v)>", len(a.Data), a.Type, a.Nested, len(a.Children), a.Children)
	}

	return fmt.Sprintf("<Length %d, Type %d, Nested %t, NetByteOrder %t, %v>", len(a.Data), a.Type, a.Nested, a.NetByteOrder, a.Data)
}

// Uint16 interprets a non-nested Netfilter attribute in network byte order as a uint16.
// Panics when the attribute is nested or the payload is not exactly 2 bytes.
func (a Attribute) Uint16() uint16 {
	if a.Nested {
		panic("Uint16: unexpected Nested attribute")
	}

	if l := len(a.Data); l != 2 {
		panic(fmt.Sprintf("Uint16: unexpected byte slice length: %d", l))
	}

	return binary.BigEndian.Uint16(a.Data)
}

// PutUint16 sets the Attribute's data field to a Uint16 encoded in net byte order.
func (a *Attribute) PutUint16(v uint16) {
	// Reuse the existing buffer when it is already exactly 2 bytes.
	if len(a.Data) != 2 {
		a.Data = make([]byte, 2)
	}

	binary.BigEndian.PutUint16(a.Data, v)
}

// Uint32 interprets a non-nested Netfilter attribute in network byte order as a uint32.
// Panics when the attribute is nested or the payload is not exactly 4 bytes.
func (a Attribute) Uint32() uint32 {
	if a.Nested {
		panic("Uint32: unexpected Nested attribute")
	}

	if l := len(a.Data); l != 4 {
		panic(fmt.Sprintf("Uint32: unexpected byte slice length: %d", l))
	}

	return binary.BigEndian.Uint32(a.Data)
}

// PutUint32 sets the Attribute's data field to a Uint32 encoded in net byte order.
func (a *Attribute) PutUint32(v uint32) {
	// Reuse the existing buffer when it is already exactly 4 bytes.
	if len(a.Data) != 4 {
		a.Data = make([]byte, 4)
	}

	binary.BigEndian.PutUint32(a.Data, v)
}

// Int32 converts the result of Uint32() to an int32.
func (a Attribute) Int32() int32 {
	return int32(a.Uint32())
}

// Uint64 interprets a non-nested Netfilter attribute in network byte order as a uint64.
// Panics when the attribute is nested or the payload is not exactly 8 bytes.
func (a Attribute) Uint64() uint64 {
	if a.Nested {
		panic("Uint64: unexpected Nested attribute")
	}

	if l := len(a.Data); l != 8 {
		panic(fmt.Sprintf("Uint64: unexpected byte slice length: %d", l))
	}

	return binary.BigEndian.Uint64(a.Data)
}

// PutUint64 sets the Attribute's data field to a Uint64 encoded in net byte order.
func (a *Attribute) PutUint64(v uint64) {
	// Reuse the existing buffer when it is already exactly 8 bytes.
	if len(a.Data) != 8 {
		a.Data = make([]byte, 8)
	}

	binary.BigEndian.PutUint64(a.Data, v)
}

// Int64 converts the result of Uint64() to an int64.
func (a Attribute) Int64() int64 {
	return int64(a.Uint64())
}

// Uint16Bytes gets the big-endian 2-byte representation of a uint16.
func Uint16Bytes(u uint16) []byte {
	d := make([]byte, 2)
	binary.BigEndian.PutUint16(d, u)
	return d
}

// Uint32Bytes gets the big-endian 4-byte representation of a uint32.
func Uint32Bytes(u uint32) []byte {
	d := make([]byte, 4)
	binary.BigEndian.PutUint32(d, u)
	return d
}

// Uint64Bytes gets the big-endian 8-byte representation of a uint64.
func Uint64Bytes(u uint64) []byte {
	d := make([]byte, 8)
	binary.BigEndian.PutUint64(d, u)
	return d
}

// unmarshalAttributes returns an array of netfilter.Attributes decoded from
// a byte array. This byte array should be taken from the netlink.Message's
// Data payload after the nfHeaderLen offset.
func unmarshalAttributes(b []byte) ([]Attribute, error) {

	// Obtain a list of parsed netlink attributes possibly holding
	// nested Netfilter attributes in their binary Data field.
	attrs, err := netlink.UnmarshalAttributes(b)
	if err != nil {
		return nil, errors.Wrap(err, errWrapNetlinkUnmarshalAttrs)
	}

	var ra []Attribute

	// Only allocate backing array when there are netlink attributes to decode.
	if len(attrs) != 0 {
		ra = make([]Attribute, 0, len(attrs))
	}

	// Wrap all netlink.Attributes into netfilter.Attributes to support nesting.
	for _, nla := range attrs {

		// Copy the netlink attribute's fields into the netfilter attribute.
		nfa := Attribute{
			// Only consider the rightmost 14 bits for Type; the two leftmost
			// bits carry the NESTED / NET_BYTEORDER flags.
			Type: nla.Type & ^(uint16(unix.NLA_F_NESTED) | uint16(unix.NLA_F_NET_BYTEORDER)),
			Data: nla.Data,
		}

		// Boolean flags extracted from the two leftmost bits of Type.
		nfa.Nested = (nla.Type & uint16(unix.NLA_F_NESTED)) != 0
		nfa.NetByteOrder = (nla.Type & uint16(unix.NLA_F_NET_BYTEORDER)) != 0

		// The two flags are mutually exclusive; reject attributes carrying both.
		if nfa.NetByteOrder && nfa.Nested {
			return nil, errInvalidAttributeFlags
		}

		// Unmarshal recursively if the netlink Nested flag is set.
		if nfa.Nested {
			if nfa.Children, err = unmarshalAttributes(nla.Data); err != nil {
				return nil, err
			}
		}

		ra = append(ra, nfa)
	}

	return ra, nil
}

// marshalAttributes marshals a nested attribute structure into a byte slice.
// This byte slice can then be copied into a netlink.Message's Data field after
// the nfHeaderLen offset.
func marshalAttributes(attrs []Attribute) ([]byte, error) {

	// netlink.Attribute to use as scratch buffer, requires a single allocation.
	nla := netlink.Attribute{}

	// Output slice, pre-sized to the capacity of the input slice.
	ra := make([]netlink.Attribute, 0, len(attrs))

	for _, nfa := range attrs {

		// Mirror the check in unmarshalAttributes: the flags are exclusive.
		if nfa.NetByteOrder && nfa.Nested {
			return nil, errInvalidAttributeFlags
		}

		// Save nested or byte order flags back to the netlink.Attribute's
		// Type field to include it in the marshaling operation.
		nla.Type = nfa.Type

		switch {
		case nfa.Nested:
			nla.Type = nla.Type | unix.NLA_F_NESTED
		case nfa.NetByteOrder:
			nla.Type = nla.Type | unix.NLA_F_NET_BYTEORDER
		}

		// Recursively marshal the attribute's children.
		if nfa.Nested {
			nfnab, err := marshalAttributes(nfa.Children)
			if err != nil {
				return nil, err
			}

			nla.Data = nfnab
		} else {
			nla.Data = nfa.Data
		}

		ra = append(ra, nla)
	}

	// Marshal all Netfilter attributes into binary representation of Netlink attributes.
	return netlink.MarshalAttributes(ra)
}
attribute.go
0.686895
0.41745
attribute.go
starcoder
// Print a binary tree in an m*n 2D string array following these rules: // 1. The row number m should be equal to the height of the given binary tree. // 2. The column number n should always be an odd number. // 3. The root node's value (in string format) should be put in the exactly middle of the first row it can be put. The column and the row where the root node belongs will separate the rest space into two parts (left-bottom part and right-bottom part). You should print the left subtree in the left-bottom part and print the right subtree in the right-bottom part. The left-bottom part and the right-bottom part should have the same size. Even if one subtree is none while the other is not, you don't need to print anything for the none subtree but still need to leave the space as large as that for the other subtree. However, if two subtrees are none, then you don't need to leave space for both of them. // 4. Each unused space should contain an empty string "". // 5. Print the subtrees following the same rules. // Example 1: // Input: // 1 // / // 2 // Output: // [["", "1", ""], // ["2", "", ""]] // Example 2: // Input: // 1 // / \ // 2 3 // \ // 4 // Output: // [["", "", "", "1", "", "", ""], // ["", "2", "", "", "", "3", ""], // ["", "", "4", "", "", "", ""]] // Example 3: // Input: // 1 // / \ // 2 5 // / // 3 // / // 4 // Output: // [["", "", "", "", "", "", "", "1", "", "", "", "", "", "", ""] // ["", "", "", "2", "", "", "", "", "", "", "", "5", "", "", ""] // ["", "3", "", "", "", "", "", "", "", "", "", "", "", "", ""] // ["4", "", "", "", "", "", "", "", "", "", "", "", "", "", ""]] // Note: The height of binary tree is in the range of [1, 10]. package leetcode import "strconv" // TreeNode is a node of a binary tree. 
type TreeNode struct { Val int Left *TreeNode Right *TreeNode } func height(root *TreeNode) int { if root == nil { return 0 } max := height(root.Left) if h := height(root.Right); h > max { max = h } return max + 1 } func printTree(root *TreeNode) [][]string { m := height(root) n := 1<<uint(m) - 1 array := make([][]string, m) for i := range array { array[i] = make([]string, n) } fill(array, 0, n/2, 1<<uint(m-2), root) return array } func fill(array [][]string, i, j, d int, root *TreeNode) { if root == nil { return } array[i][j] = strconv.Itoa(root.Val) fill(array, i+1, j-d, d/2, root.Left) fill(array, i+1, j+d, d/2, root.Right) }
0655/code.go
0.715325
0.674583
code.go
starcoder
package any // Ok determines whether Value is not the zero Value. // This is useful to check before using the underlying value. // Consider using Default over Ok. func (v Value) Ok() bool { return v.i != nil } // BoolOk returns the value as a bool type and a bool whether the Value is of type bool. func (v Value) BoolOk() (bool, bool) { b, ok := v.i.(bool) return b, ok } // IntOk returns the value as a int type and a bool whether the Value is of type int. func (v Value) IntOk() (int, bool) { i, ok := v.i.(int) return i, ok } // Int8Ok returns the value as a int8 type and a bool whether the Value is of type int8. func (v Value) Int8Ok() (int8, bool) { i, ok := v.i.(int8) return i, ok } // Int16Ok returns the value as a int16 type and a bool whether the Value is of type int16. func (v Value) Int16Ok() (int16, bool) { i, ok := v.i.(int16) return i, ok } // Int32Ok returns the value as a int32 type and a bool whether the Value is of type int32. func (v Value) Int32Ok() (int32, bool) { i, ok := v.i.(int32) return i, ok } // Int64Ok returns the value as a int64 type and a bool whether the Value is of type int64. func (v Value) Int64Ok() (int64, bool) { i, ok := v.i.(int64) return i, ok } // UintOk returns the value as a uint type and a bool whether the Value is of type uint. func (v Value) UintOk() (uint, bool) { i, ok := v.i.(uint) return i, ok } // Uint8Ok returns the value as a uint8 type and a bool whether the Value is of type uint8. func (v Value) Uint8Ok() (uint8, bool) { i, ok := v.i.(uint8) return i, ok } // Uint16Ok returns the value as a uint16 type and a bool whether the Value is of type uint16. func (v Value) Uint16Ok() (uint16, bool) { i, ok := v.i.(uint16) return i, ok } // Uint32Ok returns the value as a uint32 type and a bool whether the Value is of type uint32. func (v Value) Uint32Ok() (uint32, bool) { i, ok := v.i.(uint32) return i, ok } // Uint64Ok returns the value as a uint64 type and a bool whether the Value is of type uint64. 
func (v Value) Uint64Ok() (uint64, bool) { i, ok := v.i.(uint64) return i, ok } // UintptrOk returns the value as a uintptr type and a bool whether the Value is of type uintptr. func (v Value) UintptrOk() (uintptr, bool) { i, ok := v.i.(uintptr) return i, ok } // Float32Ok returns the value as a float32 type and a bool whether the Value is of type float32. func (v Value) Float32Ok() (float32, bool) { f, ok := v.i.(float32) return f, ok } // Float64Ok returns the value as a float64 type and a bool whether the Value is of type float64. func (v Value) Float64Ok() (float64, bool) { f, ok := v.i.(float64) return f, ok } // Complex64Ok returns the value as a complex64 type and a bool whether the Value is of type complex64. func (v Value) Complex64Ok() (complex64, bool) { c, ok := v.i.(complex64) return c, ok } // Complex128Ok returns the value as a complex128 type and a bool whether the Value is of type complex128. func (v Value) Complex128Ok() (complex128, bool) { c, ok := v.i.(complex128) return c, ok } // ByteOk returns the value as a byte type and a bool whether the Value is of type byte. func (v Value) ByteOk() (byte, bool) { b, ok := v.i.(byte) return b, ok } // BytesOk returns the value as a []byte type and a bool whether the Value is of type []byte. func (v Value) BytesOk() ([]byte, bool) { b, ok := v.i.([]byte) return b, ok } // RuneOk returns the value as a rune type and a bool whether the Value is of type rune. func (v Value) RuneOk() (rune, bool) { r, ok := v.i.(rune) return r, ok } // StringOk returns the value as a string type and a bool whether the Value is of type string. func (v Value) StringOk() (string, bool) { s, ok := v.i.(string) return s, ok } // InterfaceOk provides the underlying value as an empty interface and a bool of Ok. func (v Value) InterfaceOk() (interface{}, bool) { return v.i, v.Ok() }
ok.go
0.79653
0.57687
ok.go
starcoder
package constellation import ( "fmt" "io" "strings" "github.com/awalterschulze/gographviz" ) // GenerateGraph - function to take a dagconfigService structure and create a graph object that contains the // representation of the graph. Also outputs a string representation (GraphViz dot notation) of the resulting graph // this can be passed on to GraphViz to graphically render the resulting graph func (readGraph *Config) GenerateGraph(out io.Writer) (string, error) { // Lookup is used to map IDs to names. Names are easier to visualise but IDs are more important to ensure the // presented constellation is correct and IDs are used to link nodes together lookup := make(map[string]string) g := gographviz.NewGraph() // Replace spaces with underscores, names with spaces can break graphviz engines if err := g.SetName(strings.Replace(readGraph.Name, " ", "_", -1)); err != nil { return "", err } if err := g.AddAttr(g.Name, "rankdir", "LR"); err != nil { return "", err } // Make the graph directed (a constellation is DAG) if err := g.SetDir(true); err != nil { return "", err } // Add all nodes to the graph storing the lookup from ID to name (for later adding relationships) // Replace spaces in names with underscores, names with spaces can break graphviz engines) for _, v := range readGraph.Services { fmt.Fprintf(out, "Adding node %s\n", v.ID) newName := strings.Replace(v.ID, " ", "_", -1) if strings.Compare(newName, v.ID) != 0 { fmt.Fprintf(out, "Changing %s to %s\n", v.ID, newName) } lookup[v.ID] = newName err := g.AddNode(readGraph.Name, "\""+newName+"\"", nil) if err != nil { return "", err } } // Add relationships to the graph linking using the lookup IDs to name map // Replace spaces in names with underscores, names with spaces can break graphviz engines) for _, v := range readGraph.Relationships { fmt.Fprintf(out, "Adding relationship from %s ---> %s\n", v.From, v.To) localFrom := "\"" + lookup[v.From] + "\"" localTo := "\"" + lookup[v.To] + "\"" err := 
g.AddEdge(localFrom, localTo, true, nil) if err != nil { return "", err } } // Produce resulting graph in dot notation format return g.String(), nil }
internal/platform/constellation/graph.go
0.698741
0.436202
graph.go
starcoder
package indicators import ( "container/list" "errors" "github.com/thetruetrade/gotrade" ) // An Average Directional Index Rating (Adxr), no storage type AdxrWithoutStorage struct { *baseIndicatorWithFloatBounds // private variables periodCounter int periodHistory *list.List adx *AdxWithoutStorage timePeriod int } // NewAdxrWithoutStorage creates an Average Directional Index Rating (Adxr) without storage func NewAdxrWithoutStorage(timePeriod int, valueAvailableAction ValueAvailableActionFloat) (indicator *AdxrWithoutStorage, err error) { // an indicator without storage MUST have a value available action if valueAvailableAction == nil { return nil, ErrValueAvailableActionIsNil } // the minimum timeperiod for an Adxr indicator is 2 if timePeriod < 2 { return nil, errors.New("timePeriod is less than the minimum (2)") } // check the maximum timeperiod if timePeriod > MaximumLookbackPeriod { return nil, errors.New("timePeriod is greater than the maximum (100000)") } ind := AdxrWithoutStorage{ periodCounter: 0, periodHistory: list.New(), timePeriod: timePeriod, } ind.adx, err = NewAdxWithoutStorage(timePeriod, func(dataItem float64, streamBarIndex int) { ind.periodHistory.PushBack(dataItem) if ind.periodCounter > ind.GetLookbackPeriod() { adxN := ind.periodHistory.Front().Value.(float64) result := (dataItem + adxN) / 2.0 ind.UpdateIndicatorWithNewValue(result, streamBarIndex) } if ind.periodHistory.Len() >= timePeriod { first := ind.periodHistory.Front() ind.periodHistory.Remove(first) } }) var lookback int = 3 if timePeriod > 1 { lookback = timePeriod - 1 + ind.adx.GetLookbackPeriod() } ind.baseIndicatorWithFloatBounds = newBaseIndicatorWithFloatBounds(lookback, valueAvailableAction) return &ind, nil } // A Directional Movement Indicator Rating (Adxr) type Adxr struct { *AdxrWithoutStorage // public variables Data []float64 } // NewAdxr creates an Average Directional Index Rating (Adxr) for online usage func NewAdxr(timePeriod int) (indicator *Adxr, err error) { ind := 
Adxr{} ind.AdxrWithoutStorage, err = NewAdxrWithoutStorage(timePeriod, func(dataItem float64, streamBarIndex int) { ind.Data = append(ind.Data, dataItem) }) return &ind, err } // NewDefaultAdxr creates an Average Directional Index Rating (Adxr) for online usage with default parameters // - timePeriod: 14 func NewDefaultAdxr() (indicator *Adxr, err error) { timePeriod := 14 return NewAdxr(timePeriod) } // NewAdxrWithSrcLen creates an Average Directional Index Rating (Adxr) for offline usage func NewAdxrWithSrcLen(sourceLength uint, timePeriod int) (indicator *Adxr, err error) { ind, err := NewAdxr(timePeriod) // only initialise the storage if there is enough source data to require it if sourceLength-uint(ind.GetLookbackPeriod()) > 1 { ind.Data = make([]float64, 0, sourceLength-uint(ind.GetLookbackPeriod())) } return ind, err } // NewDefaultAdxrWithSrcLen creates an Average Directional Index Rating (Adxr) for offline usage with default parameters func NewDefaultAdxrWithSrcLen(sourceLength uint) (indicator *Adxr, err error) { ind, err := NewDefaultAdxr() // only initialise the storage if there is enough source data to require it if sourceLength-uint(ind.GetLookbackPeriod()) > 1 { ind.Data = make([]float64, 0, sourceLength-uint(ind.GetLookbackPeriod())) } return ind, err } // NewAdxrForStream creates an Average Directional Rating Index (Adxr) for online usage with a source data stream func NewAdxrForStream(priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int) (indicator *Adxr, err error) { ind, err := NewAdxr(timePeriod) priceStream.AddTickSubscription(ind) return ind, err } // NewDefaultAdxrForStream creates an Average Directional Index Rating (Adxr) for online usage with a source data stream func NewDefaultAdxrForStream(priceStream gotrade.DOHLCVStreamSubscriber) (indicator *Adxr, err error) { ind, err := NewDefaultAdxr() priceStream.AddTickSubscription(ind) return ind, err } // NewAdxrForStreamWithSrcLen creates an Average Directional Index Rating (Adxr) for 
offline usage with a source data stream func NewAdxrForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int) (indicator *Adxr, err error) { ind, err := NewAdxrWithSrcLen(sourceLength, timePeriod) priceStream.AddTickSubscription(ind) return ind, err } // NewDefaultAdxrForStreamWithSrcLen creates an Average Directional Index Rating (Adxr) for offline usage with a source data stream func NewDefaultAdxrForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber) (indicator *Adxr, err error) { ind, err := NewDefaultAdxrWithSrcLen(sourceLength) priceStream.AddTickSubscription(ind) return ind, err } // ReceiveDOHLCVTick consumes a source data DOHLCV price tick func (ind *AdxrWithoutStorage) ReceiveDOHLCVTick(tickData gotrade.DOHLCV, streamBarIndex int) { ind.periodCounter += 1 ind.adx.ReceiveDOHLCVTick(tickData, streamBarIndex) }
indicators/adxr.go
0.700383
0.435541
adxr.go
starcoder
package web import "fmt" // Table holds the HTML table of pricing information for a resource. type Table struct { Index int Type string Header [2]string GeneralRows [][2]string PricingInfo [][8]string Total [3]string } // PricingTypeTables holds the HTML tables of hourly, monthly and yearly pricing information for a resource. type PricingTypeTables struct { Hourly Table Monthly Table Yearly Table } // AddComputeInstanceGeneralInfo fills the table with general information about the resource change. func (t *Table) AddComputeInstanceGeneralInfo(name, ID, action, machineType, zone, cpuType, memType string) { t.Header = [2]string{"Name", name} t.GeneralRows = [][2]string{ {"ID", ID}, {"Action", action}, {"Machine Type", machineType}, {"Zone", zone}, {"CPU Type", cpuType}, {"RAM Type", memType}, } } // AddComputeInstancePricing fills the table with the pricing information section for all billing components. func (t *Table) AddComputeInstancePricing(priceUnit string, cpuCostPerUnit1, cpuCostPerUnit2 float64, cpuUnits1, cpuUnits2 int, memCostPerUnit1, memCostPerUnit2, memUnits1, memUnits2 float64) { cpuTot1 := cpuCostPerUnit1 * float64(cpuUnits1) cpuTot2 := cpuCostPerUnit2 * float64(cpuUnits2) memTot1 := memCostPerUnit1 * memUnits1 memTot2 := memCostPerUnit2 * memUnits2 dCPU := cpuTot2 - cpuTot1 dMem := memTot2 - memTot1 f1 := func(x float64) string { return fmt.Sprintf("%.6f USD/%s", x, priceUnit) } f2 := func(x float64) string { return fmt.Sprintf("%.2f", x) } f3 := func(x int) string { return fmt.Sprintf("%d", x) } t.PricingInfo = [][8]string{ {"CPU", f1(cpuCostPerUnit1), f3(cpuUnits1), f1(cpuTot1), f1(cpuCostPerUnit2), f3(cpuUnits2), f1(cpuTot2), f1(dCPU)}, {"RAM", f1(memCostPerUnit1), f2(memUnits1), f1(memTot1), f1(memCostPerUnit2), f2(memUnits2), f1(memTot2), f1(dMem)}, } t.Total = [3]string{f1(cpuTot1 + memTot1), f1(cpuTot2 + memTot2), f1(dCPU + dMem)} } // AddComputeDiskGeneralInfo fills the table with general information about the resource change. 
func (t *Table) AddComputeDiskGeneralInfo(name, id, action, diskType, zones, image, snapshot string) { t.Header = [2]string{"Name", name} t.GeneralRows = [][2]string{ {"ID", id}, {"Action", action}, {"Disk Type", diskType}, {"Zones", zones}, {"Image", image}, {"Snapshot", snapshot}, } } // AddComputeDiskPricing fills the table with the pricing information section for all billing components. func (t *Table) AddComputeDiskPricing(priceUnit string, costPerUnit1, costPerUnit2 float64, units1, units2 int64, delta float64) { f1 := func(x float64) string { return fmt.Sprintf("%.6f USD/%s", x, priceUnit) } f2 := func(x int64) string { return fmt.Sprintf("%d", x) } tot1 := costPerUnit1 * float64(units1) tot2 := costPerUnit2 * float64(units2) t.PricingInfo = [][8]string{ {"Disk", f1(costPerUnit1), f2(units1), f1(tot1), f1(costPerUnit2), f2(units2), f1(tot2), f1(delta)}, } t.Total = [3]string{f1(tot1), f1(tot2), f1(delta)} }
io/web/table.go
0.575707
0.488222
table.go
starcoder
package sqlparser

// CloneSQLNode creates a deep clone of the input.
//
// NOTE(review): this follows the shape of machine-generated visitor code —
// one case per concrete AST type, dispatching to a per-type clone helper.
// Cases that simply `return in` are value types with no reference fields,
// so copying the value already detaches the clone from the input.
func CloneSQLNode(in SQLNode) SQLNode {
	if in == nil {
		return nil
	}
	switch in := in.(type) {
	case AccessMode:
		return in
	case *AddColumns:
		return CloneRefOfAddColumns(in)
	case *AddConstraintDefinition:
		return CloneRefOfAddConstraintDefinition(in)
	case *AddIndexDefinition:
		return CloneRefOfAddIndexDefinition(in)
	case AlgorithmValue:
		return in
	case *AliasedExpr:
		return CloneRefOfAliasedExpr(in)
	case *AliasedTableExpr:
		return CloneRefOfAliasedTableExpr(in)
	case *AlterCharset:
		return CloneRefOfAlterCharset(in)
	case *AlterCheck:
		return CloneRefOfAlterCheck(in)
	case *AlterColumn:
		return CloneRefOfAlterColumn(in)
	case *AlterDatabase:
		return CloneRefOfAlterDatabase(in)
	case *AlterMigration:
		return CloneRefOfAlterMigration(in)
	case *AlterTable:
		return CloneRefOfAlterTable(in)
	case *AlterView:
		return CloneRefOfAlterView(in)
	case *AlterVschema:
		return CloneRefOfAlterVschema(in)
	case *AndExpr:
		return CloneRefOfAndExpr(in)
	case Argument:
		return in
	case *AutoIncSpec:
		return CloneRefOfAutoIncSpec(in)
	case *Begin:
		return CloneRefOfBegin(in)
	case *BetweenExpr:
		return CloneRefOfBetweenExpr(in)
	case *BinaryExpr:
		return CloneRefOfBinaryExpr(in)
	case BoolVal:
		return in
	case *CallProc:
		return CloneRefOfCallProc(in)
	case *CaseExpr:
		return CloneRefOfCaseExpr(in)
	case *ChangeColumn:
		return CloneRefOfChangeColumn(in)
	case *CheckConstraintDefinition:
		return CloneRefOfCheckConstraintDefinition(in)
	case ColIdent:
		return CloneColIdent(in)
	case *ColName:
		return CloneRefOfColName(in)
	case *CollateExpr:
		return CloneRefOfCollateExpr(in)
	case *ColumnDefinition:
		return CloneRefOfColumnDefinition(in)
	case *ColumnType:
		return CloneRefOfColumnType(in)
	case Columns:
		return CloneColumns(in)
	case *Commit:
		return CloneRefOfCommit(in)
	case *CommonTableExpr:
		return CloneRefOfCommonTableExpr(in)
	case *ComparisonExpr:
		return CloneRefOfComparisonExpr(in)
	case *ConstraintDefinition:
		return CloneRefOfConstraintDefinition(in)
	case *ConvertExpr:
		return CloneRefOfConvertExpr(in)
	case *ConvertType:
		return CloneRefOfConvertType(in)
	case *ConvertUsingExpr:
		return CloneRefOfConvertUsingExpr(in)
	case *CreateDatabase:
		return CloneRefOfCreateDatabase(in)
	case *CreateTable:
		return CloneRefOfCreateTable(in)
	case *CreateView:
		return CloneRefOfCreateView(in)
	case *CurTimeFuncExpr:
		return CloneRefOfCurTimeFuncExpr(in)
	case *DeallocateStmt:
		return CloneRefOfDeallocateStmt(in)
	case *Default:
		return CloneRefOfDefault(in)
	case *Definer:
		return CloneRefOfDefiner(in)
	case *Delete:
		return CloneRefOfDelete(in)
	case *DerivedTable:
		return CloneRefOfDerivedTable(in)
	case *DropColumn:
		return CloneRefOfDropColumn(in)
	case *DropDatabase:
		return CloneRefOfDropDatabase(in)
	case *DropKey:
		return CloneRefOfDropKey(in)
	case *DropTable:
		return CloneRefOfDropTable(in)
	case *DropView:
		return CloneRefOfDropView(in)
	case *ExecuteStmt:
		return CloneRefOfExecuteStmt(in)
	case *ExistsExpr:
		return CloneRefOfExistsExpr(in)
	case *ExplainStmt:
		return CloneRefOfExplainStmt(in)
	case *ExplainTab:
		return CloneRefOfExplainTab(in)
	case Exprs:
		return CloneExprs(in)
	case *ExtractFuncExpr:
		return CloneRefOfExtractFuncExpr(in)
	case *ExtractedSubquery:
		return CloneRefOfExtractedSubquery(in)
	case *Flush:
		return CloneRefOfFlush(in)
	case *Force:
		return CloneRefOfForce(in)
	case *ForeignKeyDefinition:
		return CloneRefOfForeignKeyDefinition(in)
	case *FuncExpr:
		return CloneRefOfFuncExpr(in)
	case GroupBy:
		return CloneGroupBy(in)
	case *GroupConcatExpr:
		return CloneRefOfGroupConcatExpr(in)
	case *IndexDefinition:
		return CloneRefOfIndexDefinition(in)
	case *IndexHint:
		return CloneRefOfIndexHint(in)
	case IndexHints:
		return CloneIndexHints(in)
	case *IndexInfo:
		return CloneRefOfIndexInfo(in)
	case *Insert:
		return CloneRefOfInsert(in)
	case *IntervalExpr:
		return CloneRefOfIntervalExpr(in)
	case *IntroducerExpr:
		return CloneRefOfIntroducerExpr(in)
	case *IsExpr:
		return CloneRefOfIsExpr(in)
	case IsolationLevel:
		return in
	case *JSONArrayExpr:
		return CloneRefOfJSONArrayExpr(in)
	case *JSONAttributesExpr:
		return CloneRefOfJSONAttributesExpr(in)
	case *JSONContainsExpr:
		return CloneRefOfJSONContainsExpr(in)
	case *JSONContainsPathExpr:
		return CloneRefOfJSONContainsPathExpr(in)
	case *JSONExtractExpr:
		return CloneRefOfJSONExtractExpr(in)
	case *JSONKeysExpr:
		return CloneRefOfJSONKeysExpr(in)
	case *JSONObjectExpr:
		return CloneRefOfJSONObjectExpr(in)
	case JSONObjectParam:
		return CloneJSONObjectParam(in)
	case *JSONOverlapsExpr:
		return CloneRefOfJSONOverlapsExpr(in)
	case *JSONPrettyExpr:
		return CloneRefOfJSONPrettyExpr(in)
	case *JSONQuoteExpr:
		return CloneRefOfJSONQuoteExpr(in)
	case *JSONRemoveExpr:
		return CloneRefOfJSONRemoveExpr(in)
	case *JSONSchemaValidFuncExpr:
		return CloneRefOfJSONSchemaValidFuncExpr(in)
	case *JSONSchemaValidationReportFuncExpr:
		return CloneRefOfJSONSchemaValidationReportFuncExpr(in)
	case *JSONSearchExpr:
		return CloneRefOfJSONSearchExpr(in)
	case *JSONStorageFreeExpr:
		return CloneRefOfJSONStorageFreeExpr(in)
	case *JSONStorageSizeExpr:
		return CloneRefOfJSONStorageSizeExpr(in)
	case *JSONTableExpr:
		return CloneRefOfJSONTableExpr(in)
	case *JSONUnquoteExpr:
		return CloneRefOfJSONUnquoteExpr(in)
	case *JSONValueExpr:
		return CloneRefOfJSONValueExpr(in)
	case *JSONValueMergeExpr:
		return CloneRefOfJSONValueMergeExpr(in)
	case *JSONValueModifierExpr:
		return CloneRefOfJSONValueModifierExpr(in)
	case *JoinCondition:
		return CloneRefOfJoinCondition(in)
	case *JoinTableExpr:
		return CloneRefOfJoinTableExpr(in)
	case *JtColumnDefinition:
		return CloneRefOfJtColumnDefinition(in)
	case *JtOnResponse:
		return CloneRefOfJtOnResponse(in)
	case *KeyState:
		return CloneRefOfKeyState(in)
	case *Limit:
		return CloneRefOfLimit(in)
	case ListArg:
		return in
	case *Literal:
		return CloneRefOfLiteral(in)
	case *Load:
		return CloneRefOfLoad(in)
	case *LockOption:
		return CloneRefOfLockOption(in)
	case *LockTables:
		return CloneRefOfLockTables(in)
	case MatchAction:
		return in
	case *MatchExpr:
		return CloneRefOfMatchExpr(in)
	case *MemberOfExpr:
		return CloneRefOfMemberOfExpr(in)
	case *ModifyColumn:
		return CloneRefOfModifyColumn(in)
	case *Nextval:
		return CloneRefOfNextval(in)
	case *NotExpr:
		return CloneRefOfNotExpr(in)
	case *NullVal:
		return CloneRefOfNullVal(in)
	case Offset:
		return in
	case OnDup:
		return CloneOnDup(in)
	case *OptLike:
		return CloneRefOfOptLike(in)
	case *OrExpr:
		return CloneRefOfOrExpr(in)
	case *Order:
		return CloneRefOfOrder(in)
	case OrderBy:
		return CloneOrderBy(in)
	case *OrderByOption:
		return CloneRefOfOrderByOption(in)
	case *OtherAdmin:
		return CloneRefOfOtherAdmin(in)
	case *OtherRead:
		return CloneRefOfOtherRead(in)
	case *ParenTableExpr:
		return CloneRefOfParenTableExpr(in)
	case *ParsedComments:
		return CloneRefOfParsedComments(in)
	case *PartitionDefinition:
		return CloneRefOfPartitionDefinition(in)
	case *PartitionDefinitionOptions:
		return CloneRefOfPartitionDefinitionOptions(in)
	case *PartitionEngine:
		return CloneRefOfPartitionEngine(in)
	case *PartitionOption:
		return CloneRefOfPartitionOption(in)
	case *PartitionSpec:
		return CloneRefOfPartitionSpec(in)
	case *PartitionValueRange:
		return CloneRefOfPartitionValueRange(in)
	case Partitions:
		return ClonePartitions(in)
	case *PrepareStmt:
		return CloneRefOfPrepareStmt(in)
	case ReferenceAction:
		return in
	case *ReferenceDefinition:
		return CloneRefOfReferenceDefinition(in)
	case *Release:
		return CloneRefOfRelease(in)
	case *RenameIndex:
		return CloneRefOfRenameIndex(in)
	case *RenameTable:
		return CloneRefOfRenameTable(in)
	case *RenameTableName:
		return CloneRefOfRenameTableName(in)
	case *RevertMigration:
		return CloneRefOfRevertMigration(in)
	case *Rollback:
		return CloneRefOfRollback(in)
	case RootNode:
		return CloneRootNode(in)
	case *SRollback:
		return CloneRefOfSRollback(in)
	case *Savepoint:
		return CloneRefOfSavepoint(in)
	case *Select:
		return CloneRefOfSelect(in)
	case SelectExprs:
		return CloneSelectExprs(in)
	case *SelectInto:
		return CloneRefOfSelectInto(in)
	case *Set:
		return CloneRefOfSet(in)
	case *SetExpr:
		return CloneRefOfSetExpr(in)
	case SetExprs:
		return CloneSetExprs(in)
	case *SetTransaction:
		return CloneRefOfSetTransaction(in)
	case *Show:
		return CloneRefOfShow(in)
	case *ShowBasic:
		return CloneRefOfShowBasic(in)
	case *ShowCreate:
		return CloneRefOfShowCreate(in)
	case *ShowFilter:
		return CloneRefOfShowFilter(in)
	case *ShowMigrationLogs:
		return CloneRefOfShowMigrationLogs(in)
	case *ShowOther:
		return CloneRefOfShowOther(in)
	case *StarExpr:
		return CloneRefOfStarExpr(in)
	case *Stream:
		return CloneRefOfStream(in)
	case *SubPartition:
		return CloneRefOfSubPartition(in)
	case *Subquery:
		return CloneRefOfSubquery(in)
	case *SubstrExpr:
		return CloneRefOfSubstrExpr(in)
	case TableExprs:
		return CloneTableExprs(in)
	case TableIdent:
		return CloneTableIdent(in)
	case TableName:
		return CloneTableName(in)
	case TableNames:
		return CloneTableNames(in)
	case TableOptions:
		return CloneTableOptions(in)
	case *TableSpec:
		return CloneRefOfTableSpec(in)
	case *TablespaceOperation:
		return CloneRefOfTablespaceOperation(in)
	case *TimestampFuncExpr:
		return CloneRefOfTimestampFuncExpr(in)
	case *TrimFuncExpr:
		return CloneRefOfTrimFuncExpr(in)
	case *TruncateTable:
		return CloneRefOfTruncateTable(in)
	case *UnaryExpr:
		return CloneRefOfUnaryExpr(in)
	case *Union:
		return CloneRefOfUnion(in)
	case *UnlockTables:
		return CloneRefOfUnlockTables(in)
	case *Update:
		return CloneRefOfUpdate(in)
	case *UpdateExpr:
		return CloneRefOfUpdateExpr(in)
	case UpdateExprs:
		return CloneUpdateExprs(in)
	case *Use:
		return CloneRefOfUse(in)
	case *VStream:
		return CloneRefOfVStream(in)
	case ValTuple:
		return CloneValTuple(in)
	case *Validation:
		return CloneRefOfValidation(in)
	case Values:
		return CloneValues(in)
	case *ValuesFuncExpr:
		return CloneRefOfValuesFuncExpr(in)
	case VindexParam:
		return CloneVindexParam(in)
	case *VindexSpec:
		return CloneRefOfVindexSpec(in)
	case *WeightStringFuncExpr:
		return CloneRefOfWeightStringFuncExpr(in)
	case *When:
		return CloneRefOfWhen(in)
	case *Where:
		return CloneRefOfWhere(in)
	case *With:
		return CloneRefOfWith(in)
	case *XorExpr:
		return CloneRefOfXorExpr(in)
	default:
		// this should never happen
		return nil
	}
}
// CloneRefOfAddColumns creates a deep clone of the input. func CloneRefOfAddColumns(n *AddColumns) *AddColumns { if n == nil { return nil } out := *n out.Columns = CloneSliceOfRefOfColumnDefinition(n.Columns) out.After = CloneRefOfColName(n.After) return &out } // CloneRefOfAddConstraintDefinition creates a deep clone of the input. func CloneRefOfAddConstraintDefinition(n *AddConstraintDefinition) *AddConstraintDefinition { if n == nil { return nil } out := *n out.ConstraintDefinition = CloneRefOfConstraintDefinition(n.ConstraintDefinition) return &out } // CloneRefOfAddIndexDefinition creates a deep clone of the input. func CloneRefOfAddIndexDefinition(n *AddIndexDefinition) *AddIndexDefinition { if n == nil { return nil } out := *n out.IndexDefinition = CloneRefOfIndexDefinition(n.IndexDefinition) return &out } // CloneRefOfAliasedExpr creates a deep clone of the input. func CloneRefOfAliasedExpr(n *AliasedExpr) *AliasedExpr { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) out.As = CloneColIdent(n.As) return &out } // CloneRefOfAliasedTableExpr creates a deep clone of the input. func CloneRefOfAliasedTableExpr(n *AliasedTableExpr) *AliasedTableExpr { if n == nil { return nil } out := *n out.Expr = CloneSimpleTableExpr(n.Expr) out.Partitions = ClonePartitions(n.Partitions) out.As = CloneTableIdent(n.As) out.Hints = CloneIndexHints(n.Hints) out.Columns = CloneColumns(n.Columns) return &out } // CloneRefOfAlterCharset creates a deep clone of the input. func CloneRefOfAlterCharset(n *AlterCharset) *AlterCharset { if n == nil { return nil } out := *n return &out } // CloneRefOfAlterCheck creates a deep clone of the input. func CloneRefOfAlterCheck(n *AlterCheck) *AlterCheck { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) return &out } // CloneRefOfAlterColumn creates a deep clone of the input. 
func CloneRefOfAlterColumn(n *AlterColumn) *AlterColumn { if n == nil { return nil } out := *n out.Column = CloneRefOfColName(n.Column) out.DefaultVal = CloneExpr(n.DefaultVal) return &out } // CloneRefOfAlterDatabase creates a deep clone of the input. func CloneRefOfAlterDatabase(n *AlterDatabase) *AlterDatabase { if n == nil { return nil } out := *n out.DBName = CloneTableIdent(n.DBName) out.AlterOptions = CloneSliceOfDatabaseOption(n.AlterOptions) return &out } // CloneRefOfAlterMigration creates a deep clone of the input. func CloneRefOfAlterMigration(n *AlterMigration) *AlterMigration { if n == nil { return nil } out := *n return &out } // CloneRefOfAlterTable creates a deep clone of the input. func CloneRefOfAlterTable(n *AlterTable) *AlterTable { if n == nil { return nil } out := *n out.Table = CloneTableName(n.Table) out.AlterOptions = CloneSliceOfAlterOption(n.AlterOptions) out.PartitionSpec = CloneRefOfPartitionSpec(n.PartitionSpec) out.PartitionOption = CloneRefOfPartitionOption(n.PartitionOption) out.Comments = CloneRefOfParsedComments(n.Comments) return &out } // CloneRefOfAlterView creates a deep clone of the input. func CloneRefOfAlterView(n *AlterView) *AlterView { if n == nil { return nil } out := *n out.ViewName = CloneTableName(n.ViewName) out.Definer = CloneRefOfDefiner(n.Definer) out.Columns = CloneColumns(n.Columns) out.Select = CloneSelectStatement(n.Select) out.Comments = CloneRefOfParsedComments(n.Comments) return &out } // CloneRefOfAlterVschema creates a deep clone of the input. func CloneRefOfAlterVschema(n *AlterVschema) *AlterVschema { if n == nil { return nil } out := *n out.Table = CloneTableName(n.Table) out.VindexSpec = CloneRefOfVindexSpec(n.VindexSpec) out.VindexCols = CloneSliceOfColIdent(n.VindexCols) out.AutoIncSpec = CloneRefOfAutoIncSpec(n.AutoIncSpec) return &out } // CloneRefOfAndExpr creates a deep clone of the input. 
func CloneRefOfAndExpr(n *AndExpr) *AndExpr { if n == nil { return nil } out := *n out.Left = CloneExpr(n.Left) out.Right = CloneExpr(n.Right) return &out } // CloneRefOfAutoIncSpec creates a deep clone of the input. func CloneRefOfAutoIncSpec(n *AutoIncSpec) *AutoIncSpec { if n == nil { return nil } out := *n out.Column = CloneColIdent(n.Column) out.Sequence = CloneTableName(n.Sequence) return &out } // CloneRefOfBegin creates a deep clone of the input. func CloneRefOfBegin(n *Begin) *Begin { if n == nil { return nil } out := *n return &out } // CloneRefOfBetweenExpr creates a deep clone of the input. func CloneRefOfBetweenExpr(n *BetweenExpr) *BetweenExpr { if n == nil { return nil } out := *n out.Left = CloneExpr(n.Left) out.From = CloneExpr(n.From) out.To = CloneExpr(n.To) return &out } // CloneRefOfBinaryExpr creates a deep clone of the input. func CloneRefOfBinaryExpr(n *BinaryExpr) *BinaryExpr { if n == nil { return nil } out := *n out.Left = CloneExpr(n.Left) out.Right = CloneExpr(n.Right) return &out } // CloneRefOfCallProc creates a deep clone of the input. func CloneRefOfCallProc(n *CallProc) *CallProc { if n == nil { return nil } out := *n out.Name = CloneTableName(n.Name) out.Params = CloneExprs(n.Params) return &out } // CloneRefOfCaseExpr creates a deep clone of the input. func CloneRefOfCaseExpr(n *CaseExpr) *CaseExpr { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) out.Whens = CloneSliceOfRefOfWhen(n.Whens) out.Else = CloneExpr(n.Else) return &out } // CloneRefOfChangeColumn creates a deep clone of the input. func CloneRefOfChangeColumn(n *ChangeColumn) *ChangeColumn { if n == nil { return nil } out := *n out.OldColumn = CloneRefOfColName(n.OldColumn) out.NewColDefinition = CloneRefOfColumnDefinition(n.NewColDefinition) out.After = CloneRefOfColName(n.After) return &out } // CloneRefOfCheckConstraintDefinition creates a deep clone of the input. 
func CloneRefOfCheckConstraintDefinition(n *CheckConstraintDefinition) *CheckConstraintDefinition { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) return &out } // CloneColIdent creates a deep clone of the input. func CloneColIdent(n ColIdent) ColIdent { return *CloneRefOfColIdent(&n) } // CloneRefOfColName creates a deep clone of the input. func CloneRefOfColName(n *ColName) *ColName { return n } // CloneRefOfCollateExpr creates a deep clone of the input. func CloneRefOfCollateExpr(n *CollateExpr) *CollateExpr { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) return &out } // CloneRefOfColumnDefinition creates a deep clone of the input. func CloneRefOfColumnDefinition(n *ColumnDefinition) *ColumnDefinition { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) out.Type = CloneColumnType(n.Type) return &out } // CloneRefOfColumnType creates a deep clone of the input. func CloneRefOfColumnType(n *ColumnType) *ColumnType { if n == nil { return nil } out := *n out.Options = CloneRefOfColumnTypeOptions(n.Options) out.Length = CloneRefOfLiteral(n.Length) out.Scale = CloneRefOfLiteral(n.Scale) out.EnumValues = CloneSliceOfString(n.EnumValues) return &out } // CloneColumns creates a deep clone of the input. func CloneColumns(n Columns) Columns { if n == nil { return nil } res := make(Columns, 0, len(n)) for _, x := range n { res = append(res, CloneColIdent(x)) } return res } // CloneRefOfCommit creates a deep clone of the input. func CloneRefOfCommit(n *Commit) *Commit { if n == nil { return nil } out := *n return &out } // CloneRefOfCommonTableExpr creates a deep clone of the input. func CloneRefOfCommonTableExpr(n *CommonTableExpr) *CommonTableExpr { if n == nil { return nil } out := *n out.TableID = CloneTableIdent(n.TableID) out.Columns = CloneColumns(n.Columns) out.Subquery = CloneRefOfSubquery(n.Subquery) return &out } // CloneRefOfComparisonExpr creates a deep clone of the input. 
func CloneRefOfComparisonExpr(n *ComparisonExpr) *ComparisonExpr { if n == nil { return nil } out := *n out.Left = CloneExpr(n.Left) out.Right = CloneExpr(n.Right) out.Escape = CloneExpr(n.Escape) return &out } // CloneRefOfConstraintDefinition creates a deep clone of the input. func CloneRefOfConstraintDefinition(n *ConstraintDefinition) *ConstraintDefinition { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) out.Details = CloneConstraintInfo(n.Details) return &out } // CloneRefOfConvertExpr creates a deep clone of the input. func CloneRefOfConvertExpr(n *ConvertExpr) *ConvertExpr { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) out.Type = CloneRefOfConvertType(n.Type) return &out } // CloneRefOfConvertType creates a deep clone of the input. func CloneRefOfConvertType(n *ConvertType) *ConvertType { if n == nil { return nil } out := *n out.Length = CloneRefOfLiteral(n.Length) out.Scale = CloneRefOfLiteral(n.Scale) return &out } // CloneRefOfConvertUsingExpr creates a deep clone of the input. func CloneRefOfConvertUsingExpr(n *ConvertUsingExpr) *ConvertUsingExpr { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) return &out } // CloneRefOfCreateDatabase creates a deep clone of the input. func CloneRefOfCreateDatabase(n *CreateDatabase) *CreateDatabase { if n == nil { return nil } out := *n out.Comments = CloneRefOfParsedComments(n.Comments) out.DBName = CloneTableIdent(n.DBName) out.CreateOptions = CloneSliceOfDatabaseOption(n.CreateOptions) return &out } // CloneRefOfCreateTable creates a deep clone of the input. func CloneRefOfCreateTable(n *CreateTable) *CreateTable { if n == nil { return nil } out := *n out.Table = CloneTableName(n.Table) out.TableSpec = CloneRefOfTableSpec(n.TableSpec) out.OptLike = CloneRefOfOptLike(n.OptLike) out.Comments = CloneRefOfParsedComments(n.Comments) return &out } // CloneRefOfCreateView creates a deep clone of the input. 
func CloneRefOfCreateView(n *CreateView) *CreateView { if n == nil { return nil } out := *n out.ViewName = CloneTableName(n.ViewName) out.Definer = CloneRefOfDefiner(n.Definer) out.Columns = CloneColumns(n.Columns) out.Select = CloneSelectStatement(n.Select) out.Comments = CloneRefOfParsedComments(n.Comments) return &out } // CloneRefOfCurTimeFuncExpr creates a deep clone of the input. func CloneRefOfCurTimeFuncExpr(n *CurTimeFuncExpr) *CurTimeFuncExpr { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) out.Fsp = CloneExpr(n.Fsp) return &out } // CloneRefOfDeallocateStmt creates a deep clone of the input. func CloneRefOfDeallocateStmt(n *DeallocateStmt) *DeallocateStmt { if n == nil { return nil } out := *n out.Comments = CloneRefOfParsedComments(n.Comments) out.Name = CloneColIdent(n.Name) return &out } // CloneRefOfDefault creates a deep clone of the input. func CloneRefOfDefault(n *Default) *Default { if n == nil { return nil } out := *n return &out } // CloneRefOfDefiner creates a deep clone of the input. func CloneRefOfDefiner(n *Definer) *Definer { if n == nil { return nil } out := *n return &out } // CloneRefOfDelete creates a deep clone of the input. func CloneRefOfDelete(n *Delete) *Delete { if n == nil { return nil } out := *n out.With = CloneRefOfWith(n.With) out.Comments = CloneRefOfParsedComments(n.Comments) out.Targets = CloneTableNames(n.Targets) out.TableExprs = CloneTableExprs(n.TableExprs) out.Partitions = ClonePartitions(n.Partitions) out.Where = CloneRefOfWhere(n.Where) out.OrderBy = CloneOrderBy(n.OrderBy) out.Limit = CloneRefOfLimit(n.Limit) return &out } // CloneRefOfDerivedTable creates a deep clone of the input. func CloneRefOfDerivedTable(n *DerivedTable) *DerivedTable { if n == nil { return nil } out := *n out.Select = CloneSelectStatement(n.Select) return &out } // CloneRefOfDropColumn creates a deep clone of the input. 
func CloneRefOfDropColumn(n *DropColumn) *DropColumn { if n == nil { return nil } out := *n out.Name = CloneRefOfColName(n.Name) return &out } // CloneRefOfDropDatabase creates a deep clone of the input. func CloneRefOfDropDatabase(n *DropDatabase) *DropDatabase { if n == nil { return nil } out := *n out.Comments = CloneRefOfParsedComments(n.Comments) out.DBName = CloneTableIdent(n.DBName) return &out } // CloneRefOfDropKey creates a deep clone of the input. func CloneRefOfDropKey(n *DropKey) *DropKey { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) return &out } // CloneRefOfDropTable creates a deep clone of the input. func CloneRefOfDropTable(n *DropTable) *DropTable { if n == nil { return nil } out := *n out.FromTables = CloneTableNames(n.FromTables) out.Comments = CloneRefOfParsedComments(n.Comments) return &out } // CloneRefOfDropView creates a deep clone of the input. func CloneRefOfDropView(n *DropView) *DropView { if n == nil { return nil } out := *n out.FromTables = CloneTableNames(n.FromTables) out.Comments = CloneRefOfParsedComments(n.Comments) return &out } // CloneRefOfExecuteStmt creates a deep clone of the input. func CloneRefOfExecuteStmt(n *ExecuteStmt) *ExecuteStmt { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) out.Comments = CloneRefOfParsedComments(n.Comments) out.Arguments = CloneColumns(n.Arguments) return &out } // CloneRefOfExistsExpr creates a deep clone of the input. func CloneRefOfExistsExpr(n *ExistsExpr) *ExistsExpr { if n == nil { return nil } out := *n out.Subquery = CloneRefOfSubquery(n.Subquery) return &out } // CloneRefOfExplainStmt creates a deep clone of the input. func CloneRefOfExplainStmt(n *ExplainStmt) *ExplainStmt { if n == nil { return nil } out := *n out.Statement = CloneStatement(n.Statement) return &out } // CloneRefOfExplainTab creates a deep clone of the input. 
func CloneRefOfExplainTab(n *ExplainTab) *ExplainTab { if n == nil { return nil } out := *n out.Table = CloneTableName(n.Table) return &out } // CloneExprs creates a deep clone of the input. func CloneExprs(n Exprs) Exprs { if n == nil { return nil } res := make(Exprs, 0, len(n)) for _, x := range n { res = append(res, CloneExpr(x)) } return res } // CloneRefOfExtractFuncExpr creates a deep clone of the input. func CloneRefOfExtractFuncExpr(n *ExtractFuncExpr) *ExtractFuncExpr { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) return &out } // CloneRefOfExtractedSubquery creates a deep clone of the input. func CloneRefOfExtractedSubquery(n *ExtractedSubquery) *ExtractedSubquery { if n == nil { return nil } out := *n out.Original = CloneExpr(n.Original) out.Subquery = CloneRefOfSubquery(n.Subquery) out.OtherSide = CloneExpr(n.OtherSide) out.alternative = CloneExpr(n.alternative) return &out } // CloneRefOfFlush creates a deep clone of the input. func CloneRefOfFlush(n *Flush) *Flush { if n == nil { return nil } out := *n out.FlushOptions = CloneSliceOfString(n.FlushOptions) out.TableNames = CloneTableNames(n.TableNames) return &out } // CloneRefOfForce creates a deep clone of the input. func CloneRefOfForce(n *Force) *Force { if n == nil { return nil } out := *n return &out } // CloneRefOfForeignKeyDefinition creates a deep clone of the input. func CloneRefOfForeignKeyDefinition(n *ForeignKeyDefinition) *ForeignKeyDefinition { if n == nil { return nil } out := *n out.Source = CloneColumns(n.Source) out.IndexName = CloneColIdent(n.IndexName) out.ReferenceDefinition = CloneRefOfReferenceDefinition(n.ReferenceDefinition) return &out } // CloneRefOfFuncExpr creates a deep clone of the input. func CloneRefOfFuncExpr(n *FuncExpr) *FuncExpr { if n == nil { return nil } out := *n out.Qualifier = CloneTableIdent(n.Qualifier) out.Name = CloneColIdent(n.Name) out.Exprs = CloneSelectExprs(n.Exprs) return &out } // CloneGroupBy creates a deep clone of the input. 
func CloneGroupBy(n GroupBy) GroupBy { if n == nil { return nil } res := make(GroupBy, 0, len(n)) for _, x := range n { res = append(res, CloneExpr(x)) } return res } // CloneRefOfGroupConcatExpr creates a deep clone of the input. func CloneRefOfGroupConcatExpr(n *GroupConcatExpr) *GroupConcatExpr { if n == nil { return nil } out := *n out.Exprs = CloneSelectExprs(n.Exprs) out.OrderBy = CloneOrderBy(n.OrderBy) out.Limit = CloneRefOfLimit(n.Limit) return &out } // CloneRefOfIndexDefinition creates a deep clone of the input. func CloneRefOfIndexDefinition(n *IndexDefinition) *IndexDefinition { if n == nil { return nil } out := *n out.Info = CloneRefOfIndexInfo(n.Info) out.Columns = CloneSliceOfRefOfIndexColumn(n.Columns) out.Options = CloneSliceOfRefOfIndexOption(n.Options) return &out } // CloneRefOfIndexHint creates a deep clone of the input. func CloneRefOfIndexHint(n *IndexHint) *IndexHint { if n == nil { return nil } out := *n out.Indexes = CloneSliceOfColIdent(n.Indexes) return &out } // CloneIndexHints creates a deep clone of the input. func CloneIndexHints(n IndexHints) IndexHints { if n == nil { return nil } res := make(IndexHints, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfIndexHint(x)) } return res } // CloneRefOfIndexInfo creates a deep clone of the input. func CloneRefOfIndexInfo(n *IndexInfo) *IndexInfo { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) out.ConstraintName = CloneColIdent(n.ConstraintName) return &out } // CloneRefOfInsert creates a deep clone of the input. func CloneRefOfInsert(n *Insert) *Insert { if n == nil { return nil } out := *n out.Comments = CloneRefOfParsedComments(n.Comments) out.Table = CloneTableName(n.Table) out.Partitions = ClonePartitions(n.Partitions) out.Columns = CloneColumns(n.Columns) out.Rows = CloneInsertRows(n.Rows) out.OnDup = CloneOnDup(n.OnDup) return &out } // CloneRefOfIntervalExpr creates a deep clone of the input. 
func CloneRefOfIntervalExpr(n *IntervalExpr) *IntervalExpr { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) return &out } // CloneRefOfIntroducerExpr creates a deep clone of the input. func CloneRefOfIntroducerExpr(n *IntroducerExpr) *IntroducerExpr { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) return &out } // CloneRefOfIsExpr creates a deep clone of the input. func CloneRefOfIsExpr(n *IsExpr) *IsExpr { if n == nil { return nil } out := *n out.Left = CloneExpr(n.Left) return &out } // CloneRefOfJSONArrayExpr creates a deep clone of the input. func CloneRefOfJSONArrayExpr(n *JSONArrayExpr) *JSONArrayExpr { if n == nil { return nil } out := *n out.Params = CloneExprs(n.Params) return &out } // CloneRefOfJSONAttributesExpr creates a deep clone of the input. func CloneRefOfJSONAttributesExpr(n *JSONAttributesExpr) *JSONAttributesExpr { if n == nil { return nil } out := *n out.JSONDoc = CloneExpr(n.JSONDoc) out.Path = CloneJSONPathParam(n.Path) return &out } // CloneRefOfJSONContainsExpr creates a deep clone of the input. func CloneRefOfJSONContainsExpr(n *JSONContainsExpr) *JSONContainsExpr { if n == nil { return nil } out := *n out.Target = CloneExpr(n.Target) out.Candidate = CloneExpr(n.Candidate) out.PathList = CloneSliceOfJSONPathParam(n.PathList) return &out } // CloneRefOfJSONContainsPathExpr creates a deep clone of the input. func CloneRefOfJSONContainsPathExpr(n *JSONContainsPathExpr) *JSONContainsPathExpr { if n == nil { return nil } out := *n out.JSONDoc = CloneExpr(n.JSONDoc) out.OneOrAll = CloneExpr(n.OneOrAll) out.PathList = CloneSliceOfJSONPathParam(n.PathList) return &out } // CloneRefOfJSONExtractExpr creates a deep clone of the input. func CloneRefOfJSONExtractExpr(n *JSONExtractExpr) *JSONExtractExpr { if n == nil { return nil } out := *n out.JSONDoc = CloneExpr(n.JSONDoc) out.PathList = CloneSliceOfJSONPathParam(n.PathList) return &out } // CloneRefOfJSONKeysExpr creates a deep clone of the input. 
func CloneRefOfJSONKeysExpr(n *JSONKeysExpr) *JSONKeysExpr { if n == nil { return nil } out := *n out.JSONDoc = CloneExpr(n.JSONDoc) out.PathList = CloneSliceOfJSONPathParam(n.PathList) return &out } // CloneRefOfJSONObjectExpr creates a deep clone of the input. func CloneRefOfJSONObjectExpr(n *JSONObjectExpr) *JSONObjectExpr { if n == nil { return nil } out := *n out.Params = CloneSliceOfRefOfJSONObjectParam(n.Params) return &out } // CloneJSONObjectParam creates a deep clone of the input. func CloneJSONObjectParam(n JSONObjectParam) JSONObjectParam { return *CloneRefOfJSONObjectParam(&n) } // CloneRefOfJSONOverlapsExpr creates a deep clone of the input. func CloneRefOfJSONOverlapsExpr(n *JSONOverlapsExpr) *JSONOverlapsExpr { if n == nil { return nil } out := *n out.JSONDoc1 = CloneExpr(n.JSONDoc1) out.JSONDoc2 = CloneExpr(n.JSONDoc2) return &out } // CloneRefOfJSONPrettyExpr creates a deep clone of the input. func CloneRefOfJSONPrettyExpr(n *JSONPrettyExpr) *JSONPrettyExpr { if n == nil { return nil } out := *n out.JSONVal = CloneExpr(n.JSONVal) return &out } // CloneRefOfJSONQuoteExpr creates a deep clone of the input. func CloneRefOfJSONQuoteExpr(n *JSONQuoteExpr) *JSONQuoteExpr { if n == nil { return nil } out := *n out.StringArg = CloneExpr(n.StringArg) return &out } // CloneRefOfJSONRemoveExpr creates a deep clone of the input. func CloneRefOfJSONRemoveExpr(n *JSONRemoveExpr) *JSONRemoveExpr { if n == nil { return nil } out := *n out.JSONDoc = CloneExpr(n.JSONDoc) out.PathList = CloneExprs(n.PathList) return &out } // CloneRefOfJSONSchemaValidFuncExpr creates a deep clone of the input. func CloneRefOfJSONSchemaValidFuncExpr(n *JSONSchemaValidFuncExpr) *JSONSchemaValidFuncExpr { if n == nil { return nil } out := *n out.Schema = CloneExpr(n.Schema) out.Document = CloneExpr(n.Document) return &out } // CloneRefOfJSONSchemaValidationReportFuncExpr creates a deep clone of the input. 
func CloneRefOfJSONSchemaValidationReportFuncExpr(n *JSONSchemaValidationReportFuncExpr) *JSONSchemaValidationReportFuncExpr { if n == nil { return nil } out := *n out.Schema = CloneExpr(n.Schema) out.Document = CloneExpr(n.Document) return &out } // CloneRefOfJSONSearchExpr creates a deep clone of the input. func CloneRefOfJSONSearchExpr(n *JSONSearchExpr) *JSONSearchExpr { if n == nil { return nil } out := *n out.JSONDoc = CloneExpr(n.JSONDoc) out.OneOrAll = CloneExpr(n.OneOrAll) out.SearchStr = CloneExpr(n.SearchStr) out.EscapeChar = CloneExpr(n.EscapeChar) out.PathList = CloneSliceOfJSONPathParam(n.PathList) return &out } // CloneRefOfJSONStorageFreeExpr creates a deep clone of the input. func CloneRefOfJSONStorageFreeExpr(n *JSONStorageFreeExpr) *JSONStorageFreeExpr { if n == nil { return nil } out := *n out.JSONVal = CloneExpr(n.JSONVal) return &out } // CloneRefOfJSONStorageSizeExpr creates a deep clone of the input. func CloneRefOfJSONStorageSizeExpr(n *JSONStorageSizeExpr) *JSONStorageSizeExpr { if n == nil { return nil } out := *n out.JSONVal = CloneExpr(n.JSONVal) return &out } // CloneRefOfJSONTableExpr creates a deep clone of the input. func CloneRefOfJSONTableExpr(n *JSONTableExpr) *JSONTableExpr { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) out.Alias = CloneTableIdent(n.Alias) out.Filter = CloneExpr(n.Filter) out.Columns = CloneSliceOfRefOfJtColumnDefinition(n.Columns) return &out } // CloneRefOfJSONUnquoteExpr creates a deep clone of the input. func CloneRefOfJSONUnquoteExpr(n *JSONUnquoteExpr) *JSONUnquoteExpr { if n == nil { return nil } out := *n out.JSONValue = CloneExpr(n.JSONValue) return &out } // CloneRefOfJSONValueExpr creates a deep clone of the input. 
func CloneRefOfJSONValueExpr(n *JSONValueExpr) *JSONValueExpr { if n == nil { return nil } out := *n out.JSONDoc = CloneExpr(n.JSONDoc) out.Path = CloneJSONPathParam(n.Path) out.ReturningType = CloneRefOfConvertType(n.ReturningType) out.EmptyOnResponse = CloneRefOfJtOnResponse(n.EmptyOnResponse) out.ErrorOnResponse = CloneRefOfJtOnResponse(n.ErrorOnResponse) return &out } // CloneRefOfJSONValueMergeExpr creates a deep clone of the input. func CloneRefOfJSONValueMergeExpr(n *JSONValueMergeExpr) *JSONValueMergeExpr { if n == nil { return nil } out := *n out.JSONDoc = CloneExpr(n.JSONDoc) out.JSONDocList = CloneExprs(n.JSONDocList) return &out } // CloneRefOfJSONValueModifierExpr creates a deep clone of the input. func CloneRefOfJSONValueModifierExpr(n *JSONValueModifierExpr) *JSONValueModifierExpr { if n == nil { return nil } out := *n out.JSONDoc = CloneExpr(n.JSONDoc) out.Params = CloneSliceOfRefOfJSONObjectParam(n.Params) return &out } // CloneRefOfJoinCondition creates a deep clone of the input. func CloneRefOfJoinCondition(n *JoinCondition) *JoinCondition { if n == nil { return nil } out := *n out.On = CloneExpr(n.On) out.Using = CloneColumns(n.Using) return &out } // CloneRefOfJoinTableExpr creates a deep clone of the input. func CloneRefOfJoinTableExpr(n *JoinTableExpr) *JoinTableExpr { if n == nil { return nil } out := *n out.LeftExpr = CloneTableExpr(n.LeftExpr) out.RightExpr = CloneTableExpr(n.RightExpr) out.Condition = CloneRefOfJoinCondition(n.Condition) return &out } // CloneRefOfJtColumnDefinition creates a deep clone of the input. func CloneRefOfJtColumnDefinition(n *JtColumnDefinition) *JtColumnDefinition { if n == nil { return nil } out := *n out.JtOrdinal = CloneRefOfJtOrdinalColDef(n.JtOrdinal) out.JtPath = CloneRefOfJtPathColDef(n.JtPath) out.JtNestedPath = CloneRefOfJtNestedPathColDef(n.JtNestedPath) return &out } // CloneRefOfJtOnResponse creates a deep clone of the input. 
func CloneRefOfJtOnResponse(n *JtOnResponse) *JtOnResponse { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) return &out } // CloneRefOfKeyState creates a deep clone of the input. func CloneRefOfKeyState(n *KeyState) *KeyState { if n == nil { return nil } out := *n return &out } // CloneRefOfLimit creates a deep clone of the input. func CloneRefOfLimit(n *Limit) *Limit { if n == nil { return nil } out := *n out.Offset = CloneExpr(n.Offset) out.Rowcount = CloneExpr(n.Rowcount) return &out } // CloneRefOfLiteral creates a deep clone of the input. func CloneRefOfLiteral(n *Literal) *Literal { if n == nil { return nil } out := *n return &out } // CloneRefOfLoad creates a deep clone of the input. func CloneRefOfLoad(n *Load) *Load { if n == nil { return nil } out := *n return &out } // CloneRefOfLockOption creates a deep clone of the input. func CloneRefOfLockOption(n *LockOption) *LockOption { if n == nil { return nil } out := *n return &out } // CloneRefOfLockTables creates a deep clone of the input. func CloneRefOfLockTables(n *LockTables) *LockTables { if n == nil { return nil } out := *n out.Tables = CloneTableAndLockTypes(n.Tables) return &out } // CloneRefOfMatchExpr creates a deep clone of the input. func CloneRefOfMatchExpr(n *MatchExpr) *MatchExpr { if n == nil { return nil } out := *n out.Columns = CloneSelectExprs(n.Columns) out.Expr = CloneExpr(n.Expr) return &out } // CloneRefOfMemberOfExpr creates a deep clone of the input. func CloneRefOfMemberOfExpr(n *MemberOfExpr) *MemberOfExpr { if n == nil { return nil } out := *n out.Value = CloneExpr(n.Value) out.JSONArr = CloneExpr(n.JSONArr) return &out } // CloneRefOfModifyColumn creates a deep clone of the input. func CloneRefOfModifyColumn(n *ModifyColumn) *ModifyColumn { if n == nil { return nil } out := *n out.NewColDefinition = CloneRefOfColumnDefinition(n.NewColDefinition) out.After = CloneRefOfColName(n.After) return &out } // CloneRefOfNextval creates a deep clone of the input. 
func CloneRefOfNextval(n *Nextval) *Nextval { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) return &out } // CloneRefOfNotExpr creates a deep clone of the input. func CloneRefOfNotExpr(n *NotExpr) *NotExpr { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) return &out } // CloneRefOfNullVal creates a deep clone of the input. func CloneRefOfNullVal(n *NullVal) *NullVal { if n == nil { return nil } out := *n return &out } // CloneOnDup creates a deep clone of the input. func CloneOnDup(n OnDup) OnDup { if n == nil { return nil } res := make(OnDup, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfUpdateExpr(x)) } return res } // CloneRefOfOptLike creates a deep clone of the input. func CloneRefOfOptLike(n *OptLike) *OptLike { if n == nil { return nil } out := *n out.LikeTable = CloneTableName(n.LikeTable) return &out } // CloneRefOfOrExpr creates a deep clone of the input. func CloneRefOfOrExpr(n *OrExpr) *OrExpr { if n == nil { return nil } out := *n out.Left = CloneExpr(n.Left) out.Right = CloneExpr(n.Right) return &out } // CloneRefOfOrder creates a deep clone of the input. func CloneRefOfOrder(n *Order) *Order { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) return &out } // CloneOrderBy creates a deep clone of the input. func CloneOrderBy(n OrderBy) OrderBy { if n == nil { return nil } res := make(OrderBy, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfOrder(x)) } return res } // CloneRefOfOrderByOption creates a deep clone of the input. func CloneRefOfOrderByOption(n *OrderByOption) *OrderByOption { if n == nil { return nil } out := *n out.Cols = CloneColumns(n.Cols) return &out } // CloneRefOfOtherAdmin creates a deep clone of the input. func CloneRefOfOtherAdmin(n *OtherAdmin) *OtherAdmin { if n == nil { return nil } out := *n return &out } // CloneRefOfOtherRead creates a deep clone of the input. 
func CloneRefOfOtherRead(n *OtherRead) *OtherRead { if n == nil { return nil } out := *n return &out } // CloneRefOfParenTableExpr creates a deep clone of the input. func CloneRefOfParenTableExpr(n *ParenTableExpr) *ParenTableExpr { if n == nil { return nil } out := *n out.Exprs = CloneTableExprs(n.Exprs) return &out } // CloneRefOfParsedComments creates a deep clone of the input. func CloneRefOfParsedComments(n *ParsedComments) *ParsedComments { if n == nil { return nil } out := *n out.comments = CloneComments(n.comments) return &out } // CloneRefOfPartitionDefinition creates a deep clone of the input. func CloneRefOfPartitionDefinition(n *PartitionDefinition) *PartitionDefinition { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) out.Options = CloneRefOfPartitionDefinitionOptions(n.Options) return &out } // CloneRefOfPartitionDefinitionOptions creates a deep clone of the input. func CloneRefOfPartitionDefinitionOptions(n *PartitionDefinitionOptions) *PartitionDefinitionOptions { if n == nil { return nil } out := *n out.ValueRange = CloneRefOfPartitionValueRange(n.ValueRange) out.Comment = CloneRefOfLiteral(n.Comment) out.Engine = CloneRefOfPartitionEngine(n.Engine) out.DataDirectory = CloneRefOfLiteral(n.DataDirectory) out.IndexDirectory = CloneRefOfLiteral(n.IndexDirectory) out.MaxRows = CloneRefOfInt(n.MaxRows) out.MinRows = CloneRefOfInt(n.MinRows) return &out } // CloneRefOfPartitionEngine creates a deep clone of the input. func CloneRefOfPartitionEngine(n *PartitionEngine) *PartitionEngine { if n == nil { return nil } out := *n return &out } // CloneRefOfPartitionOption creates a deep clone of the input. 
func CloneRefOfPartitionOption(n *PartitionOption) *PartitionOption { if n == nil { return nil } out := *n out.ColList = CloneColumns(n.ColList) out.Expr = CloneExpr(n.Expr) out.SubPartition = CloneRefOfSubPartition(n.SubPartition) out.Definitions = CloneSliceOfRefOfPartitionDefinition(n.Definitions) return &out } // CloneRefOfPartitionSpec creates a deep clone of the input. func CloneRefOfPartitionSpec(n *PartitionSpec) *PartitionSpec { if n == nil { return nil } out := *n out.Names = ClonePartitions(n.Names) out.Number = CloneRefOfLiteral(n.Number) out.TableName = CloneTableName(n.TableName) out.Definitions = CloneSliceOfRefOfPartitionDefinition(n.Definitions) return &out } // CloneRefOfPartitionValueRange creates a deep clone of the input. func CloneRefOfPartitionValueRange(n *PartitionValueRange) *PartitionValueRange { if n == nil { return nil } out := *n out.Range = CloneValTuple(n.Range) return &out } // ClonePartitions creates a deep clone of the input. func ClonePartitions(n Partitions) Partitions { if n == nil { return nil } res := make(Partitions, 0, len(n)) for _, x := range n { res = append(res, CloneColIdent(x)) } return res } // CloneRefOfPrepareStmt creates a deep clone of the input. func CloneRefOfPrepareStmt(n *PrepareStmt) *PrepareStmt { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) out.Statement = CloneExpr(n.Statement) out.Comments = CloneRefOfParsedComments(n.Comments) return &out } // CloneRefOfReferenceDefinition creates a deep clone of the input. func CloneRefOfReferenceDefinition(n *ReferenceDefinition) *ReferenceDefinition { if n == nil { return nil } out := *n out.ReferencedTable = CloneTableName(n.ReferencedTable) out.ReferencedColumns = CloneColumns(n.ReferencedColumns) return &out } // CloneRefOfRelease creates a deep clone of the input. 
func CloneRefOfRelease(n *Release) *Release { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) return &out } // CloneRefOfRenameIndex creates a deep clone of the input. func CloneRefOfRenameIndex(n *RenameIndex) *RenameIndex { if n == nil { return nil } out := *n out.OldName = CloneColIdent(n.OldName) out.NewName = CloneColIdent(n.NewName) return &out } // CloneRefOfRenameTable creates a deep clone of the input. func CloneRefOfRenameTable(n *RenameTable) *RenameTable { if n == nil { return nil } out := *n out.TablePairs = CloneSliceOfRefOfRenameTablePair(n.TablePairs) return &out } // CloneRefOfRenameTableName creates a deep clone of the input. func CloneRefOfRenameTableName(n *RenameTableName) *RenameTableName { if n == nil { return nil } out := *n out.Table = CloneTableName(n.Table) return &out } // CloneRefOfRevertMigration creates a deep clone of the input. func CloneRefOfRevertMigration(n *RevertMigration) *RevertMigration { if n == nil { return nil } out := *n out.Comments = CloneRefOfParsedComments(n.Comments) return &out } // CloneRefOfRollback creates a deep clone of the input. func CloneRefOfRollback(n *Rollback) *Rollback { if n == nil { return nil } out := *n return &out } // CloneRootNode creates a deep clone of the input. func CloneRootNode(n RootNode) RootNode { return *CloneRefOfRootNode(&n) } // CloneRefOfSRollback creates a deep clone of the input. func CloneRefOfSRollback(n *SRollback) *SRollback { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) return &out } // CloneRefOfSavepoint creates a deep clone of the input. func CloneRefOfSavepoint(n *Savepoint) *Savepoint { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) return &out } // CloneRefOfSelect creates a deep clone of the input. 
func CloneRefOfSelect(n *Select) *Select { if n == nil { return nil } out := *n out.Cache = CloneRefOfBool(n.Cache) out.From = CloneSliceOfTableExpr(n.From) out.Comments = CloneRefOfParsedComments(n.Comments) out.SelectExprs = CloneSelectExprs(n.SelectExprs) out.Where = CloneRefOfWhere(n.Where) out.With = CloneRefOfWith(n.With) out.GroupBy = CloneGroupBy(n.GroupBy) out.Having = CloneRefOfWhere(n.Having) out.OrderBy = CloneOrderBy(n.OrderBy) out.Limit = CloneRefOfLimit(n.Limit) out.Into = CloneRefOfSelectInto(n.Into) return &out } // CloneSelectExprs creates a deep clone of the input. func CloneSelectExprs(n SelectExprs) SelectExprs { if n == nil { return nil } res := make(SelectExprs, 0, len(n)) for _, x := range n { res = append(res, CloneSelectExpr(x)) } return res } // CloneRefOfSelectInto creates a deep clone of the input. func CloneRefOfSelectInto(n *SelectInto) *SelectInto { if n == nil { return nil } out := *n return &out } // CloneRefOfSet creates a deep clone of the input. func CloneRefOfSet(n *Set) *Set { if n == nil { return nil } out := *n out.Comments = CloneRefOfParsedComments(n.Comments) out.Exprs = CloneSetExprs(n.Exprs) return &out } // CloneRefOfSetExpr creates a deep clone of the input. func CloneRefOfSetExpr(n *SetExpr) *SetExpr { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) out.Expr = CloneExpr(n.Expr) return &out } // CloneSetExprs creates a deep clone of the input. func CloneSetExprs(n SetExprs) SetExprs { if n == nil { return nil } res := make(SetExprs, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfSetExpr(x)) } return res } // CloneRefOfSetTransaction creates a deep clone of the input. 
func CloneRefOfSetTransaction(n *SetTransaction) *SetTransaction { if n == nil { return nil } out := *n out.SQLNode = CloneSQLNode(n.SQLNode) out.Comments = CloneRefOfParsedComments(n.Comments) out.Characteristics = CloneSliceOfCharacteristic(n.Characteristics) return &out } // CloneRefOfShow creates a deep clone of the input. func CloneRefOfShow(n *Show) *Show { if n == nil { return nil } out := *n out.Internal = CloneShowInternal(n.Internal) return &out } // CloneRefOfShowBasic creates a deep clone of the input. func CloneRefOfShowBasic(n *ShowBasic) *ShowBasic { if n == nil { return nil } out := *n out.Tbl = CloneTableName(n.Tbl) out.DbName = CloneTableIdent(n.DbName) out.Filter = CloneRefOfShowFilter(n.Filter) return &out } // CloneRefOfShowCreate creates a deep clone of the input. func CloneRefOfShowCreate(n *ShowCreate) *ShowCreate { if n == nil { return nil } out := *n out.Op = CloneTableName(n.Op) return &out } // CloneRefOfShowFilter creates a deep clone of the input. func CloneRefOfShowFilter(n *ShowFilter) *ShowFilter { if n == nil { return nil } out := *n out.Filter = CloneExpr(n.Filter) return &out } // CloneRefOfShowMigrationLogs creates a deep clone of the input. func CloneRefOfShowMigrationLogs(n *ShowMigrationLogs) *ShowMigrationLogs { if n == nil { return nil } out := *n out.Comments = CloneRefOfParsedComments(n.Comments) return &out } // CloneRefOfShowOther creates a deep clone of the input. func CloneRefOfShowOther(n *ShowOther) *ShowOther { if n == nil { return nil } out := *n return &out } // CloneRefOfStarExpr creates a deep clone of the input. func CloneRefOfStarExpr(n *StarExpr) *StarExpr { if n == nil { return nil } out := *n out.TableName = CloneTableName(n.TableName) return &out } // CloneRefOfStream creates a deep clone of the input. 
func CloneRefOfStream(n *Stream) *Stream { if n == nil { return nil } out := *n out.Comments = CloneRefOfParsedComments(n.Comments) out.SelectExpr = CloneSelectExpr(n.SelectExpr) out.Table = CloneTableName(n.Table) return &out } // CloneRefOfSubPartition creates a deep clone of the input. func CloneRefOfSubPartition(n *SubPartition) *SubPartition { if n == nil { return nil } out := *n out.ColList = CloneColumns(n.ColList) out.Expr = CloneExpr(n.Expr) return &out } // CloneRefOfSubquery creates a deep clone of the input. func CloneRefOfSubquery(n *Subquery) *Subquery { if n == nil { return nil } out := *n out.Select = CloneSelectStatement(n.Select) return &out } // CloneRefOfSubstrExpr creates a deep clone of the input. func CloneRefOfSubstrExpr(n *SubstrExpr) *SubstrExpr { if n == nil { return nil } out := *n out.Name = CloneExpr(n.Name) out.From = CloneExpr(n.From) out.To = CloneExpr(n.To) return &out } // CloneTableExprs creates a deep clone of the input. func CloneTableExprs(n TableExprs) TableExprs { if n == nil { return nil } res := make(TableExprs, 0, len(n)) for _, x := range n { res = append(res, CloneTableExpr(x)) } return res } // CloneTableIdent creates a deep clone of the input. func CloneTableIdent(n TableIdent) TableIdent { return *CloneRefOfTableIdent(&n) } // CloneTableName creates a deep clone of the input. func CloneTableName(n TableName) TableName { return *CloneRefOfTableName(&n) } // CloneTableNames creates a deep clone of the input. func CloneTableNames(n TableNames) TableNames { if n == nil { return nil } res := make(TableNames, 0, len(n)) for _, x := range n { res = append(res, CloneTableName(x)) } return res } // CloneTableOptions creates a deep clone of the input. func CloneTableOptions(n TableOptions) TableOptions { if n == nil { return nil } res := make(TableOptions, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfTableOption(x)) } return res } // CloneRefOfTableSpec creates a deep clone of the input. 
func CloneRefOfTableSpec(n *TableSpec) *TableSpec { if n == nil { return nil } out := *n out.Columns = CloneSliceOfRefOfColumnDefinition(n.Columns) out.Indexes = CloneSliceOfRefOfIndexDefinition(n.Indexes) out.Constraints = CloneSliceOfRefOfConstraintDefinition(n.Constraints) out.Options = CloneTableOptions(n.Options) out.PartitionOption = CloneRefOfPartitionOption(n.PartitionOption) return &out } // CloneRefOfTablespaceOperation creates a deep clone of the input. func CloneRefOfTablespaceOperation(n *TablespaceOperation) *TablespaceOperation { if n == nil { return nil } out := *n return &out } // CloneRefOfTimestampFuncExpr creates a deep clone of the input. func CloneRefOfTimestampFuncExpr(n *TimestampFuncExpr) *TimestampFuncExpr { if n == nil { return nil } out := *n out.Expr1 = CloneExpr(n.Expr1) out.Expr2 = CloneExpr(n.Expr2) return &out } // CloneRefOfTrimFuncExpr creates a deep clone of the input. func CloneRefOfTrimFuncExpr(n *TrimFuncExpr) *TrimFuncExpr { if n == nil { return nil } out := *n out.TrimArg = CloneExpr(n.TrimArg) out.StringArg = CloneExpr(n.StringArg) return &out } // CloneRefOfTruncateTable creates a deep clone of the input. func CloneRefOfTruncateTable(n *TruncateTable) *TruncateTable { if n == nil { return nil } out := *n out.Table = CloneTableName(n.Table) return &out } // CloneRefOfUnaryExpr creates a deep clone of the input. func CloneRefOfUnaryExpr(n *UnaryExpr) *UnaryExpr { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) return &out } // CloneRefOfUnion creates a deep clone of the input. func CloneRefOfUnion(n *Union) *Union { if n == nil { return nil } out := *n out.Left = CloneSelectStatement(n.Left) out.Right = CloneSelectStatement(n.Right) out.OrderBy = CloneOrderBy(n.OrderBy) out.With = CloneRefOfWith(n.With) out.Limit = CloneRefOfLimit(n.Limit) out.Into = CloneRefOfSelectInto(n.Into) return &out } // CloneRefOfUnlockTables creates a deep clone of the input. 
func CloneRefOfUnlockTables(n *UnlockTables) *UnlockTables { if n == nil { return nil } out := *n return &out } // CloneRefOfUpdate creates a deep clone of the input. func CloneRefOfUpdate(n *Update) *Update { if n == nil { return nil } out := *n out.With = CloneRefOfWith(n.With) out.Comments = CloneRefOfParsedComments(n.Comments) out.TableExprs = CloneTableExprs(n.TableExprs) out.Exprs = CloneUpdateExprs(n.Exprs) out.Where = CloneRefOfWhere(n.Where) out.OrderBy = CloneOrderBy(n.OrderBy) out.Limit = CloneRefOfLimit(n.Limit) return &out } // CloneRefOfUpdateExpr creates a deep clone of the input. func CloneRefOfUpdateExpr(n *UpdateExpr) *UpdateExpr { if n == nil { return nil } out := *n out.Name = CloneRefOfColName(n.Name) out.Expr = CloneExpr(n.Expr) return &out } // CloneUpdateExprs creates a deep clone of the input. func CloneUpdateExprs(n UpdateExprs) UpdateExprs { if n == nil { return nil } res := make(UpdateExprs, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfUpdateExpr(x)) } return res } // CloneRefOfUse creates a deep clone of the input. func CloneRefOfUse(n *Use) *Use { if n == nil { return nil } out := *n out.DBName = CloneTableIdent(n.DBName) return &out } // CloneRefOfVStream creates a deep clone of the input. func CloneRefOfVStream(n *VStream) *VStream { if n == nil { return nil } out := *n out.Comments = CloneRefOfParsedComments(n.Comments) out.SelectExpr = CloneSelectExpr(n.SelectExpr) out.Table = CloneTableName(n.Table) out.Where = CloneRefOfWhere(n.Where) out.Limit = CloneRefOfLimit(n.Limit) return &out } // CloneValTuple creates a deep clone of the input. func CloneValTuple(n ValTuple) ValTuple { if n == nil { return nil } res := make(ValTuple, 0, len(n)) for _, x := range n { res = append(res, CloneExpr(x)) } return res } // CloneRefOfValidation creates a deep clone of the input. func CloneRefOfValidation(n *Validation) *Validation { if n == nil { return nil } out := *n return &out } // CloneValues creates a deep clone of the input. 
func CloneValues(n Values) Values { if n == nil { return nil } res := make(Values, 0, len(n)) for _, x := range n { res = append(res, CloneValTuple(x)) } return res } // CloneRefOfValuesFuncExpr creates a deep clone of the input. func CloneRefOfValuesFuncExpr(n *ValuesFuncExpr) *ValuesFuncExpr { if n == nil { return nil } out := *n out.Name = CloneRefOfColName(n.Name) return &out } // CloneVindexParam creates a deep clone of the input. func CloneVindexParam(n VindexParam) VindexParam { return *CloneRefOfVindexParam(&n) } // CloneRefOfVindexSpec creates a deep clone of the input. func CloneRefOfVindexSpec(n *VindexSpec) *VindexSpec { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) out.Type = CloneColIdent(n.Type) out.Params = CloneSliceOfVindexParam(n.Params) return &out } // CloneRefOfWeightStringFuncExpr creates a deep clone of the input. func CloneRefOfWeightStringFuncExpr(n *WeightStringFuncExpr) *WeightStringFuncExpr { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) out.As = CloneRefOfConvertType(n.As) return &out } // CloneRefOfWhen creates a deep clone of the input. func CloneRefOfWhen(n *When) *When { if n == nil { return nil } out := *n out.Cond = CloneExpr(n.Cond) out.Val = CloneExpr(n.Val) return &out } // CloneRefOfWhere creates a deep clone of the input. func CloneRefOfWhere(n *Where) *Where { if n == nil { return nil } out := *n out.Expr = CloneExpr(n.Expr) return &out } // CloneRefOfWith creates a deep clone of the input. func CloneRefOfWith(n *With) *With { if n == nil { return nil } out := *n out.ctes = CloneSliceOfRefOfCommonTableExpr(n.ctes) return &out } // CloneRefOfXorExpr creates a deep clone of the input. func CloneRefOfXorExpr(n *XorExpr) *XorExpr { if n == nil { return nil } out := *n out.Left = CloneExpr(n.Left) out.Right = CloneExpr(n.Right) return &out } // CloneAlterOption creates a deep clone of the input. 
func CloneAlterOption(in AlterOption) AlterOption {
	if in == nil {
		return nil
	}
	// Dispatch on the concrete type behind the AlterOption interface.
	switch in := in.(type) {
	case *AddColumns:
		return CloneRefOfAddColumns(in)
	case *AddConstraintDefinition:
		return CloneRefOfAddConstraintDefinition(in)
	case *AddIndexDefinition:
		return CloneRefOfAddIndexDefinition(in)
	case AlgorithmValue:
		// Non-pointer case: returning the switch variable yields a copy.
		return in
	case *AlterCharset:
		return CloneRefOfAlterCharset(in)
	case *AlterCheck:
		return CloneRefOfAlterCheck(in)
	case *AlterColumn:
		return CloneRefOfAlterColumn(in)
	case *ChangeColumn:
		return CloneRefOfChangeColumn(in)
	case *DropColumn:
		return CloneRefOfDropColumn(in)
	case *DropKey:
		return CloneRefOfDropKey(in)
	case *Force:
		return CloneRefOfForce(in)
	case *KeyState:
		return CloneRefOfKeyState(in)
	case *LockOption:
		return CloneRefOfLockOption(in)
	case *ModifyColumn:
		return CloneRefOfModifyColumn(in)
	case *OrderByOption:
		return CloneRefOfOrderByOption(in)
	case *RenameIndex:
		return CloneRefOfRenameIndex(in)
	case *RenameTableName:
		return CloneRefOfRenameTableName(in)
	case TableOptions:
		return CloneTableOptions(in)
	case *TablespaceOperation:
		return CloneRefOfTablespaceOperation(in)
	case *Validation:
		return CloneRefOfValidation(in)
	default:
		// this should never happen
		return nil
	}
}

// CloneCallable creates a deep clone of the input.
func CloneCallable(in Callable) Callable {
	if in == nil {
		return nil
	}
	// Dispatch on the concrete type behind the Callable interface.
	switch in := in.(type) {
	case *ConvertExpr:
		return CloneRefOfConvertExpr(in)
	case *ConvertUsingExpr:
		return CloneRefOfConvertUsingExpr(in)
	case *CurTimeFuncExpr:
		return CloneRefOfCurTimeFuncExpr(in)
	case *ExtractFuncExpr:
		return CloneRefOfExtractFuncExpr(in)
	case *FuncExpr:
		return CloneRefOfFuncExpr(in)
	case *GroupConcatExpr:
		return CloneRefOfGroupConcatExpr(in)
	case *JSONArrayExpr:
		return CloneRefOfJSONArrayExpr(in)
	case *JSONAttributesExpr:
		return CloneRefOfJSONAttributesExpr(in)
	case *JSONContainsExpr:
		return CloneRefOfJSONContainsExpr(in)
	case *JSONContainsPathExpr:
		return CloneRefOfJSONContainsPathExpr(in)
	case *JSONExtractExpr:
		return CloneRefOfJSONExtractExpr(in)
	case *JSONKeysExpr:
		return CloneRefOfJSONKeysExpr(in)
	case *JSONObjectExpr:
		return CloneRefOfJSONObjectExpr(in)
	case *JSONOverlapsExpr:
		return CloneRefOfJSONOverlapsExpr(in)
	case *JSONPrettyExpr:
		return CloneRefOfJSONPrettyExpr(in)
	case *JSONQuoteExpr:
		return CloneRefOfJSONQuoteExpr(in)
	case *JSONRemoveExpr:
		return CloneRefOfJSONRemoveExpr(in)
	case *JSONSchemaValidFuncExpr:
		return CloneRefOfJSONSchemaValidFuncExpr(in)
	case *JSONSchemaValidationReportFuncExpr:
		return CloneRefOfJSONSchemaValidationReportFuncExpr(in)
	case *JSONSearchExpr:
		return CloneRefOfJSONSearchExpr(in)
	case *JSONStorageFreeExpr:
		return CloneRefOfJSONStorageFreeExpr(in)
	case *JSONStorageSizeExpr:
		return CloneRefOfJSONStorageSizeExpr(in)
	case *JSONUnquoteExpr:
		return CloneRefOfJSONUnquoteExpr(in)
	case *JSONValueExpr:
		return CloneRefOfJSONValueExpr(in)
	case *JSONValueMergeExpr:
		return CloneRefOfJSONValueMergeExpr(in)
	case *JSONValueModifierExpr:
		return CloneRefOfJSONValueModifierExpr(in)
	case *MatchExpr:
		return CloneRefOfMatchExpr(in)
	case *MemberOfExpr:
		return CloneRefOfMemberOfExpr(in)
	case *SubstrExpr:
		return CloneRefOfSubstrExpr(in)
	case *TimestampFuncExpr:
		return CloneRefOfTimestampFuncExpr(in)
	case *TrimFuncExpr:
		return CloneRefOfTrimFuncExpr(in)
	case *ValuesFuncExpr:
		return CloneRefOfValuesFuncExpr(in)
	case *WeightStringFuncExpr:
		return CloneRefOfWeightStringFuncExpr(in)
	default:
		// this should never happen
		return nil
	}
}

// CloneCharacteristic creates a deep clone of the input.
func CloneCharacteristic(in Characteristic) Characteristic {
	if in == nil {
		return nil
	}
	switch in := in.(type) {
	case AccessMode:
		// Non-pointer case: returning the switch variable yields a copy.
		return in
	case IsolationLevel:
		return in
	default:
		// this should never happen
		return nil
	}
}

// CloneColTuple creates a deep clone of the input.
func CloneColTuple(in ColTuple) ColTuple {
	if in == nil {
		return nil
	}
	switch in := in.(type) {
	case ListArg:
		return in
	case *Subquery:
		return CloneRefOfSubquery(in)
	case ValTuple:
		return CloneValTuple(in)
	default:
		// this should never happen
		return nil
	}
}

// CloneConstraintInfo creates a deep clone of the input.
func CloneConstraintInfo(in ConstraintInfo) ConstraintInfo {
	if in == nil {
		return nil
	}
	switch in := in.(type) {
	case *CheckConstraintDefinition:
		return CloneRefOfCheckConstraintDefinition(in)
	case *ForeignKeyDefinition:
		return CloneRefOfForeignKeyDefinition(in)
	default:
		// this should never happen
		return nil
	}
}

// CloneDBDDLStatement creates a deep clone of the input.
func CloneDBDDLStatement(in DBDDLStatement) DBDDLStatement {
	if in == nil {
		return nil
	}
	switch in := in.(type) {
	case *AlterDatabase:
		return CloneRefOfAlterDatabase(in)
	case *CreateDatabase:
		return CloneRefOfCreateDatabase(in)
	case *DropDatabase:
		return CloneRefOfDropDatabase(in)
	default:
		// this should never happen
		return nil
	}
}

// CloneDDLStatement creates a deep clone of the input.
func CloneDDLStatement(in DDLStatement) DDLStatement {
	if in == nil {
		return nil
	}
	// Dispatch on the concrete type behind the DDLStatement interface.
	switch in := in.(type) {
	case *AlterTable:
		return CloneRefOfAlterTable(in)
	case *AlterView:
		return CloneRefOfAlterView(in)
	case *CreateTable:
		return CloneRefOfCreateTable(in)
	case *CreateView:
		return CloneRefOfCreateView(in)
	case *DropTable:
		return CloneRefOfDropTable(in)
	case *DropView:
		return CloneRefOfDropView(in)
	case *RenameTable:
		return CloneRefOfRenameTable(in)
	case *TruncateTable:
		return CloneRefOfTruncateTable(in)
	default:
		// this should never happen
		return nil
	}
}

// CloneExplain creates a deep clone of the input.
func CloneExplain(in Explain) Explain {
	if in == nil {
		return nil
	}
	switch in := in.(type) {
	case *ExplainStmt:
		return CloneRefOfExplainStmt(in)
	case *ExplainTab:
		return CloneRefOfExplainTab(in)
	default:
		// this should never happen
		return nil
	}
}

// CloneExpr creates a deep clone of the input.
func CloneExpr(in Expr) Expr {
	if in == nil {
		return nil
	}
	// Dispatch on the concrete type behind the Expr interface. Non-pointer
	// cases (Argument, BoolVal, ListArg, Offset) are returned as-is.
	switch in := in.(type) {
	case *AndExpr:
		return CloneRefOfAndExpr(in)
	case Argument:
		return in
	case *BetweenExpr:
		return CloneRefOfBetweenExpr(in)
	case *BinaryExpr:
		return CloneRefOfBinaryExpr(in)
	case BoolVal:
		return in
	case *CaseExpr:
		return CloneRefOfCaseExpr(in)
	case *ColName:
		return CloneRefOfColName(in)
	case *CollateExpr:
		return CloneRefOfCollateExpr(in)
	case *ComparisonExpr:
		return CloneRefOfComparisonExpr(in)
	case *ConvertExpr:
		return CloneRefOfConvertExpr(in)
	case *ConvertUsingExpr:
		return CloneRefOfConvertUsingExpr(in)
	case *CurTimeFuncExpr:
		return CloneRefOfCurTimeFuncExpr(in)
	case *Default:
		return CloneRefOfDefault(in)
	case *ExistsExpr:
		return CloneRefOfExistsExpr(in)
	case *ExtractFuncExpr:
		return CloneRefOfExtractFuncExpr(in)
	case *ExtractedSubquery:
		return CloneRefOfExtractedSubquery(in)
	case *FuncExpr:
		return CloneRefOfFuncExpr(in)
	case *GroupConcatExpr:
		return CloneRefOfGroupConcatExpr(in)
	case *IntervalExpr:
		return CloneRefOfIntervalExpr(in)
	case *IntroducerExpr:
		return CloneRefOfIntroducerExpr(in)
	case *IsExpr:
		return CloneRefOfIsExpr(in)
	case *JSONArrayExpr:
		return CloneRefOfJSONArrayExpr(in)
	case *JSONAttributesExpr:
		return CloneRefOfJSONAttributesExpr(in)
	case *JSONContainsExpr:
		return CloneRefOfJSONContainsExpr(in)
	case *JSONContainsPathExpr:
		return CloneRefOfJSONContainsPathExpr(in)
	case *JSONExtractExpr:
		return CloneRefOfJSONExtractExpr(in)
	case *JSONKeysExpr:
		return CloneRefOfJSONKeysExpr(in)
	case *JSONObjectExpr:
		return CloneRefOfJSONObjectExpr(in)
	case *JSONOverlapsExpr:
		return CloneRefOfJSONOverlapsExpr(in)
	case *JSONPrettyExpr:
		return CloneRefOfJSONPrettyExpr(in)
	case *JSONQuoteExpr:
		return CloneRefOfJSONQuoteExpr(in)
	case *JSONRemoveExpr:
		return CloneRefOfJSONRemoveExpr(in)
	case *JSONSchemaValidFuncExpr:
		return CloneRefOfJSONSchemaValidFuncExpr(in)
	case *JSONSchemaValidationReportFuncExpr:
		return CloneRefOfJSONSchemaValidationReportFuncExpr(in)
	case *JSONSearchExpr:
		return CloneRefOfJSONSearchExpr(in)
	case *JSONStorageFreeExpr:
		return CloneRefOfJSONStorageFreeExpr(in)
	case *JSONStorageSizeExpr:
		return CloneRefOfJSONStorageSizeExpr(in)
	case *JSONUnquoteExpr:
		return CloneRefOfJSONUnquoteExpr(in)
	case *JSONValueExpr:
		return CloneRefOfJSONValueExpr(in)
	case *JSONValueMergeExpr:
		return CloneRefOfJSONValueMergeExpr(in)
	case *JSONValueModifierExpr:
		return CloneRefOfJSONValueModifierExpr(in)
	case ListArg:
		return in
	case *Literal:
		return CloneRefOfLiteral(in)
	case *MatchExpr:
		return CloneRefOfMatchExpr(in)
	case *MemberOfExpr:
		return CloneRefOfMemberOfExpr(in)
	case *NotExpr:
		return CloneRefOfNotExpr(in)
	case *NullVal:
		return CloneRefOfNullVal(in)
	case Offset:
		return in
	case *OrExpr:
		return CloneRefOfOrExpr(in)
	case *Subquery:
		return CloneRefOfSubquery(in)
	case *SubstrExpr:
		return CloneRefOfSubstrExpr(in)
	case *TimestampFuncExpr:
		return CloneRefOfTimestampFuncExpr(in)
	case *TrimFuncExpr:
		return CloneRefOfTrimFuncExpr(in)
	case *UnaryExpr:
		return CloneRefOfUnaryExpr(in)
	case ValTuple:
		return CloneValTuple(in)
	case *ValuesFuncExpr:
		return CloneRefOfValuesFuncExpr(in)
	case *WeightStringFuncExpr:
		return CloneRefOfWeightStringFuncExpr(in)
	case *XorExpr:
		return CloneRefOfXorExpr(in)
	default:
		// this should never happen
		return nil
	}
}

// CloneInsertRows creates a deep clone of the input.
func CloneInsertRows(in InsertRows) InsertRows {
	if in == nil {
		return nil
	}
	switch in := in.(type) {
	case *Select:
		return CloneRefOfSelect(in)
	case *Union:
		return CloneRefOfUnion(in)
	case Values:
		return CloneValues(in)
	default:
		// this should never happen
		return nil
	}
}

// CloneJSONPathParam creates a deep clone of the input.
func CloneJSONPathParam(in JSONPathParam) JSONPathParam {
	if in == nil {
		return nil
	}
	// Same dispatch set as CloneExpr: JSONPathParam accepts any expression.
	switch in := in.(type) {
	case *AndExpr:
		return CloneRefOfAndExpr(in)
	case Argument:
		return in
	case *BetweenExpr:
		return CloneRefOfBetweenExpr(in)
	case *BinaryExpr:
		return CloneRefOfBinaryExpr(in)
	case BoolVal:
		return in
	case *CaseExpr:
		return CloneRefOfCaseExpr(in)
	case *ColName:
		return CloneRefOfColName(in)
	case *CollateExpr:
		return CloneRefOfCollateExpr(in)
	case *ComparisonExpr:
		return CloneRefOfComparisonExpr(in)
	case *ConvertExpr:
		return CloneRefOfConvertExpr(in)
	case *ConvertUsingExpr:
		return CloneRefOfConvertUsingExpr(in)
	case *CurTimeFuncExpr:
		return CloneRefOfCurTimeFuncExpr(in)
	case *Default:
		return CloneRefOfDefault(in)
	case *ExistsExpr:
		return CloneRefOfExistsExpr(in)
	case *ExtractFuncExpr:
		return CloneRefOfExtractFuncExpr(in)
	case *ExtractedSubquery:
		return CloneRefOfExtractedSubquery(in)
	case *FuncExpr:
		return CloneRefOfFuncExpr(in)
	case *GroupConcatExpr:
		return CloneRefOfGroupConcatExpr(in)
	case *IntervalExpr:
		return CloneRefOfIntervalExpr(in)
	case *IntroducerExpr:
		return CloneRefOfIntroducerExpr(in)
	case *IsExpr:
		return CloneRefOfIsExpr(in)
	case *JSONArrayExpr:
		return CloneRefOfJSONArrayExpr(in)
	case *JSONAttributesExpr:
		return CloneRefOfJSONAttributesExpr(in)
	case *JSONContainsExpr:
		return CloneRefOfJSONContainsExpr(in)
	case *JSONContainsPathExpr:
		return CloneRefOfJSONContainsPathExpr(in)
	case *JSONExtractExpr:
		return CloneRefOfJSONExtractExpr(in)
	case *JSONKeysExpr:
		return CloneRefOfJSONKeysExpr(in)
	case *JSONObjectExpr:
		return CloneRefOfJSONObjectExpr(in)
	case *JSONOverlapsExpr:
		return CloneRefOfJSONOverlapsExpr(in)
	case *JSONPrettyExpr:
		return CloneRefOfJSONPrettyExpr(in)
	case *JSONQuoteExpr:
		return CloneRefOfJSONQuoteExpr(in)
	case *JSONRemoveExpr:
		return CloneRefOfJSONRemoveExpr(in)
	case *JSONSchemaValidFuncExpr:
		return CloneRefOfJSONSchemaValidFuncExpr(in)
	case *JSONSchemaValidationReportFuncExpr:
		return CloneRefOfJSONSchemaValidationReportFuncExpr(in)
	case *JSONSearchExpr:
		return CloneRefOfJSONSearchExpr(in)
	case *JSONStorageFreeExpr:
		return CloneRefOfJSONStorageFreeExpr(in)
	case *JSONStorageSizeExpr:
		return CloneRefOfJSONStorageSizeExpr(in)
	case *JSONUnquoteExpr:
		return CloneRefOfJSONUnquoteExpr(in)
	case *JSONValueExpr:
		return CloneRefOfJSONValueExpr(in)
	case *JSONValueMergeExpr:
		return CloneRefOfJSONValueMergeExpr(in)
	case *JSONValueModifierExpr:
		return CloneRefOfJSONValueModifierExpr(in)
	case ListArg:
		return in
	case *Literal:
		return CloneRefOfLiteral(in)
	case *MatchExpr:
		return CloneRefOfMatchExpr(in)
	case *MemberOfExpr:
		return CloneRefOfMemberOfExpr(in)
	case *NotExpr:
		return CloneRefOfNotExpr(in)
	case *NullVal:
		return CloneRefOfNullVal(in)
	case Offset:
		return in
	case *OrExpr:
		return CloneRefOfOrExpr(in)
	case *Subquery:
		return CloneRefOfSubquery(in)
	case *SubstrExpr:
		return CloneRefOfSubstrExpr(in)
	case *TimestampFuncExpr:
		return CloneRefOfTimestampFuncExpr(in)
	case *TrimFuncExpr:
		return CloneRefOfTrimFuncExpr(in)
	case *UnaryExpr:
		return CloneRefOfUnaryExpr(in)
	case ValTuple:
		return CloneValTuple(in)
	case *ValuesFuncExpr:
		return CloneRefOfValuesFuncExpr(in)
	case *WeightStringFuncExpr:
		return CloneRefOfWeightStringFuncExpr(in)
	case *XorExpr:
		return CloneRefOfXorExpr(in)
	default:
		// this should never happen
		return nil
	}
}

// CloneSelectExpr creates a deep clone of the input.
func CloneSelectExpr(in SelectExpr) SelectExpr { if in == nil { return nil } switch in := in.(type) { case *AliasedExpr: return CloneRefOfAliasedExpr(in) case *Nextval: return CloneRefOfNextval(in) case *StarExpr: return CloneRefOfStarExpr(in) default: // this should never happen return nil } } // CloneSelectStatement creates a deep clone of the input. func CloneSelectStatement(in SelectStatement) SelectStatement { if in == nil { return nil } switch in := in.(type) { case *Select: return CloneRefOfSelect(in) case *Union: return CloneRefOfUnion(in) default: // this should never happen return nil } } // CloneShowInternal creates a deep clone of the input. func CloneShowInternal(in ShowInternal) ShowInternal { if in == nil { return nil } switch in := in.(type) { case *ShowBasic: return CloneRefOfShowBasic(in) case *ShowCreate: return CloneRefOfShowCreate(in) case *ShowOther: return CloneRefOfShowOther(in) default: // this should never happen return nil } } // CloneSimpleTableExpr creates a deep clone of the input. func CloneSimpleTableExpr(in SimpleTableExpr) SimpleTableExpr { if in == nil { return nil } switch in := in.(type) { case *DerivedTable: return CloneRefOfDerivedTable(in) case TableName: return CloneTableName(in) default: // this should never happen return nil } } // CloneStatement creates a deep clone of the input. 
func CloneStatement(in Statement) Statement {
	if in == nil {
		return nil
	}
	// Dispatch on the concrete type behind the Statement interface.
	switch in := in.(type) {
	case *AlterDatabase:
		return CloneRefOfAlterDatabase(in)
	case *AlterMigration:
		return CloneRefOfAlterMigration(in)
	case *AlterTable:
		return CloneRefOfAlterTable(in)
	case *AlterView:
		return CloneRefOfAlterView(in)
	case *AlterVschema:
		return CloneRefOfAlterVschema(in)
	case *Begin:
		return CloneRefOfBegin(in)
	case *CallProc:
		return CloneRefOfCallProc(in)
	case *Commit:
		return CloneRefOfCommit(in)
	case *CreateDatabase:
		return CloneRefOfCreateDatabase(in)
	case *CreateTable:
		return CloneRefOfCreateTable(in)
	case *CreateView:
		return CloneRefOfCreateView(in)
	case *DeallocateStmt:
		return CloneRefOfDeallocateStmt(in)
	case *Delete:
		return CloneRefOfDelete(in)
	case *DropDatabase:
		return CloneRefOfDropDatabase(in)
	case *DropTable:
		return CloneRefOfDropTable(in)
	case *DropView:
		return CloneRefOfDropView(in)
	case *ExecuteStmt:
		return CloneRefOfExecuteStmt(in)
	case *ExplainStmt:
		return CloneRefOfExplainStmt(in)
	case *ExplainTab:
		return CloneRefOfExplainTab(in)
	case *Flush:
		return CloneRefOfFlush(in)
	case *Insert:
		return CloneRefOfInsert(in)
	case *Load:
		return CloneRefOfLoad(in)
	case *LockTables:
		return CloneRefOfLockTables(in)
	case *OtherAdmin:
		return CloneRefOfOtherAdmin(in)
	case *OtherRead:
		return CloneRefOfOtherRead(in)
	case *PrepareStmt:
		return CloneRefOfPrepareStmt(in)
	case *Release:
		return CloneRefOfRelease(in)
	case *RenameTable:
		return CloneRefOfRenameTable(in)
	case *RevertMigration:
		return CloneRefOfRevertMigration(in)
	case *Rollback:
		return CloneRefOfRollback(in)
	case *SRollback:
		return CloneRefOfSRollback(in)
	case *Savepoint:
		return CloneRefOfSavepoint(in)
	case *Select:
		return CloneRefOfSelect(in)
	case *Set:
		return CloneRefOfSet(in)
	case *SetTransaction:
		return CloneRefOfSetTransaction(in)
	case *Show:
		return CloneRefOfShow(in)
	case *ShowMigrationLogs:
		return CloneRefOfShowMigrationLogs(in)
	case *Stream:
		return CloneRefOfStream(in)
	case *TruncateTable:
		return CloneRefOfTruncateTable(in)
	case *Union:
		return CloneRefOfUnion(in)
	case *UnlockTables:
		return CloneRefOfUnlockTables(in)
	case *Update:
		return CloneRefOfUpdate(in)
	case *Use:
		return CloneRefOfUse(in)
	case *VStream:
		return CloneRefOfVStream(in)
	default:
		// this should never happen
		return nil
	}
}

// CloneTableExpr creates a deep clone of the input.
func CloneTableExpr(in TableExpr) TableExpr {
	if in == nil {
		return nil
	}
	switch in := in.(type) {
	case *AliasedTableExpr:
		return CloneRefOfAliasedTableExpr(in)
	case *JSONTableExpr:
		return CloneRefOfJSONTableExpr(in)
	case *JoinTableExpr:
		return CloneRefOfJoinTableExpr(in)
	case *ParenTableExpr:
		return CloneRefOfParenTableExpr(in)
	default:
		// this should never happen
		return nil
	}
}

// CloneSliceOfRefOfColumnDefinition creates a deep clone of the input.
func CloneSliceOfRefOfColumnDefinition(n []*ColumnDefinition) []*ColumnDefinition {
	if n == nil {
		return nil
	}
	res := make([]*ColumnDefinition, 0, len(n))
	for _, x := range n {
		res = append(res, CloneRefOfColumnDefinition(x))
	}
	return res
}

// CloneSliceOfDatabaseOption creates a deep clone of the input.
func CloneSliceOfDatabaseOption(n []DatabaseOption) []DatabaseOption {
	if n == nil {
		return nil
	}
	res := make([]DatabaseOption, 0, len(n))
	for _, x := range n {
		res = append(res, CloneDatabaseOption(x))
	}
	return res
}

// CloneSliceOfAlterOption creates a deep clone of the input.
func CloneSliceOfAlterOption(n []AlterOption) []AlterOption {
	if n == nil {
		return nil
	}
	res := make([]AlterOption, 0, len(n))
	for _, x := range n {
		res = append(res, CloneAlterOption(x))
	}
	return res
}

// CloneSliceOfColIdent creates a deep clone of the input.
func CloneSliceOfColIdent(n []ColIdent) []ColIdent {
	if n == nil {
		return nil
	}
	res := make([]ColIdent, 0, len(n))
	for _, x := range n {
		res = append(res, CloneColIdent(x))
	}
	return res
}

// CloneSliceOfRefOfWhen creates a deep clone of the input.
func CloneSliceOfRefOfWhen(n []*When) []*When { if n == nil { return nil } res := make([]*When, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfWhen(x)) } return res } // CloneRefOfColIdent creates a deep clone of the input. func CloneRefOfColIdent(n *ColIdent) *ColIdent { if n == nil { return nil } out := *n return &out } // CloneColumnType creates a deep clone of the input. func CloneColumnType(n ColumnType) ColumnType { return *CloneRefOfColumnType(&n) } // CloneRefOfColumnTypeOptions creates a deep clone of the input. func CloneRefOfColumnTypeOptions(n *ColumnTypeOptions) *ColumnTypeOptions { if n == nil { return nil } out := *n out.Null = CloneRefOfBool(n.Null) out.Default = CloneExpr(n.Default) out.OnUpdate = CloneExpr(n.OnUpdate) out.As = CloneExpr(n.As) out.Comment = CloneRefOfLiteral(n.Comment) out.Reference = CloneRefOfReferenceDefinition(n.Reference) out.Invisible = CloneRefOfBool(n.Invisible) out.EngineAttribute = CloneRefOfLiteral(n.EngineAttribute) out.SecondaryEngineAttribute = CloneRefOfLiteral(n.SecondaryEngineAttribute) out.SRID = CloneRefOfLiteral(n.SRID) return &out } // CloneSliceOfString creates a deep clone of the input. func CloneSliceOfString(n []string) []string { if n == nil { return nil } res := make([]string, 0, len(n)) copy(res, n) return res } // CloneSliceOfRefOfIndexColumn creates a deep clone of the input. func CloneSliceOfRefOfIndexColumn(n []*IndexColumn) []*IndexColumn { if n == nil { return nil } res := make([]*IndexColumn, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfIndexColumn(x)) } return res } // CloneSliceOfRefOfIndexOption creates a deep clone of the input. func CloneSliceOfRefOfIndexOption(n []*IndexOption) []*IndexOption { if n == nil { return nil } res := make([]*IndexOption, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfIndexOption(x)) } return res } // CloneSliceOfJSONPathParam creates a deep clone of the input. 
func CloneSliceOfJSONPathParam(n []JSONPathParam) []JSONPathParam { if n == nil { return nil } res := make([]JSONPathParam, 0, len(n)) for _, x := range n { res = append(res, CloneJSONPathParam(x)) } return res } // CloneSliceOfRefOfJSONObjectParam creates a deep clone of the input. func CloneSliceOfRefOfJSONObjectParam(n []*JSONObjectParam) []*JSONObjectParam { if n == nil { return nil } res := make([]*JSONObjectParam, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfJSONObjectParam(x)) } return res } // CloneRefOfJSONObjectParam creates a deep clone of the input. func CloneRefOfJSONObjectParam(n *JSONObjectParam) *JSONObjectParam { if n == nil { return nil } out := *n out.Key = CloneExpr(n.Key) out.Value = CloneExpr(n.Value) return &out } // CloneSliceOfRefOfJtColumnDefinition creates a deep clone of the input. func CloneSliceOfRefOfJtColumnDefinition(n []*JtColumnDefinition) []*JtColumnDefinition { if n == nil { return nil } res := make([]*JtColumnDefinition, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfJtColumnDefinition(x)) } return res } // CloneRefOfJtOrdinalColDef creates a deep clone of the input. func CloneRefOfJtOrdinalColDef(n *JtOrdinalColDef) *JtOrdinalColDef { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) return &out } // CloneRefOfJtPathColDef creates a deep clone of the input. func CloneRefOfJtPathColDef(n *JtPathColDef) *JtPathColDef { if n == nil { return nil } out := *n out.Name = CloneColIdent(n.Name) out.Type = CloneColumnType(n.Type) out.Path = CloneExpr(n.Path) out.EmptyOnResponse = CloneRefOfJtOnResponse(n.EmptyOnResponse) out.ErrorOnResponse = CloneRefOfJtOnResponse(n.ErrorOnResponse) return &out } // CloneRefOfJtNestedPathColDef creates a deep clone of the input. 
func CloneRefOfJtNestedPathColDef(n *JtNestedPathColDef) *JtNestedPathColDef { if n == nil { return nil } out := *n out.Path = CloneExpr(n.Path) out.Columns = CloneSliceOfRefOfJtColumnDefinition(n.Columns) return &out } // CloneTableAndLockTypes creates a deep clone of the input. func CloneTableAndLockTypes(n TableAndLockTypes) TableAndLockTypes { if n == nil { return nil } res := make(TableAndLockTypes, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfTableAndLockType(x)) } return res } // CloneComments creates a deep clone of the input. func CloneComments(n Comments) Comments { if n == nil { return nil } res := make(Comments, 0, len(n)) for _, x := range n { res = append(res, x) } return res } // CloneRefOfInt creates a deep clone of the input. func CloneRefOfInt(n *int) *int { if n == nil { return nil } out := *n return &out } // CloneSliceOfRefOfPartitionDefinition creates a deep clone of the input. func CloneSliceOfRefOfPartitionDefinition(n []*PartitionDefinition) []*PartitionDefinition { if n == nil { return nil } res := make([]*PartitionDefinition, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfPartitionDefinition(x)) } return res } // CloneSliceOfRefOfRenameTablePair creates a deep clone of the input. func CloneSliceOfRefOfRenameTablePair(n []*RenameTablePair) []*RenameTablePair { if n == nil { return nil } res := make([]*RenameTablePair, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfRenameTablePair(x)) } return res } // CloneRefOfRootNode creates a deep clone of the input. func CloneRefOfRootNode(n *RootNode) *RootNode { if n == nil { return nil } out := *n out.SQLNode = CloneSQLNode(n.SQLNode) return &out } // CloneRefOfBool creates a deep clone of the input. func CloneRefOfBool(n *bool) *bool { if n == nil { return nil } out := *n return &out } // CloneSliceOfTableExpr creates a deep clone of the input. 
func CloneSliceOfTableExpr(n []TableExpr) []TableExpr { if n == nil { return nil } res := make([]TableExpr, 0, len(n)) for _, x := range n { res = append(res, CloneTableExpr(x)) } return res } // CloneSliceOfCharacteristic creates a deep clone of the input. func CloneSliceOfCharacteristic(n []Characteristic) []Characteristic { if n == nil { return nil } res := make([]Characteristic, 0, len(n)) for _, x := range n { res = append(res, CloneCharacteristic(x)) } return res } // CloneRefOfTableIdent creates a deep clone of the input. func CloneRefOfTableIdent(n *TableIdent) *TableIdent { if n == nil { return nil } out := *n return &out } // CloneRefOfTableName creates a deep clone of the input. func CloneRefOfTableName(n *TableName) *TableName { if n == nil { return nil } out := *n out.Name = CloneTableIdent(n.Name) out.Qualifier = CloneTableIdent(n.Qualifier) return &out } // CloneRefOfTableOption creates a deep clone of the input. func CloneRefOfTableOption(n *TableOption) *TableOption { if n == nil { return nil } out := *n out.Value = CloneRefOfLiteral(n.Value) out.Tables = CloneTableNames(n.Tables) return &out } // CloneSliceOfRefOfIndexDefinition creates a deep clone of the input. func CloneSliceOfRefOfIndexDefinition(n []*IndexDefinition) []*IndexDefinition { if n == nil { return nil } res := make([]*IndexDefinition, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfIndexDefinition(x)) } return res } // CloneSliceOfRefOfConstraintDefinition creates a deep clone of the input. func CloneSliceOfRefOfConstraintDefinition(n []*ConstraintDefinition) []*ConstraintDefinition { if n == nil { return nil } res := make([]*ConstraintDefinition, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfConstraintDefinition(x)) } return res } // CloneRefOfVindexParam creates a deep clone of the input. 
func CloneRefOfVindexParam(n *VindexParam) *VindexParam { if n == nil { return nil } out := *n out.Key = CloneColIdent(n.Key) return &out } // CloneSliceOfVindexParam creates a deep clone of the input. func CloneSliceOfVindexParam(n []VindexParam) []VindexParam { if n == nil { return nil } res := make([]VindexParam, 0, len(n)) for _, x := range n { res = append(res, CloneVindexParam(x)) } return res } // CloneSliceOfRefOfCommonTableExpr creates a deep clone of the input. func CloneSliceOfRefOfCommonTableExpr(n []*CommonTableExpr) []*CommonTableExpr { if n == nil { return nil } res := make([]*CommonTableExpr, 0, len(n)) for _, x := range n { res = append(res, CloneRefOfCommonTableExpr(x)) } return res } // CloneDatabaseOption creates a deep clone of the input. func CloneDatabaseOption(n DatabaseOption) DatabaseOption { return *CloneRefOfDatabaseOption(&n) } // CloneRefOfIndexColumn creates a deep clone of the input. func CloneRefOfIndexColumn(n *IndexColumn) *IndexColumn { if n == nil { return nil } out := *n out.Column = CloneColIdent(n.Column) out.Length = CloneRefOfLiteral(n.Length) out.Expression = CloneExpr(n.Expression) return &out } // CloneRefOfIndexOption creates a deep clone of the input. func CloneRefOfIndexOption(n *IndexOption) *IndexOption { if n == nil { return nil } out := *n out.Value = CloneRefOfLiteral(n.Value) return &out } // CloneRefOfTableAndLockType creates a deep clone of the input. func CloneRefOfTableAndLockType(n *TableAndLockType) *TableAndLockType { if n == nil { return nil } out := *n out.Table = CloneTableExpr(n.Table) return &out } // CloneRefOfRenameTablePair creates a deep clone of the input. func CloneRefOfRenameTablePair(n *RenameTablePair) *RenameTablePair { if n == nil { return nil } out := *n out.FromTable = CloneTableName(n.FromTable) out.ToTable = CloneTableName(n.ToTable) return &out } // CloneRefOfDatabaseOption creates a deep clone of the input. 
func CloneRefOfDatabaseOption(n *DatabaseOption) *DatabaseOption { if n == nil { return nil } out := *n return &out }
go/vt/sqlparser/ast_clone.go
0.509276
0.533458
ast_clone.go
starcoder
package repository import ( "api/entity" "api/model" "context" "log" "reflect" "golang.org/x/xerrors" ) type WorldMock struct { expect *WorldExpect } func (r *WorldMock) EXPECT() *WorldExpect { return r.expect } func NewWorldMock() *WorldMock { return &WorldMock{expect: NewWorldExpect()} } type WorldToModelExpect struct { expect *WorldExpect isOutOfOrder bool isAnyTimes bool requiredTimes int calledTimes int actions []func(value *entity.World) value *entity.World r0 *model.World } func (r *WorldToModelExpect) Return(r0 *model.World) *WorldToModelExpect { r.r0 = r0 return r } func (r *WorldToModelExpect) Do(action func(value *entity.World)) *WorldToModelExpect { r.actions = append(r.actions, action) return r } func (r *WorldToModelExpect) OutOfOrder() *WorldToModelExpect { r.isOutOfOrder = true return r } func (r *WorldToModelExpect) AnyTimes() *WorldToModelExpect { r.isAnyTimes = true return r } func (r *WorldToModelExpect) Times(n int) *WorldToModelExpect { r.requiredTimes = n return r } func (r *WorldMock) ToModel(value *entity.World) (r0 *model.World) { if len(r.expect.toModel) == 0 { log.Printf("cannot find mock method for World.ToModel") return } for _, exp := range r.expect.toModel { if !reflect.DeepEqual(exp.value, value) { continue } for _, action := range exp.actions { action(value) } if exp.isAnyTimes { r0 = exp.r0 return } if exp.requiredTimes > 1 && exp.calledTimes > exp.requiredTimes { log.Printf("invalid call times. 
requiredTimes: [%d] calledTimes: [%d]", exp.requiredTimes, exp.calledTimes) return } exp.calledTimes++ r0 = exp.r0 return } log.Printf("invalid argument World value:[%+v]", value) return } func (r *WorldExpect) ToModel(value *entity.World) *WorldToModelExpect { exp := &WorldToModelExpect{ actions: []func(value *entity.World){}, expect: r, value: value, } r.toModel = append(r.toModel, exp) return exp } type WorldToModelsExpect struct { expect *WorldExpect isOutOfOrder bool isAnyTimes bool requiredTimes int calledTimes int actions []func(values entity.Worlds) values entity.Worlds r0 *model.Worlds } func (r *WorldToModelsExpect) Return(r0 *model.Worlds) *WorldToModelsExpect { r.r0 = r0 return r } func (r *WorldToModelsExpect) Do(action func(values entity.Worlds)) *WorldToModelsExpect { r.actions = append(r.actions, action) return r } func (r *WorldToModelsExpect) OutOfOrder() *WorldToModelsExpect { r.isOutOfOrder = true return r } func (r *WorldToModelsExpect) AnyTimes() *WorldToModelsExpect { r.isAnyTimes = true return r } func (r *WorldToModelsExpect) Times(n int) *WorldToModelsExpect { r.requiredTimes = n return r } func (r *WorldMock) ToModels(values entity.Worlds) (r0 *model.Worlds) { if len(r.expect.toModels) == 0 { log.Printf("cannot find mock method for World.ToModels") return } for _, exp := range r.expect.toModels { if !reflect.DeepEqual(exp.values, values) { continue } for _, action := range exp.actions { action(values) } if exp.isAnyTimes { r0 = exp.r0 return } if exp.requiredTimes > 1 && exp.calledTimes > exp.requiredTimes { log.Printf("invalid call times. 
requiredTimes: [%d] calledTimes: [%d]", exp.requiredTimes, exp.calledTimes) return } exp.calledTimes++ r0 = exp.r0 return } log.Printf("invalid argument World values:[%+v]", values) return } func (r *WorldExpect) ToModels(values entity.Worlds) *WorldToModelsExpect { exp := &WorldToModelsExpect{ actions: []func(values entity.Worlds){}, expect: r, values: values, } r.toModels = append(r.toModels, exp) return exp } type WorldCreateExpect struct { expect *WorldExpect isOutOfOrder bool isAnyTimes bool requiredTimes int calledTimes int actions []func(ctx context.Context, value *entity.World) ctx context.Context value *entity.World r0 *model.World r1 error } func (r *WorldCreateExpect) Return(r0 *model.World, r1 error) *WorldCreateExpect { r.r0 = r0 r.r1 = r1 return r } func (r *WorldCreateExpect) Do(action func(ctx context.Context, value *entity.World)) *WorldCreateExpect { r.actions = append(r.actions, action) return r } func (r *WorldCreateExpect) OutOfOrder() *WorldCreateExpect { r.isOutOfOrder = true return r } func (r *WorldCreateExpect) AnyTimes() *WorldCreateExpect { r.isAnyTimes = true return r } func (r *WorldCreateExpect) Times(n int) *WorldCreateExpect { r.requiredTimes = n return r } func (r *WorldMock) Create(ctx context.Context, value *entity.World) (r0 *model.World, r1 error) { if len(r.expect.create) == 0 { r1 = xerrors.New("cannot find mock method for World.Create") return } for _, exp := range r.expect.create { if !reflect.DeepEqual(exp.ctx, ctx) { continue } if !reflect.DeepEqual(exp.value, value) { continue } for _, action := range exp.actions { action(ctx, value) } if exp.isAnyTimes { r0 = exp.r0 r1 = exp.r1 return } if exp.requiredTimes > 1 && exp.calledTimes > exp.requiredTimes { r1 = xerrors.Errorf("invalid call times. 
requiredTimes: [%d] calledTimes: [%d]", exp.requiredTimes, exp.calledTimes) return } exp.calledTimes++ r0 = exp.r0 r1 = exp.r1 return } r1 = xerrors.Errorf("invalid argument World ctx:[%+v] value:[%+v]", ctx, value) return } func (r *WorldExpect) Create(ctx context.Context, value *entity.World) *WorldCreateExpect { exp := &WorldCreateExpect{ actions: []func(ctx context.Context, value *entity.World){}, ctx: ctx, expect: r, value: value, } r.create = append(r.create, exp) return exp } type WorldCreatesExpect struct { expect *WorldExpect isOutOfOrder bool isAnyTimes bool requiredTimes int calledTimes int actions []func(ctx context.Context, entities entity.Worlds) ctx context.Context entities entity.Worlds r0 *model.Worlds r1 error } func (r *WorldCreatesExpect) Return(r0 *model.Worlds, r1 error) *WorldCreatesExpect { r.r0 = r0 r.r1 = r1 return r } func (r *WorldCreatesExpect) Do(action func(ctx context.Context, entities entity.Worlds)) *WorldCreatesExpect { r.actions = append(r.actions, action) return r } func (r *WorldCreatesExpect) OutOfOrder() *WorldCreatesExpect { r.isOutOfOrder = true return r } func (r *WorldCreatesExpect) AnyTimes() *WorldCreatesExpect { r.isAnyTimes = true return r } func (r *WorldCreatesExpect) Times(n int) *WorldCreatesExpect { r.requiredTimes = n return r } func (r *WorldMock) Creates(ctx context.Context, entities entity.Worlds) (r0 *model.Worlds, r1 error) { if len(r.expect.creates) == 0 { r1 = xerrors.New("cannot find mock method for World.Creates") return } for _, exp := range r.expect.creates { if !reflect.DeepEqual(exp.ctx, ctx) { continue } if !reflect.DeepEqual(exp.entities, entities) { continue } for _, action := range exp.actions { action(ctx, entities) } if exp.isAnyTimes { r0 = exp.r0 r1 = exp.r1 return } if exp.requiredTimes > 1 && exp.calledTimes > exp.requiredTimes { r1 = xerrors.Errorf("invalid call times. 
requiredTimes: [%d] calledTimes: [%d]", exp.requiredTimes, exp.calledTimes) return } exp.calledTimes++ r0 = exp.r0 r1 = exp.r1 return } r1 = xerrors.Errorf("invalid argument World ctx:[%+v] entities:[%+v]", ctx, entities) return } func (r *WorldExpect) Creates(ctx context.Context, entities entity.Worlds) *WorldCreatesExpect { exp := &WorldCreatesExpect{ actions: []func(ctx context.Context, entities entity.Worlds){}, ctx: ctx, entities: entities, expect: r, } r.creates = append(r.creates, exp) return exp } type WorldFindAllExpect struct { expect *WorldExpect isOutOfOrder bool isAnyTimes bool requiredTimes int calledTimes int actions []func(a0 context.Context) a0 context.Context r0 *model.Worlds r1 error } func (r *WorldFindAllExpect) Return(r0 *model.Worlds, r1 error) *WorldFindAllExpect { r.r0 = r0 r.r1 = r1 return r } func (r *WorldFindAllExpect) Do(action func(a0 context.Context)) *WorldFindAllExpect { r.actions = append(r.actions, action) return r } func (r *WorldFindAllExpect) OutOfOrder() *WorldFindAllExpect { r.isOutOfOrder = true return r } func (r *WorldFindAllExpect) AnyTimes() *WorldFindAllExpect { r.isAnyTimes = true return r } func (r *WorldFindAllExpect) Times(n int) *WorldFindAllExpect { r.requiredTimes = n return r } func (r *WorldMock) FindAll(a0 context.Context) (r0 *model.Worlds, r1 error) { if len(r.expect.findAll) == 0 { r1 = xerrors.New("cannot find mock method for World.FindAll") return } for _, exp := range r.expect.findAll { if !reflect.DeepEqual(exp.a0, a0) { continue } for _, action := range exp.actions { action(a0) } if exp.isAnyTimes { r0 = exp.r0 r1 = exp.r1 return } if exp.requiredTimes > 1 && exp.calledTimes > exp.requiredTimes { r1 = xerrors.Errorf("invalid call times. 
requiredTimes: [%d] calledTimes: [%d]", exp.requiredTimes, exp.calledTimes) return } exp.calledTimes++ r0 = exp.r0 r1 = exp.r1 return } r1 = xerrors.Errorf("invalid argument World a0:[%+v]", a0) return } func (r *WorldExpect) FindAll(a0 context.Context) *WorldFindAllExpect { exp := &WorldFindAllExpect{ a0: a0, actions: []func(a0 context.Context){}, expect: r, } r.findAll = append(r.findAll, exp) return exp } type WorldFindByIDExpect struct { expect *WorldExpect isOutOfOrder bool isAnyTimes bool requiredTimes int calledTimes int actions []func(a0 context.Context, a1 uint64) a0 context.Context a1 uint64 r0 *model.World r1 error } func (r *WorldFindByIDExpect) Return(r0 *model.World, r1 error) *WorldFindByIDExpect { r.r0 = r0 r.r1 = r1 return r } func (r *WorldFindByIDExpect) Do(action func(a0 context.Context, a1 uint64)) *WorldFindByIDExpect { r.actions = append(r.actions, action) return r } func (r *WorldFindByIDExpect) OutOfOrder() *WorldFindByIDExpect { r.isOutOfOrder = true return r } func (r *WorldFindByIDExpect) AnyTimes() *WorldFindByIDExpect { r.isAnyTimes = true return r } func (r *WorldFindByIDExpect) Times(n int) *WorldFindByIDExpect { r.requiredTimes = n return r } func (r *WorldMock) FindByID(a0 context.Context, a1 uint64) (r0 *model.World, r1 error) { if len(r.expect.findByID) == 0 { r1 = xerrors.New("cannot find mock method for World.FindByID") return } for _, exp := range r.expect.findByID { if !reflect.DeepEqual(exp.a0, a0) { continue } if !reflect.DeepEqual(exp.a1, a1) { continue } for _, action := range exp.actions { action(a0, a1) } if exp.isAnyTimes { r0 = exp.r0 r1 = exp.r1 return } if exp.requiredTimes > 1 && exp.calledTimes > exp.requiredTimes { r1 = xerrors.Errorf("invalid call times. 
requiredTimes: [%d] calledTimes: [%d]", exp.requiredTimes, exp.calledTimes) return } exp.calledTimes++ r0 = exp.r0 r1 = exp.r1 return } r1 = xerrors.Errorf("invalid argument World a0:[%+v] a1:[%+v]", a0, a1) return } func (r *WorldExpect) FindByID(a0 context.Context, a1 uint64) *WorldFindByIDExpect { exp := &WorldFindByIDExpect{ a0: a0, a1: a1, actions: []func(a0 context.Context, a1 uint64){}, expect: r, } r.findByID = append(r.findByID, exp) return exp } type WorldFindByIDsExpect struct { expect *WorldExpect isOutOfOrder bool isAnyTimes bool requiredTimes int calledTimes int actions []func(a0 context.Context, a1 []uint64) a0 context.Context a1 []uint64 r0 *model.Worlds r1 error } func (r *WorldFindByIDsExpect) Return(r0 *model.Worlds, r1 error) *WorldFindByIDsExpect { r.r0 = r0 r.r1 = r1 return r } func (r *WorldFindByIDsExpect) Do(action func(a0 context.Context, a1 []uint64)) *WorldFindByIDsExpect { r.actions = append(r.actions, action) return r } func (r *WorldFindByIDsExpect) OutOfOrder() *WorldFindByIDsExpect { r.isOutOfOrder = true return r } func (r *WorldFindByIDsExpect) AnyTimes() *WorldFindByIDsExpect { r.isAnyTimes = true return r } func (r *WorldFindByIDsExpect) Times(n int) *WorldFindByIDsExpect { r.requiredTimes = n return r } func (r *WorldMock) FindByIDs(a0 context.Context, a1 []uint64) (r0 *model.Worlds, r1 error) { if len(r.expect.findByIDs) == 0 { r1 = xerrors.New("cannot find mock method for World.FindByIDs") return } for _, exp := range r.expect.findByIDs { if !reflect.DeepEqual(exp.a0, a0) { continue } if len(exp.a1) != len(a1) { continue } if exp.isOutOfOrder { isMatched := func() bool { for _, exp := range exp.a1 { found := false for idx, act := range a1 { if exp != act { continue } a1 = append(a1[:idx], a1[idx+1:]...) 
found = true break } if !found { return false } } return true }() if !isMatched { continue } } else { if !reflect.DeepEqual(exp.a1, a1) { continue } } for _, action := range exp.actions { action(a0, a1) } if exp.isAnyTimes { r0 = exp.r0 r1 = exp.r1 return } if exp.requiredTimes > 1 && exp.calledTimes > exp.requiredTimes { r1 = xerrors.Errorf("invalid call times. requiredTimes: [%d] calledTimes: [%d]", exp.requiredTimes, exp.calledTimes) return } exp.calledTimes++ r0 = exp.r0 r1 = exp.r1 return } r1 = xerrors.Errorf("invalid argument World a0:[%+v] a1:[%+v]", a0, a1) return } func (r *WorldExpect) FindByIDs(a0 context.Context, a1 []uint64) *WorldFindByIDsExpect { exp := &WorldFindByIDsExpect{ a0: a0, a1: a1, actions: []func(a0 context.Context, a1 []uint64){}, expect: r, } r.findByIDs = append(r.findByIDs, exp) return exp } type WorldUpdateByIDExpect struct { expect *WorldExpect isOutOfOrder bool isAnyTimes bool requiredTimes int calledTimes int actions []func(a0 context.Context, a1 uint64, a2 map[string]interface{}) a0 context.Context a1 uint64 a2 map[string]interface{} r0 error } func (r *WorldUpdateByIDExpect) Return(r0 error) *WorldUpdateByIDExpect { r.r0 = r0 return r } func (r *WorldUpdateByIDExpect) Do(action func(a0 context.Context, a1 uint64, a2 map[string]interface{})) *WorldUpdateByIDExpect { r.actions = append(r.actions, action) return r } func (r *WorldUpdateByIDExpect) OutOfOrder() *WorldUpdateByIDExpect { r.isOutOfOrder = true return r } func (r *WorldUpdateByIDExpect) AnyTimes() *WorldUpdateByIDExpect { r.isAnyTimes = true return r } func (r *WorldUpdateByIDExpect) Times(n int) *WorldUpdateByIDExpect { r.requiredTimes = n return r } func (r *WorldMock) UpdateByID(a0 context.Context, a1 uint64, a2 map[string]interface{}) (r0 error) { if len(r.expect.updateByID) == 0 { r0 = xerrors.New("cannot find mock method for World.UpdateByID") return } for _, exp := range r.expect.updateByID { if !reflect.DeepEqual(exp.a0, a0) { continue } if 
!reflect.DeepEqual(exp.a1, a1) { continue } if !reflect.DeepEqual(exp.a2, a2) { continue } for _, action := range exp.actions { action(a0, a1, a2) } if exp.isAnyTimes { r0 = exp.r0 return } if exp.requiredTimes > 1 && exp.calledTimes > exp.requiredTimes { r0 = xerrors.Errorf("invalid call times. requiredTimes: [%d] calledTimes: [%d]", exp.requiredTimes, exp.calledTimes) return } exp.calledTimes++ r0 = exp.r0 return } r0 = xerrors.Errorf("invalid argument World a0:[%+v] a1:[%+v] a2:[%+v]", a0, a1, a2) return } func (r *WorldExpect) UpdateByID(a0 context.Context, a1 uint64, a2 map[string]interface{}) *WorldUpdateByIDExpect { exp := &WorldUpdateByIDExpect{ a0: a0, a1: a1, a2: a2, actions: []func(a0 context.Context, a1 uint64, a2 map[string]interface{}){}, expect: r, } r.updateByID = append(r.updateByID, exp) return exp } type WorldUpdateByIDsExpect struct { expect *WorldExpect isOutOfOrder bool isAnyTimes bool requiredTimes int calledTimes int actions []func(a0 context.Context, a1 []uint64, a2 map[string]interface{}) a0 context.Context a1 []uint64 a2 map[string]interface{} r0 error } func (r *WorldUpdateByIDsExpect) Return(r0 error) *WorldUpdateByIDsExpect { r.r0 = r0 return r } func (r *WorldUpdateByIDsExpect) Do(action func(a0 context.Context, a1 []uint64, a2 map[string]interface{})) *WorldUpdateByIDsExpect { r.actions = append(r.actions, action) return r } func (r *WorldUpdateByIDsExpect) OutOfOrder() *WorldUpdateByIDsExpect { r.isOutOfOrder = true return r } func (r *WorldUpdateByIDsExpect) AnyTimes() *WorldUpdateByIDsExpect { r.isAnyTimes = true return r } func (r *WorldUpdateByIDsExpect) Times(n int) *WorldUpdateByIDsExpect { r.requiredTimes = n return r } func (r *WorldMock) UpdateByIDs(a0 context.Context, a1 []uint64, a2 map[string]interface{}) (r0 error) { if len(r.expect.updateByIDs) == 0 { r0 = xerrors.New("cannot find mock method for World.UpdateByIDs") return } for _, exp := range r.expect.updateByIDs { if !reflect.DeepEqual(exp.a0, a0) { continue } if 
len(exp.a1) != len(a1) { continue } if exp.isOutOfOrder { isMatched := func() bool { for _, exp := range exp.a1 { found := false for idx, act := range a1 { if exp != act { continue } a1 = append(a1[:idx], a1[idx+1:]...) found = true break } if !found { return false } } return true }() if !isMatched { continue } } else { if !reflect.DeepEqual(exp.a1, a1) { continue } } if !reflect.DeepEqual(exp.a2, a2) { continue } for _, action := range exp.actions { action(a0, a1, a2) } if exp.isAnyTimes { r0 = exp.r0 return } if exp.requiredTimes > 1 && exp.calledTimes > exp.requiredTimes { r0 = xerrors.Errorf("invalid call times. requiredTimes: [%d] calledTimes: [%d]", exp.requiredTimes, exp.calledTimes) return } exp.calledTimes++ r0 = exp.r0 return } r0 = xerrors.Errorf("invalid argument World a0:[%+v] a1:[%+v] a2:[%+v]", a0, a1, a2) return } func (r *WorldExpect) UpdateByIDs(a0 context.Context, a1 []uint64, a2 map[string]interface{}) *WorldUpdateByIDsExpect { exp := &WorldUpdateByIDsExpect{ a0: a0, a1: a1, a2: a2, actions: []func(a0 context.Context, a1 []uint64, a2 map[string]interface{}){}, expect: r, } r.updateByIDs = append(r.updateByIDs, exp) return exp } type WorldDeleteByIDExpect struct { expect *WorldExpect isOutOfOrder bool isAnyTimes bool requiredTimes int calledTimes int actions []func(a0 context.Context, a1 uint64) a0 context.Context a1 uint64 r0 error } func (r *WorldDeleteByIDExpect) Return(r0 error) *WorldDeleteByIDExpect { r.r0 = r0 return r } func (r *WorldDeleteByIDExpect) Do(action func(a0 context.Context, a1 uint64)) *WorldDeleteByIDExpect { r.actions = append(r.actions, action) return r } func (r *WorldDeleteByIDExpect) OutOfOrder() *WorldDeleteByIDExpect { r.isOutOfOrder = true return r } func (r *WorldDeleteByIDExpect) AnyTimes() *WorldDeleteByIDExpect { r.isAnyTimes = true return r } func (r *WorldDeleteByIDExpect) Times(n int) *WorldDeleteByIDExpect { r.requiredTimes = n return r } func (r *WorldMock) DeleteByID(a0 context.Context, a1 uint64) (r0 error) 
{ if len(r.expect.deleteByID) == 0 { r0 = xerrors.New("cannot find mock method for World.DeleteByID") return } for _, exp := range r.expect.deleteByID { if !reflect.DeepEqual(exp.a0, a0) { continue } if !reflect.DeepEqual(exp.a1, a1) { continue } for _, action := range exp.actions { action(a0, a1) } if exp.isAnyTimes { r0 = exp.r0 return } if exp.requiredTimes > 1 && exp.calledTimes > exp.requiredTimes { r0 = xerrors.Errorf("invalid call times. requiredTimes: [%d] calledTimes: [%d]", exp.requiredTimes, exp.calledTimes) return } exp.calledTimes++ r0 = exp.r0 return } r0 = xerrors.Errorf("invalid argument World a0:[%+v] a1:[%+v]", a0, a1) return } func (r *WorldExpect) DeleteByID(a0 context.Context, a1 uint64) *WorldDeleteByIDExpect { exp := &WorldDeleteByIDExpect{ a0: a0, a1: a1, actions: []func(a0 context.Context, a1 uint64){}, expect: r, } r.deleteByID = append(r.deleteByID, exp) return exp } type WorldDeleteByIDsExpect struct { expect *WorldExpect isOutOfOrder bool isAnyTimes bool requiredTimes int calledTimes int actions []func(a0 context.Context, a1 []uint64) a0 context.Context a1 []uint64 r0 error } func (r *WorldDeleteByIDsExpect) Return(r0 error) *WorldDeleteByIDsExpect { r.r0 = r0 return r } func (r *WorldDeleteByIDsExpect) Do(action func(a0 context.Context, a1 []uint64)) *WorldDeleteByIDsExpect { r.actions = append(r.actions, action) return r } func (r *WorldDeleteByIDsExpect) OutOfOrder() *WorldDeleteByIDsExpect { r.isOutOfOrder = true return r } func (r *WorldDeleteByIDsExpect) AnyTimes() *WorldDeleteByIDsExpect { r.isAnyTimes = true return r } func (r *WorldDeleteByIDsExpect) Times(n int) *WorldDeleteByIDsExpect { r.requiredTimes = n return r } func (r *WorldMock) DeleteByIDs(a0 context.Context, a1 []uint64) (r0 error) { if len(r.expect.deleteByIDs) == 0 { r0 = xerrors.New("cannot find mock method for World.DeleteByIDs") return } for _, exp := range r.expect.deleteByIDs { if !reflect.DeepEqual(exp.a0, a0) { continue } if len(exp.a1) != len(a1) { 
continue } if exp.isOutOfOrder { isMatched := func() bool { for _, exp := range exp.a1 { found := false for idx, act := range a1 { if exp != act { continue } a1 = append(a1[:idx], a1[idx+1:]...) found = true break } if !found { return false } } return true }() if !isMatched { continue } } else { if !reflect.DeepEqual(exp.a1, a1) { continue } } for _, action := range exp.actions { action(a0, a1) } if exp.isAnyTimes { r0 = exp.r0 return } if exp.requiredTimes > 1 && exp.calledTimes > exp.requiredTimes { r0 = xerrors.Errorf("invalid call times. requiredTimes: [%d] calledTimes: [%d]", exp.requiredTimes, exp.calledTimes) return } exp.calledTimes++ r0 = exp.r0 return } r0 = xerrors.Errorf("invalid argument World a0:[%+v] a1:[%+v]", a0, a1) return } func (r *WorldExpect) DeleteByIDs(a0 context.Context, a1 []uint64) *WorldDeleteByIDsExpect { exp := &WorldDeleteByIDsExpect{ a0: a0, a1: a1, actions: []func(a0 context.Context, a1 []uint64){}, expect: r, } r.deleteByIDs = append(r.deleteByIDs, exp) return exp } type WorldCountExpect struct { expect *WorldExpect isOutOfOrder bool isAnyTimes bool requiredTimes int calledTimes int actions []func(a0 context.Context) a0 context.Context r0 int64 r1 error } func (r *WorldCountExpect) Return(r0 int64, r1 error) *WorldCountExpect { r.r0 = r0 r.r1 = r1 return r } func (r *WorldCountExpect) Do(action func(a0 context.Context)) *WorldCountExpect { r.actions = append(r.actions, action) return r } func (r *WorldCountExpect) OutOfOrder() *WorldCountExpect { r.isOutOfOrder = true return r } func (r *WorldCountExpect) AnyTimes() *WorldCountExpect { r.isAnyTimes = true return r } func (r *WorldCountExpect) Times(n int) *WorldCountExpect { r.requiredTimes = n return r } func (r *WorldMock) Count(a0 context.Context) (r0 int64, r1 error) { if len(r.expect.count) == 0 { r1 = xerrors.New("cannot find mock method for World.Count") return } for _, exp := range r.expect.count { if !reflect.DeepEqual(exp.a0, a0) { continue } for _, action := range 
exp.actions { action(a0) } if exp.isAnyTimes { r0 = exp.r0 r1 = exp.r1 return } if exp.requiredTimes > 1 && exp.calledTimes > exp.requiredTimes { r1 = xerrors.Errorf("invalid call times. requiredTimes: [%d] calledTimes: [%d]", exp.requiredTimes, exp.calledTimes) return } exp.calledTimes++ r0 = exp.r0 r1 = exp.r1 return } r1 = xerrors.Errorf("invalid argument World a0:[%+v]", a0) return } func (r *WorldExpect) Count(a0 context.Context) *WorldCountExpect { exp := &WorldCountExpect{ a0: a0, actions: []func(a0 context.Context){}, expect: r, } r.count = append(r.count, exp) return exp } type WorldDeleteExpect struct { expect *WorldExpect isOutOfOrder bool isAnyTimes bool requiredTimes int calledTimes int actions []func(a0 context.Context, a1 *entity.World) a0 context.Context a1 *entity.World r0 error } func (r *WorldDeleteExpect) Return(r0 error) *WorldDeleteExpect { r.r0 = r0 return r } func (r *WorldDeleteExpect) Do(action func(a0 context.Context, a1 *entity.World)) *WorldDeleteExpect { r.actions = append(r.actions, action) return r } func (r *WorldDeleteExpect) OutOfOrder() *WorldDeleteExpect { r.isOutOfOrder = true return r } func (r *WorldDeleteExpect) AnyTimes() *WorldDeleteExpect { r.isAnyTimes = true return r } func (r *WorldDeleteExpect) Times(n int) *WorldDeleteExpect { r.requiredTimes = n return r } func (r *WorldMock) Delete(a0 context.Context, a1 *entity.World) (r0 error) { if len(r.expect.delete) == 0 { r0 = xerrors.New("cannot find mock method for World.Delete") return } for _, exp := range r.expect.delete { if !reflect.DeepEqual(exp.a0, a0) { continue } if !reflect.DeepEqual(exp.a1, a1) { continue } for _, action := range exp.actions { action(a0, a1) } if exp.isAnyTimes { r0 = exp.r0 return } if exp.requiredTimes > 1 && exp.calledTimes > exp.requiredTimes { r0 = xerrors.Errorf("invalid call times. 
requiredTimes: [%d] calledTimes: [%d]", exp.requiredTimes, exp.calledTimes) return } exp.calledTimes++ r0 = exp.r0 return } r0 = xerrors.Errorf("invalid argument World a0:[%+v] a1:[%+v]", a0, a1) return } func (r *WorldExpect) Delete(a0 context.Context, a1 *entity.World) *WorldDeleteExpect { exp := &WorldDeleteExpect{ a0: a0, a1: a1, actions: []func(a0 context.Context, a1 *entity.World){}, expect: r, } r.delete = append(r.delete, exp) return exp } type WorldUpdateExpect struct { expect *WorldExpect isOutOfOrder bool isAnyTimes bool requiredTimes int calledTimes int actions []func(a0 context.Context, a1 *entity.World) a0 context.Context a1 *entity.World r0 error } func (r *WorldUpdateExpect) Return(r0 error) *WorldUpdateExpect { r.r0 = r0 return r } func (r *WorldUpdateExpect) Do(action func(a0 context.Context, a1 *entity.World)) *WorldUpdateExpect { r.actions = append(r.actions, action) return r } func (r *WorldUpdateExpect) OutOfOrder() *WorldUpdateExpect { r.isOutOfOrder = true return r } func (r *WorldUpdateExpect) AnyTimes() *WorldUpdateExpect { r.isAnyTimes = true return r } func (r *WorldUpdateExpect) Times(n int) *WorldUpdateExpect { r.requiredTimes = n return r } func (r *WorldMock) Update(a0 context.Context, a1 *entity.World) (r0 error) { if len(r.expect.update) == 0 { r0 = xerrors.New("cannot find mock method for World.Update") return } for _, exp := range r.expect.update { if !reflect.DeepEqual(exp.a0, a0) { continue } if !reflect.DeepEqual(exp.a1, a1) { continue } for _, action := range exp.actions { action(a0, a1) } if exp.isAnyTimes { r0 = exp.r0 return } if exp.requiredTimes > 1 && exp.calledTimes > exp.requiredTimes { r0 = xerrors.Errorf("invalid call times. 
requiredTimes: [%d] calledTimes: [%d]", exp.requiredTimes, exp.calledTimes) return } exp.calledTimes++ r0 = exp.r0 return } r0 = xerrors.Errorf("invalid argument World a0:[%+v] a1:[%+v]", a0, a1) return } func (r *WorldExpect) Update(a0 context.Context, a1 *entity.World) *WorldUpdateExpect { exp := &WorldUpdateExpect{ a0: a0, a1: a1, actions: []func(a0 context.Context, a1 *entity.World){}, expect: r, } r.update = append(r.update, exp) return exp } type WorldExpect struct { toModel []*WorldToModelExpect toModels []*WorldToModelsExpect create []*WorldCreateExpect creates []*WorldCreatesExpect findAll []*WorldFindAllExpect findByID []*WorldFindByIDExpect findByIDs []*WorldFindByIDsExpect updateByID []*WorldUpdateByIDExpect updateByIDs []*WorldUpdateByIDsExpect deleteByID []*WorldDeleteByIDExpect deleteByIDs []*WorldDeleteByIDsExpect count []*WorldCountExpect delete []*WorldDeleteExpect update []*WorldUpdateExpect } func NewWorldExpect() *WorldExpect { return &WorldExpect{ count: []*WorldCountExpect{}, create: []*WorldCreateExpect{}, creates: []*WorldCreatesExpect{}, delete: []*WorldDeleteExpect{}, deleteByID: []*WorldDeleteByIDExpect{}, deleteByIDs: []*WorldDeleteByIDsExpect{}, findAll: []*WorldFindAllExpect{}, findByID: []*WorldFindByIDExpect{}, findByIDs: []*WorldFindByIDsExpect{}, toModel: []*WorldToModelExpect{}, toModels: []*WorldToModelsExpect{}, update: []*WorldUpdateExpect{}, updateByID: []*WorldUpdateByIDExpect{}, updateByIDs: []*WorldUpdateByIDsExpect{}, } }
_example/03_api/mock/repository/world.go
0.512449
0.447883
world.go
starcoder
package reward

import (
	abi "github.com/filecoin-project/specs-actors/actors/abi"
	big "github.com/filecoin-project/specs-actors/actors/abi/big"
)

// A quantity of space * time (in byte-epochs) representing power committed to the network for some duration.
type Spacetime = big.Int

// State tracks the cumulative baseline and realized power curves of the
// network and the per-epoch block reward derived from their relationship.
type State struct {
	// CumsumBaseline is a target CumsumRealized needs to reach for EffectiveNetworkTime to increase.
	// CumsumBaseline and CumsumRealized are expressed in byte-epochs.
	CumsumBaseline Spacetime

	// CumsumRealized is cumulative sum of network power capped by BaselinePower(epoch).
	CumsumRealized Spacetime

	// EffectiveNetworkTime is ceiling of real effective network time `theta` based on
	// CumsumBaselinePower(theta) == CumsumRealizedPower.
	// Theta captures the notion of how much the network has progressed in its baseline
	// and in advancing network time.
	EffectiveNetworkTime abi.ChainEpoch

	// The reward to be paid in per WinCount to block producers.
	// The actual reward total paid out depends on the number of winners in any round.
	// This value is recomputed every non-null epoch and used in the next non-null epoch.
	ThisEpochReward abi.TokenAmount

	// Epoch tracks for which epoch the Reward was computed.
	Epoch abi.ChainEpoch
}

// ConstructState returns a State primed for epoch 0: the cumulative sums
// start at zero and the first epoch's reward is computed immediately from
// the given realized power.
func ConstructState(currRealizedPower abi.StoragePower) *State {
	st := &State{
		CumsumBaseline:       big.Zero(),
		CumsumRealized:       big.Zero(),
		EffectiveNetworkTime: 0,

		ThisEpochReward: big.Zero(),
		// -1 so that the first update below lands on epoch 0.
		Epoch: -1,
	}

	st.updateToNextEpochWithReward(currRealizedPower)

	return st
}

// Takes in current realized power and updates internal state.
// Used for update of internal state during null rounds.
func (st *State) updateToNextEpoch(currRealizedPower abi.StoragePower) {
	st.Epoch++
	// Realized power counted toward the curve is capped at the epoch baseline.
	cappedRealizedPower := big.Min(BaselinePowerAt(st.Epoch), currRealizedPower)
	st.CumsumRealized = big.Add(st.CumsumRealized, cappedRealizedPower)

	// Advance effective network time until the baseline cumsum catches back
	// up with the realized cumsum.
	for st.CumsumRealized.GreaterThan(st.CumsumBaseline) {
		st.EffectiveNetworkTime++
		st.CumsumBaseline = big.Add(st.CumsumBaseline, BaselinePowerAt(st.EffectiveNetworkTime))
	}
}

// Takes in a current realized power for a reward epoch and computes
// and updates reward state to track reward for the next epoch.
func (st *State) updateToNextEpochWithReward(currRealizedPower abi.StoragePower) {
	prevRewardTheta := computeRTheta(st.EffectiveNetworkTime, st.CumsumRealized, st.CumsumBaseline)
	st.updateToNextEpoch(currRealizedPower)
	currRewardTheta := computeRTheta(st.EffectiveNetworkTime, st.CumsumRealized, st.CumsumBaseline)

	// The epoch reward is a function of the change in reward theta.
	st.ThisEpochReward = computeReward(st.Epoch, prevRewardTheta, currRewardTheta)
}
actors/builtin/reward/reward_state.go
0.797083
0.503662
reward_state.go
starcoder
package goyolov5

import (
	"fmt"
	"image"
	"image/color"
	"image/draw"
	"log"
	"unsafe"
)

// From https://github.com/pixiv/go-libjpeg/blob/master/rgb/rgb.go

// Tensor represent image data which has RGB colors.
// Tensor is compatible with image.RGBA, but does not have alpha channel to reduce using memory.
type Tensor struct {
	// Pix holds the image's stream, in R, G, B order.
	Pix []uint8
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect image.Rectangle
	// ctensor is the native tensor handle released by Drop.
	// NOTE(review): presumably a libtorch handle (see atFree/TorchErr) — confirm
	// against the Ctensor declaration.
	ctensor Ctensor
}

// NewTensor allocates and returns RGB image.
func NewTensor(r image.Rectangle) *Tensor {
	w, h := r.Dx(), r.Dy()
	return &Tensor{Pix: make([]uint8, 3*w*h), Stride: 3 * w, Rect: r}
}

// NewTensorFromImage copies an arbitrary image.Image into a freshly allocated
// RGB Tensor, pixel by pixel (the alpha channel is discarded by Set).
func NewTensorFromImage(i image.Image) *Tensor {
	r := i.Bounds()
	w, h := r.Dx(), r.Dy()
	newTensor := &Tensor{Pix: make([]uint8, 3*w*h), Stride: 3 * w, Rect: r}
	for y := 0; y < h; y++ {
		for x := 0; x < w; x++ {
			newTensor.Set(x, y, i.At(x, y))
		}
	}
	return newTensor
}

// ColorModel returns RGB color model.
func (p *Tensor) ColorModel() color.Model {
	return ColorModel
}

// Bounds implements image.Image.Bounds.
func (p *Tensor) Bounds() image.Rectangle {
	return p.Rect
}

// At implements image.Image.At.
func (p *Tensor) At(x, y int) color.Color {
	return p.RGBAAt(x, y)
}

// RGBAAt returns the color of the pixel at (x, y) as RGBA.
// Out-of-bounds coordinates yield the zero RGBA; alpha is always opaque (0xFF).
func (p *Tensor) RGBAAt(x, y int) color.RGBA {
	if !(image.Point{x, y}.In(p.Rect)) {
		return color.RGBA{}
	}
	i := (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*3
	return color.RGBA{p.Pix[i+0], p.Pix[i+1], p.Pix[i+2], 0xFF}
}

// ColorModel is RGB color model instance.
var ColorModel = color.ModelFunc(rgbModel)

// rgbModel converts any color.Color to RGB, truncating each 16-bit channel
// to its high 8 bits.
func rgbModel(c color.Color) color.Color {
	if _, ok := c.(RGB); ok {
		return c
	}
	r, g, b, _ := c.RGBA()
	return RGB{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8)}
}

// RGB color.
type RGB struct {
	R, G, B uint8
}

// RGBA implements Color.RGBA.
func (c RGB) RGBA() (r, g, b, a uint32) {
	r = uint32(c.R)
	r |= r << 8
	g = uint32(c.G)
	g |= g << 8
	b = uint32(c.B)
	b |= b << 8
	a = uint32(0xFFFF)
	return
}

// ToSquareShape pads the tensor to a square whose side is the larger of its
// two dimensions, returning the padded tensor and the horizontal/vertical
// padding added (extraX, extraY). The source content is placed at the origin.
func (p *Tensor) ToSquareShape() (*Tensor, int, int, error) {
	sb := p.Bounds()
	w := float64(sb.Max.X)
	h := float64(sb.Max.Y)
	fast := false
	if w > h {
		// Wider than tall: destination has the same width (hence the same
		// stride), so the source bytes map 1:1 onto the top rows.
		h = w
		fast = true
	} else {
		w = h
	}
	db := image.Rect(0, 0, int(w), int(h))
	// Now Center
	sw := sb.Max.X
	sh := sb.Max.Y
	dw := db.Max.X
	dh := db.Max.Y
	dr := image.Rect(0, 0, sw, sh)
	extraX := dw - sw
	extraY := dh - sh
	dst := NewTensor(db)
	// draw the src image onto dst
	if fast {
		// dst.Pix[0:len(p.Pix)] = p.Pix
		copy(dst.Pix, p.Pix)
	} else {
		draw.Draw(dst, dr, p, p.Bounds().Min, draw.Src)
	}
	return dst, extraX, extraY, nil
}

// Set writes the pixel at (x, y), converting c to RGBA and dropping alpha.
// Out-of-bounds writes are silently ignored.
func (p *Tensor) Set(x, y int, c color.Color) {
	if !(image.Point{x, y}.In(p.Rect)) {
		return
	}
	i := p.PixOffset(x, y)
	c1 := color.RGBAModel.Convert(c).(color.RGBA)
	s := p.Pix[i : i+3 : i+3] // Small cap improves performance, see https://golang.org/issue/27857
	s[0] = c1.R
	s[1] = c1.G
	s[2] = c1.B
}

// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y).
func (p *Tensor) PixOffset(x, y int) int {
	return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*3
}

// Resize downscales a square tensor to targetSize x targetSize using
// nearest-neighbour (centre) sampling, returning the new tensor and the
// scale ratio (source side / target side). Non-square inputs and upscaling
// are rejected with an error; empty or already-right-sized inputs are
// returned unchanged with ratio 1.0.
func (img *Tensor) Resize(targetSize int) (*Tensor, float64, error) {
	// Trivial case: return input image
	if int(targetSize) == img.Bounds().Dx() && int(targetSize) == img.Bounds().Dy() {
		return img, 1.0, nil
	}
	// Input image has no pixels
	if img.Bounds().Dx() <= 0 || img.Bounds().Dy() <= 0 {
		return img, 1.0, nil
	}
	if img.Bounds().Dx() != img.Bounds().Dy() {
		return nil, 0.0, fmt.Errorf("cannot resize a non-square tensor")
	}
	if targetSize > img.Bounds().Dx() {
		return nil, 0.0, fmt.Errorf("cannot upscale a tensor")
	}
	newTensor := NewTensor(image.Rect(0, 0, targetSize, targetSize))
	dr := newTensor.Bounds()
	adr := newTensor.Bounds()
	sr := img.Bounds()
	ratio := float64(sr.Dx()) / float64(dr.Dx())
	dw2 := uint64(dr.Dx()) * 2
	dh2 := uint64(dr.Dy()) * 2
	sw := uint64(sr.Dx())
	sh := uint64(sr.Dy())
	for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ {
		// Sample the source row at the centre of the destination row.
		sy := (2*uint64(dy) + 1) * sh / dh2
		d := (dr.Min.Y+int(dy)-newTensor.Rect.Min.Y)*newTensor.Stride + (dr.Min.X+adr.Min.X-newTensor.Rect.Min.X)*3
		for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+3 {
			sx := (2*uint64(dx) + 1) * sw / dw2
			pi := (sr.Min.Y+int(sy)-img.Rect.Min.Y)*img.Stride + (sr.Min.X+int(sx)-img.Rect.Min.X)*3
			// Widen to 16-bit and narrow back; preserves the 8-bit value.
			pr := uint32(img.Pix[pi+0]) * 0x101
			pg := uint32(img.Pix[pi+1]) * 0x101
			pb := uint32(img.Pix[pi+2]) * 0x101
			newTensor.Pix[d+0] = uint8(pr >> 8)
			newTensor.Pix[d+1] = uint8(pg >> 8)
			newTensor.Pix[d+2] = uint8(pb >> 8)
		}
	}
	return newTensor, ratio, nil
}

// HLine draws a horizontal line.
func (img *Tensor) HLine(x1, y, x2 int, col color.Color) {
	for ; x1 <= x2; x1++ {
		img.Set(x1, y, col)
	}
}

// VLine draws a veritcal line.
func (img *Tensor) VLine(x, y1, y2 int, col color.Color) {
	for ; y1 <= y2; y1++ {
		img.Set(x, y1, col)
	}
}

// DrawRect draws a rectangle outline utilizing HLine() and VLine().
func (img *Tensor) DrawRect(rect image.Rectangle, col color.Color) {
	img.HLine(rect.Min.X, rect.Min.Y, rect.Max.X, col)
	img.HLine(rect.Min.X, rect.Max.Y, rect.Max.X, col)
	img.VLine(rect.Min.X, rect.Min.Y, rect.Max.Y, col)
	img.VLine(rect.Max.X, rect.Min.Y, rect.Max.Y, col)
}

// Drop drops (frees) the tensor's native handle.
func (ts *Tensor) Drop() error {
	atFree(ts.ctensor)
	if err := TorchErr(); err != nil {
		return err
	}
	return nil
}

// Numel returns the total number of elements stored in a tensor.
// NOTE(review): log.Fatal aborts the process when Size fails — confirm this
// is acceptable to callers.
func (ts *Tensor) Numel() uint {
	shape, err := ts.Size()
	if err != nil {
		log.Fatal(err)
	}
	return uint(FlattenDim(shape))
}

// bytesToInt64s reinterprets buf as a []int64 without copying.
// NOTE(review): assumes machine-endian data and effectively uses whole
// 8-byte words (len(buf)/8); buffers beyond 2^32 bytes are truncated.
func bytesToInt64s(buf []byte) []int64 {
	if len(buf) < 1<<16 {
		return (*[1 << 13]int64)(unsafe.Pointer(&buf[0]))[0 : len(buf)/8 : len(buf)/8]
	}
	l := len(buf)
	if l > 1<<32 {
		// only use the first 2^32 bytes
		l = (1 << 32) - 1
	}
	return (*[1 << 29]int64)(unsafe.Pointer(&buf[0]))[0 : l/8 : l/8]
}

// FlattenDim counts number of elements with given shape.
func FlattenDim(shape []int64) int {
	n := int64(1)
	for _, d := range shape {
		n *= d
	}
	return int(n)
}

// Make sure Image implements image.Image.
// See https://golang.org/doc/effective_go.html#blank_implements.
var _ image.Image = new(Tensor)
tensor.go
0.864081
0.529263
tensor.go
starcoder
package stdlib type Type_bool bool type Type_uint256 int type Type_address int type Type_uint256arr []Type_uint256 type Txn struct { Balance Type_uint256 Value Type_uint256 DidTimeout bool From Type_address Data Msg } var Txn0 = Txn{ Balance: 0, Value: 0 } func Ite_uint256(c Type_bool, x Type_uint256, y Type_uint256) Type_uint256 { if c { return x } else { return y } } func Ite_bool(c Type_bool, x Type_bool, y Type_bool) Type_bool { if c { return x } else { return y } } func Eq(x Type_uint256, y Type_uint256) Type_bool { return x == y } func Add(x Type_uint256, y Type_uint256) Type_uint256 { return x + y } func Sub(x Type_uint256, y Type_uint256) Type_uint256 { return x - y } func Div(x Type_uint256, y Type_uint256) Type_uint256 { return x / y } func Mod(x Type_uint256, y Type_uint256) Type_uint256 { return x % y } func Gt(x Type_uint256, y Type_uint256) Type_bool { return x > y } func Lt(x Type_uint256, y Type_uint256) Type_bool { return x < y } func Keccak256(x Type_uint256, y Type_uint256) Type_uint256 { panic("XXX") } func Assert( b Type_bool ) { if b { return } else { panic("Assertion failed") } } type Msg int var Msg0 Msg = 0 func MsgEncode_uint256( m Msg, v Type_uint256 ) Msg { panic("XXX") } func MsgEncode_bool( m Msg, v Type_bool ) Msg { panic("XXX") } func MsgEncode_uint256arr( m Msg, v []Type_uint256 ) Msg { panic("XXX") } func MsgEncode_address( m Msg, v Type_address ) Msg { panic("XXX") } func MsgDecode_bool( m Msg, path []string ) Type_bool { panic("XXX") } func MsgDecode_uint256( m Msg, path []string ) Type_uint256 { panic("XXX") } func MsgDecode_uint256arr( m Msg, path []string ) []Type_uint256 { panic("XXX") } type Contract int func (c Contract) SendRecv(dbg string, m_name string, m Msg, amount Type_uint256, e_name string, delay Type_uint256, t_name string ) <-chan Txn { panic("XXX SendRecv") } func (c Contract) Recv(dbg string, e_name string, delay Type_uint256, to_me bool, to_m Msg, to_m_name string, to_e_name string ) <-chan Txn { panic("XXX 
Recv") }
go/src/reach-sh/stdlib/stdlib.go
0.635222
0.526586
stdlib.go
starcoder
package utils

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"
	"time"

	"github.com/prometheus/common/model"
)

// Selector is a Prometheus series/expression selector string.
type Selector string

// Range represents a sliced time range with increments.
type Range struct {
	// Start and End are the boundaries of the time range.
	Start, End model.Time
	// Step is the maximum time between two slices within the boundaries.
	Step time.Duration
}

// Client is a Prometheus client for the Prometheus HTTP API.
// The "timeout" parameter for the HTTP API is set based on the context's deadline,
// when present and applicable.
type Client interface {
	// Series lists the time series matching the given series selectors.
	Series(ctx context.Context, interval model.Interval, selectors ...Selector) ([]Series, error)
	// Query runs a non-range query at the given time.
	Query(ctx context.Context, t model.Time, query Selector) (QueryResult, error)
	// QueryRange runs a range query at the given time.
	QueryRange(ctx context.Context, r Range, query Selector) (QueryResult, error)
}

// QueryResult is the result of a query.
// Type will always be set, as well as one of the other fields, matching the type.
type QueryResult struct {
	Type   model.ValueType
	Vector *model.Vector
	Scalar *model.Scalar
	Matrix *model.Matrix
}

// UnmarshalJSON decodes the API's {resultType, result} envelope, dispatching
// the raw result payload into the field that matches the reported type.
func (qr *QueryResult) UnmarshalJSON(b []byte) error {
	v := struct {
		Type   model.ValueType `json:"resultType"`
		Result json.RawMessage `json:"result"`
	}{}
	err := json.Unmarshal(b, &v)
	if err != nil {
		return err
	}
	qr.Type = v.Type
	switch v.Type {
	case model.ValScalar:
		var sv model.Scalar
		err = json.Unmarshal(v.Result, &sv)
		qr.Scalar = &sv
	case model.ValVector:
		var vv model.Vector
		err = json.Unmarshal(v.Result, &vv)
		qr.Vector = &vv
	case model.ValMatrix:
		var mv model.Matrix
		err = json.Unmarshal(v.Result, &mv)
		qr.Matrix = &mv
	default:
		// Any other result type (e.g. string) is rejected.
		err = fmt.Errorf("unexpected value type %q", v.Type)
	}
	return err
}

// Series represents a description of a series: a name and a set of labels.
// Series is roughly equivalent to model.Metrics, but has easy access to name
// and the set of non-name labels.
type Series struct {
	Name   string
	Labels model.LabelSet
}

// UnmarshalJSON decodes a metric label map, splitting the __name__ label off
// into Name and keeping the remaining labels in Labels.
func (s *Series) UnmarshalJSON(data []byte) error {
	var rawMetric model.Metric
	err := json.Unmarshal(data, &rawMetric)
	if err != nil {
		return err
	}
	if name, ok := rawMetric[model.MetricNameLabel]; ok {
		s.Name = string(name)
		delete(rawMetric, model.MetricNameLabel)
	}
	s.Labels = model.LabelSet(rawMetric)
	return nil
}

// String renders the series in PromQL-like form: name{k="v",...}.
// NOTE: label order follows Go map iteration order and is therefore not
// stable between calls.
func (s *Series) String() string {
	lblStrings := make([]string, 0, len(s.Labels))
	for k, v := range s.Labels {
		lblStrings = append(lblStrings, fmt.Sprintf("%s=%q", k, v))
	}
	return fmt.Sprintf("%s{%s}", s.Name, strings.Join(lblStrings, ","))
}
pkg/utils/prometheus.go
0.67822
0.408926
prometheus.go
starcoder
package tween import ( "github.com/Dethrail/Tamagotchi/engine" //"math" //"time" ) func Scale(t *Tween, arr []float32) []float32 { scale := t.Target.Transform().Scale() if arr == nil || len(arr) == 0 { return []float32{scale.X, scale.Y, scale.Z} } scale = VectorFmt(scale, arr, t.Format) t.Target.Transform().SetScale(scale) return []float32{scale.X, scale.Y, scale.Z} } func Position(t *Tween, arr []float32) []float32 { pos := t.Target.Transform().Position() if arr == nil || len(arr) == 0 { return []float32{pos.X, pos.Y, pos.Z} } pos = VectorFmt(pos, arr, t.Format) t.Target.Transform().SetPosition(pos) return []float32{pos.X, pos.Y, pos.Z} } func Rotation(t *Tween, arr []float32) []float32 { rot := t.Target.Transform().Rotation() if arr == nil || len(arr) == 0 { return []float32{rot.X, rot.Y, rot.Z} } rot = VectorFmtRotation(rot, arr, t.Format) t.Target.Transform().SetRotation(rot) return []float32{rot.X, rot.Y, rot.Z} } func Color(t *Tween, arr []float32) []float32 { col := t.Target.Sprite.Color if t.Target.Sprite == nil { panic("Cannot run Color tween on none Sprite GameObjects") } if arr == nil || len(arr) == 0 { return []float32{col.R, col.G, col.B, col.A} } col = ColorFmt(col, arr, t.Format) t.Target.Sprite.Color = col return []float32{col.R, col.G, col.B, col.A} } func WorldScale(t *Tween, arr []float32) []float32 { scale := t.Target.Transform().WorldScale() if arr == nil || len(arr) == 0 { return []float32{scale.X, scale.Y, scale.Z} } scale = VectorFmt(scale, arr, t.Format) t.Target.Transform().SetWorldScale(scale) return []float32{scale.X, scale.Y, scale.Z} } func WorldPosition(t *Tween, arr []float32) []float32 { pos := t.Target.Transform().WorldPosition() if arr == nil || len(arr) == 0 { return []float32{pos.X, pos.Y, pos.Z} } pos = VectorFmt(pos, arr, t.Format) t.Target.Transform().SetWorldPosition(pos) return []float32{pos.X, pos.Y, pos.Z} } func WorldRotation(t *Tween, arr []float32) []float32 { rot := t.Target.Transform().WorldRotation() if arr == nil 
|| len(arr) == 0 { return []float32{rot.X, rot.Y, rot.Z} } rot = VectorFmtRotation(rot, arr, t.Format) t.Target.Transform().SetWorldRotation(rot) return []float32{rot.X, rot.Y, rot.Z} } func ColorFmt(v engine.Color, arr []float32, s string) engine.Color { if len(s) == 0 { if len(arr) > 3 { v.R = arr[0] v.G = arr[1] v.B = arr[2] v.A = arr[3] } else if len(arr) > 2 { v.R = arr[0] v.G = arr[1] v.B = arr[2] } else if len(arr) > 1 { v.R = arr[0] v.G = arr[1] } else { v.R = arr[0] } return v } if (len(s) <= 4 && len(s) >= 1) && len(arr) == 1 { for _, r := range s { switch r { case 'r', 'R': v.R = arr[0] case 'g', 'G': v.G = arr[0] case 'b', 'B': v.B = arr[0] case 'a', 'A': v.A = arr[0] } } return v } for i, r := range s { if i >= len(arr) { break } switch r { case 'r', 'R': v.R = arr[i] case 'g', 'G': v.G = arr[i] case 'b', 'B': v.B = arr[i] case 'a', 'A': v.A = arr[i] } } return v } func VectorFmt(v engine.Vector, arr []float32, s string) engine.Vector { if len(s) == 0 { if len(arr) > 2 { v.X = arr[0] v.Y = arr[1] v.Z = arr[2] } else if len(arr) > 1 { v.X = arr[0] v.Y = arr[1] } else { v.X = arr[0] } return v } if (len(s) <= 3 && len(s) >= 1) && len(arr) == 1 { for _, r := range s { switch r { case 'x', 'X': v.X = arr[0] case 'y', 'Y': v.Y = arr[0] case 'z', 'Z': v.Z = arr[0] } } return v } for i, r := range s { if i >= len(arr) { break } switch r { case 'x', 'X': v.X = arr[i] case 'y', 'Y': v.Y = arr[i] case 'z', 'Z': v.Z = arr[i] } } return v } func VectorFmtRotation(v engine.Vector, arr []float32, s string) engine.Vector { if len(s) == 0 && len(arr) == 1 { v.Z = arr[0] return v } return VectorFmt(v, arr, s) }
server/components/tween/Type.go
0.521715
0.530115
Type.go
starcoder
package geometry import ( "math" ) func CalculateMovementDistance(elapsedTimeInMs int64, distancePerMs float64, maxDistance *float64) float64 { var distance = float64(elapsedTimeInMs) * distancePerMs if nil != maxDistance { return math.Min(*maxDistance, distance) } return distance } type movementCallbackFunc func(direction Direction, distance float64, diagonalDistance float64) float64 func calculateMovement(elapsedTimeInMs int64, direction Direction, distancePerMs float64, maxDistance *float64, fn movementCallbackFunc) float64 { var distanceExact = CalculateMovementDistance(elapsedTimeInMs, distancePerMs, maxDistance) var diagonalDistance = math.Round(math.Sqrt((distanceExact * distanceExact) / 2)) var distance = math.Round(distanceExact) return fn(direction, distance, diagonalDistance) } func CalculateMovementX(elapsedTimeInMs int64, direction Direction, distancePerMs float64, maxDistance *float64) float64 { var fn movementCallbackFunc = func(direction Direction, distance float64, diagonalDistance float64) float64 { switch direction { case Up: return 0.0 case UpRight: return diagonalDistance case Right: return distance case DownRight: return diagonalDistance case Down: return 0 case DownLeft: return diagonalDistance * -1 case Left: return distance * -1 case UpLeft: return diagonalDistance * -1 default: return 0 } } return calculateMovement(elapsedTimeInMs, direction, distancePerMs, maxDistance, fn) } func CalculateMovementY(elapsedTimeInMs int64, direction Direction, distancePerMs float64, maxDistance *float64) float64 { var fn movementCallbackFunc = func(direction Direction, distance float64, diagonalDistance float64) float64 { switch direction { case Up: return distance * -1 case UpRight: return diagonalDistance * -1 case Right: return 0 case DownRight: return diagonalDistance case Down: return distance case DownLeft: return diagonalDistance case Left: return 0 case UpLeft: return diagonalDistance * -1 default: return 0 } } return 
calculateMovement(elapsedTimeInMs, direction, distancePerMs, maxDistance, fn) }
src/engine/geometry/movement.go
0.760384
0.753829
movement.go
starcoder
package organizr

import (
	"sort"

	"github.com/Viking2012/goraynor/src/structs"
)

// implementation basics from https://pkg.go.dev/sort#example-package-SortKeys

// lessFunc is the type of a "less" function that defines the ordering of its PurchaseRecord arguments.
type lessFunc func(p1, p2 *structs.PriceRecord) bool

// multiSorter implements the Sort interface, sorting the changes within.
type multiSorter struct {
	records []structs.PriceRecord
	less    []lessFunc
}

// Sort sorts the argument slice according to the less functions passed to OrderedBy.
func (ms *multiSorter) Sort(records []structs.PriceRecord) {
	ms.records = records
	sort.Sort(ms)
}

// OrderedBy returns a Sorter that sorts using the less functions, in order.
// Call its Sort method to sort the data.
func OrderedBy(less ...lessFunc) *multiSorter {
	return &multiSorter{
		less: less,
	}
}

// Len is part of sort.Interface.
func (ms *multiSorter) Len() int {
	return len(ms.records)
}

// Swap is part of sort.Interface.
func (ms *multiSorter) Swap(i, j int) {
	ms.records[i], ms.records[j] = ms.records[j], ms.records[i]
}

// Less is part of sort.Interface. It is implemented by looping along the
// less functions until it finds a comparison that discriminates between
// the two items (one is less than the other). Note that it can call the
// less functions twice per call. We could change the functions to return
// -1, 0, 1 and reduce the number of calls for greater efficiency: an
// exercise for the reader.
func (ms *multiSorter) Less(i, j int) bool {
	p, q := &ms.records[i], &ms.records[j]
	// Try all but the last comparison.
	var k int
	for k = 0; k < len(ms.less)-1; k++ {
		less := ms.less[k]
		switch {
		case less(p, q):
			// p < q, so we have a decision.
			return true
		case less(q, p):
			// p > q, so we have a decision.
			return false
		}
		// p == q; try the next comparison.
	}
	// All comparisons to here said "equal", so just return whatever
	// the final comparison reports.
	return ms.less[k](p, q)
}

// byUuid orders records by their UUID.
func byUuid(p1, p2 *structs.PriceRecord) bool {
	return p1.Uuid < p2.Uuid
}

// byProduct orders records by product identifier.
func byProduct(p1, p2 *structs.PriceRecord) bool {
	return p1.ProductID < p2.ProductID
}

// byCustomer orders records by customer identifier.
func byCustomer(p1, p2 *structs.PriceRecord) bool {
	return p1.CustomerID < p2.CustomerID
}

// byDate orders records chronologically by purchase date.
func byDate(p1, p2 *structs.PriceRecord) bool {
	return p1.PurchaseDate.Before(p2.PurchaseDate)
}

// byDocumentNumber orders records by document number.
func byDocumentNumber(p1, p2 *structs.PriceRecord) bool {
	return p1.DocumentNumber < p2.DocumentNumber
}

// byDocumentLineNumber orders records by line number within a document.
func byDocumentLineNumber(p1, p2 *structs.PriceRecord) bool {
	return p1.DocumentLineNumber < p2.DocumentLineNumber
}

// byPrice orders records by price, ascending.
func byPrice(p1, p2 *structs.PriceRecord) bool {
	return p1.Price < p2.Price
}

// Exported orderings for use as OrderedBy arguments.
var ByUuid lessFunc = byUuid
var ByProduct lessFunc = byProduct
var ByCustomer lessFunc = byCustomer
var ByDate lessFunc = byDate
var ByDocumentNumber lessFunc = byDocumentNumber
var ByDocumentLineNumber lessFunc = byDocumentLineNumber
var ByPrice lessFunc = byPrice
src/organizr/organizr.go
0.735926
0.560674
organizr.go
starcoder
package number

import (
	"github.com/shopspring/decimal"
)

const (
	// presentDecimals is the precision used for user-facing rendering.
	presentDecimals = 8
	// persistentDecimals is the precision kept for stored and intermediate values.
	persistentDecimals = 32
)

// Decimal wraps decimal.Decimal with fixed presentation and persistence precisions.
type Decimal struct {
	decimal.Decimal
}

// Zero returns the zero decimal.
func Zero() Decimal {
	return Decimal{}
}

// NewDecimal returns value scaled by 10^-decimals, rounded to the persistent precision.
func NewDecimal(value int64, decimals int32) Decimal {
	return Decimal{decimal.New(value, -decimals).Round(persistentDecimals)}
}

// FromString parses source as a decimal.
// NOTE(review): the parse error is silently discarded — an invalid source
// yields the zero decimal. Confirm callers validate input beforehand.
func FromString(source string) Decimal {
	d, _ := decimal.NewFromString(source)
	return Decimal{d.Round(persistentDecimals)}
}

// FromFloat converts a float64, rounded to the persistent precision.
func FromFloat(source float64) Decimal {
	return Decimal{decimal.NewFromFloat(source).Round(persistentDecimals)}
}

// Integer converts to a fixed-point Integer at the given precision,
// truncating (IntPart) any remaining fraction.
// NOTE(review): the Integer type is declared elsewhere in this package.
func (d Decimal) Integer(precision uint8) Integer {
	return Integer{d.Mul(NewDecimal(1, -int32(precision))).IntPart(), precision}
}

// Add returns a + b.
func (a Decimal) Add(b Decimal) Decimal {
	return Decimal{a.Decimal.Add(b.Decimal)}
}

// Sub returns a - b.
func (a Decimal) Sub(b Decimal) Decimal {
	return Decimal{a.Decimal.Sub(b.Decimal)}
}

// Div returns a / b rounded to the persistent precision.
func (a Decimal) Div(b Decimal) Decimal {
	return Decimal{a.Decimal.DivRound(b.Decimal, persistentDecimals)}
}

// Divisible reports whether a is a whole-number multiple of b at the
// persistent precision; always false when a < b.
func (a Decimal) Divisible(b Decimal) bool {
	if a.Cmp(b) < 0 {
		return false
	}
	div := a.Div(b)
	// a/b is integral iff flooring it changes nothing.
	return div.Floor().Persist() == div.Persist()
}

// Mul returns a * b rounded to the persistent precision.
func (a Decimal) Mul(b Decimal) Decimal {
	return Decimal{a.Decimal.Mul(b.Decimal).Round(persistentDecimals)}
}

// Neg returns -a.
func (a Decimal) Neg() Decimal {
	return Decimal{a.Decimal.Neg()}
}

// Cmp compares a and b, returning -1, 0 or +1.
func (a Decimal) Cmp(b Decimal) int {
	return a.Decimal.Cmp(b.Decimal)
}

// Floor rounds down to the nearest integer.
func (a Decimal) Floor() Decimal {
	return Decimal{a.Decimal.Floor()}
}

// Ceil rounds up to the nearest integer.
func (a Decimal) Ceil() Decimal {
	return Decimal{a.Decimal.Ceil()}
}

// Round rounds to the given number of decimals.
func (a Decimal) Round(decimals int32) Decimal {
	return Decimal{a.Decimal.Round(decimals)}
}

// RoundFloor rounds down (toward negative infinity) to the given number of
// decimals by scaling up, flooring, and scaling back down.
func (a Decimal) RoundFloor(decimals int32) Decimal {
	return a.Mul(NewDecimal(1, -decimals)).Floor().Mul(NewDecimal(1, decimals))
}

// RoundCeil rounds up (toward positive infinity) to the given number of decimals.
func (a Decimal) RoundCeil(decimals int32) Decimal {
	return a.Mul(NewDecimal(1, -decimals)).Ceil().Mul(NewDecimal(1, decimals))
}

// Equal reports whether a and b represent the same value.
func (a Decimal) Equal(b Decimal) bool {
	return a.Decimal.Equal(b.Decimal)
}

// Persist renders the full-precision string form used for storage.
func (a Decimal) Persist() string {
	return a.Decimal.String()
}

// PresentFloor renders the value rounded down to the presentation precision.
func (a Decimal) PresentFloor() string {
	return a.RoundFloor(presentDecimals).Persist()
}

// PresentCeil renders the value rounded up to the presentation precision.
func (a Decimal) PresentCeil() string {
	return a.RoundCeil(presentDecimals).Persist()
}

// Float64 returns the nearest float64 value (the exactness flag is discarded).
func (a Decimal) Float64() float64 {
	f, _ := a.Decimal.Float64()
	return f
}

// Exhausted reports whether a rounds down to zero at the presentation
// precision, i.e. it is smaller than the smallest presentable unit.
func (a Decimal) Exhausted() bool {
	presentMin := NewDecimal(1, presentDecimals).Decimal.Round(presentDecimals)
	return a.RoundFloor(presentDecimals).LessThan(presentMin)
}
decimal.go
0.866683
0.400339
decimal.go
starcoder
package nn

import (
	"math"
)

// OutputLayer is the final layer of the network. It is fully connected to the
// previous layer (plus an optional shared bias neuron) and implements the
// output-side steps of backpropagation.
type OutputLayer struct {
	*Layer
	biasNeuron *BiasNeuron
	prevLayer  ILayer
	withBias   bool
	// weights[i][j] is the initial weight of the synapse from previous-layer
	// neuron j into output neuron i. May be nil or ragged; missing entries
	// leave the synapse's default weight.
	weights [][]float64
	// biasWeights[i] is the initial weight of the bias synapse into output neuron i.
	biasWeights []float64

	activationDerivativeFunction ActivationDerivativeFunction
	learningRate                 float64
	moment                       float64
}

// NewOutputLayer constructs an output layer and immediately builds its
// neurons and synapses.
func NewOutputLayer(numberOfNeurons int, prevLayer ILayer, withBias bool, weights [][]float64, biasWeights []float64, activationDerivativeFunction ActivationDerivativeFunction, learningRate, moment float64) *OutputLayer {
	l := new(OutputLayer)
	l.Layer = NewLayer(LayerTypeOutput, numberOfNeurons)
	l.prevLayer = prevLayer
	l.withBias = withBias
	l.weights = weights
	l.biasWeights = biasWeights
	l.activationDerivativeFunction = activationDerivativeFunction
	l.learningRate = learningRate
	l.moment = moment
	l.build()
	return l
}

// build creates numberOfNeurons output neurons and wires each one to every
// neuron of the previous layer, plus a shared bias neuron when withBias is set.
func (outputLayer *OutputLayer) build() {
	var neurons []INeuron
	biasNeuron := NewBiasNeuron()
	for i := 0; i < outputLayer.numberOfNeurons; i++ {
		outputNeuron := NewOutputNeuron()
		// Initial weights for this neuron's incoming synapses, if provided.
		var neuronWeights []float64
		if outputLayer.weights != nil && i < len(outputLayer.weights) {
			neuronWeights = outputLayer.weights[i]
		}
		for prevLayerNeuronIndex, prevLayerNeuron := range outputLayer.prevLayer.GetNeurons() {
			synapse := NewSynapse(outputLayer.learningRate, outputLayer.moment)
			if neuronWeights != nil && prevLayerNeuronIndex < len(neuronWeights) {
				synapse.Weight = neuronWeights[prevLayerNeuronIndex]
			}
			outputNeuron.AddSynapse(synapse, SynapseTypeIn)
			prevLayerNeuron.AddSynapse(synapse, SynapseTypeOut)
			synapse.InNeuron = prevLayerNeuron
			synapse.OutNeuron = outputNeuron
		}
		if outputLayer.withBias {
			synapse := NewSynapse(outputLayer.learningRate, outputLayer.moment)
			if outputLayer.biasWeights != nil && i < len(outputLayer.biasWeights) {
				synapse.Weight = outputLayer.biasWeights[i]
			}
			outputNeuron.AddSynapse(synapse, SynapseTypeIn)
			biasNeuron.AddSynapse(synapse, SynapseTypeOut)
			synapse.InNeuron = biasNeuron
			synapse.OutNeuron = outputNeuron
		}
		neurons = append(neurons, outputNeuron)
	}
	if outputLayer.withBias {
		outputLayer.biasNeuron = biasNeuron
	}
	outputLayer.neurons = neurons
}

// GetOutput returns the current value of every output neuron, in neuron order.
func (outputLayer *OutputLayer) GetOutput() []float64 {
	var result []float64
	for _, outputNeuron := range outputLayer.neurons {
		result = append(result, outputNeuron.GetValue())
	}
	return result
}

// GetError returns the mean squared error between the ideal values in input
// (one per output neuron, by index) and the current neuron outputs.
func (outputLayer *OutputLayer) GetError(input []float64) float64 {
	calculatedValue := 0.0
	for neuronIndex, neuron := range outputLayer.neurons {
		idealInput := input[neuronIndex]
		calculatedValue += math.Pow(idealInput-neuron.GetValue(), 2)
	}
	calculatedValue /= float64(outputLayer.numberOfNeurons)
	return calculatedValue
}

// UpdateDelta recomputes each output neuron's delta from the corresponding
// ideal value, using the layer's activation derivative.
func (outputLayer *OutputLayer) UpdateDelta(input []float64) {
	for neuronIndex, neuron := range outputLayer.neurons {
		inputValue := input[neuronIndex]
		neuron.(*OutputNeuron).UpdateDelta(inputValue, outputLayer.activationDerivativeFunction)
	}
}

// UpdateWeight applies gradient and weight updates to every incoming synapse.
// Synapses originating from the bias neuron are skipped.
// NOTE(review): bias weights are therefore never trained here — confirm
// this is intentional.
func (outputLayer *OutputLayer) UpdateWeight() {
	for _, neuron := range outputLayer.neurons {
		for _, synapse := range neuron.GetInSynapses() {
			if synapse.InNeuron.GetNeuronType() == NeuronTypeBias {
				continue
			}
			synapse.UpdateGradient()
			synapse.UpdateWeight()
		}
	}
}

// GetBiasWeights returns the current weights of the bias synapses, or nil
// when the layer was built without a bias neuron.
func (outputLayer *OutputLayer) GetBiasWeights() []float64 {
	var layerBiasWeights []float64
	if outputLayer.withBias {
		for _, synapse := range outputLayer.biasNeuron.outSynapses {
			layerBiasWeights = append(layerBiasWeights, synapse.Weight)
		}
	}
	return layerBiasWeights
}
src/nn/layer_output.go
0.702632
0.544499
layer_output.go
starcoder
package iso20022

// Specifies periods related to a corporate action option.
type CorporateActionPeriod5 struct {

	// Period during which the price of a security is determined.
	PriceCalculationPeriod *Period1Choice `xml:"PricClctnPrd,omitempty"`

	// Period during which both old and new equity may be traded simultaneously, for example, consolidation of equity or splitting of equity.
	ParallelTradingPeriod *Period1Choice `xml:"ParllTradgPrd,omitempty"`

	// Period during which the specified option, or all options of the event, remains valid, for example, offer period.
	ActionPeriod *Period1Choice `xml:"ActnPrd,omitempty"`

	// Period during which the shareholder can revoke, change or withdraw its instruction.
	RevocabilityPeriod *Period1Choice `xml:"RvcbltyPrd,omitempty"`

	// Period during which the privilege is not available, for example, this can happen whenever a meeting takes place or whenever a coupon payment is due.
	PrivilegeSuspensionPeriod *Period1Choice `xml:"PrvlgSspnsnPrd,omitempty"`

	// Period during which the participant of the account servicer can revoke change or withdraw its instructions.
	AccountServicerRevocabilityPeriod *Period1Choice `xml:"AcctSvcrRvcbltyPrd,omitempty"`

	// Period defining the last date on which withdrawal in street name requests on the outturn security will be accepted and the date on which the suspension will be released and withdrawal by transfer processing on the outturn security will resume.
	DepositorySuspensionPeriodForWithdrawal *Period1Choice `xml:"DpstrySspnsnPrdForWdrwl,omitempty"`
}

// AddPriceCalculationPeriod allocates PriceCalculationPeriod and returns it for population.
func (c *CorporateActionPeriod5) AddPriceCalculationPeriod() *Period1Choice {
	c.PriceCalculationPeriod = new(Period1Choice)
	return c.PriceCalculationPeriod
}

// AddParallelTradingPeriod allocates ParallelTradingPeriod and returns it for population.
func (c *CorporateActionPeriod5) AddParallelTradingPeriod() *Period1Choice {
	c.ParallelTradingPeriod = new(Period1Choice)
	return c.ParallelTradingPeriod
}

// AddActionPeriod allocates ActionPeriod and returns it for population.
func (c *CorporateActionPeriod5) AddActionPeriod() *Period1Choice {
	c.ActionPeriod = new(Period1Choice)
	return c.ActionPeriod
}

// AddRevocabilityPeriod allocates RevocabilityPeriod and returns it for population.
func (c *CorporateActionPeriod5) AddRevocabilityPeriod() *Period1Choice {
	c.RevocabilityPeriod = new(Period1Choice)
	return c.RevocabilityPeriod
}

// AddPrivilegeSuspensionPeriod allocates PrivilegeSuspensionPeriod and returns it for population.
func (c *CorporateActionPeriod5) AddPrivilegeSuspensionPeriod() *Period1Choice {
	c.PrivilegeSuspensionPeriod = new(Period1Choice)
	return c.PrivilegeSuspensionPeriod
}

// AddAccountServicerRevocabilityPeriod allocates AccountServicerRevocabilityPeriod and returns it for population.
func (c *CorporateActionPeriod5) AddAccountServicerRevocabilityPeriod() *Period1Choice {
	c.AccountServicerRevocabilityPeriod = new(Period1Choice)
	return c.AccountServicerRevocabilityPeriod
}

// AddDepositorySuspensionPeriodForWithdrawal allocates DepositorySuspensionPeriodForWithdrawal and returns it for population.
func (c *CorporateActionPeriod5) AddDepositorySuspensionPeriodForWithdrawal() *Period1Choice {
	c.DepositorySuspensionPeriodForWithdrawal = new(Period1Choice)
	return c.DepositorySuspensionPeriodForWithdrawal
}
CorporateActionPeriod5.go
0.858763
0.508666
CorporateActionPeriod5.go
starcoder
package wildcat

import (
	"unicode/utf8"
)

// calculator computes the contribution of a single chunk of data to a count.
type calculator interface {
	calculate(data []byte) int64
}

// Counter accumulates one or more kinds of counts (bytes, characters, words,
// lines) over the chunks of data passed to update.
type Counter interface {
	// IsType reports whether this counter maintains the given counter type.
	IsType(ct CounterType) bool
	// Type returns the counter types maintained by this counter.
	Type() CounterType
	update(data []byte)
	// Count returns the accumulated count for ct, or -1 when ct is not
	// maintained by this counter.
	Count(ct CounterType) int64
}

// CounterType represents the types of counting. The values are bit flags and
// may be combined with bitwise OR.
type CounterType int

// Previously only Bytes carried the CounterType type; the other constants were
// untyped ints. All of them are now explicitly typed.
const (
	// Bytes shows the counter type for counting byte size.
	Bytes CounterType = 1
	// Characters shows the counter type for counting characters (runes).
	Characters CounterType = 2
	// Words shows the counter type for counting the words.
	Words CounterType = 4
	// Lines shows the counter type for counting the lines.
	Lines CounterType = 8
	// All shows the counter type for counting byte size, characters, words, and lines.
	All CounterType = Lines | Words | Characters | Bytes
)

// IsType checks whether ct contains every flag of the given counter type.
func (ct CounterType) IsType(ct2 CounterType) bool {
	return ct&ct2 == ct2
}

// NewCounter generates a Counter maintaining every type contained in counterType.
func NewCounter(counterType CounterType) Counter {
	counter := &multipleCounter{ct: counterType, counters: map[CounterType]Counter{}}
	generators := []struct {
		ct        CounterType
		generator func() Counter
	}{
		{Bytes, func() Counter { return &singleCounter{ct: Bytes, number: 0, calculator: &byteCalculator{}} }},
		{Characters, func() Counter { return &singleCounter{ct: Characters, number: 0, calculator: &characterCalculator{}} }},
		{Words, func() Counter { return &singleCounter{ct: Words, number: 0, calculator: &wordCalculator{}} }},
		{Lines, func() Counter { return &singleCounter{ct: Lines, number: 0, calculator: &lineCalculator{}} }},
	}
	for _, gens := range generators {
		if counterType&gens.ct == gens.ct {
			counter.counters[gens.ct] = gens.generator()
		}
	}
	return counter
}

// multipleCounter fans every update out to one singleCounter per maintained type.
type multipleCounter struct {
	ct       CounterType
	counters map[CounterType]Counter
}

func (mc *multipleCounter) IsType(ct CounterType) bool {
	return mc.ct&ct == ct
}

func (mc *multipleCounter) Type() CounterType {
	return mc.ct
}

func (mc *multipleCounter) update(data []byte) {
	for _, v := range mc.counters {
		v.update(data)
	}
}

// Count returns the accumulated count for ct, or -1 when ct was not requested
// at construction time.
func (mc *multipleCounter) Count(ct CounterType) int64 {
	counter, ok := mc.counters[ct]
	if !ok {
		return -1
	}
	return counter.Count(ct)
}

// singleCounter accumulates a single kind of count via its calculator.
type singleCounter struct {
	ct         CounterType
	number     int64
	calculator calculator
}

func (sc *singleCounter) IsType(ct CounterType) bool {
	return sc.ct.IsType(ct)
}

func (sc *singleCounter) Type() CounterType {
	return sc.ct
}

// Count returns the accumulated number; the ct argument is ignored because a
// singleCounter maintains exactly one type.
func (sc *singleCounter) Count(ct CounterType) int64 {
	return sc.number
}

func (sc *singleCounter) update(data []byte) {
	sc.number = sc.number + sc.calculator.calculate(data)
}

type lineCalculator struct {
}

// calculate counts newline bytes; a trailing line without '\n' is not counted.
func (lc *lineCalculator) calculate(data []byte) int64 {
	var number int64 = 0
	for _, datum := range data {
		if datum == '\n' {
			number++
		}
	}
	return number
}

type wordCalculator struct {
}

// isWhiteSpace treats NUL, space, tab, newline and carriage return as word separators.
func isWhiteSpace(data byte) bool {
	return data == 0 || data == ' ' || data == '\t' || data == '\n' || data == '\r'
}

// calculate counts word starts within this chunk.
// NOTE(review): the count is chunk-local — a word split across two successive
// update calls is counted twice; callers should feed line- or file-sized chunks.
func (wc *wordCalculator) calculate(data []byte) int64 {
	number := int64(0)
	if len(data) > 0 && !isWhiteSpace(data[0]) {
		number++
	}
	for i, datum := range data {
		if i > 0 && isWhiteSpace(data[i-1]) && !isWhiteSpace(datum) {
			number++
		}
	}
	return number
}

type byteCalculator struct {
}

func (bc *byteCalculator) calculate(data []byte) int64 {
	return int64(len(data))
}

type characterCalculator struct {
}

// calculate counts UTF-8 runes, not bytes.
func (cc *characterCalculator) calculate(data []byte) int64 {
	return int64(utf8.RuneCount(data))
}
counter.go
0.60871
0.502441
counter.go
starcoder
package eve

import (
	"bits"
)

// GE provides a convenient way to write Graphics Engine commands.
type GE struct {
	DL
}

// GE returns a Graphics Engine command writer. The special addr -1 means that
// GE writes commands to the Graphics Engine co-processor; any other addr is a
// direct memory address (validated by checkAddr).
func (d *Driver) GE(addr int) GE {
	d.end()
	w := Writer{d: d}
	switch addr {
	case -1:
		if d.state.flags&3 > stateWrite {
			// Use previous state: a co-processor write is already in
			// progress, so re-open it and continue at the saved address.
			d.state.flags |= stateOpen
			w.state = d.state
			addr = w.addr()
			break
		}
		// Start a fresh co-processor write at the FIFO write pointer.
		w.state.addr = int(d.readUint32(d.mmap.regcmdwrite))
		if d.mmap == &eve1 {
			// EVE1: write directly into the command FIFO ring buffer.
			addr = d.mmap.ramcmd + w.state.addr
			w.state.flags = stateOpen | stateWriteCmd
		} else {
			// EVE2: use the bulk-write register instead of the ring buffer.
			addr = regcmdbwrite
			w.state.flags = stateOpen | stateWriteBulkCmd
		}
		d.state = w.state
	default:
		checkAddr(addr)
		w.state = state{addr, stateOpen | stateWrite}
		d.state = w.state
	}
	w.start(addr)
	return GE{DL{w}}
}

// DLStart starts a new display list.
func (ge *GE) DLStart() {
	ge.restart(4)
	ge.wr32(CMD_DLSTART)
}

// Swap swaps the current display list.
func (ge *GE) Swap() {
	ge.restart(4)
	ge.wr32(CMD_SWAP)
}

// ColdStart sets co-processor engine state to default values.
func (ge *GE) ColdStart() {
	ge.restart(4)
	ge.wr32(CMD_COLDSTART)
}

// Interrupt triggers interrupt INT_CMDFLAG.
func (ge *GE) Interrupt() {
	ge.restart(4)
	ge.wr32(CMD_INTERRUPT)
}

// Append appends num bytes of commands resident in RAM_G at addr to the
// current display list (offset taken from REG_CMD_DL).
func (ge *GE) Append(addr, num int) {
	ge.restart(3 * 4)
	ge.wr32(CMD_APPEND)
	ge.wr32(uint32(addr))
	ge.wr32(uint32(num))
}

// RegRead reads a register value.
func (ge *GE) RegRead(addr int) {
	ge.restart(2 * 4)
	ge.wr32(CMD_REGREAD)
	ge.wr32(uint32(addr))
}

// MemWrite writes the following bytes into memory at addr.
func (ge *GE) MemWrite(addr, num int) {
	ge.restart(3 * 4)
	ge.wr32(CMD_MEMWRITE)
	ge.wr32(uint32(addr))
	ge.wr32(uint32(num))
}

// Inflate decompresses the following compressed data into RAM_G.
func (ge *GE) Inflate(addr int) { ge.restart(2 * 4) ge.wr32(CMD_INFLATE) ge.wr32(uint32(addr)) } // LoadImage decompresses the following JPEG image data into a bitmap at // specified address (EVE2 supports also PNG). Image data should be padded to // align to 4 byte boundary (see Writer.Align32). func (ge *GE) LoadImage(addr int, options uint16) { ge.restart(3 * 4) ge.wr32(CMD_LOADIMAGE) ge.wr32(uint32(addr)) ge.wr32(uint32(options)) } func (ge *GE) LoadImageBytes(addr int, options uint16, img []byte) { ge.LoadImage(addr, options) for len(img) > 0 { n := ge.d.CmdSpace() if n > len(img) { n = len(img) } ge.Write(img[:n]) img = img[n:] } ge.align32() } // MediaFIFO sets up a streaming media FIFO in RAM_G. func (ge *GE) MediaFIFO(addr, size int) { ge.restart(3 * 4) ge.wr32(CMD_MEDIAFIFO) ge.wr32(uint32(addr)) ge.wr32(uint32(size)) } // PlayVideo plays back MJPEG-encoded AVI video. func (ge *GE) PlayVideo(options uint32) { ge.restart(2 * 4) ge.wr32(CMD_PLAYVIDEO) ge.wr32(options) } // VideoStart initializes the AVI video decoder. func (ge *GE) VideoStart() { ge.restart(4) ge.wr32(CMD_VIDEOSTART) } // VideoFrame loads the next frame of video. func (ge *GE) VideoFrame(dst, ptr int) { ge.restart(3 * 4) ge.wr32(CMD_VIDEOFRAME) ge.wr32(uint32(dst)) ge.wr32(uint32(ptr)) } // MemCRC computes a CRC-32 for a block of EVE memory. func (ge *GE) MemCRC(addr, num int) { ge.restart(3 * 4) ge.wr32(CMD_MEMCRC) ge.wr32(uint32(addr)) ge.wr32(uint32(num)) } // MemZero writes zero to a block of memory. func (ge *GE) MemZero(addr, num int) { ge.restart(3 * 4) ge.wr32(CMD_MEMZERO) ge.wr32(uint32(addr)) ge.wr32(uint32(num)) } // MemSet fills memory with a byte value. func (ge *GE) MemSet(addr int, val byte, num int) { ge.restart(3 * 4) ge.wr32(CMD_MEMSET) ge.wr32(uint32(val)) ge.wr32(uint32(num)) } // MemCpy copies a block of memory. 
func (ge *GE) MemCpy(dst, src, num int) {
	ge.restart(4 * 4)
	ge.wr32(CMD_MEMCPY)
	ge.wr32(uint32(dst))
	ge.wr32(uint32(src))
	ge.wr32(uint32(num))
}

// Button writes only header of CMD_BUTTON command (without label string). Use
// Write* methods to write button label. Label string must be terminated with
// zero byte and padded to align to 4 byte boundary (see Writer.Align32).
func (ge *GE) Button(x, y, w, h int, font byte, options uint16) {
	ge.restart(4 * 4)
	ge.wr32(CMD_BUTTON)
	// Coordinates and sizes are packed as 16-bit fields, two per word.
	ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16)
	ge.wr32(uint32(w)&0xFFFF | uint32(h)&0xFFFF<<16)
	ge.wr32(uint32(font) | uint32(options)<<16)
}

// ButtonString draws a button with label s (zero-terminated and 4-byte padded
// automatically).
func (ge *GE) ButtonString(x, y, w, h int, font byte, options uint16, s string) {
	ge.Button(x, y, w, h, font, options)
	ge.addrAdd(len(s) + 1)
	ge.ws(s)
	ge.wr8(0)
	ge.align32()
}

// Clock draws an analog clock at (x, y) with radius r showing time h:m:s.ms.
func (ge *GE) Clock(x, y, r int, options uint16, h, m, s, ms int) {
	ge.restart(5 * 4)
	ge.wr32(CMD_CLOCK)
	ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16)
	ge.wr32(uint32(r)&0xFFFF | uint32(options)<<16)
	ge.wr32(uint32(h)&0xFFFF | uint32(m)&0xFFFF<<16)
	ge.wr32(uint32(s)&0xFFFF | uint32(ms)&0xFFFF<<16)
}

// FgColor sets the foreground color.
func (ge *GE) FgColor(rgb uint32) {
	ge.restart(2 * 4)
	ge.wr32(CMD_FGCOLOR)
	ge.wr32(rgb)
}

// BgColor sets the background color.
func (ge *GE) BgColor(rgb uint32) {
	ge.restart(2 * 4)
	ge.wr32(CMD_BGCOLOR)
	ge.wr32(rgb)
}

// GradColor sets the 3D button highlight color.
func (ge *GE) GradColor(rgb uint32) {
	ge.restart(2 * 4)
	ge.wr32(CMD_GRADCOLOR)
	ge.wr32(rgb)
}

// Gauge draws a gauge with major/minor tick marks showing val out of max.
func (ge *GE) Gauge(x, y, r int, options uint16, major, minor, val, max int) {
	ge.restart(5 * 4)
	ge.wr32(CMD_GAUGE)
	ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16)
	ge.wr32(uint32(r)&0xFFFF | uint32(options)<<16)
	ge.wr32(uint32(major)&0xFFFF | uint32(minor)&0xFFFF<<16)
	ge.wr32(uint32(val)&0xFFFF | uint32(max)&0xFFFF<<16)
}

// Gradient draws a smooth color gradient.
func (ge *GE) Gradient(x0, y0 int, rgb0 uint32, x1, y1 int, rgb1 uint32) { ge.restart(5 * 4) ge.wr32(CMD_GRADIENT) ge.wr32(uint32(x0)&0xFFFF | uint32(y0)&0xFFFF<<16) ge.wr32(rgb0) ge.wr32(uint32(x1)&0xFFFF | uint32(y1)&0xFFFF<<16) ge.wr32(rgb1) } // Keys writes only header of CMD_KEYS command (without key labels). Use Write* // methods to write key labels. Labels string must be terminated with zero byte // and padded to align to 4 byte boundary (see Writer.Align32). func (ge *GE) Keys(x, y, w, h int, font byte, options uint16) { ge.restart(4 * 4) ge.wr32(CMD_KEYS) ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16) ge.wr32(uint32(w)&0xFFFF | uint32(h)&0xFFFF<<16) ge.wr32(uint32(font) | uint32(options)<<16) } // KeysString draws a row of keys using s. func (ge *GE) KeysString(x, y, w, h int, font byte, options uint16, s string) { ge.Keys(x, y, w, h, font, options) ge.addrAdd(len(s) + 1) ge.ws(s) ge.wr8(0) ge.align32() } // Progress draws a progress bar. func (ge *GE) Progress(x, y, w, h int, options uint16, val, max int) { ge.restart(5 * 4) ge.wr32(CMD_PROGRESS) ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16) ge.wr32(uint32(w)&0xFFFF | uint32(h)&0xFFFF<<16) ge.wr32(uint32(options) | uint32(val)&0xFFFF<<16) ge.wr32(uint32(max) & 0xFFFF) } // Progress draws a scroll bar. func (ge *GE) Scrollbar(x, y, w, h int, options uint16, val, size, max int) { ge.restart(5 * 4) ge.wr32(CMD_SCROLLBAR) ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16) ge.wr32(uint32(w)&0xFFFF | uint32(h)&0xFFFF<<16) ge.wr32(uint32(options) | uint32(val)&0xFFFF<<16) ge.wr32(uint32(size) | uint32(max)&0xFFFF<<16) } // Slider draws a slider. func (ge *GE) Slider(x, y, w, h int, options uint16, val, max int) { ge.restart(5 * 4) ge.wr32(CMD_SLIDER) ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16) ge.wr32(uint32(w)&0xFFFF | uint32(h)&0xFFFF<<16) ge.wr32(uint32(options) | uint32(val)&0xFFFF<<16) ge.wr32(uint32(max) & 0xFFFF) } // Dial draws a rotary dial control. 
func (ge *GE) Dial(x, y, r int, options uint16, val int) {
	ge.restart(4 * 4)
	ge.wr32(CMD_DIAL)
	ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16)
	ge.wr32(uint32(r)&0xFFFF | uint32(options)<<16)
	ge.wr32(uint32(val))
}

// Toggle writes only header of CMD_TOGGLE command (without label string). Use
// Write* methods to write toggle label. Label string must be terminated with
// zero byte and padded to align to 4 byte boundary (see Writer.Align32).
func (ge *GE) Toggle(x, y, w int, font byte, options uint16, state bool) {
	ge.restart(4 * 4)
	ge.wr32(CMD_TOGGLE)
	ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16)
	ge.wr32(uint32(w)&0xFFFF | uint32(font)<<16)
	// bits.One(!state)-1 yields all-ones for state==true and 0 for
	// state==false; the &-free <<16 keeps only the 16-bit state field.
	// NOTE(review): relies on bits.One returning 1/0 — confirm against the
	// bits package.
	ge.wr32(uint32(options) | uint32(bits.One(!state)-1)<<16)
}

// ToggleString draws a toggle switch using s as label.
func (ge *GE) ToggleString(x, y, w int, font byte, opts uint16, state bool, s string) {
	ge.Toggle(x, y, w, font, opts, state)
	ge.addrAdd(len(s) + 1)
	ge.ws(s)
	ge.wr8(0)
	ge.align32()
}

// Text writes only header of CMD_TEXT command (without text string). Use
// Write* methods to write text. Text string must be terminated with zero byte.
//	ge.Text(20, 30, 26, eve.DEFAULT)
//	fmt.Fprintf(&ge, "x=%d y=%d\000", x, y)
//	ge.Align32()
func (ge *GE) Text(x, y int, font byte, options uint16) {
	ge.restart(3 * 4)
	ge.wr32(CMD_TEXT)
	ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16)
	ge.wr32(uint32(font) | uint32(options)<<16)
}

// TextString draws text (zero-terminated and 4-byte padded automatically).
func (ge *GE) TextString(x, y int, font byte, options uint16, s string) {
	ge.Text(x, y, font, options)
	ge.addrAdd(len(s) + 1)
	ge.ws(s)
	ge.wr8(0)
	ge.align32()
}

// SetBase sets the base for number output.
func (ge *GE) SetBase(base int) {
	ge.restart(2 * 4)
	ge.wr32(CMD_SETBASE)
	ge.wr32(uint32(base))
}

// Number draws a number.
func (ge *GE) Number(x, y int, font byte, options uint16, n int) {
	ge.restart(4 * 4)
	ge.wr32(CMD_NUMBER)
	ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16)
	ge.wr32(uint32(font) | uint32(options)<<16)
	ge.wr32(uint32(n))
}

// LoadIdentity instructs the graphics engine to set the current matrix to the
// identity matrix, so it is able to form the new matrix as requested by Scale,
// Rotate, Translate command.
func (ge *GE) LoadIdentity() {
	ge.restart(4)
	ge.wr32(CMD_LOADIDENTITY)
}

// SetMatrix assigns the value of the current matrix to the bitmap transform
// matrix of the graphics engine by generating display list commands.
func (ge *GE) SetMatrix(a, b, c, d, e, f int) {
	ge.restart(7 * 4)
	ge.wr32(CMD_SETMATRIX)
	ge.wr32(uint32(a))
	ge.wr32(uint32(b))
	ge.wr32(uint32(c))
	ge.wr32(uint32(d))
	ge.wr32(uint32(e))
	ge.wr32(uint32(f))
}

// GetMatrix retrieves the current matrix within the context of the graphics
// engine.
func (ge *GE) GetMatrix() {
	ge.restart(4)
	ge.wr32(CMD_GETMATRIX)
}

// GetPtr gets the end memory address of data inflated by Inflate command.
func (ge *GE) GetPtr() {
	ge.restart(4)
	ge.wr32(CMD_GETPTR)
}

// GetProps gets the image properties decompressed by LoadImage.
func (ge *GE) GetProps() {
	ge.restart(4)
	ge.wr32(CMD_GETPROPS)
}

// Scale applies a scale to the current matrix.
func (ge *GE) Scale(sx, sy int) {
	ge.restart(3 * 4)
	ge.wr32(CMD_SCALE)
	ge.wr32(uint32(sx))
	ge.wr32(uint32(sy))
}

// Rotate applies a rotation to the current matrix.
func (ge *GE) Rotate(a int) {
	ge.restart(2 * 4)
	ge.wr32(CMD_ROTATE)
	ge.wr32(uint32(a))
}

// Translate applies a translation to the current matrix.
func (ge *GE) Translate(tx, ty int) {
	ge.restart(3 * 4)
	ge.wr32(CMD_TRANSLATE)
	ge.wr32(uint32(tx))
	ge.wr32(uint32(ty))
}

// Calibrate execute the touch screen calibration routine. It returns the
// address to the status value (status != 0 means success).
func (ge *GE) Calibrate() int { ge.restart(2 * 4) ge.wr32(CMD_CALIBRATE) ge.wr32(0) return ge.Addr() - 4 } // SetRotate rotate the screen (EVE2). func (ge *GE) SetRotate(r byte) { ge.restart(2 * 4) ge.wr32(CMD_SETROTATE) ge.wr32(uint32(r)) } // Spinner starts an animated spinner. func (ge *GE) Spinner(x, y int, style uint16, scale int) { ge.restart(3 * 4) ge.wr32(CMD_SPINNER) ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16) ge.wr32(uint32(style) | uint32(scale)&0xFFFF<<16) } // Screensaver starts an animated screensaver. func (ge *GE) Screensaver() { ge.restart(4) ge.wr32(CMD_SCREENSAVER) } // Sketch starts a continuous sketch update. It does not display anything, only // draws to the bitmap located in RAM_G, at address addr. func (ge *GE) Sketch(x, y, w, h, addr int, format byte) { ge.restart(5 * 4) ge.wr32(CMD_SKETCH) ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16) ge.wr32(uint32(w)&0xFFFF | uint32(h)&0xFFFF<<16) ge.wr32(uint32(addr)) ge.wr32(uint32(format)) } // Stop stops any of spinner, screensaver or sketch. func (ge *GE) Stop() { ge.restart(4) ge.wr32(CMD_STOP) } // SetFont sets up a custom font. func (ge *GE) SetFont(font byte, addr int) { ge.restart(3 * 4) ge.wr32(CMD_SETROTATE) ge.wr32(uint32(font)) ge.wr32(uint32(addr)) } // SetFont2 sets up a custom font (EVE2). func (ge *GE) SetFont2(font byte, addr, firstchar int) { ge.restart(4 * 4) ge.wr32(CMD_SETROTATE) ge.wr32(uint32(font)) ge.wr32(uint32(addr)) ge.wr32(uint32(firstchar)) } // SetScratch sets the scratch bitmap for widget use (EVE2). func (ge *GE) SetScratch(handle byte) { ge.restart(2 * 4) ge.wr32(CMD_SETSCRATCH) ge.wr32(uint32(handle)) } // ROMFont loads a ROM font into bitmap handle (EVE2). func (ge *GE) ROMFont(font, romslot byte) { ge.restart(3 * 4) ge.wr32(CMD_ROMFONT) ge.wr32(uint32(font)) ge.wr32(uint32(romslot)) } // Track tracks touches for a graphics object. 
func (ge *GE) Track(x, y, w, h, tag int) { ge.restart(4 * 4) ge.wr32(CMD_TRACK) ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16) ge.wr32(uint32(w)&0xFFFF | uint32(h)&0xFFFF<<16) ge.wr32(uint32(uint16(tag))) } // Snapshot takes a snapshot of the current screen. func (ge *GE) Snapshot(addr int) { ge.restart(2 * 4) ge.wr32(CMD_SNAPSHOT) ge.wr32(uint32(addr)) } // Snapshot2 takes a snapshot of part of the current screen (EVE2). func (ge *GE) Snapshot2(format byte, addr, x, y, w, h int) { ge.restart(5 * 4) ge.wr32(CMD_SNAPSHOT2) ge.wr32(uint32(format)) ge.wr32(uint32(addr)) ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16) ge.wr32(uint32(w)&0xFFFF | uint32(h)&0xFFFF<<16) } // SetBitmap takes a snapshot of part of the current screen. func (ge *GE) SetBitmap(addr int, format byte, w, h int) { ge.restart(4 * 4) ge.wr32(CMD_SETBITMAP) ge.wr32(uint32(addr)) ge.wr32(uint32(format)) ge.wr32(uint32(w)&0xFFFF | uint32(h)&0xFFFF<<16) } // Logo plays FTDI logo animation. func (ge *GE) Logo() { ge.restart(4) ge.wr32(CMD_LOGO) } // CSketch - deprecated (FT801). func (ge *GE) CSketch(x, y, w, h, addr int, format byte, freq int) { ge.restart(6 * 4) ge.wr32(CMD_SKETCH) ge.wr32(uint32(x)&0xFFFF | uint32(y)&0xFFFF<<16) ge.wr32(uint32(w)&0xFFFF | uint32(h)&0xFFFF<<16) ge.wr32(uint32(addr)) ge.wr32(uint32(format)) ge.wr32(uint32(freq)) }
egpath/src/display/eve/ge.go
0.626467
0.489686
ge.go
starcoder
// hstepper contains a horizontal stepper component.
package hstepper

import (
	. "github.com/golangee/forms"
	"github.com/golangee/forms/theme/material/icon"
	"strconv"
)

// Step is the model for the internal step view.
type Step struct {
	ico        icon.Icon // optional icon shown instead of a number
	numberOnly bool      // when true, the step shows its 1-based index
	title      string    // caption shown next to the icon/number
}

// NewStep creates a numbered step entry.
func NewStep(caption string) Step {
	return Step{numberOnly: true, title: caption}
}

// NewIconStep creates a step with an icon instead of a number.
func NewIconStep(ico icon.Icon, caption string) Step {
	return Step{ico: ico, title: caption}
}

// Stepper is a horizontal stepper.
type Stepper struct {
	*HStack
	stepInactiveColor Color // color for steps not yet reached
	steps             []*stepView
}

// NewStepper creates a new view with the given steps.
func NewStepper(steps ...Step) *Stepper {
	t := &Stepper{}
	t.HStack = NewHStack()
	// Material grey 500 as the inactive color.
	t.stepInactiveColor = RGB(0x9e, 0x9e, 0x9e)
	t.SetHorizontalAlign(Center)
	t.SetSteps(steps...)
	return t
}

// SetSteps removes all existing steps and sets new ones.
func (t *Stepper) SetSteps(steps ...Step) *Stepper {
	t.ClearViews()
	t.steps = nil
	for idx, step := range steps {
		// myIdx < 0 tells the step view to show the icon instead of a number.
		myIdx := -1
		if step.numberOnly {
			myIdx = idx + 1
		}
		// The last step gets no trailing separator line.
		stepView := newStepView(t, step.ico, step.title, myIdx, idx == len(steps)-1)
		stepView.setActive(false)
		t.steps = append(t.steps, stepView)
		t.HStack.AddViews(stepView)
	}
	return t
}

// SetProgress updates the view state of the stepper steps. Passed steps
// are colorized using the primary color and the active step caption is bolder.
func (t *Stepper) SetProgress(idx int) *Stepper {
	// Clamp to the number of steps; negative idx simply marks nothing done.
	if idx > len(t.steps) {
		idx = len(t.steps)
	}
	for _, step := range t.steps {
		step.setDone(false)
		step.setActive(false)
	}
	for i := 0; i < idx; i++ {
		t.steps[i].setDone(true)
		if i == idx-1 {
			t.steps[i].setActive(true)
		}
	}
	return t
}

// Style applies generic style attributes.
func (t *Stepper) Style(style ...Style) *Stepper {
	t.HStack.Style(style...)
	return t
}

// Self assigns the receiver to the given reference
func (t *Stepper) Self(ref **Stepper) *Stepper {
	*ref = t
	return t
}

// stepView renders one step: a round button (icon or number), a caption and an
// optional separator line towards the next step.
type stepView struct {
	parent  *Stepper
	btn     *IconButton
	caption *Text
	sepView *Frame // nil for the last step
	*HStack
}

// newStepView builds the view for one step; num < 0 shows the icon, otherwise
// the number; last suppresses the trailing separator.
func newStepView(parent *Stepper, ico icon.Icon, text string, num int, last bool) *stepView {
	t := &stepView{}
	t.parent = parent
	t.HStack = NewHStack(
		NewIconButton(ico).Style(
			BackgroundColor(Theme().Color()),
			ForegroundColor(Theme().ForegroundColor()),
		).Self(&t.btn),
		newStepTitle(text).Self(&t.caption),
	)
	if num >= 0 {
		// NOTE(review): only the first digit of num is shown — steppers with
		// 10 or more numbered steps display a truncated number; confirm
		// whether SetChar can be replaced by a string-capable setter.
		t.btn.SetChar(rune(strconv.Itoa(num)[0]))
	}
	if !last {
		t.HStack.AddViews(newStepSeparator().Self(&t.sepView))
	}
	return t
}

// setDone colors the step (and its separator) with the theme color when done,
// otherwise with the stepper's inactive color.
func (t *stepView) setDone(a bool) *stepView {
	if a {
		t.btn.Style(BackgroundColor(Theme().Color()))
		if t.sepView != nil {
			t.sepView.Style(BackgroundColor(Theme().Color()))
		}
	} else {
		t.btn.Style(BackgroundColor(t.parent.stepInactiveColor))
		if t.sepView != nil {
			t.sepView.Style(BackgroundColor(t.parent.stepInactiveColor))
		}
	}
	return t
}

// setActive toggles the bold caption of the currently active step.
func (t *stepView) setActive(a bool) *stepView {
	if a {
		t.caption.Style(FontWeight(WeightBolder))
	} else {
		t.caption.Style(FontWeight(WeightNormal))
	}
	return t
}

// newStepTitle creates the vertically centered caption text of a step.
func newStepTitle(s string) *Text {
	return NewText(s).Style(
		MarginTop(Auto()),
		MarginBottom(Auto()),
		PadLeft(DefaultPadding),
		PadRight(DefaultPadding),
	)
}

// newStepSeparator creates the thin horizontal line drawn between steps.
func newStepSeparator() *Frame {
	return NewFrame().Style(
		BackgroundColor(Theme().Color()),
		Height(Pixel(1)),
		Width(Pixel(80)),
		MarginTop(Auto()),
		MarginBottom(Auto()),
		MarginRight(DefaultPadding),
	)
}
views/hstepper/stepper.go
0.694303
0.553023
stepper.go
starcoder
package core

import (
	"runtime"

	. "github.com/gooid/gocv/opencv3/internal/native"
)

const _channelsMatOfPoint2f = 2

var _depthMatOfPoint2f = CvTypeCV_32F

// MatOfPoint2f is a Mat specialized to hold 2-channel 32-bit float points.
type MatOfPoint2f struct {
	*Mat
}

// finalizeMatOfPoint2f is registered as the finalizer for every constructor.
// BUG FIX: the finalizers were previously closures capturing rcvr
// (func(interface{}) { rcvr.finalize() }); per the runtime.SetFinalizer
// contract a finalizer must reach the object only through its argument —
// capturing it keeps the object reachable forever, so finalize() never ran
// and the native Mat leaked.
func finalizeMatOfPoint2f(m *MatOfPoint2f) {
	m.finalize()
}

// NewMatOfPoint2f creates an empty MatOfPoint2f.
func NewMatOfPoint2f() (rcvr *MatOfPoint2f) {
	rcvr = &MatOfPoint2f{}
	rcvr.Mat = NewMat2()
	runtime.SetFinalizer(rcvr, finalizeMatOfPoint2f)
	return
}

// NewMatOfPoint2f2 wraps the native Mat at addr, validating its shape.
func NewMatOfPoint2f2(addr int64) (rcvr *MatOfPoint2f) {
	rcvr = &MatOfPoint2f{}
	rcvr.Mat = NewMat(addr)
	runtime.SetFinalizer(rcvr, finalizeMatOfPoint2f)
	if !rcvr.Empty() && rcvr.CheckVector2(_channelsMatOfPoint2f, _depthMatOfPoint2f) < 0 {
		Throw(NewIllegalArgumentException("Incompatible Mat"))
	}
	return
}

// NewMatOfPoint2f3 wraps an existing Mat, validating its shape.
func NewMatOfPoint2f3(m *Mat) (rcvr *MatOfPoint2f) {
	rcvr = &MatOfPoint2f{}
	rcvr.Mat = NewMat8(m, RangeAll())
	runtime.SetFinalizer(rcvr, finalizeMatOfPoint2f)
	if !rcvr.Empty() && rcvr.CheckVector2(_channelsMatOfPoint2f, _depthMatOfPoint2f) < 0 {
		Throw(NewIllegalArgumentException("Incompatible Mat"))
	}
	return
}

// NewMatOfPoint2f4 creates a MatOfPoint2f populated from the given points.
func NewMatOfPoint2f4(a []*Point) (rcvr *MatOfPoint2f) {
	rcvr = &MatOfPoint2f{}
	rcvr.Mat = NewMat2()
	runtime.SetFinalizer(rcvr, finalizeMatOfPoint2f)
	rcvr.FromArray(a)
	return
}

// Alloc resizes the Mat to elemNumber rows of one 2-channel float element.
func (rcvr *MatOfPoint2f) Alloc(elemNumber int) {
	if elemNumber > 0 {
		rcvr.Create(elemNumber, 1, CvTypeMakeType(_depthMatOfPoint2f, _channelsMatOfPoint2f))
	}
}

// FromArray copies the given points into the Mat. An empty slice is a no-op.
func (rcvr *MatOfPoint2f) FromArray(a []*Point) {
	if len(a) == 0 {
		return
	}
	num := len(a)
	rcvr.Alloc(num)
	buff := make([]float32, num*_channelsMatOfPoint2f)
	for i := 0; i < num; i++ {
		p := a[i]
		buff[_channelsMatOfPoint2f*i+0] = float32(p.X)
		buff[_channelsMatOfPoint2f*i+1] = float32(p.Y)
	}
	rcvr.PutF(0, 0, buff)
}

// MatOfPoint2fFromNativeAddr wraps the native Mat at addr.
func MatOfPoint2fFromNativeAddr(addr int64) *MatOfPoint2f {
	return NewMatOfPoint2f2(addr)
}

// ToArray copies the Mat contents out as a slice of points.
func (rcvr *MatOfPoint2f) ToArray() []*Point {
	num := rcvr.Total()
	ap := make([]*Point, num)
	if num == 0 {
		return ap
	}
	buff := make([]float32, num*_channelsMatOfPoint2f)
	rcvr.GetF(0, 0, buff)
	for i := int64(0); i < num; i++ {
		ap[i] = NewPoint(float64(buff[i*_channelsMatOfPoint2f]), float64(buff[i*_channelsMatOfPoint2f+1]))
	}
	return ap
}
opencv3/core/MatOfPoint2f.java.go
0.613237
0.423637
MatOfPoint2f.java.go
starcoder
package graph import ( "github.com/puppetlabs/leg/datastructure" ) type intrusiveEdge struct { Source, Target Vertex Edge Edge Weight float64 } type baseGraphOps interface { EdgesBetween(source, target Vertex) EdgeSet EdgeBetween(source, target Vertex) (Edge, error) EdgesOf(vertex Vertex) EdgeSet AddEdge(edge Edge) RemoveEdge(edge Edge) Vertices() MutableVertexSet } type baseEdgesView struct { g *baseGraph } func (sev *baseEdgesView) Contains(edge Edge) bool { return sev.g.edges.Contains(edge) } func (sev *baseEdgesView) Count() uint { return uint(sev.g.edges.Size()) } func (sev *baseEdgesView) AsSlice() []Edge { s := make([]Edge, sev.g.edges.Size()) i := 0 sev.ForEach(func(edge Edge) error { s[i] = edge i++ return nil }) return s } func (sev *baseEdgesView) ForEach(fn EdgeSetIterationFunc) error { return sev.g.edges.ForEachInto(func(key Edge, value *intrusiveEdge) error { return fn(key) }) } type baseGraph struct { AllowsLoops, AllowsMultipleEdges bool Ops baseGraphOps features GraphFeature edges datastructure.Map // map[Edge]*intrusiveEdge edgesView EdgeSet } func (g *baseGraph) Features() GraphFeature { return g.features } func (g *baseGraph) EdgesBetween(source, target Vertex) EdgeSet { return g.Ops.EdgesBetween(source, target) } func (g *baseGraph) EdgeBetween(source, target Vertex) (Edge, error) { return g.Ops.EdgeBetween(source, target) } func (g *baseGraph) Connect(source, target Vertex) error { return g.AddEdge(source, target, NewEdge()) } func (g *baseGraph) AddEdge(source, target Vertex, edge Edge) error { return g.AddEdgeWithWeight(source, target, edge, DefaultEdgeWeight) } func (g *baseGraph) ConnectWithWeight(source, target Vertex, weight float64) error { return g.AddEdgeWithWeight(source, target, NewEdge(), weight) } func (g *baseGraph) AddEdgeWithWeight(source, target Vertex, edge Edge, weight float64) error { if g.ContainsEdge(edge) { return ErrEdgeAlreadyInGraph } if !g.ContainsVertex(source) { return &VertexNotFoundError{source} } if 
!g.ContainsVertex(target) { return &VertexNotFoundError{target} } if !g.AllowsMultipleEdges && g.ContainsEdgeBetween(source, target) { return ErrEdgeAlreadyInGraph } if !g.AllowsLoops && source == target { return ErrWouldCreateLoop } ie := &intrusiveEdge{ Source: source, Target: target, Edge: edge, Weight: weight, } g.edges.Put(edge, ie) g.Ops.AddEdge(edge) return nil } func (g *baseGraph) AddVertex(vertex Vertex) { g.Ops.Vertices().Add(vertex) } func (g *baseGraph) ContainsEdgeBetween(source, target Vertex) bool { _, err := g.EdgeBetween(source, target) return err == nil } func (g *baseGraph) ContainsEdge(edge Edge) bool { return g.edges.Contains(edge) } func (g *baseGraph) ContainsVertex(vertex Vertex) bool { return g.Vertices().Contains(vertex) } func (g *baseGraph) Edges() EdgeSet { if g.edgesView == nil { g.edgesView = &baseEdgesView{g} } return g.edgesView } func (g *baseGraph) EdgesOf(vertex Vertex) (EdgeSet, error) { if !g.ContainsVertex(vertex) { return nil, &VertexNotFoundError{vertex} } return g.Ops.EdgesOf(vertex), nil } func (g *baseGraph) RemoveEdges(edges []Edge) (modified bool) { for _, edge := range edges { modified = modified || g.RemoveEdge(edge) } return } func (g *baseGraph) RemoveEdgesBetween(source, target Vertex) EdgeSet { edges := g.EdgesBetween(source, target) g.RemoveEdges(edges.AsSlice()) return edges } func (g *baseGraph) RemoveEdge(edge Edge) bool { if !g.ContainsEdge(edge) { return false } g.Ops.RemoveEdge(edge) g.edges.Remove(edge) return true } func (g *baseGraph) RemoveEdgeBetween(source, target Vertex) (Edge, error) { edge, err := g.EdgeBetween(source, target) if err != nil { return nil, err } g.RemoveEdge(edge) return edge, nil } func (g *baseGraph) RemoveVertices(vertices []Vertex) (modified bool) { for _, vertex := range vertices { modified = modified || g.RemoveVertex(vertex) } return } func (g *baseGraph) RemoveVertex(vertex Vertex) bool { if !g.ContainsVertex(vertex) { return false } 
g.RemoveEdges(g.Ops.EdgesOf(vertex).AsSlice()) g.Ops.Vertices().Remove(vertex) return true } func (g *baseGraph) Vertices() VertexSet { return g.Ops.Vertices() } func (g *baseGraph) SourceVertexOf(edge Edge) (Vertex, error) { ie, found := g.edges.Get(edge) if !found { return nil, ErrEdgeNotFound } return ie.(*intrusiveEdge).Source, nil } func (g *baseGraph) TargetVertexOf(edge Edge) (Vertex, error) { ie, found := g.edges.Get(edge) if !found { return nil, ErrEdgeNotFound } return ie.(*intrusiveEdge).Target, nil } func (g *baseGraph) WeightOf(edge Edge) (float64, error) { ie, found := g.edges.Get(edge) if !found { return DefaultEdgeWeight, ErrEdgeNotFound } return ie.(*intrusiveEdge).Weight, nil } func newBaseGraph(features GraphFeature, allowsLoops, allowsMultipleEdges bool, ops baseGraphOps) *baseGraph { var edges datastructure.Map if features&DeterministicIteration != 0 { edges = datastructure.NewLinkedHashMap() } else { edges = datastructure.NewHashMap() } return &baseGraph{ AllowsLoops: allowsLoops, AllowsMultipleEdges: allowsMultipleEdges, Ops: ops, features: features, edges: edges, } } type baseUndirectedGraph struct { *baseGraph Ops baseUndirectedGraphOps } type baseUndirectedGraphOps interface { baseGraphOps DegreeOf(vertex Vertex) uint } func (ug *baseUndirectedGraph) DegreeOf(vertex Vertex) (uint, error) { if !ug.ContainsVertex(vertex) { return 0, &VertexNotFoundError{vertex} } return ug.Ops.DegreeOf(vertex), nil } func newBaseUndirectedGraph(features GraphFeature, allowsLoops, allowsMultipleEdges bool, ops baseUndirectedGraphOps) *baseUndirectedGraph { return &baseUndirectedGraph{newBaseGraph(features, allowsLoops, allowsMultipleEdges, ops), ops} } type baseDirectedGraph struct { *baseGraph Ops baseDirectedGraphOps } type baseDirectedGraphOps interface { baseGraphOps InDegreeOf(vertex Vertex) uint IncomingEdgesOf(vertex Vertex) EdgeSet OutDegreeOf(vertex Vertex) uint OutgoingEdgesOf(vertex Vertex) EdgeSet } func (dg *baseDirectedGraph) InDegreeOf(vertex 
Vertex) (uint, error) { if !dg.ContainsVertex(vertex) { return 0, &VertexNotFoundError{vertex} } return dg.Ops.InDegreeOf(vertex), nil } func (dg *baseDirectedGraph) IncomingEdgesOf(vertex Vertex) (EdgeSet, error) { if !dg.ContainsVertex(vertex) { return nil, &VertexNotFoundError{vertex} } return dg.Ops.IncomingEdgesOf(vertex), nil } func (dg *baseDirectedGraph) OutDegreeOf(vertex Vertex) (uint, error) { if !dg.ContainsVertex(vertex) { return 0, &VertexNotFoundError{vertex} } return dg.Ops.OutDegreeOf(vertex), nil } func (dg *baseDirectedGraph) OutgoingEdgesOf(vertex Vertex) (EdgeSet, error) { if !dg.ContainsVertex(vertex) { return nil, &VertexNotFoundError{vertex} } return dg.Ops.OutgoingEdgesOf(vertex), nil } func newBaseDirectedGraph(features GraphFeature, allowsLoops, allowsMultipleEdges bool, ops baseDirectedGraphOps) *baseDirectedGraph { return &baseDirectedGraph{newBaseGraph(features, allowsLoops, allowsMultipleEdges, ops), ops} }
graph/base.go
0.727298
0.465752
base.go
starcoder
package policy import ( "sort" mesh_proto "github.com/kumahq/kuma/api/mesh/v1alpha1" "github.com/kumahq/kuma/pkg/core/resources/apis/mesh" ) // SelectDataplanePolicy given a Dataplane definition and a list of DataplanePolicy returns the "best matching" DataplanePolicy. // A DataplanePolicy is considered a match if one of the inbound interfaces of a Dataplane or tag section on Gateway Dataplane has all tags of DataplanePolicy's selector. // Every matching DataplanePolicy gets a rank (score) defined as a maximum number of tags in a matching selector. // DataplanePolicy with an empty list of selectors is considered a match with a rank (score) of 0. // DataplanePolicy with an empty selector (one that has no tags) is considered a match with a rank (score) of 0. // In case if there are multiple DataplanePolicies with the same rank (score), the policy created last is chosen. func SelectDataplanePolicy(dataplane *mesh.DataplaneResource, policies []DataplanePolicy) DataplanePolicy { sort.Stable(DataplanePolicyByName(policies)) // sort to avoid flakiness var bestPolicy DataplanePolicy var bestRank mesh_proto.TagSelectorRank sameRankCreatedLater := func(policy DataplanePolicy, rank mesh_proto.TagSelectorRank) bool { return rank.CompareTo(bestRank) == 0 && policy.GetMeta().GetCreationTime().After(bestPolicy.GetMeta().GetCreationTime()) } for _, policy := range policies { if 0 == len(policy.Selectors()) { // match everything if bestPolicy == nil || sameRankCreatedLater(policy, mesh_proto.TagSelectorRank{}) { bestPolicy = policy } continue } for _, selector := range policy.Selectors() { if 0 == len(selector.Match) { // match everything if bestPolicy == nil || sameRankCreatedLater(policy, mesh_proto.TagSelectorRank{}) { bestPolicy = policy } continue } tagSelector := mesh_proto.TagSelector(selector.Match) if dataplane.Spec.Matches(tagSelector) { rank := tagSelector.Rank() if rank.CompareTo(bestRank) > 0 || sameRankCreatedLater(policy, rank) { bestRank = rank bestPolicy = policy } 
} } } return bestPolicy } type DataplanePolicyByName []DataplanePolicy func (a DataplanePolicyByName) Len() int { return len(a) } func (a DataplanePolicyByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a DataplanePolicyByName) Less(i, j int) bool { return a[i].GetMeta().GetName() < a[j].GetMeta().GetName() }
pkg/core/policy/dataplane_matcher.go
0.69946
0.565359
dataplane_matcher.go
starcoder
package randelbrot

import (
	"math"
)

// A Renderer maintains state and provides functions for rendering Mandelbrot Set images.
// The cached coordinate slices map pixel indices to complex-plane coordinates.
type Renderer struct {
	xCoordinates, yCoordinates []float64
}

// initializeCoordinateMap precomputes the complex-plane coordinate for every
// pixel column and row, using a uniform gap (the smaller of the two axis gaps)
// centered on the set's center point.
// NOTE(review): maxCount is accepted but unused here — presumably kept for
// signature symmetry with the render entry points; confirm before removing.
func (r *Renderer) initializeCoordinateMap(sizeX int, sizeY int, set *MandelbrotSet, maxCount int) {
	r.xCoordinates = make([]float64, sizeX)
	r.yCoordinates = make([]float64, sizeY)

	size := set.Side
	gapX := size / float64(sizeX)
	gapY := size / float64(sizeY)
	// Use the smaller gap on both axes so pixels stay square.
	gap := math.Min(gapX, gapY)
	x := set.CenterX - ((gap * float64(sizeX)) / 2.0)
	y := set.CenterY - ((gap * float64(sizeY)) / 2.0)
	for i := 0; i < sizeX; i++ {
		r.xCoordinates[i] = x
		x += gap
	}
	for j := 0; j < sizeY; j++ {
		r.yCoordinates[j] = y
		y += gap
	}
}

// setBand maps an iteration count to a band value and stores it at (x, y).
func setBand(x int, y int, count int, buffer *PixelBuffer, bandMap *bandMap) {
	band := bandMap.Map(count)
	buffer.SetValue(x, y, band)
}

// Render draws the specified location into the given PixelBuffer
// by computing the iteration count for every pixel (brute force).
func (r *Renderer) Render(buffer *PixelBuffer, set *MandelbrotSet, bandMap *bandMap, maxCount int) {
	r.initializeCoordinateMap(buffer.SizeX(), buffer.SizeY(), set, maxCount)

	for i := 0; i < buffer.SizeX(); i++ {
		tx := r.xCoordinates[i]
		for j := 0; j < buffer.SizeY(); j++ {
			ty := r.yCoordinates[j]
			count := CalculateCount(tx, ty, maxCount)
			setBand(i, j, count, buffer, bandMap)
		}
	}
}

// getOrCalculateBand returns the band at (i, j), computing and caching it in
// the buffer if it has not been computed yet (a stored 0 means "unknown").
// calculated reports whether this call did the computation. Out-of-range
// coordinates yield (0, false).
func (r *Renderer) getOrCalculateBand(buffer *PixelBuffer, bandMap *bandMap, i int, j int, maxCount int) (band int32, calculated bool) {
	calculated = false
	band = 0
	if (i < 0) || (j < 0) || (i >= buffer.SizeX()) || (j >= buffer.SizeY()) {
		return
	}
	band = buffer.GetValue(i, j)
	if band == 0 {
		calculated = true
		count := CalculateCount(r.xCoordinates[i], r.yCoordinates[j], maxCount)
		band = bandMap.Map(count)
		buffer.SetValue(i, j, band)
	}
	return
}

// getBand reads the cached band at (i, j) without computing anything;
// out-of-range coordinates yield 0 ("unknown").
func getBand(buffer *PixelBuffer, i int, j int) (band int32) {
	band = 0
	if (i < 0) || (j < 0) || (i >= buffer.SizeX()) || (j >= buffer.SizeY()) {
		return
	}
	band = buffer.GetValue(i, j)
	return
}

// fillToLeft flood-fills unknown (0) pixels to the left of (i, j) with band,
// stopping at the first already-known pixel or the buffer edge. Used to fill
// the interior of a crawled contour.
func fillToLeft(buffer *PixelBuffer, i int, j int, band int32) {
	testBand := getBand(buffer, i-1, j)
	if (testBand == 0) || (testBand == band) {
		temp := i - 1
		testBand = getBand(buffer, temp, j)
		for testBand == 0 {
			if temp < 0 {
				return
			}
			if testBand == 0 {
				buffer.SetValue(temp, j, band)
			}
			temp--
			testBand = getBand(buffer, temp, j)
		}
	}
}

// fillCrawl retraces a previously-crawled contour starting at (firstI, firstJ)
// and fills its interior by sweeping left from the boundary whenever the crawl
// moves in the positive direction. Terminates when the walk returns to the
// start pixel.
func fillCrawl(buffer *PixelBuffer, firstI int, firstJ int, band int32) {
	i := firstI
	j := firstJ
	iinc := 1
	jinc := 1
	done := false
	for !done {
		// Horizontal step: turn if the neighbor leaves the band, else advance.
		if getBand(buffer, i+iinc, j) != band {
			jinc = iinc
		} else {
			jinc = -1 * iinc
			i += iinc
			done = ((firstI == i) && (firstJ == j))
			if jinc > 0 {
				fillToLeft(buffer, i, j, band)
			}
		}
		if done {
			break
		}
		// Vertical step, mirroring the horizontal logic.
		if getBand(buffer, i, j+jinc) != band {
			iinc = -1 * jinc
		} else {
			iinc = jinc
			j += jinc
			done = ((firstI == i) && (firstJ == j))
			if jinc > 0 {
				fillToLeft(buffer, i, j, band)
			}
		}
	}
}

// crawl walks the boundary of the contour whose interior band is bandInterior,
// starting at (firstI, firstJ), computing any unknown pixels it touches.
// crawled reports whether any new pixel was actually computed (i.e. this
// contour had not been traced before).
func (r *Renderer) crawl(buffer *PixelBuffer, bandMap *bandMap, firstI int, firstJ int, bandInterior int32, maxCount int) (crawled bool) {
	crawled = false
	done := false
	i := firstI
	j := firstJ
	iinc := 1
	jinc := 1
	for !done {
		band, calculated := r.getOrCalculateBand(buffer, bandMap, i+iinc, j, maxCount)
		if band != bandInterior {
			if calculated {
				crawled = true
			}
			jinc = iinc
		} else {
			jinc = -1 * iinc
			i += iinc
			done = ((firstI == i) && (firstJ == j))
		}
		band, calculated = r.getOrCalculateBand(buffer, bandMap, i, j+jinc, maxCount)
		if band != bandInterior {
			if calculated {
				crawled = true
			}
			iinc = -1 * jinc
		} else {
			iinc = jinc
			j += jinc
			done = ((firstI == i) && (firstJ == j))
		}
	}
	return
}

// RenderByCrawling uses the contour crawling algorithm to draw the given location of the set into the PixelBuffer.
// It scans columns, and once a run of pixels in the same band is long enough,
// crawls that band's contour and flood-fills its interior instead of
// computing every pixel. Returns the number of contours crawled.
func (r *Renderer) RenderByCrawling(buffer *PixelBuffer, set *MandelbrotSet, bandMap *bandMap, maxCount int) (numberOfContours int) {
	numberOfContours = 0
	r.initializeCoordinateMap(buffer.SizeX(), buffer.SizeY(), set, maxCount)
	for i := 0; i < buffer.SizeX(); i++ {
		// Keep track of the last band and how many pixels into that band we are.
		// Start crawling after we see a few pixels of the same band.
		lastBand := int32(0)
		numberOfPointsFoundInBand := 0
		startOfBand := 0
		for j := 0; j < buffer.SizeY(); j++ {
			band, calculated := r.getOrCalculateBand(buffer, bandMap, i, j, maxCount)
			if calculated && (band == lastBand) {
				numberOfPointsFoundInBand++
			} else {
				if band != lastBand {
					startOfBand = j
					lastBand = band
				}
				numberOfPointsFoundInBand = 1
			}
			// Heuristic: 5+ consecutive same-band pixels suggest a contour
			// worth crawling.
			if numberOfPointsFoundInBand > 5 {
				if r.crawl(buffer, bandMap, i, startOfBand, band, maxCount) {
					numberOfContours++
					fillCrawl(buffer, i, startOfBand, band)
				}
				numberOfPointsFoundInBand = 0
			}
		}
	}
	return
}
randelbrot/renderer.go
0.606964
0.509337
renderer.go
starcoder
// Package elogo implements Elo rating calculations.
// (gofmt-cleaned; behavior unchanged.)

const (
	// K is the default K-Factor.
	K = 32
	// D is the default deviation (rating-difference scale divisor).
	D = 400
)

// Elo calculates Elo rating changes based on the configured factors.
type Elo struct {
	K int
	D int
}

// Outcome is a match result data for a single player.
type Outcome struct {
	Delta  int // rating change produced by the match
	Rating int // new rating after applying Delta
}

// NewElo instantiates the Elo object with default factors.
// Default K-Factor is 32.
// Default deviation is 400.
func NewElo() *Elo {
	return &Elo{K, D}
}

// NewEloWithFactors instantiates the Elo object with custom factor values.
func NewEloWithFactors(k, d int) *Elo {
	return &Elo{k, d}
}

// ExpectedScore gives the expected chance that the first player wins.
func (e *Elo) ExpectedScore(ratingA, ratingB int) float64 {
	return e.ExpectedScoreWithFactors(ratingA, ratingB, e.D)
}

// ExpectedScoreWithFactors overrides default factors and gives the expected
// chance that the first player wins, via the standard Elo logistic curve
// 1 / (1 + 10^((Rb-Ra)/d)).
func (e *Elo) ExpectedScoreWithFactors(ratingA, ratingB, d int) float64 {
	return 1 / (1 + math.Pow(10, float64(ratingB-ratingA)/float64(d)))
}

// RatingDelta gives the ratings change for the first player for the given score.
func (e *Elo) RatingDelta(ratingA, ratingB int, score float64) int {
	return e.RatingDeltaWithFactors(ratingA, ratingB, score, e.K, e.D)
}

// RatingDeltaWithFactors overrides default factors and gives the ratings
// change for the first player for the given score (k * (score - expected),
// truncated toward zero).
func (e *Elo) RatingDeltaWithFactors(ratingA, ratingB int, score float64, k, d int) int {
	return int(float64(k) * (score - e.ExpectedScoreWithFactors(ratingA, ratingB, d)))
}

// Rating gives the new rating for the first player for the given score.
func (e *Elo) Rating(ratingA, ratingB int, score float64) int {
	return e.RatingWithFactors(ratingA, ratingB, score, e.K, e.D)
}

// RatingWithFactors overrides default factors and gives the new rating for the
// first player for the given score.
func (e *Elo) RatingWithFactors(ratingA, ratingB int, score float64, k, d int) int {
	return ratingA + e.RatingDeltaWithFactors(ratingA, ratingB, score, k, d)
}

// Outcome gives an Outcome object for each player for the given score.
func (e *Elo) Outcome(ratingA, ratingB int, score float64) (Outcome, Outcome) {
	return e.OutcomeWithFactors(ratingA, ratingB, score, e.K, e.D)
}

// OutcomeWithFactors overrides default factors and gives an Outcome object for
// each player for the given score. The second player's delta is the exact
// negation of the first player's.
func (e *Elo) OutcomeWithFactors(ratingA, ratingB int, score float64, k, d int) (Outcome, Outcome) {
	delta := e.RatingDeltaWithFactors(ratingA, ratingB, score, k, d)
	return Outcome{delta, ratingA + delta}, Outcome{-delta, ratingB - delta}
}
elogo.go
0.785061
0.622502
elogo.go
starcoder
package gogeom //y^2 = 4*a*x type Parabola struct { A float64 IsYAxis bool } //(y - k)^2 = 4*a*(x - h) type ParabolaWithOrigin struct { A float64 H float64 K float64 IsYAxis bool } //Length of latus ration func (p *Parabola) LenghtOfLatusRation() float64 { return (p.A * 4) } //Length of latus ration for shifted origin func (p *ParabolaWithOrigin) LenghtOfLatusRation() float64 { return (p.A * 4) } // find the focus of a parabola func (p *Parabola) FocusOfParabola() (float64, float64) { if p.IsYAxis == false { return p.A, 0 } else { return 0, p.A } } // focus of shifted origin parabola func (p *ParabolaWithOrigin) FocusOfParabola() (float64, float64) { if p.IsYAxis == false { return (p.H + p.A), p.K } else { return (p.H), (p.K + p.A) } } //Equation of directix func (p *Parabola) DirectrixEquation() string { if p.IsYAxis == true { return ("y = " + FloatToString(-(p.A))) } else { return ("x = " + FloatToString(-(p.A))) } } //axis of normal parabola func (p *Parabola) AxisEquation() string { if p.IsYAxis == true { return ("x = 0") } else { return ("y = 0") } } //axis of normal ParabolaWithOrigin func (p *ParabolaWithOrigin) AxisEquation() string { if p.IsYAxis == true { return ("x = " + FloatToString(p.H-p.A)) } else { return ("y = " + FloatToString(p.K-p.A)) } } //Equation of directix ParabolaWithOrigin func (p *ParabolaWithOrigin) DirectrixEquation() string { if p.IsYAxis == true { return ("y = " + FloatToString(p.K-(p.A))) } else { return ("x = " + FloatToString(p.H-(p.A))) } } //Equation of Vertex ParabolaWithOrigin func (p *ParabolaWithOrigin) Vertex() (float64, float64) { return p.H, p.K } //Equation of Position Of Point func (p *Parabola) PositionOfPoint(x, y float64) string { output := "" if PowerFunction(y, 2) == (4 * p.A * x) { output = "Point lies on Parabola" } else if PowerFunction(y, 2) > (4 * p.A * x) { output = "Point lies outside of Parabola" } else if PowerFunction(y, 2) < (4 * p.A * x) { output = "Point lies inside of Parabola" } return output } 
//Line y =mx+c is intersecting with the parabola func (p *Parabola) PointOfInteresction(m, c float64) string { output := "" denominator := p.A / m if c == denominator { output = "meet the parabola at coincident points" } else if c < denominator { output = "intersect to parabola at two points" } else if c > denominator { output = "doesn't cut the parabola or touch it" } return output } //Equation of Tangent Equation of parabola func (p *Parabola) TangentEquation(x, y float64) string { output := "" if p.IsYAxis == true { output = FloatToString(x) + "x = " + FloatToString(2*p.A) + " (x +" + FloatToString(x) + ")" } else { output = FloatToString(y) + "y = " + FloatToString(2*p.A) + " (y +" + FloatToString(y) + ")" } return output } //Equation of Normal Equation of parabola func (p *Parabola) NormalEquation(x, y float64) string { LHS := FloatToString(y) + "y" RHS := FloatToString(-(y / 2 * p.A)) + " ( x - " + FloatToString(x) + " ) " return LHS + " = " + RHS } //Equation of Chord Of Contact Equation func (p *Parabola) ChordOfContactEquation(x, y float64) string { return FloatToString(x) + "x = " + FloatToString(2) + " (x +" + FloatToString(x) + ")" } //Equation of Polar Of Point Equation func (p *Parabola) PolarOfPoint(x, y float64) string { return FloatToString(x) + "x = " + FloatToString(2) + " (x +" + FloatToString(x) + ")" } // Point of Pole in Line func (p *Parabola) PoleOfline(l, m float64) (float64, float64) { return m / l, (-2 * p.A * m) / l }
parabola.go
0.727395
0.569015
parabola.go
starcoder
package sunspec

// Point defines the generic behavior all sunspec types have in common.
type Point interface {
	// Index defines the locality of the point in a modbus address space.
	Index

	// Name returns the point's identifier.
	Name() string

	// Valid specifies whether the underlying value is implemented by the device.
	Valid() bool

	// Origin returns the point's associated group.
	Origin() Group

	// Static specifies whether the point is expected to stay constant - not change over time.
	Static() bool

	// Writable specifies whether the point can be written to.
	Writable() bool

	// encode puts the point's value into a buffer.
	encode(buf []byte) error

	// decode sets the point's value from a buffer.
	decode(buf []byte) error
}

// PointDef is the definition of a sunspec point element,
// typically unmarshalled from a JSON model definition.
type PointDef struct {
	Name        string      `json:"name"`
	Type        string      `json:"type"`
	Value       interface{} `json:"value,omitempty"`
	Count       interface{} `json:"count,omitempty"`
	Size        uint16      `json:"size"`
	ScaleFactor interface{} `json:"sf,omitempty"`
	Units       string      `json:"units,omitempty"`
	Writable    writable    `json:"access,omitempty"`
	Mandatory   mandatory   `json:"mandatory,omitempty"`
	Static      static      `json:"static,omitempty"`
	Label       string      `json:"label,omitempty"`
	Description string      `json:"desc,omitempty"`
	Detail      string      `json:"detail,omitempty"`
	Notes       string      `json:"notes,omitempty"`
	Comments    []string    `json:"comments,omitempty"`
	Symbols     []SymbolDef `json:"symbols,omitempty"`
}

// Instance materializes a concrete Point at modbus address adr, owned by
// group o, dispatching on the definition's Type string.
// NOTE(review): an unknown Type causes init[def.Type] to be nil and this
// call to panic — presumably definitions are validated upstream; confirm.
func (def *PointDef) Instance(adr uint16, o Group) Point {
	// Shared base for all concrete point types.
	p := point{
		name:     def.Name,
		static:   bool(def.Static),
		writable: bool(def.Writable),
		origin:   o,
		address:  adr,
	}
	f := scale{def.ScaleFactor}
	// Symbol table keyed by symbol value.
	s := make(Symbols, len(def.Symbols))
	for _, sym := range def.Symbols {
		s[sym.Value] = &symbol{sym.Name, sym.Value}
	}
	// One constructor per sunspec type; only the matching one is invoked.
	init := map[string]func() Point{
		"int16":      func() Point { return &tInt16{p, toInt16(def.Value), f} },
		"int32":      func() Point { return &tInt32{p, toInt32(def.Value), f} },
		"int64":      func() Point { return &tInt64{p, toInt64(def.Value), f} },
		"pad":        func() Point { return &tPad{p} },
		"sunssf":     func() Point { return &tSunssf{p, toInt16(def.Value)} },
		"uint16":     func() Point { return &tUint16{p, toUint16(def.Value), f} },
		"uint32":     func() Point { return &tUint32{p, toUint32(def.Value), f} },
		"uint64":     func() Point { return &tUint64{p, toUint64(def.Value), f} },
		"acc16":      func() Point { return &tAcc16{p, toUint16(def.Value), f} },
		"acc32":      func() Point { return &tAcc32{p, toUint32(def.Value), f} },
		"acc64":      func() Point { return &tAcc64{p, toUint64(def.Value), f} },
		"count":      func() Point { return &tCount{p, toUint16(def.Value)} },
		"bitfield16": func() Point { return &tBitfield16{p, toUint16(def.Value), s} },
		"bitfield32": func() Point { return &tBitfield32{p, toUint32(def.Value), s} },
		"bitfield64": func() Point { return &tBitfield64{p, toUint64(def.Value), s} },
		"enum16":     func() Point { return &tEnum16{p, toUint16(def.Value), s} },
		"enum32":     func() Point { return &tEnum32{p, toUint32(def.Value), s} },
		// Strings reserve Size registers (2 bytes each) of capacity up front.
		"string":   func() Point { return &tString{p, append(make([]byte, 0, def.Size*2), toByteS(def.Value)...)} },
		"float32":  func() Point { return &tFloat32{p, toFloat32(def.Value)} },
		"float64":  func() Point { return &tFloat64{p, toFloat64(def.Value)} },
		"ipaddr":   func() Point { return &tIpaddr{p, [4]byte{}} },    // initial value ToDo
		"ipv6addr": func() Point { return &tIpv6addr{p, [16]byte{}} }, // initial value ToDo
		"eui48":    func() Point { return &tEui48{p, [8]byte{}} },     // initial value ToDo
	}
	return init[def.Type]()
}

// point is internally used to build out a useable model;
// it carries the metadata shared by every concrete point type.
type point struct {
	name     string
	origin   Group
	static   bool
	writable bool
	address  uint16
}

// Address returns the modbus starting address of the point.
func (p *point) Address() uint16 {
	return p.address
}

// Name returns the point's identifier.
func (p *point) Name() string {
	return p.name
}

// Writable specifies whether the point can be written to.
func (p *point) Writable() bool {
	return p.writable
}

// Origin returns the point's associated group.
func (p *point) Origin() Group {
	return p.origin
}

// Static specifies whether the points underlying data is supposed to be constant,
// meaning it is not supposed to change over time.
func (p *point) Static() bool {
	return p.static
}

// Points is a collection wrapper for multiple Points.
// Offering functionalities applicable for them.
type Points []Point

// First returns the first point of the collection.
func (pts Points) First() Point {
	return pts[0]
}

// Last returns the last point of the collection.
func (pts Points) Last() Point {
	return pts[len(pts)-1]
}

// Quantity returns the total number of registers (2-Byte-Tuples/words)
// required to store the point in a modbus address space.
func (pts Points) Quantity() uint16 {
	var l uint16
	for _, p := range pts {
		l += p.Quantity()
	}
	return l
}

// Point returns the first immediate point identified by name,
// or nil when no point matches.
func (pts Points) Point(name string) Point {
	for _, p := range pts {
		if p.Name() == name {
			return p
		}
	}
	return nil
}

// Points returns all immediate points identified by names.
// If names are omitted all points are returned (as a copy).
func (pts Points) Points(names ...string) Points {
	if len(names) == 0 {
		return append(Points(nil), pts...)
	}
	col := make(Points, 0, len(names))
	for _, p := range pts {
		for _, id := range names {
			if p.Name() == id {
				col = append(col, p)
				break
			}
		}
	}
	return col
}

// address is internally used to get the address of a continuous collection of points.
func (pts Points) address() uint16 {
	return pts[0].Address()
}

// Index returns the merged indexes of all points in the collection.
func (pts Points) Index() []Index {
	idx := make([]Index, 0, len(pts))
	for _, p := range pts {
		idx = append(idx, p)
	}
	return merge(idx)
}

// index is internally used to get the locality of a continuous collection of points.
func (pts Points) index() Index {
	return index{address: pts.address(), quantity: pts.Quantity()}
}

// decode sets the value for all points in the collection as stored in the buffer,
// advancing by each point's register quantity (2 bytes per register).
func (pts Points) decode(buf []byte) error {
	for _, p := range pts {
		if err := p.decode(buf); err != nil {
			return err
		}
		buf = buf[2*p.Quantity():]
	}
	return nil
}

// encode puts the values of the points in the collection into the buffer,
// advancing by each point's register quantity (2 bytes per register).
func (pts Points) encode(buf []byte) error {
	for _, p := range pts {
		if err := p.encode(buf); err != nil {
			return err
		}
		buf = buf[2*p.Quantity():]
	}
	return nil
}
0.889313
0.517144
point.go
starcoder
package docs import "github.com/swaggo/swag" const docTemplate = `{ "schemes": {{ marshal .Schemes }}, "swagger": "2.0", "info": { "description": "{{escape .Description}}", "title": "{{.Title}}", "termsOfService": "http://estuary.tech", "contact": { "name": "API Support", "url": "https://docs.estuary.tech/feedback" }, "license": { "name": "Apache 2.0 Apache-2.0 OR MIT", "url": "https://github.com/application-research/estuary/blob/master/LICENSE.md" }, "version": "{{.Version}}" }, "host": "{{.Host}}", "basePath": "{{.BasePath}}", "paths": { "/admin/peering/peers": { "get": { "description": "This endpoint can be used to list all peers on Peering Service", "produces": [ "application/json" ], "tags": [ "admin", "peering", "peers" ], "summary": "List all Peering peers", "responses": {} }, "post": { "description": "This endpoint can be used to add a Peer from the Peering Service", "produces": [ "application/json" ], "tags": [ "admin", "peering", "peers" ], "summary": "Add peers on Peering Service", "responses": {} }, "delete": { "description": "This endpoint can be used to remove a Peer from the Peering Service", "produces": [ "application/json" ], "tags": [ "admin", "peering", "peers" ], "summary": "Remove peers on Peering Service", "responses": {} } }, "/admin/peering/start": { "post": { "description": "This endpoint can be used to start the Peering Service", "produces": [ "application/json" ], "tags": [ "admin", "peering", "peers" ], "summary": "Start Peering", "responses": {} } }, "/admin/peering/status": { "get": { "description": "This endpoint can be used to check the Peering status", "produces": [ "application/json" ], "tags": [ "admin", "peering", "peers" ], "summary": "Check Peering Status", "responses": {} } }, "/admin/peering/stop": { "post": { "description": "This endpoint can be used to stop the Peering Service", "produces": [ "application/json" ], "tags": [ "admin", "peering", "peers" ], "summary": "Stop Peering", "responses": {} } }, 
"/admin/system/config": { "get": { "description": "This endpoint is used to get system configs.", "produces": [ "application/json" ], "tags": [ "admin" ], "summary": "Get systems(estuary/shuttle) config", "responses": {} } }, "/admin/users": { "get": { "description": "This endpoint is used to get all users.", "produces": [ "application/json" ], "tags": [ "admin" ], "summary": "Get all users", "responses": {} } }, "/collections/add-content": { "post": { "description": "When a collection is created, users with valid API keys can add contents to the collection. This endpoint can be used to add contents to a collection.", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "collections" ], "summary": "Add contents to a collection", "parameters": [ { "description": "Contents to add to collection", "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/main.addContentToCollectionParams" } } ], "responses": { "200": { "description": "OK", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/collections/content": { "get": { "description": "This endpoint is used to get contents in a collection. If no colpath query param is passed", "produces": [ "application/json" ], "tags": [ "collections" ], "summary": "Get contents in a collection", "parameters": [ { "type": "string", "description": "Collection UUID", "name": "coluuid", "in": "query", "required": true }, { "type": "string", "description": "Directory", "name": "colpath", "in": "query" } ], "responses": { "200": { "description": "OK", "schema": { "type": "string" } } } } }, "/collections/create": { "post": { "description": "This endpoint is used to create a new collection. A collection is a representaion of a group of objects added on the estuary. 
This endpoint can be used to create a new collection.", "produces": [ "application/json" ], "tags": [ "collections" ], "summary": "Create a new collection", "parameters": [ { "description": "Collection name and description", "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/main.createCollectionBody" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/main.Collection" } }, "400": { "description": "Bad Request", "schema": { "$ref": "#/definitions/util.HttpError" } }, "404": { "description": "Not Found", "schema": { "$ref": "#/definitions/util.HttpError" } }, "500": { "description": "Internal Server Error", "schema": { "$ref": "#/definitions/util.HttpError" } } } } }, "/collections/fs/add": { "post": { "description": "This endpoint adds a file to a collection", "produces": [ "application/json" ], "tags": [ "collections" ], "summary": "Add a file to a collection", "parameters": [ { "type": "string", "description": "Collection ID", "name": "coluuid", "in": "query", "required": true }, { "type": "string", "description": "Content", "name": "content", "in": "query", "required": true }, { "type": "string", "description": "Path to file", "name": "path", "in": "query", "required": true } ], "responses": {} } }, "/collections/list": { "get": { "description": "This endpoint is used to list all collections. Whenever a user logs on estuary, it will list all collections that the user has access to. 
This endpoint provides a way to list all collections to the user.", "produces": [ "application/json" ], "tags": [ "collections" ], "summary": "List all collections", "parameters": [ { "type": "integer", "description": "User ID", "name": "id", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/main.Collection" } } }, "400": { "description": "Bad Request", "schema": { "$ref": "#/definitions/util.HttpError" } }, "404": { "description": "Not Found", "schema": { "$ref": "#/definitions/util.HttpError" } }, "500": { "description": "Internal Server Error", "schema": { "$ref": "#/definitions/util.HttpError" } } } } }, "/collections/{coluuid}": { "delete": { "description": "This endpoint is used to delete an existing collection.", "tags": [ "collections" ], "summary": "Deletes a collection", "parameters": [ { "type": "string", "description": "Collection ID", "name": "coluuid", "in": "path", "required": true } ], "responses": {} } }, "/collections/{coluuid}/commit": { "post": { "description": "This endpoint is used to save the contents in a collection, producing a top-level CID that references all the current CIDs in the collection.", "produces": [ "application/json" ], "tags": [ "collections" ], "summary": "Produce a CID of the collection contents", "parameters": [ { "type": "string", "description": "coluuid", "name": "coluuid", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "string" } } } } }, "/content/add": { "post": { "description": "This endpoint is used to upload new content.", "consumes": [ "multipart/form-data" ], "produces": [ "application/json" ], "tags": [ "content" ], "summary": "Add new content", "parameters": [ { "type": "file", "description": "File to upload", "name": "file", "in": "formData", "required": true }, { "type": "string", "description": "Collection UUID", "name": "coluuid", "in": "path" }, { "type": 
"string", "description": "Collection path", "name": "colpath", "in": "path" } ], "responses": {} } }, "/content/add-car": { "post": { "description": "This endpoint is used to add a car object to the network. The object can be a file or a directory.", "produces": [ "application/json" ], "tags": [ "content" ], "summary": "Add Car object", "parameters": [ { "description": "Car", "name": "body", "in": "body", "required": true, "schema": { "type": "string" } }, { "type": "string", "description": "Filename", "name": "filename", "in": "query" }, { "type": "string", "description": "Commp", "name": "commp", "in": "query" }, { "type": "string", "description": "Size", "name": "size", "in": "query" } ], "responses": {} } }, "/content/add-ipfs": { "post": { "description": "This endpoint is used to add an IPFS object to the network. The object can be a file or a directory.", "produces": [ "application/json" ], "tags": [ "content" ], "summary": "Add IPFS object", "parameters": [ { "description": "IPFS Body", "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/util.ContentAddIpfsBody" } } ], "responses": {} } }, "/content/aggregated/{content}": { "get": { "description": "This endpoint returns aggregated content stats", "produces": [ "application/json" ], "tags": [ "content" ], "summary": "Get aggregated content stats", "parameters": [ { "type": "string", "description": "Content ID", "name": "content", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "string" } } } } }, "/content/all-deals": { "get": { "description": "This endpoint is used to get all deals for a user", "produces": [ "application/json" ], "tags": [ "content" ], "summary": "Get all deals for a user", "parameters": [ { "type": "string", "description": "Begin", "name": "begin", "in": "query", "required": true }, { "type": "string", "description": "Duration", "name": "duration", "in": "query", "required": true }, { "type": "string", 
"description": "All", "name": "all", "in": "query", "required": true } ], "responses": {} } }, "/content/bw-usage/{content}": { "get": { "description": "This endpoint returns content bandwidth", "produces": [ "application/json" ], "tags": [ "content" ], "summary": "Get content bandwidth", "parameters": [ { "type": "string", "description": "Content ID", "name": "content", "in": "path", "required": true } ], "responses": {} } }, "/content/create": { "post": { "description": "This endpoint adds a new content", "produces": [ "application/json" ], "tags": [ "content" ], "summary": "Add a new content", "parameters": [ { "description": "Content", "name": "body", "in": "body", "required": true, "schema": { "type": "string" } } ], "responses": {} } }, "/content/deals": { "get": { "description": "This endpoint lists all content with deals", "produces": [ "application/json" ], "tags": [ "content" ], "summary": "Content with deals", "parameters": [ { "type": "integer", "description": "Limit", "name": "limit", "in": "query" }, { "type": "integer", "description": "Offset", "name": "offset", "in": "query" } ], "responses": {} } }, "/content/ensure-replication/{datacid}": { "get": { "description": "This endpoint ensures that the content is replicated to the specified number of providers", "produces": [ "application/json" ], "tags": [ "content" ], "summary": "Ensure Replication", "parameters": [ { "type": "string", "description": "Data CID", "name": "datacid", "in": "path", "required": true } ], "responses": {} } }, "/content/failures/{content}": { "get": { "description": "This endpoint returns all failures for a content", "produces": [ "application/json" ], "tags": [ "content" ], "summary": "List all failures for a content", "parameters": [ { "type": "string", "description": "Content ID", "name": "content", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "string" } } } } }, "/content/importdeal": { "post": { "description": "This 
endpoint imports a deal into the shuttle.", "produces": [ "application/json" ], "tags": [ "content" ], "summary": "Import a deal", "parameters": [ { "description": "Import a deal", "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/main.importDealBody" } } ], "responses": {} } }, "/content/list": { "get": { "description": "This endpoint lists all content", "produces": [ "application/json" ], "tags": [ "content" ], "summary": "List all pinned content", "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "type": "string" } } } } } }, "/content/read/{cont}": { "get": { "description": "This endpoint reads content from the blockstore", "produces": [ "application/json" ], "tags": [ "content" ], "summary": "Read content", "parameters": [ { "type": "string", "description": "CID", "name": "cont", "in": "path", "required": true } ], "responses": {} } }, "/content/staging-zones": { "get": { "description": "This endpoint is used to get staging zone for user.", "produces": [ "application/json" ], "tags": [ "content" ], "summary": "Get staging zone for user", "responses": {} } }, "/content/stats": { "get": { "description": "This endpoint is used to get content statistics. Every content stored in the network (estuary) is tracked by a unique ID which can be used to get information about the content. 
This endpoint will allow the consumer to get the collected stats of a conten", "produces": [ "application/json" ], "tags": [ "content" ], "summary": "Get content statistics", "parameters": [ { "type": "string", "description": "limit", "name": "limit", "in": "path", "required": true } ], "responses": {} } }, "/content/status/{id}": { "get": { "description": "This endpoint returns the status of a content", "produces": [ "application/json" ], "tags": [ "content" ], "summary": "Content Status", "parameters": [ { "type": "integer", "description": "Content ID", "name": "id", "in": "path", "required": true } ], "responses": {} } }, "/deal/estimate": { "post": { "description": "This endpoint estimates the cost of a deal", "produces": [ "application/json" ], "tags": [ "deals" ], "summary": "Estimate the cost of a deal", "parameters": [ { "description": "The size of the deal in bytes, the replication factor, and the duration of the deal in blocks", "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/main.estimateDealBody" } } ], "responses": {} } }, "/deal/info/{dealid}": { "get": { "description": "This endpoint returns the deal info for a deal", "produces": [ "application/json" ], "tags": [ "deals" ], "summary": "Get Deal Info", "parameters": [ { "type": "integer", "description": "Deal ID", "name": "dealid", "in": "path", "required": true } ], "responses": {} } }, "/deal/make/{miner}": { "post": { "description": "This endpoint makes a deal for a given content and miner", "produces": [ "application/json" ], "tags": [ "deals" ], "summary": "Make Deal", "parameters": [ { "type": "string", "description": "Miner", "name": "miner", "in": "path", "required": true }, { "description": "Deal Request", "name": "dealRequest", "in": "body", "required": true, "schema": { "type": "string" } } ], "responses": {} } }, "/deal/proposal/{propcid}": { "get": { "description": "This endpoint returns the proposal for a deal", "produces": [ "application/json" ], 
"tags": [ "deals" ], "summary": "Get Proposal", "parameters": [ { "type": "string", "description": "Proposal CID", "name": "propcid", "in": "path", "required": true } ], "responses": {} } }, "/deal/query/{miner}": { "get": { "description": "This endpoint returns the ask for a given CID", "produces": [ "application/json" ], "tags": [ "deals" ], "summary": "Query Ask", "parameters": [ { "type": "string", "description": "CID", "name": "miner", "in": "path", "required": true } ], "responses": {} } }, "/deal/status-by-proposal/{propcid}": { "get": { "description": "Get Deal Status by PropCid", "produces": [ "application/json" ], "tags": [ "deals" ], "summary": "Get Deal Status by PropCid", "parameters": [ { "type": "string", "description": "PropCid", "name": "propcid", "in": "path", "required": true } ], "responses": {} } }, "/deal/status/{deal}": { "get": { "description": "This endpoint returns the status of a deal", "produces": [ "application/json" ], "tags": [ "deals" ], "summary": "Get Deal Status", "parameters": [ { "type": "integer", "description": "Deal ID", "name": "deal", "in": "path", "required": true } ], "responses": {} } }, "/deal/status/{miner}/{propcid}": { "get": { "description": "This endpoint returns the status of a deal", "produces": [ "application/json" ], "tags": [ "deals" ], "summary": "Deal Status", "parameters": [ { "type": "string", "description": "Miner", "name": "miner", "in": "path", "required": true }, { "type": "string", "description": "Proposal CID", "name": "propcid", "in": "path", "required": true } ], "responses": {} } }, "/deal/transfer/in-progress": { "get": { "description": "This endpoint returns the in-progress transfers", "produces": [ "application/json" ], "tags": [ "deals" ], "summary": "Transfer In Progress", "responses": {} } }, "/deal/transfer/status": { "post": { "description": "This endpoint returns the status of a transfer", "produces": [ "application/json" ], "tags": [ "deals" ], "summary": "Transfer Status", "responses": 
{} } }, "/deals/failures": { "get": { "description": "This endpoint returns a list of storage failures for user", "produces": [ "application/json" ], "tags": [ "deals" ], "summary": "Get storage failures for user", "responses": {} } }, "/net/addrs": { "get": { "description": "This endpoint is used to get net addrs", "produces": [ "application/json" ], "tags": [ "net" ], "summary": "Net Addrs", "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "type": "string" } } } } } }, "/pinning/pins": { "get": { "description": "This endpoint lists all pin status objects", "produces": [ "application/json" ], "tags": [ "pinning" ], "summary": "List all pin status objects", "responses": { "400": { "description": "Bad Request", "schema": { "$ref": "#/definitions/util.HttpError" } }, "404": { "description": "Not Found", "schema": { "$ref": "#/definitions/util.HttpError" } }, "500": { "description": "Internal Server Error", "schema": { "$ref": "#/definitions/util.HttpError" } } } }, "post": { "description": "This endpoint adds a pin to the IPFS daemon.", "produces": [ "application/json" ], "tags": [ "pinning" ], "summary": "Add and pin object", "parameters": [ { "type": "string", "description": "cid", "name": "cid", "in": "path", "required": true }, { "type": "string", "description": "name", "name": "name", "in": "path", "required": true } ], "responses": {} } }, "/pinning/pins/{pinid}": { "get": { "description": "This endpoint returns a pin status object.", "produces": [ "application/json" ], "tags": [ "pinning" ], "summary": "Get a pin status object", "parameters": [ { "type": "string", "description": "cid", "name": "pinid", "in": "path", "required": true } ], "responses": {} }, "post": { "description": "This endpoint replaces a pinned object.", "produces": [ "application/json" ], "tags": [ "pinning" ], "summary": "Replace a pinned object", "parameters": [ { "type": "string", "description": "Pin ID", "name": "pinid", "in": "path", "required": true 
} ], "responses": {} }, "delete": { "description": "This endpoint deletes a pinned object.", "produces": [ "application/json" ], "tags": [ "pinning" ], "summary": "Delete a pinned object", "parameters": [ { "type": "string", "description": "Pin ID", "name": "pinid", "in": "path", "required": true } ], "responses": {} } }, "/public/by-cid/{cid}": { "get": { "description": "This endpoint returns the content associated with a CID", "produces": [ "application/json" ], "tags": [ "public" ], "summary": "Get Content by Cid", "parameters": [ { "type": "string", "description": "Cid", "name": "cid", "in": "path", "required": true } ], "responses": {} } }, "/public/deals/failures": { "get": { "description": "This endpoint returns a list of storage failures", "produces": [ "application/json" ], "tags": [ "deals" ], "summary": "Get storage failures", "responses": {} } }, "/public/info": { "get": { "description": "This endpoint returns information about the node", "produces": [ "application/json" ], "tags": [ "public" ], "summary": "Get public node info", "responses": {} } }, "/public/metrics/deals-on-chain": { "get": { "description": "This endpoint is used to get deal metrics", "produces": [ "application/json" ], "tags": [ "public", "metrics" ], "summary": "Get deal metrics", "responses": {} } }, "/public/miners": { "get": { "description": "This endpoint returns all miners", "produces": [ "application/json" ], "tags": [ "public", "net" ], "summary": "Get all miners", "responses": {} } }, "/public/miners/deals/{miner}": { "get": { "description": "This endpoint returns all miners deals", "produces": [ "application/json" ], "tags": [ "public", "miner" ], "summary": "Get all miners deals", "parameters": [ { "type": "string", "description": "Filter by miner", "name": "miner", "in": "path" } ], "responses": {} } }, "/public/miners/failures/{miner}": { "get": { "description": "This endpoint returns all miners", "produces": [ "application/json" ], "tags": [ "public", "net" ], 
"summary": "Get all miners", "parameters": [ { "type": "string", "description": "Filter by miner", "name": "miner", "in": "query" } ], "responses": {} } }, "/public/miners/stats/{miner}": { "get": { "description": "This endpoint returns miner stats", "produces": [ "application/json" ], "tags": [ "public", "miner" ], "summary": "Get miner stats", "parameters": [ { "type": "string", "description": "Filter by miner", "name": "miner", "in": "path" } ], "responses": {} } }, "/public/net/addrs": { "get": { "description": "This endpoint is used to get net addrs", "produces": [ "application/json" ], "tags": [ "public", "net" ], "summary": "Net Addrs", "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "type": "string" } } } } } }, "/public/net/peers": { "get": { "description": "This endpoint is used to get net peers", "produces": [ "application/json" ], "tags": [ "public", "net" ], "summary": "Net Peers", "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "type": "string" } } } } } }, "/public/stats": { "get": { "description": "This endpoint is used to get public stats.", "produces": [ "application/json" ], "tags": [ "public" ], "summary": "Public stats", "responses": {} } }, "/user/api-keys": { "get": { "description": "This endpoint is used to get API keys for a user. In estuary, each user can be given multiple API keys (tokens). 
This endpoint can be used to retrieve all available API keys for a given user.", "produces": [ "application/json" ], "tags": [ "User" ], "summary": "Get API keys for a user", "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/main.getApiKeysResp" } } }, "400": { "description": "Bad Request", "schema": { "$ref": "#/definitions/util.HttpError" } }, "404": { "description": "Not Found", "schema": { "$ref": "#/definitions/util.HttpError" } }, "500": { "description": "Internal Server Error", "schema": { "$ref": "#/definitions/util.HttpError" } } } }, "post": { "description": "This endpoint is used to create API keys for a user. In estuary, each user is given an API key to access all features.", "produces": [ "application/json" ], "tags": [ "User" ], "summary": "Create API keys for a user", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/main.getApiKeysResp" } }, "400": { "description": "Bad Request", "schema": { "$ref": "#/definitions/util.HttpError" } }, "404": { "description": "Not Found", "schema": { "$ref": "#/definitions/util.HttpError" } }, "500": { "description": "Internal Server Error", "schema": { "$ref": "#/definitions/util.HttpError" } } } } }, "/user/api-keys/{key}": { "delete": { "description": "This endpoint is used to revoke a user API key. In estuary, every user is assigned with an API key, this API key is generated and issued for each user and is primarily use to access all estuary features. 
This endpoint can be used to revoke the API key thats assigned to the user.", "produces": [ "application/json" ], "tags": [ "User" ], "summary": "Revoke a User API Key.", "parameters": [ { "type": "string", "description": "Key", "name": "key", "in": "path", "required": true } ], "responses": {} } }, "/user/export": { "get": { "description": "This endpoint is used to get API keys for a user.", "produces": [ "application/json" ], "tags": [ "User" ], "summary": "Export user data", "responses": { "200": { "description": "OK", "schema": { "type": "string" } } } } }, "/user/stats": { "get": { "description": "This endpoint is used to create API keys for a user.", "produces": [ "application/json" ], "tags": [ "User" ], "summary": "Create API keys for a user", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/main.userStatsResponse" } } } } } }, "definitions": { "main.Collection": { "type": "object", "properties": { "cid": { "type": "string" }, "createdAt": { "type": "string" }, "description": { "type": "string" }, "name": { "type": "string" }, "userId": { "type": "integer" }, "uuid": { "type": "string" } } }, "main.addContentToCollectionParams": { "type": "object", "properties": { "cids": { "type": "array", "items": { "type": "string" } }, "coluuid": { "type": "string" }, "contents": { "type": "array", "items": { "type": "integer" } } } }, "main.createCollectionBody": { "type": "object", "properties": { "description": { "type": "string" }, "name": { "type": "string" } } }, "main.estimateDealBody": { "type": "object", "properties": { "durationBlks": { "type": "integer" }, "replication": { "type": "integer" }, "size": { "type": "integer" }, "verified": { "type": "boolean" } } }, "main.getApiKeysResp": { "type": "object", "properties": { "expiry": { "type": "string" }, "token": { "type": "string" } } }, "main.importDealBody": { "type": "object", "properties": { "colpath": { "type": "string" }, "coluuid": { "type": "string" }, "dealIDs": { "type": 
"array", "items": { "type": "integer" } }, "name": { "type": "string" } } }, "main.userStatsResponse": { "type": "object", "properties": { "numPins": { "type": "integer" }, "totalSize": { "type": "integer" } } }, "util.ContentAddIpfsBody": { "type": "object", "properties": { "colpath": { "type": "string" }, "coluuid": { "type": "string" }, "filename": { "type": "string" }, "peers": { "type": "array", "items": { "type": "string" } }, "root": { "type": "string" } } }, "util.HttpError": { "type": "object", "properties": { "code": { "type": "integer" }, "details": { "type": "string" }, "message": { "type": "string" } } } } }` // SwaggerInfo holds exported Swagger Info so clients can modify it var SwaggerInfo = &swag.Spec{ Version: "0.0.0", Host: "api.estuary.tech", BasePath: "/", Schemes: []string{}, Title: "Estuary API", Description: "This is the API for the Estuary application.", InfoInstanceName: "swagger", SwaggerTemplate: docTemplate, } func init() { swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo) }
docs/docs.go
0.766031
0.406921
docs.go
starcoder
package bitmap

import (
	"errors"
	"image"
	"image/color"
	"strings"

	"github.com/pzduniak/unipdf/common"
	"github.com/pzduniak/unipdf/internal/jbig2/writer"
)

// ErrIndexOutOfRange is the error that returns if the bitmap byte index is out of range.
var ErrIndexOutOfRange = errors.New("bitmap byte index out of range")

// Bitmap is the jbig2 bitmap representation. Pixels are packed one bit each,
// most-significant bit first, with every row padded to a whole number of
// bytes (RowStride).
type Bitmap struct {
	// Width and Height represents bitmap dimensions.
	Width, Height int

	// BitmapNumber is the bitmap's id number.
	BitmapNumber int

	// RowStride is the number of bytes set per row.
	RowStride int

	// Data saves the bits data for the bitmap.
	Data []byte

	// isVanilla records the current bit interpretation of Data:
	// true  -> the 0 bit means black, the 1 bit means white ("vanilla"),
	// false -> the 0 bit means white, the 1 bit means black ("chocolate").
	isVanilla bool
}

// New creates new bitmap with the parameters as provided in the arguments.
// The pixel data is zero-initialized.
func New(width, height int) *Bitmap {
	bm := &Bitmap{
		Width:  width,
		Height: height,
		// Round the row width up to a whole number of bytes.
		RowStride: (width + 7) >> 3,
	}
	bm.Data = make([]byte, height*bm.RowStride)
	return bm
}

// NewWithData creates new bitmap with the provided 'width', 'height' and the byte slice 'data'.
// The slice is used directly (not copied); it should be at least
// height * rowStride bytes long.
func NewWithData(width, height int, data []byte) *Bitmap {
	bm := New(width, height)
	bm.Data = data
	return bm
}

// Equals checks if all the pixels in the 'b' bitmap are equals to the 's' bitmap.
// Bitmaps with different dimensions are never equal.
func (b *Bitmap) Equals(s *Bitmap) bool {
	// The dimension check both rejects geometrically different bitmaps and
	// guarantees the pixel loop below stays in range for 's'.
	if len(b.Data) != len(s.Data) || b.Width != s.Width || b.Height != s.Height {
		return false
	}
	for y := 0; y < b.Height; y++ {
		for x := 0; x < b.Width; x++ {
			if b.GetPixel(x, y) != s.GetPixel(x, y) {
				return false
			}
		}
	}
	return true
}

// GetBitOffset gets the bit offset at the 'x' coordinate.
func (b *Bitmap) GetBitOffset(x int) int {
	return x & 0x07
}

// GetByte gets and returns the byte at given 'index'.
// Returns ErrIndexOutOfRange when 'index' is outside Data.
func (b *Bitmap) GetByte(index int) (byte, error) {
	if index > len(b.Data)-1 || index < 0 {
		return 0, ErrIndexOutOfRange
	}
	return b.Data[index], nil
}

// GetByteIndex gets the byte index from the bitmap at coordinates 'x','y'.
func (b *Bitmap) GetByteIndex(x, y int) int {
	return y*b.RowStride + (x >> 3)
}

// GetChocolateData gets bitmap data as a byte slice with Chocolate bit interpretation.
// 'Chocolate' data is the bit interpretation where the 0'th bit means white and the 1'th bit means black.
// The naming convention based on the: `https://en.wikipedia.org/wiki/Binary_image#Interpretation` page.
// Note: this inverts Data in place when the bitmap is currently vanilla.
func (b *Bitmap) GetChocolateData() []byte {
	if b.isVanilla {
		b.inverseData()
	}
	return b.Data
}

// GetPixel gets the pixel value at the coordinates 'x', 'y'.
// Out-of-range coordinates log a debug message and report false.
func (b *Bitmap) GetPixel(x, y int) bool {
	i := b.GetByteIndex(x, y)
	o := b.GetBitOffset(x)
	shift := uint(7 - o)
	if i > len(b.Data)-1 {
		common.Log.Debug("Trying to get pixel out of the data range. x: '%d', y:'%d', bm: '%s'", x, y, b)
		return false
	}
	if (b.Data[i]>>shift)&0x01 >= 1 {
		return true
	}
	return false
}

// GetUnpaddedData gets the data without row stride padding.
// The unpadded data contains bitmap.Height * bitmap.Width bits with
// optional last byte padding.
func (b *Bitmap) GetUnpaddedData() ([]byte, error) {
	padding := uint(b.Width & 0x07)
	if padding == 0 {
		// Rows are already byte-aligned - Data has no padding to strip.
		return b.Data, nil
	}

	// Number of bytes needed for Width*Height bits, rounded up.
	size := b.Width * b.Height
	if size%8 != 0 {
		size >>= 3
		size++
	} else {
		size >>= 3
	}

	data := make([]byte, size)
	w := writer.NewMSB(data)

	for y := 0; y < b.Height; y++ {
		// btIndex is the byte index per row.
		for btIndex := 0; btIndex < b.RowStride; btIndex++ {
			bt := b.Data[y*b.RowStride+btIndex]
			if btIndex != b.RowStride-1 {
				// Full bytes are copied verbatim.
				err := w.WriteByte(bt)
				if err != nil {
					return nil, err
				}
				continue
			}
			// The last byte of the row carries only 'padding' meaningful
			// bits - write them individually, dropping the pad bits.
			for i := uint(0); i < padding; i++ {
				err := w.WriteBit(int(bt >> (7 - i) & 0x01))
				if err != nil {
					return nil, err
				}
			}
		}
	}
	return data, nil
}

// GetVanillaData gets bitmap data as a byte slice with Vanilla bit interpretation.
// 'Vanilla' is the bit interpretation where the 0'th bit means black and 1'th bit means white.
// The naming convention based on the `https://en.wikipedia.org/wiki/Binary_image#Interpretation` page.
// Note: this inverts Data in place when the bitmap is currently chocolate.
func (b *Bitmap) GetVanillaData() []byte {
	if !b.isVanilla {
		b.inverseData()
	}
	return b.Data
}

// SetPixel sets the pixel at 'x', 'y' coordinates with the value of 'pixel'.
// Returns an error if the index is out of range.
// NOTE(review): the bit is OR-ed in, so a 'pixel' value of 0 leaves an
// already-set bit untouched - this method can only set bits, not clear them.
// Presumably intentional for JBIG2 composition; confirm before changing.
func (b *Bitmap) SetPixel(x, y int, pixel byte) error {
	i := b.GetByteIndex(x, y)
	if i > len(b.Data)-1 {
		return ErrIndexOutOfRange
	}
	o := b.GetBitOffset(x)
	shift := uint(7 - o)
	src := b.Data[i]

	result := src | (pixel & 0x01 << shift)
	b.Data[i] = result
	return nil
}

// SetDefaultPixel sets all bits within bitmap to '1'.
func (b *Bitmap) SetDefaultPixel() {
	for i := range b.Data {
		b.Data[i] = byte(0xff)
	}
}

// SetByte sets the byte at 'index' with value 'v'.
// Returns an error if the index is out of range.
func (b *Bitmap) SetByte(index int, v byte) error {
	if index > len(b.Data)-1 || index < 0 {
		return ErrIndexOutOfRange
	}
	b.Data[index] = v
	return nil
}

// String implements the Stringer interface. Each row of pixels is rendered
// as a line of '0'/'1' characters, preceded by a leading newline.
func (b *Bitmap) String() string {
	// strings.Builder avoids the quadratic cost of repeated string
	// concatenation; the output is byte-identical to the previous version.
	var sb strings.Builder
	sb.WriteByte('\n')
	for y := 0; y < b.Height; y++ {
		for x := 0; x < b.Width; x++ {
			if b.GetPixel(x, y) {
				sb.WriteByte('1')
			} else {
				sb.WriteByte('0')
			}
		}
		sb.WriteByte('\n')
	}
	return sb.String()
}

// ToImage gets the bitmap data and store in the image.Image.
func (b *Bitmap) ToImage() image.Image {
	// image.Rect takes exclusive max coordinates, so a Width x Height image
	// needs Rect(0, 0, Width, Height). The previous Width-1/Height-1 made
	// the image one pixel too small and silently dropped the last row and
	// column (Set outside the bounds is a no-op).
	img := image.NewGray(image.Rect(0, 0, b.Width, b.Height))
	for x := 0; x < b.Width; x++ {
		for y := 0; y < b.Height; y++ {
			// Set bits render white, unset bits black - TODO confirm this
			// matches the intended interpretation for the current
			// vanilla/chocolate state.
			c := color.Black
			if b.GetPixel(x, y) {
				c = color.White
			}
			img.Set(x, y, c)
		}
	}
	return img
}

// InverseData inverses the data if the 'isChocolate' flag matches
// current bitmap 'isVanilla' state.
func (b *Bitmap) InverseData(isChocolate bool) {
	if b.isVanilla != !isChocolate {
		b.inverseData()
	}
}

// inverseData flips every bit of Data and toggles the isVanilla flag.
func (b *Bitmap) inverseData() {
	for i := 0; i < len(b.Data); i++ {
		b.Data[i] = ^b.Data[i]
	}
	b.isVanilla = !b.isVanilla
}
bot/vendor/github.com/pzduniak/unipdf/internal/jbig2/bitmap/bitmap.go
0.819749
0.55429
bitmap.go
starcoder
package polynomial

import (
	"log"
	"math"
)

// dataFrame holds one row of a three-term recurrence relation:
// c*p[j+1](x) = (d + e*x)*p[j](x) - f*p[j-1](x).
type dataFrame struct {
	c float64
	d float64
	e float64
	f float64
}

// parseNormalize interprets the optional variadic 'normalize' flag shared by
// recurrences and LegendrePolynomials: absent means false, one value is used
// as given, more than one value is a caller error.
func parseNormalize(normalize []bool) bool {
	switch len(normalize) {
	case 0:
		return false
	case 1:
		return normalize[0]
	default:
		log.Panic("Something is wrong. the length of Normalize Parameter should be 0 or 1, but we got ", len(normalize), normalize)
	}
	return false
}

// recurrences returns the three-term recurrence coefficients for the Legendre
// polynomials of order 0..n, optionally in orthonormal form.
//
// legendre.recurrences in R.cran.orthopolynom
// GPL 3.0
// https://github.com/cran/orthopolynom/blob/master/R/legendre.recurrences.R
func recurrences(n int, normalize ...bool) (relations []dataFrame) {
	normalization := parseNormalize(normalize)
	if n < 0 {
		log.Panic("negative highest polynomial order")
	}

	relations = make([]dataFrame, n+1)
	for j := 0; j <= n; j++ {
		jf := float64(j)
		if normalization {
			// f couples p[j-1] into the recurrence; there is no p[-1],
			// so it is zero for j == 0.
			f := 0.0
			if j != 0 {
				f = jf * math.Sqrt((2*jf+3)/(2*jf-1))
			}
			relations[j] = dataFrame{
				c: jf + 1.0,
				d: 0.0,
				e: math.Sqrt((2*jf + 1) * (2*jf + 3)),
				f: f,
			}
		} else {
			relations[j] = dataFrame{
				c: jf + 1.0,
				d: 0.0,
				e: 2*jf + 1,
				f: jf,
			}
		}
	}
	return relations
}

// orthonormalPolynomials expands a set of recurrence relations into the
// corresponding polynomials, starting from the order-0 polynomial 'p0'.
//
// https://github.com/cran/orthopolynom/blob/master/R/orthonormal.polynomials.R
func orthonormalPolynomials(recurrences []dataFrame, p0 Polynomial) (polynomials []Polynomial) {
	np1 := len(recurrences)
	n := np1 - 1
	polynomials = make([]Polynomial, np1)
	polynomials[0] = p0

	for j := 0; j < n; j++ {
		// monomial is (d + e*x) from the recurrence row.
		monomial := NewPolynomial([]float64{recurrences[j].d, recurrences[j].e})
		var pNext Polynomial
		if j == 0 {
			pNext = (monomial.MultiplyPolynomial(p0)).Divide(recurrences[j].c)
		} else {
			// p[j+1] = ((d + e*x)*p[j] - f*p[j-1]) / c
			// ('Muliply' is the Polynomial API's spelling of scalar multiply.)
			pNext = (monomial.MultiplyPolynomial(polynomials[j]).MinusPolynomial(polynomials[j-1].Muliply(recurrences[j].f))).Divide(recurrences[j].c)
		}
		polynomials[j+1] = pNext
	}
	return
}

// orthogonalPolynomials expands recurrence relations starting from the
// constant polynomial 1 (the unnormalized Legendre P0).
func orthogonalPolynomials(recurrences []dataFrame) (polynomials []Polynomial) {
	polynomials = orthonormalPolynomials(recurrences, NewPolynomial([]float64{1}))
	return
}

// LegendrePolynomials returns the Legendre polynomials of order 0..n,
// orthonormal over [-1, 1] when 'normalize' is true, plain (unnormalized)
// otherwise. Orders up to 10 are served from a precomputed table; higher
// orders are built from the recurrence relations.
//
// https://github.com/cran/orthopolynom/blob/master/R/legendre.polynomials.R
func LegendrePolynomials(n int, normalize ...bool) (polynomials []Polynomial) {
	normalization := parseNormalize(normalize)
	if n < 0 {
		log.Panic("negative highest polynomial order")
	}

	if n <= 10 {
		if normalization {
			polynomials = []Polynomial{
				{[]float64{0.7071067811865476}},
				{[]float64{0, 1.2247448713915892}},
				{[]float64{-0.7905694150420949, 0, 2.371708245126285}},
				{[]float64{0, -2.8062430400804566, 0, 4.677071733467428}},
				{[]float64{0.795495128834866, 0, -7.954951288348662, 0, 9.280776503073438}},
				{[]float64{0, 4.397264774834466, 0, -20.52056894922751, 0, 18.46851205430476}},
				{[]float64{-0.7967217989988726, 0, 16.73115777897633, 0, -50.19347333692898, 0, 36.80854711374793}},
				{[]float64{0, -5.990715472712755, 0, 53.9164392544148, 0, -118.61616635971255, 0, 73.42905536553636}},
				{[]float64{0.7972004543733809, 0, -28.699216357441717, 0, 157.84568996592944, 0, -273.59919594094436, 0, 146.57099782550594}},
				{[]float64{0, 7.585118792715734, 0, -111.24840895983077, 0, 433.86879494334, 0, -619.8125642047713, 0, 292.689266430031}},
				{[]float64{-0.7974348906244046, 0, 43.85891898434226, 0, -380.1106311976329, 0, 1140.3318935928987, 0, -1384.688727934234, 0, 584.6463517944546}},
			}
			polynomials = polynomials[:(n + 1)]
			return
		}
		// Unnormalized Legendre coefficients are exact dyadic rationals
		// (k/2^m), so they are written out exactly here. The previous table
		// rounded several entries to 6 decimals and contained a typo
		// (94.9609388 instead of 12155/128 = 94.9609375), making the
		// table path disagree with the recurrence path used for n > 10.
		polynomials = []Polynomial{
			{[]float64{1}},
			{[]float64{0, 1}},
			{[]float64{-0.5, 0, 1.5}},
			{[]float64{0, -1.5, 0, 2.5}},
			{[]float64{0.375, 0, -3.75, 0, 4.375}},
			{[]float64{0, 1.875, 0, -8.75, 0, 7.875}},
			{[]float64{-0.3125, 0, 6.5625, 0, -19.6875, 0, 14.4375}},
			{[]float64{0, -2.1875, 0, 19.6875, 0, -43.3125, 0, 26.8125}},
			{[]float64{0.2734375, 0, -9.84375, 0, 54.140625, 0, -93.84375, 0, 50.2734375}},
			{[]float64{0, 2.4609375, 0, -36.09375, 0, 140.765625, 0, -201.09375, 0, 94.9609375}},
			{[]float64{-0.24609375, 0, 13.53515625, 0, -117.3046875, 0, 351.9140625, 0, -427.32421875, 0, 180.42578125}},
		}
		polynomials = polynomials[:(n + 1)]
		return
	}

	recurrence := recurrences(n, normalization)
	if normalization {
		// Normalized P0 over [-1, 1] is 1/sqrt(2) (since h0 = 2).
		p0 := Polynomial{c: []float64{1.0 / math.Sqrt2}}
		polynomials = orthonormalPolynomials(recurrence, p0)
		return
	}
	polynomials = orthogonalPolynomials(recurrence)
	return
}
polynomial/legendre.go
0.762998
0.620507
legendre.go
starcoder
package packed // Efficient sequential read/write of packed integers. type BulkOperationPacked9 struct { *BulkOperationPacked } func newBulkOperationPacked9() BulkOperation { return &BulkOperationPacked9{newBulkOperationPacked(9)} } func (op *BulkOperationPacked9) decodeLongToInt(blocks []int64, values []int32, iterations int) { blocksOffset, valuesOffset := 0, 0 for i := 0; i < iterations; i ++ { block0 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = int32(int64(uint64(block0) >> 55)); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block0) >> 46) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block0) >> 37) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block0) >> 28) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block0) >> 19) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block0) >> 10) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block0) >> 1) & 511); valuesOffset++ block1 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = int32(((block0 & 1) << 8) | (int64(uint64(block1) >> 56))); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block1) >> 47) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block1) >> 38) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block1) >> 29) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block1) >> 20) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block1) >> 11) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block1) >> 2) & 511); valuesOffset++ block2 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = int32(((block1 & 3) << 7) | (int64(uint64(block2) >> 57))); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block2) >> 48) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block2) >> 39) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block2) >> 30) & 511); 
valuesOffset++ values[valuesOffset] = int32(int64(uint64(block2) >> 21) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block2) >> 12) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block2) >> 3) & 511); valuesOffset++ block3 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = int32(((block2 & 7) << 6) | (int64(uint64(block3) >> 58))); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block3) >> 49) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block3) >> 40) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block3) >> 31) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block3) >> 22) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block3) >> 13) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block3) >> 4) & 511); valuesOffset++ block4 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = int32(((block3 & 15) << 5) | (int64(uint64(block4) >> 59))); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block4) >> 50) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block4) >> 41) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block4) >> 32) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block4) >> 23) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block4) >> 14) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block4) >> 5) & 511); valuesOffset++ block5 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = int32(((block4 & 31) << 4) | (int64(uint64(block5) >> 60))); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block5) >> 51) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block5) >> 42) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block5) >> 33) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block5) >> 24) & 511); valuesOffset++ values[valuesOffset] = 
int32(int64(uint64(block5) >> 15) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block5) >> 6) & 511); valuesOffset++ block6 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = int32(((block5 & 63) << 3) | (int64(uint64(block6) >> 61))); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block6) >> 52) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block6) >> 43) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block6) >> 34) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block6) >> 25) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block6) >> 16) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block6) >> 7) & 511); valuesOffset++ block7 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = int32(((block6 & 127) << 2) | (int64(uint64(block7) >> 62))); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block7) >> 53) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block7) >> 44) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block7) >> 35) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block7) >> 26) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block7) >> 17) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block7) >> 8) & 511); valuesOffset++ block8 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = int32(((block7 & 255) << 1) | (int64(uint64(block8) >> 63))); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block8) >> 54) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block8) >> 45) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block8) >> 36) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block8) >> 27) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block8) >> 18) & 511); valuesOffset++ values[valuesOffset] = int32(int64(uint64(block8) >> 9) & 511); 
valuesOffset++ values[valuesOffset] = int32(block8 & 511); valuesOffset++ } } func (op *BulkOperationPacked9) decodeByteToInt(blocks []byte, values []int32, iterations int) { blocksOffset, valuesOffset := 0, 0 for i := 0; i < iterations; i ++ { byte0 := blocks[blocksOffset] blocksOffset++ byte1 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int32((int64(byte0) << 1) | int64(uint8(byte1) >> 7)) valuesOffset++ byte2 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int32((int64(byte1 & 127) << 2) | int64(uint8(byte2) >> 6)) valuesOffset++ byte3 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int32((int64(byte2 & 63) << 3) | int64(uint8(byte3) >> 5)) valuesOffset++ byte4 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int32((int64(byte3 & 31) << 4) | int64(uint8(byte4) >> 4)) valuesOffset++ byte5 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int32((int64(byte4 & 15) << 5) | int64(uint8(byte5) >> 3)) valuesOffset++ byte6 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int32((int64(byte5 & 7) << 6) | int64(uint8(byte6) >> 2)) valuesOffset++ byte7 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int32((int64(byte6 & 3) << 7) | int64(uint8(byte7) >> 1)) valuesOffset++ byte8 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int32((int64(byte7 & 1) << 8) | int64(byte8)) valuesOffset++ } } func (op *BulkOperationPacked9) decodeLongToLong(blocks []int64, values []int64, iterations int) { blocksOffset, valuesOffset := 0, 0 for i := 0; i < iterations; i ++ { block0 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = int64(uint64(block0) >> 55); valuesOffset++ values[valuesOffset] = int64(uint64(block0) >> 46) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block0) >> 37) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block0) >> 28) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block0) >> 19) & 511; valuesOffset++ 
values[valuesOffset] = int64(uint64(block0) >> 10) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block0) >> 1) & 511; valuesOffset++ block1 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = ((block0 & 1) << 8) | (int64(uint64(block1) >> 56)); valuesOffset++ values[valuesOffset] = int64(uint64(block1) >> 47) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block1) >> 38) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block1) >> 29) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block1) >> 20) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block1) >> 11) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block1) >> 2) & 511; valuesOffset++ block2 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = ((block1 & 3) << 7) | (int64(uint64(block2) >> 57)); valuesOffset++ values[valuesOffset] = int64(uint64(block2) >> 48) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block2) >> 39) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block2) >> 30) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block2) >> 21) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block2) >> 12) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block2) >> 3) & 511; valuesOffset++ block3 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = ((block2 & 7) << 6) | (int64(uint64(block3) >> 58)); valuesOffset++ values[valuesOffset] = int64(uint64(block3) >> 49) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block3) >> 40) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block3) >> 31) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block3) >> 22) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block3) >> 13) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block3) >> 4) & 511; valuesOffset++ block4 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = ((block3 & 15) << 5) | (int64(uint64(block4) >> 59)); 
valuesOffset++ values[valuesOffset] = int64(uint64(block4) >> 50) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block4) >> 41) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block4) >> 32) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block4) >> 23) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block4) >> 14) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block4) >> 5) & 511; valuesOffset++ block5 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = ((block4 & 31) << 4) | (int64(uint64(block5) >> 60)); valuesOffset++ values[valuesOffset] = int64(uint64(block5) >> 51) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block5) >> 42) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block5) >> 33) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block5) >> 24) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block5) >> 15) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block5) >> 6) & 511; valuesOffset++ block6 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = ((block5 & 63) << 3) | (int64(uint64(block6) >> 61)); valuesOffset++ values[valuesOffset] = int64(uint64(block6) >> 52) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block6) >> 43) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block6) >> 34) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block6) >> 25) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block6) >> 16) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block6) >> 7) & 511; valuesOffset++ block7 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = ((block6 & 127) << 2) | (int64(uint64(block7) >> 62)); valuesOffset++ values[valuesOffset] = int64(uint64(block7) >> 53) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block7) >> 44) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block7) >> 35) & 511; valuesOffset++ values[valuesOffset] = 
int64(uint64(block7) >> 26) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block7) >> 17) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block7) >> 8) & 511; valuesOffset++ block8 := blocks[blocksOffset]; blocksOffset++ values[valuesOffset] = ((block7 & 255) << 1) | (int64(uint64(block8) >> 63)); valuesOffset++ values[valuesOffset] = int64(uint64(block8) >> 54) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block8) >> 45) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block8) >> 36) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block8) >> 27) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block8) >> 18) & 511; valuesOffset++ values[valuesOffset] = int64(uint64(block8) >> 9) & 511; valuesOffset++ values[valuesOffset] = block8 & 511; valuesOffset++ } } func (op *BulkOperationPacked9) decodeByteToLong(blocks []byte, values []int64, iterations int) { blocksOffset, valuesOffset := 0, 0 for i := 0; i < iterations; i ++ { byte0 := blocks[blocksOffset] blocksOffset++ byte1 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int64((int64(byte0) << 1) | int64(uint8(byte1) >> 7)) valuesOffset++ byte2 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int64((int64(byte1 & 127) << 2) | int64(uint8(byte2) >> 6)) valuesOffset++ byte3 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int64((int64(byte2 & 63) << 3) | int64(uint8(byte3) >> 5)) valuesOffset++ byte4 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int64((int64(byte3 & 31) << 4) | int64(uint8(byte4) >> 4)) valuesOffset++ byte5 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int64((int64(byte4 & 15) << 5) | int64(uint8(byte5) >> 3)) valuesOffset++ byte6 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int64((int64(byte5 & 7) << 6) | int64(uint8(byte6) >> 2)) valuesOffset++ byte7 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int64((int64(byte6 & 3) << 7) | 
int64(uint8(byte7) >> 1)) valuesOffset++ byte8 := blocks[blocksOffset] blocksOffset++ values[valuesOffset] = int64((int64(byte7 & 1) << 8) | int64(byte8)) valuesOffset++ } }
vendor/github.com/balzaczyy/golucene/core/util/packed/bulkOperation9.go
0.55254
0.710302
bulkOperation9.go
starcoder
package models import ( "github.com/is8ac/tfutils" "github.com/is8ac/tfutils/descend" tf "github.com/tensorflow/tensorflow/tensorflow/go" "github.com/tensorflow/tensorflow/tensorflow/go/op" ) // MakeSingleLayerNN create a modelDef for a single layer nn. func MakeSingleLayerNN(inputs, targets tf.Output) ( paramDefs []descend.ParamDef, // list of param tensors to have the state machine to create. lossFunc descend.LossFunc, // func for the state machine to evaluate parameters. makeFinalizeAccuracy func(*op.Scope, []tf.Output, tf.Output, tf.Output) (func(*tf.Session) func() (float32, error), tf.Output), // func to make a func to make a func compute accuracy. ) { inputDims, err := inputs.Shape().ToSlice() if err != nil { panic(err) } targetDims, err := targets.Shape().ToSlice() if err != nil { panic(err) } if len(inputDims) != 2 { panic("input must be 2 dimensional, is shape " + inputs.Shape().String()) } if len(targetDims) != 2 { panic("target must be 2 dimensional, is shape " + inputs.Shape().String()) } model := func(s *op.Scope, params []tf.Output, inputs tf.Output) tf.Output { return op.Add(s, params[1], op.MatMul(s, inputs, params[0])) } paramDefs = []descend.ParamDef{ descend.ParamDef{Name: "weights", Init: tfutils.Zero(tf.Float, tf.MakeShape(inputDims[1], targetDims[1]))}, descend.ParamDef{Name: "biases", Init: tfutils.Zero(tf.Float, tf.MakeShape(targetDims[1]))}, } lossFunc = func(s *op.Scope, params []tf.Output) (loss tf.Output) { softmax := op.Softmax(s, model(s, params, inputs)) sqrDiffs := op.SquaredDifference(s, softmax, targets) sums := op.Sum(s, sqrDiffs, op.Const(s, int32(-1))) loss = op.Mean(s, sums, op.Const(s.SubScope("mean_reduce_dims"), []int32{0})) return } makeFinalizeAccuracy = func(s *op.Scope, params []tf.Output, testInputs, testTargets tf.Output, ) ( finalizeAccuracy func(*tf.Session) func() (float32, error), accuracy tf.Output, ) { actual := model(s, params, testInputs) actualLabels := op.ArgMax(s, actual, op.Const(s.SubScope("argmax_dim"), 
int32(-1)), op.ArgMaxOutputType(tf.Int32)) correct := op.Reshape(s, op.Equal(s, actualLabels, testTargets), op.Const(s.SubScope("all"), []int32{-1})) accuracy = op.Mean(s, op.Cast(s.SubScope("accuracy"), correct, tf.Float), op.Const(s.SubScope("mean_dim"), int32(0))) finalizeAccuracy = func(sess *tf.Session) func() (float32, error) { return func() (acc float32, err error) { results, err := sess.Run(nil, []tf.Output{accuracy}, nil) if err != nil { return } acc = results[0].Value().(float32) return } } return } return }
descend/models/models.go
0.724578
0.439567
models.go
starcoder
package util

import (
	"bytes"
	"fmt"
	"math"
	"text/tabwriter"

	e2e "k8s.io/kubernetes/test/e2e/framework"

	"github.com/golang/glog"
)

// MetricKey is used to identify a metric uniquely.
type MetricKey struct {
	TestName   string // Name of the test ("Load Capacity", "Density", etc)
	Verb       string // "GET","LIST",etc for API calls and "POD STARTUP" for pod startup
	Resource   string // "nodes","pods", etc for API calls and empty value for pod startup
	Percentile string // The percentile string ("Perc50", "Perc90", etc)
}

// MetricComparisonData holds all the values corresponding to a metric's comparison
// between two jobs (a "left" and a "right" job).
type MetricComparisonData struct {
	LeftJobSample  []float64 // Sample values from the left job's runs
	RightJobSample []float64 // Sample values from the right job's runs
	Matched        bool      // Boolean indicating if the samples matched
	Comments       string    // Any comments wrt the matching (for human interpretation)

	// Below are some common statistical measures, that we would compute for the left
	// and right job samples. They are used by some comparison schemes.
	AvgL, AvgR     float64 // Average
	StDevL, StDevR float64 // Standard deviation
	MaxL, MaxR     float64 // Max value
}

// JobComparisonData is a struct holding a map with keys as the metrics' keys and
// values as their comparison data.
type JobComparisonData struct {
	Data map[MetricKey]*MetricComparisonData
}

// NewJobComparisonData is a constructor for JobComparisonData struct.
// The returned value has an empty (non-nil) Data map ready for use.
func NewJobComparisonData() *JobComparisonData {
	return &JobComparisonData{
		Data: make(map[MetricKey]*MetricComparisonData),
	}
}

// PrettyPrint prints the job comparison data in a table form with columns aligned.
func (j *JobComparisonData) PrettyPrint() { var buf bytes.Buffer w := tabwriter.NewWriter(&buf, 0, 0, 2, ' ', 0) fmt.Fprintf(w, "E2E TEST\tVERB\tRESOURCE\tPERCENTILE\tMATCHED?\tCOMMENTS\n") for key, data := range j.Data { fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\n", key.TestName, key.Verb, key.Resource, key.Percentile, data.Matched, data.Comments) } w.Flush() glog.Infof("\n%v", buf.String()) } // Adds a sample value (if not NaN) to a given metric's MetricComparisonData. func (j *JobComparisonData) addSampleValue(sample float64, testName, verb, resource, percentile string, fromLeftJob bool) { if math.IsNaN(sample) { return } // Check if the metric exists in the map already, and add it if necessary. metricKey := MetricKey{testName, verb, resource, percentile} if _, ok := j.Data[metricKey]; !ok { j.Data[metricKey] = &MetricComparisonData{} } // Add the sample to the metric's comparison data. if fromLeftJob { j.Data[metricKey].LeftJobSample = append(j.Data[metricKey].LeftJobSample, sample) } else { j.Data[metricKey].RightJobSample = append(j.Data[metricKey].RightJobSample, sample) } } func (j *JobComparisonData) addAPICallLatencyValues(apiCall *e2e.APICall, testName string, fromLeftJob bool) { perc50 := float64(apiCall.Latency.Perc50) perc90 := float64(apiCall.Latency.Perc90) perc99 := float64(apiCall.Latency.Perc99) j.addSampleValue(perc50, testName, apiCall.Verb, apiCall.Resource, "Perc50", fromLeftJob) j.addSampleValue(perc90, testName, apiCall.Verb, apiCall.Resource, "Perc90", fromLeftJob) j.addSampleValue(perc99, testName, apiCall.Verb, apiCall.Resource, "Perc99", fromLeftJob) } func (j *JobComparisonData) addPodStartupLatencyValues(podStartupLatency *e2e.PodStartupLatency, testName string, fromLeftJob bool) { perc50 := float64(podStartupLatency.Latency.Perc50) perc90 := float64(podStartupLatency.Latency.Perc90) perc99 := float64(podStartupLatency.Latency.Perc99) perc100 := float64(podStartupLatency.Latency.Perc100) j.addSampleValue(perc50, testName, "POD_STARTUP", 
"", "Perc50", fromLeftJob) j.addSampleValue(perc90, testName, "POD_STARTUP", "", "Perc90", fromLeftJob) j.addSampleValue(perc99, testName, "POD_STARTUP", "", "Perc99", fromLeftJob) j.addSampleValue(perc100, testName, "POD_STARTUP", "", "Perc100", fromLeftJob) } // GetFlattennedComparisonData flattens arrays of API and pod latencies of left & right jobs into JobComparisonData. func GetFlattennedComparisonData(leftApiLatencies, rightApiLatencies []map[string]*e2e.APIResponsiveness, leftPodLatencies, rightPodLatencies []map[string]*e2e.PodStartupLatency) *JobComparisonData { j := NewJobComparisonData() // Add API call latencies of left job. for _, runApiLatencies := range leftApiLatencies { for testName, apiCallLatencies := range runApiLatencies { for _, apiCallLatency := range apiCallLatencies.APICalls { j.addAPICallLatencyValues(&apiCallLatency, testName, true) } } } // Add API call latencies of right job. for _, runApiLatencies := range rightApiLatencies { for testName, apiCallLatencies := range runApiLatencies { for _, apiCallLatency := range apiCallLatencies.APICalls { j.addAPICallLatencyValues(&apiCallLatency, testName, false) } } } // Add Pod startup latencies of left job. for _, runPodLatencies := range leftPodLatencies { for testName, podStartupLatency := range runPodLatencies { j.addPodStartupLatencyValues(podStartupLatency, testName, true) } } // Add Pod startup latencies of right job. 
for _, runPodLatencies := range rightPodLatencies { for testName, podStartupLatency := range runPodLatencies { j.addPodStartupLatencyValues(podStartupLatency, testName, false) } } return j } func computeSampleStats(sample []float64, avg, stDev, max *float64) { len := len(sample) if len == 0 { *avg = math.NaN() *stDev = math.NaN() *max = math.NaN() return } sum := 0.0 squareSum := 0.0 for i := 0; i < len; i++ { sum += sample[i] squareSum += sample[i] * sample[i] *max = math.Max(*max, sample[i]) } *avg = sum / float64(len) *stDev = math.Sqrt(squareSum/float64(len) - (*avg * *avg)) } // ComputeStatsForMetricSamples computes avg, std-dev and max for each metric's left and right samples. func (j *JobComparisonData) ComputeStatsForMetricSamples() { for _, metricData := range j.Data { computeSampleStats(metricData.LeftJobSample, &metricData.AvgL, &metricData.StDevL, &metricData.MaxL) computeSampleStats(metricData.RightJobSample, &metricData.AvgR, &metricData.StDevR, &metricData.MaxR) } }
benchmark/pkg/util/util.go
0.668447
0.439146
util.go
starcoder
package algorithm

import (
	"basic/datatrans"
	"fmt"
	"sync"
	"time"
)

// SaveDataFlag, when true, makes Compare write the generated maps and every
// algorithm's track to disk via the datatrans package.
var SaveDataFlag bool = false

/*
Compare runs several shortest-path algorithms concurrently on one randomly
generated grid map and collects their results.

Goroutines, a sync.WaitGroup and a buffered channel (as a message queue) are
used, so this also serves as an example of running multiple computation
tasks in parallel.

`row` and `col` size the map; `dense` is the fraction of blocked cells;
`costL` and `costH` bound the per-step cost; `start` and `end` are the start
and target points; `id` tags any files written when SaveDataFlag is set.

It returns the per-algorithm results keyed by algorithm name, plus the
generated feasibility map and cost map.
*/
func Compare(row, col int, dense float64, costL, costH int, start, end [2]int, id int) (retData map[string]interface{}, Feasible [][]int, CostMap [][]int) {
	retData = make(map[string]interface{})
	fmt.Println("random map is generating....")
	feasibleMap, retMap := MapGenerator(row, col, dense, costL, costH)
	Feasible, CostMap = feasibleMap, retMap
	filename := time.Now().Format("200601021545")
	if SaveDataFlag {
		datatrans.OutputMat(filename, feasibleMap, retMap, id)
	}
	var wg sync.WaitGroup
	// Buffer of 10 comfortably holds all 7 results, so no sender blocks.
	datachan := make(chan map[string]interface{}, 10)
	wg.Add(7)
	// Plain Dijkstra.
	go func() {
		tstart := time.Now()
		c1, s1, t1 := DijkstraForGrid(feasibleMap, retMap, start, end)
		timecost := time.Since(tstart)
		fmt.Println("Task of Dijkstra serch is over,the total step is", s1, "and the cost is", c1, " time expired ", timecost)
		data := make(map[string]interface{})
		data["Di"] = 1
		data["cost"] = c1
		data["total"] = s1
		data["tract"] = t1
		if SaveDataFlag {
			datatrans.OutputTract(filename+"_Dijkstra_", t1, id)
		}
		datachan <- data
		wg.Done()
	}()
	// A* with Manhattan-style heuristic.
	go func() {
		tstart := time.Now()
		c2, s2, t2 := AstarSearch(feasibleMap, retMap, start, end, HalmintanDistance)
		timecost := time.Since(tstart)
		fmt.Println("Task of A* search is over, the total step is", s2, "and the cost is", c2, " time expired ", timecost)
		data := make(map[string]interface{})
		data["As"] = 1
		data["cost"] = c2
		data["total"] = s2
		data["tract"] = t2
		if SaveDataFlag {
			datatrans.OutputTract(filename+"_Astar_", t2, id)
		}
		datachan <- data
		wg.Done()
	}()
	// A* with Chebyshev heuristic.
	go func() {
		tstart := time.Now()
		c2, s2, t2 := AstarSearch(feasibleMap, retMap, start, end, ChebyshevDistance)
		timecost := time.Since(tstart)
		fmt.Println("Task of A*C search is over, the total step is", s2, "and the cost is", c2, " time expired ", timecost)
		data := make(map[string]interface{})
		data["AsC"] = 1
		data["cost"] = c2
		data["total"] = s2
		data["tract"] = t2
		if SaveDataFlag {
			// TODO(review): same "_Astar_" prefix as the plain A* task, so the
			// two outputs can collide; consider "_AstarChebyshev_".
			datatrans.OutputTract(filename+"_Astar_", t2, id)
		}
		datachan <- data
		wg.Done()
	}()
	// Dijkstra combined with A*.
	go func() {
		tstart := time.Now()
		c3, s3, t3 := AstarSearchDijkstra(feasibleMap, retMap, start, end, HalmintanDistance)
		timecost := time.Since(tstart)
		fmt.Println("Task of Dijkstra with A* is over, the total step is", s3, "and the cost is", c3, " time expired ", timecost)
		data := make(map[string]interface{})
		data["MOA"] = 1
		data["cost"] = c3
		data["total"] = s3
		data["tract"] = t3
		if SaveDataFlag {
			datatrans.OutputTract(filename+"_DijkstraAstar_", t3, id)
		}
		datachan <- data
		wg.Done()
	}()
	// Breadth-first search.
	go func() {
		tstart := time.Now()
		c3, s3, t3 := BfsSearch(feasibleMap, retMap, start, end)
		timecost := time.Since(tstart)
		fmt.Println("Task of bfs is over, the total step is", s3, "and the cost is", c3, " time expired ", timecost)
		data := make(map[string]interface{})
		data["BFS"] = 1
		data["cost"] = c3
		data["total"] = s3
		data["tract"] = t3
		if SaveDataFlag {
			datatrans.OutputTract(filename+"_bfs_", t3, id)
		}
		datachan <- data
		wg.Done()
	}()
	// Jump Point Search. Note it takes a uniform step cost of 10 rather than
	// the generated cost map.
	go func() {
		tstart := time.Now()
		c3, s3, t3 := JPS(feasibleMap, 10, start, end, _HalmintanDistance)
		timecost := time.Since(tstart)
		fmt.Println("Task of JPS is over, the total step is", s3, "and the cost is", c3, t3, " time expired ", timecost)
		data := make(map[string]interface{})
		data["JPS"] = 1
		data["cost"] = c3
		data["total"] = s3
		data["tract"] = t3
		if SaveDataFlag {
			// TODO(review): "_DijkstraAstar_" looks copy-pasted from the MOA
			// task; consider "_JPS_" so outputs do not overwrite each other.
			datatrans.OutputTract(filename+"_DijkstraAstar_", t3, id)
		}
		datachan <- data
		wg.Done()
	}()
	// Bidirectional A*/Dijkstra.
	go func() {
		tstart := time.Now()
		c3, s3, t3 := BidirectionAstarDijkstra_Normal(feasibleMap, retMap, start, end, HalmintanDistance)
		timecost := time.Since(tstart)
		fmt.Println("Task of BIMOA* is over, the total step is", s3, "and the cost is", c3, " time expired ", timecost)
		data := make(map[string]interface{})
		data["BIMOA"] = 1
		data["cost"] = c3
		data["total"] = s3
		data["tract"] = t3
		if SaveDataFlag {
			// TODO(review): same copy-pasted "_DijkstraAstar_" prefix as above.
			datatrans.OutputTract(filename+"_DijkstraAstar_", t3, id)
		}
		datachan <- data
		wg.Done()
	}()
	fmt.Println("Gorotinues are working")
	wg.Wait()
	fmt.Println("Gorotinues finish tasks")
	close(datachan)
	// Drain the (closed) channel and key each result by algorithm name.
	for v := range datachan {
		if _, has := v["BFS"]; has {
			retData["BFS"] = v
		}
		if _, has := v["MOA"]; has {
			retData["MOA"] = v
		}
		if _, has := v["As"]; has {
			retData["As"] = v
		}
		// BUG FIX: the "AsC" and "BIMOA" results were produced by their
		// goroutines but silently dropped here; collect them as well.
		if _, has := v["AsC"]; has {
			retData["AsC"] = v
		}
		if _, has := v["Di"]; has {
			retData["Dijkstra"] = v
		}
		if _, has := v["JPS"]; has {
			retData["JPS"] = v
		}
		if _, has := v["BIMOA"]; has {
			retData["BIMOA"] = v
		}
	}
	return
}
src/models/algorithm/searchmethod/compare.go
0.538498
0.475971
compare.go
starcoder
package flatten_nested_list_iterator

// https://leetcode-cn.com/problems/flatten-nested-list-iterator

// NestedInteger holds either a single integer or a list of NestedIntegers.
type NestedInteger struct {
	value     int
	isInteger bool
	sub       []*NestedInteger
}

// IsInteger reports whether this NestedInteger holds a single integer.
func (ni NestedInteger) IsInteger() bool {
	return ni.isInteger
}

// GetInteger returns the held integer (only meaningful when IsInteger is true).
func (ni NestedInteger) GetInteger() int {
	return ni.value
}

// SetInteger stores a single integer value.
func (ni *NestedInteger) SetInteger(value int) {
	ni.value = value
}

// Add appends a NestedInteger to the held list.
func (ni *NestedInteger) Add(elem NestedInteger) {
	ni.sub = append(ni.sub, &elem)
}

// GetList returns the held list (only meaningful when IsInteger is false).
func (ni NestedInteger) GetList() []*NestedInteger {
	return ni.sub
}

// NewNestedInteger recursively builds NestedIntegers from a mixed slice of
// ints and nested []interface{} slices (test-helper constructor).
func NewNestedInteger(values []interface{}) []*NestedInteger {
	var res []*NestedInteger
	for _, v := range values {
		cur := &NestedInteger{}
		switch v := v.(type) {
		case int:
			cur.SetInteger(v)
			cur.isInteger = true
		case []interface{}:
			cur.isInteger = false
			nexts := NewNestedInteger(v)
			for _, next := range nexts {
				cur.Add(*next)
			}
		}
		res = append(res, cur)
	}
	return res
}

/**
* Variant 1: flatten all values in a single pass at construction time.
type NestedIterator struct {
	values []int
	len    int
	index  int
}

func Constructor(nestedList []*NestedInteger) *NestedIterator {
	n := &NestedIterator{}
	n.ConstructorHelper(nestedList)
	n.len = len(n.values)
	return n
}

func (this *NestedIterator) ConstructorHelper(nestedList []*NestedInteger) {
	for _, v := range nestedList {
		if v.IsInteger() {
			this.values = append(this.values, v.GetInteger())
		} else {
			this.ConstructorHelper(v.GetList())
		}
	}
}

func (this *NestedIterator) Next() int {
	v := this.values[this.index]
	this.index++
	return v
}

func (this *NestedIterator) HasNext() bool {
	return this.index < this.len
}
*/

/**
// Variant 2: lazy flattening with a queue.
type NestedIterator struct {
	it []*NestedInteger
}

func Constructor(nestedList []*NestedInteger) *NestedIterator {
	return &NestedIterator{it: nestedList}
}

func (this *NestedIterator) Next() int {
	n := (this.it[0]).GetInteger()
	this.it = this.it[1:]
	return n
}

func (this *NestedIterator) HasNext() bool {
	// While the head element is not an integer, expand its list to the front.
	for len(this.it) > 0 && !this.it[0].IsInteger() {
		first := this.it[0].GetList()
		this.it = this.it[1:]
		for i := len(first) - 1; i >= 0; i-- {
			this.it = append([]*NestedInteger{first[i]}, this.it...)
		}
	}
	return len(this.it) > 0
}
*/

// NestedIterator lazily flattens a nested list using a stack of queues:
// each stack element is the remaining suffix of one nesting level.
type NestedIterator struct {
	stack [][]*NestedInteger
}

// Constructor returns an iterator over nestedList.
func Constructor(nestedList []*NestedInteger) *NestedIterator {
	return &NestedIterator{[][]*NestedInteger{nestedList}}
}

// Next returns the next integer. Callers must ensure HasNext returned true,
// which leaves an integer at the head of the top-of-stack queue.
func (it *NestedIterator) Next() int {
	queue := it.stack[len(it.stack)-1]
	val := queue[0].GetInteger()
	it.stack[len(it.stack)-1] = queue[1:]
	return val
}

// HasNext reports whether another integer remains, expanding nested lists
// onto the stack until an integer sits at the head of the top queue.
func (it *NestedIterator) HasNext() bool {
	for len(it.stack) > 0 {
		// Queue at the top of the stack.
		queue := it.stack[len(it.stack)-1]
		if len(queue) < 1 {
			// Exhausted level: pop it.
			it.stack = it.stack[:len(it.stack)-1]
			continue
		}
		n := queue[0]
		if n.IsInteger() {
			return true
		}
		// Head is a list: advance past it, then push its contents as a new level.
		it.stack[len(it.stack)-1] = queue[1:]
		it.stack = append(it.stack, n.GetList())
	}
	return false
}
data_structure/binary_tree/flatten_nested_list_iterator/flatten_nested_list_iterator.go
0.767733
0.446917
flatten_nested_list_iterator.go
starcoder
package providers

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/StackExchange/dnscontrol/v2/models"
)

// Registrar is an interface for a domain registrar. It can return a list of needed corrections to be applied in the future. Implement this only if the provider is a "registrar" (i.e. can update the NS records of the parent to a domain).
type Registrar interface {
	models.Registrar
}

// DNSServiceProvider is able to generate a set of corrections that need to be made to correct records for a domain. Implement this only if the provider is a DNS Service Provider (can update records in a DNS zone).
type DNSServiceProvider interface {
	models.DNSProvider
}

// DomainCreator should be implemented by providers that have the ability to add domains to an account. the create-domains command
// can be run to ensure all domains are present before running preview/push. Implement this only if the provider supoprts the `dnscontrol create-domain` command.
type DomainCreator interface {
	EnsureDomainExists(domain string) error
}

// ZoneLister should be implemented by providers that have the
// ability to list the zones they manage. This facilitates using the
// "get-zones" command for "all" zones.
// (Doc fix: the comment previously named this interface "DomainLister".)
type ZoneLister interface {
	ListZones() ([]string, error)
}

// RegistrarInitializer is a function to create a registrar. Function will be passed the unprocessed json payload from the configuration file for the given provider.
type RegistrarInitializer func(map[string]string) (Registrar, error)

// RegistrarTypes stores initializer for each registrar.
var RegistrarTypes = map[string]RegistrarInitializer{}

// DspInitializer is a function to create a DNS service provider. Function will be passed the unprocessed json payload from the configuration file for the given provider.
type DspInitializer func(map[string]string, json.RawMessage) (DNSServiceProvider, error)

// DNSProviderTypes stores initializer for each DSP.
var DNSProviderTypes = map[string]DspInitializer{}

// RegisterRegistrarType adds a registrar type to the registry by providing a suitable initialization function.
func RegisterRegistrarType(name string, init RegistrarInitializer, pm ...ProviderMetadata) {
	if _, ok := RegistrarTypes[name]; ok {
		log.Fatalf("Cannot register registrar type %s multiple times", name)
	}
	RegistrarTypes[name] = init
	unwrapProviderCapabilities(name, pm)
}

// RegisterDomainServiceProviderType adds a dsp to the registry with the given initialization function.
func RegisterDomainServiceProviderType(name string, init DspInitializer, pm ...ProviderMetadata) {
	if _, ok := DNSProviderTypes[name]; ok {
		// BUG FIX: message said "registrar type" (copy-paste from
		// RegisterRegistrarType) although this registers a DSP.
		log.Fatalf("Cannot register DSP type %s multiple times", name)
	}
	DNSProviderTypes[name] = init
	unwrapProviderCapabilities(name, pm)
}

// CreateRegistrar initializes a registrar instance from given credentials.
func CreateRegistrar(rType string, config map[string]string) (Registrar, error) {
	initer, ok := RegistrarTypes[rType]
	if !ok {
		return nil, fmt.Errorf("registrar type %s not declared", rType)
	}
	return initer(config)
}

// CreateDNSProvider initializes a dns provider instance from given credentials.
func CreateDNSProvider(dType string, config map[string]string, meta json.RawMessage) (DNSServiceProvider, error) {
	initer, ok := DNSProviderTypes[dType]
	if !ok {
		return nil, fmt.Errorf("DSP type %s not declared", dType)
	}
	return initer(config, meta)
}

// None is a basic provider type that does absolutely nothing. Can be useful as a placeholder for third parties or unimplemented providers.
type None struct{}

// GetRegistrarCorrections returns corrections to update registrars.
func (n None) GetRegistrarCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {
	return nil, nil
}

// GetNameservers returns the current nameservers for a domain.
func (n None) GetNameservers(string) ([]*models.Nameserver, error) {
	return nil, nil
}

// GetZoneRecords gets the records of a zone and returns them in RecordConfig format.
// (Receiver renamed from `client` to `n` for consistency with the other None methods.)
func (n None) GetZoneRecords(domain string) (models.Records, error) {
	return nil, fmt.Errorf("not implemented")
	// This enables the get-zones subcommand.
	// Implement this by extracting the code from GetDomainCorrections into
	// a single function. For most providers this should be relatively easy.
}

// GetDomainCorrections returns corrections to update a domain.
func (n None) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {
	return nil, nil
}

func init() {
	RegisterRegistrarType("NONE", func(map[string]string) (Registrar, error) {
		return None{}, nil
	})
}

// CustomRType stores an rtype that is only valid for this DSP.
type CustomRType struct {
	Name     string
	Provider string
	RealType string
}

// RegisterCustomRecordType registers a record type that is only valid for one provider.
// provider is the registered type of provider this is valid with
// name is the record type as it will appear in the js. (should be something like $PROVIDER_FOO)
// realType is the record type it will be replaced with after validation
func RegisterCustomRecordType(name, provider, realType string) {
	customRecordTypes[name] = &CustomRType{Name: name, Provider: provider, RealType: realType}
}

// GetCustomRecordType returns a registered custom record type, or nil if none
func GetCustomRecordType(rType string) *CustomRType {
	return customRecordTypes[rType]
}

var customRecordTypes = map[string]*CustomRType{}
providers/providers.go
0.66356
0.467271
providers.go
starcoder
package xgraphics

/*
xgraphics/new.go contains a few additional constructors for creating an
xgraphics.Image.
*/

import (
	"bytes"
	"fmt"
	"image"
	_ "image/gif"
	_ "image/jpeg"
	_ "image/png"
	"os"

	"github.com/jezek/xgb/xproto"

	"github.com/alex11br/xgbutil"
	"github.com/alex11br/xgbutil/ewmh"
	"github.com/alex11br/xgbutil/xwindow"
)

// NewConvert converts any image satisfying the image.Image interface to an
// xgraphics.Image type.
// If 'img' is an xgraphics.Image, it will be copied and a new image will
// be returned.
// Also, NewConvert attempts to optimize image conversion for some image
// formats. (i.e., *image.RGBA.)
func NewConvert(X *xgbutil.XUtil, img image.Image) *Image {
	ximg := New(X, img.Bounds())
	// I've attempted to optimize this loop.
	// It actually takes more time to convert an image than to send the bytes
	// over the wire. (I suspect 'copy' is super fast, which can be used in
	// XDraw, whereas computing each pixel is super slow.)
	// But how is image decoding so much faster than this? I'll have to
	// investigate... Maybe the Color interface being used here is the real
	// slow down.
	// Dispatch on the concrete type so each converter can read pixel data
	// directly instead of going through the generic Color interface.
	switch concrete := img.(type) {
	case *image.NRGBA:
		convertNRGBA(ximg, concrete)
	case *image.NRGBA64:
		convertNRGBA64(ximg, concrete)
	case *image.RGBA:
		convertRGBA(ximg, concrete)
	case *image.RGBA64:
		convertRGBA64(ximg, concrete)
	case *image.YCbCr:
		convertYCbCr(ximg, concrete)
	case *Image:
		convertXImage(ximg, concrete)
	default:
		// Fallback: per-pixel conversion via the Color interface.
		xgbutil.Logger.Printf("Converting image type %T the slow way. "+
			"Optimization for this image type hasn't been added yet.", img)
		convertImage(ximg, img)
	}
	return ximg
}

// NewFileName uses the image package's decoder and converts a file specified
// by fileName to an xgraphics.Image value.
// Opening a file or decoding an image can cause an error.
func NewFileName(X *xgbutil.XUtil, fileName string) (*Image, error) {
	srcReader, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}
	defer srcReader.Close()

	img, _, err := image.Decode(srcReader)
	if err != nil {
		return nil, err
	}
	return NewConvert(X, img), nil
}

// NewBytes uses the image package's decoder to convert the bytes given to
// an xgraphics.Image value.
// Decoding an image can cause an error.
func NewBytes(X *xgbutil.XUtil, bs []byte) (*Image, error) {
	img, _, err := image.Decode(bytes.NewReader(bs))
	if err != nil {
		return nil, err
	}
	return NewConvert(X, img), nil
}

// NewEwmhIcon converts EWMH icon data (ARGB) to an xgraphics.Image type.
// You should probably use xgraphics.FindIcon instead of this directly.
func NewEwmhIcon(X *xgbutil.XUtil, icon *ewmh.WmIcon) *Image {
	ximg := New(X, image.Rect(0, 0, int(icon.Width), int(icon.Height)))
	r := ximg.Rect
	width := r.Dx()
	var argb, x, y int
	for x = r.Min.X; x < r.Max.X; x++ {
		for y = r.Min.Y; y < r.Max.Y; y++ {
			// icon.Data is row-major, one packed 32-bit ARGB value per pixel;
			// unpack the four channels by masking/shifting.
			argb = int(icon.Data[x+(y*width)])
			ximg.SetBGRA(x, y, BGRA{
				B: uint8(argb & 0x000000ff),
				G: uint8((argb & 0x0000ff00) >> 8),
				R: uint8((argb & 0x00ff0000) >> 16),
				A: uint8(argb >> 24),
			})
		}
	}
	return ximg
}

// NewIcccmIcon converts two pixmap ids (icon_pixmap and icon_mask in the
// WM_HINTS properts) to a single xgraphics.Image.
// It is okay for one of iconPixmap or iconMask to be 0, but not both.
// You should probably use xgraphics.FindIcon instead of this directly.
func NewIcccmIcon(X *xgbutil.XUtil, iconPixmap, iconMask xproto.Pixmap) (*Image, error) {
	if iconPixmap == 0 && iconMask == 0 {
		return nil, fmt.Errorf("NewIcccmIcon: At least one of iconPixmap or " +
			"iconMask must be non-zero, but both are 0.")
	}

	var pximg, mximg *Image
	var err error

	// Get the xgraphics.Image for iconPixmap.
	if iconPixmap != 0 {
		pximg, err = NewDrawable(X, xproto.Drawable(iconPixmap))
		if err != nil {
			return nil, err
		}
	}

	// Now get the xgraphics.Image for iconMask.
	if iconMask != 0 {
		mximg, err = NewDrawable(X, xproto.Drawable(iconMask))
		if err != nil {
			return nil, err
		}
	}

	// Now merge them together if both were specified.
	switch {
	case pximg != nil && mximg != nil:
		// Apply the mask: where the mask pixel's alpha is 0, force the icon
		// pixel fully transparent; otherwise keep the icon pixel as-is.
		r := pximg.Bounds()
		var x, y int
		var bgra, maskBgra BGRA
		for x = r.Min.X; x < r.Max.X; x++ {
			for y = r.Min.Y; y < r.Max.Y; y++ {
				maskBgra = mximg.At(x, y).(BGRA)
				bgra = pximg.At(x, y).(BGRA)
				if maskBgra.A == 0 {
					pximg.SetBGRA(x, y, BGRA{
						B: bgra.B,
						G: bgra.G,
						R: bgra.R,
						A: 0,
					})
				}
			}
		}
		return pximg, nil
	case pximg != nil:
		return pximg, nil
	case mximg != nil:
		return mximg, nil
	}
	// The guard at the top rules out both being nil, so one of the cases
	// above always returns.
	panic("unreachable")
}

// NewDrawable converts an X drawable into a xgraphics.Image.
// This is used in NewIcccmIcon.
func NewDrawable(X *xgbutil.XUtil, did xproto.Drawable) (*Image, error) {
	// Get the geometry of the pixmap for use in the GetImage request.
	pgeom, err := xwindow.RawGeometry(X, xproto.Drawable(did))
	if err != nil {
		return nil, err
	}

	// Get the image data for each pixmap.
	// (1<<32)-1 is an all-ones plane mask, i.e. request every plane.
	pixmapData, err := xproto.GetImage(X.Conn(), xproto.ImageFormatZPixmap,
		did, 0, 0, uint16(pgeom.Width()), uint16(pgeom.Height()),
		(1<<32)-1).Reply()
	if err != nil {
		return nil, err
	}

	// Now create the xgraphics.Image and populate it with data from
	// pixmapData and maskData.
	ximg := New(X, image.Rect(0, 0, pgeom.Width(), pgeom.Height()))

	// We'll try to be a little flexible with the image format returned,
	// but not completely flexible.
	err = readDrawableData(X, ximg, did, pixmapData,
		pgeom.Width(), pgeom.Height())
	if err != nil {
		return nil, err
	}
	return ximg, nil
}

// readDrawableData uses Format information to read data from an X pixmap
// into an xgraphics.Image.
// readPixmapData does not take into account all information possible to read
// X pixmaps and bitmaps. Of particular note is bit order/byte order.
// readDrawableData decodes the raw GetImage reply into ximg. It supports
// 1-bit bitmaps (read as alpha masks) and 24/32 bits-per-pixel ZPixmap data;
// anything else returns an error.
func readDrawableData(X *xgbutil.XUtil, ximg *Image, did xproto.Drawable,
	imgData *xproto.GetImageReply, width, height int) error {

	format := GetFormat(X, imgData.Depth)
	if format == nil {
		return fmt.Errorf("Could not find valid format for pixmap %d "+
			"with depth %d", did, imgData.Depth)
	}

	switch format.Depth {
	case 1: // We read bitmaps in as alpha masks.
		if format.BitsPerPixel != 1 {
			return fmt.Errorf("The image returned for pixmap id %d with "+
				"depth %d has an unsupported value for bits-per-pixel: %d",
				did, format.Depth, format.BitsPerPixel)
		}

		// Calculate the padded width of our image data.
		// Each scanline is padded up to a multiple of the server's
		// BitmapFormatScanlinePad (in bits).
		pad := int(X.Setup().BitmapFormatScanlinePad)
		paddedWidth := width
		if width%pad != 0 {
			paddedWidth = width + pad - (width % pad)
		}

		// Process one scanline at a time. Each 'y' represents a
		// single scanline.
		for y := 0; y < height; y++ {
			// Each scanline has length 'width' padded to
			// BitmapFormatScanlinePad, which is found in the X setup info.
			// 'i' is the index to the starting byte of the yth scanline.
			// (paddedWidth is in bits, so divide by 8 for the byte offset.)
			i := y * paddedWidth / 8
			for x := 0; x < width; x++ {
				// Extract bit x of the scanline: byte i+x/8, bit x%8
				// (least-significant bit first).
				b := imgData.Data[i+x/8] >> uint(x%8)
				if b&1 > 0 { // opaque: set bit renders as opaque black
					ximg.Set(x, y, BGRA{0x0, 0x0, 0x0, 0xff})
				} else { // transparent: clear bit renders as transparent white
					ximg.Set(x, y, BGRA{0xff, 0xff, 0xff, 0x0})
				}
			}
		}
	case 24, 32:
		switch format.BitsPerPixel {
		case 24:
			// 3 bytes per pixel in B, G, R order; alpha forced opaque.
			bytesPer := int(format.BitsPerPixel) / 8
			var i int
			ximg.For(func(x, y int) BGRA {
				i = y*width*bytesPer + x*bytesPer
				return BGRA{
					B: imgData.Data[i],
					G: imgData.Data[i+1],
					R: imgData.Data[i+2],
					A: 0xff,
				}
			})
		case 32:
			// 4 bytes per pixel in B, G, R, A order.
			bytesPer := int(format.BitsPerPixel) / 8
			var i int
			ximg.For(func(x, y int) BGRA {
				i = y*width*bytesPer + x*bytesPer
				return BGRA{
					B: imgData.Data[i],
					G: imgData.Data[i+1],
					R: imgData.Data[i+2],
					A: imgData.Data[i+3],
				}
			})
		default:
			return fmt.Errorf("The image returned for pixmap id %d has "+
				"an unsupported value for bits-per-pixel: %d",
				did, format.BitsPerPixel)
		}
	default:
		return fmt.Errorf("The image returned for pixmap id %d has an "+
			"unsupported value for depth: %d", did, format.Depth)
	}
	return nil
}

// GetFormat searches SetupInfo for a Format matching the depth provided.
// Returns nil when no format with that depth exists.
func GetFormat(X *xgbutil.XUtil, depth byte) *xproto.Format {
	for _, pixForm := range X.Setup().PixmapFormats {
		if pixForm.Depth == depth {
			// Safe to return the loop variable's address: we return
			// immediately, so the value is never overwritten.
			return &pixForm
		}
	}
	return nil
}

// getVisualInfo searches SetupInfo for a VisualInfo value matching
// the depth provided.
// XXX: This isn't used (yet).
// NOTE(review): the Printf/Println calls below look like leftover debugging
// output — confirm before relying on this helper.
func getVisualInfo(X *xgbutil.XUtil, depth byte,
	visualid xproto.Visualid) *xproto.VisualInfo {

	for _, depthInfo := range X.Screen().AllowedDepths {
		fmt.Printf("%#v\n", depthInfo)
		// fmt.Printf("%#v\n", depthInfo.Visuals)
		fmt.Println("------------")
		if depthInfo.Depth == depth {
			for _, visual := range depthInfo.Visuals {
				if visual.VisualId == visualid {
					return &visual
				}
			}
		}
	}
	return nil
}
xgraphics/new.go
0.688573
0.519338
new.go
starcoder
package models

import (
    i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)

// NOTE(review): this model follows the Kiota generated-code style (note the
// hash-named import alias and uniform getter/setter/deserializer shape);
// changes should normally be made in the API description / generator rather
// than by hand — confirm before editing.

// AccessReviewSettings 
type AccessReviewSettings struct {
    // Indicates whether showing recommendations to reviewers is enabled.
    accessRecommendationsEnabled *bool
    // The number of days of user activities to show to reviewers.
    activityDurationInDays *int32
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // Indicates whether the auto-apply capability, to automatically change the target object access resource, is enabled. If not enabled, a user must, after the review completes, apply the access review.
    autoApplyReviewResultsEnabled *bool
    // Indicates whether a decision should be set if the reviewer did not supply one. For use when auto-apply is enabled. If you don't want to have a review decision recorded unless the reviewer makes an explicit choice, set it to false.
    autoReviewEnabled *bool
    // Detailed settings for how the feature should set the review decision. For use when auto-apply is enabled.
    autoReviewSettings AutoReviewSettingsable
    // Indicates whether reviewers are required to provide a justification when reviewing access.
    justificationRequiredOnApproval *bool
    // Indicates whether sending mails to reviewers and the review creator is enabled.
    mailNotificationsEnabled *bool
    // Detailed settings for recurrence.
    recurrenceSettings AccessReviewRecurrenceSettingsable
    // Indicates whether sending reminder emails to reviewers is enabled.
    remindersEnabled *bool
}
// NewAccessReviewSettings instantiates a new accessReviewSettings and sets the default values.
func NewAccessReviewSettings()(*AccessReviewSettings) {
    m := &AccessReviewSettings{
    }
    m.SetAdditionalData(make(map[string]interface{}));
    return m
}
// CreateAccessReviewSettingsFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateAccessReviewSettingsFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewAccessReviewSettings(), nil
}
// GetAccessRecommendationsEnabled gets the accessRecommendationsEnabled property value. Indicates whether showing recommendations to reviewers is enabled.
func (m *AccessReviewSettings) GetAccessRecommendationsEnabled()(*bool) {
    if m == nil {
        return nil
    } else {
        return m.accessRecommendationsEnabled
    }
}
// GetActivityDurationInDays gets the activityDurationInDays property value. The number of days of user activities to show to reviewers.
func (m *AccessReviewSettings) GetActivityDurationInDays()(*int32) {
    if m == nil {
        return nil
    } else {
        return m.activityDurationInDays
    }
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *AccessReviewSettings) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    } else {
        return m.additionalData
    }
}
// GetAutoApplyReviewResultsEnabled gets the autoApplyReviewResultsEnabled property value. Indicates whether the auto-apply capability, to automatically change the target object access resource, is enabled. If not enabled, a user must, after the review completes, apply the access review.
func (m *AccessReviewSettings) GetAutoApplyReviewResultsEnabled()(*bool) {
    if m == nil {
        return nil
    } else {
        return m.autoApplyReviewResultsEnabled
    }
}
// GetAutoReviewEnabled gets the autoReviewEnabled property value. Indicates whether a decision should be set if the reviewer did not supply one. For use when auto-apply is enabled. If you don't want to have a review decision recorded unless the reviewer makes an explicit choice, set it to false.
func (m *AccessReviewSettings) GetAutoReviewEnabled()(*bool) {
    if m == nil {
        return nil
    } else {
        return m.autoReviewEnabled
    }
}
// GetAutoReviewSettings gets the autoReviewSettings property value. Detailed settings for how the feature should set the review decision. For use when auto-apply is enabled.
func (m *AccessReviewSettings) GetAutoReviewSettings()(AutoReviewSettingsable) {
    if m == nil {
        return nil
    } else {
        return m.autoReviewSettings
    }
}
// GetFieldDeserializers the deserialization information for the current model
func (m *AccessReviewSettings) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
    res["accessRecommendationsEnabled"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetAccessRecommendationsEnabled(val)
        }
        return nil
    }
    res["activityDurationInDays"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetInt32Value()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetActivityDurationInDays(val)
        }
        return nil
    }
    res["autoApplyReviewResultsEnabled"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetAutoApplyReviewResultsEnabled(val)
        }
        return nil
    }
    res["autoReviewEnabled"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetAutoReviewEnabled(val)
        }
        return nil
    }
    res["autoReviewSettings"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateAutoReviewSettingsFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetAutoReviewSettings(val.(AutoReviewSettingsable))
        }
        return nil
    }
    res["justificationRequiredOnApproval"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetJustificationRequiredOnApproval(val)
        }
        return nil
    }
    res["mailNotificationsEnabled"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetMailNotificationsEnabled(val)
        }
        return nil
    }
    res["recurrenceSettings"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateAccessReviewRecurrenceSettingsFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetRecurrenceSettings(val.(AccessReviewRecurrenceSettingsable))
        }
        return nil
    }
    res["remindersEnabled"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetRemindersEnabled(val)
        }
        return nil
    }
    return res
}
// GetJustificationRequiredOnApproval gets the justificationRequiredOnApproval property value. Indicates whether reviewers are required to provide a justification when reviewing access.
func (m *AccessReviewSettings) GetJustificationRequiredOnApproval()(*bool) {
    if m == nil {
        return nil
    } else {
        return m.justificationRequiredOnApproval
    }
}
// GetMailNotificationsEnabled gets the mailNotificationsEnabled property value. Indicates whether sending mails to reviewers and the review creator is enabled.
func (m *AccessReviewSettings) GetMailNotificationsEnabled()(*bool) {
    if m == nil {
        return nil
    } else {
        return m.mailNotificationsEnabled
    }
}
// GetRecurrenceSettings gets the recurrenceSettings property value. Detailed settings for recurrence.
func (m *AccessReviewSettings) GetRecurrenceSettings()(AccessReviewRecurrenceSettingsable) {
    if m == nil {
        return nil
    } else {
        return m.recurrenceSettings
    }
}
// GetRemindersEnabled gets the remindersEnabled property value. Indicates whether sending reminder emails to reviewers is enabled.
func (m *AccessReviewSettings) GetRemindersEnabled()(*bool) {
    if m == nil {
        return nil
    } else {
        return m.remindersEnabled
    }
}
// Serialize serializes information the current object
func (m *AccessReviewSettings) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    {
        err := writer.WriteBoolValue("accessRecommendationsEnabled", m.GetAccessRecommendationsEnabled())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteInt32Value("activityDurationInDays", m.GetActivityDurationInDays())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteBoolValue("autoApplyReviewResultsEnabled", m.GetAutoApplyReviewResultsEnabled())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteBoolValue("autoReviewEnabled", m.GetAutoReviewEnabled())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteObjectValue("autoReviewSettings", m.GetAutoReviewSettings())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteBoolValue("justificationRequiredOnApproval", m.GetJustificationRequiredOnApproval())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteBoolValue("mailNotificationsEnabled", m.GetMailNotificationsEnabled())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteObjectValue("recurrenceSettings", m.GetRecurrenceSettings())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteBoolValue("remindersEnabled", m.GetRemindersEnabled())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteAdditionalData(m.GetAdditionalData())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetAccessRecommendationsEnabled sets the accessRecommendationsEnabled property value. Indicates whether showing recommendations to reviewers is enabled.
func (m *AccessReviewSettings) SetAccessRecommendationsEnabled(value *bool)() {
    if m != nil {
        m.accessRecommendationsEnabled = value
    }
}
// SetActivityDurationInDays sets the activityDurationInDays property value. The number of days of user activities to show to reviewers.
func (m *AccessReviewSettings) SetActivityDurationInDays(value *int32)() {
    if m != nil {
        m.activityDurationInDays = value
    }
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *AccessReviewSettings) SetAdditionalData(value map[string]interface{})() {
    if m != nil {
        m.additionalData = value
    }
}
// SetAutoApplyReviewResultsEnabled sets the autoApplyReviewResultsEnabled property value. Indicates whether the auto-apply capability, to automatically change the target object access resource, is enabled. If not enabled, a user must, after the review completes, apply the access review.
func (m *AccessReviewSettings) SetAutoApplyReviewResultsEnabled(value *bool)() {
    if m != nil {
        m.autoApplyReviewResultsEnabled = value
    }
}
// SetAutoReviewEnabled sets the autoReviewEnabled property value. Indicates whether a decision should be set if the reviewer did not supply one. For use when auto-apply is enabled. If you don't want to have a review decision recorded unless the reviewer makes an explicit choice, set it to false.
func (m *AccessReviewSettings) SetAutoReviewEnabled(value *bool)() {
    if m != nil {
        m.autoReviewEnabled = value
    }
}
// SetAutoReviewSettings sets the autoReviewSettings property value. Detailed settings for how the feature should set the review decision. For use when auto-apply is enabled.
func (m *AccessReviewSettings) SetAutoReviewSettings(value AutoReviewSettingsable)() {
    if m != nil {
        m.autoReviewSettings = value
    }
}
// SetJustificationRequiredOnApproval sets the justificationRequiredOnApproval property value. Indicates whether reviewers are required to provide a justification when reviewing access.
func (m *AccessReviewSettings) SetJustificationRequiredOnApproval(value *bool)() {
    if m != nil {
        m.justificationRequiredOnApproval = value
    }
}
// SetMailNotificationsEnabled sets the mailNotificationsEnabled property value. Indicates whether sending mails to reviewers and the review creator is enabled.
func (m *AccessReviewSettings) SetMailNotificationsEnabled(value *bool)() {
    if m != nil {
        m.mailNotificationsEnabled = value
    }
}
// SetRecurrenceSettings sets the recurrenceSettings property value. Detailed settings for recurrence.
func (m *AccessReviewSettings) SetRecurrenceSettings(value AccessReviewRecurrenceSettingsable)() {
    if m != nil {
        m.recurrenceSettings = value
    }
}
// SetRemindersEnabled sets the remindersEnabled property value. Indicates whether sending reminder emails to reviewers is enabled.
func (m *AccessReviewSettings) SetRemindersEnabled(value *bool)() {
    if m != nil {
        m.remindersEnabled = value
    }
}
models/access_review_settings.go
0.620852
0.418816
access_review_settings.go
starcoder
package sequtil

import (
	"fmt"
	"strings"
)

// Maps nucleotide byte value to its int value.
var ntoi []int

func init() {
	// Initialize ntoi values: -1 for every byte except the eight nucleotide letters.
	ntoi = make([]int, 256)
	for i := range ntoi {
		ntoi[i] = -1
	}
	ntoi['a'], ntoi['A'] = 0, 0
	ntoi['c'], ntoi['C'] = 1, 1
	ntoi['g'], ntoi['G'] = 2, 2
	ntoi['t'], ntoi['T'] = 3, 3
}

// Ntoi converts a nucleotide to an int.
// Aa:0 Cc:1 Gg:2 Tt:3. Other values return -1.
func Ntoi(nuc byte) int {
	return ntoi[nuc]
}

// Iton converts an int to a nucleotide character.
// 0:A 1:C 2:G 3:T. Other values return N.
func Iton(num int) byte {
	switch num {
	case 0:
		return 'A'
	case 1:
		return 'C'
	case 2:
		return 'G'
	case 3:
		return 'T'
	default:
		return 'N'
	}
}

// ReverseComplement appends to dst the reverse complement of src and returns
// the new dst. Characters not in "aAcCgGtTnN" will cause a panic.
// Case is preserved; N/n complements to itself.
func ReverseComplement(dst, src []byte) []byte {
	for i := len(src) - 1; i >= 0; i-- {
		b := src[i]
		switch b {
		case 'a':
			dst = append(dst, 't')
		case 'c':
			dst = append(dst, 'g')
		case 'g':
			dst = append(dst, 'c')
		case 't':
			dst = append(dst, 'a')
		case 'A':
			dst = append(dst, 'T')
		case 'C':
			dst = append(dst, 'G')
		case 'G':
			dst = append(dst, 'C')
		case 'T':
			dst = append(dst, 'A')
		case 'N':
			dst = append(dst, 'N')
		case 'n':
			dst = append(dst, 'n')
		default:
			panic(fmt.Sprintf("Unexpected base value: %v, want aAcCgGtTnN", b))
		}
	}
	return dst
}

// ReverseComplementString returns the reverse complement of s.
// Characters not in "aAcCgGtTnN" will cause a panic.
func ReverseComplementString(s string) string { builder := &strings.Builder{} builder.Grow(len(s)) for i := len(s) - 1; i >= 0; i-- { b := s[i] switch b { case 'a': builder.WriteByte('t') case 'c': builder.WriteByte('g') case 'g': builder.WriteByte('c') case 't': builder.WriteByte('a') case 'A': builder.WriteByte('T') case 'C': builder.WriteByte('G') case 'G': builder.WriteByte('C') case 'T': builder.WriteByte('A') case 'N': builder.WriteByte('N') case 'n': builder.WriteByte('n') default: panic(fmt.Sprintf("Unexpected base value: %v, want aAcCgGtTnN", b)) } } return builder.String() } // DNATo2Bit appends to dst the 2-bit representation of the DNA sequence in src, // and returns the new dst. Characters not in "aAcCgGtT" will cause a panic. func DNATo2Bit(dst, src []byte) []byte { dn := len(dst) for i, b := range src { di := dn + i/4 shift := 6 - i%4*2 // Make the first character the most significant. if shift == 6 { // Starting a new byte. dst = append(dst, 0) } dbInt := Ntoi(b) if dbInt == -1 { panic(fmt.Sprintf("Unexpected base value: %v, want aAcCgGtT", b)) } db := byte(dbInt) << shift dst[di] |= db } return dst } // DNAFrom2Bit appends to dst the nucleotides represented in 2-bit in src and // returns the new dst. Only outputs characters in "ACGT". func DNAFrom2Bit(dst, src []byte) []byte { for i := 0; i < len(src); i++ { dst = append(dst, dnaFrom2bit[src[i]][:]...) } return dst } // Maps 2-bit value to its expanded representation. var dnaFrom2bit = make([][4]byte, 256) // Initializes the dnaFrom2bit slice. func init() { val := make([]byte, 4) for i := 0; i < 256; i++ { for j := 0; j < 4; j++ { // First base is the most significant digit. val[3-j] = Iton((i >> (2 * j)) & 3) } copy(dnaFrom2bit[i][:], val) } }
sequtil/sequtil.go
0.619817
0.458834
sequtil.go
starcoder
package natural

import (
	"fmt"
)

// naturalA caches the Natural for "A", the reference note for semitone counts.
var naturalA Natural

// naturalsNames lists the natural note names in ascending order.
var naturalsNames []string

// naturals maps a note name to its Natural value.
var naturals map[string]Natural

// Natural represents a natural (unaltered) musical note: A through G.
type Natural struct {
	name string
}

// Name returns the note name ("A".."G").
func (n Natural) Name() string {
	return n.name
}

// Next returns the natural note after n, wrapping from G back to A.
func (n Natural) Next() Natural {
	for pos, name := range naturalsNames {
		if n.name == name {
			nextPos := (pos + 1) % len(naturalsNames)
			nextNaturalName := naturalsNames[nextPos]
			return naturals[nextNaturalName]
		}
	}
	panic(fmt.Sprintf("cannot find next Natural from %v", n))
}

// Prev returns the natural note before n, wrapping from A back to G.
func (n Natural) Prev() Natural {
	// range naturalsNames in reverse order
	for pos := len(naturalsNames) - 1; pos >= 0; pos-- {
		name := naturalsNames[pos]
		if n.name == name {
			prevPos := (pos - 1) % len(naturalsNames)
			// Go's % can yield a negative result; normalize into range.
			if prevPos < 0 {
				prevPos += len(naturalsNames)
			}
			prevNaturalName := naturalsNames[prevPos]
			return naturals[prevNaturalName]
		}
	}
	panic(fmt.Sprintf("cannot find previous Natural from %v", n))
}

// Semitones returns the number of semitones from A.
func (n Natural) Semitones() int {
	semitones := 0
	current := naturalA
	for current != n {
		next := current.Next()
		// B->C and E->F are the two half-step intervals between naturals.
		if (next.name == "C" && current.name == "B") || (next.name == "F" && current.name == "E") {
			semitones += 1
		} else {
			semitones += 2
		}
		current = next
	}
	return semitones
}

// SemitonesBasedOn returns the number of semitones
// of a Natural note from another Natural note, wrapped into one octave.
func (n Natural) SemitonesBasedOn(from Natural) int {
	diff := n.Semitones() - from.Semitones()
	if diff < 0 {
		diff += 12
	}
	return diff
}

// SemitonesToNext returns the semitone distance from n to the next natural.
func (n Natural) SemitonesToNext() int {
	return n.Next().SemitonesBasedOn(n)
}

// SemitonesFromPrev returns the semitone distance from the previous natural to n.
func (n Natural) SemitonesFromPrev() int {
	return n.SemitonesBasedOn(n.Prev())
}

// AddIntervalSize returns the natural `size` diatonic steps away from n
// (positive or negative, wrapping around). Size 0 is invalid; |size| 1 is the
// unison, so the step count is |size|-1.
func (n Natural) AddIntervalSize(size int) (Natural, error) {
	if size == 0 {
		return Natural{}, fmt.Errorf("cannot add interval size %d to natural %v", size, n)
	} else if size > 0 {
		size -= 1
	} else {
		size += 1
	}
	// Find natural position
	for pos, name := range naturalsNames {
		if name == n.name {
			pos = (pos + size) % len(naturalsNames)
			if pos < 0 {
				pos += len(naturalsNames)
			}
			return naturals[naturalsNames[pos]], nil
		}
	}
	panic(fmt.Sprintf("cannot add interval size %d to natural %v", size, n))
}

// IsA reports whether n is the note A.
func (n Natural) IsA() bool {
	return n.name == "A"
}

// IsB reports whether n is the note B.
func (n Natural) IsB() bool {
	return n.name == "B"
}

// IsC reports whether n is the note C.
func (n Natural) IsC() bool {
	return n.name == "C"
}

// IsD reports whether n is the note D.
func (n Natural) IsD() bool {
	return n.name == "D"
}

// IsE reports whether n is the note E.
func (n Natural) IsE() bool {
	return n.name == "E"
}

// IsF reports whether n is the note F.
func (n Natural) IsF() bool {
	return n.name == "F"
}

// IsG reports whether n is the note G.
func (n Natural) IsG() bool {
	return n.name == "G"
}

// New returns the Natural with the given name, or an error if the name is unknown.
func New(name string) (Natural, error) {
	if natural, exists := naturals[name]; exists {
		return natural, nil
	} else {
		return Natural{}, fmt.Errorf("cannot find Natural with name %s", name)
	}
}

// AllNaturals returns all naturals in name order (A through G).
func AllNaturals() []Natural {
	all := make([]Natural, len(naturalsNames))
	for i, name := range naturalsNames {
		all[i] = naturals[name]
	}
	return all
}

func init() {
	// init naturalsNames
	naturalsNames = []string{"A", "B", "C", "D", "E", "F", "G"}
	// init naturals
	naturals = make(map[string]Natural)
	for _, name := range naturalsNames {
		nat := Natural{name}
		naturals[name] = nat
	}
	// init naturalA
	var err error
	if naturalA, err = New("A"); err != nil {
		panic(err)
	}
}
theory/natural/natural.go
0.525856
0.431944
natural.go
starcoder
package core

import (
	"fmt"
	"path"
	"reflect"
	"strings"
)

// Path represents an on-disk path that is either an input to or an output from a BuildStep (or both).
type Path interface {
	Absolute() string
	Relative() string
	String() string
	WithExt(ext string) OutPath
	WithPrefix(prefix string) OutPath
	WithSuffix(suffix string) OutPath
}

// inPath is a path relative to the workspace source directory.
type inPath struct {
	rel string
}

// Absolute returns the absolute path.
func (p inPath) Absolute() string {
	return path.Join(input.SourceDir, p.rel)
}

// Relative returns the path relative to the workspace source directory.
func (p inPath) Relative() string {
	return p.rel
}

// WithExt creates an OutPath with the same relative path and the given extension.
// Note that the result lives in the build directory, not the source directory.
func (p inPath) WithExt(ext string) OutPath {
	return outPath{p.rel}.WithExt(ext)
}

// WithPrefix creates an OutPath with the same relative path and the given prefix.
func (p inPath) WithPrefix(prefix string) OutPath {
	return outPath{p.rel}.WithPrefix(prefix)
}

// WithSuffix creates an OutPath with the same relative path and the given suffix.
func (p inPath) WithSuffix(suffix string) OutPath {
	return outPath{p.rel}.WithSuffix(suffix)
}

// String representation of an inPath is its absolute path.
func (p inPath) String() string {
	return p.Absolute()
}

// OutPath is a path relative to the workspace build directory.
type OutPath interface {
	Path
	forceOutPath()
}

type outPath struct {
	rel string
}

// Absolute returns the absolute path.
func (p outPath) Absolute() string {
	return path.Join(buildDir(), p.rel)
}

// Relative returns the path relative to the workspace build directory.
func (p outPath) Relative() string {
	return p.rel
}

// WithExt creates an OutPath with the same relative path and the given extension.
func (p outPath) WithExt(ext string) OutPath {
	// path.Ext includes the leading dot; strip the old extension, then append
	// a "." plus the new one.
	oldExt := path.Ext(p.rel)
	newRel := fmt.Sprintf("%s.%s", strings.TrimSuffix(p.rel, oldExt), ext)
	return outPath{newRel}
}

// WithPrefix creates an OutPath with the same relative path and the given prefix
// prepended to the file name (not to the whole path).
func (p outPath) WithPrefix(prefix string) OutPath {
	return outPath{path.Join(path.Dir(p.rel), prefix+path.Base(p.rel))}
}

// WithSuffix creates an OutPath with the same relative path and the given suffix.
func (p outPath) WithSuffix(suffix string) OutPath {
	return outPath{p.rel + suffix}
}

// String representation of an OutPath is its absolute path.
func (p outPath) String() string {
	return p.Absolute()
}

// forceOutPath makes sure that inPath or Path cannot be used as OutPath.
func (p outPath) forceOutPath() {}

// GlobalPath is a global path.
type GlobalPath interface {
	Absolute() string
}

type globalPath struct {
	abs string
}

// Absolute returns the absolute path.
func (p globalPath) Absolute() string {
	return p.abs
}

// String representation of a globalPath is its absolute path.
func (p globalPath) String() string {
	return p.Absolute()
}

// NewInPath creates an inPath for a path relative to the source directory.
// The package's import path is used as the directory component.
func NewInPath(pkg interface{}, p string) Path {
	return inPath{path.Join(reflect.TypeOf(pkg).PkgPath(), p)}
}

// NewOutPath creates an OutPath for a path relative to the build directory.
// The package's import path is used as the directory component.
func NewOutPath(pkg interface{}, p string) OutPath {
	return outPath{path.Join(reflect.TypeOf(pkg).PkgPath(), p)}
}

// NewGlobalPath creates a globalPath.
func NewGlobalPath(p string) GlobalPath {
	return globalPath{p}
}

// BuildPath returns a path relative to the build directory.
func BuildPath(p string) OutPath {
	return outPath{p}
}

// SourcePath returns a path relative to the source directory.
func SourcePath(p string) Path {
	return inPath{p}
}
RULES/core/path.go
0.841663
0.437583
path.go
starcoder
package events

// MakeInitialData builds the seed event list served by the application.
// It currently contains a single onsite event ("Christmas Onsite 2020") with
// its hand-curated rooms; each room carries an optional conference URL and
// description shown to attendees.
func MakeInitialData() []Event {
	var christmasOnsite2020 *OnsiteEvent
	// Block scope keeps the event construction visually grouped.
	{
		christmasOnsite2020 = NewOnsiteEvent()
		christmasOnsite2020.BaseEvent.active = true
		christmasOnsite2020.BaseEvent.id = "christmas-onsite-2020"
		christmasOnsite2020.BaseEvent.name = "Christmas Onsite 2020"
		christmasOnsite2020.Rooms = []OnsiteEventRoom{
			{
				ID:            "main-stage",
				Name:          "Main Stage",
				ConferenceURL: "https://swarm.ly/rxm",
				Description:   "Join Oliver to talk about your Kubernetes journey, your experience.",
			},
			{
				ID:            "mulled-wine",
				Name:          "Mulled wine",
				ConferenceURL: "https://meet.google.com/uod-yuaj-hot",
				Description:   "The Cocktailkunst team has been shaping bar culture in Germany for over 10 years and has already trained over 10,000 participants in cocktail courses and workshops. Some of the best bartenders in Germany are behind the project. Founder <NAME> has been awarded several world championship titles and has published various specialist books on the topics of beverages and enjoyment. Look forward to perfect craftsmanship, real expert knowledge and a relaxed atmosphere!",
			},
			{
				ID:            "remote-work",
				Name:          "Remote work / Distributed teams / etc.",
				ConferenceURL: "https://meet.google.com/igm-tvcz-jzj",
				Description:   `REMOTE FTW! Giant Swarm has been fully remote for 6 years now. If you want to dive deeper into a remote company organization, join us at our stall. We are curious about your challenges. We are not limited to remote only discussions. Other "new work buzzwords" like transparency, self organization, agile, ...and how we fill them with life may also be on the plate. We are happy to share and discuss <3.`,
			},
			{
				ID:            "monitoring",
				Name:          "Observability",
				ConferenceURL: "https://meet.google.com/jvd-sjhb-fjn",
				Description:   "Prometheus? Loki? Jaeger? Feeling lost? Monitoring and observability is core to running modern Cloud Native infrastructure - find out the latest and greatest about the pillars of metrics, logging, tracing, and all things observability, from Giant Swarm engineers in the field and the larger Giant Swarm community.",
			},
			{
				ID:            "security",
				Name:          "Security / Auditing",
				ConferenceURL: "https://meet.google.com/thu-cvos-iue",
				Description:   `--- CLASSIFIED --- MESSAGE VIA SIGNAL W/ BASE64 ENC TO LEARN / SHARE / DISCUSS LATEST SECURITY AND AUDITING MEASURES DELETE AFTER READING --- CLASSIFIED ---`,
			},
			{
				ID:            "release-engineering",
				Name:          "Release engineering",
				ConferenceURL: "https://meet.google.com/nov-xkmk-gxh",
				Description:   "Cloud Native Infrastructure is powered by Continuous Integration and Continuous Deployment, and Release Engineering as a whole. Given that, the landscape is hard to navigate, with hundreds of tools, and many best practices. Talk with Giant Swarm Release Engineering experts, as well as other members of the larger Giant Swarm community.",
			},
			{
				ID:            "kubernetes",
				Name:          "Kubernetes (incl. operators)",
				ConferenceURL: "https://meet.google.com/bao-uhfb-nnc",
				Description:   "Kubernetes has already placed itself as the market leader orchestration system, with Giant Swarm running production-grade Kubernetes for the last five years. It's fair to say we've picked up a few tips and tricks, including how to build world-class operators - discover Giant Swarm's thoughts on the past, present, and future of Kubernetes.",
			},
			{
				ID:            "managed-apps",
				Name:          "Managed apps",
				ConferenceURL: "https://meet.google.com/fga-vgtv-jat",
				Description:   "With Kubernetes being just one building block in your digital transformation, there's a growing need for all the other projects in the Cloud Native ecosystem, and beyond. With Giant Swarm's App Platform, we've learnt all about running and supporting applications on Kubernetes clusters, and enabling the future of the Cloud Native stack. Let's have a chat about managing Prometheus, Loki, Istio, and more.",
			},
			{
				ID:            "devops",
				Name:          "DevOps / Operations",
				ConferenceURL: "https://meet.google.com/awg-cbfs-wrp",
				Description:   "We all wish it was as simple as pushing code, but Operations forms a large part of critical infrastructure. With modern improvements like DevOps and SecOps entering the space, room for innovation is at an all-time high. Chat with both your team members from other companies, and Giant Swarm engineers on everything that keeps the lights on.",
			},
			{
				ID:            "christmas-tree",
				Name:          "Christmas tree",
				ConferenceURL: "https://meet.jit.si/GiantSwarm2020ChristmasTree",
				Description:   "We don't want to brag, but our Christmas tree is pretty much as cool as Christmas trees get. Take some selfies and share with your family and friends. They'll love it!",
			},
			{
				ID:            "ice-rink",
				Name:          "Ice rink",
				ConferenceURL: "https://meet.jit.si/GiantSwarm2020IceRink",
				Description:   "Challenge for today: Try not to fa... Nevermind, I just did.",
			},
			{
				// No conference URL or description for the photo booth.
				ID:   "photo-booth",
				Name: "Photo booth",
			},
			{
				ID:            "ferris-wheel",
				Name:          "Ferris wheel",
				ConferenceURL: "https://meet.jit.si/GiantSwarm2020FerrisWheel",
				Description:   "Take a ride into the Ferris wheel and explore all the beautiful stalls from the sky.",
			},
			{
				ID:            "info-signpost",
				Name:          "Info signpost",
				ConferenceURL: "https://meet.google.com/teq-yydd-byt",
				Description:   "Need some information about the stalls? Well this is probably not the best place for that, but you can still hang out if you want.",
			},
			{
				ID:            "carousel",
				Name:          "Carousel",
				ConferenceURL: "https://meet.jit.si/GiantSwarm2020Carousel",
				Description:   "Aren't you a little bit old for this? Alright... I'll let it slide this time around.",
			},
			{
				ID:            "spare",
				Name:          "Magic Show",
				ConferenceURL: "https://meet.google.com/saa-uswq-zaz",
				Description:   "<NAME> is going to host his magic show here at 4:30pm and 8:00pm CET",
			},
			{
				ID:            "direkt-gruppe",
				Name:          "Partner - direkt gruppe",
				ConferenceURL: "https://teams.microsoft.com/l/meetup-join/19%3ameeting_YmZhYzI3NDItYmQxZS00NmRmLTkwNDctYzJlM2UyMmFjZDA4%40thread.v2/0?context=%7b%22Tid%22%3a%222ec62c4e-38c0-437a-a523-11d6b35548dc%22%2c%22Oid%22%3a%2274110082-96f5-4cb8-8b09-bf9c6375f71b%22%7d",
				Description:   "direkt gruppe is a recognized digitization partner for IT strategy and technology, transformation and solutions as well as SAP process consulting. The group consists of four companies: direkt gruppe GmbH, advanced technology direkt GmbH, business solutions direkt GmbH and solutions direkt GmbH.",
			},
			{
				ID:            "container-solutions",
				Name:          "Partner - Container Solutions",
				ConferenceURL: "https://meet.google.com/kud-munb-exm",
				Description:   "Container Solutions is a professional services firm that prides itself on helping companies migrate to Cloud Native. We collaborate closely with our clients, from the boardroom down, to increase independence, increase control, and reduce risk. We help organisations select the best path forward, regardless of vendor. We draw upon a wide range of skills honed in the real world: from formulating strategy, to teaching, to hardcore, distributed systems delivery.",
			},
			{
				ID:            "viadee",
				Name:          "Partner - viadee",
				ConferenceURL: "https://us02web.zoom.us/j/84588133999",
				Description:   "Since 1994 viadee stands for independence, specific know-how and innovative spirit. We support our customers in finding and developing an individual cloud solution for their business model. We provide consulting services and train our customers on cloud platforms and applications. But don’t worry, we don’t stop at slides! We are also passionate and experienced hands-on developers.",
			},
		}
	}
	events := []Event{
		christmasOnsite2020,
	}
	return events
}
pkg/server/models/events/types/data.go
0.520253
0.427815
data.go
starcoder
package gojas

import (
	"encoding/json"
	"strings"
	// "log"
)

// JsonAssertion is the struct we use to organize our walking of the JSON doc. The decoder is
// created by the Maker only. At the moment, the assertions are walking the JSON doc each time.
// Consider an extended method set that can reuse a single JsonAssertion, in cases of large numbers of asserts.
type JsonAssertion struct {
	// Path string
	json       string                 // raw JSON document text
	receptacle map[string]interface{} // decoded top-level object
	decoder    *json.Decoder
}

// MakeJsonAssertion creates and initializes a JsonAssertion, with decoder instance, or returns an error.
// A non-nil error means the document could not be decoded as a JSON object.
func MakeJsonAssertion(data string) (jas *JsonAssertion, err error) {
	jas = &JsonAssertion{json: data, receptacle: make(map[string]interface{})}
	jas.decoder = json.NewDecoder(strings.NewReader(jas.json))
	if err = jas.decoder.Decode(&jas.receptacle); err != nil {
		// if logme{log.Printf("ERROR decoding:(%v)\n", err.Error())}
	}
	return
}

// IsObjectAt reports whether a JSON object exists at the given path.
func (jas *JsonAssertion) IsObjectAt(path string) (ok bool) {
	_, ok = jas.objectAtPath(splitPath(path), jas.receptacle)
	return
}

// IsNumberAt reports whether the number at the given path exists and equals val.
func (jas *JsonAssertion) IsNumberAt(path string, val float64) (ok bool) {
	asserted := val
	val, ok = jas.floatAtPath(splitPath(path))
	return ok && val == asserted
}

// IsBoolAt reports whether the boolean at the given path exists and equals val.
func (jas *JsonAssertion) IsBoolAt(path string, val bool) (ok bool) {
	asserted := val
	val, ok = jas.boolAtPath(splitPath(path))
	return ok && val == asserted
}

// IsStringAt reports whether the string at the given path exists and equals val.
func (jas *JsonAssertion) IsStringAt(path string, val string) (ok bool) {
	asserted := val
	val, ok = jas.stringAtPath(splitPath(path))
	return ok && val == asserted
}

// IsIdenticalFloatSliceAt reports whether a float array exists at the given path
// that is identical to val: same length, same elements, same order.
func (jas *JsonAssertion) IsIdenticalFloatSliceAt(path string, val []interface{}) (ok bool) {
	asserted := val
	val, ok = jas.arrayAtPath(splitPath(path))
	return ok && areIdenticalFloat64InterfaceSlices(val, asserted)
}

// IsMatchingFloatSliceAt looks for a float slice at the given path,
// and if it finds one, compares them without regard to their ordering.
func (jas *JsonAssertion) IsMatchingFloatSliceAt(path string, val []interface{}) (ok bool) { asserted := val val, ok = jas.arrayAtPath(splitPath(path)) return ok && areMatchingFloat64InterfaceSlices(val,asserted) } // IsIdenticalStringSliceAt // Assert that a string array is found at the given path, which is 'identical' as in: // same length, same elements AND in the same order. func (jas *JsonAssertion) IsIdenticalStringSliceAt(path string, val []interface{}) (ok bool) { asserted := val val, ok = jas.arrayAtPath(splitPath(path)) return ok && areIdenticalStringInterfaceSlices(val,asserted) } // IsMatchingStringSliceAt // Assert that a string array is found at the given path, which is 'similar' as in: // same length, same elements but not necessarily in the same order. func (jas *JsonAssertion) IsMatchingStringSliceAt(path string, val []interface{}) (ok bool) { asserted := val val, ok = jas.arrayAtPath(splitPath(path)) return ok && areMatchingStringInterfaceSlices(val,asserted) }
gojas.go
0.610221
0.59461
gojas.go
starcoder
package otkafka

import (
	"time"

	"github.com/go-kit/kit/metrics"
	"github.com/segmentio/kafka-go"
)

// readerCollector periodically collects stats from the kafka readers produced
// by factory and records them into stats every interval.
type readerCollector struct {
	factory  ReaderFactory
	stats    *ReaderStats
	interval time.Duration
}

// AggStats is a gauge group struct.
type AggStats struct {
	Min metrics.Gauge
	Max metrics.Gauge
	Avg metrics.Gauge
}

// ReaderStats is a collection of metrics for kafka reader info.
type ReaderStats struct {
	Dials      metrics.Counter
	Fetches    metrics.Counter
	Messages   metrics.Counter
	Bytes      metrics.Counter
	Rebalances metrics.Counter
	Timeouts   metrics.Counter
	Errors     metrics.Counter

	Offset        metrics.Gauge
	Lag           metrics.Gauge
	MinBytes      metrics.Gauge
	MaxBytes      metrics.Gauge
	MaxWait       metrics.Gauge
	QueueLength   metrics.Gauge
	QueueCapacity metrics.Gauge

	DialTime   AggStats
	ReadTime   AggStats
	WaitTime   AggStats
	FetchSize  AggStats
	FetchBytes AggStats

	// reader holds the value of the "reader" label; nil means "default".
	reader *string
}

// Reader sets the reader label in ReaderStats. It returns a shallow copy so
// the receiver is not mutated.
func (r *ReaderStats) Reader(reader string) *ReaderStats {
	stats := *r
	stats.reader = &reader
	return &stats
}

// Observe records the reader stats. It should be called periodically.
func (r *ReaderStats) Observe(stats kafka.ReaderStats) {
	// Labels shared by every metric emitted for this observation.
	withValues := []string{"client_id", stats.ClientID, "topic", stats.Topic, "partition", stats.Partition}
	if r.reader == nil {
		// No explicit reader label was set via Reader(); fall back to "default".
		withValues = append(withValues, "reader", "default")
	} else {
		withValues = append(withValues, "reader", *r.reader)
	}
	// Counters: cumulative event counts reported by the kafka reader.
	r.Dials.With(withValues...).Add(float64(stats.Dials))
	r.Fetches.With(withValues...).Add(float64(stats.Fetches))
	r.Messages.With(withValues...).Add(float64(stats.Messages))
	r.Bytes.With(withValues...).Add(float64(stats.Bytes))
	r.Rebalances.With(withValues...).Add(float64(stats.Rebalances))
	r.Timeouts.With(withValues...).Add(float64(stats.Timeouts))
	r.Errors.With(withValues...).Add(float64(stats.Errors))
	// Gauges: point-in-time reader state; durations are recorded in seconds.
	r.Offset.With(withValues...).Set(float64(stats.Offset))
	r.Lag.With(withValues...).Set(float64(stats.Lag))
	r.MinBytes.With(withValues...).Set(float64(stats.MinBytes))
	r.MaxBytes.With(withValues...).Set(float64(stats.MaxBytes))
	r.MaxWait.With(withValues...).Set(stats.MaxWait.Seconds())
	r.QueueLength.With(withValues...).Set(float64(stats.QueueLength))
	r.QueueCapacity.With(withValues...).Set(float64(stats.QueueCapacity))
	// Aggregated min/max/avg summaries.
	r.DialTime.Min.With(withValues...).Set(stats.DialTime.Min.Seconds())
	r.DialTime.Max.With(withValues...).Set(stats.DialTime.Max.Seconds())
	r.DialTime.Avg.With(withValues...).Set(stats.DialTime.Avg.Seconds())
	r.ReadTime.Min.With(withValues...).Set(stats.ReadTime.Min.Seconds())
	r.ReadTime.Max.With(withValues...).Set(stats.ReadTime.Max.Seconds())
	r.ReadTime.Avg.With(withValues...).Set(stats.ReadTime.Avg.Seconds())
	r.WaitTime.Min.With(withValues...).Set(stats.WaitTime.Min.Seconds())
	r.WaitTime.Max.With(withValues...).Set(stats.WaitTime.Max.Seconds())
	r.WaitTime.Avg.With(withValues...).Set(stats.WaitTime.Avg.Seconds())
	r.FetchSize.Min.With(withValues...).Set(float64(stats.FetchSize.Min))
	r.FetchSize.Max.With(withValues...).Set(float64(stats.FetchSize.Max))
	r.FetchSize.Avg.With(withValues...).Set(float64(stats.FetchSize.Avg))
	r.FetchBytes.Min.With(withValues...).Set(float64(stats.FetchBytes.Min))
	r.FetchBytes.Max.With(withValues...).Set(float64(stats.FetchBytes.Max))
	r.FetchBytes.Avg.With(withValues...).Set(float64(stats.FetchBytes.Avg))
}

// newReaderCollector creates a new kafka reader wrapper containing the name of the reader.
func newReaderCollector(factory ReaderFactory, stats *ReaderStats, interval time.Duration) *readerCollector {
	return &readerCollector{
		factory:  factory,
		stats:    stats,
		interval: interval,
	}
}

// collectConnectionStats collects kafka reader info for Prometheus to scrape.
// Each reader known to the factory is observed under its factory key.
func (d *readerCollector) collectConnectionStats() {
	for k, v := range d.factory.List() {
		reader := v.Conn.(*kafka.Reader)
		stats := reader.Stats()
		d.stats.Reader(k).Observe(stats)
	}
}
otkafka/reader_metrics.go
0.633524
0.485722
reader_metrics.go
starcoder
package iso20022

// Net position of a segregated holding, in a single security, within the overall position held in a securities account at a specified place of safekeeping.
type AggregateBalancePerSafekeepingPlace29 struct {

	// Place where the securities are safe-kept, physically or notionally. This place can be, for example, a local custodian, a Central Securities Depository (CSD) or an International Central Securities Depository (ICSD).
	SafekeepingPlace *SafeKeepingPlace1 `xml:"SfkpgPlc"`

	// Market(s) on which the security is listed.
	PlaceOfListing *MarketIdentification3Choice `xml:"PlcOfListg,omitempty"`

	// Choice between formats for the entity to which the financial instruments are pledged.
	Pledgee *Pledgee1 `xml:"Pldgee,omitempty"`

	// Total quantity of financial instruments of the balance.
	AggregateBalance *Balance6 `xml:"AggtBal"`

	// Price of the financial instrument in one or more currencies.
	PriceDetails []*PriceInformation12 `xml:"PricDtls"`

	// Information needed to process a currency exchange or conversion.
	ForeignExchangeDetails []*ForeignExchangeTerms22 `xml:"FXDtls,omitempty"`

	// Specifies the number of days used for calculating the accrued interest amount.
	DaysAccrued *Number `xml:"DaysAcrd,omitempty"`

	// Valuation amounts provided in the base currency of the account.
	AccountBaseCurrencyAmounts *BalanceAmounts1 `xml:"AcctBaseCcyAmts"`

	// Valuation amounts provided in the currency of the financial instrument.
	InstrumentCurrencyAmounts *BalanceAmounts1 `xml:"InstrmCcyAmts,omitempty"`

	// Valuation amounts provided in another currency than the base currency of the account.
	AlternateReportingCurrencyAmounts *BalanceAmounts1 `xml:"AltrnRptgCcyAmts,omitempty"`

	// Breakdown of the aggregate quantity reported into significant lots, for example, tax lots.
	QuantityBreakdown []*QuantityBreakdown28 `xml:"QtyBrkdwn,omitempty"`

	// Specifies the underlying business area/type of trade causing the collateral movement.
	ExposureType *ExposureType16Choice `xml:"XpsrTp,omitempty"`

	// Breakdown of the aggregate balance per meaningful sub-balances and availability.
	BalanceBreakdown []*SubBalanceInformation14 `xml:"BalBrkdwn,omitempty"`

	// Provides additional instrument sub-balance information on all or parts of the reported financial instrument (unregistered, tax exempt, etc.).
	AdditionalBalanceBreakdown []*AdditionalBalanceInformation14 `xml:"AddtlBalBrkdwn,omitempty"`

	// Provides additional information on the holding.
	HoldingAdditionalDetails *Max350Text `xml:"HldgAddtlDtls,omitempty"`
}

// AddSafekeepingPlace allocates and returns the SafekeepingPlace element.
func (a *AggregateBalancePerSafekeepingPlace29) AddSafekeepingPlace() *SafeKeepingPlace1 {
	a.SafekeepingPlace = new(SafeKeepingPlace1)
	return a.SafekeepingPlace
}

// AddPlaceOfListing allocates and returns the PlaceOfListing element.
func (a *AggregateBalancePerSafekeepingPlace29) AddPlaceOfListing() *MarketIdentification3Choice {
	a.PlaceOfListing = new(MarketIdentification3Choice)
	return a.PlaceOfListing
}

// AddPledgee allocates and returns the Pledgee element.
func (a *AggregateBalancePerSafekeepingPlace29) AddPledgee() *Pledgee1 {
	a.Pledgee = new(Pledgee1)
	return a.Pledgee
}

// AddAggregateBalance allocates and returns the AggregateBalance element.
func (a *AggregateBalancePerSafekeepingPlace29) AddAggregateBalance() *Balance6 {
	a.AggregateBalance = new(Balance6)
	return a.AggregateBalance
}

// AddPriceDetails appends a new entry to PriceDetails and returns it.
func (a *AggregateBalancePerSafekeepingPlace29) AddPriceDetails() *PriceInformation12 {
	newValue := new(PriceInformation12)
	a.PriceDetails = append(a.PriceDetails, newValue)
	return newValue
}

// AddForeignExchangeDetails appends a new entry to ForeignExchangeDetails and returns it.
func (a *AggregateBalancePerSafekeepingPlace29) AddForeignExchangeDetails() *ForeignExchangeTerms22 {
	newValue := new(ForeignExchangeTerms22)
	a.ForeignExchangeDetails = append(a.ForeignExchangeDetails, newValue)
	return newValue
}

// SetDaysAccrued sets DaysAccrued from a plain string.
func (a *AggregateBalancePerSafekeepingPlace29) SetDaysAccrued(value string) {
	a.DaysAccrued = (*Number)(&value)
}

// AddAccountBaseCurrencyAmounts allocates and returns the AccountBaseCurrencyAmounts element.
func (a *AggregateBalancePerSafekeepingPlace29) AddAccountBaseCurrencyAmounts() *BalanceAmounts1 {
	a.AccountBaseCurrencyAmounts = new(BalanceAmounts1)
	return a.AccountBaseCurrencyAmounts
}

// AddInstrumentCurrencyAmounts allocates and returns the InstrumentCurrencyAmounts element.
func (a *AggregateBalancePerSafekeepingPlace29) AddInstrumentCurrencyAmounts() *BalanceAmounts1 {
	a.InstrumentCurrencyAmounts = new(BalanceAmounts1)
	return a.InstrumentCurrencyAmounts
}

// AddAlternateReportingCurrencyAmounts allocates and returns the AlternateReportingCurrencyAmounts element.
func (a *AggregateBalancePerSafekeepingPlace29) AddAlternateReportingCurrencyAmounts() *BalanceAmounts1 {
	a.AlternateReportingCurrencyAmounts = new(BalanceAmounts1)
	return a.AlternateReportingCurrencyAmounts
}

// AddQuantityBreakdown appends a new entry to QuantityBreakdown and returns it.
func (a *AggregateBalancePerSafekeepingPlace29) AddQuantityBreakdown() *QuantityBreakdown28 {
	newValue := new(QuantityBreakdown28)
	a.QuantityBreakdown = append(a.QuantityBreakdown, newValue)
	return newValue
}

// AddExposureType allocates and returns the ExposureType element.
func (a *AggregateBalancePerSafekeepingPlace29) AddExposureType() *ExposureType16Choice {
	a.ExposureType = new(ExposureType16Choice)
	return a.ExposureType
}

// AddBalanceBreakdown appends a new entry to BalanceBreakdown and returns it.
func (a *AggregateBalancePerSafekeepingPlace29) AddBalanceBreakdown() *SubBalanceInformation14 {
	newValue := new(SubBalanceInformation14)
	a.BalanceBreakdown = append(a.BalanceBreakdown, newValue)
	return newValue
}

// AddAdditionalBalanceBreakdown appends a new entry to AdditionalBalanceBreakdown and returns it.
func (a *AggregateBalancePerSafekeepingPlace29) AddAdditionalBalanceBreakdown() *AdditionalBalanceInformation14 {
	newValue := new(AdditionalBalanceInformation14)
	a.AdditionalBalanceBreakdown = append(a.AdditionalBalanceBreakdown, newValue)
	return newValue
}

// SetHoldingAdditionalDetails sets HoldingAdditionalDetails from a plain string.
func (a *AggregateBalancePerSafekeepingPlace29) SetHoldingAdditionalDetails(value string) {
	a.HoldingAdditionalDetails = (*Max350Text)(&value)
}
AggregateBalancePerSafekeepingPlace29.go
0.87864
0.419172
AggregateBalancePerSafekeepingPlace29.go
starcoder
package day04 import ( "fmt" "strconv" "strings" ) type passwordRange struct { from, to int } type password int type criterion func(password password) bool // Day holds the data needed to solve part one and part two type Day struct { passwordRange passwordRange } // NewDay returns a new Day that solves part one and two for the given input func NewDay(input string) (*Day, error) { passwordRange, err := parsePasswordRange(input) if err != nil { return nil, fmt.Errorf("invalid password range %s: %w", input, err) } return &Day{ passwordRange: passwordRange, }, nil } // SolvePartOne solves part one func (d Day) SolvePartOne() (string, error) { criteria := []criterion{ password.isSixDigitNumber, password.hasTwoAdjacentDigits, password.hasNeverDecreasingDigits, } meetingCriteriaPasswords := d.meetingCriteriaPasswords(criteria) return fmt.Sprintf("%d", len(meetingCriteriaPasswords)), nil } // SolvePartTwo solves part two func (d Day) SolvePartTwo() (string, error) { criteria := []criterion{ password.isSixDigitNumber, password.hasNeverDecreasingDigits, password.hasExactlyTwoAdjacentDigits, } meetingCriteriaPasswords := d.meetingCriteriaPasswords(criteria) return fmt.Sprintf("%d", len(meetingCriteriaPasswords)), nil } func parsePasswordRange(passwordRangeString string) (passwordRange, error) { ranges := strings.Split(passwordRangeString, "-") if len(ranges) != 2 { return passwordRange{}, fmt.Errorf("invalid number of ranges %d", len(ranges)) } from, err := strconv.Atoi(ranges[0]) if err != nil { return passwordRange{}, fmt.Errorf("invalid from %s: %w", ranges[0], err) } to, err := strconv.Atoi(ranges[1]) if err != nil { return passwordRange{}, fmt.Errorf("invalid to %s: %w", ranges[1], err) } if from > to { return passwordRange{}, fmt.Errorf("from (%d) can't be greater than to (%d)", from, to) } return passwordRange{ from: from, to: to, }, nil } func (d Day) meetingCriteriaPasswords(criteria []criterion) []password { var meetingCriteriaPasswords []password for i := 
d.passwordRange.from; i <= d.passwordRange.to; i++ { password := password(i) if password.meetsCriteria(criteria) { meetingCriteriaPasswords = append(meetingCriteriaPasswords, password) } } return meetingCriteriaPasswords } func (p password) meetsCriteria(criteria []criterion) bool { for _, criterion := range criteria { if !criterion(p) { return false } } return true } func (p password) isSixDigitNumber() bool { return p >= 100000 && p < 1000000 } func (p password) hasTwoAdjacentDigits() bool { return twoAdjacentDigits(int(p)) } func twoAdjacentDigits(n int) bool { if n < 10 { return false } lastDigit := n % 10 penultimateDigit := (n / 10) % 10 return penultimateDigit == lastDigit || twoAdjacentDigits(n/10) } func (p password) hasNeverDecreasingDigits() bool { return neverDecreasingDigits(int(p)) } func neverDecreasingDigits(n int) bool { if n < 10 { return true } lastDigit := n % 10 penultimateDigit := (n / 10) % 10 return lastDigit >= penultimateDigit && neverDecreasingDigits(n/10) } func (p password) hasExactlyTwoAdjacentDigits() bool { return exactlyTwoAdjacentDigits(int(p)) } func exactlyTwoAdjacentDigits(n int) bool { if n < 100 { return twoAdjacentDigits(n) } lastDigit := n % 10 penultimateDigit := (n / 10) % 10 antePenultimateDigit := (n / 100) % 10 if lastDigit == penultimateDigit && penultimateDigit != antePenultimateDigit { return true } return exactlyTwoAdjacentDigits(removeLastEqualDigits(n, lastDigit)) } func removeLastEqualDigits(n, digit int) int { lastDigit := n % 10 if lastDigit == digit { return removeLastEqualDigits(n/10, digit) } return n }
day04/day04.go
0.736495
0.458167
day04.go
starcoder
type OpTree struct { L *OpTree R *OpTree Op string IsNum bool Num int } func NewNum(num int) *OpTree { return &OpTree { IsNum: true, Num: num, } } func (t *OpTree) Calc() int { if t == nil { return 0 } if t.IsNum { return t.Num } l := t.L.Calc() r := t.R.Calc() switch t.Op { case "+": return l + r case "-": return l - r } return l } func (t *OpTree) Insert(nt *OpTree) *OpTree { if t.L == nil { t.L = nt return t } t.R = nt root := new(OpTree) root.L = t return root } func (t *OpTree) Debug() string { if t == nil { return "" } if t.IsNum { return fmt.Sprintf("%d", t.Num) } return fmt.Sprintf("(%s) %s (%s)", t.L.Debug(), t.Op, t.R.Debug()) } func Parse(s string, start, n int ) (root *OpTree, idx int) { root = new(OpTree) num := 0 defer func () { // 结束前需要把最后一个数字也加进去 root = root.Insert(NewNum(num)) }() for i := start; i < n; i++ { switch s[i] { case '(': c, j := Parse(s, i+1, n) root = root.Insert(c) // fmt.Println(s[i:j+1], c) i = j case ')': return root, i case '+': root = root.Insert( NewNum(num), ) root.Op = "+" num = 0 case '-': root = root.Insert( NewNum(num), ) root.Op = "-" num = 0 case ' ': continue default: // 数字 num = num * 10 + int(s[i] - '0') } } return root, n } func calculate(s string) int { // 先消去所有的空格 n := len(s) root, _ := Parse(s, 0, n) // fmt.Println(root.Debug()) return root.Calc() }
leetcode/224/224.go
0.508544
0.41117
224.go
starcoder
package satellite import ( "log" "math" "strconv" "strings" ) // Constants const TWOPI float64 = math.Pi * 2.0 const DEG2RAD float64 = math.Pi / 180.0 const RAD2DEG float64 = 180.0 / math.Pi const XPDOTP float64 = 1440.0 / (2.0 * math.Pi) // Holds latitude and Longitude in either degrees or radians type LatLong struct { Latitude, Longitude float64 } // Holds X, Y, Z position type Vector3 struct { X, Y, Z float64 } // Holds an azimuth, elevation and range type LookAngles struct { Az, El, Rg float64 } //ParseTLE TLE两行数据转为结构体 // Parses a two line element dataset into a Satellite struct func ParseTLE(line1, line2, gravconst string) (sat Satellite) { sat.Line1 = line1 sat.Line2 = line2 sat.Error = 0 sat.whichconst = getGravConst(gravconst) // LINE 1 BEGIN sat.satnum = parseInt(strings.TrimSpace(line1[2:7])) sat.epochyr = parseInt(line1[18:20]) sat.epochdays = parseFloat(line1[20:32]) // These three can be negative / positive sat.ndot = parseFloat(strings.Replace(line1[33:43], " ", "", 2)) sat.nddot = parseFloat(strings.Replace(line1[44:45]+"."+line1[45:50]+"e"+line1[50:52], " ", "", 2)) sat.bstar = parseFloat(strings.Replace(line1[53:54]+"."+line1[54:59]+"e"+line1[59:61], " ", "", 2)) // LINE 1 END // LINE 2 BEGIN sat.inclo = parseFloat(strings.Replace(line2[8:16], " ", "", 2)) sat.nodeo = parseFloat(strings.Replace(line2[17:25], " ", "", 2)) sat.ecco = parseFloat("." 
+ line2[26:33]) sat.argpo = parseFloat(strings.Replace(line2[34:42], " ", "", 2)) sat.mo = parseFloat(strings.Replace(line2[43:51], " ", "", 2)) sat.no = parseFloat(strings.Replace(line2[52:63], " ", "", 2)) // LINE 2 END return } //TLEToSat TLE两行数据转为结构体 并运行sgp4init // Converts a two line element data set into a Satellite struct and runs sgp4init func TLEToSat(line1, line2 string, gravconst string) Satellite { //sat := Satellite{Line1: line1, Line2: line2} sat := ParseTLE(line1, line2, gravconst) opsmode := "i" sat.no = sat.no / XPDOTP sat.ndot = sat.ndot / (XPDOTP * 1440.0) sat.nddot = sat.nddot / (XPDOTP * 1440.0 * 1440) sat.inclo = sat.inclo * DEG2RAD sat.nodeo = sat.nodeo * DEG2RAD sat.argpo = sat.argpo * DEG2RAD sat.mo = sat.mo * DEG2RAD var year int64 = 0 if sat.epochyr < 57 { year = sat.epochyr + 2000 } else { year = sat.epochyr + 1900 } mon, day, hr, min, sec := days2mdhms(year, sat.epochdays) sat.jdsatepoch = JDay(int(year), int(mon), int(day), int(hr), int(min), int(sec)) sgp4init(&opsmode, sat.jdsatepoch-2433281.5, &sat) return sat } // Parses a string into a float64 value. func parseFloat(strIn string) (ret float64) { ret, err := strconv.ParseFloat(strIn, 64) if err != nil { log.Fatal(err) } return ret } // Parses a string into a int64 value. func parseInt(strIn string) (ret int64) { ret, err := strconv.ParseInt(strIn, 10, 0) if err != nil { log.Fatal(err) } return ret }
helpers.go
0.575588
0.564759
helpers.go
starcoder
package mergesort func Sort(initialArray []int) []int { if len(initialArray) <= 1 { return initialArray } middle := len(initialArray) / 2 leftPartSorted := Sort(initialArray[:middle]) rightPartSorted := Sort(initialArray[middle:]) return merge2(leftPartSorted, rightPartSorted) } /* As long as both arrays are not empty if left is empty append everything remaining from the right to the result and return it if right is empty append everything remaining from the left to the result and return it if( first value of right > first value of left) append to the result the first value of right and shift right by 1 else append to the result the first value of left and shift left by 1 */ func merge(leftPartSorted []int, rightPartSorted []int) []int { mergedArray := make([]int, 0, len(rightPartSorted)+len(leftPartSorted)) for len(leftPartSorted) > 0 || len(rightPartSorted) > 0 { if len(leftPartSorted) == 0 { return append(mergedArray, rightPartSorted...) } if len(rightPartSorted) == 0 { return append(mergedArray, leftPartSorted...) } if leftPartSorted[0] >= rightPartSorted[0] { mergedArray = append(mergedArray, leftPartSorted[0]) leftPartSorted = leftPartSorted[1:] } else { mergedArray = append(mergedArray, rightPartSorted[0]) rightPartSorted = rightPartSorted[1:] } } return mergedArray } func merge2(right []int, left []int) []int { mergedResult := make([]int, 0, len(right)+len(left)) rightPointer := 0 leftPointer := 0 for leftPointer < len(left) && rightPointer < len(right) { if right[rightPointer] > left[leftPointer] { mergedResult = append(mergedResult, right[rightPointer]) leftPointer++ } else { mergedResult = append(mergedResult, left[leftPointer]) rightPointer++ } } if rightPointer == len(right)-1 && leftPointer < len(left) { mergedResult = append(mergedResult, left[leftPointer:]...) } else if leftPointer == len(left)-1 && rightPointer < len(right) { mergedResult = append(mergedResult, right[rightPointer:]...) } return mergedResult }
sorting/mergesort/Mergesort.go
0.59843
0.541045
Mergesort.go
starcoder
package api import ( "encoding/json" ) // BeamStatsResponse struct for BeamStatsResponse type BeamStatsResponse struct { BeamStatsMap *map[string]SoracomBeamStats `json:"beamStatsMap,omitempty"` Date *string `json:"date,omitempty"` Unixtime *int64 `json:"unixtime,omitempty"` } // NewBeamStatsResponse instantiates a new BeamStatsResponse object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed func NewBeamStatsResponse() *BeamStatsResponse { this := BeamStatsResponse{} return &this } // NewBeamStatsResponseWithDefaults instantiates a new BeamStatsResponse object // This constructor will only assign default values to properties that have it defined, // but it doesn't guarantee that properties required by API are set func NewBeamStatsResponseWithDefaults() *BeamStatsResponse { this := BeamStatsResponse{} return &this } // GetBeamStatsMap returns the BeamStatsMap field value if set, zero value otherwise. func (o *BeamStatsResponse) GetBeamStatsMap() map[string]SoracomBeamStats { if o == nil || o.BeamStatsMap == nil { var ret map[string]SoracomBeamStats return ret } return *o.BeamStatsMap } // GetBeamStatsMapOk returns a tuple with the BeamStatsMap field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BeamStatsResponse) GetBeamStatsMapOk() (*map[string]SoracomBeamStats, bool) { if o == nil || o.BeamStatsMap == nil { return nil, false } return o.BeamStatsMap, true } // HasBeamStatsMap returns a boolean if a field has been set. func (o *BeamStatsResponse) HasBeamStatsMap() bool { if o != nil && o.BeamStatsMap != nil { return true } return false } // SetBeamStatsMap gets a reference to the given map[string]SoracomBeamStats and assigns it to the BeamStatsMap field. 
func (o *BeamStatsResponse) SetBeamStatsMap(v map[string]SoracomBeamStats) { o.BeamStatsMap = &v } // GetDate returns the Date field value if set, zero value otherwise. func (o *BeamStatsResponse) GetDate() string { if o == nil || o.Date == nil { var ret string return ret } return *o.Date } // GetDateOk returns a tuple with the Date field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BeamStatsResponse) GetDateOk() (*string, bool) { if o == nil || o.Date == nil { return nil, false } return o.Date, true } // HasDate returns a boolean if a field has been set. func (o *BeamStatsResponse) HasDate() bool { if o != nil && o.Date != nil { return true } return false } // SetDate gets a reference to the given string and assigns it to the Date field. func (o *BeamStatsResponse) SetDate(v string) { o.Date = &v } // GetUnixtime returns the Unixtime field value if set, zero value otherwise. func (o *BeamStatsResponse) GetUnixtime() int64 { if o == nil || o.Unixtime == nil { var ret int64 return ret } return *o.Unixtime } // GetUnixtimeOk returns a tuple with the Unixtime field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BeamStatsResponse) GetUnixtimeOk() (*int64, bool) { if o == nil || o.Unixtime == nil { return nil, false } return o.Unixtime, true } // HasUnixtime returns a boolean if a field has been set. func (o *BeamStatsResponse) HasUnixtime() bool { if o != nil && o.Unixtime != nil { return true } return false } // SetUnixtime gets a reference to the given int64 and assigns it to the Unixtime field. 
func (o *BeamStatsResponse) SetUnixtime(v int64) { o.Unixtime = &v } func (o BeamStatsResponse) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} if o.BeamStatsMap != nil { toSerialize["beamStatsMap"] = o.BeamStatsMap } if o.Date != nil { toSerialize["date"] = o.Date } if o.Unixtime != nil { toSerialize["unixtime"] = o.Unixtime } return json.Marshal(toSerialize) } type NullableBeamStatsResponse struct { value *BeamStatsResponse isSet bool } func (v NullableBeamStatsResponse) Get() *BeamStatsResponse { return v.value } func (v *NullableBeamStatsResponse) Set(val *BeamStatsResponse) { v.value = val v.isSet = true } func (v NullableBeamStatsResponse) IsSet() bool { return v.isSet } func (v *NullableBeamStatsResponse) Unset() { v.value = nil v.isSet = false } func NewNullableBeamStatsResponse(val *BeamStatsResponse) *NullableBeamStatsResponse { return &NullableBeamStatsResponse{value: val, isSet: true} } func (v NullableBeamStatsResponse) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } func (v *NullableBeamStatsResponse) UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) }
openapi/api/model_beam_stats_response.go
0.761095
0.428652
model_beam_stats_response.go
starcoder
package filter import ( "math" "github.com/mdouchement/hdr" "github.com/mdouchement/hdr/hdrcolor" ) // fast gaussian blur based on http://blog.ivank.net/fastest-gaussian-blur.html // and Golang implementation https://github.com/tajtiattila/blur // FastGaussian blurs im using a fast approximation of gaussian blur. // The algorithm has a computational complexity independent of radius. func FastGaussian(src hdr.Image, radius int) hdr.Image { boxes := determineBoxes(float64(radius), 3) tmp := hdr.EmptyAs(src) dst := hdr.EmptyAs(src) boxBlur4(dst, tmp, src, (boxes[0]-1)/2) boxBlur4(dst, tmp, dst, (boxes[1]-1)/2) boxBlur4(dst, tmp, dst, (boxes[2]-1)/2) return dst } func boxBlur4(dst, scratch, src hdr.Image, radius int) { if src == scratch || dst == scratch { panic("scratch must be different than src and dst") } boxBlurH(scratch.(hdr.ImageSet), src, radius) boxBlurV(dst.(hdr.ImageSet), scratch, radius) } func boxBlurH(dst hdr.ImageSet, src hdr.Image, radius int) { w, h := src.Bounds().Dx(), src.Bounds().Dy() r1 := radius + 1 r1f := float64(r1) r2f := float64(2*radius + 1) var vr, vg, vb float64 for y := 0; y < h; y++ { fvr, fvg, fvb, _ := src.HDRAt(0, y).HDRRGBA() lvr, lvg, lvb, _ := src.HDRAt(w-1, y).HDRRGBA() vr = r1f * fvr vg = r1f * fvg vb = r1f * fvb for x := 0; x < radius; x++ { r, g, b, _ := src.HDRAt(x, y).HDRRGBA() vr += r vg += g vb += b } for x := 0; x < r1; x++ { r, g, b, _ := src.HDRAt(x+radius, y).HDRRGBA() vr += r - fvr vg += g - fvg vb += b - fvb dst.Set(x, y, hdrcolor.RGB{R: vr / r2f, G: vg / r2f, B: vb / r2f}) } for x := r1; x < w-radius; x++ { r, g, b, _ := src.HDRAt(x+radius, y).HDRRGBA() r1, g1, b1, _ := src.HDRAt(x-r1, y).HDRRGBA() vr += r - r1 vg += g - g1 vb += b - b1 dst.Set(x, y, hdrcolor.RGB{R: vr / r2f, G: vg / r2f, B: vb / r2f}) } for x := w - radius; x < w; x++ { r, g, b, _ := src.HDRAt(x-r1, y).HDRRGBA() vr += lvr - r vg += lvg - g vb += lvb - b dst.Set(x, y, hdrcolor.RGB{R: vr / r2f, G: vg / r2f, B: vb / r2f}) } } } func boxBlurV(dst 
hdr.ImageSet, src hdr.Image, radius int) { w, h := src.Bounds().Dx(), src.Bounds().Dy() r1 := radius + 1 r1f := float64(r1) r2f := float64(2*radius + 1) var vr, vg, vb float64 for x := 0; x < w; x++ { fvr, fvg, fvb, _ := src.HDRAt(x, 0).HDRRGBA() lvr, lvg, lvb, _ := src.HDRAt(x, h-1).HDRRGBA() vr = r1f * fvr vg = r1f * fvg vb = r1f * fvb for y := 0; y < radius; y++ { r, g, b, _ := src.HDRAt(x, y).HDRRGBA() vr += r vg += g vb += b } for y := 0; y < r1; y++ { r, g, b, _ := src.HDRAt(x, y+radius).HDRRGBA() vr += r - fvr vg += g - fvg vb += b - fvb dst.Set(x, y, hdrcolor.RGB{R: vr / r2f, G: vg / r2f, B: vb / r2f}) } for y := r1; y < h-radius; y++ { r, g, b, _ := src.HDRAt(x, y+radius).HDRRGBA() r1, g1, b1, _ := src.HDRAt(x, y-r1).HDRRGBA() vr += r - r1 vg += g - g1 vb += b - b1 dst.Set(x, y, hdrcolor.RGB{R: vr / r2f, G: vg / r2f, B: vb / r2f}) } for y := h - radius; y < h; y++ { r, g, b, _ := src.HDRAt(x, y-r1).HDRRGBA() vr += lvr - r vg += lvg - g vb += lvb - b dst.Set(x, y, hdrcolor.RGB{R: vr / r2f, G: vg / r2f, B: vb / r2f}) } } } func determineBoxes(sigma float64, nbox int) []int { // standard deviation, number of boxes idealWeight := math.Sqrt((12 * sigma * sigma / float64(nbox)) + 1) wlo := int(math.Floor(idealWeight)) if wlo%2 == 0 { wlo-- } wup := wlo + 2 idealMedian := (12*sigma*sigma - float64(nbox*wlo*wlo+4*nbox*wlo+3*nbox)) / (-4*float64(wlo) - 4) median := int(math.Floor(idealMedian + 0.5)) boxsizes := make([]int, nbox) for i := range boxsizes { if i < median { boxsizes[i] = wlo } else { boxsizes[i] = wup } } return boxsizes }
filter/fast_gaussian.go
0.664976
0.490907
fast_gaussian.go
starcoder
package layout import ( "image" "image/color" "image/draw" "log" "github.com/faiface/gui" ) var _ Layout = Grid{} // Grid represents a grid with rows and columns in each row. // Each row can be a different length. type Grid struct { // Rows represents the number of childs of each row. Rows []int // Background represents the background of the grid as a uniform color. Background color.Color // Gap represents the grid gap, equal on all sides. Gap int // Split represents the way the space is divided among the columns in each row. Split SplitFunc // SplitRows represents the way the space is divided among the rows. SplitRows SplitFunc Margin int Border int BorderColor color.Color // Flip represents the orientation of the grid. // When false, rows are spread in the Y axis and columns in the X axis. // When true, rows are spread in the X axis and columns in the Y axis. Flip bool } func (g Grid) redraw(drw draw.Image, bounds image.Rectangle) { col := g.Background if col == nil { col = color.Black } if g.Border > 0 { bcol := g.BorderColor if bcol == nil { bcol = color.Black } draw.Draw(drw, bounds, image.NewUniform(bcol), image.ZP, draw.Src) } draw.Draw(drw, bounds.Inset(g.Border), image.NewUniform(col), image.ZP, draw.Src) } func (g Grid) Intercept(env gui.Env) gui.Env { return RedrawIntercepter{g.redraw}.Intercept(env) } func (g Grid) Lay(bounds image.Rectangle) []image.Rectangle { gap := g.Gap rows := g.Rows splitMain := g.Split if splitMain == nil { splitMain = EvenSplit } splitSec := g.SplitRows if splitSec == nil { splitSec = EvenSplit } margin := g.Margin flip := g.Flip if margin+gap < 0 { log.Println("Grid goes out of bounds") } if margin+gap < g.Border { log.Println("Grid border will not be shown properly") } ret := make([]image.Rectangle, 0) // Sorry it's not very understandable var H, W int var mX, mY int if flip { H = bounds.Dx() W = bounds.Dy() mX = bounds.Min.Y mY = bounds.Min.X } else { H = bounds.Dy() W = bounds.Dx() mX = bounds.Min.X mY = bounds.Min.Y } 
rowsH := splitSec(len(rows), H-(gap*(len(rows)+1))-margin*2) var X int var Y int Y = gap + mY + margin for y, cols := range rows { h := rowsH[y] colsW := splitMain(cols, W-(gap*(cols+1))-margin*2) X = gap + mX + margin for _, w := range colsW { var r image.Rectangle if flip { r = image.Rect(Y, X, Y+h, X+w) } else { r = image.Rect(X, Y, X+w, Y+h) } ret = append(ret, r) X += gap + w } Y += gap + h } return ret }
layout/grid.go
0.67822
0.508117
grid.go
starcoder
package value import ( "bytes" "encoding/binary" "math" "github.com/caravan/essentials/id" ) type ( // Value is a placeholder for what will eventually be a generic Value interface { Compare(Value) Comparison Bytes() []byte } // Comparison represents the result of an equality comparison Comparison int // Key is a Value that represents a database key Key []byte // Bool is a Value that represents a stored boolean Bool bool // String is a Value that represents a stored string String string // Integer is a Value that represents a stored integer Integer int64 // Float is a Value that represents a stored floating point number Float float64 ) // Comparison results const ( LessThan Comparison = iota - 1 EqualTo GreaterThan Incomparable ) var ( trueBytes = []byte{1} falseBytes = []byte{0} emptyKey = Key{} ) // NewKey returns a new unique database Key func NewKey() Key { return id.New().Bytes() } // Compare returns a Comparison between this Key and another Value func (l Key) Compare(r Value) Comparison { if r, ok := r.(Key); ok { ls := string(l) rs := string(r) switch { case ls == rs: return EqualTo case ls < rs: return LessThan default: return GreaterThan } } return Incomparable } // Bytes returns a byte-array representation of this Key func (l Key) Bytes() []byte { return l } // WithKeys combines a Key with a set of additional Keys func (l Key) WithKeys(k ...Key) Key { keys := append([]Key{l}, k...) return JoinKeys(keys...) 
} // JoinKeys joins a set of Keys or returns an empty Key if provided none func JoinKeys(keys ...Key) Key { if len(keys) == 0 { return emptyKey } var buf bytes.Buffer buf.Write(keys[0].Bytes()) for _, k := range keys[1:] { buf.WriteByte(0) buf.Write(k.Bytes()) } return buf.Bytes() } // Compare returns a Comparison between this Bool and another Value func (l Bool) Compare(r Value) Comparison { if r, ok := r.(Bool); ok { switch { case l == r: return EqualTo case l == false: return LessThan default: return GreaterThan } } return Incomparable } // Bytes returns a byte-array representation of this Bool func (l Bool) Bytes() []byte { if l { return trueBytes } return falseBytes } // Compare returns a Comparison between this String and another Value func (l String) Compare(r Value) Comparison { if r, ok := r.(String); ok { switch { case l == r: return EqualTo case l < r: return LessThan default: return GreaterThan } } return Incomparable } // Bytes returns a byte-array representation of this String func (l String) Bytes() []byte { return []byte(l) } // Compare returns a Comparison between this Integer and another Value func (l Integer) Compare(r Value) Comparison { if r, ok := r.(Integer); ok { switch { case l == r: return EqualTo case l < r: return LessThan default: return GreaterThan } } return Incomparable } // Bytes returns a byte-array representation of this Integer func (l Integer) Bytes() []byte { var buf bytes.Buffer holder := make([]byte, 8) if l >= 0 { buf.WriteByte(1) binary.BigEndian.PutUint64(holder, uint64(l)) buf.Write(holder) } else { buf.WriteByte(0) binary.BigEndian.PutUint64(holder, uint64(-l)) buf.Write(holder) } return buf.Bytes() } // Compare returns a Comparison between this Float and another Value func (l Float) Compare(r Value) Comparison { if r, ok := r.(Float); ok { switch { case l == r: return EqualTo case l < r: return LessThan default: return GreaterThan } } return Incomparable } // Bytes returns a byte-array representation of this Float func 
(l Float) Bytes() []byte { var buf bytes.Buffer i := int64(l) buf.Write(Integer(i).Bytes()) u := math.Float64bits(float64(l) - float64(i)) holder := make([]byte, 8) binary.BigEndian.PutUint64(holder, u) buf.Write(holder) return buf.Bytes() }
value/value.go
0.769427
0.464416
value.go
starcoder
package tile import ( "math" "sync" ) type costFn = func(Tile) uint16 // Edge represents an edge of the path type edge struct { Point Cost uint32 } // Around performs a breadth first search around a point. func (m *Grid) Around(from Point, distance uint32, costOf costFn, fn Iterator) { start, ok := m.At(from.X, from.Y) if !ok { return } fn(from, start) // Acquire a frontier heap for search frontier := acquireHeap() frontier.Push(from.Integer(), 0) defer releaseHeap(frontier) // For pre-allocating, we use πr2 since BFS will result in a approximation // of a circle, in the worst case. maxArea := int(math.Ceil(math.Pi * float64(distance*distance))) reached := make(map[uint32]struct{}, maxArea) reached[from.Integer()] = struct{}{} for !frontier.IsEmpty() { pCurr, _ := frontier.Pop() current := unpackPoint(pCurr) // Get all of the neighbors m.Neighbors(current.X, current.Y, func(next Point, nextTile Tile) { if d := from.DistanceTo(next); d > distance { return // Too far } if cost := costOf(nextTile); cost == 0 { return // Blocked tile, ignore completely } // Add to the search queue pNext := next.Integer() if _, ok := reached[pNext]; !ok { frontier.Push(pNext, 1) reached[pNext] = struct{}{} fn(next, nextTile) } }) } } // Path calculates a short path and the distance between the two locations func (m *Grid) Path(from, to Point, costOf costFn) ([]Point, int, bool) { // Acquire a frontier heap for search frontier := acquireHeap() frontier.Push(from.Integer(), 0) defer releaseHeap(frontier) // For pre-allocating, we use πr2 since BFS will result in a approximation // of a circle, in the worst case. 
distance := float64(from.DistanceTo(to)) maxArea := int(math.Ceil(math.Pi * float64(distance*distance))) edges := make(map[uint32]edge, maxArea) edges[from.Integer()] = edge{ Point: from, Cost: 0, } for !frontier.IsEmpty() { pCurr, _ := frontier.Pop() current := unpackPoint(pCurr) // We have a path to the goal if current.Equal(to) { dist := int(edges[current.Integer()].Cost) path := make([]Point, 0, dist) curr, _ := edges[current.Integer()] for !curr.Point.Equal(from) { path = append(path, curr.Point) curr = edges[curr.Point.Integer()] } return path, dist, true } // Get all of the neighbors m.Neighbors(current.X, current.Y, func(next Point, nextTile Tile) { cNext := costOf(nextTile) if cNext == 0 { return // Blocked tile, ignore completely } pNext := next.Integer() newCost := edges[pCurr].Cost + uint32(cNext) // cost(current, next) if e, ok := edges[pNext]; !ok || newCost < e.Cost { priority := newCost + next.DistanceTo(to) // heuristic frontier.Push(next.Integer(), priority) edges[pNext] = edge{ Point: current, Cost: newCost, } } }) } return nil, 0, false } // ----------------------------------------------------------------------------- var heapPool = sync.Pool{ New: func() interface{} { return new(heap32) }, } // Acquires a new instance of a heap func acquireHeap() *heap32 { h := heapPool.Get().(*heap32) h.Reset() return h } // Releases a heap instance back to the pool func releaseHeap(h *heap32) { heapPool.Put(h) } // ----------------------------------------------------------------------------- // heapNode represents a ranked node for the heap. type heapNode struct { Value uint32 // The value of the ranked node. Rank uint32 // The rank associated with the ranked node. } type heap32 []heapNode func newHeap32(capacity int) heap32 { return make(heap32, 0, capacity) } // Reset clears the heap for reuse func (h *heap32) Reset() { *h = (*h)[:0] } // Push pushes the element x onto the heap. // The complexity is O(log n) where n = h.Len(). 
func (h *heap32) Push(v, rank uint32) { *h = append(*h, heapNode{ Value: v, Rank: rank, }) h.up(h.Len() - 1) } // Pop removes and returns the minimum element (according to Less) from the heap. // The complexity is O(log n) where n = h.Len(). // Pop is equivalent to Remove(h, 0). func (h *heap32) Pop() (uint32, bool) { n := h.Len() - 1 if n < 0 { return 0, false } h.Swap(0, n) h.down(0, n) return h.pop(), true } // Remove removes and returns the element at index i from the heap. // The complexity is O(log n) where n = h.Len(). func (h *heap32) Remove(i int) uint32 { n := h.Len() - 1 if n != i { h.Swap(i, n) if !h.down(i, n) { h.up(i) } } return h.pop() } func (h *heap32) pop() uint32 { old := *h n := len(old) no := old[n-1] *h = old[0 : n-1] return no.Value } func (h *heap32) up(j int) { for { i := (j - 1) / 2 // parent if i == j || !h.Less(j, i) { break } h.Swap(i, j) j = i } } func (h *heap32) down(i0, n int) bool { i := i0 for { j1 := 2*i + 1 if j1 >= n || j1 < 0 { // j1 < 0 after int overflow break } j := j1 // left child if j2 := j1 + 1; j2 < n && h.Less(j2, j1) { j = j2 // = 2*i + 2 // right child } if !h.Less(j, i) { break } h.Swap(i, j) i = j } return i > i0 } func (h heap32) Len() int { return len(h) } func (h heap32) IsEmpty() bool { return len(h) == 0 } func (h heap32) Less(i, j int) bool { return h[i].Rank < h[j].Rank } func (h *heap32) Swap(i, j int) { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] }
path.go
0.801159
0.465509
path.go
starcoder
package ring

import (
	"math/big"
	"math/bits"
)

// MForm switches a to the Montgomery domain by computing
// a*2^64 mod q.
// u holds the Barrett parameters produced by BRedParams(q).
func MForm(a, q uint64, u []uint64) (r uint64) {
	mhi, _ := bits.Mul64(a, u[1])
	r = -(a*u[0] + mhi) * q
	if r >= q {
		r -= q
	}
	return
}

// MFormConstant switches a to the Montgomery domain by computing
// a*2^64 mod q in constant time.
// The result is between 0 and 2*q-1.
func MFormConstant(a, q uint64, u []uint64) (r uint64) {
	mhi, _ := bits.Mul64(a, u[1])
	r = -(a*u[0] + mhi) * q
	return
}

// InvMForm switches a from the Montgomery domain back to the
// standard domain by computing a*(1/2^64) mod q.
func InvMForm(a, q, qInv uint64) (r uint64) {
	r, _ = bits.Mul64(a*qInv, q)
	r = q - r
	if r >= q {
		r -= q
	}
	return
}

// InvMFormConstant switches a from the Montgomery domain back to the
// standard domain by computing a*(1/2^64) mod q in constant time.
// The result is between 0 and 2*q-1.
func InvMFormConstant(a, q, qInv uint64) (r uint64) {
	r, _ = bits.Mul64(a*qInv, q)
	r = q - r
	return
}

// MRedParams computes the parameter qInv = (q^-1) mod 2^64,
// required for MRed.
// NOTE(review): the repeated squaring only yields a valid inverse when q is
// odd (invertible mod 2^64) — confirm callers guarantee this.
func MRedParams(q uint64) (qInv uint64) {
	qInv = 1
	for i := 0; i < 63; i++ {
		qInv *= q
		q *= q
	}
	return
}

// MRed computes x * y * (1/2^64) mod q (Montgomery reduction of the
// 128-bit product of x and y).
func MRed(x, y, q, qInv uint64) (r uint64) {
	mhi, mlo := bits.Mul64(x, y)
	hhi, _ := bits.Mul64(mlo*qInv, q)
	r = mhi - hhi + q
	if r >= q {
		r -= q
	}
	return
}

// MRedConstant computes x * y * (1/2^64) mod q in constant time.
// The result is between 0 and 2*q-1.
func MRedConstant(x, y, q, qInv uint64) (r uint64) {
	ahi, alo := bits.Mul64(x, y)
	H, _ := bits.Mul64(alo*qInv, q)
	r = ahi - H + q
	return
}

// BRedParams computes the parameters for the BRed algorithm.
// Returns ((2^128)/q)/(2^64) and (2^128)/q mod 2^64.
func BRedParams(q uint64) (params []uint64) {
	bigR := new(big.Int).Lsh(NewUint(1), 128)
	bigR.Quo(bigR, NewUint(q)) // 2^radix // q
	mhi := new(big.Int).Rsh(bigR, 64).Uint64()
	mlo := bigR.Uint64()
	return []uint64{mhi, mlo}
}

// BRedAdd computes a mod q using a single Barrett step.
func BRedAdd(a, q uint64, u []uint64) (r uint64) {
	mhi, _ := bits.Mul64(a, u[0])
	r = a - mhi*q
	if r >= q {
		r -= q
	}
	return
}

// BRedAddConstant computes a mod q in constant time.
// The result is between 0 and 2*q-1.
func BRedAddConstant(x, q uint64, u []uint64) uint64 {
	s0, _ := bits.Mul64(x, u[0])
	return x - s0*q
}

// BRed computes x*y mod q using Barrett reduction of the 128-bit product.
func BRed(x, y, q uint64, u []uint64) (r uint64) {
	var mhi, mlo, lhi, hhi, hlo, s0, carry uint64
	mhi, mlo = bits.Mul64(x, y)
	// computes r = mhi * uhi + (mlo * uhi + mhi * ulo)<<64 + (mlo * ulo)) >> 128
	r = mhi * u[0] // r = mhi * uhi
	hhi, hlo = bits.Mul64(mlo, u[0]) // mlo * uhi
	r += hhi
	lhi, _ = bits.Mul64(mlo, u[1]) // mlo * ulo
	s0, carry = bits.Add64(hlo, lhi, 0)
	r += carry
	hhi, hlo = bits.Mul64(mhi, u[1]) // mhi * ulo
	r += hhi
	_, carry = bits.Add64(hlo, s0, 0)
	r += carry
	r = mlo - r*q
	if r >= q {
		r -= q
	}
	return
}

// BRedConstant computes x*y mod q in constant time.
// The result is between 0 and 2*q-1.
func BRedConstant(x, y, q uint64, u []uint64) (r uint64) {
	var mhi, mlo, lhi, hhi, hlo, s0, carry uint64
	mhi, mlo = bits.Mul64(x, y)
	// computes r = mhi * uhi + (mlo * uhi + mhi * ulo)<<64 + (mlo * ulo)) >> 128
	r = mhi * u[0] // r = mhi * uhi
	hhi, hlo = bits.Mul64(mlo, u[0]) // mlo * uhi
	r += hhi
	lhi, _ = bits.Mul64(mlo, u[1]) // mlo * ulo
	s0, carry = bits.Add64(hlo, lhi, 0)
	r += carry
	hhi, hlo = bits.Mul64(mhi, u[1]) // mhi * ulo
	r += hhi
	_, carry = bits.Add64(hlo, s0, 0)
	r += carry
	r = mlo - r*q
	return
}

// CRed reduce returns a mod q where a is between 0 and 2*q-1.
func CRed(a, q uint64) uint64 {
	if a >= q {
		return a - q
	}
	return a
}
ring/modular_reduction.go
0.764188
0.508117
modular_reduction.go
starcoder
package geodist

import (
	"errors"
	"math"
)

// these constants are used for vincentyDistance()
const a = 6378137
const b = 6356752.3142
const f = 1 / 298.257223563 // WGS-84 ellipsoid

/*
VincentyDistance computes the distance between two geographic coordinates
using Vincenty's inverse formula on the WGS-84 ellipsoid.

Args:
	p1: the 'starting' point, given in latitude, longitude as a Coord struct
	p2: the 'ending' point

Returns:
	The distance between the two points in (1) miles and (2) kilometers,
	and a nil error on success. If the iteration fails to converge,
	-1, -1 and a non-nil error are returned. Coincident points yield
	0, 0, nil.
*/
func VincentyDistance(p1, p2 Coord) (float64, float64, error) {
	// convert from degrees to radians
	piRad := math.Pi / 180
	p1.Lat = p1.Lat * piRad
	p1.Lon = p1.Lon * piRad
	p2.Lat = p2.Lat * piRad
	p2.Lon = p2.Lon * piRad
	// L: difference in longitude; U1/U2: reduced latitudes.
	L := p2.Lon - p1.Lon
	U1 := math.Atan((1 - f) * math.Tan(p1.Lat))
	U2 := math.Atan((1 - f) * math.Tan(p2.Lat))
	sinU1 := math.Sin(U1)
	cosU1 := math.Cos(U1)
	sinU2 := math.Sin(U2)
	cosU2 := math.Cos(U2)
	lambda := L
	lambdaP := 2 * math.Pi
	iterLimit := 20
	var sinLambda, cosLambda, sinSigma float64
	var cosSigma, sigma, sinAlpha, cosSqAlpha, cos2SigmaM, C float64
	// Iterate lambda until it converges to within 1e-12 or the
	// iteration budget is exhausted.
	for {
		if math.Abs(lambda-lambdaP) > 1e-12 && (iterLimit > 0) {
			iterLimit -= 1
		} else {
			break
		}
		sinLambda = math.Sin(lambda)
		cosLambda = math.Cos(lambda)
		sinSigma = math.Sqrt((cosU2*sinLambda)*(cosU2*sinLambda) +
			(cosU1*sinU2-sinU1*cosU2*cosLambda)*(cosU1*sinU2-sinU1*cosU2*cosLambda))
		if sinSigma == 0 {
			return 0, 0, nil // co-incident points
		}
		cosSigma = sinU1*sinU2 + cosU1*cosU2*cosLambda
		sigma = math.Atan2(sinSigma, cosSigma)
		sinAlpha = cosU1 * cosU2 * sinLambda / sinSigma
		cosSqAlpha = 1 - sinAlpha*sinAlpha
		cos2SigmaM = cosSigma - 2*sinU1*sinU2/cosSqAlpha
		if math.IsNaN(cos2SigmaM) {
			cos2SigmaM = 0 // equatorial line: cosSqAlpha=0
		}
		C = f / 16 * cosSqAlpha * (4 + f*(4-3*cosSqAlpha))
		lambdaP = lambda
		lambda = L + (1-C)*f*sinAlpha*(sigma+C*sinSigma*(cos2SigmaM+C*cosSigma*(-1+2*cos2SigmaM*cos2SigmaM)))
	}
	// NOTE(review): if lambda converges on exactly the last permitted
	// iteration, iterLimit is 0 here and a spurious failure is reported —
	// confirm whether that edge case matters to callers.
	if iterLimit == 0 {
		return -1, -1, errors.New("vincenty algorithm failed to converge") // formula failed to converge
	}
	uSq := cosSqAlpha * (a*a - b*b) / (b * b)
	A := 1 + uSq/16384*(4096+uSq*(-768+uSq*(320-175*uSq)))
	B := uSq / 1024 * (256 + uSq*(-128+uSq*(74-47*uSq)))
	deltaSigma := B * sinSigma * (cos2SigmaM + B/4*(cosSigma*(-1+2*cos2SigmaM*cos2SigmaM)-B/6*cos2SigmaM*(-3+4*sinSigma*sinSigma)*(-3+4*cos2SigmaM*cos2SigmaM)))
	meters := b * A * (sigma - deltaSigma)
	kilometers := meters / 1000
	miles := kilometers * 0.621371
	return miles, kilometers, nil
}
vincenty.go
0.833358
0.577912
vincenty.go
starcoder
package stack

// This JSON document lays out the basic structure of the CloudFormation
// template for our pool stacks. The Resources here will be transferred to the
// Pool.*Template attributes.
// Make certain that the appropriate Pool structures are modified when changing
// the structure of this template.
var poolTmpl = []byte(`{ "AWSTemplateFormatVersion": "2010-09-09", "Description": "Galaxy Pool Template", "Resources": { "asg_": { "Properties": { "AvailabilityZones": [], "Cooldown": "300", "DesiredCapacity": "1", "HealthCheckGracePeriod": "300", "HealthCheckType": "EC2", "LaunchConfigurationName": {}, "MinSize": "1", "MaxSize": "2", "Tags": [], "VPCZoneIdentifier": [] }, "Type": "AWS::AutoScaling::AutoScalingGroup" }, "elb_": { "Properties": { "CrossZone": true, "HealthCheck": { "HealthyThreshold": "2", "Interval": "30", "Target": "HTTP:80/health", "Timeout": "5", "UnhealthyThreshold": "2" }, "Listeners": [ { "InstancePort": "80", "InstanceProtocol": "HTTP", "LoadBalancerPort": "80", "Protocol": "HTTP" } ], "SecurityGroups": [], "AvailabilityZones": [] }, "Type": "AWS::ElasticLoadBalancing::LoadBalancer" }, "lc_": { "Properties": { "AssociatePublicIpAddress": true, "BlockDeviceMappings": [ { "DeviceName": "/dev/sda1", "Ebs": { "VolumeType": "gp2", "VolumeSize": 8 } } ], "InstanceType": "", "KeyName": "", "SecurityGroups": [] }, "Type": "AWS::AutoScaling::LaunchConfiguration" } } }`)

// scalingTemplate holds the CPU-utilization based scale-up/scale-down
// policies and their CloudWatch alarms. The "Ref": "ASG" placeholders are
// resolved against the pool's AutoScalingGroup.
var scalingTemplate = []byte(`{ "ScaleDown": { "Properties": { "AdjustmentType": "ChangeInCapacity", "AutoScalingGroupName": { "Ref": "ASG" }, "Cooldown": "300", "ScalingAdjustment": "-1" }, "Type": "AWS::AutoScaling::ScalingPolicy" }, "ScaleDownAlarm": { "Properties": { "ActionsEnabled": "true", "AlarmActions": [ { "Ref": "ScaleDown" } ], "ComparisonOperator": "LessThanThreshold", "Dimensions": [ { "Name": "AutoScalingGroupName", "Value": { "Ref": "ASG" } } ], "EvaluationPeriods": "5", "MetricName": "CPUUtilization", "Namespace": "AWS/EC2", "Period": "60", "Statistic": "Average", "Threshold": "30.0" }, "Type": "AWS::CloudWatch::Alarm" }, "ScaleUp": { "Properties": { "AdjustmentType": "ChangeInCapacity", "AutoScalingGroupName": { "Ref": "ASG" }, "Cooldown": "300", "ScalingAdjustment": "1" }, "Type": "AWS::AutoScaling::ScalingPolicy" }, "ScaleUpAlarm": { "Properties": { "ActionsEnabled": "true", "AlarmActions": [ { "Ref": "ScaleUp" } ], "ComparisonOperator": "GreaterThanThreshold", "Dimensions": [ { "Name": "AutoScalingGroupName", "Value": { "Ref": "ASG" } } ], "EvaluationPeriods": "5", "MetricName": "CPUUtilization", "Namespace": "AWS/EC2", "Period": "60", "Statistic": "Average", "Threshold": "80.0" }, "Type": "AWS::CloudWatch::Alarm" } }`)
stack/pool_template.go
0.533641
0.443721
pool_template.go
starcoder
package fsm import ( "sync" "github.com/pkg/errors" ) var ( // ErrBuild represents the error of building an FSM. ErrBuild = errors.New("error when building an FSM") // ErrTransitionNotFound indicates that there doesn't exist a transition defined on the source state and the event. ErrTransitionNotFound = errors.New("unable to find a valid transition") // ErrInvalidTransition indicates that the actual destination state after transition is not defined. ErrInvalidTransition = errors.New("invalid transition") ) // State represents the state and it is a string. type State string // EventType represents a event type and it is a string. type EventType string // Event is the interface of the events that could be handled by an FSM type Event interface { // Type returns the event type Type() EventType } // Transition is the interface of the transition that would happen on a certain state when receiving a certain event. // The transition returns a destination state where an FSM should transit into or an error. type Transition func(Event) (State, error) // FSM is the interface of an FSM (finite state machine). It allows to define the transition logic and destinations // after the transition, and intake the event to trigger the transition. The event handling is synchronized, so that it // guarantee always processing one event at any time. An FSM must have exactly one initial state. type FSM interface { // CurrentState returns the current state. CurrentState() State // Handle handles an event and return error if there is any. Handle(Event) error } // Builder is an FSM builder to help construct an FSM. type Builder struct { states map[State]bool td map[State]map[EventType]transAndDsts } // NewBuilder creates an FSM builder instance with empty setup. 
func NewBuilder() *Builder { return &Builder{ states: make(map[State]bool), td: make(map[State]map[EventType]transAndDsts), } } // AddInitialState adds an initial state func (b *Builder) AddInitialState(s State) *Builder { if _, ok := b.states[s]; !ok { b.states[s] = true } return b } // AddStates adds the non-initial state(s) func (b *Builder) AddStates(states ...State) *Builder { for _, s := range states { if _, ok := b.states[s]; !ok { b.states[s] = false } } return b } // AddTransition adds a transition setup, including the source state, the event to trigger the transition, the // transition callback, and the legal destination transitions. func (b *Builder) AddTransition(src State, et EventType, trans Transition, dsts []State) *Builder { tdPerState, ok := b.td[src] if !ok { tdPerState = make(map[EventType]transAndDsts) b.td[src] = tdPerState } if _, ok := tdPerState[et]; !ok { dstsCpy := make([]State, len(dsts)) copy(dstsCpy, dsts) tdPerState[et] = transAndDsts{ trans: trans, dsts: dstsCpy, } } return b } // Build builds an FSM instance based on the configured states and transition setup. 
func (b *Builder) Build() (FSM, error) { m := &fsm{ td: make(map[State]map[EventType]transAndDsts), } initStateFound := false for s, init := range b.states { m.td[s] = nil if !init { continue } if initStateFound { return nil, errors.Wrap(ErrBuild, "more than one initial state defined") } m.state = s initStateFound = true } if !initStateFound { return nil, errors.Wrap(ErrBuild, "no initial state defined") } for s, tdPerState := range b.td { if _, ok := m.td[s]; !ok { return nil, errors.Wrapf(ErrBuild, "transition from a undefined state %s", s) } for _, td := range tdPerState { for _, dst := range td.dsts { if _, ok := m.td[dst]; !ok { return nil, errors.Wrapf(ErrBuild, "transition to a undefined state %s", dst) } } } m.td[s] = tdPerState } return m, nil } // fsm implements FSM interface type fsm struct { mutex sync.RWMutex state State td map[State]map[EventType]transAndDsts } func (m *fsm) CurrentState() State { m.mutex.RLock() defer m.mutex.RUnlock() return m.state } func (m *fsm) Handle(e Event) error { m.mutex.Lock() defer m.mutex.Unlock() tdPerState, ok := m.td[m.state] if !ok { return errors.Wrapf( ErrTransitionNotFound, "transition and destinations are found for state %s", m.state, ) } td, ok := tdPerState[e.Type()] if !ok { return errors.Wrapf( ErrTransitionNotFound, "transition and destinations are found for state %s, event %s", m.state, e.Type(), ) } dst, err := td.trans(e) if err != nil { return err } for _, d := range td.dsts { if dst == d { m.state = dst return nil } } return errors.Wrapf(ErrInvalidTransition, "undefined transition from state %s to state %s", m.state, dst) } type transAndDsts struct { trans Transition dsts []State }
vendor/github.com/iotexproject/go-fsm/fsm.go
0.671471
0.468061
fsm.go
starcoder
package sqlparser

// SQLAstVisitor is the visitor interface for the SQL AST. Each node type's
// Accept method dispatches back to Visit, implementing classic double
// dispatch over the AST.
type SQLAstVisitor interface {
	Visit(SQLNode) error
}

// The Accept methods below are purely mechanical: every node type forwards
// itself to the visitor. Receiver kinds (value vs. pointer) mirror how each
// node type is used elsewhere in the package.

func (node *AccessMode) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *AliasedExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *AliasedTableExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *AndExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Auth) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *AuthRevoke) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *AutoIncSpec) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Begin) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *BinaryExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node BoolVal) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *CaseExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node ColIdent) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *ColName) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *CollateExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *ColumnDefinition) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node Columns) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *ColumnType) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node Comments) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Commit) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *ComparisonExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *ConstraintDefinition) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *ConvertExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *ConvertType) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *ConvertUsingExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *CurTimeFuncExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *DBDDL) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *DDL) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Default) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Delete) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *DescribeTable) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Exec) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *ExecSubquery) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *ExistsExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Explain) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node Exprs) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *ForeignKeyDefinition) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *FuncExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node GroupBy) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *GroupConcatExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *IndexDefinition) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *IndexHints) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *IndexInfo) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Insert) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *IntervalExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *IsExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *IsolationLevel) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node JoinCondition) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *JoinTableExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Limit) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node ListArg) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *MatchExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node Nextval) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *NotExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *NullVal) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *OptLike) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node OnDup) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node OrderBy) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *OrExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Order) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *OtherAdmin) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *OtherRead) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *ParenSelect) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *ParenTableExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *PartitionDefinition) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node Partitions) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *PartitionSpec) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *RangeCond) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node ReferenceAction) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Registry) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Release) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Rollback) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *SQLVal) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *SRollback) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Savepoint) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Select) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node SelectExprs) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Set) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *SetExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node SetExprs) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *SetTransaction) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Show) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *ShowFilter) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Sleep) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *StarExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Stream) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Subquery) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *SubstrExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node TableExprs) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node TableIdent) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node TableName) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node TableNames) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *TableSpec) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *TimestampFuncExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *UnaryExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Union) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *UnionSelect) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Update) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *UpdateExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node UpdateExprs) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Use) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node ValTuple) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node Values) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *ValuesFuncExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node VindexParam) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *VindexSpec) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *When) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *Where) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
func (node *XorExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) }
go/vt/sqlparser/external_visitor.go
0.699254
0.471527
external_visitor.go
starcoder
package ratelimit

import (
	"context"
	"time"

	"github.com/tikv/pd/pkg/syncutil"
	"golang.org/x/time/rate"
)

// RateLimiter is a rate limiter based on `golang.org/x/time/rate`.
// It implements `Available` function which is not included in `golang.org/x/time/rate`.
// Note: AvailableN will increase the wait time of WaitN.
type RateLimiter struct {
	// mu makes the reserve-then-cancel sequence in Available atomic with
	// respect to the other token-consuming calls.
	mu      syncutil.Mutex
	limiter *rate.Limiter
}

// NewRateLimiter returns a new Limiter that allows events up to rate r (it means limiter refill r token per second)
// and permits bursts of at most b tokens.
func NewRateLimiter(r float64, b int) *RateLimiter {
	return &RateLimiter{limiter: rate.NewLimiter(rate.Limit(r), b)}
}

// Available returns whether limiter has enough tokens.
// It probes by reserving n tokens and immediately cancelling the
// reservation; a zero delay means the tokens were available now.
// Note: Available will increase the wait time of WaitN.
func (l *RateLimiter) Available(n int) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	now := time.Now()
	r := l.limiter.ReserveN(now, n)
	delay := r.DelayFrom(now)
	r.CancelAt(now)
	return delay == 0
}

// Allow is same as `rate.Limiter.Allow`.
func (l *RateLimiter) Allow() bool {
	return l.AllowN(1)
}

// AllowN is same as `rate.Limiter.AllowN`.
func (l *RateLimiter) AllowN(n int) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	now := time.Now()
	return l.limiter.AllowN(now, n)
}

// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst).
func (l *RateLimiter) SetBurst(burst int) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.limiter.SetBurst(burst)
}

// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit).
func (l *RateLimiter) SetLimit(limit rate.Limit) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.limiter.SetLimit(limit)
}

// Limit returns the maximum overall event rate.
func (l *RateLimiter) Limit() rate.Limit {
	return l.limiter.Limit()
}

// Burst returns the maximum burst size. Burst is the maximum number of tokens
// that can be consumed in a single call to Allow, Reserve, or Wait, so higher
// Burst values allow more events to happen at once.
// A zero Burst allows no events, unless limit == Inf.
func (l *RateLimiter) Burst() int {
	return l.limiter.Burst()
}

// WaitN blocks until lim permits n events to happen.
// It returns an error if n exceeds the Limiter's burst size, the Context is
// canceled, or the expected wait time exceeds the Context's Deadline.
// The burst limit is ignored if the rate limit is Inf.
// NOTE(review): l.mu is held for the full wait, so Available/AllowN block
// until WaitN returns — confirm this serialization is intended.
func (l *RateLimiter) WaitN(ctx context.Context, n int) error {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.limiter.WaitN(ctx, n)
}
pkg/ratelimit/ratelimiter.go
0.81604
0.413092
ratelimiter.go
starcoder
package siec

import (
	"math/big"
)

// affineToProjective lifts an affine point (x, y) to Jacobian projective
// coordinates (X, Y, Z) with Z = 1, reducing the inputs mod P.
func (curve *SIEC255Params) affineToProjective(x, y *big.Int) (X, Y, Z *big.Int) {
	X, Y, Z = new(big.Int), new(big.Int), new(big.Int)
	X.Set(x)
	X.Mod(X, curve.P)
	Y.Set(y)
	Y.Mod(Y, curve.P)
	Z.SetInt64(1)
	return
}

// projectiveToAffine converts Jacobian coordinates back to affine:
// x = X/Z^2, y = Y/Z^3 mod P. The point at infinity (Z == 0) maps to (0, 0).
func (curve *SIEC255Params) projectiveToAffine(X, Y, Z *big.Int) (x, y *big.Int) {
	x, y = new(big.Int), new(big.Int)
	if Z.Sign() == 0 {
		return new(big.Int), new(big.Int)
	}
	Zinv := new(big.Int).ModInverse(Z, curve.P)
	Zinvsq := new(big.Int).Mul(Zinv, Zinv)
	x.Mul(X, Zinvsq)
	x.Mod(x, curve.P)
	y.Mul(Y, Zinvsq.Mul(Zinvsq, Zinv))
	y.Mod(y, curve.P)
	return
}

// mmadd2007bl adds two points given in Jacobian coordinates,
// assuming Z1 = Z2 = 1 (the "mmadd-2007-bl" formulas).
func (curve *SIEC255Params) mmadd2007bl(X1, Y1, X2, Y2 *big.Int) (X3, Y3, Z3 *big.Int) {
	// w and ww are scratch registers reused across steps; the statement
	// order below is significant.
	w := new(big.Int)
	ww := new(big.Int)
	// H = X2-X1
	H := new(big.Int).Sub(X2, X1)
	// HH = H^2
	HH := new(big.Int).Mul(H, H)
	// I = 4*HH
	I := new(big.Int).Lsh(HH, 2)
	// J = H*I
	J := new(big.Int).Mul(H, I)
	// r = 2*(Y2-Y1)
	w.Sub(Y2, Y1)
	r := new(big.Int).Lsh(w, 1)
	// V = X1*I
	V := new(big.Int).Mul(X1, I)
	// X3 = r^2-J-2*V
	w.Mul(r, r)
	ww.Add(J, ww.Lsh(V, 1))
	X3 = new(big.Int).Sub(w, ww)
	// Y3 = r*(V-X3)-2*Y1*J
	w.Mul(r, w.Sub(V, X3))
	ww.Lsh(ww.Mul(Y1, J), 1)
	Y3 = new(big.Int).Sub(w, ww)
	// Z3 = 2*H
	Z3 = new(big.Int).Lsh(H, 1)
	return
}

// add2007bl adds two Jacobian points with arbitrary Z coordinates.
// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2001-b
func (curve *SIEC255Params) add2007bl(X1, Y1, Z1, X2, Y2, Z2 *big.Int) (X3, Y3, Z3 *big.Int) {
	// All-zero coordinates encode the identity element; adding it is a no-op.
	if X1.BitLen() == 0 && Y1.BitLen() == 0 && Z1.BitLen() == 0 {
		return X2, Y2, Z2
	}
	if X2.BitLen() == 0 && Y2.BitLen() == 0 && Z2.BitLen() == 0 {
		return X1, Y1, Z1
	}
	w := new(big.Int)
	ww := new(big.Int)
	// Z1Z1 = Z1^2
	Z1Z1 := new(big.Int).Mul(Z1, Z1)
	// Z2Z2 = Z2^2
	Z2Z2 := new(big.Int).Mul(Z2, Z2)
	// U1 = X1*Z2Z2
	U1 := new(big.Int).Mul(X1, Z2Z2)
	// U2 = X2*Z1Z1
	U2 := new(big.Int).Mul(X2, Z1Z1)
	// S1 = Y1*Z2*Z2Z2
	w.Mul(Z2, Z2Z2)
	S1 := new(big.Int).Mul(Y1, w)
	// S2 = Y2*Z1*Z1Z1
	w.Mul(Z1, Z1Z1)
	S2 := new(big.Int).Mul(Y2, w)
	// H = U2-U1
	H := new(big.Int).Sub(U2, U1)
	// I = (2*H)^2
	ww.Lsh(H, 1)
	I := new(big.Int).Mul(ww, ww)
	// J = H*I
	J := new(big.Int).Mul(H, I)
	// r = 2*(S2-S1)
	w.Sub(S2, S1)
	r := new(big.Int).Lsh(w, 1)
	// V = U1*I
	V := new(big.Int).Mul(U1, I)
	// X3 = r^2-J-2*V
	w.Mul(r, r)
	ww.Lsh(V, 1)
	ww.Add(J, ww)
	X3 = new(big.Int).Sub(w, ww)
	X3.Mod(X3, curve.P)
	// Y3 = r*(V-X3)-2*S1*J
	w.Sub(V, X3)
	w.Mul(r, w)
	ww.Mul(S1, J)
	ww.Lsh(ww, 1)
	Y3 = new(big.Int).Sub(w, ww)
	Y3.Mod(Y3, curve.P)
	// Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2)*H
	w.Add(Z1, Z2)
	w.Mul(w, w)
	ww.Add(Z1Z1, Z2Z2)
	w.Sub(w, ww)
	Z3 = new(big.Int).Mul(w, H)
	Z3.Mod(Z3, curve.P)
	return
}

// dbl2009l doubles a Jacobian point.
// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
func (curve *SIEC255Params) dbl2009l(X1, Y1, Z1 *big.Int) (X3, Y3, Z3 *big.Int) {
	w := new(big.Int)
	m := new(big.Int)
	// A = X1^2
	A := new(big.Int).Mul(X1, X1)
	A.Mod(A, curve.P)
	// B = Y1^2
	B := new(big.Int).Mul(Y1, Y1)
	B.Mod(B, curve.P)
	// C = B^2
	C := new(big.Int).Mul(B, B)
	C.Mod(C, curve.P)
	// D = 2*((X1+B)^2-A-C)
	w.Add(X1, B)
	D := new(big.Int).Lsh(w.Sub(w.Mul(w, w), m.Add(A, C)), 1)
	D.Mod(D, curve.P)
	// E = 3*A
	E := new(big.Int).Mul(three, A)
	E.Mod(E, curve.P)
	// F = E^2
	F := new(big.Int).Mul(E, E)
	F.Mod(F, curve.P)
	// X3 = F-2*D
	X3 = new(big.Int).Sub(F, w.Lsh(D, 1))
	X3.Mod(X3, curve.P)
	// Y3 = E*(D-X3)-8*C
	Y3 = new(big.Int).Sub(w.Mul(E, w.Sub(D, X3)), m.Mul(eight, C))
	Y3.Mod(Y3, curve.P)
	// Z3 = 2*Y1*Z1
	Z3 = w.Lsh(w.Mul(Z1, Y1), 1)
	Z3.Mod(Z3, curve.P)
	return
}

// projectiveScalarMult computes k*(Bx, By) using a left-to-right
// double-and-add over the bits of k (big-endian byte slice).
// NOTE(review): the loop variable `byte` shadows the builtin type name —
// works, but consider renaming for clarity.
func (curve *SIEC255Params) projectiveScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
	Bz := new(big.Int).SetInt64(1)
	x, y, z := new(big.Int), new(big.Int), new(big.Int)
	for _, byte := range k {
		for bitNum := 0; bitNum < 8; bitNum++ {
			x, y, z = curve.dbl2009l(x, y, z)
			if byte&0x80 == 0x80 {
				x, y, z = curve.add2007bl(Bx, By, Bz, x, y, z)
			}
			byte <<= 1
		}
	}
	return curve.projectiveToAffine(x, y, z)
}

// projectiveScalarBaseMult computes k*G for the curve's base point G.
func (curve *SIEC255Params) projectiveScalarBaseMult(k []byte) (*big.Int, *big.Int) {
	return curve.projectiveScalarMult(curve.Gx, curve.Gy, k)
}
projective.go
0.710327
0.438905
projective.go
starcoder
package natlab

import (
	"fmt"
	"sync"
	"time"

	"inet.af/netaddr"
)

// FirewallType is the type of filtering a stateful firewall
// does. Values express different modes defined by RFC 4787.
type FirewallType int

const (
	// AddressAndPortDependentFirewall specifies a destination
	// address-and-port dependent firewall. Outbound traffic to an
	// ip:port authorizes traffic from that ip:port exactly, and
	// nothing else.
	AddressAndPortDependentFirewall FirewallType = iota
	// AddressDependentFirewall specifies a destination address
	// dependent firewall. Once outbound traffic has been seen to an
	// IP address, that IP address can talk back from any port.
	AddressDependentFirewall
	// EndpointIndependentFirewall specifies a destination endpoint
	// independent firewall. Once outbound traffic has been seen from
	// a source, anyone can talk back to that source.
	EndpointIndependentFirewall
)

// fwKey is the lookup key for a firewall session. While it contains a
// 4-tuple ({src,dst} {ip,port}), some FirewallTypes will zero out
// some fields, so in practice the key is either a 2-tuple (src only),
// 3-tuple (src ip+port and dst ip) or 4-tuple (src+dst ip+port).
type fwKey struct {
	src netaddr.IPPort
	dst netaddr.IPPort
}

// key returns an fwKey for the given src and dst, trimmed according
// to the FirewallType. fwKeys are always constructed from the
// "outbound" point of view (i.e. src is the "trusted" side of the
// world), it's the caller's responsibility to swap src and dst in the
// call to key when processing packets inbound from the "untrusted"
// world.
func (s FirewallType) key(src, dst netaddr.IPPort) fwKey {
	k := fwKey{src: src}
	switch s {
	case EndpointIndependentFirewall:
		// dst stays zero: any peer may answer.
	case AddressDependentFirewall:
		k.dst = k.dst.WithIP(dst.IP())
	case AddressAndPortDependentFirewall:
		k.dst = dst
	default:
		panic(fmt.Sprintf("unknown firewall selectivity %v", s))
	}
	return k
}

// DefaultSessionTimeout is the default timeout for a firewall
// session.
const DefaultSessionTimeout = 30 * time.Second

// Firewall is a simple stateful firewall that allows all outbound
// traffic and filters inbound traffic based on recently seen outbound
// traffic. Its HandlePacket method should be attached to a Machine to
// give it a stateful firewall.
type Firewall struct {
	// SessionTimeout is the lifetime of idle sessions in the firewall
	// state. Packets transiting from the TrustedInterface reset the
	// session lifetime to SessionTimeout. If zero,
	// DefaultSessionTimeout is used.
	SessionTimeout time.Duration
	// Type specifies how precisely return traffic must match
	// previously seen outbound traffic to be allowed. Defaults to
	// AddressAndPortDependentFirewall.
	Type FirewallType
	// TrustedInterface is an optional interface that is considered
	// trusted in addition to PacketConns local to the Machine. All
	// other interfaces can only respond to traffic from
	// TrustedInterface or the local host.
	TrustedInterface *Interface
	// TimeNow is a function returning the current time. If nil,
	// time.Now is used.
	TimeNow func() time.Time

	// TODO: refresh directionality: outbound-only, both

	mu   sync.Mutex
	seen map[fwKey]time.Time // session -> deadline
}

// timeNow returns the firewall's notion of "now" (TimeNow hook or time.Now).
func (f *Firewall) timeNow() time.Time {
	if f.TimeNow != nil {
		return f.TimeNow()
	}
	return time.Now()
}

// init lazily allocates the session table. Callers must hold f.mu.
func (f *Firewall) init() {
	if f.seen == nil {
		f.seen = map[fwKey]time.Time{}
	}
}

// HandleOut records the outbound packet's session (refreshing its
// deadline) and always lets the packet through.
func (f *Firewall) HandleOut(p *Packet, oif *Interface) *Packet {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.init()
	k := f.Type.key(p.Src, p.Dst)
	f.seen[k] = f.timeNow().Add(f.sessionTimeoutLocked())
	p.Trace("firewall out ok")
	return p
}

// HandleIn permits an inbound packet only if a matching, unexpired
// session exists; otherwise it drops the packet (returns nil).
func (f *Firewall) HandleIn(p *Packet, iif *Interface) *Packet {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.init()
	// reverse src and dst because the session table is from the POV
	// of outbound packets.
	k := f.Type.key(p.Dst, p.Src)
	now := f.timeNow()
	// A missing key yields the zero time.Time, which is always in the
	// past, so unknown sessions are dropped here too.
	if now.After(f.seen[k]) {
		p.Trace("firewall drop")
		return nil
	}
	p.Trace("firewall in ok")
	return p
}

// HandleForward applies the outbound rules to traffic arriving on the
// trusted interface and the inbound rules to traffic leaving toward it;
// everything else is dropped.
func (f *Firewall) HandleForward(p *Packet, iif *Interface, oif *Interface) *Packet {
	if iif == f.TrustedInterface {
		// Treat just like a locally originated packet
		return f.HandleOut(p, oif)
	}
	if oif != f.TrustedInterface {
		// Not a possible return packet from our trusted interface, drop.
		p.Trace("firewall drop, unexpected oif")
		return nil
	}
	// Otherwise, a session must exist, same as HandleIn.
	return f.HandleIn(p, iif)
}

// sessionTimeoutLocked returns the configured session timeout,
// substituting the default when unset. Callers must hold f.mu.
func (f *Firewall) sessionTimeoutLocked() time.Duration {
	if f.SessionTimeout == 0 {
		return DefaultSessionTimeout
	}
	return f.SessionTimeout
}
tstest/natlab/firewall.go
0.564339
0.455744
firewall.go
starcoder
package quickunion import ( "fmt" "github.com/ivanlemeshev/algorithms-go/unionfind" ) // QuickUnion is an implementation of union–find data type. This implementation // uses quick union. The constructor takes O(n) time, where n is the number of // sites. The union and find operations take O(n) time in the worst case. The // count operation takes O(1) time. type QuickUnion struct { parent []int count int } // New initializes an empty union-find data structure with n elements from 0 // through n-1. Initially, each elements is in its own set. func New(n int) unionfind.UnionFind { parent := make([]int, n) for i := 0; i < n; i++ { parent[i] = i } return &QuickUnion{ parent: parent, count: n, } } // Union merges the set containing element p with the the set containing element q. func (qu *QuickUnion) Union(p, q int) error { if err := qu.validate(p); err != nil { return err } if err := qu.validate(q); err != nil { return err } rootP := qu.find(p) rootQ := qu.find(q) // p and q are already in the same component if rootP == rootQ { return nil } qu.parent[rootP] = rootQ qu.count-- return nil } // Find returns the canonical element of the set containing element p. func (qu *QuickUnion) Find(p int) (int, error) { if err := qu.validate(p); err != nil { return 0, err } return qu.find(p), nil } // IsConnected returns true if the two elements are in the same set. func (qu *QuickUnion) IsConnected(p, q int) (bool, error) { if err := qu.validate(p); err != nil { return false, err } if err := qu.validate(q); err != nil { return false, err } rootP := qu.find(p) rootQ := qu.find(q) return rootP == rootQ, nil } // Count returns the number of sets. func (qu *QuickUnion) Count() int { return qu.count } // validate that p is a valid index. func (qu *QuickUnion) validate(p int) error { n := len(qu.parent) if p < 0 || p >= n { return fmt.Errorf("index %d is not between 0 and %d", p, n-1) } return nil } // find returns the canonical element of the set containing element p. 
func (qu *QuickUnion) find(p int) int { for p != qu.parent[p] { p = qu.parent[p] } return p }
unionfind/quickunion/quickunion.go
0.804214
0.462473
quickunion.go
starcoder
package nakamura import ( "errors" "reflect" "strconv" "strings" "time" ) type Months struct { January, February, March, April, May, June, July, August, September, October, November, December int } func IsDateValid(input, format string) bool { if len(input) == 0 { return false } date, dateFormat := getDateType(input, format) dateIntegerCount := 0 if len(dateFormat) != 3 || len(date) != 3 { return false } for _, chunk := range date { if _, err := strconv.Atoi(chunk); err == nil { dateIntegerCount++ } } if dateIntegerCount != 3 { return false } if !dateMatchesFormat(date, dateFormat) { return false } return true } //the split version of both func dateMatchesFormat(input, format []string) bool { var globalMonth int var globalYear int for i := 0; i < len(input); i++ { if len(input[i]) != len(format[i]) { return false } if month, err := strconv.Atoi(input[i]); format[i] == "MM" && err == nil { globalMonth = month if month > 12 || month < 1 { return false } } if year, err := strconv.Atoi(input[i]); format[i] == "YYYY" && err == nil { globalYear = year } if day, err := strconv.Atoi(input[i]); format[i] == "DD" && err == nil { return isDayValidInMonth(globalYear, globalMonth, day) } //check if the day of a month is valid } return true } func isDayValidInMonth(year, month, day int) bool { validityCheck := Months{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12} validityCount := Months{} if isLeapYear(year) { validityCount = Months{31, 29, 31, 30, 31, 30, 31, 30, 31, 31, 30, 31} } else { validityCount = Months{31, 28, 31, 30, 31, 30, 31, 30, 31, 31, 30, 31} } monthVal := reflect.ValueOf(validityCheck) daysVal := reflect.ValueOf(validityCount) //Get Month check if day passed in is valid in the given month for i := 0; i < monthVal.NumField(); i++ { if month == monthVal.Field(i).Interface().(int) { maxDaysInMonth := daysVal.Field(i).Interface().(int) if day <= maxDaysInMonth && day > 0 { return true } else { return false } } } return false } func Normalise(input, format string) string { 
date, dateFormat := getDateType(input, format) year, month, day := returnYearMonthDay(date, dateFormat) return strings.Split(time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.Local).String(), " ")[0] } func returnYearMonthDay(input, format []string) (year, month, day int) { var myDay int var myMonth int var myYear int for i := 0; i < len(input); i++ { if year, err := strconv.Atoi(input[i]); format[i] == "YYYY" && err == nil { myYear = year } if month, err := strconv.Atoi(input[i]); format[i] == "MM" && err == nil { myMonth = month } if day, err := strconv.Atoi(input[i]); format[i] == "DD" && err == nil { myDay = day } } return myYear, myMonth, myDay } func Humanise(input, format string) string { date, dateFormat := getDateType(input, format) year, month, day := returnYearMonthDay(date, dateFormat) return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.Local).Weekday().String() + "," + strconv.Itoa(day) + " " + getMonth(month) + " " + strconv.Itoa(year) } func getDateType(input, format string) (date, dateFormat []string) { //check if date is type hyphen or slash formatHyphen := strings.Split(format, "-") hyph := false if len(formatHyphen) == 3 { hyph = true } if hyph { date := strings.Split(input, "-") return date, formatHyphen } else { date := strings.Split(input, "/") formatSlash := strings.Split(format, "/") return date, formatSlash } } func getMonth(month int) string { validityCheck := Months{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12} monthVal := reflect.ValueOf(validityCheck) for i := 0; i < monthVal.NumField(); i++ { if month == monthVal.Field(i).Interface().(int) { return monthVal.Type().Field(i).Name } } return "" } func Today() string { return strings.Split(time.Now().String(), " ")[0] } func IsWeekend(input, format string) bool { date, dateFormat := getDateType(input, format) year, month, day := returnYearMonthDay(date, dateFormat) if weekday := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.Local).Weekday().String(); weekday == 
"Sunday" || weekday == "Saturday" { return true } return false } func IsLeapYear(input, format string) bool { date, dateFormat := getDateType(input, format) year, _, _ := returnYearMonthDay(date, dateFormat) return isLeapYear(year) } func isLeapYear(year int) bool { return ((year%4 == 0) && (year%100 != 0)) || (year%400 == 0) } func GreaterThan(firstDate, secondDate Nakamura, format string) bool { date1, dateFormat1 := getDateType(firstDate.date, format) year1, month1, day1 := returnYearMonthDay(date1, dateFormat1) date2, dateFormat2 := getDateType(secondDate.date, format) year2, month2, day2 := returnYearMonthDay(date2, dateFormat2) return time.Date(year1, time.Month(month1), day1, 0, 0, 0, 0, time.Local).After(time.Date(year2, time.Month(month2), day2, 0, 0, 0, 0, time.Local)) } func LessThan(firstDate, secondDate Nakamura, format string) bool { return !GreaterThan(firstDate, secondDate, format) && !Equal(firstDate, secondDate, format) } func Add(input Nakamura, value int, format string) Nakamura { date, dateFormat := getDateType(input.date, "YYYY-MM-DD") year, month, day := returnYearMonthDay(date, dateFormat) inputDate := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.Local) switch format { case "YYYY": return Nakamura{strings.Split(inputDate.AddDate(value, 0, 0).String(), " ")[0], input.format} case "MM": return Nakamura{strings.Split(inputDate.AddDate(0, value, 0).String(), " ")[0], input.format} case "DD": return Nakamura{strings.Split(inputDate.AddDate(0, 0, value).String(), " ")[0], input.format} } return Nakamura{} } func Equal(firstDate, secondDate Nakamura, format string) bool { date1, dateFormat1 := getDateType(firstDate.date, format) year1, month1, day1 := returnYearMonthDay(date1, dateFormat1) date2, dateFormat2 := getDateType(secondDate.date, format) year2, month2, day2 := returnYearMonthDay(date2, dateFormat2) return time.Date(year1, time.Month(month1), day1, 0, 0, 0, 0, time.Local).Equal(time.Date(year2, time.Month(month2), day2, 0, 0, 
0, 0, time.Local)) } func Weekday(input, format string) string { date, dateFormat := getDateType(input, format) year, month, day := returnYearMonthDay(date, dateFormat) return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.Local).Weekday().String() } func Month(input, format string) string { date, dateFormat := getDateType(input, format) _, month, _ := returnYearMonthDay(date, dateFormat) return getMonth(month) } // DaysInMonth returns the number of days in a month // For example, for January 2017: daysInMonth(2017, 1) ==> 31 func DaysInMonth(input, format string) (int, error) { date, dateFormat := getDateType(input, format) year, month, _ := returnYearMonthDay(date, dateFormat) return getDaysInMonth(year, month) } func getDaysInMonth(year, month int) (int, error) { if month > 0 && month <= 12 { if month == 2 { if isLeapYear(year) { return 29, nil } return 28, nil } // Force-use a zero index to accommodate month = month - 1 // Months 1 - 7 (Odd -> 31 | Even -> 30) // Months 8 - 12 (Odd -> 30 | Even -> 31) return 31 - (month % 7 % 2), nil } return 0, errors.New("Invalid month") } // Max returns the latest of the dates passed as arguments func GetMax(dates ...Nakamura) Nakamura { if len(dates) > 0 { maxDate := dates[0] for i := 1; i < len(dates); i++ { if GreaterThan(dates[i], maxDate, maxDate.format) { maxDate = dates[i] } } return maxDate } return Nakamura{Today(), "YYYY-MM-DD"} } // Min returns the earliest of the dates passed as arguments func GetMin(dates ...Nakamura) Nakamura { if len(dates) > 0 { minDate := dates[0] for i := 1; i < len(dates); i++ { if LessThan(dates[i], minDate, minDate.format) { minDate = dates[i] } } return minDate } return Nakamura{Today(), "YYYY-MM-DD"} }
diem.go
0.549641
0.433742
diem.go
starcoder
package sqlow // DataType is SQL DataType type DataType struct { TypeName string Type string UNSIGNED bool UndignedType string ZEROFILL bool MaxLength int DefaultPropaty string AutoIncrement bool PrimaryKey bool } var ( // TINYINT is the one that exists in SQL and can store numbers from -128 to 127. If you add UNSIGNED, you can store up to 255, but you can only use integers. TINYINT = DataType{TypeName: "TINYINT", Type: "INT8", UNSIGNED: true, ZEROFILL: true, UndignedType: "UNIT8", AutoIncrement: true, PrimaryKey: true} // SMALLINT is the one that exists in SQL and can store numbers from -32768 to 32767. With UNSIGNED, you can store up to 65535, but you can only use integers. SMALLINT = DataType{TypeName: "SMALLINT", Type: "INT16", UNSIGNED: true, ZEROFILL: true, UndignedType: "UNIT16", AutoIncrement: true, PrimaryKey: true} // MEDIUMINT is the one that exists in SQL and can store numbers from -8388608 to 8388607. With UNSIGNED, you can store up to 16777215, but you can only use integers. MEDIUMINT = DataType{TypeName: "MEDIUMINT", Type: "INT32", UNSIGNED: true, ZEROFILL: true, UndignedType: "UNIT32", AutoIncrement: true, PrimaryKey: true} // INT is the one that exists in SQL and can store numbers from -2147483648 to 2147483647. With UNSIGNED, you can store up to 4294967295, but you can only use integers. INT = DataType{TypeName: "INT", Type: "INT32", UNSIGNED: true, ZEROFILL: true, UndignedType: "UNIT32", AutoIncrement: true, PrimaryKey: true} // INTEGER is the one that exists in SQL and can store numbers from -2147483648 to 2147483647. With UNSIGNED, you can store up to 4294967295, but you can only use integers. INTEGER = DataType{TypeName: "INTEGER", Type: "INT32", UNSIGNED: true, ZEROFILL: true, UndignedType: "UNIT32", AutoIncrement: true, PrimaryKey: true} // BIGINT is the one that exists in SQL and can store numbers from -9223372036854775808 to 9223372036854775807. With UNSIGNED, you can store up to 18446744073709551615, but you can only use integers. 
BIGINT = DataType{TypeName: "BIGINT", Type: "INT64", UNSIGNED: true, ZEROFILL: true, UndignedType: "UNIT64", AutoIncrement: true, PrimaryKey: true} // BOOLEAN is a type in SQL that can store True or False. BOOLEAN = DataType{TypeName: "BOOLEAN", Type: "BOOL", UNSIGNED: false, ZEROFILL: false, AutoIncrement: false, PrimaryKey: true} // BOOL is a type in SQL that can store True or False. BOOL = DataType{TypeName: "BIGINT", Type: "BOOL", UNSIGNED: false, ZEROFILL: false, AutoIncrement: false, PrimaryKey: false} // BIT is a SQL type that can store bit values. BIT = DataType{TypeName: "UNIT", Type: "UNIT", UNSIGNED: false, ZEROFILL: false, AutoIncrement: false, PrimaryKey: true} // FLOAT is a type that exists in SQL and can store accurate decimals up to the 7th decimal place. FLOAT = DataType{TypeName: "FLOAT", Type: "FLOAT32", UNSIGNED: true, ZEROFILL: true, AutoIncrement: true, PrimaryKey: true} // DOUBLE is a type that exists in SQL and can store accurate decimals up to the 7th decimal place. DOUBLE = DataType{TypeName: "DOUBLE", Type: "FLOAT64", UNSIGNED: true, ZEROFILL: true, AutoIncrement: true, PrimaryKey: true} // DATE is a type that exists in SQL. You can save the year, month, and day. DATE = DataType{TypeName: "DATE", Type: "DATE", UNSIGNED: false, ZEROFILL: false, AutoIncrement: false, PrimaryKey: true} // DATETIME is a type that exists in SQL. You can save the year, month, day, hour, minute, and second. DATETIME = DataType{TypeName: "DATETIME", Type: "DATE", UNSIGNED: false, ZEROFILL: false, AutoIncrement: false, PrimaryKey: true} // TIMESTAMP is a type that exists in SQL. You can save the year, month, day, hour, minute, and second. Also, if no value is explicitly assigned, the date and time will be set automatically when the value is changed. TIMESTAMP = DataType{TypeName: "TIMESTAMP", Type: "DATE", UNSIGNED: false, ZEROFILL: false, AutoIncrement: false, PrimaryKey: true} // TIME is a type that exists in SQL. You can save hours, minutes, and seconds. 
TIME = DataType{TypeName: "TIME", Type: "DATE", UNSIGNED: false, ZEROFILL: false, AutoIncrement: false, PrimaryKey: true} // VARCHAR is a SQL type. You can store the specified character string. VARCHAR = DataType{TypeName: "VARCHAR", Type: "STRING", UNSIGNED: false, ZEROFILL: false, MaxLength: 65535, DefaultPropaty: "255", AutoIncrement: false, PrimaryKey: false} // TEXT is a SQL type. You can store the specified character string. TEXT = DataType{TypeName: "TEXT", Type: "STRING", UNSIGNED: false, ZEROFILL: false, MaxLength: 14090025, DefaultPropaty: "255", AutoIncrement: false, PrimaryKey: false} // MIDIUMTEXT is a SQL type. You can store the specified character string. MIDIUMTEXT = DataType{TypeName: "MIDIUMTEXT", Type: "STRING", UNSIGNED: false, ZEROFILL: false, MaxLength: 3741318945, DefaultPropaty: "255", AutoIncrement: false, PrimaryKey: false} // LONGTEXT is a SQL type. You can store the specified character string. LONGTEXT = DataType{TypeName: "LONGTEXT", Type: "STRING", UNSIGNED: false, ZEROFILL: false, MaxLength: 4294967295, DefaultPropaty: "255", AutoIncrement: false, PrimaryKey: false} // ENUM is a type of SQL. Can store one of the specified string lists. ENUM = DataType{TypeName: "TEXT", Type: "LIST", UNSIGNED: false, ZEROFILL: false, AutoIncrement: false, PrimaryKey: false} // SET is a type of SQL. Can store multiple in the specified string list. SET = DataType{TypeName: "SET", Type: "LIST", UNSIGNED: false, ZEROFILL: false, AutoIncrement: false, PrimaryKey: false} )
datatypes.go
0.583559
0.441854
datatypes.go
starcoder
package math const ( E = 2.71828182845904523536028747135266249775724709369995957496696763 // A001113 Pi = 3.14159265358979323846264338327950288419716939937510582097494459 // A000796 Phi = 1.61803398874989484820458683436563811772030917980576286213544862 // A001622 Sqrt2 = 1.41421356237309504880168872420969807856967187537694807317667974 // A002193 SqrtE = 1.64872127070012814684865078781416357165377610071014801157507931 // A019774 SqrtPi = 1.77245385090551602729816748334114518279754945612238712821380779 // A002161 SqrtPhi = 1.27201964951406896425242246173749149171560804184009624861664038 // A139339 Ln2 = 0.693147180559945309417232121458176568075500134360255254120680009 // A002162 Log2E = 1 / Ln2 Ln10 = 2.30258509299404568401799145468436420760110148862877297603332790 // A002392 Log10E = 1 / Ln10 ) const ( MaxFloat32 = 3.40282346638528859811704183484516925440e+38 // 2**127 * (2**24 - 1) / 2**23 SmallestNonzeroFloat32 = 1.401298464324817070923729583289916131280e-45 // 1 / 2**(127 - 1 + 23) MaxFloat64 = 1.797693134862315708145274237317043567981e+308 // 2**1023 * (2**53 - 1) / 2**52 SmallestNonzeroFloat64 = 4.940656458412465441765687928682213723651e-324 // 1 / 2**(1023 - 1 + 52) ) const ( MaxInt8 = 1<<7 - 1 MinInt8 = -1 << 7 MaxInt16 = 1<<15 - 1 MinInt16 = -1 << 15 MaxInt32 = 1<<31 - 1 MinInt32 = -1 << 31 MaxInt64 = 1<<63 - 1 MinInt64 = -1 << 63 MaxUint8 = 1<<8 - 1 MaxUint16 = 1<<16 - 1 MaxUint32 = 1<<32 - 1 MaxUint64 = 1<<64 - 1 ) func Abs(x float64) float64 { return 0.0 } func Acos(x float64) float64 { return 0.0 } func Acosh(x float64) float64 { return 0.0 } func Asin(x float64) float64 { return 0.0 } func Asinh(x float64) float64 { return 0.0 } func Atan(x float64) float64 { return 0.0 } func Atan2(y, x float64) float64 { return 0.0 } func Atanh(x float64) float64 { return 0.0 } func Cbrt(x float64) float64 { return 0.0 } func Ceil(x float64) float64 { return 0.0 } func Copysign(x, y float64) float64 { return 0.0 } func Cos(x float64) float64 { return 0.0 } func Cosh(x 
float64) float64 { return 0.0 } func Dim(x, y float64) float64 { return 0.0 } func Erf(x float64) float64 { return 0.0 } func Erfc(x float64) float64 { return 0.0 } func Exp(x float64) float64 { return 0.0 } func Exp2(x float64) float64 { return 0.0 } func Expm1(x float64) float64 { return 0.0 } func Float32bits(f float32) uint32 { return 0.0 } func Float32frombits(b uint32) float32 { return 0.0 } func Float64bits(f float64) uint64 { return 0.0 } func Float64frombits(b uint64) float64 { return 0.0 } func Floor(x float64) float64 { return 0.0 } func Frexp(f float64) (frac float64, exp int) { return } func Gamma(x float64) float64 { return 0.0 } func Hypot(p, q float64) float64 { return 0.0 } func Ilogb(x float64) int { return 0.0 } func Inf(sign int) float64 { return 0.0 } func IsInf(f float64, sign int) bool { return true } func IsNaN(f float64) (is bool) { return } func J0(x float64) float64 { return 0.0 } func J1(x float64) float64 { return 0.0 } func Jn(n int, x float64) float64 { return 0.0 } func Ldexp(frac float64, exp int) float64 { return 0.0 } func Lgamma(x float64) (lgamma float64, sign int) { return } func Log(x float64) float64 { return 0.0 } func Log10(x float64) float64 { return 0.0 } func Log1p(x float64) float64 { return 0.0 } func Log2(x float64) float64 { return 0.0 } func Logb(x float64) float64 { return 0.0 } func Max(x, y float64) float64 { return 0.0 } func Min(x, y float64) float64 { return 0.0 } func Mod(x, y float64) float64 { return 0.0 } func Modf(f float64) (int float64, frac float64) { return } func NaN() float64 { return 0.0 } func Nextafter(x, y float64) (r float64) { return 0.0 } func Nextafter32(x, y float32) (r float32) { return 0.0 } func Pow(x, y float64) float64 { return 0.0 } func Pow10(e int) float64 { return 0.0 } func Remainder(x, y float64) float64 { return 0.0 } func Signbit(x float64) bool { return true } func Sin(x float64) float64 { return 0.0 } func Sincos(x float64) (sin, cos float64) { return } func Sinh(x float64) 
float64 { return 0.0 } func Sqrt(x float64) float64 { return 0.0 } func Tan(x float64) float64 { return 0.0 } func Tanh(x float64) float64 { return 0.0 } func Trunc(x float64) float64 { return 0.0 } func Y0(x float64) float64 { return 0.0 } func Y1(x float64) float64 { return 0.0 } func Yn(n int, x float64) float64 { return 0.0 }
go/src/math/math.go
0.602997
0.502258
math.go
starcoder
package compare // Int compares two int values. // It returns -1 if value1 is LESS THAN value2. // It returns 0 if value1 is EQUAL TO value2. // It returns 1 if value1 is GREATER THAN value2. func Int(value1 int, value2 int) int { switch { case value1 > value2: return 1 case value1 == value2: return 0 default: return -1 } } // Int8 compares two int8 values. // It returns -1 if value1 is LESS THAN value2. // It returns 0 if value1 is EQUAL TO value2. // It returns 1 if value1 is GREATER THAN value2. func Int8(value1 int8, value2 int8) int { switch { case value1 > value2: return 1 case value1 == value2: return 0 default: return -1 } } // Int16 compares two int16 values. // It returns -1 if value1 is LESS THAN value2. // It returns 0 if value1 is EQUAL TO value2. // It returns 1 if value1 is GREATER THAN value2. func Int16(value1 int16, value2 int16) int { switch { case value1 > value2: return 1 case value1 == value2: return 0 default: return -1 } } // Int32 compares two int32 values. // It returns -1 if value1 is LESS THAN value2. // It returns 0 if value1 is EQUAL TO value2. // It returns 1 if value1 is GREATER THAN value2. func Int32(value1 int32, value2 int32) int { switch { case value1 > value2: return 1 case value1 == value2: return 0 default: return -1 } } // Int64 compares two int64 values. // It returns -1 if value1 is LESS THAN value2. // It returns 0 if value1 is EQUAL TO value2. // It returns 1 if value1 is GREATER THAN value2. func Int64(value1 int64, value2 int64) int { switch { case value1 > value2: return 1 case value1 == value2: return 0 default: return -1 } } // UInt compares two uint values. // It returns -1 if value1 is LESS THAN value2. // It returns 0 if value1 is EQUAL TO value2. // It returns 1 if value1 is GREATER THAN value2. func UInt(value1 uint, value2 uint) int { switch { case value1 > value2: return 1 case value1 == value2: return 0 default: return -1 } } // UInt8 compares two uint8 values. // It returns -1 if value1 is LESS THAN value2. 
// It returns 0 if value1 is EQUAL TO value2. // It returns 1 if value1 is GREATER THAN value2. func UInt8(value1 uint8, value2 uint8) int { switch { case value1 > value2: return 1 case value1 == value2: return 0 default: return -1 } } // UInt16 compares two uint16 values. // It returns -1 if value1 is LESS THAN value2. // It returns 0 if value1 is EQUAL TO value2. // It returns 1 if value1 is GREATER THAN value2. func UInt16(value1 uint16, value2 uint16) int { switch { case value1 > value2: return 1 case value1 == value2: return 0 default: return -1 } } // UInt32 compares two uint32 values. // It returns -1 if value1 is LESS THAN value2. // It returns 0 if value1 is EQUAL TO value2. // It returns 1 if value1 is GREATER THAN value2. func UInt32(value1 uint32, value2 uint32) int { switch { case value1 > value2: return 1 case value1 == value2: return 0 default: return -1 } } // UInt64 compares two uint64 values. // It returns -1 if value1 is LESS THAN value2. // It returns 0 if value1 is EQUAL TO value2. // It returns 1 if value1 is GREATER THAN value2. func UInt64(value1 uint64, value2 uint64) int { switch { case value1 > value2: return 1 case value1 == value2: return 0 default: return -1 } }
int.go
0.776962
0.496643
int.go
starcoder
package tuplecodec import "bytes" // Less decides the key is less than another key func (tk TupleKey) Less(another TupleKey) bool { return bytes.Compare(tk,another) < 0 } // Compare compares the key with another key func (tk TupleKey) Compare(another TupleKey) int { return bytes.Compare(tk,another) } // Equal decides the key is equal to another key func (tk TupleKey) Equal(another TupleKey) bool { return bytes.Equal(tk,another) } // IsPredecessor decides the is the predecessor of the another key func (tk TupleKey) IsPredecessor(another TupleKey) bool { //1. the length of the key + 1 == the length of the another key. //2. the last byte of the another key is zero. //3. other bytes are equal. return len(tk) + 1 == len(another) && another[len(another) - 1] == 0 && tk.Equal(another[:len(another) - 1]) } // SuccessorOfKey gets the successor of the key. // Carefully, key can not be changed. func SuccessorOfKey(key TupleKey) TupleKey { l := len(key) + 1 if cap(key) > len(key) { ret := key[:l] if ret[l - 1] == 0 { return ret } } ret := make([]byte, l) copy(ret,key) ret[l - 1] = 0 return ret } // SuccessorOfPrefix gets the successor of the prefix func SuccessorOfPrefix(prefix TupleKey) TupleKey { if len(prefix) == 0 { return TupleKey{0xFF} } ret := make([]byte,len(prefix)) copy(ret,prefix) for i := len(ret) - 1; i >= 0 ; i-- { ret[i] = ret[i] + 1 if ret[i] != 0 { return ret[:i + 1] } } return prefix } func (r Range) IsValid() bool { if len(r.startKey) == 0 && len(r.endKey) == 0 { return false } //endKey can be empty if len(r.endKey) == 0{ return true } //startKey < endKey if bytes.Compare(r.startKey,r.endKey) >= 0 { return false } return true } // Contain checks the key in the range func (r Range) Contain(key TupleKey) bool { //startKey <= key < endKey return bytes.Compare(key,r.startKey) >= 0 && bytes.Compare(key,r.endKey) < 0 } // Equal checks the range is equal to the another range. 
func (r Range) Equal(another Range) bool { return bytes.Compare(r.startKey,another.startKey) == 0 && bytes.Compare(r.endKey,another.endKey) == 0 } // Merge merges two ranges func (r Range) Merge(another Range) Range { if !(r.IsValid() && another.IsValid()) { return Range{} } start := r.startKey end := r.startKey //choose the end of the range R as the final endKey. if len(r.endKey) > 0 { end = r.endKey } if another.startKey.Less(start) { start = another.startKey } if end.Less(another.startKey) { end = another.startKey } if len(another.endKey) > 0 && end.Less(another.endKey) { end = another.endKey } if start.Equal(end) { return Range{startKey: start} }else if r.startKey.Equal(end) || another.startKey.Equal(end) { return Range{startKey: start, endKey: SuccessorOfKey(end)} } return Range{startKey: start,endKey: end} } // Overlap checks the range overlaps or not. func (r Range) Overlap(another Range) bool { if !(r.IsValid() && another.IsValid()) { return false } if len(r.endKey) == 0 && len(another.endKey) == 0 { return bytes.Compare(r.startKey,another.startKey) == 0 }else if len(r.endKey) == 0 { return bytes.Compare(r.startKey,another.startKey) >= 0 && bytes.Compare(r.startKey,another.endKey) < 0 }else if len(another.endKey) == 0 { return bytes.Compare(another.startKey,r.startKey) >= 0 && bytes.Compare(another.startKey,r.endKey) < 0 } return bytes.Compare(r.endKey,another.startKey) > 0 && bytes.Compare(r.startKey,another.endKey) < 0 } // Intersect gets the intersected range. func (r Range) Intersect(another Range) Range { if !r.Overlap(another) { return Range{} } if len(r.endKey) == 0 { return r } if len(another.endKey) == 0 { return another } start := r.startKey if start.Less(another.startKey) { start = another.startKey } end := r.endKey if another.endKey.Less(end) { end = another.endKey } return Range{start,end} } // Contain check the range contains the another range. 
func (r Range) ContainRange(another Range) bool { if !r.Overlap(another) { return false } if len(r.endKey) == 0 && len(another.endKey) == 0 { return r.startKey.Equal(another.startKey) }else if len(r.endKey) == 0 { return false }else if len(another.endKey) == 0 { return bytes.Compare(another.startKey,r.startKey) >= 0 && bytes.Compare(another.startKey,r.endKey) < 0 } return bytes.Compare(r.startKey,another.startKey) <= 0 && bytes.Compare(r.endKey,another.endKey) >= 0 }
pkg/vm/engine/tpe/tuplecodec/key.go
0.739046
0.522994
key.go
starcoder
// Package ease implements Robert Penner style easing equations over float32.
package ease

import (
	"math"
)

// backS is the default overshoot amount used by the Back family.
const backS float32 = 1.70158

var pi = float32(math.Pi)

// TweenFunc provides an interface used for the easing equation. You can use
// one of the provided easing functions or provide your own.
//
// All easing functions share one signature: t is elapsed time, b the begin
// value, c the total change, d the duration. The result is the eased value
// at time t (b at t=0, b+c at t=d — see Linear for the base case).
type TweenFunc func(t, b, c, d float32) float32

// Linear eases at constant speed.
func Linear(t, b, c, d float32) float32 { return c*t/d + b }

// Quadratic (power-of-2) family.
func InQuad(t, b, c, d float32) float32 { return c*pow(t/d, 2) + b }
func OutQuad(t, b, c, d float32) float32 {
	t = t / d
	return -c*t*(t-2) + b
}
func InOutQuad(t, b, c, d float32) float32 {
	t = t / d * 2
	if t < 1 {
		return c/2*pow(t, 2) + b
	}
	return -c/2*((t-1)*(t-3)-1) + b
}
func OutInQuad(t, b, c, d float32) float32 {
	// Out for the first half, In for the second (pattern repeated below).
	if t < d/2 {
		return OutQuad(t*2, b, c/2, d)
	}
	return InQuad((t*2)-d, b+c/2, c/2, d)
}

// Cubic (power-of-3) family.
func InCubic(t, b, c, d float32) float32  { return c*pow(t/d, 3) + b }
func OutCubic(t, b, c, d float32) float32 { return c*(pow(t/d-1, 3)+1) + b }
func InOutCubic(t, b, c, d float32) float32 {
	t = t / d * 2
	if t < 1 {
		return c/2*t*t*t + b
	}
	t = t - 2
	return c/2*(t*t*t+2) + b
}
func OutInCubic(t, b, c, d float32) float32 {
	if t < d/2 {
		return OutCubic(t*2, b, c/2, d)
	}
	return InCubic((t*2)-d, b+c/2, c/2, d)
}

// Quartic (power-of-4) family.
func InQuart(t, b, c, d float32) float32  { return c*pow(t/d, 4) + b }
func OutQuart(t, b, c, d float32) float32 { return -c*(pow(t/d-1, 4)-1) + b }
func InOutQuart(t, b, c, d float32) float32 {
	t = t / d * 2
	if t < 1 {
		return c/2*pow(t, 4) + b
	}
	return -c/2*(pow(t-2, 4)-2) + b
}
func OutInQuart(t, b, c, d float32) float32 {
	if t < d/2 {
		return OutQuart(t*2, b, c/2, d)
	}
	return InQuart((t*2)-d, b+c/2, c/2, d)
}

// Quintic (power-of-5) family.
func InQuint(t, b, c, d float32) float32  { return c*pow(t/d, 5) + b }
func OutQuint(t, b, c, d float32) float32 { return c*(pow(t/d-1, 5)+1) + b }
func InOutQuint(t, b, c, d float32) float32 {
	t = t / d * 2
	if t < 1 {
		return c/2*pow(t, 5) + b
	}
	return c/2*(pow(t-2, 5)+2) + b
}
func OutInQuint(t, b, c, d float32) float32 {
	if t < d/2 {
		return OutQuint(t*2, b, c/2, d)
	}
	return InQuint((t*2)-d, b+c/2, c/2, d)
}

// Sinusoidal family.
func InSine(t, b, c, d float32) float32    { return -c*cos(t/d*(pi/2)) + c + b }
func OutSine(t, b, c, d float32) float32   { return c*sin(t/d*(pi/2)) + b }
func InOutSine(t, b, c, d float32) float32 { return -c/2*(cos(pi*t/d)-1) + b }
func OutInSine(t, b, c, d float32) float32 {
	if t < d/2 {
		return OutSine(t*2, b, c/2, d)
	}
	return InSine((t*2)-d, b+c/2, c/2, d)
}

// Exponential family. The small 0.001/1.001 fudge factors make the curve
// start exactly at b and end exactly at b+c despite 2^-10 not being zero.
func InExpo(t, b, c, d float32) float32 {
	if t == 0 {
		return b
	}
	return c*pow(2, 10*(t/d-1)) + b - c*0.001
}
func OutExpo(t, b, c, d float32) float32 {
	if t == d {
		return b + c
	}
	return c*1.001*(-pow(2, -10*t/d)+1) + b
}
func InOutExpo(t, b, c, d float32) float32 {
	if t == 0 {
		return b
	}
	if t == d {
		return b + c
	}
	t = t / d * 2
	if t < 1 {
		return c/2*pow(2, 10*(t-1)) + b - c*0.0005
	}
	return c/2*1.0005*(-pow(2, -10*(t-1))+2) + b
}
func OutInExpo(t, b, c, d float32) float32 {
	if t < d/2 {
		return OutExpo(t*2, b, c/2, d)
	}
	return InExpo((t*2)-d, b+c/2, c/2, d)
}

// Circular family.
func InCirc(t, b, c, d float32) float32  { return (-c*(sqrt(1-pow(t/d, 2))-1) + b) }
func OutCirc(t, b, c, d float32) float32 { return (c*sqrt(1-pow(t/d-1, 2)) + b) }
func InOutCirc(t, b, c, d float32) float32 {
	t = t / d * 2
	if t < 1 {
		return -c/2*(sqrt(1-t*t)-1) + b
	}
	t = t - 2
	return c/2*(sqrt(1-t*t)+1) + b
}
func OutInCirc(t, b, c, d float32) float32 {
	if t < d/2 {
		return OutCirc(t*2, b, c/2, d)
	}
	return InCirc((t*2)-d, b+c/2, c/2, d)
}

// Elastic family: an exponentially damped sine wave; period/amplitude/shift
// come from calculatePAS.
func InElastic(t, b, c, d float32) float32 {
	if t == 0 {
		return b
	}
	t = t / d
	if t == 1 {
		return b + c
	}
	p, a, s := calculatePAS(c, d)
	t = t - 1
	return -(a * pow(2, 10*t) * sin((t*d-s)*(2*pi)/p)) + b
}
func OutElastic(t, b, c, d float32) float32 {
	if t == 0 {
		return b
	}
	t = t / d
	if t == 1 {
		return b + c
	}
	p, a, s := calculatePAS(c, d)
	return a*pow(2, -10*t)*sin((t*d-s)*(2*pi)/p) + c + b
}
func InOutElastic(t, b, c, d float32) float32 {
	if t == 0 {
		return b
	}
	t = t / d * 2
	if t == 2 {
		return b + c
	}
	p, a, s := calculatePAS(c, d)
	t = t - 1
	if t < 0 {
		return -0.5*(a*pow(2, 10*t)*sin((t*d-s)*(2*pi)/p)) + b
	}
	return a*pow(2, -10*t)*sin((t*d-s)*(2*pi)/p)*0.5 + c + b
}
func OutInElastic(t, b, c, d float32) float32 {
	if t < d/2 {
		return OutElastic(t*2, b, c/2, d)
	}
	return InElastic((t*2)-d, b+c/2, c/2, d)
}

// Back family: overshoots past the target by backS before settling.
func InBack(t, b, c, d float32) float32 {
	t = t / d
	return c*t*t*((backS+1)*t-backS) + b
}
func OutBack(t, b, c, d float32) float32 {
	t = t/d - 1
	return c*(t*t*((backS+1)*t+backS)+1) + b
}
func InOutBack(t, b, c, d float32) float32 {
	// Overshoot is scaled by 1.525 so the doubled halves still overshoot
	// by the same visual amount.
	s := backS * 1.525
	t = t / d * 2
	if t < 1 {
		return c/2*(t*t*((s+1)*t-s)) + b
	}
	t = t - 2
	return c/2*(t*t*((s+1)*t+s)+2) + b
}
func OutInBack(t, b, c, d float32) float32 {
	if t < (d / 2) {
		return OutBack(t*2, b, c/2, d)
	}
	return InBack((t*2)-d, b+c/2, c/2, d)
}

// Bounce family: four parabolic arcs of decreasing height.
func OutBounce(t, b, c, d float32) float32 {
	t = t / d
	if t < 1/2.75 {
		return c*(7.5625*t*t) + b
	}
	if t < 2/2.75 {
		t = t - (1.5 / 2.75)
		return c*(7.5625*t*t+0.75) + b
	} else if t < 2.5/2.75 {
		t = t - (2.25 / 2.75)
		return c*(7.5625*t*t+0.9375) + b
	}
	t = t - (2.625 / 2.75)
	return c*(7.5625*t*t+0.984375) + b
}
func InBounce(t, b, c, d float32) float32 { return c - OutBounce(d-t, 0, c, d) + b }
func InOutBounce(t, b, c, d float32) float32 {
	if t < d/2 {
		return InBounce(t*2, 0, c, d)*0.5 + b
	}
	return OutBounce(t*2-d, 0, c, d)*0.5 + c*.5 + b
}
func OutInBounce(t, b, c, d float32) float32 {
	if t < d/2 {
		return OutBounce(t*2, b, c/2, d)
	}
	return InBounce((t*2)-d, b+c/2, c/2, d)
}

// calculatePAS returns the period p, amplitude a and phase shift s used by
// the Elastic family (p = 30% of the duration, a = full change, s = p/4).
func calculatePAS(c, d float32) (p, a, s float32) {
	p = d * 0.3
	return p, c, p / 4
}

// float32 wrappers around the float64 math package.
func pow(x, y float32) float32 { return float32(math.Pow(float64(x), float64(y))) }
func cos(x float32) float32    { return float32(math.Cos(float64(x))) }
func sin(x float32) float32    { return float32(math.Sin(float64(x))) }
func sqrt(x float32) float32   { return float32(math.Sqrt(float64(x))) }
ease/easing_functions.go
0.818809
0.628863
easing_functions.go
starcoder
// Package dsp provides small math utilities for signal processing.
package dsp

import (
	"math"
	"math/rand"
)

// RandRange returns random values between a specified range
func RandRange(min, max float64) float64 { return rand.Float64()*(max-min) + min }

// ExpRatio produces an (inverse-)exponential curve that's inflection can be controlled by a specific ratio
func ExpRatio(ratio, speed float64) float64 {
	return math.Exp(-math.Log((1+ratio)/ratio) / speed)
}

// SoftClamp limits a value to a specific range, but compresses the value as it goes beyond the threshold
// NOTE(review): the guard compares |s| against the constant 0.5 rather than
// threshold, and the compressed branch divides by s (not |s|), which flips
// behavior for negative inputs — confirm this is intended.
func SoftClamp(s, threshold float64) float64 {
	abs := math.Abs(s)
	if abs <= 0.5 {
		return s
	}
	return (abs - 0.25*(1-threshold)) / s
}

// Overload is a sigmoid function that simulates soft clip overloading
func Overload(x float64) float64 {
	return math.Copysign(1, x) * (1 - math.Exp(-math.Abs(x)))
}

// Clamp limits a value to a specific range
func Clamp(s, min, max float64) float64 {
	if s > max {
		return max
	} else if s < min {
		return min
	}
	return s
}

// Mix sums two panned inputs
func Mix(mix, a, b float64) float64 {
	aOut, bOut := PanMix(mix, a, b)
	return aOut + bOut
}

// PanMix pans two inputs between two outputs
// pan is clamped to [-1, 1]; positive pan attenuates a, negative attenuates b.
func PanMix(pan, a, b float64) (float64, float64) {
	pan = Clamp(pan, -1, 1)
	if pan > 0 {
		return (1 - pan) * a, b
	} else if pan < 0 {
		return a, (1 + pan) * b
	}
	return a, b
}

// IsPowerOfTwo determines whether or not an integer is a power of two
// (note: also returns true for 0).
func IsPowerOfTwo(x int) bool { return (x & (x - 1)) == 0 }

// Fold reflects a value exceeding minimum/maximum thresholds back over those thresholds
func Fold(s, min, max float64) float64 {
	bottomdiff := s - min
	// First reflect once over whichever bound was crossed; if the single
	// reflection lands back in range we are done.
	if s >= max {
		s = max + max - s
		if s >= min {
			return s
		}
	} else if s < min {
		s = min + min - s
		if s < max {
			return s
		}
	} else {
		return s
	}
	if max == min {
		return min
	}
	// Still out of range after one reflection: wrap modulo a full
	// back-and-forth period (2 * range), then mirror the upper half.
	diff := max - min
	diff2 := diff + diff
	s = bottomdiff - diff2*math.Floor(bottomdiff/diff2)
	if s >= diff {
		s += -diff2
	}
	return s + min
}

// Chebyshev generates the Chebyshev polynomial coefficient for order n
// via the recurrence T(n, x) = 2x*T(n-1, x) - T(n-2, x).
func Chebyshev(n int, x float64) float64 {
	switch n {
	case 0:
		return 1
	case 1:
		return x
	case 2:
		return (2.0 * x * x) - 1.0
	}
	var (
		y1 = (2.0 * x * x) - 1.0
		y2 = x
		y  = y1
	)
	for i := 3; i <= n; i++ {
		y = (2.0 * x * y1) - y2
		y2, y1 = y1, y
	}
	return y
}
dsp/math.go
0.842669
0.542742
math.go
starcoder
package travel import ( "strconv" "time" ) func (t *Travel) AddCentury() *Travel { t.t = t.t.AddDate(100, 0, 0) return t } func (t *Travel) AddCenturies(centuries int) *Travel { t.t = t.t.AddDate(centuries*100, 0, 0) return t } func (t *Travel) SubCentury() *Travel { t.t = t.t.AddDate(-100, 0, 0) return t } func (t *Travel) SubCenturies(centuries int) *Travel { t.t = t.t.AddDate(-centuries*100, 0, 0) return t } func (t *Travel) AddYear() *Travel { t.t = t.t.AddDate(1, 0, 0) return t } func (t *Travel) AddYears(years int) *Travel { t.t = t.t.AddDate(years, 0, 0) return t } func (t *Travel) SubYear() *Travel { t.t = t.t.AddDate(-1, 0, 0) return t } func (t *Travel) SubYears(years int) *Travel { t.t = t.t.AddDate(-years, 0, 0) return t } func (t *Travel) AddMonth() *Travel { t.t = t.t.AddDate(0, 1, 0) return t } func (t *Travel) AddMonths(months int) *Travel { t.t = t.t.AddDate(0, months, 0) return t } func (t *Travel) SubMonth() *Travel { t.t = t.t.AddDate(0, -1, 0) return t } func (t *Travel) SubMonths(months int) *Travel { t.t = t.t.AddDate(0, -months, 0) return t } func (t *Travel) AddDays(days int) *Travel { t.t = t.t.AddDate(0, 0, days) return t } func (t *Travel) AddDay() *Travel { t.t = t.t.AddDate(0, 0, 1) return t } func (t *Travel) SubDays(days int) *Travel { t.t = t.t.AddDate(0, 0, -days) return t } func (t *Travel) SubDay() *Travel { t.t = t.t.AddDate(0, 0, -1) return t } func (t *Travel) AddWeek() *Travel { t.t = t.t.AddDate(0, 0, 7) return t } func (t *Travel) AddWeeks(weeks int) *Travel { t.t = t.t.AddDate(0, 0, weeks*7) return t } func (t *Travel) SubWeek() *Travel { t.t = t.t.AddDate(0, 0, -7) return t } func (t *Travel) SubWeeks(weeks int) *Travel { t.t = t.t.AddDate(0, 0, -weeks*7) return t } func parseDuration(operation string, value int, unit string) time.Duration { duration, _ := time.ParseDuration(operation + strconv.Itoa(value) + unit) return duration } func (t *Travel) AddHour() *Travel { h := parseDuration("-", 1, "h") t.t = t.t.Add(h) 
return t } func (t *Travel) AddHours(hours int) *Travel { h := parseDuration("+", hours, "h") t.t = t.t.Add(h) return t } func (t *Travel) SubHour() *Travel { h := parseDuration("-", 1, "h") t.t = t.t.Add(h) return t } func (t *Travel) SubHours(hours int) *Travel { h := parseDuration("-", hours, "h") t.t = t.t.Add(h) return t } func (t *Travel) AddMinute() *Travel { m := parseDuration("-", 1, "m") t.t = t.t.Add(m) return t } func (t *Travel) AddMinutes(minutes int) *Travel { m := parseDuration("+", minutes, "m") t.t = t.t.Add(m) return t } func (t *Travel) SubMinute() *Travel { m := parseDuration("-", 1, "m") t.t = t.t.Add(m) return t } func (t *Travel) SubMinutes(minutes int) *Travel { m := parseDuration("-", minutes, "m") t.t = t.t.Add(m) return t } func (t *Travel) AddSecond() *Travel { s := parseDuration("+", 1, "s") t.t = t.t.Add(s) return t } func (t *Travel) AddSeconds(seconds int) *Travel { s := parseDuration("+", seconds, "s") t.t = t.t.Add(s) return t } func (t *Travel) SubSecond() *Travel { s := parseDuration("-", 1, "s") t.t = t.t.Add(s) return t } func (t *Travel) SubSeconds(seconds int) *Travel { s := parseDuration("-", seconds, "s") t.t = t.t.Add(s) return t } func (t *Travel) AddMilliSecond() *Travel { ms := parseDuration("+", 1, "ms") t.t = t.t.Add(ms) return t } func (t *Travel) AddMilliSeconds(milli int) *Travel { ms := parseDuration("+", milli, "ms") t.t = t.t.Add(ms) return t } func (t *Travel) SubMilliSecond() *Travel { ms := parseDuration("-", 1, "ms") t.t = t.t.Add(ms) return t } func (t *Travel) SubMilliSeconds(milli int) *Travel { ms := parseDuration("-", milli, "ms") t.t = t.t.Add(ms) return t } func (t *Travel) AddMicroSecond() *Travel { us := parseDuration("+", 1, "us") t.t = t.t.Add(us) return t } func (t *Travel) AddMicroSeconds(micro int) *Travel { us := parseDuration("+", micro, "us") t.t = t.t.Add(us) return t } func (t *Travel) SubMicroSecond() *Travel { us := parseDuration("-", 1, "us") t.t = t.t.Add(us) return t } func (t 
*Travel) SubMicroSeconds(micro int) *Travel { us := parseDuration("-", micro, "us") t.t = t.t.Add(us) return t } func (t *Travel) AddNanoSecond() *Travel { ns := parseDuration("+", 1, "ns") t.t = t.t.Add(ns) return t } func (t *Travel) AddNanoSeconds(nano int) *Travel { ns := parseDuration("+", nano, "ns") t.t = t.t.Add(ns) return t } func (t *Travel) SubNanoSecond() *Travel { ns := parseDuration("-", 1, "ns") t.t = t.t.Add(ns) return t } func (t *Travel) SubNanoSeconds(nano int) *Travel { ns := parseDuration("-", nano, "ns") t.t = t.t.Add(ns) return t }
calculation.go
0.692226
0.582907
calculation.go
starcoder
package main import ( "image/jpeg" "image/png" "image" "bufio" "flag" "math" "fmt" "log" "os" ) const ImageChannels = 3 func GetSSIMChannel(inputImage *image.Image, outputImage *image.Image, index int) float64 { imageWidth := (*inputImage).Bounds().Max.X imageHeight := (*inputImage).Bounds().Max.Y iSum := 0.0 oSum := 0.0 for x := 0; x < imageWidth; x++ { for y := 0; y < imageHeight; y++ { i := make([]uint32, 3) o := make([]uint32, 3) i[0], i[1], i[2], _ = (*inputImage).At(x, y).RGBA() o[0], o[1], o[2], _ = (*outputImage).At(x, y).RGBA() iSum += float64(i[index]) oSum += float64(o[index]) } } iMean := iSum / float64(imageWidth * imageHeight) oMean := oSum / float64(imageWidth * imageHeight) iStSum := 0.0 oStSum := 0.0 covSum := 0.0 for x := 0; x < imageWidth; x++ { for y := 0; y < imageHeight; y++ { i := make([]uint32, 3) o := make([]uint32, 3) i[0], i[1], i[2], _ = (*inputImage).At(x, y).RGBA() o[0], o[1], o[2], _ = (*outputImage).At(x, y).RGBA() iStSum += math.Pow(float64(i[index]) - iMean, 2) oStSum += math.Pow(float64(o[index]) - oMean, 2) covSum += (float64(i[index]) - iMean) * (float64(o[index]) - oMean) } } iStDev := math.Sqrt(iStSum / float64(imageWidth * imageHeight)) oStDev := math.Sqrt(oStSum / float64(imageWidth * imageHeight)) cov := covSum / float64(imageWidth * imageHeight) C1 := math.Pow(0.01 * 65535.0, 2) C2 := math.Pow(0.03 * 65535.0, 2) return ((2.0 * iMean * oMean + C1) * (2.0 * cov + C2)) / ((math.Pow(iMean, 2) + math.Pow(oMean, 2) + C1) * (math.Pow(iStDev, 2) + math.Pow(oStDev, 2) + C2)) } func GetSSIM(inputImage *image.Image, outputImage *image.Image) float64 { indexR := GetSSIMChannel(inputImage, outputImage, 0) indexG := GetSSIMChannel(inputImage, outputImage, 1) indexB := GetSSIMChannel(inputImage, outputImage, 2) return (indexR + indexG + indexB) / 3.0; } func GetPSNR(inputImage *image.Image, outputImage *image.Image) float64 { imageWidth := (*inputImage).Bounds().Max.X imageHeight := (*inputImage).Bounds().Max.Y mseSum := 0.0 for x := 0; 
x < imageWidth; x++ { for y := 0; y < imageHeight; y++ { iR, iG, iB, _ := (*inputImage).At(x, y).RGBA() oR, oG, oB, _ := (*outputImage).At(x, y).RGBA() dtR := math.Pow(float64(oR) - float64(iR), 2) dtG := math.Pow(float64(oG) - float64(iG), 2) dtB := math.Pow(float64(oB) - float64(iB), 2) mseSum += dtR + dtG + dtB } } return 20 * math.Log10(65535) - 10 * math.Log10(mseSum / float64(imageWidth * imageHeight * ImageChannels)); } func LoadImageJPEG(filename string) *image.Image { file, err := os.Open(filename) if err != nil { log.Fatalf("[error] Cannot open %s\n", filename) } reader := bufio.NewReader(file) image, err := jpeg.Decode(reader) if err != nil { log.Fatalf("[error] Cannot decode %s as JPEG file\n", filename) } return &image } func LoadImagePNG(filename string) *image.Image { file, err := os.Open(filename) if err != nil { log.Fatalf("[error] Cannot open %s\n", filename) } reader := bufio.NewReader(file) image, err := png.Decode(reader) if err != nil { log.Fatalf("[error] Cannot decode %s as PNG file\n", filename) } return &image } func ConvertImage(inputImage *image.Image, outputFilename string, quality int) { // Save destination image outputFile, err := os.Create(outputFilename) if err != nil { log.Fatalf("[error] Cannot create output image") } jpeg.Encode(outputFile, *inputImage, &jpeg.Options{ quality }) } func main() { optImageInput := flag.String("i", "none", "Input PNG image filename") optImageOutput := flag.String("o", "none", "Output JPEG image filename") optImageQuality := flag.Int("q", 85, "JPEG quality") flag.Parse() inputImage := LoadImagePNG(*optImageInput) ConvertImage(inputImage, *optImageOutput, *optImageQuality) outputImage := LoadImageJPEG(*optImageOutput) fmt.Printf("PSNR: %.2fdB\n", GetPSNR(inputImage, outputImage)) fmt.Printf("SSIM: %.3f\n", GetSSIM(inputImage, outputImage)) }
scripts/encoder/encoder-intra.go
0.58261
0.496582
encoder-intra.go
starcoder
package fixed

import (
	"math/bits"
)

// 72/56 fixed-point value
// Stored in sign-magnitude form: hi bit 63 holds the sign, the remaining
// 127 bits hold the magnitude with 56 fractional bits (see fixed/integer).
// oneValue, fracMask and ErrOverflow are declared elsewhere in the package.
type Fixed struct{ lo, hi uint64 }

var fixedRawval1 = rawfixed(1)
var fixedOne = rawfixed(oneValue)

// unsignMask clears the sign bit; signMask isolates it.
const unsignMask = uint64(1)<<63 - 1
const signMask = uint64(1) << 63

// sign_ extracts the sign bit of x as a uint64 mask (0 or 1<<63).
func sign_(x int64) uint64 { return uint64(x) & signMask }

// abs_ returns |x| as uint64 using the branch-free xor/subtract trick.
func abs_(x int64) uint64 {
	xs := x >> 63
	return uint64((x ^ xs) - xs)
}

// rawfixed stores x's magnitude directly in lo (no fractional shift).
func rawfixed(x int64) Fixed {
	r := Fixed{lo: abs_(x), hi: sign_(x)}
	return r
}

// fixed56 converts back to a signed raw 56-bit-fraction value; panics when
// the magnitude does not fit in 63 bits.
func (x Fixed) fixed56() int64 {
	if (x.hi&unsignMask) != 0 || (x.lo&signMask) != 0 {
		panic(ErrOverflow)
	}
	return int64(x.lo) * x.sign()
}

// fixed converts an integer to fixed point (shift left by 56).
func fixed(x int64) Fixed {
	v, s := abs_(x), sign_(x)
	return Fixed{lo: v << 56, hi: (v >> 8) | s}
}

// ufixed converts an unsigned integer to fixed point.
func ufixed(x uint64) Fixed { return Fixed{lo: x << 56, hi: x >> 8} }

// fixed_ assembles a Fixed from a raw magnitude and a sign mask.
func fixed_(x uint64, sign uint64) Fixed { return Fixed{lo: x, hi: sign} }

// integer truncates toward zero; panics when the integer part overflows int64.
func (x Fixed) integer() int64 {
	if x.bitlen() > 56+63 {
		panic(ErrOverflow)
	}
	sign := (int64(x.hi)>>63)*2 + 1
	return int64(x.hi<<8|x.lo>>56) * sign
}

// fraction returns the raw 56 fractional bits of the magnitude.
func (x Fixed) fraction() uint64 { return x.lo & uint64(fracMask) }

// round rounds half away from zero (adds the top fractional bit).
func (x Fixed) round() int64 {
	if x.bitlen() > 56+63 {
		panic(ErrOverflow)
	}
	sign := (int64(x.hi)>>63)*2 + 1
	v := int64(x.hi<<8|x.lo>>56)*sign + int64((x.lo>>55)&1)*sign
	return v
}

// floor rounds toward negative infinity.
func (x Fixed) floor() int64 {
	if x.bitlen() > 56+63 {
		panic(ErrOverflow)
	}
	sign := (int64(x.hi)>>63)*2 + 1
	v := int64(x.hi<<8|x.lo>>56) * sign
	if sign < 0 && x.lo&uint64(fracMask) != 0 {
		return v - 1
	}
	return v
}

// ceil rounds toward positive infinity.
func (x Fixed) ceil() int64 {
	if x.bitlen() > 56+63 {
		panic(ErrOverflow)
	}
	sign := (int64(x.hi)>>63)*2 + 1
	v := int64(x.hi<<8|x.lo>>56) * sign
	if sign > 0 && x.lo&uint64(fracMask) != 0 {
		v += 1
	}
	return v
}

// setSignAs copies y's sign onto x's magnitude.
func (x Fixed) setSignAs(y Fixed) Fixed {
	x.hi = x.hi&unsignMask | y.sign_()
	return x
}

// sign returns -1 or +1 (zero is treated as +1).
func (x Fixed) sign() int64 { return (int64(x.hi)>>63)*2 + 1 }

// sign_ returns the raw sign bit mask.
func (x Fixed) sign_() uint64 { return x.hi & signMask }

// neg flips the sign bit (sign-magnitude negate, also negates zero).
func (x Fixed) neg() Fixed {
	x.hi ^= signMask
	return x
}

// abs clears the sign bit.
func (x Fixed) abs() Fixed { return Fixed{x.lo, x.hi & unsignMask} }

// bit reports bit i of the 128-bit magnitude+sign word.
// NOTE(review): both middle cases index into hi — the `i >= 64` case looks
// like it was meant to read `x.hi>>(i-64)` vs. a lo read for i < 64; as
// written bits 0-63 of lo are never consulted — confirm intent.
func (x Fixed) bit(i int) bool {
	switch {
	case i > 127:
		return false
	case i >= 64:
		return (x.hi>>(i-64))&1 != 0
	case i >= 0:
		return (x.hi>>i)&1 != 0
	}
	return false
}

// bitlen returns the magnitude's bit length (sign bit excluded).
func (x Fixed) bitlen() int {
	if v := x.hi & unsignMask; v != 0 {
		return bits.Len64(v) + 64
	}
	return bits.Len64(x.lo)
}

// shlmax shifts left by at most m without losing high bits; returns the
// shifted value and the shift actually applied.
func (x Fixed) shlmax(m int) (Fixed, int) {
	if n := bits.LeadingZeros64(x.hi) - 1; n < m {
		m = n
	}
	return x.shl_(m), m
}

// shl_ shifts the raw 128-bit word left without overflow checks.
func (x Fixed) shl_(n int) Fixed {
	lo, hi := x.lo, x.hi
	if n > 64 {
		hi, lo = lo, 0
		n -= 64
	}
	hi, lo = hi<<n|lo>>(64-n), lo<<n
	return Fixed{lo: lo, hi: hi}
}

// shl shifts the magnitude left, preserving the sign bit; panics on overflow.
func (x Fixed) shl(n int) Fixed {
	lo, hi := x.lo, x.hi&unsignMask
	if n > 64 {
		if hi != 0 {
			panic(ErrOverflow)
		}
		hi, lo = lo, 0
		n -= 64
	}
	if hi>>(64-n)|((hi<<n)&signMask) != 0 {
		panic(ErrOverflow)
	}
	hi, lo = hi<<n|lo>>(64-n), lo<<n
	return Fixed{lo: lo, hi: hi | (x.hi & signMask)}
}

// shr shifts the magnitude right, preserving the sign bit.
func (x Fixed) shr(n int) Fixed {
	lo, hi := x.lo, x.hi&unsignMask
	if n > 64 {
		hi, lo = 0, hi
		n -= 64
	}
	hi, lo = hi>>n, hi<<(64-n)|lo>>n
	return Fixed{lo: lo, hi: hi | (x.hi & signMask)}
}

// iszero ignores the sign bit, so negative zero is zero.
func (x Fixed) iszero() bool { return x.hi&unsignMask|x.lo == 0 }

// less/greater/equal compare the raw (hi, lo) words.
// NOTE(review): with sign-magnitude storage this orders by sign bit then
// magnitude, which is not numeric order for negatives — confirm callers
// only compare values of equal sign.
func (x Fixed) less(y Fixed) bool { return x.hi < y.hi || (x.hi == y.hi && x.lo < y.lo) }
func (x Fixed) greater(y Fixed) bool { return x.hi > y.hi || (x.hi == y.hi && x.lo > y.lo) }
func (x Fixed) equal(y Fixed) bool { return x.hi == y.hi && x.lo == y.lo }
fixednat.go
0.567218
0.412057
fixednat.go
starcoder
// Plots axes and function graphs to a PNG using github.com/fogleman/gg.
package main

import (
	"github.com/fogleman/gg"
	"fmt"
	"math"
)

// Output raster dimensions in pixels.
const width, height int = 1920, 1080

func main() {
	base := Axes{
		xmin: -50, xmax: 50,
		ymin: -200, ymax: 500,
		xscale: 5, yscale: 50,
		GraphicalObject: GraphicalObject{width: 1920, height: 1080},
	}
	dc := gg.NewContext(width, height)
	base.Draw(dc)
	base.PlotPoint(dc, Coord{1, 1})
	dc.Identity()
	base.LineGraphFunction(dc, 0.1, GraphFunction(func(n float64) float64 { return 50 * math.Sin(n) }))
	dc.SavePNG("test.png")
}

// GraphicalObject carries pixel dimensions and a placement for a drawable.
type GraphicalObject struct {
	width    float64
	height   float64
	location Coord
}

// Axes describes a 2D coordinate system: data-space bounds and tick spacing.
type Axes struct {
	GraphicalObject
	xmin   float64
	xmax   float64
	ymin   float64
	ymax   float64
	xscale float64
	yscale float64
}

// Coord is a 2D point (data or pixel space depending on context).
type Coord struct {
	x float64
	y float64
}

// findOrigin returns the pixel position of the data-space origin (0,0).
func (a *Axes) findOrigin() Coord {
	xrange := a.xmax - a.xmin
	yrange := a.ymax - a.ymin
	xcoord := a.width - a.xmax/xrange*a.width
	ycoord := a.ymax / yrange * a.height
	return Coord{xcoord, ycoord}
}

// Draw renders the two axis lines and tick marks along each axis.
// The four loops cover +x, -x, -y and +y ticks respectively.
func (a *Axes) Draw(dc *gg.Context) error {
	orig := a.findOrigin()
	dc.SetRGB(1, 0.5, 0.5)
	dc.SetLineWidth(5)
	dc.DrawLine(orig.x, 0, orig.x, float64(height))
	dc.DrawLine(0, orig.y, float64(width), orig.y)
	for i := a.xscale; i < a.xmax; i = i + a.xscale {
		// Pixel distance between consecutive x ticks on the positive side.
		tickWidth := a.xmax / (a.xmax - a.xmin) * float64(width) / (a.xmax / a.xscale)
		fmt.Println(tickWidth) // debug output, left in place
		tickX := orig.x + (i/a.xscale)*tickWidth
		dc.DrawLine(tickX, orig.y-20, tickX, orig.y+20)
	}
	for i := -a.xscale; i > a.xmin; i = i - a.xscale {
		tickWidth := a.xmin / (a.xmax - a.xmin) * float64(width) / (a.xmin / a.xscale)
		fmt.Println(tickWidth)
		tickX := orig.x + (i/a.xscale)*tickWidth
		dc.DrawLine(tickX, orig.y-20, tickX, orig.y+20)
	}
	for i := -a.yscale; i > -a.ymax; i = i - a.yscale {
		yRange := a.ymax - a.ymin
		tickWidth := a.ymax / (yRange) * float64(height) / (a.ymax / a.yscale)
		fmt.Println(tickWidth)
		tickY := orig.y + (i/a.yscale)*tickWidth
		dc.DrawLine(orig.x-20, tickY, orig.x+20, tickY)
	}
	for i := a.yscale; i < -a.ymin; i = i + a.yscale {
		yRange := a.ymax - a.ymin
		tickWidth := a.ymin / (yRange) * float64(height) / (a.ymin / a.yscale)
		fmt.Println(tickWidth)
		tickY := orig.y + (i/a.yscale)*tickWidth
		dc.DrawLine(orig.x-20, tickY, orig.x+20, tickY)
	}
	dc.Stroke()
	return nil
}

// TranslatePoint scales a data-space coordinate to a pixel offset relative
// to the origin; y is negated because screen y grows downward.
func (a *Axes) TranslatePoint(coord Coord) Coord {
	pointX := coord.x / (a.xmax - a.xmin) * float64(width)
	pointY := -coord.y / (a.ymax - a.ymin) * float64(height)
	return Coord{pointX, pointY}
}

// LineGraphFunction plots fn by joining successive samples (spacing inc)
// with line segments across [xmin, xmax).
func (a *Axes) LineGraphFunction(dc *gg.Context, inc float64, fn GraphFunction) error {
	for i := a.xmin; i < a.xmax-inc; i = i + inc {
		orig := a.findOrigin()
		dc.SetLineWidth(5)
		firstPoint := a.TranslatePoint(Coord{i, fn(i)})
		secondPoint := a.TranslatePoint(Coord{i + inc, fn(i + inc)})
		dc.DrawLine(orig.x+firstPoint.x, orig.y+firstPoint.y, orig.x+secondPoint.x, orig.y+secondPoint.y)
	}
	dc.Stroke()
	return nil
}

// GraphFunction maps an x value to the y value to plot.
type GraphFunction func(float64) float64

// PlotPoint draws a filled circle at the data-space coordinate.
// NOTE(review): this uses orig.y - pointY while LineGraphFunction uses
// orig.y + pointY, and TranslatePoint already negates y — one of the two
// call sites likely double-flips the y axis; confirm against rendered output.
func (a *Axes) PlotPoint(dc *gg.Context, coord Coord) error {
	orig := a.findOrigin()
	translatedPoint := a.TranslatePoint(coord)
	pointX := translatedPoint.x
	pointY := translatedPoint.y
	dc.DrawCircle(orig.x+pointX, orig.y-pointY, 10)
	dc.SetRGB(1, 1, 1)
	dc.Fill()
	return nil
}
main.go
0.830216
0.462898
main.go
starcoder
package gollection func LinkedListOf[T any](elements ...T) LinkedList[T] { var inner = &linkedList[T]{0, nil, nil} var list = LinkedList[T]{inner} for _, v := range elements { list.Append(v) } return list } func LinkedListFrom[T any](collection Collection[T]) LinkedList[T] { var list = LinkedListOf[T]() list.AppendAll(collection) return list } type LinkedList[T any] struct { inner *linkedList[T] } type linkedList[T any] struct { size int first *twoWayNode[T] last *twoWayNode[T] } type twoWayNode[T any] struct { value T next *twoWayNode[T] prev *twoWayNode[T] } func (a LinkedList[T]) GetFirst() T { if v, ok := a.TryGetFirst().Get(); ok { return v } panic(OutOfBounds) } func (a LinkedList[T]) TryGetFirst() Option[T] { if first := a.inner.first; first != nil { return Some(first.value) } return None[T]() } func (a LinkedList[T]) RemoveFirst() T { var first = a.inner.first if first == nil { panic(OutOfBounds) } return a.unlinkFirst(first) } func (a LinkedList[T]) GetLast() T { if v, ok := a.TryGetLast().Get(); ok { return v } panic(OutOfBounds) } func (a LinkedList[T]) TryGetLast() Option[T] { if last := a.inner.last; last != nil { return Some(last.value) } return None[T]() } func (a LinkedList[T]) RemoveLast() T { var last = a.inner.last if last == nil { panic(OutOfBounds) } return a.unlinkLast(last) } func (a LinkedList[T]) Prepend(element T) { a.linkFirst(element) } func (a LinkedList[T]) PrependAll(elements Collection[T]) { a.InsertAll(0, elements) } func (a LinkedList[T]) Append(element T) { a.linkLast(element) } func (a LinkedList[T]) AppendAll(elements Collection[T]) { a.InsertAll(a.inner.size, elements) } func (a LinkedList[T]) Insert(index int, element T) { if index < 0 || index > a.inner.size { panic(OutOfBounds) } if index == 0 { a.linkLast(element) } else { a.linkBefore(element, a.at(index)) } } func (a LinkedList[T]) InsertAll(index int, elements Collection[T]) { if index < 0 || index > a.inner.size { panic(OutOfBounds) } var size = elements.Size() if size 
== 0 { return } var pred, succ *twoWayNode[T] if index == a.inner.size { succ = nil pred = a.inner.last } else { succ = a.at(index) pred = succ.prev } var iter = elements.Iter() for v, ok := iter.Next().Get(); ok; v, ok = iter.Next().Get() { var newNode = &twoWayNode[T]{value: v, prev: pred, next: nil} if pred == nil { a.inner.first = newNode } else { pred.next = newNode } pred = newNode } if succ == nil { a.inner.last = pred } else { pred.next = succ succ.prev = pred } a.inner.size += size } func (a LinkedList[T]) Remove(index int) T { if a.isOutOfBounds(index) { panic(OutOfBounds) } return a.unlink(a.at(index)) } func (a LinkedList[T]) Get(index int) T { if v, ok := a.TryGet(index).Get(); ok { return v } panic(OutOfBounds) } func (a LinkedList[T]) Set(index int, newElement T) T { if v, ok := a.TrySet(index, newElement).Get(); ok { return v } panic(OutOfBounds) } func (a LinkedList[T]) Update(index int, update func(oldElement T) T) T { if a.isOutOfBounds(index) { panic(OutOfBounds) } var x = a.at(index) x.value = update(x.value) return x.value } func (a LinkedList[T]) TryGet(index int) Option[T] { if a.isOutOfBounds(index) { return None[T]() } return Some(a.at(index).value) } func (a LinkedList[T]) TrySet(index int, newElement T) Option[T] { if a.isOutOfBounds(index) { return None[T]() } var x = a.at(index) var oldValue = x.value x.value = newElement return Some(oldValue) } func (a LinkedList[T]) Clear() { for x := a.inner.first; x != nil; { var next = x.next var empty T x.value = empty x.next = nil x.prev = nil x = next } a.inner.first = nil a.inner.last = nil a.inner.size = 0 } func (a LinkedList[T]) Size() int { return a.inner.size } func (a LinkedList[T]) IsEmpty() bool { return a.inner.size == 0 } func (a LinkedList[T]) Iter() Iterator[T] { return &linkedListIterator[T]{a.inner.first} } func (a LinkedList[T]) ToSlice() []T { var arr = make([]T, a.Size()) ForEach(func(t T) { arr = append(arr, t) }, a.Iter()) return arr } func (a LinkedList[T]) Clone() 
LinkedList[T] { return LinkedListFrom[T](a) } func (a LinkedList[T]) isOutOfBounds(index int) bool { if index < 0 || index >= a.inner.size { return true } return false } func (a LinkedList[T]) at(index int) *twoWayNode[T] { if index < (a.inner.size >> 1) { var x = a.inner.first for i := 0; i < index; i++ { x = x.next } return x } else { var x = a.inner.last for i := a.inner.size - 1; i > index; i-- { x = x.prev } return x } } func (a LinkedList[T]) linkFirst(element T) { var first = a.inner.first var newNode = &twoWayNode[T]{value: element, prev: nil, next: first} a.inner.first = newNode if first == nil { a.inner.last = newNode } else { first.prev = newNode } a.inner.size++ } func (a LinkedList[T]) linkLast(element T) { var last = a.inner.last var newNode = &twoWayNode[T]{value: element, next: nil, prev: last} a.inner.last = newNode if last == nil { a.inner.first = newNode } else { last.next = newNode } a.inner.size++ } func (a LinkedList[T]) linkBefore(element T, succ *twoWayNode[T]) { var pred = succ.prev var newNode = &twoWayNode[T]{value: element, prev: pred, next: succ} succ.prev = newNode if pred == nil { a.inner.first = newNode } else { pred.next = newNode } a.inner.size++ } func (a LinkedList[T]) unlink(x *twoWayNode[T]) T { var element = x.value var next = x.next var prev = x.prev if prev == nil { a.inner.first = next } else { prev.next = next x.prev = nil } if next == nil { a.inner.last = prev } else { next.prev = prev x.next = nil } var empty T x.value = empty a.inner.size-- return element } func (a LinkedList[T]) unlinkFirst(x *twoWayNode[T]) T { var element = x.value var next = x.next var empty T x.value = empty x.next = nil a.inner.first = next if next == nil { a.inner.last = nil } else { next.prev = nil } a.inner.size-- return element } func (a LinkedList[T]) unlinkLast(x *twoWayNode[T]) T { var element = x.value var prev = x.prev var empty T x.value = empty x.prev = nil a.inner.last = prev if prev == nil { a.inner.first = nil } else { prev.next = 
nil } a.inner.size-- return element } type linkedListIterator[T any] struct { current *twoWayNode[T] } func (a *linkedListIterator[T]) Next() Option[T] { if a.current != nil { var item = a.current.value a.current = a.current.next return Some(item) } return None[T]() }
linked_list.go
0.654453
0.513607
linked_list.go
starcoder
package asstags // Mov Set the position and Movement of the line (incremental) func Mov(args ...interface{}) string { lenARGS := len(args) if lenARGS > 6 || lenARGS%2 != 0 { panic("Wrong parameter count.") } if lenARGS == 2 { x, ok := args[0].(float64) if !ok { panic("1st parameter not type float64.") } y, ok1 := args[1].(float64) if !ok1 { panic("2d parameter not type float64.") } return Pos(x, y) } else if lenARGS == 4 { x1, ok1 := args[0].(float64) if !ok1 { panic("1st parameter not type float64.") } y1, ok2 := args[1].(float64) if !ok2 { panic("2d parameter not type float64.") } x2, ok3 := args[2].(float64) if !ok3 { panic("3rd parameter not type float64.") } y2, ok4 := args[3].(float64) if !ok4 { panic("4th parameter not type float64.") } return Move( x1, y1, x1+x2, y1+y2) } else if lenARGS == 6 { x1, ok1 := args[0].(float64) if !ok1 { panic("1st parameter not type float64.") } y1, ok2 := args[1].(float64) if !ok2 { panic("2d parameter not type float64.") } x2, ok3 := args[2].(float64) if !ok3 { panic("3rd parameter not type float64.") } y2, ok4 := args[3].(float64) if !ok4 { panic("4th parameter not type float64.") } return Move( x1, y1, x1+x2, y1+y2, args[4], args[5]) } else { return "" } } // Ejemplo: // CycleTags(200, 1000, 100, be(1), be(2)) // >>> '\\t(200,300,\\be1)\t(300,400,\\be2)..\\t(900,1000,\\be2)' func CycleTags(start, dur, interval int, tags ...string) (ttags string) { i := 0 n := len(tags) startTime := start endTime := startTime + interval for { ttags += T(startTime, endTime, tags[i%n]) startTime = endTime endTime += interval if endTime >= dur { ttags += T(startTime, dur, tags[i%n]) break } i++ } return } func FscAR(x, y float64, ar float64, res [2]int) string { return Fscx(x) + Fscy(y*float64(res[1])*(ar)/float64(res[0])) } func FscScale(x, y float64, scale [2]float64) string { return Fscx(x*scale[0]/100) + Fscy(y*scale[1]/100) }
asstags/extra.go
0.528533
0.451085
extra.go
starcoder
package decredmaterial

import (
	"image"
	"image/color"

	"gioui.org/f32"
	"gioui.org/layout"
	"gioui.org/op/clip"
	"gioui.org/op/paint"
)

// ProgressBar indicates the progress of a process. Height defines the thickness of the progressbar,
// BackgroundColor defines the color of the track, ProgressColor defines the color of the moving progress.
type ProgressBar struct {
	BackgroundColor color.RGBA
	ProgressColor   color.RGBA
	Progress        float64 // percentage complete, 0-100
}

// track lays out a rectangle to represent the level of progress yet to be completed.
func (p *ProgressBar) track(gtx *layout.Context) {
	borderedRectangle(gtx, p.BackgroundColor, gtx.Constraints.Width.Max, gtx.Constraints.Height.Max)
}

// value lays out a rectangle to represent the level of progress that has been completed.
func (p *ProgressBar) value(gtx *layout.Context) {
	// Progress is a percentage; clamp the computed width to the track.
	width := p.Progress / 100 * float64(gtx.Constraints.Width.Max)
	if width > float64(gtx.Constraints.Width.Max) {
		width = float64(gtx.Constraints.Width.Max)
	}
	borderedRectangle(gtx, p.ProgressColor, int(width), gtx.Constraints.Height.Max)
}

// borderedRectangle defines the dimensions of the rectangle, draws it and adds color it using the Fill method.
// Corner radius is one fifth of the bar height.
func borderedRectangle(gtx *layout.Context, color color.RGBA, x, y int) {
	br := float32(y / 5)
	rect := f32.Rectangle{
		Max: f32.Point{
			X: float32(x),
			Y: float32(y),
		},
	}
	clip.Rect{
		Rect: rect,
		NE:   br, NW: br, SE: br, SW: br,
	}.Op(gtx.Ops).Add(gtx.Ops)
	fillProgressBar(gtx, color, x, y)
}

// Layout lays out the track and level of progress on each other.
func (p *ProgressBar) Layout(gtx *layout.Context) {
	layout.Stack{}.Layout(gtx,
		layout.Stacked(func() {
			p.track(gtx)
			p.value(gtx)
		}),
	)
}

// ProgressBar returns a new ProgressBar instance.
func (t *Theme) ProgressBar(progress float64) *ProgressBar {
	return &ProgressBar{
		BackgroundColor: t.Color.Hint,
		ProgressColor:   t.Color.Success,
		Progress:        progress,
	}
}

// fillProgressBar paints an x-by-y rectangle in the given color and records
// the painted size as the widget's dimensions.
func fillProgressBar(gtx *layout.Context, col color.RGBA, x, y int) {
	d := image.Point{X: x, Y: y}
	dr := f32.Rectangle{
		Max: f32.Point{X: float32(d.X), Y: float32(d.Y)},
	}
	paint.ColorOp{Color: col}.Add(gtx.Ops)
	paint.PaintOp{Rect: dr}.Add(gtx.Ops)
	gtx.Dimensions = layout.Dimensions{Size: d}
}
ui/decredmaterial/progressbar.go
0.674479
0.457016
progressbar.go
starcoder
package system

import (
	"fmt"
)

// programStart is the first address of application memory. By CHIP-8
// convention 0x000-0x1FF is reserved for the interpreter (the font set
// lives there), so programs load and start at 0x200.
const programStart = 0x200

// Renderer is responsible for rendering pixels and handling user input
type Renderer interface {
	Start(vm *VirtualMachine)
}

// OpCode represents an instruction for the virtual machine
type OpCode uint16

// String renders the op code as four upper-case hex digits, e.g. "A2F0".
func (o OpCode) String() string {
	return fmt.Sprintf("%04X", uint16(o))
}

// Display represents a 64x32 pixel matrix; each row is one uint64 bitmask
// with x=0 at the most significant bit.
type Display [32]uint64

// PixelSetAt determines if the pixel located at coordinate (x, y) is on
func (d *Display) PixelSetAt(x int, y int) bool {
	columnFilter := uint64(1) << (63 - uint(x))
	return d[y]&columnFilter == columnFilter
}

// VirtualMachine the core CHIP8 architecture, containing memory, registers, input, and pixel data
// For reference, see: http://devernay.free.fr/hacks/chip8/C8TECH10.HTM#2.0
type VirtualMachine struct {
	Memory         [4096]byte // http://devernay.free.fr/hacks/chip8/C8TECH10.HTM#2.1
	Registers      [16]byte   // Abbreviated as V0-VF: http://devernay.free.fr/hacks/chip8/C8TECH10.HTM#2.2
	Stack          []uint16
	ProgramCounter uint16 // Abbreviated as PC
	IndexRegister  uint16 // Abbreviated as I: http://devernay.free.fr/hacks/chip8/C8TECH10.HTM#2.2
	DelayTimer     byte   // Abbreviated as DT
	SoundTimer     byte   // Abbreviated as ST

	// Represents the state of key presses - http://devernay.free.fr/hacks/chip8/C8TECH10.HTM#2.3
	Keyboard [16]bool

	// The state of the pixels - http://devernay.free.fr/hacks/chip8/C8TECH10.HTM#2.4
	Pixels Display

	// Should the machine be running
	Running bool
}

// NewVirtualMachine creates a new virtual machine, loading fonts at the start of the memory space
func NewVirtualMachine() VirtualMachine {
	vm := VirtualMachine{}
	vm.Running = true

	// Load the font set into the lower "program" memory space.
	copy(vm.Memory[:], fontSet[:])

	return vm
}

// Load data into memory. By convention, 0x0 - 0x200 is reserved, so data is
// loaded starting at memory address 0x200 and the program counter is reset
// to that address. Data longer than the remaining memory is truncated
// (the previous element-by-element copy would have panicked instead).
func (vm *VirtualMachine) Load(data []byte) {
	copy(vm.Memory[programStart:], data)
	vm.ProgramCounter = programStart
}

// OpCodeAt returns an op code at the given memory address.
// Op codes are two bytes stored big-endian.
func (vm *VirtualMachine) OpCodeAt(address uint16) OpCode {
	firstByte := uint16(vm.Memory[address])
	secondByte := uint16(vm.Memory[address+1])
	return OpCode((firstByte << 8) + secondByte)
}

// CurrentOpCode returns the op code referenced by the program counter register
func (vm *VirtualMachine) CurrentOpCode() OpCode {
	return vm.OpCodeAt(vm.ProgramCounter)
}

// IncrementPC advances the program counter to reference the next op code
// (each op code is two bytes wide).
func (vm *VirtualMachine) IncrementPC() {
	vm.ProgramCounter += 2
}

// DecrementTimers decrements delay timer and the sound timer if they are positive
func (vm *VirtualMachine) DecrementTimers() {
	if vm.DelayTimer > 0 {
		vm.DelayTimer--
	}

	if vm.SoundTimer > 0 {
		vm.SoundTimer--
	}
}

// CHIP-8 Font Set: 16 sprites (0-F), 5 bytes each.
// See here for reference: http://devernay.free.fr/hacks/chip8/C8TECH10.HTM#2.4
var fontSet = [80]byte{
	0xF0, 0x90, 0x90, 0x90, 0xF0, // 0
	0x20, 0x60, 0x20, 0x20, 0x70, // 1
	0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2
	0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3
	0x90, 0x90, 0xF0, 0x10, 0x10, // 4
	0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5
	0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6
	0xF0, 0x10, 0x20, 0x40, 0x40, // 7
	0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8
	0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9
	0xF0, 0x90, 0xF0, 0x90, 0x90, // A
	0xE0, 0x90, 0xE0, 0x90, 0xE0, // B
	0xF0, 0x80, 0x80, 0x80, 0xF0, // C
	0xE0, 0x90, 0x90, 0x90, 0xE0, // D
	0xF0, 0x80, 0xF0, 0x80, 0xF0, // E
	0xF0, 0x80, 0xF0, 0x80, 0x80, // F
}
system/machine.go
0.776411
0.446977
machine.go
starcoder
package common

import (
	"sync"

	"github.com/tensorflow/tensorflow/tensorflow/go/core/framework/types_go_proto"
)

// EmbeddingTable maps item ids to their embedding vectors. Vectors are
// created lazily on first lookup; the RWMutex guards the map itself.
type EmbeddingTable struct {
	Dim              int64                   // length of each embedding vector
	Initializer      string                  // initializer name; only "uniform" is supported
	EmbeddingVectors map[int64]*Tensor       // id -> embedding vector
	Dtype            types_go_proto.DataType // element dtype of every vector
	lock             sync.RWMutex
}

// NewEmbeddingTable creates an embedding table instance
func NewEmbeddingTable(dim int64, initializer string, dtype types_go_proto.DataType) *EmbeddingTable {
	return &EmbeddingTable{
		Dim:              dim,
		Initializer:      initializer,
		EmbeddingVectors: make(map[int64]*Tensor),
		Dtype:            dtype,
	}
}

// GetEmbeddingVector returns an REFERENCE of embedding vector giving an index,
// creating and initializing it on first use.
func (e *EmbeddingTable) GetEmbeddingVector(index int64) *Tensor {
	// Fast path: read lock only.
	e.lock.RLock()
	if value, ok := e.EmbeddingVectors[index]; ok {
		e.lock.RUnlock()
		return value
	}
	e.lock.RUnlock()

	e.lock.Lock()
	defer e.lock.Unlock()
	// Fix: re-check under the write lock. Another goroutine may have created
	// the vector in the gap between RUnlock and Lock; without this re-check
	// two callers could each install a vector for the same index and end up
	// holding references to different tensors.
	if value, ok := e.EmbeddingVectors[index]; ok {
		return value
	}
	newVector := NewEmptyVector(e.Dim, e.Dtype)
	// TODO(qijun) only support uniform initializer
	if e.Initializer == "uniform" {
		initializerFn := RandomUniform(-0.05, 0.05, int64(len(e.EmbeddingVectors)))
		initializerFn(newVector)
	}
	e.EmbeddingVectors[index] = newVector
	return newVector
}

// GetEmbeddingVectors returns COPYS of embedding vectors giving an array of indices
func (e *EmbeddingTable) GetEmbeddingVectors(indices []int64) *Tensor {
	dim := []int64{int64(len(indices)), e.Dim}
	tensor := NewEmptyTensor(dim, e.Dtype)
	for i, index := range indices {
		tensor.SetRow(int64(i), e.GetEmbeddingVector(index))
	}
	return tensor
}

// SetEmbeddingVectors sets (indices, value) pair to embedding vector.
// NOTE(review): the tensor buffers themselves are not guarded by the table
// lock, so concurrent writers to the same id race on the buffer contents.
func (e *EmbeddingTable) SetEmbeddingVectors(idxslice *IndexedSlices) error {
	for i, index := range idxslice.Ids {
		value := e.GetEmbeddingVector(index)
		copy(value.Buffer, idxslice.ConcatTensors.GetRow(int64(i)).Buffer)
	}
	return nil
}

// ToIndexedSlices transforms embedding table format to indexed slices format
func (e *EmbeddingTable) ToIndexedSlices() *IndexedSlices {
	e.lock.RLock()
	ids := make([]int64, 0, len(e.EmbeddingVectors))
	for k := range e.EmbeddingVectors {
		ids = append(ids, k)
	}
	e.lock.RUnlock()
	return NewIndexedSlices(e.GetEmbeddingVectors(ids), ids)
}
elasticdl/pkg/common/embedding_table.go
0.571169
0.407098
embedding_table.go
starcoder
package executetest

import (
	"fmt"

	"github.com/apache/arrow/go/arrow/array"
	"github.com/influxdata/flux"
	"github.com/influxdata/flux/arrow"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/semantic"
	"github.com/influxdata/flux/values"
)

// Table is an implementation of execute.Table
// It is designed to make it easy to statically declare the data within the table.
// Not all fields need to be set. See comments on each field.
// Use Normalize to ensure that all fields are set before equality comparisons.
type Table struct {
	// GroupKey of the table. Does not need to be set explicitly.
	GroupKey flux.GroupKey

	// KeyCols is a list of column that are part of the group key.
	// The column type is deduced from the ColMeta slice.
	KeyCols []string

	// KeyValues is a list of values for the group key columns.
	// Only needs to be set when no data is present on the table.
	KeyValues []interface{}

	// ColMeta is a list of columns of the table.
	ColMeta []flux.ColMeta

	// Data is a list of rows, i.e. Data[row][col]
	// Each row must be a list with length equal to len(ColMeta)
	Data [][]interface{}
}

// Normalize ensures all fields of the table are set correctly:
// it derives the GroupKey from KeyCols/KeyValues (taking key values from the
// first data row when present) if it has not been set yet.
func (t *Table) Normalize() {
	if t.GroupKey == nil {
		cols := make([]flux.ColMeta, len(t.KeyCols))
		vs := make([]values.Value, len(t.KeyCols))
		if len(t.KeyValues) != len(t.KeyCols) {
			t.KeyValues = make([]interface{}, len(t.KeyCols))
		}
		for j, label := range t.KeyCols {
			idx := execute.ColIdx(label, t.ColMeta)
			if idx < 0 {
				panic(fmt.Errorf("table invalid: missing group column %q", label))
			}
			cols[j] = t.ColMeta[idx]
			if len(t.Data) > 0 {
				t.KeyValues[j] = t.Data[0][idx]
			}
			var v values.Value
			if t.KeyValues[j] == nil {
				v = values.NewNull(flux.SemanticType(t.ColMeta[idx].Type))
			} else {
				v = values.New(t.KeyValues[j])
				if v.Type() == semantic.Invalid {
					panic(fmt.Errorf("invalid value: %s", t.KeyValues[j]))
				}
			}
			vs[j] = v
		}
		t.GroupKey = execute.NewGroupKey(cols, vs)
	}
}

func (t *Table) Empty() bool {
	return len(t.Data) == 0
}

func (t *Table) RefCount(n int) {}

func (t *Table) Cols() []flux.ColMeta {
	return t.ColMeta
}

func (t *Table) Key() flux.GroupKey {
	t.Normalize()
	return t.GroupKey
}

// buildArrays materializes t.Data into one Arrow array per column of
// t.ColMeta. Nil cells become nulls; times are encoded as int64 and strings
// as binary arrays. The caller owns the returned arrays and must Release
// them when appropriate. (Shared by Table.Do and RowWiseTable.Do, which
// previously duplicated this whole switch.)
func (t *Table) buildArrays() []array.Interface {
	cols := make([]array.Interface, len(t.ColMeta))
	for j, col := range t.ColMeta {
		switch col.Type {
		case flux.TBool:
			b := arrow.NewBoolBuilder(nil)
			for i := range t.Data {
				if v := t.Data[i][j]; v != nil {
					b.Append(v.(bool))
				} else {
					b.AppendNull()
				}
			}
			cols[j] = b.NewBooleanArray()
			b.Release()
		case flux.TFloat:
			b := arrow.NewFloatBuilder(nil)
			for i := range t.Data {
				if v := t.Data[i][j]; v != nil {
					b.Append(v.(float64))
				} else {
					b.AppendNull()
				}
			}
			cols[j] = b.NewFloat64Array()
			b.Release()
		case flux.TInt:
			b := arrow.NewIntBuilder(nil)
			for i := range t.Data {
				if v := t.Data[i][j]; v != nil {
					b.Append(v.(int64))
				} else {
					b.AppendNull()
				}
			}
			cols[j] = b.NewInt64Array()
			b.Release()
		case flux.TString:
			b := arrow.NewStringBuilder(nil)
			for i := range t.Data {
				if v := t.Data[i][j]; v != nil {
					b.AppendString(v.(string))
				} else {
					b.AppendNull()
				}
			}
			cols[j] = b.NewBinaryArray()
			b.Release()
		case flux.TTime:
			b := arrow.NewIntBuilder(nil)
			for i := range t.Data {
				if v := t.Data[i][j]; v != nil {
					b.Append(int64(v.(values.Time)))
				} else {
					b.AppendNull()
				}
			}
			cols[j] = b.NewInt64Array()
			b.Release()
		case flux.TUInt:
			b := arrow.NewUintBuilder(nil)
			for i := range t.Data {
				if v := t.Data[i][j]; v != nil {
					b.Append(v.(uint64))
				} else {
					b.AppendNull()
				}
			}
			cols[j] = b.NewUint64Array()
			b.Release()
		}
	}
	return cols
}

// Do invokes f exactly once with a column reader covering the whole table.
func (t *Table) Do(f func(flux.ColReader) error) error {
	cr := &ColReader{
		key:  t.Key(),
		meta: t.ColMeta,
		cols: t.buildArrays(),
	}
	return f(cr)
}

// ColReader is a static flux.ColReader over pre-built Arrow arrays.
type ColReader struct {
	key  flux.GroupKey
	meta []flux.ColMeta
	cols []array.Interface
}

func (cr *ColReader) Key() flux.GroupKey {
	return cr.key
}

func (cr *ColReader) Cols() []flux.ColMeta {
	return cr.meta
}

func (cr *ColReader) Len() int {
	if len(cr.cols) == 0 {
		return 0
	}
	return cr.cols[0].Len()
}

func (cr *ColReader) Bools(j int) *array.Boolean {
	return cr.cols[j].(*array.Boolean)
}

func (cr *ColReader) Ints(j int) *array.Int64 {
	return cr.cols[j].(*array.Int64)
}

func (cr *ColReader) UInts(j int) *array.Uint64 {
	return cr.cols[j].(*array.Uint64)
}

func (cr *ColReader) Floats(j int) *array.Float64 {
	return cr.cols[j].(*array.Float64)
}

func (cr *ColReader) Strings(j int) *array.Binary {
	return cr.cols[j].(*array.Binary)
}

func (cr *ColReader) Times(j int) *array.Int64 {
	return cr.cols[j].(*array.Int64)
}

// RowWiseTable is a flux Table implementation that
// calls f once for each row in its Do method.
type RowWiseTable struct {
	*Table
}

// Do calls f once for each row in the table, handing it one-row slices of
// the materialized columns.
func (t *RowWiseTable) Do(f func(flux.ColReader) error) error {
	cols := t.buildArrays()
	release := func(cols []array.Interface) {
		for _, arr := range cols {
			arr.Release()
		}
	}
	defer release(cols)

	l := cols[0].Len()
	for i := 0; i < l; i++ {
		row := make([]array.Interface, len(t.ColMeta))
		for j, col := range t.ColMeta {
			switch col.Type {
			case flux.TBool:
				row[j] = arrow.BoolSlice(cols[j].(*array.Boolean), i, i+1)
			case flux.TFloat:
				row[j] = arrow.FloatSlice(cols[j].(*array.Float64), i, i+1)
			case flux.TInt:
				row[j] = arrow.IntSlice(cols[j].(*array.Int64), i, i+1)
			case flux.TString:
				row[j] = arrow.StringSlice(cols[j].(*array.Binary), i, i+1)
			case flux.TTime:
				row[j] = arrow.IntSlice(cols[j].(*array.Int64), i, i+1)
			case flux.TUInt:
				row[j] = arrow.UintSlice(cols[j].(*array.Uint64), i, i+1)
			}
		}
		if err := f(&ColReader{
			key:  t.Key(),
			meta: t.ColMeta,
			cols: row,
		}); err != nil {
			return err
		}
		release(row)
	}
	return nil
}

// TablesFromCache drains every table from the cache, converting each to a
// static *Table and expiring it.
func TablesFromCache(c execute.DataCache) (tables []*Table, err error) {
	c.ForEach(func(key flux.GroupKey) {
		if err != nil {
			return
		}
		var tbl flux.Table
		tbl, err = c.Table(key)
		if err != nil {
			return
		}
		var cb *Table
		cb, err = ConvertTable(tbl)
		if err != nil {
			return
		}
		tables = append(tables, cb)
		c.ExpireTable(key)
	})
	// Fix: propagate any error captured inside the closure; the previous
	// version returned a hard-coded nil error.
	return tables, err
}

// ConvertTable copies an arbitrary flux.Table into the static Table form
// used by these test helpers.
func ConvertTable(tbl flux.Table) (*Table, error) {
	key := tbl.Key()
	blk := &Table{
		GroupKey: key,
		ColMeta:  tbl.Cols(),
	}

	keyCols := key.Cols()
	if len(keyCols) > 0 {
		blk.KeyCols = make([]string, len(keyCols))
		blk.KeyValues = make([]interface{}, len(keyCols))
		for j, c := range keyCols {
			blk.KeyCols[j] = c.Label
			var v interface{}
			if !key.IsNull(j) {
				switch c.Type {
				case flux.TBool:
					v = key.ValueBool(j)
				case flux.TUInt:
					v = key.ValueUInt(j)
				case flux.TInt:
					v = key.ValueInt(j)
				case flux.TFloat:
					v = key.ValueFloat(j)
				case flux.TString:
					v = key.ValueString(j)
				case flux.TTime:
					v = key.ValueTime(j)
				default:
					return nil, fmt.Errorf("unsupported column type %v", c.Type)
				}
			}
			blk.KeyValues[j] = v
		}
	}

	err := tbl.Do(func(cr flux.ColReader) error {
		l := cr.Len()
		for i := 0; i < l; i++ {
			row := make([]interface{}, len(blk.ColMeta))
			for j, c := range blk.ColMeta {
				switch c.Type {
				case flux.TBool:
					if col := cr.Bools(j); col.IsValid(i) {
						row[j] = col.Value(i)
					}
				case flux.TInt:
					if col := cr.Ints(j); col.IsValid(i) {
						row[j] = col.Value(i)
					}
				case flux.TUInt:
					if col := cr.UInts(j); col.IsValid(i) {
						row[j] = col.Value(i)
					}
				case flux.TFloat:
					if col := cr.Floats(j); col.IsValid(i) {
						row[j] = col.Value(i)
					}
				case flux.TString:
					if col := cr.Strings(j); col.IsValid(i) {
						row[j] = col.ValueString(i)
					}
				case flux.TTime:
					if col := cr.Times(j); col.IsValid(i) {
						row[j] = values.Time(col.Value(i))
					}
				default:
					panic(fmt.Errorf("unknown column type %s", c.Type))
				}
			}
			blk.Data = append(blk.Data, row)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return blk, nil
}

// SortedTables sorts tables by their group key.
type SortedTables []*Table

func (b SortedTables) Len() int {
	return len(b)
}

func (b SortedTables) Less(i int, j int) bool {
	return b[i].Key().Less(b[j].Key())
}

func (b SortedTables) Swap(i int, j int) {
	b[i], b[j] = b[j], b[i]
}

// NormalizeTables ensures that each table is normalized
func NormalizeTables(bs []*Table) {
	for _, b := range bs {
		b.Key()
	}
}

// MustCopyTable copies a table, ignoring any copy error.
// NOTE(review): despite the Must prefix this swallows the error instead of
// panicking; preserved as-is because test callers may rely on it.
func MustCopyTable(tbl flux.Table) flux.Table {
	cpy, _ := execute.CopyTable(tbl, UnlimitedAllocator)
	return cpy
}
execute/executetest/table.go
0.689724
0.447823
table.go
starcoder
package run

import (
	"regexp"
	"strings"

	log "github.com/sirupsen/logrus"
)

// labelNodeRE splits "<labelSpec>@<node>" at the LAST '@' so label values may
// themselves contain '@'. Compiled once at package scope instead of on every
// extractLabelNode call.
var labelNodeRE = regexp.MustCompile(`^(.*)@([^@]+)$`)

// mapNodesToLabelSpecs maps nodes to labelSpecs
func mapNodesToLabelSpecs(specs []string, createdNodes []string) (map[string][]string, error) {
	// check node-specifier possibilities: the well-known role names plus every
	// created node name
	possibleNodeSpecifiers := []string{"all", "workers", "agents", "server", "master"}
	possibleNodeSpecifiers = append(possibleNodeSpecifiers, createdNodes...)

	nodeToLabelSpecMap := make(map[string][]string)

	for _, spec := range specs {
		labelSpec, node := extractLabelNode(spec)

		// check if node-specifier is valid (either a role or a name) and append to list if matches
		nodeFound := false
		for _, name := range possibleNodeSpecifiers {
			if node == name {
				nodeFound = true
				nodeToLabelSpecMap[node] = append(nodeToLabelSpecMap[node], labelSpec)
				break
			}
		}

		// node extraction was a false positive, use full spec with default node
		if !nodeFound {
			nodeToLabelSpecMap[defaultLabelNodes] = append(nodeToLabelSpecMap[defaultLabelNodes], spec)
		}
	}

	return nodeToLabelSpecMap, nil
}

// extractLabelNode separates the node specification from the actual label specs
func extractLabelNode(spec string) (string, string) {
	// label defaults to full spec
	labelSpec := spec

	// node defaults to "all"
	node := defaultLabelNodes

	// only split at the last "@"
	match := labelNodeRE.FindStringSubmatch(spec)
	if len(match) > 0 {
		labelSpec = match[1]
		node = match[2]
	}

	return labelSpec, node
}

// splitLabel separates the label key from the label value
func splitLabel(label string) (string, string) {
	// split only on first '=' sign (like `docker run` do)
	labelSlice := strings.SplitN(label, "=", 2)
	if len(labelSlice) > 1 {
		return labelSlice[0], labelSlice[1]
	}

	// defaults to label key with empty value (like `docker run` do)
	return label, ""
}

// appendUnique appends every element of src to dst that is not already
// present, preserving order. (Replaces two hand-rolled copies of the same
// dedup loop in MergeLabelSpecs.)
func appendUnique(dst []string, src []string) []string {
	for _, v := range src {
		exists := false
		for _, existing := range dst {
			if v == existing {
				exists = true
				break
			}
		}
		if !exists {
			dst = append(dst, v)
		}
	}
	return dst
}

// MergeLabelSpecs merges labels for a given node, combining the specs
// attached to the node's role group(s) with those attached to it by name,
// without duplicates.
func MergeLabelSpecs(nodeToLabelSpecMap map[string][]string, role, name string) ([]string, error) {
	labelSpecs := []string{}

	// add labelSpecs according to node role
	for _, group := range nodeRuleGroupsMap[role] {
		labelSpecs = appendUnique(labelSpecs, nodeToLabelSpecMap[group])
	}

	// add labelSpecs according to node name
	labelSpecs = appendUnique(labelSpecs, nodeToLabelSpecMap[name])

	return labelSpecs, nil
}

// MergeLabels merges list of labels into a label map, warning when a key is
// overridden.
func MergeLabels(labelMap map[string]string, labels []string) map[string]string {
	for _, label := range labels {
		labelKey, labelValue := splitLabel(label)

		if _, found := labelMap[labelKey]; found {
			log.Warningf("Overriding already existing label [%s]", labelKey)
		}
		labelMap[labelKey] = labelValue
	}
	return labelMap
}
cli/label.go
0.645232
0.415077
label.go
starcoder
package collector

// see https://www.tinkerforge.com/en/doc/Software/Device_Identifier.html

// DeviceName maps a Tinkerforge device identifier to its human-readable
// product name. Unknown ids yield the empty string (the map zero value).
func DeviceName(id uint16) string {
	return deviceMap[id]
}

// deviceMap is a static lookup table of Tinkerforge device identifiers,
// transcribed from the Device Identifier documentation linked above.
var deviceMap = map[uint16]string{
	11: "DC Brick", 13: "Master Brick", 14: "Servo Brick", 15: "Stepper Brick",
	16: "IMU Brick", 17: "RED Brick", 18: "IMU Brick 2.0", 19: "Silent Stepper Brick",
	21: "Ambient Light Bricklet", 23: "Current12 Bricklet", 24: "Current25 Bricklet", 25: "Distance IR Bricklet",
	26: "Dual Relay Bricklet", 27: "Humidity Bricklet", 28: "IO-16 Bricklet", 29: "IO-4 Bricklet",
	111: "HAT Brick", 112: "HAT Zero Brick",
	210: "Joystick Bricklet", 211: "LCD 16x2 Bricklet", 212: "LCD 20x4 Bricklet", 213: "Linear Poti Bricklet",
	214: "Piezo Buzzer Bricklet", 215: "Rotary Poti Bricklet", 216: "Temperature Bricklet", 217: "Temperature IR Bricklet",
	218: "Voltage Bricklet", 219: "Analog In Bricklet", 220: "Analog Out Bricklet", 221: "Barometer Bricklet",
	222: "GPS Bricklet", 223: "Industrial Digital In 4 Bricklet", 224: "Industrial Digital Out 4 Bricklet", 225: "Industrial Quad Relay Bricklet",
	226: "PTC Bricklet", 227: "Voltage/Current Bricklet", 228: "Industrial Dual 0-20mA Bricklet", 229: "Distance US Bricklet",
	230: "Dual Button Bricklet", 231: "LED Strip Bricklet", 232: "Moisture Bricklet", 233: "Motion Detector Bricklet",
	234: "Multi Touch Bricklet", 235: "Remote Switch Bricklet", 236: "Rotary Encoder Bricklet", 237: "Segment Display 4x7 Bricklet",
	238: "Sound Intensity Bricklet", 239: "Tilt Bricklet", 240: "Hall Effect Bricklet", 241: "Line Bricklet",
	242: "Piezo Speaker Bricklet", 243: "Color Bricklet", 244: "Solid State Relay Bricklet", 246: "NFC/RFID Bricklet",
	249: "Industrial Dual Analog In Bricklet", 250: "Accelerometer Bricklet", 251: "Analog In Bricklet 2.0", 253: "Load Cell Bricklet",
	254: "RS232 Bricklet", 255: "Laser Range Finder Bricklet", 256: "Analog Out Bricklet 2.0", 258: "Industrial Analog Out Bricklet",
	259: "Ambient Light Bricklet 2.0", 260: "Dust Detector Bricklet", 262: "CO2 Bricklet", 263: "OLED 128x64 Bricklet",
	264: "OLED 64x48 Bricklet", 265: "UV Light Bricklet", 266: "Thermocouple Bricklet", 267: "Motorized Linear Poti Bricklet",
	268: "Real-Time Clock Bricklet", 270: "CAN Bricklet", 271: "RGB LED Bricklet", 272: "RGB LED Matrix Bricklet",
	276: "GPS Bricklet 2.0", 277: "RS485 Bricklet", 278: "Thermal Imaging Bricklet", 279: "XMC1400 Breakout Bricklet",
	282: "RGB LED Button Bricklet", 283: "Humidity Bricklet 2.0", 284: "Industrial Dual Relay Bricklet", 285: "DMX Bricklet",
	286: "NFC Bricklet", 288: "Outdoor Weather Bricklet", 289: "Remote Switch Bricklet 2.0", 290: "Sound Pressure Level Bricklet",
	291: "Temperature IR Bricklet 2.0", 292: "Motion Detector Bricklet 2.0", 293: "Industrial Counter Bricklet", 294: "Rotary Encoder Bricklet 2.0",
	295: "Analog In Bricklet 3.0", 296: "Solid State Relay Bricklet 2.0", 297: "Air Quality Bricklet", 298: "LCD 128x64 Bricklet",
	299: "Distance US Bricklet 2.0",
	2100: "Industrial Digital In 4 Bricklet 2.0", 2101: "PTC Bricklet 2.0", 2102: "Industrial Quad Relay Bricklet 2.0", 2103: "LED Strip Bricklet 2.0",
	2104: "Load Cell Bricklet 2.0", 2105: "Voltage/Current Bricklet 2.0", 2106: "Real-Time Clock Bricklet 2.0", 2107: "CAN Bricklet 2.0",
	2108: "RS232 Bricklet 2.0", 2109: "Thermocouple Bricklet 2.0", 2110: "Particulate Matter Bricklet", 2111: "IO-4 Bricklet 2.0",
	2112: "OLED 128x64 Bricklet 2.0", 2113: "Temperature Bricklet 2.0", 2114: "IO-16 Bricklet 2.0", 2115: "Analog Out Bricklet 3.0",
	2116: "Industrial Analog Out Bricklet 2.0", 2117: "Barometer Bricklet 2.0", 2118: "UV Light Bricklet 2.0", 2119: "Dual Button Bricklet 2.0",
	2120: "Industrial Dual 0-20mA Bricklet 2.0", 2121: "Industrial Dual Analog In Bricklet 2.0", 2122: "Isolator Bricklet", 2123: "One Wire Bricklet",
	2124: "Industrial Digital Out 4 Bricklet 2.0", 2125: "Distance IR Bricklet 2.0", 2127: "RGB LED Bricklet 2.0", 2128: "Color Bricklet 2.0",
	2129: "Multi Touch Bricklet 2.0", 2130: "Accelerometer Bricklet 2.0", 2131: "Ambient Light Bricklet 3.0", 2132: "Hall Effect Bricklet 2.0",
	2137: "Segment Display 4x7 Bricklet 2.0", 2138: "Joystick Bricklet 2.0", 2139: "Linear Poti Bricklet 2.0", 2140: "Rotary Poti Bricklet 2.0",
	2144: "Laser Range Finder Bricklet 2.0", 2145: "Piezo Speaker Bricklet 2.0", 2146: "E-Paper 296x128 Bricklet", 2147: "CO2 Bricklet 2.0",
	2152: "Energy Monitor Bricklet", 2153: "Compass Bricklet", 2156: "Performance DC Bricklet", 2157: "Servo Bricklet 2.0",
	2161: "IMU Bricklet 3.0", 2162: "Industrial Dual AC Relay Bricklet", 2164: "Industrial PTC Bricklet", 2165: "DC Bricklet 2.0",
	2166: "Silent Stepper Bricklet 2.0",
}
collector/devices.go
0.653569
0.64848
devices.go
starcoder
package libdconf

import (
	"strconv"
	"strings"
)

// NewSchemaType will attempt to convert the provided raw dconf value into a
// SchemaType, classifying it as bool, explicit "uint32 N", int32, float64
// (double) or string — in that precedence order.
//
// Fix over the previous version: the int32 check now runs BEFORE the float
// check. strconv.ParseFloat accepts every plain integer literal (e.g. "42"),
// so the old float-first ordering made the int32 branch unreachable and
// classified all integers as float64. This matches GVariant semantics where
// "1" is an int32 and "1.0" is a double.
func NewSchemaType(rawVal string) (sT *SchemaType, parseErr error) {
	sT = &SchemaType{Val: rawVal} // Ensure we always set the raw value

	if (rawVal == "false") || (rawVal == "true") { // Is a boolean
		sT.Type = "bool" // Define as a boolean
		sT.BoolVal = rawVal == "true"
	} else if strings.HasPrefix(rawVal, "uint32") { // If the string starts with uint32
		// Strip the "uint32" marker and surrounding whitespace before parsing.
		rawVal = strings.Replace(rawVal, "uint32", "", -1)
		rawVal = strings.TrimSpace(rawVal)

		var i uint64
		i, parseErr = strconv.ParseUint(rawVal, 10, 32)

		if parseErr != nil { // Failed to parse
			return
		}
		sT.Type = "uint32"
		sT.UintVal = uint32(i) // Convert the uint64 to uint32 and set
	} else if inty, intParseErr := strconv.ParseInt(rawVal, 10, 32); intParseErr == nil { // Plain integer literal
		sT.IntVal = int32(inty)
		sT.Type = "int32"
	} else if floaty, floatParseErr := strconv.ParseFloat(rawVal, 64); floatParseErr == nil { // Attempt to convert to a float64
		sT.FloatVal = floaty
		// Remember an explicit ".0" suffix so double -> Go float64 -> double
		// conversion can round-trip it.
		sT.FloatHadTrailingZero = strings.HasSuffix(rawVal, ".0")
		sT.Type = "float64"
	} else { // Treat as a string, Val already set
		sT.Type = "string" // Define as a string
	}

	return
}

// Duplicate will duplicate this SchemaType
func (sT *SchemaType) Duplicate() *SchemaType {
	newSt := SchemaType{
		Type:                 sT.Type,
		BoolVal:              sT.BoolVal,
		FloatHadTrailingZero: sT.FloatHadTrailingZero,
		FloatVal:             sT.FloatVal,
		IntVal:               sT.IntVal,
		UintVal:              sT.UintVal,
		Val:                  sT.Val,
	}
	return &newSt
}

// Matches will check if the provided SchemaType matches this one,
// comparing only the value slot that corresponds to the shared type.
func (sT *SchemaType) Matches(oST *SchemaType) (matches bool) {
	if sT.Type != oST.Type { // Types don't match
		return
	}

	switch sT.Type {
	case "bool":
		return sT.BoolVal == oST.BoolVal
	case "uint32":
		return sT.UintVal == oST.UintVal
	case "int32":
		return sT.IntVal == oST.IntVal
	case "float64":
		return sT.FloatVal == oST.FloatVal
	default:
		return sT.Val == oST.Val
	}
}

// String will convert our SchemaType back to a string
// Note this only converts the value itself and not the key
func (sT *SchemaType) String() string {
	switch sT.Type {
	case "bool":
		return strconv.FormatBool(sT.BoolVal)
	case "uint32":
		return "uint32 " + strconv.FormatUint(uint64(sT.UintVal), 10)
	case "int32":
		return strconv.FormatInt(int64(sT.IntVal), 10)
	case "float64":
		floatString := strconv.FormatFloat(sT.FloatVal, 'G', -1, 64) // Use G for max digits, no trailing zeroes
		if !strings.Contains(floatString, ".") && sT.FloatHadTrailingZero { // Has no decimal and had one when we created the type
			floatString += ".0" // Add the .0 back
		}
		return floatString
	default: // Fall back (string, array string, etc)
		return sT.Val // Add our value directly
	}
}
schemaType.go
0.58261
0.489076
schemaType.go
starcoder
package metadata var ( // NullPath means no path NullPath = Path([]string{}) ) // Path is used to identify a particle of metadata. The path can be strings separated by / as in a URL. type Path []string // Clean scrubs the path to remove any empty string or . or .. and collapse the path into a concise form. // It's similar to path/filepath.Clean in the standard lib. func (p Path) Clean() Path { this := []string(p) copy := []string{} for _, v := range this { switch v { case "", ".": case "..": if len(copy) == 0 { copy = append(copy, "..") } else { copy = copy[0 : len(copy)-1] if len(copy) == 0 { return NullPath } } default: copy = append(copy, v) } } return Path(copy) } // Len returns the length of the path func (p Path) Len() int { return len([]string(p)) } // Index returns the ith component in the path func (p Path) Index(i int) *string { if p.Len() <= i { return nil } copy := []string(p)[i] return &copy } // Shift returns a new path that's shifted i positions to the left -- ith child of the head at index=0 func (p Path) Shift(i int) Path { len := p.Len() - i if len <= 0 { return Path([]string{}) } new := make([]string, len) copy(new, []string(p)[i:]) return Path(new) } // Dir returns the 'dir' of the path func (p Path) Dir() Path { pp := p.Clean() if len(pp) > 1 { return p[0 : len(pp)-1] } return Path([]string{"."}) } // Base returns the base of the path func (p Path) Base() string { pp := p.Clean() return pp[len(pp)-1] } // Join joins the input as a child of this path func (p Path) Join(child string) Path { return p.Sub(Path([]string{child})) } // Sub joins the child to the parent func (p Path) Sub(child Path) Path { pp := p.Clean() return Path(append(pp, []string(child)...)) } // Rel returns a new path that is a child of the input from this path. // e.g. For a path a/b/c/d Rel(a/b/) returns c/d. NullPath is returned if // the two are not relative to one another. 
func (p Path) Rel(path Path) Path { this := []string(p.Clean()) parent := []string(path.Clean()) if len(this) < len(parent) { return NullPath } for i := 0; i < len(parent); i++ { if parent[i] != this[i] { return NullPath } } return Path(this[len(parent):]) }
vendor/github.com/docker/infrakit/pkg/spi/metadata/path.go
0.625667
0.410461
path.go
starcoder
package raytracer import ( "container/heap" "fmt" "gonum.org/v1/gonum/spatial/r3" "math" "math/rand" ) type boundingVolumeHierarchyNode struct { nodeId int pMin r3.Vec pMax r3.Vec leaf bool shape *Shape children []*boundingVolumeHierarchyNode } type boundingVolumeHierarchy struct { root boundingVolumeHierarchyNode extents []r3.Vec shapes *[]Shape } // bounding box hierarchy where boundaries are computed in a box shape func NewBoundingVolumeHierarchy(shapes *[]Shape) *boundingVolumeHierarchy { fmt.Printf("Building BoundingVolumeHierarchy\n") pMin := r3.Vec{X: math.MaxFloat64, Y: math.MaxFloat64, Z: math.MaxFloat64} pMax := r3.Vec{X: float64(math.MinInt64), Y: float64(math.MinInt64), Z: float64(math.MinInt64)} for _, s := range *shapes { lowest, highest := s.computeSquareBounds() pMin.X = math.Min(pMin.X, lowest.X) pMin.Y = math.Min(pMin.Y, lowest.Y) pMin.Z = math.Min(pMin.Z, lowest.Z) pMax.X = math.Max(pMax.X, highest.X) pMax.Y = math.Max(pMax.Y, highest.Y) pMax.Z = math.Max(pMax.Z, highest.Z) } // add the max jitter than can happen when jittering the centroid of shapes pMin = r3.Sub(pMin, r3.Scale(bvhCentroidJitterFactor, r3.Vec{X: 1, Y: 1, Z: 1})) pMax = r3.Add(pMax, r3.Scale(bvhCentroidJitterFactor, r3.Vec{X: 1, Y: 1, Z: 1})) bvh := boundingVolumeHierarchy{ shapes: shapes, root: boundingVolumeHierarchyNode{ nodeId: 0, pMin: pMin, pMax: pMax, leaf: true, shape: nil, children: nil, }, } nodeCounter := 1 for i := 0; i < len(*shapes); i++ { ptr := &(*shapes)[i] addToBVH(&bvh.root, ptr, &nodeCounter) } bvh.recomputeBounds() fmt.Printf("Finished building BoundingVolumeHierarchy\n") return &bvh } func (bvh boundingVolumeHierarchy) getTraceFunction(bvhExploreAlgorithm BoundingVolumeHierarchyTraversalAlgorithm) func(r *ray, tMin float64) (hit bool, record *hitRecord) { if bvhExploreAlgorithm == Dijkstra { return bvh.trace } else if bvhExploreAlgorithm == DepthFirstSearch { return bvh.traceRecursively } else { panic(fmt.Sprintf("No trace algorithm found for %d", 
bvhExploreAlgorithm)) } } func (bvh boundingVolumeHierarchy) traceRecursively(r *ray, tMin float64) (hit bool, record *hitRecord) { return traceDownBoundingVolumeHierarchyNode(r, tMin, math.MaxFloat64, &bvh.root) } func (bvh boundingVolumeHierarchy) trace(r *ray, tMin float64) (hit bool, record *hitRecord) { minHeap := make(bvhPriorityQueue, 0) minHeap.Push(&Item{ value: &bvh.root, t: 0, }) heap.Init(&minHeap) hr := hitRecord{t: math.MaxFloat64} for minHeap.Len() > 0 { item := heap.Pop(&minHeap).(*Item) node := item.value // no need to explore further if all bounding boxes are further than hit object if item.t > hr.t { break } if node.leaf { if node.shape != nil { shapeHr := (*node.shape).hit(r, tMin, hr.t) if shapeHr.t > 0.0 && shapeHr.t < hr.t { hr = shapeHr } } } else { if node.children != nil { for _, v := range node.children { if v != nil { didHit, tNear, _ := hitBoundingBox(r, v.pMin, v.pMax) if didHit { tPriority := tNear heap.Push(&minHeap, &Item{ value: v, t: tPriority, }) } } } } } } return hr.t != math.MaxFloat64, &hr } // traces a ray and returns if it hits something, and a hit record func traceDownBoundingVolumeHierarchyNode(r *ray, tMin float64, tMax float64, node *boundingVolumeHierarchyNode) (hit bool, record *hitRecord) { if didHit, _, _ := hitBoundingBox(r, node.pMin, node.pMax); !didHit { return false, &hitRecord{t: -1} } if node.leaf { if node.shape == nil { return false, nil } else { hr := (*node.shape).hit(r, tMin, tMax) return hr.t > 0.0, &hr } } else { localTMax := tMax var minHitRecord = &hitRecord{ t: tMax, } if node.children != nil { for _, v := range node.children { if v != nil { rHit, rhr := traceDownBoundingVolumeHierarchyNode(r, tMin, localTMax, v) if rHit { if rhr.t > tMin && rhr.t < minHitRecord.t { minHitRecord = rhr } } } } } return minHitRecord.t != math.MaxFloat64, minHitRecord } } // recomputes the bounds for all objects in the BVH, from bottom up func (bvh boundingVolumeHierarchy) recomputeBounds() { 
recomputeNodeBounds(&bvh.root) destroyUselessNodes(&bvh.root) } func (bvh boundingVolumeHierarchy) printNodes() { printNode(0, &bvh.root) } func printNode(depth int, node *boundingVolumeHierarchyNode) { s := "" for i := 0; i < depth; i++ { s += " " } shapeStr := "" if node.shape != nil { shapeStr = (*node.shape).description() } fmt.Printf("%10v: %s %v %v %v\n", node.nodeId, s, node.pMin, node.pMax, shapeStr) if node.children != nil { for _, v := range node.children { if v != nil { printNode(depth+1, v) } } } } func recomputeNodeBounds(node *boundingVolumeHierarchyNode) (pMin r3.Vec, pMax r3.Vec) { boundsLow := r3.Vec{X: math.MaxFloat64, Y: math.MaxFloat64, Z: math.MaxFloat64} boundsHigh := r3.Vec{X: float64(math.MinInt64), Y: float64(math.MinInt64), Z: float64(math.MinInt64)} if node.leaf { if node.shape != nil { boundsLow, boundsHigh = (*node.shape).computeSquareBounds() } } else { for _, child := range node.children { childBoundsLow, childBoundsHigh := recomputeNodeBounds(child) boundsLow.X = math.Min(boundsLow.X, childBoundsLow.X) boundsLow.Y = math.Min(boundsLow.Y, childBoundsLow.Y) boundsLow.Z = math.Min(boundsLow.Z, childBoundsLow.Z) boundsHigh.X = math.Max(boundsHigh.X, childBoundsHigh.X) boundsHigh.Y = math.Max(boundsHigh.Y, childBoundsHigh.Y) boundsHigh.Z = math.Max(boundsHigh.Z, childBoundsHigh.Z) } } node.pMin = boundsLow node.pMax = boundsHigh return boundsLow, boundsHigh } func destroyUselessNodes(node *boundingVolumeHierarchyNode) { if node.children != nil { for i, v := range node.children { if v.pMin.X == math.MaxFloat64 && v.pMin.Y == math.MaxFloat64 && v.pMin.Z == math.MaxFloat64 && v.pMax.X == float64(math.MinInt64) && v.pMax.Y == float64(math.MinInt64) && v.pMax.Z == float64(math.MinInt64) { node.children[i] = nil } else { destroyUselessNodes(v) } } } } func addToBVH( curr *boundingVolumeHierarchyNode, shape *Shape, nodeCounter *int, ) { if curr.leaf { // empty leaf node, feel free to add if curr.shape == nil { ptr := &(*shape) curr.shape = ptr 
return // promote this to a child node, put object 1 is 1 and another in 2 } else { curr.leaf = false curr.children = splitBvhQuadrant(&curr.pMin, &curr.pMax, nodeCounter) removedShape := *curr.shape curr.shape = nil // recursive call to same node, now that it isn't a leaf it should add it addToBVH(curr, &removedShape, nodeCounter) addToBVH(curr, shape, nodeCounter) return } } else { // delegate adding it to the node down ptr := curr.children[getBvhQuadrantIndex(shape, &curr.pMin, &curr.pMax)] addToBVH(ptr, shape, nodeCounter) return } } // front bottom left = 0 // front bottom right = 1 // front top left = 2 // front top right = 3 // back bottom left = 4 // back bottom right = 5 // back top left = 6 // back top right = 7 // to prevent two shapes from having the same centroid coordinates, we add a random jitter factor to each centroid func getBvhQuadrantIndex(s *Shape, pMin *r3.Vec, pMax *r3.Vec) uint8 { centroid := r3.Add((*s).centroid(), r3.Scale(bvhCentroidJitterFactor, r3.Vec{X: rand.Float64(), Y: rand.Float64(), Z: rand.Float64()})) idx := uint8(0) if centroid.X > pMin.X+(pMax.X-pMin.X)/2 { idx += 1 } if centroid.Y > pMin.Y+(pMax.Y-pMin.Y)/2 { idx += 2 } if centroid.Z > pMin.Z+(pMax.Z-pMin.Z)/2 { idx += 4 } return idx } // see getBvhQuadrantIndex func splitBvhQuadrant(lowestBounds *r3.Vec, highestBounds *r3.Vec, nodeCounter *int) []*boundingVolumeHierarchyNode { halfX := (highestBounds.X - lowestBounds.X) / 2 halfY := (highestBounds.Y - lowestBounds.Y) / 2 halfZ := (highestBounds.Z - lowestBounds.Z) / 2 *nodeCounter = (*nodeCounter) + 8 return []*boundingVolumeHierarchyNode{ &boundingVolumeHierarchyNode{ nodeId: (*nodeCounter) + 8, pMin: r3.Vec{X: lowestBounds.X, Y: lowestBounds.Y, Z: lowestBounds.Z}, pMax: r3.Vec{X: lowestBounds.X + halfX, Y: lowestBounds.Y + halfY, Z: lowestBounds.Z + halfZ}, leaf: true, shape: nil, children: nil, }, &boundingVolumeHierarchyNode{ nodeId: (*nodeCounter) + 7, pMin: r3.Vec{X: lowestBounds.X + halfX, Y: lowestBounds.Y, Z: 
lowestBounds.Z}, pMax: r3.Vec{X: highestBounds.X, Y: lowestBounds.Y + halfY, Z: lowestBounds.Z + halfZ}, leaf: true, shape: nil, children: nil, }, &boundingVolumeHierarchyNode{ nodeId: (*nodeCounter) + 6, pMin: r3.Vec{X: lowestBounds.X, Y: lowestBounds.Y + halfY, Z: lowestBounds.Z}, pMax: r3.Vec{X: lowestBounds.X + halfX, Y: highestBounds.Y, Z: lowestBounds.Z + halfZ}, leaf: true, shape: nil, children: nil, }, &boundingVolumeHierarchyNode{ nodeId: (*nodeCounter) + 5, pMin: r3.Vec{X: lowestBounds.X + halfX, Y: lowestBounds.Y + halfY, Z: lowestBounds.Z}, pMax: r3.Vec{X: highestBounds.X, Y: highestBounds.Y, Z: lowestBounds.Z + halfZ}, leaf: true, shape: nil, children: nil, }, &boundingVolumeHierarchyNode{ nodeId: (*nodeCounter) + 4, pMin: r3.Vec{X: lowestBounds.X, Y: lowestBounds.Y, Z: lowestBounds.Z + halfZ}, pMax: r3.Vec{X: lowestBounds.X + halfX, Y: lowestBounds.Y + halfY, Z: highestBounds.Z}, leaf: true, shape: nil, children: nil, }, &boundingVolumeHierarchyNode{ nodeId: (*nodeCounter) + 3, pMin: r3.Vec{X: lowestBounds.X + halfX, Y: lowestBounds.Y, Z: lowestBounds.Z + halfZ}, pMax: r3.Vec{X: highestBounds.X, Y: lowestBounds.Y + halfY, Z: highestBounds.Z}, leaf: true, shape: nil, children: nil, }, &boundingVolumeHierarchyNode{ nodeId: (*nodeCounter) + 2, pMin: r3.Vec{X: lowestBounds.X, Y: lowestBounds.Y + halfY, Z: lowestBounds.Z + halfZ}, pMax: r3.Vec{X: lowestBounds.X + halfX, Y: highestBounds.Y, Z: highestBounds.Z}, leaf: true, shape: nil, children: nil, }, &boundingVolumeHierarchyNode{ nodeId: (*nodeCounter) + 1, pMin: r3.Vec{X: lowestBounds.X + halfX, Y: lowestBounds.Y + halfY, Z: lowestBounds.Z + halfZ}, pMax: r3.Vec{X: highestBounds.X, Y: highestBounds.Y, Z: highestBounds.Z}, leaf: true, shape: nil, children: nil, }, } } // determines whether the ray hits the bounding box func hitBoundingBox(r *ray, pMin r3.Vec, pMax r3.Vec) (hit bool, tNear float64, tFar float64) { // first check, am i inside the bounding box? 
if r.p.X >= pMin.X && r.p.Y >= pMin.Y && r.p.Z >= pMin.Z && r.p.X <= pMax.X && r.p.Y <= pMax.Y && r.p.Z <= pMax.Z { return true, 0, 0 } // second check, do ray-box intersection check invDirection := r3.Vec{ X: 1 / r.normalizedDirection.X, Y: 1 / r.normalizedDirection.Y, Z: 1 / r.normalizedDirection.Z, } // 1 if less than 0, invert if less than 0 bounds0 := pMin bounds1 := pMax if r.normalizedDirection.X < 0 { bounds0.X = pMax.X bounds1.X = pMin.X } if r.normalizedDirection.Y < 0 { bounds0.Y = pMax.Y bounds1.Y = pMin.Y } if r.normalizedDirection.Z < 0 { bounds0.Z = pMax.Z bounds1.Z = pMin.Z } ptMin := (bounds0.X - r.p.X) * invDirection.X ptMax := (bounds1.X - r.p.X) * invDirection.X tYMin := (bounds0.Y - r.p.Y) * invDirection.Y tYMax := (bounds1.Y - r.p.Y) * invDirection.Y if ptMin > tYMax || tYMin > ptMax { return false, -1, -1 } if tYMin > ptMin { ptMin = tYMin } if tYMax < ptMax { ptMax = tYMax } tZMin := (bounds0.Z - r.p.Z) * invDirection.Z tZMax := (bounds1.Z - r.p.Z) * invDirection.Z if (ptMin > tZMax) || (ptMax < tZMin) { return false, -1, -1 } if tZMin > ptMin { ptMin = tZMin } if tZMax < ptMax { ptMax = tZMax } return true, ptMin, ptMax }
raytracer/accelerationstructures.go
0.753739
0.415551
accelerationstructures.go
starcoder
package spritesystem import ( "image" "math" "sort" c "github.com/x-hgg-x/goecsengine/components" m "github.com/x-hgg-x/goecsengine/math" w "github.com/x-hgg-x/goecsengine/world" "github.com/hajimehoshi/ebiten/v2" ecs "github.com/x-hgg-x/goecs/v2" ) type spriteDepth struct { sprite *c.SpriteRender depth float64 } // RenderSpriteSystem draws images. // Images are drawn in ascending order of depth. // Images with higher depth are thus drawn above images with lower depth. func RenderSpriteSystem(world w.World, screen *ebiten.Image) { sprites := world.Manager.Join(world.Components.Engine.SpriteRender, world.Components.Engine.Transform) // Copy query slice into a struct slice for sorting iSprite := 0 spritesDepths := make([]spriteDepth, sprites.Size()) sprites.Visit(ecs.Visit(func(entity ecs.Entity) { spritesDepths[iSprite] = spriteDepth{ sprite: world.Components.Engine.SpriteRender.Get(entity).(*c.SpriteRender), depth: world.Components.Engine.Transform.Get(entity).(*c.Transform).Depth, } iSprite++ })) // Sort by increasing values of depth sort.Slice(spritesDepths, func(i, j int) bool { return spritesDepths[i].depth < spritesDepths[j].depth }) // Sprites with higher values of depth are drawn later so they are on top for _, st := range spritesDepths { drawImageWithWrap(screen, st.sprite) } } // Draw sprite with texture wrapping. // Image is tiled when texture coordinates are greater than image size. 
func drawImageWithWrap(screen *ebiten.Image, spriteRender *c.SpriteRender) { sprite := spriteRender.SpriteSheet.Sprites[spriteRender.SpriteNumber] texture := spriteRender.SpriteSheet.Texture textureWidth, textureHeight := texture.Image.Size() startX := int(math.Floor(float64(sprite.X) / float64(textureWidth))) startY := int(math.Floor(float64(sprite.Y) / float64(textureHeight))) stopX := int(math.Ceil(float64(sprite.X+sprite.Width) / float64(textureWidth))) stopY := int(math.Ceil(float64(sprite.Y+sprite.Height) / float64(textureHeight))) currentX := 0 for indX := startX; indX < stopX; indX++ { left := m.Max(0, sprite.X-indX*textureWidth) right := m.Min(textureWidth, sprite.X+sprite.Width-indX*textureWidth) currentY := 0 for indY := startY; indY < stopY; indY++ { top := m.Max(0, sprite.Y-indY*textureHeight) bottom := m.Min(textureHeight, sprite.Y+sprite.Height-indY*textureHeight) op := spriteRender.Options op.GeoM.Translate(float64(currentX), float64(currentY)) screen.DrawImage(texture.Image.SubImage(image.Rect(left, top, right, bottom)).(*ebiten.Image), &op) currentY += bottom - top } currentX += right - left } }
systems/sprite/render.go
0.680135
0.427875
render.go
starcoder
package util import ( "fmt" ) // BitSet is the interface wraps method for BitSet data structure implementation. type BitSet interface { // Clear used for set the bit specified by the index to false. Clear(index int) // Set used for set the bit at the specified index to true. Set(index int) // Get returns the value of the bit with the specified index. // The value is true if the bit with the index is currently set in this BitSet; // otherwise, the result is false. Get(index int) bool // IsEmpty returns true if this BitSet contains no bits that are set to true. IsEmpty() bool // Reset clean all bit. Reset() } // ByteSliceBitSet is a implementation of BitSet interface based on byte slice. type byteSliceBitSet struct { bytes []byte wordInUse int } func (bs *byteSliceBitSet) String() string { return fmt.Sprintf("byteSliceBitSet{%8b}", bs.bytes) } // Clear used for set the bit specified by the index to false. func (bs *byteSliceBitSet) Clear(index int) { if index < 0 { return } // Check capacity if !bs.checkCapacity(index) { return } bs.checkAndIncreaseCapacity(index) // Locate byte and bit byteIndex, bitIndex := bs.locateBit(index) // Validate word is use if bs.bytes[byteIndex]&byte(1<<byte(bitIndex)) != 0 { // Decrease word in use counter bs.wordInUse -= 1 } // Set value bs.bytes[byteIndex] = bs.bytes[byteIndex] & ^(1 << byte(bitIndex)) } // Set used for set the bit at the specified index to true. func (bs *byteSliceBitSet) Set(index int) { if index < 0 { return } // Check capacity bs.checkAndIncreaseCapacity(index) // Locate byte and bit byteIndex, bitIndex := bs.locateBit(index) // Set value bs.bytes[byteIndex] = bs.bytes[byteIndex] | (1 << byte(bitIndex)) // Increase word in use counter bs.wordInUse += 1 } // Get returns the value of the bit with the specified index. // The value is true if the bit with the index is currently set in this BitSet; // otherwise, the result is false. 
func (bs *byteSliceBitSet) Get(index int) bool { if index < 0 { return false } // Check capacity if !bs.checkCapacity(index) { return false } // Local byte and bit byteIndex, bitIndex := bs.locateBit(index) // Get value mask := byte(1 << byte(bitIndex)) return (bs.bytes[byteIndex] & mask) != 0 } // IsEmpty returns true if this BitSet contains no bits that are set to true. func (bs *byteSliceBitSet) IsEmpty() bool { return bs.wordInUse == 0 } // Reset clean all bit. func (bs *byteSliceBitSet) Reset() { bs.wordInUse = 0 bs.bytes = []byte{} } func (bs *byteSliceBitSet) checkAndIncreaseCapacity(index int) { if index < 0 { return } if !bs.checkCapacity(index) { var newCapacity int if (index+1)%8 == 0 { newCapacity = (index + 1) / 8 } else { newCapacity = (index+1)/8 + 1 } newBytes := make([]byte, newCapacity) copy(newBytes, bs.bytes) bs.bytes = newBytes } } func (bs *byteSliceBitSet) checkCapacity(index int) bool { return !(cap(bs.bytes)*8-1 < index) } func (bs *byteSliceBitSet) locateBit(index int) (byteIndex, bitIndex int) { if (index+1)%8 == 0 { byteIndex = (index+1)/8 - 1 bitIndex = 7 } else { byteIndex = (index + 1) / 8 bitIndex = (index+1)%8 - 1 } return } // NewByteSliceBitSet create a new instance of byteSliceBitSet. func NewByteSliceBitSet() BitSet { return &byteSliceBitSet{} }
util/bitset.go
0.74382
0.432842
bitset.go
starcoder
package adaptablepq import ( "fmt" ) type AdaptablePQ[K, V any] struct { heap []*Entry[K, V] comparator Comparator[K] } // New constructs and returns an empty adaptable pq based on a min-heap. func New[K, V any](comparator Comparator[K]) *AdaptablePQ[K, V] { return &AdaptablePQ[K, V]{heap: []*Entry[K, V]{}, comparator: comparator} } // Enqueue adds a new entry to the pq and returns it. func (a *AdaptablePQ[K, V]) Enqueue(key K, value V) *Entry[K, V] { newEntry := &Entry[K, V]{Key: key, Value: value, Index: len(a.heap)} a.heap = append(a.heap, newEntry) a.upHeapBubble(len(a.heap) - 1) return newEntry } // Dequeue removes and returns the entry with the highest priority. func (a *AdaptablePQ[K, V]) Dequeue() *Entry[K, V] { if a.IsEmpty() { return nil } res := a.heap[0] a.swap(0, len(a.heap)-1) a.heap = a.heap[:len(a.heap)-1] a.downHeapBubble(0) return res } // Min returns the entry with the highest priority. func (a *AdaptablePQ[K, V]) Min() *Entry[K, V] { if a.IsEmpty() { return nil } return a.heap[0] } // Remove gets an entry and removes it from the pq. func (a *AdaptablePQ[K, V]) Remove(entry *Entry[K, V]) { i := entry.Index if i == len(a.heap)-1 { a.heap = a.heap[:len(a.heap)-1] } else { a.swap(i, len(a.heap)-1) a.heap = a.heap[:len(a.heap)-1] a.bubble(i) } } // Size returns the number of the elements in the pq. func (a *AdaptablePQ[K, V]) Size() int { return len(a.heap) } // IsEmpty returns true if the pq doesn't have any elements. func (a *AdaptablePQ[K, V]) IsEmpty() bool { return a.Size() == 0 } // ReplaceKey replaces the given entry's key and relocates it in the heap to the right position. func (a *AdaptablePQ[K, V]) ReplaceKey(entry *Entry[K, V], key K) { entry.Key = key a.bubble(entry.Index) } // ReplaceValue just replaces the value of the given entry. func (a *AdaptablePQ[K, V]) ReplaceValue(entry *Entry[K, V], value V) { entry.Value = value } // String returns the string representation of the pq. 
func (a *AdaptablePQ[K, V]) String() string { str := "[ " for _, e := range a.heap { str += fmt.Sprint(e) + " " } str += "]" return str } // swap just swaps the entires in the heap. func (a *AdaptablePQ[K, V]) swap(i, j int) { a.heap[i], a.heap[j] = a.heap[j], a.heap[i] a.heap[i].Index = i a.heap[j].Index = j } // bubble calls the right bubbling method based on the comparator. func (a *AdaptablePQ[K, V]) bubble(i int) { if i > 0 && a.comparator.Compare(a.heap[i].Key, a.heap[a.parent(i)].Key) < 0 { a.upHeapBubble(i) } else { a.downHeapBubble(i) } } // parent returns the index of the parent of the given index of the entry in the heap. func (a *AdaptablePQ[K, V]) parent(i int) int { return (i - 1) / 2 } // left returns the index of the left child of the given entry's index in the heap. func (a *AdaptablePQ[K, V]) left(i int) int { return 2*i + 1 } // right returns the index of the right child of the given entry's index in the heap. func (a *AdaptablePQ[K, V]) right(i int) int { return 2*i + 2 } // hasLeft returns true if the given entry has a left child in the heap. func (a *AdaptablePQ[K, V]) hasLeft(i int) bool { return a.left(i) < len(a.heap) } // hasRight returns true if the given entry has a right child in the heap. func (a *AdaptablePQ[K, V]) hasRight(i int) bool { return a.right(i) < len(a.heap) } // downHeapBubble swaps the node downwards until reaching the right position in the heap. func (a *AdaptablePQ[K, V]) downHeapBubble(i int) { // Until reaching bottom. for a.hasLeft(i) { leftIdx := a.left(i) smallChildIdx := leftIdx if a.hasRight(i) { rightIdx := a.right(i) if a.comparator.Compare(a.heap[leftIdx].Key, a.heap[rightIdx].Key) > 0 { smallChildIdx = rightIdx } } if a.comparator.Compare(a.heap[smallChildIdx].Key, a.heap[i].Key) >= 0 { break } a.swap(i, smallChildIdx) i = smallChildIdx } } // upHeapBubble swaps the node upwards until reaching the right position in the heap. func (a *AdaptablePQ[K, V]) upHeapBubble(i int) { // Until reaching root. 
for i > 0 { p := a.parent(i) if a.comparator.Compare(a.heap[i].Key, a.heap[p].Key) >= 0 { break } a.swap(i, p) i = p } }
priorityqueue/adaptablepq/adaptable_heap_pq.go
0.835852
0.412294
adaptable_heap_pq.go
starcoder
package main import ( "fmt" "math" "github.com/leekchan/accounting" "golang.org/x/exp/rand" "gonum.org/v1/gonum/floats" "gonum.org/v1/gonum/mat" "gonum.org/v1/gonum/stat" ) // CashProcess contains the assumptions of the cash flow simulation type CashProcess struct { AnnualCashFlow float64 Drift float64 Volatility float64 TerminalMultiplier float64 RiskPremium float64 } // CostProcess is the assumptions of the cost and investment process type CostProcess struct { Investment float64 TotalExpectedCost float64 Volatility float64 FailureProb float64 } // Simulation holds the assumptions for the Monte Carlo simulation. type Simulation struct { TimeStep float64 PatentLength int Runs int Basis func(x, y float64) []float64 } // ProjectProcess contains the correlated structures of the cost // and cash flow processes comprising the project. // Correlation correlates the CashProcess and the CostProcess. type ProjectProcess struct { CashProcess CostProcess Correlation float64 RiskFreeRate float64 Simulation } func main() { // Set random seed rand.Seed(355) // Initialize project components cashProcess := CashProcess{ AnnualCashFlow: 20e6, Drift: 0.02, Volatility: 0.35, TerminalMultiplier: 5, RiskPremium: 0.036, } investmentProcess := CostProcess{ Investment: 10e6, TotalExpectedCost: 100e6, Volatility: 0.5, FailureProb: 0.06931, } sim := Simulation{ TimeStep: 0.25, PatentLength: 20, Runs: 200_000, Basis: basis, } project := ProjectProcess{ CashProcess: cashProcess, CostProcess: investmentProcess, Correlation: -0.1, RiskFreeRate: 0.05, Simulation: sim, } // Estimate Project Value projectValue := project.Lsm() // Print currency ac := accounting.Accounting{Symbol: "$", Precision: 2} fmt.Println("The Project Value:", ac.FormatMoney(projectValue)) } // Simulate returns the correlated cash and cost processes. 
func (pp *ProjectProcess) Simulate() (*mat.Dense, *mat.Dense) { // Set number of periods numberOfPeriods := int(float64(pp.PatentLength) / pp.TimeStep) // Risk adjusted cash flow drift rate adjCashDrift := pp.CashProcess.Drift - pp.RiskPremium // Matrices to hold the simulated cash and cost values netCash := mat.NewDense(pp.Runs, numberOfPeriods, nil) cost := mat.NewDense(pp.Runs, numberOfPeriods, nil) // stochastic simulation of the investment costs and cash flows for run := 0; run < pp.Runs; run++ { for period := 0; period < numberOfPeriods; period++ { // correlate random variables costEps := rand.NormFloat64() cashEps := pp.Correlation*costEps + sqrt(1-sqr(pp.Correlation))*rand.NormFloat64() // cash flow simulation prevCash := pp.AnnualCashFlow if period != 0 { prevCash = netCash.At(run, period-1) } nextCash := prevCash * exp((adjCashDrift-0.5*sqr(pp.CashProcess.Volatility))*pp.TimeStep+ pp.CashProcess.Volatility*sqrt(pp.TimeStep)*cashEps) netCash.Set(run, period, nextCash) // cost simulation prevCost := pp.TotalExpectedCost if period != 0 { prevCost = cost.At(run, period-1) } // Only update costs if not zero nextCost := 0.0 if prevCost != 0 { nextCost = prevCost - pp.Investment*pp.TimeStep + pp.CostProcess.Volatility*sqrt(pp.Investment*prevCost*pp.TimeStep)*costEps if nextCost < 0 { nextCost = 0 } } cost.Set(run, period, nextCost) } } return netCash, cost } // Lsm evaluates the project using the Least Squares Monte Carlo algorithm func (pp *ProjectProcess) Lsm() float64 { // Calc periods numberOfPeriods := int(float64(pp.PatentLength) / pp.TimeStep) lastPeriod := numberOfPeriods - 1 // Simulate the cost and cash flow values cashMatrix, costMatrix := pp.Simulate() // valueArray holds the value function iteration matrix valueArray := mat.NewDense(pp.Runs, numberOfPeriods, nil) // Set the Terminal Value for run := 0; run < pp.Runs; run++ { // If cost is positive then still investing // and no value in terminal period. 
if costMatrix.At(run, lastPeriod) == 0 { termVal := pp.TerminalMultiplier * cashMatrix.At(run, lastPeriod) valueArray.Set(run, lastPeriod, termVal) } } // Discount rates depending on the phase of the project cashDiscRate := exp(-1 * pp.RiskFreeRate * pp.TimeStep) investDiscRate := exp(-1 * (pp.RiskFreeRate + pp.FailureProb) * pp.TimeStep) // Determine size of Basis matrix for value function approximation numBasisCols := len(pp.Basis(0, 0)) // Value iteration for period := lastPeriod - 1; period >= 0; period-- { // Initialize next periods Value nextVal := mat.NewVecDense(pp.Runs, nil) // Discount next period's value to serve as the dependent variable of the regression nextVal.ScaleVec(investDiscRate, valueArray.ColView(period+1)) // Initialize basis matrix for regression basisMatrix := mat.NewDense(pp.Runs, numBasisCols, nil) // Set basis matrix rows for the regression for run := 0; run < pp.Runs; run++ { basisMatrix.SetRow(run, pp.Basis(costMatrix.At(run, period), cashMatrix.At(run, period))) } // Solve for regression coefficients coefficients := mat.NewVecDense(numBasisCols, nil) coefficients.SolveVec(basisMatrix, nextVal) // Estimate continuation value of investment estVal := mat.NewVecDense(pp.Runs, nil) estVal.MulVec(basisMatrix, coefficients) // Determine Value and set in valueArray for run := 0; run < pp.Runs; run++ { // Investing Value if costMatrix.At(run, period) != 0 { investVal := estVal.AtVec(run) - pp.Investment*pp.TimeStep // Only invest if project value is positive after investment if investVal > 0 { valueArray.Set(run, period, nextVal.AtVec(run)-pp.Investment*pp.TimeStep) } } else { // Post investment sales cash flow valueArray.Set(run, period, cashMatrix.At(run, period)*pp.TimeStep+ cashDiscRate*valueArray.At(run, period+1)) } } } // Convert first month to slice of floats initialValue := make([]float64, pp.Runs) mat.Col(initialValue, 0, valueArray) // Discount one last time to initial period floats.ScaleTo(initialValue, investDiscRate, 
initialValue) // Average discounted initial period across all runs retVal := stat.Mean(initialValue, nil) return retVal } // helper functions // square the input func sqr(x float64) float64 { return x * x } // polynomial basis to approximate the value function func basis(x, y float64) []float64 { return []float64{1, x, y, x * y, sqr(x), sqr(y), sqr(x) * y, x * sqr(y), sqr(x * y)} } // local function aliases var exp = math.Exp var sqrt = math.Sqrt
main.go
0.708616
0.619615
main.go
starcoder
package prng import ( "math/bits" "math" "unsafe" ) // A Xosh with a xoshiro256 prng implements a 64-bit generator with 256-bit state. type Xosh struct { s0, s1, s2, s3 uint64 } // NewXosh returns a new xoshiro256 generator seeded by the seed. func NewXosh(seed uint64) Xosh { x := Xosh{} x.Seed(seed) return x } // Seed seeds a xoshiro256 by the seed using splitMix64. Any seed is ok. func (x *Xosh) Seed(seed uint64) { x.s0 = Splitmix(&seed) x.s1 = Splitmix(&seed) x.s2 = Splitmix(&seed) x.s3 = Splitmix(&seed) } // NextXosh returns the next xoshiro256 generator from Outlet. Each generator has // 2^128 long random streams, which is not overlapping with other generators streams. // NextXosh is safe for concurrent use by multiple goroutines. func (s *Outlet) NextXosh() Xosh { s.mu.Lock() defer s.mu.Unlock() s.xosh.Jump() return s.xosh } // NewXoshSlice returns a slice of n xoshiro256 generators with non-overlapping 2^128 // long random streams. First generator is seeded by the seed. func NewXoshSlice(n int, seed uint64) []Xosh { s := make([]Xosh, n) s[0].Seed(seed) for i := 1; i < n; i++ { s[i] = s[i-1] s[i].Jump() } return s } // Uint64 returns a pseudo-random uint64. Uint64 is xoshiro256**. func (x *Xosh) Uint64() (next uint64) { next = bits.RotateLeft64(x.s1 * 5, 7) * 9 *x = x.NextState() return } // Xoshiro256plus is xoshiro256+ func (x *Xosh) Xoshiro256plus() (next uint64) { next = x.s0 + x.s3 *x = x.NextState() return } //Xoshiro256plusplus is xoshiro256++ func (x *Xosh) Xoshiro256plusplus() (next uint64) { next = bits.RotateLeft64(x.s0 + x.s3, 23) + x.s0 *x = x.NextState() return } // NextState returns the next Xosh state of the xoshiro256 linear engine. 
func (x Xosh) NextState() Xosh { //gc compiler detects similar expressions if given in parentheses return Xosh{ s0: x.s0 ^ (x.s1 ^ x.s3), s1: (x.s0 ^ x.s2) ^ x.s1, s2: (x.s0 ^ x.s2) ^ (x.s1 << 17), s3: bits.RotateLeft64(x.s1 ^ x.s3, 45), } } // Float64 returns a uniformly distributed pseudo-random float64 from [0, 1). // The distribution includes 2^53 evenly spaced floats with spacing 2^-53. func (x *Xosh) Float64() float64 { return float64(x.Xoshiro256plus() >> 11) / (1<<53) } // Float64_64 returns a uniformly distributed pseudo-random float64 from [0, 1). // The distribution includes all floats in [2^-12, 1) and 2^52 evenly spaced // floats in [0, 2^-12) with spacing 2^-64. func (x *Xosh) Float64_64() float64 { u := x.Uint64() if u == 0 { return 0 } // without this min returned is 2^-65 z := uint64(bits.LeadingZeros64(u)) + 1 return math.Float64frombits((1023 - z) << 52 | u << z >> 12) } // Float64_117 returns a uniformly distributed pseudo-random float64 from [0, 1). // The distribution includes all floats in [2^-65, 1) and 2^52 evenly spaced // floats in [0, 2^-65) with spacing 2^-117. func (x *Xosh) Float64_117() float64 { u := x.Uint64() z := uint64(bits.LeadingZeros64(u)) + 1 if z <= 12 { return math.Float64frombits((1023 - z) << 52 | u << z >> 12) } z-- u = u << z | x.Uint64() >> (64 - z) return float64(u >> 11) * twoToMinus(53 + z) } // Float64full returns a uniformly distributed pseudo-random float64 from [0, 1). // The distribution includes all floats in [0, 1). // Float64full is equivalent to Float64Bisect in truncate mode. 
func (x *Xosh) Float64full() float64 { u := x.Uint64() z := uint64(bits.LeadingZeros64(u)) + 1 if z <= 12 { //99.975% of cases return math.Float64frombits((1023 - z) << 52 | u << z >> 12) } z-- exp := uint64(0) for u == 0 { u = x.Uint64() z = uint64(bits.LeadingZeros64(u)) exp += 64 if exp + z >= 1074 { return 0 } } u = u << z | x.Uint64() >> (64 - z) exp += z if exp < 1022 { return math.Float64frombits((1022 - exp) << 52 | u << 1 >> 12) } return math.Float64frombits(u >> (exp - 1022) >> 12) // 2^52 subnormal floats } // WriteState writes the current state of the generator x to b. // WriteState without allocations is faster than State(). func (x *Xosh) WriteState(b []byte) { if len(b) < XoshStateSize { panic("ReadState: byte slice too short") } // This expects a little endian cpu, eg. all amd64. *(*uint64)(unsafe.Pointer(&b[ 0])) = bits.ReverseBytes64(x.s0) *(*uint64)(unsafe.Pointer(&b[ 8])) = bits.ReverseBytes64(x.s1) *(*uint64)(unsafe.Pointer(&b[16])) = bits.ReverseBytes64(x.s2) *(*uint64)(unsafe.Pointer(&b[24])) = bits.ReverseBytes64(x.s3) } // State returns the current binary state of the generator x as []byte. func (x *Xosh) State() []byte { var b[XoshStateSize]byte *(*uint64)(unsafe.Pointer(&b[ 0])) = bits.ReverseBytes64(x.s0) *(*uint64)(unsafe.Pointer(&b[ 8])) = bits.ReverseBytes64(x.s1) *(*uint64)(unsafe.Pointer(&b[16])) = bits.ReverseBytes64(x.s2) *(*uint64)(unsafe.Pointer(&b[24])) = bits.ReverseBytes64(x.s3) return b[:] } // ReadState reads the state of the generator x from b []byte. 
func (x *Xosh) ReadState(b []byte) { if len(b) < XoshStateSize { panic("ReadState: byte slice too short") } x.s0 = bits.ReverseBytes64(*(*uint64)(unsafe.Pointer(&b[ 0]))) x.s1 = bits.ReverseBytes64(*(*uint64)(unsafe.Pointer(&b[ 8]))) x.s2 = bits.ReverseBytes64(*(*uint64)(unsafe.Pointer(&b[16]))) x.s3 = bits.ReverseBytes64(*(*uint64)(unsafe.Pointer(&b[24]))) } // Alternative ReadState // x.s0 = binary.BigEndian.Uint64(b[0:]) // x.s1 = binary.BigEndian.Uint64(b[8:]) // x.s2 = binary.BigEndian.Uint64(b[16:]) // x.s3 = binary.BigEndian.Uint64(b[24:]) // Alternative State // binary.BigEndian.PutUint64(b[0:], x.s0) // binary.BigEndian.PutUint64(b[8:], x.s1) // binary.BigEndian.PutUint64(b[16:], x.s2) // binary.BigEndian.PutUint64(b[24:], x.s3)
xosh.go
0.719384
0.427695
xosh.go
starcoder
package level /* level Copyright (c) 2019 beito This software is released under the MIT License. http://opensource.org/licenses/mit-license.php */ import ( "github.com/beito123/nbt" ) // Level is a simple level loader type Level struct { } // Format is a simple interface for level formats // This needs to be supported concurrency type Format interface { // Name returns name of level Name() string // SetName sets the name of level SetName(name string) // GameType returns the default game mode of level GameType() GameType // SetGameType sets the game mode of level SetGameType(typ GameType) // Spawn returns the default spawn of level Spawn() (x, y, z int) // SetSpawn sets the default spawn of level SetSpawn(x, y, z int) // Property returns a property of level.dat Property(name string) (tag nbt.Tag, ok bool) // SetProperty sets a property SetProperty(tag nbt.Tag) // AllProperties returns all properties AllProperties() *nbt.Compound // SetAllProperties sets all properties SetAllProperties(com *nbt.Compound) // Close closes the level format // You must close after you use the format // It's should not run other functions after format is closed Close() error // Dimension return dimension of the level Dimension() Dimension // SetDimension set dimension of the level SetDimension(Dimension) // LoadChunk loads a chunk. // If create is enabled, generates a chunk if it doesn't exist LoadChunk(x, y int, create bool) error // UnloadChunk unloads a chunk. UnloadChunk(x, y int) error // GenerateChunk generates a chunk GenerateChunk(x, y int) error // HasGeneratedChunk returns whether the chunk is generaged HasGeneratedChunk(x, y int) (bool, error) // IsLoadedChunk returns weather a chunk is loaded. IsLoadedChunk(x, y int) bool // SaveChunk saves a chunk. SaveChunk(x, y int) error // SaveChunks saves all chunks. SaveChunks() error // Chunk returns a chunk. // If a chunk is not loaded, it will be loaded Chunk(x, y int) (Chunk, error) // LoadedChunks returns loaded chunks. 
LoadedChunks() []Chunk } // Chunk is a simple interface for chunk type Chunk interface { // X returns x coordinate X() int // Y returns y coordinate Y() int // SetX set x coordinate SetX(x int) // SetY set y coordinate SetY(y int) // Height returns the height of the highest block at chunk coordinate Height(x, y int) (height uint16) // Biome returns biome Biome(x, y int) byte // SetBiome set biome SetBiome(x, y int, biome byte) // Entities returns entities of nbt data Entities() []*nbt.Compound // SetEntities set entities of nbt data SetEntities(entities []*nbt.Compound) // BlockEntities returns block entities of nbt data BlockEntities() []*nbt.Compound // SetBlockEntities set block entities of nbt data SetBlockEntities(entities []*nbt.Compound) // GetBlock gets a BlockState at chunk coordinate GetBlock(x, y, z int) (BlockState, error) // SetBlock set a BlockState at chunk coordinate SetBlock(x, y, z int, state BlockState) error } // BlockState is a block information type BlockState interface { // TODO: fix bad codes :P // Name returns block name Name() string // ToBlockNameProperties returns block name and properties // If it's not supported, returns false for ok ToBlockNameProperties() (name string, properties map[string]string, ok bool) // ToBlockNameMeta returns block name and meta // If it's not supported, returns false for ok ToBlockNameMeta() (name string, meta int, ok bool) // ToBlockIDMeta returns block id and meta // If it's not supported, returns false for ok ToBlockIDMeta() (id int, meta int, ok bool) }
level.go
0.681197
0.400955
level.go
starcoder
package block const hashAir = 0 const hashAncientDebris = 1 const hashAndesite = 2 const hashBarrier = 3 const hashBasalt = 4 const hashBeacon = 5 const hashBedrock = 6 const hashBeetrootSeeds = 7 const hashBlueIce = 8 const hashBoneBlock = 9 const hashBricks = 10 const hashCake = 11 const hashCarpet = 12 const hashCarrot = 13 const hashChest = 14 const hashChiseledQuartz = 15 const hashClay = 16 const hashCoalBlock = 17 const hashCoalOre = 18 const hashCobblestone = 19 const hashCocoaBean = 20 const hashConcrete = 21 const hashConcretePowder = 22 const hashCoral = 23 const hashCoralBlock = 24 const hashDiamondBlock = 25 const hashDiamondOre = 26 const hashDiorite = 27 const hashDirt = 28 const hashDirtPath = 29 const hashDragonEgg = 30 const hashEmeraldBlock = 31 const hashEmeraldOre = 32 const hashEndBrickStairs = 33 const hashEndBricks = 34 const hashEndStone = 35 const hashFarmland = 36 const hashFire = 37 const hashGildedBlackstone = 38 const hashGlass = 39 const hashGlassPane = 40 const hashGlazedTerracotta = 41 const hashGlowstone = 42 const hashGoldBlock = 43 const hashGoldOre = 44 const hashGranite = 45 const hashGrass = 46 const hashGrassPlant = 47 const hashGravel = 48 const hashInvisibleBedrock = 49 const hashIronBars = 50 const hashIronBlock = 51 const hashIronOre = 52 const hashKelp = 53 const hashLantern = 54 const hashLapisBlock = 55 const hashLapisOre = 56 const hashLava = 57 const hashLeaves = 58 const hashLight = 59 const hashLitPumpkin = 60 const hashLog = 61 const hashMelon = 62 const hashMelonSeeds = 63 const hashNetherBrickFence = 64 const hashNetherGoldOre = 65 const hashNetherQuartzOre = 66 const hashNetherWart = 67 const hashNetheriteBlock = 68 const hashNetherrack = 69 const hashNoteBlock = 70 const hashObsidian = 71 const hashPlanks = 72 const hashPotato = 73 const hashPumpkin = 74 const hashPumpkinSeeds = 75 const hashQuartz = 76 const hashQuartzBricks = 77 const hashQuartzPillar = 78 const hashSand = 79 const hashSandstone = 80 const 
hashSeaLantern = 81 const hashShroomlight = 82 const hashSoulSand = 83 const hashSoulSoil = 84 const hashSponge = 85 const hashStainedGlass = 86 const hashStainedGlassPane = 87 const hashStainedTerracotta = 88 const hashStandingSign = 89 const hashStone = 90 const hashTerracotta = 91 const hashTorch = 92 const hashWallSign = 93 const hashWater = 94 const hashWheatSeeds = 95 const hashWoodDoor = 96 const hashWoodFence = 97 const hashWoodFenceGate = 98 const hashWoodSlab = 99 const hashWoodStairs = 100 const hashWoodTrapdoor = 101 const hashWool = 102 const hashCalcite = 12 const hashCarpet = 13 const hashCarrot = 14 const hashChest = 15 const hashChiseledQuartz = 16 const hashClay = 17 const hashCoalBlock = 18 const hashCoalOre = 19 const hashCobblestone = 20 const hashCocoaBean = 21 const hashConcrete = 22 const hashConcretePowder = 23 const hashCopperOre = 24 const hashCoral = 25 const hashCoralBlock = 26 const hashDiamondBlock = 27 const hashDiamondOre = 28 const hashDiorite = 29 const hashDirt = 30 const hashDirtPath = 31 const hashDoubleFlower = 32 const hashDoubleTallGrass = 33 const hashDragonEgg = 34 const hashDripstone = 35 const hashEmeraldBlock = 36 const hashEmeraldOre = 37 const hashEndBrickStairs = 38 const hashEndBricks = 39 const hashEndStone = 40 const hashFarmland = 41 const hashFire = 42 const hashGildedBlackstone = 43 const hashGlass = 44 const hashGlassPane = 45 const hashGlazedTerracotta = 46 const hashGlowstone = 47 const hashGoldBlock = 48 const hashGoldOre = 49 const hashGranite = 50 const hashGrass = 51 const hashGravel = 52 const hashInvisibleBedrock = 53 const hashIronBars = 54 const hashIronBlock = 55 const hashIronOre = 56 const hashKelp = 57 const hashLantern = 58 const hashLapisBlock = 59 const hashLapisOre = 60 const hashLava = 61 const hashLeaves = 62 const hashLight = 63 const hashLitPumpkin = 64 const hashLog = 65 const hashMelon = 66 const hashMelonSeeds = 67 const hashMossCarpet = 68 const hashNetherBrickFence = 69 const 
hashNetherGoldOre = 70 const hashNetherQuartzOre = 71 const hashNetherSprouts = 72 const hashNetherWart = 73 const hashNetheriteBlock = 74 const hashNetherrack = 75 const hashNoteBlock = 76 const hashObsidian = 77 const hashPlanks = 78 const hashPotato = 79 const hashPumpkin = 80 const hashPumpkinSeeds = 81 const hashQuartz = 82 const hashQuartzBricks = 83 const hashQuartzPillar = 84 const hashRawCopperBlock = 85 const hashRawGoldBlock = 86 const hashRawIronBlock = 87 const hashSand = 88 const hashSandstone = 89 const hashSeaLantern = 90 const hashShroomlight = 91 const hashSoulSand = 92 const hashSoulSoil = 93 const hashSponge = 94 const hashSporeBlossom = 95 const hashStainedGlass = 96 const hashStainedGlassPane = 97 const hashStainedTerracotta = 98 const hashStone = 99 const hashTallGrass = 100 const hashTerracotta = 101 const hashTorch = 102 const hashTuff = 103 const hashWater = 104 const hashWheatSeeds = 105 const hashWoodDoor = 106 const hashWoodFence = 107 const hashWoodFenceGate = 108 const hashWoodSlab = 109 const hashWoodStairs = 110 const hashWoodTrapdoor = 111 const hashWool = 112 func (Air) Hash() uint64 { return hashAir } func (AncientDebris) Hash() uint64 { return hashAncientDebris } func (a Andesite) Hash() uint64 { return hashAndesite | uint64(boolByte(a.Polished))<<7 } func (Barrier) Hash() uint64 { return hashBarrier } func (b Basalt) Hash() uint64 { return hashBasalt | uint64(boolByte(b.Polished))<<7 | uint64(b.Axis)<<8 } func (Beacon) Hash() uint64 { return hashBeacon } func (b Bedrock) Hash() uint64 { return hashBedrock | uint64(boolByte(b.InfiniteBurning))<<7 } func (b BeetrootSeeds) Hash() uint64 { return hashBeetrootSeeds | uint64(b.Growth)<<7 } func (BlueIce) Hash() uint64 { return hashBlueIce } func (b BoneBlock) Hash() uint64 { return hashBoneBlock | uint64(b.Axis)<<7 } func (Bricks) Hash() uint64 { return hashBricks } func (c Cake) Hash() uint64 { return hashCake | uint64(c.Bites)<<7 } func (c Calcite) Hash() uint64 { return 
hashCalcite } func (c Carpet) Hash() uint64 { return hashCarpet | uint64(c.Colour.Uint8())<<7 } func (c Carrot) Hash() uint64 { return hashCarrot | uint64(c.Growth)<<7 } func (c Chest) Hash() uint64 { return hashChest | uint64(c.Facing)<<7 } func (ChiseledQuartz) Hash() uint64 { return hashChiseledQuartz } func (c Clay) Hash() uint64 { return hashClay } func (CoalBlock) Hash() uint64 { return hashCoalBlock } func (c CoalOre) Hash() uint64 { return hashCoalOre | uint64(c.Type.Uint8())<<7 } func (c Cobblestone) Hash() uint64 { return hashCobblestone | uint64(boolByte(c.Mossy))<<7 } func (c CocoaBean) Hash() uint64 { return hashCocoaBean | uint64(c.Facing)<<7 | uint64(c.Age)<<9 } func (c Concrete) Hash() uint64 { return hashConcrete | uint64(c.Colour.Uint8())<<7 } func (c ConcretePowder) Hash() uint64 { return hashConcretePowder | uint64(c.Colour.Uint8())<<7 } func (c CopperOre) Hash() uint64 { return hashCopperOre | uint64(c.Type.Uint8())<<7 } func (c Coral) Hash() uint64 { return hashCoral | uint64(c.Type.Uint8())<<7 | uint64(boolByte(c.Dead))<<10 } func (c CoralBlock) Hash() uint64 { return hashCoralBlock | uint64(c.Type.Uint8())<<7 | uint64(boolByte(c.Dead))<<10 } func (DiamondBlock) Hash() uint64 { return hashDiamondBlock } func (d DiamondOre) Hash() uint64 { return hashDiamondOre | uint64(d.Type.Uint8())<<7 } func (d Diorite) Hash() uint64 { return hashDiorite | uint64(boolByte(d.Polished))<<7 } func (d Dirt) Hash() uint64 { return hashDirt | uint64(boolByte(d.Coarse))<<7 } func (DirtPath) Hash() uint64 { return hashDirtPath } func (d DoubleFlower) Hash() uint64 { return hashDoubleFlower | uint64(boolByte(d.UpperPart))<<7 | uint64(d.Type.Uint8())<<8 } func (d DoubleTallGrass) Hash() uint64 { return hashDoubleTallGrass | uint64(boolByte(d.UpperPart))<<7 | uint64(d.Type.Uint8())<<8 } func (DragonEgg) Hash() uint64 { return hashDragonEgg } func (d Dripstone) Hash() uint64 { return hashDripstone } func (EmeraldBlock) Hash() uint64 { return hashEmeraldBlock } func (e 
EmeraldOre) Hash() uint64 { return hashEmeraldOre | uint64(e.Type.Uint8())<<7 } func (s EndBrickStairs) Hash() uint64 { return hashEndBrickStairs | uint64(boolByte(s.UpsideDown))<<7 | uint64(s.Facing)<<8 } func (EndBricks) Hash() uint64 { return hashEndBricks } func (EndStone) Hash() uint64 { return hashEndStone } func (f Farmland) Hash() uint64 { return hashFarmland | uint64(f.Hydration)<<7 } func (f Fire) Hash() uint64 { return hashFire | uint64(f.Type.Uint8())<<7 | uint64(f.Age)<<8 } func (GildedBlackstone) Hash() uint64 { return hashGildedBlackstone } func (Glass) Hash() uint64 { return hashGlass } func (GlassPane) Hash() uint64 { return hashGlassPane } func (t GlazedTerracotta) Hash() uint64 { return hashGlazedTerracotta | uint64(t.Colour.Uint8())<<7 | uint64(t.Facing)<<11 } func (Glowstone) Hash() uint64 { return hashGlowstone } func (GoldBlock) Hash() uint64 { return hashGoldBlock } func (g GoldOre) Hash() uint64 { return hashGoldOre | uint64(g.Type.Uint8())<<7 } func (g Granite) Hash() uint64 { return hashGranite | uint64(boolByte(g.Polished))<<7 } func (Grass) Hash() uint64 { return hashGrass } func (Gravel) Hash() uint64 { return hashGravel } func (InvisibleBedrock) Hash() uint64 { return hashInvisibleBedrock } func (IronBars) Hash() uint64 { return hashIronBars } func (IronBlock) Hash() uint64 { return hashIronBlock } func (i IronOre) Hash() uint64 { return hashIronOre | uint64(i.Type.Uint8())<<7 } func (k Kelp) Hash() uint64 { return hashKelp | uint64(k.Age)<<7 } func (l Lantern) Hash() uint64 { return hashLantern | uint64(boolByte(l.Hanging))<<7 | uint64(l.Type.Uint8())<<8 } func (LapisBlock) Hash() uint64 { return hashLapisBlock } func (l LapisOre) Hash() uint64 { return hashLapisOre | uint64(l.Type.Uint8())<<7 } func (l Lava) Hash() uint64 { return hashLava | uint64(boolByte(l.Still))<<7 | uint64(l.Depth)<<8 | uint64(boolByte(l.Falling))<<16 } func (l Leaves) Hash() uint64 { return hashLeaves | uint64(l.Wood.Uint8())<<7 | 
uint64(boolByte(l.Persistent))<<10 | uint64(boolByte(l.ShouldUpdate))<<11 } func (l Light) Hash() uint64 { return hashLight | uint64(l.Level)<<7 } func (l LitPumpkin) Hash() uint64 { return hashLitPumpkin | uint64(l.Facing)<<7 } func (l Log) Hash() uint64 { return hashLog | uint64(l.Wood.Uint8())<<7 | uint64(boolByte(l.Stripped))<<10 | uint64(l.Axis)<<11 } func (Melon) Hash() uint64 { return hashMelon } func (m MelonSeeds) Hash() uint64 { return hashMelonSeeds | uint64(m.Growth)<<7 | uint64(m.Direction)<<15 } func (m MossCarpet) Hash() uint64 { return hashMossCarpet } func (NetherBrickFence) Hash() uint64 { return hashNetherBrickFence } func (NetherGoldOre) Hash() uint64 { return hashNetherGoldOre } func (NetherQuartzOre) Hash() uint64 { return hashNetherQuartzOre } func (n NetherSprouts) Hash() uint64 { return hashNetherSprouts } func (n NetherWart) Hash() uint64 { return hashNetherWart | uint64(n.Age)<<7 } func (NetheriteBlock) Hash() uint64 { return hashNetheriteBlock } func (Netherrack) Hash() uint64 { return hashNetherrack } func (n NoteBlock) Hash() uint64 { return hashNoteBlock } func (o Obsidian) Hash() uint64 { return hashObsidian | uint64(boolByte(o.Crying))<<7 } func (p Planks) Hash() uint64 { return hashPlanks | uint64(p.Wood.Uint8())<<7 } func (p Potato) Hash() uint64 { return hashPotato | uint64(p.Growth)<<7 } func (p Pumpkin) Hash() uint64 { return hashPumpkin | uint64(boolByte(p.Carved))<<7 | uint64(p.Facing)<<8 } func (p PumpkinSeeds) Hash() uint64 { return hashPumpkinSeeds | uint64(p.Growth)<<7 | uint64(p.Direction)<<15 } func (q Quartz) Hash() uint64 { return hashQuartz | uint64(boolByte(q.Smooth))<<7 } func (QuartzBricks) Hash() uint64 { return hashQuartzBricks } func (q QuartzPillar) Hash() uint64 { return hashQuartzPillar | uint64(q.Axis)<<7 } func (RawCopperBlock) Hash() uint64 { return hashRawCopperBlock } func (RawGoldBlock) Hash() uint64 { return hashRawGoldBlock } func (RawIronBlock) Hash() uint64 { return hashRawIronBlock } func (s Sand) 
Hash() uint64 { return hashSand | uint64(boolByte(s.Red))<<7 } func (s Sandstone) Hash() uint64 { return hashSandstone | uint64(s.Type.Uint8())<<7 | uint64(boolByte(s.Red))<<9 } func (SeaLantern) Hash() uint64 { return hashSeaLantern } func (Shroomlight) Hash() uint64 { return hashShroomlight } func (SoulSand) Hash() uint64 { return hashSoulSand } func (SoulSoil) Hash() uint64 { return hashSoulSoil } func (s Sponge) Hash() uint64 { return hashSponge | uint64(boolByte(s.Wet))<<7 } func (s SporeBlossom) Hash() uint64 { return hashSporeBlossom } func (g StainedGlass) Hash() uint64 { return hashStainedGlass | uint64(g.Colour.Uint8())<<7 } func (p StainedGlassPane) Hash() uint64 { return hashStainedGlassPane | uint64(p.Colour.Uint8())<<7 } func (t StainedTerracotta) Hash() uint64 { return hashStainedTerracotta | uint64(t.Colour.Uint8())<<7 } func (s StandingSign) Hash() uint64 { return hashStandingSign | uint64(s.Wood.Uint8())<<7 | uint64(s.Orientation)<<11 } func (s Stone) Hash() uint64 { return hashStone | uint64(boolByte(s.Smooth))<<7 } func (g TallGrass) Hash() uint64 { return hashTallGrass | uint64(g.Type.Uint8())<<7 } func (Terracotta) Hash() uint64 { return hashTerracotta } func (t Torch) Hash() uint64 { return hashTorch | uint64(t.Facing)<<7 | uint64(t.Type.Uint8())<<10 } func (s WallSign) Hash() uint64 { return hashWallSign | uint64(s.Wood.Uint8())<<7 | uint64(s.Facing)<<11 } func (t Tuff) Hash() uint64 { return hashTuff } func (w Water) Hash() uint64 { return hashWater | uint64(boolByte(w.Still))<<7 | uint64(w.Depth)<<8 | uint64(boolByte(w.Falling))<<16 } func (s WheatSeeds) Hash() uint64 { return hashWheatSeeds | uint64(s.Growth)<<7 } func (d WoodDoor) Hash() uint64 { return hashWoodDoor | uint64(d.Wood.Uint8())<<7 | uint64(d.Facing)<<10 | uint64(boolByte(d.Open))<<12 | uint64(boolByte(d.Top))<<13 | uint64(boolByte(d.Right))<<14 } func (w WoodFence) Hash() uint64 { return hashWoodFence | uint64(w.Wood.Uint8())<<7 } func (f WoodFenceGate) Hash() uint64 { 
return hashWoodFenceGate | uint64(f.Wood.Uint8())<<7 | uint64(f.Facing)<<10 | uint64(boolByte(f.Open))<<12 | uint64(boolByte(f.Lowered))<<13 } func (s WoodSlab) Hash() uint64 { return hashWoodSlab | uint64(s.Wood.Uint8())<<7 | uint64(boolByte(s.Top))<<10 | uint64(boolByte(s.Double))<<11 } func (s WoodStairs) Hash() uint64 { return hashWoodStairs | uint64(s.Wood.Uint8())<<7 | uint64(boolByte(s.UpsideDown))<<10 | uint64(s.Facing)<<11 } func (t WoodTrapdoor) Hash() uint64 { return hashWoodTrapdoor | uint64(t.Wood.Uint8())<<7 | uint64(t.Facing)<<10 | uint64(boolByte(t.Open))<<12 | uint64(boolByte(t.Top))<<13 } func (w Wool) Hash() uint64 { return hashWool | uint64(w.Colour.Uint8())<<7 }
server/block/hash.go
0.570331
0.50061
hash.go
starcoder
package targets import ( "fmt" "strconv" "strings" ) const MinDelegationHashPrefixBitLen = 1 const MaxDelegationHashPrefixBitLen = 32 // hexEncode formats x as a hex string. The hex string is left padded with // zeros to padWidth, if necessary. func hexEncode(x uint64, padWidth int) string { // Benchmarked to be more than 10x faster than padding with Sprintf. s := strconv.FormatUint(x, 16) if len(s) >= padWidth { return s } return strings.Repeat("0", padWidth-len(s)) + s } const bitsPerHexDigit = 4 // numHexDigits returns the number of hex digits required to encode the given // number of bits. func numHexDigits(numBits int) int { // ceil(numBits / bitsPerHexDigit) return ((numBits - 1) / bitsPerHexDigit) + 1 } // HashBins represents an ordered list of hash bin target roles, which together // partition the space of target path hashes equal-sized buckets based on path // has prefix. type HashBins struct { rolePrefix string bitLen int hexDigitLen int numBins uint64 numPrefixesPerBin uint64 } // NewHashBins creates a HashBins partitioning with 2^bitLen buckets. func NewHashBins(rolePrefix string, bitLen int) (*HashBins, error) { if bitLen < MinDelegationHashPrefixBitLen || bitLen > MaxDelegationHashPrefixBitLen { return nil, fmt.Errorf("bitLen is out of bounds, should be between %v and %v inclusive", MinDelegationHashPrefixBitLen, MaxDelegationHashPrefixBitLen) } hexDigitLen := numHexDigits(bitLen) numBins := uint64(1) << bitLen numPrefixesTotal := uint64(1) << (bitsPerHexDigit * hexDigitLen) numPrefixesPerBin := numPrefixesTotal / numBins return &HashBins{ rolePrefix: rolePrefix, bitLen: bitLen, hexDigitLen: hexDigitLen, numBins: numBins, numPrefixesPerBin: numPrefixesPerBin, }, nil } // NumBins returns the number of hash bin partitions. func (hb *HashBins) NumBins() uint64 { return hb.numBins } // GetBin returns the HashBin at index i, or nil if i is out of bounds. 
func (hb *HashBins) GetBin(i uint64) *HashBin { if i >= hb.numBins { return nil } return &HashBin{ rolePrefix: hb.rolePrefix, hexDigitLen: hb.hexDigitLen, first: i * hb.numPrefixesPerBin, last: ((i + 1) * hb.numPrefixesPerBin) - 1, } } // HashBin represents a hex prefix range. First should be less than Last. type HashBin struct { rolePrefix string hexDigitLen int first uint64 last uint64 } // RoleName returns the name of the role that signs for the HashBin. func (b *HashBin) RoleName() string { if b.first == b.last { return b.rolePrefix + hexEncode(b.first, b.hexDigitLen) } return b.rolePrefix + hexEncode(b.first, b.hexDigitLen) + "-" + hexEncode(b.last, b.hexDigitLen) } // HashPrefixes returns a slice of all hash prefixes in the bin. func (b *HashBin) HashPrefixes() []string { n := int(b.last - b.first + 1) ret := make([]string, int(n)) x := b.first for i := 0; i < n; i++ { ret[i] = hexEncode(x, b.hexDigitLen) x++ } return ret }
internal/targets/hash_bins.go
0.833325
0.549399
hash_bins.go
starcoder
package common import "math" // Schedule is a means of transforming values based on timesteps. type Schedule interface { // Value for the given step. Value() float32 // Initial value Initial() float32 } // ConstantSchedule just returns a constant value. type ConstantSchedule struct { value float32 } // NewConstantSchedule returns a new constant schedule. func NewConstantSchedule(value float32) *ConstantSchedule { return &ConstantSchedule{ value: value, } } // Value for the given step. func (c *ConstantSchedule) Value() float32 { return c.value } // Initial value. func (c *ConstantSchedule) Initial() float32 { return c.value } // LinearSchedule returns values on a linear means. type LinearSchedule struct { // numTimesteps is the number of timesteps in the schedule. numTimesteps float64 // initialValue for the schedule at the first timestep. initialValue float64 // finalValue for the schedule at the last timestep. finalValue float64 currentStep int } // NewLinearSchedule returns a new LinearSchedule. func NewLinearSchedule(numTimesteps int, initialValue, finalValue float32) *LinearSchedule { return &LinearSchedule{ numTimesteps: float64(numTimesteps), initialValue: float64(initialValue), finalValue: float64(finalValue), } } // Value for the given step. func (l *LinearSchedule) Value() float32 { fraction := math.Min(float64(l.currentStep)/l.numTimesteps, 1.0) l.currentStep++ return float32(l.initialValue + fraction*(l.finalValue-l.initialValue)) } // Initial value for the schedule. func (l *LinearSchedule) Initial() float32 { return float32(l.initialValue) } // DefaultLinearSchedule returns a linear schedule with some sensible defaults. func DefaultLinearSchedule(numTimesteps int) *LinearSchedule { return NewLinearSchedule(numTimesteps, 1.0, 0.1) } // DecaySchedule returns values on an exponential decay means. type DecaySchedule struct { // decay is the amount the value should decay at each step. 
decayRate float64 // initialValue for the schedule at the first timestep. initialValue float64 // minValue for the schedule at the last timestep. minValue float64 currentValue float64 } // NewDecaySchedule returns a new DecaySchedule. func NewDecaySchedule(decayRate, initialValue, minValue float32) *DecaySchedule { return &DecaySchedule{ decayRate: float64(decayRate), initialValue: float64(initialValue), minValue: float64(minValue), currentValue: float64(initialValue), } } // Value for the given step. Will decay with each call. func (d *DecaySchedule) Value() float32 { d.currentValue *= d.decayRate d.currentValue = math.Max(d.minValue, d.currentValue) return float32(d.currentValue) } // Initial value for the schedule. func (d *DecaySchedule) Initial() float32 { return float32(d.initialValue) } // DecayScheduleOpt is an option for a decay schedule. type DecayScheduleOpt func(*DecaySchedule) // WithDecayRate adds a decay rate to a default decay schedule. func WithDecayRate(rate float32) func(*DecaySchedule) { return func(d *DecaySchedule) { d.decayRate = float64(rate) } } // WithMinValue adds a minimum value rate to a default decay schedule. func WithMinValue(rate float32) func(*DecaySchedule) { return func(d *DecaySchedule) { d.decayRate = float64(rate) } } // DefaultDecaySchedule is the default decay schedule. func DefaultDecaySchedule(opts ...DecayScheduleOpt) *DecaySchedule { s := &DecaySchedule{ decayRate: 0.995, initialValue: 1.0, minValue: 0.01, currentValue: 1.0, } for _, opt := range opts { opt(s) } return s }
pkg/v1/common/schedule.go
0.875973
0.630372
schedule.go
starcoder
package byteio import ( "bytes" ) // Scanner defines an interface for scanning line-based data type Scanner interface { Scan() bool Bytes() []byte Error() error Position() int Length() int Seek(int) Filename() string } // ByteSliceScanner defines a structure for reading lines from a byte slice type ByteSliceScanner struct { beg, end int data, current []byte filename string } // NewByteSliceScanner returns a new ByteSliceReader reading from the give input slice func NewByteSliceScanner(data []byte, filename string) *ByteSliceScanner { data = skipBom(data) return &ByteSliceScanner{data: data, end: len(data), filename: filename} } // Scan scans the current slice for a new line and returns true if one was found func (r *ByteSliceScanner) Scan() bool { if end := bytes.IndexByte(r.data[r.beg:r.end], '\n'); end == -1 { r.current = r.data[r.beg:r.end] result := r.beg != r.end r.beg = r.end return result } else { end += r.beg if r.beg != end && r.data[end-1] == '\r' { r.current = r.data[r.beg : end-1] } else { r.current = r.data[r.beg:end] } r.beg = end + 1 if r.beg > r.end { r.beg = r.end } return true } } func (r *ByteSliceScanner) Error() error { return nil } // Bytes returns the byte slice of the current line func (r *ByteSliceScanner) Bytes() []byte { return r.current } func skipBom(data []byte) []byte { bom := []byte{0xEF, 0xBB, 0xBF} if bytes.HasPrefix(data, bom) { data = data[len(bom):] } return data } // CloneBytesSlice clones the input slice func CloneBytesSlice(input [][]byte) [][]byte { result := make([][]byte, 0, len(input)) length := 0 for _, s := range input { length += len(s) } data := make([]byte, 0, length) for _, s := range input { data = append(data, s...) 
result = append(result, data[len(data)-len(s):]) } return result } func (r *ByteSliceScanner) Position() int { return r.beg } func (r *ByteSliceScanner) Length() int { return r.beg } func (r *ByteSliceScanner) Seek(pos int) { if r.beg = pos; r.beg > r.end { r.beg = r.end } else if r.beg < 0 { r.beg = 0 } } func (r *ByteSliceScanner) Filename() string { return r.filename }
byteio/byteslicescanner.go
0.657209
0.409457
byteslicescanner.go
starcoder
package draw2dAnimation import ( "image/color" "math" ) // An abstract figure type. Represents a base struct for all figures. type Figure struct { subClass Figurer id int depth int startPoint Point rotationDegrees float64 fillColor color.RGBA strokeColor color.RGBA isFilled bool lineWidth float64 scale Point updateTypes updateType updateTranslation Point updateRotationDegrees float64 updateMethod *UpdateMethod } // Default constructor. func NewFigure() *Figure { return NewFigure4(0, Point{0.0, 0.0}, 0.0, 1.0) } // Constructor accepting depth(layer in the image), startPoint(to which all actions are related) and rotation degrees. func NewFigure4(depth int, startPoint Point, rotationDegrees float64, lineWidth float64) *Figure { nextFigureId++ return &Figure{ id: nextFigureId, depth: depth, startPoint: startPoint, rotationDegrees: rotationDegrees, fillColor: color.RGBA{255, 255, 255, 255}, strokeColor: color.RGBA{0, 0, 0, 255}, isFilled: false, lineWidth: lineWidth, scale: Point{1.0, 1.0}, updateTypes: None} } // Gets the current instance. Has meaning to be used from struct extending this one if need. func (this *Figure) GetBase() *Figure { return this } // Sets the final figure in the extending chain. Should be called in the constructor of each extending struct. Has meaning to be used from struct extending this one to use function as virtual. func (this *Figure) SetSubClass(value Figurer) { this.subClass = value } // Gets the unique ID for the figure. Used to maintain order of figures when their depth is equal. func (this *Figure) getId() int { return this.id } // Gets the depth(layer) of the figure in the image. func (this *Figure) GetDepth() int { return this.depth } // Sets the depth(layer) of the figure in the image. func (this *Figure) SetDepth(value int) { this.depth = value } // Gets the start point of the figure. func (this *Figure) GetStartPoint() Point { return Point{this.startPoint.X, this.startPoint.Y} } //Sets the start point of the figure. 
func (this *Figure) SetStartPoint(value Point) { this.startPoint = value } // Get the current degrees by which the figure is rotated. func (this *Figure) GetRotationDegrees() float64 { return this.rotationDegrees } // Sets the rotation of the figure for the time after the next call of Update() func (this *Figure) SetRotationDegrees(value float64) { this.rotationDegrees = value } // Gets the color used to fill the figure. func (this *Figure) GetFillColor() color.RGBA { return this.fillColor } // Sets the color to be used to fill the figure. Automatically set the figure as one to be filled. func (this *Figure) SetFillColor(value color.RGBA) { this.fillColor = value this.isFilled = true } // Gets to color used for drawing the contour of the figure. func (this *Figure) GetStrokeColor() color.RGBA { return this.strokeColor } // Sets the color to be used to draw the contour of the figure. func (this *Figure) SetStrokeColor(value color.RGBA) { this.strokeColor = value } // Gets whether the figure should be filled or stroked. func (this *Figure) GetIsFilled() bool { return this.isFilled } // Sets whether the figure should be filled or stroked. func (this *Figure) SetIsFilled(value bool) { this.isFilled = value } // Gets the line width of the figure. func (this *Figure) GetLineWidth() float64 { return this.lineWidth } // Sets the line width of the figure. func (this *Figure) SetLineWidth(value float64) { this.lineWidth = value } // Gets the current scale ratio. func (this *Figure) GetScale() Point { return this.scale } // Sets the scale of the figure. func (this *Figure) SetScale(value Point) { this.scale = value } // Gets the degrees by which the figure rotates on each call of Update(). func (this *Figure) GetUpdateRotationDegrees() float64 { return this.updateRotationDegrees } // Sets the degrees by which the figure should rotate on each call of Update(). 
func (this *Figure) SetUpdateRotationDegrees(value float64) { if value == 0.0 { this.updateTypes &^= Rotation } else { this.updateTypes |= Rotation } this.updateRotationDegrees = value } // Gets the vector by which the figure is translated on each call of Update(). func (this *Figure) GetUpdateTranslation() Point { return this.updateTranslation } // Sets the vector by which the figure should translate on each call of Update(). func (this *Figure) SetUpdateTranslation(value Point) { if value.X == 0 && value.Y == 0 { this.updateTypes &^= Translation } else { this.updateTypes |= Translation } this.updateTranslation = value } // Gets the update method used to update the figure on each call of Update(). func (this *Figure) GetUpdateMethod() *UpdateMethod { return this.updateMethod } // Sets the update method to be used for updating the figure on each call of Update(). A copy of the update method is used to avoid use of the same method by more than one figures. func (this *Figure) SetUpdateMethod(value *UpdateMethod) { if value == nil { this.updateTypes &^= Custom } else { this.updateTypes |= Custom } // copy by value updateMethod := *value; this.updateMethod = &updateMethod } // Draws the figure taking into account the translation, rotation and the rest common properties of all figures and using the implemented by the extending subClass Visualize() method. 
func (this Figure) Draw() {
	// Renders the figure onto the shared image graphic context, applying the
	// figure's translation (startPoint), rotation, style properties and scale
	// before delegating the actual geometry to subClass.Visualize().
	// NOTE(review): Draw uses a value receiver while every other Figure method
	// uses a pointer receiver — confirm this asymmetry is intentional.
	graphicContext := GetTheImageGraphicContext()
	// Save/Restore bracket all transformations and style changes so they do
	// not leak into subsequent drawing operations on the shared context.
	graphicContext.Save()
	if this.startPoint.X != 0 || this.startPoint.Y != 0 {
		graphicContext.Translate(this.startPoint.X, this.startPoint.Y)
	}
	if this.rotationDegrees != 0.0 {
		// The context works in radians; rotationDegrees is stored in degrees.
		graphicContext.Rotate(this.rotationDegrees * (math.Pi / 180.0))
	}
	graphicContext.SetLineWidth(this.lineWidth)
	if this.isFilled {
		graphicContext.SetFillColor(this.fillColor)
	}
	graphicContext.SetStrokeColor(this.strokeColor)
	if this.scale.X != 1.0 || this.scale.Y != 1.0 {
		graphicContext.Scale(this.scale.X, this.scale.Y)
	}
	// The concrete figure type contributes its own path.
	this.subClass.Visualize()
	if this.isFilled {
		graphicContext.FillStroke()
	} else {
		graphicContext.Stroke()
	}
	graphicContext.Restore()
}

// Update advances the figure by one animation step: it applies the custom
// update method (if any), the per-step translation, and the per-step rotation,
// as selected by the updateTypes bit flags.
func (this *Figure) Update() {
	if (this.updateTypes & Custom) != 0 {
		this.updateMethod.Update(this.subClass)
	}
	if (this.updateTypes & Translation) != 0 {
		this.startPoint.X += this.updateTranslation.X
		this.startPoint.Y += this.updateTranslation.Y
	}
	if (this.updateTypes & Rotation) != 0 {
		this.rotationDegrees += this.updateRotationDegrees
	}
}
draw2dAnimation/figure.go
0.911859
0.594581
figure.go
starcoder
package forGraphBLASGo

// MxM replaces C's reference with a lazily computed matrix that represents the
// semiring product op(A, B), with A and/or B treated as transposed when the
// descriptor sets Inp0/Inp1 to Tran, masked by mask and merged into the old C
// via accum. Returns DimensionMismatch when the (possibly transposed) operand
// sizes are incompatible with C.
func MxM[DC, DA, DB any](C *Matrix[DC], mask *Matrix[bool], accum BinaryOp[DC, DC, DC], op Semiring[DC, DA, DB], A *Matrix[DA], B *Matrix[DB], desc Descriptor) error {
	nrows, ncols, err := C.Size()
	if err != nil {
		return err
	}
	Anrows, Ancols, err := A.Size()
	if err != nil {
		return err
	}
	Bnrows, Bncols, err := B.Size()
	if err != nil {
		return err
	}
	AIsTran, err := desc.Is(Inp0, Tran)
	if err != nil {
		// A descriptor that cannot be queried is treated as a programming
		// error rather than a recoverable condition.
		panic(err)
	}
	if AIsTran {
		// Use A's transposed dimensions for the compatibility check below.
		Anrows, Ancols = Ancols, Anrows
	}
	BIsTran, err := desc.Is(Inp1, Tran)
	if err != nil {
		panic(err)
	}
	if BIsTran {
		Bnrows, Bncols = Bncols, Bnrows
	}
	if nrows != Anrows || ncols != Bncols || Ancols != Bnrows {
		return DimensionMismatch
	}
	maskAsStructure, err := matrixMask(mask, nrows, ncols)
	if err != nil {
		return err
	}
	// Build the deferred computation; -1 presumably marks an unknown size or
	// reference count — TODO confirm against newMatrixReference.
	C.ref = newMatrixReference[DC](newComputedMatrix[DC](
		nrows, ncols, C.ref, maskAsStructure, accum,
		newMatrixMult[DC, DA, DB](op, maybeTran(A.ref, AIsTran), maybeTran(B.ref, BIsTran)),
		desc,
	), -1)
	return nil
}

// VxM replaces w's reference with a lazily computed vector representing the
// row-vector/matrix product op(u, A), with A transposed when the descriptor
// sets Inp1 to Tran, masked by mask and merged into the old w via accum.
func VxM[Dw, Du, DA any](w *Vector[Dw], mask *Vector[bool], accum BinaryOp[Dw, Dw, Dw], op Semiring[Dw, Du, DA], u *Vector[Du], A *Matrix[DA], desc Descriptor) error {
	wsize, err := w.Size()
	if err != nil {
		return err
	}
	usize, err := u.Size()
	if err != nil {
		return err
	}
	// expectSizeTran performs the dimension check that MxM does inline.
	AIsTran, err := A.expectSizeTran(usize, wsize, desc, Inp1)
	if err != nil {
		return err
	}
	maskAsStructure, err := vectorMask(mask, wsize)
	if err != nil {
		return err
	}
	w.ref = newVectorReference[Dw](newComputedVector[Dw](
		wsize, w.ref, maskAsStructure, accum,
		newVxM[Dw](op, u.ref, maybeTran(A.ref, AIsTran)),
		desc,
	), -1)
	return nil
}

// MxV replaces w's reference with a lazily computed vector representing the
// matrix/column-vector product op(A, u), with A transposed when the descriptor
// sets Inp0 to Tran, masked by mask and merged into the old w via accum.
func MxV[Dw, DA, Du any](w *Vector[Dw], mask *Vector[bool], accum BinaryOp[Dw, Dw, Dw], op Semiring[Dw, DA, Du], A *Matrix[DA], u *Vector[Du], desc Descriptor) error {
	wsize, err := w.Size()
	if err != nil {
		return err
	}
	usize, err := u.Size()
	if err != nil {
		return err
	}
	AIsTran, err := A.expectSizeTran(wsize, usize, desc, Inp0)
	if err != nil {
		return err
	}
	maskAsStructure, err := vectorMask(mask, wsize)
	if err != nil {
		return err
	}
	w.ref = newVectorReference[Dw](newComputedVector[Dw](
		wsize, w.ref, maskAsStructure, accum,
		newMxV[Dw](op, maybeTran(A.ref, AIsTran), u.ref),
		desc,
	), -1)
	return nil
}

// KroneckerBinaryOp replaces C's reference with a lazily computed Kronecker
// product of A and B under the element-wise operator op. C must have size
// (Anrows*Bnrows) x (Ancols*Bncols) after any descriptor-requested transposes.
func KroneckerBinaryOp[DC, DA, DB any](C *Matrix[DC], mask *Matrix[bool], accum BinaryOp[DC, DC, DC], op BinaryOp[DC, DA, DB], A *Matrix[DA], B *Matrix[DB], desc Descriptor) error {
	Anrows, Ancols, err := A.Size()
	if err != nil {
		return err
	}
	Bnrows, Bncols, err := B.Size()
	if err != nil {
		return err
	}
	AIsTran, err := desc.Is(Inp0, Tran)
	if err != nil {
		panic(err)
	}
	if AIsTran {
		Anrows, Ancols = Ancols, Anrows
	}
	BIsTran, err := desc.Is(Inp1, Tran)
	if err != nil {
		panic(err)
	}
	if BIsTran {
		Bnrows, Bncols = Bncols, Bnrows
	}
	// The Kronecker product of an m x n and a p x q matrix is mp x nq.
	nrows, ncols := Anrows*Bnrows, Ancols*Bncols
	if err = C.expectSize(nrows, ncols); err != nil {
		return err
	}
	maskAsStructure, err := matrixMask(mask, nrows, ncols)
	if err != nil {
		return err
	}
	C.ref = newMatrixReference[DC](newComputedMatrix[DC](
		nrows, ncols, C.ref, maskAsStructure, accum,
		newKroneckerBinaryOp[DC, DA, DB](op, maybeTran(A.ref, AIsTran), maybeTran(B.ref, BIsTran)),
		desc,
	), -1)
	return nil
}

// KroneckerMonoid is KroneckerBinaryOp with the monoid's binary operator.
func KroneckerMonoid[D any](C *Matrix[D], mask *Matrix[bool], accum BinaryOp[D, D, D], op Monoid[D], A, B *Matrix[D], desc Descriptor) error {
	return KroneckerBinaryOp(C, mask, accum, op.operator(), A, B, desc)
}

// KroneckerSemiring is KroneckerBinaryOp with the semiring's multiplication
// operator.
func KroneckerSemiring[DC, DA, DB any](C *Matrix[DC], mask *Matrix[bool], accum BinaryOp[DC, DC, DC], op Semiring[DC, DA, DB], A *Matrix[DA], B *Matrix[DB], desc Descriptor) error {
	return KroneckerBinaryOp(C, mask, accum, op.multiplication(), A, B, desc)
}
api_Mult.go
0.638497
0.454896
api_Mult.go
starcoder
package matrix

import (
	"strconv"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
)

// Combinations is a slice of combinations of Parameters from a Matrix.
type Combinations []*Combination

// Combination is a specific combination of Parameters from a Matrix.
type Combination struct {
	// MatrixID is an identification of a combination from Parameters in a Matrix.
	MatrixID string

	// Params is a specific combination of Parameters in a Matrix.
	Params []v1beta1.Param
}

// fanOut expands the existing combinations with the values of param, or starts
// the combinations from param when none exist yet.
func (combinations Combinations) fanOut(param v1beta1.Param) Combinations {
	if len(combinations) == 0 {
		return initializeCombinations(param)
	}
	return combinations.distribute(param)
}

// distribute crosses an additional matrix parameter with the existing
// combinations, producing len(combinations) * len(param values) combinations.
func (combinations Combinations) distribute(param v1beta1.Param) Combinations {
	// when there are existing combinations, this is a non-first parameter in the matrix, and we need to distribute
	// it among the existing combinations
	var expandedCombinations Combinations
	var count int
	for _, value := range param.Value.ArrayVal {
		for _, combination := range combinations {
			expandedCombinations = append(expandedCombinations, createCombination(count, param.Name, value, combination.Params))
			count++
		}
	}
	return expandedCombinations
}

// initializeCombinations seeds the combinations from the first matrix
// parameter, one combination per value.
func initializeCombinations(param v1beta1.Param) Combinations {
	// when there are no existing combinations, this is the first parameter in the matrix, so we initialize the
	// combinations with the first Parameter
	var combinations Combinations
	for i, value := range param.Value.ArrayVal {
		combinations = append(combinations, createCombination(i, param.Name, value, []v1beta1.Param{}))
	}
	return combinations
}

// createCombination builds one combination from the parent parameters plus the
// (name, value) pair of the parameter currently being distributed.
func createCombination(i int, name string, value string, parameters []v1beta1.Param) *Combination {
	// BUG FIX: copy the parent parameters before appending. distribute passes
	// the same combination.Params slice here once per value of the new
	// parameter; a bare append would reuse its backing array whenever it has
	// spare capacity, making sibling combinations alias each other and
	// overwrite each other's final Param.
	params := make([]v1beta1.Param, len(parameters), len(parameters)+1)
	copy(params, parameters)
	params = append(params, v1beta1.Param{
		Name:  name,
		Value: v1beta1.ArrayOrString{Type: v1beta1.ParamTypeString, StringVal: value},
	})
	return &Combination{
		MatrixID: strconv.Itoa(i),
		Params:   params,
	}
}

// ToMap converts a list of Combinations to a map where the key is the matrixId and the values are Parameters.
func (combinations Combinations) ToMap() map[string][]v1beta1.Param { m := map[string][]v1beta1.Param{} for _, combination := range combinations { m[combination.MatrixID] = combination.Params } return m }
pkg/matrix/matrix_types.go
0.74008
0.400075
matrix_types.go
starcoder